diff --git a/.gitattributes b/.gitattributes index 89c411b5ce6b..5aa29e952a14 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,3 @@ *.c diff=cpp *.h diff=cpp +dist/ export-ignore diff --git a/.gitignore b/.gitignore index 70580bdd352c..f39c82316b23 100644 --- a/.gitignore +++ b/.gitignore @@ -146,3 +146,7 @@ x509.genkey # Clang's compilation database file /compile_commands.json + +# Tencent dist files +/dist/rpm +/dist/workdir diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 680451695422..c3767d4d01a6 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -1566,7 +1566,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw KernelVersion: 4.3 Contact: linux-iio@vger.kernel.org Description: - Raw (unscaled no offset etc.) percentage reading of a substance. + Raw (unscaled no offset etc.) reading of a substance. Units + after application of scale and offset are percent. What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index fc20cde63d1e..b39531a3c5bc 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -196,6 +196,12 @@ Description: does not reflect it. Likewise, if one enables a deep state but a lighter state still is disabled, then this has no effect. +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/default_status +Date: December 2019 +KernelVersion: v5.6 +Contact: Linux power management list +Description: + (RO) The default status of this state, "enabled" or "disabled". What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency Date: March 2014 @@ -486,6 +492,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit Date: January 2018 diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst index a30aa91b5fbe..3463883844c0 100644 --- a/Documentation/admin-guide/device-mapper/dm-integrity.rst +++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst @@ -177,6 +177,12 @@ bitmap_flush_interval:number The bitmap flush interval in milliseconds. The metadata buffers are synchronized when this interval expires. +legacy_recalculate + Allow recalculation of volumes with HMAC keys. This is disabled by + default for security reasons - an attacker could modify the volume, + set recalc_sector to zero, and the kernel would not detect the + modification. + The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can be changed when reloading the target (load an inactive table and swap the diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 0795e3c2643f..ca4dbdd9016d 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -14,3 +14,4 @@ are configurable at compile, boot or run time.
mds tsx_async_abort multihit.rst + special-register-buffer-data-sampling.rst diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst new file mode 100644 index 000000000000..47b1b3afac99 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst @@ -0,0 +1,149 @@ +.. SPDX-License-Identifier: GPL-2.0 + +SRBDS - Special Register Buffer Data Sampling +============================================= + +SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to +infer values returned from special register accesses. Special register +accesses are accesses to off-core registers. According to Intel's evaluation, +the special register reads that have a security expectation of privacy are +RDRAND, RDSEED and SGX EGETKEY. + +When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved +to the core through the special register mechanism that is susceptible +to MDS attacks. + +Affected processors +-------------------- +Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may +be affected. + +A processor is affected by SRBDS if its Family_Model and stepping is +in the following list, with the exception of the listed processors +exporting MDS_NO while Intel TSX is available yet not enabled. The +latter class of processors is only affected when Intel TSX is enabled +by software using TSX_CTRL_MSR; otherwise they are not affected. + + ============= ============ ======== + common name Family_Model Stepping + ============= ============ ======== + IvyBridge 06_3AH All + + Haswell 06_3CH All + Haswell_L 06_45H All + Haswell_G 06_46H All + + Broadwell_G 06_47H All + Broadwell 06_3DH All + + Skylake_L 06_4EH All + Skylake 06_5EH All + + Kabylake_L 06_8EH <= 0xC + Kabylake 06_9EH <= 0xD + ============= ============ ======== + +Related CVEs +------------ + +The following CVE entry is related to this SRBDS issue: + + ============== ===== ===================================== + CVE-2020-0543 SRBDS Special Register Buffer Data Sampling + ============== ===== ===================================== + +Attack scenarios +---------------- +An unprivileged user can extract values returned from RDRAND and RDSEED +executed on another core or sibling thread using MDS techniques. + + +Mitigation mechanism +-------------------- +Intel will release microcode updates that modify the RDRAND, RDSEED, and +EGETKEY instructions to overwrite secret special register data in the shared +staging buffer before the secret data can be accessed by another logical +processor. + +During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core +accesses from other logical processors will be delayed until the special +register read is complete and the secret data in the shared staging buffer is +overwritten. + +This has three effects on performance: + +#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. + +#. Executing RDRAND at the same time on multiple logical processors will be + serialized, resulting in an overall reduction in the maximum RDRAND + bandwidth. + +#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other + logical processors that miss their core caches, with an impact similar to + legacy locked cache-line-split accesses.
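The first effect can be observed directly from user space with a small micro-benchmark. The sketch below is purely illustrative and not part of the kernel tree; it assumes a GCC/Clang toolchain with RDRAND support (build with ``gcc -O2 -mrdrnd``) and simply reports the average latency of a successful RDRAND::

    #include <immintrin.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            const int iters = 1000000;
            unsigned long long v;
            struct timespec t0, t1;

            clock_gettime(CLOCK_MONOTONIC, &t0);
            for (int i = 0; i < iters; i++) {
                    /* Retry on the (rare) failure case, CF=0. */
                    while (!_rdrand64_step(&v))
                            ;
            }
            clock_gettime(CLOCK_MONOTONIC, &t1);

            double ns = (t1.tv_sec - t0.tv_sec) * 1e9 +
                        (t1.tv_nsec - t0.tv_nsec);
            printf("avg RDRAND latency: %.1f ns\n", ns / iters);
            return 0;
    }

Running such a benchmark with the mitigation enabled and again with "srbds=off" (described below) gives a rough measure of the added latency on a given part.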
+ +The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable +the mitigation for RDRAND and RDSEED instructions executed outside of Intel +Software Guard Extensions (Intel SGX) enclaves. On logical processors that +disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not +take longer to execute and do not impact performance of sibling logical +processors' memory accesses. The opt-out mechanism does not affect Intel SGX +enclaves (including execution of RDRAND or RDSEED inside an enclave, as well +as EGETKEY execution). + +IA32_MCU_OPT_CTRL MSR Definition +-------------------------------- +Along with the mitigation for this issue, Intel added a new thread-scope +IA32_MCU_OPT_CTRL MSR (address 0x123). The presence of this MSR and +RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL = +9]==1. This MSR is introduced through the microcode update. + +Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor +disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX +enclave on that logical processor. Opting out of the mitigation for a +particular logical processor does not affect the RDRAND and RDSEED mitigations +for other logical processors. + +Note that inside of an Intel SGX enclave, the mitigation is applied regardless +of the value of RNGDS_MITG_DIS. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows control over the SRBDS mitigation at boot time +with the option "srbds=". The option for this is: + + ============= ============================================================= + off This option disables SRBDS mitigation for RDRAND and RDSEED on + affected platforms. + ============= ============================================================= + +SRBDS System Information +------------------------ +The Linux kernel provides vulnerability status information through sysfs. For +SRBDS this can be accessed by the following sysfs file: +/sys/devices/system/cpu/vulnerabilities/srbds + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable + Vulnerable Processor vulnerable and mitigation disabled + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: TSX disabled Processor is only vulnerable when TSX is + enabled, and this system was booted with TSX + disabled. + Unknown: Dependent on + hypervisor status Running on a virtual guest processor that is + affected but with no way to know if the host + processor is mitigated or vulnerable. + ============================== ============================================= + +SRBDS Default mitigation +------------------------ +This new microcode serializes processor access during execution of RDRAND or +RDSEED, and ensures that the shared buffer is overwritten before it is +released for reuse. Use the "srbds=off" kernel command line to disable the +mitigation for RDRAND and RDSEED. diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst index 5d63b18bd6d1..60c45c916f7d 100644 --- a/Documentation/admin-guide/iostats.rst +++ b/Documentation/admin-guide/iostats.rst @@ -99,7 +99,7 @@ Field 10 -- # of milliseconds spent doing I/Os Since 5.0 this field counts jiffies when at least one request was started or completed.
If request runs more than 2 jiffies then some - I/O time will not be accounted unless there are other requests. + I/O time might not be accounted in the case of concurrent requests. Field 11 -- weighted # of milliseconds spent doing I/Os This field is incremented at each I/O start, I/O completion, I/O @@ -133,6 +133,9 @@ are summed (possibly overflowing the unsigned long variable they are summed to) and the result given to the user. There is no convenient user interface for accessing the per-CPU counters themselves. +Since 4.19 request times are measured with nanosecond precision and +truncated to milliseconds before being shown in this interface. + Disks vs Partitions ------------------- diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b5c933fa971f..47eb94d8d011 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -567,7 +567,7 @@ loops can be debugged more effectively on production systems. - clearcpuid=BITNUM [X86] + clearcpuid=BITNUM[,BITNUM...] [X86] Disable CPUID feature X for the kernel. See arch/x86/include/asm/cpufeatures.h for the valid bit numbers. Note the Linux specific bits are not necessarily @@ -2667,6 +2667,8 @@ mds=off [X86] tsx_async_abort=off [X86] kvm.nx_huge_pages=off [X86] + no_entry_flush [PPC] + no_uaccess_flush [PPC] Exceptions: This does not have any effect on @@ -2741,7 +2743,7 @@ ,[,,,,] mtdparts= [MTD] - See drivers/mtd/cmdlinepart.c. + See drivers/mtd/parsers/cmdlinepart.c. multitce=off [PPC] This parameter disables the use of the pSeries firmware feature for updating multiple TCE entries @@ -2936,6 +2938,8 @@ no5lvl [X86-64] Disable 5-level paging mode. Forces kernel to use 4-level paging instead. + nofsgsbase [X86] Disables FSGSBASE instructions. + no_console_suspend [HW] Never suspend the console Disable suspending of consoles during suspend and @@ -2989,6 +2993,8 @@ noefi Disable EFI runtime services support. + no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel. + noexec [IA-64] noexec [X86] @@ -3038,6 +3044,9 @@ nospec_store_bypass_disable [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + no_uaccess_flush + [PPC] Don't flush the L1-D cache after accessing user data. + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -3171,6 +3180,8 @@ nosep [BUGS=X86-32] Disables x86 SYSENTER/SYSEXIT support. + nosgx [X86-64,SGX] Disables Intel SGX kernel support. + nosmp [SMP] Tells an SMP kernel to act as a UP kernel, and disable the IO APIC. legacy for "maxcpus=0". @@ -3577,6 +3588,8 @@ even if the platform doesn't give the OS permission to use them. This may cause conflicts if the platform also tries to use these services. + dpc-native Use native PCIe service for DPC only. May + cause conflicts if firmware uses AER or DPC. compat Disable native PCIe services (PME, AER, DPC, PCIe hotplug). @@ -3771,6 +3784,14 @@ [KNL] Number of legacy pty's. Overwrites compiled-in default number. +qspinlock.numa_spinlock_threshold_ns= [NUMA, PV_OPS] + Set the time threshold in nanoseconds for the + number of intra-node lock hand-offs before the + NUMA-aware spinlock is forced to be passed to + a thread on another NUMA node. Smaller values + result in a fairer, but less performant, spinlock, + and vice versa. The default value is 1000000 (=1ms).
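As an illustration, the threshold could be lowered to favor fairness over throughput by appending the parameter to the kernel boot line. The fragment below is a hypothetical /etc/default/grub snippet, and the value shown is purely illustrative, not a recommendation::

    GRUB_CMDLINE_LINUX="qspinlock.numa_spinlock_threshold_ns=500000"

The GRUB configuration then needs to be regenerated (e.g. with update-grub on Debian-style systems) and the system rebooted for the new threshold to take effect.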
+ quiet [KNL] Disable most log messages r128= [HW,DRM] @@ -4579,6 +4600,26 @@ spia_pedr= spia_peddr= + srbds= [X86,INTEL] + Control the Special Register Buffer Data Sampling + (SRBDS) mitigation. + + Certain CPUs are vulnerable to an MDS-like + exploit which can leak bits from the random + number generator. + + By default, this issue is mitigated by + microcode. However, the microcode fix can cause + the RDRAND and RDSEED instructions to become + much slower. Among other effects, this will + result in reduced throughput from /dev/urandom. + + The microcode mitigation can be disabled with + the following option: + + off: Disable mitigation and remove + performance impact to RDRAND and RDSEED + srcutree.counter_wrap_check [KNL] Specifies how frequently to check for grace-period sequence counter wrap for the @@ -5005,8 +5046,7 @@ usbcore.old_scheme_first= [USB] Start with the old device initialization - scheme, applies only to low and full-speed devices - (default 0 = off). + scheme (default 0 = off). usbcore.usbfs_memory_mb= [USB] Memory limit (in MB) for buffers allocated by @@ -5125,6 +5165,7 @@ device); j = NO_REPORT_LUNS (don't use report luns command, uas only); + k = NO_SAME (do not use WRITE_SAME, uas only) l = NOT_LOCKABLE (don't try to lock and unlock ejectable media, not on uas); m = MAX_SECTORS_64 (don't transfer more @@ -5425,6 +5466,10 @@ This option is obsoleted by the "nopv" option, which has equivalent effect for XEN platform. + xen_no_vector_callback + [KNL,X86,XEN] Disable the vector callback for Xen + event channel interrupts. + xen_scrub_pages= [XEN] Boolean option to control scrubbing pages before giving them back to Xen, for use by other domains. Can be also changed at runtime @@ -5443,6 +5488,18 @@ as generic guest with no PV drivers. Currently support XEN HVM, KVM, HYPER_V and VMWARE guest. + xen.event_eoi_delay= [XEN] + How long to delay EOI handling in case of event + storms (jiffies). Default is 10. + + xen.event_loop_timeout= [XEN] + After which time (jiffies) the event handling loop + should start to delay EOI handling. Default is 2. + nopvspin [X86,XEN,KVM] + Disables the qspinlock slow path using PV optimizations + which allow the hypervisor to 'idle' the guest on lock + contention. + xirc2ps_cs= [NET,PCMCIA] Format: ,,,,,[,[,[,]]] diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst index e70b365dbc60..311cd7cc2b75 100644 --- a/Documentation/admin-guide/pm/cpuidle.rst +++ b/Documentation/admin-guide/pm/cpuidle.rst @@ -506,6 +506,9 @@ object corresponding to it, as follows: ``disable`` Whether or not this idle state is disabled. +``default_status`` + The default status of this state, "enabled" or "disabled". + ``latency`` Exit latency of the idle state in microseconds. diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst new file mode 100644 index 000000000000..afbf778035f8 --- /dev/null +++ b/Documentation/admin-guide/pm/intel_idle.rst @@ -0,0 +1,246 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: + +============================================== +``intel_idle`` CPU Idle Time Management Driver +============================================== + +:Copyright: |copy| 2020 Intel Corporation + +:Author: Rafael J. Wysocki + + +General Information +=================== + +``intel_idle`` is a part of the +:doc:`CPU idle time management subsystem ` in the Linux kernel +(``CPUIdle``). 
It is the default CPU idle time management driver for the +Nehalem and later generations of Intel processors, but the level of support for +a particular processor model in it depends on whether or not it recognizes that +processor model and may also depend on information coming from the platform +firmware. [To understand ``intel_idle`` it is necessary to know how ``CPUIdle`` +works in general, so this is the time to get familiar with :doc:`cpuidle` if you +have not done that yet.] + +``intel_idle`` uses the ``MWAIT`` instruction to inform the processor that the +logical CPU executing it is idle and so it may be possible to put some of the +processor's functional blocks into low-power states. That instruction takes two +arguments (passed in the ``EAX`` and ``ECX`` registers of the target CPU), the +first of which, referred to as a *hint*, can be used by the processor to +determine what can be done (for details refer to Intel Software Developer’s +Manual [1]_). Accordingly, ``intel_idle`` refuses to work with processors in +which the support for the ``MWAIT`` instruction has been disabled (for example, +via the platform firmware configuration menu) or which do not support that +instruction at all. + +``intel_idle`` is not modular, so it cannot be unloaded, which means that the +only way to pass early-configuration-time parameters to it is via the kernel +command line. + + +.. _intel-idle-enumeration-of-states: + +Enumeration of Idle States +========================== + +Each ``MWAIT`` hint value is interpreted by the processor as a license to +reconfigure itself in a certain way in order to save energy. The processor +configurations (with reduced power draw) resulting from that are referred to +as C-states (in the ACPI terminology) or idle states. The list of meaningful +``MWAIT`` hint values and idle states (i.e. low-power configurations of the +processor) corresponding to them depends on the processor model and it may also +depend on the configuration of the platform. + +In order to create a list of available idle states required by the ``CPUIdle`` +subsystem (see :ref:`idle-states-representation` in :doc:`cpuidle`), +``intel_idle`` can use two sources of information: static tables of idle states +for different processor models included in the driver itself and the ACPI tables +of the system. The former are always used if the processor model at hand is +recognized by ``intel_idle`` and the latter are used if that is required for +the given processor model (which is the case for all server processor models +recognized by ``intel_idle``) or if the processor model is not recognized. + +If the ACPI tables are going to be used for building the list of available idle +states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI +objects corresponding to the CPUs in the system (refer to the ACPI specification +[2]_ for the description of ``_CST`` and its output package). Because the +``CPUIdle`` subsystem expects that the list of idle states supplied by the +driver will be suitable for all of the CPUs handled by it and ``intel_idle`` is +registered as the ``CPUIdle`` driver for all of the CPUs in the system, the +driver looks for the first ``_CST`` object returning at least one valid idle +state description and such that all of the idle states included in its return +package are of the FFH (Functional Fixed Hardware) type, which means that the +``MWAIT`` instruction is expected to be used to tell the processor that it can +enter one of them. 
The return package of that ``_CST`` is then assumed to be +applicable to all of the other CPUs in the system and the idle state +descriptions extracted from it are stored in a preliminary list of idle states +coming from the ACPI tables. [This step is skipped if ``intel_idle`` is +configured to ignore the ACPI tables; see `below `_.] + +Next, the first (index 0) entry in the list of available idle states is +initialized to represent a "polling idle state" (a pseudo-idle state in which +the target CPU continuously fetches and executes instructions), and the +subsequent (real) idle state entries are populated as follows. + +If the processor model at hand is recognized by ``intel_idle``, there is a +(static) table of idle state descriptions for it in the driver. In that case, +the "internal" table is the primary source of information on idle states and the +information from it is copied to the final list of available idle states. If +using the ACPI tables for the enumeration of idle states is not required +(depending on the processor model), all of the listed idle states are enabled by +default (so all of them will be taken into consideration by ``CPUIdle`` +governors during CPU idle state selection). Otherwise, some of the listed idle +states may not be enabled by default if there are no matching entries in the +preliminary list of idle states coming from the ACPI tables. In that case, user +space can still enable them later (on a per-CPU basis) with the help of +the ``disable`` idle state attribute in ``sysfs`` (see +:ref:`idle-states-representation` in :doc:`cpuidle`). This basically means that +the idle states "known" to the driver may not be enabled by default if they have +not been exposed by the platform firmware (through the ACPI tables). + +If the given processor model is not recognized by ``intel_idle``, but it +supports ``MWAIT``, the preliminary list of idle states coming from the ACPI +tables is used for building the final list that will be supplied to the +``CPUIdle`` core during driver registration. For each idle state in that list, +the description, ``MWAIT`` hint and exit latency are copied to the corresponding +entry in the final list of idle states. The name of the idle state represented +by it (to be returned by the ``name`` idle state attribute in ``sysfs``) is +"CX_ACPI", where X is the index of that idle state in the final list (note that +the minimum value of X is 1, because 0 is reserved for the "polling" state), and +its target residency is based on the exit latency value. Specifically, for +C1-type idle states the exit latency value is also used as the target residency +(for compatibility with the majority of the "internal" tables of idle states for +various processor models recognized by ``intel_idle``) and for the other idle +state types (C2 and C3) the target residency value is 3 times the exit latency +(again, that is because it reflects the target residency to exit latency ratio +in the majority of cases for the processor models recognized by ``intel_idle``). +All of the idle states in the final list are enabled by default in this case. + + +.. _intel-idle-initialization: + +Initialization +============== + +The initialization of ``intel_idle`` starts with checking if the kernel command +line options forbid the use of the ``MWAIT`` instruction. If that is the case, +an error code is returned right away.
+ +The next step is to check whether or not the processor model is known to the +driver, which determines the idle states enumeration method (see +`above `_), and whether or not the processor +supports ``MWAIT`` (the initialization fails if that is not the case). Then, +the ``MWAIT`` support in the processor is enumerated through ``CPUID`` and the +driver initialization fails if the level of support is not as expected (for +example, if the total number of ``MWAIT`` substates returned is 0). + +Next, if the driver is not configured to ignore the ACPI tables (see +`below `_), the idle states information provided by the +platform firmware is extracted from them. + +Then, ``CPUIdle`` device objects are allocated for all CPUs and the list of +available idle states is created as explained +`above `_. + +Finally, ``intel_idle`` is registered with the help of cpuidle_register_driver() +as the ``CPUIdle`` driver for all CPUs in the system and a CPU online callback +for configuring individual CPUs is registered via cpuhp_setup_state(), which +(among other things) causes the callback routine to be invoked for all of the +CPUs present in the system at that time (each CPU executes its own instance of +the callback routine). That routine registers a ``CPUIdle`` device for the CPU +running it (which enables the ``CPUIdle`` subsystem to operate that CPU) and +optionally performs some CPU-specific initialization actions that may be +required for the given processor model. + + +.. _intel-idle-parameters: + +Kernel Command Line Options and Module Parameters +================================================= + +The *x86* architecture support code recognizes three kernel command line +options related to CPU idle time management: ``idle=poll``, ``idle=halt``, +and ``idle=nomwait``. If any of them is present in the kernel command line, the +``MWAIT`` instruction is not allowed to be used, so the initialization of +``intel_idle`` will fail. + +Apart from that there are two module parameters recognized by ``intel_idle`` +itself that can be set via the kernel command line (they cannot be updated via +sysfs, so that is the only way to change their values). + +The ``max_cstate`` parameter value is the maximum idle state index in the list +of idle states supplied to the ``CPUIdle`` core during the registration of the +driver. It is also the maximum number of regular (non-polling) idle states that +can be used by ``intel_idle``, so the enumeration of idle states is terminated +after finding that number of usable idle states (the other idle states that +potentially might have been used if ``max_cstate`` had been greater are not +taken into consideration at all). Setting ``max_cstate`` can prevent +``intel_idle`` from exposing idle states that are regarded as "too deep" for +some reason to the ``CPUIdle`` core, but it does so by making them effectively +invisible until the system is shut down and started again which may not always +be desirable. In practice, it is only really necessary to do that if the idle +states in question cannot be enabled during system startup, because in the +working state of the system the CPU power management quality of service (PM +QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states +even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`). +Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail. 
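For illustration, the boot-time and run-time approaches mentioned above can be compared side by side; in this sketch the state index and CPU number are arbitrary examples::

    # Boot-time: expose no idle states deeper than index 1
    intel_idle.max_cstate=1

    # Run-time alternative: disable only state 2 on CPU0
    echo 1 > /sys/devices/system/cpu/cpu0/cpuidle/state2/disable

Unlike ``max_cstate``, the ``sysfs`` knob takes effect immediately and can be reverted at run time by writing 0 back to the same file.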
+ +The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the +kernel has been configured with ACPI support), can be set to make the driver +ignore the system's ACPI tables entirely (it is unset by default). + + +.. _intel-idle-core-and-package-idle-states: + +Core and Package Levels of Idle States +====================================== + +Typically, in a processor supporting the ``MWAIT`` instruction there are (at +least) two levels of idle states (or C-states). One level, referred to as +"core C-states", covers individual cores in the processor, whereas the other +level, referred to as "package C-states", covers the entire processor package +and it may also involve other components of the system (GPUs, memory +controllers, I/O hubs etc.). + +Some of the ``MWAIT`` hint values allow the processor to use core C-states only +(most importantly, that is the case for the ``MWAIT`` hint value corresponding +to the ``C1`` idle state), but the majority of them give it a license to put +the target core (i.e. the core containing the logical CPU executing ``MWAIT`` +with the given hint value) into a specific core C-state and then (if possible) +to enter a specific package C-state at the deeper level. For example, the +``MWAIT`` hint value representing the ``C3`` idle state allows the processor to +put the target core into the low-power state referred to as "core ``C3``" (or +``CC3``), which happens if all of the logical CPUs (SMT siblings) in that core +have executed ``MWAIT`` with the ``C3`` hint value (or with a hint value +representing a deeper idle state), and in addition to that (in the majority of +cases) it gives the processor a license to put the entire package (possibly +including some non-CPU components such as a GPU or a memory controller) into the +low-power state referred to as "package ``C3``" (or ``PC3``), which happens if +all of the cores have gone into the ``CC3`` state and (possibly) some additional +conditions are satisfied (for instance, if the GPU is covered by ``PC3``, it may +be required to be in a certain GPU-specific low-power state for ``PC3`` to be +reachable). + +As a rule, there is no simple way to make the processor use core C-states only +if the conditions for entering the corresponding package C-states are met, so +the logical CPU executing ``MWAIT`` with a hint value that is not core-level +only (like for ``C1``) must always assume that this may cause the processor to +enter a package C-state. [That is why the exit latency and target residency +values corresponding to the majority of ``MWAIT`` hint values in the "internal" +tables of idle states in ``intel_idle`` reflect the properties of package +C-states.] If using package C-states is not desirable at all, either +:ref:`PM QoS ` or the ``max_cstate`` module parameter of +``intel_idle`` described `above `_ must be used to +restrict the range of permissible idle states to the ones with core-level only +``MWAIT`` hint values (like ``C1``). + + +References +========== + +.. [1] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 2B*, + https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2b-manual.html + +.. 
[2] *Advanced Configuration and Power Interface (ACPI) Specification*, + https://uefi.org/specifications diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst index fc298eb1234b..88f717e59a42 100644 --- a/Documentation/admin-guide/pm/working-state.rst +++ b/Documentation/admin-guide/pm/working-state.rst @@ -8,6 +8,7 @@ Working-State Power Management :maxdepth: 2 cpuidle + intel_idle cpufreq intel_pstate intel_epb diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 5a09661330fc..13f595e6fe18 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -52,6 +52,9 @@ stable kernels. | Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ +| Ampere | Altra | #82288 | ALTRA_ERRATUM_82288 | ++----------------+-----------------+-----------------+-----------------------------+ ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 | @@ -88,6 +91,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst new file mode 100644 index 000000000000..29ccd6e61fe5 --- /dev/null +++ b/Documentation/asm-annotations.rst @@ -0,0 +1,216 @@ +Assembler Annotations +===================== + +Copyright (c) 2017-2019 Jiri Slaby + +This document describes the new macros for annotation of data and code in +assembly. In particular, it contains information about ``SYM_FUNC_START``, +``SYM_FUNC_END``, ``SYM_CODE_START``, and similar. + +Rationale +--------- +Some code like entries, trampolines, or boot code needs to be written in +assembly. The same as in C, such code is grouped into functions and +accompanied with data. Standard assemblers do not force users to precisely +mark these pieces as code or data, or even to specify their length. +Nevertheless, assemblers provide developers with such annotations to aid +debuggers throughout assembly. On top of that, developers also want to mark +some functions as *global* in order to make them visible outside of their +translation units. + +Over time, the Linux kernel has adopted macros from various projects (like +``binutils``) to facilitate such annotations. So for historic reasons, +developers have been using ``ENTRY``, ``END``, ``ENDPROC``, and other +annotations in assembly. Because the macros were never properly documented, +they have ended up being used in the wrong context in some places. Clearly, +``ENTRY`` was intended to denote the beginning of global symbols (be it data or code).
+``END`` used to mark the end of data or the end of special functions with +a *non-standard* calling convention. In contrast, ``ENDPROC`` should annotate +only the ends of *standard* functions. + +When these macros are used correctly, they help assemblers generate a nice +object with both sizes and types set correctly. For example, the result of +``arch/x86/lib/putuser.S``:: + + Num: Value Size Type Bind Vis Ndx Name + 25: 0000000000000000 33 FUNC GLOBAL DEFAULT 1 __put_user_1 + 29: 0000000000000030 37 FUNC GLOBAL DEFAULT 1 __put_user_2 + 32: 0000000000000060 36 FUNC GLOBAL DEFAULT 1 __put_user_4 + 35: 0000000000000090 37 FUNC GLOBAL DEFAULT 1 __put_user_8 + +This is not only important for debugging purposes. When there are properly +annotated objects like this, tools can be run on them to generate more useful +information. In particular, on properly annotated objects, ``objtool`` can be +run to check and fix the object if needed. Currently, ``objtool`` can report +missing frame pointer setup/destruction in functions. It can also +automatically generate annotations for :doc:`ORC unwinder ` +for most code. Both of these are especially important to support reliable +stack traces which are in turn necessary for :doc:`Kernel live patching +`. + +Caveat and Discussion +--------------------- +As one might realize, there were only three macros previously. That is indeed +insufficient to cover all the combinations of cases: + +* standard/non-standard function +* code/data +* global/local symbol + +There was a discussion_ and, instead of extending the current ``ENTRY/END*`` +macros, it was decided that brand new macros should be introduced:: + + So how about using macro names that actually show the purpose, instead + of importing all the crappy, historic, essentially randomly chosen + debug symbol macro names from the binutils and older kernels? + +.. _discussion: https://lkml.kernel.org/r/20170217104757.28588-1-jslaby@suse.cz + +Macros Description +------------------ + +The new macros are prefixed with the ``SYM_`` prefix and can be divided into +three main groups: + +1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with + standard C calling conventions, i.e. the stack contains a return address at + the predefined place and a return from the function can happen in a + standard way. When frame pointers are enabled, the save/restore of the + frame pointer shall additionally happen at the start/end of the function, + respectively. + + Checking tools like ``objtool`` should ensure such marked functions conform + to these rules. The tools can also easily annotate these functions with + debugging information (like *ORC data*) automatically. + +2. ``SYM_CODE_*`` -- special functions called with special stack. Be it + interrupt handlers with special stack content, trampolines, or startup + functions. + + Checking tools mostly ignore these functions. But some debug + information still can be generated automatically. For correct debug data, + this code needs hints like ``UNWIND_HINT_REGS`` provided by developers. + +3. ``SYM_DATA*`` -- obviously data belonging to ``.data`` sections and not to + ``.text``. Data do not contain instructions, so the tools have to treat + them specially: they should not treat the bytes as instructions, + nor assign any debug information to them. + +Instruction Macros +~~~~~~~~~~~~~~~~~~ +This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
+ +* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the + most frequent markings**. They are used for functions with standard calling + conventions -- global and local. Like in C, they both align the functions to + architecture specific ``__ALIGN`` bytes. There are also ``_NOALIGN`` variants + for special cases where developers do not want this implicit alignment. + + ``SYM_FUNC_START_WEAK`` and ``SYM_FUNC_START_WEAK_NOALIGN`` markings are + also offered as an assembler counterpart to the *weak* attribute known from + C. + + All of these **shall** be coupled with ``SYM_FUNC_END``. First, it marks + the sequence of instructions as a function and computes its size, which is + then stored in the generated object file. Second, it also eases checking and + processing such object files, as the tools can trivially find exact function + boundaries. + + So in most cases, developers should write something like in the following + example, having some asm instructions in between the macros, of course:: + + SYM_FUNC_START(function_hook) + ... asm insns ... + SYM_FUNC_END(function_hook) + + In fact, this kind of annotation corresponds to the now deprecated ``ENTRY`` + and ``ENDPROC`` macros. + +* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` serve for those + who decided to have two or more names for one function. The typical use is:: + + SYM_FUNC_START_ALIAS(__memset) + SYM_FUNC_START(memset) + ... asm insns ... + SYM_FUNC_END(memset) + SYM_FUNC_END_ALIAS(__memset) + + In this example, one can call ``__memset`` or ``memset`` with the same + result, except the debug information for the instructions is generated in + the object file only once -- for the non-``ALIAS`` case. + +* ``SYM_CODE_START`` and ``SYM_CODE_START_LOCAL`` should be used only in + special cases -- if you know what you are doing. This is used exclusively + for interrupt handlers and similar code where the calling convention is not + the C one. ``_NOALIGN`` variants exist too. The use is the same as for the + ``FUNC`` category above:: + + SYM_CODE_START_LOCAL(bad_put_user) + ... asm insns ... + SYM_CODE_END(bad_put_user) + + Again, every ``SYM_CODE_START*`` **shall** be coupled by ``SYM_CODE_END``. + + To some extent, this category corresponds to deprecated ``ENTRY`` and + ``END``. Except ``END`` had several other meanings too. + +* ``SYM_INNER_LABEL*`` is used to denote a label inside some + ``SYM_{CODE,FUNC}_START`` and ``SYM_{CODE,FUNC}_END``. They are very similar + to C labels, except they can be made global. An example of use:: + + SYM_CODE_START(ftrace_caller) + /* save_mcount_regs fills in first two parameters */ + ... + + SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL) + /* Load the ftrace_ops into the 3rd parameter */ + ... + + SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) + call ftrace_stub + ... + retq + SYM_CODE_END(ftrace_caller) + +Data Macros +~~~~~~~~~~~ +Similar to instructions, there are a couple of macros to describe data in the +assembly. + +* ``SYM_DATA_START`` and ``SYM_DATA_START_LOCAL`` mark the start of some data + and shall be used in conjunction with either ``SYM_DATA_END``, or + ``SYM_DATA_END_LABEL``.
The latter also adds a label to the end, so that + people can use ``lstack`` and (local) ``lstack_end`` in the following + example:: + + SYM_DATA_START_LOCAL(lstack) + .skip 4096 + SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end) + +* ``SYM_DATA`` and ``SYM_DATA_LOCAL`` are variants for simple, mostly one-line + data:: + + SYM_DATA(HEAP, .long rm_heap) + SYM_DATA(heap_end, .long rm_stack) + + In the end, they expand to ``SYM_DATA_START`` with ``SYM_DATA_END`` + internally. + +Support Macros +~~~~~~~~~~~~~~ +All of the above ultimately reduce to some invocation of ``SYM_START``, +``SYM_END``, or ``SYM_ENTRY``. Normally, developers should avoid using +these. + +Further, in the above examples, one could see ``SYM_L_LOCAL``. There are also +``SYM_L_GLOBAL`` and ``SYM_L_WEAK``. All of them are intended to denote the +linkage of the symbol they mark. They are used either in ``_LABEL`` variants +of the earlier macros, or in ``SYM_START``. + + +Overriding Macros +~~~~~~~~~~~~~~~~~ +Architectures can also override any of the macros in their own +``asm/linkage.h``, including macros specifying the type of a symbol +(``SYM_T_FUNC``, ``SYM_T_OBJECT``, and ``SYM_T_NONE``). As every macro +described in this file is surrounded by ``#ifdef`` + ``#endif``, it is enough +to define the macros differently in the aforementioned architecture-dependent +header. diff --git a/Documentation/bpf/prog_flow_dissector.rst b/Documentation/bpf/prog_flow_dissector.rst index a78bf036cadd..4d86780ab0f1 100644 --- a/Documentation/bpf/prog_flow_dissector.rst +++ b/Documentation/bpf/prog_flow_dissector.rst @@ -142,3 +142,6 @@ BPF flow dissector doesn't support exporting all the metadata that in-kernel C-based implementation can export. Notable example is single VLAN (802.1Q) and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys`` for a set of information that's currently can be exported from the BPF context. + +When BPF flow dissector is attached to the root network namespace (machine-wide +policy), users can't override it in their child network namespaces. diff --git a/Documentation/bpf/ringbuf.rst b/Documentation/bpf/ringbuf.rst new file mode 100644 index 000000000000..75f943f0009d --- /dev/null +++ b/Documentation/bpf/ringbuf.rst @@ -0,0 +1,209 @@ +=============== +BPF ring buffer +=============== + +This document describes BPF ring buffer design, API, and implementation details. + +.. contents:: + :local: + :depth: 2 + +Motivation +---------- + +There are two distinct motivators for this work that are not satisfied by the +existing perf buffer and that prompted the creation of a new ring buffer +implementation: + +- more efficient memory utilization by sharing ring buffer across CPUs; +- preserving ordering of events that happen sequentially in time, even across + multiple CPUs (e.g., fork/exec/exit events for a task). + +These two problems are independent, but perf buffer fails to satisfy both. +Both are a result of the choice to have a per-CPU perf ring buffer. Both can +also be solved by having an MPSC implementation of ring buffer. The ordering +problem could technically be solved for perf buffer with some in-kernel +counting, but given the first one requires an MPSC buffer, the same solution +would solve the second problem automatically. + +Semantics and APIs +------------------ + +A single ring buffer is presented to BPF programs as an instance of a BPF map +of type ``BPF_MAP_TYPE_RINGBUF``. Two other alternatives were considered, but +ultimately rejected.
+ +One way would be to, similarly to ``BPF_MAP_TYPE_PERF_EVENT_ARRAY``, have +``BPF_MAP_TYPE_RINGBUF`` represent an array of ring buffers, but without +enforcing the "same CPU only" rule. This would be a more familiar interface, +compatible with the existing perf buffer use in BPF, but it would fail if an +application needed more advanced logic to look up a ring buffer by an +arbitrary key. ``BPF_MAP_TYPE_HASH_OF_MAPS`` addresses this with the current +approach. Additionally, given the performance of BPF ringbuf, many use cases +would just opt into a simple single ring buffer shared among all CPUs, for +which the current approach would be overkill. + +Another approach could introduce a new concept, alongside the BPF map, to +represent a generic "container" object, which doesn't necessarily have a +key/value interface with lookup/update/delete operations. This approach would +add a lot of extra infrastructure that has to be built for observability and +verifier support. It would also add another concept that BPF developers would +have to familiarize themselves with, new syntax in libbpf, etc. But it would +really provide no additional benefit over the approach of using a map. +``BPF_MAP_TYPE_RINGBUF`` doesn't support lookup/update/delete operations, but +neither do a few other map types (e.g., queue and stack; array doesn't support +delete, etc.). + +The approach chosen has the advantage of re-using existing BPF map +infrastructure (introspection APIs in kernel, libbpf support, etc), being a +familiar concept (no need to teach users a new type of object in a BPF +program), and utilizing existing tooling (bpftool). For the common scenario of +using a single ring buffer for all CPUs, it's as simple and straightforward as +it would be with a dedicated "container" object. On the other hand, by being +a map, it can be combined with ``ARRAY_OF_MAPS`` and ``HASH_OF_MAPS`` +map-in-maps to implement a wide variety of topologies, from one ring buffer +for each CPU (e.g., as a replacement for perf buffer use cases), to +a complicated application hashing/sharding of ring buffers (e.g., having +a small pool of ring buffers with a hashed task tgid being the lookup key to +preserve order, but reduce contention). + +Key and value sizes are enforced to be zero. ``max_entries`` is used to specify +the size of the ring buffer and has to be a power of 2. + +There are a bunch of similarities between perf buffer +(``BPF_MAP_TYPE_PERF_EVENT_ARRAY``) and the new BPF ring buffer semantics: + +- variable-length records; +- if there is no more space left in the ring buffer, the reservation fails; + there is no blocking; +- memory-mappable data area for user-space applications for ease of + consumption and high performance; +- epoll notifications for new incoming data; +- but still the ability to do busy polling for new data to achieve the + lowest latency, if necessary. + +BPF ringbuf provides two sets of APIs to BPF programs: + +- ``bpf_ringbuf_output()`` allows BPF programs to *copy* data from one place + to a ring buffer, similarly to ``bpf_perf_event_output()``; +- ``bpf_ringbuf_reserve()``/``bpf_ringbuf_commit()``/``bpf_ringbuf_discard()`` + APIs split the whole process into two steps. First, a fixed amount of space + is reserved. If successful, a pointer to data inside the ring buffer data + area is returned, which BPF programs can use similarly to data inside + array/hash maps. Once ready, this piece of memory is either committed or + discarded. Discard is similar to commit, but it makes the consumer ignore + the record.
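To make the reservation API concrete, here is a minimal sketch of a BPF program using it. This example is illustrative only: the ``struct event`` layout, the ``rb`` map name, and the chosen tracepoint are arbitrary, and the map declaration relies on libbpf's ``SEC(".maps")`` convention from ``bpf_helpers.h``. Note that the helper performing the "commit" step described above is spelled ``bpf_ringbuf_submit()`` in the actual helper set::

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Hypothetical sample layout; fixed size, as required by reserve. */
    struct event {
            int pid;
            char comm[16];
    };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 256 * 1024); /* must be a power of 2 */
    } rb SEC(".maps");

    SEC("tracepoint/sched/sched_process_exec")
    int handle_exec(void *ctx)
    {
            struct event *e;

            /* Reserve fails (returns NULL) if the buffer is full. */
            e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
            if (!e)
                    return 0;

            e->pid = bpf_get_current_pid_tgid() >> 32;
            bpf_get_current_comm(&e->comm, sizeof(e->comm));

            /* Commit: the record becomes visible to the consumer. */
            bpf_ringbuf_submit(e, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

Calling ``bpf_ringbuf_discard(e, 0)`` instead of the submit would keep the space consumed but mark the record to be skipped by the consumer.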
+ +``bpf_ringbuf_output()`` has the disadvantage of incurring an extra memory +copy, because the record has to be prepared in some other place first. But it +allows records to be submitted whose length is not known to the verifier +beforehand. It also closely matches ``bpf_perf_event_output()``, so it will +simplify migration significantly. + +``bpf_ringbuf_reserve()`` avoids the extra copy of memory by providing a memory +pointer directly to ring buffer memory. In a lot of cases records are larger +than BPF stack space allows, so many programs have to use an extra per-CPU +array as a temporary heap for preparing a sample. bpf_ringbuf_reserve() avoids +this need completely. But in exchange, it only allows a known constant size of +memory to be reserved, such that the verifier can verify that the BPF program +can't access memory outside its reserved record space. bpf_ringbuf_output(), +while slightly slower due to the extra memory copy, covers some use cases that +are not suitable for ``bpf_ringbuf_reserve()``. + +The difference between commit and discard is very small. Discard just marks +a record as discarded, and such records are supposed to be ignored by the +consumer code. Discard is useful for some advanced use-cases, such as ensuring +all-or-nothing multi-record submission, or emulating temporary +``malloc()``/``free()`` within a single BPF program invocation. + +Each reserved record is tracked by the verifier through existing +reference-tracking logic, similar to socket ref-tracking. It is thus +impossible to reserve a record but forget to submit (or discard) it. + +The ``bpf_ringbuf_query()`` helper allows querying various properties of the +ring buffer. Currently 4 are supported: + +- ``BPF_RB_AVAIL_DATA`` returns the amount of unconsumed data in the ring + buffer; +- ``BPF_RB_RING_SIZE`` returns the size of the ring buffer; +- ``BPF_RB_CONS_POS``/``BPF_RB_PROD_POS`` returns the current logical position + of the consumer/producer, respectively. + +The returned values are momentary snapshots of the ring buffer state and could +be stale by the time the helper returns, so this should be used only for +debugging/reporting reasons or for implementing various heuristics that take +into account the highly changeable nature of some of those characteristics. + +One such heuristic might involve more fine-grained control over poll/epoll +notifications about new data availability in the ring buffer. Together with +the ``BPF_RB_NO_WAKEUP``/``BPF_RB_FORCE_WAKEUP`` flags for the +output/commit/discard helpers, it allows a BPF program a high degree of control +and, e.g., more efficient batched notifications. The default self-balancing +strategy, though, should be adequate for most applications and will work +reliably and efficiently already. + +Design and Implementation +------------------------- + +This reserve/commit schema allows a natural way for multiple producers, either +on different CPUs or even on the same CPU/in the same BPF program, to reserve +independent records and work with them without blocking other producers. This +means that if a BPF program is interrupted by another BPF program sharing the +same ring buffer, they will both get a record reserved (provided there is +enough space left) and can work with it and submit it independently. This +applies to NMI context as well, except that due to using a spinlock during +reservation, in NMI context, ``bpf_ringbuf_reserve()`` might fail to get +a lock, in which case reservation will fail even if the ring buffer is not +full.
+ +The ring buffer itself is internally implemented as a power-of-2 sized +circular buffer, with two logical and ever-increasing counters (which might +wrap around on 32-bit architectures, but that's not a problem): + +- the consumer counter shows up to which logical position the consumer has + consumed the data; +- the producer counter denotes the amount of data reserved by all producers. + +Each time a record is reserved, the producer that "owns" the record will +successfully advance the producer counter. At that point, the data is still +not yet ready to be consumed, though. Each record has an 8-byte header, which +contains the length of the reserved record, as well as two extra bits: a busy +bit to denote that the record is still being worked on, and a discard bit, +which might be set at commit time if the record is discarded. In the latter +case, the consumer is supposed to skip the record and move on to the next one. +The record header also encodes the record's relative offset from the beginning +of the ring buffer data area (in pages). This allows +``bpf_ringbuf_commit()``/``bpf_ringbuf_discard()`` to accept only the pointer +to the record itself, without also requiring the pointer to the ring buffer +itself. The ring buffer memory location will be restored from the record +metadata header. This significantly simplifies the verifier, as well as +improving API usability. + +Producer counter increments are serialized under a spinlock, so there is +a strict ordering between reservations. Commits, on the other hand, are +completely lockless and independent. All records become available to the +consumer in the order of reservations, but only after all previous records +have been committed. It is thus possible for slow producers to temporarily +hold off submitted records that were reserved later. + +The reservation/commit/consumer protocol is verified by litmus tests in +Documentation/litmus_tests/bpf-rb/_. + +One interesting implementation bit that significantly simplifies (and thus +speeds up) the implementation of both producers and consumers is how the data +area is mapped twice, contiguously back-to-back, in virtual memory. This makes +it unnecessary to take any special measures for samples that have to wrap +around at the end of the circular buffer data area, because the next page +after the last data page would be the first data page again, and thus the +sample will still appear completely contiguous in virtual memory. See the +comment and a simple ASCII diagram showing this visually in +``bpf_ringbuf_area_alloc()``. + +Another feature that distinguishes BPF ringbuf from the perf ring buffer is +its self-pacing notification of new data availability. +``bpf_ringbuf_commit()`` implementation will send a notification of a new +record being available after commit only if the consumer has already caught up +right up to the record being committed. If not, the consumer still has to +catch up and thus will see the new data anyway, without needing an extra poll +notification. Benchmarks (see +tools/testing/selftests/bpf/benchs/bench_ringbuf.c_) show that this makes it +possible to achieve a very high throughput without having to resort to tricks +like "notify only every Nth sample", which are necessary with the perf buffer. +For extreme cases, when a BPF program wants more manual control of +notifications, the commit/discard/output helpers accept ``BPF_RB_NO_WAKEUP`` +and ``BPF_RB_FORCE_WAKEUP`` flags, which give full control over notifications +of data availability, but require extra caution and diligence in using this +API.
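For completeness, the user-space side can be sketched with libbpf's ``ring_buffer`` API, which wraps the epoll-based consumption described above. The sketch below assumes the hypothetical ``struct event`` layout from the earlier BPF-side example and elides most error handling; ``map_fd`` would come from, e.g., ``bpf_object__find_map_fd_by_name()``::

    #include <bpf/libbpf.h>
    #include <stdio.h>

    struct event {             /* must match the BPF-side layout */
            int pid;
            char comm[16];
    };

    static int handle_event(void *ctx, void *data, size_t len)
    {
            const struct event *e = data;

            printf("exec: pid=%d comm=%s\n", e->pid, e->comm);
            return 0;
    }

    int consume(int map_fd)
    {
            struct ring_buffer *rb;

            rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
            if (!rb)
                    return -1;

            /* Sleeps in epoll_wait() until a wakeup notification arrives. */
            while (ring_buffer__poll(rb, 100 /* timeout, ms */) >= 0)
                    ;

            ring_buffer__free(rb);
            return 0;
    }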
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt index b6a7e7397b8b..b944fe067188 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt @@ -16,6 +16,9 @@ Required properties: Documentation/devicetree/bindings/graph.txt. This port should be connected to the input port of an attached HDMI or LVDS encoder chip. +Optional properties: +- pinctrl-names: Contain "default" and "sleep". + Example: dpi0: dpi@1401d000 { @@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { <&mmsys CLK_MM_DPI_ENGINE>, <&apmixedsys CLK_APMIXED_TVDPLL>; clock-names = "pixel", "engine", "pll"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&dpi_pin_func>; + pinctrl-1 = <&dpi_pin_idle>; port { dpi0_out: endpoint { diff --git a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt index d4d83916c09d..be329ea4794f 100644 --- a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt +++ b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt @@ -20,8 +20,9 @@ Required properties: - gpio-controller : Marks the device node as a GPIO controller - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt - interrupt-controller : Mark the GPIO controller as an interrupt-controller -- ngpios : number of GPIO lines, see gpio.txt - (should be multiple of 8, up to 80 pins) +- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose + 2 software GPIOs per hardware GPIO: one for hardware input, one for hardware + output. Up to 80 pins, must be a multiple of 8. - clocks : A phandle to the APB clock for SGPM clock division - bus-frequency : SGPM CLK frequency diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt index c82794002595..89647d714387 100644 --- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt +++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt @@ -21,7 +21,7 @@ controller state. The mux controller state is described in Example: mux: mux-controller { - compatible = "mux-gpio"; + compatible = "gpio-mux"; #mux-control-cells = <0>; mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>, diff --git a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt index 4438432bfe9b..ad76edccf881 100644 --- a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt +++ b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt @@ -87,7 +87,7 @@ Example: ranges; /* APU<->RPU0 IPI mailbox controller */ - ipi_mailbox_rpu0: mailbox@ff90400 { + ipi_mailbox_rpu0: mailbox@ff990400 { reg = <0xff990400 0x20>, <0xff990420 0x20>, <0xff990080 0x20>, diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt index 8a532f4453f2..09aecec47003 100644 --- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt +++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt @@ -49,6 +49,8 @@ Optional properties: error caused by stop clock(fifo full) Valid range = [0:0x7]. if not present, default value is 0. applied to compatible "mediatek,mt2701-mmc". +- resets: Phandle and reset specifier pair to softreset line of MSDC IP. +- reset-names: Should be "hrst". 
Examples: mmc0: mmc@11230000 { diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt index 2cf3affa1be7..96c0b1440c9c 100644 --- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt @@ -15,8 +15,15 @@ Required properties: - "nvidia,tegra210-sdhci": for Tegra210 - "nvidia,tegra186-sdhci": for Tegra186 - "nvidia,tegra194-sdhci": for Tegra194 -- clocks : Must contain one entry, for the module clock. - See ../clocks/clock-bindings.txt for details. +- clocks: For Tegra210, Tegra186 and Tegra194 must contain two entries. + One for the module clock and one for the timeout clock. + For all other Tegra devices, must contain a single entry for + the module clock. See ../clocks/clock-bindings.txt for details. +- clock-names: For Tegra210, Tegra186 and Tegra194 must contain the + strings 'sdhci' and 'tmclk' to represent the module and + the timeout clocks, respectively. + For all other Tegra devices must contain the string 'sdhci' + to represent the module clock. - resets : Must contain an entry for each entry in reset-names. See ../reset/reset.txt for details. - reset-names : Must include the following entries: @@ -99,7 +106,7 @@ Optional properties for Tegra210, Tegra186 and Tegra194: Example: sdhci@700b0000 { - compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; + compatible = "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = ; clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; @@ -115,3 +122,22 @@ sdhci@700b0000 { nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; status = "disabled"; }; + +sdhci@700b0000 { + compatible = "nvidia,tegra210-sdhci"; + reg = <0x0 0x700b0000 0x0 0x200>; + interrupts = ; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; + resets = <&tegra_car 14>; + reset-names = "sdhci"; + pinctrl-names = "sdmmc-3v3", "sdmmc-1v8"; + pinctrl-0 = <&sdmmc1_3v3>; + pinctrl-1 = <&sdmmc1_1v8>; + nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>; + nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>; + nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>; + nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt index b1ad6ee68e90..c51dd99dc0d3 100644 --- a/Documentation/devicetree/bindings/net/btusb.txt +++ b/Documentation/devicetree/bindings/net/btusb.txt @@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt: compatible = "usb1286,204e"; reg = <1>; interrupt-parent = <&gpio0>; - interrupt-name = "wakeup"; + interrupt-names = "wakeup"; interrupts = <3 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt index 27e1b4cebfbd..9cb3560756d0 100644 --- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt @@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 { spi-max-frequency = <10000000>; bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>; interrupt-parent = <&gpio1>; - interrupts = <14 GPIO_ACTIVE_LOW>; + interrupts = <14 IRQ_TYPE_LEVEL_LOW>; device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; diff --git 
a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 0e7c31794ae6..fcafce635ff0 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -51,7 +51,7 @@ properties: description: Reference to an nvmem node for the MAC address - nvmem-cells-names: + nvmem-cell-names: const: mac-address phy-connection-type: @@ -190,6 +190,11 @@ properties: Indicates that full-duplex is used. When absent, half duplex is assumed. + pause: + $ref: /schemas/types.yaml#definitions/flag + description: + Indicates that pause should be enabled. + asym-pause: $ref: /schemas/types.yaml#definitions/flag description: diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt index cfaf88998918..9e4dc510a40a 100644 --- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt +++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt @@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2): clock-frequency = <100000>; interrupt-parent = <&gpio1>; - interrupts = <29 GPIO_ACTIVE_HIGH>; + interrupts = <29 IRQ_TYPE_LEVEL_HIGH>; enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>; firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt index 92f399ec22b8..2bd82562ce8e 100644 --- a/Documentation/devicetree/bindings/net/nfc/pn544.txt +++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt @@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2): clock-frequency = <400000>; interrupt-parent = <&gpio1>; - interrupts = <17 GPIO_ACTIVE_HIGH>; + interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt index b739f92da58e..1f90eb39870b 100644 --- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt +++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt @@ -118,7 +118,7 @@ Tegra194: -------- pcie@14180000 { - compatible = "nvidia,tegra194-pcie", "snps,dw-pcie"; + compatible = "nvidia,tegra194-pcie"; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */ 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */ diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt index 68cccc4653ba..367b58ce1bb9 100644 --- a/Documentation/devicetree/bindings/sound/wm8994.txt +++ b/Documentation/devicetree/bindings/sound/wm8994.txt @@ -14,9 +14,15 @@ Required properties: - #gpio-cells : Must be 2. The first cell is the pin number and the second cell is used to specify optional parameters (currently unused). 
- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, - SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered - in Documentation/devicetree/bindings/regulator/regulator.txt + - power supplies for the device, as covered in + Documentation/devicetree/bindings/regulator/regulator.txt, depending + on compatible: + - for wlf,wm1811 and wlf,wm8958: + AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, + DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply + - for wlf,wm8994: + AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply, + SPKVDD1-supply, SPKVDD2-supply Optional properties: @@ -73,11 +79,11 @@ wm8994: codec@1a { lineout1-se; + AVDD1-supply = <®ulator>; AVDD2-supply = <®ulator>; CPVDD-supply = <®ulator>; - DBVDD1-supply = <®ulator>; - DBVDD2-supply = <®ulator>; - DBVDD3-supply = <®ulator>; + DBVDD-supply = <®ulator>; + DCVDD-supply = <®ulator>; SPKVDD1-supply = <®ulator>; SPKVDD2-supply = <®ulator>; }; diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt index 66780a47ad85..c977a3ba2f35 100644 --- a/Documentation/devicetree/bindings/usb/dwc3.txt +++ b/Documentation/devicetree/bindings/usb/dwc3.txt @@ -75,6 +75,8 @@ Optional properties: from P0 to P1/P2/P3 without delay. - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check during HS transmit. + - snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in + park mode are disabled. - snps,dis_metastability_quirk: when set, disable metastability workaround. CAUTION: use only if you are absolutely sure of it. - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst index 70e180e6b93d..9f3e5dc31184 100644 --- a/Documentation/driver-api/libata.rst +++ b/Documentation/driver-api/libata.rst @@ -250,7 +250,7 @@ High-level taskfile hooks :: - void (*qc_prep) (struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc); int (*qc_issue) (struct ata_queued_cmd *qc); diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt index 71b63c2b9841..a8f1a58e3692 100644 --- a/Documentation/filesystems/affs.txt +++ b/Documentation/filesystems/affs.txt @@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: - R maps to r for user, group and others. On directories, R implies x. - - If both W and D are allowed, w will be set. + - W maps to w. - E maps to x. - - H and P are always retained and ignored under Linux. + - D is ignored. - - A is always reset when a file is written to. + - H, S and P are always retained and ignored under Linux. + + - A is cleared when a file is written to. User id and group id will be used unless set[gu]id are given as mount options. Since most of the Amiga file systems are single user systems @@ -111,11 +113,13 @@ Linux -> Amiga: The Linux rwxrwxrwx file mode is handled as follows: - - r permission will set R for user, group and others. + - r permission will allow R for user, group and others. - - w permission will set W and D for user, group and others. + - w permission will allow W for user, group and others. - - x permission of the user will set E for plain files. + - x permission of the user will allow E for plain files. + + - D will be allowed for user, group and others. - All other flags (suid, sgid, ...) are ignored and will not be retained. 
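+
+As a rough illustration of the Linux -> Amiga direction described above,
+consider this sketch. The flag values and the owner-only scope are
+illustrative placeholders, not the constants used by the driver (the real
+on-disk AFFS encoding differs; for instance, the Amiga owner RWED bits are
+stored active-low):
+
+    #include <sys/stat.h>
+
+    /* Illustrative "operation allowed" flags, not the driver's macros. */
+    #define AMIGA_R (1 << 0)
+    #define AMIGA_W (1 << 1)
+    #define AMIGA_E (1 << 2)
+    #define AMIGA_D (1 << 3)
+
+    static unsigned int mode_to_amiga_owner(mode_t mode)
+    {
+        unsigned int prot = AMIGA_D;       /* D is always allowed */
+
+        if (mode & S_IRUSR)
+            prot |= AMIGA_R;               /* r allows R */
+        if (mode & S_IWUSR)
+            prot |= AMIGA_W;               /* w allows W */
+        if (mode & S_IXUSR)
+            prot |= AMIGA_E;               /* x allows E */
+        return prot;
+    }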
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index d412b236a9d6..7cf7143921a1 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock
 during that time is a reasonable thing to do. The seq_file code will
 also avoid taking any other locks while the iterator is active.
 
+The iterator value returned by start() or next() is guaranteed to be
+passed to a subsequent next() or stop() call. This allows resources
+such as locks that were taken to be reliably released. There is *no*
+guarantee that the iterator will be passed to show(), though in practice
+it often will be.
+
 
 Formatted output
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index ddf15b1b0d5a..33ec0a01450d 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -232,12 +232,10 @@ Other notes:
   is 4096.
 
 - show() methods should return the number of bytes printed into the
-  buffer. This is the return value of scnprintf().
+  buffer.
 
-- show() must not use snprintf() when formatting the value to be
-  returned to user space. If you can guarantee that an overflow
-  will never happen you can use sprintf() otherwise you must use
-  scnprintf().
+- show() should only use sysfs_emit() or sysfs_emit_at() when formatting
+  the value to be returned to user space.
 
 - store() should return the number of bytes used from the buffer. If the
   entire buffer has been used, just return the count argument.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index b843e313d2f2..2ceab197246f 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -135,6 +135,14 @@ needed).
    mic/index
    scheduler/index
 
+Architecture-agnostic documentation
+-----------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   asm-annotations
+
 Architecture-specific documentation
 -----------------------------------
 
diff --git a/Documentation/ioctl/ioctl-number.rst b/Documentation/ioctl/ioctl-number.rst
index bef79cd4c6b4..d1702f9e7c39 100644
--- a/Documentation/ioctl/ioctl-number.rst
+++ b/Documentation/ioctl/ioctl-number.rst
@@ -321,6 +321,7 @@ Code  Seq#    Include File                                           Comments
 0xA3  90-9F  linux/dtlk.h
 0xA4  00-1F  uapi/linux/tee.h                                        Generic TEE subsystem
+0xA4  00-1F  uapi/asm/sgx.h
 0xAA  00-3F  linux/uapi/linux/userfaultfd.h
 0xAB  00-1F  linux/nbd.h
 0xAC  00-1F  linux/raw.h
diff --git a/Documentation/kbuild/index.rst b/Documentation/kbuild/index.rst
index 0f144fad99a6..3882bd5f7728 100644
--- a/Documentation/kbuild/index.rst
+++ b/Documentation/kbuild/index.rst
@@ -19,6 +19,7 @@ Kernel Build System
     issues
     reproducible-builds
+    llvm
 
 .. only::  subproject and html
 
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index f1e5dce86af7..852ccc551bb3 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -262,3 +262,8 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
 These two variables allow to override the user@host string displayed during
 boot and in /proc/version. The default value is the output of the commands
 whoami and host, respectively.
+
+LLVM
+----
+If this variable is set to 1, Kbuild will use Clang and LLVM utilities instead
+of GCC and GNU binutils to build the kernel.
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
new file mode 100644
index 000000000000..c776b6eee969
--- /dev/null
+++ b/Documentation/kbuild/llvm.rst
@@ -0,0 +1,87 @@
+==============================
+Building Linux with Clang/LLVM
+==============================
+
+This document covers how to build the Linux kernel with Clang and LLVM
+utilities.
+
+About
+-----
+
+The Linux kernel has traditionally been compiled with GNU toolchains such as
+GCC and binutils. Ongoing work has allowed for `Clang
+<https://clang.llvm.org/>`_ and `LLVM <https://llvm.org/>`_ utilities to be
+used as viable substitutes. Distributions such as `Android
+<https://www.android.com/>`_, `ChromeOS
+<https://www.chromium.org/chromium-os>`_, and `OpenMandriva
+<https://www.openmandriva.org/>`_ use Clang built kernels. `LLVM is a
+collection of toolchain components implemented in terms of C++ objects
+<https://www.aosabook.org/en/llvm.html>`_. Clang is a front-end to LLVM that
+supports C and the GNU C extensions required by the kernel, and is pronounced
+"klang," not "see-lang."
+
+Clang
+-----
+
+The compiler used can be swapped out via the `CC=` command line argument to
+`make`. `CC=` should be set when selecting a config and during a build.
+
+	make CC=clang defconfig
+
+	make CC=clang
+
+Cross Compiling
+---------------
+
+A single Clang compiler binary will typically contain all supported backends,
+which can help simplify cross compiling.
+
+	ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
+
+`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead
+`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
+example:
+
+	clang --target aarch64-linux-gnu foo.c
+
+LLVM Utilities
+--------------
+
+LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
+to enable them.
+
+	make LLVM=1
+
+They can be enabled individually. The full list of the parameters:
+
+	make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\
+	  OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\
+	  READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\
+	  HOSTLD=ld.lld
+
+Currently, the integrated assembler is disabled by default. You can pass
+`LLVM_IAS=1` to enable it.
+
+Getting Help
+------------
+
+- `Website <https://clangbuiltlinux.github.io/>`_
+- `Mailing List <https://groups.google.com/forum/#!forum/clang-built-linux>`_: clang-built-linux@googlegroups.com
+- `Issue Tracker <https://github.com/ClangBuiltLinux/linux/issues>`_
+- IRC: #clangbuiltlinux on chat.freenode.net
+- `Telegram <https://t.me/ClangBuiltLinux>`_: @ClangBuiltLinux
+- `Wiki <https://github.com/ClangBuiltLinux/linux/wiki>`_
+- `Beginner Bugs <https://github.com/ClangBuiltLinux/linux/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_
+
+Getting LLVM
+------------
+
+- http://releases.llvm.org/download.html
+- https://github.com/llvm/llvm-project
+- https://llvm.org/docs/GettingStarted.html
+- https://llvm.org/docs/CMake.html
+- https://apt.llvm.org/
+- https://www.archlinux.org/packages/extra/x86_64/llvm/
+- https://github.com/ClangBuiltLinux/tc-build
+- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source
+- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index ca983328976b..f65b51523014 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -159,11 +159,15 @@ Byte sequences
            distance = 16384 + (H << 14) + D
            state = S (copy S literals after this block)
            End of stream is reached if distance == 16384
+           In version 1 only, to prevent ambiguity with the RLE case when
+           ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the
+           compressor must not emit block copies where distance and length
+           meet these conditions.
 
         In version 1 only, this instruction is also used to encode a run of
-        zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
+        zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
         In this case, it is followed by a fourth byte, X.
-        run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
+        run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4
 
       0 0 1 L L L L L  (32..63)
            Copy of small block within 16kB distance (preferably less than 34B)
diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
index e122bbe3d799..aabb08130354 100644
--- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
@@ -36,8 +36,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
 :c:type:`v4l2_hsv_encoding` specifies which encoding is used.
 
 .. note:: The default R'G'B' quantization is full range for all
-   colorspaces except for BT.2020 which uses limited range R'G'B'
-   quantization.
+   colorspaces. HSV formats are always full range.
 
 .. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
 
@@ -169,8 +168,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
       - Details
     * - ``V4L2_QUANTIZATION_DEFAULT``
       - Use the default quantization encoding as defined by the
-        colorspace. This is always full range for R'G'B' (except for the
-        BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
+        colorspace. This is always full range for R'G'B' and HSV.
+        It is usually limited range for Y'CbCr.
     * - ``V4L2_QUANTIZATION_FULL_RANGE``
       - Use the full range quantization encoding. I.e. the range [0…1] is
         mapped to [0…255] (with possible clipping to [1…254] to avoid the
@@ -180,4 +179,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
     * - ``V4L2_QUANTIZATION_LIM_RANGE``
       - Use the limited range quantization encoding. I.e. the range [0…1] is
         mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
-        [16…240].
+        [16…240]. Limited Range cannot be used with HSV.
diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
index 8b0ba3668101..fd0cf57691d8 100644
--- a/Documentation/media/uapi/v4l/colorspaces-details.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
@@ -377,9 +377,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020)
 The :ref:`itu2020` standard defines the colorspace used by Ultra-high
 definition television (UHDTV). The default transfer function is
 ``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited
-range (!), and so is the default Y'CbCr quantization. The chromaticities
-of the primary colors and the white reference are:
+``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range.
+The chromaticities of the primary colors and the white reference are:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f274aede80a4..95b532d1adbf 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -750,6 +750,11 @@ tcp_tw_reuse - INTEGER
 	experts.
 	Default: 2
 
+tcp_tw_timeout - INTEGER
+	How long to wait to destroy TIME-WAIT state. The maximum value
+	is 60 seconds, the minimum value is 10 seconds.
+	Default: 60 seconds
+
 tcp_window_scaling - BOOLEAN
 	Enable window scaling as defined in RFC1323.
 
@@ -1008,12 +1013,14 @@ icmp_ratelimit - INTEGER
 icmp_msgs_per_sec - INTEGER
 	Limit maximal number of ICMP packets sent per second from this host.
 	Only messages whose type matches icmp_ratemask (see below) are
-	controlled by this limit.
+	controlled by this limit. For security reasons, the precise count
+	of messages per second is randomized.
Default: 1000 icmp_msgs_burst - INTEGER icmp_msgs_per_sec controls number of ICMP packets sent per second, while icmp_msgs_burst controls the burst size of these packets. + For security reasons, the precise burst size is randomized. Default: 50 icmp_ratemask - INTEGER diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index f5be243d250a..4b0db514b201 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -414,8 +414,8 @@ Send: .can_family = AF_CAN, .can_addr.j1939 = { .name = J1939_NO_NAME; - .pgn = 0x30, - .addr = 0x12300, + .addr = 0x30, + .pgn = 0x12300, }, }; diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst index f8a72ffffe66..6e12de9fc34e 100644 --- a/Documentation/sound/hd-audio/index.rst +++ b/Documentation/sound/hd-audio/index.rst @@ -8,3 +8,4 @@ HD-Audio models controls dp-mst + realtek-pc-beep diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index 11298f0ce44d..0ea967d34583 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -216,8 +216,6 @@ alc298-dell-aio ALC298 fixups on Dell AIO machines alc275-dell-xps ALC275 fixups on Dell XPS models -alc256-dell-xps13 - ALC256 fixups on Dell XPS13 lenovo-spk-noise Workaround for speaker noise on Lenovo machines lenovo-hotkey diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst new file mode 100644 index 000000000000..be47c6f76a6e --- /dev/null +++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst @@ -0,0 +1,129 @@ +=============================== +Realtek PC Beep Hidden Register +=============================== + +This file documents the "PC Beep Hidden Register", which is present in certain +Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can +route audio between pins but aren't themselves exposed as HDA widgets. As far +as I can tell, these hidden routes are designed to allow flexible PC Beep output +for codecs that don't have mixer widgets in their output paths. Why it's easier +to hide a mixer behind an undocumented vendor register than to just expose it +as a widget, I have no idea. + +Register Description +==================== + +The register is accessed via processing coefficient 0x36 on NID 20h. Bits not +identified below have no discernible effect on my machine, a Dell XPS 13 9350:: + + MSB LSB + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | |h|S|L| | B |R| | Known bits + +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ + |0|0|1|1| 0x7 |0|0x0|1| 0x7 | Reset value + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +1Ah input select (B): 2 bits + When zero, expose the PC Beep line (from the internal beep generator, when + enabled with the Set Beep Generation verb on NID 01h, or else from the + external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone + jack (or possibly Line In on some machines) input instead. If PC Beep is + selected, the 1Ah boost control has no effect. + +Amplify 1Ah loopback, left (L): 1 bit + Amplify the left channel of 1Ah before mixing it into outputs as specified + by h and S bits. Does not affect the level of 1Ah exposed to other widgets. + +Amplify 1Ah loopback, right (R): 1 bit + Amplify the right channel of 1Ah before mixing it into outputs as specified + by h and S bits. Does not affect the level of 1Ah exposed to other widgets. 
+
+Loopback 1Ah to 21h [active low] (h): 1 bit
+  When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
+  into 21h (headphone jack on my machine). Mixed signal respects the mute
+  setting on 21h.
+
+Loopback 1Ah to 14h (S): 1 bit
+  When one, mix 1Ah (possibly with amplification, depending on L and R bits)
+  into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
+  setting on 14h and is present whenever 14h is configured as an output.
+
+Path diagrams
+=============
+
+1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
+
+  <Beep generator>  <PCBEEP pin>  <Headphone jack or Line In>
+          |               |                  |
+          +--DIV--+--!DIV--+        {1Ah boost control}
+                  |                          |
+                  +--(b == 0)--+--(b != 0)---+
+                               |
+               >1Ah (Beep/Headphone Mic/Line In)<
+
+Loopback of 1Ah to 21h/14h::
+
+  <1Ah (Beep/Headphone Mic/Line In)>
+                  |
+          {amplify if L/R}
+                  |
+          +-----!h-----+-----S-----+
+          |                        |
+  {21h mute control}               |
+          |                        |
+  >21h (Headphone)<   >14h (Internal Speaker)<
+
+Background
+==========
+
+All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
+provides access to a bank of registers that control various codec functions.
+Registers are read and written via the standard HDA processing coefficient
+verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
+named "Realtek Vendor Registers" in public datasheets' verb listings and,
+apart from that, is entirely undocumented.
+
+This particular register, exposed at coefficient 0x36 and named in commits from
+Realtek, is of note: unlike most registers, which seem to control detailed
+amplifier parameters not in scope of the HDA specification, it controls audio
+routing which could just as easily have been defined using standard HDA mixer
+and selector widgets.
+
+Specifically, it selects between two sources for the input pin widget with Node
+ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
+laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
+commits indicate that it might be a Line In on some machines) or from the PC
+Beep line (which is itself multiplexed between the codec's internal beep
+generator and the external PCBEEP pin, depending on whether the beep generator
+is enabled via verbs on NID 01h). Additionally, it can mix (with optional
+amplification) that signal onto the 21h and/or 14h output pins.
+
+The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
+then amplified and mixed into both the headphones and the speakers. Not only
+does this violate the HDA specification, which says that "[a vendor defined
+beep input pin] connection may be maintained *only* while the Link reset
+(**RST#**) is asserted", it means that we cannot ignore the register if we care
+about the input that 1Ah would otherwise expose or if the PCBEEP trace is
+poorly shielded and picks up chassis noise (both of which are the case on my
+machine).
+
+Unfortunately, there are lots of ways to get this register configuration wrong.
+Linux, it seems, has gone through most of them. For one, the register resets
+after S3 suspend: judging by existing code, this isn't the case for all vendor
+registers, and it's led to some fixes that improve behavior on cold boot but
+don't last after suspend. Other fixes have successfully switched the 1Ah input
+away from PC Beep but have failed to disable both loopback paths. On my
+machine, this means that the headphone input is amplified and looped back to
+the headphone output, which uses the exact same pins!
As you might expect, this +causes terrible headphone noise, the character of which is controlled by the +1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone +noise by changing "Headphone Mic Boost" in ALSA, now you know why.) + +The information here has been obtained through black-box reverse engineering of +the ALC256 codec's behavior and is not guaranteed to be correct. It likely +also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs +seem to be close relatives of the ALC256. (They all share one initialization +function.) Additionally, other codecs like the ALC225 and ALC285 also have this +register, judging by existing fixups in ``patch_realtek.c``, but specific +data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ +from what I've described here. diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 4833904d32a5..fd22224853e5 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -172,6 +172,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION ioctl() at run-time. +Creation of the VM will fail if the requested IPA size (whether it is +implicit or explicit) is unsupported on the host. + Please note that configuring the IPA size does not affect the capability exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects size of the address translated by the stage2 level (guest physical to @@ -1132,6 +1135,9 @@ field userspace_addr, which must point at user addressable memory for the entire memory slot size. Any object may back this memory, including anonymous memory, ordinary files, and hugetlbfs. +On architectures that support a form of address tagging, userspace_addr must +be an untagged address. + It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr be identical. This allows large pages in the guest to be backed by large pages in the host. @@ -4444,9 +4450,11 @@ EOI was received. #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt index dadb29e8738f..ec072c6bc03f 100644 --- a/Documentation/virt/kvm/mmu.txt +++ b/Documentation/virt/kvm/mmu.txt @@ -420,7 +420,7 @@ If the generation number of the spte does not equal the global generation number, it will ignore the cached MMIO information and handle the page fault through the slow path. -Since only 19 bits are used to store generation-number on mmio spte, all +Since only 18 bits are used to store generation-number on mmio spte, all pages are zapped when there is an overflow. Unfortunately, a single memory access might access kvm_memslots(kvm) multiple diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst index a8de2fbc1caa..971f30a7d166 100644 --- a/Documentation/x86/index.rst +++ b/Documentation/x86/index.rst @@ -31,3 +31,4 @@ x86-specific Documentation usb-legacy-support i386/index x86_64/index + sgx diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst new file mode 100644 index 000000000000..eaee1368b4fd --- /dev/null +++ b/Documentation/x86/sgx.rst @@ -0,0 +1,211 @@ +.. 
SPDX-License-Identifier: GPL-2.0
+
+===============================
+Software Guard eXtensions (SGX)
+===============================
+
+Overview
+========
+
+Software Guard eXtensions (SGX) hardware enables user space applications
+to set aside private memory regions of code and data:
+
+* Privileged (ring-0) ENCLS functions orchestrate the construction of the
+  regions.
+* Unprivileged (ring-3) ENCLU functions allow an application to enter and
+  execute inside the regions.
+
+These memory regions are called enclaves. An enclave can only be entered at a
+fixed set of entry points. Each entry point can hold a single hardware thread
+at a time. While the enclave is loaded from a regular binary file by using
+ENCLS functions, only the threads inside the enclave can access its memory.
+The region is denied from outside access by the CPU, and encrypted before it
+leaves the last-level cache (LLC).
+
+The support can be determined by
+
+	``grep sgx /proc/cpuinfo``
+
+SGX must both be supported in the processor and enabled by the BIOS. If SGX
+appears to be unsupported on a system which has hardware support, ensure
+support is enabled in the BIOS. If a BIOS presents a choice between "Enabled"
+and "Software Enabled" modes for SGX, choose "Enabled".
+
+Enclave Page Cache
+==================
+
+SGX utilizes an *Enclave Page Cache (EPC)* to store pages that are associated
+with an enclave. It is contained in a BIOS-reserved region of physical memory.
+Unlike pages used for regular memory, pages can only be accessed from outside
+of the enclave, during enclave construction, with special, limited SGX
+instructions.
+
+Only a CPU executing inside an enclave can directly access enclave memory.
+However, a CPU executing inside an enclave may access normal memory outside
+the enclave.
+
+The kernel manages enclave memory similarly to how it treats device memory.
+
+Enclave Page Types
+------------------
+
+**SGX Enclave Control Structure (SECS)**
+   The enclave's address range, attributes and other global data are defined
+   by this structure.
+
+**Regular (REG)**
+   Regular EPC pages contain the code and data of an enclave.
+
+**Thread Control Structure (TCS)**
+   Thread Control Structure pages define the entry points to an enclave and
+   track the execution state of an enclave thread.
+
+**Version Array (VA)**
+   Version Array pages contain 512 slots, each of which can contain a version
+   number for a page evicted from the EPC.
+
+Enclave Page Cache Map
+----------------------
+
+The processor tracks EPC pages in a hardware metadata structure called the
+*Enclave Page Cache Map (EPCM)*. The EPCM contains an entry for each EPC page
+which describes the owning enclave, access rights and page type, among other
+things.
+
+EPCM permissions are separate from the normal page tables. This prevents the
+kernel from, for instance, allowing writes to data which an enclave wishes to
+remain read-only. EPCM permissions may only impose additional restrictions on
+top of normal x86 page permissions.
+
+For all intents and purposes, the SGX architecture allows the processor to
+invalidate all EPCM entries at will. This requires that software be prepared to
+handle an EPCM fault at any time. In practice, this can happen on events like
+power transitions when the ephemeral key that encrypts enclave memory is lost.
+
+Application interface
+=====================
+
+Enclave build functions
+-----------------------
+
+In addition to the traditional compiler and linker build process, SGX has a
+separate enclave “build” process.
Enclaves must be built before they can be executed (entered). The first step
+in building an enclave is opening the **/dev/sgx_enclave** device. Since
+enclave memory is protected from direct access, special privileged
+instructions are then used to copy data into enclave pages and establish
+enclave page permissions.
+
+.. kernel-doc:: arch/x86/kernel/cpu/sgx/ioctl.c
+   :functions: sgx_ioc_enclave_create
+               sgx_ioc_enclave_add_pages
+               sgx_ioc_enclave_init
+               sgx_ioc_enclave_provision
+
+Enclave vDSO
+------------
+
+Entering an enclave can only be done through SGX-specific EENTER and ERESUME
+functions, and is a non-trivial process. Because of the complexity of
+transitioning to and from an enclave, enclaves typically utilize a library to
+handle the actual transitions. This is roughly analogous to how glibc
+implementations are used by most applications to wrap system calls.
+
+Another crucial characteristic of enclaves is that they can generate exceptions
+as part of their normal operation that need to be handled in the enclave or are
+unique to SGX.
+
+Instead of the traditional signal mechanism to handle these exceptions, SGX
+can leverage special exception fixup provided by the vDSO. The kernel-provided
+vDSO function wraps low-level transitions to/from the enclave like EENTER and
+ERESUME. The vDSO function intercepts exceptions that would otherwise generate
+a signal and returns the fault information directly to its caller. This avoids
+the need to juggle signal handlers.
+
+.. kernel-doc:: arch/x86/include/uapi/asm/sgx.h
+   :functions: vdso_sgx_enter_enclave_t
+
+ksgxd
+=====
+
+SGX support includes a kernel thread called *ksgxd*.
+
+EPC sanitization
+----------------
+
+ksgxd is started when SGX initializes. Enclave memory is typically ready
+for use when the processor powers on or resets. However, if SGX has been in
+use since the reset, enclave pages may be in an inconsistent state. This might
+occur after a crash and kexec() cycle, for instance. At boot, ksgxd
+reinitializes all enclave pages so that they can be allocated and re-used.
+
+The sanitization is done by going through the EPC address space and applying
+the EREMOVE function to each physical page. Some enclave pages, like SECS
+pages, have hardware dependencies on other pages which prevent EREMOVE from
+functioning. Executing two EREMOVE passes removes the dependencies.
+
+Page reclaimer
+--------------
+
+Similar to the core kswapd, ksgxd is responsible for managing the
+overcommitment of enclave memory. If the system runs out of enclave memory,
+*ksgxd* “swaps” enclave memory to normal memory.
+
+Launch Control
+==============
+
+SGX provides a launch control mechanism. After all enclave pages have been
+copied, the kernel executes the EINIT function, which initializes the enclave.
+Only after this can the CPU execute inside the enclave.
+
+The EINIT function takes an RSA-3072 signature of the enclave measurement. The
+function checks that the measurement is correct and that the signature is
+signed with the key whose SHA256 hash matches the four
+**IA32_SGXLEPUBKEYHASH{0, 1, 2, 3}** MSRs.
+
+Those MSRs can be configured by the BIOS to be either readable or writable.
+Linux supports only the writable configuration, in order to give full control
+to the kernel on launch control policy. Before calling the EINIT function, the
+driver sets the MSRs to match the enclave's signing key.
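+
+As a minimal sketch of that last step, assuming the kernel's wrmsrl() helper
+and the architectural MSR numbers 0x8C..0x8F for IA32_SGXLEPUBKEYHASH{0..3}
+(the function name here is illustrative, not the driver's)::
+
+    #include <asm/msr.h>
+
+    #define MSR_IA32_SGXLEPUBKEYHASH0 0x0000008c
+
+    /* Program the SHA256 of the enclave signer's public key, as four
+     * 64-bit words, before executing EINIT. */
+    static void sgx_set_pubkeyhash(const u64 hash[4])
+    {
+        int i;
+
+        for (i = 0; i < 4; i++)
+            wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, hash[i]);
+    }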
+
+Encryption engines
+==================
+
+In order to conceal the enclave data while it is out of the CPU package, the
+memory controller has an encryption engine to transparently encrypt and
+decrypt enclave memory.
+
+In CPUs prior to Ice Lake, the Memory Encryption Engine (MEE) is used to
+encrypt pages leaving the CPU caches. MEE uses an n-ary Merkle tree with its
+root in SRAM to maintain integrity of the encrypted data. This provides
+integrity and anti-replay protection but does not scale to large memory sizes
+because the time required to update the Merkle tree grows logarithmically in
+relation to the memory size.
+
+CPUs starting from Ice Lake use Total Memory Encryption (TME) in place of
+MEE. TME-based SGX implementations do not have an integrity Merkle tree, which
+means integrity and replay-attacks are not mitigated. However, it includes
+additional changes to prevent cipher text from being returned and SW memory
+aliases from being created.
+
+DMA to enclave memory is blocked by range registers on both MEE and TME systems
+(SDM section 41.10).
+
+Usage Models
+============
+
+Shared Library
+--------------
+
+Sensitive data and the code that acts on it are partitioned from the
+application into a separate library. The library is then linked as a DSO which
+can be loaded into an enclave. The application can then make individual
+function calls into the enclave through special SGX instructions. A run-time
+within the enclave is configured to marshal function parameters into and out
+of the enclave and to call the correct library function.
+
+Application Container
+---------------------
+
+An application may be loaded into a container enclave which is specially
+configured with a library OS and run-time which permits the application to run.
+The enclave run-time and library OS work together to execute the application
+when a thread enters the enclave.
diff --git a/Documentation/x86/topology.rst b/Documentation/x86/topology.rst
index e29739904e37..7f58010ea86a 100644
--- a/Documentation/x86/topology.rst
+++ b/Documentation/x86/topology.rst
@@ -41,6 +41,8 @@ Package
    Packages contain a number of cores plus shared resources, e.g. DRAM
    controller, shared caches etc.
 
+   Modern systems may also use the term 'Die' for package.
+
    AMD nomenclature for package is 'Node'.
 
 Package-related topology information in the kernel:
@@ -53,11 +55,18 @@ Package-related topology information in the kernel:
     The number of dies in a package. This information is retrieved via CPUID.
 
+  - cpuinfo_x86.cpu_die_id:
+
+    The physical ID of the die. This information is retrieved via CPUID.
+
   - cpuinfo_x86.phys_proc_id:
 
     The physical ID of the package. This information is retrieved via CPUID
     and deduced from the APIC IDs of the cores in the package.
 
+    Modern systems use this value for the socket. There may be multiple
+    packages within a socket. This value may differ from cpu_die_id.
+
   - cpuinfo_x86.logical_proc_id:
 
     The logical ID of the package. As we do not trust BIOSes to enumerate the
diff --git a/Documentation/x86/x86_64/fsgs.rst b/Documentation/x86/x86_64/fsgs.rst
new file mode 100644
index 000000000000..50960e09e1f6
--- /dev/null
+++ b/Documentation/x86/x86_64/fsgs.rst
@@ -0,0 +1,199 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Using FS and GS segments in user space applications
+===================================================
+
+The x86 architecture supports segmentation. Instructions which access
+memory can use segment register based addressing mode.
The following
+notation is used to address a byte within a segment:
+
+  Segment-register:Byte-address
+
+The segment base address is added to the Byte-address to compute the
+resulting virtual address which is accessed. This makes it possible to access
+multiple instances of data with the identical Byte-address, i.e. the same
+code. The selection of a particular instance is purely based on the
+base-address in the segment register.
+
+In 32-bit mode the CPU provides 6 segments, which also support segment
+limits. The limits can be used to enforce address space protections.
+
+In 64-bit mode the CS/SS/DS/ES segments are ignored and the base address is
+always 0 to provide a full 64-bit address space. The FS and GS segments are
+still functional in 64-bit mode.
+
+Common FS and GS usage
+----------------------
+
+The FS segment is commonly used to address Thread Local Storage (TLS). FS
+is usually managed by runtime code or a threading library. Variables
+declared with the '__thread' storage class specifier are instantiated per
+thread and the compiler emits the FS: address prefix for accesses to these
+variables. Each thread has its own FS base address so common code can be
+used without complex address offset calculations to access the per thread
+instances. Applications should not use FS for other purposes when they use
+runtimes or threading libraries which manage the per thread FS.
+
+The GS segment has no common use and can be used freely by
+applications. GCC and Clang support GS based addressing via address space
+identifiers.
+
+Reading and writing the FS/GS base address
+------------------------------------------
+
+There exist two mechanisms to read and write the FS/GS base address:
+
+ - the arch_prctl() system call
+
+ - the FSGSBASE instruction family
+
+Accessing FS/GS base with arch_prctl()
+--------------------------------------
+
+ The arch_prctl(2) based mechanism is available on all 64-bit CPUs and all
+ kernel versions.
+
+ Reading the base:
+
+   arch_prctl(ARCH_GET_FS, &fsbase);
+   arch_prctl(ARCH_GET_GS, &gsbase);
+
+ Writing the base:
+
+   arch_prctl(ARCH_SET_FS, fsbase);
+   arch_prctl(ARCH_SET_GS, gsbase);
+
+ The ARCH_SET_GS prctl may be disabled depending on kernel configuration
+ and security settings.
+
+Accessing FS/GS base with the FSGSBASE instructions
+---------------------------------------------------
+
+ With the Ivy Bridge CPU generation Intel introduced a new set of
+ instructions to access the FS and GS base registers directly from user
+ space. These instructions are also supported on AMD Family 17H CPUs. The
+ following instructions are available:
+
+  =============== ===========================
+  RDFSBASE %reg   Read the FS base register
+  RDGSBASE %reg   Read the GS base register
+  WRFSBASE %reg   Write the FS base register
+  WRGSBASE %reg   Write the GS base register
+  =============== ===========================
+
+ The instructions avoid the overhead of the arch_prctl() syscall and allow
+ more flexible usage of the FS/GS addressing modes in user space
+ applications. This does not prevent conflicts between threading libraries
+ and runtimes which utilize FS and applications which want to use it for
+ their own purpose.
+
+FSGSBASE instructions enablement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ The instructions are enumerated in CPUID leaf 7, bit 0 of EBX. If
+ available, /proc/cpuinfo shows 'fsgsbase' in the flag entry of the CPUs.
+
+ The availability of the instructions does not enable them
+ automatically. The kernel has to enable them explicitly in CR4.
The reason for this is that older kernels make assumptions about the values in
+ the GS register and enforce them when GS base is set via
+ arch_prctl(). Allowing user space to write arbitrary values to GS base
+ would violate these assumptions and cause malfunction.
+
+ On kernels which do not enable FSGSBASE the execution of the FSGSBASE
+ instructions will fault with a #UD exception.
+
+ The kernel provides reliable information about the enabled state in the
+ ELF AUX vector. If the HWCAP2_FSGSBASE bit is set in the AUX vector, the
+ kernel has FSGSBASE instructions enabled and applications can use them.
+ The following code example shows how this detection works::
+
+   #include <sys/auxv.h>
+   #include <elf.h>
+
+   /* Will be eventually in asm/hwcap.h */
+   #ifndef HWCAP2_FSGSBASE
+   #define HWCAP2_FSGSBASE (1 << 1)
+   #endif
+
+   ....
+
+   unsigned val = getauxval(AT_HWCAP2);
+
+   if (val & HWCAP2_FSGSBASE)
+        printf("FSGSBASE enabled\n");
+
+FSGSBASE instructions compiler support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+GCC version 4.6.4 and newer provide intrinsics for the FSGSBASE
+instructions. Clang 5 supports them as well.
+
+  ===================== ===========================
+  _readfsbase_u64()     Read the FS base register
+  _readgsbase_u64()     Read the GS base register
+  _writefsbase_u64()    Write the FS base register
+  _writegsbase_u64()    Write the GS base register
+  ===================== ===========================
+
+To utilize these intrinsics <immintrin.h> must be included in the source
+code and the compiler option -mfsgsbase has to be added.
+
+Compiler support for FS/GS based addressing
+-------------------------------------------
+
+GCC version 6 and newer provide support for FS/GS based addressing via
+Named Address Spaces. GCC implements the following address space
+identifiers for x86:
+
+  ========= ====================================
+  __seg_fs  Variable is addressed relative to FS
+  __seg_gs  Variable is addressed relative to GS
+  ========= ====================================
+
+The preprocessor symbols __SEG_FS and __SEG_GS are defined when these
+address spaces are supported. Code which implements fallback modes should
+check whether these symbols are defined. Usage example::
+
+  #ifdef __SEG_GS
+
+  long data0 = 0;
+  long data1 = 1;
+
+  long __seg_gs *ptr;
+
+  /* Check whether FSGSBASE is enabled by the kernel (HWCAP2_FSGSBASE) */
+  ....
+
+  /* Set GS base to point to data0 */
+  _writegsbase_u64((unsigned long long)&data0);
+
+  /* Access offset 0 of GS */
+  ptr = 0;
+  printf("data0 = %ld\n", *ptr);
+
+  /* Set GS base to point to data1 */
+  _writegsbase_u64((unsigned long long)&data1);
+  /* ptr still addresses offset 0! */
+  printf("data1 = %ld\n", *ptr);
+
+
+Clang does not provide the GCC address space identifiers, but it provides
+address spaces via an attribute based mechanism in Clang 2.6 and newer
+versions:
+
+  ===================================== =====================================
+  __attribute__((address_space(256)))   Variable is addressed relative to GS
+  __attribute__((address_space(257)))   Variable is addressed relative to FS
+  ===================================== =====================================
+
+FS/GS based addressing with inline assembly
+-------------------------------------------
+
+In case the compiler does not support address spaces, inline assembly can
+be used for FS/GS based addressing mode::
+
+	mov %fs:offset, %reg
+	mov %gs:offset, %reg
+
+	mov %reg, %fs:offset
+	mov %reg, %gs:offset
diff --git a/Documentation/x86/x86_64/index.rst b/Documentation/x86/x86_64/index.rst
index d6eaaa5a35fc..a56070fc8e77 100644
--- a/Documentation/x86/x86_64/index.rst
+++ b/Documentation/x86/x86_64/index.rst
@@ -14,3 +14,4 @@ x86_64 Support
    fake-numa-for-cpusets
    cpu-hotplug-spec
    machinecheck
+   fsgs
diff --git a/Documentation/xtensa/mmu.rst b/Documentation/xtensa/mmu.rst
index e52a12960fdc..450573afa31a 100644
--- a/Documentation/xtensa/mmu.rst
+++ b/Documentation/xtensa/mmu.rst
@@ -82,7 +82,8 @@ Default MMUv2-compatible layout::
   +------------------+
   | VMALLOC area     |  VMALLOC_START  0xc0000000  128MB - 64KB
   +------------------+  VMALLOC_END
-  | Cache aliasing   |  TLBTEMP_BASE_1 0xc7ff0000  DCACHE_WAY_SIZE
+  +------------------+
+  | Cache aliasing   |  TLBTEMP_BASE_1 0xc8000000  DCACHE_WAY_SIZE
   | remap area 1     |
   +------------------+
   | Cache aliasing   |  TLBTEMP_BASE_2             DCACHE_WAY_SIZE
@@ -124,7 +125,8 @@ Default MMUv2-compatible layout::
   +------------------+
   | VMALLOC area     |  VMALLOC_START  0xa0000000  128MB - 64KB
   +------------------+  VMALLOC_END
-  | Cache aliasing   |  TLBTEMP_BASE_1 0xa7ff0000  DCACHE_WAY_SIZE
+  +------------------+
+  | Cache aliasing   |  TLBTEMP_BASE_1 0xa8000000  DCACHE_WAY_SIZE
   | remap area 1     |
   +------------------+
   | Cache aliasing   |  TLBTEMP_BASE_2             DCACHE_WAY_SIZE
@@ -167,7 +169,8 @@ Default MMUv2-compatible layout::
   +------------------+
   | VMALLOC area     |  VMALLOC_START  0x90000000  128MB - 64KB
   +------------------+  VMALLOC_END
-  | Cache aliasing   |  TLBTEMP_BASE_1 0x97ff0000  DCACHE_WAY_SIZE
+  +------------------+
+  | Cache aliasing   |  TLBTEMP_BASE_1 0x98000000  DCACHE_WAY_SIZE
   | remap area 1     |
   +------------------+
   | Cache aliasing   |  TLBTEMP_BASE_2             DCACHE_WAY_SIZE
diff --git a/MAINTAINERS b/MAINTAINERS
index fe6fa5d3a63e..974064e82ca9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3054,6 +3054,7 @@ R:	Martin KaFai Lau
 R:	Song Liu
 R:	Yonghong Song
 R:	Andrii Nakryiko
+R:	KP Singh
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -4028,6 +4029,7 @@ B:	https://github.com/ClangBuiltLinux/linux/issues
 C:	irc://chat.freenode.net/clangbuiltlinux
 S:	Supported
 K:	\b(?i:clang|llvm)\b
+F:	Documentation/kbuild/llvm.rst
 
 CLEANCACHE API
 M:	Konrad Rzeszutek Wilk
@@ -8516,6 +8518,19 @@ F:	Documentation/x86/intel_txt.rst
 F:	include/linux/tboot.h
 F:	arch/x86/kernel/tboot.c
 
+INTEL SGX
+M:	Jarkko Sakkinen
+L:	linux-sgx@vger.kernel.org
+S:	Supported
+Q:	https://patchwork.kernel.org/project/intel-sgx/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-sgx.git
+F:	Documentation/x86/sgx.rst
+F:	arch/x86/entry/vdso/vsgx.S
+F:	arch/x86/include/uapi/asm/sgx.h
+F:	arch/x86/kernel/cpu/sgx/*
+F:	tools/testing/selftests/sgx/*
+K:	\bSGX_
+
 INTERCONNECT
API M: Georgi Djakov L: linux-pm@vger.kernel.org @@ -9144,6 +9159,7 @@ F: include/linux/skmsg.h F: net/core/skmsg.c F: net/core/sock_map.c F: net/ipv4/tcp_bpf.c +F: net/ipv4/udp_bpf.c LANTIQ / INTEL Ethernet drivers M: Hauke Mehrtens diff --git a/Makefile b/Makefile index d6587d9dc476..19917c88a81f 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +# dist_make: Tencent Dist Makefile, which contains dist-* make targets +ifneq ($(shell echo $(MAKECMDGOALS) | grep "^dist-"),) +include dist/Makefile +else + # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 4 -SUBLEVEL = 32 -EXTRAVERSION = -1 +SUBLEVEL = 119 +EXTRAVERSION = -20 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -394,8 +401,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) -HOSTCC = gcc -HOSTCXX = g++ +ifneq ($(LLVM),) +HOSTCC = clang +HOSTCXX = clang++ +else +HOSTCC = gcc +HOSTCXX = g++ +endif KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \ $(HOSTCFLAGS) @@ -404,28 +416,46 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS) KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS) # Make variables (CC, etc...) -AS = $(CROSS_COMPILE)as -LD = $(CROSS_COMPILE)ld -CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E +ifneq ($(LLVM),) +CC = clang +LD = ld.lld +AR = llvm-ar +NM = llvm-nm +OBJCOPY = llvm-objcopy +OBJDUMP = llvm-objdump +READELF = llvm-readelf +OBJSIZE = llvm-size +STRIP = llvm-strip +else +CC = $(CROSS_COMPILE)gcc +LD = $(CROSS_COMPILE)ld AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +READELF = $(CROSS_COMPILE)readelf OBJSIZE = $(CROSS_COMPILE)size +STRIP = $(CROSS_COMPILE)strip +endif PAHOLE = pahole +RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids LEX = flex YACC = bison AWK = awk INSTALLKERNEL := installkernel -DEPMOD = /sbin/depmod +DEPMOD = depmod PERL = perl PYTHON = python -PYTHON2 = python2 PYTHON3 = python3 CHECK = sparse BASH = bash +KGZIP = gzip +KBZIP2 = bzip2 +KLZOP = lzop +LZMA = lzma +LZ4 = lz4c +XZ = xz CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) @@ -458,7 +488,7 @@ KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \ -Werror=implicit-function-declaration -Werror=implicit-int \ - -Wno-format-security \ + -Werror=return-type -Wno-format-security \ -std=gnu89 KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := @@ -471,9 +501,10 @@ KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL -export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL +export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE export KBUILD_CPPFLAGS NOSTDINC_FLAGS 
	LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
@@ -528,13 +559,13 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
 ifneq ($(CROSS_COMPILE),)
 CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
-CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),)
+ifneq ($(LLVM_IAS),1)
 CLANG_FLAGS += -no-integrated-as
 endif
 CLANG_FLAGS += -Werror=unknown-warning-option
@@ -587,12 +618,8 @@ KBUILD_MODULES :=
 KBUILD_BUILTIN := 1

 # If we have only "make modules", don't compile built-in objects.
-# When we're building modules with modversions, we need to consider
-# the built-in objects during the descend as well, in order to
-# make sure the checksums are up to date before we record them.
-
 ifeq ($(MAKECMDGOALS),modules)
-  KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
+  KBUILD_BUILTIN :=
 endif

 # If we have "make modules", compile modules
@@ -707,12 +734,9 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os
 endif

-ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-KBUILD_CFLAGS += -Wno-maybe-uninitialized
-endif
-
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)

 include scripts/Makefile.kcov
 include scripts/Makefile.gcc-plugins
@@ -749,17 +773,20 @@ KBUILD_CFLAGS += -Wno-tautological-compare
 KBUILD_CFLAGS += -mno-global-merge
 else

-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += -Wno-unused-but-set-variable
-
 # Warn about unmarked fall-throughs in switch statement.
 # Disabled for clang while comment to attribute conversion happens and
 # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
 KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
 endif

+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+
+# These result in bogus false positives
+KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
+
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
@@ -786,8 +813,11 @@ DEBUG_CFLAGS += -gsplit-dwarf
 else
 DEBUG_CFLAGS += -g
 endif
+ifneq ($(LLVM_IAS),1)
 KBUILD_AFLAGS += -Wa,-gdwarf-2
 endif
+endif
+
 ifdef CONFIG_DEBUG_INFO_DWARF4
 DEBUG_CFLAGS += -gdwarf-4
 endif
@@ -860,6 +890,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign
 # disable stringop warnings in gcc 8+
 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)

+# We'll want to enable this eventually, but it's not going away for 5.7 at least
+KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+
+# Another good warning that we'll want to enable eventually
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+
+# Enabled with W=2, disabled by default as noisy
+KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
@@ -890,12 +931,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 # change __FILE__ to the relative path from the srctree
 KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)

-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
@@ -977,10 +1012,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n -f
+    mod_compress_cmd = $(KGZIP) -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz -f
+    mod_compress_cmd = $(XZ) -f
   endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd
@@ -996,9 +1031,10 @@ export mod_sign_cmd
 HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)

+has_libelf = $(call try-run,\
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(call try-run,\
-		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1007,6 +1043,14 @@ ifdef CONFIG_STACK_VALIDATION
   endif
 endif

+ifdef CONFIG_DEBUG_INFO_BTF
+  ifeq ($(has_libelf),1)
+    resolve_btfids_target := tools/bpf/resolve_btfids FORCE
+  else
+    ERROR_RESOLVE_BTFIDS := 1
+  endif
+endif
+
 PHONY += prepare0

 export MODORDER := $(extmod-prefix)modules.order
@@ -1111,7 +1155,7 @@ prepare0: archprepare
	$(Q)$(MAKE) $(build)=.

 # All the preparing..
-prepare: prepare0 prepare-objtool
+prepare: prepare0 prepare-objtool prepare-resolve_btfids

 # Support for using generic headers in asm-generic
 asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj
@@ -1124,7 +1168,7 @@ uapi-asm-generic:
	$(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \
	generic=include/uapi/asm-generic

-PHONY += prepare-objtool
+PHONY += prepare-objtool prepare-resolve_btfids
 prepare-objtool: $(objtool_target)
 ifeq ($(SKIP_STACK_VALIDATION),1)
 ifdef CONFIG_UNWINDER_ORC
@@ -1135,6 +1179,11 @@ else
 endif
 endif

+prepare-resolve_btfids: $(resolve_btfids_target)
+ifeq ($(ERROR_RESOLVE_BTFIDS),1)
+	@echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+	@false
+endif

 # Generate some files
 # ---------------------------------------------------------------------------
@@ -1151,11 +1200,19 @@ define filechk_utsrelease.h
 endef

 define filechk_version.h
-	echo \#define LINUX_VERSION_CODE $(shell \
-	expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
-	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
+	if [ $(SUBLEVEL) -gt 255 ]; then \
+		echo \#define LINUX_VERSION_CODE $(shell \
+		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
+	else \
+		echo \#define LINUX_VERSION_CODE $(shell \
+		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+	fi; \
+	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
+	((c) > 255 ? 255 : (c)))'
 endef

+$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
 $(version_h): FORCE
	$(call filechk,version.h)
	$(Q)rm -f $(old_version_h)
@@ -1238,11 +1295,15 @@ ifneq ($(dtstree),)
	$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@

 PHONY += dtbs dtbs_install dtbs_check
-dtbs dtbs_check: include/config/kernel.release scripts_dtc
+dtbs: include/config/kernel.release scripts_dtc
	$(Q)$(MAKE) $(build)=$(dtstree)

+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
+dtbs: dt_binding_check
+endif
+
 dtbs_check: export CHECK_DTBS=1
-dtbs_check: dt_binding_check
+dtbs_check: dtbs

 dtbs_install:
	$(Q)$(MAKE) $(dtbinst)=$(dtstree)
@@ -1270,6 +1331,13 @@ ifdef CONFIG_MODULES

 all: modules

+# When we're building modules with modversions, we need to consider
+# the built-in objects during the descend as well, in order to
+# make sure the checksums are up to date before we record them.
+ifdef CONFIG_MODVERSIONS
+  KBUILD_BUILTIN := 1
+endif
+
 # Build modules
 #
 # A module can be listed more than once in obj-m resulting in
@@ -1823,3 +1891,5 @@ FORCE:

 # Declare the contents of the PHONY variable as phony. We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
+
+endif # dist_make
diff --git a/arch/Kconfig b/arch/Kconfig
index 238dccfa7691..a8df66e64544 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -131,6 +131,22 @@ config UPROBES
	  managed by the kernel and kept transparent to the probed application. )

+config HAVE_64BIT_ALIGNED_ACCESS
+	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+	help
+	  Some architectures require 64 bit accesses to be 64 bit
+	  aligned, which also requires structs containing 64 bit values
+	  to be 64 bit aligned too. This includes some 32 bit
+	  architectures which can do 64 bit accesses, as well as 64 bit
+	  architectures without unaligned access.
+ + This symbol should be selected by an architecture if 64 bit + accesses are required to be 64 bit aligned in this way even + though it is not a 64 bit architecture. + + See Documentation/unaligned-memory-access.txt for more + information on the topic of unaligned memory accesses. + config HAVE_EFFICIENT_UNALIGNED_ACCESS bool help @@ -405,6 +421,13 @@ config MMU_GATHER_NO_RANGE config HAVE_MMU_GATHER_NO_GATHER bool +config ARCH_WANT_IRQS_OFF_ACTIVATE_MM + bool + help + Temporary select until all architectures can be converted to have + irqs disabled over activate_mm. Architectures that do IPI based TLB + shootdowns should enable this. + config ARCH_HAVE_NMI_SAFE_CMPXCHG bool diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig index f4ec420d7f2d..3a132c91d45b 100644 --- a/arch/alpha/configs/defconfig +++ b/arch/alpha/configs/defconfig @@ -36,7 +36,6 @@ CONFIG_BLK_DEV_CY82C693=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_AIC7XXX=m CONFIG_AIC7XXX_CMDS_PER_DEVICE=253 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index af2c0063dc75..103270d5a9fc 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -322,14 +322,18 @@ static inline int __is_mmio(const volatile void __iomem *addr) #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) extern inline unsigned int ioread8(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } extern inline unsigned int ioread16(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } @@ -370,7 +374,9 @@ extern inline void outw(u16 b, unsigned long port) #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) extern inline unsigned int ioread32(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } @@ -415,14 +421,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr) extern inline u8 readb(const volatile void __iomem *addr) { - u8 ret = __raw_readb(addr); + u8 ret; + mb(); + ret = __raw_readb(addr); mb(); return ret; } extern inline u16 readw(const volatile void __iomem *addr) { - u16 ret = __raw_readw(addr); + u16 ret; + mb(); + ret = __raw_readw(addr); mb(); return ret; } @@ -463,14 +473,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) extern inline u32 readl(const volatile void __iomem *addr) { - u32 ret = __raw_readl(addr); + u32 ret; + mb(); + ret = __raw_readl(addr); mb(); return ret; } extern inline u64 readq(const volatile void __iomem *addr) { - u64 ret = __raw_readq(addr); + u64 ret; + mb(); + ret = __raw_readq(addr); mb(); return ret; } @@ -488,10 +502,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) } #endif -#define ioread16be(p) be16_to_cpu(ioread16(p)) -#define ioread32be(p) be32_to_cpu(ioread32(p)) -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) +#define ioread16be(p) swab16(ioread16(p)) +#define ioread32be(p) swab32(ioread32(p)) +#define iowrite16be(v,p) iowrite16(swab16(v), (p)) +#define iowrite32be(v,p) iowrite32(swab32(v), (p)) #define inb_p inb #define inw_p inw @@ -499,14 +513,44 @@ extern inline void writeq(u64 b, volatile void 
__iomem *addr) #define outb_p outb #define outw_p outw #define outl_p outl -#define readb_relaxed(addr) __raw_readb(addr) -#define readw_relaxed(addr) __raw_readw(addr) -#define readl_relaxed(addr) __raw_readl(addr) -#define readq_relaxed(addr) __raw_readq(addr) -#define writeb_relaxed(b, addr) __raw_writeb(b, addr) -#define writew_relaxed(b, addr) __raw_writew(b, addr) -#define writel_relaxed(b, addr) __raw_writel(b, addr) -#define writeq_relaxed(b, addr) __raw_writeq(b, addr) + +extern u8 readb_relaxed(const volatile void __iomem *addr); +extern u16 readw_relaxed(const volatile void __iomem *addr); +extern u32 readl_relaxed(const volatile void __iomem *addr); +extern u64 readq_relaxed(const volatile void __iomem *addr); + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +extern inline u8 readb_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readb(addr); +} + +extern inline u16 readw_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readw(addr); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +extern inline u32 readl_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readl(addr); +} + +extern inline u64 readq_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readq(addr); +} +#endif + +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq /* * String version of IO memory access ops: diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index de6c4df61082..d033d3f92d6d 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -124,6 +124,8 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#define SO_NETNS_COOKIE 71 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c index c025a3e5e357..938de13adfbf 100644 --- a/arch/alpha/kernel/io.c +++ b/arch/alpha/kernel/io.c @@ -16,21 +16,27 @@ unsigned int ioread8(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } unsigned int ioread16(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } unsigned int ioread32(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } @@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq); u8 readb(const volatile void __iomem *addr) { - u8 ret = __raw_readb(addr); + u8 ret; + mb(); + ret = __raw_readb(addr); mb(); return ret; } u16 readw(const volatile void __iomem *addr) { - u16 ret = __raw_readw(addr); + u16 ret; + mb(); + ret = __raw_readw(addr); mb(); return ret; } u32 readl(const volatile void __iomem *addr) { - u32 ret = __raw_readl(addr); + u32 ret; + mb(); + ret = __raw_readl(addr); mb(); return ret; } u64 readq(const volatile void __iomem *addr) { - u64 ret = __raw_readq(addr); + u64 ret; + mb(); + ret = __raw_readq(addr); mb(); return ret; } @@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew); EXPORT_SYMBOL(writel); EXPORT_SYMBOL(writeq); +/* + * The _relaxed functions must be ordered w.r.t. each other, but they don't + * have to be ordered w.r.t. other memory accesses. 
+ */ +u8 readb_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readb(addr); +} + +u16 readw_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readw(addr); +} + +u32 readl_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readl(addr); +} + +u64 readq_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readq(addr); +} + +EXPORT_SYMBOL(readb_relaxed); +EXPORT_SYMBOL(readw_relaxed); +EXPORT_SYMBOL(readl_relaxed); +EXPORT_SYMBOL(readq_relaxed); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. diff --git a/arch/arc/Makefile b/arch/arc/Makefile index f1c44cccf8d6..6f05e509889f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -90,16 +90,22 @@ libs-y += arch/arc/lib/ $(LIBGCC) boot := arch/arc/boot -#default target for make without any arguments. -KBUILD_IMAGE := $(boot)/bootpImage - -all: bootpImage -bootpImage: vmlinux - -boot_targets += uImage uImage.bin uImage.gz +boot_targets := uImage.bin uImage.gz uImage.lzma +PHONY += $(boot_targets) $(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +uimage-default-y := uImage.bin +uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz +uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma + +PHONY += uImage +uImage: $(uimage-default-y) + @ln -sf $< $(boot)/uImage + @$(kecho) ' Image $(boot)/uImage is ready' + +CLEAN_FILES += $(boot)/uImage + archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile index 538b92f4dd25..3b1f8a69a89e 100644 --- a/arch/arc/boot/Makefile +++ b/arch/arc/boot/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -targets := vmlinux.bin vmlinux.bin.gz uImage +targets := vmlinux.bin vmlinux.bin.gz # uImage build relies on mkimage being availble on your host for ARC target # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage @@ -13,11 +13,6 @@ LINUX_START_TEXT = $$(readelf -h vmlinux | \ UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE) UIMAGE_ENTRYADDR = $(LINUX_START_TEXT) -suffix-y := bin -suffix-$(CONFIG_KERNEL_GZIP) := gz -suffix-$(CONFIG_KERNEL_LZMA) := lzma - -targets += uImage targets += uImage.bin targets += uImage.gz targets += uImage.lzma @@ -42,7 +37,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE $(call if_changed,uimage,lzma) - -$(obj)/uImage: $(obj)/uImage.$(suffix-y) - @ln -sf $(notdir $<) $@ - @echo ' Image $@ is ready' diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi index 6ec1fcdfc0d7..92247288d056 100644 --- a/arch/arc/boot/dts/axc001.dtsi +++ b/arch/arc/boot/dts/axc001.dtsi @@ -85,7 +85,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index ac8e1b463a70..cd1edcf4f95e 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -129,7 +129,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 
9da21e7fd246..70779386ca79 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -135,7 +135,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 9acbeba832c0..dcaa44e408ac 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -88,6 +88,8 @@ arcpct: pct { compatible = "snps,archs-pct"; + interrupt-parent = <&cpu_intc>; + interrupts = <20>; }; /* TIMER0 with interrupt for clockevent */ @@ -208,7 +210,7 @@ reg = <0x8000 0x2000>; interrupts = <10>; interrupt-names = "macirq"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; snps,pbl = <32>; snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; @@ -226,7 +228,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ reg = <0>; }; }; diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi index f8be7ba8dad4..c21d0eb07bf6 100644 --- a/arch/arc/boot/dts/vdk_axc003.dtsi +++ b/arch/arc/boot/dts/vdk_axc003.dtsi @@ -46,7 +46,7 @@ }; - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0xe0012000 0x200 >; diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi index 0afa3e53a4e3..4d348853ac7c 100644 --- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi +++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi @@ -54,7 +54,7 @@ }; - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0xe0012000 0x200 >; diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index c77a0e3671ac..0284ace0e1ab 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -19,7 +19,7 @@ #define R_ARC_32_PCREL 0x31 /*to set parameters in the core dumps */ -#define ELF_ARCH EM_ARCOMPACT +#define ELF_ARCH EM_ARC_INUSE #define ELF_CLASS ELFCLASS32 #ifdef CONFIG_CPU_BIG_ENDIAN diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 0a32e8cfd074..bcd1920ae75a 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -10,6 +10,7 @@ #ifndef __ASSEMBLY__ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) struct vm_area_struct; diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 7addd0301c51..6bdcf9b495b8 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -135,8 +135,10 @@ #ifdef CONFIG_ARC_HAS_PAE40 #define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE) +#define MAX_POSSIBLE_PHYSMEM_BITS 40 #else #define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE) +#define MAX_POSSIBLE_PHYSMEM_BITS 32 #endif /************************************************************************** diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 72be01270e24..ea74a1eee5d9 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -153,7 +153,6 @@ END(EV_Extension) tracesys: ; save EFA in case tracer wants the PC of traced 
task ; using ERET won't work since next-PC has already committed - lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address @@ -196,15 +195,9 @@ tracesys_exit: ; Breakpoint TRAP ; --------------------------------------------- trap_with_param: - - ; stop_pc info by gdb needs this info - lr r0, [efa] + mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc mov r1, sp - ; Now that we have read EFA, it is safe to do "fake" rtie - ; and get out of CPU exception mode - FAKE_RET_FROM_EXCPN - ; Save callee regs in case gdb wants to have a look ; SP will grow up by size of CALLEE Reg-File ; NOTE: clobbers r12 @@ -231,6 +224,10 @@ ENTRY(EV_Trap) EXCEPTION_PROLOGUE + lr r12, [efa] + + FAKE_RET_FROM_EXCPN + ;============ TRAP 1 :breakpoints ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR) bmsk.f 0, r10, 7 @@ -238,9 +235,6 @@ ENTRY(EV_Trap) ;============ TRAP (no param): syscall top level - ; First return from Exception to pure K mode (Exception/IRQs renabled) - FAKE_RET_FROM_EXCPN - ; If syscall tracing ongoing, invoke pre-post-hooks GET_CURR_THR_INFO_FLAGS r10 btst r10, TIF_SYSCALL_TRACE diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 661fd842ea97..145722f80c9b 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; struct arc_reg_cc_build cc_bcr; - int i, has_interrupts; + int i, has_interrupts, irq = -1; int counter_size; /* in bits */ union cc_name { @@ -638,22 +638,25 @@ static int arc_pmu_device_probe(struct platform_device *pdev) }; if (has_interrupts) { - int irq = platform_get_irq(pdev, 0); + irq = platform_get_irq(pdev, 0); + if (irq >= 0) { + int ret; - if (irq < 0) { - pr_err("Cannot get IRQ number for the platform\n"); - return -ENODEV; + arc_pmu->irq = irq; + + /* intc map function ensures irq_set_percpu_devid() called */ + ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", + this_cpu_ptr(&arc_pmu_cpu)); + + if (!ret) + on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); + else + irq = -1; } - arc_pmu->irq = irq; + } - /* intc map function ensures irq_set_percpu_devid() called */ - request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", - this_cpu_ptr(&arc_pmu_cpu)); - - on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); - - } else + if (irq == -1) arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; /* diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 7ee89dc61f6e..23dc002aa574 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -409,12 +410,12 @@ static void arc_chk_core_config(void) if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) panic("Linux built with incorrect DCCM Base address\n"); - if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) + if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz) panic("Linux built with incorrect DCCM Size\n"); #endif #ifdef CONFIG_ARC_HAS_ICCM - if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) + if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz) panic("Linux built with incorrect ICCM Size\n"); #endif diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 3d57ed0d8535..404518051093 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= 
__copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); - return err; + return err ? -EFAULT : 0; } static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) @@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); if (err) - return err; + return -EFAULT; set_current_blocked(&set); regs->bta = uregs.scratch.bta; diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 1e440bbfa876..fc3054c34db1 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -38,15 +38,15 @@ #ifdef CONFIG_ARC_DW2_UNWIND -static void seed_unwind_frame_info(struct task_struct *tsk, - struct pt_regs *regs, - struct unwind_frame_info *frame_info) +static int +seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, + struct unwind_frame_info *frame_info) { /* * synchronous unwinding (e.g. dump_stack) * - uses current values of SP and friends */ - if (tsk == NULL && regs == NULL) { + if (regs == NULL && (tsk == NULL || tsk == current)) { unsigned long fp, sp, blink, ret; frame_info->task = current; @@ -65,11 +65,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk, frame_info->call_frame = 0; } else if (regs == NULL) { /* - * Asynchronous unwinding of sleeping task - * - Gets SP etc from task's pt_regs (saved bottom of kernel - * mode stack of task) + * Asynchronous unwinding of a likely sleeping task + * - first ensure it is actually sleeping + * - if so, it will be in __switch_to, kernel mode SP of task + * is safe-kept and BLINK at a well known location in there */ + if (tsk->state == TASK_RUNNING) + return -1; + frame_info->task = tsk; frame_info->regs.r27 = TSK_K_FP(tsk); @@ -103,6 +107,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk, frame_info->regs.r63 = regs->ret; frame_info->call_frame = 0; } + + return 0; } #endif @@ -112,11 +118,12 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, int (*consumer_fn) (unsigned int, void *), void *arg) { #ifdef CONFIG_ARC_DW2_UNWIND - int ret = 0; + int ret = 0, cnt = 0; unsigned int address; struct unwind_frame_info frame_info; - seed_unwind_frame_info(tsk, regs, &frame_info); + if (seed_unwind_frame_info(tsk, regs, &frame_info)) + return 0; while (1) { address = UNW_PC(&frame_info); @@ -132,6 +139,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, break; frame_info.regs.r63 = frame_info.regs.r31; + + if (cnt++ > 128) { + printk("unwinder looping too long, aborting !\n"); + return 0; + } } return address; /* return the last address it saw */ diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index a931d0a256d0..a645bca5899a 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -6,6 +6,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" + depends on ISA_ARCOMPACT select CPU_BIG_ENDIAN select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index a4a61531c7fb..77712c5ffe84 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -33,7 +33,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) 
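The arc/kernel/stacktrace.c hunks just above harden the dwarf unwinder in two independent ways: seeding now fails for a task that is still TASK_RUNNING on another CPU (its saved SP/FP would be stale by the time they are read), and the walk itself is capped at 128 frames so a corrupted or cyclic frame chain aborts instead of hanging the dumper. The sketch below only illustrates that bounded-walk pattern under assumed names (struct frame, unwind_bounded() and the frame layout are hypothetical, not the kernel's types):

#include <stdio.h>

/* Hypothetical frame record standing in for unwind_frame_info. */
struct frame {
	unsigned long pc;
	struct frame *caller;	/* next outer frame, NULL at the outermost */
};

/*
 * Walk at most 'limit' frames, mirroring the "cnt++ > 128" guard the
 * patch adds: a corrupted chain terminates the walk with a warning
 * instead of looping forever.
 */
static unsigned long unwind_bounded(const struct frame *f, int limit)
{
	unsigned long last_pc = 0;
	int cnt = 0;

	while (f) {
		last_pc = f->pc;
		printf("  pc=%#lx\n", f->pc);
		if (cnt++ > limit) {
			printf("unwinder looping too long, aborting!\n");
			return 0;
		}
		f = f->caller;
	}
	return last_pc;	/* last address seen, as arc_unwind_core() returns */
}

int main(void)
{
	struct frame root = { 0x1000, NULL };
	struct frame mid  = { 0x2000, &root };
	struct frame leaf = { 0x3000, &mid };

	unwind_bounded(&leaf, 128);	/* well-formed chain: terminates at root */

	mid.caller = &leaf;		/* corrupt the chain into a cycle */
	unwind_bounded(&leaf, 8);	/* guard fires (the kernel's cap is 128) */
	return 0;
}

The same defensive instinct drives the new TASK_RUNNING bail-out: rather than trusting registers that may change mid-walk, seed_unwind_frame_info() returns an error and arc_unwind_core() reports no frames at all.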
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index ce8101834518..6b5c54576f54 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK select ARC_HAS_ACCL_REGS select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK + select RESET_CONTROLLER select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 05c9bbfe444d..9aa88715f196 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -507,8 +507,10 @@ config ARCH_S3C24XX select HAVE_S3C2410_WATCHDOG if WATCHDOG select HAVE_S3C_RTC if RTC_CLASS select NEED_MACH_IO_H + select S3C2410_WATCHDOG select SAMSUNG_ATAGS select USE_OF + select WATCHDOG help Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 and S3C2450 SoCs based systems, such as the Simtec Electronics BAST diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 1483966dcf23..f0b3a9281d69 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -121,9 +121,9 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \ asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. -KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ - sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ - -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) +KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ + sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \ + -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) # Supply ZRELADDR to the decompressor via a linker symbol. ifneq ($(CONFIG_AUTO_ZRELADDR),y) @@ -165,7 +165,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S # The .data section is already discarded by the linker script so no need # to bother about it here. 
check_for_bad_syms = \ -bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ +bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ [ -z "$$bad_syms" ] || \ ( echo "following symbols must have non local/private scope:" >&2; \ echo "$$bad_syms" >&2; false ) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 93dffed0ac6e..cbe126297f54 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -1142,9 +1142,9 @@ __armv4_mmu_cache_off: __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU - bic r0, r0, #0x000d + bic r0, r0, #0x0005 #else - bic r0, r0, #0x000c + bic r0, r0, #0x0004 #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r12, lr diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S index fc7ed03d8b93..51b078604978 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.S +++ b/arch/arm/boot/compressed/vmlinux.lds.S @@ -43,7 +43,7 @@ SECTIONS } .table : ALIGN(4) { _table_start = .; - LONG(ZIMAGE_MAGIC(2)) + LONG(ZIMAGE_MAGIC(4)) LONG(ZIMAGE_MAGIC(0x5a534c4b)) LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) LONG(ZIMAGE_MAGIC(_kernel_bss_size)) diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh index 40937248cebe..304495c3c2c5 100755 --- a/arch/arm/boot/deflate_xip_data.sh +++ b/arch/arm/boot/deflate_xip_data.sh @@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3 # substitute the data section by a compressed version $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp" $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes | -gzip -9 >> "$XIPIMAGE.tmp" +$KGZIP -9 >> "$XIPIMAGE.tmp" # replace kernel binary mv -f "$XIPIMAGE.tmp" "$XIPIMAGE" diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts index ff4f919d22f6..abf2badce53d 100644 --- a/arch/arm/boot/dts/am335x-pocketbeagle.dts +++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts @@ -88,7 +88,6 @@ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0) - AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */ >; }; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index fb6b8aa12cc5..77fa7c0f2104 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -40,6 +40,9 @@ ethernet1 = &cpsw_emac1; spi0 = &spi0; spi1 = &spi1; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; }; cpus { diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 59770dd3785e..bbe15775fccd 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1576,8 +1576,9 @@ reg-names = "rev"; ti,hwmods = "d_can0"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; - clock-names = "fck"; + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>, + <&dcan0_fck>; + clock-names = "fck", "osc"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0xcc000 0x2000>; @@ -1585,6 +1586,8 @@ dcan0: can@0 { compatible = "ti,am4372-d_can", "ti,am3352-d_can"; reg = <0x0 0x2000>; + clocks = <&dcan0_fck>; + clock-names = "fck"; syscon-raminit = <&scm_conf 0x644 0>; interrupts = ; status = "disabled"; @@ -1597,8 +1600,9 @@ reg-names = "rev"; ti,hwmods = "d_can1"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = 
<&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
-		clock-names = "fck";
+		clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>,
+			 <&dcan1_fck>;
+		clock-names = "fck", "osc";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <0x0 0xd0000 0x2000>;
@@ -1606,6 +1610,8 @@
 		dcan1: can@0 {
 			compatible = "ti,am4372-d_can", "ti,am3352-d_can";
 			reg = <0x0 0x2000>;
+			clocks = <&dcan1_fck>;
+			clock-names = "fck";
 			syscon-raminit = <&scm_conf 0x644 1>;
 			interrupts = ;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index 768b6c5d2129..fde4c302f08e 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -236,6 +236,7 @@
 		status = "okay";
 		compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
 		reg = <1>;
+		marvell,reg-init = <3 18 0 0x4985>;
 		/* irq is connected to &pcawan pin 7 */
 	};
diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
index 705adfa8c680..a94758090fb0 100644
--- a/arch/arm/boot/dts/armada-388-helios4.dts
+++ b/arch/arm/boot/dts/armada-388-helios4.dts
@@ -70,6 +70,9 @@
 	system-leds {
 		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&helios_system_led_pins>;
+
 		status-led {
 			label = "helios4:green:status";
 			gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
@@ -86,6 +89,9 @@
 	io-leds {
 		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&helios_io_led_pins>;
+
 		sata1-led {
 			label = "helios4:green:ata1";
 			gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
@@ -121,11 +127,15 @@
 	fan1: j10-pwm {
 		compatible = "pwm-fan";
 		pwms = <&gpio1 9 40000>;	/* Target freq:25 kHz */
+		pinctrl-names = "default";
+		pinctrl-0 = <&helios_fan1_pins>;
 	};

 	fan2: j17-pwm {
 		compatible = "pwm-fan";
 		pwms = <&gpio1 23 40000>;	/* Target freq:25 kHz */
+		pinctrl-names = "default";
+		pinctrl-0 = <&helios_fan2_pins>;
 	};

 	usb2_phy: usb2-phy {
@@ -291,16 +301,22 @@
 					       "mpp39", "mpp40";
 			marvell,function = "sd0";
 		};
-		helios_led_pins: helios-led-pins {
-			marvell,pins = "mpp24", "mpp25",
-				       "mpp49", "mpp50",
+		helios_system_led_pins: helios-system-led-pins {
+			marvell,pins = "mpp24", "mpp25";
+			marvell,function = "gpio";
+		};
+		helios_io_led_pins: helios-io-led-pins {
+			marvell,pins = "mpp49", "mpp50",
 				       "mpp52", "mpp53",
 				       "mpp54";
 			marvell,function = "gpio";
 		};
-		helios_fan_pins: helios-fan-pins {
-			marvell,pins = "mpp41", "mpp43",
-				       "mpp48", "mpp55";
+		helios_fan1_pins: helios_fan1_pins {
+			marvell,pins = "mpp41", "mpp43";
+			marvell,function = "gpio";
+		};
+		helios_fan2_pins: helios_fan2_pins {
+			marvell,pins = "mpp48", "mpp55";
 			marvell,function = "gpio";
 		};
 		microsom_spi1_cs_pins: spi1-cs-pins {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 3f4bb44d85f0..669da3a33d82 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -339,7 +339,8 @@
 			comphy: phy@18300 {
 				compatible = "marvell,armada-380-comphy";
-				reg = <0x18300 0x100>;
+				reg-names = "comphy", "conf";
+				reg = <0x18300 0x100>, <0x18460 4>;
 				#address-cells = <1>;
 				#size-cells = <0>;
diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
index 267d0c178e55..30abb4b64a1b 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
@@ -266,11 +266,6 @@
 	reg = <0x11000 0x100>;
 };

-&i2c1 {
-	compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
-	reg = <0x11100 0x100>;
-};
-
 &mpic {
 	reg = <0x20a00 0x2d0>, <0x21070 0x58>;
 };
diff --git
a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts index 682f729ea25e..c58230fea45f 100644 --- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts +++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts @@ -81,11 +81,6 @@ status = "okay"; }; -&vuart { - // VUART Host Console - status = "okay"; -}; - &uart1 { // Host Console status = "okay"; diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts index 22dade6393d0..d1dbe3b6ad5a 100644 --- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts +++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts @@ -22,9 +22,9 @@ #size-cells = <1>; ranges; - vga_memory: framebuffer@7f000000 { + vga_memory: framebuffer@9f000000 { no-map; - reg = <0x7f000000 0x01000000>; + reg = <0x9f000000 0x01000000>; /* 16M */ }; }; diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index dffb595d30e4..679d04d585a4 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -371,6 +371,7 @@ compatible = "aspeed,ast2400-ibt-bmc"; reg = <0xc0 0x18>; interrupts = <8>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi index e8feb8b66a2f..412c96b3c3ac 100644 --- a/arch/arm/boot/dts/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed-g5.dtsi @@ -464,6 +464,7 @@ compatible = "aspeed,ast2500-ibt-bmc"; reg = <0xc0 0x18>; interrupts = <8>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi index 7788d5db65c2..ae6d07dc0283 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi @@ -44,8 +44,8 @@ pinctrl-0 = <&pinctrl_macb0_default>; phy-mode = "rmii"; - ethernet-phy@0 { - reg = <0x0>; + ethernet-phy@7 { + reg = <0x7>; interrupt-parent = <&pioA>; interrupts = ; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts index ba7f3e646c26..b8db77b7f5d8 100644 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts @@ -40,7 +40,7 @@ ahb { usb0: gadget@300000 { - atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>; + atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usba_vbus>; status = "okay"; @@ -125,8 +125,6 @@ bus-width = <8>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sdmmc0_default>; - non-removable; - mmc-ddr-1_8v; status = "okay"; }; diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index 61f068a7b362..400eaf640fe4 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -242,6 +242,11 @@ atmel,pins = ; /* PE9, conflicts with A9 */ }; + pinctrl_usb_default: usb_default { + atmel,pins = + ; + }; }; }; }; @@ -259,6 +264,8 @@ &pioE 3 GPIO_ACTIVE_LOW &pioE 4 GPIO_ACTIVE_LOW >; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb_default>; status = "okay"; }; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index fdfc37d716e0..1d101067371b 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -133,6 +133,11 @@ atmel,pins = ; }; + pinctrl_usb_default: usb_default { + atmel,pins = + ; + }; 
pinctrl_key_gpio: key_gpio_0 { atmel,pins = ; @@ -158,6 +163,8 @@ &pioE 11 GPIO_ACTIVE_HIGH &pioE 14 GPIO_ACTIVE_HIGH >; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb_default>; status = "okay"; }; diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi index ea024e4b6e09..0121bb0ecde1 100644 --- a/arch/arm/boot/dts/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/at91sam9rl.dtsi @@ -278,23 +278,26 @@ atmel,adc-use-res = "highres"; trigger0 { - trigger-name = "timer-counter-0"; + trigger-name = "external-rising"; trigger-value = <0x1>; + trigger-external; }; + trigger1 { - trigger-name = "timer-counter-1"; - trigger-value = <0x3>; + trigger-name = "external-falling"; + trigger-value = <0x2>; + trigger-external; }; trigger2 { - trigger-name = "timer-counter-2"; - trigger-value = <0x5>; + trigger-name = "external-any"; + trigger-value = <0x3>; + trigger-external; }; trigger3 { - trigger-name = "external"; - trigger-value = <0x13>; - trigger-external; + trigger-name = "continuous"; + trigger-value = <0x6>; }; }; diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index e4d49731287f..dd71ab08136b 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -75,7 +75,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; @@ -83,7 +83,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x20>; interrupts = ; + IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; @@ -91,7 +91,7 @@ compatible = "arm,cortex-a9-twd-wdt"; reg = <0x20620 0x20>; interrupts = ; + IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; @@ -217,7 +217,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index da6d70f09ef1..8615d89fa469 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -257,10 +257,10 @@ status = "disabled"; }; - mailbox: mailbox@25000 { + mailbox: mailbox@25c00 { compatible = "brcm,iproc-fa2-mbox"; - reg = <0x25000 0x445>; - interrupts = ; + reg = <0x25c00 0x400>; + interrupts = ; #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; @@ -282,7 +282,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index 4c3f606e5b8d..f65448c01e31 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -24,7 +24,7 @@ leds { act { - gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; + gpios = <&gpio 47 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 90125ce19a1b..50c64146d492 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -488,6 +488,7 @@ "dsi0_ddr2", "dsi0_ddr"; + status = "disabled"; }; thermal: thermal@7e212000 { diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 2d9b4dd05830..0016720ce530 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -488,7 +488,7 @@ }; spi@18029200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = 
"brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index 3931fb068ff0..91d1018ab75f 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts @@ -24,12 +24,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 9e43d5ec0bb2..79ccdd4470f4 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts @@ -33,12 +33,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts index 861ab90a3f3a..c16e183822be 100644 --- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts +++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts @@ -24,12 +24,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index c6be65249f42..a6ef3d137c7a 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -172,6 +172,7 @@ #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; + dma-ranges; /** * To enable PCI endpoint mode, disable the pcie1_rc * node and enable pcie1_ep mode. @@ -185,7 +186,6 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>; - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; @@ -230,6 +230,7 @@ #address-cells = <1>; ranges = <0x51800000 0x51800000 0x3000 0x0 0x30000000 0x10000000>; + dma-ranges; status = "disabled"; pcie2_rc: pcie@51800000 { reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; @@ -240,7 +241,6 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>; - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi index 9f6fbe4c1fee..859e4382ac4b 100644 --- a/arch/arm/boot/dts/dra76x.dtsi +++ b/arch/arm/boot/dts/dra76x.dtsi @@ -32,8 +32,8 @@ interrupts = , ; interrupt-names = "int0", "int1"; - clocks = <&mcan_clk>, <&l3_iclk_div>; - clock-names = "cclk", "hclk"; + clocks = <&l3_iclk_div>, <&mcan_clk>; + clock-names = "hclk", "cclk"; bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>; }; }; diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi index dee35e3a5c4b..69d134db6e94 100644 --- a/arch/arm/boot/dts/exynos3250-artik5.dtsi +++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi @@ -75,7 +75,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx3>; - interrupts = <5 IRQ_TYPE_NONE>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps14_irq>; reg = <0x66>; diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts index 
248bd372fe70..a23a8749c94e 100644 --- a/arch/arm/boot/dts/exynos3250-monk.dts +++ b/arch/arm/boot/dts/exynos3250-monk.dts @@ -195,7 +195,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index 86c26a4edfd7..468932f45289 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -260,7 +260,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index 09d3d54d09ff..1b5578381d78 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -115,7 +115,7 @@ gpio-sck = <&gpy3 1 GPIO_ACTIVE_HIGH>; gpio-mosi = <&gpy3 3 GPIO_ACTIVE_HIGH>; num-chipselects = <1>; - cs-gpios = <&gpy4 3 GPIO_ACTIVE_HIGH>; + cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>; lcd@0 { compatible = "samsung,ld9040"; @@ -124,8 +124,6 @@ vci-supply = <&ldo17_reg>; reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; spi-max-frequency = <1200000>; - spi-cpol; - spi-cpha; power-on-delay = <10>; reset-delay = <10>; panel-width-mm = <90>; diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi index ce87d2ff27aa..4b9c4cab0314 100644 --- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi +++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi @@ -68,7 +68,7 @@ i2c_cm36651: i2c-gpio-2 { compatible = "i2c-gpio"; - gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>; + gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>; i2c-gpio,delay-us = <2>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi index 83be3a797411..fedb21377c66 100644 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi @@ -139,7 +139,7 @@ max77693@66 { compatible = "maxim,max77693"; interrupt-parent = <&gpx1>; - interrupts = <5 IRQ_TYPE_EDGE_FALLING>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77693_irq>; reg = <0x66>; @@ -187,7 +187,7 @@ max77693-fuel-gauge@36 { compatible = "maxim,max17047"; interrupt-parent = <&gpx2>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77693_fuel_irq>; reg = <0x36>; @@ -588,7 +588,7 @@ max77686: max77686_pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; pinctrl-0 = <&max77686_irq>; pinctrl-names = "default"; reg = <0x09>; diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index ea55f377d17c..424d12ecc901 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -274,7 +274,7 @@ max77686: pmic@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; reg = <0x09>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts 
b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 6dc96948a9cc..70a2b6e2ad3f 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -133,7 +133,7 @@ compatible = "maxim,max77686"; reg = <0x09>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index c952a615148e..737f0e20a452 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi @@ -292,7 +292,7 @@ max77686: max77686@9 { compatible = "maxim,max77686"; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index 3d501926c227..2355c5316484 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -108,7 +108,7 @@ compatible = "samsung,s5m8767-pmic"; reg = <0x66>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts index e0db251e253f..f68baaf58f9e 100644 --- a/arch/arm/boot/dts/exynos5410-odroidxu.dts +++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts @@ -327,6 +327,8 @@ regulator-name = "vddq_lcd"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + /* Supplies also GPK and GPJ */ + regulator-always-on; }; ldo8_reg: LDO8 { @@ -637,11 +639,11 @@ }; &usbdrd_dwc3_0 { - dr_mode = "host"; + dr_mode = "peripheral"; }; &usbdrd_dwc3_1 { - dr_mode = "peripheral"; + dr_mode = "host"; }; &usbdrd3_0 { diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi index 369a8a7f2105..481ee99aa9c9 100644 --- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi @@ -560,6 +560,34 @@ interrupt-controller; #interrupt-cells = <2>; }; + + usb3_1_oc: usb3-1-oc { + samsung,pins = "gpk2-4", "gpk2-5"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + + usb3_1_vbusctrl: usb3-1-vbusctrl { + samsung,pins = "gpk2-6", "gpk2-7"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + + usb3_0_oc: usb3-0-oc { + samsung,pins = "gpk3-0", "gpk3-1"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + + usb3_0_vbusctrl: usb3-0-vbusctrl { + samsung,pins = "gpk3-2", "gpk3-3"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; }; &pinctrl_2 { diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi index e6f78b1cee7c..d077373cf872 100644 --- a/arch/arm/boot/dts/exynos5410.dtsi +++ b/arch/arm/boot/dts/exynos5410.dtsi @@ -398,6 +398,8 @@ &usbdrd3_0 { clocks = <&clock CLK_USBD300>; clock-names = "usbdrd30"; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_0_oc>, <&usb3_0_vbusctrl>; }; &usbdrd_phy0 { @@ -409,6 +411,8 @@ &usbdrd3_1 { clocks = <&clock CLK_USBD301>; clock-names = "usbdrd30"; + pinctrl-names = "default"; + pinctrl-0 = <&usb3_1_oc>, <&usb3_1_vbusctrl>; }; &usbdrd_dwc3_1 { diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts 
b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index 592d7b45ecc8..53bf988855e0 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -349,7 +349,7 @@ reg = <0x66>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_EDGE_FALLING>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps11_irq>; diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi index 829147e320e0..9e64a4ab9494 100644 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi @@ -141,7 +141,7 @@ samsung,s2mps11-acokb-ground; interrupt-parent = <&gpx0>; - interrupts = <4 IRQ_TYPE_EDGE_FALLING>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps11_irq>; diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts index 0cd75dadf292..188639738dc3 100644 --- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts +++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts @@ -75,8 +75,8 @@ imx27-phycard-s-rdk { pinctrl_i2c1: i2c1grp { fsl,pins = < - MX27_PAD_I2C2_SDA__I2C2_SDA 0x0 - MX27_PAD_I2C2_SCL__I2C2_SCL 0x0 + MX27_PAD_I2C_DATA__I2C_DATA 0x0 + MX27_PAD_I2C_CLK__I2C_CLK 0x0 >; }; diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts index a25da415cb02..907339bc81e5 100644 --- a/arch/arm/boot/dts/imx50-evk.dts +++ b/arch/arm/boot/dts/imx50-evk.dts @@ -59,7 +59,7 @@ MX50_PAD_CSPI_MISO__CSPI_MISO 0x00 MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x00 MX50_PAD_CSPI_SS0__GPIO4_11 0xc4 - MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0xf4 + MX50_PAD_ECSPI1_MOSI__GPIO4_13 0x84 >; }; diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts index 0d594e4bd559..a1173bf5bff5 100644 --- a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts +++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts @@ -38,7 +38,7 @@ }; &switch_ports { - /delete-node/ port@2; + /delete-node/ port@3; }; &touchscreen { diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts index 95b8f2d71821..fb0980190aa0 100644 --- a/arch/arm/boot/dts/imx6q-b450v3.dts +++ b/arch/arm/boot/dts/imx6q-b450v3.dts @@ -65,13 +65,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; -}; - &ldb { status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts index 611cb7ae7e55..8f762d9c5ae9 100644 --- a/arch/arm/boot/dts/imx6q-b650v3.dts +++ b/arch/arm/boot/dts/imx6q-b650v3.dts @@ -65,13 +65,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; -}; - &ldb { status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts index e4cb118f88c6..1ea64ecf4291 100644 --- a/arch/arm/boot/dts/imx6q-b850v3.dts +++ b/arch/arm/boot/dts/imx6q-b850v3.dts @@ -53,17 +53,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>, - <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, - <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, - <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, - <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, - <&clks 
IMX6QDL_CLK_PLL2_PFD2_396M>; -}; - &ldb { fsl,dual-channel; status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index fa27dcdf06f1..1938b04199c4 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -377,3 +377,18 @@ #interrupt-cells = <1>; }; }; + +&clks { + assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, + <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>; + assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; +}; diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index 1a9a9d98f284..14d6fec50dee 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -273,7 +273,7 @@ /* VDD_AUD_1P8: Audio codec */ reg_aud_1p8v: ldo3 { - regulator-name = "vdd1p8"; + regulator-name = "vdd1p8a"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-boot-on; diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi index c23ba229fd05..8c33510c9519 100644 --- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi @@ -105,19 +105,16 @@ sound-digital { compatible = "simple-audio-card"; simple-audio-card,name = "tda1997x-audio"; + simple-audio-card,format = "i2s"; + simple-audio-card,bitclock-master = <&sound_codec>; + simple-audio-card,frame-master = <&sound_codec>; - simple-audio-card,dai-link@0 { - format = "i2s"; + sound_cpu: simple-audio-card,cpu { + sound-dai = <&ssi1>; + }; - cpu { - sound-dai = <&ssi2>; - }; - - codec { - bitclock-master; - frame-master; - sound-dai = <&hdmi_receiver>; - }; + sound_codec: simple-audio-card,codec { + sound-dai = <&hdmi_receiver>; }; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi index 7814f1ef0804..fde56f98398d 100644 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi @@ -384,7 +384,7 @@ pinctrl_usbotg: usbotggrp { fsl,pins = < - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; @@ -396,6 +396,7 @@ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0 >; }; diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi index 81c7ebb4b3fb..eea317b41020 100644 --- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi @@ -167,7 +167,7 @@ i2c-gpio,delay-us = <2>; /* ~100 kHz */ #address-cells = <1>; #size-cells = <0>; - status = "disabld"; + status = "disabled"; }; i2c_cam: i2c-gpio-cam { @@ -179,7 +179,7 @@ i2c-gpio,delay-us = <2>; /* ~100 kHz */ #address-cells = <1>; #size-cells = <0>; - status = "disabld"; + status = "disabled"; }; }; @@ -551,7 +551,7 @@ pinctrl_i2c3: i2c3grp { fsl,pins = < - MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1 + MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1 MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1 >; }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index bc43c75f1745..6678b97b1007 100644 --- 
a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -432,6 +432,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd1_reg>; status = "disabled"; }; @@ -441,5 +442,6 @@ &pinctrl_usdhc3_cdwp>; cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd0_reg>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi index 44a97ba93a95..352ac585ca6b 100644 --- a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi @@ -153,6 +153,7 @@ bus-width = <4>; keep-power-in-suspend; mmc-pwrseq = <&pwrseq_ti_wifi>; + cap-power-off-card; non-removable; vmmc-supply = <&vcc_3v3>; /* vqmmc-supply = <&nvcc_sd1>; - MMC layer doesn't like it! */ diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi index 776bfc77f89d..16672cbada28 100644 --- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi +++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi @@ -98,7 +98,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi index 93909796885a..b9b698f72b26 100644 --- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi @@ -166,7 +166,6 @@ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030 - MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1 >; }; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index e6b4b8525f98..bc488df31511 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -1039,9 +1039,8 @@ compatible = "fsl,imx6q-fec"; reg = <0x02188000 0x4000>; interrupt-names = "int0", "pps"; - interrupts-extended = - <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>, - <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, + <0 119 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET_REF>; diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi index 5f51f8e5c1fa..d91f92f944c5 100644 --- a/arch/arm/boot/dts/imx6qp.dtsi +++ b/arch/arm/boot/dts/imx6qp.dtsi @@ -77,7 +77,6 @@ }; &fec { - /delete-property/interrupts-extended; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, <0 119 IRQ_TYPE_LEVEL_HIGH>; }; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 3a96b5538a2a..540880f0413f 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -936,8 +936,10 @@ }; rngb: rngb@21b4000 { + compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb"; reg = <0x021b4000 0x4000>; interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6SL_CLK_DUMMY>; }; weim: weim@21b8000 { diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 315044ccd65f..e4719566133c 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -99,7 +99,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ethphy0>; fsl,magic-packet; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index f6972deb5e39..865528b134d8
100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -213,7 +213,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ethphy2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index eed78f12e79e..8bba03de51ad 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -346,7 +346,7 @@ &iomuxc { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gpio1 &pinctrl_gpio2 &pinctrl_gpio3 &pinctrl_gpio4 - &pinctrl_gpio7>; + &pinctrl_gpio7 &pinctrl_usbc_det>; pinctrl_gpio1: gpio1-grp { fsl,pins = < @@ -451,7 +451,6 @@ pinctrl_enet1: enet1grp { fsl,pins = < - MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14 MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x73 MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x73 MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x73 @@ -649,6 +648,12 @@ >; }; + pinctrl_usbc_det: gpio-usbc-det { + fsl,pins = < + MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14 + >; + }; + pinctrl_usbh_reg: gpio-usbh-vbus { fsl,pins = < MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 /* SODIMM 129 USBH PEN */ diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 3dac6898cdc5..0108b63df77d 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -397,7 +397,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLC>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 0 32>; + gpio-ranges = <&iomuxc1 0 0 20>; }; gpio_ptd: gpio@40af0000 { @@ -411,7 +411,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLD>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 32 32>; + gpio-ranges = <&iomuxc1 0 32 12>; }; gpio_pte: gpio@40b00000 { @@ -425,7 +425,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 64 32>; + gpio-ranges = <&iomuxc1 0 64 16>; }; gpio_ptf: gpio@40b10000 { @@ -439,7 +439,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLF>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 96 32>; + gpio-ranges = <&iomuxc1 0 96 20>; }; }; diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi index 100396f6c2fe..395e05f10d36 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi @@ -51,6 +51,8 @@ &mcbsp2 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; }; &charger { @@ -102,35 +104,18 @@ regulator-max-microvolt = <3300000>; }; - lcd0: display@0 { - compatible = "panel-dpi"; - label = "28"; - status = "okay"; - /* default-on; */ + lcd0: display { + /* This isn't the exact LCD, but the timings meet spec */ + compatible = "logicpd,type28"; pinctrl-names = "default"; pinctrl-0 = <&lcd_enable_pin>; - enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */ + backlight = <&bl>; + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; }; }; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = <42>; - vback-porch = <3>; - vfront-porch = <2>; - vsync-len = <11>; - hsync-active = <1>; - vsync-active = <1>; - de-active = <1>; - pixelclk-active = <0>; - }; }; bl: backlight { diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 449cc7616da6..e7a8f8addb6e 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -80,6 +80,8 @@ }; &mcbsp2 { + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi index 7b7ec7b1217b..824393e1bcfb 100644 --- a/arch/arm/boot/dts/lpc32xx.dtsi +++ b/arch/arm/boot/dts/lpc32xx.dtsi @@ -329,9 +329,6 @@ clocks = <&xtal_32k>, <&xtal>; clock-names = "xtal_32k", "xtal"; - - assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>; - assigned-clock-rates = <208000000>; }; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 63d9f4a066e3..c62fcca7b426 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -181,7 +181,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x0 0x1550000 0x0 0x10000>, - <0x0 0x40000000 0x0 0x40000000>; + <0x0 0x40000000 0x0 0x20000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; clock-names = "qspi_en", "qspi"; @@ -753,7 +753,7 @@ fsl,tmr-prsc = <2>; fsl,tmr-add = <0xaaaaaaab>; fsl,tmr-fiper1 = <999999995>; - fsl,tmr-fiper2 = <99990>; + fsl,tmr-fiper2 = <999999995>; fsl,max-adj = <499999999>; fsl,extts-fifo; }; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index db2033f674c6..3efe9d41c2bb 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -230,8 +230,6 @@ , , , - , - , , , , diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index a24eccc354b9..0f9c71137bed 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts @@ -219,7 +219,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index d54477b1001c..84b6ed51099d 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts @@ -83,7 +83,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index 82f7ae030600..ab91c4ebb146 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -13,8 +13,10 @@ #interrupt-cells = <2>; #address-cells = <1>; #size-cells = <0>; - spi-max-frequency = <3000000>; + spi-max-frequency = <9600000>; spi-cs-high; + spi-cpol; + spi-cpha; cpcap_adc: adc { compatible = "motorola,mapphone-cpcap-adc"; diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts index 2b760f90f38c..5375c6699843 100644 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -192,6 +192,7 @@ fixed-link { speed = <1000>; full-duplex; + pause; }; }; }; diff --git a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts index b7606130ade9..0447748f9fa0 100644 --- a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts +++ b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts @@ -138,6 +138,7 @@ mac@1 { compatible = "mediatek,eth-mac"; reg = <1>; + phy-mode = "rgmii"; phy-handle = 
<&phy5>; }; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 7f2ddb78da5f..4227da71cc62 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -105,6 +105,14 @@ linux,code = ; linux,can-disable; }; + + machine_cover { + label = "Machine Cover"; + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ + linux,input-type = ; + linux,code = ; + linux,can-disable; + }; }; isp1707: isp1707 { @@ -814,10 +822,6 @@ pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; bus-width = <4>; - /* For debugging, it is often good idea to remove this GPIO. - It means you can remove back cover (to reboot by removing - battery) and still use the MMC card. */ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 4043ecb38016..0c8fcfb292bf 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -23,6 +23,9 @@ i2c0 = &i2c1; i2c1 = &i2c2; i2c2 = &i2c3; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts index 8047e8cdb3af..4548d87534e3 100644 --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts @@ -139,7 +139,7 @@ ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio2>; - interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */ + interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */ phy-mode = "mii"; diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts index 9dd307b52604..468ad1b64138 100644 --- a/arch/arm/boot/dts/omap4-panda-es.dts +++ b/arch/arm/boot/dts/omap4-panda-es.dts @@ -46,7 +46,7 @@ button_pins: pinmux_button_pins { pinctrl-single,pins = < - OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */ + OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */ >; }; }; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index e5506ab669fc..0a36b8fe3fa9 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -22,6 +22,11 @@ i2c1 = &i2c2; i2c2 = &i2c3; i2c3 = &i2c4; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; + mmc3 = &mmc4; + mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; @@ -328,7 +333,7 @@ status = "disabled"; }; - target-module@56000000 { + sgx_module: target-module@56000000 { compatible = "ti,sysc-omap4", "ti,sysc"; reg = <0x5600fe00 0x4>, <0x5600fe10 0x4>; diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi index cbcdcb4e7d1c..6e320efd9fc1 100644 --- a/arch/arm/boot/dts/omap443x.dtsi +++ b/arch/arm/boot/dts/omap443x.dtsi @@ -33,10 +33,12 @@ }; ocp { + /* 4430 has only gpio_86 tshut and no talert interrupt */ bandgap: bandgap@4a002260 { reg = <0x4a002260 0x4 0x4a00232C 0x4>; compatible = "ti,omap4430-bandgap"; + gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; }; @@ -74,3 +76,13 @@ }; /include/ "omap443x-clocks.dtsi" + +/* + * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel + */ +&sgx_module { + assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>, + <&dpll_per_m7x2_ck>; + assigned-clock-rates = <0>, <153600000>; + assigned-clock-parents = <&dpll_per_m7x2_ck>; +}; diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi index e9d9c8460682..68ab6a95f222 
100644 --- a/arch/arm/boot/dts/omap44xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi @@ -770,14 +770,6 @@ ti,max-div = <2>; }; - sha2md5_fck: sha2md5_fck@15c8 { - #clock-cells = <0>; - compatible = "ti,gate-clock"; - clocks = <&l3_div_ck>; - ti,bit-shift = <1>; - reg = <0x15c8>; - }; - usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 { #clock-cells = <0>; compatible = "ti,gate-clock"; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 041646fabb2d..3b56e993326d 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -25,6 +25,11 @@ i2c2 = &i2c3; i2c3 = &i2c4; i2c4 = &i2c5; + mmc0 = &mmc1; + mmc1 = &mmc2; + mmc2 = &mmc3; + mmc3 = &mmc4; + mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi index 5ceb6cc4451d..1dbe4e8b38ac 100644 --- a/arch/arm/boot/dts/owl-s500.dtsi +++ b/arch/arm/boot/dts/owl-s500.dtsi @@ -84,21 +84,21 @@ global_timer: timer@b0020200 { compatible = "arm,cortex-a9-global-timer"; reg = <0xb0020200 0x100>; - interrupts = ; + interrupts = ; status = "disabled"; }; twd_timer: timer@b0020600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xb0020600 0x20>; - interrupts = ; + interrupts = ; status = "disabled"; }; twd_wdt: wdt@b0020620 { compatible = "arm,cortex-a9-twd-wdt"; reg = <0xb0020620 0xe0>; - interrupts = ; + interrupts = ; status = "disabled"; }; diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi index 5ae860788339..3fcc86d7b735 100644 --- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi @@ -45,18 +45,21 @@ emac: gem@30000 { compatible = "cadence,gem"; reg = <0x30000 0x10000>; + interrupt-parent = <&vic0>; interrupts = <31>; }; dmac1: dmac@40000 { compatible = "snps,dw-dmac"; reg = <0x40000 0x10000>; + interrupt-parent = <&vic0>; interrupts = <25>; }; dmac2: dmac@50000 { compatible = "snps,dw-dmac"; reg = <0x50000 0x10000>; + interrupt-parent = <&vic0>; interrupts = <26>; }; @@ -234,6 +237,7 @@ axi2pico@c0000000 { compatible = "picochip,axi2pico-pc3x2"; reg = <0xc0000000 0x10000>; + interrupt-parent = <&vic0>; interrupts = <13 14 15 16 17 18 19 20 21>; }; }; diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi index dd865f3c2eda..4447f45f0cba 100644 --- a/arch/arm/boot/dts/r8a73a4.dtsi +++ b/arch/arm/boot/dts/r8a73a4.dtsi @@ -131,7 +131,14 @@ cmt1: timer@e6130000 { compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1"; reg = <0 0xe6130000 0 0x1004>; - interrupts = ; + interrupts = , + , + , + , + , + , + , + ; clocks = <&mstp3_clks R8A73A4_CLK_CMT1>; clock-names = "fck"; power-domains = <&pd_c5>; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi index 12ffe73bf2bc..155f58e6d4e8 100644 --- a/arch/arm/boot/dts/r8a7740.dtsi +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -479,7 +479,7 @@ cpg_clocks: cpg_clocks@e6150000 { compatible = "renesas,r8a7740-cpg-clocks"; reg = <0xe6150000 0x10000>; - clocks = <&extal1_clk>, <&extalr_clk>; + clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>; #clock-cells = <1>; clock-output-names = "system", "pllc0", "pllc1", "pllc2", "r", diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index de981d629bdd..fdd267819319 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -338,7 +338,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = 
"renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -348,7 +348,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -357,7 +357,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -367,7 +367,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -376,7 +376,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -386,7 +386,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi index fa74a262107b..8264481bf876 100644 --- a/arch/arm/boot/dts/r8a7744.dtsi +++ b/arch/arm/boot/dts/r8a7744.dtsi @@ -338,7 +338,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -348,7 +348,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -357,7 +357,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -367,7 +367,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -376,7 +376,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -386,7 +386,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi index c53f7ff20695..c306713f2ab7 100644 --- a/arch/arm/boot/dts/r8a7745.dtsi +++ b/arch/arm/boot/dts/r8a7745.dtsi @@ -302,7 +302,7 @@ resets = <&cpg 407>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -312,7 +312,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -321,7 +321,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -331,7 +331,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -340,7 +340,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -350,7 +350,7 @@ 
status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 5a2747758f67..e3ba00a22eeb 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -427,7 +427,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -437,7 +437,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -446,7 +446,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -456,7 +456,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -465,7 +465,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -475,7 +475,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 6f875502453c..a26f86ccc579 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -350,7 +350,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -360,7 +360,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -369,7 +369,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -379,7 +379,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -388,7 +388,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -398,7 +398,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; @@ -407,7 +407,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 42f3313e6988..9f507393c375 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -339,7 +339,7 @@ reg = <0x20>; remote = <&vin1>; - port { + ports { #address-cells = <1>; #size-cells = <0>; @@ -399,7 +399,7 @@ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; default-input = <0>; - port { + ports { #address-cells = <1>; #size-cells = <0>; diff --git 
a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi index bf05110fac4e..fa3839795018 100644 --- a/arch/arm/boot/dts/r8a7793.dtsi +++ b/arch/arm/boot/dts/r8a7793.dtsi @@ -336,7 +336,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -346,7 +346,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -355,7 +355,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -365,7 +365,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -374,7 +374,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -384,7 +384,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; @@ -393,7 +393,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi index 8d797d34816e..9dd952479e68 100644 --- a/arch/arm/boot/dts/r8a7794.dtsi +++ b/arch/arm/boot/dts/r8a7794.dtsi @@ -290,7 +290,7 @@ resets = <&cpg 407>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -300,7 +300,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -309,7 +309,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -319,7 +319,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -328,7 +328,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -338,7 +338,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index c776321b2cc4..d282a7b638d8 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -128,7 +128,7 @@ assigned-clocks = <&cru SCLK_GPU>; assigned-clock-rates = <100000000>; clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>; - clock-names = "core", "bus"; + clock-names = "bus", "core"; resets = <&cru SRST_GPU>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts index ad1afd403052..66a0ff196eb1 100644 --- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts +++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts @@ -58,20 +58,25 @@ 
lvds-encoder { compatible = "ti,sn75lvds83", "lvds-encoder"; - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - lvds_in_vop0: endpoint { - remote-endpoint = <&vop0_out_lvds>; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + lvds_in_vop0: endpoint { + remote-endpoint = <&vop0_out_lvds>; + }; }; - }; - port@1 { - reg = <1>; - lvds_out_panel: endpoint { - remote-endpoint = <&panel_in_lvds>; + port@1 { + reg = <1>; + + lvds_out_panel: endpoint { + remote-endpoint = <&panel_in_lvds>; + }; }; }; }; @@ -465,7 +470,7 @@ non-removable; pinctrl-names = "default"; pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>; - vmmcq-supply = <&vccio_wl>; + vqmmc-supply = <&vccio_wl>; #address-cells = <1>; #size-cells = <0>; status = "okay"; diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts index 5670b33fd1bd..aed879db6c15 100644 --- a/arch/arm/boot/dts/rk3228-evb.dts +++ b/arch/arm/boot/dts/rk3228-evb.dts @@ -46,7 +46,7 @@ #address-cells = <1>; #size-cells = <0>; - phy: phy@0 { + phy: ethernet-phy@0 { compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; reg = <0>; clocks = <&cru SCLK_MAC_PHY>; diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts index 679fc2b00e5a..933ef69da32a 100644 --- a/arch/arm/boot/dts/rk3229-xms6.dts +++ b/arch/arm/boot/dts/rk3229-xms6.dts @@ -150,7 +150,7 @@ #address-cells = <1>; #size-cells = <0>; - phy: phy@0 { + phy: ethernet-phy@0 { compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; reg = <0>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 340ed6ccb08f..6bb78b19c555 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -561,7 +561,7 @@ "pp1", "ppmmu1"; clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>; - clock-names = "core", "bus"; + clock-names = "bus", "core"; resets = <&cru SRST_GPU_A>; status = "disabled"; }; @@ -1033,7 +1033,7 @@ }; }; - spi-0 { + spi0 { spi0_clk: spi0-clk { rockchip,pins = <0 RK_PB1 2 &pcfg_pull_up>; }; @@ -1051,7 +1051,7 @@ }; }; - spi-1 { + spi1 { spi1_clk: spi1-clk { rockchip,pins = <0 RK_PC7 2 &pcfg_pull_up>; }; diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi index 97307a405e60..bce0b05ef7bf 100644 --- a/arch/arm/boot/dts/rk3xxx.dtsi +++ b/arch/arm/boot/dts/rk3xxx.dtsi @@ -84,7 +84,7 @@ compatible = "arm,mali-400"; reg = <0x10090000 0x10000>; clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>; - clock-names = "core", "bus"; + clock-names = "bus", "core"; assigned-clocks = <&cru ACLK_GPU>; assigned-clock-rates = <100000000>; resets = <&cru SRST_GPU>; diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi index 8ff70b856334..d419b77201f7 100644 --- a/arch/arm/boot/dts/s5pv210-aries.dtsi +++ b/arch/arm/boot/dts/s5pv210-aries.dtsi @@ -454,6 +454,7 @@ pinctrl-names = "default"; cap-sd-highspeed; cap-mmc-highspeed; + keep-power-in-suspend; mmc-pwrseq = <&wifi_pwrseq>; non-removable; diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 2ad642f51fd9..61822afa30ab 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ -52,34 +52,26 @@ }; }; + xxti: oscillator-0 { + compatible = "fixed-clock"; + clock-frequency = <0>; + clock-output-names = "xxti"; + #clock-cells = <0>; + }; + + xusbxti: oscillator-1 { + compatible = "fixed-clock"; + clock-frequency = <0>; + clock-output-names = "xusbxti"; + #clock-cells = <0>; + }; + soc { compatible = "simple-bus"; 
#address-cells = <1>; #size-cells = <1>; ranges; - external-clocks { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <0>; - - xxti: oscillator@0 { - compatible = "fixed-clock"; - reg = <0>; - clock-frequency = <0>; - clock-output-names = "xxti"; - #clock-cells = <0>; - }; - - xusbxti: oscillator@1 { - compatible = "fixed-clock"; - reg = <1>; - clock-frequency = <0>; - clock-output-names = "xusbxti"; - #clock-cells = <0>; - }; - }; - onenand: onenand@b0600000 { compatible = "samsung,s5pv210-onenand"; reg = <0xb0600000 0x2000>, @@ -100,19 +92,16 @@ }; clocks: clock-controller@e0100000 { - compatible = "samsung,s5pv210-clock", "simple-bus"; + compatible = "samsung,s5pv210-clock"; reg = <0xe0100000 0x10000>; clock-names = "xxti", "xusbxti"; clocks = <&xxti>, <&xusbxti>; #clock-cells = <1>; - #address-cells = <1>; - #size-cells = <1>; - ranges; + }; - pmu_syscon: syscon@e0108000 { - compatible = "samsung-s5pv210-pmu", "syscon"; - reg = <0xe0108000 0x8000>; - }; + pmu_syscon: syscon@e0108000 { + compatible = "samsung-s5pv210-pmu", "syscon"; + reg = <0xe0108000 0x8000>; }; pinctrl0: pinctrl@e0200000 { @@ -128,35 +117,28 @@ }; }; - amba { - #address-cells = <1>; - #size-cells = <1>; - compatible = "simple-bus"; - ranges; + pdma0: dma@e0900000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0xe0900000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <19>; + clocks = <&clocks CLK_PDMA0>; + clock-names = "apb_pclk"; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; + }; - pdma0: dma@e0900000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0xe0900000 0x1000>; - interrupt-parent = <&vic0>; - interrupts = <19>; - clocks = <&clocks CLK_PDMA0>; - clock-names = "apb_pclk"; - #dma-cells = <1>; - #dma-channels = <8>; - #dma-requests = <32>; - }; - - pdma1: dma@e0a00000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0xe0a00000 0x1000>; - interrupt-parent = <&vic0>; - interrupts = <20>; - clocks = <&clocks CLK_PDMA1>; - clock-names = "apb_pclk"; - #dma-cells = <1>; - #dma-channels = <8>; - #dma-requests = <32>; - }; + pdma1: dma@e0a00000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0xe0a00000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <20>; + clocks = <&clocks CLK_PDMA1>; + clock-names = "apb_pclk"; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; spi0: spi@e1300000 { @@ -229,43 +211,36 @@ status = "disabled"; }; - audio-subsystem { - compatible = "samsung,s5pv210-audss", "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges; + clk_audss: clock-controller@eee10000 { + compatible = "samsung,s5pv210-audss-clock"; + reg = <0xeee10000 0x1000>; + clock-names = "hclk", "xxti", + "fout_epll", + "sclk_audio0"; + clocks = <&clocks DOUT_HCLKP>, <&xxti>, + <&clocks FOUT_EPLL>, + <&clocks SCLK_AUDIO0>; + #clock-cells = <1>; + }; - clk_audss: clock-controller@eee10000 { - compatible = "samsung,s5pv210-audss-clock"; - reg = <0xeee10000 0x1000>; - clock-names = "hclk", "xxti", - "fout_epll", - "sclk_audio0"; - clocks = <&clocks DOUT_HCLKP>, <&xxti>, - <&clocks FOUT_EPLL>, - <&clocks SCLK_AUDIO0>; - #clock-cells = <1>; - }; - - i2s0: i2s@eee30000 { - compatible = "samsung,s5pv210-i2s"; - reg = <0xeee30000 0x1000>; - interrupt-parent = <&vic2>; - interrupts = <16>; - dma-names = "rx", "tx", "tx-sec"; - dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; - clock-names = "iis", - "i2s_opclk0", - "i2s_opclk1"; - clocks = <&clk_audss CLK_I2S>, - <&clk_audss CLK_I2S>, - <&clk_audss CLK_DOUT_AUD_BUS>; - 
samsung,idma-addr = <0xc0010000>; - pinctrl-names = "default"; - pinctrl-0 = <&i2s0_bus>; - #sound-dai-cells = <0>; - status = "disabled"; - }; + i2s0: i2s@eee30000 { + compatible = "samsung,s5pv210-i2s"; + reg = <0xeee30000 0x1000>; + interrupt-parent = <&vic2>; + interrupts = <16>; + dma-names = "rx", "tx", "tx-sec"; + dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; + clock-names = "iis", + "i2s_opclk0", + "i2s_opclk1"; + clocks = <&clk_audss CLK_I2S>, + <&clk_audss CLK_I2S>, + <&clk_audss CLK_DOUT_AUD_BUS>; + samsung,idma-addr = <0xc0010000>; + pinctrl-names = "default"; + pinctrl-0 = <&i2s0_bus>; + #sound-dai-cells = <0>; + status = "disabled"; }; i2s1: i2s@e2100000 { diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 2e2c1a7b1d1d..b05bab57f90a 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -648,6 +648,7 @@ clocks = <&pmc PMC_TYPE_PERIPHERAL 51>; #address-cells = <1>; #size-cells = <1>; + no-memory-wc; ranges = <0 0xf8044000 0x1420>; }; @@ -716,7 +717,7 @@ can0: can@f8054000 { compatible = "bosch,m_can"; - reg = <0xf8054000 0x4000>, <0x210000 0x4000>; + reg = <0xf8054000 0x4000>, <0x210000 0x1c00>; reg-names = "m_can", "message_ram"; interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>, <64 IRQ_TYPE_LEVEL_HIGH 7>; @@ -938,7 +939,7 @@ can1: can@fc050000 { compatible = "bosch,m_can"; - reg = <0xfc050000 0x4000>, <0x210000 0x4000>; + reg = <0xfc050000 0x4000>, <0x210000 0x3800>; reg-names = "m_can", "message_ram"; interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>, <65 IRQ_TYPE_LEVEL_HIGH 7>; @@ -948,7 +949,7 @@ assigned-clocks = <&pmc PMC_TYPE_GCK 57>; assigned-clock-parents = <&pmc PMC_TYPE_CORE PMC_UTMI>; assigned-clock-rates = <40000000>; - bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>; + bosch,mram-cfg = <0x1c00 0 0 64 0 0 32 32>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 4f3993cc0227..451030897220 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -710,7 +710,7 @@ }; }; - L2: l2-cache@fffef000 { + L2: cache-controller@fffef000 { compatible = "arm,pl310-cache"; reg = <0xfffef000 0x1000>; interrupts = <0 38 0x04>; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index 2a86e72d9791..f261a3344071 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -636,7 +636,7 @@ reg = <0xffcfb100 0x80>; }; - L2: l2-cache@fffff000 { + L2: cache-controller@fffff000 { compatible = "arm,pl310-cache"; reg = <0xfffff000 0x1000>; interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; @@ -819,7 +819,7 @@ timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; - reg = <0xffd01000 0x100>; + reg = <0xffd00100 0x100>; clocks = <&l4_sys_free_clk>; clock-names = "timer"; resets = <&rst L4SYSTIMER1_RESET>; diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts index 2e4742c53d04..7b8c3f25861c 100644 --- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts +++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts @@ -91,6 +91,9 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; + reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>; + reset-delay-us = <1000>; + phy0: ethernet-phy@7 { reg = <7>; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 4c268b70b735..2265ca24c0c7 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -143,7 +143,7 @@ 
trips { cpu_alert0: cpu-alert0 { /* milliCelsius */ - temperature = <850000>; + temperature = <85000>; hysteresis = <2000>; type = "passive"; }; @@ -198,7 +198,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index 6befa236ba99..fd31da8fd311 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -117,7 +117,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts index 049e6ab3cf56..73de34ae37fd 100644 --- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts +++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts @@ -154,7 +154,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts index 32d5d45a35c0..8945dbb114a2 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts @@ -130,7 +130,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_gmac_3v3>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts index bb3987e101c2..0b3d9ae75650 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts @@ -132,7 +132,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_gmac_3v3>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts index 01ccff756996..5740f9442705 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts @@ -110,7 +110,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_gmac_3v3>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts index 8c8dee6ea461..9109ca0919ad 100644 --- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts +++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts @@ -151,7 +151,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts index fce2f7fcd084..bf38c66c1815 100644 --- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts +++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts @@ -1,5 +1,5 @@ /* - * Copyright 2015 Adam Sampson + * Copyright 2015-2020 Adam Sampson * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option.
Note that this dual @@ -115,7 +115,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 8aebefd6accf..1f8b45f07e58 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -180,7 +180,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index 9d34eabba121..431f70234d36 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -131,7 +131,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_sw>; phy-handle = <&rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; allwinner,rx-delay-ps = <700>; allwinner,tx-delay-ps = <700>; status = "okay"; diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index d9be511f054f..d8326a5c681d 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -183,7 +183,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_dldo4>; phy-handle = <&rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 397140454132..6bf93e5ed681 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -358,8 +358,8 @@ }; &reg_dldo3 { - regulator-min-microvolt = <2800000>; - regulator-max-microvolt = <2800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-name = "vdd-csi"; }; diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi index 74bb053cf23c..4e485e45fbc5 100644 --- a/arch/arm/boot/dts/sun8i-a83t.dtsi +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi @@ -313,7 +313,7 @@ display_clocks: clock@1000000 { compatible = "allwinner,sun8i-a83t-de2-clk"; - reg = <0x01000000 0x100000>; + reg = <0x01000000 0x10000>; clocks = <&ccu CLK_BUS_DE>, <&ccu CLK_PLL_DE>; clock-names = "bus", diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts index d277d043031b..4c6704e4c57e 100644 --- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts +++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts @@ -31,7 +31,7 @@ pwr_led { label = "bananapi-m2-zero:red:pwr"; - gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */ + gpios = <&r_pio 0 10 GPIO_ACTIVE_LOW>; /* PL10 */ default-state = "on"; }; }; diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts index 71fb73208939..babf4cf1b2f6 100644 --- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts +++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts @@ -53,11 +53,6 @@ }; }; -&emac { - /* LEDs changed to active high on the plus */ - /delete-property/ allwinner,leds-active-low; -}; - &mmc1 { vmmc-supply = <&reg_vcc3v3>; bus-width = <4>; diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts index 6dbf7b2e0c13..b6ca45d18e51 100644 --- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts +++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -67,7 +67,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_gmac_3v3>; phy-handle = <&ext_rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts index 42d62d1ba1dc..7db89500f399 100644 --- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts +++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts @@ -129,7 +129,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_dc1sw>; status = "okay"; }; @@ -223,16 +223,16 @@ }; &reg_dc1sw { - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-name = "vcc-gmac-phy"; }; &reg_dcdc1 { regulator-always-on; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-name = "vcc-3v0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; }; &reg_dcdc2 { diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi index 80f4dc34df34..339402601990 100644 --- a/arch/arm/boot/dts/sun8i-r40.dtsi +++ b/arch/arm/boot/dts/sun8i-r40.dtsi @@ -118,7 +118,7 @@ display_clocks: clock@1000000 { compatible = "allwinner,sun8i-r40-de2-clk", "allwinner,sun8i-h3-de2-clk"; - reg = <0x01000000 0x100000>; + reg = <0x01000000 0x10000>; clocks = <&ccu CLK_BUS_DE>, <&ccu CLK_DE>; clock-names = "bus", diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi index 23ba56df38f7..50c32cf72c65 100644 --- a/arch/arm/boot/dts/sun8i-v3s.dtsi +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi @@ -105,7 +105,7 @@ display_clocks: clock@1000000 { compatible = "allwinner,sun8i-v3s-de2-clk"; - reg = <0x01000000 0x100000>; + reg = <0x01000000 0x10000>; clocks = <&ccu CLK_BUS_DE>, <&ccu CLK_DE>; clock-names = "bus", @@ -423,7 +423,7 @@ gic: interrupt-controller@1c81000 { compatible = "arm,gic-400"; reg = <0x01c81000 0x1000>, - <0x01c82000 0x1000>, + <0x01c82000 0x2000>, <0x01c84000 0x2000>, <0x01c86000 0x2000>; interrupt-controller; diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts index 15c22b06fc4b..47954551f573 100644 --- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts +++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts @@ -120,7 +120,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_dc1sw>; status = "okay"; }; @@ -198,16 +198,16 @@ }; &reg_dc1sw { - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-name = "vcc-gmac-phy"; }; &reg_dcdc1 { regulator-always-on; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-name = "vcc-3v0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; }; &reg_dcdc2 { diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts index d3b337b043a1..484b93df20cb 100644 --- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts +++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts @@ -129,7 +129,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id";
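/* "rgmii-id" asks the PHY to insert both RGMII clock delays internally; the boards converted to it throughout this series evidently rely on PHY-provided delays rather than board-level ones, so a plain "rgmii" mode leaves their links broken. */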
phy-supply = <&reg_cldo1>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts index bbc6335e5631..5c3580d712e4 100644 --- a/arch/arm/boot/dts/sun9i-a80-optimus.dts +++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts @@ -124,7 +124,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <&reg_cldo1>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi index 22466afd38a3..235994a4a2eb 100644 --- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi +++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi @@ -16,15 +16,27 @@ regulator-type = "voltage"; regulator-boot-on; regulator-always-on; - regulator-min-microvolt = <1100000>; - regulator-max-microvolt = <1300000>; + regulator-min-microvolt = <1108475>; + regulator-max-microvolt = <1308475>; regulator-ramp-delay = <50>; /* 4ms */ gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */ gpios-states = <0x1>; - states = <1100000 0>, <1300000 1>; + states = <1108475 0>, <1308475 1>; }; }; &cpu0 { cpu-supply = <&reg_vdd_cpux>; }; + +&cpu1 { + cpu-supply = <&reg_vdd_cpux>; +}; + +&cpu2 { + cpu-supply = <&reg_vdd_cpux>; +}; + +&cpu3 { + cpu-supply = <&reg_vdd_cpux>; +}; diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi index 39263e74fbb5..8e5cb3b3fd68 100644 --- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi +++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi @@ -126,7 +126,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_gmac_3v3>; phy-handle = <&ext_rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index 107eeafad20a..b3141c964c3a 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -113,7 +113,7 @@ display_clocks: clock@1000000 { /* compatible is in per SoC .dtsi file */ - reg = <0x01000000 0x100000>; + reg = <0x01000000 0x10000>; clocks = <&ccu CLK_BUS_DE>, <&ccu CLK_DE>; clock-names = "bus", diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi index 4eddbb8d7fca..60a588ce45e1 100644 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi @@ -571,7 +571,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index dfae90adbb7c..ce64bfb22f22 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -31,7 +31,7 @@ #interrupt-cells = <1>; ranges; - nor_flash: flash@0,00000000 { + nor_flash: flash@0 { compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>, <4 0x00000000 0x04000000>; @@ -41,13 +41,13 @@ }; }; - psram@1,00000000 { + psram@100000000 { compatible = "arm,vexpress-psram", "mtd-ram"; reg = <1 0x00000000 0x02000000>; bank-width = <4>; }; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan9118", "smsc,lan9115"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -59,14 +59,14 @@ vddvario-supply = <&v2m_fixed_3v3>; }; - usb@2,03000000 { + usb@203000000 { compatible = "nxp,usb-isp1761"; reg = <2 0x03000000 0x20000>; interrupts = <16>;
port1-otg; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index 028e0ec30e0c..fa248066d9d9 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -495,7 +495,7 @@ }; ocotp: ocotp@400a5000 { - compatible = "fsl,vf610-ocotp"; + compatible = "fsl,vf610-ocotp", "syscon"; reg = <0x400a5000 0x1000>; clocks = <&clks VF610_CLK_OCOTP>; }; diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig index 3b82b64950d9..c090643b1ecb 100644 --- a/arch/arm/configs/rpc_defconfig +++ b/arch/arm/configs/rpc_defconfig @@ -32,7 +32,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 73ed73a8785a..153009130dab 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -202,7 +202,6 @@ CONFIG_EEPROM_AT24=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_CONSTANTS=y diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S index 4d1707388d94..312428d83eed 100644 --- a/arch/arm/crypto/aes-ce-core.S +++ b/arch/arm/crypto/aes-ce-core.S @@ -386,20 +386,32 @@ ENTRY(ce_aes_ctr_encrypt) .Lctrloop4x: subs r4, r4, #4 bmi .Lctr1x - add r6, r6, #1 + + /* + * NOTE: the sequence below has been carefully tweaked to avoid + * a silicon erratum that exists in Cortex-A57 (#1742098) and + * Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs + * may produce an incorrect result if they take their input from a + * register of which a single 32-bit lane has been updated the last + * time it was modified. To work around this, the lanes of registers + * q0-q3 below are not manipulated individually, and the different + * counter values are prepared by successive manipulations of q7. + */ + add ip, r6, #1 vmov q0, q7 + rev ip, ip + add lr, r6, #2 + vmov s31, ip @ set lane 3 of q1 via q7 + add ip, r6, #3 + rev lr, lr vmov q1, q7 - rev ip, r6 - add r6, r6, #1 + vmov s31, lr @ set lane 3 of q2 via q7 + rev ip, ip vmov q2, q7 - vmov s7, ip - rev ip, r6 - add r6, r6, #1 + vmov s31, ip @ set lane 3 of q3 via q7 + add r6, r6, #4 vmov q3, q7 - vmov s11, ip - rev ip, r6 - add r6, r6, #1 - vmov s15, ip + vld1.8 {q4-q5}, [r1]! vld1.8 {q6}, [r1]! vld1.8 {q15}, [r1]! diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c index ae5aefc44a4d..ffa8d73fe722 100644 --- a/arch/arm/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm/crypto/nhpoly1305-neon-glue.c @@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, return crypto_nhpoly1305_update(desc, src, srclen); do { - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_neon_begin(); crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 99929122dad7..3546d294d55f 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -18,11 +18,11 @@ #endif #include -#include #include #include #include #include +#include #define IOMEM(x) (x) @@ -446,79 +446,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .size \name , . 
- \name .endm - .macro csdb -#ifdef CONFIG_THUMB2_KERNEL - .inst.w 0xf3af8014 -#else - .inst 0xe320f014 -#endif - .endm - - .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req -#ifndef CONFIG_CPU_USE_DOMAINS - adds \tmp, \addr, #\size - 1 - sbcscc \tmp, \tmp, \limit - bcs \bad -#ifdef CONFIG_CPU_SPECTRE - movcs \addr, #0 - csdb -#endif -#endif - .endm - - .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req -#ifdef CONFIG_CPU_SPECTRE - sub \tmp, \limit, #1 - subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr - addhs \tmp, \tmp, #1 @ if (tmp >= 0) { - subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } - movlo \addr, #0 @ if (tmp < 0) addr = NULL - csdb -#endif - .endm - - .macro uaccess_disable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. - */ - mov \tmp, #DACR_UACCESS_DISABLE - mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_enable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. - */ - mov \tmp, #DACR_UACCESS_ENABLE - mcr p15, 0, \tmp, c3, c0, 0 - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_save, tmp -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #SVC_DACR] -#endif - .endm - - .macro uaccess_restore -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #SVC_DACR] - mcr p15, 0, r0, c3, c0, 0 -#endif - .endm - .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h index 0b350a7e26f3..afb7a59828fe 100644 --- a/arch/arm/include/asm/clocksource.h +++ b/arch/arm/include/asm/clocksource.h @@ -1,8 +1,17 @@ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H +enum vdso_arch_clockmode { + /* vdso clocksource not usable */ + VDSO_CLOCKMODE_NONE, + /* vdso clocksource usable */ + VDSO_CLOCKMODE_ARCHTIMER, + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT = VDSO_CLOCKMODE_ARCHTIMER, +}; + struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? */ + /* Usable for direct VDSO access? */ + enum vdso_arch_clockmode clock_mode; }; #endif diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 83c391b597d4..fdc4ae3e7378 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -164,8 +164,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) preempt_enable(); #endif - if (!ret) - *oval = oldval; + /* + * Store unconditionally. If ret != 0 the extra store is the least + * of the worries but GCC cannot figure out that __futex_atomic_op() + * is either setting ret to -EFAULT or storing the old value in + * oldval which results in an uninitialized warning at the call site.
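+	 *
+	 * A minimal sketch of the call-site pattern this keeps
+	 * warning-free (hypothetical caller, for illustration only):
+	 *
+	 *	int oldval, ret;
+	 *	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+	 *	if (!ret && oldval == cmparg)
+	 *		wake_up_waiter();
+	 *
+	 * oldval is now written even when ret is -EFAULT, so the compiler
+	 * can no longer see a path where the caller reads it uninitialized.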
+ */ + *oval = oldval; return ret; } diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h new file mode 100644 index 000000000000..ecc2322db7aa --- /dev/null +++ b/arch/arm/include/asm/kexec-internal.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ARM_KEXEC_INTERNAL_H +#define _ARM_KEXEC_INTERNAL_H + +struct kexec_relocate_data { + unsigned long kexec_start_address; + unsigned long kexec_indirection_page; + unsigned long kexec_mach_type; + unsigned long kexec_r2; +}; + +#endif diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 213607a1f45c..e26a278d301a 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); /* optinsn template addresses */ -extern __visible kprobe_opcode_t optprobe_template_entry; -extern __visible kprobe_opcode_t optprobe_template_val; -extern __visible kprobe_opcode_t optprobe_template_call; -extern __visible kprobe_opcode_t optprobe_template_end; -extern __visible kprobe_opcode_t optprobe_template_sub_sp; -extern __visible kprobe_opcode_t optprobe_template_add_sp; -extern __visible kprobe_opcode_t optprobe_template_restore_begin; -extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn; -extern __visible kprobe_opcode_t optprobe_template_restore_end; +extern __visible kprobe_opcode_t optprobe_template_entry[]; +extern __visible kprobe_opcode_t optprobe_template_val[]; +extern __visible kprobe_opcode_t optprobe_template_call[]; +extern __visible kprobe_opcode_t optprobe_template_end[]; +extern __visible kprobe_opcode_t optprobe_template_sub_sp[]; +extern __visible kprobe_opcode_t optprobe_template_add_sp[]; +extern __visible kprobe_opcode_t optprobe_template_restore_begin[]; +extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[]; +extern __visible kprobe_opcode_t optprobe_template_restore_end[]; #define MAX_OPTIMIZED_LENGTH 4 #define MAX_OPTINSN_SIZE \ - ((unsigned long)&optprobe_template_end - \ - (unsigned long)&optprobe_template_entry) + ((unsigned long)optprobe_template_end - \ + (unsigned long)optprobe_template_entry) #define RELATIVEJUMP_SIZE 4 struct arch_optimized_insn { diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index f615830f9f57..9d0b7e677faa 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -56,7 +56,7 @@ extern char __kvm_hyp_init_end[]; extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8e995ec796c8..c1747fcb86d3 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -204,7 +204,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } @@ -236,16 +236,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) 
& HSR_IL; } -static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; } -static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) { return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; @@ -363,6 +368,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, } } -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} +static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; } +static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { } #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 8a37c8e89777..dd03d5e01a94 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -266,7 +266,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); @@ -421,4 +421,6 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) return true; } +#define kvm_arm_vcpu_loaded(vcpu) (false) + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h index 529d2cf4d06f..e9cb87cd37c2 100644 --- a/arch/arm/include/asm/mcs_spinlock.h +++ b/arch/arm/include/asm/mcs_spinlock.h @@ -6,7 +6,7 @@ #include /* MCS spin-locking. */ -#define arch_mcs_spin_lock_contended(lock) \ +#define arch_mcs_spin_wait(lock) \ do { \ /* Ensure prior stores are observed before we enter wfe. */ \ smp_mb(); \ @@ -14,9 +14,9 @@ do { \ wfe(); \ } while (0) \ -#define arch_mcs_spin_unlock_contended(lock) \ +#define arch_mcs_lock_handoff(lock, val) \ do { \ - smp_store_release(lock, 1); \ + smp_store_release((lock), (val)); \ dsb_sev(); \ } while (0) diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index f44f448537f2..1a3eedbac4a2 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -5,6 +5,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. 
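The mcs_spinlock.h hunk above follows a generic-code rename: waiters spin in arch_mcs_spin_wait() and the owner hands the lock off with arch_mcs_lock_handoff(), which now takes an explicit value. For orientation only, a generic C11 MCS lock; this is the textbook algorithm, not the kernel's implementation (which waits with wfe and kicks waiters via dsb_sev()):

        #include <stdatomic.h>
        #include <stddef.h>

        struct mcs_node {
                _Atomic(struct mcs_node *) next;
                atomic_int locked;          /* 0 = wait, 1 = go */
        };

        static void mcs_lock(_Atomic(struct mcs_node *) *tail,
                             struct mcs_node *me)
        {
                struct mcs_node *prev;

                atomic_store(&me->next, NULL);
                atomic_store(&me->locked, 0);
                prev = atomic_exchange(tail, me);        /* join queue */
                if (prev) {
                        atomic_store(&prev->next, me);
                        while (!atomic_load(&me->locked))
                                ;                /* arch_mcs_spin_wait */
                }
        }

        static void mcs_unlock(_Atomic(struct mcs_node *) *tail,
                               struct mcs_node *me)
        {
                struct mcs_node *next = atomic_load(&me->next);

                if (!next) {
                        struct mcs_node *expect = me;
                        if (atomic_compare_exchange_strong(tail, &expect,
                                                           NULL))
                                return;          /* no successor queued */
                        while (!(next = atomic_load(&me->next)))
                                ;                /* successor mid-enqueue */
                }
                atomic_store(&next->locked, 1);  /* arch_mcs_lock_handoff */
        }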
TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 51beec41d48c..50b51ac91fcb 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -75,6 +75,8 @@ #define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) +#define MAX_POSSIBLE_PHYSMEM_BITS 32 + /* * PMD_SHIFT determines the size of the area a second-level page table can map * PGDIR_SHIFT determines what a third-level page table entry can map diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 5b18295021a0..8006a56cc2ce 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -25,6 +25,8 @@ #define PTE_HWTABLE_OFF (0) #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) +#define MAX_POSSIBLE_PHYSMEM_BITS 40 + /* * PGDIR_SHIFT determines the size a top-level page table entry can map. */ diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h new file mode 100644 index 000000000000..907571fd05c6 --- /dev/null +++ b/arch/arm/include/asm/uaccess-asm.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_UACCESS_ASM_H__ +#define __ASM_UACCESS_ASM_H__ + +#include +#include +#include +#include + + .macro csdb +#ifdef CONFIG_THUMB2_KERNEL + .inst.w 0xf3af8014 +#else + .inst 0xe320f014 +#endif + .endm + + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcscc \tmp, \tmp, \limit + bcs \bad +#ifdef CONFIG_CPU_SPECTRE + movcs \addr, #0 + csdb +#endif +#endif + .endm + + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +#ifdef CONFIG_CPU_SPECTRE + sub \tmp, \limit, #1 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { + subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } + movlo \addr, #0 @ if (tmp < 0) addr = NULL + csdb +#endif + .endm + + .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif + .endm + +#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) +#define DACR(x...) x +#else +#define DACR(x...) +#endif + + /* + * Save the address limit on entry to a privileged exception. + * + * If we are using the DACR for kernel access by the user accessors + * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain + * back to client mode, whether or not \disable is set. + * + * If we are using SW PAN, set the DACR user domain to no access + * if \disable is set. 
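In C terms, the entry/exit sequence this comment describes (defined just below as uaccess_entry/uaccess_exit) behaves roughly as follows. This is a toy model: the DACR and the thread's addr_limit are plain variables here, and the constant values are placeholders:

        static unsigned long dacr_reg;              /* stands in for CP15 c3 */
        static unsigned long addr_limit;            /* thread_info field     */

        #define TASK_SIZE_MODEL        0xbf000000UL /* illustrative */
        #define DACR_UACCESS_DISABLE_M 0x51UL       /* illustrative */

        struct svc_frame {
                unsigned long svc_dacr;
                unsigned long svc_addr_limit;
        };

        static void uaccess_entry(struct svc_frame *f, int disable)
        {
                f->svc_addr_limit = addr_limit;  /* save caller's limit    */
                addr_limit = TASK_SIZE_MODEL;    /* reset for the kernel   */
                f->svc_dacr = dacr_reg;          /* save user access state */
                if (disable)
                        dacr_reg = DACR_UACCESS_DISABLE_M;
        }

        static void uaccess_exit(const struct svc_frame *f)
        {
                addr_limit = f->svc_addr_limit;
                dacr_reg = f->svc_dacr;
        }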
+ */ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable + ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] + mov \tmp2, #TASK_SIZE + str \tmp2, [\tsk, #TI_ADDR_LIMIT] + DACR( mrc p15, 0, \tmp0, c3, c0, 0) + DACR( str \tmp0, [sp, #SVC_DACR]) + str \tmp1, [sp, #SVC_ADDR_LIMIT] + .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) + /* kernel=client, user=no access */ + mov \tmp2, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync + .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS) + /* kernel=client */ + bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL) + orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync + .endif + .endm + + /* Restore the user access state previously saved by uaccess_entry */ + .macro uaccess_exit, tsk, tmp0, tmp1 + ldr \tmp1, [sp, #SVC_ADDR_LIMIT] + DACR( ldr \tmp0, [sp, #SVC_DACR]) + str \tmp1, [\tsk, #TI_ADDR_LIMIT] + DACR( mcr p15, 0, \tmp0, c3, c0, 0) + .endm + +#undef DACR + +#endif /* __ASM_UACCESS_ASM_H__ */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c773b829ee8e..bfb05c93494d 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -15,6 +15,7 @@ #include #endif #include +#include #include #include #include @@ -190,5 +191,9 @@ int main(void) DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar)); DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar)); #endif + DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address)); + DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page)); + DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type)); + DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2)); return 0; } diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c index 182422981386..7eef1891d0c4 100644 --- a/arch/arm/kernel/elf.c +++ b/arch/arm/kernel/elf.c @@ -78,13 +78,32 @@ void elf_set_personality(const struct elf32_hdr *x) EXPORT_SYMBOL(elf_set_personality); /* - * Set READ_IMPLIES_EXEC if: - * - the binary requires an executable stack - * - we're running on a CPU which doesn't support NX. + * An executable for which elf_read_implies_exec() returns TRUE will + * have the READ_IMPLIES_EXEC personality flag set automatically. + * + * The decision process for determining the result is: + * + * CPU: | lacks NX* | has NX | + * ELF: | | | + * ---------------------|------------|------------| + * missing PT_GNU_STACK | exec-all | exec-all | + * PT_GNU_STACK == RWX | exec-all | exec-stack | + * PT_GNU_STACK == RW | exec-all | exec-none | + * + * exec-all : all PROT_READ user mappings are executable, except when + * backed by files on a noexec-filesystem. + * exec-none : only PROT_EXEC user mappings are executable. + * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ * + * *this column has no architectural effect: NX markings are ignored by + * hardware, but may have behavioral effects when "wants X" collides with + * "cannot be X" constraints in memory permission flags, as in + * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com + * */ int arm_elf_read_implies_exec(int executable_stack) { - if (executable_stack != EXSTACK_DISABLE_X) + if (executable_stack == EXSTACK_DEFAULT) return 1; if (cpu_architecture() < CPU_ARCH_ARMv6) return 1; diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 858d4e541532..b62d74a2c73a 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -27,6 +27,7 @@ #include #include #include +#include #include "entry-header.S" #include @@ -179,15 +180,7 @@ ENDPROC(__und_invalid) stmia r7, {r2 - r6} get_thread_info tsk - ldr r0, [tsk, #TI_ADDR_LIMIT] - mov r1, #TASK_SIZE - str r1, [tsk, #TI_ADDR_LIMIT] - str r0, [sp, #SVC_ADDR_LIMIT] - - uaccess_save r0 - .if \uaccess - uaccess_disable r0 - .endif + uaccess_entry tsk, r0, r1, r2, \uaccess .if \trace #ifdef CONFIG_TRACE_IRQFLAGS @@ -259,31 +252,10 @@ __und_svc: #else svc_entry #endif - @ - @ call emulation code, which returns using r9 if it has emulated - @ the instruction, or the more conventional lr if we are to treat - @ this as a real undefined instruction - @ - @ r0 - instruction - @ -#ifndef CONFIG_THUMB2_KERNEL - ldr r0, [r4, #-4] -#else - mov r1, #2 - ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 - cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 - blo __und_svc_fault - ldrh r9, [r4] @ bottom 16 bits - add r4, r4, #2 - str r4, [sp, #S_PC] - orr r0, r9, r0, lsl #16 -#endif - badr r9, __und_svc_finish - mov r2, r4 - bl call_fpe mov r1, #4 @ PC correction to apply -__und_svc_fault: + THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode? + THUMB( movne r1, #2 ) @ if so, fix up PC correction mov r0, sp @ struct pt_regs *regs bl __und_fault diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 32051ec5b33f..40db0f9188b6 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -6,6 +6,7 @@ #include #include #include +#include #include @ Bad Abort numbers @@ -217,9 +218,7 @@ blne trace_hardirqs_off #endif .endif - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -263,9 +262,7 @@ @ on the stack remains correct). 
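The decision table in the elf.c hunk above collapses to a two-test predicate; the real arm_elf_read_implies_exec() models "lacks NX" as cpu_architecture() < CPU_ARCH_ARMv6. A sketch of the same logic:

        #include <stdbool.h>

        enum { EXSTACK_DEFAULT, EXSTACK_DISABLE_X, EXSTACK_ENABLE_X };

        /* Returns 1 when READ_IMPLIES_EXEC should be set. */
        static int read_implies_exec(int executable_stack, bool cpu_has_nx)
        {
                if (executable_stack == EXSTACK_DEFAULT)
                        return 1;   /* missing PT_GNU_STACK: exec-all  */
                if (!cpu_has_nx)
                        return 1;   /* NX ignored by the CPU: exec-all */
                return 0;           /* RW: exec-none, RWX: exec-stack  */
        }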
@ .macro svc_exit_via_fiq - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index c49b39340ddb..f1cdc1f36957 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -671,12 +671,8 @@ ARM_BE8(rev16 ip, ip) ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b bx lr -#else -#ifdef CONFIG_CPU_ENDIAN_BE8 - moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction #else moveq r0, #0x400000 @ set bit 22, mov to mvn instruction -#endif b 2f 1: ldr ip, [r7, r3] #ifdef CONFIG_CPU_ENDIAN_BE8 @@ -685,7 +681,7 @@ ARM_BE8(rev16 ip, ip) tst ip, #0x000f0000 @ check the rotation field orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 biceq ip, ip, #0x00004000 @ clear bit 22 - orreq ip, ip, r0 @ mask in offset bits 7-0 + orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0 #else bic ip, ip, #0x000000ff tst ip, #0xf00 @ check the rotation field diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index b0c195e3a06d..7021ef0b4e71 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -680,26 +680,68 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +/* + * Arm32 hardware does not always report a watchpoint hit address that matches + * one of the watchpoints set. It can also report an address "near" the + * watchpoint if a single instruction accesses both watched and unwatched + * addresses. There is no straightforward way, short of disassembling the + * offending instruction, to map that address back to the watchpoint. This + * function computes the distance of the memory access from the watchpoint as a + * heuristic for the likelihood that a given access triggered the watchpoint. + * + * See this same function in the arm64 platform code, which has the same + * problem. + * + * The function returns the distance of the address from the bytes watched by + * the watchpoint. In case of an exact match, it returns 0. + */ +static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + u32 wp_low, wp_high; + u32 lens, lene; + + lens = __ffs(ctrl->len); + lene = __fls(ctrl->len); + + wp_low = val + lens; + wp_high = val + lene; + if (addr < wp_low) + return wp_low - addr; + else if (addr > wp_high) + return addr - wp_high; + else + return 0; +} + +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - int i, access; - u32 val, ctrl_reg, alignment_mask; + int i, access, closest_match = 0; + u32 min_dist = -1, dist; + u32 val, ctrl_reg; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); + /* + * Find all watchpoints that match the reported address. If no exact + * match is found, attribute the hit to the closest watchpoint. + */ + rcu_read_lock(); + for (i = 0; i < core_num_wrps; ++i) { - rcu_read_lock(); - wp = slots[i]; - if (wp == NULL) - goto unlock; + continue; - info = counter_arch_bp(wp); /* * The DFAR is an unknown value on debug architectures prior * to 7.1.
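A compilable rendition of the distance heuristic with a worked example; the kernel's __ffs()/__fls() are approximated with GCC builtins, and 0xf is the byte-select mask of a 4-byte watchpoint:

        #include <assert.h>
        #include <stdint.h>

        static uint32_t wp_distance(uint32_t addr, uint32_t val,
                                    uint32_t len_mask)
        {
                uint32_t lo = val + __builtin_ctz(len_mask);      /* __ffs */
                uint32_t hi = val + 31 - __builtin_clz(len_mask); /* __fls */

                if (addr < lo)
                        return lo - addr;
                if (addr > hi)
                        return addr - hi;
                return 0;
        }

        int main(void)
        {
                /* 4-byte watchpoint at 0x1000 watches 0x1000..0x1003 */
                assert(wp_distance(0x1002, 0x1000, 0xf) == 0); /* hit   */
                assert(wp_distance(0x0fff, 0x1000, 0xf) == 1); /* below */
                assert(wp_distance(0x1008, 0x1000, 0xf) == 5); /* above */
                return 0;
        }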
Since we only allow a single watchpoint on these @@ -708,50 +750,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, */ if (debug_arch < ARM_DEBUG_ARCH_V7_1) { BUG_ON(i > 0); + info = counter_arch_bp(wp); info->trigger = wp->attr.bp_addr; } else { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) - alignment_mask = 0x7; - else - alignment_mask = 0x3; - - /* Check if the watchpoint value matches. */ - val = read_wb_reg(ARM_BASE_WVR + i); - if (val != (addr & ~alignment_mask)) - goto unlock; - - /* Possible match, check the byte address select. */ - ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); - decode_ctrl_reg(ctrl_reg, &ctrl); - if (!((1 << (addr & alignment_mask)) & ctrl.len)) - goto unlock; - /* Check that the access type matches. */ if (debug_exception_updates_fsr()) { access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) - goto unlock; + continue; } + val = read_wb_reg(ARM_BASE_WVR + i); + ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); + decode_ctrl_reg(ctrl_reg, &ctrl); + dist = get_distance_from_watchpoint(addr, val, &ctrl); + if (dist < min_dist) { + min_dist = dist; + closest_match = i; + } + /* Is this an exact match? */ + if (dist != 0) + continue; + /* We have a winner. */ + info = counter_arch_bp(wp); info->trigger = addr; } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. */ + if (!is_default_overflow_handler(wp)) + continue; +step: + enable_single_step(wp, instruction_pointer(regs)); + } + + if (min_dist > 0 && min_dist != -1) { + /* No exact match found. 
*/ + wp = slots[closest_match]; + info = counter_arch_bp(wp); + info->trigger = addr; + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + perf_bp_event(wp, regs); if (is_default_overflow_handler(wp)) enable_single_step(wp, instruction_pointer(regs)); - -unlock: - rcu_read_unlock(); } + + rcu_read_unlock(); } static void watchpoint_single_step_handler(unsigned long pc) diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 76300f3813e8..734adeb42df8 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -24,11 +25,6 @@ extern void relocate_new_kernel(void); extern const unsigned int relocate_new_kernel_size; -extern unsigned long kexec_start_address; -extern unsigned long kexec_indirection_page; -extern unsigned long kexec_mach_type; -extern unsigned long kexec_boot_atags; - static atomic_t waiting_for_crash_ipi; /* @@ -161,6 +157,7 @@ void (*kexec_reinit)(void); void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; + struct kexec_relocate_data *data; void (*reboot_entry)(void); void *reboot_code_buffer; @@ -176,18 +173,17 @@ void machine_kexec(struct kimage *image) reboot_code_buffer = page_address(image->control_code_page); - /* Prepare parameters for reboot_code_buffer*/ - set_kernel_text_rw(); - kexec_start_address = image->start; - kexec_indirection_page = page_list; - kexec_mach_type = machine_arch_type; - kexec_boot_atags = image->arch.kernel_r2; - /* copy our kernel relocation code to the control code page */ reboot_entry = fncpy(reboot_code_buffer, &relocate_new_kernel, relocate_new_kernel_size); + data = reboot_code_buffer + relocate_new_kernel_size; + data->kexec_start_address = image->start; + data->kexec_indirection_page = page_list; + data->kexec_mach_type = machine_arch_type; + data->kexec_r2 = image->arch.kernel_r2; + /* get the identity mapping physical address for the reboot code */ reboot_entry_phys = virt_to_idmap(reboot_entry); diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 324352787aea..db9401581cd2 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = { }; static struct undef_hook thumb_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xde01, + .instr_mask = 0xffffffff, + .instr_val = 0x0000de01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S index 7eaa2ae7aff5..5e15b5912cb0 100644 --- a/arch/arm/kernel/relocate_kernel.S +++ b/arch/arm/kernel/relocate_kernel.S @@ -5,14 +5,16 @@ #include #include +#include #include .align 3 /* not needed for this code, but keeps fncpy() happy */ ENTRY(relocate_new_kernel) - ldr r0,kexec_indirection_page - ldr r1,kexec_start_address + adr r7, relocate_new_kernel_end + ldr r0, [r7, #KEXEC_INDIR_PAGE] + ldr r1, [r7, #KEXEC_START_ADDR] /* * If there is no indirection page (we are doing crashdumps) @@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel) 2: /* Jump to relocated kernel */ - mov lr,r1 - mov r0,#0 - ldr r1,kexec_mach_type - ldr r2,kexec_boot_atags - ARM( ret lr ) - THUMB( bx lr ) - - .align - - .globl kexec_start_address -kexec_start_address: - .long 0x0 - - .globl kexec_indirection_page -kexec_indirection_page: - .long 0x0 - - .globl kexec_mach_type -kexec_mach_type: - .long 0x0 - - /* phy addr of the atags for the new kernel */ - .globl 
kexec_boot_atags -kexec_boot_atags: - .long 0x0 + mov lr, r1 + mov r0, #0 + ldr r1, [r7, #KEXEC_MACH_TYPE] + ldr r2, [r7, #KEXEC_R2] + ARM( ret lr ) + THUMB( bx lr ) ENDPROC(relocate_new_kernel) + .align 3 relocate_new_kernel_end: .globl relocate_new_kernel_size diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab2568996ddb..c01f76cd0242 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -694,18 +694,20 @@ struct page *get_signal_page(void) addr = page_address(page); + /* Poison the entire page */ + memset32(addr, __opcode_to_mem_arm(0xe7fddef1), + PAGE_SIZE / sizeof(u32)); + /* Give the signal return code some randomness */ offset = 0x200 + (get_random_int() & 0x7fc); signal_return_offset = offset; - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. - */ + /* Copy signal return handlers into the page */ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); - ptr = (unsigned long)addr + offset; - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); + /* Flush out all instructions in this page */ + ptr = (unsigned long)addr; + flush_icache_range(ptr, ptr + PAGE_SIZE); return page; } diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 71778bb0475b..76ea4178a55c 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -22,6 +22,19 @@ * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * + * When compiled with clang, pc and sp are not pushed. A simple function + * prologue looks like this when built with clang: + * + * stmdb {..., fp, lr} + * add fp, sp, #x + * sub sp, sp, #y + * + * A simple function epilogue looks like this when built with clang: + * + * sub sp, fp, #x + * ldm {..., fp, pc} + * + * * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. 
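Side by side, the two frame layouts described above unwind like this; a direct C rendering of the unwind_frame() change that follows, so the loads assume a valid saved frame:

        struct stackframe { unsigned long fp, sp, lr, pc; };

        /* GCC APCS frame: {fp, sp, pc} saved at fp-12, fp-8, fp-4. */
        static void unwind_step_gcc(struct stackframe *f)
        {
                unsigned long fp = f->fp;

                f->fp = *(unsigned long *)(fp - 12);
                f->sp = *(unsigned long *)(fp - 8);
                f->pc = *(unsigned long *)(fp - 4);
        }

        /* Clang frame: only {fp, lr} saved, at fp and fp+4; the return
         * address comes from the lr slot rather than a saved pc. */
        static void unwind_step_clang(struct stackframe *f)
        {
                unsigned long fp = f->fp;

                f->sp = f->fp;
                f->fp = *(unsigned long *)(fp);
                f->pc = f->lr;
                f->lr = *(unsigned long *)(fp + 4);
        }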
*/ @@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); +#ifdef CONFIG_CC_IS_CLANG + /* check current frame pointer is within bounds */ + if (fp < low + 4 || fp > high - 4) + return -EINVAL; + + frame->sp = frame->fp; + frame->fp = *(unsigned long *)(fp); + frame->pc = frame->lr; + frame->lr = *(unsigned long *)(fp + 4); +#else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; @@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->fp = *(unsigned long *)(fp - 12); frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); +#endif return 0; } @@ -92,6 +116,8 @@ static int save_trace(struct stackframe *frame, void *d) return 0; regs = (struct pt_regs *)frame->sp; + if ((unsigned long)®s[1] > ALIGN(frame->sp, THREAD_SIZE)) + return 0; trace->entries[trace->nr_entries++] = regs->ARM_pc; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c053abd1fb53..97a512551b21 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { + unsigned long end = frame + 4 + sizeof(struct pt_regs); + #ifdef CONFIG_KALLSYMS printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif - if (in_entry_text(from)) - dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) + dump_mem("", "Exception stack", frame + 4, end); } void dump_backtrace_stm(u32 *stack, u32 instruction) diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index f00e45fa62c4..6c69a5548ba2 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -281,7 +281,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk) if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) return false; - if (!tk->tkr_mono.clock->archdata.vdso_direct) + if (tk->tkr_mono.clock->archdata.clock_mode != VDSO_CLOCKMODE_ARCHTIMER) return false; return true; diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index 848f27bbad9d..80e67108d39d 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -45,7 +45,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) __kvm_tlb_flush_vmid(kvm); } -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); @@ -54,6 +54,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) isb(); write_sysreg(0, TLBIALL); + write_sysreg(0, ICIALLU); dsb(nsh); isb(); diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 52665f30d236..676cc2a318f4 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -592,13 +592,13 @@ static void __init at91_pm_sram_init(void) sram_pool = gen_pool_get(&pdev->dev, NULL); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - return; + goto out_put_device; } sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); if (!sram_base) { pr_warn("%s: unable to alloc sram!\n", __func__); - return; + goto out_put_device; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); @@ -606,12 +606,17 @@ static void 
__init at91_pm_sram_init(void) at91_pm_suspend_in_sram_sz, false); if (!at91_suspend_sram_fn) { pr_warn("SRAM: Could not map\n"); - return; + goto out_put_device; } /* Copy the pm suspend handler to SRAM */ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); + return; + +out_put_device: + put_device(&pdev->dev); + return; } static bool __init at91_is_pm_mode_active(int pm_mode) @@ -772,6 +777,7 @@ static void __init at91_pm_init(void (*pm_idle)(void)) pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id); soc_pm.data.pmc = of_iomap(pmc_np, 0); + of_node_put(pmc_np); if (!soc_pm.data.pmc) { pr_err("AT91: PM not supported, PMC not found\n"); return; diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index ed57c879d4e1..2591cba61937 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -268,6 +268,10 @@ ENDPROC(at91_backup_mode) orr tmp1, tmp1, #AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] + /* Quirk for SAM9X60's PMC */ + nop + nop + wait_mckrdy /* Enable the crystal oscillator */ diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9a681b421ae1..cd861c57d5ad 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -26,6 +26,7 @@ #define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30) static void __iomem *ns_sram_base_addr __ro_after_init; +static bool secure_firmware __ro_after_init; /* * The common v7_exit_coherency_flush API could not be used because of the @@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr __ro_after_init; static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) { unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); + bool state; pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER || cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; - if (!exynos_cpu_power_state(cpunr)) { - exynos_cpu_power_up(cpunr); - + state = exynos_cpu_power_state(cpunr); + exynos_cpu_power_up(cpunr); + if (!state && secure_firmware) { /* * This assumes the cluster number of the big cores(Cortex A15) * is 0 and the Little cores(Cortex A7) is 1. @@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void) return -ENOMEM; } + secure_firmware = exynos_secure_firmware_available(); + /* * To increase the stability of KFC reset we need to program * the PMU SPARE3 register diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c index 0b2fd7e2e9b4..90b1e9be430e 100644 --- a/arch/arm/mach-footbridge/cats-pci.c +++ b/arch/arm/mach-footbridge/cats-pci.c @@ -15,14 +15,14 @@ #include /* cats host-specific stuff */ -static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; +static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin) { return 0; } -static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->irq >= 255) return -1; /* not a valid interrupt. 
*/ diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index 8b81a17f675d..e17ec92b90dd 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -66,15 +66,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, if (addr) switch (size) { case 1: - asm("ldrb %0, [%1, %2]" + asm volatile("ldrb %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 2: - asm("ldrh %0, [%1, %2]" + asm volatile("ldrh %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 4: - asm("ldr %0, [%1, %2]" + asm volatile("ldr %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; } @@ -100,17 +100,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, if (addr) switch (size) { case 1: - asm("strb %0, [%1, %2]" + asm volatile("strb %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 2: - asm("strh %0, [%1, %2]" + asm volatile("strh %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 4: - asm("str %0, [%1, %2]" + asm volatile("str %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c index 6f28aaa9ca79..c3f280d08fa7 100644 --- a/arch/arm/mach-footbridge/ebsa285-pci.c +++ b/arch/arm/mach-footbridge/ebsa285-pci.c @@ -14,9 +14,9 @@ #include #include -static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; +static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; -static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->vendor == PCI_VENDOR_ID_CONTAQ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693) diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c index 9473aa0305e5..e8304392074b 100644 --- a/arch/arm/mach-footbridge/netwinder-pci.c +++ b/arch/arm/mach-footbridge/netwinder-pci.c @@ -18,7 +18,7 @@ * We now use the slot ID instead of the device identifiers to select * which interrupt is routed where. 
*/ -static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 0: /* host bridge */ diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c index 4391e433a4b2..9d19aa98a663 100644 --- a/arch/arm/mach-footbridge/personal-pci.c +++ b/arch/arm/mach-footbridge/personal-pci.c @@ -14,13 +14,12 @@ #include #include -static int irqmap_personal_server[] __initdata = { +static int irqmap_personal_server[] = { IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0, IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI }; -static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot, - u8 pin) +static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char line; diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 03506ce46149..e7364e6c8c6b 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -91,8 +91,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o endif +ifeq ($(CONFIG_ARM_CPU_SUSPEND),y) AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += resume-imx6.o +endif obj-$(CONFIG_SOC_IMX6) += pm-imx6.o obj-$(CONFIG_SOC_IMX1) += mach-imx1.o diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index f057df813f83..e9962b48e30c 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram( if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, size); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram( if (virt_out) *virt_out = virt; +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 1c0ecad3620e..baf3b47601af 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -493,14 +493,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -523,7 +523,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); if (ret) { pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); - goto put_node; + goto put_device; } ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); @@ -570,7 +570,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); - goto put_node; + goto put_device; pl310_cache_map_failed: iounmap(pm_info->gpc_base.vbase); @@ -580,6 +580,8 @@ iomuxc_map_failed: iounmap(pm_info->src_base.vbase); src_map_failed: iounmap(pm_info->mmdc_base.vbase); +put_device: + put_device(&pdev->dev); put_node: 
of_node_put(node); diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 1eabf2d2834b..e06f946b75b9 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S @@ -67,6 +67,7 @@ #define MX6Q_CCM_CCR 0x0 .align 3 + .arm .macro sync_l2_cache diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig index 982eabc36163..2406cab73835 100644 --- a/arch/arm/mach-integrator/Kconfig +++ b/arch/arm/mach-integrator/Kconfig @@ -4,6 +4,8 @@ menuconfig ARCH_INTEGRATOR depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 select ARM_AMBA select COMMON_CLK_VERSATILE + select CMA + select DMA_CMA select HAVE_TCM select ICST select MFD_SYSCON @@ -35,14 +37,13 @@ config INTEGRATOR_IMPD1 select ARM_VIC select GPIO_PL061 select GPIOLIB + select REGULATOR + select REGULATOR_FIXED_VOLTAGE help The IM-PD1 is an add-on logic module for the Integrator which allows ARM(R) Ltd PrimeCells to be developed and evaluated. The IM-PD1 can be found on the Integrator/PP2 platform. - To compile this driver as a module, choose M here: the - module will be called impd1. - config INTEGRATOR_CM7TDMI bool "Integrator/CM7TDMI core module" depends on ARCH_INTEGRATOR_AP diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index f7211b57b1e7..165c184801e1 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig @@ -13,7 +13,6 @@ config MACH_IXP4XX_OF select I2C select I2C_IOP3XX select PCI - select TIMER_OF select USE_OF help Say 'Y' here to support Device Tree-based IXP4xx platforms. diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index 638808c4e122..697adedaced4 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -62,7 +62,7 @@ static void __init keystone_init(void) static long long __init keystone_pv_fixup(void) { long long offset; - phys_addr_t mem_start, mem_end; + u64 mem_start, mem_end; mem_start = memblock_start_of_DRAM(); mem_end = memblock_end_of_DRAM(); @@ -75,7 +75,7 @@ static long long __init keystone_pv_fixup(void) if (mem_start < KEYSTONE_HIGH_PHYS_START || mem_end > KEYSTONE_HIGH_PHYS_END) { pr_crit("Invalid address space for memory (%08llx-%08llx)\n", - (u64)mem_start, (u64)mem_end); + mem_start, mem_end); return 0; } diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S index 14a6c3eb3298..f745a65d3bd7 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S +++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S @@ -15,6 +15,7 @@ #include #include +#include #include "ams-delta-fiq.h" #include "board-ams-delta.h" diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 532a3e4b98c6..090a8aafb25e 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, int index) { struct omap3_idle_statedata *cx = &omap3_idle_data[index]; + int error; if (omap_irq_pending() || need_resched()) goto return_sleep_time; @@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev, * Call idle CPU PM enter notifier chain so that * VFP context is saved. 
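The cpuidle changes in this region share one shape: cpu_pm_enter() may now fail (a PM notifier can refuse), so the idle path has to check the return value and fall back to a state that does not lose the context the notifiers would have saved. A sketch of that shape, with stubs standing in for the kernel calls:

        /* Stubs; the real functions are kernel APIs. */
        static int  cpu_pm_enter(void)       { return 0; /* or -EBUSY */ }
        static void cpu_pm_exit(void)        { }
        static void enter_deep_idle(void)    { }
        static void enter_shallow_idle(void) { }

        static void idle_enter(int deep)
        {
                if (!deep) {
                        enter_shallow_idle();
                        return;
                }
                if (cpu_pm_enter()) {   /* notifier refused: nothing saved */
                        enter_shallow_idle();
                        return;
                }
                enter_deep_idle();
                cpu_pm_exit();
        }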
*/ - if (cx->mpu_state == PWRDM_POWER_OFF) - cpu_pm_enter(); + if (cx->mpu_state == PWRDM_POWER_OFF) { + error = cpu_pm_enter(); + if (error) + goto out_clkdm_set; + } /* Execute ARM wfi */ omap_sram_idle(); @@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF) cpu_pm_exit(); +out_clkdm_set: /* Re-allow idle for C1 */ if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]); diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index fe75d4fa6073..de37027ad758 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, { struct idle_statedata *cx = state_ptr + index; u32 mpuss_can_lose_context = 0; + int error; /* * CPU0 has to wait and stay ON until CPU1 is OFF state. @@ -150,27 +151,37 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, (cx->mpu_logic_state == PWRDM_POWER_OFF); /* Enter broadcast mode for periodic timers */ - tick_broadcast_enable(); + RCU_NONIDLE(tick_broadcast_enable()); /* Enter broadcast mode for one-shot timers */ - tick_broadcast_enter(); + RCU_NONIDLE(tick_broadcast_enter()); /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. */ - cpu_pm_enter(); + error = cpu_pm_enter(); + if (error) + goto cpu_pm_out; if (dev->cpu == 0) { pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); - omap_set_pwrdm_state(mpu_pd, cx->mpu_state); + RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state)); /* * Call idle CPU cluster PM enter notifier chain * to save GIC and wakeupgen context. */ - if (mpuss_can_lose_context) - cpu_cluster_pm_enter(); + if (mpuss_can_lose_context) { + error = cpu_cluster_pm_enter(); + if (error) { + index = 0; + cx = state_ptr + index; + pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); + RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state)); + mpuss_can_lose_context = 0; + } + } } omap4_enter_lowpower(dev->cpu, cx->cpu_state); @@ -183,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context) gic_dist_disable(); - clkdm_deny_idle(cpu_clkdm[1]); - omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON); - clkdm_allow_idle(cpu_clkdm[1]); + RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1])); + RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON)); + RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1])); if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && mpuss_can_lose_context) { @@ -197,12 +208,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, } } - /* - * Call idle CPU PM exit notifier chain to restore - * VFP and per CPU IRQ context. - */ - cpu_pm_exit(); - /* * Call idle CPU cluster PM exit notifier chain * to restore GIC and wakeupgen context. @@ -210,7 +215,14 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, if (dev->cpu == 0 && mpuss_can_lose_context) cpu_cluster_pm_exit(); - tick_broadcast_exit(); + /* + * Call idle CPU PM exit notifier chain to restore + * VFP and per CPU IRQ context. 
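The RCU_NONIDLE() wrappers added above exist because these helpers can use RCU internally, while RCU treats a CPU that has entered the idle loop as quiescent. The macro brackets the statement roughly like this; a simplified model of the rcupdate.h definition, with the enter/exit functions stubbed:

        static void rcu_irq_enter_irqson(void) { /* CPU counts as non-idle */ }
        static void rcu_irq_exit_irqson(void)  { /* back to idle for RCU   */ }

        #define RCU_NONIDLE(a)                          \
                do {                                    \
                        rcu_irq_enter_irqson();         \
                        do { a; } while (0);            \
                        rcu_irq_exit_irqson();          \
                } while (0)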
+ */ + cpu_pm_exit(); + +cpu_pm_out: + RCU_NONIDLE(tick_broadcast_exit()); fail: cpuidle_coupled_parallel_barrier(dev, &abort_barrier); diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index f1a6ece8108e..78247e6f4a72 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -11,14 +11,43 @@ #include "omap_hwmod.h" #include "omap_device.h" +#include "clockdomain.h" #include "powerdomain.h" +static void omap_iommu_dra7_emu_swsup_config(struct platform_device *pdev, + bool enable) +{ + static struct clockdomain *emu_clkdm; + static DEFINE_SPINLOCK(emu_lock); + static atomic_t count; + struct device_node *np = pdev->dev.of_node; + + if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu")) + return; + + if (!emu_clkdm) { + emu_clkdm = clkdm_lookup("emu_clkdm"); + if (WARN_ON_ONCE(!emu_clkdm)) + return; + } + + spin_lock(&emu_lock); + + if (enable && (atomic_inc_return(&count) == 1)) + clkdm_deny_idle(emu_clkdm); + else if (!enable && (atomic_dec_return(&count) == 0)) + clkdm_allow_idle(emu_clkdm); + + spin_unlock(&emu_lock); +} + int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, u8 *pwrst) { struct powerdomain *pwrdm; struct omap_device *od; u8 next_pwrst; + int ret = 0; od = to_omap_device(pdev); if (!od) @@ -31,13 +60,21 @@ int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, if (!pwrdm) return -EINVAL; - if (request) + if (request) { *pwrst = pwrdm_read_next_pwrst(pwrdm); + omap_iommu_dra7_emu_swsup_config(pdev, true); + } if (*pwrst > PWRDM_POWER_RET) - return 0; + goto out; next_pwrst = request ? PWRDM_POWER_ON : *pwrst; - return pwrdm_set_next_pwrst(pwrdm, next_pwrst); + ret = pwrdm_set_next_pwrst(pwrdm, next_pwrst); + +out: + if (!request) + omap_iommu_dra7_emu_swsup_config(pdev, false); + + return ret; } diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 3acb4192918d..f85a0fd6aca5 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -234,10 +234,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, break; case BUS_NOTIFY_BIND_DRIVER: od = to_omap_device(pdev); - if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && - pm_runtime_status_suspended(dev)) { + if (od) { od->_driver_status = BUS_NOTIFY_BIND_DRIVER; - pm_runtime_set_active(dev); + if (od->_state == OMAP_DEVICE_STATE_ENABLED && + pm_runtime_status_suspended(dev)) { + pm_runtime_set_active(dev); + } } break; case BUS_NOTIFY_ADD_DEVICE: diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 203664c40d3d..eb74aa182661 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -3535,7 +3535,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = { }; static const struct omap_hwmod_reset omap_reset_quirks[] = { - { .match = "dss", .len = 3, .reset = omap_dss_reset, }, + { .match = "dss_core", .len = 8, .reset = omap_dss_reset, }, { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, }, { .match = "i2c", .len = 3, .reset = omap_i2c_reset, }, { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, }, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 247e3f8acffe..ca07e310d9ed 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -44,6 +44,17 @@ struct pdata_init { static struct of_dev_auxdata omap_auxdata_lookup[]; static struct twl4030_gpio_platform_data twl_gpio_auxdata; 
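omap_iommu_dra7_emu_swsup_config() above is a first-enable/last-disable gate: a counter under a lock, acting only on the 0->1 and 1->0 transitions. The same shape in portable C, with pthread primitives standing in for the kernel's spinlock and atomic_t:

        #include <pthread.h>

        static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
        static int gate_count;

        static void clkdm_deny_idle_stub(void)  { /* keep domain active */ }
        static void clkdm_allow_idle_stub(void) { /* let domain idle    */ }

        static void gate_config(int enable)
        {
                pthread_mutex_lock(&gate_lock);
                if (enable && ++gate_count == 1)
                        clkdm_deny_idle_stub();   /* first user */
                else if (!enable && --gate_count == 0)
                        clkdm_allow_idle_stub();  /* last user  */
                pthread_mutex_unlock(&gate_lock);
        }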
+#if IS_ENABLED(CONFIG_OMAP_IOMMU) +int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, + u8 *pwrst); +#else +static inline int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, + bool request, u8 *pwrst) +{ + return 0; +} +#endif + #ifdef CONFIG_MACH_NOKIA_N8X0 static void __init omap2420_n8x0_legacy_init(void) { @@ -311,16 +322,6 @@ static void __init omap3_pandora_legacy_init(void) } #endif /* CONFIG_ARCH_OMAP3 */ -#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) -static struct iommu_platform_data omap4_iommu_pdata = { - .reset_name = "mmu_cache", - .assert_reset = omap_device_assert_hardreset, - .deassert_reset = omap_device_deassert_hardreset, - .device_enable = omap_device_enable, - .device_idle = omap_device_idle, -}; -#endif - #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) static struct wkup_m3_platform_data wkup_m3_data = { .reset_name = "wkup_m3", @@ -336,6 +337,10 @@ static void __init omap5_uevm_legacy_init(void) #endif #ifdef CONFIG_SOC_DRA7XX +static struct iommu_platform_data dra7_ipu1_dsp_iommu_pdata = { + .set_pwrdm_constraint = omap_iommu_set_pwrdm_constraint, +}; + static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc1; static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc2; static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc3; @@ -543,10 +548,6 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { &wkup_m3_data), #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) - OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu", - &omap4_iommu_pdata), - OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu", - &omap4_iommu_pdata), OF_DEV_AUXDATA("ti,omap4-smartreflex-iva", 0x4a0db000, "4a0db000.smartreflex", &omap_sr_pdata[OMAP_SR_IVA]), OF_DEV_AUXDATA("ti,omap4-smartreflex-core", 0x4a0dd000, @@ -561,6 +562,12 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { &dra7_hsmmc_data_mmc2), OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x480ad000, "480ad000.mmc", &dra7_hsmmc_data_mmc3), + OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x40d01000, "40d01000.mmu", + &dra7_ipu1_dsp_iommu_pdata), + OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x41501000, "41501000.mmu", + &dra7_ipu1_dsp_iommu_pdata), + OF_DEV_AUXDATA("ti,dra7-iommu", 0x58882000, "58882000.mmu", + &dra7_ipu1_dsp_iommu_pdata), #endif /* Common auxdata */ OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata), diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 54254fc92c2e..fa66534a7ae2 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -194,6 +194,7 @@ void omap_sram_idle(void) int per_next_state = PWRDM_POWER_ON; int core_next_state = PWRDM_POWER_ON; u32 sdrc_pwr = 0; + int error; mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); switch (mpu_next_state) { @@ -222,8 +223,11 @@ void omap_sram_idle(void) pwrdm_pre_transition(NULL); /* PER */ - if (per_next_state == PWRDM_POWER_OFF) - cpu_cluster_pm_enter(); + if (per_next_state == PWRDM_POWER_OFF) { + error = cpu_cluster_pm_enter(); + if (error) + return; + } /* CORE */ if (core_next_state < PWRDM_POWER_ON) { diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c index 58c5ef3cf1d7..2d370f7f75fa 100644 --- a/arch/arm/mach-s3c24xx/mach-at2440evb.c +++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c @@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = { .dev_id = "s3c2410-sdi", .table = { /* Card detect S3C2410_GPG(10) */ - GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW), + 
GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index 74d6b68e91c7..8d9d8e7c71d4 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = { .dev_id = "s3c2410-sdi", .table = { /* Card detect S3C2410_GPF(5) */ - GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW), /* Write protect S3C2410_GPH(8) */ - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index 9035f868fb34..3a5b1124037b 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = { .dev_id = "s3c2410-sdi", .table = { /* Card detect S3C2410_GPG(8) */ - GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW), /* Write protect S3C2410_GPH(8) */ - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH), { }, }, }; diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c index d856f23939af..ffa20f52aa83 100644 --- a/arch/arm/mach-s3c24xx/mach-n30.c +++ b/arch/arm/mach-s3c24xx/mach-n30.c @@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = { .dev_id = "s3c2410-sdi", .table = { /* Card detect S3C2410_GPF(1) */ - GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW), /* Write protect S3C2410_GPG(10) */ - GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index 29f9b345a531..534e9c1d8161 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -567,9 +567,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = { .dev_id = "s3c2410-sdi", .table = { /* Card detect S3C2410_GPF(5) */ - GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW), /* Write protect S3C2410_GPH(8) */ - GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c index 6ed887cf8dc9..365c0428b21b 100644 --- a/arch/arm/mach-socfpga/pm.c +++ b/arch/arm/mach-socfpga/pm.c @@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void) if (!suspend_ocram_base) { pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } /* Copy the code that puts DDR in self refresh to ocram */ @@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void) if (!socfpga_sdram_self_refresh_in_ocram) ret = -EFAULT; +put_device: + put_device(&pdev->dev); put_node: of_node_put(np); diff --git 
a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 933b6930f024..a0ca5e7a68de 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = { "allwinner,sun8i-h2-plus", "allwinner,sun8i-h3", "allwinner,sun8i-r40", + "allwinner,sun8i-v3", "allwinner,sun8i-v3s", NULL, }; diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index e512e606eabd..5ea3421fa1e8 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -106,8 +106,8 @@ static const char * const tegra_dt_board_compat[] = { }; DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") - .l2c_aux_val = 0x3c400001, - .l2c_aux_mask = 0xc20fc3fe, + .l2c_aux_val = 0x3c400000, + .l2c_aux_mask = 0xc20fc3ff, .smp = smp_ops(tegra_smp_ops), .map_io = tegra_map_common_io, .init_early = tegra_init_early, diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 12c26eb88afb..43d91bfd2360 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np, ret = of_property_read_u32(np, "prefetch-data", &val); if (ret == 0) { - if (val) + if (val) { prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; - else + *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH; + } else { prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + } + *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; } else if (ret != -EINVAL) { pr_err("L2C-310 OF prefetch-data property value is missing\n"); } ret = of_property_read_u32(np, "prefetch-instr", &val); if (ret == 0) { - if (val) + if (val) { prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; - else + *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH; + } else { prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + } + *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; } else if (ret != -EINVAL) { pr_err("L2C-310 OF prefetch-instr property value is missing\n"); } diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 5461d589a1e2..60ac7c5999a9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -5,6 +5,7 @@ * VMA_VM_FLAGS * VM_EXEC */ +#include #include #include @@ -30,7 +31,7 @@ * act_mm - get current->active_mm */ .macro act_mm, rd - bic \rd, sp, #8128 + bic \rd, sp, #(THREAD_SIZE - 1) & ~63 bic \rd, \rd, #63 ldr \rd, [\rd, #TI_TASK] .if (TSK_ACTIVE_MM > IMM12_MASK) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 97dc386e3cb8..b51a8c7b0111 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -929,7 +929,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[], rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSR operation */ - if (val < 32) { + if (val == 0) { + /* An immediate value of 0 encodes a shift amount of 32 + * for LSR. To shift by 0, don't do anything. + */ + } else if (val < 32) { emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); @@ -955,7 +959,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[], rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do ARSH operation */ - if (val < 32) { + if (val == 0) { + /* An immediate value of 0 encodes a shift amount of 32 + * for ASR. To shift by 0, don't do anything. 
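The shift-by-0 special case above comes from an A32 encoding quirk: LSR/ASR by immediate has no encoding for a zero shift, because imm5 == 0 is reused to mean a 32-bit shift. A small helper (name illustrative) makes the mapping explicit:

        #include <assert.h>
        #include <stdint.h>

        /* Value of the imm5 field (bits [11:7]) for LSR/ASR by #shift. */
        static uint32_t lsr_asr_imm5(unsigned int shift)
        {
                assert(shift >= 1 && shift <= 32);  /* 0 is unencodable   */
                return shift == 32 ? 0 : shift;     /* imm5 == 0 means 32 */
        }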
+ */ + } else if (val < 32) { emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); @@ -992,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], arm_bpf_put_reg32(dst_hi, rd[0], ctx); } +static bool is_ldst_imm(s16 off, const u8 size) +{ + s16 off_max = 0; + + switch (size) { + case BPF_B: + case BPF_W: + off_max = 0xfff; + break; + case BPF_H: + off_max = 0xff; + break; + case BPF_DW: + /* Need to make sure off+4 does not overflow. */ + off_max = 0xfff - 4; + break; + } + return -off_max <= off && off <= off_max; +} + /* *(size *)(dst + off) = src */ static inline void emit_str_r(const s8 dst, const s8 src[], - s32 off, struct jit_ctx *ctx, const u8 sz){ + s16 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s32 off_max; s8 rd; rd = arm_bpf_get_reg32(dst, tmp[1], ctx); - if (sz == BPF_H) - off_max = 0xff; - else - off_max = 0xfff; - - if (off < 0 || off > off_max) { + if (!is_ldst_imm(off, sz)) { emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); rd = tmp[0]; @@ -1035,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[], /* dst = *(size*)(src + off) */ static inline void emit_ldx_r(const s8 dst[], const s8 src, - s32 off, struct jit_ctx *ctx, const u8 sz){ + s16 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *rd = is_stacked(dst_lo) ? tmp : dst; s8 rm = src; - s32 off_max; - if (sz == BPF_H) - off_max = 0xff; - else - off_max = 0xfff; - - if (off < 0 || off > off_max) { + if (!is_ldst_imm(off, sz)) { emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; @@ -1586,6 +1602,9 @@ exit: rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index 301e572651c0..790c87ee7271 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -241,6 +241,7 @@ config SAMSUNG_PM_DEBUG depends on PM && DEBUG_KERNEL depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART + depends on DEBUG_LL && MMU help Say Y here if you want verbose debugging from the PM Suspend and Resume code. 
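For reference, the is_ldst_imm() helper introduced above mirrors the A32 load/store immediate encodings: byte and word accesses take a 12-bit immediate, halfwords only 8 bits, and a 64-bit access needs room for the second word at off + 4; offsets outside the range make the JIT materialise the address in a temporary register instead. The same check as a standalone, compilable C sketch (the BPF_* size codes carry their standard UAPI values):

```c
#include <assert.h>
#include <stdint.h>

/* BPF size codes, standard UAPI values. */
#define BPF_W  0x00
#define BPF_H  0x08
#define BPF_B  0x10
#define BPF_DW 0x18

/* Which signed offsets fit the A32 load/store immediate forms? */
static int is_ldst_imm(int16_t off, uint8_t size)
{
	int16_t off_max = 0;

	switch (size) {
	case BPF_B:
	case BPF_W:
		off_max = 0xfff;	/* LDR/STR/LDRB/STRB: 12-bit immediate */
		break;
	case BPF_H:
		off_max = 0xff;		/* LDRH/STRH: 8-bit immediate */
		break;
	case BPF_DW:
		off_max = 0xfff - 4;	/* second word lives at off + 4 */
		break;
	}
	return -off_max <= off && off <= off_max;
}

int main(void)
{
	assert(is_ldst_imm(-0xfff, BPF_W));	/* negative offsets now allowed */
	assert(!is_ldst_imm(0x100, BPF_H));	/* halfword range is narrower */
	assert(!is_ldst_imm(0xffc, BPF_DW));	/* 0xffc + 4 would overflow the field */
	return 0;
}
```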
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 7a449df0b359..c78180172120 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -85,21 +85,21 @@ asm ( "optprobe_template_end:\n"); #define TMPL_VAL_IDX \ - ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry) #define TMPL_CALL_IDX \ - ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry) #define TMPL_END_IDX \ - ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry) #define TMPL_ADD_SP \ - ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry) #define TMPL_SUB_SP \ - ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry) #define TMPL_RESTORE_BEGIN \ - ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry) #define TMPL_RESTORE_ORIGN_INSN \ - ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry) #define TMPL_RESTORE_END \ - ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry) + ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry) /* * ARM can always optimize an instruction when using ARM ISA, except @@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, (unsigned long *)&optprobe_template_entry, + memcpy(code, (unsigned long *)optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c index c4b49b322e8a..f5f790c6e5f8 100644 --- a/arch/arm/probes/uprobes/core.c +++ b/arch/arm/probes/uprobes/core.c @@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) static struct undef_hook uprobes_arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff), - .cpsr_mask = MODE_MASK, + .cpsr_mask = (PSR_T_BIT | MODE_MASK), .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, }; @@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = { static struct undef_hook uprobes_arm_ss_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff), - .cpsr_mask = MODE_MASK, + .cpsr_mask = (PSR_T_BIT | MODE_MASK), .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, };
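The uprobes change above adds PSR_T_BIT to cpsr_mask: an undef hook fires when (cpsr & cpsr_mask) == cpsr_val, so masking only the mode bits let hooks registered for ARM-encoded breakpoint instructions also match while the CPU is executing in Thumb state. A self-contained sketch of the matching rule, with the constants written out as defined for 32-bit ARM:

```c
#include <assert.h>
#include <stdint.h>

/* CPSR constants as defined for 32-bit ARM. */
#define MODE_MASK	0x1f
#define USR_MODE	0x10
#define PSR_T_BIT	0x20

/* An undef hook fires when (cpsr & cpsr_mask) == cpsr_val. */
static int hook_matches(uint32_t cpsr, uint32_t mask, uint32_t val)
{
	return (cpsr & mask) == val;
}

int main(void)
{
	uint32_t arm_usr = USR_MODE;			/* user mode, ARM state */
	uint32_t thumb_usr = USR_MODE | PSR_T_BIT;	/* user mode, Thumb state */

	/* Old mask (mode bits only): Thumb-state user code matched too. */
	assert(hook_matches(thumb_usr, MODE_MASK, USR_MODE));

	/* Fixed mask: only ARM-state user mode matches. */
	assert(hook_matches(arm_usr, PSR_T_BIT | MODE_MASK, USR_MODE));
	assert(!hook_matches(thumb_usr, PSR_T_BIT | MODE_MASK, USR_MODE));
	return 0;
}
```

The vfp_kmode_exception_hook array added further below relies on exactly the same mask and value convention.

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 0186cf9da890..27b0a1f27fbd 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -37,20 +37,3 @@ ENDPROC(vfp_null_entry) .align 2 .LCvfp: .word vfp_vector - -@ This code is called if the VFP does not exist. It needs to flag the -@ failure to the VFP initialisation code.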
- - __INIT -ENTRY(vfp_testing_entry) - dec_preempt_count_ti r10, r4 - ldr r0, VFP_arch_address - str r0, [r0] @ set to non-zero value - ret r9 @ we have handled the fault -ENDPROC(vfp_testing_entry) - - .align 2 -VFP_arch_address: - .word VFP_arch - - __FINIT diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index b2e560290860..b530db8f2c6c 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -78,11 +78,6 @@ ENTRY(vfp_support_entry) DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 - ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions - and r3, r3, #MODE_MASK @ are supported in kernel mode - teq r3, #USR_MODE - bne vfp_kmode_exception @ Returns through lr - VFPFMRX r1, FPEXC @ Is the VFP enabled? DBGSTR1 "fpexc %08x", r1 tst r1, #FPEXC_EN diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 8c9e7f9f0277..2cb355c1b5b7 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "vfpinstr.h" @@ -31,7 +32,6 @@ /* * Our undef handlers (in entry.S) */ -asmlinkage void vfp_testing_entry(void); asmlinkage void vfp_support_entry(void); asmlinkage void vfp_null_entry(void); @@ -42,7 +42,7 @@ asmlinkage void (*vfp_vector)(void) = vfp_null_entry; * Used in startup: set to non-zero if VFP checks fail * After startup, holds VFP architecture */ -unsigned int VFP_arch; +static unsigned int __initdata VFP_arch; /* * The pointer to the vfpstate structure of the thread which currently @@ -436,7 +436,7 @@ static void vfp_enable(void *unused) * present on all CPUs within a SMP complex. Needs to be called prior to * vfp_init(). */ -void vfp_disable(void) +void __init vfp_disable(void) { if (VFP_arch) { pr_debug("%s: should be called prior to vfp_init\n", __func__); @@ -642,7 +642,9 @@ static int vfp_starting_cpu(unsigned int unused) return 0; } -void vfp_kmode_exception(void) +#ifdef CONFIG_KERNEL_MODE_NEON + +static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr) { /* * If we reach this point, a floating point exception has been raised @@ -660,9 +662,51 @@ void vfp_kmode_exception(void) pr_crit("BUG: unsupported FP instruction in kernel mode\n"); else pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n"); + pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC)); + return 1; } -#ifdef CONFIG_KERNEL_MODE_NEON +static struct undef_hook vfp_kmode_exception_hook[] = {{ + .instr_mask = 0xfe000000, + .instr_val = 0xf2000000, + .cpsr_mask = MODE_MASK | PSR_T_BIT, + .cpsr_val = SVC_MODE, + .fn = vfp_kmode_exception, +}, { + .instr_mask = 0xff100000, + .instr_val = 0xf4000000, + .cpsr_mask = MODE_MASK | PSR_T_BIT, + .cpsr_val = SVC_MODE, + .fn = vfp_kmode_exception, +}, { + .instr_mask = 0xef000000, + .instr_val = 0xef000000, + .cpsr_mask = MODE_MASK | PSR_T_BIT, + .cpsr_val = SVC_MODE | PSR_T_BIT, + .fn = vfp_kmode_exception, +}, { + .instr_mask = 0xff100000, + .instr_val = 0xf9000000, + .cpsr_mask = MODE_MASK | PSR_T_BIT, + .cpsr_val = SVC_MODE | PSR_T_BIT, + .fn = vfp_kmode_exception, +}, { + .instr_mask = 0x0c000e00, + .instr_val = 0x0c000a00, + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = vfp_kmode_exception, +}}; + +static int __init vfp_kmode_exception_hook_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++) + register_undef_hook(&vfp_kmode_exception_hook[i]); + return 0; +} +subsys_initcall(vfp_kmode_exception_hook_init); /* * Kernel-side NEON support functions @@ -708,6 +752,21 @@ EXPORT_SYMBOL(kernel_neon_end); 
#endif /* CONFIG_KERNEL_MODE_NEON */ +static int __init vfp_detect(struct pt_regs *regs, unsigned int instr) +{ + VFP_arch = UINT_MAX; /* mark as not present */ + regs->ARM_pc += 4; + return 0; +} + +static struct undef_hook vfp_detect_hook __initdata = { + .instr_mask = 0x0c000e00, + .instr_val = 0x0c000a00, + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = vfp_detect, +}; + /* * VFP support code initialisation. */ @@ -728,10 +787,11 @@ static int __init vfp_init(void) * The handler is already setup to just log calls, so * we just need to read the VFPSID register. */ - vfp_vector = vfp_testing_entry; + register_undef_hook(&vfp_detect_hook); barrier(); vfpsid = fmrx(FPSID); barrier(); + unregister_undef_hook(&vfp_detect_hook); vfp_vector = vfp_null_entry; pr_info("VFP support v0.3: "); diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index dd6804a64f1a..57dfc13b2752 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -370,8 +370,6 @@ static int __init xen_guest_init(void) return -ENOMEM; } gnttab_init(); - if (!xen_initial_domain()) - xenbus_probe(NULL); /* * Making sure board specific code will not set up ops for diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index e52950a43f2e..acb464547a54 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -93,10 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i; for (i = 0; i < count; i++) { + struct gnttab_unmap_grant_ref unmap; + int rc; + if (map_ops[i].status) continue; - set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); + if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap.host_addr = map_ops[i].host_addr, + unmap.handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap.dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap.dev_bus_addr = 0; + + /* + * Pre-populate the status field, to be recognizable in + * the log message below. + */ + unmap.status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + &unmap, 1); + if (rc || unmap.status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st=%d\n", + rc, unmap.status); } return 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a3c45f6f97db..09d370f8c989 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -325,6 +325,27 @@ menu "ARM errata workarounds via the alternatives framework" config ARM64_WORKAROUND_CLEAN_CACHE bool +config ALTRA_ERRATUM_82288 + bool "Ampere Altra: 82288: PCIE_65: PCIe Root Port outbound write combining issue" + default y + help + This option adds an alternative code sequence to work around + Ampere Altra erratum 82288. + + PCIe device drivers may map MMIO space with the Normal, non-cacheable + memory attribute (e.g. Linux kernel drivers mapping MMIO + using ioremap_wc). This may be for the purpose of enabling write + combining or unaligned accesses. This can result in data corruption + on the PCIe interface’s outbound MMIO writes due to issues with the + write-combining operation. + + The workaround modifies software that maps PCIe MMIO space as Normal, + non-cacheable memory (e.g. ioremap_wc) to instead use Device, + non-gathering memory (e.g. ioremap), and requires all memory operations + on PCIe MMIO space to be strictly aligned.
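In driver terms, the erratum description above means PCIe MMIO mappings on affected parts should come from ioremap() (Device, non-gathering memory) rather than ioremap_wc(), and stores must stay naturally aligned. A hedged sketch of the access pattern that remains safe, using a volatile pointer as a user-space stand-in for an __iomem pointer plus writel(); the helper name is made up for illustration:

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Copy a buffer into PCIe MMIO using only aligned 32-bit stores. In a
 * real driver 'mmio' would come from ioremap() (Device, non-gathering
 * memory), not ioremap_wc(); the volatile pointer and the helper name
 * are user-space stand-ins for illustration.
 */
static void mmio_write_buf(volatile uint32_t *mmio,
			   const uint32_t *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		mmio[i] = buf[i];	/* one aligned store per word */
}

int main(void)
{
	uint32_t fake_bar[4] = { 0 };
	const uint32_t payload[4] = { 1, 2, 3, 4 };

	mmio_write_buf(fake_bar, payload, 4);
	return fake_bar[3] == 4 ? 0 : 1;
}
```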
+ + If unsure, say Y. + config ARM64_ERRATUM_826319 bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted" default y @@ -492,7 +513,7 @@ config ARM64_ERRATUM_1024718 help This option adds a workaround for ARM Cortex-A55 Erratum 1024718. - Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect + Affected Cortex-A55 cores (all revisions) could cause incorrect update of the hardware dirty bit when the DBM/AP bits are updated without a break-before-make. The workaround is to disable the usage of hardware DBM locally on the affected cores. CPUs not affected by @@ -562,6 +583,22 @@ config ARM64_ERRATUM_1463225 If unsure, say Y. +config ARM64_ERRATUM_1542419 + bool "Neoverse-N1: workaround mis-ordering of instruction fetches" + default y + help + This option adds a workaround for ARM Neoverse-N1 erratum + 1542419. + + Affected Neoverse-N1 cores could execute a stale instruction when + modified by another CPU. The workaround depends on a firmware + counterpart. + + Workaround the issue by hiding the DIC feature from EL0. This + forces user-space to perform cache maintenance. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -916,6 +953,13 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK config HOLES_IN_ZONE def_bool y +# Some NUMA nodes have memory ranges that span other nodes. +# Even though a pfn is valid and between a node's start and end pfns, +# it may not reside on that node. +config NODES_SPAN_OTHER_NODES + def_bool y + depends on ACPI_NUMA + source "kernel/Kconfig.hz" config ARCH_SUPPORTS_DEBUG_PAGEALLOC diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index fa2d6e200ae1..b1d280a95714 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -54,6 +54,7 @@ config ARCH_BCM_IPROC config ARCH_BERLIN bool "Marvell Berlin SoC Family" select DW_APB_ICTL + select DW_APB_TIMER_OF select GPIOLIB select PINCTRL help @@ -179,6 +180,12 @@ config ARCH_MXC This enables support for the ARMv8 based SoCs in the NXP i.MX family. +config ARCH_PHYTIUM + bool "Phytium SoC Family" + help + This enables support for Phytium ARMv8 SoC family. + select ARM_GIC_PHYTIUM_2500 + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 1fbe24d4fdb6..4ed02864e652 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -10,7 +10,7 @@ # # Copyright (C) 1995-2001 by Russell King -LDFLAGS_vmlinux :=--no-undefined -X +LDFLAGS_vmlinux :=--no-undefined -X -z norelro CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 @@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour # for relative relocs, since this leads to better Image compression # with the relocation offsets always being zero. -LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \ +LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \ $(call ld-option, --no-apply-dynamic-relocs) endif @@ -72,6 +72,10 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif +# Ensure that if the compiler supports branch protection we default it +# off. 
+KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none) + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ @@ -147,6 +151,8 @@ zinstall install: PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ + $(if $(CONFIG_COMPAT_VDSO), \ + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@) # We use MRPROPER_FILES and CLEAN_FILES now archclean: diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi index 2006ad5424fa..f8eb72bb4125 100644 --- a/arch/arm64/boot/dts/actions/s700.dtsi +++ b/arch/arm64/boot/dts/actions/s700.dtsi @@ -231,7 +231,7 @@ pinctrl: pinctrl@e01b0000 { compatible = "actions,s700-pinctrl"; - reg = <0x0 0xe01b0000 0x0 0x1000>; + reg = <0x0 0xe01b0000 0x0 0x100>; clocks = <&cmu CLK_GPIO>; gpio-controller; gpio-ranges = <&pinctrl 0 0 136>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index 208373efee49..7d1e89e5b1ae 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -127,7 +127,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply = <&reg_dc1sw>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index 04446e4716c4..a0db02504b69 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -129,7 +129,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply = <&reg_gmac_3v3>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts index 72d6961dc312..7eb252adf9f0 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts @@ -11,3 +11,7 @@ compatible = "pine64,pine64-lts", "allwinner,sun50i-r18", "allwinner,sun50i-a64"; }; + +&mmc0 { + broken-cd; /* card detect is broken on *some* boards */ +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts index d5b6e8159a33..5d0905f0f1c1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts @@ -52,7 +52,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-txid"; phy-handle = <&ext_rgmii_phy>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts index 78c82a665c84..bb1de8217b86 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts @@ -103,8 +103,6 @@ }; &ehci0 { - phys = <&usbphy 0>; - phy-names = "usb"; status = "okay"; }; @@ -142,6 +140,7 @@ pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>; vmmc-supply = <&reg_dcdc1>; vqmmc-supply = <&reg_eldo1>; + max-frequency = <200000000>; bus-width = <8>; non-removable; cap-mmc-hw-reset; @@ -150,8 +149,6 @@ }; &ohci0 { - phys = <&usbphy 0>; - phy-names = "usb"; status = "okay"; };
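A number of hunks in this patch flip phy-mode from "rgmii" to "rgmii-id" or "rgmii-txid". RGMII needs roughly 2 ns of skew between clock and data in each direction, and the -id/-txid/-rxid variants ask the PHY to insert the TX and/or RX delay internally instead of assuming the MAC or the board traces provide it; advertising plain "rgmii" on boards wired for PHY-side delays is what these fixes address. The mapping, as a small self-contained C helper (the enum mirrors the meaning of the device-tree strings; the names are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* Who inserts the ~2 ns RGMII clock skew? Mirrors the phy-mode strings. */
enum rgmii_mode {
	RGMII,		/* "rgmii": MAC or board traces provide both delays */
	RGMII_ID,	/* "rgmii-id": PHY adds RX and TX delay */
	RGMII_TXID,	/* "rgmii-txid": PHY adds TX delay only */
	RGMII_RXID,	/* "rgmii-rxid": PHY adds RX delay only */
};

static void phy_delays(enum rgmii_mode m, bool *phy_tx, bool *phy_rx)
{
	*phy_tx = (m == RGMII_ID || m == RGMII_TXID);
	*phy_rx = (m == RGMII_ID || m == RGMII_RXID);
}

int main(void)
{
	bool tx, rx;

	phy_delays(RGMII_ID, &tx, &rx);
	printf("rgmii-id: PHY adds tx=%d rx=%d\n", tx, rx);	/* 1 1 */
	return 0;
}
```

diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi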
b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi index 9d20e13f0c02..19e5b7e298fd 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi @@ -55,10 +55,9 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; vmmc-supply = <&reg_dcdc1>; - non-removable; disable-wp; bus-width = <4>; - cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ + cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */ status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index ba41c1b85887..cf9e3234afaf 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -227,7 +227,7 @@ display_clocks: clock@0 { compatible = "allwinner,sun50i-a64-de2-clk"; - reg = <0x0 0x100000>; + reg = <0x0 0x10000>; clocks = <&ccu CLK_BUS_DE>, <&ccu CLK_DE>; clock-names = "bus", @@ -476,7 +476,7 @@ resets = <&ccu RST_BUS_MMC2>; reset-names = "ahb"; interrupts = ; - max-frequency = <200000000>; + max-frequency = <150000000>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -530,6 +530,8 @@ <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>, <&ccu RST_BUS_EHCI0>; + phys = <&usbphy 0>; + phy-names = "usb"; status = "disabled"; }; @@ -540,6 +542,8 @@ clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>; + phys = <&usbphy 0>; + phy-names = "usb"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts index e126c1c9f05c..4d357b81b0c0 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts @@ -157,7 +157,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_gmac_3v3>; phy-handle = <&ext_rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts index d9b3ed257088..f10340339007 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts @@ -164,7 +164,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <&reg_gmac_3v3>; phy-handle = <&ext_rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index 1d34e3eefda3..8466d44ee0b1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi @@ -77,8 +77,7 @@ }; pmu { - compatible = "arm,cortex-a53-pmu", - "arm,armv8-pmuv3"; + compatible = "arm,cortex-a53-pmu"; interrupts = , , , @@ -156,8 +155,7 @@ , , , - , - ; + ; interrupt-names = "gp", "gpmmu", "pp", @@ -168,8 +166,7 @@ "pp2", "ppmmu2", "pp3", - "ppmmu3", - "pmu"; + "ppmmu3"; clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>; clock-names = "bus", "core"; resets = <&ccu RST_BUS_GPU>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 1d05d570142f..0a04730960fc 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -83,7 +83,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&ext_rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply =
<&reg_aldo2>; status = "okay"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts index 30102daf83cc..3f2882b36616 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts @@ -66,7 +66,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&ext_rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply = <&reg_aldo2>; allwinner,rx-delay-ps = <200>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index d19253891672..1583cd591521 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -71,8 +71,7 @@ }; pmu { - compatible = "arm,cortex-a53-pmu", - "arm,armv8-pmuv3"; + compatible = "arm,cortex-a53-pmu"; interrupts = , , , @@ -333,6 +332,7 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; + max-frequency = <150000000>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -349,6 +349,7 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; + max-frequency = <150000000>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -365,6 +366,7 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; + max-frequency = <150000000>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -534,6 +536,8 @@ <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>, <&ccu RST_BUS_EHCI0>; + phys = <&usb2phy 0>; + phy-names = "usb"; status = "disabled"; }; @@ -544,6 +548,8 @@ clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>; + phys = <&usb2phy 0>; + phy-names = "usb"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d1fc9c2055f4..9498d1de730c 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -77,7 +77,7 @@ method = "smc"; }; - intc: intc@fffc1000 { + intc: interrupt-controller@fffc1000 { compatible = "arm,gic-400", "arm,cortex-a15-gic"; #interrupt-cells = <3>; interrupt-controller; @@ -302,7 +302,7 @@ status = "disabled"; }; - nand: nand@ffb90000 { + nand: nand-controller@ffb90000 { #address-cells = <1>; #size-cells = <0>; compatible = "altr,socfpga-denali-nand"; @@ -445,7 +445,7 @@ clock-names = "timer"; }; - uart0: serial0@ffc02000 { + uart0: serial@ffc02000 { compatible = "snps,dw-apb-uart"; reg = <0xffc02000 0x100>; interrupts = <0 108 4>; @@ -456,7 +456,7 @@ status = "disabled"; }; - uart1: serial1@ffc02100 { + uart1: serial@ffc02100 { compatible = "snps,dw-apb-uart"; reg = <0xffc02100 0x100>; interrupts = <0 109 4>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 66e4ffb4e929..2c8c2b322c72 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -155,6 +155,7 @@ }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index bb4a2acb9970..502c4ac45c29 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -1728,18 +1728,18 @@ }; sram: sram@fffc0000 { - compatible = "amlogic,meson-axg-sram", "mmio-sram"; + compatible =
"mmio-sram"; reg = <0x0 0xfffc0000 0x0 0x20000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x0 0xfffc0000 0x20000>; - cpu_scp_lpri: scp-shmem@13000 { + cpu_scp_lpri: scp-sram@13000 { compatible = "amlogic,meson-axg-scp-shmem"; reg = <0x13000 0x400>; }; - cpu_scp_hpri: scp-shmem@13400 { + cpu_scp_hpri: scp-sram@13400 { compatible = "amlogic,meson-axg-scp-shmem"; reg = <0x13400 0x400>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 0ee8a369c547..9533c85fb0a3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -167,6 +167,8 @@ hwrng: rng@218 { compatible = "amlogic,meson-rng"; reg = <0x0 0x218 0x0 0x4>; + clocks = <&clkc CLKID_RNG0>; + clock-names = "core"; }; }; @@ -2365,7 +2367,7 @@ reg = <0x0 0xff400000 0x0 0x40000>; interrupts = ; clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; - clock-names = "ddr"; + clock-names = "otg"; phys = <&usb2_phy1>; phy-names = "usb2-phy"; dr_mode = "peripheral"; @@ -2380,7 +2382,8 @@ interrupts = ; dr_mode = "host"; snps,dis_u2_susphy_quirk; - snps,quirk-frame-length-adjustment; + snps,quirk-frame-length-adjustment = <0x20>; + snps,parkmode-disable-ss-quirk; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts index 17155fb73fce..c48125bf9d1e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts @@ -340,7 +340,7 @@ eee-broken-1000t; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi index 554863429aa6..e2094575f528 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi @@ -152,6 +152,10 @@ clock-latency = <50000>; }; +&frddr_a { + status = "okay"; +}; + &frddr_b { status = "okay"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 6733050d735f..ce230d6ac35c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -345,20 +345,20 @@ }; sram: sram@c8000000 { - compatible = "amlogic,meson-gx-sram", "amlogic,meson-gxbb-sram", "mmio-sram"; + compatible = "mmio-sram"; reg = <0x0 0xc8000000 0x0 0x14000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x0 0xc8000000 0x14000>; - cpu_scp_lpri: scp-shmem@0 { - compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; + cpu_scp_lpri: scp-sram@0 { + compatible = "amlogic,meson-gxbb-scp-shmem"; reg = <0x13000 0x400>; }; - cpu_scp_hpri: scp-shmem@200 { - compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; + cpu_scp_hpri: scp-sram@200 { + compatible = "amlogic,meson-gxbb-scp-shmem"; reg = <0x13400 0x400>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 233eb1cd7967..d94b695916a3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -165,7 +165,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; 
interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index b0b12e389835..8828acb3fd4c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -138,7 +138,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 43b11e3dfe11..29976215e144 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -126,7 +126,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index 4c539881fbb7..e3d17569d98a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -147,7 +147,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts index 82b1c4851147..e034bbff8e66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts @@ -9,7 +9,7 @@ #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "libretech,aml-s805x-ac", "amlogic,s805x", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts index 3a1484e5b8e1..fbc687c9ff83 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts @@ -9,7 +9,7 @@ #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi new file mode 100644 index 000000000000..f9d705648426 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2020 BayLibre SAS + * Author: Neil Armstrong + */ + +#include "meson-gxl-s905x.dtsi" + +/ { + compatible = "amlogic,s805x", "amlogic,meson-gxl"; +}; + +/* The S805X Package doesn't seem to handle the 744MHz OPP correctly */ +&mali { + assigned-clocks = <&clkc CLKID_MALI_0_SEL>, + <&clkc CLKID_MALI_0>, + <&clkc CLKID_MALI>; /* Glitch free mux */ + assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>, + <0>, /* Do Nothing */ + <&clkc CLKID_MALI_0>; + assigned-clock-rates = <0>, /* Do Nothing */ + <666666666>, + <0>; /* Do Nothing */ +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts index b08c4537f260..b2ab05c22090 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts @@ -82,7 +82,7 @@ /* External PHY reset is shared with internal PHY Led signal */ 
reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 49ff0a7d0210..e3cfa24dca5a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -288,6 +288,11 @@ }; }; +&hwrng { + clocks = <&clkc CLKID_RNG0>; + clock-names = "core"; +}; + &i2c_A { clocks = <&clkc CLKID_I2C>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3f43716d5c45..c8a4205117f1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -251,7 +251,7 @@ reg = <0>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; @@ -395,7 +395,7 @@ #size-cells = <1>; compatible = "winbond,w25q16", "jedec,spi-nor"; reg = <0>; - spi-max-frequency = <3000000>; + spi-max-frequency = <104000000>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index c2bd4dbbf38c..8dccf91d68da 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -112,7 +112,7 @@ max-speed = <1000>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts index ea45ae0c71b7..8edbfe040805 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts @@ -64,7 +64,7 @@ /* External PHY reset is shared with internal PHY Led signal */ reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; interrupt-parent = <&gpio_intc>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 5cd4d35006d0..f72d29e33a9e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -114,7 +114,7 @@ max-speed = <1000>; reset-assert-us = <10000>; - reset-deassert-us = <30000>; + reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi index 8647da7d6609..f6694aad84db 100644 --- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi @@ -43,13 +43,13 @@ white { label = "vim3:white:sys"; - gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>; + gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; }; red { label = "vim3:red"; - gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>; + gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi index 521573f3a5ba..8ba3555ca369 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi @@ -90,7 +90,7 @@ opp-microvolt = <790000>; }; - opp-1512000000 { + opp-1500000000 { opp-hz = /bits/ 64 
<1500000000>; opp-microvolt = <800000>; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi index 15fe81738e94..dfb23dfc0b0f 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi @@ -8,7 +8,7 @@ gic: interrupt-controller@2c001000 { compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; #interrupt-cells = <3>; - #address-cells = <2>; + #address-cells = <1>; interrupt-controller; reg = <0x0 0x2c001000 0 0x1000>, <0x0 0x2c002000 0 0x2000>, diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi index f2c75c756039..906f51935b36 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi @@ -8,9 +8,9 @@ gic: interrupt-controller@2f000000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; - #address-cells = <2>; - #size-cells = <2>; - ranges; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x2f000000 0x100000>; interrupt-controller; reg = <0x0 0x2f000000 0x0 0x10000>, <0x0 0x2f100000 0x0 0x200000>, @@ -22,7 +22,7 @@ its: its@2f020000 { compatible = "arm,gic-v3-its"; msi-controller; - reg = <0x0 0x2f020000 0x0 0x20000>; + reg = <0x20000 0x20000>; }; }; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 3f78373f708a..05d1657170b4 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -107,51 +107,51 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 63>; - interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, - <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, - <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, - <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, - <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, - <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, - <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, - <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, - <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, - <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, - <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, - <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, - <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, - <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, - <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, - <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, - <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, - <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, - <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, - <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, - <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, - <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, - <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, - <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, - <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, - <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, - <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, - <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, - <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, - <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, - <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, - <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, - <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, - <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, - <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, - <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, - <0 0 36 &gic 0 0 GIC_SPI 36 
IRQ_TYPE_LEVEL_HIGH>, - <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, - <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, - <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, - <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, - <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, - <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 &gic 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan91c111"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -178,7 +178,7 @@ clock-output-names = "v2m:refclk32khz"; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 8c11660bbe40..c47f76b01c4b 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -62,35 +62,35 @@ <0x0 0x2c02f000 0 0x2000>, <0x0 0x2c04f000 0 0x2000>, <0x0 0x2c06f000 0 0x2000>; - #address-cells = <2>; + #address-cells = <1>; #interrupt-cells = <3>; - #size-cells = <2>; + #size-cells = <1>; interrupt-controller; interrupts = ; - ranges = <0 0 0 0x2c1c0000 0 0x40000>; + ranges = <0 0 0x2c1c0000 0x40000>; v2m_0: v2m@0 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0 0 0x10000>; + reg = <0 0x10000>; }; v2m@10000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x10000 0 0x10000>; + reg = 
<0x10000 0x10000>; }; v2m@20000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x20000 0 0x10000>; + reg = <0x20000 0x10000>; }; v2m@30000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x30000 0 0x10000>; + reg = <0x30000 0x10000>; }; }; @@ -519,10 +519,10 @@ <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; msi-parent = <&v2m_0>; status = "disabled"; iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */ @@ -786,19 +786,19 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 15>; - interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, - <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, - <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, - <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, - <0 0 4 &gic 0 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, - <0 0 5 &gic 0 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, - <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, - <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, - <0 0 8 &gic 0 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, - <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, - <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, - <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, - <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; }; site2: tlx@60000000 { @@ -808,6 +808,6 @@ ranges = <0 0 0x60000000 0x10000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0>; - interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index 9f60dacb4f80..1234a8cfc0a9 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi @@ -103,7 +103,7 @@ }; }; - flash@0,00000000 { + flash@0 { /* 2 * 32MiB NOR Flash memory mounted on CS0 */ compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>; @@ -120,7 +120,7 @@ }; }; - ethernet@2,00000000 { + ethernet@200000000 { compatible = "smsc,lan9118", "smsc,lan9115"; reg = <2 0x00000000 0x10000>; interrupts = <3>; @@ -133,7 +133,7 @@ vddvario-supply = <&mb_fixed_3v3>; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi 
b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi index 57b0b9d7f3fa..29e6962c70bd 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi @@ -9,7 +9,7 @@ motherboard { arm,v2m-memory-map = "rs2"; - iofpga@3,00000000 { + iofpga@300000000 { virtio-p9@140000 { compatible = "virtio,mmio"; reg = <0x140000 0x200>; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi index 03a7bf079c8f..ad20076357f5 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi @@ -17,14 +17,14 @@ #interrupt-cells = <1>; ranges; - flash@0,00000000 { + flash@0 { compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>, <4 0x00000000 0x04000000>; bank-width = <4>; }; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan91c111"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -51,7 +51,7 @@ clock-output-names = "v2m:refclk32khz"; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 15f7b0ed3836..39802066232e 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -745,7 +745,7 @@ }; qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi index 55259f973b5a..5401a646c840 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi @@ -4,21 +4,26 @@ */ usb { compatible = "simple-bus"; - dma-ranges; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x0 0x68500000 0x00400000>; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>; + + /* + * Internally, USB bus to the interconnect can only address up + * to 40-bit + */ + dma-ranges = <0 0 0 0 0x100 0x0>; usbphy0: usb-phy@0 { compatible = "brcm,sr-usb-combo-phy"; - reg = <0x00000000 0x100>; + reg = <0x0 0x00000000 0x0 0x100>; #phy-cells = <1>; status = "disabled"; }; xhci0: usb@1000 { compatible = "generic-xhci"; - reg = <0x00001000 0x1000>; + reg = <0x0 0x00001000 0x0 0x1000>; interrupts = ; phys = <&usbphy0 1>, <&usbphy0 0>; phy-names = "phy0", "phy1"; @@ -28,7 +33,7 @@ bdc0: usb@2000 { compatible = "brcm,bdc-v0.16"; - reg = <0x00002000 0x1000>; + reg = <0x0 0x00002000 0x0 0x1000>; interrupts = ; phys = <&usbphy0 0>, <&usbphy0 1>; phy-names = "phy0", "phy1"; @@ -38,21 +43,21 @@ usbphy1: usb-phy@10000 { compatible = "brcm,sr-usb-combo-phy"; - reg = <0x00010000 0x100>; + reg = <0x0 0x00010000 0x0 0x100>; #phy-cells = <1>; status = "disabled"; }; usbphy2: usb-phy@20000 { compatible = "brcm,sr-usb-hs-phy"; - reg = <0x00020000 0x100>; + reg = <0x0 0x00020000 0x0 0x100>; #phy-cells = <0>; status = "disabled"; }; xhci1: usb@11000 { compatible = "generic-xhci"; - reg = <0x00011000 0x1000>; + reg = <0x0 0x00011000 0x0 0x1000>; interrupts = ; phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>; phy-names = "phy0", "phy1", "phy2"; @@ -62,7 +67,7 @@ bdc1: usb@21000 { compatible = "brcm,bdc-v0.16"; - reg = <0x00021000 0x1000>; + reg 
= <0x0 0x00021000 0x0 0x1000>; interrupts = ; phys = <&usbphy2>; phy-names = "phy0"; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 6f90b0e62cba..148bdca8d9c9 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -389,7 +389,7 @@ s2mps13-pmic@66 { compatible = "samsung,s2mps13-pmic"; interrupt-parent = <&gpa0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; samsung,s2mps11-wrstbi-ground; diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 080e0f56e108..09aead2be000 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -90,7 +90,7 @@ s2mps15_pmic@66 { compatible = "samsung,s2mps15-pmic"; reg = <0x66>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; interrupt-parent = <&gpa0>; pinctrl-names = "default"; pinctrl-0 = <&pmic_irq>; @@ -157,6 +157,7 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <1150000>; regulator-enable-ramp-delay = <125>; + regulator-always-on; }; ldo8_reg: LDO8 { diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 0821489a874d..25549d9552ae 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -90,8 +90,10 @@ }; psci { - compatible = "arm,psci-0.2"; + compatible = "arm,psci"; method = "smc"; + cpu_off = <0x84000002>; + cpu_on = <0xC4000003>; }; soc: soc { @@ -494,13 +496,6 @@ pmu_system_controller: system-controller@105c0000 { compatible = "samsung,exynos7-pmu", "syscon"; reg = <0x105c0000 0x5000>; - - reboot: syscon-reboot { - compatible = "syscon-reboot"; - regmap = <&pmu_system_controller>; - offset = <0x0400>; - mask = <0x1>; - }; }; rtc: rtc@10590000 { @@ -650,3 +645,4 @@ }; #include "exynos7-pinctrl.dtsi" +#include "arm/exynos-syscon-restart.dtsi" diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index 337919366dc8..ec141c985289 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -177,6 +177,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = ; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 9589b15693d6..bd99fa68b763 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -103,7 +103,7 @@ reboot { compatible ="syscon-reboot"; regmap = <&rst>; - offset = <0xb0>; + offset = <0>; mask = <0x02>; }; @@ -673,7 +673,7 @@ ethernet@0,4 { compatible = "fsl,enetc-ptp"; reg = <0x000400 0 0 0 0>; - clocks = <&clockgen 4 0>; + clocks = <&clockgen 2 3>; little-endian; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index c084c7a4b6a6..b611d835dc25 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -241,6 +241,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = <0 75 0x4>; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", diff --git 
a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index d4c1da3d4bde..ca087918c250 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -244,6 +244,7 @@ ranges = <0x0 0x00 0x1700000 0x100000>; reg = <0x00 0x1700000 0x0 0x100000>; interrupts = ; + dma-coherent; sec_jr0: jr@10000 { compatible = "fsl,sec-v5.4-job-ring", @@ -304,7 +305,7 @@ dcfg: dcfg@1ee0000 { compatible = "fsl,ls1046a-dcfg", "syscon"; - reg = <0x0 0x1ee0000 0x0 0x10000>; + reg = <0x0 0x1ee0000 0x0 0x1000>; big-endian; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts index 13137451b438..b9f8b7aac8ff 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts @@ -231,7 +231,7 @@ ldo1_reg: LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <3000000>; + regulator-min-microvolt = <1600000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; @@ -239,7 +239,7 @@ ldo2_reg: LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <900000>; + regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index cffa8991880d..93b44efdbc52 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -124,7 +124,7 @@ #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 -#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index fde1849d36ca..7b178a77cc71 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -126,7 +126,7 @@ opp-1600000000 { opp-hz = /bits/ 64 <1600000000>; - opp-microvolt = <900000>; + opp-microvolt = <950000>; opp-supported-hw = <0xc>, <0x7>; clock-latency-ns = <150000>; opp-suspend; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts index 11c705d225d0..9ad1d43b8ce7 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts @@ -268,7 +268,7 @@ ldo1_reg: LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <3000000>; + regulator-min-microvolt = <1600000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; @@ -276,7 +276,7 @@ ldo2_reg: LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <900000>; + regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index 43c4db312146..546511b373d4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -616,7 +616,7 @@ reg = <0x30bd0000 0x10000>; interrupts = ; clocks = <&clk 
-		 <&clk IMX8MN_CLK_SDMA1_ROOT>;
+		 <&clk IMX8MN_CLK_AHB>;
 	clock-names = "ipg", "ahb";
 	#dma-cells = <3>;
 	fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
@@ -677,28 +677,6 @@
 	#index-cells = <1>;
 	reg = <0x32e40200 0x200>;
 };
-
-usbotg2: usb@32e50000 {
-	compatible = "fsl,imx8mn-usb", "fsl,imx7d-usb";
-	reg = <0x32e50000 0x200>;
-	interrupts = ;
-	clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>;
-	clock-names = "usb1_ctrl_root_clk";
-	assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>,
-			  <&clk IMX8MN_CLK_USB_CORE_REF>;
-	assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>,
-				 <&clk IMX8MN_SYS_PLL1_100M>;
-	fsl,usbphy = <&usbphynop2>;
-	fsl,usbmisc = <&usbmisc2 0>;
-	status = "disabled";
-};
-
-usbmisc2: usbmisc@32e50200 {
-	compatible = "fsl,imx8mn-usbmisc", "fsl,imx7d-usbmisc";
-	#index-cells = <1>;
-	reg = <0x32e50200 0x200>;
-};
-
 };
 dma_apbh: dma-controller@33000000 {
@@ -747,12 +725,4 @@
 	assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
 	clock-names = "main_clk";
 };
-
-usbphynop2: usbphynop2 {
-	compatible = "usb-nop-xceiv";
-	clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
-	assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
-	assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
-	clock-names = "main_clk";
-};
 };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 98cfe67b7db7..19b427f68dad 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -743,6 +743,7 @@
 };
 &usb3_phy0 {
+	vbus-supply = <&reg_5v_p>;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
index b94b02080a34..68e8fa172974 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 55a3d1c4bdf0..f1011bcd5ed5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -349,7 +349,7 @@ tmu: tmu@30260000 {
 	compatible = "fsl,imx8mq-tmu";
 	reg = <0x30260000 0x10000>;
-	interrupt = ;
+	interrupts = ;
 	clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
 	little-endian;
 	fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
@@ -516,6 +516,7 @@ gpc: gpc@303a0000 {
 	compatible = "fsl,imx8mq-gpc";
 	reg = <0x303a0000 0x10000>;
+	interrupts = ;
 	interrupt-parent = <&gic>;
 	interrupt-controller;
 	#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index e035cf195b19..8c4bfbaf3a80 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -530,6 +530,17 @@
 	status = "ok";
 	compatible = "adi,adv7533";
 	reg = <0x39>;
+	adi,dsi-lanes = <4>;
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		port@0 {
+			reg = <0>;
+		};
+		port@1 {
+			reg = <1>;
+		};
+	};
 };
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index c14205cd6bf5..3e47150c05ec 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -516,7 +516,7 @@
 	reg = <0x39>;
 	interrupt-parent = <&gpio1>;
 	interrupts = <1 2>;
-	pd-gpio = <&gpio0 4 0>;
+	pd-gpios = <&gpio0 4 0>;
 	adi,dsi-lanes = <4>;
 	#sound-dai-cells = <0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
index fbcf03f86c96..6226e7e80980 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
@@ -19,6 +19,16 @@
 	model = "Globalscale Marvell ESPRESSOBin Board";
 	compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
+	aliases {
+		ethernet0 = &eth0;
+		/* for dsa slave device */
+		ethernet1 = &switch0port1;
+		ethernet2 = &switch0port2;
+		ethernet3 = &switch0port3;
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
 	chosen {
 		stdout-path = "serial0:115200n8";
 	};
@@ -141,7 +151,7 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
-	port@0 {
+	switch0port0: port@0 {
 		reg = <0>;
 		label = "cpu";
 		ethernet = <&eth0>;
@@ -152,19 +162,19 @@
 		};
 	};
-	port@1 {
+	switch0port1: port@1 {
 		reg = <1>;
 		label = "wan";
 		phy-handle = <&switch0phy0>;
 	};
-	port@2 {
+	switch0port2: port@2 {
 		reg = <2>;
 		label = "lan0";
 		phy-handle = <&switch0phy1>;
 	};
-	port@3 {
+	switch0port3: port@3 {
 		reg = <3>;
 		label = "lan1";
 		phy-handle = <&switch0phy2>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 5f350cc71a2f..fad70c2df7bc 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -95,7 +95,7 @@
 };
 sfp: sfp {
-	compatible = "sff,sfp+";
+	compatible = "sff,sfp";
 	i2c-bus = <&i2c0>;
 	los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>;
 	tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>;
@@ -144,7 +144,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&rgmii_pins>;
 	phy-mode = "rgmii-id";
-	phy = <&phy1>;
+	phy-handle = <&phy1>;
 	status = "okay";
 };
@@ -171,6 +171,8 @@
 	marvell,pad-type = "sd";
 	vqmmc-supply = <&vsdio_reg>;
 	mmc-pwrseq = <&sdhci1_pwrseq>;
+	/* forbid SDR104 for FCC purposes */
+	sdhci-caps-mask = <0x2 0x0>;
 	status = "okay";
 };
@@ -200,7 +202,7 @@
 };
 partition@20000 {
-	label = "u-boot";
+	label = "a53-firmware";
 	reg = <0x20000 0x160000>;
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 000c135e39b7..28ad59ee6c34 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -156,7 +156,8 @@
 };
 nb_periph_clk: nb-periph-clk@13000 {
-	compatible = "marvell,armada-3700-periph-clock-nb";
+	compatible = "marvell,armada-3700-periph-clock-nb",
+		     "syscon";
 	reg = <0x13000 0x100>;
 	clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>, <&xtalclk>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index a211a046b2f2..b90d78a5724b 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -367,6 +367,7 @@
 	pinctrl-0 = <&cp0_copper_eth_phy_reset>;
 	reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
 	reset-assert-us = <10000>;
+	reset-deassert-us = <10000>;
 };
 switch0: switch0@4 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index dac51e98204c..e7e002d8b108 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -686,6 +686,8 @@
 	clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
 		 <&topckgen CLK_TOP_MSDC50_0_SEL>;
 	clock-names = "source", "hclk";
+	resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+	reset-names = "hrst";
 	status = "disabled";
 };
@@ -696,6 +698,8 @@
 	clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
 		 <&topckgen CLK_TOP_AXI_SEL>;
 	clock-names = "source", "hclk";
+	resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
+	reset-names = "hrst";
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 15f1842f6df3..c4f742e6d160 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -238,21 +238,21 @@
 	cpu_on = <0x84000003>;
 };
-clk26m: oscillator@0 {
+clk26m: oscillator0 {
 	compatible = "fixed-clock";
 	#clock-cells = <0>;
 	clock-frequency = <26000000>;
 	clock-output-names = "clk26m";
 };
-clk32k: oscillator@1 {
+clk32k: oscillator1 {
 	compatible = "fixed-clock";
 	#clock-cells = <0>;
 	clock-frequency = <32000>;
 	clock-output-names = "clk32k";
 };
-cpum_ck: oscillator@2 {
+cpum_ck: oscillator2 {
 	compatible = "fixed-clock";
 	#clock-cells = <0>;
 	clock-frequency = <0>;
@@ -268,19 +268,19 @@
 	sustainable-power = <1500>; /* milliwatts */
 	trips {
-		threshold: trip-point@0 {
+		threshold: trip-point0 {
 			temperature = <68000>;
 			hysteresis = <2000>;
 			type = "passive";
 		};
-		target: trip-point@1 {
+		target: trip-point1 {
 			temperature = <85000>;
 			hysteresis = <2000>;
 			type = "passive";
 		};
-		cpu_crit: cpu_crit@0 {
+		cpu_crit: cpu_crit0 {
 			temperature = <115000>;
 			hysteresis = <2000>;
 			type = "critical";
@@ -288,13 +288,13 @@
 };
 cooling-maps {
-	map@0 {
+	map0 {
 		trip = <&target>;
 		cooling-device = <&cpu0 0 0>,
 				 <&cpu1 0 0>;
 		contribution = <3072>;
 	};
-	map@1 {
+	map1 {
 		trip = <&target>;
 		cooling-device = <&cpu2 0 0>,
 				 <&cpu3 0 0>;
@@ -308,7 +308,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 	ranges;
-	vpu_dma_reserved: vpu_dma_mem_region {
+	vpu_dma_reserved: vpu_dma_mem_region@b7000000 {
 		compatible = "shared-dma-pool";
 		reg = <0 0xb7000000 0 0x500000>;
 		alignment = <0x1000>;
@@ -360,7 +360,7 @@
 	reg = <0 0x10005000 0 0x1000>;
 };
-pio: pinctrl@10005000 {
+pio: pinctrl@1000b000 {
 	compatible = "mediatek,mt8173-pinctrl";
 	reg = <0 0x1000b000 0 0x1000>;
 	mediatek,pctl-regmap = <&syscfg_pctl_a>;
@@ -567,7 +567,7 @@
 	status = "disabled";
 };
-gic: interrupt-controller@10220000 {
+gic: interrupt-controller@10221000 {
 	compatible = "arm,gic-400";
 	#interrupt-cells = <3>;
 	interrupt-parent = <&gic>;
@@ -1137,7 +1137,7 @@
 		 <&mmsys CLK_MM_DSI1_DIGITAL>,
 		 <&mipi_tx1>;
 	clock-names = "engine", "digital", "hs";
-	phy = <&mipi_tx1>;
+	phys = <&mipi_tx1>;
 	phy-names = "dphy";
 	status = "disabled";
@@ -1397,8 +1397,8 @@
 		"venc_lt_sel";
 	assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
 			  <&topckgen CLK_TOP_VENC_LT_SEL>;
-	assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
-				 <&topckgen CLK_TOP_UNIVPLL1_D2>;
+	assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
+				 <&topckgen CLK_TOP_VCODECPLL_370P5>;
 };
 vencltsys: clock-controller@19000000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index bdace01561ba..9df4782c90f3 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -10,18 +10,6 @@
model = "NVIDIA Jetson TX2 Developer Kit"; compatible = "nvidia,p2771-0000", "nvidia,tegra186"; - aconnect { - status = "okay"; - - dma-controller@2930000 { - status = "okay"; - }; - - interrupt-controller@2a40000 { - status = "okay"; - }; - }; - i2c@3160000 { power-monitor@42 { compatible = "ti,ina3221"; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 47cd831fcf44..9abf0cb1dd67 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -309,8 +309,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03400000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC1>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC1>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC1>; @@ -335,8 +336,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03420000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC2>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC2>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC2>; @@ -356,8 +358,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03440000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC3>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC3>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC3>; @@ -379,8 +382,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03460000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC4>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>, <&bpmp TEGRA186_CLK_PLLC4_VCO>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index 02909a48dfcd..7899759a12f8 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -32,7 +32,7 @@ phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 5) GPIO_ACTIVE_LOW>; phy-handle = <&phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; mdio { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 3c0cf54f0aab..0821754f0fd6 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -144,7 +144,7 @@ nvidia,schmitt = ; nvidia,lpdr = ; nvidia,enable-input = ; - nvidia,io-high-voltage = ; + nvidia,io-hv = ; nvidia,tristate = ; nvidia,pull = ; }; @@ -156,7 +156,7 @@ nvidia,schmitt = ; nvidia,lpdr = ; nvidia,enable-input = ; - nvidia,io-high-voltage = ; + nvidia,io-hv = ; nvidia,tristate = ; nvidia,pull = ; }; @@ -403,8 +403,9 @@ compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci"; reg = <0x03400000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC1>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA194_RESET_SDMMC1>; reset-names = "sdhci"; nvidia,pad-autocal-pull-up-offset-3v3-timeout = @@ 
 	compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci";
 	reg = <0x03440000 0x10000>;
 	interrupts = ;
-	clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
-	clock-names = "sdhci";
+	clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+		 <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+	clock-names = "sdhci", "tmclk";
 	resets = <&bpmp TEGRA194_RESET_SDMMC3>;
 	reset-names = "sdhci";
 	nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>;
@@ -448,8 +450,9 @@
 	compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci";
 	reg = <0x03460000 0x10000>;
 	interrupts = ;
-	clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
-	clock-names = "sdhci";
+	clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+		 <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+	clock-names = "sdhci", "tmclk";
 	assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
 			  <&bpmp TEGRA194_CLK_PLLC4>;
 	assigned-clock-parents =
@@ -689,7 +692,7 @@ hsp_aon: hsp@c150000 {
 	compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
-	reg = <0x0c150000 0xa0000>;
+	reg = <0x0c150000 0x90000>;
 	interrupts = ,
 		     ,
 		     ,
@@ -1151,7 +1154,7 @@
 };
 pcie@14100000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
 	reg = <0x00 0x14100000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x30000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1192,12 +1195,12 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x30100000 0x0 0x30100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+		  0xc3000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */
 		  0x82000000 0x0 0x40000000 0x12 0x30000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
 };
 pcie@14120000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
 	reg = <0x00 0x14120000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x32000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1238,12 +1241,12 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x32100000 0x0 0x32100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+		  0xc3000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */
 		  0x82000000 0x0 0x40000000 0x12 0x70000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
 };
 pcie@14140000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
 	reg = <0x00 0x14140000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x34000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1284,12 +1287,12 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x34100000 0x0 0x34100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+		  0xc3000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */
 		  0x82000000 0x0 0x40000000 0x12 0xb0000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
 };
 pcie@14160000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
 	reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x36000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1330,12 +1333,12 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x36100000 0x0 0x36100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+		  0xc3000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
 		  0x82000000 0x0 0x40000000 0x17 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
 };
 pcie@14180000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
 	reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1376,12 +1379,12 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+		  0xc3000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
 		  0x82000000 0x0 0x40000000 0x1b 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
 };
 pcie@141a0000 {
-	compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+	compatible = "nvidia,tegra194-pcie";
 	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
 	reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
 	       0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1426,10 +1429,109 @@
 	bus-range = <0x0 0xff>;
 	ranges = <0x81000000 0x0 0x3a100000 0x0 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */
-		  0xc2000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+		  0xc3000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
 		  0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
 };
+pcie_ep@14160000 {
+	compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
+	reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
+	       0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+	       0x00 0x36080000 0x0 0x00040000 /* DBI reg space (256K) */
+	       0x14 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+	reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+	status = "disabled";
+
+	num-lanes = <4>;
+	num-ib-windows = <2>;
+	num-ob-windows = <8>;
+
+	clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_4>;
+	clock-names = "core";
+
+	resets = <&bpmp TEGRA194_RESET_PEX0_CORE_4_APB>,
+		 <&bpmp TEGRA194_RESET_PEX0_CORE_4>;
+	reset-names = "apb", "core";
+
+	interrupts = ; /* controller interrupt */
+	interrupt-names = "intr";
+
+	nvidia,bpmp = <&bpmp 4>;
+
+	nvidia,aspm-cmrt-us = <60>;
+	nvidia,aspm-pwr-on-t-us = <20>;
+	nvidia,aspm-l0s-entrance-latency-us = <3>;
+};
+
+pcie_ep@14180000 {
+	compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+	power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+	reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
+	       0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+	       0x00 0x38080000 0x0 0x00040000 /* DBI reg space (256K) */
+	       0x18 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+	reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+	status = "disabled";
+
+	num-lanes = <8>;
+	num-ib-windows = <2>;
+	num-ob-windows = <8>;
+
+	clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
+	clock-names = "core";
+
+	resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
+		 <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
+	reset-names = "apb", "core";
+
+	interrupts = ; /* controller interrupt */
interrupt-names = "intr"; + + nvidia,bpmp = <&bpmp 0>; + + nvidia,aspm-cmrt-us = <60>; + nvidia,aspm-pwr-on-t-us = <20>; + nvidia,aspm-l0s-entrance-latency-us = <3>; + }; + + pcie_ep@141a0000 { + compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep"; + power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>; + reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ + 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */ + 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */ + reg-names = "appl", "atu_dma", "dbi", "addr_space"; + + status = "disabled"; + + num-lanes = <8>; + num-ib-windows = <2>; + num-ob-windows = <8>; + + pinctrl-names = "default"; + pinctrl-0 = <&clkreq_c5_bi_dir_state>; + + clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>; + clock-names = "core"; + + resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>, + <&bpmp TEGRA194_RESET_PEX1_CORE_5>; + reset-names = "apb", "core"; + + interrupts = ; /* controller interrupt */ + interrupt-names = "intr"; + + nvidia,bpmp = <&bpmp 5>; + + nvidia,aspm-cmrt-us = <60>; + nvidia,aspm-pwr-on-t-us = <20>; + nvidia,aspm-l0s-entrance-latency-us = <3>; + }; + sysram@40000000 { compatible = "nvidia,tegra194-sysram", "mmio-sram"; reg = <0x0 0x40000000 0x0 0x50000>; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 659753118e96..8a02b26d07cd 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -917,6 +917,7 @@ <&tegra_car 128>, /* hda2hdmi */ <&tegra_car 111>; /* hda2codec_2x */ reset-names = "hda", "hda2hdmi", "hda2codec_2x"; + power-domains = <&pd_sor>; status = "disabled"; }; @@ -1116,8 +1117,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 14>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1144,8 +1146,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0200 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC2>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 9>; reset-names = "sdhci"; pinctrl-names = "sdmmc-1v8-drv"; @@ -1161,8 +1164,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0400 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC3>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 69>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1184,8 +1188,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0600 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC4>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 15>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv"; diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 242aaea68804..38c0d74767e3 100644 --- 
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -508,7 +508,7 @@
 	pins = "gpio63", "gpio64", "gpio65", "gpio66",
 	       "gpio67", "gpio68";
 	drive-strength = <8>;
-	bias-pull-none;
+	bias-disable;
 };
 };
 cdc_pdm_lines_sus: pdm_lines_off {
@@ -521,7 +521,7 @@
 	pins = "gpio63", "gpio64", "gpio65", "gpio66",
 	       "gpio67", "gpio68";
 	drive-strength = <2>;
-	bias-disable;
+	bias-pull-down;
 };
 };
 };
@@ -537,7 +537,7 @@
 	pins = "gpio113", "gpio114", "gpio115",
 	       "gpio116";
 	drive-strength = <8>;
-	bias-pull-none;
+	bias-disable;
 };
 };
@@ -565,7 +565,7 @@ pinconf {
 	pins = "gpio110";
 	drive-strength = <8>;
-	bias-pull-none;
+	bias-disable;
 };
 };
@@ -591,7 +591,7 @@ pinconf {
 	pins = "gpio116";
 	drive-strength = <8>;
-	bias-pull-none;
+	bias-disable;
 };
 };
 ext_mclk_tlmm_lines_sus: mclk_lines_off {
@@ -619,7 +619,7 @@
 	pins = "gpio112", "gpio117", "gpio118",
 	       "gpio119";
 	drive-strength = <8>;
-	bias-pull-none;
+	bias-disable;
 };
 };
 ext_sec_tlmm_lines_sus: tlmm_lines_off {
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 5ea9fb8f2f87..449843f2184d 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -53,7 +53,7 @@
 	no-map;
 };
-reserved@8668000 {
+reserved@86680000 {
 	reg = <0x0 0x86680000 0x0 0x80000>;
 	no-map;
 };
@@ -66,7 +66,7 @@
 	qcom,client-id = <1>;
 };
-rfsa@867e00000 {
+rfsa@867e0000 {
 	reg = <0x0 0x867e0000 0x0 0x20000>;
 	no-map;
 };
@@ -175,14 +175,14 @@
 };
 thermal-zones {
-	cpu0_1-thermal {
+	cpu0-1-thermal {
 		polling-delay-passive = <250>;
 		polling-delay = <1000>;
 		thermal-sensors = <&tsens 4>;
 		trips {
-			cpu0_1_alert0: trip-point@0 {
+			cpu0_1_alert0: trip-point0 {
 				temperature = <75000>;
 				hysteresis = <2000>;
 				type = "passive";
@@ -205,14 +205,14 @@
 	};
 };
-cpu2_3-thermal {
+cpu2-3-thermal {
 	polling-delay-passive = <250>;
 	polling-delay = <1000>;
 	thermal-sensors = <&tsens 3>;
 	trips {
-		cpu2_3_alert0: trip-point@0 {
+		cpu2_3_alert0: trip-point0 {
 			temperature = <75000>;
 			hysteresis = <2000>;
 			type = "passive";
@@ -242,7 +242,7 @@
 	thermal-sensors = <&tsens 2>;
 	trips {
-		gpu_alert0: trip-point@0 {
+		gpu_alert0: trip-point0 {
 			temperature = <75000>;
 			hysteresis = <2000>;
 			type = "passive";
@@ -262,7 +262,7 @@
 	thermal-sensors = <&tsens 1>;
 	trips {
-		cam_alert0: trip-point@0 {
+		cam_alert0: trip-point0 {
 			temperature = <75000>;
 			hysteresis = <2000>;
 			type = "hot";
@@ -277,7 +277,7 @@
 	thermal-sensors = <&tsens 0>;
 	trips {
-		modem_alert0: trip-point@0 {
+		modem_alert0: trip-point0 {
 			temperature = <85000>;
 			hysteresis = <2000>;
 			type = "hot";
@@ -934,7 +934,7 @@
 	reg-names = "mdp_phys";
 	interrupt-parent = <&mdss>;
-	interrupts = <0 0>;
+	interrupts = <0>;
 	clocks = <&gcc GCC_MDSS_AHB_CLK>,
 		 <&gcc GCC_MDSS_AXI_CLK>,
@@ -966,7 +966,7 @@
 	reg-names = "dsi_ctrl";
 	interrupt-parent = <&mdss>;
-	interrupts = <4 0>;
+	interrupts = <4>;
 	assigned-clocks = <&gcc BYTE0_CLK_SRC>,
 			  <&gcc PCLK0_CLK_SRC>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index fbb8ce78f95b..d303df3887d9 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1681,16 +1681,16 @@
 		"csi_clk_mux",
 		"vfe0",
 		"vfe1";
-	interrupts = ,
-		     ,
-		     ,
-		     ,
-		     ,
-		     ,
-		     ,
-		     ,
-		     ,
-		     ;
+	interrupts = ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ,
+		     ;
 	interrupt-names = "csiphy0",
 		"csiphy1",
 		"csiphy2",
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index b6e304748a57..c0b197458665 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -73,18 +73,8 @@
 	reg = <0xc000>;
 	gpio-controller;
 	#gpio-cells = <2>;
-	interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc1 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc2 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc3 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc4 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc5 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc6 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc7 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc8 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xc9 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xca 0x0 IRQ_TYPE_NONE>,
-		     <0x0 0xcb 0x0 IRQ_TYPE_NONE>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
 };
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index 322379d5c31f..40b5d75a4a1d 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -62,18 +62,8 @@
 	reg = <0xc000>;
 	gpio-controller;
 	#gpio-cells = <2>;
-	interrupts = <0x2 0xc0 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc1 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc2 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc3 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc4 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc5 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc6 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc7 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc8 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xc9 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xca 0x0 IRQ_TYPE_NONE>,
-		     <0x2 0xcb 0x0 IRQ_TYPE_NONE>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
 };
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index eb0e9a090e42..cf05e0685d10 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -56,18 +56,8 @@
 	reg = <0xc000>;
 	gpio-controller;
 	#gpio-cells = <2>;
-	interrupts = <0x4 0xc0 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc1 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc2 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc3 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc4 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc5 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc6 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc7 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc8 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xc9 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xca 0x0 IRQ_TYPE_NONE>,
-		     <0x4 0xcb 0x0 IRQ_TYPE_NONE>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
 };
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 9dd2df1cbf47..40df4d95a47a 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -113,7 +113,7 @@
 wcd_codec: codec@f000 {
 	compatible = "qcom,pm8916-wcd-analog-codec";
-	reg = <0xf000 0x200>;
+	reg = <0xf000>;
 	reg-names = "pmic-codec-core";
 	clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
 	clock-names = "mclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 751651a6cd81..bf4fde88011c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -337,7 +337,9 @@ &gcc {
 	protected-clocks = ,
 			   ,
-			   ;
+			   ,
+			   ,
+			   ;
 };
 &pm8998_gpio {
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index ded120d3aef5..840d6b9bbb59 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -232,7 +232,9 @@ &gcc {
 	protected-clocks = ,
 			   ,
-			   ;
+			   ,
+			   ,
+			   ;
 };
 &i2c1 {
@@ -243,24 +245,23 @@ &i2c3 {
 	status = "okay";
 	clock-frequency = <400000>;
+	/* Overwrite pinctrl-0 from sdm845.dtsi */
+	pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
-	hid@15 {
+	tsel: hid@15 {
 		compatible = "hid-over-i2c";
 		reg = <0x15>;
 		hid-descr-addr = <0x1>;
-		interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>;
+		interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
 	};
-	hid@2c {
+	tsc2: hid@2c {
 		compatible = "hid-over-i2c";
 		reg = <0x2c>;
 		hid-descr-addr = <0x20>;
-		interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&i2c2_hid_active>;
+		interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
@@ -268,15 +269,15 @@
 	status = "okay";
 	clock-frequency = <400000>;
-	hid@10 {
+	tsc1: hid@10 {
 		compatible = "hid-over-i2c";
 		reg = <0x10>;
 		hid-descr-addr = <0x1>;
-		interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>;
+		interrupts-extended = <&tlmm 125 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&i2c6_hid_active>;
+		pinctrl-0 = <&i2c5_hid_active>;
 	};
 };
@@ -284,7 +285,7 @@
 	status = "okay";
 	clock-frequency = <400000>;
-	hid@5c {
+	ecsh: hid@5c {
 		compatible = "hid-over-i2c";
 		reg = <0x5c>;
 		hid-descr-addr = <0x1>;
@@ -292,7 +293,7 @@
 		interrupts-extended = <&tlmm 92 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&i2c12_hid_active>;
+		pinctrl-0 = <&i2c11_hid_active>;
 	};
 };
@@ -335,7 +336,7 @@ &tlmm {
 	gpio-reserved-ranges = <0 4>, <81 4>;
-	i2c2_hid_active: i2c2-hid-active {
+	i2c3_hid_active: i2c2-hid-active {
 		pins = <37>;
 		function = "gpio";
@@ -344,7 +345,7 @@
 		drive-strength = <2>;
 	};
-	i2c6_hid_active: i2c6-hid-active {
+	i2c5_hid_active: i2c5-hid-active {
 		pins = <125>;
 		function = "gpio";
@@ -353,7 +354,7 @@
 		drive-strength = <2>;
 	};
-	i2c12_hid_active: i2c12-hid-active {
+	i2c11_hid_active: i2c11-hid-active {
 		pins = <92>;
 		function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 8f23fcadecb8..9573da378826 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -336,7 +336,7 @@
 	       <0x0 0x03D00000 0x0 0x300000>;
 	reg-names = "west", "east", "north", "south";
 	interrupts = ;
-	gpio-ranges = <&tlmm 0 0 175>;
+	gpio-ranges = <&tlmm 0 0 176>;
 	gpio-controller;
 	#gpio-cells = <2>;
 	interrupt-controller;
diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi
index aaefc3ae56d5..dbdb8b093e73 100644
--- a/arch/arm64/boot/dts/renesas/cat875.dtsi
+++ b/arch/arm64/boot/dts/renesas/cat875.dtsi
@@ -22,7 +22,6 @@
 	status = "okay";
 	phy0: ethernet-phy@0 {
-		rxc-skew-ps = <1500>;
 		reg = <0>;
 		interrupt-parent = <&gpio2>;
 		interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
index 4280b190dc68..6a001cdfd38e 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
@@ -23,7 +23,6 @@
 	status = "okay";
 	phy0: ethernet-phy@0 {
-		rxc-skew-ps = <1500>;
 		reg = <0>;
 		interrupt-parent = <&gpio2>;
 		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index a1c2de90e470..73ded80a79ba 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1212,9 +1212,8 @@
 	reg = <0 0xe6ea0000 0 0x0064>;
 	interrupts = ;
 	clocks = <&cpg CPG_MOD 210>;
-	dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-	       <&dmac2 0x43>, <&dmac2 0x42>;
-	dma-names = "tx", "rx", "tx", "rx";
+	dmas = <&dmac0 0x43>, <&dmac0 0x42>;
+	dma-names = "tx", "rx";
 	power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
 	resets = <&cpg 210>;
 	#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 461a47ea656d..4b9d4f1bbe01 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -990,8 +990,8 @@
 	reg = <1>;
-	vin4csi41: endpoint@2 {
-		reg = <2>;
+	vin4csi41: endpoint@3 {
+		reg = <3>;
 		remote-endpoint = <&csi41vin4>;
 	};
 };
@@ -1018,8 +1018,8 @@
 	reg = <1>;
-	vin5csi41: endpoint@2 {
-		reg = <2>;
+	vin5csi41: endpoint@3 {
+		reg = <3>;
 		remote-endpoint = <&csi41vin5>;
 	};
 };
@@ -1046,8 +1046,8 @@
 	reg = <1>;
-	vin6csi41: endpoint@2 {
-		reg = <2>;
+	vin6csi41: endpoint@3 {
+		reg = <3>;
 		remote-endpoint = <&csi41vin6>;
 	};
 };
@@ -1074,8 +1074,8 @@
 	reg = <1>;
-	vin7csi41: endpoint@2 {
-		reg = <2>;
+	vin7csi41: endpoint@3 {
+		reg = <3>;
 		remote-endpoint = <&csi41vin7>;
 	};
 };
@@ -1318,6 +1318,7 @@ ipmmu_vip0: mmu@e7b00000 {
 	compatible = "renesas,ipmmu-r8a77980";
 	reg = <0 0xe7b00000 0 0x1000>;
+	renesas,ipmmu-main = <&ipmmu_mm 4>;
 	power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
 	#iommu-cells = <1>;
 };
@@ -1325,6 +1326,7 @@ ipmmu_vip1: mmu@e7960000 {
 	compatible = "renesas,ipmmu-r8a77980";
 	reg = <0 0xe7960000 0 0x1000>;
+	renesas,ipmmu-main = <&ipmmu_mm 11>;
 	power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
 	#iommu-cells = <1>;
 };
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 455954c3d98e..dabee157119f 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1168,9 +1168,8 @@
 	reg = <0 0xe6ea0000 0 0x0064>;
 	interrupts = ;
 	clocks = <&cpg CPG_MOD 210>;
-	dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-	       <&dmac2 0x43>, <&dmac2 0x42>;
-	dma-names = "tx", "rx", "tx", "rx";
+	dmas = <&dmac0 0x43>, <&dmac0 0x42>;
+	dma-names = "tx", "rx";
 	power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
 	resets = <&cpg 210>;
 	#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 3ef89171538f..d8fccf3d4987 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -470,6 +470,7 @@
 	mmc-hs200-1_8v;
 	mmc-hs400-1_8v;
 	non-removable;
+	full-pwr-cycle-in-suspend;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 9e09909a510a..98b014a8f916 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -860,7 +860,7 @@ vopl_mmu: iommu@ff470f00 {
 	compatible = "rockchip,iommu";
 	reg = <0x0 0xff470f00 0x0 0x100>;
-	interrupts = ;
+	interrupts = ;
 	interrupt-names = "vopl_mmu";
 	clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
 	clock-names = "aclk", "hclk";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index 49c4b96da3d4..05265b38cc02 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -86,13 +86,13 @@
 	assigned-clock-rate = <50000000>;
 	assigned-clocks = <&cru SCLK_MAC2PHY>;
 	assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
-
+
 	status = "okay";
 };
 &i2c1 {
 	status = "okay";
-	rk805: rk805@18 {
+	rk805: pmic@18 {
 		compatible = "rockchip,rk805";
 		reg = <0x18>;
 		interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index bb40c163b05d..6c3368f795ca 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -333,6 +333,7 @@
 };
 &usb20_otg {
+	dr_mode = "host";
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 62936b432f9a..304fad1a0b57 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -169,7 +169,7 @@ &i2c1 {
 	status = "okay";
-	rk805: rk805@18 {
+	rk805: pmic@18 {
 		compatible = "rockchip,rk805";
 		reg = <0x18>;
 		interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 31cc1541f1f5..e0ed323935a4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -1190,8 +1190,8 @@ uart0 {
 	uart0_xfer: uart0-xfer {
-		rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>,
-				<1 RK_PB0 1 &pcfg_pull_none>;
+		rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>,
+				<1 RK_PB0 1 &pcfg_pull_up>;
 	};
 	uart0_cts: uart0-cts {
@@ -1209,8 +1209,8 @@ uart1 {
 	uart1_xfer: uart1-xfer {
-		rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>,
-				<3 RK_PA6 4 &pcfg_pull_none>;
+		rockchip,pins = <3 RK_PA4 4 &pcfg_pull_none>,
+				<3 RK_PA6 4 &pcfg_pull_up>;
 	};
 	uart1_cts: uart1-cts {
@@ -1228,15 +1228,15 @@ uart2-0 {
 	uart2m0_xfer: uart2m0-xfer {
-		rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>,
-				<1 RK_PA1 2 &pcfg_pull_none>;
+		rockchip,pins = <1 RK_PA0 2 &pcfg_pull_none>,
+				<1 RK_PA1 2 &pcfg_pull_up>;
 	};
 };
 uart2-1 {
 	uart2m1_xfer: uart2m1-xfer {
-		rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>,
-				<2 RK_PA1 1 &pcfg_pull_none>;
+		rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
+				<2 RK_PA1 1 &pcfg_pull_up>;
 	};
 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
index e17311e09082..216aafd90e7f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
@@ -156,7 +156,7 @@
 	pinctrl-0 = <&rgmii_pins>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 50000>;
-	snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+	snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
 	tx_delay = <0x10>;
 	rx_delay = <0x10>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 62ea288a1a70..45b86933c6ea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -101,7 +101,7 @@ vcc5v0_host: vcc5v0-host-regulator {
 	compatible = "regulator-fixed";
-	gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+	gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
 	enable-active-low;
 	pinctrl-names = "default";
 	pinctrl-0 = <&vcc5v0_host_en>;
@@ -157,7 +157,7 @@
 	phy-mode = "rgmii";
 	pinctrl-names = "default";
 	pinctrl-0 = <&rgmii_pins>;
-	snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+	snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 50000>;
 	tx_delay = <0x10>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index cede1ad81be2..9d6ed8cda2c8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -29,6 +29,9 @@
 	i2c6 = &i2c6;
 	i2c7 = &i2c7;
 	i2c8 = &i2c8;
+	mmc0 = &sdio0;
+	mmc1 = &sdmmc;
+	mmc2 = &sdhci;
 	serial0 = &uart0;
 	serial1 = &uart1;
 	serial2 = &uart2;
@@ -229,6 +232,7 @@
 	reg = <0x0 0xf8000000 0x0 0x2000000>,
 	      <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
+	device_type = "pci";
 	#address-cells = <3>;
 	#size-cells = <2>;
 	#interrupt-cells = <1>;
@@ -247,7 +251,6 @@
 			<0 0 0 2 &pcie0_intc 1>,
 			<0 0 0 3 &pcie0_intc 2>,
 			<0 0 0 4 &pcie0_intc 3>;
-	linux,pci-domain = <0>;
 	max-link-speed = <1>;
 	msi-map = <0x0 &its 0x0 0x1000>;
 	phys = <&pcie_phy 0>, <&pcie_phy 1>,
@@ -410,7 +413,7 @@
= "usb3-otg"; status = "disabled"; - usbdrd_dwc3_0: dwc3 { + usbdrd_dwc3_0: usb@fe800000 { compatible = "snps,dwc3"; reg = <0x0 0xfe800000 0x0 0x100000>; interrupts = ; @@ -446,7 +449,7 @@ reset-names = "usb3-otg"; status = "disabled"; - usbdrd_dwc3_1: dwc3 { + usbdrd_dwc3_1: usb@fe900000 { compatible = "snps,dwc3"; reg = <0x0 0xfe900000 0x0 0x100000>; interrupts = ; @@ -1881,10 +1884,10 @@ gpu: gpu@ff9a0000 { compatible = "rockchip,rk3399-mali", "arm,mali-t860"; reg = <0x0 0xff9a0000 0x0 0x10000>; - interrupts = , - , - ; - interrupt-names = "gpu", "job", "mmu"; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; clocks = <&cru ACLK_GPU>; power-domains = <&power RK3399_PD_GPU>; status = "disabled"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index b658f2b641e2..3348a32f7d0b 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi @@ -718,7 +718,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index d6f6cee4d549..6537c69de3dd 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -509,7 +509,7 @@ clocks = <&sys_clk 6>; reset-names = "ether"; resets = <&sys_rst 6>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 0>; @@ -530,7 +530,7 @@ clocks = <&sys_clk 7>; reset-names = "ether"; resets = <&sys_rst 7>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; local-mac-address = [00 00 00 00 00 00]; socionext,syscon-phy-mode = <&soc_glue 1>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 799c75fa7981..34a30842c47a 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -307,6 +307,7 @@ interrupts = ; dma-coherent; power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */ @@ -346,6 +347,7 @@ interrupts = ; dma-coherent; power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>; + clocks = <&k3_clks 152 2>; assigned-clocks = <&k3_clks 152 2>; assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. 
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9aa67340a4d8..a2645262f862 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -419,7 +419,7 @@
 };
 i2c0: i2c@ff020000 {
-	compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+	compatible = "cdns,i2c-r1p14";
 	status = "disabled";
 	interrupt-parent = <&gic>;
 	interrupts = <0 17 4>;
@@ -429,7 +429,7 @@
 };
 i2c1: i2c@ff030000 {
-	compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+	compatible = "cdns,i2c-r1p14";
 	status = "disabled";
 	interrupt-parent = <&gic>;
 	interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/oc.config b/arch/arm64/configs/oc.config
new file mode 100644
index 000000000000..9157c78e3899
--- /dev/null
+++ b/arch/arm64/configs/oc.config
@@ -0,0 +1,1250 @@
+CONFIG_LOCALVERSION="-tlinux4"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_USELIB=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_DEFAULT_DISABLED=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_NUMA_BALANCING=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SGETMASK_SYSCALL=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_PHYTIUM=y
+CONFIG_ARCH_XGENE=y
+CONFIG_FIX_OPTION_ROM=y
+CONFIG_KPATCH=m
+# CONFIG_ARM64_ERRATUM_843419 is not set
+CONFIG_ARM64_64K_PAGES=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=8
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_COMPAT=y
+# CONFIG_ARM64_HW_AFDBM is not set
+# CONFIG_ARM64_PAN is not set
+CONFIG_ARM64_MODULE_PLTS=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ACPI_CPPC_CPUFREQ=y
+CONFIG_QORIQ_CPUFREQ=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMI_SYSFS=m
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE=m
+CONFIG_ACPI=y
+# CONFIG_ACPI_FAN is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_VHOST_NET=m
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=m
+CONFIG_CRYPTO_SHA2_ARM64_CE=m
+CONFIG_CRYPTO_GHASH_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=m
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=m
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AIX_PARTITION=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_BINFMT_MISC=m
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_FO=m
+CONFIG_IP_VS_OVF=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_FLOW_TABLE_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_FLOW_TABLE_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_DECNET_NF_GRABULATOR=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_TIPC=m
+CONFIG_TIPC_MEDIA_IB=y
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_DECNET=m
+CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_PHONET=m
+CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_DEBUGFS=y
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_CANID=m
+CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_CONNMARK=m
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
+CONFIG_BATMAN_ADV_NC=y
+CONFIG_BATMAN_ADV_DEBUGFS=y
+CONFIG_VSOCKETS=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_HSR=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +CONFIG_HAMRADIO=y +CONFIG_AX25=m +CONFIG_NETROM=m +CONFIG_ROSE=m +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_YAM=m +CONFIG_CAN=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_GRCAN=m +CONFIG_CAN_XILINXCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_M_CAN=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_MCP251X=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_WIRELESS is not set +CONFIG_WIMAX=y +CONFIG_RFKILL=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_RDMA=m +CONFIG_CAIF=m +CONFIG_CAIF_USB=m +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y +CONFIG_NFC_TRF7970A=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544_I2C=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_MRVL_SPI=m +CONFIG_NFC_ST21NFCA_I2C=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_ST_NCI_SPI=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5_I2C=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCI_STUB=m +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI_HISI=y +CONFIG_PCIE_KIRIN=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_CONNECTOR=y +CONFIG_MTD=m +CONFIG_MTD_UBI=m +CONFIG_OF_OVERLAY=y +CONFIG_PARPORT=m +CONFIG_ZRAM=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_DRBD=m +CONFIG_DRBD_FAULT_INJECTION=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m +CONFIG_EEPROM_93CX6=y +CONFIG_TI_ST=m +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_HISI_SAS_PCI=y +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_ARCMSR=m +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_VIRTIO=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_SATA_SIL24=y +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m 
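(Editorial aside; not part of the patch.) The CONFIG_CAN_* drivers enabled earlier in this hunk are all consumed through the common SocketCAN API rather than per-driver interfaces. A minimal receiver sketch follows; "can0" is an assumed interface name:

	/* Userspace sketch: read one frame from a SocketCAN interface. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	int main(void)
	{
		int fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
		struct ifreq ifr = { 0 };
		struct sockaddr_can addr = { .can_family = AF_CAN };
		struct can_frame frame;

		strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed interface */
		if (fd < 0 || ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
			perror("can");
			return 1;
		}
		addr.can_ifindex = ifr.ifr_ifindex;
		bind(fd, (struct sockaddr *)&addr, sizeof(addr));

		if (read(fd, &frame, sizeof(frame)) > 0)
			printf("id=0x%x dlc=%u\n", frame.can_id, frame.can_dlc);
		return 0;
	}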
+CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SAS=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_TUN=y +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_CAIF_VIRTIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_BE2NET=m +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_HIX5HD2_GMAC=m +CONFIG_HISI_FEMAC=m +CONFIG_HIP04_ETH=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_HINIC=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=y +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_FPGA_IPSEC=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PCI=m +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MDIO_HISI_FEMAC=y +CONFIG_MICREL_PHY=m +CONFIG_PLIP=m +CONFIG_PPP=m +CONFIG_PPPOE=m +CONFIG_SLIP=m +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_RTL8152=m +# CONFIG_WLAN is not set +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=m +# CONFIG_INPUT_LEDS is not set +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_ATKBD=m +CONFIG_MOUSE_PS2=m +# CONFIG_MOUSE_PS2_FOCALTECH is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_HISI_POWERKEY=y +CONFIG_SERIO_I8042=m +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +CONFIG_LEGACY_PTY_COUNT=0 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_DMA is not set 
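(Editorial aside; not part of the patch.) This hunk mixes built-in and modular network drivers, e.g. CONFIG_IGB=y next to CONFIG_I40E=m. Built-in code that optionally calls into such a driver has to guard with IS_REACHABLE() rather than IS_ENABLED(), because a "=m" symbol exists but is not linkable from vmlinux. A hedged sketch (example_notify is hypothetical):

	/* Sketch: guarding an optional cross-driver call when the callee may be =m. */
	#include <linux/kconfig.h>

	static void example_notify(void)
	{
		if (IS_REACHABLE(CONFIG_I40E)) {
			/* True when CONFIG_I40E=y, or when CONFIG_I40E=m and
			 * this code is itself built as a module; only then can
			 * the i40e symbols actually be linked against. */
		}
	}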
+CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_RAW_DRIVER=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_DESIGNWARE_PCI=y +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_SLAVE=y +CONFIG_SPI=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_PL061=y +CONFIG_POWER_RESET_HISI=y +CONFIG_SENSORS_ALTRA=m +CONFIG_SENSORS_W83795=y +CONFIG_SENSORS_XGENE=m +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +CONFIG_REGULATOR_GPIO=m +CONFIG_RC_CORE=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_DRM=m +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE=m +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_GART_DEBUGFS=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_HISI_HIBMC=m +CONFIG_DRM_HISI_KIRIN=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_CIRRUS=y +CONFIG_FB_UVESA=m +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_DEBUG=y +CONFIG_FB_VIRTUAL=y +CONFIG_FB_SIMPLE=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=m +CONFIG_SND=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_MULTITOUCH=m +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_ARCH_PHYTIUM_1500A_USB_RESET_PROBE_REMOVE=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_ACM=y +CONFIG_USB_PRINTER=m +CONFIG_USB_STORAGE=y +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=y +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_GADGET=y +CONFIG_USB_ETH=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_ALTRA_SPCI=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_DS1307=y +CONFIG_RTC_DRV_PCF8563=y +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_PL031=y +CONFIG_DMADEVICES=y +CONFIG_UIO=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PLATFORM=m +CONFIG_VFIO_AMBA=m 
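(Editorial aside; not part of the patch.) CONFIG_I2C_CHARDEV=y in this hunk exposes each adapter as /dev/i2c-N. A minimal sketch of reading one byte; the bus number and the 0x50 slave address are assumptions for illustration:

	/* Userspace sketch: one-byte read through the i2c-dev interface. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2c-dev.h>

	int main(void)
	{
		unsigned char byte;
		int fd = open("/dev/i2c-0", O_RDWR);		/* assumed bus */

		if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0) {	/* assumed address */
			perror("i2c");
			return 1;
		}
		if (read(fd, &byte, 1) == 1)
			printf("0x%02x\n", byte);
		close(fd);
		return 0;
	}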
+CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET=m +CONFIG_VFIO_PLATFORM_AMDXGBE_RESET=m +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_STAGING=y +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_VIRTIO_IOMMU=y +CONFIG_SMMU_BYPASS_DEV=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_JFS_STATISTICS=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_FS_SECURITY=y +CONFIG_FS_DAX=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_AUTOFS4_FS=m +CONFIG_FUSE_FS=y +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_EFIVAR_FS=y +CONFIG_ADFS_FS=m +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_CMODE_FAVOURLZO=y +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_QNX6FS_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=y +CONFIG_PSTORE_RAM=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_UFS_FS_WRITE=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +CONFIG_AFS_FSCACHE=y +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m 
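(Editorial aside; not part of the patch.) The CONFIG_NLS_CODEPAGE_* tables that start here and continue below are loaded on demand by filesystems such as vfat and isofs via the NLS core. A kernel-side sketch; the charset name is illustrative:

	/* Kernel-side sketch: resolving an NLS table enabled in this defconfig. */
	#include <linux/nls.h>

	static struct nls_table *example_nls(void)
	{
		/* CONFIG_NLS_CODEPAGE_437=y resolves immediately; for the =m
		 * entries, load_nls() fires a module request first. */
		return load_nls("cp437");
	}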
+CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_DLM=m +CONFIG_ENCRYPTED_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_IMA=y +CONFIG_IMA_APPRAISE=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux" +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_CORDIC=m +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_XZ_DEC_TEST=m +CONFIG_FONTS=y +CONFIG_PRINTK_TIME=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_GDB_SCRIPTS=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_RBTREE_TEST=m +CONFIG_INTERVAL_TREE_TEST=m +CONFIG_PERCPU_TEST=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_HEXDUMP=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_PRINTF=m +CONFIG_TEST_LKM=m +CONFIG_TEST_USER_COPY=m +CONFIG_TEST_BPF=m +CONFIG_TEST_FIRMWARE=m +CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m +CONFIG_MEMTEST=y +CONFIG_KGDB=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_KEYBOARD=y +CONFIG_PID_IN_CONTEXTIDR=y diff --git a/arch/arm64/crypto/aes-glue.c 
b/arch/arm64/crypto/aes-glue.c index aa57dc639f77..aa13344a3a5e 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #define aes_mac_update neon_aes_mac_update MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); #endif -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS) +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS) MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); @@ -668,7 +668,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req) } static struct skcipher_alg aes_algs[] = { { -#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS) +#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS) .base = { .cra_name = "__ecb(aes)", .cra_driver_name = "__ecb-aes-" MODE, diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c index 895d3727c1fb..c5405e6a6db7 100644 --- a/arch/arm64/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c @@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, return crypto_nhpoly1305_update(desc, src, srclen); do { - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_neon_begin(); crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index bdc1b6d7aff7..05cdad31b022 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -19,6 +19,7 @@ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha1"); struct sha1_ce_state { struct sha1_state sst; diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 604a01a4ede6..1de80293ac31 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -19,6 +19,8 @@ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); struct sha256_ce_state { struct sha256_state sst; diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index 9a4bbfc45f40..ddf7aca9ff45 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -23,6 +23,10 @@ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-512"); asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks, int md_len); diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index 2369540040aa..6dfcb4f3e776 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -23,6 +23,8 @@ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks); diff --git a/arch/arm64/include/asm/acpi.h
b/arch/arm64/include/asm/acpi.h index b263e239cb59..a45366c3909b 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -31,14 +32,14 @@ * is therefore used to delimit the MADT GICC structure minimum length * appropriately. */ -#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \ +#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \ struct acpi_madt_generic_interrupt, efficiency_class) #define BAD_MADT_GICC_ENTRY(entry, end) \ (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \ (unsigned long)(entry) + (entry)->header.length > (end)) -#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \ +#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \ spe_interrupt) + sizeof(u16)) /* Basic configuration for ACPI */ diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 5e5dc05d63a0..3cb3c4ab3ea5 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -73,13 +73,13 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - ".pushsection .altinstr_replacement, \"a\"\n" \ + ".subsection 1\n" \ "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . - (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ @@ -117,11 +117,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { } 662: .pushsection .altinstructions, "a" altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 663: \insn2 -664: .popsection - .org . - (664b-663b) + (662b-661b) +664: .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) + .previous .endif .endm @@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .pushsection .altinstructions, "a" altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 .align 2 /* So GAS knows label 661 is suitably aligned */ 661: .endm @@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .macro alternative_else 662: .if .Lasm_alt_mode==0 - .pushsection .altinstr_replacement, "ax" + .subsection 1 .else - .popsection + .previous .endif 663: .endm @@ -191,11 +191,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { } */ .macro alternative_endif 664: - .if .Lasm_alt_mode==0 - .popsection - .endif .org . - (664b-663b) + (662b-661b) .org . 
- (662b-661b) + (664b-663b) + .if .Lasm_alt_mode==0 + .previous + .endif .endm /* diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 89e4c8b79349..ee9bdaa40532 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void) return read_sysreg_s(SYS_ICC_PMR_EL1); } -static inline void gic_write_pmr(u32 val) +static __always_inline void gic_write_pmr(u32 val) { write_sysreg_s(val, SYS_ICC_PMR_EL1); } diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 7ae54d7d333a..9f0ec21d6327 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround { u64 (*read_cntvct_el0)(void); int (*set_next_event_phys)(unsigned long, struct clock_event_device *); int (*set_next_event_virt)(unsigned long, struct clock_event_device *); + bool disable_compat_vdso; }; DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..4a4258f17c86 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -462,6 +462,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* + * Deprecated! Use SYM_FUNC_{START,START_WEAK,END}_PI instead. * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. */ diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 9543b5e0534d..6e0f48ddfc65 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -17,7 +17,7 @@ #include #define ATOMIC_OP(op) \ -static inline void arch_##op(int i, atomic_t *v) \ +static __always_inline void arch_##op(int i, atomic_t *v) \ { \ __lse_ll_sc_body(op, i, v); \ } @@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, op) \ -static inline int arch_##op##name(int i, atomic_t *v) \ +static __always_inline int arch_##op##name(int i, atomic_t *v) \ { \ return __lse_ll_sc_body(op##name, i, v); \ } @@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return) #undef ATOMIC_FETCH_OPS #define ATOMIC64_OP(op) \ -static inline void arch_##op(long i, atomic64_t *v) \ +static __always_inline void arch_##op(long i, atomic64_t *v) \ { \ __lse_ll_sc_body(op, i, v); \ } @@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, op) \ -static inline long arch_##op##name(long i, atomic64_t *v) \ +static __always_inline long arch_##op##name(long i, atomic64_t *v) \ { \ return __lse_ll_sc_body(op##name, i, v); \ } @@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return) #undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OPS -static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v) { return __lse_ll_sc_body(atomic64_dec_if_positive, v); } diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 43da6dd29592..806e9dc2a852 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -11,6 +11,7 @@ #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 #define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 @@ -18,7 +19,7 @@ #define CTR_DIC_SHIFT 29 #define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | 0xf 
<< CTR_IMINLINE_SHIFT) + (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 665c78e0665a..3e7dda6f1ab1 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -79,7 +79,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) * IPI all online CPUs so that they undergo a context synchronization * event and are forced to refetch the new instructions. */ -#ifdef CONFIG_KGDB + /* * KGDB performs cache maintenance with interrupts disabled, so we * will deadlock trying to IPI the secondary CPUs. In theory, we can @@ -89,9 +89,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) * the patching operation, so we don't need extra IPIs here anyway. * In which case, add a KGDB-specific bodge and return early. */ - if (kgdb_connected && irqs_disabled()) + if (in_dbg_master()) return; -#endif + kick_all_cpus_sync(); } diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index d064a50deb5f..5665a3fc14be 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -19,16 +19,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h index 0ece64a26c8c..0c7910447235 100644 --- a/arch/arm64/include/asm/clocksource.h +++ b/arch/arm64/include/asm/clocksource.h @@ -2,8 +2,11 @@ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H +#include + struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? */ + /* Usable for direct VDSO access? */ + enum vdso_arch_clockmode clock_mode; }; #endif diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index b0d53a265f1d..7b4172ce497c 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -4,6 +4,9 @@ */ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H + +#include + #ifdef CONFIG_COMPAT /* @@ -13,8 +16,6 @@ #include #include -#include - #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ #define COMPAT_UTS_MACHINE "armv8b\0\0" diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ac1dbca3d0cd..1dc3c762fdcb 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -54,7 +54,8 @@ #define ARM64_WORKAROUND_1463225 44 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 +#define ARM64_WORKAROUND_1542419 47 -#define ARM64_NCAPS 47 +#define ARM64_NCAPS 48 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 9cde5d2e768f..ccae05da98a7 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -262,6 +262,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; /* * CPU feature detected at boot time based on feature of one or more CPUs. * All possible conflicts for a late CPU are ignored. 
+ * NOTE: this means that a late CPU with the feature will *not* cause the + * capability to be advertised by cpus_have_*cap()! */ #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ @@ -601,7 +603,7 @@ static inline bool system_supports_generic_auth(void) cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); } -static inline bool system_uses_irq_prio_masking(void) +static __always_inline bool system_uses_irq_prio_masking(void) { return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index aca07c2f6e6e..679db0297a5e 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -54,6 +54,7 @@ #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 +#define ARM_CPU_IMP_PHYTIUM 0x70 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -73,6 +74,11 @@ #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define APM_CPU_PART_POTENZA 0x000 +#define PHYTIUM_CPU_PART_1500A 0X660 +#define PHYTIUM_CPU_PART_2000AHK 0X661 +#define PHYTIUM_CPU_PART_2000PLUS 0X662 +#define PHYTIUM_CPU_PART_2004 0X663 +#define PHYTIUM_CPU_PART_2500 0X663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 @@ -98,6 +104,11 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) @@ -121,6 +132,9 @@ #define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0)) #define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) +#define PHYTIUM_CPU_PART_2500 0X663 +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7619f473155f..d825e3585e28 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el); void user_rewind_single_step(struct task_struct *task); void user_fastforward_single_step(struct task_struct *task); +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task); void kernel_enable_single_step(struct pt_regs *regs); void kernel_disable_single_step(void); diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index b618017205a3..344b09260db6 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -96,7 +96,28 @@ */ #define elf_check_arch(x) ((x)->e_machine == EM_AARCH64) -#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X) +/* + * An executable for which 
elf_read_implies_exec() returns TRUE will + * have the READ_IMPLIES_EXEC personality flag set automatically. + * + * The decision process for determining the results are: + * + * CPU*: | arm32 | arm64 | + * ELF: | | | + * ---------------------|------------|------------| + * missing PT_GNU_STACK | exec-all | exec-none | + * PT_GNU_STACK == RWX | exec-stack | exec-stack | + * PT_GNU_STACK == RW | exec-none | exec-none | + * + * exec-all : all PROT_READ user mappings are executable, except when + * backed by files on a noexec-filesystem. + * exec-none : only PROT_EXEC user mappings are executable. + * exec-stack: only the stack and PROT_EXEC user mappings are executable. + * + * *all arm64 CPUs support NX, so there is no "lacks NX" column. + * + */ +#define compat_elf_read_implies_exec(ex, stk) (stk == EXSTACK_DEFAULT) #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index bb313dde58a4..28dfb96e9240 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -341,6 +341,7 @@ __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0) __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) +__AARCH64_INSN_FUNCS(dc_zva, 0xFFFFFFE0, 0xD50B7420) #undef __AARCH64_INSN_FUNCS diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index ddf9d762ac62..9be64c0ad31f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -72,11 +72,12 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ - HCR_FMO | HCR_IMO) + HCR_FMO | HCR_IMO | HCR_PTW ) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) @@ -275,6 +276,7 @@ #define CPTR_EL2_DEFAULT CPTR_EL2_RES1 /* Hyp Debug Configuration Register bits */ +#define MDCR_EL2_TTRF (1 << 19) #define MDCR_EL2_TPMS (1 << 14) #define MDCR_EL2_E2PB_MASK (UL(0x3)) #define MDCR_EL2_E2PB_SHIFT (UL(12)) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 44a243754c1b..c54e759896c1 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -60,7 +60,7 @@ extern char __kvm_hyp_vector[]; extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); @@ -88,6 +88,34 @@ extern u32 __kvm_get_mdcr_el2(void); *__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 
9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp @@ -113,6 +141,21 @@ extern u32 __kvm_get_mdcr_el2(void); kern_hyp_va \vcpu .endm +/* + * KVM extable for unexpected exceptions. + * In the same format _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 6ff84f1f3b4c..f65ff6b90f4a 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); } -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) -{ - if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_disable(vcpu); -} - static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) { return vcpu->arch.vsesr_el2; @@ -305,7 +299,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } @@ -313,7 +307,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || - kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ + kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */ } static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) @@ -342,6 +336,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; @@ -379,6 +378,9 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) { + if (kvm_vcpu_abt_iss1tw(vcpu)) + return true; + if (kvm_vcpu_trap_is_iabt(vcpu)) return false; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f656169db8c3..dfa6dc4575be 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -182,6 +182,7 @@ enum vcpu_sysreg { #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ +#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 
2 */ #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ @@ -209,6 +210,7 @@ enum vcpu_sysreg { #define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) #define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) #define cp14_DBGDCCINT (MDCCINT_EL1 * 2) +#define cp14_DBGVCR (DBGVCR32_EL2 * 2) #define NR_COPRO_REGS (NR_SYS_REGS * 2) @@ -392,8 +394,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); * CP14 and CP15 live in the same array, as they are backed by the * same system registers. */ -#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) -#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) +#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) + +#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) +#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) struct kvm_vm_stat { ulong remote_tlb_flush; @@ -425,7 +429,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); @@ -677,4 +681,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_arm_vcpu_sve_finalized(vcpu) \ ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) +#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu) + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 97f21cc66657..7f7fdb16bb96 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -71,6 +71,9 @@ void __sysreg32_restore_state(struct kvm_vcpu *vcpu); void __debug_switch_to_guest(struct kvm_vcpu *vcpu); void __debug_switch_to_host(struct kvm_vcpu *vcpu); +void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu); +void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); + void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 1b266292f0be..ebee3113a62f 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -4,4 +4,20 @@ #define __ALIGN .align 2 #define __ALIGN_STR ".align 2" +/* + * Annotate a function as position independent, i.e., safe to be called before + * the kernel virtual mapping is activated. + */ +#define SYM_FUNC_START_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START(x) + +#define SYM_FUNC_START_WEAK_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START_WEAK(x) + +#define SYM_FUNC_END_PI(x) \ + SYM_FUNC_END(x); \ + SYM_FUNC_END_ALIAS(__pi_##x) + #endif diff --git a/arch/arm64/include/asm/machine_types.h b/arch/arm64/include/asm/machine_types.h new file mode 100644 index 000000000000..b1d2db574d4e --- /dev/null +++ b/arch/arm64/include/asm/machine_types.h @@ -0,0 +1,39 @@ +/* + * Authors: Wang Yinfeng + * + * Copyright (C) 2021, PHYTIUM Information Technology Co., Ltd. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +#ifndef _MACHINE_TYPE_H_ +#define _MACHINE_TYPE_H_ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 95362e37555a..3b22e9b82147 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -186,7 +186,6 @@ extern u64 vabits_actual; #include #include -extern s64 physvirt_offset; extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. */ #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) @@ -256,13 +255,13 @@ static inline const void *__tag_set(const void *addr, u8 tag) /* - * The linear kernel range starts at the bottom of the virtual address - * space. Testing the top bit for the start of the region is a - * sufficient check and avoids having to worry about the tag. + * Check whether an arbitrary address is within the linear map, which + * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the + * kernel's TTBR1 address range. 
*/ -#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) +#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET)) -#define __lm_to_phys(addr) (((addr) + physvirt_offset)) +#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) #define __virt_to_phys_nodebug(x) ({ \ @@ -280,7 +279,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) #endif /* CONFIG_DEBUG_VIRTUAL */ -#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) +#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) /* @@ -324,6 +323,11 @@ static inline void *phys_to_virt(phys_addr_t x) #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) +#define page_to_virt(x) ({ \ + __typeof__(x) __page = x; \ + void *__addr = __va(page_to_phys(__page)); \ + (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ +}) #define virt_to_page(x) pfn_to_page(virt_to_pfn(x)) #else #define page_to_virt(x) ({ \ @@ -341,7 +345,7 @@ static inline void *phys_to_virt(phys_addr_t x) #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ #define virt_addr_valid(addr) ({ \ - __typeof__(addr) __addr = addr; \ + __typeof__(addr) __addr = __tag_reset(addr); \ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ }) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3827ff4040a3..3a5d9f1c91b6 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -63,10 +63,7 @@ extern u64 idmap_ptrs_per_pgd; static inline bool __cpu_uses_extended_idmap(void) { - if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) - return false; - - return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); + return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual)); } /* diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index 626ad01e83bf..dd870390d639 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node); /* Returns a pointer to the cpumask of CPUs on Node 'node'. 
*/ static inline const struct cpumask *cpumask_of_node(int node) { + if (node == NUMA_NO_NODE) + return cpu_all_mask; + return node_to_cpumask_map[node]; } #endif diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 70b323cf8300..5e3770c2ebea 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -17,6 +17,10 @@ #define pcibios_assign_all_busses() \ (pci_has_flag(PCI_REASSIGN_ALL_BUS)) +#ifdef CONFIG_ALTRA_ERRATUM_82288 +extern bool __read_mostly have_altra_erratum_82288; +#endif + #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 extern int isa_dma_bridge_buggy; diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index baf52baaa2a5..99b0a32e25c1 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -54,7 +54,7 @@ #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) -#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) +#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) #define PAGE_S2_MEMATTR(attr) \ ({ \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 13ebe2bad79f..db2d87e4fab9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -23,6 +23,8 @@ #define VMALLOC_START (MODULES_END) #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) +#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) + #define FIRST_USER_ADDRESS 0UL #ifndef __ASSEMBLY__ @@ -33,8 +35,6 @@ #include #include -extern struct page *vmemmap; - extern void __pte_error(const char *file, int line, unsigned long val); extern void __pmd_error(const char *file, int line, unsigned long val); extern void __pud_error(const char *file, int line, unsigned long val); @@ -98,8 +98,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) #define pte_valid_not_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) -#define pte_valid_young(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) @@ -107,9 +105,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; * Could the pte be present in the TLB? We must check mm_tlb_flush_pending * so that we don't erroneously return false for pages that have been * remapped as PROT_NONE but are yet to be flushed from the TLB. + * Note that we can't make any assumptions based on the state of the access + * flag, since ptep_clear_flush_young() elides a DSB when invalidating the + * TLB. */ #define pte_accessible(mm, pte) \ - (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) + (mm_tlb_flush_pending(mm) ? 
pte_present(pte) : pte_valid(pte)) /* * p??_access_permitted() is true for valid user mappings (subject to the @@ -135,13 +136,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) return pte; } -static inline pte_t pte_wrprotect(pte_t pte) -{ - pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); - pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); - return pte; -} - static inline pte_t pte_mkwrite(pte_t pte) { pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); @@ -167,6 +161,20 @@ static inline pte_t pte_mkdirty(pte_t pte) return pte; } +static inline pte_t pte_wrprotect(pte_t pte) +{ + /* + * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY + * clear), set the PTE_DIRTY bit. + */ + if (pte_hw_dirty(pte)) + pte = pte_mkdirty(pte); + + pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + return pte; +} + static inline pte_t pte_mkold(pte_t pte) { return clear_pte_bit(pte, __pgprot(PTE_AF)); @@ -177,11 +185,6 @@ static inline pte_t pte_mkyoung(pte_t pte) return set_pte_bit(pte, __pgprot(PTE_AF)); } -static inline pte_t pte_mkspecial(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); -} - static inline pte_t pte_mkcont(pte_t pte) { pte = set_pte_bit(pte, __pgprot(PTE_CONT)); @@ -428,6 +431,27 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) +#ifdef CONFIG_ALTRA_ERRATUM_82288 +extern bool have_altra_erratum_82288; +#endif + +static inline pte_t pte_mkspecial(pte_t pte) +{ +#ifdef CONFIG_ALTRA_ERRATUM_82288 + phys_addr_t phys = __pte_to_phys(pte); + pgprot_t prot = __pgprot(pte_val(pte) & ~PTE_ADDR_MASK); + + if (unlikely(have_altra_erratum_82288) && + (phys < 0x80000000 || + (phys >= 0x200000000000 && phys < 0x400000000000) || + (phys >= 0x600000000000 && phys < 0x800000000000))) { + pte = __pte(__phys_to_pte_val(phys) | pgprot_val(pgprot_device(prot))); + } +#endif + + return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); +} + #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, @@ -456,6 +480,7 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD]; extern pgd_t init_pg_end[]; extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t idmap_pg_end[]; extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); @@ -781,12 +806,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres pte = READ_ONCE(*ptep); do { old_pte = pte; - /* - * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY - * clear), set the PTE_DIRTY bit. - */ - if (pte_hw_dirty(pte)) - pte = pte_mkdirty(pte); pte = pte_wrprotect(pte); pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), pte_val(old_pte), pte_val(pte)); diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 7a24bad1a58b..076a4157a74f 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -3,7 +3,6 @@ #define __ASM_POINTER_AUTH_H #include -#include #include #include @@ -30,6 +29,13 @@ struct ptrauth_keys { struct ptrauth_key apga; }; +/* + * Only include random.h once ptrauth_keys_* structures are defined + * to avoid yet another circular include hell (random.h * ends up + * including asm/smp.h, which requires ptrauth_keys_kernel). 
+ */ +#include + static inline void ptrauth_keys_init(struct ptrauth_keys *keys) { if (system_supports_address_auth()) { diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index a0c8a0b65259..0eadbf933e35 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); * Logical CPU mapping. */ extern u64 __cpu_logical_map[NR_CPUS]; -#define cpu_logical_map(cpu) __cpu_logical_map[cpu] +extern u64 cpu_logical_map(int cpu); + +static inline void set_cpu_logical_map(int cpu, u64 hwid) +{ + __cpu_logical_map[cpu] = hwid; +} struct seq_file; diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 65299a2dcf9c..cfc0672013f6 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { unsigned long error = regs->regs[0]; + + if (is_compat_thread(task_thread_info(task))) + error = sign_extend64(error, 31); + return IS_ERR_VALUE(error) ? error : 0; } @@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - regs->regs[0] = (long) error ? error : val; + if (error) + val = error; + + if (is_compat_thread(task_thread_info(task))) + val = lower_32_bits(val); + + regs->regs[0] = val; } #define SYSCALL_MAX_ARGS 6 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..9b68f1b3915e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -49,7 +49,9 @@ #ifndef CONFIG_BROKEN_GAS_INST #ifdef __ASSEMBLY__ -#define __emit_inst(x) .inst (x) +// The space separator is omitted so that __emit_inst(x) can be parsed as +// either an assembler directive or an assembler macro argument. 
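// (Editorial aside, not part of the patch.) An illustration of the parsing
// problem the two comment lines above describe: with the old
// "#define __emit_inst(x) .inst (x)", an assembler-side user such as
//
//	.macro	mrs_s, rt, sreg
//	__emit_inst(0xd5200000|(\sreg)|(\rt))
//	.endm
//
// expands to ".inst (...)", and the space lets the expansion be split up
// when __emit_inst(...) is itself passed as an assembler macro argument.
// ".inst(x)" keeps the expansion a single token; the C-side definition
// (".inst " __stringify((x))) below is unchanged.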
+#define __emit_inst(x) .inst(x) #else #define __emit_inst(x) ".inst " __stringify((x)) "\n\t" #endif diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index f0cec4160136..4e3ed702bec7 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -91,6 +91,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_FSCHECK (1 << TIF_FSCHECK) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h new file mode 100644 index 000000000000..8019f616e1f7 --- /dev/null +++ b/arch/arm64/include/asm/vdso/clocksource.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +enum vdso_arch_clockmode { + /* vdso clocksource not usable */ + VDSO_CLOCKMODE_NONE, + /* vdso clocksource for both 32 and 64bit tasks */ + VDSO_CLOCKMODE_ARCHTIMER, + /* vdso clocksource for 64bit tasks only */ + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT, +}; + +#endif diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index c50ee1b7d5cd..413d42e197c7 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -10,6 +10,7 @@ #include #include +#include #include #define __VDSO_USE_SYSCALL ULLONG_MAX @@ -117,10 +118,10 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) u64 res; /* - * clock_mode == 0 implies that vDSO are enabled otherwise + * clock_mode == ARCHTIMER implies that vDSO are enabled otherwise * fallback on syscall. */ - if (clock_mode) + if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER) return __VDSO_USE_SYSCALL; /* diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index b08f476b72b4..ff83b8b574fc 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -10,6 +10,8 @@ #include #include +#include + #define __VDSO_USE_SYSCALL ULLONG_MAX #define VDSO_HAS_CLOCK_GETRES 1 @@ -71,10 +73,10 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) u64 res; /* - * clock_mode == 0 implies that vDSO are enabled otherwise + * clock_mode != NONE implies that vDSO are enabled otherwise * fallback on syscall. 
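The vdso_arch_clockmode enum introduced above, together with the two gettimeofday headers that follow, is deliberately asymmetric: the native vDSO falls back to the syscall only for NONE, while the compat vDSO falls back for anything other than plain ARCHTIMER, which is how ARCHTIMER_NOCOMPAT exposes the counter to 64-bit tasks alone. A condensed sketch of the two predicates (names follow the diff; logic only):

#include <stdbool.h>

enum vdso_arch_clockmode {
	VDSO_CLOCKMODE_NONE,               /* vdso clocksource not usable */
	VDSO_CLOCKMODE_ARCHTIMER,          /* usable by 32- and 64-bit tasks */
	VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT, /* usable by 64-bit tasks only */
};

static bool native_vdso_uses_syscall(enum vdso_arch_clockmode m)
{
	return m == VDSO_CLOCKMODE_NONE;
}

static bool compat_vdso_uses_syscall(enum vdso_arch_clockmode m)
{
	return m != VDSO_CLOCKMODE_ARCHTIMER;
}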
*/ - if (clock_mode) + if (clock_mode == VDSO_CLOCKMODE_NONE) return __VDSO_USE_SYSCALL; /* diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h index 0c20a7c1bee5..e50f26741946 100644 --- a/arch/arm64/include/asm/vdso/vsyscall.h +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -24,9 +24,7 @@ struct vdso_data *__arm64_get_k_vdso_data(void) static __always_inline int __arm64_get_clock_mode(struct timekeeper *tk) { - u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; - - return use_syscall; + return tk->tkr_mono.clock->archdata.clock_mode; } #define __arch_get_clock_mode __arm64_get_clock_mode diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h index 3333950b5909..ea487218db79 100644 --- a/arch/arm64/include/asm/word-at-a-time.h +++ b/arch/arm64/include/asm/word-at-a-time.h @@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask) */ static inline unsigned long load_unaligned_zeropad(const void *addr) { - unsigned long ret, offset; + unsigned long ret, tmp; /* Load word from unaligned pointer addr */ asm( @@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) "2:\n" " .pushsection .fixup,\"ax\"\n" " .align 2\n" - "3: and %1, %2, #0x7\n" - " bic %2, %2, #0x7\n" - " ldr %0, [%2]\n" + "3: bic %1, %2, #0x7\n" + " ldr %0, [%1]\n" + " and %1, %2, #0x7\n" " lsl %1, %1, #0x3\n" #ifndef __AARCH64EB__ " lsr %0, %0, %1\n" @@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) " b 2b\n" " .popsection\n" _ASM_EXTABLE(1b, 3b) - : "=&r" (ret), "=&r" (offset) + : "=&r" (ret), "=&r" (tmp) : "r" (addr), "Q" (*(unsigned long *)addr)); return ret; diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index a100483b47c4..46ec402e97ed 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -269,6 +270,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) int apei_claim_sea(struct pt_regs *regs) { int err = -ENOENT; + bool return_to_irqs_enabled; unsigned long current_flags; if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) @@ -276,6 +278,12 @@ int apei_claim_sea(struct pt_regs *regs) current_flags = local_daif_save_flags(); + /* current_flags isn't useful here as daif doesn't tell us about pNMI */ + return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags()); + + if (regs) + return_to_irqs_enabled = interrupts_enabled(regs); + /* * SEA can interrupt SError, mask it and describe this as an NMI so * that APEI defers the handling. @@ -284,6 +292,23 @@ int apei_claim_sea(struct pt_regs *regs) nmi_enter(); err = ghes_notify_sea(); nmi_exit(); + + /* + * APEI NMI-like notifications are deferred to irq_work. Unless + * we interrupted irqs-masked code, we can do that now. 
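In the load_unaligned_zeropad() fixup earlier in this hunk, the reordered sequence aligns the address down and loads first (the aligned load cannot fault), and only then derives the byte shift from the original address. A userspace model of the little-endian path, assuming an 8-byte word:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* "bic %1, %2, #0x7; ldr %0, [%1]" then "and %1, %2, #0x7; lsl; lsr" */
static uint64_t zeropad_model(const unsigned char *buf, size_t offset)
{
	uint64_t word;

	memcpy(&word, buf + (offset & ~(size_t)7), 8); /* aligned load */
	return word >> ((offset & 7) * 8);             /* shift tail to zero */
}

int main(void)
{
	unsigned char buf[8] __attribute__((aligned(8))) = "abcdefgh";

	/* Reading at offset 5 yields "fgh" with zero padding above it. */
	printf("%#llx\n", (unsigned long long)zeropad_model(buf, 5));
	return 0;
}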
+ */ + if (!err) { + if (return_to_irqs_enabled) { + local_daif_restore(DAIF_PROCCTX_NOIRQ); + __irq_enter(); + irq_work_run(); + __irq_exit(); + } else { + pr_warn_ratelimited("APEI work queued but not completed"); + err = -EINPROGRESS; + } + } + local_daif_restore(current_flags); return err; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d1757ef1b1e7..73039949b5ce 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature) */ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { - unsigned long replptr; - - if (kernel_text_address(pc)) - return true; - - replptr = (unsigned long)ALT_REPL_PTR(alt); - if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return false; - - /* - * Branching into *another* alternate sequence is doomed, and - * we're not even trying to fix it up. - */ - BUG(); + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); } #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index ca158be21f83..bcb14d11232f 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = { }, { /* Thumb mode */ - .instr_mask = 0x0000fff7, + .instr_mask = 0xfffffff7, .instr_val = 0x0000b650, .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK), .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR), diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 96f576e9ea46..1e16c4e00e77 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -88,13 +88,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, } static void -cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) +cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) { u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + bool enable_uct_trap = false; /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ if ((read_cpuid_cachetype() & mask) != (arm64_ftr_reg_ctrel0.sys_val & mask)) + enable_uct_trap = true; + + /* ... 
or if the system is affected by an erratum */ + if (cap->capability == ARM64_WORKAROUND_1542419) + enable_uct_trap = true; + + if (enable_uct_trap) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } @@ -484,6 +492,12 @@ out_printmsg: return required; } +static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap) +{ + if (ssbd_state != ARM64_SSBD_FORCE_DISABLE) + cap->matches(cap, SCOPE_LOCAL_CPU); +} + /* known invulnerable cores */ static const struct midr_range arm64_ssb_cpus[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), @@ -627,6 +641,12 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) return (need_wa > 0); } +static void +cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap) +{ + cap->matches(cap, SCOPE_LOCAL_CPU); +} + static const __maybe_unused struct midr_range tx2_family_cpus[] = { MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), @@ -651,6 +671,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, return false; } +static bool __maybe_unused +has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT); + const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && has_dic; +} + #ifdef CONFIG_HARDEN_EL2_VECTORS static const struct midr_range arm64_harden_el2_vectors[] = { @@ -874,9 +906,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #endif { + .desc = "Branch predictor hardening", .capability = ARM64_HARDEN_BRANCH_PREDICTOR, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = check_branch_predictor, + .cpu_enable = cpu_enable_branch_predictor_hardening, }, #ifdef CONFIG_HARDEN_EL2_VECTORS { @@ -890,6 +924,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_SSBD, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_ssbd_mitigation, + .cpu_enable = cpu_enable_ssbd_mitigation, .midr_range_list = arm64_ssb_cpus, }, #ifdef CONFIG_ARM64_ERRATUM_1418040 @@ -897,6 +932,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .desc = "ARM erratum 1418040", .capability = ARM64_WORKAROUND_1418040, ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), + /* + * We need to allow affected CPUs to come in late, but + * also need the non-affected CPUs to be able to come + * in at any point in time. Wonderful. 
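A note on the matcher above: has_neoverse_n1_erratum_1542419() fires only when CTR_EL0.DIC is set, because a core without DIC already forces userspace to issue I-cache maintenance, so there is nothing extra to trap. Roughly (CTR_DIC_SHIFT is bit 29; a sketch, not the kernel helper):

#include <stdbool.h>
#include <stdint.h>

#define CTR_DIC_SHIFT 29 /* CTR_EL0.DIC: I-cache coherent to PoU */

static bool needs_1542419_trap(bool is_neoverse_n1, uint32_t ctr_el0)
{
	return is_neoverse_n1 && (ctr_el0 & (1u << CTR_DIC_SHIFT));
}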
+ */ + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, }, #endif #ifdef CONFIG_ARM64_ERRATUM_1165522 @@ -927,6 +968,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1542419 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1542419 (kernel portion)", + .capability = ARM64_WORKAROUND_1542419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_neoverse_n1_erratum_1542419, + .cpu_enable = cpu_enable_trap_ctr_access, + }, #endif { } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f400cb29b811..acdef8d76c64 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -160,11 +160,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - /* Linux doesn't care about the EL3 */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), ARM64_FTR_END, }; @@ -278,7 +277,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { * of support. */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), ARM64_FTR_END, }; @@ -320,7 +318,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_dfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + /* [31:28] TraceFilt */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), @@ -719,9 +717,6 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); - /* - * EL3 is not our concern. - */ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, @@ -1096,7 +1091,7 @@ static bool cpu_has_broken_dbm(void) /* List of CPUs which have broken DBM support. 
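For the cpufeature table edits above, it helps to recall what FTR_LOWER_SAFE arbitration does: the sanitised system-wide value of a field is the minimum observed across CPUs, and the strict/non-strict flag only controls whether a mismatch taints the kernel. A toy model for one unsigned 4-bit field:

#include <stdint.h>

static unsigned int field4(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

/* FTR_LOWER_SAFE: the safe system value is the smaller of the two. */
static unsigned int lower_safe(unsigned int sysval, unsigned int newval)
{
	return newval < sysval ? newval : sysval;
}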
*/ static const struct midr_range cpus[] = { #ifdef CONFIG_ARM64_ERRATUM_1024718 - MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), #endif {}, }; diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c index e6e284265f19..58303a9ec32c 100644 --- a/arch/arm64/kernel/crash_dump.c +++ b/arch/arm64/kernel/crash_dump.c @@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count); + *ppos += count; + return count; } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 48222a4760c2..d64a3c1e1b6b 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init); /* * Single step API and exception handling. */ -static void set_regs_spsr_ss(struct pt_regs *regs) +static void set_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate |= DBG_SPSR_SS; } -NOKPROBE_SYMBOL(set_regs_spsr_ss); +NOKPROBE_SYMBOL(set_user_regs_spsr_ss); -static void clear_regs_spsr_ss(struct pt_regs *regs) +static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate &= ~DBG_SPSR_SS; } -NOKPROBE_SYMBOL(clear_regs_spsr_ss); +NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); + +#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) +#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) static DEFINE_SPINLOCK(debug_hook_lock); static LIST_HEAD(user_step_hook); @@ -393,17 +396,26 @@ void user_rewind_single_step(struct task_struct *task) * If single step is active for this thread, then set SPSR.SS * to 1 to avoid returning to the active-pending state. */ - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) set_regs_spsr_ss(task_pt_regs(task)); } NOKPROBE_SYMBOL(user_rewind_single_step); void user_fastforward_single_step(struct task_struct *task) { - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) clear_regs_spsr_ss(task_pt_regs(task)); } +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_user_regs_spsr_ss(regs); + else + clear_user_regs_spsr_ss(regs); +} + /* Kernel API */ void kernel_enable_single_step(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 1765e5284994..04b982a2799e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl) return sve_vl_from_vq(__bit_to_vq(bit)); } -#ifdef CONFIG_SYSCTL +#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) static int sve_proc_do_default_vl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void) return 0; } -#else /* ! CONFIG_SYSCTL */ +#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ static int __init sve_sysctl_init(void) { return 0; } -#endif /* ! CONFIG_SYSCTL */ +#endif /* ! 
(CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..a2e0b3754943 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -337,7 +337,7 @@ __create_page_tables: */ adrp x5, __idmap_text_end clz x5, x5 - cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? + cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough? b.ge 1f // .. then skip VA range extension adr_l x6, idmap_t0sz @@ -393,13 +393,19 @@ __create_page_tables: /* * Since the page tables have been populated with non-cacheable - * accesses (MMU disabled), invalidate the idmap and swapper page - * tables again to remove any speculatively loaded cache lines. + * accesses (MMU disabled), invalidate those tables again to + * remove any speculatively loaded cache lines. */ + dmb sy + adrp x0, idmap_pg_dir + adrp x1, idmap_pg_end + sub x1, x1, x0 + bl __inval_dcache_area + + adrp x0, init_pg_dir adrp x1, init_pg_end sub x1, x1, x0 - dmb sy bl __inval_dcache_area ret x28 @@ -964,6 +970,7 @@ __primary_switch: tlbi vmalle1 // Remove any stale TLB entries dsb nsh + isb msr sctlr_el1, x19 // re-enable the MMU isb diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 38ee1514cd9c..b4a160795824 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, return 0; } +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = is_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. + */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, u64 val; struct perf_event *wp, **slots; struct debug_info *debug_info; - struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); @@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, if (dist != 0) continue; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; + step = watchpoint_report(wp, addr, regs); } - if (min_dist > 0 && min_dist != -1) { - /* No exact match found. */ - wp = slots[closest_match]; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; - } + /* No exact match found? 
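The watchpoint_report() consolidation above leans on a distance metric: zero when the access falls inside the watched range, otherwise how far outside it landed, so the handler can fall back to the closest watchpoint when nothing matches exactly. A rough model (the kernel derives base and length from the value and control registers):

#include <stdint.h>

static uint64_t wp_distance(uint64_t addr, uint64_t base, uint64_t len)
{
	if (addr < base)
		return base - addr;
	if (addr >= base + len)
		return addr - (base + len - 1);
	return 0; /* exact hit */
}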
*/ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + rcu_read_unlock(); if (!step) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 4a9e773a177f..cc2f3d901c91 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -1535,16 +1535,10 @@ static u32 aarch64_encode_immediate(u64 imm, u32 insn) { unsigned int immr, imms, n, ones, ror, esz, tmp; - u64 mask = ~0UL; - - /* Can't encode full zeroes or full ones */ - if (!imm || !~imm) - return AARCH64_BREAK_FAULT; + u64 mask; switch (variant) { case AARCH64_INSN_VARIANT_32BIT: - if (upper_32_bits(imm)) - return AARCH64_BREAK_FAULT; esz = 32; break; case AARCH64_INSN_VARIANT_64BIT: @@ -1556,6 +1550,12 @@ static u32 aarch64_encode_immediate(u64 imm, return AARCH64_BREAK_FAULT; } + mask = GENMASK(esz - 1, 0); + + /* Can't encode full zeroes, full ones, or value wider than the mask */ + if (!imm || imm == mask || imm & ~mask) + return AARCH64_BREAK_FAULT; + /* * Inverse of Replicate(). Try to spot a repeating pattern * with a pow2 stride. diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 43119922341f..1a157ca33262 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) if (!kgdb_single_step) return DBG_HOOK_ERROR; - kgdb_handle_exception(1, SIGTRAP, 0, regs); + kgdb_handle_exception(0, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 0df8493624e0..cc049ff5c6a5 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -189,6 +189,7 @@ void machine_kexec(struct kimage *kimage) * the offline CPUs. Therefore, we must use the __* variant here. */ __flush_icache_range((uintptr_t)reboot_code_buffer, + (uintptr_t)reboot_code_buffer + arm64_relocate_new_kernel_size); /* Flush the kimage list and its buffers. 
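The aarch64_encode_immediate() change earlier in this hunk replaces the up-front all-zeroes/all-ones test with a mask-relative one, so for the 32-bit variant 0xffffffff is now correctly rejected as "all ones". A condensed model of the new admissibility check (written to avoid the undefined 1 << 64):

#include <stdbool.h>
#include <stdint.h>

static bool logical_imm_plausible(uint64_t imm, unsigned int esz)
{
	/* GENMASK(esz - 1, 0) */
	uint64_t mask = (esz == 64) ? ~0ULL : (1ULL << esz) - 1;

	/* no all-zeroes, no all-ones, no bits above the element size */
	return imm != 0 && imm != mask && (imm & ~mask) == 0;
}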
*/ diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 7b08bf9499b6..d2a62dd17d79 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -150,8 +150,10 @@ static int create_dtb(struct kimage *image, /* duplicate a device tree blob */ ret = fdt_open_into(initial_boot_params, buf, buf_size); - if (ret) + if (ret) { + vfree(buf); return -EINVAL; + } ret = setup_dtb(image, initrd_load_addr, initrd_len, cmdline, buf); diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 915a39dcf32c..a7febf004be7 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -301,8 +301,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt_shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt_shndx = i; - else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && - !strcmp(secstrings + sechdrs[i].sh_name, + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".text.ftrace_trampoline")) tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index 22e36a21c113..09a0eef71d12 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -1,5 +1,5 @@ SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .init.plt (NOLOAD) : { BYTE(0) } - .text.ftrace_trampoline (NOLOAD) : { BYTE(0) } + .plt 0 (NOLOAD) : { BYTE(0) } + .init.plt 0 (NOLOAD) : { BYTE(0) } + .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index a0b4f1bca491..19128d994ee9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -155,7 +155,7 @@ armv8pmu_events_sysfs_show(struct device *dev, pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - return sprintf(page, "event=0x%03llx\n", pmu_attr->id); + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); } #define ARMV8_EVENT_ATTR(name, config) \ @@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj, test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) return attr->mode; - pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; - if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && - test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap)) - return attr->mode; + if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) { + u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; + + if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(id, cpu_pmu->pmceid_ext_bitmap)) + return attr->mode; + } return 0; } diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 0bbac612146e..666b225aeb3a 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; /* - * Compat (i.e. 32 bit) mode: - * - PC has been set in the pt_regs struct in kernel_entry, - * - Handle SP and LR here. + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatibility reasons. + * + * A 32-bit consumer inspecting a 32-bit task will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. 
+ * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. + * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. */ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; + if (idx == 15) + return regs->pc; } if ((u32)idx == PERF_REG_ARM64_SP) diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index a412d8edbcd2..2c247634552b 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, /* TODO: Currently we do not support AARCH32 instruction probing */ if (mm->context.flags & MMCF_AARCH32) - return -ENOTSUPP; + return -EOPNOTSUPP; else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) return -EINVAL; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 81b761d1da8f..ad3de3fd6ccd 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -522,6 +522,38 @@ static void entry_task_switch(struct task_struct *next) __this_cpu_write(__entry_task, next); } +/* + * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. + * Assuming the virtual counter is enabled at the beginning of times: + * + * - disable access when switching from a 64bit task to a 32bit task + * - enable access when switching from a 32bit task to a 64bit task + */ +static void erratum_1418040_thread_switch(struct task_struct *prev, + struct task_struct *next) +{ + bool prev32, next32; + u64 val; + + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040)) + return; + + prev32 = is_compat_thread(task_thread_info(prev)); + next32 = is_compat_thread(task_thread_info(next)); + + if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) + return; + + val = read_sysreg(cntkctl_el1); + + if (!next32) + val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + else + val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + + write_sysreg(val, cntkctl_el1); +} + /* * Thread switching. */ @@ -538,6 +570,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, uao_thread_switch(next); ptrauth_thread_switch(next); ssbs_thread_switch(next); + erratum_1418040_thread_switch(prev, next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 43ae4e0c968f..62d2bda7adb8 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu) static void cpu_psci_cpu_die(unsigned int cpu) { - int ret; /* * There are no known implementations of PSCI actually using the * power state field, pass a sensible default for now. 
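erratum_1418040_thread_switch() above touches CNTKCTL_EL1 only on a width transition, so same-width context switches stay free of sysreg writes. A sketch of the decision (ARCH_TIMER_USR_VCT_ACCESS_EN is CNTKCTL_EL1.EL0VCTEN, bit 1):

#include <stdbool.h>
#include <stdint.h>

#define ARCH_TIMER_USR_VCT_ACCESS_EN (1u << 1) /* EL0VCTEN */

static uint32_t next_cntkctl(uint32_t val, bool prev32, bool next32)
{
	if (prev32 == next32)
		return val;	/* no transition: leave the register alone */
	if (next32)
		return val & ~ARCH_TIMER_USR_VCT_ACCESS_EN; /* trap CNTVCT */
	return val | ARCH_TIMER_USR_VCT_ACCESS_EN;
}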
@@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu) u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT; - ret = psci_ops.cpu_off(state); - - pr_crit("unable to power off CPU%u (%d)\n", cpu, ret); + psci_ops.cpu_off(state); } static int cpu_psci_cpu_kill(unsigned int cpu) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9168c4f1a37f..0cfd68577489 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1819,20 +1819,32 @@ static void tracehook_report_syscall(struct pt_regs *regs, saved_reg = regs->regs[regno]; regs->regs[regno] = dir; - if (dir == PTRACE_SYSCALL_EXIT) + if (dir == PTRACE_SYSCALL_ENTER) { + if (tracehook_report_syscall_entry(regs)) + forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else if (!test_thread_flag(TIF_SINGLESTEP)) { tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else { + regs->regs[regno] = saved_reg; - regs->regs[regno] = saved_reg; + /* + * Signal a pseudo-step exception since we are stepping but + * tracer modifications to the registers may have rewound the + * state machine. + */ + tracehook_report_syscall_exit(regs, 1); + } } int syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) || - test_thread_flag(TIF_SYSCALL_EMU)) { + unsigned long flags = READ_ONCE(current_thread_info()->flags); + + if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); - if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU)) + if (flags & _TIF_SYSCALL_EMU) return -1; } @@ -1851,12 +1863,14 @@ int syscall_trace_enter(struct pt_regs *regs) void syscall_trace_exit(struct pt_regs *regs) { + unsigned long flags = READ_ONCE(current_thread_info()->flags); + audit_syscall_exit(regs); - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + if (flags & _TIF_SYSCALL_TRACEPOINT) trace_sys_exit(regs, regs_return_value(regs)); - if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); rseq_syscall(regs); @@ -1934,8 +1948,8 @@ static int valid_native_regs(struct user_pt_regs *regs) */ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) - regs->pstate &= ~DBG_SPSR_SS; + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ + user_regs_reset_single_step(regs, task); if (is_compat_thread(task_thread_info(task))) return valid_compat_regs(regs); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 56f664561754..d98987b82874 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -85,7 +85,7 @@ u64 __cacheline_aligned boot_args[4]; void __init smp_setup_processor_id(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; - cpu_logical_map(0) = mpidr; + set_cpu_logical_map(0, mpidr); /* * clear __my_cpu_offset on boot CPU to avoid hang caused by @@ -276,6 +276,12 @@ arch_initcall(reserve_memblock_reserved_regions); u64 __cpu_logical_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = INVALID_HWID }; +u64 cpu_logical_map(int cpu) +{ + return __cpu_logical_map[cpu]; +} +EXPORT_SYMBOL_GPL(cpu_logical_map); + void __init setup_arch(char **cmdline_p) { init_mm.start_code = (unsigned long) _text; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index dd2cdc0d5be2..ddb757b2c3e5 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -782,7 +782,6 @@ static void setup_restart_syscall(struct pt_regs *regs) */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; @@ -806,14 +805,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) */ ret |= !valid_user_regs(&regs->user_regs, current); - /* - * Fast forward the stepping logic so we step into the signal - * handler. - */ - if (!ret) - user_fastforward_single_step(tsk); - - signal_setup_done(ret, ksig, 0); + /* Step into the signal handler if we are stepping */ + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); } /* diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 993a4aedfd37..dd1416100d6d 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -215,6 +216,7 @@ asmlinkage notrace void secondary_start_kernel(void) if (system_uses_irq_prio_masking()) init_gic_priority_masking(); + rcu_cpu_starting(cpu); preempt_disable(); trace_hardirqs_off(); @@ -387,6 +389,7 @@ void cpu_die_early(void) /* Mark this CPU absent */ set_cpu_present(cpu, 0); + rcu_report_dead(cpu); #ifdef CONFIG_HOTPLUG_CPU update_cpu_boot_status(CPU_KILL_ME); @@ -499,6 +502,32 @@ static int __init smp_cpu_setup(int cpu) static bool bootcpu_valid __initdata; static unsigned int cpu_count = 1; +#ifdef CONFIG_ARCH_PHYTIUM +/* + * On a Phytium S2500 multi-socket server, for example a 2-socket (2P) system, + * there are socket0 and socket1 on the server: + * If a storage device (like a SAS controller and the disks vmcore is saved to) + * is installed on socket1 and the second kernel brings up 2 CPUs both on + * socket0 with nr_cpus=2, then vmcore will fail to be saved to the disk. + * To avoid this issue, bypass all CPUs other than each socket's cpu0 to ensure + * that cpu0 on each socket can boot and handle interrupts when booting the + * second kernel. 
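A condensed model of the bypass test that follows: the low 16 bits of the MPIDR-derived hwid identify a CPU's slot within its socket (a Phytium S2500 convention, not an architectural rule), so keeping only hwids whose low bits match the boot CPU leaves exactly one cpu0 per socket in the kdump kernel:

#include <stdbool.h>
#include <stdint.h>

static bool phytium_kdump_bypass(uint64_t hwid, uint64_t boot_hwid)
{
	/* bypass every CPU that is not some socket's cpu0 */
	return (hwid & 0xffff) != (boot_hwid & 0xffff);
}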
+ */ +static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid) +{ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500) + return false; + + /* + * Bypass other non-cpu0 to ensure second kernel can bring up each cpu0 + * on each socket + */ + if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff)) + return true; + + return false; +} +#endif + #ifdef CONFIG_ACPI static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; @@ -548,8 +577,13 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) if (cpu_count >= NR_CPUS) return; +#ifdef CONFIG_ARCH_PHYTIUM + if (is_phytium_kdump_cpu_need_bypass(hwid)) + return; +#endif + /* map the logical cpu id to cpu MPIDR */ - cpu_logical_map(cpu_count) = hwid; + set_cpu_logical_map(cpu_count, hwid); cpu_madt_gicc[cpu_count] = *processor; @@ -663,7 +697,7 @@ static void __init of_parse_and_init_cpus(void) goto next; pr_debug("cpu logical map 0x%llx\n", hwid); - cpu_logical_map(cpu_count) = hwid; + set_cpu_logical_map(cpu_count, hwid); early_map_cpu_to_node(cpu_count, of_node_to_nid(dn)); next: @@ -704,7 +738,7 @@ void __init smp_init_cpus(void) for (i = 1; i < nr_cpu_ids; i++) { if (cpu_logical_map(i) != INVALID_HWID) { if (smp_cpu_setup(i)) - cpu_logical_map(i) = INVALID_HWID; + set_cpu_logical_map(i, INVALID_HWID); } } } diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index f1cb64959427..3c18c2454089 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include +#include #include static long @@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end) if (fatal_signal_pending(current)) return 0; + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* + * The workaround requires an inner-shareable tlbi. + * We pick the reserved-ASID to minimise the impact. + */ + __tlbi(aside1is, __TLBI_VADDR(0, 0)); + dsb(ish); + } + ret = __flush_cache_user_range(start, start + chunk); if (ret) return ret; diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 871c739f060a..f2d2dbbbfca2 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, ret = do_ni_syscall(regs, scno); } + if (is_compat_task()) + ret = lower_32_bits(ret); + regs->regs[0] = ret; } @@ -99,8 +102,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, regs->syscallno = scno; cortex_a76_erratum_1463225_svc_handler(); + user_exit_irqoff(); local_daif_restore(DAIF_PROCCTX); - user_exit(); if (has_syscall_work(flags)) { /* set default errno for user-issued syscall(-1) */ @@ -121,7 +124,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { local_daif_mask(); flags = current_thread_info()->flags; - if (!has_syscall_work(flags)) { + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) { /* * We're off to userspace, where interrupts are * always enabled after we restore the flags from diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index fa9528dfd0ce..113903db666c 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -35,21 +35,23 @@ void store_cpu_topology(unsigned int cpuid) if (mpidr & MPIDR_UP_BITMASK) return; - /* Create cpu topology mapping based on MPIDR. 
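For reference while reading the topology rewrite just below: the deleted code mined the MPIDR affinity fields (Aff0 [7:0], Aff1 [15:8], Aff2 [23:16], Aff3 [39:32]) for thread/core/package IDs, and the replacement comment explains why those fields cannot be trusted. The extraction it performed, modelled standalone:

#include <stdint.h>

static uint64_t mpidr_affinity_level(uint64_t mpidr, unsigned int level)
{
	unsigned int shift = (level == 3) ? 32 : level * 8;

	return (mpidr >> shift) & 0xff;
}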
*/ - if (mpidr & MPIDR_MT_BITMASK) { - /* Multiprocessor system : Multi-threads per core */ - cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; - } else { - /* Multiprocessor system : Single-thread per core */ - cpuid_topo->thread_id = -1; - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; - } + /* + * This would be the place to create cpu topology based on MPIDR. + * + * However, it cannot be trusted to depict the actual topology; some + * pieces of the architecture enforce an artificial cap on Aff0 values + * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an + * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up + * having absolutely no relationship to the actual underlying system + * topology, and cannot be reasonably used as core / package ID. + * + * If the MT bit is set, Aff0 *could* be used to define a thread ID, but + * we still wouldn't be able to obtain a sane core ID. This means we + * need to entirely ignore MPIDR for any topology deduction. + */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", cpuid, cpuid_topo->package_id, cpuid_topo->core_id, diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 34739e80211b..4e3e9d9c8151 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -470,6 +470,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) int rt = ESR_ELx_SYS64_ISS_RT(esr); unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* Hide DIC so that we can trap the unnecessary maintenance...*/ + val &= ~BIT(CTR_DIC_SHIFT); + + /* ... and fake IminLine to reduce the number of traps. 
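The CTR_EL0 faking above chooses IminLine = PAGE_SHIFT - 2 because IminLine encodes log2 of the I-cache line size in 4-byte words; advertising a page-sized "line" makes trapped userspace maintenance loops stride a whole page per instruction. The arithmetic, assuming 4K pages:

#include <stdint.h>
#include <stdio.h>

#define CTR_IMINLINE_MASK 0xf
#define PAGE_SHIFT 12 /* illustrative: 4K pages */

int main(void)
{
	uint32_t iminline = (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;

	printf("faked line size: %lu bytes\n", 4UL << iminline); /* 4096 */
	return 0;
}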
*/ + val &= ~CTR_IMINLINE_MASK; + val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK; + } + pt_regs_write_reg(regs, rt, val); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 354b11e27c07..033a48f30dbb 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -260,18 +260,7 @@ static int __aarch32_alloc_vdso_pages(void) if (ret) return ret; - ret = aarch32_alloc_kuser_vdso_page(); - if (ret) { - unsigned long c_vvar = - (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]); - unsigned long c_vdso = - (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]); - - free_page(c_vvar); - free_page(c_vdso); - } - - return ret; + return aarch32_alloc_kuser_vdso_page(); } #else static int __aarch32_alloc_vdso_pages(void) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index dd2514bb1511..3862cad2410c 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -32,7 +32,7 @@ UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n -CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny +CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 7ad2d3a0cd48..815df253f96e 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -28,6 +28,13 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. + */ + /DISCARD/ : { + *(.note.GNU-stack .note.gnu.property) + } .note : { *(.note.*) } :text :note . = ALIGN(16); @@ -48,7 +55,6 @@ SECTIONS PROVIDE(end = .); /DISCARD/ : { - *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 76b327f88fbb..40dffe60b845 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -190,7 +190,7 @@ quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ # Install commands for the unstripped file -quiet_cmd_vdso_install = INSTALL $@ +quiet_cmd_vdso_install = INSTALL32 $@ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so vdso.so: $(obj)/vdso.so.dbg diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 93752718f9d5..0bab37b1acbe 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -24,6 +24,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . = ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -39,6 +46,7 @@ jiffies = jiffies_64; __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ @@ -142,6 +150,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); idmap_pg_dir = .; . += IDMAP_DIR_SIZE; + idmap_pg_end = .; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_pg_dir = .; @@ -174,9 +183,6 @@ SECTIONS *(.altinstructions) __alt_instructions_end = .; } - .altinstr_replacement : { - *(.altinstr_replacement) - } . 
= ALIGN(PAGE_SIZE); __inittext_end = .; diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 7a7e425616b5..dbc890511631 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) * - Debug ROM Address (MDCR_EL2_TDRA) * - OS related registers (MDCR_EL2_TDOSA) * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB) + * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF) * * Additionally, KVM only traps guest accesses to the debug registers if * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY @@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | MDCR_EL2_TPMS | + MDCR_EL2_TTRF | MDCR_EL2_TPMCR | MDCR_EL2_TDRA | MDCR_EL2_TDOSA); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index dfd626447482..5271ab366bee 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); + + if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) { + int i; + + for (i = 0; i < 16; i++) + *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i); + } out: return err; } diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 706cca23f0d2..1249f68a9418 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } -#define __ptrauth_save_key(regs, key) \ -({ \ - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -}) - /* * Handle the guest trying to use a ptrauth instruction, or trying to access a * ptrauth register. */ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *ctxt; - - if (vcpu_has_ptrauth(vcpu)) { + if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_enable(vcpu); - ctxt = vcpu->arch.host_cpu_context; - __ptrauth_save_key(ctxt->sys_regs, APIA); - __ptrauth_save_key(ctxt->sys_regs, APIB); - __ptrauth_save_key(ctxt->sys_regs, APDA); - __ptrauth_save_key(ctxt->sys_regs, APDB); - __ptrauth_save_key(ctxt->sys_regs, APGA); - } else { + else kvm_inject_undefined(vcpu); - } } /* diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 160be2b4696d..dc41b505507d 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -136,11 +136,15 @@ ENTRY(__kvm_handle_stub_hvc) 1: cmp x0, #HVC_RESET_VECTORS b.ne 1f -reset: + /* - * Reset kvm back to the hyp stub. Do not clobber x0-x4 in - * case we coming via HVC_SOFT_RESTART. + * Set the HVC_RESET_VECTORS return code before entering the common + * path so that we do not clobber x0-x2 in case we are coming via + * HVC_SOFT_RESTART. */ + mov x0, xzr +reset: + /* Reset kvm back to the hyp stub. 
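The set_core_reg() change in kvm/guest.c above amounts to a post-write sanitisation pass: once the vCPU is known to be AArch32 (PSR_MODE32_BIT), every GPR visible to the 32-bit guest is truncated so userspace cannot smuggle stale high bits in. A sketch:

#include <stdint.h>

static void sanitise_aarch32_gprs(uint64_t regs[16])
{
	for (int i = 0; i < 16; i++)
		regs[i] = (uint32_t)regs[i]; /* drop bits 63:32 */
}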
*/ mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc @@ -151,7 +155,6 @@ reset: /* Install stub vectors */ adr_l x5, __hyp_stub_vectors msr vbar_el2, x5 - mov x0, xzr eret 1: /* Bad stub call */ diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 0fc9872a1467..aead8a5fbe91 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -168,6 +168,21 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); } +void __hyp_text __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) +{ + /* + * Non-VHE: Disable and flush SPE data generation + * VHE: The vcpu can run, but it can't hide. + */ + __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); + +} + +void __hyp_text __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) +{ + __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); +} + void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -175,13 +190,6 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *guest_dbg; - /* - * Non-VHE: Disable and flush SPE data generation - * VHE: The vcpu can run, but it can't hide. - */ - if (!has_vhe()) - __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); - if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; @@ -201,8 +209,6 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *guest_dbg; - if (!has_vhe()) - __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index e5cc8d66bf53..dc3d7bc2292f 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -173,20 +173,23 @@ alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: msr daifset, #4 // Mask aborts + ret - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + + // restore the EL1 exception context so that we can report some + // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index ffa68d5713f1..f36aad0f207b 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,30 @@ #include #include +.macro save_caller_saved_regs_vect + /* x0 and x1 were saved in the vector entry */ + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! 
+.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .pushsection .hyp.text, "ax" @@ -142,13 +166,19 @@ el1_error: b __guest_exit el2_sync: - /* Check for illegal exception return, otherwise panic */ + /* Check for illegal exception return */ mrs x0, spsr_el2 + tbnz x0, #20, 1f - /* if this was something else, then panic! */ - tst x0, #PSR_IL_BIT - b.eq __hyp_panic + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + eret + +1: /* Let's attempt a recovery from the illegal exception return */ get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IL @@ -156,27 +186,14 @@ el2_sync: el2_error: - ldp x0, x1, [sp], #16 + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? - */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret sb diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d76a3d39b269..14607fac7ca3 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -25,6 +26,9 @@ #include #include +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; + /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { @@ -257,10 +261,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = SYS_PAR_EL1_F; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & SYS_PAR_EL1_F)) @@ -492,7 +496,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_dabt_isextabt(vcpu) && - !kvm_vcpu_dabt_iss1tw(vcpu); + !kvm_vcpu_abt_iss1tw(vcpu); if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); @@ -678,6 +682,15 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(host_ctxt); + /* + * We must flush and disable the SPE buffer for nVHE, as + * the translation regime(EL1&0) is going to be loaded with + * that of the guest. And we must do this before we change the + * translation regime to EL2 (via MDCR_EL2_EPB == 0) and + * before we load guest Stage1. 
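The __kvm_ex_table entries installed by the vector code above are resolved by the kvm_unexpected_el2_exception() handler added further down, using self-relative offsets so the table stays valid at the EL2 VA. The lookup arithmetic, modelled standalone:

#include <stdint.h>

struct exception_table_entry {
	int32_t insn;  /* signed offset to the faulting instruction */
	int32_t fixup; /* signed offset to the fixup code */
};

static uintptr_t ex_insn_addr(const struct exception_table_entry *e)
{
	return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t ex_fixup_addr(const struct exception_table_entry *e)
{
	return (uintptr_t)&e->fixup + e->fixup;
}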
+ */ + __debug_save_host_buffers_nvhe(vcpu); + __activate_vm(kern_hyp_va(vcpu->kvm)); __activate_traps(vcpu); @@ -716,11 +729,13 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) __fpsimd_save_fpexc32(vcpu); + __debug_switch_to_host(vcpu); + /* * This must come after restoring the host sysregs, since a non-VHE * system may enable SPE here and make use of the TTBRs. */ - __debug_switch_to_host(vcpu); + __debug_restore_host_buffers_nvhe(vcpu); if (pmu_switch_needed) __pmu_switch_to_host(host_ctxt); @@ -754,7 +769,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, * making sure it is a kernel address and not a PC-relative * reference. */ - asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); + asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); __hyp_do_panic(str_va, spsr, elr, @@ -791,3 +806,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) unreachable(); } + +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index eb0efc5557f3..7b7213fc17d9 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -182,7 +182,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) __tlb_switch_to_host(kvm, &cxt); } -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); struct tlb_inv_context cxt; @@ -191,6 +191,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) __tlb_switch_to_guest(kvm, &cxt); __tlbi(vmalle1); + asm volatile("ic iallu"); dsb(nsh); isb(); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f4a8ae918827..a3105ae464be 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -258,7 +258,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; - int ret = -EINVAL; + int ret; bool loaded; /* Reset PMU outside of the non-preemptible section */ @@ -281,15 +281,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { - if (kvm_vcpu_enable_ptrauth(vcpu)) + if (kvm_vcpu_enable_ptrauth(vcpu)) { + ret = -EINVAL; goto out; + } } switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { - if (!cpu_has_32bit_el1()) + if (!cpu_has_32bit_el1()) { + ret = -EINVAL; goto out; + } cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; @@ -374,10 +378,10 @@ void kvm_set_ipa_limit(void) pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", (va_max < pa_max) ? 
"Virtual" : "Physical"); - WARN(ipa_max < KVM_PHYS_SHIFT, - "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); kvm_ipa_limit = ipa_max; - kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); + kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit, + ((kvm_ipa_limit < KVM_PHYS_SHIFT) ? + " (Reduced IPA size, limited VM/VMM compatibility)" : "")); } /* @@ -404,6 +408,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) return -EINVAL; } else { phys_shift = KVM_PHYS_SHIFT; + if (phys_shift > kvm_ipa_limit) { + pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n", + current->comm); + return -EINVAL; + } } parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 01a515e0171e..98a177dd1f89 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -625,6 +625,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 pmcr, val; + /* No PMU available, PMCR_EL0 may UNDEF... */ + if (!kvm_arm_support_pmu_v3()) + return; + pmcr = read_sysreg(pmcr_el0); /* * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN @@ -1132,16 +1136,6 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, return REG_HIDDEN_USER | REG_HIDDEN_GUEST; } -/* Visibility overrides for SVE-specific ID registers */ -static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) -{ - if (vcpu_has_sve(vcpu)) - return 0; - - return REG_HIDDEN_USER; -} - /* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */ static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu) { @@ -1168,9 +1162,6 @@ static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, { u64 val; - if (WARN_ON(!vcpu_has_sve(vcpu))) - return -ENOENT; - val = guest_id_aa64zfr0_el1(vcpu); return reg_to_user(uaddr, &val, reg->id); } @@ -1183,9 +1174,6 @@ static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, int err; u64 val; - if (WARN_ON(!vcpu_has_sve(vcpu))) - return -ENOENT; - err = reg_from_user(&val, uaddr, id); if (err) return err; @@ -1280,10 +1268,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { + int reg = r->reg; + + /* See the 32bit mapping in kvm_host.h */ + if (p->is_aarch32) + reg = r->reg / 2; + if (p->is_write) - vcpu_write_sys_reg(vcpu, p->regval, r->reg); + vcpu_write_sys_reg(vcpu, p->regval, reg); else - p->regval = vcpu_read_sys_reg(vcpu, r->reg); + p->regval = vcpu_read_sys_reg(vcpu, reg); return true; } @@ -1442,7 +1436,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_SANITISED(ID_AA64PFR1_EL1), ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), - { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility }, + { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, }, ID_UNALLOCATED(4,5), ID_UNALLOCATED(4,6), ID_UNALLOCATED(4,7), @@ -1740,9 +1734,9 @@ static const struct sys_reg_desc cp14_regs[] = { { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(1), /* DBGDCCINT */ - { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT }, /* DBGDSCRext */ - { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, + { Op1( 0), CRn( 
0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext }, DBG_BCR_BVR_WCR_WVR(2), /* DBGDTR[RT]Xint */ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, @@ -1757,7 +1751,7 @@ static const struct sys_reg_desc cp14_regs[] = { { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(6), /* DBGVCR */ - { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, + { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR }, DBG_BCR_BVR_WCR_WVR(7), DBG_BCR_BVR_WCR_WVR(8), DBG_BCR_BVR_WCR_WVR(9), @@ -1847,6 +1841,7 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 }, { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index 78a9ef66288a..073acbf02a7c 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -14,7 +14,7 @@ * Parameters: * x0 - dest */ -ENTRY(clear_page) +SYM_FUNC_START(clear_page) mrs x1, dczid_el0 and w1, w1, #0xf mov x2, #4 @@ -25,5 +25,5 @@ ENTRY(clear_page) tst x0, #(PAGE_SIZE - 1) b.ne 1b ret -ENDPROC(clear_page) +SYM_FUNC_END(clear_page) EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index aeafc03e961a..48a3a26eff66 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -19,7 +19,7 @@ * * Alignment fixed up by hardware. */ -ENTRY(__arch_clear_user) +SYM_FUNC_START(__arch_clear_user) mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -40,7 +40,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 ret -ENDPROC(__arch_clear_user) +SYM_FUNC_END(__arch_clear_user) EXPORT_SYMBOL(__arch_clear_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index ebb3c06cbb5d..8e25e89ad01f 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -53,12 +53,12 @@ .endm end .req x5 -ENTRY(__arch_copy_from_user) +SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 // Nothing to copy ret -ENDPROC(__arch_copy_from_user) +SYM_FUNC_END(__arch_copy_from_user) EXPORT_SYMBOL(__arch_copy_from_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 3d8153a1ebce..667139013ed1 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -55,12 +55,12 @@ end .req x5 -ENTRY(__arch_copy_in_user) +SYM_FUNC_START(__arch_copy_in_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 ret -ENDPROC(__arch_copy_in_user) +SYM_FUNC_END(__arch_copy_in_user) EXPORT_SYMBOL(__arch_copy_in_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index bbb8562396af..e125a84eb400 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -17,7 +17,7 @@ * x0 - dest * x1 - src */ -ENTRY(copy_page) +SYM_FUNC_START(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH // Prefetch three cache lines ahead. 
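A quick illustration of the KVM change near the top of this section: with the stricter check, a VMM that passes a machine type of 0 (the 40-bit default) on a reduced-IPA host is now refused with -EINVAL. A minimal userspace sketch of sizing the IPA explicitly, assuming only the standard KVM ioctls (error handling omitted; illustrative, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	/* Host's supported IPA limit in bits; 0 means the extension is absent. */
	int ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
	/* Encode the IPA size into the machine type instead of passing 0. */
	unsigned long type = ipa > 0 ? KVM_VM_TYPE_ARM_IPA_SIZE(ipa) : 0;
	int vm = ioctl(kvm, KVM_CREATE_VM, type);

	printf("IPA limit %d bits, VM fd %d\n", ipa, vm);
	return 0;
}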
prfm pldl1strm, [x1, #128] @@ -75,5 +75,5 @@ alternative_else_nop_endif stnp x16, x17, [x0, #112] ret -ENDPROC(copy_page) +SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 357eae2c18eb..1a104d0089f3 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -52,12 +52,12 @@ .endm end .req x5 -ENTRY(__arch_copy_to_user) +SYM_FUNC_START(__arch_copy_to_user) add end, x0, x2 #include "copy_template.S" mov x0, #0 ret -ENDPROC(__arch_copy_to_user) +SYM_FUNC_END(__arch_copy_to_user) EXPORT_SYMBOL(__arch_copy_to_user) .section .fixup,"ax" diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S index e6135f16649b..243e107e9896 100644 --- a/arch/arm64/lib/crc32.S +++ b/arch/arm64/lib/crc32.S @@ -85,17 +85,17 @@ CPU_BE( rev16 w3, w3 ) .endm .align 5 -ENTRY(crc32_le) +SYM_FUNC_START(crc32_le) alternative_if_not ARM64_HAS_CRC32 b crc32_le_base alternative_else_nop_endif __crc32 -ENDPROC(crc32_le) +SYM_FUNC_END(crc32_le) .align 5 -ENTRY(__crc32c_le) +SYM_FUNC_START(__crc32c_le) alternative_if_not ARM64_HAS_CRC32 b __crc32c_le_base alternative_else_nop_endif __crc32 c -ENDPROC(__crc32c_le) +SYM_FUNC_END(__crc32c_le) diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 48a3ab636e4f..edf6b970a277 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -19,7 +19,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -WEAK(memchr) +SYM_FUNC_START_WEAK_PI(memchr) and w1, w1, #0xff 1: subs x2, x2, #1 b.mi 2f @@ -30,5 +30,5 @@ WEAK(memchr) ret 2: mov x0, #0 ret -ENDPIPROC(memchr) +SYM_FUNC_END_PI(memchr) EXPORT_SYMBOL_NOKASAN(memchr) diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index b297bdaaf549..c0671e793ea9 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -46,7 +46,7 @@ pos .req x11 limit_wd .req x12 mask .req x13 -WEAK(memcmp) +SYM_FUNC_START_WEAK_PI(memcmp) cbz limit, .Lret0 eor tmp1, src1, src2 tst tmp1, #7 @@ -243,5 +243,5 @@ CPU_LE( rev data2, data2 ) .Lret0: mov result, #0 ret -ENDPIPROC(memcmp) +SYM_FUNC_END_PI(memcmp) EXPORT_SYMBOL_NOKASAN(memcmp) diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index d79f48994dbb..b03cbb3455d4 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -56,12 +56,11 @@ stp \ptr, \regB, [\regC], \val .endm - .weak memcpy -ENTRY(__memcpy) -ENTRY(memcpy) +SYM_FUNC_START_ALIAS(__memcpy) +SYM_FUNC_START_WEAK_PI(memcpy) #include "copy_template.S" ret -ENDPIPROC(memcpy) +SYM_FUNC_END_PI(memcpy) EXPORT_SYMBOL(memcpy) -ENDPROC(__memcpy) +SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(__memcpy) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index 784775136480..1035dce4bdaf 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -45,9 +45,8 @@ C_h .req x12 D_l .req x13 D_h .req x14 - .weak memmove -ENTRY(__memmove) -ENTRY(memmove) +SYM_FUNC_START_ALIAS(__memmove) +SYM_FUNC_START_WEAK_PI(memmove) cmp dstin, src b.lo __memcpy add tmp1, src, count @@ -184,7 +183,7 @@ ENTRY(memmove) tst count, #0x3f b.ne .Ltail63 ret -ENDPIPROC(memmove) +SYM_FUNC_END_PI(memmove) EXPORT_SYMBOL(memmove) -ENDPROC(__memmove) +SYM_FUNC_END_ALIAS(__memmove) EXPORT_SYMBOL(__memmove) diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index 9fb97e6bc560..a9c1c9a01ea9 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -42,9 +42,8 @@ dst .req x8 tmp3w .req w9 tmp3 .req x9 - .weak memset -ENTRY(__memset) -ENTRY(memset) 
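The SYM_FUNC_START_ALIAS/SYM_FUNC_START_WEAK_PI pairs in these hunks replace the old .weak plus double-ENTRY idiom while preserving the linkage: memcpy/memmove/memset stay weak (so an instrumented build can interpose its own versions) and the double-underscore strong names remain reachable. A rough C analogue of that linkage, with hypothetical names (my_memcpy is not part of the patch):

#include <stddef.h>

/* Strong implementation under the double-underscore name. */
void *__my_memcpy(void *dst, const void *src, size_t n)
{
	char *d = dst;
	const char *s = src;

	while (n--)
		*d++ = *s++;
	return dst;
}

/* Weak alias: a sanitizer or platform override wins if one is linked,
 * but callers of __my_memcpy always get the raw routine. */
void *my_memcpy(void *dst, const void *src, size_t n)
	__attribute__((weak, alias("__my_memcpy")));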
+SYM_FUNC_START_ALIAS(__memset) +SYM_FUNC_START_WEAK_PI(memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 @@ -203,7 +202,7 @@ ENTRY(memset) ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret -ENDPIPROC(memset) +SYM_FUNC_END_PI(memset) EXPORT_SYMBOL(memset) -ENDPROC(__memset) +SYM_FUNC_END_ALIAS(__memset) EXPORT_SYMBOL(__memset) diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S index ca3ec18171a4..1f47eae3b0d6 100644 --- a/arch/arm64/lib/strchr.S +++ b/arch/arm64/lib/strchr.S @@ -18,7 +18,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -WEAK(strchr) +SYM_FUNC_START_WEAK(strchr) and w1, w1, #0xff 1: ldrb w2, [x0], #1 cmp w2, w1 @@ -28,5 +28,5 @@ WEAK(strchr) cmp w2, w1 csel x0, x0, xzr, eq ret -ENDPROC(strchr) +SYM_FUNC_END(strchr) EXPORT_SYMBOL_NOKASAN(strchr) diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index e9aefbe0b740..4767540d1b94 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -48,7 +48,7 @@ tmp3 .req x9 zeroones .req x10 pos .req x11 -WEAK(strcmp) +SYM_FUNC_START_WEAK_PI(strcmp) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 @@ -219,5 +219,5 @@ CPU_BE( orr syndrome, diff, has_nul ) lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret -ENDPIPROC(strcmp) +SYM_FUNC_END_PI(strcmp) EXPORT_SYMBOL_NOKASAN(strcmp) diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 87b0cb066915..ee3ed882dd79 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -44,7 +44,7 @@ pos .req x12 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -WEAK(strlen) +SYM_FUNC_START_WEAK_PI(strlen) mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 @@ -111,5 +111,5 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned -ENDPIPROC(strlen) +SYM_FUNC_END_PI(strlen) EXPORT_SYMBOL_NOKASAN(strlen) diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index f571581888fa..2a7ee949ed47 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -52,7 +52,7 @@ limit_wd .req x13 mask .req x14 endloop .req x15 -WEAK(strncmp) +SYM_FUNC_START_WEAK_PI(strncmp) cbz limit, .Lret0 eor tmp1, src1, src2 mov zeroones, #REP8_01 @@ -295,5 +295,5 @@ CPU_BE( orr syndrome, diff, has_nul ) .Lret0: mov result, #0 ret -ENDPIPROC(strncmp) +SYM_FUNC_END_PI(strncmp) EXPORT_SYMBOL_NOKASAN(strncmp) diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index c0bac9493c68..b72913a99038 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -47,7 +47,7 @@ limit_wd .req x14 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -WEAK(strnlen) +SYM_FUNC_START_WEAK_PI(strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 @@ -156,5 +156,5 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). 
*/ .Lhit_limit: mov len, limit ret -ENDPIPROC(strnlen) +SYM_FUNC_END_PI(strnlen) EXPORT_SYMBOL_NOKASAN(strnlen) diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S index 794ac49ea433..13132d1ed6d1 100644 --- a/arch/arm64/lib/strrchr.S +++ b/arch/arm64/lib/strrchr.S @@ -18,7 +18,7 @@ * Returns: * x0 - address of last occurrence of 'c' or 0 */ -WEAK(strrchr) +SYM_FUNC_START_WEAK_PI(strrchr) mov x3, #0 and w1, w1, #0xff 1: ldrb w2, [x0], #1 @@ -29,5 +29,5 @@ WEAK(strrchr) b 1b 2: mov x0, x3 ret -ENDPIPROC(strrchr) +SYM_FUNC_END_PI(strrchr) EXPORT_SYMBOL_NOKASAN(strrchr) diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S index 047622536535..a88613834fb0 100644 --- a/arch/arm64/lib/tishift.S +++ b/arch/arm64/lib/tishift.S @@ -7,7 +7,7 @@ #include -ENTRY(__ashlti3) +SYM_FUNC_START(__ashlti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -26,10 +26,10 @@ ENTRY(__ashlti3) lsl x1, x0, x1 mov x0, x2 ret -ENDPROC(__ashlti3) +SYM_FUNC_END(__ashlti3) EXPORT_SYMBOL(__ashlti3) -ENTRY(__ashrti3) +SYM_FUNC_START(__ashrti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -48,10 +48,10 @@ ENTRY(__ashrti3) asr x0, x1, x0 mov x1, x2 ret -ENDPROC(__ashrti3) +SYM_FUNC_END(__ashrti3) EXPORT_SYMBOL(__ashrti3) -ENTRY(__lshrti3) +SYM_FUNC_START(__lshrti3) cbz x2, 1f mov x3, #64 sub x3, x3, x2 @@ -70,5 +70,5 @@ ENTRY(__lshrti3) lsr x0, x1, x0 mov x1, x2 ret -ENDPROC(__lshrti3) +SYM_FUNC_END(__lshrti3) EXPORT_SYMBOL(__lshrti3) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d26e6cd28953..61a8502894d5 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -5,6 +5,7 @@ * Copyright (C) 1995 Linus Torvalds * Copyright (C) 1995-2004 Russell King * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2020 Ampere Computing LLC */ #include @@ -635,9 +636,752 @@ static int __kprobes do_translation_fault(unsigned long addr, return 0; } +static int copy_from_user_io(void *to, const void __user *from, unsigned long n) +{ + const u8 __user *src = from; + u8 *dest = to; + + for (; n; n--) + if (get_user(*dest++, src++)) + break; + return n; +} + +static int copy_to_user_io(void __user *to, const void *from, unsigned long n) +{ + const u8 *src = from; + u8 __user *dest = to; + + for (; n; n--) + if (put_user(*src++, dest++)) + break; + return n; +} + +static int align_load(unsigned long addr, int sz, u64 *out) +{ + union { + u8 d8; + u16 d16; + u32 d32; + u64 d64; + char c[8]; + } data; + + if (sz != 1 && sz != 2 && sz != 4 && sz != 8) + return 1; + if (is_ttbr0_addr(addr)) { + if (copy_from_user_io(data.c, (const void __user *)addr, sz)) + return 1; + } else + memcpy_fromio(data.c, (const void __iomem *)addr, sz); + switch (sz) { + case 1: + *out = data.d8; + break; + case 2: + *out = data.d16; + break; + case 4: + *out = data.d32; + break; + case 8: + *out = data.d64; + break; + default: + return 1; + } + return 0; +} + +static int align_store(unsigned long addr, int sz, u64 val) +{ + union { + u8 d8; + u16 d16; + u32 d32; + u64 d64; + char c[8]; + } data; + + switch (sz) { + case 1: + data.d8 = val; + break; + case 2: + data.d16 = val; + break; + case 4: + data.d32 = val; + break; + case 8: + data.d64 = val; + break; + default: + return 1; + } + if (is_ttbr0_addr(addr)) { + if (copy_to_user_io((void __user *)addr, data.c, sz)) + return 1; + } else + memcpy_toio((void __iomem *)addr, data.c, sz); + return 0; +} + +static int align_dc_zva(unsigned long addr, struct pt_regs *regs) +{ + int bs = read_cpuid(DCZID_EL0) & 0xf; + int sz = 1 << (bs + 2); + + addr &= ~(sz - 1); + if (is_ttbr0_addr(addr)) 
{
+		for (; sz; sz--) {
+			if (align_store(addr, 1, 0))
+				return 1;
+			addr++;
+		}
+	} else
+		memset_io((void *)addr, 0, sz);
+	return 0;
+}
+
+static u64 get_vn_dt(int n, int t)
+{
+	u64 res;
+
+	switch (n) {
+#define V(n)							\
+	case n:							\
+		asm("cbnz %w1, 1f\n\t"				\
+		    "mov %0, v"#n".d[0]\n\t"			\
+		    "b 2f\n\t"					\
+		    "1: mov %0, v"#n".d[1]\n\t"			\
+		    "2:" : "=r" (res) : "r" (t));		\
+		break
+	V( 0); V( 1); V( 2); V( 3); V( 4); V( 5); V( 6); V( 7);
+	V( 8); V( 9); V(10); V(11); V(12); V(13); V(14); V(15);
+	V(16); V(17); V(18); V(19); V(20); V(21); V(22); V(23);
+	V(24); V(25); V(26); V(27); V(28); V(29); V(30); V(31);
+#undef V
+	default:
+		res = 0;
+		break;
+	}
+	return res;
+}
+
+static void set_vn_dt(int n, int t, u64 val)
+{
+	switch (n) {
+#define V(n)							\
+	case n:							\
+		asm("cbnz %w1, 1f\n\t"				\
+		    "mov v"#n".d[0], %0\n\t"			\
+		    "b 2f\n\t"					\
+		    "1: mov v"#n".d[1], %0\n\t"			\
+		    "2:" :: "r" (val), "r" (t));		\
+		break
+	V( 0); V( 1); V( 2); V( 3); V( 4); V( 5); V( 6); V( 7);
+	V( 8); V( 9); V(10); V(11); V(12); V(13); V(14); V(15);
+	V(16); V(17); V(18); V(19); V(20); V(21); V(22); V(23);
+	V(24); V(25); V(26); V(27); V(28); V(29); V(30); V(31);
+#undef V
+	default:
+		break;
+	}
+}
+
+static int align_ldst_pair(u32 insn, struct pt_regs *regs)
+{
+	const u32 OPC = GENMASK(31, 30);
+	const u32 L_MASK = BIT(22);
+
+	int opc = FIELD_GET(OPC, insn);
+	int L = FIELD_GET(L_MASK, insn);
+
+	bool wback = !!(insn & BIT(23));
+	bool postindex = !(insn & BIT(24));
+
+	int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn);
+	int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+	int t2 = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT2, insn);
+	bool is_store = !L;
+	bool is_signed = !!(opc & 1);
+	int scale = 2 + (opc >> 1);
+	int datasize = 8 << scale;
+	u64 uoffset = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_7, insn);
+	s64 offset = sign_extend64(uoffset, 6) << scale;
+	u64 address;
+	u64 data1, data2;
+	u64 dbytes;
+
+	if ((is_store && (opc & 1)) || opc == 3)
+		return 1;
+
+	if (wback && (t == n || t2 == n) && n != 31)
+		return 1;
+
+	if (!is_store && t == t2)
+		return 1;
+
+	dbytes = datasize / 8;
+
+	address = regs_get_register(regs, n << 3);
+
+	if (!postindex)
+		address += offset;
+
+	if (is_store) {
+		data1 = pt_regs_read_reg(regs, t);
+		data2 = pt_regs_read_reg(regs, t2);
+		if (align_store(address, dbytes, data1) ||
+		    align_store(address + dbytes, dbytes, data2))
+			return 1;
+	} else {
+		if (align_load(address, dbytes, &data1) ||
+		    align_load(address + dbytes, dbytes, &data2))
+			return 1;
+		if (is_signed) {
+			data1 = sign_extend64(data1, datasize - 1);
+			data2 = sign_extend64(data2, datasize - 1);
+		}
+		pt_regs_write_reg(regs, t, data1);
+		pt_regs_write_reg(regs, t2, data2);
+	}
+
+	if (wback) {
+		if (postindex)
+			address += offset;
+		if (n == 31)
+			regs->sp = address;
+		else
+			pt_regs_write_reg(regs, n, address);
+	}
+
+	return 0;
+}
+
+static int align_ldst_pair_simdfp(u32 insn, struct pt_regs *regs)
+{
+	const u32 OPC = GENMASK(31, 30);
+	const u32 L_MASK = BIT(22);
+
+	int opc = FIELD_GET(OPC, insn);
+	int L = FIELD_GET(L_MASK, insn);
+
+	bool wback = !!(insn & BIT(23));
+	bool postindex = !(insn & BIT(24));
+
+	int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn);
+	int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+	int t2 = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT2, insn);
+	bool is_store = !L;
+	int scale = 2 + opc;
+	int datasize = 8 << scale;
+	u64 uoffset = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_7, insn);
+	s64 offset =
sign_extend64(uoffset, 6) << scale; + u64 address; + u64 data1_d0, data1_d1, data2_d0, data2_d1; + u64 dbytes; + + if (opc == 0x3) + return 1; + + if (!is_store && t == t2) + return 1; + + dbytes = datasize / 8; + + address = regs_get_register(regs, n << 3); + + if (!postindex) + address += offset; + + if (is_store) { + data1_d0 = get_vn_dt(t, 0); + data2_d0 = get_vn_dt(t2, 0); + if (datasize == 128) { + data1_d1 = get_vn_dt(t, 1); + data2_d1 = get_vn_dt(t2, 1); + if (align_store(address, 8, data1_d0) || + align_store(address + 8, 8, data1_d1) || + align_store(address + 16, 8, data2_d0) || + align_store(address + 24, 8, data2_d1)) + return 1; + } else { + if (align_store(address, dbytes, data1_d0) || + align_store(address + dbytes, dbytes, data2_d0)) + return 1; + } + } else { + if (datasize == 128) { + if (align_load(address, 8, &data1_d0) || + align_load(address + 8, 8, &data1_d1) || + align_load(address + 16, 8, &data2_d0) || + align_load(address + 24, 8, &data2_d1)) + return 1; + } else { + if (align_load(address, dbytes, &data1_d0) || + align_load(address + dbytes, dbytes, &data2_d0)) + return 1; + data1_d1 = data2_d1 = 0; + } + set_vn_dt(t, 0, data1_d0); + set_vn_dt(t, 1, data1_d1); + set_vn_dt(t2, 0, data2_d0); + set_vn_dt(t2, 1, data2_d1); + } + + if (wback) { + if (postindex) + address += offset; + if (n == 31) + regs->sp = address; + else + pt_regs_write_reg(regs, n, address); + } + + return 0; +} + +static int align_ldst_regoff(u32 insn, struct pt_regs *regs) +{ + const u32 SIZE = GENMASK(31, 30); + const u32 OPC = GENMASK(23, 22); + const u32 OPTION = GENMASK(15, 13); + const u32 S = BIT(12); + + u32 size = FIELD_GET(SIZE, insn); + u32 opc = FIELD_GET(OPC, insn); + u32 option = FIELD_GET(OPTION, insn); + u32 s = FIELD_GET(S, insn); + int scale = size; + int extend_len = (option & 0x1) ? 64 : 32; + bool extend_unsigned = !(option & 0x4); + int shift = s ? scale : 0; + + int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn); + int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); + int m = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RM, insn); + bool is_store; + bool is_signed; + int regsize; + int datasize; + u64 offset; + u64 address; + u64 data; + + if ((opc & 0x2) == 0) { + /* store or zero-extending load */ + is_store = !(opc & 0x1); + regsize = size == 0x3 ? 64 : 32; + is_signed = false; + } else { + if (size == 0x3) { + if ((opc & 0x1) == 0) { + /* prefetch */ + return 0; + } else { + /* undefined */ + return 1; + } + } else { + /* sign-extending load */ + is_store = false; + if (size == 0x2 && (opc & 0x1) == 0x1) { + /* undefined */ + return 1; + } + regsize = (opc & 0x1) == 0x1 ? 
32 : 64;
+			is_signed = true;
+		}
+	}
+
+	datasize = 8 << scale;
+
+	if (n == t && n != 31)
+		return 1;
+
+	offset = pt_regs_read_reg(regs, m);
+	if (extend_len == 32) {
+		offset &= (u32)~0;
+		if (!extend_unsigned)
+			offset = sign_extend64(offset, 31);
+	}
+	offset <<= shift;
+
+	address = regs_get_register(regs, n << 3) + offset;
+
+	if (is_store) {
+		data = pt_regs_read_reg(regs, t);
+		if (align_store(address, datasize / 8, data))
+			return 1;
+	} else {
+		if (align_load(address, datasize / 8, &data))
+			return 1;
+		if (is_signed) {
+			if (regsize == 32)
+				data = sign_extend32(data, datasize - 1);
+			else
+				data = sign_extend64(data, datasize - 1);
+		}
+		pt_regs_write_reg(regs, t, data);
+	}
+
+	return 0;
+}
+
+static int align_ldst_regoff_simdfp(u32 insn, struct pt_regs *regs)
+{
+	const u32 SIZE = GENMASK(31, 30);
+	const u32 OPC = GENMASK(23, 22);
+	const u32 OPTION = GENMASK(15, 13);
+	const u32 S = BIT(12);
+
+	u32 size = FIELD_GET(SIZE, insn);
+	u32 opc = FIELD_GET(OPC, insn);
+	u32 option = FIELD_GET(OPTION, insn);
+	u32 s = FIELD_GET(S, insn);
+	int scale = (opc & 0x2) << 1 | size;
+	int extend_len = (option & 0x1) ? 64 : 32;
+	bool extend_unsigned = !(option & 0x4);
+	int shift = s ? scale : 0;
+
+	int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn);
+	int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+	int m = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RM, insn);
+	bool is_store = !(opc & BIT(0));
+	int datasize;
+	u64 offset;
+	u64 address;
+	u64 data_d0, data_d1;
+
+	if ((opc & 0x2) == 0)
+		return 1;
+
+	datasize = 8 << scale;
+
+	if (n == t && n != 31)
+		return 1;
+
+	offset = pt_regs_read_reg(regs, m);
+	if (extend_len == 32) {
+		offset &= (u32)~0;
+		if (!extend_unsigned)
+			offset = sign_extend64(offset, 31);
+	}
+	offset <<= shift;
+
+	address = regs_get_register(regs, n << 3) + offset;
+
+	if (is_store) {
+		data_d0 = get_vn_dt(t, 0);
+		if (datasize == 128) {
+			data_d1 = get_vn_dt(t, 1);
+			if (align_store(address, 8, data_d0) ||
+			    align_store(address + 8, 8, data_d1))
+				return 1;
+		} else {
+			if (align_store(address, datasize / 8, data_d0))
+				return 1;
+		}
+	} else {
+		if (datasize == 128) {
+			if (align_load(address, 8, &data_d0) ||
+			    align_load(address + 8, 8, &data_d1))
+				return 1;
+		} else {
+			if (align_load(address, datasize / 8, &data_d0))
+				return 1;
+			data_d1 = 0;
+		}
+		set_vn_dt(t, 0, data_d0);
+		set_vn_dt(t, 1, data_d1);
+	}
+
+	return 0;
+}
+
+static int align_ldst_imm(u32 insn, struct pt_regs *regs)
+{
+	const u32 SIZE = GENMASK(31, 30);
+	const u32 OPC = GENMASK(23, 22);
+
+	u32 size = FIELD_GET(SIZE, insn);
+	u32 opc = FIELD_GET(OPC, insn);
+	bool wback = !(insn & BIT(24)) && !!(insn & BIT(10));
+	bool postindex = wback && !(insn & BIT(11));
+	int scale = size;
+	u64 offset;
+
+	int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn);
+	int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+	bool is_store;
+	bool is_signed;
+	int regsize;
+	int datasize;
+	u64 address;
+	u64 data;
+
+	if (!(insn & BIT(24))) {
+		u64 uoffset =
+			aarch64_insn_decode_immediate(AARCH64_INSN_IMM_9, insn);
+		offset = sign_extend64(uoffset, 8);
+	} else {
+		offset = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12, insn);
+		offset <<= scale;
+	}
+
+	if ((opc & 0x2) == 0) {
+		/* store or zero-extending load */
+		is_store = !(opc & 0x1);
+		regsize = size == 0x3 ?
64 : 32; + is_signed = false; + } else { + if (size == 0x3) { + if (FIELD_GET(GENMASK(11, 10), insn) == 0 && (opc & 0x1) == 0) { + /* prefetch */ + return 0; + } else { + /* undefined */ + return 1; + } + } else { + /* sign-extending load */ + is_store = false; + if (size == 0x2 && (opc & 0x1) == 0x1) { + /* undefined */ + return 1; + } + regsize = (opc & 0x1) == 0x1 ? 32 : 64; + is_signed = true; + } + } + + datasize = 8 << scale; + + if (n == t && n != 31) + return 1; + + address = regs_get_register(regs, n << 3); + + if (!postindex) + address += offset; + + if (is_store) { + data = pt_regs_read_reg(regs, t); + if (align_store(address, datasize / 8, data)) + return 1; + } else { + if (align_load(address, datasize / 8, &data)) + return 1; + if (is_signed) { + if (regsize == 32) + data = sign_extend32(data, datasize - 1); + else + data = sign_extend64(data, datasize - 1); + } + pt_regs_write_reg(regs, t, data); + } + + if (wback) { + if (postindex) + address += offset; + if (n == 31) + regs->sp = address; + else + pt_regs_write_reg(regs, n, address); + } + + return 0; +} + +static int align_ldst_imm_simdfp(u32 insn, struct pt_regs *regs) +{ + const u32 SIZE = GENMASK(31, 30); + const u32 OPC = GENMASK(23, 22); + + u32 size = FIELD_GET(SIZE, insn); + u32 opc = FIELD_GET(OPC, insn); + bool wback = !(insn & BIT(24)) && !!(insn & BIT(10)); + bool postindex = wback && !(insn & BIT(11)); + int scale = (opc & 0x2) << 1 | size; + u64 offset; + + int n = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn); + int t = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); + bool is_store = !(opc & BIT(0)) ; + int datasize; + u64 address; + u64 data_d0, data_d1; + + if (scale > 4) + return 1; + + if (!(insn & BIT(24))) { + u64 uoffset = + aarch64_insn_decode_immediate(AARCH64_INSN_IMM_9, insn); + offset = sign_extend64(uoffset, 8); + } else { + offset = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12, insn); + offset <<= scale; + } + + datasize = 8 << scale; + + address = regs_get_register(regs, n << 3); + + if (!postindex) + address += offset; + + if (is_store) { + data_d0 = get_vn_dt(t, 0); + if (datasize == 128) { + data_d1 = get_vn_dt(t, 1); + if (align_store(address, 8, data_d0) || + align_store(address + 8, 8, data_d1)) + return 1; + } else { + if (align_store(address, datasize / 8, data_d0)) + return 1; + } + } else { + if (datasize == 128) { + if (align_load(address, 8, &data_d0) || + align_load(address + 8, 8, &data_d1)) + return 1; + } else { + if (align_load(address, datasize / 8, &data_d0)) + return 1; + data_d1 = 0; + } + set_vn_dt(t, 0, data_d0); + set_vn_dt(t, 1, data_d1); + } + + if (wback) { + if (postindex) + address += offset; + if (n == 31) + regs->sp = address; + else + pt_regs_write_reg(regs, n, address); + } + + return 0; +} + +static int align_ldst(u32 insn, struct pt_regs *regs) +{ + const u32 op0 = FIELD_GET(GENMASK(31, 28), insn); + const u32 op1 = FIELD_GET(BIT(26), insn); + const u32 op2 = FIELD_GET(GENMASK(24, 23), insn); + const u32 op3 = FIELD_GET(GENMASK(21, 16), insn); + const u32 op4 = FIELD_GET(GENMASK(11, 10), insn); + + if ((op0 & 0x3) == 0x2) { + /* + * |------+-----+-----+-----+-----+-----------------------------------------| + * | op0 | op1 | op2 | op3 | op4 | Decode group | + * |------+-----+-----+-----+-----+-----------------------------------------| + * | xx10 | - | 00 | - | - | Load/store no-allocate pair (offset) | + * | xx10 | - | 01 | - | - | Load/store register pair (post-indexed) | + * | xx10 | - | 10 | - | - | Load/store register pair 
(offset) | + * | xx10 | - | 11 | - | - | Load/store register pair (pre-indexed) | + * |------+-----+-----+-----+-----+-----------------------------------------| + */ + + if (op1 == 0) { /* V == 0 */ + /* general */ + return align_ldst_pair(insn, regs); + } else { + /* simdfp */ + return align_ldst_pair_simdfp(insn, regs); + } + } else if ((op0 & 0x3) == 0x3 && + (((op2 & 0x2) == 0 && (op3 & 0x20) == 0 && op4 != 0x2) || + ((op2 & 0x2) == 0x2))) { + /* + * |------+-----+-----+--------+-----+----------------------------------------------| + * | op0 | op1 | op2 | op3 | op4 | Decode group | + * |------+-----+-----+--------+-----+----------------------------------------------| + * | xx11 | - | 0x | 0xxxxx | 00 | Load/store register (unscaled immediate) | + * | xx11 | - | 0x | 0xxxxx | 01 | Load/store register (immediate post-indexed) | + * | xx11 | - | 0x | 0xxxxx | 11 | Load/store register (immediate pre-indexed) | + * | xx11 | - | 1x | - | - | Load/store register (unsigned immediate) | + * |------+-----+-----+--------+-----+----------------------------------------------| + */ + + if (op1 == 0) { /* V == 0 */ + /* general */ + return align_ldst_imm(insn, regs); + } else { + /* simdfp */ + return align_ldst_imm_simdfp(insn, regs); + } + } else if ((op0 & 0x3) == 0x3 && (op2 & 0x2) == 0 && + (op3 & 0x20) == 0x20 && op4 == 0x2) { + /* + * |------+-----+-----+--------+-----+---------------------------------------| + * | op0 | op1 | op2 | op3 | op4 | | + * |------+-----+-----+--------+-----+---------------------------------------| + * | xx11 | - | 0x | 1xxxxx | 10 | Load/store register (register offset) | + * |------+-----+-----+--------+-----+---------------------------------------| + */ + if (op1 == 0) { /* V == 0 */ + /* general */ + return align_ldst_regoff(insn, regs); + } else { + /* simdfp */ + return align_ldst_regoff_simdfp(insn, regs); + } + } else + return 1; +} + +static int fixup_alignment(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + u32 insn; + int res; + + if (user_mode(regs)) { + __le32 insn_le; + + if (!is_ttbr0_addr(addr)) + return 1; + + if (get_user(insn_le, + (__le32 __user *)instruction_pointer(regs))) + return 1; + insn = le32_to_cpu(insn_le); + } else { + if (aarch64_insn_read((void *)instruction_pointer(regs), &insn)) + return 1; + } + + switch (aarch64_get_insn_class(insn)) { + case AARCH64_INSN_CLS_BR_SYS: + if (aarch64_insn_is_dc_zva(insn)) + res = align_dc_zva(addr, regs); + else + res = 1; + break; + case AARCH64_INSN_CLS_LDST: + res = align_ldst(insn, regs); + break; + default: + res = 1; + } + if (!res) { + instruction_pointer_set(regs, instruction_pointer(regs) + 4); + } + return res; +} + static int do_alignment_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { +#ifdef CONFIG_ALTRA_ERRATUM_82288 + if (!fixup_alignment(addr, esr, regs)) + return 0; +#endif do_bad_area(addr, esr, regs); return 0; } @@ -654,11 +1398,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) inf = esr_to_fault_info(esr); - /* - * Return value ignored as we rely on signal merging. - * Future patches will make this more robust. - */ - apei_claim_sea(regs); + if (user_mode(regs) && apei_claim_sea(regs) == 0) { + /* + * APEI claimed this as a firmware-first notification. + * Some processing deferred to task_work before ret_to_user(). 
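The decode tables above feed fixup_alignment(), which fetches the faulting instruction, emulates it byte-wise through the align_load()/align_store() helpers, and on success steps the PC past the instruction. A condensed sketch of that retire sequence, building on the helpers defined earlier (emulate_ldr32 is a hypothetical name, not part of the patch):

/*
 * Illustrative only: emulate a misaligned 32-bit load into Wt and
 * retire it, the way the handlers above do.
 */
static int emulate_ldr32(unsigned long addr, int t, struct pt_regs *regs)
{
	u64 data;

	if (align_load(addr, 4, &data))		/* byte-wise, MMIO-safe */
		return 1;			/* give up: caller raises SIGBUS */
	pt_regs_write_reg(regs, t, data);	/* Wt <- zero-extended value */
	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
	return 0;
}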
+ */ + return 0; + } if (esr & ESR_ELx_FnV) siaddr = NULL; diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index bbeb6a5a6ba6..0be3355e3499 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, ptep = (pte_t *)pudp; } else if (sz == (CONT_PTE_SIZE)) { pmdp = pmd_alloc(mm, pudp, addr); + if (!pmdp) + return NULL; WARN_ON(addr & (sz - 1)); /* diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7be414815780..2cfefe62d56b 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -50,12 +50,6 @@ s64 memstart_addr __ro_after_init = -1; EXPORT_SYMBOL(memstart_addr); -s64 physvirt_offset __ro_after_init; -EXPORT_SYMBOL(physvirt_offset); - -struct page *vmemmap __ro_after_init; -EXPORT_SYMBOL(vmemmap); - phys_addr_t arm64_dma_phys_limit __ro_after_init; #ifdef CONFIG_KEXEC_CORE @@ -252,6 +246,18 @@ int pfn_valid(unsigned long pfn) if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn)))) return 0; + + /* + * ZONE_DEVICE memory does not have the memblock entries. + * memblock_is_map_memory() check for ZONE_DEVICE based + * addresses will always fail. Even the normal hotplugged + * memory will never have MEMBLOCK_NOMAP flag set in their + * memblock entries. Skip memblock search for all non early + * memory sections covering all of hotplug memory including + * both normal and ZONE_DEVICE based. + */ + if (!early_section(__pfn_to_section(pfn))) + return pfn_section_valid(__pfn_to_section(pfn), pfn); #endif return memblock_is_map_memory(addr); } @@ -322,20 +328,6 @@ void __init arm64_memblock_init(void) memstart_addr = round_down(memblock_start_of_DRAM(), ARM64_MEMSTART_ALIGN); - physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; - - vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); - - /* - * If we are running with a 52-bit kernel VA config on a system that - * does not support it, we have to offset our vmemmap and physvirt_offset - * s.t. we avoid the 52-bit portion of the direct linear map - */ - if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { - vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; - physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); - } - /* * Remove the memory that we will not be able to cover with the * linear mapping. Take care not to clip the kernel which may be @@ -350,6 +342,16 @@ void __init arm64_memblock_init(void) memblock_remove(0, memstart_addr); } + /* + * If we are running with a 52-bit kernel VA config on a system that + * does not support it, we have to place the available physical + * memory in the 48-bit addressable part of the linear region, i.e., + * we have to move it upward. Since memstart_addr represents the + * physical address of PAGE_OFFSET, we have to *subtract* from it. + */ + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) + memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52); + /* * Apply the memory limit if it was set. 
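The subtraction above is easy to sanity-check numerically: _PAGE_OFFSET(va) is -(1UL << va), so the delta between the 48-bit and 52-bit linear-map bases is a fixed constant. A standalone sketch (the macro is re-derived locally and assumes 64-bit longs; not kernel code):

#include <stdio.h>

#define PAGE_OFFSET_AT(va)	(-(1UL << (va)))	/* mirrors _PAGE_OFFSET() */

int main(void)
{
	unsigned long delta = PAGE_OFFSET_AT(48) - PAGE_OFFSET_AT(52);

	/* Prints 0xf000000000000: memstart_addr moves down by 15 * 2^48. */
	printf("%#lx\n", delta);
	return 0;
}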
Since the kernel may be loaded * high up in memory, add back the kernel region that must be accessible diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 9be71bee902c..618b819691fe 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -18,6 +18,20 @@ #include #include +#ifdef CONFIG_ALTRA_ERRATUM_82288 +bool have_altra_erratum_82288 __read_mostly; +EXPORT_SYMBOL(have_altra_erratum_82288); + +static bool is_altra_pci(phys_addr_t phys_addr, size_t size) +{ + phys_addr_t end = phys_addr + size; + + return (phys_addr < 0x80000000 || + (end > 0x200000000000 && phys_addr < 0x400000000000) || + (end > 0x600000000000 && phys_addr < 0x800000000000)); +} +#endif + static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, pgprot_t prot, void *caller) { @@ -53,6 +67,11 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, addr = (unsigned long)area->addr; area->phys_addr = phys_addr; +#ifdef CONFIG_ALTRA_ERRATUM_82288 + if (unlikely(have_altra_erratum_82288 && is_altra_pci(phys_addr, size))) + prot = pgprot_device(prot); +#endif + err = ioremap_page_range(addr, addr + size, phys_addr, prot); if (err) { vunmap((void *)addr); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d10247fab0fd..99bc0289ab2b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -38,7 +38,7 @@ #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) -u64 idmap_t0sz = TCR_T0SZ(VA_BITS); +u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN); u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; u64 __section(".mmuoff.data.write") vabits_actual; diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 4decf1659700..53ebb4babf3a 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map); */ const struct cpumask *cpumask_of_node(int node) { - if (WARN_ON(node >= nr_node_ids)) + + if (node == NUMA_NO_NODE) + return cpu_all_mask; + + if (WARN_ON(node < 0 || node >= nr_node_ids)) return cpu_none_mask; if (WARN_ON(node_to_cpumask_map[node] == NULL)) diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c index 67a9ba9eaa96..cde44c13dda1 100644 --- a/arch/arm64/mm/physaddr.c +++ b/arch/arm64/mm/physaddr.c @@ -9,7 +9,7 @@ phys_addr_t __virt_to_phys(unsigned long x) { - WARN(!__is_lm_address(x), + WARN(!__is_lm_address(__tag_reset(x)), "virt_to_phys used for non-linear address: %pK (%pS)\n", (void *)x, (void *)x); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index cdc79de0c794..afc7d41347f7 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -141,14 +141,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, } } -static inline int bpf2a64_offset(int bpf_to, int bpf_from, +static inline int bpf2a64_offset(int bpf_insn, int off, const struct jit_ctx *ctx) { - int to = ctx->offset[bpf_to]; - /* -1 to account for the Branch instruction */ - int from = ctx->offset[bpf_from] - 1; - - return to - from; + /* BPF JMP offset is relative to the next instruction */ + bpf_insn++; + /* + * Whereas arm64 branch instructions encode the offset + * from the branch itself, so we must subtract 1 from the + * instruction offset. 
+ */ + return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1); } static void jit_fill_hole(void *area, unsigned int size) @@ -532,7 +535,7 @@ emit_bswap_uxt: /* JUMP off */ case BPF_JMP | BPF_JA: - jmp_offset = bpf2a64_offset(i + off, i, ctx); + jmp_offset = bpf2a64_offset(i, off, ctx); check_imm26(jmp_offset); emit(A64_B(jmp_offset), ctx); break; @@ -559,7 +562,7 @@ emit_bswap_uxt: case BPF_JMP32 | BPF_JSLE | BPF_X: emit(A64_CMP(is64, dst, src), ctx); emit_cond_jmp: - jmp_offset = bpf2a64_offset(i + off, i, ctx); + jmp_offset = bpf2a64_offset(i, off, ctx); check_imm19(jmp_offset); switch (BPF_OP(code)) { case BPF_JEQ: @@ -698,6 +701,19 @@ emit_cond_jmp: } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + /* + * Nothing required here. + * + * In case of arm64, we rely on the firmware mitigation of + * Speculative Store Bypass as controlled via the ssbd kernel + * parameter. Whenever the mitigation is enabled, it works + * for all of the kernel code with no need to provide any + * additional instructions. + */ + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: @@ -780,10 +796,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) const struct bpf_prog *prog = ctx->prog; int i; + /* + * - offset[0] offset of the end of prologue, + * start of the 1st instruction. + * - offset[1] - offset of the end of 1st instruction, + * start of the 2nd instruction + * [....] + * - offset[3] - offset of the end of 3rd instruction, + * start of 4th instruction + */ for (i = 0; i < prog->len; i++) { const struct bpf_insn *insn = &prog->insnsi[i]; int ret; + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; ret = build_insn(insn, ctx, extra_pass); if (ret > 0) { i++; @@ -791,11 +818,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) ctx->offset[i] = ctx->idx; continue; } - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; if (ret) return ret; } + /* + * offset is allocated with prog->len + 1 so fill in + * the last element with the offset after the last + * instruction (end of program) + */ + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; return 0; } @@ -871,7 +903,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; - ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); if (ctx.offset == NULL) { prog = orig_prog; goto out_off; @@ -951,7 +983,7 @@ skip_init_ctx: prog->jited_len = image_size; if (!prog->is_func || extra_pass) { - bpf_prog_fill_jited_linfo(prog, ctx.offset); + bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); out_off: kfree(ctx.offset); kfree(jit_data); diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 48b2e1b59119..4f48a2f0513b 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -220,7 +220,7 @@ config FORCE_MAX_ZONEORDER int "Maximum zone order" default "11" -config RAM_BASE +config DRAM_BASE hex "DRAM start addr (the same with memory-section in dts)" default 0x0 diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h index f35a9f3315ee..61d94ec7dd16 100644 --- a/arch/csky/abiv1/inc/abi/entry.h +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -167,15 +167,12 @@ * BA Reserved C D V */ cprcr r6, cpcr30 - lsri r6, 28 - lsli r6, 28 + lsri r6, 29 + lsli r6, 29 addi r6, 0xe cpwcr r6, cpcr30 - lsri r6, 28 - addi r6, 2 - lsli r6, 28 - addi r6, 0xe + movi r6, 0 cpwcr r6, cpcr31 .endm diff --git a/arch/csky/abiv2/fpu.c 
b/arch/csky/abiv2/fpu.c index 86d187d4e5af..5acc5c2e544e 100644 --- a/arch/csky/abiv2/fpu.c +++ b/arch/csky/abiv2/fpu.c @@ -10,11 +10,6 @@ #define MTCR_DIST 0xC0006420 #define MFCR_DIST 0xC0006020 -void __init init_fpu(void) -{ - mtcr("cr<1, 2>", 0); -} - /* * fpu_libc_helper() is to help libc to excute: * - mfcr %a, cr<1, 2> diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index 94a7a58765df..ac8f65a3e75a 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -13,6 +13,8 @@ #define LSAVE_A1 28 #define LSAVE_A2 32 #define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 #define KSPTOUSP #define USPTOKSP @@ -225,16 +227,13 @@ */ mfcr r6, cr<30, 15> /* Get MSA0 */ 2: - lsri r6, 28 - lsli r6, 28 + lsri r6, 29 + lsli r6, 29 addi r6, 0x1ce mtcr r6, cr<30, 15> /* Set MSA0 */ - lsri r6, 28 - addi r6, 2 - lsli r6, 28 - addi r6, 0x1ce - mtcr r6, cr<31, 15> /* Set MSA1 */ + movi r6, 0 + mtcr r6, cr<31, 15> /* Clr MSA1 */ /* enable MMU */ mfcr r6, cr18 diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h index 22ca3cf2794a..09e2700a3693 100644 --- a/arch/csky/abiv2/inc/abi/fpu.h +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -9,7 +9,8 @@ int fpu_libc_helper(struct pt_regs *regs); void fpu_fpe(struct pt_regs *regs); -void __init init_fpu(void); + +static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } void save_to_user_fp(struct user_fp *user_fp); void restore_from_user_fp(struct user_fp *user_fp); diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 9738eacefdc7..62bb307459ca 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -28,7 +28,7 @@ #define SSEG_SIZE 0x20000000 #define LOWMEM_LIMIT (SSEG_SIZE * 2) -#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1)) +#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) #ifndef __ASSEMBLY__ diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 21e0bd5293dd..c6bcd7f7c720 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[]; struct thread_struct { unsigned long ksp; /* kernel stack pointer */ unsigned long sr; /* saved status register */ + unsigned long trap_no; /* saved status register */ /* FPU regs */ struct user_fp __aligned(16) user_fp; diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index eaa1c3403a42..60f8a4112588 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -254,7 +254,7 @@ do { \ extern int __get_user_bad(void); -#define __copy_user(to, from, n) \ +#define ___copy_to_user(to, from, n) \ do { \ int w0, w1, w2, w3; \ asm volatile( \ @@ -289,31 +289,34 @@ do { \ " subi %0, 4 \n" \ " br 3b \n" \ "5: cmpnei %0, 0 \n" /* 1B */ \ - " bf 8f \n" \ + " bf 13f \n" \ " ldb %3, (%2, 0) \n" \ "6: stb %3, (%1, 0) \n" \ " addi %2, 1 \n" \ " addi %1, 1 \n" \ " subi %0, 1 \n" \ " br 5b \n" \ - "7: br 8f \n" \ + "7: subi %0, 4 \n" \ + "8: subi %0, 4 \n" \ + "12: subi %0, 4 \n" \ + " br 13f \n" \ ".section __ex_table, \"a\" \n" \ ".align 2 \n" \ - ".long 2b, 7b \n" \ - ".long 9b, 7b \n" \ - ".long 10b, 7b \n" \ + ".long 2b, 13f \n" \ + ".long 4b, 13f \n" \ + ".long 6b, 13f \n" \ + ".long 9b, 12b \n" \ + ".long 10b, 8b \n" \ ".long 11b, 7b \n" \ - ".long 4b, 7b \n" \ - ".long 6b, 7b \n" \ ".previous \n" \ - "8: \n" \ + "13: \n" \ : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ "=r"(w1), "=r"(w2), "=r"(w3) \ : "0"(n), "1"(to), 
"2"(from) \ : "memory"); \ } while (0) -#define __copy_user_zeroing(to, from, n) \ +#define ___copy_from_user(to, from, n) \ do { \ int tmp; \ int nsave; \ @@ -356,22 +359,22 @@ do { \ " addi %1, 1 \n" \ " subi %0, 1 \n" \ " br 5b \n" \ - "8: mov %3, %0 \n" \ - " movi %4, 0 \n" \ - "9: stb %4, (%1, 0) \n" \ - " addi %1, 1 \n" \ - " subi %3, 1 \n" \ - " cmpnei %3, 0 \n" \ - " bt 9b \n" \ - " br 7f \n" \ + "8: stw %3, (%1, 0) \n" \ + " subi %0, 4 \n" \ + " bf 7f \n" \ + "9: subi %0, 8 \n" \ + " bf 7f \n" \ + "13: stw %3, (%1, 8) \n" \ + " subi %0, 12 \n" \ + " bf 7f \n" \ ".section __ex_table, \"a\" \n" \ ".align 2 \n" \ - ".long 2b, 8b \n" \ + ".long 2b, 7f \n" \ + ".long 4b, 7f \n" \ + ".long 6b, 7f \n" \ ".long 10b, 8b \n" \ - ".long 11b, 8b \n" \ - ".long 12b, 8b \n" \ - ".long 4b, 8b \n" \ - ".long 6b, 8b \n" \ + ".long 11b, 9b \n" \ + ".long 12b,13b \n" \ ".previous \n" \ "7: \n" \ : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index a7a5b67df898..4349528fbf38 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -170,8 +170,10 @@ csky_syscall_trace: ldw a3, (sp, LSAVE_A3) #if defined(__CSKYABIV2__) subi sp, 8 - stw r5, (sp, 0x4) - stw r4, (sp, 0x0) + ldw r9, (sp, LSAVE_A4) + stw r9, (sp, 0x0) + ldw r9, (sp, LSAVE_A5) + stw r9, (sp, 0x4) #else ldw r6, (sp, LSAVE_A4) ldw r7, (sp, LSAVE_A5) @@ -318,8 +320,6 @@ ENTRY(__switch_to) mfcr a2, psr /* Save PSR value */ stw a2, (a3, THREAD_SR) /* Save PSR in task struct */ - bclri a2, 6 /* Disable interrupts */ - mtcr a2, psr SAVE_SWITCH_STACK diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S index 61989f9241c0..17ed9d250480 100644 --- a/arch/csky/kernel/head.S +++ b/arch/csky/kernel/head.S @@ -21,6 +21,11 @@ END(_start) ENTRY(_start_smp_secondary) SETUP_MMU + /* copy msa1 from CPU0 */ + lrw r6, secondary_msa1 + ld.w r6, (r6, 0) + mtcr r6, cr<31, 15> + /* set stack point */ lrw r6, secondary_stack ld.w r6, (r6, 0) diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c index e68ff375c8f8..ab55e98ee8f6 100644 --- a/arch/csky/kernel/perf_callchain.c +++ b/arch/csky/kernel/perf_callchain.c @@ -12,12 +12,17 @@ struct stackframe { static int unwind_frame_kernel(struct stackframe *frame) { - if (kstack_end((void *)frame->fp)) + unsigned long low = (unsigned long)task_stack_page(current); + unsigned long high = low + THREAD_SIZE; + + if (unlikely(frame->fp < low || frame->fp > high)) return -EPERM; - if (frame->fp & 0x3 || frame->fp < TASK_SIZE) + + if (kstack_end((void *)frame->fp) || frame->fp & 0x3) return -EPERM; *frame = *(struct stackframe *)frame->fp; + if (__kernel_text_address(frame->lr)) { int graph = 0; diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index 23ee604aafdb..2c1e253abb74 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -24,26 +24,9 @@ struct screen_info screen_info = { }; #endif -phys_addr_t __init_memblock memblock_end_of_REG0(void) -{ - return (memblock.memory.regions[0].base + - memblock.memory.regions[0].size); -} - -phys_addr_t __init_memblock memblock_start_of_REG1(void) -{ - return memblock.memory.regions[1].base; -} - -size_t __init_memblock memblock_size_of_REG1(void) -{ - return memblock.memory.regions[1].size; -} - static void __init csky_memblock_init(void) { unsigned long zone_size[MAX_NR_ZONES]; - unsigned long zhole_size[MAX_NR_ZONES]; signed long size; memblock_reserve(__pa(_stext), _end - _stext); @@ -57,54 +40,36 @@ static void __init 
csky_memblock_init(void) memblock_dump_all(); memset(zone_size, 0, sizeof(zone_size)); - memset(zhole_size, 0, sizeof(zhole_size)); min_low_pfn = PFN_UP(memblock_start_of_DRAM()); - max_pfn = PFN_DOWN(memblock_end_of_DRAM()); - - max_low_pfn = PFN_UP(memblock_end_of_REG0()); - if (max_low_pfn == 0) - max_low_pfn = max_pfn; + max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); size = max_pfn - min_low_pfn; - if (memblock.memory.cnt > 1) { - zone_size[ZONE_NORMAL] = - PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn; - zhole_size[ZONE_NORMAL] = - PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn; + if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET)) + zone_size[ZONE_NORMAL] = size; + else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) { + zone_size[ZONE_NORMAL] = + PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); + max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; } else { - if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) - zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn; - else { - zone_size[ZONE_NORMAL] = + zone_size[ZONE_NORMAL] = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); - max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; - } + max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL]; + write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); } #ifdef CONFIG_HIGHMEM - size = 0; - if (memblock.memory.cnt > 1) { - size = PFN_DOWN(memblock_size_of_REG1()); - highstart_pfn = PFN_DOWN(memblock_start_of_REG1()); - } else { - size = max_pfn - min_low_pfn - - PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); - highstart_pfn = min_low_pfn + - PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); - } + zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; - if (size > 0) - zone_size[ZONE_HIGHMEM] = size; - - highend_pfn = max_pfn; + highstart_pfn = max_low_pfn; + highend_pfn = max_pfn; #endif memblock_set_current_limit(PFN_PHYS(max_low_pfn)); dma_contiguous_reserve(0); - free_area_init_node(0, zone_size, min_low_pfn, zhole_size); + free_area_init_node(0, zone_size, min_low_pfn, NULL); } void __init setup_arch(char **cmdline_p) diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index 0bb0954d5570..b5c5bc3afeb5 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -22,6 +22,9 @@ #include #include #include +#ifdef CONFIG_CPU_HAS_FPU +#include +#endif struct ipi_data_struct { unsigned long bits ____cacheline_aligned; @@ -156,6 +159,8 @@ volatile unsigned int secondary_hint; volatile unsigned int secondary_ccr; volatile unsigned int secondary_stack; +unsigned long secondary_msa1; + int __cpu_up(unsigned int cpu, struct task_struct *tidle) { unsigned long mask = 1 << cpu; @@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); secondary_ccr = mfcr("cr18"); + secondary_msa1 = read_mmu_msa1(); /* * Because other CPUs are in reset status, we must flush data diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index b057480e7463..63715cb90ee9 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -115,8 +115,9 @@ asmlinkage void trap_c(struct pt_regs *regs) int sig; unsigned long vector; siginfo_t info; + struct task_struct *tsk = current; - vector = (mfcr("psr") >> 16) & 0xff; + vector = (regs->sr >> 16) & 0xff; switch (vector) { case VEC_ZERODIV: @@ -129,6 +130,7 @@ asmlinkage void trap_c(struct pt_regs *regs) sig = SIGTRAP; break; case VEC_ILLEGAL: + tsk->thread.trap_no = vector; die_if_kernel("Kernel mode ILLEGAL", regs, vector); #ifndef 
CONFIG_CPU_NO_USER_BKPT if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) @@ -146,16 +148,20 @@ asmlinkage void trap_c(struct pt_regs *regs) sig = SIGTRAP; break; case VEC_ACCESS: + tsk->thread.trap_no = vector; return buserr(regs); #ifdef CONFIG_CPU_NEED_SOFTALIGN case VEC_ALIGN: + tsk->thread.trap_no = vector; return csky_alignment(regs); #endif #ifdef CONFIG_CPU_HAS_FPU case VEC_FPE: + tsk->thread.trap_no = vector; die_if_kernel("Kernel mode FPE", regs, vector); return fpu_fpe(regs); case VEC_PRIV: + tsk->thread.trap_no = vector; die_if_kernel("Kernel mode PRIV", regs, vector); if (fpu_libc_helper(regs)) return; @@ -164,5 +170,8 @@ asmlinkage void trap_c(struct pt_regs *regs) sig = SIGSEGV; break; } + + tsk->thread.trap_no = vector; + send_sig(sig, current, 0); } diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c index 647a23986fb5..3c9bd645e643 100644 --- a/arch/csky/lib/usercopy.c +++ b/arch/csky/lib/usercopy.c @@ -7,10 +7,7 @@ unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n) { - if (access_ok(from, n)) - __copy_user_zeroing(to, from, n); - else - memset(to, 0, n); + ___copy_from_user(to, from, n); return n; } EXPORT_SYMBOL(raw_copy_from_user); @@ -18,8 +15,7 @@ EXPORT_SYMBOL(raw_copy_from_user); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n) { - if (access_ok(to, n)) - __copy_user(to, from, n); + ___copy_to_user(to, from, n); return n; } EXPORT_SYMBOL(raw_copy_to_user); diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index f76618b630f9..562c7f708749 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -179,11 +179,14 @@ bad_area: bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { + tsk->thread.trap_no = (regs->sr >> 16) & 0xff; force_sig_fault(SIGSEGV, si_code, (void __user *)address); return; } no_context: + tsk->thread.trap_no = (regs->sr >> 16) & 0xff; + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; @@ -198,6 +201,8 @@ no_context: die_if_kernel("Oops", regs, write); out_of_memory: + tsk->thread.trap_no = (regs->sr >> 16) & 0xff; + /* * We ran out of memory, call the OOM killer, and return the userspace * (which will retry the fault, or kill us if we got oom-killed). @@ -206,6 +211,8 @@ out_of_memory: return; do_sigbus: + tsk->thread.trap_no = (regs->sr >> 16) & 0xff; + up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die */ diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c index 85e60509f0a8..d4b53af657c8 100644 --- a/arch/h8300/kernel/asm-offsets.c +++ b/arch/h8300/kernel/asm-offsets.c @@ -63,6 +63,9 @@ int main(void) OFFSET(TI_FLAGS, thread_info, flags); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE, thread_info, preempt_count); +#ifdef CONFIG_PREEMPTION + DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); +#endif return 0; } diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index ba1a444d55b3..68a68147504d 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -171,16 +171,10 @@ static inline void writel(u32 data, volatile void __iomem *addr) #define writew_relaxed __raw_writew #define writel_relaxed __raw_writel -/* - * Need an mtype somewhere in here, for cache type deals? - * This is probably too long for an inline. 
- */ -void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size); +void __iomem *ioremap(unsigned long phys_addr, unsigned long size); +#define ioremap_nocache ioremap +#define ioremap_uc(X, Y) ioremap((X), (Y)) -static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size) -{ - return ioremap_nocache(phys_addr, size); -} static inline void iounmap(volatile void __iomem *addr) { diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index cf8974beb500..b3dbb472572e 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -20,7 +20,7 @@ EXPORT_SYMBOL(__vmgetie); EXPORT_SYMBOL(__vmsetie); EXPORT_SYMBOL(__vmyield); EXPORT_SYMBOL(empty_zero_page); -EXPORT_SYMBOL(ioremap_nocache); +EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c index 77d8e1e69e9b..b103d83b5fbb 100644 --- a/arch/hexagon/mm/ioremap.c +++ b/arch/hexagon/mm/ioremap.c @@ -9,7 +9,7 @@ #include #include -void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) +void __iomem *ioremap(unsigned long phys_addr, unsigned long size) { unsigned long last_addr, addr; unsigned long offset = phys_addr & ~PAGE_MASK; diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 32240000dc0c..2876a7df1b0a 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from endif quiet_cmd_gzip = GZIP $@ -cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@ +cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@ quiet_cmd_objcopy = OBJCOPY $@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 8c92e095f8bb..d42f79a33e91 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -35,7 +35,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 7ff574d56429..f31e07fc936d 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -54,8 +54,7 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) { - /* FIXME: should this be bspstore + nr_dirty regs? */ - return regs->ar_bspstore; + return regs->r12; } static inline int is_syscall_success(struct pt_regs *regs) @@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs) unsigned long __ip = instruction_pointer(regs); \ (__ip & ~3UL) + ((__ip & 3UL) << 2); \ }) -/* - * Why not default? Because user_stack_pointer() on ia64 gives register - * stack backing store instead... - */ -#define current_user_stack_pointer() (current_pt_regs()->r12) /* given a pointer to a task_struct, return the user's pt_regs */ # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 6c6f16e409a8..0d23c0049301 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return regs->r10 == -1 ? regs->r8:0; + return regs->r10 == -1 ? 
-regs->r8:0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 1a8df6669eee..18d6008b151f 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -41,7 +41,7 @@ obj-y += esi_stub.o # must be in kernel proper endif obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o -obj-$(CONFIG_BINFMT_ELF) += elfcore.o +obj-$(CONFIG_ELF_CORE) += elfcore.o # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 8b5b8e6bc9d9..dd5bfed52031 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ u32 cpu=dev->id; \ - return sprintf(buf, "%lx\n", name[cpu]); \ + return sprintf(buf, "%llx\n", name[cpu]); \ } #define store(name) \ @@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr, #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); - printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); - printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); - printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n", + printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]); + printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]); + printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); @@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr, #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); - printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]); - printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); + printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]); + printk(KERN_DEBUG "resources=%llx\n", resources[cpu]); #endif return size; } @@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; - return sprintf(buf, "%lx\n", phys_addr[cpu]); + return sprintf(buf, "%llx\n", phys_addr[cpu]); } static ssize_t @@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG - printk("Virtual address %lx is not existing.\n",virt_addr); + printk("Virtual address %llx is not existing.\n", virt_addr); #endif return -EINVAL; } @@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev, { unsigned int cpu=dev->id; - return sprintf(buf, "%lx, %lx, %lx\n", + return sprintf(buf, "%llx, %llx, %llx\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); @@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev, int ret; #ifdef ERR_INJ_DEBUG - printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", + printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3, cpu); #endif - ret=sscanf(buf, "%lx, %lx, %lx", + ret = sscanf(buf, "%llx, %llx, %llx", &err_data_buffer[cpu].data1, &err_data_buffer[cpu].data2, &err_data_buffer[cpu].data3); diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index b8356edbde65..b3dc39050c1a 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -396,83 
+396,9 @@ static void kretprobe_trampoline(void) { } -/* - * At this point the target function has been tricked into - * returning into our trampoline. Lookup the associated instance - * and then: - * - call the handler function - * - cleanup by marking the instance as unused - * - long jump back to the original return address - */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - ((struct fnptr *)kretprobe_trampoline)->ip; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - regs->cr_iip = orig_ret_address; - - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
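
The block removed below is the old hand-rolled walk of the kretprobe hash list; the one-line replacement defers to the generic __kretprobe_trampoline_handler(). The invariant both rely on is that instances are pushed at the head, so scanning newest-to-oldest, the first saved address that is not the trampoline is the real return address. A simplified illustration of that invariant (not the kernel's implementation):

#include <stdio.h>
#include <stdlib.h>

#define TRAMPOLINE ((void *)0xdeadbeefUL)

/* One pending return-probe instance; newest entries sit at the head. */
struct instance {
	void *ret_addr;
	struct instance *next;
};

static struct instance *push(struct instance *head, void *ret)
{
	struct instance *ri = malloc(sizeof(*ri));

	if (!ri)
		exit(1);
	ri->ret_addr = ret;
	ri->next = head;
	return ri;
}

/* Scan newest-to-oldest: the first non-trampoline address is where the
 * probed function must really return to. */
static void *real_return_address(const struct instance *head)
{
	for (const struct instance *ri = head; ri; ri = ri->next)
		if (ri->ret_addr != TRAMPOLINE)
			return ri->ret_addr;
	return NULL;
}

int main(void)
{
	struct instance *head = NULL;

	head = push(head, (void *)0x1000);	/* outermost: real address */
	head = push(head, TRAMPOLINE);		/* nested probed call */
	printf("resume at %p\n", real_return_address(head));
	return 0;
}
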
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index bf2cb9294795..d96c68122eae 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1851,7 +1851,7 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = (void *)__get_free_pages(GFP_KERNEL, + data = (void *)__get_free_pages(GFP_ATOMIC, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index bf9c24d9ce84..54e12b0ecebd 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -2147,27 +2147,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) { struct syscall_get_set_args *args = data; struct pt_regs *pt = args->regs; - unsigned long *krbs, cfm, ndirty; + unsigned long *krbs, cfm, ndirty, nlocals, nouts; int i, count; if (unw_unwind_to_user(info) < 0) return; + /* + * We get here via a few paths: + * - break instruction: cfm is shared with caller. + * syscall args are in out regs, locals are non-empty. + * - epc instruction: cfm is set by br.call + * locals don't exist. + * + * In both cases the arguments are reachable in cfm.sof - cfm.sol. + * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ] + */ cfm = pt->cr_ifs; + nlocals = (cfm >> 7) & 0x7f; /* aka sol */ + nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */ krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); count = 0; if (in_syscall(pt)) - count = min_t(int, args->n, cfm & 0x7f); + count = min_t(int, args->n, nouts); + /* Iterate over outs. */ for (i = 0; i < count; i++) { + int j = ndirty + nlocals + i + args->i; if (args->rw) - *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = - args->args[i]; + *ia64_rse_skip_regs(krbs, j) = args->args[i]; else - args->args[i] = *ia64_rse_skip_regs(krbs, - ndirty + i + args->i); + args->args[i] = *ia64_rse_skip_regs(krbs, j); } if (!args->rw) { diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 4f33f6e7e206..41d243c0c626 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ -static int __meminit early_nr_cpus_node(int node) +static int early_nr_cpus_node(int node) { int cpu, n = 0; @@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node) * compute_pernodesize - compute size of pernode data * @node: the node id.
*/ -static unsigned long __meminit compute_pernodesize(int node) +static unsigned long compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; @@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void) } } -static void __meminit scatter_node_data(void) +static void scatter_node_data(void) { pg_data_t **dst; int node; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index a6dd80a2c939..ee50506d86f4 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -518,7 +518,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); return 0; } @@ -527,8 +527,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, - NULL); + memmap_init_zone(size, nid, zone, start_pfn, + MEMINIT_EARLY, NULL); } else { struct page *start; struct memmap_init_callback_data args; diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 5d9288384096..0415d28dbe4f 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -135,10 +135,10 @@ vmlinux.gz: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - gzip -9c vmlinux.tmp >vmlinux.gz + $(KGZIP) -9c vmlinux.tmp >vmlinux.gz rm vmlinux.tmp else - gzip -9c vmlinux >vmlinux.gz + $(KGZIP) -9c vmlinux >vmlinux.gz endif bzImage: vmlinux.bz2 @@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - bzip2 -1c vmlinux.tmp >vmlinux.bz2 + $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2 rm vmlinux.tmp else - bzip2 -1c vmlinux >vmlinux.bz2 + $(KBZIP2) -1c vmlinux >vmlinux.bz2 endif archclean: diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 62b0eb6cf69a..84eab0f5e00a 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -216,8 +216,10 @@ static int __init mcf_pci_init(void) /* Keep a virtual mapping to IO/config space active */ iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); - if (iospace == 0) + if (iospace == 0) { + pci_free_host_bridge(bridge); return -ENODEV; + } pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", (u32) iospace); diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 9a33c1c006a1..cf8103fa2f34 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -334,7 +334,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 7fdbc797a05d..5636288a4b45 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -319,7 +319,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index f1763405a539..015a7f401ffd 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -334,7 +334,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/bvme6000_defconfig 
b/arch/m68k/configs/bvme6000_defconfig index 91154d6acb31..1209430e61e1 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -316,7 +316,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index c398c4a94d95..a41b16067f5c 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -318,7 +318,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 350d004559be..8af104a8c000 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -325,7 +325,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index b838dd820348..354ff30e22c9 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -358,7 +358,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 3f8dd61559cf..eac7685cea42 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -315,7 +315,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index ae3b2d4f636c..0f38c4a3c87a 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -316,7 +316,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index cd61ef14b582..6ede6869db1c 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -324,7 +324,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 151f5371cd3d..8644c4789938 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -313,7 +313,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 1dcb0ee1fe98..f2fd0da2346e 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -313,7 +313,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/include/asm/m53xxacr.h 
b/arch/m68k/include/asm/m53xxacr.h index 9138a624c5c8..692f90e7fecc 100644 --- a/arch/m68k/include/asm/m53xxacr.h +++ b/arch/m68k/include/asm/m53xxacr.h @@ -89,9 +89,9 @@ * coherency though in all cases. And for copyback caches we will need * to push cached data as well. */ -#define CACHE_INIT CACR_CINVA -#define CACHE_INVALIDATE CACR_CINVA -#define CACHE_INVALIDATED CACR_CINVA +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ (0x000f0000) + \ diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index de1470c4d829..1149251ea58d 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping; struct irq_desc; +extern void via_l2_flush(int writeback); extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h index 257b29184af9..e28eb1c0e0bf 100644 --- a/arch/m68k/include/asm/mvme147hw.h +++ b/arch/m68k/include/asm/mvme147hw.h @@ -66,6 +66,9 @@ struct pcc_regs { #define PCC_INT_ENAB 0x08 #define PCC_TIMER_INT_CLR 0x80 + +#define PCC_TIMER_TIC_EN 0x01 +#define PCC_TIMER_COC_EN 0x02 #define PCC_TIMER_CLR_OVF 0x04 #define PCC_LEVEL_ABORT 0x07 diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 3c5def10d486..caa260f877f2 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -139,7 +139,8 @@ void __init setup_arch(char **cmdline_p) pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", __bss_stop, memory_start, memory_start, memory_end); - memblock_add(memory_start, memory_end - memory_start); + memblock_add(_rambase, memory_end - _rambase); + memblock_reserve(_rambase, memory_start - _rambase); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 611f73bfc87c..d0126ab01360 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -59,7 +59,6 @@ extern void iop_preinit(void); extern void iop_init(void); extern void via_init(void); extern void via_init_clock(irq_handler_t func); -extern void via_flush_cache(void); extern void oss_init(void); extern void psc_init(void); extern void baboon_init(void); @@ -130,21 +129,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) return unknown; } -/* - * Flip into 24bit mode for an instant - flushes the L2 cache card. We - * have to disable interrupts for this. Our IRQ handlers will crap - * themselves if they take an IRQ in 24bit mode! - */ - -static void mac_cache_card_flush(int writeback) -{ - unsigned long flags; - - local_irq_save(flags); - via_flush_cache(); - local_irq_restore(flags); -} - void __init config_mac(void) { if (!MACH_IS_MAC) @@ -175,9 +159,8 @@ void __init config_mac(void) * not. 
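
The m53xxacr.h change above folds the configured CACHE_MODE bits into the invalidate values so a CACR write that invalidates the cache no longer wipes its operating mode. A compile-and-run sketch of the difference (the bit layout here is illustrative, not the real ColdFire CACR):

#include <stdio.h>

#define CACR_EC    (1u << 31)		/* invented: cache enable */
#define CACR_DCM   (3u << 8)		/* invented: cache mode field */
#define CACR_CINVA (1u << 24)		/* invented: invalidate all */

#define CACHE_MODE (CACR_EC | CACR_DCM)

int main(void)
{
	unsigned int cacr;

	/* Old scheme: the invalidate write drops the enable/mode bits. */
	cacr = CACR_CINVA;
	printf("old write: enable kept? %s\n", (cacr & CACR_EC) ? "yes" : "no");

	/* New scheme: invalidate while re-stating the cache mode. */
	cacr = CACHE_MODE | CACR_CINVA;
	printf("new write: enable kept? %s\n", (cacr & CACR_EC) ? "yes" : "no");
	return 0;
}
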
*/ - if (macintosh_config->ident == MAC_MODEL_IICI - || macintosh_config->ident == MAC_MODEL_IIFX) - mach_l2_flush = mac_cache_card_flush; + if (macintosh_config->ident == MAC_MODEL_IICI) + mach_l2_flush = via_l2_flush; } diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 9bfa17015768..c432bfafe63e 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 static __inline__ void iop_stop(volatile struct mac_iop *iop) { - iop->status_ctrl &= ~IOP_RUN; + iop->status_ctrl = IOP_AUTOINC; } static __inline__ void iop_start(volatile struct mac_iop *iop) @@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop) iop->status_ctrl = IOP_RUN | IOP_AUTOINC; } -static __inline__ void iop_bypass(volatile struct mac_iop *iop) -{ - iop->status_ctrl |= IOP_BYPASS; -} - static __inline__ void iop_interrupt(volatile struct mac_iop *iop) { - iop->status_ctrl |= IOP_IRQ; + iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC; } static int iop_alive(volatile struct mac_iop *iop) @@ -244,7 +239,6 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_SCC]->status_ctrl = 0x87; iop_scc_present = 1; } else { iop_base[IOP_NUM_SCC] = NULL; @@ -256,7 +250,7 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_ISM]->status_ctrl = 0; + iop_stop(iop_base[IOP_NUM_ISM]); iop_ism_present = 1; } else { iop_base[IOP_NUM_ISM] = NULL; @@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan) msg->status = IOP_MSGSTATUS_UNUSED; msg = msg->next; iop_send_queue[iop_num][chan] = msg; - if (msg) iop_do_send(msg); + if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) + iop_do_send(msg); } /* @@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata, if (!(q = iop_send_queue[iop_num][chan])) { iop_send_queue[iop_num][chan] = msg; + iop_do_send(msg); } else { while (q->next) q = q->next; q->next = msg; } - if (iop_readb(iop_base[iop_num], - IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) { - iop_do_send(msg); - } - return 0; } diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 3c2cfcb74982..1f0fad2a98a0 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -294,10 +294,14 @@ void via_debug_dump(void) * the system into 24-bit mode for an instant. 
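
The IOP hunks above replace read-modify-write sequences on status_ctrl with writes of a freshly composed value, which suggests that bits read back from this register must not be blindly written back. A toy model of the two styles (register layout and names invented):

#include <stdio.h>

#define CTL_RUN     0x01u
#define CTL_AUTOINC 0x02u
#define CTL_IRQ     0x04u
#define STAT_BUSY   0x80u	/* reads as status; writing it acts as a command */

int main(void)
{
	unsigned char reg = CTL_RUN | CTL_AUTOINC | STAT_BUSY;

	/* Read-modify-write leaks the status bit back into the write. */
	unsigned char rmw = reg | CTL_IRQ;
	printf("rmw write:  0x%02x (BUSY written back)\n", rmw);

	/* A whole-value write states exactly the intended control bits. */
	unsigned char full = CTL_IRQ | CTL_RUN | CTL_AUTOINC;
	printf("full write: 0x%02x\n", full);
	return 0;
}
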
*/ -void via_flush_cache(void) +void via_l2_flush(int writeback) { + unsigned long flags; + + local_irq_save(flags); via2[gBufB] &= ~VIA2B_vMode32; via2[gBufB] |= VIA2B_vMode32; + local_irq_restore(flags); } /* diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 6cb1e41d58d0..70a5f55ea664 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -164,7 +164,7 @@ void __init cf_bootmem_alloc(void) m68k_memory[0].addr = _rambase; m68k_memory[0].size = _ramend - _rambase; - memblock_add(m68k_memory[0].addr, m68k_memory[0].size); + memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0); /* compute total pages in system */ num_pages = PFN_DOWN(_ramend - _rambase); diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 545a1fe0e119..245376630c3d 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -117,8 +117,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id) unsigned long flags; local_irq_save(flags); - m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; - m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF; + m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN | + PCC_TIMER_TIC_EN; + m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR | + PCC_LEVEL_TIMER1; clk_total += PCC_TIMER_CYCLES; timer_routine(0, NULL); local_irq_restore(flags); @@ -136,10 +138,10 @@ void mvme147_sched_init (irq_handler_t timer_routine) /* Init the clock with a value */ /* The clock counter increments until 0xFFFF then reloads */ m147_pcc->t1_preload = PCC_TIMER_PRELOAD; - m147_pcc->t1_cntrl = 0x0; /* clear timer */ - m147_pcc->t1_cntrl = 0x3; /* start timer */ - m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */ - m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1; + m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN | + PCC_TIMER_TIC_EN; + m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR | + PCC_LEVEL_TIMER1; clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ); } diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 9bc2da69f80c..2f2c2d172b24 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -368,6 +368,7 @@ static u32 clk_total; #define PCCTOVR1_COC_EN 0x02 #define PCCTOVR1_OVR_CLR 0x04 +#define PCCTIC1_INT_LEVEL 6 #define PCCTIC1_INT_CLR 0x08 #define PCCTIC1_INT_EN 0x10 @@ -377,8 +378,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) unsigned long flags; local_irq_save(flags); - out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR); - out_8(PCCTOVR1, PCCTOVR1_OVR_CLR); + out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); + out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL); clk_total += PCC_TIMER_CYCLES; timer_routine(0, NULL); local_irq_restore(flags); @@ -392,14 +393,15 @@ void mvme16x_sched_init (irq_handler_t timer_routine) int irq; /* Using PCCchip2 or MC2 chip tick timer 1 */ - out_be32(PCCTCNT1, 0); - out_be32(PCCTCMP1, PCC_TIMER_CYCLES); - out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); - out_8(PCCTIC1, PCCTIC1_INT_EN | 6); if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer", timer_routine)) panic ("Couldn't register timer int"); + out_be32(PCCTCNT1, 0); + out_be32(PCCTCMP1, PCC_TIMER_CYCLES); + out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN); + out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL); + clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ); if (brdno == 0x0162 || brdno == 0x172) diff --git 
a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index e63eb5f06999..f31890078197 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) { int tmp = Q40_RTC_CTRL; + pll->pll_ctrl = 0; pll->pll_value = tmp & Q40_RTC_PLL_MASK; if (tmp & Q40_RTC_PLL_SIGN) pll->pll_value = -pll->pll_value; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e5c2d47608fe..6ecdc690f733 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -862,6 +862,7 @@ config SNI_RM select I8253 select I8259 select ISA + select MIPS_L1_CACHE_SHIFT_6 select SWAP_IO_SPACE if CPU_BIG_ENDIAN select SYS_HAS_CPU_R4X00 select SYS_HAS_CPU_R5000 diff --git a/arch/mips/Makefile b/arch/mips/Makefile index cdc09b71febe..5403a91ce098 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -285,12 +285,23 @@ ifdef CONFIG_64BIT endif endif +# When linking a 32-bit executable the LLVM linker cannot cope with a +# 32-bit load address that has been sign-extended to 64 bits. Simply +# remove the upper 32 bits then, as it is safe to do so with other +# linkers. +ifdef CONFIG_64BIT + load-ld = $(load-y) +else + load-ld = $(subst 0xffffffff,0x,$(load-y)) +endif + KBUILD_AFLAGS += $(cflags-y) KBUILD_CFLAGS += $(cflags-y) -KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) +KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ + LINKER_LOAD_ADDRESS=$(load-ld) \ VMLINUX_ENTRY_ADDRESS=$(entry-y) \ PLATFORM="$(platform-y)" \ ITS_INPUTS="$(its-y)" diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index a95a894aceaf..f0c830337104 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -152,6 +152,7 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, { struct clk_init_data id; struct clk_hw *h; + struct clk *clk; h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) @@ -164,7 +165,13 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, id.ops = &alchemy_clkops_cpu; h->init = &id; - return clk_register(NULL, h); + clk = clk_register(NULL, h); + if (IS_ERR(clk)) { + pr_err("failed to register clock\n"); + kfree(h); + } + + return clk; } /* AUXPLLs ************************************************************/ diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index 6889f74e06f5..490bb6da74b7 100644 --- a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -27,6 +27,7 @@ config BCM47XX_BCMA select BCMA select BCMA_HOST_SOC select BCMA_DRIVER_MIPS + select BCMA_DRIVER_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI select BCMA_DRIVER_GPIO default y diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index d859f079b771..378cbfb31ee7 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -90,7 +90,7 @@ ifneq ($(zload-y),) VMLINUZ_LOAD_ADDRESS := $(zload-y) else VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ - $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) + $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) endif UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS) diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index 88f5d637b1c4..a52e929381ea 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -13,6 +13,7 @@ #include #include +#include /* 
* These two variables specify the free mem region @@ -113,7 +114,7 @@ void decompress_kernel(unsigned long boot_heap_start) dtb_size = fdt_totalsize((void *)&__appended_dtb); /* last four bytes is always image size in little endian */ - image_size = le32_to_cpup((void *)&__image_end - 4); + image_size = get_unaligned_le32((void *)&__image_end - 4); /* copy dtb to where the booted kernel will expect it */ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi index 69cbef472377..d4b2b430dad0 100644 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi @@ -59,7 +59,7 @@ periph_cntl: syscon@fff8c008 { compatible = "syscon"; - reg = <0xfff8c000 0x4>; + reg = <0xfff8c008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi index beec24145af7..30fdd38aef6d 100644 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi @@ -59,7 +59,7 @@ periph_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi index f21176cac038..89a3107cad28 100644 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi @@ -59,7 +59,7 @@ periph_cntl: syscon@fffe0008 { compatible = "syscon"; - reg = <0xfffe0000 0x4>; + reg = <0xfffe0008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi index 8ae6981735b8..e48946a51242 100644 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi @@ -59,7 +59,7 @@ periph_cntl: syscon@10000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi index 449c167dd892..b84a3bfe8c51 100644 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi @@ -59,7 +59,7 @@ periph_cntl: syscon@100000008 { compatible = "syscon"; - reg = <0x10000000 0xc>; + reg = <0x10000008 0x4>; native-endian; }; diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts index 7a371d9c5a33..eda37fb516f0 100644 --- a/arch/mips/boot/dts/ingenic/qi_lb60.dts +++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts @@ -69,7 +69,7 @@ "Speaker", "OUTL", "Speaker", "OUTR", "INL", "LOUT", - "INL", "ROUT"; + "INR", "ROUT"; simple-audio-card,aux-devs = <&>; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index f97be32bf699..3ad1f76c063a 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d, } cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return -ENOMEM; + cd->host_data = host_data; cd->bit = hw; diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index cc88a08bc1f7..4017398519cf 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -518,6 +518,7 @@ static int __init dwc3_octeon_device_init(void) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { + put_device(&pdev->dev); dev_err(&pdev->dev, "No memory resources\n"); return -ENXIO; } @@ -529,8 +530,10 @@ static int __init dwc3_octeon_device_init(void) * know the difference. 
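
The octeon-usb hunks above plug a device reference leak: once the platform device has been looked up, every early return must drop the reference with put_device(). A sketch of the balanced pattern under the usual of_find_device_by_node()/platform_get_resource() API (example_get_mmio is a hypothetical helper, not the driver's actual code):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int example_get_mmio(struct device_node *node, void __iomem **base)
{
	struct platform_device *pdev;
	struct resource *res;

	pdev = of_find_device_by_node(node);	/* takes a reference */
	if (!pdev)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		put_device(&pdev->dev);		/* drop it on error, too */
		return -ENXIO;
	}

	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base)) {
		put_device(&pdev->dev);
		return PTR_ERR(*base);
	}

	return 0;	/* success: the caller now owns the reference */
}
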
*/ base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) + if (IS_ERR(base)) { + put_device(&pdev->dev); return PTR_ERR(base); + } mutex_lock(&dwc3_octeon_clocks_mutex); dwc3_octeon_clocks_start(&pdev->dev, (u64)base); diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index f14ad0538f4e..eea9b613bb74 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -112,7 +112,6 @@ CONFIG_BLK_DEV_TC86C001=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m CONFIG_ATA=y diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 7a7af706e898..c5f66b7f2b22 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -99,7 +99,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOWLEVEL is not set diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 82d942a6026e..638d7cf5ef01 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -99,7 +99,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_CONSTANTS=y diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index 370884018aad..7b1fab518317 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -50,7 +50,6 @@ CONFIG_RAID_ATTRS=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 328d4dfeb4cb..982b990469af 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -191,7 +191,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 90ee0084d786..e41f4841cb4d 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -231,7 +231,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_DRM=y -CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON=m CONFIG_FB_RADEON=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 59eedf55419d..211bd3d6e6cb 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -239,7 +239,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 8ef612552a19..62b1969b4f55 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -247,7 +247,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/malta_kvm_guest_defconfig 
b/arch/mips/configs/malta_kvm_guest_defconfig index d2a008c9907c..9185e0a0aa45 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index 970df6d42728..636311d67a53 100644 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 2c7adea7638f..30d7c3db884e 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -203,7 +203,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index 61a0bf13e308..1fc8dffa8d1d 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -6,7 +6,7 @@ * for more details. * * Copyright (C) 1998 Harald Koerfgen - * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki */ #include #include @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ #include #include +#include #include #include #include @@ -29,7 +31,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -166,6 +170,9 @@ void __init plat_mem_setup(void) ioport_resource.start = ~0UL; ioport_resource.end = 0UL; + + /* Stay away from the firmware working memory area for now. */ + memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET); } /* diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index c23527ba65d0..64bffc1f75e0 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -20,10 +20,27 @@ #include #include +#ifndef __VDSO__ +/* + * Emit CFI data in .debug_frame sections, not .eh_frame sections. + * We don't do DWARF unwinding at runtime, so only the offline DWARF + * information is useful to anyone. Note we should change this if we + * ever decide to enable DWARF unwinding at runtime. + */ +#define CFI_SECTIONS .cfi_sections .debug_frame +#else + /* + * For the vDSO, emit both runtime unwind information and debug + * symbols for the .dbg file. 
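
The asm.h change above threads a CFI_SECTIONS statement into every LEAF/NESTED declaration: outside the vDSO, call-frame information goes to .debug_frame (offline debugging only), while the vDSO keeps the default .eh_frame for runtime unwinding. Reduced to its essentials, the plumbing looks like this header fragment (EXAMPLE_LEAF is an invented macro name):

#ifndef __VDSO__
#define CFI_SECTIONS	.cfi_sections .debug_frame
#else
#define CFI_SECTIONS
#endif

/* Every assembly entry point declared through the macro picks up the
 * directive automatically, so no call site needs editing. */
#define EXAMPLE_LEAF(symbol)		\
	CFI_SECTIONS;			\
	.globl	symbol;			\
	.type	symbol, @function;	\
symbol:
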
+ */ +#define CFI_SECTIONS +#endif + /* * LEAF - declare leaf routine */ #define LEAF(symbol) \ + CFI_SECTIONS; \ .globl symbol; \ .align 2; \ .type symbol, @function; \ @@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \ * NESTED - declare nested routine entry point */ #define NESTED(symbol, framesize, rpc) \ + CFI_SECTIONS; \ .globl symbol; \ .align 2; \ .type symbol, @function; \ diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 983a6a7f43a1..3e26b0c7391b 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -288,10 +288,12 @@ # define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6) #endif #ifndef cpu_has_mips64r1 -# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1) +# define cpu_has_mips64r1 (cpu_has_64bits && \ + __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)) #endif #ifndef cpu_has_mips64r2 -# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2) +# define cpu_has_mips64r2 (cpu_has_64bits && \ + __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)) #endif #ifndef cpu_has_mips64r6 # define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6) diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 7bbb66760a07..1809c408736b 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_34K: case CPU_1004K: case CPU_74K: + case CPU_1074K: case CPU_M14KC: case CPU_M14KEC: case CPU_INTERAPTIV: diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 41204a49cf95..356c61074d13 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -274,8 +274,12 @@ enum emulation_result { #define MIPS3_PG_SHIFT 6 #define MIPS3_PG_FRAME 0x3fffffc0 +#if defined(CONFIG_64BIT) +#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) +#else #define VPN2_MASK 0xffffe000 -#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID +#endif +#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) @@ -935,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index bdbdc19a2b8f..3afdb39d092a 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -750,7 +750,7 @@ /* MAAR bit definitions */ #define MIPS_MAAR_VH (_U64CAST_(1) << 63) -#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) +#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12) #define MIPS_MAAR_ADDR_SHIFT 12 #define MIPS_MAAR_S (_ULCAST_(1) << 1) #define MIPS_MAAR_VL (_ULCAST_(1) << 0) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index ba967148b016..2604fab8a92d 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -155,6 +155,7 @@ static inline void pmd_clear(pmd_t *pmdp) #if defined(CONFIG_XPA) +#define 
MAX_POSSIBLE_PHYSMEM_BITS 40 #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) @@ -170,6 +171,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) +#define MAX_POSSIBLE_PHYSMEM_BITS 36 #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) @@ -184,6 +186,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) #else +#define MAX_POSSIBLE_PHYSMEM_BITS 32 #ifdef CONFIG_CPU_VR41XX #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h index 29030cb398ee..1de3bbce8e88 100644 --- a/arch/mips/include/asm/string.h +++ b/arch/mips/include/asm/string.h @@ -10,127 +10,6 @@ #ifndef _ASM_STRING_H #define _ASM_STRING_H - -/* - * Most of the inline functions are rather naive implementations so I just - * didn't bother updating them for 64-bit ... - */ -#ifdef CONFIG_32BIT - -#ifndef IN_STRING_C - -#define __HAVE_ARCH_STRCPY -static __inline__ char *strcpy(char *__dest, __const__ char *__src) -{ - char *__xdest = __dest; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n" - "1:\tlbu\t$1,(%1)\n\t" - "addiu\t%1,1\n\t" - "sb\t$1,(%0)\n\t" - "bnez\t$1,1b\n\t" - "addiu\t%0,1\n\t" - ".set\tat\n\t" - ".set\treorder" - : "=r" (__dest), "=r" (__src) - : "0" (__dest), "1" (__src) - : "memory"); - - return __xdest; -} - -#define __HAVE_ARCH_STRNCPY -static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n) -{ - char *__xdest = __dest; - - if (__n == 0) - return __xdest; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n" - "1:\tlbu\t$1,(%1)\n\t" - "subu\t%2,1\n\t" - "sb\t$1,(%0)\n\t" - "beqz\t$1,2f\n\t" - "addiu\t%0,1\n\t" - "bnez\t%2,1b\n\t" - "addiu\t%1,1\n" - "2:\n\t" - ".set\tat\n\t" - ".set\treorder" - : "=r" (__dest), "=r" (__src), "=r" (__n) - : "0" (__dest), "1" (__src), "2" (__n) - : "memory"); - - return __xdest; -} - -#define __HAVE_ARCH_STRCMP -static __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct) -{ - int __res; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n\t" - "lbu\t%2,(%0)\n" - "1:\tlbu\t$1,(%1)\n\t" - "addiu\t%0,1\n\t" - "bne\t$1,%2,2f\n\t" - "addiu\t%1,1\n\t" - "bnez\t%2,1b\n\t" - "lbu\t%2,(%0)\n\t" -#if defined(CONFIG_CPU_R3000) - "nop\n\t" -#endif - "move\t%2,$1\n" - "2:\tsubu\t%2,$1\n" - "3:\t.set\tat\n\t" - ".set\treorder" - : "=r" (__cs), "=r" (__ct), "=r" (__res) - : "0" (__cs), "1" (__ct)); - - return __res; -} - -#endif /* !defined(IN_STRING_C) */ - -#define __HAVE_ARCH_STRNCMP -static __inline__ int -strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count) -{ - int __res; - - __asm__ __volatile__( - ".set\tnoreorder\n\t" - ".set\tnoat\n" - "1:\tlbu\t%3,(%0)\n\t" - "beqz\t%2,2f\n\t" - "lbu\t$1,(%1)\n\t" - "subu\t%2,1\n\t" - "bne\t$1,%3,3f\n\t" - "addiu\t%0,1\n\t" - "bnez\t%3,1b\n\t" - "addiu\t%1,1\n" - "2:\n\t" -#if defined(CONFIG_CPU_R3000) - "nop\n\t" -#endif - "move\t%3,$1\n" - "3:\tsubu\t%3,$1\n\t" - ".set\tat\n\t" - ".set\treorder" - : "=r" (__cs), "=r" (__ct), "=r" (__count), "=r" (__res) - : "0" (__cs), "1" (__ct), "2" (__count)); - - return __res; -} -#endif /* CONFIG_32BIT */ - #define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, 
size_t __count); diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index 0ae9b4cbc153..83f1cbbbf217 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -26,6 +26,12 @@ #define __VDSO_USE_SYSCALL ULLONG_MAX +#if MIPS_ISA_REV < 6 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo", +#else +#define VDSO_SYSCALL_CLOBBERS +#endif + static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -41,7 +47,9 @@ static __always_inline long gettimeofday_fallback( : "=r" (ret), "=r" (error) : "r" (tv), "r" (tz), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -65,7 +73,9 @@ static __always_inline long clock_gettime_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -89,7 +99,9 @@ static __always_inline int clock_getres_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -113,7 +125,9 @@ static __always_inline long clock_gettime32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? -ret : ret; } @@ -133,7 +147,9 @@ static __always_inline int clock_getres32_fallback( : "=r" (ret), "=r" (error) : "r" (clkid), "r" (ts), "r" (nr) : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + "$14", "$15", "$24", "$25", + VDSO_SYSCALL_CLOBBERS + "memory"); return error ? 
-ret : ret; } diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index d0a9ed2ca2d6..ff3ab771e769 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -135,6 +135,8 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#define SO_NETNS_COOKIE 71 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index efde27c99414..9c5f8a5d097f 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -474,20 +474,20 @@ NESTED(nmi_handler, PT_SIZE, sp) .endm .macro __build_clear_fpe + CLI + TRACE_IRQS_OFF .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 SET_HARDFLOAT cfc1 a1, fcr31 .set pop - CLI - TRACE_IRQS_OFF .endm .macro __build_clear_msa_fpe - _cfcmsa a1, MSA_CSR CLI TRACE_IRQS_OFF + _cfcmsa a1, MSA_CSR .endm .macro __build_clear_ade diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index e5ea3db23d6b..a9eab83d9148 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -119,9 +119,9 @@ static char *cm2_causes[32] = { "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", "0x08", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f", - "0x10", "0x11", "0x12", "0x13", - "0x14", "0x15", "0x16", "INTVN_WR_ERR", - "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", + "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", + "0x14", "0x15", "0x16", "0x17", + "0x18", "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f" }; diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index 3d80a51256de..dab8febb5741 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset) static inline __init unsigned long rotate_xor(unsigned long hash, const void *area, size_t size) { - size_t i; - unsigned long *ptr = (unsigned long *)area; + const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash)); + size_t diff, i; + + diff = (void *)ptr - area; + if (unlikely(size < diff + sizeof(hash))) + return hash; + + size = ALIGN_DOWN(size - diff, sizeof(hash)); for (i = 0; i < size / sizeof(hash); i++) { /* Rotate by odd number of bits and XOR. 
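
The relocate.c fix above makes the entropy hash tolerate buffers that are neither aligned nor a whole multiple of sizeof(long): align the pointer up, bail out if no full word remains, and round the usable size down. A standalone rendering of that guard logic (the rotate amount is fixed at 7 here just to keep it odd and simple):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	uintptr_t addr = (uintptr_t)area;
	size_t bits = sizeof(hash) * 8;
	size_t diff, i;
	const unsigned long *ptr;

	/* Distance to the next naturally aligned word. */
	diff = (sizeof(hash) - (addr % sizeof(hash))) % sizeof(hash);
	if (size < diff + sizeof(hash))
		return hash;		/* nothing whole left to hash */

	ptr = (const unsigned long *)(addr + diff);
	size = (size - diff) / sizeof(hash) * sizeof(hash);

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by an odd number of bits and XOR. */
		hash = (hash << 7) | (hash >> (bits - 7));
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	char buf[64];

	memset(buf, 0xa5, sizeof(buf));
	/* Deliberately misaligned start and odd length. */
	printf("hash: %lx\n", rotate_xor(0, buf + 3, 61));
	return 0;
}
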
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 5eec13b8d222..82e44b31aad5 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -494,7 +494,7 @@ static void __init mips_parse_crashkernel(void) if (ret != 0 || crash_size <= 0) return; - if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) { + if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) { pr_warn("Invalid memory region reserved for crash kernel\n"); return; } @@ -529,8 +529,8 @@ static void __init request_crashkernel(struct resource *res) static void __init check_kernel_sections_mem(void) { - phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text))); - phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start; + phys_addr_t start = __pa_symbol(&_text); + phys_addr_t size = __pa_symbol(&_end) - start; if (!memblock_is_region_memory(start, size)) { pr_info("Kernel sections are not in the memory maps\n"); @@ -653,7 +653,17 @@ static void __init arch_mem_init(char **cmdline_p) crashk_res.end - crashk_res.start + 1); #endif device_tree_init(); + + /* + * To reduce the possibility of a kernel panic when we fail to get + * IO TLB memory under CONFIG_SWIOTLB, allocate as little low memory + * as possible before plat_swiotlb_setup(), so make sparse_init() + * use top-down allocation. + */ + memblock_set_bottom_up(false); sparse_init(); + memblock_set_bottom_up(true); + plat_swiotlb_setup(); dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 712c15de6ab9..6b304acf506f 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -241,6 +241,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 37e9413a393d..caa01457dce6 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -18,12 +18,82 @@ #include #include #include +#include +#include #include #include #include #include +#ifdef CONFIG_CPU_FREQ + +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); +static unsigned long glb_lpj_ref; +static unsigned long glb_lpj_ref_freq; + +static int cpufreq_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpumask *cpus = freq->policy->cpus; + unsigned long lpj; + int cpu; + + /* + * Skip the lpj adjustment if the CPU-freq transition is safe for + * the delay loops. (Is this possible?) + */ + if (freq->flags & CPUFREQ_CONST_LOOPS) + return NOTIFY_OK; + + /* Save the initial lpj values for future scaling. */ + if (!glb_lpj_ref) { + glb_lpj_ref = boot_cpu_data.udelay_val; + glb_lpj_ref_freq = freq->old; + + for_each_online_cpu(cpu) { + per_cpu(pcp_lpj_ref, cpu) = + cpu_data[cpu].udelay_val; + per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; + } + } + + /* + * Adjust the global lpj variable and the per-CPU udelay_val in + * accordance with the new CPU frequency. + */
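
cpufreq_scale() below is plain linear proportionality, lpj_new = lpj_ref * f_new / f_ref, and the PRECHANGE/POSTCHANGE test applies it before a frequency increase and after a decrease, so the delay loops are never calibrated too low for the clock they are running at. A self-contained model of that arithmetic (all numbers invented; frequencies in kHz):

#include <stdio.h>

/* Same arithmetic as the kernel's cpufreq_scale() helper. */
static unsigned long scale_lpj(unsigned long ref, unsigned long ref_freq,
			       unsigned long new_freq)
{
	return (unsigned long)((unsigned long long)ref * new_freq / ref_freq);
}

int main(void)
{
	unsigned long lpj_ref = 498688;		/* reference lpj at f_ref */
	unsigned long f_ref = 500000;		/* 500 MHz */

	/* Doubling the clock doubles lpj; halving it halves lpj. */
	printf("1000 MHz -> lpj %lu\n", scale_lpj(lpj_ref, f_ref, 1000000));
	printf(" 250 MHz -> lpj %lu\n", scale_lpj(lpj_ref, f_ref, 250000));
	return 0;
}
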
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + loops_per_jiffy = cpufreq_scale(glb_lpj_ref, + glb_lpj_ref_freq, + freq->new); + + for_each_cpu(cpu, cpus) { + lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), + per_cpu(pcp_lpj_ref_freq, cpu), + freq->new); + cpu_data[cpu].udelay_val = (unsigned int)lpj; + } + } + + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_notifier = { + .notifier_call = cpufreq_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + return cpufreq_register_notifier(&cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +#endif /* CONFIG_CPU_FREQ */ + /* * forward reference */ diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c index cd3e1f82e1a5..08ad6371fbe0 100644 --- a/arch/mips/kernel/topology.c +++ b/arch/mips/kernel/topology.c @@ -20,7 +20,7 @@ static int __init topology_init(void) for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); - c->hotpluggable = 1; + c->hotpluggable = !!i; ret = register_cpu(c, i); if (ret) printk(KERN_WARNING "topology_init: register_cpu %d " diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 342e41de9d64..8282d0feb0b2 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1240,6 +1240,18 @@ static int enable_restore_fp_context(int msa) err = own_fpu_inatomic(1); if (msa && !err) { enable_msa(); + /* + * With MSA enabled, userspace can see MSACSR and the MSA + * regs, but the values in them are left over from a previous + * task; restore them from the saved fp/msa context. + */ + write_msa_csr(current->thread.fpu.msacsr); + /* + * own_fpu_inatomic(1) only restores the low 64 bits; + * fix up the high 64 bits. + */ + init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } @@ -2126,6 +2138,7 @@ static void configure_status(void) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); + back_to_back_c0_hazard(); } unsigned int hwrena; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 33ee0d18fb0a..faf98f209b3f 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS /* . = 0xa800000000300000; */ . = 0xffffffff80300000; #endif - . = VMLINUX_LOAD_ADDRESS; + .
= LINKER_LOAD_ADDRESS; /* read-only */ _text = .; /* Text and read-only data */ .text : { @@ -93,6 +93,7 @@ SECTIONS INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA + PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA @@ -225,6 +226,5 @@ SECTIONS *(.options) *(.pdr) *(.reginfo) - *(.eh_frame) } } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 1109924560d8..b22a3565e133 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -131,6 +131,8 @@ int kvm_arch_check_processor_compat(void) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { + case KVM_VM_MIPS_AUTO: + break; #ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: #else diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 97e538a8c1be..97f63a84aa51 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 115b417dfb8e..9fcc118312cb 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc) generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ - if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) + if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, LTQ_EBU_PCC_ISTAT); } diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 156a95ac5c72..2ee68d6e8bb9 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -514,8 +514,8 @@ void __init ltq_soc_init(void) clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); @@ -538,8 +538,8 @@ void __init ltq_soc_init(void) PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c index 09d5deea747f..f80a67c092b6 100644 --- a/arch/mips/lib/uncached.c +++ b/arch/mips/lib/uncached.c @@ -37,10 +37,12 @@ */ unsigned long run_uncached(void *func) { - register long sp __asm__("$sp"); register long ret __asm__("$2"); long lfunc = (long)func, ufunc; long usp; + 
long sp; + + __asm__("move %0, $sp" : "=r" (sp)); if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) usp = CKSEG1ADDR(sp); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 89b9c851d822..3375bbe63284 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1560,7 +1560,7 @@ static int probe_scache(void) return 1; } -static void __init loongson2_sc_init(void) +static void loongson2_sc_init(void) { struct cpuinfo_mips *c = &current_cpu_data; @@ -1576,7 +1576,7 @@ static void __init loongson2_sc_init(void) c->options |= MIPS_CPU_INCLUSIVE_CACHES; } -static void __init loongson3_sc_init(void) +static void loongson3_sc_init(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config2, lsize; @@ -1676,7 +1676,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index dbdbfe5d8408..e67374268b42 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -147,7 +147,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c) return 1; } -static int __init mips_sc_probe_cm3(void) +static int mips_sc_probe_cm3(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned long cfg = read_gcr_l2_config(); @@ -181,7 +181,7 @@ static int __init mips_sc_probe_cm3(void) return 0; } -static inline int __init mips_sc_probe(void) +static inline int mips_sc_probe(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config1, config2; diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index c13e46ced425..60046445122b 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -437,6 +437,7 @@ int has_transparent_hugepage(void) } return mask == PM_HUGE_MASK; } +EXPORT_SYMBOL(has_transparent_hugepage); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 41bb91f05688..547d813ead48 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1480,6 +1480,7 @@ static void build_r4000_tlb_refill_handler(void) static void setup_pw(void) { + unsigned int pwctl; unsigned long pgd_i, pgd_w; #ifndef __PAGETABLE_PMD_FOLDED unsigned long pmd_i, pmd_w; @@ -1506,6 +1507,7 @@ static void setup_pw(void) pte_i = ilog2(_PAGE_GLOBAL); pte_w = 0; + pwctl = 1 << 30; /* Set PWDirExt */ #ifndef __PAGETABLE_PMD_FOLDED write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i); @@ -1516,8 +1518,9 @@ static void setup_pw(void) #endif #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - write_c0_pwctl(1 << 6 | psn); + pwctl |= (1 << 6 | psn); #endif + write_c0_pwctl(pwctl); write_c0_kpgd((long)swapper_pg_dir); kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */ } diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 561154cbcc40..b31b91e57c34 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -1355,6 +1355,9 @@ jeq_common: } break; + case BPF_ST | BPF_NOSPEC: /* speculation barrier */ + break; + case BPF_ST | BPF_B | BPF_MEM: case BPF_ST | BPF_H | BPF_MEM: case BPF_ST | BPF_W | BPF_MEM: diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 39052de915f3..3a909194284a 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -166,8 +166,13 @@ void pci_load_of_ranges(struct
pci_controller *hose, struct device_node *node) res = hose->mem_resource; break; } - if (res != NULL) - of_pci_range_to_resource(&range, node, res); + if (res != NULL) { + res->name = node->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } } } diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index d36061603752..e032932348d6 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -30,6 +30,7 @@ #define RALINK_GPIOMODE 0x60 #define PPLL_CFG1 0x9c +#define PPLL_LD BIT(23) #define PPLL_DRV 0xa0 #define PDRV_SW_SET BIT(31) @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev) rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); mdelay(100); - if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) { - dev_err(&pdev->dev, "MT7620 PPLL unlock\n"); + if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { + dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n"); reset_control_assert(rstpcie0); rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); return -1; diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index c9f4d4ba058a..2ceda2a040ed 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - u16 cmd; int irq = -1; if (dev->bus->number != 0) @@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) switch (PCI_SLOT(dev->devfn)) { case 0x00: - rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); - (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); break; case 0x11: irq = RT288X_CPU_IRQ_PCI; @@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) break; } - pci_write_config_byte((struct pci_dev *) dev, - PCI_CACHE_LINE_SIZE, 0x14); - pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF); - pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | - PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | - PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; - pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd); - pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE, - dev->irq); return irq; } @@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev) int pcibios_plat_dev_init(struct pci_dev *dev) { + static bool slot0_init; + + /* + * Nobody seems to initialize slot 0, but this platform requires it, so + * do it once when some other slot is being enabled. The PCI subsystem + * should configure other slots properly, so no need to do anything + * special for those. 
+ */ + if (!slot0_init && dev->bus->number == 0) { + u16 cmd; + u32 bar0; + + slot0_init = true; + + pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + 0x08000000); + pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, + &bar0); + + pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd); + } + return 0; } diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 30017d5945bc..adc9f83b2c44 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -284,7 +284,7 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask, ret = irq_chip_set_affinity_parent(d, mask, force); if (ret >= 0) { cpu = cpumask_first_and(mask, cpu_online_mask); - data->nnasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + data->nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); bridge_write(data->bc, b_int_addr[pin].addr, (((data->bc->intr_addr >> 30) & 0x30000) | bit | (data->nasid << 8))); @@ -444,9 +444,10 @@ static int bridge_probe(struct platform_device *pdev) return -ENOMEM; domain = irq_domain_create_hierarchy(parent, 0, 8, fn, &bridge_domain_ops, NULL); - irq_domain_free_fwnode(fn); - if (!domain) + if (!domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } pci_set_flags(PCI_PROBE_ONLY); @@ -538,6 +539,7 @@ err_free_resource: pci_free_resource_list(&host->windows); err_remove_domain: irq_domain_remove(domain); + irq_domain_free_fwnode(fn); return err; } @@ -545,8 +547,10 @@ static int bridge_remove(struct platform_device *pdev) { struct pci_bus *bus = platform_get_drvdata(pdev); struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct fwnode_handle *fn = bc->domain->fwnode; irq_domain_remove(bc->domain); + irq_domain_free_fwnode(fn); pci_lock_rescan_remove(); pci_stop_root_bus(bus); pci_remove_root_bus(bus); diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index f9407e170476..c6af7047eb0d 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c @@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = { }, }; -static u32 a20r_ack_hwint(void) +/* + * Trigger chipset to update CPU's CAUSE IP field + */ +static u32 a20r_update_cause_ip(void) { u32 status = read_c0_status(); @@ -205,12 +208,14 @@ static void a20r_hwint(void) int irq; clear_c0_status(IE_IRQ0); - status = a20r_ack_hwint(); + status = a20r_update_cause_ip(); cause = read_c0_cause(); irq = ffs(((cause & status) >> 8) & 0xf8); if (likely(irq > 0)) do_IRQ(SNI_A20R_IRQ_BASE + irq - 1); + + a20r_update_cause_ip(); set_c0_status(IE_IRQ0); } diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c index adde79ce7fc0..dbd14ff05b4c 100644 --- a/arch/mips/tools/elf-entry.c +++ b/arch/mips/tools/elf-entry.c @@ -51,11 +51,14 @@ int main(int argc, const char *argv[]) nread = fread(&hdr, 1, sizeof(hdr), file); if (nread != sizeof(hdr)) { perror("Unable to read input file"); + fclose(file); return EXIT_FAILURE; } - if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) + if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) { + fclose(file); die("Input is not an ELF\n"); + } switch (hdr.ehdr32.e_ident[EI_CLASS]) { case ELFCLASS32: @@ -67,6 +70,7 @@ int main(int argc, const char *argv[]) entry = be32toh(hdr.ehdr32.e_entry); break; default: + fclose(file); die("Invalid ELF encoding\n"); } @@ -83,14 +87,17 @@ int main(int argc, const char *argv[]) entry = be64toh(hdr.ehdr64.e_entry); break; default: + 
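/* unrecognised byte order: release the file handle before bailing out via die() */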
fclose(file); die("Invalid ELF encoding\n"); } break; default: + fclose(file); die("Invalid ELF class\n"); } printf("0x%016" PRIx64 "\n", entry); + fclose(file); return EXIT_SUCCESS; } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 996a934ece7d..d3cd9c4cadc2 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -16,12 +16,9 @@ ccflags-vdso := \ $(filter -march=%,$(KBUILD_CFLAGS)) \ $(filter -m%-float,$(KBUILD_CFLAGS)) \ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \ + $(CLANG_FLAGS) \ -D__VDSO__ -ifdef CONFIG_CC_IS_CLANG -ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS)) -endif - # # The -fno-jump-tables flag only prevents the compiler from generating # jump tables but does not prevent the compiler from emitting absolute diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c index b66b6b1c4aeb..8f581a2c8578 100644 --- a/arch/mips/vdso/genvdso.c +++ b/arch/mips/vdso/genvdso.c @@ -122,6 +122,7 @@ static void *map_vdso(const char *path, size_t *_size) if (fstat(fd, &stat) != 0) { fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -130,6 +131,7 @@ static void *map_vdso(const char *path, size_t *_size) if (addr == MAP_FAILED) { fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -139,6 +141,7 @@ static void *map_vdso(const char *path, size_t *_size) if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) { fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name, path); + close(fd); return NULL; } @@ -150,6 +153,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF class\n", program_name, path); + close(fd); return NULL; } @@ -161,6 +165,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF data order\n", program_name, path); + close(fd); return NULL; } @@ -168,15 +173,18 @@ static void *map_vdso(const char *path, size_t *_size) fprintf(stderr, "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", program_name, path); + close(fd); return NULL; } else if (swap_uint16(ehdr->e_type) != ET_DYN) { fprintf(stderr, "%s: '%s' has invalid ELF type (expected ET_DYN)\n", program_name, path); + close(fd); return NULL; } *_size = stat.st_size; + close(fd); return addr; } @@ -280,10 +288,12 @@ int main(int argc, char **argv) /* Calculate and write symbol offsets to */ if (!get_symbols(dbg_vdso_path, dbg_vdso)) { unlink(out_path); + fclose(out_file); return EXIT_FAILURE; } fprintf(out_file, "};\n"); + fclose(out_file); return EXIT_SUCCESS; } diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index 254703653b6f..f34dc9bc6758 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c @@ -239,7 +239,7 @@ void flush_dcache_page(struct page *page) { struct address_space *mapping; - mapping = page_mapping(page); + mapping = page_mapping_file(page); if (mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else { diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 17c24f14615f..6839f8fcf76b 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -164,19 +164,19 @@ struct __large_struct { #define __get_user_nocheck(x, ptr, size) \ ({ \ - long __gu_err, __gu_val; \ - __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + long __gu_err; \ + 
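/* read straight into (x) so 64-bit accesses are not truncated through an intermediate long */ \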
__get_user_size((x), (ptr), (size), __gu_err); \ __gu_err; \ }) #define __get_user_check(x, ptr, size) \ ({ \ - long __gu_err = -EFAULT, __gu_val = 0; \ - const __typeof__(*(ptr)) * __gu_addr = (ptr); \ - if (access_ok(__gu_addr, size)) \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + long __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (access_ok(__gu_addr, size)) \ + __get_user_size((x), __gu_addr, (size), __gu_err); \ + else \ + (x) = (__typeof__(*(ptr))) 0; \ __gu_err; \ }) @@ -190,11 +190,13 @@ do { \ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ case 8: __get_user_asm2(x, ptr, retval); break; \ - default: (x) = __get_user_bad(); \ + default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, op) \ +{ \ + unsigned long __gu_tmp; \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ "2:\n" \ @@ -208,10 +210,14 @@ do { \ " .align 2\n" \ " .long 1b,3b\n" \ ".previous" \ - : "=r"(err), "=r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) = (__typeof__(*(addr)))__gu_tmp; \ +} #define __get_user_asm2(x, addr, err) \ +{ \ + unsigned long long __gu_tmp; \ __asm__ __volatile__( \ "1: l.lwz %1,0(%2)\n" \ "2: l.lwz %H1,4(%2)\n" \ @@ -228,8 +234,11 @@ do { \ " .long 1b,4b\n" \ " .long 2b,4b\n" \ ".previous" \ - : "=r"(err), "=&r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=&r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) = (__typeof__(*(addr)))( \ + (__typeof__((x)-(x)))__gu_tmp); \ +} /* more complex routines */ diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index e4a78571f883..c6481cfc5220 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1166,13 +1166,13 @@ ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) l.ori r29,r29,lo(sys_clone) l.j _fork_save_extra_regs_and_call - l.addi r7,r1,0 + l.nop ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) l.j _fork_save_extra_regs_and_call - l.addi r3,r1,0 + l.nop ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 43f140a28bc7..54d38809e22c 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index 08f56af387ac..534a52ec5e66 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -16,7 +16,7 @@ #include #include -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; 
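/* align the physical address down to the start of its cache line */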
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 36b834f1c933..53f974817aff 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -156,7 +156,7 @@ vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ else vmlinuz: vmlinux - @gzip -cf -9 $< > $@ + @$(KGZIP) -cf -9 $< > $@ endif install: diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..6dd4171c9530 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f..640d46edf32e 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -26,6 +26,67 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c..a736dc59bbef 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 197d2247e4db..16aec9ba2580 100644 
--- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) volatile unsigned int *a; a = __ldcw_align(x); -#ifdef CONFIG_SMP - (void) __ldcw(a); -#else - mb(); -#endif - *a = 1; + /* Release with ordered store. */ + __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *x) diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 10173c32195e..1a8ec3838c9b 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -116,6 +116,8 @@ #define SO_DETACH_REUSEPORT_BPF 0x4042 +#define SO_NETNS_COOKIE 0x4045 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index b96d74496977..873bf3434da9 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -454,7 +454,6 @@ nop LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f - LDCW 0(\tmp),\tmp1 b \fault stw \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) @@ -464,23 +463,26 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. */ - .macro tlb_unlock0 spc,tmp,tmp1 + /* Release pa_tlb_lock lock without reloading lock address. + Note that the values in the register spc are limited to + NR_SPACE_IDS (262144). Thus, the stw instruction always + stores a nonzero value even when register spc is 64 bits. + We use an ordered store to ensure all prior accesses are + performed prior to releasing the lock. */ + .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP 98: or,COND(=) %r0,\spc,%r0 - LDCW 0(\tmp),\tmp1 - or,COND(=) %r0,\spc,%r0 - stw \spc,0(\tmp) + stw,ma \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm /* Release pa_tlb_lock lock. 
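Unlike tlb_unlock0 above, this variant first reloads the lock address via load_pa_tlb_lock.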
*/ - .macro tlb_unlock1 spc,tmp,tmp1 + .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP 98: load_pa_tlb_lock \tmp 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp,\tmp1 + tlb_unlock0 \spc,\tmp #endif .endm @@ -1163,7 +1165,7 @@ dtlb_miss_20w: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1189,7 +1191,7 @@ nadtlb_miss_20w: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1223,7 +1225,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1256,7 +1258,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1285,7 +1287,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1313,7 +1315,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1420,7 +1422,7 @@ itlb_miss_20w: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1444,7 +1446,7 @@ naitlb_miss_20w: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1478,7 +1480,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1502,7 +1504,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1532,7 +1534,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1552,7 +1554,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1582,7 +1584,7 @@ dbit_trap_20w: idtlbt pte,prot - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop #else @@ -1608,7 +1610,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop @@ -1628,7 +1630,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index e5fcfb70cc7c..4d54aa70ea5f 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -376,7 +376,11 @@ static inline int eirr_to_irq(unsigned long eirr) /* * IRQ STACK - used for irq handler */ +#ifdef CONFIG_64BIT +#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */ +#else #define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */ +#endif union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 97ac707c6bff..a37814cb66c7 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -640,11 +640,7 @@ cas_action: sub,<> %r28, %r25, %r0 2: stw %r24, 0(%r26) /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) @@ -658,11 +654,7 @@ cas_action: 3: /* Error occurred on load or store */ /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif @@ -863,11 +855,7 @@ cas2_action: cas2_end: /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) /* Enable interrupts 
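again; the LWS compare-and-swap sequence runs with them disabled up to this point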
*/ ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ @@ -877,11 +865,7 @@ cas2_end: 22: /* Error occurred on load or store */ /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 1(%r0),%r28 b lws_exit diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..2e4d1f05a926 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ddca8287d43b..3e54484797f6 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -588,7 +588,7 @@ void __init mem_init(void) > BITS_PER_LONG); high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); + set_max_mapnr(max_low_pfn); memblock_free_all(); #ifdef CONFIG_PA11 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2b1033f13210..757175ccf53c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -133,7 +133,7 @@ config PPC select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 - select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) + select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UACCESS_MCSAFE if PPC64 @@ -147,6 +147,7 @@ config PPC select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_EXTABLE_SORT @@ -171,7 +172,7 @@ config PPC select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if PPC32 + select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT @@ -213,7 +214,7 @@ config PPC select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) - select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S) + select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP select HAVE_OPROFILE select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS @@ -722,7 +723,7 @@ config PPC_64K_PAGES config PPC_256K_PAGES bool "256k page size" - depends on 44x && !STDBINUTILS + depends on 44x && !STDBINUTILS && !PPC_47x help Make the page size 256k. @@ -747,6 +748,7 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 + default "14" if KASAN default "13" help Used to define the stack size. The default is almost always what you @@ -1022,6 +1024,19 @@ config FSL_RIO Include support for RapidIO controller on Freescale embedded processors (MPC8548, MPC8641, etc). 
+config PPC_RTAS_FILTER + bool "Enable filtering of RTAS syscalls" + default y + depends on PPC_RTAS + help + The RTAS syscall API has security issues that could be used to + compromise system integrity. This option enforces restrictions on the + RTAS calls and arguments passed by userspace programs to mitigate + these issues. + + Say Y unless you know what you are doing and the filter is causing + problems for you. + endmenu config NONSTATIC_KERNEL diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index b915fe658979..2ca9114fcf00 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -352,6 +352,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION + depends on PCI || IBMVIO help Provide fault-injection capability for IOMMU. Each device can be selectively enabled via the fail_iommu property. diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 37ac731a556b..9f73fb6b1cc9 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -250,7 +250,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string) cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) -cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_E500) += -Wa,-me500 # When using '-many -mpower4' gas will first try and find a matching power4 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index dfbd7f22eef5..8c69bd07ada6 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -119,7 +119,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ elf_util.c $(zlib-y) devtree.c stdlib.c \ oflib.c ofconsole.c cuboot.c -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 9457863147f9..00179cd6bdd0 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -128,7 +128,7 @@ int serial_console_init(void) dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_PPC_MPC52XX +#ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index 9575a38c9155..b507df6ac69f 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -2,7 +2,6 @@ CONFIG_AQUANTIA_PHY=y CONFIG_AT803X_PHY=y CONFIG_ATA=y CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_BLK_DEV_SR=y CONFIG_BROADCOM_PHY=y CONFIG_C293_PCIE=y diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig index cf94d28d0e31..340140160c7b 100644 --- a/arch/powerpc/configs/amigaone_defconfig +++ b/arch/powerpc/configs/amigaone_defconfig @@ -47,7 +47,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index 9ff493dd8439..6c5a4414e9ee 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -45,7 +45,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y 
-CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index fbfcc85e4dc0..a68c7f3af10e 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -62,7 +62,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 2975e64629aa..161351a18517 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -41,7 +41,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_IPR=y CONFIG_ATA=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 4b6d31d4474e..ddf5e97877e2 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -60,7 +60,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=y CONFIG_SCSI_CONSTANTS=y @@ -110,7 +109,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 4e6e95f92646..5cad09f93562 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -119,7 +119,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 6658cceb928c..2a7c53cc2f83 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -111,7 +111,6 @@ CONFIG_BLK_DEV_NVME=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index b250e6f5a7ca..5569d36066dc 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -110,7 +110,6 @@ CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 0d746774c2bd..33a01a9e86be 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -60,7 +60,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 9dca4cffa623..682d68f39c2b 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -372,7 +372,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y 
CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m @@ -778,7 +777,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 26126b4d4de3..d58686a66388 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -97,7 +97,6 @@ CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 1253482a67c0..2e25b264f70f 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -83,7 +83,6 @@ CONFIG_EEPROM_AT24=m # CONFIG_OCXL is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 603aed229af7..46338f236004 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -217,15 +217,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) */ static __inline__ int fls(unsigned int x) { - return 32 - __builtin_clz(x); + int lz; + + if (__builtin_constant_p(x)) + return x ? 32 - __builtin_clz(x) : 0; + asm("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; } #include +/* + * 64-bit can do this using one cntlzd (count leading zeroes doubleword) + * instruction; for 32-bit we use the generic version, which does two + * 32-bit fls calls. + */ +#ifdef CONFIG_PPC64 static __inline__ int fls64(__u64 x) { - return 64 - __builtin_clzll(x); + int lz; + + if (__builtin_constant_p(x)) + return x ? 
64 - __builtin_clzll(x) : 0; + asm("cntlzd %0,%1" : "=r" (lz) : "r" (x)); + return 64 - lz; } +#else +#include +#endif #ifdef CONFIG_PPC64 unsigned int __arch_hweight8(unsigned int w); diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 91c8f1d9bcee..6eb311eb818b 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -2,6 +2,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H #define _ASM_POWERPC_BOOK3S_32_KUP_H +#include #include #ifdef __ASSEMBLY__ @@ -75,7 +76,7 @@ .macro kuap_check current, gpr #ifdef CONFIG_PPC_KUAP_DEBUG - lwz \gpr2, KUAP(thread) + lwz \gpr, THREAD + KUAP(\current) 999: twnei \gpr, 0 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) #endif diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 0796533d37dd..5325bd9c9b47 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -37,8 +37,10 @@ static inline bool pte_user(pte_t pte) */ #ifdef CONFIG_PTE_64BIT #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 36 #else #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 32 #endif /* @@ -555,9 +557,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, if (pte_val(*ptep) & _PAGE_HASHPTE) flush_hash_entry(mm, ptep, addr); __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ + stw%X0 %2,%0\n\ eieio\n\ - stw%U0%X0 %L2,%1" + stw%X1 %L2,%1" : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 8fd8599c9395..80c953414882 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -13,20 +13,19 @@ */ #define MAX_EA_BITS_PER_CONTEXT 46 -#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2) /* - * Our page table limit us to 64TB. Hence for the kernel mapping, - * each MAP area is limited to 16 TB. - * The four map areas are: linear mapping, vmap, IO and vmemmap + * Our page tables limit us to 64TB. For 64TB physical memory, we only need 64GB + * of vmemmap space. To better support sparse memory layout, we use a 61TB + * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap.
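+ * Each of those non-linear regions spans 1 << REGION_SHIFT bytes; with
+ * REGION_SHIFT at 40 below, that is exactly 1TB apiece.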
*/ +#define REGION_SHIFT (40) #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT) /* - * Define the address range of the kernel non-linear virtual area - * 16TB + * Define the address range of the kernel non-linear virtual area (61TB) */ -#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000) +#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000) #ifndef __ASSEMBLY__ #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) @@ -156,6 +155,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, extern int hash__has_transparent_hugepage(void); #endif +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) +{ + BUG(); + return pmd; +} + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index d1d9177d9ebd..0729c034e56f 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, */ static inline int hash__pmd_trans_huge(pmd_t pmd) { - return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == + return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == (_PAGE_PTE | H_PAGE_THP_HUGE)); } @@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); extern int hash__has_transparent_hugepage(void); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h index c8d1076e0ebb..a29b64129a7d 100644 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -11,13 +11,12 @@ #ifdef __ASSEMBLY__ -.macro kuap_restore_amr gpr #ifdef CONFIG_PPC_KUAP +.macro kuap_restore_amr gpr BEGIN_MMU_FTR_SECTION_NESTED(67) ld \gpr, STACK_REGS_KUAP(r1) mtspr SPRN_AMR, \gpr END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) -#endif .endm .macro kuap_check_amr gpr1, gpr2 @@ -31,6 +30,7 @@ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) #endif .endm +#endif .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr #ifdef CONFIG_PPC_KUAP @@ -54,6 +54,10 @@ #else /* !__ASSEMBLY__ */ +#include + +DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); + #ifdef CONFIG_PPC_KUAP #include @@ -77,6 +81,18 @@ static inline void set_kuap(unsigned long value) isync(); } +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +{ + return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && + (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), + "Bug: %s fault blocked by AMR!", is_write ? 
"Write" : "Read"); +} +#else /* CONFIG_PPC_KUAP */ +static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } +static inline void set_kuap(unsigned long value) { } +#endif /* !CONFIG_PPC_KUAP */ + static __always_inline void allow_user_access(void __user *to, const void __user *from, unsigned long size, unsigned long dir) { @@ -94,17 +110,10 @@ static inline void prevent_user_access(void __user *to, const void __user *from, unsigned long size, unsigned long dir) { set_kuap(AMR_KUAP_BLOCKED); + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); } -static inline bool -bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) -{ - return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && - (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), - "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read"); -} -#endif /* CONFIG_PPC_KUAP */ - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index bb3deb76c951..2f4ddc802fe9 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -225,14 +225,14 @@ static inline void early_init_mmu_secondary(void) extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); -extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size); static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { - if (early_radix_enabled()) - return radix__setup_initial_memory_limit(first_memblock_base, - first_memblock_size); + /* + * Hash has more strict restrictions. At this point we don't + * know which translations we will pick. Hence go with hash + * restrictions. + */ return hash__setup_initial_memory_limit(first_memblock_base, first_memblock_size); } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index b01624e5c467..e1eb8aa9cfbb 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -998,10 +998,25 @@ extern struct page *pgd_page(pgd_t pgd); #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) -#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) -#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) -#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) +static inline unsigned long pgd_index(unsigned long address) +{ + return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); +} + +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} + +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} /* * Find an entry in a page-table-directory. 
We combine the address region @@ -1303,7 +1318,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm); static inline pmd_t pmd_mkdevmap(pmd_t pmd) { - return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); + if (radix_enabled()) + return radix__pmd_mkdevmap(pmd); + return hash__pmd_mkdevmap(pmd); } static inline int pmd_devmap(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index d97db3ad9aae..5131ed787409 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -206,8 +206,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr, * from ptesync, it should probably go into update_mmu_cache, rather * than set_pte_at (which is used to set ptes unrelated to faults). * - * Spurious faults to vmalloc region are not tolerated, so there is - * a ptesync in flush_cache_vmap. + * Spurious faults from the kernel memory are not tolerated, so there + * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows + * the pte update sequence from ISA Book III 6.10 Translation Table + * Update Synchronization Requirements. */ } @@ -263,6 +265,11 @@ static inline int radix__has_transparent_hugepage(void) } #endif +static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); +} + extern int __meminit radix__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 898b54262881..be0f7257b13c 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -72,7 +72,7 @@ void __patch_exception(int exc, unsigned long addr); #endif #define OP_RT_RA_MASK 0xffff0000UL -#define LIS_R2 0x3c020000UL +#define LIS_R2 0x3c400000UL #define ADDIS_R2_R12 0x3c4c0000UL #define ADDI_R2_R2 0x38420000UL diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index a116fe931789..3bdd74739cb8 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -68,6 +68,7 @@ extern void cpm_reset(void); #define PROFF_SPI ((uint)0x0180) #define PROFF_SCC3 ((uint)0x0200) #define PROFF_SMC1 ((uint)0x0280) +#define PROFF_DSP1 ((uint)0x02c0) #define PROFF_SCC4 ((uint)0x0300) #define PROFF_SMC2 ((uint)0x0380) diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 7897d16e0990..727d4b321937 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -7,7 +7,7 @@ #include #include -static inline bool early_cpu_has_feature(unsigned long feature) +static __always_inline bool early_cpu_has_feature(unsigned long feature) { return !!((CPU_FTRS_ALWAYS & feature) || (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); @@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature) return static_branch_likely(&cpu_feature_keys[i]); } #else -static inline bool cpu_has_feature(unsigned long feature) +static __always_inline bool cpu_has_feature(unsigned long feature) { return early_cpu_has_feature(feature); } diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index cf00ff0d121d..235911fb0e24 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -367,7 +367,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_PPC_LE 
| CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_MAYBE_CAN_NAP) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON | CPU_FTR_NOEXECUTE) @@ -407,7 +407,6 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) -#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ @@ -507,8 +506,6 @@ enum { CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | CPU_FTRS_CLASSIC32 | -#else - CPU_FTRS_GENERIC_32 | #endif #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | @@ -585,8 +582,6 @@ enum { CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & CPU_FTRS_CLASSIC32 & -#else - CPU_FTRS_GENERIC_32 & #endif #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 7141ccea8c94..a92059964579 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mfdcr(rn) \ ({unsigned int rval; \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mfdcr %0," __stringify(rn) \ - : "=r" (rval)); \ + asm volatile("mfdcr %0, %1" : "=r" (rval) \ + : "n" (rn)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ rval = mfdcrx(rn); \ else \ @@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mtdcr(rn, v) \ do { \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mtdcr " __stringify(rn) ",%0" \ - : : "r" (v)); \ + asm volatile("mtdcr %0, %1" \ + : : "n" (rn), "r" (v)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ mtdcrx(rn, v); \ else \ diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 3d76e1c388c2..ee61542c6c3d 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -8,31 +8,44 @@ #ifndef _ASM_POWERPC_LMB_H #define _ASM_POWERPC_LMB_H +#include + struct drmem_lmb { u64 base_addr; u32 drc_index; u32 aa_index; u32 flags; -#ifdef CONFIG_MEMORY_HOTPLUG - int nid; -#endif }; struct drmem_lmb_info { struct drmem_lmb *lmbs; int n_lmbs; - u32 lmb_size; + u64 lmb_size; }; extern struct drmem_lmb_info *drmem_info; +static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb, + const struct drmem_lmb *start) +{ + /* + * DLPAR code paths can take several milliseconds per element + * when interacting with firmware. Ensure that we don't + * unfairly monopolize the CPU. 
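+ * Yielding via cond_resched() once every 16 elements keeps long
+ * hot-add/remove walks preemptible.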
+ */ + if (((++lmb - start) % 16) == 0) + cond_resched(); + + return lmb; +} + #define for_each_drmem_lmb_in_range(lmb, start, end) \ - for ((lmb) = (start); (lmb) <= (end); (lmb)++) + for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start)) #define for_each_drmem_lmb(lmb) \ for_each_drmem_lmb_in_range((lmb), \ &drmem_info->lmbs[0], \ - &drmem_info->lmbs[drmem_info->n_lmbs - 1]) + &drmem_info->lmbs[drmem_info->n_lmbs]) /* * The of_drconf_cell_v1 struct defines the layout of the LMB data @@ -66,7 +79,7 @@ struct of_drconf_cell_v2 { #define DRCONF_MEM_AI_INVALID 0x00000040 #define DRCONF_MEM_RESERVED 0x00000080 -static inline u32 drmem_lmb_size(void) +static inline u64 drmem_lmb_size(void) { return drmem_info->lmb_size; } @@ -103,22 +116,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) lmb->aa_index = 0xffffffff; } -#ifdef CONFIG_MEMORY_HOTPLUG -static inline void lmb_set_nid(struct drmem_lmb *lmb) -{ - lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr); -} -static inline void lmb_clear_nid(struct drmem_lmb *lmb) -{ - lmb->nid = -1; -} -#else -static inline void lmb_set_nid(struct drmem_lmb *lmb) -{ -} -static inline void lmb_clear_nid(struct drmem_lmb *lmb) -{ -} -#endif - #endif /* _ASM_POWERPC_LMB_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 33f4f72eb035..6d0795d7b89c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -61,11 +61,18 @@ nop; \ nop +#define ENTRY_FLUSH_SLOT \ + ENTRY_FLUSH_FIXUP_SECTION; \ + nop; \ + nop; \ + nop; + /* * r10 must be free to use, r13 must be paca */ #define INTERRUPT_TO_KERNEL \ - STF_ENTRY_BARRIER_SLOT + STF_ENTRY_BARRIER_SLOT; \ + ENTRY_FLUSH_SLOT /* * Macros for annotating the expected destination of (h)rfid @@ -127,6 +134,9 @@ hrfid; \ b hrfi_flush_fallback +#else /* __ASSEMBLY__ */ +/* Prototype for function defined in exceptions-64s.S */ +void do_uaccess_flush(void); #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_EXCEPTION_H */ diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index c814a2b55389..8d61c8f3fec4 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -64,12 +64,14 @@ struct fadump_memory_range { }; /* fadump memory ranges info */ +#define RNG_NAME_SZ 16 struct fadump_mrange_info { - char name[16]; + char name[RNG_NAME_SZ]; struct fadump_memory_range *mem_ranges; u32 mem_ranges_sz; u32 mem_range_cnt; u32 max_mem_ranges; + bool is_static; }; /* Platform specific callback functions */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index b0af97add751..fbd406cd6916 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -205,6 +205,22 @@ label##3: \ FTR_ENTRY_OFFSET 955b-956b; \ .popsection; +#define UACCESS_FLUSH_FIXUP_SECTION \ +959: \ + .pushsection __uaccess_flush_fixup,"a"; \ + .align 2; \ +960: \ + FTR_ENTRY_OFFSET 959b-960b; \ + .popsection; + +#define ENTRY_FLUSH_FIXUP_SECTION \ +957: \ + .pushsection __entry_flush_fixup,"a"; \ + .align 2; \ +958: \ + FTR_ENTRY_OFFSET 957b-958b; \ + .popsection; + #define RFI_FLUSH_FIXUP_SECTION \ 951: \ .pushsection __rfi_flush_fixup,"a"; \ @@ -237,8 +253,11 @@ label##3: \ #include extern long stf_barrier_fallback; +extern long entry_flush_fallback; extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; extern 
long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; +extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup; +extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 296e51c2f066..6db06f58deed 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -23,9 +23,7 @@ #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) -#define KASAN_SHADOW_END 0UL - -#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START) +#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT)) #ifdef CONFIG_KASAN void kasan_early_init(void); diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 94f24928916a..ed4f5f536fc1 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -6,7 +6,7 @@ #define KUAP_WRITE 2 #define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE) -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S_64 #include #endif #ifdef CONFIG_PPC_8xx @@ -24,9 +24,15 @@ .macro kuap_restore sp, current, gpr1, gpr2, gpr3 .endm +.macro kuap_restore_amr gpr +.endm + .macro kuap_check current, gpr .endm +.macro kuap_check_amr gpr1, gpr2 +.endm + #endif #else /* !__ASSEMBLY__ */ @@ -45,15 +51,26 @@ static inline void setup_kuep(bool disabled) { } void setup_kuap(bool disabled); #else static inline void setup_kuap(bool disabled) { } -static inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) { } -static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) { } + static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { return false; } + +static inline void kuap_check_amr(void) { } + +/* + * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush + * the L1D cache after user accesses. Only include the empty stubs for other + * platforms. 
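+ *
+ * A sketch of why (using the helpers defined later in this header):
+ * even with KUAP compiled out, a user copy on Book3S-64 still runs
+ * roughly
+ *
+ *   allow_read_from_user(from, size);     // no-op without KUAP
+ *   ret = __copy_tofrom_user(to, from, size);
+ *   prevent_read_from_user(from, size);   // must still flush the L1D
+ *
+ * so the kup-radix.h versions of these hooks must stay visible here.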
+ */ +#ifndef CONFIG_PPC_BOOK3S_64 +static inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { } +static inline void prevent_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) { } +#endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* CONFIG_PPC_KUAP */ static inline void allow_read_from_user(const void __user *from, unsigned long size) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 635fb154b33f..a3633560493b 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -150,4 +150,7 @@ #define KVM_INST_FETCH_FAILED -1 +/* Extract PO and XOP opcode fields */ +#define PO_XOP_OPCODE_MASK 0xfc0007fe + #endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6fe6ad64cba5..740b52ec3509 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -58,7 +58,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + unsigned flags); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 7bcb64444a39..f71c361dc356 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -59,6 +59,9 @@ struct machdep_calls { int (*pcibios_root_bridge_prepare)(struct pci_host_bridge *bridge); + /* finds all the pci_controllers present at boot */ + void (*discover_phbs)(void); + /* To setup PHBs when using automatic OF platform driver for PCI */ int (*pci_setup_phb)(struct pci_controller *host); diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 58efca934311..f132b418a8c7 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -216,7 +216,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm_irqs_off(prev, next, current); } /* We don't currently use enter_lazy_tlb() for anything */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 552b96eef0c8..3d32d7103ec8 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -148,8 +148,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 36 #else #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 32 #endif /* diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 7fed9dc0f147..3d2a78ab051a 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -199,9 +199,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, */ if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) { __asm__ __volatile__("\ - stw%U0%X0 
%2,%0\n\ + stw%X0 %2,%0\n\ eieio\n\ - stw%U0%X0 %L2,%1" + stw%X1 %L2,%1" : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); return; diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index dce863a7635c..8e5b7d0b851c 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -10,8 +10,6 @@ #ifdef CONFIG_SMP -#include - #define __my_cpu_offset local_paca->data_offset #endif /* CONFIG_SMP */ @@ -19,4 +17,6 @@ #include +#include + #endif /* _ASM_POWERPC_PERCPU_H_ */ diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 7426d7a90e1e..7aba3c7ea25c 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -12,6 +12,8 @@ #ifdef CONFIG_PPC_PERF_CTRS #include +#else +static inline bool is_sier_available(void) { return false; } #endif #ifdef CONFIG_FSL_EMB_PERF_EVENT diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index a9993e7a443b..64b998db9d3e 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -291,7 +291,6 @@ struct thread_struct { #else #define INIT_THREAD { \ .ksp = INIT_SP, \ - .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ .fscr = FSCR_TAR | FSCR_EBB \ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index ee3ada66deb5..5a424f867c82 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -62,6 +62,9 @@ struct pt_regs }; #endif + +#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) + #ifdef __powerpc64__ /* @@ -203,7 +206,7 @@ do { \ #endif /* __powerpc64__ */ #define arch_has_single_step() (1) -#ifndef CONFIG_BOOK3S_601 +#ifndef CONFIG_PPC_BOOK3S_601 #define arch_has_block_step() (true) #else #define arch_has_block_step() (false) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index b3cbb1136bce..34d08ff21b98 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -796,7 +796,7 @@ #define THRM1_TIN (1 << 31) #define THRM1_TIV (1 << 30) #define THRM1_THRES(x) ((x&0x7f)<<23) -#define THRM3_SITV(x) ((x&0x3fff)<<1) +#define THRM3_SITV(x) ((x & 0x1fff) << 1) #define THRM1_TID (1<<2) #define THRM1_TIE (1<<1) #define THRM1_V (1<<0) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3c1887351c71..bd227e0eab07 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); -extern int rtas_online_cpus_mask(cpumask_var_t cpus); -extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(u64 handle); struct rtc_time; diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 7c05e95a5c44..e9e3f85134e5 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -84,12 +84,19 @@ static inline bool security_ftr_enabled(u64 feature) // Software required to flush link stack on context switch #define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull +// The L1-D cache should be flushed 
when entering the kernel +#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull + +// The L1-D cache should be flushed after user accesses from the kernel +#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull // Features enabled by default #define SEC_FTR_DEFAULT \ (SEC_FTR_L1D_FLUSH_HV | \ SEC_FTR_L1D_FLUSH_PR | \ SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_L1D_FLUSH_ENTRY | \ + SEC_FTR_L1D_FLUSH_UACCESS | \ SEC_FTR_FAVOUR_SECURITY) #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h index e9f81bb3f83b..f798e80e4106 100644 --- a/arch/powerpc/include/asm/setjmp.h +++ b/arch/powerpc/include/asm/setjmp.h @@ -7,7 +7,9 @@ #define JMP_BUF_LEN 23 -extern long setjmp(long *) __attribute__((returns_twice)); -extern void longjmp(long *, long) __attribute__((noreturn)); +typedef long jmp_buf[JMP_BUF_LEN]; + +extern int setjmp(jmp_buf env) __attribute__((returns_twice)); +extern void longjmp(jmp_buf env, int val) __attribute__((noreturn)); #endif /* _ASM_POWERPC_SETJMP_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 65676e2325b8..6f2f4497e13b 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -52,12 +52,16 @@ enum l1d_flush_type { }; void setup_rfi_flush(enum l1d_flush_type, bool enable); +void setup_entry_flush(bool enable); +void setup_uaccess_flush(bool enable); void do_rfi_flush_fixups(enum l1d_flush_type types); #ifdef CONFIG_PPC_BARRIER_NOSPEC void setup_barrier_nospec(void); #else static inline void setup_barrier_nospec(void) { }; #endif +void do_uaccess_flush_fixups(enum l1d_flush_type types); +void do_entry_flush_fixups(enum l1d_flush_type types); void do_barrier_nospec_fixups(bool enable); extern bool barrier_nospec_enabled; diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index d2d2c4bd8435..6047402b0a4d 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -17,7 +17,7 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - if (IS_ENABLED(CONFIG_BOOK3S_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return 0; return mftb(); diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 7f3a8b902325..02a1c18cdba3 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm) return false; return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); } -static inline void mm_reset_thread_local(struct mm_struct *mm) -{ - WARN_ON(atomic_read(&mm->context.copros) > 0); - /* - * It's possible for mm_access to take a reference on mm_users to - * access the remote mm from another thread, but it's not allowed - * to set mm_cpumask, so mm_users may be > 1 here. 
- */ - WARN_ON(current->mm != mm); - atomic_set(&mm->context.active_cpus, 1); - cpumask_clear(mm_cpumask(mm)); - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); -} #else /* CONFIG_PPC_BOOK3S_64 */ static inline int mm_is_thread_local(struct mm_struct *mm) { diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h index cc79856896a1..4ba87de32be0 100644 --- a/arch/powerpc/include/uapi/asm/errno.h +++ b/arch/powerpc/include/uapi/asm/errno.h @@ -2,6 +2,7 @@ #ifndef _ASM_POWERPC_ERRNO_H #define _ASM_POWERPC_ERRNO_H +#undef EDEADLOCK #include #undef EDEADLOCK diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 3c02445cf086..afbd47b0a75c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,9 +5,6 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -# Avoid clang warnings around longjmp/setjmp declarations -CFLAGS_crash.o += -ffreestanding - ifdef CONFIG_PPC64 CFLAGS_prom_init.o += $(NO_MINIMAL_TOC) endif @@ -22,6 +19,7 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) +CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code @@ -39,7 +37,6 @@ KASAN_SANITIZE_btext.o := n ifdef CONFIG_KASAN CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING -CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING endif @@ -184,6 +181,9 @@ KCOV_INSTRUMENT_cputable.o := n KCOV_INSTRUMENT_setup_64.o := n KCOV_INSTRUMENT_paca.o := n +CFLAGS_setup_64.o += -fno-stack-protector +CFLAGS_paca.o += -fno-stack-protector + extra-$(CONFIG_PPC_FPU) += fpu.o extra-$(CONFIG_ALTIVEC) += vector.o extra-$(CONFIG_PPC64) += entry_64.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 5c0a1e17219b..af399675248e 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -285,7 +285,7 @@ int main(void) /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); - DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); + DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS); STACK_PT_REGS_OFFSET(GPR0, gpr[0]); STACK_PT_REGS_OFFSET(GPR1, gpr[1]); STACK_PT_REGS_OFFSET(GPR2, gpr[2]); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index a460298c7ddb..f91ecb10d0ae 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -184,7 +184,7 @@ __init_LPCR_ISA300: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB + ori r3,r3,FSCR_TAR|FSCR_EBB mtspr SPRN_FSCR,r3 blr diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index e486d1d78de2..f4cb2c546adb 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -160,7 +160,8 @@ u64 dma_iommu_get_required_mask(struct device *dev) return bypass_mask; } - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + + tbl->it_page_shift - 1); mask += mask - 1; return mask; diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 180b3a5d1001..3551f11accf0 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu(void) /* 
Initialize the base environment -- clear FSCR/HFSCR. */ hv_mode = !!(mfmsr() & MSR_HV); if (hv_mode) { - /* CPU_FTR_HVMODE is used early in PACA setup */ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; mtspr(SPRN_HFSCR, 0); } @@ -347,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) { u64 lpcr; + /* + * Linux relies on FSCR[DSCR] being clear, so that we can take the + * facility unavailable interrupt and track the task's usage of DSCR. + * See facility_unavailable_exception(). + * Clear the bit here so that feat_enable() doesn't set it. + */ + f->fscr_bit_nr = -1; + feat_enable(f); lpcr = mfspr(SPRN_LPCR); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index bc8a551013be..8b0e523b2abb 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -368,14 +368,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) pa = pte_pfn(*ptep); /* On radix we can do hugepage mappings for io, so handle that */ - if (hugepage_shift) { - pa <<= hugepage_shift; - pa |= token & ((1ul << hugepage_shift) - 1); - } else { - pa <<= PAGE_SHIFT; - pa |= token & (PAGE_SIZE - 1); - } + if (!hugepage_shift) + hugepage_shift = PAGE_SHIFT; + pa <<= PAGE_SHIFT; + pa |= token & ((1ul << hugepage_shift) - 1); return pa; } @@ -503,7 +500,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) rc = 1; if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; - if (pe->check_count % EEH_MAX_FAILS == 0) { + if (pe->check_count == EEH_MAX_FAILS) { dn = pci_device_to_OF_node(dev); if (dn) location = of_get_property(dn, "ibm,loc-code", diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index cf11277ebd02..000ebb5a6fb3 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -272,8 +272,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) { struct pci_io_addr_range *piar; struct rb_node *n; + unsigned long flags; - spin_lock(&pci_io_addr_cache_root.piar_lock); + spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { piar = rb_entry(n, struct pci_io_addr_range, rb_node); @@ -281,7 +282,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev)); } - spin_unlock(&pci_io_addr_cache_root.piar_lock); + spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); return 0; } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 13f699256258..c72894ff9d61 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -336,6 +336,9 @@ trace_syscall_entry_irq_off: .globl transfer_to_syscall transfer_to_syscall: +#ifdef CONFIG_PPC_BOOK3S_32 + kuep_lock r11, r12 +#endif #ifdef CONFIG_TRACE_IRQFLAGS andi. 
r12,r9,MSR_EE beq- trace_syscall_entry_irq_off @@ -705,7 +708,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE) stw r10,_CCR(r1) stw r1,KSP(r3) /* Set old stack pointer */ - kuap_check r2, r4 + kuap_check r2, r0 #ifdef CONFIG_SMP /* We need a sync somewhere here to make sure that if the * previous task gets rescheduled on another CPU, it sees all diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index d0018dd17e0a..88bba0a931d6 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1090,17 +1090,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common) bl machine_check_queue_event /* - * We have not used any non-volatile GPRs here, and as a rule - * most exception code including machine check does not. - * Therefore PACA_NAPSTATELOST does not need to be set. Idle - * wakeup will restore volatile registers. + * GPR-loss wakeups are relatively straightforward, because the + * idle sleep code has saved all non-volatile registers on its + * own stack, and r1 in PACAR1. * - * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce. + * For no-loss wakeups the r1 and lr registers used by the + * early machine check handler have to be restored first. r2 is + * the kernel TOC, so no need to restore it. * * Then decrement MCE nesting after finishing with the stack. */ ld r3,_MSR(r1) ld r4,_LINK(r1) + ld r1,GPR1(r1) lhz r11,PACA_IN_MCE(r13) subi r11,r11,1 @@ -1109,7 +1111,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common) mtlr r4 rlwinm r10,r3,47-31,30,31 cmpwi cr1,r10,2 - bltlr cr1 /* no state loss, return to idle caller */ + bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ b idle_return_gpr_loss #endif @@ -1148,7 +1150,7 @@ EXC_REAL_BEGIN(data_access, 0x300, 0x80) INT_HANDLER data_access, 0x300, ool=1, dar=1, dsisr=1, kvm=1 EXC_REAL_END(data_access, 0x300, 0x80) EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) - INT_HANDLER data_access, 0x300, virt=1, dar=1, dsisr=1 + INT_HANDLER data_access, 0x300, ool=1, virt=1, dar=1, dsisr=1 EXC_VIRT_END(data_access, 0x4300, 0x80) INT_KVM_HANDLER data_access, 0x300, EXC_STD, PACA_EXGEN, 1 EXC_COMMON_BEGIN(data_access_common) @@ -1203,7 +1205,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(instruction_access, 0x400, 0x80) - INT_HANDLER instruction_access, 0x400, kvm=1 + INT_HANDLER instruction_access, 0x400, ool=1, kvm=1 EXC_REAL_END(instruction_access, 0x400, 0x80) EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80) INT_HANDLER instruction_access, 0x400, virt=1 @@ -1223,7 +1225,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) - INT_HANDLER instruction_access_slb, 0x480, area=PACA_EXSLB, kvm=1 + INT_HANDLER instruction_access_slb, 0x480, ool=1, area=PACA_EXSLB, kvm=1 EXC_REAL_END(instruction_access_slb, 0x480, 0x80) EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) INT_HANDLER instruction_access_slb, 0x480, virt=1, area=PACA_EXSLB @@ -1363,17 +1365,17 @@ EXC_REAL_BEGIN(decrementer, 0x900, 0x80) INT_HANDLER decrementer, 0x900, ool=1, bitmask=IRQS_DISABLED, kvm=1 EXC_REAL_END(decrementer, 0x900, 0x80) EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80) - INT_HANDLER decrementer, 0x900, virt=1, bitmask=IRQS_DISABLED + INT_HANDLER decrementer, 0x900, ool=1, virt=1, bitmask=IRQS_DISABLED EXC_VIRT_END(decrementer, 0x4900, 0x80) INT_KVM_HANDLER decrementer, 0x900, EXC_STD, PACA_EXGEN, 0 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80) - INT_HANDLER hdecrementer, 0x980, 
hsrr=EXC_HV, kvm=1 + INT_HANDLER hdecrementer, 0x980, ool=1, hsrr=EXC_HV, kvm=1 EXC_REAL_END(hdecrementer, 0x980, 0x80) EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80) - INT_HANDLER hdecrementer, 0x980, virt=1, hsrr=EXC_HV, kvm=1 + INT_HANDLER hdecrementer, 0x980, ool=1, virt=1, hsrr=EXC_HV, kvm=1 EXC_VIRT_END(hdecrementer, 0x4980, 0x80) INT_KVM_HANDLER hdecrementer, 0x980, EXC_HV, PACA_EXGEN, 0 EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) @@ -2044,15 +2046,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) .endr blr -TRAMP_REAL_BEGIN(rfi_flush_fallback) - SET_SCRATCH0(r13); - GET_PACA(r13); - std r1,PACA_EXRFI+EX_R12(r13) - ld r1,PACAKSAVE(r13) - std r9,PACA_EXRFI+EX_R9(r13) - std r10,PACA_EXRFI+EX_R10(r13) - std r11,PACA_EXRFI+EX_R11(r13) - mfctr r9 +/* Clobbers r10, r11, ctr */ +.macro L1D_DISPLACEMENT_FLUSH ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ @@ -2063,7 +2058,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) sync /* - * The load adresses are at staggered offsets within cachelines, + * The load addresses are at staggered offsets within cachelines, * which suits some pipelines better (on others it should not * hurt). */ @@ -2078,7 +2073,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) ld r11,(0x80 + 8)*7(r10) addi r10,r10,0x80*8 bdnz 1b +.endm +TRAMP_REAL_BEGIN(entry_flush_fallback) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 + L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + blr + +TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r1,PACA_EXRFI+EX_R12(r13) + ld r1,PACAKSAVE(r13) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 + L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) @@ -2096,32 +2114,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) mfctr r9 - ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) - ld r11,PACA_L1D_FLUSH_SIZE(r13) - srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ - mtctr r11 - DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ - - /* order ld/st prior to dcbt stop all streams with flushing */ - sync - - /* - * The load adresses are at staggered offsets within cachelines, - * which suits some pipelines better (on others it should not - * hurt). 
- */ -1: - ld r11,(0x80 + 8)*0(r10) - ld r11,(0x80 + 8)*1(r10) - ld r11,(0x80 + 8)*2(r10) - ld r11,(0x80 + 8)*3(r10) - ld r11,(0x80 + 8)*4(r10) - ld r11,(0x80 + 8)*5(r10) - ld r11,(0x80 + 8)*6(r10) - ld r11,(0x80 + 8)*7(r10) - addi r10,r10,0x80*8 - bdnz 1b - + L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) @@ -2130,6 +2123,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) GET_SCRATCH0(r13); hrfid +USE_TEXT_SECTION() + +_GLOBAL(do_uaccess_flush) + UACCESS_FLUSH_FIXUP_SECTION + nop + nop + nop + blr + L1D_DISPLACEMENT_FLUSH + blr +_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) +EXPORT_SYMBOL(do_uaccess_flush) + /* * Real mode exceptions actually use this too, but alternate * instruction code patches (which end up in the common .text area) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index ed59855430b9..0455dc1b2797 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base); #ifndef CONFIG_PRESERVE_FA_DUMP static DEFINE_MUTEX(fadump_mutex); -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 }; -struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 }; +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; + +#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ +#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ + sizeof(struct fadump_memory_range)) +static struct fadump_memory_range rngs[RESERVED_RNGS_CNT]; +struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs, + RESERVED_RNGS_SZ, 0, + RESERVED_RNGS_CNT, true }; + +static void __init early_init_dt_scan_reserved_ranges(unsigned long node); #ifdef CONFIG_CMA static struct cma *fadump_cma; @@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; } int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) { + if (depth == 0) { + early_init_dt_scan_reserved_ranges(node); + return 0; + } + if (depth != 1) return 0; @@ -265,7 +279,7 @@ static void fadump_show_config(void) * that is required for a kernel to boot successfully. * */ -static inline u64 fadump_calculate_reserve_size(void) +static __init u64 fadump_calculate_reserve_size(void) { u64 base, size, bootmem_min; int ret; @@ -429,10 +443,72 @@ static int __init fadump_get_boot_mem_regions(void) return ret; } +/* + * Returns true, if the given range overlaps with reserved memory ranges + * starting at idx. Also, updates idx to index of overlapping memory range + * with the given memory range. + * False, otherwise. + */ +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx) +{ + bool ret = false; + int i; + + for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) { + u64 rbase = reserved_mrange_info.mem_ranges[i].base; + u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size; + + if (end <= rbase) + break; + + if ((end > rbase) && (base < rend)) { + *idx = i; + ret = true; + break; + } + } + + return ret; +} + +/* + * Locate a suitable memory area to reserve memory for FADump. While at it, + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W. 
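+ *
+ * In outline (a sketch of the loop below, not extra behaviour):
+ *
+ *   for each free memblock range [mstart, mend):
+ *       base = max(base, PAGE_ALIGN(mstart));
+ *       while (mend - base >= size):
+ *           if ([base, base + size) overlaps no reserved range)
+ *               return base;
+ *           base = PAGE_ALIGN(end of the overlapping reserved range);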
+ */ +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size) +{ + struct fadump_memory_range *mrngs; + phys_addr_t mstart, mend; + int idx = 0; + u64 i, ret = 0; + + mrngs = reserved_mrange_info.mem_ranges; + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &mstart, &mend, NULL) { + pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n", + i, mstart, mend, base); + + if (mstart > base) + base = PAGE_ALIGN(mstart); + + while ((mend > base) && ((mend - base) >= size)) { + if (!overlaps_reserved_ranges(base, base+size, &idx)) { + ret = base; + goto out; + } + + base = mrngs[idx].base + mrngs[idx].size; + base = PAGE_ALIGN(base); + } + } + +out: + return ret; +} + int __init fadump_reserve_mem(void) { - u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE; - bool is_memblock_bottom_up = memblock_bottom_up(); + u64 base, size, mem_boundary, bootmem_min; int ret = 1; if (!fw_dump.fadump_enabled) @@ -453,9 +529,9 @@ int __init fadump_reserve_mem(void) PAGE_ALIGN(fadump_calculate_reserve_size()); #ifdef CONFIG_CMA if (!fw_dump.nocma) { - align = FADUMP_CMA_ALIGNMENT; fw_dump.boot_memory_size = - ALIGN(fw_dump.boot_memory_size, align); + ALIGN(fw_dump.boot_memory_size, + FADUMP_CMA_ALIGNMENT); } #endif @@ -523,13 +599,9 @@ int __init fadump_reserve_mem(void) * Reserve memory at an offset closer to bottom of the RAM to * minimize the impact of memory hot-remove operation. */ - memblock_set_bottom_up(true); - base = memblock_find_in_range(base, mem_boundary, size, align); + base = fadump_locate_reserve_mem(base, size); - /* Restore the previous allocation mode */ - memblock_set_bottom_up(is_memblock_bottom_up); - - if (!base) { + if (!base || (base + size > mem_boundary)) { pr_err("Failed to find memory chunk for reservation!\n"); goto error_out; } @@ -726,10 +798,14 @@ void fadump_free_cpu_notes_buf(void) static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) { + if (mrange_info->is_static) { + mrange_info->mem_range_cnt = 0; + return; + } + kfree(mrange_info->mem_ranges); - mrange_info->mem_ranges = NULL; - mrange_info->mem_ranges_sz = 0; - mrange_info->max_mem_ranges = 0; + memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0, + (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ)); } /* @@ -786,6 +862,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { int ret; + if (mrange_info->is_static) { + pr_err("Reached array size limit for %s memory ranges\n", + mrange_info->name); + return -ENOSPC; + } + ret = fadump_alloc_mem_ranges(mrange_info); if (ret) return ret; @@ -1202,20 +1284,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) * Scan reserved-ranges to consider them while reserving/releasing * memory for FADump. 
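 *
 * Each reserved range is an (address, size) pair of 2 cells each in
 * the flat device tree, so a property like (values illustrative)
 *
 *   reserved-ranges = <0x0 0x20000000 0x0 0x10000000>;
 *
 * describes a single 256MB range starting at 512MB.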
*/ -static inline int fadump_scan_reserved_mem_ranges(void) +static void __init early_init_dt_scan_reserved_ranges(unsigned long node) { - struct device_node *root; const __be32 *prop; int len, ret = -1; unsigned long i; - root = of_find_node_by_path("/"); - if (!root) - return ret; + /* reserved-ranges already scanned */ + if (reserved_mrange_info.mem_range_cnt != 0) + return; - prop = of_get_property(root, "reserved-ranges", &len); + prop = of_get_flat_dt_prop(node, "reserved-ranges", &len); if (!prop) - return ret; + return; /* * Each reserved range is an (address,size) pair, 2 cells each, @@ -1237,7 +1318,8 @@ static inline int fadump_scan_reserved_mem_ranges(void) } } - return ret; + /* Compact reserved ranges */ + sort_and_merge_mem_ranges(&reserved_mrange_info); } /* @@ -1251,32 +1333,21 @@ static void fadump_release_memory(u64 begin, u64 end) u64 ra_start, ra_end, tstart; int i, ret; - fadump_scan_reserved_mem_ranges(); - ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; /* - * Add reserved dump area to reserved ranges list - * and exclude all these ranges while releasing memory. + * If reserved ranges array limit is hit, overwrite the last reserved + * memory range with reserved dump area to ensure it is excluded from + * the memory being released (reused for next FADump registration). */ - ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); - if (ret != 0) { - /* - * Not enough memory to setup reserved ranges but the system is - * running shortage of memory. So, release all the memory except - * Reserved dump area (reused for next fadump registration). - */ - if (begin < ra_end && end > ra_start) { - if (begin < ra_start) - fadump_release_reserved_area(begin, ra_start); - if (end > ra_end) - fadump_release_reserved_area(ra_end, end); - } else - fadump_release_reserved_area(begin, end); + if (reserved_mrange_info.mem_range_cnt == + reserved_mrange_info.max_mem_ranges) + reserved_mrange_info.mem_range_cnt--; + ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); + if (ret != 0) return; - } /* Get the reserved ranges list in order first. 
*/ sort_and_merge_mem_ranges(&reserved_mrange_info); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 4a24f8f026c7..edaab1142498 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -418,14 +418,11 @@ InstructionTLBMiss: cmplw 0,r1,r3 #endif mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP - li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC -#else - li r1,_PAGE_PRESENT | _PAGE_EXEC -#endif + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) bge- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ #endif 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ @@ -484,13 +481,10 @@ DataLoadTLBMiss: lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP - li r1, _PAGE_PRESENT | _PAGE_ACCESSED -#else - li r1, _PAGE_PRESENT -#endif + li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER bge- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1, _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -564,13 +558,10 @@ DataStoreTLBMiss: lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED -#else - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT -#endif + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER bge- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -843,7 +834,7 @@ BEGIN_MMU_FTR_SECTION END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr -load_segment_registers: +_GLOBAL(load_segment_registers) li r0, NUM_USER_SEGMENTS /* load up user segment register values */ mtctr r0 /* for context 0 */ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index ad79fddb974d..9019f1395d39 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -420,6 +420,10 @@ generic_secondary_common_init: /* From now on, r24 is expected to be logical cpuid */ mr r24,r5 + /* Create a temp kernel stack for use before relocation is on. */ + ld r1,PACAEMERGSP(r13) + subi r1,r1,STACK_FRAME_OVERHEAD + /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) @@ -448,10 +452,6 @@ generic_secondary_common_init: sync /* order paca.run and cur_cpu_spec */ isync /* In case code patching happened */ - /* Create a temp kernel stack for use before relocation is on. */ - ld r1,PACAEMERGSP(r13) - subi r1,r1,STACK_FRAME_OVERHEAD - b __secondary_start #endif /* SMP */ @@ -945,15 +945,8 @@ start_here_multiplatform: std r0,0(r4) #endif - /* The following gets the stack set up with the regs */ - /* pointing to the real addr of the kernel stack. This is */ - /* all done to support the C function call below which sets */ - /* up the htab. 
This is done because we have relocated the */ - /* kernel but are still running in real mode. */ - - LOAD_REG_ADDR(r3,init_thread_union) - /* set up a stack pointer */ + LOAD_REG_ADDR(r3,init_thread_union) LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 @@ -999,7 +992,7 @@ start_here_common: bl start_kernel /* Not reached */ - trap +0: trap EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 /* diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 98d8b6832fcb..6f3e417f55a3 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -191,7 +191,7 @@ SystemCall: /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) + EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD) /* Called from DataStoreTLBMiss when perf TLB misses events are activated */ #ifdef CONFIG_PERF_EVENTS @@ -229,9 +229,7 @@ SystemCall: InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH0, r10 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mtspr SPRN_SPRG_SCRATCH1, r11 -#endif /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -278,11 +276,9 @@ InstructionTLBMiss: #ifdef ITLB_MISS_KERNEL mtcr r11 #endif -#ifdef CONFIG_SWAP - rlwinm r11, r10, 32-5, _PAGE_PRESENT + rlwinm r11, r10, 32-7, _PAGE_PRESENT and r11, r11, r10 rlwimi r10, r11, 0, _PAGE_PRESENT -#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. * Software indicator bits 22, 24, 25, 26, and 27 must be @@ -296,9 +292,7 @@ InstructionTLBMiss: /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH0 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mfspr r11, SPRN_SPRG_SCRATCH1 -#endif rfi patch_site 0b, patch__itlbmiss_exit_1 @@ -308,9 +302,7 @@ InstructionTLBMiss: addi r10, r10, 1 stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH0 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mfspr r11, SPRN_SPRG_SCRATCH1 -#endif rfi #endif @@ -394,11 +386,9 @@ DataStoreTLBMiss: * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); * r10 = (r10 & ~PRESENT) | r11; */ -#ifdef CONFIG_SWAP - rlwinm r11, r10, 32-5, _PAGE_PRESENT + rlwinm r11, r10, 32-7, _PAGE_PRESENT and r11, r11, r10 rlwimi r10, r11, 0, _PAGE_PRESENT -#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2d27ec4feee4..9b340af02c38 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs) if (user_mode(regs)) return 0; + if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) + return 0; + /* * We don't want to be preempted for the entire * duration of kprobe processing diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c4ed328a7b96..7a1c11a7cba5 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -114,11 +114,12 @@ void machine_kexec(struct kimage *image) void __init reserve_crashkernel(void) { - unsigned long long crash_size, crash_base; + unsigned long long crash_size, crash_base, total_mem_sz; int ret; + total_mem_sz = memory_limit ? 
memory_limit : memblock_phys_mem_size(); /* use common parsing */ - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -177,6 +178,7 @@ void __init reserve_crashkernel(void) /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; + total_mem_sz = memory_limit; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", memory_limit); } @@ -185,7 +187,7 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), - (unsigned long)(memblock_phys_mem_size() >> 20)); + (unsigned long)(total_mem_sz >> 20)); if (!memblock_is_region_memory(crashk_res.start, crash_size) || memblock_reserve(crashk_res.start, crash_size)) { diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 949eceb254d8..c786adfb9413 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -86,7 +86,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, * This is very early in boot, so no harm done if the kernel crashes at * this point. */ - BUG_ON(shared_lppaca_size >= shared_lppaca_total_size); + BUG_ON(shared_lppaca_size > shared_lppaca_total_size); return ptr; } @@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_paca) /* On Book3E, initialize the TLB miss exception frames */ mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); #else - /* In HV mode, we setup both HPACA and PACA to avoid problems + /* + * In HV mode, we setup both HPACA and PACA to avoid problems * if we do a GET_PACA() before the feature fixups have been - * applied + * applied. + * + * Normally you should test against CPU_FTR_HVMODE, but CPU features + * are not yet set up when we first reach here. */ - if (early_cpu_has_feature(CPU_FTR_HVMODE)) + if (mfmsr() & MSR_HV) mtspr(SPRN_SPRG_HPACA, local_paca); #endif mtspr(SPRN_SPRG_PACA, local_paca); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 1c448cf25506..a2c258a8d736 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1669,3 +1669,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); + + +static int __init discover_phbs(void) +{ + if (ppc_md.discover_phbs) + ppc_md.discover_phbs(); + + return 0; +} +core_initcall(discover_phbs); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 639ceae7da9d..c94bba9142e7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1218,29 +1218,31 @@ struct task_struct *__switch_to(struct task_struct *prev, static void show_instructions(struct pt_regs *regs) { int i; + unsigned long nip = regs->nip; unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); printk("Instruction dump:"); + /* + * If we were executing with the MMU off for instructions, adjust pc + * rather than printing XXXXXXXX. 
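+ *
+ * phys_to_virt() here is essentially the linear-map offset, i.e.
+ * roughly
+ *
+ *   pc  = pc  + PAGE_OFFSET;
+ *   nip = nip + PAGE_OFFSET;
+ *
+ * which lets __kernel_text_address()/probe_kernel_address() below
+ * read the instructions through the kernel mapping.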
+ */ + if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) { + pc = (unsigned long)phys_to_virt(pc); + nip = (unsigned long)phys_to_virt(regs->nip); + } + for (i = 0; i < NR_INSN_TO_PRINT; i++) { int instr; if (!(i % 8)) pr_cont("\n"); -#if !defined(CONFIG_BOOKE) - /* If executing with the IMMU off, adjust pc rather - * than print XXXXXXXX. - */ - if (!(regs->msr & MSR_IR)) - pc = (unsigned long)phys_to_virt(pc); -#endif - if (!__kernel_text_address(pc) || probe_kernel_address((const void *)pc, instr)) { pr_cont("XXXXXXXX "); } else { - if (regs->nip == pc) + if (nip == pc) pr_cont("<%08x> ", instr); else pr_cont("%08x ", instr); @@ -2079,7 +2081,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) * See if this is an exception frame. * We look for the "regshere" marker in the current frame. */ - if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) + if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS) && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 6620f37abe73..537142b877b8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -266,7 +266,7 @@ static struct feature_property { }; #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) -static inline void identical_pvr_fixup(unsigned long node) +static __init void identical_pvr_fixup(unsigned long node) { unsigned int pvr; const char *model = of_get_flat_dt_prop(node, "model", NULL); @@ -685,6 +685,23 @@ static void __init tm_init(void) static void tm_init(void) { } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +#ifdef CONFIG_PPC64 +static void __init save_fscr_to_task(void) +{ + /* + * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we + * have configured via the device tree features or via __init_FSCR(). + * That value will then be propagated to pid 1 (init) and all future + * processes. 
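+ *
+ * In particular, FSCR[DSCR] is kept clear (see feat_enable_dscr() and
+ * the __init_FSCR() change), so a task's first DSCR access, e.g.
+ *
+ *   mtspr(SPRN_DSCR, val);
+ *
+ * traps to facility_unavailable_exception(), which is how the kernel
+ * tracks per-task DSCR usage.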
+ */ + if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) + init_task.thread.fscr = mfspr(SPRN_FSCR); +} +#else +static inline void save_fscr_to_task(void) {}; +#endif + + void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -773,6 +790,8 @@ void __init early_init_devtree(void *params) BUG(); } + save_fscr_to_task(); + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) /* We'll later wait for secondaries to check in; there are * NCPUS-1 non-boot CPUs :-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index eba9d4ee4baf..1b65fb7c0bda 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1305,14 +1305,10 @@ static void __init prom_check_platform_support(void) if (prop_len > sizeof(vec)) prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", prop_len); - prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", - &vec, sizeof(vec)); - for (i = 0; i < sizeof(vec); i += 2) { - prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 - , vec[i] - , vec[i + 1]); - prom_parse_platform_support(vec[i], vec[i + 1], - &supported); + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec)); + for (i = 0; i < prop_len; i += 2) { + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]); + prom_parse_platform_support(vec[i], vec[i + 1], &supported); } } @@ -1761,6 +1757,9 @@ static void __init prom_rtas_os_term(char *str) if (token == 0) prom_panic("Could not get token for ibm,os-term\n"); os_term_args.token = cpu_to_be32(token); + os_term_args.nargs = cpu_to_be32(1); + os_term_args.nret = cpu_to_be32(1); + os_term_args.args[0] = cpu_to_be32(__pa(str)); prom_rtas_hcall((uint64_t)&os_term_args); } #endif /* CONFIG_PPC_SVM */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index c5fa251b8950..c1e2e351ebff 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -842,96 +842,6 @@ static void rtas_percpu_suspend_me(void *info) __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); } -enum rtas_cpu_state { - DOWN, - UP, -}; - -#ifndef CONFIG_SMP -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - if (!cpumask_empty(cpus)) { - cpumask_clear(cpus); - return -EINVAL; - } else - return 0; -} -#else -/* On return cpumask will be altered to indicate CPUs changed. - * CPUs with states changed will be set in the mask, - * CPUs with status unchanged will be unset in the mask. */ -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - int cpu; - int cpuret = 0; - int ret = 0; - - if (cpumask_empty(cpus)) - return 0; - - for_each_cpu(cpu, cpus) { - struct device *dev = get_cpu_device(cpu); - - switch (state) { - case DOWN: - cpuret = device_offline(dev); - break; - case UP: - cpuret = device_online(dev); - break; - } - if (cpuret < 0) { - pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", - __func__, - ((state == UP) ? 
"up" : "down"), - cpu, cpuret); - if (!ret) - ret = cpuret; - if (state == UP) { - /* clear bits for unchanged cpus, return */ - cpumask_shift_right(cpus, cpus, cpu); - cpumask_shift_left(cpus, cpus, cpu); - break; - } else { - /* clear bit for unchanged cpu, continue */ - cpumask_clear_cpu(cpu, cpus); - } - } - cond_resched(); - } - - return ret; -} -#endif - -int rtas_online_cpus_mask(cpumask_var_t cpus) -{ - int ret; - - ret = rtas_cpu_state_change_mask(UP, cpus); - - if (ret) { - cpumask_var_t tmp_mask; - - if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) - return ret; - - /* Use tmp_mask to preserve cpus mask from first failure */ - cpumask_copy(tmp_mask, cpus); - rtas_offline_cpus_mask(tmp_mask); - free_cpumask_var(tmp_mask); - } - - return ret; -} - -int rtas_offline_cpus_mask(cpumask_var_t cpus) -{ - return rtas_cpu_state_change_mask(DOWN, cpus); -} - int rtas_ibm_suspend_me(u64 handle) { long state; @@ -939,8 +849,6 @@ int rtas_ibm_suspend_me(u64 handle) unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; DECLARE_COMPLETION_ONSTACK(done); - cpumask_var_t offline_mask; - int cpuret; if (!rtas_service_present("ibm,suspend-me")) return -ENOSYS; @@ -961,9 +869,6 @@ int rtas_ibm_suspend_me(u64 handle) return -EIO; } - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - atomic_set(&data.working, 0); atomic_set(&data.done, 0); atomic_set(&data.error, 0); @@ -972,24 +877,8 @@ int rtas_ibm_suspend_me(u64 handle) lock_device_hotplug(); - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); - cpuret = rtas_online_cpus_mask(offline_mask); - if (cpuret) { - pr_err("%s: Could not bring present CPUs online.\n", __func__); - atomic_set(&data.error, cpuret); - goto out; - } - cpu_hotplug_disable(); - /* Check if we raced with a CPU-Offline Operation */ - if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) { - pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__); - atomic_set(&data.error, -EAGAIN); - goto out_hotplug_enable; - } - /* Call function on all CPUs. One of us will make the * rtas call */ @@ -1000,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle) if (atomic_read(&data.error) != 0) printk(KERN_ERR "Error doing global join\n"); -out_hotplug_enable: + cpu_hotplug_enable(); - /* Take down CPUs not online prior to suspend */ - cpuret = rtas_offline_cpus_mask(offline_mask); - if (cpuret) - pr_warn("%s: Could not restore CPUs to offline state.\n", - __func__); - -out: unlock_device_hotplug(); - free_cpumask_var(offline_mask); + return atomic_read(&data.error); } #else /* CONFIG_PPC_PSERIES */ @@ -1058,6 +940,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, return NULL; } +#ifdef CONFIG_PPC_RTAS_FILTER + +/* + * The sys_rtas syscall, as originally designed, allows root to pass + * arbitrary physical addresses to RTAS calls. A number of RTAS calls + * can be abused to write to arbitrary memory and do other things that + * are potentially harmful to system integrity, and thus should only + * be used inside the kernel and not exposed to userspace. + * + * All known legitimate users of the sys_rtas syscall will only ever + * pass addresses that fall within the RMO buffer, and use a known + * subset of RTAS calls. + * + * Accordingly, we filter RTAS requests to check that the call is + * permitted, and that provided pointers fall within the RMO buffer. 
+ * The rtas_filters list contains an entry for each permitted call, + * with the indexes of the parameters which are expected to contain + * addresses and sizes of buffers allocated inside the RMO buffer. + */ +struct rtas_filter { + const char *name; + int token; + /* Indexes into the args buffer, -1 if not used */ + int buf_idx1; + int size_idx1; + int buf_idx2; + int size_idx2; + + int fixed_size; +}; + +static struct rtas_filter rtas_filters[] __ro_after_init = { + { "ibm,activate-firmware", -1, -1, -1, -1, -1 }, + { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */ + { "display-character", -1, -1, -1, -1, -1 }, + { "ibm,display-message", -1, 0, -1, -1, -1 }, + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,open-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, + { "ibm,get-indices", -1, 2, 3, -1, -1 }, + { "get-power-level", -1, -1, -1, -1, -1 }, + { "get-sensor-state", -1, -1, -1, -1, -1 }, + { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }, + { "get-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,get-vpd", -1, 0, -1, 1, 2 }, + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, + { "ibm,set-eeh-option", -1, -1, -1, -1, -1 }, + { "set-indicator", -1, -1, -1, -1, -1 }, + { "set-power-level", -1, -1, -1, -1, -1 }, + { "set-time-for-power-on", -1, -1, -1, -1, -1 }, + { "ibm,set-system-parameter", -1, 1, -1, -1, -1 }, + { "set-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,suspend-me", -1, -1, -1, -1, -1 }, + { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 }, + { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 }, + { "ibm,physical-attestation", -1, 0, 1, -1, -1 }, +}; + +static bool in_rmo_buf(u32 base, u32 end) +{ + return base >= rtas_rmo_buf && + base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && + base <= end && + end >= rtas_rmo_buf && + end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); +} + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + struct rtas_filter *f = &rtas_filters[i]; + u32 base, size, end; + + if (token != f->token) + continue; + + if (f->buf_idx1 != -1) { + base = be32_to_cpu(args->args[f->buf_idx1]); + if (f->size_idx1 != -1) + size = be32_to_cpu(args->args[f->size_idx1]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + + end = base + size - 1; + if (!in_rmo_buf(base, end)) + goto err; + } + + if (f->buf_idx2 != -1) { + base = be32_to_cpu(args->args[f->buf_idx2]); + if (f->size_idx2 != -1) + size = be32_to_cpu(args->args[f->size_idx2]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + end = base + size - 1; + + /* + * Special case for ibm,configure-connector where the + * address can be 0 + */ + if (!strcmp(f->name, "ibm,configure-connector") && + base == 0) + return false; + + if (!in_rmo_buf(base, end)) + goto err; + } + + return false; + } + +err: + pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); + pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", + token, nargs, current->comm); + return true; +} + +#else + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + return false; +} + +#endif /* CONFIG_PPC_RTAS_FILTER */ + /* 
We assume to be passed big endian arguments */ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { @@ -1095,6 +1118,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) args.rets = &args.args[nargs]; memset(args.rets, 0, nret * sizeof(rtas_arg_t)); + if (block_rtas_call(token, nargs, &args)) + return -EINVAL; + /* Need to handle ibm,suspend_me call specially */ if (token == ibm_suspend_me_token) { @@ -1156,6 +1182,9 @@ void __init rtas_initialize(void) unsigned long rtas_region = RTAS_INSTANTIATE_MAX; u32 base, size, entry; int no_base, no_size, no_entry; +#ifdef CONFIG_PPC_RTAS_FILTER + int i; +#endif /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. @@ -1195,6 +1224,12 @@ void __init rtas_initialize(void) #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); #endif + +#ifdef CONFIG_PPC_RTAS_FILTER + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + rtas_filters[i].token = rtas_token(rtas_filters[i].name); + } +#endif } int __init early_init_dt_scan_rtas(unsigned long node, diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 25aaa3903000..f281d011f4b9 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -903,8 +903,6 @@ void __init setup_arch(char **cmdline_p) /* On BookE, setup per-core TLB data structures. */ setup_tlb_core_data(); - - smp_release_cpus(); #endif /* Print various info about the machine that has been gathered so far. */ @@ -925,6 +923,8 @@ void __init setup_arch(char **cmdline_p) exc_lvl_early_init(); emergency_stack_init(); + smp_release_cpus(); + initmem_init(); early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 44b4c432a273..5bc7e753df4d 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -290,18 +290,36 @@ void __init early_setup(unsigned long dt_ptr) /* -------- printk is _NOT_ safe to use here ! ------- */ - /* Try new device tree based feature discovery ... */ - if (!dt_cpu_ftrs_init(__va(dt_ptr))) - /* Otherwise use the old style CPU table */ - identify_cpu(0, mfspr(SPRN_PVR)); - - /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ + /* + * Assume we're on cpu 0 for now. + * + * We need to load a PACA very early for a few reasons. + * + * The stack protector canary is stored in the paca, so as soon as we + * call any stack protected code we need r13 pointing somewhere valid. + * + * If we are using kcov it will call in_task() in its instrumentation, + * which relies on the current task from the PACA. + * + * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as + * printk(), which can trigger both stack protector and kcov. + * + * percpu variables and spin locks also use the paca. + * + * So set up a temporary paca. It will be replaced below once we know + * what CPU we are on. + */ initialise_paca(&boot_paca, 0); setup_paca(&boot_paca); fixup_boot_paca(); /* -------- printk is now safe to use ------- */ + /* Try new device tree based feature discovery ... 
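+ *
+ * (this moved below the paca setup on purpose: dt_cpu_ftrs_init() can
+ * printk() and touch per-cpu state, so it must only run once r13
+ * points at a valid paca, i.e. after
+ *
+ *   initialise_paca(&boot_paca, 0);
+ *   setup_paca(&boot_paca);
+ *   fixup_boot_paca();
+ *
+ * as the comment above explains)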
*/ + if (!dt_cpu_ftrs_init(__va(dt_ptr))) + /* Otherwise use the old style CPU table */ + identify_cpu(0, mfspr(SPRN_PVR)); + /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); @@ -523,6 +541,8 @@ static bool __init parse_cache_info(struct device_node *np, lsizep = of_get_property(np, propnames[3], NULL); if (bsizep == NULL) bsizep = lsizep; + if (lsizep == NULL) + lsizep = bsizep; if (lsizep != NULL) lsize = be32_to_cpu(*lsizep); if (bsizep != NULL) @@ -839,7 +859,13 @@ early_initcall(disable_hardlockup_detector); static enum l1d_flush_type enabled_flush_types; static void *l1d_flush_fallback_area; static bool no_rfi_flush; +static bool no_entry_flush; +static bool no_uaccess_flush; bool rfi_flush; +bool entry_flush; +bool uaccess_flush; +DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); +EXPORT_SYMBOL(uaccess_flush_key); static int __init handle_no_rfi_flush(char *p) { @@ -849,6 +875,22 @@ static int __init handle_no_rfi_flush(char *p) } early_param("no_rfi_flush", handle_no_rfi_flush); +static int __init handle_no_entry_flush(char *p) +{ + pr_info("entry-flush: disabled on command line."); + no_entry_flush = true; + return 0; +} +early_param("no_entry_flush", handle_no_entry_flush); + +static int __init handle_no_uaccess_flush(char *p) +{ + pr_info("uaccess-flush: disabled on command line."); + no_uaccess_flush = true; + return 0; +} +early_param("no_uaccess_flush", handle_no_uaccess_flush); + /* * The RFI flush is not KPTI, but because users will see doco that says to use * nopti we hijack that option here to also disable the RFI flush. @@ -880,6 +922,32 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } +void entry_flush_enable(bool enable) +{ + if (enable) { + do_entry_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else { + do_entry_flush_fixups(L1D_FLUSH_NONE); + } + + entry_flush = enable; +} + +void uaccess_flush_enable(bool enable) +{ + if (enable) { + do_uaccess_flush_fixups(enabled_flush_types); + static_branch_enable(&uaccess_flush_key); + on_each_cpu(do_nothing, NULL, 1); + } else { + static_branch_disable(&uaccess_flush_key); + do_uaccess_flush_fixups(L1D_FLUSH_NONE); + } + + uaccess_flush = enable; +} + static void __ref init_fallback_flush(void) { u64 l1d_size, limit; @@ -938,10 +1006,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable) enabled_flush_types = types; - if (!no_rfi_flush && !cpu_mitigations_off()) + if (!cpu_mitigations_off() && !no_rfi_flush) rfi_flush_enable(enable); } +void setup_entry_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_entry_flush) + entry_flush_enable(enable); +} + +void setup_uaccess_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_uaccess_flush) + uaccess_flush_enable(enable); +} + #ifdef CONFIG_DEBUG_FS static int rfi_flush_set(void *data, u64 val) { @@ -969,9 +1055,63 @@ static int rfi_flush_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); +static int entry_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != entry_flush) + entry_flush_enable(enable); + + return 0; +} + +static int entry_flush_get(void *data, u64 *val) +{ + *val = entry_flush ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); + +static int uaccess_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != uaccess_flush) + uaccess_flush_enable(enable); + + return 0; +} + +static int uaccess_flush_get(void *data, u64 *val) +{ + *val = uaccess_flush ? 1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); + static __init int rfi_flush_debugfs_init(void) { debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); + debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); return 0; } device_initcall(rfi_flush_debugfs_init); diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 84ed2e77ef9c..adfde59cf4ba 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, err |= __get_user(tsk->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]); + /* Don't allow userspace to set the trap value */ + regs->trap = 0; + /* These regs are not checkpointed; they can go in 'regs'. */ - err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 80a676da11cb..f08ca604a394 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -31,29 +31,27 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); -/* - * SMT snooze delay stuff, 64-bit only for now - */ - #ifdef CONFIG_PPC64 -/* Time in microseconds we delay before sleeping in the idle loop */ -static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; +/* + * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle: + * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in + * 2014: + * + * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean + * up the kernel code." + * + * powerpc-utils stopped using it as of 1.3.8. At some point in the future this + * code should be removed. 
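+ * + * Until then the sysfs file is kept so tools that still open it do not + * start failing: writes are accepted but ignored and reads return a + * constant, each warning once (see below).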
+ */ static ssize_t store_smt_snooze_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - ssize_t ret; - long snooze; - - ret = sscanf(buf, "%ld", &snooze); - if (ret != 1) - return -EINVAL; - - per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; + pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n", + current->comm, current->pid); return count; } @@ -61,9 +59,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev, struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - - return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); + pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n", + current->comm, current->pid); + return sprintf(buf, "100\n"); } static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, @@ -71,16 +69,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, static int __init setup_smt_snooze_delay(char *str) { - unsigned int cpu; - long snooze; - if (!cpu_has_feature(CPU_FTR_SMT)) return 1; - snooze = simple_strtol(str, NULL, 10); - for_each_possible_cpu(cpu) - per_cpu(smt_snooze_delay, cpu) = snooze; - + pr_warn("smt-snooze-delay command line option has no effect\n"); return 1; } __setup("smt-snooze-delay=", setup_smt_snooze_delay); diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index e2ab8a111b69..0b4694b8d248 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -13,13 +13,14 @@ */ #include -#include #include #include #include #include #include #include +#include +#include #include #include @@ -39,9 +40,7 @@ static struct tau_temp unsigned char grew; } tau[NR_CPUS]; -struct timer_list tau_timer; - -#undef DEBUG +static bool tau_int_enable; /* TODO: put these in a /proc interface, with some sanity checks, and maybe * dynamic adjustment to minimize # of interrupts */ @@ -50,72 +49,49 @@ struct timer_list tau_timer; #define step_size 2 /* step size when temp goes out of range */ #define window_expand 1 /* expand the window by this much */ /* configurable values for shrinking the window */ -#define shrink_timer 2*HZ /* period between shrinking the window */ +#define shrink_timer 2000 /* period between shrinking the window */ #define min_window 2 /* minimum window size, degrees C */ static void set_thresholds(unsigned long cpu) { -#ifdef CONFIG_TAU_INT - /* - * setup THRM1, - * threshold, valid bit, enable interrupts, interrupt when below threshold - */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); + u32 maybe_tie = tau_int_enable ? 
THRM1_TIE : 0; - /* setup THRM2, - * threshold, valid bit, enable interrupts, interrupt when above threshold - */ - mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); -#else - /* same thing but don't enable interrupts */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); - mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); -#endif + /* setup THRM1, threshold, valid bit, interrupt when below threshold */ + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID); + + /* setup THRM2, threshold, valid bit, interrupt when above threshold */ + mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie); } static void TAUupdate(int cpu) { - unsigned thrm; - -#ifdef DEBUG - printk("TAUupdate "); -#endif + u32 thrm; + u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V; /* if both thresholds are crossed, the step_sizes cancel out * and the window winds up getting expanded twice. */ - if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ - if(thrm & THRM1_TIN){ /* crossed low threshold */ - if (tau[cpu].low >= step_size){ - tau[cpu].low -= step_size; - tau[cpu].high -= (step_size - window_expand); - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("low threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM1); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM1, 0); + + if (tau[cpu].low >= step_size) { + tau[cpu].low -= step_size; + tau[cpu].high -= (step_size - window_expand); } + tau[cpu].grew = 1; + pr_debug("%s: low threshold crossed\n", __func__); } - if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */ - if(thrm & THRM1_TIN){ /* crossed high threshold */ - if (tau[cpu].high <= 127-step_size){ - tau[cpu].low += (step_size - window_expand); - tau[cpu].high += step_size; - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("high threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM2); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM2, 0); + + if (tau[cpu].high <= 127 - step_size) { + tau[cpu].low += (step_size - window_expand); + tau[cpu].high += step_size; } + tau[cpu].grew = 1; + pr_debug("%s: high threshold crossed\n", __func__); } - -#ifdef DEBUG - printk("grew = %d\n", tau[cpu].grew); -#endif - -#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ - set_thresholds(cpu); -#endif - } #ifdef CONFIG_TAU_INT @@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs) static void tau_timeout(void * info) { int cpu; - unsigned long flags; int size; int shrink; - /* disabling interrupts *should* be okay */ - local_irq_save(flags); cpu = smp_processor_id(); -#ifndef CONFIG_TAU_INT - TAUupdate(cpu); -#endif + if (!tau_int_enable) + TAUupdate(cpu); + + /* Stop thermal sensor comparisons and interrupts */ + mtspr(SPRN_THRM3, 0); size = tau[cpu].high - tau[cpu].low; if (size > min_window && ! tau[cpu].grew) { @@ -173,32 +148,26 @@ static void tau_timeout(void * info) set_thresholds(cpu); - /* - * Do the enable every time, since otherwise a bunch of (relatively) - * complex sleep code needs to be added. One mtspr every time - * tau_timeout is called is probably not a big deal. - * - * Enable thermal sensor and set up sample interval timer - * need 20 us to do the compare.. until a nice 'cpu_speed' function - * call is implemented, just assume a 500 mhz clock. It doesn't really - * matter if we take too long for a compare since it's all interrupt - * driven anyway. - * - * use a extra long time.. (60 us @ 500 mhz) + /* Restart thermal sensor comparisons and interrupts. 
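+ * (tau_timeout() zeroed THRM3 on entry, so sampling was stopped while + * the thresholds were rewritten.)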
+ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" + * recommends that "the maximum value be set in THRM3 under all + * conditions." */ - mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); - - local_irq_restore(flags); + mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); } -static void tau_timeout_smp(struct timer_list *unused) +static struct workqueue_struct *tau_workq; + +static void tau_work_func(struct work_struct *work) { - - /* schedule ourselves to be run again */ - mod_timer(&tau_timer, jiffies + shrink_timer) ; + msleep(shrink_timer); on_each_cpu(tau_timeout, NULL, 0); + /* schedule ourselves to be run again */ + queue_work(tau_workq, work); } +DECLARE_WORK(tau_work, tau_work_func); + /* * setup the TAU * @@ -231,21 +200,19 @@ static int __init TAU_init(void) return 1; } + tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) && + !strcmp(cur_cpu_spec->platform, "ppc750"); - /* first, set up the window shrinking timer */ - timer_setup(&tau_timer, tau_timeout_smp, 0); - tau_timer.expires = jiffies + shrink_timer; - add_timer(&tau_timer); + tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0); + if (!tau_workq) + return -ENOMEM; on_each_cpu(TAU_init_smp, NULL, 0); - printk("Thermal assist unit "); -#ifdef CONFIG_TAU_INT - printk("using interrupts, "); -#else - printk("using timers, "); -#endif - printk("shrink_timer: %d jiffies\n", shrink_timer); + queue_work(tau_workq, &tau_work); + + pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n", + tau_int_enable ? "interrupts" : "workqueue", shrink_timer); tau_initialized = 1; return 0; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 11301a1187f3..0e0a2227af7d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void) "i" (offsetof(struct paca_struct, irq_work_pending))); } -void arch_irq_work_raise(void) -{ - preempt_disable(); - set_irq_work_pending_flag(); - /* - * Non-nmi code running with interrupts disabled will replay - * irq_happened before it re-enables interrupts, so setthe - * decrementer there instead of causing a hardware exception - * which would immediately hit the masked interrupt handler - * and have the net effect of setting the decrementer in - * irq_happened. - * - * NMI interrupts can not check this when they return, so the - * decrementer hardware exception is raised, which will fire - * when interrupts are next enabled. - * - * BookE does not support this yet, it must audit all NMI - * interrupt handlers to ensure they call nmi_enter() so this - * check would be correct. - */ - if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) { - set_dec(1); - } else { - hard_irq_disable(); - local_paca->irq_happened |= PACA_IRQ_DEC; - } - preempt_enable(); -} - #else /* 32-bit */ DEFINE_PER_CPU(u8, irq_work_pending); @@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending); #define test_irq_work_pending() __this_cpu_read(irq_work_pending) #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) +#endif /* 32 vs 64 bit */ + void arch_irq_work_raise(void) { + /* + * 64-bit code that uses irq soft-mask can just cause an immediate + * interrupt here that gets soft masked, if this is called under + * local_irq_disable(). It might be possible to prevent that happening + * by noticing interrupts are disabled and setting decrementer pending + * to be replayed when irqs are enabled. 
The problem there is that + * tracing can call irq_work_raise, including in code that does low + * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on) + * which could get tangled up if we're messing with the same state + * here. + */ preempt_disable(); set_irq_work_pending_flag(); set_dec(1); preempt_enable(); } -#endif /* 32 vs 64 bit */ - #else /* CONFIG_IRQ_WORK */ #define test_irq_work_pending() 0 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 014ff0701f24..ecfa460f66d1 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -510,11 +510,14 @@ out: #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - nmi_panic(regs, "Unrecoverable nested System Reset"); + die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ - if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable System Reset"); + if (!(regs->msr & MSR_RI)) { + /* For the reason explained in die_mce, nmi_exit before die */ + nmi_exit(); + die("Unrecoverable System Reset", regs, SIGABRT); + } if (saved_hsrrs) { mtspr(SPRN_HSRR0, hsrr0); @@ -858,7 +861,7 @@ void machine_check_exception(struct pt_regs *regs) /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); + die("Unrecoverable Machine check", regs, SIGBUS); return; @@ -877,7 +880,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs) { unsigned int ra, rb, t, i, sel, instr, rc; const void __user *addr; - u8 vbuf[16], *vdst; + u8 vbuf[16] __aligned(16), *vdst; unsigned long ea, msr, msr_mask; bool swap; diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index eae9ddaecbcf..efb1ba40274a 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -682,7 +682,7 @@ int vdso_getcpu_init(void) node = cpu_to_node(cpu); WARN_ON_ONCE(node > 0xffff); - val = (cpu & 0xfff) | ((node & 0xffff) << 16); + val = (cpu & 0xffff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG_VDSO_WRITE, val); get_paca()->sprg_vdso = val; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 4638d2863388..3ea360cad337 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -98,10 +98,11 @@ SECTIONS ALIGN_FUNCTION(); #endif /* careful! __ftr_alt_* sections need to be close to .text */ - *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); + *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text); #ifdef CONFIG_PPC64 *(.tramp.ftrace.text); #endif + NOINSTR_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT @@ -143,6 +144,20 @@ SECTIONS __stop___stf_entry_barrier_fixup = .; } + . = ALIGN(8); + __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { + __start___uaccess_flush_fixup = .; + *(__uaccess_flush_fixup) + __stop___uaccess_flush_fixup = .; + } + + . = ALIGN(8); + __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { + __start___entry_flush_fixup = .; + *(__entry_flush_fixup) + __stop___entry_flush_fixup = .; + } + . = ALIGN(8); __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { __start___stf_exit_barrier_fixup = .; @@ -196,6 +211,12 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT + + /* + *.init.text might be RO so we must ensure this section ends on + * a page boundary. + */ + . 
= ALIGN(PAGE_SIZE); _einittext = .; #ifdef CONFIG_PPC64 *(.tramp.ftrace.init); @@ -209,21 +230,9 @@ SECTIONS EXIT_TEXT } - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { - INIT_DATA - } + . = ALIGN(PAGE_SIZE); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { - INIT_SETUP(16) - } - - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { - INIT_CALLS - } - - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { - CON_INITCALL - } + INIT_DATA_SECTION(16) . = ALIGN(8); __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { @@ -251,9 +260,6 @@ SECTIONS __stop___fw_ftr_fixup = .; } #endif - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { - INIT_RAM_FS - } PERCPU_SECTION(L1_CACHE_BYTES) @@ -326,12 +332,6 @@ SECTIONS *(.branch_lt) } -#ifdef CONFIG_DEBUG_INFO_BTF - .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { - *(.BTF) - } -#endif - .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; KEEP(*(.opd)) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index ec2547cc5ecb..1ff971f3b06f 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -867,7 +867,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 2d415c36a61d..9d7344835469 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -38,7 +38,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ if (kvmhv_on_pseries()) return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, - __pa(to), __pa(from), n); + (to != NULL) ? __pa(to): 0, + (from != NULL) ? __pa(from): 0, n); quadrant = 1; if (!pid) @@ -353,7 +354,13 @@ static struct kmem_cache *kvm_pmd_cache; static pte_t *kvmppc_pte_alloc(void) { - return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + pte_t *pte; + + pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + /* pmd_populate() will only reference _pa(pte). */ + kmemleak_ignore(pte); + + return pte; } static void kvmppc_pte_free(pte_t *ptep) @@ -363,7 +370,13 @@ static void kvmppc_pte_free(pte_t *ptep) static pmd_t *kvmppc_pmd_alloc(void) { - return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + pmd_t *pmd; + + pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + /* pud_populate() will only reference _pa(pmd). */ + kmemleak_ignore(pmd); + + return pmd; } static void kvmppc_pmd_free(pmd_t *pmdp) @@ -1091,6 +1104,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, kvm->arch.lpid); gpa += PAGE_SIZE; } + /* + * Increase the mmu notifier sequence number to prevent any page + * fault that read the memslot earlier from writing a PTE. 
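+ * + * Fault handlers sample mmu_notifier_seq before translating the + * address and recheck it under mmu_lock (mmu_notifier_retry()) before + * installing a PTE, so bumping the count forces any racing fault to + * retry.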
+ */ + kvm->mmu_notifier_seq++; spin_unlock(&kvm->mmu_lock); } diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 5834db0a54c6..03b947429e4d 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -74,6 +74,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, struct kvmppc_spapr_tce_iommu_table *stit, *tmp; struct iommu_table_group *table_group = NULL; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { table_group = iommu_group_get_iommudata(grp); @@ -88,7 +89,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, kref_put(&stit->kref, kvm_spapr_tce_liobn_put); } } + cond_resched_rcu(); } + rcu_read_unlock(); } extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, @@ -106,12 +109,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!f.file) return -EBADF; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { if (stt == f.file->private_data) { found = true; break; } } + rcu_read_unlock(); fdput(f); @@ -144,6 +149,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!tbl) return -EINVAL; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { if (tbl != stit->tbl) continue; @@ -151,14 +157,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!kref_get_unless_zero(&stit->kref)) { /* stit is being destroyed */ iommu_tce_table_put(tbl); + rcu_read_unlock(); return -ENOTTY; } /* * The table is already known to this KVM, we just increased * its KVM reference counter and can return. */ + rcu_read_unlock(); return 0; } + rcu_read_unlock(); stit = kzalloc(sizeof(*stit), GFP_KERNEL); if (!stit) { @@ -364,18 +373,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { unsigned long hpa = 0; struct mm_iommu_table_group_mem_t *mem; long shift = stit->tbl->it_page_shift; mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); - if (!mem) - return H_TOO_HARD; - - if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) + if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { + rcu_read_unlock(); return H_TOO_HARD; + } } + rcu_read_unlock(); return H_SUCCESS; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 36abbe3c346d..6938b793a015 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3623,6 +3623,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && kvmppc_get_gpr(vcpu, 3) == H_CEDE) { kvmppc_nested_cede(vcpu); + kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } } else { @@ -3637,7 +3638,10 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.dec_expires = dec + tb; vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; + /* Save guest CTRL register, set runlatch to 1 */ vcpu->arch.ctrl = mfspr(SPRN_CTRLF); + if (!(vcpu->arch.ctrl & 1)) + mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1); vcpu->arch.iamr = mfspr(SPRN_IAMR); vcpu->arch.pspb = mfspr(SPRN_PSPB); @@ -5190,6 +5194,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp, case KVM_PPC_ALLOCATE_HTAB: { u32 htab_order; + /* If we're a nested hypervisor, we currently only support radix */ + if (kvmhv_on_pseries()) { + r = -EOPNOTSUPP; + break; + } + r = -EFAULT; if 
(get_user(htab_order, (u32 __user *)argp)) break; diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 0db937497169..cc90b8b82329 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -3,6 +3,8 @@ * Copyright 2017 Paul Mackerras, IBM Corp. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include @@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) u64 newmsr, bescr; int ra, rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For treclaim., tsr., and trechkpt. instructions if bit + * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section + * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit + * 31 is an acceptable way to handle these invalid forms that have + * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/ + * bit 31 set) can generate a softpatch interrupt. Hence both forms + * are handled below for these instructions so they behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; @@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return RESUME_GUEST; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ @@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = msr; return RESUME_GUEST; - case PPC_INST_TRECLAIM: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK): /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { /* generate an illegal instruction interrupt */ @@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; - case PPC_INST_TRECHKPT: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK): /* XXX do we need to check for PR=0 here? */ /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { @@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) } /* What should we do here? We didn't recognize the instruction */ - WARN_ON_ONCE(1); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr); + return RESUME_GUEST; } diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index 217246279dfa..fad931f224ef 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) u64 newmsr, msr, bescr; int rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For the tsr. instruction if bit 31 = 0 then it is per + * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid + * Forms, informs specifically that ignoring bit 31 is an acceptable way + * to handle TM-related invalid forms that have bit 31 = 0. 
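+ * (PO_XOP_OPCODE_MASK is the old 0xfc0007ff literal with bit 31 + * cleared, so the switch below matches both encodings.)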
Moreover, + * for emulation purposes both forms (w/ and wo/ bit 31 set) can + * generate a softpatch interrupt. Hence both forms are handled below + * for tsr. to make them behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; @@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return 1; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* we know the MSR has the TS field = S (0b01) here */ msr = vcpu->arch.shregs.msr; /* check for PR=1 and arch 2.06 bit set in PCR */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 235d57d6c205..d78d8487c1d6 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -252,6 +252,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) } state = &sb->irq_state[src]; + + /* Some sanity checking */ + if (!state->valid) { + pr_devel("%s: source %lx invalid !\n", __func__, irq); + return VM_FAULT_SIGBUS; + } + kvmppc_xive_select_irq(state, &hw_num, &xd); arch_spin_lock(&sb->lock); diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 321db0fdb9db..7154bd424d24 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3a77bb643452..e03c06471678 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1513,7 +1513,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1531,7 +1531,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1549,7 +1549,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1567,7 +1567,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 4ba634b89ce5..e8b25f74454d 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -228,6 +228,110 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) do_stf_exit_barrier_fixups(types); } +void do_uaccess_flush_fixups(enum l1d_flush_type types) +{ 
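+	/* + * Each __uaccess_flush_fixup entry is a relative offset to a patch + * site where four instructions are rewritten: the selected flush ops + * padded with nops, ending in a blr that is itself replaced by a nop + * for the fallback flush so the site falls through to the fallback + * routine. + */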
+ unsigned int instrs[4], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___uaccess_flush_fixup); + end = PTRRELOC(&__stop___uaccess_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + instrs[3] = 0x4e800020; /* blr */ + + i = 0; + if (types == L1D_FLUSH_FALLBACK) { + instrs[3] = 0x60000000; /* nop */ + /* fallthrough to fallback flush */ + } + + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + patch_instruction((dest + 1), instrs[1]); + patch_instruction((dest + 2), instrs[2]); + patch_instruction((dest + 3), instrs[3]); + } + + printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? "mttrig type" + : "unknown"); +} + +void do_entry_flush_fixups(enum l1d_flush_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___entry_flush_fixup); + end = PTRRELOC(&__stop___entry_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + i = 0; + if (types == L1D_FLUSH_FALLBACK) { + instrs[i++] = 0x7d4802a6; /* mflr r10 */ + instrs[i++] = 0x60000000; /* branch patched below */ + instrs[i++] = 0x7d4803a6; /* mtlr r10 */ + } + + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + if (types == L1D_FLUSH_FALLBACK) + patch_branch((dest + 1), (unsigned long)&entry_flush_fallback, + BRANCH_SET_LINK); + else + patch_instruction((dest + 1), instrs[1]); + + patch_instruction((dest + 2), instrs[2]); + } + + printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" + : "unknown"); +} + void do_rfi_flush_fixups(enum l1d_flush_type types) { unsigned int instrs[3], *dest; diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 84d5fab94f8f..1424a120710e 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -187,6 +187,7 @@ void mmu_mark_initmem_nx(void) int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) @@ -201,9 +202,10 @@ void mmu_mark_initmem_nx(void) size = block_size(base, top); size = max(size, 128UL << 10); if ((top - base) > size) { - if (strict_kernel_rwx_enabled()) - pr_warn("Kernel _etext not properly aligned\n"); size <<= 1; + if (strict_kernel_rwx_enabled() && base + size > border) + pr_warn("Some RW data is getting mapped X. " + "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); } setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 56cc84520577..ef164851738b 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, goto free_exit; } - pageshift = PAGE_SHIFT; - for (i = 0; i < entries; ++i) { - struct page *page = mem->hpages[i]; - - /* - * Allow to use larger than 64k IOMMU pages. Only do that - * if we are backed by hugetlb. - */ - if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) - pageshift = page_shift(compound_head(page)); - mem->pageshift = min(mem->pageshift, pageshift); - /* - * We don't need struct page reference any more, switch - * to physical address. - */ - mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; - } - good_exit: atomic64_set(&mem->mapped, 1); mem->used = 1; @@ -158,6 +140,27 @@ good_exit: } } + if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { + /* + * Allow to use larger than 64k IOMMU pages. Only do that + * if we are backed by hugetlb. Skip device memory as it is not + * backed with page structs. + */ + pageshift = PAGE_SHIFT; + for (i = 0; i < entries; ++i) { + struct page *page = mem->hpages[i]; + + if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) + pageshift = page_shift(compound_head(page)); + mem->pageshift = min(mem->pageshift, pageshift); + /* + * We don't need struct page reference any more, switch + * to physical address. + */ + mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; + } + } + list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); mutex_unlock(&mem_list_mutex); diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index ae7fca40e5b3..432fd9fa8c3f 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -83,13 +83,17 @@ static int pkey_initialize(void) scan_pkey_feature(); /* - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device - * tree. We make this exception since skiboot forgot to expose this - * property on power8. + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device + * tree. We make this exception since some version of skiboot forgot to + * expose this property on power8/9. 
*/ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) && - cpu_has_feature(CPU_FTRS_POWER8)) - pkeys_total = 32; + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } /* * Adjust the upper limit, based on the number of bits supported by @@ -367,12 +371,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute) return true; pkey_shift = pkeyshift(pkey); - if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift))) - return true; + if (execute) + return !(read_iamr() & (IAMR_EX_BIT << pkey_shift)); - amr = read_amr(); /* Delay reading amr until absolutely needed */ - return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) || - (write && !(amr & (AMR_WR_BIT << pkey_shift)))); + amr = read_amr(); + if (write) + return !(amr & (AMR_WR_BIT << pkey_shift)); + + return !(amr & (AMR_RD_BIT << pkey_shift)); } bool arch_pte_access_permitted(u64 pte, bool write, bool execute) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 6ee17d09649c..bdcb07a98cd3 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -97,7 +97,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -155,7 +155,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -643,21 +643,6 @@ void radix__mmu_cleanup_all(void) } } -void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* - * We don't currently support the first MEMBLOCK not mapping 0 - * physical on those processors - */ - BUG_ON(first_memblock_base != 0); - - /* - * Radix mode is not limited by RMA / VRMA addressing. - */ - ppc64_rma_size = ULONG_MAX; -} - #ifdef CONFIG_MEMORY_HOTPLUG static void free_pte_table(pte_t *pte_start, pmd_t *pmd) { diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 67af871190c6..b0f240afffa2 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -639,19 +639,29 @@ static void do_exit_flush_lazy_tlb(void *arg) struct mm_struct *mm = arg; unsigned long pid = mm->context.id; + /* + * A kthread could have done a mmget_not_zero() after the flushing CPU + * checked mm_is_singlethreaded, and be in the process of + * kthread_use_mm when interrupted here. In that case, current->mm will + * be set to mm, because kthread_use_mm() setting ->mm and switching to + * the mm is done with interrupts off. + */ if (current->mm == mm) - return; /* Local CPU */ + goto out_flush; if (current->active_mm == mm) { - /* - * Must be a kernel thread because sender is single-threaded. 
- */ - BUG_ON(current->mm); + WARN_ON_ONCE(current->mm != NULL); + /* Is a kernel thread and is using mm as the lazy tlb */ mmgrab(&init_mm); - switch_mm(mm, &init_mm, current); current->active_mm = &init_mm; + switch_mm_irqs_off(mm, &init_mm, current); mmdrop(mm); } + + atomic_dec(&mm->context.active_cpus); + cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm)); + +out_flush: _tlbiel_pid(pid, RIC_FLUSH_ALL); } @@ -666,7 +676,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm) */ smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, (void *)mm, 1); - mm_reset_thread_local(mm); } void radix__flush_tlb_mm(struct mm_struct *mm) diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index 59327cefbc6a..873fcfc7b875 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop) if (!drmem_info->lmbs) return; - for_each_drmem_lmb(lmb) { + for_each_drmem_lmb(lmb) read_drconf_v1_cell(lmb, &prop); - lmb_set_nid(lmb); - } } static void __init init_drmem_v2_lmbs(const __be32 *prop) @@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop) lmb->aa_index = dr_cell.aa_index; lmb->flags = dr_cell.flags; - - lmb_set_nid(lmb); } } } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 881a026a603a..bb01a862aaf8 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -241,6 +241,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, return false; } +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE +#define SIGFRAME_MAX_SIZE (4096 + 128) + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma, unsigned int flags, bool *must_retry) @@ -248,7 +251,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB + * The kernel signal delivery code writes a bit over 4KB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to @@ -273,7 +276,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ - if (address + 2048 >= uregs->gpr[1]) + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && @@ -346,7 +349,6 @@ static inline void cmo_account_page_fault(void) static inline void cmo_account_page_fault(void) { } #endif /* CONFIG_PPC_SMLPAR */ -#ifdef CONFIG_PPC_BOOK3S static void sanity_check_fault(bool is_write, bool is_user, unsigned long error_code, unsigned long address) { @@ -363,6 +365,9 @@ static void sanity_check_fault(bool is_write, bool is_user, return; } + if (!IS_ENABLED(CONFIG_PPC_BOOK3S)) + return; + /* * For hash translation mode, we should never get a * PROTFAULT. 
Any update to pte to reduce access will result in us @@ -397,10 +402,6 @@ static void sanity_check_fault(bool is_write, bool is_user, WARN_ON_ONCE(error_code & DSISR_PROTFAULT); } -#else -static void sanity_check_fault(bool is_write, bool is_user, - unsigned long error_code, unsigned long address) { } -#endif /* CONFIG_PPC_BOOK3S */ /* * Define the correct "is_write" bit in error_code based diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 4e08246acd79..210f1c28b8e4 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -415,9 +415,16 @@ void __init mmu_early_init_devtree(void) if (!(mfmsr() & MSR_HV)) early_check_vec5(); - if (early_radix_enabled()) + if (early_radix_enabled()) { radix__early_init_devtree(); - else + /* + * We have finalized the translation we are going to use by now. + * Radix mode is not limited by RMA / VRMA addressing. + * Hence don't limit memblock allocations. + */ + ppc64_rma_size = ULONG_MAX; + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); + } else hash__early_init_devtree(); } #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 0e6ed4413eea..1cfe57b51d7e 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -117,7 +117,7 @@ static void __init kasan_remap_early_shadow_ro(void) kasan_populate_pte(kasan_early_shadow_pte, prot); - for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { + for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 96ca90ce0264..c48705c726ac 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -530,7 +530,7 @@ void __flush_dcache_icache(void *p) * space occurs, before returning to user space. */ - if (cpu_has_feature(MMU_FTR_TYPE_44x)) + if (mmu_has_feature(MMU_FTR_TYPE_44x)) return; invalidate_icache_range(addr, addr + PAGE_SIZE); diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index 2ca407cedbe7..eaeee402f96e 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -397,7 +397,7 @@ _GLOBAL(set_context) * extern void loadcam_entry(unsigned int index) * * Load TLBCAM[index] entry in to the L2 CAM MMU - * Must preserve r7, r8, r9, and r10 + * Must preserve r7, r8, r9, r10 and r11 */ _GLOBAL(loadcam_entry) mflr r5 @@ -433,6 +433,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) */ _GLOBAL(loadcam_multi) mflr r8 + /* Don't switch to AS=1 if already there */ + mfmsr r11 + andi. r11,r11,MSR_IS + bne 10f /* * Set up temporary TLB entry that is the same as what we're @@ -458,6 +462,7 @@ _GLOBAL(loadcam_multi) mtmsr r6 isync +10: mr r9,r3 add r10,r3,r4 2: bl loadcam_entry @@ -466,6 +471,10 @@ _GLOBAL(loadcam_multi) mr r3,r9 blt 2b + /* Don't return to AS=0 if we were in AS=1 at function start */ + andi. r11,r11,MSR_IS + bne 3f + /* Return to AS=0 and clear the temporary entry */ mfmsr r6 rlwinm. 
r6,r6,0,~(MSR_IS|MSR_DS) @@ -481,6 +490,7 @@ _GLOBAL(loadcam_multi) tlbwe isync +3: mtlr r8 blr #endif diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 784cae9f5697..da9f722d9f16 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -207,7 +207,7 @@ void mark_initmem_nx(void) unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); - if (v_block_mapped((unsigned long)_stext + 1)) + if (v_block_mapped((unsigned long)_sinittext)) mmu_mark_initmem_nx(); else change_page_attr(page, numpages, PAGE_KERNEL); @@ -219,7 +219,7 @@ void mark_rodata_ro(void) struct page *page; unsigned long numpages; - if (v_block_mapped((unsigned long)_sinittext)) { + if (v_block_mapped((unsigned long)_stext + 1)) { mmu_mark_rodata_ro(); ptdump_check_wx(); return; diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index a07278027c6f..a2e8c3b2cf35 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -259,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 * for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); - if (lpar_rc != H_SUCCESS) + if (lpar_rc) continue; for (j = 0; j < 4; j++) { if (HPTE_V_COMPARE(ptes[j].v, want_v) && diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index c73205172447..633711bf1cae 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -58,6 +58,7 @@ struct pg_state { unsigned long start_address; unsigned long start_pa; unsigned long last_pa; + unsigned long page_size; unsigned int level; u64 current_flags; bool check_wx; @@ -155,9 +156,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr) #endif pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); - if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) { + if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) { pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa); - delta = PAGE_SIZE >> 10; + delta = st->page_size >> 10; } else { pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); delta = (addr - st->start_address) >> 10; @@ -188,7 +189,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) } static void note_page(struct pg_state *st, unsigned long addr, - unsigned int level, u64 val) + unsigned int level, u64 val, unsigned long page_size) { u64 flag = val & pg_level[level].mask; u64 pa = val & PTE_RPN_MASK; @@ -200,6 +201,7 @@ static void note_page(struct pg_state *st, unsigned long addr, st->start_address = addr; st->start_pa = pa; st->last_pa = pa; + st->page_size = page_size; pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); /* * Dump the section of virtual memory when: @@ -211,7 +213,7 @@ static void note_page(struct pg_state *st, unsigned long addr, */ } else if (flag != st->current_flags || level != st->level || addr >= st->marker[1].start_address || - (pa != st->last_pa + PAGE_SIZE && + (pa != st->last_pa + st->page_size && (pa != st->start_pa || st->start_pa != st->last_pa))) { /* Check the PTE flags */ @@ -239,6 +241,7 @@ static void note_page(struct pg_state *st, unsigned long addr, st->start_address = addr; st->start_pa = pa; st->last_pa = pa; + st->page_size = page_size; st->current_flags = flag; st->level = level; } else { @@ -254,7 +257,7 @@ static void walk_pte(struct 
pg_state *st, pmd_t *pmd, unsigned long start) for (i = 0; i < PTRS_PER_PTE; i++, pte++) { addr = start + i * PAGE_SIZE; - note_page(st, addr, 4, pte_val(*pte)); + note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE); } } @@ -271,7 +274,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) /* pmd exists */ walk_pte(st, pmd, addr); else - note_page(st, addr, 3, pmd_val(*pmd)); + note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE); } } @@ -287,7 +290,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) /* pud exists */ walk_pmd(st, pud, addr); else - note_page(st, addr, 2, pud_val(*pud)); + note_page(st, addr, 2, pud_val(*pud), PUD_SIZE); } } @@ -306,7 +309,7 @@ static void walk_pagetables(struct pg_state *st) /* pgd exists */ walk_pud(st, pgd, addr); else - note_page(st, addr, 1, pgd_val(*pgd)); + note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); } } @@ -361,7 +364,7 @@ static int ptdump_show(struct seq_file *m, void *v) /* Traverse kernel page tables */ walk_pagetables(&st); - note_page(&st, 0, 0, 0); + note_page(&st, 0, 0, 0, 0); return 0; } diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c index f7ed2f187cb0..784f8df17f73 100644 --- a/arch/powerpc/mm/ptdump/shared.c +++ b/arch/powerpc/mm/ptdump/shared.c @@ -30,6 +30,11 @@ static const struct flag_info flag_array[] = { .val = _PAGE_PRESENT, .set = "present", .clear = " ", + }, { + .mask = _PAGE_COHERENT, + .val = _PAGE_COHERENT, + .set = "coherent", + .clear = " ", }, { .mask = _PAGE_GUARDED, .val = _PAGE_GUARDED, diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index be3517ef0574..20bfd753bcba 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -644,6 +644,12 @@ emit_clear: } break; + /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + /* * BPF_ST(X) */ diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index ca92e01d0bd1..6f013e418834 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -133,6 +133,9 @@ static void pmao_restore_workaround(bool ebb) { } bool is_sier_available(void) { + if (!ppmu) + return false; + if (ppmu->flags & PPMU_HAS_SIER) return true; @@ -1522,9 +1525,16 @@ nocheck: ret = 0; out: if (has_branch_stack(event)) { - power_pmu_bhrb_enable(event); - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( - event->attr.branch_sample_type); + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( + event->attr.branch_sample_type); + + if (bhrb_filter != -1) { + cpuhw->bhrb_filter = bhrb_filter; + power_pmu_bhrb_enable(event); + } } perf_pmu_enable(event->pmu); @@ -1846,7 +1856,6 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; - u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1952,7 +1961,10 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - bhrb_filter = ppmu->bhrb_filter_map( + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); if (bhrb_filter == -1) { @@ -2065,7 +2077,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, left += period; if (left <= 0) left = period; - record = siar_valid(regs); + + /* + * If address is not requested in the sample via + * PERF_SAMPLE_IP, just record that sample 
irrespective + * of SIAR valid check. + */ + if (event->attr.sample_type & PERF_SAMPLE_IP) + record = siar_valid(regs); + else + record = 1; + event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) @@ -2077,6 +2099,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); + /* + * Due to hardware limitation, sometimes SIAR could sample a kernel + * address even when freeze on supervisor state (kernel) is set in + * MMCR2. Check attr.exclude_kernel and address to drop the sample in + * these cases. + */ + if (event->attr.exclude_kernel && + (event->attr.sample_type & PERF_SAMPLE_IP) && + is_kernel_addr(mfspr(SPRN_SIAR))) + record = 0; + /* * Finally record data if requested. */ @@ -2106,6 +2139,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) + power_pmu_stop(event, 0); } } diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 573e0b309c0c..48e8f4b17b91 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1400,16 +1400,6 @@ static void h_24x7_event_read(struct perf_event *event) h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); - /* - * Clear the event count so we can compute the _change_ - * in the 24x7 raw counter value at the end of the txn. - * - * Note that we could alternatively read the 24x7 value - * now and save its value in event->hw.prev_count. But - * that would require issuing a hcall, which would then - * defeat the purpose of using the txn interface. 
-			 */
-			local64_set(&event->count, 0);
 		}
 		put_cpu_var(hv_24x7_reqb);
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
index e608f9db12dd..8965b4463d43 100644
--- a/arch/powerpc/perf/hv-gpci-requests.h
+++ b/arch/powerpc/perf/hv-gpci-requests.h
@@ -95,7 +95,7 @@ REQUEST(__field(0,	8,	partition_id)
 #define REQUEST_NAME system_performance_capabilities
 #define REQUEST_NUM 0x40
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__field(0,	1,	perf_collect_privileged)
 	__field(0x1,	1,	capability_mask)
@@ -223,7 +223,7 @@ REQUEST(__field(0,	2,	partition_id)
 #define REQUEST_NAME system_hypervisor_times
 #define REQUEST_NUM 0xF0
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
 	__count(0x8,	8,	time_spent_processing_virtual_processor_timers)
@@ -234,7 +234,7 @@ REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
 #define REQUEST_NAME system_tlbie_count_and_time
 #define REQUEST_NUM 0xF4
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__count(0,	8,	tlbie_instructions_issued)
 /*
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index cb50a9e1fd2d..eb82dda884e5 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
 static struct imc_pmu_ref *trace_imc_refc;
 static int trace_imc_mem_size;
+/*
+ * Global data structure used to avoid races between thread,
+ * core and trace-imc
+ */
+static struct imc_pmu_ref imc_global_refc = {
+	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+	.id = 0,
+	.refc = 0,
+};
+
 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
 {
 	return container_of(event->pmu, struct imc_pmu, pmu);
@@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
 			return -EINVAL;
 		ref->refc = 0;
+		/*
+		 * Reduce the global reference count if this is the
+		 * last cpu in this core and a core-imc event is
+		 * running on this cpu.
+		 */
+		mutex_lock(&imc_global_refc.lock);
+		if (imc_global_refc.id == IMC_DOMAIN_CORE)
+			imc_global_refc.refc--;
+
+		mutex_unlock(&imc_global_refc.lock);
 	}
 	return 0;
 }
@@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void)
 				 ppc_core_imc_cpu_offline);
 }
+static void reset_global_refc(struct perf_event *event)
+{
+	mutex_lock(&imc_global_refc.lock);
+	imc_global_refc.refc--;
+
+	/*
+	 * If no other thread is running any
+	 * event for this domain (thread/core/trace),
+	 * set the global id to zero.
+	 */
+	if (imc_global_refc.refc <= 0) {
+		imc_global_refc.refc = 0;
+		imc_global_refc.id = 0;
+	}
+	mutex_unlock(&imc_global_refc.lock);
+}
+
 static void core_imc_counters_release(struct perf_event *event)
 {
 	int rc, core_id;
@@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event)
 		ref->refc = 0;
 	}
 	mutex_unlock(&ref->lock);
+
+	reset_global_refc(event);
 }
 static int core_imc_event_init(struct perf_event *event)
@@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event)
 	++ref->refc;
 	mutex_unlock(&ref->lock);
+	/*
+	 * Since the system can run either in accumulation or trace-mode
+	 * of IMC at a time, core-imc events are allowed only if no other
+	 * trace/thread imc events are enabled/monitored.
+	 *
+	 * Take the global lock, and check the refc.id
+	 * to know whether any other trace/thread imc
+	 * events are running.
+	 */
+	mutex_lock(&imc_global_refc.lock);
+	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+		/*
+		 * No other trace/thread imc events are running in
+		 * the system, so set the refc.id to core-imc.
+		 */
+		imc_global_refc.id = IMC_DOMAIN_CORE;
+		imc_global_refc.refc++;
+	} else {
+		mutex_unlock(&imc_global_refc.lock);
+		return -EBUSY;
+	}
+	mutex_unlock(&imc_global_refc.lock);
+
 	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
 	event->destroy = core_imc_counters_release;
 	return 0;
@@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
 static int ppc_thread_imc_cpu_offline(unsigned int cpu)
 {
-	mtspr(SPRN_LDBAR, 0);
+	/*
+	 * Set bit 0 of LDBAR to zero.
+	 *
+	 * If bit 0 of LDBAR is unset, it will stop posting
+	 * the counter data to memory.
+	 * For thread-imc, bit 0 of LDBAR will be set to 1 in the
+	 * event_add function. So reset this bit here, to stop the updates
+	 * to memory in the cpu_offline path.
+	 */
+	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+	/* Reduce the refc if a thread-imc event is running on this cpu */
+	mutex_lock(&imc_global_refc.lock);
+	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+		imc_global_refc.refc--;
+	mutex_unlock(&imc_global_refc.lock);
+
 	return 0;
 }
@@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event)
 	if (!target)
 		return -EINVAL;
+	mutex_lock(&imc_global_refc.lock);
+	/*
+	 * Check if any other trace/core imc events are running in the
+	 * system; if not, set the global id to thread-imc.
+	 */
+	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
+		imc_global_refc.id = IMC_DOMAIN_THREAD;
+		imc_global_refc.refc++;
+	} else {
+		mutex_unlock(&imc_global_refc.lock);
+		return -EBUSY;
+	}
+	mutex_unlock(&imc_global_refc.lock);
+
 	event->pmu->task_ctx_nr = perf_sw_context;
+	event->destroy = reset_global_refc;
 	return 0;
 }
@@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 	int core_id;
 	struct imc_pmu_ref *ref;
-	mtspr(SPRN_LDBAR, 0);
-
 	core_id = smp_processor_id() / threads_per_core;
 	ref = &core_imc_refc[core_id];
+	if (!ref) {
+		pr_debug("imc: Failed to get event reference count\n");
+		return;
+	}
 	mutex_lock(&ref->lock);
 	ref->refc--;
@@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 		ref->refc = 0;
 	}
 	mutex_unlock(&ref->lock);
+
+	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
 	/*
 	 * Take a snapshot and calculate the delta and update
 	 * the event counter values.
@@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)
 static int ppc_trace_imc_cpu_offline(unsigned int cpu)
 {
-	mtspr(SPRN_LDBAR, 0);
+	/*
+	 * No need to set bit 0 of LDBAR to zero, as
+	 * it is set to zero for imc trace-mode.
+	 *
+	 * Reduce the refc if any trace-imc event is running
+	 * on this cpu.
+	 */
+	mutex_lock(&imc_global_refc.lock);
+	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+		imc_global_refc.refc--;
+	mutex_unlock(&imc_global_refc.lock);
+
 	return 0;
 }
@@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
 	local_mem = get_trace_imc_event_base_addr();
 	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
-	if (core_imc_refc)
-		ref = &core_imc_refc[core_id];
+	/* trace-imc reference count */
+	if (trace_imc_refc)
+		ref = &trace_imc_refc[core_id];
 	if (!ref) {
-		/* If core-imc is not enabled, use trace-imc reference count */
-		if (trace_imc_refc)
-			ref = &trace_imc_refc[core_id];
-		if (!ref)
-			return -EINVAL;
+		pr_debug("imc: Failed to get the event reference count\n");
+		return -EINVAL;
 	}
+
 	mtspr(SPRN_LDBAR, ldbar_value);
 	mutex_lock(&ref->lock);
 	if (ref->refc == 0) {
@@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
 				get_hard_smp_processor_id(smp_processor_id()))) {
 			mutex_unlock(&ref->lock);
 			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
-			mtspr(SPRN_LDBAR, 0);
 			return -EINVAL;
 		}
 	}
 	++ref->refc;
 	mutex_unlock(&ref->lock);
-
 	return 0;
 }
@@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
 	int core_id = smp_processor_id() / threads_per_core;
 	struct imc_pmu_ref *ref = NULL;
-	if (core_imc_refc)
-		ref = &core_imc_refc[core_id];
+	if (trace_imc_refc)
+		ref = &trace_imc_refc[core_id];
 	if (!ref) {
-		/* If core-imc is not enabled, use trace-imc reference count */
-		if (trace_imc_refc)
-			ref = &trace_imc_refc[core_id];
-		if (!ref)
-			return;
+		pr_debug("imc: Failed to get event reference count\n");
+		return;
 	}
-	mtspr(SPRN_LDBAR, 0);
+
 	mutex_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
@@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
 		ref->refc = 0;
 	}
 	mutex_unlock(&ref->lock);
+
 	trace_imc_event_stop(event, flags);
 }
@@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event)
 	if (event->attr.sample_period == 0)
 		return -ENOENT;
+	/*
+	 * Take the global lock, and make sure
+	 * no other thread is running any core/thread imc
+	 * events
+	 */
+	mutex_lock(&imc_global_refc.lock);
+	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+		/*
+		 * No core/thread imc events are running in the
+		 * system, so set the refc.id to trace-imc.
+		 */
+		imc_global_refc.id = IMC_DOMAIN_TRACE;
+		imc_global_refc.refc++;
+	} else {
+		mutex_unlock(&imc_global_refc.lock);
+		return -EBUSY;
+	}
+	mutex_unlock(&imc_global_refc.lock);
+
 	event->hw.idx = -1;
 	target = event->hw.target;
 	event->pmu->task_ctx_nr = perf_hw_context;
+	event->destroy = reset_global_refc;
 	return 0;
 }
@@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void)
 static void thread_imc_ldbar_disable(void *dummy)
 {
 	/*
-	 * By Zeroing LDBAR, we disable thread-imc
-	 * updates.
+	 * By setting the 0th bit of LDBAR to zero, we disable thread-imc
+	 * updates to memory.
 	 */
-	mtspr(SPRN_LDBAR, 0);
+	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 }
 void thread_imc_disable(void)
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 4c86da5eb28a..944180f55a3c 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		mask  |= CNST_PMC_MASK(pmc);
 		value |= CNST_PMC_VAL(pmc);
+
+		/*
+		 * PMC5 and PMC6 are used to count cycles and instructions and
+		 * they do not support most of the constraint bits. Add a check
+		 * to exclude PMC5/6 from most of the constraints except for
+		 * EBB/BHRB.
+		 */
+		if (pmc >= 5)
+			goto ebb_bhrb;
 	}
 	if (pmc <= 4) {
@@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		}
 	}
+ebb_bhrb:
 	if (!pmc && ebb)
 		/* EBB events must specify the PMC */
 		return -1;
@@ -353,8 +363,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 	 * EBB events are pinned & exclusive, so this should never actually
 	 * hit, but we leave it as a fallback in case.
 	 */
-	mask  |= CNST_EBB_VAL(ebb);
-	value |= CNST_EBB_MASK;
+	mask  |= CNST_EBB_MASK;
+	value |= CNST_EBB_VAL(ebb);
 	*maskp = mask;
 	*valp = value;
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index e6e2adcc7b64..c13d64c3b019 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
 	if (mbase == NULL) {
 		printk(KERN_ERR "%pOF: Can't map internal config space !",
 			port->node);
-		goto done;
+		return;
 	}
 	while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
@@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
 	}
 	if (attempt)
 		port->link = 1;
-done:
 	iounmap(mbase);
-
 }
 static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 3a9969c429b3..054f927bfef9 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -181,7 +181,7 @@ sram_code:
   udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
 	mullw	r12, r12, r11
 	mftb	r13	/* start */
-	addi	r12, r13, r12	/* end */
+	add	r12, r13, r12	/* end */
     1:
 	mftb	r13	/* current */
 	cmp	cr0, r13, r12
diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
index c80bd7afd6c5..b06c6d26dd72 100644
--- a/arch/powerpc/platforms/8xx/micropatch.c
+++ b/arch/powerpc/platforms/8xx/micropatch.c
@@ -361,6 +361,17 @@ void __init cpm_load_patch(cpm8xx_t *cp)
 	if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) {
 		smc_uart_t *smp;
+		if (IS_ENABLED(CONFIG_PPC_EARLY_DEBUG_CPM)) {
+			int i;
+
+			for (i = 0; i < sizeof(*smp); i += 4) {
+				u32 __iomem *src = (u32 __iomem *)&cp->cp_dparam[PROFF_SMC1 + i];
+				u32 __iomem *dst = (u32 __iomem *)&cp->cp_dparam[PROFF_DSP1 + i];
+
+				out_be32(dst, in_be32(src));
+			}
+		}
+
 		smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
 		out_be16(&smp->smc_rpbase, 0x1ec0);
 		smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2];
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d82e3664ffdf..18792a5b003a 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -219,12 +219,11 @@ config TAU
 	  temperature within 2-4 degrees Celsius.
 	  This option shows the current on-die temperature in /proc/cpuinfo
 	  if the cpu supports it.
-	  Unfortunately, on some chip revisions, this sensor is very inaccurate
-	  and in many cases, does not work at all, so don't assume the cpu
-	  temp is actually what /proc/cpuinfo says it is.
+	  Unfortunately, this sensor is very inaccurate when uncalibrated, so
+	  don't assume the cpu temp is actually what /proc/cpuinfo says it is.
 config TAU_INT
-	bool "Interrupt driven TAU driver (DANGEROUS)"
+	bool "Interrupt driven TAU driver (EXPERIMENTAL)"
 	depends on TAU
 	---help---
 	  The TAU supports an interrupt driven mode which causes an interrupt
 	  to get notified the temp has exceeded a range.
 	  With this option off, a timer is used to re-check the temperature
 	  periodically.
-	  However, on some cpus it appears that the TAU interrupt hardware
-	  is buggy and can cause a situation which would lead unexplained hard
-	  lockups.
-
-	  Unless you are extending the TAU driver, or enjoy kernel/hardware
-	  debugging, leave this option off.
+	  If in doubt, say N here.
 config TAU_AVERAGE
 	bool "Average high and low temp"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 12543e53fa96..f0330ce498d1 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -389,7 +389,7 @@ config PPC_KUAP
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 0f7c8241912b..f2ff359041ee 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -44,6 +44,7 @@ config SPU_FS
 	tristate "SPU file system"
 	default m
 	depends on PPC_CELL
+	depends on COREDUMP
 	select SPU_BASE
 	help
 	  The SPU file system is used to access Synergistic Processing
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c0f950a3f4e1..f4a4dfb191e7 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	int ret;
 	struct spu_context *ctx = file->private_data;
+	u32 stat, data;
+	int ret;
 	if (!access_ok(buf, len))
 		return -EFAULT;
@@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	spin_lock(&ctx->csa.register_lock);
-	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+	stat = ctx->csa.prob.mb_stat_R;
+	data = ctx->csa.prob.pu_mb_R;
 	spin_unlock(&ctx->csa.register_lock);
 	spu_release_saved(ctx);
-	return ret;
+	/* EOF if there's no entry in the mbox */
+	if (!(stat & 0x0000ff))
+		return 0;
+
+	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
 }
 static const struct file_operations spufs_mbox_info_fops = {
@@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
+	u32 stat, data;
 	int ret;
 	if (!access_ok(buf, len))
@@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	spin_lock(&ctx->csa.register_lock);
-	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+	stat = ctx->csa.prob.mb_stat_R;
+	data = ctx->csa.priv2.puint_mb_R;
 	spin_unlock(&ctx->csa.register_lock);
 	spu_release_saved(ctx);
-	return ret;
+	/* EOF if there's no entry in the ibox */
+	if (!(stat & 0xff0000))
+		return 0;
+
+	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
 }
 static const struct file_operations spufs_ibox_info_fops = {
@@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
 	.llseek	= generic_file_llseek,
 };
+static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
+{
+	return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
+}
+
 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
 			char __user *buf, size_t len, loff_t *pos)
 {
@@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
 	u32 wbox_stat;
 	wbox_stat = ctx->csa.prob.mb_stat_R;
-	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+	cnt = spufs_wbox_info_cnt(ctx);
 	for (i = 0; i < cnt; i++) {
 		data[i] = ctx->csa.spu_mailbox_data[i];
 	}
@@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	int ret;
+	u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
+	int ret, count;
 	if (!access_ok(buf, len))
 		return -EFAULT;
@@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	spin_lock(&ctx->csa.register_lock);
-	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+	count = spufs_wbox_info_cnt(ctx);
+	memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
 	spin_unlock(&ctx->csa.register_lock);
 	spu_release_saved(ctx);
-	return ret;
+	return simple_read_from_buffer(buf, len, pos, &data,
+				count * sizeof(u32));
 }
 static const struct file_operations spufs_wbox_info_fops = {
@@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
 	.llseek	= generic_file_llseek,
 };
-static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
-			char __user *buf, size_t len, loff_t *pos)
+static void spufs_get_dma_info(struct spu_context *ctx,
+		struct spu_dma_info *info)
 {
-	struct spu_dma_info info;
-	struct mfc_cq_sr *qp, *spuqp;
 	int i;
-	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
-	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
-	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
-	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
-	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+	info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+	info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+	info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
+	info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+	info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
 	for (i = 0; i < 16; i++) {
-		qp = &info.dma_info_command_data[i];
-		spuqp = &ctx->csa.priv2.spuq[i];
+		struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
+		struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
 		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
 		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
 		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
 		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
 	}
+}
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+			char __user *buf, size_t len, loff_t *pos)
+{
+	struct spu_dma_info info;
+
+	spufs_get_dma_info(ctx, &info);
 	return simple_read_from_buffer(buf, len, pos, &info,
 			sizeof info);
@@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
+	struct spu_dma_info info;
 	int ret;
 	if (!access_ok(buf, len))
@@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	spin_lock(&ctx->csa.register_lock);
-	ret = __spufs_dma_info_read(ctx, buf, len, pos);
+	spufs_get_dma_info(ctx, &info);
 	spin_unlock(&ctx->csa.register_lock);
 	spu_release_saved(ctx);
-	return ret;
+	return simple_read_from_buffer(buf, len, pos, &info,
+				sizeof(info));
 }
 static const struct file_operations spufs_dma_info_fops = {
@@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
 	.llseek = no_llseek,
 };
+static void spufs_get_proxydma_info(struct spu_context *ctx,
+		struct spu_proxydma_info *info)
+{
+	int i;
+
+	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+
+	for (i = 0; i < 8; i++) {
+		struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
+		struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
+
+		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+	}
+}
+
 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
 			char __user *buf, size_t len, loff_t *pos)
 {
 	struct spu_proxydma_info info;
-	struct mfc_cq_sr *qp, *puqp;
 	int ret = sizeof info;
-	int i;
 	if (len < ret)
 		return -EINVAL;
@@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
 	if (!access_ok(buf, len))
 		return -EFAULT;
-	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
-	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
-	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
-	for (i = 0; i < 8; i++) {
-		qp = &info.proxydma_info_command_data[i];
-		puqp = &ctx->csa.priv2.puq[i];
-
-		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
-		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
-		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
-		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
-	}
+	spufs_get_proxydma_info(ctx, &info);
 	return simple_read_from_buffer(buf, len, pos, &info,
 			sizeof info);
@@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
+	struct spu_proxydma_info info;
 	int ret;
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
 	spin_lock(&ctx->csa.register_lock);
-	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+	spufs_get_proxydma_info(ctx, &info);
 	spin_unlock(&ctx->csa.register_lock);
 	spu_release_saved(ctx);
-	return ret;
+	return simple_read_from_buffer(buf, len, pos, &info,
+			sizeof(info));
 }
 static const struct file_operations spufs_proxydma_info_fops = {
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 9cd6f3e1000b..09a0594350b6 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -294,23 +294,6 @@ static int __init maple_probe(void)
 	return 1;
 }
-define_machine(maple) {
-	.name			= "Maple",
-	.probe			= maple_probe,
-	.setup_arch		= maple_setup_arch,
-	.init_IRQ		= maple_init_IRQ,
-	.pci_irq_fixup		= maple_pci_irq_fixup,
-	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
-	.restart		= maple_restart,
-	.halt			= maple_halt,
-	.get_boot_time		= maple_get_boot_time,
-	.set_rtc_time		= maple_set_rtc_time,
-	.get_rtc_time		= maple_get_rtc_time,
-	.calibrate_decr		= generic_calibrate_decr,
-	.progress		= maple_progress,
-	.power_save		= power4_idle,
-};
-
 #ifdef CONFIG_EDAC
 /*
  * Register a platform device for CPC925 memory controller on
@@ -367,3 +350,20 @@ static int __init maple_cpc925_edac_setup(void)
 }
 machine_device_initcall(maple, maple_cpc925_edac_setup);
 #endif
+
+define_machine(maple) {
+	.name			= "Maple",
+	.probe			= maple_probe,
+	.setup_arch		= maple_setup_arch,
+	.init_IRQ		= maple_init_IRQ,
+	.pci_irq_fixup		= maple_pci_irq_fixup,
+	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
+	.restart		= maple_restart,
+	.halt			= maple_halt,
+	.get_boot_time		= maple_get_boot_time,
+	.set_rtc_time		= maple_set_rtc_time,
+	.get_rtc_time		= maple_get_rtc_time,
+	.calibrate_decr		= generic_calibrate_decr,
+	.progress		= maple_progress,
+	.power_save		= power4_idle,
+};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index bd6085b470b7..935dcac4c02d 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -293,14 +293,7 @@ grackle_wake_up:
 	 * we do any r1 memory access as we are not sure they
 	 * are in a sane state above the first 256Mb region
 	 */
-	li	r0,16		/* load up segment register values */
-	mtctr	r0		/* for context 0 */
-	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
-	li	r4,0
-3:	mtsrin	r3,r4
-	addi	r3,r3,0x111	/* increment VSID */
-	addis	r4,r4,0x1000	/* address of next segment */
-	bdnz	3b
+	bl	load_segment_registers
 	sync
 	isync
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index eb2e75dac369..b45ab455a18e 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -30,6 +30,7 @@ struct memtrace_entry {
 	char name[16];
 };
+static DEFINE_MUTEX(memtrace_mutex);
 static u64 memtrace_size;
 static struct memtrace_entry *memtrace_array;
@@ -67,6 +68,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
 	return 0;
 }
+static void memtrace_clear_range(unsigned long start_pfn,
+				 unsigned long nr_pages)
+{
+	unsigned long pfn;
+
+	/*
+	 * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM
+	 * does not apply, avoid passing around "struct page" and use
+	 * clear_page() directly instead.
+	 */
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
+			cond_resched();
+		clear_page(__va(PFN_PHYS(pfn)));
+	}
+}
+
 /* called with device_hotplug_lock held */
 static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
 {
@@ -111,6 +129,11 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
 	lock_device_hotplug();
 	for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
 		if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
+			/*
+			 * Clear the range while we still have a linear
+			 * mapping.
+			 */
+			memtrace_clear_range(base_pfn, nr_pages);
 			/*
 			 * Remove memory in memory block size chunks so that
 			 * iomem resources are always split to the same size and
@@ -268,6 +291,7 @@ static int memtrace_online(void)
 static int memtrace_enable_set(void *data, u64 val)
 {
+	int rc = -EAGAIN;
 	u64 bytes;
 	/*
@@ -280,25 +304,31 @@ static int memtrace_enable_set(void *data, u64 val)
 		return -EINVAL;
 	}
+	mutex_lock(&memtrace_mutex);
+
 	/* Re-add/online previously removed/offlined memory */
 	if (memtrace_size) {
 		if (memtrace_online())
-			return -EAGAIN;
+			goto out_unlock;
 	}
-	if (!val)
-		return 0;
+	if (!val) {
+		rc = 0;
+		goto out_unlock;
+	}
 	/* Offline and remove memory */
 	if (memtrace_init_regions_runtime(val))
-		return -EINVAL;
+		goto out_unlock;
 	if (memtrace_init_debugfs())
-		return -EINVAL;
+		goto out_unlock;
 	memtrace_size = val;
-
-	return 0;
+	rc = 0;
+out_unlock:
+	mutex_unlock(&memtrace_mutex);
+	return rc;
 }
 static int memtrace_enable_get(void *data, u64 *val)
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index b95b9e3c4c98..c640cb993209 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -384,7 +384,8 @@ static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
 	for (i = 0; i < npucomp->pe_num; ++i) {
 		struct pnv_ioda_pe *pe = npucomp->pe[i];
-		if (!pe->table_group.ops->take_ownership)
+		if (!pe->table_group.ops ||
+		    !pe->table_group.ops->take_ownership)
 			continue;
 		pe->table_group.ops->take_ownership(&pe->table_group);
 	}
@@ -400,7 +401,8 @@ static void pnv_npu_peers_release_ownership(
 	for (i = 0; i < npucomp->pe_num; ++i) {
 		struct pnv_ioda_pe *pe = npucomp->pe[i];
-		if (!pe->table_group.ops->release_ownership)
+		if (!pe->table_group.ops ||
+		    !pe->table_group.ops->release_ownership)
 			continue;
 		pe->table_group.ops->release_ownership(&pe->table_group);
 	}
@@ -560,6 +562,11 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
 		return -ENODEV;
 	hose = pci_bus_to_host(npdev->bus);
+	if (hose->npu == NULL) {
+		dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
+		return 0;
+	}
+
 	nphb = hose->private_data;
 	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
@@ -607,6 +614,11 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
 		return -ENODEV;
 	hose = pci_bus_to_host(npdev->bus);
+	if (hose->npu == NULL) {
+		dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
+		return 0;
+	}
+
 	nphb = hose->private_data;
 	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 543c816fa99e..0e6693bacb7e 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
 	return count;
 }
-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
-					uint32_t type)
+static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
 {
 	struct dump_obj *dump;
 	int rc;
 	dump = kzalloc(sizeof(*dump), GFP_KERNEL);
 	if (!dump)
-		return NULL;
+		return;
 	dump->kobj.kset = dump_kset;
@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
 	rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
 	if (rc) {
 		kobject_put(&dump->kobj);
-		return NULL;
+		return;
 	}
+	/*
+	 * As soon as the sysfs file for this dump is created/activated there is
+	 * a chance the opal_errd daemon (or any userspace) might read and
+	 * acknowledge the dump before kobject_uevent() is called. If that
+	 * happens, there is a potential race between
+	 * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
+	 * use-after-free of a kernfs object, resulting in a kernel crash.
+	 *
+	 * To avoid that, we need to take a reference on behalf of the bin file,
+	 * so that our reference remains valid while we call kobject_uevent().
+	 * We then drop our reference before exiting the function, leaving the
+	 * bin file to drop the last reference (if it hasn't already).
+	 */
+
+	/* Take a reference for the bin file */
+	kobject_get(&dump->kobj);
 	rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
-	if (rc) {
+	if (rc == 0) {
+		kobject_uevent(&dump->kobj, KOBJ_ADD);
+
+		pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+			__func__, dump->id, dump->size);
+	} else {
+		/* Drop reference count taken for bin file */
 		kobject_put(&dump->kobj);
-		return NULL;
 	}
-	pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
-		__func__, dump->id, dump->size);
-
-	kobject_uevent(&dump->kobj, KOBJ_ADD);
-
-	return dump;
+	/* Drop our reference */
+	kobject_put(&dump->kobj);
+	return;
 }
 static irqreturn_t process_dump(int irq, void *data)
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 62ef7ad995da..5e33b1fc67c2 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
 	return count;
 }
-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
+static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
 {
 	struct elog_obj *elog;
 	int rc;
 	elog = kzalloc(sizeof(*elog), GFP_KERNEL);
 	if (!elog)
-		return NULL;
+		return;
 	elog->kobj.kset = elog_kset;
@@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 	rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
 	if (rc) {
 		kobject_put(&elog->kobj);
-		return NULL;
+		return;
 	}
+	/*
+	 * As soon as the sysfs file for this elog is created/activated there is
+	 * a chance the opal_errd daemon (or any userspace) might read and
+	 * acknowledge the elog before kobject_uevent() is called. If that
+	 * happens, there is a potential race between
+	 * elog_ack_store->kobject_put() and kobject_uevent() which leads to a
+	 * use-after-free of a kernfs object, resulting in a kernel crash.
+	 *
+	 * To avoid that, we need to take a reference on behalf of the bin file,
+	 * so that our reference remains valid while we call kobject_uevent().
+	 * We then drop our reference before exiting the function, leaving the
+	 * bin file to drop the last reference (if it hasn't already).
+	 */
+
+	/* Take a reference for the bin file */
+	kobject_get(&elog->kobj);
 	rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
-	if (rc) {
+	if (rc == 0) {
+		kobject_uevent(&elog->kobj, KOBJ_ADD);
+	} else {
+		/* Drop the reference taken for the bin file */
 		kobject_put(&elog->kobj);
-		return NULL;
 	}
-	kobject_uevent(&elog->kobj, KOBJ_ADD);
+	/* Drop our reference */
+	kobject_put(&elog->kobj);
-	return elog;
+	return;
 }
 static irqreturn_t elog_event(int irq, void *data)
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 7ccc5c85c74e..000b350d4060 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
-	/*
-	 * Return here, either because 'imc' directory already exists,
-	 * Or failed to create a new one.
-	 */
 	if (!imc_debugfs_parent)
 		return;
@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
 	}
 	pmu_ptr->imc_counter_mmaped = true;
-	export_imc_mode_and_cmd(node, pmu_ptr);
 	kfree(base_addr_arr);
 	kfree(chipid_arr);
 	return 0;
@@ -151,7 +146,7 @@ error:
 * and domain as the inputs.
 * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
 */
-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 {
 	int ret = 0;
 	struct imc_pmu *pmu_ptr;
@@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 	/* Return for unknown domain */
 	if (domain < 0)
-		return -EINVAL;
+		return NULL;
 	/* memory for pmu */
 	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
 	if (!pmu_ptr)
-		return -ENOMEM;
+		return NULL;
 	/* Set the domain */
 	pmu_ptr->domain = domain;
 	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
-	if (ret) {
-		ret = -EINVAL;
+	if (ret)
 		goto free_pmu;
-	}
 	if (!of_property_read_u32(parent, "offset", &offset)) {
-		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
-			ret = -EINVAL;
+		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
 			goto free_pmu;
-		}
 	}
 	/* Function to register IMC pmu */
@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
 			kfree(pmu_ptr->mem_info);
 		kfree(pmu_ptr);
-		return ret;
+		return NULL;
 	}
-	return 0;
+	return pmu_ptr;
 free_pmu:
 	kfree(pmu_ptr);
-	return ret;
+	return NULL;
 }
 static void disable_nest_pmu_counters(void)
@@ -254,6 +245,7 @@ int get_max_nest_dev(void)
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
 	struct device_node *imc_dev = pdev->dev.of_node;
+	struct imc_pmu *pmu;
 	int pmu_count = 0, domain;
 	bool core_imc_reg = false, thread_imc_reg = false;
 	u32 type;
@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 	}
 	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+		pmu = NULL;
 		if (of_property_read_u32(imc_dev, "type", &type)) {
 			pr_warn("IMC Device without type property\n");
 			continue;
@@ -300,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			break;
 		}
-		if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
-			if (domain == IMC_DOMAIN_NEST)
+		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
+		if (pmu != NULL) {
+			if (domain == IMC_DOMAIN_NEST) {
+				if (!imc_debugfs_parent)
+					export_imc_mode_and_cmd(imc_dev, pmu);
 				pmu_count++;
+			}
 			if (domain == IMC_DOMAIN_CORE)
 				core_imc_reg = true;
 			if (domain == IMC_DOMAIN_THREAD)
@@ -310,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 		}
 	}
-	/* If none of the nest units are registered, remove debugfs interface */
-	if (pmu_count == 0)
-		debugfs_remove_recursive(imc_debugfs_parent);
-
 	/* If core imc is not registered, unregister thread-imc */
 	if (!core_imc_reg && thread_imc_reg)
 		unregister_thread_imc();
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 83498604d322..1b7b0d0c3ebd 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -122,12 +122,29 @@ static void pnv_setup_rfi_flush(void)
 			type = L1D_FLUSH_ORI;
 	}
+	/*
+	 * If we are non-Power9 bare metal, we don't need to flush on kernel
+	 * entry or after user access: they fix a P9 specific vulnerability.
+	 */
+	if (!pvr_version_is(PVR_POWER9)) {
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+	}
+
 	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
 	setup_rfi_flush(type, enable);
 	setup_count_cache_flush();
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }
 static void __init pnv_setup_arch(void)
@@ -169,11 +186,16 @@ static void __init pnv_init(void)
 		add_preferred_console("hvc", 0, NULL);
 	if (!radix_enabled()) {
+		size_t size = sizeof(struct slb_entry) * mmu_slb_size;
 		int i;
 		/* Allocate per cpu area to save old slb contents during MCE */
-		for_each_possible_cpu(i)
-			paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+		for_each_possible_cpu(i) {
+			paca_ptrs[i]->mce_faulty_slbs =
+					memblock_alloc_node(size,
+					__alignof__(struct slb_entry),
+					cpu_to_node(i));
+		}
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 13e251699346..bbf361f23ae8 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -43,7 +43,7 @@
 #include
 #define DBG(fmt...) udbg_printf(fmt)
 #else
-#define DBG(fmt...)
+#define DBG(fmt...) do { } while (0)
 #endif
 static void pnv_smp_setup_cpu(int cpu)
@@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void)
 	/* Standard hot unplug procedure */
 	idle_task_exit();
-	current->active_mm = NULL; /* for sanity */
 	cpu = smp_processor_id();
 	DBG("CPU%d offline\n", cpu);
 	generic_set_cpu_dead(cpu);
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 423be34f0f5f..f42fe4e86ce5 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -200,13 +200,14 @@ void ps3_mm_vas_destroy(void)
 {
 	int result;
-	DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);
-
 	if (map.vas_id) {
 		result = lv1_select_virtual_address_space(0);
-		BUG_ON(result);
-		result = lv1_destruct_virtual_address_space(map.vas_id);
-		BUG_ON(result);
+		result += lv1_destruct_virtual_address_space(map.vas_id);
+
+		if (result) {
+			lv1_panic(0);
+		}
+
 		map.vas_id = 0;
 	}
 }
@@ -304,19 +305,20 @@ static void ps3_mm_region_destroy(struct mem_region *r)
 	int result;
 	if (!r->destroy) {
-		pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
-			__func__, __LINE__, r->base, r->size);
 		return;
 	}
-	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
-
 	if (r->base) {
 		result = lv1_release_memory(r->base);
-		BUG_ON(result);
+
+		if (result) {
+			lv1_panic(0);
+		}
+
 		r->size = r->base = r->offset = 0;
 		map.total = map.rm.size;
 	}
+	ps3_mm_set_repository_highmem(NULL);
 }
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 16e86ba8aa20..f6b7749d6ada 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
 #define NEXT_PROPERTY	3
 #define PREV_PARENT	4
 #define MORE_MEMORY	5
-#define CALL_AGAIN	-2
 #define ERR_CFG_USE	-9003
 struct device_node *dlpar_configure_connector(__be32 drc_index,
@@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
 		spin_unlock(&rtas_data_buf_lock);
+		if (rtas_busy_delay(rc))
+			continue;
+
 		switch (rc) {
 		case COMPLETE:
 			break;
@@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
 			last_dn = last_dn->parent;
 			break;
-		case CALL_AGAIN:
-			break;
-
 		case MORE_MEMORY:
 		case ERR_CFG_USE:
 		default:
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 4c3af2e9eb8e..f364909d0c08 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -27,7 +27,7 @@ static bool rtas_hp_event;
 unsigned long pseries_memory_block_size(void)
 {
 	struct device_node *np;
-	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
 	struct resource r;
 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -223,7 +223,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **end_lmb)
 {
 	struct drmem_lmb *lmb, *start, *end;
-	struct drmem_lmb *last_lmb;
+	struct drmem_lmb *limit;
 	start = NULL;
 	for_each_drmem_lmb(lmb) {
@@ -236,10 +236,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
 	if (!start)
 		return -EINVAL;
-	end = &start[n_lmbs - 1];
+	end = &start[n_lmbs];
-	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
-	if (end > last_lmb)
+	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
+	if (end > limit)
 		return -EINVAL;
 	*start_lmb = start;
@@ -279,7 +279,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb)
 	return dlpar_change_lmb_state(lmb, false);
 }
-static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
+static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
 {
 	unsigned long block_sz, start_pfn;
 	int sections_per_block;
@@ -310,10 +310,11 @@ out:
 static int pseries_remove_mem_node(struct device_node *np)
 {
-	const __be32 *regs;
+	const __be32 *prop;
 	unsigned long base;
-	unsigned int lmb_size;
+	unsigned long lmb_size;
 	int ret = -EINVAL;
+	int addr_cells, size_cells;
 	/*
	 * Check to see if we are actually removing memory
	 */
@@ -324,12 +325,19 @@ static int pseries_remove_mem_node(struct device_node *np)
 	/*
	 * Find the base address and size of the memblock
	 */
-	regs = of_get_property(np, "reg", NULL);
-	if (!regs)
+	prop = of_get_property(np, "reg", NULL);
+	if (!prop)
 		return ret;
-	base = be64_to_cpu(*(unsigned long *)regs);
-	lmb_size = be32_to_cpu(regs[3]);
+	addr_cells = of_n_addr_cells(np);
+	size_cells = of_n_size_cells(np);
+
+	/*
+	 * The "reg" property represents an (addr, size) tuple.
+	 */
+	base = of_read_number(prop, addr_cells);
+	prop += addr_cells;
+	lmb_size = of_read_number(prop, size_cells);
 	pseries_remove_memblock(base, lmb_size);
 	return 0;
@@ -376,25 +384,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
 {
+	struct memory_block *mem_block;
 	unsigned long block_sz;
 	int rc;
 	if (!lmb_is_removable(lmb))
 		return -EINVAL;
+	mem_block = lmb_to_memblock(lmb);
+	if (mem_block == NULL)
+		return -EINVAL;
+
 	rc = dlpar_offline_lmb(lmb);
-	if (rc)
+	if (rc) {
+		put_device(&mem_block->dev);
 		return rc;
+	}
 	block_sz = pseries_memory_block_size();
-	__remove_memory(lmb->nid, lmb->base_addr, block_sz);
+	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
+	put_device(&mem_block->dev);
 	/* Update memory regions for memory remove */
 	memblock_remove(lmb->base_addr, block_sz);
 	invalidate_lmb_associativity_index(lmb);
-	lmb_clear_nid(lmb);
 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
 	return 0;
@@ -613,7 +628,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
 #else
 static inline int pseries_remove_memblock(unsigned long base,
-					  unsigned int memblock_size)
+					  unsigned long memblock_size)
 {
 	return -EOPNOTSUPP;
 }
@@ -651,7 +666,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
 static int dlpar_add_lmb(struct drmem_lmb *lmb)
 {
 	unsigned long block_sz;
-	int rc;
+	int nid, rc;
 	if (lmb->flags & DRCONF_MEM_ASSIGNED)
 		return -EINVAL;
@@ -662,11 +677,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
 		return rc;
 	}
-	lmb_set_nid(lmb);
 	block_sz = memory_block_size_bytes();
+	/* Find the node id for this address. */
+	nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
 	/* Add the memory */
-	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
+	rc = __add_memory(nid, lmb->base_addr, block_sz);
 	if (rc) {
 		invalidate_lmb_associativity_index(lmb);
 		return rc;
@@ -674,9 +691,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
 	rc = dlpar_online_lmb(lmb);
 	if (rc) {
-		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
+		__remove_memory(nid, lmb->base_addr, block_sz);
 		invalidate_lmb_associativity_index(lmb);
-		lmb_clear_nid(lmb);
 	} else {
 		lmb->flags |= DRCONF_MEM_ASSIGNED;
 	}
@@ -945,10 +961,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
 static int pseries_add_mem_node(struct device_node *np)
 {
-	const __be32 *regs;
+	const __be32 *prop;
 	unsigned long base;
-	unsigned int lmb_size;
+	unsigned long lmb_size;
 	int ret = -EINVAL;
+	int addr_cells, size_cells;
 	/*
	 * Check to see if we are actually adding memory
	 */
@@ -959,12 +976,18 @@ static int pseries_add_mem_node(struct device_node *np)
 	/*
	 * Find the base and size of the memblock
	 */
-	regs = of_get_property(np, "reg", NULL);
-	if (!regs)
+	prop = of_get_property(np, "reg", NULL);
+	if (!prop)
 		return ret;
-	base = be64_to_cpu(*(unsigned long *)regs);
-	lmb_size = be32_to_cpu(regs[3]);
+	addr_cells = of_n_addr_cells(np);
+	size_cells = of_n_size_cells(np);
+	/*
+	 * The "reg" property represents an (addr, size) tuple.
+	 */
+	base = of_read_number(prop, addr_cells);
+	prop += addr_cells;
+	lmb_size = of_read_number(prop, size_cells);
 	/*
	 * Update memory region to represent the memory add
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f87a5c64e24d..c93b9a3bf237 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1992,7 +1992,7 @@ static int __init vpa_debugfs_init(void)
 {
 	char name[16];
 	long i;
-	static struct dentry *vpa_dir;
+	struct dentry *vpa_dir;
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 		return 0;
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 133f6adcb39c..637300330507 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -4,6 +4,7 @@
 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
 */
+#include
 #include
 #include
 #include
@@ -458,7 +459,28 @@ again:
 			return hwirq;
 		}
-		virq = irq_create_mapping(NULL, hwirq);
+		/*
+		 * Depending on the number of online CPUs in the original
+		 * kernel, it is likely for CPU #0 to be offline in a kdump
+		 * kernel. The associated IRQs in the affinity mappings
+		 * provided by irq_create_affinity_masks() are thus not
+		 * started by irq_startup(), as per design for managed IRQs.
+		 * This can be a problem with multi-queue block devices driven
+		 * by blk-mq: such a non-started IRQ is very likely paired
+		 * with the single queue enforced by blk-mq during kdump (see
+		 * blk_mq_alloc_tag_set()). This causes the device to remain
+		 * silent and likely hangs the guest at some point.
+		 *
+		 * We don't really care about fine-grained affinity when doing
+		 * kdump, actually: simply ignore the pre-computed affinity
+		 * masks in this case and let the default mask with all CPUs
+		 * be used when creating the IRQ mappings.
+		 */
+		if (is_kdump_kernel())
+			virq = irq_create_mapping(NULL, hwirq);
+		else
+			virq = irq_create_mapping_affinity(NULL, hwirq,
+							   entry->affinity);
 		if (!virq) {
 			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 561917fa54a8..afca4b737e80 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
 int remove_phb_dynamic(struct pci_controller *phb)
 {
 	struct pci_bus *b = phb->bus;
+	struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
 	struct resource *res;
 	int rc, i;
@@ -92,7 +93,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
 	/* Remove the PCI bus and unregister the bridge device from sysfs */
 	phb->bus = NULL;
 	pci_remove_bus(b);
-	device_unregister(b->bridge);
+	host_bridge->bus = NULL;
+	device_unregister(&host_bridge->dev);
 	/* Now release the IO resource */
 	if (res->flags & IORESOURCE_IO)
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 3acdcc3bb908..b658fa627a34 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
 	case EPOW_SHUTDOWN_ON_UPS:
 		pr_emerg("Loss of system power detected. System is running on"
			 " UPS/battery. Check RTAS error log for details\n");
-		orderly_poweroff(true);
 		break;
 	case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
@@ -395,10 +394,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
 /*
 * Some versions of FWNMI place the buffer inside the 4kB page starting at
 * 0x7000. Other versions place it inside the rtas buffer. We check both.
+ * Minimum size of the buffer is 16 bytes.
 */
 #define VALID_FWNMI_BUFFER(A) \
-	((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
-	(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+	((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \
+	(((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16))))
 static inline struct rtas_error_log *fwnmi_get_errlog(void)
 {
@@ -494,18 +494,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
 	return 0; /* need to perform reset */
 }
+static int mce_handle_err_realmode(int disposition, u8 error_type)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (disposition == RTAS_DISP_NOT_RECOVERED) {
+		switch (error_type) {
+		case MC_ERROR_TYPE_SLB:
+		case MC_ERROR_TYPE_ERAT:
+			/*
+			 * Store the old slb content in paca before flushing.
+			 * Print this when we go to virtual mode.
+			 * There are chances that we may hit MCE again if there
+			 * is a parity error on the SLB entry we are trying to
+			 * read for saving. Hence limit the slb saving to a
+			 * single level of recursion.
+			 */
+			if (local_paca->in_mce == 1)
+				slb_save_contents(local_paca->mce_faulty_slbs);
+			flush_and_reload_slb();
+			disposition = RTAS_DISP_FULLY_RECOVERED;
+			break;
+		default:
+			break;
+		}
+	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
+		/* Platform corrected itself but could be degraded */
+		pr_err("MCE: limited recovery, system may be degraded\n");
+		disposition = RTAS_DISP_FULLY_RECOVERED;
+	}
+#endif
+	return disposition;
+}
-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+static int mce_handle_err_virtmode(struct pt_regs *regs,
+				   struct rtas_error_log *errp,
+				   struct pseries_mc_errorlog *mce_log,
+				   int disposition)
 {
 	struct mce_error_info mce_err = { 0 };
-	unsigned long eaddr = 0, paddr = 0;
-	struct pseries_errorlog *pseries_log;
-	struct pseries_mc_errorlog *mce_log;
-	int disposition = rtas_error_disposition(errp);
 	int initiator = rtas_error_initiator(errp);
 	int severity = rtas_error_severity(errp);
+	unsigned long eaddr = 0, paddr = 0;
 	u8 error_type, err_sub_type;
+	if (!mce_log)
+		goto out;
+
+	error_type = mce_log->error_type;
+	err_sub_type = rtas_mc_error_sub_type(mce_log);
+
 	if (initiator == RTAS_INITIATOR_UNKNOWN)
 		mce_err.initiator = MCE_INITIATOR_UNKNOWN;
 	else if (initiator == RTAS_INITIATOR_CPU)
@@ -544,18 +581,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
 		mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
 		mce_err.error_class = MCE_ECLASS_UNKNOWN;
-	if (!rtas_error_extended(errp))
-		goto out;
-
-	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
-	if (pseries_log == NULL)
-		goto out;
-
-	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
-	error_type = mce_log->error_type;
-	err_sub_type = rtas_mc_error_sub_type(mce_log);
-
-	switch (mce_log->error_type) {
+	switch (error_type) {
 	case MC_ERROR_TYPE_UE:
 		mce_err.error_type = MCE_ERROR_TYPE_UE;
 		switch (err_sub_type) {
@@ -652,40 +678,45 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
 		mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
 		break;
 	}
-
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (disposition == RTAS_DISP_NOT_RECOVERED) {
-		switch (error_type) {
-		case MC_ERROR_TYPE_SLB:
-		case MC_ERROR_TYPE_ERAT:
-			/*
-			 * Store the old slb content in paca before flushing.
-			 * Print this when we go to virtual mode.
-			 * There are chances that we may hit MCE again if there
-			 * is a parity error on the SLB entry we trying to read
-			 * for saving. Hence limit the slb saving to single
-			 * level of recursion.
-			 */
-			if (local_paca->in_mce == 1)
-				slb_save_contents(local_paca->mce_faulty_slbs);
-			flush_and_reload_slb();
-			disposition = RTAS_DISP_FULLY_RECOVERED;
-			break;
-		default:
-			break;
-		}
-	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
-		/* Platform corrected itself but could be degraded */
-		printk(KERN_ERR "MCE: limited recovery, system may "
-		       "be degraded\n");
-		disposition = RTAS_DISP_FULLY_RECOVERED;
-	}
-#endif
-
 out:
 	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
-			&mce_err, regs->nip, eaddr, paddr);
+		       &mce_err, regs->nip, eaddr, paddr);
+	return disposition;
+}
+static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+{
+	struct pseries_errorlog *pseries_log;
+	struct pseries_mc_errorlog *mce_log = NULL;
+	int disposition = rtas_error_disposition(errp);
+	u8 error_type;
+
+	if (!rtas_error_extended(errp))
+		goto out;
+
+	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+	if (!pseries_log)
+		goto out;
+
+	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+	error_type = mce_log->error_type;
+
+	disposition = mce_handle_err_realmode(disposition, error_type);
+
+	/*
+	 * Enable translation as we will be accessing per-cpu variables
+	 * in save_mce_event(), which may fall outside the RMO region.
+	 * Also leave it enabled because we will subsequently be queuing
+	 * work to workqueues, where per-cpu variables are accessed again.
+	 * Besides, fwnmi_release_errinfo() crashes when called in realmode
+	 * on pseries.
+	 * Note: All the realmode handling like flushing SLB entries for
+	 * SLB multihit is done by now.
+	 */
+out:
+	mtmsr(mfmsr() | MSR_IR | MSR_DR);
+
+	disposition = mce_handle_err_virtmode(regs, errp, mce_log,
+					      disposition);
 	return disposition;
 }
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index bbb97169bf63..6268545947b8 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -36,6 +36,7 @@ static __init int rng_init(void)
 	ppc_md.get_random_seed = pseries_get_random_long;
+	of_node_put(dn);
 	return 0;
 }
 machine_subsys_initcall(pseries, rng_init);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0c8421dd01ab..ce71235c8b81 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -561,6 +561,14 @@ void pseries_setup_rfi_flush(void)
 	setup_rfi_flush(types, enable);
 	setup_count_cache_flush();
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }
 #ifdef CONFIG_PCI_IOV
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 0a24a5a185f0..e5ecadfb5dea 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include "../../kernel/cacheinfo.h"
 static u64 stream_id;
 static struct device suspend_dev;
@@ -78,9 +77,7 @@ static void pseries_suspend_enable_irqs(void)
	 * Update configuration which can be modified based on device tree
	 * changes during resume.
*/ - cacheinfo_cpu_offline(smp_processor_id()); post_mobility_fixup(); - cacheinfo_cpu_online(smp_processor_id()); } /** @@ -132,15 +129,11 @@ static ssize_t store_hibernate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - cpumask_var_t offline_mask; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - stream_id = simple_strtoul(buf, NULL, 16); do { @@ -150,32 +143,16 @@ static ssize_t store_hibernate(struct device *dev, } while (rc == -EAGAIN); if (!rc) { - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, - cpu_online_mask); - rc = rtas_online_cpus_mask(offline_mask); - if (rc) { - pr_err("%s: Could not bring present CPUs online.\n", - __func__); - goto out; - } - stop_topology_update(); rc = pm_suspend(PM_SUSPEND_MEM); start_topology_update(); - - /* Take down CPUs not online prior to suspend */ - if (!rtas_offline_cpus_mask(offline_mask)) - pr_warn("%s: Could not restore CPUs to offline " - "state.\n", __func__); } stream_id = 0; if (!rc) rc = count; -out: - free_cpumask_var(offline_mask); + return rc; } @@ -210,7 +187,6 @@ static struct bus_type suspend_subsys = { static const struct platform_suspend_ops pseries_suspend_ops = { .valid = suspend_valid_only_mem, - .begin = pseries_suspend_begin, .prepare_late = pseries_prepare_late, .enter = pseries_suspend_enter, }; diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index f6b253e2be40..36ec0bdd8b63 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev) /* IO map the message register block. */ of_address_to_resource(np, 0, &rsrc); - msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); + msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc)); if (!msgr_block_addr) { dev_err(&dev->dev, "Failed to iomap MPIC message registers"); return -EFAULT; diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index ad8117148ea3..21b9d1bf39ff 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -174,6 +174,7 @@ int icp_hv_init(void) icp_ops = &icp_hv_ops; + of_node_put(np); return 0; } diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 9651ca061828..2d4c09a77910 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -68,13 +69,6 @@ static u32 xive_ipi_irq; /* Xive state for each CPU */ static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); -/* - * A "disabled" interrupt should never fire, to catch problems - * we set its logical number to this - */ -#define XIVE_BAD_IRQ 0x7fffffff -#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) - /* An invalid CPU target */ #define XIVE_INVALID_TARGET (-1) @@ -263,6 +257,13 @@ notrace void xmon_xive_do_dump(int cpu) xmon_printf("\n"); } +static struct irq_data *xive_get_irq_data(u32 hw_irq) +{ + unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq); + + return irq ? 
irq_get_irq_data(irq) : NULL; +} + int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) { int rc; @@ -279,6 +280,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", hw_irq, target, prio, lirq); + if (!d) + d = xive_get_irq_data(hw_irq); + if (d) { struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); u64 val = xive_esb_read(xd, XIVE_ESB_GET); @@ -1016,12 +1020,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { if (xd->eoi_mmio) { + unmap_kernel_range((unsigned long)xd->eoi_mmio, + 1u << xd->esb_shift); iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) xd->trig_mmio = NULL; xd->eoi_mmio = NULL; } if (xd->trig_mmio) { + unmap_kernel_range((unsigned long)xd->trig_mmio, + 1u << xd->esb_shift); iounmap(xd->trig_mmio); xd->trig_mmio = NULL; } @@ -1150,7 +1158,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu) xc = per_cpu(xive_cpu, cpu); /* Check if we are already setup */ - if (xc->hw_ipi != 0) + if (xc->hw_ipi != XIVE_BAD_IRQ) return 0; /* Grab an IPI from the backend, this will populate xc->hw_ipi */ @@ -1187,7 +1195,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) /* Disable the IPI and free the IRQ data */ /* Already cleaned up ? */ - if (xc->hw_ipi == 0) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; /* Mask the IPI */ @@ -1343,6 +1351,7 @@ static int xive_prepare_cpu(unsigned int cpu) if (np) xc->chip_id = of_get_ibm_chip_id(np); of_node_put(np); + xc->hw_ipi = XIVE_BAD_IRQ; per_cpu(xive_cpu, cpu) = xc; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 0ff6b739052c..3fd086533dcf 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -312,7 +313,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) s64 rc; /* Free the IPI */ - if (!xc->hw_ipi) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; for (;;) { rc = opal_xive_free_irq(xc->hw_ipi); @@ -320,7 +321,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) msleep(OPAL_BUSY_DELAY_MS); continue; } - xc->hw_ipi = 0; + xc->hw_ipi = XIVE_BAD_IRQ; break; } } @@ -646,6 +647,7 @@ static bool xive_native_provision_pages(void) pr_err("Failed to allocate provisioning page\n"); return false; } + kmemleak_ignore(p); opal_xive_donate_page(chip, __pa(p)); } return true; diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 55dc61cb4867..3f15615712b5 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -560,11 +560,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) { - if (!xc->hw_ipi) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; xive_irq_bitmap_free(xc->hw_ipi); - xc->hw_ipi = 0; + xc->hw_ipi = XIVE_BAD_IRQ; } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index 59cd366e7933..382980f4de2d 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -5,6 +5,13 @@ #ifndef __XIVE_INTERNAL_H #define __XIVE_INTERNAL_H +/* + * A "disabled" interrupt should never fire, to catch problems + * we set its logical number to this + */ +#define XIVE_BAD_IRQ 0x7fffffff +#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) + /* Each CPU carry one of these with various 
per-CPU state */ struct xive_cpu { #ifdef CONFIG_SMP diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index c3842dbeb1b7..6f9cccea54f3 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,9 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -# Avoid clang warnings around longjmp/setjmp declarations -subdir-ccflags-y := -ffreestanding - GCOV_PROFILE := n KCOV_INSTRUMENT := n UBSAN_SANITIZE := n diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c index 5c1a50912229..9b0d85bff021 100644 --- a/arch/powerpc/xmon/nonstdio.c +++ b/arch/powerpc/xmon/nonstdio.c @@ -178,7 +178,7 @@ void xmon_printf(const char *format, ...) if (n && rc == 0) { /* No udbg hooks, fallback to printk() - dangerous */ - printk("%s", xmon_outbuf); + pr_cont("%s", xmon_outbuf); } } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 8057aafd5f5e..6d130c89fbd8 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -187,6 +188,8 @@ static void dump_tlb_44x(void); static void dump_tlb_book3e(void); #endif +static void clear_all_bpt(void); + #ifdef CONFIG_PPC64 #define REG "%.16lx" #else @@ -283,10 +286,38 @@ Commands:\n\ " U show uptime information\n" " ? help\n" " # n limit output to n lines per page (for dp, dpa, dl)\n" -" zr reboot\n\ - zh halt\n" +" zr reboot\n" +" zh halt\n" ; +#ifdef CONFIG_SECURITY +static bool xmon_is_locked_down(void) +{ + static bool lockdown; + + if (!lockdown) { + lockdown = !!security_locked_down(LOCKDOWN_XMON_RW); + if (lockdown) { + printf("xmon: Disabled due to kernel lockdown\n"); + xmon_is_ro = true; + } + } + + if (!xmon_is_ro) { + xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR); + if (xmon_is_ro) + printf("xmon: Read-only due to kernel lockdown\n"); + } + + return lockdown; +} +#else /* CONFIG_SECURITY */ +static inline bool xmon_is_locked_down(void) +{ + return false; +} +#endif + static struct pt_regs *xmon_regs; static inline void sync(void) @@ -438,7 +469,10 @@ static bool wait_for_other_cpus(int ncpus) return false; } -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ +static inline void get_output_lock(void) {} +static inline void release_output_lock(void) {} +#endif static inline int unrecoverable_excp(struct pt_regs *regs) { @@ -455,6 +489,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) int cmd = 0; struct bpt *bp; long recurse_jmp[JMP_BUF_LEN]; + bool locked_down; unsigned long offset; unsigned long flags; #ifdef CONFIG_SMP @@ -465,6 +500,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) local_irq_save(flags); hard_irq_disable(); + locked_down = xmon_is_locked_down(); + if (!fromipi) { tracing_enabled = tracing_is_on(); tracing_off(); @@ -518,7 +555,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) if (!fromipi) { get_output_lock(); - excprint(regs); + if (!locked_down) + excprint(regs); if (bp) { printf("cpu 0x%x stopped at breakpoint 0x%tx (", cpu, BP_NUM(bp)); @@ -570,10 +608,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } remove_bpts(); disable_surveillance(); - /* for breakpoint or single step, print the current instr. */ - if (bp || TRAP(regs) == 0xd00) - ppc_inst_dump(regs->nip, 1, 0); - printf("enter ? for help\n"); + + if (!locked_down) { + /* for breakpoint or single step, print curr insn */ + if (bp || TRAP(regs) == 0xd00) + ppc_inst_dump(regs->nip, 1, 0); + printf("enter ? 
for help\n"); + } + mb(); xmon_gate = 1; barrier(); @@ -597,8 +639,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi) spin_cpu_relax(); touch_nmi_watchdog(); } else { - cmd = cmds(regs); - if (cmd != 0) { + if (!locked_down) + cmd = cmds(regs); + if (locked_down || cmd != 0) { /* exiting xmon */ insert_bpts(); xmon_gate = 0; @@ -635,13 +678,16 @@ static int xmon_core(struct pt_regs *regs, int fromipi) "can't continue\n"); remove_bpts(); disable_surveillance(); - /* for breakpoint or single step, print the current instr. */ - if (bp || TRAP(regs) == 0xd00) - ppc_inst_dump(regs->nip, 1, 0); - printf("enter ? for help\n"); + if (!locked_down) { + /* for breakpoint or single step, print current insn */ + if (bp || TRAP(regs) == 0xd00) + ppc_inst_dump(regs->nip, 1, 0); + printf("enter ? for help\n"); + } } - cmd = cmds(regs); + if (!locked_down) + cmd = cmds(regs); insert_bpts(); in_xmon = 0; @@ -670,7 +716,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } } #endif - insert_cpu_bpts(); + if (locked_down) + clear_all_bpt(); + else + insert_cpu_bpts(); touch_nmi_watchdog(); local_irq_restore(flags); @@ -3761,6 +3810,11 @@ static void xmon_init(int enable) #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_xmon(int key) { + if (xmon_is_locked_down()) { + clear_all_bpt(); + xmon_init(0); + return; + } /* ensure xmon is enabled */ xmon_init(1); debugger(get_irq_regs()); @@ -3782,7 +3836,6 @@ static int __init setup_xmon_sysrq(void) device_initcall(setup_xmon_sysrq); #endif /* CONFIG_MAGIC_SYSRQ */ -#ifdef CONFIG_DEBUG_FS static void clear_all_bpt(void) { int i; @@ -3800,18 +3853,22 @@ static void clear_all_bpt(void) iabr = NULL; dabr.enabled = 0; } - - printf("xmon: All breakpoints cleared\n"); } +#ifdef CONFIG_DEBUG_FS static int xmon_dbgfs_set(void *data, u64 val) { xmon_on = !!val; xmon_init(xmon_on); /* make sure all breakpoints removed when disabling */ - if (!xmon_on) + if (!xmon_on) { clear_all_bpt(); + get_output_lock(); + printf("xmon: All breakpoints cleared\n"); + release_output_lock(); + } + return 0; } @@ -3837,7 +3894,11 @@ static int xmon_early __initdata; static int __init early_parse_xmon(char *p) { - if (!p || strncmp(p, "early", 5) == 0) { + if (xmon_is_locked_down()) { + xmon_init(0); + xmon_early = 0; + xmon_on = 0; + } else if (!p || strncmp(p, "early", 5) == 0) { /* just "xmon" is equivalent to "xmon=early" */ xmon_init(1); xmon_early = 1; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a0fa4be94a68..b21549a34447 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -58,7 +58,6 @@ config RISCV select EDAC_SUPPORT select ARCH_HAS_GIGANTIC_PAGE select ARCH_WANT_HUGE_PMD_SHARE if 64BIT - select SPARSEMEM_STATIC if 32BIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS select HAVE_COPY_THREAD_TLS @@ -102,7 +101,8 @@ config ARCH_FLATMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool y depends on MMU - select SPARSEMEM_VMEMMAP_ENABLE + select SPARSEMEM_STATIC if 32BIT && SPARSEMEM + select SPARSEMEM_VMEMMAP_ENABLE if 64BIT config ARCH_SELECT_MEMORY_MODEL def_bool ARCH_SPARSEMEM_ENABLE diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 88cfcb96bf23..cc04e66752aa 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -83,6 +83,7 @@ phy-mode = "gmii"; phy-handle = <&phy0>; phy0: ethernet-phy@0 { + compatible = "ethernet-phy-id0007.0771"; reg = <0>; }; }; diff --git 
a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 420a0dbef386..3c656fe97e58 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -62,6 +62,8 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y CONFIG_SPI=y CONFIG_SPI_SIFIVE=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y CONFIG_DRM_RADEON=y diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 3f1737f301cc..d0e24aaa2aa0 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -58,8 +58,16 @@ do { \ * The AQ/RL pair provides a RCpc critical section, but there's not really any * way we can take advantage of that here because the ordering is only enforced * on that one lock. Thus, we're just doing a full fence. + * + * Since we allow writeX to be called from preemptive regions we need at least + * an "o" in the predecessor set to ensure device writes are visible before the + * task is marked as available for scheduling on a new hart. While I don't see + * any concrete reason we need a full IO fence, it seems safer to just upgrade + * this in order to avoid any IO crossing a scheduling boundary. In both + * instances the scheduler pairs this with an mb(), so nothing is necessary on + * the new hart. */ -#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) +#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) #include diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index d969bab4a26b..262e5bbb2776 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -179,7 +179,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -224,7 +224,7 @@ RISCV_ACQUIRE_BARRIER \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -270,7 +270,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -316,7 +316,7 @@ " fence rw, rw\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index c6dcc5291f97..02fbc175142e 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -63,4 +63,11 @@ do { \ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. 
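Why the cmpxchg hunks above cast __old to (long): on RV64, lr.w sign-extends the 32-bit value it loads, so the expected value handed to the inline asm must be materialized sign-extended as well, or the bne comparison can never match for 32-bit values with the top bit set. A minimal user-space sketch of the mismatch (illustrative only, not kernel code; all names here are mine):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old = 0xfffffff2;                    /* e.g. a negative errno stored as u32 */
	uint64_t loaded   = (uint64_t)(int32_t)old;   /* what lr.w leaves in the register */
	uint64_t expected = (uint64_t)old;            /* zero-extended, as "rJ" (__old) could be pre-fix */

	printf("loaded   = %#llx\n", (unsigned long long)loaded);   /* 0xfffffffffffffff2 */
	printf("expected = %#llx\n", (unsigned long long)expected); /* 0x00000000fffffff2 */
	/* the two differ, so the comparison always fails; casting __old to
	 * (long) sign-extends it and lets the cmpxchg succeed */
	return 0;
}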
*/ #define MCOUNT_INSN_SIZE 8 + +#ifndef __ASSEMBLY__ +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + #endif diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 3db261c4810f..6a30794aa1ee 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -119,7 +119,10 @@ extern unsigned long min_low_pfn; #endif /* __ASSEMBLY__ */ -#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) +#define virt_addr_valid(vaddr) ({ \ + unsigned long _addr = (unsigned long)vaddr; \ + (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \ +}) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h index b0ab66e5fdb1..5b2e79e5bfa5 100644 --- a/arch/riscv/include/asm/pgtable-32.h +++ b/arch/riscv/include/asm/pgtable-32.h @@ -14,4 +14,6 @@ #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 34 + #endif /* _ASM_RISCV_PGTABLE_32_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 905372d7eeb8..3df9a82bdbfd 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -12,7 +12,11 @@ #include /* thread information allocation */ +#ifdef CONFIG_64BIT +#define THREAD_SIZE_ORDER (2) +#else #define THREAD_SIZE_ORDER (1) +#endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index d86cb17bbabe..22e0ae888406 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -10,4 +10,7 @@ /* vDSO location */ #define AT_SYSINFO_EHDR 33 +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 + #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 8ca479831142..9c87ae77ad5d 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -387,6 +387,7 @@ ENTRY(__switch_to) ENDPROC(__switch_to) .section ".rodata" + .align LGREG /* Exception vector table */ ENTRY(excp_vect_table) RISCV_PTR do_trap_insn_misaligned diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index c40fdcdeb950..291c579e1245 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return __ftrace_modify_call(rec->ip, addr, false); } + +/* + * This is called early on, and isn't wrapped by + * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold + * text_mutex, which triggers a lockdep failure. SMP isn't running so we could + * just directly poke the text, but it's simpler to just take the lock + * ourselves.
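For the virt_addr_valid() hunk above: the arithmetic in virt_to_pfn() is only meaningful for linear-map addresses, so the macro now rejects anything below PAGE_OFFSET before consulting pfn_valid(). A compact user-space sketch of the fixed shape (the constants and the pfn_valid stub are illustrative, not the real riscv layout):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT  12
#define PAGE_OFFSET 0xffffffe000000000UL	/* assumed linear-map base */

static bool pfn_valid_stub(unsigned long pfn) { (void)pfn; return true; }

/* reject anything below the linear mapping before doing the
 * virt_to_pfn() subtraction, which is undefined for such addresses */
static bool virt_addr_valid_sketch(unsigned long va)
{
	return va >= PAGE_OFFSET &&
	       pfn_valid_stub((va - PAGE_OFFSET) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", virt_addr_valid_sketch(0xffffffd000000000UL)); /* vmalloc-ish: 0 */
	printf("%d\n", virt_addr_valid_sketch(PAGE_OFFSET + 0x2000)); /* linear map: 1 */
	return 0;
}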
+ */ +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + int out; + + ftrace_arch_code_modify_prepare(); + out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); + ftrace_arch_code_modify_post_process(); + + return out; +} + int ftrace_update_ftrace_func(ftrace_func_t func) { int ret = __ftrace_modify_call((unsigned long)&ftrace_call, diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 72f89b7590dd..344793159b97 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -26,12 +26,17 @@ ENTRY(_start) /* reserved */ .word 0 .balign 8 +#ifdef CONFIG_RISCV_M_MODE + /* Image load offset (0MB) from start of RAM for M-mode */ + .dword 0 +#else #if __riscv_xlen == 64 /* Image load offset(2MB) from start of RAM */ .dword 0x200000 #else /* Image load offset(4MB) from start of RAM */ .dword 0x400000 +#endif #endif /* Effective size of kernel image */ .dword _end - _start diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 0940681d2f68..19e46f4160cc 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -63,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, #else /* !CONFIG_FRAME_POINTER */ -static void notrace walk_stackframe(struct task_struct *task, +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) { unsigned long sp, pc; diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index f3619f59d85c..12f8a7fce78b 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -8,6 +8,7 @@ #include #include #include +#include static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, { if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; + + if ((prot & PROT_WRITE) && (prot & PROT_EXEC)) + if (unlikely(!(prot & PROT_READ))) + return -EINVAL; + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 6a53c02e9c73..8aa70b519e04 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -4,6 +4,7 @@ * Copyright (C) 2017 SiFive */ +#include #include #include #include @@ -24,5 +25,7 @@ void __init time_init(void) riscv_timebase = prop; lpj_fine = riscv_timebase / HZ; + + of_clk_init(NULL); timer_probe(); } diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 33b16f4212f7..a4ee3a0e7d20 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) # We also create a special relocatable object that should mirror the symbol -# table and layout of the linked DSO. With ld -R we can then refer to -# these symbols in the kernel code rather than hand-coded addresses. +# table and layout of the linked DSO. With ld --just-symbols we can then +# refer to these symbols in the kernel code rather than hand-coded addresses. 
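One user-visible effect of the riscv_sys_mmap() hunk above: mapping requests that combine PROT_WRITE and PROT_EXEC without PROT_READ are now refused with -EINVAL. A small demonstration program (behaviour assumed from the hunk, so it is only meaningful on a kernel carrying it):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	/* write+exec without read: rejected by the new check */
	void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap: %s\n", strerror(errno));	/* expect EINVAL */
	else
		puts("mapping succeeded (kernel without the check)");
	return 0;
}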
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ -Wl,--build-id -Wl,--hash-style=both $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE $(call if_changed,vdsold) -LDFLAGS_vdso-syms.o := -r -R +LDFLAGS_vdso-syms.o := -r --just-symbols $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE $(call if_changed,ld) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index f5d813c1304d..d49e334071d4 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -115,8 +115,9 @@ void __init setup_bootmem(void) /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - set_max_mapnr(PFN_DOWN(mem_size)); - max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + max_low_pfn = max_pfn; + set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); @@ -166,12 +167,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) ptep = &fixmap_pte[pte_index(addr)]; - if (pgprot_val(prot)) { + if (pgprot_val(prot)) set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot)); - } else { + else pte_clear(&init_mm, addr, ptep); - local_flush_tlb_page(addr); - } + local_flush_tlb_page(addr); } static pte_t *__init get_pte_virt(phys_addr_t pa) diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index e2279fed8f56..0eefe6193253 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -1313,6 +1313,10 @@ out_be: emit(rv_ld(rd, 0, RV_REG_T1), ctx); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: emit_imm(RV_REG_T1, imm, ctx); diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index 4b86a8d3c121..e6bf5f40bff3 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -360,22 +360,23 @@ ENTRY(startup_kdump) # the save area and does disabled wait with a faulty address. 
# ENTRY(startup_pgm_check_handler) - stmg %r0,%r15,__LC_SAVE_AREA_SYNC - la %r1,4095 - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1) - mvc __LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC - mvc __LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + la %r8,4095 + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) + stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) + mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC + mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW ni __LC_RETURN_PSW,0xfc # remove IO and EX bits ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit oi __LC_RETURN_PSW+1,0x2 # set wait state bit - larl %r2,.Lold_psw_disabled_wait - stg %r2,__LC_PGM_NEW_PSW+8 - l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2) + larl %r9,.Lold_psw_disabled_wait + stg %r9,__LC_PGM_NEW_PSW+8 + l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9) brasl %r14,print_pgm_check_info .Lold_psw_disabled_wait: - la %r1,4095 - lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) + la %r8,4095 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) lpswe __LC_RETURN_PSW # disabled wait .Ldump_info_stack: .long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index dd95cdbd22ce..4cbb4b6d85a8 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -53,6 +53,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) { + /* max hunk is ARCH_RNG_BUF_SIZE */ + if (nbytes > ARCH_RNG_BUF_SIZE) + return false; + /* lock rng buffer */ if (!spin_trylock(&arch_rng_lock)) return false; diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index abe60268335d..0fe5600a037e 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -31,12 +31,12 @@ #define KVM_USER_MEM_SLOTS 32 /* - * These seem to be used for allocating ->chip in the routing table, - * which we don't use. 4096 is an out-of-thin-air value. If we need - * to look at ->chip later on, we'll need to revisit this. + * These seem to be used for allocating ->chip in the routing table, which we + * don't use. 1 is as small as we can get to reduce the needed memory. If we + * need to look at ->chip later on, we'll need to revisit this. 
*/ #define KVM_NR_IRQCHIPS 1 -#define KVM_IRQCHIP_NUM_PINS 4096 +#define KVM_IRQCHIP_NUM_PINS 1 #define KVM_HALT_POLL_NS_DEFAULT 50000 /* s390-specific vcpu->requests bit members */ diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 35f8cbe7e5bb..c759dcffa9ea 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h @@ -17,7 +17,6 @@ void numa_setup(void); int numa_pfn_to_nid(unsigned long pfn); -int __node_distance(int a, int b); void numa_update_cpu_topology(void); extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index cd060b5dd8fd..e4dc64cc9c55 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -8,6 +8,10 @@ #include #include +/* I/O size constraints */ +#define ZPCI_MAX_READ_SIZE 8 +#define ZPCI_MAX_WRITE_SIZE 128 + /* I/O Map */ #define ZPCI_IOMAP_SHIFT 48 #define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL @@ -140,7 +144,8 @@ static inline int zpci_memcpy_fromio(void *dst, while (n > 0) { size = zpci_get_max_write_size((u64 __force) src, - (u64) dst, n, 8); + (u64) dst, n, + ZPCI_MAX_READ_SIZE); rc = zpci_read_single(dst, src, size); if (rc) break; @@ -161,7 +166,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst, while (n > 0) { size = zpci_get_max_write_size((u64 __force) dst, - (u64) src, n, 128); + (u64) src, n, + ZPCI_MAX_WRITE_SIZE); if (size > 8) /* main path */ rc = zpci_write_block(dst, src, size); else diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 50b4ce8cddfd..918f0ba4f4d2 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -29,7 +29,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -37,7 +37,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -68,7 +68,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -84,7 +84,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -95,14 +95,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -114,14 +114,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -136,10 +136,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); 
\ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -152,10 +152,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -171,11 +171,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 34a655ad7123..5ce586948d92 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1247,26 +1247,46 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address) #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address) #define pgd_offset_k(address) pgd_offset(&init_mm, address) -static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address) { - if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) - return (p4d_t *) pgd_deref(*pgd) + p4d_index(address); - return (p4d_t *) pgd; + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) + return (p4d_t *) pgd_deref(pgd) + p4d_index(address); + return (p4d_t *) pgdp; +} +#define p4d_offset_lockless p4d_offset_lockless + +static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address) +{ + return p4d_offset_lockless(pgdp, *pgdp, address); } -static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address) { - if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) - return (pud_t *) p4d_deref(*p4d) + pud_index(address); - return (pud_t *) p4d; + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) + return (pud_t *) p4d_deref(p4d) + pud_index(address); + return (pud_t *) p4dp; } +#define pud_offset_lockless pud_offset_lockless -static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address) { - if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) - return (pmd_t *) pud_deref(*pud) + pmd_index(address); - return (pmd_t *) pud; + return pud_offset_lockless(p4dp, *p4dp, address); } +#define pud_offset pud_offset + +static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address) +{ + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) + return (pmd_t *) pud_deref(pud) + pmd_index(address); + return (pmd_t *) pudp; +} +#define pmd_offset_lockless pmd_offset_lockless + +static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address) +{ + return pmd_offset_lockless(pudp, *pudp, address); +} +#define pmd_offset pmd_offset static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address) { diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index 0ae4bbf7779c..3679d224fd3c 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -111,4 +111,15 @@ struct stack_frame { r2; \ }) +#define CALL_ON_STACK_NORETURN(fn, stack) \ +({ \ + asm 
volatile( \ + " la 15,0(%[_stack])\n" \ + " xc %[_bc](8,15),%[_bc](15)\n" \ + " brasl 14,%[_fn]\n" \ + ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \ + [_stack] "a" (stack), [_fn] "X" (fn)); \ + BUG(); \ +}) + #endif /* _ASM_S390_STACKTRACE_H */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index f073292e9fdb..d9d5de0f67ff 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -33,7 +33,17 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; + unsigned long error = regs->gprs[2]; +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) { + /* + * Sign-extend the value so (int)-EFOO becomes (long)-EFOO + * and will match correctly in comparisons. + */ + error = (long)(int)error; + } +#endif + return IS_ERR_VALUE(error) ? error : 0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index cca406fdbe51..ef9dd253dfad 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -83,8 +83,6 @@ static inline const struct cpumask *cpumask_of_node(int node) #define pcibus_to_node(bus) __pcibus_to_node(bus) -#define node_distance(a, b) __node_distance(a, b) - #else /* !CONFIG_NUMA */ #define numa_node_id numa_node_id diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index a470f1fa9f2a..324438889fe1 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); -void s390_kernel_write(void *dst, const void *src, size_t size); +void *s390_kernel_write(void *dst, const void *src, size_t size); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 169d7604eb80..f3ba84fa9bd1 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -36,6 +36,7 @@ struct vdso_data { __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */ __u32 ts_dir; /* TOD steering direction 0x64 */ __u64 ts_end; /* TOD steering end 0x68 */ + __u32 hrtimer_res; /* hrtimer resolution 0x70 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index b6628586ab70..a65cb4924bdb 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir); OFFSET(__VDSO_TS_END, vdso_data, ts_end); + OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res); OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr); @@ -87,7 +88,6 @@ int main(void) DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); - DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); BLANK(); /* idle data offsets */ diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index af013b4244d3..2da027359798 100644 --- a/arch/s390/kernel/cpcmd.c +++ 
b/arch/s390/kernel/cpcmd.c @@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen) static int diag8_response(int cmdlen, char *response, int *rlen) { + unsigned long _cmdlen = cmdlen | 0x40000000L; + unsigned long _rlen = *rlen; register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; register unsigned long reg3 asm ("3") = (addr_t) response; - register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; - register unsigned long reg5 asm ("5") = *rlen; + register unsigned long reg4 asm ("4") = _cmdlen; + register unsigned long reg5 asm ("5") = _rlen; asm volatile( " diag %2,%0,0x8\n" diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 6d321f5f101d..7184d55d87aa 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) if (!areas) goto fail_malloc_areas; for (i = 0; i < nr_areas; i++) { + /* GFP_NOWARN to avoid user triggerable WARN, we handle fails */ areas[i] = kmalloc_array(pages_per_area, sizeof(debug_entry_t *), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!areas[i]) goto fail_malloc_areas2; for (j = 0; j < pages_per_area; j++) { diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index e9dac9a24d3f..ccba63aaeb47 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -84,7 +84,7 @@ static int show_diag_stat(struct seq_file *m, void *v) static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) { - return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; + return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL; } static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) @@ -133,7 +133,7 @@ void diag_stat_inc(enum diag_stat_enum nr) } EXPORT_SYMBOL(diag_stat_inc); -void diag_stat_inc_norecursion(enum diag_stat_enum nr) +void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr) { this_cpu_inc(diag_stat.counter[nr]); trace_s390_diagnose_norecursion(diag_map[nr].code); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f304802ecf7b..03e35b399c4e 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -557,7 +557,7 @@ void show_code(struct pt_regs *regs) void print_fn_code(unsigned char *code, unsigned long len) { - char buffer[64], *ptr; + char buffer[128], *ptr; int opsize, i; while (len) { diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b432d63d0b37..2531776cf6cf 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -169,6 +169,8 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; + if (IS_ENABLED(CONFIG_KASAN)) + psw.mask |= PSW_MASK_DAT; psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; psw.addr = (unsigned long) s390_base_pgm_handler; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index bc85987727f0..5cba1815b8f8 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -368,9 +368,9 @@ ENTRY(system_call) jnz .Lsysc_nr_ok # svc 0: system call number in %r1 llgfr %r1,%r1 # clear high word in r1 + sth %r1,__PT_INT_CODE+2(%r11) cghi %r1,NR_syscalls jnl .Lsysc_nr_ok - sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,3 .Lsysc_nr_ok: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) @@ -993,6 +993,7 @@ ENDPROC(ext_int_handler) * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. 
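On the diag8_response() hunk above: evaluating `cmdlen | 0x40000000L` and the memory load `*rlen` directly in the initializers of fixed-register asm variables can let the compiler clobber one already-bound fixed register while computing another initializer; hoisting the computation into ordinary locals first, as the patch does, reduces the register bindings to plain moves. A sketch of the two shapes (my reading of the hunk's intent, not an authoritative rationale):

/* fragile: computation and a memory load while r4/r5 are being bound */
register unsigned long reg4 asm("4") = cmdlen | 0x40000000L;
register unsigned long reg5 asm("5") = *rlen;

/* safe shape from the hunk: compute first, then bind */
unsigned long _cmdlen = cmdlen | 0x40000000L;
unsigned long _rlen = *rlen;
register unsigned long reg4 asm("4") = _cmdlen;
register unsigned long reg5 asm("5") = _rlen;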
*/ ENTRY(psw_idle) + stg %r14,(__SF_GPRS+8*8)(%r15) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 8371855042dc..da550cb8b31b 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -294,11 +294,6 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy) return IRQ_HANDLED; } -static struct irqaction external_interrupt = { - .name = "EXT", - .handler = do_ext_interrupt, -}; - void __init init_ext_interrupts(void) { int idx; @@ -308,7 +303,8 @@ void __init init_ext_interrupts(void) irq_set_chip_and_handler(EXT_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(EXT_INTERRUPT, &external_interrupt); + if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL)) + panic("Failed to register EXT interrupt\n"); } static DEFINE_SPINLOCK(irq_subclass_lock); diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 8415ae7d2a23..f9e4baa64b67 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -151,7 +151,7 @@ static int kexec_file_add_initrd(struct kimage *image, buf.mem += crashk_res.start; buf.memsz = buf.bufsz; - data->parm->initrd_start = buf.mem; + data->parm->initrd_start = data->memsz; data->parm->initrd_size = buf.memsz; data->memsz += buf.memsz; diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c index d5035de9020e..b7182cec48dc 100644 --- a/arch/s390/kernel/machine_kexec_reloc.c +++ b/arch/s390/kernel/machine_kexec_reloc.c @@ -28,6 +28,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, break; case R_390_64: /* Direct 64 bit. */ case R_390_GLOB_DAT: + case R_390_JMP_SLOT: *(u64 *)loc = val; break; case R_390_PC16: /* PC relative 16 bit. */ diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 3431b2d5e334..f942341429b1 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -41,6 +41,7 @@ EXPORT_SYMBOL(_mcount) ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller + stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller lgr %r1,%r15 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index fdb8083e7870..b83bddf35e06 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle, idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; - te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; + te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | + SDB_TE_ALERT_REQ_MASK); te->overflow = 0; } /* Save the position of empty SDBs */ @@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, te = aux_sdb_trailer(aux, alert_index); do { orig_flags = te->flags; - orig_overflow = te->overflow; - *overflow = orig_overflow; + *overflow = orig_overflow = te->overflow; if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { /* * SDB is already set by hardware. 
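On aux_set_alert() above: the SDB trailer is updated concurrently by the sampling hardware and other CPUs, so the function snapshots te->flags/te->overflow and retries through a compare-and-swap loop instead of storing blindly; the chained `*overflow = orig_overflow = te->overflow;` merely captures the snapshot it will compare against. A generic user-space sketch of that retry pattern (not the kernel's cmpxchg_double, just the idea):

#include <stdio.h>
#include <stdatomic.h>

static void set_flag(_Atomic unsigned long *word, unsigned long bit)
{
	unsigned long old = atomic_load(word);

	/* on failure, 'old' is refreshed with the current value and we retry */
	while (!atomic_compare_exchange_weak(word, &old, old | bit))
		;
}

int main(void)
{
	_Atomic unsigned long flags = 0;

	set_flag(&flags, 1UL << 3);
	printf("flags = %#lx\n", (unsigned long)atomic_load(&flags));
	return 0;
}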
@@ -1589,6 +1588,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) perf_aux_output_end(handle, size); num_sdb = aux->sfb.num_sdb; + num_sdb = aux->sfb.num_sdb; while (!done) { /* Get an output handle */ aux = perf_aux_output_begin(handle, cpuhw->event); @@ -1711,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, } /* Allocate aux_buffer struct for the event */ - aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); + aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL); if (!aux) goto no_aux; sfb = &aux->sfb; @@ -2217,4 +2217,4 @@ out: } arch_initcall(init_cpum_sampling_pmu); -core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); +core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 6ebc2117c66c..91b9b3f73de6 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -165,8 +165,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n) static int show_cpuinfo(struct seq_file *m, void *v) { unsigned long n = (unsigned long) v - 1; + unsigned long first = cpumask_first(cpu_online_mask); - if (!n) + if (n == first) show_cpu_summary(m, v); if (!machine_has_cpu_mhz) return 0; @@ -179,6 +180,8 @@ static inline void *c_update(loff_t *pos) { if (*pos) *pos = cpumask_next(*pos - 1, cpu_online_mask); + else + *pos = cpumask_first(cpu_online_mask); return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ad71132374f0..ad74472ce967 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } +static void fixup_int_code(struct task_struct *child, addr_t data) +{ + struct pt_regs *regs = task_pt_regs(child); + int ilc = regs->int_code >> 16; + u16 insn; + + if (ilc > 6) + return; + + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) + return; + + /* double check that tracee stopped on svc instruction */ + if ((insn >> 8) != 0xa) + return; + + regs->int_code = 0x20000 | (data & 0xffff); +} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
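On fixup_int_code() above: regs->int_code carries the interrupting instruction's length in bytes in its upper half (at most 6, hence the `ilc > 6` bail-out; 2 for a directly executed svc) and the system-call number in its low 16 bits. When a tracer pokes a new syscall number into gpr 2 at a syscall stop, the saved int_code has to be rewritten to agree. A worked illustration of the encoding, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* svc is a 2-byte instruction; "svc 11" yields length 2, number 11 */
	uint32_t int_code = (2u << 16) | 11;

	/* a tracer rewrites the syscall number to 1; this mirrors
	 * regs->int_code = 0x20000 | (data & 0xffff) from the hunk */
	uint32_t data = 1;
	int_code = 0x20000 | (data & 0xffff);

	printf("int_code = %#x, syscall = %u, ilen = %u\n",
	       int_code, int_code & 0xffff, int_code >> 16);
	return 0;
}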
@@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) struct user *dummy = NULL; addr_t offset; + if (addr < (addr_t) &dummy->regs.acrs) { + struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack */ @@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* Invalid addressing mode bits */ return -EINVAL; } - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct user, regs.gprs[2])) + fixup_int_code(child, data); + *(addr_t *)((addr_t) &regs->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* @@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct compat_user, regs.gprs[2])) + fixup_int_code(child, data); /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } @@ -838,40 +867,45 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { unsigned long mask = -1UL; + long ret = -1; /* * The sysc_tracesys code in entry.S stored the system * call number to gprs[2]. */ if (test_thread_flag(TIF_SYSCALL_TRACE) && - (tracehook_report_syscall_entry(regs) || - regs->gprs[2] >= NR_syscalls)) { /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip + * Tracing decided this syscall should not happen. Skip + tracehook_report_syscall_entry(regs)) { /* * the system call and the system call restart handling. */ - clear_pt_regs_flag(regs, PIF_SYSCALL); - return -1; + goto skip; } /* Do the secure computing check after ptrace. */ if (secure_computing(NULL)) { /* seccomp failures shouldn't expose any additional code. */ - return -1; + goto skip; } if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->gprs[2]); + trace_sys_enter(regs, regs->int_code & 0xffff); if (is_compat_task()) mask = 0xffffffff; - audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask, + audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, regs->gprs[3] &mask, regs->gprs[4] &mask, regs->gprs[5] &mask); + if ((signed long)regs->gprs[2] >= NR_syscalls) { + regs->gprs[2] = -ENOSYS; + ret = -ENOSYS; + } return regs->gprs[2]; +skip: + clear_pt_regs_flag(regs, PIF_SYSCALL); + return ret; } asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) @@ -1256,7 +1290,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb) cb->pc == 1 && cb->qc == 0 && cb->reserved2 == 0 && - cb->key == PAGE_DEFAULT_KEY && cb->reserved3 == 0 && cb->reserved4 == 0 && cb->reserved5 == 0 && @@ -1320,7 +1353,11 @@ static int s390_runtime_instr_set(struct task_struct *target, kfree(data); return -EINVAL; } - + /* + * Override access key in any case, since user space should + * not be able to set it, nor should it care about it.
+ */ + ri_cb.key = PAGE_DEFAULT_KEY >> 4; preempt_disable(); if (!target->thread.ri_cb) target->thread.ri_cb = data; diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 125c7f6e8715..1788a5454b6f 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->k = 1; cb->ps = 1; cb->pc = 1; - cb->key = PAGE_DEFAULT_KEY; + cb->key = PAGE_DEFAULT_KEY >> 4; cb->v = 1; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4366962f4930..3588f4c65a4d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -356,7 +356,6 @@ early_initcall(async_stack_realloc); void __init arch_call_rest_init(void) { - struct stack_frame *frame; unsigned long stack; stack = stack_alloc(); @@ -369,13 +368,7 @@ void __init arch_call_rest_init(void) set_task_stack_end_magic(current); stack += STACK_INIT_OFFSET; S390_lowcore.kernel_stack = stack; - frame = (struct stack_frame *) stack; - memset(frame, 0, sizeof(*frame)); - /* Branch to rest_init on the new stack, never returns */ - asm volatile( - " la 15,0(%[_frame])\n" - " jg rest_init\n" - : : [_frame] "a" (frame)); + CALL_ON_STACK_NORETURN(rest_init, stack); } static void __init setup_lowcore_dat_off(void) @@ -634,7 +627,7 @@ static struct notifier_block kdump_mem_nb = { /* * Make sure that the area behind memory_end is protected */ -static void reserve_memory_end(void) +static void __init reserve_memory_end(void) { if (memory_end_set) memblock_reserve(memory_end, ULONG_MAX); @@ -643,7 +636,7 @@ static void reserve_memory_end(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void reserve_oldmem(void) +static void __init reserve_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) @@ -655,7 +648,7 @@ static void reserve_oldmem(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void remove_oldmem(void) +static void __init remove_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) @@ -929,9 +922,9 @@ static int __init setup_hwcaps(void) if (MACHINE_HAS_VX) { elf_hwcap |= HWCAP_S390_VXRS; if (test_facility(134)) - elf_hwcap |= HWCAP_S390_VXRS_EXT; - if (test_facility(135)) elf_hwcap |= HWCAP_S390_VXRS_BCD; + if (test_facility(135)) + elf_hwcap |= HWCAP_S390_VXRS_EXT; if (test_facility(148)) elf_hwcap |= HWCAP_S390_VXRS_EXT2; if (test_facility(152)) @@ -1120,6 +1113,7 @@ void __init setup_arch(char **cmdline_p) if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) nospec_auto_detect(); + jump_label_init(); parse_early_param(); #ifdef CONFIG_CRASH_DUMP /* Deactivate elfcorehdr= kernel parameter */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f468a10e5206..8c51462f13fd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -403,7 +403,7 @@ int smp_find_processor_id(u16 address) return -1; } -bool arch_vcpu_is_preempted(int cpu) +bool notrace arch_vcpu_is_preempted(int cpu) { if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) return false; @@ -413,7 +413,7 @@ bool arch_vcpu_is_preempted(int cpu) } EXPORT_SYMBOL(arch_vcpu_is_preempted); -void smp_yield_cpu(int cpu) +void notrace smp_yield_cpu(int cpu) { if (MACHINE_HAS_DIAG9C) { diag_stat_inc_norecursion(DIAG_STAT_X09C); @@ -765,7 +765,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) { struct sclp_core_entry *core; - cpumask_t avail; + static cpumask_t avail; bool 
configured; u16 core_id; int nr, i; @@ -845,13 +845,14 @@ void __init smp_detect_cpus(void) static void smp_init_secondary(void) { - int cpu = smp_processor_id(); + int cpu = raw_smp_processor_id(); S390_lowcore.last_update_clock = get_tod_clock(); restore_access_regs(S390_lowcore.access_regs_save_area); set_cpu_flag(CIF_ASCE_PRIMARY); set_cpu_flag(CIF_ASCE_SECONDARY); cpu_init(); + rcu_cpu_starting(cpu); preempt_disable(); init_cpu_timer(); vtime_init(); @@ -878,30 +879,18 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid) S390_lowcore.restart_source = -1UL; __ctl_load(S390_lowcore.cregs_save_area, 0, 15); __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); - CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0); + CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack); } /* Upping and downing of CPUs */ int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - struct pcpu *pcpu; - int base, i, rc; + struct pcpu *pcpu = pcpu_devices + cpu; + int rc; - pcpu = pcpu_devices + cpu; if (pcpu->state != CPU_STATE_CONFIGURED) return -EIO; - base = smp_get_base_cpu(cpu); - for (i = 0; i <= smp_cpu_mtid; i++) { - if (base + i < nr_cpu_ids) - if (cpu_online(base + i)) - break; - } - /* - * If this is the first CPU of the core to get online - * do an initial CPU reset. - */ - if (i > smp_cpu_mtid && - pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) != + if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e8766beee5ad..11c32b228f51 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->tk_mult = tk->tkr_mono.mult; vdso_data->tk_shift = tk->tkr_mono.shift; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++vdso_data->tb_update_count; } @@ -353,8 +354,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sync_word); static DEFINE_MUTEX(clock_sync_mutex); static unsigned long clock_sync_flags; -#define CLOCK_SYNC_HAS_STP 0 -#define CLOCK_SYNC_STP 1 +#define CLOCK_SYNC_HAS_STP 0 +#define CLOCK_SYNC_STP 1 +#define CLOCK_SYNC_STPINFO_VALID 2 /* * The get_clock function for the physical clock. 
It will get the current @@ -591,6 +593,22 @@ void stp_queue_work(void) queue_work(time_sync_wq, &stp_work); } +static int __store_stpinfo(void) +{ + int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); + + if (rc) + clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); + else + set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); + return rc; +} + +static int stpinfo_valid(void) +{ + return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); +} + static int stp_sync_clock(void *data) { struct clock_sync_data *sync = data; @@ -612,8 +630,7 @@ static int stp_sync_clock(void *data) if (rc == 0) { sync->clock_delta = clock_delta; clock_sync_global(clock_delta); - rc = chsc_sstpi(stp_page, &stp_info, - sizeof(struct stp_sstpi)); + rc = __store_stpinfo(); if (rc == 0 && stp_info.tmd != 2) rc = -EAGAIN; } @@ -658,7 +675,7 @@ static void stp_work_fn(struct work_struct *work) if (rc) goto out_unlock; - rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); + rc = __store_stpinfo(); if (rc || stp_info.c == 0) goto out_unlock; @@ -695,10 +712,14 @@ static ssize_t stp_ctn_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%016llx\n", - *(unsigned long long *) stp_info.ctnid); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%016llx\n", + *(unsigned long long *) stp_info.ctnid); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); @@ -707,9 +728,13 @@ static ssize_t stp_ctn_type_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.ctn); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.ctn); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); @@ -718,9 +743,13 @@ static ssize_t stp_dst_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x2000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x2000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); @@ -729,9 +758,13 @@ static ssize_t stp_leap_seconds_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x8000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x8000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); @@ -740,9 +773,13 @@ static ssize_t stp_stratum_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL); @@ -751,9 +788,13 
@@ static ssize_t stp_time_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x0800)) - return -ENODATA; - return sprintf(buf, "%i\n", (int) stp_info.tto); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x0800)) + ret = sprintf(buf, "%i\n", (int) stp_info.tto); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL); @@ -762,9 +803,13 @@ static ssize_t stp_time_zone_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x4000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x4000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(time_zone_offset, 0400, @@ -774,9 +819,13 @@ static ssize_t stp_timing_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.tmd); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.tmd); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); @@ -785,9 +834,13 @@ static ssize_t stp_timing_state_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.tst); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_work_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.tst); + mutex_unlock(&stp_work_mutex); + return ret; } static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL); diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c index 490b52e85014..11a669f3cc93 100644 --- a/arch/s390/kernel/trace.c +++ b/arch/s390/kernel/trace.c @@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose); static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); -void trace_s390_diagnose_norecursion(int diag_nr) +void notrace trace_s390_diagnose_norecursion(int diag_nr) { unsigned long flags; unsigned int *depth; diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index bec19e7e6e1c..4a66a1cb919b 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin -KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - -Wl,--hash-style=both +ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ + --hash-style=both --build-id -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) @@ -37,8 +37,8 @@ KASAN_SANITIZE := n $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE - $(call if_changed,vdso64ld) +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE + $(call if_changed,ld) # strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands -quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) 
$(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 081435398e0a..0c79caa32b59 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -17,12 +17,14 @@ .type __kernel_clock_getres,@function __kernel_clock_getres: CFI_STARTPROC - larl %r1,4f + larl %r1,3f + lg %r0,0(%r1) cghi %r2,__CLOCK_REALTIME_COARSE je 0f cghi %r2,__CLOCK_MONOTONIC_COARSE je 0f - larl %r1,3f + larl %r1,_vdso_data + llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1) cghi %r2,__CLOCK_REALTIME je 0f cghi %r2,__CLOCK_MONOTONIC @@ -36,7 +38,6 @@ __kernel_clock_getres: jz 2f 0: ltgr %r3,%r3 jz 1f /* res == NULL */ - lg %r0,0(%r1) xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ stg %r0,8(%r3) /* store tp->tv_usec */ 1: lghi %r2,0 @@ -45,6 +46,5 @@ __kernel_clock_getres: svc 0 br %r14 CFI_ENDPROC -3: .quad __CLOCK_REALTIME_RES -4: .quad __CLOCK_COARSE_RES +3: .quad __CLOCK_COARSE_RES .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c475ca49cfc6..6e60cc2443b2 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk) " stck %1" /* Store current tod clock value */ #endif : "=Q" (S390_lowcore.last_update_timer), - "=Q" (S390_lowcore.last_update_clock)); + "=Q" (S390_lowcore.last_update_clock) + : : "cc"); clock = S390_lowcore.last_update_clock - clock; timer -= S390_lowcore.last_update_timer; @@ -216,7 +217,7 @@ void vtime_flush(struct task_struct *tsk) avg_steal = S390_lowcore.avg_steal_timer / 2; if ((s64) steal > 0) { S390_lowcore.steal_timer = 0; - account_steal_time(steal); + account_steal_time(cputime_to_nsecs(steal)); avg_steal += steal; } S390_lowcore.avg_steal_timer = avg_steal; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index f4c51756c462..4c56de542960 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -16,6 +16,23 @@ #include #include "kvm-s390.h" +/** + * _kvm_s390_real_to_abs - convert guest real address to guest absolute address + * @prefix - guest prefix + * @gra - guest real address + * + * Returns the guest absolute address that corresponds to the passed guest real + * address @gra by applying the given prefix. + */ +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra) +{ + if (gra < 2 * PAGE_SIZE) + gra += prefix; + else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) + gra -= prefix; + return gra; +} + /** * kvm_s390_real_to_abs - convert guest real address to guest absolute address * @vcpu - guest virtual cpu @@ -27,13 +44,30 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, unsigned long gra) { - unsigned long prefix = kvm_s390_get_prefix(vcpu); + return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra); +} - if (gra < 2 * PAGE_SIZE) - gra += prefix; - else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) - gra -= prefix; - return gra; +/** + * _kvm_s390_logical_to_effective - convert guest logical to effective address + * @psw: psw of the guest + * @ga: guest logical address + * + * Convert a guest logical address to an effective address by applying the + * rules of the addressing mode defined by bits 31 and 32 of the given PSW + * (extended/basic addressing mode).
+ * + * Depending on the addressing mode, the upper 40 bits (24 bit addressing + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing + * mode) of @ga will be zeroed and the remaining bits will be returned. + */ +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw, + unsigned long ga) +{ + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) + return ga; + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) + return ga & ((1UL << 31) - 1); + return ga & ((1UL << 24) - 1); } /** @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, unsigned long ga) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; - - if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) - return ga; - if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) - return ga & ((1UL << 31) - 1); - return ga & ((1UL << 24) - 1); + return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga); } /* diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 756c627f7e54..d08e13c6dc98 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1932,6 +1932,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } + if (start >= slots->used_slots) + return slots->used_slots - 1; + if (gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); @@ -3977,16 +3980,16 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; if (MACHINE_HAS_GS) { + preempt_disable(); __ctl_set_bit(2, 4); if (vcpu->arch.gs_enabled) save_gs_cb(current->thread.gs_cb); - preempt_disable(); current->thread.gs_cb = vcpu->arch.host_gscb; restore_gs_cb(vcpu->arch.host_gscb); - preempt_enable(); if (!vcpu->arch.host_gscb) __ctl_clear_bit(2, 4); vcpu->arch.host_gscb = NULL; + preempt_enable(); } /* SIE will save etoken directly into SDNX and therefore kvm_run */ } diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ed52ffa8d5d4..560310e29e27 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu) * available for the guest are AQIC and TAPQ with the t bit set * since we do not set IC.3 (FIII) we currently will only intercept * the AQIC function code. + * Note: running nested under z/VM can result in intercepts for other + * function codes, e.g. PQAP(QCI). We do not support this and bail out. 
*/ reg0 = vcpu->run->s.regs.gprs[0]; fc = (reg0 >> 24) & 0xff; - if (WARN_ON_ONCE(fc != 0x03)) + if (fc != 0x03) return -EOPNOTSUPP; /* PQAP instruction is allowed for guest kernel only */ diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 076090f9e666..4f6c22d72072 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -1202,6 +1202,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->iprcc = PGM_ADDRESSING; scb_s->pgmilc = 4; scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4); + rc = 1; } return rc; } diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index c4f8039a35e8..0267405ab7c6 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void) { mm_segment_t old_fs; unsigned long asce, cr; + unsigned long flags; old_fs = current->thread.mm_segment; if (old_fs & 1) return old_fs; + /* protect against a concurrent page table upgrade */ + local_irq_save(flags); current->thread.mm_segment |= 1; asce = S390_lowcore.kernel_asce; if (likely(old_fs == USER_DS)) { @@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void) __ctl_load(asce, 7, 7); set_cpu_flag(CIF_ASCE_SECONDARY); } + local_irq_restore(flags); return old_fs; } EXPORT_SYMBOL(enable_sacf_uaccess); diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index edcdca97e85e..4fa7a562c6fc 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start, static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level) { + const int asce_type = gmap->asce & _ASCE_TYPE_MASK; unsigned long *table; if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4)) return NULL; if (gmap_is_shadow(gmap) && gmap->removed) return NULL; - if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11))) + + if (asce_type != _ASCE_TYPE_REGION1 && + gaddr & (-1UL << (31 + (asce_type >> 2) * 11))) return NULL; + table = gmap->table; switch (gmap->asce & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: @@ -1840,6 +1844,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, goto out_free; } else if (*table & _REGION_ENTRY_ORIGIN) { rc = -EAGAIN; /* Race with shadow */ + goto out_free; } crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); /* mark as invalid as long as the parent table is not protected */ @@ -2480,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4], } EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->vma; + + split_huge_pmd(vma, pmd, addr); + return 0; +} + +static const struct mm_walk_ops thp_split_walk_ops = { + .pmd_entry = thp_split_walk_pmd_entry, +}; + static inline void thp_split_mm(struct mm_struct *mm) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct vm_area_struct *vma; - unsigned long addr; for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { - for (addr = vma->vm_start; - addr < vma->vm_end; - addr += PAGE_SIZE) - follow_page(vma, addr, FOLL_SPLIT); vma->vm_flags &= ~VM_HUGEPAGE; vma->vm_flags |= VM_NOHUGEPAGE; + walk_page_vma(vma, &thp_split_walk_ops, NULL); } mm->def_flags |= VM_NOHUGEPAGE; -#endif } +#else +static inline void thp_split_mm(struct mm_struct *mm) +{ +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * Remove all empty zero pages from the mapping for lazy 
refaulting diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 5674710a4841..ff8234bca56c 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste) _PAGE_YOUNG); #ifdef CONFIG_MEM_SOFT_DIRTY pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, - _PAGE_DIRTY); + _PAGE_SOFT_DIRTY); #endif pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); @@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, rste &= ~_SEGMENT_ENTRY_NOEXEC; /* Set correct table type for 2G hugepages */ - if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) - rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE; - else + if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) { + if (likely(pte_present(pte))) + rste |= _REGION3_ENTRY_LARGE; + rste |= _REGION_ENTRY_TYPE_R3; + } else if (likely(pte_present(pte))) rste |= _SEGMENT_ENTRY_LARGE; + clear_huge_pte_skeys(mm, rste); pte_val(*ptep) = rste; } diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index de7ca4b6718f..1d17413b319a 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -55,19 +55,26 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz */ static DEFINE_SPINLOCK(s390_kernel_write_lock); -void notrace s390_kernel_write(void *dst, const void *src, size_t size) +notrace void *s390_kernel_write(void *dst, const void *src, size_t size) { + void *tmp = dst; unsigned long flags; long copied; spin_lock_irqsave(&s390_kernel_write_lock, flags); - while (size) { - copied = s390_kernel_write_odd(dst, src, size); - dst += copied; - src += copied; - size -= copied; + if (!(flags & PSW_MASK_DAT)) { + memcpy(dst, src, size); + } else { + while (size) { + copied = s390_kernel_write_odd(tmp, src, size); + tmp += copied; + src += copied; + size -= copied; + } } spin_unlock_irqrestore(&s390_kernel_write_lock, flags); + + return dst; } static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 3dd253f81a77..46071be897ab 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; - if (current->active_mm == mm) - set_user_asce(mm); + /* we must change all active ASCEs to avoid the creation of new TLBs */ + if (current->active_mm == mm) { + S390_lowcore.user_asce = mm->context.asce; + if (current->thread.mm_segment == USER_DS) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + /* Mark user-ASCE present in CR1 */ + clear_cpu_flag(CIF_ASCE_PRIMARY); + } + if (current->thread.mm_segment == USER_DS_SACF) { + __ctl_load(S390_lowcore.user_asce, 7, 7); + /* enable_sacf_uaccess does all or nothing */ + WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY)); + } + } __tlb_flush_local(); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index c8c16b5eed6b..71fa4eb973ae 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -913,6 +913,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; } break; + /* + * BPF_NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; /* * BPF_ST(X) */ diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index d2910fa834c8..8386c58fdb3a 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -49,12 +49,6 @@ void 
numa_update_cpu_topology(void) mode->update_cpu_topology(); } -int __node_distance(int a, int b) -{ - return mode->distance ? mode->distance(a, b) : 0; -} -EXPORT_SYMBOL(__node_distance); - int numa_debug_enabled; /* diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 281e0dd4c614..20e093f86329 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -309,14 +309,13 @@ out: int clp_disable_fh(struct zpci_dev *zdev) { - u32 fh = zdev->fh; int rc; if (!zdev_enabled(zdev)) return 0; rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN); - zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); + zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); return rc; } diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index fbe97ab2e228..743f257cf2cb 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -115,7 +115,6 @@ static struct irq_chip zpci_irq_chip = { .name = "PCI-MSI", .irq_unmask = pci_msi_unmask_irq, .irq_mask = pci_msi_mask_irq, - .irq_set_affinity = zpci_set_irq_affinity, }; static void zpci_handle_cpu_local_irq(bool rescan) @@ -276,7 +275,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) rc = -EIO; if (hwirq - bit >= msi_vecs) break; - irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity); + irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, + (irq_delivery == DIRECTED) ? + msi->affinity : NULL); if (irq < 0) return -ENOMEM; rc = irq_set_msi_desc(irq, msi); diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 7d42a8794f10..020a2c514d96 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -11,6 +11,113 @@ #include #include #include +#include +#include + +static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset) +{ + struct { + u64 offset; + u8 cc; + u8 status; + } data = {offset, cc, status}; + + zpci_err_hex(&data, sizeof(data)); +} + +static inline int __pcistb_mio_inuser( + void __iomem *ioaddr, const void __user *src, + u64 len, u8 *status) +{ + int cc = -ENXIO; + + asm volatile ( + " sacf 256\n" + "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" + "1: ipm %[cc]\n" + " srl %[cc],28\n" + "2: sacf 768\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : [cc] "+d" (cc), [len] "+d" (len) + : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src)) + : "cc", "memory"); + *status = len >> 24 & 0xff; + return cc; +} + +static inline int __pcistg_mio_inuser( + void __iomem *ioaddr, const void __user *src, + u64 ulen, u8 *status) +{ + register u64 addr asm("2") = (u64 __force) ioaddr; + register u64 len asm("3") = ulen; + int cc = -ENXIO; + u64 val = 0; + u64 cnt = ulen; + u8 tmp; + + /* + * copy 0 < @len <= 8 bytes from @src into the right most bytes of + * a register, then store it to PCI at @ioaddr while in secondary + * address space. pcistg then uses the user mappings. + */ + asm volatile ( + " sacf 256\n" + "0: llgc %[tmp],0(%[src])\n" + " sllg %[val],%[val],8\n" + " aghi %[src],1\n" + " ogr %[val],%[tmp]\n" + " brctg %[cnt],0b\n" + "1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n" + "2: ipm %[cc]\n" + " srl %[cc],28\n" + "3: sacf 768\n" + EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) + : + [src] "+a" (src), [cnt] "+d" (cnt), + [val] "+d" (val), [tmp] "=d" (tmp), + [len] "+d" (len), [cc] "+d" (cc), + [ioaddr] "+a" (addr) + :: "cc", "memory"); + *status = len >> 24 & 0xff; + + /* did we read everything from user memory? 
*/ + if (!cc && cnt != 0) + cc = -EFAULT; + + return cc; +} + +static inline int __memcpy_toio_inuser(void __iomem *dst, + const void __user *src, size_t n) +{ + int size, rc = 0; + u8 status = 0; + mm_segment_t old_fs; + + if (!src) + return -EINVAL; + + old_fs = enable_sacf_uaccess(); + while (n > 0) { + size = zpci_get_max_write_size((u64 __force) dst, + (u64 __force) src, n, + ZPCI_MAX_WRITE_SIZE); + if (size > 8) /* main path */ + rc = __pcistb_mio_inuser(dst, src, size, &status); + else + rc = __pcistg_mio_inuser(dst, src, size, &status); + if (rc) + break; + src += size; + dst += size; + n -= size; + } + disable_sacf_uaccess(old_fs); + if (rc) + zpci_err_mmio(rc, status, (__force u64) dst); + return rc; +} static long get_pfn(unsigned long user_addr, unsigned long access, unsigned long *pfn) @@ -46,6 +153,20 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) return -EINVAL; + + /* + * Only support write access to MIO capable devices on a MIO enabled + * system. Otherwise we would have to check for every address if it is + * a special ZPCI_ADDR and we would have to do a get_pfn() which we + * don't need for MIO capable devices. + */ + if (static_branch_likely(&have_mio)) { + ret = __memcpy_toio_inuser((void __iomem *) mmio_addr, + user_buffer, + length); + return ret; + } + if (length > 64) { buf = kmalloc(length, GFP_KERNEL); if (!buf) @@ -56,7 +177,8 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, ret = get_pfn(mmio_addr, VM_WRITE, &pfn); if (ret) goto out; - io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); + io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | + (mmio_addr & ~PAGE_MASK)); ret = -EFAULT; if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) @@ -72,6 +194,78 @@ out: return ret; } +static inline int __pcilg_mio_inuser( + void __user *dst, const void __iomem *ioaddr, + u64 ulen, u8 *status) +{ + register u64 addr asm("2") = (u64 __force) ioaddr; + register u64 len asm("3") = ulen; + u64 cnt = ulen; + int shift = ulen * 8; + int cc = -ENXIO; + u64 val, tmp; + + /* + * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in + * user space) into a register using pcilg then store these bytes at + * user address @dst + */ + asm volatile ( + " sacf 256\n" + "0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n" + "1: ipm %[cc]\n" + " srl %[cc],28\n" + " ltr %[cc],%[cc]\n" + " jne 4f\n" + "2: ahi %[shift],-8\n" + " srlg %[tmp],%[val],0(%[shift])\n" + "3: stc %[tmp],0(%[dst])\n" + " aghi %[dst],1\n" + " brctg %[cnt],2b\n" + "4: sacf 768\n" + EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) + : + [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len), + [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), + [shift] "+d" (shift) + : + [ioaddr] "a" (addr) + : "cc", "memory"); + + /* did we write everything to the user space buffer?
*/ + if (!cc && cnt != 0) + cc = -EFAULT; + + *status = len >> 24 & 0xff; + return cc; +} + +static inline int __memcpy_fromio_inuser(void __user *dst, + const void __iomem *src, + unsigned long n) +{ + int size, rc = 0; + u8 status; + mm_segment_t old_fs; + + old_fs = enable_sacf_uaccess(); + while (n > 0) { + size = zpci_get_max_write_size((u64 __force) src, + (u64 __force) dst, n, + ZPCI_MAX_READ_SIZE); + rc = __pcilg_mio_inuser(dst, src, size, &status); + if (rc) + break; + src += size; + dst += size; + n -= size; + } + disable_sacf_uaccess(old_fs); + if (rc) + zpci_err_mmio(rc, status, (__force u64) dst); + return rc; +} + SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, void __user *, user_buffer, size_t, length) { @@ -86,12 +280,27 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length) return -EINVAL; + + /* + * Only support read access to MIO capable devices on a MIO enabled + * system. Otherwise we would have to check for every address if it is + * a special ZPCI_ADDR and we would have to do a get_pfn() which we + * don't need for MIO capable devices. + */ + if (static_branch_likely(&have_mio)) { + ret = __memcpy_fromio_inuser( + user_buffer, (const void __iomem *)mmio_addr, + length); + return ret; + } + if (length > 64) { buf = kmalloc(length, GFP_KERNEL); if (!buf) return -ENOMEM; - } else + } else { buf = local_buf; + } ret = get_pfn(mmio_addr, VM_READ, &pfn); if (ret) diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S index 5a10ce34b95d..3d1c31e0cf3d 100644 --- a/arch/s390/purgatory/head.S +++ b/arch/s390/purgatory/head.S @@ -62,14 +62,15 @@ jh 10b .endm -.macro START_NEXT_KERNEL base +.macro START_NEXT_KERNEL base subcode lg %r4,kernel_entry-\base(%r13) lg %r5,load_psw_mask-\base(%r13) ogr %r4,%r5 stg %r4,0(%r0) xgr %r0,%r0 - diag %r0,%r0,0x308 + lghi %r1,\subcode + diag %r0,%r1,0x308 .endm .text @@ -123,7 +124,7 @@ ENTRY(purgatory_start) je .start_crash_kernel /* start normal kernel */ - START_NEXT_KERNEL .base_crash + START_NEXT_KERNEL .base_crash 0 .return_old_kernel: lmg %r6,%r15,gprregs-.base_crash(%r13) @@ -227,7 +228,7 @@ ENTRY(purgatory_start) MEMCPY %r9,%r10,%r11 /* start crash kernel */ - START_NEXT_KERNEL .base_dst + START_NEXT_KERNEL .base_dst 1 load_psw_mask: diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 16b4d8b0bb85..2c44b94f82fb 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup); static void __init landisk_setup(char **cmdline_p) { + /* I/O port identity mapping */ + __set_io_port_base(0); + /* LED ON */ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index e5beb625ab88..87db9a84b5ec 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -46,7 +46,6 @@ CONFIG_BLK_DEV_IDETAPE=m CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index d0de378beefe..7d54f284ce10 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -63,8 +63,7 @@ config PVR2_DMA config G2_DMA tristate "G2 Bus DMA support" - depends on SH_DREAMCAST - select SH_DMA_API + depends on SH_DREAMCAST && SH_DMA_API help This enables support for the DMA controller for
the Dreamcast's G2 bus. Drivers that want this will generally enable this on diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 22d968bfe9bb..d770da3f8b6f 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp)) #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -33,13 +34,4 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) -#if CONFIG_PGTABLE_LEVELS > 2 -#define __pmd_free_tlb(tlb, pmdp, addr) \ -do { \ - struct page *page = virt_to_page(pmdp); \ - pgtable_pmd_page_dtor(page); \ - tlb_remove_page((tlb), page); \ -} while (0); -#endif - #endif /* __ASM_SH_PGALLOC_H */ diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d31f66e82ce5..4a8ec9e40cc2 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -199,7 +199,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -353,7 +353,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! @@ -392,7 +392,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 18e9fb6fcf1b..349e27771cea 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -524,7 +524,7 @@ config COMPAT bool depends on SPARC64 default y - select COMPAT_BINFMT_ELF + select COMPAT_BINFMT_ELF if BINFMT_ELF select HAVE_UID16 select ARCH_WANT_OLD_COMPAT_IPC select COMPAT_OLD_SIGACTION diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 6c325d53a20a..bde4d21a8ac8 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -73,7 +73,6 @@ CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index f94532f25db1..274217e7ed70 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -57,36 +57,40 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr) { if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI)) return 0; - if (prot & PROT_ADI) { - if (!adi_capable()) - return 0; - - if (addr) { - struct vm_area_struct *vma; - - vma = find_vma(current->mm, addr); - if (vma) { - /* ADI can not be enabled on PFN - * mapped pages - */ - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) - return 0; - - /* Mergeable pages can become unmergeable - * if ADI is enabled on them even if they - * have identical data on them. This can be - * because ADI enabled pages with identical - * data may still not have identical ADI - * tags on them. Disallow ADI on mergeable - * pages. 
- */ - if (vma->vm_flags & VM_MERGEABLE) - return 0; - } - } - } return 1; } + +#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags) +/* arch_validate_flags() - Ensure combination of flags is valid for a + * VMA. + */ +static inline bool arch_validate_flags(unsigned long vm_flags) +{ + /* If ADI is being enabled on this VMA, check for ADI + * capability on the platform and ensure VMA is suitable + * for ADI + */ + if (vm_flags & VM_SPARC_ADI) { + if (!adi_capable()) + return false; + + /* ADI can not be enabled on PFN mapped pages */ + if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) + return false; + + /* Mergeable pages can become unmergeable + * if ADI is enabled on them even if they + * have identical data on them. This can be + * because ADI enabled pages with identical + * data may still not have identical ADI + * tags on them. Disallow ADI on mergeable + * pages. + */ + if (vm_flags & VM_MERGEABLE) + return false; + } + return true; +} #endif /* CONFIG_SPARC64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 8029b681fc7c..08f9bbbf5bf2 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -117,6 +117,8 @@ #define SO_DETACH_REUSEPORT_BPF 0x0047 +#define SO_NETNS_COOKIE 0x0050 + #if !defined(__KERNEL__) diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 16b50afe7b52..646dd58169ec 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -46,82 +46,79 @@ enum sparc_regset { REGSET_FP, }; +static int regwindow32_get(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_from_user(uregs, (void __user *)reg_window, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE) != size) + return -EFAULT; + } + return 0; +} + +static int regwindow32_set(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_to_user((void __user *)reg_window, uregs, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE | FOLL_WRITE) != size) + return -EFAULT; + } + return 0; +} + static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - unsigned long *k = kbuf; - unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) { - for (; count > 0 && pos < 16; count--) - *k++ = regs->u_regs[pos++]; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (put_user(regs->u_regs[pos++], u++)) - return -EFAULT; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { -
if (get_user(reg, &reg_window[pos++]) || - put_user(reg, u++)) - return -EFAULT; - } - } - while (count > 0) { - switch (pos) { - case 32: /* PSR */ - reg = regs->psr; - break; - case 33: /* PC */ - reg = regs->pc; - break; - case 34: /* NPC */ - reg = regs->npc; - break; - case 35: /* Y */ - reg = regs->y; - break; - case 36: /* WIM */ - case 37: /* TBR */ - reg = 0; - break; - default: - goto finish; - } - - if (kbuf) - *k++ = reg; - else if (put_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - pos++; - count--; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret || !count) + return ret; } -finish: - pos *= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1); + uregs[0] = regs->psr; + uregs[1] = regs->pc; + uregs[2] = regs->npc; + uregs[3] = regs->y; + uregs[4] = 0; /* WIM */ + uregs[5] = 0; /* TBR */ + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 32 * sizeof(u32), 38 * sizeof(u32)); } static int genregs32_set(struct task_struct *target, @@ -130,82 +127,58 @@ static int genregs32_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - const unsigned long *k = kbuf; - const unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + u32 psr; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) { - for (; count > 0 && pos < 16; count--) - regs->u_regs[pos++] = *k++; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (put_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (get_user(reg, u++)) - return -EFAULT; - regs->u_regs[pos++] = reg; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(reg, u++) || - put_user(reg, &reg_window[pos++])) - return -EFAULT; - } - } - while (count > 0) { - unsigned long psr; - - if (kbuf) - reg = *k++; - else if (get_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - - switch (pos) { - case 32: /* PSR */ - psr = regs->psr; - psr &= ~(PSR_ICC | PSR_SYSCALL); - psr |= (reg & (PSR_ICC | PSR_SYSCALL)); - regs->psr = psr; - break; - case 33: /* PC */ - regs->pc = reg; - break; - case 34: /* NPC */ - regs->npc = reg; - break; - case 35: /* Y */ - regs->y = reg; - break; - case 36: /* WIM */ - case 37: /* TBR */ - break; - default: - goto finish; - } - - pos++; - count--; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret) + return ret; + if (regwindow32_set(target, regs, uregs)) + return -EFAULT; + if (!count) + return 0; } -finish: - pos *= sizeof(reg); - count *= sizeof(reg); - + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &psr, + 32 * sizeof(u32), 33 * sizeof(u32)); + if (ret) + return ret; + regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | + (psr & (PSR_ICC | PSR_SYSCALL)); + if (!count) + return 0; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->pc, + 33 * sizeof(u32), 34 *
sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->npc, + 34 * sizeof(u32), 35 * sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->y, + 35 * sizeof(u32), 36 * sizeof(u32)); + if (ret || !count) + return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1); + 36 * sizeof(u32), 38 * sizeof(u32)); } static int fpregs32_get(struct task_struct *target, diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index c9d41a96468f..3f5930bfab06 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -572,19 +572,13 @@ static int genregs32_get(struct task_struct *target, for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) - &reg_window[pos], + &reg_window[pos++], &reg, sizeof(reg), FOLL_FORCE) != sizeof(reg)) return -EFAULT; - if (access_process_vm(target, - (unsigned long) u, - &reg, sizeof(reg), - FOLL_FORCE | FOLL_WRITE) - != sizeof(reg)) + if (put_user(reg, u++)) return -EFAULT; - pos++; - u++; } } } @@ -684,12 +678,7 @@ static int genregs32_set(struct task_struct *target, } } else { for (; count > 0 && pos < 32; count--) { - if (access_process_vm(target, - (unsigned long) - u, - &reg, sizeof(reg), - FOLL_FORCE) - != sizeof(reg)) + if (get_user(reg, u++)) return -EFAULT; if (access_process_vm(target, (unsigned long) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index a8275fea4b70..aa81c25b44cf 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void) * are flush_tlb_*() routines, and these run after flush_cache_*() * which performs the flushw. * - * The SMP TLB coherency scheme we use works as follows: - * - * 1) mm->cpu_vm_mask is a bit mask of which cpus an address - * space has (potentially) executed on, this is the heuristic - * we use to avoid doing cross calls. - * - * Also, for flushing from kswapd and also for clones, we - * use cpu_vm_mask as the list of cpus to make run the TLB. - * - * 2) TLB context numbers are shared globally across all processors - * in the system, this allows us to play several games to avoid - * cross calls. - * - * One invariant is that when a cpu switches to a process, and - * that processes tsk->active_mm->cpu_vm_mask does not have the - * current cpu's bit set, that tlb context is flushed locally. - * - * If the address space is non-shared (ie. mm->count == 1) we avoid - * cross calls when we want to flush the currently running process's - * tlb state. This is done by clearing all cpu bits except the current - * processor's in current->mm->cpu_vm_mask and performing the - * flush locally only. This will force any subsequent cpus which run - * this task to flush the context from the local tlb if the process - * migrates to another cpu (again). - * - * 3) For shared address spaces (threads) and swapping we bite the - * bullet for most cases and perform the cross call (but only to - * the cpus listed in cpu_vm_mask). - * - * The performance gain from "optimizing" away the cross call for threads is - * questionable (in theory the big win for threads is the massive sharing of - * address space state across processors). + * mm->cpu_vm_mask is a bit mask of which cpus an address + * space has (potentially) executed on, this is the heuristic + * we use to limit cross calls.
*/ /* This currently is only used by the hugetlb arch pre-fault @@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void) void smp_flush_tlb_mm(struct mm_struct *mm) { u32 ctx = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (atomic_read(&mm->mm_users) == 1) { - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - goto local_flush_and_out; - } + get_cpu(); smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, mm_cpumask(mm)); -local_flush_and_out: __flush_tlb_mm(ctx, SECONDARY_CONTEXT); put_cpu(); @@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long { u32 ctx = CTX_HWBITS(mm->context); struct tlb_pending_info info; - int cpu = get_cpu(); + + get_cpu(); info.ctx = ctx; info.nr = nr; info.vaddrs = vaddrs; - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_call_function_many(mm_cpumask(mm), tlb_pending_func, - &info, 1); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); @@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) { unsigned long context = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_cross_call_masked(&xcall_flush_tlb_page, - context, vaddr, 0, - mm_cpumask(mm)); + get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); put_cpu(); diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 27778b65a965..f2b22c496fb9 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs) asi = (regs->tstate >> 24); /* saved %asi */ else asi = (insn >> 5); /* immediate asi */ - if ((asi & 0xf2) == ASI_PNF) { - if (insn & 0x1000000) { /* op3[5:4]=3 */ - handle_ldf_stq(insn, regs); - return true; - } else if (insn & 0x200000) { /* op3[2], stores */ + if ((asi & 0xf6) == ASI_PNF) { + if (insn & 0x200000) /* op3[2], stores */ return false; - } - handle_ld_nf(insn, regs); + if (insn & 0x1000000) /* op3[5:4]=3 (fp) */ + handle_ldf_stq(insn, regs); + else + handle_ld_nf(insn, regs); return true; } } diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index b89d42b29e34..f427f34b8b79 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -142,6 +142,7 @@ __bzero: ZERO_LAST_BLOCKS(%o0, 0x48, %g2) ZERO_LAST_BLOCKS(%o0, 0x08, %g2) 13: + EXT(12b, 13b, 21f) be 8f andcc %o1, 4, %g0 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 906eda1158b4..40dd6cb4a413 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) size = memblock_phys_mem_size() - memblock_reserved_size(); *pages_avail = (size >> PAGE_SHIFT) - high_pages; + /* Only allow low memory to be allocated via memblock allocation */ + memblock_set_current_limit(max_low_pfn << PAGE_SHIFT); + return max_pfn; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index e6d91819da92..28b9ffd85db0 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2904,7 +2904,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) if (!page) return NULL; if (!pgtable_pte_page_ctor(page)) { - free_unref_page(page); 
+ __free_page(page); return NULL; } return (pte_t *) page_address(page); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index cc3ad64479ac..9e256d4d1f4c 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -379,7 +379,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) return NULL; page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); if (!pgtable_pte_page_ctor(page)) { - __free_page(page); return NULL; } return page; diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 3364e2a00989..fef734473c0f 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) return 1; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/um/Makefile b/arch/um/Makefile index d2daa206872d..275f5ffdf6f0 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -140,6 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) # When cleaning we don't include .config, so we don't include # TT or skas makefiles and don't clean skas_ptregs.h. CLEAN_FILES += linux x.i gmon.out +MRPROPER_DIRS += arch/$(SUBARCH)/include/generated archclean: @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c index 4d80526a4236..d8845d4aac6a 100644 --- a/arch/um/drivers/chan_user.c +++ b/arch/um/drivers/chan_user.c @@ -26,10 +26,10 @@ int generic_read(int fd, char *c_out, void *unused) n = read(fd, c_out, sizeof(*c_out)); if (n > 0) return n; - else if (errno == EAGAIN) - return 0; else if (n == 0) return -EIO; + else if (errno == EAGAIN) + return 0; return -errno; } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 6627d7c30f37..4e59ab817d3e 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -47,18 +47,25 @@ /* Max request size is determined by sector mask - 32K */ #define UBD_MAX_REQUEST (8 * sizeof(long)) +struct io_desc { + char *buffer; + unsigned long length; + unsigned long sector_mask; + unsigned long long cow_offset; + unsigned long bitmap_words[2]; +}; + struct io_thread_req { struct request *req; int fds[2]; unsigned long offsets[2]; unsigned long long offset; - unsigned long length; - char *buffer; int sectorsize; - unsigned long sector_mask; - unsigned long long cow_offset; - unsigned long bitmap_words[2]; int error; + + int desc_cnt; + /* io_desc has to be the last element of the struct */ + struct io_desc io_desc[]; }; @@ -524,12 +531,7 @@ static void ubd_handler(void) blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q); } - if ((io_req->error) || (io_req->buffer == NULL)) - blk_mq_end_request(io_req->req, io_req->error); - else { - if (!blk_update_request(io_req->req, io_req->error, io_req->length)) - __blk_mq_end_request(io_req->req, io_req->error); - } + blk_mq_end_request(io_req->req, io_req->error); kfree(io_req); } } @@ -945,6 +947,7 @@ static int ubd_add(int n, char **error_out) blk_queue_write_cache(ubd_dev->queue, true, false); blk_queue_max_segments(ubd_dev->queue, MAX_SG); + blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1); err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); if(err){ *error_out = "Failed to register device"; @@ -1288,37 +1291,74 @@ static void 
cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, *cow_offset += bitmap_offset; } -static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, +static void cowify_req(struct io_thread_req *req, struct io_desc *segment, + unsigned long offset, unsigned long *bitmap, __u64 bitmap_offset, __u64 bitmap_len) { - __u64 sector = req->offset >> SECTOR_SHIFT; + __u64 sector = offset >> SECTOR_SHIFT; int i; - if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT) + if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT) panic("Operation too long"); if (req_op(req->req) == REQ_OP_READ) { - for (i = 0; i < req->length >> SECTOR_SHIFT; i++) { + for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) { if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) ubd_set_bit(i, (unsigned char *) - &req->sector_mask); + &segment->sector_mask); } + } else { + cowify_bitmap(offset, segment->length, &segment->sector_mask, + &segment->cow_offset, bitmap, bitmap_offset, + segment->bitmap_words, bitmap_len); } - else cowify_bitmap(req->offset, req->length, &req->sector_mask, - &req->cow_offset, bitmap, bitmap_offset, - req->bitmap_words, bitmap_len); } -static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, - u64 off, struct bio_vec *bvec) +static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, + struct request *req) { - struct ubd *dev = hctx->queue->queuedata; - struct io_thread_req *io_req; - int ret; + struct bio_vec bvec; + struct req_iterator iter; + int i = 0; + unsigned long byte_offset = io_req->offset; + int op = req_op(req); - io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC); + if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) { + io_req->io_desc[0].buffer = NULL; + io_req->io_desc[0].length = blk_rq_bytes(req); + } else { + rq_for_each_segment(bvec, req, iter) { + BUG_ON(i >= io_req->desc_cnt); + + io_req->io_desc[i].buffer = + page_address(bvec.bv_page) + bvec.bv_offset; + io_req->io_desc[i].length = bvec.bv_len; + i++; + } + } + + if (dev->cow.file) { + for (i = 0; i < io_req->desc_cnt; i++) { + cowify_req(io_req, &io_req->io_desc[i], byte_offset, + dev->cow.bitmap, dev->cow.bitmap_offset, + dev->cow.bitmap_len); + byte_offset += io_req->io_desc[i].length; + } + + } +} + +static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req, + int desc_cnt) +{ + struct io_thread_req *io_req; + int i; + + io_req = kmalloc(sizeof(*io_req) + + (desc_cnt * sizeof(struct io_desc)), + GFP_ATOMIC); if (!io_req) - return -ENOMEM; + return NULL; io_req->req = req; if (dev->cow.file) @@ -1326,26 +1366,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, else io_req->fds[0] = dev->fd; io_req->error = 0; - - if (bvec != NULL) { - io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset; - io_req->length = bvec->bv_len; - } else { - io_req->buffer = NULL; - io_req->length = blk_rq_bytes(req); - } - io_req->sectorsize = SECTOR_SIZE; io_req->fds[1] = dev->fd; - io_req->cow_offset = -1; - io_req->offset = off; - io_req->sector_mask = 0; + io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT; io_req->offsets[0] = 0; io_req->offsets[1] = dev->cow.data_offset; - if (dev->cow.file) - cowify_req(io_req, dev->cow.bitmap, - dev->cow.bitmap_offset, dev->cow.bitmap_len); + for (i = 0 ; i < desc_cnt; i++) { + io_req->io_desc[i].sector_mask = 0; + io_req->io_desc[i].cow_offset = -1; + } + + return io_req; +} + +static int ubd_submit_request(struct ubd *dev, struct request 
*req) +{ + int segs = 0; + struct io_thread_req *io_req; + int ret; + int op = req_op(req); + + if (op == REQ_OP_FLUSH) + segs = 0; + else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) + segs = 1; + else + segs = blk_rq_nr_phys_segments(req); + + io_req = ubd_alloc_req(dev, req, segs); + if (!io_req) + return -ENOMEM; + + io_req->desc_cnt = segs; + if (segs) + ubd_map_req(dev, io_req, req); ret = os_write_file(thread_fd, &io_req, sizeof(io_req)); if (ret != sizeof(io_req)) { @@ -1356,22 +1411,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, return ret; } -static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req) -{ - struct req_iterator iter; - struct bio_vec bvec; - int ret; - u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT; - - rq_for_each_segment(bvec, req, iter) { - ret = ubd_queue_one_vec(hctx, req, off, &bvec); - if (ret < 0) - return ret; - off += bvec.bv_len; - } - return 0; -} - static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -1384,17 +1423,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, spin_lock_irq(&ubd_dev->lock); switch (req_op(req)) { - /* operations with no lentgth/offset arguments */ case REQ_OP_FLUSH: - ret = ubd_queue_one_vec(hctx, req, 0, NULL); - break; case REQ_OP_READ: case REQ_OP_WRITE: - ret = queue_rw_req(hctx, req); - break; case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: - ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL); + ret = ubd_submit_request(ubd_dev, req); break; default: WARN_ON_ONCE(1); @@ -1482,22 +1516,22 @@ static int map_error(int error_code) * will result in unpredictable behaviour and/or crashes. */ -static int update_bitmap(struct io_thread_req *req) +static int update_bitmap(struct io_thread_req *req, struct io_desc *segment) { int n; - if(req->cow_offset == -1) + if (segment->cow_offset == -1) return map_error(0); - n = os_pwrite_file(req->fds[1], &req->bitmap_words, - sizeof(req->bitmap_words), req->cow_offset); - if (n != sizeof(req->bitmap_words)) + n = os_pwrite_file(req->fds[1], &segment->bitmap_words, + sizeof(segment->bitmap_words), segment->cow_offset); + if (n != sizeof(segment->bitmap_words)) return map_error(-n); return map_error(0); } -static void do_io(struct io_thread_req *req) +static void do_io(struct io_thread_req *req, struct io_desc *desc) { char *buf = NULL; unsigned long len; @@ -1512,21 +1546,20 @@ static void do_io(struct io_thread_req *req) return; } - nsectors = req->length / req->sectorsize; + nsectors = desc->length / req->sectorsize; start = 0; do { - bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); + bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask); end = start; while((end < nsectors) && - (ubd_test_bit(end, (unsigned char *) - &req->sector_mask) == bit)) + (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit)) end++; off = req->offset + req->offsets[bit] + start * req->sectorsize; len = (end - start) * req->sectorsize; - if (req->buffer != NULL) - buf = &req->buffer[start * req->sectorsize]; + if (desc->buffer != NULL) + buf = &desc->buffer[start * req->sectorsize]; switch (req_op(req->req)) { case REQ_OP_READ: @@ -1566,7 +1599,8 @@ static void do_io(struct io_thread_req *req) start = end; } while(start < nsectors); - req->error = update_bitmap(req); + req->offset += len; + req->error = update_bitmap(req, desc); } /* Changed in start_io_thread, which is serialized by being called only @@ -1599,14 +1633,21 @@ int io_thread(void 
*arg) } for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + struct io_thread_req *req = (*io_req_buffer)[count]; + int i; + io_count++; - do_io((*io_req_buffer)[count]); + for (i = 0; !req->error && i < req->desc_cnt; i++) + do_io(req, &(req->io_desc[i])); + } written = 0; do { - res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); + res = os_write_file(kernel_fd, + ((char *) io_req_buffer) + written, + n - written); if (res >= 0) { written += res; } diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 179b41ad63ba..18618af3835f 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -959,6 +959,7 @@ static void virtio_uml_release_dev(struct device *d) } os_close_file(vu_dev->sock); + kfree(vu_dev); } /* Platform device */ @@ -977,7 +978,7 @@ static int virtio_uml_probe(struct platform_device *pdev) if (!pdata) return -EINVAL; - vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL); + vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); if (!vu_dev) return -ENOMEM; diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c index fc7f1e746703..87ca4a47cd66 100644 --- a/arch/um/drivers/xterm.c +++ b/arch/um/drivers/xterm.c @@ -18,6 +18,7 @@ struct xterm_chan { int pid; int helper_pid; + int chan_fd; char *title; int device; int raw; @@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts) return NULL; *data = ((struct xterm_chan) { .pid = -1, .helper_pid = -1, + .chan_fd = -1, .device = device, .title = opts->xterm_title, .raw = opts->raw } ); @@ -149,6 +151,7 @@ static int xterm_open(int input, int output, int primary, void *d, goto out_kill; } + data->chan_fd = fd; new = xterm_fd(fd, &data->helper_pid); if (new < 0) { err = new; @@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d) os_kill_process(data->helper_pid, 0); data->helper_pid = -1; + if (data->chan_fd != -1) + os_close_file(data->chan_fd); os_close_file(fd); } diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c index 10c99e058fca..d1cffc2a7f21 100644 --- a/arch/um/kernel/sigio.c +++ b/arch/um/kernel/sigio.c @@ -35,14 +35,14 @@ int write_sigio_irq(int fd) } /* These are called from os-Linux/sigio.c to protect its pollfds arrays. 
*/ -static DEFINE_SPINLOCK(sigio_spinlock); +static DEFINE_MUTEX(sigio_mutex); void sigio_lock(void) { - spin_lock(&sigio_spinlock); + mutex_lock(&sigio_mutex); } void sigio_unlock(void) { - spin_unlock(&sigio_spinlock); + mutex_unlock(&sigio_mutex); } diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index b7eaf655635c..11499136720d 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -126,6 +126,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, struct host_vm_op *last; int fd = -1, ret = 0; + if (virt + len > STUB_START && virt < STUB_END) + return -EINVAL; + if (hvc->userspace) fd = phys_mapping(phys, &offset); else @@ -163,7 +166,7 @@ static int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *last; int ret = 0; - if ((addr >= STUB_START) && (addr < STUB_END)) + if (addr + len > STUB_START && addr < STUB_END) return -EINVAL; if (hvc->index != 0) { @@ -193,6 +196,9 @@ static int add_mprotect(unsigned long addr, unsigned long len, struct host_vm_op *last; int ret = 0; + if (addr + len > STUB_START && addr < STUB_END) + return -EINVAL; + if (hvc->index != 0) { last = &hvc->ops[hvc->index - 1]; if ((last->type == MPROTECT) && @@ -433,6 +439,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) struct mm_id *mm_id; address &= PAGE_MASK; + + if (address >= STUB_START && address < STUB_END) + goto kill; + pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto kill; diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 5133e3afb96f..3996937e2c0d 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c index d508310ee5e1..f1732c308c61 100644 --- a/arch/um/os-Linux/irq.c +++ b/arch/um/os-Linux/irq.c @@ -48,7 +48,7 @@ int os_epoll_triggered(int index, int events) int os_event_mask(int irq_type) { if (irq_type == IRQ_READ) - return EPOLLIN | EPOLLPRI; + return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP; if (irq_type == IRQ_WRITE) return EPOLLOUT; return 0; } diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c index 44def53a11cd..ea5c60f4393e 100644 --- a/arch/um/os-Linux/umid.c +++ b/arch/um/os-Linux/umid.c @@ -137,20 +137,13 @@ static inline int is_umdir_used(char *dir) { char pid[sizeof("nnnnn\0")], *end, *file; int dead, fd, p, n, err; - size_t filelen; + size_t filelen = strlen(dir) + sizeof("/pid") + 1; - err = asprintf(&file, "%s/pid", dir); - if (err < 0) - return 0; + file = malloc(filelen); + if (!file) + return -ENOMEM; - filelen = strlen(file); - - n = snprintf(file, filelen, "%s/pid", dir); - if (n >= filelen) { - printk(UM_KERN_ERR "is_umdir_used - pid filename too long\n"); - err = -E2BIG; - goto out; - } + snprintf(file, filelen, "%s/pid", dir); dead = 0; fd = open(file, O_RDONLY); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8ef85139553f..3ece88cebd68 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -550,6 +550,7 @@ config X86_UV depends on X86_EXTENDED_PLATFORM depends on NUMA depends on EFI + depends on KEXEC_CORE depends on X86_X2APIC depends on PCI ---help--- @@ -1573,6 +1574,27 @@ config NUMA Otherwise, you should say N. +config NUMA_AWARE_SPINLOCKS + bool "NUMA-aware spinlocks" + depends on NUMA + depends on QUEUED_SPINLOCKS + depends on 64BIT + # For now, we depend on PARAVIRT_SPINLOCKS to make the patching work.
+ # This is awkward, but hopefully will be resolved once static_call() + # is available. + depends on PARAVIRT_SPINLOCKS + default y + help + Introduce NUMA (Non Uniform Memory Access) awareness into + the slow path of spinlocks. + + In this variant of qspinlock, the kernel will try to keep the lock + on the same node, thus reducing the number of remote cache misses, + while trading some of the short-term fairness for better performance. + + Say N if you want absolute first-come, first-served fairness. + + config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" @@ -1985,6 +2007,23 @@ config X86_INTEL_TSX_MODE_AUTO side channel attacks- equals the tsx=auto command line parameter. endchoice +config X86_SGX + bool "Software Guard eXtensions (SGX)" + depends on X86_64 && CPU_SUP_INTEL + depends on CRYPTO=y + depends on CRYPTO_SHA256=y + select SRCU + select MMU_NOTIFIER + help + Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions + that can be used by applications to set aside private regions of code + and data, referred to as enclaves. An enclave's private memory can + only be accessed by code running within the enclave. Accesses from + outside the enclave, including other enclaves, are disallowed by + hardware. + + If unsure, say N. + config EFI bool "EFI runtime service support" depends on ACPI diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 8e29c991ba3e..159b5e799baa 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -380,6 +380,10 @@ config X86_DEBUGCTLMSR def_bool y depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486) && !UML +config IA32_FEAT_CTL + def_bool y + depends on CPU_SUP_INTEL + menuconfig PROCESSOR_SELECT bool "Supported processor vendors" if EXPERT ---help--- diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94df0868804b..69f0cb01c666 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -34,12 +34,13 @@ M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ - -mno-mmx -mno-sse + -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) +REALMODE_CFLAGS += $(CLANG_FLAGS) export REALMODE_CFLAGS # BITS is used as extension for files which are available in a 32 bit @@ -61,6 +62,9 @@ endif KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow KBUILD_CFLAGS += $(call cc-option,-mno-avx,) +# Intel CET isn't enabled in the kernel +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) + ifeq ($(CONFIG_X86_32),y) BITS := 32 UTS_MACHINE := i386 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index e2839b5c246c..6539c50fb9aa 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' +sed-zoffset := -e 's/^\([0-9a-fA-F]*\) 
[a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' quiet_cmd_zoffset = ZOFFSET $@ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 6b84afdd7538..292b5bc6e3a3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -38,6 +38,8 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign +# Disable relocation relaxation in case the link is not PIE. +KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n @@ -102,7 +104,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in $(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" >&2; \ exit 1; \ } || true; \ diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5e30eaaf8576..d7c0fcc1dbf9 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -49,16 +49,17 @@ * Position Independent Executable (PIE) so that linker won't optimize * R_386_GOT32X relocation to its fixed symbol address. Older * linkers generate R_386_32 relocations against locally defined symbols, - * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less + * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle * R_386_32 relocations when relocating the kernel. To generate - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as + * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as * hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD ENTRY(startup_32) @@ -106,7 +107,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx - jge 1f + jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index e9a7f7cadb12..50c9eeb36f0d 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -42,6 +42,7 @@ .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD .code32 @@ -106,7 +107,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx - jge 1f + jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: @@ -297,7 +298,7 @@ ENTRY(startup_64) notq %rax andq %rax, %rbp cmpq $LOAD_PHYSICAL_ADDR, %rbp - jge 1f + jae 1f #endif movq $LOAD_PHYSICAL_ADDR, %rbp 1: diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index c8862696a47b..7d0394f4ebf9 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -5,15 +5,6 @@ #include "pgtable.h" #include "../string.h" -/* - * __force_order is used by special_insns.h asm code to force instruction - * serialization. - * - * It is not referenced from the code, but GCC < 5 with -fPIE would fail - * due to an undefined symbol. Define it to make these ancient GCCs work. 
- */ -unsigned long __force_order; - #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */ #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */ diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 59ce9ed58430..088709089e9b 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -137,7 +137,6 @@ CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y @@ -205,7 +204,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/configs/oc.config b/arch/x86/configs/oc.config new file mode 100644 index 000000000000..d190a290bbf0 --- /dev/null +++ b/arch/x86/configs/oc.config @@ -0,0 +1,1539 @@ +CONFIG_LOCALVERSION="-tlinux4_public" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=19 +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +CONFIG_EXPERT=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_USERFAULTFD=y +CONFIG_SLUB_MEMCG_SYSFS_ON=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_PROFILING=y +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_NUMACHIP=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_PVH=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_JAILHOUSE_GUEST=y +CONFIG_GART_IOMMU=y +CONFIG_MAXSMP=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INJECT=m +CONFIG_PERF_EVENTS_AMD_POWER=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_X86_INTEL_MPX=y +CONFIG_X86_SGX=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_1000=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_CRASH_DUMP=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_COMPAT_VDSO=y +CONFIG_LEGACY_VSYSCALL_EMULATE=y +CONFIG_LIVEPATCH=y +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_EC_DEBUGFS=m +# CONFIG_ACPI_AC is not set +# CONFIG_ACPI_BATTERY is not set +# CONFIG_ACPI_FAN is not set +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=y 
+CONFIG_ACPI_HMAT=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_EXTLOG=m +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +CONFIG_X86_SPEEDSTEP_CENTRINO=m +CONFIG_X86_P4_CLOCKMOD=m +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_INTEL_IDLE=y +CONFIG_IA32_EMULATION=y +CONFIG_EDD=m +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_VARS=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_KPROBES=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_LDM_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_ZSMALLOC=y +CONFIG_ZONE_DEVICE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +# CONFIG_NF_CT_PROTO_DCCP is not set +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m 
+CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y 
+CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_IEEE802154=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m 
+CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_PKTGEN=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_VMD=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_CONNECTOR=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y +CONFIG_ZRAM=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=y +CONFIG_BLK_DEV_SX8=y +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_HP_ILO=m +CONFIG_VMWARE_BALLOON=m +CONFIG_ALTERA_STAPL=m +CONFIG_VMWARE_VMCI=m +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_BE2ISCSI=y +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=y +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=y +CONFIG_SCSI_AACRAID=y +CONFIG_SCSI_AIC7XXX=y +CONFIG_SCSI_AIC79XX=y +CONFIG_AIC79XX_CMDS_PER_DEVICE=4 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC94XX=y +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_SCSI_ARCMSR=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +CONFIG_SCSI_HPTIOP=y +CONFIG_SCSI_BUSLOGIC=y +CONFIG_VMWARE_PVSCSI=m +CONFIG_LIBFC=y +CONFIG_LIBFCOE=y +CONFIG_FCOE=y +CONFIG_FCOE_FNIC=y +CONFIG_SCSI_GDTH=y +CONFIG_SCSI_ISCI=y +CONFIG_SCSI_IPS=y +CONFIG_SCSI_INITIO=y +CONFIG_SCSI_STEX=y +CONFIG_SCSI_QLA_FC=y +CONFIG_SCSI_QLA_ISCSI=y +CONFIG_SCSI_LPFC=y +CONFIG_SCSI_PMCRAID=y +CONFIG_SCSI_PM8001=y +CONFIG_SCSI_BFA_FC=y +CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_SATA_ACARD_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_PDC_ADMA=y +CONFIG_SATA_QSTOR=y +CONFIG_SATA_SX4=y +CONFIG_ATA_PIIX=y +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y +CONFIG_PATA_ALI=y +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_ATP867X=y +CONFIG_PATA_CMD64X=y +CONFIG_PATA_HPT366=y +CONFIG_PATA_HPT37X=y +CONFIG_PATA_HPT3X2N=y +CONFIG_PATA_HPT3X3=y 
+CONFIG_PATA_IT8213=y +CONFIG_PATA_IT821X=y +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=y +CONFIG_PATA_NETCELL=y +CONFIG_PATA_NINJA32=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_PDC_OLD=y +CONFIG_PATA_RDC=y +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_TOSHIBA=y +CONFIG_PATA_VIA=y +CONFIG_PATA_ACPI=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_LEGACY=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +CONFIG_FUSION_FC=y +CONFIG_FUSION_SAS=y +CONFIG_FUSION_CTL=y +CONFIG_FUSION_LOGGING=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_GENEVE=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_BNX2=m +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +CONFIG_BNA=m +CONFIG_MACB=m +CONFIG_LIQUIDIO=m +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_ENIC=m +CONFIG_DNET=m +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_TULIP=m +CONFIG_TULIP_MMIO=y +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_IGC=m +CONFIG_JME=m +CONFIG_MVMDIO=m +CONFIG_SKGE=m +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_FPGA_IPSEC=y +CONFIG_MLX5_EN_IPSEC=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_MYRI10GE=m +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_ETHOC=m +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_NETXEN_NIC=m +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_EPIC100=m +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# 
CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MDIO_BITBANG=m +CONFIG_PHYLIB=y +CONFIG_AMD_PHY=m +CONFIG_AT803X_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MICREL_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_STE10XP=m +CONFIG_VITESSE_PHY=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_WLAN is not set +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_HYPERV_NET=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_SUNKBD=y +CONFIG_KEYBOARD_XTKBD=y +# CONFIG_MOUSE_PS2_ALPS is not set +# CONFIG_MOUSE_PS2_BYD is not set +# CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_CYPRESS is not set +# CONFIG_MOUSE_PS2_LIFEBOOK is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +# CONFIG_MOUSE_PS2_FOCALTECH is not set +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_SERIO_RAW=y +CONFIG_SERIO_ALTERA_PS2=y +CONFIG_SERIO_ARC_PS2=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_CYCLADES=m +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_NVRAM=y +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=y +CONFIG_TCG_TIS=y +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TELCLOCK=m +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_POWER_RESET=y +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_RC_CORE=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_SANYO_DECODER=y +CONFIG_IR_SHARP_DECODER=y +CONFIG_IR_MCE_KBD_DECODER=y +CONFIG_IR_XMP_DECODER=y +CONFIG_AGP=y +CONFIG_AGP_INTEL=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=m +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_I915=m +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VBOXVIDEO=m +CONFIG_FB=y +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +CONFIG_FB_HYPERV=m +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 
+CONFIG_SOUND=m +CONFIG_SND=m +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_PCSP=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AD1889=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALS4000=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_AW2=m +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_FM801=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_INTEL_DETECT_DMIC=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_AMD_ACP=m +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m +CONFIG_SND_SOC_AMD_ACP3x=m +CONFIG_SND_ATMEL_SOC=m +CONFIG_SND_DESIGNWARE_I2S=m +CONFIG_SND_DESIGNWARE_PCM=y +CONFIG_SND_SOC_FSL_ASRC=m +CONFIG_SND_SOC_FSL_SAI=m +CONFIG_SND_SOC_FSL_AUDMIX=m +CONFIG_SND_SOC_FSL_SSI=m +CONFIG_SND_SOC_FSL_SPDIF=m +CONFIG_SND_SOC_FSL_ESAI=m +CONFIG_SND_SOC_FSL_MICFIL=m +CONFIG_SND_SOC_IMX_AUDMUX=m +CONFIG_SND_I2S_HI6210_I2S=m +CONFIG_SND_SOC_IMG=y +CONFIG_SND_SOC_IMG_I2S_IN=m +CONFIG_SND_SOC_IMG_I2S_OUT=m +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m +CONFIG_SND_SOC_IMG_SPDIF_IN=m +CONFIG_SND_SOC_IMG_SPDIF_OUT=m +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m +CONFIG_SND_SOC_INTEL_HASWELL=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_INTEL_CML_H=m +CONFIG_SND_SOC_INTEL_CML_LP=m 
+CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC=y +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m +CONFIG_SND_SOC_MTK_BTCVSD=m +CONFIG_SND_SOC_SOF_TOPLEVEL=y +CONFIG_SND_SOC_SOF_PCI=m +CONFIG_SND_SOC_SOF_ACPI=m +CONFIG_SND_SOC_SOF_NOCODEC_SUPPORT=y +CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS=y +CONFIG_SND_SOC_SOF_DEBUG=y +CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE=y +CONFIG_SND_SOC_SOF_DEBUG_XRUN_STOP=y +CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC=y +CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION=y +CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE=y +CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST=y +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y +CONFIG_SND_SOC_SOF_BAYTRAIL_SUPPORT=y +CONFIG_SND_SOC_SOF_MERRIFIELD_SUPPORT=y +CONFIG_SND_SOC_SOF_APOLLOLAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_GEMINILAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_CANNONLAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_COFFEELAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_ICELAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_COMETLAKE_LP_SUPPORT=y +CONFIG_SND_SOC_SOF_COMETLAKE_H_SUPPORT=y +CONFIG_SND_SOC_SOF_TIGERLAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_ELKHARTLAKE_SUPPORT=y +CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1=y +CONFIG_SND_SOC_XILINX_I2S=m +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m +CONFIG_SND_SOC_XILINX_SPDIF=m +CONFIG_SND_SOC_XTFPGA_I2S=m +CONFIG_ZX_TDM=m +CONFIG_SND_SOC_AC97_CODEC=m +CONFIG_SND_SOC_ADAU1701=m +CONFIG_SND_SOC_ADAU1761_I2C=m +CONFIG_SND_SOC_AK4118=m +CONFIG_SND_SOC_AK4458=m +CONFIG_SND_SOC_AK4554=m +CONFIG_SND_SOC_AK4613=m +CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_AK5386=m +CONFIG_SND_SOC_AK5558=m +CONFIG_SND_SOC_ALC5623=m +CONFIG_SND_SOC_BD28623=m +CONFIG_SND_SOC_BT_SCO=m +CONFIG_SND_SOC_CS35L32=m +CONFIG_SND_SOC_CS35L33=m +CONFIG_SND_SOC_CS35L34=m +CONFIG_SND_SOC_CS35L35=m +CONFIG_SND_SOC_CS35L36=m +CONFIG_SND_SOC_CS42L42=m +CONFIG_SND_SOC_CS42L51_I2C=m +CONFIG_SND_SOC_CS42L52=m +CONFIG_SND_SOC_CS42L56=m +CONFIG_SND_SOC_CS42L73=m +CONFIG_SND_SOC_CS4265=m +CONFIG_SND_SOC_CS4270=m +CONFIG_SND_SOC_CS4271_I2C=m +CONFIG_SND_SOC_CS42XX8_I2C=m +CONFIG_SND_SOC_CS43130=m +CONFIG_SND_SOC_CS4341=m +CONFIG_SND_SOC_CS4349=m +CONFIG_SND_SOC_CS53L30=m +CONFIG_SND_SOC_CX2072X=m +CONFIG_SND_SOC_ES7134=m +CONFIG_SND_SOC_ES7241=m +CONFIG_SND_SOC_ES8316=m +CONFIG_SND_SOC_ES8328_I2C=m +CONFIG_SND_SOC_GTM601=m +CONFIG_SND_SOC_INNO_RK3036=m +CONFIG_SND_SOC_MAX98088=m +CONFIG_SND_SOC_MAX98504=m +CONFIG_SND_SOC_MAX9867=m +CONFIG_SND_SOC_MAX98927=m +CONFIG_SND_SOC_MAX98373=m +CONFIG_SND_SOC_MAX9860=m +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m +CONFIG_SND_SOC_PCM1681=m +CONFIG_SND_SOC_PCM1789_I2C=m +CONFIG_SND_SOC_PCM179X_I2C=m +CONFIG_SND_SOC_PCM186X_I2C=m +CONFIG_SND_SOC_PCM3060_I2C=m +CONFIG_SND_SOC_PCM3168A_I2C=m +CONFIG_SND_SOC_PCM512x_I2C=m +CONFIG_SND_SOC_RK3328=m +CONFIG_SND_SOC_RT5616=m +CONFIG_SND_SOC_RT5631=m +CONFIG_SND_SOC_SGTL5000=m +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m +CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m +CONFIG_SND_SOC_SPDIF=m +CONFIG_SND_SOC_SSM2305=m +CONFIG_SND_SOC_SSM2602_I2C=m +CONFIG_SND_SOC_SSM4567=m +CONFIG_SND_SOC_STA32X=m +CONFIG_SND_SOC_STA350=m +CONFIG_SND_SOC_STI_SAS=m +CONFIG_SND_SOC_TAS2552=m +CONFIG_SND_SOC_TAS5086=m +CONFIG_SND_SOC_TAS571X=m +CONFIG_SND_SOC_TAS5720=m +CONFIG_SND_SOC_TAS6424=m +CONFIG_SND_SOC_TDA7419=m +CONFIG_SND_SOC_TFA9879=m +CONFIG_SND_SOC_TLV320AIC23_I2C=m +CONFIG_SND_SOC_TLV320AIC31XX=m +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m +CONFIG_SND_SOC_TLV320AIC3X=m +CONFIG_SND_SOC_TS3A227E=m +CONFIG_SND_SOC_TSCS42XX=m +CONFIG_SND_SOC_TSCS454=m +CONFIG_SND_SOC_UDA1334=m +CONFIG_SND_SOC_WM8510=m +CONFIG_SND_SOC_WM8523=m +CONFIG_SND_SOC_WM8524=m +CONFIG_SND_SOC_WM8580=m +CONFIG_SND_SOC_WM8711=m 
+CONFIG_SND_SOC_WM8728=m +CONFIG_SND_SOC_WM8731=m +CONFIG_SND_SOC_WM8737=m +CONFIG_SND_SOC_WM8741=m +CONFIG_SND_SOC_WM8750=m +CONFIG_SND_SOC_WM8753=m +CONFIG_SND_SOC_WM8776=m +CONFIG_SND_SOC_WM8782=m +CONFIG_SND_SOC_WM8804_I2C=m +CONFIG_SND_SOC_WM8903=m +CONFIG_SND_SOC_WM8904=m +CONFIG_SND_SOC_WM8960=m +CONFIG_SND_SOC_WM8962=m +CONFIG_SND_SOC_WM8974=m +CONFIG_SND_SOC_WM8978=m +CONFIG_SND_SOC_WM8985=m +CONFIG_SND_SOC_ZX_AUD96P22=m +CONFIG_SND_SOC_MAX9759=m +CONFIG_SND_SOC_MT6351=m +CONFIG_SND_SOC_MT6358=m +CONFIG_SND_SOC_NAU8540=m +CONFIG_SND_SOC_NAU8810=m +CONFIG_SND_SOC_NAU8822=m +CONFIG_SND_SOC_NAU8824=m +CONFIG_SND_SOC_TPA6130A2=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_USB_HIDDEV=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_EDAC=m +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_CLASS=y +CONFIG_DMADEVICES=y +CONFIG_INTEL_IDMA64=y +CONFIG_INTEL_IOATDMA=y +CONFIG_UIO=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MMIO=y +CONFIG_HYPERV=m +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_QLGE=m +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_IRQ_REMAP=y +CONFIG_SOUNDWIRE=m +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_DEV_DAX=m +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +CONFIG_XFS_WARN=y +CONFIG_GFS2_FS=m +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_NILFS2_FS=m +CONFIG_FS_DAX=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FANOTIFY=y 
+CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=m +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +CONFIG_NTFS_FS=m +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_CGROUPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +CONFIG_NFS_FS=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="" +CONFIG_NFS_FSCACHE=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_ENCRYPTED_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +# CONFIG_INTEGRITY is not set +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux" +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_CORDIC=m +CONFIG_CRC_ITU_T=y +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# 
CONFIG_XZ_DEC_SPARC is not set +CONFIG_PRINTK_TIME=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_LATENCYTOP=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_BPF_KPROBE_OVERRIDE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_KSTRTOX=y +CONFIG_TEST_BPF=m +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index d0a5ffeae8df..8092d7baf8b5 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -136,7 +136,6 @@ CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y @@ -201,7 +200,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489..77043a82da51 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -127,10 +127,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +136,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +159,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +173,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index e40bdf024ba7..dd954d8db629 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff PSHUFB_XMM %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -319,7 +319,7 @@ _initial_blocks_\@: # Main loop - Encrypt/Decrypt remaining blocks - cmp $0, %r13 + test %r13, %r13 je _zero_cipher_left_\@ sub $64, %r13 je _four_cipher_left_\@ @@ -438,7 +438,7 @@ 
_multiple_of_16_bytes_\@: mov PBlockLen(%arg2), %r12 - cmp $0, %r12 + test %r12, %r12 je _partial_done\@ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 @@ -475,7 +475,7 @@ _T_8_\@: add $8, %r10 sub $8, %r11 psrldq $8, %xmm0 - cmp $0, %r11 + test %r11, %r11 je _return_T_done_\@ _T_4_\@: movd %xmm0, %eax @@ -483,7 +483,7 @@ _T_4_\@: add $4, %r10 sub $4, %r11 psrldq $4, %xmm0 - cmp $0, %r11 + test %r11, %r11 je _return_T_done_\@ _T_123_\@: movd %xmm0, %eax @@ -620,7 +620,7 @@ _get_AAD_blocks\@: /* read the last <16B of AAD */ _get_AAD_rest\@: - cmp $0, %r11 + test %r11, %r11 je _get_AAD_done\@ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7 @@ -641,7 +641,7 @@ _get_AAD_done\@: .macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ AAD_HASH operation mov PBlockLen(%arg2), %r13 - cmp $0, %r13 + test %r13, %r13 je _partial_block_done_\@ # Leave Macro if no partial blocks # Read in input data without over reading cmp $16, \PLAIN_CYPH_LEN @@ -693,7 +693,7 @@ _no_extra_mask_1_\@: PSHUFB_XMM %xmm2, %xmm3 pxor %xmm3, \AAD_HASH - cmp $0, %r10 + test %r10, %r10 jl _partial_incomplete_1_\@ # GHASH computation for the last <16 Byte block @@ -728,7 +728,7 @@ _no_extra_mask_2_\@: PSHUFB_XMM %xmm2, %xmm9 pxor %xmm9, \AAD_HASH - cmp $0, %r10 + test %r10, %r10 jl _partial_incomplete_2_\@ # GHASH computation for the last <16 Byte block @@ -748,7 +748,7 @@ _encode_done_\@: PSHUFB_XMM %xmm2, %xmm9 .endif # output encrypted Bytes - cmp $0, %r10 + test %r10, %r10 jl _partial_fill_\@ mov %r13, %r12 mov $16, %r13 @@ -978,7 +978,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1946,7 +1946,7 @@ ENTRY(aesni_set_key) ENDPROC(aesni_set_key) /* - * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + * void aesni_enc(const void *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_enc) FRAME_BEGIN @@ -2137,7 +2137,7 @@ _aesni_enc4: ENDPROC(_aesni_enc4) /* - * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + * void aesni_dec (const void *ctx, u8 *dst, const u8 *src) */ ENTRY(aesni_dec) FRAME_BEGIN @@ -2726,25 +2726,18 @@ ENDPROC(aesni_ctr_enc) pxor CTR, IV; /* - * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, - * bool enc, u8 *iv) + * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, + * const u8 *src, unsigned int len, le128 *iv) */ -ENTRY(aesni_xts_crypt8) +ENTRY(aesni_xts_encrypt) FRAME_BEGIN - cmpb $0, %cl - movl $0, %ecx - movl $240, %r10d - leaq _aesni_enc4, %r11 - leaq _aesni_dec4, %rax - cmovel %r10d, %ecx - cmoveq %rax, %r11 movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK movups (IVP), IV mov 480(KEYP), KLEN - addq %rcx, KEYP +.Lxts_enc_loop4: movdqa IV, STATE1 movdqu 0x00(INP), INC pxor INC, STATE1 @@ -2768,71 +2761,103 @@ ENTRY(aesni_xts_crypt8) pxor INC, STATE4 movdqu IV, 0x30(OUTP) - CALL_NOSPEC %r11 + call _aesni_enc4 movdqu 0x00(OUTP), INC pxor INC, STATE1 movdqu STATE1, 
0x00(OUTP) - _aesni_gf128mul_x_ble() - movdqa IV, STATE1 - movdqu 0x40(INP), INC - pxor INC, STATE1 - movdqu IV, 0x40(OUTP) - movdqu 0x10(OUTP), INC pxor INC, STATE2 movdqu STATE2, 0x10(OUTP) - _aesni_gf128mul_x_ble() - movdqa IV, STATE2 - movdqu 0x50(INP), INC - pxor INC, STATE2 - movdqu IV, 0x50(OUTP) - movdqu 0x20(OUTP), INC pxor INC, STATE3 movdqu STATE3, 0x20(OUTP) - _aesni_gf128mul_x_ble() - movdqa IV, STATE3 - movdqu 0x60(INP), INC - pxor INC, STATE3 - movdqu IV, 0x60(OUTP) - movdqu 0x30(OUTP), INC pxor INC, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() - movdqa IV, STATE4 - movdqu 0x70(INP), INC - pxor INC, STATE4 - movdqu IV, 0x70(OUTP) - _aesni_gf128mul_x_ble() + add $64, INP + add $64, OUTP + sub $64, LEN + ja .Lxts_enc_loop4 + movups IV, (IVP) - CALL_NOSPEC %r11 - - movdqu 0x40(OUTP), INC - pxor INC, STATE1 - movdqu STATE1, 0x40(OUTP) - - movdqu 0x50(OUTP), INC - pxor INC, STATE2 - movdqu STATE2, 0x50(OUTP) - - movdqu 0x60(OUTP), INC - pxor INC, STATE3 - movdqu STATE3, 0x60(OUTP) - - movdqu 0x70(OUTP), INC - pxor INC, STATE4 - movdqu STATE4, 0x70(OUTP) - FRAME_END ret -ENDPROC(aesni_xts_crypt8) +ENDPROC(aesni_xts_encrypt) + +/* + * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, + * const u8 *src, unsigned int len, le128 *iv) + */ +ENTRY(aesni_xts_decrypt) + FRAME_BEGIN + + movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK + movups (IVP), IV + + mov 480(KEYP), KLEN + add $240, KEYP + +.Lxts_dec_loop4: + movdqa IV, STATE1 + movdqu 0x00(INP), INC + pxor INC, STATE1 + movdqu IV, 0x00(OUTP) + + _aesni_gf128mul_x_ble() + movdqa IV, STATE2 + movdqu 0x10(INP), INC + pxor INC, STATE2 + movdqu IV, 0x10(OUTP) + + _aesni_gf128mul_x_ble() + movdqa IV, STATE3 + movdqu 0x20(INP), INC + pxor INC, STATE3 + movdqu IV, 0x20(OUTP) + + _aesni_gf128mul_x_ble() + movdqa IV, STATE4 + movdqu 0x30(INP), INC + pxor INC, STATE4 + movdqu IV, 0x30(OUTP) + + call _aesni_dec4 + + movdqu 0x00(OUTP), INC + pxor INC, STATE1 + movdqu STATE1, 0x00(OUTP) + + movdqu 0x10(OUTP), INC + pxor INC, STATE2 + movdqu STATE2, 0x10(OUTP) + + movdqu 0x20(OUTP), INC + pxor INC, STATE3 + movdqu STATE3, 0x20(OUTP) + + movdqu 0x30(OUTP), INC + pxor INC, STATE4 + movdqu STATE4, 0x30(OUTP) + + _aesni_gf128mul_x_ble() + + add $64, INP + add $64, OUTP + sub $64, LEN + ja .Lxts_dec_loop4 + + movups IV, (IVP) + + FRAME_END + ret +ENDPROC(aesni_xts_decrypt) #endif diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 91c039ab5699..4e4d34956170 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -370,7 +370,7 @@ _initial_num_blocks_is_0\@: _initial_blocks_encrypted\@: - cmp $0, %r13 + test %r13, %r13 je _zero_cipher_left\@ sub $128, %r13 @@ -529,7 +529,7 @@ _multiple_of_16_bytes\@: vmovdqu HashKey(arg2), %xmm13 mov PBlockLen(arg2), %r12 - cmp $0, %r12 + test %r12, %r12 je _partial_done\@ #GHASH computation for the last <16 Byte block @@ -574,7 +574,7 @@ _T_8\@: add $8, %r10 sub $8, %r11 vpsrldq $8, %xmm9, %xmm9 - cmp $0, %r11 + test %r11, %r11 je _return_T_done\@ _T_4\@: vmovd %xmm9, %eax @@ -582,7 +582,7 @@ _T_4\@: add $4, %r10 sub $4, %r11 vpsrldq $4, %xmm9, %xmm9 - cmp $0, %r11 + test %r11, %r11 je _return_T_done\@ _T_123\@: vmovd %xmm9, %eax @@ -626,7 +626,7 @@ _get_AAD_blocks\@: cmp $16, %r11 jge _get_AAD_blocks\@ vmovdqu \T8, \T7 - cmp $0, %r11 + test %r11, %r11 je _get_AAD_done\@ vpxor \T7, \T7, \T7 @@ -645,7 +645,7 @@ _get_AAD_rest8\@: vpxor \T1, \T7, \T7 jmp _get_AAD_rest8\@ _get_AAD_rest4\@: - cmp $0, %r11 
+ test %r11, %r11 jle _get_AAD_rest0\@ mov (%r10), %eax movq %rax, \T1 @@ -750,7 +750,7 @@ _done_read_partial_block_\@: .macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ AAD_HASH ENC_DEC mov PBlockLen(arg2), %r13 - cmp $0, %r13 + test %r13, %r13 je _partial_block_done_\@ # Leave Macro if no partial blocks # Read in input data without over reading cmp $16, \PLAIN_CYPH_LEN @@ -802,7 +802,7 @@ _no_extra_mask_1_\@: vpshufb %xmm2, %xmm3, %xmm3 vpxor %xmm3, \AAD_HASH, \AAD_HASH - cmp $0, %r10 + test %r10, %r10 jl _partial_incomplete_1_\@ # GHASH computation for the last <16 Byte block @@ -837,7 +837,7 @@ _no_extra_mask_2_\@: vpshufb %xmm2, %xmm9, %xmm9 vpxor %xmm9, \AAD_HASH, \AAD_HASH - cmp $0, %r10 + test %r10, %r10 jl _partial_incomplete_2_\@ # GHASH computation for the last <16 Byte block @@ -857,7 +857,7 @@ _encode_done_\@: vpshufb %xmm2, %xmm9, %xmm9 .endif # output encrypted Bytes - cmp $0, %r10 + test %r10, %r10 jl _partial_fill_\@ mov %r13, %r12 mov $16, %r13 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 3e707e81afdb..18cfb76daa23 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -83,10 +83,8 @@ struct gcm_context_data { asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); -asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in); -asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in); +asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in); +asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in); asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len); asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, @@ -99,6 +97,12 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, #define AVX_GEN2_OPTSIZE 640 #define AVX_GEN4_OPTSIZE 4096 +asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + +asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + #ifdef CONFIG_X86_64 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, @@ -106,9 +110,6 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); -asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in, bool enc, u8 *iv); - /* asmlinkage void aesni_gcm_enc() * void *ctx, AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data. May be uninitialized. 
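Before the glue-layer hunks that follow, a note on the XTS dataflow that both the new 4-way assembly loops above and glue_xts_crypt_128bit_one() (changed later in this series) implement. The following is an illustrative C sketch only, not kernel code: the *_sketch names are hypothetical, cipher_fn stands in for aesni_enc()/aesni_dec(), and endianness, aliasing and alignment handling are elided (a little-endian host is assumed).

#include <stdint.h>

typedef struct { uint64_t a, b; } le128;	/* a = low qword, b = high */

/* Multiply the tweak by x in GF(2^128) ("ble" bit order): shift left one
 * bit and fold in the reduction constant 0x87 when the top bit falls out. */
static void gf128mul_x_ble_sketch(le128 *r, const le128 *x)
{
	r->a = (x->a << 1) ^ ((x->b >> 63) ? 0x87 : 0);
	r->b = (x->b << 1) | (x->a >> 63);
}

/* One 16-byte XTS block; cipher_fn is the raw block cipher under Key2.
 * dst/src are assumed suitably aligned for this sketch. */
static void xts_one_block_sketch(const void *ctx, uint8_t *dst,
				 const uint8_t *src, le128 *iv,
				 void (*cipher_fn)(const void *, uint8_t *,
						   const uint8_t *))
{
	le128 t = *iv;			/* current tweak T */
	uint64_t *d = (uint64_t *)dst;
	const uint64_t *s = (const uint64_t *)src;

	gf128mul_x_ble_sketch(iv, &t);	/* advance tweak for the next block */

	d[0] = s[0] ^ t.a;		/* CC <- T xor C      */
	d[1] = s[1] ^ t.b;
	cipher_fn(ctx, dst, dst);	/* PP <- E/D(Key2, CC) */
	d[0] ^= t.a;			/* P  <- T xor PP     */
	d[1] ^= t.b;
}

Each block is XOR-ed with the current tweak, run through AES under the second key, XOR-ed with the tweak again, and the tweak is advanced by a multiply-by-x in GF(2^128) -- the same movdqu/pxor/_aesni_gf128mul_x_ble() sequence visible in the assembly above.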
@@ -550,29 +551,24 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, } -static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) +static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - aesni_enc(ctx, out, in); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc); } -static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); } -static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); + aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); } -static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); -} - -static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) -{ - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); + aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); } static const struct common_glue_ctx aesni_enc_xts = { @@ -580,11 +576,11 @@ static const struct common_glue_ctx aesni_enc_xts = { .fpu_blocks_limit = 1, .funcs = { { - .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } + .num_blocks = 32, + .fn_u = { .xts = aesni_xts_enc32 } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } + .fn_u = { .xts = aesni_xts_enc } } } }; @@ -593,11 +589,11 @@ static const struct common_glue_ctx aesni_dec_xts = { .fpu_blocks_limit = 1, .funcs = { { - .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } + .num_blocks = 32, + .fn_u = { .xts = aesni_xts_dec32 } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) } + .fn_u = { .xts = aesni_xts_dec } } } }; @@ -606,8 +602,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&aesni_enc_xts, req, - XTS_TWEAK_CAST(aesni_xts_tweak), + return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc, aes_ctx(ctx->raw_tweak_ctx), aes_ctx(ctx->raw_crypt_ctx), false); @@ -618,8 +613,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&aesni_dec_xts, req, - XTS_TWEAK_CAST(aesni_xts_tweak), + return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc, aes_ctx(ctx->raw_tweak_ctx), aes_ctx(ctx->raw_crypt_ctx), true); @@ -707,7 +701,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned long auth_tag_len = crypto_aead_authsize(tfm); const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; - struct gcm_context_data data AESNI_ALIGN_ATTR; + u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8); + struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN); struct scatter_walk dst_sg_walk = {}; unsigned long left = req->cryptlen; unsigned long len, srclen, dstlen; @@ -760,8 +755,7 @@ static int 
gcmaes_crypt_by_sg(bool enc, struct aead_request *req, } kernel_fpu_begin(); - gcm_tfm->init(aes_ctx, &data, iv, - hash_subkey, assoc, assoclen); + gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen); if (req->src != req->dst) { while (left) { src = scatterwalk_map(&src_sg_walk); @@ -771,10 +765,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, len = min(srclen, dstlen); if (len) { if (enc) - gcm_tfm->enc_update(aes_ctx, &data, + gcm_tfm->enc_update(aes_ctx, data, dst, src, len); else - gcm_tfm->dec_update(aes_ctx, &data, + gcm_tfm->dec_update(aes_ctx, data, dst, src, len); } left -= len; @@ -792,10 +786,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, len = scatterwalk_clamp(&src_sg_walk, left); if (len) { if (enc) - gcm_tfm->enc_update(aes_ctx, &data, + gcm_tfm->enc_update(aes_ctx, data, src, src, len); else - gcm_tfm->dec_update(aes_ctx, &data, + gcm_tfm->dec_update(aes_ctx, data, src, src, len); } left -= len; @@ -804,7 +798,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_done(&src_sg_walk, 1, left); } } - gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len); + gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len); kernel_fpu_end(); if (!assocmem) @@ -853,7 +847,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); unsigned int i; __be32 counter = cpu_to_be32(1); @@ -880,7 +875,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); unsigned int i; if (unlikely(req->assoclen != 16 && req->assoclen != 20)) @@ -1010,7 +1006,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); __be32 counter = cpu_to_be32(1); memcpy(iv, req->iv, 12); @@ -1026,7 +1023,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); - u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); + u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8); + u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN); memcpy(iv, req->iv, 12); *((__be32 *)(iv+12)) = counter; diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index a4f00128ea55..a8cc2c83fe1b 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -19,20 +19,17 @@ #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 /* 32-way AVX2/AES-NI parallel cipher functions */ -asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void 
camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); static const struct common_glue_ctx camellia_enc = { .num_funcs = 4, @@ -40,16 +37,16 @@ static const struct common_glue_ctx camellia_enc = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) } + .fn_u = { .ecb = camellia_ecb_enc_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } + .fn_u = { .ecb = camellia_ecb_enc_16way } }, { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } + .fn_u = { .ecb = camellia_enc_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } + .fn_u = { .ecb = camellia_enc_blk } } } }; @@ -59,16 +56,16 @@ static const struct common_glue_ctx camellia_ctr = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) } + .fn_u = { .ctr = camellia_ctr_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } + .fn_u = { .ctr = camellia_ctr_16way } }, { .num_blocks = 2, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } + .fn_u = { .ctr = camellia_crypt_ctr_2way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } + .fn_u = { .ctr = camellia_crypt_ctr } } } }; @@ -78,13 +75,13 @@ static const struct common_glue_ctx camellia_enc_xts = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) } + .fn_u = { .xts = camellia_xts_enc_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } + .fn_u = { .xts = camellia_xts_enc_16way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } + .fn_u = { .xts = camellia_xts_enc } } } }; @@ -94,16 +91,16 @@ static const struct common_glue_ctx camellia_dec = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) } + .fn_u = { .ecb = camellia_ecb_dec_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } + .fn_u = { .ecb = camellia_ecb_dec_16way } }, { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } + .fn_u = { .ecb = camellia_dec_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .ecb = camellia_dec_blk } } } }; @@ -113,16 
+110,16 @@ static const struct common_glue_ctx camellia_dec_cbc = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) } + .fn_u = { .cbc = camellia_cbc_dec_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } + .fn_u = { .cbc = camellia_cbc_dec_16way } }, { .num_blocks = 2, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } + .fn_u = { .cbc = camellia_decrypt_cbc_2way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .cbc = camellia_dec_blk } } } }; @@ -132,13 +129,13 @@ static const struct common_glue_ctx camellia_dec_xts = { .funcs = { { .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) } + .fn_u = { .xts = camellia_xts_dec_32way } }, { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } + .fn_u = { .xts = camellia_xts_dec_16way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } + .fn_u = { .xts = camellia_xts_dec } } } }; @@ -161,8 +158,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -180,8 +176,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&camellia_enc_xts, req, - XTS_TWEAK_CAST(camellia_enc_blk), + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, false); } @@ -190,8 +185,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&camellia_dec_xts, req, - XTS_TWEAK_CAST(camellia_enc_blk), + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, true); } diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index f28d282779b8..31a82a79f4ac 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -18,41 +18,36 @@ #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 /* 16-way parallel cipher functions (avx/aes-ni) */ -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way); -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); EXPORT_SYMBOL_GPL(camellia_ctr_16way); -asmlinkage void 
camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); -void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(camellia_enc_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk); } EXPORT_SYMBOL_GPL(camellia_xts_enc); -void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(camellia_dec_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk); } EXPORT_SYMBOL_GPL(camellia_xts_dec); @@ -62,13 +57,13 @@ static const struct common_glue_ctx camellia_enc = { .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } + .fn_u = { .ecb = camellia_ecb_enc_16way } }, { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } + .fn_u = { .ecb = camellia_enc_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } + .fn_u = { .ecb = camellia_enc_blk } } } }; @@ -78,13 +73,13 @@ static const struct common_glue_ctx camellia_ctr = { .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } + .fn_u = { .ctr = camellia_ctr_16way } }, { .num_blocks = 2, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } + .fn_u = { .ctr = camellia_crypt_ctr_2way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } + .fn_u = { .ctr = camellia_crypt_ctr } } } }; @@ -94,10 +89,10 @@ static const struct common_glue_ctx camellia_enc_xts = { .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } + .fn_u = { .xts = camellia_xts_enc_16way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } + .fn_u = { .xts = camellia_xts_enc } } } }; @@ -107,13 +102,13 @@ static const struct common_glue_ctx camellia_dec = { .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } + .fn_u = { .ecb = camellia_ecb_dec_16way } }, { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } + .fn_u = { .ecb = camellia_dec_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .ecb = camellia_dec_blk } } } }; @@ -123,13 +118,13 @@ static const struct common_glue_ctx camellia_dec_cbc = { .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } + .fn_u = { .cbc = camellia_cbc_dec_16way } }, { .num_blocks = 2, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } + .fn_u = { .cbc = camellia_decrypt_cbc_2way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .cbc = camellia_dec_blk } } } }; @@ -139,10 +134,10 @@ static const struct common_glue_ctx camellia_dec_xts = 
{ .funcs = { { .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } + .fn_u = { .xts = camellia_xts_dec_16way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } + .fn_u = { .xts = camellia_xts_dec } } } }; @@ -165,8 +160,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -206,8 +200,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&camellia_enc_xts, req, - XTS_TWEAK_CAST(camellia_enc_blk), + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, false); } @@ -216,8 +209,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&camellia_dec_xts, req, - XTS_TWEAK_CAST(camellia_enc_blk), + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, true); } diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 7c62db56ffe1..5f3ed5af68d7 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -18,19 +18,17 @@ #include /* regular block cipher functions */ -asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); +asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, + bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk); -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk); /* 2-way parallel cipher functions */ -asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); +asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, + bool xor); EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way); -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(camellia_dec_blk_2way); static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -1267,8 +1265,10 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, return camellia_setkey(&tfm->base, key, key_len); } -void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) +void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) { + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; u128 iv = *src; camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); @@ -1277,9 +1277,11 @@ void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) } EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); -void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) *dst = *src; @@ -1291,9 +1293,11 @@ void camellia_crypt_ctr(void *ctx, u128 *dst, 
const u128 *src, le128 *iv) } EXPORT_SYMBOL_GPL(camellia_crypt_ctr); -void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblks[2]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) { dst[0] = src[0]; @@ -1315,10 +1319,10 @@ static const struct common_glue_ctx camellia_enc = { .funcs = { { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } + .fn_u = { .ecb = camellia_enc_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } + .fn_u = { .ecb = camellia_enc_blk } } } }; @@ -1328,10 +1332,10 @@ static const struct common_glue_ctx camellia_ctr = { .funcs = { { .num_blocks = 2, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } + .fn_u = { .ctr = camellia_crypt_ctr_2way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } + .fn_u = { .ctr = camellia_crypt_ctr } } } }; @@ -1341,10 +1345,10 @@ static const struct common_glue_ctx camellia_dec = { .funcs = { { .num_blocks = 2, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } + .fn_u = { .ecb = camellia_dec_blk_2way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .ecb = camellia_dec_blk } } } }; @@ -1354,10 +1358,10 @@ static const struct common_glue_ctx camellia_dec_cbc = { .funcs = { { .num_blocks = 2, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } + .fn_u = { .cbc = camellia_decrypt_cbc_2way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } + .fn_u = { .cbc = camellia_dec_blk } } } }; @@ -1373,8 +1377,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index a8a38fffb4a9..da5297475f9e 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -20,20 +20,17 @@ #define CAST6_PARALLEL_BLOCKS 8 -asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src, +asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -41,21 +38,21 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, return cast6_setkey(&tfm->base, key, keylen); } -static void 
cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(__cast6_encrypt)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt); } -static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(__cast6_decrypt)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt); } -static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; le128_to_be128(&ctrblk, iv); le128_inc(iv); @@ -70,10 +67,10 @@ static const struct common_glue_ctx cast6_enc = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) } + .fn_u = { .ecb = cast6_ecb_enc_8way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) } + .fn_u = { .ecb = __cast6_encrypt } } } }; @@ -83,10 +80,10 @@ static const struct common_glue_ctx cast6_ctr = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) } + .fn_u = { .ctr = cast6_ctr_8way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) } + .fn_u = { .ctr = cast6_crypt_ctr } } } }; @@ -96,10 +93,10 @@ static const struct common_glue_ctx cast6_enc_xts = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) } + .fn_u = { .xts = cast6_xts_enc_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) } + .fn_u = { .xts = cast6_xts_enc } } } }; @@ -109,10 +106,10 @@ static const struct common_glue_ctx cast6_dec = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) } + .fn_u = { .ecb = cast6_ecb_dec_8way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) } + .fn_u = { .ecb = __cast6_decrypt } } } }; @@ -122,10 +119,10 @@ static const struct common_glue_ctx cast6_dec_cbc = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) } + .fn_u = { .cbc = cast6_cbc_dec_8way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) } + .fn_u = { .cbc = __cast6_decrypt } } } }; @@ -135,10 +132,10 @@ static const struct common_glue_ctx cast6_dec_xts = { .funcs = { { .num_blocks = CAST6_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) } + .fn_u = { .xts = cast6_xts_dec_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) } + .fn_u = { .xts = cast6_xts_dec } } } }; @@ -154,8 +151,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt), - req); + return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -199,8 +195,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&cast6_enc_xts, req, - XTS_TWEAK_CAST(__cast6_encrypt), + return 
glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt, &ctx->tweak_ctx, &ctx->crypt_ctx, false); } @@ -209,8 +204,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&cast6_dec_xts, req, - XTS_TWEAK_CAST(__cast6_encrypt), + return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt, &ctx->tweak_ctx, &ctx->crypt_ctx, true); } diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index d9b734d0c8cc..3c6e01520a97 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -170,7 +170,7 @@ continue_block: ## branch into array lea jump_table(%rip), bufp - movzxw (bufp, %rax, 2), len + movzwq (bufp, %rax, 2), len lea crc_array(%rip), bufp lea (bufp, len, 1), bufp JMP_NOSPEC bufp diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index d15b99397480..d3d91a0abf88 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -134,7 +134,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, src -= num_blocks - 1; dst -= num_blocks - 1; - gctx->funcs[i].fn_u.cbc(ctx, dst, src); + gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, + (const u8 *)src); nbytes -= func_bytes; if (nbytes < bsize) @@ -188,7 +189,9 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, /* Process multi-block batch */ do { - gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk); + gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst, + (const u8 *)src, + &ctrblk); src += num_blocks; dst += num_blocks; nbytes -= func_bytes; @@ -210,7 +213,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, be128_to_le128(&ctrblk, (be128 *)walk.iv); memcpy(&tmp, walk.src.virt.addr, nbytes); - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp, + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp, + (const u8 *)&tmp, &ctrblk); memcpy(walk.dst.virt.addr, &tmp, nbytes); le128_to_be128((be128 *)walk.iv, &ctrblk); @@ -240,7 +244,8 @@ static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx, if (nbytes >= func_bytes) { do { - gctx->funcs[i].fn_u.xts(ctx, dst, src, + gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst, + (const u8 *)src, walk->iv); src += num_blocks; @@ -354,8 +359,8 @@ out: } EXPORT_SYMBOL_GPL(glue_xts_req_128bit); -void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, - common_glue_func_t fn) +void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src, + le128 *iv, common_glue_func_t fn) { le128 ivblk = *iv; @@ -363,13 +368,13 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, gf128mul_x_ble(iv, &ivblk); /* CC <- T xor C */ - u128_xor(dst, src, (u128 *)&ivblk); + u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk); /* PP <- D(Key2,CC) */ - fn(ctx, (u8 *)dst, (u8 *)dst); + fn(ctx, dst, dst); /* P <- T xor PP */ - u128_xor(dst, dst, (u128 *)&ivblk); + u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk); } EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one); diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c index f7567cbd35b6..80fcb85736e1 100644 --- a/arch/x86/crypto/nhpoly1305-avx2-glue.c +++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c @@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc, return crypto_nhpoly1305_update(desc, src, srclen); do { - unsigned int n = min_t(unsigned int, 
srclen, PAGE_SIZE); + unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_fpu_begin(); crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c index a661ede3b5cf..cc6b7c1a2705 100644 --- a/arch/x86/crypto/nhpoly1305-sse2-glue.c +++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c @@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc, return crypto_nhpoly1305_update(desc, src, srclen); do { - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); + unsigned int n = min_t(unsigned int, srclen, SZ_4K); kernel_fpu_begin(); crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 13fd8d3d2da0..f973ace44ad3 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -19,18 +19,16 @@ #define SERPENT_AVX2_PARALLEL_BLOCKS 16 /* 16-way AVX2 parallel cipher functions */ -asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src); +asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src, +asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -44,13 +42,13 @@ static const struct common_glue_ctx serpent_enc = { .funcs = { { .num_blocks = 16, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) } + .fn_u = { .ecb = serpent_ecb_enc_16way } }, { .num_blocks = 8, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } + .fn_u = { .ecb = __serpent_encrypt } } } }; @@ -60,13 +58,13 @@ static const struct common_glue_ctx serpent_ctr = { .funcs = { { .num_blocks = 16, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) } + .fn_u = { .ctr = serpent_ctr_16way } }, { .num_blocks = 8, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } + .fn_u = { .ctr = serpent_ctr_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } + .fn_u = { .ctr = __serpent_crypt_ctr } } } }; @@ -76,13 +74,13 @@ static const struct common_glue_ctx serpent_enc_xts = { .funcs = { { .num_blocks = 16, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) } + .fn_u = { .xts = serpent_xts_enc_16way } }, { .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } + .fn_u = { .xts = serpent_xts_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } + .fn_u = { .xts = serpent_xts_enc } } 
} }; @@ -92,13 +90,13 @@ static const struct common_glue_ctx serpent_dec = { .funcs = { { .num_blocks = 16, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) } + .fn_u = { .ecb = serpent_ecb_dec_16way } }, { .num_blocks = 8, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .ecb = __serpent_decrypt } } } }; @@ -108,13 +106,13 @@ static const struct common_glue_ctx serpent_dec_cbc = { .funcs = { { .num_blocks = 16, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) } + .fn_u = { .cbc = serpent_cbc_dec_16way } }, { .num_blocks = 8, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .cbc = __serpent_decrypt } } } }; @@ -124,13 +122,13 @@ static const struct common_glue_ctx serpent_dec_xts = { .funcs = { { .num_blocks = 16, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) } + .fn_u = { .xts = serpent_xts_dec_16way } }, { .num_blocks = 8, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } + .fn_u = { .xts = serpent_xts_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } + .fn_u = { .xts = serpent_xts_dec } } } }; @@ -146,8 +144,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), - req); + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -166,8 +163,8 @@ static int xts_encrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_enc_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, false); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, false); } static int xts_decrypt(struct skcipher_request *req) @@ -176,8 +173,8 @@ static int xts_decrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_dec_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, true); } static struct skcipher_alg serpent_algs[] = { diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 7d3dca38a5a2..7806d1cbe854 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -20,33 +20,35 @@ #include /* 8-way parallel cipher functions */ -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 
*src, + le128 *iv); EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); -void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; le128_to_be128(&ctrblk, iv); le128_inc(iv); @@ -56,17 +58,15 @@ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) } EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); -void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(__serpent_encrypt)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); } EXPORT_SYMBOL_GPL(serpent_xts_enc); -void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(__serpent_decrypt)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); } EXPORT_SYMBOL_GPL(serpent_xts_dec); @@ -102,10 +102,10 @@ static const struct common_glue_ctx serpent_enc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } + .fn_u = { .ecb = __serpent_encrypt } } } }; @@ -115,10 +115,10 @@ static const struct common_glue_ctx serpent_ctr = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } + .fn_u = { .ctr = serpent_ctr_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } + .fn_u = { .ctr = __serpent_crypt_ctr } } } }; @@ -128,10 +128,10 @@ static const struct common_glue_ctx serpent_enc_xts = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } + .fn_u = { .xts = serpent_xts_enc_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } + .fn_u = { .xts = serpent_xts_enc } } } }; @@ -141,10 +141,10 @@ static const struct common_glue_ctx serpent_dec = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .ecb = __serpent_decrypt } } } }; @@ -154,10 +154,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .cbc = __serpent_decrypt } } } }; @@ -167,10 +167,10 @@ static const struct common_glue_ctx serpent_dec_xts = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .xts = 
GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } + .fn_u = { .xts = serpent_xts_dec_8way_avx } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } + .fn_u = { .xts = serpent_xts_dec } } } }; @@ -186,8 +186,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), - req); + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -206,8 +205,8 @@ static int xts_encrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_enc_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, false); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, false); } static int xts_decrypt(struct skcipher_request *req) @@ -216,8 +215,8 @@ static int xts_decrypt(struct skcipher_request *req) struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); return glue_xts_req_128bit(&serpent_dec_xts, req, - XTS_TWEAK_CAST(__serpent_encrypt), - &ctx->tweak_ctx, &ctx->crypt_ctx, true); + __serpent_encrypt, &ctx->tweak_ctx, + &ctx->crypt_ctx, true); } static struct skcipher_alg serpent_algs[] = { diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 5fdf1931d069..4fed8d26b91a 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -31,9 +31,11 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } -static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) +static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) { u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; unsigned int j; for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) @@ -45,9 +47,11 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); } -static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; le128_to_be128(&ctrblk, iv); le128_inc(iv); @@ -56,10 +60,12 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) u128_xor(dst, src, (u128 *)&ctrblk); } -static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, +static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; unsigned int i; for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { @@ -79,10 +85,10 @@ static const struct common_glue_ctx serpent_enc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } + .fn_u = { .ecb = serpent_enc_blk_xway } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } + .fn_u = { .ecb = __serpent_encrypt } } } }; @@ -92,10 +98,10 @@ static const struct common_glue_ctx serpent_ctr = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } + .fn_u = { .ctr = serpent_crypt_ctr_xway } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } + .fn_u = { .ctr = 
serpent_crypt_ctr } } } }; @@ -105,10 +111,10 @@ static const struct common_glue_ctx serpent_dec = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } + .fn_u = { .ecb = serpent_dec_blk_xway } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .ecb = __serpent_decrypt } } } }; @@ -118,10 +124,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } + .fn_u = { .cbc = serpent_decrypt_cbc_xway } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } + .fn_u = { .cbc = __serpent_decrypt } } } }; @@ -137,7 +143,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); } diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index d561c821788b..3b36e97ec7ab 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c @@ -22,20 +22,17 @@ #define TWOFISH_PARALLEL_BLOCKS 8 /* 8-way parallel cipher functions */ -asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) @@ -43,22 +40,19 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, return twofish_setkey(&tfm->base, key, keylen); } -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src) +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } -static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(twofish_enc_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); } -static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) +static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) { - glue_xts_crypt_128bit_one(ctx, dst, src, iv, - GLUE_FUNC_CAST(twofish_dec_blk)); + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); } struct twofish_xts_ctx { @@ -93,13 +87,13 @@ static const struct common_glue_ctx 
twofish_enc = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) } + .fn_u = { .ecb = twofish_ecb_enc_8way } }, { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } + .fn_u = { .ecb = twofish_enc_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } + .fn_u = { .ecb = twofish_enc_blk } } } }; @@ -109,13 +103,13 @@ static const struct common_glue_ctx twofish_ctr = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) } + .fn_u = { .ctr = twofish_ctr_8way } }, { .num_blocks = 3, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } }, { .num_blocks = 1, - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } + .fn_u = { .ctr = twofish_enc_blk_ctr } } } }; @@ -125,10 +119,10 @@ static const struct common_glue_ctx twofish_enc_xts = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) } + .fn_u = { .xts = twofish_xts_enc_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) } + .fn_u = { .xts = twofish_xts_enc } } } }; @@ -138,13 +132,13 @@ static const struct common_glue_ctx twofish_dec = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) } + .fn_u = { .ecb = twofish_ecb_dec_8way } }, { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } + .fn_u = { .ecb = twofish_dec_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .ecb = twofish_dec_blk } } } }; @@ -154,13 +148,13 @@ static const struct common_glue_ctx twofish_dec_cbc = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) } + .fn_u = { .cbc = twofish_cbc_dec_8way } }, { .num_blocks = 3, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .cbc = twofish_dec_blk } } } }; @@ -170,10 +164,10 @@ static const struct common_glue_ctx twofish_dec_xts = { .funcs = { { .num_blocks = TWOFISH_PARALLEL_BLOCKS, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) } + .fn_u = { .xts = twofish_xts_dec_8way } }, { .num_blocks = 1, - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) } + .fn_u = { .xts = twofish_xts_dec } } } }; @@ -189,8 +183,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); } static int cbc_decrypt(struct skcipher_request *req) @@ -208,8 +201,7 @@ static int xts_encrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&twofish_enc_xts, req, - XTS_TWEAK_CAST(twofish_enc_blk), + return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, false); } @@ -218,8 +210,7 @@ static int xts_decrypt(struct skcipher_request *req) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); - return glue_xts_req_128bit(&twofish_dec_xts, req, - XTS_TWEAK_CAST(twofish_enc_blk), + 
return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, &ctx->tweak_ctx, &ctx->crypt_ctx, true); } diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 1dc9e29f221e..768af6075479 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -25,21 +25,22 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, return twofish_setkey(&tfm->base, key, keylen); } -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, - const u8 *src) +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } -static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, +static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, true); } -void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) +void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) { u128 ivs[2]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; ivs[0] = src[0]; ivs[1] = src[1]; @@ -51,9 +52,11 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); -void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) +void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblk; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) *dst = *src; @@ -66,10 +69,11 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) } EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); -void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, - le128 *iv) +void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) { be128 ctrblks[3]; + u128 *dst = (u128 *)d; + const u128 *src = (const u128 *)s; if (dst != src) { dst[0] = src[0]; @@ -94,10 +98,10 @@ static const struct common_glue_ctx twofish_enc = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } + .fn_u = { .ecb = twofish_enc_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } + .fn_u = { .ecb = twofish_enc_blk } } } }; @@ -107,10 +111,10 @@ static const struct common_glue_ctx twofish_ctr = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } + .fn_u = { .ctr = twofish_enc_blk_ctr } } } }; @@ -120,10 +124,10 @@ static const struct common_glue_ctx twofish_dec = { .funcs = { { .num_blocks = 3, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } + .fn_u = { .ecb = twofish_dec_blk_3way } }, { .num_blocks = 1, - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .ecb = twofish_dec_blk } } } }; @@ -133,10 +137,10 @@ static const struct common_glue_ctx twofish_dec_cbc = { .funcs = { { .num_blocks = 3, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } }, { .num_blocks = 1, - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } + .fn_u = { .cbc = twofish_dec_blk } } } }; @@ -152,8 +156,7 @@ static int ecb_decrypt(struct skcipher_request *req) static int cbc_encrypt(struct skcipher_request *req) { - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), - req); + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); } static int 
cbc_decrypt(struct skcipher_request *req) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 515c0ceeb4a3..3b73407da33d 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -6,6 +6,7 @@ #include #include #include +#include /* @@ -98,13 +99,6 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 - /* - * Push registers and sanitize registers of values that a - * speculation attack might otherwise want to exploit. The - * lower registers are likely clobbered well before they - * could be put to use in a speculative execution gadget. - * Interleave XOR with PUSH for better uop scheduling: - */ .if \save_ret pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ @@ -114,34 +108,43 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ .endif pushq \rdx /* pt_regs->dx */ - xorl %edx, %edx /* nospec dx */ pushq %rcx /* pt_regs->cx */ - xorl %ecx, %ecx /* nospec cx */ pushq \rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ - xorl %r8d, %r8d /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ - xorl %r9d, %r9d /* nospec r9 */ pushq %r10 /* pt_regs->r10 */ - xorl %r10d, %r10d /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ - xorl %r11d, %r11d /* nospec r11*/ pushq %rbx /* pt_regs->rbx */ - xorl %ebx, %ebx /* nospec rbx*/ pushq %rbp /* pt_regs->rbp */ - xorl %ebp, %ebp /* nospec rbp*/ pushq %r12 /* pt_regs->r12 */ - xorl %r12d, %r12d /* nospec r12*/ pushq %r13 /* pt_regs->r13 */ - xorl %r13d, %r13d /* nospec r13*/ pushq %r14 /* pt_regs->r14 */ - xorl %r14d, %r14d /* nospec r14*/ pushq %r15 /* pt_regs->r15 */ - xorl %r15d, %r15d /* nospec r15*/ UNWIND_HINT_REGS + .if \save_ret pushq %rsi /* return address on top of stack */ .endif + + /* + * Sanitize registers of values that a speculation attack might + * otherwise want to exploit. The lower registers are likely clobbered + * well before they could be put to use in a speculative execution + * gadget. + */ + xorl %edx, %edx /* nospec dx */ + xorl %ecx, %ecx /* nospec cx */ + xorl %r8d, %r8d /* nospec r8 */ + xorl %r9d, %r9d /* nospec r9 */ + xorl %r10d, %r10d /* nospec r10 */ + xorl %r11d, %r11d /* nospec r11 */ + xorl %ebx, %ebx /* nospec rbx */ + xorl %ebp, %ebp /* nospec rbp */ + xorl %r12d, %r12d /* nospec r12 */ + xorl %r13d, %r13d /* nospec r13 */ + xorl %r14d, %r14d /* nospec r14 */ + xorl %r15d, %r15d /* nospec r15 */ + .endm .macro POP_REGS pop_rdi=1 skip_r11rcx=0 @@ -339,6 +342,11 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req + rdgsbase \save_reg + GET_PERCPU_BASE \scratch_reg + wrgsbase \scratch_reg +.endm #endif /* CONFIG_X86_64 */ .macro STACKLEAK_ERASE @@ -366,3 +374,37 @@ For 32-bit we have the following conventions - kernel is built with #else #define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg #endif +#ifdef CONFIG_SMP + +/* + * CPU/node NR is loaded from the limit (size) field of a special segment + * descriptor entry in GDT. + */ +.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req + movq $__CPUNODE_SEG, \reg + lsl \reg, \reg +.endm + +/* + * Fetch the per-CPU GSBASE value for this processor and put it in @reg. + * We normally use %gs for accessing per-CPU data, but we are setting up + * %gs here and obviously can not use %gs itself to access per-CPU data. 
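For reference, the LSL trick that LOAD_CPU_AND_NODE_SEG_LIMIT relies on is also usable from user space: the limit field of the per-CPU GDT descriptor encodes the CPU and node numbers. A rough stand-alone sketch (the selector value and the VDSO_CPUNODE_* constants below are assumptions about current x86-64 kernels, not part of this patch):

#include <stdio.h>

#define VDSO_CPUNODE_BITS 12
#define VDSO_CPUNODE_MASK 0xfff
#define CPUNODE_SEG       ((15 << 3) | 3)  /* assumed: GDT entry 15, RPL 3 */

int main(void)
{
        unsigned long p;

        /* LSL reads the descriptor's limit field without touching %gs. */
        asm("lsl %1, %0" : "=r" (p) : "r" ((unsigned long)CPUNODE_SEG));
        printf("cpu=%lu node=%lu\n",
               p & VDSO_CPUNODE_MASK, p >> VDSO_CPUNODE_BITS);
        return 0;
}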
+ * + * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and + * may not restore the host's value until the CPU returns to userspace. + * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives + * while running KVM's run loop. + */ +.macro GET_PERCPU_BASE reg:req + LOAD_CPU_AND_NODE_SEG_LIMIT \reg + andq $VDSO_CPUNODE_MASK, \reg + movq __per_cpu_offset(, \reg, 8), \reg +.endm + +#else + +.macro GET_PERCPU_BASE reg:req + movq pcpu_unit_offsets(%rip), \reg +.endm + +#endif /* CONFIG_SMP */ diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 3f8e22615812..47179aae6828 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index f07baf0388bc..bde3e0f85425 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -869,9 +869,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region) * Xen doesn't set %esp to be precisely what the normal SYSENTER * entry point expects, so fix it up before using the normal path. */ -ENTRY(xen_sysenter_target) +SYM_CODE_START(xen_sysenter_target) addl $5*4, %esp /* remove xen-provided frame */ jmp .Lsysenter_past_esp +SYM_CODE_END(xen_sysenter_target) #endif /* @@ -1647,6 +1648,7 @@ ENTRY(int3) END(int3) ENTRY(general_protection) + ASM_CLAC pushl $do_general_protection jmp common_exception END(general_protection) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index b7c3ea4cb19d..5c1813e49ee7 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -38,6 +38,7 @@ #include #include #include +#include #include #include "calling.h" @@ -249,7 +250,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - UNWIND_HINT_EMPTY POP_REGS pop_rdi=0 skip_r11rcx=1 /* @@ -258,6 +258,7 @@ syscall_return_via_sysret: */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ @@ -512,7 +513,7 @@ END(spurious_entries_start) * +----------------------------------------------------+ */ ENTRY(interrupt_entry) - UNWIND_HINT_FUNC + UNWIND_HINT_IRET_REGS offset=16 ASM_CLAC cld @@ -544,9 +545,9 @@ ENTRY(interrupt_entry) pushq 5*8(%rdi) /* regs->eflags */ pushq 4*8(%rdi) /* regs->cs */ pushq 3*8(%rdi) /* regs->ip */ + UNWIND_HINT_IRET_REGS pushq 2*8(%rdi) /* regs->orig_ax */ pushq 8(%rdi) /* return address */ - UNWIND_HINT_FUNC movq (%rdi), %rdi jmp 2f @@ -637,6 +638,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY /* Copy the IRET frame to the trampoline stack. */ pushq 6*8(%rdi) /* SS */ @@ -1210,24 +1212,21 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* - * Save all registers in pt_regs, and switch gs if needed. - * Use slow, but surefire "are we in kernel?" check. - * Return: ebx=0: need swapgs on exit, ebx=1: otherwise + * Save all registers in pt_regs. 
Return GSBASE related information + * in EBX depending on the availability of the FSGSBASE instructions: + * + * FSGSBASE R/EBX + * N 0 -> SWAPGS on exit + * 1 -> no SWAPGS on exit + * + * Y GSBASE value at entry, must be restored in paranoid_exit */ ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 - movl $1, %ebx - movl $MSR_GS_BASE, %ecx - rdmsr - testl %edx, %edx - js 1f /* negative -> in kernel */ - SWAPGS - xorl %ebx, %ebx -1: /* * Always stash CR3 in %r14. This value will be restored, * verbatim, at exit. Needed if paranoid_entry interrupted @@ -1237,9 +1236,52 @@ ENTRY(paranoid_entry) * This is also why CS (stashed in the "iret frame" by the * hardware at entry) can not be used: this may be a return * to kernel code, but with a user CR3 value. + * + * Switching CR3 does not depend on kernel GSBASE so it can + * be done before switching to the kernel GSBASE. This is + * required for FSGSBASE because the kernel GSBASE has to + * be retrieved from a kernel internal table. */ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + /* + * Handling GSBASE depends on the availability of FSGSBASE. + * + * Without FSGSBASE the kernel enforces that negative GSBASE + * values indicate kernel GSBASE. With FSGSBASE no assumptions + * can be made about the GSBASE value when entering from user + * space. + */ + ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE + + /* + * Read the current GSBASE and store it in %rbx unconditionally, then + * retrieve and set the current CPU's kernel GSBASE. The stored value + * has to be restored in paranoid_exit unconditionally. + * + * The unconditional write to GS base below ensures that no subsequent + * loads based on a mispredicted GS base can happen, therefore no LFENCE + * is needed here. + */ + SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx + ret + +.Lparanoid_entry_checkgs: + /* EBX = 1 -> kernel GSBASE active, no restore required */ + movl $1, %ebx + /* + * The kernel-enforced convention is that a negative GSBASE indicates + * a kernel value. No SWAPGS needed on entry and exit. + */ + movl $MSR_GS_BASE, %ecx + rdmsr + testl %edx, %edx + jns .Lparanoid_entry_swapgs + ret + +.Lparanoid_entry_swapgs: + SWAPGS + /* * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an * unconditional CR3 write, even in the PTI case. So do an lfence @@ -1247,6 +1289,8 @@ ENTRY(paranoid_entry) */ FENCE_SWAPGS_KERNEL_ENTRY + /* EBX = 0 -> SWAPGS required on exit */ + xorl %ebx, %ebx ret END(paranoid_entry) /* @@ -1257,28 +1301,48 @@ END(paranoid_entry) * * We may be returning to very strange contexts (e.g. very early * in syscall entry), so checking for preemption here would - * be complicated. Fortunately, we there's no good reason - * to try to handle preemption here. + * be complicated. Fortunately, there's no good reason to try + * to handle preemption here. * - * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) + * R/EBX contains the GSBASE related information depending on the + * availability of the FSGSBASE instructions: + * + * FSGSBASE R/EBX + * N 0 -> SWAPGS on exit + * 1 -> no SWAPGS on exit + * + * Y User space GSBASE, must be restored unconditionally */ ENTRY(paranoid_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG - testl %ebx, %ebx /* swapgs needed? 
*/ - jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ - /* Always restore stashed CR3 value (see paranoid_entry) */ - RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 + /* + * The order of operations is important. RESTORE_CR3 requires + * kernel GSBASE. + * + * NB to anyone to try to optimize this code: this code does + * not execute at all for exceptions from user mode. Those + * exceptions go through error_exit instead. + */ + RESTORE_CR3 scratch_reg=%rax save_reg=%r14 + + /* Handle the three GSBASE cases */ + ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE + + /* With FSGSBASE enabled, unconditionally restore GSBASE */ + wrgsbase %rbx + jmp restore_regs_and_return_to_kernel + +.Lparanoid_exit_checkgs: + /* On non-FSGSBASE systems, conditionally do SWAPGS */ + testl %ebx, %ebx + jnz restore_regs_and_return_to_kernel + + /* We are returning to a context with user GSBASE */ SWAPGS_UNSAFE_STACK - jmp .Lparanoid_exit_restore -.Lparanoid_exit_no_swapgs: - TRACE_IRQS_IRETQ_DEBUG - /* Always restore stashed CR3 value (see paranoid_entry) */ - RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 -.Lparanoid_exit_restore: - jmp restore_regs_and_return_to_kernel + jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* @@ -1339,7 +1403,6 @@ ENTRY(error_entry) */ SWAPGS FENCE_SWAPGS_USER_ENTRY - SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: @@ -1686,10 +1749,27 @@ end_repeat_nmi: /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 - testl %ebx, %ebx /* swapgs needed? */ + /* + * The above invocation of paranoid_entry stored the GSBASE + * related information in R/EBX depending on the availability + * of FSGSBASE. + * + * If FSGSBASE is enabled, restore the saved GSBASE value + * unconditionally, otherwise take the conditional SWAPGS path. + */ + ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE + + wrgsbase %rbx + jmp nmi_restore + +nmi_no_fsgsbase: + /* EBX == 0 -> invoke SWAPGS */ + testl %ebx, %ebx jnz nmi_restore + nmi_swapgs: SWAPGS_UNSAFE_STACK + nmi_restore: POP_REGS @@ -1739,7 +1819,7 @@ ENTRY(rewind_stack_do_exit) movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp - UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE + UNWIND_HINT_REGS call do_exit END(rewind_stack_do_exit) diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0f2154106d01..6902c50a0c76 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -24,9 +24,10 @@ VDSO32-$(CONFIG_IA32_EMULATION) := y # files to link into the vdso vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o +vobjs-$(CONFIG_X86_SGX) += vsgx.o # files to link into kernel -obj-y += vma.o +obj-y += vma.o extable.o OBJECT_FILES_NON_STANDARD_vma.o := n # vDSO images to build @@ -92,6 +93,7 @@ CFLAGS_REMOVE_vclock_gettime.o = -pg CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg CFLAGS_REMOVE_vgetcpu.o = -pg CFLAGS_REMOVE_vvar.o = -pg +CFLAGS_REMOVE_vsgx.o = -pg # # X32 processes use x32 vDSO to access 64bit kernel data. 
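Taken together, paranoid_entry, paranoid_exit and the NMI path above now share a single R/EBX contract. A hedged C model of that contract (a stand-alone sketch; the rdgsbase/wrgsbase/swapgs operations are modeled with a plain variable, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static unsigned long gsbase;  /* models the CPU's GSBASE register */
static const unsigned long kernel_gs = 0xffff888000200000UL;  /* per-CPU base */
static const unsigned long user_gs = 0x00007f1234560000UL;

/* Returns the value the asm leaves in %rbx/%ebx. */
static unsigned long paranoid_entry_gs(bool fsgsbase)
{
        if (fsgsbase) {
                unsigned long prev = gsbase;  /* RDGSBASE: user or kernel */
                gsbase = kernel_gs;           /* WRGSBASE of the per-CPU base */
                return prev;                  /* restored verbatim on exit */
        }
        /* Non-FSGSBASE: a negative GSBASE is, by convention, a kernel value. */
        if ((long)gsbase < 0)
                return 1;                     /* no SWAPGS on exit */
        gsbase = kernel_gs;                   /* SWAPGS */
        return 0;                             /* SWAPGS required on exit */
}

static void paranoid_exit_gs(bool fsgsbase, unsigned long rbx)
{
        if (fsgsbase)
                gsbase = rbx;                 /* wrgsbase %rbx, unconditional */
        else if (rbx == 0)
                gsbase = user_gs;             /* SWAPGS back to the user value */
}

int main(void)
{
        gsbase = user_gs;
        paranoid_exit_gs(true, paranoid_entry_gs(true));
        printf("FSGSBASE round trip %s\n", gsbase == user_gs ? "ok" : "broken");
        return 0;
}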
@@ -122,8 +124,8 @@ $(obj)/%-x32.o: $(obj)/%.o FORCE targets += vdsox32.lds $(vobjx32s-y) -$(obj)/%.so: OBJCOPYFLAGS := -S -$(obj)/%.so: $(obj)/%.so.dbg FORCE +$(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table +$(obj)/%.so: $(obj)/%.so.dbg $(call if_changed,objcopy) $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE diff --git a/arch/x86/entry/vdso/extable.c b/arch/x86/entry/vdso/extable.c new file mode 100644 index 000000000000..afcf5b65beef --- /dev/null +++ b/arch/x86/entry/vdso/extable.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +struct vdso_exception_table_entry { + int insn, fixup; +}; + +bool fixup_vdso_exception(struct pt_regs *regs, int trapnr, + unsigned long error_code, unsigned long fault_addr) +{ + const struct vdso_image *image = current->mm->context.vdso_image; + const struct vdso_exception_table_entry *extable; + unsigned int nr_entries, i; + unsigned long base; + + /* + * Do not attempt to fixup #DB or #BP. It's impossible to identify + * whether or not a #DB/#BP originated from within an SGX enclave and + * SGX enclaves are currently the only use case for vDSO fixup. + */ + if (trapnr == X86_TRAP_DB || trapnr == X86_TRAP_BP) + return false; + + if (!current->mm->context.vdso) + return false; + + base = (unsigned long)current->mm->context.vdso + image->extable_base; + nr_entries = image->extable_len / (sizeof(*extable)); + extable = image->extable; + + for (i = 0; i < nr_entries; i++) { + if (regs->ip == base + extable[i].insn) { + regs->ip = base + extable[i].fixup; + regs->di = trapnr; + regs->si = error_code; + regs->dx = fault_addr; + return true; + } + } + + return false; +} diff --git a/arch/x86/entry/vdso/extable.h b/arch/x86/entry/vdso/extable.h new file mode 100644 index 000000000000..b56f6b012941 --- /dev/null +++ b/arch/x86/entry/vdso/extable.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_EXTABLE_H +#define __VDSO_EXTABLE_H + +/* + * Inject exception fixup for vDSO code. Unlike normal exception fixup, + * vDSO uses a dedicated handler, and the addresses are relative to the + * overall exception table, not each individual entry. + */ +#ifdef __ASSEMBLY__ +#define _ASM_VDSO_EXTABLE_HANDLE(from, to) \ + ASM_VDSO_EXTABLE_HANDLE from to + +.macro ASM_VDSO_EXTABLE_HANDLE from:req to:req + .pushsection __ex_table, "a" + .long (\from) - __ex_table + .long (\to) - __ex_table + .popsection +.endm +#else +#define _ASM_VDSO_EXTABLE_HANDLE(from, to) \ + ".pushsection __ex_table, \"a\"\n" \ + ".long (" #from ") - __ex_table\n" \ + ".long (" #to ") - __ex_table\n" \ + ".popsection\n" +#endif + +#endif /* __VDSO_EXTABLE_H */ diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S index 93c6dc7812d0..8ef849064501 --- a/arch/x86/entry/vdso/vdso-layout.lds.S +++ b/arch/x86/entry/vdso/vdso-layout.lds.S @@ -63,11 +63,18 @@ SECTIONS * stuff that isn't used at runtime in between. 
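The relative encoding above is what lets the vDSO be mapped at any address: both fields of an entry are offsets from the start of __ex_table, which fixup_vdso_exception() rebases using image->extable_base. A toy calculation with made-up numbers:

#include <stdio.h>

struct vdso_exception_table_entry { int insn, fixup; };

int main(void)
{
        unsigned long vdso = 0x7ffff7fc9000UL;  /* hypothetical load address */
        unsigned long extable_base = 0xd00;     /* __ex_table offset in image */
        /* negative: the fixed-up code precedes __ex_table in the layout */
        struct vdso_exception_table_entry e = { .insn = -0x180, .fixup = -0x140 };
        unsigned long base = vdso + extable_base;

        printf("fault at %#lx resumes at %#lx\n", base + e.insn, base + e.fixup);
        return 0;
}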
*/ - .text : { *(.text*) } :text =0x90909090, + .text : { + *(.text*) + *(.fixup) + } :text =0x90909090, + + .altinstructions : { *(.altinstructions) } :text .altinstr_replacement : { *(.altinstr_replacement) } :text + __ex_table : { *(__ex_table) } :text + /DISCARD/ : { *(.discard) *(.discard.*) diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S index 36b644e16272..4bf48462fca7 100644 --- a/arch/x86/entry/vdso/vdso.lds.S +++ b/arch/x86/entry/vdso/vdso.lds.S @@ -27,6 +27,7 @@ VERSION { __vdso_time; clock_getres; __vdso_clock_getres; + __vdso_sgx_enter_enclave; local: *; }; } diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h index a20b134de2a8..7c93bbab97ec 100644 --- a/arch/x86/entry/vdso/vdso2c.h +++ b/arch/x86/entry/vdso/vdso2c.h @@ -5,6 +5,41 @@ * are built for 32-bit userspace. */ +static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i % 10 == 0) + fprintf(outfile, "\n\t"); + fprintf(outfile, "0x%02X, ", (int)(data)[i]); + } +} + + +/* + * Extract a section from the input data into a standalone blob. Used to + * capture kernel-only data that needs to persist indefinitely, e.g. the + * exception fixup tables, but only in the kernel, i.e. the section can + * be stripped from the final vDSO image. + */ +static void BITSFUNC(extract)(const unsigned char *data, size_t data_len, + FILE *outfile, ELF(Shdr) *sec, const char *name) +{ + unsigned long offset; + size_t len; + + offset = (unsigned long)GET_LE(&sec->sh_offset); + len = (size_t)GET_LE(&sec->sh_size); + + if (offset + len > data_len) + fail("section to extract overruns input data"); + + fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len); + BITSFUNC(copy)(outfile, data + offset, len); + fprintf(outfile, "\n};\n\n"); +} + static void BITSFUNC(go)(void *raw_addr, size_t raw_len, void *stripped_addr, size_t stripped_len, FILE *outfile, const char *image_name) @@ -16,7 +51,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, int i; unsigned long j; ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, - *alt_sec = NULL; + *alt_sec = NULL, *extable_sec = NULL; ELF(Dyn) *dyn = 0, *dyn_end = 0; const char *secstrings; INT_BITS syms[NSYMS] = {}; @@ -78,6 +113,8 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, if (!strcmp(secstrings + GET_LE(&sh->sh_name), ".altinstructions")) alt_sec = sh; + if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table")) + extable_sec = sh; } if (!symtab_hdr) @@ -157,6 +194,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, (int)((unsigned char *)stripped_addr)[j]); } fprintf(outfile, "\n};\n\n"); + if (extable_sec) + BITSFUNC(extract)(raw_addr, raw_len, outfile, + extable_sec, "extable"); fprintf(outfile, "const struct vdso_image %s = {\n", image_name); fprintf(outfile, "\t.data = raw_data,\n"); @@ -167,6 +207,14 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, fprintf(outfile, "\t.alt_len = %lu,\n", (unsigned long)GET_LE(&alt_sec->sh_size)); } + if (extable_sec) { + fprintf(outfile, "\t.extable_base = %lu,\n", + (unsigned long)GET_LE(&extable_sec->sh_offset)); + fprintf(outfile, "\t.extable_len = %lu,\n", + (unsigned long)GET_LE(&extable_sec->sh_size)); + fprintf(outfile, "\t.extable = extable,\n"); + } + for (i = 0; i < NSYMS; i++) { if (required_syms[i].export && syms[i]) fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n", diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S new file mode 100644 index 
000000000000..9d54ae436ef4 --- /dev/null +++ b/arch/x86/entry/vdso/vsgx.S @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include + +#include "extable.h" + +/* Relative to %rbp. */ +#define SGX_ENCLAVE_OFFSET_OF_RUN 16 + +/* The offsets relative to struct sgx_enclave_run. */ +#define SGX_ENCLAVE_RUN_TCS 0 +#define SGX_ENCLAVE_RUN_LEAF 8 +#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12 +#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14 +#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16 +#define SGX_ENCLAVE_RUN_USER_HANDLER 24 +#define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */ +#define SGX_ENCLAVE_RUN_RESERVED_START 40 +#define SGX_ENCLAVE_RUN_RESERVED_END 256 + +.code64 +.section .text, "ax" + +ENTRY(__vdso_sgx_enter_enclave) + /* Prolog */ + .cfi_startproc + push %rbp + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rbp, 0 + mov %rsp, %rbp + .cfi_def_cfa_register %rbp + push %rbx + .cfi_rel_offset %rbx, -8 + + mov %ecx, %eax +.Lenter_enclave: + /* EENTER <= function <= ERESUME */ + cmp $EENTER, %eax + jb .Linvalid_input + cmp $ERESUME, %eax + ja .Linvalid_input + + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx + + /* Validate that the reserved area contains only zeros. */ + mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx +1: + cmpq $0, (%rcx, %rbx) + jne .Linvalid_input + add $8, %rbx + cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx + jne 1b + + /* Load TCS and AEP */ + mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx + lea .Lasync_exit_pointer(%rip), %rcx + + /* Single ENCLU serving as both EENTER and AEP (ERESUME) */ +.Lasync_exit_pointer: +.Lenclu_eenter_eresume: + enclu + + /* EEXIT jumps here unless the enclave is doing something fancy. */ + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx + + /* Set exit_reason. */ + movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx) + + /* Invoke userspace's exit handler if one was provided. */ +.Lhandle_exit: + cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx) + jne .Linvoke_userspace_handler + + /* Success, in the sense that ENCLU was attempted. */ + xor %eax, %eax + +.Lout: + pop %rbx + leave + .cfi_def_cfa %rsp, 8 + ret + + /* The out-of-line code runs with the pre-leave stack frame. */ + .cfi_def_cfa %rbp, 16 + +.Linvalid_input: + mov $(-EINVAL), %eax + jmp .Lout + +.Lhandle_exception: + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx + + /* Set the exception info. */ + mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx) + mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx) + mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx) + mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx) + jmp .Lhandle_exit + +.Linvoke_userspace_handler: + /* Pass the untrusted RSP (at exit) to the callback via %rcx. */ + mov %rsp, %rcx + + /* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */ + mov %rbx, %rax + + /* Save the untrusted RSP offset in %rbx (non-volatile register). */ + mov %rsp, %rbx + and $0xf, %rbx + + /* + * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned + * _after_ pushing the parameters on the stack, hence the bonus push. + */ + and $-0x10, %rsp + push %rax + + /* Push the struct sgx_enclave_run pointer as a param to the callback. */ + push %rax + + /* Clear RFLAGS.DF per x86_64 ABI */ + cld + + /* + * Load the callback pointer to %rax and lfence for LVI (load value + * injection) protection before making the call. + */ + mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax + lfence + call *%rax + + /* Undo the post-exit %rsp adjustment. 
*/ + lea 0x10(%rsp, %rbx), %rsp + + /* + * If the return from callback is zero or negative, return immediately, + * else re-execute ENCLU with the positive return value interpreted as + * the requested ENCLU function. + */ + cmp $0, %eax + jle .Lout + jmp .Lenter_enclave + + .cfi_endproc + +_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception) + +ENDPROC(__vdso_sgx_enter_enclave) diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig index 9a7a1446cb3a..4a809c6cbd2f 100644 --- a/arch/x86/events/Kconfig +++ b/arch/x86/events/Kconfig @@ -10,11 +10,11 @@ config PERF_EVENTS_INTEL_UNCORE available on NehalemEX and more modern processors. config PERF_EVENTS_INTEL_RAPL - tristate "Intel rapl performance events" - depends on PERF_EVENTS && CPU_SUP_INTEL && PCI + tristate "Intel/AMD rapl performance events" + depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI default y ---help--- - Include support for Intel rapl performance events for power + Include support for Intel and AMD rapl performance events for power monitoring on modern processors. config PERF_EVENTS_INTEL_CSTATE diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile index 9e07f554333f..726e83c0a31a 100644 --- a/arch/x86/events/Makefile +++ b/arch/x86/events/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += core.o probe.o +obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += rapl.o obj-y += amd/ obj-$(CONFIG_X86_LOCAL_APIC) += msr.o obj-$(CONFIG_CPU_SUP_INTEL) += intel/ diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 3ea8056148d8..7198b5f11706 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -14,6 +14,10 @@ static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp); static unsigned long perf_nmi_window; +/* AMD Event 0xFFF: Merge. 
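Stepping back to the completed __vdso_sgx_enter_enclave() above: a user-space caller would look roughly like the sketch below. The struct mirrors the SGX_ENCLAVE_RUN_* offsets and the prototype follows the asm (ENCLU function code in %ecx, the run struct as the lone stack argument); symbol lookup in the vDSO is omitted and the exact uapi naming is an assumption:

#include <stdint.h>

#define ENCLU_EENTER 2  /* ENCLU leaf numbers, assumed from the SGX spec */
#define ENCLU_EEXIT  4

struct sgx_enclave_run {                /* mirrors the offsets above */
        uint64_t tcs;                   /*   0 */
        uint32_t leaf;                  /*   8 */
        uint16_t exception_vector;      /*  12 */
        uint16_t exception_error_code;  /*  14 */
        uint64_t exception_addr;        /*  16 */
        uint64_t user_handler;          /*  24, optional exit callback */
        uint64_t user_data;             /*  32, not used by the vDSO */
        uint8_t  reserved[216];         /*  40..255, must be zero */
};

typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi,
                                        unsigned long rdx, unsigned int leaf,
                                        unsigned long r8, unsigned long r9,
                                        struct sgx_enclave_run *run);

int run_enclave(vdso_sgx_enter_enclave_t enter_enclave, uint64_t tcs)
{
        struct sgx_enclave_run run = { .tcs = tcs };
        int ret = enter_enclave(0, 0, 0, ENCLU_EENTER, 0, 0, &run);

        if (ret)
                return ret;     /* e.g. -EINVAL from the input checks */
        if (run.leaf != ENCLU_EEXIT)
                return -1;      /* the exception_* fields are now valid */
        return 0;
}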
Used with Large Increment per Cycle events */ +#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL) +#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE) + static __initconst const u64 amd_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -336,6 +340,9 @@ static int amd_core_hw_config(struct perf_event *event) else if (event->attr.exclude_guest) event->hw.config |= AMD64_EVENTSEL_HOSTONLY; + if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw)) + event->hw.flags |= PERF_X86_EVENT_PAIR; + return 0; } @@ -890,6 +897,15 @@ amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx, return &unconstrained; } +static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (is_counter_pair(hwc)) + --cpuc->n_pair; +} + static ssize_t amd_event_sysfs_show(char *page, u64 config) { u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | @@ -977,6 +993,8 @@ static int __init amd_core_pmu_init(void) PERF_X86_EVENT_PAIR); x86_pmu.get_event_constraints = amd_get_event_constraints_f17h; + x86_pmu.put_event_constraints = amd_put_event_constraints_f17h; + x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE; x86_pmu.flags |= PMU_FL_PAIR; } diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 26c36357c4c9..39169885adfa 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -89,6 +89,7 @@ struct perf_ibs { u64 max_period; unsigned long offset_mask[1]; int offset_max; + unsigned int fetch_count_reset_broken : 1; struct cpu_perf_ibs __percpu *pcpu; struct attribute **format_attrs; @@ -334,11 +335,15 @@ static u64 get_ibs_op_count(u64 config) { u64 count = 0; + /* + * If the internal 27-bit counter rolled over, the count is MaxCnt + * and the lower 7 bits of CurCnt are randomized. + * Otherwise CurCnt has the full 27-bit current counter value. + */ if (config & IBS_OP_VAL) - count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */ - - if (ibs_caps & IBS_CAPS_RDWROPCNT) - count += (config & IBS_OP_CUR_CNT) >> 32; + count = (config & IBS_OP_MAX_CNT) << 4; + else if (ibs_caps & IBS_CAPS_RDWROPCNT) + count = (config & IBS_OP_CUR_CNT) >> 32; return count; } @@ -363,7 +368,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 config) { - wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask); + u64 tmp = hwc->config | config; + + if (perf_ibs->fetch_count_reset_broken) + wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); + + wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); } /* @@ -626,18 +636,24 @@ fail: perf_ibs->offset_max, offset + 1); } while (offset < offset_max); + /* + * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately + * depending on their availability. + * Can't add to offset_max as they are staggered + */ if (event->attr.sample_type & PERF_SAMPLE_RAW) { - /* - * Read IbsBrTarget and IbsOpData4 separately - * depending on their availability. 
- * Can't add to offset_max as they are staggered - */ - if (ibs_caps & IBS_CAPS_BRNTRGT) { - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); - size++; + if (perf_ibs == &perf_ibs_op) { + if (ibs_caps & IBS_CAPS_BRNTRGT) { + rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); + size++; + } + if (ibs_caps & IBS_CAPS_OPDATA4) { + rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + size++; + } } - if (ibs_caps & IBS_CAPS_OPDATA4) { - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { + rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); size++; } } @@ -733,6 +749,13 @@ static __init void perf_event_ibs_init(void) { struct attribute **attr = ibs_op_format_attrs; + /* + * Some chips fail to reset the fetch count when it is written; instead + * they need a 0-1 transition of IbsFetchEn. + */ + if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18) + perf_ibs_fetch.fetch_count_reset_broken = 1; + perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); if (ibs_caps & IBS_CAPS_OPCNT) { diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index fb616203ce42..6a98a7651621 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = { }; struct amd_iommu_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *event; }; -static ssize_t _iommu_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t _iommu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct amd_iommu_event_desc *event = container_of(attr, struct amd_iommu_event_desc, attr); @@ -379,7 +379,7 @@ static __init int _init_events_attrs(void) while (amd_iommu_v2_event_descs[i].attr.attr.name) i++; - attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); + attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return -ENOMEM; diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 4d867a752f0e..76400c052b0e 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -180,6 +180,31 @@ static void amd_uncore_del(struct perf_event *event, int flags) hwc->idx = -1; } +/* + * Convert logical CPU number to L3 PMC Config ThreadMask format + */ +static u64 l3_thread_slice_mask(int cpu) +{ + u64 thread_mask, core = topology_core_id(cpu); + unsigned int shift, thread = 0; + + if (topology_smt_supported() && !topology_is_primary_thread(cpu)) + thread = 1; + + if (boot_cpu_data.x86 <= 0x18) { + shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread; + thread_mask = BIT_ULL(shift); + + return AMD64_L3_SLICE_MASK | thread_mask; + } + + core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK; + shift = AMD64_L3_THREAD_SHIFT + thread; + thread_mask = BIT_ULL(shift); + + return AMD64_L3_EN_ALL_SLICES | core | thread_mask; +} + static int amd_uncore_event_init(struct perf_event *event) { struct amd_uncore *uncore; @@ -203,18 +228,11 @@ static int amd_uncore_event_init(struct perf_event *event) return -EINVAL; /* - * SliceMask and ThreadMask need to be set for certain L3 events in - * Family 17h. For other events, the two fields do not affect the count. + * SliceMask and ThreadMask need to be set for certain L3 events. + * For other events, the two fields do not affect the count. 
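As a worked example of l3_thread_slice_mask() above (the constants are assumed to match perf_event.h; the topology values are invented):

#include <stdio.h>

#define AMD64_L3_SLICE_MASK   (0xfULL << 48)  /* assumption, per perf_event.h */
#define AMD64_L3_THREAD_SHIFT 56

int main(void)
{
        unsigned long long core = 5, thread = 1;  /* hypothetical SMT sibling */
        unsigned int shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;

        /* Pre-F19h case: all four slices plus exactly one ThreadMask bit. */
        printf("config |= %#llx\n", AMD64_L3_SLICE_MASK | (1ULL << shift));
        return 0;
}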
*/ - if (l3_mask && is_llc_event(event)) { - int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4); - - if (smp_num_siblings > 1) - thread += cpu_data(event->cpu).apicid & 1; - - hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) & - AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK; - } + if (l3_mask && is_llc_event(event)) + hwc->config |= l3_thread_slice_mask(event->cpu); uncore = event_to_amd_uncore(event); if (!uncore) @@ -520,9 +538,9 @@ static int __init amd_uncore_init(void) if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) return -ENODEV; - if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86 >= 0x17) { /* - * For F17h or F18h, the Northbridge counters are + * For F17h and above, the Northbridge counters are * repurposed as Data Fabric counters. Also, L3 * counters are supported too. The PMUs are exported * based on family as either L2 or L3 and NB or DF. diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e622158f5659..94794c41b285 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -70,12 +70,14 @@ u64 x86_perf_event_update(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int shift = 64 - x86_pmu.cntval_bits; u64 prev_raw_count, new_raw_count; - int idx = hwc->idx; u64 delta; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event) + return x86_pmu.update_topdown_event(event); + /* * Careful: an NMI might modify the previous event value. * @@ -617,6 +619,7 @@ void x86_pmu_disable_all(void) int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { + struct hw_perf_event *hwc = &cpuc->events[idx]->hw; u64 val; if (!test_bit(idx, cpuc->active_mask)) @@ -626,6 +629,8 @@ void x86_pmu_disable_all(void) continue; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(x86_pmu_config_addr(idx), val); + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(idx + 1), 0); } } @@ -698,7 +703,7 @@ struct sched_state { int counter; /* counter index */ int unassigned; /* number of events to be assigned left */ int nr_gp; /* number of GP counters used */ - unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + u64 used; }; /* Total max is X86_PMC_IDX_MAX, but we are O(n!) 
limited */ @@ -755,8 +760,12 @@ static bool perf_sched_restore_state(struct perf_sched *sched) sched->saved_states--; sched->state = sched->saved[sched->saved_states]; - /* continue with next counter: */ - clear_bit(sched->state.counter++, sched->state.used); + /* this assignment didn't work out */ + /* XXX broken vs EVENT_PAIR */ + sched->state.used &= ~BIT_ULL(sched->state.counter); + + /* try the next one */ + sched->state.counter++; return true; } @@ -781,20 +790,32 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { idx = INTEL_PMC_IDX_FIXED; for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { - if (!__test_and_set_bit(idx, sched->state.used)) - goto done; + u64 mask = BIT_ULL(idx); + + if (sched->state.used & mask) + continue; + + sched->state.used |= mask; + goto done; } } /* Grab the first unused counter starting with idx */ idx = sched->state.counter; for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { - if (!__test_and_set_bit(idx, sched->state.used)) { - if (sched->state.nr_gp++ >= sched->max_gp) - return false; + u64 mask = BIT_ULL(idx); - goto done; - } + if (c->flags & PERF_X86_EVENT_PAIR) + mask |= mask << 1; + + if (sched->state.used & mask) + continue; + + if (sched->state.nr_gp++ >= sched->max_gp) + return false; + + sched->state.used |= mask; + goto done; } return false; @@ -871,12 +892,10 @@ EXPORT_SYMBOL_GPL(perf_assign_events); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { struct event_constraint *c; - unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct perf_event *e; int n0, i, wmin, wmax, unsched = 0; struct hw_perf_event *hwc; - - bitmap_zero(used_mask, X86_PMC_IDX_MAX); + u64 used_mask = 0; /* * Compute the number of events already present; see x86_pmu_add(), @@ -919,6 +938,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) * fastpath, try to reuse previous register */ for (i = 0; i < n; i++) { + u64 mask; + hwc = &cpuc->event_list[i]->hw; c = cpuc->event_constraint[i]; @@ -930,11 +951,16 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (!test_bit(hwc->idx, c->idxmsk)) break; + mask = BIT_ULL(hwc->idx); + if (is_counter_pair(hwc)) + mask |= mask << 1; + /* not already used */ - if (test_bit(hwc->idx, used_mask)) + if (used_mask & mask) break; - __set_bit(hwc->idx, used_mask); + used_mask |= mask; + if (assign) assign[i] = hwc->idx; } @@ -957,6 +983,15 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) READ_ONCE(cpuc->excl_cntrs->exclusive_present)) gpmax /= 2; + /* + * Reduce the amount of available counters to allow fitting + * the extra Merge events needed by large increment events. + */ + if (x86_pmu.flags & PMU_FL_PAIR) { + gpmax = x86_pmu.num_counters - cpuc->n_pair; + WARN_ON(gpmax <= 0); + } + unsched = perf_assign_events(cpuc->event_constraint, n, wmin, wmax, gpmax, assign); } @@ -997,6 +1032,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) return unsched ? 
-EINVAL : 0; } +static int add_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) { + if (cpuc->n_metric == INTEL_TD_METRIC_NUM) + return -EINVAL; + cpuc->n_metric++; + cpuc->n_txn_metric++; + } + + return 0; +} + +static void del_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) + cpuc->n_metric--; +} + +static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, + int max_count, int n) +{ + + if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) + return -EINVAL; + + if (n >= max_count + cpuc->n_metric) + return -EINVAL; + + cpuc->event_list[n] = event; + if (is_counter_pair(&event->hw)) + cpuc->n_pair++; + + return 0; +} + /* * dogrp: true if must collect siblings events (group) * returns total number of events and error code @@ -1033,24 +1105,25 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, } if (is_x86_event(leader)) { - if (n >= max_count) + if (collect_event(cpuc, leader, max_count, n)) return -EINVAL; - cpuc->event_list[n] = leader; n++; + if (is_counter_pair(&leader->hw)) + cpuc->n_pair++; } if (!dogrp) return n; for_each_sibling_event(event, leader) { - if (!is_x86_event(event) || - event->state <= PERF_EVENT_STATE_OFF) + if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; - if (n >= max_count) + if (collect_event(cpuc, event, max_count, n)) return -EINVAL; - cpuc->event_list[n] = event; n++; + if (is_counter_pair(&event->hw)) + cpuc->n_pair++; } return n; } @@ -1059,22 +1132,36 @@ static inline void x86_assign_hw_event(struct perf_event *event, struct cpu_hw_events *cpuc, int i) { struct hw_perf_event *hwc = &event->hw; + int idx; - hwc->idx = cpuc->assign[i]; + idx = hwc->idx = cpuc->assign[i]; hwc->last_cpu = smp_processor_id(); hwc->last_tag = ++cpuc->tags[i]; - if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) { + switch (hwc->idx) { + case INTEL_PMC_IDX_FIXED_BTS: + case INTEL_PMC_IDX_FIXED_VLBR: hwc->config_base = 0; hwc->event_base = 0; - } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) { + break; + + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: + /* All the metric events are mapped onto the fixed counter 3. */ + idx = INTEL_PMC_IDX_FIXED_SLOTS; + /* fall through */ + case INTEL_PMC_IDX_FIXED ... 
INTEL_PMC_IDX_FIXED_BTS-1: hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED); - hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30; - } else { + hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + + (idx - INTEL_PMC_IDX_FIXED); + hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | + INTEL_PMC_FIXED_RDPMC_BASE; + break; + + default: hwc->config_base = x86_pmu_config_addr(hwc->idx); hwc->event_base = x86_pmu_event_addr(hwc->idx); hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); + break; } } @@ -1195,9 +1282,13 @@ int x86_perf_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0, idx = hwc->idx; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && + x86_pmu.set_topdown_event_period) + return x86_pmu.set_topdown_event_period(event); + /* * If we are way outside a reasonable range then just skip forward: */ @@ -1236,6 +1327,13 @@ int x86_perf_event_set_period(struct perf_event *event) wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); + /* + * Clear the Merge event counter's upper 16 bits since + * we currently declare a 48-bit counter width + */ + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_event_addr(idx + 1), 0); + /* * Due to an erratum on certain CPUs we need * a second write to be sure the register @@ -1475,6 +1573,8 @@ static void x86_pmu_del(struct perf_event *event, int flags) } cpuc->event_constraint[i-1] = NULL; --cpuc->n_events; + if (x86_pmu.intel_cap.perf_metrics) + del_nr_metric_event(cpuc, event); perf_event_update_userpage(event); @@ -1904,6 +2004,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) perf_pmu_disable(pmu); __this_cpu_write(cpu_hw_events.n_txn, 0); + __this_cpu_write(cpu_hw_events.n_txn_metric, 0); } /* @@ -1929,6 +2030,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) */ __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); + __this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric)); perf_pmu_enable(pmu); } @@ -2155,17 +2257,15 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m static int x86_pmu_event_idx(struct perf_event *event) { - int idx = event->hw.idx; + struct hw_perf_event *hwc = &event->hw; - if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return 0; - if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { - idx -= INTEL_PMC_IDX_FIXED; - idx |= 1 << 30; - } - - return idx + 1; + if (is_metric_idx(hwc->idx)) + return INTEL_PMC_FIXED_RDPMC_METRICS + 1; + else + return hwc->event_base_rdpmc + 1; } static ssize_t get_attr_rdpmc(struct device *cdev, diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile index 3468b0c1dc7c..e67a5886336c 100644 --- a/arch/x86/events/intel/Makefile +++ b/arch/x86/events/intel/Makefile @@ -2,8 +2,6 @@ obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o -obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o -intel-rapl-perf-objs := rapl.o obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o diff --git 
a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index c531e3f3269e..f0ac975f3d22 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -243,17 +243,22 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { static struct event_constraint intel_icl_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ - INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */ + FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ - INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ + INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ + INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), @@ -309,6 +314,12 @@ EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles, EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale, "4", "2"); +EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4"); +EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80"); +EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81"); +EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82"); +EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83"); + static struct attribute *snb_events_attrs[] = { EVENT_PTR(td_slots_issued), EVENT_PTR(td_slots_retired), @@ -1892,8 +1903,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ - INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), - INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1), + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), EVENT_EXTRA_END }; @@ -2129,43 +2140,86 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) +static inline bool event_is_checkpointed(struct perf_event *event) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; +} + +static inline void intel_set_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + if (event->attr.exclude_host) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + if (event->attr.exclude_guest) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); + if (event_is_checkpointed(event)) + __set_bit(idx, (unsigned long 
*)&cpuc->intel_cp_status); +} + +static inline void intel_clear_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); +} + +static void intel_pmu_disable_fixed(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; u64 ctrl_val, mask; + int idx = hwc->idx; - mask = 0xfULL << (idx * 4); + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * When there are other active TopDown events, + * don't disable the fixed counter 3. + */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + + intel_clear_masks(event, idx); + + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; wrmsrl(hwc->config_base, ctrl_val); } -static inline bool event_is_checkpointed(struct perf_event *event) -{ - return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; -} - static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx = hwc->idx; - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: + intel_clear_masks(event, idx); + x86_pmu_disable_event(event); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: + intel_pmu_disable_fixed(event); + break; + case INTEL_PMC_IDX_FIXED_BTS: intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); return; + case INTEL_PMC_IDX_FIXED_VLBR: + intel_clear_masks(event, idx); + break; + default: + intel_clear_masks(event, idx); + pr_warn("Failed to disable the event with invalid index %d\n", + idx); + return; } - cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); - cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); - cpuc->intel_cp_status &= ~(1ull << hwc->idx); - - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) - intel_pmu_disable_fixed(hwc); - else - x86_pmu_disable_event(event); - /* * Needs to be called after x86_pmu_disable_event, * so we don't trigger the event without PEBS bit set. @@ -2182,10 +2236,189 @@ static void intel_pmu_del_event(struct perf_event *event) intel_pmu_pebs_del(event); } +static int icl_set_topdown_event_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + + /* + * The values in PERF_METRICS MSR are derived from fixed counter 3. + * Software should start both registers, PERF_METRICS and fixed + * counter 3, from zero. + * Clear PERF_METRICS and Fixed counter 3 in initialization. + * After that, both MSRs will be cleared for each read. + * Don't need to clear them again. 
+ */ + if (left == x86_pmu.max_period) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + hwc->saved_slots = 0; + hwc->saved_metric = 0; + } + + if ((hwc->saved_slots) && is_slots_event(event)) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); + wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); + } + + perf_event_update_userpage(event); + + return 0; +} + +static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) +{ + u32 val; + + /* + * The metric is reported as an 8bit integer fraction + * summing up to 0xff. + * slots-in-metric = (Metric / 0xff) * slots + */ + val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; + return mul_u64_u32_div(slots, val, 0xff); +} + +static u64 icl_get_topdown_value(struct perf_event *event, + u64 slots, u64 metrics) +{ + int idx = event->hw.idx; + u64 delta; + + if (is_metric_idx(idx)) + delta = icl_get_metrics_event_value(metrics, slots, idx); + else + delta = slots; + + return delta; +} + +static void __icl_update_topdown_event(struct perf_event *event, + u64 slots, u64 metrics, + u64 last_slots, u64 last_metrics) +{ + u64 delta, last = 0; + + delta = icl_get_topdown_value(event, slots, metrics); + if (last_slots) + last = icl_get_topdown_value(event, last_slots, last_metrics); + + /* + * The 8bit integer fraction of a metric may not be accurate, + * especially when the change is very small. + * For example, if only a few bad_spec events happen, the fraction + * may be reduced from 1 to 0. If so, the bad_spec event value + * will be 0, which is definitely less than the last value. + * Avoid updating event->count in this case. + */ + if (delta > last) { + delta -= last; + local64_add(delta, &event->count); + } +} + +static void update_saved_topdown_regs(struct perf_event *event, + u64 slots, u64 metrics) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + int idx; + + event->hw.saved_slots = slots; + event->hw.saved_metric = metrics; + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + other->hw.saved_slots = slots; + other->hw.saved_metric = metrics; + } +} + +/* + * Update all active Topdown events. + * + * The PERF_METRICS and Fixed counter 3 are read separately. The values may be + * modified by an NMI. The PMU has to be disabled before calling this function. + */ +static u64 icl_update_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + u64 slots, metrics; + bool reset = true; + int idx; + + /* read Fixed counter 3 */ + rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); + if (!slots) + return 0; + + /* read PERF_METRICS */ + rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + __icl_update_topdown_event(other, slots, metrics, + event ? event->hw.saved_slots : 0, + event ? event->hw.saved_metric : 0); + } + + /* + * Check and update this event, which may have been cleared + * in active_mask, e.g. by x86_pmu_stop() + */ + if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { + __icl_update_topdown_event(event, slots, metrics, + event->hw.saved_slots, + event->hw.saved_metric); + + /* + * In x86_pmu_stop(), the event is cleared in active_mask first, + * then the delta is drained, which indicates a context switch for + * counting. 
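Numerically, each metric is an 8-bit fraction of 0xff that gets scaled by the slots count, per icl_get_metrics_event_value() above. A stand-alone illustration with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical PERF_METRICS: be=0x13 fe=0x1f bad=0x26 ret=0xa7,
         * the four bytes summing to 0xff. */
        uint64_t metrics = 0x131f26a7, slots = 1000000;

        for (int idx = 0; idx < 4; idx++) {
                uint32_t frac = (metrics >> (idx * 8)) & 0xff;

                /* slots-in-metric = (fraction / 0xff) * slots */
                printf("metric %d: %llu slots\n", idx,
                       (unsigned long long)(slots * frac / 0xff));
        }
        return 0;
}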
+ * Save the metric and slots values for the context switch. + * There is no need to reset PERF_METRICS and Fixed counter 3, + * because the values will be restored on the next schedule-in. + */ + update_saved_topdown_regs(event, slots, metrics); + reset = false; + } + + if (reset) { + /* The fixed counter 3 has to be written before the PERF_METRICS. */ + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + if (event) + update_saved_topdown_regs(event, 0, 0); + } + + return slots; +} + +static void intel_pmu_read_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* Only need to call update_topdown_event() once for group read. */ + if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && + !is_slots_event(event)) + return; + + perf_pmu_disable(event->pmu); + x86_pmu.update_topdown_event(event); + perf_pmu_enable(event->pmu); +} + static void intel_pmu_read_event(struct perf_event *event) { if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_auto_reload_read(event); + else if (is_topdown_count(event) && x86_pmu.update_topdown_event) + intel_pmu_read_topdown_event(event); else x86_perf_event_update(event); } @@ -2193,8 +2426,22 @@ static void intel_pmu_read_event(struct perf_event *event) static void intel_pmu_enable_fixed(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; + + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * When there are other active TopDown events, + * don't enable the fixed counter 3 again. + */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + + intel_set_masks(event, idx); /* * Enable IRQ generation (0x8), if not PEBS, @@ -2214,6 +2461,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) bits |= 0x4; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); @@ -2231,33 +2479,32 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { - if (!__this_cpu_read(cpu_hw_events.enabled)) - return; - - intel_pmu_enable_bts(hwc->config); - return; - } - - if (event->attr.exclude_host) - cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); - if (event->attr.exclude_guest) - cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); - - if (unlikely(event_is_checkpointed(event))) - cpuc->intel_cp_status |= (1ull << hwc->idx); + int idx = hwc->idx; if (unlikely(event->attr.precise_ip)) intel_pmu_pebs_enable(event); - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: + intel_set_masks(event, idx); + __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... 
INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); - return; + break; + case INTEL_PMC_IDX_FIXED_BTS: + if (!__this_cpu_read(cpu_hw_events.enabled)) + return; + intel_pmu_enable_bts(hwc->config); + break; + case INTEL_PMC_IDX_FIXED_VLBR: + intel_set_masks(event, idx); + break; + default: + pr_warn("Failed to enable the event with invalid index %d\n", + idx); } - - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } static void intel_pmu_add_event(struct perf_event *event) @@ -2373,7 +2620,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) /* * PEBS overflow sets bit 62 in the global status register */ - if (__test_and_clear_bit(62, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { handled++; x86_pmu.drain_pebs(regs); status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; @@ -2382,7 +2629,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) /* * Intel PT */ - if (__test_and_clear_bit(55, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { handled++; if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && perf_guest_cbs->handle_intel_pt_intr)) @@ -2391,6 +2638,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) intel_pt_interrupt(); } + /* + * Intel Perf metrics + */ + if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { + handled++; + if (x86_pmu.update_topdown_event) + x86_pmu.update_topdown_event(NULL); + } + /* * Checkpointed counters can lead to 'spurious' PMIs because the * rollback caused by the PMI will have cleared the overflow status @@ -2595,6 +2851,20 @@ intel_bts_constraints(struct perf_event *event) return NULL; } +/* + * Note: matches a fake event, like Fixed2. + */ +static struct event_constraint * +intel_vlbr_constraints(struct perf_event *event) +{ + struct event_constraint *c = &vlbr_constraint; + + if (unlikely(constraint_match(c, event->hw.config))) + return c; + + return NULL; +} + static int intel_alt_er(int idx, u64 config) { int alt_idx = idx; @@ -2785,6 +3055,10 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct event_constraint *c; + c = intel_vlbr_constraints(event); + if (c) + return c; + c = intel_bts_constraints(event); if (c) return c; @@ -3309,6 +3583,56 @@ static int intel_pmu_hw_config(struct perf_event *event) if (event->attr.type != PERF_TYPE_RAW) return 0; + /* + * Config Topdown slots and metric events + * + * The slots event on Fixed Counter 3 can support sampling, + * which will be handled normally in x86_perf_event_update(). + * + * Metric events don't support sampling and require being paired + * with a slots event as group leader. When the slots event + * is used in a metrics group, it too cannot support sampling. + */ + if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) { + if (event->attr.config1 || event->attr.config2) + return -EINVAL; + + /* + * The TopDown metrics events and slots event don't + * support any filters. + */ + if (event->attr.config & X86_ALL_EVENT_FLAGS) + return -EINVAL; + + if (is_metric_event(event)) { + struct perf_event *leader = event->group_leader; + + /* The metric events don't support sampling. */ + if (is_sampling_event(event)) + return -EINVAL; + + /* The metric events require a slots group leader. 
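From user space, the grouping rule enforced here means the slots event must be opened first and each metric event attached as a non-sampling sibling. A hedged perf_event_open(2) sketch using the encodings from the EVENT_ATTR_STR() strings above (the umask lives in config bits 8-15):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(unsigned long long config, int group_fd)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = config;  /* event=0x00 plus umask << 8 */

        /* counting mode only: sampling is rejected for these events */
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
        int slots = open_raw(0x0400, -1);        /* slots, the group leader */
        int retiring = open_raw(0x8000, slots);  /* topdown-retiring sibling */

        return (slots < 0 || retiring < 0);
}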
*/ + if (!is_slots_event(leader)) + return -EINVAL; + + /* + * The leader/SLOTS must not be a sampling event for + * metric use; hardware requires it starts at 0 when used + * in conjunction with MSR_PERF_METRICS. + */ + if (is_sampling_event(leader)) + return -EINVAL; + + event->event_caps |= PERF_EV_CAP_SIBLING; + /* + * Only once we have a METRICs sibling do we + * need TopDown magic. + */ + leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; + event->hw.flags |= PERF_X86_EVENT_TOPDOWN; + } + } + if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) return 0; @@ -3729,6 +4053,17 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.counter_freezing) enable_counter_freeze(); + /* Disable perf metrics if any added CPU doesn't support it. */ + if (x86_pmu.intel_cap.perf_metrics) { + union perf_capabilities perf_cap; + + rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); + if (!perf_cap.perf_metrics) { + x86_pmu.intel_cap.perf_metrics = 0; + x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + } + } + if (!cpuc->shared_regs) return; @@ -3998,9 +4333,12 @@ static const struct x86_cpu_desc isolation_ucodes[] = { INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), @@ -4280,6 +4618,15 @@ static struct attribute *icl_events_attrs[] = { NULL, }; +static struct attribute *icl_td_events_attrs[] = { + EVENT_PTR(slots), + EVENT_PTR(td_retiring), + EVENT_PTR(td_bad_spec), + EVENT_PTR(td_fe_bound), + EVENT_PTR(td_be_bound), + NULL, +}; + static struct attribute *icl_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_abort), @@ -5056,10 +5403,13 @@ __init int intel_pmu_init(void) hsw_format_attr : nhm_format_attr; extra_skl_attr = skl_format_attr; mem_attr = icl_events_attrs; + td_attr = icl_td_events_attrs; tsx_attr = icl_tsx_events_attrs; - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02); + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); x86_pmu.lbr_pt_coexist = true; intel_pmu_pebs_data_source_skl(pmem); + x86_pmu.update_topdown_event = icl_update_topdown_event; + x86_pmu.set_topdown_event_period = icl_set_topdown_event_period; pr_cont("Icelake events, "); name = "icelake"; break; @@ -5115,6 +5465,15 @@ __init int intel_pmu_init(void) * counter, so do not extend mask to generic counters */ for_each_event_constraint(c, x86_pmu.event_constraints) { + /* + * Don't extend the topdown slots and metrics + * events to the generic counters. 
+ */ + if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { + c->weight = hweight64(c->idxmsk64); + continue; + } + if (c->cmask == FIXED_EVENT_FLAGS && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) { c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; @@ -5170,6 +5529,9 @@ __init int intel_pmu_init(void) if (x86_pmu.counter_freezing) x86_pmu.handle_irq = intel_pmu_handle_irq_v4; + if (x86_pmu.intel_cap.perf_metrics) + x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + return 0; } diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 4814c964692c..0b50119ea12c 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -107,14 +107,14 @@ MODULE_LICENSE("GPL"); #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __cstate_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __cstate_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __cstate_##_var##_show, NULL) static ssize_t cstate_get_attr_cpumask(struct device *dev, diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index e5ad97a82342..5965d341350c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -669,9 +669,7 @@ unlock: static inline void intel_pmu_drain_pebs_buffer(void) { - struct pt_regs regs; - - x86_pmu.drain_pebs(&regs); + x86_pmu.drain_pebs(NULL); } /* @@ -1736,6 +1734,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, struct x86_perf_regs perf_regs; struct pt_regs *regs = &perf_regs.regs; void *at = get_next_pebs_record_by_bit(base, top, bit); + struct pt_regs dummy_iregs; if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { /* @@ -1748,6 +1747,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, } else if (!intel_pmu_save_and_restart(event)) return; + if (!iregs) + iregs = &dummy_iregs; + while (count > 1) { setup_sample(event, iregs, at, &data, regs); perf_event_output(event, &data, regs); @@ -1757,16 +1759,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event, } setup_sample(event, iregs, at, &data, regs); - - /* - * All but the last records are processed. - * The last one is left to be able to call the overflow handler. - */ - if (perf_event_overflow(event, &data, regs)) { - x86_pmu_stop(event, 0); - return; + if (iregs == &dummy_iregs) { + /* + * The PEBS records may be drained in the non-overflow context, + * e.g., large PEBS + context switch. Perf should treat the + * last record the same as other PEBS records, and not + * invoke the generic overflow handler. + */ + perf_event_output(event, &data, regs); + } else { + /* + * All but the last records are processed. + * The last one is left to be able to call the overflow handler.
+ */ + if (perf_event_overflow(event, &data, regs)) + x86_pmu_stop(event, 0); } - } static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) @@ -1882,7 +1890,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) */ if (!pebs_status && cpuc->pebs_enabled && !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) - pebs_status = cpuc->pebs_enabled; + pebs_status = p->status = cpuc->pebs_enabled; bit = find_first_bit((unsigned long *)&pebs_status, x86_pmu.max_pebs_events); @@ -1904,7 +1912,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * that caused the PEBS record. It's called collision. * If collision happened, the record will be dropped. */ - if (p->status != (1ULL << bit)) { + if (pebs_status != (1ULL << bit)) { for_each_set_bit(i, (unsigned long *)&pebs_status, size) error[i]++; continue; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index ea54634eabf3..73dbd30f82d3 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -383,6 +383,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) wrmsrl(x86_pmu.lbr_tos, tos); task_ctx->lbr_stack_state = LBR_NONE; + + if (cpuc->lbr_select) + wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); } static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) @@ -415,6 +418,9 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) cpuc->last_task_ctx = task_ctx; cpuc->last_log_id = ++task_ctx->log_id; + + if (cpuc->lbr_select) + rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); } void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) @@ -462,6 +468,9 @@ void intel_pmu_lbr_add(struct perf_event *event) if (!x86_pmu.lbr_nr) return; + if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) + cpuc->lbr_select = 1; + cpuc->br_sel = event->hw.branch_reg.reg; if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) { @@ -509,6 +518,9 @@ void intel_pmu_lbr_del(struct perf_event *event) task_ctx->lbr_callstack_users--; } + if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) + cpuc->lbr_select = 0; + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) cpuc->lbr_pebs_users--; cpuc->lbr_users--; @@ -517,11 +529,19 @@ void intel_pmu_lbr_del(struct perf_event *event) perf_sched_cb_dec(event->ctx->pmu); } +static inline bool vlbr_exclude_host(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + return test_bit(INTEL_PMC_IDX_FIXED_VLBR, + (unsigned long *)&cpuc->intel_ctrl_guest_mask); +} + void intel_pmu_lbr_enable_all(bool pmi) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (cpuc->lbr_users) + if (cpuc->lbr_users && !vlbr_exclude_host()) __intel_pmu_lbr_enable(pmi); } @@ -529,7 +549,7 @@ void intel_pmu_lbr_disable_all(void) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (cpuc->lbr_users) + if (cpuc->lbr_users && !vlbr_exclude_host()) __intel_pmu_lbr_disable(); } @@ -669,7 +689,8 @@ void intel_pmu_lbr_read(void) * This could be smarter and actually check the event, * but this simple approach seems to work for now. 
*/ - if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users) + if (!cpuc->lbr_users || vlbr_exclude_host() || + cpuc->lbr_users == cpuc->lbr_pebs_users) return; if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) @@ -1311,3 +1332,27 @@ void intel_pmu_lbr_init_knl(void) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP) x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS; } + +/** + * x86_perf_get_lbr - get the LBR records information + * + * @lbr: the caller's memory to store the LBR records information + * + * Returns: 0 indicates the LBR info has been successfully obtained + */ +int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) +{ + int lbr_fmt = x86_pmu.intel_cap.lbr_format; + + lbr->nr = x86_pmu.lbr_nr; + lbr->from = x86_pmu.lbr_from; + lbr->to = x86_pmu.lbr_to; + lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? MSR_LBR_INFO_0 : 0; + + return 0; +} +EXPORT_SYMBOL_GPL(x86_perf_get_lbr); + +struct event_constraint vlbr_constraint = + __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR), + FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT); diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 86467f85c383..7fb1f904ab63 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -92,8 +92,8 @@ end: return map; } -ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +ssize_t uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct uncore_event_desc *event = container_of(attr, struct uncore_event_desc, attr); @@ -1470,6 +1470,12 @@ static const struct intel_uncore_init_fun icl_uncore_init __initconst = { .pci_init = skl_uncore_pci_init, }; +static const struct intel_uncore_init_fun icx_uncore_init __initconst = { + .cpu_init = icx_uncore_cpu_init, + .pci_init = icx_uncore_pci_init, + .mmio_init = icx_uncore_mmio_init, +}; + static const struct intel_uncore_init_fun snr_uncore_init __initconst = { .cpu_init = snr_uncore_cpu_init, .pci_init = snr_uncore_pci_init, @@ -1505,6 +1511,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE, icl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_D, icx_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_X, icx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init), {}, }; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index bbfdaa720b45..97bb76605f7a 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -144,7 +144,7 @@ struct intel_uncore_box { #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2 struct uncore_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *config; }; @@ -154,6 +154,7 @@ struct freerunning_counters { unsigned int box_offset; unsigned int num_counters; unsigned int bits; + unsigned *box_offsets; }; struct pci2phy_map { @@ -165,8 +166,8 @@ struct pci2phy_map { struct pci2phy_map *__find_pci2phy_map(int segment); int uncore_pcibus_to_physid(struct pci_bus *bus); -ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf); +ssize_t uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf); #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ { \ @@ -175,14 +176,14 @@ ssize_t uncore_event_show(struct kobject *kobj, } #define 
DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ -static struct kobj_attribute format_attr_##_var = \ +static struct device_attribute format_attr_##_var = \ __ATTR(_name, 0444, __uncore_##_var##_show, NULL) static inline bool uncore_pmc_fixed(int idx) @@ -310,7 +311,9 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, return pmu->type->freerunning[type].counter_base + pmu->type->freerunning[type].counter_offset * idx + - pmu->type->freerunning[type].box_offset * pmu->pmu_idx; + (pmu->type->freerunning[type].box_offsets ? + pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : + pmu->type->freerunning[type].box_offset * pmu->pmu_idx); } static inline @@ -545,6 +548,9 @@ void skx_uncore_cpu_init(void); int snr_uncore_pci_init(void); void snr_uncore_cpu_init(void); void snr_uncore_mmio_init(void); +int icx_uncore_pci_init(void); +void icx_uncore_cpu_init(void); +void icx_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index c37cb12d0ef6..aec6e63c6a04 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -110,6 +110,10 @@ #define ICL_UNC_CBO_0_PER_CTR0 0x702 #define ICL_UNC_CBO_MSR_OFFSET 0x8 +/* ICL ARB register */ +#define ICL_UNC_ARB_PER_CTR 0x3b1 +#define ICL_UNC_ARB_PERFEVTSEL 0x3b3 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); @@ -297,15 +301,21 @@ void skl_uncore_cpu_init(void) snb_uncore_arb.ops = &skl_uncore_msr_ops; } +static struct intel_uncore_ops icl_uncore_msr_ops = { + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + static struct intel_uncore_type icl_uncore_cbox = { .name = "cbox", - .num_counters = 4, + .num_counters = 2, .perf_ctr_bits = 44, .perf_ctr = ICL_UNC_CBO_0_PER_CTR0, .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, .event_mask = SNB_UNC_RAW_EVENT_MASK, .msr_offset = ICL_UNC_CBO_MSR_OFFSET, - .ops = &skl_uncore_msr_ops, + .ops = &icl_uncore_msr_ops, .format_group = &snb_uncore_format_group, }; @@ -334,13 +344,25 @@ static struct intel_uncore_type icl_uncore_clockbox = { .single_fixed = 1, .event_mask = SNB_UNC_CTL_EV_SEL_MASK, .format_group = &icl_uncore_clock_format_group, - .ops = &skl_uncore_msr_ops, + .ops = &icl_uncore_msr_ops, .event_descs = icl_uncore_events, }; +static struct intel_uncore_type icl_uncore_arb = { + .name = "arb", + .num_counters = 1, + .num_boxes = 1, + .perf_ctr_bits = 44, + .perf_ctr = ICL_UNC_ARB_PER_CTR, + .event_ctl = ICL_UNC_ARB_PERFEVTSEL, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .ops = &icl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + static struct intel_uncore_type *icl_msr_uncores[] = { &icl_uncore_cbox, - &snb_uncore_arb, + &icl_uncore_arb, &icl_uncore_clockbox, NULL, }; @@ -358,7 +380,6 @@ void icl_uncore_cpu_init(void) { uncore_msr_uncores = icl_msr_uncores; icl_uncore_cbox.num_boxes = icl_get_cbox_num(); - snb_uncore_arb.ops = &skl_uncore_msr_ops; } enum { diff --git a/arch/x86/events/intel/uncore_snbep.c 
b/arch/x86/events/intel/uncore_snbep.c index ad20220af303..0aa40798252b 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -382,6 +382,42 @@ #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF +/* ICX CHA */ +#define ICX_C34_MSR_PMON_CTR0 0xb68 +#define ICX_C34_MSR_PMON_CTL0 0xb61 +#define ICX_C34_MSR_PMON_BOX_CTL 0xb60 +#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65 + +/* ICX IIO */ +#define ICX_IIO_MSR_PMON_CTL0 0xa58 +#define ICX_IIO_MSR_PMON_CTR0 0xa51 +#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50 + +/* ICX IRP */ +#define ICX_IRP0_MSR_PMON_CTL0 0xa4d +#define ICX_IRP0_MSR_PMON_CTR0 0xa4b +#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a + +/* ICX M2PCIE */ +#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46 +#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41 +#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40 + +/* ICX UPI */ +#define ICX_UPI_PCI_PMON_CTL0 0x350 +#define ICX_UPI_PCI_PMON_CTR0 0x320 +#define ICX_UPI_PCI_PMON_BOX_CTL 0x318 +#define ICX_UPI_CTL_UMASK_EXT 0xffffff + +/* ICX M3UPI*/ +#define ICX_M3UPI_PCI_PMON_CTL0 0xd8 +#define ICX_M3UPI_PCI_PMON_CTR0 0xa8 +#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0 + +/* ICX IMC */ +#define ICX_NUMBER_IMC_CHN 2 +#define ICX_IMC_MEM_STRIDE 0x4 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); @@ -390,6 +426,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55"); DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); @@ -1093,7 +1130,6 @@ enum { SNBEP_PCI_QPI_PORT0_FILTER, SNBEP_PCI_QPI_PORT1_FILTER, BDX_PCI_QPI_PORT2_FILTER, - HSWEP_PCI_PCU_3, }; static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) @@ -2750,22 +2786,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = { NULL, }; +#define HSWEP_PCU_DID 0x2fc0 +#define HSWEP_PCU_CAPID4_OFFET 0x94 +#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3) + +static bool hswep_has_limit_sbox(unsigned int device) +{ + struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + u32 capid4; + + if (!dev) + return false; + + pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); + if (!hswep_get_chop(capid4)) + return true; + + return false; +} + void hswep_uncore_cpu_init(void) { - int pkg = boot_cpu_data.logical_proc_id; - if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; /* Detect 6-8 core systems with only two SBOXes */ - if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { - u32 capid4; - - pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3], - 0x94, &capid4); - if (((capid4 >> 6) & 0x3) == 0) - hswep_uncore_sbox.num_boxes = 2; - } + if (hswep_has_limit_sbox(HSWEP_PCU_DID)) + hswep_uncore_sbox.num_boxes = 2; uncore_msr_uncores = hswep_msr_uncores; } @@ -3028,11 +3075,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, SNBEP_PCI_QPI_PORT1_FILTER), }, - { /* PCU.3 (for Capability registers) */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), - .driver_data = 
UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - HSWEP_PCI_PCU_3), - }, { /* end: all zeroes */ } }; @@ -3124,27 +3166,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { EVENT_CONSTRAINT_END }; +#define BDX_PCU_DID 0x6fc0 + void bdx_uncore_cpu_init(void) { - int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id); - if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; - /* BDX-DE doesn't have SBOX */ - if (boot_cpu_data.x86_model == 86) { - uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; /* Detect systems with no SBOXes */ - } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { - struct pci_dev *pdev; - u32 capid4; + if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; - pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; - pci_read_config_dword(pdev, 0x94, &capid4); - if (((capid4 >> 6) & 0x3) == 0) - bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; - } hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } @@ -3365,11 +3398,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, BDX_PCI_QPI_PORT2_FILTER), }, - { /* PCU.3 (for Capability registers) */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, - HSWEP_PCI_PCU_3), - }, { /* end: all zeroes */ } }; @@ -4380,10 +4408,10 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id) return mc_dev; } -static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) +static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box, + unsigned int box_ctl, int mem_offset) { struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid); - unsigned int box_ctl = uncore_mmio_box_ctl(box); resource_size_t addr; u32 pci_dword; @@ -4393,7 +4421,7 @@ static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword); addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23; - pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword); + pci_read_config_dword(pdev, mem_offset, &pci_dword); addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12; addr += box_ctl; @@ -4405,6 +4433,12 @@ static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr); } +static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) +{ + __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), + SNR_IMC_MMIO_MEM0_OFFSET); +} + static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box) { u32 config; @@ -4545,3 +4579,477 @@ void snr_uncore_mmio_init(void) } /* end of SNR uncore support */ + +/* ICX uncore support */ + +static unsigned icx_cha_msr_offsets[] = { + 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, + 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, + 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, + 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe, + 0x1c, 0x2a, 0x38, 0x46, +}; + +static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN); + + if (tie_en) { + reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 + + icx_cha_msr_offsets[box->pmu->pmu_idx]; + reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; + 
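+ /* extra_reg slot 0 carries the caller's TID filter value; the filter register itself was chosen above from this box's MSR offset. */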
reg1->idx = 0; + } + + return 0; +} + +static struct intel_uncore_ops icx_uncore_chabox_ops = { + .init_box = ivbep_uncore_msr_init_box, + .disable_box = snbep_uncore_msr_disable_box, + .enable_box = snbep_uncore_msr_enable_box, + .disable_event = snbep_uncore_msr_disable_event, + .enable_event = snr_cha_enable_event, + .read_counter = uncore_msr_read_counter, + .hw_config = icx_cha_hw_config, +}; + +static struct intel_uncore_type icx_uncore_chabox = { + .name = "cha", + .num_counters = 4, + .perf_ctr_bits = 48, + .event_ctl = ICX_C34_MSR_PMON_CTL0, + .perf_ctr = ICX_C34_MSR_PMON_CTR0, + .box_ctl = ICX_C34_MSR_PMON_BOX_CTL, + .msr_offsets = icx_cha_msr_offsets, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, + .constraints = skx_uncore_chabox_constraints, + .ops = &icx_uncore_chabox_ops, + .format_group = &snr_uncore_chabox_format_group, +}; + +static unsigned icx_msr_offsets[] = { + 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, +}; + +static struct event_constraint icx_uncore_iio_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x02, 0x3), + UNCORE_EVENT_CONSTRAINT(0x03, 0x3), + UNCORE_EVENT_CONSTRAINT(0x83, 0x3), + UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), + UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_iio = { + .name = "iio", + .num_counters = 4, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_IIO_MSR_PMON_CTL0, + .perf_ctr = ICX_IIO_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, + .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .constraints = icx_uncore_iio_constraints, + .ops = &skx_uncore_iio_ops, + .format_group = &snr_uncore_iio_format_group, +}; + +static struct intel_uncore_type icx_uncore_irp = { + .name = "irp", + .num_counters = 2, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_IRP0_MSR_PMON_CTL0, + .perf_ctr = ICX_IRP0_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +static struct event_constraint icx_uncore_m2pcie_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x14, 0x3), + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_m2pcie = { + .name = "m2pcie", + .num_counters = 4, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0, + .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0, + .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .constraints = icx_uncore_m2pcie_constraints, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +enum perf_uncore_icx_iio_freerunning_type_id { + ICX_IIO_MSR_IOCLK, + ICX_IIO_MSR_BW_IN, + + ICX_IIO_FREERUNNING_TYPE_MAX, +}; + +static unsigned icx_iio_clk_freerunning_box_offsets[] = { + 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, +}; + +static unsigned icx_iio_bw_freerunning_box_offsets[] = { + 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0, +}; + +static struct freerunning_counters icx_iio_freerunning[] = { + [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets }, + [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets }, +}; + +static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = { + /* Free-Running IIO CLOCKS Counter 
*/ + INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), + /* Free-Running IIO BANDWIDTH IN Counters */ + INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type icx_uncore_iio_free_running = { + .name = "iio_free_running", + .num_counters = 9, + .num_boxes = 6, + .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX, + .freerunning = icx_iio_freerunning, + .ops = &skx_uncore_iio_freerunning_ops, + .event_descs = icx_uncore_iio_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *icx_msr_uncores[] = { + &skx_uncore_ubox, + &icx_uncore_chabox, + &icx_uncore_iio, + &icx_uncore_irp, + &icx_uncore_m2pcie, + &skx_uncore_pcu, + &icx_uncore_iio_free_running, + NULL, +}; + +/* + * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high) + * registers, which are located at Device 30, Function 3. + */ +#define ICX_CAPID6 0x9c +#define ICX_CAPID7 0xa0 + +static u64 icx_count_chabox(void) +{ + struct pci_dev *dev = NULL; + u64 caps = 0; + + dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev); + if (!dev) + goto out; + + pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps); + pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1); +out: + pci_dev_put(dev); + return hweight64(caps); +} + +void icx_uncore_cpu_init(void) +{ + u64 num_boxes = icx_count_chabox(); + + if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets))) + return; + icx_uncore_chabox.num_boxes = num_boxes; + uncore_msr_uncores = icx_msr_uncores; +} + +static struct intel_uncore_type icx_uncore_m2m = { + .name = "m2m", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .perf_ctr = SNR_M2M_PCI_PMON_CTR0, + .event_ctl = SNR_M2M_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, + .ops = &snr_m2m_uncore_pci_ops, + .format_group = &skx_uncore_format_group, +}; + +static struct attribute *icx_upi_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask_ext4.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL,
+}; + +static const struct attribute_group icx_upi_uncore_format_group = { + .name = "format", + .attrs = icx_upi_uncore_formats_attr, +}; + +static struct intel_uncore_type icx_uncore_upi = { + .name = "upi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = ICX_UPI_PCI_PMON_CTR0, + .event_ctl = ICX_UPI_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = ICX_UPI_CTL_UMASK_EXT, + .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, + .ops = &skx_upi_uncore_pci_ops, + .format_group = &icx_upi_uncore_format_group, +}; + +static struct event_constraint icx_uncore_m3upi_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x1c, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1f, 0x1), + UNCORE_EVENT_CONSTRAINT(0x40, 0x7), + UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), + UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), + UNCORE_EVENT_CONSTRAINT(0x50, 0x7), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_m3upi = { + .name = "m3upi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, + .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, + .constraints = icx_uncore_m3upi_constraints, + .ops = &ivbep_uncore_pci_ops, + .format_group = &skx_uncore_format_group, +}; + +enum { + ICX_PCI_UNCORE_M2M, + ICX_PCI_UNCORE_UPI, + ICX_PCI_UNCORE_M3UPI, +}; + +static struct intel_uncore_type *icx_pci_uncores[] = { + [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m, + [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi, + [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi, + NULL, +}; + +static const struct pci_device_id icx_uncore_pci_ids[] = { + { /* M2M 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0), + }, + { /* M2M 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1), + }, + { /* M2M 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2), + }, + { /* M2M 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3), + }, + { /* UPI Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0), + }, + { /* UPI Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1), + }, + { /* UPI Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2), + }, + { /* M3UPI Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0), + }, + { /* M3UPI Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1), + }, + { /* M3UPI Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver icx_uncore_pci_driver = { + .name = "icx_uncore", + .id_table = icx_uncore_pci_ids, +}; + +int icx_uncore_pci_init(void) +{ + /* ICX UBOX DID */ + int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID, + SKX_GIDNIDMAP, true); + + if (ret) + return ret; + + uncore_pci_uncores = icx_pci_uncores; + uncore_pci_driver = &icx_uncore_pci_driver; + return 0; +} + 
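The MMIO IMC init that follows decodes the linear PMU index into a (memory controller, channel) pair. A minimal sketch of that decomposition, not part of the patch itself, assuming two channels per controller (ICX_NUMBER_IMC_CHN) and a 0x4 config-space stride between controllers (ICX_IMC_MEM_STRIDE), both defined earlier in this file:

/* Illustrative helper only: mirrors the arithmetic in icx_uncore_imc_init_box() below. */
static void icx_imc_decode(int pmu_idx, int *mc, int *chn)
{
	/* The controller picks the BAR: mem_offset = *mc * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET. */
	*mc = pmu_idx / ICX_NUMBER_IMC_CHN;
	/* The channel picks the register block: box_ctl += *chn * mmio_offset. */
	*chn = pmu_idx % ICX_NUMBER_IMC_CHN;
}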
+static void icx_uncore_imc_init_box(struct intel_uncore_box *box) +{ + unsigned int box_ctl = box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN); + int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE + + SNR_IMC_MMIO_MEM0_OFFSET; + + __snr_uncore_mmio_init_box(box, box_ctl, mem_offset); +} + +static struct intel_uncore_ops icx_uncore_mmio_ops = { + .init_box = icx_uncore_imc_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = snr_uncore_mmio_disable_box, + .enable_box = snr_uncore_mmio_enable_box, + .disable_event = snr_uncore_mmio_disable_event, + .enable_event = snr_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct intel_uncore_type icx_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, + .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, + .event_descs = hswep_uncore_imc_events, + .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, + .event_ctl = SNR_IMC_MMIO_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, + .mmio_offset = SNR_IMC_MMIO_OFFSET, + .ops = &icx_uncore_mmio_ops, + .format_group = &skx_uncore_format_group, +}; + +enum perf_uncore_icx_imc_freerunning_type_id { + ICX_IMC_DCLK, + ICX_IMC_DDR, + ICX_IMC_DDRT, + + ICX_IMC_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters icx_imc_freerunning[] = { + [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, + [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 }, + [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 }, +}; + +static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = { + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), + + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) +{ + int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + + SNR_IMC_MMIO_MEM0_OFFSET; + + __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset); +} + +static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = { + .init_box = icx_uncore_imc_freerunning_init_box, + .exit_box = uncore_mmio_exit_box, + .read_counter = uncore_mmio_read_counter, + .hw_config = uncore_freerunning_hw_config, +}; + +static struct intel_uncore_type icx_uncore_imc_free_running = { + .name = "imc_free_running", + .num_counters = 5, + .num_boxes = 4, + .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX, + .freerunning = icx_imc_freerunning, + .ops = &icx_uncore_imc_freerunning_ops, + .event_descs = icx_uncore_imc_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *icx_mmio_uncores[] = { + &icx_uncore_imc, + &icx_uncore_imc_free_running, + NULL, +}; + +void icx_uncore_mmio_init(void) +{ + 
uncore_mmio_uncores = icx_mmio_uncores; +} + +/* end of ICX uncore support */ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 0ed910237c4d..5648023e3a65 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -78,6 +78,32 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode) #define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */ #define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */ #define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */ +#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */ +#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */ + +static inline bool is_topdown_count(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_TOPDOWN; +} + +static inline bool is_metric_event(struct perf_event *event) +{ + u64 config = event->attr.config; + + return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) && + ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) && + ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX); +} + +static inline bool is_slots_event(struct perf_event *event) +{ + return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS; +} + +static inline bool is_topdown_event(struct perf_event *event) +{ + return is_metric_event(event) || is_slots_event(event); +} struct amd_nb { int nb_id; /* NorthBridge id */ @@ -198,6 +224,7 @@ struct cpu_hw_events { they've never been enabled yet */ int n_txn; /* the # last events in the below arrays; added in the current transaction */ + int n_txn_metric; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; @@ -237,6 +264,7 @@ struct cpu_hw_events { u64 br_sel; struct x86_perf_task_context *last_task_ctx; int last_log_id; + int lbr_select; /* * Intel host/guest exclude bits @@ -267,12 +295,19 @@ struct cpu_hw_events { */ u64 tfa_shadow; + /* + * Perf Metrics + */ + /* number of accepted metrics events */ + int n_metric; + /* * AMD specific bits */ struct amd_nb *amd_nb; /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ u64 perf_ctr_virt_mask; + int n_pair; /* Large increment events */ void *kfree_on_online[X86_PERF_KFREE_MAX]; }; @@ -357,6 +392,19 @@ struct cpu_hw_events { #define FIXED_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) +/* + * The special metric counters do not actually exist. They are calculated from + * the combination of the FxCtr3 + MSR_PERF_METRICS. + * + * The special metric counters are mapped to a dummy offset for the scheduler. + * The sharing between multiple users of the same metric without multiplexing + * is not allowed, even though the hardware supports that in principle. 
+ */ + +#define METRIC_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \ + INTEL_ARCH_EVENT_MASK) + /* * Constraint on the Event code + UMask */ @@ -519,7 +567,7 @@ union perf_capabilities { */ u64 full_width_write:1; u64 pebs_baseline:1; - u64 pebs_metrics_available:1; + u64 perf_metrics:1; u64 pebs_output_pt_available:1; }; u64 capabilities; @@ -671,8 +719,8 @@ struct x86_pmu { /* * Intel LBR */ - unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ - int lbr_nr; /* hardware stack size */ + unsigned int lbr_tos, lbr_from, lbr_to, + lbr_nr; /* LBR base regs and size */ u64 lbr_sel_mask; /* LBR_SELECT valid bits */ const int *lbr_sel_map; /* lbr_select mappings */ bool lbr_double_abort; /* duplicated lbr aborts */ @@ -683,10 +731,17 @@ struct x86_pmu { */ atomic_t lbr_exclusive[x86_lbr_exclusive_max]; + /* + * Intel perf metrics + */ + u64 (*update_topdown_event)(struct perf_event *event); + int (*set_topdown_event_period)(struct perf_event *event); + /* * AMD bits */ unsigned int amd_nb_constraints : 1; + u64 perf_ctr_pair_en; /* * Extra registers for events @@ -711,6 +766,7 @@ struct x86_perf_task_context { u64 lbr_from[MAX_LBR_ENTRIES]; u64 lbr_to[MAX_LBR_ENTRIES]; u64 lbr_info[MAX_LBR_ENTRIES]; + u64 lbr_sel; int tos; int valid_lbrs; int lbr_callstack_users; @@ -832,6 +888,11 @@ int x86_pmu_hw_config(struct perf_event *event); void x86_pmu_disable_all(void); +static inline bool is_counter_pair(struct hw_perf_event *hwc) +{ + return hwc->flags & PERF_X86_EVENT_PAIR; +} + static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, u64 enable_mask) { @@ -839,6 +900,14 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, if (hwc->extra_reg.reg) wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); + + /* + * Add the enabled Merge event on the next counter + * if a large increment event is being enabled on this counter. + */ + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); + wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); } @@ -855,6 +924,9 @@ static inline void x86_pmu_disable_event(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; wrmsrl(hwc->config_base, hwc->config); + + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); } void x86_pmu_enable_event(struct perf_event *event); @@ -963,6 +1035,7 @@ void release_ds_buffers(void); void reserve_ds_buffers(void); extern struct event_constraint bts_constraint; +extern struct event_constraint vlbr_constraint; void intel_pmu_enable_bts(u64 config); diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/rapl.c similarity index 97% rename from arch/x86/events/intel/rapl.c rename to arch/x86/events/rapl.c index 5053a403e4ae..9050d7b8abc5 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/rapl.c @@ -1,11 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Support Intel RAPL energy consumption counters + * Support Intel/AMD RAPL energy consumption counters * Copyright (C) 2013 Google, Inc., Stephane Eranian * * Intel RAPL interface is specified in the IA-32 Manual Vol3b * section 14.7.1 (September 2013) * + * AMD RAPL interface for Fam17h is described in the public PPR: + * https://bugzilla.kernel.org/show_bug.cgi?id=206537 + * * RAPL provides more controls than just reporting energy consumption * however here we only expose the 3 energy consumption free running * counters (pp0, pkg, dram).
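As background for the counters this file exposes, a minimal user-space sketch of reading one RAPL energy counter through perf. The PMU type and event encoding must be read from /sys/bus/event_source/devices/power/ at runtime; the numeric values below are placeholders, not something this patch defines:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 23;		/* placeholder: cat /sys/bus/event_source/devices/power/type */
	attr.config = 0x02;	/* placeholder: cat .../power/events/energy-pkg */

	/* RAPL counters are per-socket: measure CPU 0, all tasks (pid == -1). */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;

	/* Multiply by .../power/events/energy-pkg.scale to convert to Joules. */
	printf("raw energy-pkg delta: %llu\n", (unsigned long long)count);
	return 0;
}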
@@ -58,8 +61,8 @@ #include #include #include -#include "../perf_event.h" -#include "../probe.h" +#include "perf_event.h" +#include "probe.h" MODULE_LICENSE("GPL"); @@ -90,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { * any other bit is reserved */ #define RAPL_EVENT_MASK 0xFFULL - -#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ - return sprintf(page, _format "\n"); \ -} \ -static struct kobj_attribute format_attr_##_var = \ - __ATTR(_name, 0444, __rapl_##_var##_show, NULL) - #define RAPL_CNTR_WIDTH 32 #define RAPL_EVENT_ATTR_STR(_name, v, str) \ @@ -430,7 +421,7 @@ static struct attribute_group rapl_pmu_events_group = { .attrs = attrs_empty, }; -DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); +PMU_FORMAT_ATTR(event, "config:0-7"); static struct attribute *rapl_formats_attr[] = { &format_attr_event.attr, NULL, @@ -639,7 +630,7 @@ static const struct attribute_group *rapl_attr_update[] = { &rapl_events_pkg_group, &rapl_events_ram_group, &rapl_events_gpu_group, - &rapl_events_gpu_group, + &rapl_events_psys_group, NULL, }; diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 2db3972c0e0f..79583bac9ac4 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -354,11 +355,14 @@ void hyperv_cleanup(void) } EXPORT_SYMBOL_GPL(hyperv_cleanup); -void hyperv_report_panic(struct pt_regs *regs, long err) +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) { static bool panic_reported; u64 guest_id; + if (in_die && !panic_on_oops) + return; + /* * We prefer to report panic on 'die' chain as we have proper * registers to report, but if we miss it (e.g. on BUG()) we need diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 5208ba49c89a..2c87350c1fb0 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, if (!hv_hypercall_pg) goto do_native; - if (cpumask_empty(cpus)) - return; - local_irq_save(flags); + /* + * Only check the mask _after_ interrupt has been disabled to avoid the + * mask changing under our feet. + */ + if (cpumask_empty(cpus)) { + local_irq_restore(flags); + return; + } + flush_pcpu = (struct hv_tlb_flush **) this_cpu_ptr(hyperv_pcpu_input_arg); diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 19e94af9cc5d..5bef1575708d 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC -/* - * Make previous memory operations globally visible before - * sending the IPI through x2apic wrmsr. We need a serializing instruction or - * mfence for this. 
- */ -static inline void x2apic_wrmsr_fence(void) -{ - asm volatile("mfence" : : : "memory"); -} - static inline void native_apic_msr_write(u32 reg, u32 v) { if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || @@ -259,6 +249,7 @@ static inline u64 native_x2apic_icr_read(void) extern int x2apic_mode; extern int x2apic_phys; +extern void __init x2apic_set_max_apicid(u32 apicid); extern void __init check_x2apic(void); extern void x2apic_setup(void); static inline int x2apic_enabled(void) diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 3ff577c0b102..12933d406207 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -133,6 +133,9 @@ # define _ASM_EXTABLE_UA(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) +# define _ASM_EXTABLE_CPY(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) @@ -164,6 +167,9 @@ # define _ASM_EXTABLE_UA(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) +# define _ASM_EXTABLE_CPY(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 7f828fe49797..4819d5e5a335 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -84,4 +84,22 @@ do { \ #include +/* + * Make previous memory operations globally visible before + * a WRMSR. + * + * MFENCE makes writes visible, but only affects load/store + * instructions. WRMSR is unfortunately not a load/store + * instruction and is unaffected by MFENCE. The LFENCE ensures + * that the WRMSR is not reordered. + * + * Most WRMSRs are full serializing instructions themselves and + * do not require this barrier. This is only required for the + * IA32_TSC_DEADLINE and X2APIC MSRs. + */ +static inline void weak_wrmsr_fence(void) +{ + asm volatile("mfence; lfence" : : : "memory"); +} + #endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index facba9bc30ca..37e4480dba75 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -3,6 +3,7 @@ #define _ASM_X86_BUG_H #include +#include /* * Despite that some emulators terminate on UD2, we use it for WARN(). diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h index 86b63c7feab7..86b2e0dcc4bf 100644 --- a/arch/x86/include/asm/cacheinfo.h +++ b/arch/x86/include/asm/cacheinfo.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_CACHEINFO_H #define _ASM_X86_CACHEINFO_H -void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); -void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu); +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu); #endif /* _ASM_X86_CACHEINFO_H */ diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index 31c379c1da41..0c814cd9ea42 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ b/arch/x86/include/asm/cpu_device_id.h @@ -9,6 +9,36 @@ #include +#define X86_CENTAUR_FAM6_C7_D 0xd +#define X86_CENTAUR_FAM6_NANO 0xf + +#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins) + +/** + * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching + * @_vendor: The vendor name, e.g. 
INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@_vendor + * @_family: The family number or X86_FAMILY_ANY + * @_model: The model number, model constant or X86_MODEL_ANY + * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY + * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY + * @_data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is cast to unsigned long internally. + * + * Backport version to keep the SRBDS pile consistent. No shorter variants + * required for this. + */ +#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ + _steppings, _feature, _data) { \ + .vendor = X86_VENDOR_##_vendor, \ + .family = _family, \ + .model = _model, \ + .steppings = _steppings, \ + .feature = _feature, \ + .driver_data = (unsigned long) _data \ +} + /* * Match specific microcode revisions. * diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index c4fbe379cc0b..f885908107f5 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -237,6 +237,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ +#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ @@ -348,6 +349,7 @@ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ +#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ #define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ @@ -356,7 +358,9 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */ #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ +#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ @@ -401,5 +405,6 @@ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ +#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index ef5638f641f2..88eadd08ad70 100644 --- a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -10,4 +10,10 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params);
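Returning to the stepping-aware match macro added in cpu_device_id.h above, a hypothetical usage sketch; the model and stepping values are placeholders, and x86_match_cpu() is the usual consumer of such a table:

#include <asm/cpu_device_id.h>

/* Hypothetical table: match Intel family 6, model 0x8e, steppings 0x0-0xc only. */
static const struct x86_cpu_id example_stepping_ids[] = {
	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, 0x8e,
						     X86_STEPPINGS(0x0, 0xc),
						     X86_FEATURE_ANY, NULL),
	{ }
};

static bool example_cpu_affected(void)
{
	return x86_match_cpu(example_stepping_ids) != NULL;
}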
void crash_smp_send_stop(void); +#ifdef CONFIG_KEXEC_CORE +void __init crash_reserve_low_1M(void); +#else +static inline void __init crash_reserve_low_1M(void) { } +#endif + #endif /* _ASM_X86_CRASH_H */ diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h index a5d86fc0593f..f1592619dd65 100644 --- a/arch/x86/include/asm/crypto/camellia.h +++ b/arch/x86/include/asm/crypto/camellia.h @@ -32,65 +32,60 @@ extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); /* regular block cipher functions */ -asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, + bool xor); +asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); /* 2-way parallel cipher functions */ -asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, bool xor); -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, + bool xor); +asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); /* 16-way parallel cipher functions (avx/aes-ni) */ -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); +asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src); -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); +asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, - const u8 *src) +static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk(ctx, dst, src, false); } -static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, - const u8 *src) +static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk(ctx, dst, src, true); } -static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk_2way(ctx, dst, src, false); } -static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, +static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, const u8 *src) { __camellia_enc_blk_2way(ctx, dst, src, true); } /* glue helpers */ -extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src); -extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, +extern void 
camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); +extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, +extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); -extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); +extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); +extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); #endif /* ASM_X86_CAMELLIA_H */ diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h index 8d4a8e1226ee..777c0f63418c 100644 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ b/arch/x86/include/asm/crypto/glue_helper.h @@ -11,18 +11,13 @@ #include #include -typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); -typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); -typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, +typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); +typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); +typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src, +typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) -#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) -#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) -#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn)) - struct common_glue_func_entry { unsigned int num_blocks; /* number of blocks that @fn will process */ union { @@ -116,7 +111,8 @@ extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, common_glue_func_t tweak_fn, void *tweak_ctx, void *crypt_ctx, bool decrypt); -extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, - le128 *iv, common_glue_func_t fn); +extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, + const u8 *src, le128 *iv, + common_glue_func_t fn); #endif /* _CRYPTO_GLUE_HELPER_H */ diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h index db7c9cc32234..251c2c89d7cf 100644 --- a/arch/x86/include/asm/crypto/serpent-avx.h +++ b/arch/x86/include/asm/crypto/serpent-avx.h @@ -15,26 +15,26 @@ struct serpent_xts_ctx { struct serpent_ctx crypt_ctx; }; -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, - const u8 *src, le128 *iv); +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, + le128 *iv); -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 
*iv); -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, +extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, le128 *iv); -extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); -extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); +extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); +extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h index 1a345e8a7496..860ca248914b 100644 --- a/arch/x86/include/asm/crypto/serpent-sse2.h +++ b/arch/x86/include/asm/crypto/serpent-sse2.h @@ -9,25 +9,23 @@ #define SERPENT_PARALLEL_BLOCKS 4 -asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_4way(ctx, dst, src, false); } -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, + u8 *dst, const u8 *src) { __serpent_enc_blk_4way(ctx, dst, src, true); } -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_4way(ctx, dst, src); } @@ -36,25 +34,23 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, #define SERPENT_PARALLEL_BLOCKS 8 -asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src, bool xor); -asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, +asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst, const u8 *src); -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) { __serpent_enc_blk_8way(ctx, dst, src, false); } -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, + u8 *dst, const u8 *src) { __serpent_enc_blk_8way(ctx, dst, src, true); } -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, - const u8 *src) +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) { serpent_dec_blk_8way(ctx, dst, src); } diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h index f618bf272b90..2c377a8042e1 100644 --- a/arch/x86/include/asm/crypto/twofish.h +++ b/arch/x86/include/asm/crypto/twofish.h @@ -7,22 +7,19 @@ #include /* regular block cipher functions from twofish_x86_64 module */ 
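Before the twofish declarations below, a note on what this constification buys: cipher entry points now match the common_glue_* typedefs directly, which is why the GLUE_*_FUNC_CAST macros were removed from glue_helper.h above. A sketch of a table entry without the cast, modeled on the serpent-avx glue code; it assumes __serpent_encrypt was likewise constified elsewhere in this series.

	/*
	 * Illustrative only: with const void *ctx prototypes, the function
	 * pointer initializers need no GLUE_FUNC_CAST() any more.
	 */
	static const struct common_glue_ctx serpent_enc_demo = {
		.num_funcs = 2,
		.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

		.funcs = { {
			.num_blocks = SERPENT_PARALLEL_BLOCKS,
			.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
		}, {
			.num_blocks = 1,
			.fn_u = { .ecb = __serpent_encrypt }
		} }
	};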
-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 /* 3-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src);
+asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src,
+ bool xor);
+asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);
 /* helpers from twofish_x86_64-3way module */
-extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
-extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
+extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
 le128 *iv);
-extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
 le128 *iv);
 #endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8473c786632d 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -62,6 +62,12 @@
 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
 #endif
+#ifdef CONFIG_X86_SGX
+# define DISABLE_SGX 0
+#else
+# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
+#endif
+
 /*
 * Make sure to add features to the correct mask
 */
@@ -74,7 +80,7 @@
 #define DISABLED_MASK6 0
 #define DISABLED_MASK7 (DISABLE_PTI)
 #define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
+#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP|DISABLE_SGX)
 #define DISABLED_MASK10 0
 #define DISABLED_MASK11 0
 #define DISABLED_MASK12 0
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index 00f7cf45e699..8e95aa4b0d17 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -74,7 +74,7 @@
 #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
 /* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
 #ifdef CONFIG_X86_32
 /* The maximum address that we can perform a DMA transfer to on this platform */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 69c0f892e310..ae7f38bdce05 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -281,9 +281,29 @@
 extern u32 elf_hwcap2;
 /*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the result is:
+ *
+ * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 |
+ * ELF: | | | |
+ * ---------------------|------------|------------------|----------------|
+ * missing PT_GNU_STACK | exec-all | exec-all | exec-none |
+ * PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack |
+ * PT_GNU_STACK == RW | exec-none | exec-none | exec-none |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable. + * + * *this column has no architectural effect: NX markings are ignored by + * hardware, but may have behavioral effects when "wants X" collides with + * "cannot be X" constraints in memory permission flags, as in + * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com + * */ #define elf_read_implies_exec(ex, executable_stack) \ - (executable_stack != EXSTACK_DISABLE_X) + (mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT) struct task_struct; diff --git a/arch/x86/include/asm/enclu.h b/arch/x86/include/asm/enclu.h new file mode 100644 index 000000000000..b1314e41a744 --- /dev/null +++ b/arch/x86/include/asm/enclu.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_ENCLU_H +#define _ASM_X86_ENCLU_H + +#define EENTER 0x02 +#define ERESUME 0x03 +#define EEXIT 0x04 + +#endif /* _ASM_X86_ENCLU_H */ diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h index d8c2198d543b..1f0cbc52937c 100644 --- a/arch/x86/include/asm/extable.h +++ b/arch/x86/include/asm/extable.h @@ -29,10 +29,17 @@ struct pt_regs; (b)->handler = (tmp).handler - (delta); \ } while (0) +enum handler_type { + EX_HANDLER_NONE, + EX_HANDLER_FAULT, + EX_HANDLER_UACCESS, + EX_HANDLER_OTHER +}; + extern int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr); extern int fixup_bug(struct pt_regs *regs, int trapnr); -extern bool ex_has_fault_handler(unsigned long ip); +extern enum handler_type ex_get_fault_handler_type(unsigned long ip); extern void early_fixup_exception(struct pt_regs *regs, int trapnr); #endif diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index b774c52e5411..06e767bca0c1 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -16,14 +16,25 @@ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It * disables preemption so be careful if you intend to use it for long periods * of time. - * If you intend to use the FPU in softirq you need to check first with + * If you intend to use the FPU in irq/softirq you need to check first with * irq_fpu_usable() if it is possible. */ -extern void kernel_fpu_begin(void); + +/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */ +#define KFPU_387 _BITUL(0) /* 387 state will be initialized */ +#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */ + +extern void kernel_fpu_begin_mask(unsigned int kfpu_mask); extern void kernel_fpu_end(void); extern bool irq_fpu_usable(void); extern void fpregs_mark_activate(void); +/* Code that is unaware of kernel_fpu_begin_mask() can use this */ +static inline void kernel_fpu_begin(void) +{ + kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR); +} + /* * Use fpregs_lock() while editing CPU's FPU registers or fpu->state. 
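A short usage sketch for the kernel_fpu_begin()/kernel_fpu_end() API above; demo_xmm_copy is a made-up name and the SIMD body is elided, the point is the bracketing and the irq_fpu_usable() check.

	static void demo_xmm_copy(void *dst, const void *src, size_t len)
	{
		/* In hard/soft interrupt context the FPU may be unusable. */
		if (!irq_fpu_usable())
			return;	/* a real caller would take a scalar fallback */

		kernel_fpu_begin();	/* disables preemption, inits 387 + MXCSR */
		/* ... SSE/AVX loads and stores on dst/src would go here ... */
		kernel_fpu_end();	/* re-enables preemption */
	}

Code that only needs one of the two state classes can call kernel_fpu_begin_mask() directly with KFPU_387 or KFPU_MXCSR to skip the other initialization.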
* A context switch will (and softirq might) save CPU's FPU registers to diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 44c48e34d799..00eac7f1529b 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -619,6 +619,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * MXCSR and XCR definitions: */ +static inline void ldmxcsr(u32 mxcsr) +{ + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); +} + extern unsigned int mxcsr_feature_mask; #define XCR_XFEATURE_ENABLED_MASK 0x00000000 diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index bca4c743de77..35cff5f2becf 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -19,35 +19,64 @@ extern unsigned long x86_gsbase_read_task(struct task_struct *task); extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); +/* Must be protected by X86_FEATURE_FSGSBASE check. */ + +static __always_inline unsigned long rdfsbase(void) +{ + unsigned long fsbase; + + asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory"); + + return fsbase; +} + +static __always_inline unsigned long rdgsbase(void) +{ + unsigned long gsbase; + + asm volatile("rdgsbase %0" : "=r" (gsbase) :: "memory"); + + return gsbase; +} + +static __always_inline void wrfsbase(unsigned long fsbase) +{ + asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory"); +} + +static __always_inline void wrgsbase(unsigned long gsbase) +{ + asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory"); +} + +#include + /* Helper functions for reading/writing FS/GS base */ static inline unsigned long x86_fsbase_read_cpu(void) { unsigned long fsbase; - rdmsrl(MSR_FS_BASE, fsbase); + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) + fsbase = rdfsbase(); + else + rdmsrl(MSR_FS_BASE, fsbase); return fsbase; } -static inline unsigned long x86_gsbase_read_cpu_inactive(void) -{ - unsigned long gsbase; - - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); - - return gsbase; -} - static inline void x86_fsbase_write_cpu(unsigned long fsbase) { - wrmsrl(MSR_FS_BASE, fsbase); + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) + wrfsbase(fsbase); + else + wrmsrl(MSR_FS_BASE, fsbase); } -static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase) -{ - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); -} +extern unsigned long x86_gsbase_read_cpu_inactive(void); +extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase); +extern unsigned long x86_fsgsbase_read_task(struct task_struct *task, + unsigned short selector); #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 154f27be8bfc..a51ffeea6d87 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -195,6 +195,21 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +/** + * for_each_insn_prefix() -- Iterate prefixes in the instruction + * @insn: Pointer to struct insn. + * @idx: Index storage. + * @prefix: Prefix byte. + * + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix + * and the index is stored in @idx (note that this @idx is just for a cursor, + * do not change it.) + * Since prefixes.nbytes can be bigger than 4 if some prefixes + * are repeated, it cannot be used for looping over the prefixes. 
+ */ +#define for_each_insn_prefix(insn, idx, prefix) \ + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) + #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h index f5a796da07f8..d063841a17e3 100644 --- a/arch/x86/include/asm/inst.h +++ b/arch/x86/include/asm/inst.h @@ -306,6 +306,21 @@ .endif MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2 .endm + +.macro RDPID opd + REG_TYPE rdpid_opd_type \opd + .if rdpid_opd_type == REG_TYPE_R64 + R64_NUM rdpid_opd \opd + .else + R32_NUM rdpid_opd \opd + .endif + .byte 0xf3 + .if rdpid_opd > 7 + PFX_REX rdpid_opd 0 + .endif + .byte 0x0f, 0xc7 + MODRM 0xc0 rdpid_opd 0x7 +.endm #endif #endif diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 734a3334e0f0..c52b7073a5ab 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -550,6 +550,7 @@ struct kvm_vcpu_arch { unsigned long cr4; unsigned long cr4_guest_owned_bits; unsigned long cr8; + u32 host_pkru; u32 pkru; u32 hflags; u64 efer; @@ -1130,7 +1131,7 @@ struct kvm_x86_ops { bool (*pt_supported)(void); bool (*pku_supported)(void); - int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); + int (*check_nested_events)(struct kvm_vcpu *vcpu); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); void (*sched_in)(struct kvm_vcpu *kvm, int cpu); @@ -1159,7 +1160,7 @@ struct kvm_x86_ops { void (*enable_log_dirty_pt_masked)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t offset, unsigned long mask); - int (*write_log_dirty)(struct kvm_vcpu *vcpu); + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; @@ -1552,12 +1553,14 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); +int kvm_cpu_has_extint(struct kvm_vcpu *v); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); int kvm_cpu_get_interrupt(struct kvm_vcpu *v); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); @@ -1608,8 +1611,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) { /* We can only post Fixed and LowPrio IRQs */ - return (irq->delivery_mode == dest_Fixed || - irq->delivery_mode == dest_LowestPrio); + return (irq->delivery_mode == APIC_DM_FIXED || + irq->delivery_mode == APIC_DM_LOWEST); } static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 14caa9d9fb7f..e07188e8d763 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -13,9 +13,13 @@ #ifdef __ASSEMBLY__ -#define GLOBAL(name) \ - .globl name; \ - name: +/* + * GLOBAL is DEPRECATED + * + * use SYM_DATA_START, SYM_FUNC_START, SYM_INNER_LABEL, SYM_CODE_START, or + * similar + */ +#define GLOBAL(name) SYM_ENTRY(name, SYM_L_GLOBAL, SYM_A_NONE) #if 
defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) #define __ALIGN .p2align 4, 0x90 diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index dc2d4b206ab7..b0b54e18aa8b 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -127,6 +127,30 @@ #define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x)) #define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x))) +/* mce.kflags flag bits for logging etc. */ +#define MCE_HANDLED_CEC BIT_ULL(0) +#define MCE_HANDLED_UC BIT_ULL(1) +#define MCE_HANDLED_EXTLOG BIT_ULL(2) +#define MCE_HANDLED_NFIT BIT_ULL(3) +#define MCE_HANDLED_EDAC BIT_ULL(4) +#define MCE_HANDLED_MCELOG BIT_ULL(5) + +/* + * Indicates an MCE which has happened in kernel space but from + * which the kernel can recover simply by executing fixup_exception() + * so that an error is returned to the caller of the function that + * hit the machine check. + */ +#define MCE_IN_KERNEL_RECOV BIT_ULL(6) + +/* + * Indicates an MCE that happened in kernel space while copying data + * from user. In this case fixup_exception() gets the kernel to the + * error exit for the copy function. Machine check handler can then + * treat it like a fault taken in user mode. + */ +#define MCE_IN_KERNEL_COPYIN BIT_ULL(7) + /* * This structure contains all data related to the MCE log. Also * carries a signature to make it easier to find from external @@ -143,13 +167,14 @@ struct mce_log_buffer { }; enum mce_notifier_prios { - MCE_PRIO_FIRST = INT_MAX, - MCE_PRIO_SRAO = INT_MAX - 1, - MCE_PRIO_EXTLOG = INT_MAX - 2, - MCE_PRIO_NFIT = INT_MAX - 3, - MCE_PRIO_EDAC = INT_MAX - 4, - MCE_PRIO_MCELOG = 1, - MCE_PRIO_LOWEST = 0, + MCE_PRIO_LOWEST, + MCE_PRIO_MCELOG, + MCE_PRIO_EDAC, + MCE_PRIO_NFIT, + MCE_PRIO_EXTLOG, + MCE_PRIO_SRAO, + MCE_PRIO_EARLY, + MCE_PRIO_CEC }; struct notifier_block; @@ -290,6 +315,7 @@ extern void apei_mce_report_mem_error(int corrected, /* These may be used by multiple smca_hwid_mcatypes */ enum smca_bank_types { SMCA_LS = 0, /* Load Store */ + SMCA_LS_V2, /* Load Store */ SMCA_IF, /* Instruction Fetch */ SMCA_L2_CACHE, /* L2 Cache */ SMCA_DE, /* Decoder Unit */ diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 209492849566..5c524d4f71cd 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -41,7 +41,7 @@ struct microcode_amd { unsigned int mpb[0]; }; -#define PATCH_MAX_SIZE PAGE_SIZE +#define PATCH_MAX_SIZE (3 * PAGE_SIZE) #ifdef CONFIG_MICROCODE_AMD extern void __init load_ucode_amd_bsp(unsigned int family); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 1682e4b5ce75..6a80c597b09f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -119,6 +119,10 @@ #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ +/* SRBDS support */ +#define MSR_IA32_MCU_OPT_CTRL 0x00000123 +#define RNGDS_MITG_DIS BIT(0) + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 @@ -428,6 +432,7 @@ #define MSR_AMD64_IBSOP_REG_MASK ((1UL<thread */ -void save_fsgs_for_kvm(void); -#endif +void current_save_fsgs(void); #else /* X86_64 */ #ifdef CONFIG_STACKPROTECTOR /* @@ -506,15 +504,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, *size = fpu_kernel_xstate_size; } -/* - * Thread-synchronous status. 
- * - * This is different from the flags in that nobody else - * ever touches our thread-synchronous status, so we don't - * have to worry about atomic accesses. - */ -#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ - /* * Set IOPL bits in EFLAGS from given mask */ @@ -540,7 +529,7 @@ native_load_sp0(unsigned long sp0) this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } -static inline void native_swapgs(void) +static __always_inline void native_swapgs(void) { #ifdef CONFIG_X86_64 asm volatile("swapgs" ::: "memory"); diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 332eb3525867..59f47eb7aeb9 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -159,6 +159,19 @@ static inline bool user_64bit_mode(struct pt_regs *regs) #endif } +/* + * Determine whether the register set came from any context that is running in + * 64-bit mode. + */ +static inline bool any_64bit_mode(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_64 + return !user_mode(regs) || user_64bit_mode(regs); +#else + return false; +#endif +} + #ifdef CONFIG_X86_64 #define current_user_stack_pointer() current_pt_regs()->sp #define compat_user_stack_pointer() current_pt_regs()->sp @@ -309,8 +322,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, static const unsigned int argument_offs[] = { #ifdef __i386__ offsetof(struct pt_regs, ax), - offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, cx), #define NR_REG_ARGUMENTS 3 #else offsetof(struct pt_regs, di), @@ -361,5 +374,11 @@ extern int do_get_thread_area(struct task_struct *p, int idx, extern int do_set_thread_area(struct task_struct *p, int idx, struct user_desc __user *info, int can_allocate); +#ifdef CONFIG_X86_64 +# define do_set_thread_area_64(p, s, t) do_arch_prctl_64(p, s, t) +#else +# define do_set_thread_area_64(p, s, t) (0) +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PTRACE_H */ diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 444d6fd9a6d8..35e5a252c089 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -27,11 +27,17 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo return val; } +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +extern void cna_configure_spin_lock_slowpath(void); +extern void cna_set_llc_id_per_cpu(unsigned int cpu); +#endif + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock); +extern bool nopvspin; #define queued_spin_unlock queued_spin_unlock /** diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index 2ee8e469dcf5..162128cdfbf2 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -85,28 +85,35 @@ void set_kernel_text_rw(void); void set_kernel_text_ro(void); #ifdef CONFIG_X86_64 -static inline int set_mce_nospec(unsigned long pfn) +/* + * Prevent speculative access to the page by either unmapping + * it (if we do not require access to any part of the page) or + * marking it uncacheable (if we want to try to retrieve data + * from non-poisoned lines in the page). 
+ */ +static inline int set_mce_nospec(unsigned long pfn, bool unmap) { unsigned long decoy_addr; int rc; /* - * Mark the linear address as UC to make sure we don't log more - * errors because of speculative access to the page. * We would like to just call: - * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1); + * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address * that looks just like the one we want, but has bit 63 flipped. - * This relies on set_memory_uc() properly sanitizing any __pa() + * This relies on set_memory_XX() properly sanitizing any __pa() * results with __PHYSICAL_MASK or PTE_PFN_MASK. */ decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); - rc = set_memory_uc(decoy_addr, 1); + if (unmap) + rc = set_memory_np(decoy_addr, 1); + else + rc = set_memory_uc(decoy_addr, 1); if (rc) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); return rc; diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index 27c47d183f4b..8b58d6975d5d 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -57,8 +57,10 @@ static __always_inline unsigned long smap_save(void) { unsigned long flags; - asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC, - X86_FEATURE_SMAP) + asm volatile ("# smap_save\n\t" + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) + "pushf; pop %0; " __ASM_CLAC "\n\t" + "1:" : "=rm" (flags) : : "memory", "cc"); return flags; @@ -66,7 +68,10 @@ static __always_inline unsigned long smap_save(void) static __always_inline void smap_restore(unsigned long flags) { - asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP) + asm volatile ("# smap_restore\n\t" + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) + "push %0; popf\n\t" + "1:" : : "g" (flags) : "memory", "cc"); } diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 6d37b8fcfc77..2e0cdc64cb50 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -10,45 +10,47 @@ #include /* - * Volatile isn't enough to prevent the compiler from reordering the - * read/write functions for the control registers and messing everything up. - * A memory clobber would solve the problem, but would prevent reordering of - * all loads stores around it, which can hurt performance. Solution is to - * use a variable and mimic reads and writes to it to enforce serialization + * The compiler should not reorder volatile asm statements with respect to each + * other: they should execute in program order. However GCC 4.9.x and 5.x have + * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder + * volatile asm. The write functions are not affected since they have memory + * clobbers preventing reordering. To prevent reads from being reordered with + * respect to writes, use a dummy memory operand. 
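To make the reordering hazard concrete, a minimal sketch of the dummy-operand idiom adopted below; the 0x1000 address is arbitrary and never dereferenced, and the DEMO_* names are illustrative, not part of the patch.

	#define DEMO_FORCE_ORDER "m"(*(unsigned int *)0x1000UL)

	static inline unsigned long demo_read_cr0(void)
	{
		unsigned long val;

		/*
		 * The dummy "m" input gives the compiler a (never dereferenced)
		 * memory dependency, so this read cannot be reordered ahead of a
		 * preceding write whose asm carries a "memory" clobber.
		 */
		asm volatile("mov %%cr0,%0" : "=r" (val) : DEMO_FORCE_ORDER);
		return val;
	}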
*/ -extern unsigned long __force_order; + +#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL) void native_write_cr0(unsigned long val); static inline unsigned long native_read_cr0(void) { unsigned long val; - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static inline unsigned long native_read_cr2(void) { unsigned long val; - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static inline void native_write_cr2(unsigned long val) { - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr2": : "r" (val) : "memory"); } static inline unsigned long __native_read_cr3(void) { unsigned long val; - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static inline void native_write_cr3(unsigned long val) { - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr3": : "r" (val) : "memory"); } static inline unsigned long native_read_cr4(void) @@ -63,10 +65,10 @@ static inline unsigned long native_read_cr4(void) asm volatile("1: mov %%cr4, %0\n" "2:\n" _ASM_EXTABLE(1b, 2b) - : "=r" (val), "=m" (__force_order) : "0" (0)); + : "=r" (val) : "0" (0), __FORCE_ORDER); #else /* CR4 always exists on x86_64. */ - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER); #endif return val; } diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 91e29b6a86a5..9804a7957f4e 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -55,8 +55,13 @@ /* * Initialize the stackprotector canary value. * - * NOTE: this must only be called from functions that never return, + * NOTE: this must only be called from functions that never return * and it must always be inlined. + * + * In addition, it should be called from a compilation unit for which + * stack protector is disabled. Alternatively, the caller should not end + * with a function call which gets tail-call optimized as that would + * lead to checking a modified canary value. */ static __always_inline void boot_init_stack_canary(void) { diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 18a4b6890fa8..0e059b73437b 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -103,7 +103,17 @@ static inline void update_task_stack(struct task_struct *task) if (static_cpu_has(X86_FEATURE_XENPV)) load_sp0(task_top_of_stack(task)); #endif +} +static inline void kthread_frame_init(struct inactive_task_frame *frame, + unsigned long fun, unsigned long arg) +{ + frame->bx = fun; +#ifdef CONFIG_X86_32 + frame->di = arg; +#else + frame->r12 = arg; +#endif } #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h index c67caafd3381..43b5e02a7b4b 100644 --- a/arch/x86/include/asm/sync_core.h +++ b/arch/x86/include/asm/sync_core.h @@ -16,12 +16,13 @@ static inline void sync_core_before_usermode(void) /* With PTI, we unconditionally serialize before running user code. */ if (static_cpu_has(X86_FEATURE_PTI)) return; + /* - * Return from interrupt and NMI is done through iret, which is core - * serializing. 
+ * Even if we're in an interrupt, we might reschedule before returning, + * in which case we could switch to a different thread in the same mm + * and return using SYSRET or SYSEXIT. Instead of trying to keep + * track of our need to sync the core, just sync right away. */ - if (in_irq() || in_nmi()) - return; sync_core(); } diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index f9453536f9bb..a4de7aa7500f 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -221,10 +221,31 @@ static inline int arch_within_stack_frames(const void * const stack, #endif +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ + +#ifndef __ASSEMBLY__ #ifdef CONFIG_COMPAT #define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */ +#define TS_COMPAT_RESTART 0x0008 + +#define arch_set_restart_data arch_set_restart_data + +static inline void arch_set_restart_data(struct restart_block *restart) +{ + struct thread_info *ti = current_thread_info(); + if (ti->status & TS_COMPAT) + ti->status |= TS_COMPAT_RESTART; + else + ti->status &= ~TS_COMPAT_RESTART; +} #endif -#ifndef __ASSEMBLY__ #ifdef CONFIG_X86_32 #define in_ia32_syscall() true diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 4b14d2318251..f3459df117aa 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) +extern unsigned int __max_die_per_package; + #ifdef CONFIG_SMP #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu)) #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) @@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); extern unsigned int __max_logical_packages; #define topology_max_packages() (__max_logical_packages) -extern unsigned int __max_die_per_package; - static inline int topology_max_die_per_package(void) { return __max_die_per_package; diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 879b77792f94..fd86d4bd23df 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -90,7 +90,7 @@ DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed, ); #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH asm/trace/ +#define TRACE_INCLUDE_PATH asm/trace #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE fpu #endif /* _TRACE_FPU_H */ diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h index ace464f09681..8fecd21c6afe 100644 --- a/arch/x86/include/asm/trace/hyperv.h +++ b/arch/x86/include/asm/trace/hyperv.h @@ -74,7 +74,7 @@ TRACE_EVENT(hyperv_send_ipi_mask, #endif /* CONFIG_HYPERV */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH asm/trace/ +#define TRACE_INCLUDE_PATH asm/trace #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE hyperv #endif /* _TRACE_HYPERV_H */ diff --git a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h index 54133017267c..43b8e9ba42b2 100644 --- a/arch/x86/include/asm/trace/mpx.h +++ b/arch/x86/include/asm/trace/mpx.h @@ -125,7 +125,7 @@ void trace_bounds_exception_mpx(const struct mpx_bndcsr *bndcsr) #endif /* 
CONFIG_X86_INTEL_MPX */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH asm/trace/
+#define TRACE_INCLUDE_PATH asm/trace
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE mpx
 #endif /* _TRACE_MPX_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index b25e633033c3..71abccd78a4f 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -118,10 +118,7 @@
 void smp_spurious_interrupt(struct pt_regs *regs);
 void smp_error_interrupt(struct pt_regs *regs);
 asmlinkage void smp_irq_move_cleanup_interrupt(void);
-extern void ist_enter(struct pt_regs *regs);
-extern void ist_exit(struct pt_regs *regs);
-extern void ist_begin_non_atomic(struct pt_regs *regs);
-extern void ist_end_non_atomic(void);
+bool fault_in_kernel_space(unsigned long address);
 #ifdef CONFIG_VMAP_STACK
 void __noreturn handle_stack_overflow(const char *message,
@@ -163,6 +160,7 @@ enum {
 * bit 3 == 1: use of reserved bit detected
 * bit 4 == 1: fault was an instruction fetch
 * bit 5 == 1: protection keys block access
+ * bit 15 == 1: SGX MMU page-fault
 */
 enum x86_pf_error_code {
 X86_PF_PROT = 1 << 0,
@@ -171,5 +169,6 @@ enum x86_pf_error_code {
 X86_PF_RSVD = 1 << 3,
 X86_PF_INSTR = 1 << 4,
 X86_PF_PK = 1 << 5,
+ X86_PF_SGX = 1 << 15,
 };
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 499578f7e6d7..70fc159ebe69 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -19,7 +19,7 @@ struct unwind_state {
 #if defined(CONFIG_UNWINDER_ORC)
 bool signal, full_regs;
 unsigned long sp, bp, ip;
- struct pt_regs *regs;
+ struct pt_regs *regs, *prev_regs;
 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
 bool got_irq;
 unsigned long *bp, *orig_sp, ip;
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 230474e2ddb5..745300a05f25 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -15,6 +15,8 @@ struct vdso_image {
 unsigned long size; /* Always a multiple of PAGE_SIZE */
 unsigned long alt, alt_len;
+ unsigned long extable_base, extable_len;
+ const void *extable;
 long sym_vvar_start; /* Negative offset to the vvar area */
@@ -44,6 +46,9 @@
 extern void __init init_vdso_image(const struct vdso_image *image);
 extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);
+extern bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_VDSO_H */
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 9aad0e0876fb..fda3e7747c22 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
 }
-/** Disable VMX on the current CPU
+/**
+ * cpu_vmxoff() - Disable VMX on the current CPU
 *
- * vmxoff causes a undefined-opcode exception if vmxon was not run
- * on the CPU previously. Only call this function if you know VMX
- * is enabled.
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults, as all other faults on VMXOFF are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
*/ static inline void cpu_vmxoff(void) { - asm volatile ("vmxoff"); + asm_volatile_goto("1: vmxoff\n\t" + _ASM_EXTABLE(1b, %l[fault]) :::: fault); +fault: cr4_clear_bits(X86_CR4_VMXE); } diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h index 8b2effe6efb8..5fdfcb47000f 100644 --- a/arch/x86/include/uapi/asm/hwcap2.h +++ b/arch/x86/include/uapi/asm/hwcap2.h @@ -5,4 +5,7 @@ /* MONITOR/MWAIT enabled in Ring 3 */ #define HWCAP2_RING3MWAIT (1 << 0) +/* Kernel allows FSGSBASE instructions available in Ring 3 */ +#define HWCAP2_FSGSBASE BIT(1) + #endif diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 955c2a2e1cf9..5b59d80f1d4e 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -35,6 +35,7 @@ struct mce { __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ __u32 microcode; /* Microcode revision */ + __u64 kflags; /* Internal kernel use. See below */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h new file mode 100644 index 000000000000..9034f3007c4e --- /dev/null +++ b/arch/x86/include/uapi/asm/sgx.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright(c) 2016-20 Intel Corporation. + */ +#ifndef _UAPI_ASM_X86_SGX_H +#define _UAPI_ASM_X86_SGX_H + +#include +#include + +/** + * enum sgx_page_flags - page control flags + * %SGX_PAGE_MEASURE: Measure the page contents with a sequence of + * ENCLS[EEXTEND] operations. + */ +enum sgx_page_flags { + SGX_PAGE_MEASURE = 0x01, +}; + +#define SGX_MAGIC 0xA4 + +#define SGX_IOC_ENCLAVE_CREATE \ + _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create) +#define SGX_IOC_ENCLAVE_ADD_PAGES \ + _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages) +#define SGX_IOC_ENCLAVE_INIT \ + _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init) +#define SGX_IOC_ENCLAVE_PROVISION \ + _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision) + +/** + * struct sgx_enclave_create - parameter structure for the + * %SGX_IOC_ENCLAVE_CREATE ioctl + * @src: address for the SECS page data + */ +struct sgx_enclave_create { + __u64 src; +}; + +/** + * struct sgx_enclave_add_pages - parameter structure for the + * %SGX_IOC_ENCLAVE_ADD_PAGE ioctl + * @src: start address for the page data + * @offset: starting page offset + * @length: length of the data (multiple of the page size) + * @secinfo: address for the SECINFO data + * @flags: page control flags + * @count: number of bytes added (multiple of the page size) + */ +struct sgx_enclave_add_pages { + __u64 src; + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 flags; + __u64 count; +}; + +/** + * struct sgx_enclave_init - parameter structure for the + * %SGX_IOC_ENCLAVE_INIT ioctl + * @sigstruct: address for the SIGSTRUCT data + */ +struct sgx_enclave_init { + __u64 sigstruct; +}; + +/** + * struct sgx_enclave_provision - parameter structure for the + * %SGX_IOC_ENCLAVE_PROVISION ioctl + * @fd: file handle of /dev/sgx_provision + */ +struct sgx_enclave_provision { + __u64 fd; +}; + +struct sgx_enclave_run; + +/** + * typedef sgx_enclave_user_handler_t - Exit handler function accepted by + * __vdso_sgx_enter_enclave() + * @run: The run instance given by the caller + * + * The register parameters contain the snapshot of their values at enclave + * exit. An invalid ENCLU function number will cause -EINVAL to be returned + * to the caller. 
+ *
+ * Return:
+ * - <= 0: The given value is returned back to the caller.
+ * - > 0: ENCLU function to invoke, either EENTER or ERESUME.
+ */
+typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi, long rdx,
+ long rsp, long r8, long r9,
+ struct sgx_enclave_run *run);
+
+/**
+ * struct sgx_enclave_run - the execution context of __vdso_sgx_enter_enclave()
+ * @tcs: TCS used to enter the enclave
+ * @function: The last seen ENCLU function (EENTER, ERESUME or EEXIT)
+ * @exception_vector: The interrupt vector of the exception
+ * @exception_error_code: The exception error code pulled out of the stack
+ * @exception_addr: The address that triggered the exception
+ * @user_handler: User provided callback run on exception
+ * @user_data: Data passed to the user handler
+ * @reserved: Reserved for future extensions
+ *
+ * If @user_handler is provided, the handler will be invoked on all return paths
+ * of the normal flow. The user handler may transfer control, e.g. via a
+ * longjmp() call or a C++ exception, without returning to
+ * __vdso_sgx_enter_enclave().
+ */
+struct sgx_enclave_run {
+ __u64 tcs;
+ __u32 function;
+ __u16 exception_vector;
+ __u16 exception_error_code;
+ __u64 exception_addr;
+ __u64 user_handler;
+ __u64 user_data;
+ __u8 reserved[216];
+};
+
+/**
+ * typedef vdso_sgx_enter_enclave_t - Prototype for __vdso_sgx_enter_enclave(),
+ * a vDSO function to enter an SGX enclave.
+ * @rdi: Pass-through value for RDI
+ * @rsi: Pass-through value for RSI
+ * @rdx: Pass-through value for RDX
+ * @function: ENCLU function, must be EENTER or ERESUME
+ * @r8: Pass-through value for R8
+ * @r9: Pass-through value for R9
+ * @run: struct sgx_enclave_run, must be non-NULL
+ *
+ * NOTE: __vdso_sgx_enter_enclave() does not ensure full compliance with the
+ * x86-64 ABI, e.g. doesn't handle XSAVE state. Except for non-volatile
+ * general purpose registers, EFLAGS.DF, and RSP alignment, preserving/setting
+ * state in accordance with the x86-64 ABI is the responsibility of the enclave
+ * and its runtime, i.e. __vdso_sgx_enter_enclave() cannot be called from C
+ * code without careful consideration by both the enclave and its runtime.
+ *
+ * All general purpose registers except RAX, RBX and RCX are passed as-is to the
+ * enclave. RAX, RBX and RCX are consumed by EENTER and ERESUME and are loaded
+ * with @function, asynchronous exit pointer, and @run.tcs respectively.
+ *
+ * RBP and the stack are used to anchor __vdso_sgx_enter_enclave() to the
+ * pre-enclave state, e.g. to retrieve @run.exception and @run.user_handler
+ * after an enclave exit. All other registers are available for use by the
+ * enclave and its runtime, e.g. an enclave can push additional data onto the
+ * stack (and modify RSP) to pass information to the optional user handler (see
+ * below).
+ *
+ * Most exceptions reported on ENCLU, including those that occur within the
+ * enclave, are fixed up and reported synchronously instead of being delivered
+ * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
+ * never fixed up and are always delivered via standard signals. On synchronously
+ * reported exceptions, -EFAULT is returned and details about the exception are
+ * recorded in @run.exception, the optional sgx_enclave_exception struct.
+ *
+ * Return:
+ * - 0: ENCLU function was successfully executed.
+ * - -EINVAL: Invalid ENCLU function number (neither EENTER nor ERESUME).
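Ahead of the closing typedef below, a hedged usage sketch from the userspace side. The vDSO symbol lookup that yields vdso_entry is elided, demo_* names and the TCS setup are illustrative, and the handler return convention follows the kernel-doc above: a value <= 0 is handed back to the caller, a positive EENTER/ERESUME re-enters the enclave.

	#include <errno.h>
	#include <stdint.h>
	#include <asm/enclu.h>
	#include <asm/sgx.h>

	/* Illustrative exit handler: bail out on any fixed-up exception. */
	static int demo_handler(long rdi, long rsi, long rdx, long rsp,
				long r8, long r9, struct sgx_enclave_run *run)
	{
		return run->exception_vector ? -EFAULT : 0;
	}

	static int demo_enter(vdso_sgx_enter_enclave_t vdso_entry, void *tcs)
	{
		struct sgx_enclave_run run = {
			.tcs = (__u64)(uintptr_t)tcs,
			.user_handler = (__u64)(uintptr_t)demo_handler,
		};

		/* EENTER comes from the asm/enclu.h header added earlier. */
		return vdso_entry(0, 0, 0, EENTER, 0, 0, &run);
	}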
+ */ +typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi, + unsigned long rdx, unsigned int function, + unsigned long r8, unsigned long r9, + struct sgx_enclave_run *run); + +#endif /* _UAPI_ASM_X86_SGX_H */ diff --git a/arch/x86/include/uapi/asm/unistd.h b/arch/x86/include/uapi/asm/unistd.h index 196fdd02b8b1..be5e2e747f50 100644 --- a/arch/x86/include/uapi/asm/unistd.h +++ b/arch/x86/include/uapi/asm/unistd.h @@ -2,8 +2,15 @@ #ifndef _UAPI_ASM_X86_UNISTD_H #define _UAPI_ASM_X86_UNISTD_H -/* x32 syscall flag bit */ -#define __X32_SYSCALL_BIT 0x40000000UL +/* + * x32 syscall flag bit. Some user programs expect syscall NR macros + * and __X32_SYSCALL_BIT to have type int, even though syscall numbers + * are, for practical purposes, unsigned long. + * + * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right + * thing regardless. + */ +#define __X32_SYSCALL_BIT 0x40000000 #ifndef __KERNEL__ # ifdef __i386__ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 04205ce127a1..4137a7342d68 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1553,10 +1553,18 @@ void __init acpi_boot_table_init(void) /* * Initialize the ACPI boot-time table parser. */ - if (acpi_table_init()) { + if (acpi_locate_initial_tables()) disable_acpi(); - return; - } + else + acpi_reserve_initial_tables(); +} + +int __init early_acpi_boot_init(void) +{ + if (acpi_disabled) + return 1; + + acpi_table_init_complete(); acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); @@ -1569,18 +1577,9 @@ void __init acpi_boot_table_init(void) } else { printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); disable_acpi(); - return; + return 1; } } -} - -int __init early_acpi_boot_init(void) -{ - /* - * If acpi_disabled, bail out - */ - if (acpi_disabled) - return 1; /* * Process the Multiple APIC Description Table (MADT), if present @@ -1740,7 +1739,7 @@ int __acpi_acquire_global_lock(unsigned int *lock) new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); - return (new < 3) ? -1 : 0; + return ((new & 0x3) < 3) ? 
-1 : 0; } int __acpi_release_global_lock(unsigned int *lock) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index caf2edccbad2..49ae4e1ac9cd 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -161,7 +161,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, /* Make sure we are running on right CPU */ - retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); + retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, + false); if (retval == 0) { /* Use the hint in CST */ percpu_entry->states[cx->index].eax = cx->address; diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index e95e95960156..5b076cb79f5f 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -9,8 +9,7 @@ .code32 ALIGN -ENTRY(wakeup_pmode_return) -wakeup_pmode_return: +SYM_CODE_START(wakeup_pmode_return) movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %fs @@ -39,6 +38,7 @@ wakeup_pmode_return: # jump to place where we left off movl saved_eip, %eax jmp *%eax +SYM_CODE_END(wakeup_pmode_return) bogus_magic: jmp bogus_magic @@ -72,7 +72,7 @@ restore_registers: popfl ret -ENTRY(do_suspend_lowlevel) +SYM_CODE_START(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 @@ -87,6 +87,7 @@ ret_point: call restore_registers call restore_processor_state ret +SYM_CODE_END(do_suspend_lowlevel) .data ALIGN diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 9d3a971ea364..e0b2510aa0b8 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -720,6 +720,10 @@ void __init alternative_instructions(void) * patching. */ +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) + cna_configure_spin_lock_slowpath(); +#endif + apply_alternatives(__alt_instructions, __alt_instructions_end); #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 251c795b4eb3..c4bc01da820e 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -18,10 +18,13 @@ #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 +#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 +#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 /* Protect the PCI config register pairs used for SMN and DF indirect access. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -32,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) }, {} }; @@ -50,8 +54,10 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, {} }; EXPORT_SYMBOL_GPL(amd_nb_misc_ids); @@ -65,7 +71,9 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index df891f874614..7fafa859e9f2 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -352,8 +353,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) * According to Intel, MFENCE can do the serialization here. 
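weak_wrmsr_fence(), called in the lapic_next_deadline() hunk below, is not itself part of this excerpt. Upstream it is a tiny helper (in asm/barrier.h) along these lines; treat the exact body and location as an assumption to be checked against the full series.

	/*
	 * Sketch of the helper: make prior memory operations globally
	 * visible before a WRMSR that is not architecturally serializing.
	 */
	static inline void weak_wrmsr_fence(void)
	{
		asm volatile("mfence; lfence" : : : "memory");
	}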
*/
 asm volatile("mfence" : : : "memory");
-
- printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 return;
 }
@@ -474,6 +473,9 @@
 static int lapic_next_deadline(unsigned long delta,
 {
 u64 tsc;
+ /* This MSR is special and needs a special fence: */
+ weak_wrmsr_fence();
+
 tsc = rdtsc();
 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 return 0;
@@ -552,7 +554,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 #define DEADLINE_MODEL_MATCH_REV(model, rev) \
 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
-static u32 hsx_deadline_rev(void)
+static __init u32 hsx_deadline_rev(void)
 {
 switch (boot_cpu_data.x86_stepping) {
 case 0x02: return 0x3a; /* EP */
@@ -562,7 +564,7 @@
 return ~0U;
 }
-static u32 bdx_deadline_rev(void)
+static __init u32 bdx_deadline_rev(void)
 {
 switch (boot_cpu_data.x86_stepping) {
 case 0x02: return 0x00000011;
@@ -574,7 +576,7 @@
 return ~0U;
 }
-static u32 skx_deadline_rev(void)
+static __init u32 skx_deadline_rev(void)
 {
 switch (boot_cpu_data.x86_stepping) {
 case 0x03: return 0x01000136;
@@ -587,7 +589,7 @@
 return ~0U;
 }
-static const struct x86_cpu_id deadline_match[] = {
+static const struct x86_cpu_id deadline_match[] __initconst = {
 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D, bdx_deadline_rev),
@@ -609,18 +611,19 @@
 {},
 };
-static void apic_check_deadline_errata(void)
+static __init bool apic_validate_deadline_timer(void)
 {
 const struct x86_cpu_id *m;
 u32 rev;
- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
- boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return;
+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return false;
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return true;
 m = x86_match_cpu(deadline_match);
 if (!m)
- return;
+ return true;
 /*
 * Function pointers will have the MSB set due to address layout,
@@ -632,11 +635,12 @@
 rev = (u32)m->driver_data;
 if (boot_cpu_data.microcode >= rev)
- return;
+ return true;
 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
 "please update microcode to version: 0x%x (or later)\n", rev);
+ return false;
 }
 /*
@@ -1886,20 +1890,22 @@ static __init void try_to_enable_x2apic(int remap_mode)
 return;
 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
- /* IR is required if there is APIC ID > 255 even when running
- * under KVM
+ /*
+ * Using X2APIC without IR is not architecturally supported
+ * on bare metal but may be supported in guests.
 */
- if (max_physical_apicid > 255 ||
- !x86_init.hyper.x2apic_available()) {
+ if (!x86_init.hyper.x2apic_available()) {
 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
 x2apic_disable();
 return;
 }
 /*
- * without IR all CPUs can be addressed by IOAPIC/MSI
- * only in physical mode
+ * Without IR, all CPUs can be addressed by IOAPIC/MSI only
+ * in physical mode, and CPUs with an APIC ID that cannot
+ * be addressed must not be brought online.
*/ + x2apic_set_max_apicid(255); x2apic_phys = 1; } x2apic_enable(); @@ -2098,7 +2104,8 @@ void __init init_apic_mappings(void) { unsigned int new_apicid; - apic_check_deadline_errata(); + if (apic_validate_deadline_timer()) + pr_info("TSC deadline timer available\n"); if (x2apic_mode) { boot_cpu_physical_apicid = read_apic_id(); @@ -2347,6 +2354,11 @@ static int cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = -1, }; +bool arch_match_cpu_phys_id(int cpu, u64 phys_id) +{ + return phys_id == cpuid_to_apicid[cpu]; +} + #ifdef CONFIG_SMP /** * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f0262cb5657a..0edcf69659ee 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1046,6 +1046,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { irq = mp_irqs[idx].srcbusirq; legacy = mp_is_legacy_irq(irq); + /* + * IRQ2 is unusable for historical reasons on systems which + * have a legacy PIC. See the comment vs. IRQ2 further down. + * + * If this gets removed at some point then the related code + * in lapic_assign_system_vectors() needs to be adjusted as + * well. + */ + if (legacy && irq == PIC_CASCADE_IR) + return -EINVAL; } mutex_lock(&ioapic_mutex); @@ -2256,6 +2266,7 @@ static inline void __init check_timer(void) legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); + legacy_pic->unmask(0); unlock_ExtINT_logic(); @@ -2329,12 +2340,12 @@ static int mp_irqdomain_create(int ioapic) ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops, (void *)(long)ioapic); - /* Release fw handle if it was allocated above */ - if (!cfg->dev) - irq_domain_free_fwnode(fn); - - if (!ip->irqdomain) + if (!ip->irqdomain) { + /* Release fw handle if it was allocated above */ + if (!cfg->dev) + irq_domain_free_fwnode(fn); return -ENOMEM; + } ip->irqdomain->parent = parent; @@ -2348,8 +2359,13 @@ static int mp_irqdomain_create(int ioapic) static void ioapic_destroy_irqdomain(int idx) { + struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg; + struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode; + if (ioapics[idx].irqdomain) { irq_domain_remove(ioapics[idx].irqdomain); + if (!cfg->dev) + irq_domain_free_fwnode(fn); ioapics[idx].irqdomain = NULL; } } diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 159bd0cb8548..a20873bbbed6 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -262,12 +262,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent) msi_default_domain = pci_msi_create_irq_domain(fn, &pci_msi_domain_info, parent); - irq_domain_free_fwnode(fn); } - if (!msi_default_domain) + if (!msi_default_domain) { + irq_domain_free_fwnode(fn); pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); - else + } else { msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; + } } #ifdef CONFIG_IRQ_REMAP @@ -300,7 +301,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent, if (!fn) return NULL; d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) + irq_domain_free_fwnode(fn); return d; } #endif @@ -363,7 +365,8 @@ static struct irq_domain *dmar_get_irq_domain(void) if (fn) { dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); + if (!dmar_domain) + 
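/*
 * The common thread in the io_apic.c and msi.c hunks above: an
 * irqdomain keeps using its fwnode for its whole lifetime, so the
 * fwnode may only be freed when domain creation fails (or when the
 * domain itself is torn down). A sketch of the corrected pattern,
 * with an illustrative msi_domain_info:
 */
#include <linux/irqdomain.h>
#include <linux/msi.h>

static struct msi_domain_info example_domain_info;	/* illustrative */

struct irq_domain *example_create_domain(struct irq_domain *parent)
{
	struct fwnode_handle *fn;
	struct irq_domain *d;

	fn = irq_domain_alloc_named_fwnode("EXAMPLE-MSI");
	if (!fn)
		return NULL;

	d = msi_create_irq_domain(fn, &example_domain_info, parent);
	if (!d)
		irq_domain_free_fwnode(fn);	/* failure path only */

	return d;
}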
irq_domain_free_fwnode(fn); } out: mutex_unlock(&dmar_lock); @@ -488,7 +491,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id) } d = msi_create_irq_domain(fn, domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) { + irq_domain_free_fwnode(fn); + kfree(domain_info); + } return d; } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 2c5676b0a6e7..bf6662d37a33 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -272,20 +272,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd) const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd); int node = irq_data_get_node(irqd); - if (node == NUMA_NO_NODE) - goto all; - /* Try the intersection of @affmsk and node mask */ - cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk); - if (!assign_vector_locked(irqd, vector_searchmask)) - return 0; - /* Try the node mask */ - if (!assign_vector_locked(irqd, cpumask_of_node(node))) - return 0; -all: + if (node != NUMA_NO_NODE) { + /* Try the intersection of @affmsk and node mask */ + cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk); + if (!assign_vector_locked(irqd, vector_searchmask)) + return 0; + } + /* Try the full affinity mask */ cpumask_and(vector_searchmask, affmsk, cpu_online_mask); if (!assign_vector_locked(irqd, vector_searchmask)) return 0; + + if (node != NUMA_NO_NODE) { + /* Try the node mask */ + if (!assign_vector_locked(irqd, cpumask_of_node(node))) + return 0; + } + /* Try the full online mask */ return assign_vector_locked(irqd, cpu_online_mask); } @@ -446,12 +450,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, trace_vector_activate(irqd->irq, apicd->is_managed, apicd->can_reserve, reserve); - /* Nothing to do for fixed assigned vectors */ - if (!apicd->can_reserve && !apicd->is_managed) - return 0; - raw_spin_lock_irqsave(&vector_lock, flags); - if (reserve || irqd_is_managed_and_shutdown(irqd)) + if (!apicd->can_reserve && !apicd->is_managed) + assign_irq_vector_any_locked(irqd); + else if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); @@ -556,6 +558,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is @@ -703,7 +709,6 @@ int __init arch_early_irq_init(void) x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, NULL); BUG_ON(x86_vector_domain == NULL); - irq_domain_free_fwnode(fn); irq_set_default_host(x86_vector_domain); arch_init_msi_domain(x86_vector_domain); @@ -769,20 +774,10 @@ void lapic_offline(void) static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { - struct apic_chip_data *apicd = apic_chip_data(irqd); int err; - /* - * Core code can call here for inactive interrupts. For inactive - * interrupts which use managed or reservation mode there is no - * point in going through the vector assignment right now as the - * activation will assign a vector which fits the destination - * cpumask. Let the core code store the destination mask and be - * done with it. 
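/*
 * Summary of the search order the reworked
 * assign_irq_vector_any_locked() above implements:
 *
 *   1. affinity mask AND node mask   (only if the irq has a home node)
 *   2. affinity mask AND online mask
 *   3. node mask                     (only if the irq has a home node)
 *   4. full online mask
 *
 * The change is that the full affinity mask is now tried before the
 * bare node mask, so a user-supplied affinity is no longer bypassed
 * just because the node-local vector space is exhausted.
 */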
- */ - if (!irqd_is_activated(irqd) && - (apicd->is_managed || apicd->can_reserve)) - return IRQ_SET_MASK_OK; + if (WARN_ON_ONCE(!irqd_is_activated(irqd))) + return -EIO; raw_spin_lock(&vector_lock); cpumask_and(vector_searchmask, dest, cpu_online_mask); diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index b0889c48a2ac..7eec3c154fa2 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector) { u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); - x2apic_wrmsr_fence(); + /* x2apic MSRs are special and need a special fence: */ + weak_wrmsr_fence(); __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); } @@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) unsigned long flags; u32 dest; - x2apic_wrmsr_fence(); + /* x2apic MSRs are special and need a special fence: */ + weak_wrmsr_fence(); local_irq_save(flags); tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index bc9693841353..032a00e5d9fa 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -8,6 +8,12 @@ int x2apic_phys; static struct apic apic_x2apic_phys; +static u32 x2apic_max_apicid __ro_after_init; + +void __init x2apic_set_max_apicid(u32 apicid) +{ + x2apic_max_apicid = apicid; +} static int __init set_x2apic_phys_mode(char *arg) { @@ -37,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector) { u32 dest = per_cpu(x86_cpu_to_apicid, cpu); - x2apic_wrmsr_fence(); + /* x2apic MSRs are special and need a special fence: */ + weak_wrmsr_fence(); __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); } @@ -48,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) unsigned long this_cpu; unsigned long flags; - x2apic_wrmsr_fence(); + /* x2apic MSRs are special and need a special fence: */ + weak_wrmsr_fence(); local_irq_save(flags); @@ -98,6 +106,9 @@ static int x2apic_phys_probe(void) /* Common x2apic functions, also used by x2apic_cluster */ int x2apic_apic_id_valid(u32 apicid) { + if (x2apic_max_apicid && apicid > x2apic_max_apicid) + return 0; + return 1; } @@ -116,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which) { unsigned long cfg = __prepare_ICR(which, vector, 0); - x2apic_wrmsr_fence(); + /* x2apic MSRs are special and need a special fence: */ + weak_wrmsr_fence(); native_x2apic_icr_write(cfg, 0); } diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 890f60083eca..368c92c86609 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -29,6 +29,7 @@ obj-y += umwait.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o +obj-$(CONFIG_IA32_FEAT_CTL) += feat_ctl.o ifdef CONFIG_CPU_SUP_INTEL obj-y += intel.o intel_pconfig.o tsx.o obj-$(CONFIG_PM) += intel_epb.o @@ -45,6 +46,7 @@ obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/ +obj-$(CONFIG_X86_SGX) += sgx/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c3f4dd4ae155..eea739a66a4b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -335,7 +335,6 @@ static void amd_get_topology_early(struct cpuinfo_x86 *c) */ static void amd_get_topology(struct 
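/*
 * weak_wrmsr_fence() itself is outside this diff. In the upstream
 * version of this change it is, to the best of my knowledge, the
 * following sequence (verify against asm/special_insns.h in the
 * matching tree before relying on it):
 */
static inline void weak_wrmsr_fence(void)
{
	/*
	 * MFENCE orders prior stores, LFENCE keeps later instructions
	 * from starting early; cheaper than a fully serializing
	 * instruction for the non-serializing x2APIC and TSC-deadline
	 * MSR writes.
	 */
	asm volatile("mfence; lfence" : : : "memory");
}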
cpuinfo_x86 *c) { - u8 node_id; int cpu = smp_processor_id(); /* get information required for multi-node processors */ @@ -345,7 +344,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); - node_id = ecx & 0xff; + c->cpu_die_id = ecx & 0xff; if (c->x86 == 0x15) c->cu_id = ebx & 0xff; @@ -365,15 +364,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c) if (!err) c->x86_coreid_bits = get_count_order(c->x86_max_cores); - cacheinfo_amd_init_llc_id(c, cpu, node_id); + cacheinfo_amd_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - node_id = value & 7; + c->cpu_die_id = value & 7; - per_cpu(cpu_llc_id, cpu) = node_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else return; @@ -398,7 +397,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) /* Convert the initial APIC ID into the socket ID */ c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ - per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; } u16 amd_get_nb_id(int cpu) @@ -546,12 +545,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) u32 ecx; ecx = cpuid_ecx(0x8000001e); - nodes_per_socket = ((ecx >> 8) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - nodes_per_socket = ((value >> 3) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && @@ -935,6 +934,13 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x17: init_amd_zn(c); break; } + /* Force-enable ERMS/FSRM on the AMD EPYC 7K83 CPU */ + if (c->x86 == 25) { + if (!strncmp(&c->x86_model_id[0], "AMD EPYC 7K83 64-Core Processor", 31)) { + set_cpu_cap(c, X86_FEATURE_ERMS); + set_cpu_cap(c, X86_FEATURE_FSRM); + } + } /* * Enable workaround for FXSAVE leak on CPUs * without a XSaveErPtr feature @@ -1117,8 +1123,7 @@ static const int amd_erratum_383[] = /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ static const int amd_erratum_1054[] = - AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); - + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 8bf64899f56a..b290aa9bfb1c 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); static void __init mds_print_mitigation(void); static void __init taa_select_mitigation(void); +static void __init srbds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved.
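/*
 * Review note on the 7K83 hunk above: keying off the marketing string
 * is fragile (trailing padding, OEM naming variants), and the magic
 * length 31 is easy to get wrong. At minimum the length can be
 * derived from the literal. An illustrative helper, not part of the
 * patch:
 */
static bool is_epyc_7k83(struct cpuinfo_x86 *c)
{
	return c->x86 == 0x19 &&	/* family 25 (Zen3) */
	       !strncmp(c->x86_model_id, "AMD EPYC 7K83",
			sizeof("AMD EPYC 7K83") - 1);
}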
*/ u64 x86_spec_ctrl_base; @@ -108,6 +109,7 @@ void __init check_bugs(void) l1tf_select_mitigation(); mds_select_mitigation(); taa_select_mitigation(); + srbds_select_mitigation(); /* * As MDS and TAA mitigations are inter-related, print MDS @@ -390,6 +392,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "SRBDS: " fmt + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF, + SRBDS_MITIGATION_UCODE_NEEDED, + SRBDS_MITIGATION_FULL, + SRBDS_MITIGATION_TSX_OFF, + SRBDS_MITIGATION_HYPERVISOR, +}; + +static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; + +static const char * const srbds_strings[] = { + [SRBDS_MITIGATION_OFF] = "Vulnerable", + [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", + [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", + [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +static bool srbds_off; + +void update_srbds_msr(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + + if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + switch (srbds_mitigation) { + case SRBDS_MITIGATION_OFF: + case SRBDS_MITIGATION_TSX_OFF: + mcu_ctrl |= RNGDS_MITG_DIS; + break; + case SRBDS_MITIGATION_FULL: + mcu_ctrl &= ~RNGDS_MITG_DIS; + break; + default: + break; + } + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); +} + +static void __init srbds_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + /* + * Check to see if this is one of the MDS_NO systems supporting + * TSX that are only exposed to SRBDS when TSX is enabled. + */ + ia32_cap = x86_read_arch_cap_msr(); + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; + else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; + else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) + srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; + else if (cpu_mitigations_off() || srbds_off) + srbds_mitigation = SRBDS_MITIGATION_OFF; + + update_srbds_msr(); + pr_info("%s\n", srbds_strings[srbds_mitigation]); +} + +static int __init srbds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return 0; + + srbds_off = !strcmp(str, "off"); + return 0; +} +early_param("srbds", srbds_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -443,14 +536,12 @@ static void __init spectre_v1_select_mitigation(void) * If FSGSBASE is enabled, the user can put a kernel address in * GS, in which case SMAP provides no protection. * - * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the - * FSGSBASE enablement patches have been merged. ] - * * If FSGSBASE is disabled, the user can only put a user space * address in GS. That makes an attack harder, but still * possible if there's no SMAP protection. 
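/*
 * Usage note: the mitigation selected above is reported through
 * /sys/devices/system/cpu/vulnerabilities/srbds (see cpu_show_srbds()
 * later in this patch) and can be disabled with the srbds=off boot
 * parameter. A minimal userspace probe:
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/srbds", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("SRBDS: %s", line);  /* e.g. "Mitigation: Microcode" */
	if (f)
		fclose(f);
	return 0;
}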
*/ - if (!smap_works_speculatively()) { + if (boot_cpu_has(X86_FEATURE_FSGSBASE) || + !smap_works_speculatively()) { /* * Mitigation can be provided from SWAPGS itself or * PTI as the CR3 write in the Meltdown mitigation @@ -488,7 +579,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; -static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = +static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = + SPECTRE_V2_USER_NONE; +static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = SPECTRE_V2_USER_NONE; #ifdef CONFIG_RETPOLINE @@ -634,24 +727,17 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) break; } - /* - * At this point, an STIBP mode other than "off" has been set. - * If STIBP support is not being forced, check if STIBP always-on - * is preferred. - */ - if (mode != SPECTRE_V2_USER_STRICT && - boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) - mode = SPECTRE_V2_USER_STRICT_PREFERRED; - /* Initialize Indirect Branch Prediction Barrier */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + spectre_v2_user_ibpb = mode; switch (cmd) { case SPECTRE_V2_USER_CMD_FORCE: case SPECTRE_V2_USER_CMD_PRCTL_IBPB: case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: static_branch_enable(&switch_mm_always_ibpb); + spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; break; case SPECTRE_V2_USER_CMD_PRCTL: case SPECTRE_V2_USER_CMD_AUTO: @@ -667,21 +753,32 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) "always-on" : "conditional"); } - /* If enhanced IBRS is enabled no STIBP required */ - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + /* + * If enhanced IBRS is enabled or SMT impossible, STIBP is not + * required. + */ + if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return; /* - * If SMT is not possible or STIBP is not available clear the STIBP - * mode. + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. */ - if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) + if (mode != SPECTRE_V2_USER_STRICT && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + + /* + * If STIBP is not available, clear the STIBP mode. 
+ */ + if (!boot_cpu_has(X86_FEATURE_STIBP)) mode = SPECTRE_V2_USER_NONE; + + spectre_v2_user_stibp = mode; + set_mode: - spectre_v2_user = mode; - /* Only print the STIBP mode when SMT possible */ - if (smt_possible) - pr_info("%s\n", spectre_v2_user_strings[mode]); + pr_info("%s\n", spectre_v2_user_strings[mode]); } static const char * const spectre_v2_strings[] = { @@ -914,7 +1011,7 @@ void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: @@ -1153,19 +1250,41 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) return 0; } +static bool is_spec_ib_user_controlled(void) +{ + return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; +} + static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { case PR_SPEC_ENABLE: - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; + /* - * Indirect branch speculation is always disabled in strict - * mode. + * With strict mode for both IBPB and STIBP, the instruction + * code paths avoid checking this task flag and instead, + * unconditionally run the instruction. However, STIBP and IBPB + * are independent and either can be set to conditionally + * enabled regardless of the mode of the other. + * + * If either is set to conditional, allow the task flag to be + * updated, unless it was force-disabled by a previous prctl + * call. Currently, this is possible on an AMD CPU which has the + * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the + * kernel is booted with 'spectre_v2_user=seccomp', then + * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and + * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. */ - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) + if (!is_spec_ib_user_controlled() || + task_spec_ib_force_disable(task)) return -EPERM; + task_clear_spec_ib_disable(task); task_update_spec_tif(task); break; @@ -1175,11 +1294,13 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) * Indirect branch speculation is always allowed when * mitigation is force disabled. 
*/ - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) + + if (!is_spec_ib_user_controlled()) return 0; + task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); @@ -1209,7 +1330,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); - if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -1240,22 +1362,21 @@ static int ib_prctl_get(struct task_struct *task) if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return PR_SPEC_NOT_AFFECTED; - switch (spectre_v2_user) { - case SPECTRE_V2_USER_NONE: + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; - case SPECTRE_V2_USER_PRCTL: - case SPECTRE_V2_USER_SECCOMP: + else if (is_spec_ib_user_controlled()) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; - case SPECTRE_V2_USER_STRICT: - case SPECTRE_V2_USER_STRICT_PREFERRED: + } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return PR_SPEC_DISABLE; - default: + else return PR_SPEC_NOT_AFFECTED; - } } int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) @@ -1494,7 +1615,7 @@ static char *stibp_state(void) if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return ""; - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: return ", STIBP: disabled"; case SPECTRE_V2_USER_STRICT: @@ -1521,6 +1642,11 @@ static char *ibpb_state(void) return ""; } +static ssize_t srbds_show_state(char *buf) +{ + return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -1565,6 +1691,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_ITLB_MULTIHIT: return itlb_multihit_show_state(buf); + case X86_BUG_SRBDS: + return srbds_show_state(buf); + default: break; } @@ -1611,4 +1740,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr { return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); } + +ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); +} #endif diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index c7503be92f35..30f33b75209a 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -646,7 +646,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) return i; } -void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) { /* * We may have multiple LLCs if L3 caches exist, so check if we @@ -657,7 +657,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 
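/*
 * Usage note: the ib_prctl_set()/ib_prctl_get() paths above back the
 * per-task speculation-control prctl. A minimal sketch of a task
 * opting out of indirect-branch speculation, using the documented
 * PR_SPEC_* ABI:
 */
#include <stdio.h>
#include <sys/prctl.h>		/* pulls in the linux/prctl.h constants */

int main(void)
{
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Returns a bitmask such as PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
	printf("ib state: 0x%x\n",
	       (unsigned int)prctl(PR_GET_SPECULATION_CTRL,
				   PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
	return 0;
}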
node_id) if (c->x86 < 0x17) { /* LLC is at the node level. */ - per_cpu(cpu_llc_id, cpu) = node_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { /* * LLC is at the core complex level. @@ -684,7 +684,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) } } -void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) { /* * We may have multiple LLCs if L3 caches exist, so check if we diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 704caec136cf..8ec156923ebb 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -58,6 +58,10 @@ #include #endif +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +#include +#endif + #include "cpu.h" u32 elf_hwcap2 __read_mostly; @@ -366,6 +370,9 @@ out: cr4_clear_bits(X86_CR4_UMIP); } +/* These bits should not change their value after CPU init is finished. */ +static const unsigned long cr4_pinned_mask = + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE; static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); static unsigned long cr4_pinned_bits __ro_after_init; @@ -374,7 +381,7 @@ void native_write_cr0(unsigned long val) unsigned long bits_missing = 0; set_register: - asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order)); + asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { @@ -390,20 +397,20 @@ EXPORT_SYMBOL(native_write_cr0); void native_write_cr4(unsigned long val) { - unsigned long bits_missing = 0; + unsigned long bits_changed = 0; set_register: - asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); + asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { - if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { - bits_missing = ~val & cr4_pinned_bits; - val |= bits_missing; + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { + bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits; + val = (val & ~cr4_pinned_mask) | cr4_pinned_bits; goto set_register; } - /* Warn after we've set the missing bits. */ - WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", - bits_missing); + /* Warn after we've corrected the changed bits. */ + WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", + bits_changed); } } EXPORT_SYMBOL(native_write_cr4); @@ -415,7 +422,7 @@ void cr4_init(void) if (boot_cpu_has(X86_FEATURE_PCID)) cr4 |= X86_CR4_PCIDE; if (static_branch_likely(&cr_pinning)) - cr4 |= cr4_pinned_bits; + cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits; __write_cr4(cr4); @@ -430,13 +437,26 @@ void cr4_init(void) */ static void __init setup_cr_pinning(void) { - unsigned long mask; - - mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); - cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask; static_key_enable(&cr_pinning.key); } +static __init int x86_nofsgsbase_setup(char *arg) +{ + /* Require an exact match without trailing characters. */ + if (strlen(arg)) + return 0; + + /* Do not emit a message if the feature is not present. */ + if (!boot_cpu_has(X86_FEATURE_FSGSBASE)) + return 1; + + setup_clear_cpu_cap(X86_FEATURE_FSGSBASE); + pr_info("FSGSBASE disabled via kernel command line\n"); + return 1; +} +__setup("nofsgsbase", x86_nofsgsbase_setup); + /* * Protection Keys are not available in 32-bit mode. 
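/*
 * The CR4-pinning repair logic above, in isolation: any write that
 * flips a pinned bit is rewritten to the value captured at boot, and
 * only then warned about. A self-contained restatement (illustrative
 * helper, not from the patch):
 */
static unsigned long sanitize_cr4(unsigned long val,
				  unsigned long pinned_mask,
				  unsigned long pinned_bits)
{
	unsigned long changed = (val & pinned_mask) ^ pinned_bits;

	if (changed)	/* the kernel WARN_ONCE()s about this */
		val = (val & ~pinned_mask) | pinned_bits;

	return val;
}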
*/ @@ -1024,6 +1044,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #define MSBDS_ONLY BIT(5) #define NO_SWAPGS BIT(6) #define NO_ITLB_MULTIHIT BIT(7) +#define NO_SPECTRE_V2 BIT(8) #define VULNWL(_vendor, _family, _model, _whitelist) \ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } @@ -1085,12 +1106,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + + /* Zhaoxin Family 7 */ + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2), + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2), {} }; -static bool __init cpu_matches(unsigned long which) +#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ + INTEL_FAM6_##model, steppings, \ + X86_FEATURE_ANY, issues) + +#define SRBDS BIT(0) + +static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + {} +}; + +static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) { - const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); + const struct x86_cpu_id *m = x86_match_cpu(table); return m && !!(m->driver_data & which); } @@ -1110,29 +1156,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) u64 ia32_cap = x86_read_arch_cap_msr(); /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ - if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && + !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); - if (cpu_matches(NO_SPECULATION)) + if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && + if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + + if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && + !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); if (ia32_cap & ARCH_CAP_IBRS_ALL) setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); - if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { + if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && + !(ia32_cap & ARCH_CAP_MDS_NO)) { setup_force_cpu_bug(X86_BUG_MDS); - if (cpu_matches(MSBDS_ONLY)) + if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); } - if (!cpu_matches(NO_SWAPGS)) + if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) setup_force_cpu_bug(X86_BUG_SWAPGS); /* @@ -1150,7 +1201,16 @@ static void __init cpu_set_bug_bits(struct 
cpuinfo_x86 *c) (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) setup_force_cpu_bug(X86_BUG_TAA); - if (cpu_matches(NO_MELTDOWN)) + /* + * SRBDS affects CPUs which support RDRAND or RDSEED and are listed + * in the vulnerability blacklist. + */ + if ((cpu_has(c, X86_FEATURE_RDRAND) || + cpu_has(c, X86_FEATURE_RDSEED)) && + cpu_matches(cpu_vuln_blacklist, SRBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; /* Rogue Data Cache Load? No! */ @@ -1159,7 +1219,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - if (cpu_matches(NO_L1TF)) + if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) return; setup_force_cpu_bug(X86_BUG_L1TF); @@ -1476,6 +1536,10 @@ static void identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_init) this_cpu->c_init(c); +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS + cna_set_llc_id_per_cpu(smp_processor_id()); +#endif + /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); @@ -1484,6 +1548,12 @@ static void identify_cpu(struct cpuinfo_x86 *c) setup_smap(c); setup_umip(c); + /* Enable FSGSBASE instructions if available. */ + if (cpu_has(c, X86_FEATURE_FSGSBASE)) { + cr4_set_bits(X86_CR4_FSGSBASE); + elf_hwcap2 |= HWCAP2_FSGSBASE; + } + /* * The vendor-specific functions might have changed features. * Now we do "generic changes." @@ -1597,6 +1667,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) mtrr_ap_init(); validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); + update_srbds_msr(); } static __init int setup_noclflush(char *arg) @@ -1785,7 +1856,7 @@ static void setup_getcpu(int cpu) unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); struct desc_struct d = { }; - if (boot_cpu_has(X86_FEATURE_RDTSCP)) + if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) write_rdtscp_aux(cpudata); /* Store CPU and node number in limit. */ diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 38ab6e115eac..fb538fccd24c 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -77,7 +77,12 @@ extern void detect_ht(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); extern void x86_spec_ctrl_setup_ap(void); +extern void update_srbds_msr(void); extern u64 x86_read_arch_cap_msr(void); +#ifdef CONFIG_IA32_FEAT_CTL +void init_ia32_feat_ctl(struct cpuinfo_x86 *c); +#endif + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c new file mode 100644 index 000000000000..ccec52073f8a --- /dev/null +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +static void clear_sgx_caps(void) +{ + setup_clear_cpu_cap(X86_FEATURE_SGX); + setup_clear_cpu_cap(X86_FEATURE_SGX_LC); +} + +static int __init nosgx(char *str) +{ + clear_sgx_caps(); + + return 0; +} + +early_param("nosgx", nosgx); + +void init_ia32_feat_ctl(struct cpuinfo_x86 *c) +{ + bool enable_sgx; + u64 msr; + + if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) { + clear_sgx_caps(); + return; + } + + /* + * Enable SGX if and only if the kernel supports SGX and Launch Control + * is supported, i.e. disable SGX if the LE hash MSRs can't be written. 
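/*
 * Usage note: once identify_cpu() sets HWCAP2_FSGSBASE as above,
 * userspace can probe the flag before executing RDFSBASE/WRFSBASE
 * directly. Minimal sketch; HWCAP2_FSGSBASE is bit 1 per
 * arch/x86/include/uapi/asm/hwcap2.h:
 */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_FSGSBASE
#define HWCAP2_FSGSBASE (1 << 1)
#endif

int main(void)
{
	if (getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE)
		puts("FSGSBASE usable from user space");
	else
		puts("fall back to arch_prctl()");
	return 0;
}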
+ */ + enable_sgx = cpu_has(c, X86_FEATURE_SGX) && + cpu_has(c, X86_FEATURE_SGX_LC) && + IS_ENABLED(CONFIG_X86_SGX); + + if (msr & FEAT_CTL_LOCKED) + goto update_sgx; + + /* + * Ignore whatever value BIOS left in the MSR to avoid enabling random + * features or faulting on the WRMSR. + */ + msr = FEAT_CTL_LOCKED; + + /* + * Enable VMX if and only if the kernel may do VMXON at some point, + * i.e. KVM is enabled, to avoid unnecessarily adding an attack vector + * for the kernel, e.g. using VMX to hide malicious code. + */ + if (cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL)) { + msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; + + if (tboot_enabled()) + msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX; + } + + if (enable_sgx) + msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED; + + wrmsrl(MSR_IA32_FEAT_CTL, msr); + +update_sgx: + if (!(msr & FEAT_CTL_SGX_ENABLED) || + !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) { + if (enable_sgx) + pr_err_once("SGX disabled by BIOS\n"); + clear_sgx_caps(); + } +} diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4e28c1fc8749..62e9a982adaf 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -64,7 +64,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c) */ static void hygon_get_topology(struct cpuinfo_x86 *c) { - u8 node_id; int cpu = smp_processor_id(); /* get information required for multi-node processors */ @@ -74,7 +73,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); - node_id = ecx & 0xff; + c->cpu_die_id = ecx & 0xff; c->cpu_core_id = ebx & 0xff; @@ -92,14 +91,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) /* Socket ID is ApicId[6] for these processors. */ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; - cacheinfo_hygon_init_llc_id(c, cpu, node_id); + cacheinfo_hygon_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - node_id = value & 7; + c->cpu_die_id = value & 7; - per_cpu(cpu_llc_id, cpu) = node_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else return; @@ -122,7 +121,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c) /* Convert the initial APIC ID into the socket ID */ c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ - per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; } static void srat_detect_node(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 11d5c5950e2d..f3f42e216992 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -755,6 +755,8 @@ static void init_intel(struct cpuinfo_x86 *c) /* Work around errata */ srat_detect_node(c); + init_ia32_feat_ctl(c); + if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index 6dd78d8235e4..2f163e6646b6 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -34,13 +34,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) const struct x86_cpu_id *m; struct cpuinfo_x86 *c = &boot_cpu_data; - for (m = match; m->vendor | m->family | m->model | m->feature; m++) { + for (m = match; + m->vendor | m->family | m->model | m->steppings | m->feature; + m++) { if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) continue; if (m->family != X86_FAMILY_ANY && c->x86 != m->family) continue; if (m->model != 
X86_MODEL_ANY && c->x86_model != m->model) continue; + if (m->steppings != X86_STEPPING_ANY && + !(BIT(c->x86_stepping) & m->steppings)) + continue; if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) continue; return m; diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 1cf34fcc3a8e..7d76f41591da 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -78,6 +78,7 @@ struct smca_bank_name { static struct smca_bank_name smca_names[] = { [SMCA_LS] = { "load_store", "Load Store Unit" }, + [SMCA_LS_V2] = { "load_store", "Load Store Unit" }, [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, @@ -138,6 +139,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = { /* ZN Core (HWID=0xB0) MCA types */ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF }, + { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF }, { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF }, diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index aecb15ba66cd..2e6371a2f123 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -42,6 +42,8 @@ #include #include #include +#include +#include #include #include @@ -167,29 +169,17 @@ EXPORT_SYMBOL_GPL(mce_inject_log); static struct notifier_block mce_srao_nb; -/* - * We run the default notifier if we have only the SRAO, the first and the - * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS - * notifiers registered on the chain. - */ -#define NUM_DEFAULT_NOTIFIERS 3 -static atomic_t num_notifiers; - void mce_register_decode_chain(struct notifier_block *nb) { if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC)) return; - atomic_inc(&num_notifiers); - blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); void mce_unregister_decode_chain(struct notifier_block *nb) { - atomic_dec(&num_notifiers); - blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); @@ -272,6 +262,7 @@ static void __print_mce(struct mce *m) } pr_cont("\n"); + /* * Note this output is parsed by external tools and old fields * should not be changed. @@ -388,10 +379,28 @@ static int msr_to_offset(u32 msr) return -1; } +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", + (unsigned int)regs->cx, regs->ip, (void *)regs->ip); + + show_stack_regs(regs); + + panic("MCA architectural violation!\n"); + + while (true) + cpu_relax(); + + return true; +} + /* MSR access wrappers used for error injection */ static u64 mce_rdmsrl(u32 msr) { - u64 v; + DECLARE_ARGS(val, low, high); if (__this_cpu_read(injectm.finished)) { int offset = msr_to_offset(msr); @@ -401,21 +410,43 @@ static u64 mce_rdmsrl(u32 msr) return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); } - if (rdmsrl_safe(msr, &v)) { - WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr); - /* - * Return zero in case the access faulted. This should - * not happen normally but can happen if the CPU does - * something weird, or if the code is buggy. - */ - v = 0; - } + /* + * RDMSR on MCA MSRs should not fault. 
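/*
 * The new steppings test above treats m->steppings as a bitmask with
 * one bit per stepping: X86_STEPPINGS(min, max) builds the range and
 * X86_STEPPING_ANY is the "don't care" sentinel. Restated as a
 * stand-alone helper for clarity (illustrative, kernel context):
 */
#include <linux/bits.h>

static bool stepping_matches(unsigned short steppings, unsigned char stepping)
{
	if (steppings == X86_STEPPING_ANY)
		return true;

	return BIT(stepping) & steppings;
}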
If they do, this is very much an + * architectural violation and needs to be reported to hw vendor. Panic + * the box to not allow any further progress. + */ + asm volatile("1: rdmsr\n" + "2:\n" + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault) + : EAX_EDX_RET(val, low, high) : "c" (msr)); - return v; + + return EAX_EDX_VAL(val, low, high); +} + +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", + (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, + regs->ip, (void *)regs->ip); + + show_stack_regs(regs); + + panic("MCA architectural violation!\n"); + + while (true) + cpu_relax(); + + return true; } static void mce_wrmsrl(u32 msr, u64 v) { + u32 low, high; + if (__this_cpu_read(injectm.finished)) { int offset = msr_to_offset(msr); @@ -423,7 +454,15 @@ static void mce_wrmsrl(u32 msr, u64 v) *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; return; } - wrmsrl(msr, v); + + low = (u32)v; + high = (u32)(v >> 32); + + /* See comment in mce_rdmsrl() */ + asm volatile("1: wrmsr\n" + "2:\n" + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault) + : : "c" (msr), "a"(low), "d" (high) : "memory"); } /* @@ -533,6 +572,13 @@ bool mce_is_memory_error(struct mce *m) } EXPORT_SYMBOL_GPL(mce_is_memory_error); +static bool whole_page(struct mce *m) +{ + if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV)) + return true; + return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT; +} + bool mce_is_correctable(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) @@ -548,22 +594,7 @@ bool mce_is_correctable(struct mce *m) } EXPORT_SYMBOL_GPL(mce_is_correctable); -static bool cec_add_mce(struct mce *m) -{ - if (!m) - return false; - - /* We eat only correctable DRAM errors with usable addresses. 
*/ - if (mce_is_memory_error(m) && - mce_is_correctable(m) && - mce_usable_address(m)) - if (!cec_add_elem(m->addr >> PAGE_SHIFT)) - return true; - - return false; -} - -static int mce_first_notifier(struct notifier_block *nb, unsigned long val, +static int mce_early_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; @@ -571,9 +602,6 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - if (cec_add_mce(m)) - return NOTIFY_STOP; - /* Emit the trace record: */ trace_mce_record(m); @@ -584,9 +612,9 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val, return NOTIFY_DONE; } -static struct notifier_block first_nb = { - .notifier_call = mce_first_notifier, - .priority = MCE_PRIO_FIRST, +static struct notifier_block early_nb = { + .notifier_call = mce_early_notifier, + .priority = MCE_PRIO_EARLY, }; static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, @@ -600,8 +628,10 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - if (!memory_failure(pfn, 0)) - set_mce_nospec(pfn); + if (!memory_failure(pfn, 0)) { + set_mce_nospec(pfn, whole_page(mce)); + mce->kflags |= MCE_HANDLED_UC; + } } return NOTIFY_OK; @@ -619,10 +649,8 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS) - return NOTIFY_DONE; - - __print_mce(m); + if (mca_cfg.print_all || !m->kflags) + __print_mce(m); return NOTIFY_DONE; } @@ -759,9 +787,7 @@ log_it: error_seen = true; mce_read_aux(&m, i); - - m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); - + m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false); /* * Don't get the IP here because it's unlikely to * have anything to do with the actual error location. @@ -815,7 +841,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, quirk_no_way_out(i, m, regs); m->bank = i; - if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { mce_read_aux(m, i); *msg = tmp; return 1; @@ -893,7 +919,6 @@ static void mce_reign(void) struct mce *m = NULL; int global_worst = 0; char *msg = NULL; - char *nmsg = NULL; /* * This CPU is the Monarch and the other CPUs have run @@ -901,12 +926,10 @@ static void mce_reign(void) * Grade the severity of the errors of all the CPUs. */ for_each_possible_cpu(cpu) { - int severity = mce_severity(&per_cpu(mces_seen, cpu), - mca_cfg.tolerant, - &nmsg, true); - if (severity > global_worst) { - msg = nmsg; - global_worst = severity; + struct mce *mtmp = &per_cpu(mces_seen, cpu); + + if (mtmp->severity > global_worst) { + global_worst = mtmp->severity; m = &per_cpu(mces_seen, cpu); } } @@ -916,8 +939,11 @@ static void mce_reign(void) * This dumps all the mces in the log buffer and stops the * other CPUs. */ - if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) + if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { + /* call mce_severity() to get "msg" for panic */ + mce_severity(m, NULL, mca_cfg.tolerant, &msg, true); mce_panic("Fatal machine check", m, msg); + } /* * For UC somewhere we let the CPU who detects it handle it. 
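/*
 * Shape of a decode-chain consumer after this rework: instead of
 * returning NOTIFY_STOP and starving later notifiers, a handler tags
 * the record via m->kflags and the default notifier then prints only
 * untagged records. Illustrative skeleton; a real consumer would use
 * the MCE_HANDLED_* bit appropriate to it:
 */
#include <linux/notifier.h>
#include <asm/mce.h>

static int example_mce_notify(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* ...consume the record... */
	m->kflags |= MCE_HANDLED_MCELOG;	/* mark it handled */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_mce_notify,
	.priority	= MCE_PRIO_EDAC,
};
/* registered via mce_register_decode_chain(&example_nb); */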
@@ -1091,23 +1117,6 @@ static void mce_clear_state(unsigned long *toclear) } } -static int do_memory_failure(struct mce *m) -{ - int flags = MF_ACTION_REQUIRED; - int ret; - - pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr); - if (!(m->mcgstatus & MCG_STATUS_RIPV)) - flags |= MF_MUST_KILL; - ret = memory_failure(m->addr >> PAGE_SHIFT, flags); - if (ret) - pr_err("Memory error not recovered"); - else - set_mce_nospec(m->addr >> PAGE_SHIFT); - return ret; -} - - /* * Cases where we avoid rendezvous handler timeout: * 1) If this CPU is offline. @@ -1135,7 +1144,7 @@ static bool __mc_check_crashing_cpu(int cpu) return false; } -static void __mc_scan_banks(struct mce *m, struct mce *final, +static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final, unsigned long *toclear, unsigned long *valid_banks, int no_way_out, int *worst) { @@ -1170,7 +1179,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final, /* Set taint even when machine check was not enabled. */ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - severity = mce_severity(m, cfg->tolerant, NULL, true); + severity = mce_severity(m, regs, cfg->tolerant, NULL, true); /* * When machine check was for corrected/deferred handler don't @@ -1203,6 +1212,49 @@ static void __mc_scan_banks(struct mce *m, struct mce *final, *m = *final; } +static void kill_me_now(struct callback_head *ch) +{ + force_sig(SIGBUS); +} + +static void kill_me_maybe(struct callback_head *cb) +{ + struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); + int flags = MF_ACTION_REQUIRED; + + pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); + if (!p->mce_ripv) + flags |= MF_MUST_KILL; + + if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) && + !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) { + set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); + return; + } + + if (p->mce_vaddr != (void __user *)-1l) { + force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT); + } else { + pr_err("Memory error not recovered"); + kill_me_now(cb); + } +} + +static void queue_task_work(struct mce *m, int kill_it) +{ + current->mce_addr = m->addr; + current->mce_kflags = m->kflags; + current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); + current->mce_whole_page = whole_page(m); + + if (kill_it) + current->mce_kill_me.func = kill_me_now; + else + current->mce_kill_me.func = kill_me_maybe; + + task_work_add(current, &current->mce_kill_me, true); +} + /* * The actual machine check handler. This only handles real * exceptions when something got corrupted coming in through int 18. @@ -1252,7 +1304,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) if (__mc_check_crashing_cpu(cpu)) return; - ist_enter(regs); + nmi_enter(); this_cpu_inc(mce_exception_count); @@ -1296,7 +1348,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) order = mce_start(&no_way_out); } - __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst); + __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); if (!no_way_out) mce_clear_state(toclear); @@ -1306,8 +1358,10 @@ void do_machine_check(struct pt_regs *regs, long error_code) * When there's any problem use only local no_way_out state.
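/*
 * The queue_task_work() flow above is the generic task_work pattern:
 * defer work that may sleep or send signals from the #MC context to
 * the point where the task returns to user space. Bare-bones sketch;
 * note this tree still passes a bool to task_work_add(), while later
 * kernels use an enum notify mode:
 */
#include <linux/task_work.h>

static void deferred_fn(struct callback_head *cb)
{
	/* Runs in process context on return to user space. */
}

static void arm_deferred_work(void)
{
	init_task_work(&current->mce_kill_me, deferred_fn);
	task_work_add(current, &current->mce_kill_me, true);
}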
*/ if (!lmce) { - if (mce_end(order) < 0) - no_way_out = worst >= MCE_PANIC_SEVERITY; + if (mce_end(order) < 0) { + if (!no_way_out) + no_way_out = worst >= MCE_PANIC_SEVERITY; + } } else { /* * If there was a fatal machine check we should have @@ -1318,7 +1372,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) * make sure we have the right "msg". */ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { - mce_severity(&m, cfg->tolerant, &msg, true); + mce_severity(&m, regs, cfg->tolerant, &msg, true); mce_panic("Local fatal machine check!", &m, msg); } } @@ -1344,20 +1398,32 @@ void do_machine_check(struct pt_regs *regs, long error_code) /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { - ist_begin_non_atomic(regs); - local_irq_enable(); + /* If this triggers there is no way to recover. Die hard. */ + BUG_ON(!on_thread_stack() || !user_mode(regs)); + + queue_task_work(&m, kill_it); - if (kill_it || do_memory_failure(&m)) - force_sig(SIGBUS); - local_irq_disable(); - ist_end_non_atomic(); } else { - if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) - mce_panic("Failed kernel mode recovery", &m, NULL); + /* + * Handle an MCE which has happened in kernel space but from + * which the kernel can recover: ex_has_fault_handler() has + * already verified that the rIP at which the error happened is + * a rIP from which the kernel can recover (by jumping to + * recovery code specified in _ASM_EXTABLE_FAULT()) and the + * corresponding exception handler which would do that is the + * proper one. + */ + if (m.kflags & MCE_IN_KERNEL_RECOV) { + if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) + mce_panic("Failed kernel mode recovery", &m, msg); + } + + if (m.kflags & MCE_IN_KERNEL_COPYIN) + queue_task_work(&m, kill_it); } out_ist: - ist_exit(regs); + nmi_exit(); } EXPORT_SYMBOL_GPL(do_machine_check); @@ -1928,6 +1994,7 @@ void mce_disable_bank(int bank) * mce=no_cmci Disables CMCI * mce=no_lmce Disables LMCE * mce=dont_log_ce Clears corrected events silently, no log created for CEs. + * mce=print_all Print all machine check logs to console * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. 
* mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) * monarchtimeout is how long to wait for other CPUs on machine @@ -1956,6 +2023,8 @@ static int __init mcheck_enable(char *str) cfg->lmce_disabled = 1; else if (!strcmp(str, "dont_log_ce")) cfg->dont_log_ce = true; + else if (!strcmp(str, "print_all")) + cfg->print_all = true; else if (!strcmp(str, "ignore_ce")) cfg->ignore_ce = true; else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) @@ -1978,7 +2047,7 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { mcheck_intel_therm_init(); - mce_register_decode_chain(&first_nb); + mce_register_decode_chain(&early_nb); mce_register_decode_chain(&mce_srao_nb); mce_register_decode_chain(&mce_default_nb); mcheck_vendor_init_severity(); @@ -2221,6 +2290,7 @@ static ssize_t store_int_with_restart(struct device *s, static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); +static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all); static struct dev_ext_attribute dev_attr_check_interval = { __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), @@ -2245,6 +2315,7 @@ static struct device_attribute *mce_device_attrs[] = { #endif &dev_attr_monarch_timeout.attr, &dev_attr_dont_log_ce.attr, + &dev_attr_print_all.attr, &dev_attr_ignore_ce.attr, &dev_attr_cmci_disabled.attr, NULL @@ -2537,7 +2608,6 @@ static int __init mcheck_late_init(void) static_branch_inc(&mcsafe_key); mcheck_debugfs_init(); - cec_init(); /* * Flush out everything that has been logged during early boot, now that diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index 7c8958dee103..f1bf7535ead7 100644 --- a/arch/x86/kernel/cpu/mce/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -43,6 +43,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, struct mce *mce = (struct mce *)data; unsigned int entry; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + mutex_lock(&mce_chrdev_read_mutex); entry = mcelog.next; @@ -60,6 +63,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); mcelog.entry[entry].finished = 1; + mcelog.entry[entry].kflags = 0; /* wake processes polling /dev/mcelog */ wake_up_interruptible(&mce_chrdev_wait); @@ -67,6 +71,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, unlock: mutex_unlock(&mce_chrdev_read_mutex); + mce->kflags |= MCE_HANDLED_MCELOG; return NOTIFY_OK; } diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 1f30117b24ba..eb2d41c1816d 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -511,7 +511,7 @@ static void do_inject(void) */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); + i_mce.status &= ~MCI_STATUS_UC; } /* diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 43031db429d2..4f90f720b281 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -35,7 +35,8 @@ int mce_gen_pool_add(struct mce *mce); int mce_gen_pool_init(void); struct llist_node *mce_gen_pool_prepare_records(void); -extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp); +extern int (*mce_severity)(struct mce *a, struct pt_regs *regs, + int tolerant, 
char **msg, bool is_excp); struct dentry *mce_get_debugfs_dir(void); extern mce_banks_t mce_banks_ce_disabled; @@ -110,6 +111,7 @@ struct mca_config { bool dont_log_ce; bool cmci_disabled; bool ignore_ce; + bool print_all; __u64 lmce_disabled : 1, disabled : 1, @@ -172,4 +174,14 @@ extern bool amd_filter_mce(struct mce *m); static inline bool amd_filter_mce(struct mce *m) { return false; }; #endif +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr); + +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr); + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mce/p5.c b/arch/x86/kernel/cpu/mce/p5.c index 4ae6df556526..5ee94aa1b766 100644 --- a/arch/x86/kernel/cpu/mce/p5.c +++ b/arch/x86/kernel/cpu/mce/p5.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -24,7 +25,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code) { u32 loaddr, hi, lotype; - ist_enter(regs); + nmi_enter(); rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); @@ -39,7 +40,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code) add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs); + nmi_exit(); } /* Set up machine check reporting for processors with Intel style MCE: */ diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index 87bcdc6dc2f0..2c5ea37a2e9b 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -9,9 +9,14 @@ #include #include #include -#include #include +#include +#include +#include +#include +#include + #include "internal.h" /* @@ -40,9 +45,14 @@ static struct severity { unsigned char context; unsigned char excp; unsigned char covered; + unsigned char cpu_model; + unsigned char cpu_minstepping; + unsigned char bank_lo, bank_hi; char *msg; } severities[] = { #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } +#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h +#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s #define KERNEL .context = IN_KERNEL #define USER .context = IN_USER #define KERNEL_RECOV .context = IN_KERNEL_RECOV @@ -97,7 +107,6 @@ static struct severity { KEEP, "Corrected error", NOSER, BITCLR(MCI_STATUS_UC) ), - /* * known AO MCACODs reported via MCE or CMC: * @@ -113,6 +122,18 @@ static struct severity { AO, "Action optional: last level cache writeback error", SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB) ), + /* + * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured + * to report uncorrected errors using CMCI with a special signature. + * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported + * in one of the memory controller banks. + * Set severity to "AO" for same action as normal patrol scrub error. 
+ */ + MCESEV( + AO, "Uncorrected Patrol Scrub Error", + SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), + MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) + ), /* ignore OVER for UCNA */ MCESEV( @@ -198,6 +219,47 @@ static struct severity { #define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \ (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) +static bool is_copy_from_user(struct pt_regs *regs) +{ + u8 insn_buf[MAX_INSN_SIZE]; + struct insn insn; + unsigned long addr; + + if (probe_kernel_read(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) + return false; + + kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE); + insn_get_opcode(&insn); + if (!insn.opcode.got) + return false; + + switch (insn.opcode.value) { + /* MOV mem,reg */ + case 0x8A: case 0x8B: + /* MOVZ mem,reg */ + case 0xB60F: case 0xB70F: + insn_get_modrm(&insn); + insn_get_sib(&insn); + if (!insn.modrm.got || !insn.sib.got) + return false; + addr = (unsigned long)insn_get_addr_ref(&insn, regs); + break; + /* REP MOVS */ + case 0xA4: case 0xA5: + addr = regs->si; + break; + default: + return false; + } + + if (fault_in_kernel_space(addr)) + return false; + + current->mce_vaddr = (void __user *)addr; + + return true; +} + /* * If mcgstatus indicated that ip/cs on the stack were * no good, then "m->cs" will be zero and we will have @@ -209,12 +271,26 @@ static struct severity { * distinguish an exception taken in user from from one * taken in the kernel. */ -static int error_context(struct mce *m) +static int error_context(struct mce *m, struct pt_regs *regs) { + enum handler_type t; + if ((m->cs & 3) == 3) return IN_USER; - if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) + if (!mc_recoverable(m->mcgstatus)) + return IN_KERNEL; + + t = ex_get_fault_handler_type(m->ip); + if (t == EX_HANDLER_FAULT) { + m->kflags |= MCE_IN_KERNEL_RECOV; return IN_KERNEL_RECOV; + } + if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) { + m->kflags |= MCE_IN_KERNEL_RECOV; + m->kflags |= MCE_IN_KERNEL_COPYIN; + return IN_KERNEL_RECOV; + } + return IN_KERNEL; } @@ -249,9 +325,10 @@ static int mce_severity_amd_smca(struct mce *m, enum context err_ctx) * See AMD Error Scope Hierarchy table in a newer BKDG. For example * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features" */ -static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp) +static int mce_severity_amd(struct mce *m, struct pt_regs *regs, int tolerant, + char **msg, bool is_excp) { - enum context ctx = error_context(m); + enum context ctx = error_context(m, regs); /* Processor Context Corrupt, no need to fumble too much, die! */ if (m->status & MCI_STATUS_PCC) @@ -301,10 +378,11 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc return MCE_KEEP_SEVERITY; } -static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp) +static int mce_severity_intel(struct mce *m, struct pt_regs *regs, + int tolerant, char **msg, bool is_excp) { enum exception excp = (is_excp ? 
EXCP_CONTEXT : NO_EXCP); - enum context ctx = error_context(m); + enum context ctx = error_context(m, regs); struct severity *s; for (s = severities;; s++) { @@ -320,6 +398,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e continue; if (s->excp && excp != s->excp) continue; + if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) + continue; + if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) + continue; + if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi)) + continue; if (msg) *msg = s->msg; s->covered = 1; @@ -332,7 +416,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e } /* Default to mce_severity_intel */ -int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) = +int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) = mce_severity_intel; void __init mcheck_vendor_init_severity(void) diff --git a/arch/x86/kernel/cpu/mce/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c index a30ea13cccc2..b3938c195365 100644 --- a/arch/x86/kernel/cpu/mce/winchip.c +++ b/arch/x86/kernel/cpu/mce/winchip.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -18,12 +19,12 @@ /* Machine check handler for WinChip C6: */ static void winchip_machine_check(struct pt_regs *regs, long error_code) { - ist_enter(regs); + nmi_enter(); pr_emerg("CPU0: Machine Check Exception.\n"); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs); + nmi_exit(); } /* Set up machine check reporting on the Winchip C6 series */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index cb0fdcaf1415..4a4198b806b4 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -626,16 +626,16 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); - if (tmp_ret != UCODE_NEW) - return size; - get_online_cpus(); ret = check_online_cpus(); if (ret) goto put; + tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); + if (tmp_ret != UCODE_NEW) + goto put; + mutex_lock(&microcode_mutex); ret = microcode_reload_late(); mutex_unlock(&microcode_mutex); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index ce799cfe9434..b224d4dae2ff 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -100,53 +100,6 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev return find_matching_signature(mc, csig, cpf); } -/* - * Given CPU signature and a microcode patch, this function finds if the - * microcode patch has matching family and model with the CPU. - * - * %true - if there's a match - * %false - otherwise - */ -static bool microcode_matches(struct microcode_header_intel *mc_header, - unsigned long sig) -{ - unsigned long total_size = get_totalsize(mc_header); - unsigned long data_size = get_datasize(mc_header); - struct extended_sigtable *ext_header; - unsigned int fam_ucode, model_ucode; - struct extended_signature *ext_sig; - unsigned int fam, model; - int ext_sigcount, i; - - fam = x86_family(sig); - model = x86_model(sig); - - fam_ucode = x86_family(mc_header->sig); - model_ucode = x86_model(mc_header->sig); - - if (fam == fam_ucode && model == model_ucode) - return true; - - /* Look for ext.
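The severity-table walk in mce_severity_intel() above gains three optional per-rule filters: CPU model, minimum stepping, and a bank range, each skipped when zero. A stand-alone C sketch of that predicate, with the rule struct reduced to just the new fields (names mirror the patch; the rest of the table entry is omitted):

#include <stdbool.h>

struct sev_rule {
	unsigned char cpu_model;	/* 0 = match any model */
	unsigned char cpu_minstepping;	/* 0 = match any stepping */
	unsigned char bank_lo, bank_hi;	/* bank_lo == 0 = match any bank */
};

/* Mirror the new continue-checks added to mce_severity_intel() above. */
static bool rule_matches(const struct sev_rule *s, unsigned char model,
			 unsigned char stepping, int bank)
{
	if (s->cpu_model && model != s->cpu_model)
		return false;
	if (s->cpu_minstepping && stepping < s->cpu_minstepping)
		return false;
	if (s->bank_lo && (bank < s->bank_lo || bank > s->bank_hi))
		return false;
	return true;
}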
headers: */ - if (total_size <= data_size + MC_HEADER_SIZE) - return false; - - ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE; - ext_sig = (void *)ext_header + EXT_HEADER_SIZE; - ext_sigcount = ext_header->count; - - for (i = 0; i < ext_sigcount; i++) { - fam_ucode = x86_family(ext_sig->sig); - model_ucode = x86_model(ext_sig->sig); - - if (fam == fam_ucode && model == model_ucode) - return true; - - ext_sig++; - } - return false; -} - static struct ucode_patch *memdup_patch(void *data, unsigned int size) { struct ucode_patch *p; @@ -164,7 +117,7 @@ static struct ucode_patch *memdup_patch(void *data, unsigned int size) return p; } -static void save_microcode_patch(void *data, unsigned int size) +static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) { struct microcode_header_intel *mc_hdr, *mc_saved_hdr; struct ucode_patch *iter, *tmp, *p = NULL; @@ -210,6 +163,9 @@ static void save_microcode_patch(void *data, unsigned int size) if (!p) return; + if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) + return; + /* * Save for early loading. On 32-bit, that needs to be a physical * address as the APs are running from physical addresses, before @@ -344,13 +300,14 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) size -= mc_size; - if (!microcode_matches(mc_header, uci->cpu_sig.sig)) { + if (!find_matching_signature(data, uci->cpu_sig.sig, + uci->cpu_sig.pf)) { data += mc_size; continue; } if (save) { - save_microcode_patch(data, mc_size); + save_microcode_patch(uci, data, mc_size); goto next; } @@ -483,14 +440,14 @@ static void show_saved_mc(void) * Save this microcode patch. It will be loaded early when a CPU is * hot-added or resumes. */ -static void save_mc_for_early(u8 *mc, unsigned int size) +static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size) { /* Synchronization during CPU hotplug. */ static DEFINE_MUTEX(x86_cpu_microcode_mutex); mutex_lock(&x86_cpu_microcode_mutex); - save_microcode_patch(mc, size); + save_microcode_patch(uci, mc, size); show_saved_mc(); mutex_unlock(&x86_cpu_microcode_mutex); @@ -934,7 +891,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) * permanent memory. So it will be loaded early when a CPU is hot added * or resumes. */ - save_mc_for_early(new_mc, new_mc_size); + save_mc_for_early(uci, new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index c656d92cd708..1c2f9baf8483 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void) ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); - pr_info("Hyper-V: features 0x%x, hints 0x%x\n", - ms_hyperv.features, ms_hyperv.hints); + pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n", + ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features); ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS); ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS); @@ -263,6 +263,16 @@ static void __init ms_hyperv_init_platform(void) cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); } + /* + * Hyper-V expects to get crash register data or kmsg when + * crash enlightenment is available and system crashes.
Set + * crash_kexec_post_notifiers to true to make sure that the crash + * enlightenment interface is called before running the kdump + * kernel. + */ + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) + crash_kexec_post_notifiers = true; + #ifdef CONFIG_X86_LOCAL_APIC if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index aa5c064a6a22..4ea906fe1c35 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, *repeat = 0; *uniform = 1; - /* Make end inclusive instead of exclusive */ - end--; - prev_match = MTRR_TYPE_INVALID; for (i = 0; i < num_var_ranges; ++i) { unsigned short start_state, end_state, inclusive; @@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) int repeat; u64 partial_end; + /* Make end inclusive instead of exclusive */ + end--; + if (!mtrr_state_set) return MTRR_TYPE_INVALID; diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 89049b343c7a..87a34b6e06a2 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -260,6 +260,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r) r->num_closid = edx.split.cos_max + 1; r->membw.max_delay = eax.split.max_delay + 1; r->default_ctrl = MAX_MBA_BW; + r->membw.mbm_width = MBM_CNTR_WIDTH; if (ecx & MBA_IS_LINEAR) { r->membw.delay_linear = true; r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay; @@ -289,6 +290,7 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r) /* AMD does not use delay */ r->membw.delay_linear = false; + r->membw.mbm_width = MBM_CNTR_WIDTH_AMD; r->membw.min_bw = 0; r->membw.bw_gran = 1; /* Max value is 2048, Data width should be 4 in decimal */ @@ -578,6 +580,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) d->id = id; cpumask_set_cpu(cpu, &d->cpu_mask); + rdt_domain_reconfigure_cdp(r); + if (r->alloc_capable && domain_setup_ctrlval(r, d)) { kfree(d); return; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 181c992f448c..499cb2e727a0 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -32,6 +32,7 @@ #define CQM_LIMBOCHECK_INTERVAL 1000 #define MBM_CNTR_WIDTH 24 +#define MBM_CNTR_WIDTH_AMD 44 #define MBM_OVERFLOW_INTERVAL 1000 #define MAX_MBA_BW 100u #define MBA_IS_LINEAR 0x4 @@ -275,7 +276,6 @@ struct rftype { * struct mbm_state - status for each MBM counter in each domain * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes) * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it - * @chunks_bw Total local data moved.
Used for bandwidth calculation * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting * @prev_bw The most recent bandwidth in MBps * @delta_bw Difference between the current and previous bandwidth @@ -284,7 +284,6 @@ struct rftype { struct mbm_state { u64 chunks; u64 prev_msr; - u64 chunks_bw; u64 prev_bw_msr; u32 prev_bw; u32 delta_bw; @@ -368,6 +367,7 @@ struct rdt_cache { * @min_bw: Minimum memory bandwidth percentage user can request * @bw_gran: Granularity at which the memory bandwidth is allocated * @delay_linear: True if memory B/W delay is in linear scale + * @mbm_width: memory B/W monitor counter width * @mba_sc: True if MBA software controller(mba_sc) is enabled * @mb_map: Mapping of memory B/W percentage to memory B/W delay */ @@ -376,6 +376,7 @@ struct rdt_membw { u32 min_bw; u32 bw_gran; u32 delay_linear; + u32 mbm_width; bool mba_sc; u32 *mb_map; }; @@ -601,5 +602,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r); bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r); +void rdt_domain_reconfigure_cdp(struct rdt_resource *r); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 773124b0e18a..50f683ecd2c6 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -216,8 +216,9 @@ void free_rmid(u32 rmid) static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr) { - u64 shift = 64 - MBM_CNTR_WIDTH, chunks; + u64 shift, chunks; + shift = 64 - rdt_resources_all[RDT_RESOURCE_MBA].membw.mbm_width; chunks = (cur_msr << shift) - (prev_msr << shift); return chunks >>= shift; } @@ -279,8 +280,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr) return; chunks = mbm_overflow_count(m->prev_bw_msr, tval); - m->chunks_bw += chunks; - m->chunks = m->chunks_bw; cur_bw = (chunks * r->mon_scale) >> 20; if (m->delta_comp) @@ -450,15 +449,14 @@ static void mbm_update(struct rdt_domain *d, int rmid) } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + __mon_event_count(rmid, &rr); /* * Call the MBA software controller only for the * control groups and when user has enabled * the software controller explicitly. */ - if (!is_mba_sc(NULL)) - __mon_event_count(rmid, &rr); - else + if (is_mba_sc(NULL)) mbm_bw_count(rmid, &rr); } } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 954fd048ad9b..28f786289fce 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -507,85 +507,88 @@ unlock: return ret ?: nbytes; } -struct task_move_callback { - struct callback_head work; - struct rdtgroup *rdtgrp; -}; - -static void move_myself(struct callback_head *head) +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. 
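The mbm_overflow_count() change a few hunks up parameterizes the counter width (24 bits on Intel, 44 on AMD) instead of hard-coding MBM_CNTR_WIDTH. The arithmetic shifts both MSR snapshots so the counter's top bit lands in bit 63, letting unsigned subtraction absorb wrap-around before shifting back. A self-contained C sketch with a worked case:

#include <stdint.h>

static uint64_t mbm_delta(uint64_t prev_msr, uint64_t cur_msr,
			  unsigned int width)
{
	uint64_t shift = 64 - width;	/* 40 for Intel's 24-bit counter */
	uint64_t chunks = (cur_msr << shift) - (prev_msr << shift);

	return chunks >> shift;
}

/*
 * Example: width 24, prev_msr 0xfffffe, cur_msr 0x000001. The hardware
 * counter wrapped, yet (cur << 40) - (prev << 40) underflows modulo 2^64
 * to 0x0000030000000000, and shifting back yields the true delta of 3.
 */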
+ * + * Return: void + */ +static void rdtgroup_remove(struct rdtgroup *rdtgrp) { - struct task_move_callback *callback; - struct rdtgroup *rdtgrp; - - callback = container_of(head, struct task_move_callback, work); - rdtgrp = callback->rdtgrp; + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} +static void _update_task_closid_rmid(void *task) +{ /* - * If resource group was deleted before this task work callback - * was invoked, then assign the task to root group and free the - * resource group. + * If the task is still current on this CPU, update PQR_ASSOC MSR. + * Otherwise, the MSR is updated when the task is scheduled in. */ - if (atomic_dec_and_test(&rdtgrp->waitcount) && - (rdtgrp->flags & RDT_DELETED)) { - current->closid = 0; - current->rmid = 0; - kfree(rdtgrp); - } + if (task == current) + resctrl_sched_in(); +} - preempt_disable(); - /* update PQR_ASSOC MSR to make resource group go into effect */ - resctrl_sched_in(); - preempt_enable(); - - kfree(callback); +static void update_task_closid_rmid(struct task_struct *t) +{ + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); + else + _update_task_closid_rmid(t); } static int __rdtgroup_move_task(struct task_struct *tsk, struct rdtgroup *rdtgrp) { - struct task_move_callback *callback; - int ret; - - callback = kzalloc(sizeof(*callback), GFP_KERNEL); - if (!callback) - return -ENOMEM; - callback->work.func = move_myself; - callback->rdtgrp = rdtgrp; + /* If the task is already in rdtgrp, no need to move the task. */ + if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && + tsk->rmid == rdtgrp->mon.rmid) || + (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && + tsk->closid == rdtgrp->mon.parent->closid)) + return 0; /* - * Take a refcount, so rdtgrp cannot be freed before the - * callback has been invoked. + * Set the task's closid/rmid before the PQR_ASSOC MSR can be + * updated by them. + * + * For ctrl_mon groups, move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. */ - atomic_inc(&rdtgrp->waitcount); - ret = task_work_add(tsk, &callback->work, true); - if (ret) { - /* - * Task is exiting. Drop the refcount and free the callback. - * No need to check the refcount as the group cannot be - * deleted before the write function unlocks rdtgroup_mutex. - */ - atomic_dec(&rdtgrp->waitcount); - kfree(callback); - rdt_last_cmd_puts("Task exited\n"); - } else { - /* - * For ctrl_mon groups move both closid and rmid. - * For monitor groups, can move the tasks only from - * their parent CTRL group. - */ - if (rdtgrp->type == RDTCTRL_GROUP) { - tsk->closid = rdtgrp->closid; + + if (rdtgrp->type == RDTCTRL_GROUP) { + tsk->closid = rdtgrp->closid; + tsk->rmid = rdtgrp->mon.rmid; + } else if (rdtgrp->type == RDTMON_GROUP) { + if (rdtgrp->mon.parent->closid == tsk->closid) { tsk->rmid = rdtgrp->mon.rmid; - } else if (rdtgrp->type == RDTMON_GROUP) { - if (rdtgrp->mon.parent->closid == tsk->closid) { - tsk->rmid = rdtgrp->mon.rmid; - } else { - rdt_last_cmd_puts("Can't move task to different control group\n"); - ret = -EINVAL; - } + } else { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; } } - return ret; + + /* + * Ensure the task's closid and rmid are written before determining if + * the task is current that will decide if it will be interrupted. + */ + barrier(); + + /* + * By now, the task's closid and rmid are set. 
If the task is current + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource + * group go into effect. If the task is not current, the MSR will be + * updated when the task is scheduled in. + */ + update_task_closid_rmid(tsk); + + return 0; } /** @@ -1027,6 +1030,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { _r_cdp = NULL; + _d_cdp = NULL; ret = -EINVAL; } @@ -1617,7 +1621,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name, if (IS_ERR(kn_subdir)) return PTR_ERR(kn_subdir); - kernfs_get(kn_subdir); ret = rdtgroup_kn_set_ugid(kn_subdir); if (ret) return ret; @@ -1640,7 +1643,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); if (IS_ERR(kn_info)) return PTR_ERR(kn_info); - kernfs_get(kn_info); ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); if (ret) @@ -1661,12 +1663,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) goto out_destroy; } - /* - * This extra ref will be put in kernfs_remove() and guarantees - * that @rdtgrp->kn is always accessible. - */ - kernfs_get(kn_info); - ret = rdtgroup_kn_set_ugid(kn_info); if (ret) goto out_destroy; @@ -1695,12 +1691,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, if (dest_kn) *dest_kn = kn; - /* - * This extra ref will be put in kernfs_remove() and guarantees - * that @rdtgrp->kn is always accessible. - */ - kernfs_get(kn); - ret = rdtgroup_kn_set_ugid(kn); if (ret) goto out_destroy; @@ -1769,6 +1759,19 @@ static int set_cache_qos_cfg(int level, bool enable) return 0; } +/* Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) +{ + if (!r->alloc_capable) + return; + + if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) + l2_qos_cfg_update(&r->alloc_enabled); + + if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) + l3_qos_cfg_update(&r->alloc_enabled); +} + /* * Enable or disable the MBA software controller * which helps user specify bandwidth in MBps. @@ -1914,8 +1917,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) rdtgroup_pseudo_lock_remove(rdtgrp); kernfs_unbreak_active_protection(kn); - kernfs_put(rdtgrp->kn); - kfree(rdtgrp); + rdtgroup_remove(rdtgrp); } else { kernfs_unbreak_active_protection(kn); } @@ -1974,13 +1976,11 @@ static int rdt_get_tree(struct fs_context *fc) &kn_mongrp); if (ret < 0) goto out_info; - kernfs_get(kn_mongrp); ret = mkdir_mondata_all(rdtgroup_default.kn, &rdtgroup_default, &kn_mondata); if (ret < 0) goto out_mongrp; - kernfs_get(kn_mondata); rdtgroup_default.mon.mon_data_kn = kn_mondata; } @@ -2209,7 +2209,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) if (atomic_read(&sentry->waitcount) != 0) sentry->flags = RDT_DELETED; else - kfree(sentry); + rdtgroup_remove(sentry); } } @@ -2251,7 +2251,7 @@ static void rmdir_all_sub(void) if (atomic_read(&rdtgrp->waitcount) != 0) rdtgrp->flags = RDT_DELETED; else - kfree(rdtgrp); + rdtgroup_remove(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ update_closid_rmid(cpu_online_mask, &rdtgroup_default); @@ -2351,11 +2351,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, if (IS_ERR(kn)) return PTR_ERR(kn); - /* - * This extra ref will be put in kernfs_remove() and guarantees - * that kn is always accessible. 
- */ - kernfs_get(kn); ret = rdtgroup_kn_set_ugid(kn); if (ret) goto out_destroy; @@ -2691,8 +2686,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, /* * kernfs_remove() will drop the reference count on "kn" which * will free it. But we still need it to stick around for the - * rdtgroup_kn_unlock(kn} call below. Take one extra reference - * here, which will be dropped inside rdtgroup_kn_unlock(). + * rdtgroup_kn_unlock(kn) call. Take one extra reference here, + * which will be dropped by kernfs_put() in rdtgroup_remove(). */ kernfs_get(kn); @@ -2733,6 +2728,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, out_idfree: free_rmid(rdtgrp->mon.rmid); out_destroy: + kernfs_put(rdtgrp->kn); kernfs_remove(rdtgrp->kn); out_free_rgrp: kfree(rdtgrp); @@ -2745,7 +2741,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) { kernfs_remove(rgrp->kn); free_rmid(rgrp->mon.rmid); - kfree(rgrp); + rdtgroup_remove(rgrp); } /* @@ -2907,11 +2903,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp, WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); list_del(&rdtgrp->mon.crdtgrp_list); - /* - * one extra hold on this, will drop when we kfree(rdtgrp) - * in rdtgroup_kn_unlock() - */ - kernfs_get(kn); kernfs_remove(rdtgrp->kn); return 0; @@ -2923,11 +2914,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn, rdtgrp->flags = RDT_DELETED; list_del(&rdtgrp->rdtgroup_list); - /* - * one extra hold on this, will drop when we kfree(rdtgrp) - * in rdtgroup_kn_unlock() - */ - kernfs_get(kn); kernfs_remove(rdtgrp->kn); return 0; } @@ -2993,7 +2979,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn) * If the rdtgroup is a mon group and parent directory * is a valid "mon_groups" directory, remove the mon group. */ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) { + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = rdtgroup_ctrl_remove(kn, rdtgrp); diff --git a/arch/x86/kernel/cpu/sgx/Makefile b/arch/x86/kernel/cpu/sgx/Makefile new file mode 100644 index 000000000000..91d3dc784a29 --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/Makefile @@ -0,0 +1,5 @@ +obj-y += \ + driver.o \ + encl.o \ + ioctl.o \ + main.o diff --git a/arch/x86/kernel/cpu/sgx/arch.h b/arch/x86/kernel/cpu/sgx/arch.h new file mode 100644 index 000000000000..dd7602c44c72 --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/arch.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright(c) 2016-20 Intel Corporation. + * + * Contains data structures defined by the SGX architecture. Data structures + * defined by the Linux software stack should not be placed here. + */ +#ifndef _ASM_X86_SGX_ARCH_H +#define _ASM_X86_SGX_ARCH_H + +#include +#include + +/* The SGX specific CPUID function. */ +#define SGX_CPUID 0x12 +/* EPC enumeration. */ +#define SGX_CPUID_EPC 2 +/* An invalid EPC section, i.e. the end marker. */ +#define SGX_CPUID_EPC_INVALID 0x0 +/* A valid EPC section. */ +#define SGX_CPUID_EPC_SECTION 0x1 +/* The bitmask for the EPC section type. */ +#define SGX_CPUID_EPC_MASK GENMASK(3, 0) + +/** + * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV + * %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not + * been completed yet. + * %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's + * public key does not match IA32_SGXLEPUBKEYHASH. 
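The SGX_CPUID constants above describe how EPC sections are enumerated: CPUID leaf 0x12, subleaves 2 and up, until a subleaf reports the invalid-section marker in the low four bits of EAX. A hedged user-space sketch using GCC's <cpuid.h> (decoding of the section base and size fields is omitted; this only walks the subleaves and must run on SGX-capable hardware):

#include <cpuid.h>
#include <stdio.h>

#define SGX_CPUID		0x12
#define SGX_CPUID_EPC		2
#define SGX_CPUID_EPC_SECTION	0x1
#define SGX_CPUID_EPC_MASK	0xf

static void dump_epc_sections(void)
{
	unsigned int eax, ebx, ecx, edx, i;

	for (i = 0; ; i++) {
		__cpuid_count(SGX_CPUID, SGX_CPUID_EPC + i, eax, ebx, ecx, edx);

		/* Low nibble of EAX: 0 = end marker, 1 = valid section. */
		if ((eax & SGX_CPUID_EPC_MASK) != SGX_CPUID_EPC_SECTION)
			break;

		printf("EPC section %u: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
		       i, eax, ebx, ecx, edx);
	}
}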
+ * %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received + */ +enum sgx_return_code { + SGX_NOT_TRACKED = 11, + SGX_INVALID_EINITTOKEN = 16, + SGX_UNMASKED_EVENT = 128, +}; + +/* The modulus size for 3072-bit RSA keys. */ +#define SGX_MODULUS_SIZE 384 + +/** + * enum sgx_miscselect - additional information to an SSA frame + * %SGX_MISC_EXINFO: Report #PF or #GP to the SSA frame. + * + * Save State Area (SSA) is a stack inside the enclave used to store processor + * state when an exception or interrupt occurs. This enum defines additional + * information stored to an SSA frame. + */ +enum sgx_miscselect { + SGX_MISC_EXINFO = BIT(0), +}; + +#define SGX_MISC_RESERVED_MASK GENMASK_ULL(63, 1) + +#define SGX_SSA_GPRS_SIZE 184 +#define SGX_SSA_MISC_EXINFO_SIZE 16 + +/** + * enum sgx_attributes - the attributes field in &struct sgx_secs + * %SGX_ATTR_INIT: Enclave can be entered (is initialized). + * %SGX_ATTR_DEBUG: Allow ENCLS(EDBGRD) and ENCLS(EDBGWR). + * %SGX_ATTR_MODE64BIT: Tell that this is a 64-bit enclave. + * %SGX_ATTR_PROVISIONKEY: Allow to use provisioning keys for remote + * attestation. + * %SGX_ATTR_KSS: Allow to use key separation and sharing (KSS). + * %SGX_ATTR_EINITTOKENKEY: Allow to use token signing key that is used to + * sign cryptographic tokens that can be passed to + * EINIT as an authorization to run an enclave. + */ +enum sgx_attribute { + SGX_ATTR_INIT = BIT(0), + SGX_ATTR_DEBUG = BIT(1), + SGX_ATTR_MODE64BIT = BIT(2), + SGX_ATTR_PROVISIONKEY = BIT(4), + SGX_ATTR_EINITTOKENKEY = BIT(5), + SGX_ATTR_KSS = BIT(7), +}; + +#define SGX_ATTR_RESERVED_MASK (BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8)) + +/** + * struct sgx_secs - SGX Enclave Control Structure (SECS) + * @size: size of the address space + * @base: base address of the address space + * @ssa_frame_size: size of an SSA frame + * @miscselect: additional information stored to an SSA frame + * @attributes: attributes for enclave + * @xfrm: XSave-Feature Request Mask (subset of XCR0) + * @mrenclave: SHA256-hash of the enclave contents + * @mrsigner: SHA256-hash of the public key used to sign the SIGSTRUCT + * @config_id: a user-defined value that is used in key derivation + * @isv_prod_id: a user-defined value that is used in key derivation + * @isv_svn: a user-defined value that is used in key derivation + * @config_svn: a user-defined value that is used in key derivation + * + * SGX Enclave Control Structure (SECS) is a special enclave page that is not + * visible in the address space. In fact, this structure defines the address + * range and other global attributes for the enclave and it is the first EPC + * page created for any enclave. It is moved from a temporary buffer to the EPC + * by means of the ENCLS[ECREATE] function. + */ +struct sgx_secs { + u64 size; + u64 base; + u32 ssa_frame_size; + u32 miscselect; + u8 reserved1[24]; + u64 attributes; + u64 xfrm; + u32 mrenclave[8]; + u8 reserved2[32]; + u32 mrsigner[8]; + u8 reserved3[32]; + u32 config_id[16]; + u16 isv_prod_id; + u16 isv_svn; + u16 config_svn; + u8 reserved4[3834]; +} __packed; + +/** + * enum sgx_tcs_flags - execution flags for TCS + * %SGX_TCS_DBGOPTIN: If enabled allows single-stepping and breakpoints + * inside an enclave. It is cleared by EADD but can + * be set later with EDBGWR.
+ */ +enum sgx_tcs_flags { + SGX_TCS_DBGOPTIN = 0x01, +}; + +#define SGX_TCS_RESERVED_MASK GENMASK_ULL(63, 1) +#define SGX_TCS_RESERVED_SIZE 4024 + +/** + * struct sgx_tcs - Thread Control Structure (TCS) + * @state: used to mark an entered TCS + * @flags: execution flags (cleared by EADD) + * @ssa_offset: SSA stack offset relative to the enclave base + * @ssa_index: the current SSA frame index (cleared by EADD) + * @nr_ssa_frames: the number of frames in the SSA stack + * @entry_offset: entry point offset relative to the enclave base + * @exit_addr: address outside the enclave to exit on an exception or + * interrupt + * @fs_offset: offset relative to the enclave base to become FS + * segment inside the enclave + * @gs_offset: offset relative to the enclave base to become GS + * segment inside the enclave + * @fs_limit: size to become a new FS-limit (only 32-bit enclaves) + * @gs_limit: size to become a new GS-limit (only 32-bit enclaves) + * + * Thread Control Structure (TCS) is an enclave page visible in its address + * space that defines an entry point inside the enclave. A thread enters inside + * an enclave by supplying the address of a TCS to ENCLU(EENTER). A TCS can be entered + * by only one thread at a time. + */ +struct sgx_tcs { + u64 state; + u64 flags; + u64 ssa_offset; + u32 ssa_index; + u32 nr_ssa_frames; + u64 entry_offset; + u64 exit_addr; + u64 fs_offset; + u64 gs_offset; + u32 fs_limit; + u32 gs_limit; + u8 reserved[SGX_TCS_RESERVED_SIZE]; +} __packed; + +/** + * struct sgx_pageinfo - an enclave page descriptor + * @addr: address of the enclave page + * @contents: pointer to the page contents + * @metadata: pointer either to a SECINFO or PCMD instance + * @secs: address of the SECS page + */ +struct sgx_pageinfo { + u64 addr; + u64 contents; + u64 metadata; + u64 secs; +} __packed __aligned(32); + + +/** + * enum sgx_page_type - bits in the SECINFO flags defining the page type + * %SGX_PAGE_TYPE_SECS: a SECS page + * %SGX_PAGE_TYPE_TCS: a TCS page + * %SGX_PAGE_TYPE_REG: a regular page + * %SGX_PAGE_TYPE_VA: a VA page + * %SGX_PAGE_TYPE_TRIM: a page in trimmed state + */ +enum sgx_page_type { + SGX_PAGE_TYPE_SECS, + SGX_PAGE_TYPE_TCS, + SGX_PAGE_TYPE_REG, + SGX_PAGE_TYPE_VA, + SGX_PAGE_TYPE_TRIM, +}; + +#define SGX_NR_PAGE_TYPES 5 +#define SGX_PAGE_TYPE_MASK GENMASK(7, 0) + +/** + * enum sgx_secinfo_flags - the flags field in &struct sgx_secinfo + * %SGX_SECINFO_R: allow read + * %SGX_SECINFO_W: allow write + * %SGX_SECINFO_X: allow execution + * %SGX_SECINFO_SECS: a SECS page + * %SGX_SECINFO_TCS: a TCS page + * %SGX_SECINFO_REG: a regular page + * %SGX_SECINFO_VA: a VA page + * %SGX_SECINFO_TRIM: a page in trimmed state + */ +enum sgx_secinfo_flags { + SGX_SECINFO_R = BIT(0), + SGX_SECINFO_W = BIT(1), + SGX_SECINFO_X = BIT(2), + SGX_SECINFO_SECS = (SGX_PAGE_TYPE_SECS << 8), + SGX_SECINFO_TCS = (SGX_PAGE_TYPE_TCS << 8), + SGX_SECINFO_REG = (SGX_PAGE_TYPE_REG << 8), + SGX_SECINFO_VA = (SGX_PAGE_TYPE_VA << 8), + SGX_SECINFO_TRIM = (SGX_PAGE_TYPE_TRIM << 8), +}; + +#define SGX_SECINFO_PERMISSION_MASK GENMASK_ULL(2, 0) +#define SGX_SECINFO_PAGE_TYPE_MASK (SGX_PAGE_TYPE_MASK << 8) +#define SGX_SECINFO_RESERVED_MASK ~(SGX_SECINFO_PERMISSION_MASK | \ + SGX_SECINFO_PAGE_TYPE_MASK) + +/** + * struct sgx_secinfo - describes attributes of an EPC page + * @flags: permissions and type + * + * Used together with ENCLS leaves that add or modify an EPC page to an + * enclave to define page permissions and type.
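The secinfo encoding above packs the permission bits into bits 2:0 and the page type into bits 15:8; everything else is reserved and must be zero. A minimal C sketch that composes and validates a flags value (constants restated locally so the snippet builds stand-alone):

#include <stdbool.h>
#include <stdint.h>

#define SECINFO_R		(1ULL << 0)
#define SECINFO_W		(1ULL << 1)
#define SECINFO_X		(1ULL << 2)
#define PAGE_TYPE_REG		2ULL
#define SECINFO_PERM_MASK	0x7ULL
#define SECINFO_PT_MASK		(0xffULL << 8)
#define SECINFO_RESERVED_MASK	(~(SECINFO_PERM_MASK | SECINFO_PT_MASK))

/* A regular enclave page with read/write build-time permissions. */
static const uint64_t secinfo_reg_rw = SECINFO_R | SECINFO_W |
				       (PAGE_TYPE_REG << 8);

static bool secinfo_valid(uint64_t flags)
{
	return !(flags & SECINFO_RESERVED_MASK);
}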
+ */ +struct sgx_secinfo { + u64 flags; + u8 reserved[56]; +} __packed __aligned(64); + +#define SGX_PCMD_RESERVED_SIZE 40 + +/** + * struct sgx_pcmd - Paging Crypto Metadata (PCMD) + * @enclave_id: enclave identifier + * @mac: MAC over PCMD, page contents and isvsvn + * + * PCMD is stored for every swapped page to the regular memory. When ELDU loads + * the page back it recalculates the MAC by using an isvsvn number stored in a + * VA page. Together these two structures bring integrity and rollback + * protection. + */ +struct sgx_pcmd { + struct sgx_secinfo secinfo; + u64 enclave_id; + u8 reserved[SGX_PCMD_RESERVED_SIZE]; + u8 mac[16]; +} __packed __aligned(128); + +#define SGX_SIGSTRUCT_RESERVED1_SIZE 84 +#define SGX_SIGSTRUCT_RESERVED2_SIZE 20 +#define SGX_SIGSTRUCT_RESERVED3_SIZE 32 +#define SGX_SIGSTRUCT_RESERVED4_SIZE 12 + +/** + * struct sgx_sigstruct_header - defines author of the enclave + * @header1: constant byte string + * @vendor: must be either 0x0000 or 0x8086 + * @date: YYYYMMDD in BCD + * @header2: constant byte string + * @swdefined: software defined value + */ +struct sgx_sigstruct_header { + u64 header1[2]; + u32 vendor; + u32 date; + u64 header2[2]; + u32 swdefined; + u8 reserved1[84]; +} __packed; + +/** + * struct sgx_sigstruct_body - defines contents of the enclave + * @miscselect: additional information stored to an SSA frame + * @misc_mask: required miscselect in SECS + * @attributes: attributes for enclave + * @xfrm: XSave-Feature Request Mask (subset of XCR0) + * @attributes_mask: required attributes in SECS + * @xfrm_mask: required XFRM in SECS + * @mrenclave: SHA256-hash of the enclave contents + * @isvprodid: a user-defined value that is used in key derivation + * @isvsvn: a user-defined value that is used in key derivation + */ +struct sgx_sigstruct_body { + u32 miscselect; + u32 misc_mask; + u8 reserved2[20]; + u64 attributes; + u64 xfrm; + u64 attributes_mask; + u64 xfrm_mask; + u8 mrenclave[32]; + u8 reserved3[32]; + u16 isvprodid; + u16 isvsvn; +} __packed; + +/** + * struct sgx_sigstruct - an enclave signature + * @header: defines author of the enclave + * @modulus: the modulus of the public key + * @exponent: the exponent of the public key + * @signature: the signature calculated over the fields except modulus, + * @body: defines contents of the enclave + * @q1: a value used in RSA signature verification + * @q2: a value used in RSA signature verification + * + * Header and body are the parts that are actually signed. The remaining fields + * define the signature of the enclave. + */ +struct sgx_sigstruct { + struct sgx_sigstruct_header header; + u8 modulus[SGX_MODULUS_SIZE]; + u32 exponent; + u8 signature[SGX_MODULUS_SIZE]; + struct sgx_sigstruct_body body; + u8 reserved4[12]; + u8 q1[SGX_MODULUS_SIZE]; + u8 q2[SGX_MODULUS_SIZE]; +} __packed; + +#define SGX_LAUNCH_TOKEN_SIZE 304 + +#endif /* _ASM_X86_SGX_ARCH_H */ diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c new file mode 100644 index 000000000000..8ce6d8371cfb --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation.
*/ + +#include +#include +#include +#include +#include +#include +#include "driver.h" +#include "encl.h" + +u64 sgx_attributes_reserved_mask; +u64 sgx_xfrm_reserved_mask = ~0x3; +u32 sgx_misc_reserved_mask; + +static int sgx_open(struct inode *inode, struct file *file) +{ + struct sgx_encl *encl; + int ret; + + encl = kzalloc(sizeof(*encl), GFP_KERNEL); + if (!encl) + return -ENOMEM; + + kref_init(&encl->refcount); + xa_init(&encl->page_array); + mutex_init(&encl->lock); + INIT_LIST_HEAD(&encl->va_pages); + INIT_LIST_HEAD(&encl->mm_list); + spin_lock_init(&encl->mm_lock); + + ret = init_srcu_struct(&encl->srcu); + if (ret) { + kfree(encl); + return ret; + } + + file->private_data = encl; + + return 0; +} + +static int sgx_release(struct inode *inode, struct file *file) +{ + struct sgx_encl *encl = file->private_data; + struct sgx_encl_mm *encl_mm; + + /* + * Drain the remaining mm_list entries. At this point the list contains + * entries for processes, which have closed the enclave file but have + * not exited yet. The processes, which have exited, are gone from the + * list by sgx_mmu_notifier_release(). + */ + for ( ; ; ) { + spin_lock(&encl->mm_lock); + + if (list_empty(&encl->mm_list)) { + encl_mm = NULL; + } else { + encl_mm = list_first_entry(&encl->mm_list, + struct sgx_encl_mm, list); + list_del_rcu(&encl_mm->list); + } + + spin_unlock(&encl->mm_lock); + + /* The enclave is no longer mapped by any mm. */ + if (!encl_mm) + break; + + synchronize_srcu(&encl->srcu); + mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm); + kfree(encl_mm); + + /* 'encl_mm' is gone, put encl_mm->encl reference: */ + kref_put(&encl->refcount, sgx_encl_release); + } + + kref_put(&encl->refcount, sgx_encl_release); + return 0; +} + +static int sgx_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct sgx_encl *encl = file->private_data; + int ret; + + ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags); + if (ret) + return ret; + + ret = sgx_encl_mm_add(encl, vma->vm_mm); + if (ret) + return ret; + + vma->vm_ops = &sgx_vm_ops; + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO; + vma->vm_private_data = encl; + + return 0; +} + +static unsigned long sgx_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + if ((flags & MAP_TYPE) == MAP_PRIVATE) + return -EINVAL; + + if (flags & MAP_FIXED) + return addr; + + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} + +#ifdef CONFIG_COMPAT +static long sgx_compat_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + return sgx_ioctl(filep, cmd, arg); +} +#endif + +static const struct file_operations sgx_encl_fops = { + .owner = THIS_MODULE, + .open = sgx_open, + .release = sgx_release, + .unlocked_ioctl = sgx_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sgx_compat_ioctl, +#endif + .mmap = sgx_mmap, + .get_unmapped_area = sgx_get_unmapped_area, +}; + +const struct file_operations sgx_provision_fops = { + .owner = THIS_MODULE, +}; + +static struct miscdevice sgx_dev_enclave = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_enclave", + .nodename = "sgx_enclave", + .fops = &sgx_encl_fops, +}; + +static struct miscdevice sgx_dev_provision = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_provision", + .nodename = "sgx_provision", + .fops = &sgx_provision_fops, +}; + +int __init sgx_drv_init(void) +{ + unsigned int eax, ebx, ecx, edx; + u64 attr_mask; + u64 xfrm_mask; + int ret; + + if 
(!cpu_feature_enabled(X86_FEATURE_SGX_LC)) + return -ENODEV; + + cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx); + + if (!(eax & 1)) { + pr_err("SGX disabled: SGX1 instruction support not available.\n"); + return -ENODEV; + } + + sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK; + + cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx); + + attr_mask = (((u64)ebx) << 32) + (u64)eax; + sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK; + + if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) { + xfrm_mask = (((u64)edx) << 32) + (u64)ecx; + sgx_xfrm_reserved_mask = ~xfrm_mask; + } + + ret = misc_register(&sgx_dev_enclave); + if (ret) + return ret; + + ret = misc_register(&sgx_dev_provision); + if (ret) { + misc_deregister(&sgx_dev_enclave); + return ret; + } + + return 0; +} diff --git a/arch/x86/kernel/cpu/sgx/driver.h b/arch/x86/kernel/cpu/sgx/driver.h new file mode 100644 index 000000000000..4eddb4d571ef --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/driver.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ARCH_SGX_DRIVER_H__ +#define __ARCH_SGX_DRIVER_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "sgx.h" + +#define SGX_EINIT_SPIN_COUNT 20 +#define SGX_EINIT_SLEEP_COUNT 50 +#define SGX_EINIT_SLEEP_TIME 20 + +extern u64 sgx_attributes_reserved_mask; +extern u64 sgx_xfrm_reserved_mask; +extern u32 sgx_misc_reserved_mask; + +extern const struct file_operations sgx_provision_fops; + +long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); + +int sgx_drv_init(void); + +#endif /* __ARCH_X86_SGX_DRIVER_H__ */ diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c new file mode 100644 index 000000000000..332977248ae0 --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -0,0 +1,738 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. */ + +#include +#include +#include +#include +#include +#include +#include "arch.h" +#include "encl.h" +#include "encls.h" +#include "sgx.h" + +/* + * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC + * Pages" in the SDM. 
+ */ +static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, + struct sgx_epc_page *epc_page, + struct sgx_epc_page *secs_page) +{ + unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; + struct sgx_encl *encl = encl_page->encl; + struct sgx_pageinfo pginfo; + struct sgx_backing b; + pgoff_t page_index; + int ret; + + if (secs_page) + page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); + else + page_index = PFN_DOWN(encl->size); + + ret = sgx_encl_get_backing(encl, page_index, &b); + if (ret) + return ret; + + pginfo.addr = encl_page->desc & PAGE_MASK; + pginfo.contents = (unsigned long)kmap_atomic(b.contents); + pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) + + b.pcmd_offset; + + if (secs_page) + pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page); + else + pginfo.secs = 0; + + ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page), + sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset); + if (ret) { + if (encls_failed(ret)) + ENCLS_WARN(ret, "ELDU"); + + ret = -EFAULT; + } + + kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset)); + kunmap_atomic((void *)(unsigned long)pginfo.contents); + + sgx_encl_put_backing(&b, false); + + return ret; +} + +static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page, + struct sgx_epc_page *secs_page) +{ + + unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; + struct sgx_encl *encl = encl_page->encl; + struct sgx_epc_page *epc_page; + int ret; + + epc_page = sgx_alloc_epc_page(encl_page, false); + if (IS_ERR(epc_page)) + return epc_page; + + ret = __sgx_encl_eldu(encl_page, epc_page, secs_page); + if (ret) { + sgx_free_epc_page(epc_page); + return ERR_PTR(ret); + } + + sgx_free_va_slot(encl_page->va_page, va_offset); + list_move(&encl_page->va_page->list, &encl->va_pages); + encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK; + encl_page->epc_page = epc_page; + + return epc_page; +} + +static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl, + unsigned long addr, + unsigned long vm_flags) +{ + unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC); + struct sgx_epc_page *epc_page; + struct sgx_encl_page *entry; + + entry = xa_load(&encl->page_array, PFN_DOWN(addr)); + if (!entry) + return ERR_PTR(-EFAULT); + + /* + * Verify that the faulted page has equal or higher build time + * permissions than the VMA permissions (i.e. the subset of {VM_READ, + * VM_WRITE, VM_EXECUTE} in vma->vm_flags). + */ + if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits) + return ERR_PTR(-EFAULT); + + /* Entry successfully located. */ + if (entry->epc_page) { + if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED) + return ERR_PTR(-EBUSY); + + return entry; + } + + if (!(encl->secs.epc_page)) { + epc_page = sgx_encl_eldu(&encl->secs, NULL); + if (IS_ERR(epc_page)) + return ERR_CAST(epc_page); + } + + epc_page = sgx_encl_eldu(entry, encl->secs.epc_page); + if (IS_ERR(epc_page)) + return ERR_CAST(epc_page); + + encl->secs_child_cnt++; + sgx_mark_page_reclaimable(entry->epc_page); + + return entry; +} + +static vm_fault_t sgx_vma_fault(struct vm_fault *vmf) +{ + unsigned long addr = (unsigned long)vmf->address; + struct vm_area_struct *vma = vmf->vma; + struct sgx_encl_page *entry; + unsigned long phys_addr; + struct sgx_encl *encl; + vm_fault_t ret; + + encl = vma->vm_private_data; + + /* + * It's very unlikely but possible that allocating memory for the + * mm_list entry of a forked process failed in sgx_vma_open(). 
When + * this happens, vm_private_data is set to NULL. + */ + if (unlikely(!encl)) + return VM_FAULT_SIGBUS; + + mutex_lock(&encl->lock); + + entry = sgx_encl_load_page(encl, addr, vma->vm_flags); + if (IS_ERR(entry)) { + mutex_unlock(&encl->lock); + + if (PTR_ERR(entry) == -EBUSY) + return VM_FAULT_NOPAGE; + + return VM_FAULT_SIGBUS; + } + + phys_addr = sgx_get_epc_phys_addr(entry->epc_page); + + ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr)); + if (ret != VM_FAULT_NOPAGE) { + mutex_unlock(&encl->lock); + + return VM_FAULT_SIGBUS; + } + + sgx_encl_test_and_clear_young(vma->vm_mm, entry); + mutex_unlock(&encl->lock); + + return VM_FAULT_NOPAGE; +} + +static void sgx_vma_open(struct vm_area_struct *vma) +{ + struct sgx_encl *encl = vma->vm_private_data; + + /* + * It's possible but unlikely that vm_private_data is NULL. This can + * happen in a grandchild of a process, when sgx_encl_mm_add() had + * failed to allocate memory in this callback. + */ + if (unlikely(!encl)) + return; + + if (sgx_encl_mm_add(encl, vma->vm_mm)) + vma->vm_private_data = NULL; +} + + +/** + * sgx_encl_may_map() - Check if a requested VMA mapping is allowed + * @encl: an enclave pointer + * @start: lower bound of the address range, inclusive + * @end: upper bound of the address range, exclusive + * @vm_flags: VMA flags + * + * Iterate through the enclave pages contained within [@start, @end) to verify + * that the permissions requested by a subset of {VM_READ, VM_WRITE, VM_EXEC} + * does not contain any permissions that are not contained in the build time + * permissions of any of the enclave pages within the given address range. + * + * An enclave creator must declare the strongest permissions that will be + * needed for each enclave page. This ensures that mappings have the identical + * or weaker permissions than the earlier declared permissions. + * + * Return: 0 on success, -EACCES otherwise + */ +int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, + unsigned long end, unsigned long vm_flags) +{ + unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC); + struct sgx_encl_page *page; + unsigned long count = 0; + int ret = 0; + + XA_STATE(xas, &encl->page_array, PFN_DOWN(start)); + + /* + * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might + * conflict with the enclave page permissions. + */ + if (current->personality & READ_IMPLIES_EXEC) + return -EACCES; + + mutex_lock(&encl->lock); + xas_lock(&xas); + xas_for_each(&xas, page, PFN_DOWN(end - 1)) { + if (~page->vm_max_prot_bits & vm_prot_bits) { + ret = -EACCES; + break; + } + + /* Reschedule on every XA_CHECK_SCHED iteration. 
*/ + if (!(++count % XA_CHECK_SCHED)) { + xas_pause(&xas); + xas_unlock(&xas); + mutex_unlock(&encl->lock); + + cond_resched(); + + mutex_lock(&encl->lock); + xas_lock(&xas); + } + } + xas_unlock(&xas); + mutex_unlock(&encl->lock); + + return ret; +} + +static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long newflags) +{ + return sgx_encl_may_map(vma->vm_private_data, start, end, newflags); +} + +static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page, + unsigned long addr, void *data) +{ + unsigned long offset = addr & ~PAGE_MASK; + int ret; + + + ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data); + if (ret) + return -EIO; + + return 0; +} + +static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page, + unsigned long addr, void *data) +{ + unsigned long offset = addr & ~PAGE_MASK; + int ret; + + ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data); + if (ret) + return -EIO; + + return 0; +} + +/* + * Load an enclave page to EPC if required, and take encl->lock. + */ +static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl, + unsigned long addr, + unsigned long vm_flags) +{ + struct sgx_encl_page *entry; + + for ( ; ; ) { + mutex_lock(&encl->lock); + + entry = sgx_encl_load_page(encl, addr, vm_flags); + if (PTR_ERR(entry) != -EBUSY) + break; + + mutex_unlock(&encl->lock); + } + + if (IS_ERR(entry)) + mutex_unlock(&encl->lock); + + return entry; +} + +static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write) +{ + struct sgx_encl *encl = vma->vm_private_data; + struct sgx_encl_page *entry = NULL; + char data[sizeof(unsigned long)]; + unsigned long align; + int offset; + int cnt; + int ret = 0; + int i; + + /* + * If process was forked, VMA is still there but vm_private_data is set + * to NULL. + */ + if (!encl) + return -EFAULT; + + if (!test_bit(SGX_ENCL_DEBUG, &encl->flags)) + return -EFAULT; + + for (i = 0; i < len; i += cnt) { + entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK, + vma->vm_flags); + if (IS_ERR(entry)) { + ret = PTR_ERR(entry); + break; + } + + align = ALIGN_DOWN(addr + i, sizeof(unsigned long)); + offset = (addr + i) & (sizeof(unsigned long) - 1); + cnt = sizeof(unsigned long) - offset; + cnt = min(cnt, len - i); + + ret = sgx_encl_debug_read(encl, entry, align, data); + if (ret) + goto out; + + if (write) { + memcpy(data + offset, buf + i, cnt); + ret = sgx_encl_debug_write(encl, entry, align, data); + if (ret) + goto out; + } else { + memcpy(buf + i, data + offset, cnt); + } + +out: + mutex_unlock(&encl->lock); + + if (ret) + break; + } + + return ret < 0 ? ret : i; +} + +const struct vm_operations_struct sgx_vm_ops = { + .fault = sgx_vma_fault, + .mprotect = sgx_vma_mprotect, + .open = sgx_vma_open, + .access = sgx_vma_access, +}; + +/** + * sgx_encl_release - Destroy an enclave instance + * @kref: address of a kref inside &sgx_encl + * + * Used together with kref_put(). Frees all the resources associated with the + * enclave and the instance itself. + */ +void sgx_encl_release(struct kref *ref) +{ + struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); + struct sgx_va_page *va_page; + struct sgx_encl_page *entry; + unsigned long index; + + xa_for_each(&encl->page_array, index, entry) { + if (entry->epc_page) { + /* + * The page and its radix tree entry cannot be freed + * if the page is being held by the reclaimer. 
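sgx_vma_access() above funnels every ptrace-style access through word-sized EDBGRD/EDBGWR transfers, so an arbitrary (addr, len) range has to be split into naturally aligned machine words. A sketch of just that alignment arithmetic (the ALIGN_DOWN/offset/cnt computation from the loop), with a worked case in the comment:

#include <stddef.h>

/*
 * For byte i of the request, return how many bytes fall into the enclosing
 * machine word, and report that word's base (align) and the byte offset
 * within it. E.g. addr 0x1003, i 0, len 32 on a 64-bit build gives
 * align 0x1000, offset 3, cnt 5.
 */
static size_t word_chunk(unsigned long addr, size_t i, size_t len,
			 unsigned long *align, size_t *offset)
{
	size_t cnt;

	*align = (addr + i) & ~(sizeof(unsigned long) - 1);
	*offset = (addr + i) & (sizeof(unsigned long) - 1);

	cnt = sizeof(unsigned long) - *offset;
	if (cnt > len - i)
		cnt = len - i;

	return cnt;
}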
+ */ + if (sgx_unmark_page_reclaimable(entry->epc_page)) + continue; + + sgx_free_epc_page(entry->epc_page); + encl->secs_child_cnt--; + entry->epc_page = NULL; + } + + cond_resched(); + kfree(entry); + } + + xa_destroy(&encl->page_array); + + if (!encl->secs_child_cnt && encl->secs.epc_page) { + sgx_free_epc_page(encl->secs.epc_page); + encl->secs.epc_page = NULL; + } + + while (!list_empty(&encl->va_pages)) { + va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, + list); + list_del(&va_page->list); + sgx_free_epc_page(va_page->epc_page); + kfree(va_page); + } + + if (encl->backing) + fput(encl->backing); + + cleanup_srcu_struct(&encl->srcu); + + WARN_ON_ONCE(!list_empty(&encl->mm_list)); + + /* Detect EPC page leaks. */ + WARN_ON_ONCE(encl->secs_child_cnt); + WARN_ON_ONCE(encl->secs.epc_page); + + kfree(encl); +} + +/* + * 'mm' is exiting and no longer needs mmu notifications. + */ +static void sgx_mmu_notifier_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier); + struct sgx_encl_mm *tmp = NULL; + + /* + * The enclave itself can remove encl_mm. Note, objects can't be moved + * off an RCU protected list, but deletion is ok. + */ + spin_lock(&encl_mm->encl->mm_lock); + list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) { + if (tmp == encl_mm) { + list_del_rcu(&encl_mm->list); + break; + } + } + spin_unlock(&encl_mm->encl->mm_lock); + + if (tmp == encl_mm) { + synchronize_srcu(&encl_mm->encl->srcu); + mmu_notifier_put(mn); + } +} + +static void sgx_mmu_notifier_free(struct mmu_notifier *mn) +{ + struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier); + + /* 'encl_mm' is going away, put encl_mm->encl reference: */ + kref_put(&encl_mm->encl->refcount, sgx_encl_release); + + kfree(encl_mm); +} + +static const struct mmu_notifier_ops sgx_mmu_notifier_ops = { + .release = sgx_mmu_notifier_release, + .free_notifier = sgx_mmu_notifier_free, +}; + +static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl, + struct mm_struct *mm) +{ + struct sgx_encl_mm *encl_mm = NULL; + struct sgx_encl_mm *tmp; + int idx; + + idx = srcu_read_lock(&encl->srcu); + + list_for_each_entry_rcu(tmp, &encl->mm_list, list) { + if (tmp->mm == mm) { + encl_mm = tmp; + break; + } + } + + srcu_read_unlock(&encl->srcu, idx); + + return encl_mm; +} + +int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm) +{ + struct sgx_encl_mm *encl_mm; + int ret; + + /* + * Even though a single enclave may be mapped into an mm more than once, + * each 'mm' only appears once on encl->mm_list. This is guaranteed by + * holding the mm's mmap lock for write before an mm can be added to or + * removed from an encl->mm_list. + */ + lockdep_assert_held_write(&mm->mmap_sem); + + /* + * It's possible that an entry already exists in the mm_list, because it + * is removed only on VFS release or process exit. + */ + if (sgx_encl_find_mm(encl, mm)) + return 0; + + encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL); + if (!encl_mm) + return -ENOMEM; + + /* Grab a refcount for the encl_mm->encl reference: */ + kref_get(&encl->refcount); + encl_mm->encl = encl; + encl_mm->mm = mm; + encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops; + + ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm); + if (ret) { + kfree(encl_mm); + return ret; + } + + spin_lock(&encl->mm_lock); + list_add_rcu(&encl_mm->list, &encl->mm_list); + /* Pairs with smp_rmb() in sgx_reclaimer_block().
*/ + smp_wmb(); + encl->mm_list_version++; + spin_unlock(&encl->mm_lock); + + return 0; +} + +static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, + pgoff_t index) +{ + struct inode *inode = encl->backing->f_path.dentry->d_inode; + struct address_space *mapping = inode->i_mapping; + gfp_t gfpmask = mapping_gfp_mask(mapping); + + return shmem_read_mapping_page_gfp(mapping, index, gfpmask); +} + +/** + * sgx_encl_get_backing() - Pin the backing storage + * @encl: an enclave pointer + * @page_index: enclave page index + * @backing: data for accessing backing storage for the page + * + * Pin the backing storage pages for storing the encrypted contents and Paging + * Crypto MetaData (PCMD) of an enclave page. + * + * Return: + * 0 on success, + * -errno otherwise. + */ +int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, + struct sgx_backing *backing) +{ + pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5); + struct page *contents; + struct page *pcmd; + + contents = sgx_encl_get_backing_page(encl, page_index); + if (IS_ERR(contents)) + return PTR_ERR(contents); + + pcmd = sgx_encl_get_backing_page(encl, pcmd_index); + if (IS_ERR(pcmd)) { + put_page(contents); + return PTR_ERR(pcmd); + } + + backing->page_index = page_index; + backing->contents = contents; + backing->pcmd = pcmd; + backing->pcmd_offset = + (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) * + sizeof(struct sgx_pcmd); + + return 0; +} + +/** + * sgx_encl_put_backing() - Unpin the backing storage + * @backing: data for accessing backing storage for the page + * @do_write: mark pages dirty + */ +void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write) +{ + if (do_write) { + set_page_dirty(backing->pcmd); + set_page_dirty(backing->contents); + } + + put_page(backing->pcmd); + put_page(backing->contents); +} + +static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr, + void *data) +{ + pte_t pte; + int ret; + + ret = pte_young(*ptep); + if (ret) { + pte = pte_mkold(*ptep); + set_pte_at((struct mm_struct *)data, addr, ptep, pte); + } + + return ret; +} + +/** + * sgx_encl_test_and_clear_young() - Test and reset the accessed bit + * @mm: mm_struct that is checked + * @page: enclave page to be tested for recent access + * + * Checks the Access (A) bit from the PTE corresponding to the enclave page and + * clears it. + * + * Return: 1 if the page has been recently accessed and 0 if not. + */ +int sgx_encl_test_and_clear_young(struct mm_struct *mm, + struct sgx_encl_page *page) +{ + unsigned long addr = page->desc & PAGE_MASK; + struct sgx_encl *encl = page->encl; + struct vm_area_struct *vma; + int ret; + + ret = sgx_encl_find(mm, addr, &vma); + if (ret) + return 0; + + if (encl != vma->vm_private_data) + return 0; + + ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE, + sgx_encl_test_and_clear_young_cb, vma->vm_mm); + if (ret < 0) + return 0; + + return ret; +} + +/** + * sgx_alloc_va_page() - Allocate a Version Array (VA) page + * + * Allocate a free EPC page and convert it to a Version Array (VA) page. 
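+ *
+ * A VA page holds PAGE_SIZE / sizeof(u64) = 512 slots, one 8-byte version
+ * number per swapped-out page, which is why sgx_encl_grow() allocates a new
+ * VA page only once every SGX_VA_SLOT_COUNT added pages.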
+ * + * Return: + * a VA page, + * -errno otherwise + */ +struct sgx_epc_page *sgx_alloc_va_page(void) +{ + struct sgx_epc_page *epc_page; + int ret; + + epc_page = sgx_alloc_epc_page(NULL, true); + if (IS_ERR(epc_page)) + return ERR_CAST(epc_page); + + ret = __epa(sgx_get_epc_virt_addr(epc_page)); + if (ret) { + WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret); + sgx_free_epc_page(epc_page); + return ERR_PTR(-EFAULT); + } + + return epc_page; +} + +/** + * sgx_alloc_va_slot - allocate a VA slot + * @va_page: a &struct sgx_va_page instance + * + * Allocates a slot from a &struct sgx_va_page instance. + * + * Return: offset of the slot inside the VA page + */ +unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page) +{ + int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT); + + if (slot < SGX_VA_SLOT_COUNT) + set_bit(slot, va_page->slots); + + return slot << 3; +} + +/** + * sgx_free_va_slot - free a VA slot + * @va_page: a &struct sgx_va_page instance + * @offset: offset of the slot inside the VA page + * + * Frees a slot from a &struct sgx_va_page instance. + */ +void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset) +{ + clear_bit(offset >> 3, va_page->slots); +} + +/** + * sgx_va_page_full - is the VA page full? + * @va_page: a &struct sgx_va_page instance + * + * Return: true if all slots have been taken + */ +bool sgx_va_page_full(struct sgx_va_page *va_page) +{ + int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT); + + return slot == SGX_VA_SLOT_COUNT; +} diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h new file mode 100644 index 000000000000..d8d30ccbef4c --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/encl.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright(c) 2016-20 Intel Corporation. + * + * Contains the software defined data structures for enclaves. + */ +#ifndef _X86_ENCL_H +#define _X86_ENCL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sgx.h" + +/* 'desc' bits holding the offset in the VA (version array) page. */ +#define SGX_ENCL_PAGE_VA_OFFSET_MASK GENMASK_ULL(11, 3) + +/* 'desc' bit marking that the page is being reclaimed. 
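+ *
+ * Note: the bit overlaps SGX_ENCL_PAGE_VA_OFFSET_MASK.  This works because
+ * sgx_encl_ewb() clears the bit before it stores the VA offset in 'desc'.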
*/ +#define SGX_ENCL_PAGE_BEING_RECLAIMED BIT(3) + +struct sgx_encl_page { + unsigned long desc; + unsigned long vm_max_prot_bits; + struct sgx_epc_page *epc_page; + struct sgx_encl *encl; + struct sgx_va_page *va_page; +}; + +enum sgx_encl_flags { + SGX_ENCL_IOCTL = BIT(0), + SGX_ENCL_DEBUG = BIT(1), + SGX_ENCL_CREATED = BIT(2), + SGX_ENCL_INITIALIZED = BIT(3), +}; + +struct sgx_encl_mm { + struct sgx_encl *encl; + struct mm_struct *mm; + struct list_head list; + struct mmu_notifier mmu_notifier; +}; + +struct sgx_encl { + unsigned long base; + unsigned long size; + unsigned long flags; + unsigned int page_cnt; + unsigned int secs_child_cnt; + struct mutex lock; + struct xarray page_array; + struct sgx_encl_page secs; + unsigned long attributes; + unsigned long attributes_mask; + + cpumask_t cpumask; + struct file *backing; + struct kref refcount; + struct list_head va_pages; + unsigned long mm_list_version; + struct list_head mm_list; + spinlock_t mm_lock; + struct srcu_struct srcu; +}; + +#define SGX_VA_SLOT_COUNT 512 + +struct sgx_va_page { + struct sgx_epc_page *epc_page; + DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT); + struct list_head list; +}; + +struct sgx_backing { + pgoff_t page_index; + struct page *contents; + struct page *pcmd; + unsigned long pcmd_offset; +}; + +extern const struct vm_operations_struct sgx_vm_ops; + +static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr, + struct vm_area_struct **vma) +{ + struct vm_area_struct *result; + + result = find_vma(mm, addr); + if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start) + return -EINVAL; + + *vma = result; + + return 0; +} + +int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, + unsigned long end, unsigned long vm_flags); + +void sgx_encl_release(struct kref *ref); +int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm); +int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, + struct sgx_backing *backing); +void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write); +int sgx_encl_test_and_clear_young(struct mm_struct *mm, + struct sgx_encl_page *page); + +struct sgx_epc_page *sgx_alloc_va_page(void); +unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page); +void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset); +bool sgx_va_page_full(struct sgx_va_page *va_page); + +#endif /* _X86_ENCL_H */ diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h new file mode 100644 index 000000000000..443188fe7e70 --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/encls.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _X86_ENCLS_H +#define _X86_ENCLS_H + +#include +#include +#include +#include +#include +#include +#include +#include "sgx.h" + +enum sgx_encls_function { + ECREATE = 0x00, + EADD = 0x01, + EINIT = 0x02, + EREMOVE = 0x03, + EDGBRD = 0x04, + EDGBWR = 0x05, + EEXTEND = 0x06, + ELDU = 0x08, + EBLOCK = 0x09, + EPA = 0x0A, + EWB = 0x0B, + ETRACK = 0x0C, +}; + +/** + * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr + * + * ENCLS has its own (positive value) error codes and also generates + * ENCLS specific #GP and #PF faults. And the ENCLS values get munged + * with system error codes as everything percolates back up the stack. + * Unfortunately (for us), we need to precisely identify each unique + * error code, e.g. the action taken if EWB fails varies based on the + * type of fault and on the exact SGX error code, i.e. 
we can't simply + * convert all faults to -EFAULT. + * + * To make all three error types coexist, we set bit 30 to identify an + * ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate + * between positive (faults and SGX error codes) and negative (system + * error codes) values. + */ +#define ENCLS_FAULT_FLAG 0x40000000 + +/* Retrieve the encoded trapnr from the specified return code. */ +#define ENCLS_TRAPNR(r) ((r) & ~ENCLS_FAULT_FLAG) + +/* Issue a WARN() about an ENCLS function. */ +#define ENCLS_WARN(r, name) { \ + do { \ + int _r = (r); \ + WARN_ONCE(_r, "%s returned %d (0x%x)\n", (name), _r, _r); \ + } while (0); \ +} + +/** + * encls_failed() - Check if an ENCLS function failed + * @ret: the return value of an ENCLS function call + * + * Check if an ENCLS function failed. This happens when the function causes a + * fault that is not caused by an EPCM conflict or when the function returns a + * non-zero value. + */ +static inline bool encls_failed(int ret) +{ + if (ret & ENCLS_FAULT_FLAG) + return ENCLS_TRAPNR(ret) != X86_TRAP_PF; + + return !!ret; +} + +/** + * __encls_ret_N - encode an ENCLS function that returns an error code in EAX + * @rax: function number + * @inputs: asm inputs for the function + * + * Emit assembly for an ENCLS function that returns an error code, e.g. EREMOVE. + * And because SGX isn't complex enough as it is, function that return an error + * code also modify flags. + * + * Return: + * 0 on success, + * SGX error code on failure + */ +#define __encls_ret_N(rax, inputs...) \ + ({ \ + int ret; \ + asm volatile( \ + "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE_FAULT(1b, 3b) \ + : "=a"(ret) \ + : "a"(rax), inputs \ + : "memory", "cc"); \ + ret; \ + }) + +#define __encls_ret_1(rax, rcx) \ + ({ \ + __encls_ret_N(rax, "c"(rcx)); \ + }) + +#define __encls_ret_2(rax, rbx, rcx) \ + ({ \ + __encls_ret_N(rax, "b"(rbx), "c"(rcx)); \ + }) + +#define __encls_ret_3(rax, rbx, rcx, rdx) \ + ({ \ + __encls_ret_N(rax, "b"(rbx), "c"(rcx), "d"(rdx)); \ + }) + +/** + * __encls_N - encode an ENCLS function that doesn't return an error code + * @rax: function number + * @rbx_out: optional output variable + * @inputs: asm inputs for the function + * + * Emit assembly for an ENCLS function that does not return an error code, e.g. + * ECREATE. Leaves without error codes either succeed or fault. @rbx_out is an + * optional parameter for use by EDGBRD, which returns the requested value in + * RBX. + * + * Return: + * 0 on success, + * trapnr with ENCLS_FAULT_FLAG set on fault + */ +#define __encls_N(rax, rbx_out, inputs...) 
\ + ({ \ + int ret; \ + asm volatile( \ + "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ + " xor %%eax,%%eax;\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE_FAULT(1b, 3b) \ + : "=a"(ret), "=b"(rbx_out) \ + : "a"(rax), inputs \ + : "memory"); \ + ret; \ + }) + +#define __encls_2(rax, rbx, rcx) \ + ({ \ + unsigned long ign_rbx_out; \ + __encls_N(rax, ign_rbx_out, "b"(rbx), "c"(rcx)); \ + }) + +#define __encls_1_1(rax, data, rcx) \ + ({ \ + unsigned long rbx_out; \ + int ret = __encls_N(rax, rbx_out, "c"(rcx)); \ + if (!ret) \ + data = rbx_out; \ + ret; \ + }) + +static inline int __ecreate(struct sgx_pageinfo *pginfo, void *secs) +{ + return __encls_2(ECREATE, pginfo, secs); +} + +static inline int __eextend(void *secs, void *addr) +{ + return __encls_2(EEXTEND, secs, addr); +} + +static inline int __eadd(struct sgx_pageinfo *pginfo, void *addr) +{ + return __encls_2(EADD, pginfo, addr); +} + +static inline int __einit(void *sigstruct, void *token, void *secs) +{ + return __encls_ret_3(EINIT, sigstruct, secs, token); +} + +static inline int __eremove(void *addr) +{ + return __encls_ret_1(EREMOVE, addr); +} + +static inline int __edbgwr(void *addr, unsigned long *data) +{ + return __encls_2(EDGBWR, *data, addr); +} + +static inline int __edbgrd(void *addr, unsigned long *data) +{ + return __encls_1_1(EDGBRD, *data, addr); +} + +static inline int __etrack(void *addr) +{ + return __encls_ret_1(ETRACK, addr); +} + +static inline int __eldu(struct sgx_pageinfo *pginfo, void *addr, + void *va) +{ + return __encls_ret_3(ELDU, pginfo, addr, va); +} + +static inline int __eblock(void *addr) +{ + return __encls_ret_1(EBLOCK, addr); +} + +static inline int __epa(void *addr) +{ + unsigned long rbx = SGX_PAGE_TYPE_VA; + + return __encls_2(EPA, rbx, addr); +} + +static inline int __ewb(struct sgx_pageinfo *pginfo, void *addr, + void *va) +{ + return __encls_ret_3(EWB, pginfo, addr, va); +} + +#endif /* _X86_ENCLS_H */ diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c new file mode 100644 index 000000000000..9228d8c7660f --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -0,0 +1,716 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "driver.h" +#include "encl.h" +#include "encls.h" + +static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl) +{ + struct sgx_va_page *va_page = NULL; + void *err; + + BUILD_BUG_ON(SGX_VA_SLOT_COUNT != + (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1); + + if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) { + va_page = kzalloc(sizeof(*va_page), GFP_KERNEL); + if (!va_page) + return ERR_PTR(-ENOMEM); + + va_page->epc_page = sgx_alloc_va_page(); + if (IS_ERR(va_page->epc_page)) { + err = ERR_CAST(va_page->epc_page); + kfree(va_page); + return err; + } + + WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT); + } + encl->page_cnt++; + return va_page; +} + +static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page) +{ + encl->page_cnt--; + + if (va_page) { + sgx_free_epc_page(va_page->epc_page); + list_del(&va_page->list); + kfree(va_page); + } +} + +static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) +{ + struct sgx_epc_page *secs_epc; + struct sgx_va_page *va_page; + struct sgx_pageinfo pginfo; + struct sgx_secinfo secinfo; + unsigned long encl_size; + struct file *backing; + long ret; + + va_page = sgx_encl_grow(encl); + if (IS_ERR(va_page)) + return PTR_ERR(va_page); + else if (va_page) + list_add(&va_page->list, &encl->va_pages); + /* else the tail page of the VA page list had free slots. */ + + /* The extra page goes to SECS. */ + encl_size = secs->size + PAGE_SIZE; + + backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5), + VM_NORESERVE); + if (IS_ERR(backing)) { + ret = PTR_ERR(backing); + goto err_out_shrink; + } + + encl->backing = backing; + + secs_epc = sgx_alloc_epc_page(&encl->secs, true); + if (IS_ERR(secs_epc)) { + ret = PTR_ERR(secs_epc); + goto err_out_backing; + } + + encl->secs.epc_page = secs_epc; + + pginfo.addr = 0; + pginfo.contents = (unsigned long)secs; + pginfo.metadata = (unsigned long)&secinfo; + pginfo.secs = 0; + memset(&secinfo, 0, sizeof(secinfo)); + + ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc)); + if (ret) { + ret = -EIO; + goto err_out; + } + + if (secs->attributes & SGX_ATTR_DEBUG) + set_bit(SGX_ENCL_DEBUG, &encl->flags); + + encl->secs.encl = encl; + encl->base = secs->base; + encl->size = secs->size; + encl->attributes = secs->attributes; + encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS; + + /* Set only after completion, as encl->lock has not been taken. */ + set_bit(SGX_ENCL_CREATED, &encl->flags); + + return 0; + +err_out: + sgx_free_epc_page(encl->secs.epc_page); + encl->secs.epc_page = NULL; + +err_out_backing: + fput(encl->backing); + encl->backing = NULL; + +err_out_shrink: + sgx_encl_shrink(encl, va_page); + + return ret; +} + +/** + * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE + * @encl: An enclave pointer. + * @arg: The ioctl argument. + * + * Allocate kernel data structures for the enclave and invoke ECREATE. + * + * Return: + * - 0: Success. + * - -EIO: ECREATE failed. + * - -errno: POSIX error. 
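+ *
+ * A minimal user space sketch (the device fd and 'secs_buf' are assumed for
+ * illustration):
+ *
+ *	struct sgx_enclave_create parms = { .src = (__u64)secs_buf };
+ *
+ *	ret = ioctl(enclave_fd, SGX_IOC_ENCLAVE_CREATE, &parms);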
+ */
+static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
+{
+	struct sgx_enclave_create create_arg;
+	void *secs;
+	int ret;
+
+	if (test_bit(SGX_ENCL_CREATED, &encl->flags))
+		return -EINVAL;
+
+	if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
+		return -EFAULT;
+
+	secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!secs)
+		return -ENOMEM;
+
+	if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
+		ret = -EFAULT;
+	else
+		ret = sgx_encl_create(encl, secs);
+
+	kfree(secs);
+	return ret;
+}
+
+static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+						 unsigned long offset,
+						 u64 secinfo_flags)
+{
+	struct sgx_encl_page *encl_page;
+	unsigned long prot;
+
+	encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+	if (!encl_page)
+		return ERR_PTR(-ENOMEM);
+
+	encl_page->desc = encl->base + offset;
+	encl_page->encl = encl;
+
+	prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
+	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
+	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
+
+	/*
+	 * TCS pages must always have RW set for CPU access while the SECINFO
+	 * permissions are *always* zero - the CPU ignores the user provided
+	 * values and silently overwrites them with zero permissions.
+	 */
+	if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
+		prot |= PROT_READ | PROT_WRITE;
+
+	/* Calculate maximum of the VM flags for the page. */
+	encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+
+	return encl_page;
+}
+
+static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
+{
+	u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
+	u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
+
+	if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
+		return -EINVAL;
+
+	if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
+		return -EINVAL;
+
+	/*
+	 * The CPU will silently overwrite the permissions with zero, which
+	 * means that we need to validate them ourselves.
+	 */
+	if (pt == SGX_SECINFO_TCS && perm)
+		return -EINVAL;
+
+	if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
+		return -EINVAL;
+
+	if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __sgx_encl_add_page(struct sgx_encl *encl,
+			       struct sgx_encl_page *encl_page,
+			       struct sgx_epc_page *epc_page,
+			       struct sgx_secinfo *secinfo, unsigned long src)
+{
+	struct sgx_pageinfo pginfo;
+	struct vm_area_struct *vma;
+	struct page *src_page;
+	int ret;
+
+	/* Deny noexec. */
+	vma = find_vma(current->mm, src);
+	if (!vma)
+		return -EFAULT;
+
+	if (!(vma->vm_flags & VM_MAYEXEC))
+		return -EACCES;
+
+	ret = get_user_pages(src, 1, 0, &src_page, NULL);
+	if (ret < 1)
+		return -EFAULT;
+
+	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
+	pginfo.addr = encl_page->desc & PAGE_MASK;
+	pginfo.metadata = (unsigned long)secinfo;
+	pginfo.contents = (unsigned long)kmap_atomic(src_page);
+
+	ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));
+
+	kunmap_atomic((void *)pginfo.contents);
+	put_page(src_page);
+
+	return ret ? -EIO : 0;
+}
+
+/*
+ * If the caller requires measurement of the page as a proof for the content,
+ * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
+ * operation until the entire page is measured.
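+ *
+ * With 4KB pages this amounts to PAGE_SIZE / SGX_EEXTEND_BLOCK_SIZE = 16
+ * EEXTEND invocations per measured page.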
+ */ +static int __sgx_encl_extend(struct sgx_encl *encl, + struct sgx_epc_page *epc_page) +{ + unsigned long offset; + int ret; + + for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) { + ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page), + sgx_get_epc_virt_addr(epc_page) + offset); + if (ret) { + if (encls_failed(ret)) + ENCLS_WARN(ret, "EEXTEND"); + + return -EIO; + } + } + + return 0; +} + +static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src, + unsigned long offset, struct sgx_secinfo *secinfo, + unsigned long flags) +{ + struct sgx_encl_page *encl_page; + struct sgx_epc_page *epc_page; + struct sgx_va_page *va_page; + int ret; + + encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags); + if (IS_ERR(encl_page)) + return PTR_ERR(encl_page); + + epc_page = sgx_alloc_epc_page(encl_page, true); + if (IS_ERR(epc_page)) { + kfree(encl_page); + return PTR_ERR(epc_page); + } + + va_page = sgx_encl_grow(encl); + if (IS_ERR(va_page)) { + ret = PTR_ERR(va_page); + goto err_out_free; + } + + down_read(¤t->mm->mmap_sem); + mutex_lock(&encl->lock); + + /* + * Adding to encl->va_pages must be done under encl->lock. Ditto for + * deleting (via sgx_encl_shrink()) in the error path. + */ + if (va_page) + list_add(&va_page->list, &encl->va_pages); + + /* + * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e. + * can't be gracefully unwound, while failure on EADD/EXTEND is limited + * to userspace errors (or kernel/hardware bugs). + */ + ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc), + encl_page, GFP_KERNEL); + if (ret) + goto err_out_unlock; + + ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo, + src); + if (ret) + goto err_out; + + /* + * Complete the "add" before doing the "extend" so that the "add" + * isn't in a half-baked state in the extremely unlikely scenario + * the enclave will be destroyed in response to EEXTEND failure. + */ + encl_page->encl = encl; + encl_page->epc_page = epc_page; + encl->secs_child_cnt++; + + if (flags & SGX_PAGE_MEASURE) { + ret = __sgx_encl_extend(encl, epc_page); + if (ret) + goto err_out; + } + + sgx_mark_page_reclaimable(encl_page->epc_page); + mutex_unlock(&encl->lock); + up_read(¤t->mm->mmap_sem); + return ret; + +err_out: + xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc)); + +err_out_unlock: + sgx_encl_shrink(encl, va_page); + mutex_unlock(&encl->lock); + up_read(¤t->mm->mmap_sem); + +err_out_free: + sgx_free_epc_page(epc_page); + kfree(encl_page); + + return ret; +} + +/** + * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES + * @encl: an enclave pointer + * @arg: a user pointer to a struct sgx_enclave_add_pages instance + * + * Add one or more pages to an uninitialized enclave, and optionally extend the + * measurement with the contents of the page. The SECINFO and measurement mask + * are applied to all pages. + * + * A SECINFO for a TCS is required to always contain zero permissions because + * CPU silently zeros them. Allowing anything else would cause a mismatch in + * the measurement. + * + * mmap()'s protection bits are capped by the page permissions. For each page + * address, the maximum protection bits are computed with the following + * heuristics: + * + * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions. + * 2. A TCS page: PROT_R | PROT_W. + * + * mmap() is not allowed to surpass the minimum of the maximum protection bits + * within the given address range. 
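+ *
+ * For example, a regular page added with SECINFO permissions R|W may later
+ * be mmap()'d with PROT_READ, PROT_WRITE or both, but an mmap() asking for
+ * PROT_EXEC anywhere in that range is refused.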
+ * + * The function deinitializes kernel data structures for enclave and returns + * -EIO in any of the following conditions: + * + * - Enclave Page Cache (EPC), the physical memory holding enclaves, has + * been invalidated. This will cause EADD and EEXTEND to fail. + * - If the source address is corrupted somehow when executing EADD. + * + * Return: + * - 0: Success. + * - -EACCES: The source page is located in a noexec partition. + * - -ENOMEM: Out of EPC pages. + * - -EINTR: The call was interrupted before data was processed. + * - -EIO: Either EADD or EEXTEND failed because invalid source address + * or power cycle. + * - -errno: POSIX error. + */ +static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg) +{ + struct sgx_enclave_add_pages add_arg; + struct sgx_secinfo secinfo; + unsigned long c; + int ret; + + if (!test_bit(SGX_ENCL_CREATED, &encl->flags) || + test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) + return -EINVAL; + + if (copy_from_user(&add_arg, arg, sizeof(add_arg))) + return -EFAULT; + + if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) || + !IS_ALIGNED(add_arg.src, PAGE_SIZE)) + return -EINVAL; + + if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1)) + return -EINVAL; + + if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size) + return -EINVAL; + + if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo, + sizeof(secinfo))) + return -EFAULT; + + if (sgx_validate_secinfo(&secinfo)) + return -EINVAL; + + for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) { + if (signal_pending(current)) { + if (!c) + ret = -ERESTARTSYS; + + break; + } + + if (need_resched()) + cond_resched(); + + ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c, + &secinfo, add_arg.flags); + if (ret) + break; + } + + add_arg.count = c; + + if (copy_to_user(arg, &add_arg, sizeof(add_arg))) + return -EFAULT; + + return ret; +} + +static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus, + void *hash) +{ + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + + return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash); +} + +static int sgx_get_key_hash(const void *modulus, void *hash) +{ + struct crypto_shash *tfm; + int ret; + + tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ret = __sgx_get_key_hash(tfm, modulus, hash); + + crypto_free_shash(tfm); + return ret; +} + +static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, + void *token) +{ + u64 mrsigner[4]; + int i, j, k; + void *addr; + int ret; + + /* + * Deny initializing enclaves with attributes (namely provisioning) + * that have not been explicitly allowed. + */ + if (encl->attributes & ~encl->attributes_mask) + return -EACCES; + + /* + * Attributes should not be enforced *only* against what's available on + * platform (done in sgx_encl_create) but checked and enforced against + * the mask for enforcement in sigstruct. For example an enclave could + * opt to sign with AVX bit in xfrm, but still be loadable on a platform + * without it if the sigstruct->body.attributes_mask does not turn that + * bit on. 
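+ *
+ * In other words, a reserved bit is rejected only when the enclave sets it
+ * *and* the signer's mask enforces it, roughly:
+ *
+ *	attributes & attributes_mask & sgx_attributes_reserved_mask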
+ */ + if (sigstruct->body.attributes & sigstruct->body.attributes_mask & + sgx_attributes_reserved_mask) + return -EINVAL; + + if (sigstruct->body.miscselect & sigstruct->body.misc_mask & + sgx_misc_reserved_mask) + return -EINVAL; + + if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask & + sgx_xfrm_reserved_mask) + return -EINVAL; + + ret = sgx_get_key_hash(sigstruct->modulus, mrsigner); + if (ret) + return ret; + + mutex_lock(&encl->lock); + + /* + * ENCLS[EINIT] is interruptible because it has such a high latency, + * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending, + * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be + * serviced. + */ + for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) { + for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) { + addr = sgx_get_epc_virt_addr(encl->secs.epc_page); + + preempt_disable(); + + for (k = 0; k < 4; k++) + wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]); + + ret = __einit(sigstruct, token, addr); + + preempt_enable(); + + if (ret == SGX_UNMASKED_EVENT) + continue; + else + break; + } + + if (ret != SGX_UNMASKED_EVENT) + break; + + msleep_interruptible(SGX_EINIT_SLEEP_TIME); + + if (signal_pending(current)) { + ret = -ERESTARTSYS; + goto err_out; + } + } + + if (ret & ENCLS_FAULT_FLAG) { + if (encls_failed(ret)) + ENCLS_WARN(ret, "EINIT"); + + ret = -EIO; + } else if (ret) { + pr_debug("EINIT returned %d\n", ret); + ret = -EPERM; + } else { + set_bit(SGX_ENCL_INITIALIZED, &encl->flags); + } + +err_out: + mutex_unlock(&encl->lock); + return ret; +} + +/** + * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT + * @encl: an enclave pointer + * @arg: userspace pointer to a struct sgx_enclave_init instance + * + * Flush any outstanding enqueued EADD operations and perform EINIT. The + * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match + * the enclave's MRSIGNER, which is caculated from the provided sigstruct. + * + * Return: + * - 0: Success. + * - -EPERM: Invalid SIGSTRUCT. + * - -EIO: EINIT failed because of a power cycle. + * - -errno: POSIX error. + */ +static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) +{ + struct sgx_sigstruct *sigstruct; + struct sgx_enclave_init init_arg; + struct page *initp_page; + void *token; + int ret; + + if (!test_bit(SGX_ENCL_CREATED, &encl->flags) || + test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) + return -EINVAL; + + if (copy_from_user(&init_arg, arg, sizeof(init_arg))) + return -EFAULT; + + initp_page = alloc_page(GFP_KERNEL); + if (!initp_page) + return -ENOMEM; + + sigstruct = kmap(initp_page); + token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2); + memset(token, 0, SGX_LAUNCH_TOKEN_SIZE); + + if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct, + sizeof(*sigstruct))) { + ret = -EFAULT; + goto out; + } + + /* + * A legacy field used with Intel signed enclaves. These used to mean + * regular and architectural enclaves. The CPU only accepts these values + * but they do not have any other meaning. + * + * Thus, reject any other values. 
+ */ + if (sigstruct->header.vendor != 0x0000 && + sigstruct->header.vendor != 0x8086) { + ret = -EINVAL; + goto out; + } + + ret = sgx_encl_init(encl, sigstruct, token); + +out: + kunmap(initp_page); + __free_page(initp_page); + return ret; +} + +/** + * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION + * @encl: an enclave pointer + * @arg: userspace pointer to a struct sgx_enclave_provision instance + * + * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to + * /dev/sgx_provision. + * + * Return: + * - 0: Success. + * - -errno: Otherwise. + */ +static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg) +{ + struct sgx_enclave_provision params; + struct file *file; + + if (copy_from_user(¶ms, arg, sizeof(params))) + return -EFAULT; + + file = fget(params.fd); + if (!file) + return -EINVAL; + + if (file->f_op != &sgx_provision_fops) { + fput(file); + return -EINVAL; + } + + encl->attributes_mask |= SGX_ATTR_PROVISIONKEY; + + fput(file); + return 0; +} + +long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct sgx_encl *encl = filep->private_data; + int ret; + + if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags)) + return -EBUSY; + + switch (cmd) { + case SGX_IOC_ENCLAVE_CREATE: + ret = sgx_ioc_enclave_create(encl, (void __user *)arg); + break; + case SGX_IOC_ENCLAVE_ADD_PAGES: + ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg); + break; + case SGX_IOC_ENCLAVE_INIT: + ret = sgx_ioc_enclave_init(encl, (void __user *)arg); + break; + case SGX_IOC_ENCLAVE_PROVISION: + ret = sgx_ioc_enclave_provision(encl, (void __user *)arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + clear_bit(SGX_ENCL_IOCTL, &encl->flags); + return ret; +} diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c new file mode 100644 index 000000000000..010fea31942e --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -0,0 +1,737 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "driver.h" +#include "encl.h" +#include "encls.h" + +struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS]; +static int sgx_nr_epc_sections; +static struct task_struct *ksgxd_tsk; +static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); + +/* + * These variables are part of the state of the reclaimer, and must be accessed + * with sgx_reclaimer_lock acquired. + */ +static LIST_HEAD(sgx_active_page_list); + +static DEFINE_SPINLOCK(sgx_reclaimer_lock); + +/* + * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS + * pages whose child pages blocked EREMOVE. 
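+ * Such SECS pages are put back on the laundry list and succeed on the
+ * second sanitization pass done from ksgxd(), after their children have
+ * been removed.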
+ */ +static void sgx_sanitize_section(struct sgx_epc_section *section) +{ + struct sgx_epc_page *page; + LIST_HEAD(dirty); + int ret; + + /* init_laundry_list is thread-local, no need for a lock: */ + while (!list_empty(§ion->init_laundry_list)) { + if (kthread_should_stop()) + return; + + /* needed for access to ->page_list: */ + spin_lock(§ion->lock); + + page = list_first_entry(§ion->init_laundry_list, + struct sgx_epc_page, list); + + ret = __eremove(sgx_get_epc_virt_addr(page)); + if (!ret) + list_move(&page->list, §ion->page_list); + else + list_move_tail(&page->list, &dirty); + + spin_unlock(§ion->lock); + + cond_resched(); + } + + list_splice(&dirty, §ion->init_laundry_list); +} + +static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page) +{ + struct sgx_encl_page *page = epc_page->owner; + struct sgx_encl *encl = page->encl; + struct sgx_encl_mm *encl_mm; + bool ret = true; + int idx; + + idx = srcu_read_lock(&encl->srcu); + + list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { + if (!mmget_not_zero(encl_mm->mm)) + continue; + + down_read(&encl_mm->mm->mmap_sem); + ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page); + up_read(&encl_mm->mm->mmap_sem); + + mmput_async(encl_mm->mm); + + if (!ret) + break; + } + + srcu_read_unlock(&encl->srcu, idx); + + if (!ret) + return false; + + return true; +} + +static void sgx_reclaimer_block(struct sgx_epc_page *epc_page) +{ + struct sgx_encl_page *page = epc_page->owner; + unsigned long addr = page->desc & PAGE_MASK; + struct sgx_encl *encl = page->encl; + unsigned long mm_list_version; + struct sgx_encl_mm *encl_mm; + struct vm_area_struct *vma; + int idx, ret; + + do { + mm_list_version = encl->mm_list_version; + + /* Pairs with smp_rmb() in sgx_encl_mm_add(). */ + smp_rmb(); + + idx = srcu_read_lock(&encl->srcu); + + list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { + if (!mmget_not_zero(encl_mm->mm)) + continue; + + down_read(&encl_mm->mm->mmap_sem); + + ret = sgx_encl_find(encl_mm->mm, addr, &vma); + if (!ret && encl == vma->vm_private_data) + zap_vma_ptes(vma, addr, PAGE_SIZE); + + up_read(&encl_mm->mm->mmap_sem); + + mmput_async(encl_mm->mm); + } + + srcu_read_unlock(&encl->srcu, idx); + } while (unlikely(encl->mm_list_version != mm_list_version)); + + mutex_lock(&encl->lock); + + ret = __eblock(sgx_get_epc_virt_addr(epc_page)); + if (encls_failed(ret)) + ENCLS_WARN(ret, "EBLOCK"); + + mutex_unlock(&encl->lock); +} + +static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot, + struct sgx_backing *backing) +{ + struct sgx_pageinfo pginfo; + int ret; + + pginfo.addr = 0; + pginfo.secs = 0; + + pginfo.contents = (unsigned long)kmap_atomic(backing->contents); + pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) + + backing->pcmd_offset; + + ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot); + + kunmap_atomic((void *)(unsigned long)(pginfo.metadata - + backing->pcmd_offset)); + kunmap_atomic((void *)(unsigned long)pginfo.contents); + + return ret; +} + +static void sgx_ipi_cb(void *info) +{ +} + +static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl) +{ + cpumask_t *cpumask = &encl->cpumask; + struct sgx_encl_mm *encl_mm; + int idx; + + /* + * Can race with sgx_encl_mm_add(), but ETRACK has already been + * executed, which means that the CPUs running in the new mm will enter + * into the enclave with a fresh epoch. 
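+ * Threads entering the enclave on those CPUs are counted in the new epoch
+ * and cannot reference the page being written back, so leaving them out of
+ * the IPI mask is benign.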
+	 */
+	cpumask_clear(cpumask);
+
+	idx = srcu_read_lock(&encl->srcu);
+
+	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+		if (!mmget_not_zero(encl_mm->mm))
+			continue;
+
+		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
+
+		mmput_async(encl_mm->mm);
+	}
+
+	srcu_read_unlock(&encl->srcu, idx);
+
+	return cpumask;
+}
+
+/*
+ * Swap a page to regular memory after transforming it to the blocked state
+ * with EBLOCK, which means that it can no longer be referenced (no new TLB
+ * entries).
+ *
+ * The first trial just tries to write the page assuming that some other thread
+ * has reset the count for threads inside the enclave by using ETRACK, and
+ * the previous thread count has been zeroed out. The second trial calls ETRACK
+ * before EWB. If that fails we kick all the HW threads out, and then do EWB,
+ * which should be guaranteed to succeed.
+ */
+static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
+			 struct sgx_backing *backing)
+{
+	struct sgx_encl_page *encl_page = epc_page->owner;
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_va_page *va_page;
+	unsigned int va_offset;
+	void *va_slot;
+	int ret;
+
+	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;
+
+	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+				   list);
+	va_offset = sgx_alloc_va_slot(va_page);
+	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
+	if (sgx_va_page_full(va_page))
+		list_move_tail(&va_page->list, &encl->va_pages);
+
+	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+	if (ret == SGX_NOT_TRACKED) {
+		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
+		if (ret) {
+			if (encls_failed(ret))
+				ENCLS_WARN(ret, "ETRACK");
+		}
+
+		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+		if (ret == SGX_NOT_TRACKED) {
+			/*
+			 * Slow path, send IPIs to kick cpus out of the
+			 * enclave.  Note, it's imperative that the cpu
+			 * mask is generated *after* ETRACK, else we'll
+			 * miss cpus that entered the enclave between
+			 * generating the mask and incrementing epoch.
+			 */
+			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+					 sgx_ipi_cb, NULL, 1);
+			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+		}
+	}
+
+	if (ret) {
+		if (encls_failed(ret))
+			ENCLS_WARN(ret, "EWB");
+
+		sgx_free_va_slot(va_page, va_offset);
+	} else {
+		encl_page->desc |= va_offset;
+		encl_page->va_page = va_page;
+	}
+}
+
+static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+				struct sgx_backing *backing)
+{
+	struct sgx_encl_page *encl_page = epc_page->owner;
+	struct sgx_encl *encl = encl_page->encl;
+	struct sgx_backing secs_backing;
+	int ret;
+
+	mutex_lock(&encl->lock);
+
+	sgx_encl_ewb(epc_page, backing);
+	encl_page->epc_page = NULL;
+	encl->secs_child_cnt--;
+
+	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
+		ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+					   &secs_backing);
+		if (ret)
+			goto out;
+
+		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);
+
+		sgx_free_epc_page(encl->secs.epc_page);
+		encl->secs.epc_page = NULL;
+
+		sgx_encl_put_backing(&secs_backing, true);
+	}
+
+out:
+	mutex_unlock(&encl->lock);
+}
+
+/*
+ * Take a fixed number of pages from the head of the active page pool and
+ * reclaim them to the enclave's private shmem files. Skip the pages that have
+ * been accessed since the last scan. Move those pages to the tail of the
+ * active page pool so that the pages get scanned in an LRU-like fashion.
+ *
+ * Batch process a chunk of pages (at the moment 16) in order to reduce the
+ * amount of IPIs and ETRACKs potentially required.
sgx_encl_ewb() does degrade a bit + * among the HW threads with three stage EWB pipeline (EWB, ETRACK + EWB and IPI + * + EWB) but not sufficiently. Reclaiming one page at a time would also be + * problematic as it would increase the lock contention too much, which would + * halt forward progress. + */ +static void sgx_reclaim_pages(void) +{ + struct sgx_epc_page *chunk[SGX_NR_TO_SCAN]; + struct sgx_backing backing[SGX_NR_TO_SCAN]; + struct sgx_epc_section *section; + struct sgx_encl_page *encl_page; + struct sgx_epc_page *epc_page; + pgoff_t page_index; + int cnt = 0; + int ret; + int i; + + spin_lock(&sgx_reclaimer_lock); + for (i = 0; i < SGX_NR_TO_SCAN; i++) { + if (list_empty(&sgx_active_page_list)) + break; + + epc_page = list_first_entry(&sgx_active_page_list, + struct sgx_epc_page, list); + list_del_init(&epc_page->list); + encl_page = epc_page->owner; + + if (kref_get_unless_zero(&encl_page->encl->refcount) != 0) + chunk[cnt++] = epc_page; + else + /* The owner is freeing the page. No need to add the + * page back to the list of reclaimable pages. + */ + epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; + } + spin_unlock(&sgx_reclaimer_lock); + + for (i = 0; i < cnt; i++) { + epc_page = chunk[i]; + encl_page = epc_page->owner; + + if (!sgx_reclaimer_age(epc_page)) + goto skip; + + page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); + ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]); + if (ret) + goto skip; + + mutex_lock(&encl_page->encl->lock); + encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED; + mutex_unlock(&encl_page->encl->lock); + continue; + +skip: + spin_lock(&sgx_reclaimer_lock); + list_add_tail(&epc_page->list, &sgx_active_page_list); + spin_unlock(&sgx_reclaimer_lock); + + kref_put(&encl_page->encl->refcount, sgx_encl_release); + + chunk[i] = NULL; + } + + for (i = 0; i < cnt; i++) { + epc_page = chunk[i]; + if (epc_page) + sgx_reclaimer_block(epc_page); + } + + for (i = 0; i < cnt; i++) { + epc_page = chunk[i]; + if (!epc_page) + continue; + + encl_page = epc_page->owner; + sgx_reclaimer_write(epc_page, &backing[i]); + sgx_encl_put_backing(&backing[i], true); + + kref_put(&encl_page->encl->refcount, sgx_encl_release); + epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; + + section = &sgx_epc_sections[epc_page->section]; + spin_lock(§ion->lock); + list_add_tail(&epc_page->list, §ion->page_list); + section->free_cnt++; + spin_unlock(§ion->lock); + } +} + +static unsigned long sgx_nr_free_pages(void) +{ + unsigned long cnt = 0; + int i; + + for (i = 0; i < sgx_nr_epc_sections; i++) + cnt += sgx_epc_sections[i].free_cnt; + + return cnt; +} + +static bool sgx_should_reclaim(unsigned long watermark) +{ + return sgx_nr_free_pages() < watermark && + !list_empty(&sgx_active_page_list); +} + +static int ksgxd(void *p) +{ + int i; + + set_freezable(); + + /* + * Sanitize pages in order to recover from kexec(). The 2nd pass is + * required for SECS pages, whose child pages blocked EREMOVE. + */ + for (i = 0; i < sgx_nr_epc_sections; i++) + sgx_sanitize_section(&sgx_epc_sections[i]); + + for (i = 0; i < sgx_nr_epc_sections; i++) { + sgx_sanitize_section(&sgx_epc_sections[i]); + + /* Should never happen. 
*/ + if (!list_empty(&sgx_epc_sections[i].init_laundry_list)) + WARN(1, "EPC section %d has unsanitized pages.\n", i); + } + + while (!kthread_should_stop()) { + if (try_to_freeze()) + continue; + + wait_event_freezable(ksgxd_waitq, + kthread_should_stop() || + sgx_should_reclaim(SGX_NR_HIGH_PAGES)); + + if (sgx_should_reclaim(SGX_NR_HIGH_PAGES)) + sgx_reclaim_pages(); + + cond_resched(); + } + + return 0; +} + +static bool __init sgx_page_reclaimer_init(void) +{ + struct task_struct *tsk; + + tsk = kthread_run(ksgxd, NULL, "ksgxd"); + if (IS_ERR(tsk)) + return false; + + ksgxd_tsk = tsk; + + return true; +} + +static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section) +{ + struct sgx_epc_page *page; + + spin_lock(§ion->lock); + + if (list_empty(§ion->page_list)) { + spin_unlock(§ion->lock); + return NULL; + } + + page = list_first_entry(§ion->page_list, struct sgx_epc_page, list); + list_del_init(&page->list); + section->free_cnt--; + + spin_unlock(§ion->lock); + return page; +} + +/** + * __sgx_alloc_epc_page() - Allocate an EPC page + * + * Iterate through EPC sections and borrow a free EPC page to the caller. When a + * page is no longer needed it must be released with sgx_free_epc_page(). + * + * Return: + * an EPC page, + * -errno on error + */ +struct sgx_epc_page *__sgx_alloc_epc_page(void) +{ + struct sgx_epc_section *section; + struct sgx_epc_page *page; + int i; + + for (i = 0; i < sgx_nr_epc_sections; i++) { + section = &sgx_epc_sections[i]; + + page = __sgx_alloc_epc_page_from_section(section); + if (page) + return page; + } + + return ERR_PTR(-ENOMEM); +} + +/** + * sgx_mark_page_reclaimable() - Mark a page as reclaimable + * @page: EPC page + * + * Mark a page as reclaimable and add it to the active page list. Pages + * are automatically removed from the active list when freed. + */ +void sgx_mark_page_reclaimable(struct sgx_epc_page *page) +{ + spin_lock(&sgx_reclaimer_lock); + page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED; + list_add_tail(&page->list, &sgx_active_page_list); + spin_unlock(&sgx_reclaimer_lock); +} + +/** + * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list + * @page: EPC page + * + * Clear the reclaimable flag and remove the page from the active page list. + * + * Return: + * 0 on success, + * -EBUSY if the page is in the process of being reclaimed + */ +int sgx_unmark_page_reclaimable(struct sgx_epc_page *page) +{ + spin_lock(&sgx_reclaimer_lock); + if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) { + /* The page is being reclaimed. */ + if (list_empty(&page->list)) { + spin_unlock(&sgx_reclaimer_lock); + return -EBUSY; + } + + list_del(&page->list); + page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; + } + spin_unlock(&sgx_reclaimer_lock); + + return 0; +} + +/** + * sgx_alloc_epc_page() - Allocate an EPC page + * @owner: the owner of the EPC page + * @reclaim: reclaim pages if necessary + * + * Iterate through EPC sections and borrow a free EPC page to the caller. When a + * page is no longer needed it must be released with sgx_free_epc_page(). If + * @reclaim is set to true, directly reclaim pages when we are out of pages. No + * mm's can be locked when @reclaim is set to true. + * + * Finally, wake up ksgxd when the number of pages goes below the watermark + * before returning back to the caller. 
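+ *
+ * A typical call site looks roughly like:
+ *
+ *	epc_page = sgx_alloc_epc_page(encl_page, true);
+ *	if (IS_ERR(epc_page))
+ *		return PTR_ERR(epc_page);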
+ * + * Return: + * an EPC page, + * -errno on error + */ +struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim) +{ + struct sgx_epc_page *page; + + for ( ; ; ) { + page = __sgx_alloc_epc_page(); + if (!IS_ERR(page)) { + page->owner = owner; + break; + } + + if (list_empty(&sgx_active_page_list)) + return ERR_PTR(-ENOMEM); + + if (!reclaim) { + page = ERR_PTR(-EBUSY); + break; + } + + if (signal_pending(current)) { + page = ERR_PTR(-ERESTARTSYS); + break; + } + + sgx_reclaim_pages(); + cond_resched(); + } + + if (sgx_should_reclaim(SGX_NR_LOW_PAGES)) + wake_up(&ksgxd_waitq); + + return page; +} + +/** + * sgx_free_epc_page() - Free an EPC page + * @page: an EPC page + * + * Call EREMOVE for an EPC page and insert it back to the list of free pages. + */ +void sgx_free_epc_page(struct sgx_epc_page *page) +{ + struct sgx_epc_section *section = &sgx_epc_sections[page->section]; + int ret; + + WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); + + ret = __eremove(sgx_get_epc_virt_addr(page)); + if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret)) + return; + + spin_lock(§ion->lock); + list_add_tail(&page->list, §ion->page_list); + section->free_cnt++; + spin_unlock(§ion->lock); +} + +static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, + unsigned long index, + struct sgx_epc_section *section) +{ + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long i; + + section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB); + if (!section->virt_addr) + return false; + + section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page)); + if (!section->pages) { + memunmap(section->virt_addr); + return false; + } + + section->phys_addr = phys_addr; + spin_lock_init(§ion->lock); + INIT_LIST_HEAD(§ion->page_list); + INIT_LIST_HEAD(§ion->init_laundry_list); + + for (i = 0; i < nr_pages; i++) { + section->pages[i].section = index; + section->pages[i].flags = 0; + section->pages[i].owner = NULL; + list_add_tail(§ion->pages[i].list, §ion->init_laundry_list); + } + + section->free_cnt = nr_pages; + return true; +} + +/** + * A section metric is concatenated in a way that @low bits 12-31 define the + * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the + * metric. 
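+ *
+ * E.g. with the illustrative values low = 0x70200000 and high = 0x1 the
+ * result is (0x70200000 & GENMASK_ULL(31, 12)) + (0x1 << 32) = 0x170200000.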
+ */ +static inline u64 __init sgx_calc_section_metric(u64 low, u64 high) +{ + return (low & GENMASK_ULL(31, 12)) + + ((high & GENMASK_ULL(19, 0)) << 32); +} + +static bool __init sgx_page_cache_init(void) +{ + u32 eax, ebx, ecx, edx, type; + u64 pa, size; + int i; + + for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) { + cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) + break; + + if (type != SGX_CPUID_EPC_SECTION) { + pr_err_once("Unknown EPC section type: %u\n", type); + break; + } + + pa = sgx_calc_section_metric(eax, ebx); + size = sgx_calc_section_metric(ecx, edx); + + pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1); + + if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) { + pr_err("No free memory for an EPC section\n"); + break; + } + + sgx_nr_epc_sections++; + } + + if (!sgx_nr_epc_sections) { + pr_err("There are zero EPC sections.\n"); + return false; + } + + return true; +} + +static int __init sgx_init(void) +{ + int ret; + int i; + + if (!cpu_feature_enabled(X86_FEATURE_SGX)) + return -ENODEV; + + if (!sgx_page_cache_init()) + return -ENOMEM; + + if (!sgx_page_reclaimer_init()) { + ret = -ENOMEM; + goto err_page_cache; + } + + ret = sgx_drv_init(); + if (ret) + goto err_kthread; + + return 0; + +err_kthread: + kthread_stop(ksgxd_tsk); + +err_page_cache: + for (i = 0; i < sgx_nr_epc_sections; i++) { + vfree(sgx_epc_sections[i].pages); + memunmap(sgx_epc_sections[i].virt_addr); + } + + return ret; +} + +device_initcall(sgx_init); diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h new file mode 100644 index 000000000000..5fa42d143feb --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/sgx.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _X86_SGX_H +#define _X86_SGX_H + +#include +#include +#include +#include +#include +#include +#include "arch.h" + +#undef pr_fmt +#define pr_fmt(fmt) "sgx: " fmt + +#define SGX_MAX_EPC_SECTIONS 8 +#define SGX_EEXTEND_BLOCK_SIZE 256 +#define SGX_NR_TO_SCAN 16 +#define SGX_NR_LOW_PAGES 32 +#define SGX_NR_HIGH_PAGES 64 + +/* Pages, which are being tracked by the page reclaimer. */ +#define SGX_EPC_PAGE_RECLAIMER_TRACKED BIT(0) + +struct sgx_epc_page { + unsigned int section; + unsigned int flags; + struct sgx_encl_page *owner; + struct list_head list; +}; + +/* + * The firmware can define multiple chunks of EPC to the different areas of the + * physical memory e.g. for memory areas of the each node. This structure is + * used to store EPC pages for one EPC section and virtual memory area where + * the pages have been mapped. + * + * 'lock' must be held before accessing 'page_list' or 'free_cnt'. + */ +struct sgx_epc_section { + unsigned long phys_addr; + void *virt_addr; + struct sgx_epc_page *pages; + + spinlock_t lock; + struct list_head page_list; + unsigned long free_cnt; + + /* + * Pages which need EREMOVE run on them before they can be + * used. Only safe to be accessed in ksgxd and init code. + * Not protected by locks. 
+ */ + struct list_head init_laundry_list; +}; + +extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS]; + +static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page) +{ + struct sgx_epc_section *section = &sgx_epc_sections[page->section]; + unsigned long index; + + index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page); + + return section->phys_addr + index * PAGE_SIZE; +} + +static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page) +{ + struct sgx_epc_section *section = &sgx_epc_sections[page->section]; + unsigned long index; + + index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page); + + return section->virt_addr + index * PAGE_SIZE; +} + +struct sgx_epc_page *__sgx_alloc_epc_page(void); +void sgx_free_epc_page(struct sgx_epc_page *page); + +void sgx_mark_page_reclaimable(struct sgx_epc_page *page); +int sgx_unmark_page_reclaimable(struct sgx_epc_page *page); +struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim); + +#endif /* _X86_SGX_H */ diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index ee48c3fc8a65..24da5ee4f022 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -25,10 +25,10 @@ #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) -#ifdef CONFIG_SMP unsigned int __max_die_per_package __read_mostly = 1; EXPORT_SYMBOL(__max_die_per_package); +#ifdef CONFIG_SMP /* * Check if given CPUID extended toplogy "leaf" is implemented */ diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c index c222f283b456..32b4dc9030aa 100644 --- a/arch/x86/kernel/cpu/umwait.c +++ b/arch/x86/kernel/cpu/umwait.c @@ -17,12 +17,6 @@ */ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); -u32 get_umwait_control_msr(void) -{ - return umwait_control_cached; -} -EXPORT_SYMBOL_GPL(get_umwait_control_msr); - /* * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by * hardware or BIOS before kernel boot. diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index eb651fbde92a..0c319d09378d 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { @@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void) rcu_read_unlock(); } +/* + * When the crashkernel option is specified, only use the low + * 1M for the real mode trampoline. 
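+ * Reserving the whole range keeps it out of the page allocator; the assumed
+ * intent is that the low 1M then survives intact for the kdump kernel to
+ * reuse.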
+ */ +void __init crash_reserve_low_1M(void) +{ + if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0) + return; + + memblock_reserve(0, 1<<20); + pr_info("Reserving the low 1M of memory for crashkernel\n"); +} + #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) static void kdump_nmi_callback(int cpu, struct pt_regs *regs) @@ -349,7 +364,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) struct crash_memmap_data cmd; struct crash_mem *cmem; - cmem = vzalloc(sizeof(struct crash_mem)); + cmem = vzalloc(struct_size(cmem, ranges, 1)); if (!cmem) return -ENOMEM; diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 12c70840980e..8c9b202f3e6d 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -82,7 +82,7 @@ bool irq_fpu_usable(void) } EXPORT_SYMBOL(irq_fpu_usable); -void kernel_fpu_begin(void) +void kernel_fpu_begin_mask(unsigned int kfpu_mask) { preempt_disable(); @@ -101,8 +101,15 @@ void kernel_fpu_begin(void) copy_fpregs_to_fpstate(¤t->thread.fpu); } __cpu_invalidate_fpregs_state(); + + /* Put sane initial values into the control registers. */ + if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) + ldmxcsr(MXCSR_DEFAULT); + + if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) + asm volatile ("fninit"); } -EXPORT_SYMBOL_GPL(kernel_fpu_begin); +EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 6ce7e0a23268..b271da0fa219 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -242,9 +242,9 @@ static void __init fpu__init_system_ctx_switch(void) */ static void __init fpu__init_parse_early_param(void) { - char arg[32]; + char arg[128]; char *argptr = arg; - int bit; + int arglen, res, bit; #ifdef CONFIG_X86_32 if (cmdline_find_option_bool(boot_command_line, "no387")) @@ -267,12 +267,26 @@ static void __init fpu__init_parse_early_param(void) if (cmdline_find_option_bool(boot_command_line, "noxsaves")) setup_clear_cpu_cap(X86_FEATURE_XSAVES); - if (cmdline_find_option(boot_command_line, "clearcpuid", arg, - sizeof(arg)) && - get_option(&argptr, &bit) && - bit >= 0 && - bit < NCAPINTS * 32) - setup_clear_cpu_cap(bit); + arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg)); + if (arglen <= 0) + return; + + pr_info("Clearing CPUID bits:"); + do { + res = get_option(&argptr, &bit); + if (res == 0 || res == 3) + break; + + /* If the argument was too long, the last bit may be cut off */ + if (res == 1 && arglen >= sizeof(arg)) + break; + + if (bit >= 0 && bit < NCAPINTS * 32) { + pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); + setup_clear_cpu_cap(bit); + } + } while (res == 2); + pr_cont("\n"); } /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index e5cb67d67c03..735d1f1bbabc 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -895,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr) #ifdef CONFIG_ARCH_HAS_PKEYS -#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) -#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. @@ -915,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return -EINVAL; + /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. 
Complain + * if a bad value is observed. + */ + WARN_ON_ONCE(pkey >= arch_max_pkey()); + /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT; @@ -952,18 +957,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) return true; } -/* - * This is similar to user_regset_copyout(), but will not add offset to - * the source data pointer or increment pos, count, kbuf, and ubuf. - */ -static inline void -__copy_xstate_to_kernel(void *kbuf, const void *data, - unsigned int offset, unsigned int size, unsigned int size_total) +static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count) { - if (offset < size_total) { - unsigned int copy = min(size, size_total - offset); + if (*pos < to) { + unsigned size = to - *pos; - memcpy(kbuf + offset, data, copy); + if (size > *count) + size = *count; + memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size); + *kbuf += size; + *pos += size; + *count -= size; + } +} + +static void copy_part(unsigned offset, unsigned size, void *from, + void **kbuf, unsigned *pos, unsigned *count) +{ + fill_gap(offset, kbuf, pos, count); + if (size > *count) + size = *count; + if (size) { + memcpy(*kbuf, from, size); + *kbuf += size; + *pos += size; + *count -= size; } } @@ -976,8 +994,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data, */ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) { - unsigned int offset, size; struct xstate_header header; + const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr); + unsigned count = size_total; int i; /* @@ -993,46 +1012,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of header.xfeatures = xsave->header.xfeatures; header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; + if (header.xfeatures & XFEATURE_MASK_FP) + copy_part(0, off_mxcsr, + &xsave->i387, &kbuf, &offset_start, &count); + if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)) + copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE, + &xsave->i387.mxcsr, &kbuf, &offset_start, &count); + if (header.xfeatures & XFEATURE_MASK_FP) + copy_part(offsetof(struct fxregs_state, st_space), 128, + &xsave->i387.st_space, &kbuf, &offset_start, &count); + if (header.xfeatures & XFEATURE_MASK_SSE) + copy_part(xstate_offsets[XFEATURE_SSE], 256, + &xsave->i387.xmm_space, &kbuf, &offset_start, &count); + /* + * Fill xsave->i387.sw_reserved value for ptrace frame: + */ + copy_part(offsetof(struct fxregs_state, sw_reserved), 48, + xstate_fx_sw_bytes, &kbuf, &offset_start, &count); /* * Copy xregs_state->header: */ - offset = offsetof(struct xregs_state, header); - size = sizeof(header); + copy_part(offsetof(struct xregs_state, header), sizeof(header), + &header, &kbuf, &offset_start, &count); - __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); - - for (i = 0; i < XFEATURE_MAX; i++) { + for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) { /* * Copy only in-use xstates: */ if ((header.xfeatures >> i) & 1) { void *src = __raw_xsave_addr(xsave, i); - offset = xstate_offsets[i]; - size = xstate_sizes[i]; - - /* The next component has to fit fully into the output buffer: */ - if (offset + size > size_total) - break; - - __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); + copy_part(xstate_offsets[i], xstate_sizes[i], + src, &kbuf, &offset_start, &count); } } - - if (xfeatures_mxcsr_quirk(header.xfeatures)) { - offset = offsetof(struct fxregs_state, mxcsr); - size = MXCSR_AND_FLAGS_SIZE; - 
__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); - } - - /* - * Fill xsave->i387.sw_reserved value for ptrace frame: - */ - offset = offsetof(struct fxregs_state, sw_reserved); - size = sizeof(xstate_fx_sw_bytes); - - __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); + fill_gap(size_total, &kbuf, &offset_start, &count); return 0; } diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index 073aab525d80..2cc0303522c9 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -89,7 +89,7 @@ WEAK(ftrace_stub) ret END(ftrace_caller) -ENTRY(ftrace_regs_caller) +SYM_CODE_START(ftrace_regs_caller) /* * We're here from an mcount/fentry CALL, and the stack frame looks like: * @@ -163,6 +163,7 @@ GLOBAL(ftrace_regs_call) popl %eax jmp .Lftrace_ret +SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 2e6a0676c1f4..11a5d5ade52c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) * can. */ __HEAD -ENTRY(startup_32) +SYM_CODE_START(startup_32) movl pa(initial_stack),%ecx /* test KEEP_SEGMENTS flag to see if the bootloader is asking @@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4 #else jmp .Ldefault_entry #endif /* CONFIG_PARAVIRT */ +SYM_CODE_END(startup_32) #ifdef CONFIG_HOTPLUG_CPU /* diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 519649ddf100..fe522691ac71 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ spurious_8259A_irq: * lets ACK and report it. [once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 87ef69a72c52..7bb4c3cbf4dc 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -318,7 +318,11 @@ void __init idt_setup_apic_and_irq_gates(void) #ifdef CONFIG_X86_LOCAL_APIC for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { - set_bit(i, system_vectors); + /* + * Don't set the non assigned system vectors in the + * system_vectors bitmap. Otherwise they show up in + * /proc/interrupts. + */ entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR); set_intr_gate(i, entry); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 12df3a4abfdd..6b32ab009c19 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu) pages[i] = pfn_to_page(pa >> PAGE_SHIFT); } - va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); + va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); if (!va) return -ENOMEM; diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index d2f4e706a428..b8b3b84308ed 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -210,8 +210,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch; /* Copying screen_info will do? 
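
The copy_part()/fill_gap() rework above replaces the old bounds-checked memcpy loop: components are written out in ascending offset order and any hole between them is filled from the init state rather than skipped. A userspace model of the streaming copy, with an invented 32-byte layout standing in for the xsave area:

#include <stdio.h>
#include <string.h>

static unsigned char init_state[32];	/* stands in for init_fpstate */

static void fill_gap(unsigned to, unsigned char **kbuf,
		     unsigned *pos, unsigned *count)
{
	if (*pos < to) {
		unsigned size = to - *pos;

		if (size > *count)
			size = *count;
		memcpy(*kbuf, init_state + *pos, size);
		*kbuf += size; *pos += size; *count -= size;
	}
}

static void copy_part(unsigned offset, unsigned size, const void *from,
		      unsigned char **kbuf, unsigned *pos, unsigned *count)
{
	fill_gap(offset, kbuf, pos, count);	/* pad up to this component */
	if (size > *count)
		size = *count;
	if (size) {
		memcpy(*kbuf, from, size);
		*kbuf += size; *pos += size; *count -= size;
	}
}

int main(void)
{
	unsigned char out[32], *kbuf = out;
	unsigned pos = 0, count = sizeof(out);
	const char a[4] = "AAAA", b[4] = "BBBB";

	memset(init_state, '.', sizeof(init_state));
	copy_part(4, 4, a, &kbuf, &pos, &count);	/* gap 0..3 filled */
	copy_part(12, 4, b, &kbuf, &pos, &count);	/* gap 8..11 filled */
	fill_gap(sizeof(out), &kbuf, &pos, &count);	/* trailing pad */
	fwrite(out, 1, sizeof(out), stdout);		/* ....AAAA....BBBB... */
	putchar('\n');
	return 0;
}
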
*/ - memcpy(&params->screen_info, &boot_params.screen_info, - sizeof(struct screen_info)); + memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info)); /* Fill in memsize later */ params->screen_info.ext_mem_k = 0; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 43fc13c831af..c205d77d57da 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -157,6 +157,8 @@ NOKPROBE_SYMBOL(skip_prefixes); int can_boost(struct insn *insn, void *addr) { kprobe_opcode_t opcode; + insn_byte_t prefix; + int i; if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ @@ -169,9 +171,14 @@ int can_boost(struct insn *insn, void *addr) if (insn->opcode.nbytes != 1) return 0; - /* Can't boost Address-size override prefix */ - if (unlikely(inat_is_address_size_prefix(insn->attr))) - return 0; + for_each_insn_prefix(insn, i, prefix) { + insn_attr_t attr; + + attr = inat_get_opcode_attribute(prefix); + /* Can't boost Address-size override prefix and CS override prefix */ + if (prefix == 0x2e || inat_is_address_size_prefix(attr)) + return 0; + } opcode = insn->opcode.bytes[0]; @@ -196,8 +203,8 @@ int can_boost(struct insn *insn, void *addr) /* clear and set flags are boostable */ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); default: - /* CS override prefix and call are not boostable */ - return (opcode != 0x2e && opcode != 0x9a); + /* call is not boostable */ + return opcode != 0x9a; } } @@ -746,16 +753,11 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); -static struct kprobe kretprobe_kprobe = { - .addr = (void *)kretprobe_trampoline, -}; - /* * Called from kretprobe_trampoline */ __used __visible void *trampoline_handler(struct pt_regs *regs) { - struct kprobe_ctlblk *kcb; struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; @@ -765,16 +767,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) void *frame_pointer; bool skipped = false; - preempt_disable(); - /* * Set a dummy kprobe for avoiding kretprobe recursion. * Since kretprobe never run in kprobe handler, kprobe must not * be running at this point. */ - kcb = get_kprobe_ctlblk(); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + kprobe_busy_begin(); INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -850,7 +848,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) __this_cpu_write(current_kprobe, &ri->rp->kp); ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); + __this_cpu_write(current_kprobe, &kprobe_busy); } recycle_rp_inst(ri, &empty_rp); @@ -866,8 +864,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); - __this_cpu_write(current_kprobe, NULL); - preempt_enable(); + kprobe_busy_end(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); @@ -1029,6 +1026,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) * So clear it by resetting the current kprobe: */ regs->flags &= ~X86_EFLAGS_TF; + /* + * Since the single step (trap) has been cancelled, + * we need to restore BTF here.
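
The trampoline_handler() change above swaps the hand-rolled dummy kprobe for the shared kprobe_busy_begin()/kprobe_busy_end() helpers. Loosely, the pattern is a per-CPU "already handling a kprobe" marker that makes a recursive hit bail out early; a thread-local userspace analogy only (the real helpers also disable preemption and install a dummy struct kprobe as current_kprobe):

#include <stdio.h>
#include <stdbool.h>

static _Thread_local bool kprobe_busy;	/* stands in for current_kprobe */

static bool busy_begin(void)
{
	if (kprobe_busy)
		return false;	/* recursion detected: refuse to nest */
	kprobe_busy = true;
	return true;
}

static void busy_end(void)
{
	kprobe_busy = false;
}

int main(void)
{
	if (busy_begin()) {
		printf("nested attempt allowed? %d\n", busy_begin());	/* 0 */
		busy_end();
	}
	return 0;
}
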
+ */ + restore_btf(); /* * If the TF flag was set before the kprobe hit, diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index e820568ed4d5..13d1c3a62e2a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -831,16 +831,36 @@ asm( */ void __init kvm_spinlock_init(void) { - /* Does host kernel support KVM_FEATURE_PV_UNHALT? */ - if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) + /* + * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an + * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is + * preferred over native qspinlock when vCPU is preempted. + */ + if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) { + pr_info("PV spinlocks disabled, no host support\n"); return; + } - if (kvm_para_has_hint(KVM_HINTS_REALTIME)) - return; + /* + * Disable PV spinlocks and use native qspinlock when dedicated pCPUs + * are available. + */ + if (kvm_para_has_hint(KVM_HINTS_REALTIME)) { + pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n"); + goto out; + } - /* Don't use the pvqspinlock code if there is only 1 vCPU. */ - if (num_possible_cpus() == 1) - return; + if (num_possible_cpus() == 1) { + pr_info("PV spinlocks disabled, single CPU\n"); + goto out; + } + + if (nopvspin) { + pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n"); + goto out; + } + + pr_info("PV spinlocks enabled\n"); __pv_init_lock_hash(); pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; @@ -853,6 +873,13 @@ void __init kvm_spinlock_init(void) pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); } + /* + * When PV spinlock is enabled which is preferred over + * virt_spin_lock(), virt_spin_lock_key's value is meaningless. + * Just disable it anyway. + */ +out: + static_branch_disable(&virt_spin_lock_key); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index d5c72cb877b3..77dabedaa9d1 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -114,6 +114,7 @@ int apply_relocate(Elf32_Shdr *sechdrs, *location += sym->st_value; break; case R_386_PC32: + case R_386_PLT32: /* Add the value, subtract its position */ *location += sym->st_value - (uint32_t)location; break; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 54c21d6abd5a..5bb001c0c771 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -106,7 +106,6 @@ fs_initcall(nmi_warning_debugfs); static void nmi_check_duration(struct nmiaction *action, u64 duration) { - u64 whole_msecs = READ_ONCE(action->max_duration); int remainder_ns, decimal_msecs; if (duration < nmi_longest_ns || duration < action->max_duration) @@ -114,12 +113,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) action->max_duration = duration; - remainder_ns = do_div(whole_msecs, (1000 * 1000)); + remainder_ns = do_div(duration, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", - action->handler, whole_msecs, decimal_msecs); + action->handler, duration, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 5e94c4354d4e..0105fd785e9a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -132,6 +132,108 @@ void exit_thread(struct task_struct *tsk) fpu__drop(fpu); } +static int set_new_tls(struct task_struct *p, unsigned long tls) +{ + struct user_desc 
__user *utls = (struct user_desc __user *)tls; + + if (in_ia32_syscall()) + return do_set_thread_area(p, -1, utls, 0); + else + return do_set_thread_area_64(p, ARCH_SET_FS, tls); +} + +static inline int copy_io_bitmap(struct task_struct *tsk) +{ + if (likely(!test_tsk_thread_flag(current, TIF_IO_BITMAP))) + return 0; + + tsk->thread.io_bitmap_ptr = kmemdup(current->thread.io_bitmap_ptr, + IO_BITMAP_BYTES, GFP_KERNEL); + if (!tsk->thread.io_bitmap_ptr) { + tsk->thread.io_bitmap_max = 0; + return -ENOMEM; + } + set_tsk_thread_flag(tsk, TIF_IO_BITMAP); + return 0; +} + +static inline void free_io_bitmap(struct task_struct *tsk) +{ + if (tsk->thread.io_bitmap_ptr) { + kfree(tsk->thread.io_bitmap_ptr); + tsk->thread.io_bitmap_ptr = NULL; + tsk->thread.io_bitmap_max = 0; + } +} + +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct *p, unsigned long tls) +{ + struct inactive_task_frame *frame; + struct fork_frame *fork_frame; + struct pt_regs *childregs; + int ret; + + childregs = task_pt_regs(p); + fork_frame = container_of(childregs, struct fork_frame, regs); + frame = &fork_frame->frame; + + frame->bp = 0; + frame->ret_addr = (unsigned long) ret_from_fork; + p->thread.sp = (unsigned long) fork_frame; + p->thread.io_bitmap_ptr = NULL; + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); + +#ifdef CONFIG_X86_64 + current_save_fsgs(); + p->thread.fsindex = current->thread.fsindex; + p->thread.fsbase = current->thread.fsbase; + p->thread.gsindex = current->thread.gsindex; + p->thread.gsbase = current->thread.gsbase; + + savesegment(es, p->thread.es); + savesegment(ds, p->thread.ds); +#else + p->thread.sp0 = (unsigned long) (childregs + 1); + /* + * Clear all status flags including IF and set fixed bit. 64bit + * does not have this initialization as the frame does not contain + * flags. The flags consistency (especially vs. AC) is there + * ensured via objtool, which lacks 32bit support. + */ + frame->flags = X86_EFLAGS_FIXED; +#endif + + /* Kernel thread ? */ + if (unlikely(p->flags & PF_KTHREAD)) { + memset(childregs, 0, sizeof(struct pt_regs)); + kthread_frame_init(frame, sp, arg); + return 0; + } + + frame->bx = 0; + *childregs = *current_pt_regs(); + childregs->ax = 0; + if (sp) + childregs->sp = sp; + +#ifdef CONFIG_X86_32 + task_user_gs(p) = get_user_gs(current_pt_regs()); +#endif + + ret = copy_io_bitmap(p); + if (ret) + return ret; + + /* Set a new TLS for the child thread? */ + if (clone_flags & CLONE_SETTLS) { + ret = set_new_tls(p, tls); + if (ret) + free_io_bitmap(p); + } + return ret; +} + void flush_thread(void) { struct task_struct *tsk = current; @@ -428,28 +530,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, lockdep_assert_irqs_disabled(); - /* - * If TIF_SSBD is different, select the proper mitigation - * method. Note that if SSBD mitigation is disabled or permanentely - * enabled this branch can't be taken because nothing can set - * TIF_SSBD. - */ - if (tif_diff & _TIF_SSBD) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + /* Handle change of TIF_SSBD depending on the mitigation method. 
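
The restructured __speculation_ctrl_update() keys everything off which SSBD mitigation the CPU offers; on the SPEC_CTRL path it translates the task's TIF_SSBD flag bit directly into the IA32_SPEC_CTRL bit. A sketch of that translation, using the bit positions from the kernel headers of this era (TIF_SSBD is bit 5, SPEC_CTRL_SSBD is bit 2):

#include <stdio.h>
#include <stdint.h>

#define TIF_SSBD		5	/* thread-info flag bit */
#define SPEC_CTRL_SSBD_SHIFT	2	/* IA32_SPEC_CTRL bit */

/* Mirrors the kernel's ssbd_tif_to_spec_ctrl(): shift the TIF bit
 * down into the MSR bit position, no branches needed. */
static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & (1ull << TIF_SSBD)) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	uint64_t tifn = 1ull << TIF_SSBD;

	printf("SPEC_CTRL image: %#llx\n",
	       (unsigned long long)ssbd_tif_to_spec_ctrl(tifn));	/* 0x4 */
	return 0;
}
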
*/ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_ssb_virt_state(tifn); - } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_core_ssb_state(tifn); - } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) { - msr |= ssbd_tif_to_spec_ctrl(tifn); - updmsr = true; - } + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + updmsr |= !!(tif_diff & _TIF_SSBD); + msr |= ssbd_tif_to_spec_ctrl(tifn); } - /* - * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, - * otherwise avoid the MSR write. - */ + /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ if (IS_ENABLED(CONFIG_SMP) && static_branch_unlikely(&switch_to_cond_stibp)) { updmsr |= !!(tif_diff & _TIF_SPEC_IB); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index b8ceec4974fe..6c7d90527156 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -112,74 +112,6 @@ void release_thread(struct task_struct *dead_task) release_vm86_irqs(dead_task); } -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p, unsigned long tls) -{ - struct pt_regs *childregs = task_pt_regs(p); - struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs); - struct inactive_task_frame *frame = &fork_frame->frame; - struct task_struct *tsk; - int err; - - /* - * For a new task use the RESET flags value since there is no before. - * All the status flags are zero; DF and all the system flags must also - * be 0, specifically IF must be 0 because we context switch to the new - * task with interrupts disabled. - */ - frame->flags = X86_EFLAGS_FIXED; - frame->bp = 0; - frame->ret_addr = (unsigned long) ret_from_fork; - p->thread.sp = (unsigned long) fork_frame; - p->thread.sp0 = (unsigned long) (childregs+1); - memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); - - if (unlikely(p->flags & PF_KTHREAD)) { - /* kernel thread */ - memset(childregs, 0, sizeof(struct pt_regs)); - frame->bx = sp; /* function */ - frame->di = arg; - p->thread.io_bitmap_ptr = NULL; - return 0; - } - frame->bx = 0; - *childregs = *current_pt_regs(); - childregs->ax = 0; - if (sp) - childregs->sp = sp; - - task_user_gs(p) = get_user_gs(current_pt_regs()); - - p->thread.io_bitmap_ptr = NULL; - tsk = current; - err = -ENOMEM; - - if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { - p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, - IO_BITMAP_BYTES, GFP_KERNEL); - if (!p->thread.io_bitmap_ptr) { - p->thread.io_bitmap_max = 0; - return -ENOMEM; - } - set_tsk_thread_flag(p, TIF_IO_BITMAP); - } - - err = 0; - - /* - * Set a new TLS for the child thread? - */ - if (clone_flags & CLONE_SETTLS) - err = do_set_thread_area(p, -1, - (struct user_desc __user *)tls, 0); - - if (err && p->thread.io_bitmap_ptr) { - kfree(p->thread.io_bitmap_ptr); - p->thread.io_bitmap_max = 0; - } - return err; -} - void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index af64519b2695..5db6a0737e6d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -151,6 +151,56 @@ enum which_selector { GS }; +/* + * Out of line to be protected from kprobes and tracing. 
If this would be + * traced or probed than any access to a per CPU variable happens with + * the wrong GS. + * + * It is not used on Xen paravirt. When paravirt support is needed, it + * needs to be renamed with native_ prefix. + */ +static noinstr unsigned long __rdgsbase_inactive(void) +{ + unsigned long gsbase; + + lockdep_assert_irqs_disabled(); + + if (!static_cpu_has(X86_FEATURE_XENPV)) { + native_swapgs(); + gsbase = rdgsbase(); + native_swapgs(); + } else { + instrumentation_begin(); + rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + instrumentation_end(); + } + + return gsbase; +} + +/* + * Out of line to be protected from kprobes and tracing. If this would be + * traced or probed than any access to a per CPU variable happens with + * the wrong GS. + * + * It is not used on Xen paravirt. When paravirt support is needed, it + * needs to be renamed with native_ prefix. + */ +static noinstr void __wrgsbase_inactive(unsigned long gsbase) +{ + lockdep_assert_irqs_disabled(); + + if (!static_cpu_has(X86_FEATURE_XENPV)) { + native_swapgs(); + wrgsbase(gsbase); + native_swapgs(); + } else { + instrumentation_begin(); + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + instrumentation_end(); + } +} + /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. @@ -200,22 +250,35 @@ static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); - save_base_legacy(task, task->thread.fsindex, FS); - save_base_legacy(task, task->thread.gsindex, GS); + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + /* + * If FSGSBASE is enabled, we can't make any useful guesses + * about the base, and user code expects us to save the current + * value. Fortunately, reading the base directly is efficient. + */ + task->thread.fsbase = rdfsbase(); + task->thread.gsbase = __rdgsbase_inactive(); + } else { + save_base_legacy(task, task->thread.fsindex, FS); + save_base_legacy(task, task->thread.gsindex, GS); + } } -#if IS_ENABLED(CONFIG_KVM) /* * While a process is running,current->thread.fsbase and current->thread.gsbase - * may not match the corresponding CPU registers (see save_base_legacy()). KVM - * wants an efficient way to save and restore FSBASE and GSBASE. - * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE. + * may not match the corresponding CPU registers (see save_base_legacy()). */ -void save_fsgs_for_kvm(void) +void current_save_fsgs(void) { + unsigned long flags; + + /* Interrupts need to be off for FSGSBASE */ + local_irq_save(flags); save_fsgs(current); + local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(save_fsgs_for_kvm); +#if IS_ENABLED(CONFIG_KVM) +EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, @@ -280,14 +343,26 @@ static __always_inline void load_seg_legacy(unsigned short prev_index, static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { - load_seg_legacy(prev->fsindex, prev->fsbase, - next->fsindex, next->fsbase, FS); - load_seg_legacy(prev->gsindex, prev->gsbase, - next->gsindex, next->gsbase, GS); + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + /* Update the FS and GS selectors if they could have changed. */ + if (unlikely(prev->fsindex || next->fsindex)) + loadseg(FS, next->fsindex); + if (unlikely(prev->gsindex || next->gsindex)) + loadseg(GS, next->gsindex); + + /* Update the bases. 
*/ + wrfsbase(next->fsbase); + __wrgsbase_inactive(next->gsbase); + } else { + load_seg_legacy(prev->fsindex, prev->fsbase, + next->fsindex, next->fsbase, FS); + load_seg_legacy(prev->gsindex, prev->gsbase, + next->gsindex, next->gsbase, GS); + } } -static unsigned long x86_fsgsbase_read_task(struct task_struct *task, - unsigned short selector) +unsigned long x86_fsgsbase_read_task(struct task_struct *task, + unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; @@ -316,7 +391,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; - if (unlikely(idx >= ldt->nr_entries)) + if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); @@ -329,13 +404,44 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, return base; } +unsigned long x86_gsbase_read_cpu_inactive(void) +{ + unsigned long gsbase; + + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { + unsigned long flags; + + local_irq_save(flags); + gsbase = __rdgsbase_inactive(); + local_irq_restore(flags); + } else { + rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + } + + return gsbase; +} + +void x86_gsbase_write_cpu_inactive(unsigned long gsbase) +{ + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { + unsigned long flags; + + local_irq_save(flags); + __wrgsbase_inactive(gsbase); + local_irq_restore(flags); + } else { + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + } +} + unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); - else if (task->thread.fsindex == 0) + else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || + (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); @@ -349,7 +455,8 @@ unsigned long x86_gsbase_read_task(struct task_struct *task) if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); - else if (task->thread.gsindex == 0) + else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || + (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); @@ -371,81 +478,6 @@ void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) task->thread.gsbase = gsbase; } -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p, unsigned long tls) -{ - int err; - struct pt_regs *childregs; - struct fork_frame *fork_frame; - struct inactive_task_frame *frame; - struct task_struct *me = current; - - childregs = task_pt_regs(p); - fork_frame = container_of(childregs, struct fork_frame, regs); - frame = &fork_frame->frame; - - frame->bp = 0; - frame->ret_addr = (unsigned long) ret_from_fork; - p->thread.sp = (unsigned long) fork_frame; - p->thread.io_bitmap_ptr = NULL; - - savesegment(gs, p->thread.gsindex); - p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase; - savesegment(fs, p->thread.fsindex); - p->thread.fsbase = p->thread.fsindex ? 
0 : me->thread.fsbase; - savesegment(es, p->thread.es); - savesegment(ds, p->thread.ds); - memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); - - if (unlikely(p->flags & PF_KTHREAD)) { - /* kernel thread */ - memset(childregs, 0, sizeof(struct pt_regs)); - frame->bx = sp; /* function */ - frame->r12 = arg; - return 0; - } - frame->bx = 0; - *childregs = *current_pt_regs(); - - childregs->ax = 0; - if (sp) - childregs->sp = sp; - - err = -ENOMEM; - if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { - p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr, - IO_BITMAP_BYTES, GFP_KERNEL); - if (!p->thread.io_bitmap_ptr) { - p->thread.io_bitmap_max = 0; - return -ENOMEM; - } - set_tsk_thread_flag(p, TIF_IO_BITMAP); - } - - /* - * Set a new TLS for the child thread? - */ - if (clone_flags & CLONE_SETTLS) { -#ifdef CONFIG_IA32_EMULATION - if (in_ia32_syscall()) - err = do_set_thread_area(p, -1, - (struct user_desc __user *)tls, 0); - else -#endif - err = do_arch_prctl_64(p, ARCH_SET_FS, tls); - if (err) - goto out; - } - err = 0; -out: - if (err && p->thread.io_bitmap_ptr) { - kfree(p->thread.io_bitmap_ptr); - p->thread.io_bitmap_max = 0; - } - - return err; -} - static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 3c5bbe8e4120..2366dbeaf263 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -277,6 +277,12 @@ static int set_segment_reg(struct task_struct *task, if (invalid_selector(value)) return -EIO; + /* + * Writes to FS and GS will change the stored selector. Whether + * this changes the segment base as well depends on whether + * FSGSBASE is enabled. + */ + switch (offset) { case offsetof(struct user_regs_struct,fs): task->thread.fsindex = value; @@ -370,22 +376,12 @@ static int putreg(struct task_struct *child, case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_MAX) return -EIO; - /* - * When changing the FS base, use do_arch_prctl_64() - * to set the index to zero and to set the base - * as requested. - */ - if (child->thread.fsbase != value) - return do_arch_prctl_64(child, ARCH_SET_FS, value); + x86_fsbase_write_task(child, value); return 0; case offsetof(struct user_regs_struct,gs_base): - /* - * Exactly the same here as the %fs handling above. - */ if (value >= TASK_SIZE_MAX) return -EIO; - if (child->thread.gsbase != value) - return do_arch_prctl_64(child, ARCH_SET_GS, value); + x86_gsbase_write_task(child, value); return 0; #endif } @@ -865,14 +861,39 @@ long arch_ptrace(struct task_struct *child, long request, static int putreg32(struct task_struct *child, unsigned regno, u32 value) { struct pt_regs *regs = task_pt_regs(child); + int ret; switch (regno) { SEG32(cs); SEG32(ds); SEG32(es); - SEG32(fs); - SEG32(gs); + + /* + * A 32-bit ptracer on a 64-bit kernel expects that writing + * FS or GS will also update the base. This is needed for + * operations like PTRACE_SETREGS to fully restore a saved + * CPU state. 
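
Concretely, the expectation honored above looks like this from the tracer's side. A hedged sketch using PTRACE_POKEUSER; attach/stop handling and error checking are omitted, and the selector value is whatever the saved 32-bit state held:

#include <stdio.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* After poking the FS selector into the USER area of a stopped
 * tracee, the kernel change above also refreshes thread.fsbase from
 * the descriptor that selector refers to. */
long poke_fs_selector(pid_t pid, unsigned long selector)
{
	return ptrace(PTRACE_POKEUSER, pid,
		      offsetof(struct user_regs_struct, fs), selector);
}

int main(void)
{
	printf("fs lives at USER offset %zu\n",
	       offsetof(struct user_regs_struct, fs));
	return 0;
}
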
+ */ + + case offsetof(struct user32, regs.fs): + ret = set_segment_reg(child, + offsetof(struct user_regs_struct, fs), + value); + if (ret == 0) + child->thread.fsbase = + x86_fsgsbase_read_task(child, value); + return ret; + + case offsetof(struct user32, regs.gs): + ret = set_segment_reg(child, + offsetof(struct user_regs_struct, gs), + value); + if (ret == 0) + child->thread.gsbase = + x86_fsgsbase_read_task(child, value); + return ret; + SEG32(ss); R32(ebx, bx); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 0cc7c0b106bb..b1b96d461bc7 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), }, }, + { /* Handle problems with rebooting on Apple MacBook6,1 */ + .callback = set_pci_reboot, + .ident = "Apple MacBook6,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), + }, + }, { /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = set_pci_reboot, .ident = "Apple MacBookPro5", @@ -469,6 +477,15 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { }, }, + { /* PCIe Wifi card isn't detected after reboot otherwise */ + .callback = set_pci_reboot, + .ident = "Zotac ZBOX CI327 nano", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NA"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"), + }, + }, + /* Sony */ { /* Handle problems with rebooting on Sony VGN-Z540N */ .callback = set_bios_reboot, @@ -530,29 +547,20 @@ static void emergency_vmx_disable_all(void) local_irq_disable(); /* - * We need to disable VMX on all CPUs before rebooting, otherwise - * we risk hanging up the machine, because the CPU ignore INIT - * signals when VMX is enabled. + * Disable VMX on all CPUs before rebooting, otherwise we risk hanging + * the machine, because the CPU blocks INIT when it's in VMX root. * - * We can't take any locks and we may be on an inconsistent - * state, so we use NMIs as IPIs to tell the other CPUs to disable - * VMX and halt. + * We can't take any locks and we may be on an inconsistent state, so + * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. * - * For safety, we will avoid running the nmi_shootdown_cpus() - * stuff unnecessarily, but we don't have a way to check - * if other CPUs have VMX enabled. So we will call it only if the - * CPU we are running on has VMX enabled. - * - * We will miss cases where VMX is not enabled on all CPUs. This - * shouldn't do much harm because KVM always enable VMX on all - * CPUs anyway. But we can miss it on the small window where KVM - * is still enabling VMX. + * Do the NMI shootdown even if VMX if off on _this_ CPU, as that + * doesn't prevent a different CPU from being in VMX root operation. */ - if (cpu_has_vmx() && cpu_vmx_enabled()) { - /* Disable VMX on this CPU. */ - cpu_vmxoff(); + if (cpu_has_vmx()) { + /* Safely force _this_ CPU out of VMX root operation. */ + __cpu_emergency_vmxoff(); - /* Halt and disable VMX on the other CPUs */ + /* Halt and exit VMX root operation on the other CPUs. */ nmi_shootdown_cpus(vmxoff_nmi); } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 77ea96b794bd..366875a828b3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1196,6 +1196,8 @@ void __init setup_arch(char **cmdline_p) reserve_initrd(); acpi_table_upgrade(); + /* Look for ACPI tables and reserve memory occupied by them. 
*/ + acpi_boot_table_init(); vsmp_init(); @@ -1203,11 +1205,6 @@ void __init setup_arch(char **cmdline_p) early_platform_quirks(); - /* - * Parse the ACPI tables for possible boot-time SMP configuration. - */ - acpi_boot_table_init(); - early_acpi_boot_init(); initmem_init(); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 8eb7193e158d..2fdbf5ef8c39 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -770,30 +770,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) { - /* - * This function is fundamentally broken as currently - * implemented. - * - * The idea is that we want to trigger a call to the - * restart_block() syscall and that we want in_ia32_syscall(), - * in_x32_syscall(), etc. to match whatever they were in the - * syscall being restarted. We assume that the syscall - * instruction at (regs->ip - 2) matches whatever syscall - * instruction we used to enter in the first place. - * - * The problem is that we can get here when ptrace pokes - * syscall-like values into regs even if we're not in a syscall - * at all. - * - * For now, we maintain historical behavior and guess based on - * stored state. We could do better by saving the actual - * syscall arch in restart_block or (with caveats on x32) by - * checking if regs->ip points to 'int $0x80'. The current - * behavior is incorrect if a tracer has a different bitness - * than the tracee. - */ #ifdef CONFIG_IA32_EMULATION - if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (current_thread_info()->status & TS_COMPAT_RESTART) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 69881b2d446c..8367bd7a9a81 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -262,6 +262,14 @@ static void notrace start_secondary(void *unused) wmb(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + + /* + * Prevent tail call to cpu_startup_entry() because the stack protector + * guard has been changed a couple of function calls up, in + * boot_init_stack_canary() and must not be checked before tail calling + * another function. + */ + prevent_tail_call_optimization(); } /** @@ -1591,14 +1599,28 @@ int native_cpu_disable(void) if (ret) return ret; - /* - * Disable the local APIC. Otherwise IPI broadcasts will reach - * it. It still responds normally to INIT, NMI, SMI, and SIPI - * messages. - */ - apic_soft_disable(); cpu_disable_common(); + /* + * Disable the local APIC. Otherwise IPI broadcasts will reach + * it. It still responds normally to INIT, NMI, SMI, and SIPI + * messages. + * + * Disabling the APIC must happen after cpu_disable_common() + * which invokes fixup_irqs(). + * + * Disabling the APIC preserves already set bits in IRR, but + * an interrupt arriving after disabling the local APIC does not + * set the corresponding IRR bit. + * + * fixup_irqs() scans IRR for set bits so it can raise a not + * yet handled interrupt on the new destination CPU via an IPI + * but obviously it can't do so for IRR bits which are not set. + * IOW, interrupts arriving after disabling the local APIC will + * be lost. 
+ */ + apic_soft_disable(); + return 0; } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 2d6898c2cb64..6d83b4b857e6 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, * or a page fault), which can make frame pointers * unreliable. */ - if (IS_ENABLED(CONFIG_FRAME_POINTER)) return -EINVAL; } @@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (unwind_error(&state)) return -EINVAL; - /* Success path for non-user tasks, i.e. kthreads and idle tasks */ - if (!(task->flags & (PF_KTHREAD | PF_IDLE))) - return -EINVAL; - return 0; } diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index a49fe1dcb47e..bded81591ad9 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -512,9 +512,6 @@ int tboot_force_iommu(void) if (!tboot_enabled()) return 0; - if (intel_iommu_tboot_noforce) - return 1; - if (no_iommu || swiotlb || dmar_disabled) pr_warning("Forcing Intel-IOMMU to enabled\n"); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index d8673d8a779b..36a585b80d9e 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -25,10 +25,6 @@ #include #include -#ifdef CONFIG_X86_64 -__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; -#endif - unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4bb0f8447112..18648baeccf7 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -37,6 +37,8 @@ #include #include #include +#include +#include #if defined(CONFIG_EDAC) #include @@ -45,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -61,6 +62,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include @@ -86,78 +88,6 @@ static inline void cond_local_irq_disable(struct pt_regs *regs) local_irq_disable(); } -/* - * In IST context, we explicitly disable preemption. This serves two - * purposes: it makes it much less likely that we would accidentally - * schedule in IST context and it will force a warning if we somehow - * manage to schedule by accident. - */ -void ist_enter(struct pt_regs *regs) -{ - if (user_mode(regs)) { - RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); - } else { - /* - * We might have interrupted pretty much anything. In - * fact, if we're a machine check, we can even interrupt - * NMI processing. We don't want in_nmi() to return true, - * but we need to notify RCU. - */ - rcu_nmi_enter(); - } - - preempt_disable(); - - /* This code is a bit fragile. Test it. */ - RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); -} -NOKPROBE_SYMBOL(ist_enter); - -void ist_exit(struct pt_regs *regs) -{ - preempt_enable_no_resched(); - - if (!user_mode(regs)) - rcu_nmi_exit(); -} - -/** - * ist_begin_non_atomic() - begin a non-atomic section in an IST exception - * @regs: regs passed to the IST exception handler - * - * IST exception handlers normally cannot schedule. As a special - * exception, if the exception interrupted userspace code (i.e. - * user_mode(regs) would return true) and the exception was not - * a double fault, it can be safe to schedule. ist_begin_non_atomic() - * begins a non-atomic section within an ist_enter()/ist_exit() region. 
- * Callers are responsible for enabling interrupts themselves inside - * the non-atomic section, and callers must call ist_end_non_atomic() - * before ist_exit(). - */ -void ist_begin_non_atomic(struct pt_regs *regs) -{ - BUG_ON(!user_mode(regs)); - - /* - * Sanity check: we need to be on the normal thread stack. This - * will catch asm bugs and any attempt to use ist_preempt_enable - * from double_fault. - */ - BUG_ON(!on_thread_stack()); - - preempt_enable_no_resched(); -} - -/** - * ist_end_non_atomic() - begin a non-atomic section in an IST exception - * - * Ends a non-atomic section started with ist_begin_non_atomic(). - */ -void ist_end_non_atomic(void) -{ - preempt_disable(); -} - int is_valid_bugaddr(unsigned long addr) { unsigned short ud; @@ -210,6 +140,9 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str, tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; die(str, regs, error_code); + } else { + if (fixup_vdso_exception(regs, trapnr, error_code, 0)) + return 0; } /* @@ -333,7 +266,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign * The net result is that our #GP handler will think that we * entered from usermode with the bad user context. * - * No need for ist_enter here because we don't use RCU. + * No need for nmi_enter() here because we don't use RCU. */ if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && @@ -368,7 +301,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign } #endif - ist_enter(regs); + nmi_enter(); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); tsk->thread.error_code = error_code; @@ -560,6 +493,9 @@ do_general_protection(struct pt_regs *regs, long error_code) tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; + if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0)) + return; + show_signal(tsk, SIGSEGV, "", desc, regs, error_code); force_sig(SIGSEGV); @@ -580,15 +516,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; - /* - * Use ist_enter despite the fact that we don't use an IST stack. - * We can be called from a kprobe in non-CONTEXT_KERNEL kernel - * mode or even during context tracking state changes. - * - * This means that we can't schedule. That's okay. 
- */ - ist_enter(regs); - RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + nmi_enter(); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -609,7 +537,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) cond_local_irq_disable(regs); exit: - ist_exit(regs); + nmi_exit(); } NOKPROBE_SYMBOL(do_int3); @@ -713,7 +641,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) unsigned long dr6; int si_code; - ist_enter(regs); + nmi_enter(); get_debugreg(dr6, 6); /* @@ -806,7 +734,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: - ist_exit(regs); + nmi_exit(); } NOKPROBE_SYMBOL(do_debug); @@ -851,6 +779,9 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) if (!si_code) return; + if (fixup_vdso_exception(regs, trapnr, 0, 0)) + return; + force_sig_fault(SIGFPE, si_code, (void __user *)uprobe_get_trap_addr(regs)); } diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index e0cbe4f2af49..41200706e6da 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -15,18 +15,46 @@ #include #include -#define MAX_NUM_FREQS 9 +#define MAX_NUM_FREQS 16 /* 4 bits to select the frequency */ + +/* + * The frequency numbers in the SDM are e.g. 83.3 MHz, which does not contain a + * lot of accuracy which leads to clock drift. As far as we know Bay Trail SoCs + * use a 25 MHz crystal and Cherry Trail uses a 19.2 MHz crystal, the crystal + * is the source clk for a root PLL which outputs 1600 and 100 MHz. It is + * unclear if the root PLL outputs are used directly by the CPU clock PLL or + * if there is another PLL in between. + * This does not matter though, we can model the chain of PLLs as a single PLL + * with a quotient equal to the quotients of all PLLs in the chain multiplied. + * So we can create a simplified model of the CPU clock setup using a reference + * clock of 100 MHz plus a quotient which gets us as close to the frequency + * from the SDM as possible. + * For the 83.3 MHz example from above this would give us 100 MHz * 5 / 6 = + * 83 and 1/3 MHz, which matches exactly what has been measured on actual hw. + */ +#define TSC_REFERENCE_KHZ 100000 + +struct muldiv { + u32 multiplier; + u32 divider; +}; /* * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. * Unfortunately some Intel Atom SoCs aren't quite compliant to this, * so we need manually differentiate SoC families. This is what the - * field msr_plat does. + * field use_msr_plat does. */ struct freq_desc { - u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */ + bool use_msr_plat; + struct muldiv muldiv[MAX_NUM_FREQS]; + /* + * Some CPU frequencies in the SDM do not map to known PLL freqs, in + * that case the muldiv array is empty and the freqs array is used. + */ u32 freqs[MAX_NUM_FREQS]; + u32 mask; }; /* @@ -35,31 +63,86 @@ struct freq_desc { * by MSR based on SDM. 
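
The payoff of the muldiv representation described above is exact integer math. A worked example for the Bay Trail "000" entry (100 MHz * 5 / 6) with an assumed bus ratio of 16, mirroring how cpu_khz_from_msr() computes the result after these tables:

#include <stdio.h>

#define TSC_REFERENCE_KHZ 100000

/* Round-to-nearest division, like the kernel's DIV_ROUND_CLOSEST(). */
static unsigned long div_round_closest(unsigned long n, unsigned long d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned long mul = 5, div = 6, ratio = 16;
	unsigned long tscref = TSC_REFERENCE_KHZ * mul;
	unsigned long freq = div_round_closest(tscref, div);	/* 83333 kHz */

	/* Multiplying by the ratio before dividing keeps full accuracy:
	 * 500000 * 16 / 6 = 1333333 kHz, whereas freq * ratio would give
	 * 83333 * 16 = 1333328 kHz. */
	unsigned long tsc = div_round_closest(tscref * ratio, div);

	printf("bus = %lu kHz, TSC = %lu kHz\n", freq, tsc);
	return 0;
}
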
*/ static const struct freq_desc freq_desc_pnw = { - 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } + .use_msr_plat = false, + .freqs = { 0, 0, 0, 0, 0, 99840, 0, 83200 }, + .mask = 0x07, }; static const struct freq_desc freq_desc_clv = { - 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } + .use_msr_plat = false, + .freqs = { 0, 133200, 0, 0, 0, 99840, 0, 83200 }, + .mask = 0x07, }; +/* + * Bay Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: + * 000: 100 * 5 / 6 = 83.3333 MHz + * 001: 100 * 1 / 1 = 100.0000 MHz + * 010: 100 * 4 / 3 = 133.3333 MHz + * 011: 100 * 7 / 6 = 116.6667 MHz + * 100: 100 * 4 / 5 = 80.0000 MHz + */ static const struct freq_desc freq_desc_byt = { - 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } + .use_msr_plat = true, + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, + { 4, 5 } }, + .mask = 0x07, }; +/* + * Cherry Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: + * 0000: 100 * 5 / 6 = 83.3333 MHz + * 0001: 100 * 1 / 1 = 100.0000 MHz + * 0010: 100 * 4 / 3 = 133.3333 MHz + * 0011: 100 * 7 / 6 = 116.6667 MHz + * 0100: 100 * 4 / 5 = 80.0000 MHz + * 0101: 100 * 14 / 15 = 93.3333 MHz + * 0110: 100 * 9 / 10 = 90.0000 MHz + * 0111: 100 * 8 / 9 = 88.8889 MHz + * 1000: 100 * 7 / 8 = 87.5000 MHz + */ static const struct freq_desc freq_desc_cht = { - 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 } + .use_msr_plat = true, + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, + { 4, 5 }, { 14, 15 }, { 9, 10 }, { 8, 9 }, + { 7, 8 } }, + .mask = 0x0f, }; +/* + * Merrifield SDM MSR_FSB_FREQ frequencies simplified PLL model: + * 0001: 100 * 1 / 1 = 100.0000 MHz + * 0010: 100 * 4 / 3 = 133.3333 MHz + */ static const struct freq_desc freq_desc_tng = { - 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } + .use_msr_plat = true, + .muldiv = { { 0, 0 }, { 1, 1 }, { 4, 3 } }, + .mask = 0x07, }; +/* + * Moorefield SDM MSR_FSB_FREQ frequencies simplified PLL model: + * 0000: 100 * 5 / 6 = 83.3333 MHz + * 0001: 100 * 1 / 1 = 100.0000 MHz + * 0010: 100 * 4 / 3 = 133.3333 MHz + * 0011: 100 * 1 / 1 = 100.0000 MHz + */ static const struct freq_desc freq_desc_ann = { - 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } + .use_msr_plat = true, + .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 1, 1 } }, + .mask = 0x0f, }; +/* + * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz + * Frequency step for Lightning Mountain SoC is fixed to 78 MHz, + * so all the frequency entries are 78000.
+ */ static const struct freq_desc freq_desc_lgm = { - 1, { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 } + .use_msr_plat = true, + .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, + 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, + .mask = 0x0f, }; static const struct x86_cpu_id tsc_msr_cpu_ids[] = { @@ -81,17 +164,19 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = { */ unsigned long cpu_khz_from_msr(void) { - u32 lo, hi, ratio, freq; + u32 lo, hi, ratio, freq, tscref; const struct freq_desc *freq_desc; const struct x86_cpu_id *id; + const struct muldiv *md; unsigned long res; + int index; id = x86_match_cpu(tsc_msr_cpu_ids); if (!id) return 0; freq_desc = (struct freq_desc *)id->driver_data; - if (freq_desc->msr_plat) { + if (freq_desc->use_msr_plat) { rdmsr(MSR_PLATFORM_INFO, lo, hi); ratio = (lo >> 8) & 0xff; } else { @@ -101,12 +186,28 @@ unsigned long cpu_khz_from_msr(void) /* Get FSB FREQ ID */ rdmsr(MSR_FSB_FREQ, lo, hi); + index = lo & freq_desc->mask; + md = &freq_desc->muldiv[index]; - /* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */ - freq = freq_desc->freqs[lo & 0x7]; + /* + * Note this also catches cases where the index points to an unpopulated + * part of muldiv, in that case the else will set freq and res to 0. + */ + if (md->divider) { + tscref = TSC_REFERENCE_KHZ * md->multiplier; + freq = DIV_ROUND_CLOSEST(tscref, md->divider); + /* + * Multiplying by ratio before the division has better + * accuracy than just calculating freq * ratio. + */ + res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider); + } else { + freq = freq_desc->freqs[index]; + res = freq * ratio; + } - /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ - res = freq * ratio; + if (freq == 0) + pr_err("Error MSR_FSB_FREQ index %d is unknown\n", index); #ifdef CONFIG_X86_LOCAL_APIC lapic_timer_period = (freq * 1000) / HZ; diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 332ae6530fa8..b934f9f68a16 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip) { static struct orc_entry *orc; - if (!orc_init) - return NULL; - if (ip == 0) return &null_orc_entry; @@ -360,8 +357,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, if (!stack_access_ok(state, addr, sizeof(struct pt_regs))) return false; - *ip = regs->ip; - *sp = regs->sp; + *ip = READ_ONCE_NOCHECK(regs->ip); + *sp = READ_ONCE_NOCHECK(regs->sp); return true; } @@ -373,14 +370,43 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr if (!stack_access_ok(state, addr, IRET_FRAME_SIZE)) return false; - *ip = regs->ip; - *sp = regs->sp; + *ip = READ_ONCE_NOCHECK(regs->ip); + *sp = READ_ONCE_NOCHECK(regs->sp); return true; } +/* + * If state->regs is non-NULL, and points to a full pt_regs, just get the reg + * value from state->regs. + * + * Otherwise, if state->regs just points to IRET regs, and the previous frame + * had full regs, it's safe to get the value from the previous regs. This can + * happen when early/late IRQ entry code gets interrupted by an NMI. 
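
The helper this comment introduces indexes pt_regs as a flat array of same-sized words, so offsetof(member)/8 is the slot number. A minimal model of that trick with an invented three-register struct:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Every member is one 64-bit word, so member offset / 8 indexes the
 * struct as a plain array, just like get_reg() does with pt_regs. */
struct regs { uint64_t r10, r13, ip; };

static uint64_t get_reg(const struct regs *r, size_t reg_off)
{
	return ((const uint64_t *)r)[reg_off / sizeof(uint64_t)];
}

int main(void)
{
	struct regs r = { .r10 = 1, .r13 = 2, .ip = 3 };

	printf("%llu\n", (unsigned long long)
	       get_reg(&r, offsetof(struct regs, r13)));	/* 2 */
	return 0;
}
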
+ */ +static bool get_reg(struct unwind_state *state, unsigned int reg_off, + unsigned long *val) +{ + unsigned int reg = reg_off/8; + + if (!state->regs) + return false; + + if (state->full_regs) { + *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]); + return true; + } + + if (state->prev_regs) { + *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]); + return true; + } + + return false; +} + bool unwind_next_frame(struct unwind_state *state) { - unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; + unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; bool indirect = false; @@ -398,8 +424,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); if (!orc) { @@ -442,39 +471,35 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_REG_R10: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { orc_warn("missing regs for base reg R10 at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->r10; break; case ORC_REG_R13: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { orc_warn("missing regs for base reg R13 at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->r13; break; case ORC_REG_DI: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { orc_warn("missing regs for base reg DI at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->di; break; case ORC_REG_DX: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { orc_warn("missing regs for base reg DX at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->dx; break; default: @@ -501,6 +526,7 @@ bool unwind_next_frame(struct unwind_state *state) state->sp = sp; state->regs = NULL; + state->prev_regs = NULL; state->signal = false; break; @@ -512,6 +538,7 @@ bool unwind_next_frame(struct unwind_state *state) } state->regs = (struct pt_regs *)sp; + state->prev_regs = NULL; state->full_regs = true; state->signal = true; break; @@ -523,6 +550,8 @@ bool unwind_next_frame(struct unwind_state *state) goto err; } + if (state->full_regs) + state->prev_regs = state->regs; state->regs = (void *)sp - IRET_FRAME_OFFSET; state->full_regs = false; state->signal = true; @@ -531,14 +560,14 @@ bool unwind_next_frame(struct unwind_state *state) default: orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", orc->type, (void *)orig_ip); - break; + goto err; } /* Find BP: */ switch (orc->bp_reg) { case ORC_REG_UNDEFINED: - if (state->regs && state->full_regs) - state->bp = state->regs->bp; + if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) + state->bp = tmp; break; case ORC_REG_PREV_SP: @@ -585,17 +614,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, memset(state, 0, sizeof(*state)); state->task = 
task; + if (!orc_init) + goto err; + /* * Refuse to unwind the stack of a task while it's executing on another * CPU. This check is racy, but that's ok: the unwinder has other * checks to prevent it from going off the rails. */ if (task_on_another_cpu(task)) - goto done; + goto err; if (regs) { if (user_mode(regs)) - goto done; + goto the_end; state->ip = regs->ip; state->sp = regs->sp; @@ -614,9 +646,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, } else { struct inactive_task_frame *frame = (void *)task->thread.sp; - state->sp = task->thread.sp; + state->sp = task->thread.sp + sizeof(*frame); state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, @@ -628,6 +661,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, * generate some kind of backtrace if this happens. */ void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); + state->error = true; if (get_stack_info(next_page, state->task, &state->stack_info, &state->stack_mask)) return; @@ -648,13 +682,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->sp <= (unsigned long)first_frame)) + state->sp < (unsigned long)first_frame)) unwind_next_frame(state); return; -done: +err: + state->error = true; +the_end: state->stack_info.type = STACK_TYPE_UNKNOWN; - return; } EXPORT_SYMBOL_GPL(__unwind_start); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 8cd745ef8c7b..fae5b00cbccf 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = { static bool is_prefix_bad(struct insn *insn) { + insn_byte_t p; int i; - for (i = 0; i < insn->prefixes.nbytes; i++) { + for_each_insn_prefix(insn, i, p) { insn_attr_t attr; - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + attr = inat_get_opcode_attribute(p); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_ES): case INAT_MAKE_PREFIX(INAT_PFX_CS): @@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); + insn_byte_t p; int i; switch (opc1) { @@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. * No one uses these insns, reject any branch insns with such prefix. 
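
The same decoder-safety point recurs here in uprobes: prefixes.nbytes counts every prefix byte the instruction carried, but the bytes[] cache holds only four entries, so looping nbytes times can read past the cache when a prefix is repeated. for_each_insn_prefix() instead walks the cached entries and stops at the first zero; a simplified model of that bounded walk, assuming the kernel's 4-entry cache:

#include <stdio.h>

#define NUM_CACHED_PREFIXES 4	/* size of insn->prefixes.bytes */

/* Walk only the cached prefix bytes, stopping at the first 0 entry,
 * and reject the operand-size override (0x66) as the hunk above does
 * for branch instructions. */
static int has_operand_size_override(const unsigned char cached[NUM_CACHED_PREFIXES])
{
	for (int i = 0; i < NUM_CACHED_PREFIXES && cached[i]; i++) {
		if (cached[i] == 0x66)
			return 1;
	}
	return 0;
}

int main(void)
{
	const unsigned char prefixes[NUM_CACHED_PREFIXES] = { 0x66, 0, 0, 0 };

	printf("%d\n", has_operand_size_override(prefixes));	/* 1 */
	return 0;
}
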
*/ - for (i = 0; i < insn->prefixes.nbytes; i++) { - if (insn->prefixes.bytes[i] == 0x66) + for_each_insn_prefix(insn, i, p) { + if (p == 0x66) return -ENOTSUPP; } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index e2feacf921a0..1afe211d7a7c 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) -jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) -jiffies_64 = jiffies; #endif +jiffies = jiffies_64; + #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for @@ -362,6 +362,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) + . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . = ALIGN(PAGE_SIZE); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index d78a61408243..7dec43b2c420 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -154,6 +154,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu) return x86_stepping(best->eax); } +static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu) +{ + return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) || + guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) || + guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) || + guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)); +} + +static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu) +{ + return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) || + guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)); +} + static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu) { return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 128d3ad46e96..60c8dcb907a5 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2890,6 +2890,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : (u32)msr_data; + if (efer & EFER_LMA) + ctxt->mode = X86EMUL_MODE_PROT64; return X86EMUL_CONTINUE; } @@ -3617,7 +3619,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) u64 tsc_aux = 0; if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) - return emulate_gp(ctxt, 0); + return emulate_ud(ctxt); ctxt->dst.val = tsc_aux; return X86EMUL_CONTINUE; } @@ -4050,6 +4052,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_clflushopt(struct x86_emulate_ctxt *ctxt) +{ + /* emulating clflushopt regardless of cpuid */ + return X86EMUL_CONTINUE; +} + static int em_movsxd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = (s32) ctxt->src.val; @@ -4592,7 +4600,7 @@ static const struct opcode group11[] = { }; static const struct gprefix pfx_0f_ae_7 = { - I(SrcMem | ByteOp, em_clflush), N, N, N, + I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N, }; static const struct group_dual group15 = { { @@ -5836,6 +5844,8 @@ writeback: } ctxt->eip = ctxt->_eip; + if (ctxt->mode != X86EMUL_MODE_PROT64) + ctxt->eip = (u32)ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index e330e7d125f7..896db1aa77e7 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v) * check if there is pending interrupt from * non-APIC source without intack. 
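 * (that is, an ExtINT: either the in-kernel PIC's output or, with a
 * split irqchip, a vector that userspace has queued)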
*/ -static int kvm_cpu_has_extint(struct kvm_vcpu *v) -{ - u8 accept = kvm_apic_accept_pic_intr(v); - - if (accept) { - if (irqchip_split(v->kvm)) - return pending_userspace_extint(v); - else - return v->kvm->arch.vpic->output; - } else - return 0; -} - -/* - * check if there is injectable interrupt: - * when virtual interrupt delivery enabled, - * interrupt from apic will handled by hardware, - * we don't need to check it here. - */ -int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) +int kvm_cpu_has_extint(struct kvm_vcpu *v) { /* - * FIXME: interrupt.injected represents an interrupt that it's + * FIXME: interrupt.injected represents an interrupt whose * side-effects have already been applied (e.g. bit from IRR * already moved to ISR). Therefore, it is incorrect to rely * on interrupt.injected to know if there is a pending @@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.injected; + if (!kvm_apic_accept_pic_intr(v)) + return 0; + + if (irqchip_split(v->kvm)) + return pending_userspace_extint(v); + else + return v->kvm->arch.vpic->output; +} + +/* + * check if there is an injectable interrupt: + * when virtual interrupt delivery is enabled, + * interrupt from apic will be handled by hardware, + * we don't need to check it here. + */ +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) +{ if (kvm_cpu_has_extint(v)) return 1; @@ -90,20 +88,6 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) */ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) { - /* - * FIXME: interrupt.injected represents an interrupt that it's - * side-effects have already been applied (e.g. bit from IRR - * already moved to ISR). Therefore, it is incorrect to rely - * on interrupt.injected to know if there is a pending - * interrupt in the user-mode LAPIC. - * This leads to nVMX/nSVM not be able to distinguish - * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on - * pending interrupt or should re-inject an injected - * interrupt.
- */ - if (!lapic_in_kernel(v)) - return v->arch.interrupt.injected; - if (kvm_cpu_has_extint(v)) return 1; @@ -117,16 +101,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); */ static int kvm_cpu_get_extint(struct kvm_vcpu *v) { - if (kvm_cpu_has_extint(v)) { - if (irqchip_split(v->kvm)) { - int vector = v->arch.pending_external_vector; - - v->arch.pending_external_vector = -1; - return vector; - } else - return kvm_pic_read_irq(v->kvm); /* PIC */ - } else + if (!kvm_cpu_has_extint(v)) { + WARN_ON(!lapic_in_kernel(v)); return -1; + } + + if (!lapic_in_kernel(v)) + return v->arch.interrupt.nr; + + if (irqchip_split(v->kvm)) { + int vector = v->arch.pending_external_vector; + + v->arch.pending_external_vector = -1; + return vector; + } else + return kvm_pic_read_irq(v->kvm); /* PIC */ } /* @@ -134,13 +123,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v) */ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { - int vector; - - if (!lapic_in_kernel(v)) - return v->arch.interrupt.nr; - - vector = kvm_cpu_get_extint(v); - + int vector = kvm_cpu_get_extint(v); if (vector != -1) return vector; /* PIC */ diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 1cc6c47dc77e..341f58a01de0 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -7,7 +7,7 @@ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE) + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD) #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5d2587005d0e..3f6b866c644d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1684,7 +1684,7 @@ static void start_sw_period(struct kvm_lapic *apic) hrtimer_start(&apic->lapic_timer.timer, apic->lapic_timer.target_expiration, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_HARD); } bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) @@ -2085,7 +2085,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; @@ -2330,7 +2330,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; u32 ppr; - if (!kvm_apic_hw_enabled(apic)) + if (!kvm_apic_present(vcpu)) return -1; __apic_update_ppr(apic, &ppr); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 518100ea5ef4..47c27c6e3842 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -343,6 +343,8 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) { BUG_ON((u64)(unsigned)access_mask != access_mask); BUG_ON((mmio_mask & mmio_value) != mmio_value); + WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); + WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; shadow_mmio_access_mask = access_mask; @@ -405,11 +407,11 @@ static inline bool is_access_track_spte(u64 spte) } /* - * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of + * Due to limited space in PTEs, the MMIO generation is an 18-bit subset of * the memslots generation and is derived as follows: *
* Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 - * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61 + * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62 * * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in * the MMIO generation number, as doing so would require stealing a bit from @@ -418,18 +420,29 @@ static inline bool is_access_track_spte(u64 spte) * requires a full MMU zap). The flag is instead explicitly queried when * checking for MMIO spte cache hits. */ -#define MMIO_SPTE_GEN_MASK GENMASK_ULL(17, 0) #define MMIO_SPTE_GEN_LOW_START 3 #define MMIO_SPTE_GEN_LOW_END 11 -#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ - MMIO_SPTE_GEN_LOW_START) #define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT #define MMIO_SPTE_GEN_HIGH_END 62 + +#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ + MMIO_SPTE_GEN_LOW_START) #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ MMIO_SPTE_GEN_HIGH_START) +#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) +#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) + +/* remember to adjust the comment above as well if you change these */ +static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9); + +#define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) +#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) + +#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) + static u64 generation_mmio_spte_mask(u64 gen) { u64 mask; @@ -437,8 +450,8 @@ static u64 generation_mmio_spte_mask(u64 gen) WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK); - mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK; - mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK; + mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK; + mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK; return mask; } @@ -446,8 +459,8 @@ static u64 get_mmio_spte_generation(u64 spte) { u64 gen; - gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START; - gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START; + gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT; + gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT; return gen; } @@ -580,16 +593,15 @@ static void kvm_mmu_reset_all_pte_masks(void) * the most significant bits of legal physical address space. 
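 * (the idea being that a speculative L1TF load through a non-present
 * SPTE then targets a physical address with nothing mapped behind it)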
*/ shadow_nonpresent_or_rsvd_mask = 0; - low_phys_bits = boot_cpu_data.x86_cache_bits; - if (boot_cpu_data.x86_cache_bits < - 52 - shadow_nonpresent_or_rsvd_mask_len) { + low_phys_bits = boot_cpu_data.x86_phys_bits; + if (boot_cpu_has_bug(X86_BUG_L1TF) && + !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= + 52 - shadow_nonpresent_or_rsvd_mask_len)) { + low_phys_bits = boot_cpu_data.x86_cache_bits + - shadow_nonpresent_or_rsvd_mask_len; shadow_nonpresent_or_rsvd_mask = - rsvd_bits(boot_cpu_data.x86_cache_bits - - shadow_nonpresent_or_rsvd_mask_len, - boot_cpu_data.x86_cache_bits - 1); - low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; - } else - WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF)); + rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); + } shadow_nonpresent_or_rsvd_lower_gfn_mask = GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); @@ -1818,10 +1830,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, * Emulate arch specific page modification logging for the * nested hypervisor */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) { if (kvm_x86_ops->write_log_dirty) - return kvm_x86_ops->write_log_dirty(vcpu); + return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa); return 0; } @@ -2044,7 +2056,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } @@ -4579,7 +4592,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | + gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); @@ -6247,25 +6260,16 @@ static void kvm_set_mmio_spte_mask(void) u64 mask; /* - * Set the reserved bits and the present bit of an paging-structure - * entry to generate page fault with PFER.RSV = 1. + * Set a reserved PA bit in MMIO SPTEs to generate page faults with + * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT + * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports + * 52-bit physical addresses then there are no reserved PA bits in the + * PTEs and so the reserved PA approach must be disabled. */ - - /* - * Mask the uppermost physical address bit, which would be reserved as - * long as the supported physical address width is less than 52. - */ - mask = 1ull << 51; - - /* Set the present bit. */ - mask |= 1ull; - - /* - * If reserved bit is not supported, clear the present bit to disable - * mmio page fault. 
- */ - if (shadow_phys_bits == 52) - mask &= ~1ull; + if (shadow_phys_bits < 52) + mask = BIT_ULL(51) | PT_PRESENT_MASK; + else + mask = 0; kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); } @@ -6460,6 +6464,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) cond_resched_lock(&kvm->mmu_lock); } } + kvm_mmu_commit_zap_page(kvm, &invalid_list); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index d55674f44a18..ea9945a05b83 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -48,7 +48,7 @@ static inline u64 rsvd_bits(int s, int e) if (e < s) return 0; - return ((1ULL << (e - s + 1)) - 1) << s; + return ((2ULL << (e - s)) - 1) << s; } void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask); @@ -209,7 +209,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 3c6522b84ff1..ffcd96fc02d0 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -339,7 +339,7 @@ TRACE_EVENT( /* These depend on page entry type, so compute them now. */ __field(bool, r) __field(bool, x) - __field(u8, u) + __field(signed char, u) ), TP_fast_assign( diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 4e3f137ffa8c..a20fc1ba607f 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -220,7 +220,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte) static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, - int write_fault) + gpa_t addr, int write_fault) { unsigned level, index; pt_element_t pte, orig_pte; @@ -245,7 +245,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) + if (kvm_arch_write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; @@ -442,7 +442,8 @@ retry_walk: (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + addr, write_fault); if (unlikely(ret < 0)) goto error; else if (ret) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 07459120a222..b9d14fdbd2d8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) return 0; } else { - if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) - pr_err("%s: ip 0x%lx next 0x%llx\n", - __func__, kvm_rip_read(vcpu), svm->next_rip); kvm_rip_write(vcpu, svm->next_rip); } svm_set_interrupt_shadow(vcpu, 0); @@ -892,6 +889,11 @@ static int has_svm(void) return 0; } + if (sev_active()) { + pr_info("KVM is unsupported when running as an SEV guest\n"); + return 0; + } + return 1; } @@ -998,33 +1000,32 @@ static void svm_cpu_uninit(int cpu) static int svm_cpu_init(int 
cpu) { struct svm_cpu_data *sd; - int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; - r = -ENOMEM; sd->save_area = alloc_page(GFP_KERNEL); if (!sd->save_area) - goto err_1; + goto free_cpu_data; if (svm_sev_enabled()) { - r = -ENOMEM; sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); if (!sd->sev_vmcbs) - goto err_1; + goto free_save_area; } per_cpu(svm_data, cpu) = sd; return 0; -err_1: +free_save_area: + __free_page(sd->save_area); +free_cpu_data: kfree(sd); - return r; + return -ENOMEM; } @@ -1834,6 +1835,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, struct page **pages; unsigned long first, last; + lockdep_assert_held(&kvm->lock); + if (ulen == 0 || uaddr + ulen < uaddr) return NULL; @@ -1861,7 +1864,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, return NULL; /* Pin the user virtual address. */ - npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); + npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); if (npinned != npages) { pr_err("SEV: Failure locking %lu pages.\n", npages); goto err; @@ -1926,6 +1929,10 @@ static struct kvm *svm_vm_alloc(void) struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm), GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL); + + if (!kvm_svm) + return NULL; + return &kvm_svm->kvm; } @@ -3234,8 +3241,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + PF_VECTOR: - /* When we're shadowing, trap PFs, but not async PF */ - if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) + /* Trap async PF even if not shadowing */ + if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) return NESTED_EXIT_HOST; break; default: @@ -3324,7 +3331,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr dst->iopm_base_pa = from->iopm_base_pa; dst->msrpm_base_pa = from->msrpm_base_pa; dst->tsc_offset = from->tsc_offset; - dst->asid = from->asid; + /* asid not copied, it is handled manually for svm->vmcb. */ dst->tlb_ctl = from->tlb_ctl; dst->int_ctl = from->int_ctl; dst->int_vector = from->int_vector; @@ -3967,6 +3974,12 @@ static int iret_interception(struct vcpu_svm *svm) return 1; } +static int invd_interception(struct vcpu_svm *svm) +{ + /* Treat an INVD instruction as a NOP and just skip it. 
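 Actually invalidating the caches on the guest's behalf would be unsafe for the host, and well-behaved guests do not depend on INVD's destructive, no-writeback semantics.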
*/ + return kvm_skip_emulated_instruction(&svm->vcpu); +} + static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) @@ -4227,8 +4240,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) + !guest_has_spec_ctrl_msr(vcpu)) return 1; msr_info->data = svm->spec_ctrl; @@ -4312,16 +4324,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; case MSR_IA32_SPEC_CTRL: if (!msr->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) + !guest_has_spec_ctrl_msr(vcpu)) return 1; - /* The STIBP bit doesn't fault even if it's not advertised */ - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) + if (kvm_spec_ctrl_test_value(data)) return 1; svm->spec_ctrl = data; - if (!data) break; @@ -4340,18 +4349,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; case MSR_IA32_PRED_CMD: if (!msr->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) + !guest_has_pred_cmd_msr(vcpu)) return 1; if (data & ~PRED_CMD_IBPB) return 1; - + if (!boot_cpu_has(X86_FEATURE_IBPB)) + return 1; if (!data) break; wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - if (is_guest_mode(vcpu)) - break; set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); break; case MSR_AMD64_VIRT_SPEC_CTRL: @@ -4819,7 +4827,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, - [SVM_EXIT_INVD] = emulate_on_interception, + [SVM_EXIT_INVD] = invd_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, @@ -5377,6 +5385,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, * - Tell IOMMU to use legacy mode for this interrupt. * - Retrieve ga_tag of prior interrupt remapping data. */ + pi.prev_ga_tag = 0; pi.is_guest_mode = false; ret = irq_set_vcpu_affinity(host_irq, &pi); @@ -7084,12 +7093,20 @@ static int svm_register_enc_region(struct kvm *kvm, if (!region) return -ENOMEM; + mutex_lock(&kvm->lock); region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); if (!region->pages) { ret = -ENOMEM; + mutex_unlock(&kvm->lock); goto e_free; } + region->uaddr = range->addr; + region->size = range->size; + + list_add_tail(®ion->list, &sev->regions_list); + mutex_unlock(&kvm->lock); + /* * The guest may change the memory encryption attribute from C=0 -> C=1 * or vice versa for this memory range. 
Lets make sure caches are @@ -7098,13 +7115,6 @@ static int svm_register_enc_region(struct kvm *kvm, */ sev_clflush_pages(region->pages, region->npages); - region->uaddr = range->addr; - region->size = range->size; - - mutex_lock(&kvm->lock); - list_add_tail(®ion->list, &sev->regions_list); - mutex_unlock(&kvm->lock); - return ret; e_free: diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 2b44554baf28..3f63bd7421ac 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -302,7 +302,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) cpu = get_cpu(); prev = vmx->loaded_vmcs; vmx->loaded_vmcs = vmcs; - vmx_vcpu_load_vmcs(vcpu, cpu); + vmx_vcpu_load_vmcs(vcpu, cpu, prev); vmx_sync_vmcs_host_state(vmx, prev); put_cpu(); @@ -2231,6 +2231,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + + vmx->segment_cache.bitmask = 0; } if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & @@ -3094,8 +3096,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, prepare_vmcs02_early(vmx, vmcs12); if (from_vmentry) { - if (unlikely(!nested_get_vmcs12_pages(vcpu))) + if (unlikely(!nested_get_vmcs12_pages(vcpu))) { + vmx_switch_vmcs(vcpu, &vmx->vmcs01); return NVMX_VMENTRY_KVM_INTERNAL_ERROR; + } if (nested_vmx_check_vmentry_hw(vcpu)) { vmx_switch_vmcs(vcpu, &vmx->vmcs01); @@ -3460,7 +3464,7 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); } -static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) +static int vmx_check_nested_events(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; @@ -3507,8 +3511,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } - if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && - nested_exit_on_intr(vcpu)) { + if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) { if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); @@ -4158,17 +4161,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (likely(!vmx->fail)) { - /* - * TODO: SDM says that with acknowledge interrupt on - * exit, bit 31 of the VM-exit interrupt information - * (valid interrupt) is always set to 1 on - * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't - * need kvm_cpu_has_interrupt(). See the commit - * message for details. 
- */ - if (nested_exit_intr_ack_set(vcpu) && - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - kvm_cpu_has_interrupt(vcpu)) { + if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + nested_exit_intr_ack_set(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | @@ -5314,7 +5308,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, /* Decode instruction info and find the field to access */ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Out-of-range fields always cause a VM exit from L2 to L1 */ if (field >> 15) @@ -5367,7 +5361,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); - switch (exit_reason) { + switch ((u16)exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (is_nmi(intr_info)) return false; @@ -5585,11 +5579,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu)) { sync_vmcs02_to_vmcs12(vcpu, vmcs12); sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); - } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { - if (vmx->nested.hv_evmcs) - copy_enlightened_to_vmcs12(vmx); - else if (enable_shadow_vmcs) - copy_shadow_to_vmcs12(vmx); + } else { + copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); + if (!vmx->nested.need_vmcs12_to_shadow_sync) { + if (vmx->nested.hv_evmcs) + copy_enlightened_to_vmcs12(vmx); + else if (enable_shadow_vmcs) + copy_shadow_to_vmcs12(vmx); + } } BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h index 45eaedee2ac0..19717d0a1100 100644 --- a/arch/x86/kvm/vmx/ops.h +++ b/arch/x86/kvm/vmx/ops.h @@ -13,6 +13,8 @@ #define __ex(x) __kvm_handle_fault_on_reboot(x) asmlinkage void vmread_error(unsigned long field, bool fault); +__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field, + bool fault); void vmwrite_error(unsigned long field, unsigned long value); void vmclear_error(struct vmcs *vmcs, u64 phys_addr); void vmptrld_error(struct vmcs *vmcs, u64 phys_addr); @@ -70,15 +72,28 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) asm volatile("1: vmread %2, %1\n\t" ".byte 0x3e\n\t" /* branch taken hint */ "ja 3f\n\t" - "mov %2, %%" _ASM_ARG1 "\n\t" - "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t" - "2: call vmread_error\n\t" - "xor %k1, %k1\n\t" + + /* + * VMREAD failed. Push '0' for @fault, push the failing + * @field, and bounce through the trampoline to preserve + * volatile registers. + */ + "push $0\n\t" + "push %2\n\t" + "2:call vmread_error_trampoline\n\t" + + /* + * Unwind the stack. Note, the trampoline zeros out the + * memory for @fault so that the result is '0' on error. + */ + "pop %2\n\t" + "pop %1\n\t" "3:\n\t" + /* VMREAD faulted. As above, except push '1' for @fault. 
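 The fixup re-enters at the call, so the same two pops unwind the arguments afterwards.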
*/ ".pushsection .fixup, \"ax\"\n\t" - "4: mov %2, %%" _ASM_ARG1 "\n\t" - "mov $1, %%" _ASM_ARG2 "\n\t" + "4: push $1\n\t" + "push %2\n\t" "jmp 2b\n\t" ".popsection\n\t" _ASM_EXTABLE(1b, 4b) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index f8998a7bc7d5..181e352d38de 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -26,7 +26,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, - [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, + [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, }; /* mapping between fixed pmc index and intel_arch_events array */ @@ -296,7 +296,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, x86_pmu.num_counters_gp); + eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp); pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; + eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len); pmu->available_event_types = ~entry->ebx & ((1ull << eax.split.mask_length) - 1); @@ -306,6 +308,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, x86_pmu.num_counters_fixed); + edx.split.bit_width_fixed = min_t(int, + edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); pmu->counter_bitmask[KVM_PMC_FIXED] = ((u64)1 << edx.split.bit_width_fixed) - 1; } diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 751a384c2eb0..ca4252f81bf8 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -86,6 +86,9 @@ ENTRY(vmx_vmexit) /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE + /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */ + or $1, %_ASM_AX + pop %_ASM_AX .Lvmexit_skip_rsb: #endif @@ -234,3 +237,61 @@ ENTRY(__vmx_vcpu_run) 2: mov $1, %eax jmp 1b ENDPROC(__vmx_vcpu_run) + +/** + * vmread_error_trampoline - Trampoline from inline asm to vmread_error() + * @field: VMCS field encoding that failed + * @fault: %true if the VMREAD faulted, %false if it failed + + * Save and restore volatile registers across a call to vmread_error(). Note, + * all parameters are passed on the stack. + */ +ENTRY(vmread_error_trampoline) + push %_ASM_BP + mov %_ASM_SP, %_ASM_BP + + push %_ASM_AX + push %_ASM_CX + push %_ASM_DX +#ifdef CONFIG_X86_64 + push %rdi + push %rsi + push %r8 + push %r9 + push %r10 + push %r11 +#endif +#ifdef CONFIG_X86_64 + /* Load @field and @fault to arg1 and arg2 respectively. */ + mov 3*WORD_SIZE(%rbp), %_ASM_ARG2 + mov 2*WORD_SIZE(%rbp), %_ASM_ARG1 +#else + /* Parameters are passed on the stack for 32-bit (see asmlinkage). */ + push 3*WORD_SIZE(%ebp) + push 2*WORD_SIZE(%ebp) +#endif + + call vmread_error + +#ifndef CONFIG_X86_64 + add $8, %esp +#endif + + /* Zero out @fault, which will be popped into the result register. 
*/ + _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP) + +#ifdef CONFIG_X86_64 + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rsi + pop %rdi +#endif + pop %_ASM_DX + pop %_ASM_CX + pop %_ASM_AX + pop %_ASM_BP + + ret +ENDPROC(vmread_error_trampoline) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8129b6b27c93..0564c05c1ce0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -648,43 +648,15 @@ void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) } #ifdef CONFIG_KEXEC_CORE -/* - * This bitmap is used to indicate whether the vmclear - * operation is enabled on all cpus. All disabled by - * default. - */ -static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; - -static inline void crash_enable_local_vmclear(int cpu) -{ - cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline void crash_disable_local_vmclear(int cpu) -{ - cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline int crash_local_vmclear_enabled(int cpu) -{ - return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - static void crash_vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; - if (!crash_local_vmclear_enabled(cpu)) - return; - list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); } -#else -static inline void crash_enable_local_vmclear(int cpu) { } -static inline void crash_disable_local_vmclear(int cpu) { } #endif /* CONFIG_KEXEC_CORE */ static void __loaded_vmcs_clear(void *arg) @@ -696,19 +668,24 @@ static void __loaded_vmcs_clear(void *arg) return; /* vcpu migration can race with cpu offline */ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) per_cpu(current_vmcs, cpu) = NULL; - crash_disable_local_vmclear(cpu); + + vmcs_clear(loaded_vmcs->vmcs); + if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) + vmcs_clear(loaded_vmcs->shadow_vmcs); + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); /* - * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link - * is before setting loaded_vmcs->vcpu to -1 which is done in - * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist - * then adds the vmcs into percpu list before it is deleted. + * Ensure all writes to loaded_vmcs, including deleting it from its + * current percpu list, complete before setting loaded_vmcs->vcpu to + * -1, otherwise a different cpu can see vcpu == -1 first and add + * loaded_vmcs to its percpu list before it's deleted from this cpu's + * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs(). 
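+	 *
+	 * Roughly:
+	 *
+	 *	__loaded_vmcs_clear()		vmx_vcpu_load_vmcs()
+	 *	  list_del(...)			  [reads loaded_vmcs->cpu]
+	 *	  smp_wmb()			  smp_rmb()
+	 *	  loaded_vmcs->cpu = -1		  list_add(...)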
*/ smp_wmb(); - loaded_vmcs_init(loaded_vmcs); - crash_enable_local_vmclear(cpu); + loaded_vmcs->cpu = -1; + loaded_vmcs->launched = 0; } void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) @@ -1153,6 +1130,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->guest_msrs[i].mask); } + + if (vmx->nested.need_vmcs12_to_shadow_sync) + nested_sync_vmcs12_to_shadow(vcpu); + if (vmx->guest_state_loaded) return; @@ -1170,7 +1151,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) gs_base = cpu_kernelmode_gs_base(cpu); if (likely(is_64bit_mm(current->mm))) { - save_fsgs_for_kvm(); + current_save_fsgs(); fs_sel = current->thread.fsindex; gs_sel = current->thread.gsindex; fs_base = current->thread.fsbase; @@ -1309,33 +1290,42 @@ after_clear_sn: pi_set_on(pi_desc); } -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, + struct loaded_vmcs *buddy) { struct vcpu_vmx *vmx = to_vmx(vcpu); bool already_loaded = vmx->loaded_vmcs->cpu == cpu; + struct vmcs *prev; if (!already_loaded) { loaded_vmcs_clear(vmx->loaded_vmcs); local_irq_disable(); - crash_disable_local_vmclear(cpu); /* - * Read loaded_vmcs->cpu should be before fetching - * loaded_vmcs->loaded_vmcss_on_cpu_link. - * See the comments in __loaded_vmcs_clear(). + * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to + * this cpu's percpu list, otherwise it may not yet be deleted + * from its previous cpu's percpu list. Pairs with the + * smp_wmb() in __loaded_vmcs_clear(). */ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); - crash_enable_local_vmclear(cpu); local_irq_enable(); } - if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { + prev = per_cpu(current_vmcs, cpu); + if (prev != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); + + /* + * No indirect branch prediction barrier needed when switching + * the active VMCS within a guest, e.g. on nested VM-Enter. + * The L1 VMM can protect itself with retpolines, IBPB or IBRS. + */ + if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) + indirect_branch_prediction_barrier(); } if (!already_loaded) { @@ -1380,11 +1370,10 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx_vcpu_load_vmcs(vcpu, cpu); + vmx_vcpu_load_vmcs(vcpu, cpu, NULL); vmx_vcpu_pi_load(vcpu, cpu); - vmx->host_pkru = read_pkru(); vmx->host_debugctlmsr = get_debugctlmsr(); } @@ -1552,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) static int skip_emulated_instruction(struct kvm_vcpu *vcpu) { - unsigned long rip; + unsigned long rip, orig_rip; /* * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on @@ -1564,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) */ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { - rip = kvm_rip_read(vcpu); - rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + orig_rip = kvm_rip_read(vcpu); + rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); +#ifdef CONFIG_X86_64 + /* + * We need to mask out the high 32 bits of RIP if not in 64-bit + * mode, but just finding out that we are in 64-bit mode is + * quite expensive. Only do it if there was a carry.
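+	 * ((rip ^ orig_rip) >> 31) == 3 below means exactly bits 31 and
+	 * 32 changed: the add carried out of the low 32 bits while the
+	 * upper bits started out clear, which is the signature of a
+	 * wrapped 32-bit RIP.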
+ */ + if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu)) + rip = (u32)rip; +#endif kvm_rip_write(vcpu, rip); } else { if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) @@ -1790,7 +1788,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + !guest_has_spec_ctrl_msr(vcpu)) return 1; msr_info->data = to_vmx(vcpu)->spec_ctrl; @@ -1973,15 +1971,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + !guest_has_spec_ctrl_msr(vcpu)) return 1; - /* The STIBP bit doesn't fault even if it's not advertised */ - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) + if (kvm_spec_ctrl_test_value(data)) return 1; vmx->spec_ctrl = data; - if (!data) break; @@ -2003,12 +1999,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_PRED_CMD: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + !guest_has_pred_cmd_msr(vcpu)) return 1; if (data & ~PRED_CMD_IBPB) return 1; - + if (!boot_cpu_has(X86_FEATURE_IBPB)) + return 1; if (!data) break; @@ -2252,21 +2249,6 @@ static int hardware_enable(void) !hv_get_vp_assist_page(cpu)) return -EFAULT; - INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); - INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); - spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - - /* - * Now we can enable the vmclear operation in kdump - * since the loaded_vmcss_on_cpu list on this cpu - * has been initialized. - * - * Though the cpu is not in VMX operation now, there - * is no problem to enable the vmclear operation - * for the loaded_vmcss_on_cpu list is empty! 
- */ - crash_enable_local_vmclear(cpu); - rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; @@ -3943,6 +3925,8 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx) void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { + BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS); + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; @@ -4505,8 +4489,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { - return (!to_vmx(vcpu)->nested.nested_run_pending && - vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + if (to_vmx(vcpu)->nested.nested_run_pending) + return false; + + if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) + return true; + + return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } @@ -4600,7 +4589,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, */ static void kvm_machine_check(void) { -#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) +#if defined(CONFIG_X86_MCE) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, @@ -5918,6 +5907,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_APIC_ACCESS && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; @@ -6452,23 +6442,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx) -{ - u32 host_umwait_control; - - if (!vmx_has_waitpkg(vmx)) - return; - - host_umwait_control = get_umwait_control_msr(); - - if (vmx->msr_ia32_umwait_control != host_umwait_control) - add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL, - vmx->msr_ia32_umwait_control, - host_umwait_control, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL); -} - static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -6525,8 +6498,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_write32(PLE_WINDOW, vmx->ple_window); } - if (vmx->nested.need_vmcs12_to_shadow_sync) - nested_sync_vmcs12_to_shadow(vcpu); + /* + * We did this in prepare_switch_to_guest, because it needs to + * be within srcu_read_lock. + */ + WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); @@ -6555,15 +6531,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) kvm_load_guest_xcr0(vcpu); - if (static_cpu_has(X86_FEATURE_PKU) && - kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && - vcpu->arch.pkru != vmx->host_pkru) - __write_pkru(vcpu->arch.pkru); - pt_guest_enter(vmx); atomic_switch_perf_msrs(vmx); - atomic_switch_umwait_control_msr(vmx); if (enable_preemption_timer) vmx_update_hv_timer(vcpu); @@ -6648,18 +6618,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) pt_guest_exit(vmx); - /* - * eager fpu is enabled if PKEY is supported and CR4 is switched - * back on host, so it is safe to read guest PKRU from current - * XSAVE. 
- */ - if (static_cpu_has(X86_FEATURE_PKU) && - kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { - vcpu->arch.pkru = rdpkru(); - if (vcpu->arch.pkru != vmx->host_pkru) - __write_pkru(vmx->host_pkru); - } - kvm_put_guest_xcr0(vcpu); vmx->nested.nested_run_pending = 0; @@ -6684,6 +6642,10 @@ static struct kvm *vmx_vm_alloc(void) struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx), GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL); + + if (!kvm_vmx) + return NULL; + return &kvm_vmx->kvm; } @@ -7310,11 +7272,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa, dst; + gpa_t dst; if (is_guest_mode(vcpu)) { WARN_ON_ONCE(vmx->nested.pml_full); @@ -7333,7 +7295,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) return 1; } - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; + gpa &= ~0xFFFull; dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, @@ -8022,7 +7984,7 @@ module_exit(vmx_exit); static int __init vmx_init(void) { - int r; + int r, cpu; #if IS_ENABLED(CONFIG_HYPERV) /* @@ -8076,6 +8038,12 @@ static int __init vmx_init(void) return r; } + for_each_possible_cpu(cpu) { + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); + INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); + spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + } + #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 5a0f34b1e226..a1919ec7fd10 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -14,8 +14,6 @@ extern const u32 vmx_msr_index[]; extern u64 host_efer; -extern u32 get_umwait_control_msr(void); - #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 #define MSR_TYPE_RW 3 @@ -304,7 +302,8 @@ struct kvm_vmx { }; bool nested_vmx_allowed(struct kvm_vcpu *vcpu); -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu); +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, + struct loaded_vmcs *buddy); void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu); int allocate_vpid(void); void free_vpid(int vpid); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c5e15eba8052..153659e8f403 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); +static void process_smi(struct kvm_vcpu *vcpu); static void enter_smm(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); static void store_regs(struct kvm_vcpu *vcpu); @@ -832,11 +833,25 @@ void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } + + if (static_cpu_has(X86_FEATURE_PKU) && + (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || + (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && + vcpu->arch.pkru != vcpu->arch.host_pkru) + __write_pkru(vcpu->arch.pkru); } EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { + if (static_cpu_has(X86_FEATURE_PKU) && + (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || + (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { + vcpu->arch.pkru = rdpkru(); + if (vcpu->arch.pkru != 
vcpu->arch.host_pkru) + __write_pkru(vcpu->arch.host_pkru); + } + if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); @@ -958,7 +973,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + X86_CR4_SMEP; + unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -966,6 +982,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; + if ((cr4 ^ old_cr4) & X86_CR4_LA57) + return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, @@ -984,7 +1002,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - if (((cr4 ^ old_cr4) & pdptr_bits) || + if (((cr4 ^ old_cr4) & mmu_role_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); @@ -2739,7 +2757,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); @@ -3043,7 +3061,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_APICBASE: msr_info->data = kvm_get_apic_base(vcpu); break; - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); break; case MSR_IA32_TSCDEADLINE: @@ -3607,21 +3625,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) { + /* + * We can accept userspace's request for interrupt injection + * as long as we have a place to store the interrupt number. + * The actual injection will happen when the CPU is able to + * deliver the interrupt. + */ + if (kvm_cpu_has_extint(vcpu)) + return false; + + /* Acknowledging ExtINT does not happen if LINT0 is masked. */ return (!lapic_in_kernel(vcpu) || kvm_apic_accept_pic_intr(vcpu)); } -/* - * if userspace requested an interrupt window, check that the - * interrupt window is open. - * - * No need to exit to userspace if we already have an interrupt queued. - */ static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) { return kvm_arch_interrupt_allowed(vcpu) && - !kvm_cpu_has_interrupt(vcpu) && - !kvm_event_needs_reinjection(vcpu) && kvm_cpu_accept_dm_intr(vcpu); } @@ -3682,7 +3702,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; - if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) + if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000)) goto out; @@ -3753,6 +3773,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, { process_nmi(vcpu); + + if (kvm_check_request(KVM_REQ_SMI, vcpu)) + process_smi(vcpu); + /* * The API doesn't provide the instruction length for software * exceptions, so don't report them. 
As long as the guest RIP @@ -5034,10 +5058,13 @@ set_identity_unlock: r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(u.ps))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit_out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); +set_pit_out: + mutex_unlock(&kvm->lock); break; } case KVM_GET_PIT2: { @@ -5057,10 +5084,13 @@ set_identity_unlock: r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit2_out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); +set_pit2_out: + mutex_unlock(&kvm->lock); break; } case KVM_REINJECT_CONTROL: { @@ -5212,6 +5242,10 @@ static void kvm_init_msr_list(void) if (!kvm_x86_ops->rdtscp_supported()) continue; break; + case MSR_IA32_UMWAIT_CONTROL: + if (!boot_cpu_has(X86_FEATURE_WAITPKG)) + continue; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_x86_ops->pt_supported()) @@ -6819,7 +6853,7 @@ restart: if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) { kvm_rip_write(vcpu, ctxt->eip); - if (r && ctxt->tf) + if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -7555,7 +7589,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } -static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) +static int inject_pending_event(struct kvm_vcpu *vcpu) { int r; @@ -7591,7 +7625,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) * from L2 to L1. */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + r = kvm_x86_ops->check_nested_events(vcpu); if (r != 0) return r; } @@ -7653,7 +7687,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) * KVM_REQ_EVENT only on certain events and not unconditionally? */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + r = kvm_x86_ops->check_nested_events(vcpu); if (r != 0) return r; } @@ -7964,9 +7998,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); } -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, - bool blockable) +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) { unsigned long apic_address; @@ -7977,8 +8010,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (start <= apic_address && apic_address < end) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); - - return 0; } void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) @@ -8130,7 +8161,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; } - if (inject_pending_event(vcpu, req_int_win) != 0) + if (inject_pending_event(vcpu) != 0) req_immediate_exit = true; else { /* Enable SMI/NMI/IRQ window open exits if needed. 
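Taken together with the kvm_load_guest_xcr0()/kvm_put_guest_xcr0() hunks earlier in this series, the PKRU handling reduces to a simple pairing: snapshot the host value right before entry (the hunk below), install the guest value only when the vCPU can actually use protection keys, and undo the swap on the way out. A condensed sketch of that flow; the pkru_guest_enter()/pkru_guest_exit() wrappers are hypothetical, but the fields and primitives are the ones the patch itself uses:

/* Illustrative only: condenses the ordering this series establishes. */
static void pkru_guest_enter(struct kvm_vcpu *vcpu)
{
	/* Snapshot the host's PKRU just before entering the guest. */
	vcpu->arch.host_pkru = read_pkru();

	/* Install the guest value only if pkeys are reachable and differ. */
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru)
		__write_pkru(vcpu->arch.pkru);
}

static void pkru_guest_exit(struct kvm_vcpu *vcpu)
{
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
		/* The guest may have changed PKRU, so always re-read it. */
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			__write_pkru(vcpu->arch.host_pkru);
	}
}

The asymmetry is deliberate: the exit side must re-read PKRU because the guest can write it at will, while the entry side skips the comparatively expensive WRPKRU whenever the two values already match.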
@@ -8222,6 +8253,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu->vcpu_id); guest_enter_irqoff(); + /* Save host pkru register if supported */ + vcpu->arch.host_pkru = read_pkru(); + fpregs_assert_state_consistent(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_return(); @@ -8360,7 +8394,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) - kvm_x86_ops->check_nested_events(vcpu, false); + kvm_x86_ops->check_nested_events(vcpu); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted); @@ -9726,6 +9760,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, { int i; + /* + * Clear out the previous array pointers for the KVM_MR_MOVE case. The + * old arrays will be freed by __kvm_set_memory_region() if installing + * the new memslot is successful. + */ + memset(&slot->arch, 0, sizeof(slot->arch)); + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { struct kvm_lpage_info *linfo; unsigned long ugfn; @@ -9807,6 +9848,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { + if (change == KVM_MR_MOVE) + return kvm_arch_create_memslot(kvm, memslot, + mem->memory_size >> PAGE_SHIFT); + return 0; } @@ -10330,6 +10375,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) EXPORT_SYMBOL_GPL(kvm_arch_no_poll); +int kvm_spec_ctrl_test_value(u64 value) +{ + /* + * test that setting IA32_SPEC_CTRL to given value + * is allowed by the host processor + */ + + u64 saved_value; + unsigned long flags; + int ret = 0; + + local_irq_save(flags); + + if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) + ret = 1; + else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) + ret = 1; + else + wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); + + local_irq_restore(flags); + + return ret; +} +EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index de6b55484876..c520d373790a 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -368,5 +368,6 @@ static inline bool kvm_pat_valid(u64 data) void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); +int kvm_spec_ctrl_test_value(u64 value); #endif diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 86976b55ae74..497b4a8414b3 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -36,8 +36,8 @@ jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(100b, 103b) - _ASM_EXTABLE_UA(101b, 103b) + _ASM_EXTABLE_CPY(100b, 103b) + _ASM_EXTABLE_CPY(101b, 103b) .endm /* @@ -116,26 +116,26 @@ ENTRY(copy_user_generic_unrolled) 60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */ .previous - _ASM_EXTABLE_UA(1b, 30b) - _ASM_EXTABLE_UA(2b, 30b) - _ASM_EXTABLE_UA(3b, 30b) - _ASM_EXTABLE_UA(4b, 30b) - _ASM_EXTABLE_UA(5b, 30b) - _ASM_EXTABLE_UA(6b, 30b) - _ASM_EXTABLE_UA(7b, 30b) - _ASM_EXTABLE_UA(8b, 30b) - _ASM_EXTABLE_UA(9b, 30b) - _ASM_EXTABLE_UA(10b, 30b) - _ASM_EXTABLE_UA(11b, 30b) - _ASM_EXTABLE_UA(12b, 30b) - _ASM_EXTABLE_UA(13b, 30b) - _ASM_EXTABLE_UA(14b, 30b) - _ASM_EXTABLE_UA(15b, 30b) - _ASM_EXTABLE_UA(16b, 30b) - _ASM_EXTABLE_UA(18b, 40b) - _ASM_EXTABLE_UA(19b, 40b) - _ASM_EXTABLE_UA(21b, 50b) - _ASM_EXTABLE_UA(22b, 50b) + 
_ASM_EXTABLE_CPY(1b, 30b) + _ASM_EXTABLE_CPY(2b, 30b) + _ASM_EXTABLE_CPY(3b, 30b) + _ASM_EXTABLE_CPY(4b, 30b) + _ASM_EXTABLE_CPY(5b, 30b) + _ASM_EXTABLE_CPY(6b, 30b) + _ASM_EXTABLE_CPY(7b, 30b) + _ASM_EXTABLE_CPY(8b, 30b) + _ASM_EXTABLE_CPY(9b, 30b) + _ASM_EXTABLE_CPY(10b, 30b) + _ASM_EXTABLE_CPY(11b, 30b) + _ASM_EXTABLE_CPY(12b, 30b) + _ASM_EXTABLE_CPY(13b, 30b) + _ASM_EXTABLE_CPY(14b, 30b) + _ASM_EXTABLE_CPY(15b, 30b) + _ASM_EXTABLE_CPY(16b, 30b) + _ASM_EXTABLE_CPY(18b, 40b) + _ASM_EXTABLE_CPY(19b, 40b) + _ASM_EXTABLE_CPY(21b, 50b) + _ASM_EXTABLE_CPY(22b, 50b) ENDPROC(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) @@ -180,8 +180,8 @@ ENTRY(copy_user_generic_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 11b) - _ASM_EXTABLE_UA(3b, 12b) + _ASM_EXTABLE_CPY(1b, 11b) + _ASM_EXTABLE_CPY(3b, 12b) ENDPROC(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) @@ -213,7 +213,7 @@ ENTRY(copy_user_enhanced_fast_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 12b) + _ASM_EXTABLE_CPY(1b, 12b) ENDPROC(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) @@ -221,6 +221,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * Try to copy last bytes and clear the rest if needed. * Since protection fault in copy_from/to_user is not a normal situation, * it is not necessary to optimize tail handling. + * Don't try to copy the tail if machine check happened * * Input: * rdi destination @@ -233,12 +234,25 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) ALIGN; .Lcopy_user_handle_tail: movl %edx,%ecx + cmp $18,%eax /* check if X86_TRAP_MC */ + je 3f 1: rep movsb 2: mov %ecx,%eax ASM_CLAC ret - _ASM_EXTABLE_UA(1b, 2b) + /* + * Return zero to pretend that this copy succeeded. This + * is counter-intuitive, but needed to prevent the code + * in lib/iov_iter.c from retrying and running back into + * the poison cache line again. The machine check handler + * will ensure that a SIGBUS is sent to the task. 
+ */ +3: xorl %eax,%eax + ASM_CLAC + ret + + _ASM_EXTABLE_CPY(1b, 2b) END(.Lcopy_user_handle_tail) /* @@ -367,27 +381,27 @@ ENTRY(__copy_user_nocache) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy) - _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy) ENDPROC(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 306c3a0902ba..ecdd7b5beda5 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -70,14 +70,15 @@ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; + insn_byte_t p; insn_get_prefixes(insn); /* Look for any segment override prefixes. */ - for (i = 0; i < insn->prefixes.nbytes; i++) { + for_each_insn_prefix(insn, i, p) { insn_attr_t attr; - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + attr = inat_get_opcode_attribute(p); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; @@ -155,7 +156,7 @@ static bool check_seg_overrides(struct insn *insn, int regoff) */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 @@ -266,7 +267,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) * which may be invalid at this point. 
*/ if (regoff == offsetof(struct pt_regs, ip)) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; @@ -289,7 +290,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. */ - if (user_64bit_mode(regs)) { + if (any_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; @@ -646,23 +647,27 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) */ return (unsigned long)(sel << 4); - if (user_64bit_mode(regs)) { + if (any_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; - if (seg_reg_idx == INAT_SEG_REG_FS) + if (seg_reg_idx == INAT_SEG_REG_FS) { rdmsrl(MSR_FS_BASE, base); - else if (seg_reg_idx == INAT_SEG_REG_GS) + } else if (seg_reg_idx == INAT_SEG_REG_GS) { /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. */ - rdmsrl(MSR_KERNEL_GS_BASE, base); - else + if (user_mode(regs)) + rdmsrl(MSR_KERNEL_GS_BASE, base); + else + rdmsrl(MSR_GS_BASE, base); + } else { base = 0; + } return base; } @@ -703,7 +708,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) if (sel < 0) return 0; - if (user_64bit_mode(regs) || v8086_mode(regs)) + if (any_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) @@ -948,7 +953,7 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, * following instruction. */ if (*regoff == -EDOM) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; @@ -1250,7 +1255,7 @@ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) * After computed, the effective address is treated as an unsigned * quantity. */ - if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) + if (!any_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 92748660ba51..dc2fb886db2b 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -15,8 +15,6 @@ * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. */ -.weak memcpy - /* * memcpy - Copy a memory block. 
* @@ -29,7 +27,7 @@ * rax original destination */ ENTRY(__memcpy) -ENTRY(memcpy) +SYM_FUNC_START_WEAK(memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index bbec69d8223b..5daea8ef588e 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -24,15 +24,10 @@ * Output: * rax: dest */ -.weak memmove - -ENTRY(memmove) +SYM_FUNC_START_WEAK(memmove) ENTRY(__memmove) - /* Handle more 32 bytes in loop */ mov %rdi, %rax - cmp $0x20, %rdx - jb 1f /* Decide forward/backward copy mode */ cmp %rdi, %rsi @@ -42,7 +37,9 @@ ENTRY(__memmove) cmp %rdi, %r8 jg 2f + /* FSRM implies ERMS => no length checks, do the copy directly */ .Lmemmove_begin_forward: + ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS /* @@ -114,6 +111,8 @@ ENTRY(__memmove) */ .p2align 4 2: + cmp $0x20, %rdx + jb 1f cmp $680, %rdx jb 6f cmp %dil, %sil diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 9bc861c71e75..e3376c7d4c97 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -6,8 +6,6 @@ #include #include -.weak memset - /* * ISO C memset - set a memory block to a byte value. This function uses fast * string to get better performance than the original function. The code is @@ -19,7 +17,7 @@ * * rax original destination */ -ENTRY(memset) +SYM_FUNC_START_WEAK(memset) ENTRY(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index 4321fa02e18d..419365c48b2a 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c @@ -26,6 +26,16 @@ #include #include +/* + * Use KFPU_387. MMX instructions are not affected by MXCSR, + * but both AMD and Intel documentation states that even integer MMX + * operations will result in #MF if an exception is pending in FCW. + * + * EMMS is not needed afterwards because, after calling kernel_fpu_end(), + * any subsequent user of the 387 stack will reinitialize it using + * KFPU_387. + */ + void *_mmx_memcpy(void *to, const void *from, size_t len) { void *p; @@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) p = to; i = len >> 6; /* len/64 */ - kernel_fpu_begin(); + kernel_fpu_begin_mask(KFPU_387); __asm__ __volatile__ ( "1: prefetch (%0)\n" /* This set is 28 bytes */ @@ -127,7 +137,7 @@ static void fast_clear_page(void *page) { int i; - kernel_fpu_begin(); + kernel_fpu_begin_mask(KFPU_387); __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : @@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from) { int i; - kernel_fpu_begin(); + kernel_fpu_begin_mask(KFPU_387); /* * maybe the prefetch stuff can go before the expensive fnsave... 
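A sketch of the calling pattern the mmx_32.c hunks above converge on, assuming only what the diff itself shows (kernel_fpu_begin_mask() and KFPU_387 from the kernel FPU API; this is kernel-context illustration, not userspace-buildable), with the MMX copy body elided:

    #include <linux/types.h>
    #include <asm/fpu/api.h>    /* kernel_fpu_begin_mask(), KFPU_387 */

    static void mmx_helper_sketch(void *to, const void *from, size_t len)
    {
        /*
         * KFPU_387 initializes the x87 state, so no exception can be
         * left pending in FCW to raise #MF on the first MMX insn.
         */
        kernel_fpu_begin_mask(KFPU_387);

        /* ... prefetch + movq copy loop as in _mmx_memcpy() ... */

        /*
         * No EMMS needed: the next KFPU_387 user reinitializes the
         * 387 stack anyway, as the new comment above explains.
         */
        kernel_fpu_end();
    }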
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page) { int i; - kernel_fpu_begin(); + kernel_fpu_begin_mask(KFPU_387); __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : @@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from) { int i; - kernel_fpu_begin(); + kernel_fpu_begin_mask(KFPU_387); __asm__ __volatile__ ( "1: prefetch (%0)\n" diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index fff28c6f73a2..1847e993ac63 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) asm volatile( " testq %[size8],%[size8]\n" " jz 4f\n" + " .align 16\n" "0: movq $0,(%[dst])\n" " addq $8,%[dst]\n" " decl %%ecx ; jnz 0b\n" @@ -119,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) */ if (size < 8) { if (!IS_ALIGNED(dest, 4) || size != 4) - clean_cache_range(dst, 1); + clean_cache_range(dst, size); } else { if (!IS_ALIGNED(dest, 8)) { dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index f031c0e19356..515cdee90df7 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -209,7 +209,7 @@ sqrt_stage_2_finish: #ifdef PARANOID /* It should be possible to get here only if the arg is ffff....ffff */ - cmp $0xffffffff,FPU_fsqrt_arg_1 + cmpl $0xffffffff,FPU_fsqrt_arg_1 jnz sqrt_stage_2_error #endif /* PARANOID */ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 84373dc9b341..bbc68a54795e 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -13,7 +13,7 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 4d75bc656f97..3626bae87d40 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -141,6 +141,18 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, } EXPORT_SYMBOL(ex_handler_ext); +__visible bool ex_handler_copy(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. 
Non-canonical address?"); + regs->ip = ex_fixup_addr(fixup); + regs->ax = trapnr; + return true; +} +EXPORT_SYMBOL(ex_handler_copy); + __visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, @@ -186,17 +198,21 @@ __visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup, } EXPORT_SYMBOL(ex_handler_clear_fs); -__visible bool ex_has_fault_handler(unsigned long ip) +enum handler_type ex_get_fault_handler_type(unsigned long ip) { const struct exception_table_entry *e; ex_handler_t handler; e = search_exception_tables(ip); if (!e) - return false; + return EX_HANDLER_NONE; handler = ex_fixup_handler(e); - - return handler == ex_handler_fault; + if (handler == ex_handler_fault) + return EX_HANDLER_FAULT; + else if (handler == ex_handler_uaccess || handler == ex_handler_copy) + return EX_HANDLER_UACCESS; + else + return EX_HANDLER_OTHER; } int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c494c8c05824..61562a6c310c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -29,6 +29,7 @@ #include /* efi_recover_from_page_fault()*/ #include /* store_idt(), ... */ #include /* exception stack */ +#include /* fixup_vdso_exception() */ #define CREATE_TRACE_POINTS #include @@ -703,11 +704,9 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code, oops_end(flags, regs, sig); } -static void set_signal_archinfo(unsigned long address, - unsigned long error_code) +static void sanitize_error_code(unsigned long address, + unsigned long *error_code) { - struct task_struct *tsk = current; - /* * To avoid leaking information about the kernel page * table layout, pretend that user-mode accesses to @@ -718,7 +717,13 @@ static void set_signal_archinfo(unsigned long address, * information and does not appear to cause any problems. */ if (address >= TASK_SIZE_MAX) - error_code |= X86_PF_PROT; + *error_code |= X86_PF_PROT; +} + +static void set_signal_archinfo(unsigned long address, + unsigned long error_code) +{ + struct task_struct *tsk = current; tsk->thread.trap_nr = X86_TRAP_PF; tsk->thread.error_code = error_code | X86_PF_USER; @@ -759,6 +764,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, * faulting through the emulate_vsyscall() logic. */ if (current->thread.sig_on_uaccess_err && signal) { + sanitize_error_code(address, &error_code); + set_signal_archinfo(address, error_code); /* XXX: hwpoison faults will set the wrong code. */ @@ -907,13 +914,10 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (is_errata100(regs, address)) return; - /* - * To avoid leaking information about the kernel page table - * layout, pretend that user-mode accesses to kernel addresses - * are always protection faults. 
- */ - if (address >= TASK_SIZE_MAX) - error_code |= X86_PF_PROT; + sanitize_error_code(address, &error_code); + + if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address)) + return; if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); @@ -1030,6 +1034,11 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, if (is_prefetch(regs, error_code, address)) return; + sanitize_error_code(address, &error_code); + + if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address)) + return; + set_signal_archinfo(address, error_code); #ifdef CONFIG_MEMORY_FAILURE @@ -1200,6 +1209,18 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) if (error_code & X86_PF_PK) return 1; + /* + * SGX hardware blocked the access. This usually happens + * when the enclave memory contents have been destroyed, like + * after a suspend/resume cycle. In any case, the kernel can't + * fix the cause of the fault. Handle the fault as an access + * error even in cases where no actual access violation + * occurred. This allows userspace to rebuild the enclave in + * response to the signal. + */ + if (unlikely(error_code & X86_PF_SGX)) + return 1; + /* * Make sure to check the VMA so that we do not perform * faults just to hit a X86_PF_PK as soon as we fill in a @@ -1227,7 +1248,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) return 0; } -static int fault_in_kernel_space(unsigned long address) +bool fault_in_kernel_space(unsigned long address) { /* * On 64-bit systems, the vsyscall page is at an address above diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index fe7a12599d8e..968d7005f4a7 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page, unsigned long addr, unsigned long end) { unsigned long next; + int result; for (; addr < end; addr = next) { p4d_t *p4d = p4d_page + p4d_index(addr); @@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page, if (p4d_present(*p4d)) { pud = pud_offset(p4d, 0); - ident_pud_init(info, pud, addr, next); + result = ident_pud_init(info, pud, addr, next); + if (result) + return result; + continue; } pud = (pud_t *)info->alloc_pgt_page(info->context); if (!pud) return -ENOMEM; - ident_pud_init(info, pud, addr, next); + + result = ident_pud_init(info, pud, addr, next); + if (result) + return result; + set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag)); } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fd10d91a6115..af352e228fa2 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num) } else { pfn = pgt_buf_end; pgt_buf_end += num; - printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", - pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c new file mode 100644 index 000000000000..f5b85bdc0535 --- /dev/null +++ b/arch/x86/mm/maccess.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#ifdef CONFIG_X86_64 +static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); +} + +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + /* + * Range covering the highest possible canonical userspace address + * as well as non-canonical address range. 
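canonical_address() above is pure bit arithmetic, so the check can be exercised standalone. A userspace demo of the sign-extension round trip, assuming 48 virtual address bits and an arithmetic right shift for signed types (which is what the kernel relies on):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift the address up so the top implemented bit lands in the sign
     * position, then arithmetic-shift back: bit 47 is replicated through
     * bits 48..63. The address is canonical iff this is a no-op. */
    static uint64_t canonical_address(uint64_t vaddr, uint8_t vaddr_bits)
    {
        return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits)) >>
                          (64 - vaddr_bits));
    }

    int main(void)
    {
        uint64_t ok  = 0xffff800000000000ULL; /* canonical kernel address */
        uint64_t bad = 0x0000800000000000ULL; /* inside the VA hole */

        printf("%d %d\n",
               canonical_address(ok, 48)  == ok,   /* prints 1 */
               canonical_address(bad, 48) == bad); /* prints 0 */
        return 0;
    }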
For the canonical range + * we also need to include the userspace guard page. + */ + return vaddr < TASK_SIZE_MAX + PAGE_SIZE || + canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr; +} +#else +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + return vaddr < TASK_SIZE_MAX; +} +#endif + +long probe_kernel_read_strict(void *dst, const void *src, size_t size) +{ + if (unlikely(invalid_probe_range((unsigned long)src))) + return -EFAULT; + + return __probe_kernel_read(dst, src, size); +} + +long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count) +{ + if (unlikely(invalid_probe_range((unsigned long)unsafe_addr))) + return -EFAULT; + + return __strncpy_from_unsafe(dst, unsafe_addr, count); +} diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 9268c12458c8..7b558939b89c 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -229,7 +229,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) if (pgprot_val(old_prot) == pgprot_val(new_prot)) return; - pa = pfn << page_level_shift(level); + pa = pfn << PAGE_SHIFT; size = page_level_size(level); /* @@ -375,6 +375,7 @@ bool force_dma_unencrypted(struct device *dev) return false; } +EXPORT_SYMBOL_GPL(sev_active); /* Architecture __weak replacement functions */ void __init mem_encrypt_free_decrypted_mem(void) diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index e2b0e2ac07bb..84cda5dc0387 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -45,8 +45,8 @@ #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) #define PMD_FLAGS_DEC PMD_FLAGS_LARGE -#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ - (_PAGE_PAT | _PAGE_PWT)) +#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \ + (_PAGE_PAT_LARGE | _PAGE_PWT)) #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index b8ef8557d4b3..2a36902d418c 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c @@ -372,7 +372,7 @@ static void enter_uniprocessor(void) int cpu; int err; - if (downed_cpus == NULL && + if (!cpumask_available(downed_cpus) && !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { pr_notice("Failed to allocate mask\n"); goto out; @@ -402,7 +402,7 @@ static void leave_uniprocessor(void) int cpu; int err; - if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) + if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0) return; pr_notice("Re-enabling CPUs...\n"); for_each_cpu(cpu, downed_cpus) { diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index abffa0be80da..87282258d5be 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, u64 addr, u64 max_addr, u64 size) { return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, NUMA_NO_NODE); + 0, NULL, 0); } int __init setup_emu2phys_nid(int *dfl_phys_nid) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index a19a71b4d185..281e584cfe39 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -42,7 +42,8 @@ struct cpa_data { unsigned long pfn; unsigned int flags; unsigned int force_split : 1, - force_static_prot : 1; + force_static_prot : 1, + force_flush_all : 1; struct page **pages; }; @@ -352,10 +353,10 @@ static void cpa_flush(struct cpa_data *data, 
int cache) return; } - if (cpa->numpages <= tlb_single_page_flush_ceiling) - on_each_cpu(__cpa_flush_tlb, cpa, 1); - else + if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling) flush_tlb_all(); + else + on_each_cpu(__cpa_flush_tlb, cpa, 1); if (!cache) return; @@ -1584,6 +1585,8 @@ static int cpa_process_alias(struct cpa_data *cpa) alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); alias_cpa.curpage = 0; + cpa->force_flush_all = 1; + ret = __change_page_attr_set_clr(&alias_cpa, 0); if (ret) return ret; @@ -1604,6 +1607,7 @@ static int cpa_process_alias(struct cpa_data *cpa) alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); alias_cpa.curpage = 0; + cpa->force_flush_all = 1; /* * The high mapping range is imprecise, so ignore the * return value. diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index d9fbd4f69920..35b2e35c2203 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -1132,12 +1132,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { + kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) @@ -1146,7 +1148,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), print_entry->start, print_entry->end); - kfree(print_entry); return 0; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 7bd2c3a52297..7982f13807aa 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -826,6 +826,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) } free_page((unsigned long)pmd_sv); + + pgtable_pmd_page_dtor(virt_to_page(pmd)); free_page((unsigned long)pmd); return 1; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index e6a9edc5baaf..851359b7edc5 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -327,8 +327,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, /* * The membarrier system call requires a full memory barrier and * core serialization before returning to user-space, after - * storing to rq->curr. Writing to CR3 provides that full - * memory barrier and core serializing instruction. + * storing to rq->curr, when changing mm. This is because + * membarrier() sends IPIs to all CPUs that are in the target mm + * to make them issue memory barriers. However, if another CPU + * switches to/from the target mm concurrently with + * membarrier(), it can cause that CPU not to receive an IPI + * when it really should issue a memory barrier. Writing to CR3 + * provides that full memory barrier and core serializing + * instruction. */ if (real_prev == next) { VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 991549a1c5f3..f85b7365a770 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -9,9 +9,13 @@ #include #include #include - +#include +#include +#include #include #include +#include +#include static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -96,6 +100,7 @@ static int bpf_size_to_x86_bytes(int bpf_size) /* Pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_JIT_REG + 1) +#define X86_REG_R9 (MAX_BPF_JIT_REG + 2) /* * The following table maps BPF registers to x86-64 registers. 
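Before the mapping table in the next hunk, a self-contained sketch of what those tables feed: reg2hex supplies the low three bits of the x86 register number, and is_ereg() marks registers that need a REX extension bit. The helpers below are hypothetical simplifications of this file's add_2mod()/add_2reg(), covering just enough registers to encode one instruction:

    #include <stdint.h>
    #include <stdio.h>

    enum { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4,
           BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9 };

    /* Subset of reg2hex: low 3 bits of the x86 encoding. */
    static const uint8_t reg2hex[] = {
        [BPF_REG_0] = 0, /* rax */
        [BPF_REG_6] = 3, /* rbx */
        [BPF_REG_7] = 5, /* r13 */
    };

    /* r13..r15 live in the extended register file (simplified test). */
    static int is_ereg(int reg)
    {
        return reg >= BPF_REG_7 && reg <= BPF_REG_9;
    }

    int main(void)
    {
        int dst = BPF_REG_6, src = BPF_REG_7;          /* mov rbx, r13 */
        uint8_t rex   = 0x48 | (is_ereg(dst) ? 1 : 0)  /* REX.B */
                             | (is_ereg(src) ? 4 : 0); /* REX.R */
        uint8_t modrm = 0xC0 | reg2hex[dst] | (reg2hex[src] << 3);

        printf("%02x 89 %02x\n", rex, modrm);          /* 4c 89 eb */
        return 0;
    }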
@@ -104,8 +109,8 @@ static int bpf_size_to_x86_bytes(int bpf_size) * register in load/store instructions, it always needs an * extra byte of encoding and is callee saved. * - * Also x86-64 register R9 is unused. x86-64 register R10 is - * used for blinding (if enabled). + * x86-64 register R9 is not used by BPF programs, but can be used by BPF + * trampoline. x86-64 register R10 is used for blinding (if enabled). */ static const int reg2hex[] = { [BPF_REG_0] = 0, /* RAX */ @@ -121,6 +126,20 @@ static const int reg2hex[] = { [BPF_REG_FP] = 5, /* RBP readonly */ [BPF_REG_AX] = 2, /* R10 temp register */ [AUX_REG] = 3, /* R11 temp register */ + [X86_REG_R9] = 1, /* R9 register, 6th function argument */ +}; + +static const int reg2pt_regs[] = { + [BPF_REG_0] = offsetof(struct pt_regs, ax), + [BPF_REG_1] = offsetof(struct pt_regs, di), + [BPF_REG_2] = offsetof(struct pt_regs, si), + [BPF_REG_3] = offsetof(struct pt_regs, dx), + [BPF_REG_4] = offsetof(struct pt_regs, cx), + [BPF_REG_5] = offsetof(struct pt_regs, r8), + [BPF_REG_6] = offsetof(struct pt_regs, bx), + [BPF_REG_7] = offsetof(struct pt_regs, r13), + [BPF_REG_8] = offsetof(struct pt_regs, r14), + [BPF_REG_9] = offsetof(struct pt_regs, r15), }; /* @@ -135,9 +154,23 @@ static bool is_ereg(u32 reg) BIT(BPF_REG_7) | BIT(BPF_REG_8) | BIT(BPF_REG_9) | + BIT(X86_REG_R9) | BIT(BPF_REG_AX)); } +/* + * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 + * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte + * of encoding. al,cl,dl,bl have simpler encoding. + */ +static bool is_ereg_8l(u32 reg) +{ + return is_ereg(reg) || + (1 << reg) & (BIT(BPF_REG_1) | + BIT(BPF_REG_2) | + BIT(BPF_REG_FP)); +} + static bool is_axreg(u32 reg) { return reg == BPF_REG_0; @@ -186,7 +219,10 @@ struct jit_context { #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 -#define PROLOGUE_SIZE 20 +/* Number of bytes emit_patch() needs to generate instructions */ +#define X86_PATCH_SIZE 5 + +#define PROLOGUE_SIZE 25 /* * Emit x86-64 prologue code for BPF program and check its size. 
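X86_PATCH_SIZE above is the length of a near call or jmp: one opcode byte plus a rel32. The displacement that emit_patch() computes in the next hunk is relative to the address of the following instruction, which is easy to sanity-check standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_PATCH_SIZE 5    /* e8/e9 opcode + rel32 */

    /* Mirrors emit_patch()'s math: rel32 = target - (insn + 5),
     * and the result must fit in a signed 32-bit immediate. */
    static int rel32_for(uint64_t insn_addr, uint64_t target, int32_t *rel)
    {
        int64_t off = (int64_t)(target - (insn_addr + X86_PATCH_SIZE));

        if (off != (int32_t)off)
            return -1;          /* out of +/-2 GiB reach */
        *rel = (int32_t)off;
        return 0;
    }

    int main(void)
    {
        int32_t rel;

        if (!rel32_for(0x1000, 0x2000, &rel))
            printf("rel32 = %d\n", rel);  /* 0x2000 - 0x1005 = 4091 */
        return 0;
    }

The prologue's five bytes of atomic nops exist precisely so this encoding can later be poked over them.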
@@ -195,8 +231,13 @@ struct jit_context { static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) { u8 *prog = *pprog; - int cnt = 0; + int cnt = X86_PATCH_SIZE; + /* BPF trampoline can be made to work without these nops, + * but let's waste 5 bytes for now and optimize later + */ + memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt); + prog += cnt; EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ /* sub rsp, rounded_stack_depth */ @@ -213,6 +254,89 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) *pprog = prog; } +static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) +{ + u8 *prog = *pprog; + int cnt = 0; + s64 offset; + + offset = func - (ip + X86_PATCH_SIZE); + if (!is_simm32(offset)) { + pr_err("Target call %p is out of range\n", func); + return -ERANGE; + } + EMIT1_off32(opcode, offset); + *pprog = prog; + return 0; +} + +static int emit_call(u8 **pprog, void *func, void *ip) +{ + return emit_patch(pprog, func, ip, 0xE8); +} + +static int emit_jump(u8 **pprog, void *func, void *ip) +{ + return emit_patch(pprog, func, ip, 0xE9); +} + +static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *old_addr, void *new_addr, + const bool text_live) +{ + const u8 *nop_insn = ideal_nops[NOP_ATOMIC5]; + u8 old_insn[X86_PATCH_SIZE]; + u8 new_insn[X86_PATCH_SIZE]; + u8 *prog; + int ret; + + memcpy(old_insn, nop_insn, X86_PATCH_SIZE); + if (old_addr) { + prog = old_insn; + ret = t == BPF_MOD_CALL ? + emit_call(&prog, old_addr, ip) : + emit_jump(&prog, old_addr, ip); + if (ret) + return ret; + } + + memcpy(new_insn, nop_insn, X86_PATCH_SIZE); + if (new_addr) { + prog = new_insn; + ret = t == BPF_MOD_CALL ? + emit_call(&prog, new_addr, ip) : + emit_jump(&prog, new_addr, ip); + if (ret) + return ret; + } + + ret = -EBUSY; + mutex_lock(&text_mutex); + if (memcmp(ip, old_insn, X86_PATCH_SIZE)) + goto out; + if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { + if (text_live) + text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); + else + memcpy(ip, new_insn, X86_PATCH_SIZE); + } + ret = 0; +out: + mutex_unlock(&text_mutex); + return ret; +} + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *old_addr, void *new_addr) +{ + if (!is_kernel_text((long)ip) && + !is_bpf_text_address((long)ip)) + /* BPF poking in modules is not supported */ + return -EINVAL; + + return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); +} + /* * Generate the following code: * @@ -227,7 +351,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) * goto *(prog->bpf_func + prologue_size); * out: */ -static void emit_bpf_tail_call(u8 **pprog) +static void emit_bpf_tail_call_indirect(u8 **pprog) { u8 *prog = *pprog; int label1, label2, label3; @@ -294,6 +418,68 @@ static void emit_bpf_tail_call(u8 **pprog) *pprog = prog; } +static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, + u8 **pprog, int addr, u8 *image) +{ + u8 *prog = *pprog; + int cnt = 0; + + /* + * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + */ + EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */ + EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT2(X86_JA, 14); /* ja out */ + EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ + EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */ + + poke->ip = image + (addr - X86_PATCH_SIZE); + poke->adj_off = PROLOGUE_SIZE; + + memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE); + prog += 
X86_PATCH_SIZE; + /* out: */ + + *pprog = prog; +} + +static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) +{ + struct bpf_jit_poke_descriptor *poke; + struct bpf_array *array; + struct bpf_prog *target; + int i, ret; + + for (i = 0; i < prog->aux->size_poke_tab; i++) { + poke = &prog->aux->poke_tab[i]; + WARN_ON_ONCE(READ_ONCE(poke->ip_stable)); + + if (poke->reason != BPF_POKE_REASON_TAIL_CALL) + continue; + + array = container_of(poke->tail_call.map, struct bpf_array, map); + mutex_lock(&array->aux->poke_mutex); + target = array->ptrs[poke->tail_call.key]; + if (target) { + /* Plain memcpy is used when image is not live yet + * and still not locked as read-only. Once poke + * location is active (poke->ip_stable), any parallel + * bpf_arch_text_poke() might occur still on the + * read-write image until we finally locked it as + * read-only. Both modifications on the given image + * are under text_mutex to avoid interference. + */ + ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL, + (u8 *)target->bpf_func + + poke->adj_off, false); + BUG_ON(ret < 0); + } + WRITE_ONCE(poke->ip_stable, true); + mutex_unlock(&array->aux->poke_mutex); + } +} + static void emit_mov_imm32(u8 **pprog, bool sign_propagate, u32 dst_reg, const u32 imm32) { @@ -377,6 +563,96 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) *pprog = prog; } +/* LDX: dst_reg = *(u8*)(src_reg + off) */ +static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) +{ + u8 *prog = *pprog; + int cnt = 0; + + switch (size) { + case BPF_B: + /* Emit 'movzx rax, byte ptr [rax + off]' */ + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); + break; + case BPF_H: + /* Emit 'movzx rax, word ptr [rax + off]' */ + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); + break; + case BPF_W: + /* Emit 'mov eax, dword ptr [rax+0x14]' */ + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); + else + EMIT1(0x8B); + break; + case BPF_DW: + /* Emit 'mov rax, qword ptr [rax+0x14]' */ + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); + break; + } + /* + * If insn->off == 0 we can save one extra byte, but + * special case of x86 R13 which always needs an offset + * is not worth the hassle + */ + if (is_imm8(off)) + EMIT2(add_2reg(0x40, src_reg, dst_reg), off); + else + EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off); + *pprog = prog; +} + +/* STX: *(u8*)(dst_reg + off) = src_reg */ +static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) +{ + u8 *prog = *pprog; + int cnt = 0; + + switch (size) { + case BPF_B: + /* Emit 'mov byte ptr [rax + off], al' */ + if (is_ereg(dst_reg) || is_ereg(src_reg) || + /* We have to add extra byte for x86 SIL, DIL regs */ + src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); + else + EMIT1(0x88); + break; + case BPF_H: + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); + else + EMIT2(0x66, 0x89); + break; + case BPF_W: + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); + else + EMIT1(0x89); + break; + case BPF_DW: + EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); + break; + } + if (is_imm8(off)) + EMIT2(add_2reg(0x40, dst_reg, src_reg), off); + else + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off); + *pprog = prog; +} + +static bool ex_handler_bpf(const struct exception_table_entry *x, + struct pt_regs *regs, int trapnr, + unsigned long error_code, unsigned long 
fault_addr) +{ + u32 reg = x->fixup >> 8; + + /* jump over faulting load and clear dest register */ + *(unsigned long *)((void *)regs + reg) = 0; + regs->ip += x->fixup & 0xff; + return true; +} + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { @@ -384,7 +660,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int insn_cnt = bpf_prog->len; bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; - int i, cnt = 0; + int i, cnt = 0, excnt = 0; int proglen = 0; u8 *prog = temp; @@ -715,6 +991,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: if (is_ereg(dst_reg)) @@ -747,64 +1030,64 @@ st: if (is_imm8(insn->off)) /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: - /* Emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(dst_reg) || is_ereg(src_reg) || - /* We have to add extra byte for x86 SIL, DIL regs */ - src_reg == BPF_REG_1 || src_reg == BPF_REG_2) - EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); - else - EMIT1(0x88); - goto stx; case BPF_STX | BPF_MEM | BPF_H: - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); - else - EMIT2(0x66, 0x89); - goto stx; case BPF_STX | BPF_MEM | BPF_W: - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); - else - EMIT1(0x89); - goto stx; case BPF_STX | BPF_MEM | BPF_DW: - EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); -stx: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); - else - EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), - insn->off); + emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); break; /* LDX: dst_reg = *(u8*)(src_reg + off) */ case BPF_LDX | BPF_MEM | BPF_B: - /* Emit 'movzx rax, byte ptr [rax + off]' */ - EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); - goto ldx; + case BPF_LDX | BPF_PROBE_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_H: - /* Emit 'movzx rax, word ptr [rax + off]' */ - EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); - goto ldx; + case BPF_LDX | BPF_PROBE_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: - /* Emit 'mov eax, dword ptr [rax+0x14]' */ - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); - else - EMIT1(0x8B); - goto ldx; + case BPF_LDX | BPF_PROBE_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_DW: - /* Emit 'mov rax, qword ptr [rax+0x14]' */ - EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); -ldx: /* - * If insn->off == 0 we can save one extra byte, but - * special case of x86 R13 which always needs an offset - * is not worth the hassle - */ - if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off); - else - EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), - insn->off); + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); + if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { + struct exception_table_entry *ex; + u8 *_insn = image + proglen; + s64 delta; + + if (!bpf_prog->aux->extable) + break; + + if (excnt >= bpf_prog->aux->num_exentries) { + pr_err("ex gen bug\n"); + return -EFAULT; + } + ex = &bpf_prog->aux->extable[excnt++]; + + delta = _insn - (u8 *)&ex->insn; + if (!is_simm32(delta)) { + 
pr_err("extable->insn doesn't fit into 32-bit\n"); + return -EFAULT; + } + ex->insn = delta; + + delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler; + if (!is_simm32(delta)) { + pr_err("extable->handler doesn't fit into 32-bit\n"); + return -EFAULT; + } + ex->handler = delta; + + if (dst_reg > BPF_REG_9) { + pr_err("verifier error\n"); + return -EFAULT; + } + /* + * Compute size of x86 insn and its target dest x86 register. + * ex_handler_bpf() will use lower 8 bits to adjust + * pt_regs->ip to jump over this x86 instruction + * and upper bits to figure out which pt_regs to zero out. + * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" + * of 4 bytes will be ignored and rbx will be zero inited. + */ + ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8); + } break; /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ @@ -827,17 +1110,16 @@ xadd: if (is_imm8(insn->off)) /* call */ case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; - jmp_offset = func - (image + addrs[i]); - if (!imm32 || !is_simm32(jmp_offset)) { - pr_err("unsupported BPF func %d addr %p image %p\n", - imm32, func, image); + if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) return -EINVAL; - } - EMIT1_off32(0xE8, jmp_offset); break; case BPF_JMP | BPF_TAIL_CALL: - emit_bpf_tail_call(&prog); + if (imm32) + emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], + &prog, addrs[i], image); + else + emit_bpf_tail_call_indirect(&prog); break; /* cond jump */ @@ -1038,7 +1320,16 @@ emit_jmp: } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. + */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } @@ -1048,9 +1339,473 @@ emit_jmp: addrs[i] = proglen; prog = temp; } + + if (image && excnt != bpf_prog->aux->num_exentries) { + pr_err("extable is not populated\n"); + return -EFAULT; + } return proglen; } +static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, + int stack_size) +{ + int i; + /* Store function arguments to stack. + * For a function that accepts two pointers the sequence will be: + * mov QWORD PTR [rbp-0x10],rdi + * mov QWORD PTR [rbp-0x8],rsi + */ + for (i = 0; i < min(nr_args, 6); i++) + emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), + BPF_REG_FP, + i == 5 ? X86_REG_R9 : BPF_REG_1 + i, + -(stack_size - i * 8)); +} + +static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, + int stack_size) +{ + int i; + + /* Restore function arguments from stack. + * For a function that accepts two pointers the sequence will be: + * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] + * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] + */ + for (i = 0; i < min(nr_args, 6); i++) + emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), + i == 5 ? 
X86_REG_R9 : BPF_REG_1 + i, + BPF_REG_FP, + -(stack_size - i * 8)); +} + +static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, + struct bpf_prog *p, int stack_size, bool mod_ret) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (emit_call(&prog, __bpf_prog_enter, prog)) + return -EINVAL; + /* remember prog start time returned by __bpf_prog_enter */ + emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); + + /* arg1: lea rdi, [rbp - stack_size] */ + EMIT4(0x48, 0x8D, 0x7D, -stack_size); + /* arg2: progs[i]->insnsi for interpreter */ + if (!p->jited) + emit_mov_imm64(&prog, BPF_REG_2, + (long) p->insnsi >> 32, + (u32) (long) p->insnsi); + /* call JITed bpf program or interpreter */ + if (emit_call(&prog, p->bpf_func, prog)) + return -EINVAL; + + /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return + * of the previous call which is then passed on the stack to + * the next BPF program. + */ + if (mod_ret) + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); + + /* arg1: mov rdi, progs[i] */ + emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, + (u32) (long) p); + /* arg2: mov rsi, rbx <- start time in nsec */ + emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); + if (emit_call(&prog, __bpf_prog_exit, prog)) + return -EINVAL; + + *pprog = prog; + return 0; +} + +static void emit_nops(u8 **pprog, unsigned int len) +{ + unsigned int i, noplen; + u8 *prog = *pprog; + int cnt = 0; + + while (len > 0) { + noplen = len; + + if (noplen > ASM_NOP_MAX) + noplen = ASM_NOP_MAX; + + for (i = 0; i < noplen; i++) + EMIT1(ideal_nops[noplen][i]); + len -= noplen; + } + + *pprog = prog; +} + +static void emit_align(u8 **pprog, u32 align) +{ + u8 *target, *prog = *pprog; + + target = PTR_ALIGN(prog, align); + if (target != prog) + emit_nops(&prog, target - prog); + + *pprog = prog; +} + +static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) +{ + u8 *prog = *pprog; + int cnt = 0; + s64 offset; + + offset = func - (ip + 2 + 4); + if (!is_simm32(offset)) { + pr_err("Target %p is out of range\n", func); + return -EINVAL; + } + EMIT2_off32(0x0F, jmp_cond + 0x10, offset); + *pprog = prog; + return 0; +} + +static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, + struct bpf_tramp_progs *tp, int stack_size) +{ + int i; + u8 *prog = *pprog; + + for (i = 0; i < tp->nr_progs; i++) { + if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false)) + return -EINVAL; + } + *pprog = prog; + return 0; +} + +static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, + struct bpf_tramp_progs *tp, int stack_size, + u8 **branches) +{ + u8 *prog = *pprog; + int i, cnt = 0; + + /* The first fmod_ret program will receive a garbage return value. + * Set this to 0 to avoid confusing the program. + */ + emit_mov_imm32(&prog, false, BPF_REG_0, 0); + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); + for (i = 0; i < tp->nr_progs; i++) { + if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true)) + return -EINVAL; + + /* mod_ret prog stored return value into [rbp - 8]. Emit: + * if (*(u64 *)(rbp - 8) != 0) + * goto do_fexit; + */ + /* cmp QWORD PTR [rbp - 0x8], 0x0 */ + EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); + + /* Save the location of the branch and generate 6 nops + * (4 bytes for an offset and 2 bytes for the jump). These nops + * are replaced with a conditional jump once do_fexit (i.e. the + * start of the fexit invocation) is finalized.
+ */ + branches[i] = prog; + emit_nops(&prog, 4 + 2); + } + + *pprog = prog; + return 0; +} + +/* Example: + * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); + * its 'struct btf_func_model' will have nr_args=2 + * The assembly code when eth_type_trans is executing after trampoline: + * + * push rbp + * mov rbp, rsp + * sub rsp, 16 // space for skb and dev + * push rbx // temp regs to pass start time + * mov qword ptr [rbp - 16], rdi // save skb pointer to stack + * mov qword ptr [rbp - 8], rsi // save dev pointer to stack + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time if bpf stats are enabled + * lea rdi, [rbp - 16] // R1==ctx of bpf prog + * call addr_of_jited_FENTRY_prog + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack + * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack + * pop rbx + * leave + * ret + * + * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be + * replaced with 'call generated_bpf_trampoline'. When it returns + * eth_type_trans will continue executing with original skb and dev pointers. + * + * The assembly code when eth_type_trans is called from trampoline: + * + * push rbp + * mov rbp, rsp + * sub rsp, 24 // space for skb, dev, return value + * push rbx // temp regs to pass start time + * mov qword ptr [rbp - 24], rdi // save skb pointer to stack + * mov qword ptr [rbp - 16], rsi // save dev pointer to stack + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time if bpf stats are enabled + * lea rdi, [rbp - 24] // R1==ctx of bpf prog + * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack + * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack + * call eth_type_trans+5 // execute body of eth_type_trans + * mov qword ptr [rbp - 8], rax // save return value + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time if bpf stats are enabled + * lea rdi, [rbp - 24] // R1==ctx of bpf prog + * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value + * pop rbx + * leave + * add rsp, 8 // skip eth_type_trans's frame + * ret // return to its caller + */ +int arch_prepare_bpf_trampoline(void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_progs *tprogs, + void *orig_call) +{ + int ret, i, cnt = 0, nr_args = m->nr_args; + int stack_size = nr_args * 8; + struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; + struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; + struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; + u8 **branches = NULL; + u8 *prog; + + /* x86-64 supports up to 6 arguments.
7+ can be added in the future */ + if (nr_args > 6) + return -ENOTSUPP; + + if ((flags & BPF_TRAMP_F_RESTORE_REGS) && + (flags & BPF_TRAMP_F_SKIP_FRAME)) + return -EINVAL; + + if (flags & BPF_TRAMP_F_CALL_ORIG) + stack_size += 8; /* room for return value of orig_call */ + + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* skip patched call instruction and point orig_call to actual + * body of the kernel function. + */ + orig_call += X86_PATCH_SIZE; + + prog = image; + + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ + EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ + EMIT1(0x53); /* push rbx */ + + save_regs(m, &prog, nr_args, stack_size); + + if (fentry->nr_progs) + if (invoke_bpf(m, &prog, fentry, stack_size)) + return -EINVAL; + + if (fmod_ret->nr_progs) { + branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *), + GFP_KERNEL); + if (!branches) + return -ENOMEM; + + if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size, + branches)) { + ret = -EINVAL; + goto cleanup; + } + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + if (fentry->nr_progs || fmod_ret->nr_progs) + restore_regs(m, &prog, nr_args, stack_size); + + /* call original function */ + if (emit_call(&prog, orig_call, prog)) { + ret = -EINVAL; + goto cleanup; + } + /* remember return value on the stack for bpf prog to access */ + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); + } + + if (fmod_ret->nr_progs) { + /* From Intel 64 and IA-32 Architectures Optimization + * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler + * Coding Rule 11: All branch targets should be 16-byte + * aligned. + */ + emit_align(&prog, 16); + /* Update the branches saved in invoke_bpf_mod_ret with the + * aligned address of do_fexit. + */ + for (i = 0; i < fmod_ret->nr_progs; i++) + emit_cond_near_jump(&branches[i], prog, branches[i], + X86_JNE); + } + + if (fexit->nr_progs) + if (invoke_bpf(m, &prog, fexit, stack_size)) { + ret = -EINVAL; + goto cleanup; + } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + restore_regs(m, &prog, nr_args, stack_size); + + /* This needs to be done regardless. If there were fmod_ret programs, + * the return value is only updated on the stack and still needs to be + * restored to R0. + */ + if (flags & BPF_TRAMP_F_CALL_ORIG) + /* restore original return value back into RAX */ + emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); + + EMIT1(0x5B); /* pop rbx */ + EMIT1(0xC9); /* leave */ + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* skip our return address and return to parent */ + EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ + EMIT1(0xC3); /* ret */ + /* Make sure the trampoline generation logic doesn't overflow */ + if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { + ret = -EFAULT; + goto cleanup; + } + ret = prog - (u8 *)image; + +cleanup: + kfree(branches); + return ret; +} + +static int emit_fallback_jump(u8 **pprog) +{ + u8 *prog = *pprog; + int err = 0; + +#ifdef CONFIG_RETPOLINE + /* Note that this assumes that the compiler uses external + * thunks for indirect calls. Both clang and GCC use the same + * naming convention for external thunks. + */ + err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog); +#else + int cnt = 0; + + EMIT2(0xFF, 0xE2); /* jmp rdx */ +#endif + *pprog = prog; + return err; +} + +static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) +{ + u8 *jg_reloc, *prog = *pprog; + int pivot, err, jg_bytes = 1, cnt = 0; + s64 jg_offset; + + if (a == b) { + /* Leaf node of recursion, i.e. not a range of indices + * anymore.
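The control flow being unrolled here is an ordinary binary search over the sorted target addresses; a plain C analogue, with a hypothetical driver and return convention, makes the a/b/pivot recursion easier to follow:

    #include <stdint.h>
    #include <stdio.h>

    /* Leaf: one compare, taken on an exact match (the emitted je);
     * interior node: compare against the pivot and recurse, mirroring
     * the emitted cmp/jg pair. Returns the index, or -1 standing in
     * for the emitted fallback indirect jump. */
    static int dispatch(const int64_t *progs, int a, int b, int64_t func)
    {
        int pivot;

        if (a == b)
            return progs[a] == func ? a : -1;

        pivot = (b - a) / 2;
        if (func > progs[a + pivot])
            return dispatch(progs, a + pivot + 1, b, func); /* upper part */
        return dispatch(progs, a, a + pivot, func);         /* lower part */
    }

    int main(void)
    {
        int64_t progs[] = { 0x1000, 0x2000, 0x3000, 0x4000 };

        printf("%d %d\n",
               dispatch(progs, 0, 3, 0x3000),  /* 2 */
               dispatch(progs, 0, 3, 0x999));  /* -1 */
        return 0;
    }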
+ */ + EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ + if (!is_simm32(progs[a])) + return -1; + EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), + progs[a]); + err = emit_cond_near_jump(&prog, /* je func */ + (void *)progs[a], prog, + X86_JE); + if (err) + return err; + + err = emit_fallback_jump(&prog); /* jmp thunk/indirect */ + if (err) + return err; + + *pprog = prog; + return 0; + } + + /* Not a leaf node, so we pivot, and recursively descend into + * the lower and upper ranges. + */ + pivot = (b - a) / 2; + EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ + if (!is_simm32(progs[a + pivot])) + return -1; + EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); + + if (pivot > 2) { /* jg upper_part */ + /* Require near jump. */ + jg_bytes = 4; + EMIT2_off32(0x0F, X86_JG + 0x10, 0); + } else { + EMIT2(X86_JG, 0); + } + jg_reloc = prog; + + err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ + progs); + if (err) + return err; + + /* From Intel 64 and IA-32 Architectures Optimization + * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler + * Coding Rule 11: All branch targets should be 16-byte + * aligned. + */ + emit_align(&prog, 16); + jg_offset = prog - jg_reloc; + emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); + + err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ + b, progs); + if (err) + return err; + + *pprog = prog; + return 0; +} + +static int cmp_ips(const void *a, const void *b) +{ + const s64 *ipa = a; + const s64 *ipb = b; + + if (*ipa > *ipb) + return 1; + if (*ipa < *ipb) + return -1; + return 0; +} + +int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) +{ + u8 *prog = image; + + sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); + return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); +} struct x64_jit_data { struct bpf_binary_header *header; int *addrs; @@ -1106,7 +1861,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) extra_pass = true; goto skip_init_addrs; } - addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); + addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out_addrs; @@ -1148,12 +1903,24 @@ out_image: break; } if (proglen == oldproglen) { - header = bpf_jit_binary_alloc(proglen, &image, - 1, jit_fill_hole); + /* + * The number of entries in extable is the number of BPF_LDX + * insns that access kernel memory via "pointer to BTF type". + * The verifier changed their opcode from LDX|MEM|size + * to LDX|PROBE_MEM|size to make JITing easier. 
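The allocation below sizes a single buffer for both the JITed instructions and the exception table, padding the former to the latter's alignment. A quick standalone check of that layout arithmetic, with a hypothetical three-int entry struct standing in for the kernel's exception_table_entry:

    #include <stdint.h>
    #include <stdio.h>

    struct extable_entry { int32_t insn, fixup, handler; }; /* stand-in */

    #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint32_t proglen = 1234, num_exentries = 3;
        uint32_t align = _Alignof(struct extable_entry);
        uint64_t extable_off = ROUNDUP(proglen, align);
        uint64_t total = extable_off +
                         num_exentries * sizeof(struct extable_entry);

        /* prints: extable at image+1236, total 1272 bytes */
        printf("extable at image+%llu, total %llu bytes\n",
               (unsigned long long)extable_off,
               (unsigned long long)total);
        return 0;
    }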
+ */ + u32 align = __alignof__(struct exception_table_entry); + u32 extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + + /* allocate module memory for x86 insns and extable */ + header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size, + &image, align, jit_fill_hole); if (!header) { prog = orig_prog; goto out_addrs; } + prog->aux->extable = (void *) image + roundup(proglen, align); } oldproglen = proglen; cond_resched(); @@ -1164,6 +1931,7 @@ out_image: if (image) { if (!prog->is_func || extra_pass) { + bpf_tail_call_direct_fixup(prog); bpf_jit_binary_lock_ro(header); } else { jit_data->addrs = addrs; @@ -1183,7 +1951,7 @@ out_image: if (image) bpf_prog_fill_jited_linfo(prog, addrs + 1); out_addrs: - kfree(addrs); + kvfree(addrs); kfree(jit_data); prog->aux->jit_data = NULL; } diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 4d2a7a764602..2914f900034e 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1705,6 +1705,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, i++; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: @@ -1847,14 +1853,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_B: case BPF_H: case BPF_W: - if (!bpf_prog->aux->verifier_zext) + if (bpf_prog->aux->verifier_zext) break; if (dstk) { EMIT3(0xC7, add_1reg(0x40, IA32_EBP), STACK_VAR(dst_hi)); EMIT(0x0, 4); } else { - EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0); + /* xor dst_hi,dst_hi */ + EMIT2(0x33, + add_2reg(0xC0, dst_hi, dst_hi)); } break; case BPF_DW: @@ -2013,8 +2021,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: { bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = sstk ? IA32_ECX : src_lo; u8 sreg_hi = sstk ? IA32_EBX : src_hi; @@ -2026,6 +2034,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + if (is_jmp64) + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } if (sstk) { @@ -2050,8 +2065,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_K: { bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; u32 hi; @@ -2064,6 +2079,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + if (is_jmp64) + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } /* mov ecx,imm32 */ @@ -2262,7 +2284,16 @@ notyet: } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. + */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index e723559c386a..0c67a5a94de3 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); /* * Device [1022:7808] diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 43867bc85368..eea5a0f3b959 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -33,6 +33,7 @@ #include #include #include +#include #define PCIE_CAP_OFFSET 0x100 diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 91220cc25854..5c11ae66b5d8 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -26,6 +26,7 @@ #include #include #include +#include #include static int xen_pcifront_enable_irq(struct pci_dev *dev) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 01d7ca492741..ae1c5baf27cd 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -85,6 +85,8 @@ static const unsigned long * const efi_tables[] = { #ifdef CONFIG_EFI_RCI2_TABLE &rci2_table_phys, #endif + &efi.tpm_log, + &efi.tpm_final_log, }; u64 efi_setup; /* efi setup_data physical address */ diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index fe0e647411da..528397818110 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -217,28 +217,30 @@ int __init efi_alloc_page_tables(void) gfp_mask = GFP_KERNEL | __GFP_ZERO; efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) - return -ENOMEM; + goto fail; pgd = efi_pgd + pgd_index(EFI_VA_END); p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END); - if (!p4d) { - free_page((unsigned long)efi_pgd); - return -ENOMEM; - } + if (!p4d) + goto free_pgd; pud = pud_alloc(&init_mm, p4d, EFI_VA_END); - if (!pud) { - if (pgtable_l5_enabled()) - free_page((unsigned long) pgd_page_vaddr(*pgd)); - free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); - return -ENOMEM; - } + if (!pud) + goto free_p4d; efi_mm.pgd = efi_pgd; mm_init_cpumask(&efi_mm); init_new_context(NULL, &efi_mm); 
return 0; + +free_p4d: + if (pgtable_l5_enabled()) + free_page((unsigned long)pgd_page_vaddr(*pgd)); +free_pgd: + free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); +fail: + return -ENOMEM; } /* @@ -834,7 +836,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - if (!phys_name || !phys_data) + if (!phys_name || (data && !phys_data)) status = EFI_INVALID_PARAMETER; else status = efi_thunk(set_variable, phys_name, phys_vendor, @@ -865,7 +867,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - if (!phys_name || !phys_data) + if (!phys_name || (data && !phys_data)) status = EFI_INVALID_PARAMETER; else status = efi_thunk(set_variable, phys_name, phys_vendor, diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index fc13cbbb2dce..abb6075397f0 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void) goto out; uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL); - irq_domain_free_fwnode(fn); if (uv_domain) uv_domain->parent = x86_vector_domain; + else + irq_domain_free_fwnode(fn); out: mutex_unlock(&uv_lock); diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 6fe383002125..a19ed3d23185 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend) ret ENDPROC(swsusp_arch_suspend) -ENTRY(restore_image) +SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ movl restore_jump_address, %ebx movl restore_cr3, %ebp @@ -45,9 +45,10 @@ ENTRY(restore_image) /* jump to relocated restore code */ movl relocated_restore_code, %eax jmpl *%eax +SYM_CODE_END(restore_image) /* code below has been relocated to a safe page */ -ENTRY(core_restore_code) +SYM_CODE_START(core_restore_code) movl temp_pgt, %eax movl %eax, %cr3 @@ -77,6 +78,7 @@ copy_loop: done: jmpl *%ebx +SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index fb4ee5444379..9733d1cc791d 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -17,7 +17,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro +# Sanitizer, etc. runtimes are unavailable and cannot be linked here. +GCOV_PROFILE := n KASAN_SANITIZE := n +UBSAN_SANITIZE := n KCOV_INSTRUMENT := n # These are adjustments to the compiler flags used for objects that @@ -25,7 +28,7 @@ KCOV_INSTRUMENT := n PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. 
That # in turn leaves some undefined symbols like __fentry__ in purgatory and not diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 7dce39c8c034..262f83cad355 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -8,6 +8,7 @@ #include #include #include +#include struct real_mode_header *real_mode_header; u32 *trampoline_cr4_features; @@ -34,6 +35,7 @@ void __init reserve_real_mode(void) memblock_reserve(mem, size); set_real_mode_mem(mem); + crash_reserve_low_1M(); } static void __init setup_real_mode(void) diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index 1868b158480d..3a0ef0d57734 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S @@ -29,7 +29,7 @@ .code16 .balign PAGE_SIZE -ENTRY(trampoline_start) +SYM_CODE_START(trampoline_start) wbinvd # Needed for NUMA-Q should be harmless for others LJMPW_RM(1f) @@ -54,11 +54,13 @@ ENTRY(trampoline_start) lmsw %dx # into protected mode ljmpl $__BOOT_CS, $pa_startup_32 +SYM_CODE_END(trampoline_start) .section ".text32","ax" .code32 -ENTRY(startup_32) # note: also used from wakeup_asm.S +SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S jmp *%eax +SYM_CODE_END(startup_32) .bss .balign 8 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index ce7188cbdae5..1c3a1962cade 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -867,9 +867,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, case R_386_PC32: case R_386_PC16: case R_386_PC8: + case R_386_PLT32: /* - * NONE can be ignored and PC relative relocations don't - * need to be adjusted. + * NONE can be ignored and PC relative relocations don't need + * to be adjusted. Because sym must be defined, R_386_PLT32 can + * be treated the same way as R_386_PC32. */ break; @@ -910,9 +912,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, case R_386_PC32: case R_386_PC16: case R_386_PC8: + case R_386_PLT32: /* - * NONE can be ignored and PC relative relocations don't - * need to be adjusted. + * NONE can be ignored and PC relative relocations don't need + * to be adjusted. Because sym must be defined, R_386_PLT32 can + * be treated the same way as R_386_PC32. 
*/ break; diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index e138f7de52d2..6024fafed164 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -175,6 +175,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu) return 0; } +static bool no_vector_callback __initdata; + static void __init xen_hvm_guest_init(void) { if (xen_pv_domain()) @@ -194,7 +196,7 @@ static void __init xen_hvm_guest_init(void) xen_panic_handler_init(); - if (xen_feature(XENFEAT_hvm_callback_vector)) + if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector)) xen_have_vector_callback = 1; xen_hvm_smp_init(); @@ -220,6 +222,13 @@ static __init int xen_parse_nopv(char *arg) } early_param("xen_nopv", xen_parse_nopv); +static __init int xen_parse_no_vector_callback(char *arg) +{ + no_vector_callback = true; + return 0; +} +early_param("xen_no_vector_callback", xen_parse_no_vector_callback); + bool __init xen_hvm_need_lapic(void) { if (xen_pv_domain()) diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 6d4d8a5700b7..3e66feff524a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1382,6 +1382,15 @@ asmlinkage __visible void __init xen_start_kernel(void) x86_init.mpparse.get_smp_config = x86_init_uint_noop; xen_boot_params_init_edd(); + +#ifdef CONFIG_ACPI + /* + * Disable selecting "Firmware First mode" for correctable + * memory errors, as this is the duty of the hypervisor to + * decide. + */ + acpi_disable_cmcff = 1; +#endif } if (!boot_params.screen_info.orig_video_isVGA) diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 0acba2c712ab..12fcb3858303 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -714,9 +714,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, for (i = 0; i < count; i++) { unsigned long mfn, pfn; + struct gnttab_unmap_grant_ref unmap[2]; + int rc; /* Do not add to override if the map failed. */ - if (map_ops[i].status) + if (map_ops[i].status != GNTST_okay || + (kmap_ops && kmap_ops[i].status != GNTST_okay)) continue; if (map_ops[i].flags & GNTMAP_contains_pte) { @@ -730,10 +733,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { - ret = -ENOMEM; - goto out; + if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap[0].host_addr = map_ops[i].host_addr, + unmap[0].handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap[0].dev_bus_addr = 0; + + if (kmap_ops) { + kmap_ops[i].status = GNTST_general_error; + unmap[1].host_addr = kmap_ops[i].host_addr, + unmap[1].handle = kmap_ops[i].handle; + kmap_ops[i].handle = ~0; + if (kmap_ops[i].flags & GNTMAP_device_map) + unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; + else + unmap[1].dev_bus_addr = 0; } + + /* + * Pre-populate both status fields, to be recognizable in + * the log message below. 
+ */ + unmap[0].status = 1; + unmap[1].status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + unmap, 1 + !!kmap_ops); + if (rc || unmap[0].status != GNTST_okay || + unmap[1].status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", + rc, unmap[0].status, unmap[1].status); } out: @@ -754,17 +793,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { + if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + else ret = -EINVAL; - goto out; - } - - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - kunmap_ops, count); -out: + kunmap_ops, count) ?: ret; + return ret; } EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 802ee5bba66c..0cebe5db691d 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -92,6 +92,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void) cpu_bringup(); boot_init_stack_canary(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + prevent_tail_call_optimization(); } void xen_smp_intr_free_pv(unsigned int cpu) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 6deb49094c60..d817b7c862a6 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu) void xen_uninit_lock_cpu(int cpu) { + int irq; + if (!xen_pvspin) return; - unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); + /* + * When booting the kernel with 'mitigations=auto,nosmt', the secondary + * CPUs are not activated, and lock_kicker_irq is not initialized. + */ + irq = per_cpu(lock_kicker_irq, cpu); + if (irq == -1) + return; + + unbind_from_irqhandler(irq, NULL); per_cpu(lock_kicker_irq, cpu) = -1; kfree(per_cpu(irq_name, cpu)); per_cpu(irq_name, cpu) = NULL; diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index cd177772fe4d..2712e9155306 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -56,7 +56,7 @@ _ASM_EXTABLE(1b,2b) .endm -ENTRY(xen_iret) +SYM_CODE_START(xen_iret) /* test eflags for special cases */ testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) jnz hyper_iret @@ -122,6 +122,7 @@ xen_iret_end_crit: hyper_iret: /* put this out of line since its very rarely used */ jmp hypercall_page + __HYPERVISOR_iret * 32 +SYM_CODE_END(xen_iret) .globl xen_iret_start_crit, xen_iret_end_crit @@ -152,7 +153,7 @@ hyper_iret: * The only caveat is that if the outer eax hasn't been restored yet (i.e. * it's still on stack), we need to restore its value here. */ -ENTRY(xen_iret_crit_fixup) +SYM_CODE_START(xen_iret_crit_fixup) /* * Paranoia: Make sure we're really coming from kernel space. 
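In the clear_foreign_p2m_mapping() change above, `HYPERVISOR_grant_table_op(...) ?: ret` uses GCC's conditional-with-omitted-middle-operand extension: `a ?: b` evaluates a once and yields it if non-zero, otherwise b. The effect is error aggregation: a hypercall failure overrides ret, while success preserves any earlier -EINVAL. A small standalone sketch of the pattern (step_one/step_two are hypothetical):

#include <errno.h>

static int step_one(void) { return -EINVAL; }	/* hypothetical failure */
static int step_two(void) { return 0; }		/* hypothetical success */

static int do_two_steps(void)
{
	int ret = step_one();		/* remember the earlier error     */

	ret = step_two() ?: ret;	/* a non-zero step_two() result   */
	return ret;			/* wins; 0 keeps ret (-EINVAL)    */
}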
* One could imagine a case where userspace jumps into the @@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup) 2: ret -END(xen_iret_crit_fixup) +SYM_CODE_END(xen_iret_crit_fixup) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 3f7fe5a8c286..359c7bd01f14 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -70,7 +70,7 @@ */ #define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000) #define VMALLOC_END (VMALLOC_START + 0x07FEFFFF) -#define TLBTEMP_BASE_1 (VMALLOC_END + 1) +#define TLBTEMP_BASE_1 (VMALLOC_START + 0x08000000) #define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE #define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index f092cc3f4e66..956d4d47c6cd 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -55,6 +55,10 @@ struct thread_info { mm_segment_t addr_limit; /* thread address space */ unsigned long cpenable; +#if XCHAL_HAVE_EXCLUSIVE + /* result of the most recent exclusive store */ + unsigned long atomctl8; +#endif /* Allocate storage for extra user states and coprocessor states. */ #if XTENSA_HAVE_COPROCESSORS diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 3f80386f1883..5cb24a789e9e 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -300,7 +300,7 @@ strncpy_from_user(char *dst, const char *src, long count) return -EFAULT; } #else -long strncpy_from_user(char *dst, const char *src, long count); +long strncpy_from_user(char *dst, const char __user *src, long count); #endif /* diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 33a257b33723..dc5c83cad9be 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -93,6 +93,9 @@ int main(void) DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); +#if XCHAL_HAVE_EXCLUSIVE + DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8)); +#endif #if XTENSA_HAVE_COPROCESSORS DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 80828b95a51f..d956f87fcb09 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -108,37 +108,6 @@ .previous -/* - * coprocessor_flush(struct thread_info*, index) - * a2 a3 - * - * Save coprocessor registers for coprocessor 'index'. - * The register values are saved to or loaded from the coprocessor area - * inside the task_info structure. - * - * Note that this function doesn't update the coprocessor_owner information! - * - */ - -ENTRY(coprocessor_flush) - - /* reserve 4 bytes on stack to save a0 */ - abi_entry(4) - - s32i a0, a1, 0 - movi a0, .Lsave_cp_regs_jump_table - addx8 a3, a3, a0 - l32i a4, a3, 4 - l32i a3, a3, 0 - add a2, a2, a4 - beqz a3, 1f - callx0 a3 -1: l32i a0, a1, 0 - - abi_ret(4) - -ENDPROC(coprocessor_flush) - /* * Entry condition: * @@ -261,6 +230,39 @@ ENTRY(fast_coprocessor) ENDPROC(fast_coprocessor) + .text + +/* + * coprocessor_flush(struct thread_info*, index) + * a2 a3 + * + * Save coprocessor registers for coprocessor 'index'. 
+ * The register values are saved to or loaded from the coprocessor area + * inside the task_info structure. + * + * Note that this function doesn't update the coprocessor_owner information! + * + */ + +ENTRY(coprocessor_flush) + + /* reserve 4 bytes on stack to save a0 */ + abi_entry(4) + + s32i a0, a1, 0 + movi a0, .Lsave_cp_regs_jump_table + addx8 a3, a3, a0 + l32i a4, a3, 4 + l32i a3, a3, 0 + add a2, a2, a4 + beqz a3, 1f + callx0 a3 +1: l32i a0, a1, 0 + + abi_ret(4) + +ENDPROC(coprocessor_flush) + .data ENTRY(coprocessor_owner) diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 9e3676879168..1f07876ea2ed 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -374,6 +374,11 @@ common_exception: s32i a2, a1, PT_LCOUNT #endif +#if XCHAL_HAVE_EXCLUSIVE + /* Clear exclusive access monitor set by interrupted code */ + clrex +#endif + /* It is now save to restore the EXC_TABLE_FIXUP variable. */ rsr a2, exccause @@ -1892,6 +1897,7 @@ ENTRY(system_call) mov a6, a2 call4 do_syscall_trace_enter + beqz a6, .Lsyscall_exit l32i a7, a2, PT_SYSCALL 1: @@ -1906,8 +1912,6 @@ ENTRY(system_call) addx4 a4, a7, a4 l32i a4, a4, 0 - movi a5, sys_ni_syscall; - beq a4, a5, 1f /* Load args: arg0 - arg5 are passed via regs. */ @@ -1927,6 +1931,7 @@ ENTRY(system_call) s32i a6, a2, PT_AREG2 bnez a3, 1f +.Lsyscall_exit: abi_ret(4) 1: @@ -2024,6 +2029,12 @@ ENTRY(_switch_to) s32i a3, a4, THREAD_CPENABLE #endif +#if XCHAL_HAVE_EXCLUSIVE + l32i a3, a5, THREAD_ATOMCTL8 + getex a3 + s32i a3, a4, THREAD_ATOMCTL8 +#endif + /* Flush register file. */ spill_registers_kernel diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index 9bae79f70301..86c9ba963155 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = { .read = xtensa_pmu_read, }; -static int xtensa_pmu_setup(int cpu) +static int xtensa_pmu_setup(unsigned int cpu) { unsigned i; diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index b964f0b2d886..145742d70a9f 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } -void do_syscall_trace_enter(struct pt_regs *regs) +void do_syscall_trace_leave(struct pt_regs *regs); +int do_syscall_trace_enter(struct pt_regs *regs) { + if (regs->syscall == NO_SYSCALL) + regs->areg[2] = -ENOSYS; + if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + tracehook_report_syscall_entry(regs)) { + regs->areg[2] = -ENOSYS; regs->syscall = NO_SYSCALL; + return 0; + } + + if (regs->syscall == NO_SYSCALL) { + do_syscall_trace_leave(regs); + return 0; + } if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, syscall_get_nr(current, regs)); + + return 1; } void do_syscall_trace_leave(struct pt_regs *regs) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index e0e1e1892b86..d08172138369 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -716,7 +716,8 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { - return NULL; + ++*pos; + return c_start(f, pos); } static void diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 4092555828b1..24cf6972eace 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -87,13 +87,13 @@ void 
__xtensa_libgcc_window_spill(void) } EXPORT_SYMBOL(__xtensa_libgcc_window_spill); -unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); -unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) { BUG(); } diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index b27359e2a464..f769f3f66729 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -71,8 +71,10 @@ static inline void kmap_invalidate_coherent(struct page *page, kvaddr = TLBTEMP_BASE_1 + (page_to_phys(page) & DCACHE_ALIAS_MASK); + preempt_disable(); __invalidate_dcache_page_alias(kvaddr, page_to_phys(page)); + preempt_enable(); } } } @@ -157,6 +159,7 @@ void flush_dcache_page(struct page *page) if (!alias && !mapping) return; + preempt_disable(); virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(virt, phys); @@ -167,6 +170,7 @@ void flush_dcache_page(struct page *page) if (mapping) __invalidate_icache_page_alias(virt, phys); + preempt_enable(); } /* There shouldn't be an entry in the cache for this page anymore. */ @@ -200,8 +204,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long phys = page_to_phys(pfn_to_page(pfn)); unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK); + preempt_disable(); __flush_invalidate_dcache_page_alias(virt, phys); __invalidate_icache_page_alias(virt, phys); + preempt_enable(); } EXPORT_SYMBOL(local_flush_cache_page); @@ -228,11 +234,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) unsigned long phys = page_to_phys(page); unsigned long tmp; + preempt_disable(); tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); __invalidate_icache_page_alias(tmp, phys); + preempt_enable(); clear_bit(PG_arch_1, &page->flags); } @@ -266,7 +274,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, if (alias) { unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); + preempt_disable(); __flush_invalidate_dcache_page_alias(t, phys); + preempt_enable(); } /* Copy data */ @@ -281,9 +291,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, if (alias) { unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); + preempt_disable(); __flush_invalidate_dcache_range((unsigned long) dst, len); if ((vma->vm_flags & VM_EXEC) != 0) __invalidate_icache_page_alias(t, phys); + preempt_enable(); } else if ((vma->vm_flags & VM_EXEC) != 0) { __flush_dcache_range((unsigned long)dst,len); @@ -305,7 +317,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, if (alias) { unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); + preempt_disable(); __flush_invalidate_dcache_page_alias(t, phys); + preempt_enable(); } memcpy(dst, src, len); diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 86cd718e0380..342a1cfa48c5 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg) kfree(bfqg); } -void bfqg_and_blkg_get(struct bfq_group *bfqg) +static void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ 
bfqg_get(bfqg); @@ -625,6 +625,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_entity *entity = &bfqq->entity; + /* + * Get extra reference to prevent bfqq from being freed in + * next possible expire or deactivate. + */ + bfqq->ref++; + /* If bfqq is empty, then bfq_bfqq_expire also invokes * bfq_del_bfqq_busy, thereby removing bfqq and its entity * from data structures related to current group. Otherwise we @@ -635,12 +641,6 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_bfqq_expire(bfqd, bfqd->in_service_queue, false, BFQQE_PREEMPTED); - /* - * get extra reference to prevent bfqq from being freed in - * next possible deactivate - */ - bfqq->ref++; - if (bfq_bfqq_busy(bfqq)) bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st) @@ -660,7 +660,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (!bfqd->in_service_queue && !bfqd->rq_in_driver) bfq_schedule_dispatch(bfqd); - /* release extra ref taken above */ + /* release extra ref taken above, bfqq may happen to be freed now */ bfq_put_queue(bfqq); } @@ -697,10 +697,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, if (entity->sched_data != &bfqg->sched_data) { bic_set_bfqq(bic, NULL, 0); - bfq_log_bfqq(bfqd, async_bfqq, - "bic_change_group: %p %d", - async_bfqq, async_bfqq->ref); - bfq_put_queue(async_bfqq); + bfq_release_process_ref(bfqd, async_bfqq); } } @@ -801,39 +798,53 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st) /** * bfq_reparent_leaf_entity - move leaf entity to the root_group. * @bfqd: the device data structure with the root group. - * @entity: the entity to move. + * @entity: the entity to move, if entity is a leaf; or the parent entity + * of an active leaf entity to move, if entity is not a leaf. */ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, - struct bfq_entity *entity) + struct bfq_entity *entity, + int ioprio_class) { - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_queue *bfqq; + struct bfq_entity *child_entity = entity; + while (child_entity->my_sched_data) { /* leaf not reached yet */ + struct bfq_sched_data *child_sd = child_entity->my_sched_data; + struct bfq_service_tree *child_st = child_sd->service_tree + + ioprio_class; + struct rb_root *child_active = &child_st->active; + + child_entity = bfq_entity_of(rb_first(child_active)); + + if (!child_entity) + child_entity = child_sd->in_service_entity; + } + + bfqq = bfq_entity_to_bfqq(child_entity); bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); } /** - * bfq_reparent_active_entities - move to the root group all active - * entities. + * bfq_reparent_active_queues - move to the root group all active queues. * @bfqd: the device data structure with the root group. * @bfqg: the group to move from. - * @st: the service tree with the entities. + * @st: the service tree to start the search from. 
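bfq_bfqq_move() above now bumps bfqq->ref before calling bfq_bfqq_expire() rather than after, since expiring an empty queue may drop what would otherwise be the last reference and free bfqq mid-move. A standalone sketch of the general pin-before-a-releasing-operation pattern (all names hypothetical):

#include <stdlib.h>

struct obj {
	int ref;
};

static void put_obj(struct obj *o)
{
	if (--o->ref == 0)
		free(o);	/* dropping the last reference frees it */
}

static void maybe_release(struct obj *o)
{
	put_obj(o);	/* hypothetical op that may drop a reference */
}

static void move_obj(struct obj *o)
{
	o->ref++;		/* pin first: the next call may drop a ref */
	maybe_release(o);	/* safe, the pin keeps o alive */
	/* ... reparent o here ... */
	put_obj(o);		/* drop the pin; o may be freed now */
}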
*/ -static void bfq_reparent_active_entities(struct bfq_data *bfqd, - struct bfq_group *bfqg, - struct bfq_service_tree *st) +static void bfq_reparent_active_queues(struct bfq_data *bfqd, + struct bfq_group *bfqg, + struct bfq_service_tree *st, + int ioprio_class) { struct rb_root *active = &st->active; - struct bfq_entity *entity = NULL; + struct bfq_entity *entity; - if (!RB_EMPTY_ROOT(&st->active)) - entity = bfq_entity_of(rb_first(active)); - - for (; entity ; entity = bfq_entity_of(rb_first(active))) - bfq_reparent_leaf_entity(bfqd, entity); + while ((entity = bfq_entity_of(rb_first(active)))) + bfq_reparent_leaf_entity(bfqd, entity, ioprio_class); if (bfqg->sched_data.in_service_entity) bfq_reparent_leaf_entity(bfqd, - bfqg->sched_data.in_service_entity); + bfqg->sched_data.in_service_entity, + ioprio_class); } /** @@ -865,13 +876,6 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { st = bfqg->sched_data.service_tree + i; - /* - * The idle tree may still contain bfq_queues belonging - * to exited task because they never migrated to a different - * cgroup from the one being destroyed now. - */ - bfq_flush_idle_tree(st); - /* * It may happen that some queues are still active * (busy) upon group destruction (if the corresponding @@ -884,7 +888,20 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) * There is no need to put the sync queues, as the * scheduler has taken no reference. */ - bfq_reparent_active_entities(bfqd, bfqg, st); + bfq_reparent_active_queues(bfqd, bfqg, st, i); + + /* + * The idle tree may still contain bfq_queues + * belonging to exited task because they never + * migrated to a different cgroup from the one being + * destroyed now. In addition, even + * bfq_reparent_active_queues() may happen to add some + * entities to the idle tree. It happens if, in some + * of the calls to bfq_bfqq_move() performed by + * bfq_reparent_active_queues(), the queue to move is + * empty and gets expired. + */ + bfq_flush_idle_tree(st); } __bfq_deactivate_entity(entity, false); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 48189ff88916..c19006d59b79 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2717,8 +2717,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) } } - -static void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) { /* @@ -2939,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, } bfqd->in_service_queue = bfqq; + bfqd->in_serv_last_pos = 0; } /* @@ -5892,18 +5891,6 @@ static void bfq_finish_requeue_request(struct request *rq) struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd; - /* - * Requeue and finish hooks are invoked in blk-mq without - * checking whether the involved request is actually still - * referenced in the scheduler. To handle this fact, the - * following two checks make this function exit in case of - * spurious invocations, for which there is nothing to do. - * - * First, check whether rq has nothing to do with an elevator. 
- */ - if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) - return; - /* * rq either is not associated with any icq, or is an already * requeued request that has not (yet) been re-inserted into @@ -6210,20 +6197,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) return bfqq; } -static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) +static void +bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) { - struct bfq_data *bfqd = bfqq->bfqd; enum bfqq_expiration reason; unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfq_clear_bfqq_wait_request(bfqq); + /* + * bfqq may be racing with expiration, so first check whether it is + * still in service before acting on it. If the racing bfqq is no + * longer in service, it has already been expired through + * __bfq_bfqq_expire() and its wait_request flag has been cleared in + * __bfq_bfqd_reset_in_service(). + */ if (bfqq != bfqd->in_service_queue) { spin_unlock_irqrestore(&bfqd->lock, flags); return; } + bfq_clear_bfqq_wait_request(bfqq); + if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired @@ -6268,7 +6263,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) * early. */ if (bfqq) - bfq_idle_slice_timer_body(bfqq); + bfq_idle_slice_timer_body(bfqd, bfqq); return HRTIMER_NORESTART; } diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 1553a4e8f7ad..de98fdfe9ea1 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -950,6 +950,7 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool compensate, enum bfqq_expiration reason); void bfq_put_queue(struct bfq_queue *bfqq); void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); +void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq); void bfq_schedule_dispatch(struct bfq_data *bfqd); void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); @@ -979,7 +980,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); -void bfqg_and_blkg_get(struct bfq_group *bfqg); void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 44079147e396..05f0bf4a1144 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -536,9 +536,7 @@ static void bfq_get_entity(struct bfq_entity *entity) bfqq->ref++; bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, bfqq->ref); - } else - bfqg_and_blkg_get(container_of(entity, struct bfq_group, - entity)); + } } /** @@ -652,14 +650,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st, entity->on_st = false; st->wsum -= entity->weight; - if (is_in_service) - return; - - if (bfqq) + if (bfqq && !is_in_service) bfq_put_queue(bfqq); - else - bfqg_and_blkg_put(container_of(entity, struct bfq_group, - entity)); } /** diff --git a/block/bio-integrity.c b/block/bio-integrity.c index bf62c25cde8f..c9dc2b17ce25 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -24,6 +24,18 @@ void blk_flush_integrity(void) flush_workqueue(kintegrityd_wq); } +void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip) +{ + if (bs && mempool_initialized(&bs->bio_integrity_pool)) { + if (bip->bip_vec) + bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + mempool_free(bip,
&bs->bio_integrity_pool); + } else { + kfree(bip); + } +} + /** * bio_integrity_alloc - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to @@ -75,7 +87,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, return bip; err: - mempool_free(bip, &bs->bio_integrity_pool); + __bio_integrity_free(bs, bip); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(bio_integrity_alloc); @@ -96,14 +108,7 @@ void bio_integrity_free(struct bio *bio) kfree(page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset); - if (bs && mempool_initialized(&bs->bio_integrity_pool)) { - bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab); - - mempool_free(bip, &bs->bio_integrity_pool); - } else { - kfree(bip); - } - + __bio_integrity_free(bs, bip); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; } @@ -278,7 +283,6 @@ bool bio_integrity_prep(struct bio *bio) if (ret == 0) { printk(KERN_ERR "could not attach integrity payload\n"); - kfree(buf); status = BLK_STS_RESOURCE; goto err_end_io; } diff --git a/block/bio.c b/block/bio.c index 94d697217887..9e45621d9fd9 100644 --- a/block/bio.c +++ b/block/bio.c @@ -305,7 +305,7 @@ static struct bio *__bio_chain_endio(struct bio *bio) { struct bio *parent = bio->bi_private; - if (!parent->bi_status) + if (bio->bi_status && !parent->bi_status) parent->bi_status = bio->bi_status; bio_put(bio); return parent; @@ -683,8 +683,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) { - phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + - bv->bv_offset + bv->bv_len - 1; + size_t bv_end = bv->bv_offset + bv->bv_len; + phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; phys_addr_t page_addr = page_to_phys(page); if (vec_end_addr + 1 != page_addr + off) @@ -693,9 +693,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, return false; *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); - if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page) - return false; - return true; + if (*same_page) + return true; + return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); } static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio, @@ -807,8 +807,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > UINT_MAX - len) + if (bio->bi_iter.bi_size > UINT_MAX - len) { + *same_page = false; return false; + } bv->bv_len += len; bio->bi_iter.bi_size += len; return true; @@ -1752,20 +1754,48 @@ defer: schedule_work(&bio_dirty_work); } -void update_io_ticks(struct hd_struct *part, unsigned long now) +void update_io_ticks(struct hd_struct *part, unsigned long now, bool end) { unsigned long stamp; again: stamp = READ_ONCE(part->stamp); - if (unlikely(stamp != now)) { - if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) { - __part_stat_add(part, io_ticks, 1); + if (unlikely(time_before(stamp, now))) { + unsigned long old; +again2: + if (likely((old = cmpxchg(&part->stamp, stamp, now)) == stamp)) { + __part_stat_add(part, io_ticks, end ? 
now - stamp : 1); + } else if (time_before(old, now)) { + stamp = old; + goto again2; + } + if (part->partno) { + part = &part_to_disk(part)->part0; + goto again; } } - if (part->partno) { - part = &part_to_disk(part)->part0; - goto again; +} + +void sync_io_ticks(struct hd_struct *part, bool busy) +{ + part_stat_lock(); + if (busy) { + /* count ticks till now as busy */ + update_io_ticks(part, jiffies, true); + } else { + /* advance stamp to previous jiffies */ + unsigned long prevj = jiffies - 1; + unsigned long stamp; +again: + stamp = READ_ONCE(part->stamp); + if (unlikely(time_before(stamp, prevj))) { + cmpxchg(&part->stamp, stamp, prevj); + if (part->partno) { + part = &part_to_disk(part)->part0; + goto again; + } + } } + part_stat_unlock(); } void generic_start_io_acct(struct request_queue *q, int op, @@ -1775,7 +1805,7 @@ void generic_start_io_acct(struct request_queue *q, int op, part_stat_lock(); - update_io_ticks(part, jiffies); + update_io_ticks(part, jiffies, false); part_stat_inc(part, ios[sgrp]); part_stat_add(part, sectors[sgrp], sectors); part_inc_in_flight(q, part, op_is_write(op)); @@ -1793,7 +1823,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op, part_stat_lock(); - update_io_ticks(part, now); + update_io_ticks(part, now, true); part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration)); part_stat_add(part, time_in_queue, duration); part_dec_in_flight(q, part, op_is_write(req_op)); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index ba25f771e751..a3bf040dc6ce 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -721,7 +721,9 @@ static void blkcg_dkstats_seqf_print(struct blkcg *blkcg, struct hd_struct *hd, char buf[BDEVNAME_SIZE]; unsigned long rd_nsecs = blkcg_part_stat_read(blkcg, hd, nsecs[READ]); unsigned long wr_nsecs = blkcg_part_stat_read(blkcg, hd, nsecs[WRITE]); + struct request_queue *q = part_to_disk(hd)->queue; + sync_io_ticks(hd, blk_queue_quiesced(q) || part_in_flight(q, hd) > 0); seq_printf(seqf, "%4d %7d %s %lu %lu %lu " "%u %lu %lu %lu %u %u %u %u %u %u\n", MAJOR(part_devt(hd)), MINOR(part_devt(hd)), @@ -801,9 +803,8 @@ static int blkcg_dkstats_enable(struct cgroup_subsys_state *css, return 0; } -static int blkcg_dkstats_show(struct seq_file *sf, void *v) +static int blkcg_dkstats_show_comm(struct seq_file *sf, void *v, struct blkcg *blkcg) { - struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct class_dev_iter iter; struct device *dev; @@ -824,6 +825,18 @@ static int blkcg_dkstats_show(struct seq_file *sf, void *v) return 0; } +int blkcg_cgroupfs_dkstats_show(struct seq_file *m, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(task_css(current, io_cgrp_id)); + return blkcg_dkstats_show_comm(m, v, blkcg); +} + +static int blkcg_dkstats_show(struct seq_file *sf, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); + return blkcg_dkstats_show_comm(sf, v, blkcg); +} + const char *blkg_dev_name(struct blkcg_gq *blkg) { /* some drivers (floppy) instantiate a queue w/o disk registered */ @@ -1188,13 +1201,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, goto fail; } + if (radix_tree_preload(GFP_KERNEL)) { + blkg_free(new_blkg); + ret = -ENOMEM; + goto fail; + } + rcu_read_lock(); spin_lock_irq(&q->queue_lock); blkg = blkg_lookup_check(pos, pol, q); if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); - goto fail_unlock; + blkg_free(new_blkg); + goto fail_preloaded; } if (blkg) { @@ -1203,10 +1223,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, blkg = blkg_create(pos, q,
new_blkg); if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); - goto fail_unlock; + goto fail_preloaded; } } + radix_tree_preload_end(); + if (pos == blkcg) goto success; } @@ -1216,6 +1238,8 @@ success: ctx->body = input; return 0; +fail_preloaded: + radix_tree_preload_end(); fail_unlock: spin_unlock_irq(&q->queue_lock); rcu_read_unlock(); @@ -1423,6 +1447,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) */ void blkcg_destroy_blkgs(struct blkcg *blkcg) { + might_sleep(); + spin_lock_irq(&blkcg->lock); while (!hlist_empty(&blkcg->blkg_list)) { @@ -1430,14 +1456,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg) struct blkcg_gq, blkcg_node); struct request_queue *q = blkg->q; - if (spin_trylock(&q->queue_lock)) { - blkg_destroy(blkg); - spin_unlock(&q->queue_lock); - } else { + if (need_resched() || !spin_trylock(&q->queue_lock)) { + /* + * Given that the system can accumulate a huge number + * of blkgs in pathological cases, check to see if we + * need to reschedule to avoid softlockup. + */ spin_unlock_irq(&blkcg->lock); - cpu_relax(); + cond_resched(); spin_lock_irq(&blkcg->lock); + continue; } + + blkg_destroy(blkg); + spin_unlock(&q->queue_lock); } spin_unlock_irq(&blkcg->lock); @@ -1565,13 +1597,15 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); - ret = blk_iolatency_init(q); - if (ret) - goto err_destroy_all; - ret = blk_throtl_init(q); if (ret) goto err_destroy_all; + + ret = blk_iolatency_init(q); + if (ret) { + blk_throtl_exit(q); + goto err_destroy_all; + } return 0; err_destroy_all: diff --git a/block/blk-core.c b/block/blk-core.c index 00a1d478ddc5..c651944cf79c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -551,6 +551,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) goto fail_stats; q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES; + q->backing_dev_info->io_pages = VM_READAHEAD_PAGES; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info->name = "block"; q->node = node_id; @@ -793,11 +794,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector) { char b[BDEVNAME_SIZE]; - printk(KERN_INFO "attempt to access beyond end of device\n"); - printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", - bio_devname(bio, b), bio->bi_opf, - (unsigned long long)bio_end_sector(bio), - (long long)maxsector); + pr_info_ratelimited("attempt to access beyond end of device\n" + "%s: rw=%d, want=%llu, limit=%llu\n", + bio_devname(bio, b), bio->bi_opf, + bio_end_sector(bio), maxsector); } #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -1390,7 +1390,7 @@ void blk_account_io_done(struct request *req, u64 now) part_stat_lock(); part = req->part; - update_io_ticks(part, jiffies); + update_io_ticks(part, jiffies, true); part_stat_inc(part, ios[sgrp]); part_stat_add(part, nsecs[sgrp], now - req->start_time_ns); part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns)); @@ -1432,7 +1432,7 @@ void blk_account_io_start(struct request *rq, bool new_io) rq->part = part; } - update_io_ticks(part, jiffies); + update_io_ticks(part, jiffies, false); part_stat_unlock(); } diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 5ed59ac6ae58..9df50fb507ca 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -84,6 +84,7 @@ static void ioc_destroy_icq(struct io_cq *icq) * making it impossible to determine icq_cache. Record it in @icq.
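blkg_conf_prep() above moves the radix-tree node allocation out from under q->queue_lock: radix_tree_preload() fills a per-CPU node reserve (and disables preemption on success) so the later insertion under the spinlock never sleeps in the allocator, and every exit path must pair it with radix_tree_preload_end(). A kernel-style sketch of the pairing, with a hypothetical lock and root:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(some_lock);	/* hypothetical */

/* Sketch: preload outside the lock, insert under it. */
static int insert_item(struct radix_tree_root *root, unsigned long index,
		       void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep, fills reserve */
	if (err)
		return err;

	spin_lock_irq(&some_lock);
	err = radix_tree_insert(root, index, item);	/* uses the reserve */
	spin_unlock_irq(&some_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}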
*/ icq->__rcu_icq_cache = et->icq_cache; + icq->flags |= ICQ_DESTROYED; call_rcu(&icq->__rcu_head, icq_free_icq_rcu); } @@ -212,15 +213,21 @@ static void __ioc_clear_queue(struct list_head *icq_list) { unsigned long flags; + rcu_read_lock(); while (!list_empty(icq_list)) { struct io_cq *icq = list_entry(icq_list->next, struct io_cq, q_node); struct io_context *ioc = icq->ioc; spin_lock_irqsave(&ioc->lock, flags); + if (icq->flags & ICQ_DESTROYED) { + spin_unlock_irqrestore(&ioc->lock, flags); + continue; + } ioc_destroy_icq(icq); spin_unlock_irqrestore(&ioc->lock, flags); } + rcu_read_unlock(); } /** diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9a599cc28c29..ef287c33d6d9 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -469,7 +469,7 @@ struct ioc_gq { */ atomic64_t vtime; atomic64_t done_vtime; - atomic64_t abs_vdebt; + u64 abs_vdebt; u64 last_vtime; /* @@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) struct iocg_wake_ctx ctx = { .iocg = iocg }; u64 margin_ns = (u64)(ioc->period_us * WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC; - u64 abs_vdebt, vdebt, vshortage, expires, oexpires; + u64 vdebt, vshortage, expires, oexpires; s64 vbudget; u32 hw_inuse; @@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) vbudget = now->vnow - atomic64_read(&iocg->vtime); /* pay off debt */ - abs_vdebt = atomic64_read(&iocg->abs_vdebt); - vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse); + vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); if (vdebt && vbudget > 0) { u64 delta = min_t(u64, vbudget, vdebt); u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), - abs_vdebt); + iocg->abs_vdebt); atomic64_add(delta, &iocg->vtime); atomic64_add(delta, &iocg->done_vtime); - atomic64_sub(abs_delta, &iocg->abs_vdebt); - if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0)) - atomic64_set(&iocg->abs_vdebt, 0); + iocg->abs_vdebt -= abs_delta; } /* @@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) u64 expires, oexpires; u32 hw_inuse; + lockdep_assert_held(&iocg->waitq.lock); + /* debt-adjust vtime */ current_hweight(iocg, NULL, &hw_inuse); - vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse); + vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); - /* clear or maintain depending on the overage */ - if (time_before_eq64(vtime, now->vnow)) { + /* + * Clear or maintain depending on the overage. Non-zero vdebt is what + * guarantees that @iocg is online and future iocg_kick_delay() will + * clear use_delay. Don't leave it on when there's no vdebt. + */ + if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { blkcg_clear_delay(blkg); return false; } @@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) { struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); struct ioc_now now; + unsigned long flags; + spin_lock_irqsave(&iocg->waitq.lock, flags); ioc_now(iocg->ioc, &now); iocg_kick_delay(iocg, &now, 0); + spin_unlock_irqrestore(&iocg->waitq.lock, flags); return HRTIMER_NORESTART; } @@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer) * should have woken up in the last period and expire idle iocgs. 
*/ list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { - if (!waitqueue_active(&iocg->waitq) && - !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg)) + if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && + !iocg_is_idle(iocg)) continue; spin_lock(&iocg->waitq.lock); - if (waitqueue_active(&iocg->waitq) || - atomic64_read(&iocg->abs_vdebt)) { + if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { /* might be oversleeping vtime / hweight changes, kick */ iocg_kick_waitq(iocg, &now); iocg_kick_delay(iocg, &now, 0); @@ -1541,19 +1546,39 @@ skip_surplus_transfers: if (rq_wait_pct > RQ_WAIT_BUSY_PCT || missed_ppm[READ] > ppm_rthr || missed_ppm[WRITE] > ppm_wthr) { + /* clearly missing QoS targets, slow down vrate */ ioc->busy_level = max(ioc->busy_level, 0); ioc->busy_level++; } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) { - /* take action iff there is contention */ - if (nr_shortages && !nr_lagging) { + /* QoS targets are being met with >25% margin */ + if (nr_shortages) { + /* + * We're throttling while the device has spare + * capacity. If vrate was being slowed down, stop. + */ ioc->busy_level = min(ioc->busy_level, 0); - /* redistribute surpluses first */ - if (!nr_surpluses) + + /* + * If there are IOs spanning multiple periods, wait + * them out before pushing the device harder. If + * there are surpluses, let redistribution work it + * out first. + */ + if (!nr_lagging && !nr_surpluses) ioc->busy_level--; + } else { + /* + * Nobody is being throttled and the users aren't + * issuing enough IOs to saturate the device. We + * simply don't know how close the device is to + * saturation. Coast. + */ + ioc->busy_level = 0; } } else { + /* inside the hysteresis margin, we're good */ ioc->busy_level = 0; } @@ -1594,7 +1619,7 @@ skip_surplus_transfers: vrate_min, vrate_max); } - trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct, + trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses); @@ -1603,7 +1628,7 @@ skip_surplus_transfers: ioc->period_us * vrate * INUSE_MARGIN_PCT, 100); } else if (ioc->busy_level != prev_busy_level || nr_lagging) { trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), - &missed_ppm, rq_wait_pct, nr_lagging, + missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, nr_surpluses); } @@ -1721,28 +1746,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) * tests are racy but the races aren't systemic - we only miss once * in a while which is fine. */ - if (!waitqueue_active(&iocg->waitq) && - !atomic64_read(&iocg->abs_vdebt) && + if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && time_before_eq64(vtime + cost, now.vnow)) { iocg_commit_bio(iocg, bio, cost); return; } /* - * We're over budget. If @bio has to be issued regardless, - * remember the abs_cost instead of advancing vtime. - * iocg_kick_waitq() will pay off the debt before waking more IOs. + * We activated above but w/o any synchronization. Deactivation is + * synchronized with waitq.lock and we won't get deactivated as long + * as we're waiting or have debt, so we're good if we're activated + * here. In the unlikely case that we aren't, just issue the IO.
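The iocost hunks above demote iocg->abs_vdebt from an atomic64_t to a plain u64: every read-modify-write now happens under iocg->waitq.lock, so the debt stays coherent with the waiter and activation state it is checked against, which a lone atomic could not guarantee. A kernel-style sketch of the resulting pattern (the struct and names are hypothetical):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch: debt is only touched under the wait-queue lock. */
struct iocg_like {
	spinlock_t lock;	/* protects abs_vdebt and waiter state */
	u64 abs_vdebt;
};

static void add_debt(struct iocg_like *g, u64 abs_cost)
{
	unsigned long flags;

	spin_lock_irqsave(&g->lock, flags);
	g->abs_vdebt += abs_cost;	/* coherent with waiters under lock */
	spin_unlock_irqrestore(&g->lock, flags);
}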
+ */ + spin_lock_irq(&iocg->waitq.lock); + + if (unlikely(list_empty(&iocg->active_list))) { + spin_unlock_irq(&iocg->waitq.lock); + iocg_commit_bio(iocg, bio, cost); + return; + } + + /* + * We're over budget. If @bio has to be issued regardless, remember + * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay + * off the debt before waking more IOs. + * * This way, the debt is continuously paid off each period with the - * actual budget available to the cgroup. If we just wound vtime, - * we would incorrectly use the current hw_inuse for the entire - * amount which, for example, can lead to the cgroup staying - * blocked for a long time even with substantially raised hw_inuse. + * actual budget available to the cgroup. If we just wound vtime, we + * would incorrectly use the current hw_inuse for the entire amount + * which, for example, can lead to the cgroup staying blocked for a + * long time even with substantially raised hw_inuse. + * + * An iocg with vdebt should stay online so that the timer can keep + * deducting its vdebt and [de]activate use_delay mechanism + * accordingly. We don't want to race against the timer trying to + * clear them and leave @iocg inactive w/ dangling use_delay heavily + * penalizing the cgroup and its descendants. */ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { - atomic64_add(abs_cost, &iocg->abs_vdebt); + iocg->abs_vdebt += abs_cost; if (iocg_kick_delay(iocg, &now, cost)) blkcg_schedule_throttle(rqos->q, (bio->bi_opf & REQ_SWAP) == REQ_SWAP); + spin_unlock_irq(&iocg->waitq.lock); return; } @@ -1759,20 +1805,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) * All waiters are on iocg->waitq and the wait states are * synchronized using waitq.lock. */ - spin_lock_irq(&iocg->waitq.lock); - - /* - * We activated above but w/o any synchronization. Deactivation is - * synchronized with waitq.lock and we won't get deactivated as - * long as we're waiting, so we're good if we're activated here. - * In the unlikely case that we are deactivated, just issue the IO. - */ - if (unlikely(list_empty(&iocg->active_list))) { - spin_unlock_irq(&iocg->waitq.lock); - iocg_commit_bio(iocg, bio, cost); - return; - } - init_waitqueue_func_entry(&wait.wait, iocg_wake_fn); wait.wait.private = current; wait.bio = bio; @@ -1804,6 +1836,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, struct ioc_now now; u32 hw_inuse; u64 abs_cost, cost; + unsigned long flags; /* bypass if disabled or for root cgroup */ if (!ioc->enabled || !iocg->level) @@ -1823,15 +1856,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, iocg->cursor = bio_end; /* - * Charge if there's enough vtime budget and the existing request - * has cost assigned. Otherwise, account it as debt. See debt - * handling in ioc_rqos_throttle() for details. + * Charge if there's enough vtime budget and the existing request has + * cost assigned. */ if (rq->bio && rq->bio->bi_iocost_cost && - time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) + time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { iocg_commit_bio(iocg, bio, cost); - else - atomic64_add(abs_cost, &iocg->abs_vdebt); + return; + } + + /* + * Otherwise, account it as debt if @iocg is online, which it should + * be for the vast majority of cases. See debt handling in + * ioc_rqos_throttle() for details. 
+ */ + spin_lock_irqsave(&iocg->waitq.lock, flags); + if (likely(!list_empty(&iocg->active_list))) { + iocg->abs_vdebt += abs_cost; + iocg_kick_delay(iocg, &now, cost); + } else { + iocg_commit_bio(iocg, bio, cost); + } + spin_unlock_irqrestore(&iocg->waitq.lock, flags); } static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) @@ -2001,7 +2047,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd) iocg->ioc = ioc; atomic64_set(&iocg->vtime, now.vnow); atomic64_set(&iocg->done_vtime, now.vnow); - atomic64_set(&iocg->abs_vdebt, 0); atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); INIT_LIST_HEAD(&iocg->active_list); iocg->hweight_active = HWEIGHT_WHOLE; @@ -2029,14 +2074,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd) { struct ioc_gq *iocg = pd_to_iocg(pd); struct ioc *ioc = iocg->ioc; + unsigned long flags; if (ioc) { - spin_lock(&ioc->lock); + spin_lock_irqsave(&ioc->lock, flags); if (!list_empty(&iocg->active_list)) { propagate_active_weight(iocg, 0, 0); list_del_init(&iocg->active_list); } - spin_unlock(&ioc->lock); + spin_unlock_irqrestore(&ioc->lock, flags); hrtimer_cancel(&iocg->waitq_timer); hrtimer_cancel(&iocg->delay_timer); diff --git a/block/blk-merge.c b/block/blk-merge.c index 48e6725b32ee..03959bfe961c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -154,20 +154,23 @@ static inline unsigned get_max_io_size(struct request_queue *q, if (max_sectors > start_offset) return max_sectors - start_offset; - return sectors & (lbs - 1); + return sectors & ~(lbs - 1); } -static unsigned get_max_segment_size(const struct request_queue *q, - unsigned offset) +static inline unsigned get_max_segment_size(const struct request_queue *q, + struct page *start_page, + unsigned long offset) { unsigned long mask = queue_segment_boundary(q); - /* default segment boundary mask means no boundary limit */ - if (mask == BLK_SEG_BOUNDARY_MASK) - return queue_max_segment_size(q); + offset = mask & (page_to_phys(start_page) + offset); - return min_t(unsigned long, mask - (mask & offset) + 1, - queue_max_segment_size(q)); + /* + * overflow may be triggered in case of zero page physical address + * on 32bit arch, use queue's max segment size when that happens. 
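The overflow the get_max_segment_size() comment above guards against is concrete: with the default segment boundary mask of ~0UL and a bvec page at physical address 0 (possible for the zero page on some 32-bit configurations), the unsigned arithmetic wraps. A worked example of the values involved:

/* Worked example on a 32-bit unsigned long: */
unsigned long mask   = ~0UL;		/* default: no boundary limit      */
unsigned long offset = mask & 0x0;	/* physical address 0, offset 0    */
unsigned long len    = mask - offset + 1;	/* 0xffffffff + 1 wraps to 0 */
/* min_not_zero(len, queue_max_segment_size(q)) then ignores the 0. */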
+ */ + return min_not_zero(mask - offset + 1, + (unsigned long)queue_max_segment_size(q)); } /** @@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q, unsigned seg_size = 0; while (len && *nsegs < max_segs) { - seg_size = get_max_segment_size(q, bv->bv_offset + total_len); + seg_size = get_max_segment_size(q, bv->bv_page, + bv->bv_offset + total_len); seg_size = min(seg_size, len); (*nsegs)++; @@ -366,6 +370,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq) switch (bio_op(rq->bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: + if (queue_max_discard_segments(rq->q) > 1) { + struct bio *bio = rq->bio; + + for_each_bio(bio) + nr_phys_segs++; + return nr_phys_segs; + } + return 1; case REQ_OP_WRITE_ZEROES: return 0; case REQ_OP_WRITE_SAME: @@ -404,7 +416,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q, while (nbytes > 0) { unsigned offset = bvec->bv_offset + total; - unsigned len = min(get_max_segment_size(q, offset), nbytes); + unsigned len = min(get_max_segment_size(q, bvec->bv_page, + offset), nbytes); struct page *page = bvec->bv_page; /* @@ -548,10 +561,17 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +static inline unsigned int blk_rq_get_max_segments(struct request *rq) +{ + if (req_op(rq) == REQ_OP_DISCARD) + return queue_max_discard_segments(rq->q); + return queue_max_segments(rq->q); +} + static inline int ll_new_hw_segment(struct request *req, struct bio *bio, unsigned int nr_phys_segs) { - if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q)) + if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req)) goto no_merge; if (blk_integrity_merge_bio(req->q, req, bio) == false) @@ -635,7 +655,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, return 0; total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; - if (total_phys_segments > queue_max_segments(q)) + if (total_phys_segments > blk_rq_get_max_segments(req)) return 0; if (blk_integrity_merge_rq(q, req, next) == false) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index b3f2ba483992..121f4c1e0697 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -125,6 +125,9 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(SCSI_PASSTHROUGH), QUEUE_FLAG_NAME(QUIESCED), + QUEUE_FLAG_NAME(PCI_P2PDMA), + QUEUE_FLAG_NAME(ZONE_RESETALL), + QUEUE_FLAG_NAME(RQ_ALLOC_TIME), }; #undef QUEUE_FLAG_NAME diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 74cedea56034..7620734d5542 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) + * in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, + * meantime new request added to hctx->dispatch is missed to check in + * blk_mq_run_hw_queue(). 
+ */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 126021fc3a11..e81ca1bf6e10 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq) struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; - if (e && e->type->ops.requeue_request) + if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) e->type->ops.requeue_request(rq); } diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index a09ab0c3d074..5dafd7a8ec91 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj) struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); - cancel_delayed_work_sync(&hctx->run_work); - if (hctx->flags & BLK_MQ_F_BLOCKING) cleanup_srcu_struct(hctx->srcu); blk_free_flush_queue(hctx->fq); diff --git a/block/blk-mq.c b/block/blk-mq.c index a8c1a45cedde..da3fe704ade2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -105,7 +105,9 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, /* * index[0] counts the specific partition that was asked for. */ - if (rq->part == mi->part) + if (rq->part && blk_do_io_stat(rq) && + !(rq->rq_flags & RQF_FLUSH_SEQ) && + (!mi->part->partno || rq->part == mi->part)) mi->inflight[0]++; return true; @@ -128,7 +130,9 @@ static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, { struct mq_inflight *mi = priv; - if (rq->part == mi->part) + if (rq->part && blk_do_io_stat(rq) && + !(rq->rq_flags & RQF_FLUSH_SEQ) && + (!mi->part->partno || rq->part == mi->part)) mi->inflight[rq_data_dir(rq)]++; return true; @@ -829,10 +833,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { /* - * If we find a request that is inflight and the queue matches, + * If we find a request that isn't idle and the queue matches, * we know the queue is busy. Return false to stop the iteration. */ - if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { + if (blk_mq_request_started(rq) && rq->q == hctx->queue) { bool *busy = priv; *busy = true; @@ -1205,6 +1209,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ +static void blk_mq_handle_dev_resource(struct request *rq, + struct list_head *list) +{ + struct request *next = + list_first_entry_or_null(list, struct request, queuelist); + + /* + * If an I/O scheduler has been configured and we got a driver tag for + * the next request already, free it. + */ + if (next) + blk_mq_put_driver_tag(next); + + list_add(&rq->queuelist, list); + __blk_mq_requeue_request(rq); +} + /* * Returns true if we did some work AND can potentially do more. 
*/ @@ -1216,6 +1237,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, bool no_tag = false; int errors, queued; blk_status_t ret = BLK_STS_OK; + bool no_budget_avail = false; if (list_empty(list)) return false; @@ -1232,8 +1254,11 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, rq = list_first_entry(list, struct request, queuelist); hctx = rq->mq_hctx; - if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) + if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) { + blk_mq_put_driver_tag(rq); + no_budget_avail = true; break; + } if (!blk_mq_get_driver_tag(rq)) { /* @@ -1272,17 +1297,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, ret = q->mq_ops->queue_rq(hctx, &bd); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - /* - * If an I/O scheduler has been configured and we got a - * driver tag for the next request already, free it - * again. - */ - if (!list_empty(list)) { - nxt = list_first_entry(list, struct request, queuelist); - blk_mq_put_driver_tag(nxt); - } - list_add(&rq->queuelist, list); - __blk_mq_requeue_request(rq); + blk_mq_handle_dev_resource(rq, list); break; } @@ -1316,6 +1331,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch and checking + * SCHED_RESTART flag. The pair of this smp_mb() is the one + * in blk_mq_sched_restart(). Avoid restart code path to + * miss the new added requests to hctx->dispatch, meantime + * SCHED_RESTART is observed here. + */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another @@ -1338,13 +1362,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, * * If driver returns BLK_STS_RESOURCE and SCHED_RESTART * bit is set, run queue after a delay to avoid IO stalls - * that could otherwise occur if the queue is idle. + * that could otherwise occur if the queue is idle. We'll do + * similar if we couldn't get budget and SCHED_RESTART is set. */ needs_restart = blk_mq_sched_needs_restart(hctx); if (!needs_restart || (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) blk_mq_run_hw_queue(hctx, true); - else if (needs_restart && (ret == BLK_STS_RESOURCE)) + else if (needs_restart && (ret == BLK_STS_RESOURCE || + no_budget_avail)) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); blk_mq_update_dispatch_busy(hctx, true); @@ -1867,7 +1893,8 @@ insert: if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_request_bypass_insert(rq, false, run_queue); + blk_mq_sched_insert_request(rq, false, run_queue, false); + return BLK_STS_OK; } @@ -2491,18 +2518,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) * If the cpu isn't present, the cpu is mapped to first hctx. */ for_each_possible_cpu(i) { - hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; - /* unmapped hw queue can be remapped after CPU topo changed */ - if (!set->tags[hctx_idx] && - !__blk_mq_alloc_rq_map(set, hctx_idx)) { - /* - * If tags initialization fail for some hctx, - * that hctx won't be brought online. 
In this - * case, remap the current ctx to hctx[0] which - * is guaranteed to always have tags allocated - */ - set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; - } ctx = per_cpu_ptr(q->queue_ctx, i); for (j = 0; j < set->nr_maps; j++) { @@ -2511,6 +2526,18 @@ static void blk_mq_map_swqueue(struct request_queue *q) HCTX_TYPE_DEFAULT, i); continue; } + hctx_idx = set->map[j].mq_map[i]; + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[hctx_idx] && + !__blk_mq_alloc_rq_map(set, hctx_idx)) { + /* + * If tags initialization fail for some hctx, + * that hctx won't be brought online. In this + * case, remap the current ctx to hctx[0] which + * is guaranteed to always have tags allocated + */ + set->map[j].mq_map[i] = 0; + } hctx = blk_mq_map_queue_type(q, j, i); ctx->hctxs[j] = hctx; @@ -3277,7 +3304,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) nr_hw_queues = nr_cpu_ids; - if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) + if (nr_hw_queues < 1) + return; + if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) return; list_for_each_entry(q, &set->tag_list, tag_set_list) @@ -3302,8 +3331,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, prev_nr_hw_queues = set->nr_hw_queues; set->nr_hw_queues = nr_hw_queues; - blk_mq_update_queue_map(set); fallback: + blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); if (q->nr_hw_queues != set->nr_hw_queues) { diff --git a/block/blk-pm.c b/block/blk-pm.c index 1adc1cd748b4..2ccf88dbaa40 100644 --- a/block/blk-pm.c +++ b/block/blk-pm.c @@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q) WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); + spin_lock_irq(&q->queue_lock); + q->rpm_status = RPM_SUSPENDING; + spin_unlock_irq(&q->queue_lock); + /* * Increase the pm_only counter before checking whether any * non-PM blk_queue_enter() calls are in progress to avoid that any @@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q) /* Switch q_usage_counter back to per-cpu mode. */ blk_mq_unfreeze_queue(q); - spin_lock_irq(&q->queue_lock); - if (ret < 0) + if (ret < 0) { + spin_lock_irq(&q->queue_lock); + q->rpm_status = RPM_ACTIVE; pm_runtime_mark_last_busy(q->dev); - else - q->rpm_status = RPM_SUSPENDING; - spin_unlock_irq(&q->queue_lock); + spin_unlock_irq(&q->queue_lock); - if (ret) blk_clear_pm_only(q); + } return ret; } diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 656460636ad3..ff87998e5a43 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data, if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) return; - prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); - has_sleeper = !wq_has_single_sleeper(&rqw->wait); + has_sleeper = !prepare_to_wait_exclusive_with_ret(&rqw->wait, &data.wq, + TASK_UNINTERRUPTIBLE); do { /* The memory barrier in set_task_state saves us here. 
*/ if (data.got_token) diff --git a/block/blk-settings.c b/block/blk-settings.c index 5119211390a3..812563e56de2 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -474,6 +474,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) } EXPORT_SYMBOL(blk_queue_stack_limits); +static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs) +{ + sectors = round_down(sectors, lbs >> SECTOR_SHIFT); + if (sectors < PAGE_SIZE >> SECTOR_SHIFT) + sectors = PAGE_SIZE >> SECTOR_SHIFT; + return sectors; +} + /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top device) @@ -587,6 +595,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } + t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); + t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); + t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); + /* Discard alignment and granularity */ if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); @@ -665,6 +677,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } + + t->backing_dev_info->io_pages = + t->limits.max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index f69e0249f2da..c5eea3ced180 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -916,9 +916,16 @@ static void __blk_release_queue(struct work_struct *work) blk_free_queue_stats(q->stats); - if (queue_is_mq(q)) + if (queue_is_mq(q)) { + struct blk_mq_hw_ctx *hctx; + int i; + cancel_delayed_work_sync(&q->requeue_work); + queue_for_each_hw_ctx(q, hctx, i) + cancel_delayed_work_sync(&hctx->run_work); + } + blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 4bc5f260248a..b17c094cb977 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -202,32 +202,14 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL_GPL(blkdev_report_zones); -/* - * Special case of zone reset operation to reset all zones in one command, - * useful for applications like mkfs. 
- */ -static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask) -{ - struct bio *bio = bio_alloc(gfp_mask, 0); - int ret; - - /* across the zones operations, don't need any sectors */ - bio_set_dev(bio, bdev); - bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0); - - ret = submit_bio_wait(bio); - bio_put(bio); - - return ret; -} - static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev, + sector_t sector, sector_t nr_sectors) { if (!blk_queue_zone_resetall(bdev_get_queue(bdev))) return false; - if (nr_sectors != part_nr_sects_read(bdev->bd_part)) + if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part)) return false; /* * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is @@ -271,9 +253,6 @@ int blkdev_reset_zones(struct block_device *bdev, /* Out of range */ return -EINVAL; - if (blkdev_allow_reset_all_zones(bdev, nr_sectors)) - return __blkdev_reset_all_zones(bdev, gfp_mask); - /* Check alignment (handle eventual smaller last zone) */ zone_sectors = blk_queue_zone_sectors(q); if (sector & (zone_sectors - 1)) @@ -285,17 +264,24 @@ int blkdev_reset_zones(struct block_device *bdev, blk_start_plug(&plug); while (sector < end_sector) { - bio = blk_next_bio(bio, 0, gfp_mask); - bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); - bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); + /* + * Special case for the zone reset operation that reset all + * zones, this is useful for applications like mkfs. + */ + if (blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) { + bio->bi_opf = REQ_OP_ZONE_RESET_ALL; + break; + } + + bio->bi_opf = REQ_OP_ZONE_RESET; + bio->bi_iter.bi_sector = sector; sector += zone_sectors; /* This may take a while, so be nice to others */ cond_resched(); - } ret = submit_bio_wait(bio); diff --git a/block/bsg.c b/block/bsg.c index 833c44b3d458..0d012efef527 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg) return PTR_ERR(rq); ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode); - if (ret) + if (ret) { + blk_put_request(rq); return ret; + } rq->timeout = msecs_to_jiffies(hdr.timeout); if (!rq->timeout) diff --git a/block/genhd.c b/block/genhd.c index ecc45a34e581..f87494163379 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -222,14 +222,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; + get_device(part_to_dev(part)); + piter->part = part; if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && - piter->idx == 0)) + piter->idx == 0)) { + put_device(part_to_dev(part)); + piter->part = NULL; continue; + } - get_device(part_to_dev(part)); - piter->part = part; piter->idx += inc; break; } @@ -634,10 +637,8 @@ static void register_disk(struct device *parent, struct gendisk *disk, disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); - if (disk->flags & GENHD_FL_HIDDEN) { - dev_set_uevent_suppress(ddev, 0); + if (disk->flags & GENHD_FL_HIDDEN) return; - } /* No minors to use for partitions */ if (!disk_part_scan_enabled(disk)) @@ -835,6 +836,56 @@ void del_gendisk(struct gendisk *disk) } EXPORT_SYMBOL(del_gendisk); +static int hide_disk(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + struct disk_part_iter piter; + struct hd_struct *part; + int ret; + + ret = 
device_hide(ddev); + if (ret) + return ret; + + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); + while ((part = disk_part_iter_next(&piter))) + device_hide(part_to_dev(part)); + disk_part_iter_exit(&piter); + + if (!sysfs_deprecated) + sysfs_remove_link(block_depr, dev_name(ddev)); + + return ret; +} + +static int unhide_disk(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + struct disk_part_iter piter; + struct hd_struct *part; + int ret; + + ret = device_unhide(ddev); + if (ret) + return ret; + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) + device_unhide(part_to_dev(part)); + disk_part_iter_exit(&piter); + + if (!sysfs_deprecated) { + if (sysfs_create_link(block_depr, &ddev->kobj, + kobject_name(&ddev->kobj))) + pr_warn("%s: failed to restore /sys/block link\n", + disk->disk_name); + } + + return ret; +} + + /* sysfs access to bad-blocks list. */ static ssize_t disk_badblocks_show(struct device *dev, struct device_attribute *attr, @@ -1180,6 +1231,31 @@ static ssize_t disk_discard_alignment_show(struct device *dev, return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); } +static ssize_t disk_hiddens_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", device_is_hidden(dev)); +} + +static ssize_t disk_hiddens_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct gendisk *disk = dev_to_disk(dev); + bool hide; + int ret; + + ret = kstrtobool(buf, &hide); + if (ret) + return ret; + + if (hide != device_is_hidden(dev)) + ret = hide ? hide_disk(disk) : unhide_disk(disk); + + return ret ?: len; +} + static DEVICE_ATTR(range, 0444, disk_range_show, NULL); static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL); @@ -1192,6 +1268,8 @@ static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); +static DEVICE_ATTR(hiddens, S_IRUGO | S_IWUSR, disk_hiddens_show, + disk_hiddens_store); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store); @@ -1220,6 +1298,7 @@ static struct attribute *disk_attrs[] = { #ifdef CONFIG_FAIL_IO_TIMEOUT &dev_attr_fail_timeout.attr, #endif + &dev_attr_hiddens.attr, NULL }; @@ -1381,6 +1460,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); while ((hd = disk_part_iter_next(&piter))) { inflight = part_in_flight(gp->queue, hd); + sync_io_ticks(hd, inflight > 0 || blk_queue_quiesced(gp->queue)); seq_printf(seqf, "%4d %7d %s " "%lu %lu %lu %u " "%lu %lu %lu %u " diff --git a/block/ioctl.c b/block/ioctl.c index 15a0eb80ada9..ce16477b7b2e 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -204,8 +204,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, uint64_t range[2]; uint64_t start, len; struct request_queue *q = bdev_get_queue(bdev); - struct address_space *mapping = bdev->bd_inode->i_mapping; - + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -226,7 +225,11 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, if (start + len > i_size_read(bdev->bd_inode)) return -EINVAL; - 
truncate_inode_pages_range(mapping, start, start + len - 1); + + err = truncate_bdev_range(bdev, mode, start, start + len - 1); + if (err) + return err; + return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, flags); } @@ -235,8 +238,8 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, unsigned long arg) { uint64_t range[2]; - struct address_space *mapping; uint64_t start, end, len; + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -258,8 +261,9 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, return -EINVAL; /* Invalidate the page cache, including dirty pages */ - mapping = bdev->bd_inode->i_mapping; - truncate_inode_pages_range(mapping, start, end); + err = truncate_bdev_range(bdev, mode, start, end); + if (err) + return err; return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); diff --git a/block/partition-generic.c b/block/partition-generic.c index aee643ce13d1..ee320fbb19d8 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -123,6 +123,7 @@ ssize_t part_stat_show(struct device *dev, unsigned int inflight; inflight = part_in_flight(q, p); + sync_io_ticks(p, inflight > 0 || blk_queue_quiesced(q)); return sprintf(buf, "%8lu %8lu %8llu %8u " "%8lu %8lu %8llu %8u " @@ -379,6 +380,9 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, if (err) goto out_put; + if (device_is_hidden(ddev)) + device_hide(pdev); + err = -ENOMEM; p->holder_dir = kobject_create_and_add("holders", &pdev->kobj); if (!p->holder_dir) diff --git a/certs/blacklist.c b/certs/blacklist.c index ec00bf337eb6..025a41de28fd 100644 --- a/certs/blacklist.c +++ b/certs/blacklist.c @@ -153,7 +153,7 @@ static int __init blacklist_init(void) KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH, KEY_ALLOC_NOT_IN_QUOTA | - KEY_FLAG_KEEP, + KEY_ALLOC_SET_KEEP, NULL, NULL); if (IS_ERR(blacklist_keyring)) panic("Can't allocate system blacklist keyring\n"); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 3d8e53010cda..4a2e91baabde 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -128,21 +129,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - unsigned int nokey = ask->nokey_refcnt; - bool last = nokey && !ask->refcnt; + unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); - local_bh_disable(); - bh_lock_sock(sk); - ask->nokey_refcnt -= nokey; - if (!last) - last = !--ask->refcnt; - bh_unlock_sock(sk); - local_bh_enable(); + if (nokey) + atomic_dec(&ask->nokey_refcnt); - if (last) + if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); @@ -152,7 +147,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct sockaddr_alg *sa = (void *)uaddr; + struct sockaddr_alg_new *sa = (void *)uaddr; const struct af_alg_type *type; void *private; int err; @@ -160,7 +155,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (sock->state == SS_CONNECTED) return -EINVAL; - if (addr_len < sizeof(*sa)) + BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) != + offsetof(struct sockaddr_alg, salg_name)); + BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa)); + + if (addr_len < sizeof(*sa) + 1) 
return -EINVAL; /* If caller uses non-allowed flag, return error. */ @@ -168,7 +167,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; sa->salg_type[sizeof(sa->salg_type) - 1] = 0; - sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0; + sa->salg_name[addr_len - sizeof(*sa) - 1] = 0; type = alg_get_type(sa->salg_type); if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) { @@ -187,7 +186,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt | ask->nokey_refcnt) + if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); @@ -236,7 +235,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, int err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; @@ -301,12 +300,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - if (nokey || !ask->refcnt++) + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); - ask->nokey_refcnt += nokey; + if (nokey) { + atomic_inc(&ask->nokey_refcnt); + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); + } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; - alg_sk(sk2)->nokey_refcnt = nokey; newsock->ops = type->ops; newsock->state = SS_CONNECTED; @@ -639,6 +640,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -738,9 +740,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -758,7 +761,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -847,10 +852,17 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } + ctx->init = true; if (init) { ctx->enc = enc; diff --git a/crypto/algapi.c b/crypto/algapi.c index bb8329e49956..fff52bc9d97d 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -374,7 +374,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) err = wait_for_completion_killable(&larval->completion); WARN_ON(err); if (!err) - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); + crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); out: crypto_larval_kill(&larval->alg); diff --git a/crypto/algboss.c b/crypto/algboss.c index a62149d6c839..2d41e67532c0 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -188,8 +188,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index eb1910b6d434..e62d735ed266 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); skcipher_request_set_sync_tfm(skreq, null_tfm); - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(skreq, src, dst, len, NULL); @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } @@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, areq->outlen = outlen; aead_request_set_callback(&areq->cra_u.aead_req, - CRYPTO_TFM_REQ_MAY_BACKLOG, + CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) + if (err == -EINPROGRESS) return -EIOCBQUEUED; sock_put(sk); } else { /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &ctx->wait); err = crypto_wait_req(ctx->enc ? 
@@ -384,7 +385,7 @@ static int aead_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -396,11 +397,8 @@ static int aead_check_key(struct socket *sock) if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -561,12 +559,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; - ctx->aead_assoclen = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 178f4cd75ef1..8673ac8828e9 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index e2c8ab408bed..30069a92a9b2 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } @@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. 
@@ -127,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) + if (err == -EINPROGRESS) return -EIOCBQUEUED; sock_put(sk); @@ -215,7 +211,7 @@ static int skcipher_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -227,11 +223,8 @@ static int skcipher_check_key(struct socket *sock) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -340,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); @@ -347,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/api.c b/crypto/api.c index eda0c56b8615..c71d1485541c 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -568,7 +568,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; - if (unlikely(!mem)) + if (IS_ERR_OR_NULL(mem)) return; alg = tfm->__crt_alg; diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c index 5154e280ada2..08baa10a254b 100644 --- a/crypto/asymmetric_keys/asym_tpm.c +++ b/crypto/asymmetric_keys/asym_tpm.c @@ -370,7 +370,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf) memcpy(cur, e, sizeof(e)); cur += sizeof(e); /* Zero parameters to satisfy set_pub_key ABI. 
*/ - memset(cur, 0, SETKEY_PARAMS_SIZE); + memzero_explicit(cur, SETKEY_PARAMS_SIZE); return cur - buf; } diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..e5fae4e838c0 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index a8248f8e2777..85328522c5ca 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -154,7 +154,7 @@ int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) EXPORT_SYMBOL_GPL(cast6_setkey); /*forward quad round*/ -static inline void Q(u32 *block, u8 *Kr, u32 *Km) +static inline void Q(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); @@ -164,7 +164,7 @@ static inline void Q(u32 *block, u8 *Kr, u32 *Km) } /*reverse quad round*/ -static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) +static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[3] ^= F1(block[0], Kr[3], Km[3]); @@ -173,13 +173,14 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) block[2] ^= F1(block[3], Kr[0], Km[0]); } -void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { + const struct cast6_ctx *c = ctx; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 *Km; - u8 *Kr; + const u32 *Km; + const u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); @@ -211,13 +212,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } -void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) +void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { + const struct cast6_ctx *c = ctx; const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; - u32 *Km; - u8 *Kr; + const u32 *Km; + const u8 *Kr; block[0] = be32_to_cpu(src[0]); block[1] = be32_to_cpu(src[1]); diff --git a/crypto/drbg.c b/crypto/drbg.c index b6929eb5f565..04379ca624cd 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1294,8 +1294,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), GFP_KERNEL); - if (!drbg->prev) + if (!drbg->prev) { + ret = -ENOMEM; goto fini; + } drbg->fips_primed = false; } diff --git a/crypto/ecdh.c b/crypto/ecdh.c index bd599053a8c4..46570b517175 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, struct ecdh params; unsigned int ndigits; - if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 || + params.key_size > sizeof(ctx->private_key)) return -EINVAL; ndigits = ecdh_supported_curve(params.curve_id); @@ -53,12 +54,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); - if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, - (const u64 *)params.key, params.key_size) < 0) - return -EINVAL; - memcpy(ctx->private_key, params.key, params.key_size); + if 
(ecc_is_key_valid(ctx->curve_id, ctx->ndigits, + ctx->private_key, params.key_size) < 0) { + memzero_explicit(ctx->private_key, params.key_size); + return -EINVAL; + } return 0; } diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index 66fcb2ea8154..fca63b559f65 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c @@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH) return -EINVAL; + if (unlikely(len < secret.len)) + return -EINVAL; + ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); if (secret.len != crypto_ecdh_key_len(params)) diff --git a/crypto/lrw.c b/crypto/lrw.c index be829f6afc8e..3d40e1f32bea 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -289,7 +289,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -401,7 +401,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/crypto/rng.c b/crypto/rng.c index 1e21231f71c9..608d6ce1440a 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -34,7 +34,6 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) u8 *buf = NULL; int err; - crypto_stats_get(alg); if (!seed && slen) { buf = kmalloc(slen, GFP_KERNEL); if (!buf) @@ -46,6 +45,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) seed = buf; } + crypto_stats_get(alg); err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); crypto_stats_rng_seed(alg, err); out: diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 56fa665a4f01..492c1d0bfe06 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -449,8 +449,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) } EXPORT_SYMBOL_GPL(serpent_setkey); -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) { + const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; @@ -514,8 +515,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) __serpent_encrypt(ctx, dst, src); } -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) +void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) { + const struct serpent_ctx *ctx = c; const u32 *k = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 83ad0b1fab30..0cece1f883eb 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -198,8 +198,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -468,8 +468,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc, return ret; } - printk("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + 
pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } @@ -759,8 +759,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1196,8 +1196,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1434,8 +1434,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc, return ret; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } diff --git a/crypto/xts.c b/crypto/xts.c index ab117633d64e..9d72429f666e 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -328,7 +328,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_cipher(ctx->tweak); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -439,7 +439,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/dist/Makefile b/dist/Makefile new file mode 100644 index 000000000000..f2d2ac0ae0af --- /dev/null +++ b/dist/Makefile @@ -0,0 +1,388 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# A simple and clean build system initially inspired by Fedora ARK Kernel and Tencent Linux Kernel public. +# + +TOPDIR := $(shell git rev-parse --show-toplevel) +ifeq ("$(TOPDIR)", "") + TOPDIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))/..) +endif + +DISTPATH = dist +DISTDIR := $(TOPDIR)/$(DISTPATH) +ifeq ("$(abspath $(DISTDIR)/Makefile)", "$(lastword $(MAKEFILE_LIST))") +$(error Can't detect Makefile, aborting) +endif + +### Downstream marker, update this when forking to another downstream +KDIST = stable +### NOTE: This export only applies to sub targets, for global commands, export KDIST explicitly in shell. 
+export KDIST := $(KDIST)
+
+### Vendor mark
+VENDOR = opencloudos
+VENDOR_CAPITALIZED = OpenCloudOS
+URL = https://gitee.com/OpenCloudOS/OpenCloudOS-Kernel
+
+### Get native arch for binary build by default
+NATIVE_ARCH := $(shell uname -m | sed -e 's/amd64/x86_64/;s/arm64/aarch64/;s/*86$$/x86/')
+
+###### Build parameters, change them with `make <PARAM>=<VALUE>` ######
+# When building a binary package, which arch to build against
+ARCH := $(NATIVE_ARCH)
+# ARCH to be covered by spec file
+SPEC_ARCH := x86_64 aarch64 riscv64
+# Which kernel config to use; this build system supports multiple config targets,
+# list the available config targets with scripts/ls-config.sh
+CONFIG := generic-release
+# Build a specific tag/commit with `make TAG=<tag>` or `make COMMIT=<commit>`
+TAG := $(COMMIT)
+# Extra RPM flags
+RPMFLAGS :=
+####### Build parameters end ######
+
+### Basic variables
+DISTCONFIGDIR = $(DISTDIR)/configs
+DISTWORKDIR = $(DISTDIR)/workdir
+
+DISTTEMPLATES = $(wildcard $(DISTDIR)/templates/*)
+DISTCONFIGS = $(shell find $(DISTDIR)/configs -type f)
+DISTSCRIPTS = $(wildcard $(DISTDIR)/scripts/*)
+DISTSOURCES = $(wildcard $(DISTDIR)/sources/*)
+DISTKABIS = $(wildcard $(DISTDIR)/kabi/*)
+DISTFILES = $(DISTDIR) $(DISTTEMPLATES) $(DISTCONFIGS) $(DISTSCRIPTS) $(DISTSOURCES) $(DISTKABIS)
+
+### Force unify arch name, so Kbuild and other Makefiles can work seamlessly
+# BUILD_ARCH is for RPM, Dist make
+BUILD_ARCH := $(ARCH)
+override BUILD_ARCH := $(shell echo $(BUILD_ARCH) | sed -e 's/amd64/x86_64/;s/arm64/aarch64/;s/*86$$/x86/')
+# ARCH is for Kbuild
+override ARCH := $(shell echo $(BUILD_ARCH) | sed -e 's/amd64/x86_64/;s/aarch64/arm64/;s/*86$$/x86/')
+
+### Check if TAG is valid
+TAG := HEAD
+# If COMMIT is specified, this line won't take effect
+COMMIT := $(shell git rev-parse --verify --quiet $(TAG))
+ifeq ($(COMMIT),)
+$(error Invalid git reference, tag '$(TAG)' commit '$(COMMIT)', aborting)
+endif
+# Prefer using TAG, if not set, use COMMIT
+TAG := $(COMMIT)
+GITREF := $(TAG)
+
+### Build files
+KFULLVER := $(shell KDIST=$(KDIST) $(DISTDIR)/scripts/get-version.sh $(GITREF) vr)
+CONFIGFILE := $(shell KDIST=$(KDIST) $(DISTDIR)/scripts/ls-config-files.sh $(CONFIG))
+WORKDIRS = $(DISTWORKDIR) $(RPM_TOPDIR) $(RPM_BUILDDIR) $(RPM_RPMDIR) $(RPM_SOURCEDIR) $(RPM_SPECDIR) $(RPM_SRCRPMDIR) $(RPM_BUILDROOTDIR)
+SPECFILE = $(RPM_SOURCEDIR)/kernel.spec
+TARFILE = $(RPM_SOURCEDIR)/kernel-$(KFULLVER).tar
+TESTPATCH = $(RPM_SOURCEDIR)/linux-kernel-test.patch
+ifeq ($(CONFIGFILE),)
+$(error Invalid CONFIG value '$(CONFIG)', no matching config target found)
+endif
+
+### RPM files
+RPM_TOPDIR := $(DISTDIR)/rpm
+RPM_BUILDDIR := $(RPM_TOPDIR)/BUILD
+RPM_RPMDIR := $(RPM_TOPDIR)/RPMS
+RPM_SOURCEDIR := $(RPM_TOPDIR)/SOURCES
+RPM_SPECDIR := $(RPM_TOPDIR)/SPECS
+RPM_SRCRPMDIR := $(RPM_TOPDIR)/SRPMS
+RPM_BUILDROOTDIR := $(RPM_TOPDIR)/BUILDROOT
+
+### RPM build options
+# All params enabled by default (except kABI check, see below); DEFAULT_ENABLED overrides DEFAULT_DISABLED.
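# A minimal sketch of the parameter interface described above (illustrative
# values only; per the dist-help text further below, these overrides apply to
# most sub-commands, not just dist-rpm/dist-srpm):
#
#   make dist-rpm ARCH=aarch64 CONFIG=generic-release    # cross-arch binary build
#   make dist-srpm TAG=v5.4                              # build from a specific tag
#   make dist-rpm RPMFLAGS="--without perf"              # extra flags passed straight to rpmbuild
#   make dist-rpm KABI=1 MODSIGN=0 NODEP=1               # flip the option shortcuts defined below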
+DEFAULT_DISABLED=
+DEFAULT_ENABLED=
+
+## A few shortcuts for commonly used params:
+# Disable kABI check by default
+KABI=0
+ifeq ($(KABI), 0)
+override DEFAULT_DISABLED := kabichk $(DEFAULT_DISABLED)
+endif
+
+# Enable module signing by default
+MODSIGN=1
+ifeq ($(MODSIGN), 0)
+override DEFAULT_DISABLED := modsign $(DEFAULT_DISABLED)
+endif
+
+# Allow skipping the RPM dependency check
+NODEP=0
+ifeq ($(NODEP), 1)
+override RPMFLAGS := --nodeps $(RPMFLAGS)
+endif
+
+# Crossbuild
+ifneq ($(BUILD_ARCH),$(NATIVE_ARCH))
+RPMCROSSFLAGS = --with crossbuild --target $(BUILD_ARCH) --define "_cross_compile $(BUILD_ARCH)-linux-gnu-"
+else
+RPMCROSSFLAGS = --without crossbuild
+endif
+
+# For re-distributing to another distro
+ifneq ($(DIST),)
+override RPMFLAGS := --define "dist .$(DIST)" $(RPMFLAGS)
+$(info "NOTE: DIST is set, building for another distro $(DIST)")
+$(info "      You shouldn't do this unless you know what you are doing.")
+endif
+
+# Always make sure workdir exists
+$(shell mkdir -p $(WORKDIRS))
+
+default: dist-help
+
+$(TARFILE):
+	@echo "Generating kernel source tar: $(TARFILE)"
+	@cd $(TOPDIR); git archive $(GITREF) $(TOPDIR) --format=tar --prefix=kernel-$(KFULLVER)/ --output $(TARFILE)
+
+dist-tarball: $(TARFILE)
+	@echo "$(TARFILE)"
+
+$(CONFIGFILE): $(DISTFILES)
+	@echo "Generating kernel config style '$(CONFIG)'"
+	@$(DISTDIR)/scripts/gen-configs.sh "$(CONFIG)" "$(GITREF)"
+
+dist-configs: $(CONFIGFILE)
+dist-config: dist-configs
+	rm -f $(TOPDIR)/.config
+	cp $(RPM_SOURCEDIR)/$(CONFIG).$(BUILD_ARCH).config $(TOPDIR)/.config
+	@printf "\033[0;32mDefault kernel config copied as $(TOPDIR)/.config\033[0m\n"
+
+# TODO: Building from an unclean tree does not work yet,
+# as the unclean state can't be detected by make.
+# $(TESTPATCH): always-rebuild
+#	@git diff --no-renames HEAD -- ":(exclude)$(DISTDIR)" > $(TESTPATCH)
+#	@[ -s "$(TESTPATCH)" ] && echo "Building from an unclean tree" || :
+
+# TODO: Remove always-rebuild - currently these targets depend on the git worktree or variables.
+.PHONY: always-rebuild
+$(SPECFILE): always-rebuild
+	@echo "Generating kernel RPM spec: $(SPECFILE)"
+	@$(DISTDIR)/scripts/gen-spec.sh \
+		--gitref "$(GITREF)" \
+		--build-arch "$(SRPM_ARCH)" \
+		--kernel-config "$(CONFIG)" \
+		--set-default-disabled "$(DEFAULT_DISABLED)" \
+		--set-default-enabled "$(DEFAULT_ENABLED)" \
+		> $(SPECFILE)
+	@grep -A2 "# == Package options ==" $(SPECFILE) | cut -c3-
+
+dist-specfile: $(SPECFILE) dist-configs
+	@echo "$(SPECFILE)"
+
+dist-sources: dist-configs $(TARFILE) $(DISTSOURCES) $(DISTKABIS) $(SPECFILE)
+	@cp $(DISTSOURCES) $(DISTKABIS) $(RPM_SOURCEDIR)
+
+define DO_RPMBUILD
+@echo "=== DISTBUILD ==="
+@echo "Building kernel: $(shell rpmspec -q --qf "%{name}-%{version}-%{release}\n" --srpm $(SPECFILE))"
+@echo "Kernel uname-r: $(shell rpmspec -q --provides $(SPECFILE) | grep kernel-uname-r | awk -F ' = ' '{print $$2}')"
+@echo "Config style: $(CONFIG)"
+@echo "RPM build flags: $(1)"
+@echo "=== RPMBUILD ==="
+rpmbuild \
+	--define '_topdir $(RPM_TOPDIR)' \
+	--define '_builddir $(RPM_BUILDDIR)' \
+	--define '_rpmdir $(RPM_RPMDIR)' \
+	--define '_sourcedir $(RPM_SOURCEDIR)' \
+	--define '_specdir $(RPM_SPECDIR)' \
+	--define '_srcrpmdir $(RPM_SRCRPMDIR)' \
+	--define '_buildrootdir $(RPM_BUILDROOTDIR)' \
+	$(SPECFILE) $(1)
+endef
+
+dist-srpm: dist-sources
+	$(call DO_RPMBUILD,-bs --nodeps --rmsource --rmspec $(RPMFLAGS))
+
+dist: dist-rpm
+dist-rpms: dist-rpm
+dist-rpm: dist-sources
+	$(call DO_RPMBUILD,-bb $(RPMCROSSFLAGS) --rmsource --rmspec $(RPMFLAGS))
+
+dist-prep: dist-sources
+	$(call DO_RPMBUILD,-bp --nodeps $(RPMCROSSFLAGS) $(RPMFLAGS))
+
+dist-new-release: dist-new-maj-release
+
+dist-new-maj-release:
+	@$(DISTDIR)/scripts/make-release.sh --maj-release
+
+dist-new-sub-release:
+	@$(DISTDIR)/scripts/make-release.sh --sub-release
+
+dist-clean:
+	@for i in $(RPM_TOPDIR)/* $(DISTWORKDIR); do \
+		echo Cleaning up $$i; \
+		rm -rf $$i/*; \
+		rm -rf $$i/.* 2>/dev/null; \
+	done; :;
+
+BUILDDEPS=$(shell rpmspec -q --buildrequires $(SPECFILE) | cut -d ' ' -f 1)
+MISSINGDEPS=$(shell echo "$(BUILDDEPS)" | xargs -n1 echo | while read -r _d; do rpm -q --whatprovides "$$_d" >/dev/null || echo "$$_d"; done)
+dist-check-buildrequires: dist-specfile
+	@if [ -n "$(MISSINGDEPS)" ]; then \
+		echo "Error: Build dependency packages missing, please install: $(MISSINGDEPS)"; \
+		echo "Hint: You can try running \`make dist-install-buildrequires\` to fix this."; \
+		exit 1; \
+	fi;
+
+dist-install-buildrequires: dist-specfile
+	@if [ -n "$(MISSINGDEPS)" ]; then \
+		echo "Installing kernel build dependencies using yum..."; \
+		echo "Missing dependency packages: '$(MISSINGDEPS)'"; \
+		if [ -x /usr/bin/yum ]; then \
+			echo "Trying to install..."; \
+			echo "$(MISSINGDEPS)" | sudo xargs yum install -y && exit 0; \
+		else \
+			echo "Yum is not available for current user."; \
+		fi; \
+		echo "Error: Installation failed."; \
+		exit 1; \
+	fi
+
+dist-check-requires: dist-check-buildrequires
+	@if [ ! -x /usr/bin/python3 ]; then \
+		echo "ERROR: Python 3 is required." ; \
+		exit 1; \
+	fi
+	@if [ ! -x /usr/bin/git ]; then \
+		echo "ERROR: Git is required." ; \
+		exit 1; \
+	fi
+
+dist-format-config:
+	@$(DISTDIR)/scripts/format-configs.sh
+dist-format-configs: dist-format-config
+
+dist-check-new-config:
+	@echo "Checking for unset Kconfig..."
+	@$(DISTDIR)/scripts/check-configs.sh check-new-configs $(CONFIG)
+dist-check-new-configs: dist-check-new-config
+
+dist-fix-new-config:
+	@echo "Fixing unset Kconfig issue..."
+	@$(DISTDIR)/scripts/check-configs.sh check-new-configs --autofix $(CONFIG)
+dist-fix-new-configs: dist-fix-new-config
+
+dist-check-dup-config:
+	@echo "Checking for duplicated Kconfig..."
+	@$(DISTDIR)/scripts/check-configs.sh check-dup-configs $(CONFIG)
+dist-check-dup-configs: dist-check-dup-config
+
+dist-fix-dup-config:
+	@echo "Fixing duplicated Kconfig issue..."
+	@$(DISTDIR)/scripts/check-configs.sh check-dup-configs --autofix $(CONFIG)
+dist-fix-dup-configs: dist-fix-dup-config
+
+dist-check-diff-config:
+	@echo "Checking for invalid Kconfig..."
+	@$(DISTDIR)/scripts/check-configs.sh check-diff-configs $(CONFIG)
+dist-check-diff-configs: dist-check-diff-config
+
+dist-check-config: dist-check-new-config dist-check-dup-config dist-check-diff-config
+dist-check-configs: dist-check-config
+
+dist-check-commit:
+	@echo "Checking for new commits..."
+	@$(DISTDIR)/scripts/check-commits.sh
+dist-check-commits: dist-check-commit
+
+dist-check-tag:
+	@echo "Checking tag of '$(TAG)' ..."
+	@$(DISTDIR)/scripts/check-tag.sh '$(TAG)'
+
+dist-check-kabi: dist-sources
+	@echo "Checking kABI for $(BUILD_ARCH)..."
+	$(call DO_RPMBUILD,-bi $(RPMCROSSFLAGS) \
+		--without doc --without headers --without perf --without tools --without bpftool \
+		--without debuginfo --without selftest --without modsign \
+		--with kabichk \
+	)
+
+dist-update-kabi: KABI_WORKDIR:=$(shell mktemp -d $(DISTWORKDIR)/kabi.XXXX)
+dist-update-kabi: RPM_BUILDDIR:=$(KABI_WORKDIR)
+dist-update-kabi: dist-sources dist-check-buildrequires
+	@echo "Updating kABI for $(BUILD_ARCH)..."
+	$(call DO_RPMBUILD,-bc $(RPMCROSSFLAGS) \
+		--without doc --without headers --without perf --without tools --without bpftool \
+		--without debuginfo --without selftest --without modsign --without kabichk \
+	)
+	MODSYM=$$(find $(KABI_WORKDIR) -maxdepth 3 -type f -name 'Module.symvers'); \
+	if [ -f "$$MODSYM" ]; then \
+		echo "Using $$MODSYM as kABI reference file."; \
+		$(DISTDIR)/scripts/helper/update-kabi.sh \
+			$(DISTDIR)/kabi/Module.kabi_$(BUILD_ARCH) $$MODSYM > $(KABI_WORKDIR)/Module.kabi.updated; \
+		cp $(KABI_WORKDIR)/Module.kabi.updated $(DISTDIR)/kabi/Module.kabi_$(BUILD_ARCH); \
+	else \
+		echo "Failed to find Module.symvers."; \
+		exit 1; \
+	fi
+
+# Do a fast check
+dist-check: dist-check-commit dist-check-new-config
+dist-help:
+	@echo 'This helps you manage, release, and develop the $(VENDOR_CAPITALIZED) Linux kernel.'
+	@echo 'Use the make targets below as sub-commands:'
+	@echo
+	@echo 'NOTE: Before you submit any patch, please see the "Sanity check" targets and at least run:'
+	@echo '`make dist-check`'
+	@echo 'And ensure there is no error.'
+	@echo
+	@echo 'For building a kernel RPM distribution package:'
+	@echo '  dist, dist-rpms            - Alias to dist-rpm.'
+	@echo '  dist-rpm                   - Create the binary RPMs for the kernel and put them under:'
+	@echo '                               $(RPM_RPMDIR)'
+	@echo '  dist-srpm                  - Create a source RPM and put it under:'
+	@echo '                               $(RPM_SRCRPMDIR)'
+	@echo '  dist-prep                  - Prep the kernel dist build source code under:'
+	@echo '                               $(RPM_BUILDDIR)'
	@echo
+	@echo 'Available params (most of these params are usable for all sub-commands, not just dist-rpm/dist-srpm):'
+	@echo '  ARCH="$(ARCH)"'
+	@echo '      Target ARCH used for binary/RPM build.'
+	@echo '  SPEC_ARCH="$(SPEC_ARCH)"'
+	@echo '      Target ARCH covered by the spec/SRPM build.'
+	@echo '  TAG="$(TAG)" (or COMMIT="$(COMMIT)")'
+	@echo '      Specify a git tag or commit, and this Makefile will build the kernel from that version.'
+	@echo '  RPMFLAGS="$(RPMFLAGS)"'
+	@echo '      Extra RPM flags to be passed to rpmbuild for RPM-building related commands.'
+	@echo '  CONFIG="$(CONFIG)"'
+	@echo '      Which kernel config to use; the $(VENDOR_CAPITALIZED) build system supports multiple config targets.'
+	@echo '      Available targets:'
+	@$(DISTDIR)/scripts/ls-config-targets.sh | xargs -n3 printf "      %s %s %s\n"
+	@echo
+	@echo 'To make a new kernel version release:'
+	@echo '  dist-new-release           - Update changelog, increase release number and tag a new commit properly.'
+	@echo '  dist-new-sub-release       - Same as dist-new-release but increase the sub release number (e.g. 5.18.0-1, 5.18.0-1.1, 5.18.0-1.2, ...).'
+	@echo
+	@echo 'Build prepare:'
+	@echo '  dist-check-requires        - Check build-time and dist-target package dependencies.'
+	@echo '  dist-check-buildrequires   - Check build-time package dependencies.'
+	@echo '  dist-install-buildrequires - Check and automatically install build-time dependencies, requires root.'
+	@echo
+	@echo 'Sanity check:'
+	@echo '  dist-check-commit          - Check commit message and patch.'
+	@printf "  \033[1;33m* Please at least ensure this check doesn't raise any error before submitting a patch.\033[0m\n"
+	@echo '  dist-check-diff-config     - Check for invalid configs. If a CONFIG_XXX=y is set in a config file but gone after make oldconfig,'
+	@echo '                               it is considered an invalid config. There are many potential reasons for this: a changed or missing'
+	@echo '                               config dependency, a deprecated config, or auto-selected configs; this tool will also provide some hints.'
+	@echo '  dist-check-dup-config      - Check for duplicated configs across sub-arches.'
+	@echo '  dist-check-new-config      - Check for unset configs. If any config is not set, kbuild will prompt during build time.'
+	@echo '                               This helper will check for unset configs so you can set them properly in the predefined config files.'
+	@echo
+	@echo 'Code clean up:'
+	@echo '  dist-fix-new-config        - Same as dist-check-new-configs but automatically sets unset configs to their default values and'
+	@echo '                               updates config files.'
+	@echo '  dist-fix-dup-config        - Automatically remove duplicated configs.'
+	@echo "  dist-format-config         - Sort and simplify the base config files, won't change the final config output."
+	@printf '  \033[1;33m* Please run this before submitting any config change.\033[0m\n'
+	@echo '  dist-clean                 - Clean up directories under:'
+	@echo '                               $(DISTDIR)'
+	@echo
+	@echo 'Configuration targets:'
+	@echo '  dist-configs               - Generate dist config files, using the config matrix in:'
+	@echo '                               $(DISTDIR)/configs/'
+	@echo '  dist-config                - Generate configs, and override the .config file with the config corresponding'
+	@echo '                               to arch $(BUILD_ARCH)'
+	@echo
diff --git a/dist/README b/dist/README
new file mode 100644
index 000000000000..a171f5f91eac
--- /dev/null
+++ b/dist/README
@@ -0,0 +1,10 @@
+RPM based dist Makefile and build system for the Linux kernel.
+
+Run `make dist-help` to see the usage.
+
+Authors:
+Kairui Song
+Katrin Zhou
+Kaixu Xia
+
+Bundled with the Linux kernel, licensed under the GPLv2
diff --git a/dist/configs/00base/defconfig/aarch64.config b/dist/configs/00base/defconfig/aarch64.config
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dist/configs/00base/defconfig/default.config b/dist/configs/00base/defconfig/default.config
new file mode 100644
index 000000000000..2c8761954e63
--- /dev/null
+++ b/dist/configs/00base/defconfig/default.config
@@ -0,0 +1,8 @@
+CONFIG_BPF_SYSCALL=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_IKCONFIG=m
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
diff --git a/dist/configs/00base/defconfig/x86_64.config b/dist/configs/00base/defconfig/x86_64.config
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dist/configs/00base/generic/aarch64.config b/dist/configs/00base/generic/aarch64.config
new file mode 120000
index 000000000000..884abfae5e4f
--- /dev/null
+++ b/dist/configs/00base/generic/aarch64.config
@@ -0,0 +1 @@
+../../../../arch/arm64/configs/oc.config
\ No newline at end of file
diff --git a/dist/configs/00base/generic/default.config b/dist/configs/00base/generic/default.config
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dist/configs/00base/generic/x86_64.config b/dist/configs/00base/generic/x86_64.config
new file mode 120000
index 000000000000..a0fd2d466818
--- /dev/null
+++ b/dist/configs/00base/generic/x86_64.config
@@ -0,0 +1 @@
+../../../../arch/x86/configs/oc.config
\ No newline at end of file
diff --git a/dist/configs/50variant/debug/aarch64.config b/dist/configs/50variant/debug/aarch64.config
new file mode 100644
index 000000000000..88c8ed1ce096
--- /dev/null
+++ b/dist/configs/50variant/debug/aarch64.config
@@ -0,0 +1,17 @@
+CONFIG_DEBUG_PERF_USE_VMALLOC=y
+# CONFIG_DEBUG_VM is not set
+CONFIG_DEVMEM=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_KASAN=y
+CONFIG_KASAN_GENERIC=y
+# CONFIG_KASAN_HW_TAGS is not set
+CONFIG_KASAN_INLINE=y
+CONFIG_KDB_DEFAULT_ENABLE=0x1
+# CONFIG_LOCK_STAT is not set
+CONFIG_MAILBOX_TEST=m
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_ON_OOPS_VALUE=1
+CONFIG_PERCPU_TEST=m
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_SPI_DEBUG=y
+CONFIG_WQ_WATCHDOG=y
diff --git a/dist/configs/50variant/debug/default.config b/dist/configs/50variant/debug/default.config
new file mode 100644
index 000000000000..243704601619
--- /dev/null
+++ b/dist/configs/50variant/debug/default.config
@@ -0,0 +1,128 @@
+CONFIG_ACPI_APEI_ERST_DEBUG=m
+CONFIG_LOCALVERSION="+debug"
+CONFIG_ACPI_CONFIGFS=m
+CONFIG_ACPI_CUSTOM_METHOD=m
+CONFIG_ACPI_DEBUG=y
+CONFIG_ACPI_DEBUGGER=y
+CONFIG_ACPI_DEBUGGER_USER=m
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_TRACING=y
+CONFIG_ATH_DEBUG=y
+# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set
+CONFIG_CAN_DEBUG_DEVICES=y
+CONFIG_CEPH_LIB_PRETTYDEBUG=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CPUMASK_OFFSTACK=y
+CONFIG_CRYPTO_DEV_CCP_DEBUGFS=y
+CONFIG_CSD_LOCK_WAIT_DEBUG=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=40000
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_PAGEALLOC=y
+# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set
+CONFIG_DEBUG_PAGE_REF=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_PREEMPT=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_RWSEMS=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGTABLE=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_DMABUF_DEBUG=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_DMA_API_DEBUG=y
+CONFIG_DMA_API_DEBUG_SG=y
+CONFIG_EDAC_DEBUG=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_FAILSLAB=y
+# CONFIG_FAIL_FUNCTION is not set
+CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAIL_MMC_REQUEST=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_SUNRPC=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_FSCACHE_OBJECT_LIST=y
+CONFIG_GENERIC_IRQ_DEBUGFS=y
+CONFIG_I2C_GPIO_FAULT_INJECTOR=y
+CONFIG_IOMMU_DEBUGFS=y
+CONFIG_IP_VS_DEBUG=y
+CONFIG_IWLWIFI_DEBUG=y
+CONFIG_IWLWIFI_DEVICE_TRACING=y
+CONFIG_KASAN_KUNIT_TEST=m
+# CONFIG_KASAN_OUTLINE is not set
+CONFIG_KASAN_VMALLOC=y
+CONFIG_KDB_CONTINUE_CATASTROPHIC=0
+CONFIG_KPROBE_EVENT_GEN_TEST=m
+CONFIG_LATENCYTOP=y
+CONFIG_LOCKDEP_BITS=16
+CONFIG_LOCKDEP_CHAINS_BITS=17
+CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12
+CONFIG_LOCKDEP_STACK_TRACE_BITS=19
+CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14
+CONFIG_LOCK_EVENT_COUNTS=y
+CONFIG_LOCK_STAT=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MMIOTRACE=y
+CONFIG_NET_DEV_REFCNT_TRACKER=y
+CONFIG_NET_NS_REFCNT_TRACKER=y
+CONFIG_NFP_DEBUG=y
+CONFIG_NOUVEAU_DEBUG_MMU=y
+CONFIG_NOUVEAU_DEBUG_PUSH=y
+CONFIG_PAGE_TABLE_CHECK=y
+CONFIG_PAGE_TABLE_CHECK_ENFORCED=y
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PERCPU_STATS=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_PM_TEST_SUSPEND=y
+CONFIG_PM_TRACE_RTC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_QUOTA_DEBUG=y
+CONFIG_RANDOM32_SELFTEST=y
+CONFIG_RC_LOOPBACK=m
+CONFIG_RTLWIFI_DEBUG=y
+CONFIG_RTW88_DEBUG=y
+CONFIG_RTW88_DEBUGFS=y
+CONFIG_RTW89_DEBUGFS=y
+CONFIG_RTW89_DEBUGMSG=y
+CONFIG_SCF_TORTURE_TEST=m
+CONFIG_SND_CTL_VALIDATION=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_JACK_INJECTION_DEBUG=y
+CONFIG_SND_PCM_XRUN_DEBUG=y
+CONFIG_SND_SOC_SOF_DEBUG_PROBES=y
+CONFIG_SND_SOC_SOF_HDA_PROBES=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SYNTH_EVENT_GEN_TEST=m
+CONFIG_TEST_FPU=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_MIN_HEAP=m
+CONFIG_TEST_STRING_HELPERS=m
+# CONFIG_USB_XHCI_DBGCAP is not set
+CONFIG_XFS_WARN=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
diff --git a/dist/configs/50variant/debug/x86_64.config b/dist/configs/50variant/debug/x86_64.config
new file mode 100644
index 000000000000..f981bef2062c
--- /dev/null
+++ b/dist/configs/50variant/debug/x86_64.config
@@ -0,0 +1,9 @@
+CONFIG_HYPERV_TESTING=y
+CONFIG_KASAN=y
+CONFIG_KASAN_GENERIC=y
+CONFIG_KASAN_INLINE=y
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_DEBUG_FPU=y
diff --git a/dist/configs/50variant/release/default.config b/dist/configs/50variant/release/default.config
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dist/kabi/Module.kabi_aarch64 b/dist/kabi/Module.kabi_aarch64
new file mode 120000
index 000000000000..97f2669b56ff
--- /dev/null
+++ b/dist/kabi/Module.kabi_aarch64
@@ -0,0 +1 @@
+../../package/arm/Module.kabi_aarch64
\ No newline at end of file
diff --git a/dist/kabi/Module.kabi_riscv64 b/dist/kabi/Module.kabi_riscv64
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/dist/kabi/Module.kabi_x86_64 b/dist/kabi/Module.kabi_x86_64
new file mode 120000
index 000000000000..407d6f410843
--- /dev/null
+++ b/dist/kabi/Module.kabi_x86_64
@@ -0,0 +1 @@
+../../package/default/Module.kabi_x86_64
\ No newline at end of file
diff --git a/dist/scripts/check-commits.sh b/dist/scripts/check-commits.sh
new file mode 100755
index 000000000000..77edada62593
--- /dev/null
+++ b/dist/scripts/check-commits.sh
@@ -0,0 +1,323 @@
+#!/bin/bash --norc
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check kernel commit messages
+
+# shellcheck source=./lib.sh
+. "$(dirname "$(realpath "$0")")/lib.sh"
+
+usage()
+{
+	cat << EOF
+check-commits.sh ...
+
+	--extra-author	Who is triggering this check; checks for a corresponding Signed-off-by in the commit body.
+			For example, if this script is triggered by a Merge Request, the one who submitted
+			the Merge Request should also sign all commits within the MR.
+
+			Can also be enabled by setting the EXTRA_AUTHOR environment variable.
+
+	--gen-report	Generate an error report for each commit being checked,
+			in the current directory.
+
+			Can also be enabled by setting GEN_REPORT=1 in the environment.
+
+	--output-dir	Specify where the report should be put if '--gen-report' is used.
+			A directory path is expected.
+
+			Defaults to the current directory.
+
+	--vendor-only	Author should be limited to vendor only.
+			(Git commit email address must end with @$VENDOR.com).
+EOF
+}
+
+if ! [[ -x "$TOPDIR/scripts/checkpatch.pl" ]]; then
+	die "This command requires scripts/checkpatch.pl"
+fi
+
+COMMITS=()
+HEAD_COMMIT=
+OUTPUT_DIR=$(pwd)
+VENDOR_ONLY=
+while [[ $# -gt 0 ]]; do
+	case $1 in
+		--extra-author )
+			EXTRA_AUTHOR=$2
+			shift 2
+			;;
+		--output-dir )
+			OUTPUT_DIR=$2
+			shift 2
+			;;
+		--gen-report )
+			GEN_REPORT=1
+			shift
+			;;
+		--vendor-only )
+			VENDOR_ONLY=1
+			shift
+			;;
+		-* )
+			usage
+			exit 1
+			;;
+		* )
+			COMMITS+=( "$1" )
+			shift
+	esac
+done
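+
+# Typical invocations (illustrative):
+#   check-commits.sh HEAD~3..HEAD
+#   GEN_REPORT=1 check-commits.sh --output-dir /tmp/reports HEAD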
+
+if [[ -z "${COMMITS[*]}" ]]; then
+	echo "Checking all commits that are ahead of the remote tracking branch."
+
+	if ! UPSTREAM_BASE=$(git merge-base HEAD "@{u}" 2>/dev/null); then
+		warn "Can't find a valid upstream, will only check the HEAD commit."
+		COMMITS="HEAD"
+	else
+		if [[ $(git rev-list "$UPSTREAM_BASE..HEAD" --count) -eq 0 ]]; then
+			COMMITS="HEAD"
+		else
+			COMMITS="$UPSTREAM_BASE..HEAD"
+		fi
+	fi
+fi
+
+is_valid_commit() {
+	case $1 in
+		*[!0-9A-Fa-f]* | "" )
+			return 1
+			;;
+		* )
+			if [ "${#1}" -lt 12 ] || [ "${#1}" -gt 40 ]; then
+				return 1
+			fi
+			;;
+	esac
+
+	return 0
+}
+
+# Check if a commit id is a valid upstream commit id
+# $1: commit id to check
+is_valid_upstream_commit() {
+	local commit=$1
+	local upstream_repo=(
+		"torvalds https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=<>"
+		"stable https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/patch/?id=<>"
+		"tip https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/patch/?id=<>"
+	)
+	local repo repo_name repo_url commit_url
+
+	for repo in "${upstream_repo[@]}"; do
+		repo_name=${repo%% *}
+		repo_url=${repo#* }
+		commit_url=${repo_url/<>/$commit}
+		if curl --fail --silent --output /dev/null "$commit_url"; then
+			echo "$commit_url"
+			return 0
+		fi
+	done
+
+	return 1
+}
+
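+# For reference, a compliant backported commit message looks roughly like
+# (illustrative values):
+#
+#   subsystem: fix something
+#
+#   Upstream commit: 1135ec008ef3
+#   Conflict: none
+#
+#   Signed-off-by: Author Name <author@example.com>
+#
+# while a downstream-only commit instead uses a "$VENDOR:" summary prefix and
+# an "Upstream status: <no/pending/downstream-only...>" mark.
+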
+# TODO(katinzhou): Needs testing; should also support being run locally to check whether commit messages are compliant
+# Check a commit for all potential issues
+# returns number of issues found
+check_commit() {
+	local ret=0
+	local commit=$1
+	local commit_msg=$2
+
+	local author=$(git log --pretty=format:"%an" "$commit" -1)
+	local author_email=$(git log --pretty=format:"%ae" "$commit" -1)
+	local committer=$(git log --pretty=format:"%cn" "$commit" -1)
+	local committer_email=$(git log --pretty=format:"%ce" "$commit" -1)
+	local commit_body=$(git log --pretty=format:"%b" "$commit" -1)
+	local commit_summary=$(git log --pretty=format:"%s" "$commit" -1)
+
+	# Ignore merge commits
+	if [ "$(git show --no-patch --format="%P" "$commit" | wc -w)" -ne 1 ]; then
+		echo "Found merge commit $commit"
+		echo "NOTICE: This checking routine only follows one parent commit."
+		return 0
+	fi
+
+	local ignore_line="ERROR: Please use git commit description style 'commit <12+ chars of sha1> (\"\")'"
+
+	local patch_content
+	if ! patch_content=$(git format-patch --stdout -1 "$commit"); then
+		echo "ERROR: FATAL: Failed to format $commit into a patch"
+		echo
+		return 1
+	fi
+
+	if ! echo "$commit_body" | grep -iq "^checkpatch:.*\bno\b"; then
+		local patch_err
+		patch_err=$(echo "$patch_content" | "$TOPDIR/scripts/checkpatch.pl" --terse | grep -v "$ignore_line" | grep "ERROR: ")
+
+		if [[ -n "$patch_err" ]]; then
+			echo "ERROR: scripts/checkpatch.pl reported the following error:"
+			echo "       $patch_err"
+			echo
+			ret=$(( ret + 1 ))
+		fi
+	fi
+
+	if ! echo "$commit_body" | grep -q "Signed-off-by: .*\b$author\b"; then
+		echo "ERROR: The commit author ($author) needs to sign the commit, so a Signed-off-by: <$author's sign> is needed"
+		echo
+		ret=$(( ret + 1 ))
+	fi
+
+	if [[ $EXTRA_AUTHOR ]]; then
+		if ! echo "$commit_body" | grep -q "Signed-off-by: .*\b$EXTRA_AUTHOR\b"; then
+			echo "ERROR: The commit co-author (eg. the MR author) needs to sign the commit, so a Signed-off-by: <$EXTRA_AUTHOR's sign> is needed"
+			echo
+			ret=$(( ret + 1 ))
+		fi
+	fi
+
+	upstream_commit=$(echo "$commit_body" | sed -nE "s/^(Upstream commit:|Upstream: commit) (\S+)/\2/pg")
+	upstream_status=$(echo "$commit_body" | sed -nE "s/^(Upstream status:|Upstream:) (\S+)/\2/pg")
+	commits_cnt=$(echo "$upstream_commit" | wc -w)
+
+	if [[ $commit_summary == $VENDOR:* ]]; then
+		if [[ -z "$upstream_status" ]]; then
+			echo "ERROR: It seems this is a downstream commit."
+			echo "       Please add the following mark in the commit message:"
+			echo '       ```'
+			echo '       Upstream status: <no/pending/downstream-only...>'
+			echo '       ```'
+			echo "       to explain this is a downstream commit."
+			echo
+			ret=$(( ret + 1 ))
+		fi
+
+		if [[ $commits_cnt -ne 0 ]]; then
+			echo "ERROR: It seems this is a downstream commit, but an 'Upstream commit:' mark is provided."
+			echo
+			ret=$(( ret + 1 ))
+		fi
+	fi
+
+	# No 'Upstream commit:' mark was found
+	if [[ $commits_cnt -eq 0 ]]; then
+		if [[ $VENDOR_ONLY ]] && [[ $commit_summary != $VENDOR:* ]]; then
+			echo "ERROR: '$VENDOR:' is missing in the commit summary, and no 'Upstream commit:' mark was found."
+			echo "       If this is a downstream commit, please add '$VENDOR:' to the commit summary, and add the mark below to the commit message:"
+			echo
+			ret=$(( ret + 1 ))
+		fi
+
+		if [[ -z "$upstream_status" ]]; then
+			echo "ERROR: No upstream mark found."
+			echo "       If this is a downstream commit, please add this mark:"
+			echo '       ```'
+			echo '       Upstream status: <no/pending/downstream-only...>'
+			echo '       ```'
+			echo "       to explain this is a downstream commit."
+			echo
+			echo "       If this is a backported upstream commit, please add one and only one mark:"
+			echo '       ```'
+			echo "       Upstream commit: <commit id>"
+			echo '       ```'
+			echo "       to indicate which commit is being backported."
+			echo
+			ret=$(( ret + 1 ))
+		elif [[ "$upstream_status" =~ ^[a-f0-9]{7,}$ ]]; then
+			echo "ERROR: It seems you pasted a plain commit id after the 'Upstream:' or 'Upstream status:' mark."
+			echo "       Please use this format instead:"
+			echo '       ```'
+			echo "       Upstream commit: <commit id>"
+			echo '       ```'
+			echo "       If this is actually a downstream commit, please use less confusing wording"
+			echo "       for the 'Upstream status:' mark."
+			echo
+			ret=$(( ret + 1 ))
+		fi
+	else
+		if [[ $commits_cnt -ne 1 ]]; then
+			echo "ERROR: It seems this is an upstream commit, please add one *and only one* upstream commit indicator per commit, in the following format:"
+			echo '       ```'
+			echo "       Upstream commit: <commit id>"
+			echo '       ```'
+			echo "       If this is a downstream commit, please add a \"$VENDOR:\" header to the commit summary."
+			echo
+			ret=$(( ret + 1 ))
+		else
+			if ! is_valid_commit "$upstream_commit"; then
+				echo "ERROR: $upstream_commit is not a valid commit!"
+				echo
+				ret=$(( ret + 1 ))
+			else
+				if ! is_valid_upstream_commit "$upstream_commit" > /dev/null; then
+					echo "ERROR: $upstream_commit is not a valid upstream commit!"
+					echo
+					ret=$(( ret + 1 ))
+				fi
+			fi
+		fi
+
+		conflict_cnt=$(echo "$commit_body" | grep -c "^Conflict: ")
+		if [ "$conflict_cnt" -ne 1 ]; then
+			echo "ERROR: This is an upstream commit, please add one (and only one) conflict indicator in the following format:"
+			echo '       ```'
+			echo "       Conflict: <none/refactored/minor/resolved...>"
+			echo '       ```'
+			echo
+			ret=$(( ret + 1 ))
+		fi
+	fi
+
+	if [[ $VENDOR_ONLY ]] && [[ $author_email != *@$VENDOR.com ]]; then
+		echo "ERROR: The author of this commit is not from $VENDOR.com, did you forget to reset the author?"
+ echo " You can reset author to your self for better backports commits tracking, use:" + echo " > git commit --amend --reset-author" + echo " Or:" + echo " > git commit --amend --author=\"yourname <yourname@$VENDOR.com>\"" + echo " If there are many commits to be modified, use:" + echo " > git filter-branch." + echo + ret=$(( ret + 1 )) + fi + + return $ret +} + +if ! git_logs=$(git log --no-walk=sorted --no-decorate --pretty=oneline --first-parent "${COMMITS[*]}"); then + die "Failed to parse git references '${COMMITS[*]}'" + exit 1 +fi + +IDX=0 +ERROR_MSG="" +echo "$git_logs" | while read -r commit_id commit_msg; do + echo "=== Checking $commit_id ('$commit_msg') ..." + + IDX=$(( IDX + 1 )) + ERROR_MSG=$(check_commit "$commit_id") + RET=$? + OUTFILE=$(printf "$OUTPUT_DIR/%04d-%s.err" $IDX "$commit_id") + + if [[ $RET -ne 0 ]]; then + echo_yellow "Found following issues with $commit_id ('$commit_msg'):" + echo_red "$ERROR_MSG" + if [[ "$GEN_REPORT" ]] && [[ "$GEN_REPORT" != "0" ]]; then + { + echo "$ERROR_MSG" + } > "$OUTFILE" + fi + else + if [[ -n "$ERROR_MSG" ]]; then + echo_yellow "$commit_id have some warnings:" + echo_red "$ERROR_MSG" + else + echo_green "$commit_id looks OK" + fi + fi + + # Newline as seperator + echo +done diff --git a/dist/scripts/check-configs.sh b/dist/scripts/check-configs.sh new file mode 100755 index 000000000000..6fc453147647 --- /dev/null +++ b/dist/scripts/check-configs.sh @@ -0,0 +1,215 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 +# +# This script takes the merged config files and processes them through listnewconfig +# then append the new options to pending config + +# shellcheck source=./lib-config.sh +. "$(dirname "$(realpath "$0")")/lib-config.sh" + +# shellcheck disable=SC2164 +export CONFIG_OUTDIR=$DISTDIR/workdir +AUTOFIX=0 + +usage() +{ + cat << EOF +check-configs.sh [sub command] [--autofix] [ <config targets> ... ] + Available sub command: + + check-new-configs Check for new unset kernel options. + If --autofix (or env var AUTOFIX) is enabled, + will append to pending configs + + check-dup-configs Check for kernel configs of the same value in different archs. + If --autofix (or env var AUTOFIX) is enabled, + will move these configs from arch + specific config to the default base config. + + check-diff-configs Check for kernel configs that failed to enabled, + which means, they are set =y/=m in config matrix, + but end up disabled in the final .config file. + Doesn't support AUTOFIX since issues caused by + Kconfig dependency is complex and always need manual fix. +EOF +} + +# Use make listnewconfig to check new Kconfigs +check_new_configs() { + # First generate plain concated config files + populate_configs "$@" + + # Check if there are unset config + _check_new_kconfigs() { + local arch=$1 config_file=$2 base_file=$3 base_configs new_configs + + echo_green "=== Checking $config_file..." 
+
+# Use make listnewconfig to check new Kconfigs
+check_new_configs() {
+	# First generate plain concatenated config files
+	populate_configs "$@"
+
+	# Check if there are unset configs
+	_check_new_kconfigs() {
+		local arch=$1 config_file=$2 base_file=$3 base_configs new_configs
+
+		echo_green "=== Checking $config_file..."
+		new_configs=$(config_make "$arch" KCONFIG_CONFIG="$config_file" --no-print-directory listnewconfig)
+
+		if [ -n "$new_configs" ]; then
+			echo_yellow "=== The following configs are not set properly:"
+			echo "$new_configs"
+
+			if [[ $AUTOFIX -eq 1 ]]; then
+				echo_green "=== Appending these new configs to the base config file: '$base_file'"
+				echo "$new_configs" >> "$base_file"
+
+				base_configs=$(<"$base_file")
+				{
+					echo "$base_configs"
+					echo "$new_configs"
+				} | config_sanitizer > "$base_file"
+			fi
+		else
+			echo_green "=== Config is all clear"
+		fi
+
+		# Add a newline as separator
+		echo
+	}
+
+	for_each_config_product _check_new_kconfigs "$@"
+}
+
+check_dup_configs() {
+	# Merge config items that are common among different arches with the same value into the base default config.
+	_dedup_single_dir() {
+		local dir=$1
+		local common_conf base_conf
+
+		echo_green "=== Checking config dir '$dir'"
+		for arch in "${CONFIG_ARCH[@]}"; do
+			[[ -e "$dir/$arch.config" ]] || continue
+
+			if [[ -z "$common_conf" ]]; then
+				# Read in the first arch-specific config
+				common_conf=$(config_sanitizer < "$dir/$arch.config")
+			else
+				# Keep the common config items of different arches
+				# The content is filtered with config_sanitizer so `comm` is enough
+				common_conf=$(comm -12 \
+					<(echo "$common_conf") \
+					<(config_sanitizer < "$dir/$arch.config")
+				)
+			fi
+		done
+
+		if [[ "$common_conf" ]]; then
+			echo_yellow "=== Common configs shared by all sub-arches:"
+			echo "$common_conf"
+
+			if [[ $AUTOFIX -eq 1 ]]; then
+				base_conf=$(<"$dir/default.config")
+				{
+					echo "$base_conf"
+					echo "$common_conf"
+				} | config_sanitizer > "$dir/default.config"
+			fi
+		else
+			echo_green "=== Config is all clear"
+		fi
+	}
+
+	_get_config_dirs() {
+		shift; printf "%s\n" "$@"
+	}
+
+	for dir in $(for_each_config_target _get_config_dirs "$@" | sort -u); do
+		_dedup_single_dir "$dir"
+	done
+}
+
+# Generate a diff file using the kernel's diffconfig script after make olddefconfig
+# param: config.file.old config.file
+check_diff_configs() {
+	local ALL_CONFIGS
+
+	[[ -x "$TOPDIR"/scripts/diffconfig ]] || die "This command relies on Linux's in-tree diffconfig script"
+
+	ALL_CONFIGS=$(find "$TOPDIR" -name "Kconfig*" -exec grep -h "^config .*" {} + | sed "s/^config //")
+
+	# First generate plain concatenated config files
+	populate_configs "$@"
+
+	_make_a_copy() {
+		cp "$2" "$2.orig"
+	}
+	for_each_config_product _make_a_copy "$@"
+
+	makedef_configs "$@"
+
+	# Check why a config is not enabled
+	_check_config_issue() {
+		local _config=$1 _config_file=$2
+
+		if ! echo "$ALL_CONFIGS" | grep -q "\b$_config\b"; then
+			echo "$_config no longer exists, maybe it's deprecated or renamed."
+			return
+		fi
+
+		echo "$_config has the following dependencies:"
+		get_all_depends "$_config" "$_config_file" || echo "No dependency found, likely not available on this arch."
+		echo
+	}
+
+	_check_diff_configs() {
+		local arch=$1 config_file=$2 diff_configs
+
+		echo "=== Checking config $config_file"
+
+		echo "= Parsing Kconfig ..."
+		kconfig_parser "$arch" "$TOPDIR/Kconfig" "$config_file"
+
+		echo "= Checking configs ..."
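+		# The diffconfig output is assumed to look like (illustrative):
+		#   +NEWOPT n        - a newly added option
+		#   -GONEOPT y       - a removed option
+		#   SOMEOPT m -> y   - a changed option
+		# with option names printed without the CONFIG_ prefix; the grep
+		# filters and case patterns below rely on this format.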
+		diff_configs=$("$TOPDIR"/scripts/diffconfig "$config_file.orig" "$config_file" |
+			# Ignore newly added configs, they should be covered by check_new_configs
+			# If the kernel didn't ask us to fill a config, then it's an auto-selected config
+			grep -v "^+" | \
+			# Ignore disappeared configs that were unset
+			grep -v "^\-.*n$" \
+		)
+
+		if [[ -z "$diff_configs" ]]; then
+			echo_green "=== Config is all clear"
+			return
+		fi
+
+		echo "$diff_configs" | while read -r _line; do
+			local config config_hint=""
+			case $_line in
+				-*" m" | -*" y" )
+					config=${_line% *}
+					config=${config#-}
+					config_hint=$(_check_config_issue "$config" "$config_file")
+					;;
+			esac
+
+			echo_yellow "$_line"
+			if [[ "$config_hint" ]]; then
+				echo_yellow "$config_hint" | sed "s/^/ /"
+			fi
+			echo
+		done
+		echo
+	}
+
+	for_each_config_product _check_diff_configs "$@"
+}
+
+CMD=$1; shift
+[[ $1 == '--autofix' ]] && { AUTOFIX=1; shift; }
+case "$CMD" in
+	check-new-configs )
+		check_new_configs "$@"
+		;;
+	check-dup-configs )
+		check_dup_configs "$@"
+		;;
+	check-diff-configs )
+		check_diff_configs "$@"
+		;;
+	*)
+		usage
+		exit 1
+		;;
+esac
diff --git a/dist/scripts/check-tag.sh b/dist/scripts/check-tag.sh
new file mode 100755
index 000000000000..dbf3c29b6ad8
--- /dev/null
+++ b/dist/scripts/check-tag.sh
@@ -0,0 +1,19 @@
+#!/bin/bash --norc
+# SPDX-License-Identifier: GPL-2.0
+#
+# $1: tag
+# $2: (opt) git repo
+
+# shellcheck source=./lib-version.sh
+. "$(dirname "$(realpath "$0")")/lib-version.sh"
+
+prepare_kernel_ver "$@"
+
+# If the tag is not recognized, prepare_kernel_ver will version it as a snapshot;
+# use this as an indicator of an invalid tag
+if ! [[ $KTAGRELEASE ]]; then
+	warn "Invalid tag, will override with kernel uname-r '$KERNEL_UNAMER', RPM NVR version '${KERNEL_NAME}-${KERNEL_MAJVER}-${KERNEL_RELVER}'"
+	exit 1
+else
+	info "Tag '$KTAGRELEASE' OK, kernel uname-r '$KERNEL_UNAMER', RPM NVR version '${KERNEL_NAME}-${KERNEL_MAJVER}-${KERNEL_RELVER}'"
+fi
diff --git a/dist/scripts/dummy-tools/gcc b/dist/scripts/dummy-tools/gcc
new file mode 100755
index 000000000000..b2483149bbe5
--- /dev/null
+++ b/dist/scripts/dummy-tools/gcc
@@ -0,0 +1,111 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Starting with v4.18, Kconfig evaluates compiler capabilities, and hides CONFIG
+# options your compiler does not support. This works well if you configure and
+# build the kernel on the same host machine.
+#
+# It is inconvenient if you prepare the .config that is carried to a different
+# build environment (typically this happens when you package the kernel for
+# distros) because using a different compiler potentially produces different
+# CONFIG options than the real build environment. So, you probably want to make
+# as many options visible as possible. In other words, you need to create a
+# super-set of CONFIG options that cover any build environment. If some of the
+# CONFIG options turned out to be unsupported on the build machine, they are
+# automatically disabled by the nature of Kconfig.
+#
+# However, it is not feasible to get a full-featured compiler for every arch.
+# Hence these dummy toolchains to make all compiler tests pass.
+#
+# Usage:
+#
+# From the top directory of the source tree, run
+#
+#   $ make CROSS_COMPILE=scripts/dummy-tools/ oldconfig
+#
+# Most compiler features are tested by cc-option, which simply checks the
+# exit code of $(CC). This script does nothing and just exits with 0 in most
+# cases. So, $(cc-option, ...) is evaluated as 'y'.
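+#
+# For example, a Kconfig compiler capability test like this one (the flag is
+# made up for illustration):
+#
+#   config CC_HAS_FOO
+#           def_bool $(cc-option,-ffoo)
+#
+# only checks the compiler's exit status, so with this dummy gcc it always
+# evaluates to 'y'.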
+#
+# This script caters to more checks; it handles --version and pre-processes __GNUC__
+# etc. to pretend to be GCC, and also does the right things to satisfy some scripts.
+
+# Check if the first parameter appears in the rest. Succeeds if found.
+# This helper is useful for telling whether a particular option was passed to this script.
+# Typically used like this:
+#   arg_contain <word-you-are-searching-for> "$@"
+arg_contain ()
+{
+	search="$1"
+	shift
+
+	while [ $# -gt 0 ]
+	do
+		if [ "$search" = "$1" ]; then
+			return 0
+		fi
+		shift
+	done
+
+	return 1
+}
+
+# To set CONFIG_CC_IS_GCC=y
+if arg_contain --version "$@"; then
+	echo "gcc (scripts/dummy-tools/gcc)"
+	exit 0
+fi
+
+if arg_contain -E "$@"; then
+	# For scripts/cc-version.sh; this emulates GCC 20.0.0
+	if arg_contain - "$@"; then
+		sed -n '/^GCC/{s/__GNUC__/20/; s/__GNUC_MINOR__/0/; s/__GNUC_PATCHLEVEL__/0/; p;}'
+		exit 0
+	else
+		echo "no input files" >&2
+		exit 1
+	fi
+fi
+
+# To set CONFIG_AS_IS_GNU
+if arg_contain -Wa,--version "$@"; then
+	echo "GNU assembler (scripts/dummy-tools) 2.50"
+	exit 0
+fi
+
+if arg_contain -S "$@"; then
+	# For scripts/gcc-x86-*-has-stack-protector.sh
+	if arg_contain -fstack-protector "$@"; then
+		if arg_contain -mstack-protector-guard-reg=fs "$@"; then
+			echo "%fs"
+		else
+			echo "%gs"
+		fi
+		exit 0
+	fi
+
+	# For arch/powerpc/tools/gcc-check-mprofile-kernel.sh
+	if arg_contain -m64 "$@" && arg_contain -mlittle-endian "$@" &&
+		arg_contain -mprofile-kernel "$@"; then
+		if ! test -t 0 && ! grep -q notrace; then
+			echo "_mcount"
+		fi
+		exit 0
+	fi
+fi
+
+# To set GCC_PLUGINS
+if arg_contain -print-file-name=plugin "$@"; then
+	plugin_dir=$(mktemp -d)
+
+	mkdir -p "$plugin_dir/include"
+	touch "$plugin_dir/include/plugin-version.h"
+
+	echo "$plugin_dir"
+	exit 0
+fi
+
+# Inverted return value
+if arg_contain -D__SIZEOF_INT128__=0 "$@"; then
+	exit 1
+fi
diff --git a/dist/scripts/dummy-tools/ld b/dist/scripts/dummy-tools/ld
new file mode 100755
index 000000000000..f68233050405
--- /dev/null
+++ b/dist/scripts/dummy-tools/ld
@@ -0,0 +1,30 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Dummy script that always succeeds.
+
+# Check if the first parameter appears in the rest. Succeeds if found.
+# This helper is useful for telling whether a particular option was passed to this script.
+# Typically used like this:
+#   arg_contain <word-you-are-searching-for> "$@"
+arg_contain ()
+{
+	search="$1"
+	shift
+
+	while [ $# -gt 0 ]
+	do
+		if [ "$search" = "$1" ]; then
+			return 0
+		fi
+		shift
+	done
+
+	return 1
+}
+
+if arg_contain --version "$@" || arg_contain -v "$@"; then
+	progname=$(basename "$0")
+	echo "GNU $progname (scripts/dummy-tools/$progname) 2.50"
+	exit 0
+fi
diff --git a/dist/scripts/dummy-tools/nm b/dist/scripts/dummy-tools/nm
new file mode 120000
index 000000000000..c0648b38dd42
--- /dev/null
+++ b/dist/scripts/dummy-tools/nm
@@ -0,0 +1 @@
+ld
\ No newline at end of file
diff --git a/dist/scripts/dummy-tools/objcopy b/dist/scripts/dummy-tools/objcopy
new file mode 120000
index 000000000000..c0648b38dd42
--- /dev/null
+++ b/dist/scripts/dummy-tools/objcopy
@@ -0,0 +1 @@
+ld
\ No newline at end of file
diff --git a/dist/scripts/format-configs.sh b/dist/scripts/format-configs.sh
new file mode 100755
index 000000000000..2abe34df4033
--- /dev/null
+++ b/dist/scripts/format-configs.sh
@@ -0,0 +1,14 @@
+#!/bin/bash --norc
+# SPDX-License-Identifier: GPL-2.0
+#
+# Sort and simplify the base config files of the config matrix
+
+# shellcheck source=./lib-config.sh
+. "$(dirname "$(realpath "$0")")/lib-config.sh"
"/$(dirname "$(realpath "$0")")/lib-config.sh" + +# Sort and remove duplicated items in each config base file +for file in "$CONFIG_PATH"/*/*/*.config; do + config_sanitizer < "$file" > "$file.fmt.tmp" + + mv "$file.fmt.tmp" "$file" +done diff --git a/dist/scripts/gen-configs.sh b/dist/scripts/gen-configs.sh new file mode 100755 index 000000000000..51a93b999402 --- /dev/null +++ b/dist/scripts/gen-configs.sh @@ -0,0 +1,19 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 +# +# Generate kernel config according to config matrix +# +# Params: +# $@: [ <filter>... ] When non-empty, only generate kernel configs style matching <filter> + +# shellcheck source=./lib-config.sh +. "$(dirname "$(realpath "$0")")/lib-config.sh" + +# Populate basic config entries based on our config file tree +populate_configs "$@" + +# Process the config files with make olddefconfig +makedef_configs "$@" + +# Check config values (eg. LOCALVERSION) +sanity_check_configs "$@" diff --git a/dist/scripts/gen-spec.sh b/dist/scripts/gen-spec.sh new file mode 100755 index 000000000000..aa8014698859 --- /dev/null +++ b/dist/scripts/gen-spec.sh @@ -0,0 +1,245 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 + +# shellcheck source=./lib-version.sh +. "/$(dirname "$(realpath "$0")")/lib-version.sh" + +usage() +{ + cat << EOF +gen-spec.sh [OPTION] + + --kernel-dist Kernel distribution marker, eg. tks/tlinux4/tlinux3 + --kernel-config Kernel config base name, will look for the corresponding kernel config under $DISTDIR/configs + --kernel-variant Kernel variant, eg. debug, gcov, kdb + --build-arch What arch's are supported, defaults to '$SPEC_ARCH'" +EOF +} + +# gen-spec need to parse info from source (currently only parse LOCALVERSION from config), +# so check if the kernel configs are valid here. +prepare_source_info() { + local localversion arch_localversion file + + if [ -z "$KERNEL_CONFIG" ]; then + die "Config target not specified." + fi + + for arch in $BUILD_ARCH; do + file="$SOURCEDIR/$KERNEL_CONFIG.$arch.config" + if ! [ -e "$file" ]; then + die "Config file missing '$file'" + fi + if [ -z "$localversion" ]; then + localversion=$(sed -ne 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/pg' "$file") + else + arch_localversion=$(sed -ne 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/pg' "$file") + if [ "$arch_localversion" != "$localversion" ]; then + die "LOCALVERSION inconsistent between sub-arches for config target '$file', this breaks SRPM package naming." + fi + fi + done + + localversion=${localversion#\"} + localversion=${localversion%\"} + LOCALVERSION=$localversion +} + +DEFAULT_DISALBED=" kabichk " +while [[ $# -gt 0 ]]; do + case $1 in + --kernel-config ) + KERNEL_CONFIG=$2 + shift 2 + ;; + --build-arch ) + BUILD_ARCH=$2 + shift 2 + ;; + --gitref ) + GITREF=$2 + shift 2 + ;; + --set-default-disabled ) + for param in $2; do + DEFAULT_DISALBED=" $param $DEFAULT_DISALBED" + done + shift 2 + ;; + --set-default-enabled ) + for param in $2; do + DEFAULT_DISALBED="${DEFAULT_DISALBED/ $param /}" + done + shift 2 + ;; + * ) + die "Unrecognized parameter $1" + ;; + esac +done + +BUILD_ARCH="${BUILD_ARCH:-$SPEC_ARCH}" + +# This helper prepares LOCALVERSION +prepare_source_info +# This function will prepare $KERNEL_MAJVER, $KERNEL_RELVER +prepare_kernel_ver "${GITREF:-HEAD}" "$LOCALVERSION" + +_gen_arch_spec() { + local arch kernel_arch + cat << EOF +ExclusiveArch: $BUILD_ARCH +EOF + for arch in $BUILD_ARCH; do + if ! 
+
+_gen_arch_spec() {
+	local arch kernel_arch
+	cat << EOF
+ExclusiveArch: $BUILD_ARCH
+EOF
+	for arch in $BUILD_ARCH; do
+		if ! kernel_arch=$(get_kernel_arch "$arch"); then
+			die "Unsupported arch '$arch'"
+		fi
+		cat << EOF
+%ifarch $arch
+%define kernel_arch $kernel_arch
+%endif
+EOF
+	done
+}
+
+_gen_kerver_spec() {
+	cat << EOF
+%define kernel_majver $KERNEL_MAJVER
+%define kernel_relver $KERNEL_RELVER
+%define kernel_unamer $KERNEL_UNAMER
+%define rpm_name $KERNEL_NAME
+%define rpm_vendor $(get_dist_makefile_var VENDOR_CAPITALIZED)
+%define rpm_url $(get_dist_makefile_var URL)
+EOF
+}
+
+_gen_arch_source() {
+	# Source1000 - Source1199 for kernel configs
+	local config_source_num=1000 arch
+	for arch in $BUILD_ARCH; do
+		echo "Source$config_source_num: $KERNEL_CONFIG.$arch.config"
+		config_source_num=$((config_source_num + 1))
+	done
+
+	# Source1200 - Source1399 for kABI sources
+	local kabi_source_num=1200 arch
+	for arch in $BUILD_ARCH; do
+		echo "Source$kabi_source_num: Module.kabi_$arch"
+		kabi_source_num=$((kabi_source_num + 1))
+	done
+}
+
+_gen_config_build() {
+	cat << EOF
+%ifnarch $BUILD_ARCH
+{error:unsupported arch}
+%endif
+EOF
+	local config_source_num=1000 arch
+	for arch in $BUILD_ARCH; do
+		cat << EOF
+%ifarch $arch
+BuildConfig %{SOURCE$config_source_num}
+%endif
+EOF
+		config_source_num=$((config_source_num + 1))
+	done
+}
+
+_gen_kabi_check() {
+	cat << EOF
+%ifnarch $BUILD_ARCH
+{error:unsupported arch}
+%endif
+EOF
+	local kabi_source_num=1200 arch
+	for arch in $BUILD_ARCH; do
+		cat << EOF
+%ifarch $arch
+CheckKernelABI %{SOURCE$kabi_source_num}
+%endif
+EOF
+		kabi_source_num=$((kabi_source_num + 1))
+	done
+}
+
+_gen_changelog_spec() {
+	cat "$DISTDIR/templates/changelog"
+}
+
+_gen_pkgopt_spec() {
+	local enabled_opts disabled_opts opts_output
+	for opt in \
+		core \
+		doc \
+		headers \
+		perf \
+		tools \
+		bpftool \
+		debuginfo \
+		modsign \
+		kabichk
+	do
+		case $DEFAULT_DISABLED in
+			*" $opt "* )
+				disabled_opts="$disabled_opts $opt"
+				opts_output="$opts_output
+%define with_$opt %{?_with_$opt: 1} %{?!_with_$opt: 0}"
+				;;
+			* )
+				enabled_opts="$enabled_opts $opt"
+				opts_output="$opts_output
+%define with_$opt %{?_without_$opt: 0} %{?!_without_$opt: 1}"
+		esac
+	done
+
+	# Marker for the dist build system; a little ugly, maybe find a better way to present this info later
+	echo ""
+	echo "# === Package options ==="
+	echo "# Enabled by default: $enabled_opts"
+	echo "# Disabled by default: $disabled_opts"
+	echo "$opts_output"
+}
+
+gen_spec() {
+	local _line
+	local _spec
+
+	while IFS='' read -r _line; do
+		case $_line in
+			"{{PKGPARAMSPEC}}"* )
+				_spec+="$(_gen_pkgopt_spec)"
+				;;
+			"{{ARCHSPEC}}"* )
+				_spec+="$(_gen_arch_spec)"
+				;;
+			"{{VERSIONSPEC}}"* )
+				_spec+="$(_gen_kerver_spec)"
+				;;
+			"{{ARCHSOURCESPEC}}"* )
+				_spec+="$(_gen_arch_source)"
+				;;
+			"{{CONFBUILDSPEC}}"* )
+				_spec+="$(_gen_config_build)"
+				;;
+			"{{KABICHECKSPEC}}"* )
+				_spec+="$(_gen_kabi_check)"
+				;;
+			"{{CHANGELOGSPEC}}"* )
+				_spec+="$(_gen_changelog_spec)"
+				;;
+			"{{"*"}}"*)
+				die "Unrecognized template keyword $_line"
+				;;
+			* )
+				_spec+="$_line"
+				;;
+		esac
+		_spec+="
+"
+	done
+
+	echo "$_spec"
+}
+
+gen_spec < "$DISTDIR/templates/kernel.template.spec"
diff --git a/dist/scripts/get-next-sub-version.sh b/dist/scripts/get-next-sub-version.sh
new file mode 100755
index 000000000000..f328d308797e
--- /dev/null
+++ b/dist/scripts/get-next-sub-version.sh
@@ -0,0 +1,9 @@
+#!/bin/bash --norc
+# SPDX-License-Identifier: GPL-2.0
+
+# shellcheck source=./lib-version.sh
+. "$(dirname "$(realpath "$0")")/lib-version.sh"
"$(dirname "$(realpath "$0")")/lib-version.sh" + +prepare_next_sub_kernel_ver "$@" + +echo "$KERNEL_UNAMER" diff --git a/dist/scripts/get-next-version.sh b/dist/scripts/get-next-version.sh new file mode 100755 index 000000000000..07b5c576076b --- /dev/null +++ b/dist/scripts/get-next-version.sh @@ -0,0 +1,9 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 + +# shellcheck source=./lib-version.sh +. "$(dirname "$(realpath "$0")")/lib-version.sh" + +prepare_next_kernel_ver "$@" + +echo "$KERNEL_UNAMER" diff --git a/dist/scripts/get-version.sh b/dist/scripts/get-version.sh new file mode 100755 index 000000000000..c49f7de28324 --- /dev/null +++ b/dist/scripts/get-version.sh @@ -0,0 +1,23 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 +# +# Print out the tkernel version based on git commit and work tree. +# +# $1: git reference, tag or commit +# $2: version type, 'nvr' or 'unamer', defaults to 'unamer' +# +# shellcheck source=./lib-version.sh +. "$(dirname "$(realpath "$0")")/lib-version.sh" + +prepare_kernel_ver "$1" +case $2 in + vr ) + echo "$KERNEL_MAJVER"-"$KERNEL_RELVER" + ;; + nvr ) + echo "$KERNEL_NAME"-"$KERNEL_MAJVER"-"$KERNEL_RELVER" + ;; + * | unamer ) + echo "$KERNEL_UNAMER" + ;; +esac diff --git a/dist/scripts/helper/backporter.sh b/dist/scripts/helper/backporter.sh new file mode 100755 index 000000000000..8da635148c44 --- /dev/null +++ b/dist/scripts/helper/backporter.sh @@ -0,0 +1,91 @@ +#!/bin/bash --norc +# SPDX-License-Identifier: GPL-2.0 + +# shellcheck source=../lib.sh +. "/$(dirname "$(realpath "$0")")/../lib.sh" + +AUTHOR="$(git config user.name) <$(git config user.email)>" +COMMIT=$1 + +[ -z "$COMMIT" ] && die "Usage: $0 <upstream commit id>" + +_resolve_conflict_shell() { + error "Failed to backport, please fix the problem, then commit it." + error "then exit the shell with \`exit 0\`." + error "To abort the backport, exited with a dirty worktree or exit with non-zero." + + export PS1="RESOLVING CONFLICT" + + $SHELL || { + RET=$? + error "Shell exited with non-zero, aborting backport" + git reset --hard HEAD + exit $RET + } + + git diff-index --quiet HEAD || { + RET=$? + error "Dirty worktree, aborting backport" + git reset --hard HEAD + exit $RET + } +} + +_fetch_upstream() { + # Try to fetch from well-known upstream + local commit=$1 + local upstream_repos=( + "torvalds https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=<> git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git" + "tip https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/patch/?id=<> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git" + "mm https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git/patch/?id=<> git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git" + "stable https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/patch/?id=<> git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git" + ) + + for repo in "${upstream_repos[@]}"; do + repo_http_git=${repo#* } + repo_name=${repo%% *} + repo_http=${repo_http_git% *} + repo_git=${repo_http_git#* } + commit_url=${repo_http/<>/$commit} + echo curl --fail --silent --output /dev/null "$commit_url" + if curl --fail --silent --output /dev/null "$commit_url"; then + echo "Found commit '$commit' in '$repo_name' repo" + echo git fetch "$repo_git" "$commit" + git fetch "$repo_git" "$commit" + return + fi + done + + echo "Can't find commit $commit in upstream." + return 1 +} + +if ! 
+
+if ! git cat-file -e "$COMMIT"; then
+	# Not available locally, try to fetch it from a known upstream
+	_fetch_upstream "$COMMIT"
+fi
+
+if ! git cherry-pick "$COMMIT"; then
+	_resolve_conflict_shell
+	CONFLICT=Resolved
+else
+	CONFLICT=None
+fi
+
+git commit \
+	--amend \
+	--author="$AUTHOR" \
+	--date=now \
+	--message \
+"$(git log -1 --format=%s "$COMMIT")
+
+$MSG
+
+Upstream commit: $COMMIT
+Conflict: $CONFLICT
+
+Backport of the following upstream commit:
+
+$(git log -1 "$COMMIT")
+
+Signed-off-by: $AUTHOR"
diff --git a/dist/scripts/helper/dist-repo-init.sh b/dist/scripts/helper/dist-repo-init.sh
new file mode 100755
index 000000000000..0ebdf301431c
--- /dev/null
+++ b/dist/scripts/helper/dist-repo-init.sh
@@ -0,0 +1,81 @@
+#!/bin/bash --norc
+# SPDX-License-Identifier: GPL-2.0
+#
+# After dropping the dist build system into an upstream kernel repo,
+# use this to hook the dist build system into the Kbuild Makefile.
+
+# shellcheck source=../lib.sh
+. "$(dirname "$(realpath "$0")")/../lib.sh"
+
+VENDOR_CAPITALIZED=$(get_dist_makefile_var VENDOR_CAPITALIZED)
+KMAKEFILE_CONTENT=""
+
+[ -d "$TOPDIR" ] || {
+	die "Not a valid git repo."
+}
+
+[ -e "$TOPDIR/Kbuild" ] && [ -e "$TOPDIR/Kconfig" ] && [ -e "$TOPDIR/Makefile" ] || {
+	die "Not a valid kernel repo."
+}
+
+[ -e "$DISTDIR/Makefile" ] || {
+	die "Dist files are not properly configured, aborting."
+}
+
+[[ "$DISTDIR" == "$TOPDIR"/* ]] || {
+	die "Dist files are not properly configured, aborting."
+}
+
+if [[ $1 == "--reset" ]]; then
+	### Clean up changelog
+	echo -n > "$DISTDIR"/templates/changelog
+
+	### Clean up kABI
+	for i in $SPEC_ARCH; do
+		echo -n > "$DISTDIR"/kabi/Module.kabi_$i
+	done
+
+	### Clean up generic-default config
+	echo -n > "$DISTDIR"/configs/00base/generic/default.config
+	for i in $SPEC_ARCH; do
+		echo -n > "$DISTDIR"/configs/00base/generic/$i.config
+	done
+fi
+
+### Patch .gitignore
+grep -qF "# ${VENDOR_CAPITALIZED:+${VENDOR_CAPITALIZED} }dist files" "$TOPDIR/.gitignore" || {
+	echo "Updating the kernel .gitignore to ignore dist files..."
+	cat >> "$TOPDIR/.gitignore" << EOF
+
+# ${VENDOR_CAPITALIZED:+${VENDOR_CAPITALIZED} }dist files
+/$DISTPATH/rpm
+/$DISTPATH/workdir
+EOF
+}
+
+### Patch .gitattributes
+for i in ".ci" "$DISTPATH"; do
+	[[ -d $TOPDIR/$i ]] || continue
+	grep -qF "$i/" "$TOPDIR/.gitattributes" 2>/dev/null || {
+		echo "Updating .gitattributes to exclude $i/..."
+		echo "$i/ export-ignore" >> "$TOPDIR/.gitattributes"
+	}
+done;
+
+### Patch the kernel Makefile
+grep -qF "# dist_make: ${VENDOR_CAPITALIZED:+${VENDOR_CAPITALIZED} }" "$TOPDIR/Makefile" || {
+	echo "Updating the kernel Makefile..."
+	KMAKEFILE_CONTENT=$(<"$TOPDIR/Makefile")
+	cat > "$TOPDIR/Makefile" << EOF
+# SPDX-License-Identifier: GPL-2.0
+
+# dist_make: ${VENDOR_CAPITALIZED:+${VENDOR_CAPITALIZED} }Dist Makefile, which contains dist-* make targets
+ifneq (\$(shell echo \$(MAKECMDGOALS) | grep "^dist-"),)
+include $DISTPATH/Makefile
+else
+
+$KMAKEFILE_CONTENT
+
+endif # dist_make
+EOF
+}
diff --git a/dist/scripts/helper/gen-module-kabi.sh b/dist/scripts/helper/gen-module-kabi.sh
new file mode 100755
index 000000000000..e91ff6a9e79f
--- /dev/null
+++ b/dist/scripts/helper/gen-module-kabi.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+#
+
+KERNEL=
+SYMVER=
+TMPDIR=
+
+error() {
+	echo "error: $*" > /dev/stderr
+}
+
+usage() {
+	cat << EOF
+Provide a list of modules; this script will print the symbols used by these modules
+
+gen-module-kabi.sh: [--kernel <kernel name>] [--symver <symver file>] [<module name or file>, ...]
+
+<kernel name>: defaults to \$(uname -r), or specify an installed kernel
+<symver file>: defaults to /lib/modules/<kernel name>/symvers.gz, or specify a symver file
+<module>: module name
+EOF
+}
+
+while true; do
+	case $1 in
+		--symver )
+			SYMVER=$2
+			shift 2
+			;;
+		--kernel )
+			KERNEL=$2
+			shift 2
+			;;
+		--usage|--help|-h )
+			usage
+			exit 0
+			;;
+		* )
+			break
+			;;
+	esac
+done
+
+if ! TMPDIR="$(mktemp -d -t gen-module-kabi.XXXXXX)"; then
+	error "mktemp failed"
+	exit 1
+fi
+
+trap 'ret=$?;
+[[ -d $TMPDIR ]] && rm --one-file-system -rf -- "$TMPDIR";
+exit $ret;
+' EXIT
+
+[ "$KERNEL" ] || KERNEL=$(uname -r)
+[ -f "$SYMVER" ] || SYMVER=/lib/modules/$KERNEL/symvers.gz
+[ -f "$SYMVER" ] || SYMVER=/boot/symvers-$KERNEL.gz
+[ -f "$SYMVER" ] || SYMVER=./Module.symvers
+
+if [[ "$#" -eq 0 ]]; then
+	usage
+	exit 1
+fi
+
+case $SYMVER in *.gz )
+	gzip < "$SYMVER" -d > "$TMPDIR/symver"
+	SYMVER=$TMPDIR/symver
+	;;
+esac
+
+_get_export_syms() {
+	_temp_extract=$TMPDIR/extracted.ko
+	case $1 in
+		*.ko )
+			modfile=$1
+			;;
+		*.xz )
+			modfile=$_temp_extract
+			xz -c -d "$1" > "$_temp_extract"
+			;;
+		* )
+			error "unsupported file format of file: $1"
+			return 1
+	esac
+
+	nm -uAg "$modfile"
+}
+
+for mod in "$@"; do
+	if [[ $mod == *.ko ]] && [[ -f $mod ]]; then
+		mods=$mod
+	elif [[ $mod == *.ko.* ]] && [[ -f $mod ]]; then
+		mods=$mod
+	elif ! mods="$(modinfo --set-version "$KERNEL" --filename "$mod" 2> /dev/null)"; then
+		error "'$mod' is not a valid module name"
+	fi
+
+	for m in $mods; do
+		_get_export_syms "$m"
+	done
+done | sort | while read -r file_path type sym_name; do
+	in_symver=0
+	in_vmlinux=0
+	sym_line=$(grep "\s$sym_name\s" "$SYMVER") && in_symver=1
+	case $sym_line in
+		# Print only symbols in vmlinux
+		*vmlinux*EXPORT_SYMBOL* )
+			in_vmlinux=1
+			;;
+	esac
+	echo -n "$file_path $type $sym_name"
+	if [[ $in_symver -eq 0 ]]; then
+		echo -n " (NO-KABI: NOT IN SYMVER)"
+	fi
+	if [[ $in_vmlinux -eq 0 ]]; then
+		echo -n " (NO-KABI: NOT IN VMLINUX)"
+	fi
+	echo
done
"$(dirname "$(realpath "$0")")/lib.sh" + +CONFIG_PATH=${CONFIG_PATH:-$DISTDIR/configs} +# shellcheck disable=SC2206 +CONFIG_ARCH=( $SPEC_ARCH ) +CONFIG_SPECS=( "$CONFIG_PATH"/[0-9][0-9]* ) +CONFIG_OUTDIR=$SOURCEDIR +CONFIG_CACHE=$DISTDIR/workdir/config_cache + +_get_config_cross_compiler () { + if [[ "$1" == $(get_native_arch) ]]; then + : + elif [[ -d "$TOPDIR/scripts/dummy-tools/" ]]; then + echo "scripts/dummy-tools/" + else + echo "$DISTDIR/scripts/dummy-tools/" + fi +} + +get_config_val() { + # If it's y/m/n/0-9, return plain text, else get from .config + # + # To be more accurate, maybe we need to check right/left-value? + case $1 in + n ) + ;; + y|m|[0-9] ) + echo "$1" ;; + * ) + local _val + _val=$(grep "^CONFIG_$1=" "$2") + _val=${_val#*=} + echo "${_val:-not set}" + ;; + esac +} + +# Eval a Kconfig condition statement +kconfig_eval() { + local _line=$1 + + local _if + # Convert Kconfig cond statement to bash syntax then eval it + _if=$(echo "$_line" | sed -E \ + -e "s/(\|\||&&|=|!=|<|<=|>|>=|!|\(|\))/ \1 /g" \ + -e "s/([[:alnum:]_-]+)/ \"\$(get_kconf_val \'\1\')\" /g") + eval "[[ $_if ]]" +} + +get_depends() { + local _conf=$1 + local _deps _line + + sed -nE "s/^[[:space:]]*depends on //p;" "$CONFIG_CACHE/$_conf" +} + +# $1: Parsed Kconfig file +get_all_depends() { + local _dep + local _deps=( ) + + [[ ! -e "$CONFIG_CACHE/$1" ]] && return 1 + + # shellcheck disable=SC2207 + while read -r _dep; do + if ! [[ "$_dep" ]]; then + continue + elif [[ "$_dep" =~ y|n|m ]]; then + _deps+=( "$_dep" ) + elif [[ "$_dep" =~ [a-zA-Z0-9] ]]; then + _deps+=( "$_dep[=$(get_config_val "$_dep" "$2")]" ) + else + _deps+=( "$_dep" ) + fi + done <<< "$(get_depends "$1" | sed -E \ + -e 's/^([^\(])/(\1/' \ + -e 's/([^\)])$/\1)/' \ + -e '2,$s/^/\&\&/' \ + -e 's/(\|\||&&|=|!=|<|<=|>|>=|!|\(|\)|([[:alnum:]_-]+))/\n\1\n/g')" + # The regex above simply tokenize the Kconfig expression + + echo "${_deps[@]}" + + return 0 +} + +kconfig_parser() { + local _arch=$1 _kconfig=$2 _config=$3 + + [[ -s $_kconfig ]] || error "Invalid Kconfig '$_kconfig'" + [[ -s $_config ]] || error "Invalid config file '$_kconfig'" + + rm -r "${CONFIG_CACHE:?}" 2>/dev/null + mkdir -p "$CONFIG_CACHE" + + local _CONFIG_FILE=$CONFIG_CACHE/_invalid + local _PARSE_DEPS=() + + _reader() { + local _f=$1 + + if [[ $_f == *"\$(SRCARCH)"* ]]; then + _f=${_f/\$(SRCARCH)/$(get_kernel_src_arch "$_arch")} + fi + + local _line + while :; do + _line=${_line%%#*} + case $_line in + "config "* | "menuconfig "* ) + _CONFIG_FILE=${_line#* } + _CONFIG_FILE=${_CONFIG_FILE## } + _CONFIG_FILE=$CONFIG_CACHE/$_CONFIG_FILE + for _dep in "${_PARSE_DEPS[@]}"; do [[ $_dep ]] && echo "depends on $_dep"; done >> "$_CONFIG_FILE" + ;; + "source "* ) + _line=${_line#* } + _line=${_line#\"} + _line=${_line%\"} + _reader "$_line" + ;; + "if "* ) + _line=${_line#if } + _PARSE_DEPS+=( "${_line}" ) + ;; + "menu "* | "choice" ) + local _sub_dep="" + while read -r _line; do + case $_line in + if*|menu*|config*|end*|source*|choice* ) break ;; + "depends on"* ) + _line="${_line#depends on}" + _sub_dep="${_sub_dep:+$_sub_dep && }(${_line## })" + ;; + "visible if"* ) + _line="${_line#visible if}" + _sub_dep="${_sub_dep:+$_sub_dep && }(${_line## })" + ;; + esac + done + _PARSE_DEPS+=( "${_sub_dep## }" ) + continue + ;; + "end"* ) + # We don't care about correctness, + # it's always paired with previous if/menu/choice + unset '_PARSE_DEPS[${#_PARSE_DEPS[@]}-1]' + ;; + '' | ' ' ) + ;; + * ) + echo "$_line" >> "$_CONFIG_FILE" + esac + IFS='' read -r _line || break + done <<< 
"$(<"$_f")" + } + + pushd "$(dirname "$_kconfig")" > /dev/null || die "Failed pushd to '$TOPDIR'" + + _reader "$_kconfig" + + popd || die +} + +# Call make for config related make target. Will set proper cross compiler and kernel arch name. +# $1: Target arch. +config_make() { + local arch=$1; shift + local config_cross_compiler config_arch + + pushd "$TOPDIR" >/dev/null || die "Not in a valid git repo." + + config_cross_compiler=$(_get_config_cross_compiler "$arch") + config_arch=$(get_kernel_arch "$arch") + + if [[ -z "$config_arch" ]]; then + die "Unsupported arch $arch" + fi + + make ARCH="$config_arch" CROSS_COMPILE="$config_cross_compiler" "$@" + + popd >/dev/null || die "Failed popd" +} + +# Dedup, and filter valid config lines, print the sanitized content sorted by config name. +config_sanitizer() { + # AWK will add an extra column of CONFIG_NAME for sort to work properly + LC_ALL=C + awk ' + /is not set/ { + split($0, val, "#"); + split(val[2], val); + configs[val[1]]=$0 + } + /^CONFIG.*=/ { + split($0, val, "="); + if (val[2] == "n") + configs[val[1]]="# "val[1]" is not set" + else + configs[val[1]]=$0 + } + END { + for (config in configs) { + print config " " configs[config] + } + }' \ + | LC_ALL=C sort -k 1 \ + | cut -d ' ' -f 2- +} + +# Iterate the dot product of all config options +# param: <callback> [<filter>... ] +# [<filter>... ]: Filters config targets to use. eg. kernel-default-*, kernel-minimal-debug +# <callback>: A callback function, see below for <callback> params. +# +# Will generate a matrix, and call <callback> like this: +# <callback> <config-target> [ <dir entry of configs in inherit order> ... ] +# +# eg. +# <callback> "kernel-generic-release" "$TOPDIR/00pending/kernel" "$TOPDIR/20preset/generic" "$TOPDIR/50variant/release" +# <callback> "kernel-generic-debug" "$TOPDIR/00pending/kernel" "$TOPDIR/20preset/generic" "$TOPDIR/50variant/debug" +# ... +# +for_each_config_target () { + local filters callback + local target cur_target i j + + callback=$1; shift + filters=("$@") + + _match () { + [[ ${#filters[@]} -eq 0 ]] && return 0 + + for _f in "${filters[@]}"; do + # shellcheck disable=SC2053 + [[ $1 == $_f ]] && return 0 + done + + return 1 + } + + # The '_' is used to bootstrap the file loop + local -a hierarchy=( _ ) prev_hierarchy target_conf prev_target_conf + + for folder in "${CONFIG_SPECS[@]}"; do + prev_hierarchy=( "${hierarchy[@]}" ) + prev_target_conf=( "${target_conf[@]}" ) + + hierarchy=( ) + target_conf=( ) + + i=0 + for config in "$folder"/*; do + if ! [[ -e "$config/default.config" ]]; then + error "Invalid config folder $config" + return 1 + fi + + j=0 + for target in "${prev_hierarchy[@]}"; do + cur_target=$target-$(basename "$config") + hierarchy+=( "$cur_target" ) + # config files can't have spece in their path since the + # list is split by space, seems no better way + target_conf[$i]="${prev_target_conf[$j]} $config" + i=$(( i + 1 )) + j=$(( j + 1 )) + done + done + done + + i=0 + for target in "${hierarchy[@]}"; do + # split target_conf by space is what we want here + target="${target#_-}" + # shellcheck disable=SC2086 + _match $target && "$callback" "${target#_-}" ${target_conf[$i]} + i=$(( i + 1 )) + done +} + +# Iterate populated config files +# param: <callback> [<filter>... ] +# [<filter>... ]: Filters config targets to use. eg. kernel-default-*, kernel-minimal-debug +# <callback>: A callback function, see below for <callback> params. 
+#
+# Will generate a matrix, and call <callback> like this:
+# <callback> <arch> <config-file> [ <backing config files in inherit order>... ]
+#
+# eg.
+# <callback> "x86_64" "kernel-generic-release" \
+#	"$TOPDIR/00pending/kernel/default.config" "$TOPDIR/00pending/kernel/x86_64.config" \
+#	"$TOPDIR/20preset/generic/default.config" "$TOPDIR/20preset/generic/x86_64.config" \
+#	"$TOPDIR/50variant/release/default.config" "$TOPDIR/50variant/release/x86_64.config"
+# <callback> "aarch64" "kernel-generic-release" \
+#	"$TOPDIR/00pending/kernel/default.config" "$TOPDIR/00pending/kernel/aarch64.config" \
+#	"$TOPDIR/20preset/generic/default.config" "$TOPDIR/20preset/generic/aarch64.config" \
+#	"$TOPDIR/50variant/release/default.config" "$TOPDIR/50variant/release/aarch64.config"
+# ...
+#
+for_each_config_product () {
+	local _wrapper_cb=$1; shift
+
+	_wrapper () {
+		local target=$1; shift
+		local files=( "$@" )
+
+		for arch in "${CONFIG_ARCH[@]}"; do
+			local config_basename="$target.$arch.config"
+			local config_product="$CONFIG_OUTDIR/$config_basename"
+			local config_files=( )
+
+			for _conf in "${files[@]}"; do
+				config_files+=( "$_conf/default.config" )
+				if [[ -e "$_conf/$arch.config" ]]; then
+					config_files+=( "$_conf/$arch.config" )
+				fi
+			done
+
+			$_wrapper_cb "$arch" "$config_product" "${config_files[@]}"
+		done
+	}
+
+	for_each_config_target _wrapper "$@"
+}
+
+# Simply concatenate the backing config files in order into a single file for each config target
+populate_configs () {
+	_merge_config () {
+		local target=$1; shift
+		local config_basename output_config
+		for arch in "${CONFIG_ARCH[@]}"; do
+			config_basename="$target.$arch.config"
+			output_config="$CONFIG_OUTDIR/$config_basename"
+
+			echo "Populating $config_basename from base configs..."
+
+			for conf in "$@"; do
+				cat "$conf/default.config"
+				if [[ -e "$conf/$arch.config" ]]; then
+					cat "$conf/$arch.config"
+				fi
+			done | config_sanitizer > "$output_config"
+		done
+	}
+
+	for_each_config_target _merge_config "$@"
+}
+
+makedef_configs () {
+	_makedef_config() {
+		local target=$1; shift
+		local populated_config
+
+		for arch in "${CONFIG_ARCH[@]}"; do
+			populated_config="$CONFIG_OUTDIR/$target.$arch.config"
+
+			# The config base name is always in this format: <name>.<arch>.config
+			echo "Processing $(basename "$populated_config") with make olddefconfig..."
+
+			if ! [ -f "$populated_config" ]; then
+				error "Config not found: '$populated_config'"
+				continue
+			fi
+
+			pushd "$TOPDIR" > /dev/null || exit 1
+
+			config_make "$arch" KCONFIG_CONFIG="$populated_config" olddefconfig
+
+			popd > /dev/null || return
+		done
+	}
+
+	for_each_config_target _makedef_config "$@"
+}
+
+sanity_check_configs () {
+	# We use LOCALVERSION in Kconfig for variants to save a make flag, but this
+	# requires more sanity checks to avoid misuse.
+	# First ensure all arches have the same value,
+	# then ensure it's in an acceptable format
+	_sanity_check_configs() {
+		local target=$1; shift
+		local populated_config
+		local localversion arch_localversion auto_localversion
+
+		for arch in "${CONFIG_ARCH[@]}"; do
+			populated_config="$CONFIG_OUTDIR/$target.$arch.config"
+
+			# The config base name is always in this format: <name>.<arch>.config
+			echo "Checking $(basename "$populated_config")..."
+
+			if ! [ -f "$populated_config" ]; then
[ -f "$populated_config" ]; then + error "Config not populated: '$populated_config'" + error "sanity_check_configs need to be called after the configs are populated" + exit 1 + fi + + auto_localversion=$(sed -ne 's/^CONFIG_LOCALVERSION_AUTO=\(.*\)$/\1/pg' "$populated_config") + if [ -n "$auto_localversion" ] && [ "$auto_localversion" != "n" ]; then + error "CONFIG_LOCALVERSION_AUTO must be unset, but it's set in config target $target" + error "This will break dist build system's release versioning." + exit 1 + fi + + if [ -z "$localversion" ]; then + localversion=$(sed -ne 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/pg' "$populated_config") + if [ 1 -lt $(echo "$localversion" | wc -l) ]; then + error "More than one LOCALVERSION is set for config target '$target'" + exit 1 + fi + else + arch_localversion=$(sed -ne 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/pg' "$populated_config") + if [ "$arch_localversion" != "$localversion" ]; then + error "Unexpected '$arch_localversion' != '$localversion':" + error "This breaks SRPM package naming, LOCALVERSION inconsistent between sub-arches for config target '$target'" + exit 1 + fi + fi + done + + localversion=${localversion#\"} + localversion=${localversion%\"} + + case $localversion in + +debuginfo|+core|+devel|+headers|+modules|+$KDIST ) + error "Unexpected LOCALVERSION '$localversion':" + error "LOCALVERSION is using a reserved keyword, this will cause package dependency breakage." + exit 1 + ;; + +* ) + ;; + '' ) + # Empty value is default and fine + ;; + *) + error "Unexpected LOCALVERSION '$localversion':" + error "LOCALVERSION is not in acceptable format, for dist building system, it need to start with a '+'" + error "to distinguish it from release string, and dist build system also need to manipulate the string based on above fact." + exit 1 + esac + } + + for_each_config_target _sanity_check_configs "$@" +} diff --git a/dist/scripts/lib-version.sh b/dist/scripts/lib-version.sh new file mode 100755 index 000000000000..c13947a97c99 --- /dev/null +++ b/dist/scripts/lib-version.sh @@ -0,0 +1,725 @@ +#!/bin/bash +# +# Version conversion helpers for building a TK kernel. +# +# shellcheck source=./lib.sh +. "$(dirname "$(realpath "$0")")/lib.sh" + +## Standardized kernel package version and uname are composed as follows: +# +# A valid tag: [PREFIX-]<KERNEL_MAJVER>-<KERNEL_RELVER>[.<KERNEL_DIST>] +# uname -r: <KERNEL_MAJVER>-<KERNEL_RELVER>[.<KERNEL_DIST>][+<LOCALVER>] +# RPM NVR: kepnel[-<KERNEL_DIST>][-<LOCALVER>]-<KERNEL_MAJVER>-<KERNEL_RELVER> +# <NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN>-<VVVVVVVVVVVVV>-<RRRRRRRRRRRRR> +# +# Some NOTES about why we compose these string in above way: +# - Notice KERNEL_DIST is moved to N part of the RPM NVR, this is how TK3/TK4 release have been doing +# and that is correct because we need to distingush between kernel release streams. And there are +# things will break if we move it out of this part (mostly due to package name change). +# - RPM split the whole package name by '-', then recognize the right most part as R, second right +# most part is V, so KERNEL_MAJVER and KERNEL_RELVER can't contain '-'. +# - LOCALVER is commonly used to present variants of kernel, that is, using same kernel repo commit/version/tag, +# just built with a different config. +# A example is RPM pkg kernel-5.4.119-1 (uname 5.4.119-1) and kernel-debug-5.4.119-1 (uname 5.4.119-1+debug), +# the later one is same kernel built with more debug configs enabled. 
+# When kernel-5.4.119-1 runs into unknown issues, kernel-debug-5.4.119-1 could be installed to do more diagnosis.
+# - Notice LOCALVER is moved to the "N" part of the RPM NVR, because adding it to the "V" or "R" part breaks kernel
+# package versioning. A suffix, prefix or in-fix of "V" or "R" could cause the package or repo manager
+# to make variants override each other, and fail the system unexpectedly. For example, a debug kernel
+# could be wrongly installed by a normal system-wide package update, since the suffix gave it a
+# higher version number and it shares the same Name with the vanilla kernel.
+# - Some old TK4 tags have KERNEL_DIST as part of KERNEL_RELVER, we cover that too.
+#
+## More explanations of each field:
+#
+### PREFIX: <none>/release/x86/aarch64/oc/....
+# - Could be some well-known string like "release", "x86", ..., which can be used to make tags more distinguishable.
+#
+### KERNEL_MAJVER: <VERSION>.<PATCHLEVEL>.<SUBLEVEL>
+# - It's the standard upstream Linux kernel release version, present in the kernel's root Makefile, eg:
+#     VERSION = 5
+#     PATCHLEVEL = 4
+#     SUBLEVEL = 203
+#   Which stands for kernel 5.4.203
+#
+### KERNEL_RELVER: [0.<SNAPSHOT>.][<EXTRAVERSION>.]<REL>
+# - If it starts with 0, it indicates a snapshot, an unofficial release. Else it must be a tagged release.
+#   The <SNAPSHOT> string is automatically generated using the git commit as the versioning base for untagged
+#   test builds.
+# - If EXTRAVERSION is non-empty, it must be present here.
+# - REL is a custom release string; it should be alphanumerics split by '.'.
+#   eg. 0011, 0009.12, 0011.prerelease1, ...
+#   eg. 2207.1.1, 2207.1.2, ...
+#
+# NOTE: due to historical reasons, KERNEL_RELVER could contain '-', but the final generated string that will be used in
+# the spec file and uname will always be converted to contain '.' only, to comply with the RPM NVR naming style and also make things cleaner.
+#
+### KERNEL_DIST: <none>/tks/tlinux4/stable/stream/...
+# Indicates this is a special kernel build; it will show up in the RPM package name to distinguish different kernel release streams.
+# Is configurable through the KDIST variable in dist/Makefile.
+#
+# NOTE: Due to historical reasons, if KDIST is added as the first part of KERNEL_RELVER's <REL> string, it will be moved to the tail,
+# to make the KERNEL_RELVER part consistent between RPM name, tag and uname.
+#
+# Example:
+# git describe --tag                      RPM                                                       uname -r
+# 5.4.119-1-tlinux4-0007                  kernel-tlinux4-5.4.119-1.0007                             5.4.119-1.0007.tlinux4
+# 5.4.119-1-tlinux4-0007-2-g884a77bf0ba6  kernel-tlinux4-5.4.119-0.20211115git1135ec008ef3.1.0007   5.4.119-0.20211115git1135ec008ef3.1.0007.tlinux4
+# 5.4.119-1-tlinux4-0007.subrelease       kernel-tlinux4-5.4.119-1.0007.subrelease                  5.4.119-1.0007.subrelease.tlinux4
+# 5.4.119-1-tlinux4-0007~rc1              kernel-tlinux4-5.4.119-1.0007~rc1                         5.4.119-1.0007~rc1.tlinux4 (*)
+#
+# NOTE: Sometimes TK4's release version may go backwards, it's a known issue we have to live with.
+# TK4 used tags like 5.4.119-1-tlinux4-0007.prerelease to indicate a release candidate.
+# You can check this with `rpmdev-vercmp`:
+#   $ rpmdev-vercmp 5.4.119-1-tlinux4-0007.prerelease 5.4.119-1-tlinux4-0007
+#   $ 5.4.119-1-tlinux4-0007.prerelease > 5.4.119-1-tlinux4-0007
+# This means RPM thinks the release candidate is higher than the real release; to fix that,
+# try using the tilde symbol to indicate it's an RC.
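+#
+# As an illustration of the composition above, here is a minimal sketch of a
+# hypothetical helper (not used anywhere by the build scripts) that splits a
+# standardized uname string back into its fields, assuming KDIST is already
+# known (hardcoded to "tlinux4" for the example):
+#
+#   _example_split_unamer "5.4.119-1.0007.tlinux4+debug"
+#   -> KERNEL_MAJVER=5.4.119 KERNEL_RELVER=1.0007 KERNEL_DIST=tlinux4
+_example_split_unamer() {
+    local unamer=$1 kdist=tlinux4
+    local majver=${unamer%%-*}    # 5.4.119
+    local relver=${unamer#*-}     # 1.0007.tlinux4[+<LOCALVER>]
+    relver=${relver%%+*}          # drop the optional +<LOCALVER> suffix
+    relver=${relver%.$kdist}      # drop the optional .<KERNEL_DIST> suffix
+    echo "KERNEL_MAJVER=$majver KERNEL_RELVER=$relver KERNEL_DIST=$kdist"
+}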
+
+## Macros and values:
+# Alias of the four-part linux kernel version from the kernel's Makefile
+KVERSION=
+KPATCHLEVEL=
+KSUBLEVEL=
+KEXTRAVERSION=
+
+# KPREMERGEWINDOW: If we are building a commit in the first merge window
+# In the first merge time window, after a formal kernel release and before the rc1 release of the next kernel,
+# KPATCHLEVEL will be stuck at a lower value, which confuses RPM in many ways. So just bump
+# KPATCHLEVEL upon build in such a case.
+#
+# For example, after the 5.15 release and before 5.16, upstream will start merging features for 5.16
+# without bumping KPATCHLEVEL, which is stuck at 15.
+# As our build will now be newer than the 5.15 build, we have to make the version higher than 5.15.
+# To avoid conflicts with 5.15 stable builds like 5.15.Y-Z, we can't bump the Y part or Z part.
+# So instead bump to 5.16 and mark it rc0, as 5.16.0-0.rc0 (just as Fedora does).
KPREMERGEWINDOW=
+# Set to true if this is a rolling build that tracks upstream
+KROLLING=
+# Set to 1 to allow the git tag to force-override uname
+KFORCEUNAMER=
+
+# git snapshot versioning
+KGIT_SNAPSHOT=
+# Raw info from the current git tag
+KGIT_TAG_RELEASE_INFO_RAW=
+# Set if the current commit is tagged with valid release info
+KGIT_TAG_RELEASE_INFO=
+# Set if a previous commit is found tagged with valid release info
+KGIT_LATEST_TAG_RELEASE_INFO=
+# Set if the current commit is tagged with a valid test tag name
+KGIT_TESTBUILD_TAG=
+# Release: starts from 1.0, indicates the release version, info embedded in the git tag
+KGIT_RELEASE_NUM=
+KGIT_SUB_RELEASE_NUM=
+
+### The formal kernel version and release
+# Simulate `uname -r` output, composed as described in the header comment above
+export KERNEL_UNAMER=
+# Basically: $KVERSION.$KPATCHLEVEL.$KSUBLEVEL (eg. 5.17.0, 5.16.3)
+export KERNEL_MAJVER=
+# Release version (eg. 1, 0.rc0, 0.20220329gita11bf64a6e8f), see comments at the beginning of this file
+export KERNEL_RELVER=
+# Kernel distro variable (eg. tks, tlinux4, <none>), with any leading "." or "-" removed, see comments at the beginning of this file
+export KERNEL_DIST=
+# Only used for the make-release sub command, gets the latest release tag of the current commit
+export KERNEL_PREV_RELREASE_TAG=
+
+# Set if it's a tagged release
+export KTAGRELEASE=
+# KTESTRELEASE: If we are building based on a test tag
+KTESTRELEASE=
+# KSNAPRELEASE: If we are building a snapshot-release
+KSNAPRELEASE=
+# KRCRELEASE: If we are building a rc-release
+KRCRELEASE=
+
+_is_num() {
+    [ "$1" -eq "$1" ] &>/dev/null
+}
+
+# Get the tag of a git ref; if the git ref itself is a valid tag, just return it,
+# else pick the best match among the tags pointing at it.
+_get_git_tag_of() {
+    local gitref=$1; shift
+    local tag tags
+
+    tags=$(git "$@" tag --points-at "$gitref")
+    # If multiple tags are present, use the one specified by the user
+    for tag in $tags; do
+        if [[ "$tag" == "$gitref" ]]; then
+            echo "$tag"
+            return
+        fi
+    done
+
+    # If HEAD is tagged with multiple tags and the user is not asking to use one of them,
+    # use the first one found matching release info.
+    for tag in $tags; do
+        if _get_rel_info_from_tag "$tag" > /dev/null; then
+            echo "$tag"
+            return
+        fi
+    done
+
+    # Else just return the first tag
+    for tag in $tags; do
+        echo "$tag"
+        return
+    done
+}
+
+# git-describe, but customized for the kernel, and prefers the specified tag
+_get_git_describe_of() {
+    local gitref=$1; shift
+    local gitdesc
+
+    gitdesc=$(_get_git_tag_of "$gitref" "$@")
+
+    if [[ -n "$gitdesc" ]]; then
+        echo "$gitdesc"
+    else
+        git "$@" describe --tags --abbrev=12 "$gitref"
+    fi
+}
+
+# Get the tag of a git ref; if the git ref itself is a valid tag, just return it,
+# else search for the latest tag before this git ref.
+# Returns 1 if the git ref itself is tagged.
+_get_last_git_tag_of() {
+    local gitref=$1; shift
+    local gittag
+
+    gittag=$(_get_git_tag_of "$gitref" "$@")
+
+    if [[ -z "$gittag" ]]; then
+        git "$@" describe --tags --abbrev=0 "$gitref"
+        return 0
+    else
+        echo "$gittag"
+        return 1
+    fi
+}
+
+# $1: git tag or git commit, defaults to HEAD
+# $2: kernel source tree, should be a git repo
+# Parse fundamental kernel versioning info from the Makefile.
+get_kernel_code_version() {
+    local gitref=${1:-HEAD}
+    local repo=${2:-$TOPDIR}
+    local makefile
+
+    makefile=$(git -C "$repo" show "$gitref:Makefile" 2>/dev/null || cat "$repo/Makefile")
+
+    if [ ! "$makefile" ]; then
+        die "Error: Failed to read Makefile"
+        return 1
+    fi
+
+    KVERSION=$(sed -nE '/^VERSION\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+    KPATCHLEVEL=$(sed -nE '/^PATCHLEVEL\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+    KSUBLEVEL=$(sed -nE '/^SUBLEVEL\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+    KEXTRAVERSION=$(sed -nE '/^EXTRAVERSION\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+    # Replace '-' in KEXTRAVERSION
+    KEXTRAVERSION=${KEXTRAVERSION//-/.}
+    KEXTRAVERSION=${KEXTRAVERSION#.}
+
+    if [[ -z "$KVERSION" ]] || [[ -z "$KPATCHLEVEL" ]] || [[ -z "$KSUBLEVEL" ]]; then
+        die "Invalid VERSION, PATCHLEVEL or SUBLEVEL in Makefile"
+        return 1
+    fi
+
+    if [[ "$KEXTRAVERSION" == "rc"* ]] || [[ $KEXTRAVERSION == "-rc"* ]]; then
+        KRCRELEASE=1
+    fi
+
+    # Read KDIST using gitref for a historically accurate value.
+    KERNEL_DIST=$(get_dist_makefile_var KDIST "$gitref" "$repo")
+
+    return 0
+}
+
+_first_merge_window_detection() {
+    local gitref=${1:-HEAD}
+    local repo=${2:-$TOPDIR}
+    local upstream_base upstream_lasttag upstream_gitdesc
+    local tagged
+
+    # If KSUBLEVEL or KEXTRAVERSION is set, it's not in the first merge window of a major release
+    [[ $KSUBLEVEL -eq 0 ]] || return 1
+    [[ -n $KEXTRAVERSION ]] && return 1
+    [[ $upstream ]] || upstream="@{u}"
+
+    # Get the latest merge base with upstream for merge window detection,
+    # the merge window is an upstream-only thing
+    if upstream_base=$(git -C "$repo" merge-base "$gitref" "$upstream" 2>/dev/null); then
+        upstream_gitdesc=$(git -C "$repo" describe --tags --abbrev=12 "$upstream_base" 2>/dev/null)
+        upstream_lasttag=$(_get_last_git_tag_of "$upstream_base" -C "$repo")
+        tagged=$?
+
+        if \
+            # If the last tag is a tagged upstream release
+            [[ $upstream_lasttag == v$KVERSION.$KPATCHLEVEL ]] && \
+            # And if the merge base is ahead of the tagged release
+            [[ "$tagged" -eq 1 ]]; then
+            # Then it's in the first merge window
+            return 0
+        fi
+    else
+        warn "Not tracking a valid upstream, merge window detection is disabled."
+    fi
+
+    return 1
+}
+
+_do_strip_kernel_majver() {
+    local rel
+
+    if [[ $1 == *"$KVERSION.$KPATCHLEVEL.$KSUBLEVEL"* ]]; then
+        rel=${1#*"$KVERSION.$KPATCHLEVEL.$KSUBLEVEL"}
+    elif [[ "$KSUBLEVEL" = "0" ]] && [[ $1 == *"$KVERSION.$KPATCHLEVEL"* ]]; then
+        rel=${1#*"$KVERSION.$KPATCHLEVEL"}
+    elif [[ $KPREMERGEWINDOW ]] && [[ $1 == *"$KVERSION.$((KPATCHLEVEL + 1)).$KSUBLEVEL"* ]]; then
+        rel=${1#*"$KVERSION.$((KPATCHLEVEL + 1)).$KSUBLEVEL"}
+    else
+        return 1
+    fi
+
+    echo "$rel"
+}
+
+# Check and strip the leading VERSION.PATCHLEVEL.SUBLEVEL of a tag
+# (eg. 5.18.19) and potential prefixes. If the tag doesn't match its
+# corresponding kernel version, return 1.
+_check_strip_kernel_majver() {
+    local tag=$1 rel
+    local makefile
+    local _kversion _kpatchlevel _ksublevel
+
+    if rel=$(_do_strip_kernel_majver "$tag"); then
+        echo "$rel"
+        return 0
+    fi
+
+    # Update VERSION/PATCHLEVEL/SUBLEVEL using the target Makefile, because upstream
+    # changes them very frequently and they may be out of sync with the previous tag.
+    if makefile=$(git show "$tag:Makefile" 2>/dev/null); then
+        _kversion=$(sed -nE '/^VERSION\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+        _kpatchlevel=$(sed -nE '/^PATCHLEVEL\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+        _ksublevel=$(sed -nE '/^SUBLEVEL\s*:?=\s*/{s///;p;q}' <<< "$makefile")
+    fi
+
+    if rel=$(KVERSION=$_kversion KPATCHLEVEL=$_kpatchlevel KSUBLEVEL=$_ksublevel _do_strip_kernel_majver "$tag"); then
+        echo "$rel"
+        return 0
+    fi
+
+    return 1
+}
+
+# Get release info from a git tag
+_get_rel_info_from_tag() {
+    local tag=$1 rel
+
+    if ! rel=$(_check_strip_kernel_majver "$@"); then
+        return 1
+    fi
+    rel=${rel//-/.}
+    rel=${rel#.}
+
+    # If KERNEL_DIST is added as a prefix/semi-prefix/suffix, remove it from rel
+    if [[ $KERNEL_DIST ]]; then
+        case $rel in
+            $KERNEL_DIST.*)
+                rel=${rel#$KERNEL_DIST.}
+                ;;
+            $KEXTRAVERSION.$KERNEL_DIST.*)
+                rel=${rel#$KEXTRAVERSION.$KERNEL_DIST.}
+                rel=$KEXTRAVERSION.$rel
+                ;;
+            *.$KERNEL_DIST)
+                rel=${rel%.$KERNEL_DIST}
+                ;;
+        esac
+    fi
+
+    # If KEXTRAVERSION is added, remove it
+    if [[ -z "$KEXTRAVERSION" ]]; then
+        # If the previous KEXTRAVERSION is not empty but now empty,
+        # still consider it a valid release tag since the release candidate mark may get dropped.
+        # But this really should look at the Makefile corresponding to that tag commit
+        :
+    elif _is_num "$KEXTRAVERSION"; then
+        case $rel in
+            # Extra version is a release number, remove it here and add it back later
+            $KEXTRAVERSION | "$KEXTRAVERSION."* )
+                rel=${rel#$KEXTRAVERSION}
+                rel=${rel#.}
+                ;;
+            * ) return 1; ;;
+        esac
+    else
+        # Remove RC-like tags, append them as a suffix later.
+        case $rel in
+            # Plain version tag, eg. 5.17-rc3
+            $KEXTRAVERSION )
+                rel=""
+                ;;
+            # Plain version tag plus suffix, eg. 5.17-rc3.*
+            "$KEXTRAVERSION."* )
+                rel=${rel#$KEXTRAVERSION.}
+                ;;
+            # Already appended as a suffix, eg. 5.17-1.rc3*
+            *".$KEXTRAVERSION" )
+                rel=${rel%.$KEXTRAVERSION}
+                ;;
+            * ) return 1; ;;
+        esac
+    fi
+
+    echo "$rel"
+}
+
+_search_for_release_tag() {
+    local tag=$1
+    local repo=$2
+
+    # Look back up to 5 tags for a valid release tag
+    local limit=5
+    # Resolve the starting ref to its nearest tag first
+    tag=$(git -C "$repo" describe --tags --abbrev=0 "$tag" 2>/dev/null)
+    while :; do
+        limit=$((limit - 1))
+
+        if [[ -z "$tag" ]] || [[ $limit -le 0 ]]; then
+            warn "No valid tag found that is eligible for versioning, please fix your repo and tag it properly."
+            return 1
+        fi
+
+        if _get_rel_info_from_tag "$tag" > /dev/null; then
+            echo "$tag"
+            return 0
+        fi
+
+        # Step back past the current tag, or describe would keep returning it
+        tag=$(git -C "$repo" describe --tags --abbrev=0 "$tag^" 2>/dev/null)
+    done
+}
+
+# Get release info from git tag
+#
+# We try to parse and verify the RPM NVR (Name, Version, Release) info's 'VR' part using git tag or commit info
+# N: is always kernel
+# V: is the kernel's major release version (eg. 5.18, 5.18.0, 5.17.2)
+# R: is tokens separated with '.' (eg. 1[.KDIST], 2[.KDIST], 2.1[.KDIST], 0.rc1[.KDIST]),
+#    could also be 0.YYYYMMDDgit<commit> for a snapshot release.
+#    But ideally all git tags are for formal releases, so snapshot tags shouldn't appear in the repo.
+#
+# With a tag that contains valid VR info it's considered a tagged release, else it's a snapshot release.
+#
+# $1: git tag or git commit, defaults to HEAD
+# $2: kernel source tree, should be a git repo
+# $3: optional upstream branch, remote branch name current HEAD is tracking
+get_kernel_git_version()
+{
+    local gitref=${1:-HEAD}
+    local repo=${2:-$TOPDIR}
+    local upstream=${3:-"@{u}"}
+
+    local last_tag release_tag release_info git_desc
+    local tag tagged
+
+    # Get the current commit's snapshot name, format: YYYYMMDDgit<commit>,
+    # or YYYYMMDD (only if the repo is missing, eg. running this script from a tarball)
+    if ! KGIT_SNAPSHOT=$(git rev-parse --short=12 "$gitref" 2>/dev/null); then
+        warn "Invalid git reference '$gitref' or git repo is unavailable, versioning it as a dated snapshot"
+        KGIT_SNAPSHOT=$(date +"%Y%m%d")
+        KSNAPRELEASE=1
+        return
+    fi
+    KGIT_SNAPSHOT=$(date +"%Y%m%d")git$KGIT_SNAPSHOT
+
+    # Check if we are in the first merge window and set KPREMERGEWINDOW, see the comment above about KPREMERGEWINDOW
+    if [[ $KROLLING ]] && _first_merge_window_detection "$@"; then
+        KPREMERGEWINDOW=1
+    fi
+
+    # Get the latest git tag of this commit
+    last_tag=$(_get_last_git_tag_of "$gitref" -C "$repo")
+    tagged=$?
+    # Check and get the latest release git tag, in case the current tag is a test tag
+    # (eg. the current tag is fix_xxxx, rebase_test_xxxx, or the user tagged it for fun, and the previous release tag is 5.18.0-1[.KDIST])
+    if _get_rel_info_from_tag "$last_tag" > /dev/null; then
+        # The latest tag is a release tag, just use it
+        release_tag=$last_tag
+    else
+        warn "Latest git tag '$last_tag' is not a release tag, it doesn't match Makefile version '$KVERSION.$KPATCHLEVEL.$KSUBLEVEL-$KEXTRAVERSION'"
+        if release_tag=$(_search_for_release_tag "$last_tag" "$repo"); then
+            warn "Found release tag '$release_tag'."
+        fi
+    fi
+
+    if [[ "$release_tag" ]]; then
+        git_desc=$(_get_git_describe_of "$gitref" -C "$repo")
+        release_info=$(_get_rel_info_from_tag "$release_tag")
+        release_info_raw=$(_check_strip_kernel_majver "$release_tag")
+
+        if ! [[ $release_info ]]; then
+            warn "No extra release info in release tag, might be an upstream tag." \
+                "Please make a release commit with 'make dist-release' for a formal release."
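+            # Fall back to release number 0 here so a later 'make dist-release'
+            # can bump it. For reference (assumed example values): the branches
+            # below split release_info such that a plain "2.1" yields
+            # KGIT_RELEASE_NUM=2 and KGIT_SUB_RELEASE_NUM=1, while snapshot-style
+            # info ("0.<stamp>...") is parsed from the tail instead.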
+            release_info=0
+            KGIT_RELEASE_NUM=0
+            KGIT_SUB_RELEASE_NUM=0
+        elif [[ $release_info == 0.* ]]; then
+            KGIT_RELEASE_NUM=${release_info##*.}
+            KGIT_SUB_RELEASE_NUM=${release_info%$KGIT_RELEASE_NUM}
+            KGIT_SUB_RELEASE_NUM=${KGIT_SUB_RELEASE_NUM%.}
+            KGIT_SUB_RELEASE_NUM=${KGIT_SUB_RELEASE_NUM##*.}
+        else
+            KGIT_RELEASE_NUM=${release_info%%.*}
+            KGIT_SUB_RELEASE_NUM=${release_info#$KGIT_RELEASE_NUM}
+            KGIT_SUB_RELEASE_NUM=${KGIT_SUB_RELEASE_NUM#.}
+            KGIT_SUB_RELEASE_NUM=${KGIT_SUB_RELEASE_NUM%%.*}
+        fi
+
+        KERNEL_PREV_RELREASE_TAG=$release_tag
+        KGIT_LATEST_TAG_RELEASE_INFO=$release_info
+
+        if [[ "$tagged" -eq 1 ]] && [[ "$release_tag" == "$last_tag" ]]; then
+            if [[ $release_info ]]; then
+                # This commit is tagged and it's a valid release tag, just use it
+                if [[ "$KGIT_RELEASE_NUM" != '0' ]]; then
+                    KGIT_TAG_RELEASE_INFO=$release_info
+                    KGIT_TAG_RELEASE_INFO_RAW=$release_info_raw
+                    KTAGRELEASE=$release_tag
+                else
+                    warn "'$release_tag' is not a formal release tag, using snapshot versioning."
+                    KSNAPRELEASE=1
+                fi
+            else
+                # Tagged but no release info from the current tag, could be an upstream style tag
+                KGIT_TAG_RELEASE_INFO=1
+                # It's not a valid tag
+                KTAGRELEASE=
+            fi
+
+            # If the current tag is a release tag, the previous release tag should be another one
+            KERNEL_PREV_RELREASE_TAG=$(_search_for_release_tag "${release_tag}^" "$repo")
+
+        elif [[ "$last_tag" == "$git_desc" ]]; then
+            # It's tagged, but the tag is not a release tag
+            # A dumb assumption here: if it's not in *.* format (v5.4, 4.12.0, etc...) it's a test tag
+            if [[ $last_tag != v*.* ]] && [[ $last_tag != *.*.* ]]; then
+                warn "'$last_tag' looks like a test tag, using it as a versioning suffix."
+                warn "Please tag properly for a release build, now versioning it as a test build."
+                KGIT_TESTBUILD_TAG=test.$last_tag
+                KGIT_TESTBUILD_TAG=${KGIT_TESTBUILD_TAG//-/_}
+                KTESTRELEASE=1
+            else
+                warn "'$last_tag' looks like a kernel release tag but is out of sync with the Makefile," \
+                    "ignoring it and versioning as a snapshot."
+                warn "Please tag properly for a release build."
+                KSNAPRELEASE=1
+            fi
+        else
+            # Just a simple untagged commit, nothing special
+            KSNAPRELEASE=1
+        fi
+    else
+        # No tag or no repo info available, use a snapshot version
+        KSNAPRELEASE=1
+        KGIT_RELEASE_NUM=0
+        KGIT_SUB_RELEASE_NUM=0
+    fi
+
+    # Fix release numbers if they are not numeric
+    if ! _is_num "$KGIT_RELEASE_NUM"; then
+        warn "Unrecognizable release number: $KGIT_RELEASE_NUM, resetting to 0"
+        KGIT_RELEASE_NUM=0
+    fi
+    if ! _is_num "$KGIT_SUB_RELEASE_NUM"; then
+        KGIT_SUB_RELEASE_NUM=0
+    fi
+}
+
+_prepare_kernel_ver() {
+    if ! get_kernel_code_version "$@"; then
+        return 1
+    fi
+
+    if ! get_kernel_git_version "$@"; then
+        return 1
+    fi
+
+    # Disable pre-merge-window detection for a tagged commit,
+    # we want to follow the user-provided tag strictly
+    if [[ $KPREMERGEWINDOW ]]; then
+        if [[ ! $KTAGRELEASE ]]; then
+            KPATCHLEVEL=$(( KPATCHLEVEL + 1 ))
+            KEXTRAVERSION="rc0"
+        fi
+    fi
+}
+
+### Generate an RPM-friendly version based on kernel tag and commit info
+#
+# Examples:
+# (Ignoring pre-merge-window detection, see comments about KPREMERGEWINDOW)
+# (Also assume the Makefile's kernel version info is all correct)
+#
+# git describe --tags              Generated version (uname -r)                   Corresponding RPM version
+# v5.18                            5.18.0-0[.KDIST]                               kernel[-kdist]-5.18.0-0.rc1
+# v5.18.0                          5.18.0-0[.KDIST]                               kernel[-kdist]-5.18.0-0.rc1
+# v5.18.1                          5.18.1-0[.KDIST]                               kernel[-kdist]-5.18.1-0.rc1
+# v5.18.0-1-gac28df2               5.18.0-0.20220428gitac28df2cd5d0[.KDIST]       kernel[-kdist]-5.18.0-0.20220428gitac28df2cd5d0 *
+# v5.18.0-3-g16cadc5               5.18.0-0.20220428git16cadc58d50c[.KDIST]       kernel[-kdist]-5.18.0-0.20220428git16cadc58d50c *
+# v5.18-rc1                        5.18.0-0.rc1[.KDIST]                           kernel[-kdist]-5.18.0-0.rc1
+# v5.18-rc1-1-g380a504             5.18.0-0.20220428git380a504e42d9.rc1[.KDIST]   kernel[-kdist]-5.18.0-0.20220428git380a504e42d9.rc1
+# 5.18.0-1[.KDIST]                 5.18.0-1[.KDIST]                               kernel[-kdist]-5.18.0-1
+# 5.18.12-3[.KDIST]                5.18.12-3[.KDIST]                              kernel[-kdist]-5.18.12-3
+# 5.18.12-3[.KDIST]-5-g9318b03     5.18.12-0.20220428git9318b0349d5c.3[.KDIST]    kernel[-kdist]-5.18.12-0.20220428git9318b0349d5c.3
+# 5.18.0-12.rc1[.KDIST]            5.18.0-12.rc1[.KDIST]                          kernel[-kdist]-5.18.0-12.rc1
+# 5.18.0-12[.KDIST]-2-g551f9cd     5.18.0-0.20220428git551f9cd79ece.12[.KDIST]    kernel[-kdist]-5.18.0-0.20220428git551f9cd79ece.12
+# x86-5.4.119-19-0010.prerelease   5.4.119-19-0010.prerelease                     kernel[-kdist]-5.4.119-19-0010.prerelease **
+#
+# * NOTE: random commit snapshots can't be versioned in a non-decreasing way since one can always amend/rebase, so only the date stamp matters now.
+#
+# As you may have noticed, releases always start with '0' unless a git tag has release >= 1.
+# The tag should be generated by other commands that come later in this script.
+prepare_kernel_ver() {
+    local gitref=$1 localversion=$2
+    local krelease
+
+    case $localversion in
+        +* | '' ) localversion=${localversion#+} ;;
+        *) die "Unexpected LOCALVERSION '$localversion', run dist-check-configs for more info."
+    esac
+
+    _prepare_kernel_ver "$gitref"
+    if [[ $KSNAPRELEASE ]]; then
+        # For a snapshot version, it should start with 0. and
+        # KEXTRAVERSION will be appended at the tail of the git info.
+        krelease=0.$KGIT_SNAPSHOT
+        [[ ${KEXTRAVERSION:-0} != "0" ]] && krelease=$krelease.$KEXTRAVERSION
+        # Release numbers will be appended too if available, as a version hint for users.
+        [[ ${KGIT_RELEASE_NUM:-0} != "0" ]] && krelease=$krelease.$KGIT_RELEASE_NUM
+        [[ ${KGIT_SUB_RELEASE_NUM:-0} != "0" ]] && krelease=$krelease.$KGIT_SUB_RELEASE_NUM
+    elif [[ $KTESTRELEASE ]]; then
+        # For a test tag, use the most recent release tag we can find and
+        # append the test suffix.
+        krelease=0
+        [[ ${KEXTRAVERSION:-0} != "0" ]] && krelease=$krelease.$KEXTRAVERSION
+        [[ $KGIT_LATEST_TAG_RELEASE_INFO ]] && krelease=$krelease.$KGIT_LATEST_TAG_RELEASE_INFO.$KGIT_TESTBUILD_TAG
+    else
+        if [[ $KTAGRELEASE ]]; then
+            # If the git tag matches all release info, respect it.
+            krelease=$KGIT_TAG_RELEASE_INFO
+        else
+            # Upstream or unknown, set release to start with "0."
+            # so it can be updated easily later.
+            # And if it's an rc release, use "0.0" to ensure it has
+            # lower priority.
+ if [[ "$KRCRELEASE" ]]; then + krelease=0.0 + else + krelease=0.1 + fi + fi + + # If KEXTRAVERSION is not number it will break the release syntax + # if added as prefix, add as suffix in such case + if [[ $KEXTRAVERSION ]]; then + if _is_num "${KEXTRAVERSION%%.*}"; then + krelease="$KEXTRAVERSION.$krelease" + else + krelease="$krelease.$KEXTRAVERSION" + fi + fi + fi + + KERNEL_NAME="kernel${KDIST:+-$KDIST}" + KERNEL_MAJVER="$KVERSION.$KPATCHLEVEL.$KSUBLEVEL" + KERNEL_RELVER="$krelease" + KERNEL_UNAMER="$KERNEL_MAJVER-$KERNEL_RELVER${KDIST:+.$KDIST}" + + if [[ $KFORCEUNAMER ]] && [[ $KGIT_TAG_RELEASE_INFO_RAW ]]; then + KERNEL_UNAMER="$KERNEL_MAJVER$KGIT_TAG_RELEASE_INFO_RAW" + fi + + if [[ $localversion ]]; then + KERNEL_NAME="$KERNEL_NAME-$localversion" + KERNEL_UNAMER="$KERNEL_UNAMER+$localversion" + fi +} + +### Generate formal release version based on kernel tag and commit info +# +# Examples: +# (Ignoring pre-release window detection, see comments about KPREMERGEWINDOW) +# (Also assume Makefile's kernel version info is all correct) +# +# git describe --tags Generated Version (uname -r) Corresponding RPM version +# v5.18 5.18.0-1[.KDIST] kernel[-kdist]-5.18.0-1 +# v5.18.0 5.18.0-1[.KDIST] kernel[-kdist]-5.18.0-1 +# v5.18.1 5.18.1-1[.KDIST] kernel[-kdist]-5.18.1-1 +# v5.18.0-1-gac28df2 5.18.0-1[.KDIST] kernel[-kdist]-5.18.0-1 +# v5.18.0-3-g16cadc5 5.18.0-1[.KDIST] kernel[-kdist]-5.18.0-1 +# v5.18-rc1 5.18.0-1.rc1[.KDIST] kernel[-kdist]-5.18.0-1.rc1 +# v5.18-rc1-1-g380a504 5.18.0-1.rc1[.KDIST] kernel[-kdist]-5.18.0-1.rc1 +# 5.18.0-1[.KDIST] 5.18.0-2[.KDIST] kernel[-kdist]-5.18.0-2 +# 5.18.12-3[.KDIST] 5.18.12-4[.KDIST] kernel[-kdist]-5.18.12-4 +# 5.18.12-3[.KDIST]-5-g9318b03 5.18.12-4[.KDIST] kernel[-kdist]-5.18.12-4 +# 5.18.0-12.rc1[.KDIST] 5.18.0-12.rc1[.KDIST] kernel[-kdist]-5.18.0-13.rc1 +# 5.18.0-12[.KDIST]-2-g551f9cd 5.18.0-0.20220428git551f9cd79ece.12[.KDIST] kernel[-kdist]-5.18.0-13 +# x86-5.4.119-19-0010.prerelease 5.4.119-20[.KDIST] kernel[-kdist]-5.4.119-20[.KDIST] +# +# * NOTE: random commit snapshot can't be versioning in non-decreasing since one can always amend/rebase, so only the date stamp matters now. +# ** NOTE: Yes, this script is compatible with TK4 +# +# As you may have noticed, release always start with '0' unless a git tag have release >= 1, +# The tag should be generated by other commands that comes later in this script. +prepare_next_kernel_ver() { + _prepare_kernel_ver "$@" + + if [[ $KPREMERGEWINDOW ]]; then + warn "Upstream is in merge window, forcing a formal release is not recommanded." + fi + + # TK4 left-pads the release number with 0 + KGIT_RELEASE_NUM=$(echo "$KGIT_RELEASE_NUM" | sed 's/^0*//') + krelease=$((KGIT_RELEASE_NUM + 1)) + + if [[ $KEXTRAVERSION ]]; then + if _is_num "${KEXTRAVERSION%%.*}"; then + krelease="$KEXTRAVERSION.$krelease" + else + krelease="$krelease.$KEXTRAVERSION" + fi + fi + + KERNEL_MAJVER="$KVERSION.$KPATCHLEVEL.$KSUBLEVEL" + KERNEL_RELVER="$krelease" + KERNEL_UNAMER="$KERNEL_MAJVER-$KERNEL_RELVER${KDIST:+.$KDIST}" +} + +# Get next formal kernel version based on previous git tag +# Same as prepare_next_kernel_ver, but increase sub version instead. +# eg. instead of 5.18.12-3[.KDIST] -> 5.18.12-4[.KDIST], this generates 5.18.12-3[.KDIST] -> 5.18.12-3.1[.KDIST] +prepare_next_sub_kernel_ver() { + _prepare_kernel_ver "$@" + + if [[ $KPREMERGEWINDOW ]]; then + warn "Upstream is in merge window, forcing a formal release is not recommanded." 
+    fi
+
+    KGIT_RELEASE_NUM=$(echo "$KGIT_RELEASE_NUM" | sed 's/^0*//')
+    krelease=$KGIT_RELEASE_NUM
+    krelease=$krelease.$((KGIT_SUB_RELEASE_NUM + 1))
+
+    if [[ $KEXTRAVERSION ]]; then
+        if _is_num "${KEXTRAVERSION%%.*}"; then
+            krelease="$KEXTRAVERSION.$krelease"
+        else
+            krelease="$krelease.$KEXTRAVERSION"
+        fi
+    fi
+
+    KERNEL_MAJVER="$KVERSION.$KPATCHLEVEL.$KSUBLEVEL"
+    KERNEL_RELVER="$krelease"
+    KERNEL_UNAMER="$KERNEL_MAJVER-$KERNEL_RELVER${KDIST:+.$KDIST}"
+}
diff --git a/dist/scripts/lib.sh b/dist/scripts/lib.sh
new file mode 100755
index 000000000000..f2afc007d173
--- /dev/null
+++ b/dist/scripts/lib.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+#
+# Shell helpers
+#
+
+RED="\033[0;31m"
+GREEN="\033[0;32m"
+YELLOW="\033[1;33m"
+NC="\033[0m"
+
+echo_green() {
+    echo -en "$GREEN"
+    echo "$@"
+    echo -en "$NC"
+}
+
+echo_yellow() {
+    echo -en "$YELLOW"
+    echo "$@"
+    echo -en "$NC"
+}
+
+echo_red() {
+    echo -en "$RED"
+    echo "$@"
+    echo -en "$NC"
+}
+
+info() {
+    echo_green -n "info: " >&2
+    echo "$@"
+}
+
+warn() {
+    echo_yellow -n "warn: " >&2
+    echo "$@" >&2
+}
+
+error() {
+    echo_red -n "error: " >&2
+    echo "$@" >&2
+}
+
+die() {
+    echo_red -n "fatal: " >&2
+    echo "$@" >&2
+    exit 1
+}
+
+# $1: relative dir path
+# $2: optional git ref, if not set the current worktree is used
+# $3: optional git repo
+ls_repo_dir() {
+    local _path=$1
+    local _gitref=$2
+    local _repo=${3:-$TOPDIR}
+    local _gitshow
+
+    # If a git reference is set and the git repo is valid, try to list the versioned directory content
+    if [[ "$_gitref" ]]; then
+        if _gitshow=$(git -C "$_repo" show "$_gitref:$_path" 2>/dev/null); then
+            echo "$_gitshow" | tail -n +3
+            return 0
+        fi
+        warn "Failed to ls '$_path' from git reference '$_gitref', using current worktree as build source."
+    fi
+
+    ls -1ap "$_repo/$_path/"
+}
+
+# $1: relative file path
+# $2: optional git ref, if not set the current worktree is used
+# $3: optional git repo
+cat_repo_file() {
+    local _path=$1
+    local _gitref=$2
+    local _repo=${3:-$TOPDIR}
+
+    # If a git reference is set and the git repo is valid, try to use the versioned file
+    if [[ "$_gitref" ]]; then
+        if git -C "$_repo" show "$_gitref:$_path" 2>/dev/null; then
+            return 0
+        fi
+        warn "Failed to retrieve '$_path' from git reference '$_gitref', using current worktree as build source."
+    fi
+
+    cat "$_repo/$_path"
+}
+
+# $1: keyword
+# $2: optional git ref, if not set the current Makefile is used
+# $3: optional git repo
+get_dist_makefile_var() {
+    local _sedexp="/^$1\s*[:?]?=\s*(.*)/{s/^\s*^$1\s*[:?]?=\s*//;h};\${x;p}"
+    local _gitref=$2
+    local _repo=${3:-$TOPDIR}
+    local _val
+
+    _val=$(cat_repo_file "dist/Makefile" "$_gitref" "$_repo" | sed -nE -e "$_sedexp")
+    case $_val in
+        *\$* )
+            die "Can't parse Makefile variable '$1', it references other variables."
+            ;;
+    esac
+
+    echo "$_val"
+}
+
+[ "$TOPDIR" ] || TOPDIR=$(git rev-parse --show-toplevel 2>/dev/null)
+[ "$TOPDIR" ] || TOPDIR="$(realpath "$(dirname "$(realpath "$0")")/../..")"
+[ "$VENDOR" ] || VENDOR=$(get_dist_makefile_var VENDOR)
+[ "$DISTPATH" ] || DISTPATH=$(get_dist_makefile_var DISTPATH)
+[ "$DISTDIR" ] || DISTDIR=$TOPDIR/$DISTPATH
+[ "$SOURCEDIR" ] || SOURCEDIR=$DISTDIR/rpm/SOURCES
+[ "$SPEC_ARCH" ] || SPEC_ARCH=$(get_dist_makefile_var SPEC_ARCH)
+# If KDIST is not set (or it's set to "-", which is an illegal value for KDIST), read it from the dist Makefile.
+[ "${KDIST--}" == - ] && KDIST=$(get_dist_makefile_var KDIST)
+
+if ! [ -s "$TOPDIR/Makefile" ]; then
+    echo "Dist tools can only be run within a valid Linux Kernel git workspace." >&2
+    exit 1
+fi
+
+if [ -z "$VENDOR" ] || ! [ -s "$DISTDIR/Makefile" ]; then
[ -s "$DISTDIR/Makefile" ]; then + echo "Dist tools can't work without properly configured dist file." >&2 + exit 1 +fi + +# Just use uname to get native arch +get_native_arch () { + uname -m +} + +# Convert any arch name into linux kernel arch name +# +# There is an inconsistence between Linux kernel's arch naming and +# RPM arch naming. eg. Linux kernel uses arm64 instead of aarch64 +# used by RPM, i686 vs x86, amd64 vs x86_64. Most are just same things +# with different name due to historical reasons. +get_kernel_arch () { + case $1 in + riscv64 ) + echo "riscv" + ;; + loongarch64 ) + echo "loongarch64" + ;; + arm64 | aarch64 ) + echo "arm64" + ;; + i386 | i686 | x86 ) + echo "x86" + ;; + amd64 | x86_64 ) + echo "x86_64" + ;; + esac +} + +# Convert any arch name into linux kernel src arch name +# +# Similiar to get_kernel_arch but return the corresponding +# source code base sub path in arch/ +get_kernel_src_arch () { + case $1 in + loongarch64 ) + echo "loongarch" + ;; + riscv64 ) + echo "riscv" + ;; + arm64 | aarch64 ) + echo "arm64" + ;; + i386 | i686 | x86 | amd64 | x86_64 ) + echo "x86" + ;; + esac +} + +type git >/dev/null || die 'git is required' +type make >/dev/null || die 'make is required' diff --git a/dist/scripts/ls-config-files.sh b/dist/scripts/ls-config-files.sh new file mode 100755 index 000000000000..09d76ffee44e --- /dev/null +++ b/dist/scripts/ls-config-files.sh @@ -0,0 +1,17 @@ +#!/bin/bash --norc +# +# List files to be generated for kernel config styles +# +# Params: +# $@: [ <filter>... ] When non-empty, only show kernel configs style matching <filter> + +# shellcheck source=./lib-config.sh +. "$(dirname "$(realpath "$0")")/lib-config.sh" + +_echo_file() { + for arch in "${CONFIG_ARCH[@]}"; do + echo "$CONFIG_OUTDIR/$1.$arch.config" + done +} + +for_each_config_target _echo_file "$@" diff --git a/dist/scripts/ls-config-targets.sh b/dist/scripts/ls-config-targets.sh new file mode 100755 index 000000000000..00d35c18f217 --- /dev/null +++ b/dist/scripts/ls-config-targets.sh @@ -0,0 +1,15 @@ +#!/bin/bash --norc +# +# List available kernel config styles +# +# Params: +# $@: [ <filter>... ] When non-empty, only show kernel configs style matching <filter> + +# shellcheck source=./lib-config.sh +. "$(dirname "$(realpath "$0")")/lib-config.sh" + +_echo() { + echo "$1" 2>/dev/null || exit 0 +} + +for_each_config_target _echo "$@" diff --git a/dist/scripts/make-release.sh b/dist/scripts/make-release.sh new file mode 100755 index 000000000000..0a3be15c4d5f --- /dev/null +++ b/dist/scripts/make-release.sh @@ -0,0 +1,131 @@ +#!/bin/bash --norc +# +# Print out the tkernel version based on git commit and work tree. +# +# shellcheck source=./lib-version.sh +. 
"$(dirname "$(realpath "$0")")/lib-version.sh" + +COMMIT=HEAD +case $1 in + --sub-release ) + prepare_next_sub_kernel_ver "$COMMIT" "$TOPDIR" || die "Failed preparing next sub release version info" + ;; + --maj-release) + prepare_next_kernel_ver "$COMMIT" "$TOPDIR" || die "Failed preparing next release version info" + ;; + *) + die "Invalid param $1, usage $0 {--maj-release|--sub-release}" + ;; +esac +AUTHOR_NAME=$(git config user.name) || die "Failed getting author name info from git config" +AUTHOR_MAIL=$(git config user.email) || die "Failed getting author email info from git config" +GITLOG=$(git -C "$TOPDIR" log "$KERNEL_PREV_RELREASE_TAG..$COMMIT" --pretty=oneline) || die "Failed getting changelog from git log" + +if [[ "$KTAGRELEASE" ]]; then + warn "You are generating changelog from a tagged release commit, however changelog update" + warn "should be done before tagging a release, please be careful with what you are doing or fix your workflow." + prepare_kernel_ver "$COMMIT" +fi + +if [[ -z "$GITLOG" ]]; then + error "No change found since last tag, using dummy changelog." + GITLOG="- Accumulated bug fix and improvements." +fi + +AUTHOR="$AUTHOR_NAME <$AUTHOR_MAIL>" +RELEASE_VERSION="$KERNEL_MAJVER-$KERNEL_RELVER" +TAG_VERSION="$KERNEL_UNAMER" +CHANGELOG_HDR="* $(date +"%a %b %e %Y") $AUTHOR - $RELEASE_VERSION" +CHANGELOG="$(echo "$GITLOG" | sed -E "s/^\S+/-/g")" + +print_preview() { + cat << EOF +Please review following info: +Tag: ${TAG_VERSION:-<skipped>} +Release Version: $RELEASE_VERSION +Release Author: $AUTHOR +Changelog: +$CHANGELOG_HDR +$CHANGELOG +EOF +} + +print_info() { + cat << EOF +Please review following info: +!!! DO NOT CHANGE THE FILE FORMAT !!! + +// You can set "Tag:" to empty to skip tagging. +// but it's strongly recommended to tag after changlog update, to make versioning more consistent. +Tag: $TAG_VERSION +Release Version: $RELEASE_VERSION +Release Author: $AUTHOR +Changelog: +* $(date +"%a %b %e %Y") <Author> - <Release Version>" +$CHANGELOG +EOF +} + +dump_info() { + print_info > "$DISTDIR/.release.stash" +} + +update_info() { + ${EDITOR:-vi} "$DISTDIR/.release.stash" >/dev/tty + [[ $? -eq 0 ]] || die "Failed to call editor to edit the release info" +} + +parse_info() { + TAG_VERSION=$(sed -E -ne "s/^Tag:\s*(.*)/\1/p" "$DISTDIR/.release.stash") + RELEASE_VERSION=$(sed -E -ne "s/\s*Release Version:\s*(.*)/\1/p" "$DISTDIR/.release.stash") + AUTHOR=$(sed -E -ne "s/^Release Author:\s*(.*)/\1/p" "$DISTDIR/.release.stash") + CHANGELOG=$(sed -n '/^* /,$p' "$DISTDIR/.release.stash" | tail -n +2) + CHANGELOG_HDR="* $(date +"%a %b %e %Y") $AUTHOR - $RELEASE_VERSION" +} + +while :; do + _res="?" + while :; do + { + print_preview + echo + echo "(Press 'q' to exit preview, Press 'e' to edit above info, Press 'y' to commit.)" + } | less + echo "Is this OK? (y/n/q/e, Y: Do the release, N/Q: quit, E: edit)" + read -r -n1 _res + case $_res in + n|N|q|Q ) + exit 0 + ;; + y|Y ) + info "Updating spec changelog and tagging HEAD... " + echo "$CHANGELOG_HDR" >> "$DISTDIR/templates/changelog.new" + echo "$CHANGELOG" >> "$DISTDIR/templates/changelog.new" + echo "" >> "$DISTDIR/templates/changelog.new" + cat "$DISTDIR/templates/changelog" >> "$DISTDIR/templates/changelog.new" + mv "$DISTDIR/templates/changelog.new" "$DISTDIR/templates/changelog" + git -C "$TOPDIR" add "$DISTPATH/templates/changelog" + git -C "$TOPDIR" commit -m "$DISTPATH: release $RELEASE_VERSION + +Upstream: no + +Signed-off-by: $AUTHOR" + if [[ $TAG_VERSION ]]; then + if ! 
git -C "$TOPDIR" tag "$TAG_VERSION"; then + error "Failed to tag '$TAG_VERSION', this tag may already exists." + error "Changelog update should be done before tagging a release, so you may either use dist-new-release to tag, or fix the tag later manually." + fi + else + warn "Please ensure a tag corresponding to '$RELEASE_VERSION' is added to repo to make changelog consistent." + fi + + exit 0 + ;; + e|E ) + dump_info + update_info + parse_info + ;; + esac + done +done diff --git a/dist/sources/README b/dist/sources/README new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/dist/sources/check-kabi b/dist/sources/check-kabi new file mode 100755 index 000000000000..0defb4d02157 --- /dev/null +++ b/dist/sources/check-kabi @@ -0,0 +1,156 @@ +#!/usr/bin/python3 +# +# check-kabi - Red Hat kABI reference checking tool +# +# We use this script to check against reference Module.kabi files. +# +# Author: Jon Masters <jcm@redhat.com> +# Copyright (C) 2007-2009 Red Hat, Inc. +# +# This software may be freely redistributed under the terms of the GNU +# General Public License (GPL). + +# Changelog: +# +# 2018/06/01 - Update for python3 by Petr Oros. +# 2009/08/15 - Updated for use in RHEL6. +# 2007/06/13 - Initial rewrite in python by Jon Masters. + +__author__ = "Jon Masters <jcm@redhat.com>" +__version__ = "2.0" +__date__ = "2009/08/15" +__copyright__ = "Copyright (C) 2007-2009 Red Hat, Inc" +__license__ = "GPL" + +import getopt +import string +import sys + +true = 1 +false = 0 + +def load_symvers(symvers, filename): + """Load a Module.symvers file.""" + + symvers_file = open(filename, "r") + + while true: + in_line = symvers_file.readline() + if in_line == "": + break + if in_line == "\n": + continue + try: + checksum, symbol, directory, type = in_line.split() + except ValueError: + # Firmware symbol may have an extra field + checksum, symbol, directory, type, extra = in_line.split() + + symvers[symbol] = " ".join([checksum, symbol, directory, type]) + + +def load_kabi(kabi, filename): + """Load a Module.kabi file.""" + + kabi_file = open(filename, "r") + + while true: + in_line = kabi_file.readline() + if in_line == "": + break + if in_line == "\n": + continue + try: + checksum, symbol, directory, type = in_line.split() + except ValueError: + # Firmware symbol may have an extra field + checksum, symbol, directory, type, extra = in_line.split() + + kabi[symbol] = " ".join([checksum, symbol, directory, type]) + + +def check_kabi(symvers, kabi): + """Check Module.kabi and Module.symvers files.""" + + fail = 0 + warn = 0 + changed_symbols = [] + moved_symbols = [] + + for symbol in kabi: + abi_hash, abi_sym, abi_dir, abi_type = kabi[symbol].split() + if symbol in symvers: + sym_hash, sym_sym, sym_dir, sym_type = symvers[symbol].split() + if abi_hash != sym_hash: + fail = 1 + changed_symbols.append(symbol) + + if abi_dir != sym_dir: + warn = 1 + moved_symbols.append(symbol) + else: + fail = 1 + changed_symbols.append(symbol) + + if fail: + print("*** ERROR - ABI BREAKAGE WAS DETECTED ***") + print("") + print("The following symbols have been changed (this will cause an ABI breakage):") + print("") + for symbol in changed_symbols: + print(symbol) + print("") + + if warn: + print("*** WARNING - ABI SYMBOLS MOVED ***") + print("") + print("The following symbols moved (typically caused by moving a symbol from being") + print("provided by the kernel vmlinux out to a loadable module):") + print("") + for symbol in moved_symbols: + print(symbol) + print("") + + """Halt the build, if we got errors 
+    double-checking is required to avoid introducing / concealing
+    KABI inconsistencies."""
+    if fail or warn:
+        sys.exit(1)
+    sys.exit(0)
+
+
+def usage():
+    print("""
+check-kabi: check Module.kabi and Module.symvers files.
+
+	check-kabi [ -k Module.kabi ] [ -s Module.symvers ]
+
+""")
+
+
+if __name__ == "__main__":
+
+    symvers_file = ""
+    kabi_file = ""
+
+    opts, args = getopt.getopt(sys.argv[1:], 'hk:s:')
+
+    for o, v in opts:
+        if o == "-s":
+            symvers_file = v
+        if o == "-h":
+            usage()
+            sys.exit(0)
+        if o == "-k":
+            kabi_file = v
+
+    if (symvers_file == "") or (kabi_file == ""):
+        usage()
+        sys.exit(1)
+
+    symvers = {}
+    kabi = {}
+
+    load_symvers(symvers, symvers_file)
+    load_kabi(kabi, kabi_file)
+    check_kabi(symvers, kabi)
diff --git a/dist/sources/cpupower.config b/dist/sources/cpupower.config
new file mode 100644
index 000000000000..8629a4a3ede7
--- /dev/null
+++ b/dist/sources/cpupower.config
@@ -0,0 +1,3 @@
+# See 'cpupower help' and cpupower(1) for more info
+CPUPOWER_START_OPTS="frequency-set -g performance"
+CPUPOWER_STOP_OPTS="frequency-set -g ondemand"
diff --git a/dist/sources/cpupower.service b/dist/sources/cpupower.service
new file mode 100644
index 000000000000..5f10ab7ee39a
--- /dev/null
+++ b/dist/sources/cpupower.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Configure CPU power related settings
+After=syslog.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=/etc/sysconfig/cpupower
+ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS
+ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS
+
+[Install]
+WantedBy=multi-user.target
diff --git a/dist/sources/filter-modules.sh b/dist/sources/filter-modules.sh
new file mode 100755
index 000000000000..1326883cc044
--- /dev/null
+++ b/dist/sources/filter-modules.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+#
+# Uses the Fedora kernel-ark repo as reference.
+#
+# Called as: filter-modules.sh <depmod base path> <kernel version> <arch> <System.map> <module pkg>
+#
+# This script collects modules, and filters them into the kernel-core and kernel-modules
+# subpackages. We list out subsystems/subdirs to prune from the installed
+# module directory. What is left is put into the kernel-core package. What is
+# pruned is contained in the kernel-modules package.
+#
+# This file contains the default subsys/subdirs to prune from all architectures.
+# If an architecture needs to differ, the per-arch override lists below are used
+# instead. If a module or subsys should be in kernel-modules on all arches,
+# please change the defaults listed here.
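+#
+# As a rough illustration of how these lists are applied (an assumed example,
+# not one of the real entries below): a subdir name is combined with its prefix
+# into a path filter against the collected module list, so "bluetooth" under
+# "drivers/" punts everything matching "drivers/bluetooth/" out of kernel-core:
+#
+#   printf '%s\n' kernel/drivers/bluetooth/btusb.ko kernel/fs/ext4/ext4.ko \
+#       | grep 'drivers/bluetooth/'   # -> selects btusb.ko for kernel-modules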
+
+# Set the default dirs and modules to filter out as external modules
+
+driverdirs="atm auxdisplay bcma bluetooth firewire fmc fpga iio infiniband isdn leds media memstick message mfd mmc mtd nfc ntb pcmcia platform power powercap ssb soundwire staging thermal tty uio w1"
+
+chardrvs="mwave pcmcia"
+
+netdrvs="appletalk can dsa hamradio ieee802154 irda ppp slip usb wireless"
+
+ethdrvs="3com adaptec alteon amd aquantia arc atheros broadcom cadence calxeda chelsio cisco dec dlink emulex icplus marvell mellanox micrel myricom neterion nvidia oki-semi packetengines qlogic rdc renesas sfc silan sis smsc stmicro sun tehuti ti wiznet xircom"
+
+inputdrvs="gameport tablet touchscreen joystick"
+
+hiddrvs="surface-hid"
+
+scsidrvs="aacraid aic7xxx aic94xx be2iscsi bfa bnx2i bnx2fc csiostor cxgbi esas2r fcoe fnic hisi_sas isci libsas lpfc megaraid mpt2sas mpt3sas mvsas pm8001 qla2xxx qla4xxx sym53c8xx_2 ufs qedf"
+
+usbdrvs="atm image misc serial wusbcore"
+
+drmdrvs="amd ast bridge gma500 i2c i915 mgag200 nouveau panel radeon via"
+
+netprots="6lowpan appletalk atm ax25 batman-adv bluetooth can dccp dsa ieee802154 irda l2tp mac80211 mac802154 mpls netrom nfc rds rfkill rose sctp smc wireless"
+
+fsdrvs="affs befs cifs coda cramfs dlm ecryptfs hfs hfsplus jfs jffs2 minix ncpfs nilfs2 ocfs2 reiserfs romfs squashfs sysv ubifs ufs gfs2"
+
+# .ko files to be filtered
+singlemods="ntb_netdev iscsi_ibft iscsi_boot_sysfs megaraid pmcraid qedi qla1280 9pnet_rdma rpcrdma nvmet-rdma nvme-rdma hid-picolcd hid-prodikeys hwa-hc hwpoison-inject hid-sensor-hub target_core_user sbp_target cxgbit iw_cxgb3 iw_cxgb4 cxgb3i cxgb3i cxgb3i_ddp cxgb4i chcr chtls parport_serial ism regmap-sdw regmap-sdw-mbq arizona-micsupp hid-asus iTCO_wdt rnbd-client rnbd-server mlx5_ib mlx5_vdpa spi-altera-dfl nct6775 hid-playstation hid-nintendo ntc_thermistor configs"
+
+# Overrides are individual modules which need to remain in kernel-core due to deps.
+overrides="cec wmi"
+
+BASE_DIR=$1
+KERNEL_UNAMER=$2
+ARCH=$3
+SYSTEM_MAP=$(realpath "$4")
+MODULEPKG=$5
+MODULE_DIR=lib/modules/$KERNEL_UNAMER
+
+error() {
+    echo "filter-modules.sh: $*" >&2
+}
+
+if ! cd "$BASE_DIR"; then
+    error "Invalid base path: '$BASE_DIR'"
+    exit 1
+fi
+
+if ! cd "$MODULE_DIR"; then
+    error "Invalid kernel module path: '$MODULE_DIR'"
+    exit 1
+fi
+
+# To be read from the build path
+core_modules_list=
+modules_list=
+
+# Read all kernel modules into the core modules list at the beginning,
+# then filter them into the external modules list step by step
+#
+# Not filtering internal or vdso, so start with kernel/
+core_modules_list=$(find kernel -name '*.ko')
+
+filter_mods() {
+    local prefix=$1 mods=$2 suffix=$3
+    local mod filter_list
+
+    for mod in $mods; do
+        if ! filter_list=$(grep "$prefix$mod$suffix" <<< "$core_modules_list"); then
+            error "$prefix$mod$suffix is marked as a non-core module but not built, skipping."
+        else
+            core_modules_list=$(grep -v "$prefix$mod$suffix" <<< "$core_modules_list")
+            modules_list+=$filter_list
+            modules_list+=$'\n'
+        fi
+    done
+}
+
+filter_override() {
+    local filter_list
+
+    for mod in $1; do
+        if filter_list=$(grep "/$mod.ko" <<< "$modules_list"); then
+            modules_list=$(grep -v "/$mod.ko" <<< "$modules_list")
+            core_modules_list+=$filter_list
+            core_modules_list+=$'\n'
+        fi
+    done
+}
+
+# Check if module dependencies are still sane after splitting these mods out
+# args: modules to be split out
+check_modules_dependency_after_split() {
+    # Mask external mods to do a depmod check
+    for mod in $modules_list; do
+        mv "$mod" "$mod.bak"
+    done
+
+    # Run depmod on the resulting module tree and make sure it isn't broken
+    depmod_err=$(depmod "$KERNEL_UNAMER" -b "$BASE_DIR" -naeF "$SYSTEM_MAP" 2>&1 1>/dev/null)
+    if [ "$depmod_err" ]; then
+        error "Failed to filter out external modules, broken depmod:"
+        error "$depmod_err"
+        exit 1
+    fi
+
+    # Move the mods back
+    for mod in $modules_list; do
+        mv "$mod.bak" "$mod"
+    done
+}
+
+# Module overrides for different arches
+case $ARCH in
+    aarch64 )
+        driverdirs="atm auxdisplay bcma bluetooth firewire fpga infiniband leds media memstick message mmc mtd nfc ntb pcmcia power ssb soundwire staging tty uio w1 ofed_addon"
+        ethdrvs="3com adaptec arc alteon atheros broadcom cadence calxeda chelsio cisco dec dlink emulex marvell micrel myricom neterion nvidia packetengines qlogic rdc sfc silan sis smsc stmicro sun tehuti ti via wiznet xircom"
+        drmdrvs="amd arm bridge ast exynos hisilicon i2c imx mgag200 meson msm nouveau panel pl111 radeon rockchip tegra sun4i tiny vc4"
+        singlemods="ntb_netdev iscsi_ibft iscsi_boot_sysfs megaraid pmcraid qedi qla1280 9pnet_rdma rpcrdma nvmet-rdma nvme-rdma hid-picolcd hid-prodikeys hwpoison-inject target_core_user sbp_target cxgbit chcr rnbd-client rnbd-server mlx5_ib mlx5_core mlx5_vdpa dfl-emif octeontx2-cpt octeontx2-cptvf spi-altera-dfl rvu_cptpf rvu_cptvf regmap-sdw regmap-sdw-mbq hid-playstation hid-nintendo configs"
+        ;;
+    riscv64 )
+        ;;
+    loongarch64 )
+        ;;
+    x86_64 )
+        ;;
+    * )
+        error "Unknown arch '$ARCH'"
+        ;;
+esac
+
+# Filter for split and verify
+case $MODULEPKG in
+    non-core-modules )
+        filter_mods "drivers/" "$driverdirs" /
+        filter_mods "drivers/char/" "$chardrvs" /
+        filter_mods "drivers/net/" "$netdrvs" /
+        filter_mods "drivers/net/ethernet/" "$ethdrvs" /
+        filter_mods "drivers/input/" "$inputdrvs" /
+        filter_mods "drivers/hid/" "$hiddrvs" /
+        filter_mods "drivers/scsi/" "$scsidrvs" /
+        filter_mods "drivers/usb/" "$usbdrvs" /
+        filter_mods "drivers/gpu/drm/" "$drmdrvs" /
+        filter_mods "net/" "$netprots" /
+        filter_mods "fs/" "$fsdrvs" /
+        # Just kill sound.
+ filter_mods "" "sound" / + filter_mods "drivers" "soundwire" / + # Filter single modules + filter_mods "" "$singlemods" + # Now process the override list to bring those modules back into core + filter_override "$overrides" + ;; + * ) + error "Invalid module packaging param '$1'" + exit 1 + ;; +esac + +# Ensure this packaging splitting won't break core modules dependency +check_modules_dependency_after_split + +# Print the modules_list after sort, and prepend /lib/modules/<KERNEL_UNAMER>/ to each line +echo "%dir /lib/modules/$KERNEL_UNAMER/" +echo "$modules_list" | sort -n | sed "/^$/d;s/^/\/${MODULE_DIR//\//\\\/}\//" + +exit 0 diff --git a/dist/sources/module-keygen.sh b/dist/sources/module-keygen.sh new file mode 100755 index 000000000000..aee46be94ee5 --- /dev/null +++ b/dist/sources/module-keygen.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Default keygen +# +# We do nothing here, just check if CONFIG_MODULE_SIG_KEY is set to +# "certs/signing_key.pem", this is a special value for kernel config, +# Kbuild will automatically generate keys when this value is set. + +KERNEL_UNAMER=$1 +BUILD_DIR=$2 + +KCONFIG_FILE=$BUILD_DIR/.config + +error() { + echo "module-keygen: error: $*" >&2 + exit 1 +} + +if ! [[ -f $KCONFIG_FILE ]]; then + error "can't find a valid kernel config: $KCONFIG_FILE" +fi + +if ! grep -q '^CONFIG_MODULES=y' "$KCONFIG_FILE"; then + echo "CONFIG_MODULES not enabled, quit. " + exit 0 +fi + +if ! grep -q '^CONFIG_MODULE_SIG=y' "$KCONFIG_FILE"; then + echo "CONFIG_MODULE_SIG not enabled, quit. " + exit 0 +fi + +if ! grep -q '^CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"' "$KCONFIG_FILE"; then + error "CONFIG_MODULE_SIG_KEY=\"certs/signing_key.pem\" is not defined, can't gen keys." +fi + +# Don't use Kbuild's signing, use %%{_module_signer} instead, be compatible with debuginfo and compression +echo "module-keygen: Disabling CONFIG_MODULE_SIG_FORCE, siginig within temporary builtin key." +sed -i -e "s/^CONFIG_MODULE_SIG_FORCE.*/# CONFIG_MODULE_SIG_FORCE is not set/" "$KCONFIG_FILE" + +echo "module-keygen: $KERNEL_UNAMER is using builtin keys." diff --git a/dist/sources/module-signer.sh b/dist/sources/module-signer.sh new file mode 100755 index 000000000000..1a48a394de4d --- /dev/null +++ b/dist/sources/module-signer.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Default key-sign +# +# Just a wrapper for sign-file +# +# We depend on CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +# for the built-in keys. + +KERNEL_UNAMER=$1 +BUILD_DIR=$2 +BASE_DIR=$3 + +KCONFIG_FILE=$BUILD_DIR/.config +MODULE_DIR=$BASE_DIR/lib/modules/$KERNEL_UNAMER + +error() { + echo "module-signer: error: $*" >&2 +} + +if ! [[ -f $KCONFIG_FILE ]]; then + error "can't find a valid kernel config." + exit 1 +fi + +if ! grep -q '^CONFIG_MODULES=y' "$KCONFIG_FILE"; then + echo "CONFIG_MODULES=y is not defined in .config, skipping signing." + exit 0 +fi + +if ! grep -q '^CONFIG_MODULE_SIG=y' "$KCONFIG_FILE"; then + echo "CONFIG_MODULE_SIG=y is not defined in .config, skipping signing." + exit 0 +fi + +if ! [[ -x $BUILD_DIR/scripts/sign-file ]]; then + error "$BUILD_DIR/scripts/sign-file is not an executable file." + exit 1 +fi + +if ! grep -q '^CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"' "$KCONFIG_FILE"; then + error "CONFIG_MODULE_SIG_KEY is not defined in .config, can't gen keys." + exit 1 +fi + +echo "module-signer: Signing $KERNEL_UNAMER modules with builtin keys..." 
+PRIKEY="$BUILD_DIR/certs/signing_key.pem"
+PUBKEY="$BUILD_DIR/certs/signing_key.x509"
+
+if ! [[ -s $PRIKEY ]]; then
+    error "private key file doesn't exist: $PRIKEY"
+    exit 1
+fi
+
+if ! [[ -s $PUBKEY ]]; then
+    error "public key file doesn't exist: $PUBKEY"
+    exit 1
+fi
+
+JOB=$(nproc)
+JOB=${JOB:-2}
+
+export BUILD_DIR
+export PRIKEY
+export PUBKEY
+# shellcheck disable=2016
+find "$MODULE_DIR" -type f -name '*.ko' -print0 | xargs -0r -n 1 -P "$JOB" sh -c '
+    $BUILD_DIR/scripts/sign-file sha256 "$PRIKEY" "$PUBKEY" "$1"
+    rm -f "$1.sig" "$1.dig"
+
+    if [ "~Module signature appended~" != "$(tail -c 28 "$1")" ]; then
+        echo "module-signer: error: failed to sign $1."
+        exit 1
+    fi
+' _ || exit $?
+
+echo "module-signer: Signing $KERNEL_UNAMER done."
diff --git a/dist/templates/changelog b/dist/templates/changelog
new file mode 100644
index 000000000000..a0fe7a542c15
--- /dev/null
+++ b/dist/templates/changelog
@@ -0,0 +1,2 @@
+* Wed Oct 05 2022 Kairui Song <kasong@tencent.com> - 5.4.119-19-0009.11
+- Initial dist port release
diff --git a/dist/templates/kernel.template.spec b/dist/templates/kernel.template.spec
new file mode 100644
index 000000000000..fbe34230fe84
--- /dev/null
+++ b/dist/templates/kernel.template.spec
@@ -0,0 +1,1286 @@
+# A simplified kernel spec initially based on Tencent Linux Kernels and Fedora/CentOS
+#
+# By changing a few rpm macros, it's very convenient to build for different archs or
+# kernel config styles, and build different components.
+### Kernel version relation macros
+# The following variables are filled in by automation scripts:
+# %%{kernel_unamer}: `uname -r` output, needed by scriptlets so prepare it early, eg. 5.18.19-2207.2.1.tks, 5.18.19-2207.2.1.tks+debug, 5.4.119-1-0009.1
+# %%{kernel_majver}: Kernel RPM package version, eg. 5.15.0, 5.15.3, 5.16.0
+# %%{kernel_relver}: Kernel RPM package release, eg. 2207.1, 0.20211115git1135ec008ef3.rc0.2207, 0009.11
+# %%{rpm_name}: Kernel RPM package name, eg. kernel, kernel-tlinux4, kernel-stream, kernel-stream-debug
+# %%{rpm_vendor}: RPM package vendor
+# %%{rpm_url}: RPM url
+# TODO: kernel_unamer doesn't have a distro mark
+{{VERSIONSPEC}}
+
+# TODO: We have a fixed tar name, it might be better to include KDIST in the tar name
+%define kernel_tarname kernel-%{kernel_majver}-%{kernel_relver}
+
+# This section defines the following value:
+# %%{kernel_arch}
+# Since the kernel arch name differs from many other definitions, this will insert a script snippet
+# to handle the conversion, and error out on unsupported arches.
+{{ARCHSPEC}}
+
+# TODO: This is a workaround for kernel userspace tools (eg. perf), which don't
+# support LTO and cause FTBFS; this needs to be removed after LTO is available in
+# upstream
+%global _lto_cflags %{nil}
+
+###### Kernel packaging options #################################################
+# Since we need to generate kernel, kernel-subpackages, perf, bpftools, from
+# this one single code tree, the following build switches are very helpful.
+#
+# The following build options can be enabled or disabled with --with/--without
+# in the rpmbuild command, but may be disabled by later checks.
but may be disabled by later checks. +# +# This section defines the following options: +# with_core: kernel core pkg +# with_doc: kernel doc pkg +# with_headers: kernel headers pkg +# with_perf: perf tools pkg +# with_tools: kernel tools pkg +# with_bpftool: bpftool pkg +# with_debuginfo: debuginfo for all packages +# with_modsign: whether modules should be signed +# with_kabichk: whether a kABI check is needed at the end of the build +{{PKGPARAMSPEC}} + +# Only use with cross build, don't touch it unless you know what you are doing +%define with_crossbuild %{?_with_crossbuild: 1} %{?!_with_crossbuild: 0} + +###### Kernel signing params ################################################# +### TODO: Currently only module signing, no secureboot +# module-keygen +# Should be an executable accepting two params: +# module-keygen <kernel ver> <kernel objdir> +# <kernel ver>: Kernel's version-release, `uname -r` output of that kernel +# <kernel objdir>: Kernel build dir, where built kernel objs, certs, and vmlinux are stored +# +# This executable should provide the keys required for signing, or at least disable the builtin keygen +%define use_builtin_module_keygen %{?_module_keygen: 0} %{?!_module_keygen: 1} + +# module-signer +# Should be an executable accepting three params: +# module-signer <kernel ver> <kernel objdir> <buildroot> +# <kernel ver>: Kernel's version-release, `uname -r` output of that kernel +# <kernel objdir>: Kernel build dir, where built kernel objs, certs, and vmlinux are stored +# <buildroot>: RPM's buildroot, where kernel modules are installed into +# +# This executable should sign all kernel modules in <buildroot>/lib/modules/<kernel ver> +# based on the info gatherable from <kernel objdir>. +%define use_builtin_module_signer %{?_module_signer: 0} %{?!_module_signer: 1} + +###### Required RPM macros ##################################################### + +### Debuginfo handling +# The following macros control RPM's builtin debuginfo extraction behaviour, +# tuning it into a kernel-friendly style. +# +# The kernel package needs its own method to pack the debuginfo files. +# This disables RPM's built-in debuginfo file packaging; we package +# debuginfo files manually using find-debuginfo.sh. +%undefine _debuginfo_subpackages +# This disables the RH vendor macro's debuginfo package template generation. +# It only generates debuginfo for the main package, but we only want debuginfo +# for subpackages, so disable it and do things manually. +%undefine _enable_debug_packages +# This stops find-debuginfo.sh from appending minimal debuginfo +# to every binary. +%undefine _include_minidebuginfo +# This disables the debugsource package which collects source files for debug info; +# we pack the kernel source code manually. +%undefine _debugsource_packages +# TODO: This prevents find-debuginfo.sh from adding a unique suffix to .ko.debug files +# that would make .ko.debug file names unrecognizable by `crash`. +# We may patch `crash` to fix that or find a better way, since this stops the unique +# debug file renaming for userspace packages too. +%undefine _unique_debug_names +# Pass --reloc-debug-sections to eu-strip: .ko files are ET_REL files, so they have relocation +# sections for debug sections. Those sections will not be relinked. This helps create .debug files +# that have cross debug section relocations resolved.
+%global _find_debuginfo_opts -r +%global debuginfo_dir /usr/lib/debug + +###### Build time config ####################################################### +# Disable kernel building for unsupported arches, allow building userspace packages +%ifarch %nobuildarches noarch +%global with_core 0 +%endif + +# Require cross compiler if cross compiling +%if %{with_crossbuild} +BuildRequires: binutils-%{_build_arch}-linux-gnu, gcc-%{_build_arch}-linux-gnu +%global with_perf 0 +%global with_tools 0 +%global with_bpftool 0 +%global _cross_compile %{!?_cross_compile:%{_build_arch}-linux-gnu-}%{?_cross_compile:%{_cross_compile}} +%endif + +# List the packages used during the kernel build +BuildRequires: kmod, patch, bash, coreutils, tar, git-core, which, gawk +BuildRequires: make, gcc, binutils, system-rpm-config, hmaccalc, bison, flex, gcc-c++ +BuildRequires: bzip2, xz, findutils, gzip, perl-interpreter, perl-Carp, perl-devel +BuildRequires: net-tools, hostname, bc +BuildRequires: dwarves +BuildRequires: python3-devel, openssl-devel, elfutils-devel +BuildRequires: openssl +BuildRequires: gcc-plugin-devel +# glibc-static is required for a consistent build environment (specifically +# CONFIG_CC_CAN_LINK_STATIC=y). +BuildRequires: glibc-static + +%if %{with_perf} +BuildRequires: zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex xz-devel +BuildRequires: audit-libs-devel +BuildRequires: java-devel +BuildRequires: libbabeltrace-devel +%ifnarch aarch64 +BuildRequires: numactl-devel +%endif +%endif + +%if %{with_tools} +BuildRequires: gettext ncurses-devel +BuildRequires: pciutils-devel libcap-devel libnl3-devel +%endif + +%if %{with_doc} +BuildRequires: xmlto, asciidoc +%endif + +%if %{with_bpftool} +BuildRequires: llvm +# We don't care about this utility's Python version, since we only want rst2* commands during build time +BuildRequires: /usr/bin/rst2man +BuildRequires: zlib-devel binutils-devel +%endif + +%if %{with_headers} +BuildRequires: rsync +%endif + +###### Kernel packages sources ################################################# +### Kernel tarball +Source0: %{kernel_tarname}.tar + +### Build time scripts +# Script used to assist kernel building +Source10: filter-modules.sh + +Source20: module-signer.sh +Source21: module-keygen.sh + +Source30: check-kabi + +### Arch-specific kernel configs and kABI +# Start from Source1000 to Source1199, for kernel config +# Start from Source1200 to Source1399, for kabi +{{ARCHSOURCESPEC}} + +### Userspace tools +# Start from Source2000 to Source2999, for userspace tools +Source2000: cpupower.service +Source2001: cpupower.config + +###### Kernel package definitions ############################################## +### Main meta package +Summary: %{rpm_vendor} Linux kernel meta package +Name: %{rpm_name} +Version: %{kernel_majver} +Release: %{kernel_relver}%{?dist} +License: GPLv2 +URL: %{rpm_url} +Vendor: %{rpm_vendor} + +# We can't let RPM do the dependencies automatically because it'll then pick up +# a correct but undesirable perl dependency from the module headers which +# isn't required for the kernel proper to function +AutoReq: no +AutoProv: yes + +# Kernel requirements +# installonlypkg(kernel) is a hint for RPM that this package shouldn't be auto-cleaned.
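+# (dnf/yum match this Provides against their installonlypkgs handling, so multiple +# kernel versions stay installed side by side instead of being upgraded in place.)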
+Provides: installonlypkg(kernel) +Provides: kernel = %{version}-%{release} +Provides: %{rpm_name} = %{version}-%{release} +Requires: %{rpm_name}-core = %{version}-%{release} +Requires: %{rpm_name}-modules = %{version}-%{release} +Requires: linux-firmware + +%description +This is the meta package of the %{?rpm_vendor:%{rpm_vendor} }Linux kernel, the core of the operating system. + +%if %{with_core} +### Kernel core package +%package core +Summary: %{rpm_vendor} Linux Kernel +Provides: installonlypkg(kernel) +Provides: kernel-core = %{version}-%{release} +Provides: kernel-uname-r = %{kernel_unamer} +Requires(pre): coreutils +Requires(post): coreutils kmod dracut +Requires(preun): coreutils kmod +Requires(post): %{_bindir}/kernel-install +Requires(preun): %{_bindir}/kernel-install +# Kernel install hooks & initramfs +%if 0%{?rhel} == 7 || "%{?dist}" == ".tl2" +Requires(post): systemd +Requires(preun): systemd +%else +Requires(post): systemd-udev +Requires(preun): systemd-udev +%endif + +%description core +The kernel package contains the %{?rpm_vendor:%{rpm_vendor} } Linux kernel (vmlinuz), the core of +the operating system. The kernel handles the basic functions +of the operating system: memory allocation, process allocation, device +input and output, etc. + +### Kernel module package +%package modules +Summary: %{rpm_vendor} Kernel modules to match the %{rpm_name}-core kernel +Provides: installonlypkg(kernel-module) +Provides: kernel-modules = %{version}-%{release} +Provides: kernel-modules-extra = %{version}-%{release} +Requires: %{rpm_name}-core = %{version}-%{release} +AutoReq: no +AutoProv: yes +Requires(pre): kmod +Requires(postun): kmod +%description modules +This package provides commonly used kernel modules for the %{rpm_name}-core kernel package. + +### Kernel devel package +%package devel +Summary: Development package for building kernel modules to match the %{version}-%{release} kernel +Release: %{release} +Provides: installonlypkg(kernel) +Provides: kernel-devel = %{version}-%{release} +Provides: kernel-devel-%{_target_cpu} = %{version}-%{release} +Provides: kernel-devel-uname-r = %{kernel_unamer} +AutoReqProv: no +%description devel +This package provides kernel headers and makefiles sufficient to build modules +against the %{version}-%{release} kernel package. + +%if %{with_debuginfo} +### Kernel debuginfo package +%package debuginfo +Summary: Debug information for package %{rpm_name} +Requires: %{rpm_name}-debuginfo-common-%{_target_cpu} +Provides: installonlypkg(kernel) +Provides: kernel-debuginfo = %{version}-%{release} +AutoReqProv: no +%description debuginfo +This package provides debug information including +vmlinux, System.map for package %{rpm_name}. +This is required to use SystemTap with %{rpm_name}. +# debuginfo search rule +# If BTF is present, keep it so the kernel can use it. +%if 0%{?rhel} != 7 +# Old versions of find-debuginfo.sh don't support this, so only do it for newer versions. Old versions of eu-strip don't seem to strip BTF either, so this should be fine. +%global _find_debuginfo_opts %{_find_debuginfo_opts} --keep-section '.BTF' +%endif +# Debuginfo file list for main kernel package +# The (\+.*)? is used to match all kernel variants +%global _find_debuginfo_opts %{_find_debuginfo_opts} -p '.*\/usr\/src\/kernels/.*|XXX' -o ignored-debuginfo.list -p $(echo '/.*/%{kernel_unamer}/.*|/.*%{kernel_unamer}(\.debug)?'
| sed 's/+/[+]/g') -o debuginfo.list +# with_debuginfo +%endif +# with_core +%endif + +%if %{with_debuginfo} +### Common debuginfo package +%package debuginfo-common-%{_target_cpu} +Summary: Kernel source files used by %{rpm_name}-debuginfo packages +Provides: installonlypkg(kernel) +Provides: kernel-debuginfo-common = %{version}-%{release} +%description debuginfo-common-%{_target_cpu} +This package is required by %{rpm_name}-debuginfo subpackages. +It provides the kernel source files common to all builds. +# No need to define extra debuginfo search rule here, use debugfiles.list +# with_debuginfo +%endif + +%if %{with_headers} +%package headers +Summary: Header files for the Linux kernel for use by glibc +Obsoletes: glibc-kernheaders < 3.0-46 +Provides: glibc-kernheaders = 3.0-46 +Provides: kernel-headers = %{version}-%{release} +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. +# with_headers +%endif + +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Requires: bzip2 +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Requires: %{rpm_name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. +# debuginfo search rule +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%global _find_debuginfo_opts %{_find_debuginfo_opts} -p '.*%{_bindir}/perf(.*\.debug)?|.*%{_libexecdir}/perf-core/.*|.*%{_libdir}/libperf-jvmti.so(.*\.debug)?|XXX' -o perf-debuginfo.list + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +%description -n python3-perf +The python3-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%package -n python3-perf-debuginfo +Summary: Debug information for package perf python bindings +Requires: %{rpm_name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n python3-perf-debuginfo +This package provides debug information for the perf python bindings. 
+# debuginfo search rule +# the python_sitearch macro should already be defined from above +%global _find_debuginfo_opts %{_find_debuginfo_opts} -p '.*%{python3_sitearch}/perf.*\.so(.*\.debug)?|XXX' -o python3-perf-debuginfo.list +# with_perf +%endif + +%if %{with_tools} +%package -n kernel-tools +Summary: Assortment of tools for the Linux kernel +License: GPLv2 +%ifarch %{cpupowerarchs} +Provides: cpupowerutils = 1:009-0.6.p1 +Obsoletes: cpupowerutils < 1:009-0.6.p1 +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +Requires: kernel-tools-libs = %{version}-%{release} +%endif +%description -n kernel-tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. + +%package -n kernel-tools-libs +Summary: Libraries for the kernel-tools +License: GPLv2 +%description -n kernel-tools-libs +This package contains the libraries built from the tools/ directory +from the kernel source. + +%package -n kernel-tools-libs-devel +Summary: Assortment of tools for the Linux kernel +License: GPLv2 +Requires: kernel-tools = %{version}-%{release} +%ifarch %{cpupowerarchs} +Provides: cpupowerutils-devel = 1:009-0.6.p1 +Obsoletes: cpupowerutils-devel < 1:009-0.6.p1 +%endif +Requires: kernel-tools-libs = %{version}-%{release} +Provides: kernel-tools-devel +%description -n kernel-tools-libs-devel +This package contains the development files for the tools/ directory from +the kernel source. + +%package -n kernel-tools-debuginfo +Summary: Debug information for package kernel-tools +Requires: %{rpm_name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n kernel-tools-debuginfo +This package provides debug information for package kernel-tools. +# debuginfo search rule +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%global _find_debuginfo_opts %{_find_debuginfo_opts} -p '.*%{_bindir}/(cpupower|tmon|gpio-.*|iio_.*|ls.*|centrino-decode|powernow-k8-decode|turbostat|x86_energy_perf_policy|intel-speed-select|page_owner_sort|slabinfo)(.*\.debug)?|.*%{_libdir}/libcpupower.*|XXX' -o kernel-tools-debuginfo.list +# with_tools +%endif + +%if %{with_bpftool} +%package -n bpftool +Summary: Inspection and simple manipulation of eBPF programs and maps +License: GPLv2 +%description -n bpftool +This package contains the bpftool, which allows inspection and simple +manipulation of eBPF programs and maps. + +%package -n bpftool-debuginfo +Summary: Debug information for package bpftool +Requires: %{rpm_name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n bpftool-debuginfo +This package provides debug information for the bpftool package. +# debuginfo search rule +%global _find_debuginfo_opts %{_find_debuginfo_opts} -p '.*%{_sbindir}/bpftool(.*\.debug)?|XXX' -o bpftool-debuginfo.list +# with_bpftool +%endif + +###### common macros for build and install ##################################### +### Signing scripts +# If an external module signer and keygen are provided, ignore the built-in keygen and +# signer; otherwise use the builtin ones.
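+# (Illustration: an external pair can be wired in at build time with e.g. +# rpmbuild --define '_module_keygen /path/to/keygen' --define '_module_signer /path/to/signer', +# which makes the %%{?_module_signer}/%%{?_module_keygen} tests above skip the builtin scripts.)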
+%if %{use_builtin_module_signer} + # SOURCE20 is just a wrapper for $BUILD_DIR/scripts/sign-file + %define _module_signer %{SOURCE20} +%endif +%if %{use_builtin_module_keygen} + # SOURCE21 is a dummy file that only performs some checks; we depend on Kbuild for the builtin keygen + %define _module_keygen %{SOURCE21} +%endif + +### Prepare common build vars shared by the %%prep, %%build and %%install sections +# _KernSrc: Path to kernel source, located in _builddir +# _KernBuild: Path to the built kernel objects, could be same as $_KernSrc (just like source points to build under /lib/modules/<kver>) +# _KernVmlinuxH: path to vmlinux.h for BTF, located in _builddir +# KernUnameR: `uname -r` output of the built kernel +# KernModule: Kernel modules install path, located in %%{buildroot} +# KernDevel: Kernel headers and sources install path, located in %%{buildroot} +%global prepare_buildvar \ + cd %{kernel_tarname} \ + _KernSrc=%{_builddir}/%{rpm_name}-%{kernel_unamer}/%{kernel_tarname} \ + _KernBuild=%{_builddir}/%{rpm_name}-%{kernel_unamer}/%{kernel_unamer}-obj \ + _KernVmlinuxH=%{_builddir}/%{rpm_name}-%{kernel_unamer}/vmlinux.h \ + KernUnameR=%{kernel_unamer} \ + KernModule=%{buildroot}/lib/modules/%{kernel_unamer} \ + KernDevel=%{buildroot}/usr/src/kernels/%{kernel_unamer} \ + +###### Rpmbuild Prep Stage ##################################################### +%prep +%setup -q -c -n %{rpm_name}-%{kernel_unamer} +%{prepare_buildvar} + +# TODO: Apply test patch here +: + +# Mangle /usr/bin/python shebangs to /usr/bin/python3 +# Mangle all Python shebangs to be Python 3 explicitly +# -p preserves timestamps +# -n prevents creating ~backup files +# -i specifies the interpreter for the shebang +# This fixes errors such as +# *** ERROR: ambiguous python shebang in /usr/bin/kvm_stat: #!/usr/bin/python. Change it to python3 (or python2) explicitly. +# We patch all sources below for which we got a report/error. +find scripts/ tools/ Documentation/ \ + -type f -and \( \ + -name "*.py" -or \( -not -name "*.*" -exec grep -Iq '^#!.*python' {} \; \) \ + \) \ + -exec pathfix.py -i "%{__python3} %{py3_shbang_opts}" -p -n {} \+; + +# Update kernel version and suffix info to make uname consistent with RPM version +# A PATCHLEVEL inconsistency only happens in the first merge window, but patch them all just in case +sed -i "/^VERSION/cVERSION = $(echo %{kernel_majver} | cut -d '.' -f 1)" $_KernSrc/Makefile +sed -i "/^PATCHLEVEL/cPATCHLEVEL = $(echo %{kernel_majver} | cut -d '.' -f 2)" $_KernSrc/Makefile +sed -i "/^SUBLEVEL/cSUBLEVEL = $(echo %{kernel_majver} | cut -d '.' -f 3)" $_KernSrc/Makefile + +# Patch the kernel to apply uname. The reason we use EXTRAVERSION to control uname +# instead of relying completely on LOCALVERSION is that we don't want our scm/rpm version info +# to get inherited by random kernels built reusing the config file under /boot, which +# would be confusing. +_KVERSION=$(sed -nE "/^VERSION\s*:?=\s*(.*)/{s/^\s*^VERSION\s*:?=\s*//;h};\${x;p}" $_KernSrc/Makefile) +_KPATCHLEVEL=$(sed -nE "/^PATCHLEVEL\s*:?=\s*(.*)/{s/^\s*^PATCHLEVEL\s*:?=\s*//;h};\${x;p}" $_KernSrc/Makefile) +_KSUBLEVEL=$(sed -nE "/^SUBLEVEL\s*:?=\s*(.*)/{s/^\s*^SUBLEVEL\s*:?=\s*//;h};\${x;p}" $_KernSrc/Makefile) +_KUNAMER_PREFIX=${_KVERSION}.${_KPATCHLEVEL}.${_KSUBLEVEL} +_KEXTRAVERSION="" +_KLOCALVERSION="" + +case $KernUnameR in + $_KUNAMER_PREFIX* ) + _KEXTRAVERSION=$(echo "$KernUnameR" | sed -e "s/^$_KUNAMER_PREFIX//") + + # Anything after "+" belongs to LOCALVERSION, e.g. the +debug/+minimal marker.
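+ # (Worked example: for KernUnameR=5.4.119-19-0009.11+debug this ends up with + # _KEXTRAVERSION="-19-0009.11" and _KLOCALVERSION="+debug".)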
+ _KLOCALVERSION=$(echo "$_KEXTRAVERSION" | sed -ne 's/.*\([+].*\)$/\1/p') + _KEXTRAVERSION=$(echo "$_KEXTRAVERSION" | sed -e 's/[+].*$//') + + # Update Makefile to embed uname + sed -i "/^EXTRAVERSION/cEXTRAVERSION = $_KEXTRAVERSION" $_KernSrc/Makefile + # Save LOCALVERSION in .dist.localversion; it will be applied to .config after + # .config is ready in BuildConfig. + echo "$_KLOCALVERSION" > $_KernSrc/.dist.localversion + ;; + * ) + echo "FATAL: error: kernel version doesn't match the kernel spec." >&2 && exit 1 + ;; + esac + +###### Rpmbuild Build Stage #################################################### +%build + +### Make flags +# +# These definitions have to come after the %%build macro; %%build may change +# some build flags, and we have to inherit those changes. +# +# NOTE: the kernel's tools build system doesn't play well with command line variables; some +# `override` statements stop working after recursive Makefile `include` calls. +# So keep these variables as environment variables so they are available globally; +# `make` turns env vars into Makefile variables in every iteration. + +## Common flags +%global make %{__make} %{_smp_mflags} +%global kernel_make_opts INSTALL_HDR_PATH=%{buildroot}/usr INSTALL_MOD_PATH=%{buildroot} KERNELRELEASE=$KernUnameR +%global tools_make_opts DESTDIR="%{buildroot}" prefix=%{_prefix} lib=%{_lib} PYTHON=%{__python3} INSTALL_ROOT="%{buildroot}" + +## Cross compile flags +%if %{with_crossbuild} +%global kernel_make_opts %{kernel_make_opts} CROSS_COMPILE=%{_cross_compile} ARCH=%{kernel_arch} +%global __strip %{_build_arch}-linux-gnu-strip +%endif + +# Drop host cflags for crossbuild; arch options from the build target would break the host compiler +%if !%{with_crossbuild} +%global kernel_make_opts %{kernel_make_opts} HOSTCFLAGS="%{?build_cflags}" HOSTLDFLAGS="%{?build_ldflags}" +%endif + +## make for host tools, reset arch and flags for a native host build +%global host_make CFLAGS= LDFLAGS= ARCH= %{make} + +## make for kernel +%global kernel_make %{make} %{kernel_make_opts} + +## make for tools +%global tools_make CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" %{make} %{tools_make_opts} +%global perf_make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 NO_BIONIC=1 %{make} %{tools_make_opts} +%global bpftool_make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" EXTRA_LDFLAGS="%{__global_ldflags}" VMLINUX_H="$_KernVmlinuxH" %{make} %{tools_make_opts} + +# TK4: Workaround: the tools Makefile fails with massively parallel builds +%global tools_make %{tools_make} -j1 + +### Real make +%{prepare_buildvar} + +# Prepare Kernel config +BuildConfig() { + mkdir -p $_KernBuild + pushd $_KernBuild + cp $1 .config + + [ "$_KernBuild" != "$_KernSrc" ] && echo "include $_KernSrc/Makefile" > Makefile + [ "$_KernBuild" != "$_KernSrc" ] && cp $_KernSrc/.dist.localversion ./ + + # Respect scripts/setlocalversion, keep it from potentially mucking with our version numbers. + # Also update LOCALVERSION in .config + cp .dist.localversion .scmversion + "$_KernSrc"/scripts/config --file .config --set-str LOCALVERSION "$(cat .dist.localversion)" + + # Ensures build-ids are unique to allow parallel debuginfo + sed -i -e "s/^CONFIG_BUILD_SALT.*/CONFIG_BUILD_SALT=\"$KernUnameR\"/" .config + + # Call olddefconfig before make all, setting all unset configs to their default value.
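+ # (olddefconfig is the non-interactive variant of oldconfig: any symbol missing + # from the copied .config silently takes its Kconfig default value.)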
+ # The packager uses CROSS_COMPILE=scripts/dummy-tools for generating .config + # so compiler-related configs are always unset; just use the defaults for them for now + %{kernel_make} olddefconfig + + %if %{with_modsign} + # Don't use Kbuild's signing; use %%{_module_signer} instead to stay compatible with debuginfo and compression + sed -i -e "s/^CONFIG_MODULE_SIG_ALL.*/# CONFIG_MODULE_SIG_ALL is not set/" .config + %else + # Not signing, unset all signing-related configs + sed -i -e "s/^CONFIG_MODULE_SIG_ALL=.*/# CONFIG_MODULE_SIG_ALL is not set/" .config + sed -i -e "s/^CONFIG_MODULE_SIG_FORCE=.*/# CONFIG_MODULE_SIG_FORCE is not set/" .config + sed -i -e "s/^CONFIG_MODULE_SIG=.*/# CONFIG_MODULE_SIG is not set/" .config + # Lockdown can't work without module signing + sed -i -e "s/^CONFIG_SECURITY_LOCKDOWN_LSM=.*/# CONFIG_SECURITY_LOCKDOWN_LSM is not set/" .config + sed -i -e "s/^CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=.*/# CONFIG_SECURITY_LOCKDOWN_LSM_EARLY is not set/" .config + %endif + # Don't use the kernel's builtin module compression, it's incompatible with debuginfo packaging and signing + sed -i -e "s/^\(CONFIG_DECOMPRESS_.*\)=y/# \1 is not set/" .config + popd +} + +## $1: .config file +BuildKernel() { + echo "*** Start building kernel $KernUnameR" + mkdir -p $_KernBuild + pushd $_KernBuild + + %if %{with_modsign} + # Call keygen here; if it generates the module keys, it should come before Kbuild, + # so Kbuild can avoid regenerating cert keys. + %{_module_keygen} "$KernUnameR" "$_KernBuild" + %endif + + # Build vmlinux + %{kernel_make} all + # Build modules + grep -q "CONFIG_MODULES=y" ".config" && %{kernel_make} modules + # CONFIG_KERNEL_HEADER_TEST generates some extra files in the process of + # testing, so just delete them + find . -name '*.h.s' -delete + + %if %{with_bpftool} + echo "*** Building bootstrap bpftool and extract vmlinux.h" + if !
[ -s $_KernVmlinuxH ]; then + # Precompile a minimized bpftool without vmlinux.h, use it to extract vmlinux.h + # for bootstrapping the full-featured bpftool + %{host_make} -C $_KernSrc/tools/bpf/bpftool/ VMLINUX_BTF= VMLINUX_H= + # Prefer to extract vmlinux.h from the vmlinux that was just compiled, + # falling back to the host's vmlinux + # Skip this if bpftool is too old and doesn't support BTF dump + if $_KernSrc/tools/bpf/bpftool/bpftool btf help 2>&1 | grep -q "\bdump\b"; then + if grep -q "CONFIG_DEBUG_INFO_BTF=y" ".config"; then + $_KernSrc/tools/bpf/bpftool/bpftool btf dump file vmlinux format c > $_KernVmlinuxH + else + $_KernSrc/tools/bpf/bpftool/bpftool btf dump file /sys/kernel/btf/vmlinux format c > $_KernVmlinuxH + fi + fi + %{host_make} -C $_KernSrc/tools/bpf/bpftool/ clean + fi + %endif + + popd +} + +BuildPerf() { + %{perf_make} -C tools/perf all + %{perf_make} -C tools/perf man +} + +BuildTools() { + %{tools_make} -C tools/power/cpupower CPUFREQ_BENCH=false DEBUG=false + +%ifarch x86_64 + %{tools_make} -C tools/power/cpupower/debug/x86_64 centrino-decode powernow-k8-decode + %{tools_make} -C tools/power/x86/x86_energy_perf_policy + %{tools_make} -C tools/power/x86/turbostat + %{tools_make} -C tools/power/x86/intel-speed-select +%endif + + %{tools_make} -C tools/thermal/tmon/ + %{tools_make} -C tools/iio/ + %{tools_make} -C tools/gpio/ + %{tools_make} -C tools/vm/ slabinfo page_owner_sort +} + +BuildBpfTool() { + %{bpftool_make} -C tools/bpf/bpftool +} + +%if %{with_core} +{{CONFBUILDSPEC}} # `BuildConfig <.config from CONFSOURCESPEC>` +BuildKernel +%endif + +%if %{with_perf} +BuildPerf +%endif + +%if %{with_tools} +BuildTools +%endif + +%if %{with_bpftool} +BuildBpfTool +%endif + +###### Rpmbuild Install Stage ################################################## +%install +%{prepare_buildvar} + +InstKernelBasic() { + ####### Basic environment ################## + # prepare and pushd into the kernel module top dir + mkdir -p $KernModule + pushd $KernModule + + ####### modules_install ################## + pushd $_KernBuild + # Override $(mod-fw) because we don't want it to install any firmware; + # we'll get it from the linux-firmware package and we don't want conflicts + grep -q "CONFIG_MODULES=y" ".config" && %{kernel_make} mod-fw= modules_install + # Check again, don't package firmware, use linux-firmware rpm instead + rm -rf %{buildroot}/lib/firmware + popd + + ####### Prepare kernel modules files for packaging ################ + # Don't package depmod files, they should be auto-generated by depmod at rpm -i + rm -f modules.{alias,alias.bin,builtin.alias.bin,builtin.bin} \ + modules.{dep,dep.bin,devname,softdep,symbols,symbols.bin} + + # Process kernel modules + find . -name "*.ko" -type f | \ + while read -r _kmodule; do + # Mark it executable so strip and find-debuginfo can see it + chmod u+x "$_kmodule" + + # Detect missing or incorrect license tags + modinfo "$_kmodule" -l | grep -E -qv \ + 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' && \ + echo "Module $_kmodule has incorrect license." >&2 && exit 1 + + # Collect module symbol reference info for later usage + case "$_kmodule" in */drivers/*) nm -upA "$_kmodule" ;; + esac | sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' >> drivers.undef + done || exit $? + + # Generate a list of modules for block and networking. + collect_modules_list() { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $KernModule/modules.$1 + if [ !
-z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $KernModule/modules.$1 + fi + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt(l_|2x00)(pci|usb)_probe|register_netdevice' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' \ + 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + # Finish preparing the kernel module files + + ###### Install kernel core components ############################# + mkdir -p %{buildroot}/boot + install -m 644 $_KernBuild/.config config + install -m 644 $_KernBuild/.config %{buildroot}/boot/config-$KernUnameR + install -m 644 $_KernBuild/System.map System.map + install -m 644 $_KernBuild/System.map %{buildroot}/boot/System.map-$KernUnameR + + # NOTE: If we need to sign the vmlinuz, this is the place to do it. + %ifarch aarch64 + install -m 644 $_KernBuild/arch/arm64/boot/Image vmlinuz + %endif + + %ifarch riscv64 + mkdir dtb + cp $_KernBuild/arch/riscv/boot/dts/*/*.dtb dtb/ + install -m 644 $_KernBuild/arch/riscv/boot/Image vmlinuz + %endif + + %ifarch x86_64 + install -m 644 $_KernBuild/arch/x86/boot/bzImage vmlinuz + %endif + + %ifarch loongarch64 + install -m 644 $_KernBuild/vmlinuz vmlinuz + %endif + + install -m 644 vmlinuz %{buildroot}/boot/vmlinuz-$KernUnameR + + sha512hmac %{buildroot}/boot/vmlinuz-$KernUnameR | sed -e "s,%{buildroot},," > .vmlinuz.hmac + cp .vmlinuz.hmac %{buildroot}/boot/.vmlinuz-$KernUnameR.hmac + + ###### kABI checking and packaging ############################# + # Always create the kABI metadata for use in packaging + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < $_KernBuild/Module.symvers > symvers.gz + cp symvers.gz %{buildroot}/boot/symvers-$KernUnameR.gz + + ###### End of installing kernel modules and core + popd +} + +CheckKernelABI() { + echo "**** kABI checking is enabled. ****" + if ! [ -s "$1" ]; then + echo "**** But cannot find reference Module.kabi file. ****" + else + cp $1 %{buildroot}/Module.kabi + %{SOURCE30} -k %{buildroot}/Module.kabi -s $_KernBuild/Module.symvers || exit 1 + rm %{buildroot}/Module.kabi + fi +} + +InstKernelDevel() { + ###### Install kernel-devel package ############################### + ### TODO: need tidy up + ### Save the headers/makefiles etc for building modules against. + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + # `modules_install` will symlink build to $_KernBuild, and source to $_KernSrc, remove the symlinks first + rm -rf $KernModule/{build,source} + mkdir -p $KernModule/{extra,updates,weak-updates} + + # Symlink $KernDevel to kernel module build path + ln -sf /usr/src/kernels/$KernUnameR $KernModule/build + ln -sf /usr/src/kernels/$KernUnameR $KernModule/source + + # Start installing kernel devel files + mkdir -p $KernDevel + pushd $KernDevel + + # First copy everything + (cd $_KernSrc; cp --parents $(find . -type f -name "Makefile*" -o -name "Kconfig*") $KernDevel/) + # Copy built config and sym files + cp $_KernBuild/Module.symvers . + cp $_KernBuild/System.map . + cp $_KernBuild/.config . + if [ -s $_KernBuild/Module.markers ]; then + cp $_KernBuild/Module.markers . 
+ fi + + # We may want to keep Documentation; we got complaints from users about the missing + # Documentation Makefile when building custom modules with documentation. + # rm -rf build/Documentation + # Script files + rm -rf scripts + cp -a $_KernSrc/scripts . + cp -a $_KernBuild/scripts . + find scripts \( -iname "*.o" -o -iname "*.cmd" \) -delete + + # Include files + rm -rf include + cp -a $_KernSrc/include . + cp -a $_KernBuild/include/config include/ + cp -a $_KernBuild/include/generated include/ + + # SELinux + mkdir -p security/selinux/ + cp -a $_KernSrc/security/selinux/include security/selinux/ + + # Set arch name + Arch=$(head -4 $_KernBuild/.config | sed -ne "s/.*Linux\/\([^\ ]*\).*/\1/p" | sed -e "s/x86_64/x86/" ) + + # Arch include + mkdir -p arch/$Arch + cp -a $_KernSrc/arch/$Arch/include arch/$Arch/ + cp -a $_KernBuild/arch/$Arch/include arch/$Arch/ + + if [ -d $_KernBuild/arch/$Arch/scripts ]; then + cp -a $_KernBuild/arch/$Arch/scripts arch/$Arch/ || : + fi + + # Kernel module build dependency + if [ -f $_KernBuild/tools/objtool/objtool ]; then + cp -a $_KernBuild/tools/objtool/objtool tools/objtool/ || : + fi + + if [ -f $_KernBuild/tools/objtool/fixdep ]; then + cp -a $_KernBuild/tools/objtool/fixdep tools/objtool/ || : + fi + + cp -a $_KernSrc/arch/$Arch/*lds arch/$Arch/ &>/dev/null || : + cp -a $_KernBuild/arch/$Arch/*lds arch/$Arch/ &>/dev/null || : + + mkdir -p arch/$Arch/kernel + if [ -f $_KernSrc/arch/$Arch/kernel/module.lds ]; then + cp -a $_KernSrc/arch/$Arch/kernel/module.lds arch/$Arch/kernel/ + fi + if [ -f $_KernBuild/arch/$Arch/kernel/module.lds ]; then + cp -a $_KernBuild/arch/$Arch/kernel/module.lds arch/$Arch/kernel/ + fi + + # Copy include/asm-$Arch for better compatibility with some old systems + cp -a $_KernSrc/arch/$Arch include/asm-$Arch + + # Delete obj files + find include \( -iname "*.o" -o -iname "*.cmd" \) -delete + + # Make sure the Makefile and version.h have a matching timestamp so that + # external modules can be built + touch -r Makefile include/generated/uapi/linux/version.h + touch -r .config include/linux/autoconf.h + + # Done + popd +} + +InstKernelHeaders () { + %{kernel_make} headers_install + find %{buildroot}/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete +} + +InstPerf () { + %{perf_make} -C tools/perf install-bin install-traceevent-plugins install-python_ext install-man + + # Remove the 'trace' symlink.
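+ # (Assumption: the perf build installs 'trace' as an alias of perf; dropping it + # avoids claiming the generic /usr/bin/trace name.)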
+ rm -f %{buildroot}%{_bindir}/trace +} + +InstTools() { + %{tools_make} -C tools/power/cpupower DESTDIR=%{buildroot} libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install + rm -f %{buildroot}%{_libdir}/*.{a,la} + %find_lang cpupower + mv cpupower.lang ../ + +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif + + chmod 0755 %{buildroot}%{_libdir}/libcpupower.so* + mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig + install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service + install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower + +%ifarch x86_64 + mkdir -p %{buildroot}%{_mandir}/man8 + %{tools_make} -C tools/power/x86/x86_energy_perf_policy DESTDIR=%{buildroot} install + %{tools_make} -C tools/power/x86/turbostat DESTDIR=%{buildroot} install + %{tools_make} -C tools/power/x86/intel-speed-select DESTDIR=%{buildroot} install +%endif + + %{tools_make} -C tools/thermal/tmon install + %{tools_make} -C tools/iio install + %{tools_make} -C tools/gpio install + + pushd tools/vm/ + install -m755 slabinfo %{buildroot}%{_bindir}/slabinfo + install -m755 page_owner_sort %{buildroot}%{_bindir}/page_owner_sort + popd + # with_tools +} + +InstBpfTool () { + %{bpftool_make} -C tools/bpf/bpftool bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install +} + +CollectKernelFile() { + ###### Collect file list ######################################### + pushd %{buildroot} + + # Collect all module files and dirs + (find lib/modules/$KernUnameR/ -not -type d | sed -e 's/^lib*/\/lib/'; + find lib/modules/$KernUnameR/ -type d | sed -e 's/^lib*/%dir \/lib/') | sort -n > core.list + + # Do module splitting; filter-modules.sh will generate a list of + # modules to be split into external module packages. + # The rest of the modules stay in the core package. + %SOURCE10 "%{buildroot}" "$KernUnameR" "%{_target_cpu}" "$_KernBuild/System.map" non-core-modules >> modules.list || exit $? + + comm -23 core.list modules.list > core.list.tmp + mv core.list.tmp core.list + + popd + + # Make these file lists usable in the rpm build dir + mv %{buildroot}/core.list ../ + mv %{buildroot}/modules.list ../ +} + +###### Start Kernel Install + +%if %{with_core} +InstKernelBasic + +%if %{with_kabichk} +{{KABICHECKSPEC}} # `CheckKernelABI <Module.kabi from KABISOURCESPEC>` +%endif + +InstKernelDevel +%endif + +%if %{with_headers} +InstKernelHeaders +%endif + +%if %{with_perf} +InstPerf +%endif + +%if %{with_tools} +InstTools +%endif + +%if %{with_bpftool} +InstBpfTool +%endif + +CollectKernelFile + +###### Debuginfo ############################################################### +%if %{with_debuginfo} + +###### Kernel core debuginfo ####### +%if %{with_core} +mkdir -p %{buildroot}%{debuginfo_dir}/lib/modules/$KernUnameR +cp -rpf $_KernBuild/vmlinux %{buildroot}%{debuginfo_dir}/lib/modules/$KernUnameR/vmlinux +ln -sf %{debuginfo_dir}/lib/modules/$KernUnameR/vmlinux %{buildroot}/boot/vmlinux-$KernUnameR +%endif + +# All binary installations are done here, so run __debug_install_post, then undefine it. +# This triggers find-debuginfo.sh; undefining it prevents it from being triggered again +# in post %%install. We do this here because we need to compress and sign modules +# after the debuginfo extraction.
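+# (For reference: %%__debug_install_post is the macro rpm would otherwise expand +# automatically at the end of %%install to run find-debuginfo.sh.)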
+%__debug_install_post + +# Delete the debuginfo for kernel-devel files +rm -rf %{buildroot}%{debuginfo_dir}/usr/src + +%endif +%undefine __debug_install_post + +###### Finally, module sign and compress ###### +%if %{with_modsign} +### Sign after debuginfo extraction; extraction would break the signature +%{_module_signer} "$KernUnameR" "$_KernBuild" "%{buildroot}" || exit $? +%endif + +### Compress after signing; compressed modules can't be signed +# Spawn at most 16 workers and at least 2; each worker compresses 4 files +NPROC=$(nproc --all) +[ "$NPROC" ] || NPROC=2 +[ "$NPROC" -gt 16 ] && NPROC=16 +find "$KernModule" -type f -name '*.ko' -print0 | xargs -0r -P${NPROC} -n4 xz -T1; + +### Change module path in file lists +for list in ../*.list; do + sed -i -e 's/\.ko$/\.ko.xz/' $list +done + +###### RPM scriptlets ######################################################### +### Core package +# Pre +%if %{with_core} +%pre core +# Best-effort check to avoid installing with the wrong arch +if command -v uname > /dev/null; then + system_arch=$(uname -m) + if [ %{_target_cpu} != $system_arch ]; then + echo "WARN: This kernel is built for %{_target_cpu}, but your system is $system_arch." > /dev/stderr + fi +fi + +%post core +touch %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.installing_core + +%posttrans core +# Weak modules +if command -v weak-modules > /dev/null; then + weak-modules --add-kernel %{kernel_unamer} || exit $? +fi +# Boot entry and depmod files +if command -v kernel-install > /dev/null; then + kernel-install add %{kernel_unamer} /lib/modules/%{kernel_unamer}/vmlinuz +elif command -v new-kernel-pkg > /dev/null; then + new-kernel-pkg --package kernel --install %{kernel_unamer} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --make-default || exit $? + new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{kernel_unamer} || exit $? +else + echo "NOTICE: No available kernel install handler found. Please make sure boot loader and initramfs are properly configured after the installation." > /dev/stderr +fi +# Just in case kernel-install didn't depmod +depmod -A %{kernel_unamer} +# Core install done +rm -f %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.installing_core + +%preun core +# Boot entry and depmod files +if command -v kernel-install > /dev/null; then + kernel-install remove %{kernel_unamer} /lib/modules/%{kernel_unamer}/vmlinuz || exit $? +elif command -v new-kernel-pkg > /dev/null; then + /sbin/new-kernel-pkg --rminitrd --dracut --remove %{kernel_unamer} +else + echo "NOTICE: No available kernel uninstall handler found. Please make sure boot loader and initramfs are properly cleared after the uninstallation." > /dev/stderr +fi + +# Weak modules +if command -v weak-modules > /dev/null; then + weak-modules --remove-kernel %{kernel_unamer} || exit $? +fi + +### Module package +%post modules +depmod -a %{kernel_unamer} +if [ !
-f %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.installing_core ]; then + touch %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.need_to_run_dracut +fi + +%posttrans modules +if [ -f %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.need_to_run_dracut ]; then + dracut -f --kver "%{kernel_unamer}" + rm -f %{_localstatedir}/lib/rpm-state/%{name}-%{version}-%{release}%{?dist}.need_to_run_dracut +fi + +%postun modules +depmod -a %{kernel_unamer} + +### Devel package +%post devel +if [ -f /etc/sysconfig/kernel ]; then + . /etc/sysconfig/kernel || exit $? +fi +# This hardlink merges identical devel files across different kernel packages +if [ "$HARDLINK" != "no" -a -x /usr/bin/hardlink -a ! -e /run/ostree-booted ]; then + (cd /usr/src/kernels/%{kernel_unamer} && /usr/bin/find . -type f | while read -r f; do + hardlink /usr/src/kernels/*/$f $f > /dev/null + done) +fi +%endif + +### kernel-tools package +%if %{with_tools} +%post -n kernel-tools-libs +/sbin/ldconfig + +%postun -n kernel-tools-libs +/sbin/ldconfig +%endif + +###### Rpmbuild packaging file list ############################################ +### empty meta-package +%if %{with_core} +%files +%{nil} + +%files core -f core.list +%defattr(-,root,root) +# Mark files as ghost in case they are rewritten after install (e.g. by the kernel-install script) +%ghost /boot/vmlinuz-%{kernel_unamer} +%ghost /boot/.vmlinuz-%{kernel_unamer}.hmac +/boot/System.map-%{kernel_unamer} +/boot/config-%{kernel_unamer} +/boot/symvers-%{kernel_unamer}.gz +# Initramfs will be generated after install +%ghost /boot/initramfs-%{kernel_unamer}%{?dist}.img +# Make depmod files ghost files of the core package +%ghost /lib/modules/%{kernel_unamer}/modules.alias +%ghost /lib/modules/%{kernel_unamer}/modules.alias.bin +%ghost /lib/modules/%{kernel_unamer}/modules.builtin.bin +%ghost /lib/modules/%{kernel_unamer}/modules.builtin.alias.bin +%ghost /lib/modules/%{kernel_unamer}/modules.dep +%ghost /lib/modules/%{kernel_unamer}/modules.dep.bin +%ghost /lib/modules/%{kernel_unamer}/modules.devname +%ghost /lib/modules/%{kernel_unamer}/modules.softdep +%ghost /lib/modules/%{kernel_unamer}/modules.symbols +%ghost /lib/modules/%{kernel_unamer}/modules.symbols.bin + +%files modules -f modules.list +%defattr(-,root,root) + +%files devel +%defattr(-,root,root) +/usr/src/kernels/%{kernel_unamer} + +%if %{with_debuginfo} +%files debuginfo -f debuginfo.list +%defattr(-,root,root) +/boot/vmlinux-%{kernel_unamer} +%endif +# with_core +%endif + +%if %{with_debuginfo} +%files debuginfo-common-%{_target_cpu} -f debugfiles.list +%defattr(-,root,root) +%endif + +%if %{with_headers} +%files headers +%defattr(-,root,root) +/usr/include/* +%endif + +%if %{with_perf} +%files -n perf +%defattr(-,root,root) +%{_bindir}/perf* +%dir %{_libdir}/traceevent/ +%{_libdir}/traceevent/* +%{_libdir}/libperf-jvmti.so +%dir %{_prefix}/lib/perf +%{_prefix}/lib/perf/* +%dir %{_libexecdir}/perf-core +%{_libexecdir}/perf-core/* +%{_datadir}/perf-core/* +%{_mandir}/man[1-8]/perf* +%{_sysconfdir}/bash_completion.d/perf +%{_docdir}/perf-tip/tips.txt +# TODO: Missing doc?
+# %%doc linux-%%{kernel_unamer}/tools/perf/Documentation/examples.txt + +%files -n python3-perf +%defattr(-,root,root) +%{python3_sitearch}/* + +%if %{with_debuginfo} +%files -f perf-debuginfo.list -n perf-debuginfo +%defattr(-,root,root) + +%files -f python3-perf-debuginfo.list -n python3-perf-debuginfo +%defattr(-,root,root) +%endif +# with_perf +%endif + +%if %{with_tools} +%files -n kernel-tools -f cpupower.lang +%defattr(-,root,root) +%{_bindir}/cpupower +%{_datadir}/bash-completion/completions/cpupower +%ifarch x86_64 +%{_bindir}/centrino-decode +%{_bindir}/powernow-k8-decode +%endif +%{_unitdir}/cpupower.service +%{_mandir}/man[1-8]/cpupower* +%config(noreplace) %{_sysconfdir}/sysconfig/cpupower +%ifarch x86_64 +%{_bindir}/x86_energy_perf_policy +%{_mandir}/man8/x86_energy_perf_policy* +%{_bindir}/turbostat +%{_mandir}/man8/turbostat* +%{_bindir}/intel-speed-select +%endif +%{_bindir}/tmon +%{_bindir}/iio_event_monitor +%{_bindir}/iio_generic_buffer +%{_bindir}/lsiio +%{_bindir}/lsgpio +%{_bindir}/gpio-* +%{_bindir}/page_owner_sort +%{_bindir}/slabinfo + +%files -n kernel-tools-libs +%defattr(-,root,root) +%{_libdir}/libcpupower.so.0 +%{_libdir}/libcpupower.so.0.0.1 + +%files -n kernel-tools-libs-devel +%defattr(-,root,root) +%{_libdir}/libcpupower.so +%{_includedir}/cpufreq.h + +%if %{with_debuginfo} +%files -f kernel-tools-debuginfo.list -n kernel-tools-debuginfo +%defattr(-,root,root) +%endif +# with_tools +%endif + +%if %{with_bpftool} +%files -n bpftool +%defattr(-,root,root) +%{_sbindir}/bpftool +%{_sysconfdir}/bash_completion.d/bpftool +%{_mandir}/man8/bpftool.8.gz +%{_mandir}/man8/bpftool-*.8.gz +%{_mandir}/man7/bpf-helpers.7.gz + +%if %{with_debuginfo} +%files -f bpftool-debuginfo.list -n bpftool-debuginfo +%defattr(-,root,root) +%endif +# with_bpftool +%endif + +###### Changelog ############################################################### +%changelog +{{CHANGELOGSPEC}} diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ebe1e9e5fd81..5559ef264060 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS config ACPI_PROCESSOR_CSTATE def_bool y + depends on ACPI_PROCESSOR depends on IA64 || X86 config ACPI_PROCESSOR_IDLE diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 5d361e4e3405..ef1ac4d127da 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -48,7 +48,7 @@ acpi-y += acpi_pnp.o acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o acpi-y += power.o acpi-y += event.o -acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o +acpi-y += evged.o acpi-y += sysfs.o acpi-y += property.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index 57d9d574d4dd..06c756651425 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/configfs.h> #include <linux/acpi.h> +#include <linux/security.h> #include "acpica/accommon.h" #include "acpica/actables.h" @@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, { const struct acpi_table_header *header = data; struct acpi_table *table; - int ret; + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); + + if (ret) + return ret; table = container_of(cfg, struct acpi_table, cfg); @@ -263,7 +267,12 @@ static int __init acpi_configfs_init(void) acpi_table_group = configfs_register_default_group(root, "table", &acpi_tables_type); - return PTR_ERR_OR_ZERO(acpi_table_group); + if (IS_ERR(acpi_table_group)) { + 
configfs_unregister_subsystem(&acpi_configfs); + return PTR_ERR(acpi_table_group); + } + + return 0; } module_init(acpi_configfs_init); diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index 7a265c2171c0..cc4b509bad94 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -749,6 +749,9 @@ int __init acpi_aml_init(void) { int ret; + if (acpi_disabled) + return -ENODEV; + /* Initialize AML IO interface */ mutex_init(&acpi_aml_io.lock); init_waitqueue_head(&acpi_aml_io.wait); diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 8596a106a933..bd4648e5ff03 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -146,7 +146,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, static u32 err_seq; estatus = extlog_elog_entry_check(cpu, bank); - if (estatus == NULL) + if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); @@ -176,7 +176,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, } out: - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EXTLOG; + return NOTIFY_OK; } static bool __init extlog_get_l1addr(void) @@ -223,9 +224,9 @@ static int __init extlog_init(void) u64 cap; int rc; - rdmsrl(MSR_IA32_MCG_CAP, cap); - - if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr()) + if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) || + !(cap & MCG_ELOG_P) || + !extlog_get_l1addr()) return -ENODEV; if (edac_get_report_status() == EDAC_REPORTING_FORCE) { diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index f3039b93ff61..101887528848 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -317,6 +317,9 @@ static bool matching_id(const char *idstr, const char *list_id) { int i; + if (strlen(idstr) != strlen(list_id)) + return false; + if (memcmp(idstr, list_id, 3)) return false; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 2c4dda0787e8..5379bc3f275d 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -705,3 +705,185 @@ void __init acpi_processor_init(void) acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); acpi_scan_add_handler(&processor_container_handler); } + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +/** + * acpi_processor_claim_cst_control - Request _CST control from the platform. + */ +bool acpi_processor_claim_cst_control(void) +{ + static bool cst_control_claimed; + acpi_status status; + + if (!acpi_gbl_FADT.cst_control || cst_control_claimed) + return true; + + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, + acpi_gbl_FADT.cst_control, 8); + if (ACPI_FAILURE(status)) { + pr_warn("ACPI: Failed to claim processor _CST control\n"); + return false; + } + + cst_control_claimed = true; + return true; +} +EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control); + +/** + * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. + * @handle: ACPI handle of the processor object containing the _CST. + * @cpu: The numeric ID of the target CPU. + * @info: Object to write the C-states information into. + * + * Extract the C-state information for the given CPU from the output of the _CST + * control method under the corresponding ACPI processor object (or processor + * device object) and populate @info with it. + * + * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke + * acpi_processor_ffh_cstate_probe() to verify them and update the + * cpu_cstate_entry data for @cpu.
+ */ +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *cst; + acpi_status status; + u64 count; + int last_index = 0; + int i, ret = 0; + + status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "No _CST\n"); + return -ENODEV; + } + + cst = buffer.pointer; + + /* There must be at least 2 elements. */ + if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { + acpi_handle_warn(handle, "Invalid _CST output\n"); + ret = -EFAULT; + goto end; + } + + count = cst->package.elements[0].integer.value; + + /* Validate the number of C-states. */ + if (count < 1 || count != cst->package.count - 1) { + acpi_handle_warn(handle, "Inconsistent _CST data\n"); + ret = -EFAULT; + goto end; + } + + for (i = 1; i <= count; i++) { + union acpi_object *element; + union acpi_object *obj; + struct acpi_power_register *reg; + struct acpi_processor_cx cx; + + /* + * If there is not enough space for all C-states, skip the + * excess ones and log a warning. + */ + if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { + acpi_handle_warn(handle, + "No room for more idle states (limit: %d)\n", + ACPI_PROCESSOR_MAX_POWER - 1); + break; + } + + memset(&cx, 0, sizeof(cx)); + + element = &cst->package.elements[i]; + if (element->type != ACPI_TYPE_PACKAGE) + continue; + + if (element->package.count != 4) + continue; + + obj = &element->package.elements[0]; + + if (obj->type != ACPI_TYPE_BUFFER) + continue; + + reg = (struct acpi_power_register *)obj->buffer.pointer; + + obj = &element->package.elements[1]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.type = obj->integer.value; + /* + * There are known cases in which the _CST output does not + * contain C1, so if the type of the first state found is not + * C1, leave an empty slot for C1 to be filled in later. + */ + if (i == 1 && cx.type != ACPI_STATE_C1) + last_index = 1; + + cx.address = reg->address; + cx.index = last_index + 1; + + if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { + /* + * In the majority of cases _CST describes C1 as + * a FIXED_HARDWARE C-state, but if the command + * line forbids using MWAIT, use CSTATE_HALT for + * C1 regardless. + */ + if (cx.type == ACPI_STATE_C1 && + boot_option_idle_override == IDLE_NOMWAIT) { + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + cx.entry_method = ACPI_CSTATE_FFH; + } + } else if (cx.type == ACPI_STATE_C1) { + /* + * In the special case of C1, FIXED_HARDWARE can + * be handled by executing the HLT instruction. 
+ */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + continue; + } + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + cx.entry_method = ACPI_CSTATE_SYSTEMIO; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", + cx.address); + } else { + continue; + } + + if (cx.type == ACPI_STATE_C1) + cx.valid = 1; + + obj = &element->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.latency = obj->integer.value; + + obj = &element->package.elements[3]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + memcpy(&info->states[++last_index], &cx, sizeof(cx)); + } + + acpi_handle_info(handle, "Found %d idle states\n", last_index); + + info->count = last_index; + + end: + kfree(buffer.pointer); + + return ret; +} +EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst); +#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index a74c1a0e892d..c0e243668261 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void); acpi_status acpi_hw_enable_all_wakeup_gpes(void); -u8 acpi_hw_check_all_gpes(void); +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number); acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 7da1864798a0..ecaa28733dc6 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -256,6 +256,8 @@ u32 acpi_ns_build_normalized_path(struct acpi_namespace_node *node, char *full_path, u32 path_size, u8 no_trailing); +void acpi_ns_normalize_pathname(char *original_path); + char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, u8 no_trailing); diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 8def0e3d690f..b0b9bb31c336 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -283,6 +283,7 @@ struct acpi_object_addr_handler { acpi_adr_space_handler handler; struct acpi_namespace_node *node; /* Parent device */ void *context; + acpi_mutex context_mutex; acpi_adr_space_setup setup; union acpi_operand_object *region_list; /* Regions using this handler */ union acpi_operand_object *next; diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 55a7e10998d8..1ef053585bbb 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -464,16 +464,14 @@ char *acpi_db_get_next_token(char *string, return (NULL); } - /* Remove any spaces at the beginning */ + /* Remove any spaces at the beginning, ignore blank lines */ - if (*string == ' ') { - while (*string && (*string == ' ')) { - string++; - } + while (*string && isspace(*string)) { + string++; + } - if (!(*string)) { - return (NULL); - } + if (!(*string)) { + return (NULL); } switch (*string) { @@ -551,7 +549,7 @@ char *acpi_db_get_next_token(char *string, /* Find end of token */ - while (*string && (*string != ' ')) { + while (*string && !isspace(*string)) { string++; } break; diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 8438e33aa447..fd9028a6bc20 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -518,13 +518,20 @@ acpi_ds_create_field(union acpi_parse_object *op, info.region_node = region_node; status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); - if 
(info.region_node->object->region.space_id == - ACPI_ADR_SPACE_PLATFORM_COMM - && !(region_node->object->field.internal_pcc_buffer = - ACPI_ALLOCATE_ZEROED(info.region_node->object->region. - length))) { - return_ACPI_STATUS(AE_NO_MEMORY); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); } + + if (info.region_node->object->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM) { + region_node->object->field.internal_pcc_buffer = + ACPI_ALLOCATE_ZEROED(info.region_node->object->region. + length); + if (!region_node->object->field.internal_pcc_buffer) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index d75aae304595..a68237b97c4c 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -16,6 +16,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acdebug.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswexec") @@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) u32 op_class; union acpi_parse_object *next_op; union acpi_parse_object *first_arg; +#ifdef ACPI_EXEC_APP + char *namepath; + union acpi_operand_object *obj_desc; +#endif ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state); @@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) status = acpi_ds_eval_buffer_field_operands(walk_state, op); + if (ACPI_FAILURE(status)) { + break; + } +#ifdef ACPI_EXEC_APP + /* + * acpi_exec support for namespace initialization file (initialize + * buffer_fields in this code.) + */ + namepath = + acpi_ns_get_external_pathname(op->common.node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + status = + acpi_ex_write_data_to_field(obj_desc, + op->common. 
+ node->object, + NULL); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "While writing to buffer field")); + } + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_TYPE_CREATE_OBJECT: diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 4bcf15bf03de..6cf93fae4d07 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -14,7 +14,6 @@ #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" - #ifdef ACPI_ASL_COMPILER #include "acdisasm.h" #endif @@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) union acpi_parse_object *op; acpi_object_type object_type; acpi_status status = AE_OK; - #ifdef ACPI_ASL_COMPILER u8 param_count; #endif diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 935a8e2623e4..15d92bf15f0b 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -15,6 +15,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acevents.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswload2") @@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) struct acpi_namespace_node *new_node; u32 i; u8 region_space; +#ifdef ACPI_EXEC_APP + union acpi_operand_object *obj_desc; + char *namepath; +#endif ACPI_FUNCTION_TRACE(ds_load2_end_op); @@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) * be evaluated later during the execution phase */ status = acpi_ds_create_buffer_field(op, walk_state); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "CreateBufferField failure")); + goto cleanup; + } break; case AML_TYPE_NAMED_FIELD: @@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) case AML_NAME_OP: status = acpi_ds_create_node(walk_state, node, op); + if (ACPI_FAILURE(status)) { + goto cleanup; + } +#ifdef ACPI_EXEC_APP + /* + * acpi_exec support for namespace initialization file (initialize + * Name opcodes in this code.)
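+ * ae_lookup_init_file_entry() is an acpiexec helper from aecommon.h, + * so this block is compiled only for the ACPI_EXEC_APP tool and never + * for the kernel itself.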
+ */ + namepath = acpi_ns_get_external_pathname(node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + + /* Detach any existing object, attach new object */ + + if (node->object) { + acpi_ns_detach_object(node); + } + acpi_ns_attach_object(node, obj_desc, + obj_desc->common.type); + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_METHOD_OP: diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 3ef4e27995f0..78550f5004c9 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node, /* Init handler obj */ + status = + acpi_os_create_mutex(&handler_obj->address_space.context_mutex); + if (ACPI_FAILURE(status)) { + acpi_ut_remove_reference(handler_obj); + goto unlock_and_exit; + } + handler_obj->address_space.space_id = (u8)space_id; handler_obj->address_space.handler_flags = flags; handler_obj->address_space.region_list = NULL; diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 45dc797df05d..50782033012b 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -111,6 +111,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *region_obj2; void *region_context = NULL; struct acpi_connection_info *context; + acpi_mutex context_mutex; + u8 context_locked; acpi_physical_address address; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); @@ -135,6 +137,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, } context = handler_desc->address_space.context; + context_mutex = handler_desc->address_space.context_mutex; + context_locked = FALSE; /* * It may be the case that the region has never been initialized. 
@@ -203,41 +207,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, handler = handler_desc->address_space.handler; address = (region_obj->region.address + region_offset); - /* - * Special handling for generic_serial_bus and general_purpose_io: - * There are three extra parameters that must be passed to the - * handler via the context: - * 1) Connection buffer, a resource template from Connection() op - * 2) Length of the above buffer - * 3) Actual access length from the access_as() op - * - * In addition, for general_purpose_io, the Address and bit_width fields - * are defined as follows: - * 1) Address is the pin number index of the field (bit offset from - * the previous Connection) - * 2) bit_width is the actual bit length of the field (number of pins) - */ - if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && - context && field_obj) { - - /* Get the Connection (resource_template) buffer */ - - context->connection = field_obj->field.resource_buffer; - context->length = field_obj->field.resource_length; - context->access_length = field_obj->field.access_length; - } - if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && - context && field_obj) { - - /* Get the Connection (resource_template) buffer */ - - context->connection = field_obj->field.resource_buffer; - context->length = field_obj->field.resource_length; - context->access_length = field_obj->field.access_length; - address = field_obj->field.pin_number_index; - bit_width = field_obj->field.bit_length; - } - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", &region_obj->region.handler->address_space, handler, @@ -255,11 +224,71 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, acpi_ex_exit_interpreter(); } + /* + * Special handling for generic_serial_bus and general_purpose_io: + * There are three extra parameters that must be passed to the + * handler via the context: + * 1) Connection buffer, a resource template from Connection() op + * 2) Length of the above buffer + * 3) Actual access length from the access_as() op + * + * Since we pass these extra parameters via the context, which is + * shared between threads, we must lock the context to avoid these + * parameters being changed from another thread before the handler + * has completed running.
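+ * Without such locking, two threads dispatching to the same handler + * could interleave: thread A stores its Connection buffer in the + * shared context, thread B overwrites it, and A's handler then runs + * with B's parameters.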
+ * + * In addition, for general_purpose_io, the Address and bit_width fields + * are defined as follows: + * 1) Address is the pin number index of the field (bit offset from + * the previous Connection) + * 2) bit_width is the actual bit length of the field (number of pins) + */ + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && + context && field_obj) { + + status = + acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER); + if (ACPI_FAILURE(status)) { + goto re_enter_interpreter; + } + + context_locked = TRUE; + + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; + context->length = field_obj->field.resource_length; + context->access_length = field_obj->field.access_length; + } + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && + context && field_obj) { + + status = + acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER); + if (ACPI_FAILURE(status)) { + goto re_enter_interpreter; + } + + context_locked = TRUE; + + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; + context->length = field_obj->field.resource_length; + context->access_length = field_obj->field.access_length; + address = field_obj->field.pin_number_index; + bit_width = field_obj->field.bit_length; + } + /* Call the handler */ status = handler(function, address, bit_width, value, context, region_obj2->extra.region_context); + if (context_locked) { + acpi_os_release_mutex(context_mutex); + } + if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", acpi_ut_get_region_name(region_obj->region. @@ -276,6 +305,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, } } +re_enter_interpreter: if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 84b0b410310e..3e1813ebcca8 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) * * FUNCTION: acpi_any_gpe_status_set * - * PARAMETERS: None + * PARAMETERS: gpe_skip_number - Number of the GPE to skip * * RETURN: Whether or not the status bit is set for any GPE * - * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any - * of them is set or FALSE otherwise. + * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one + * represented by the "skip" argument, and return TRUE if any of + * them is set or FALSE otherwise. 
 * ******************************************************************************/ -u32 acpi_any_gpe_status_set(void) +u32 acpi_any_gpe_status_set(u32 gpe_skip_number) { acpi_status status; + acpi_handle gpe_device; u8 ret; ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); @@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void) return (FALSE); } - ret = acpi_hw_check_all_gpes(); + status = acpi_get_gpe_device(gpe_skip_number, &gpe_device); + if (ACPI_FAILURE(status)) { + gpe_device = NULL; + } + + ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number); (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return (ret); diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 47265b073e6f..6e0d2a98c4ad 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device, /* Now we can delete the handler object */ + acpi_os_release_mutex(handler_obj->address_space. + context_mutex); acpi_ut_remove_reference(handler_obj); goto unlock_and_exit; } diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 728d752f7adc..85f799c9c25c 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index b1d7d5f92495..12516b07336e 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, return (AE_OK); } +struct acpi_gpe_block_status_context { + struct acpi_gpe_register_info *gpe_skip_register_info; + u8 gpe_skip_mask; + u8 retval; +}; + /****************************************************************************** * * FUNCTION: acpi_hw_get_gpe_block_status * * PARAMETERS: gpe_xrupt_info - GPE Interrupt info * gpe_block - Gpe Block info + * context - GPE list walk context data * * RETURN: Success * @@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, static acpi_status acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, - void *ret_ptr) + void *context) { + struct acpi_gpe_block_status_context *c = context; struct acpi_gpe_register_info *gpe_register_info; u64 in_enable, in_status; acpi_status status; - u8 *ret = ret_ptr; + u8 ret_mask; u32 i; /* Examine each GPE Register within the block */ @@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, continue; } - *ret |= in_enable & in_status; + ret_mask = in_enable & in_status; + if (ret_mask && c->gpe_skip_register_info == gpe_register_info) { + ret_mask &= ~c->gpe_skip_mask; + } + c->retval |= ret_mask; } return (AE_OK); @@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) * * FUNCTION: acpi_hw_check_all_gpes * - * PARAMETERS: None + * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip + * gpe_skip_number - Number of the GPE to skip * * RETURN: Combined status of all GPEs * - * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the + * DESCRIPTION: Check all enabled GPEs in all GPE blocks,
except for the one + represented by the "skip" arguments, and return TRUE if the + * status bit is set for at least one of them or FALSE otherwise. * ******************************************************************************/ -u8 acpi_hw_check_all_gpes(void) +u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number) { - u8 ret = 0; + struct acpi_gpe_block_status_context context = { + .gpe_skip_register_info = NULL, + .retval = 0, + }; + struct acpi_gpe_event_info *gpe_event_info; + acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); - (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); - return (ret != 0); + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device, + gpe_skip_number); + if (gpe_event_info) { + context.gpe_skip_register_info = gpe_event_info->register_info; + context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info); + } + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context); + return (context.retval != 0); } #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 370bbc867745..c717fff7d9b5 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -13,9 +13,6 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") -/* Local Prototypes */ -static void acpi_ns_normalize_pathname(char *original_path); - /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname @@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path); * for error and debug statements. * ******************************************************************************/ - char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { char *name_buffer; @@ -411,7 +407,7 @@ cleanup: * ******************************************************************************/ -static void acpi_ns_normalize_pathname(char *original_path) +void acpi_ns_normalize_pathname(char *original_path) { char *input_path = original_path; char *new_path_buffer; diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index eee263cb7beb..dd9e8831ab50 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -414,6 +414,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); + return; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, @@ -452,13 +453,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) * * FUNCTION: acpi_ut_update_object_reference * - * PARAMETERS: object - Increment ref count for this object - * and all sub-objects + * PARAMETERS: object - Increment or decrement the ref count for + * this object and all sub-objects * action - Either REF_INCREMENT or REF_DECREMENT * * RETURN: Status * - * DESCRIPTION: Increment the object reference count + * DESCRIPTION: Increment or decrement the object reference count * * Object references are incremented when: * 1) An object is attached to a Node (namespace object) @@ -492,7 +493,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } /* - * All sub-objects must have their reference count incremented + * All sub-objects must have their reference count updated * also. Different object types have different subobjects.
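+ * For example, a buffer_field holds a reference on its backing buffer + * object and a bank_field on its bank register object; both are + * walked below.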
*/ switch (object->common.type) { @@ -559,6 +560,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) break; } } + next_object = NULL; break; @@ -567,11 +569,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -612,6 +609,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e0d82fab1f44..4adf9ef2b0e2 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -40,6 +40,7 @@ #include <linux/sched/clock.h> #include <linux/uuid.h> #include <linux/ras.h> +#include <linux/task_work.h> #include <acpi/actbl1.h> #include <acpi/ghes.h> @@ -414,23 +415,46 @@ static void ghes_clear_estatus(struct ghes *ghes, ghes_ack_error(ghes->generic_v2); } -static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev) +/* + * Called as task_work before returning to user-space. + * Ensure any queued work has been done before we return to the context that + * triggered the notification. + */ +static void ghes_kick_task_work(struct callback_head *head) +{ + struct acpi_hest_generic_status *estatus; + struct ghes_estatus_node *estatus_node; + u32 node_len; + + estatus_node = container_of(head, struct ghes_estatus_node, task_work); + memory_failure_queue_kick(estatus_node->task_work_cpu); + + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus)); + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); +} + +static bool ghes_handle_memory_failure(struct ghes *ghes, + struct acpi_hest_generic_data *gdata, + int sev) { -#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE unsigned long pfn; int flags = -1; int sec_sev = ghes_severity(gdata->error_severity); struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); + if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) + return false; + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) - return; + return false; pfn = mem_err->physical_addr >> PAGE_SHIFT; if (!pfn_valid(pfn)) { pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid address in generic error data: %#llx\n", mem_err->physical_addr); - return; + return false; } /* iff following two events can be handled properly by now */ @@ -440,9 +464,12 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) flags = 0; - if (flags != -1) + if (flags != -1) { memory_failure_queue(pfn, flags); -#endif + return true; + } + + return false; } /* @@ -490,11 +517,12 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) #endif } -static void ghes_do_proc(struct ghes *ghes, +static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { int sev, sec_sev; struct acpi_hest_generic_data *gdata; + bool work_queued = false; guid_t *sec_type; const guid_t *fru_id = &guid_null; char *fru_text = ""; @@ -515,7 +543,8 @@ static void ghes_do_proc(struct ghes *ghes, ghes_edac_report_mem_error(sev, mem_err); arch_apei_report_mem_error(sev, mem_err); - ghes_handle_memory_failure(gdata, sev); + if (ghes_handle_memory_failure(ghes, gdata, sev)) + 
work_queued = true; } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { ghes_handle_aer(gdata); @@ -532,6 +561,8 @@ static void ghes_do_proc(struct ghes *ghes, gdata->error_data_length); } } + + return work_queued; } static void __ghes_print_estatus(const char *pfx, @@ -827,7 +858,9 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) struct ghes_estatus_node *estatus_node; struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; + bool task_work_pending; u32 len, node_len; + int ret; llnode = llist_del_all(&ghes_estatus_llist); /* @@ -842,14 +875,26 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) estatus = GHES_ESTATUS_FROM_NODE(estatus_node); len = cper_estatus_len(estatus); node_len = GHES_ESTATUS_NODE_LEN(len); - ghes_do_proc(estatus_node->ghes, estatus); + task_work_pending = ghes_do_proc(estatus_node->ghes, estatus); if (!ghes_estatus_cached(estatus)) { generic = estatus_node->generic; if (ghes_print_estatus(NULL, generic, estatus)) ghes_estatus_cache_add(generic, estatus); } - gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, - node_len); + + if (task_work_pending && current->mm != &init_mm) { + estatus_node->task_work.func = ghes_kick_task_work; + estatus_node->task_work_cpu = smp_processor_id(); + ret = task_work_add(current, &estatus_node->task_work, + true); + if (ret) + estatus_node->task_work.func = NULL; + } + + if (!estatus_node->task_work.func) + gen_pool_free(ghes_estatus_pool, + (unsigned long)estatus_node, node_len); + llnode = next; } } @@ -909,6 +954,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, estatus_node->ghes = ghes; estatus_node->generic = ghes->generic; + estatus_node->task_work.func = NULL; estatus = GHES_ESTATUS_FROM_NODE(estatus_node); if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) { diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 267bdbf6a7bf..7597e82a0b49 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -141,8 +141,10 @@ static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void { int *count = data; - if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR || - hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2) + if ((hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR || + hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2) && + (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || + !(hest_hdr->source_id & 0xF000))) (*count)++; return 0; } @@ -153,8 +155,10 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) struct ghes_arr *ghes_arr = data; int rc, i; - if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR && - hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2) + if ((hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR && + hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2) || + ((hest_hdr->source_id & 0xF000) && + boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) return 0; if (!((struct acpi_hest_generic *)hest_hdr)->enabled) return 0; diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c index 01962c63a711..7d7497b85602 100644 --- a/drivers/acpi/arm64/gtdt.c +++ b/drivers/acpi/arm64/gtdt.c @@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, int index) { struct platform_device *pdev; - int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + int irq; /* * According to SBSA specification the size of refresh and control @@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, struct resource res[] = {
DEFINE_RES_MEM(wd->control_frame_address, SZ_4K), DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K), - DEFINE_RES_IRQ(irq), + {}, }; int nr_res = ARRAY_SIZE(res); @@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, if (!(wd->refresh_frame_address && wd->control_frame_address)) { pr_err(FW_BUG "failed to get the Watchdog base address.\n"); - acpi_unregister_gsi(wd->timer_interrupt); return -EINVAL; } + irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + res[2] = (struct resource)DEFINE_RES_IRQ(irq); if (irq <= 0) { pr_warn("failed to map the Watchdog interrupt.\n"); nr_res--; @@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, */ pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res); if (IS_ERR(pdev)) { - acpi_unregister_gsi(wd->timer_interrupt); + if (irq > 0) + acpi_unregister_gsi(wd->timer_interrupt); return PTR_ERR(pdev); } diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 5a7551d060f2..bc95a5eebd13 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -361,6 +361,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, static int iort_get_id_mapping_index(struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + struct acpi_iort_pmcg *pmcg; switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: @@ -388,6 +389,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) return smmu->id_mapping_index; case ACPI_IORT_NODE_PMCG: + pmcg = (struct acpi_iort_pmcg *)node->node_data; + if (pmcg->overflow_gsiv || node->mapping_count == 0) + return -EINVAL; + return 0; default: return -EINVAL; diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 985afc62da82..ee7a7da276dd 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -85,7 +85,18 @@ static const struct dmi_system_id lid_blacklst[] = { */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), - DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, + { + /* + * Medion Akoya E2228T, notification of the LID device only + * happens on close, not on open and _LID always returns closed. 
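+ * Reporting the lid as "open" at init keeps userspace from treating + * these machines as permanently closed clamshells at boot.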
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"), }, .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, @@ -136,6 +147,7 @@ struct acpi_button { int last_state; ktime_t last_time; bool suspended; + bool lid_state_initialized; }; static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); @@ -391,6 +403,8 @@ static int acpi_lid_update_state(struct acpi_device *device, static void acpi_lid_initialize_state(struct acpi_device *device) { + struct acpi_button *button = acpi_driver_data(device); + switch (lid_init_state) { case ACPI_BUTTON_LID_INIT_OPEN: (void)acpi_lid_notify_state(device, 1); @@ -402,13 +416,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device) default: break; } + + button->lid_state_initialized = true; } static void acpi_button_notify(struct acpi_device *device, u32 event) { struct acpi_button *button = acpi_driver_data(device); struct input_dev *input; - int users; switch (event) { case ACPI_FIXED_HARDWARE_EVENT: @@ -417,10 +432,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) case ACPI_BUTTON_NOTIFY_STATUS: input = button->input; if (button->type == ACPI_BUTTON_TYPE_LID) { - mutex_lock(&button->input->mutex); - users = button->input->users; - mutex_unlock(&button->input->mutex); - if (users) + if (button->lid_state_initialized) acpi_lid_update_state(device, true); } else { int keycode; @@ -465,7 +477,7 @@ static int acpi_button_resume(struct device *dev) struct acpi_button *button = acpi_driver_data(device); button->suspended = false; - if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) { + if (button->type == ACPI_BUTTON_TYPE_LID) { button->last_state = !!acpi_lid_evaluate_state(device); button->last_time = ktime_get(); acpi_lid_initialize_state(device); diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index a1a858ad4d18..2826bd45c61a 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -118,23 +118,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); */ #define NUM_RETRIES 500ULL -struct cppc_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); - ssize_t (*store)(struct kobject *kobj, - struct attribute *attr, const char *c, ssize_t count); -}; - #define define_one_cppc_ro(_name) \ -static struct cppc_attr _name = \ +static struct kobj_attribute _name = \ __ATTR(_name, 0444, show_##_name, NULL) #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) #define show_cppc_data(access_fn, struct_name, member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ - struct attribute *attr, char *buf) \ + struct kobj_attribute *attr, char *buf) \ { \ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ struct struct_name st_name = {0}; \ @@ -160,7 +152,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); static ssize_t show_feedback_ctrs(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); struct cppc_perf_fb_ctrs fb_ctrs = {0}; @@ -865,6 +857,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) "acpi_cppc"); if (ret) { per_cpu(cpc_desc_ptr, pr->id) = NULL; + kobject_put(&cpc_ptr->kobj); goto out_free; } diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index b097ef209313..e7716c7b9be7 100644 --- a/drivers/acpi/custom_method.c +++ 
b/drivers/acpi/custom_method.c @@ -44,6 +44,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, sizeof(struct acpi_table_header))) return -EFAULT; uncopied_bytes = max_size = table.length; + /* make sure any previously allocated buf is freed */ + kfree(buf); buf = kzalloc(max_size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -57,6 +59,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, (*ppos + count < count) || (count > uncopied_bytes)) { kfree(buf); + buf = NULL; return -EINVAL; } @@ -78,7 +81,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); } - kfree(buf); return count; } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 5e4a8860a9c0..72e6fad39a5e 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) * possibly drop references to the power resources in use. */ state = ACPI_STATE_D3_HOT; - /* If _PR3 is not available, use D3hot as the target state. */ + /* If D3cold is not supported, use D3hot as the target state. */ if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) target_state = state; } else if (!device->power.states[state].flags.valid) { @@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state) end: if (result) { dev_warn(&device->dev, "Failed to change power state to %s\n", - acpi_power_state_string(state)); + acpi_power_state_string(target_state)); } else { device->power.state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to %s\n", device->pnp.bus_id, - acpi_power_state_string(state))); + acpi_power_state_string(target_state))); } return result; @@ -749,7 +749,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context) static DEFINE_MUTEX(acpi_wakeup_lock); static int __acpi_device_wakeup_enable(struct acpi_device *adev, - u32 target_state, int max_count) + u32 target_state) { struct acpi_device_wakeup *wakeup = &adev->wakeup; acpi_status status; @@ -757,9 +757,10 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev, mutex_lock(&acpi_wakeup_lock); - if (wakeup->enable_count >= max_count) + if (wakeup->enable_count >= INT_MAX) { + acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n"); goto out; - + } if (wakeup->enable_count > 0) goto inc; @@ -799,7 +800,7 @@ out: */ static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state) { - return __acpi_device_wakeup_enable(adev, target_state, 1); + return __acpi_device_wakeup_enable(adev, target_state); } /** @@ -829,8 +830,12 @@ out: mutex_unlock(&acpi_wakeup_lock); } -static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, - int max_count) +/** + * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. + * @dev: Device to enable/disable to generate wakeup events. + * @enable: Whether to enable or disable the wakeup functionality.
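+ * + * Enable requests are reference-counted: each successful enable + * increments wakeup->enable_count and must be balanced by a matching + * disable.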
+ */ +int acpi_pm_set_device_wakeup(struct device *dev, bool enable) { struct acpi_device *adev; int error; @@ -850,36 +855,14 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable, return 0; } - error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(), - max_count); + error = __acpi_device_wakeup_enable(adev, acpi_target_system_state()); if (!error) dev_dbg(dev, "Wakeup enabled by ACPI\n"); return error; } - -/** - * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device. - * @dev: Device to enable/disable to generate wakeup events. - * @enable: Whether to enable or disable the wakeup functionality. - */ -int acpi_pm_set_device_wakeup(struct device *dev, bool enable) -{ - return __acpi_pm_set_device_wakeup(dev, enable, 1); -} EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup); -/** - * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge. - * @dev: Bridge device to enable/disable to generate wakeup events. - * @enable: Whether to enable or disable the wakeup functionality. - */ -int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) -{ - return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX); -} -EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup); - /** * acpi_dev_pm_low_power - Put ACPI device into a low-power state. * @dev: Device to put into a low-power state. diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 96869f1538b9..bfca116482b8 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - if (len < 0) - return len; - - env->buflen += len; - if (!adev->data.of_compatible) - return 0; - - if (len > 0 && add_uevent_var(env, "MODALIAS=")) - return -ENOMEM; - - len = create_of_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); + if (adev->data.of_compatible) + len = create_of_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + else + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); if (len < 0) return len; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ca5cdb621c2a..c64001e789ed 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1043,29 +1043,21 @@ void acpi_ec_unblock_transactions(void) /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ -static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? 
acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) @@ -1967,13 +1959,26 @@ bool acpi_ec_dispatch_gpe(void) u32 ret; if (!first_ec) - return false; + return acpi_any_gpe_status_set(U32_MAX); - ret = acpi_dispatch_gpe(NULL, first_ec->gpe); - if (ret == ACPI_INTERRUPT_HANDLED) { - pm_pr_dbg("EC GPE dispatched\n"); + /* + * Report wakeup if the status bit is set for any enabled GPE other + * than the EC one. + */ + if (acpi_any_gpe_status_set(first_ec->gpe)) return true; - } + + /* + * Dispatch the EC GPE in-band, but do not report wakeup in any case + * to allow the caller to process events properly after that. + */ + ret = acpi_dispatch_gpe(NULL, first_ec->gpe); + if (ret == ACPI_INTERRUPT_HANDLED) + pm_pr_dbg("EC GPE dispatched\n"); + + /* Flush the event and query workqueues. */ + acpi_ec_flush_work(); + return false; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c index aba0d0027586..9df6991635c2 100644 --- a/drivers/acpi/evged.c +++ b/drivers/acpi/evged.c @@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, struct resource r; struct acpi_resource_irq *p = &ares->data.irq; struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; + char ev_name[5]; + u8 trigger; if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) return AE_OK; @@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, dev_err(dev, "unable to parse IRQ resource\n"); return AE_ERROR; } - if (ares->type == ACPI_RESOURCE_TYPE_IRQ) + if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { gsi = p->interrupts[0]; - else + trigger = p->triggering; + } else { gsi = pext->interrupts[0]; + trigger = pext->triggering; + } irq = r.start; - if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { + switch (gsi) { + case 0 ... 255: + sprintf(ev_name, "_%c%02X", + trigger == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', gsi); + + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) + break; + /* fall through */ + default: + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) + break; + dev_err(dev, "cannot locate _EVT method\n"); return AE_ERROR; } diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c index 8b0de8a3c647..0f1c939b7e90 100644 --- a/drivers/acpi/hmat/hmat.c +++ b/drivers/acpi/hmat/hmat.c @@ -403,7 +403,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n", p->flags, p->processor_PD, p->memory_PD); - if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) { + if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || + hmat_revision > 1) { target = find_mem_target(p->memory_PD); if (!target) { pr_debug("HMAT: Memory Domain missing from SRAT\n"); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index afe6636f9ad3..159c422601bc 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -9,6 +9,8 @@ #ifndef _ACPI_INTERNAL_H_ #define _ACPI_INTERNAL_H_ +#include <linux/idr.h> + #define PREFIX "ACPI: " int early_acpi_osi_init(void); @@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); extern struct list_head acpi_bus_id_list; +#define ACPI_MAX_DEVICE_INSTANCES 4096 + struct acpi_device_bus_id { - char bus_id[15]; - unsigned int instance_no; + const char *bus_id; + struct ida instance_ida; struct list_head node; }; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 14e68f202f81..9d78f29cf996 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle) static u8 nfit_dsm_revid(unsigned family, unsigned func) { - static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { + static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { [NVDIMM_INTEL_GET_MODES] = 2, [NVDIMM_INTEL_GET_FWINFO] = 2, @@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) if (family > NVDIMM_FAMILY_MAX) return 0; - if (func > 31) + if (func > NVDIMM_CMD_MAX) return 0; id = revid_table[family][func]; if (id == 0) @@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, * Check for a valid command. For ND_CMD_CALL, we also have to * make sure that the DSM function is supported. 
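+ * The func value comes from the user-supplied ND_CMD_CALL package, so + * it is range-checked against NVDIMM_CMD_MAX before being used as a + * bit index into dsm_mask.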
*/ - if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) + if (cmd == ND_CMD_CALL && + (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) return -ENOTTY; else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; @@ -1552,7 +1553,7 @@ static ssize_t format1_show(struct device *dev, le16_to_cpu(nfit_dcr->dcr->code)); break; } - if (rc != ENXIO) + if (rc != -ENXIO) break; } mutex_unlock(&acpi_desc->init_mutex); @@ -3499,7 +3500,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, if (nvdimm && cmd == ND_CMD_CALL && call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { func = call_pkg->nd_command; - if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) + if (func > NVDIMM_CMD_MAX || + (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) return -EOPNOTSUPP; } diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index f0ae48515b48..ee8d9973f60b 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -76,6 +76,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, */ acpi_nfit_ars_rescan(acpi_desc, 0); } + mce->kflags |= MCE_HANDLED_NFIT; break; } diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 24241941181c..b317f4043705 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -34,6 +34,7 @@ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV +#define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index eadbf90e65d1..85e01752fbe4 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -31,7 +31,7 @@ int acpi_numa __initdata; int pxm_to_node(int pxm) { - if (pxm < 0) + if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off) return NUMA_NO_NODE; return pxm_to_node_map[pxm]; } diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 6b347d9920cc..715a382d92df 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -142,6 +142,26 @@ static struct mcfg_fixup mcfg_quirks[] = { XGENE_V2_ECAM_MCFG(4, 0), XGENE_V2_ECAM_MCFG(4, 1), XGENE_V2_ECAM_MCFG(4, 2), + +#define AMPERE_ECAM32(rev, seg) \ + { "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops } + + AMPERE_ECAM32(1, 0), + AMPERE_ECAM32(1, 1), + AMPERE_ECAM32(1, 2), + AMPERE_ECAM32(1, 3), + AMPERE_ECAM32(1, 4), + AMPERE_ECAM32(1, 5), + AMPERE_ECAM32(1, 6), + AMPERE_ECAM32(1, 7), + AMPERE_ECAM32(1, 8), + AMPERE_ECAM32(1, 9), + AMPERE_ECAM32(1, 10), + AMPERE_ECAM32(1, 11), + AMPERE_ECAM32(1, 12), + AMPERE_ECAM32(1, 13), + AMPERE_ECAM32(1, 14), + AMPERE_ECAM32(1, 15), }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index ed56c6d20b08..a1b2f3ec49e7 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - 
} + /* + * It is expected that there will be at least 2 states, C1 and + * something else (C2 or C3), so fail if that is not the case. + */ + if (pr->power.count < 2) + return -EFAULT; - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; - - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } - - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -900,7 +760,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -912,13 +771,10 @@ static inline void acpi_processor_cstate_first_run_checks(void) max_cstate); first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 5909e8fa4013..b9950a4bf984 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -80,6 +80,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) pr->performance_platform_limit = (int)ppc; + if (!pr->performance) + return -EINVAL; + if (ppc >= pr->performance->state_count || unlikely(!freq_qos_request_active(&pr->perflib_req))) return 0; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 532a1ae3595a..a0bd56ece3ff 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data) return pr->throttling.acpi_processor_get_throttling(pr); } -static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) -{ - if (direct || (is_percpu_thread() && cpu == smp_processor_id())) - return fn(arg); - return work_on_cpu(cpu, fn, arg); -} - static int acpi_processor_get_throttling(struct acpi_processor *pr) { if (!pr) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3eacf474e1e3..a08e3eb2a6f9 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -794,9 +794,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, const union acpi_object *obj; int ret; - if (!val) - return -EINVAL; - if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj); if (ret) @@ -806,28 +803,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, case DEV_PROP_U8: if 
(obj->integer.value > U8_MAX) return -EOVERFLOW; - *(u8 *)val = obj->integer.value; + + if (val) + *(u8 *)val = obj->integer.value; + break; case DEV_PROP_U16: if (obj->integer.value > U16_MAX) return -EOVERFLOW; - *(u16 *)val = obj->integer.value; + + if (val) + *(u16 *)val = obj->integer.value; + break; case DEV_PROP_U32: if (obj->integer.value > U32_MAX) return -EOVERFLOW; - *(u32 *)val = obj->integer.value; + + if (val) + *(u32 *)val = obj->integer.value; + break; default: - *(u64 *)val = obj->integer.value; + if (val) + *(u64 *)val = obj->integer.value; + break; } + + if (!val) + return 1; } else if (proptype == DEV_PROP_STRING) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj); if (ret) return ret; - *(char **)val = obj->string.pointer; + if (val) + *(char **)val = obj->string.pointer; return 1; } else { @@ -841,7 +853,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, { int ret; - if (!adev) + if (!adev || !val) return -EINVAL; ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val); @@ -935,10 +947,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, const union acpi_object *items; int ret; - if (val && nval == 1) { + if (nval == 1 || !val) { ret = acpi_data_prop_read_single(data, propname, proptype, val); - if (ret >= 0) + /* + * The overflow error means that the property is there and it is + * single-value, but its type does not match, so return. + */ + if (ret >= 0 || ret == -EOVERFLOW) return ret; + + /* + * Reading this property as a single-value one failed, but its + * value may still be represented as one-element array, so + * continue. + */ } ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 2a3e392751e0..48ca9a844f06 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -541,7 +541,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, ret = c->preproc(ares, c->preproc_data); if (ret < 0) { c->error = ret; - return AE_CTRL_TERMINATE; + return AE_ABORT_METHOD; } else if (ret > 0) { return AE_OK; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 915650bf519f..dbb5919f23e2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -483,10 +483,10 @@ static void acpi_device_del(struct acpi_device *device) list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, acpi_device_hid(device))) { - if (acpi_device_bus_id->instance_no > 0) - acpi_device_bus_id->instance_no--; - else { + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { list_del(&acpi_device_bus_id->node); + kfree_const(acpi_device_bus_id->bus_id); kfree(acpi_device_bus_id); } break; @@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, if (!device) return -EINVAL; + *device = NULL; + status = acpi_get_data_full(handle, acpi_scan_drop_device, (void **)device, callback); if (ACPI_FAILURE(status) || !*device) { @@ -621,12 +623,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev) put_device(&adev->dev); } +static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) +{ + struct acpi_device_bus_id *acpi_device_bus_id; + + /* Find suitable bus_id and instance number in acpi_bus_id_list. 
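+ * Must be called with acpi_device_lock held. Two devices sharing the + * PNP0A03 ID, for instance, end up named PNP0A03:00 and PNP0A03:01, + * with the instance numbers handed out by the per-ID IDA.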
*/ + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { + if (!strcmp(acpi_device_bus_id->bus_id, dev_id)) + return acpi_device_bus_id; + } + return NULL; +} + +static int acpi_device_set_name(struct acpi_device *device, + struct acpi_device_bus_id *acpi_device_bus_id) +{ + struct ida *instance_ida = &acpi_device_bus_id->instance_ida; + int result; + + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); + if (result < 0) + return result; + + device->pnp.instance_no = result; + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); + return 0; +} + int acpi_device_add(struct acpi_device *device, void (*release)(struct device *)) { + struct acpi_device_bus_id *acpi_device_bus_id; int result; - struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; - int found = 0; if (device->handle) { acpi_status status; @@ -652,34 +680,38 @@ int acpi_device_add(struct acpi_device *device, INIT_LIST_HEAD(&device->del_list); mutex_init(&device->physical_node_lock); - new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); - if (!new_bus_id) { - pr_err(PREFIX "Memory allocation error\n"); - result = -ENOMEM; - goto err_detach; - } - mutex_lock(&acpi_device_lock); - /* - * Find suitable bus_id and instance number in acpi_bus_id_list - * If failed, create one and link it into acpi_bus_id_list - */ - list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { - if (!strcmp(acpi_device_bus_id->bus_id, - acpi_device_hid(device))) { - acpi_device_bus_id->instance_no++; - found = 1; - kfree(new_bus_id); - break; + + acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); + if (acpi_device_bus_id) { + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) + goto err_unlock; + } else { + acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), + GFP_KERNEL); + if (!acpi_device_bus_id) { + result = -ENOMEM; + goto err_unlock; } - } - if (!found) { - acpi_device_bus_id = new_bus_id; - strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); - acpi_device_bus_id->instance_no = 0; + acpi_device_bus_id->bus_id = + kstrdup_const(acpi_device_hid(device), GFP_KERNEL); + if (!acpi_device_bus_id->bus_id) { + kfree(acpi_device_bus_id); + result = -ENOMEM; + goto err_unlock; + } + + ida_init(&acpi_device_bus_id->instance_ida); + + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) { + kfree(acpi_device_bus_id); + goto err_unlock; + } + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); if (device->parent) list_add_tail(&device->node, &device->parent->children); @@ -710,9 +742,10 @@ int acpi_device_add(struct acpi_device *device, if (device->parent) list_del(&device->node); list_del(&device->wakeup_list); + + err_unlock: mutex_unlock(&acpi_device_lock); - err_detach: acpi_detach_data(device->handle, acpi_scan_drop_device); return result; } @@ -919,12 +952,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (buffer.length && package && package->type == ACPI_TYPE_PACKAGE - && package->package.count) { - int err = acpi_extract_power_resources(package, 0, - &ps->resources); - if (!err) - device->power.flags.power_resources = 1; - } + && package->package.count) + acpi_extract_power_resources(package, 0, &ps->resources); + ACPI_FREE(buffer.pointer); } @@ -971,14 +1001,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) 
acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); - if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3hot states (always valid) */ + /* Set the defaults for D0 and D3hot (always supported). */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + /* + * Use power resources only if the D0 list of them is populated, because + * some platforms may provide _PR3 only to indicate D3cold support and + * in those cases the power resources list returned by it may be bogus. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { + device->power.flags.power_resources = 1; + /* + * D3cold is supported if the D3hot list of power resources is + * not empty. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + } + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index abd39cc5ff88..d1b74179d217 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -977,16 +977,6 @@ static int acpi_s2idle_prepare_late(void) return 0; } -static void acpi_s2idle_sync(void) -{ - /* - * The EC driver uses the system workqueue and an additional special - * one, so those need to be flushed too. - */ - acpi_ec_flush_work(); - acpi_os_wait_events_complete(); /* synchronize Notify handling */ -} - static bool acpi_s2idle_wake(void) { if (!acpi_sci_irq_valid()) @@ -1013,21 +1003,12 @@ static bool acpi_s2idle_wake(void) if (acpi_check_wakeup_handlers()) return true; - /* - * If there are no EC events to process and at least one of the - * other enabled GPEs is active, the wakeup is regarded as a - * genuine one. - * - * Note that the checks below must be carried out in this order - * to avoid returning prematurely due to a change of the EC GPE - * status bit from unset to set between the checks with the - * status bits of all the other GPEs unset. - */ - if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) + /* Check non-EC GPE wakeups and dispatch the EC GPE. */ + if (acpi_ec_dispatch_gpe()) return true; /* - * Cancel the wakeup and process all pending events in case + * Cancel the SCI wakeup and process all pending events in case * there are any wakeup ones in there. * * Note that if any non-EC GPEs are active at this point, the @@ -1035,8 +1016,7 @@ static bool acpi_s2idle_wake(void) * should be missed by canceling the wakeup here. */ pm_system_cancel_wakeup(); - - acpi_s2idle_sync(); + acpi_os_wait_events_complete(); /* * The SCI is in the "suspended" state now and it cannot produce @@ -1069,7 +1049,8 @@ static void acpi_s2idle_restore(void) * of GPEs. 
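/*
 * Illustrative sketch, not part of the patch: the acpi_bus_get_power_flags()
 * hunk above only trusts power resources when the D0 list is populated, and
 * only then derives D3cold validity from a non-empty D3hot list. Reduced to
 * its decision logic (struct and function names here are hypothetical):
 */
#include <stdbool.h>

struct power_lists {
	bool d0_list_populated;    /* _PR0 returned resources */
	bool d3hot_list_populated; /* _PR3 returned resources */
};

static void evaluate_power_flags(const struct power_lists *pl,
				 bool *use_power_resources, bool *d3cold_valid)
{
	/* A bogus _PR3-only platform leaves the D0 list empty. */
	*use_power_resources = pl->d0_list_populated;
	*d3cold_valid = pl->d0_list_populated && pl->d3hot_list_populated;
}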
*/ acpi_os_wait_events_complete(); /* synchronize GPE processing */ - acpi_s2idle_sync(); + acpi_ec_flush_work(); /* flush the EC driver's workqueues */ + acpi_os_wait_events_complete(); /* synchronize Notify handling */ s2idle_wakeup = false; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index c60d2c6d31d6..76c668c05fa0 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, @@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, error = kobject_init_and_add(&hotplug->kobj, &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); - if (error) + if (error) { + kobject_put(&hotplug->kobj); goto err_out; + } kobject_uevent(&hotplug->kobj, KOBJ_ADD); return; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 180ac4329763..b2cafa37df7c 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -791,7 +791,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, } /* - * acpi_table_init() + * acpi_locate_initial_tables() * * find RSDP, find and checksum SDT/XSDT. * checksum all tables, print SDT/XSDT @@ -799,7 +799,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, * result: sdt_entry[] is initialized */ -int __init acpi_table_init(void) +int __init acpi_locate_initial_tables(void) { acpi_status status; @@ -814,9 +814,45 @@ int __init acpi_table_init(void) status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); if (ACPI_FAILURE(status)) return -EINVAL; - acpi_table_initrd_scan(); + return 0; +} + +void __init acpi_reserve_initial_tables(void) +{ + int i; + + for (i = 0; i < ACPI_MAX_TABLES; i++) { + struct acpi_table_desc *table_desc = &initial_tables[i]; + u64 start = table_desc->address; + u64 size = table_desc->length; + + if (!start || !size) + break; + + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", + table_desc->signature.ascii, start, start + size - 1); + + memblock_reserve(start, size); + } +} + +void __init acpi_table_init_complete(void) +{ + acpi_table_initrd_scan(); check_multiple_madt(); +} + +int __init acpi_table_init(void) +{ + int ret; + + ret = acpi_locate_initial_tables(); + if (ret) + return ret; + + acpi_table_init_complete(); + return 0; } diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d831a61e0010..383c7029d3ce 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -174,6 +174,8 @@ struct acpi_thermal { int tz_enabled; int kelvin_offset; struct work_struct thermal_check_work; + struct mutex thermal_check_lock; + refcount_t thermal_check_count; }; /* -------------------------------------------------------------------------- @@ -494,17 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) return 0; } -static void acpi_thermal_check(void *data) -{ - struct acpi_thermal *tz = data; - - if (!tz->tz_enabled) - return; - - thermal_zone_device_update(tz->thermal_zone, - THERMAL_EVENT_UNSPECIFIED); -} - /* sys I/F for generic thermal sysfs support */ static 
int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) @@ -538,6 +529,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, return 0; } +static void acpi_thermal_check_fn(struct work_struct *work); + static int thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -563,7 +556,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s kernel ACPI thermal control\n", tz->tz_enabled ? "Enable" : "Disable")); - acpi_thermal_check(tz); + acpi_thermal_check_fn(&tz->thermal_check_work); } return 0; } @@ -932,6 +925,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) Driver Interface -------------------------------------------------------------------------- */ +static void acpi_queue_thermal_check(struct acpi_thermal *tz) +{ + if (!work_pending(&tz->thermal_check_work)) + queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); +} + static void acpi_thermal_notify(struct acpi_device *device, u32 event) { struct acpi_thermal *tz = acpi_driver_data(device); @@ -942,17 +941,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; case ACPI_THERMAL_NOTIFY_DEVICES: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; @@ -1052,7 +1051,27 @@ static void acpi_thermal_check_fn(struct work_struct *work) { struct acpi_thermal *tz = container_of(work, struct acpi_thermal, thermal_check_work); - acpi_thermal_check(tz); + + if (!tz->tz_enabled) + return; + /* + * In general, it is not sufficient to check the pending bit, because + * subsequent instances of this function may be queued after one of them + * has started running (e.g. if _TMP sleeps). Avoid bailing out if just + * one of them is running, though, because it may have done the actual + * check some time ago, so allow at least one of them to block on the + * mutex while another one is running the update. 
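/*
 * Illustrative sketch, not part of the patch: a userspace analogue of the
 * coalescing scheme in acpi_thermal_check_fn(). With the count initialized
 * to 3, at most two callers pass the "dec_not_one" gate at a time, one
 * running the update and one blocking on the mutex, and each restores the
 * count when done, so further queued checks are dropped rather than piling
 * up. This model ignores the kernel refcount_t saturation semantics.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int check_count = 3;

/* Mimics refcount_dec_not_one(): decrement unless the value is 1. */
static bool dec_not_one(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 1) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
	}
	return false;
}

static void thermal_check(void)
{
	if (!dec_not_one(&check_count))
		return;                    /* enough checks already in flight */
	/* mutex_lock(); run the zone update; */
	atomic_fetch_add(&check_count, 1);
	/* mutex_unlock(); */
}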
+ */ + if (!refcount_dec_not_one(&tz->thermal_check_count)) + return; + + mutex_lock(&tz->thermal_check_lock); + + thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); + + refcount_inc(&tz->thermal_check_count); + + mutex_unlock(&tz->thermal_check_lock); } static int acpi_thermal_add(struct acpi_device *device) @@ -1084,6 +1103,8 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; + refcount_set(&tz->thermal_check_count, 3); + mutex_init(&tz->thermal_check_lock); INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), @@ -1149,7 +1170,7 @@ static int acpi_thermal_resume(struct device *dev) tz->state.active |= tz->trips.active[i].flags.enabled; } - queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); + acpi_queue_thermal_check(tz); return AE_OK; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index e63fd7bfd3a5..e7978d983b26 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -143,6 +143,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_vendor, + .ident = "GIGABYTE GB-BXBT-2807", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"), + }, + }, + { + .callback = video_detect_force_vendor, .ident = "Sony VPCEH3U1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), @@ -282,6 +290,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"), }, }, + /* https://bugs.launchpad.net/bugs/1894667 */ + { + .callback = video_detect_force_video, + .ident = "HP 635 Notebook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"), + }, + }, /* Non win8 machines which need native backlight nevertheless */ { @@ -336,6 +353,25 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + { + .callback = video_detect_force_native, + .ident = "Acer Aspire 5738z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_BOARD_NAME, "JV50"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + .callback = video_detect_force_native, + .ident = "Acer TravelMate 5735Z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index fe1523664816..af58768a0393 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -299,10 +299,11 @@ static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - int ret; + int ret = 0; pm_runtime_get_sync(dev); - ret = drv->remove(pcdev); + if (drv->remove) + ret = drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ @@ -319,7 +320,9 @@ static int amba_remove(struct device *dev) static void amba_shutdown(struct device *dev) { struct amba_driver *drv = to_amba_driver(dev->driver); - drv->shutdown(to_amba_device(dev)); + + if (drv->shutdown) + drv->shutdown(to_amba_device(dev)); } /** @@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev) */ int amba_driver_register(struct amba_driver 
*drv) { - drv->drv.bus = &amba_bustype; + if (!drv->probe) + return -EINVAL; -#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn - SETFN(probe); - SETFN(remove); - SETFN(shutdown); + drv->drv.bus = &amba_bustype; + drv->drv.probe = amba_probe; + drv->drv.remove = amba_remove; + drv->drv.shutdown = amba_shutdown; return driver_register(&drv->drv); } diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 34a6de65aa7e..89b590c9573f 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -227,7 +227,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_work { struct list_head entry; - enum { + enum binder_work_type { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_RETURN_ERROR, @@ -889,27 +889,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked( return w; } -/** - * binder_dequeue_work_head() - Dequeues the item at head of list - * @proc: binder_proc associated with list - * @list: list to dequeue head - * - * Removes the head of the list if there are items on the list - * - * Return: pointer dequeued binder_work, NULL if list was empty - */ -static struct binder_work *binder_dequeue_work_head( - struct binder_proc *proc, - struct list_head *list) -{ - struct binder_work *w; - - binder_inner_proc_lock(proc); - w = binder_dequeue_work_head_ilocked(list); - binder_inner_proc_unlock(proc); - return w; -} - static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); static void binder_free_thread(struct binder_thread *thread); @@ -2347,8 +2326,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * file is done when the transaction is torn * down. */ - WARN_ON(failed_at && - proc->tsk == current->group_leader); } break; case BINDER_TYPE_PTR: /* @@ -2984,6 +2961,12 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; + if (WARN_ON(proc == target_proc)) { + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; @@ -3167,6 +3150,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; + t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); if (binder_alloc_copy_user_to_buffer( @@ -3637,10 +3621,17 @@ static int binder_thread_write(struct binder_proc *proc, struct binder_node *ctx_mgr_node; mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; - if (ctx_mgr_node) + if (ctx_mgr_node) { + if (ctx_mgr_node->proc == proc) { + binder_user_error("%d:%d context manager tried to acquire desc 0\n", + proc->pid, thread->pid); + mutex_unlock(&context->context_mgr_node_lock); + return -EINVAL; + } ret = binder_inc_ref_for_node( proc, ctx_mgr_node, strong, NULL, &rdata); + } mutex_unlock(&context->context_mgr_node_lock); } if (ret) @@ -4578,13 +4569,17 @@ static void binder_release_work(struct binder_proc *proc, struct list_head *list) { struct binder_work *w; + enum binder_work_type wtype; while (1) { - w = binder_dequeue_work_head(proc, list); + binder_inner_proc_lock(proc); + w = binder_dequeue_work_head_ilocked(list); + wtype = w ? 
w->type : 0; + binder_inner_proc_unlock(proc); if (!w) return; - switch (w->type) { + switch (wtype) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; @@ -4618,9 +4613,11 @@ static void binder_release_work(struct binder_proc *proc, kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; + case BINDER_WORK_NODE: + break; default: pr_err("unexpected work type, %d, not freed\n", - w->type); + wtype); break; } } @@ -4688,8 +4685,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) static void binder_free_proc(struct binder_proc *proc) { + struct binder_device *device; + BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(proc->context->name); + kfree(device); + } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); @@ -5408,7 +5412,6 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; - struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5425,12 +5428,6 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); - device = container_of(proc->context, struct binder_device, context); - if (refcount_dec_and_test(&device->ref)) { - kfree(context->name); - kfree(device); - } - proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 7067d5542a82..3526bb1488e5 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -647,6 +647,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_insert_free_buffer(alloc, buffer); } +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); /** * binder_alloc_free_buf() - free a binder buffer * @alloc: binder_alloc for this proc @@ -657,6 +659,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, void binder_alloc_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffer) { + /* + * We could eliminate the call to binder_alloc_clear_buf() + * from binder_alloc_deferred_release() by moving this to + * binder_alloc_free_buf_locked(). However, that could + * increase contention for the alloc mutex if clear_on_free + * is used frequently for large buffers. The mutex is not + * needed for correctness here. 
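/*
 * Illustrative sketch, not part of the patch: binder_alloc_clear_buf() below
 * zeroes the buffer one page at a time because the backing pages are not
 * virtually contiguous; each pass clears at most the remainder of the
 * current page. A self-contained analogue over an array of page pointers,
 * with direct access standing in for the kernel's kmap()/kunmap():
 */
#include <stddef.h>
#include <string.h>

#define PG_SIZE 4096u

static void clear_paged_buffer(unsigned char **pages, size_t start_off,
			       size_t bytes)
{
	size_t off = start_off;

	while (bytes) {
		size_t pgoff = off % PG_SIZE;
		size_t n = bytes < PG_SIZE - pgoff ? bytes : PG_SIZE - pgoff;

		memset(pages[off / PG_SIZE] + pgoff, 0, n);
		bytes -= n;
		off += n;
	}
}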
+ */ + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } mutex_lock(&alloc->mutex); binder_free_buf_locked(alloc, buffer); mutex_unlock(&alloc->mutex); @@ -753,6 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) /* Transaction should already have been freed */ BUG_ON(buffer->transaction); + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } binder_free_buf_locked(alloc, buffer); buffers++; } @@ -948,7 +966,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } up_read(&mm->mmap_sem); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); @@ -1086,6 +1104,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc, return lru_page->page_ptr; } +/** + * binder_alloc_clear_buf() - zero out buffer + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be cleared + * + * memset the given buffer to 0 + */ +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t bytes = binder_alloc_buffer_size(alloc, buffer); + binder_size_t buffer_offset = 0; + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + void *kptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + kptr = kmap(page) + pgoff; + memset(kptr, 0, size); + kunmap(page); + bytes -= size; + buffer_offset += size; + } +} + /** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index db9c1b984695..288d0f478aa3 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -23,6 +23,7 @@ struct binder_transaction; * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees * @free: %true if buffer is free + * @clear_on_free: %true if buffer must be zeroed after use * @allow_user_free: %true if user is allowed to free buffer * @async_transaction: %true if buffer is in use for an async txn * @debug_id: unique ID for debugging @@ -40,9 +41,10 @@ struct binder_buffer { struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; + unsigned clear_on_free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned debug_id:29; + unsigned debug_id:28; struct binder_transaction *transaction; diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 753985c01517..46dc54d18f0b 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -56,7 +56,7 @@ struct acard_sg { __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ }; -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int acard_ahci_port_start(struct ata_port *ap); static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) return si; } -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -248,6 +248,8 @@ 
static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d33528033042..8beb418ce167 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1728,6 +1728,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; #ifdef CONFIG_ARM64 + if (pdev->vendor == PCI_VENDOR_ID_HUAWEI && + pdev->device == 0xa235 && + pdev->revision < 0x30) + hpriv->flags |= AHCI_HFLAG_NO_SXS; + if (pdev->vendor == 0x177d && pdev->device == 0xa01c) hpriv->irq_handler = ahci_thunderx_irq_handler; #endif diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 3dbf398c92ea..732912cd4e08 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -240,6 +240,9 @@ enum { as default lpm_policy */ AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during suspend/resume */ + AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP + from phy_power_on() */ + AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ /* ap->flags bits */ diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 66a570d0da83..067b55cc157e 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -361,6 +361,10 @@ static int brcm_ahci_resume(struct device *dev) if (ret) return ret; + ret = ahci_platform_enable_regulators(hpriv); + if (ret) + goto out_disable_clks; + brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); @@ -390,6 +394,8 @@ out_disable_platform_phys: ahci_platform_disable_phys(hpriv); out_disable_phys: brcm_sata_phys_disable(priv); + ahci_platform_disable_regulators(hpriv); +out_disable_clks: ahci_platform_disable_clks(hpriv); return ret; } @@ -463,6 +469,10 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (ret) goto out_reset; + ret = ahci_platform_enable_regulators(hpriv); + if (ret) + goto out_disable_clks; + /* Must be first so as to configure endianness including that * of the standard AHCI register space. 
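/*
 * Illustrative sketch, not part of the patch: the brcm_ahci_probe()/resume()
 * fixes above slot regulator handling into the existing unwind ladder, where
 * resources are released strictly in reverse order of acquisition and each
 * failure jumps to the label that undoes everything acquired so far. All
 * functions and labels below are hypothetical stubs of that pattern:
 */
static int enable_clks(void) { return 0; }
static void disable_clks(void) { }
static int enable_regulators(void) { return 0; }
static void disable_regulators(void) { }
static int enable_phys(void) { return 0; }

static int probe_pattern(void)
{
	int ret;

	ret = enable_clks();
	if (ret)
		return ret;

	ret = enable_regulators();
	if (ret)
		goto out_disable_clks;

	ret = enable_phys();
	if (ret)
		goto out_disable_regulators;

	return 0;

out_disable_regulators:
	disable_regulators();
out_disable_clks:
	disable_clks();
	return ret;
}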
*/ @@ -472,7 +482,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); if (!priv->port_mask) { ret = -ENODEV; - goto out_disable_clks; + goto out_disable_regulators; } /* Must be done before ahci_platform_enable_phys() */ @@ -497,6 +507,8 @@ out_disable_platform_phys: ahci_platform_disable_phys(hpriv); out_disable_phys: brcm_sata_phys_disable(priv); +out_disable_regulators: + ahci_platform_disable_regulators(hpriv); out_disable_clks: ahci_platform_disable_clks(hpriv); out_reset: diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index d4bba3ace45d..3ad46d26d9d5 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -227,7 +227,7 @@ static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = { static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { .plat_config = ahci_mvebu_armada_3700_config, - .flags = AHCI_HFLAG_SUSPEND_PHYS, + .flags = AHCI_HFLAG_SUSPEND_PHYS | AHCI_HFLAG_IGN_NOTSUPP_POWER_ON, }; static const struct of_device_id ahci_mvebu_of_match[] = { diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index bff369d9a1a7..fec2e9754aed 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int ahci_port_start(struct ata_port *ap); static void ahci_port_stop(struct ata_port *ap); -static void ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); static void ahci_freeze(struct ata_port *ap); static void ahci_thaw(struct ata_port *ap); @@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) cap |= HOST_CAP_ALPM; } + if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) { + dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n"); + cap &= ~HOST_CAP_SXS; + } + if (hpriv->force_port_map && port_map != hpriv->force_port_map) { dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", port_map, hpriv->force_port_map); @@ -1624,7 +1629,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) return sata_pmp_qc_defer_cmd_switch(qc); } -static void ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -1660,6 +1665,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static void ahci_fbs_dec_intr(struct ata_port *ap) diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 129556fcf6be..416cfbf2f1c2 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -59,7 +59,7 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) } rc = phy_power_on(hpriv->phys[i]); - if (rc) { + if (rc && !(rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))) { phy_exit(hpriv->phys[i]); goto disable_phys; } @@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev, int i, irq, n_ports, rc; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + if (irq < 0) { if (irq != -EPROBE_DEFER) dev_err(dev, "no irq\n"); return irq; } + if (!irq) + return -EINVAL; hpriv->irq = irq; diff --git 
a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 581595b35573..f67b3fb33d57 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -41,7 +41,6 @@ #include <linux/workqueue.h> #include <linux/scatterlist.h> #include <linux/io.h> -#include <linux/async.h> #include <linux/log2.h> #include <linux/slab.h> #include <linux/glob.h> @@ -4475,9 +4474,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -4980,7 +4978,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) return ATA_DEFER_LINK; } -void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } +enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) +{ + return AC_ERR_OK; +} /** * ata_sg_init - Associate command with scatter-gather table. @@ -5467,7 +5468,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) return; } - ap->ops->qc_prep(qc); + qc->err_mask |= ap->ops->qc_prep(qc); + if (unlikely(qc->err_mask)) + goto err; trace_ata_qc_issue(qc); qc->err_mask |= ap->ops->qc_issue(qc); if (unlikely(qc->err_mask)) @@ -6592,7 +6595,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - async_schedule(async_port_probe, ap); + ap->cookie = async_schedule(async_port_probe, ap); } return 0; @@ -6732,11 +6735,11 @@ void ata_host_detach(struct ata_host *host) { int i; - /* Ensure ata_port probe has completed */ - async_synchronize_full(); - - for (i = 0; i < host->n_ports; i++) + for (i = 0; i < host->n_ports; i++) { + /* Ensure ata_port probe has completed */ + async_synchronize_cookie(host->ports[i]->cookie + 1); ata_port_detach(host->ports[i]); + } /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 3ff14071617c..79f2aeeb482a 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, if (dev->flags & ATA_DFLAG_DETACH) { detach = 1; + rc = -ENODEV; goto fail; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 58e09ffe8b9c..464efedc778b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2374,6 +2374,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2399,7 +2400,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. 
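/*
 * Worked values, not part of the patch: the ATA_HORKAGE_MAX_TRIM_128M hunk
 * below caps the TRIM payload advertised in the Block Limits VPD page at
 * 128 MiB expressed in 512-byte sectors, instead of the usual per-command
 * maximum. The 64 used here is ATA_MAX_TRIM_RNUM in contemporary kernels.
 */
#include <stdint.h>

#define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

/* Default advertised limit: 65535 blocks per range * 64 ranges = 4194240. */
static const uint64_t default_max_blocks = 65535ull * 64;

/* Capped limit: 128 MiB in sectors = 128 << (20 - 9) = 262144. */
static const uint64_t capped_max_blocks = 128ull << (20 - SECTOR_SHIFT);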
*/ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } @@ -3978,12 +3984,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const u8 *p = buffer; VPRINTK("ENTER\n"); @@ -4017,12 +4024,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. */ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else @@ -4553,22 +4562,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked = 1; - rc = scsi_add_host_with_dma(ap->scsi_host, - &ap->tdev, ap->host->dev); + rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); if (rc) - goto err_add; + goto err_alloc; } return 0; - err_add: - scsi_host_put(host->ports[i]->scsi_host); err_alloc: while (--i >= 0) { struct Scsi_Host *shost = host->ports[i]->scsi_host; + /* scsi_host_put() is in ata_devres_release() */ scsi_remove_host(shost); - scsi_host_put(shost); } return rc; } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 4ed682da52ae..038db94216a9 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); @@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg_dumb(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index ebecab8c3f36..7c1c399450f3 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -817,12 +817,19 @@ static int arasan_cf_probe(struct platform_device *pdev) else quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */ - /* if irq is 0, support only PIO */ - acdev->irq = platform_get_irq(pdev, 0); - if (acdev->irq) + /* + * If there's an error getting IRQ (or we do get IRQ0), + * support only PIO + */ + ret = platform_get_irq(pdev, 0); + if (ret > 0) { + acdev->irq = ret; irq_handler = arasan_cf_interrupt; - else + } else if (ret == -EPROBE_DEFER) { + return ret; + } else { quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; + } acdev->pbase = res->start; acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, diff --git a/drivers/ata/pata_ixp4xx_cf.c 
b/drivers/ata/pata_ixp4xx_cf.c index d1644a8ef9fa..abc0e87ca1a8 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev) return -ENOMEM; irq = platform_get_irq(pdev, 0); - if (irq) + if (irq > 0) irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); + else if (irq < 0) + return irq; + else + return -EINVAL; /* Setup expansion bus chip selects */ *data->cs0_cfg = data->cs0_bits; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 57f2ec71cfc3..1bfd0154dad5 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) return ATA_CBL_PATA40; } -static void pata_macio_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) { unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); struct ata_port *ap = qc->ap; @@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) __func__, qc, qc->flags, write, qc->dev->devno); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; table = (struct dbdma_cmd *) priv->dma_table_cpu; @@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) table->command = cpu_to_le16(DBDMA_STOP); dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); + + return AC_ERR_OK; } diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index 4afcb8e63e21..41430f79663c 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d) /* * Prepare taskfile for submission. */ -static void pxa_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; struct dma_async_tx_descriptor *tx; enum dma_transfer_direction dir; if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; dir = (qc->dma_dir == DMA_TO_DEVICE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, DMA_PREP_INTERRUPT); if (!tx) { ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); - return; + return AC_ERR_OK; } tx->callback = pxa_ata_dma_irq; tx->callback_param = pd; pd->dma_cookie = dmaengine_submit(tx); + + return AC_ERR_OK; } /* diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index cb490531b62e..5db55e1e2a61 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int adma_port_start(struct ata_port *ap); static void adma_port_stop(struct ata_port *ap); -static void adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); static int adma_check_atapi_dma(struct ata_queued_cmd *qc); static void adma_freeze(struct ata_port *ap); @@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) return i; } -static void adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) { struct adma_port_priv *pp = qc->ap->private_data; u8 *buf = pp->pkt; @@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) adma_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ @@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) printk("%s\n", obuf); } #endif + return AC_ERR_OK; } static inline void adma_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index ca6c706e9c25..d55ee244d693 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, return num_prde; } -static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sata_fsl_port_priv *pp = ap->private_data; @@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", desc_info, ttl_dwords, num_prde); + + return AC_ERR_OK; } static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 7f99e23bff88..a6b76cc12a66 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) prd[-1].flags |= PRD_END; } -static void inic_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) { struct inic_port_priv *pp = qc->ap->private_data; struct inic_pkt *pkt = pp->pkt; @@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) inic_fill_sg(prd, qc); pp->cpb_tbl[0] = pp->pkt_dma; + + return AC_ERR_OK; } static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index bde695a32097..d1ed9679c717 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static int 
mv_qc_defer(struct ata_queued_cmd *qc); -static void mv_qc_prep(struct ata_queued_cmd *qc); -static void mv_qc_prep_iie(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); @@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. */ -static void mv_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) - return; + return AC_ERR_OK; /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: mv_rw_multi_errata_sata24(qc); - return; + return AC_ERR_OK; default: - return; + return AC_ERR_OK; } /* Fill in command request block @@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. - * - * FIXME: modify libata to give qc_prep a return value and - * return error here. */ - BUG_ON(tf->command); - break; + ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, + tf->command); + return AC_ERR_INVALID; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); @@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** @@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. 
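/*
 * Illustrative sketch, not part of the patch: with qc_prep now returning an
 * ata_completion_errors value, the mv_qc_prep() hunk above can report an
 * unsupported taskfile command as AC_ERR_INVALID instead of BUG_ON(), and
 * the issue path accumulates it, as in the ata_qc_issue() change earlier:
 *
 *     qc->err_mask |= ap->ops->qc_prep(qc);
 *     if (unlikely(qc->err_mask))
 *             goto err;
 *
 * A standalone model of that convention (names below are hypothetical):
 */
enum prep_err { PREP_OK = 0, PREP_INVALID = 1 << 0 };

static enum prep_err prep(int command_supported)
{
	if (!command_supported)
		return PREP_INVALID;       /* report instead of crashing */
	return PREP_OK;
}

static int issue(int command_supported)
{
	unsigned int err_mask = 0;

	err_mask |= prep(command_supported);
	return err_mask ? -1 : 0;          /* abort the command on any error */
}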
*/ -static void mv_qc_prep_iie(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) - return; + return AC_ERR_OK; if (tf->command == ATA_CMD_DSM) - return; /* use bmdma for this */ + return AC_ERR_OK; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) @@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) ); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** @@ -4095,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev) n_ports = mv_platform_data->n_ports; irq = platform_get_irq(pdev, 0); } + if (irq < 0) + return irq; + if (!irq) + return -EINVAL; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 7510303111fa..0514aa7e80e3 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); -static void nv_adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); static void nv_adma_irq_clear(struct ata_port *ap); @@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap); static void nv_swncq_error_handler(struct ata_port *ap); static int nv_swncq_slave_config(struct scsi_device *sdev); static int nv_swncq_port_start(struct ata_port *ap); -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); @@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) return 1; } -static void nv_adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; @@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } cpb->resp_flags = NV_CPB_RESP_DONE; @@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) cpb->ctl_flags = ctl_flags; wmb(); cpb->resp_flags = 0; + + return AC_ERR_OK; } static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) @@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap) return 0; } -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) { if (qc->tf.protocol != ATA_PROT_NCQ) { ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; 
nv_swncq_fill_sg(qc); + + return AC_ERR_OK; } static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) @@ -2096,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap) pp->dhfis_bits &= ~done_mask; pp->dmafis_bits &= ~done_mask; pp->sdbfis_bits |= done_mask; - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); if (!ap->qc_active) { DPRINTK("over\n"); diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 5fd464765ddc..c451d7d1c817 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int pdc_common_port_start(struct ata_port *ap); static int pdc_sata_port_start(struct ata_port *ap); -static void pdc_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); @@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void pdc_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) { struct pdc_port_priv *pp = qc->ap->private_data; unsigned int i; @@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static int pdc_is_sataii_tx4(unsigned long flags) diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index c53c5a47204d..ef00ab644afb 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); -static void qs_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); static void qs_freeze(struct ata_port *ap); @@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) return si; } -static void qs_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) { struct qs_port_priv *pp = qc->ap->private_data; u8 dflags = QS_DF_PORD, *buf = pp->pkt; @@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) qs_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; nelem = qs_fill_sg(qc); @@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) /* frame information structure (FIS) */ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); + + return AC_ERR_OK; } static inline void qs_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 3495e1733a8e..44b0ed8f6bb8 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -120,7 +120,7 @@ /* Descriptor table word 0 bit (when DTA32M = 1) */ #define SATA_RCAR_DTEND BIT(0) -#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL +#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFFUL /* 
Gen2 Physical Layer Control Registers */ #define RCAR_GEN2_PHY_CTL1_REG 0x1704 @@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); } -static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sata_rcar_bmdma_fill_sg(qc); + + return AC_ERR_OK; } static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) @@ -905,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) - goto err_pm_disable; + goto err_pm_put; host = ata_host_alloc(dev, 1); if (!host) { @@ -935,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev) err_pm_put: pm_runtime_put(dev); -err_pm_disable: pm_runtime_disable(dev); return ret; } @@ -989,8 +990,10 @@ static int sata_rcar_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } if (priv->type == RCAR_GEN3_SATA) { sata_rcar_init_module(priv); @@ -1015,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } sata_rcar_setup_port(host); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index e6fbae2f645a..75321f1ceba5 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev); static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); -static void sil_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); static void sil_bmdma_setup(struct ata_queued_cmd *qc); static void sil_bmdma_start(struct ata_queued_cmd *qc); static void sil_bmdma_stop(struct ata_queued_cmd *qc); @@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void sil_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sil_fill_sg(qc); + + return AC_ERR_OK; } static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 7bef82de53ca..560070d4f1d0 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev); static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); static int sil24_qc_defer(struct ata_queued_cmd *qc); -static void sil24_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); static void sil24_pmp_attach(struct ata_port *ap); @@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) return ata_std_qc_defer(qc); } -static void sil24_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil24_qc_prep(struct 
ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sil24_port_priv *pp = ap->private_data; @@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) if (qc->flags & ATA_QCFLAG_DMAMAP) sil24_fill_sg(qc, sge); + + return AC_ERR_OK; } static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 2277ba0c9c7f..2c7b30c5ea3d 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap); static void pdc_freeze(struct ata_port *ap); static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); -static void pdc20621_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static unsigned int pdc20621_dimm_init(struct ata_host *host); @@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); } -static void pdc20621_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_DMA: @@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d9fd70280482..7f814da3c2d0 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 9d0d65efcd94..de52428b8833 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2245,7 +2245,7 @@ static int eni_init_one(struct pci_dev *pci_dev, rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (rc < 0) - goto out; + goto err_disable; rc = -ENOMEM; eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); @@ -2281,7 +2281,8 @@ out: return rc; err_eni_release: - eni_do_release(dev); + dev->phy = NULL; + iounmap(ENI_DEV(dev)->ioaddr); err_unregister: atm_dev_deregister(dev); err_free_consistent: diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index d287837ed755..5acb45985675 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc) error = make_rate (pcr, r, &tmc0, NULL); if (error) { kfree(tc); + kfree(vcc); return error; } } diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 63871859e6e8..52c2878b755d 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev) { unsigned long flags; - if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) + if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) return -ENOMEM; PRIV(dev)->dev = dev; spin_lock_irqsave(&idt77105_priv_lock, flags); @@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev) else 
idt77105_all = walk->next; dev->phy = NULL; - dev->dev_data = NULL; + dev->phy_data = NULL; kfree(walk); break; } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index df51680e8931..363073e7b653 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3606,7 +3606,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) { printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev)); - return err; + goto err_out_disable_pdev; } card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL); diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 645a6bc1df88..c6b38112bcf4 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2234,6 +2234,7 @@ static int lanai_dev_open(struct atm_dev *atmdev) conf1_write(lanai); #endif iounmap(lanai->base); + lanai->base = NULL; error_pci: pci_disable_device(lanai->pci); error: @@ -2246,6 +2247,8 @@ static int lanai_dev_open(struct atm_dev *atmdev) static void lanai_dev_close(struct atm_dev *atmdev) { struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data; + if (lanai->base==NULL) + return; printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n", lanai->number); lanai_timed_poll_stop(lanai); @@ -2555,7 +2558,7 @@ static int lanai_init_one(struct pci_dev *pci, struct atm_dev *atmdev; int result; - lanai = kmalloc(sizeof(*lanai), GFP_KERNEL); + lanai = kzalloc(sizeof(*lanai), GFP_KERNEL); if (lanai == NULL) { printk(KERN_ERR DEV_LABEL ": couldn't allocate dev_data structure!\n"); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 8db8c0fb5e2d..bb9835c62641 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1706,6 +1706,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (push_scqe(card, vc, scq, &scqe, skb) != 0) { atomic_inc(&vcc->stats->tx_err); + dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(skb); return -EIO; } diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c index 7850758b5bb8..239852d85558 100644 --- a/drivers/atm/uPD98402.c +++ b/drivers/atm/uPD98402.c @@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev) static int uPD98402_start(struct atm_dev *dev) { DPRINTK("phy_start\n"); - if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) + if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) return -ENOMEM; spin_lock_init(&PRIV(dev)->lock); memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a2fcde582e2a..33b887b38906 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv) { struct ht16k33_fbdev *fbdev = &priv->fbdev; - schedule_delayed_work(&fbdev->work, - msecs_to_jiffies(HZ / fbdev->refresh_rate)); + schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate); } /* diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 1eb81f113786..83e26fd188cc 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -270,7 +270,7 @@ static int __init get_cpu_for_node(struct device_node *node) static int __init parse_core(struct device_node *core, int package_id, int core_id) { - char name[10]; + char name[20]; bool leaf = true; int i = 0; int cpu; @@ -317,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id, 
static int __init parse_cluster(struct device_node *cluster, int depth) { - char name[10]; + char name[20]; bool leaf = true; bool has_cores = false; struct device_node *c; diff --git a/drivers/base/component.c b/drivers/base/component.c index 1fdbd6ff2058..b9f20ada68b0 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -257,7 +257,8 @@ static int try_to_bring_up_master(struct master *master, ret = master->ops->bind(master->dev); if (ret < 0) { devres_release_group(master->dev, NULL); - dev_info(master->dev, "master bind failed: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_info(master->dev, "master bind failed: %d\n", ret); return ret; } @@ -611,8 +612,9 @@ static int component_bind(struct component *component, struct master *master, devres_release_group(component->dev, NULL); devres_release_group(master->dev, NULL); - dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", - dev_name(component->dev), component->ops, ret); + if (ret != -EPROBE_DEFER) + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", + dev_name(component->dev), component->ops, ret); } return ret; diff --git a/drivers/base/core.c b/drivers/base/core.c index 7bd9cd366d41..6fea8096edf1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -106,6 +106,16 @@ int device_links_read_lock_held(void) #endif #endif /* !CONFIG_SRCU */ +static bool device_is_ancestor(struct device *dev, struct device *target) +{ + while (target->parent) { + target = target->parent; + if (dev == target) + return true; + } + return false; +} + /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. @@ -119,7 +129,12 @@ static int device_is_dependent(struct device *dev, void *target) struct device_link *link; int ret; - if (dev == target) + /* + * The "ancestors" check is needed to catch the case when the target + * device has not been completely initialized yet and it is still + * missing from the list of children of its parent device. 
+ */ + if (dev == target || device_is_ancestor(dev, target)) return 1; ret = device_for_each_child(dev, target, device_is_dependent); @@ -454,8 +469,7 @@ static void __device_link_del(struct kref *kref) dev_dbg(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_drop_link(link->consumer); + pm_runtime_drop_link(link); list_del_rcu(&link->s_node); list_del_rcu(&link->c_node); @@ -469,8 +483,7 @@ static void __device_link_del(struct kref *kref) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_drop_link(link->consumer); + pm_runtime_drop_link(link); list_del(&link->s_node); list_del(&link->c_node); @@ -2391,6 +2404,71 @@ void device_del(struct device *dev) } EXPORT_SYMBOL_GPL(device_del); +DEFINE_KLIST(klist_hidden_devices, NULL, NULL); + +bool device_is_hidden(struct device *dev) +{ + bool ret = false; + + if (dev->class) { + mutex_lock(&dev->class->p->mutex); + ret = (dev->p->knode_class.n_klist == &klist_hidden_devices); + mutex_unlock(&dev->class->p->mutex); + } + return ret; +} + +int device_hide(struct device *dev) +{ + char *envp[] = { "EVENT=hide", NULL }; + + if (!dev->class) + return -EINVAL; + + if (MAJOR(dev->devt)) + devtmpfs_delete_node(dev); + + device_remove_class_symlinks(dev); + + mutex_lock(&dev->class->p->mutex); + /* remove the device from the class list */ + klist_remove(&dev->p->knode_class); + /* add to the hidden devices list */ + klist_add_tail(&dev->p->knode_class, &klist_hidden_devices); + mutex_unlock(&dev->class->p->mutex); + + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); + + return 0; +} + +int device_unhide(struct device *dev) +{ + char *envp[] = { "EVENT=unhide", NULL }; + int err; + + if (!dev->class) + return -EINVAL; + + err = device_add_class_symlinks(dev); + if (err) + return err; + + if (MAJOR(dev->devt)) + devtmpfs_create_node(dev); + + mutex_lock(&dev->class->p->mutex); + /* remove the device from the hidden devices list */ + klist_remove(&dev->p->knode_class); + /* tie the class to the device */ + klist_add_tail(&dev->p->knode_class, &dev->class->p->klist_devices); + mutex_unlock(&dev->class->p->mutex); + + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); + + return err; +} + /** * device_unregister - unregister device from system. * @dev: device going away. @@ -3400,9 +3478,10 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct device *parent = dev->parent; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -3412,8 +3491,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
- dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + if (!(parent && fn == parent->fwnode)) + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 6265871a4af2..f00da44ae6fe 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -567,6 +567,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_srbds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -575,6 +581,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); +static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -585,6 +592,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_mds.attr, &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b25bcab2a26b..cf7e5b4afc1b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -300,14 +300,16 @@ int driver_deferred_probe_check_state_continue(struct device *dev) static void deferred_probe_timeout_work_func(struct work_struct *work) { - struct device_private *private, *p; + struct device_private *p; deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); - list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe) - dev_info(private->device, "deferred probe pending"); + mutex_lock(&deferred_probe_mutex); + list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) + dev_info(p->device, "deferred probe pending\n"); + mutex_unlock(&deferred_probe_mutex); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); @@ -518,7 +520,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->bus->name, __func__, drv->name, dev_name(dev)); if (!list_empty(&dev->devres_head)) { dev_crit(dev, "Resources present before probing\n"); - return -EBUSY; + ret = -EBUSY; + goto done; } re_probe: @@ -639,7 +642,7 @@ pinctrl_bind_failed: ret = 0; done: atomic_dec(&probe_count); - wake_up(&probe_waitqueue); + wake_up_all(&probe_waitqueue); return ret; } @@ -872,7 +875,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; @@ -1102,6 +1107,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { + pm_runtime_get_sync(dev); + while (device_links_busy(dev)) { __device_driver_unlock(dev, parent); @@ -1113,13 +1120,12 @@ static void __device_release_driver(struct device *dev, struct device *parent) * have released the driver successfully while this one * was waiting, so check for that. 
*/ - if (dev->driver != drv) + if (dev->driver != drv) { + pm_runtime_put(dev); return; + } } - pm_runtime_get_sync(dev); - pm_runtime_clean_up_links(dev); - driver_sysfs_remove(dev); if (dev->bus) diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 62ee90b4db56..70efbb22dfc3 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, } retval = fw_sysfs_wait_timeout(fw_priv, timeout); - if (retval < 0) { + if (retval < 0 && retval != -ENOENT) { mutex_lock(&fw_lock); fw_load_abort(fw_sysfs); mutex_unlock(&fw_lock); diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 7ecd590e67fe..9bef6c35f344 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -139,10 +139,12 @@ int assign_fw(struct firmware *fw, struct device *device, void fw_free_paged_buf(struct fw_priv *fw_priv); int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); int fw_map_paged_buf(struct fw_priv *fw_priv); +bool fw_is_paged_buf(struct fw_priv *fw_priv); #else static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } +static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; } #endif #endif /* __FIRMWARE_LOADER_H */ diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index bf44c79beae9..95d21b4af904 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref) list_del(&fw_priv->list); spin_unlock(&fwc->lock); - fw_free_paged_buf(fw_priv); /* free leftover pages */ - if (!fw_priv->allocated_size) + if (fw_is_paged_buf(fw_priv)) + fw_free_paged_buf(fw_priv); + else if (!fw_priv->allocated_size) vfree(fw_priv->data); + kfree_const(fw_priv->fw_name); kfree(fw_priv); } @@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv) } #ifdef CONFIG_FW_LOADER_PAGED_BUF +bool fw_is_paged_buf(struct fw_priv *fw_priv) +{ + return fw_priv->is_paged_buf; +} + void fw_free_paged_buf(struct fw_priv *fw_priv) { int i; @@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->pages) return; + vunmap(fw_priv->data); + for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kvfree(fw_priv->pages); @@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->data) return -ENOMEM; - /* page table is no longer needed after mapping, let's free */ - kvfree(fw_priv->pages); - fw_priv->pages = NULL; - return 0; } #endif diff --git a/drivers/base/node.c b/drivers/base/node.c index 296546ffed6c..c9976dc4aa65 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -262,21 +262,20 @@ static void node_init_cache_dev(struct node *node) if (!dev) return; + device_initialize(dev); dev->parent = &node->dev; dev->release = node_cache_release; if (dev_set_name(dev, "memory_side_cache")) - goto free_dev; + goto put_device; - if (device_register(dev)) - goto free_name; + if (device_add(dev)) + goto put_device; pm_runtime_no_callbacks(dev); node->cache_dev = dev; return; -free_name: - kfree_const(dev->kobj.name); -free_dev: - kfree(dev); +put_device: + put_device(dev); } /** @@ -313,25 +312,24 @@ 
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs) return; dev = &info->dev; + device_initialize(dev); dev->parent = node->cache_dev; dev->release = node_cacheinfo_release; dev->groups = cache_groups; if (dev_set_name(dev, "index%d", cache_attrs->level)) - goto free_cache; + goto put_device; info->cache_attrs = *cache_attrs; - if (device_register(dev)) { + if (device_add(dev)) { dev_warn(&node->dev, "failed to add cache level:%d\n", cache_attrs->level); - goto free_name; + goto put_device; } pm_runtime_no_callbacks(dev); list_add_tail(&info->node, &node->cache_attrs); return; -free_name: - kfree_const(dev->kobj.name); -free_cache: - kfree(info); +put_device: + put_device(dev); } static void node_remove_caches(struct node *node) @@ -758,14 +756,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn) return pfn_to_nid(pfn); } +static int do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) +{ + int ret; + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. + */ + mem_blk->nid = nid; + + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + /* register memory section under specified node if it spans that node */ -static int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg) +static int register_mem_block_under_node_early(struct memory_block *mem_blk, + void *arg) { unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE; unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); unsigned long end_pfn = start_pfn + memory_block_pfns - 1; - int ret, nid = *(int *)arg; + int nid = *(int *)arg; unsigned long pfn; for (pfn = start_pfn; pfn <= end_pfn; pfn++) { @@ -782,38 +802,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk, } /* - * We need to check if page belongs to nid only for the boot - * case, during hotplug we know that all pages in the memory - * block belong to the same node. + * We need to check if a page belongs to nid only in the boot + * case because nodes' ranges can be interleaved. */ - if (system_state == SYSTEM_BOOTING) { - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - } + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; - /* - * If this memory block spans multiple nodes, we only indicate - * the last processed node. - */ - mem_blk->nid = nid; - - ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, - &mem_blk->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - if (ret) - return ret; - - return sysfs_create_link_nowarn(&mem_blk->dev.kobj, - &node_devices[nid]->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); + return do_register_memory_block_under_node(nid, mem_blk); } /* mem section does not span the specified node */ return 0; } +/* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) +{ + int nid = *(int *)arg; + + return do_register_memory_block_under_node(nid, mem_blk); +} + /* * Unregister a memory block device under the node it spans. Memory blocks * with multiple nodes cannot be offlined and therefore also never be removed.
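A minimal user-space sketch of the early vs. hotplug dispatch these node.c hunks introduce (link_mem_sections() in the next hunk picks the callback by meminit_context). Everything below is a hypothetical stand-in for the kernel's memory_block and walk_memory_blocks() machinery, not the real API:

```c
/* Hypothetical sketch of the early-vs-hotplug callback dispatch; all
 * names here are stand-ins for the kernel's memory_block machinery. */
#include <stdio.h>

enum meminit_context { MEMINIT_EARLY, MEMINIT_HOTPLUG };

struct memory_block {
	int id;
	int nid;
};

typedef int (*walk_func_t)(struct memory_block *blk, void *arg);

/* Boot path: node ranges may interleave, so each block is checked. */
static int register_block_early(struct memory_block *blk, void *arg)
{
	int nid = *(int *)arg;

	printf("early: block %d verified against node %d\n", blk->id, nid);
	blk->nid = nid;
	return 0;
}

/* Hotplug path: all pages are already known to belong to one node. */
static int register_block_hotplug(struct memory_block *blk, void *arg)
{
	int nid = *(int *)arg;

	printf("hotplug: block %d linked to node %d\n", blk->id, nid);
	blk->nid = nid;
	return 0;
}

static int walk_blocks(struct memory_block *blks, int n, void *arg,
		       walk_func_t func)
{
	for (int i = 0; i < n; i++) {
		int ret = func(&blks[i], arg);

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct memory_block blks[] = { { .id = 0 }, { .id = 1 } };
	enum meminit_context context = MEMINIT_HOTPLUG;
	int nid = 1;
	walk_func_t func = (context == MEMINIT_HOTPLUG) ?
		register_block_hotplug : register_block_early;

	return walk_blocks(blks, 2, &nid, func);
}
```

The design point mirrors the patch: selecting the callback once, before the walk, keeps the per-pfn node check out of the hotplug path entirely.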
@@ -829,11 +844,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) +int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) { + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + return walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), (void *)&nid, - register_mem_sect_under_node); + func); } #ifdef CONFIG_HUGETLBFS diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 604a461848c9..0b67d41bab8f 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -802,6 +802,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); + if (retval) + return retval; /* * Fixup that section violation, being paranoid about code scanning diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index cc85e87eaf05..8428d02cfe58 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2615,7 +2615,7 @@ static int genpd_iterate_idle_states(struct device_node *dn, ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); if (ret <= 0) - return ret; + return ret == -ENOENT ? 0 : ret; /* Loop over the phandles until all the requested entry is found */ of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0e99a760aebd..23af54512053 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -726,7 +726,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) if (is_async(dev)) { get_device(dev); - async_schedule(func, dev); + async_schedule_dev(func, dev); return true; } @@ -1728,13 +1728,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. 
*/ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 48616f358854..94785083c018 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev) device_links_read_lock_held()) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME) || - READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) + if (!(link->flags & DL_FLAG_PM_RUNTIME)) continue; retval = pm_runtime_get_sync(link->supplier); @@ -306,20 +305,38 @@ static int rpm_get_suppliers(struct device *dev) return 0; } -static void rpm_put_suppliers(struct device *dev) +static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) { - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) - continue; while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_put_noidle(link->supplier); + + if (try_to_suspend) + pm_request_idle(link->supplier); } } +static void rpm_put_suppliers(struct device *dev) +{ + __rpm_put_suppliers(dev, true); +} + +static void rpm_suspend_suppliers(struct device *dev) +{ + struct device_link *link; + int idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + pm_request_idle(link->supplier); + + device_links_read_unlock(idx); +} + /** * __rpm_callback - Run a given runtime PM callback for a given device. * @cb: Runtime PM callback to run. @@ -347,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) idx = device_links_read_lock(); retval = rpm_get_suppliers(dev); - if (retval) + if (retval) { + rpm_put_suppliers(dev); goto fail; + } device_links_read_unlock(idx); } @@ -371,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) || (dev->power.runtime_status == RPM_RESUMING && retval))) { idx = device_links_read_lock(); - fail: - rpm_put_suppliers(dev); + __rpm_put_suppliers(dev, false); +fail: device_links_read_unlock(idx); } @@ -647,8 +666,11 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + if (dev->power.irq_safe) + goto out; + /* Maybe the parent is now able to suspend. */ - if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { + if (parent && !parent->power.ignore_children) { spin_unlock(&dev->power.lock); spin_lock(&parent->power.lock); @@ -657,6 +679,14 @@ static int rpm_suspend(struct device *dev, int rpmflags) spin_lock(&dev->power.lock); } + /* Maybe the suppliers are now able to suspend. */ + if (dev->power.links_count > 0) { + spin_unlock_irq(&dev->power.lock); + + rpm_suspend_suppliers(dev); + + spin_lock_irq(&dev->power.lock); + } out: trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); @@ -1618,42 +1648,6 @@ void pm_runtime_remove(struct device *dev) pm_runtime_reinit(dev); } -/** - * pm_runtime_clean_up_links - Prepare links to consumers for driver removal. - * @dev: Device whose driver is going to be removed. - * - * Check links from this device to any consumers and if any of them have active - * runtime PM references to the device, drop the usage counter of the device - * (as many times as needed). - * - * Links with the DL_FLAG_MANAGED flag unset are ignored. 
- * - * Since the device is guaranteed to be runtime-active at the point this is - * called, nothing else needs to be done here. - * - * Moreover, this is called after device_links_busy() has returned 'false', so - * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and - * therefore rpm_active can't be manipulated concurrently. - */ -void pm_runtime_clean_up_links(struct device *dev) -{ - struct device_link *link; - int idx; - - idx = device_links_read_lock(); - - list_for_each_entry_rcu(link, &dev->links.consumers, s_node, - device_links_read_lock_held()) { - if (!(link->flags & DL_FLAG_MANAGED)) - continue; - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put_noidle(dev); - } - - device_links_read_unlock(idx); -} - /** * pm_runtime_get_suppliers - Resume and reference-count supplier devices. * @dev: Consumer device. @@ -1669,8 +1663,8 @@ void pm_runtime_get_suppliers(struct device *dev) device_links_read_lock_held()) if (link->flags & DL_FLAG_PM_RUNTIME) { link->supplier_preactivated = true; - refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + refcount_inc(&link->rpm_active); } device_links_read_unlock(idx); @@ -1683,6 +1677,8 @@ void pm_runtime_put_suppliers(struct device *dev) void pm_runtime_put_suppliers(struct device *dev) { struct device_link *link; + unsigned long flags; + bool put; int idx; idx = device_links_read_lock(); @@ -1691,7 +1687,11 @@ void pm_runtime_put_suppliers(struct device *dev) device_links_read_lock_held()) if (link->supplier_preactivated) { link->supplier_preactivated = false; - if (refcount_dec_not_one(&link->rpm_active)) + spin_lock_irqsave(&dev->power.lock, flags); + put = pm_runtime_status_suspended(dev) && + refcount_dec_not_one(&link->rpm_active); + spin_unlock_irqrestore(&dev->power.lock, flags); + if (put) pm_runtime_put(link->supplier); } @@ -1705,7 +1705,7 @@ void pm_runtime_new_link(struct device *dev) spin_unlock_irq(&dev->power.lock); } -void pm_runtime_drop_link(struct device *dev) +static void pm_runtime_drop_link_count(struct device *dev) { spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); @@ -1713,6 +1713,25 @@ static void pm_runtime_drop_link_count(struct device *dev) spin_unlock_irq(&dev->power.lock); } +/** + * pm_runtime_drop_link - Prepare for device link removal. + * @link: Device link going away. + * + * Drop the link count of the consumer end of @link and decrement the supplier + * device's runtime PM usage counter as many times as needed to drop all of the + * PM runtime references to it from the consumer.
+ */ +void pm_runtime_drop_link(struct device_link *link) +{ + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + return; + + pm_runtime_drop_link_count(link->consumer); + + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); +} + static bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5817b51d2b15..92f0960e9014 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -241,7 +241,9 @@ void wakeup_source_unregister(struct wakeup_source *ws) { if (ws) { wakeup_source_remove(ws); - wakeup_source_sysfs_remove(ws); + if (ws->dev) + wakeup_source_sysfs_remove(ws); + wakeup_source_destroy(ws); } } @@ -1071,6 +1073,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m, break; } + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + return next_ws; } diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 3d80c4b43f72..d7c01b70e43d 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, int regcache_lookup_reg(struct regmap *map, unsigned int reg); int _regmap_raw_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len); + const void *val, size_t val_len, bool noinc); void regmap_async_complete_cb(struct regmap_async *async, int ret); diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index a93cafd7be4f..7f4b3b62492c 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, map->cache_bypass = true; - ret = _regmap_raw_write(map, base, *data, count * val_bytes); + ret = _regmap_raw_write(map, base, *data, count * val_bytes, false); if (ret) dev_err(map->dev, "Unable to sync registers %#x-%#x. 
%d\n", base, cur - map->reg_stride, ret); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index e72843fe41df..4f2ff1b2b450 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -227,6 +227,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, if (*ppos < 0 || !count) return -EINVAL; + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -371,6 +374,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, if (*ppos < 0 || !count) return -EINVAL; + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -457,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_only); - ssize_t result; - bool was_enabled, require_sync = false; + bool new_val, require_sync = false; int err; + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; + map->lock(map->lock_arg); - was_enabled = map->cache_only; - - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) { - map->unlock(map->lock_arg); - return result; - } - - if (map->cache_only && !was_enabled) { + if (new_val && !map->cache_only) { dev_warn(map->dev, "debugfs cache_only=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_only && was_enabled) { + } else if (!new_val && map->cache_only) { dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); require_sync = true; } + map->cache_only = new_val; map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -487,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file, dev_err(map->dev, "Failed to sync cache %d\n", err); } - return result; + return count; } static const struct file_operations regmap_cache_only_fops = { @@ -502,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_bypass); - ssize_t result; - bool was_enabled; + bool new_val; + int err; + + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; map->lock(map->lock_arg); - was_enabled = map->cache_bypass; - - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) - goto out; - - if (map->cache_bypass && !was_enabled) { + if (new_val && !map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_bypass && was_enabled) { + } else if (!new_val && map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); } + map->cache_bypass = new_val; -out: map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); - return result; + return count; } static const struct file_operations regmap_cache_bypass_fops = { @@ -571,8 +583,12 @@ void regmap_debugfs_init(struct regmap *map, const char *name) devname = dev_name(map->dev); if (name) { - map->debugfs_name = 
kasprintf(GFP_KERNEL, "%s-%s", + if (!map->debugfs_name) { + map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", devname, name); + if (!map->debugfs_name) + return; + } name = map->debugfs_name; } else { name = devname; @@ -580,9 +596,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name) if (!strcmp(name, "dummy")) { kfree(map->debugfs_name); - map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", dummy_index); + if (!map->debugfs_name) + return; name = map->debugfs_name; dummy_index++; } @@ -644,6 +661,7 @@ void regmap_debugfs_exit(struct regmap *map) regmap_debugfs_free_dump_cache(map); mutex_unlock(&map->cache_lock); kfree(map->debugfs_name); + map->debugfs_name = NULL; } else { struct regmap_debugfs_node *node, *tmp; diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index 50a66382d87d..e75168b941d0 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val) struct device *dev = context; struct sdw_slave *slave = dev_to_sdw_dev(dev); - return sdw_write(slave, reg, val); + return sdw_write_no_pm(slave, reg, val); } static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) @@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) struct sdw_slave *slave = dev_to_sdw_dev(dev); int read; - read = sdw_read(slave, reg); + read = sdw_read_no_pm(slave, reg); if (read < 0) return read; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 59f911e57719..e0893f1b1452 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/log2.h> #include <linux/hwspinlock.h> +#include <asm/unaligned.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) { - __be16 *b = buf; - - b[0] = cpu_to_be16(val << shift); + put_unaligned_be16(val << shift, buf); } static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) { - __le16 *b = buf; - - b[0] = cpu_to_le16(val << shift); + put_unaligned_le16(val << shift, buf); } static void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { - *(u16 *)buf = val << shift; + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) { - __be32 *b = buf; - - b[0] = cpu_to_be32(val << shift); + put_unaligned_be32(val << shift, buf); } static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) { - __le32 *b = buf; - - b[0] = cpu_to_le32(val << shift); + put_unaligned_le32(val << shift, buf); } static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { - *(u32 *)buf = val << shift; + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } #ifdef CONFIG_64BIT static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) { - __be64 *b = buf; - - b[0] = cpu_to_be64((u64)val << shift); + put_unaligned_be64((u64) val << shift, buf); } static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) { - __le64 *b = 
buf; - - b[0] = cpu_to_le64((u64)val << shift); + put_unaligned_le64((u64) val << shift, buf); } static void regmap_format_64_native(void *buf, unsigned int val, unsigned int shift) { - *(u64 *)buf = (u64)val << shift; + u64 v = (u64) val << shift; + + memcpy(buf, &v, sizeof(v)); } #endif @@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf) static unsigned int regmap_parse_16_be(const void *buf) { - const __be16 *b = buf; - - return be16_to_cpu(b[0]); + return get_unaligned_be16(buf); } static unsigned int regmap_parse_16_le(const void *buf) { - const __le16 *b = buf; - - return le16_to_cpu(b[0]); + return get_unaligned_le16(buf); } static void regmap_parse_16_be_inplace(void *buf) { - __be16 *b = buf; + u16 v = get_unaligned_be16(buf); - b[0] = be16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_16_le_inplace(void *buf) { - __le16 *b = buf; + u16 v = get_unaligned_le16(buf); - b[0] = le16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_16_native(const void *buf) { - return *(u16 *)buf; + u16 v; + + memcpy(&v, buf, sizeof(v)); + return v; } static unsigned int regmap_parse_24(const void *buf) @@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf) static unsigned int regmap_parse_32_be(const void *buf) { - const __be32 *b = buf; - - return be32_to_cpu(b[0]); + return get_unaligned_be32(buf); } static unsigned int regmap_parse_32_le(const void *buf) { - const __le32 *b = buf; - - return le32_to_cpu(b[0]); + return get_unaligned_le32(buf); } static void regmap_parse_32_be_inplace(void *buf) { - __be32 *b = buf; + u32 v = get_unaligned_be32(buf); - b[0] = be32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_32_le_inplace(void *buf) { - __le32 *b = buf; + u32 v = get_unaligned_le32(buf); - b[0] = le32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_32_native(const void *buf) { - return *(u32 *)buf; + u32 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #ifdef CONFIG_64BIT static unsigned int regmap_parse_64_be(const void *buf) { - const __be64 *b = buf; - - return be64_to_cpu(b[0]); + return get_unaligned_be64(buf); } static unsigned int regmap_parse_64_le(const void *buf) { - const __le64 *b = buf; - - return le64_to_cpu(b[0]); + return get_unaligned_le64(buf); } static void regmap_parse_64_be_inplace(void *buf) { - __be64 *b = buf; + u64 v = get_unaligned_be64(buf); - b[0] = be64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_64_le_inplace(void *buf) { - __le64 *b = buf; + u64 v = get_unaligned_le64(buf); - b[0] = le64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_64_native(const void *buf) { - return *(u64 *)buf; + u64 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #endif @@ -1356,6 +1348,7 @@ void regmap_exit(struct regmap *map) if (map->hwlock) hwspin_lock_free(map->hwlock); kfree_const(map->name); + kfree(map->patch); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); @@ -1370,7 +1363,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return !strcmp((*r)->name, data); else return 1; } @@ -1475,7 +1468,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, } static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) + const void *val, size_t val_len, bool noinc) { struct regmap_range_node *range; 
unsigned long flags; @@ -1534,7 +1527,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, win_residue, val_len / map->format.val_bytes); ret = _regmap_raw_write_impl(map, reg, val, win_residue * - map->format.val_bytes); + map->format.val_bytes, noinc); if (ret != 0) return ret; @@ -1548,7 +1541,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, win_residue = range->window_len - win_offset; } - ret = _regmap_select_page(map, ®, range, val_num); + ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num); if (ret != 0) return ret; } @@ -1756,7 +1749,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg, map->work_buf + map->format.reg_bytes + map->format.pad_bytes, - map->format.val_bytes); + map->format.val_bytes, + false); } static inline void *_regmap_map_get_context(struct regmap *map) @@ -1850,7 +1844,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) EXPORT_SYMBOL_GPL(regmap_write_async); int _regmap_raw_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) + const void *val, size_t val_len, bool noinc) { size_t val_bytes = map->format.val_bytes; size_t val_count = val_len / val_bytes; @@ -1871,7 +1865,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, /* Write as many bytes as possible with chunk_size */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes); + ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc); if (ret) return ret; @@ -1882,7 +1876,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, /* Write remaining bytes */ if (val_len) - ret = _regmap_raw_write_impl(map, reg, val, val_len); + ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc); return ret; } @@ -1915,7 +1909,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, map->lock(map->lock_arg); - ret = _regmap_raw_write(map, reg, val, val_len); + ret = _regmap_raw_write(map, reg, val, val_len, false); map->unlock(map->lock_arg); @@ -1973,7 +1967,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg, write_len = map->max_raw_write; else write_len = val_len; - ret = _regmap_raw_write(map, reg, val, write_len); + ret = _regmap_raw_write(map, reg, val, write_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + write_len; @@ -2450,7 +2444,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, map->async = true; - ret = _regmap_raw_write(map, reg, val, val_len); + ret = _regmap_raw_write(map, reg, val, val_len, false); map->async = false; @@ -2461,7 +2455,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_raw_write_async); static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, - unsigned int val_len) + unsigned int val_len, bool noinc) { struct regmap_range_node *range; int ret; @@ -2474,7 +2468,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, range = _regmap_range_lookup(map, reg); if (range) { ret = _regmap_select_page(map, ®, range, - val_len / map->format.val_bytes); + noinc ? 
1 : val_len / map->format.val_bytes); if (ret != 0) return ret; } @@ -2512,7 +2506,7 @@ static int _regmap_bus_read(void *context, unsigned int reg, if (!map->format.parse_val) return -EINVAL; - ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes); + ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); if (ret == 0) *val = map->format.parse_val(work_val); @@ -2628,7 +2622,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read bytes that fit into whole chunks */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_read(map, reg, val, chunk_bytes); + ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); if (ret != 0) goto out; @@ -2639,7 +2633,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read remaining bytes */ if (val_len) { - ret = _regmap_raw_read(map, reg, val, val_len); + ret = _regmap_raw_read(map, reg, val, val_len, false); if (ret != 0) goto out; } @@ -2714,7 +2708,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, read_len = map->max_raw_read; else read_len = val_len; - ret = _regmap_raw_read(map, reg, val, read_len); + ret = _regmap_raw_read(map, reg, val, read_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + read_len; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index d5b4905e2adb..4c3b9813b284 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -534,14 +534,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode, struct swnode *c = to_swnode(child); if (!p || list_empty(&p->children) || - (c && list_is_last(&c->entry, &p->children))) + (c && list_is_last(&c->entry, &p->children))) { + fwnode_handle_put(child); return NULL; + } if (c) c = list_next_entry(c, entry); else c = list_first_entry(&p->children, struct swnode, entry); - return &c->fwnode; + + fwnode_handle_put(child); + return fwnode_handle_get(&c->fwnode); } static struct fwnode_handle * @@ -679,6 +683,13 @@ static void software_node_release(struct kobject *kobj) { struct swnode *swnode = kobj_to_swnode(kobj); + if (swnode->parent) { + ida_simple_remove(&swnode->parent->child_ids, swnode->id); + list_del(&swnode->entry); + } else { + ida_simple_remove(&swnode_root_ids, swnode->id); + } + if (swnode->allocated) { property_entries_free(swnode->node->properties); kfree(swnode->node); @@ -801,6 +812,9 @@ int software_node_register(const struct software_node *node) if (software_node_to_swnode(node)) return -EEXIST; + if (node->parent && !parent) + return -EINVAL; + return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0)); } EXPORT_SYMBOL_GPL(software_node_register); @@ -844,13 +858,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode) if (!swnode) return; - if (swnode->parent) { - ida_simple_remove(&swnode->parent->child_ids, swnode->id); - list_del(&swnode->entry); - } else { - ida_simple_remove(&swnode_root_ids, swnode->id); - } - kobject_put(&swnode->kobj); } EXPORT_SYMBOL_GPL(fwnode_remove_software_node); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 1bb8ec575352..0fc27ac14f29 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -461,6 +461,7 @@ config BLK_DEV_RBD config BLK_DEV_RSXX tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" depends on PCI + select CRC32 help Device driver for IBM's high speed PCIe SSD storage device: Flash Adapter 900GB Full Height. 
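The regmap rework above swaps __be/__le pointer casts for put_unaligned_*()/get_unaligned_*() and memcpy on native-endian buffers, so the format/parse helpers stay correct even when an I/O buffer is not naturally aligned. A stand-alone C sketch of the same idea; put_be16() and parse_16_native() are local stand-ins for the kernel helpers, not their real implementations:

```c
/* Sketch of unaligned-safe encode/decode; local stand-ins for the
 * kernel's put_unaligned_be16() and the memcpy-based native parse. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint16_t val, void *buf)
{
	uint8_t *b = buf;

	/* Byte stores carry no alignment requirement. */
	b[0] = val >> 8;
	b[1] = val & 0xff;
}

static uint16_t parse_16_native(const void *buf)
{
	uint16_t v;

	/*
	 * memcpy() instead of *(const uint16_t *)buf: no alignment or
	 * strict-aliasing assumptions about the I/O buffer.
	 */
	memcpy(&v, buf, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t buf[4] = { 0 };

	put_be16(0xabcd, buf + 1);	/* deliberately misaligned */
	printf("bytes: %02x %02x\n", buf[1], buf[2]);
	printf("host-endian read-back: %#x\n", parse_16_native(buf + 1));
	return 0;
}
```

A direct *(u16 *)buf store at buf + 1 would be undefined behaviour on strict-alignment targets, while the byte-wise and memcpy forms typically still compile down to a single load or store on x86 and arm64.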
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index f19a03b62365..40ea1a425c43 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2902,17 +2902,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx, (unsigned long long) current_req->cmd_flags)) return BLK_STS_IOERR; - spin_lock_irq(&floppy_lock); - list_add_tail(&bd->rq->queuelist, &floppy_reqs); - spin_unlock_irq(&floppy_lock); - if (test_and_set_bit(0, &fdc_busy)) { /* fdc busy, this new request will be treated when the current one is done */ is_alive(__func__, "old request running"); - return BLK_STS_OK; + return BLK_STS_RESOURCE; } + spin_lock_irq(&floppy_lock); + list_add_tail(&bd->rq->queuelist, &floppy_reqs); + spin_unlock_irq(&floppy_lock); + command_status = FD_COMMAND_NONE; __reschedule_timeout(MAXTIMEOUT, "fd_request"); set_fdc(0); @@ -4063,21 +4063,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; - if (!(mode & FMODE_NDELAY)) { - if (mode & (FMODE_READ|FMODE_WRITE)) { - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); - check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) - goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) - goto out; - } - res = -EROFS; - if ((mode & FMODE_WRITE) && - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + if (mode & (FMODE_READ|FMODE_WRITE)) { + UDRS->last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + check_disk_change(bdev); + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + goto out; + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) goto out; } + + res = -EROFS; + + if ((mode & FMODE_WRITE) && + !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + goto out; + mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ef6e251857c8..ffbe792410d1 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -427,11 +427,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, * information. */ struct file *file = lo->lo_backing_file; + struct request_queue *q = lo->lo_queue; int ret; mode |= FALLOC_FL_KEEP_SIZE; - if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { + if (!blk_queue_discard(q)) { ret = -EOPNOTSUPP; goto out; } @@ -862,6 +863,23 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; + + /* + * If the backing device is a block device, mirror its zeroing + * capability. Set the discard sectors to the block device's zeroing + * capabilities because loop discards result in blkdev_issue_zeroout(), + * not blkdev_issue_discard(). This maintains consistent behavior with + * file-backed loop devices: discarded regions read back as zero. + */ + if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) { + struct request_queue *backingq; + + backingq = bdev_get_queue(inode->i_bdev); + + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -869,22 +887,27 @@ static void loop_config_discard(struct loop_device *lo) * encryption is enabled, because it may give an attacker * useful information. 
*/ - if ((!file->f_op->fallocate) || - lo->lo_encrypt_key_size) { + } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { + max_discard_sectors = 0; + granularity = 0; + + } else { + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; + } + + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } else { q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); - return; } - - q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) @@ -1264,7 +1287,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } /* I/O need to be drained during transfer transition */ @@ -1538,12 +1561,12 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_queue->limits.logical_block_size != arg) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } blk_mq_freeze_queue(lo->lo_queue); - /* kill_bdev should have truncated all the pages */ + /* invalidate_bdev should have truncated all the pages */ if (lo->lo_queue->limits.logical_block_size != arg && lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; @@ -2305,6 +2328,8 @@ static void __exit loop_exit(void) range = max_loop ? 
max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2312,6 +2337,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2bc6e7e149bc..950dbc5635fc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -78,8 +78,7 @@ struct link_dead_args { #define NBD_RT_HAS_PID_FILE 3 #define NBD_RT_HAS_CONFIG_REF 4 #define NBD_RT_BOUND 5 -#define NBD_RT_DESTROY_ON_DISCONNECT 6 -#define NBD_RT_DISCONNECT_ON_CLOSE 7 +#define NBD_RT_DISCONNECT_ON_CLOSE 6 #define NBD_DESTROY_ON_DISCONNECT 0 #define NBD_DISCONNECT_REQUESTED 1 @@ -296,7 +295,7 @@ static void nbd_size_clear(struct nbd_device *nbd) } } -static void nbd_size_update(struct nbd_device *nbd) +static void nbd_size_update(struct nbd_device *nbd, bool start) { struct nbd_config *config = nbd->config; struct block_device *bdev = bdget_disk(nbd->disk, 0); @@ -313,7 +312,8 @@ static void nbd_size_update(struct nbd_device *nbd) if (bdev) { if (bdev->bd_disk) { bd_set_size(bdev, config->bytesize); - set_blocksize(bdev, config->blksize); + if (start) + set_blocksize(bdev, config->blksize); } else bdev->bd_invalidated = 1; bdput(bdev); @@ -328,7 +328,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, config->blksize = blocksize; config->bytesize = blocksize * nr_blocks; if (nbd->task_recv != NULL) - nbd_size_update(nbd); + nbd_size_update(nbd, false); } static void nbd_complete_rq(struct request *req) @@ -788,9 +788,9 @@ static void recv_work(struct work_struct *work) blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); } + nbd_config_put(nbd); atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); - nbd_config_put(nbd); kfree(args); } @@ -1014,6 +1014,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, if (!sock) return err; + /* + * We need to make sure we don't get any errant requests while we're + * reallocating the ->socks array. 
+ */ + blk_mq_freeze_queue(nbd->disk->queue); + if (!netlink && !nbd->task_setup && !test_bit(NBD_RT_BOUND, &config->runtime_flags)) nbd->task_setup = current; @@ -1023,25 +1029,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, test_bit(NBD_RT_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; - } - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -1051,8 +1058,14 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, nsock->cookie = 0; socks[config->num_connections++] = nsock; atomic_inc(&config->live_connections); + blk_mq_unfreeze_queue(nbd->disk->queue); return 0; + +put_socket: + blk_mq_unfreeze_queue(nbd->disk->queue); + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) @@ -1289,7 +1302,7 @@ static int nbd_start_device(struct nbd_device *nbd) args->index = i; queue_work(nbd->recv_workq, &args->work); } - nbd_size_update(nbd); + nbd_size_update(nbd, true); return error; } @@ -1345,6 +1358,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) nbd->tag_set.timeout = timeout * HZ; if (timeout) blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); + else + blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); } /* Must be called with config_lock held */ @@ -1498,6 +1513,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode) if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && bdev->bd_openers == 0) nbd_disconnect_and_put(nbd); + bdput(bdev); nbd_config_put(nbd); nbd_put(nbd); @@ -1928,12 +1944,21 @@ again: if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { - set_bit(NBD_RT_DESTROY_ON_DISCONNECT, - &config->runtime_flags); - set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); - put_dev = true; + /* + * We have 1 ref to keep the device around, and then 1 + * ref for our current operation here, which will be + * inherited by the config. If we already have + * DESTROY_ON_DISCONNECT set then we know we don't have + * that extra ref already held so we don't need the + * put_dev. 
+ */ + if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) + put_dev = true; } else { - clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); + if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) + refcount_inc(&nbd->refs); } if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { set_bit(NBD_RT_DISCONNECT_ON_CLOSE, @@ -2104,15 +2129,13 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { - if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT, - &config->runtime_flags)) + if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) put_dev = true; - set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); } else { - if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT, - &config->runtime_flags)) + if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) refcount_inc(&nbd->refs); - clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags); } if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 0e7da5015ccd..13eae973eaea 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -579,6 +579,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) if (tag != -1U) { cmd = &nq->cmds[tag]; cmd->tag = tag; + cmd->error = BLK_STS_OK; cmd->nq = nq; if (nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, @@ -1071,7 +1072,7 @@ static int null_handle_rq(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, - req_op(rq) & REQ_FUA); + rq->cmd_flags & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; @@ -1335,6 +1336,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->timer.function = null_cmd_timer_expired; } cmd->rq = bd->rq; + cmd->error = BLK_STS_OK; cmd->nq = nq; blk_mq_start_request(bd->rq); @@ -1382,7 +1384,12 @@ static void cleanup_queues(struct nullb *nullb) static void null_del_dev(struct nullb *nullb) { - struct nullb_device *dev = nullb->dev; + struct nullb_device *dev; + + if (!nullb) + return; + + dev = nullb->dev; ida_simple_remove(&nullb_indexes, nullb->index); @@ -1736,6 +1743,7 @@ out_cleanup_queues: cleanup_queues(nullb); out_free_nullb: kfree(nullb); + dev->nullb = NULL; out: return rv; } diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index 3d7fdea872f8..90dcf4a915f4 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -2,8 +2,7 @@ #include <linux/vmalloc.h> #include "null_blk.h" -/* zone_size in MBs to sectors. 
*/ -#define ZONE_SIZE_SHIFT 11 +#define MB_TO_SECTS(mb) (((sector_t)(mb) * SZ_1M) >> SECTOR_SHIFT) static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) { @@ -12,7 +11,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) int null_zone_init(struct nullb_device *dev) { - sector_t dev_size = (sector_t)dev->size * 1024 * 1024; + sector_t dev_capacity_sects; sector_t sector = 0; unsigned int i; @@ -20,10 +19,17 @@ int null_zone_init(struct nullb_device *dev) pr_err("zone_size must be power-of-two\n"); return -EINVAL; } + if (dev->zone_size > dev->size) { + pr_err("Zone size larger than device capacity\n"); + return -EINVAL; + } + + dev_capacity_sects = MB_TO_SECTS(dev->size); + dev->zone_size_sects = MB_TO_SECTS(dev->zone_size); + dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects); + if (dev_capacity_sects & (dev->zone_size_sects - 1)) + dev->nr_zones++; - dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; - dev->nr_zones = dev_size >> - (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), GFP_KERNEL | __GFP_ZERO); if (!dev->zones) @@ -51,7 +57,10 @@ int null_zone_init(struct nullb_device *dev) struct blk_zone *zone = &dev->zones[i]; zone->start = zone->wp = sector; - zone->len = dev->zone_size_sects; + if (zone->start + dev->zone_size_sects > dev_capacity_sects) + zone->len = dev_capacity_sects - zone->start; + else + zone->len = dev->zone_size_sects; zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone->cond = BLK_ZONE_COND_EMPTY; @@ -64,6 +73,7 @@ int null_zone_init(struct nullb_device *dev) void null_zone_exit(struct nullb_device *dev) { kvfree(dev->zones); + dev->zones = NULL; } int null_zone_report(struct gendisk *disk, sector_t sector, diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index c5c6487a19d5..7b55811c2a81 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) queue->queuedata = dev; blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); - blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); blk_queue_logical_block_size(queue, dev->blk_size); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a67315786db4..bf2f0373a3b2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4636,6 +4636,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) cancel_work_sync(&rbd_dev->unlock_work); } +/* + * header_rwsem must not be held to avoid a deadlock with + * rbd_dev_refresh() when flushing notifies.
+ */ static void rbd_unregister_watch(struct rbd_device *rbd_dev) { cancel_tasks_sync(rbd_dev); @@ -5276,6 +5280,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -5387,6 +5394,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -6929,9 +6939,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { - rbd_dev_unprobe(rbd_dev); if (rbd_dev->opts) rbd_unregister_watch(rbd_dev); + + rbd_dev_unprobe(rbd_dev); rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; @@ -6942,6 +6953,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) * device. If this image is the one being mapped (i.e., not a * parent), initiate a watch on its header object before using that * object to get detailed information about the rbd image. + * + * On success, returns with header_rwsem held for write if called + * with @depth == 0. */ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) { @@ -6974,9 +6988,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) } } + if (!depth) + down_write(&rbd_dev->header_rwsem); + ret = rbd_dev_header_info(rbd_dev); if (ret) - goto err_out_watch; + goto err_out_probe; /* * If this image is the one being mapped, we have pool name and @@ -7025,10 +7042,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) return 0; err_out_probe: - rbd_dev_unprobe(rbd_dev); -err_out_watch: + if (!depth) + up_write(&rbd_dev->header_rwsem); if (!depth) rbd_unregister_watch(rbd_dev); + rbd_dev_unprobe(rbd_dev); err_out_format: rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); @@ -7047,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -7085,12 +7106,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, goto err_out_rbd_dev; } - down_write(&rbd_dev->header_rwsem); rc = rbd_dev_image_probe(rbd_dev, 0); - if (rc < 0) { - up_write(&rbd_dev->header_rwsem); + if (rc < 0) goto err_out_rbd_dev; - } /* If we are mapping a snapshot it must be marked read-only */ if (rbd_dev->spec->snap_id != CEPH_NOSNAP) @@ -7199,6 +7217,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 10f6368117d8..a1824bb08044 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf, { struct rsxx_cardinfo *card = file_inode(fp)->i_private; char *buf; - ssize_t st; + int st; buf = kzalloc(cnt, GFP_KERNEL); if (!buf) return -ENOMEM; st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1); - if (!st) - st = copy_to_user(ubuf, buf, cnt); + if (!st) { + if (copy_to_user(ubuf, buf, cnt)) + st = -EFAULT; + } kfree(buf); if (st) return st; @@ -867,6 +869,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, card->event_wq = 
create_singlethread_workqueue(DRIVER_NAME"_event"); if (!card->event_wq) { dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); + st = -ENOMEM; goto failed_event_handler; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c2ed3e9128e3..2eeb2bcb488d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -33,6 +33,15 @@ struct virtio_blk_vq { } ____cacheline_aligned_in_smp; struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; struct virtio_device *vdev; /* The disk structure for the kernel. */ @@ -44,6 +53,13 @@ struct virtio_blk { /* Process context for config space updates */ struct work_struct config_work; + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + /* What host tells us, plus 2 for header & trailer. */ unsigned int sg_elems; @@ -189,16 +205,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) if (!range) return -ENOMEM; - __rq_for_each_bio(bio, req) { - u64 sector = bio->bi_iter.bi_sector; - u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; + /* + * A single max discard segment means multi-range discard isn't + * supported, and the block layer only runs contiguity merges as + * for a normal RW request. So we can't rely on the bios for + * retrieving each range's info. + */ + if (queue_max_discard_segments(req->q) == 1) { + range[0].flags = cpu_to_le32(flags); + range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); + range[0].sector = cpu_to_le64(blk_rq_pos(req)); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 sector = bio->bi_iter.bi_sector; + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; - range[n].flags = cpu_to_le32(flags); - range[n].num_sectors = cpu_to_le32(num_sectors); - range[n].sector = cpu_to_le64(sector); - n++; + range[n].flags = cpu_to_le32(flags); + range[n].num_sectors = cpu_to_le32(num_sectors); + range[n].sector = cpu_to_le64(sector); + n++; + } } + WARN_ON_ONCE(n != segments); + req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); req->special_vec.bv_len = sizeof(*range) * segments; @@ -345,9 +376,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, if (err == -ENOSPC) blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - if (err == -ENOMEM || err == -ENOSPC) + switch (err) { + case -ENOSPC: return BLK_STS_DEV_RESOURCE; - return BLK_STS_IOERR; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) @@ -383,10 +419,55 @@ out: return err; } +static void virtblk_get(struct virtio_blk *vblk) +{ + refcount_inc(&vblk->refs); +} + +static void virtblk_put(struct virtio_blk *vblk) +{ + if (refcount_dec_and_test(&vblk->refs)) { + ida_simple_remove(&vd_index_ida, vblk->index); + mutex_destroy(&vblk->vdev_mutex); + kfree(vblk); + } +} + +static int virtblk_open(struct block_device *bd, fmode_t mode) +{ + struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (vblk->vdev) + virtblk_get(vblk); + else + ret = -ENXIO; + + 
mutex_unlock(&vblk->vdev_mutex); + return ret; +} + +static void virtblk_release(struct gendisk *disk, fmode_t mode) +{ + struct virtio_blk *vblk = disk->private_data; + + virtblk_put(vblk); +} + /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (!vblk->vdev) { + ret = -ENXIO; + goto out; + } /* see if the host passed in geometry config */ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { @@ -402,12 +483,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } - return 0; +out: + mutex_unlock(&vblk->vdev_mutex); + return ret; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, + .open = virtblk_open, + .release = virtblk_release, .getgeo = virtblk_getgeo, }; @@ -762,6 +847,10 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_free_index; } + /* This reference is dropped in virtblk_remove(). */ + refcount_set(&vblk->refs, 1); + mutex_init(&vblk->vdev_mutex); + vblk->vdev = vdev; vblk->sg_elems = sg_elems; @@ -916,6 +1005,7 @@ out_put_disk: put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); + kfree(vblk->vqs); out_free_vblk: kfree(vblk); out_free_index: @@ -927,8 +1017,6 @@ out: static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; - int index = vblk->index; - int refc; /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); @@ -938,18 +1026,21 @@ static void virtblk_remove(struct virtio_device *vdev) blk_mq_free_tag_set(&vblk->tag_set); + mutex_lock(&vblk->vdev_mutex); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. 
*/ + vblk->vdev = NULL; + put_disk(vblk->disk); vdev->config->del_vqs(vdev); kfree(vblk->vqs); - kfree(vblk); - /* Only free device id if we don't have any users */ - if (refc == 1) - ida_simple_remove(&vd_index_ida, index); + mutex_unlock(&vblk->vdev_mutex); + + virtblk_put(vblk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 3666afa639d1..d98cfd3b64ff 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -202,7 +202,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) -static int do_block_io_op(struct xen_blkif_ring *ring); +static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags); static int dispatch_rw_block_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req); @@ -615,6 +615,8 @@ int xen_blkif_schedule(void *arg) struct xen_vbd *vbd = &blkif->vbd; unsigned long timeout; int ret; + bool do_eoi; + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; set_freezable(); while (!kthread_should_stop()) { @@ -639,16 +641,23 @@ int xen_blkif_schedule(void *arg) if (timeout == 0) goto purge_gnt_list; + do_eoi = ring->waiting_reqs; + ring->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ - ret = do_block_io_op(ring); + ret = do_block_io_op(ring, &eoi_flags); if (ret > 0) ring->waiting_reqs = 1; if (ret == -EACCES) wait_event_interruptible(ring->shutdown_wq, kthread_should_stop()); + if (do_eoi && !ring->waiting_reqs) { + xen_irq_lateeoi(ring->irq, eoi_flags); + eoi_flags |= XEN_EOI_FLAG_SPURIOUS; + } + purge_gnt_list: if (blkif->vbd.feature_gnt_persistent && time_after(jiffies, ring->next_lru)) { @@ -841,8 +850,11 @@ again: pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { - if (get_free_page(ring, &pages[i]->page)) - goto out_of_memory; + if (get_free_page(ring, &pages[i]->page)) { + put_free_pages(ring, pages_to_gnt, segs_to_map); + ret = -ENOMEM; + goto out; + } addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; @@ -858,10 +870,8 @@ again: break; } - if (segs_to_map) { + if (segs_to_map) ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); - BUG_ON(ret); - } /* * Now swizzle the MFN in our domain with the MFN from the other domain @@ -876,7 +886,7 @@ again: pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(ring, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; - ret |= 1; + ret |= !ret; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; @@ -928,17 +938,18 @@ next: } segs_to_map = 0; last_map = map_until; - if (map_until != num) + if (!ret && map_until != num) goto again; - return ret; - -out_of_memory: - pr_alert("%s: out of memory\n", __func__); - put_free_pages(ring, pages_to_gnt, segs_to_map); - for (i = last_map; i < num; i++) +out: + for (i = last_map; i < num; i++) { + /* Don't zap current batch's valid persistent grants. */ + if(i >= map_until) + pages[i]->persistent_gnt = NULL; pages[i]->handle = BLKBACK_INVALID_HANDLE; - return -ENOMEM; + } + + return ret; } static int xen_blkbk_map_seg(struct pending_req *pending_req) @@ -1121,7 +1132,7 @@ static void end_block_io_op(struct bio *bio) * and transmute it to the block API to hand it over to the proper block disk. 
*/ static int -__do_block_io_op(struct xen_blkif_ring *ring) +__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) { union blkif_back_rings *blk_rings = &ring->blk_rings; struct blkif_request req; @@ -1144,6 +1155,9 @@ __do_block_io_op(struct xen_blkif_ring *ring) if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; + /* We've seen a request, so clear spurious eoi flag. */ + *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; + if (kthread_should_stop()) { more_to_do = 1; break; @@ -1202,13 +1216,13 @@ done: } static int -do_block_io_op(struct xen_blkif_ring *ring) +do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) { union blkif_back_rings *blk_rings = &ring->blk_rings; int more_to_do; do { - more_to_do = __do_block_io_op(ring); + more_to_do = __do_block_io_op(ring, eoi_flags); if (more_to_do) break; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 1d3002d773f7..20cb0c23a22c 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -316,6 +316,7 @@ struct xen_blkif { struct work_struct free_work; unsigned int nr_ring_pages; + bool multi_ref; /* All rings for this device. */ struct xen_blkif_ring *rings; unsigned int nr_rings; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index c4cd68116e7f..dca91d641f1d 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -229,9 +229,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, BUG(); } - err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, - xen_blkif_be_int, 0, - "blkif-backend", ring); + err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid, + evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); if (err < 0) { xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); ring->blk_rings.common.sring = NULL; @@ -257,6 +256,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (ring->xenblkd) { kthread_stop(ring->xenblkd); + ring->xenblkd = NULL; wake_up(&ring->shutdown_wq); } @@ -644,7 +644,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev, /* setup back pointer */ be->blkif->be = be; - err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, + err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL, + backend_changed, "%s/%s", dev->nodename, "physical-device"); if (err) goto fail; @@ -948,14 +949,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) for (i = 0; i < nr_grefs; i++) { char ring_ref_name[RINGREF_NAME_LEN]; - snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); + if (blkif->multi_ref) + snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); + else { + WARN_ON(i != 0); + snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref"); + } + err = xenbus_scanf(XBT_NIL, dir, ring_ref_name, "%u", &ring_ref[i]); if (err != 1) { - if (nr_grefs == 1) - break; - err = -EINVAL; xenbus_dev_fatal(dev, err, "reading %s/%s", dir, ring_ref_name); @@ -963,18 +967,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) } } - if (err != 1) { - WARN_ON(nr_grefs != 1); - - err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", - &ring_ref[0]); - if (err != 1) { - err = -EINVAL; - xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); - return err; - } - } - err = -ENOMEM; for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -1078,10 +1070,15 @@ static int connect_ring(struct backend_info *be) 
blkif->nr_rings, blkif->blk_protocol, protocol, pers_grants ? "persistent grants" : ""); - ring_page_order = xenbus_read_unsigned(dev->otherend, - "ring-page-order", 0); - - if (ring_page_order > xen_blkif_max_ring_order) { + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u", + &ring_page_order); + if (err != 1) { + blkif->nr_ring_pages = 1; + blkif->multi_ref = false; + } else if (ring_page_order <= xen_blkif_max_ring_order) { + blkif->nr_ring_pages = 1 << ring_page_order; + blkif->multi_ref = true; + } else { err = -EINVAL; xenbus_dev_fatal(dev, err, "requested ring page order %d exceeds max:%d", @@ -1090,8 +1087,6 @@ static int connect_ring(struct backend_info *be) return err; } - blkif->nr_ring_pages = 1 << ring_page_order; - if (blkif->nr_rings == 1) return read_per_ring_refs(&blkif->rings[0], dev->otherend); else { diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c02be06c5299..def41e1bd736 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -47,6 +47,7 @@ #include <linux/bitmap.h> #include <linux/list.h> #include <linux/workqueue.h> +#include <linux/sched/mm.h> #include <xen/xen.h> #include <xen/xenbus.h> @@ -935,7 +936,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info) if (info->feature_discard) { blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); - rq->limits.discard_granularity = info->discard_granularity; + rq->limits.discard_granularity = info->discard_granularity ?: + info->physical_sector_size; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); @@ -2168,19 +2170,12 @@ static void blkfront_closing(struct blkfront_info *info) static void blkfront_setup_discard(struct blkfront_info *info) { - int err; - unsigned int discard_granularity; - unsigned int discard_alignment; - info->feature_discard = 1; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-granularity", "%u", &discard_granularity, - "discard-alignment", "%u", &discard_alignment, - NULL); - if (!err) { - info->discard_granularity = discard_granularity; - info->discard_alignment = discard_alignment; - } + info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend, + "discard-granularity", + 0); + info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend, + "discard-alignment", 0); info->feature_secdiscard = !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", 0); @@ -2188,10 +2183,12 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) { - unsigned int psegs, grants; + unsigned int psegs, grants, memflags; int err, i; struct blkfront_info *info = rinfo->dev_info; + memflags = memalloc_noio_save(); + if (info->max_indirect_segments == 0) { if (!HAS_EXTRA_REQ) grants = BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -2223,7 +2220,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = alloc_page(GFP_NOIO); + struct page *indirect_page = alloc_page(GFP_KERNEL); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2234,15 +2231,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) rinfo->shadow[i].grants_used = kvcalloc(grants, sizeof(rinfo->shadow[i].grants_used[0]), - GFP_NOIO); + GFP_KERNEL); 
rinfo->shadow[i].sg = kvcalloc(psegs, sizeof(rinfo->shadow[i].sg[0]), - GFP_NOIO); + GFP_KERNEL); if (info->max_indirect_segments) rinfo->shadow[i].indirect_grants = kvcalloc(INDIRECT_GREFS(grants), sizeof(rinfo->shadow[i].indirect_grants[0]), - GFP_NOIO); + GFP_KERNEL); if ((rinfo->shadow[i].grants_used == NULL) || (rinfo->shadow[i].sg == NULL) || (info->max_indirect_segments && @@ -2251,6 +2248,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) sg_init_table(rinfo->shadow[i].sg, psegs); } + memalloc_noio_restore(memflags); return 0; @@ -2270,6 +2268,9 @@ out_of_memory: __free_page(indirect_page); } } + + memalloc_noio_restore(memflags); + return -ENOMEM; } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 1bf4a908a0bd..719c6b7741af 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev, struct bio_vec bio_vec; struct page *page; ssize_t ret = len; - int mode; + int mode, err; unsigned long blk_idx = 0; if (sysfs_streq(buf, "idle")) @@ -719,12 +719,17 @@ static ssize_t writeback_store(struct device *dev, * XXX: A single page IO would be inefficient for write, * but it is not bad as a starter. */ - ret = submit_bio_wait(&bio); - if (ret) { + err = submit_bio_wait(&bio); + if (err) { zram_slot_lock(zram, index); zram_clear_flag(zram, index, ZRAM_UNDER_WB); zram_clear_flag(zram, index, ZRAM_IDLE); zram_slot_unlock(zram, index); + /* + * Record the error so that the last IO error is + * returned if any of the writeback IOs fail. + */ + ret = err; continue; } @@ -1072,7 +1077,7 @@ static ssize_t mm_stat_show(struct device *dev, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.same_pages), - pool_stats.pages_compacted, + atomic_long_read(&pool_stats.pages_compacted), (u64)atomic64_read(&zram->stats.huge_pages)); up_read(&zram->init_lock); @@ -2023,7 +2028,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index f02a4bdc0ca7..dd29d687cd38 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -329,6 +329,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4204, "BCM2076B1" }, /* 002.002.004 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x4606, "BCM4324B5" }, /* 002.006.006 */ { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ @@ -343,6 +344,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { }; static const struct bcm_subver_table bcm_usb_subver_table[] = { + { 0x2105, "BCM20703A1" }, /* 001.001.005 */ { 0x210b, "BCM43142A0" }, /* 001.001.011 */ { 0x2112, "BCM4314A0" }, /* 001.001.018 */ { 0x2118, "BCM20702A0" }, /* 001.001.024 */ diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 0f3a020703ab..4c7978cb1786 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { .helper = NULL, - .firmware = "mrvl/sd8977_uapsta.bin", + 
.firmware = "mrvl/sdsd8977_combo_v2.bin", .reg = &btmrvl_reg_8977, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, - .firmware = "mrvl/sd8997_uapsta.bin", + .firmware = "mrvl/sdsd8997_combo_v4.bin", .reg = &btmrvl_reg_8997, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin"); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 813338288453..304178be1ef4 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. */ + param = 1; + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + goto free_fw; + } + fw_ptr = fw->data; fw_size = fw->size; diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index e11169ad8247..8a81fbca5c9d 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -1015,7 +1015,7 @@ static int btmtkuart_probe(struct serdev_device *serdev) if (btmtkuart_is_standalone(bdev)) { err = clk_prepare_enable(bdev->osc); if (err < 0) - return err; + goto err_hci_free_dev; if (bdev->boot) { gpiod_set_value_cansleep(bdev->boot, 1); @@ -1028,10 +1028,8 @@ static int btmtkuart_probe(struct serdev_device *serdev) /* Power on */ err = regulator_enable(bdev->vcc); - if (err < 0) { - clk_disable_unprepare(bdev->osc); - return err; - } + if (err < 0) + goto err_clk_disable_unprepare; /* Reset if the reset-gpios is available otherwise the board * -level design should be guaranteed. 
@@ -1063,7 +1061,6 @@ static int btmtkuart_probe(struct serdev_device *serdev) err = hci_register_dev(hdev); if (err < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); - hci_free_dev(hdev); goto err_regulator_disable; } @@ -1072,6 +1069,11 @@ static int btmtkuart_probe(struct serdev_device *serdev) err_regulator_disable: if (btmtkuart_is_standalone(bdev)) regulator_disable(bdev->vcc); +err_clk_disable_unprepare: + if (btmtkuart_is_standalone(bdev)) + clk_disable_unprepare(bdev->osc); +err_hci_free_dev: + hci_free_dev(hdev); return err; } diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 98d53764871f..2acb719e596f 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev) btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", btqcomsmd_cmd_callback, btq); - if (IS_ERR(btq->cmd_channel)) - return PTR_ERR(btq->cmd_channel); + if (IS_ERR(btq->cmd_channel)) { + ret = PTR_ERR(btq->cmd_channel); + goto destroy_acl_channel; + } hdev = hci_alloc_dev(); - if (!hdev) - return -ENOMEM; + if (!hdev) { + ret = -ENOMEM; + goto destroy_cmd_channel; + } hci_set_drvdata(hdev, btq); btq->hdev = hdev; @@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); - if (ret < 0) { - hci_free_dev(hdev); - return ret; - } + if (ret < 0) + goto hci_free_dev; platform_set_drvdata(pdev, btq); return 0; + +hci_free_dev: + hci_free_dev(hdev); +destroy_cmd_channel: + rpmsg_destroy_ept(btq->cmd_channel); +destroy_acl_channel: + rpmsg_destroy_ept(btq->acl_channel); + + return ret; } static int btqcomsmd_remove(struct platform_device *pdev) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index bf3c02be6930..0dfaf90a31b0 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, * the end. 
*/ len = patch_length; - buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, - GFP_KERNEL); + buf = kvmalloc(patch_length, GFP_KERNEL); if (!buf) return -ENOMEM; + memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); *_buf = buf; @@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) if (ret < 0) return ret; ret = fw->size; - *buff = kmemdup(fw->data, ret, GFP_KERNEL); - if (!*buff) + *buff = kvmalloc(fw->size, GFP_KERNEL); + if (*buff) + memcpy(*buff, fw->data, ret); + else ret = -ENOMEM; release_firmware(fw); @@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, goto out; if (btrtl_dev->cfg_len > 0) { - tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); + tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; } memcpy(tbuff, fw_data, ret); - kfree(fw_data); + kvfree(fw_data); memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); ret += btrtl_dev->cfg_len; @@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, ret = rtl_download_firmware(hdev, fw_data, ret); out: - kfree(fw_data); + kvfree(fw_data); return ret; } void btrtl_free(struct btrtl_device_info *btrtl_dev) { - kfree(btrtl_dev->fw_data); - kfree(btrtl_dev->cfg_data); + kvfree(btrtl_dev->fw_data); + kvfree(btrtl_dev->cfg_data); kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 9c3b063e1a1f..b467fd05c5e8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2568,7 +2568,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); if (!skb) { hdev->stat.err_rx++; - goto err_out; + return; } hci_skb_pkt_type(skb) = HCI_EVENT_PKT; @@ -2586,13 +2586,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb) */ if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { data->evt_skb = skb_clone(skb, GFP_ATOMIC); - if (!data->evt_skb) - goto err_out; + if (!data->evt_skb) { + kfree_skb(skb); + return; + } } err = hci_recv_frame(hdev, skb); - if (err < 0) - goto err_free_skb; + if (err < 0) { + kfree_skb(data->evt_skb); + data->evt_skb = NULL; + return; + } if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { @@ -2601,11 +2606,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb) wake_up_bit(&data->flags, BTUSB_TX_WAIT_VND_EVT); } -err_out: - return; -err_free_skb: - kfree_skb(data->evt_skb); - data->evt_skb = NULL; return; } else if (urb->status == -ENOENT) { /* Avoid suspend failed when usb_kill_urb */ @@ -2664,6 +2664,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev) buf = kmalloc(size, GFP_KERNEL); if (!buf) { kfree(dr); + usb_free_urb(urb); return -ENOMEM; } @@ -2792,7 +2793,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -2800,6 +2801,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. 
*/ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + goto err_release_fw; + } + fw_ptr = fw->data; fw_size = fw->size; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 7646636f2d18..94ed734c1d7e 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -107,6 +107,7 @@ struct bcm_device { u32 oper_speed; int irq; bool irq_active_low; + bool irq_acquired; #ifdef CONFIG_PM struct hci_uart *hu; @@ -319,6 +320,8 @@ static int bcm_request_irq(struct bcm_data *bcm) goto unlock; } + bdev->irq_acquired = true; + device_init_wakeup(bdev->dev, true); pm_runtime_set_autosuspend_delay(bdev->dev, @@ -487,7 +490,7 @@ static int bcm_close(struct hci_uart *hu) } if (bdev) { - if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) { + if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) { devm_free_irq(bdev->dev, bdev->irq, bdev); device_init_wakeup(bdev->dev, false); pm_runtime_disable(bdev->dev); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index dacf297baf59..bf3e23104194 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -244,6 +244,9 @@ static int h5_close(struct hci_uart *hu) skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); + kfree_skb(h5->rx_skb); + h5->rx_skb = NULL; + if (h5->vnd && h5->vnd->close) h5->vnd->close(h5); @@ -790,7 +793,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (!h5) return -ENOMEM; - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags); h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; @@ -891,6 +894,11 @@ static int h5_btrtl_setup(struct h5 *h5) /* Give the device some time before the hci-core sends it a reset */ usleep_range(10000, 20000); + /* Enable controller to do both LE scan and BR/EDR inquiry * simultaneously.
+ */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks); + out_free: btrtl_free(btrtl_dev); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 85a30fb9177b..8be4d807d137 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) goto no_schedule; - if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { - set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) goto no_schedule; - } BT_DBG(""); @@ -174,10 +173,10 @@ restart: kfree_skb(skb); } + clear_bit(HCI_UART_SENDING, &hu->tx_state); if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) goto restart; - clear_bit(HCI_UART_SENDING, &hu->tx_state); wake_up_bit(&hu->tx_state, HCI_UART_SENDING); } @@ -538,6 +537,7 @@ static void hci_uart_tty_close(struct tty_struct *tty) clear_bit(HCI_UART_PROTO_READY, &hu->flags); percpu_up_write(&hu->proto_lock); + cancel_work_sync(&hu->init_ready); cancel_work_sync(&hu->write_work); if (hdev) { diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 4652896d4990..1b4ad231e6ed 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -85,9 +85,9 @@ static void hci_uart_write_work(struct work_struct *work) hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); kfree_skb(skb); } - } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); - clear_bit(HCI_UART_SENDING, &hu->tx_state); + clear_bit(HCI_UART_SENDING, &hu->tx_state); + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); } /* ------- Interface to HCI layer ------ */ @@ -357,7 +357,10 @@ void hci_uart_unregister_device(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hci_unregister_dev(hdev); + + cancel_work_sync(&hu->init_ready); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index cc7bb900f524..95672306d371 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, goto error; mc_adev = resource->data; - if (!mc_adev) + if (!mc_adev) { + error = -EINVAL; goto error; + } mc_adev->consumer_link = device_link_add(&mc_dev->dev, &mc_adev->dev, diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index d9629fc13a15..0a4a387b615d 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -129,7 +129,12 @@ error_destroy_mc_io: */ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) { - struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; + struct fsl_mc_device *dpmcp_dev; + + if (!mc_io) + return; + + dpmcp_dev = mc_io->dpmcp_dev; if (dpmcp_dev) fsl_mc_io_unset_dpmcp(mc_io); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 20c957185af2..2e9252d37a18 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; } +/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. Fixup to a proper range. This will probably + * never be fixed in firmware. 
+ */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + } acpi_dev_free_resource_list(&resource_list); diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 1b14256376d2..7c1da45be166 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -544,10 +544,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id); ++id; ret = device_register(&dev->dev); - if (ret) { + if (ret) put_device(&dev->dev); - kfree(dev); - } } } diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index b040447575ad..dcfb32ee5cb6 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev) */ l3->debug_irq = platform_get_irq(pdev, 0); ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, - 0x0, "l3-dbg-irq", l3); + IRQF_NO_THREAD, "l3-dbg-irq", l3); if (ret) { dev_err(l3->dev, "request_irq failed for %d\n", l3->debug_irq); @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev) l3->app_irq = platform_get_irq(pdev, 1); ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, - 0x0, "l3-app-irq", l3); + IRQF_NO_THREAD, "l3-app-irq", l3); if (ret) dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c index 03ddcf426887..0b8f53a688b8 100644 --- a/drivers/bus/qcom-ebi2.c +++ b/drivers/bus/qcom-ebi2.c @@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev) /* Figure out the chipselect */ ret = of_property_read_u32(child, "reg", &csindex); - if (ret) + if (ret) { + of_node_put(child); return ret; + } if (csindex > 5) { dev_err(dev, diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index be79d6c6a4e4..1bb00a959c67 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, if (ret) goto unlock; - *buf = readl(rsb->regs + RSB_DATA); + *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); unlock: mutex_unlock(&rsb->lock); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f0bc0841cbc4..d59e1ca9990b 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -70,11 +70,13 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @child_needs_resume: runtime resume needed for child on resume from suspend * @disable_on_idle: status flag used for disabling modules with resets * @idle_work: work structure used to perform delayed idle on a module - * @clk_enable_quirk: module specific clock enable quirk - * @clk_disable_quirk: module specific clock disable quirk + * @pre_reset_quirk: module specific pre-reset quirk + * 
@post_reset_quirk: module specific post-reset quirk * @reset_done_quirk: module specific reset done quirk * @module_enable_quirk: module specific enable quirk * @module_disable_quirk: module specific disable quirk + * @module_unlock_quirk: module specific sysconfig unlock quirk + * @module_lock_quirk: module specific sysconfig lock quirk */ struct sysc { struct device *dev; @@ -97,11 +99,13 @@ struct sysc { unsigned int needs_resume:1; unsigned int child_needs_resume:1; struct delayed_work idle_work; - void (*clk_enable_quirk)(struct sysc *sysc); - void (*clk_disable_quirk)(struct sysc *sysc); + void (*pre_reset_quirk)(struct sysc *sysc); + void (*post_reset_quirk)(struct sysc *sysc); void (*reset_done_quirk)(struct sysc *sysc); void (*module_enable_quirk)(struct sysc *sysc); void (*module_disable_quirk)(struct sysc *sysc); + void (*module_unlock_quirk)(struct sysc *sysc); + void (*module_lock_quirk)(struct sysc *sysc); }; static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, @@ -182,6 +186,37 @@ static u32 sysc_read_sysstatus(struct sysc *ddata) return sysc_read(ddata, offset); } +/* Poll on reset status */ +static int sysc_wait_softreset(struct sysc *ddata) +{ + u32 sysc_mask, syss_done, rstval; + int syss_offset, error = 0; + + if (ddata->cap->regbits->srst_shift < 0) + return 0; + + syss_offset = ddata->offsets[SYSC_SYSSTATUS]; + sysc_mask = BIT(ddata->cap->regbits->srst_shift); + + if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) + syss_done = 0; + else + syss_done = ddata->cfg.syss_mask; + + if (syss_offset >= 0) { + error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, + rstval, (rstval & ddata->cfg.syss_mask) == + syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); + + } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { + error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, + rstval, !(rstval & sysc_mask), + 100, MAX_MODULE_SOFTRESET_WAIT); + } + + return error; +} + static int sysc_add_named_clock_from_child(struct sysc *ddata, const char *name, const char *optfck_name) @@ -567,6 +602,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata) return 0; } +/* Interconnect instances to probe before l4_per instances */ +static struct resource early_bus_ranges[] = { + /* am3/4 l4_wkup */ + { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, }, + /* omap4/5 and dra7 l4_cfg */ + { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, }, + /* omap4 l4_wkup */ + { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, }, + /* omap5 and dra7 l4_wkup without dra7 dcan segment */ + { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, }, +}; + +static atomic_t sysc_defer = ATOMIC_INIT(10); + +/** + * sysc_defer_non_critical - defer non_critical interconnect probing + * @ddata: device driver data + * + * We want to probe l4_cfg and l4_wkup interconnect instances before any + * l4_per instances as l4_per instances depend on resources on l4_cfg and + * l4_wkup interconnects. 
+ */ +static int sysc_defer_non_critical(struct sysc *ddata) +{ + struct resource *res; + int i; + + if (!atomic_read(&sysc_defer)) + return 0; + + for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) { + res = &early_bus_ranges[i]; + if (ddata->module_pa >= res->start && + ddata->module_pa <= res->end) { + atomic_set(&sysc_defer, 0); + + return 0; + } + } + + atomic_dec_if_positive(&sysc_defer); + + return -EPROBE_DEFER; +} + static struct device_node *stdout_path; static void sysc_init_stdout_path(struct sysc *ddata) @@ -791,6 +871,10 @@ static int sysc_map_and_check_registers(struct sysc *ddata) if (error) return error; + error = sysc_defer_non_critical(ddata); + if (error) + return error; + sysc_check_children(ddata); error = sysc_parse_registers(ddata); @@ -863,6 +947,22 @@ static void sysc_show_registers(struct sysc *ddata) buf); } +/** + * sysc_write_sysconfig - handle sysconfig quirks for register write + * @ddata: device driver data + * @value: register value + */ +static void sysc_write_sysconfig(struct sysc *ddata, u32 value) +{ + if (ddata->module_unlock_quirk) + ddata->module_unlock_quirk(ddata); + + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); + + if (ddata->module_lock_quirk) + ddata->module_lock_quirk(ddata); +} + #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) #define SYSC_CLOCACT_ICK 2 @@ -872,18 +972,47 @@ static int sysc_enable_module(struct device *dev) struct sysc *ddata; const struct sysc_regbits *regbits; u32 reg, idlemodes, best_mode; + int error; ddata = dev_get_drvdata(dev); + + /* + * Some modules like DSS reset automatically on idle. Enable optional + * reset clocks and wait for OCP softreset to complete. + */ + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) { + error = sysc_enable_opt_clocks(ddata); + if (error) { + dev_err(ddata->dev, + "Optional clocks failed for enable: %i\n", + error); + return error; + } + } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) + sysc_disable_opt_clocks(ddata); + + /* + * Some subsystem private interconnects, like DSS top level module, + * need only the automatic OCP softreset handling with no sysconfig + * register bits to configure. + */ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) return 0; regbits = ddata->cap->regbits; reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); - /* Set CLOCKACTIVITY, we only use it for ick */ + /* + * Set CLOCKACTIVITY, we only use it for ick. And we only configure it + * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware + * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag. 
+ */ if (regbits->clkact_shift >= 0 && - (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT || - ddata->cfg.sysc_val & BIT(regbits->clkact_shift)) + (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT)) reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; /* Set SIDLE mode */ @@ -909,7 +1038,7 @@ static int sysc_enable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_midle: /* Set MIDLE mode */ @@ -928,16 +1057,19 @@ set_midle: reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_autoidle: /* Autoidle bit must be enabled separately if available */ if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); } + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + if (ddata->module_enable_quirk) ddata->module_enable_quirk(ddata); @@ -993,7 +1125,7 @@ static int sysc_disable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_sidle: /* Set SIDLE mode */ @@ -1016,7 +1148,10 @@ set_sidle: if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); + + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); return 0; } @@ -1162,7 +1297,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_suspend(dev); @@ -1174,7 +1310,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_resume(dev); @@ -1222,16 +1359,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), /* Some timers on omap4 and later */ - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), @@ -1244,19 +1381,27 @@ 
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ - SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff, + SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | SYSC_QUIRK_SWSUP_SIDLE), /* Quirks that need to be set based on detected module */ - SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, + SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, SYSC_MODULE_QUIRK_AESS), - SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, + SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_NEEDED), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, @@ -1269,12 +1414,22 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_I2C), SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, SYSC_MODULE_QUIRK_I2C), - SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, + SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, + SYSC_MODULE_QUIRK_RTC_UNLOCK), + SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), - SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), @@ -1283,57 +1438,66 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), #ifdef DEBUG - SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), - SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), - SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0), - SYSC_QUIRK("control", 0, 0, 
0x10, -1, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), + SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), + SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), + SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 0xffff00f0, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), - SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), - SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), + SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), - SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), + SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), - SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), - SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), - SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0), + SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), + SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), + SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), + SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), - SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0), - SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0), + SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), + SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), - SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 
0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0), + SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0), - SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0), - SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0), - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0), - SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), + SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), + SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), + SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), - SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), + SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0), + SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), #endif }; @@ -1355,16 +1519,13 @@ static void sysc_init_early_quirks(struct sysc *ddata) if (q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset 
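The quirk tables above replace the bare -1 sentinel with -ENODEV for register offsets that do not exist, which is what lets the matching code that follows drop its ">= 0" wildcard guards: a probed module records -ENODEV for a missing register, so a plain equality test now covers both cases. A standalone sketch of the before/after semantics (simplified structs and names, not the driver's own):

#include <errno.h>
#include <stdio.h>

struct quirk { int rev_offset; };	/* -ENODEV means "no such register" */
struct module { int rev_offset; };	/* probe fills in -ENODEV when absent */

/* Old style: a negative table entry was a wildcard that skipped the check. */
static int matches_old(const struct quirk *q, const struct module *m)
{
	return !(q->rev_offset >= 0 && q->rev_offset != m->rev_offset);
}

/* New style: -ENODEV on both sides compares equal, so one test suffices. */
static int matches_new(const struct quirk *q, const struct module *m)
{
	return q->rev_offset == m->rev_offset;
}

int main(void)
{
	struct quirk q = { .rev_offset = -ENODEV };
	struct module with = { .rev_offset = 0x10 };
	struct module without = { .rev_offset = -ENODEV };

	/* old "-1 is a wildcard" matched both; the new form only matches
	 * modules that really lack the register */
	printf("old: %d %d\n", matches_old(&q, &with), matches_old(&q, &without));
	printf("new: %d %d\n", matches_new(&q, &with), matches_new(&q, &without));
	return 0;
}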
!= ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; ddata->name = q->name; @@ -1384,16 +1545,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata) if (q->base && q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; if (q->revision == ddata->revision || @@ -1424,7 +1582,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata) sysc_write(ddata, offset, 1); } -/* I2C needs extra enable bit toggling for reset */ +/* I2C needs to be disabled for reset */ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) { int offset; @@ -1445,14 +1603,48 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) sysc_write(ddata, offset, val); } -static void sysc_clk_enable_quirk_i2c(struct sysc *ddata) +static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) +{ + sysc_clk_quirk_i2c(ddata, false); +} + +static void sysc_post_reset_quirk_i2c(struct sysc *ddata) { sysc_clk_quirk_i2c(ddata, true); } -static void sysc_clk_disable_quirk_i2c(struct sysc *ddata) +/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ +static void sysc_quirk_rtc(struct sysc *ddata, bool lock) { - sysc_clk_quirk_i2c(ddata, false); + u32 val, kick0_val = 0, kick1_val = 0; + unsigned long flags; + int error; + + if (!lock) { + kick0_val = 0x83e70b13; + kick1_val = 0x95a4f1e0; + } + + local_irq_save(flags); + /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ + error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); + if (error) + dev_warn(ddata->dev, "rtc busy timeout\n"); + /* Now we have ~15 microseconds to read/write various registers */ + sysc_write(ddata, 0x6c, kick0_val); + sysc_write(ddata, 0x70, kick1_val); + local_irq_restore(flags); +} + +static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, false); +} + +static void sysc_module_lock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, true); } /* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */ @@ -1494,14 +1686,14 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; return; } if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c; - ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; + ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; return; } @@ -1509,6 +1701,13 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { + ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; + ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; + + return; + } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; @@ -1602,11 +1801,10 @@ static int
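The new sysc_quirk_rtc() above encodes the am3/am4 RTC write-protect handshake: wait for the BUSY bit in the status register at offset 0x44 to clear, then write 0x83e70b13 and 0x95a4f1e0 to the kick registers at 0x6c and 0x70 to unlock (or zeros to re-lock), with interrupts off because the window after BUSY clears is only about 15 microseconds. A rough standalone sketch of the same sequence; the mmio accessors here are illustrative stand-ins for the kernel's readl/sysc_write helpers:

#include <stdint.h>

#define RTC_STATUS	0x44	/* BIT(0) = BUSY, per the hunk above */
#define RTC_KICK0	0x6c
#define RTC_KICK1	0x70
#define KICK0_UNLOCK	0x83e70b13u
#define KICK1_UNLOCK	0x95a4f1e0u

static inline uint32_t mmio_read(volatile uint8_t *base, uint32_t off)
{
	return *(volatile uint32_t *)(base + off);
}

static inline void mmio_write(volatile uint8_t *base, uint32_t off, uint32_t v)
{
	*(volatile uint32_t *)(base + off) = v;
}

/* Returns 0 on success, -1 if BUSY never cleared within 'spins' polls. */
static int rtc_set_lock(volatile uint8_t *base, int lock, unsigned int spins)
{
	uint32_t kick0 = lock ? 0 : KICK0_UNLOCK;
	uint32_t kick1 = lock ? 0 : KICK1_UNLOCK;

	/* BUSY may stay set for one 32 kHz cycle (~30 us); poll it away */
	while (mmio_read(base, RTC_STATUS) & 1) {
		if (!spins--)
			return -1;
	}
	/* the window after BUSY clears is short: write both kicks promptly */
	mmio_write(base, RTC_KICK0, kick0);
	mmio_write(base, RTC_KICK1, kick1);
	return 0;
}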
sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) */ static int sysc_reset(struct sysc *ddata) { - int sysc_offset, syss_offset, sysc_val, rstval, error = 0; - u32 sysc_mask, syss_done; + int sysc_offset, sysc_val, error; + u32 sysc_mask; sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; - syss_offset = ddata->offsets[SYSC_SYSSTATUS]; if (ddata->legacy_mode || sysc_offset < 0 || ddata->cap->regbits->srst_shift < 0 || @@ -1615,13 +1813,8 @@ static int sysc_reset(struct sysc *ddata) sysc_mask = BIT(ddata->cap->regbits->srst_shift); - if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) - syss_done = 0; - else - syss_done = ddata->cfg.syss_mask; - - if (ddata->clk_disable_quirk) - ddata->clk_disable_quirk(ddata); + if (ddata->pre_reset_quirk) + ddata->pre_reset_quirk(ddata); sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; @@ -1631,21 +1824,12 @@ static int sysc_reset(struct sysc *ddata) usleep_range(ddata->cfg.srst_udelay, ddata->cfg.srst_udelay * 2); - if (ddata->clk_enable_quirk) - ddata->clk_enable_quirk(ddata); + if (ddata->post_reset_quirk) + ddata->post_reset_quirk(ddata); - /* Poll on reset status */ - if (syss_offset >= 0) { - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, - (rstval & ddata->cfg.syss_mask) == - syss_done, - 100, MAX_MODULE_SOFTRESET_WAIT); - - } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, - !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); - } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); if (ddata->reset_done_quirk) ddata->reset_done_quirk(ddata); @@ -2550,7 +2734,9 @@ static int sysc_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - reset_control_assert(ddata->rsts); + + if (!reset_control_status(ddata->rsts)) + reset_control_assert(ddata->rsts); unprepare: sysc_unprepare(ddata); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 812d6aa6e013..f2d015a8ff57 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -125,7 +125,7 @@ config AGP_HP_ZX1 config AGP_PARISC tristate "HP Quicksilver AGP support" - depends on AGP && PARISC && 64BIT + depends on AGP && PARISC && 64BIT && IOMMU_SBA help This option gives you AGP GART support for the HP Quicksilver AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c6271ce250b3..0941d38b2d32 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void) if (intel_private.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { + __free_page(page); return -EINVAL; + } intel_private.scratch_page_dma = dma_addr; } else @@ -846,6 +848,7 @@ void intel_gtt_insert_page(dma_addr_t addr, unsigned int flags) { intel_private.driver->write_entry(addr, pg, flags); + readl(intel_private.gtt + pg); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } @@ -871,7 +874,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, j++; } } - wmb(); + readl(intel_private.gtt + j - 1); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } @@ -1105,6 +1108,7 @@ static void i9xx_cleanup(void) static 
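The intel-gtt hunks above swap a wmb() for a readl() of the just-written GTT entry before the chipset flush: a read from the same device forces the preceding posted MMIO writes to complete, which a CPU-side write barrier alone does not guarantee. A schematic of the pattern (the volatile pointers are stand-ins for ioremap'ed registers and are assumed to be mapped elsewhere):

#include <stdint.h>

/* Illustrative stand-ins for ioremap'ed mappings; assume both are
 * set up elsewhere before this is called. */
static volatile uint32_t *gtt;
static volatile uint32_t *flush_reg;

static void gtt_insert_and_flush(unsigned int pg, uint32_t entry)
{
	gtt[pg] = entry;	/* posted MMIO write: may linger in a buffer */
	(void)gtt[pg];		/* read back from the same device, forcing
				 * the posted write to land first */
	*flush_reg = 1;		/* the chipset flush now sees the entry */
}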
void i9xx_chipset_flush(void) { + wmb(); if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); } diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index a67430010aa6..5c7d3dfcfdd0 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -208,6 +208,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to enable SA power-domain\n"); + pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index e262445fed5f..f35f0f31f52a 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data, */ if (retval > 0) usleep_range(period_us, - period_us + min(1, period_us / 100)); + period_us + max(1, period_us / 100)); *(u32 *)data = readl(priv->io_base); retval += sizeof(u32); diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 40b9927c072c..89a8faa9b6cf 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, struct device *dev = &pdev->dev; int rc; - bt_bmc->irq = platform_get_irq(pdev, 0); - if (!bt_bmc->irq) - return -ENODEV; + bt_bmc->irq = platform_get_irq_optional(pdev, 0); + if (bt_bmc->irq < 0) + return bt_bmc->irq; rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED, DEVICE_NAME, bt_bmc); if (rc < 0) { dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq); - bt_bmc->irq = 0; + bt_bmc->irq = rc; return rc; } @@ -479,7 +479,7 @@ static int bt_bmc_probe(struct platform_device *pdev) bt_bmc_config_irq(bt_bmc, pdev); - if (bt_bmc->irq) { + if (bt_bmc->irq >= 0) { dev_info(dev, "Using IRQ %d\n", bt_bmc->irq); } else { dev_info(dev, "No IRQ; using timer\n"); @@ -505,7 +505,7 @@ static int bt_bmc_remove(struct platform_device *pdev) struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); misc_deregister(&bt_bmc->miscdev); - if (!bt_bmc->irq) + if (bt_bmc->irq < 0) del_timer_sync(&bt_bmc->poll_timer); return 0; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 0b6e7f8d9729..ac656a6d5daf 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -33,6 +33,7 @@ #include <linux/workqueue.h> #include <linux/uuid.h> #include <linux/nospec.h> +#include <linux/vmalloc.h> #define IPMI_DRIVER_VERSION "39.2" @@ -1170,7 +1171,7 @@ static void free_user_work(struct work_struct *work) remove_work); cleanup_srcu_struct(&user->release_barrier); - kfree(user); + vfree(user); } int ipmi_create_user(unsigned int if_num, @@ -1202,7 +1203,7 @@ int ipmi_create_user(unsigned int if_num, if (rv) return rv; - new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); + new_user = vzalloc(sizeof(*new_user)); if (!new_user) return -ENOMEM; @@ -1249,7 +1250,7 @@ int ipmi_create_user(unsigned int if_num, out_kfree: srcu_read_unlock(&ipmi_interfaces_srcu, index); - kfree(new_user); + vfree(new_user); return rv; } EXPORT_SYMBOL(ipmi_create_user); @@ -3207,8 +3208,8 @@ static void __get_guid(struct ipmi_smi *intf) if (rv) /* Send failed, no GUID available. */ bmc->dyn_guid_set = 0; - - wait_event(intf->waitq, bmc->dyn_guid_set != 2); + else + wait_event(intf->waitq, bmc->dyn_guid_set != 2); /* dyn_guid_set makes the guid data available. 
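The timeriomem-rng one-liner above replaces min() with max() in the usleep_range() upper bound. With min(1, period_us / 100) the extra slack could never exceed 1 us (and was 0 for periods under 100 us), so the range never gave the scheduler any real room to coalesce wakeups; max(1, period_us / 100) guarantees at least 1 us of slack and scales it to 1% of the period. A quick check of both bounds:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int periods[] = { 40, 1000, 125000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int p = periods[i];

		printf("period %6u us: old upper = %6u, new upper = %6u\n",
		       p, p + MIN(1u, p / 100), p + MAX(1u, p / 100));
	}
	return 0;
}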
*/ smp_rmb(); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index c950f0aea2fe..ebe330c2ecb0 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -1977,7 +1977,7 @@ static int try_smi_init(struct smi_info *new_smi) /* Do this early so it's available for logs. */ if (!new_smi->io.dev) { pr_err("IPMI interface added with no device\n"); - rv = EIO; + rv = -EIO; goto out_err; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 43dd0891ca1e..6b56bff9b68c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -31,11 +31,15 @@ #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/security.h> +#include <linux/pseudo_fs.h> +#include <uapi/linux/magic.h> +#include <linux/mount.h> #ifdef CONFIG_IA64 # include <linux/efi.h> #endif +#define DEVMEM_MINOR 1 #define DEVPORT_MINOR 4 static inline unsigned long size_inside_page(unsigned long start, @@ -805,12 +809,65 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) return ret; } +static struct inode *devmem_inode; + +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res) +{ + /* pairs with smp_store_release() in devmem_init_inode() */ + struct inode *inode = smp_load_acquire(&devmem_inode); + + /* + * Check that the initialization has completed. Losing the race + * is ok because it means drivers are claiming resources before + * the fs_initcall level of init and prevent /dev/mem from + * establishing mappings. + */ + if (!inode) + return; + + /* + * The expectation is that the driver has successfully marked + * the resource busy by this point, so devmem_is_allowed() + * should start returning false, however for performance this + * does not iterate the entire resource range. + */ + if (devmem_is_allowed(PHYS_PFN(res->start)) && + devmem_is_allowed(PHYS_PFN(res->end))) { + /* + * *cringe* iomem=relaxed says "go ahead, what's the + * worst that can happen?" + */ + return; + } + + unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); +} +#endif + static int open_port(struct inode *inode, struct file *filp) { + int rc; + if (!capable(CAP_SYS_RAWIO)) return -EPERM; - return security_locked_down(LOCKDOWN_DEV_MEM); + rc = security_locked_down(LOCKDOWN_DEV_MEM); + if (rc) + return rc; + + if (iminor(inode) != DEVMEM_MINOR) + return 0; + + /* + * Use a unified address space to have a single point to manage + * revocations when drivers want to take over a /dev/mem mapped + * range. + */ + inode->i_mapping = devmem_inode->i_mapping; + filp->f_mapping = inode->i_mapping; + + return 0; } #define zero_lseek null_lseek @@ -885,7 +942,7 @@ static const struct memdev { fmode_t fmode; } devlist[] = { #ifdef CONFIG_DEVMEM - [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, + [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, #endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, @@ -939,6 +996,48 @@ static char *mem_devnode(struct device *dev, umode_t *mode) static struct class *mem_class; +static int devmem_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, DEVMEM_MAGIC) ? 
0 : -ENOMEM; +} + +static struct file_system_type devmem_fs_type = { + .name = "devmem", + .owner = THIS_MODULE, + .init_fs_context = devmem_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static int devmem_init_inode(void) +{ + static struct vfsmount *devmem_vfs_mount; + static int devmem_fs_cnt; + struct inode *inode; + int rc; + + rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt); + if (rc < 0) { + pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc); + return rc; + } + + inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb); + if (IS_ERR(inode)) { + rc = PTR_ERR(inode); + pr_err("Cannot allocate inode for /dev/mem: %d\n", rc); + simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt); + return rc; + } + + /* + * Publish /dev/mem initialized. + * Pairs with smp_load_acquire() in revoke_devmem(). + */ + smp_store_release(&devmem_inode, inode); + + return 0; +} + static int __init chr_dev_init(void) { int minor; @@ -960,6 +1059,8 @@ static int __init chr_dev_init(void) */ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) continue; + if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0) + continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ff28c14af7e..ffd61aadb761 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1223,14 +1223,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies); - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; @@ -2149,7 +2149,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(&primary_crng, NULL); + crng_reseed(&primary_crng, &input_pool); crng_global_init_time = jiffies - 1; return 0; default: diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 6d81bb3bb503..896a3550fba9 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -777,18 +777,22 @@ static int __init tlclk_init(void) { int ret; - ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); - if (ret < 0) { - printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); - return ret; - } - tlclk_major = ret; + telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); if (!alarm_events) { ret = -ENOMEM; goto out1; } + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); + if (ret < 0) { + printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); + kfree(alarm_events); + return ret; + } + tlclk_major = ret; + /* Read telecom clock IRQ number (Set by BIOS) */ if (!request_region(TLCLK_BASE, 8, "telco_clock")) { printk(KERN_ERR "tlclk: request_region 0x%X failed.\n", @@ -796,7 +800,6 @@ static int __init tlclk_init(void) ret = -EBUSY; goto out2; } - telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? 
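devmem_init_inode() above publishes the inode with smp_store_release() while revoke_devmem() reads it with smp_load_acquire(), so any reader that observes a non-NULL pointer also observes the fully initialized object behind it. The same publish/consume idiom in portable C11 atomics, as a simplified analogue (the kernel primitives map roughly onto these memory orders):

#include <stdatomic.h>
#include <stdlib.h>

struct inode_stub { int initialized; };

static _Atomic(struct inode_stub *) devmem_inode;

static int devmem_init(void)
{
	struct inode_stub *inode = malloc(sizeof(*inode));

	if (!inode)
		return -1;
	inode->initialized = 1;
	/* release: all writes above become visible to acquiring readers */
	atomic_store_explicit(&devmem_inode, inode, memory_order_release);
	return 0;
}

static void revoke(void)
{
	/* acquire: pairs with the release store in devmem_init() */
	struct inode_stub *inode =
		atomic_load_explicit(&devmem_inode, memory_order_acquire);

	if (!inode)
		return;	/* lost the race with init: nothing to revoke yet */

	/* safe: inode->initialized is guaranteed to read as 1 here */
	(void)inode->initialized;
}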
*/ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", @@ -837,8 +840,8 @@ out3: release_region(TLCLK_BASE, 8); out2: kfree(alarm_events); -out1: unregister_chrdev(tlclk_major, "telco_clock"); +out1: return ret; } diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 7a0fca659b6a..8512ec76d526 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -99,20 +99,20 @@ static int tpm_read_log(struct tpm_chip *chip) * * If an event log is found then the securityfs files are setup to * export it to userspace, otherwise nothing is done. - * - * Returns -ENODEV if the firmware has no event log or securityfs is not - * supported. */ -int tpm_bios_log_setup(struct tpm_chip *chip) +void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; int log_version; int rc = 0; + if (chip->flags & TPM_CHIP_FLAG_VIRTUAL) + return; + rc = tpm_read_log(chip); if (rc < 0) - return rc; + return; log_version = rc; cnt = 0; @@ -158,13 +158,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip) cnt++; } - return 0; + return; err: - rc = PTR_ERR(chip->bios_dir[cnt]); chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); - return rc; + return; } void tpm_bios_log_teardown(struct tpm_chip *chip) diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c index 6bb023de17f1..e6cb9d525e30 100644 --- a/drivers/char/tpm/eventlog/efi.c +++ b/drivers/char/tpm/eventlog/efi.c @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip) { struct efi_tcg2_final_events_table *final_tbl = NULL; + int final_events_log_size = efi_tpm_final_log_size; struct linux_efi_tpm_eventlog *log_tbl; struct tpm_bios_log *log; u32 log_size; @@ -41,6 +42,11 @@ int tpm_read_log_efi(struct tpm_chip *chip) log_size = log_tbl->size; memunmap(log_tbl); + if (!log_size) { + pr_warn("UEFI TPM log area empty\n"); + return -EIO; + } + log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size, MEMREMAP_WB); if (!log_tbl) { @@ -61,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip) ret = tpm_log_version; if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || - efi_tpm_final_log_size == 0 || + final_events_log_size == 0 || tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) goto out; final_tbl = memremap(efi.tpm_final_log, - sizeof(*final_tbl) + efi_tpm_final_log_size, + sizeof(*final_tbl) + final_events_log_size, MEMREMAP_WB); if (!final_tbl) { pr_err("Could not map UEFI TPM final log\n"); @@ -75,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip) goto out; } - efi_tpm_final_log_size -= log_tbl->final_events_preboot_size; + /* + * The 'final events log' size excludes the 'final events preboot log' + * at its beginning. + */ + final_events_log_size -= log_tbl->final_events_preboot_size; + /* + * Allocate memory for the 'combined log' where we will append the + * 'final events log' to. + */ tmp = krealloc(log->bios_event_log, - log_size + efi_tpm_final_log_size, + log_size + final_events_log_size, GFP_KERNEL); if (!tmp) { kfree(log->bios_event_log); @@ -89,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip) log->bios_event_log = tmp; /* - * Copy any of the final events log that didn't also end up in the - * main log. Events can be logged in both if events are generated + * Append any of the 'final events log' that didn't also end up in the + * 'main log'. Events can be logged in both if events are generated * between GetEventLog() and ExitBootServices(). 
*/ memcpy((void *)log->bios_event_log + log_size, final_tbl->events + log_tbl->final_events_preboot_size, - efi_tpm_final_log_size); + final_events_log_size); + /* + * The size of the 'combined log' is the size of the 'main log' plus + * the size of the 'final events log'. + */ log->bios_event_log_end = log->bios_event_log + - log_size + efi_tpm_final_log_size; + log_size + final_events_log_size; out: memunmap(final_tbl); diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 739b1d9d16b6..2c96977ad080 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, u32 converted_event_size; u32 converted_event_type; + (*pos)++; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; @@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, ((v + sizeof(struct tcpa_event) + converted_event_size) > limit)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index b9aeda1cbcd7..e741b1157525 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, size_t event_size; void *marker; + (*pos)++; event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { @@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, if (((v + event_size) >= limit) || (event_size == 0)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 3d6d394a8661..1838039b0333 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } @@ -596,9 +591,7 @@ int tpm_chip_register(struct tpm_chip *chip) tpm_sysfs_add_device(chip); - rc = tpm_bios_log_setup(chip); - if (rc != 0 && rc != -ENODEV) - return rc; + tpm_bios_log_setup(chip); tpm_add_ppi(chip); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 87f449340202..1784530b8387 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, goto out; } - /* atomic tpm command send and result receive. We only hold the ops - * lock during this period so that the tpm can be unregistered even if - * the char dev is held open. - */ - if (tpm_try_get_ops(priv->chip)) { - ret = -EPIPE; - goto out; - } - priv->response_length = 0; priv->response_read = false; *off = 0; @@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); - tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } + /* atomic tpm command send and result receive. 
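The tpm1/tpm2 hunks above move the (*pos)++ to the top of the seq_file ->next() callbacks so the position advances even on the call that returns NULL; otherwise the core can restart at the same position and emit the final record twice. A toy iterator showing the contract (illustrative, not the kernel API):

#include <stddef.h>

/* Toy iterator over a fixed array, following the seq_file contract:
 * ->next() must advance *pos even on the call that returns NULL,
 * otherwise a restarted ->start(*pos) re-yields the last record. */
static int records[3] = { 10, 20, 30 };

static void *toy_next(void *v, long long *pos)
{
	(void)v;		/* the previous record; unused in this toy */
	(*pos)++;		/* advance first, unconditionally */
	if (*pos >= 3)
		return NULL;	/* end reached, but position moved on */
	return &records[*pos];
}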
We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + ret = -EPIPE; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index d7a3888ad80f..b86ee5b18855 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -322,7 +322,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, for (i = 0; i < chip->nr_allocated_banks; i++) { if (digests[i].alg_id != chip->allocated_banks[i].alg_id) { - rc = EINVAL; + rc = -EINVAL; goto out; } } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index a7fea3e0ca86..37f010421a36 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -177,6 +177,9 @@ struct tpm_header { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); void tpm2_flush_space(struct tpm_chip *chip); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, @@ -464,7 +467,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); -int tpm_bios_log_setup(struct tpm_chip *chip); +void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); int tpm_dev_common_init(void); void tpm_dev_common_exit(void); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 982d341d8837..784b8b3cb903 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; out: diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index e59f1f91d7f3..a9dcf31eadd2 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -22,6 +22,7 @@ #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" +#define TPM_CRB_MAX_RESOURCES 3 static const guid_t crb_acpi_start_guid = GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, @@ -91,7 +92,6 @@ enum crb_status { struct crb_priv { u32 sm; const char *hid; - void __iomem *iobase; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; u8 __iomem *cmd; @@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = { static int crb_check_resource(struct acpi_resource *ares, void *data) { - struct resource *io_res = data; + struct resource *iores_array = data; struct resource_win win; struct resource *res = &(win.res); + int i; if (acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win)) { - *io_res = *res; - io_res->name = NULL; + for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) { + if (resource_type(iores_array + i) != IORESOURCE_MEM) { + iores_array[i] = *res; + iores_array[i].name = NULL; + break; + } + } } return 1; } -static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, - struct resource *io_res, u64 start, u32 size) +static void __iomem *crb_map_res(struct device *dev, struct resource *iores, + void __iomem **iobase_ptr, u64 start, u32 size) { struct resource new_res = { .start = start, @@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, if (start != new_res.start) return (void __iomem *) ERR_PTR(-EINVAL); - if (!resource_contains(io_res, &new_res)) + if (!iores) return devm_ioremap_resource(dev, &new_res); - return priv->iobase + (new_res.start - io_res->start); + if (!*iobase_ptr) { + 
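tpm2_init_space() now clears context_buf after freeing it on the session_buf allocation failure, so a caller holding the half-initialized structure cannot double-free or dereference a stale pointer. The general shape of the fix, as a userspace sketch:

#include <stdlib.h>

struct space {
	void *context_buf;
	void *session_buf;
	unsigned int buf_size;
};

static int space_init(struct space *s, unsigned int buf_size)
{
	s->context_buf = calloc(1, buf_size);
	if (!s->context_buf)
		return -1;

	s->session_buf = calloc(1, buf_size);
	if (!s->session_buf) {
		free(s->context_buf);
		s->context_buf = NULL;	/* don't hand back a dangling pointer */
		return -1;
	}

	s->buf_size = buf_size;
	return 0;
}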
*iobase_ptr = devm_ioremap_resource(dev, iores); + if (IS_ERR(*iobase_ptr)) + return *iobase_ptr; + } + + return *iobase_ptr + (new_res.start - iores->start); } /* @@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res, static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { - struct list_head resources; - struct resource io_res; + struct list_head acpi_resource_list; + struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} }; + void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL}; struct device *dev = &device->dev; + struct resource *iores; + void __iomem **iobase_ptr; + int i; u32 pa_high, pa_low; u64 cmd_pa; u32 cmd_size; @@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, u32 rsp_size; int ret; - INIT_LIST_HEAD(&resources); - ret = acpi_dev_get_resources(device, &resources, crb_check_resource, - &io_res); + INIT_LIST_HEAD(&acpi_resource_list); + ret = acpi_dev_get_resources(device, &acpi_resource_list, + crb_check_resource, iores_array); if (ret < 0) return ret; - acpi_dev_free_resource_list(&resources); + acpi_dev_free_resource_list(&acpi_resource_list); - if (resource_type(&io_res) != IORESOURCE_MEM) { + if (resource_type(iores_array) != IORESOURCE_MEM) { dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; + } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) == + IORESOURCE_MEM) { + dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n"); + memset(iores_array + TPM_CRB_MAX_RESOURCES, + 0, sizeof(*iores_array)); + iores_array[TPM_CRB_MAX_RESOURCES].flags = 0; } - priv->iobase = devm_ioremap_resource(dev, &io_res); - if (IS_ERR(priv->iobase)) - return PTR_ERR(priv->iobase); + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (buf->control_address >= iores_array[i].start && + buf->control_address + sizeof(struct crb_regs_tail) - 1 <= + iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address, + sizeof(struct crb_regs_tail)); + + if (IS_ERR(priv->regs_t)) + return PTR_ERR(priv->regs_t); /* The ACPI IO region starts at the head area and continues to include * the control area, as one nice sane region except for some older @@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, */ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { - if (buf->control_address == io_res.start + + if (iores && + buf->control_address == iores->start + sizeof(*priv->regs_h)) - priv->regs_h = priv->iobase; + priv->regs_h = *iobase_ptr; else dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } @@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, if (ret) return ret; - priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, - sizeof(struct crb_regs_tail)); - if (IS_ERR(priv->regs_t)) { - ret = PTR_ERR(priv->regs_t); - goto out_relinquish_locality; - } - /* * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); cmd_pa = ((u64)pa_high << 32) | pa_low; - cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa, - ioread32(&priv->regs_t->ctrl_cmd_size)); + cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; iores_array[i].end; ++i) { + if (cmd_pa >= iores_array[i].start && + cmd_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size); dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", pa_high, pa_low, cmd_size); - priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); + priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) { ret = PTR_ERR(priv->cmd); goto out; @@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); rsp_pa = le64_to_cpu(__rsp_pa); - rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa, - ioread32(&priv->regs_t->ctrl_rsp_size)); + rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (rsp_pa >= iores_array[i].start && + rsp_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size); if (cmd_pa != rsp_pa) { - priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); + priv->rsp = crb_map_res(dev, iores, iobase_ptr, + rsp_pa, rsp_size); ret = PTR_ERR_OR_ZERO(priv->rsp); goto out; } diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 78cc52690177..64428dbed992 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2012 IBM Corporation + * Copyright (C) 2012-2020 IBM Corporation * * Author: Ashley Lai <ashleydlai@gmail.com> * @@ -133,6 +133,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) return len; } +/** + * ibmvtpm_crq_send_init - Send a CRQ initialize message + * @ibmvtpm: vtpm device struct + * + * Return: + * 0 on success. + * Non-zero on failure. + */ +static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) +{ + int rc; + + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "%s failed rc=%d\n", __func__, rc); + + return rc; +} + +/** + * tpm_ibmvtpm_resume - Resume from suspend + * + * @dev: device struct + * + * Return: Always 0. 
+ */ +static int tpm_ibmvtpm_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + int rc = 0; + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, + ibmvtpm->vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) { + dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); + return rc; + } + + rc = vio_enable_interrupts(ibmvtpm->vdev); + if (rc) { + dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); + return rc; + } + + rc = ibmvtpm_crq_send_init(ibmvtpm); + if (rc) + dev_err(dev, "Error send_init rc=%d\n", rc); + + return rc; +} + /** * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct @@ -146,6 +204,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + bool retry = true; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -179,18 +238,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; +again: rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { + /* + * H_CLOSED can be returned after LPM resume. Call + * tpm_ibmvtpm_resume() to re-enable the CRQ then retry + * ibmvtpm_send_crq() once before failing. + */ + if (rc == H_CLOSED && retry) { + tpm_ibmvtpm_resume(ibmvtpm->dev); + retry = false; + goto again; + } dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); - rc = 0; ibmvtpm->tpm_processing_cmd = false; - } else - rc = 0; + } spin_unlock(&ibmvtpm->rtce_lock); - return rc; + return 0; } static void tpm_ibmvtpm_cancel(struct tpm_chip *chip) @@ -268,26 +336,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) return rc; } -/** - * ibmvtpm_crq_send_init - Send a CRQ initialize message - * @ibmvtpm: vtpm device struct - * - * Return: - * 0 on success. - * Non-zero on failure. - */ -static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) -{ - int rc; - - rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); - if (rc != H_SUCCESS) - dev_err(ibmvtpm->dev, - "ibmvtpm_crq_send_init failed rc=%d\n", rc); - - return rc; -} - /** * tpm_ibmvtpm_remove - ibm vtpm remove entry point * @vdev: vio device struct @@ -400,44 +448,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); } -/** - * tpm_ibmvtpm_resume - Resume from suspend - * - * @dev: device struct - * - * Return: Always 0. 
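The ibmvtpm send path above retries exactly once when the hypervisor returns H_CLOSED (as it can after a partition migration), re-enabling the CRQ in between. The retry-once latch in isolation, with stubbed helpers (the H_* values here are illustrative, not the PAPR definitions):

#include <stdbool.h>

#define H_SUCCESS 0
#define H_CLOSED  2	/* illustrative values, not the PAPR definitions */

static int attempts;

static int send_crq(void)
{
	/* stub: fail with H_CLOSED once, as after a migration, then work */
	return attempts++ ? H_SUCCESS : H_CLOSED;
}

static void reenable_queue(void)
{
	/* stub for the H_ENABLE_CRQ / interrupt re-enable sequence */
}

static int send_with_retry(void)
{
	bool retry = true;
	int rc;

again:
	rc = send_crq();
	if (rc == H_CLOSED && retry) {
		reenable_queue();
		retry = false;	/* at most one retry, then fail normally */
		goto again;
	}
	return rc;
}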
- */ -static int tpm_ibmvtpm_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - int rc = 0; - - do { - if (rc) - msleep(100); - rc = plpar_hcall_norets(H_ENABLE_CRQ, - ibmvtpm->vdev->unit_address); - } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); - - if (rc) { - dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); - return rc; - } - - rc = vio_enable_interrupts(ibmvtpm->vdev); - if (rc) { - dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); - return rc; - } - - rc = ibmvtpm_crq_send_init(ibmvtpm); - if (rc) - dev_err(dev, "Error send_init rc=%d\n", rc); - - return rc; -} - static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) { return (status == 0); @@ -571,6 +581,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) */ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); + wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } @@ -618,6 +629,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, } crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); @@ -670,6 +682,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup; + if (!wait_event_timeout(ibmvtpm->crq_queue.wq, + ibmvtpm->rtce_buf != NULL, + HZ)) { + dev_err(dev, "CRQ response timed out\n"); + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index 7983f1a33267..b92aa7d3e93e 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue { struct ibmvtpm_crq *crq_addr; u32 index; u32 num_entry; + wait_queue_head_t wq; }; struct ibmvtpm_dev { diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index e7df342a317d..c722e3b3121a 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/kernel.h> +#include <linux/dmi.h> #include "tpm.h" #include "tpm_tis_core.h" @@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da return container_of(data, struct tpm_tis_tcg_phy, priv); } -static bool interrupts = true; -module_param(interrupts, bool, 0444); +static int interrupts = -1; +module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); static bool itpm; @@ -63,6 +64,28 @@ module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); #endif +static int tpm_tis_disable_irq(const struct dmi_system_id *d) +{ + if (interrupts == -1) { + pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident); + interrupts = 0; + } + + return 0; +} + +static const struct dmi_system_id tpm_tis_dmi_table[] = { + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkPad T490s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), + }, + }, + {} +}; + #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int has_hid(struct acpi_device *dev, const char *hid) { @@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) int irq = -1; int rc; + dmi_check_system(tpm_tis_dmi_table); + rc 
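tpm_tis changes the interrupts module parameter above from a default-true bool to an int defaulting to -1, giving the DMI quirk a way to tell "no user preference" from an explicit choice: the callback only forces polling while the parameter still holds its -1 sentinel. The decision logic in isolation:

#include <stdio.h>

static int interrupts = -1;	/* -1: auto, 0: force polling, 1: force IRQ */

/* Called when a quirky platform is recognized; only overrides "auto". */
static void quirk_disable_irq(const char *ident)
{
	if (interrupts == -1) {
		printf("%s detected: disabling interrupts\n", ident);
		interrupts = 0;
	}
}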
= check_acpi_tpm2(dev); if (rc) return rc; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index c3181ea9f271..7da35867b6ad 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l) if (rc < 0) return false; - if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID + | TPM_ACCESS_REQUEST_USE)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { priv->locality = l; return true; @@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l) return false; } -static bool locality_inactive(struct tpm_chip *chip, int l) -{ - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - int rc; - u8 access; - - rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); - if (rc < 0) - return false; - - if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) - == TPM_ACCESS_VALID) - return true; - - return false; -} - static int release_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - unsigned long stop, timeout; - long rc; tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); - stop = jiffies + chip->timeout_a; - - if (chip->flags & TPM_CHIP_FLAG_IRQ) { -again: - timeout = stop - jiffies; - if ((long)timeout <= 0) - return -1; - - rc = wait_event_interruptible_timeout(priv->int_queue, - (locality_inactive(chip, l)), - timeout); - - if (rc > 0) - return 0; - - if (rc == -ERESTARTSYS && freezing(current)) { - clear_thread_flag(TIF_SIGPENDING); - goto again; - } - } else { - do { - if (locality_inactive(chip, l)) - return 0; - tpm_msleep(TPM_TIMEOUT); - } while (time_before(jiffies, stop)); - } - return -1; + return 0; } static int request_locality(struct tpm_chip *chip, int l) @@ -433,6 +389,9 @@ static void disable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (priv->irq == 0) + return; + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) intmask = 0; @@ -659,12 +618,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip) const char *desc = "attempting to generate an interrupt"; u32 cap2; cap_t cap; + int ret; + /* TPM 2.0 */ if (chip->flags & TPM_CHIP_FLAG_TPM2) return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc); - else - return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, - 0); + + /* TPM 1.2 */ + ret = request_locality(chip, 0); + if (ret < 0) + return ret; + + ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0); + + release_locality(chip, 0); + + return ret; } /* Register the IRQ and issue a command that will cause an interrupt. If an @@ -970,11 +939,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, init_waitqueue_head(&priv->read_queue); init_waitqueue_head(&priv->int_queue); if (irq != -1) { - /* Before doing irq testing issue a command to the TPM in polling mode + /* + * Before doing irq testing issue a command to the TPM in polling mode * to make sure it works. May as well use that command to set the * proper timeouts for the driver. 
*/ - if (tpm_get_timeouts(chip)) { + + rc = request_locality(chip, 0); + if (rc < 0) + goto out_err; + + rc = tpm_get_timeouts(chip); + + release_locality(chip, 0); + + if (rc) { dev_err(dev, "Could not get TPM timeouts and durations\n"); rc = -ENODEV; goto out_err; @@ -983,9 +962,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); - if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); + + disable_interrupts(chip); + } } else { tpm_tis_probe_irq(chip, intmask); } @@ -1000,7 +982,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, return 0; out_err: - if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); tpm_tis_remove(chip); diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 7a0a7051a06f..eef0fb06ea83 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 56db949a7b70..e6258b4485dc 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty, return 0; } +/* + * TTY operations hangup function. + */ +static void tpk_hangup(struct tty_struct *tty) +{ + struct ttyprintk_port *tpkp = tty->driver_data; + + tty_port_hangup(&tpkp->port); +} + static const struct tty_operations ttyprintk_ops = { .open = tpk_open, .close = tpk_close, .write = tpk_write, .write_room = tpk_write_room, .ioctl = tpk_ioctl, + .hangup = tpk_hangup, }; static const struct tty_port_operations null_ops = { }; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 3259426f01dc..5eabbf73fdef 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size /* * Allocate DMA memory from ancestor. When a virtio * device is created by remoteproc, the DMA memory is - * associated with the grandparent device: - * vdev => rproc => platform-dev. + * associated with the parent device: + * virtioY => remoteprocX#vdevYbuffer. 
*/ - if (!vdev->dev.parent || !vdev->dev.parent->parent) + buf->dev = vdev->dev.parent; + if (!buf->dev) goto free_buf; - buf->dev = vdev->dev.parent->parent; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); @@ -2118,6 +2118,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -2130,6 +2131,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { #endif { 0 }, }; +MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static unsigned int rproc_serial_features[] = { }; @@ -2282,6 +2284,5 @@ static void __exit fini(void) module_init(init); module_exit(fini); -MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c index e2007ac4d235..0eb83a0b70bc 100644 --- a/drivers/clk/actions/owl-s500.c +++ b/drivers/clk/actions/owl-s500.c @@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0); static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0); /* divider clocks */ -static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); +static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0); /* factor clocks */ diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index 37c22667e831..4313ecb2af5b 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) return -EINVAL; regmap_read(regmap, AT91_CKGR_MOR, &tmp); - tmp &= ~MOR_KEY_MASK; if (index && !(tmp & AT91_PMC_MOSCSEL)) - regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); + tmp = AT91_PMC_MOSCSEL; else if (!index && (tmp & AT91_PMC_MOSCSEL)) - regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); + tmp = 0; + else + return 0; + + regmap_update_bits(regmap, AT91_CKGR_MOR, + AT91_PMC_MOSCSEL | MOR_KEY_MASK, + tmp | AT91_PMC_KEY); while (!clk_sam9x5_main_ready(regmap)) cpu_relax(); diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 22aede42a336..c0895c993cce 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -75,6 +75,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, tmp_parent_rate = req->rate * div; tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); + if (!tmp_parent_rate) + continue; + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); if (tmp_rate < req->rate) tmp_diff = req->rate - tmp_rate; @@ -211,7 +214,7 @@ _at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, usb->hw.init = &init; usb->regmap = regmap; - usb->usbs_mask = SAM9X5_USBS_MASK; + usb->usbs_mask = usbs_mask; hw = &usb->hw; ret = clk_hw_register(NULL, &usb->hw); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index 77398aefeb6d..e3f4c8f20223 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -162,7 +162,6 @@ static void __init sam9x60_pmc_setup(struct device_node *np) struct regmap *regmap; struct clk_hw *hw; int i; - bool bypass; i = of_property_match_string(np, "clock-names", "td_slck"); if (i < 0) @@ -197,10 +196,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) if 
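The at91 clk-usb hunk above skips a divider candidate whenever clk_hw_round_rate() comes back as 0, which that helper can return on error; without the guard, an error return gets treated as a real candidate rate of 0 in the best-diff comparison. A sketch of the guarded selection loop (round_parent_rate() is a made-up stub standing in for the round-rate call):

#include <limits.h>

/* Stub standing in for the round-rate helper; 0 mimics an error return. */
static unsigned long round_parent_rate(unsigned long rate)
{
	return rate >= 48000000UL ? rate : 0;
}

static unsigned long pick_best_rate(unsigned long target)
{
	unsigned long best = 0, best_diff = ULONG_MAX;

	for (unsigned long div = 1; div <= 16; div++) {
		unsigned long parent = round_parent_rate(target * div);

		if (!parent)
			continue;	/* error: don't let 0 become "best" */

		unsigned long rate = (parent + div / 2) / div;
		unsigned long diff = rate > target ?
				     rate - target : target - rate;

		if (diff < best_diff) {
			best = rate;
			best_diff = diff;
		}
	}
	return best;	/* 0 means no usable divider was found */
}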
(IS_ERR(hw)) goto err_free; - bypass = of_property_read_bool(np, "atmel,osc-bypass"); - - hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, - bypass); + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0); if (IS_ERR(hw)) goto err_free; @@ -237,9 +233,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) parent_names[0] = "pllack"; parent_names[1] = "upllck"; - parent_names[2] = "mainck"; - parent_names[3] = "mainck"; - hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4); + parent_names[2] = "main_osc"; + hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 802e488fd3c3..c5486537b928 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -314,6 +314,7 @@ struct bcm2835_cprman { struct device *dev; void __iomem *regs; spinlock_t regs_lock; /* spinlock for all clocks */ + unsigned int soc; /* * Real names of cprman clock parents looked up through @@ -525,6 +526,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw) A2W_PLL_CTRL_PRST_DISABLE; } +static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman, + const struct bcm2835_pll_data *data) +{ + /* + * On BCM2711 there isn't a pre-divisor available in the PLL feedback + * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed + * for VCO RANGE bits. + */ + if (cprman->soc & SOC_BCM2711) + return 0; + + return data->ana->fb_prediv_mask; +} + static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate, unsigned long parent_rate, u32 *ndiv, u32 *fdiv) @@ -582,7 +597,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw, ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT; pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT; using_prediv = cprman_read(cprman, data->ana_reg_base + 4) & - data->ana->fb_prediv_mask; + bcm2835_pll_get_prediv_mask(cprman, data); if (using_prediv) { ndiv *= 2; @@ -665,6 +680,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw); struct bcm2835_cprman *cprman = pll->cprman; const struct bcm2835_pll_data *data = pll->data; + u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data); bool was_using_prediv, use_fb_prediv, do_ana_setup_first; u32 ndiv, fdiv, a2w_ctl; u32 ana[4]; @@ -682,7 +698,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, for (i = 3; i >= 0; i--) ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4); - was_using_prediv = ana[1] & data->ana->fb_prediv_mask; + was_using_prediv = ana[1] & prediv_mask; ana[0] &= ~data->ana->mask0; ana[0] |= data->ana->set0; @@ -692,10 +708,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, ana[3] |= data->ana->set3; if (was_using_prediv && !use_fb_prediv) { - ana[1] &= ~data->ana->fb_prediv_mask; + ana[1] &= ~prediv_mask; do_ana_setup_first = true; } else if (!was_using_prediv && use_fb_prediv) { - ana[1] |= data->ana->fb_prediv_mask; + ana[1] |= prediv_mask; do_ana_setup_first = false; } else { do_ana_setup_first = true; } @@ -1320,8 +1336,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, pll->hw.init = &init; ret = devm_clk_hw_register(cprman->dev, &pll->hw); - if (ret) + if (ret) { + kfree(pll); return NULL; + } return &pll->hw; } @@ -1448,13 +1466,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return &clock->hw; } -static struct clk
*bcm2835_register_gate(struct bcm2835_cprman *cprman, +static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, const struct bcm2835_gate_data *data) { - return clk_register_gate(cprman->dev, data->name, data->parent, - CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, - cprman->regs + data->ctl_reg, - CM_GATE_BIT, 0, &cprman->regs_lock); + return clk_hw_register_gate(cprman->dev, data->name, data->parent, + CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, + cprman->regs + data->ctl_reg, + CM_GATE_BIT, 0, &cprman->regs_lock); } typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, @@ -2234,6 +2252,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cprman); cprman->onecell.num = asize; + cprman->soc = pdata->soc; hws = cprman->onecell.hws; for (i = 0; i < asize; i++) { diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c index 98e884957db8..911a29bd744e 100644 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -155,6 +155,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev) for (entry = table; entry->name; entry++) maxbit = max_t(u8, maxbit, entry->bit); + maxbit++; hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit), GFP_KERNEL); diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index b1318e6b655b..af957179b135 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -17,7 +17,8 @@ #define ASPEED_G6_NUM_CLKS 67 -#define ASPEED_G6_SILICON_REV 0x004 +#define ASPEED_G6_SILICON_REV 0x014 +#define CHIP_REVISION_ID GENMASK(23, 16) #define ASPEED_G6_RESET_CTRL 0x040 #define ASPEED_G6_RESET_CTRL2 0x050 @@ -57,10 +58,10 @@ static void __iomem *scu_g6_base; static const struct aspeed_gate_data aspeed_g6_gates[] = { /* clk rst name parent flags */ [ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ - [ASPEED_CLK_GATE_ECLK] = { 1, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */ + [ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */ [ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ /* vclk parent - dclk/d1clk/hclk/mclk */ - [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ + [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ /* From dpll */ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ @@ -130,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = { { 0 } }; +static const struct clk_div_table ast2600_emmc_extclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 4 }, + { 0x2, 6 }, + { 0x3, 8 }, + { 0x4, 10 }, + { 0x5, 12 }, + { 0x6, 14 }, + { 0x7, 16 }, + { 0 } +}; + static const struct clk_div_table ast2600_mac_div_table[] = { { 0x0, 4 }, { 0x1, 4 }, @@ -177,18 +190,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val) static struct clk_hw *ast2600_calc_apll(const char *name, u32 val) { unsigned int mult, div; + u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV); - if (val & BIT(20)) { - /* Pass through mode */ - mult = div = 1; + if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) { + if (val & BIT(24)) { + /* Pass through mode */ + mult = div = 1; + } else { + /* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */ + u32 m = val & 0x1fff; + u32 n = (val >> 13) & 0x3f; + u32 p = (val >> 19) & 0xf; + + mult = (m + 1); + div = (n + 1) * (p + 1); + } } else { - 
/* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */ - u32 m = (val >> 5) & 0x3f; - u32 od = (val >> 4) & 0x1; - u32 n = val & 0xf; + if (val & BIT(20)) { + /* Pass through mode */ + mult = div = 1; + } else { + /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */ + u32 m = (val >> 5) & 0x3f; + u32 od = (val >> 4) & 0x1; + u32 n = val & 0xf; - mult = (2 - od) * (m + 2); - div = n + 1; + mult = (2 - od) * (m + 2); + div = n + 1; + } } return clk_hw_register_fixed_factor(NULL, name, "clkin", 0, mult, div); @@ -389,6 +418,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev, return hw; } +static const char *const emmc_extclk_parent_names[] = { + "emmc_extclk_hpll_in", + "mpll", +}; + static const char * const vclk_parent_names[] = { "dpll", "d1pll", @@ -458,16 +492,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; - /* EMMC ext clock divider */ - hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, - &aspeed_g6_clk_lock); + /* EMMC ext clock */ + hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", + 0, 1, 2); if (IS_ERR(hw)) return PTR_ERR(hw); - hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, - ast2600_div_table, - &aspeed_g6_clk_lock); + + hw = clk_hw_register_mux(dev, "emmc_extclk_mux", + emmc_extclk_parent_names, + ARRAY_SIZE(emmc_extclk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, + 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, + 15, 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_divider_table(dev, "emmc_extclk", + "emmc_extclk_gate", 0, + scu_g6_base + + ASPEED_G6_CLK_SELECTION1, 12, + 3, 0, ast2600_emmc_extclk_div_table, + &aspeed_g6_clk_lock); if (IS_ERR(hw)) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; @@ -599,14 +649,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = { 2, 2, 3, 5, }; -static const u32 ast2600_a1_axi_ahb_div_table[] = { - 4, 6, 2, 4, +static const u32 ast2600_a1_axi_ahb_div0_tbl[] = { + 3, 2, 3, 4, +}; + +static const u32 ast2600_a1_axi_ahb_div1_tbl[] = { + 3, 4, 6, 8, +}; + +static const u32 ast2600_a1_axi_ahb200_tbl[] = { + 3, 4, 3, 4, 2, 2, 2, 2, }; static void __init aspeed_g6_cc(struct regmap *map) { struct clk_hw *hw; - u32 val, div, chip_id, axi_div, ahb_div; + u32 val, div, divbits, chip_id, axi_div, ahb_div; clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); @@ -636,11 +694,22 @@ static void __init aspeed_g6_cc(struct regmap *map) else axi_div = 2; + divbits = (val >> 11) & 0x3; regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); - if (chip_id & BIT(16)) - ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3]; - else + if (chip_id & BIT(16)) { + if (!divbits) { + ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; + if (val & BIT(16)) + ahb_div *= 2; + } else { + if (val & BIT(16)) + ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits]; + else + ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits]; + } + } else { ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3]; + } hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div); aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw; diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 
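The two APLL formulas above differ between silicon revisions, and both are easy to sanity-check numerically. A quick standalone check; the m/n/p/od field values are assumptions picked so both revisions land on the same output frequency, not values read from hardware:

#include <stdio.h>

int main(void)
{
	unsigned long long clkin = 25000000ULL;

	/* A2 and later: F = 25 MHz * [(m + 1) / (n + 1)] / (p + 1) */
	unsigned int m = 95, n = 1, p = 1;   /* assumed field values */
	printf("rev >= A2: %llu Hz\n", clkin * (m + 1) / ((n + 1) * (p + 1)));

	/* A0/A1: F = 25 MHz * (2 - od) * [(m + 2) / (n + 1)] */
	unsigned int m0 = 46, od = 1, n0 = 1; /* assumed field values */
	printf("A0/A1:     %llu Hz\n", clkin * (2 - od) * (m0 + 2) / (n0 + 1));
	return 0;
}

Both cases print 600000000 Hz, confirming the two encodings can describe the same frequency.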
2ce370c804aa..f19994ce7ca1 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -195,6 +195,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) return ret; err_reg: + of_node_put(s2mps11_clks[0].clk_np); while (--i >= 0) clkdev_drop(s2mps11_clks[i].lookup); diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 886f7c5df51a..e3cdb4a282fe 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = { static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) { int ret; + unsigned long min_rate, max_rate; + struct clk_init_data init = { .flags = CLK_GET_RATE_NOCACHE, .num_parents = 0, @@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) sclk->hw.init = &init; ret = devm_clk_hw_register(dev, &sclk->hw); - if (!ret) - clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate, - sclk->info->range.max_rate); + if (ret) + return ret; + + if (sclk->info->rate_discrete) { + int num_rates = sclk->info->list.num_rates; + + if (num_rates <= 0) + return -EINVAL; + + min_rate = sclk->info->list.rates[0]; + max_rate = sclk->info->list.rates[num_rates - 1]; + } else { + min_rate = sclk->info->range.min_rate; + max_rate = sclk->info->range.max_rate; + } + + clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate); return ret; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 62d0fc486d3a..6ff87cd86712 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core) return 0; ret = pm_runtime_get_sync(core->dev); - return ret < 0 ? ret : 0; + if (ret < 0) { + pm_runtime_put_noidle(core->dev); + return ret; + } + return 0; } static void clk_pm_runtime_put(struct clk_core *core) @@ -2642,12 +2646,14 @@ static int clk_core_get_phase(struct clk_core *core) { int ret; - clk_prepare_lock(); + lockdep_assert_held(&prepare_lock); + if (!core->ops->get_phase) + return 0; + /* Always try to update cached phase if possible */ - if (core->ops->get_phase) - core->phase = core->ops->get_phase(core->hw); - ret = core->phase; - clk_prepare_unlock(); + ret = core->ops->get_phase(core->hw); + if (ret >= 0) + core->phase = ret; return ret; } @@ -2661,10 +2667,16 @@ static int clk_core_get_phase(struct clk_core *core) */ int clk_get_phase(struct clk *clk) { + int ret; + if (!clk) return 0; - return clk_core_get_phase(clk->core); + clk_prepare_lock(); + ret = clk_core_get_phase(clk->core); + clk_prepare_unlock(); + + return ret; } EXPORT_SYMBOL_GPL(clk_get_phase); @@ -2878,13 +2890,21 @@ static struct hlist_head *orphan_list[] = { static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) { - seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", + int phase; + + seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", level * 3 + 1, "", 30 - level * 3, c->name, c->enable_count, c->prepare_count, c->protect_count, - clk_core_get_rate(c), clk_core_get_accuracy(c), - clk_core_get_phase(c), - clk_core_get_scaled_duty_cycle(c, 100000)); + clk_core_get_rate(c), clk_core_get_accuracy(c)); + + phase = clk_core_get_phase(c); + if (phase >= 0) + seq_printf(s, "%5d", phase); + else + seq_puts(s, "-----"); + + seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000)); } static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, @@ -2921,6 +2941,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary); static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) 
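The clk-scmi hunk above derives the registered rate range from whichever description the firmware supplied. In the discrete case it takes the first and last entries of the rate table, which relies on the SCMI rate list being sorted ascending. A condensed sketch of that selection, with illustrative type names:

#include <stddef.h>

struct rate_info {
	int rate_discrete;          /* firmware reported a table, not a range */
	const unsigned long *rates; /* ascending, per the SCMI spec */
	size_t num_rates;
	unsigned long min_rate, max_rate;
};

/* Returns 0 and fills *min/*max, or -1 for an empty discrete table. */
static int pick_rate_range(const struct rate_info *info,
                           unsigned long *min, unsigned long *max)
{
	if (info->rate_discrete) {
		if (info->num_rates == 0)
			return -1;
		*min = info->rates[0];
		*max = info->rates[info->num_rates - 1];
	} else {
		*min = info->min_rate;
		*max = info->max_rate;
	}
	return 0;
}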
{ + int phase; unsigned long min_rate, max_rate; clk_core_get_boundaries(c, &min_rate, &max_rate); @@ -2934,7 +2955,9 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"min_rate\": %lu,", min_rate); seq_printf(s, "\"max_rate\": %lu,", max_rate); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); - seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); + phase = clk_core_get_phase(c); + if (phase >= 0) + seq_printf(s, "\"phase\": %d,", phase); seq_printf(s, "\"duty_cycle\": %u", clk_core_get_scaled_duty_cycle(c, 100000)); } @@ -3375,14 +3398,11 @@ static int __clk_core_init(struct clk_core *core) core->accuracy = 0; /* - * Set clk's phase. + * Set clk's phase by clk_core_get_phase() caching the phase. * Since a phase is by definition relative to its parent, just * query the current clock phase, or just assume it's in phase. */ - if (core->ops->get_phase) - core->phase = core->ops->get_phase(core->hw); - else - core->phase = 0; + clk_core_get_phase(core); /* * Set clk's duty cycle. @@ -3432,6 +3452,9 @@ static int __clk_core_init(struct clk_core *core) out: clk_pm_runtime_put(core); unlock: + if (ret) + hlist_del_init(&core->child_node); + clk_prepare_unlock(); if (!ret) @@ -4128,20 +4151,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) /* search the list of notifiers for this clk */ list_for_each_entry(cn, &clk_notifier_list, node) if (cn->clk == clk) - break; + goto found; /* if clk wasn't in the notifier list, allocate new clk_notifier */ - if (cn->clk != clk) { - cn = kzalloc(sizeof(*cn), GFP_KERNEL); - if (!cn) - goto out; + cn = kzalloc(sizeof(*cn), GFP_KERNEL); + if (!cn) + goto out; - cn->clk = clk; - srcu_init_notifier_head(&cn->notifier_head); + cn->clk = clk; + srcu_init_notifier_head(&cn->notifier_head); - list_add(&cn->node, &clk_notifier_list); - } + list_add(&cn->node, &clk_notifier_list); +found: ret = srcu_notifier_chain_register(&cn->notifier_head, nb); clk->core->notifier_count++; @@ -4166,32 +4188,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register); */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) { - struct clk_notifier *cn = NULL; - int ret = -EINVAL; + struct clk_notifier *cn; + int ret = -ENOENT; if (!clk || !nb) return -EINVAL; clk_prepare_lock(); - list_for_each_entry(cn, &clk_notifier_list, node) - if (cn->clk == clk) + list_for_each_entry(cn, &clk_notifier_list, node) { + if (cn->clk == clk) { + ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); + + clk->core->notifier_count--; + + /* XXX the notifier code should handle this better */ + if (!cn->notifier_head.head) { + srcu_cleanup_notifier_head(&cn->notifier_head); + list_del(&cn->node); + kfree(cn); + } break; - - if (cn->clk == clk) { - ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); - - clk->core->notifier_count--; - - /* XXX the notifier code should handle this better */ - if (!cn->notifier_head.head) { - srcu_cleanup_notifier_head(&cn->notifier_head); - list_del(&cn->node); - kfree(cn); } - - } else { - ret = -ENOENT; } clk_prepare_unlock(); diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 1ac11b6a47a3..2ec48d030fda 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev, parent_name = postdiv_name; } - pllen = kzalloc(sizeof(*pllout), GFP_KERNEL); + pllen = kzalloc(sizeof(*pllen), GFP_KERNEL); if (!pllen) { ret = -ENOMEM; goto err_unregister_postdiv; diff --git 
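The clk_notifier_register() rewrite above removes a fragile idiom: after a full list_for_each_entry() walk the cursor no longer points at a real entry, so the old post-loop test of cn->clk inspected the list head reinterpreted as a notifier entry. The new code jumps to the registration as soon as a match is found and only allocates on fall-through. The same search-or-allocate shape, reduced to a minimal kernel-style sketch (names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct entry {
	struct list_head node;
	void *key;
};

/* Find the entry for @key on @head, allocating and linking one if absent. */
static struct entry *get_or_add_entry(struct list_head *head, void *key)
{
	struct entry *e;

	list_for_each_entry(e, head, node)
		if (e->key == key)
			return e; /* found: cursor is still a valid entry here */

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e) {
		e->key = key;
		list_add(&e->node, head);
	}
	return e;
}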
a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 41fc9c63356e..1846bd879dd7 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", - "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; + "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", }; static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", - "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; + "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", }; static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c index a03bbed662c6..2a46b9b61b46 100644 --- a/drivers/clk/imx/clk-pfdv2.c +++ b/drivers/clk/imx/clk-pfdv2.c @@ -139,6 +139,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; u8 frac; + if (!rate) + return -EINVAL; + + /* PFD can NOT change rate without gating */ + WARN_ON(clk_pfdv2_is_enabled(hw)); + tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = tmp; diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 6e963031cd87..7490d4f4d936 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -393,15 +393,21 @@ static unsigned int ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info, unsigned int div) { - unsigned int i; + unsigned int i, best_i = 0, best = (unsigned int)-1; for (i = 0; i < (1 << clk_info->div.bits) && clk_info->div.div_table[i]; i++) { - if (clk_info->div.div_table[i] >= div) - return i; + if (clk_info->div.div_table[i] >= div && + clk_info->div.div_table[i] < best) { + best = clk_info->div.div_table[i]; + best_i = i; + + if (div == best) + break; + } } - return i - 1; + return best_i; } static unsigned diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index 956dd653a43d..c051ecba5cf8 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np) cgu = ingenic_cgu_new(jz4770_cgu_clocks, ARRAY_SIZE(jz4770_cgu_clocks), np); - if (!cgu) + if (!cgu) { pr_err("%s: failed to initialise CGU\n", __func__); + return; + } retval = ingenic_cgu_register_clocks(cgu); if (retval) diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index a1a5f9cb439e..926696fba3f4 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate, u8 prescale; if (req_rate > rate) - return -EINVAL; + return rate; prescale = ingenic_tcu_get_prescale(rate, req_rate); diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 7edf8c8432b6..64ea895f1a7d 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider) np = of_find_node_with_property(np, *clk_name); if (!np) { clk_name++; - break; + continue; } if (!of_device_is_available(np)) diff --git 
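The clk-pfdv2 guard above rejects a zero rate before it is used as a divisor in do_div(tmp, rate). For context, the PFD output is parent * 18 / frac, so the code solves frac = parent * 18 / rate with round-to-nearest via the "+ rate / 2" term. A quick standalone check with example frequencies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent = 480000000, rate = 392727272;

	uint64_t tmp = parent * 18 + rate / 2; /* + rate/2 rounds to nearest */
	uint64_t frac = tmp / rate;            /* stands in for do_div(tmp, rate) */

	printf("frac = %llu, actual rate = %llu Hz\n",
	       (unsigned long long)frac,
	       (unsigned long long)(parent * 18 / frac));
	return 0;
}

This prints frac = 22 and an actual rate of 392727272 Hz, i.e. the requested rate is hit exactly by the rounded fractional divider.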
a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c index 608a9a6621a3..00920182bbe6 100644 --- a/drivers/clk/mediatek/clk-mt6779.c +++ b/drivers/clk/mediatek/clk-mt6779.c @@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = { "pwm_sel", 19), GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", "pwm_sel", 21), + GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0", + "uart_sel", 22), GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", "uart_sel", 23), GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 76f9cd039195..14e127e9a740 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, spinlock_t *lock) { struct mtk_clk_mux *clk_mux; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk *clk; clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index dabeb435d067..3f8dcdcdde49 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -103,6 +103,7 @@ config COMMON_CLK_G12A select COMMON_CLK_MESON_AO_CLKC select COMMON_CLK_MESON_EE_CLKC select COMMON_CLK_MESON_CPU_DYNDIV + select COMMON_CLK_MESON_VID_PLL_DIV select MFD_SYSCON help Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2 diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 3a5853ca98c6..e8df254f8085 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -363,13 +363,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_regmap *clk = to_clk_regmap(hw); struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); - unsigned int enabled, m, n, frac = 0, ret; + unsigned int enabled, m, n, frac = 0; unsigned long old_rate; + int ret; if (parent_rate == 0 || rate == 0) return -EINVAL; - old_rate = rate; + old_rate = clk_hw_get_rate(hw); ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll); if (ret) @@ -391,7 +392,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (!enabled) return 0; - if (meson_clk_pll_enable(hw)) { + ret = meson_clk_pll_enable(hw); + if (ret) { pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", __func__, old_rate); /* @@ -403,7 +405,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, meson_clk_pll_set_rate(hw, old_rate, parent_rate); } - return 0; + return ret; } /* diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index d2760a021301..3143e16065de 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = { &g12a_fclk_div2_div.hw }, .num_parents = 1, + /* + * Similar to fclk_div3, it seems that this clock is used by + * the resident firmware and is required by the platform to + * operate correctly. 
+ * Until the following conditions are met, we need this clock to + * be marked as critical: + * a) Mark the clock used by a firmware resource, if possible + * b) CCF has a clock hand-off mechanism to make sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 8856ce476ccf..082178a0f41a 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1071,7 +1071,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = { * Meson8m2: vid2_pll */ .parent_hws = (const struct clk_hw *[]) { - &meson8b_hdmi_pll_dco.hw + &meson8b_hdmi_pll_lvds_out.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1207,7 +1207,7 @@ static struct clk_regmap meson8b_vclk_in_en = { static struct clk_regmap meson8b_vclk_div1_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 0, }, .hw.init = &(struct clk_init_data){ @@ -1237,7 +1237,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = { static struct clk_regmap meson8b_vclk_div2_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 1, }, .hw.init = &(struct clk_init_data){ @@ -1267,7 +1267,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = { static struct clk_regmap meson8b_vclk_div4_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 2, }, .hw.init = &(struct clk_init_data){ @@ -1297,7 +1297,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = { static struct clk_regmap meson8b_vclk_div6_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 3, }, .hw.init = &(struct clk_init_data){ @@ -1327,7 +1327,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = { static struct clk_regmap meson8b_vclk_div12_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 4, }, .hw.init = &(struct clk_init_data){ @@ -1910,6 +1910,13 @@ static struct clk_regmap meson8b_mali = { }, }; +static const struct reg_sequence meson8m2_gp_pll_init_regs[] = { + { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 }, + { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 }, + { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 }, + { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 }, +}; + static const struct pll_params_table meson8m2_gp_pll_params_table[] = { PLL_PARAMS(182, 3), { /* sentinel */ }, @@ -1943,6 +1950,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = { .width = 1, }, .table = meson8m2_gp_pll_params_table, + .init_regs = meson8m2_gp_pll_init_regs, + .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "gp_pll_dco", @@ -3491,54 +3500,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { static const struct meson8b_clk_reset_line { u32 reg; u8 bit_idx; + bool active_low; } meson8b_clk_reset_bits[] = { [CLKC_RESET_L2_CACHE_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 29, + .active_low = false, }, [CLKC_RESET_SCU_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx =
28, + .active_low = false, }, [CLKC_RESET_CPU3_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 27, + .active_low = false, }, [CLKC_RESET_CPU2_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 26, + .active_low = false, }, [CLKC_RESET_CPU1_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 25, + .active_low = false, }, [CLKC_RESET_CPU0_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 24, + .active_low = false, }, [CLKC_RESET_A5_GLOBAL_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 18, + .active_low = false, }, [CLKC_RESET_A5_AXI_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 17, + .active_low = false, }, [CLKC_RESET_A5_ABP_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 16, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = { - .reg = HHI_VID_CLK_CNTL, .bit_idx = 15 + .reg = HHI_VID_CLK_CNTL, + .bit_idx = 15, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 7, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 3, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 1, + .active_low = true, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 0, + .active_low = true, }, }; @@ -3547,22 +3589,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, { struct meson8b_clk_reset *meson8b_clk_reset = container_of(rcdev, struct meson8b_clk_reset, reset); - unsigned long flags; const struct meson8b_clk_reset_line *reset; + unsigned int value = 0; + unsigned long flags; if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) return -EINVAL; reset = &meson8b_clk_reset_bits[id]; + if (assert != reset->active_low) + value = BIT(reset->bit_idx); + spin_lock_irqsave(&meson_clk_lock, flags); - if (assert) - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), BIT(reset->bit_idx)); - else - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), 0); + regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, + BIT(reset->bit_idx), value); spin_unlock_irqrestore(&meson_clk_lock, flags); diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index c889fbeec30f..c91fb07fcb65 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -20,6 +20,10 @@ * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf */ #define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ +#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ +#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ +#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */ +#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in 
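With the new active_low field, meson8b_clk_reset_update() collapses the four assert/polarity combinations into the single inequality "assert != reset->active_low". Enumerating the truth table shows it is exactly the XOR one would expect for a reset line of either polarity:

#include <stdio.h>

int main(void)
{
	/* value written to the reset bit: assert XOR active_low */
	for (int assert = 0; assert <= 1; assert++)
		for (int active_low = 0; active_low <= 1; active_low++)
			printf("assert=%d active_low=%d -> bit=%d\n",
			       assert, active_low, assert != active_low);
	return 0;
}

Asserting an active-low line writes 0, deasserting it writes 1, and the active-high cases are the mirror image, which is what the single regmap_update_bits() call now encodes.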
data sheet */ #define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ #define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ #define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index 415e6906a113..76cd06f4ed62 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON config ARMADA_AP_CPU_CLK bool + select ARMADA_AP_CP_HELPER config ARMADA_CP110_SYSCON bool diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index 5fc6d486a381..07e47c34e073 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -84,6 +84,7 @@ struct clk_pm_cpu { void __iomem *reg_div; u8 shift_div; struct regmap *nb_pm_base; + unsigned long l1_expiration; }; #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw) @@ -438,33 +439,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw) return val; } -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index) -{ - struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); - struct regmap *base = pm_cpu->nb_pm_base; - int load_level; - - /* - * We set the clock parent only if the DVFS is available but - * not enabled. - */ - if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base)) - return -EINVAL; - - /* Set the parent clock for all the load level */ - for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) { - unsigned int reg, mask, val, - offset = ARMADA_37XX_NB_TBG_SEL_OFF; - - armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset); - - val = index << offset; - mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset; - regmap_update_bits(base, reg, mask, val); - } - return 0; -} - static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -512,8 +486,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, } /* - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz - * respectively) to L0 frequency (1.2 Ghz) requires a significant + * Workaround when base CPU frequency is 1000 or 1200 MHz + * + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz + * respectively) to L0 frequency (1/1.2 GHz) requires a significant * amount of time to let VDD stabilize to the appropriate * voltage. This amount of time is large enough that it cannot be * covered by the hardware countdown register. Due to this, the CPU @@ -523,26 +499,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, * To work around this problem, we prevent switching directly from the * L2/L3 frequencies to the L0 frequency, and instead switch to the L1 * frequency in-between. The sequence therefore becomes: - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ) + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz) * 2. Sleep 20ms for stabilizing VDD voltage - * 3. Then switch from L1(600MHZ) to L0(1200Mhz). + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
*/ -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base) +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu, + unsigned int new_level, unsigned long rate, + struct regmap *base) { unsigned int cur_level; - if (rate != 1200 * 1000 * 1000) - return; - regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level); cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; - if (cur_level <= ARMADA_37XX_DVFS_LOAD_1) + + if (cur_level == new_level) return; + /* + * System wants to go to L1 on its own. If we are going from L2/L3, + * remember when 20ms will expire. If from L0, set the value so that + * next switch to L0 won't have to wait. + */ + if (new_level == ARMADA_37XX_DVFS_LOAD_1) { + if (cur_level == ARMADA_37XX_DVFS_LOAD_0) + pm_cpu->l1_expiration = jiffies; + else + pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20); + return; + } + + /* + * If we are setting to L2/L3, just invalidate L1 expiration time, + * sleeping is not needed. + */ + if (rate < 1000*1000*1000) + goto invalidate_l1_exp; + + /* + * We are going to L0 with rate >= 1GHz. Check whether we have been at + * L1 for long enough time. If not, go to L1 for 20ms. + */ + if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration) + goto invalidate_l1_exp; + regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD, ARMADA_37XX_NB_CPU_LOAD_MASK, ARMADA_37XX_DVFS_LOAD_1); msleep(20); + +invalidate_l1_exp: + pm_cpu->l1_expiration = 0; } static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, @@ -576,7 +582,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, reg = ARMADA_37XX_NB_CPU_LOAD; mask = ARMADA_37XX_NB_CPU_LOAD_MASK; - clk_pm_cpu_set_rate_wa(rate, base); + /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */ + if (parent_rate >= 1000*1000*1000) + clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base); regmap_update_bits(base, reg, mask, load_level); @@ -590,7 +598,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops clk_pm_cpu_ops = { .get_parent = clk_pm_cpu_get_parent, - .set_parent = clk_pm_cpu_set_parent, .round_rate = clk_pm_cpu_round_rate, .set_rate = clk_pm_cpu_set_rate, .recalc_rate = clk_pm_cpu_recalc_rate, diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c index e9e306d4e9af..41271351cf1f 100644 --- a/drivers/clk/mvebu/armada-37xx-xtal.c +++ b/drivers/clk/mvebu/armada-37xx-xtal.c @@ -13,8 +13,8 @@ #include <linux/platform_device.h> #include <linux/regmap.h> -#define NB_GPIO1_LATCH 0xC -#define XTAL_MODE BIT(31) +#define NB_GPIO1_LATCH 0x8 +#define XTAL_MODE BIT(9) static int armada_3700_xtal_clock_probe(struct platform_device *pdev) { diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c index 45cfc57bff92..af6ac17c7dae 100644 --- a/drivers/clk/qcom/a53-pll.c +++ b/drivers/clk/qcom/a53-pll.c @@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = { { .compatible = "qcom,msm8916-a53pll" }, { } }; +MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table); static struct platform_driver qcom_a53pll_driver = { .probe = qcom_a53pll_probe, diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 055318f97991..a69f53e435ed 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -55,7 +55,6 @@ #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS]) #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE]) #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC]) -#define 
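The reworked Armada 37xx workaround replaces the unconditional 20 ms sleep with per-clock bookkeeping: entering L1 records when the voltage-settling dwell will have elapsed, and a later switch to L0 only sleeps if that deadline has not passed yet. A condensed kernel-style sketch of the idea; the load-level register writes are omitted, the helper names are illustrative, and the wrap-safe time_before() stands in for the plain jiffies comparison used in the hunk:

#include <linux/delay.h>
#include <linux/jiffies.h>

static unsigned long l1_expiration; /* 0 means no dwell is pending */

/* Record the point at which 20 ms in L1 will have elapsed. Coming down
 * from L0 the voltage is already high, so mark the dwell as satisfied. */
static void note_l1_entry(bool coming_from_l0)
{
	l1_expiration = coming_from_l0 ? jiffies
				       : jiffies + msecs_to_jiffies(20);
}

/* Before switching up to L0: dwell in L1 only if we have not already. */
static void ensure_l1_dwell(void)
{
	if (!l1_expiration || time_before(jiffies, l1_expiration))
		msleep(20);
	l1_expiration = 0;
}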
PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL]) const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [CLK_ALPHA_PLL_TYPE_DEFAULT] = { @@ -114,7 +113,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_STATUS] = 0x30, [PLL_OFF_OPMODE] = 0x38, [PLL_OFF_ALPHA_VAL] = 0x40, - [PLL_OFF_CAL_VAL] = 0x44, }, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 96a36f6ff667..d7586e26acd8 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state) != (c->aggr_state & BIT(state)); } +static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state, + struct tcs_cmd *cmd, bool wait) +{ + if (wait) + return rpmh_write(c->dev, state, cmd, 1); + + return rpmh_write_async(c->dev, state, cmd, 1); +} + static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) { struct tcs_cmd cmd = { 0 }; u32 cmd_state, on_val; enum rpmh_state state = RPMH_SLEEP_STATE; int ret; + bool wait; cmd.addr = c->res_addr; cmd_state = c->aggr_state; @@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) if (cmd_state & BIT(state)) cmd.data = on_val; - ret = rpmh_write_async(c->dev, state, &cmd, 1); + wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE; + ret = clk_rpmh_send(c, state, &cmd, wait); if (ret) { dev_err(c->dev, "set %s state of %s failed: (%d)\n", !state ? "sleep" : @@ -267,7 +278,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) cmd.addr = c->res_addr; cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); - ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); + ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable); if (ret) { dev_err(c->dev, "set active state of %s failed: (%d)\n", c->res_name, ret); diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 4e329a7baf2b..17e4a5a2a9fd 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -260,7 +260,7 @@ static struct clk_pll gpll0 = { .l_reg = 0x21004, .m_reg = 0x21008, .n_reg = 0x2100c, - .config_reg = 0x21014, + .config_reg = 0x21010, .mode_reg = 0x21000, .status_reg = 0x2101c, .status_bit = 17, @@ -287,7 +287,7 @@ static struct clk_pll gpll1 = { .l_reg = 0x20004, .m_reg = 0x20008, .n_reg = 0x2000c, - .config_reg = 0x20014, + .config_reg = 0x20010, .mode_reg = 0x20000, .status_reg = 0x2001c, .status_bit = 17, @@ -314,7 +314,7 @@ static struct clk_pll gpll2 = { .l_reg = 0x4a004, .m_reg = 0x4a008, .n_reg = 0x4a00c, - .config_reg = 0x4a014, + .config_reg = 0x4a010, .mode_reg = 0x4a000, .status_reg = 0x4a01c, .status_bit = 17, @@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = { .l_reg = 0x23004, .m_reg = 0x23008, .n_reg = 0x2300c, - .config_reg = 0x23014, + .config_reg = 0x23010, .mode_reg = 0x23000, .status_reg = 0x2301c, .status_bit = 17, diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 091acd59c1d6..752f267b2881 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = { static struct clk_alpha_pll gpll0 = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = { .name = "gpll0", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops 
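clk_rpmh_send() centralizes one policy decision that the two call sites above share: an enable vote for the active-only state must complete synchronously, since the caller expects the clock to be running on return, while disables and sleep/wake votes may be posted asynchronously. The rule, reduced to a sketch with simplified types:

#include <stdbool.h>

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE, RPMH_ACTIVE_ONLY_STATE };

/* Only block when turning a clock on in the active state; every other
 * vote may be posted asynchronously. */
static bool must_wait(unsigned int cmd_state, enum rpmh_state state)
{
	/* cmd_state != 0 means the aggregate vote is "enabled" somewhere */
	return cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
}

The BCM path applies the same rule with its "enable" flag directly, which is why both call sites can share the one helper.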
= &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll0_out_even = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_even", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_main = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_main", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_odd = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_odd", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_test = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_test", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll1 = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = { .name = "gpll1", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll1_out_even = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_even", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_main = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_main", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_odd = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_odd", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_test = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct 
clk_init_data){ .name = "gpll1_out_test", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll2 = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = { .name = "gpll2", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll2_out_even = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_even", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_main = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_main", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_odd = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_odd", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_test = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_test", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll3 = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = { .name = "gpll3", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll3_out_even = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_even", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll3_out_main = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_main", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv 
gpll3_out_odd = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_odd", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll3_out_test = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_test", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll4 = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = { .name = "gpll4", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll4_out_even = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_even", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll4_out_main = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_main", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll4_out_odd = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_odd", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll4_out_test = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_test", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index bf5730832ef3..aa5c0c6ead01 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = { .cmd_rcgr = 0x48044, .mnd_width = 0, .hid_width = 5, - .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .parent_map = gcc_parent_map_xo_gpll0, .freq_tbl = ftbl_hmss_rbcpr_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "hmss_rbcpr_clk_src", @@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = { static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { .halt_reg = 0x8a004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x8a004, + 
.hwcg_bit = 1, .clkr = { .enable_reg = 0x8a004, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 20877214acff..ee908fbfeab1 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -75,8 +75,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = { .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_even", .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - .name = "bi_tcxo", + .hw = &gpll0.clkr.hw, }, .num_parents = 1, .ops = &clk_trion_pll_postdiv_ops, @@ -1616,6 +1615,38 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0_out_even.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_gpu_iref_clk = { .halt_reg = 0x8c010, .halt_check = BRANCH_HALT, @@ -1698,6 +1729,38 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_npu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0_out_even.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_npu_trig_clk = { .halt_reg = 0x4d00c, .halt_check = BRANCH_VOTED, @@ -2812,6 +2875,45 @@ static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x750ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x75018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct 
clk_branch gcc_ufs_card_unipro_core_clk = { .halt_reg = 0x75058, .halt_check = BRANCH_HALT, @@ -2992,6 +3094,45 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x770ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_phy_unipro_core_clk = { .halt_reg = 0x77058, .halt_check = BRANCH_HALT, @@ -3332,12 +3473,16 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr, [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, + [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr, + [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr, [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr, [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr, @@ -3442,6 +3587,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr, @@ -3459,6 +3607,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr, diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 1907ee195a08..f2dc625b745d 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ 
b/drivers/clk/renesas/r9a06g032-clocks.c @@ -55,7 +55,7 @@ struct r9a06g032_clkdesc { u16 sel, g1, r1, g2, r2; } dual; }; -} __packed; +}; #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \ { .gate = _clk, .reset = _rst, \ diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 132cc96895e3..6f9612c169af 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -800,7 +800,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev) /* Save module registers with bits under our control */ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) { if (priv->smstpcr_saved[reg].mask) - priv->smstpcr_saved[reg].val = + priv->smstpcr_saved[reg].val = priv->stbyctrl ? + readb(priv->base + STBCR(reg)) : readl(priv->base + SMSTPCR(reg)); } @@ -860,8 +861,9 @@ static int cpg_mssr_resume_noirq(struct device *dev) } if (!i) - dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n", - priv->base + SMSTPCR(reg), oldval & mask); + dev_warn(dev, "Failed to enable %s%u[0x%x]\n", + priv->stbyctrl ? "STB" : "SMSTP", reg, + oldval & mask); } return 0; diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c index ba9f00dc9740..7dd2e0b1a586 100644 --- a/drivers/clk/rockchip/clk-half-divider.c +++ b/drivers/clk/rockchip/clk-half-divider.c @@ -167,7 +167,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, unsigned long flags, spinlock_t *lock) { - struct clk *clk; + struct clk *clk = ERR_PTR(-ENOMEM); struct clk_mux *mux = NULL; struct clk_gate *gate = NULL; struct clk_divider *div = NULL; diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index d17cfb7a3ff4..47d6482dda9d 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -137,7 +137,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" }; PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" }; -PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; +PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" }; PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" }; PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" }; PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" }; @@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" }; PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" }; -PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" }; - PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; @@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(24), 6, 10, DFLAGS, RK2928_CLKGATE_CON(2), 8, GFLAGS), - GATE(0, "cpll_gpu", "cpll", 0, + COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0, + RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "gpll_gpu", "gpll", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "hdmiphy_gpu", "hdmiphy", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "usb480m_gpu", "usb480m", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0, - RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS), COMPOSITE(SCLK_SPI0, "sclk_spi0", 
mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS, @@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS), /* PD_GPU */ - GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS), - GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS), + GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS), + GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), /* PD_BUS */ GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS), diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 51564fc23c63..f4086287bb71 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, @@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { 0), GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 27fd274e92f8..dfef5f0833db 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = { static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", - GATE_BUS_TOP, 24, 0, 0), + GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), }; @@ -940,25 +940,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", - GATE_BUS_TOP, 5, 0, 0), + GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", - GATE_BUS_TOP, 8, 0, 0), + GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0), GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio", GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_isp", "mout_user_aclk266_isp", - 
GATE_BUS_TOP, 13, 0, 0), + GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0), GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", - GATE_BUS_TOP, 16, 0, 0), + GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", @@ -1158,8 +1158,10 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE_IP_GSCL1, 3, 0, 0), GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333", GATE_IP_GSCL1, 4, 0, 0), - GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0), - GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0), + GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, + CLK_IS_CRITICAL, 0), + GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, + CLK_IS_CRITICAL, 0), GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333", GATE_IP_GSCL1, 16, 0, 0), GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 4b1aa9382ad2..6f29ecd0442e 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", - ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 6, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, 5, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index 6282ee2f361c..a8901f90a61a 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev) struct __prci_data *pd; int r; - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + pd = devm_kzalloc(dev, + struct_size(pd, hw_clks.hws, + ARRAY_SIZE(__prci_init_clocks)), + GFP_KERNEL); if (!pd) return -ENOMEM; diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index c84d5bab7ac2..b95483bb6a5e 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np) for (i = pll1; i < maxclk; i++) { atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); - BUG_ON(!atlas6_clks[i]); + BUG_ON(IS_ERR(atlas6_clks[i])); } clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); clk_register_clkdev(atlas6_clks[io], NULL, "io"); diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c index cd5df9103614..d62778884208 100644 --- a/drivers/clk/socfpga/clk-gate-a10.c +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node, if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) { pr_err("%s: failed to find altr,sys-mgr regmap!\n", __func__); + kfree(socfpga_clk); return; } } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 43ecd507bf83..cf94a12459ea 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ 
b/drivers/clk/socfpga/clk-gate.c @@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; val &= GENMASK(socfpgaclk->width - 1, 0); /* Check for GPIO_DB_CLK by its offset */ - if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) div = val + 1; else div = (1 << val); diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 4705eb544f01..8d7b1d0c4664 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* read VCO1 reg for numerator and denominator */ reg = readl(socfpgaclk->hw.reg); refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate / refdiv; + + vco_freq = parent_rate; + do_div(vco_freq, refdiv); /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 993f3a73c71e..55d3b505b08c 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, - ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, + ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3}, { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 640270f51aa5..eb8862752c2b 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -105,7 +105,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL); if (!cfg) - return -ENOMEM; + return parent_rate; for (i = 0; i < regs_num; i++) cfg[i] = sprd_pll_read(pll, i); diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 4413b6e04a8e..55873d4b7603 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) break; } + flex_flags &= ~CLK_IS_CRITICAL; of_clk_detect_critical(np, i, &flex_flags); /* diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 5f66bf879772..149cfde817cb 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -389,6 +389,7 @@ static struct clk_div_table ths_div_table[] = { { .val = 1, .div = 2 }, { .val = 2, .div = 4 }, { .val = 3, .div = 6 }, + { /* Sentinel */ }, }; static const char * const ths_parents[] = { "osc24M" }; static struct ccu_div ths_clk = { diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index d89353a3cdec..2f00f1b7b9c0 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -228,7 +228,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k", static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", psi_ahb1_ahb2_parents, 0x510, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); @@ 
-237,19 +237,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", "psi-ahb1-ahb2", "pll-periph0" }; static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -673,7 +673,7 @@ static struct ccu_mux hdmi_cec_clk = { .common = { .reg = 0xb10, - .features = CCU_FEATURE_VARIABLE_PREDIV, + .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec", hdmi_cec_parents, &ccu_mux_ops, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 6b636362379e..7e629a4493af 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -322,6 +322,7 @@ static struct clk_div_table ths_div_table[] = { { .val = 1, .div = 2 }, { .val = 2, .div = 4 }, { .val = 3, .div = 6 }, + { /* Sentinel */ }, }; static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M", 0x074, 0, 2, ths_div_table, BIT(31), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index fa4ecb915590..9d3a76604d94 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux, max_m = cmp->m.max ?: 1 << cmp->m.width; max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); - if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { + if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) { ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p); rate = *parent_rate / p / m; } else { diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 27201fd26e44..e1aa1fbac48a 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) * Round down the frequency to the closest multiple of either * 6 or 16 */ - u32 round_freq_6 = round_down(freq_mhz, 6); + u32 round_freq_6 = rounddown(freq_mhz, 6); u32 round_freq_16 = round_down(freq_mhz, 16); if (round_freq_6 > round_freq_16) diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index f8688c2ddf1a..fdb46c5efc26 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c @@ -1801,13 +1801,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td) &td->reg_init_uV); if (!ret) { dev_err(td->dev, "couldn't get initialized voltage\n"); - return ret; + return -EINVAL; } ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period); if (!ret) { dev_err(td->dev, "couldn't get PWM period\n"); - return ret; + return -EINVAL; } td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1); diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index de466b4446da..0efcb200dde5 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -233,6 +233,7 @@ enum clk_id { tegra_clk_sdmmc4, tegra_clk_sdmmc4_8, tegra_clk_se, + tegra_clk_se_10, tegra_clk_soc_therm, tegra_clk_soc_therm_8, tegra_clk_sor0, diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 1583f5fc992f..80f640d9ea71 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -1569,9 +1569,6 @@ static int 
clk_plle_tegra114_enable(struct clk_hw *hw) unsigned long flags = 0; unsigned long input_rate; - if (clk_pll_is_enabled(hw)) - return 0; - input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 49b9f2f85bad..4dc11e1e61ba 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -636,7 +636,7 @@ static struct tegra_periph_init_data periph_clks[] = { INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8), INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9), INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), - INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), + INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10), INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8), INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8), INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03), diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c index bec3e008335f..5e044ba1ae36 100644 --- a/drivers/clk/tegra/clk-tegra-pmc.c +++ b/drivers/clk/tegra/clk-tegra-pmc.c @@ -49,16 +49,16 @@ struct pmc_clk_init_data { static DEFINE_SPINLOCK(clk_out_lock); -static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern1", +static const char *clk_out1_parents[] = { "osc", "osc_div2", + "osc_div4", "extern1", }; -static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern2", +static const char *clk_out2_parents[] = { "osc", "osc_div2", + "osc_div4", "extern2", }; -static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern3", +static const char *clk_out3_parents[] = { "osc", "osc_div2", + "osc_div4", "extern3", }; static struct pmc_clk_init_data pmc_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 7b4c6a488527..501929d9f70e 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -1263,6 +1263,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 }, + { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 }, /* must be the last entry */ { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index fdfb90058504..bb2f2836dab2 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, if (err) return NULL; } else { - const char *base_name = "adpll"; - char *buf; - - buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + - strlen(postfix), GFP_KERNEL); - if (!buf) - return NULL; - sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); - name = buf; + name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", + d->pa, postfix); } return name; diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c 
index a360d3109555..73f567d8022f 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = { }; static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = { - { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" }, { 0 }, }; diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 423a99b9f10c..8d0dea188a28 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -146,10 +146,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) if (!omap2_clk_is_hw_omap(clk_hw)) { pr_warn("can't setup clkdm for basic clk %s\n", __clk_get_name(clk)); + clk_put(clk); continue; } to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name; omap2_init_clk_clkdm(clk_hw); + clk_put(clk); } } diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 6a89936ba03a..eaa43575cfa5 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -196,6 +196,7 @@ cleanup: if (!cclk->comp_clks[i]) continue; list_del(&cclk->comp_clks[i]->link); + kfree(cclk->comp_clks[i]->parent_names); kfree(cclk->comp_clks[i]); } diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 95e36ba64acc..8024c6d2b9e9 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -498,6 +498,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, { struct clk_init_data *init; struct fapll_synth *synth; + struct clk *clk = ERR_PTR(-ENOMEM); init = kzalloc(sizeof(*init), GFP_KERNEL); if (!init) @@ -520,13 +521,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, synth->hw.init = init; synth->clk_pll = pll_clk; - return clk_register(NULL, &synth->hw); + clk = clk_register(NULL, &synth->hw); + if (IS_ERR(clk)) { + pr_err("failed to register clock\n"); + goto free; + } + + return clk; free: kfree(synth); kfree(init); - return ERR_PTR(-ENOMEM); + return clk; } static void __init ti_fapll_setup(struct device_node *node) diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c index c0f4631601e2..babca032352a 100644 --- a/drivers/clk/uniphier/clk-uniphier-mux.c +++ b/drivers/clk/uniphier/clk-uniphier-mux.c @@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index) static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw) { struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw); - int num_parents = clk_hw_get_num_parents(hw); + unsigned int num_parents = clk_hw_get_num_parents(hw); int ret; unsigned int val; - u8 i; + unsigned int i; ret = regmap_read(mux->regmap, mux->reg, &val); if (ret) diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index a11f93ecbf34..6f057ab9df03 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -558,7 +558,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, { int j; u32 num_nodes, clk_dev_id; - char *clk_out = NULL; + char *clk_out[MAX_NODES]; struct clock_topology *nodes; struct clk_hw *hw = NULL; @@ -572,16 +572,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, * Intermediate clock names are postfixed with type of clock. 
*/ if (j != (num_nodes - 1)) { - clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name, + clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name, clk_type_postfix[nodes[j].type]); } else { - clk_out = kasprintf(GFP_KERNEL, "%s", clk_name); + clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name); } if (!clk_topology[nodes[j].type]) continue; - hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id, + hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id, parent_names, num_parents, &nodes[j]); @@ -590,9 +590,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, __func__, clk_dev_id, clk_name, PTR_ERR(hw)); - parent_names[0] = clk_out; + parent_names[0] = clk_out[j]; } - kfree(clk_out); + + for (j = 0; j < num_nodes; j++) + kfree(clk_out[j]); + return hw; } diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c index a541397a172c..18fee827602a 100644 --- a/drivers/clk/zynqmp/pll.c +++ b/drivers/clk/zynqmp/pll.c @@ -103,9 +103,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate, /* Enable the fractional mode if needed */ rate_div = (rate * FRAC_DIV) / *prate; f = rate_div % FRAC_DIV; - zynqmp_pll_set_mode(hw, !!f); - - if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) { + if (f) { if (rate > PS_PLL_VCO_MAX) { fbdiv = rate / PS_PLL_VCO_MAX; rate = rate / (fbdiv + 1); @@ -179,10 +177,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate, int ret; const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); - if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) { - rate_div = (rate * FRAC_DIV) / parent_rate; + rate_div = (rate * FRAC_DIV) / parent_rate; + f = rate_div % FRAC_DIV; + zynqmp_pll_set_mode(hw, !!f); + + if (f) { m = rate_div / FRAC_DIV; - f = rate_div % FRAC_DIV; m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX)); rate = parent_rate * m; frac = (parent_rate * f) / FRAC_DIV; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index f35a53ce8988..3bb5625504e2 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -79,6 +79,7 @@ config IXP4XX_TIMER bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST depends on HAS_IOMEM select CLKSRC_MMIO + select TIMER_OF if OF help Enables support for the Intel XScale IXP4xx SoC timer. 
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 9a5464c625b4..39cdda2c9a98 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -69,7 +69,7 @@ static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI; static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; static bool arch_counter_suspend_stop; -static bool vdso_default = true; +static enum vdso_arch_clockmode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; static cpumask_t evtstrm_available = CPU_MASK_NONE; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); @@ -392,10 +392,10 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; if (access == ARCH_TIMER_PHYS_ACCESS) { - cval = evt + arch_counter_get_cntpct(); + cval = evt + arch_counter_get_cntpct_stable(); write_sysreg(cval, cntp_cval_el0); } else { - cval = evt + arch_counter_get_cntvct(); + cval = evt + arch_counter_get_cntvct_stable(); write_sysreg(cval, cntv_cval_el0); } @@ -476,6 +476,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .set_next_event_virt = erratum_set_next_event_tval_virt, }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1418040 + { + .match_type = ate_match_local_cap_id, + .id = (void *)ARM64_WORKAROUND_1418040, + .desc = "ARM erratum 1418040", + .disable_compat_vdso = true, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, @@ -560,8 +568,11 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa * change both the default value and the vdso itself. */ if (wa->read_cntvct_el0) { - clocksource_counter.archdata.vdso_direct = false; - vdso_default = false; + clocksource_counter.archdata.clock_mode = VDSO_CLOCKMODE_NONE; + vdso_default = VDSO_CLOCKMODE_NONE; + } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { + vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; + clocksource_counter.archdata.clock_mode = vdso_default; } } @@ -807,15 +818,24 @@ static void arch_timer_evtstrm_enable(int divider) static void arch_timer_configure_evtstream(void) { - int evt_stream_div, pos; + int evt_stream_div, lsb; + + /* + * As the event stream can at most be generated at half the frequency + * of the counter, use half the frequency when computing the divider. + */ + evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; + + /* + * Find the closest power of two to the divisor. If the adjacent bit + * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). 
+ */ + lsb = fls(evt_stream_div) - 1; + if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) + lsb++; - /* Find the closest power of two to the divisor */ - evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; - pos = fls(evt_stream_div); - if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) - pos--; /* enable event stream */ - arch_timer_evtstrm_enable(min(pos, 15)); + arch_timer_evtstrm_enable(max(0, min(lsb, 15))); } static void arch_counter_set_user_access(void) @@ -979,7 +999,7 @@ static void __init arch_counter_register(unsigned type) } arch_timer_read_counter = rd; - clocksource_counter.archdata.vdso_direct = vdso_default; + clocksource_counter.archdata.clock_mode = vdso_default; } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; } diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 654766538f93..10ce69548f1b 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta, /** * dw_apb_clockevent_init() - use an APB timer as a clock_event_device * - * @cpu: The CPU the events will be targeted at. + * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation + * isn't required. * @name: The name used for the timer and the IRQ for it. * @rating: The rating to give the timer. * @base: I/O base for the timer registers. @@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, dw_ced->ced.max_delta_ticks = 0x7fffffff; dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.min_delta_ticks = 5000; - dw_ced->ced.cpumask = cpumask_of(cpu); + dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu); dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; dw_ced->ced.set_state_shutdown = apbt_shutdown; diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 8c28b127759f..6921b91b61ef 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -147,10 +147,6 @@ static int num_called; static int __init dw_apb_timer_init(struct device_node *timer) { switch (num_called) { - case 0: - pr_debug("%s: found clockevent timer\n", __func__); - add_clockevent(timer); - break; case 1: pr_debug("%s: found clocksource timer\n", __func__); add_clocksource(timer); @@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) #endif break; default: + pr_debug("%s: found clockevent timer\n", __func__); + add_clockevent(timer); break; } diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 1d740a8c42ab..47114c2a7cb5 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node) return PTR_ERR(clk); } - ret = ENXIO; + ret = -ENXIO; base = of_iomap(node, 0); if (!base) { pr_err("failed to map registers for clockevent\n"); diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index f6ddae30933f..dae8c0c2e606 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state) /* Clear pending interrupt */ timrot_irq_acknowledge(); - -#ifdef DEBUG - pr_info("%s: changing mode to %s\n", __func__, state) -#endif /* DEBUG */ + pr_debug("%s: changing mode to %s\n", __func__, state); } static int mxs_shutdown(struct 
clock_event_device *evt) diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 88fe2e9ba9a3..160bc6597de5 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -411,10 +411,8 @@ static int __init ttc_setup_clockevent(struct clk *clk, ttcce->ttc.clk = clk; err = clk_prepare_enable(ttcce->ttc.clk); - if (err) { - kfree(ttcce); - return err; - } + if (err) + goto out_kfree; ttcce->ttc.clk_rate_change_nb.notifier_call = ttc_rate_change_clockevent_cb; @@ -424,7 +422,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, &ttcce->ttc.clk_rate_change_nb); if (err) { pr_warn("Unable to register clock notifier.\n"); - return err; + goto out_kfree; } ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); @@ -453,15 +451,17 @@ static int __init ttc_setup_clockevent(struct clk *clk, err = request_irq(irq, ttc_clock_event_interrupt, IRQF_TIMER, ttcce->ce.name, ttcce); - if (err) { - kfree(ttcce); - return err; - } + if (err) + goto out_kfree; clockevents_config_and_register(&ttcce->ce, ttcce->ttc.freq / PRESCALE, 1, 0xfffe); return 0; + +out_kfree: + kfree(ttcce); + return err; } /** diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c index 80d0939d040b..8d386adbe800 100644 --- a/drivers/clocksource/timer-gx6605s.c +++ b/drivers/clocksource/timer-gx6605s.c @@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev) void __iomem *base = timer_of_base(to_timer_of(ce)); writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS); + writel_relaxed(0, base + TIMER_INI); ce->event_handler(ce); diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c index 7d487107e3cd..32b2563e2ad1 100644 --- a/drivers/clocksource/timer-orion.c +++ b/drivers/clocksource/timer-orion.c @@ -149,7 +149,8 @@ static int __init orion_timer_init(struct device_node *np) irq = irq_of_parse_and_map(np, 1); if (irq <= 0) { pr_err("%pOFn: unable to parse timer1 irq\n", np); - return -EINVAL; + ret = -EINVAL; + goto out_unprep_clk; } rate = clk_get_rate(clk); @@ -166,7 +167,7 @@ static int __init orion_timer_init(struct device_node *np) clocksource_mmio_readl_down); if (ret) { pr_err("Failed to initialize mmio timer\n"); - return ret; + goto out_unprep_clk; } sched_clock_register(orion_read_sched_clock, 32, rate); @@ -175,7 +176,7 @@ static int __init orion_timer_init(struct device_node *np) ret = setup_irq(irq, &orion_clkevt_irq); if (ret) { pr_err("%pOFn: unable to setup irq\n", np); - return ret; + goto out_unprep_clk; } ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; @@ -188,5 +189,9 @@ static int __init orion_timer_init(struct device_node *np) orion_delay_timer_init(rate); return 0; + +out_unprep_clk: + clk_disable_unprepare(clk); + return ret; } TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 00b113f4b958..5c23a9a56921 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -42,6 +42,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses"); * @base: base port address of the IIO device */ struct quad8_iio { + struct mutex lock; struct counter_device counter; unsigned int preset[QUAD8_NUM_COUNTERS]; unsigned int count_mode[QUAD8_NUM_COUNTERS]; @@ -116,6 +117,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, /* Borrow XOR Carry effectively doubles count range */ *val = (borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* 
Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -123,6 +126,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev, for (i = 0; i < 3; i++) *val |= (unsigned int)inb(base_offset) << (8 * i); + mutex_unlock(&priv->lock); + return IIO_VAL_INT; case IIO_CHAN_INFO_ENABLE: *val = priv->ab_enable[chan->channel]; @@ -153,6 +158,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -176,12 +183,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case IIO_CHAN_INFO_ENABLE: /* only boolean values accepted */ if (val < 0 || val > 1) return -EINVAL; + mutex_lock(&priv->lock); + priv->ab_enable[chan->channel] = val; ior_cfg = val | priv->preset_enable[chan->channel] << 1; @@ -189,11 +200,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; case IIO_CHAN_INFO_SCALE: + mutex_lock(&priv->lock); + /* Quadrature scaling only available in quadrature mode */ - if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1)) + if (!priv->quadrature_mode[chan->channel] && + (val2 || val != 1)) { + mutex_unlock(&priv->lock); return -EINVAL; + } /* Only three gain states (1, 0.5, 0.25) */ if (val == 1 && !val2) @@ -207,11 +225,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev, priv->quadrature_scale[chan->channel] = 2; break; default: + mutex_unlock(&priv->lock); return -EINVAL; } - else + else { + mutex_unlock(&priv->lock); return -EINVAL; + } + mutex_unlock(&priv->lock); return 0; } @@ -248,6 +270,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, if (preset > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + priv->preset[chan->channel] = preset; /* Reset Byte Pointer */ @@ -257,6 +281,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private, for (i = 0; i < 3; i++) outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -286,6 +312,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + priv->preset_enable[chan->channel] = preset_enable; ior_cfg = priv->ab_enable[chan->channel] | @@ -294,6 +322,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -351,6 +381,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, unsigned int mode_cfg = cnt_mode << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + mutex_lock(&priv->lock); + priv->count_mode[chan->channel] = cnt_mode; /* Add quadrature mode configuration */ @@ -360,6 +392,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -387,19 +421,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev, const struct 
iio_chan_spec *chan, unsigned int synchronous_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[chan->channel] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[chan->channel]) + if (synchronous_mode && !priv->quadrature_mode[chan->channel]) { + mutex_unlock(&priv->lock); return -EINVAL; + } priv->synchronous_mode[chan->channel] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -427,8 +468,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int quadrature_mode) { struct quad8_iio *const priv = iio_priv(indio_dev); - unsigned int mode_cfg = priv->count_mode[chan->channel] << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int mode_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = priv->count_mode[chan->channel] << 1; if (quadrature_mode) mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3; @@ -446,6 +491,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -473,15 +520,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int index_polarity) { struct quad8_iio *const priv = iio_priv(indio_dev); - const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] | - index_polarity << 1; const int base_offset = priv->base + 2 * chan->channel + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->synchronous_mode[chan->channel]; priv->index_polarity[chan->channel] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -585,7 +637,7 @@ static int quad8_signal_read(struct counter_device *counter, static int quad8_count_read(struct counter_device *counter, struct counter_count *count, struct counter_count_read_value *val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int base_offset = priv->base + 2 * count->id; unsigned int flags; unsigned int borrow; @@ -600,6 +652,8 @@ static int quad8_count_read(struct counter_device *counter, /* Borrow XOR Carry effectively doubles count range */ position = (unsigned long)(borrow ^ carry) << 24; + mutex_lock(&priv->lock); + /* Reset Byte Pointer; transfer Counter to Output Latch */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, base_offset + 1); @@ -609,13 +663,15 @@ static int quad8_count_read(struct counter_device *counter, counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position); + mutex_unlock(&priv->lock); + return 0; } static int quad8_count_write(struct counter_device *counter, struct counter_count *count, struct counter_count_write_value *val) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int base_offset = priv->base + 2 * count->id; int err; unsigned long position; 
@@ -630,6 +686,8 @@ static int quad8_count_write(struct counter_device *counter, if (position > 0xFFFFFF) return -EINVAL; + mutex_lock(&priv->lock); + /* Reset Byte Pointer */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); @@ -653,6 +711,8 @@ static int quad8_count_write(struct counter_device *counter, /* Reset Error flag */ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); + mutex_unlock(&priv->lock); + return 0; } @@ -673,13 +733,13 @@ static enum counter_count_function quad8_count_functions_list[] = { static int quad8_function_get(struct counter_device *counter, struct counter_count *count, size_t *function) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const int id = count->id; - const unsigned int quadrature_mode = priv->quadrature_mode[id]; - const unsigned int scale = priv->quadrature_scale[id]; - if (quadrature_mode) - switch (scale) { + mutex_lock(&priv->lock); + + if (priv->quadrature_mode[id]) + switch (priv->quadrature_scale[id]) { case 0: *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; break; @@ -693,6 +753,8 @@ static int quad8_function_get(struct counter_device *counter, else *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; + mutex_unlock(&priv->lock); + return 0; } @@ -703,10 +765,15 @@ static int quad8_function_set(struct counter_device *counter, const int id = count->id; unsigned int *const quadrature_mode = priv->quadrature_mode + id; unsigned int *const scale = priv->quadrature_scale + id; - unsigned int mode_cfg = priv->count_mode[id] << 1; unsigned int *const synchronous_mode = priv->synchronous_mode + id; - const unsigned int idr_cfg = priv->index_polarity[id] << 1; const int base_offset = priv->base + 2 * id + 1; + unsigned int mode_cfg; + unsigned int idr_cfg; + + mutex_lock(&priv->lock); + + mode_cfg = priv->count_mode[id] << 1; + idr_cfg = priv->index_polarity[id] << 1; if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { *quadrature_mode = 0; @@ -742,6 +809,8 @@ static int quad8_function_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -858,15 +927,20 @@ static int quad8_index_polarity_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = priv->synchronous_mode[channel_id] | - index_polarity << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = index_polarity << 1; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->synchronous_mode[channel_id]; priv->index_polarity[channel_id] = index_polarity; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -893,19 +967,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter, { struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id - 16; - const unsigned int idr_cfg = synchronous_mode | - priv->index_polarity[channel_id] << 1; const int base_offset = priv->base + 2 * channel_id + 1; + unsigned int idr_cfg = synchronous_mode; + + mutex_lock(&priv->lock); + + idr_cfg |= priv->index_polarity[channel_id] << 1; /* Index function must be non-synchronous in non-quadrature mode */ - if (synchronous_mode && !priv->quadrature_mode[channel_id]) + if (synchronous_mode && !priv->quadrature_mode[channel_id]) { + mutex_unlock(&priv->lock); return 
-EINVAL; + } priv->synchronous_mode[channel_id] = synchronous_mode; /* Load Index Control configuration to Index Control Register */ outb(QUAD8_CTR_IDR | idr_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -970,6 +1051,8 @@ static int quad8_count_mode_set(struct counter_device *counter, break; } + mutex_lock(&priv->lock); + priv->count_mode[count->id] = cnt_mode; /* Set count mode configuration value */ @@ -982,6 +1065,8 @@ static int quad8_count_mode_set(struct counter_device *counter, /* Load mode configuration to Counter Mode Register */ outb(QUAD8_CTR_CMR | mode_cfg, base_offset); + mutex_unlock(&priv->lock); + return 0; } @@ -1023,6 +1108,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, if (err) return err; + mutex_lock(&priv->lock); + priv->ab_enable[count->id] = ab_enable; ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; @@ -1030,6 +1117,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter, /* Load I/O control configuration */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); + mutex_unlock(&priv->lock); + return len; } @@ -1058,14 +1147,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter, return sprintf(buf, "%u\n", priv->preset[count->id]); } +static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id, + unsigned int preset) +{ + const unsigned int base_offset = quad8iio->base + 2 * id; + int i; + + quad8iio->preset[id] = preset; + + /* Reset Byte Pointer */ + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + + /* Set Preset Register */ + for (i = 0; i < 3; i++) + outb(preset >> (8 * i), base_offset); +} + static ssize_t quad8_count_preset_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; unsigned int preset; int ret; - int i; ret = kstrtouint(buf, 0, &preset); if (ret) @@ -1075,14 +1178,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, if (preset > 0xFFFFFF) return -EINVAL; - priv->preset[count->id] = preset; + mutex_lock(&priv->lock); - /* Reset Byte Pointer */ - outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); + quad8_preset_register_set(priv, count->id, preset); - /* Set Preset Register */ - for (i = 0; i < 3; i++) - outb(preset >> (8 * i), base_offset); + mutex_unlock(&priv->lock); return len; } @@ -1090,15 +1190,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, static ssize_t quad8_count_ceiling_read(struct counter_device *counter, struct counter_count *count, void *private, char *buf) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_read(counter, count, private, buf); + mutex_unlock(&priv->lock); + return sprintf(buf, "%u\n", priv->preset[count->id]); } + mutex_unlock(&priv->lock); + /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ return sprintf(buf, "33554431\n"); } @@ -1107,15 +1212,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter, struct counter_count *count, void *private, const char *buf, size_t len) { struct quad8_iio *const priv = counter->priv; + unsigned int ceiling; + int ret; + + ret = kstrtouint(buf, 0, &ceiling); + if (ret) + return ret; 
+ + /* Only 24-bit values are supported */ + if (ceiling > 0xFFFFFF) + return -EINVAL; + + mutex_lock(&priv->lock); /* Range Limit and Modulo-N count modes use preset value as ceiling */ switch (priv->count_mode[count->id]) { case 1: case 3: - return quad8_count_preset_write(counter, count, private, buf, - len); + quad8_preset_register_set(priv, count->id, ceiling); + break; } + mutex_unlock(&priv->lock); + return len; } @@ -1143,6 +1262,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Preset enable is active low in Input/Output Control register */ preset_enable = !preset_enable; + mutex_lock(&priv->lock); + priv->preset_enable[count->id] = preset_enable; ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; @@ -1150,6 +1271,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, /* Load I/O control configuration to Input / Output Control Register */ outb(QUAD8_CTR_IOR | ior_cfg, base_offset); + mutex_unlock(&priv->lock); + return len; } @@ -1320,6 +1443,9 @@ static int quad8_probe(struct device *dev, unsigned int id) quad8iio->counter.priv = quad8iio; quad8iio->base = base[id]; + /* Initialize mutex */ + mutex_init(&quad8iio->lock); + /* Reset all counters and disable interrupt function */ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP); /* Set initial configuration for all counters */ diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 644ba18a72ad..889ea7a6ed63 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -24,7 +24,7 @@ struct stm32_timer_cnt { struct counter_device counter; struct regmap *regmap; struct clk *clk; - u32 ceiling; + u32 max_arr; }; /** @@ -35,13 +35,14 @@ struct stm32_timer_cnt { * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges */ enum stm32_count_function { - STM32_COUNT_SLAVE_MODE_DISABLED = -1, + STM32_COUNT_SLAVE_MODE_DISABLED, STM32_COUNT_ENCODER_MODE_1, STM32_COUNT_ENCODER_MODE_2, STM32_COUNT_ENCODER_MODE_3, }; static enum counter_count_function stm32_count_functions[] = { + [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE, [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4, @@ -65,14 +66,15 @@ static int stm32_count_write(struct counter_device *counter, struct counter_count_write_value *val) { struct stm32_timer_cnt *const priv = counter->priv; - u32 cnt; + u32 cnt, ceiling; int err; err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val); if (err) return err; - if (cnt > priv->ceiling) + regmap_read(priv->regmap, TIM_ARR, &ceiling); + if (cnt > ceiling) return -EINVAL; return regmap_write(priv->regmap, TIM_CNT, cnt); @@ -88,6 +90,9 @@ static int stm32_count_function_get(struct counter_device *counter, regmap_read(priv->regmap, TIM_SMCR, &smcr); switch (smcr & TIM_SMCR_SMS) { + case 0: + *function = STM32_COUNT_SLAVE_MODE_DISABLED; + return 0; case 1: *function = STM32_COUNT_ENCODER_MODE_1; return 0; @@ -97,9 +102,9 @@ static int stm32_count_function_get(struct counter_device *counter, case 3: *function = STM32_COUNT_ENCODER_MODE_3; return 0; + default: + return -EINVAL; } - - return -EINVAL; } static int stm32_count_function_set(struct counter_device *counter, @@ -110,6 +115,9 @@ static int stm32_count_function_set(struct counter_device *counter, u32 cr1, sms; switch 
(function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + sms = 0; + break; case STM32_COUNT_ENCODER_MODE_1: sms = 1; break; @@ -120,8 +128,7 @@ static int stm32_count_function_set(struct counter_device *counter, sms = 3; break; default: - sms = 0; - break; + return -EINVAL; } /* Store enable status */ @@ -129,10 +136,6 @@ static int stm32_count_function_set(struct counter_device *counter, regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); - /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); - regmap_write(priv->regmap, TIM_ARR, priv->ceiling); - regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); /* Make sure that registers are updated */ @@ -183,11 +186,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter, if (ret) return ret; + if (ceiling > priv->max_arr) + return -ERANGE; + /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); regmap_write(priv->regmap, TIM_ARR, ceiling); - priv->ceiling = ceiling; return len; } @@ -269,31 +274,36 @@ static int stm32_action_get(struct counter_device *counter, size_t function; int err; - /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */ - *action = STM32_SYNAPSE_ACTION_NONE; - err = stm32_count_function_get(counter, count, &function); if (err) - return 0; + return err; switch (function) { + case STM32_COUNT_SLAVE_MODE_DISABLED: + /* counts on internal clock when CEN=1 */ + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_1: /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ if (synapse->signal->id == count->synapses[0].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_2: /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ if (synapse->signal->id == count->synapses[1].signal->id) *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + else + *action = STM32_SYNAPSE_ACTION_NONE; + return 0; case STM32_COUNT_ENCODER_MODE_3: /* counts up/down on both TI1FP1 and TI2FP2 edges */ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; - break; + return 0; + default: + return -EINVAL; } - - return 0; } static const struct counter_ops stm32_timer_cnt_ops = { @@ -354,7 +364,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) priv->regmap = ddata->regmap; priv->clk = ddata->clk; - priv->ceiling = ddata->max_arr; + priv->max_arr = ddata->max_arr; priv->counter.name = dev_name(dev); priv->counter.parent = dev; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index a905796f7f85..25f11e9ec358 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -41,6 +41,7 @@ config ARM_ARMADA_37XX_CPUFREQ config ARM_ARMADA_8K_CPUFREQ tristate "Armada 8K CPUFreq driver" depends on ARCH_MVEBU && CPUFREQ_DT + select ARMADA_AP_CPU_CLK help This enables the CPUFreq driver support for Marvell Armada8k SOCs. 
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d6f7df33ab8c..4195834a4591 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -688,7 +688,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); } - if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { + if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 && + !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); cpumask_copy(data->freqdomain_cpus, diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index aa0f06dec959..e4782f562e7a 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -25,6 +25,10 @@ #include "cpufreq-dt.h" +/* Clk register set */ +#define ARMADA_37XX_CLK_TBG_SEL 0 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22 + /* Power management in North Bridge register set */ #define ARMADA_37XX_NB_L0L1 0x18 #define ARMADA_37XX_NB_L2L3 0x1C @@ -69,6 +73,8 @@ #define LOAD_LEVEL_NR 4 #define MIN_VOLT_MV 1000 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155 /* AVS value for the corresponding voltage (in mV) */ static int avs_map[] = { @@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq) * will be configured then the DVFS will be enabled. */ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, - struct clk *clk, u8 *divider) + struct regmap *clk_base, u8 *divider) { + u32 cpu_tbg_sel; int load_lvl; - struct clk *parent; + + /* Determine to which TBG clock is CPU connected */ + regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel); + cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF; + cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK; for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) { unsigned int reg, mask, val, offset = 0; @@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, mask = (ARMADA_37XX_NB_CLK_SEL_MASK << ARMADA_37XX_NB_CLK_SEL_OFF); + /* Set TBG index, for all levels we use the same TBG */ + val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF; + mask = (ARMADA_37XX_NB_TBG_SEL_MASK + << ARMADA_37XX_NB_TBG_SEL_OFF); + /* * Set cpu divider based on the pre-computed array in * order to have balanced step. @@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, regmap_update_bits(base, reg, mask, val); } - - /* - * Set cpu clock source, for all the level we keep the same - * clock source that the one already configured. For this one - * we need to use the clock framework - */ - parent = clk_get_parent(clk); - clk_set_parent(clk, parent); } /* @@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm) * - L2 & L3 voltage should be about 150mv smaller than L0 voltage. * This function calculates L1 & L2 & L3 AVS values dynamically based * on L0 voltage and fill all AVS values to the AVS value table. + * When base CPU frequency is 1000 or 1200 MHz then there is additional + * minimal avs value for load L1. 
*/ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, struct armada_37xx_dvfs *dvfs) @@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) dvfs->avs[load_level] = avs_min; + /* + * Set the avs values for load L0 and L1 when base CPU frequency + * is 1000/1200 MHz to its typical initial values according to + * the Armada 3700 Hardware Specifications. + */ + if (dvfs->cpu_freq_max >= 1000*1000*1000) { + if (dvfs->cpu_freq_max >= 1200*1000*1000) + avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ); + else + avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ); + dvfs->avs[0] = dvfs->avs[1] = avs_min; + } + return; } @@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base, target_vm = avs_map[l0_vdd_min] - 150; target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV; dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm); + + /* + * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz, + * otherwise the CPU gets stuck when switching from load L1 to load L0. + * Also ensure that avs value for load L1 is not higher than for L0. + */ + if (dvfs->cpu_freq_max >= 1000*1000*1000) { + u32 avs_min_l1; + + if (dvfs->cpu_freq_max >= 1200*1000*1000) + avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ); + else + avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ); + + if (avs_min_l1 > dvfs->avs[0]) + avs_min_l1 = dvfs->avs[0]; + + if (dvfs->avs[1] < avs_min_l1) + dvfs->avs[1] = avs_min_l1; + } } static void __init armada37xx_cpufreq_avs_setup(struct regmap *base, @@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void) struct platform_device *pdev; unsigned long freq; unsigned int cur_frequency, base_frequency; - struct regmap *nb_pm_base, *avs_base; + struct regmap *nb_clk_base, *nb_pm_base, *avs_base; struct device *cpu_dev; int load_lvl, ret; struct clk *clk, *parent; + nb_clk_base = + syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb"); + if (IS_ERR(nb_clk_base)) + return -ENODEV; + nb_pm_base = syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm"); @@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void) return -EINVAL; } - dvfs = armada_37xx_cpu_freq_info_get(cur_frequency); + dvfs = armada_37xx_cpu_freq_info_get(base_frequency); if (!dvfs) { clk_put(clk); return -EINVAL; @@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void) armada37xx_cpufreq_avs_configure(avs_base, dvfs); armada37xx_cpufreq_avs_setup(avs_base, dvfs); - armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider); + armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider); clk_put(clk); for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; @@ -456,6 +504,7 @@ static int __init armada37xx_cpufreq_driver_init(void) /* Now that everything is setup, enable the DVFS at hardware level */ armada37xx_cpufreq_enable_dvfs(nb_pm_base); + memset(&pdata, 0, sizeof(pdata)); pdata.suspend = armada37xx_cpufreq_suspend; pdata.resume = armada37xx_cpufreq_resume; @@ -472,7 +521,7 @@ disable_dvfs: remove_opp: /* clean-up the already added opp before leaving */ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) { - freq = cur_frequency / dvfs->divider[load_lvl]; + freq = base_frequency / dvfs->divider[load_lvl]; dev_pm_opp_remove(cpu_dev, freq); } @@ -483,6 +532,12 @@ remove_opp: /* 
late_initcall, to guarantee the driver is loaded after A37xx clock driver */ late_initcall(armada37xx_cpufreq_driver_init); +static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = { + { .compatible = "marvell,armada-3700-nb-pm" }, + { }, +}; +MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match); + MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); MODULE_DESCRIPTION("Armada 37xx cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 39e34f5066d3..b0fc5e84f857 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -204,6 +204,12 @@ static void __exit armada_8k_cpufreq_exit(void) } module_exit(armada_8k_cpufreq_exit); +static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = { + { .compatible = "marvell,ap806-cpu-clock" }, + { }, +}; +MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match); + MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>"); MODULE_DESCRIPTION("Armada 8K cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 77b0e5d0fb13..a3c82f530d60 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -566,6 +566,16 @@ unmap_base: return ret; } +static void brcm_avs_prepare_uninit(struct platform_device *pdev) +{ + struct private_data *priv; + + priv = platform_get_drvdata(pdev); + + iounmap(priv->avs_intr_base); + iounmap(priv->base); +} + static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *freq_table; @@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev) brcm_avs_driver.driver_data = pdev; - return cpufreq_register_driver(&brcm_avs_driver); + ret = cpufreq_register_driver(&brcm_avs_driver); + if (ret) + brcm_avs_prepare_uninit(pdev); + + return ret; } static int brcm_avs_cpufreq_remove(struct platform_device *pdev) { - struct private_data *priv; int ret; ret = cpufreq_unregister_driver(&brcm_avs_driver); - if (ret) - return ret; + WARN_ON(ret); - priv = platform_get_drvdata(pdev); - iounmap(priv->base); - iounmap(priv->avs_intr_base); + brcm_avs_prepare_uninit(pdev); return 0; } diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index bca8d1f47fd2..1200842c3da4 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = { static const struct of_device_id blacklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "arm,vexpress", }, + { .compatible = "calxeda,highbank", }, { .compatible = "calxeda,ecx-2000", }, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 35f8e098e9fa..b8b7d3567d94 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -616,6 +616,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor) return NULL; } +static struct cpufreq_governor *get_governor(const char *str_governor) +{ + struct cpufreq_governor *t; + + mutex_lock(&cpufreq_governor_mutex); + t = find_governor(str_governor); + if (!t) + goto unlock; + + if (!try_module_get(t->owner)) + t = NULL; + +unlock: + mutex_unlock(&cpufreq_governor_mutex); + + return t; +} + static unsigned int cpufreq_parse_policy(char *str_governor) { if (!strncasecmp(str_governor, "performance", 
CPUFREQ_NAME_LEN)) @@ -635,28 +653,14 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) { struct cpufreq_governor *t; - mutex_lock(&cpufreq_governor_mutex); + t = get_governor(str_governor); + if (t) + return t; - t = find_governor(str_governor); - if (!t) { - int ret; + if (request_module("cpufreq_%s", str_governor)) + return NULL; - mutex_unlock(&cpufreq_governor_mutex); - - ret = request_module("cpufreq_%s", str_governor); - if (ret) - return NULL; - - mutex_lock(&cpufreq_governor_mutex); - - t = find_governor(str_governor); - } - if (t && !try_module_get(t->owner)) - t = NULL; - - mutex_unlock(&cpufreq_governor_mutex); - - return t; + return get_governor(str_governor); } /** @@ -810,12 +814,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, goto out; } + mutex_lock(&cpufreq_governor_mutex); for_each_governor(t) { if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) - goto out; + break; i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); } + mutex_unlock(&cpufreq_governor_mutex); out: i += sprintf(&buf[i], "\n"); return i; @@ -1053,15 +1059,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) struct cpufreq_governor *def_gov = cpufreq_default_governor(); struct cpufreq_governor *gov = NULL; unsigned int pol = CPUFREQ_POLICY_UNKNOWN; + int ret; if (has_target()) { /* Update policy governor to the one used before hotplug. */ - gov = find_governor(policy->last_governor); + gov = get_governor(policy->last_governor); if (gov) { pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, policy->cpu); } else if (def_gov) { gov = def_gov; + __module_get(gov->owner); } else { return -ENODATA; } @@ -1084,7 +1092,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) return -ENODATA; } - return cpufreq_set_policy(policy, gov, pol); + ret = cpufreq_set_policy(policy, gov, pol); + if (gov) + module_put(gov->owner); + + return ret; } static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) @@ -2494,6 +2506,8 @@ EXPORT_SYMBOL(cpufreq_update_policy); */ void cpufreq_update_limits(unsigned int cpu) { + if (!cpufreq_driver) + return; if (cpufreq_driver->update_limits) cpufreq_driver->update_limits(cpu); else @@ -2507,26 +2521,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits); static int cpufreq_boost_set_sw(int state) { struct cpufreq_policy *policy; - int ret = -EINVAL; for_each_active_policy(policy) { + int ret; + if (!policy->freq_table) - continue; + return -ENXIO; ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); if (ret) { pr_err("%s: Policy frequency update failed\n", __func__); - break; + return ret; } ret = freq_qos_update_request(policy->max_freq_req, policy->max); if (ret < 0) - break; + return ret; } - return ret; + return 0; } int cpufreq_boost_trigger_state(int state) diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index 5a7f6dafcddb..ac57cddc5f2f 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -101,6 +101,13 @@ out_put_node: } module_init(hb_cpufreq_driver_init); +static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = { + { .compatible = "calxeda,highbank" }, + { .compatible = "calxeda,ecx-2000" }, + { }, +}; +MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match); + MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 648a09a1778a..edef3399c979 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -280,6 +280,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) void __iomem *base; np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); + if (!np) + np = of_find_compatible_node(NULL, NULL, + "fsl,imx6ull-ocotp"); if (!np) return -ENOENT; @@ -378,23 +381,24 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) goto put_reg; } + /* Because we have added the OPPs here, we must free them */ + free_opp = true; + if (of_machine_is_compatible("fsl,imx6ul") || of_machine_is_compatible("fsl,imx6ull")) { ret = imx6ul_opp_check_speed_grading(cpu_dev); if (ret) { if (ret == -EPROBE_DEFER) - goto put_node; + goto out_free_opp; dev_err(cpu_dev, "failed to read ocotp: %d\n", ret); - goto put_node; + goto out_free_opp; } } else { imx6q_opp_check_speed_grading(cpu_dev); } - /* Because we have added the OPPs here, we must free them */ - free_opp = true; num = dev_pm_opp_get_opp_count(cpu_dev); if (num < 0) { ret = num; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 45499e0b9f2f..88fe803a044d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, mutex_lock(&intel_pstate_limits_lock); if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { - u64 value; - - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); - if (ret) - goto return_pref; + /* + * Use the cached HWP Request MSR value, because the register + * itself may be updated by intel_pstate_hwp_boost_up() or + * intel_pstate_hwp_boost_down() at any time. + */ + u64 value = READ_ONCE(cpu_data->hwp_req_cached); value &= ~GENMASK_ULL(31, 24); @@ -661,13 +662,18 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, epp = epp_values[pref_index - 1]; value |= (u64)epp << 24; + /* + * The only other updater of hwp_req_cached in the active mode, + * intel_pstate_hwp_set(), is called under the same lock as this + * function, so it cannot run in parallel with the update below. 
+ */ + WRITE_ONCE(cpu_data->hwp_req_cached, value); ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); } else { if (epp == -EINVAL) epp = (pref_index - 1) << 2; ret = intel_pstate_set_epb(cpu_data->cpu, epp); } -return_pref: mutex_unlock(&intel_pstate_limits_lock); return ret; @@ -756,7 +762,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); - if (global.no_turbo) + if (global.no_turbo || global.turbo_disabled) *current_max = HWP_GUARANTEED_PERF(cap); else *current_max = HWP_HIGHEST_PERF(cap); @@ -1058,7 +1064,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, update_turbo_state(); if (global.turbo_disabled) { - pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_driver_lock); return -EPERM; @@ -1560,20 +1566,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { cpu->pstate.min_pstate = pstate_funcs.get_min(); - cpu->pstate.max_pstate = pstate_funcs.get_max(); cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); - cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; if (hwp_active && !hwp_mode_bdw) { unsigned int phy_max, current_max; intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + cpu->pstate.turbo_pstate = phy_max; + cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached)); } else { cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + cpu->pstate.max_pstate = pstate_funcs.get_max(); } + cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2526,9 +2534,15 @@ static int intel_pstate_update_status(const char *buf, size_t size) { int ret; - if (size == 3 && !strncmp(buf, "off", size)) - return intel_pstate_driver ? 
- intel_pstate_unregister_driver() : -EINVAL; + if (size == 3 && !strncmp(buf, "off", size)) { + if (!intel_pstate_driver) + return -EINVAL; + + if (hwp_active) + return -EBUSY; + + return intel_pstate_unregister_driver(); + } if (size == 6 && !strncmp(buf, "active", size)) { if (intel_pstate_driver) { diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c index 0ea88778882a..86f612593e49 100644 --- a/drivers/cpufreq/loongson1-cpufreq.c +++ b/drivers/cpufreq/loongson1-cpufreq.c @@ -216,6 +216,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = { module_platform_driver(ls1x_cpufreq_platdrv); +MODULE_ALIAS("platform:ls1x-cpufreq"); MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>"); MODULE_DESCRIPTION("Loongson1 CPUFreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 0c98dd08273d..927ebc582a38 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -540,6 +540,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { } }; +MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines); static int __init mtk_cpufreq_driver_init(void) { diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2db2f1739e09..1b2ec3be59eb 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data) /* Take a frequency, and issue the fid/vid transition command */ static int transition_frequency_fidvid(struct powernow_k8_data *data, - unsigned int index) + unsigned int index, + struct cpufreq_policy *policy) { - struct cpufreq_policy *policy; u32 fid = 0; u32 vid = 0; int res; @@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); - policy = cpufreq_cpu_get(smp_processor_id()); - cpufreq_cpu_put(policy); - cpufreq_freq_transition_begin(policy, &freqs); res = transition_fid_vid(data, fid, vid); cpufreq_freq_transition_end(policy, &freqs, res); @@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg) powernow_k8_acpi_pst_values(data, newstate); - ret = transition_frequency_fidvid(data, newstate); + ret = transition_frequency_fidvid(data, newstate, pol); if (ret) { pr_err("transition frequency failed\n"); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 56f4bc0d209e..bc6ccf2c7aae 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused) { int cpu; - struct cpufreq_policy cpu_policy; + struct cpufreq_policy *cpu_policy; rebooting = true; for_each_online_cpu(cpu) { - cpufreq_get_policy(&cpu_policy, cpu); - powernv_cpufreq_target_index(&cpu_policy, get_nominal_index()); + cpu_policy = cpufreq_cpu_get(cpu); + if (!cpu_policy) + continue; + powernv_cpufreq_target_index(cpu_policy, get_nominal_index()); + cpufreq_cpu_put(cpu_policy); } return NOTIFY_DONE; @@ -902,6 +905,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { void powernv_cpufreq_work_fn(struct work_struct *work) { struct chip *chip = container_of(work, struct chip, throttle); + struct cpufreq_policy *policy; unsigned int cpu; cpumask_t mask; @@ -916,12 +920,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) chip->restore = false; 
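/*
 * The powernv hunks in this patch drop the on-stack struct cpufreq_policy
 * copies filled by cpufreq_get_policy() in favour of refcounted lookups:
 * cpufreq_cpu_get() returns a pointer to the live policy (or NULL), and
 * every successful lookup must be balanced by cpufreq_cpu_put(), as the
 * loop below now does. A minimal sketch of the pattern, with the helper
 * name as a hypothetical placeholder:
 *
 *	static void example_act_on_policy(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *		if (!policy)	// no policy, e.g. CPU went offline
 *			return;
 *		// ... operate on the live, refcounted policy ...
 *		cpufreq_cpu_put(policy);	// drop the reference
 *	}
 */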
for_each_cpu(cpu, &mask) { int index; - struct cpufreq_policy policy; - cpufreq_get_policy(&policy, cpu); - index = cpufreq_table_find_index_c(&policy, policy.cur); - powernv_cpufreq_target_index(&policy, index); - cpumask_andnot(&mask, &mask, policy.cpus); + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + index = cpufreq_table_find_index_c(policy, policy->cur); + powernv_cpufreq_target_index(policy, index); + cpumask_andnot(&mask, &mask, policy->cpus); + cpufreq_cpu_put(policy); } out: put_online_cpus(); @@ -1080,6 +1086,12 @@ free_and_return: static inline void clean_chip_info(void) { + int i; + + /* flush any pending work items */ + if (chips) + for (i = 0; i < nr_chips; i++) + cancel_work_sync(&chips[i].throttle); kfree(chips); } diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index f0d2d5035413..1e77d190f19f 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -305,6 +305,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, {}, }; +MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list); /* * Since the driver depends on smem and nvmem drivers, which may diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 2b51e0718c9f..b341ffbf56bc 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -239,6 +239,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { }; module_platform_driver(scpi_cpufreq_platdrv); +MODULE_ALIAS("platform:scpi-cpufreq"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 8f16bbb164b8..7ade4070ca82 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = { static const struct reg_field *sti_cpufreq_match(void) { if (of_machine_is_compatible("st,stih407") || - of_machine_is_compatible("st,stih410")) + of_machine_is_compatible("st,stih410") || + of_machine_is_compatible("st,stih418")) return sti_stih407_dvfs_regfields; return NULL; @@ -258,7 +259,8 @@ static int sti_cpufreq_init(void) int ret; if ((!of_machine_is_compatible("st,stih407")) && - (!of_machine_is_compatible("st,stih410"))) + (!of_machine_is_compatible("st,stih410")) && + (!of_machine_is_compatible("st,stih418"))) return -ENODEV; ddata.cpu = get_cpu_device(0); @@ -290,6 +292,13 @@ register_cpufreq_dt: } module_init(sti_cpufreq_init); +static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { + { .compatible = "st,stih407" }, + { .compatible = "st,stih410" }, + { }, +}; +MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver"); MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>"); MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 9907a165135b..2deed8d8773f 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -167,6 +167,7 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = { { .compatible = "allwinner,sun50i-h6" }, {} }; +MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); static const struct of_device_id *sun50i_cpufreq_match_node(void) { diff --git 
a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 29d2d7a21bd7..4c4ce65f399b 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -148,7 +148,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, */ stop_critical_timings(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side @@ -568,12 +569,17 @@ static void __cpuidle_device_init(struct cpuidle_device *dev) */ static int __cpuidle_register_device(struct cpuidle_device *dev) { - int ret; struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + int i, ret; if (!try_module_get(drv->owner)) return -EINVAL; + for (i = 0; i < drv->state_count; i++) { + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 2bb2683b493c..306a6213df1c 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -292,6 +292,14 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ return sprintf(buf, "%s\n", state->_name);\ } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled"); +} + define_show_state_function(exit_latency) define_show_state_function(target_residency) define_show_state_function(power_usage) @@ -314,6 +322,7 @@ define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -326,6 +335,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_disable.attr, &attr_above.attr, &attr_below.attr, + &attr_default_status.attr, NULL }; @@ -480,7 +490,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &kdev->kobj, "state%d", i); if (ret) { - kfree(kobj); + kobject_put(&kobj->kobj); goto error_state; } cpuidle_add_s2idle_attr_group(kobj); @@ -611,7 +621,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, &kdev->kobj, "driver"); if (ret) { - kfree(kdrv); + kobject_put(&kdrv->kobj); return ret; } @@ -705,7 +715,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (error) { - kfree(kdev); + kobject_put(&kdev->kobj); return error; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 06b2b3fa5206..1f6308cdf79a 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -491,11 +491,9 @@ if CRYPTO_DEV_UX500 endif # if CRYPTO_DEV_UX500 config CRYPTO_DEV_ATMEL_AUTHENC - tristate "Support for Atmel IPSEC/SSL hw accelerator" + bool "Support for Atmel IPSEC/SSL hw accelerator" depends on ARCH_AT91 || COMPILE_TEST - select CRYPTO_AUTHENC - select CRYPTO_DEV_ATMEL_AES - select CRYPTO_DEV_ATMEL_SHA + depends on 
CRYPTO_DEV_ATMEL_AES help Some Atmel processors can combine the AES and SHA hw accelerators to enhance support of IPSEC/SSL. @@ -508,6 +506,8 @@ config CRYPTO_DEV_ATMEL_AES select CRYPTO_AES select CRYPTO_AEAD select CRYPTO_BLKCIPHER + select CRYPTO_AUTHENC if CRYPTO_DEV_ATMEL_AUTHENC + select CRYPTO_DEV_ATMEL_SHA if CRYPTO_DEV_ATMEL_AUTHENC help Some Atmel processors have AES hw accelerator. Select this if you want to use the Atmel module for @@ -544,6 +544,7 @@ config CRYPTO_DEV_ATMEL_SHA config CRYPTO_DEV_ATMEL_I2C tristate + select BITREVERSE config CRYPTO_DEV_ATMEL_ECC tristate "Support for Microchip / Atmel ECC hw accelerator" diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 7d6b695c4ab3..230e8902c727 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -916,7 +916,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, } pd->pd_ctl.w = PD_CTL_HOST_READY | - ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) | + ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) || (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? PD_CTL_HASH_FINAL : 0); pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index f85356a48e7e..98b8483577ce 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -41,7 +41,7 @@ /* ================= Device Structure ================== */ -struct device_private iproc_priv; +struct bcm_device_private iproc_priv; /* ==================== Parameters ===================== */ @@ -2937,7 +2937,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, ctx->enckeylen = keylen; ctx->authkeylen = 0; - memcpy(ctx->enckey, key, ctx->enckeylen); switch (ctx->enckeylen) { case AES_KEYSIZE_128: @@ -2953,6 +2952,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, goto badkey; } + memcpy(ctx->enckey, key, ctx->enckeylen); + flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); @@ -3013,6 +3014,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3041,6 +3046,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3070,6 +3079,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < CCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = CCM_ESP_SALT_SIZE; ctx->salt_offset = CCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 766452b24d0a..01feed268a0d 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -418,7 +418,7 @@ struct spu_hw { u32 num_chan; }; -struct device_private { +struct bcm_device_private { struct platform_device *pdev; struct spu_hw spu; @@ -465,6 
+465,6 @@ struct device_private { struct mbox_chan **mbox; }; -extern struct device_private iproc_priv; +extern struct bcm_device_private iproc_priv; #endif diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index cd7504101acd..7227dbf8f46c 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode) static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { - struct device_private *ipriv; + struct bcm_device_private *ipriv; char *buf; ssize_t ret, out_offset, out_count; int i; diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 137ed3df0c74..9612da122ceb 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -112,6 +112,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER select CRYPTO_DES + select CRYPTO_XTS help Selecting this will use CAAM Queue Interface (QI) for sending & receiving crypto jobs to/from CAAM. This gives better performance diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 2912006b946b..fdd994ee55e2 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -818,12 +818,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } -static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher, - const u8 *key, unsigned int keylen) -{ - return skcipher_setkey(skcipher, key, keylen, 0); -} - static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -1810,7 +1804,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, if (ivsize || mapped_dst_nents > 1) sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + - mapped_dst_nents); + mapped_dst_nents - 1 + !!ivsize); if (sec4_sg_bytes) { edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, @@ -2058,21 +2052,6 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, - { - .skcipher = { - .base = { - .cra_name = "ecb(arc4)", - .cra_driver_name = "ecb-arc4-caam", - .cra_blocksize = ARC4_BLOCK_SIZE, - }, - .setkey = arc4_skcipher_setkey, - .encrypt = skcipher_encrypt, - .decrypt = skcipher_decrypt, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - }, - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, - }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3533,7 +3512,6 @@ int caam_algapi_init(struct device *ctrldev) struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; - u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false, gcm_support; @@ -3553,8 +3531,6 @@ int caam_algapi_init(struct device *ctrldev) CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; - arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> - CHA_ID_LS_ARC4_SHIFT; ccha_inst = 0; ptha_inst = 0; @@ -3575,7 +3551,6 @@ int caam_algapi_init(struct device *ctrldev) md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; - arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; gcm_support = aesa & CHA_VER_MISC_AES_GCM; 
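/*
 * Note on the caamalg.c hunk above: sg_to_sec4_set_last() must be handed a
 * pointer to the final S/G entry, not one past it. The last destination
 * entry sits at index dst_sg_idx + mapped_dst_nents - 1, plus one more
 * when an IV entry is also linked into the table; "!!ivsize" folds any
 * nonzero IV size down to 1. A minimal sketch (helper name hypothetical):
 *
 *	static inline struct sec4_sg_entry *
 *	example_last_sec4_entry(struct sec4_sg_entry *base, int dst_sg_idx,
 *				int mapped_dst_nents, int ivsize)
 *	{
 *		return base + dst_sg_idx + mapped_dst_nents - 1 + !!ivsize;
 *	}
 */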
} @@ -3598,10 +3573,6 @@ int caam_algapi_init(struct device *ctrldev) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; - /* Skip ARC4 algorithms if not supported by device */ - if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) - continue; - /* * Check support for AES modes not available * on LP devices. diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index aa9ccca67045..d6c58184bb57 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, const u32 ctx1_iv_off) { u32 *key_jump_cmd; + u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT; + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20); init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ @@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, LDST_OFFSET_SHIFT)); /* Load operation */ - append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | - OP_ALG_ENCRYPT); + if (is_chacha20) + options |= OP_ALG_AS_FINALIZE; + append_operation(desc, options); /* Perform operation */ skcipher_append_src_dst(desc); /* Store IV */ - if (ivsize) + if (!is_chacha20 && ivsize) append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); @@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, const u32 ctx1_iv_off) { u32 *key_jump_cmd; + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20); init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ @@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, skcipher_append_src_dst(desc); /* Store IV */ - if (ivsize) + if (!is_chacha20 && ivsize) append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); @@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); */ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) { - __be64 sector_size = cpu_to_be64(512); + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. + */ + __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); @@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); */ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) { - __be64 sector_size = cpu_to_be64(512); + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. 
+ */ + __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 8e3449670d2f..2a605a419df8 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -18,6 +18,7 @@ #include "qi.h" #include "jr.h" #include "caamalg_desc.h" +#include <asm/unaligned.h> /* * crypto alg @@ -67,6 +68,11 @@ struct caam_ctx { struct device *qidev; spinlock_t lock; /* Protects multiple init of driver context */ struct caam_drv_ctx *drv_ctx[NUM_OP]; + struct crypto_skcipher *fallback; +}; + +struct caam_skcipher_req_ctx { + struct skcipher_request fallback_req; }; static int aead_set_sh_desc(struct crypto_aead *aead) @@ -745,12 +751,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); struct device *jrdev = ctx->jrdev; int ret = 0; + int err; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { dev_err(jrdev, "key size mismatch\n"); goto badkey; } + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; + ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; @@ -1395,6 +1406,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, return edesc; } +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) { struct skcipher_edesc *edesc; @@ -1405,6 +1424,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) if (!req->cryptlen) return 0; + if (ctx->fallback && xts_skcipher_ivsize(req)) { + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); + } + if (unlikely(caam_congested)) return -EAGAIN; @@ -1529,6 +1563,7 @@ static struct caam_skcipher_alg driver_algs[] = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam-qi", + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = xts_skcipher_setkey, @@ -2462,9 +2497,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; - return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, - false); + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + + crypto_skcipher_reqsize(fallback)); + } + + ret = caam_init_common(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; } static int caam_aead_init(struct crypto_aead *tfm) @@ -2490,7 +2548,11 @@ static void caam_exit_common(struct caam_ctx *ctx) static void caam_cra_exit(struct crypto_skcipher *tfm) { - caam_exit_common(crypto_skcipher_ctx(tfm)); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); } static void caam_aead_exit(struct crypto_aead *tfm) @@ -2524,7 +2586,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx); - alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init; alg->exit = caam_cra_exit; diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 60e2a54c19f1..c3c22a8de4c0 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -43,7 +43,6 @@ #include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include <crypto/skcipher.h> -#include <crypto/arc4.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> #include <crypto/internal/rsa.h> diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 596ce28b957d..2410b23aa609 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7a24019356b5..e343249c8d05 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ 
b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 3514b082eca7..1e8dd9ebcc17 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -62,6 +62,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index c4632d84c9a1..637be2f903d3 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) struct nitrox_device *nitrox_get_first_device(void) { - struct nitrox_device *ndev = NULL; + struct nitrox_device *ndev; mutex_lock(&devlist_lock); list_for_each_entry(ndev, &ndevlist, list) { @@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void) break; } mutex_unlock(&devlist_lock); - if (!ndev) + if (&ndev->list == &ndevlist) return NULL; refcount_inc(&ndev->refcnt); diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 8fec733f567f..63e227adbb13 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD config CRYPTO_DEV_SP_CCP bool "Cryptographic Coprocessor device" default y - depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_CCP_DD && DMADEVICES select HW_RANDOM select DMA_ENGINE - select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 3f68262d9ab4..87a34d91fdf7 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -469,6 +469,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c8da8eb160da..7234b95241e9 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has 
entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -1731,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) break; default: ret = -EINVAL; - goto e_ctx; + goto e_data; } } else { /* Stash the context */ @@ -1777,8 +1792,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) LSB_ITEM_SIZE); break; default: + kfree(hmac_buf); ret = -EINVAL; - goto e_ctx; + goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); @@ -2027,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2053,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a72586eccd81..954f14bddf1d 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -87,6 +87,8 @@ 
static unsigned int cc_get_sgl_nents(struct device *dev, { unsigned int nents = 0; + *lbytes = 0; + while (nbytes && sg_list) { nents++; /* get the number of bytes in the last entry */ @@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev, nbytes : sg_list->length; sg_list = sg_next(sg_list); } + dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; } @@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { - if (sg_is_last(sg)) { - /* One entry only case -set to DLLI */ - if (dma_map_sg(dev, sg, 1, direction) != 1) { - dev_err(dev, "dma_map_sg() single buffer failed\n"); - return -ENOMEM; - } - dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", - &sg_dma_address(sg), sg_page(sg), sg_virt(sg), - sg->offset, sg->length); - *lbytes = nbytes; - *nents = 1; - *mapped_nents = 1; - } else { /*sg_is_last*/ - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); - if (*nents > max_sg_nents) { - *nents = 0; - dev_err(dev, "Too many fragments. current %d max %d\n", - *nents, max_sg_nents); - return -ENOMEM; - } - /* In case of mmu the number of mapped nents might - * be changed from the original sgl nents - */ - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); - if (*mapped_nents == 0) { - *nents = 0; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } + int ret = 0; + + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); + if (*nents > max_sg_nents) { + *nents = 0; + dev_err(dev, "Too many fragments. current %d max %d\n", + *nents, max_sg_nents); + return -ENOMEM; } + ret = dma_map_sg(dev, sg, *nents, direction); + if (dma_mapping_error(dev, ret)) { + *nents = 0; + dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); + return -ENOMEM; + } + + *mapped_nents = ret; + return 0; } @@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, + DMA_BIDIRECTIONAL); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_BIDIRECTIONAL); } if (drvdata->coherent && @@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts - while (sg_index <= size_to_skip) { + while (src_mapped_nents && (sg_index <= size_to_skip)) { src_mapped_nents--; offset -= areq_ctx->src_sgl->length; sgl = sg_next(areq_ctx->src_sgl); @@ -902,13 +894,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, if (req->src != req->dst) { size_for_map = areq_ctx->assoclen + req->cryptlen; - size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
- authsize : 0; + + if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) + size_for_map += authsize; + else + size_for_map -= authsize; + if (is_gcm4543) size_for_map += crypto_aead_ivsize(tfm); rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, - &areq_ctx->dst.nents, + &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); if (rc) @@ -921,7 +917,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, offset = size_to_skip; //check where the data starts - while (sg_index <= size_to_skip) { + while (dst_mapped_nents && sg_index <= size_to_skip) { dst_mapped_nents--; offset -= areq_ctx->dst_sgl->length; sgl = sg_next(areq_ctx->dst_sgl); @@ -1117,13 +1113,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) } size_to_map = req->cryptlen + areq_ctx->assoclen; - if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) + /* If we do in-place encryption, we also need the auth tag */ + if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && + (req->src == req->dst)) { size_to_map += authsize; - + } if (is_gcm4543) size_to_map += crypto_aead_ivsize(tfm); rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, - &areq_ctx->src.nents, + &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h index af434872c6ff..827b6cb1236e 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.h +++ b/drivers/crypto/ccree/cc_buffer_mgr.h @@ -25,6 +25,7 @@ enum cc_sg_cpy_direct { struct cc_mlli { cc_sram_addr_t sram_addr; + unsigned int mapped_nents; unsigned int nents; //sg nents unsigned int mlli_nents; //mlli nents might be different than the above }; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index cd9c60268bf8..9bf0cce578f0 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -163,7 +163,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); @@ -175,10 +174,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + } + /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_shash; dev_dbg(dev, "Allocated key buffer in context. 
key=@%p\n", ctx_p->user.key); @@ -190,21 +198,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_shash: + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 01dd418bdadc..2d30ed5a2674 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2480,8 +2480,9 @@ int chcr_aead_dma_map(struct device *dev, else reqctx->b0_dma = 0; if (req->src == req->dst) { - error = dma_map_sg(dev, req->src, sg_nents(req->src), - DMA_BIDIRECTIONAL); + error = dma_map_sg(dev, req->src, + sg_nents_for_len(req->src, dst_size), + DMA_BIDIRECTIONAL); if (!error) goto err; } else { @@ -2818,7 +2819,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int c_id = a_ctx(tfm)->tx_chan_id; unsigned int ccm_xtra; - unsigned char tag_offset = 0, auth_offset = 0; + unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 029a7354f541..5c16f368879b 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -125,8 +125,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx) atomic_set(&dev->inflight, 0); mutex_lock(&drv_data.drv_mutex); list_add_tail(&u_ctx->entry, &drv_data.inact_dev); - if (!drv_data.last_dev) - drv_data.last_dev = u_ctx; mutex_unlock(&drv_data.drv_mutex); } diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index dffa2aa855fd..82b76df43ae5 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -174,7 +174,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len) { if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) { __skb_trim(skb, 0); - refcount_add(2, &skb->users); + refcount_inc(&skb->users); } else { skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); } @@ -577,7 +577,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx) while (!skb_queue_empty(&listen_ctx->synq)) { struct chtls_sock *csk = - container_of((struct synq *)__skb_dequeue + container_of((struct synq *)skb_peek (&listen_ctx->synq), struct chtls_sock, synq); struct sock *child = csk->sk; @@ -692,14 +692,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb) if (rpl->status != CPL_ERR_NONE) { pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n", rpl->status, stid); - return CPL_RET_BUF_DONE; + } else { + cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family); + sock_put(listen_ctx->lsk); + kfree(listen_ctx); + module_put(THIS_MODULE); } - cxgb4_free_stid(cdev->tids, stid, 
listen_ctx->lsk->sk_family); - sock_put(listen_ctx->lsk); - kfree(listen_ctx); - module_put(THIS_MODULE); - - return 0; + return CPL_RET_BUF_DONE; } static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) @@ -716,15 +715,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) if (rpl->status != CPL_ERR_NONE) { pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n", rpl->status, stid); - return CPL_RET_BUF_DONE; + } else { + cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family); + sock_put(listen_ctx->lsk); + kfree(listen_ctx); + module_put(THIS_MODULE); } - - cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family); - sock_put(listen_ctx->lsk); - kfree(listen_ctx); - module_put(THIS_MODULE); - - return 0; + return CPL_RET_BUF_DONE; } static void chtls_purge_wr_queue(struct sock *sk) @@ -1024,6 +1021,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, const struct cpl_pass_accept_req *req, struct chtls_dev *cdev) { + struct adapter *adap = pci_get_drvdata(cdev->pdev); struct inet_sock *newinet; const struct iphdr *iph; struct tls_context *ctx; @@ -1033,9 +1031,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk, struct neighbour *n; struct tcp_sock *tp; struct sock *newsk; + bool found = false; u16 port_id; int rxq_idx; - int step; + int step, i; iph = (const struct iphdr *)network_hdr; newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb); @@ -1047,12 +1046,20 @@ static struct sock *chtls_recv_sock(struct sock *lsk, goto free_sk; n = dst_neigh_lookup(dst, &iph->saddr); - if (!n) - goto free_sk; + if (!n || !n->dev) + goto free_dst; ndev = n->dev; - if (!ndev) + if (is_vlan_dev(ndev)) + ndev = vlan_dev_real_dev(ndev); + + for_each_port(adap, i) + if (cdev->ports[i] == ndev) + found = true; + + if (!found) goto free_dst; + port_id = cxgb4_port_idx(ndev); csk = chtls_sock_create(cdev); @@ -1077,6 +1084,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, sk_setup_caps(newsk, dst); ctx = tls_get_ctx(lsk); newsk->sk_destruct = ctx->sk_destruct; + newsk->sk_prot_creator = lsk->sk_prot_creator; csk->sk = newsk; csk->passive_reap_next = oreq; csk->tx_chan = cxgb4_port_chan(ndev); @@ -1107,6 +1115,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, free_csk: chtls_sock_release(&csk->kref); free_dst: + if (n) + neigh_release(n); dst_release(dst); free_sk: inet_csk_prepare_forced_close(newsk); @@ -1366,7 +1376,6 @@ static void add_to_reap_list(struct sock *sk) struct chtls_sock *csk = sk->sk_user_data; local_bh_disable(); - bh_lock_sock(sk); release_tcp_port(sk); /* release the port immediately */ spin_lock(&reap_list_lock); @@ -1375,7 +1384,6 @@ static void add_to_reap_list(struct sock *sk) if (!csk->passive_reap_next) schedule_work(&reap_task); spin_unlock(&reap_list_lock); - bh_unlock_sock(sk); local_bh_enable(); } @@ -1444,6 +1452,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb) sk_wake_async(sk, 0, POLL_OUT); data = lookup_stid(cdev->tids, stid); + if (!data) { + /* listening server close */ + kfree_skb(skb); + goto unlock; + } lsk = ((struct listen_ctx *)data)->lsk; bh_lock_sock(lsk); @@ -1829,39 +1842,6 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) kfree_skb(skb); } -static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, - struct chtls_dev *cdev, int status, int queue) -{ - struct cpl_abort_req_rss *req = cplhdr(skb); - struct sk_buff *reply_skb; - struct chtls_sock *csk; - - csk = 
rcu_dereference_sk_user_data(sk); - - reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), - GFP_KERNEL); - - if (!reply_skb) { - req->status = (queue << 1); - send_defer_abort_rpl(cdev, skb); - return; - } - - set_abort_rpl_wr(reply_skb, GET_TID(req), status); - kfree_skb(skb); - - set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); - if (csk_conn_inline(csk)) { - struct l2t_entry *e = csk->l2t_entry; - - if (e && sk->sk_state != TCP_SYN_RECV) { - cxgb4_l2t_send(csk->egress_dev, reply_skb, e); - return; - } - } - cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); -} - /* * Add an skb to the deferred skb queue for processing from process context. */ @@ -1924,9 +1904,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) queue = csk->txq_idx; skb->sk = NULL; + chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, + CPL_ABORT_NO_RST, queue); do_abort_syn_rcv(child, lsk); - send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, - CPL_ABORT_NO_RST, queue); } static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) @@ -1956,8 +1936,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) if (!sock_owned_by_user(psk)) { int queue = csk->txq_idx; + chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); do_abort_syn_rcv(sk, psk); - send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); } else { skb->sk = sk; BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; @@ -1975,9 +1955,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) int queue = csk->txq_idx; if (is_neg_adv(req->status)) { - if (sk->sk_state == TCP_SYN_RECV) - chtls_set_tcb_tflag(sk, 0, 0); - kfree_skb(skb); return; } @@ -2003,12 +1980,11 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) return; - - chtls_release_resources(sk); - chtls_conn_done(sk); } chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue); + chtls_release_resources(sk); + chtls_conn_done(sk); } static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb) diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h index 3fac0c74a41f..df4451b30649 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h @@ -50,9 +50,6 @@ #define MIN_RCV_WND (24 * 1024U) #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000)) -/* ulp_mem_io + ulptx_idata + payload + padding */ -#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8) - /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ #define TX_HEADER_LEN \ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index a217fe72602d..753f4ba38f83 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -357,11 +357,15 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) if (ret) goto out_notcb; + if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN))) + goto out_notcb; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid); csk->wr_credits -= DIV_ROUND_UP(len, 16); csk->wr_unacked += DIV_ROUND_UP(len, 16); enqueue_wr(csk, skb); cxgb4_ofld_send(csk->egress_dev, skb); + skb = NULL; chtls_set_scmd(csk); /* Clear quiesce for Rx key */ diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 98bc5a4cd5e7..4c44d7c0c2c1 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ 
b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp) make_tx_data_wr(sk, skb, immdlen, len, credits_needed, completion); tp->snd_nxt += len; - tp->lsndtime = tcp_time_stamp(tp); + tp->lsndtime = tcp_jiffies32; if (completion) ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; } else { @@ -910,9 +910,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) return (__force int)cpu_to_be16(thdr->length); } -static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) +static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk) { - return (cdev->max_host_sndbuf - sk->sk_wmem_queued); + return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); } static int csk_wait_memory(struct chtls_dev *cdev, @@ -1210,6 +1210,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, copied = 0; csk = rcu_dereference_sk_user_data(sk); cdev = csk->cdev; + lock_sock(sk); timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); err = sk_stream_wait_connect(sk, &timeo); @@ -1437,7 +1438,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1470,7 +1471,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); @@ -1536,6 +1537,7 @@ skip_copy: tp->urg_data = 0; if ((avail + offset) >= skb->len) { + struct sk_buff *next_skb; if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { tp->copied_seq += skb->len; hws->rcvpld = skb->hdr_len; @@ -1545,8 +1547,10 @@ skip_copy: chtls_free_skb(sk, skb); buffers_freed++; hws->copied_seq = 0; - if (copied >= target && - !skb_peek(&sk->sk_receive_queue)) + next_skb = skb_peek(&sk->sk_receive_queue); + if (copied >= target && !next_skb) + break; + if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR) break; } } while (len > 0); @@ -1615,7 +1619,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, break; } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { /* Do not sleep, just process backlog. 
*/ release_sock(sk); lock_sock(sk); @@ -1743,7 +1747,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1774,7 +1778,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index c27e7160d2df..4ad4ffd90cee 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -630,13 +631,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -668,7 +669,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -679,7 +680,7 @@ static struct sec_request_el el->sgl_out = 
sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 991a4425f006..4d9d97c59ee3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1467,7 +1467,7 @@ static int safexcel_probe_generic(void *pdev, priv->ring[i].rdr_req = devm_kcalloc(dev, EIP197_DEFAULT_RING_SIZE, - sizeof(priv->ring[i].rdr_req), + sizeof(*priv->ring[i].rdr_req), GFP_KERNEL); if (!priv->ring[i].rdr_req) return -ENOMEM; diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 9181523ba760..acaa504d5a79 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -527,7 +527,7 @@ static void release_ixp_crypto(struct device *dev) if (crypt_virt) { dma_free_coherent(dev, - NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), + NPE_QLEN * sizeof(struct crypt_ctl), crypt_virt, crypt_phys); } } diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index 7e3ad085b5bd..efce3a83b35a 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c @@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp) static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) { struct mtk_ring **ring = cryp->ring; - int i, err = ENOMEM; + int i; for (i = 0; i < MTK_RING_MAX; i++) { ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); @@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) return 0; err_cleanup: - for (; i--; ) { + do { dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, ring[i]->res_base, ring[i]->res_dma); dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, ring[i]->cmd_base, ring[i]->cmd_dma); kfree(ring[i]); - } - return err; + } while (i--); + return -ENOMEM; } static int mtk_crypto_probe(struct platform_device *pdev) diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index bf8d2197bc11..f8a48a84df2a 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -20,6 +20,7 
@@ #include <crypto/sha.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #define DCP_MAX_CHANS 4 #define DCP_BUF_SZ PAGE_SIZE @@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); struct hash_alg_common *halg = crypto_hash_alg_common(tfm); - const int nents = sg_nents(req->src); uint8_t *in_buf = sdcp->coh->sha_in_buf; uint8_t *out_buf = sdcp->coh->sha_out_buf; - uint8_t *src_buf; - struct scatterlist *src; - unsigned int i, len, clen; + unsigned int i, len, clen, oft = 0; int ret; int fin = rctx->fini; if (fin) rctx->fini = 0; - for_each_sg(req->src, src, nents, i) { - src_buf = sg_virt(src); - len = sg_dma_len(src); + src = req->src; + len = req->nbytes; - do { - if (actx->fill + len > DCP_BUF_SZ) - clen = DCP_BUF_SZ - actx->fill; - else - clen = len; + while (len) { + if (actx->fill + len > DCP_BUF_SZ) + clen = DCP_BUF_SZ - actx->fill; + else + clen = len; - memcpy(in_buf + actx->fill, src_buf, clen); - len -= clen; - src_buf += clen; - actx->fill += clen; + scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, + 0); - /* - * If we filled the buffer and still have some - * more data, submit the buffer. - */ - if (len && actx->fill == DCP_BUF_SZ) { - ret = mxs_dcp_run_sha(req); - if (ret) - return ret; - actx->fill = 0; - rctx->init = 0; - } - } while (len); + len -= clen; + oft += clen; + actx->fill += clen; + + /* + * If we filled the buffer and still have some + * more data, submit the buffer. + */ + if (len && actx->fill == DCP_BUF_SZ) { + ret = mxs_dcp_run_sha(req); + if (ret) + return ret; + actx->fill = 0; + rctx->init = 0; + } } if (fin) { diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 2f53fbb74100..72edb10181b8 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -103,7 +103,7 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) dd->err = 0; } - err = pm_runtime_get_sync(dd->dev); + err = pm_runtime_resume_and_get(dd->dev); if (err < 0) { dev_err(dd->dev, "failed to get sync: %d\n", err); return err; @@ -1153,11 +1153,11 @@ static int omap_aes_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err); - goto err_res; + goto err_pm_disable; } omap_aes_dma_stop(dd); @@ -1267,6 +1267,7 @@ err_engine: omap_aes_dma_cleanup(dd); err_irq: tasklet_kill(&dd->done_task); +err_pm_disable: pm_runtime_disable(dev); err_res: dd = NULL; @@ -1317,7 +1318,7 @@ static int omap_aes_suspend(struct device *dev) static int omap_aes_resume(struct device *dev) { - pm_runtime_get_sync(dev); + pm_runtime_resume_and_get(dev); return 0; } #endif diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ac80bc6af093..d7c0c982ba43 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -165,8 +165,6 @@ struct omap_sham_hmac_ctx { }; struct omap_sham_ctx { - struct omap_sham_dev *dd; - unsigned long flags; /* fallback stuff */ @@ -455,6 +453,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); u32 val, mask; + if (likely(ctx->digcnt)) + omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); + /* * Setting ALGO_CONST only 
for the first iteration and * CLOSE_HASH only for the last one. Note that flags mode bits @@ -918,27 +919,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } +struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +{ + struct omap_sham_dev *dd; + + if (ctx->dd) + return ctx->dd; + + spin_lock_bh(&sham.lock); + dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); + list_move_tail(&dd->list, &sham.dev_list); + ctx->dd = dd; + spin_unlock_bh(&sham.lock); + + return dd; +} + static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = NULL, *tmp; + struct omap_sham_dev *dd; int bs = 0; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); + ctx->dd = NULL; - ctx->dd = dd; + dd = omap_sham_find_dev(ctx); + if (!dd) + return -ENODEV; ctx->flags = 0; @@ -1187,8 +1196,7 @@ err1: static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct omap_sham_dev *dd = tctx->dd; + struct omap_sham_dev *dd = ctx->dd; ctx->op = op; @@ -1198,7 +1206,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = ctx->dd; + struct omap_sham_dev *dd = omap_sham_find_dev(ctx); if (!req->nbytes) return 0; @@ -1302,21 +1310,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); - struct omap_sham_dev *dd = NULL, *tmp; int err, i; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); - err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; @@ -1334,7 +1329,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, memset(bctx->ipad + keylen, 0, bs - keylen); - if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { @@ -2136,6 +2131,7 @@ static int omap_sham_probe(struct platform_device *pdev) } dd->flags |= dd->pdata->flags; + sham.flags |= dd->pdata->flags; pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); @@ -2163,6 +2159,9 @@ static int omap_sham_probe(struct platform_device *pdev) spin_unlock(&sham.lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { + if (dd->pdata->algs_info[i].registered) + break; + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { struct ahash_alg *alg; @@ -2214,9 +2213,11 @@ static int omap_sham_remove(struct platform_device *pdev) list_del(&dd->list); spin_unlock(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) - for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); + 
dd->pdata->algs_info[i].registered--; + } tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 2680e1525db5..13ecbb0e5852 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1697,11 +1697,6 @@ static int spacc_probe(struct platform_device *pdev) goto err_clk_put; } - ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); - if (ret) - goto err_clk_disable; - - /* * Use an IRQ threshold of 50% as a default. This seems to be a * reasonable trade off of latency against throughput but can be @@ -1709,6 +1704,10 @@ static int spacc_probe(struct platform_device *pdev) */ engine->stat_irq_thresh = (engine->fifo_sz / 2); + ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); + if (ret) + goto err_clk_disable; + /* * Configure the interrupts. We only use the STAT_CNT interrupt as we * only submit a new packet for processing when we complete another in diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 1dc5ac859f7b..bdd4e36763f1 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -233,12 +233,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index a68358b31292..538d9592c951 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -233,12 +233,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index cd1cdf5305bc..4898ef41fd9f 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -330,19 +330,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) ret = adf_isr_alloc_msix_entry_table(accel_dev); if (ret) - return ret; - if (adf_enable_msix(accel_dev)) goto err_out; - if (adf_setup_bh(accel_dev)) - goto err_out; + ret = adf_enable_msix(accel_dev); + if (ret) + goto err_free_msix_table; - if (adf_request_irqs(accel_dev)) - goto err_out; + ret = adf_setup_bh(accel_dev); + if (ret) + goto err_disable_msix; + + ret = adf_request_irqs(accel_dev); + if (ret) + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_disable_msix: + adf_disable_msix(&accel_dev->accel_pci_dev); + +err_free_msix_table: + adf_isr_free_msix_entry_table(accel_dev); + err_out: - adf_isr_resource_free(accel_dev); - return -EFAULT; + return ret; } EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 2136cbe4bf6c..f65ebc4117d1 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -197,6 +197,7 @@ 
static int adf_init_ring(struct adf_etr_ring_data *ring) dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); + ring->base_addr = NULL; return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 4a73fc70f7a9..df9a1f35b832 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -304,17 +304,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) goto err_out; if (adf_setup_pf2vf_bh(accel_dev)) - goto err_out; + goto err_disable_msi; if (adf_setup_bh(accel_dev)) - goto err_out; + goto err_cleanup_pf2vf_bh; if (adf_request_msi_irq(accel_dev)) - goto err_out; + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_cleanup_pf2vf_bh: + adf_cleanup_pf2vf_bh(accel_dev); + +err_disable_msi: + adf_disable_msi(accel_dev); + err_out: - adf_vf_isr_resource_free(accel_dev); return -EFAULT; } EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index b50eb55f8f57..6b8ad3d67481 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -715,7 +715,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; dma_addr_t blp; - dma_addr_t bloutp = 0; + dma_addr_t bloutp; struct scatterlist *sg; size_t sz_out, sz = struct_size(bufl, bufers, n + 1); @@ -727,6 +727,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(!bufl)) return -ENOMEM; + for_each_sg(sgl, sg, n, i) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, blp))) goto err_in; @@ -760,10 +763,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) goto err_in; + + bufers = buflout->bufers; + for_each_sg(sglout, sg, n, i) + bufers[i].addr = DMA_MAPPING_ERROR; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, bloutp))) goto err_out; - bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -873,6 +880,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_bulk_req *msg; int digst_size = crypto_aead_authsize(aead_tfm); int ret, ctr = 0; + u32 cipher_len; + + cipher_len = areq->cryptlen - digst_size; + if (cipher_len % AES_BLOCK_SIZE != 0) + return -EINVAL; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) @@ -887,7 +899,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - cipher_param->cipher_length = areq->cryptlen - digst_size; + cipher_param->cipher_length = cipher_len; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); @@ -916,6 +928,9 @@ static int qat_alg_aead_enc(struct aead_request *areq) uint8_t *iv = areq->iv; int ret, ctr = 0; + if (areq->cryptlen % AES_BLOCK_SIZE != 0) + return -EINVAL; + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, 
areq->dst, qat_req); if (unlikely(ret)) return ret; diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index ff149e176f64..dac130bb807a 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -1189,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, unsigned short mask; unsigned short dr_offset = 0x10; - status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); if (CE_INUSE_CONTEXTS & ctx_enables) { if (ctx & 0x1) { pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..aeb03081415c 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated it. */ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 1b762eefc6c1..5e0568c0113e 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -233,12 +233,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 9e11c3480353..e68b856d03b6 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,8 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5)) -#define CRC_INIT_DEFAULT 0xFFFFFFFF +#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) +#define CRC_CR_REV_IN_BYTE BIT(5) +#define CRC_CR_REV_OUT BIT(7) +#define CRC32C_INIT_DEFAULT 0xFFFFFFFF #define CRC_AUTOSUSPEND_DELAY 50 @@ -38,8 +40,6 @@ struct stm32_crc { struct device *dev; void __iomem *regs; struct clk *clk; - u8 pending_data[sizeof(u32)]; - size_t nb_pending_bytes; }; struct stm32_crc_list { @@ -59,14 +59,13 @@ struct stm32_crc_ctx { struct stm32_crc_desc_ctx { u32 partial; /* crc32c: partial in first 4 bytes of that struct */ - struct stm32_crc *crc; }; static int stm32_crc32_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = 0; mctx->poly = CRC32_POLY_LE; return 0; } @@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = CRC32C_INIT_DEFAULT; mctx->poly = CRC32C_POLY_LE; return 0; } @@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, return 0; } +static struct stm32_crc *stm32_crc_get_next_crc(void) +{ + struct stm32_crc *crc; + + spin_lock_bh(&crc_list.lock); + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + if (crc) + 
list_move_tail(&crc->list, &crc_list.dev_list); + spin_unlock_bh(&crc_list.lock); + + return crc; +} + static int stm32_crc_init(struct shash_desc *desc) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; - spin_lock_bh(&crc_list.lock); - list_for_each_entry(crc, &crc_list.dev_list, list) { - ctx->crc = crc; - break; - } - spin_unlock_bh(&crc_list.lock); + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; - pm_runtime_get_sync(ctx->crc->dev); + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ - writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); - ctx->crc->nb_pending_bytes = 0; + ctx->partial = readl_relaxed(crc->regs + CRC_DR); - pm_runtime_mark_last_busy(ctx->crc->dev); - pm_runtime_put_autosuspend(ctx->crc->dev); + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); return 0; } @@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, unsigned int length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc *crc = ctx->crc; - u32 *d32; - unsigned int i; + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; pm_runtime_get_sync(crc->dev); - if (unlikely(crc->nb_pending_bytes)) { - while (crc->nb_pending_bytes != sizeof(u32) && length) { - /* Fill in pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); + /* + * Restore previously calculated CRC for this context as init value + * Restore polynomial configuration + * Configure in register for word input data, + * Configure out register in reversed bit mode data. 
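Note on the stm32-crc32 rework above: the per-device pending_data buffer and the cached ctx->crc pointer are gone; the only state carried between calls is the partial CRC kept in the shash descriptor context, and every update re-seeds CRC_INIT from it (bit-reversed, to match the peripheral's reversed I/O mode) before feeding more data. That is safe because a CRC is resumable: seeding a computation with a previous partial result is equivalent to one pass over the concatenated data, so any CRC unit picked from the device list can continue the work. A standalone plain-C demonstration of that property, using a bitwise CRC-32 with the reflected 0xEDB88320 polynomial and deliberately sharing nothing with the driver:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t crc32_update(uint32_t crc, const uint8_t *p, size_t len)
	{
		crc = ~crc;		/* standard pre/post inversion */
		while (len--) {
			crc ^= *p++;
			for (int k = 0; k < 8; k++)
				crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
		}
		return ~crc;
	}

	int main(void)
	{
		const uint8_t msg[] = "resumable crc state";
		size_t n = sizeof(msg) - 1;
		uint32_t whole = crc32_update(0, msg, n);
		uint32_t split = crc32_update(crc32_update(0, msg, 7),
					      msg + 7, n - 7);

		printf("%08x %08x\n", whole, split);	/* identical values */
		return 0;
	}

This also appears to be why stm32_crc32_cra_init() now starts from key 0, matching the generic crc32 shash default seed, while crc32c keeps the all-ones default its specification requires.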
+ */ + writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + + if (d8 != PTR_ALIGN(d8, sizeof(u32))) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { + writeb_relaxed(*d8++, crc->regs + CRC_DR); length--; } - - if (crc->nb_pending_bytes == sizeof(u32)) { - /* Process completed pending data */ - writel_relaxed(*(u32 *)crc->pending_data, - crc->regs + CRC_DR); - crc->nb_pending_bytes = 0; - } + /* Configure for word data */ + writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); } - d32 = (u32 *)d8; - for (i = 0; i < length >> 2; i++) - /* Process 32 bits data */ - writel_relaxed(*(d32++), crc->regs + CRC_DR); + for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) + writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); + + if (length) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (length--) + writeb_relaxed(*d8++, crc->regs + CRC_DR); + } /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); @@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); - /* Check for pending data (non 32 bits) */ - length &= 3; - if (likely(!length)) - return 0; - - if ((crc->nb_pending_bytes + length) >= sizeof(u32)) { - /* Shall not happen */ - dev_err(crc->dev, "Pending data overflow\n"); - return -EINVAL; - } - - d8 = (const u8 *)d32; - for (i = 0; i < length; i++) - /* Store pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); - return 0; } @@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); } +static unsigned int refcnt; +static DEFINE_MUTEX(refcnt_lock); static struct shash_alg algs[] = { /* CRC-32 */ { @@ -294,12 +307,18 @@ static int stm32_crc_probe(struct platform_device *pdev) list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) { - dev_err(dev, "Failed to register\n"); - clk_disable_unprepare(crc->clk); - return ret; + mutex_lock(&refcnt_lock); + if (!refcnt) { + ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); + if (ret) { + mutex_unlock(&refcnt_lock); + dev_err(dev, "Failed to register\n"); + clk_disable_unprepare(crc->clk); + return ret; + } } + refcnt++; + mutex_unlock(&refcnt_lock); dev_info(dev, "Initialized\n"); @@ -320,7 +339,10 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_lock(&refcnt_lock); + if (!--refcnt) + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_unlock(&refcnt_lock); pm_runtime_disable(crc->dev); pm_runtime_put_noidle(crc->dev); diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index ba5ea6434f9c..9b3511236ba2 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -537,7 +537,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) int ret; u32 cfg, hw_mode; - pm_runtime_get_sync(cryp->dev); + pm_runtime_resume_and_get(cryp->dev); /* Disable 
interrupt */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -2054,7 +2054,7 @@ static int stm32_cryp_remove(struct platform_device *pdev) if (!cryp) return -ENODEV; - ret = pm_runtime_get_sync(cryp->dev); + ret = pm_runtime_resume_and_get(cryp->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index cfc8e0e37bee..dcce15b55809 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -810,7 +810,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, struct stm32_hash_request_ctx *rctx) { - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); if (!(HASH_FLAGS_INIT & hdev->flags)) { stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); @@ -959,7 +959,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) u32 *preg; unsigned int i; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) cpu_relax(); @@ -997,7 +997,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) preg = rctx->hw_context; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); @@ -1553,7 +1553,7 @@ static int stm32_hash_remove(struct platform_device *pdev) if (!hdev) return -ENODEV; - ret = pm_runtime_get_sync(hdev->dev); + ret = pm_runtime_resume_and_get(hdev->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 7e5e092a23b3..dce3a6f96c97 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -30,6 +30,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) unsigned int ileft = areq->cryptlen; unsigned int oleft = areq->cryptlen; unsigned int todo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and out */ unsigned long flags; @@ -44,50 +46,62 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) spin_lock_irqsave(&ss->slock, flags); - for (i = 0; i < op->keylen; i += 4) - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); - writel(v, ss->base + SS_IV0 + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); - sg_miter_start(&mi, areq->src, sg_nents(areq->src), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), - SG_MITER_TO_SG | SG_MITER_ATOMIC); - sg_miter_next(&mi); - sg_miter_next(&mo); - if (!mi.addr || !mo.addr) { - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); - err = -EINVAL; - goto release_ss; - } ileft = areq->cryptlen / 4; oleft = areq->cryptlen / 4; oi = 0; oo = 0; do { - todo = min(rx_cnt, ileft); - todo = min_t(size_t, todo, (mi.length - oi) / 4); - if (todo) { - ileft -= todo; - writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); - oi += todo * 4; - } - if (oi == mi.length) { - sg_miter_next(&mi); - oi = 0; + if (ileft) { + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + 
miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + todo = min(rx_cnt, ileft); + todo = min_t(size_t, todo, (mi.length - oi) / 4); + if (todo) { + ileft -= todo; + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); + oi += todo * 4; + } + if (oi == mi.length) { + pi += mi.length; + oi = 0; + } + sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } todo = min(tx_cnt, oleft); todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { @@ -96,9 +110,10 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) oo += todo * 4; } if (oo == mo.length) { - sg_miter_next(&mo); oo = 0; + po += mo.length; } + sg_miter_stop(&mo); } while (oleft); if (areq->iv) { @@ -109,8 +124,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) } release_ss: - sg_miter_stop(&mi); - sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); return err; @@ -164,12 +177,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) unsigned int oleft = areq->cryptlen; unsigned int todo; struct sg_mapping_iter mi, mo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; unsigned int oi, oo; /* offset for in and out */ unsigned int ob = 0; /* offset in buf */ unsigned int obo = 0; /* offset in bufo*/ unsigned int obl = 0; /* length of data in bufo */ unsigned long flags; - bool need_fallback; + bool need_fallback = false; if (!areq->cryptlen) return 0; @@ -188,12 +203,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * we can use the SS optimized function */ while (in_sg && no_chunk == 1) { - if (in_sg->length % 4) + if ((in_sg->length | in_sg->offset) & 3u) no_chunk = 0; in_sg = sg_next(in_sg); } while (out_sg && no_chunk == 1) { - if (out_sg->length % 4) + if ((out_sg->length | out_sg->offset) & 3u) no_chunk = 0; out_sg = sg_next(out_sg); } @@ -206,28 +221,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) spin_lock_irqsave(&ss->slock, flags); - for (i = 0; i < op->keylen; i += 4) - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); - writel(v, ss->base + SS_IV0 + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); - sg_miter_start(&mi, areq->src, sg_nents(areq->src), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), - SG_MITER_TO_SG | SG_MITER_ATOMIC); - sg_miter_next(&mi); - sg_miter_next(&mo); - if (!mi.addr || !mo.addr) { - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); - err = -EINVAL; - goto release_ss; - } ileft = areq->cryptlen; oleft = areq->cryptlen; oi = 0; @@ -235,8 +239,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) while (oleft) { if (ileft) { - char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ - + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + 
SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } /* * todo is the number of consecutive 4byte word that we * can read from current SG @@ -258,52 +270,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) */ todo = min(rx_cnt * 4 - ob, ileft); todo = min_t(size_t, todo, mi.length - oi); - memcpy(buf + ob, mi.addr + oi, todo); + memcpy(ss->buf + ob, mi.addr + oi, todo); ileft -= todo; oi += todo; ob += todo; if (!(ob % 4)) { - writesl(ss->base + SS_RXFIFO, buf, + writesl(ss->base + SS_RXFIFO, ss->buf, ob / 4); ob = 0; } } if (oi == mi.length) { - sg_miter_next(&mi); + pi += mi.length; oi = 0; } + sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - dev_dbg(ss->dev, - "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n", - mode, - oi, mi.length, ileft, areq->cryptlen, rx_cnt, - oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); if (!tx_cnt) continue; + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } /* todo in 4bytes word */ todo = min(tx_cnt, oleft / 4); todo = min_t(size_t, todo, (mo.length - oo) / 4); + if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; oo += todo * 4; if (oo == mo.length) { - sg_miter_next(&mo); + po += mo.length; oo = 0; } } else { - char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ - /* * read obl bytes in bufo, we read at maximum for * emptying the device */ - readsl(ss->base + SS_TXFIFO, bufo, tx_cnt); + readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt); obl = tx_cnt * 4; obo = 0; do { @@ -315,17 +332,19 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) */ todo = min_t(size_t, mo.length - oo, obl - obo); - memcpy(mo.addr + oo, bufo + obo, todo); + memcpy(mo.addr + oo, ss->bufo + obo, todo); oleft -= todo; obo += todo; oo += todo; if (oo == mo.length) { + po += mo.length; sg_miter_next(&mo); oo = 0; } } while (obo < obl); /* bufo must be fully used here */ } + sg_miter_stop(&mo); } if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { @@ -335,8 +354,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) } release_ss: - sg_miter_stop(&mi); - sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index 35a27a7145f8..9a2adc130d9a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -138,6 +138,8 @@ struct sun4i_ss_ctx { struct reset_control *reset; struct device *dev; struct resource *res; + char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ + char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ spinlock_t slock; /* control the use of the device */ #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG u32 seed[SS_SEED_LEN / BITS_PER_LONG]; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 56e3068c9947..8ef6e93e43f3 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -460,7 +460,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) /* * locate current (offending) descriptor */ 
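Note on the sun4i-ss changes above: one long-lived atomic sg_mapping_iter per direction is traded for a restart-per-pass pattern. Byte counters (pi, po) record how far input and output have progressed, and every pass re-opens the iterator with sg_miter_start() plus sg_miter_skip(), then closes it with sg_miter_stop() before the loop polls the FIFO again, so the mapping is only held while data is actually being copied. A minimal sketch of that pattern, assuming a hypothetical feed() consumer; only the scatterlist API calls are real:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static int sketch_consume(struct scatterlist *sgl, int nents,
				  unsigned long *progress, size_t chunk,
				  void (*feed)(const void *buf, size_t len))
	{
		struct sg_mapping_iter mi;
		size_t done = 0;

		sg_miter_start(&mi, sgl, nents,
			       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
		/* resume where the previous pass stopped */
		if (*progress && !sg_miter_skip(&mi, *progress))
			return -EINVAL;

		while (done < chunk && sg_miter_next(&mi)) {
			size_t n = min_t(size_t, mi.length, chunk - done);

			feed(mi.addr, n);	/* e.g. writesl() into a FIFO */
			done += n;
		}
		sg_miter_stop(&mi);	/* drop the mapping before any wait */

		*progress += done;
		return 0;
	}

The linearization buffers moving into struct sun4i_ss_ctx (ss->buf, ss->bufo) fit the same rework: apparently their partially filled contents must now survive between loop iterations, which stack buffers re-declared inside the loop could not guarantee, and the existing ss->slock already serializes users of the device.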
-static u32 current_desc_hdr(struct device *dev, int ch) +static __be32 current_desc_hdr(struct device *dev, int ch) { struct talitos_private *priv = dev_get_drvdata(dev); int tail, iter; @@ -478,7 +478,7 @@ static u32 current_desc_hdr(struct device *dev, int ch) iter = tail; while (priv->chan[ch].fifo[iter].dma_desc != cur_desc && - priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) { + priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) { iter = (iter + 1) & (priv->fifo_len - 1); if (iter == tail) { dev_err(dev, "couldn't locate current descriptor\n"); @@ -486,7 +486,7 @@ static u32 current_desc_hdr(struct device *dev, int ch) } } - if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { + if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) { struct talitos_edesc *edesc; edesc = container_of(priv->chan[ch].fifo[iter].desc, @@ -501,13 +501,13 @@ static u32 current_desc_hdr(struct device *dev, int ch) /* * user diagnostics; report root cause of error based on execution unit status */ -static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) +static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr) { struct talitos_private *priv = dev_get_drvdata(dev); int i; if (!desc_hdr) - desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); + desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF)); switch (desc_hdr & DESC_HDR_SEL0_MASK) { case DESC_HDR_SEL0_AFEU: @@ -1097,11 +1097,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, */ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, unsigned int offset, int datalen, int elen, - struct talitos_ptr *link_tbl_ptr) + struct talitos_ptr *link_tbl_ptr, int align) { int n_sg = elen ? sg_count + 1 : sg_count; int count = 0; int cryptlen = datalen + elen; + int padding = ALIGN(cryptlen, align) - cryptlen; while (cryptlen && sg && n_sg--) { unsigned int len = sg_dma_len(sg); @@ -1125,7 +1126,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, offset += datalen; } to_talitos_ptr(link_tbl_ptr + count, - sg_dma_address(sg) + offset, len, 0); + sg_dma_address(sg) + offset, sg_next(sg) ? 
len : len + padding, 0); to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); count++; cryptlen -= len; @@ -1148,10 +1149,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, unsigned int len, struct talitos_edesc *edesc, struct talitos_ptr *ptr, int sg_count, unsigned int offset, int tbl_off, int elen, - bool force) + bool force, int align) { struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + int aligned_len = ALIGN(len, align); if (!src) { to_talitos_ptr(ptr, 0, 0, is_sec1); @@ -1159,22 +1161,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, } to_talitos_ptr_ext_set(ptr, elen, is_sec1); if (sg_count == 1 && !force) { - to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); + to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1); return sg_count; } if (is_sec1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); + to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1); return sg_count; } sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen, - &edesc->link_tbl[tbl_off]); + &edesc->link_tbl[tbl_off], align); if (sg_count == 1 && !force) { /* Only one segment now, so no link tbl needed*/ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); return sg_count; } to_talitos_ptr(ptr, edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr), len, is_sec1); + tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1); to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); return sg_count; @@ -1186,7 +1188,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, unsigned int offset, int tbl_off) { return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, - tbl_off, 0, false); + tbl_off, 0, false, 1); } /* @@ -1255,7 +1257,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], sg_count, areq->assoclen, tbl_off, elen, - false); + false, 1); if (ret > 1) { tbl_off += ret; @@ -1275,7 +1277,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, elen = 0; ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], sg_count, areq->assoclen, tbl_off, elen, - is_ipsec_esp && !encrypt); + is_ipsec_esp && !encrypt, 1); tbl_off += ret; if (!encrypt && is_ipsec_esp) { @@ -1583,6 +1585,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc, bool sync_needed = false; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU && + (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR; /* first DWORD empty */ @@ -1603,8 +1607,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* * cipher in */ - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, - &desc->ptr[3], sg_count, 0, 0); + sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3], + sg_count, 0, 0, 0, false, is_ctr ? 
16 : 1); if (sg_count > 1) sync_needed = true; diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 1469b956948a..32825119e880 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* primary execution unit mode (MODE0) and derivatives */ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 82b316b2f537..ac420b201dd8 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -353,13 +353,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, int err; unsigned long flags; struct scatterlist outhdr, iv_sg, status_sg, **sgs; - int i; u64 dst_len; unsigned int num_out = 0, num_in = 0; int sg_total; uint8_t *iv; + struct scatterlist *sg; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + pr_err("Invalid number of src SG.\n"); + return src_nents; + } + dst_nents = sg_nents(req->dst); pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", @@ -405,6 +410,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, goto free; } + dst_len = min_t(unsigned int, req->nbytes, dst_len); pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", req->nbytes, dst_len); @@ -445,12 +451,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, vc_sym_req->iv = iv; /* Source data */ - for (i = 0; i < src_nents; i++) - sgs[num_out++] = &req->src[i]; + for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) + sgs[num_out++] = sg; /* Destination data */ - for (i = 0; i < dst_nents; i++) - sgs[num_out + num_in++] = &req->dst[i]; + for (sg = req->dst; sg; sg = sg_next(sg)) + sgs[num_out + num_in++] = sg; /* Status */ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); @@ -580,10 +586,11 @@ static void virtio_crypto_ablkcipher_finalize_req( scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); - crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, - req, err); kzfree(vc_sym_req->iv); virtcrypto_clear_request(&vc_sym_req->base); + + crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, + req, err); } static struct virtio_crypto_algo virtio_crypto_algs[] = { { diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 8fafbeab510a..eccdda1f7b71 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -227,7 +227,7 @@ static void dax_region_unregister(void *region) struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, int target_node, unsigned int align, - unsigned long pfn_flags) + unsigned long long pfn_flags) { struct dax_region *dax_region; diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h index 8619e3299943..9e4eba67e8b9 100644 --- a/drivers/dax/bus.h +++ b/drivers/dax/bus.h @@ -11,7 +11,7 @@ struct dax_region; void dax_region_put(struct dax_region *dax_region); struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, int target_node, unsigned int align, - unsigned long flags); + unsigned long long flags); enum dev_dax_subsys { 
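/*
 * Background for the unsigned long -> unsigned long long change in
 * this and the following dax hunks: the pfn_t flags (PFN_DEV, PFN_MAP,
 * ...) are defined in the uppermost bits of a 64-bit value in
 * include/linux/pfn_t.h, roughly
 *
 *	#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
 *
 * so storing them in a 32-bit unsigned long would silently truncate
 * them to zero on 32-bit builds.
 */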
DEV_DAX_BUS, diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index 6ccca3b890d6..3107ce80e809 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -32,7 +32,7 @@ struct dax_region { struct device *dev; unsigned int align; struct resource res; - unsigned long pfn_flags; + unsigned long long pfn_flags; }; /** diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index 3d0a7e702c94..1e678bdf5aed 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *dev) resource_size_t kmem_size; resource_size_t kmem_end; struct resource *new_res; + const char *new_res_name; int numa_node; int rc; @@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *dev) kmem_size &= ~(memory_block_size_bytes() - 1); kmem_end = kmem_start + kmem_size; - /* Region is permanently reserved. Hot-remove not yet implemented. */ - new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev)); + new_res_name = kstrdup(dev_name(dev), GFP_KERNEL); + if (!new_res_name) + return -ENOMEM; + + /* Region is permanently reserved if hotremove fails. */ + new_res = request_mem_region(kmem_start, kmem_size, new_res_name); if (!new_res) { dev_warn(dev, "could not reserve region [%pa-%pa]\n", &kmem_start, &kmem_end); + kfree(new_res_name); return -EBUSY; } @@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *dev) * unknown to us that will break add_memory() below. */ new_res->flags = IORESOURCE_SYSTEM_RAM; - new_res->name = dev_name(dev); rc = add_memory(numa_node, new_res->start, resource_size(new_res)); if (rc) { release_resource(new_res); kfree(new_res); + kfree(new_res_name); return rc; } dev_dax->dax_kmem_res = new_res; @@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct device *dev) struct resource *res = dev_dax->dax_kmem_res; resource_size_t kmem_start = res->start; resource_size_t kmem_size = resource_size(res); + const char *res_name = res->name; int rc; /* @@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct device *dev) /* Release and free dax resources */ release_resource(res); kfree(res); + kfree(res_name); dev_dax->dax_kmem_res = NULL; return 0; diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 26a654dbc69a..8074e5de815b 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -318,11 +318,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access); bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, int blocksize, sector_t start, sector_t len) { + if (!dax_dev) + return false; + if (!dax_alive(dax_dev)) return false; return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len); } +EXPORT_SYMBOL_GPL(dax_supported); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) @@ -716,6 +720,7 @@ err_chrdev: static void __exit dax_core_exit(void) { + dax_bus_exit(); unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); dax_fs_exit(); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ff81b7cdab71..c79652ee94be 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -319,7 +319,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq, devfreq->previous_freq = new_freq; if (devfreq->suspend_freq) - devfreq->resume_freq = cur_freq; + devfreq->resume_freq = new_freq; return err; } @@ -902,7 +902,9 @@ int devfreq_suspend_device(struct devfreq *devfreq) } if (devfreq->suspend_freq) { + mutex_lock(&devfreq->lock); ret = devfreq_set_target(devfreq, 
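/*
 * devfreq_set_target() updates previous_freq/resume_freq and is
 * otherwise only reached from update_devfreq() with devfreq->lock
 * held, so the suspend and resume paths are made to follow the same
 * locking pattern:
 *
 *	mutex_lock(&devfreq->lock);
 *	ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
 *	mutex_unlock(&devfreq->lock);
 */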
devfreq->suspend_freq, 0); + mutex_unlock(&devfreq->lock); if (ret) return ret; } @@ -930,7 +932,9 @@ int devfreq_resume_device(struct devfreq *devfreq) return 0; if (devfreq->resume_freq) { + mutex_lock(&devfreq->lock); ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0); + mutex_unlock(&devfreq->lock); if (ret) return ret; } diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 2e65d7279d79..027769e39f9b 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq, mutex_lock(&dmcfreq->lock); - if (target_rate >= dmcfreq->odt_dis_freq) - odt_enable = true; + if (dmcfreq->regmap_pmu) { + if (target_rate >= dmcfreq->odt_dis_freq) + odt_enable = true; - /* - * This makes a SMC call to the TF-A to set the DDR PD (power-down) - * timings and to enable or disable the ODT (on-die termination) - * resistors. - */ - arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0, - dmcfreq->odt_pd_arg1, - ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, - odt_enable, 0, 0, 0, &res); + /* + * This makes a SMC call to the TF-A to set the DDR PD + * (power-down) timings and to enable or disable the + * ODT (on-die termination) resistors. + */ + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0, + dmcfreq->odt_pd_arg1, + ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, + odt_enable, 0, 0, 0, &res); + } /* * If frequency scaling from low to high, adjust voltage first. @@ -364,16 +366,21 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) if (res.a0) { dev_err(dev, "Failed to set dram param: %ld\n", res.a0); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } } } node = of_parse_phandle(np, "rockchip,pmu", 0); - if (node) { - data->regmap_pmu = syscon_node_to_regmap(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); + if (!node) + goto no_pmu; + + data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) { + ret = PTR_ERR(data->regmap_pmu); + goto err_edev; } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); @@ -391,9 +398,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_edev; }; +no_pmu: arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT, 0, 0, 0, 0, &res); @@ -425,7 +434,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) */ if (dev_pm_opp_of_add_table(dev)) { dev_err(dev, "Invalid operating-points in device tree.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } of_property_read_u32(np, "upthreshold", @@ -465,6 +475,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) err_free_opp: dev_pm_opp_of_remove_table(&pdev->dev); +err_edev: + devfreq_event_disable_edev(data->edev); + return ret; } diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index a6ba75f4106d..e273011c83fb 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -68,6 +68,8 @@ #define KHZ 1000 +#define KHZ_MAX (ULONG_MAX / KHZ) + /* Assume that the bus is saturated if the utilization is 25% */ #define BUS_SATURATION_RATIO 25 @@ -169,7 +171,7 @@ struct tegra_actmon_emc_ratio { }; static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { - { 1400000, ULONG_MAX }, + { 1400000, KHZ_MAX }, { 1200000, 750000 }, { 1100000, 600000 }, { 1000000, 500000 }, diff --git 
a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 0fb0358f0073..758de0e9b2dd 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -45,17 +45,64 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - mutex_lock(&dmabuf->lock); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - mutex_unlock(&dmabuf->lock); + spin_unlock(&dmabuf->name_lock); return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? name : ""); } +static void dma_buf_release(struct dentry *dentry) +{ + struct dma_buf *dmabuf; + + dmabuf = dentry->d_fsdata; + if (unlikely(!dmabuf)) + return; + + BUG_ON(dmabuf->vmapping_counter); + + /* + * Any fences that a dma-buf poll can wait on should be signaled + * before releasing dma-buf. This is the responsibility of each + * driver that uses the reservation objects. + * + * If you hit this BUG() it means someone dropped their ref to the + * dma-buf while still having pending operation to the buffer. + */ + BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); + + dmabuf->ops->release(dmabuf); + + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) + dma_resv_fini(dmabuf->resv); + + module_put(dmabuf->owner); + kfree(dmabuf->name); + kfree(dmabuf); +} + +static int dma_buf_file_release(struct inode *inode, struct file *file) +{ + struct dma_buf *dmabuf; + + if (!is_dma_buf_file(file)) + return -EINVAL; + + dmabuf = file->private_data; + + mutex_lock(&db_list.lock); + list_del(&dmabuf->list_node); + mutex_unlock(&db_list.lock); + + return 0; +} + static const struct dentry_operations dma_buf_dentry_ops = { .d_dname = dmabuffs_dname, + .d_release = dma_buf_release, }; static struct vfsmount *dma_buf_mnt; @@ -77,42 +124,6 @@ static struct file_system_type dma_buf_fs_type = { .kill_sb = kill_anon_super, }; -static int dma_buf_release(struct inode *inode, struct file *file) -{ - struct dma_buf *dmabuf; - - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; - - BUG_ON(dmabuf->vmapping_counter); - - /* - * Any fences that a dma-buf poll can wait on should be signaled - * before releasing dma-buf. This is the responsibility of each - * driver that uses the reservation objects. - * - * If you hit this BUG() it means someone dropped their ref to the - * dma-buf while still having pending operation to the buffer. 
- */ - BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); - - dmabuf->ops->release(dmabuf); - - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); - - if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) - dma_resv_fini(dmabuf->resv); - - module_put(dmabuf->owner); - kfree(dmabuf->name); - kfree(dmabuf); - return 0; -} - static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -341,8 +352,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) kfree(name); goto out_unlock; } + spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; + spin_unlock(&dmabuf->name_lock); out_unlock: mutex_unlock(&dmabuf->lock); @@ -388,7 +401,8 @@ static long dma_buf_ioctl(struct file *file, return ret; - case DMA_BUF_SET_NAME: + case DMA_BUF_SET_NAME_A: + case DMA_BUF_SET_NAME_B: return dma_buf_set_name(dmabuf, (const char __user *)arg); default: @@ -404,14 +418,14 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - mutex_lock(&dmabuf->lock); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - mutex_unlock(&dmabuf->lock); + spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, + .release = dma_buf_file_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, @@ -540,6 +554,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 2c136aee3e79..052a41e2451c 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence) } EXPORT_SYMBOL(dma_fence_free); +static bool __dma_fence_enable_signaling(struct dma_fence *fence) +{ + bool was_set; + + lockdep_assert_held(fence->lock); + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return false; + + if (!was_set && fence->ops->enable_signaling) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + return false; + } + } + + return true; +} + /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable @@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return; - spin_lock_irqsave(fence->lock, flags); - - if (!fence->ops->enable_signaling(fence)) - dma_fence_signal_locked(fence); - - spin_unlock_irqrestore(fence->lock, flags); - } + spin_lock_irqsave(fence->lock, flags); + __dma_fence_enable_signaling(fence); + 
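/*
 * Contract of the new helper, as used by all three call sites in this
 * file: __dma_fence_enable_signaling() must be called with fence->lock
 * held; it returns true while the fence is unsignaled (callbacks and
 * waits may be queued) and false once it is signaled, signaling the
 * fence itself when ->enable_signaling() refuses. A typical caller:
 *
 *	spin_lock_irqsave(fence->lock, flags);
 *	if (__dma_fence_enable_signaling(fence))
 *		list_add_tail(&cb->node, &fence->cb_list);
 *	spin_unlock_irqrestore(fence->lock, flags);
 */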
spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); @@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, { unsigned long flags; int ret = 0; - bool was_set; if (WARN_ON(!fence || !func)) return -EINVAL; @@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, spin_lock_irqsave(fence->lock, flags); - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - ret = -ENOENT; - else if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - ret = -ENOENT; - } - } - - if (!ret) { + if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); - } else + } else { INIT_LIST_HEAD(&cb->node); + ret = -ENOENT; + } + spin_unlock_irqrestore(fence->lock, flags); return ret; @@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; - bool was_set; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return ret; @@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) goto out; } - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!__dma_fence_enable_signaling(fence)) goto out; - if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - goto out; - } - } - if (!timeout) { ret = 0; goto out; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 709002515550..242a9ec295cf 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -161,7 +161,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) max = max(old->shared_count + num_fences, old->shared_max * 2); } else { - max = 4; + max = max(4ul, roundup_pow_of_two(num_fences)); } new = dma_resv_list_alloc(max); diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 8a05db3343d3..dcbcb712de6e 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); - return; + break; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } + + acpi_put_table((struct acpi_table_header *)csrt); } /** diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 672c73b4a2d4..303bc3e601a1 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1667,13 +1667,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - atslave = kzalloc(sizeof(*atslave), GFP_KERNEL); - if (!atslave) + atslave = kmalloc(sizeof(*atslave), GFP_KERNEL); + if (!atslave) { + put_device(&dmac_pdev->dev); return NULL; + } atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; /* @@ -1702,8 +1706,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, atslave->dma_dev = &dmac_pdev->dev; chan = 
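/*
 * Why the new error handling below: of_find_device_by_node() returns
 * the platform device with a reference held (it is built on
 * bus_find_device(), which calls get_device()), so every early exit
 * from this xlate callback must pair it with put_device() and free
 * the slave data, e.g.:
 *
 *	pdev = of_find_device_by_node(dma_spec->np);	// +1 reference
 *	...
 *	put_device(&pdev->dev);				// drop it on failure
 */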
dma_request_channel(mask, at_dma_filter, atslave); - if (!chan) + if (!chan) { + put_device(&dmac_pdev->dev); + kfree(atslave); return NULL; + } atchan = to_at_dma_chan(chan); atchan->per_if = dma_spec->args[0] & 0xff; diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index bf95f1d551c5..e27043427653 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, unsigned long flags; unsigned long residue = 0; + spin_lock_irqsave(&jzchan->vchan.lock, flags); + status = dma_cookie_status(chan, cookie, txstate); if ((status == DMA_COMPLETE) || (txstate == NULL)) - return status; - - spin_lock_irqsave(&jzchan->vchan.lock, flags); + goto out_unlock_irqrestore; vdesc = vchan_find_desc(&jzchan->vchan, cookie); if (vdesc) { @@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) status = DMA_ERROR; +out_unlock_irqrestore: spin_unlock_irqrestore(&jzchan->vchan.lock, flags); return status; } @@ -885,24 +886,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) return -EINVAL; } - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - - jzdma->irq = ret; - - ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), - jzdma); - if (ret) { - dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); - return ret; - } - jzdma->clk = devm_clk_get(dev, NULL); if (IS_ERR(jzdma->clk)) { dev_err(dev, "failed to get clock\n"); ret = PTR_ERR(jzdma->clk); - goto err_free_irq; + return ret; } clk_prepare_enable(jzdma->clk); @@ -955,10 +943,23 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzchan->vchan.desc_free = jz4780_dma_desc_free; } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_disable_clk; + + jzdma->irq = ret; + + ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), + jzdma); + if (ret) { + dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); + goto err_disable_clk; + } + ret = dmaenginem_async_device_register(dd); if (ret) { dev_err(dev, "failed to register device\n"); - goto err_disable_clk; + goto err_free_irq; } /* Register with OF DMA helpers. 
*/ @@ -966,17 +967,17 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzdma); if (ret) { dev_err(dev, "failed to register OF DMA controller\n"); - goto err_disable_clk; + goto err_free_irq; } dev_info(dev, "JZ4780 DMA controller initialised\n"); return 0; -err_disable_clk: - clk_disable_unprepare(jzdma->clk); - err_free_irq: free_irq(jzdma->irq, jzdma); + +err_disable_clk: + clk_disable_unprepare(jzdma->clk); return ret; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a2cadfa2e6d7..238936e2dfe2 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info) struct dmatest_thread *thread; list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) + if (!thread->done && !thread->pending) return true; } } @@ -662,8 +662,8 @@ static int dmatest_func(void *data) flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ktime = ktime_get(); - while (!kthread_should_stop() - && !(params->iterations && total_tests >= params->iterations)) { + while (!(kthread_should_stop() || + (params->iterations && total_tests >= params->iterations))) { struct dma_async_tx_descriptor *tx = NULL; struct dmaengine_unmap_data *um; dma_addr_t *dsts; @@ -1166,10 +1166,13 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) mutex_unlock(&info->lock); return ret; } else if (dmatest_run) { - if (is_threaded_test_pending(info)) - start_threaded_tests(info); - else - pr_info("Could not start test, no channels configured\n"); + if (!is_threaded_test_pending(info)) { + pr_info("No channels configured, continue with any\n"); + if (!is_threaded_test_run(info)) + stop_threaded_test(info); + add_threaded_test(info); + } + start_threaded_tests(info); } else { stop_threaded_test(info); } @@ -1215,15 +1218,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp) add_threaded_test(info); /* Check if channel was added successfully */ - dtc = list_last_entry(&info->channels, struct dmatest_chan, node); - - if (dtc->chan) { + if (!list_empty(&info->channels)) { /* * if new channel was not successfully added, revert the * "test_channel" string to the name of the last successfully * added channel. exception for when users issues empty string * to channel parameter. 
*/ + dtc = list_last_entry(&info->channels, struct dmatest_chan, node); if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0) && (strcmp("", strim(test_channel)) != 0)) { ret = -EINVAL; diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index ff392c01bad1..31577316f80b 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -85,12 +85,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) if (desc->chunk) { /* Create and add new element into the linked list */ - desc->chunks_alloc++; - list_add_tail(&chunk->list, &desc->chunk->list); if (!dw_edma_alloc_burst(chunk)) { kfree(chunk); return NULL; } + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); } else { /* List head */ chunk->burst = NULL; @@ -391,7 +391,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (xfer->cyclic) { burst->dar = xfer->xfer.cyclic.paddr; } else { - burst->dar = sg_dma_address(sg); + burst->dar = dst_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -399,14 +399,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - src_addr += sg_dma_len(sg); } } else { burst->dar = dst_addr; if (xfer->cyclic) { burst->sar = xfer->xfer.cyclic.paddr; } else { - burst->sar = sg_dma_address(sg); + burst->sar = src_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -414,12 +413,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - dst_addr += sg_dma_len(sg); } } - if (!xfer->cyclic) + if (!xfer->cyclic) { + src_addr += sg_dma_len(sg); + dst_addr += sg_dma_len(sg); sg = sg_next(sg); + } } return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index e5162690de8f..db25f9b7778c 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -10,6 +10,7 @@ config DW_DMAC_CORE config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller. 
This @@ -18,6 +19,7 @@ config DW_DMAC config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI + depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 21cb2a58dbd2..5e7fdc0b6e3d 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) - return; - dw->initialize_chan(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); - - set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); } /*----------------------------------------------------------------------*/ @@ -777,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) if (dws->dma_dev != chan->device->dev) return false; + /* permit channels in accordance with the channels mask */ + if (dws->channels && !(dws->channels & dwc->mask)) + return false; + /* We have to copy data since dws can be temporary storage */ memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave)); @@ -954,8 +953,6 @@ static void dwc_issue_pending(struct dma_chan *chan) void do_dw_dma_off(struct dw_dma *dw) { - unsigned int i; - dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); @@ -966,9 +963,6 @@ void do_dw_dma_off(struct dw_dma *dw) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); } void do_dw_dma_on(struct dw_dma *dw) @@ -1032,8 +1026,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) /* Clear custom channel configuration */ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); - clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); - /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c index 7a085b3c1854..d9810980920a 100644 --- a/drivers/dma/dw/dw.c +++ b/drivers/dma/dw/dw.c @@ -14,7 +14,7 @@ static void dw_dma_initialize_chan(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); - u32 cfghi = DWC_CFGH_FIFO_MODE; + u32 cfghi = is_slave_direction(dwc->direction) ? 
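/*
 * Rationale, roughly, for dropping FIFO_MODE on slave channels: with
 * CFGH.FIFO_MODE set the channel batches data until the FIFO is about
 * half full/empty before bursting, which helps mem-to-mem throughput
 * but can leave a slow peripheral waiting on a stalled transfer, so
 * the resolved expression is
 *
 *	cfghi = is_slave_direction(dwc->direction)
 *			? 0			// move data as soon as possible
 *			: DWC_CFGH_FIFO_MODE;	// batch for bus efficiency
 */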
0 : DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c index 9e27831dee32..43e975fb6714 100644 --- a/drivers/dma/dw/of.c +++ b/drivers/dma/dw/of.c @@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, }; dma_cap_mask_t cap; - if (dma_spec->args_count != 3) + if (dma_spec->args_count < 3 || dma_spec->args_count > 4) return NULL; slave.src_id = dma_spec->args[0]; slave.dst_id = dma_spec->args[0]; slave.m_master = dma_spec->args[1]; slave.p_master = dma_spec->args[2]; + if (dma_spec->args_count >= 4) + slave.channels = dma_spec->args[3]; if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || slave.m_master >= dw->pdata->nr_masters || - slave.p_master >= dw->pdata->nr_masters)) + slave.p_master >= dw->pdata->nr_masters || + slave.channels >= BIT(dw->pdata->nr_channels))) return NULL; dma_cap_zero(cap); diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b1a7ca91701a..d6010486ee50 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -347,26 +347,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, /* * TCD parameters are stored in struct fsl_edma_hw_tcd in little * endian format. However, we need to load the TCD registers in - * big- or little-endian obeying the eDMA engine model endian. + * big- or little-endian obeying the eDMA engine model endian, + * and this is performed from specific edma_write functions */ edma_writew(edma, 0, &regs->tcd[ch].csr); - edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr); - edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr); - edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr); - edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff); + edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr); + edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr); - edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes); - edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast); + edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr); + edma_writew(edma, tcd->soff, &regs->tcd[ch].soff); - edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer); - edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter); - edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff); + edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes); + edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast); - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), + edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer); + edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter); + edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff); + + edma_writel(edma, (s32)tcd->dlast_sga, &regs->tcd[ch].dlast_sga); - edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr); + edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr); } static inline diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 5eaa2902ed39..6ff7063a8a2f 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -33,7 +33,7 @@ #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) -#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) +#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0)) #define EDMA_TCD_ATTR_SSIZE_8BIT 0 #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) #define EDMA_TCD_ATTR_SSIZE_32BIT
(EDMA_TCD_ATTR_DSIZE_32BIT << 8) diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b626c06ac2e0..6760ae4b2956 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) fsl_chan = &fsl_edma->chans[ch]; spin_lock(&fsl_chan->vchan.lock); + + if (!fsl_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&fsl_chan->vchan.lock); + continue; + } + if (!fsl_chan->edesc->iscyclic) { list_del(&fsl_chan->edesc->vdesc.node); vchan_cookie_complete(&fsl_chan->edesc->vdesc); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ad72b3f42ffa..eae385a312b8 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op) { struct fsldma_device *fdev; struct device_node *child; + unsigned int i; int err; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); @@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op) return 0; out_free_fdev: + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + if (fdev->chan[i]) + fsl_dma_chan_remove(fdev->chan[i]); + } irq_dispose_mapping(fdev->irq); iounmap(fdev->regs); out_free: @@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op) if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); } + irq_dispose_mapping(fdev->irq); iounmap(fdev->regs); kfree(fdev); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 56f18ae99233..308bed0a560a 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -205,10 +205,10 @@ struct fsldma_chan { #else static u64 fsl_ioread64(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32; + u32 val_lo = in_le32((u32 __iomem *)addr); + u32 val_hi = in_le32((u32 __iomem *)addr + 1); - return fsl_addr_hi | in_le32((u32 *)fsl_addr); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64(u64 val, u64 __iomem *addr) @@ -219,10 +219,10 @@ static void fsl_iowrite64(u64 val, u64 __iomem *addr) static u64 fsl_ioread64be(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32; + u32 val_hi = in_be32((u32 __iomem *)addr); + u32 val_lo = in_be32((u32 __iomem *)addr + 1); - return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1)); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64be(u64 val, u64 __iomem *addr) diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c index 07cc7320a614..9045a6f7f589 100644 --- a/drivers/dma/hsu/pci.c +++ b/drivers/dma/hsu/pci.c @@ -26,22 +26,12 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) { struct hsu_dma_chip *chip = dev; - struct pci_dev *pdev = to_pci_dev(chip->dev); u32 dmaisr; u32 status; unsigned short i; int ret = 0; int err; - /* - * On Intel Tangier B0 and Anniedale the interrupt line, disregarding - * to have different numbers, is shared between HSU DMA and UART IPs. - * Thus on such SoCs we are expecting that IRQ handler is called in - * UART driver only. 
- */ - if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) - return IRQ_HANDLED; - dmaisr = readl(chip->regs + HSU_PCI_DMAISR); for (i = 0; i < chip->hsu->nr_channels; i++) { if (dmaisr & 0x1) { @@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_register_irq; + /* + * On Intel Tangier B0 and Anniedale the interrupt line, despite + * having different numbers, is shared between HSU DMA and UART IPs. + * Thus on such SoCs the IRQ handler is expected to be called in + * the UART driver only. Instead of handling the spurious interrupt + * from HSU DMA here, wasting CPU time and delaying HSU UART interrupt + * handling, disable the interrupt entirely. + */ + if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) + disable_irq_nosync(chip->irq); + pci_set_drvdata(pdev, chip); return 0; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 18c011e57592..8e2a4d1f0be5 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -26,6 +26,18 @@ #include "../dmaengine.h" +int completion_timeout = 200; +module_param(completion_timeout, int, 0644); +MODULE_PARM_DESC(completion_timeout, + "set ioat completion timeout [msec] (default 200 [msec])"); +int idle_timeout = 2000; +module_param(idle_timeout, int, 0644); +MODULE_PARM_DESC(idle_timeout, + "set ioat idle timeout [msec] (default 2000 [msec])"); + +#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) +#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) + static char *chanerr_str[] = { "DMA Transfer Source Address Error", "DMA Transfer Destination Address Error", diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index b8e8e0b9693c..4ac9134962f3 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -99,8 +99,6 @@ struct ioatdma_chan { #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) - #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e15bd15a9ef6..e12b754e6398 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id) mcf_chan = &mcf_edma->chans[ch]; spin_lock(&mcf_chan->vchan.lock); + + if (!mcf_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&mcf_chan->vchan.lock); + continue; + } + if (!mcf_chan->edesc->iscyclic) { list_del(&mcf_chan->edesc->vdesc.node); vchan_cookie_complete(&mcf_chan->edesc->vdesc); diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 1a2028e1c29e..04d89eec11e7 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); - goto err_unregister; + goto err_free; } platform_set_drvdata(pdev, hsdma); @@ -1006,6 +1006,9 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return 0; +err_free: + mtk_hsdma_hw_deinit(hsdma); + of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd); diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index e7d1e12bf464..89d90c456c0c 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
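/*
 * The point of the mmp_tdma hunk below: tdmac->status sticks at
 * DMA_ERROR after a failed transfer, and the driver's prep paths
 * refuse to queue new work on a channel in that state, so resetting
 * it to DMA_COMPLETE when the descriptors are freed lets a sequence
 * like
 *
 *	dmaengine_terminate_all(chan);		// frees descriptors
 *	desc = dmaengine_prep_dma_cyclic(...);	// must succeed again
 *
 * recover the channel instead of failing forever.
 */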
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); tdmac->desc_arr = NULL; + if (tdmac->status == DMA_ERROR) + tdmac->status = DMA_COMPLETE; return; } @@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( if (!desc) goto err_out; - mmp_tdma_config_write(chan, direction, &tdmac->slave_config); + if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config)) + goto err_out; while (buf < buf_len) { desc = &tdmac->desc_arr[i]; diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index e3850f04f676..889a94af4c85 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -766,8 +766,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev) goto disable_clk; msi_desc = first_msi_entry(&pdev->dev); - if (!msi_desc) + if (!msi_desc) { + ret = -ENODEV; goto free_msi_irqs; + } xor_dev->msi_desc = msi_desc; ret = devm_request_irq(&pdev->dev, msi_desc->irq, diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..4bbf4172b9bf 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -69,12 +69,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 90bbcef99ef8..bb9c361e224b 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -175,13 +175,11 @@ struct owl_dma_txd { * @id: physical index to this channel * @base: virtual memory base for the dma channel * @vchan: the virtual channel currently being served by this physical channel - * @lock: a lock to use when altering an instance of this struct */ struct owl_dma_pchan { u32 id; void __iomem *base; struct owl_dma_vchan *vchan; - spinlock_t lock; }; /** @@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, for (i = 0; i < od->nr_pchans; i++) { pchan = &od->pchans[i]; - spin_lock_irqsave(&pchan->lock, flags); + spin_lock_irqsave(&od->lock, flags); if (!pchan->vchan) { pchan->vchan = vchan; - spin_unlock_irqrestore(&pchan->lock, flags); + spin_unlock_irqrestore(&od->lock, flags); break; } - spin_unlock_irqrestore(&pchan->lock, flags); + spin_unlock_irqrestore(&od->lock, flags); } return pchan; @@ -1203,6 +1201,7 @@ static int owl_dma_remove(struct platform_device *pdev) owl_dma_free(od); clk_disable_unprepare(od->clk); + dma_pool_destroy(od->lli_pool); return 0; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 581e7a290d98..a3b0b4c56a19 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev, } pci_set_master(pdev); + pd->dma.dev = &pdev->dev; err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); if (err) { @@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_free_irq; } - pd->dma.dev = &pdev->dev; INIT_LIST_HEAD(&pd->dma.channels); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..57b6555d6d04 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2788,14 +2788,14 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = 
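/*
 * The reordering here is the actual fix: brst_len has to be computed
 * before the narrow-bus clamp below so the clamp is not overwritten,
 * and the clamp now tests the client-requested burst size rather than
 * the rounded-up brst_size. E.g. with a 64-bit data bus and a 4-byte
 * burst:
 *
 *	4 * 8 = 32 < 64  ->  brst_len forced to 1
 *
 * so no multi-beat burst can straddle an MFIFO entry.
 */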
get_burst_len(desc, len); /* * If burst size is smaller than bus width then make sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. */ - if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) + if (burst * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index b218a013c260..8f7ceb698226 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) desc->residue = usb_dmac_get_current_residue(chan, desc, desc->sg_index - 1); desc->done_cookie = desc->vd.tx.cookie; + desc->vd.tx_result.result = DMA_TRANS_NOERROR; + desc->vd.tx_result.residue = desc->residue; vchan_cookie_complete(&desc->vd); /* Restart the next transfer if this driver has a next desc */ diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 5989b0893521..6c5771de32c6 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -488,8 +488,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_dma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_dma_stop(chan); chan->desc = NULL; } @@ -545,6 +547,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) if (!vdesc) return; + list_del(&vdesc->node); + chan->desc = to_stm32_dma_desc(vdesc); chan->next_sg = 0; } @@ -622,7 +626,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) } else { chan->busy = false; if (chan->next_sg == chan->desc->num_sgs) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; } diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 5838311cf990..ee1cbf3be75d 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) return; } + list_del(&vdesc->node); + chan->desc = to_stm32_mdma_desc(vdesc); hwdesc = chan->desc->node[0].hwdesc; chan->curr_hwdesc = 0; @@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c) LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_mdma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_mdma_stop(chan); chan->desc = NULL; } vchan_get_all_descriptors(&chan->vchan, &head); @@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; chan->busy = false; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 4a750e29bfb5..3fe27dbde5b2 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); - if (tdc->busy) - tegra_dma_terminate_all(dc); + tegra_dma_terminate_all(dc); spin_lock_irqsave(&tdc->lock, flags); list_splice_init(&tdc->pending_sg_req, &sg_req_list); diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 6e1268552f74..9068591bd684 100644 --- a/drivers/dma/tegra210-adma.c 
+++ b/drivers/dma/tegra210-adma.c @@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ret = pm_runtime_get_sync(tdc2dev(tdc)); if (ret < 0) { + pm_runtime_put_noidle(tdc2dev(tdc)); free_irq(tdc->irq, tdc); return ret; } @@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto rpm_disable; + } ret = tegra_adma_init(tdma); if (ret) @@ -900,7 +903,7 @@ static int tegra_adma_probe(struct platform_device *pdev) ret = dma_async_device_register(&tdma->dma_dev); if (ret < 0) { dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); - goto irq_dispose; + goto rpm_put; } ret = of_dma_controller_register(pdev->dev.of_node, diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 43acba2a1c0e..1b5f3e9f43d7 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -454,8 +454,8 @@ struct xilinx_dma_device { #define to_dma_tx_descriptor(tx) \ container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ - readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ - cond, delay_us, timeout_us) + readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \ + val, cond, delay_us, timeout_us) /* IO accessors */ static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) @@ -2431,7 +2431,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, has_dre = false; if (!has_dre) - xdev->common.copy_align = fls(width - 1); + xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || @@ -2543,7 +2543,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, struct device_node *node) { - int ret, i, nr_channels = 1; + int ret, i; + u32 nr_channels = 1; ret = of_property_read_u32(node, "dma-channels", &nr_channels); if ((ret < 0) && xdev->mcdma) @@ -2742,7 +2743,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) } /* Register the DMA engine with the core */ - dma_async_device_register(&xdev->common); + err = dma_async_device_register(&xdev->common); + if (err) { + dev_err(xdev->dev, "failed to register the dma device\n"); + goto error; + } err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107..d47749a35863 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = 
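/*
 * The ARLEN/AWLEN fields written just below encode log2 of the burst
 * length, so the byte counts clamped in zynqmp_dma_device_config()
 * map as, for instance:
 *
 *	src_burst_len = 64    -> __ilog2_u32(64)    = 6
 *	src_burst_len = 32768 -> __ilog2_u32(32768) = 15  (the maximum)
 *
 * and masking with ZYNQMP_DMA_ARLEN / ZYNQMP_DMA_AWLEN keeps the
 * shifted value inside its register field.
 */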
readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index cc5e56d752c8..b96d48659114 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -22,6 +22,9 @@ static struct ecc_settings **ecc_stngs; /* Number of Unified Memory Controllers */ static u8 num_umcs; +/* Device for the PCI component */ +static struct device *pci_ctl_dev; + /* * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing * bandwidth to a valid bit pattern. 
The 'set' operation finds the 'matching- @@ -215,7 +218,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17 || pvt->fam == 0x18) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -257,18 +260,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: - case 0x18: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -277,11 +269,17 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -1056,6 +1054,16 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) { + if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) + pvt->dram_type = MEM_LRDDR4; + else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) + pvt->dram_type = MEM_RDDR4; + else + pvt->dram_type = MEM_DDR4; + return; + } + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1101,16 +1109,6 @@ static void determine_memory_type(struct amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - case 0x18: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 
0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -2317,6 +2315,15 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F17_M60H_CPUS] = { + .ctl_name = "F17h_M60h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, [F17_M70H_CPUS] = { .ctl_name = "F17h_M70h", .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, @@ -2326,6 +2333,15 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -2661,6 +2677,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2) return -ENODEV; } + if (!pci_ctl_dev) + pci_ctl_dev = &pvt->F0->dev; + edac_dbg(1, "F0: %s\n", pci_name(pvt->F0)); edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); edac_dbg(1, "F6: %s\n", pci_name(pvt->F6)); @@ -2685,6 +2704,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2) return -ENODEV; } + if (!pci_ctl_dev) + pci_ctl_dev = &pvt->F2->dev; + edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); @@ -3366,6 +3388,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) fam_type = &family_types[F17_M30H_CPUS]; pvt->ops = &family_types[F17_M30H_CPUS].ops; break; + } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) { + fam_type = &family_types[F17_M60H_CPUS]; + pvt->ops = &family_types[F17_M60H_CPUS].ops; + break; } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { fam_type = &family_types[F17_M70H_CPUS]; pvt->ops = &family_types[F17_M70H_CPUS].ops; @@ -3380,6 +3406,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) family_types[F17_CPUS].ctl_name = "F18h"; break; + case 0x19: + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3611,21 +3643,10 @@ static void remove_one_instance(unsigned int nid) static void setup_pci_device(void) { - struct mem_ctl_info *mci; - struct amd64_pvt *pvt; - if (pci_ctl) return; - mci = edac_mc_find(0); - if (!mci) - return; - - pvt = mci->pvt_info; - if (pvt->umc) - pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR); - else - pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); + pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR); if (!pci_ctl) { pr_warn("%s(): Unable to create PCI control\n", __func__); pr_warn("%s(): PCI error report via EDAC not set\n", __func__); @@ -3639,6 +3660,7 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); @@ -3708,6 +3730,8 @@ static int __init amd64_edac_init(void) return 0; err_pci: + pci_ctl_dev = NULL; + msrs_free(msrs); msrs = NULL; @@ -3739,6 +3763,8 @@ static void __exit amd64_edac_exit(void) kfree(ecc_stngs); ecc_stngs = NULL; + pci_ctl_dev = 
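/*
 * pci_ctl_dev, introduced at the top of this file's diff, caches the
 * device of the first node that probes (F0 on UMC systems, F2 on older
 * families) so setup_pci_device() no longer needs edac_mc_find(0);
 * clearing it on the error and exit paths keeps a later modprobe from
 * reusing a stale pointer:
 *
 *	if (!pci_ctl_dev)
 *		pci_ctl_dev = &pvt->F0->dev;	// first instance wins
 *	...
 *	pci_ctl_dev = NULL;			// on amd64_edac_exit()
 */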
NULL; + msrs_free(msrs); msrs = NULL; } diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8c3cda81e619..d3e65745516e 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -120,8 +120,12 @@ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 /* * Function 1 - Address Map @@ -291,7 +295,9 @@ enum amd_families { F17_CPUS, F17_M10H_CPUS, F17_M30H_CPUS, + F17_M60H_CPUS, F17_M70H_CPUS, + F19_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 5634437bb39d..66669f9d690b 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev) /* register interrupt handler */ irq = platform_get_irq(pdev, 0); dev_dbg(&pdev->dev, "got irq %d\n", irq); - if (!irq) - return -ENODEV; + if (irq < 0) + return irq; rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH, DRV_NAME, ctx); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 0e7ea3591b78..5e7593753799 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) /* Error exit stack */ err_kobj_reg: + kobject_put(&edac_dev->kobj); module_put(edac_dev->owner); err_out: diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 72c9eb9fdffb..53042af7262e 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void) /* Error unwind stack */ kobject_init_and_add_fail: - kfree(edac_pci_top_main_kobj); + kobject_put(edac_pci_top_main_kobj); kzalloc_fail: module_put(THIS_MODULE); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 523dd56a798c..0031819402d0 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -488,6 +488,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) if (!force_load && idx < 0) return -ENODEV; } else { + force_load = true; idx = 0; } @@ -586,6 +587,9 @@ void ghes_edac_unregister(struct ghes *ghes) struct mem_ctl_info *mci; unsigned long flags; + if (!force_load) + return; + mutex_lock(&ghes_reg_mutex); if (!refcount_dec_and_test(&ghes_refcount)) diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index c370d5457e6b..8a1913860d08 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -6,6 +6,7 @@ */ #include <linux/kernel.h> +#include <linux/io.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/mce.h> @@ -19,14 +20,16 @@ #define i10nm_printk(level, fmt, arg...)
\ edac_printk(level, "i10nm", fmt, ##arg) -#define I10NM_GET_SCK_BAR(d, reg) \ +#define I10NM_GET_SCK_BAR(d, reg) \ pci_read_config_dword((d)->uracu, 0xd0, &(reg)) #define I10NM_GET_IMC_BAR(d, i, reg) \ pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg)) #define I10NM_GET_DIMMMTR(m, i, j) \ - (*(u32 *)((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)) + readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4) #define I10NM_GET_MCDDRTCFG(m, i, j) \ - (*(u32 *)((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)) + readl((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4) +#define I10NM_GET_MCMTR(m, i) \ + readl((m)->mbase + 0x20ef8 + (i) * 0x4000) #define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23) #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12) @@ -122,10 +125,22 @@ static int i10nm_get_all_munits(void) return 0; } +static struct res_config i10nm_cfg0 = { + .type = I10NM, + .decs_did = 0x3452, + .busno_cfg_offset = 0xcc, +}; + +static struct res_config i10nm_cfg1 = { + .type = I10NM, + .decs_did = 0x3452, + .busno_cfg_offset = 0xd0, +}; + static const struct x86_cpu_id i10nm_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, (kernel_ulong_t)&i10nm_cfg0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, (kernel_ulong_t)&i10nm_cfg0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, (kernel_ulong_t)&i10nm_cfg1 }, { } }; MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); @@ -134,7 +149,7 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan) { u32 mcmtr; - mcmtr = *(u32 *)(imc->mbase + 0x20ef8 + chan * 0x4000); + mcmtr = I10NM_GET_MCMTR(imc, chan); edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr); return !!GET_BITFIELD(mcmtr, 2, 2); @@ -162,7 +177,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci) mtr, mcddrtcfg, imc->mc, i, j); if (IS_DIMM_PRESENT(mtr)) - ndimms += skx_get_dimm_info(mtr, 0, dimm, + ndimms += skx_get_dimm_info(mtr, 0, 0, dimm, imc, i, j); else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) ndimms += skx_get_nvdimm_info(dimm, imc, i, j, @@ -235,6 +250,7 @@ static int __init i10nm_init(void) { u8 mc = 0, src_id = 0, node_id = 0; const struct x86_cpu_id *id; + struct res_config *cfg; const char *owner; struct skx_dev *d; int rc, i, off[3] = {0xd0, 0xc8, 0xcc}; @@ -250,11 +266,17 @@ static int __init i10nm_init(void) if (!id) return -ENODEV; + cfg = (struct res_config *)id->driver_data; + + /* Newer steppings have different offset for ATOM_TREMONT_D/ICELAKE_X */ + if (boot_cpu_data.x86_stepping >= 4) + cfg->busno_cfg_offset = 0xd0; + rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm); if (rc) return rc; - rc = skx_get_all_bus_mappings(0x3452, 0xcc, I10NM, &i10nm_edac_list); + rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list); if (rc < 0) goto fail; if (rc == 0) { diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 251f2b692785..0c72daa519ff 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -1074,16 +1074,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) PCI_DEVICE_ID_INTEL_5100_19, 0); if (!einj) { ret = -ENODEV; - goto bail_einj; + goto bail_mc_free; } rc = pci_enable_device(einj); if (rc < 0) { ret = rc; - goto bail_disable_einj; + goto bail_einj; } - mci->pdev = &pdev->dev; priv = mci->pvt_info; @@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct 
pci_device_id *id) bail_scrub: priv->scrub_enable = 0; cancel_delayed_work_sync(&(priv->i5100_scrubbing)); - edac_mc_free(mci); - -bail_disable_einj: pci_disable_device(einj); bail_einj: pci_dev_put(einj); +bail_mc_free: + edac_mc_free(mci); + bail_disable_ch1: pci_disable_device(ch1mm); diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index a71cca6eeb33..eea6b944c909 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1711,9 +1711,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) - tp_event = HW_EVENT_ERR_FATAL; - else tp_event = HW_EVENT_ERR_UNCORRECTED; + else + tp_event = HW_EVENT_ERR_FATAL; } else { tp_event = HW_EVENT_ERR_CORRECTED; } @@ -1816,7 +1816,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, struct mem_ctl_info *mci; i7_dev = get_i7core_dev(mce->socketid); - if (!i7_dev) + if (!i7_dev || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; mci = i7_dev->mci; @@ -1835,7 +1835,8 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, i7core_check_error(mci, mce); /* Advise mcelog that the errors were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block i7_mce_dec = { diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index d26300f9cb07..9be43b4f9c50 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -170,6 +170,8 @@ (n << (28 + (2 * skl) - PAGE_SHIFT)) static int nr_channels; +static struct pci_dev *mci_pdev; +static int ie31200_registered = 1; struct ie31200_priv { void __iomem *window; @@ -541,12 +543,16 @@ fail_free: static int ie31200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - edac_dbg(0, "MC:\n"); + int rc; + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; + rc = ie31200_probe1(pdev, ent->driver_data); + if (rc == 0 && !mci_pdev) + mci_pdev = pci_dev_get(pdev); - return ie31200_probe1(pdev, ent->driver_data); + return rc; } static void ie31200_remove_one(struct pci_dev *pdev) @@ -555,6 +561,8 @@ static void ie31200_remove_one(struct pci_dev *pdev) struct ie31200_priv *priv; edac_dbg(0, "\n"); + pci_dev_put(mci_pdev); + mci_pdev = NULL; mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; @@ -596,17 +604,53 @@ static struct pci_driver ie31200_driver = { static int __init ie31200_init(void) { + int pci_rc, i; + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - return pci_register_driver(&ie31200_driver); + pci_rc = pci_register_driver(&ie31200_driver); + if (pci_rc < 0) + goto fail0; + + if (!mci_pdev) { + ie31200_registered = 0; + for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) { + mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor, + ie31200_pci_tbl[i].device, + NULL); + if (mci_pdev) + break; + } + if (!mci_pdev) { + edac_dbg(0, "ie31200 pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); + if (pci_rc < 0) { + edac_dbg(0, "ie31200 init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + return 0; + +fail1: + pci_unregister_driver(&ie31200_driver); +fail0: + pci_dev_put(mci_pdev); + + return pci_rc; } static void __exit ie31200_exit(void) { edac_dbg(3, "MC:\n"); pci_unregister_driver(&ie31200_driver); + if (!ie31200_registered) + ie31200_remove_one(mci_pdev); } module_init(ie31200_init); diff --git 
a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea622c6f3a39..4d002121ef5c 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 Fill Data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "Op Cache Microtag Probe Port Parity Error", "IC Microtag or Full Tag Multi-hit Error", @@ -378,6 +405,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -975,7 +1003,7 @@ static void decode_smca_error(struct mce *m) } if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) - decode_dram_ecc(cpu_to_node(m->extcpu), m); + decode_dram_ecc(topology_die_id(m->extcpu), m); } static inline void amd_decode_err_code(u16 ec) @@ -1042,6 +1070,9 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (ignore_mce(m)) return NOTIFY_STOP; + if (m->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + pr_emerg(HW_ERR "%s\n", decode_error_status(m)); pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s", @@ -1141,7 +1172,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) err_code: amd_decode_err_code(m->status & 0xffff); - return NOTIFY_STOP; + m->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block amd_mce_dec_nb = { @@ -1161,6 +1193,11 @@ static int __init mce_amd_init(void) if (!fam_ops) return -ENOMEM; 
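The i7core and mce_amd hunks above (and the pnd2, sb_edac and skx hunks that follow) all convert their MCE notifiers to the same cooperative shape: skip records the CEC has already claimed, and on a successful decode mark the record with MCE_HANDLED_EDAC and return NOTIFY_OK so later entries in the decode chain still see it, where NOTIFY_STOP used to terminate the chain. A minimal sketch of that shape; the example_ names and the trivial decode body are illustrative, not driver code:

#include <linux/notifier.h>
#include <linux/types.h>
#include <asm/mce.h>

/* Hypothetical decoder; each driver supplies its own logic here. */
static bool example_decode(struct mce *m)
{
	return m->status != 0;
}

static int example_mce_notify(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *m = data;

	/* A prior consumer (the CEC) already claimed this record. */
	if (m->kflags & MCE_HANDLED_CEC)
		return NOTIFY_DONE;

	if (!example_decode(m))
		return NOTIFY_DONE;

	/* Claim the record, but let the rest of the chain run too. */
	m->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}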
+ if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } + switch (c->x86) { case 0xf: fam_ops->mc0_mce = k8_mc0_mce; @@ -1209,11 +1246,8 @@ static int __init mce_amd_init(void) case 0x17: case 0x18: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + goto err_out; break; default: @@ -1221,6 +1255,7 @@ goto err_out; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index b1193be1ef1d..16d4c83967d1 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, u32 optypenum = GET_BITFIELD(m->status, 4, 6); int rc; - tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) : HW_EVENT_ERR_CORRECTED; /* @@ -1400,7 +1400,7 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo return NOTIFY_DONE; mci = pnd2_mci; - if (!mci) + if (!mci || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; /* @@ -1429,7 +1429,8 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo pnd2_mce_output_error(mci, mce, &daddr); /* Advise mcelog that the errors were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block pnd2_mce_dec = { diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f743502ca9b7..b14663fc808f 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { * FIXME: Implement the error count reads directly */ -static const u32 correrrcnt[] = { - 0x104, 0x108, 0x10c, 0x110, -}; - #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) +#if 0 /* Currently unused */ +static const u32 correrrcnt[] = { + 0x104, 0x108, 0x10c, 0x110, +}; + static const u32 correrrthrsld[] = { 0x11c, 0x120, 0x124, 0x128, }; +#endif #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) @@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s) */ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) { - u64 sad_base, sad_size, sad_limit = 0; + u64 sad_base, sad_limit = 0; u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace; int sad_rule = 0; int tad_rule = 0; @@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) edram_only = KNL_EDRAM_ONLY(dram_rule); sad_limit = pvt->info.sad_limit(dram_rule)+1; - sad_size = sad_limit - sad_base; pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[sad_rule], &interleave_reg); @@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; enum hw_event_mc_err_type tp_event; - char *type, *optype, msg[256]; + char *optype, msg[256]; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { - type = "CORRECTED"; tp_event = HW_EVENT_ERR_CORRECTED; } @@ -3138,6 +3136,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, if (edac_get_report_status() == EDAC_REPORTING_DISABLED) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; /* * Just let mcelog handle it if the error is @@ -3185,7 +3185,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, sbridge_mce_output_error(mci, mce); /* Advise mcelog that the errors were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block sbridge_mce_dec = { @@ -3200,7 +3201,6 @@ static struct notifier_block sbridge_mce_dec = { static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) { struct mem_ctl_info *mci = sbridge_dev->mci; - struct sbridge_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); @@ -3209,8 +3209,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) return; } - pvt = mci->pvt_info; - edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &sbridge_dev->pdev[0]->dev); diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c index 0fcf3785e8f3..49899108cb00 100644 --- a/drivers/edac/skx_base.c +++ b/drivers/edac/skx_base.c @@ -46,7 +46,8 @@ static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) } enum munittype { - CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD + CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD, + ERRCHAN0, ERRCHAN1, ERRCHAN2, }; struct munit { @@ -68,6 +69,9 @@ static const struct munit skx_all_munits[] = { { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 }, { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 }, { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 }, + { 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 }, + { 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 }, + { 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 }, { 0x208e, { }, 1, 0, SAD }, { } }; @@ -104,10 +108,18 @@ static int get_all_munits(const struct munit *m) } switch (m->mtype) { - case CHAN0: case CHAN1: case CHAN2: + case CHAN0: + case CHAN1: + case CHAN2: pci_dev_get(pdev); d->imc[i].chan[m->mtype].cdev = pdev; break; + case ERRCHAN0: + case ERRCHAN1: + case ERRCHAN2: + pci_dev_get(pdev); + d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev; + break; case SAD_ALL: pci_dev_get(pdev); d->sad_all = pdev; @@ -145,33 +157,35 @@ fail: return -ENODEV; } +static struct res_config skx_cfg = { + .type = SKX, + .decs_did = 0x2016, + .busno_cfg_offset = 0xcc, +}; + static const struct x86_cpu_id skx_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, (kernel_ulong_t)&skx_cfg}, { } }; MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); -#define SKX_GET_MTMTR(dev, reg) \ - pci_read_config_dword((dev), 0x87c, &(reg)) - -static bool skx_check_ecc(struct pci_dev *pdev) +static bool skx_check_ecc(u32 mcmtr) { - u32 mtmtr; - - SKX_GET_MTMTR(pdev, mtmtr); - - return !!GET_BITFIELD(mtmtr, 2, 2); + return !!GET_BITFIELD(mcmtr, 2,
2); } static int skx_get_dimm_config(struct mem_ctl_info *mci) { struct skx_pvt *pvt = mci->pvt_info; + u32 mtr, mcmtr, amap, mcddrtcfg; struct skx_imc *imc = pvt->imc; - u32 mtr, amap, mcddrtcfg; struct dimm_info *dimm; int i, j; int ndimms; + /* Only the mcmtr on the first channel is effective */ + pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr); + for (i = 0; i < SKX_NUM_CHANNELS; i++) { ndimms = 0; pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); @@ -182,14 +196,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) pci_read_config_dword(imc->chan[i].cdev, 0x80 + 4 * j, &mtr); if (IS_DIMM_PRESENT(mtr)) { - ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j); + ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j); } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) { ndimms += skx_get_nvdimm_info(dimm, imc, i, j, EDAC_MOD_STR); nvdimm_count++; } } - if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { + if (ndimms && !skx_check_ecc(mcmtr)) { skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); return -ENODEV; } @@ -216,6 +230,39 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) #define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) #define SKX_ILV_TARGET(tgt) ((tgt) & 7) +static void skx_show_retry_rd_err_log(struct decoded_addr *res, + char *msg, int len) +{ + u32 log0, log1, log2, log3, log4; + u32 corr0, corr1, corr2, corr3; + struct pci_dev *edev; + int n; + + edev = res->dev->imc[res->imc].chan[res->channel].edev; + + pci_read_config_dword(edev, 0x154, &log0); + pci_read_config_dword(edev, 0x148, &log1); + pci_read_config_dword(edev, 0x150, &log2); + pci_read_config_dword(edev, 0x15c, &log3); + pci_read_config_dword(edev, 0x114, &log4); + + n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]", + log0, log1, log2, log3, log4); + + pci_read_config_dword(edev, 0x104, &corr0); + pci_read_config_dword(edev, 0x108, &corr1); + pci_read_config_dword(edev, 0x10c, &corr2); + pci_read_config_dword(edev, 0x110, &corr3); + + if (len - n > 0) + snprintf(msg + n, len - n, + " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]", + corr0 & 0xffff, corr0 >> 16, + corr1 & 0xffff, corr1 >> 16, + corr2 & 0xffff, corr2 >> 16, + corr3 & 0xffff, corr3 >> 16); +} + static bool skx_sad_decode(struct decoded_addr *res) { struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list); @@ -597,6 +644,7 @@ static inline void teardown_skx_debug(void) {} static int __init skx_init(void) { const struct x86_cpu_id *id; + struct res_config *cfg; const struct munit *m; const char *owner; int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8}; @@ -613,11 +661,13 @@ static int __init skx_init(void) if (!id) return -ENODEV; + cfg = (struct res_config *)id->driver_data; + rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm); if (rc) return rc; - rc = skx_get_all_bus_mappings(0x2016, 0xcc, SKX, &skx_edac_list); + rc = skx_get_all_bus_mappings(cfg, &skx_edac_list); if (rc < 0) goto fail; if (rc == 0) { @@ -659,7 +709,7 @@ static int __init skx_init(void) } } - skx_set_decode(skx_decode); + skx_set_decode(skx_decode, skx_show_retry_rd_err_log); if (nvdimm_count && skx_adxl_get() == -ENODEV) skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n"); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index a04349c6d17e..8ceb993295ce 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -37,6 +37,7 @@ static char *adxl_msg; static char skx_msg[MSG_SIZE]; static skx_decode_f skx_decode; +static skx_show_retry_log_f skx_show_retry_rd_err_log; 
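skx_show_retry_rd_err_log() above, and the len bookkeeping added to skx_mce_output_error() further down, rely on snprintf() returning the number of characters the full string would need, so each append starts at msg + n with a len - n budget. A standalone sketch of the idiom; the register values are invented for illustration, and in-kernel code gets snprintf() and u32 from <linux/kernel.h> and <linux/types.h>:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_append_log(char *msg, int len)
{
	u32 log0 = 0x12345678, corr0 = 0x00010002;
	int n;

	/*
	 * snprintf() returns the length the whole string would have
	 * needed, which can exceed len when output is truncated.
	 */
	n = snprintf(msg, len, " retry_rd_err_log[%.8x]", log0);

	/* Append only while some of the budget provably remains. */
	if (len - n > 0)
		snprintf(msg + n, len - n, " correrrcnt[%.4x %.4x]",
			 corr0 & 0xffff, corr0 >> 16);
}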
static u64 skx_tolm, skx_tohm; static LIST_HEAD(dev_edac_list); @@ -100,6 +101,7 @@ void __exit skx_adxl_put(void) static bool skx_adxl_decode(struct decoded_addr *res) { + struct skx_dev *d; int i, len = 0; if (res->addr >= skx_tohm || (res->addr >= skx_tolm && @@ -118,6 +120,24 @@ static bool skx_adxl_decode(struct decoded_addr *res) res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; + if (res->imc > NUM_IMC - 1) { + skx_printk(KERN_ERR, "Bad imc %d\n", res->imc); + return false; + } + + list_for_each_entry(d, &dev_edac_list, list) { + if (d->imc[0].src_id == res->socket) { + res->dev = d; + break; + } + } + + if (!res->dev) { + skx_printk(KERN_ERR, "No device for src_id %d imc %d\n", + res->socket, res->imc); + return false; + } + for (i = 0; i < adxl_component_count; i++) { if (adxl_values[i] == ~0x0ull) continue; @@ -131,9 +151,10 @@ static bool skx_adxl_decode(struct decoded_addr *res) return true; } -void skx_set_decode(skx_decode_f decode) +void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log) { skx_decode = decode; + skx_show_retry_rd_err_log = show_retry_log; } int skx_get_src_id(struct skx_dev *d, int off, u8 *id) @@ -176,12 +197,11 @@ static int get_width(u32 mtr) } /* - * We use the per-socket device @did to count how many sockets are present, + * We use the per-socket device @cfg->did to count how many sockets are present, * and to determine which PCI buses are associated with each socket. Allocate * and build the full list of all the skx_dev structures that we need here. */ -int skx_get_all_bus_mappings(unsigned int did, int off, enum type type, - struct list_head **list) +int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) { struct pci_dev *pdev, *prev; struct skx_dev *d; @@ -190,7 +210,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type, prev = NULL; for (;;) { - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, prev); + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev); if (!pdev) break; ndev++; @@ -200,7 +220,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type, return -ENOMEM; } - if (pci_read_config_dword(pdev, off, &reg)) { + if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, &reg)) { kfree(d); pci_dev_put(pdev); skx_printk(KERN_ERR, "Failed to read bus idx\n"); @@ -209,7 +229,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type type, d->bus[0] = GET_BITFIELD(reg, 0, 7); d->bus[1] = GET_BITFIELD(reg, 8, 15); - if (type == SKX) { + if (cfg->type == SKX) { d->seg = pci_domain_nr(pdev->bus); d->bus[2] = GET_BITFIELD(reg, 16, 23); d->bus[3] = GET_BITFIELD(reg, 24, 31); @@ -283,7 +303,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, #define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows") #define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols") -int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, +int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, struct skx_imc *imc, int chan, int dimmno) { int banks = 16, ranks, rows, cols, npages; @@ -303,8 +323,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, imc->mc, chan, dimmno, size, npages, banks, 1 << ranks, rows, cols); - imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); - imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); + imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9); imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); imc->chan[chan].dimms[dimmno].rowbits = rows; imc->chan[chan].dimms[dimmno].colbits = cols; @@ -452,34 +472,17 @@ static void skx_unregister_mci(struct skx_imc *imc) edac_mc_free(mci); } -static struct mem_ctl_info *get_mci(int src_id, int lmc) -{ - struct skx_dev *d; - - if (lmc > NUM_IMC - 1) { - skx_printk(KERN_ERR, "Bad lmc %d\n", lmc); - return NULL; - } - - list_for_each_entry(d, &dev_edac_list, list) { - if (d->imc[0].src_id == src_id) - return d->imc[lmc].mci; - } - - skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc); - return NULL; -} - static void skx_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, struct decoded_addr *res) { enum hw_event_mc_err_type tp_event; - char *type, *optype; + char *optype; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); bool recoverable; + int len; u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); u32 mscod = GET_BITFIELD(m->status, 16, 31); u32 errcode = GET_BITFIELD(m->status, 0, 15); @@ -490,14 +493,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { - type = "CORRECTED"; tp_event = HW_EVENT_ERR_CORRECTED; } @@ -539,12 +539,12 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, } } if (adxl_component_count) { - snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", + len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? " recoverable" : "", mscod, errcode, adxl_msg); } else { - snprintf(skx_msg, MSG_SIZE, + len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? 
" recoverable" : "", @@ -553,6 +553,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, res->bank_group, res->bank_address, res->row, res->column); } + if (skx_show_retry_rd_err_log) + skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len); + edac_dbg(0, "%s\n", skx_msg); /* Call the helper to output message */ @@ -573,6 +576,9 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, if (edac_get_report_status() == EDAC_REPORTING_DISABLED) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + /* ignore unless this is memory related with an address */ if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) return NOTIFY_DONE; @@ -583,15 +589,12 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, if (adxl_component_count) { if (!skx_adxl_decode(&res)) return NOTIFY_DONE; - - mci = get_mci(res.socket, res.imc); - } else { - if (!skx_decode || !skx_decode(&res)) - return NOTIFY_DONE; - - mci = res.dev->imc[res.imc].mci; + } else if (!skx_decode || !skx_decode(&res)) { + return NOTIFY_DONE; } + mci = res.dev->imc[res.imc].mci; + if (!mci) return NOTIFY_DONE; @@ -615,6 +618,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, skx_mce_output_error(mci, mce, &res); + mce->kflags |= MCE_HANDLED_EDAC; return NOTIFY_DONE; } diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index 08cc971a50ea..78f8c1de0b71 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -64,6 +64,7 @@ struct skx_dev { u8 src_id, node_id; struct skx_channel { struct pci_dev *cdev; + struct pci_dev *edev; struct skx_dimm { u8 close_pg; u8 bank_xor_enable; @@ -111,22 +112,30 @@ struct decoded_addr { int bank_group; }; +struct res_config { + enum type type; + /* Configuration agent device ID */ + unsigned int decs_did; + /* Default bus number configuration register offset */ + int busno_cfg_offset; +}; + typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci); typedef bool (*skx_decode_f)(struct decoded_addr *res); +typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len); int __init skx_adxl_get(void); void __exit skx_adxl_put(void); -void skx_set_decode(skx_decode_f decode); +void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); int skx_get_src_id(struct skx_dev *d, int off, u8 *id); int skx_get_node_id(struct skx_dev *d, u8 *id); -int skx_get_all_bus_mappings(unsigned int did, int off, enum type, - struct list_head **list); +int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list); int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm); -int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, +int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, struct skx_imc *imc, int chan, int dimmno); int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c index 6ac26d1b929f..324768946743 100644 --- a/drivers/edac/ti_edac.c +++ b/drivers/edac/ti_edac.c @@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev) /* add EMIF ECC error handler */ error_irq = platform_get_irq(pdev, 0); - if (!error_irq) { + if (error_irq < 0) { + ret = error_irq; edac_printk(KERN_ERR, EDAC_MOD_NAME, "EMIF irq number not defined.\n"); goto err; diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c index ad02dc6747a4..0317b614b680 100644 --- 
a/drivers/extcon/extcon-adc-jack.c +++ b/drivers/extcon/extcon-adc-jack.c @@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev) for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); data->num_conditions = i; - data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); + data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); if (IS_ERR(data->chan)) return PTR_ERR(data->chan); @@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev) free_irq(data->irq, data); cancel_work_sync(&data->handler.work); - iio_channel_release(data->chan); return 0; } diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index e970134c95fa..9a6103ac7118 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -597,7 +597,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) struct arizona *arizona = info->arizona; int id_gpio = arizona->pdata.hpdet_id_gpio; unsigned int report = EXTCON_JACK_HEADPHONE; - int ret, reading; + int ret, reading, state; bool mic = false; mutex_lock(&info->lock); @@ -610,12 +610,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) } /* If the cable was removed while measuring ignore the result */ - ret = extcon_get_state(info->edev, EXTCON_MECHANICAL); - if (ret < 0) { - dev_err(arizona->dev, "Failed to check cable state: %d\n", - ret); + state = extcon_get_state(info->edev, EXTCON_MECHANICAL); + if (state < 0) { + dev_err(arizona->dev, "Failed to check cable state: %d\n", state); goto out; - } else if (!ret) { + } else if (!state) { dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n"); goto done; } @@ -668,7 +667,7 @@ done: ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); /* If we have a mic then reenable MICDET */ - if (mic || info->mic) + if (state && (mic || info->mic)) arizona_start_mic(info); if (info->hpdet_active) { @@ -676,7 +675,9 @@ done: info->hpdet_active = false; } - info->hpdet_done = true; + /* Do not set hp_det done when the cable has been unplugged */ + if (state) + info->hpdet_done = true; out: mutex_unlock(&info->lock); @@ -1724,25 +1725,6 @@ static int arizona_extcon_remove(struct platform_device *pdev) bool change; int ret; - ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, - ARIZONA_MICD_ENA, 0, - &change); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n", - ret); - } else if (change) { - regulator_disable(info->micvdd); - pm_runtime_put(info->dev); - } - - gpiod_put(info->micd_pol_gpio); - - pm_runtime_disable(&pdev->dev); - - regmap_update_bits(arizona->regmap, - ARIZONA_MICD_CLAMP_CONTROL, - ARIZONA_MICD_CLAMP_MODE_MASK, 0); - if (info->micd_clamp) { jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE; jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL; @@ -1758,10 +1740,31 @@ static int arizona_extcon_remove(struct platform_device *pdev) arizona_free_irq(arizona, jack_irq_rise, info); arizona_free_irq(arizona, jack_irq_fall, info); cancel_delayed_work_sync(&info->hpdet_work); + cancel_delayed_work_sync(&info->micd_detect_work); + cancel_delayed_work_sync(&info->micd_timeout_work); + + ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, + ARIZONA_MICD_ENA, 0, + &change); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n", + ret); + } else if (change) { + regulator_disable(info->micvdd); + pm_runtime_put(info->dev); + } + + regmap_update_bits(arizona->regmap, + ARIZONA_MICD_CLAMP_CONTROL, + ARIZONA_MICD_CLAMP_MODE_MASK, 0); 
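The extcon-adc-jack hunk above swaps iio_channel_get() for the device-managed variant, which is why the matching iio_channel_release() disappears from the remove path: the channel is now released automatically when the device unbinds. A minimal probe sketch of the managed pattern; the demo_ names and the channel label are illustrative only, not the driver's code:

#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;

	/* Lifetime is tied to the device; no pairing needed in remove(). */
	chan = devm_iio_channel_get(&pdev->dev, "jack-adc");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	return 0;
}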
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, ARIZONA_JD1_ENA, 0); arizona_clk32k_disable(arizona); + gpiod_put(info->micd_pol_gpio); + + pm_runtime_disable(&pdev->dev); + return 0; } diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index 32fc5a66ffa9..26c7041f7069 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c @@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver); MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver"); MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:extcon-max77693"); +MODULE_ALIAS("platform:max77693-muic"); diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c index d1c997599390..5f5252752644 100644 --- a/drivers/extcon/extcon-ptn5150.c +++ b/drivers/extcon/extcon-ptn5150.c @@ -127,7 +127,7 @@ static void ptn5150_irq_work(struct work_struct *work) case PTN5150_DFP_ATTACHED: extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false); - gpiod_set_value(info->vbus_gpiod, 0); + gpiod_set_value_cansleep(info->vbus_gpiod, 0); extcon_set_state_sync(info->edev, EXTCON_USB, true); break; @@ -138,9 +138,9 @@ static void ptn5150_irq_work(struct work_struct *work) PTN5150_REG_CC_VBUS_DETECTION_MASK) >> PTN5150_REG_CC_VBUS_DETECTION_SHIFT); if (vbus) - gpiod_set_value(info->vbus_gpiod, 0); + gpiod_set_value_cansleep(info->vbus_gpiod, 0); else - gpiod_set_value(info->vbus_gpiod, 1); + gpiod_set_value_cansleep(info->vbus_gpiod, 1); extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true); @@ -156,7 +156,7 @@ static void ptn5150_irq_work(struct work_struct *work) EXTCON_USB_HOST, false); extcon_set_state_sync(info->edev, EXTCON_USB, false); - gpiod_set_value(info->vbus_gpiod, 0); + gpiod_set_value_cansleep(info->vbus_gpiod, 0); } } diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index e055893fd5c3..5c9e156cd086 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev) sizeof(*edev->nh), GFP_KERNEL); if (!edev->nh) { ret = -ENOMEM; + device_unregister(&edev->dev); goto err_dev; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 0cc746673677..9ee747a85ee4 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; + int ret; switch (cmd) { case NOSY_IOC_GET_STATS: @@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; case NOSY_IOC_START: + ret = -EBUSY; spin_lock_irq(client_list_lock); - list_add_tail(&client->link, &client->lynx->client_list); + if (list_empty(&client->link)) { + list_add_tail(&client->link, &client->lynx->client_list); + ret = 0; + } spin_unlock_irq(client_list_lock); - return 0; + return ret; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index e40a77bfe821..7dfbd0f6b76b 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU config QCOM_SCM bool depends on ARM || ARM64 + depends on HAVE_ARM_SMCCC select RESET_CONTROLLER config QCOM_SCM_32 diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index f804e8af6521..f986ee8919f0 100644 --- 
a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -173,6 +173,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle, protocols_imp[tot_num_ret + loop] = *(list + loop); tot_num_ret += loop_num_ret; + + scmi_reset_rx_to_maxsz(handle, t); } while (loop_num_ret); scmi_xfer_put(handle, t); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 32526a793f3a..38400a8d0ca8 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -177,6 +177,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, } tot_rate_cnt += num_returned; + + scmi_reset_rx_to_maxsz(handle, t); /* * check for both returned and remaining to avoid infinite * loop due to buggy firmware diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 5237c2ff79fe..9a680b9af9e5 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -103,6 +103,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h, struct scmi_xfer *xfer); int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, size_t tx_size, size_t rx_size, struct scmi_xfer **p); +void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle, + struct scmi_xfer *xfer); int scmi_handle_put(const struct scmi_handle *handle); struct scmi_handle *scmi_handle_get(struct device *dev); void scmi_set_handle(struct scmi_device *scmi_dev); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3eb0382491ce..11078199abed 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -481,6 +481,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) return ret; } +void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(handle); + + xfer->rx.len = info->desc->max_msg_size; +} + #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) /** diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 601af4edad5e..129a2887e964 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -281,6 +281,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain, } tot_opp_cnt += num_returned; + + scmi_reset_rx_to_maxsz(handle, t); /* * check for both returned and remaining to avoid infinite * loop due to buggy firmware diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index ab42c21c5517..6d223f345b6c 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -35,9 +35,7 @@ struct scmi_msg_reset_domain_reset { #define EXPLICIT_RESET_ASSERT BIT(1) #define ASYNCHRONOUS_RESET BIT(2) __le32 reset_state; -#define ARCH_RESET_TYPE BIT(31) -#define COLD_RESET_STATE BIT(0) -#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE) +#define ARCH_COLD_RESET 0 }; struct reset_dom_info { diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473..041f8152272b 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) for (i = 0; i < num_domains; i++, scmi_pd++) { u32 state; - domains[i] = &scmi_pd->genpd; + if (handle->power_ops->state_get(handle, i, &state)) { + dev_warn(dev, "failed to get 
state for domain %d\n", i); + continue; + } scmi_pd->domain = i; scmi_pd->handle = handle; @@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; - if (handle->power_ops->state_get(handle, i, &state)) { - dev_warn(dev, "failed to get state for domain %d\n", i); - continue; - } - pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); + + domains[i] = &scmi_pd->genpd; } scmi_pd_data->domains = domains; diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index a400ea805fc2..931208bc48f1 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -154,6 +154,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, } desc_index += num_returned; + + scmi_reset_rx_to_maxsz(handle, t); /* * check for both returned and remaining to avoid infinite * loop due to buggy firmware diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 9cd70d1a5622..e497785cd99f 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -412,14 +412,19 @@ int sdei_event_enable(u32 event_num) return -ENOENT; } - spin_lock(&sdei_list_lock); - event->reenable = true; - spin_unlock(&sdei_list_lock); + cpus_read_lock(); if (event->type == SDEI_EVENT_TYPE_SHARED) err = sdei_api_event_enable(event->event_num); else err = sdei_do_cross_call(_local_event_enable, event); + + if (!err) { + spin_lock(&sdei_list_lock); + event->reenable = true; + spin_unlock(&sdei_list_lock); + } + cpus_read_unlock(); mutex_unlock(&sdei_events_lock); return err; @@ -491,11 +496,6 @@ static int _sdei_event_unregister(struct sdei_event *event) { lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_unregister(event->event_num); @@ -518,6 +518,11 @@ int sdei_event_unregister(u32 event_num) break; } + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + err = _sdei_event_unregister(event); if (err) break; @@ -585,26 +590,15 @@ static int _sdei_event_register(struct sdei_event *event) lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_register(event->event_num, sdei_entry_point, event->registered, SDEI_EVENT_REGISTER_RM_ANY, 0); - err = sdei_do_cross_call(_local_event_register, event); - if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - + if (err) sdei_do_cross_call(_local_event_unregister, event); - } return err; } @@ -632,12 +626,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } + cpus_read_lock(); err = _sdei_event_register(event); if (err) { sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); + } else { + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); } + cpus_read_unlock(); } while (0); mutex_unlock(&sdei_events_lock); diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index b248870a9806..3222645c95b3 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -216,6 +216,17 @@ 
config EFI_DEV_PATH_PARSER config EFI_EARLYCON def_bool y - depends on SERIAL_EARLYCON && !ARM && !IA64 + depends on EFI && SERIAL_EARLYCON && !ARM && !IA64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT + +config EFI_CUSTOM_SSDT_OVERLAYS + bool "Load custom ACPI SSDT overlay from an EFI variable" + depends on EFI_VARS && ACPI + default ACPI_TABLE_UPGRADE + help + Allow loading of an ACPI SSDT overlay from an EFI variable specified + by a kernel command line option. + + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more + information. diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index ad8a4bc074fb..3a2b60736915 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -217,7 +217,7 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } -#if IS_ENABLED(CONFIG_ACPI) +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS #define EFIVAR_SSDT_NAME_MAX 16 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) @@ -345,6 +345,7 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); + destroy_workqueue(efi_rts_wq); return -ENOMEM; } @@ -381,6 +382,7 @@ err_unregister: generic_ops_unregister(); err_put: kobject_put(efi_kobj); + destroy_workqueue(efi_rts_wq); return error; } @@ -562,7 +564,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, } } - if (efi_enabled(EFI_MEMMAP)) + if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP)) efi_memattr_init(); efi_tpm_eventlog_init(); @@ -1004,7 +1006,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) } /* first try to find a slot in an existing linked list entry */ - for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + for (prsv = efi_memreserve_root->next; prsv; ) { rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { @@ -1014,6 +1016,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) memunmap(rsv); return efi_mem_reserve_iomem(addr, size); } + prsv = rsv->next; memunmap(rsv); } diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index aff3dfb4d7ba..d187585db97a 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (ret) + if (ret) { + kobject_put(&new_var->kobj); return ret; + } kobject_uevent(&new_var->kobj, KOBJ_ADD); if (efivar_entry_add(new_var, &efivar_sysfs_list)) { diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index d6dd5f503fa2..e8f71a50ba89 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index ee0661ddb25b..8c5b5529dbc0 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ $(call 
cc-option,-fno-stack-protector) \ + $(call cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS GCOV_PROFILE := n diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index eb9af83e4d59..aeeb1b2d8ede 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -64,7 +64,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) efi_status_t status; efi_physical_addr_t log_location = 0, log_last_entry = 0; struct linux_efi_tpm_eventlog *log_tbl = NULL; - struct efi_tcg2_final_events_table *final_events_table; + struct efi_tcg2_final_events_table *final_events_table = NULL; unsigned long first_entry_addr, last_entry_addr; size_t log_size, last_entry_size; efi_bool_t truncated; @@ -140,7 +140,8 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) * Figure out whether any events have already been logged to the * final events structure, and if so how much space they take up */ - final_events_table = get_efi_config_table(sys_table_arg, + if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) + final_events_table = get_efi_config_table(sys_table_arg, LINUX_EFI_TPM_FINAL_LOG_GUID); if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 31f9f0e369b9..c1955d320fec 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -16,7 +16,7 @@ int efi_tpm_final_log_size; EXPORT_SYMBOL(efi_tpm_final_log_size); -static int tpm2_calc_event_log_size(void *data, int count, void *size_info) +static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info) { struct tcg_pcr_event2_head *header; int event_size, size = 0; @@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void) tbl_size = sizeof(*log_tbl) + log_tbl->size; memblock_reserve(efi.tpm_log, tbl_size); - if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) + if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || + log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { + pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); goto out; + } final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl)); diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 0dbee32da4c6..5d995fe64b5c 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -13,6 +13,7 @@ config IMX_DSP config IMX_SCU bool "IMX SCU Protocol driver" depends on IMX_MBOX + select SOC_BUS help The System Controller Firmware (SCFW) is a low-level system function which runs on a dedicated Cortex-M core to provide power, clock, and diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 35a5f8f8eea5..a3b11bc71dcb 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -38,6 +38,7 @@ struct imx_sc_ipc { struct device *dev; struct mutex lock; struct completion done; + bool fast_ipc; /* temporarily store the SCU msg */ u32 *msg; @@ -115,6 +116,26 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc; struct imx_sc_rpc_msg *hdr; u32 *data = msg; + int i; + + if (!sc_ipc->msg) { + dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n", + sc_chan->idx, *data); + return; + } + + if (sc_ipc->fast_ipc) { + hdr = msg; + sc_ipc->rx_size = hdr->size; + sc_ipc->msg[0] = *data++; + + for (i = 1; i < sc_ipc->rx_size; i++) + sc_ipc->msg[i] = *data++; + + complete(&sc_ipc->done); + + return; + } if (sc_chan->idx == 0) { 
hdr = msg; @@ -137,20 +158,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) { - struct imx_sc_rpc_msg *hdr = msg; + struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg; struct imx_sc_chan *sc_chan; u32 *data = msg; int ret; + int size; int i; /* Check size */ - if (hdr->size > IMX_SC_RPC_MAX_MSG) + if (hdr.size > IMX_SC_RPC_MAX_MSG) return -EINVAL; - dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc, - hdr->func, hdr->size); + dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc, + hdr.func, hdr.size); - for (i = 0; i < hdr->size; i++) { + size = sc_ipc->fast_ipc ? 1 : hdr.size; + for (i = 0; i < size; i++) { sc_chan = &sc_ipc->chans[i % 4]; /* @@ -162,8 +185,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) * Wait for tx_done before every send to ensure that no * queueing happens at the mailbox channel level. */ - wait_for_completion(&sc_chan->tx_done); - reinit_completion(&sc_chan->tx_done); + if (!sc_ipc->fast_ipc) { + wait_for_completion(&sc_chan->tx_done); + reinit_completion(&sc_chan->tx_done); + } ret = mbox_send_message(sc_chan->ch, &data[i]); if (ret < 0) @@ -187,7 +212,8 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) mutex_lock(&sc_ipc->lock); reinit_completion(&sc_ipc->done); - sc_ipc->msg = msg; + if (have_resp) + sc_ipc->msg = msg; sc_ipc->count = 0; ret = imx_scu_ipc_write(sc_ipc, msg); if (ret < 0) { @@ -209,6 +235,7 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) } out: + sc_ipc->msg = NULL; mutex_unlock(&sc_ipc->lock); dev_dbg(sc_ipc->dev, "RPC SVC done\n"); @@ -224,6 +251,8 @@ static int imx_scu_probe(struct platform_device *pdev) struct imx_sc_chan *sc_chan; struct mbox_client *cl; char *chan_name; + struct of_phandle_args args; + int num_channel; int ret; int i; @@ -231,11 +260,20 @@ static int imx_scu_probe(struct platform_device *pdev) if (!sc_ipc) return -ENOMEM; - for (i = 0; i < SCU_MU_CHAN_NUM; i++) { - if (i < 4) + ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes", + "#mbox-cells", 0, &args); + if (ret) + return ret; + + sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); + + num_channel = sc_ipc->fast_ipc ? 
2 : SCU_MU_CHAN_NUM; + for (i = 0; i < num_channel; i++) { + if (i < num_channel / 2) chan_name = kasprintf(GFP_KERNEL, "tx%d", i); else - chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4); + chan_name = kasprintf(GFP_KERNEL, "rx%d", + i - num_channel / 2); if (!chan_name) return -ENOMEM; @@ -247,19 +285,22 @@ static int imx_scu_probe(struct platform_device *pdev) cl->knows_txdone = true; cl->rx_callback = imx_scu_rx_callback; - /* Initial tx_done completion as "done" */ - cl->tx_done = imx_scu_tx_done; - init_completion(&sc_chan->tx_done); - complete(&sc_chan->tx_done); + if (!sc_ipc->fast_ipc) { + /* Initial tx_done completion as "done" */ + cl->tx_done = imx_scu_tx_done; + init_completion(&sc_chan->tx_done); + complete(&sc_chan->tx_done); + } sc_chan->sc_ipc = sc_ipc; - sc_chan->idx = i % 4; + sc_chan->idx = i % (num_channel / 2); sc_chan->ch = mbox_request_channel_byname(cl, chan_name); if (IS_ERR(sc_chan->ch)) { ret = PTR_ERR(sc_chan->ch); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request mbox chan %s ret %d\n", chan_name, ret); + kfree(chan_name); return ret; } diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 6a445397771c..03eb798ad3ed 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), GFP_KERNEL); - if (!cpu_groups) + if (!cpu_groups) { + free_cpumask_var(tmp); return -ENOMEM; + } cpumask_copy(tmp, cpu_online_mask); @@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) topology_core_cpumask(cpumask_any(tmp)); if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpumask_var(tmp); free_cpu_groups(num_groups, &cpu_groups); return -ENOMEM; } diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 4802ab170fe5..b9fdc20b4eb9 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -9,7 +9,6 @@ #include <linux/init.h> #include <linux/cpumask.h> #include <linux/export.h> -#include <linux/dma-direct.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/types.h> @@ -441,8 +440,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; - phys_addr_t ptr_phys; - dma_addr_t ptr_dma; + dma_addr_t ptr_phys; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; @@ -459,10 +457,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); if (!ptr) return -ENOMEM; - ptr_phys = dma_to_phys(__scm->dev, ptr_dma); /* Fill source vmid detail */ src = ptr; @@ -490,7 +487,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8..6945c3c96637 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ 
static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 62f924489db5..5942343a5d6e 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, region->pages); if (pinned < 0) { ret = pinned; - goto put_pages; + goto free_pages; } else if (pinned != npages) { ret = -EFAULT; - goto free_pages; + goto put_pages; } dev_dbg(dev, "%d pages pinned\n", pinned); diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index e4a34dc7947f..041d23469238 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev) * on this port and minimum soft reset pulse width has elapsed. * Driver polls port_soft_reset_ack to determine if reset done by HW. */ - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, + v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { dev_err(&pdev->dev, "timeout, fail to reset device\n"); return -ETIMEDOUT; diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 89ca292236ad..a78c409bf2c4 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. 
@@ -248,11 +249,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) return ret; ret = pci_enable_sriov(pcidev, num_vfs); - if (ret) + if (ret) { dfl_fpga_cdev_config_ports_pf(cdev); + return ret; + } } - return ret; + return num_vfs; } static void cci_pci_remove(struct pci_dev *pcidev) diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index effed3a8d398..2ecb1d3e8eeb 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev) data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff", GPIOD_OUT_LOW); - if (IS_ERR(data->on_off)) + if (IS_ERR(data->on_off)) { + ret = PTR_ERR(data->on_off); goto err_put_device; + } if (data->on_off) { data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup", GPIOD_IN); - if (IS_ERR(data->wakeup)) + if (IS_ERR(data->wakeup)) { + ret = PTR_ERR(data->wakeup); goto err_put_device; + } ret = regulator_enable(data->vcc); if (ret) diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 5640efe5e750..5bda38e0780f 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } @@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) if (ret < 0) { dev_err(chip->parent, "Failed to drop cache: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } ret = regmap_read(arizona->regmap, reg, &val); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->parent); return ret; + } pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); @@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip, ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put(chip->parent); return ret; } } diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 09e53c5f3b0a..2820c59b5f07 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -1115,8 +1115,8 @@ static const struct aspeed_gpio_config ast2500_config = static const struct aspeed_bank_props ast2600_bank_props[] = { /* input output */ - {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */ - {6, 0xffff0000, 0x0fff0000}, /* Y/Z */ + {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */ + {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */ { }, }; diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 9fa6d3a967d2..100575973e1f 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -619,7 +619,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(kona_gpio->reg_base)) { - ret = -ENXIO; + ret = PTR_ERR(kona_gpio->reg_base); goto err_irq_domain; } diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 92e127e74813..ed6061b5cca1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -49,7 +49,9 @@ #define GPIO_EXT_PORTC 0x58 #define GPIO_EXT_PORTD 0x5c +#define DWAPB_DRIVER_NAME "gpio-dwapb" #define DWAPB_MAX_PORTS 4 + #define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ #define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ #define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ @@ 
-398,7 +400,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, return; err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, - "gpio-dwapb", handle_level_irq, + DWAPB_DRIVER_NAME, handle_level_irq, IRQ_NOREQUEST, 0, IRQ_GC_INIT_NESTED_LOCK); if (err) { @@ -455,7 +457,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, */ err = devm_request_irq(gpio->dev, pp->irq[0], dwapb_irq_handler_mfd, - IRQF_SHARED, "gpio-dwapb-mfd", gpio); + IRQF_SHARED, DWAPB_DRIVER_NAME, gpio); if (err) { dev_err(gpio->dev, "error requesting IRQ\n"); irq_domain_remove(gpio->domain); @@ -533,26 +535,33 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, dwapb_configure_irqs(gpio, port, pp); err = gpiochip_add_data(&port->gc, port); - if (err) + if (err) { dev_err(gpio->dev, "failed to register gpiochip for port%d\n", port->idx); - else - port->is_registered = true; + return err; + } /* Add GPIO-signaled ACPI event support */ - if (pp->has_irq) - acpi_gpiochip_request_interrupts(&port->gc); + acpi_gpiochip_request_interrupts(&port->gc); - return err; + port->is_registered = true; + + return 0; } static void dwapb_gpio_unregister(struct dwapb_gpio *gpio) { unsigned int m; - for (m = 0; m < gpio->nr_ports; ++m) - if (gpio->ports[m].is_registered) - gpiochip_remove(&gpio->ports[m].gc); + for (m = 0; m < gpio->nr_ports; ++m) { + struct dwapb_gpio_port *port = &gpio->ports[m]; + + if (!port->is_registered) + continue; + + acpi_gpiochip_free_interrupts(&port->gc); + gpiochip_remove(&port->gc); + } } static struct dwapb_platform_data * @@ -836,7 +845,7 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend, static struct platform_driver dwapb_gpio_driver = { .driver = { - .name = "gpio-dwapb", + .name = DWAPB_DRIVER_NAME, .pm = &dwapb_gpio_pm_ops, .of_match_table = of_match_ptr(dwapb_of_match), .acpi_match_table = ACPI_PTR(dwapb_acpi_match), @@ -850,3 +859,4 @@ module_platform_driver(dwapb_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver"); +MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME); diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index bb287f35cf40..a69b3faf51ef 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev) const struct sprd_eic_variant_data *pdata; struct gpio_irq_chip *irq; struct sprd_eic *sprd_eic; + struct resource *res; int ret, i; pdata = of_device_get_match_data(&pdev->dev); @@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev) * have one bank EIC, thus base[1] and base[2] can be * optional. 
*/ - sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i); + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) + break; + + sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sprd_eic->base[i])) - continue; + return PTR_ERR(sprd_eic->base[i]); } sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type]; diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 226da8df6f10..94d9fa0d6aa7 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -25,6 +25,9 @@ /* Maximum value for gpio line identifiers */ #define EP93XX_GPIO_LINE_MAX 63 +/* Number of GPIO chips in EP93XX */ +#define EP93XX_GPIO_CHIP_NUM 8 + /* Maximum value for irq capable line identifiers */ #define EP93XX_GPIO_LINE_MAX_IRQ 23 @@ -34,74 +37,75 @@ */ #define EP93XX_GPIO_F_IRQ_BASE 80 +struct ep93xx_gpio_irq_chip { + struct irq_chip ic; + u8 irq_offset; + u8 int_unmasked; + u8 int_enabled; + u8 int_type1; + u8 int_type2; + u8 int_debounce; +}; + +struct ep93xx_gpio_chip { + struct gpio_chip gc; + struct ep93xx_gpio_irq_chip *eic; +}; + struct ep93xx_gpio { void __iomem *base; - struct gpio_chip gc[8]; + struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM]; }; +#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc) + +static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc) +{ + struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc); + + return egc->eic; +} + /************************************************************************* * Interrupt handling for EP93xx on-chip GPIOs *************************************************************************/ -static unsigned char gpio_int_unmasked[3]; -static unsigned char gpio_int_enabled[3]; -static unsigned char gpio_int_type1[3]; -static unsigned char gpio_int_type2[3]; -static unsigned char gpio_int_debounce[3]; +#define EP93XX_INT_TYPE1_OFFSET 0x00 +#define EP93XX_INT_TYPE2_OFFSET 0x04 +#define EP93XX_INT_EOI_OFFSET 0x08 +#define EP93XX_INT_EN_OFFSET 0x0c +#define EP93XX_INT_STATUS_OFFSET 0x10 +#define EP93XX_INT_RAW_STATUS_OFFSET 0x14 +#define EP93XX_INT_DEBOUNCE_OFFSET 0x18 -/* Port ordering is: A B F */ -static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c }; -static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 }; -static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; -static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; -static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; - -static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port) +static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, + struct ep93xx_gpio_irq_chip *eic) { - BUG_ON(port > 2); + writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); - writeb_relaxed(0, epg->base + int_en_register_offset[port]); + writeb_relaxed(eic->int_type2, + epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET); - writeb_relaxed(gpio_int_type2[port], - epg->base + int_type2_register_offset[port]); + writeb_relaxed(eic->int_type1, + epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET); - writeb_relaxed(gpio_int_type1[port], - epg->base + int_type1_register_offset[port]); - - writeb(gpio_int_unmasked[port] & gpio_int_enabled[port], - epg->base + int_en_register_offset[port]); -} - -static int ep93xx_gpio_port(struct gpio_chip *gc) -{ - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = 0; - - while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port]) - port++; - - /* This should not 
happen but is there as a last safeguard */ - if (port == ARRAY_SIZE(epg->gc)) { - pr_crit("can't find the GPIO port\n"); - return 0; - } - - return port; + writeb_relaxed(eic->int_unmasked & eic->int_enabled, + epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); } static void ep93xx_gpio_int_debounce(struct gpio_chip *gc, unsigned int offset, bool enable) { struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); int port_mask = BIT(offset); if (enable) - gpio_int_debounce[port] |= port_mask; + eic->int_debounce |= port_mask; else - gpio_int_debounce[port] &= ~port_mask; + eic->int_debounce &= ~port_mask; - writeb(gpio_int_debounce[port], - epg->base + int_debounce_register_offset[port]); + writeb(eic->int_debounce, + epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET); } static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) @@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) */ stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS); for_each_set_bit(offset, &stat, 8) - generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain, + generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain, offset)); stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS); for_each_set_bit(offset, &stat, 8) - generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain, + generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain, offset)); chained_irq_exit(irqchip, desc); @@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) static void ep93xx_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int port_mask = BIT(d->irq & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ - ep93xx_gpio_update_int_params(epg, port); + eic->int_type2 ^= port_mask; /* switch edge direction */ + ep93xx_gpio_update_int_params(epg, eic); } - writeb(port_mask, epg->base + eoi_register_offset[port]); + writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); } static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int port_mask = BIT(d->irq & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ + eic->int_type2 ^= port_mask; /* switch edge direction */ - gpio_int_unmasked[port] &= ~port_mask; - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked &= ~port_mask; + ep93xx_gpio_update_int_params(epg, eic); - writeb(port_mask, epg->base + eoi_register_offset[port]); + writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); } static void ep93xx_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); - gpio_int_unmasked[port] &= ~BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked &= ~BIT(d->irq & 7); + ep93xx_gpio_update_int_params(epg, eic); } static void ep93xx_gpio_irq_unmask(struct irq_data *d) { 
struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); - gpio_int_unmasked[port] |= BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked |= BIT(d->irq & 7); + ep93xx_gpio_update_int_params(epg, eic); } /* @@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d) static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int offset = d->irq & 7; int port_mask = BIT(offset); irq_flow_handler_t handler; @@ -219,32 +223,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) switch (type) { case IRQ_TYPE_EDGE_RISING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] |= port_mask; + eic->int_type1 |= port_mask; + eic->int_type2 |= port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_EDGE_FALLING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] &= ~port_mask; + eic->int_type1 |= port_mask; + eic->int_type2 &= ~port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] |= port_mask; + eic->int_type1 &= ~port_mask; + eic->int_type2 |= port_mask; handler = handle_level_irq; break; case IRQ_TYPE_LEVEL_LOW: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] &= ~port_mask; + eic->int_type1 &= ~port_mask; + eic->int_type2 &= ~port_mask; handler = handle_level_irq; break; case IRQ_TYPE_EDGE_BOTH: - gpio_int_type1[port] |= port_mask; + eic->int_type1 |= port_mask; /* set initial polarity based on current input level */ if (gc->get(gc, offset)) - gpio_int_type2[port] &= ~port_mask; /* falling */ + eic->int_type2 &= ~port_mask; /* falling */ else - gpio_int_type2[port] |= port_mask; /* rising */ + eic->int_type2 |= port_mask; /* rising */ handler = handle_edge_irq; break; default: @@ -253,22 +257,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) irq_set_handler_locked(d, handler); - gpio_int_enabled[port] |= port_mask; + eic->int_enabled |= port_mask; - ep93xx_gpio_update_int_params(epg, port); + ep93xx_gpio_update_int_params(epg, eic); return 0; } -static struct irq_chip ep93xx_gpio_irq_chip = { - .name = "GPIO", - .irq_ack = ep93xx_gpio_irq_ack, - .irq_mask_ack = ep93xx_gpio_irq_mask_ack, - .irq_mask = ep93xx_gpio_irq_mask, - .irq_unmask = ep93xx_gpio_irq_unmask, - .irq_set_type = ep93xx_gpio_irq_type, -}; - /************************************************************************* * gpiolib interface for EP93xx on-chip GPIOs *************************************************************************/ @@ -276,17 +271,19 @@ struct ep93xx_gpio_bank { const char *label; int data; int dir; + int irq; int base; bool has_irq; bool has_hierarchical_irq; unsigned int irq_base; }; -#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \ +#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \ { \ .label = _label, \ .data = _data, \ .dir = _dir, \ + .irq = _irq, \ .base = _base, \ .has_irq = _has_irq, \ .has_hierarchical_irq = _has_hier, \ @@ -295,16 +292,16 @@ struct ep93xx_gpio_bank { static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = { /* Bank A has 8 IRQs */ - EP93XX_GPIO_BANK("A", 
0x00, 0x10, 0, true, false, 64), + EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 64), /* Bank B has 8 IRQs */ - EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72), - EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0), - EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0), - EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0), + EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72), + EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0), + EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0), + EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0), /* Bank F has 8 IRQs */ - EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0), - EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0), - EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0), + EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0), + EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0), + EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0), }; static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset, @@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset) return EP93XX_GPIO_F_IRQ_BASE + offset; } -static int ep93xx_gpio_add_bank(struct gpio_chip *gc, +static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic) +{ + ic->irq_ack = ep93xx_gpio_irq_ack; + ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack; + ic->irq_mask = ep93xx_gpio_irq_mask; + ic->irq_unmask = ep93xx_gpio_irq_unmask; + ic->irq_set_type = ep93xx_gpio_irq_type; +} + +static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc, struct platform_device *pdev, struct ep93xx_gpio *epg, struct ep93xx_gpio_bank *bank) { void __iomem *data = epg->base + bank->data; void __iomem *dir = epg->base + bank->dir; + struct gpio_chip *gc = &egc->gc; struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; int err; @@ -346,8 +353,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, girq = &gc->irq; if (bank->has_irq || bank->has_hierarchical_irq) { + struct irq_chip *ic; + gc->set_config = ep93xx_gpio_set_config; - girq->chip = &ep93xx_gpio_irq_chip; + egc->eic = devm_kcalloc(dev, 1, + sizeof(*egc->eic), + GFP_KERNEL); + if (!egc->eic) + return -ENOMEM; + egc->eic->irq_offset = bank->irq; + ic = &egc->eic->ic; + ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label); + if (!ic->name) + return -ENOMEM; + ep93xx_init_irq_chip(dev, ic); + girq->chip = ic; } if (bank->has_irq) { @@ -389,7 +409,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i; irq_set_chip_data(gpio_irq, &epg->gc[5]); irq_set_chip_and_handler(gpio_irq, - &ep93xx_gpio_irq_chip, + girq->chip, handle_level_irq); irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST); } @@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev) return PTR_ERR(epg->base); for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { - struct gpio_chip *gc = &epg->gc[i]; + struct ep93xx_gpio_chip *gc = &epg->gc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; if (ep93xx_gpio_add_bank(gc, pdev, epg, bank)) diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index fae327d5b06e..6890d32d9f25 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -145,8 +145,10 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); - if (index < 0) - goto err_destroy; + if (index < 0) { + ret = index; + goto 
err_mutex_destroy; + } sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; @@ -173,6 +175,7 @@ static int gpio_exar_probe(struct platform_device *pdev) err_destroy: ida_simple_remove(&ida_index, index); +err_mutex_destroy: mutex_destroy(&exar_gpio->lock); return ret; } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 213aedc97dc2..9c1c4d81aa7b 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -497,6 +497,7 @@ static int __init gpio_mockup_init(void) err = platform_driver_register(&gpio_mockup_driver); if (err) { gpio_mockup_err("error registering platform driver\n"); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } @@ -527,6 +528,7 @@ static int __init gpio_mockup_init(void) gpio_mockup_err("error registering device"); platform_driver_unregister(&gpio_mockup_driver); gpio_mockup_unregister_pdevs(); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return PTR_ERR(pdev); } diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 6c0687694341..89a053b1d279 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -657,9 +657,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip, spin_lock_irqsave(&mvpwm->lock, flags); - val = (unsigned long long) - readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm)); - val *= NSEC_PER_SEC; + u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm)); + val = (unsigned long long) u * NSEC_PER_SEC; do_div(val, mvpwm->clk_rate); if (val > UINT_MAX) state->duty_cycle = UINT_MAX; @@ -668,21 +667,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip, else state->duty_cycle = 1; - val = (unsigned long long) - readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm)); + val = (unsigned long long) u; /* on duration */ + /* period = on + off duration */ + val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm)); val *= NSEC_PER_SEC; do_div(val, mvpwm->clk_rate); - if (val < state->duty_cycle) { + if (val > UINT_MAX) + state->period = UINT_MAX; + else if (val) + state->period = val; + else state->period = 1; - } else { - val -= state->duty_cycle; - if (val > UINT_MAX) - state->period = UINT_MAX; - else if (val) - state->period = val; - else - state->period = 1; - } regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u); if (u) @@ -1196,6 +1191,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev) devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ + if (IS_ENABLED(CONFIG_PWM)) { + err = mvebu_pwm_probe(pdev, mvchip, id); + if (err) + return err; + } + /* Some gpio controllers do not provide irq support */ if (!have_irqs) return 0; @@ -1205,7 +1207,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); - return -ENODEV; + err = -ENODEV; + goto err_pwm; } err = irq_alloc_domain_generic_chips( @@ -1253,14 +1256,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip); } - /* Some MVEBU SoCs have simple PWM support for GPIO lines */ - if (IS_ENABLED(CONFIG_PWM)) - return mvebu_pwm_probe(pdev, mvchip, id); - return 0; err_domain: irq_domain_remove(mvchip->domain); +err_pwm: + pwmchip_remove(&mvchip->mvpwm->chip); return err; } diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index d0f27084a942..ce6954390cfd 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -29,6 +29,7 @@ 
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF struct gpio_regs { + u32 sysconfig; u32 irqenable1; u32 irqenable2; u32 wake_en; @@ -1058,6 +1059,7 @@ static void omap_gpio_init_context(struct gpio_bank *p) const struct omap_gpio_reg_offs *regs = p->regs; void __iomem *base = p->base; + p->context.sysconfig = readl_relaxed(base + regs->sysconfig); p->context.ctrl = readl_relaxed(base + regs->ctrl); p->context.oe = readl_relaxed(base + regs->direction); p->context.wake_en = readl_relaxed(base + regs->wkup_en); @@ -1077,6 +1079,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank) const struct omap_gpio_reg_offs *regs = bank->regs; void __iomem *base = bank->base; + writel_relaxed(bank->context.sysconfig, base + regs->sysconfig); writel_relaxed(bank->context.wake_en, base + regs->wkup_en); writel_relaxed(bank->context.ctrl, base + regs->ctrl); writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0); @@ -1104,6 +1107,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context) bank->saved_datain = readl_relaxed(base + bank->regs->datain); + /* Save sysconfig; its runtime value can differ from the init value */ + if (bank->loses_context) + bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig); + if (!bank->enabled_non_wakeup_gpios) goto update_gpio_context_count; @@ -1259,6 +1266,7 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb, static const struct omap_gpio_reg_offs omap2_gpio_regs = { .revision = OMAP24XX_GPIO_REVISION, + .sysconfig = OMAP24XX_GPIO_SYSCONFIG, .direction = OMAP24XX_GPIO_OE, .datain = OMAP24XX_GPIO_DATAIN, .dataout = OMAP24XX_GPIO_DATAOUT, @@ -1282,6 +1290,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = { static const struct omap_gpio_reg_offs omap4_gpio_regs = { .revision = OMAP4_GPIO_REVISION, + .sysconfig = OMAP4_GPIO_SYSCONFIG, .direction = OMAP4_GPIO_OE, .datain = OMAP4_GPIO_DATAIN, .dataout = OMAP4_GPIO_DATAOUT, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index de5d1383f28d..9a24dce3c262 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca953x_id); +#ifdef CONFIG_GPIO_PCA953X_IRQ + +#include <linux/dmi.h> +#include <linux/gpio.h> +#include <linux/list.h> + +static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { + { + /* + * On the Intel Galileo Gen 2 board the IRQ pin of one of + * the I²C GPIO expanders, which has a GpioInt() resource, + * is provided as an absolute number instead of being + * relative. Since the first controller (gpio-sch.c) and + * the second (gpio-dwapb.c) are at fixed bases, we may + * safely refer to the number in the global space to get + * an IRQ out of it.
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + }, + {} +}; + +#ifdef CONFIG_ACPI +static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) +{ + struct acpi_resource_gpio *agpio; + int *pin = data; + + if (acpi_gpio_get_irq_resource(ares, &agpio)) + *pin = agpio->pin_table[0]; + return 1; +} + +static int pca953x_acpi_find_pin(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + int pin = -ENOENT, ret; + LIST_HEAD(r); + + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); + acpi_dev_free_resource_list(&r); + if (ret < 0) + return ret; + + return pin; +} +#else +static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } +#endif + +static int pca953x_acpi_get_irq(struct device *dev) +{ + int pin, ret; + + pin = pca953x_acpi_find_pin(dev); + if (pin < 0) + return pin; + + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); + + if (!gpio_is_valid(pin)) + return -EINVAL; + + ret = gpio_request(pin, "pca953x interrupt"); + if (ret) + return ret; + + ret = gpio_to_irq(pin); + + /* When pin is used as an IRQ, no need to keep it requested */ + gpio_free(pin); + + return ret; +} +#endif + static const struct acpi_device_id pca953x_acpi_ids[] = { { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { } @@ -306,8 +384,23 @@ static const struct regmap_config pca953x_i2c_regmap = { .volatile_reg = pca953x_volatile_register, .cache_type = REGCACHE_RBTREE, - /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */ - .max_register = 0xff, + .max_register = 0x7f, +}; + +static const struct regmap_config pca953x_ai_i2c_regmap = { + .reg_bits = 8, + .val_bits = 8, + + .read_flag_mask = REG_ADDR_AI, + .write_flag_mask = REG_ADDR_AI, + + .readable_reg = pca953x_readable_register, + .writeable_reg = pca953x_writeable_register, + .volatile_reg = pca953x_volatile_register, + + .disable_locking = true, + .cache_type = REGCACHE_RBTREE, + .max_register = 0x7f, }; static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, @@ -318,18 +411,6 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1; u8 regaddr = pinctrl | addr | (off / BANK_SZ); - /* Single byte read doesn't need AI bit set. 
*/ - if (!addrinc) - return regaddr; - - /* Chips with 24 and more GPIOs always support Auto Increment */ - if (write && NBANK(chip) > 2) - regaddr |= REG_ADDR_AI; - - /* PCA9575 needs address-increment on multi-byte writes */ - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) - regaddr |= REG_ADDR_AI; - return regaddr; } @@ -528,7 +609,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, { struct pca953x_chip *chip = gpiochip_get_data(gc); - switch (config) { + switch (pinconf_to_config_param(config)) { case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: return pca953x_gpio_set_pull_up_down(chip, offset, config); @@ -770,6 +851,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, u8 reg_direction[MAX_BANK]; int ret, i; + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) { + ret = pca953x_acpi_get_irq(&client->dev); + if (ret > 0) + client->irq = ret; + } + if (!client->irq) return 0; @@ -897,6 +984,7 @@ static int pca953x_probe(struct i2c_client *client, int ret; u32 invert = 0; struct regulator *reg; + const struct regmap_config *regmap_config; chip = devm_kzalloc(&client->dev, sizeof(struct pca953x_chip), GFP_KERNEL); @@ -960,7 +1048,17 @@ static int pca953x_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); - chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap); + pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); + + if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { + dev_info(&client->dev, "using AI\n"); + regmap_config = &pca953x_ai_i2c_regmap; + } else { + dev_info(&client->dev, "using no AI\n"); + regmap_config = &pca953x_i2c_regmap; + } + + chip->regmap = devm_regmap_init_i2c(client, regmap_config); if (IS_ERR(chip->regmap)) { ret = PTR_ERR(chip->regmap); goto err_exit; @@ -991,7 +1089,6 @@ static int pca953x_probe(struct i2c_client *client, /* initialize cached registers from their original values. * we can't share this chip with another i2c master. */ - pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) { chip->regs = &pca953x_regs; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 14fb8f6a1ad2..abcc8c0136c4 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client, * reset state. Otherwise it flags pins to be driven low. 
*/ gpio->out = ~n_latch; - gpio->status = gpio->out; + gpio->status = gpio->read(gpio->client); status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio); if (status < 0) diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c index 52f1647a46fd..0cfeccb4ffe2 100644 --- a/drivers/gpio/gpio-pcie-idio-24.c +++ b/drivers/gpio/gpio-pcie-idio-24.c @@ -28,6 +28,47 @@ #include <linux/spinlock.h> #include <linux/types.h> +/* + * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status + * + * Bit: Description + * 0: Enable Interrupt Sources (Bit 0) + * 1: Enable Interrupt Sources (Bit 1) + * 2: Generate Internal PCI Bus Internal SERR# Interrupt + * 3: Mailbox Interrupt Enable + * 4: Power Management Interrupt Enable + * 5: Power Management Interrupt + * 6: Slave Read Local Data Parity Check Error Enable + * 7: Slave Read Local Data Parity Check Error Status + * 8: Internal PCI Wire Interrupt Enable + * 9: PCI Express Doorbell Interrupt Enable + * 10: PCI Abort Interrupt Enable + * 11: Local Interrupt Input Enable + * 12: Retry Abort Enable + * 13: PCI Express Doorbell Interrupt Active + * 14: PCI Abort Interrupt Active + * 15: Local Interrupt Input Active + * 16: Local Interrupt Output Enable + * 17: Local Doorbell Interrupt Enable + * 18: DMA Channel 0 Interrupt Enable + * 19: DMA Channel 1 Interrupt Enable + * 20: Local Doorbell Interrupt Active + * 21: DMA Channel 0 Interrupt Active + * 22: DMA Channel 1 Interrupt Active + * 23: Built-In Self-Test (BIST) Interrupt Active + * 24: Direct Master was the Bus Master during a Master or Target Abort + * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort + * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort + * 27: Target Abort after internal 256 consecutive Master Retrys + * 28: PCI Bus wrote data to LCS_MBOX0 + * 29: PCI Bus wrote data to LCS_MBOX1 + * 30: PCI Bus wrote data to LCS_MBOX2 + * 31: PCI Bus wrote data to LCS_MBOX3 + */ +#define PLX_PEX8311_PCI_LCS_INTCSR 0x68 +#define INTCSR_INTERNAL_PCI_WIRE BIT(8) +#define INTCSR_LOCAL_INPUT BIT(11) + /** * struct idio_24_gpio_reg - GPIO device registers structure * @out0_7: Read: FET Outputs 0-7 @@ -92,6 +133,7 @@ struct idio_24_gpio_reg { struct idio_24_gpio { struct gpio_chip chip; raw_spinlock_t lock; + __u8 __iomem *plx; struct idio_24_gpio_reg __iomem *reg; unsigned long irq_mask; }; @@ -360,13 +402,13 @@ static void idio_24_irq_mask(struct irq_data *data) unsigned long flags; const unsigned long bit_offset = irqd_to_hwirq(data) - 24; unsigned char new_irq_mask; - const unsigned long bank_offset = bit_offset/8 * 8; + const unsigned long bank_offset = bit_offset / 8; unsigned char cos_enable_state; raw_spin_lock_irqsave(&idio24gpio->lock, flags); - idio24gpio->irq_mask &= BIT(bit_offset); - new_irq_mask = idio24gpio->irq_mask >> bank_offset; + idio24gpio->irq_mask &= ~BIT(bit_offset); + new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8; if (!new_irq_mask) { cos_enable_state = ioread8(&idio24gpio->reg->cos_enable); @@ -389,12 +431,12 @@ static void idio_24_irq_unmask(struct irq_data *data) unsigned long flags; unsigned char prev_irq_mask; const unsigned long bit_offset = irqd_to_hwirq(data) - 24; - const unsigned long bank_offset = bit_offset/8 * 8; + const unsigned long bank_offset = bit_offset / 8; unsigned char cos_enable_state; raw_spin_lock_irqsave(&idio24gpio->lock, flags); - prev_irq_mask = idio24gpio->irq_mask >> bank_offset; + prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8; idio24gpio->irq_mask |= 
BIT(bit_offset); if (!prev_irq_mask) { @@ -481,6 +523,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct device *const dev = &pdev->dev; struct idio_24_gpio *idio24gpio; int err; + const size_t pci_plx_bar_index = 1; const size_t pci_bar_index = 2; const char *const name = pci_name(pdev); @@ -494,12 +537,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name); + err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name); if (err) { dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err); return err; } + idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index]; idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index]; idio24gpio->chip.label = name; @@ -520,6 +564,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Software board reset */ iowrite8(0, &idio24gpio->reg->soft_reset); + /* + * enable PLX PEX8311 internal PCI wire interrupt and local interrupt + * input + */ + iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8, + idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1); err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio); if (err) { diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9888b62f37af..432c487f77b4 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -663,8 +663,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) pchip->irq1 = irq1; gpio_reg_base = devm_platform_ioremap_resource(pdev, 0); - if (!gpio_reg_base) - return -EINVAL; + if (IS_ERR(gpio_reg_base)) + return PTR_ERR(gpio_reg_base); clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 187984d26f47..f0b6c68e848e 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) int error; error = pm_runtime_get_sync(p->dev); - if (error < 0) + if (error < 0) { + pm_runtime_put(p->dev); return error; + } error = pinctrl_gpio_request(chip->base + offset); if (error) diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c index 006a7e6a75f2..7e70d2d06c3f 100644 --- a/drivers/gpio/gpio-siox.c +++ b/drivers/gpio/gpio-siox.c @@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice) girq->chip = &ddata->ichip; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; + girq->threaded = true; ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL); if (ret) diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index d7314d39ab65..36ea8a3bd451 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data, sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1); + 
sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 75b1135b383a..daf29044d0f1 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) continue; tc3589x_gpio->oldregs[i][j] = new; - tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); + tc3589x_reg_write(tc3589x, regmap[i] + j, new); } } diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 8a01d3694b28..cecde5440a39 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -365,6 +365,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) struct tegra_gpio_info *tgi = bank->tgi; unsigned int gpio = d->hwirq; + tegra_gpio_irq_mask(d); gpiochip_unlock_as_irq(&tgi->gc, gpio); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 7835aad6d162..88b04d8a7fa7 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -556,7 +556,7 @@ static int zynq_gpio_irq_reqres(struct irq_data *d) struct gpio_chip *chip = irq_data_get_irq_chip_data(d); int ret; - ret = pm_runtime_get_sync(chip->parent); + ret = pm_runtime_resume_and_get(chip->parent); if (ret < 0) return ret; @@ -884,7 +884,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) goto err_pm_dis; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index b2e186047014..66dcab6ab26d 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, int ret, value; ret = request_threaded_irq(event->irq, NULL, event->handler, - event->irqflags, "ACPI:Event", event); + event->irqflags | IRQF_ONESHOT, "ACPI:Event", event); if (ret) { dev_err(acpi_gpio->chip->parent, "Failed to setup interrupt handler for %d\n", diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index fbf6b1a0a4fa..558cd900d399 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -457,6 +457,8 @@ static ssize_t export_store(struct class *class, long gpio; struct gpio_desc *desc; int status; + struct gpio_chip *gc; + int offset; status = kstrtol(buf, 0, &gpio); if (status < 0) @@ -468,6 +470,12 @@ static ssize_t export_store(struct class *class, pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); return -EINVAL; } + gc = desc->gdev->chip; + offset = gpio_chip_hwgpio(desc); + if (!gpiochip_line_is_valid(gc, offset)) { + pr_warn("%s: GPIO %ld masked\n", __func__, gpio); + return -EINVAL; + } /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a8cf55eb54d8..abdf448b11a3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3894,7 +3894,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) } } - if (test_bit(FLAG_IS_OUT, &desc->flags)) { + /* To be valid for IRQ the line needs to be input or open drain */ + if (test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { chip_err(chip, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); @@ -3957,7 +3959,12 @@ void 
gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { - WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags)); + /* + * We must not be an output when using IRQ UNLESS we are + * open drain. + */ + WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) && + !test_bit(FLAG_OPEN_DRAIN, &desc->flags)); set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags); } } diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c index 8319812593e3..3a5dfb8ded1f 100644 --- a/drivers/gpio/sgpio-aspeed.c +++ b/drivers/gpio/sgpio-aspeed.c @@ -17,7 +17,17 @@ #include <linux/spinlock.h> #include <linux/string.h> -#define MAX_NR_SGPIO 80 +/* + * MAX_NR_HW_SGPIO represents the number of actual hardware-supported GPIOs (i.e., + * slots within the clocked serial GPIO data). Since each HW GPIO is both an + * input and an output, we provide MAX_NR_HW_SGPIO * 2 lines on our gpiochip + * device. + * + * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and + * outputs; the inputs start at line 0, the outputs start at SGPIO_OUTPUT_OFFSET. + */ +#define MAX_NR_HW_SGPIO 80 +#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO #define ASPEED_SGPIO_CTRL 0x54 @@ -30,8 +40,8 @@ struct aspeed_sgpio { struct clk *pclk; spinlock_t lock; void __iomem *base; - uint32_t dir_in[3]; int irq; + int n_sgpio; }; struct aspeed_sgpio_bank { @@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, } } -#define GPIO_BANK(x) ((x) >> 5) -#define GPIO_OFFSET(x) ((x) & 0x1f) +#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5) +#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f) #define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) static const struct aspeed_sgpio_bank *to_bank(unsigned int offset) { - unsigned int bank = GPIO_BANK(offset); + unsigned int bank; + + bank = GPIO_BANK(offset); WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks)); return &aspeed_sgpio_banks[bank]; } +static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); + int n = sgpio->n_sgpio; + int c = SGPIO_OUTPUT_OFFSET - n; + + WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); + + /* input GPIOs in the lower range */ + bitmap_set(valid_mask, 0, n); + bitmap_clear(valid_mask, n, c); + + /* output GPIOs above SGPIO_OUTPUT_OFFSET */ + bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n); + bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c); + + return 0; +} + +static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); + int n = sgpio->n_sgpio; + + WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); + + /* input GPIOs in the lower range */ + bitmap_set(valid_mask, 0, n); + bitmap_clear(valid_mask, n, ngpios - n); +} + +static bool aspeed_sgpio_is_input(unsigned int offset) +{ + return offset < SGPIO_OUTPUT_OFFSET; +} + static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); unsigned long flags; enum aspeed_sgpio_reg reg; - bool is_input; int rc = 0; spin_lock_irqsave(&gpio->lock, flags); - is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); - reg = is_input ? reg_val : reg_rdata; + reg = aspeed_sgpio_is_input(offset) ?
reg_val : reg_rdata; rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); spin_unlock_irqrestore(&gpio->lock, flags); @@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) return rc; } -static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) +static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); - void __iomem *addr; + void __iomem *addr_r, *addr_w; u32 reg = 0; - addr = bank_reg(gpio, bank, reg_val); - reg = ioread32(addr); + if (aspeed_sgpio_is_input(offset)) + return -EINVAL; + + /* Since this is an output, read the cached value from rdata, then + * update val. */ + addr_r = bank_reg(gpio, bank, reg_rdata); + addr_w = bank_reg(gpio, bank, reg_val); + + reg = ioread32(addr_r); if (val) reg |= GPIO_BIT(offset); else reg &= ~GPIO_BIT(offset); - iowrite32(reg, addr); + iowrite32(reg, addr_w); + + return 0; } static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) @@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) { - struct aspeed_sgpio *gpio = gpiochip_get_data(gc); - unsigned long flags; - - spin_lock_irqsave(&gpio->lock, flags); - gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; + return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL; } static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); unsigned long flags; + int rc; + + /* No special action is required for setting the direction; we'll + * error-out in sgpio_set_value if this isn't an output GPIO */ spin_lock_irqsave(&gpio->lock, flags); - - gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset); - sgpio_set_value(gc, offset, val); - + rc = sgpio_set_value(gc, offset, val); spin_unlock_irqrestore(&gpio->lock, flags); - return 0; + return rc; } static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) { - int dir_status; - struct aspeed_sgpio *gpio = gpiochip_get_data(gc); - unsigned long flags; - - spin_lock_irqsave(&gpio->lock, flags); - dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); - - return dir_status; - + return !!aspeed_sgpio_is_input(offset); } static void irqd_to_aspeed_sgpio_data(struct irq_data *d, @@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, irq = &gpio->chip.irq; irq->chip = &aspeed_sgpio_irqchip; + irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask; irq->handler = handle_bad_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = aspeed_sgpio_irq_handler; @@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, irq->parents = &gpio->irq; irq->num_parents = 1; - /* set IRQ settings and Enable Interrupt */ + /* Apply default IRQ settings */ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { bank = &aspeed_sgpio_banks[i]; /* set falling or level-low irq */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); /* trigger type is edge */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1)); - /* dual edge trigger mode. 
*/ - iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2)); - /* enable irq */ - iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable)); + /* single edge trigger */ + iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); } return 0; @@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) if (rc < 0) { dev_err(&pdev->dev, "Could not read ngpios property\n"); return -EINVAL; - } else if (nr_gpios > MAX_NR_SGPIO) { + } else if (nr_gpios > MAX_NR_HW_SGPIO) { dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n", - MAX_NR_SGPIO, nr_gpios); + MAX_NR_HW_SGPIO, nr_gpios); return -EINVAL; } + gpio->n_sgpio = nr_gpios; rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq); if (rc < 0) { @@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) spin_lock_init(&gpio->lock); gpio->chip.parent = &pdev->dev; - gpio->chip.ngpio = nr_gpios; + gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2; + gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask; gpio->chip.direction_input = aspeed_sgpio_dir_in; gpio->chip.direction_output = aspeed_sgpio_dir_out; gpio->chip.get_direction = aspeed_sgpio_get_direction; @@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; - /* set all SGPIO pins as input (1). */ - memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in)); - aspeed_sgpio_setup_irqs(gpio, pdev); rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e67c194c2aca..649f17dfcf45 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -206,6 +206,7 @@ source "drivers/gpu/drm/arm/Kconfig" config DRM_RADEON tristate "ATI Radeon" depends on DRM && PCI && MMU + depends on AGP || !AGP select FW_LOADER select DRM_KMS_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index d10f483f5e27..ce30d4e8bf25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -644,6 +644,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); + if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) + return 0; + #if 0 unsigned long flags; int retry; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e262f2ac07a3..92754cfb9808 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -540,6 +540,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v9_mqd *m = get_mqd(mqd); + if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) + return 0; + if (adev->in_gpu_reset) return -EIO; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6d021ecc8d59..f3fa271e3394 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1247,15 +1247,15 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( * be freed anyway */ - /* No more MMU notifiers */ - amdgpu_mn_unregister(mem->bo); - /* Make sure restore workers don't access the BO any more */ bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); list_del(&bo_list_entry->head); mutex_unlock(&process_info->lock); + /* No more 
MMU notifiers */ + amdgpu_mn_unregister(mem->bo); + ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) return ret; @@ -1288,7 +1288,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Free the BO*/ - amdgpu_bo_unref(&mem->bo); + drm_gem_object_put_unlocked(&mem->bo->tbo.base); mutex_destroy(&mem->lock); kfree(mem); @@ -1630,7 +1630,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; - (*mem)->bo = amdgpu_bo_ref(bo); + drm_gem_object_get(&bo->tbo.base); + (*mem)->bo = bo; (*mem)->va = va; (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index daf687428cdb..663314f807fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -150,6 +150,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset); switch (crev) { case 11: + case 12: mem_channel_number = igp_info->v11.umachannelnumber; /* channel width is 64 */ return mem_channel_number * 64; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 50dff69a0f6e..b1172d93c99c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { - return false; - } - - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + if (!rom || romlen == 0) return false; - memcpy_fromio(adev->bios, bios, size); - - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - } - adev->bios_size = size; + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); + + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ece55c8fa673..cda0a76a733d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -719,8 +719,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -857,8 +859,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = amdgpu_connector_best_single_encoder(connector); @@ -980,8 +984,10 @@ amdgpu_connector_dvi_detect(struct drm_connector 
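The amdgpu_read_platform_bios() rewrite above drops the pci_platform_rom() helper and maps pdev->rom directly. A minimal sketch of the underlying shadow-copy pattern, with a hypothetical helper name and no amdgpu types, assuming only the stock ioremap()/memcpy_fromio()/iounmap() kernel API:

#include <linux/io.h>
#include <linux/slab.h>

static void *shadow_rom(phys_addr_t rom, size_t romlen)
{
        void *copy;
        void __iomem *win;

        if (!rom || romlen == 0)
                return NULL;

        copy = kzalloc(romlen, GFP_KERNEL);
        if (!copy)
                return NULL;

        win = ioremap(rom, romlen);
        if (!win)
                goto free_copy;

        /* I/O memory must be copied with memcpy_fromio(), never plain memcpy() */
        memcpy_fromio(copy, win, romlen);
        iounmap(win);
        return copy;

free_copy:
        kfree(copy);
        return NULL;
}

As in the hunk, a single cleanup label frees the allocation on every failure path after the kzalloc().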
*connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1330,8 +1336,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 1e25ca34d876..a9a81e55777b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -240,7 +240,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = RREG32_PCIE(*pos >> 2); + value = RREG32_PCIE(*pos); r = put_user(value, (uint32_t *)buf); if (r) return r; @@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r) return r; - WREG32_PCIE(*pos >> 2, value); + WREG32_PCIE(*pos, value); result += 4; buf += 4; @@ -990,27 +990,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) { struct amdgpu_job *job; - struct drm_sched_job *s_job; + struct drm_sched_job *s_job, *tmp; uint32_t preempt_seq; struct dma_fence *fence, **ptr; struct amdgpu_fence_driver *drv = &ring->fence_drv; struct drm_gpu_scheduler *sched = &ring->sched; + bool preempted = true; if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) return; preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); - if (preempt_seq <= atomic_read(&drv->last_seq)) - return; + if (preempt_seq <= atomic_read(&drv->last_seq)) { + preempted = false; + goto no_preempt; + } preempt_seq &= drv->num_fences_mask; ptr = &drv->fences[preempt_seq]; fence = rcu_dereference_protected(*ptr, 1); +no_preempt: spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { + /* remove job from ring_mirror_list */ + list_del_init(&s_job->node); + sched->ops->free_job(s_job); + continue; + } job = to_amdgpu_job(s_job); - if (job->fence == fence) + if (preempted && job->fence == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 13694d5eba47..3b3fc9a426e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2057,11 +2057,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_remove_device(adev); - amdgpu_amdkfd_device_fini(adev); - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + amdgpu_amdkfd_device_fini(adev); + /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) @@ -3070,12 +3070,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) } } - 
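The pm_runtime hunks in this series (the connector ->detect callbacks above, plus amdgpu_display_crtc_set_config(), amdgpu_drm_ioctl() and amdgpu_driver_open_kms() further down) all fix the same leak: pm_runtime_get_sync() raises the device usage count even when it returns an error, so bailing out without a put leaves the count unbalanced. A minimal sketch of the corrected pattern, with a hypothetical callback name:

#include <linux/pm_runtime.h>

static int example_detect(struct device *dev)
{
        int r = pm_runtime_get_sync(dev);

        if (r < 0) {
                /* the failed get still took a usage-count reference; drop it */
                pm_runtime_put_autosuspend(dev);
                return r;
        }

        /* ... access the powered-up device here ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}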
amdgpu_amdkfd_suspend(adev); - amdgpu_ras_suspend(adev); r = amdgpu_device_ip_suspend_phase1(adev); + amdgpu_amdkfd_suspend(adev); + /* evict vram memory */ amdgpu_bo_evict_vram(adev); @@ -3466,6 +3466,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; + amdgpu_amdkfd_pre_reset(adev); + /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -3888,7 +3890,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ amdgpu_device_lock_adev(tmp_adev, false); r = amdgpu_device_pre_asic_reset(tmp_adev, - NULL, + (tmp_adev == adev) ? job : NULL, &need_full_reset); /*TODO Should we stop ?*/ if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82efc1e22e61..b588e0e409e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -282,7 +282,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_crtc_helper_set_config(set, ctx); @@ -306,6 +306,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev->have_disp_power_ref = false; } +out: /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 05d114a72ca1..e8e172010416 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1011,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, + {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, /* Navi14 */ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, @@ -1286,11 +1287,12 @@ long amdgpu_drm_ioctl(struct file *filp, dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_ioctl(filp, cmd, arg); pm_runtime_mark_last_busy(dev->dev); +out: pm_runtime_put_autosuspend(dev->dev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 143753d237e7..fd94a17fb2c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, u32 cpp; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_VRAM_CLEARED; info = drm_get_format_info(adev->ddev, mode_cmd); cpp = info->cpp[0]; @@ -147,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_kernel, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 23085b352cf2..c212d5fc665c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -404,7 +404,9 @@ 
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); - amdgpu_irq_get(adev, irq_src, irq_type); + + if (irq_src) + amdgpu_irq_get(adev, irq_src, irq_type); ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; @@ -539,8 +541,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(ring); } - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) @@ -576,8 +579,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) } /* disable the interrupt */ - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } @@ -603,8 +607,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) continue; /* enable the interrupt */ - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ceb44925947..9d61d1b7a569 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_bo_list_entry vm_pd; struct list_head list, duplicates; + struct dma_fence *fence = NULL; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; - int r; + long r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; - tv.num_shared = 1; + tv.num_shared = 2; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); @@ -178,28 +179,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false); if (r) { dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%d)\n", r); + "we fail to reserve bo (%ld)\n", r); return; } bo_va = amdgpu_vm_bo_find(vm, bo); - if (bo_va && --bo_va->ref_count == 0) { - amdgpu_vm_bo_rmv(adev, bo_va); + if (!bo_va || --bo_va->ref_count) + goto out_unlock; - if (amdgpu_vm_ready(vm)) { - struct dma_fence *fence = NULL; + amdgpu_vm_bo_rmv(adev, bo_va); + if (!amdgpu_vm_ready(vm)) + goto out_unlock; - r = amdgpu_vm_clear_freed(adev, vm, &fence); - if (unlikely(r)) { - dev_err(adev->dev, "failed to clear page " - "tables on GEM object close (%d)\n", r); - } - - if (fence) { - amdgpu_bo_fence(bo, fence, true); - dma_fence_put(fence); - } - } + fence = dma_resv_get_excl(bo->tbo.base.resv); + if (fence) { + amdgpu_bo_fence(bo, fence, true); + fence = NULL; } + + r = amdgpu_vm_clear_freed(adev, vm, &fence); + if (r || !fence) + goto out_unlock; + + amdgpu_bo_fence(bo, fence, true); + dma_fence_put(fence); + +out_unlock: + if (unlikely(r < 0)) + dev_err(adev->dev, "failed to clear page " + "tables on GEM object close (%ld)\n", r); ttm_eu_backoff_reservation(&ticket, &list); } @@ -554,6 +561,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct 
ww_acquire_ctx ticket; struct list_head list, duplicates; uint64_t va_flags; + uint64_t vm_size; int r = 0; if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { @@ -574,6 +582,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->va_address &= AMDGPU_GMC_HOLE_MASK; + vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; + vm_size -= AMDGPU_VA_RESERVED_SIZE; + if (args->va_address + args->map_size > vm_size) { + dev_dbg(&dev->pdev->dev, + "va_address 0x%llx is in top reserved area 0x%llx\n", + args->va_address + args->map_size, vm_size); + return -EINVAL; + } + if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", args->flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2a3f5ec298db..76429932035e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -469,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; - if (!src) + if (!src || !src->funcs || !src->funcs->set) continue; for (k = 0; k < src->num_types; k++) amdgpu_irq_update(adev, src, k); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 96b2a31ccfed..f06a5142d66e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -36,7 +36,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) memset(&ti, 0, sizeof(struct amdgpu_task_info)); - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { + if (amdgpu_gpu_recovery && + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a73206784cba..59fd9ebf3a58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -638,8 +638,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; @@ -667,9 +671,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
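The range check added to amdgpu_gem_va_ioctl() above pairs with the AMDGPU_VA_RESERVED_SIZE change in the amdgpu_vm.h hunk below: the top AMDGPU_VA_RESERVED_SIZE bytes of the VM address space are held back for kernel use, so a user mapping must end at or below max_pfn * AMDGPU_GPU_PAGE_SIZE minus the reserved area. A standalone sketch of the bound, with a hypothetical helper and plain integer types:

#include <stdbool.h>
#include <stdint.h>

#define RESERVED_SIZE (2ull << 20)      /* 2 MiB, as in the amdgpu_vm.h hunk */

static bool va_range_ok(uint64_t va, uint64_t map_size, uint64_t vm_top)
{
        uint64_t limit = vm_top - RESERVED_SIZE;

        /* reject empty ranges, wrap-around, and mappings into the reserved top */
        return map_size != 0 && va + map_size > va && va + map_size <= limit;
}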
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; @@ -975,7 +980,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = pm_runtime_get_sync(dev->dev); if (r < 0) - return r; + goto pm_put; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -1026,6 +1031,7 @@ error_pasid: out_suspend: pm_runtime_mark_last_busy(dev->dev); +pm_put: pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03930313c263..bcb7ab5c602d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -90,7 +90,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) adev->pm.ac_power = true; else adev->pm.ac_power = false; - if (adev->powerplay.pp_funcs->enable_bapm) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); mutex_unlock(&adev->pm.mutex); } @@ -369,6 +370,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, if (current_level == level) return count; + if (adev->asic_type == CHIP_RAVEN) { + if (adev->rev_id < 8) { + if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, false); + else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, true); + } + } + /* profile_exit setting is valid only when current mode is in profile mode */ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | @@ -415,8 +425,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, ret = smu_get_power_num_states(&adev->smu, &data); if (ret) return ret; - } else if (adev->powerplay.pp_funcs->get_pp_num_states) + } else if (adev->powerplay.pp_funcs->get_pp_num_states) { amdgpu_dpm_get_pp_num_states(adev, &data); + } else { + memset(&data, 0, sizeof(data)); + } buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); for (i = 0; i < data.nums; i++) @@ -857,7 +870,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) { int ret; - long level; + unsigned long level; char *sub_str = NULL; char *tmp; char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; @@ -873,8 +886,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) while (tmp[0]) { sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { - ret = kstrtol(sub_str, 0, &level); - if (ret) + ret = kstrtoul(sub_str, 0, &level); + if (ret || level > 31) return -EINVAL; *mask |= 1 << level; } else @@ -2088,7 +2101,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); + return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, @@ -2118,7 +2131,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); + return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, diff 
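amdgpu_read_mask() above switches from kstrtol() to kstrtoul() and rejects bit indices above 31; with the old signed parse, a negative or oversized value fed an undefined shift when building the 32-bit mask. A condensed sketch of the hardened loop (hypothetical function name; space-delimited input assumed):

#include <linux/kernel.h>
#include <linux/string.h>

static int parse_mask(char *buf, u32 *mask)
{
        unsigned long level;
        char *sub;
        int ret;

        *mask = 0;
        while ((sub = strsep(&buf, " "))) {
                if (!strlen(sub))
                        continue;
                ret = kstrtoul(sub, 0, &level);
                if (ret || level > 31)  /* keep the shift inside the mask width */
                        return -EINVAL;
                *mask |= 1u << level;
        }
        return 0;
}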
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c8793e6cc3c5..6373bfb47d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) */ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; u32 dws; int r; /* allocate clear state block */ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) return r; } - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 91899d28fa72..e8132210c244 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _AMDGPU_TRACE_H_ #include <linux/stringify.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f15ded1ce905..91e3a87b1de8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -967,6 +967,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } @@ -983,7 +984,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) DMA_BIDIRECTIONAL : DMA_TO_DEVICE; /* double check that we don't free the table twice */ - if (!ttm->sg->sgl) + if (!ttm->sg || !ttm->sg->sgl) return; /* unmap the pages mapped to the device */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2c364b8695f..cfa8324b9f51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -231,7 +231,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if ((adev->asic_type == CHIP_POLARIS10 || adev->asic_type == CHIP_POLARIS11) && (adev->uvd.fw_version < FW_1_66_16)) - DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n", + DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n", version_major, version_minor); } else { unsigned int enc_major, enc_minor, dec_minor; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c7514f743409..fb47ddc6f7f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2123,8 +2123,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2188,8 +2188,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr 
& AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ @@ -2333,7 +2333,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->start = eaddr + 1; after->last = tmp->last; after->offset = tmp->offset; - after->offset += after->start - tmp->start; + after->offset += (after->start - tmp->start) << PAGE_SHIFT; after->flags = tmp->flags; after->bo_va = tmp->bo_va; list_add(&after->list, &tmp->bo_va->invalids); @@ -2867,10 +2867,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); - if (vm->use_cpu_for_update) + if (vm->use_cpu_for_update) { + /* Sync with last SDMA update/clear before switching to CPU */ + r = amdgpu_bo_sync_wait(vm->root.base.bo, + AMDGPU_FENCE_OWNER_UNDEFINED, true); + if (r) + goto free_idr; + vm->update_funcs = &amdgpu_vm_cpu_funcs; - else + } else { vm->update_funcs = &amdgpu_vm_sdma_funcs; + } dma_fence_put(vm->last_update); vm->last_update = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2eda3a8c330d..4a64825b53cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -105,8 +105,8 @@ struct amdgpu_bo_list_entry; #define AMDGPU_MMHUB_0 1 #define AMDGPU_MMHUB_1 2 -/* hardcode that limit for now */ -#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) +/* Reserve 2MB at top/bottom of address space for kernel use */ +#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 65aae75f80fd..ce1048bad158 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -311,15 +311,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev } +/* + * NOTE psp_xgmi_node_info.num_hops layout is as follows: + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved) + * num_hops[5:3] = reserved + * num_hops[2:0] = number of hops + */ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev) { struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + uint8_t num_hops_mask = 0x7; int i; for (i = 0 ; i < top->num_nodes; ++i) if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) - return top->nodes[i].num_hops; + return top->nodes[i].num_hops & num_hops_mask; return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8c..cae426c7c086 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 
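The new comment in amdgpu_xgmi.c explains why amdgpu_xgmi_get_hops_count() now masks with 0x7: firmware packs a link type into num_hops[7:6], so returning the raw byte could overstate the hop count whenever those bits are set. Illustrative accessors for the documented layout (names are descriptive only):

#include <linux/types.h>

static inline u8 xgmi_link_type(u8 num_hops)
{
        return (num_hops >> 6) & 0x3;   /* 0 = xGMI2, 1 = xGMI3, 2/3 reserved */
}

static inline u8 xgmi_hop_count(u8 num_hops)
{
        return num_hops & 0x7;          /* only bits [2:0] carry hops */
}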
c45304f1047c..450ad7d5e21a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, u32 extra_bits = vmid & 0xf; /* IB packet must end on a 8 DW boundary */ - cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); + cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ @@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle) { u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 tmp = RREG32(mmSRBM_STATUS2); + u32 tmp; - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; - } + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 1dca0cabc326..13520d173296 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -193,19 +193,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. - */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
+ */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 14417cebe38b..d17edc850427 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -993,39 +993,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v10_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1787,25 +1754,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { - int r; - - if (adev->in_gpu_reset) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (r) - return r; - - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, - (void **)&adev->gfx.rlc.cs_ptr); - if (!r) { - adev->gfx.rlc.funcs->get_csb_buffer(adev, - adev->gfx.rlc.cs_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - } - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - if (r) - return r; - } + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, @@ -3774,10 +3723,6 @@ static int gfx_v10_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gfx_v10_0_csb_vram_pin(adev); - if (r) - return r; - if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); @@ -3865,7 +3810,6 @@ static int gfx_v10_0_hw_fini(void *handle) } gfx_v10_0_cp_enable(adev, false); gfx_v10_0_enable_gui_idle_interrupt(adev, false); - gfx_v10_0_csb_vram_unpin(adev); return 0; } @@ -4094,10 +4038,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); - - /* only for Vega10 & Raven1 */ - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); @@ -4290,11 +4232,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); 
- } else - amdgpu_gfx_off_ctrl(adev, true); + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -4687,12 +4625,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; + } /* assert preemption condition */ amdgpu_ring_set_preempt_cond_exec(ring, false); @@ -4703,6 +4646,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ++ring->trail_seq); amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + /* poll the trailing fence */ for (i = 0; i < adev->usec_timeout; i++) { if (ring->trail_seq == diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 791ba398f007..d92e92e5d50b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4554,6 +4554,8 @@ static int gfx_v7_0_hw_init(void *handle) gfx_v7_0_constants_init(adev); + /* init CSB */ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* init rlc */ r = adev->gfx.rlc.funcs->resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index cc88ba76a8d4..467ed7fca884 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -3917,6 +3884,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v8_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32(mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -4837,10 +4805,6 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); - r = gfx_v8_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4958,8 +4922,6 @@ static int gfx_v8_0_hw_fini(void *handle) pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); - gfx_v8_0_csb_vram_unpin(adev); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 40034efa64bb..90dcc7afc9c4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1026,6 +1026,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = true; break; default: + adev->gfx.me_fw_write_wait = true; + adev->gfx.mec_fw_write_wait = true; break; } } @@ -1673,39 +1675,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -2594,6 +2563,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -3886,10 +3856,6 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_constants_init(adev); - r = gfx_v9_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3975,8 +3941,6 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); - gfx_v9_0_csb_vram_unpin(adev); - return 0; } @@ -4837,10 +4801,9 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: case CHIP_RENOIR: - if (!enable) { + if (!enable) amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); @@ -4866,12 +4829,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, amdgpu_gfx_off_ctrl(adev, true); break; case CHIP_VEGA12: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } else { - amdgpu_gfx_off_ctrl(adev, true); - } + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index a13dd9a51149..7d165f024f07 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -193,19 +193,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. 
- */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. + */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index 074a9a09c0a7..a5b60c9a2418 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -73,6 +73,22 @@ #define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_BARRIER_OR 0 +#define SDMA_GCR_RANGE_IS_PA (1 << 18) +#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) +#define SDMA_GCR_GL2_WB (1 << 15) +#define SDMA_GCR_GL2_INV (1 << 14) +#define SDMA_GCR_GL2_DISCARD (1 << 13) +#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) +#define SDMA_GCR_GL2_US (1 << 10) +#define SDMA_GCR_GL1_INV (1 << 9) +#define SDMA_GCR_GLV_INV (1 << 8) +#define SDMA_GCR_GLK_INV (1 << 7) +#define SDMA_GCR_GLK_WB (1 << 6) +#define SDMA_GCR_GLM_INV (1 << 5) +#define SDMA_GCR_GLM_WB (1 << 4) +#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) +#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) + /*define for op field*/ #define SDMA_PKT_HEADER_op_offset 0 #define SDMA_PKT_HEADER_op_mask 0x000000FF diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 74a9fe8e0cfb..8c54f0be51ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -44,7 +44,7 @@ enum psp_gfx_crtl_cmd_id GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */ GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */ - GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */ + GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */ GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index a10175838013..b6af67f6f214 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, 
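The SDMA_GCR_* definitions above are the building blocks of the SDMA GCR_REQ packet; the sdma_v5_0_ring_emit_ib() hunk further down ORs a subset of them and shifts the group by 16 to flush and invalidate the GL2/GLM caches ahead of each IB. A small composition example mirroring that emit path (local macro copies shown only for self-containment):

#include <linux/types.h>

/* same encodings as the SDMA_GCR_* macros above */
#define GL2_WB  (1 << 15)
#define GL2_INV (1 << 14)
#define GLM_INV (1 << 5)
#define GLM_WB  (1 << 4)

static u32 gcr_flags_dword(void)
{
        /* writeback + invalidate L2 and GLM; the <<16 matches the emit path */
        return (GL2_INV | GL2_WB | GLM_INV | GLM_WB) << 16;
}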
SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 5f4e2c616241..cd3ebed46d05 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4554e72c8378..23de332f3c6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 8493bfbbc148..465351184bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -286,30 +286,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring) static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u64 *wptr = NULL; - uint64_t local_wptr = 0; + u64 wptr; if (ring->use_doorbell) { /* XXX check if swapping is necessary on BE */ - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); - *wptr = (*wptr) >> 2; - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - u32 lowbit, highbit; - - wptr = &local_wptr; - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; - - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", - ring->me, highbit, lowbit); - *wptr = highbit; - *wptr = (*wptr) << 32; - *wptr |= 
lowbit; + wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = wptr << 32; + wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); } - return *wptr; + return wptr >> 2; } /** @@ -382,8 +372,27 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); - /* IB packet must end on a 8 DW boundary */ - sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + /* Invalidate L2, because if we don't do it, we might get stale cache + * lines from previous IBs. + */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | + SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | + SDMA_GCR_GLM_WB) << 16); + amdgpu_ring_write(ring, 0xffffff80); + amdgpu_ring_write(ring, 0xffff); + + /* An IB packet must end on a 8 DW boundary--the next dword + * must be on a 8-dword boundary. Our IB packet below is 6 + * dwords long, thus add x number of NOPs, such that, in + * modular arithmetic, + * wptr + 6 + x = 8k, k >= 0, which in C is, + * (wptr + 6 + x) % 8 = 0. + * The expression below, is a solution of x. + */ + sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -1086,10 +1095,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw - * + * sdma_v5_0_ring_pad_ib - pad the IB * @ib: indirect buffer to fill with padding * + * Pad the IB with NOPs to a boundary multiple of 8. 
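The sdma_v5_0_ring_get_wptr() rewrite visible across this hunk fixes a real bug: the old code aimed a pointer at the writeback slot and executed *wptr = (*wptr) >> 2, mutating memory the hardware also updates. The new version snapshots the doorbell copy with READ_ONCE() and scales only the local value. A reduced sketch of the fixed read:

#include <linux/compiler.h>
#include <linux/types.h>

static u64 get_wptr_fixed(const u64 *wb_slot)
{
        /* snapshot once; never write back into hardware-updated memory */
        u64 wptr = READ_ONCE(*wb_slot);

        return wptr >> 2;       /* apply the scaling on the local copy only */
}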
*/ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { @@ -1097,7 +1106,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 0x7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1254,8 +1263,12 @@ static int sdma_v5_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->sdma.instance[i].fw != NULL) + release_firmware(adev->sdma.instance[i].fw); + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + } return 0; } @@ -1600,7 +1613,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, .emit_fence = sdma_v5_0_ring_emit_fence, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c086262cc181..41631271d64c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -276,6 +276,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; + if (adev->asic_type == CHIP_RENOIR) + return 10000; if (adev->asic_type == CHIP_RAVEN) return reference_clock / 4; @@ -1144,8 +1146,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; + AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index e40140bf6699..db0a3bda13fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -195,19 +195,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. - */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
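The long comment in sdma_v5_0_ring_emit_ib() above spells out the NOP-count algebra, and the same identity backs the (2 - wptr) & 7 and (4 - wptr) & 7 expressions in the cik/sdma_v2/v3/v4 hunks as well as the (-length_dw) & 7 form now used by the pad_ib helpers. A worked self-check, assuming unsigned (mod 2^32) arithmetic and the 8-dword alignment rule:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        for (uint32_t w = 0; w < 64; w++) {
                /* 6-dword INDIRECT_BUFFER packet: x = (2 - w) & 7 NOPs */
                assert((w + 6 + ((2u - w) & 7u)) % 8 == 0);
                /* 4-dword packet on cik: x = (4 - w) & 7 NOPs */
                assert((w + 4 + ((4u - w) & 7u)) % 8 == 0);
                /* pad an IB of length w up to a multiple of 8 dwords */
                assert((w + ((0u - w) & 7u)) % 8 == 0);
        }
        return 0;
}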
+ */ + + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + +out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 36ad0c0e8efb..cd2cbe760e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1026,6 +1026,10 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* set the write pointer delay */ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); @@ -1048,6 +1052,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); return 0; } @@ -1357,8 +1364,13 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* Restore */ ring = &adev->vcn.inst->ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); @@ -1366,6 +1378,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); ring = &adev->vcn.inst->ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); @@ -1374,6 +1387,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 901fe3590165..d3400da6ab64 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, 0xbf85fff8, - 0xbf820141, 0xbef4037e, + 0xbf820142, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304080, 0x725d0100, 0xe0304100, 0x725d0200, 0xe0304180, - 0x725d0300, 0xbf820031, + 0x725d0300, 0xbf820032, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, 0x00000400, 0xbefc0384, @@ -992,83 +992,84 @@ static const 
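The vcn_v2_0 hunks above bracket the ring WPTR/RPTR reprogramming with a DPG stall, toggled through WREG32_P(). In amdgpu that macro is a read-modify-write along the lines of tmp &= mask; tmp |= val & ~mask, which is why passing (STALL_BIT, ~STALL_BIT) sets the bit and (0, ~STALL_BIT) clears it while leaving the rest of the register alone. A hedged model of the update rule:

#include <linux/types.h>

/* models WREG32_P(): new = (old & mask) | (val & ~mask) */
static inline u32 rmw32(u32 old, u32 val, u32 mask)
{
        return (old & mask) | (val & ~mask);
}

/*
 * set:   rmw32(old, STALL_BIT, ~STALL_BIT) == old | STALL_BIT
 * clear: rmw32(old, 0,         ~STALL_BIT) == old & ~STALL_BIT
 */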
uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304100, 0x725d0100, 0xe0304200, 0x725d0200, 0xe0304300, - 0x725d0300, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0xbef603ff, - 0x01000000, 0xf4211bfa, + 0x725d0300, 0xbf8c3f70, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, + 0xbef603ff, 0x01000000, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0xbef603ff, 0x01000000, + 0xf4211bfa, 0xf0000000, + 0x80788478, 0xf4211b3a, 0xf0000000, 0x80788478, - 0xf4211b3a, 0xf0000000, - 0x80788478, 0xf4211b7a, + 0xf4211b7a, 0xf0000000, + 0x80788478, 0xf4211eba, 0xf0000000, 0x80788478, - 0xf4211eba, 0xf0000000, - 0x80788478, 0xf4211efa, + 0xf4211efa, 0xf0000000, + 0x80788478, 0xf4211c3a, 0xf0000000, 0x80788478, - 0xf4211c3a, 0xf0000000, - 0x80788478, 0xf4211c7a, + 0xf4211c7a, 0xf0000000, + 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, - 0xf4211e7a, 0xf0000000, - 0x80788478, 0xf4211cfa, + 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef814, 0xf4211bba, - 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef815, - 0xbef2036d, 0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 
0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index cdaa523ce6be..4433bda2ce25 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -758,6 +758,7 @@ L_RESTORE_V0: buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) /* restore SGPRs */ //will be 2+8+16*6 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1d3cd5c50d5f..4a0ef9268918 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1664,6 +1664,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, } mutex_unlock(&p->mutex); + dma_buf_put(dmabuf); args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); @@ -1673,6 +1674,7 @@ err_free: amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); err_unlock: mutex_unlock(&p->mutex); + dma_buf_put(dmabuf); return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index 511712c2e382..673d5e34f213 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file) return single_open(file, show, NULL); } +static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data) +{ + seq_printf(m, "echo gpu_id > hang_hws\n"); + return 0; +} static ssize_t kfd_debugfs_hang_hws_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) @@ -94,7 +99,7 @@ void kfd_debugfs_init(void) debugfs_create_file("rls", S_IFREG | 0444, debugfs_root, kfd_debugfs_rls_by_device, &kfd_debugfs_fops); debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root, - NULL, &kfd_debugfs_hang_hws_fops); + kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops); } void kfd_debugfs_fini(void) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0dc1084b5e82..ad9483b9eea3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1112,9 +1112,9 @@ kfd_gtt_out: return 0; kfd_gtt_no_free_chunk: - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); mutex_unlock(&kfd->gtt_sa_lock); - kfree(mem_obj); + kfree(*mem_obj); return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index a2ed9c257cb0..ab69898c9cb7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1011,6 +1011,9 @@ static int set_sched_resources(struct device_queue_manager *dqm) static int initialize_cpsch(struct device_queue_manager *dqm) { + uint64_t 
num_sdma_queues; + uint64_t num_xgmi_sdma_queues; + pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm)); mutex_init(&dqm->lock_hidden); @@ -1019,8 +1022,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->sdma_queue_count = 0; dqm->xgmi_sdma_queue_count = 0; dqm->active_runlist = false; - dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); - dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); + + num_sdma_queues = get_num_sdma_queues(dqm); + if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap)) + dqm->sdma_bitmap = ULLONG_MAX; + else + dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1); + + num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm); + if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap)) + dqm->xgmi_sdma_bitmap = ULLONG_MAX; + else + dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1); INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); @@ -1075,6 +1088,8 @@ static int stop_cpsch(struct device_queue_manager *dqm) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); dqm_unlock(dqm); + pm_release_ib(&dqm->packets); + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); pm_uninit(&dqm->packets); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c index 72e4d61ac752..ad0593342333 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c @@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm, /* check if sh_mem_config register already configured */ if (qpd->sh_mem_config == 0) { qpd->sh_mem_config = - SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; + (SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); #if 0 /* TODO: * This shouldn't be an issue with Navi10. Verify. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 5f35df23fb18..9266c8e76be7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -20,6 +20,10 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/kconfig.h> + +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2) + #include <linux/printk.h> #include <linux/device.h> #include <linux/slab.h> @@ -358,3 +362,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) return 0; } + +#endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h index dd23d9fdf6a8..afd420b01a0c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h @@ -23,7 +23,9 @@ #ifndef __KFD_IOMMU_H__ #define __KFD_IOMMU_H__ -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2) +#include <linux/kconfig.h> + +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2) #define KFD_SUPPORT_IOMMU_V2 @@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd) } static inline int kfd_iommu_device_init(struct kfd_dev *kfd) { +#if IS_MODULE(CONFIG_AMD_IOMMU_V2) + WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD"); +#endif return 0; } @@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) return 0; } -#endif /* defined(CONFIG_AMD_IOMMU_V2) */ +#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */ #endif /* __KFD_IOMMU_H__ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40e3fc0c6942..aa0a617b8d44 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -312,6 +312,7 @@ struct kfd_process *kfd_create_process(struct file *filep) (int)process->lead_thread->pid); if (ret) { pr_warn("Creating procfs pid directory failed"); + kobject_put(process->kobj); goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7551761f2aa9..a49e2ab071d6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -612,8 +612,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, ret = kobject_init_and_add(dev->kobj_node, &node_type, sys_props.kobj_nodes, "%d", id); - if (ret < 0) + if (ret < 0) { + kobject_put(dev->kobj_node); return ret; + } dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); if (!dev->kobj_mem) @@ -660,8 +662,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(mem->kobj, &mem_type, dev->kobj_mem, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(mem->kobj); return ret; + } mem->attr.name = "properties"; mem->attr.mode = KFD_SYSFS_FILE_MODE; @@ -679,8 +683,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(cache->kobj, &cache_type, dev->kobj_cache, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(cache->kobj); return ret; + } cache->attr.name = "properties"; cache->attr.mode = KFD_SYSFS_FILE_MODE; @@ -698,8 +704,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(iolink->kobj, &iolink_type, dev->kobj_iolink, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(iolink->kobj); return ret; + } iolink->attr.name = "properties"; iolink->attr.mode = KFD_SYSFS_FILE_MODE; @@ -779,8 +787,10 @@ static int kfd_topology_update_sysfs(void) ret = kobject_init_and_add(sys_props.kobj_topology, &sysprops_type, &kfd_device->kobj, "topology"); - if (ret < 0) + if (ret < 0) { + kobject_put(sys_props.kobj_topology); return ret; + } sys_props.kobj_nodes = kobject_create_and_add("nodes", 
sys_props.kobj_topology); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 360c87ba4595..82f1d5434b82 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -928,8 +928,13 @@ static int dm_late_init(void *handle) struct dmcu_iram_parameters params; unsigned int linear_lut[16]; int i; - struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; - bool ret = false; + struct dmcu *dmcu = NULL; + bool ret; + + if (!adev->dm.fw_dmcu) + return detect_mst_link_for_all_connectors(adev->ddev); + + dmcu = adev->dm.dc->res_pool->dmcu; for (i = 0; i < 16; i++) linear_lut[i] = 0xFFFF * i / 15; @@ -945,13 +950,10 @@ static int dm_late_init(void *handle) */ params.min_abm_backlight = 0x28F; - /* todo will enable for navi10 */ - if (adev->asic_type <= CHIP_RAVEN) { - ret = dmcu_load_iram(dmcu, params); + ret = dmcu_load_iram(dmcu, params); - if (!ret) - return -EINVAL; - } + if (!ret) + return -EINVAL; return detect_mst_link_for_all_connectors(adev->ddev); } @@ -1091,8 +1093,8 @@ static void emulated_link_detect(struct dc_link *link) link->type = dc_connection_none; prev_sink = link->local_sink; - if (prev_sink != NULL) - dc_sink_retain(prev_sink); + if (prev_sink) + dc_sink_release(prev_sink); switch (link->connector_signal) { case SIGNAL_TYPE_HDMI_TYPE_A: { @@ -1415,24 +1417,30 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) * TODO: check if we still need the S3 mode update workaround. * If yes, put it here. */ - if (aconnector->dc_sink) + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); + dc_sink_release(aconnector->dc_sink); + } aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { aconnector->edid = NULL; - drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); + if (aconnector->dc_link->aux_mode) { + drm_dp_cec_unset_edid( + &aconnector->dm_dp_aux.aux); + } } else { aconnector->edid = - (struct edid *) sink->dc_edid.raw_edid; - + (struct edid *)sink->dc_edid.raw_edid; drm_connector_update_edid_property(connector, - aconnector->edid); - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + aconnector->edid); + if (aconnector->dc_link->aux_mode) + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, + aconnector->edid); } + amdgpu_dm_update_freesync_caps(connector, aconnector->edid); } else { @@ -2035,12 +2043,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) &dm_atomic_state_funcs); r = amdgpu_display_modeset_create_props(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } r = amdgpu_dm_audio_init(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } return 0; } @@ -2056,6 +2070,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #if defined(CONFIG_ACPI) struct amdgpu_dm_backlight_caps caps; + memset(&caps, 0, sizeof(caps)); + if (dm->backlight_caps.caps_valid) return; @@ -2616,6 +2632,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, scaling_info->src_rect.x = state->src_x >> 16; scaling_info->src_rect.y = state->src_y >> 16; + /* + * For reasons we don't (yet) fully understand a non-zero + * src_y coordinate into an NV12 buffer can cause a + * system hang. To avoid hangs (and maybe be overly cautious) + * let's reject both non-zero src_x and src_y. 
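+ * (src_x/src_y in drm_plane_state are 16.16 fixed point, so the + * >> 16 shifts above extract the integer pixel offsets that this + * check inspects.)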
+ * + * We currently know of only one use-case to reproduce a + * scenario with non-zero src_x and src_y for NV12, which + * is to gesture the YouTube Android app into full screen + * on ChromeOS. + */ + if (state->fb && + state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || + scaling_info->src_rect.y != 0)) + return -EINVAL; + scaling_info->src_rect.width = state->src_w >> 16; if (scaling_info->src_rect.width == 0) return -EINVAL; @@ -2698,7 +2731,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, const union dc_tiling_info *tiling_info, const uint64_t info, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -2710,6 +2744,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, memset(&input, 0, sizeof(input)); memset(&output, 0, sizeof(output)); + if (force_disable_dcc) + return 0; + if (!offset) return 0; @@ -2759,7 +2796,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, union dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = &afb->base; int ret; @@ -2869,7 +2907,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, ret = fill_plane_dcc_attributes(adev, afb, format, rotation, plane_size, tiling_info, - tiling_flags, dcc, address); + tiling_flags, dcc, address, + force_disable_dcc); if (ret) return ret; } @@ -2961,7 +3000,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, const uint64_t tiling_flags, struct dc_plane_info *plane_info, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -3040,7 +3080,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, - &plane_info->dcc, address); + &plane_info->dcc, address, + force_disable_dcc); if (ret) return ret; @@ -3063,6 +3104,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_plane_info plane_info; uint64_t tiling_flags; int ret; + bool force_disable_dcc = false; ret = fill_dc_scaling_info(plane_state, &scaling_info); if (ret) @@ -3077,9 +3119,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, if (ret) return ret; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, &plane_info, - &dc_plane_state->address); + &dc_plane_state->address, + force_disable_dcc); if (ret) return ret; @@ -3929,6 +3973,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) struct amdgpu_device *adev = connector->dev->dev_private; struct amdgpu_display_manager *dm = &adev->dm; + /* + * Call only if mst_mgr was initialized before, since it's not done + * for all connector types.
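+ * (mst_mgr.dev is assigned by drm_dp_mst_topology_mgr_init(), so a + * non-NULL value is a reliable sign that the manager was initialized.)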
+ */ + if (aconnector->mst_mgr.dev) + drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); + #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) @@ -4272,19 +4323,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state) -{ - struct drm_device *dev = new_crtc_state->crtc->dev; - struct drm_plane *plane; - - drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) { - if (plane->type == DRM_PLANE_TYPE_CURSOR) - return true; - } - - return false; -} - static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; @@ -4364,19 +4402,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return ret; } + /* + * We require the primary plane to be enabled whenever the CRTC is, otherwise + * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other + * planes are disabled, which is not supported by the hardware. And there is legacy + * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. + */ + if (state->enable && + !(state->plane_mask & drm_plane_mask(crtc->primary))) + return -EINVAL; + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; - /* - * We want at least one hardware plane enabled to use - * the stream with a cursor enabled. - */ - if (state->enable && state->active && - does_crtc_have_active_cursor(state) && - dm_crtc_state->active_planes == 0) - return -EINVAL; - if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) return 0; @@ -4481,6 +4520,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, uint64_t tiling_flags; uint32_t domain; int r; + bool force_disable_dcc = false; dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_new = to_dm_plane_state(new_state); @@ -4539,11 +4579,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, - &plane_state->address); + &plane_state->address, + force_disable_dcc); } return 0; @@ -5347,10 +5389,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, int x, y; int xorigin = 0, yorigin = 0; - position->enable = false; - position->x = 0; - position->y = 0; - if (!crtc || !plane->state->fb) return 0; @@ -5402,7 +5440,7 @@ static void handle_cursor_update(struct drm_plane *plane, struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint64_t address = afb ? 
afb->address : 0; - struct dc_cursor_position position; + struct dc_cursor_position position = {0}; struct dc_cursor_attributes attributes; int ret; @@ -5767,7 +5805,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, &bundle->plane_infos[planes_count], - &bundle->flip_addrs[planes_count].address); + &bundle->flip_addrs[planes_count].address, + false); + + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", + new_plane_state->plane->index, + bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; @@ -6435,14 +6478,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(conn_state); if (ret) - goto err; + goto out; /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); ret = PTR_ERR_OR_ZERO(crtc_state); if (ret) - goto err; + goto out; /* force a restore */ crtc_state->mode_changed = true; @@ -6452,17 +6495,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(plane_state); if (ret) - goto err; - + goto out; /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); - if (!ret) - return 0; -err: - DRM_ERROR("Restoring old state failed with %i\n", ret); +out: drm_atomic_state_put(state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); return ret; } @@ -6902,6 +6943,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; + struct amdgpu_crtc *new_acrtc; bool needs_reset; int ret = 0; @@ -6911,9 +6953,23 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state = to_dm_plane_state(new_plane_state); dm_old_plane_state = to_dm_plane_state(old_plane_state); - /*TODO Implement atomic check for cursor plane */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) + /*TODO Implement better atomic check for cursor plane */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if (!enable || !new_plane_crtc || + drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + new_acrtc = to_amdgpu_crtc(new_plane_crtc); + + if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) || + (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) { + DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n", + new_plane_state->crtc_w, new_plane_state->crtc_h); + return -EINVAL; + } + return 0; + } needs_reset = should_reset_plane(state, plane, old_plane_state, new_plane_state); @@ -6946,8 +7002,7 @@ static int dm_update_plane_state(struct dc *dc, dm_old_plane_state->dc_state, dm_state->context)) { - ret = EINVAL; - return ret; + return -EINVAL; } @@ -7138,7 +7193,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, &plane_info, - &flip_addr.address); + &flip_addr.address, + false); if (ret) goto cleanup; @@ -7186,6 +7242,30 @@ cleanup: *out_type = update_type; return ret; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct amdgpu_dm_connector *aconnector = NULL; + int i; + for_each_new_connector_in_state(state, connector, 
conn_state, i) { + if (conn_state->crtc != crtc) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->port || !aconnector->mst_port) + aconnector = NULL; + else + break; + } + + if (!aconnector) + return 0; + + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +} +#endif /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. @@ -7239,6 +7319,40 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + /* Check connector changes */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + + /* Skip connectors that are disabled or part of modeset already. */ + if (!old_con_state->crtc && !new_con_state->crtc) + continue; + + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); + if (IS_ERR(new_crtc_state)) { + ret = PTR_ERR(new_crtc_state); + goto fail; + } + + if (dm_old_con_state->abm_level != + dm_new_con_state->abm_level) + new_crtc_state->connectors_changed = true; + } + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc_resource_is_dsc_encoding_supported(dc)) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { + ret = add_affected_mst_dsc_crtcs(state, crtc); + if (ret) + goto fail; + } + } + } +#endif for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && @@ -7422,20 +7536,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. */ - struct dm_atomic_state *new_dm_state, *old_dm_state; - new_dm_state = dm_atomic_get_new_state(state); - old_dm_state = dm_atomic_get_old_state(state); + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; - if (new_dm_state && old_dm_state) { - if (new_dm_state->context) - dc_release_state(new_dm_state->context); + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; - new_dm_state->context = old_dm_state->context; + dm_atomic_destroy_state(obj, + state->private_objs[i].state); - if (old_dm_state->context) - dc_retain_state(old_dm_state->context); + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. 
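+ * (This is a swap-with-last removal: the last element overwrites + * slot i, the vacated tail slot is cleared, and num_private_objs + * shrinks by one; the ordering of private_objs is not significant.)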
+ */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; + + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL); + dc_gamma_release(&gamma); + return res ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a549c7c717dd..f0b001b3af57 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -113,7 +113,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, mutex_lock(&adev->dm.dc_lock); /* Enable CRTC CRC generation if necessary. */ - if (dm_is_crc_source_crtc(source)) { + if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, enable, enable)) { ret = -EINVAL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 28a6c7b2ef4b..2f858507ca70 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -101,7 +101,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); - if (payload.write) + if (payload.write && result >= 0) result = msg->size; if (result < 0) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 785322cd4c6c..7241d4c20778 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks( &pp_clk_info); else if (adev->smu.funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); + else + return false; if (ret) return false; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..823843cd2613 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..7873abea4112 100644 --- 
a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 5815983caaf8..0d2e13627c64 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3( cntl->enable_dp_audio); params.ucLaneNum = (uint8_t)(cntl->lanes_number); + switch (cntl->color_depth) { + case COLOR_DEPTH_888: + params.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case COLOR_DEPTH_101010: + params.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case COLOR_DEPTH_121212: + params.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case COLOR_DEPTH_161616: + params.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + default: + break; + } + if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params)) result = BP_RESULT_OK; @@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4( cntl->enable_dp_audio)); params.ucLaneNum = (uint8_t)(cntl->lanes_number); + switch (cntl->color_depth) { + case COLOR_DEPTH_888: + params.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case COLOR_DEPTH_101010: + params.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case COLOR_DEPTH_121212: + params.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case COLOR_DEPTH_161616: + params.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + default: + break; + } + if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params)) result = BP_RESULT_OK; @@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5( * driver choose program it itself, i.e. here we program it * to 888 by default. */ + if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) + switch (bp_params->color_depth) { + case TRANSMITTER_COLOR_DEPTH_30: + /* yes this is correct, the atom define is wrong */ + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP; + break; + case TRANSMITTER_COLOR_DEPTH_36: + /* yes this is correct, the atom define is wrong */ + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; + break; + default: + break; + } if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk)) result = BP_RESULT_OK; @@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6( * driver choose program it itself, i.e. here we pass required * target rate that includes deep color. 
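+ * (HDMI deep color raises the TMDS clock relative to 24 bpp: 30 bpp + * runs at 1.25x, 36 bpp at 1.5x and 48 bpp at 2x the base pixel rate, + * which is why the target rate passed here must account for it.)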
*/ + if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) + switch (bp_params->color_depth) { + case TRANSMITTER_COLOR_DEPTH_30: + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6; + break; + case TRANSMITTER_COLOR_DEPTH_36: + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6; + break; + case TRANSMITTER_COLOR_DEPTH_48: + clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; + break; + default: + break; + } if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk)) result = BP_RESULT_OK; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..2718396083ee 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru return disp_clk_threshold; } -static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks) +static void ramp_up_dispclk_with_dpp( + struct clk_mgr_internal *clk_mgr, + struct dc *dc, + struct dc_clocks *new_clocks, + bool safe_to_lower) { int i; int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks); bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; + /* This function changes dispclk, dppclk and dprefclk according to the + * bandwidth requirement. Its call stack is rv1_update_clocks --> + * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth + * --> prepare_bandwidth / optimize_bandwidth. Before the dcn hw is + * changed, prepare_bandwidth is called first to allow enough clock and + * watermark headroom for the change; after the dcn hw change completes, + * optimize_bandwidth is executed to lower the clocks and save power + * with the new dcn hw settings. + * + * Below is the sequence of commit_planes_for_stream: + * + * step 1: prepare_bandwidth - raise clock to have enough bandwidth + * step 2: lock_doublebuffer_enable + * step 3: pipe_control_lock(true) - dchubp register changes will + * not take effect right away + * step 4: apply_ctx_for_surface - program dchubp + * step 5: pipe_control_lock(false) - dchubp register changes take effect + * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream + * for a full update, optimize clocks to save power + * + * At the end of step 1, the dcn clocks (dprefclk, dispclk, dppclk) may + * be changed for the new dchubp configuration, but the real dcn hub + * dchubps are still running with the old configuration until the end + * of step 5. This means the clock settings at step 1 must not be lower + * than those before step 1. This is checked by two conditions: + * 1. should_set_clock(safe_to_lower, new_clocks->dispclk_khz, + * clk_mgr_base->clks.dispclk_khz) || + * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz + * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz + * + * The second condition is based on the new dchubp configuration; dppclk + * for the new dchubp may differ from dppclk before step 1. + * For example, before step 1 the dchubps are as below: + * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979) + * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080) + * and pipe 0 needs dppclk = dispclk. + * + * New dchubp pipe-split configuration: + * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080) + * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080) + * where each pipe only needs dppclk = dispclk / 2.
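+ * (After the split each DPP scans out half the horizontal width, so + * the per-pipe pixel rate, and therefore the required dppclk, is + * halved.)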
+ * + * dispclk and dppclk are not locked by the otg master lock; they + * take effect right after step 1. During this transition dispclk stays + * the same, but dppclk is dropped to half of the previous clock while + * the old dchubp configuration is still active between step 1 and + * step 6. This may cause intermittent p-state warnings. + * + * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we + * need to make sure dppclk is not lowered between steps 1 and 6. + * For new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, the + * display clock is raised, but we do not know the ratio of + * new_clocks->dispclk_khz to clk_mgr_base->clks.dispclk_khz, so + * new_clocks->dispclk_khz / 2 is not guaranteed to be equal to or + * higher than the old dppclk. We can ignore the power-saving + * difference between dppclk = dispclk and dppclk = dispclk / 2 + * between step 1 and step 6. + * As long as safe_to_lower == false, set dppclk = dispclk to simplify + * the condition check. + * TODO: review this change for other ASICs. + **/ + if (!safe_to_lower) + request_dpp_div = false; + /* set disp clk to dpp clk threshold */ clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold); @@ -206,7 +271,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, /* program dispclk on = as a w/a for sleep resume clock ramping issues */ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) { - ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks); + ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; send_request_to_lower = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index dd92f9c295b4..9f301f8575a5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -97,8 +97,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, new_clocks->dppclk_khz = 100000; } - if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { - if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + /* + * Temporarily ignore the 0 cases for disp and dpp clks. + * We may have a new feature that requires 0 clks in the future.
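+ * Substituting the current values below makes should_set_clock() + * treat a request for 0 as no change rather than as a clock target.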
+ */ + if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) { + new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz; + new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz; + } + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) { + if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz) dpp_clock_lowered = true; clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; update_dppclk = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4704aac336c2..092db590087c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, int i = 0; bool ret = false; + stream->adjust = *adjust; + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -905,15 +907,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -1180,6 +1178,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + struct pipe_ctx *pipe; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (pipe->plane_state->status.is_flip_pending) + return true; + } + return false; +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; @@ -1190,6 +1208,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) post_surface_trace(dc); + if (is_flip_pending_in_pipes(dc, context)) + return true; + for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { @@ -1940,7 +1961,8 @@ static void commit_planes_do_stream_update(struct dc *dc, if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - dc->hwss.optimize_bandwidth(dc, dc->current_state); + dc->optimized_required = true; + } else { if (!dc->optimize_seamless_boot) dc->hwss.prepare_bandwidth(dc, dc->current_state); @@ -2152,7 +2174,7 @@ void dc_commit_updates_for_stream(struct dc *dc, enum surface_update_type update_type; struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; - int i; + int i, j; stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -2190,10 +2212,29 @@ void dc_commit_updates_for_stream(struct dc *dc, copy_surface_update_to_plane(surface, &srf_updates[i]); + if (update_type >= UPDATE_TYPE_MED) { + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = + &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->plane_state != surface) + continue; + + resource_build_scaling_params(pipe_ctx); + } + } } copy_stream_update_to_stream(dc, context, stream, stream_update); + if (update_type > UPDATE_TYPE_FAST) { + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + DC_ERROR("Mode validation failed for stream update!\n"); + dc_release_state(context); + 
return; + } + } + commit_planes_for_stream( dc, srf_updates, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 3aedc724241e..40041c61a100 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1303,6 +1303,11 @@ static bool construct( goto ddc_create_fail; } + if (!link->ddc->ddc_pin) { + DC_ERROR("Failed to get I2C info for connector!\n"); + goto ddc_create_fail; + } + link->ddc_hw_inst = dal_ddc_get_line( dal_ddc_service_get_ddc_pin(link->ddc)); @@ -1733,8 +1738,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1752,8 +1756,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1765,8 +1768,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1786,8 +1788,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1805,8 +1806,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1818,8 +1818,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1837,8 +1836,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1849,8 +1847,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1861,10 +1858,14 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1889,8 +1890,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1901,8 +1901,7 @@ static void write_i2c_default_retimer_setting( offset = 
0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1913,8 +1912,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1925,8 +1923,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ -1937,8 +1934,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1949,8 +1945,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1965,8 +1960,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1977,8 +1971,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1989,9 +1982,13 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -2020,8 +2017,7 @@ static void write_i2c_redriver_setting( slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) @@ -2277,7 +2273,7 @@ enum dc_status dc_link_validate_mode_timing( /* A hack to avoid failing any modes for EDID override feature on * topology change such as lower quality cable for DP or different dongle */ - if (link->remote_sinks[0]) + if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) return DC_OK; /* Passive Dongle */ @@ -2768,15 +2764,6 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); - /* This second call is needed to reconfigure the DIG - * as a workaround for the incorrect value being applied - * from transmitter control. 
- */ - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) - stream->link->link_enc->funcs->setup( - stream->link->link_enc, - pipe_ctx->stream->signal); - #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || @@ -2927,8 +2914,12 @@ uint32_t dc_bandwidth_in_kbps_from_timing( #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (timing->flags.DSC) { - kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); - kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); + struct fixed31_32 link_bw_kbps; + + link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz); + link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); + link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel); + kbps = dc_fixpt_ceil(link_bw_kbps); return kbps; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 51991bf26a93..4c90d68db230 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -126,22 +126,16 @@ struct aux_payloads { struct vector payloads; }; -static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) +static bool dal_ddc_i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads *payloads, + uint32_t count) { - struct i2c_payloads *payloads; - - payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) - return payloads; - - kfree(payloads); - return NULL; + return true; + return false; } static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) @@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) return p->payloads.count; } -static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) +static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p) { - if (!p || !*p) + if (!p) return; - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL; + dal_vector_destruct(&p->payloads); } #define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) @@ -521,9 +513,13 @@ bool dal_ddc_service_query_ddc_data( uint32_t payloads_num = write_payloads + read_payloads; + if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) return false; + if (!payloads_num) + return false; + /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { @@ -556,23 +552,25 @@ bool dal_ddc_service_query_ddc_data( ret = dc_link_aux_transfer_with_retries(ddc, &read_payload); } else { - struct i2c_payloads *payloads = - dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); + struct i2c_command command = {0}; + struct i2c_payloads payloads; - struct i2c_command command = { - .payloads = dal_ddc_i2c_payloads_get(payloads), - .number_of_payloads = 0, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false; + + command.payloads = dal_ddc_i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; dal_ddc_i2c_payloads_add( - payloads, address, write_size, write_buf, true); + &payloads, address, write_size, write_buf, true); dal_ddc_i2c_payloads_add( - payloads, address, read_size, read_buf, false); + &payloads, address, read_size, read_buf, false); command.number_of_payloads = - dal_ddc_i2c_payloads_get_count(payloads); + dal_ddc_i2c_payloads_get_count(&payloads); ret = dm_helpers_submit_i2c( ddc->ctx, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 6dd2334dd5e6..c18f39271b03 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1914,6 +1914,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting initial_link_setting; uint32_t link_bw; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + /* search for the minimum link setting that: * 1. is supported according to the link training result * 2. could support the b/w requested by the timing @@ -3378,7 +3381,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) if (edp_config_set.bits.PANEL_MODE_EDP != panel_mode_edp) { - enum ddc_result result = DDC_RESULT_UNKNOWN; + enum dc_status result = DC_ERROR_UNEXPECTED; edp_config_set.bits.PANEL_MODE_EDP = panel_mode_edp; @@ -3388,7 +3391,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) &edp_config_set.raw, sizeof(edp_config_set.raw)); - ASSERT(result == DDC_RESULT_SUCESSFULL); + ASSERT(result == DC_OK); } } DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index a519dbc5ecb6..5641a9477d29 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -400,6 +400,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -496,11 +497,15 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; uint8_t dsc_packed_pps[128]; + memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + /* Enable DSC hw block */ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; DC_LOG_DSC(" "); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index f787a6b94781..eca67d5d5b10 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -871,6 +871,20 @@ static bool dce110_program_pix_clk( bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC = pll_settings->use_external_clk; + switch (pix_clk_params->color_depth) { + case COLOR_DEPTH_101010: + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30; + break; + case COLOR_DEPTH_121212: + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36; + break; + case COLOR_DEPTH_161616: + bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48; + break; + default: + break; + } + if (clk_src->bios->funcs->set_pixel_clock( clk_src->bios, &bp_pc_params) != BP_RESULT_OK) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 6ed922a3c1cd..c25840a774f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -563,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute( cntl.enable_dp_audio = enable_audio; cntl.pixel_clock = actual_pix_clk_khz; cntl.lanes_number = LANE_COUNT_FOUR; + cntl.color_depth = crtc_timing->display_color_depth; if (enc110->base.bp->funcs->encoder_control( enc110->base.bp, &cntl) != BP_RESULT_OK) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index ab63d0d0304c..6fd57cfb112f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -429,12 +429,12 @@ static void set_clamp( clamp_max = 0x3FC0; break; case COLOR_DEPTH_101010: - /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */ - clamp_max = 0x3FFC; + /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */ + clamp_max = 0x3FF0; break; case COLOR_DEPTH_121212: - /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */ - clamp_max = 0x3FFF; + /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */ + clamp_max = 0x3FFC; break; default: clamp_max = 0x3FC0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..e860ae05feda 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1151,6 +1151,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont bool video_large = false; bool desktop_large 
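dce110_program_pix_clk() above now forwards the stream color depth to the VBIOS pixel-clock call. The likely reason (an inference, not stated in the hunk) is HDMI deep color: at 30/36/48 bpp the TMDS clock runs faster than the pixel clock, so the PLL target depends on depth. The standard HDMI ratios, for reference:

#include <stdint.h>

/* Sketch of the deep-color clock relationship; whether VBIOS applies the
 * ratio internally from the passed depth is an assumption. */
static uint32_t tmds_clk_khz(uint32_t pix_clk_khz, unsigned bpc)
{
    switch (bpc) {
    case 10: return pix_clk_khz * 5 / 4;   /* 30 bpp: x1.25 */
    case 12: return pix_clk_khz * 3 / 2;   /* 36 bpp: x1.5  */
    case 16: return pix_clk_khz * 2;       /* 48 bpp: x2    */
    default: return pix_clk_khz;           /* 24 bpp */
    }
}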
= false; bool dcc_disabled = false; + bool mpo_enabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) @@ -1159,6 +1160,9 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; + if (context->stream_status[i].plane_count > 1) + mpo_enabled = true; + for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; @@ -1182,6 +1186,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont } } + /* Disable MPO in multi-display configurations. */ + if (context->stream_count > 1 && mpo_enabled) + return DC_FAIL_UNSUPPORTED_1; + /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..6718777c826d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -121,35 +121,35 @@ void enc1_update_generic_info_packet( switch (packet_index) { case 0: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC0_FRAME_UPDATE, 1); + AFMT_GENERIC0_IMMEDIATE_UPDATE, 1); break; case 1: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC1_FRAME_UPDATE, 1); + AFMT_GENERIC1_IMMEDIATE_UPDATE, 1); break; case 2: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC2_FRAME_UPDATE, 1); + AFMT_GENERIC2_IMMEDIATE_UPDATE, 1); break; case 3: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC3_FRAME_UPDATE, 1); + AFMT_GENERIC3_IMMEDIATE_UPDATE, 1); break; case 4: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC4_FRAME_UPDATE, 1); + AFMT_GENERIC4_IMMEDIATE_UPDATE, 1); break; case 5: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC5_FRAME_UPDATE, 1); + AFMT_GENERIC5_IMMEDIATE_UPDATE, 1); break; case 6: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC6_FRAME_UPDATE, 1); + AFMT_GENERIC6_IMMEDIATE_UPDATE, 1); break; case 7: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC7_FRAME_UPDATE, 1); + AFMT_GENERIC7_IMMEDIATE_UPDATE, 1); break; default: break; @@ -898,10 +898,10 @@ void enc1_stream_encoder_dp_blank( */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); /* Larger delay to wait until VBLANK - use max retry of - * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + + * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode + * a little more because we may not trust delay accuracy. 
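The corrected set_clamp() constants above all fall out of one formula: an N-bit all-ones code MSB-aligned on the 14-bit internal bus is ((1 << N) - 1) << (14 - N), which is exactly what the fixed binary comments spell out. A standalone check of the three depths in the hunk:

#include <assert.h>
#include <stdint.h>

static uint32_t clamp_max_14bit(unsigned bits)
{
    return ((1u << bits) - 1u) << (14u - bits);
}

int main(void)
{
    assert(clamp_max_14bit(8)  == 0x3FC0);   /* default path, unchanged   */
    assert(clamp_max_14bit(10) == 0x3FF0);   /* was incorrectly 0x3FFC    */
    assert(clamp_max_14bit(12) == 0x3FFC);   /* was incorrectly 0x3FFF    */
    return 0;
}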
*/ - max_retries = DP_BLANK_MAX_RETRY * 250; + max_retries = DP_BLANK_MAX_RETRY * 501; /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..b9656614950e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -275,7 +275,14 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\ @@ -339,7 +346,14 @@ struct dcn10_stream_enc_registers { type AFMT_GENERIC2_FRAME_UPDATE;\ type AFMT_GENERIC3_FRAME_UPDATE;\ type AFMT_GENERIC4_FRAME_UPDATE;\ + type AFMT_GENERIC0_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC1_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC2_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC3_IMMEDIATE_UPDATE;\ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC5_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC6_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC7_IMMEDIATE_UPDATE;\ type AFMT_GENERIC5_FRAME_UPDATE;\ type AFMT_GENERIC6_FRAME_UPDATE;\ type AFMT_GENERIC7_FRAME_UPDATE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 16476ed25536..206436632275 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -119,32 +119,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - // Fallthrough intentional to program all available dpp_dto's - switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { - case 6: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); - /* Fall through */ - case 5: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); - /* Fall through */ - case 4: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); - /* Fall through */ - case 3: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); - /* Fall through */ - case 2: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); - /* Fall through */ - case 1: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); - break; - default: - ASSERT(false); - break; - } } static const struct dccg_funcs dccg2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af94..5c45c39662fb 100644 --- 
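The retry-count comment above encodes a simple sizing rule: the DP-blank wait must outlast one full frame at the lowest supported refresh rate to be certain a VBLANK boundary was crossed, hence growing the budget from ~50 ms (24 Hz) to ~102 ms (10 Hz). A sketch of that arithmetic; the 10 us poll interval comes from the comment, and DP_BLANK_MAX_RETRY's value is not shown here, so no concrete product is assumed:

#include <stdio.h>

int main(void)
{
    unsigned min_refresh_hz   = 10;
    unsigned poll_interval_us = 10;
    unsigned frame_us = 1000000u / min_refresh_hz;              /* 100 ms */
    unsigned polls    = (frame_us + poll_interval_us - 1) / poll_interval_us;

    printf("need >= %u polls of %u us to cover %u us\n",
           polls, poll_interval_us, frame_us);
    return 0;
}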
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -207,6 +207,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str struct dsc_reg_values dsc_reg_vals; struct dsc_optc_config dsc_optc_cfg; + memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); + memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); + DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); dsc_config_log(dsc, dsc_cfg); DC_LOG_DSC("DSC Picture Parameter Set (PPS):"); @@ -348,6 +351,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. @@ -510,7 +514,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; - reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf; } static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index e933f6a369f9..083c42e521f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2015,7 +2015,8 @@ static void dcn20_fpga_init_hw(struct dc *dc) REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); + if (REG(REFCLK_CNTL)) + REG_WRITE(REFCLK_CNTL, 0); // diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index c13dce760098..08062de3fbeb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -340,8 +340,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 8.6, - .sr_enter_plus_exit_time_us = 10.9, + .sr_exit_time_us = 11.6, + .sr_enter_plus_exit_time_us = 13.9, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -2275,6 +2275,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
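The is_odm plumbing above feeds the new ich_reset_at_eol rule in dcn20_dsc.c: the DSC indexed-color history must be reset at end of line whenever a line is split across engines, whether by multiple horizontal slices or by ODM combine. Computing it in dsc_prepare_config() matters because the old location, dsc_update_from_dsc_parameters(), never sees the ODM state. Reduced to its decision (the 0xF value is copied from the hunk; its per-slice bit layout is an assumption):

#include <stdbool.h>
#include <stdint.h>

static uint32_t ich_reset_at_eol(bool is_odm, int num_slices_h)
{
    return (is_odm || num_slices_h > 1) ? 0xF : 0;
}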
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -2845,7 +2846,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b..c8f77bd0ce8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -73,32 +73,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct _vcs_dpi_display_dlg_regs_st *dlg_attr) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); - uint32_t cur_value; + uint32_t refcyc_per_vm_group_vblank; + uint32_t refcyc_per_vm_req_vblank; + uint32_t refcyc_per_vm_group_flip; + uint32_t refcyc_per_vm_req_flip; + const uint32_t uninitialized_hw_default = 0; - REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_vblank) + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank); + + if (refcyc_per_vm_group_vblank == uninitialized_hw_default || + refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank) REG_SET(VBLANK_PARAMETERS_5, 0, REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank); REG_GET(VBLANK_PARAMETERS_6, - REFCYC_PER_VM_REQ_VBLANK, - &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_vblank) + REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank); + + if (refcyc_per_vm_req_vblank == uninitialized_hw_default || + refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank) REG_SET(VBLANK_PARAMETERS_6, 0, REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank); - REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_flip) + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip); + + if (refcyc_per_vm_group_flip == uninitialized_hw_default || + refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip) REG_SET(FLIP_PARAMETERS_3, 0, REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip); - REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_flip) + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip); + + if (refcyc_per_vm_req_flip == uninitialized_hw_default || + refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip) REG_SET(FLIP_PARAMETERS_4, 0, REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip); REG_SET(FLIP_PARAMETERS_5, 0, REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c); + REG_SET(FLIP_PARAMETERS_6, 0, REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 161bf7caf3ae..11a4c4029a90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -247,7 +247,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .dram_channel_width_bytes = 4, .fabric_datapath_to_dcn_data_return_bytes = 
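The DEDCN21-142 workaround rewrite above fixes a subtle comparison: a deadline register that still holds its power-on value of 0 compared as "already smaller than requested" and was skipped, leaving the hardware unprogrammed. Treating 0 explicitly as "uninitialized, must program" closes that hole. The rule, standalone:

#include <stdbool.h>
#include <stdint.h>

static bool should_program(uint32_t hw_value, uint32_t requested)
{
    const uint32_t uninitialized_hw_default = 0;

    return hw_value == uninitialized_hw_default || hw_value > requested;
}

int main(void)
{
    /* 0 must be programmed even though 0 < 100 numerically */
    return (should_program(0, 100) && !should_program(80, 100) &&
            should_program(120, 100)) ? 0 : 1;
}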
32, .dcn_downspread_percent = 0.5, - .downspread_percent = 0.5, + .downspread_percent = 0.38, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, @@ -826,6 +826,8 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, DCN20_CLK_SRC_PLL1, DCN20_CLK_SRC_PLL2, + DCN20_CLK_SRC_PLL3, + DCN20_CLK_SRC_PLL4, DCN20_CLK_SRC_TOTAL_DCN21 }; @@ -1498,6 +1500,14 @@ static bool construct( dcn21_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 6c6c486b774a..945d23ca3677 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -3435,6 +3435,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DCCEnabledInAnyPlane = true; } } + mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->FabricAndDRAMBandwidthPerState[i] = dml_min( mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 0fafd693ffb4..5b5ed1be19ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -3467,6 +3467,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DCCEnabledInAnyPlane = true; } } + mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->FabricAndDRAMBandwidthPerState[i] = dml_min( mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index f8f85490e77e..8a6fb58d0199 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex( enum gpio_mode mode) { if (gpio->pin) { - ASSERT_CRITICAL(false); + BREAK_TO_DEBUGGER(); return GPIO_RESULT_ALREADY_OPENED; } // No action if allocation failed during gpio construct if (!gpio->hw_container.ddc) { - ASSERT_CRITICAL(false); + BREAK_TO_DEBUGGER(); return GPIO_RESULT_NON_SPECIFIC_ERROR; } gpio->mode = mode; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa149..75ecfdc5d5cd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -36,6 +36,7 @@ struct dsc_config { uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ + bool is_odm; struct dc_dsc_config dc_dsc_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c 
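The gpio_base.c change above downgrades two recoverable conditions from a critical assert to a debugger break plus an error return: a GPIO already being open, or an allocation failure noticed later, are runtime states the caller can handle, not invariants worth a warning splat on every hit. The shape of the pattern, with dbg_break() standing in for BREAK_TO_DEBUGGER():

enum gpio_result { GPIO_RESULT_OK, GPIO_RESULT_ALREADY_OPENED };

static void dbg_break(void) { /* no-op unless a debugger hook exists */ }

static enum gpio_result gpio_open(int *pin_in_use)
{
    if (*pin_in_use) {
        dbg_break();                  /* observable, but not fatal */
        return GPIO_RESULT_ALREADY_OPENED;
    }
    *pin_in_use = 1;
    return GPIO_RESULT_OK;
}

int main(void)
{
    int in_use = 0;

    return (gpio_open(&in_use) == GPIO_RESULT_OK &&
            gpio_open(&in_use) == GPIO_RESULT_ALREADY_OPENED) ? 0 : 1;
}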
b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 5db29bf582d3..70370b282f91 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -299,8 +299,8 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = { pflip_int_entry(1), pflip_int_entry(2), pflip_int_entry(3), - [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(), - [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(), + pflip_int_entry(4), + pflip_int_entry(5), [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), gpio_pad_int_entry(0), gpio_pad_int_entry(1), diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index cbe7818529bb..623455cd7520 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg @@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. + */ +#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ + } + #define vblank_int_entry(reg_num)\ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ @@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { vupdate_int_entry(3), vupdate_int_entry(4), vupdate_int_entry(5), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), + vupdate_no_lock_int_entry(4), + vupdate_no_lock_int_entry(5), vblank_int_entry(0), vblank_int_entry(1), vblank_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 30ec80ac6fc8..8da322582b68 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -57,7 +57,7 @@ * general debug capabilities * */ -#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB) +#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)) #define ASSERT_CRITICAL(expr) do { \ if (WARN_ON(!(expr))) { \ kgdb_breakpoint(); \ diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 89ef9f6860e5..16df2a485dd0 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? 
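The dc_fixpt_pow() guard above exists because the function is built as exp(arg2 * log(arg1)), and log(0) is undefined; a zero base now short-circuits to the conventional 0^0 = 1 and 0^y = 0 for y != 0. A floating-point model of the same shape (negative bases are outside the domain either way):

#include <math.h>
#include <stdio.h>

static double safe_pow(double x, double y)
{
    if (x == 0.0)
        return y == 0.0 ? 1.0 : 0.0;   /* mirrors the new guard */
    return exp(y * log(x));            /* same structure as dc_fixpt_pow */
}

int main(void)
{
    printf("%g %g %g\n", safe_pow(0, 0), safe_pow(0, 2.5), safe_pow(2, 10));
    return 0;
}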
dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..e042d8ce05b4 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -799,7 +799,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma, pow_buffer_ptr = -1; // reset back to no optimize ret = true; release: - kfree(coeff); + kvfree(coeff); return ret; } @@ -1576,7 +1576,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma struct pwl_float_data_ex *rgb = rgb_regamma; const struct hw_x_point *coord_x = coordinates_x; - build_coefficients(&coeff, true); + build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); i = 0; while (i != hw_points_num + 1) { @@ -1862,7 +1862,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, kfree(rgb_regamma); rgb_regamma_alloc_fail: - kvfree(rgb_user); + kfree(rgb_user); rgb_user_alloc_fail: return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index d306cc711997..9eb3a0dcd1f2 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -320,12 +320,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level & profile_mode_mask) { hwmgr->saved_dpm_level = hwmgr->dpm_level; hwmgr->en_umd_pstate = true; - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); } } else { /* exit umd pstate, restore level, enable gfx cg*/ @@ -1425,7 +1425,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->get_asic_baco_capability) return 0; mutex_lock(&hwmgr->smu_lock); @@ -1459,7 +1460,8 @@ static int pp_set_asic_baco_state(void *handle, int state) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->set_asic_baco_state) return 0; mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index a066e9297777..b51a124e505a 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1541,12 +1541,12 @@ static int smu_enable_umd_pstate(void *handle, if (*level & profile_mode_mask) { smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; smu_dpm_ctx->enable_umd_pstate = true; - amdgpu_device_ip_set_clockgating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); amdgpu_device_ip_set_powergating_state(smu->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(smu->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); } } else { /* exit umd pstate, restore level, enable gfx cg*/ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 77c14671866c..719597c5d27d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ 
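The color_gamma.c frees above are being paired with their allocators: kfree() on kvmalloc-backed memory can crash because the buffer may live in vmalloc space, while kvfree() on kmalloc memory happens to work but obscures which allocator owns the buffer. A kernel-style sketch of the contract (not buildable outside a kernel tree):

#include <linux/slab.h>
#include <linux/mm.h>

static void alloc_pairing_example(size_t n)
{
    u32 *small = kcalloc(n, sizeof(*small), GFP_KERNEL);   /* pairs with kfree()  */
    u32 *large = kvcalloc(n, sizeof(*large), GFP_KERNEL);  /* pairs with kvfree() */

    /* ... use the buffers ... */
    kfree(small);    /* kfree() on kvmalloc memory can crash (vmalloc pages); */
    kvfree(large);   /* kvfree() on kmalloc memory works but hides ownership. */
}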
b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -984,6 +984,32 @@ static int init_thermal_controller( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) { + hwmgr->thermal_controller.ucType = + powerplay_table->sThermalController.ucType; + hwmgr->thermal_controller.ucI2cLine = + powerplay_table->sThermalController.ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = + powerplay_table->sThermalController.ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM + = powerplay_table->sThermalController.ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM + = powerplay_table->sThermalController.ucFanMaxRPM * 100UL; + + set_hw_cap(hwmgr, + ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + hwmgr->thermal_controller.use_hw_fan_control = 1; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index fed3fc4bb57a..6322e57893db 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -209,8 +209,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != clock) { + if (clock && smu10_data->deep_sleep_dcefclk != clock) { smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -223,8 +222,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != clock) { + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) { smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, @@ -237,8 +235,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->f_actual_hard_min_freq && - smu10_data->f_actual_hard_min_freq != clock) { + if (clock && smu10_data->f_actual_hard_min_freq != clock) { smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 203ce4b1028f..7cde55854b65 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1533,6 +1533,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to reset to default!", result = tmp_result); + tmp_result = smum_stop_smc(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop smc!", result = tmp_result); + tmp_result = smu7_force_switch_to_arbf0(hwmgr); PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to force to switch arbf0!", result = tmp_result); @@ -2864,7 +2868,7 @@ 
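The three smu10 hunks above converge on one caching rule for SMC clock requests: a zero request means "nothing asked for" rather than 0 MHz, and an unchanged value should not generate a duplicate message. Note that the old code gated on the cached value being nonzero, which could swallow the very first real request. A sketch, with send_msg() standing in for smum_send_msg_to_smc_with_parameter():

#include <stdint.h>

static void send_msg(uint32_t clock) { (void)clock; /* SMC call here */ }

static void set_min_clock(uint32_t *cached, uint32_t clock)
{
    if (clock && *cached != clock) {   /* 0 is "no request", not 0 MHz */
        *cached = clock;
        send_msg(clock);
    }
}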
static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, if (hwmgr->is_kicker) switch_limit_us = data->is_memory_gddr5 ? 450 : 150; else - switch_limit_us = data->is_memory_gddr5 ? 190 : 150; + switch_limit_us = data->is_memory_gddr5 ? 200 : 150; break; case CHIP_VEGAM: switch_limit_us = 30; @@ -3575,7 +3579,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_POWER: return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: - if ((data->vr_config & 0xff) == 0x2) + if ((data->vr_config & VRCONF_VDDGFX_MASK) == + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); else @@ -3805,9 +3810,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, { uint32_t i; + /* force the trim if mclk_switching is disabled to prevent flicker */ + bool force_trim = (low_limit == high_limit); for (i = 0; i < dpm_table->count; i++) { /*skip the trim if od is enabled*/ - if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit + if ((!hwmgr->od_enabled || force_trim) + && (dpm_table->dpm_levels[i].value < low_limit || dpm_table->dpm_levels[i].value > high_limit)) dpm_table->dpm_levels[i].enabled = false; else @@ -3983,6 +3991,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index beacfffbdc3e..ecbc9daea57e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3691,6 +3691,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. 
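The VDDGFX sensor fix above replaces magic numbers with the field's mask/shift macros; if VDDGFX is not the low byte of vr_config, the old (vr_config & 0xff) == 0x2 was testing a different rail's field entirely. A reduced sketch; the shift value below is an assumption for illustration, the real definitions live in the SMU headers:

#include <stdbool.h>
#include <stdint.h>

#define VRCONF_VDDGFX_SHIFT  8u                      /* assumed for the sketch */
#define VRCONF_VDDGFX_MASK   (0xffu << VRCONF_VDDGFX_SHIFT)
#define VR_SVI2_PLANE_2      2u

static bool vddgfx_on_plane2(uint32_t vr_config)
{
    return (vr_config & VRCONF_VDDGFX_MASK) ==
           (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT);
}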
+ */ + if(hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); /* diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index ba8763daa380..e8d01abf27fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -364,17 +364,29 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + /* + * As a common sense, usSoftwareShutdownTemp should be bigger + * than ThotspotLimit. For any invalid usSoftwareShutdownTemp, + * we will just use the max possible setting VEGA10_THERMAL_MAXIMUM_ALERT_TEMP + * to avoid false alarms. + */ + if ((tdp_table->usSoftwareShutdownTemp > + range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) { + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; + } if (low > high) return -EINVAL; @@ -383,8 +395,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index 904eb2c9155b..40e7c72eeae0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > 
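The vega10/12/20 thermal hunks above and below share one unit fix: the PP_TemperatureRange fields carry millidegrees (PP_TEMPERATURE_UNITS_PER_CENTIGRADES being 1000), while the THM interrupt thresholds and the VEGA*_THERMAL_*_ALERT_TEMP constants are whole degrees, so the old code compared mixed units. They also clamp the high alert to the board's software-shutdown temperature instead of the generic range maximum. The clamping, kept in degrees end to end:

#define UNITS_PER_DEGREE 1000   /* PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

static void clamp_alert_range(int *low_c, int *high_c,
                              int range_min_munits, int shutdown_temp_c)
{
    if (*low_c < range_min_munits / UNITS_PER_DEGREE)
        *low_c = range_min_munits / UNITS_PER_DEGREE;
    if (*high_c > shutdown_temp_c)
        *high_c = shutdown_temp_c;   /* never alert above SW shutdown */
}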
range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f5915308e643..947e4fa3c5e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -981,27 +981,15 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint64_t features_enabled; - int i; - bool enabled; - int ret = 0; + int i, ret = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures)) == 0, "[DisableAllSMUFeatures] Failed to disable all smu features!", return ret); - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[DisableAllSMUFeatures] Failed to get enabled smc features!", - return ret); - - for (i = 0; i < GNLD_FEATURES_MAX; i++) { - enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? - true : false; - data->smu_features[i].enabled = enabled; - data->smu_features[i].supported = enabled; - } + for (i = 0; i < GNLD_FEATURES_MAX; i++) + data->smu_features[i].enabled = 0; return 0; } @@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = true; data->vce_power_gated = true; - - if (data->smu_features[GNLD_DPM_UVD].enabled) - data->uvd_power_gated = false; - - if (data->smu_features[GNLD_DPM_VCE].enabled) - data->vce_power_gated = false; } static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) @@ -3211,10 +3193,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) { - uint64_t features_enabled; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled, features_to_enable, features_to_disable; + int i, ret = 0; + bool enabled; if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) return -EINVAL; @@ -3243,6 +3226,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; } + /* Update the cached feature enablement state */ + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
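The vega20 feature-cache hunks above and below follow one invariant: the cached enabled flags must track what the firmware actually did. After DisableAllSmuFeatures the state is known without a readback, so the cache can simply be zeroed; after a selective enable/disable, the SMC bitmap is re-read and the cache refreshed from it. Sketch with stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct feature { uint64_t bitmap; bool enabled; };

static void cache_after_disable_all(struct feature *f, int n)
{
    for (int i = 0; i < n; i++)
        f[i].enabled = false;          /* firmware just turned them all off */
}

static void cache_from_bitmap(struct feature *f, int n, uint64_t enabled_bits)
{
    for (int i = 0; i < n; i++)
        f[i].enabled = (enabled_bits & f[i].bitmap) != 0;
}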
+ true : false; + data->smu_features[i].enabled = enabled; + } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c index ede54e87e287..ce56b93871e8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7bf9a14bfa0b..f6490a128438 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -229,6 +229,7 @@ struct pp_smumgr_func { bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting); int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */ + int (*stop_smc)(struct pp_hwmgr *hwmgr); }; struct pp_hwmgr_func { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index c5288831aa15..05a55e850b5e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -114,4 +114,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); +extern int smum_stop_smc(struct pp_hwmgr *hwmgr); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index e5283dafc414..9a083cd80133 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -184,6 +184,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, 
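The smum_stop_smc() plumbing above and below is the usual optional-callback wrapper: only the CI SMU provides .stop_smc, and the wrapper treats a missing implementation as success rather than an error. A minimal sketch:

struct smumgr_funcs_sketch {
    int (*stop_smc)(void *hwmgr);
};

static int stop_smc(struct smumgr_funcs_sketch *funcs, void *hwmgr)
{
    if (funcs->stop_smc)
        return funcs->stop_smc(hwmgr);
    return 0;                   /* not needed on this ASIC: not an error */
}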
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; + bool cur_value_match_level = false; if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; @@ -243,8 +244,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, cur_value == value ? "*" : ""); + if (cur_value == value) + cur_value_match_level = true; } + if (!cur_value_match_level) + size += sprintf(buf + size, " %uMhz *\n", cur_value); + return size; } diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h index 2a390ddd37dd..89cd6da118a3 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h @@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu); freq = table->SocClocks[dpm_level].Freq; \ break; \ case SMU_MCLK: \ - freq = table->MemClocks[dpm_level].Freq; \ + freq = table->FClocks[dpm_level].Freq; \ break; \ case SMU_DCEFCLK: \ freq = table->DcfClocks[dpm_level].Freq; \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 0922d9cd858a..c4d8c52c6b9c 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -171,7 +171,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) chip_name = "navi12"; break; default: - BUG(); + dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 15590fd86ef4..42c8f8731a50 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) switch (dev_id) { case 0x67BA: - case 0x66B1: + case 0x67B1: smu_data->power_tune_defaults = &defaults_hawaii_pro; break; case 0x67B8: @@ -2933,6 +2933,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) return 0; } +static void ci_reset_smc(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); +} + + +static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 1); +} + +static int ci_stop_smc(struct pp_hwmgr *hwmgr) +{ + ci_reset_smc(hwmgr); + ci_stop_smc_clock(hwmgr); + + return 0; +} + const struct pp_smumgr_func ci_smu_funcs = { .name = "ci_smu", .smu_init = ci_smu_init, @@ -2957,4 +2980,5 @@ const struct pp_smumgr_func ci_smu_funcs = { .is_dpm_running = ci_is_dpm_running, .update_dpm_settings = ci_update_dpm_settings, .update_smc_table = ci_update_smc_table, + .stop_smc = ci_stop_smc, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 4240aeec9000..83d06f8e99ec 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -217,3 +217,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl return -EINVAL; } + +int smum_stop_smc(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->smumgr_funcs->stop_smc) + return hwmgr->smumgr_funcs->stop_smc(hwmgr); + + return 0; 
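renoir_print_clk_levels() above gains a fallback so the sysfs output always marks the live frequency: if the current clock matches no DPM-table level (it can sit between levels), it is printed on its own starred line. A plain-C model of the loop:

#include <stdbool.h>
#include <stdio.h>

static void print_levels(const unsigned *levels, int n, unsigned cur)
{
    bool matched = false;

    for (int i = 0; i < n; i++) {
        printf("%d: %uMhz %s\n", i, levels[i], cur == levels[i] ? "*" : "");
        if (cur == levels[i])
            matched = true;
    }
    if (!matched)
        printf("   %uMhz *\n", cur);   /* current clock between levels */
}

int main(void)
{
    unsigned levels[] = { 400, 800, 1200 };

    print_levels(levels, 3, 950);      /* prints the extra starred line */
    return 0;
}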
+} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index ae18fbcb26fb..86bdb0194493 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -642,9 +642,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, /* sclk is bigger than max sclk in the dependence table */ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i - 1].vddc - - (uint16_t)VDDC_VDDCI_DELTA)); if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) *voltage |= (data->vbios_boot_state.vddci_bootup_value * @@ -652,8 +649,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, else if (dep_table->entries[i - 1].vddci) *voltage |= (dep_table->entries[i - 1].vddci * VOLTAGE_SCALE) << VDDC_SHIFT; - else + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 3c70a53813bf..0b2bb485d9be 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm) const struct malidp_hw_regmap *map = &malidp->dev->hw->map; struct malidp_plane *plane = NULL; enum drm_plane_type plane_type; - unsigned long crtcs = 1 << drm->mode_config.num_crtc; + unsigned long crtcs = BIT(drm->mode_config.num_crtc); unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 018383cfcfa7..5e95bcea43e9 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -3,6 +3,7 @@ config DRM_ASPEED_GFX tristate "ASPEED BMC Display Controller" depends on DRM && OF depends on (COMPILE_TEST || ARCH_ASPEED) + depends on MMU select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 02a9c1ed165b..fa50ab2523d4 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -194,6 +194,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index a428185be2c1..d05b3033b510 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, { switch (fs) { case 32000: - *n = 4096; + case 48000: + case 96000: + case 192000: + *n = fs * 128 / 1000; break; case 44100: - *n = 6272; - break; - case 48000: - *n = 6144; + case 88200: + case 
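The adv7511 CTS/N rework above collapses the per-rate constants into the two closed forms recommended for HDMI audio clock regeneration: N = 128*fs/1000 for the 32/48 kHz family and N = 128*fs/900 for the 44.1 kHz family. Both divisions are exact at every supported rate, which a standalone check confirms against the well-known recommended N values:

#include <assert.h>

int main(void)
{
    assert(32000u  * 128 / 1000 == 4096);
    assert(48000u  * 128 / 1000 == 6144);
    assert(96000u  * 128 / 1000 == 12288);
    assert(192000u * 128 / 1000 == 24576);
    assert(44100u  * 128 / 900  == 6272);
    assert(88200u  * 128 / 900  == 12544);
    assert(176400u * 128 / 900  == 25088);
    return 0;
}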
176400: + *n = fs * 128 / 900; break; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 22885dceaa17..1f26890a8da6 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1635,8 +1635,7 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, } struct analogix_dp_device * -analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, - struct analogix_dp_plat_data *plat_data) +analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) { struct platform_device *pdev = to_platform_device(dev); struct analogix_dp_device *dp; @@ -1739,22 +1738,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, irq_flags, "analogix-dp", dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); - goto err_disable_pm_runtime; + return ERR_PTR(ret); } disable_irq(dp->irq); + return dp; +} +EXPORT_SYMBOL_GPL(analogix_dp_probe); + +int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) +{ + int ret; + dp->drm_dev = drm_dev; dp->encoder = dp->plat_data->encoder; dp->aux.name = "DP-AUX"; dp->aux.transfer = analogix_dpaux_transfer; - dp->aux.dev = &pdev->dev; + dp->aux.dev = dp->dev; ret = drm_dp_aux_register(&dp->aux); if (ret) - return ERR_PTR(ret); + return ret; - pm_runtime_enable(dev); + pm_runtime_enable(dp->dev); ret = analogix_dp_create_bridge(drm_dev, dp); if (ret) { @@ -1762,13 +1769,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, goto err_disable_pm_runtime; } - return dp; + return 0; err_disable_pm_runtime: + pm_runtime_disable(dp->dev); - pm_runtime_disable(dev); - - return ERR_PTR(ret); + return ret; } EXPORT_SYMBOL_GPL(analogix_dp_bind); @@ -1785,10 +1791,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp) drm_dp_aux_unregister(&dp->aux); pm_runtime_disable(dp->dev); - clk_disable_unprepare(dp->clock); } EXPORT_SYMBOL_GPL(analogix_dp_unbind); +void analogix_dp_remove(struct analogix_dp_device *dp) +{ + clk_disable_unprepare(dp->clock); +} +EXPORT_SYMBOL_GPL(analogix_dp_remove); + #ifdef CONFIG_PM int analogix_dp_suspend(struct analogix_dp_device *dp) { diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index 6e81e5db57f2..b050fd1f3d20 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -295,8 +295,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, const struct i2c_device_id *id) { struct device *dev = &stdp4028_i2c->dev; + int ret; - ge_b850v3_lvds_init(dev); + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); @@ -354,8 +358,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, const struct i2c_device_id *id) { struct device *dev = &stdp2690_i2c->dev; + int ret; - ge_b850v3_lvds_init(dev); + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index bd3165ee5354..04431dbac4a4 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, 
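The two megachips probe fixes above are the same bug: ge_b850v3_lvds_init() can fail (it allocates), and ignoring its return value let probe continue and dereference a null shared pointer. Reduced to the pattern, with lvds_init() as a stand-in that simulates the failure:

#include <errno.h>

static void *shared_ptr;

static int lvds_init(void)
{
    shared_ptr = (void *)0;            /* model an allocation failure */
    return shared_ptr ? 0 : -ENOMEM;
}

static int probe_example(void)
{
    int ret = lvds_init();

    if (ret)
        return ret;    /* previously ignored: the code below would have
                        * dereferenced the null shared pointer */
    return 0;
}

int main(void) { return probe_example() ? 1 : 0; }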
u16 addr, u8 *buf, int len) static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) { - u8 ret; + u8 ret = 0; sii8620_read_buf(ctx, addr, &ret, 1); return ret; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 675442bfc1bd..77384c49fb8d 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi, if (lpm) val |= CMD_MODE_ALL_LP; - dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS); dsi_write(dsi, DSI_CMD_MODE_CFG, val); } @@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, unsigned long mode_flags) { + u32 val; + dsi_write(dsi, DSI_PWR_UP, RESET); if (mode_flags & MIPI_DSI_MODE_VIDEO) { dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE); dw_mipi_dsi_video_mode_config(dsi); - dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS); } else { dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); } + val = PHY_TXREQUESTCLKHS; + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + val |= AUTO_CLKLANE_CTRL; + dsi_write(dsi, DSI_LPCLK_CTRL, val); + dsi_write(dsi, DSI_PWR_UP, POWERUP); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 0a580957c8cf..f1de4bb6558c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -647,6 +647,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, buf[i]); } + /* Clear old status bits before start so we don't get confused */ + regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, + AUX_IRQ_STATUS_NAT_I2C_FAIL | + AUX_IRQ_STATUS_AUX_RPLY_TOUT | + AUX_IRQ_STATUS_AUX_SHORT); + regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2dd2cd87cdbb..5e906ea6df67 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2948,7 +2948,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set, ret = handle_conflicting_encoders(state, true); if (ret) - return ret; + goto fail; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 4c766624b20d..2337b3827e6a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -27,6 +27,7 @@ #include <drm/drm_print.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_sysfs.h> #include <linux/uaccess.h> @@ -511,6 +512,10 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registration_state = DRM_CONNECTOR_REGISTERED; + + /* Let userspace know we have a new connector */ + drm_sysfs_hotplug_event(connector->dev); + goto unlock; err_debugfs: diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index eab0f2687cd6..00debd02c322 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -337,13 +337,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf, buf[len] = '\0'; - if (!strcmp(buf, "on")) + if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; - else if (!strcmp(buf, "digital")) + else if (sysfs_streq(buf, "digital")) connector->force = DRM_FORCE_ON_DIGITAL; - else if (!strcmp(buf, 
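The connector_write() hunk below moves from strcmp() to sysfs_streq() because writes made with `echo on > .../force` arrive with a trailing newline that strcmp() rejects. A userspace model of sysfs_streq()'s documented behavior (equal up to one trailing newline):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool streq_sysfs(const char *s1, const char *s2)
{
    while (*s1 && *s1 == *s2) { s1++; s2++; }
    if (*s1 == *s2)
        return true;
    /* allow exactly one trailing newline on either side */
    if (*s1 == '\n' && !s1[1] && !*s2) return true;
    if (*s2 == '\n' && !s2[1] && !*s1) return true;
    return false;
}

int main(void)
{
    assert(strcmp("on\n", "on") != 0);   /* old code: echo input rejected */
    assert(streq_sysfs("on\n", "on"));   /* sysfs_streq accepts it */
    assert(!streq_sysfs("onx", "on"));
    return 0;
}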
"off")) + else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; - else if (!strcmp(buf, "unspecified")) + else if (sysfs_streq(buf, "unspecified")) connector->force = DRM_FORCE_UNSPECIFIED; else return -EINVAL; diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 6a626c82e264..f6598c5a9a87 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf, source[len - 1] = '\0'; ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt); - if (ret) + if (ret) { + kfree(source); return ret; + } spin_lock_irq(&crc->lock); diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index 0cfb386754c3..0d7f90c00f04 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index) mutex_lock(&aux_idr_mutex); aux_dev = idr_find(&aux_idr, index); - if (!kref_get_unless_zero(&aux_dev->refcount)) + if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount)) aux_dev = NULL; mutex_unlock(&aux_idr_mutex); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index c5e9e2305fff..2de1eebe591f 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/iopoll.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -2694,9 +2695,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw, int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) { int ret = 0; - int i = 0; struct drm_dp_mst_branch *mstb = NULL; + mutex_lock(&mgr->payload_lock); mutex_lock(&mgr->lock); if (mst_state == mgr->mst_state) goto out_unlock; @@ -2755,25 +2756,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* this can fail if the device is gone */ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; - mutex_lock(&mgr->payload_lock); - memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); + memset(mgr->payloads, 0, + mgr->max_payloads * sizeof(mgr->payloads[0])); + memset(mgr->proposed_vcpis, 0, + mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); mgr->payload_mask = 0; set_bit(0, &mgr->payload_mask); - for (i = 0; i < mgr->max_payloads; i++) { - struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; - - if (vcpi) { - vcpi->vcpi = 0; - vcpi->num_slots = 0; - } - mgr->proposed_vcpis[i] = NULL; - } mgr->vcpi_mask = 0; - mutex_unlock(&mgr->payload_lock); } out_unlock: mutex_unlock(&mgr->lock); + mutex_unlock(&mgr->payload_lock); if (mstb) drm_dp_mst_topology_put_mstb(mstb); return ret; @@ -3375,11 +3369,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) + if (slots < 0) return false; - if (slots < 0) + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) return false; if (port->vcpi.vcpi > 0) { @@ -3395,6 +3389,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, if (ret) { DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); + drm_dp_mst_topology_put_port(port); goto out; } DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", @@ -3505,6 +3500,17 @@ fail: return 
ret; } +static int do_get_act_status(struct drm_dp_aux *aux) +{ + int ret; + u8 status; + + ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) + return ret; + + return status; +} /** * drm_dp_check_act_status() - Check ACT handled status. @@ -3514,33 +3520,29 @@ fail: */ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) { - u8 status; - int ret; - int count = 0; + /* + * There doesn't seem to be any recommended retry count or timeout in + * the MST specification. Since some hubs have been observed to take + * over 1 second to update their payload allocations under certain + * conditions, we use a rather large timeout value. + */ + const int timeout_ms = 3000; + int ret, status; - do { - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - - if (ret < 0) { - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); - goto fail; - } - - if (status & DP_PAYLOAD_ACT_HANDLED) - break; - count++; - udelay(100); - - } while (count < 30); - - if (!(status & DP_PAYLOAD_ACT_HANDLED)) { - DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); - ret = -EINVAL; - goto fail; + ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, + status & DP_PAYLOAD_ACT_HANDLED || status < 0, + 200, timeout_ms * USEC_PER_MSEC); + if (ret < 0 && status >= 0) { + DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", + timeout_ms, status); + return -EINVAL; + } else if (status < 0) { + DRM_DEBUG_KMS("Failed to read payload table status: %d\n", + status); + return status; } + return 0; -fail: - return ret; } EXPORT_SYMBOL(drm_dp_check_act_status); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3f50b8865db4..9b69e55ad701 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -191,10 +191,11 @@ static const struct edid_quirk { { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, - /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ + /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP }, /* Windows Mixed Reality Headsets */ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, @@ -4743,7 +4744,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d struct drm_display_mode *mode; unsigned pixel_clock = (timings->pixel_clock[0] | (timings->pixel_clock[1] << 8) | - (timings->pixel_clock[2] << 16)); + (timings->pixel_clock[2] << 16)) + 1; unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index cf804389f5ec..d50a7884e69e 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, err = encoder_drv->encoder_init(client, dev, encoder); if (err) - goto fail_unregister; + goto fail_module_put; if (info->platform_data) encoder->slave_funcs->set_config(&encoder->base, @@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, return 0; +fail_module_put: + module_put(module); fail_unregister: i2c_unregister_device(client); - module_put(module); fail: return err; } diff --git 
a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8d193a58363d..02ffde5fd722 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -390,7 +390,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, unsigned int y; for (y = clip->y1; y < clip->y2; y++) { - memcpy(dst, src, len); + if (!fb_helper->dev->mode_config.fbdev_use_iomem) + memcpy(dst, src, len); + else + memcpy_toio((void __iomem *)dst, src, len); + src += fb->pitches[0]; dst += fb->pitches[0]; } @@ -961,11 +965,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) drm_modeset_lock_all(fb_helper->dev); drm_client_for_each_modeset(modeset, &fb_helper->client) { crtc = modeset->crtc; - if (!crtc->funcs->gamma_set || !crtc->gamma_size) - return -EINVAL; + if (!crtc->funcs->gamma_set || !crtc->gamma_size) { + ret = -EINVAL; + goto out; + } - if (cmap->start + cmap->len > crtc->gamma_size) - return -EINVAL; + if (cmap->start + cmap->len > crtc->gamma_size) { + ret = -EINVAL; + goto out; + } r = crtc->gamma_store; g = r + crtc->gamma_size; @@ -978,8 +986,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) ret = crtc->funcs->gamma_set(crtc, r, g, b, crtc->gamma_size, NULL); if (ret) - return ret; + goto out; } +out: drm_modeset_unlock_all(fb_helper->dev); return ret; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6854f5867d51..1fdc85a71cec 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -710,6 +710,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, if (!objs) return -ENOMEM; + *objs_out = objs; + handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; @@ -723,8 +725,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, } ret = objects_lookup(filp, handles, count, objs); - *objs_out = objs; - out: kvfree(handles); return ret; @@ -872,9 +872,6 @@ err: * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. 
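The new handle takes its own reference on the object, so the reference obtained by the global-name lookup above is dropped on both the success and error paths via the err label below.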
*/ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index f5918707672f..d88f4230c221 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -474,14 +474,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); loff_t num_pages = obj->size >> PAGE_SHIFT; + vm_fault_t ret; struct page *page; + pgoff_t page_offset; - if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) - return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - page = shmem->pages[vmf->pgoff]; + mutex_lock(&shmem->pages_lock); - return vmf_insert_page(vma, vmf->address, page); + if (page_offset >= num_pages || + WARN_ON_ONCE(!shmem->pages) || + shmem->madv < 0) { + ret = VM_FAULT_SIGBUS; + } else { + page = shmem->pages[page_offset]; + + ret = vmf_insert_page(vma, vmf->address, page); + } + + mutex_unlock(&shmem->pages_lock); + + return ret; } static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) @@ -549,9 +563,6 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags |= VM_MIXEDMAP; - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); - return 0; } EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 22c7fd7196c8..2cf053fb8d54 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd, if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) return -EFAULT; + memset(&v, 0, sizeof(v)); + v = (struct drm_version) { .name_len = v32.name_len, .name = compat_ptr(v32.name), @@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; + + memset(&uq, 0, sizeof(uq)); + uq = (struct drm_unique){ .unique_len = uq32.unique_len, .unique = compat_ptr(uq32.unique), @@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, if (copy_from_user(&c32, argp, sizeof(c32))) return -EFAULT; + memset(&client, 0, sizeof(client)); + client.idx = c32.idx; err = drm_ioctl_kernel(file, drm_getclient, &client, 0); @@ -850,6 +857,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; + memset(&req, 0, sizeof(req)); + req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; @@ -887,6 +896,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, struct drm_mode_fb_cmd2 req64; int err; + memset(&req64, 0, sizeof(req64)); + if (copy_from_user(&req64, argp, offsetof(drm_mode_fb_cmd232_t, modifier))) return -EFAULT; diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index a05e64e3d80b..4042f5b39765 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -937,7 +937,7 @@ static int 
mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, } } - tr.len = chunk; + tr.len = chunk * 2; len -= chunk; ret = spi_sync(spi, &m); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index bd2498bbd74a..b99f96dcc6f1 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1029,11 +1029,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); */ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) { - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, - scanline & 0xff }; + u8 payload[2] = { scanline >> 8, scanline & 0xff }; ssize_t err; - err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload, + sizeof(payload)); if (err < 0) return err; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ffd95bfeaa94..f6bdec7fa925 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data { int orientation; }; -static const struct drm_dmi_panel_orientation_data acer_s1003 = { - .width = 800, - .height = 1280, - .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, -}; - static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, @@ -90,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data onegx1_pro = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "12/17/2020", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { .width = 720, .height = 1280, @@ -114,13 +115,25 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, - .driver_data = (void *)&acer_s1003, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), }, .driver_data = (void *)&asus_t100ha, + }, { /* Asus T101HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Asus T103HAF */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), @@ -205,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* OneGX1 Pro */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), + }, + .driver_data = (void *)&onegx1_pro, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index a86a3ab2771c..235729f4aadb 100644 --- a/drivers/gpu/drm/drm_pci.c +++ 
b/drivers/gpu/drm/drm_pci.c @@ -51,8 +51,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) { drm_dma_handle_t *dmah; - unsigned long addr; - size_t sz; /* pci_alloc_consistent only guarantees alignment to the smallest * PAGE_SIZE order which is greater than or equal to the requested size. @@ -68,20 +66,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali dmah->size = size; dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, - GFP_KERNEL | __GFP_COMP); + GFP_KERNEL); if (dmah->vaddr == NULL) { kfree(dmah); return NULL; } - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Reserve */ - for (addr = (unsigned long)dmah->vaddr, sz = size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - SetPageReserved(virt_to_page((void *)addr)); - } - return dmah; } @@ -94,19 +85,9 @@ EXPORT_SYMBOL(drm_pci_alloc); */ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { - unsigned long addr; - size_t sz; - - if (dmah->vaddr) { - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Unreserve */ - for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - ClearPageReserved(virt_to_page((void *)addr)); - } + if (dmah->vaddr) dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, dmah->busaddr); - } } /** diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 4b5c7b0ed714..8fdb27135406 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -326,19 +326,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private, return -ENOENT; *fence = drm_syncobj_fence_get(syncobj); - drm_syncobj_put(syncobj); if (*fence) { ret = dma_fence_chain_find_seqno(fence, point); if (!ret) - return 0; + goto out; dma_fence_put(*fence); } else { ret = -EINVAL; } if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) - return ret; + goto out; memset(&wait, 0, sizeof(wait)); wait.task = current; @@ -370,6 +369,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (wait.node.next) drm_syncobj_remove_wait(syncobj, &wait); +out: + drm_syncobj_put(syncobj); + return ret; } EXPORT_SYMBOL(drm_syncobj_find_fence); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index dd2bc85f43cc..4fd2f6cd03c1 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -293,9 +293,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector) return PTR_ERR(connector->kdev); } - /* Let userspace know we have a new connector */ - drm_sysfs_hotplug_event(dev); - if (connector->ddc) return sysfs_create_link(&connector->kdev->kobj, &connector->ddc->dev.kobj, "ddc"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 7e4e2959bf4f..0c9c40720ca9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -12,6 +12,7 @@ #include "common.xml.h" #include "state.xml.h" +#include "state_blt.xml.h" #include "state_hi.xml.h" #include "state_3d.xml.h" #include "cmdstream.xml.h" @@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) struct etnaviv_cmdbuf *buffer = &gpu->buffer; unsigned int waitlink_offset = buffer->user_size - 16; u32 link_target, flush = 0; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) if (flush) { unsigned int 
dwords = 7; + if (has_blt) + dwords += 10; + link_target = etnaviv_buffer_reserve(gpu, buffer, dwords); CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); - if (gpu->exec_state == ETNA_PIPE_3D) - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (gpu->exec_state == ETNA_PIPE_3D) { + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } + } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_END(buffer); etnaviv_buffer_replace_wait(buffer, waitlink_offset, @@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, bool switch_mmu_context = gpu->mmu_context != mmu_context; unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, * 2 semaphore stall + 1 event + 1 wait + 1 link. */ return_dwords = 7; + + /* + * When the BLT engine is present we need 6 more dwords in the return + * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable, + * but we don't need the normal TS flush state. 
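+ * Note that each BLT access below is bracketed by VIVS_BLT_ENABLE writes, so the enable/semaphore stall/disable sequence is emitted once more after the PE stall.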
+ */ + if (has_blt) + return_dwords += 6; + return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); CMD_LINK(cmdbuf, return_dwords, return_target); @@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR); - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } + CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE); CMD_WAIT(buffer); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index aa3e4c3b063a..1ba83a90cdef 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -240,8 +240,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) } if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && - submit->bos[i].va != mapping->iova) + submit->bos[i].va != mapping->iova) { + etnaviv_gem_mapping_unreference(mapping); return -EINVAL; + } atomic_inc(&etnaviv_obj->gpu_active); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index d47d1a8e0219..85de8551ce86 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -713,7 +713,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) { dev_err(gpu->dev, "Failed to enable GPU power domain\n"); - return ret; + goto pm_put; } etnaviv_hw_identify(gpu); @@ -802,6 +802,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) fail: pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -842,7 +843,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) - return ret; + goto pm_put; dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); @@ -965,6 +966,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = 0; pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -978,7 +980,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) dev_err(gpu->dev, "recover hung GPU!\n"); if (pm_runtime_get_sync(gpu->dev) < 0) - return; + goto pm_put; mutex_lock(&gpu->lock); @@ -997,6 +999,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); } @@ -1269,8 +1272,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) if (!submit->runtime_resumed) { ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(gpu->dev); return NULL; + } submit->runtime_resumed = true; } @@ -1287,6 +1292,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) ret = 
event_alloc(gpu, nr_events, event); if (ret) { DRM_ERROR("no free events\n"); + pm_runtime_put_noidle(gpu->dev); return NULL; } @@ -1457,7 +1463,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) if (gpu->clk_bus) { ret = clk_prepare_enable(gpu->clk_bus); if (ret) - return ret; + goto disable_clk_reg; } if (gpu->clk_core) { @@ -1480,6 +1486,9 @@ disable_clk_core: disable_clk_bus: if (gpu->clk_bus) clk_disable_unprepare(gpu->clk_bus); +disable_clk_reg: + if (gpu->clk_reg) + clk_disable_unprepare(gpu->clk_reg); return ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 8adbf2861bff..75f9db8f7bec 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -32,6 +32,7 @@ struct etnaviv_pm_domain { }; struct etnaviv_pm_domain_meta { + unsigned int feature; const struct etnaviv_pm_domain *domains; u32 nr_domains; }; @@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = { static const struct etnaviv_pm_domain_meta doms_meta[] = { { + .feature = chipFeatures_PIPE_3D, .nr_domains = ARRAY_SIZE(doms_3d), .domains = &doms_3d[0] }, { + .feature = chipFeatures_PIPE_2D, .nr_domains = ARRAY_SIZE(doms_2d), .domains = &doms_2d[0] }, { + .feature = chipFeatures_PIPE_VG, .nr_domains = ARRAY_SIZE(doms_vg), .domains = &doms_vg[0] } }; +static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu) +{ + unsigned int num = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if (gpu->identity.features & meta->feature) + num += meta->nr_domains; + } + + return num; +} + +static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, + unsigned int index) +{ + const struct etnaviv_pm_domain *domain = NULL; + unsigned int offset = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if (!(gpu->identity.features & meta->feature)) + continue; + + if (index - offset >= meta->nr_domains) { + offset += meta->nr_domains; + continue; + } + + domain = meta->domains + (index - offset); + } + + return domain; +} + int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_domain *domain) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe]; + const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; - if (domain->iter >= meta->nr_domains) + if (domain->iter >= nr_domains) return -EINVAL; - dom = meta->domains + domain->iter; + dom = pm_domain(gpu, domain->iter); + if (!dom) + return -EINVAL; domain->id = domain->iter; domain->nr_signals = dom->nr_signals; strncpy(domain->name, dom->name, sizeof(domain->name)); domain->iter++; - if (domain->iter == meta->nr_domains) + if (domain->iter == nr_domains) domain->iter = 0xff; return 0; @@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_signal *signal) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe]; + const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; const struct etnaviv_pm_signal *sig; - if (signal->domain >= meta->nr_domains) + if (signal->domain >= nr_domains) return -EINVAL; - dom = meta->domains + signal->domain; + dom = pm_domain(gpu, signal->domain); + if (!dom) + return -EINVAL; if (signal->iter >= dom->nr_signals) return -EINVAL; diff --git 
a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h index daae55995def..0e8bcf9dcc93 100644 --- a/drivers/gpu/drm/etnaviv/state_blt.xml.h +++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h @@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE. /* This is a cut-down version of the state_blt.xml.h file */ +#define VIVS_BLT_SET_COMMAND 0x000140ac + #define VIVS_BLT_ENABLE 0x000140b8 #define VIVS_BLT_ENABLE_ENABLE 0x00000001 diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 3a0f0ba8c63a..e0cfae744afc 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -158,15 +158,8 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; int ret; - dp->dev = dev; dp->drm_dev = drm_dev; - dp->plat_data.dev_type = EXYNOS_DP; - dp->plat_data.power_on_start = exynos_dp_poweron; - dp->plat_data.power_off = exynos_dp_poweroff; - dp->plat_data.attach = exynos_dp_bridge_attach; - dp->plat_data.get_modes = exynos_dp_get_modes; - if (!dp->plat_data.panel && !dp->ptn_bridge) { ret = exynos_dp_dt_parse_panel(dp); if (ret) @@ -184,13 +177,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) dp->plat_data.encoder = encoder; - dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); - if (IS_ERR(dp->adp)) { + ret = analogix_dp_bind(dp->adp, dp->drm_dev); + if (ret) dp->encoder.funcs->destroy(&dp->encoder); - return PTR_ERR(dp->adp); - } - return 0; + return ret; } static void exynos_dp_unbind(struct device *dev, struct device *master, @@ -221,6 +212,7 @@ static int exynos_dp_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; + dp->dev = dev; /* * We just use the drvdata until driver run into component * add function, and then we would set drvdata to null, so @@ -246,16 +238,29 @@ static int exynos_dp_probe(struct platform_device *pdev) /* The remote port can be either a panel or a bridge */ dp->plat_data.panel = panel; + dp->plat_data.dev_type = EXYNOS_DP; + dp->plat_data.power_on_start = exynos_dp_poweron; + dp->plat_data.power_off = exynos_dp_poweroff; + dp->plat_data.attach = exynos_dp_bridge_attach; + dp->plat_data.get_modes = exynos_dp_get_modes; dp->plat_data.skip_connector = !!bridge; + dp->ptn_bridge = bridge; out: + dp->adp = analogix_dp_probe(dev, &dp->plat_data); + if (IS_ERR(dp->adp)) + return PTR_ERR(dp->adp); + return component_add(&pdev->dev, &exynos_dp_ops); } static int exynos_dp_remove(struct platform_device *pdev) { + struct exynos_dp_device *dp = platform_get_drvdata(pdev); + component_del(&pdev->dev, &exynos_dp_ops); + analogix_dp_remove(dp->adp); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 619f81435c1b..58b89ec11b0e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - int ret; + int ret = 0; if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) { DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n", @@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, if (ret) clear_dma_max_seg_size(subdrv_dev); - return 0; + return ret; } /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 8ed94c994800..b83acd696774 100644 
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1741,10 +1741,6 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->dev = dev; dsi->driver_data = of_device_get_match_data(dev); - ret = exynos_dsi_parse_dt(dsi); - if (ret) - return ret; - dsi->supplies[0].supply = "vddcore"; dsi->supplies[1].supply = "vddio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), @@ -1805,11 +1801,25 @@ static int exynos_dsi_probe(struct platform_device *pdev) return ret; } + ret = exynos_dsi_parse_dt(dsi); + if (ret) + return ret; + platform_set_drvdata(pdev, &dsi->encoder); pm_runtime_enable(dev); - return component_add(dev, &exynos_dsi_component_ops); + ret = component_add(dev, &exynos_dsi_component_ops); + if (ret) + goto err_disable_runtime; + + return 0; + +err_disable_runtime: + pm_runtime_disable(dev); + of_node_put(dsi->in_bridge_node); + + return ret; } static int exynos_dsi_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index b78e8c5ba553..2aff986add89 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -268,8 +268,10 @@ static void mic_pre_enable(struct drm_bridge *bridge) goto unlock; ret = pm_runtime_get_sync(mic->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(mic->dev); goto unlock; + } mic_set_path(mic, 1); diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index f56852a503e8..8b784947ed3b 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock; + memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) { diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 570b59520fd1..87738650dd90 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -2120,12 +2120,12 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev intel_dp->dpcd, sizeof(intel_dp->dpcd)); cdv_intel_edp_panel_vdd_off(gma_encoder); - if (ret == 0) { + if (ret <= 0) { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); cdv_intel_dp_encoder_destroy(encoder); cdv_intel_dp_destroy(connector); - goto err_priv; + goto err_connector; } else { DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", intel_dp->dpcd[0], intel_dp->dpcd[1], diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index e28107061148..fc9a34ed58bd 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) hdmi_dev = pci_get_drvdata(dev); i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL); - if (i2c_dev == NULL) { - DRM_ERROR("Can't allocate interface\n"); - ret = -ENOMEM; - goto exit; - } + if (!i2c_dev) + return -ENOMEM; i2c_dev->adap = &oaktrail_hdmi_i2c_adapter; i2c_dev->status = I2C_STAT_INIT; @@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) oaktrail_hdmi_i2c_adapter.name, hdmi_dev); if (ret) { DRM_ERROR("Failed to request IRQ for I2C controller\n"); - goto err; + goto free_dev; } /* Adapter 
registration */ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter); - return ret; + if (ret) { + DRM_ERROR("Failed to add I2C adapter\n"); + goto free_irq; + } -err: + return 0; + +free_irq: + free_irq(dev->irq, hdmi_dev); +free_dev: kfree(i2c_dev); -exit: + return ret; } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 7005f8f69c68..d414525eccf6 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -313,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_err; + ret = -ENOMEM; + dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); if (!dev_priv->mmu) goto out_err; diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index e6265fb85626..56bb34d04332 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -337,6 +337,7 @@ int psb_irq_postinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -349,20 +350,12 @@ int psb_irq_postinstall(struct drm_device *dev) PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + else + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } if (dev_priv->ops->hotplug_enable) dev_priv->ops->hotplug_enable(dev, true); @@ -375,6 +368,7 @@ void psb_irq_uninstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -383,14 +377,10 @@ void psb_irq_uninstall(struct drm_device *dev) PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile index 0c2d4296bccd..87f990f1d32e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Makefile +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o +hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o hibmc_drm_i2c.o obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c 
index cc4c41748cfb..747b64aaa5f6 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config { }; static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { + {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ}, {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ}, {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ}, {1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ}, @@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ}, {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, + {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ}, + {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ}, {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ}, {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ}, @@ -123,11 +126,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); reg = state->fb->width * (state->fb->format->cpp[0]); - /* now line_pad is 16 */ - reg = PADDING(16, reg); line_l = state->fb->width * state->fb->format->cpp[0]; - line_l = PADDING(16, line_l); + line_l = PADDING(128, line_l); writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), priv->mmio + HIBMC_CRT_FB_WIDTH); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index c103005b0a33..ac36304a9d71 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -97,7 +97,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) priv->dev->mode_config.fb_base = priv->fb_base; priv->dev->mode_config.preferred_depth = 24; - priv->dev->mode_config.prefer_shadow = 0; + priv->dev->mode_config.prefer_shadow = 1; priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; @@ -331,6 +331,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev, struct drm_device *dev; int ret; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, + "hibmcdrmfb"); + if (ret) + return ret; + dev = drm_dev_alloc(&hibmc_driver, &pdev->dev); if (IS_ERR(dev)) { DRM_ERROR("failed to allocate drm_device\n"); @@ -376,7 +381,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev) drm_dev_unregister(dev); hibmc_unload(dev); - drm_dev_put(dev); } static struct pci_device_id hibmc_pci_table[] = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index e58ecd7edcf8..b0d2f79b43b9 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -14,6 +14,12 @@ #ifndef HIBMC_DRM_DRV_H #define HIBMC_DRM_DRV_H +#include <linux/gpio/consumer.h> +#include <linux/i2c-algo-bit.h> +#include <linux/i2c.h> +#include <drm/drm_encoder.h> + +#include <drm/drm_edid.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> @@ -31,6 +37,13 @@ struct hibmc_fbdev { int size; }; +struct hibmc_connector { + struct drm_connector base; + + struct i2c_adapter adapter; + struct i2c_algo_bit_data bit_data; +}; + struct hibmc_drm_private { /* hw */ void __iomem *mmio; @@ -41,6 +54,8 @@ struct hibmc_drm_private { /* drm */ struct drm_device *dev; + struct drm_encoder encoder; + struct hibmc_connector connector; bool mode_config_initialized; /* fbdev */ @@ -49,6 +64,16 
@@ struct hibmc_drm_private { #define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb) +static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector) +{ + return container_of(connector, struct hibmc_connector, base); +} + +static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev) +{ + return dev->dev_private; +} + void hibmc_set_power_mode(struct hibmc_drm_private *priv, unsigned int power_mode); void hibmc_set_current_gate(struct hibmc_drm_private *priv, @@ -70,6 +95,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc); void hibmc_mm_fini(struct hibmc_drm_private *hibmc); int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); +int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector); extern const struct drm_mode_config_funcs hibmc_mode_funcs; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index b4c1cea051e8..4911b550542d 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -73,7 +73,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel; + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * bytes_per_pixel, 128); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); @@ -186,7 +186,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv) goto fini; } - ret = drm_fb_helper_initial_config(&hifbdev->helper, 16); + ret = drm_fb_helper_initial_config(&hifbdev->helper, 32); if (ret) { DRM_ERROR("failed to setup initial conn config: %d\n", ret); goto fini; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c new file mode 100644 index 000000000000..86d712090d87 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. 
+ * + * Author: + * Tian Tao <tiantao6@hisilicon.com> + */ + +#include <linux/delay.h> +#include <linux/pci.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_probe_helper.h> + +#include "hibmc_drm_drv.h" + +#define GPIO_DATA 0x0802A0 +#define GPIO_DATA_DIRECTION 0x0802A4 + +#define I2C_SCL_MASK BIT(0) +#define I2C_SDA_MASK BIT(1) + +static void hibmc_set_i2c_signal(void *data, u32 mask, int value) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if (value) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } else { + u32 tmp_data = readl(priv->mmio + GPIO_DATA); + + tmp_data &= ~mask; + writel(tmp_data, priv->mmio + GPIO_DATA); + + tmp_dir |= mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } +} + +static int hibmc_get_i2c_signal(void *data, u32 mask) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if ((tmp_dir & mask) != mask) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } + + return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0; +} + +static void hibmc_ddc_setsda(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SDA_MASK, state); +} + +static void hibmc_ddc_setscl(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SCL_MASK, state); +} + +static int hibmc_ddc_getsda(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SDA_MASK); +} + +static int hibmc_ddc_getscl(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SCL_MASK); +} + +int hibmc_ddc_create(struct drm_device *drm_dev, + struct hibmc_connector *connector) +{ + connector->adapter.owner = THIS_MODULE; + connector->adapter.class = I2C_CLASS_DDC; + snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus"); + connector->adapter.dev.parent = &drm_dev->pdev->dev; + i2c_set_adapdata(&connector->adapter, connector); + connector->adapter.algo_data = &connector->bit_data; + + connector->bit_data.udelay = 20; + connector->bit_data.timeout = usecs_to_jiffies(2000); + connector->bit_data.data = connector; + connector->bit_data.setsda = hibmc_ddc_setsda; + connector->bit_data.setscl = hibmc_ddc_setscl; + connector->bit_data.getsda = hibmc_ddc_getsda; + connector->bit_data.getscl = hibmc_ddc_getscl; + + return i2c_bit_add_bus(&connector->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h index b63a1ee15ceb..057e480984fd 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h @@ -169,11 +169,12 @@ #define CRT_PLL1_HS_78MHZ 0x23540F82 #define CRT_PLL1_HS_74MHZ 0x23941dc2 #define CRT_PLL1_HS_80MHZ 0x23941001 -#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2 +#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2 #define CRT_PLL1_HS_108MHZ 0x23b41b01 #define CRT_PLL1_HS_162MHZ 0x23480681 #define CRT_PLL1_HS_148MHZ 0x23541dc2 #define CRT_PLL1_HS_193MHZ 0x234807c1 +#define CRT_PLL1_HS_106MHZ 0x237C1641 #define CRT_PLL2_HS 0x802ac #define CRT_PLL2_HS_25MHZ 0x206B851E @@ -186,6 +187,7 @@ #define CRT_PLL2_HS_162MHZ 0xA0000000 #define CRT_PLL2_HS_148MHZ 0xB0CCCCCD #define CRT_PLL2_HS_193MHZ 0xC0872B02 +#define CRT_PLL2_HS_106MHZ 0x0075c28f #define HIBMC_FIELD(field, value) (field(value) & field##_MASK) #endif diff --git 
a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index 6d98fdc06f6c..bc746969ced8 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -18,15 +18,82 @@ #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" +struct hibmc_resolution { + int w; + int h; +}; + +static const struct hibmc_resolution hibmc_modetables[] = { + {640, 480}, + {800, 600}, + {1024, 768}, + {1152, 864}, + {1280, 768}, + {1280, 720}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1600, 900}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} +}; + +static int hibmc_valid_mode(int w, int h) +{ + int i; + + for (i = 0; i < sizeof(hibmc_modetables) / + sizeof(struct hibmc_resolution); i++) { + if (hibmc_modetables[i].w == w && hibmc_modetables[i].h == h) + return 0; + } + + return -1; +} + static int hibmc_connector_get_modes(struct drm_connector *connector) { - return drm_add_modes_noedid(connector, 800, 600); + int count; + void *edid; + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + edid = drm_get_edid(connector, &hibmc_connector->adapter); + if (edid) { + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + if (count) + goto out; + } + + drm_connector_update_edid_property(connector, NULL); + count = drm_add_modes_noedid(connector, 1920, 1200); + drm_set_preferred_mode(connector, 1024, 768); + +out: + kfree(edid); + return count; } static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - return MODE_OK; + int vrefresh = drm_mode_vrefresh(mode); + + if (vrefresh < 59 || vrefresh > 61) + return MODE_NOMODE; + else if (hibmc_valid_mode(mode->hdisplay, mode->vdisplay) != 0) + return MODE_NOMODE; + else + return MODE_OK; +} + +static void hibmc_connector_destroy(struct drm_connector *connector) +{ + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + i2c_del_adapter(&hibmc_connector->adapter); + drm_connector_cleanup(connector); } static const struct drm_connector_helper_funcs @@ -37,7 +104,7 @@ static const struct drm_connector_helper_funcs static const struct drm_connector_funcs hibmc_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, + .destroy = hibmc_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -96,21 +163,15 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = { int hibmc_vdac_init(struct hibmc_drm_private *priv) { struct drm_device *dev = priv->dev; - struct drm_encoder *encoder; - struct drm_connector *connector; + struct hibmc_connector *hibmc_connector = &priv->connector; + struct drm_encoder *encoder = &priv->encoder; + struct drm_connector *connector = &hibmc_connector->base; int ret; - connector = hibmc_connector_init(priv); - if (IS_ERR(connector)) { - DRM_ERROR("failed to create connector: %ld\n", - PTR_ERR(connector)); - return PTR_ERR(connector); - } - - encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL); - if (!encoder) { - DRM_ERROR("failed to alloc memory when init encoder\n"); - return -ENOMEM; + ret = hibmc_ddc_create(dev, hibmc_connector); + if (ret) { + DRM_ERROR("failed to create ddc: %d\n", ret); + return ret; } encoder->possible_crtcs = 0x1; @@ -122,6 +183,18 @@ int 
hibmc_vdac_init(struct hibmc_drm_private *priv) } drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); + + ret = drm_connector_init_with_ddc(dev, connector, + &hibmc_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &hibmc_connector->adapter); + if (ret) { + DRM_ERROR("failed to init connector: %d\n", ret); + return ret; + } + + drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); + drm_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 9f6e473e6295..4d9ab15ffdeb 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -77,7 +77,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, u32 handle; int ret; - args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16); + args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 128); args->size = args->pitch * args->height; ret = hibmc_gem_create(dev, args->size, false, diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index 3456d33feb46..ce8182bd0b55 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -83,13 +83,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle) return; } + if (!pkg->package.count) { + DRM_DEBUG_DRIVER("no connection in _DSM\n"); + return; + } + connector_count = &pkg->package.elements[0]; DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", (unsigned long long)connector_count->integer.value); for (i = 1; i < pkg->package.count; i++) { union acpi_object *obj = &pkg->package.elements[i]; - union acpi_object *connector_id = &obj->package.elements[0]; - union acpi_object *info = &obj->package.elements[1]; + union acpi_object *connector_id; + union acpi_object *info; + + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { + DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i); + continue; + } + + connector_id = &obj->package.elements[0]; + info = &obj->package.elements[1]; + if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) { + DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i); + continue; + } + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", (unsigned long long)connector_id->integer.value); DRM_DEBUG_DRIVER(" port id: %s\n", diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 8eb2b3ec01ed..b3c77c988d1c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2124,7 +2124,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, return; dig_port = enc_to_dig_port(&encoder->base); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 272503615378..4e1c228b9e99 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11893,10 +11893,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, case 10 ... 11: bpp = 10 * 3; break; - case 12: + case 12 ...
16: bpp = 12 * 3; break; default: + MISSING_CASE(conn_state->max_bpc); return -EINVAL; } @@ -16860,8 +16861,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) static void intel_early_display_was(struct drm_i915_private *dev_priv) { - /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + /* + * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl + * Also known as Wa_14010480278. + */ + if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9b15ac4f2fb6..c2fccf97f7a4 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -567,7 +567,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(u8, min_slice_count, + min_slice_count = max_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); @@ -1292,8 +1292,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; - enum intel_display_power_domain aux_domain = - intel_aux_power_domain(intel_dig_port); + enum intel_display_power_domain aux_domain; intel_wakeref_t aux_wakeref; intel_wakeref_t pps_wakeref; int i, ret, recv_bytes; @@ -1308,6 +1307,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, if (is_tc_port) intel_tc_port_lock(intel_dig_port); + aux_domain = intel_aux_power_domain(intel_dig_port); + aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); @@ -7218,11 +7219,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_connector_get_hw_state; /* init MST on ports that can support it */ - if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && - (port == PORT_B || port == PORT_C || - port == PORT_D || port == PORT_F)) - intel_dp_mst_encoder_init(intel_dig_port, - intel_connector->base.base.id); + intel_dp_mst_encoder_init(intel_dig_port, + intel_connector->base.base.id); if (!intel_edp_init_connector(intel_dp, intel_connector)) { intel_dp_aux_fini(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 600873c796d0..74d45a0eecb8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -653,21 +653,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_device *dev = intel_dig_port->base.base.dev; + enum port port = intel_dig_port->base.port; int ret; - intel_dp->can_mst = true; + if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) + return 0; + + if (INTEL_GEN(i915) < 12 && port == PORT_A) + return 0; + + if (INTEL_GEN(i915) < 11 && port == PORT_E) + return 0; + intel_dp->mst_mgr.cbs = &mst_cbs; /* create encoders */ intel_dp_create_fake_mst_encoders(intel_dig_port); - ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev, + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, &intel_dp->aux, 16, 3, conn_base_id); - if (ret) { - intel_dp->can_mst 
= false; + if (ret) return ret; - } + + intel_dp->can_mst = true; + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 07a038f21619..caf6166622e4 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -504,8 +504,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) if (!ret) goto err_llb; else if (ret > 1) { - DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); - + DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } fbc->threshold = ret; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index b030f7ae3302..4cf95e8031e3 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2129,7 +2129,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi)) return MODE_CLOCK_HIGH; - /* BXT DPLL can't generate 223-240 MHz */ + /* GLK DPLL can't generate 446-480 MHz */ + if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000) + return MODE_CLOCK_RANGE; + + /* BXT/GLK DPLL can't generate 223-240 MHz */ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index bc14e9c0285a..23edc1b8e43f 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -1603,20 +1603,21 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus val = pch_get_backlight(connector); else val = lpt_get_backlight(connector); - val = intel_panel_compute_brightness(connector, val); - panel->backlight.level = clamp(val, panel->backlight.min, - panel->backlight.max); if (cpu_mode) { DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n"); /* Write converted CPU PWM value to PCH override register */ - lpt_set_backlight(connector->base.state, panel->backlight.level); + lpt_set_backlight(connector->base.state, val); I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE); } + val = intel_panel_compute_brightness(connector, val); + panel->backlight.level = clamp(val, panel->backlight.min, + panel->backlight.max); + return 0; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index a71b22bdd95b..7f329d8118a4 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -797,10 +797,20 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, if (intel_dsi->gpio_panel) gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); - intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); - /* Deassert reset */ - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + /* + * Give the panel time to power-on and then deassert its reset. + * Depending on the VBT MIPI sequences version the deassert-seq + * may contain the necessary delay, intel_dsi_msleep() will skip + * the delay in that case. 
If there is no deassert-seq, then an + * unconditional msleep is used to give the panel time to power-on. + */ + if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) { + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + } else { + msleep(intel_dsi->panel_on_delay); + } if (IS_GEMINILAKE(dev_priv)) { glk_cold_boot = glk_dsi_enable_io(encoder); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 9c58e8fac1d9..a4b48c9abeac 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -605,21 +605,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (!obj) return -ENOENT; - /* - * Already in the desired write domain? Nothing for us to do! - * - * We apply a little bit of cunning here to catch a broader set of - * no-ops. If obj->write_domain is set, we must be in the same - * obj->read_domains, and only that domain. Therefore, if that - * obj->write_domain matches the request read_domains, we are - * already in the same read/write domain and can skip the operation, - * without having to further check the requested write_domain. - */ - if (READ_ONCE(obj->write_domain) == read_domains) { - err = 0; - goto out; - } - /* * Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner @@ -657,6 +642,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (err) goto out; + /* + * Already in the desired write domain? Nothing for us to do! + * + * We apply a little bit of cunning here to catch a broader set of + * no-ops. If obj->write_domain is set, we must be in the same + * obj->read_domains, and only that domain. Therefore, if that + * obj->write_domain matches the request read_domains, we are + * already in the same read/write domain and can skip the operation, + * without having to further check the requested write_domain. 
+ */ + if (READ_ONCE(obj->write_domain) == read_domains) + goto out_unpin; + err = i915_gem_object_lock_interruptible(obj); if (err) goto out_unpin; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index bd4e41380777..198a91c76531 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -367,7 +367,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, return true; if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && - (vma->node.start + vma->node.size - 1) >> 32) + (vma->node.start + vma->node.size + 4095) >> 32) return true; if (flags & __EXEC_OBJECT_NEEDS_MAP && @@ -931,11 +931,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) static void reloc_gpu_flush(struct reloc_cache *cache) { - GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); + struct drm_i915_gem_object *obj = cache->rq->batch->obj; + + GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); - i915_gem_object_unpin_map(cache->rq->batch->obj); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(cache->rq->engine->gt); @@ -1161,6 +1163,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto out_pool; } + memset32(cmd, 0, pool->obj->base.size / sizeof(u32)); + batch = i915_vma_instance(pool->obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 4c4954e8ce0a..3f875aebbd23 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -36,7 +36,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned int max_segment = i915_sg_segment_size(); unsigned int sg_page_sizes; - struct pagevec pvec; gfp_t noreclaim; int ret; @@ -188,13 +187,17 @@ err_sg: sg_mark_end(sg); err_pages: mapping_clear_unevictable(mapping); - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) + if (sg != st->sgl) { + struct pagevec pvec; + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) check_release_pagevec(&pvec); } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); sg_free_table(st); kfree(st); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 968d9b2705d0..6d0cc90401c0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -619,6 +619,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + /* + * Using __get_user_pages_fast() with a read-only + * access is questionable. A read-only page may be + * COW-broken, and then this might end up giving + * the wrong side of the COW.. + * + * We may or may not care. 
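
The userptr comment above names a genuine pitfall: a read-only __get_user_pages_fast() can pin the original page of a copy-on-write mapping before the COW break, after which CPU writes land in a fresh copy that the GPU's pinned page never sees. A minimal sketch of the usual mitigation, assuming the userptr range is writable (the helper below is hypothetical, not the driver's code):

        #include <linux/mm.h>

        /*
         * Illustrative sketch only: request write access so any pending
         * COW is resolved before the pages are pinned. A real caller
         * must reject or special-case read-only mappings.
         */
        static int pin_writable_range(unsigned long start, int nr_pages,
                                      struct page **pages)
        {
                return get_user_pages_fast(start, nr_pages, FOLL_WRITE,
                                           pages);
        }
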
+ */ if (pvec) /* defer to worker if malloc fails */ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 4ce8626b140e..8073758d1036 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -354,7 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) * instances. */ if ((INTEL_GEN(i915) >= 11 && - RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || + (RUNTIME_INFO(i915)->vdbox_sfc_access & + BIT(engine->instance))) || (INTEL_GEN(i915) >= 9 && engine->instance == 0)) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 66f6d1a897f2..398068b17389 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1574,6 +1574,9 @@ static void process_csb(struct intel_engine_cs *engine) if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); + /* XXX Magic delay for tgl */ + ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); + WRITE_ONCE(execlists->pending[0], NULL); break; @@ -2242,6 +2245,9 @@ err: static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) { i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); + + /* Called on error unwind, clear all flags to prevent further use */ + memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx)); } typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); @@ -3751,6 +3757,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_execlists(&ve->base); + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index cea184a7dde9..e97a2aa31485 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -130,7 +130,19 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), - L3_3_WB) + L3_3_WB), + + /* + * mocs:63 + * - used by the L3 for all of its evictions. + * Thus it is expected to allow LLC cacheability to enable coherent + * flows to be maintained. + * - used to force L3 uncachable cycles. + * Thus it is expected to make the surface L3 uncacheable. + */ + MOCS_ENTRY(63, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC) }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index fc29a3705354..56cd14cacf5e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -963,18 +963,6 @@ static int cmd_handler_lri(struct parser_exec_state *s) int i, ret = 0; int cmd_len = cmd_length(s); struct intel_gvt *gvt = s->vgpu->gvt; - u32 valid_len = CMD_LEN(1); - - /* - * Official intel docs are somewhat sloppy , check the definition of - * MI_LOAD_REGISTER_IMM. 
- */ - #define MAX_VALID_LEN 127 - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { - gvt_err("len is not valid: len=%u valid_len=%u\n", - cmd_len, valid_len); - return -EFAULT; - } for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index a62bdf9be682..21a562c2b1f5 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -172,21 +172,176 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) int pipe; if (IS_BROXTON(dev_priv)) { + enum transcoder trans; + enum port port; + + /* Clear PIPE, DDI, PHY, HPD before setting new */ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | BXT_DE_PORT_HP_DDIB | BXT_DE_PORT_HP_DDIC); + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= + ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; + } + + for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); + } + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + + for (port = PORT_A; port <= PORT_C; port++) { + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |= + (BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &= + ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &= + ~(DDI_INIT_DISPLAY_DETECTED | + DDI_BUF_CTL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; + } + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK); + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK); + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK); + /* No hpd_invert set in vgpu vbt, need to clear invert mask */ + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK; + + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); + + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; + + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor. + */ + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; + + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. 
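
The "golden" M/N values programmed just below can be cross-checked against the standard DP data M/N ratio, data_m / data_n = (pixel clock x bpp) / (link clock x lanes x 8). Taking the parameters from the comment (154000 kHz pixel clock, 24 bpp, 4 lanes, and the 1.62 GHz link rate, i.e. a 162000 kHz link clock) with N fixed at 0x800000, M works out to 0x800000 * 3696000 / 5184000 = 5980766 = 0x5b425e, exactly the value written to PIPE_DATA_M1 (the 63 << TU_SIZE_SHIFT field encodes a transfer-unit size of 64). A plain-C sketch of that arithmetic, not the driver's own M/N helper:

        /* Illustrative only; i915 reduces M/N with its own helper. */
        static unsigned long long dp_data_m(unsigned long long pixel_khz,
                                            unsigned long long bpp,
                                            unsigned long long lanes,
                                            unsigned long long link_khz)
        {
                /* data N fixed at 0x800000 (2^23), as in the vregs below */
                return (pixel_khz * bpp * 0x800000ULL) /
                       (link_khz * lanes * 8);
        }

        /* dp_data_m(154000, 24, 4, 162000) == 5980766 == 0x5b425e */
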
+ * TODO: calculate DP link symbol clk and stream clk m/n. + */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; + + /* Enable per-DDI/PORT vreg */ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= + (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTA_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIA; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTB_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIB; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTC_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 
BXT_DE_PORT_HP_DDIC; } @@ -207,14 +362,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); - vgpu_vreg_t(vgpu, LCPLL1_CTL) |= - LCPLL_PLL_ENABLE | - LCPLL_PLL_LOCK; - vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; - + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x + * so we fixed to DPLL0 here. + * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode + */ + vgpu_vreg_t(vgpu, DPLL_CTRL1) = + DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, DPLL_CTRL1) |= + DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, LCPLL1_CTL) = + LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; + vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. + */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -235,6 +417,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -255,6 +443,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -472,6 +666,63 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTD_HOTPLUG_STATUS_MASK; intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG); + } else if (IS_BROXTON(dev_priv)) { + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + if (connected) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIA; + } else { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~BXT_DE_PORT_HP_DDIA; + } + vgpu_vreg_t(vgpu, 
GEN8_DE_PORT_IIR) |= + BXT_DE_PORT_HP_DDIA; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~PORTA_HOTPLUG_STATUS_MASK; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTA_HOTPLUG_LONG_DETECT; + intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + if (connected) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIB; + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIB_DETECTED; + } else { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~BXT_DE_PORT_HP_DDIB; + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIB_DETECTED; + } + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= + BXT_DE_PORT_HP_DDIB; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~PORTB_HOTPLUG_STATUS_MASK; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTB_HOTPLUG_LONG_DETECT; + intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + if (connected) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIC; + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIC_DETECTED; + } else { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~BXT_DE_PORT_HP_DDIC; + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIC_DETECTED; + } + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= + BXT_DE_PORT_HP_DDIC; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= + ~PORTC_HOTPLUG_STATUS_MASK; + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTC_HOTPLUG_LONG_DETECT; + intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG); + } } } diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 8f37eefa0a02..a738c7e456dd 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -128,7 +128,7 @@ static bool intel_get_gvt_attrs(struct attribute ***type_attrs, return true; } -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) { int i, j; struct intel_vgpu_type *type; @@ -146,7 +146,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) gvt_vgpu_type_groups[i] = group; } - return true; + return 0; unwind: for (j = 0; j < i; j++) { @@ -154,7 +154,7 @@ unwind: kfree(group); } - return false; + return -ENOMEM; } static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) @@ -362,7 +362,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_thread; ret = intel_gvt_init_vgpu_type_groups(gvt); - if (ret == false) { + if (ret) { gvt_err("failed to init vgpu type groups: %d\n", ret); goto out_clean_types; } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 25f78196b964..245c20d36f1b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1632,6 +1632,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } +/** + * FixMe: + * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did: + * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.) + * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing + * these MI_BATCH_BUFFER. + * Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT + * PML4 PTE: PAT(0) PCD(1) PWT(1). + * The performance is still expected to be low, will need further improvement. 
+ */ +static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u64 pat = + GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, CHV_PPAT_SNOOP) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + vgpu_vreg(vgpu, offset) = lower_32_bits(pat); + + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2778,7 +2806,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); MMIO_D(GAMTARBMODE, D_BDW_PLUS); @@ -3103,8 +3131,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL); + MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); return 0; } @@ -3278,9 +3306,17 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_F(HSW_CS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + MMIO_F(_MMIO(0x12600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + MMIO_F(BCS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + MMIO_F(_MMIO(0x1a600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index a55178884d67..e0e7adc545a5 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -271,6 +271,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6c79d16b381e..058dcd541644 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -374,7 +374,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); - + /* skip now as current i915 ppgtt alloc won't allocate + top level pdp for non 4-level table, won't impact + shadow ppgtt. 
*/ + if (!pd) + break; px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 32e57635709a..4deb7fec5eb5 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -432,8 +432,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - /*TODO: add more platforms support */ - if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) + if (IS_BROADWELL(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv)) + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + else ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index f24096e27bef..a9a69760c18d 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor { #define REG32(_reg, ...) \ { .addr = (_reg), __VA_ARGS__ } +#define REG32_IDX(_reg, idx) \ + { .addr = _reg(idx) } + /* * Convenience macro for adding 64-bit registers. * @@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG32(BCS_SWCTRL), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), REG64_IDX(BCS_GPR, 0), REG64_IDX(BCS_GPR, 1), REG64_IDX(BCS_GPR, 2), @@ -1203,6 +1207,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, return dst; } +static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc, + const u32 cmd) +{ + return desc->cmd.value == (cmd & desc->cmd.mask); +} + static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length) @@ -1241,19 +1251,19 @@ static bool check_cmd(const struct intel_engine_cs *engine, * allowed mask/value pair given in the whitelist entry. 
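
The cmd_desc_is() helper introduced in the cmd-parser hunk above captures the matching rule the parser needs: a descriptor's canonical value is only meaningful under its mask, so the reference opcode must be masked before comparing; otherwise per-instance bits, such as the DWORD length encoded into macros like MI_LOAD_REGISTER_IMM(1), defeat a plain equality test. A self-contained illustration of the same rule (types and values here are simplified stand-ins, not the driver's):

        struct cmd_desc { unsigned int value, mask; };

        /* Match only the opcode bits selected by the descriptor's mask. */
        static int desc_matches(const struct cmd_desc *d, unsigned int cmd)
        {
                return d->value == (cmd & d->mask);
        }

        /*
         * e.g. a made-up descriptor masking off an 8-bit length field:
         *   struct cmd_desc d = { .value = 0x11000000, .mask = 0xffffff00 };
         *   desc_matches(&d, 0x11000003) -> true, length bits ignored
         */
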
*/ if (reg->mask) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) { DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) { DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && + if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n", @@ -1484,7 +1494,7 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx, goto err; } - if (desc->cmd.value == MI_BATCH_BUFFER_START) { + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { ret = check_bbstart(ctx, cmd, offset, length, batch_len, batch_start, shadow_batch_start); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 89b6112bd66b..00335a1c02b0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -33,6 +33,8 @@ #include <uapi/drm/i915_drm.h> #include <uapi/drm/drm_fourcc.h> +#include <asm/hypervisor.h> + #include <linux/io-mapping.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> @@ -1892,7 +1894,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) { const unsigned int pi = __platform_mask_index(info, p); - return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; + return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); } static __always_inline bool @@ -2197,7 +2199,9 @@ static inline bool intel_vtd_active(void) if (intel_iommu_gfx_mapped) return true; #endif - return false; + + /* Running as a guest, we assume the host is enforcing VT'd */ + return !hypervisor_is_type(X86_HYPER_NATIVE); } static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fe9edbba997c..b9af329eb3a3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -307,6 +307,8 @@ static int compress_page(struct compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; + + cond_resched(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? 
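
The cond_resched() added inside the zlib loop above (and the one added to the uncompressed copy path just below) addresses a practical hazard: capturing a GPU error state can push hundreds of pages through compression in one call, and without a voluntary preemption point that loop can stall other tasks long enough to trigger soft-lockup warnings on non-preemptible kernels. The shape of the pattern, as a sketch (compress_one() is a hypothetical stand-in for the real work):

        #include <linux/mm_types.h>
        #include <linux/sched.h>

        static void compress_all(struct page **pages, int npages)
        {
                int i;

                for (i = 0; i < npages; i++) {
                        compress_one(pages[i]);  /* hypothetical helper */
                        cond_resched();          /* voluntary preemption point */
                }
        }
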
*/ @@ -392,6 +394,7 @@ static int compress_page(struct compress *c, if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; + cond_resched(); return 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37e3dd3c1a9d..4193a9970251 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3500,6 +3500,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) val = I915_READ(GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; + val |= ~enabled_irqs & hotplug_irqs; I915_WRITE(GEN11_DE_HPD_IMR, val); POSTING_READ(GEN11_DE_HPD_IMR); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 0d39038898d4..49d498882cf6 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -894,8 +894,10 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) GEM_BUG_ON(to == from); GEM_BUG_ON(to->timeline == from->timeline); - if (i915_request_completed(from)) + if (i915_request_completed(from)) { + i915_sw_fence_set_error_once(&to->submit, from->fence.error); return 0; + } if (to->engine->schedule) { ret = i915_sched_node_add_dependency(&to->sched, &from->sched); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6a88db291252..b3fd6ff665da 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -158,9 +158,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, do { list_for_each_entry_safe(pos, next, &x->head, entry) { - pos->func(pos, - TASK_NORMAL, fence->error, - &extra); + int wake_flags; + + wake_flags = fence->error; + if (pos->func == autoremove_wake_function) + wake_flags = 0; + + pos->func(pos, TASK_NORMAL, wake_flags, &extra); } if (list_empty(&extra)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3ccfc025fde2..91afeca0c6f8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2966,7 +2966,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv) static void intel_print_wm_latency(struct drm_i915_private *dev_priv, const char *name, - const u16 wm[8]) + const u16 wm[]) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -4784,7 +4784,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, * WaIncreaseLatencyIPCEnabled: kbl,cfl * Display WA #1141: kbl,cfl */ - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) || + if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && dev_priv->ipc_enabled) latency += 4; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9e583f13a9e4..62f8a47fda45 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1124,6 +1124,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore, spin_unlock(&uncore->debug->lock); } +#define __vgpu_read(x) \ +static u##x \ +vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ + u##x val = __raw_uncore_read##x(uncore, reg); \ + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ + return val; \ +} +__vgpu_read(8) +__vgpu_read(16) +__vgpu_read(32) +__vgpu_read(64) + #define GEN2_READ_HEADER(x) \ u##x val = 0; \ assert_rpm_wakelock_held(uncore->rpm); @@ -1327,6 +1339,16 @@ __gen_reg_write_funcs(gen8); #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER +#define __vgpu_write(x) \ +static void \ 
+vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ + __raw_uncore_write##x(uncore, reg, val); \ +} +__vgpu_write(8) +__vgpu_write(16) +__vgpu_write(32) + #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_writeb = x##_write8; \ @@ -1647,7 +1669,10 @@ static void uncore_raw_init(struct intel_uncore *uncore) { GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); - if (IS_GEN(uncore->i915, 5)) { + if (intel_vgpu_active(uncore->i915)) { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu); + } else if (IS_GEN(uncore->i915, 5)) { ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); } else { diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index f22cfbf9353e..2e12a4a3bfa1 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -212,9 +212,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, if (!pdev->dev.of_node) return -ENODEV; - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; + hdmi = dev_get_drvdata(dev); + memset(hdmi, 0, sizeof(*hdmi)); match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node); plat_data = match->data; @@ -239,8 +238,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - platform_set_drvdata(pdev, hdmi); - hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data); /* @@ -270,6 +267,14 @@ static const struct component_ops dw_hdmi_imx_ops = { static int dw_hdmi_imx_probe(struct platform_device *pdev) { + struct imx_hdmi *hdmi; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + platform_set_drvdata(pdev, hdmi); + return component_add(&pdev->dev, &dw_hdmi_imx_ops); } diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index da87c70e413b..881c36d0f16b 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -281,9 +281,10 @@ static void imx_drm_unbind(struct device *dev) drm_kms_helper_poll_fini(drm); + component_unbind_all(drm->dev, drm); + drm_mode_config_cleanup(drm); - component_unbind_all(drm->dev, drm); dev_set_drvdata(dev, NULL); drm_dev_put(drm); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 695f307f36b2..116473c2360a 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + drm_panel_prepare(imx_ldb_ch->panel); if (dual) { @@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); u32 bus_format = imx_ldb_ch->bus_format; + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); + return; + } + if (mode->clock > 170000) { dev_warn(ldb->dev, "%s: mode exceeds 170 MHz pixel clock\n", __func__); @@ -302,18 +312,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) { struct imx_ldb_channel 
*imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0]) + if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; - else if (imx_ldb_ch == &ldb->channel[1]) + if (imx_ldb_ch == &ldb->channel[1] || dual) ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { + if (dual) { clk_disable_unprepare(ldb->clk[0]); clk_disable_unprepare(ldb->clk[1]); } @@ -593,9 +604,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) int ret; int i; - imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL); - if (!imx_ldb) - return -ENOMEM; + imx_ldb = dev_get_drvdata(dev); + memset(imx_ldb, 0, sizeof(*imx_ldb)); imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); if (IS_ERR(imx_ldb->regmap)) { @@ -703,8 +713,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) } } - dev_set_drvdata(dev, imx_ldb); - return 0; free_child: @@ -736,6 +744,14 @@ static const struct component_ops imx_ldb_ops = { static int imx_ldb_probe(struct platform_device *pdev) { + struct imx_ldb *imx_ldb; + + imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL); + if (!imx_ldb) + return -ENOMEM; + + platform_set_drvdata(pdev, imx_ldb); + return component_add(&pdev->dev, &imx_ldb_ops); } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 5bbfaa2cd0f4..f91c3eb7697b 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -494,6 +494,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) return 0; } +static void imx_tve_disable_regulator(void *data) +{ + struct imx_tve *tve = data; + + regulator_disable(tve->dac_reg); +} + static bool imx_tve_readable_reg(struct device *dev, unsigned int reg) { return (reg % 4 == 0) && (reg <= 0xdc); @@ -546,9 +553,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) int irq; int ret; - tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL); - if (!tve) - return -ENOMEM; + tve = dev_get_drvdata(dev); + memset(tve, 0, sizeof(*tve)); tve->dev = dev; spin_lock_init(&tve->lock); @@ -618,6 +624,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) ret = regulator_enable(tve->dac_reg); if (ret) return ret; + ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve); + if (ret) + return ret; } tve->clk = devm_clk_get(dev, "tve"); @@ -659,27 +668,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - dev_set_drvdata(dev, tve); - return 0; } -static void imx_tve_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_tve *tve = dev_get_drvdata(dev); - - if (!IS_ERR(tve->dac_reg)) - regulator_disable(tve->dac_reg); -} - static const struct component_ops imx_tve_ops = { .bind = imx_tve_bind, - .unbind = imx_tve_unbind, }; static int imx_tve_probe(struct platform_device *pdev) { + struct imx_tve *tve; + + tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL); + if (!tve) + return -ENOMEM; + + platform_set_drvdata(pdev, tve); + return component_add(&pdev->dev, &imx_tve_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 63c0284f8b3c..2256c9789fc2 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ 
b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -438,21 +438,13 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data) struct ipu_client_platformdata *pdata = dev->platform_data; struct drm_device *drm = data; struct ipu_crtc *ipu_crtc; - int ret; - ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL); - if (!ipu_crtc) - return -ENOMEM; + ipu_crtc = dev_get_drvdata(dev); + memset(ipu_crtc, 0, sizeof(*ipu_crtc)); ipu_crtc->dev = dev; - ret = ipu_crtc_init(ipu_crtc, pdata, drm); - if (ret) - return ret; - - dev_set_drvdata(dev, ipu_crtc); - - return 0; + return ipu_crtc_init(ipu_crtc, pdata, drm); } static void ipu_drm_unbind(struct device *dev, struct device *master, @@ -474,6 +466,7 @@ static const struct component_ops ipu_crtc_ops = { static int ipu_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct ipu_crtc *ipu_crtc; int ret; if (!dev->platform_data) @@ -483,6 +476,12 @@ static int ipu_drm_probe(struct platform_device *pdev) if (ret) return ret; + ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL); + if (!ipu_crtc) + return -ENOMEM; + + dev_set_drvdata(dev, ipu_crtc); + return component_add(dev, &ipu_crtc_ops); } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index e7ce17503ae1..be55548f352a 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -204,9 +204,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) u32 bus_format = 0; const char *fmt; - imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); - if (!imxpd) - return -ENOMEM; + imxpd = dev_get_drvdata(dev); + memset(imxpd, 0, sizeof(*imxpd)); edidp = of_get_property(np, "edid", &imxpd->edid_len); if (edidp) @@ -236,8 +235,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - dev_set_drvdata(dev, imxpd); - return 0; } @@ -259,6 +256,14 @@ static const struct component_ops imx_pd_ops = { static int imx_pd_probe(struct platform_device *pdev) { + struct imx_parallel_display *imxpd; + + imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL); + if (!imxpd) + return -ENOMEM; + + platform_set_drvdata(pdev, imxpd); + return component_add(&pdev->dev, &imx_pd_ops); } diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index f156f245fdec..7e6179fe63f8 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -375,9 +375,9 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, if (state && state->fb) { addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); - width = state->crtc->state->adjusted_mode.hdisplay; - height = state->crtc->state->adjusted_mode.vdisplay; - cpp = state->fb->format->cpp[plane->index]; + width = state->src_w >> 16; + height = state->src_h >> 16; + cpp = state->fb->format->cpp[0]; priv->dma_hwdesc->addr = addr; priv->dma_hwdesc->cmd = width * height * cpp / 4; @@ -467,7 +467,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) { - struct ingenic_drm *priv = arg; + struct ingenic_drm *priv = drm_device_get_priv(arg); unsigned int state; regmap_read(priv->map, JZ_REG_LCD_STATE, &state); @@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = { { .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); 
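
The one-line MODULE_DEVICE_TABLE(of, ...) addition above is what makes the ingenic driver autoloadable: it exports the OF match table into the module's alias section, so a matching compatible string in the device tree produces a modalias the module loader can resolve without a manual modprobe. The general shape of the pattern, with a placeholder compatible string:

        #include <linux/module.h>
        #include <linux/of.h>

        /* "vendor,example-lcd" is a placeholder, not a real binding. */
        static const struct of_device_id example_of_match[] = {
                { .compatible = "vendor,example-lcd" },
                { /* sentinel */ },
        };
        MODULE_DEVICE_TABLE(of, example_of_match);
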
static struct platform_driver ingenic_drm_driver = { .driver = { diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 751454ae3cd1..28ed50628501 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -946,6 +946,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe) struct drm_crtc *crtc = &pipe->crtc; struct drm_device *drm = crtc->dev; struct mcde *mcde = drm->dev_private; + struct drm_pending_vblank_event *event; if (mcde->te_sync) drm_crtc_vblank_off(crtc); @@ -953,6 +954,15 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe) /* Disable FIFO A flow */ mcde_disable_fifo(mcde, MCDE_FIFO_A, true); + event = crtc->state->event; + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } + dev_info(drm->dev, "MCDE display is disabled\n"); } diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 5649887d2b90..82946ffcb6d2 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -215,7 +215,6 @@ static int mcde_modeset_init(struct drm_device *drm) drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - drm_fbdev_generic_setup(drm, 32); return 0; @@ -282,6 +281,8 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; + drm_fbdev_generic_setup(drm, 32); + return 0; unbind: @@ -409,8 +410,8 @@ static int mcde_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { - ret = -EINVAL; + if (irq < 0) { + ret = irq; goto clk_disable; } diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 35bb825d1918..8c8c92fc82e9 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -940,10 +940,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, panel = NULL; bridge = of_drm_find_bridge(child); - if (IS_ERR(bridge)) { - dev_err(dev, "failed to find bridge (%ld)\n", - PTR_ERR(bridge)); - return PTR_ERR(bridge); + if (!bridge) { + dev_err(dev, "failed to find bridge\n"); + return -EINVAL; } } } diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index be6d95c5ff25..48de07e9059e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -10,7 +10,9 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/of_graph.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/types.h> @@ -73,6 +75,9 @@ struct mtk_dpi { enum mtk_dpi_out_yc_map yc_map; enum mtk_dpi_out_bit_num bit_num; enum mtk_dpi_out_channel_swap channel_swap; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_gpio; + struct pinctrl_state *pins_dpi; int refcount; }; @@ -378,6 +383,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) if (--dpi->refcount != 0) return; + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); + mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->engine_clk); @@ -402,6 +410,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_pixel; } + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + mtk_dpi_enable(dpi); return 0; @@ -689,6 +700,26 @@ static int mtk_dpi_probe(struct platform_device *pdev) dpi->dev = dev; 
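
mtk_dpi now drives two optional pinctrl states around its power transitions: "sleep" parks the DPI pins while the block is powered down and "default" routes them to the DPI function when it powers up, with the lookup code just below treating a missing pinctrl node or state as non-fatal. A condensed sketch of that optional-state pattern (names are illustrative):

        #include <linux/device.h>
        #include <linux/pinctrl/consumer.h>

        /* Treat pinctrl as optional: NULL simply disables state switching. */
        static void example_pins_init(struct device *dev,
                                      struct pinctrl **pctl,
                                      struct pinctrl_state **active,
                                      struct pinctrl_state **idle)
        {
                *active = NULL;
                *idle = NULL;

                *pctl = devm_pinctrl_get(dev);
                if (IS_ERR(*pctl)) {
                        *pctl = NULL;
                        return;
                }

                *idle = pinctrl_lookup_state(*pctl, "sleep");
                if (IS_ERR(*idle))
                        *idle = NULL;

                *active = pinctrl_lookup_state(*pctl, "default");
                if (IS_ERR(*active))
                        *active = NULL;
        }
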
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev); + dpi->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(dpi->pinctrl)) { + dpi->pinctrl = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl!\n"); + } + if (dpi->pinctrl) { + dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep"); + if (IS_ERR(dpi->pins_gpio)) { + dpi->pins_gpio = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n"); + } + if (dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); + + dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default"); + if (IS_ERR(dpi->pins_dpi)) { + dpi->pins_dpi = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n"); + } + } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); dpi->regs = devm_ioremap_resource(dev, mem); if (IS_ERR(dpi->regs)) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 352b81a7a670..f98bb2e26372 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -594,8 +594,13 @@ err_pm: pm_runtime_disable(dev); err_node: of_node_put(private->mutex_node); - for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) + for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { of_node_put(private->comp_node[i]); + if (private->ddp_comp[i]) { + put_device(private->ddp_comp[i]->larb_dev); + private->ddp_comp[i] = NULL; + } + } return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 584a9ecadce6..b7592b16ea94 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -101,6 +101,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, true, true); } +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + state->pending.dirty = true; +} + static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -115,6 +125,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return; + if (!plane->state->visible) { + mtk_plane_atomic_disable(plane, old_state); + return; + } + gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); addr = mtk_gem->dma_addr; @@ -136,16 +151,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.dirty = true; } -static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - - state->pending.enable = false; - wmb(); /* Make sure the above parameter is set before update */ - state->pending.dirty = true; -} - static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mtk_plane_atomic_check, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ce91b61364eb..6b22fd63c3f5 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1482,25 +1482,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to get system configuration registers: %d\n", ret); - return ret; + goto put_device; } hdmi->sys_regmap = regmap; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); 
+ if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); + goto put_device; + } remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; + if (!remote) { + ret = -EINVAL; + goto put_device; + } if (!of_device_is_compatible(remote, "hdmi-connector")) { hdmi->next_bridge = of_drm_find_bridge(remote); if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto put_device; } } @@ -1509,7 +1514,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n", remote); of_node_put(remote); - return -EINVAL; + ret = -EINVAL; + goto put_device; } of_node_put(remote); @@ -1517,10 +1523,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } return 0; +put_device: + put_device(hdmi->cec_dev); + return ret; } /* diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c index 5223498502c4..23a74eb5d7f8 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c @@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) hdmi_phy->conf->hdmi_phy_disable_tmds) return &mtk_hdmi_phy_dev_ops; - dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); - return NULL; + if (hdmi_phy) + dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); + return NULL; } static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index a24f8dec5adc..86d096111277 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -420,6 +420,16 @@ static int meson_probe_remote(struct platform_device *pdev, return count; } +static void meson_drv_shutdown(struct platform_device *pdev) +{ + struct meson_drm *priv = dev_get_drvdata(&pdev->dev); + struct drm_device *drm = priv->drm; + + DRM_DEBUG_DRIVER("\n"); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); +} + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -469,6 +479,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 1f83bc18d500..80f3b1da9fc2 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* NOTE: PM4/micro-engine firmware registers look to be the same * for a2xx and a3xx.. we could possibly push that part down to * adreno_gpu base class. 
Or push both PM4 and PFP but diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 5f7e98028eaf..eeba2deeca1e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -215,6 +215,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* setup access protection: */ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index ab2b752566d8..05cfa81d4c54 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -265,6 +265,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_A4XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* Load PM4: */ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 99cd6e62a971..e3579e5ffa14 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -581,8 +581,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); - /* Enable USE_RETENTION_FLOPS */ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); @@ -677,14 +675,21 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - a5xx_preempt_hw_init(gpu); - a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); if (ret) return ret; + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A5XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + a5xx_preempt_hw_init(gpu); + /* Disable the interrupts through the initial bringup stage */ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); @@ -1126,8 +1131,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { - *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO, - REG_A5XX_RBBM_PERFCTR_CP_0_HI); + *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, + REG_A5XX_RBBM_ALWAYSON_COUNTER_HI); return 0; } @@ -1359,6 +1364,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) { u64 busy_cycles, busy_time; + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) + return 0; + busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI); @@ -1367,6 +1376,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = busy_cycles; + pm_runtime_put(&gpu->pdev->dev); + if (WARN_ON(busy_time > ~0LU)) return ~0LU; @@ -1401,18 +1412,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { 
struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies an opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; cell = nvmem_cell_get(dev, "speed_bin"); - /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); - bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); - val = (1 << bin); + val = (1 << bin); + kfree(buf); + } + + nvmem_cell_put(cell); + } dev_pm_opp_set_supported_hw(dev, &val, 1); } @@ -1445,7 +1469,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + /* Restricting nr_rings to 1 to temporarily disable preemption */ + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index a3a06db675ba..ee3ff32da004 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -300,7 +300,7 @@ int a5xx_power_init(struct msm_gpu *gpu) /* Set up the limits management */ if (adreno_is_a530(adreno_gpu)) a530_lm_setup(gpu); - else + else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); /* Set up SP/TP power collpase */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 85f14feafdec..40431a09dc97 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -107,6 +107,13 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) struct msm_gpu *gpu = &adreno_gpu->base; int ret; + /* + * This can get called from devfreq while the hardware is idle. Don't + * bring up the power if it isn't already active + */ + if (pm_runtime_get_if_in_use(gmu->dev) == 0) + return; + gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, @@ -133,6 +140,7 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) * for now leave it at max so that the performance is nominal.
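The a5xx busy-cycle read above and the __a6xx_gmu_set_freq hunk being quoted here apply the same guard: pm_runtime_get_if_in_use() takes a reference only if the device is already powered, so a devfreq callback never wakes idle hardware just to poll a counter. A standalone model of the pattern, with a plain counter standing in for the runtime-PM core:

```c
/* Sketch: only touch the hardware if it is already awake. A plain
 * counter models pm_runtime_get_if_in_use()/pm_runtime_put(). */
#include <stdio.h>

static int usage_count;		/* >0 means the device is active */

static int get_if_in_use(void)
{
	if (usage_count == 0)
		return 0;	/* idle: no reference taken */
	usage_count++;
	return 1;
}

static void put_ref(void) { usage_count--; }

static unsigned long read_busy_cycles(void)
{
	if (get_if_in_use() == 0)
		return 0;	/* don't power up just to read a counter */

	unsigned long cycles = 12345;	/* stands in for a register read */

	put_ref();
	return cycles;
}

int main(void)
{
	printf("idle read:   %lu\n", read_busy_cycles());	/* 0 */
	usage_count = 1;					/* device active */
	printf("active read: %lu\n", read_busy_cycles());
	return 0;
}
```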
*/ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + pm_runtime_put(gmu->dev); } void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) @@ -191,12 +199,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); @@ -705,10 +723,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Turn on the resources */ pm_runtime_get_sync(gmu->dev); + /* + * "enable" the GX power domain which won't actually do anything but it + * will make sure that the refcounting is correct in case we need to + * bring down the GX after a GMU failure + */ + if (!IS_ERR_OR_NULL(gmu->gxpd)) + pm_runtime_get_sync(gmu->gxpd); + /* Use a known rate to bring up the GMU */ clk_set_rate(gmu->core_clk, 200000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); if (ret) { + pm_runtime_put(gmu->gxpd); pm_runtime_put(gmu->dev); return ret; } @@ -744,19 +771,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Set the GPU to the highest power frequency */ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); - /* - * "enable" the GX power domain which won't actually do anything but it - * will make sure that the refcounting is correct in case we need to - * bring down the GX after a GMU failure - */ - if (!IS_ERR_OR_NULL(gmu->gxpd)) - pm_runtime_get(gmu->gxpd); - out: /* On failure, shut down the GMU to leave it in a good state */ if (ret) { disable_irq(gmu->gmu_irq); a6xx_rpmh_stop(gmu); + pm_runtime_put(gmu->gxpd); pm_runtime_put(gmu->dev); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 686c34d706b0..df2656e57991 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -512,6 +512,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu) if (ret) goto out; + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; @@ -766,8 +773,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) /* Force the GPU power on so we can read this register */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); - *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO, - REG_A6XX_RBBM_PERFCTR_CP_0_HI); + *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); return 0; @@ -803,6 +810,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); u64 busy_cycles, busy_time; + + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0) + return 0; + busy_cycles = gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); @@ -812,6 +824,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = 
busy_cycles; + pm_runtime_put(a6xx_gpu->gmu.dev); + if (WARN_ON(busy_time > ~0LU)) return ~0LU; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 691c1a277d91..dfcbb2b7cdda 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, int i; a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count, - sizeof(a6xx_state->indexed_regs)); + sizeof(*a6xx_state->indexed_regs)); if (!a6xx_state->indexed_regs) return; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 048c8be426f3..3802ad38c519 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -350,30 +350,10 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } - /* - * Setup REG_CP_RB_CNTL. The same value is used across targets (with - * the excpetion of A430 that disables the RPTR shadow) - the cacluation - * for the ringbuffer size and block size is moved to msm_gpu.h for the - * pre-processor to deal with and the A430 variant is ORed in here - */ - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, - MSM_GPU_RB_CNTL_DEFAULT | - (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0)); - - /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); - - if (!adreno_is_a430(adreno_gpu)) { - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - rbmemptr(gpu->rb[0], rptr)); - } - return 0; } @@ -381,11 +361,8 @@ int adreno_hw_init(struct msm_gpu *gpu) static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) { - if (adreno_is_a430(adreno_gpu)) - return ring->memptrs->rptr = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); - else - return ring->memptrs->rptr; + return ring->memptrs->rptr = adreno_gpu_read( + adreno_gpu, REG_ADRENO_CP_RB_RPTR); } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index ce59adff06aa..4aed5e9a84a4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -381,7 +381,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); if (!fevent) { - DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event); + DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); return; } @@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, struct drm_plane *plane; struct drm_display_mode *mode; - int cnt = 0, rc = 0, mixer_width, i, z_pos; + int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; int multirect_count = 0; @@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, memset(pipe_staged, 0, sizeof(pipe_staged)); - mixer_width = mode->hdisplay / cstate->num_mixers; + if (cstate->num_mixers) { + mixer_width = mode->hdisplay / cstate->num_mixers; - _dpu_crtc_setup_lm_bounds(crtc, state); + _dpu_crtc_setup_lm_bounds(crtc, state); + } crtc_rect.x2 = mode->hdisplay; 
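The one-character a6xx_gpu_state change above fixes the classic sizeof(ptr) versus sizeof(*ptr) allocation bug: indexed_regs is reached through a pointer member, so sizeof(a6xx_state->indexed_regs) yields the size of the pointer rather than of one array element, undersizing the allocation. A standalone demonstration (the struct layout is invented):

```c
/* Sketch: kcalloc-style sizing bug. sizeof(st.regs) is the size of
 * the *pointer*; sizeof(*st.regs) is the size of one element. */
#include <stdio.h>
#include <stdlib.h>

struct indexed_reg {
	unsigned int addr;
	unsigned int count;
	unsigned int *data;	/* pads the element well past pointer size */
};

struct gpu_state {
	struct indexed_reg *regs;
};

int main(void)
{
	struct gpu_state st;
	size_t count = 8;

	printf("sizeof(st.regs)  = %zu (pointer)\n", sizeof(st.regs));
	printf("sizeof(*st.regs) = %zu (element)\n", sizeof(*st.regs));

	/* buggy: allocates count pointers' worth of bytes */
	st.regs = calloc(count, sizeof(st.regs));
	free(st.regs);

	/* fixed: allocates count full elements */
	st.regs = calloc(count, sizeof(*st.regs));
	free(st.regs);
	return 0;
}
```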
crtc_rect.y2 = mode->vdisplay; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index d82ea994063f..99d449ce4a07 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2185,7 +2185,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, dpu_enc = to_dpu_encoder_virt(enc); - mutex_init(&dpu_enc->enc_lock); ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); if (ret) goto fail; @@ -2200,7 +2199,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, 0); - mutex_init(&dpu_enc->rc_lock); INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); dpu_enc->idle_timeout = IDLE_TIMEOUT; @@ -2232,7 +2230,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); if (!dpu_enc) - return ERR_PTR(ENOMEM); + return ERR_PTR(-ENOMEM); rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, drm_enc_mode, NULL); @@ -2245,6 +2243,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; + mutex_init(&dpu_enc->enc_lock); + mutex_init(&dpu_enc->rc_lock); return &dpu_enc->base; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 58d5acbcfc5c..b984bafd27e2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -853,9 +853,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, - pdpu->pipe_sblk->maxupscale << 16, + pdpu->pipe_sblk->maxdwnscale << 16, true, true); if (ret) { DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c index eeef41fcd4e1..0425400f44db 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -41,7 +41,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, { struct mdp5_kms *mdp5_kms = get_kms(encoder); struct device *dev = encoder->dev->dev; - u32 total_lines_x100, vclks_line, cfg; + u32 total_lines, vclks_line, cfg; long vsync_clk_speed; struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); int pp_id = mixer->pp; @@ -51,8 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, return -EINVAL; } - total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode); - if (!total_lines_x100) { + total_lines = mode->vtotal * drm_mode_vrefresh(mode); + if (!total_lines) { DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n", __func__, mode->vtotal, drm_mode_vrefresh(mode)); return -EINVAL; @@ -64,15 +64,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, vsync_clk_speed); return -EINVAL; } - vclks_line = vsync_clk_speed * 100 / total_lines_x100; + vclks_line = vsync_clk_speed / total_lines; cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); + /* + * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on + * the vsync_clk equating to roughly half the desired panel refresh 
rate. + * This is only necessary as stability fallback if interrupts from the + * panel arrive too late or not at all, but is currently used by default + * because these panel interrupts are not wired up yet. + */ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); mdp5_write(mdp5_kms, - REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); + REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal)); + mdp5_write(mdp5_kms, REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 03c6d6157e4d..395146884a22 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1099,7 +1099,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, pp_done); - complete(&mdp5_crtc->pp_completion); + complete_all(&mdp5_crtc->pp_completion); } static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 91cd76a2bab1..77823ccdd0f8 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -1037,7 +1037,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) return 0; fail: - mdp5_destroy(pdev); + if (mdp5_kms) + mdp5_destroy(pdev); return ret; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index 1afb7c579dbb..eca86bf448f7 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .disable = dsi_20nm_phy_disable, .init = msm_dsi_phy_init_common, }, - .io_start = { 0xfd998300, 0xfd9a0300 }, + .io_start = { 0xfd998500, 0xfd9a0500 }, .num_dsi_phy = 2, }; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index aa9385d5bfff..33033b94935e 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; void __iomem *phy_base = pll_10nm->phy_cmn_mmio; u32 val; + int ret; val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); val &= ~0x3; @@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) val |= cached->pll_mux; pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val); + ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate); + if (ret) { + DRM_DEV_ERROR(&pll_10nm->pdev->dev, + "restore vco rate failed. ret=%d\n", ret); + return ret; + } + DBG("DSI PLL%d", pll_10nm->id); return 0; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 5ccfad794c6a..561bfa48841c 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane, return msm_framebuffer_prepare(new_state->fb, kms->aspace); } +/* + * Helpers to control vblanks while we flush.. 
basically just to ensure + * that vblank accounting is switched on, so we get valid seqn/timestamp + * on pageflip events (if requested) + */ + +static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_put(crtc); + } +} + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); @@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->enable_commit(kms); + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); @@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) */ kms->pending_crtc_mask &= ~crtc_mask; + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b73fbb65e14b..7443df77cadb 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -432,20 +432,22 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) drm_mode_config_init(ddev); + ret = msm_init_vram(ddev); + if (ret) + goto err_destroy_mdss; + /* Bind all our sub-components: */ ret = component_bind_all(dev, ddev); if (ret) goto err_destroy_mdss; - ret = msm_init_vram(ddev); - if (ret) - goto err_msm_uninit; - if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); @@ -565,6 +567,7 @@ err_free_priv: kfree(priv); err_put_drm_dev: drm_dev_put(ddev); + platform_set_drvdata(pdev, NULL); return ret; } @@ -1321,6 +1324,17 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + struct msm_drm_private *priv = drm ? 
drm->dev_private : NULL; + + if (!priv || !priv->kms) + return; + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1332,6 +1346,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index ad2703698b05..cd59a5918038 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, int ret; if (fence > fctx->last_fence) { - DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", + DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n", fctx->name, fence, fctx->last_fence); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5a6a79fbc9d6..d92a0ffe2a76 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -977,10 +977,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, static int msm_gem_new_impl(struct drm_device *dev, uint32_t size, uint32_t flags, - struct drm_gem_object **obj, - bool struct_mutex_locked) + struct drm_gem_object **obj) { - struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; switch (flags & MSM_BO_CACHE_MASK) { @@ -1006,15 +1004,6 @@ static int msm_gem_new_impl(struct drm_device *dev, INIT_LIST_HEAD(&msm_obj->submit_entry); INIT_LIST_HEAD(&msm_obj->vmas); - if (struct_mutex_locked) { - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - } else { - mutex_lock(&dev->struct_mutex); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - mutex_unlock(&dev->struct_mutex); - } - *obj = &msm_obj->base; return 0; @@ -1024,6 +1013,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags, bool struct_mutex_locked) { struct msm_drm_private *priv = dev->dev_private; + struct msm_gem_object *msm_obj; struct drm_gem_object *obj = NULL; bool use_vram = false; int ret; @@ -1044,14 +1034,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, if (size == 0) return ERR_PTR(-EINVAL); - ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked); + ret = msm_gem_new_impl(dev, size, flags, &obj); if (ret) goto fail; + msm_obj = to_msm_bo(obj); + if (use_vram) { struct msm_gem_vma *vma; struct page **pages; - struct msm_gem_object *msm_obj = to_msm_bo(obj); mutex_lock(&msm_obj->lock); @@ -1086,6 +1077,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); } + if (struct_mutex_locked) { + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + } else { + mutex_lock(&dev->struct_mutex); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + mutex_unlock(&dev->struct_mutex); + } + return obj; fail: @@ -1108,6 +1108,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt) { + struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; struct drm_gem_object *obj; 
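The msm_gem refactor above (completed just below for the import path) moves the list_add_tail() onto inactive_list out of msm_gem_new_impl() and to the end of each allocation path, so an object is only published on the shared list once it is fully constructed. A compact pthread sketch of that publish-after-init rule (list and object types are invented):

```c
/* Sketch: fully initialise an object before publishing it on a list
 * that other threads (e.g. a shrinker walking it) can observe. */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	int ready;		/* stands in for pages, vmas, ... */
	struct obj *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *inactive_list;

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	o->ready = 1;		/* ...expensive setup happens here... */

	/* publish last: list walkers only ever see complete objects */
	pthread_mutex_lock(&list_lock);
	o->next = inactive_list;
	inactive_list = o;
	pthread_mutex_unlock(&list_lock);
	return o;
}

int main(void)
{
	obj_new();		/* (list teardown elided in this sketch) */
	return 0;
}
```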
uint32_t size; @@ -1121,7 +1122,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, size = PAGE_ALIGN(dmabuf->size); - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false); + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); if (ret) goto fail; @@ -1146,6 +1147,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, } mutex_unlock(&msm_obj->lock); + + mutex_lock(&dev->struct_mutex); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + mutex_unlock(&dev->struct_mutex); + return obj; fail: diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index e397c44cc011..39ecb5a18431 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->id = id; ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo, + &ring->iova); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 001fbf537440..a1d94be7883a 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, queue->flags = flags; if (priv->gpu) { - if (prio >= priv->gpu->nr_rings) + if (prio >= priv->gpu->nr_rings) { + kfree(queue); return -EINVAL; + } queue->prio = prio; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index e8506335cd15..1694a7deb913 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -26,6 +26,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> @@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb) clk_disable_unprepare(mxsfb->clk_axi); } +static struct drm_framebuffer * +mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct drm_format_info *info; + + info = drm_get_format_info(dev, mode_cmd); + if (!info) + return ERR_PTR(-EINVAL); + + if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { + dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); + return ERR_PTR(-EINVAL); + } + + return drm_gem_fb_create(dev, file_priv, mode_cmd); +} + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { - .fb_create = drm_gem_fb_create, + .fb_create = mxsfb_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index d735ea7e2d88..daa79d39201f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -132,7 +132,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac) int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, - const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, + const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf, struct nv50_dmac *dmac) { struct nouveau_cli *cli = (void *)device->object.client; @@ -167,7 +167,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, if (ret) 
return ret; - if (!syncbuf) + if (syncbuf < 0) return 0; ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY, @@ -1032,8 +1032,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port); @@ -2032,8 +2034,10 @@ nv50_disp_atomic_commit(struct drm_device *dev, int ret, i; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index 7c41b0599d1a..284068fa6d00 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -70,7 +70,7 @@ struct nv50_dmac { int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, const s32 *oclass, u8 head, void *data, u32 size, - u64 syncbuf, struct nv50_dmac *dmac); + s64 syncbuf, struct nv50_dmac *dmac); void nv50_dmac_destroy(struct nv50_dmac *); u32 *evo_wait(struct nv50_dmac *, int nr); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index c9692df2b76c..46578108a430 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh, { u32 mode = 0x00; - if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > asyh->or.bpc * 3) - mode = DITHERING_MODE_DYNAMIC2X2; - } else { - mode = asyc->dither.mode; - } + if (asyc->dither.mode) { + if (asyc->dither.mode == DITHERING_MODE_AUTO) { + if (asyh->base.depth > asyh->or.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = asyc->dither.mode; + } - if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (asyh->or.bpc >= 8) - mode |= DITHERING_DEPTH_8BPC; - } else { - mode |= asyc->dither.depth; + if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { + if (asyh->or.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= asyc->dither.depth; + } } asyh->dither.enable = mode; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c index f7dbd965e4e7..b49a212af4d8 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c @@ -68,7 +68,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, int ret; ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, - &oclass, 0, &args, sizeof(args), 0, + &oclass, 0, &args, sizeof(args), -1, &wndw->wimm); if (ret) { NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h index f5f59261ea81..d1beaad0c82b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h @@ -14,6 +14,7 @@ enum dcb_connector_type { DCB_CONNECTOR_LVDS_SPWG = 0x41, DCB_CONNECTOR_DP = 0x46, DCB_CONNECTOR_eDP = 0x47, + DCB_CONNECTOR_mDP = 0x48, DCB_CONNECTOR_HDMI_0 = 0x60, DCB_CONNECTOR_HDMI_1 = 0x61, DCB_CONNECTOR_HDMI_C = 0x63, diff --git 
a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 282fd90b65e1..9ce7b0d4b876 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -497,6 +497,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, if (ret) { NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); + goto done; } ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index eb31c5b6c8e9..496e7dcd6b7d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -568,8 +568,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) pm_runtime_get_noresume(dev->dev); } else { ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return conn_status; + } } nv_encoder = nouveau_connector_ddc_detect(connector); @@ -1238,6 +1240,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb) case DCB_CONNECTOR_DMS59_DP0: case DCB_CONNECTOR_DMS59_DP1: case DCB_CONNECTOR_DP : + case DCB_CONNECTOR_mDP : case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort; case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP; case DCB_CONNECTOR_HDMI_0 : diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7dfbbbc1beea..3b13feca970f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data) int ret; ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev->dev); return ret; + } seq_printf(m, "0x%08x\n", nvif_rd32(&drm->client.device.object, 0x101000)); @@ -181,8 +183,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev); return ret; + } + ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2cd83849600f..5347e5bdee8c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev) kfree(drm); } +/* + * On some Intel PCIe bridge controllers doing a + * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear. + * Skipping the intermediate D3hot step seems to make it work again. This is + * probably caused by not meeting the expectation the involved AML code has + * when the GPU is put into D3hot state before invoking it. + * + * This leads to various manifestations of this issue: + * - AML code execution to power on the GPU hits an infinite loop (as the + * code waits on device memory to change). + * - kernel crashes, as all PCI reads return -1, which most code isn't able + * to handle well enough. + * + * In all cases dmesg will contain at least one line like this: + * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3' + * followed by a lot of nouveau timeouts. 
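A large share of the nouveau hunks above and below apply one recurring fix: pm_runtime_get_sync() increments the usage counter even when resume fails, so an error return must be paired with a put or the device can never autosuspend again. A toy model of why the extra put matters:

```c
/* Sketch: pm_runtime_get_sync() bumps the usage count even on failure,
 * so error paths must drop it again. A counter models the RPM core. */
#include <stdio.h>

static int usage_count;
static int resume_should_fail;

static int get_sync(void)
{
	usage_count++;				/* taken unconditionally */
	return resume_should_fail ? -5 : 0;	/* -EIO stand-in */
}

static void put_autosuspend(void) { usage_count--; }

static int open_device(void)
{
	int ret = get_sync();

	if (ret < 0) {
		put_autosuspend();	/* the fix: rebalance on error */
		return ret;
	}
	/* ... use the device ... */
	put_autosuspend();
	return 0;
}

int main(void)
{
	resume_should_fail = 1;
	open_device();
	printf("usage count after failed open: %d\n", usage_count); /* 0 */
	return 0;
}
```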
+ * + * In the \_SB.PCI0.PEG0.PG00._OFF code deeper down writes bit 0x80 to the not + * documented PCI config space register 0x248 of the Intel PCIe bridge + * controller (0x1901) in order to change the state of the PCIe link between + * the PCIe port and the GPU. There are alternative code paths using other + * registers, which seem to work fine (executed pre Windows 8): + * - 0xbc bit 0x20 (publicly available documentation claims 'reserved') + * - 0xb0 bit 0x10 (link disable) + * Changing the conditions inside the firmware by poking into the relevant + * addresses does resolve the issue, but it seemed to be ACPI private memory + * and not any device accessible memory at all, so there is no portable way of + * changing the conditions. + * On a XPS 9560 that means bits [0,3] on \CPEX need to be cleared. + * + * The only systems where this behavior can be seen are hybrid graphics laptops + * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether + * this issue only occurs in combination with listed Intel PCIe bridge + * controllers and the mentioned GPUs or other devices as well. + * + * documentation on the PCIe bridge controller can be found in the + * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2" + * Section "12 PCI Express* Controller (x16) Registers" + */ + +static void quirk_broken_nv_runpm(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct pci_dev *bridge = pci_upstream_bridge(pdev); + + if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (bridge->device) { + case 0x1901: + drm->old_pm_cap = pdev->pm_cap; + pdev->pm_cap = 0; + NV_INFO(drm, "Disabling PCI power management to avoid bug\n"); + break; + } +} + static int nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { @@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, if (ret) goto fail_drm_dev_init; + quirk_broken_nv_runpm(pdev); return 0; fail_drm_dev_init: @@ -736,7 +795,11 @@ static void nouveau_drm_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + /* revert our workaround */ + if (drm->old_pm_cap) + pdev->pm_cap = drm->old_pm_cap; nouveau_drm_device_remove(dev); } @@ -989,8 +1052,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) /* need to bring up power immediately if opening device */ ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); @@ -1072,8 +1137,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) long ret; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { case DRM_NOUVEAU_NVIF: diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 70f34cacc552..8104e3806499 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -138,6 +138,8 @@ struct nouveau_drm { struct list_head clients; + u8 old_pm_cap; + struct { struct agp_bridge_data *bridge; u32 base; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 
f439f0a5b43a..c09ea357e88f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user) struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); int ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put(drm->dev->dev); return ret; + } return 0; } @@ -315,7 +317,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -592,6 +594,7 @@ fini: drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 1324c19f4e5c..2dd9fcab464b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem) int ret; ret = pm_runtime_get_sync(dev); - if (WARN_ON(ret < 0 && ret != -EACCES)) + if (WARN_ON(ret < 0 && ret != -EACCES)) { + pm_runtime_put_autosuspend(dev); return; + } if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); @@ -76,8 +78,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) return ret; ret = pm_runtime_get_sync(dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev); goto out; + } ret = nouveau_vma_new(nvbo, vmm, &vma); pm_runtime_mark_last_busy(dev); @@ -193,7 +197,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, * to the caller, instead of a normal nouveau_bo ttm reference. */ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); if (ret) { - nouveau_bo_ref(NULL, &nvbo); + drm_gem_object_release(&nvbo->bo.base); + kfree(nvbo); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c002f8968507..9682f30ab6f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -176,6 +176,8 @@ void nouveau_mem_del(struct ttm_mem_reg *reg) { struct nouveau_mem *mem = nouveau_mem(reg); + if (!mem) + return; nouveau_mem_fini(mem); kfree(reg->mm_node); reg->mm_node = NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index feaac908efed..34403b810dba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) else nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) - /* - * A failing ttm_dma_tt_init() will call ttm_tt_destroy() - * and thus our nouveau_sgdma_destroy() hook, so we don't need - * to free nvbe here. 
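The nouveau_gem_new fix above and the nouveau_sgdma fix continuing below settle the same ownership question: when construction fails partway, the allocating function should unwind exactly what it set up itself instead of routing a half-built object through the generic destructor. A minimal sketch of the corrected shape (names are illustrative):

```c
/* Sketch: on partial-construction failure, undo only what this
 * function did (release + free), not the full destructor path. */
#include <stdlib.h>

struct bo {
	void *backing;
};

static int backing_init(struct bo *bo)
{
	bo->backing = malloc(64);
	return bo->backing ? 0 : -12;	/* -ENOMEM stand-in */
}

static void backing_release(struct bo *bo)
{
	free(bo->backing);
}

static struct bo *bo_new(void)
{
	struct bo *bo = calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	if (backing_init(bo)) {
		/* failed before the object was ever published: unwind
		 * locally rather than calling a generic destructor */
		free(bo);
		return NULL;
	}
	return bo;
}

int main(void)
{
	struct bo *bo = bo_new();
	if (bo) {
		backing_release(bo);
		free(bo);
	}
	return 0;
}
```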
- */ + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + kfree(nvbe); return NULL; + } return &nvbe->ttm.ttm; } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 668d4bd0c118..8556804e96ef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -112,11 +112,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_nouveau_svm_bind *args = data; unsigned target, cmd, priority; - unsigned long addr, end, size; + unsigned long addr, end; struct mm_struct *mm; args->va_start &= PAGE_MASK; - args->va_end &= PAGE_MASK; + args->va_end = ALIGN(args->va_end, PAGE_SIZE); /* Sanity check arguments */ if (args->reserved0 || args->reserved1) @@ -125,8 +125,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, return -EINVAL; if (args->va_start >= args->va_end) return -EINVAL; - if (!args->npages) - return -EINVAL; cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT; cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK; @@ -158,12 +156,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, if (args->stride) return -EINVAL; - size = ((unsigned long)args->npages) << PAGE_SHIFT; - if ((args->va_start + size) <= args->va_start) - return -EINVAL; - if ((args->va_start + size) > args->va_end) - return -EINVAL; - /* * Ok we are ask to do something sane, for now we only support migrate * commands but we will add things like memory policy (what to do on @@ -173,7 +165,12 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, mm = get_task_mm(current); down_read(&mm->mmap_sem); - for (addr = args->va_start, end = args->va_start + size; addr < end;) { + if (!cli->svm.svmm) { + up_read(&mm->mmap_sem); + return -EINVAL; + } + + for (addr = args->va_start, end = args->va_end; addr < end;) { struct vm_area_struct *vma; unsigned long next; @@ -181,6 +178,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, if (!vma) break; + addr = max(addr, vma->vm_start); next = min(vma->vm_end, end); /* This is a best effort so we ignore errors */ nouveau_dmem_migrate_vma(cli->drm, vma, addr, next); @@ -308,6 +306,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, struct drm_nouveau_svm_init *args = data; int ret; + /* We need to fail if svm is disabled */ + if (!cli->drm->svm) + return -ENOSYS; + /* Allocate tracking for SVM-enabled VMM. 
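The nouveau_svmm_bind change above stops deriving the range from the userspace npages field: the span now comes from va_start/va_end alone, with the end rounded up to a page boundary and each step of the walk clamped to the VMA actually found. A userspace sketch of that range walk, with VMA lookup faked by a fixed table:

```c
/* Sketch: page-align a user range and walk it VMA by VMA, clamping
 * the cursor to each region's bounds. Regions are a toy stand-in. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define ALIGN_UP(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

struct region { unsigned long start, end; };

static const struct region regions[] = {
	{ 0x10000, 0x20000 },
	{ 0x30000, 0x40000 },	/* note the hole before this region */
};
#define NREGIONS (sizeof(regions) / sizeof(regions[0]))

static const struct region *find_region(unsigned long addr)
{
	for (unsigned int i = 0; i < NREGIONS; i++)
		if (addr < regions[i].end)
			return &regions[i];
	return NULL;
}

int main(void)
{
	unsigned long addr = 0x10234 & PAGE_MASK;	/* va_start &= PAGE_MASK */
	unsigned long end = ALIGN_UP(0x3c001);		/* va_end rounded up */

	while (addr < end) {
		const struct region *r = find_region(addr);
		if (!r)
			break;
		unsigned long from = addr > r->start ? addr : r->start;
		unsigned long next = r->end < end ? r->end : end;

		printf("migrate [%#lx, %#lx)\n", from, next);
		addr = next;
	}
	return 0;
}
```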
*/ if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c index 9b16a08eb4d9..bf6d41fb0c9f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c @@ -27,10 +27,10 @@ void gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc) { struct nvkm_device *device = ior->disp->engine.subdev.device; - const u32 hoff = head * 0x800; + const u32 soff = nv50_ior_base(ior); const u32 ctrl = scdc & 0x3; - nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl); + nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl); ior->tmds.high_speed = !!(scdc & 0x2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index c578deb5867a..c71606a45d1d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1988,8 +1988,34 @@ gf100_gr_init_(struct nvkm_gr *base) { struct gf100_gr *gr = gf100_gr(base); struct nvkm_subdev *subdev = &base->engine.subdev; + struct nvkm_device *device = subdev->device; + bool reset = device->chipset == 0x137 || device->chipset == 0x138; u32 ret; + /* On certain GP107/GP108 boards, we trigger a weird issue where + * GR will stop responding to PRI accesses after we've asked the + * SEC2 RTOS to boot the GR falcons. This happens with far more + * frequency when cold-booting a board (ie. returning from D3). + * + * The root cause for this is not known and has proven difficult + * to isolate, with many avenues being dead-ends. + * + * A workaround was discovered by Karol, whereby putting GR into + * reset for an extended period right before initialisation + * prevents the problem from occurring. + * + * XXX: As RM does not require any such workaround, this is more + * of a hack than a true fix.
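The gf100_gr workaround described above and implemented just below defaults the reset pulse on for chipsets 0x137/0x138 while leaving it overridable through the NvGrResetWar config option, a reasonable shape for a hack whose root cause is unknown. A sketch of the reset pulse itself; the 0x200 offset, bit-12 mask and 50 ms hold mirror the hunk, while the register I/O layer is invented:

```c
/* Sketch: hold an engine in reset for a while before init. Register
 * I/O is stubbed; 0x200 bit 12 mirrors the hunk above. */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t mc_0x200 = 0x00001000;	/* engine enable bit set */

static uint32_t rd32(uint32_t addr) { (void)addr; return mc_0x200; }

static void mask32(uint32_t addr, uint32_t mask, uint32_t val)
{
	(void)addr;
	mc_0x200 = (mc_0x200 & ~mask) | val;
}

static void gr_reset_war(void)
{
	mask32(0x200, 0x1000, 0x0000);	/* put GR into reset */
	rd32(0x200);			/* read back to post the write */
	usleep(50 * 1000);		/* hold it there ~50 ms */
	mask32(0x200, 0x1000, 0x1000);	/* release reset */
	rd32(0x200);
}

int main(void)
{
	gr_reset_war();
	printf("0x200 = 0x%08x\n", mc_0x200);
	return 0;
}
```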
+ */ + reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset); + if (reset) { + nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); + nvkm_rd32(device, 0x000200); + msleep(50); + nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); + nvkm_rd32(device, 0x000200); + } + nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); ret = nvkm_falcon_get(gr->fecs.falcon, subdev); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 7deb81b6dbac..4b571cc6bc70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); - if (!shadow_fetch(bios, mthd, image.size)) { + if (!shadow_fetch(bios, mthd, image.base + image.size)) { nvkm_debug(subdev, "%08x: fetch failed\n", image.base); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f..8d9812a51ef6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV); + if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index c8ab1b5741a3..db7769cb33eb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00e4e4 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 7ef60895f43a..d0e80ad52684 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -33,7 +33,7 @@ static void gm200_i2c_aux_fini(struct gm200_i2c_aux *aux) { struct nvkm_device *device = aux->base.pad->i2c->subdev.device; - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000); + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000); } static int @@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl); return -EBUSY; } - } while (ctrl & 0x03010000); + } while (ctrl & 0x07010000); /* set some magic, and wait up to 1ms for it to 
appear */ - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq); + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq); timeout = 1000; do { ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50)); @@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) gm200_i2c_aux_fini(aux); return -EBUSY; } - } while ((ctrl & 0x03000000) != urep); + } while ((ctrl & 0x07000000) != urep); return 0; } @@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00d954 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c index d80dbc8f09b2..55a4ea4393c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ #include "priv.h" +#include <subdev/timer.h> static void gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i) @@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400)); nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000); } static void @@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400)); nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000); } static void @@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400)); nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); } void @@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus) intr1 &= ~stat; } } + + nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f)) + break; + ); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c index 9025ed1bd2a9..4caf3ef087e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ #include "priv.h" +#include <subdev/timer.h> static void gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i) @@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800)); nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000); } static void @@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800)); nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000); } static void @@ -53,7 +52,6 @@ 
gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i) u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800)); nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); - nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); } void @@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus) intr1 &= ~stat; } } + + nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f)) + break; + ); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index ee11ccaf0563..cb51e248cb41 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu) { struct nvkm_device *device = mmu->subdev.device; struct nvkm_mm *mm = &device->fb->ram->vram; - const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL); - const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP); - const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED); + const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL); + const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP); + const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED); u8 type = NVKM_MEM_KIND * !!mmu->func->kind; u8 heap = NVKM_MEM_VRAM; int heapM, heapN, heapU; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 4bdd63b57100..ac93dae2a9c8 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1151,6 +1151,31 @@ static const struct dss_features dra7xx_dss_feats = { .has_lcd_clk_src = true, }; +static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) +{ + struct platform_device *pdev = dss->pdev; + struct device_node *parent = pdev->dev.of_node; + struct device_node *port; + unsigned int i; + + for (i = 0; i < num_ports; i++) { + port = of_graph_get_port_by_id(parent, i); + if (!port) + continue; + + switch (dss->feat->ports[i]) { + case OMAP_DISPLAY_TYPE_DPI: + dpi_uninit_port(port); + break; + case OMAP_DISPLAY_TYPE_SDI: + sdi_uninit_port(port); + break; + default: + break; + } + } +} + static int dss_init_ports(struct dss_device *dss) { struct platform_device *pdev = dss->pdev; @@ -1168,13 +1193,13 @@ static int dss_init_ports(struct dss_device *dss) case OMAP_DISPLAY_TYPE_DPI: r = dpi_init_port(dss, pdev, port, dss->feat->model); if (r) - return r; + goto error; break; case OMAP_DISPLAY_TYPE_SDI: r = sdi_init_port(dss, pdev, port); if (r) - return r; + goto error; break; default: @@ -1183,31 +1208,15 @@ static int dss_init_ports(struct dss_device *dss) } return 0; + +error: + __dss_uninit_ports(dss, i); + return r; } static void dss_uninit_ports(struct dss_device *dss) { - struct platform_device *pdev = dss->pdev; - struct device_node *parent = pdev->dev.of_node; - struct device_node *port; - int i; - - for (i = 0; i < dss->feat->num_ports; i++) { - port = of_graph_get_port_by_id(parent, i); - if (!port) - continue; - - switch (dss->feat->ports[i]) { - case OMAP_DISPLAY_TYPE_DPI: - dpi_uninit_port(port); - break; - case OMAP_DISPLAY_TYPE_SDI: - sdi_uninit_port(port); - break; - default: - break; - } - } + __dss_uninit_ports(dss, dss->feat->num_ports); } static int dss_video_pll_probe(struct dss_device *dss) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 31502857f013..ce67891eedd4 
100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void) dss = of_find_matching_node(NULL, omapdss_of_match); if (dss == NULL || !of_device_is_available(dss)) - return 0; + goto put_node; omapdss_walk_device(dss, true); @@ -217,6 +217,8 @@ static int __init omapdss_boot_init(void) kfree(n); } +put_node: + of_node_put(dss); return 0; } diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 3c5ddbf30e97..f5e18802e7bc 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, if (omap_state->manually_updated) return; - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_vblank_on(crtc); + ret = drm_crtc_vblank_get(crtc); WARN_ON(ret != 0); + spin_lock_irq(&crtc->dev->event_lock); omap_crtc_arm_event(crtc); spin_unlock_irq(&crtc->dev->event_lock); } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 252f5ebb1acc..3dd6c0087edb 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -891,6 +891,7 @@ static int omap_dmm_probe(struct platform_device *dev) &omap_dmm->refill_pa, GFP_KERNEL); if (!omap_dmm->refill_va) { dev_err(&dev->dev, "could not allocate refill memory\n"); + ret = -ENOMEM; goto fail; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 8abb31f83ffc..f0ea782df836 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1935,7 +1935,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = { static const struct panel_desc lg_lb070wv8 = { .modes = &lg_lb070wv8_mode, .num_modes = 1, - .bpc = 16, + .bpc = 8, .size = { .width = 151, .height = 91, @@ -2382,12 +2382,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { static const struct panel_desc ortustech_com43h4m85ulc = { .modes = &ortustech_com43h4m85ulc_mode, .num_modes = 1, - .bpc = 8, + .bpc = 6, .size = { .width = 56, .height = 93, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 77c3a3855c68..ac9de37a8280 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) sg_free_table(&bo->sgts[i]); } } - kfree(bo->sgts); + kvfree(bo->sgts); } drm_gem_shmem_free_object(obj); @@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) kref_put(&mapping->refcount, panfrost_gem_mapping_release); } -void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo) { struct panfrost_gem_mapping *mapping; - mutex_lock(&bo->mappings.lock); list_for_each_entry(mapping, &bo->mappings.list, node) panfrost_gem_teardown_mapping(mapping); - mutex_unlock(&bo->mappings.lock); } int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index b3517ff9630c..8088d5fd8480 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ 
b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -82,7 +82,7 @@ struct panfrost_gem_mapping * panfrost_gem_mapping_get(struct panfrost_gem_object *bo, struct panfrost_file_priv *priv); void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); -void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); +void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo); void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 288e46c40673..1b9f68d8e9aa 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct panfrost_gem_object *bo = to_panfrost_bo(obj); + bool ret = false; if (atomic_read(&bo->gpu_usecount)) return false; - if (!mutex_trylock(&shmem->pages_lock)) + if (!mutex_trylock(&bo->mappings.lock)) return false; - panfrost_gem_teardown_mappings(bo); + if (!mutex_trylock(&shmem->pages_lock)) + goto unlock_mappings; + + panfrost_gem_teardown_mappings_locked(bo); drm_gem_shmem_purge_locked(obj); + ret = true; mutex_unlock(&shmem->pages_lock); - return true; + +unlock_mappings: + mutex_unlock(&bo->mappings.lock); + return ret; } static unsigned long diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 8822ec13a0d6..0d39a201c759 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) return 0; } +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev) +{ + /* + * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need + * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order + * to operate correctly. 
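+ * Presumably the GPU_PWR_KEY write gates access to the override + * registers, which is why the unlock value is written immediately + * before GPU_PWR_OVERRIDE1; both magic constants are defined in the + * panfrost_regs.h hunk further below. 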
+ */ + gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK); + gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16)); +} + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) { u32 quirks = 0; @@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) int ret; u32 val; + panfrost_gpu_init_quirks(pfdev); + /* Just turn on everything for now */ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, @@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) return err; } - panfrost_gpu_init_quirks(pfdev); panfrost_gpu_power_on(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 4112412087b2..468c51e7e46d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); + #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 5d75f8cf6477..bfd503d22088 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); if (!pages) { - kfree(bo->sgts); + kvfree(bo->sgts); bo->sgts = NULL; mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; @@ -494,8 +494,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, } bo->base.pages = pages; bo->base.pages_use_count = 1; - } else + } else { pages = bo->base.pages; + if (pages[page_offset]) { + /* Pages are already mapped, bail out. 
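+ * Presumably two faults on the same object can race to this + * point; the later one finds the pages already installed and + * must not map them a second time. 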
*/ + mutex_unlock(&bo->base.pages_lock); + goto out; + } + } mapping = bo->base.base.filp->f_mapping; mapping_set_unevictable(mapping); @@ -529,6 +535,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); +out: panfrost_gem_mapping_put(bomapping); return 0; @@ -600,6 +607,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) access_type = (fault_status >> 8) & 0x3; source_id = (fault_status >> 16); + mmu_write(pfdev, MMU_INT_CLEAR, mask); + /* Page fault only */ ret = -1; if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0) @@ -623,8 +632,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) access_type, access_type_name(pfdev, fault_status), source_id); - mmu_write(pfdev, MMU_INT_CLEAR, mask); - status &= ~mask; } diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index ea38ac60581c..eddaa62ad8b0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -51,6 +51,10 @@ #define GPU_STATUS 0x34 #define GPU_STATUS_PRFCNT_ACTIVE BIT(2) #define GPU_LATEST_FLUSH_ID 0x38 +#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */ +#define GPU_PWR_KEY_UNLOCK 0x2968A819 +#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */ +#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */ #define GPU_FAULT_STATUS 0x3C #define GPU_FAULT_ADDRESS_LO 0x40 #define GPU_FAULT_ADDRESS_HI 0x44 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index ef09dc6bc635..d082c194cccc 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, return ret; ret = qxl_release_reserve_list(release, true); - if (ret) + if (ret) { + qxl_release_free(qdev, release); return ret; - + } cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; cmd->flags = QXL_SURF_FLAG_KEEP_DATA; @@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 16d73b22f3f5..a6ee10cbcfdd 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -325,6 +325,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, head.id = i; head.flags = 0; + head.surface_id = 0; oldcount = qdev->monitors_config->count; if (crtc->state->active) { struct drm_display_mode *mode = &crtc->mode; @@ -523,8 +524,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane) cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - 
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return ret; @@ -665,8 +666,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.position.y = plane->state->crtc_y + fb->hot_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); if (old_cursor_bo != NULL) qxl_bo_unpin(old_cursor_bo); @@ -713,8 +714,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); } static void qxl_update_dumb_head(struct qxl_device *qdev, @@ -1236,6 +1237,10 @@ int qxl_modeset_init(struct qxl_device *qdev) void qxl_modeset_fini(struct qxl_device *qdev) { + if (qdev->dumb_shadow_bo) { + drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base); + qdev->dumb_shadow_bo = NULL; + } qxl_destroy_monitors_object(qdev); drm_mode_config_cleanup(&qdev->ddev); } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 5bebf1ea1c5d..3599db096973 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, goto out_release_backoff; rects = drawable_set_clipping(qdev, num_clips, clips_bo); - if (!rects) + if (!rects) { + ret = -EINVAL; goto out_release_backoff; - + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; @@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, } qxl_bo_kunmap(clips_bo); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_release_backoff: if (ret) diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 43688ecdd8a0..60ab7151b84d 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev, break; default: DRM_ERROR("unsupported image bit depth\n"); - return -EINVAL; /* TODO: cleanup */ + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + return -EINVAL; } image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; image->u.bitmap.x = width; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 8117a45b3610..72f3f1bbb40c 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, apply_surf_reloc(qdev, &reloc_info[i]); } + qxl_release_fence_buffer_objects(release); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); - if (ret) - qxl_release_backoff_reserve_list(release); - else - qxl_release_fence_buffer_objects(release); out_free_bos: out_free_release: diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index bfc1631093e9..9bdbe0db8795 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -218,7 +218,7 @@ int qxl_device_init(struct qxl_device *qdev, &(qdev->ram_header->cursor_ring_hdr), 
sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, - qdev->io_base + QXL_IO_NOTIFY_CMD, + qdev->io_base + QXL_IO_NOTIFY_CURSOR, false, &qdev->cursor_event); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index c6fd123f60b5..1e62e7bbf1b1 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4366,7 +4366,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; - if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (!pi->mem_gddr5) { @@ -5578,6 +5578,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.num_ps = 0; for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -5587,10 +5588,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.power_state[i].clock_info) return -EINVAL; ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(rdev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5612,8 +5611,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + rdev->pm.dpm.num_ps = i + 1; } - rdev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index d9e62ca65ab8..bd2e577c701f 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2128,7 +2128,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 4d1490fbb075..756a50e8aff2 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios; rdev->bios = NULL; - bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios; return true; +free_bios: + kfree(rdev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c 
b/drivers/gpu/drm/radeon/radeon_connectors.c index b684cd719612..bc63f4cecf5d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -883,8 +883,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -1029,8 +1031,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1167,8 +1171,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1251,8 +1257,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (radeon_connector->detected_hpd_without_ddc) { @@ -1666,8 +1674,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && radeon_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0826efd9b5f5..f9f74150d0d7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -631,8 +631,10 @@ radeon_crtc_set_config(struct drm_mode_set *set, dev = set->crtc->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_crtc_helper_set_config(set, ctx); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6128792ab883..c2573096d43c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -174,12 +174,7 @@ int radeon_no_wb; int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; -#ifdef __powerpc__ -/* Default to PCI on PowerPC (fdo #95017) */ int radeon_agpmode = -1; -#else -int radeon_agpmode = 0; -#endif int radeon_vram_limit = 0; int radeon_gart_size = -1; /* auto */ int radeon_benchmarking = 0; @@ -555,8 +550,10 @@ long radeon_drm_ioctl(struct file *filp, long ret; dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 2bb0187c5bc7..03d3550ecc7c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -512,6 +512,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *value = rdev->config.si.backend_enable_mask; } else { 
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); + return -EINVAL; } break; case RADEON_INFO_MAX_SCLK: @@ -638,8 +639,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); return r; + } /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 1529849e217e..7cdba77b1420 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -23,6 +23,7 @@ config DRM_RCAR_DW_HDMI config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" depends on DRM && DRM_BRIDGE && OF + select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index c6430027169f..a0021fc25b27 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) drm_plane_create_alpha_property(&plane->plane); - if (type == DRM_PLANE_TYPE_PRIMARY) - continue; - - drm_object_attach_property(&plane->plane.base, - rcdu->props.colorkey, - RCAR_DU_COLORKEY_NONE); - drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); + if (type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(&plane->plane, + 0); + } else { + drm_object_attach_property(&plane->plane.base, + rcdu->props.colorkey, + RCAR_DU_COLORKEY_NONE); + drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); + } } return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 5e4faf258c31..f1a81c9b184d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, drm_plane_helper_add(&plane->plane, &rcar_du_vsp_plane_helper_funcs); - if (type == DRM_PLANE_TYPE_PRIMARY) - continue; - - drm_plane_create_alpha_property(&plane->plane); - drm_plane_create_zpos_property(&plane->plane, 1, 1, - vsp->num_planes - 1); + if (type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(&plane->plane, + 0); + } else { + drm_plane_create_alpha_property(&plane->plane); + drm_plane_create_zpos_property(&plane->plane, 1, 1, + vsp->num_planes - 1); + } } return 0; diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index f38f5e113c6b..ce98c08aa8b4 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -325,15 +325,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, void *data) { struct rockchip_dp_device *dp = dev_get_drvdata(dev); - const struct rockchip_dp_chip_data *dp_data; struct drm_device *drm_dev = data; int ret; - dp_data = of_device_get_match_data(dev); - if (!dp_data) - return -ENODEV; - - dp->data = dp_data; dp->drm_dev = drm_dev; ret = rockchip_dp_drm_create_encoder(dp); @@ -344,16 +338,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.encoder = &dp->encoder; - dp->plat_data.dev_type = dp->data->chip_type; - dp->plat_data.power_on_start = rockchip_dp_poweron_start; - dp->plat_data.power_off = rockchip_dp_powerdown; - dp->plat_data.get_modes = rockchip_dp_get_modes; - - dp->adp = 
analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); - if (IS_ERR(dp->adp)) { - ret = PTR_ERR(dp->adp); + ret = analogix_dp_bind(dp->adp, drm_dev); + if (ret) goto err_cleanup_encoder; - } return 0; err_cleanup_encoder: @@ -368,8 +355,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master, analogix_dp_unbind(dp->adp); dp->encoder.funcs->destroy(&dp->encoder); - - dp->adp = ERR_PTR(-ENODEV); } static const struct component_ops rockchip_dp_component_ops = { @@ -380,10 +365,15 @@ static const struct component_ops rockchip_dp_component_ops = { static int rockchip_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct rockchip_dp_chip_data *dp_data; struct drm_panel *panel = NULL; struct rockchip_dp_device *dp; int ret; + dp_data = of_device_get_match_data(dev); + if (!dp_data) + return -ENODEV; + ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); if (ret < 0) return ret; @@ -394,7 +384,12 @@ static int rockchip_dp_probe(struct platform_device *pdev) dp->dev = dev; dp->adp = ERR_PTR(-ENODEV); + dp->data = dp_data; dp->plat_data.panel = panel; + dp->plat_data.dev_type = dp->data->chip_type; + dp->plat_data.power_on_start = rockchip_dp_poweron_start; + dp->plat_data.power_off = rockchip_dp_powerdown; + dp->plat_data.get_modes = rockchip_dp_get_modes; ret = rockchip_dp_of_probe(dp); if (ret < 0) @@ -402,12 +397,19 @@ static int rockchip_dp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dp); + dp->adp = analogix_dp_probe(dev, &dp->plat_data); + if (IS_ERR(dp->adp)) + return PTR_ERR(dp->adp); + return component_add(dev, &rockchip_dp_component_ops); } static int rockchip_dp_remove(struct platform_device *pdev) { + struct rockchip_dp_device *dp = platform_get_drvdata(pdev); + component_del(&pdev->dev, &rockchip_dp_component_ops); + analogix_dp_remove(dp->adp); return 0; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 2af64459b3d7..37679507f943 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct *work) unsigned long flags; sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); + + /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ + spin_lock_irqsave(&sched->job_list_lock, flags); job = list_first_entry_or_null(&sched->ring_mirror_list, struct drm_sched_job, node); if (job) { + /* + * Remove the bad job so it cannot be freed by concurrent + * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread + * is parked at which point it's safe. + */ + list_del_init(&job->node); + spin_unlock_irqrestore(&sched->job_list_lock, flags); + job->sched->ops->timedout_job(job); /* @@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct *work) job->sched->ops->free_job(job); sched->free_guilty = false; } + } else { + spin_unlock_irqrestore(&sched->job_list_lock, flags); } spin_lock_irqsave(&sched->job_list_lock, flags); @@ -369,6 +382,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) kthread_park(sched->thread); + /* + * Reinsert back the bad job here - now it's safe as + * drm_sched_get_cleanup_job cannot race against us and release the + * bad job at this point - we parked (waited for) any in progress + * (earlier) cleanups and drm_sched_get_cleanup_job will not be called + * now until the scheduler thread is unparked. 
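+ * + * Put differently: with the thread parked, nothing else should be + * walking ring_mirror_list at this point, which is what makes the + * plain list_add() below safe. 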
+ */ + if (bad && bad->sched == sched) + /* + * Add at the head of the queue to reflect it was the earliest + * job extracted. + */ + list_add(&bad->node, &sched->ring_mirror_list); + /* * Iterate the job list from later to earlier one and either deactive * their HW callbacks or remove them from mirror list if they already @@ -496,8 +523,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) fence = sched->ops->run_job(s_job); if (IS_ERR_OR_NULL(fence)) { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + s_job->s_fence->parent = NULL; - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); } else { s_job->s_fence->parent = fence; } @@ -627,7 +656,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) trace_drm_sched_process_job(s_fence); + dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence); + dma_fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } @@ -746,8 +777,9 @@ static int drm_sched_main(void *param) r); dma_fence_put(fence); } else { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); drm_sched_process_job(NULL, &sched_job->cb); } @@ -819,6 +851,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) if (sched->thread) kthread_stop(sched->thread); + /* Confirm no work left behind accessing device structures */ + cancel_delayed_work_sync(&sched->work_tdr); + sched->ready = false; } EXPORT_SYMBOL(drm_sched_fini); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 3ab4fbf8eb0d..51571f7246ab 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -424,9 +424,12 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); + struct drm_device *ddev = crtc->dev; DRM_DEBUG_DRIVER("\n"); + pm_runtime_get_sync(ddev->dev); + /* Sets the background color value */ reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 4e29f4fe4a05..99f081ccc15d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, /* We can't have an alpha plane at the lowest position */ if (!backend->quirks->supports_lowest_plane_alpha && - (plane_states[0]->fb->format->has_alpha || - (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))) + (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)) return -EINVAL; for (i = 1; i < num_planes; i++) { @@ -986,7 +985,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = { static const struct sun4i_backend_quirks sun7i_backend_quirks = { .needs_output_muxing = true, - .supports_lowest_plane_alpha = true, }; static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index ec2a032e07b9..7186ba73d8e1 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend, struct drm_framebuffer *fb = state->fb; const struct drm_format_info *format = fb->format; uint64_t modifier = fb->modifier; + unsigned int ch1_phase_idx; u32 out_fmt_val; u32 in_fmt_val, in_mod_val, in_ps_val; unsigned int i; @@ -442,18 +443,19 @@ int 
sun4i_frontend_update_formats(struct sun4i_frontend *frontend, * I have no idea what this does exactly, but it seems to be * related to the scaler FIR filter phase parameters. */ + ch1_phase_idx = (format->num_planes > 1) ? 1 : 0; regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG, - frontend->data->ch_phase[0].horzphase); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG, - frontend->data->ch_phase[1].horzphase); + frontend->data->ch_phase[ch1_phase_idx]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG, - frontend->data->ch_phase[0].vertphase[0]); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG, - frontend->data->ch_phase[1].vertphase[0]); + frontend->data->ch_phase[ch1_phase_idx]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, - frontend->data->ch_phase[0].vertphase[1]); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, - frontend->data->ch_phase[1].vertphase[1]); + frontend->data->ch_phase[ch1_phase_idx]); /* * Checking the input format is sufficient since we currently only @@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = { }; static const struct sun4i_frontend_data sun4i_a10_frontend = { - .ch_phase = { - { - .horzphase = 0, - .vertphase = { 0, 0 }, - }, - { - .horzphase = 0xfc000, - .vertphase = { 0xfc000, 0xfc000 }, - }, - }, + .ch_phase = { 0x000, 0xfc000 }, .has_coef_rdy = true, }; static const struct sun4i_frontend_data sun8i_a33_frontend = { - .ch_phase = { - { - .horzphase = 0x400, - .vertphase = { 0x400, 0x400 }, - }, - { - .horzphase = 0x400, - .vertphase = { 0x400, 0x400 }, - }, - }, + .ch_phase = { 0x400, 0xfc400 }, .has_coef_access_ctrl = true, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h index 0c382c1ddb0f..2e7b76e50c2b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.h +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h @@ -115,11 +115,7 @@ struct reset_control; struct sun4i_frontend_data { bool has_coef_access_ctrl; bool has_coef_rdy; - - struct { - u32 horzphase; - u32 vertphase[2]; - } ch_phase[2]; + u32 ch_phase[2]; }; struct sun4i_frontend { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 7ad3f06c127e..00ca35f07ba5 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -148,7 +148,7 @@ #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 #define SUN4I_HDMI_DDC_CLK_REG 0x528 -#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) +#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3) #define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) #define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540 diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 2ff780114106..12430b9d4e93 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, unsigned long best_rate = 0; u8 best_m = 0, best_n = 0, _m, _n; - for (_m = 0; _m < 8; _m++) { + for (_m = 0; _m < 16; _m++) { for (_n = 0; _n < 8; _n++) { unsigned long tmp_rate; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 9c3bdfd20337..4acdfa608775 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -262,9 +262,8 @@ sun4i_hdmi_connector_detect(struct drm_connector 
*connector, bool force) struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); unsigned long reg; - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, - reg & SUN4I_HDMI_HPD_HIGH, - 0, 500000)) { + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 27c80c9e2b83..eb3b2350687f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -545,30 +545,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, if (info->bus_flags & DRM_BUS_FLAG_DE_LOW) val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE; - /* - * On A20 and similar SoCs, the only way to achieve Positive Edge - * (Rising Edge), is setting dclk clock phase to 2/3(240°). - * By default TCON works in Negative Edge(Falling Edge), - * this is why phase is set to 0 in that case. - * Unfortunately there's no way to logically invert dclk through - * IO_POL register. - * The only acceptable way to work, triple checked with scope, - * is using clock phase set to 0° for Negative Edge and set to 240° - * for Positive Edge. - * On A33 and similar SoCs there would be a 90° phase option, - * but it divides also dclk by 2. - * Following code is a way to avoid quirks all around TCON - * and DOTCLOCK drivers. - */ - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) - clk_set_phase(tcon->dclk, 240); - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) - clk_set_phase(tcon->dclk, 0); + val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE; regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE | + SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE | SUN4I_TCON0_IO_POL_DE_NEGATIVE, val); @@ -665,6 +648,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, SUN4I_TCON1_BASIC5_V_SYNC(vsync) | SUN4I_TCON1_BASIC5_H_SYNC(hsync)); + /* Setup the polarity of multiple signals */ + if (tcon->quirks->polarity_in_ch0) { + val = 0; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val); + } else { + /* according to vendor driver, this bit must be always set */ + val = SUN4I_TCON1_IO_POL_UNKNOWN; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val); + } + /* Map output pins to channel 1 */ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, SUN4I_TCON_GCTL_IOMAP_MASK, @@ -1409,14 +1416,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) { ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } return 0; @@ -1478,6 +1489,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { .has_channel_1 = true, + .polarity_in_ch0 = true, .set_mux = sun8i_r40_tcon_tv_set_mux, }; @@ -1504,6 
+1516,8 @@ const struct of_device_id sun4i_tcon_of_table[] = { { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks }, { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks }, { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks }, + { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_quirks }, + { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks }, { .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks }, { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks }, { .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks }, diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index a62ec826ae71..ce500c8dd4c7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -113,6 +113,7 @@ #define SUN4I_TCON0_IO_POL_REG 0x88 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28) #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27) +#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26) #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25) #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24) @@ -153,6 +154,11 @@ #define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff) #define SUN4I_TCON1_IO_POL_REG 0xf0 +/* there is no documentation about this bit */ +#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26) +#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25) +#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24) + #define SUN4I_TCON1_IO_TRI_REG 0xf4 #define SUN4I_TCON_ECC_FIFO_REG 0xf8 @@ -224,6 +230,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? 
*/ + bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index f83522717488..f2b288037b90 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder) struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder); struct mipi_dsi_device *device = dsi->device; - union phy_configure_opts opts = { 0 }; + union phy_configure_opts opts = { }; struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy; u16 delay; @@ -867,7 +867,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); if (!bounce) return -ENOMEM; @@ -878,7 +878,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); len += sizeof(crc); - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); kfree(bounce); diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index f42441b1b14d..a55a38ad849c 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -12,7 +12,7 @@ struct sun8i_mixer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 -#define CCSC01_OFFSET 0xFA000 +#define CCSC01_OFFSET 0xFA050 #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index a44dca4b0219..8f721be26477 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -49,11 +49,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, { /* * Controller support maximum of 594 MHz, which correlates to - * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than - * 340 MHz scrambling has to be enabled. Because scrambling is - * not yet implemented, just limit to 340 MHz for now. + * 4K@60Hz 4:4:4 or RGB. 
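+ * The old 340 MHz cap existed only because scrambling was not + * implemented; raising the limit to the full 594 MHz assumes the + * dw-hdmi core now enables scrambling for such modes. 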
*/ - if (mode->clock > 340000) + if (mode->clock > 594000) return MODE_CLOCK_HIGH; return MODE_OK; @@ -209,6 +207,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, phy_node = of_parse_phandle(dev->of_node, "phys", 0); if (!phy_node) { dev_err(dev, "Can't found PHY phandle\n"); + ret = -EINVAL; goto err_disable_clk_tmds; } diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 43643ad31730..a4012ec13d4b 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = { static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ - { 25175000, { 0x0000, 0x0000, 0x0000 }, }, { 27000000, { 0x0012, 0x0000, 0x0000 }, }, - { 59400000, { 0x0008, 0x0008, 0x0008 }, }, - { 72000000, { 0x0008, 0x0008, 0x001b }, }, - { 74250000, { 0x0013, 0x0013, 0x0013 }, }, - { 90000000, { 0x0008, 0x001a, 0x001b }, }, - { 118800000, { 0x001b, 0x001a, 0x001b }, }, - { 144000000, { 0x001b, 0x001a, 0x0034 }, }, - { 180000000, { 0x001b, 0x0033, 0x0034 }, }, - { 216000000, { 0x0036, 0x0033, 0x0034 }, }, - { 237600000, { 0x0036, 0x0033, 0x001b }, }, - { 288000000, { 0x0036, 0x001b, 0x001b }, }, - { 297000000, { 0x0019, 0x001b, 0x0019 }, }, - { 330000000, { 0x0036, 0x001b, 0x001b }, }, - { 594000000, { 0x003f, 0x001b, 0x001b }, }, + { 74250000, { 0x0013, 0x001a, 0x001b }, }, + { 148500000, { 0x0019, 0x0033, 0x0034 }, }, + { 297000000, { 0x0019, 0x001b, 0x001b }, }, + { 594000000, { 0x0010, 0x001b, 0x001b }, }, { ~0UL, { 0x0000, 0x0000, 0x0000 }, } }; static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = { /*pixelclk symbol term vlev*/ - { 74250000, 0x8009, 0x0004, 0x0232}, - { 148500000, 0x8029, 0x0004, 0x0273}, - { 594000000, 0x8039, 0x0004, 0x014a}, + { 27000000, 0x8009, 0x0007, 0x02b0 }, + { 74250000, 0x8009, 0x0006, 0x022d }, + { 148500000, 0x8029, 0x0006, 0x0270 }, + { 297000000, 0x8039, 0x0005, 0x01ab }, + { 594000000, 0x8029, 0x0000, 0x008a }, { ~0UL, 0x0000, 0x0000, 0x0000} }; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 18b4881f4481..12b99ba57501 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -396,7 +396,7 @@ static struct regmap_config sun8i_mixer_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 0xbfffc, /* guessed */ + .max_register = 0xffffc, /* guessed */ }; static int sun8i_mixer_of_get_id(struct device_node *node) diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index fbf57bc3cdab..617cbe468aec 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1667,6 +1667,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, dev_err(dc->dev, "failed to set clock rate to %lu Hz\n", state->pclk); + + err = clk_set_rate(dc->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", + dc->clk, state->pclk, err); } DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), @@ -1677,11 +1682,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); } - - err = clk_set_rate(dc->clk, state->pclk); - if (err < 0) - dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", - dc->clk, state->pclk, err); } static void tegra_dc_stop(struct tegra_dc *dc) diff --git 
a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index bc7cc32140f8..6833dfad7241 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -256,7 +256,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) if (!fpriv) return -ENOMEM; - idr_init(&fpriv->contexts); + idr_init_base(&fpriv->contexts, 1); mutex_init(&fpriv->lock); filp->driver_priv = fpriv; diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 839b49c40e51..767fb440a79d 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -141,7 +141,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_enable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_enable(wgrp); } return 0; @@ -158,7 +160,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_disable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_disable(wgrp); } } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 75e65d9536d5..6c3d22165239 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2899,6 +2899,7 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) { dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); + clk_disable_unprepare(sor->clk); return err; } @@ -2906,12 +2907,17 @@ static int tegra_sor_init(struct host1x_client *client) } err = clk_prepare_enable(sor->clk_safe); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk); return err; + } err = clk_prepare_enable(sor->clk_dp); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk); return err; + } /* * Enable and unmask the HDA codec SCRATCH0 register interrupt. 
This diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 5584e656b857..8c4fd1aa4c2d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -143,12 +143,16 @@ static int panel_connector_get_modes(struct drm_connector *connector) int i; for (i = 0; i < timings->num_timings; i++) { - struct drm_display_mode *mode = drm_mode_create(dev); + struct drm_display_mode *mode; struct videomode vm; if (videomode_from_timings(timings, &vm, i)) break; + mode = drm_mode_create(dev); + if (!mode) + break; + drm_display_mode_from_videomode(&vm, mode); mode->type = DRM_MODE_TYPE_DRIVER; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f07803699809..3ffcbaa13878 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -517,8 +517,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) dma_resv_unlock(bo->base.resv); } - if (bo->base.resv != &bo->base._resv) + if (bo->base.resv != &bo->base._resv) { + ttm_bo_flush_all_fences(bo); dma_resv_unlock(&bo->base._resv); + } error: kref_get(&bo->list_kref); @@ -759,7 +761,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, /* Don't evict this BO if it's outside of the * requested placement range */ - if (place->fpfn >= (bo->mem.start + bo->mem.size) || + if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) || (place->lpfn && place->lpfn <= bo->mem.start)) return false; @@ -939,8 +941,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (!fence) return 0; - if (no_wait_gpu) + if (no_wait_gpu) { + dma_fence_put(fence); return -EBUSY; + } dma_resv_add_shared_fence(bo->base.resv, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 46dc3de7e81b..f2bad14ac04a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -358,8 +358,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { - unsigned long offset = (addr) - vma->vm_start; struct ttm_buffer_object *bo = vma->vm_private_data; + unsigned long offset = (addr) - vma->vm_start + + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) + << PAGE_SHIFT); int ret; if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e0e9b4f69db6..c770ec7e9e8b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ttm_tt_init_fields(ttm, bo, page_flags); if (ttm_tt_alloc_page_directory(ttm)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, else ret = ttm_dma_tt_alloc_page_directory(ttm_dma); if (ret) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index d733bbc4ac0e..17ff24d999d1 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ 
b/drivers/gpu/drm/tve200/tve200_display.c @@ -14,6 +14,7 @@ #include <linux/version.h> #include <linux/dma-buf.h> #include <linux/of_graph.h> +#include <linux/delay.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> @@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, struct drm_connector *connector = priv->connector; u32 format = fb->format->format; u32 ctrl1 = 0; + int retries; clk_prepare_enable(priv->clk); + /* Reset the TVE200 and wait for it to come back online */ + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + for (retries = 0; retries < 5; retries++) { + usleep_range(30000, 50000); + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) + continue; + else + break; + } + if (retries == 5 && + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { + dev_err(drm->dev, "can't get hardware out of reset\n"); + return; + } + /* Function 1 */ ctrl1 |= TVE200_CTRL_CSMODE; /* Interlace mode for CCIR656: parameterize? */ @@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) drm_crtc_vblank_off(crtc); - /* Disable and Power Down */ + /* Disable, put into reset and Power Down */ writel(0, priv->regs + TVE200_CTRL); + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); clk_disable_unprepare(priv->clk); } @@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) struct drm_device *drm = crtc->dev; struct tve200_drm_dev_private *priv = drm->dev_private; + /* Clear any IRQs and enable */ + writel(0xFF, priv->regs + TVE200_INT_CLR); writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); return 0; } diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 416f24823c0a..02836d4e8023 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -210,8 +210,8 @@ static int tve200_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { - ret = -EINVAL; + if (irq < 0) { + ret = irq; goto clk_disable; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 5e6fb6c2307f..0d78ba017a29 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -309,6 +309,7 @@ unbind_all: component_unbind_all(dev, drm); gem_destroy: vc4_gem_destroy(drm); + drm_mode_config_cleanup(drm); vc4_bo_cache_destroy(drm); dev_put: drm_dev_put(drm); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 0853b980bcb3..54435b72b761 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -681,11 +681,23 @@ static enum drm_mode_status vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { - /* HSM clock must be 108% of the pixel clock. Additionally, - * the AXI clock needs to be at least 25% of pixel clock, but - * HSM ends up being the limiting factor. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to leave a 1% margin between + * both clocks. 
Which, for RPi0-3 implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor. */ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) + if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) return MODE_CLOCK_HIGH; return MODE_OK; @@ -1113,6 +1125,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->num_links = 1; card->name = "vc4-hdmi"; card->dev = dev; + card->owner = THIS_MODULE; /* * Be careful, snd_soc_register_card() calls dev_set_drvdata() and diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 5e5f90810aca..363f456ea713 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -205,7 +205,7 @@ static void vc4_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &vc4_state->base); } -static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state) { if (vc4_state->dlist_count == vc4_state->dlist_size) { u32 new_size = max(4u, vc4_state->dlist_count * 2); @@ -220,7 +220,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) vc4_state->dlist_size = new_size; } - vc4_state->dlist[vc4_state->dlist_count++] = val; + vc4_state->dlist_count++; +} + +static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +{ + unsigned int idx = vc4_state->dlist_count; + + vc4_dlist_counter_increment(vc4_state); + vc4_state->dlist[idx] = val; } /* Returns the scl0/scl1 field based on whether the dimensions need to @@ -871,8 +879,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * be set when calling vc4_plane_allocate_lbm(). */ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || - vc4_state->y_scaling[1] != VC4_SCALING_NONE) - vc4_state->lbm_offset = vc4_state->dlist_count++; + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { + vc4_state->lbm_offset = vc4_state->dlist_count; + vc4_dlist_counter_increment(vc4_state); + } if (num_planes > 1) { /* Emit Cb/Cr as channel 0 and Y as channel diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 909eba43664a..204d1df5a21d 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -229,32 +229,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret; - - obj = drm_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; - - if (!obj->filp) { - ret = -EINVAL; - goto unref; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto unref; - - *offset = drm_vma_node_offset_addr(&obj->vma_node); -unref: - drm_gem_object_put_unlocked(obj); - - return ret; -} - static struct drm_ioctl_desc vgem_ioctls[] = { DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW), @@ -448,7 +422,6 @@ static struct drm_driver vgem_driver = { .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, - .dumb_map_offset = vgem_gem_dumb_map, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index c190702fab72..6dcc05ab31eb 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -96,8 +96,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, vgdev->capsets[i].id > 0, 5 * HZ); if (ret == 0) { DRM_ERROR("timed out waiting for cap set %d\n", i); + spin_lock(&vgdev->display_info_lock); kfree(vgdev->capsets); vgdev->capsets = NULL; + spin_unlock(&vgdev->display_info_lock); return; } DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 7ac20490e1b4..bb46e7a0f1b5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -572,9 +572,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, int i = le32_to_cpu(cmd->capset_index); spin_lock(&vgdev->display_info_lock); - vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); - vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); - vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + if (vgdev->capsets) { + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + } else { + DRM_ERROR("invalid capset memory."); + } spin_unlock(&vgdev->display_info_lock); wake_up(&vgdev->resp_wq); } @@ -988,8 +992,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, } /* gets freed when the ring has consumed it */ - ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry), - GFP_KERNEL); + ents = kvmalloc_array(nents, + sizeof(struct virtio_gpu_mem_entry), + GFP_KERNEL); if (!ents) { DRM_ERROR("failed to allocate ent list\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index d5585695c64d..45d6ebbdbdb2 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer) + (i * composer->pitch) + (j * composer->cpp); /* XRGB format ignores Alpha channel */ - memset(vaddr_out + src_offset + 24, 0, 8); + bitmap_clear(vaddr_out + src_offset, 24, 8); crc = crc32_le(crc, vaddr_out + src_offset, sizeof(u32)); } diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 927dafaebc76..8b01fae65f43 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -20,7 +20,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns); - WARN_ON(ret_overrun != 1); + if (ret_overrun != 1) + pr_warn("%s: vblank timer overrun\n", __func__); ret = drm_crtc_handle_vblank(crtc); if (!ret) diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5a95100fa18b..03b05c54722d 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -121,11 +121,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, enum drm_plane_type type, int index); /* Gem stuff */ -struct drm_gem_object *vkms_gem_create(struct drm_device *dev, - struct drm_file *file, - u32 *handle, - u64 size); - vm_fault_t vkms_gem_fault(struct vm_fault *vmf); int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 6489bfe0a149..8ba8b87d0c99 100644 --- 
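The vkms_composer.c change above is subtle: memset() counts bytes while bitmap_clear() counts bits, so memset(vaddr_out + src_offset + 24, 0, 8) zeroed eight bytes starting 24 bytes past the pixel (clobbering neighbouring memory), whereas bitmap_clear(vaddr_out + src_offset, 24, 8) zeroes only bits 24-31, the X/alpha byte of one little-endian XRGB8888 pixel. A standalone sketch, with bitmap_clear() re-implemented minimally (assumption: little-endian only) so it runs in userspace:

```c
#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's bitmap_clear(): clear nbits bits
 * starting at bit "start" (assumption: little-endian byte order). */
static void bitmap_clear(void *addr, unsigned start, unsigned nbits)
{
	uint8_t *p = addr;

	while (nbits--) {
		p[start / 8] &= ~(1u << (start % 8));
		start++;
	}
}

int main(void)
{
	uint32_t pixel = 0xffaabbcc;	/* X=0xff R=0xaa G=0xbb B=0xcc */

	bitmap_clear(&pixel, 24, 8);	/* clears only the X/alpha byte */
	printf("0x%08x\n", (unsigned)pixel);	/* prints 0x00aabbcc */
	return 0;
}
```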
a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -95,10 +95,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf) return ret; } -struct drm_gem_object *vkms_gem_create(struct drm_device *dev, - struct drm_file *file, - u32 *handle, - u64 size) +static struct drm_gem_object *vkms_gem_create(struct drm_device *dev, + struct drm_file *file, + u32 *handle, + u64 size) { struct vkms_gem_object *obj; int ret; @@ -111,7 +111,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, return ERR_CAST(obj); ret = drm_gem_handle_create(file, &obj->gem, handle); - drm_gem_object_put_unlocked(&obj->gem); if (ret) return ERR_PTR(ret); @@ -140,6 +139,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, args->size = gem_obj->size; args->pitch = pitch; + drm_gem_object_put_unlocked(gem_obj); + DRM_DEBUG_DRIVER("Created object of size %lld\n", size); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f47d5710cc95..33b151988747 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2666,7 +2666,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (i != unit) { + if (&con->head == &dev_priv->dev->mode_config.connector_list) { DRM_ERROR("Could not find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2690,13 +2690,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, break; } - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { + if (&mode->head == &con->modes) { WARN_ONCE(true, "Could not find initial preferred mode.\n"); *p_mode = list_first_entry(&con->modes, struct drm_display_mode, head); + } else { + *p_mode = mode; } out_unlock: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 5702219ec38f..7b54c1f56208 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) struct vmw_legacy_display_unit *entry; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; - int i = 0; + int i; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. 
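The vmwgfx_kms.c hunks above replace ad-hoc "did we find it?" bookkeeping with the canonical list test: when a list_for_each_entry()-style loop runs to completion without hitting break, the cursor's embedded list_head ends up equal to the list sentinel, so comparing &pos->member against &head tells you whether anything matched. A reduced, runnable sketch of the idiom, with a hand-rolled circular list standing in for include/linux/list.h:

```c
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item { int id; struct list_head head; };

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2 };
	struct list_head list = { &a.head };
	struct item *pos;
	int wanted = 3;

	a.head.next = &b.head;
	b.head.next = &list;	/* circular: last entry points at sentinel */

	for (pos = container_of(list.next, struct item, head);
	     &pos->head != &list;
	     pos = container_of(pos->head.next, struct item, head))
		if (pos->id == wanted)
			break;

	/* After the loop, &pos->head == &list iff nothing matched. */
	if (&pos->head == &list)
		printf("id %d not found\n", wanted);
	return 0;
}
```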
@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); - i++; } if (crtc == NULL) return 0; - fb = entry->base.crtc.primary->state->fb; + fb = crtc->primary->state->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->format->cpp[0] * 8, diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 4be49c1aef51..09894a1d343f 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -400,7 +400,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, args->size = args->pitch * args->height; obj = xen_drm_front_gem_create(dev, args->size); - if (IS_ERR_OR_NULL(obj)) { + if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail; } diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..4ec8a49241e1 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) size = round_up(size, PAGE_SIZE); xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return xen_obj; if (drm_info->front_info->cfg.be_alloc) { @@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) */ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); xen_obj->pages = drm_gem_get_pages(&xen_obj->base); - if (IS_ERR_OR_NULL(xen_obj->pages)) { + if (IS_ERR(xen_obj->pages)) { ret = PTR_ERR(xen_obj->pages); xen_obj->pages = NULL; goto fail; @@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, struct xen_gem_object *xen_obj; xen_obj = gem_create(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); return &xen_obj->base; @@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, size = attach->dmabuf->size; xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); ret = gem_alloc_pages_array(xen_obj, size); diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 21ad1c359b61..e4dedbb184ab 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp, int ret; fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); - if (IS_ERR_OR_NULL(fb)) + if (IS_ERR(fb)) return fb; gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 742aa9ff21b8..fcda8621ae6f 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); */ void host1x_driver_unregister(struct host1x_driver *driver) { + struct host1x *host1x; + driver_unregister(&driver->driver); + mutex_lock(&devices_lock); + + list_for_each_entry(host1x, &devices, list) + host1x_detach_driver(host1x, driver); + + mutex_unlock(&devices_lock); + mutex_lock(&drivers_lock); list_del_init(&driver->list); mutex_unlock(&drivers_lock); diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index c0392672a842..1b4997bda1c7 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -16,6 +16,8 @@ #include "debug.h" #include "channel.h" 
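The xen_drm_front changes a few hunks above are more than style: these helpers return either a valid pointer or an ERR_PTR()-encoded errno, never NULL. Testing with IS_ERR_OR_NULL() and then converting with PTR_ERR() is dangerous because PTR_ERR(NULL) is 0, so a stray NULL would be silently reported as success. A userspace sketch with simplified stand-ins for the kernel helpers:

```c
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel ERR_PTR helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *obj = NULL;	/* imagine a helper that leaked a NULL */

	/* The removed pattern: IS_ERR_OR_NULL() followed by PTR_ERR() */
	if (IS_ERR(obj) || !obj) {
		long ret = PTR_ERR(obj);	/* 0 for NULL! */
		printf("would return %ld (looks like success)\n", ret);
	}

	/* The fixed pattern: IS_ERR() alone, since NULL cannot occur */
	if (IS_ERR(obj))
		return (int)PTR_ERR(obj);
	return 0;
}
```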
+static DEFINE_MUTEX(debug_lock); + unsigned int host1x_debug_trace_cmdbuf; static pid_t host1x_debug_force_timeout_pid; @@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) struct output *o = data; mutex_lock(&ch->cdma.lock); + mutex_lock(&debug_lock); if (show_fifo) host1x_hw_show_channel_fifo(m, ch, o); host1x_hw_show_channel_cdma(m, ch, o); + mutex_unlock(&debug_lock); mutex_unlock(&ch->cdma.lock); return 0; diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index ee2a025e54cf..b3dae9ec1a38 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -124,6 +124,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat) case V4L2_PIX_FMT_RGBX32: case V4L2_PIX_FMT_ARGB32: case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_BGR32: return IPUV3_COLORSPACE_RGB; default: return IPUV3_COLORSPACE_UNKNOWN; diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index eeca50d9a1ee..aa1d4b6d278f 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -137,6 +137,17 @@ struct ipu_image_convert_ctx; struct ipu_image_convert_chan; struct ipu_image_convert_priv; +enum eof_irq_mask { + EOF_IRQ_IN = BIT(0), + EOF_IRQ_ROT_IN = BIT(1), + EOF_IRQ_OUT = BIT(2), + EOF_IRQ_ROT_OUT = BIT(3), +}; + +#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT) +#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \ + EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT) + struct ipu_image_convert_ctx { struct ipu_image_convert_chan *chan; @@ -173,6 +184,9 @@ struct ipu_image_convert_ctx { /* where to place converted tile in dest image */ unsigned int out_tile_map[MAX_TILES]; + /* mask of completed EOF irqs at every tile conversion */ + enum eof_irq_mask eof_mask; + struct list_head list; }; @@ -189,6 +203,8 @@ struct ipu_image_convert_chan { struct ipuv3_channel *rotation_out_chan; /* the IPU end-of-frame irqs */ + int in_eof_irq; + int rot_in_eof_irq; int out_eof_irq; int rot_out_eof_irq; @@ -1380,6 +1396,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile) dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n", __func__, chan->ic_task, ctx, run, tile, dst_tile); + /* clear EOF irq mask */ + ctx->eof_mask = 0; + if (ipu_rot_mode_is_irt(ctx->rot_mode)) { /* swap width/height for resizer */ dest_width = d_image->tile[dst_tile].height; @@ -1615,7 +1634,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx) } /* hold irqlock when calling */ -static irqreturn_t do_irq(struct ipu_image_convert_run *run) +static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run) { struct ipu_image_convert_ctx *ctx = run->ctx; struct ipu_image_convert_chan *chan = ctx->chan; @@ -1700,6 +1719,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run) ctx->cur_buf_num ^= 1; } + ctx->eof_mask = 0; /* clear EOF irq mask for next tile */ ctx->next_tile++; return IRQ_HANDLED; done: @@ -1709,45 +1729,15 @@ done: return IRQ_WAKE_THREAD; } -static irqreturn_t norotate_irq(int irq, void *data) -{ - struct ipu_image_convert_chan *chan = data; - struct ipu_image_convert_ctx *ctx; - struct ipu_image_convert_run *run; - unsigned long flags; - irqreturn_t ret; - - spin_lock_irqsave(&chan->irqlock, flags); - - /* get current run and its context */ - run = chan->current_run; - if (!run) { - ret = IRQ_NONE; - goto out; - } - - ctx = run->ctx; - - if 
(ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this is a rotation operation, just ignore */ - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - - ret = do_irq(run); -out: - spin_unlock_irqrestore(&chan->irqlock, flags); - return ret; -} - -static irqreturn_t rotate_irq(int irq, void *data) +static irqreturn_t eof_irq(int irq, void *data) { struct ipu_image_convert_chan *chan = data; struct ipu_image_convert_priv *priv = chan->priv; struct ipu_image_convert_ctx *ctx; struct ipu_image_convert_run *run; + irqreturn_t ret = IRQ_HANDLED; + bool tile_complete = false; unsigned long flags; - irqreturn_t ret; spin_lock_irqsave(&chan->irqlock, flags); @@ -1760,14 +1750,33 @@ static irqreturn_t rotate_irq(int irq, void *data) ctx = run->ctx; - if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this was NOT a rotation operation, shouldn't happen */ - dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; + if (irq == chan->in_eof_irq) { + ctx->eof_mask |= EOF_IRQ_IN; + } else if (irq == chan->out_eof_irq) { + ctx->eof_mask |= EOF_IRQ_OUT; + } else if (irq == chan->rot_in_eof_irq || + irq == chan->rot_out_eof_irq) { + if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this was NOT a rotation op, shouldn't happen */ + dev_err(priv->ipu->dev, + "Unexpected rotation interrupt\n"); + goto out; + } + ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ? + EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT; + } else { + dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq); + ret = IRQ_NONE; + goto out; } - ret = do_irq(run); + if (ipu_rot_mode_is_irt(ctx->rot_mode)) + tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE); + else + tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE); + + if (tile_complete) + ret = do_tile_complete(run); out: spin_unlock_irqrestore(&chan->irqlock, flags); return ret; @@ -1801,6 +1810,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx) static void release_ipu_resources(struct ipu_image_convert_chan *chan) { + if (chan->in_eof_irq >= 0) + free_irq(chan->in_eof_irq, chan); + if (chan->rot_in_eof_irq >= 0) + free_irq(chan->rot_in_eof_irq, chan); if (chan->out_eof_irq >= 0) free_irq(chan->out_eof_irq, chan); if (chan->rot_out_eof_irq >= 0) @@ -1819,7 +1832,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan) chan->in_chan = chan->out_chan = chan->rotation_in_chan = chan->rotation_out_chan = NULL; - chan->out_eof_irq = chan->rot_out_eof_irq = -1; + chan->in_eof_irq = -1; + chan->rot_in_eof_irq = -1; + chan->out_eof_irq = -1; + chan->rot_out_eof_irq = -1; +} + +static int get_eof_irq(struct ipu_image_convert_chan *chan, + struct ipuv3_channel *channel) +{ + struct ipu_image_convert_priv *priv = chan->priv; + int ret, irq; + + irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF); + + ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan); + if (ret < 0) { + dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq); + return ret; + } + + return irq; } static int get_ipu_resources(struct ipu_image_convert_chan *chan) @@ -1855,31 +1888,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) } /* acquire the EOF interrupts */ - chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu, - chan->out_chan, - IPU_IRQ_EOF); - - ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, - 0, "ipu-ic", chan); + ret = get_eof_irq(chan, chan->in_chan); + if (ret < 0) { + chan->in_eof_irq = -1; + goto err; + } + chan->in_eof_irq = ret; + + ret = 
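The rework above replaces the separate norotate_irq()/rotate_irq() handlers with a single eof_irq() that ORs one bit per EOF source into ctx->eof_mask and only calls do_tile_complete() once the mask equals EOF_IRQ_COMPLETE (or EOF_IRQ_ROT_COMPLETE when rotating), so the interrupts may arrive in any order. A toy driver of that accumulation logic, with the mask values copied from the patch and an arbitrary event order:

```c
#include <stdio.h>

enum eof_irq_mask {
	EOF_IRQ_IN      = 1 << 0,
	EOF_IRQ_ROT_IN  = 1 << 1,
	EOF_IRQ_OUT     = 1 << 2,
	EOF_IRQ_ROT_OUT = 1 << 3,
};

#define EOF_IRQ_COMPLETE     (EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
			      EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)

int main(void)
{
	/* EOFs for a rotated tile, deliberately out of order */
	enum eof_irq_mask events[] = {
		EOF_IRQ_ROT_OUT, EOF_IRQ_IN, EOF_IRQ_OUT, EOF_IRQ_ROT_IN,
	};
	unsigned mask = 0;

	for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		mask |= events[i];
		if (mask == EOF_IRQ_ROT_COMPLETE) {
			printf("tile complete after %u irqs\n", i + 1);
			mask = 0;	/* reset for the next tile */
		}
	}
	return 0;
}
```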
get_eof_irq(chan, chan->rotation_in_chan); + if (ret < 0) { + chan->rot_in_eof_irq = -1; + goto err; + } + chan->rot_in_eof_irq = ret; + + ret = get_eof_irq(chan, chan->out_chan); if (ret < 0) { - dev_err(priv->ipu->dev, "could not acquire irq %d\n", - chan->out_eof_irq); chan->out_eof_irq = -1; goto err; } + chan->out_eof_irq = ret; - chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu, - chan->rotation_out_chan, - IPU_IRQ_EOF); - - ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, - 0, "ipu-ic", chan); + ret = get_eof_irq(chan, chan->rotation_out_chan); if (ret < 0) { - dev_err(priv->ipu->dev, "could not acquire irq %d\n", - chan->rot_out_eof_irq); chan->rot_out_eof_irq = -1; goto err; } + chan->rot_out_eof_irq = ret; return 0; err: @@ -2458,6 +2493,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) chan->ic_task = i; chan->priv = priv; chan->dma_ch = &image_convert_dma_chan[i]; + chan->in_eof_irq = -1; + chan->rot_in_eof_irq = -1; chan->out_eof_irq = -1; chan->rot_out_eof_irq = -1; diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index fa704153cb00..2477b2a3f7c3 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -25,6 +25,7 @@ #define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ #define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */ #define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ #define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ @@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size) case U1_FEATURE_REPORT_ID: break; case U1_ABSOLUTE_REPORT_ID: + case U1_ABSOLUTE_REPORT_ID_SECD: for (i = 0; i < hdata->max_fingers; i++) { u8 *contact = &data[i * 5]; @@ -760,6 +762,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) if (input_register_device(data->input2)) { input_free_device(input2); + ret = -ENOENT; goto exit; } } @@ -802,6 +805,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) break; case HID_DEVICE_ID_ALPS_U1_DUAL: case HID_DEVICE_ID_ALPS_U1: + case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY: data->dev_type = U1; break; default: diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index d732d1d10caf..6909c045fece 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -54,6 +54,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") struct apple_sc { unsigned long quirks; unsigned int fn_on; + unsigned int fn_found; DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -339,12 +340,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { + struct apple_sc *asc = hid_get_drvdata(hdev); + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || usage->hid == (HID_UP_MSVENDOR | 0x0003) || usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); + asc->fn_found = true; apple_setup_input(hi->input); return 1; } @@ -371,6 +375,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } +static int apple_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct apple_sc *asc = hid_get_drvdata(hdev); + + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + hid_info(hdev, "Fn key not found 
(Apple Wireless Keyboard clone?), disabling Fn key handling\n"); + asc->quirks = 0; + } + + return 0; +} + static int apple_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -585,6 +602,7 @@ static struct hid_driver apple_driver = { .event = apple_event, .input_mapping = apple_input_mapping, .input_mapped = apple_input_mapped, + .input_configured = apple_input_configured, }; module_hid_driver(apple_driver); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 359616e3efbb..8d202011b2db 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. */ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) { struct hid_field *field; @@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - values * sizeof(unsigned)), GFP_KERNEL); + usages * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages, parser->global.report_count); + field = hid_register_field(report, usages); if (!field) return 0; @@ -1300,6 +1300,9 @@ EXPORT_SYMBOL_GPL(hid_open_report); static s32 snto32(__u32 value, unsigned n) { + if (!value || !n) + return 0; + switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); @@ -1597,6 +1600,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. 
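For context on the snto32() guard above: the helper sign-extends an n-bit report field into a signed 32-bit value, and the new check makes the degenerate inputs (zero value or zero width) well defined. The hunk only shows the 8/16/32 fast paths; a self-contained sketch including a generic sign-extension branch (the generic branch is an illustration, not necessarily the kernel's exact code):

```c
#include <stdio.h>
#include <stdint.h>

/* Sketch of an snto32()-style helper: sign-extend an n-bit field. */
static int32_t snto32(uint32_t value, unsigned n)
{
	if (!value || !n || n > 32)
		return 0;

	switch (n) {
	case 8:  return (int8_t)value;
	case 16: return (int16_t)value;
	case 32: return (int32_t)value;
	}
	/* generic case: replicate the sign bit into the upper bits */
	return value & (1u << (n - 1)) ? (int32_t)(value | (~0u << n))
				       : (int32_t)value;
}

int main(void)
{
	printf("%d\n", snto32(0x7f, 7));	/* -1 (all 7 bits set) */
	printf("%d\n", snto32(0x3f, 7));	/* 63 */
	printf("%d\n", snto32(0xff, 8));	/* -1 */
	return 0;
}
```

Relatedly, hid_compute_report_size() above is just a zero-guarded DIV_ROUND_UP(report->size, 8): without the guard, ((0 - 1) >> 3) + 1 underflows to a huge byte count.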
@@ -1609,7 +1623,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1739,7 +1753,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index a50ba4a4a1d7..b88f889b3932 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -23,19 +23,17 @@ #define CP_2WHEEL_MOUSE_HACK 0x02 #define CP_2WHEEL_MOUSE_HACK_ON 0x04 +#define VA_INVAL_LOGICAL_BOUNDARY 0x08 + /* * Some USB barcode readers from cypress have usage min and usage max in * the wrong order */ -static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, +static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); unsigned int i; - if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) - return rdesc; - if (*rsize < 4) return rdesc; @@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + /* + * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly + * reports Logical Minimum of its Consumer Control device as 572 + * (0x02 0x3c). Fix this by setting its Logical Minimum to zero. + */ + if (*rsize == 25 && + rdesc[0] == 0x05 && rdesc[1] == 0x0c && + rdesc[2] == 0x09 && rdesc[3] == 0x01 && + rdesc[6] == 0x19 && rdesc[7] == 0x00 && + rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) { + hid_info(hdev, + "fixing up varmilo VA104M consumer control report descriptor\n"); + rdesc[12] = 0x00; + rdesc[13] = 0x00; + } + return rdesc; +} + +static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); + + if (quirks & CP_RDESC_SWAPPED_MIN_MAX) + rdesc = cp_rdesc_fixup(hdev, rdesc, rsize); + if (quirks & VA_INVAL_LOGICAL_BOUNDARY) + rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize); + + return rdesc; +} + static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = { .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE), .driver_data = CP_2WHEEL_MOUSE_HACK }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1), + .driver_data = VA_INVAL_LOGICAL_BOUNDARY }, { } }; MODULE_DEVICE_TABLE(hid, cp_devices); diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 45c4f888b7c4..dae193749d44 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); + input_free_device(input); return ret; } @@ -198,6 +199,7 @@ static int elan_input_configured(struct 
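The Varmilo fixup above shows the usual shape of a report_fixup(): fingerprint the raw descriptor (here a 25-byte consumer-control descriptor whose bytes 11-13 are 0x16 0x3c 0x02, the two-byte item "Logical Minimum (572)") and patch the offending payload in place before parsing. A standalone sketch of just that patching step, with a faked descriptor in which only the matched bytes are populated:

```c
#include <stdio.h>
#include <stdint.h>

/* Sketch: zero a bogus two-byte Logical Minimum at a known offset.
 * Offsets and values mirror the Varmilo quirk above. */
static void fixup(uint8_t *rdesc, unsigned rsize)
{
	if (rsize == 25 &&
	    rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
		rdesc[12] = 0x00;	/* Logical Minimum (0) */
		rdesc[13] = 0x00;
	}
}

int main(void)
{
	uint8_t rdesc[25] = { 0 };	/* faked descriptor contents */

	rdesc[11] = 0x16; rdesc[12] = 0x3c; rdesc[13] = 0x02;
	fixup(rdesc, sizeof(rdesc));
	printf("%02x %02x %02x\n", rdesc[11], rdesc[12], rdesc[13]);
	return 0;
}
```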
hid_device *hdev, struct hid_input *hi) if (ret) { hid_err(hdev, "Failed to register elan input device: %d\n", ret); + input_mt_destroy_slots(input); input_free_device(input); return ret; } diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index aeb351658ad3..505ed76a830e 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -467,6 +467,8 @@ static int hammer_probe(struct hid_device *hdev, static const struct hid_device_id hammer_devices[] = { + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 646b98809ed3..1bfa690143d7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -79,10 +79,10 @@ #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 #define HID_DEVICE_ID_ALPS_U1 0x1215 +#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C #define HID_DEVICE_ID_ALPS_1222 0x1222 - #define USB_VENDOR_ID_AMI 0x046b #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 @@ -337,6 +337,8 @@ #define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81 #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 +#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1 + #define USB_VENDOR_ID_DATA_MODUL 0x7374 #define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201 @@ -363,6 +365,7 @@ #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844 +#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3 0x1846 #define USB_VENDOR_ID_DWAV 0x0eef #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 @@ -385,11 +388,13 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 #define USB_VENDOR_ID_ELAN 0x04f3 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 +#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 @@ -448,6 +453,10 @@ #define USB_VENDOR_ID_FRUCTEL 0x25B6 #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002 +#define USB_VENDOR_ID_GAMEVICE 0x27F8 +#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE +#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF + #define USB_VENDOR_ID_GAMERON 0x0810 #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 @@ -479,6 +488,7 @@ #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 +#define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f @@ -486,6 +496,7 @@ #define USB_DEVICE_ID_PENPOWER 0x00f4 #define USB_VENDOR_ID_GREENASIA 0x0e8f +#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010 #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 @@ -619,6 +630,7 @@ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 +#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293 #define 
USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 @@ -630,6 +642,8 @@ #define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745 #define USB_VENDOR_ID_ITE 0x048d +#define I2C_VENDOR_ID_ITE 0x103c +#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a @@ -728,6 +742,9 @@ #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093 #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 @@ -738,6 +755,7 @@ #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e #define USB_DEVICE_ID_LOGITECH_T651 0xb00c +#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309 #define USB_DEVICE_ID_LOGITECH_C007 0xc007 #define USB_DEVICE_ID_LOGITECH_C077 0xc077 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 @@ -769,6 +787,7 @@ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 @@ -840,6 +859,7 @@ #define USB_DEVICE_ID_MS_POWER_COVER 0x07da #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb +#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -912,6 +932,7 @@ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 #define USB_VENDOR_ID_PLANTRONICS 0x047f +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 @@ -994,6 +1015,8 @@ #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a +#define USB_VENDOR_ID_SAI 0x17dd + #define USB_VENDOR_ID_SAITEK 0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 @@ -1003,6 +1026,8 @@ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 #define USB_DEVICE_ID_SAITEK_X52 0x075c +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 @@ -1088,6 +1113,9 @@ #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 +#define I2C_VENDOR_ID_SYNAPTICS 0x06cb +#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13 + #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 @@ -1102,8 +1130,10 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 @@ -1142,6 +1172,9 @@ #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882 #define 
USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883 +#define USB_VENDOR_ID_TRUST 0x145f +#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212 + #define USB_VENDOR_ID_TURBOX 0x062a #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 #define USB_DEVICE_ID_ASUS_MD_5110 0x5110 @@ -1273,6 +1306,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 +#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index dea9cc65bf80..ec08895e7b1d 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -319,6 +319,11 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), HID_BATTERY_QUIRK_IGNORE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD), + HID_BATTERY_QUIRK_IGNORE }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, {} }; @@ -350,13 +355,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev) u8 *buf; int ret; - buf = kmalloc(2, GFP_KERNEL); + buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { + if (ret < 2) { kfree(buf); return -ENODATA; } @@ -797,7 +802,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x3b: /* Battery Strength */ hidinput_setup_battery(device, HID_INPUT_REPORT, field); usage->type = EV_PWR; - goto ignore; + return; case 0x3c: /* Invert */ map_key_clear(BTN_TOOL_RUBBER); @@ -1059,7 +1064,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case HID_DC_BATTERYSTRENGTH: hidinput_setup_battery(device, HID_INPUT_REPORT, field); usage->type = EV_PWR; - goto ignore; + return; } goto unknown; @@ -1132,6 +1137,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 6c55682c5974..742c052b0110 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -11,6 +11,48 @@ #include "hid-ids.h" +#define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0) + +static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) +{ + unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); + + if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) { + if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) { + hid_info(hdev, "Fixing up ITE keyboard report descriptor\n"); + rdesc[163] = HID_MAIN_ITEM_RELATIVE; + } + } + + return rdesc; +} + +static int ite_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, + int *max) +{ + + unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); + + if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) && + (usage->hid & HID_USAGE_PAGE) == 0x00880000) { + if (usage->hid == 0x00880078) { + /* Touchpad on, userspace expects F22 for this */ + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22); + return 1; + } + if (usage->hid == 0x00880079) { + 
/* Touchpad off, userspace expects F23 for this */ + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23); + return 1; + } + return -1; + } + + return 0; +} + static int ite_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -37,13 +79,31 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, return 0; } +static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + int ret; + + hid_set_drvdata(hdev, (void *)id->driver_data); + + ret = hid_open_report(hdev); + if (ret) + return ret; + + return hid_hw_start(hdev, HID_CONNECT_DEFAULT); +} + static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_SYNAPTICS, - USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); @@ -51,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices); static struct hid_driver ite_driver = { .name = "itetech", .id_table = ite_devices, + .probe = ite_probe, + .report_fixup = ite_report_fixup, + .input_mapping = ite_input_mapping, .event = ite_event, }; module_hid_driver(ite_driver); diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index bb50d6e7745b..b499ac37dc7b 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -328,7 +328,7 @@ static const char mse_bluetooth_descriptor[] = { 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x95, 0x04, /* REPORT_COUNT (4) */ - 0x81, 0x06, /* INPUT */ + 0x81, 0x02, /* INPUT (Data,Var,Abs) */ 0xC0, /* END_COLLECTION */ 0xC0, /* END_COLLECTION */ }; @@ -866,11 +866,24 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev, schedule_work(&djrcv_dev->work); } +/* + * Some quad/bluetooth keyboards have a builtin touchpad; in this case we see + * only 1 paired device with a device_type of REPORT_TYPE_KEYBOARD. For the + * touchpad to work we must also forward mouse input reports to the dj_hiddev + * created for the keyboard (instead of forwarding them to a second paired + * device with a device_type of REPORT_TYPE_MOUSE as we normally would).
+ */ +static const u16 kbd_builtin_touchpad_ids[] = { + 0xb309, /* Dinovo Edge */ + 0xb30c, /* Dinovo Mini */ +}; + static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev, struct hidpp_event *hidpp_report, struct dj_workitem *workitem) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); + int i, id; workitem->type = WORKITEM_TYPE_PAIRED; workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] & @@ -882,6 +895,13 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev, workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA | POWER_KEYS | MEDIA_CENTER | HIDPP; + id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb; + for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) { + if (id == kbd_builtin_touchpad_ids[i]) { + workitem->reports_supported |= STD_MOUSE; + break; + } + } break; case REPORT_TYPE_MOUSE: workitem->reports_supported |= STD_MOUSE | HIDPP; @@ -960,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, case 0x07: device_type = "eQUAD step 4 Gaming"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); + workitem.reports_supported |= STD_KEYBOARD; break; case 0x08: device_type = "eQUAD step 4 for gamepads"; @@ -974,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0d: - device_type = "eQUAD Lightspeed 1_1"; + device_type = "eQUAD Lightspeed 1.1"; + logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); + workitem.reports_supported |= STD_KEYBOARD; + break; + case 0x0f: + device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; break; @@ -1842,6 +1868,10 @@ static const struct hid_device_id logi_dj_receivers[] = { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc531), .driver_data = recvr_type_gaming_hidpp}, + { /* Logitech G602 receiver (0xc537) */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, + 0xc537), + .driver_data = recvr_type_gaming_hidpp}, { /* Logitech lightspeed receiver (0xc539) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1), diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..919551ed5809 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -88,6 +88,8 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) +#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) + /* * There are two hidpp protocols in use, the first version hidpp10 is known * as register access protocol or RAP, the second version hidpp20 is known as @@ -2768,6 +2770,26 @@ static int g920_get_config(struct hidpp_device *hidpp, return g920_ff_set_autocenter(hidpp, data); } +/* -------------------------------------------------------------------------- */ +/* Logitech Dinovo Mini keyboard with builtin touchpad */ +/* -------------------------------------------------------------------------- */ +#define DINOVO_MINI_PRODUCT_ID 0xb30c + +static int lg_dinovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) + return 0; + + switch (usage->hid & HID_USAGE) { + case 0x00d: lg_map_key_clear(KEY_MEDIA); break; + default: + return 0; + } + return 1; +} + /* 
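To make the comment above concrete: each paired device is identified by a 16-bit quad id assembled from the notification payload, and keyboards whose id appears in kbd_builtin_touchpad_ids additionally get STD_MOUSE ORed into reports_supported, so the builtin touchpad's mouse reports are routed to the keyboard's dj_hiddev. A sketch of just that id-match step (the STD_* bit values here are illustrative, not the driver's actual ones):

```c
#include <stdio.h>

#define STD_MOUSE    (1 << 1)	/* illustrative bit values */
#define STD_KEYBOARD (1 << 2)

static const unsigned short kbd_builtin_touchpad_ids[] = {
	0xb309,	/* Dinovo Edge */
	0xb30c,	/* Dinovo Mini */
};

int main(void)
{
	unsigned char quad_id_msb = 0xb3, quad_id_lsb = 0x09;
	unsigned reports_supported = STD_KEYBOARD;
	int id = (quad_id_msb << 8) | quad_id_lsb;

	for (unsigned i = 0;
	     i < sizeof(kbd_builtin_touchpad_ids) /
		 sizeof(kbd_builtin_touchpad_ids[0]); i++) {
		if (id == kbd_builtin_touchpad_ids[i]) {
			reports_supported |= STD_MOUSE;	/* forward mouse too */
			break;
		}
	}
	printf("reports_supported = %#x\n", reports_supported);
	return 0;
}
```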
-------------------------------------------------------------------------- */ /* HID++1.0 devices which use HID++ reports for their wheels */ /* -------------------------------------------------------------------------- */ @@ -2964,7 +2986,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp) multiplier = 1; hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; - hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); + hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } @@ -3003,6 +3025,9 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi, field->application != HID_GD_MOUSE) return m560_input_mapping(hdev, hi, field, usage, bit, max); + if (hdev->product == DINOVO_MINI_PRODUCT_ID) + return lg_dinovo_input_mapping(hdev, hi, field, usage, bit, max); + return 0; } @@ -3741,6 +3766,7 @@ static const struct hid_device_id hidpp_devices[] = { LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech MX Anywhere 2 */ LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, @@ -3763,6 +3789,9 @@ static const struct hid_device_id hidpp_devices[] = { { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */ + LDJ_DEVICE(0xb309), + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, @@ -3805,6 +3834,9 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5000 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Dinovo Edge keyboard over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309), + .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 34138667f8af..abd86903875f 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(MSC_RAW, input->mscbit); } + /* + * hid-input may mark the device as using autorepeat, but neither + * the trackpad nor the mouse actually wants it.
+ */ + __clear_bit(EV_REP, input->evbit); + return 0; } diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c index fc75f30f537c..92d7ecd41a78 100644 --- a/drivers/hid/hid-mf.c +++ b/drivers/hid/hid-mf.c @@ -153,6 +153,8 @@ static const struct hid_device_id mf_devices[] = { .driver_data = HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2), .driver_data = 0 }, /* No quirk required */ + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), + .driver_data = HID_QUIRK_MULTI_INPUT }, { } }; MODULE_DEVICE_TABLE(hid, mf_devices); diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 2d8b589201a4..8cb1ca1936e4 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -451,6 +451,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_SURFACE_DIAL }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER), .driver_data = MS_QUIRK_FF }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS), + .driver_data = MS_QUIRK_FF }, { } }; MODULE_DEVICE_TABLE(hid, ms_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 362805ddf377..d91e6679afb1 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -69,6 +69,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_ASUS_CUSTOM_UP BIT(17) #define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18) #define MT_QUIRK_SEPARATE_APP_REPORT BIT(19) +#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) #define MT_INPUTMODE_TOUCHSCREEN 0x02 #define MT_INPUTMODE_TOUCHPAD 0x03 @@ -189,6 +190,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); #define MT_CLS_WIN_8 0x0012 #define MT_CLS_EXPORT_ALL_INPUTS 0x0013 #define MT_CLS_WIN_8_DUAL 0x0014 +#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015 /* vendor specific classes */ #define MT_CLS_3M 0x0101 @@ -279,6 +281,15 @@ static const struct mt_class mt_classes[] = { MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_WIN8_PTP_BUTTONS, .export_all_inputs = true }, + { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + .quirks = MT_QUIRK_ALWAYS_VALID | + MT_QUIRK_IGNORE_DUPLICATES | + MT_QUIRK_HOVERING | + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_STICKY_FINGERS | + MT_QUIRK_WIN8_PTP_BUTTONS | + MT_QUIRK_FORCE_MULTI_INPUT, + .export_all_inputs = true }, /* * vendor specific classes @@ -853,6 +864,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; @@ -1714,6 +1727,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) if (id->group != HID_GROUP_MULTITOUCH_WIN_8) hdev->quirks |= HID_QUIRK_MULTI_INPUT; + if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) { + hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP; + hdev->quirks |= HID_QUIRK_MULTI_INPUT; + } + timer_setup(&td->release_timer, mt_expired_timeout, 0); ret = hid_parse(hdev); @@ -1922,6 +1940,14 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + { .driver_data = MT_CLS_EGALAX, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, + + /* Elan devices */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 
USB_VENDOR_ID_ELAN, 0x313a) }, /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, @@ -2053,6 +2079,15 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM)}, + /* Synaptics devices */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xce08) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xce09) }, + /* TopSeed panels */ { .driver_data = MT_CLS_TOPSEED, MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 85b685efc12f..e81b7cec2d12 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -13,6 +13,7 @@ #include <linux/hid.h> #include <linux/module.h> +#include <linux/jiffies.h> #define PLT_HID_1_0_PAGE 0xffa00000 #define PLT_HID_2_0_PAGE 0xffa20000 @@ -36,6 +37,16 @@ #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) +#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) + +#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ + +struct plt_drv_data { + unsigned long device_type; + unsigned long last_volume_key_ts; + u32 quirks; +}; + static int plantronics_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, @@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev, unsigned long **bit, int *max) { unsigned short mapped_key; - unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); + struct plt_drv_data *drv_data = hid_get_drvdata(hdev); + unsigned long plt_type = drv_data->device_type; /* special case for PTT products */ if (field->application == HID_GD_JOYSTICK) @@ -105,6 +117,30 @@ mapped: return 1; } +static int plantronics_event(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value) +{ + struct plt_drv_data *drv_data = hid_get_drvdata(hdev); + + if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) { + unsigned long prev_ts, cur_ts; + + /* Usages are filtered in plantronics_usages. */ + + if (!value) /* Handle key presses only. */ + return 0; + + prev_ts = drv_data->last_volume_key_ts; + cur_ts = jiffies; + if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT) + return 1; /* Ignore the repeated key. 
*/ + + drv_data->last_volume_key_ts = cur_ts; + } + + return 0; +} + static unsigned long plantronics_device_type(struct hid_device *hdev) { unsigned i, col_page; @@ -133,15 +169,24 @@ exit: static int plantronics_probe(struct hid_device *hdev, const struct hid_device_id *id) { + struct plt_drv_data *drv_data; int ret; + drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) + return -ENOMEM; + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } - hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev)); + drv_data->device_type = plantronics_device_type(hdev); + drv_data->quirks = id->driver_data; + drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); + + hid_set_drvdata(hdev, drv_data); ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE); @@ -153,15 +198,26 @@ err: } static const struct hid_device_id plantronics_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), + .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, plantronics_devices); +static const struct hid_usage_id plantronics_usages[] = { + { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID }, + { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID }, + { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR } +}; + static struct hid_driver plantronics_driver = { .name = "plantronics", .id_table = plantronics_devices, + .usage_table = plantronics_usages, .input_mapping = plantronics_input_mapping, + .event = plantronics_event, .probe = plantronics_probe, }; module_hid_driver(plantronics_driver); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index ae64a286a68f..f35d919c4eba 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -72,6 +72,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, @@ -83,11 +84,17 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186), + HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI), + HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { 
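The PLT_QUIRK_DOUBLE_VOLUME_KEYS handling above is a classic jiffies debounce: each accepted press stamps last_volume_key_ts, and any further press within PLT_DOUBLE_KEY_TIMEOUT ms is swallowed; seeding the timestamp one timeout in the past at probe time keeps the very first press from being eaten. A sketch with a plain millisecond counter standing in for jiffies (unsigned wraparound behaves the same way):

```c
#include <stdio.h>

#define PLT_DOUBLE_KEY_TIMEOUT 5	/* ms */

static unsigned long last_ts;

/* Returns 1 if the event should be dropped as a duplicate.
 * "now" stands in for jiffies already converted to ms. */
static int filter_key(unsigned long now, int value)
{
	if (!value)			/* key releases pass through */
		return 0;
	if (now - last_ts <= PLT_DOUBLE_KEY_TIMEOUT)
		return 1;		/* duplicate within the window */
	last_ts = now;
	return 0;
}

int main(void)
{
	last_ts = 0ul - PLT_DOUBLE_KEY_TIMEOUT;	/* seed in the past */
	printf("%d %d %d\n",
	       filter_key(100, 1),	/* 0: accepted */
	       filter_key(103, 1),	/* 1: dropped, 3 ms later */
	       filter_key(110, 1));	/* 0: accepted again */
	return 0;
}
```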
HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, @@ -104,6 +111,9 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET }, @@ -146,6 +156,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, @@ -163,10 +175,12 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, 
USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE }, { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, @@ -176,6 +190,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET }, { 0 } }; @@ -398,9 +413,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, #endif -#if IS_ENABLED(CONFIG_HID_ITE) - { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, -#endif #if IS_ENABLED(CONFIG_HID_ICADE) { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, #endif @@ -480,6 +492,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) }, #endif #if IS_ENABLED(CONFIG_HID_MICROSOFT) { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, @@ -840,6 +853,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 1a6e600197d0..509b9bb1362c 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj, struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval = 0, difference, old_profile; + struct kone_settings *settings = (struct kone_settings *)buf; /* I need to get my data in one piece */ if (off != 0 || count != sizeof(struct kone_settings)) return -EINVAL; mutex_lock(&kone->kone_lock); - difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings)); + difference = memcmp(settings, &kone->settings, + sizeof(struct kone_settings)); if (difference) { - retval = 
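/*
 * The kone_sysfs_write_settings() rewrite that begins above follows the
 * usual rule for sysfs binary attributes: the buffer is untrusted
 * userspace input, so every field must be range-checked before it
 * reaches the hardware or the cached copy. A generic sketch of the
 * shape (names hypothetical, not the driver's code):
 *
 *	static ssize_t foo_write(..., char *buf, loff_t off, size_t count)
 *	{
 *		struct foo_settings *s = (struct foo_settings *)buf;
 *
 *		if (off != 0 || count != sizeof(*s))
 *			return -EINVAL;		// demand one complete struct
 *		if (s->profile < 1 || s->profile > 5)
 *			return -EINVAL;		// validate before use
 *		...
 *	}
 *
 * The single "unlock" label added below also keeps the error paths from
 * duplicating mutex_unlock() calls.
 */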
kone_set_settings(usb_dev, - (struct kone_settings const *)buf); - if (retval) { - mutex_unlock(&kone->kone_lock); - return retval; + if (settings->startup_profile < 1 || + settings->startup_profile > 5) { + retval = -EINVAL; + goto unlock; } + retval = kone_set_settings(usb_dev, settings); + if (retval) + goto unlock; + old_profile = kone->settings.startup_profile; - memcpy(&kone->settings, buf, sizeof(struct kone_settings)); + memcpy(&kone->settings, settings, sizeof(struct kone_settings)); kone_profile_activated(kone, kone->settings.startup_profile); if (kone->settings.startup_profile != old_profile) kone_profile_report(kone, kone->settings.startup_profile); } +unlock: mutex_unlock(&kone->kone_lock); + if (retval) + return retval; + return sizeof(struct kone_settings); } static BIN_ATTR(settings, 0660, kone_sysfs_read_settings, diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 94c7398b5c27..3dd7d3246737 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev, return 1; ptr = raw_data; - ptr++; /* Skip report id */ + if (report->id) + ptr++; /* Skip report id */ spin_lock_irqsave(&pdata->lock, flags); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 4c6ed6ef31f1..2f073f536070 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, if (sc->quirks & PS3REMOTE) return ps3remote_fixup(hdev, rdesc, rsize); + /* + * Some knock-off USB dongles incorrectly report their button count + * as 13 instead of 16 causing three non-functional buttons. + */ + if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 && + /* Report Count (13) */ + rdesc[23] == 0x95 && rdesc[24] == 0x0D && + /* Usage Maximum (13) */ + rdesc[37] == 0x29 && rdesc[38] == 0x0D && + /* Report Count (3) */ + rdesc[43] == 0x95 && rdesc[44] == 0x03) { + hid_info(hdev, "Fixing up USB dongle report descriptor\n"); + rdesc[24] = 0x10; + rdesc[38] = 0x10; + rdesc[44] = 0x00; + } + return rdesc; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 6286204d4c56..a3b151b29bd7 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam) steam_battery_register(steam); mutex_lock(&steam_devices_lock); - list_add(&steam->list, &steam_devices); + if (list_empty(&steam->list)) + list_add(&steam->list, &steam_devices); mutex_unlock(&steam_devices_lock); } @@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam) hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); mutex_lock(&steam_devices_lock); - list_del(&steam->list); + list_del_init(&steam->list); mutex_unlock(&steam_devices_lock); steam->serial_no[0] = 0; } @@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev, mutex_init(&steam->mutex); steam->quirks = id->driver_data; INIT_WORK(&steam->work_connect, steam_work_connect_cb); + INIT_LIST_HEAD(&steam->list); steam->client_hdev = steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 86b568037cb8..8e9c9e646cb7 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = { USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, 
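/*
 * The hid-steam hunks above are the standard list_del_init() idiom for
 * making register/unregister idempotent: after INIT_LIST_HEAD() a node
 * points at itself, so list_empty() applied to the node itself answers
 * "is this node currently on any list?". Illustrative sketch:
 *
 *	INIT_LIST_HEAD(&steam->list);		// probe: self-linked
 *	if (list_empty(&steam->list))		// register: add only once
 *		list_add(&steam->list, &steam_devices);
 *	list_del_init(&steam->list);		// unregister: repeat-safe
 *
 * A plain list_del() would have left the pointers poisoned, so a second
 * unregister (possible in connect/disconnect races) would corrupt the
 * list; list_del_init() re-initializes the node instead.
 */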
USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, + USB_DEVICE_ID_UGTIZER_TABLET_GT5040) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_G5) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 78a364ae2f68..e80c812f44a7 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params, break; case VID_PID(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610): + case VID_PID(USB_VENDOR_ID_UGTIZER, + USB_DEVICE_ID_UGTIZER_TABLET_GT5040): case VID_PID(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540): case VID_PID(USB_VENDOR_ID_UGEE, diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 479934f7d241..96898983db99 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -173,12 +173,16 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15, + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_BOGUS_IRQ }, { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, I2C_HID_QUIRK_RESET_ON_RESUME }, + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393, + I2C_HID_QUIRK_RESET_ON_RESUME }, { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } @@ -420,6 +424,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) dev_err(&client->dev, "failed to change power setting.\n"); set_pwr_exit: + + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observed that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests. + * According to Goodix, Windows even waits 60 ms after (other?) + * PWR_ON requests. Testing has confirmed that several devices + * will not work properly without a delay after a PWR_ON request. + */ + if (!ret && power_state == I2C_HID_PWR_ON) + msleep(60); + return ret; } @@ -441,15 +458,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; - /* - * The HID over I2C specification states that if a DEVICE needs time - * after the PWR_ON request, it should utilise CLOCK stretching. - * However, it has been observered that the Windows driver provides a - * 1ms sleep between the PWR_ON and RESET requests and that some devices - * rely on this.
- */ - usleep_range(1000, 5000); - i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index a66f08041a1a..8e0f67455c09 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Mediacom FlexBook edge 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Odys Winbook 13", .matches = { @@ -389,6 +397,22 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Schneider SCL142ALM", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Vero K147", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"), + }, + .driver_data = (void *)&sipodev_desc + }, { } /* Terminate list */ }; diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index aa2dbed30fc3..6cf59fd26ad7 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, sizeof(ldr_xfer_query_resp)); if (rv < 0) { client_data->flag_retry = true; + *fw_info = (struct shim_fw_info){}; return rv; } @@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, "data size %d is not equal to size of loader_xfer_query_response %zu\n", rv, sizeof(struct loader_xfer_query_response)); client_data->flag_retry = true; + *fw_info = (struct shim_fw_info){}; return -EMSGSIZE; } diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index c7bc9db5b192..17a638f15082 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + mutex_lock(&usbhid->mutex); + set_bit(HID_OPENED, &usbhid->iofl); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return 0; + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { + res = 0; + goto Done; + } res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ if (res < 0) { clear_bit(HID_OPENED, &usbhid->iofl); - return -EIO; + res = -EIO; + goto Done; } usbhid->intf->needs_remote_wakeup = 1; @@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid) msleep(50); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + + Done: + mutex_unlock(&usbhid->mutex); return res; } @@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; + mutex_lock(&usbhid->mutex); + /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing @@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid) clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return; + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { + hid_cancel_delayed_stuff(usbhid); + 
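/*
 * The ishtp-fw-loader hunks above clear the caller's out-parameter with
 * a zero compound literal on every error path, so a failed query can
 * never hand stale firmware info back to the caller. The pattern
 * generalizes to any out-parameter; a hedged sketch with a hypothetical
 * helper:
 *
 *	int query_info(struct foo_info *out)
 *	{
 *		int rv = do_query(out);
 *
 *		if (rv < 0)
 *			*out = (struct foo_info){};	// no stale data
 *		return rv;
 *	}
 */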
usb_kill_urb(usbhid->urbin); + usbhid->intf->needs_remote_wakeup = 0; + } - hid_cancel_delayed_stuff(usbhid); - usb_kill_urb(usbhid->urbin); - usbhid->intf->needs_remote_wakeup = 0; + mutex_unlock(&usbhid->mutex); } /* @@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid) unsigned int n, insize = 0; int ret; + mutex_lock(&usbhid->mutex); + clear_bit(HID_DISCONNECTED, &usbhid->iofl); usbhid->bufsize = HID_MIN_BUFFER_SIZE; @@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid) usbhid_set_leds(hid); device_set_wakeup_enable(&dev->dev, 1); } + + mutex_unlock(&usbhid->mutex); return 0; fail: @@ -1187,6 +1202,7 @@ fail: usbhid->urbout = NULL; usbhid->urbctrl = NULL; hid_free_buffers(dev, hid); + mutex_unlock(&usbhid->mutex); return ret; } @@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->intf->needs_remote_wakeup = 0; } + mutex_lock(&usbhid->mutex); + clear_bit(HID_STARTED, &usbhid->iofl); spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); @@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->urbout = NULL; hid_free_buffers(hid_to_usb_dev(hid), hid); + + mutex_unlock(&usbhid->mutex); } static int usbhid_power(struct hid_device *hid, int lvl) @@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * INIT_WORK(&usbhid->reset_work, hid_reset); timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); spin_lock_init(&usbhid->lock); + mutex_init(&usbhid->mutex); ret = hid_add_device(hid); if (ret) { diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 35b1fa6d962e..4711fb191a07 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, switch (cmd) { case HIDIOCGUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; field->value[uref->usage_index] = uref->value; goto goodreturn; diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 8620408bd7af..75fe85d3d27a 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -80,6 +80,7 @@ struct usbhid_device { dma_addr_t outbuf_dma; /* Output buffer dma */ unsigned long last_out; /* record of last output for timeouts */ + struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ struct timer_list io_retry; /* Retry timer */ diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 5ded94b7bf68..73dafa60080f 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, } if (flush) - wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); + wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo); else if (insert) - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, + wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo, raw_data, report_size); return insert && !flush; @@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); - if (ret == n) { + if (ret == n && features->type == 
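/*
 * The hiddev hunks above bound-check a user-controlled index:
 * uref->usage_index arrives straight from the ioctl argument and must
 * be compared against field->report_count before it indexes
 * field->value[]. The essential shape:
 *
 *	if (uref->usage_index >= field->report_count)
 *		return -EINVAL;		// reject before the array access
 *	uref->value = field->value[uref->usage_index];
 *
 * Without the check, a crafted HIDIOCGUSAGE/HIDIOCSUSAGE call can read
 * or write beyond the report's value array in kernel memory.
 */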
HID_GENERIC) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); + } else if (ret == 2 && features->type != HID_GENERIC) { + features->touch_max = data[1]; } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " @@ -1268,6 +1270,38 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom, group); } +static void wacom_devm_kfifo_release(struct device *dev, void *res) +{ + struct kfifo_rec_ptr_2 *devres = res; + + kfifo_free(devres); +} + +static int wacom_devm_kfifo_alloc(struct wacom *wacom) +{ + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct kfifo_rec_ptr_2 *pen_fifo; + int error; + + pen_fifo = devres_alloc(wacom_devm_kfifo_release, + sizeof(struct kfifo_rec_ptr_2), + GFP_KERNEL); + + if (!pen_fifo) + return -ENOMEM; + + error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); + if (error) { + devres_free(pen_fifo); + return error; + } + + devres_add(&wacom->hdev->dev, pen_fifo); + wacom_wac->pen_fifo = pen_fifo; + + return 0; +} + enum led_brightness wacom_leds_brightness_get(struct wacom_led *led) { struct wacom *wacom = led->wacom; @@ -2722,7 +2756,7 @@ static int wacom_probe(struct hid_device *hdev, if (features->check_for_hid_type && features->hid_type != hdev->type) return -ENODEV; - error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); + error = wacom_devm_kfifo_alloc(wacom); if (error) return error; @@ -2784,8 +2818,6 @@ static void wacom_remove(struct hid_device *hdev) if (wacom->wacom_wac.features.type != REMOTE) wacom_release_resources(wacom); - - kfifo_free(&wacom_wac->pen_fifo); } #ifdef CONFIG_PM diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index d99a9d407671..b21cf764afc0 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) { struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; + int nbuttons = wacom->features.numbered_buttons; - int buttons = data[282] | ((data[281] & 0x40) << 2); + int expresskeys = data[282]; + int center = (data[281] & 0x40) >> 6; int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; - bool prox = buttons || ringstatus; + bool prox = expresskeys || center || ringstatus; /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ ring = 71 - ring; @@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) if (ring > 71) ring -= 72; - wacom_report_numbered_buttons(pad_input, 9, buttons); + wacom_report_numbered_buttons(pad_input, nbuttons, + expresskeys | (center << (nbuttons - 1))); input_report_abs(pad_input, ABS_WHEEL, ringstatus ? 
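/*
 * wacom_devm_kfifo_alloc() above is the stock recipe for putting a
 * resource that has no devm_* helper under device-managed cleanup:
 * devres_alloc() a holder with a release callback, acquire the resource
 * into it, then devres_add(); devres_free() is used only when
 * acquisition fails. A hedged generic sketch (names hypothetical):
 *
 *	static void devm_thing_release(struct device *dev, void *res)
 *	{
 *		thing_destroy(res);
 *	}
 *
 *	t = devres_alloc(devm_thing_release, sizeof(*t), GFP_KERNEL);
 *	if (!t)
 *		return -ENOMEM;
 *	if (thing_init(t)) {		// acquire the real resource
 *		devres_free(t);		// not yet owned by the device
 *		return -EBUSY;
 *	}
 *	devres_add(dev, t);		// released on driver detach
 */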
ring : 0); @@ -2530,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, !wacom_wac->shared->is_touch_on) { if (!wacom_wac->shared->touch_down) return; - prox = 0; + prox = false; } wacom_wac->hid_data.num_received++; @@ -2597,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev, wacom_wac->is_invalid_bt_frame = !value; return; case HID_DG_CONTACTMAX: - features->touch_max = value; + if (!features->touch_max) { + features->touch_max = value; + } else { + hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max " + "%d -> %d\n", __func__, features->touch_max, value); + } return; } @@ -2637,9 +2645,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, case HID_DG_TIPSWITCH: hid_data->last_slot_field = equivalent_usage; break; + case HID_DG_CONTACTCOUNT: + hid_data->cc_report = report->id; + hid_data->cc_index = i; + hid_data->cc_value_index = j; + break; } } } + + if (hid_data->cc_report != 0 && + hid_data->cc_index >= 0) { + struct hid_field *field = report->field[hid_data->cc_index]; + int value = field->value[hid_data->cc_value_index]; + if (value) + hid_data->num_expected = value; + } + else { + hid_data->num_expected = wacom_wac->features.touch_max; + } } static void wacom_wac_finger_report(struct hid_device *hdev, @@ -2649,7 +2673,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; - struct hid_data *hid_data = &wacom_wac->hid_data; /* If more packets of data are expected, give us a chance to * process them rather than immediately syncing a partial @@ -2663,7 +2686,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, input_sync(input); wacom_wac->hid_data.num_received = 0; - hid_data->num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); @@ -2738,73 +2760,12 @@ static void wacom_report_events(struct hid_device *hdev, } } -static void wacom_set_num_expected(struct hid_device *hdev, - struct hid_report *report, - int collection_index, - struct hid_field *field, - int field_index) -{ - struct wacom *wacom = hid_get_drvdata(hdev); - struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct hid_data *hid_data = &wacom_wac->hid_data; - unsigned int original_collection_level = - hdev->collection[collection_index].level; - bool end_collection = false; - int i; - - if (hid_data->num_expected) - return; - - // find the contact count value for this segment - for (i = field_index; i < report->maxfield && !end_collection; i++) { - struct hid_field *field = report->field[i]; - unsigned int field_level = - hdev->collection[field->usage[0].collection_index].level; - unsigned int j; - - if (field_level != original_collection_level) - continue; - - for (j = 0; j < field->maxusage; j++) { - struct hid_usage *usage = &field->usage[j]; - - if (usage->collection_index != collection_index) { - end_collection = true; - break; - } - if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) { - hid_data->cc_report = report->id; - hid_data->cc_index = i; - hid_data->cc_value_index = j; - - if (hid_data->cc_report != 0 && - hid_data->cc_index >= 0) { - - struct hid_field *field = - report->field[hid_data->cc_index]; - int value = - field->value[hid_data->cc_value_index]; - - if (value) - hid_data->num_expected = value; - } - } - } - } - - if (hid_data->cc_report == 0 || hid_data->cc_index 
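/*
 * The finger_pre_report hunk above re-derives the expected contact
 * count from the report being processed, so devices that split one
 * touch frame across several HID reports still sync exactly once per
 * frame. The deferral it feeds (in wacom_wac_finger_report above)
 * amounts to -- sketched from the surrounding context:
 *
 *	if (hid_data->num_received < hid_data->num_expected)
 *		return;			// frame incomplete, keep buffering
 *	input_sync(input);		// frame complete, flush it
 *
 * When no HID_DG_CONTACTCOUNT usage is present, num_expected falls back
 * to features.touch_max, i.e. each report is treated as a whole frame.
 */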
< 0) - hid_data->num_expected = wacom_wac->features.touch_max; -} - static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, int collection_index, struct hid_field *field, int field_index) { struct wacom *wacom = hid_get_drvdata(hdev); - if (WACOM_FINGER_FIELD(field)) - wacom_set_num_expected(hdev, report, collection_index, field, - field_index); wacom_report_events(hdev, report, collection_index, field_index); /* @@ -2817,7 +2778,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo if (report->type != HID_INPUT_REPORT) return -1; - if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) + if (WACOM_PAD_FIELD(field)) + return 0; + else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) wacom_wac_pen_report(hdev, report); else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input) wacom_wac_finger_report(hdev, report); @@ -3611,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_PEN)) return -ENODEV; @@ -3627,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, return 0; } + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); @@ -3779,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, { struct wacom_features *features = &wacom_wac->features; - input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); - if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) return -ENODEV; @@ -3793,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, /* setup has already been done */ return 0; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); if (features->touch_max == 1) { diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index da612b6e9c77..195910dd2154 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -342,7 +342,7 @@ struct wacom_wac { struct input_dev *pen_input; struct input_dev *touch_input; struct input_dev *pad_input; - struct kfifo_rec_ptr_2 pen_fifo; + struct kfifo_rec_ptr_2 *pen_fifo; int pid; int num_contacts_left; u8 bt_features; diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 4bc4a201f0f6..f36036be7f03 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi, err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); if (err < 0) - goto out_err; + return err; ssi->id = err; ssi->owner = THIS_MODULE; @@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi) struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); int err; - err = pm_runtime_get_sync(ssi->device.parent); + err = pm_runtime_resume_and_get(ssi->device.parent); if (err < 0) { dev_err(&ssi->device, "runtime PM failed %d\n", err); return err; diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c index 47f0208aa7c3..a5f92e2889cb 100644 --- a/drivers/hsi/hsi_core.c +++ b/drivers/hsi/hsi_core.c @@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port, if (err) goto err; - dev_set_name(&cl->device, "%s", name); - err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); if (err) 
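/*
 * The omap_ssi hunk above swaps pm_runtime_get_sync() for
 * pm_runtime_resume_and_get(). The distinction: get_sync() increments
 * the usage counter even when the resume fails, so every error path
 * needed a matching pm_runtime_put_noidle(). The newer helper is
 * essentially the following, with the success return normalized to 0:
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);	// drop the leaked count
 *		return ret;
 *	}
 *
 * letting callers simply return on error without leaking a reference
 * (the same conversion appears in the ina3221 hunk further down).
 */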
{ err = hsi_of_property_parse_mode(client, "hsi-rx-mode", @@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port, cl->device.release = hsi_client_release; cl->device.of_node = client; + dev_set_name(&cl->device, "%s", name); if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", name); put_device(&cl->device); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 8eb167540b4f..9260ad47350f 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -763,13 +763,19 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) free_cpumask_var(available_mask); } +#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */ +#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */ +#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS) +#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */ +#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS) + static void vmbus_wait_for_unload(void) { int cpu; void *page_addr; struct hv_message *msg; struct vmbus_channel_message_header *hdr; - u32 message_type; + u32 message_type, i; /* * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was @@ -779,10 +785,18 @@ static void vmbus_wait_for_unload(void) * functional and vmbus_unload_response() will complete * vmbus_connection.unload_event. If not, the last thing we can do is * read message pages for all CPUs directly. + * + * Wait up to 100 seconds since an Azure host must writeback any dirty + * data in its disk cache before the VMbus UNLOAD request will + * complete. This flushing has been empirically observed to take up + * to 50 seconds in cases with a lot of dirty data, so allow additional + * leeway and for inaccuracies in mdelay(). But eventually time out so + * that the panic path can't get hung forever in case the response + * message isn't seen. */ - while (1) { + for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) { if (completion_done(&vmbus_connection.unload_event)) - break; + goto completed; for_each_online_cpu(cpu) { struct hv_per_cpu_context *hv_cpu @@ -805,9 +819,18 @@ static void vmbus_wait_for_unload(void) vmbus_signal_eom(msg, message_type); } - mdelay(10); - } + /* + * Give a notice periodically so someone watching the + * serial output won't think it is completely hung. + */ + if (!(i % UNLOAD_MSG_LOOPS)) + pr_notice("Waiting for VMBus UNLOAD to complete\n"); + mdelay(UNLOAD_DELAY_UNIT_MS); + } + pr_err("Continuing even though VMBus UNLOAD did not complete\n"); + +completed: /* * We're crashing and already got the UNLOAD_RESPONSE, cleanup all * maybe-pending messages on all CPUs to be able to receive new @@ -839,6 +862,9 @@ void vmbus_initiate_unload(bool crash) { struct vmbus_channel_message_header hdr; + if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED) + return; + /* Pre-Win2012R2 hosts don't support reconnect */ if (vmbus_proto_version < VERSION_WIN8_1) return; @@ -1095,8 +1121,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) vmbus_device_unregister(channel->device_obj); put_device(dev); } - } - if (channel->primary_channel != NULL) { + } else if (channel->primary_channel != NULL) { /* * Sub-channel is being rescinded. 
Following is the channel * close sequence when initiated from the driveri (refer to @@ -1351,6 +1376,8 @@ channel_message_table[CHANNELMSG_COUNT] = { { CHANNELMSG_19, 0, NULL }, { CHANNELMSG_20, 0, NULL }, { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL }, + { CHANNELMSG_22, 0, NULL }, + { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL }, }; /* @@ -1362,25 +1389,16 @@ void vmbus_onmessage(void *context) { struct hv_message *msg = context; struct vmbus_channel_message_header *hdr; - int size; hdr = (struct vmbus_channel_message_header *)msg->u.payload; - size = msg->header.payload_size; trace_vmbus_on_message(hdr); - if (hdr->msgtype >= CHANNELMSG_COUNT) { - pr_err("Received invalid channel message type %d size %d\n", - hdr->msgtype, size); - print_hex_dump_bytes("", DUMP_PREFIX_NONE, - (unsigned char *)msg->u.payload, size); - return; - } - - if (channel_message_table[hdr->msgtype].message_handler) - channel_message_table[hdr->msgtype].message_handler(hdr); - else - pr_err("Unhandled channel message type %d\n", hdr->msgtype); + /* + * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go + * out of bounds and the message_handler pointer cannot be NULL. + */ + channel_message_table[hdr->msgtype].message_handler(hdr); } /* diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 6e4c015783ff..c90d79096e8c 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -67,7 +67,6 @@ static __u32 vmbus_get_next_version(__u32 current_version) int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) { int ret = 0; - unsigned int cur_cpu; struct vmbus_channel_initiate_contact *msg; unsigned long flags; @@ -100,24 +99,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); - /* - * We want all channel messages to be delivered on CPU 0. - * This has been the behavior pre-win8. This is not - * perf issue and having all channel messages delivered on CPU 0 - * would be ok. - * For post win8 hosts, we support receiving channel messagges on - * all the CPUs. This is needed for kexec to work correctly where - * the CPU attempting to connect may not be CPU 0. - */ - if (version >= VERSION_WIN8_1) { - cur_cpu = get_cpu(); - msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu); - vmbus_connection.connect_cpu = cur_cpu; - put_cpu(); - } else { - msg->target_vcpu = 0; - vmbus_connection.connect_cpu = 0; - } + msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU); /* * Add to list before we send the request since we may diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index fcc52797c169..f849a1a89fb4 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -249,6 +249,17 @@ int hv_synic_cleanup(unsigned int cpu) bool channel_found = false; unsigned long flags; + /* + * Hyper-V does not provide a way to change the connect CPU once + * it is set; we must prevent the connect CPU from going offline + * while the VM is running normally. But in the panic or kexec() + * path where the vmbus is already disconnected, the CPU must be + * allowed to shut down. + */ + if (cpu == VMBUS_CONNECT_CPU && + vmbus_connection.conn_state == CONNECTED) + return -EBUSY; + /* * Search for channels which are bound to the CPU we're about to * cleanup.
In case we find one and vmbus is still connected we need to diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 930674117533..bd4e72f6dfd4 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1277,7 +1277,7 @@ static void balloon_up(struct work_struct *dummy) /* Refuse to balloon below the floor. */ if (avail_pages < num_pages || avail_pages - num_pages < floor) { - pr_warn("Balloon request will be partially fulfilled. %s\n", + pr_info("Balloon request will be partially fulfilled. %s\n", avail_pages < num_pages ? "Not enough memory." : "Balloon floor reached."); diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index af9379a3bf89..cabcb66e7c5e 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -212,12 +212,13 @@ enum vmbus_connect_state { #define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT -struct vmbus_connection { - /* - * CPU on which the initial host contact was made. - */ - int connect_cpu; +/* + * The CPU that Hyper-V will interrupt for VMBUS messages, such as + * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER. + */ +#define VMBUS_CONNECT_CPU 0 +struct vmbus_connection { u32 msg_conn_id; atomic_t offer_in_progress; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 05ead1735c6e..2d2568dac2a6 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -31,6 +31,7 @@ #include <linux/kdebug.h> #include <linux/efi.h> #include <linux/random.h> +#include <linux/kernel.h> #include <linux/syscore_ops.h> #include <clocksource/hyperv_timer.h> #include "hyperv_vmbus.h" @@ -48,14 +49,35 @@ static int hyperv_cpuhp_online; static void *hv_panic_page; +/* + * Boolean to control whether to report panic messages over Hyper-V. + * + * It can be set via /proc/sys/kernel/hyperv/record_panic_msg + */ +static int sysctl_record_panic_msg = 1; + +static int hyperv_report_reg(void) +{ + return !sysctl_record_panic_msg || !hv_panic_page; +} + static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, void *args) { struct pt_regs *regs; - regs = current_pt_regs(); + vmbus_initiate_unload(true); - hyperv_report_panic(regs, val); + /* + * Hyper-V should be notified only once about a panic. If we will be + * doing hyperv_report_panic_msg() later with kmsg data, don't do + * the notification here. + */ + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE + && hyperv_report_reg()) { + regs = current_pt_regs(); + hyperv_report_panic(regs, val, false); + } return NOTIFY_DONE; } @@ -65,7 +87,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, struct die_args *die = (struct die_args *)args; struct pt_regs *regs = die->regs; - hyperv_report_panic(regs, val); + /* + * Hyper-V should be notified only once about a panic. If we will be + * doing hyperv_report_panic_msg() later with kmsg data, don't do + * the notification here. + */ + if (hyperv_report_reg()) + hyperv_report_panic(regs, val, true); return NOTIFY_DONE; } @@ -950,6 +978,9 @@ static int vmbus_resume(struct device *child_device) return drv->resume(dev); } +#else +#define vmbus_suspend NULL +#define vmbus_resume NULL #endif /* CONFIG_PM_SLEEP */ /* @@ -967,11 +998,22 @@ static void vmbus_device_release(struct device *device) } /* - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than - * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm. + * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm. 
+ * + * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we + * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there + * is no way to wake up a Generation-2 VM. + * + * The other 4 ops are for hibernation. */ + static const struct dev_pm_ops vmbus_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume) + .suspend_noirq = NULL, + .resume_noirq = NULL, + .freeze_noirq = vmbus_suspend, + .thaw_noirq = vmbus_resume, + .poweroff_noirq = vmbus_suspend, + .restore_noirq = vmbus_resume, }; /* The one and only one */ @@ -1031,6 +1073,10 @@ void vmbus_on_msg_dpc(unsigned long data) } entry = &channel_message_table[hdr->msgtype]; + + if (!entry->message_handler) + goto msg_handled; + if (entry->handler_type == VMHT_BLOCKING) { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) @@ -1050,14 +1096,28 @@ void vmbus_on_msg_dpc(unsigned long data) /* * If we are handling the rescind message; * schedule the work on the global work queue. + * + * The OFFER message and the RESCIND message should + * not be handled by the same serialized work queue, + * because the OFFER handler may call vmbus_open(), + * which tries to open the channel by sending an + * OPEN_CHANNEL message to the host and waits for + * the host's response; however, if the host has + * rescinded the channel before it receives the + * OPEN_CHANNEL message, the host just silently + * ignores the OPEN_CHANNEL message; as a result, + * the guest's OFFER handler hangs for ever, if we + * handle the RESCIND message in the same serialized + * work queue: the RESCIND handler can not start to + * run before the OFFER handler finishes. */ - schedule_work_on(vmbus_connection.connect_cpu, + schedule_work_on(VMBUS_CONNECT_CPU, &ctx->work); break; case CHANNELMSG_OFFERCHANNEL: atomic_inc(&vmbus_connection.offer_in_progress); - queue_work_on(vmbus_connection.connect_cpu, + queue_work_on(VMBUS_CONNECT_CPU, vmbus_connection.work_queue, &ctx->work); break; @@ -1104,7 +1164,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel) INIT_WORK(&ctx->work, vmbus_onmessage_work); - queue_work_on(vmbus_connection.connect_cpu, + queue_work_on(VMBUS_CONNECT_CPU, vmbus_connection.work_queue, &ctx->work); } @@ -1246,13 +1306,6 @@ static void vmbus_isr(void) add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); } -/* - * Boolean to control whether to report panic messages over Hyper-V. - * - * It can be set via /proc/sys/kernel/hyperv/record_panic_msg - */ -static int sysctl_record_panic_msg = 1; - /* * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg * buffer and call into Hyper-V to transfer the data. @@ -1380,19 +1433,29 @@ static int vmbus_bus_init(void) hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL); if (hv_panic_page) { ret = kmsg_dump_register(&hv_kmsg_dumper); - if (ret) + if (ret) { pr_err("Hyper-V: kmsg dump register " "error 0x%x\n", ret); + hv_free_hyperv_page( + (unsigned long)hv_panic_page); + hv_panic_page = NULL; + } } else pr_err("Hyper-V: panic message page memory " "allocation failed"); } register_die_notifier(&hyperv_die_block); - atomic_notifier_chain_register(&panic_notifier_list, - &hyperv_panic_block); } + /* + * Always register the panic notifier because we need to unload + * the VMbus channel connection to prevent any VMbus + * activity after the VM panics. 
+ */ + atomic_notifier_chain_register(&panic_notifier_list, + &hyperv_panic_block); + vmbus_request_offers(); return 0; @@ -1406,7 +1469,6 @@ err_alloc: hv_remove_vmbus_irq(); bus_unregister(&hv_bus); - free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; return ret; @@ -2169,7 +2231,10 @@ static int vmbus_bus_suspend(struct device *dev) if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0) wait_for_completion(&vmbus_connection.ready_for_suspend_event); - WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0); + if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) { + pr_err("Can not suspend due to a previous failed resuming\n"); + return -EBUSY; + } mutex_lock(&vmbus_connection.channel_mutex); @@ -2202,8 +2267,6 @@ static int vmbus_bus_suspend(struct device *dev) vmbus_initiate_unload(false); - vmbus_connection.conn_state = DISCONNECTED; - /* Reset the event for the next resume. */ reinit_completion(&vmbus_connection.ready_for_resume_event); @@ -2245,13 +2308,18 @@ static int vmbus_bus_resume(struct device *dev) vmbus_request_offers(); - wait_for_completion(&vmbus_connection.ready_for_resume_event); + if (wait_for_completion_timeout( + &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0) + pr_err("Some vmbus device is missing after suspending?\n"); /* Reset the event for the next suspend. */ reinit_completion(&vmbus_connection.ready_for_suspend_event); return 0; } +#else +#define vmbus_bus_suspend NULL +#define vmbus_bus_resume NULL #endif /* CONFIG_PM_SLEEP */ static const struct acpi_device_id vmbus_acpi_device_ids[] = { @@ -2262,16 +2330,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = { MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); /* - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than - * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the - * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the - * pci "noirq" restore callback runs before "non-noirq" callbacks (see + * Note: we must use the "no_irq" ops, otherwise hibernation can not work with + * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in + * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() -> * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's - * resume callback must also run via the "noirq" callbacks. + * resume callback must also run via the "noirq" ops. + * + * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment + * earlier in this file before vmbus_pm. */ + static const struct dev_pm_ops vmbus_bus_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume) + .suspend_noirq = NULL, + .resume_noirq = NULL, + .freeze_noirq = vmbus_bus_suspend, + .thaw_noirq = vmbus_bus_resume, + .poweroff_noirq = vmbus_bus_suspend, + .restore_noirq = vmbus_bus_resume }; static struct acpi_driver vmbus_acpi_driver = { @@ -2288,7 +2364,6 @@ static void hv_kexec_handler(void) { hv_stimer_global_cleanup(); vmbus_initiate_unload(false); - vmbus_connection.conn_state = DISCONNECTED; /* Make sure conn_state is set as hv_synic_cleanup checks for it */ mb(); cpuhp_remove_state(hyperv_cpuhp_online); @@ -2305,7 +2380,6 @@ static void hv_crash_handler(struct pt_regs *regs) * doing the cleanup for current CPU only. This should be sufficient * for kdump. 
*/ - vmbus_connection.conn_state = DISCONNECTED; cpu = smp_processor_id(); hv_stimer_cleanup(cpu); hv_synic_disable_regs(cpu); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 13a6b4afb4b3..1f0b52e56a56 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -304,6 +304,16 @@ config SENSORS_FAM15H_POWER This driver can also be built as a module. If so, the module will be called fam15h_power. +config SENSORS_ALTRA + tristate "Altra sensors driver" + depends on ARM64 + help + If you say yes here you get support for Ampere SoC core and package + sensors for Ampere Altra CPUs. + + This driver can also be built as a module. If so, the module + will be called altra-hwmon. + config SENSORS_APPLESMC tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" depends on INPUT && X86 diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 40c036ea45e6..01edf299bdd5 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o +obj-$(CONFIG_SENSORS_ALTRA) += altra-hwmon.o obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 4cf25458f0b9..740ac0a1b726 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device) res = setup_attrs(resource); if (res) - goto exit_free; + goto exit_free_capability; resource->hwmon_dev = hwmon_device_register(&device->dev); if (IS_ERR(resource->hwmon_dev)) { @@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device) exit_remove: remove_attrs(resource); +exit_free_capability: + free_capabilities(resource); exit_free: kfree(resource); exit: diff --git a/drivers/hwmon/altra-hwmon.c b/drivers/hwmon/altra-hwmon.c new file mode 100644 index 000000000000..12460edd5f18 --- /dev/null +++ b/drivers/hwmon/altra-hwmon.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Ampere Altra SoC Hardware Monitoring Driver + * + * Copyright (C) 2020 Ampere Computing LLC + * Author: Loc Ho <loc.ho@os.amperecomputing.com> + * Hoan Tran <hoan@os.amperecomputing.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * + */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#define DRVNAME "altra_hwmon" +#define ALTRA_HWMON_VER1 1 +#define ALTRA_HWMON_VER2 2 + +#define HW_SUPPORTED_VER 1 + +#define UNIT_DEGREE_CELSIUS 0x0001 +#define UNIT_JOULE 0x0010 +#define UNIT_MILLI_JOULE 0x0011 +#define UNIT_MICRO_JOULE 0x0012 + +#define HW_METRIC_LABEL_REG 0x0000 +#define HW_METRIC_LABEL_SIZE 16 +#define HW_METRIC_INFO_REG 0x0010 +#define HW_METRIC_INFO_UNIT_RD(x) ((x) & 0xFF) +#define HW_METRIC_INFO_DATASIZE_RD(x) (((x) >> 8) & 0xFF) +#define HW_METRIC_INFO_DATACNT_RD(x) (((x) >> 16) & 0xFFFF) +#define HW_METRIC_DATA_REG 0x0018 +#define HW_METRIC_HDRSIZE 24 + +#define HW_METRICS_ID_REG 0x0000 +#define HW_METRICS_ID 0x304D5748 /* HWM0 */ +#define HW_METRICS_INFO_REG 0x0004 +#define HW_METRICS_INFO_VER_RD(x) ((x) & 0xFFFF) +#define HW_METRICS_INFO_CNT_RD(x) (((x) >> 16) & 0xFFFF) +#define HW_METRICS_DATA_REG 0x0008 +#define HW_METRICS_HDRSIZE 8 + +#define SENSOR_ITEM_LABEL_SIZE (HW_METRIC_LABEL_SIZE + 3 + 1) + +struct sensor_item { + char label[SENSOR_ITEM_LABEL_SIZE]; /* NUL-terminated label */ + u32 scale_factor; /* Convert HW unit to hwmon unit */ + u8 data_size; /* 4 or 8 bytes */ + u32 hw_reg; /* Register offset to data */ +}; + +struct altra_hwmon_context { + struct hwmon_channel_info *channel_info; + const struct hwmon_channel_info **info; + struct hwmon_chip_info chip; + struct sensor_item *sensor_list[hwmon_max]; + u32 sensor_list_cnt[hwmon_max]; + struct device *dev; + struct device *hwmon_dev; + void __iomem *base; + u32 base_size; +}; + +static u32 altra_hwmon_read32(struct altra_hwmon_context *ctx, u32 reg) +{ + return readl_relaxed(ctx->base + reg); +} + +static u64 altra_hwmon_read64(struct altra_hwmon_context *ctx, u32 reg) +{ + return readq_relaxed(ctx->base + reg); +} + +static int altra_hwmon_read_labels(struct device *dev, + enum hwmon_sensor_types type, u32 attr, + int channel, const char **str) +{ + struct altra_hwmon_context *ctx = dev_get_drvdata(dev); + struct sensor_item *item; + + if (type >= hwmon_max) + return -EINVAL; + if (channel >= ctx->sensor_list_cnt[type]) + return -EINVAL; + + item = ctx->sensor_list[type]; + *str = item[channel].label; + return 0; +} + +static int altra_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct altra_hwmon_context *ctx = dev_get_drvdata(dev); + struct sensor_item *item; + + if (type >= hwmon_max) + return -EINVAL; + if (channel >= ctx->sensor_list_cnt[type]) + return -EINVAL; + + item = ctx->sensor_list[type]; + if (item[channel].data_size == 4) + *val = altra_hwmon_read32(ctx, item[channel].hw_reg); + else + *val = altra_hwmon_read64(ctx, item[channel].hw_reg); + *val *= item[channel].scale_factor; + + return 0; +} + +static umode_t altra_hwmon_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return 0444; +} + +static const struct hwmon_ops altra_hwmon_ops = { + .is_visible = altra_hwmon_is_visible, + .read = altra_hwmon_read, + .read_string = altra_hwmon_read_labels, +}; + +static enum hwmon_sensor_types altra_hwmon_unit2type(u8 unit) +{ + switch (unit) { + case UNIT_DEGREE_CELSIUS: + return hwmon_temp; + case UNIT_JOULE: + case UNIT_MILLI_JOULE: + case UNIT_MICRO_JOULE: + return hwmon_energy; + } + return hwmon_max; +} + +static u32 altra_hwmon_scale_factor(u8 unit) +{ + switch
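/*
 * The factors returned by altra_hwmon_scale_factor() here convert the
 * firmware's native units into the fixed hwmon sysfs units:
 * millidegrees Celsius for tempX_input and microjoules for
 * energyX_input. Worked example: a counter reported in millijoules
 * (UNIT_MILLI_JOULE, 0x0011) with a raw value of 2500 is exposed as
 * 2500 * 1000 = 2500000, i.e. 2.5 J.
 */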
(unit) { + case UNIT_DEGREE_CELSIUS: + return 1000; + case UNIT_JOULE: + return 1000000; + case UNIT_MILLI_JOULE: + return 1000; + case UNIT_MICRO_JOULE: + return 1; + } + return 1; +} + +static u32 altra_hwmon_type2flag(enum hwmon_sensor_types type) +{ + switch (type) { + case hwmon_energy: + return HWMON_E_INPUT | HWMON_E_LABEL; + case hwmon_temp: + return HWMON_T_LABEL | HWMON_T_INPUT; + default: + return 0; + } +} + +static int altra_sensor_is_valid(struct altra_hwmon_context *ctx, u32 reg, u32 data_size) +{ + u64 val; + + if (data_size == 4) + val = altra_hwmon_read32(ctx, reg); + else + val = altra_hwmon_read64(ctx, reg); + + return val ? 1 : 0; +} + +static int altra_create_sensor(struct altra_hwmon_context *ctx, + u32 metric_info, + struct hwmon_channel_info *info) +{ + enum hwmon_sensor_types type; + char label[SENSOR_ITEM_LABEL_SIZE]; + struct sensor_item *item_list; + struct sensor_item *item; + int data_size; + u32 *s_config; + u32 hw_info; + u32 total; + int i, j; + + /* Check for supported type */ + hw_info = altra_hwmon_read32(ctx, metric_info + HW_METRIC_INFO_REG); + type = altra_hwmon_unit2type(HW_METRIC_INFO_UNIT_RD(hw_info)); + if (type == hwmon_max) { + dev_err(ctx->dev, + "malformed info header @ 0x%x value 0x%x. Ignore remaining\n", + metric_info + HW_METRIC_INFO_REG, hw_info); + return -ENODEV; + } + + /* Label */ + for (i = 0; i < HW_METRIC_LABEL_SIZE; i += 4) + *(u32 *)&label[i] = altra_hwmon_read32(ctx, + metric_info + HW_METRIC_LABEL_REG + i); + label[sizeof(label) - 1] = '\0'; + if (strlen(label) == 0) { + dev_err(ctx->dev, + "malformed label header 0x%x. Ignore remaining\n", + metric_info + HW_METRIC_LABEL_REG); + return -ENODEV; + } + + total = HW_METRIC_INFO_DATACNT_RD(hw_info); + data_size = HW_METRIC_INFO_DATASIZE_RD(hw_info); + /* Get the total valid sensors */ + j = 0; + for (i = 0; i < total; i++) { + if (altra_sensor_is_valid(ctx, metric_info + HW_METRIC_DATA_REG + + i * data_size, data_size)) + j++; + } + total = j; + + if (!ctx->sensor_list[type]) { + ctx->sensor_list[type] = devm_kzalloc(ctx->dev, + sizeof(struct sensor_item) * total, + GFP_KERNEL); + if (!ctx->sensor_list[type]) + return -ENOMEM; + } else { + item_list = devm_kzalloc(ctx->dev, + sizeof(*item) * (ctx->sensor_list_cnt[type] + total), + GFP_KERNEL); + if (!item_list) + return -ENOMEM; + memcpy(item_list, ctx->sensor_list[type], + sizeof(*item) * ctx->sensor_list_cnt[type]); + devm_kfree(ctx->dev, ctx->sensor_list[type]); + ctx->sensor_list[type] = item_list; + } + + s_config = devm_kcalloc(ctx->dev, total, sizeof(u32), GFP_KERNEL); + if (!s_config) + return -ENOMEM; + info->type = type; + info->config = s_config; + + /* Set up sensor entry */ + item_list = ctx->sensor_list[type]; + j = 0; + for (i = 0; i < HW_METRIC_INFO_DATACNT_RD(hw_info); i++) { + /* Check if sensor is valid */ + if (!altra_sensor_is_valid(ctx, metric_info + HW_METRIC_DATA_REG + + i * data_size, data_size)) + continue; + + item = &item_list[ctx->sensor_list_cnt[type]]; + item->hw_reg = metric_info + HW_METRIC_DATA_REG + i * data_size; + scnprintf(item->label, SENSOR_ITEM_LABEL_SIZE, "%s %03u", label, j); + item->scale_factor = altra_hwmon_scale_factor(HW_METRIC_INFO_UNIT_RD(hw_info)); + item->data_size = data_size; + s_config[j] = altra_hwmon_type2flag(type); + ctx->sensor_list_cnt[type]++; + j++; + } + + return 0; +} + +static int altra_hwmon_create_sensors(struct altra_hwmon_context *ctx) +{ + u32 metrics_info; + u32 total_metric; + u32 hw_reg; + u32 hw_end_reg; + int ret; + u32 val; + int i; + int used; + + if (altra_hwmon_read32(ctx, HW_METRICS_ID_REG) !=
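/*
 * altra_create_sensor() above grows the per-type sensor array by hand:
 * devm_kzalloc() a larger buffer, memcpy() the old entries across, then
 * devm_kfree() the old buffer. On kernels that provide devm_krealloc()
 * (v5.10 and later -- an assumption about the target tree), the same
 * sequence collapses to a single call:
 *
 *	item_list = devm_krealloc(ctx->dev, ctx->sensor_list[type],
 *				  sizeof(*item_list) * (old_cnt + total),
 *				  GFP_KERNEL);
 *	if (!item_list)
 *		return -ENOMEM;
 *	ctx->sensor_list[type] = item_list;
 *
 * with old_cnt standing in for ctx->sensor_list_cnt[type].
 */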
HW_METRICS_ID) + return -ENODEV; + + metrics_info = altra_hwmon_read32(ctx, HW_METRICS_INFO_REG); + if (HW_METRICS_INFO_VER_RD(metrics_info) != HW_SUPPORTED_VER) + return -ENODEV; + + total_metric = HW_METRICS_INFO_CNT_RD(metrics_info); + ctx->channel_info = devm_kzalloc(ctx->dev, + sizeof(struct hwmon_channel_info) * total_metric, + GFP_KERNEL); + if (!ctx->channel_info) + return -ENOMEM; + ctx->info = devm_kzalloc(ctx->dev, + sizeof(struct hwmon_channel_info *) * (total_metric + 1), + GFP_KERNEL); + if (!ctx->info) + return -ENOMEM; + + hw_reg = HW_METRICS_HDRSIZE; + for (used = 0, i = 0; i < total_metric; i++) { + /* Check for out of bound */ + if ((hw_reg + HW_METRIC_HDRSIZE) > ctx->base_size) { + dev_err(ctx->dev, + "malformed metric header 0x%x (exceeded range). Ignore remaining\n", + hw_reg); + break; + } + + /* + * At least the metric header is in range. Now validate the + * data range as well. + */ + val = altra_hwmon_read32(ctx, hw_reg + HW_METRIC_INFO_REG); + hw_end_reg = hw_reg + HW_METRIC_HDRSIZE + + HW_METRIC_INFO_DATASIZE_RD(val) * + HW_METRIC_INFO_DATACNT_RD(val); + if (hw_end_reg > ctx->base_size) { + dev_err(ctx->dev, + "malformed metric data 0x%x (exceeded range). Ignore remaining\n", + hw_reg); + break; + } + ret = altra_create_sensor(ctx, hw_reg, &ctx->channel_info[used]); + + /* 64-bit alignment */ + hw_reg = hw_end_reg; + hw_reg = ((hw_reg + 7) / 8) * 8; + if (ret == -ENODEV) + continue; + if (ret < 0) + return ret; + ctx->info[used] = &ctx->channel_info[used]; + used++; + } + ctx->info[used] = NULL; + return 0; +} + +static int altra_hwmon_probe(struct platform_device *pdev) +{ + const struct acpi_device_id *acpi_id; + struct altra_hwmon_context *ctx; + struct device *dev = &pdev->dev; + struct resource *res; + int version; + int err; + + ctx = devm_kzalloc(dev, sizeof(struct altra_hwmon_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + dev_set_drvdata(dev, ctx); + ctx->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!acpi_id) + return -EINVAL; + + version = (int)acpi_id->driver_data; + + ctx->base_size = resource_size(res); + if (version == ALTRA_HWMON_VER1) { + ctx->base = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->base)) + return PTR_ERR(ctx->base); + } else { + ctx->base = memremap(res->start, ctx->base_size, MEMREMAP_WB); + if (!ctx->base) + return -ENOMEM; + } + + /* Create sensors */ + err = altra_hwmon_create_sensors(ctx); + if (err != 0) { + if (err == -ENODEV) + dev_err(dev, "No sensor\n"); + else + dev_err(dev, "Failed to create sensors error %d\n", err); + return err; + } + + ctx->chip.ops = &altra_hwmon_ops; + ctx->chip.info = ctx->info; + ctx->hwmon_dev = devm_hwmon_device_register_with_info(dev, DRVNAME, ctx, + &ctx->chip, NULL); + if (IS_ERR(ctx->hwmon_dev)) { + dev_err(dev, "Failed to register with hwmon\n"); + err = PTR_ERR(ctx->hwmon_dev); + return err; + } + + return 0; +} + +static int altra_hwmon_remove(struct platform_device *pdev) +{ + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id altra_hwmon_acpi_match[] = { + {"AMPC0005", ALTRA_HWMON_VER1}, + {"AMPC0006", ALTRA_HWMON_VER2}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, altra_hwmon_acpi_match); +#endif + +static struct platform_driver altra_hwmon_driver = { + .probe = altra_hwmon_probe, + .remove = altra_hwmon_remove, + .driver = { + .name = "altra-hwmon", + .acpi_match_table = ACPI_PTR(altra_hwmon_acpi_match), + }, +}; +module_platform_driver(altra_hwmon_driver); + +MODULE_DESCRIPTION("Altra SoC hardware sensor monitor");
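/*
 * The manual round-up in the metric walker above,
 *
 *	hw_reg = ((hw_reg + 7) / 8) * 8;
 *
 * is the open-coded form of the kernel's ALIGN() helper, i.e.
 * hw_reg = ALIGN(hw_reg, 8); either spelling advances the cursor to
 * the next 8-byte-aligned metric header.
 */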
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 183ff3d25129..006bc07bcd30 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -748,15 +748,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -805,12 +808,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -846,12 +848,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -867,10 +868,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -946,13 +948,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 40c489be62ea..40f3139f1e02 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev, ret = of_property_read_u32(child, "reg", &pwm_port); if (ret) return ret; + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) + return -EINVAL; aspeed_create_pwm_port(priv, (u8)pwm_port); ret = of_property_count_u8_elems(child, "cooling-levels"); diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index 53b517dbe7e6..4af2fc309c28 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev, int channel = to_sensor_dev_attr(devattr)->index; int ret; - mutex_lock(&hwmon->hwmon_lock); + mutex_lock(&hwmon->da9052->auxadc_lock); ret = __da9052_read_tsi(dev, channel); - mutex_unlock(&hwmon->hwmon_lock); + mutex_unlock(&hwmon->da9052->auxadc_lock); if (ret < 0) return ret; diff --git 
a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 491a570e8e50..924c02c1631d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev, } result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); - if (result) { + if (result < 0) { count = result; goto err; } diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index 8a51dcf055ea..026f70d7c5a4 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -403,7 +403,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable) /* For enabling routine, increase refcount and resume() at first */ if (enable) { - ret = pm_runtime_get_sync(ina->pm_dev); + ret = pm_runtime_resume_and_get(ina->pm_dev); if (ret < 0) { dev_err(dev, "Failed to get PM runtime\n"); return ret; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index f2d81b0558e5..e3f1ebee7130 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id) } data->config = config; - hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42", data, &jc42_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..f96fd8efb45a 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 743752a2467a..64122eb38060 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = { * Map device tree / platform data register bit map to chip bit map. * Applies to alert register and over-temperature register. 
*/ -#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ +#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ (((reg) & 0x01) << 6) | ((reg) & 0x80)) +#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) #define MAX6697_REG_STAT(n) (0x44 + (n)) @@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data, return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, - MAX6697_MAP_BITS(pdata->alert_mask)); + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, - MAX6697_MAP_BITS(pdata->over_temperature_mask)); + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); if (ret < 0) return ret; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 7efa6bfef060..ba9b96973e80 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = { "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", - "", - "", + "PECI Agent 0 Calibration", /* undocumented */ + "PECI Agent 1 Calibration", /* undocumented */ "", "Virtual_TEMP" }; -#define NCT6798_TEMP_MASK 0x8fff0ffe +#define NCT6798_TEMP_MASK 0xbfff0ffe #define NCT6798_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 281c81edabc6..b812b199e5e5 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -197,7 +197,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; @@ -209,7 +209,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; @@ -356,6 +356,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, struct nct7904_data *data = dev_get_drvdata(dev); int ret, temp; unsigned int reg1, reg2, reg3; + s8 temps; switch (attr) { case hwmon_temp_input: @@ -461,7 +462,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; - *val = ret * 1000; + temps = ret; + *val = temps * 1000; return 0; } diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 5caa37fbfc18..66b12e5ccbc6 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -454,6 +454,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id); static int adm1275_probe(struct i2c_client *client, const struct i2c_device_id *id) { + s32 (*config_read_fn)(const struct i2c_client *client, u8 reg); u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; int config, device_config; int ret; @@ -499,11 +500,16 @@ static int adm1275_probe(struct i2c_client *client, "Device mismatch: Configured %s, detected %s\n", id->name, mid->name); - config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG); + if (mid->driver_data == adm1272 || mid->driver_data == adm1278 || + mid->driver_data == adm1293 || mid->driver_data == adm1294) + config_read_fn = i2c_smbus_read_word_data; + else + config_read_fn = i2c_smbus_read_byte_data; + config = config_read_fn(client, ADM1275_PMON_CONFIG); if (config < 0) return config; - device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG); + device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG); if 
(device_config < 0) return device_config; diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c index 5c63a6600729..e22617def70b 100644 --- a/drivers/hwmon/pmbus/max34440.c +++ b/drivers/hwmon/pmbus/max34440.c @@ -387,7 +387,6 @@ static struct pmbus_driver_info max34440_info[] = { .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, - .read_byte_data = max34440_read_byte_data, .read_word_data = max34440_read_word_data, .write_word_data = max34440_write_word_data, }, @@ -418,7 +417,6 @@ static struct pmbus_driver_info max34440_info[] = { .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, - .read_byte_data = max34440_read_byte_data, .read_word_data = max34440_read_word_data, .write_word_data = max34440_write_word_data, }, @@ -454,7 +452,6 @@ static struct pmbus_driver_info max34440_info[] = { .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, - .read_byte_data = max34440_read_byte_data, .read_word_data = max34440_read_word_data, .write_word_data = max34440_write_word_data, }, diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 42ffd2e5182d..df6f042fb605 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -54,16 +54,18 @@ static irqreturn_t pulse_handler(int irq, void *dev_id) static void sample_timer(struct timer_list *t) { struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer); + unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start); int pulses; - u64 tmp; - pulses = atomic_read(&ctx->pulses); - atomic_sub(pulses, &ctx->pulses); - tmp = (u64)pulses * ktime_ms_delta(ktime_get(), ctx->sample_start) * 60; - do_div(tmp, ctx->pulses_per_revolution * 1000); - ctx->rpm = tmp; + if (delta) { + pulses = atomic_read(&ctx->pulses); + atomic_sub(pulses, &ctx->pulses); + ctx->rpm = (unsigned int)(pulses * 1000 * 60) / + (ctx->pulses_per_revolution * delta); + + ctx->sample_start = ktime_get(); + } - ctx->sample_start = ktime_get(); mod_timer(&ctx->rpm_timer, jiffies + HZ); } @@ -328,8 +330,18 @@ static int pwm_fan_probe(struct platform_device *pdev) ctx->pwm_value = MAX_PWM; - /* Set duty cycle to maximum allowed and enable PWM output */ pwm_init_state(ctx->pwm, &state); + /* + * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned + * long. Check this here to prevent the fan running at a too low + * frequency. 
+ */ + if (state.period > ULONG_MAX / MAX_PWM + 1) { + dev_err(dev, "Configured period too big\n"); + return -EINVAL; + } + + /* Set duty cycle to maximum allowed and enable PWM output */ state.duty_cycle = ctx->pwm->args.period - 1; state.enabled = true; diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 8a7732c0bef3..7cd13a217c61 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = { [ENERGY] = hwmon_energy, }; -static u32 hwmon_attributes[] = { +static u32 hwmon_attributes[hwmon_max] = { [hwmon_chip] = HWMON_C_REGISTER_TZ, [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 3810290e6d07..95cba1a2cddf 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -176,6 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) unsigned long flags; struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct perf_output_handle *handle = data; + struct cs_buffers *buf = etm_perf_sink_config(handle); spin_lock_irqsave(&drvdata->spinlock, flags); @@ -186,7 +187,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) } /* Get a handle on the pid of the process to monitor */ - pid = task_pid_nr(handle->event->owner); + pid = buf->pid; if (drvdata->pid != -1 && drvdata->pid != pid) { ret = -EBUSY; @@ -383,6 +384,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev, if (!buf) return NULL; + buf->pid = task_pid_nr(event->owner); buf->snapshot = overwrite; buf->nr_pages = nr_pages; buf->data_pages = pages; diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 84f1dcb69827..9b0c5d719232 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data) cpumask_t *mask = &event_data->mask; struct coresight_device *sink; - if (WARN_ON(cpumask_empty(mask))) + if (!event_data->snk_config) return; - if (!event_data->snk_config) + if (WARN_ON(cpumask_empty(mask))) return; cpu = cpumask_first(mask); @@ -310,6 +310,16 @@ static void etm_event_start(struct perf_event *event, int flags) if (!event_data) goto fail; + /* + * Check if this ETM is allowed to trace, as decided + * at etm_setup_aux(). This could be due to an unreachable + * sink from this ETM. We can't do much in this case if + * the sink was specified or hinted to the driver. For + * now, simply don't record anything on this ETM. 
+ */ + if (!cpumask_test_cpu(cpu, &event_data->mask)) + goto fail_end_stop; + path = etm_event_cpu_path(event_data, cpu); /* We need a sink, no need to continue without one */ sink = coresight_get_sink(path); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index a128b5063f46..83dccdeef906 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1184,6 +1184,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: + etmdrvdata[drvdata->cpu] = NULL; if (--etm4_count == 0) { cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online) diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 82e563cdc879..56379d4a7ede 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -86,6 +86,7 @@ enum cs_mode { * struct cs_buffer - keep track of a recording session' specifics * @cur: index of the current buffer * @nr_pages: max number of pages granted to us + * @pid: PID this cs_buffer belongs to * @offset: offset within the current buffer * @data_size: how much we collected in this run * @snapshot: is this run in snapshot mode @@ -94,6 +95,7 @@ enum cs_mode { struct cs_buffers { unsigned int cur; unsigned int nr_pages; + pid_t pid; unsigned long offset; local_t data_size; bool snapshot; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index d0cc3985b72a..a5d70d09d2bd 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -227,6 +227,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct perf_output_handle *handle = data; + struct cs_buffers *buf = etm_perf_sink_config(handle); spin_lock_irqsave(&drvdata->spinlock, flags); do { @@ -243,7 +244,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) } /* Get a handle on the pid of the process to monitor */ - pid = task_pid_nr(handle->event->owner); + pid = buf->pid; if (drvdata->pid != -1 && drvdata->pid != pid) { ret = -EBUSY; @@ -399,6 +400,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, if (!buf) return NULL; + buf->pid = task_pid_nr(event->owner); buf->snapshot = overwrite; buf->nr_pages = nr_pages; buf->data_pages = pages; @@ -596,13 +598,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) goto out; } - /* There is no point in reading a TMC in HW FIFO mode */ - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode != TMC_MODE_CIRCULAR_BUFFER) { - ret = -EINVAL; - goto out; - } - /* Don't interfere if operated from Perf */ if (drvdata->mode == CS_MODE_PERF) { ret = -EINVAL; @@ -616,8 +611,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) } /* Disable the TMC if need be */ - if (drvdata->mode == CS_MODE_SYSFS) + if (drvdata->mode == CS_MODE_SYSFS) { + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + ret = -EINVAL; + goto out; + } __tmc_etb_disable_hw(drvdata); + } drvdata->reading = true; out: @@ -639,15 +641,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) spin_lock_irqsave(&drvdata->spinlock, flags); - /* There is no point in 
reading a TMC in HW FIFO mode */ - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode != TMC_MODE_CIRCULAR_BUFFER) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - return -EINVAL; - } - /* Re-enable the TMC if need be */ if (drvdata->mode == CS_MODE_SYSFS) { + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EINVAL; + } /* * The trace run will continue with the same allocated trace * buffer. As such zero-out the buffer so that we don't end diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 625882bc8b08..ed77c7f7b344 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -217,6 +217,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages, } else { page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + goto err; } paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir); if (dma_mapping_error(real_dev, paddr)) @@ -1533,7 +1535,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev, /* Insert barrier packets at the beginning, if there was an overflow */ if (lost) - tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); + tmc_etr_buf_insert_barrier_packet(etr_buf, offset); tmc_etr_sync_perf_buffer(etr_perf, offset, size); /* diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index ca232ec565e8..c9ac3dc65113 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev, { struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + int ret; /* In host mode, this is up to the external debugger, do nothing. */ if (hub->host_mode) return 0; - if (!hubdrv->set_output) - return -ENOTSUPP; + /* + * hub is instantiated together with the source device that + * calls here, so guaranteed to be present. 
+ */ + hubdrv = to_intel_th_driver(hub->dev.driver); + if (!hubdrv || !try_module_get(hubdrv->driver.owner)) + return -EINVAL; - return hubdrv->set_output(hub, master); + if (!hubdrv->set_output) { + ret = -ENOTSUPP; + goto out; + } + + ret = hubdrv->set_output(hub, master); + +out: + module_put(hubdrv->driver.owner); + return ret; } EXPORT_SYMBOL_GPL(intel_th_set_output); diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index f72803a02391..28509b02a0b5 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, output->active = false; for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS) { + TH_CONFIGURABLE_MASTERS + 1) { gth_master_set(gth, master, -1); } spin_unlock(>h->gth_lock); @@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, othdev->output.port = -1; othdev->output.active = false; gth->output[port].output = NULL; - for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++) + for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++) if (gth->master[master] == port) gth->master[master] = -1; spin_unlock(>h->gth_lock); diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 6f4f5486fe6d..5fe694708b7a 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -47,11 +47,13 @@ struct intel_th_output { /** * struct intel_th_drvdata - describes hardware capabilities and quirks * @tscu_enable: device needs SW to enable time stamping unit + * @multi_is_broken: device has multiblock mode is broken * @has_mintctl: device has interrupt control (MINTCTL) register * @host_mode_only: device can only operate in 'host debugger' mode */ struct intel_th_drvdata { unsigned int tscu_enable : 1, + multi_is_broken : 1, has_mintctl : 1, host_mode_only : 1; }; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 255f8f41c8ff..3cd2489d398c 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -157,7 +157,8 @@ struct msc { /* config */ unsigned int enabled : 1, wrap : 1, - do_irq : 1; + do_irq : 1, + multi_is_broken : 1; unsigned int mode; unsigned int burst_len; unsigned int index; @@ -1665,7 +1666,7 @@ static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1); - msc->mode = MSC_MODE_MULTI; + msc->mode = msc->multi_is_broken ? 
MSC_MODE_SINGLE : MSC_MODE_MULTI; mutex_init(&msc->buf_mutex); INIT_LIST_HEAD(&msc->win_list); INIT_LIST_HEAD(&msc->iter_list); @@ -1877,6 +1878,9 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf, return -EINVAL; found: + if (i == MSC_MODE_MULTI && msc->multi_is_broken) + return -EOPNOTSUPP; + mutex_lock(&msc->buf_mutex); ret = 0; @@ -2083,6 +2087,9 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (!res) msc->do_irq = 1; + if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken)) + msc->multi_is_broken = 1; + msc->index = thdev->id; msc->thdev = thdev; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 86aa6a46bcba..a723c8c33087 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); } +static const struct intel_th_drvdata intel_th_1x_multi_is_broken = { + .multi_is_broken = 1, +}; + static const struct intel_th_drvdata intel_th_2x = { .tscu_enable = 1, .has_mintctl = 1, @@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { { /* Kaby Lake PCH-H */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), - .driver_data = (kernel_ulong_t)0, + .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, }, { /* Denverton */ @@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { { /* Comet Lake PCH-V */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), - .driver_data = (kernel_ulong_t)&intel_th_2x, + .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, }, { /* Ice Lake NNPI */ @@ -229,11 +233,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Jasper Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Jasper Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Elkhart Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), @@ -244,6 +258,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Alder Lake-P */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Emmitsburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Alder Lake-M */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Rocket Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 3a1f4e650378..a1529f571491 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master, { struct sth_device *sth = container_of(stm_data, struct sth_device, stm); - intel_th_set_output(to_intel_th_device(sth->dev), master); - - return 0; + return intel_th_set_output(to_intel_th_device(sth->dev), master); } static int intel_th_sw_init(struct sth_device *sth) diff --git a/drivers/hwtracing/stm/heartbeat.c 
b/drivers/hwtracing/stm/heartbeat.c index 3e7df1c0477f..81d7b21d31ec 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c @@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) static int stm_heartbeat_init(void) { - int i, ret = -ENOMEM; + int i, ret; if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) return -EINVAL; @@ -72,8 +72,10 @@ static int stm_heartbeat_init(void) for (i = 0; i < nr_devs; i++) { stm_heartbeat[i].data.name = kasprintf(GFP_KERNEL, "heartbeat.%d", i); - if (!stm_heartbeat[i].data.name) + if (!stm_heartbeat[i].data.name) { + ret = -ENOMEM; goto fail_unregister; + } stm_heartbeat[i].data.nr_chans = 1; stm_heartbeat[i].data.link = stm_heartbeat_link; diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 5ac93f41bfec..1d3691a049b1 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); + + /* + * After a reset we need to re-apply any configuration + * (calculated in pca_init) to get the bus in a working state. + */ + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); + + pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); + pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } @@ -314,7 +328,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, DEB2("BUS ERROR - SDA Stuck low\n"); pca_reset(adap); goto out; - case 0x90: /* Bus error - SCL stuck low */ + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ DEB2("BUS ERROR - SCL Stuck low\n"); pca_reset(adap); goto out; @@ -422,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap) " Use the nominal frequency.\n", adap->name); } - pca_reset(pca_data); - clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.clock_freq = clock; + + pca_reset(pca_data); } else { int clock; int mode; @@ -495,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap) thi = tlow * min_thi / min_tlow; } + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.mode = mode; + pca_data->bus_settings.tlow = tlow; + pca_data->bus_settings.thi = thi; + pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); - - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); - pca_outw(pca_data, I2C_PCA_IND, mode); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); - pca_outw(pca_data, I2C_PCA_IND, tlow); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); - pca_outw(pca_data, I2C_PCA_IND, thi); - - pca_set_con(pca_data, I2C_PCA_CON_ENSIO); } udelay(500); /* 500 us for oscillator to stabilise */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 146ce40d8e0a..2d08a8719506 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -1162,6 +1162,7 @@ config I2C_RCAR tristate 
"Renesas R-Car I2C Controller" depends on ARCH_RENESAS || COMPILE_TEST select I2C_SLAVE + select RESET_CONTROLLER if ARCH_RCAR_GEN3 help If you say yes to this option, support will be included for the R-Car I2C controller. diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 1de23b4f3809..a60042431370 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -70,6 +70,7 @@ * @isr_mask: cached copy of local ISR enables. * @isr_status: cached copy of local ISR status. * @lock: spinlock for IRQ synchronization. + * @isr_mutex: mutex for IRQ thread. */ struct altr_i2c_dev { void __iomem *base; @@ -86,6 +87,7 @@ struct altr_i2c_dev { u32 isr_mask; u32 isr_status; spinlock_t lock; /* IRQ synchronization */ + struct mutex isr_mutex; }; static void @@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) struct altr_i2c_dev *idev = _dev; u32 status = idev->isr_status; + mutex_lock(&idev->isr_mutex); if (!idev->msg) { dev_warn(idev->dev, "unexpected interrupt\n"); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); - return IRQ_HANDLED; + goto out; } read = (idev->msg->flags & I2C_M_RD) != 0; @@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) complete(&idev->msg_complete); dev_dbg(idev->dev, "Message Complete\n"); } +out: + mutex_unlock(&idev->isr_mutex); return IRQ_HANDLED; } @@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) u32 value; u8 addr = i2c_8bit_addr_from_msg(msg); + mutex_lock(&idev->isr_mutex); idev->msg = msg; idev->msg_len = msg->len; idev->buf = msg->buf; @@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) altr_i2c_int_enable(idev, imask, true); altr_i2c_fill_tx_fifo(idev); } + mutex_unlock(&idev->isr_mutex); time_left = wait_for_completion_timeout(&idev->msg_complete, ALTR_I2C_XFER_TIMEOUT); @@ -384,7 +391,6 @@ static int altr_i2c_probe(struct platform_device *pdev) struct altr_i2c_dev *idev = NULL; struct resource *res; int irq, ret; - u32 val; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) @@ -410,18 +416,19 @@ static int altr_i2c_probe(struct platform_device *pdev) idev->dev = &pdev->dev; init_completion(&idev->msg_complete); spin_lock_init(&idev->lock); + mutex_init(&idev->isr_mutex); - val = device_property_read_u32(idev->dev, "fifo-size", + ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); - if (val) { + if (ret) { dev_err(&pdev->dev, "FIFO size set to default of %d\n", ALTR_I2C_DFLT_FIFO_SZ); idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; } - val = device_property_read_u32(idev->dev, "clock-frequency", + ret = device_property_read_u32(idev->dev, "clock-frequency", &idev->bus_clk_rate); - if (val) { + if (ret) { dev_err(&pdev->dev, "Default to 100kHz\n"); idev->bus_clk_rate = 100000; /* default clock rate */ } diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index 5e4800d72e00..cd3fd5ee5f65 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev, if (!privdata) return -ENOMEM; + privdata->pci_dev = pci_dev; rc = amd_mp2_pci_init(privdata, pci_dev); if (rc) return rc; mutex_init(&privdata->c2p_lock); - privdata->pci_dev = pci_dev; pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); pm_runtime_use_autosuspend(&pci_dev->dev); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c 
index 7b098ff5f5dd..bdcc3c9d0abe 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -69,6 +69,7 @@ * These share bit definitions, so use the same values for the enable & * status bits. */ +#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14) #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13) #define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7) @@ -603,6 +604,8 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) /* Ack all interrupts except for Rx done */ writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); + readl(bus->base + ASPEED_I2C_INTR_STS_REG); + irq_received &= ASPEED_I2CD_INTR_RECV_MASK; irq_remaining = irq_received; #if IS_ENABLED(CONFIG_I2C_SLAVE) @@ -645,9 +648,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) irq_received, irq_handled); /* Ack Rx done */ - if (irq_received & ASPEED_I2CD_INTR_RX_DONE) + if (irq_received & ASPEED_I2CD_INTR_RX_DONE) { writel(ASPEED_I2CD_INTR_RX_DONE, bus->base + ASPEED_I2C_INTR_STS_REG); + readl(bus->base + ASPEED_I2C_INTR_STS_REG); + } spin_unlock(&bus->lock); return irq_remaining ? IRQ_NONE : IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 9ffdffaf6141..70cd9fc7fb86 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -157,6 +157,11 @@ #define IE_S_ALL_INTERRUPT_SHIFT 21 #define IE_S_ALL_INTERRUPT_MASK 0x3f +/* + * It takes ~18us to reading 10bytes of data, hence to keep tasklet + * running for less time, max slave read per tasklet is set to 10 bytes. + */ +#define MAX_SLAVE_RX_PER_INT 10 enum i2c_slave_read_status { I2C_SLAVE_RX_FIFO_EMPTY = 0, @@ -203,8 +208,18 @@ struct bcm_iproc_i2c_dev { /* bytes that have been read */ unsigned int rx_bytes; unsigned int thld_bytes; + + bool slave_rx_only; + bool rx_start_rcvd; + bool slave_read_complete; + u32 tx_underrun; + u32 slave_int_mask; + struct tasklet_struct slave_rx_tasklet; }; +/* tasklet to process slave rx data */ +static void slave_rx_tasklet_fn(unsigned long); + /* * Can be expanded in the future if more interrupt status bits are utilized */ @@ -213,7 +228,8 @@ struct bcm_iproc_i2c_dev { #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\ - | BIT(IS_S_TX_UNDERRUN_SHIFT)) + | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\ + | BIT(IS_S_RX_THLD_SHIFT)) static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave); static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave); @@ -257,6 +273,7 @@ static void bcm_iproc_i2c_slave_init( { u32 val; + iproc_i2c->tx_underrun = 0; if (need_reset) { /* put controller in reset */ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET); @@ -293,8 +310,11 @@ static void bcm_iproc_i2c_slave_init( /* Enable interrupt register to indicate a valid byte in receive fifo */ val = BIT(IE_S_RX_EVENT_SHIFT); + /* Enable interrupt register to indicate a Master read transaction */ + val |= BIT(IE_S_RD_EVENT_SHIFT); /* Enable interrupt register for the Slave BUSY command */ val |= BIT(IE_S_START_BUSY_SHIFT); + iproc_i2c->slave_int_mask = val; iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); } @@ -319,73 +339,176 @@ static void bcm_iproc_i2c_check_slave_status( } } +static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c) +{ + u8 rx_data, rx_status; + u32 rx_bytes = 0; + u32 val; + + while (rx_bytes < MAX_SLAVE_RX_PER_INT) { + val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET); + 
rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK; + rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); + + if (rx_status == I2C_SLAVE_RX_START) { + /* Start of SMBUS Master write */ + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_WRITE_REQUESTED, &rx_data); + iproc_i2c->rx_start_rcvd = true; + iproc_i2c->slave_read_complete = false; + } else if (rx_status == I2C_SLAVE_RX_DATA && + iproc_i2c->rx_start_rcvd) { + /* Middle of SMBUS Master write */ + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_WRITE_RECEIVED, &rx_data); + } else if (rx_status == I2C_SLAVE_RX_END && + iproc_i2c->rx_start_rcvd) { + /* End of SMBUS Master write */ + if (iproc_i2c->slave_rx_only) + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_WRITE_RECEIVED, + &rx_data); + + i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, + &rx_data); + } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) { + iproc_i2c->rx_start_rcvd = false; + iproc_i2c->slave_read_complete = true; + break; + } + + rx_bytes++; + } +} + +static void slave_rx_tasklet_fn(unsigned long data) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data; + u32 int_clr; + + bcm_iproc_i2c_slave_read(iproc_i2c); + + /* clear pending IS_S_RX_EVENT_SHIFT interrupt */ + int_clr = BIT(IS_S_RX_EVENT_SHIFT); + + if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) { + /* + * In case of single byte master-read request, + * IS_S_TX_UNDERRUN_SHIFT event is generated before + * IS_S_START_BUSY_SHIFT event. Hence start slave data send + * from first IS_S_TX_UNDERRUN_SHIFT event. + * + * This means don't send any data from slave when + * IS_S_RD_EVENT_SHIFT event is generated else it will increment + * eeprom or other backend slave driver read pointer twice. + */ + iproc_i2c->tx_underrun = 0; + iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT); + + /* clear IS_S_RD_EVENT_SHIFT interrupt */ + int_clr |= BIT(IS_S_RD_EVENT_SHIFT); + } + + /* clear slave interrupt */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr); + /* enable slave interrupts */ + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask); +} + static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status) { u32 val; - u8 value, rx_status; + u8 value; - /* Slave RX byte receive */ - if (status & BIT(IS_S_RX_EVENT_SHIFT)) { - val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET); - rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK; - if (rx_status == I2C_SLAVE_RX_START) { - /* Start of SMBUS for Master write */ - i2c_slave_event(iproc_i2c->slave, - I2C_SLAVE_WRITE_REQUESTED, &value); + /* + * Slave events in case of master-write, master-write-read and, + * master-read + * + * Master-write : only IS_S_RX_EVENT_SHIFT event + * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT + * events + * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT + * events or only IS_S_RD_EVENT_SHIFT + */ + if (status & BIT(IS_S_RX_EVENT_SHIFT) || + status & BIT(IS_S_RD_EVENT_SHIFT)) { + /* disable slave interrupts */ + val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); + val &= ~iproc_i2c->slave_int_mask; + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); - val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET); - value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); - i2c_slave_event(iproc_i2c->slave, - I2C_SLAVE_WRITE_RECEIVED, &value); - } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) { - /* Start of SMBUS for Master Read */ - i2c_slave_event(iproc_i2c->slave, - I2C_SLAVE_READ_REQUESTED, &value); - iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value); + 
if (status & BIT(IS_S_RD_EVENT_SHIFT)) + /* Master-write-read request */ + iproc_i2c->slave_rx_only = false; + else + /* Master-write request only */ + iproc_i2c->slave_rx_only = true; - val = BIT(S_CMD_START_BUSY_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); + /* schedule tasklet to read data later */ + tasklet_schedule(&iproc_i2c->slave_rx_tasklet); - /* - * Enable interrupt for TX FIFO becomes empty and - * less than PKT_LENGTH bytes were output on the SMBUS - */ - val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); - val |= BIT(IE_S_TX_UNDERRUN_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); - } else { - /* Master write other than start */ - value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); - i2c_slave_event(iproc_i2c->slave, - I2C_SLAVE_WRITE_RECEIVED, &value); - } - } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { - /* Master read other than start */ - i2c_slave_event(iproc_i2c->slave, - I2C_SLAVE_READ_PROCESSED, &value); - - iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value); - val = BIT(S_CMD_START_BUSY_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); + /* clear only IS_S_RX_EVENT_SHIFT interrupt */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, + BIT(IS_S_RX_EVENT_SHIFT)); } - /* Stop */ + if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { + iproc_i2c->tx_underrun++; + if (iproc_i2c->tx_underrun == 1) + /* Start of SMBUS for Master Read */ + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_READ_REQUESTED, + &value); + else + /* Master read other than start */ + i2c_slave_event(iproc_i2c->slave, + I2C_SLAVE_READ_PROCESSED, + &value); + + iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value); + /* start transfer */ + val = BIT(S_CMD_START_BUSY_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); + + /* clear interrupt */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, + BIT(IS_S_TX_UNDERRUN_SHIFT)); + } + + /* Stop received from master in case of master read transaction */ if (status & BIT(IS_S_START_BUSY_SHIFT)) { - i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value); /* * Enable interrupt for TX FIFO becomes empty and * less than PKT_LENGTH bytes were output on the SMBUS */ - val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); - val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); + iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, + iproc_i2c->slave_int_mask); + + /* End of SMBUS for Master Read */ + val = BIT(S_TX_WR_STATUS_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val); + + val = BIT(S_CMD_START_BUSY_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val); + + /* flush TX FIFOs */ + val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET); + val |= (BIT(S_FIFO_TX_FLUSH_SHIFT)); + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val); + + i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value); + + /* clear interrupt */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, + BIT(IS_S_START_BUSY_SHIFT)); } - /* clear interrupt status */ - iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status); + /* check slave transmit status only if slave is transmitting */ + if (!iproc_i2c->slave_rx_only) + bcm_iproc_i2c_check_slave_status(iproc_i2c); - bcm_iproc_i2c_check_slave_status(iproc_i2c); return true; } @@ -500,12 +623,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c, static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) { struct bcm_iproc_i2c_dev *iproc_i2c = data; - u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET); + u32 slave_status; + u32 status; bool ret; - u32 
sl_status = status & ISR_MASK_SLAVE; - if (sl_status) { - ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status); + status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET); + /* process only slave interrupt which are enabled */ + slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) & + ISR_MASK_SLAVE; + + if (slave_status) { + ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status); if (ret) return IRQ_HANDLED; else @@ -1022,6 +1150,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave) return -EAFNOSUPPORT; iproc_i2c->slave = slave; + + tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn, + (unsigned long)iproc_i2c); + bcm_iproc_i2c_slave_init(iproc_i2c, false); return 0; } @@ -1034,7 +1166,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) if (!iproc_i2c->slave) return -EINVAL; - iproc_i2c->slave = NULL; + disable_irq(iproc_i2c->irq); /* disable all slave interrupts */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); @@ -1042,11 +1174,24 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) IE_S_ALL_INTERRUPT_SHIFT); iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); + tasklet_kill(&iproc_i2c->slave_rx_tasklet); + /* Erase the slave address programmed */ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); + /* flush TX/RX FIFOs */ + tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); + + /* clear all pending slave interrupts */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); + + iproc_i2c->slave = NULL; + + enable_irq(iproc_i2c->irq); + return 0; } diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 506991596b68..5e89cd6b690c 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev, goto cmd_out; } - if ((CMD_RD || CMD_WR) && + if ((cmd == CMD_RD || cmd == CMD_WR) && bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) { rc = -EREMOTEIO; dev_dbg(dev->device, "controller received NOACK intr for %s\n", diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 9d71ce15db05..17f0dd1f891e 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -377,10 +377,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -437,11 +435,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. 
*/ @@ -906,7 +901,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) if (IS_ERR(id->membase)) return PTR_ERR(id->membase); - id->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + id->irq = ret; id->adap.owner = THIS_MODULE; id->adap.dev.of_node = pdev->dev.of_node; diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 1213e1932ccb..24d584a1c9a7 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -65,6 +65,9 @@ struct i2c_ram { char res1[4]; /* Reserved */ ushort rpbase; /* Relocation pointer */ char res2[2]; /* Reserved */ + /* The following elements are only for CPM2 */ + char res3[4]; /* Reserved */ + uint sdmatmp; /* Internal */ }; #define I2COM_START 0x80 diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 2de7452fcd6d..bde63fe0f6ad 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -305,6 +305,16 @@ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) { int timeout = TIMEOUT; + /* + * Workaround: When a slave goes offline and the master tries to send + * it data, the bus gets stuck. Issuing abort seems to work. + */ + if (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_MASTER_ACTIVITY) { + /* printk(KERN_ERR "DEBUG: Issue abort\n"); */ + dw_writel(dev, 3, DW_IC_ENABLE); + usleep_range(50000, 100000); + } + while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { if (timeout <= 0) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 16dd338877d0..0c55c54372d7 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -370,10 +370,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->nr = -1; - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE | - DPM_FLAG_SMART_SUSPEND | - DPM_FLAG_LEAVE_SUSPENDED); + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_LEAVE_SUSPENDED); + } else { + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED); + } /* The code below assumes runtime PM to be disabled. 
*/ WARN_ON(pm_runtime_enabled(&pdev->dev)); diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index bb810dee8fb5..73f139690e4e 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; +MODULE_DEVICE_TABLE(pci, pch_pcidev_id); static irqreturn_t pch_i2c_handler(int irq, void *pData); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 959d4912ec0d..0230a13a6ab7 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -397,7 +397,10 @@ static int em_i2c_probe(struct platform_device *pdev) em_i2c_reset(&priv->adap); - priv->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_clk; + priv->irq = ret; ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, "em_i2c", priv); if (ret) diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c index e0c256922d4f..977d6f524649 100644 --- a/drivers/i2c/busses/i2c-fsi.c +++ b/drivers/i2c/busses/i2c-fsi.c @@ -98,7 +98,7 @@ #define I2C_STAT_DAT_REQ BIT(25) #define I2C_STAT_CMD_COMP BIT(24) #define I2C_STAT_STOP_ERR BIT(23) -#define I2C_STAT_MAX_PORT GENMASK(19, 16) +#define I2C_STAT_MAX_PORT GENMASK(22, 16) #define I2C_STAT_ANY_INT BIT(15) #define I2C_STAT_SCL_IN BIT(11) #define I2C_STAT_SDA_IN BIT(10) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 3ff6fbd79b12..2b6a4c1f188f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1424,7 +1424,7 @@ static int i801_add_mux(struct i801_priv *priv) /* Register GPIO descriptor lookup table */ lookup = devm_kzalloc(dev, - struct_size(lookup, table, mux_config->n_gpios), + struct_size(lookup, table, mux_config->n_gpios + 1), GFP_KERNEL); if (!lookup) return -ENOMEM; @@ -1688,6 +1688,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } static inline void i801_acpi_remove(struct i801_priv *priv) { } #endif +static unsigned char i801_setup_hstcfg(struct i801_priv *priv) +{ + unsigned char hstcfg = priv->original_hstcfg; + + hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ + hstcfg |= SMBHSTCFG_HST_EN; + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg); + return hstcfg; +} + static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned char temp; @@ -1804,14 +1814,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) return err; } - pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp); - priv->original_hstcfg = temp; - temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ - if (!(temp & SMBHSTCFG_HST_EN)) { + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg); + temp = i801_setup_hstcfg(priv); + if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN)) dev_info(&dev->dev, "Enabling SMBus device\n"); - temp |= SMBHSTCFG_HST_EN; - } - pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp); if (temp & SMBHSTCFG_SMB_SMI_EN) { dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); @@ -1885,6 +1891,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_drvdata(dev, priv); + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_set_autosuspend_delay(&dev->dev, 1000); pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put_autosuspend(&dev->dev); @@ -1937,6 +1944,7 @@ static int i801_resume(struct device *dev) { struct i801_priv 
*priv = dev_get_drvdata(dev); + i801_setup_hstcfg(priv); i801_enable_host_notify(&priv->adapter); return 0; diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c index 8382eb64b424..d6c17506dba4 100644 --- a/drivers/i2c/busses/i2c-icy.c +++ b/drivers/i2c/busses/i2c-icy.c @@ -43,6 +43,7 @@ #include <linux/i2c.h> #include <linux/i2c-algo-pcf.h> +#include <asm/amigahw.h> #include <asm/amigaints.h> #include <linux/zorro.h> diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 20a4fbc53007..a1f8a9a91213 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, atomic = true; } - ret = pm_runtime_get_sync(adap->dev.parent); + ret = pm_runtime_resume_and_get(adap->dev.parent); if (ret < 0) return ret; @@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c) u32 rev; int ret; - ret = pm_runtime_get_sync(i2c->adap.dev.parent); + ret = pm_runtime_resume_and_get(i2c->adap.dev.parent); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index c92b56485fa6..a0d045c1bc9e 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -265,7 +265,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx) unsigned int temp; int ret; - ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent); + ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index a3b61336fe55..9d3f42fd6352 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -414,6 +414,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) dma->chan_using = NULL; } +static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) +{ + unsigned int temp; + + /* + * i2sr_clr_opcode is the value to clear all interrupts. Here we want to + * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits> + * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. 
+ */ + temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); +} + static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) { unsigned long orig_jiffies = jiffies; @@ -426,8 +439,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) /* check for arbitration lost */ if (temp & I2SR_IAL) { - temp &= ~I2SR_IAL; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); return -EAGAIN; } @@ -458,6 +470,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } + + /* check for arbitration lost */ + if (i2c_imx->i2csr & I2SR_IAL) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); + + i2c_imx->i2csr = 0; + return -EAGAIN; + } + dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; @@ -567,6 +589,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) /* Stop I2C transaction */ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); if (i2c_imx->dma) temp &= ~I2CR_DMAEN; @@ -597,9 +621,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) if (temp & I2SR_IIF) { /* save status register */ i2c_imx->i2csr = temp; - temp &= ~I2SR_IIF; - temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF); - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); + i2c_imx_clear_irq(i2c_imx, I2SR_IIF); wake_up(&i2c_imx->queue); return IRQ_HANDLED; } @@ -732,9 +754,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, */ dev_dbg(dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - i2c_imx_bus_busy(i2c_imx, 0); + if (!i2c_imx->stopped) + i2c_imx_bus_busy(i2c_imx, 0); } else { /* * For i2c master receiver repeat restart operation like: @@ -857,9 +882,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - i2c_imx_bus_busy(i2c_imx, 0); + if (!i2c_imx->stopped) + i2c_imx_bus_busy(i2c_imx, 0); } else { /* * For i2c master receiver repeat restart operation like: @@ -1112,14 +1140,6 @@ static int i2c_imx_probe(struct platform_device *pdev) return ret; } - /* Request IRQ */ - ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, - pdev->name, i2c_imx); - if (ret) { - dev_err(&pdev->dev, "can't claim irq %d\n", irq); - goto clk_disable; - } - /* Init queue */ init_waitqueue_head(&i2c_imx->queue); @@ -1138,6 +1158,14 @@ static int i2c_imx_probe(struct platform_device *pdev) if (ret < 0) goto rpm_disable; + /* Request IRQ */ + ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED, + pdev->name, i2c_imx); + if (ret) { + dev_err(&pdev->dev, "can't claim irq %d\n", irq); + goto rpm_disable; + } + /* Set up clock divider */ i2c_imx->bitrate = IMX_I2C_BIT_RATE; ret = of_property_read_u32(pdev->dev.of_node, @@ -1180,13 +1208,12 @@ static int i2c_imx_probe(struct platform_device *pdev) clk_notifier_unregister: 
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); + free_irq(irq, i2c_imx); rpm_disable: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); - -clk_disable: clk_disable_unprepare(i2c_imx->clk); return ret; } @@ -1194,7 +1221,7 @@ clk_disable: static int i2c_imx_remove(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); - int ret; + int irq, ret; ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) @@ -1214,6 +1241,9 @@ static int i2c_imx_remove(struct platform_device *pdev) imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); + irq = platform_get_irq(pdev, 0); + if (irq >= 0) + free_irq(irq, i2c_imx); clk_disable_unprepare(i2c_imx->clk); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 8f0e1f802f2d..a5e960cd8245 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -751,7 +751,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev) jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); - i2c->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err; + i2c->irq = ret; ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret) diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 1e2647f9a2a7..6661481f125c 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -5,6 +5,7 @@ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/i2c.h> @@ -32,12 +33,17 @@ #define REG_CTRL_ACK_IGNORE BIT(1) #define REG_CTRL_STATUS BIT(2) #define REG_CTRL_ERROR BIT(3) -#define REG_CTRL_CLKDIV_SHIFT 12 -#define REG_CTRL_CLKDIV_MASK GENMASK(21, 12) -#define REG_CTRL_CLKDIVEXT_SHIFT 28 -#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, 28) +#define REG_CTRL_CLKDIV GENMASK(21, 12) +#define REG_CTRL_CLKDIVEXT GENMASK(29, 28) + +#define REG_SLV_ADDR GENMASK(7, 0) +#define REG_SLV_SDA_FILTER GENMASK(10, 8) +#define REG_SLV_SCL_FILTER GENMASK(13, 11) +#define REG_SLV_SCL_LOW GENMASK(27, 16) +#define REG_SLV_SCL_LOW_EN BIT(28) #define I2C_TIMEOUT_MS 500 +#define FILTER_DELAY 15 enum { TOKEN_END = 0, @@ -132,19 +138,24 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div; - div = DIV_ROUND_UP(clk_rate, freq * i2c->data->div_factor); + div = DIV_ROUND_UP(clk_rate, freq); + div -= FILTER_DELAY; + div = DIV_ROUND_UP(div, i2c->data->div_factor); /* clock divider has 12 bits */ - if (div >= (1 << 12)) { + if (div > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); - div = (1 << 12) - 1; + div = GENMASK(11, 0); } - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, - (div & GENMASK(9, 0)) << REG_CTRL_CLKDIV_SHIFT); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV, + FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0))); - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, - (div >> 10) << REG_CTRL_CLKDIVEXT_SHIFT); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT, + FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10)); + + /* Disable HIGH/LOW mode */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", 
__func__, clk_rate, freq, div); @@ -273,7 +284,10 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) token = (msg->flags & I2C_M_RD) ? TOKEN_SLAVE_ADDR_READ : TOKEN_SLAVE_ADDR_WRITE; - writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR); + + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR, + FIELD_PREP(REG_SLV_ADDR, msg->addr << 1)); + meson_i2c_add_token(i2c, TOKEN_START); meson_i2c_add_token(i2c, token); } @@ -432,6 +446,10 @@ static int meson_i2c_probe(struct platform_device *pdev) return ret; } + /* Disable filtering */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, + REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); + meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); return 0; diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 2fd717d8dd30..71d7bae2cbca 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv) if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, &datalen, 1); - if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) { + if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { dev_err(priv->dev, "Incorrect smbus block read message len\n"); - return -E2BIG; + return -EPROTO; } } else { datalen = priv->xfer.data_len; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 2152ec5f535c..e1ef0122ef75 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -389,6 +389,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) { u16 control_reg; + writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); + udelay(50); + writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); + mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET); /* Set ioconfig */ @@ -419,10 +423,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL); mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN); - - writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); - udelay(50); - writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); } /* @@ -1008,7 +1008,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) mtk_i2c_clock_disable(i2c); ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, - IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c); + IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, + I2C_DRV_NAME, i2c); if (ret < 0) { dev_err(&pdev->dev, "Request I2C IRQ %d fail\n", irq); @@ -1035,7 +1036,16 @@ static int mtk_i2c_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int mtk_i2c_resume(struct device *dev) +static int mtk_i2c_suspend_noirq(struct device *dev) +{ + struct mtk_i2c *i2c = dev_get_drvdata(dev); + + i2c_mark_adapter_suspended(&i2c->adap); + + return 0; +} + +static int mtk_i2c_resume_noirq(struct device *dev) { int ret; struct mtk_i2c *i2c = dev_get_drvdata(dev); @@ -1050,12 +1060,15 @@ static int mtk_i2c_resume(struct device *dev) mtk_i2c_clock_disable(i2c); + i2c_mark_adapter_resumed(&i2c->adap); + return 0; } #endif static const struct dev_pm_ops mtk_i2c_pm = { - SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq, + mtk_i2c_resume_noirq) }; static struct platform_driver mtk_i2c_driver = { diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 89224913f578..081a1169ecea 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -25,6 +25,7 @@ #include <linux/of_device.h> #include 
<linux/dma-mapping.h> #include <linux/dmaengine.h> +#include <linux/dma/mxs-dma.h> #define DRIVER_NAME "mxs-i2c" @@ -200,7 +201,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); @@ -228,7 +230,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); @@ -260,7 +263,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index d9607905dc2f..845eda70b8ca 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c @@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, if (result) return result; if (recv_len && i == 0) { - if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) + if (data[i] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; length += data[i]; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 2dfea357b131..c36522849325 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1408,9 +1408,9 @@ omap_i2c_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(omap->dev); - r = pm_runtime_get_sync(omap->dev); + r = pm_runtime_resume_and_get(omap->dev); if (r < 0) - goto err_free_mem; + goto err_disable_pm; /* * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2. 
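Several hunks in this group (i2c-img-scb, i2c-imx-lpi2c, i2c-omap above, and i2c-sprd further down) make the same conversion from pm_runtime_get_sync() to pm_runtime_resume_and_get(). A minimal sketch of why, not taken from any of these drivers; dev and ret stand in for each driver's own device pointer and status variable:

	/*
	 * pm_runtime_get_sync() increments the usage counter even when the
	 * resume fails, so every error path must remember to drop it again:
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return ret;
	}

	/*
	 * pm_runtime_resume_and_get() drops the reference itself on failure,
	 * which is why the converted call sites can simply return the error:
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
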
@@ -1518,8 +1518,8 @@ err_unuse_clocks: omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); pm_runtime_dont_use_autosuspend(omap->dev); pm_runtime_put_sync(omap->dev); +err_disable_pm: pm_runtime_disable(&pdev->dev); -err_free_mem: return r; } @@ -1530,7 +1530,7 @@ static int omap_i2c_remove(struct platform_device *pdev) int ret; i2c_del_adapter(&omap->adapter); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index b6b5a495118b..a567fd2b295e 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -179,6 +179,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT); if (fifostat & OWL_I2C_FIFOSTAT_RNB) { i2c_dev->err = -ENXIO; + /* Clear NACK error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOSTAT, + OWL_I2C_FIFOSTAT_RNB, true); goto stop; } @@ -186,6 +189,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) stat = readl(i2c_dev->base + OWL_I2C_REG_STAT); if (stat & OWL_I2C_STAT_BEB) { i2c_dev->err = -EIO; + /* Clear BUS error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT, + OWL_I2C_STAT_BEB, true); goto stop; } diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index a7a81846d5b1..635dd697ac0b 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) int ret = 0; int irq; - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); /* If irq is 0, we do polling. */ if (irq < 0) irq = 0; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 30ded6422e7b..69740a4ff1db 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -977,7 +977,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { + (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || + dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { retval = piix4_setup_sb800(dev, id, 1); } diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 2c3c3d6935c0..d0c557c8d80f 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -312,11 +312,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), readl(_ISR(i2c))); - dev_dbg(dev, "log: "); + dev_err(dev, "log:"); for (i = 0; i < i2c->irqlogidx; i++) - pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); - - pr_debug("\n"); + pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); + pr_cont("\n"); } #else /* ifdef DEBUG */ @@ -706,11 +705,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) { u32 icr; - /* - * Clear the STOP and ACK flags - */ + /* Clear the START, STOP, ACK, TB and MA flags */ icr = readl(_ICR(i2c)); - icr &= ~(ICR_STOP | ICR_ACKNAK); + icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); writel(icr, _ICR(i2c)); } diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 17abf60c94ae..b56a427fb928 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -87,6 +87,9 @@ struct 
geni_i2c_dev { u32 clk_freq_out; const struct geni_i2c_clk_fld *clk_fld; int suspended; + void *dma_buf; + size_t xfer_len; + dma_addr_t dma_addr; }; struct geni_i2c_err_log { @@ -350,14 +353,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c) dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n"); } +static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c, + struct i2c_msg *cur) +{ + gi2c->cur_rd = 0; + if (gi2c->dma_buf) { + if (gi2c->err) + geni_i2c_rx_fsm_rst(gi2c); + geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len); + i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err); + } +} + +static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c, + struct i2c_msg *cur) +{ + gi2c->cur_wr = 0; + if (gi2c->dma_buf) { + if (gi2c->err) + geni_i2c_tx_fsm_rst(gi2c); + geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len); + i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err); + } +} + static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, u32 m_param) { - dma_addr_t rx_dma; + dma_addr_t rx_dma = 0; unsigned long time_left; void *dma_buf = NULL; struct geni_se *se = &gi2c->se; size_t len = msg->len; + struct i2c_msg *cur; if (!of_machine_is_compatible("lenovo,yoga-c630")) dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); @@ -374,19 +402,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); i2c_put_dma_safe_msg_buf(dma_buf, msg, false); dma_buf = NULL; + } else { + gi2c->xfer_len = len; + gi2c->dma_addr = rx_dma; + gi2c->dma_buf = dma_buf; } + cur = gi2c->cur; time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); - gi2c->cur_rd = 0; - if (dma_buf) { - if (gi2c->err) - geni_i2c_rx_fsm_rst(gi2c); - geni_se_rx_dma_unprep(se, rx_dma, len); - i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); - } + geni_i2c_rx_msg_cleanup(gi2c, cur); return gi2c->err; } @@ -394,11 +421,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, u32 m_param) { - dma_addr_t tx_dma; + dma_addr_t tx_dma = 0; unsigned long time_left; void *dma_buf = NULL; struct geni_se *se = &gi2c->se; size_t len = msg->len; + struct i2c_msg *cur; if (!of_machine_is_compatible("lenovo,yoga-c630")) dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); @@ -415,22 +443,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); i2c_put_dma_safe_msg_buf(dma_buf, msg, false); dma_buf = NULL; + } else { + gi2c->xfer_len = len; + gi2c->dma_addr = tx_dma; + gi2c->dma_buf = dma_buf; } if (!dma_buf) /* Get FIFO IRQ */ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); + cur = gi2c->cur; time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); - gi2c->cur_wr = 0; - if (dma_buf) { - if (gi2c->err) - geni_i2c_tx_fsm_rst(gi2c); - geni_se_tx_dma_unprep(se, tx_dma, len); - i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); - } + geni_i2c_tx_msg_cleanup(gi2c, cur); return gi2c->err; } diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index e09cd0775ae9..3417f7dffa94 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -806,7 +806,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup) if (ret || qup->bus_err || qup->qup_err) { reinit_completion(&qup->xfer); - if 
(qup_i2c_change_state(qup, QUP_RUN_STATE)) { + ret = qup_i2c_change_state(qup, QUP_RUN_STATE); + if (ret) { dev_err(qup->dev, "change to run state timed out"); goto desc_err; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 531c01100b56..d0c4b3019e41 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -89,7 +89,6 @@ #define RCAR_BUS_PHASE_START (MDBS | MIE | ESG) #define RCAR_BUS_PHASE_DATA (MDBS | MIE) -#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF) #define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB) #define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE) @@ -117,6 +116,7 @@ enum rcar_i2c_type { }; struct rcar_i2c_priv { + u32 flags; void __iomem *io; struct i2c_adapter adap; struct i2c_msg *msg; @@ -127,7 +127,6 @@ struct rcar_i2c_priv { int pos; u32 icccr; - u32 flags; u8 recovery_icmcr; /* protected by adapter lock */ enum rcar_i2c_type devtype; struct i2c_client *slave; @@ -580,13 +579,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); + /* Clear SSR, too, because of old STOPs to other clients than us */ + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } @@ -614,7 +615,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) /* * This driver has a lock-free design because there are IP cores (at least * R-Car Gen2) which have an inherent race condition in their hardware design. - * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after + * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after * the interrupt was generated, otherwise an unwanted repeated message gets * generated. It turned out that taking a spinlock at the beginning of the ISR * was already causing repeated messages. 
Thus, this driver was converted to @@ -623,13 +624,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) static irqreturn_t rcar_i2c_irq(int irq, void *ptr) { struct rcar_i2c_priv *priv = ptr; - u32 msr, val; + u32 msr; /* Clear START or STOP immediately, except for REPSTART after read */ - if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) { - val = rcar_i2c_read(priv, ICMCR); - rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA); - } + if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) + rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); msr = rcar_i2c_read(priv, ICMSR); @@ -850,7 +849,7 @@ static int rcar_reg_slave(struct i2c_client *slave) priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; @@ -862,11 +861,14 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); - /* disable irqs and ensure none is running before clearing ptr */ + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSSR, 0); + enable_irq(priv->irq); + rcar_i2c_write(priv, ICSCR, SDBS); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ - synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); @@ -971,6 +973,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (ret < 0) goto out_pm_put; + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index c2005c789d2b..319d1fa617c8 100644 --- a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev) goto out2; } - id->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto out3; + id->irq = ret; id->adap.nr = pdev->id; id->adap.algo = &sh7760_i2c_algo; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 8777af4c695e..d5dd58c27ce5 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -129,6 +129,7 @@ struct sh_mobile_i2c_data { int sr; bool send_stop; bool stop_after_dma; + bool atomic_xfer; struct resource *res; struct dma_chan *dma_tx; @@ -333,13 +334,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op ret = iic_rd(pd, ICDR); break; case OP_RX_STOP: /* enable DTE interrupt, issue stop */ - iic_wr(pd, ICIC, - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + if (!pd->atomic_xfer) + iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ - iic_wr(pd, ICIC, - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + if (!pd->atomic_xfer) + iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); ret = iic_rd(pd, ICDR); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; @@ -435,7 +438,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) if (wakeup) { pd->sr |= SW_DONE; - wake_up(&pd->wait); + if (!pd->atomic_xfer) + wake_up(&pd->wait); } /* defeat write posting to avoid spurious WAIT interrupts 
*/ @@ -587,6 +591,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, pd->pos = -1; pd->sr = 0; + if (pd->atomic_xfer) + return; + pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8); if (pd->dma_buf) sh_mobile_i2c_xfer_dma(pd); @@ -643,15 +650,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd) return i ? 0 : -ETIMEDOUT; } -static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) +static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd, + struct i2c_msg *msgs, int num) { - struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); struct i2c_msg *msg; int err = 0; int i; - long timeout; + long time_left; /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); @@ -668,15 +673,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, if (do_start) i2c_op(pd, OP_START); - /* The interrupt handler takes care of the rest... */ - timeout = wait_event_timeout(pd->wait, - pd->sr & (ICSR_TACK | SW_DONE), - adapter->timeout); + if (pd->atomic_xfer) { + unsigned long j = jiffies + pd->adap.timeout; - /* 'stop_after_dma' tells if DMA transfer was complete */ - i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); + time_left = time_before_eq(jiffies, j); + while (time_left && + !(pd->sr & (ICSR_TACK | SW_DONE))) { + unsigned char sr = iic_rd(pd, ICSR); - if (!timeout) { + if (sr & (ICSR_AL | ICSR_TACK | + ICSR_WAIT | ICSR_DTE)) { + sh_mobile_i2c_isr(0, pd); + udelay(150); + } else { + cpu_relax(); + } + time_left = time_before_eq(jiffies, j); + } + } else { + /* The interrupt handler takes care of the rest... */ + time_left = wait_event_timeout(pd->wait, + pd->sr & (ICSR_TACK | SW_DONE), + pd->adap.timeout); + + /* 'stop_after_dma' tells if DMA xfer was complete */ + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, + pd->stop_after_dma); + } + + if (!time_left) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) sh_mobile_i2c_cleanup_dma(pd); @@ -702,14 +727,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, return err ?: num; } +static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); + + pd->atomic_xfer = false; + return sh_mobile_xfer(pd, msgs, num); +} + +static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); + + pd->atomic_xfer = true; + return sh_mobile_xfer(pd, msgs, num); +} + static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm sh_mobile_i2c_algorithm = { - .functionality = sh_mobile_i2c_func, - .master_xfer = sh_mobile_i2c_xfer, + .functionality = sh_mobile_i2c_func, + .master_xfer = sh_mobile_i2c_xfer, + .master_xfer_atomic = sh_mobile_i2c_xfer_atomic, }; static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = { diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index b432e7580458..92ba0183fd8a 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -72,6 +72,8 @@ /* timeout (ms) for pm runtime autosuspend */ #define SPRD_I2C_PM_TIMEOUT 1000 +/* timeout (ms) for transfer message */ +#define I2C_XFER_TIMEOUT 1000 /* SPRD i2c data structure */ struct sprd_i2c { @@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, bool 
is_last_msg) { struct sprd_i2c *i2c_dev = i2c_adap->algo_data; + unsigned long time_left; i2c_dev->msg = msg; i2c_dev->buf = msg->buf; @@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, sprd_i2c_opt_start(i2c_dev); - wait_for_completion(&i2c_dev->complete); + time_left = wait_for_completion_timeout(&i2c_dev->complete, + msecs_to_jiffies(I2C_XFER_TIMEOUT)); + if (!time_left) + return -ETIMEDOUT; return i2c_dev->err; } @@ -284,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; - ret = pm_runtime_get_sync(i2c_dev->dev); + ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; @@ -571,7 +577,7 @@ static int sprd_i2c_remove(struct platform_device *pdev) struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_get_sync(i2c_dev->dev); + ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 54e1fc8a495e..f7f7b5b64720 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) /** * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode * @i2c_dev: Controller's private data + * @max: Maximum amount of data to fill into the Tx FIFO * * This functions fills the Tx FIFO with fixed pattern when * in read mode to trigger clock. diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index b2634afe066d..a7977eef2ead 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -53,6 +53,8 @@ #define STM32F7_I2C_CR1_RXDMAEN BIT(15) #define STM32F7_I2C_CR1_TXDMAEN BIT(14) #define STM32F7_I2C_CR1_ANFOFF BIT(12) +#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8) +#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8) #define STM32F7_I2C_CR1_ERRIE BIT(7) #define STM32F7_I2C_CR1_TCIE BIT(6) #define STM32F7_I2C_CR1_STOPIE BIT(5) @@ -151,7 +153,7 @@ #define STM32F7_I2C_MAX_SLAVE 0x2 #define STM32F7_I2C_DNF_DEFAULT 0 -#define STM32F7_I2C_DNF_MAX 16 +#define STM32F7_I2C_DNF_MAX 15 #define STM32F7_I2C_ANALOG_FILTER_ENABLE 1 #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ @@ -657,6 +659,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) else stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ANFOFF); + + /* Program the Digital Filter */ + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_DNF_MASK); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf)); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); } diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c index ec7a7e917edd..c0c7d01473f2 100644 --- a/drivers/i2c/busses/i2c-tegra-bpmp.c +++ b/drivers/i2c/busses/i2c-tegra-bpmp.c @@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out) flags &= ~I2C_M_RECV_LEN; } - return (flags != 0) ? 
-EINVAL : 0; + return 0; } /** diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index dbc43cfec19d..db94e96aed77 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> @@ -230,7 +231,6 @@ struct tegra_i2c_hw_feature { * @base_phys: physical base address of the I2C controller * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt - * @irq_disabled: used to track whether or not the interrupt is enabled * @is_dvc: identifies the DVC I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message @@ -240,7 +240,6 @@ struct tegra_i2c_hw_feature { * @bus_clk_rate: current I2C bus clock rate * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes * @is_multimaster_mode: track if I2C controller is in multi-master mode - * @xfer_lock: lock to serialize transfer submission and processing * @tx_dma_chan: DMA transmit channel * @rx_dma_chan: DMA receive channel * @dma_phys: handle to DMA resources @@ -260,7 +259,6 @@ struct tegra_i2c_dev { phys_addr_t base_phys; int cont_id; int irq; - bool irq_disabled; int is_dvc; struct completion msg_complete; int msg_err; @@ -270,8 +268,6 @@ struct tegra_i2c_dev { u32 bus_clk_rate; u16 clk_divisor_non_hs_mode; bool is_multimaster_mode; - /* xfer_lock: lock to serialize transfer submission and processing */ - spinlock_t xfer_lock; struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; dma_addr_t dma_phys; @@ -790,11 +786,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) if (err) return err; - if (i2c_dev->irq_disabled) { - i2c_dev->irq_disabled = false; - enable_irq(i2c_dev->irq); - } - return 0; } @@ -825,18 +816,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) status = i2c_readl(i2c_dev, I2C_INT_STATUS); - spin_lock(&i2c_dev->xfer_lock); if (status == 0) { dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), i2c_readl(i2c_dev, I2C_STATUS), i2c_readl(i2c_dev, I2C_CNFG)); i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; - - if (!i2c_dev->irq_disabled) { - disable_irq_nosync(i2c_dev->irq); - i2c_dev->irq_disabled = true; - } goto err; } @@ -925,7 +910,6 @@ err: complete(&i2c_dev->msg_complete); done: - spin_unlock(&i2c_dev->xfer_lock); return IRQ_HANDLED; } @@ -999,6 +983,30 @@ out: i2c_writel(i2c_dev, val, reg); } +static unsigned long +tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, + struct completion *complete, + unsigned int timeout_ms) +{ + unsigned long ret; + + enable_irq(i2c_dev->irq); + ret = wait_for_completion_timeout(complete, + msecs_to_jiffies(timeout_ms)); + disable_irq(i2c_dev->irq); + + /* + * There is a chance that completion may happen after IRQ + * synchronization, which is done by disable_irq(). 
+ */ + if (ret == 0 && completion_done(complete)) { + dev_warn(i2c_dev->dev, "completion done after timeout\n"); + ret = 1; + } + + return ret; +} + static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); @@ -1020,8 +1028,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG); tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE); - time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(50)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, 50); if (time_left == 0) { dev_err(i2c_dev->dev, "timed out for bus clear\n"); return -ETIMEDOUT; @@ -1044,7 +1052,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, u32 packet_header; u32 int_mask; unsigned long time_left; - unsigned long flags; size_t xfer_size; u32 *buffer = NULL; int err = 0; @@ -1075,7 +1082,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, */ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, i2c_dev->bus_clk_rate); - spin_lock_irqsave(&i2c_dev->xfer_lock, flags); int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; tegra_i2c_unmask_irq(i2c_dev, int_mask); @@ -1090,7 +1096,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting RX DMA failed, err %d\n", err); - goto unlock; + return err; } } else { @@ -1149,7 +1155,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting TX DMA failed, err %d\n", err); - goto unlock; + return err; } } else { tegra_i2c_fill_tx_fifo(i2c_dev); @@ -1169,15 +1175,10 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK)); -unlock: - spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags); - if (dma) { - if (err) - return err; + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->dma_complete, xfer_time); - time_left = wait_for_completion_timeout(&i2c_dev->dma_complete, - msecs_to_jiffies(xfer_time)); if (time_left == 0) { dev_err(i2c_dev->dev, "DMA transfer timeout\n"); dmaengine_terminate_sync(i2c_dev->msg_read ? 
@@ -1202,13 +1203,13 @@ unlock: i2c_dev->tx_dma_chan); } - time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(xfer_time)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, xfer_time); + tegra_i2c_mask_irq(i2c_dev, int_mask); if (time_left == 0) { dev_err(i2c_dev->dev, "i2c transfer timed out\n"); - tegra_i2c_init(i2c_dev, true); return -ETIMEDOUT; } @@ -1568,7 +1569,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) I2C_PACKET_HEADER_SIZE; init_completion(&i2c_dev->msg_complete); init_completion(&i2c_dev->dma_complete); - spin_lock_init(&i2c_dev->xfer_lock); if (!i2c_dev->hw->has_single_clk_source) { fast_clk = devm_clk_get(&pdev->dev, "fast-clk"); @@ -1644,6 +1644,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) goto release_dma; } + irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); if (ret) { @@ -1719,15 +1721,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); - int err; + int err = 0; i2c_mark_adapter_suspended(&i2c_dev->adapter); - err = pm_runtime_force_suspend(dev); - if (err < 0) - return err; + if (!pm_runtime_status_suspended(dev)) + err = tegra_i2c_runtime_suspend(dev); - return 0; + return err; } static int __maybe_unused tegra_i2c_resume(struct device *dev) @@ -1735,6 +1736,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err; + /* + * We need to ensure that clocks are enabled so that registers can be + * restored in tegra_i2c_init(). + */ err = tegra_i2c_runtime_resume(dev); if (err) return err; @@ -1743,13 +1748,16 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; - err = tegra_i2c_runtime_suspend(dev); - if (err) - return err; - - err = pm_runtime_force_resume(dev); - if (err < 0) - return err; + /* + * In case we are runtime suspended, disable clocks again so that we + * don't unbalance the clock reference counts during the next runtime + * resume transition. 
+ */ + if (pm_runtime_status_suspended(dev)) { + err = tegra_i2c_runtime_suspend(dev); + if (err) + return err; + } i2c_mark_adapter_resumed(&i2c_dev->adapter); diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index ce70b5288472..c70983780ae7 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, void i2c_acpi_register_devices(struct i2c_adapter *adap) { acpi_status status; + acpi_handle handle; if (!has_acpi_companion(&adap->dev)) return; @@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) adap, NULL); if (ACPI_FAILURE(status)) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); + + if (!adap->dev.parent) + return; + + handle = ACPI_HANDLE(adap->dev.parent); + if (!handle) + return; + + acpi_walk_dep_device_list(handle); } const struct acpi_device_id * @@ -737,7 +747,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) return -ENOMEM; } - acpi_walk_dep_device_list(handle); return 0; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 810a942eaa8e..840f59650c7c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -254,13 +254,14 @@ EXPORT_SYMBOL_GPL(i2c_recover_bus); static void i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - char *err_str; + char *err_str, *err_level = KERN_ERR; if (!bri) return; if (!bri->recover_bus) { - err_str = "no recover_bus() found"; + err_str = "no suitable method provided"; + err_level = KERN_DEBUG; goto err; } @@ -290,7 +291,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap) return; err: - dev_err(&adap->dev, "Not using recovery: %s\n", err_str); + dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; } @@ -338,8 +339,10 @@ static int i2c_device_probe(struct device *dev) } else if (ACPI_COMPANION(dev)) { irq = i2c_acpi_get_irq(client); } - if (irq == -EPROBE_DEFER) - return irq; + if (irq == -EPROBE_DEFER) { + status = irq; + goto put_sync_adapter; + } if (irq < 0) irq = 0; @@ -352,16 +355,20 @@ static int i2c_device_probe(struct device *dev) * or ACPI ID table is supplied for the probing device. 
*/ if (!driver->id_table && - !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && - !i2c_of_match_device(dev->driver->of_match_table, client)) - return -ENODEV; + !acpi_driver_match_device(dev, dev->driver) && + !i2c_of_match_device(dev->driver->of_match_table, client)) { + status = -ENODEV; + goto put_sync_adapter; + } if (client->flags & I2C_CLIENT_WAKE) { int wakeirq; wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); - if (wakeirq == -EPROBE_DEFER) - return wakeirq; + if (wakeirq == -EPROBE_DEFER) { + status = wakeirq; + goto put_sync_adapter; + } device_init_wakeup(&client->dev, true); @@ -408,6 +415,10 @@ err_detach_pm_domain: err_clear_wakeup_irq: dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); +put_sync_adapter: + if (client->flags & I2C_CLIENT_HOST_NOTIFY) + pm_runtime_put_sync(&client->adapter->dev); + return status; } @@ -1375,8 +1386,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) /* create pre-declared device nodes */ of_i2c_register_devices(adap); - i2c_acpi_register_devices(adap); i2c_acpi_install_space_handler(adap); + i2c_acpi_register_devices(adap); if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 5427f047faf0..1589179d5eb9 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", @@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 3ac426a8ab5a..c2ae8c8cd429 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: + if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, + "Invalid block size returned: %d\n", + msg[1].buf[0]); + status = -EPROTO; + goto cleanup; + } for (i = 0; i < msg[1].buf[0] + 1; i++) data->block[i] = msg[1].buf[i]; break; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 2ea4585d18c5..94beacc41302 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -40,7 +40,7 @@ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; - struct device *dev; + struct device dev; struct cdev cdev; }; @@ -84,12 +84,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) return i2c_dev; } -static void put_i2c_dev(struct i2c_dev *i2c_dev) +static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); - kfree(i2c_dev); + if (del_cdev) + cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); + put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, @@ -628,6 +630,14 @@ static const struct file_operations i2cdev_fops = { static struct class *i2c_dev_class; +static void 
i2cdev_dev_release(struct device *dev) +{ + struct i2c_dev *i2c_dev; + + i2c_dev = container_of(dev, struct i2c_dev, dev); + kfree(i2c_dev); +} + static int i2cdev_attach_adapter(struct device *dev, void *dummy) { struct i2c_adapter *adap; @@ -644,27 +654,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) cdev_init(&i2c_dev->cdev, &i2cdev_fops); i2c_dev->cdev.owner = THIS_MODULE; - res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); - if (res) - goto error_cdev; - /* register this i2c device with the driver core */ - i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, - MKDEV(I2C_MAJOR, adap->nr), NULL, - "i2c-%d", adap->nr); - if (IS_ERR(i2c_dev->dev)) { - res = PTR_ERR(i2c_dev->dev); - goto error; + device_initialize(&i2c_dev->dev); + i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); + i2c_dev->dev.class = i2c_dev_class; + i2c_dev->dev.parent = &adap->dev; + i2c_dev->dev.release = i2cdev_dev_release; + dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + + res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); + if (res) { + put_i2c_dev(i2c_dev, false); + return res; } pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; -error: - cdev_del(&i2c_dev->cdev); -error_cdev: - put_i2c_dev(i2c_dev); - return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) @@ -680,9 +686,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - cdev_del(&i2c_dev->cdev); - put_i2c_dev(i2c_dev); - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); + put_i2c_dev(i2c_dev, true); pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); return 0; diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 0e16490eb3a1..5365199a31f4 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); err_rollback: + i2c_demux_deactivate_master(priv); for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 5c051dba32a5..6cc71c90f85e 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1760,6 +1760,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master) i3c_master_detach_free_devs(master); } +static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev) +{ + struct i3c_master_controller *master = i3cdev->common.master; + struct i3c_dev_boardinfo *i3cboardinfo; + + list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) { + if (i3cdev->info.pid != i3cboardinfo->pid) + continue; + + i3cdev->boardinfo = i3cboardinfo; + i3cdev->info.static_addr = i3cboardinfo->static_addr; + return; + } +} + static struct i3c_dev_desc * i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev) { @@ -1815,10 +1830,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, if (ret) goto err_detach_dev; + i3c_master_attach_boardinfo(newdev); + olddev = i3c_master_search_i3c_dev_duplicate(newdev); if (olddev) { - newdev->boardinfo = olddev->boardinfo; - newdev->info.static_addr = olddev->info.static_addr; newdev->dev = olddev->dev; if (newdev->dev) newdev->dev->desc = newdev; diff --git a/drivers/i3c/master/i3c-master-cdns.c 
b/drivers/i3c/master/i3c-master-cdns.c index 10db0bf0655a..6d5719cea9f5 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -1593,8 +1593,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots, sizeof(*master->ibi.slots), GFP_KERNEL); - if (!master->ibi.slots) + if (!master->ibi.slots) { + ret = -ENOMEM; goto err_disable_sysclk; + } writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL); writel(MST_INT_IBIR_THR, master->regs + MST_IER); diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 80bc3bf82f4d..775fd34132ab 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) sense_rq->rq_disk = rq->rq_disk; sense_rq->cmd_flags = REQ_OP_DRV_IN; ide_req(sense_rq)->type = ATA_PRIV_SENSE; - sense_rq->rq_flags |= RQF_PREEMPT; req->cmd[0] = GPCMD_REQUEST_SENSE; req->cmd[4] = cmd_len; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index b137f27a34d5..b32a013d827a 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -512,11 +512,6 @@ repeat: * above to return us whatever is in the queue. Since we call * ide_do_request() ourselves, we end up taking requests while * the queue is blocked... - * - * We let requests forced at head of queue with ide-preempt - * though. I hope that doesn't happen too much, hopefully not - * unless the subdriver triggers such a thing in its own PM - * state machine. */ if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && ata_pm_request(rq) == 0 && diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 347b08b56042..4b0ad1864d76 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -41,6 +41,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/tick.h> @@ -79,6 +80,7 @@ struct idle_cpu { unsigned long auto_demotion_disable_flags; bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; + bool use_acpi; }; static const struct idle_cpu *icpu; @@ -89,6 +91,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static struct cpuidle_state *cpuidle_state_table; +/* + * Enable this state by default even if the ACPI _CST does not list it. + */ +#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) + /* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. 
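The CPUIDLE_FLAG_ALWAYS_ENABLE bit defined in the hunk above tags states, C1E in the tables that follow, that should stay enabled even when the platform's ACPI _CST does not list them. A hypothetical distillation of that policy, using the intel_idle_off_by_default() helper added near the end of this series; state stands in for the cpuidle_state being registered:

	/*
	 * Hypothetical sketch, not the patch's exact code: on processors where
	 * _CST is consulted, a state absent from _CST starts out disabled
	 * unless it carries CPUIDLE_FLAG_ALWAYS_ENABLE; the user can still
	 * enable it later through sysfs.
	 */
	if (intel_idle_off_by_default(mwait_hint) &&
	    !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
		state->disabled = true;
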
@@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, @@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, @@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, @@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -675,6 +682,35 @@ static struct cpuidle_state skx_cstates[] = { .enter = NULL } }; +static struct cpuidle_state icx_cstates[] = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 128, + .target_residency = 384, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] = { { .name = "C1E", @@ -808,7 +844,7 @@ static struct cpuidle_state bxt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = 
MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -869,7 +905,7 @@ static struct cpuidle_state dnv_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -944,6 +980,22 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, mwait_idle_with_hints(eax, ecx); } +static bool intel_idle_verify_cstate(unsigned int mwait_hint) +{ + unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; + unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & + MWAIT_SUBSTATE_MASK; + + /* Ignore the C-state if there are NO sub-states in CPUID for it. */ + if (num_substates == 0) + return false; + + if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle states deeper than C2"); + + return true; +} + static void __setup_broadcast_timer(bool on) { if (on) @@ -975,6 +1027,13 @@ static const struct idle_cpu idle_cpu_nehalem = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_nhx = { + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; @@ -993,6 +1052,12 @@ static const struct idle_cpu idle_cpu_snb = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_snx = { + .state_table = snb_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, @@ -1013,6 +1078,7 @@ static const struct idle_cpu idle_cpu_ivb = { static const struct idle_cpu idle_cpu_ivt = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_hsw = { @@ -1020,11 +1086,23 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_hsx = { + .state_table = hsw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_bdw = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdx = { + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_skl = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, @@ -1033,15 +1111,24 @@ static const struct idle_cpu idle_cpu_skl = { static const struct idle_cpu idle_cpu_skx = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + +static const struct idle_cpu idle_cpu_icx = { + .state_table = icx_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_knl = { .state_table = knl_cstates, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_bxt = { @@ -1052,20 +1139,21 @@ static const struct idle_cpu idle_cpu_bxt = { static const struct idle_cpu idle_cpu_dnv = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - INTEL_CPU_FAM6(NEHALEM_EP, 
idle_cpu_nehalem), + INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx), INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem), INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem), INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem), - INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx), + INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx), INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft), - INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx), INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb), - INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb), + INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx), INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt), INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier), @@ -1073,19 +1161,20 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx), + INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx), + INTEL_CPU_FAM6(ICELAKE_X, idle_cpu_icx), INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl), INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl), INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt), @@ -1095,6 +1184,162 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { {} }; +#define INTEL_CPU_FAM6_MWAIT \ + { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } + +static const struct x86_cpu_id intel_mwait_ids[] __initconst = { + INTEL_CPU_FAM6_MWAIT, + {} +}; + +static bool intel_idle_max_cstate_reached(int cstate) +{ + if (cstate + 1 > max_cstate) { + pr_info("max_cstate %d reached\n", max_cstate); + return true; + } + return false; +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +#include <acpi/processor.h> + +static bool no_acpi __read_mostly; +module_param(no_acpi, bool, 0444); +MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); + +static struct acpi_processor_power acpi_state_table; + +/** + * intel_idle_cst_usable - Check if the _CST information can be used. + * + * Check if all of the C-states listed by _CST in the max_cstate range are + * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT. 
+ */ +static bool intel_idle_cst_usable(void) +{ + int cstate, limit; + + limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), + acpi_state_table.count); + + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; + + if (cx->entry_method != ACPI_CSTATE_FFH) + return false; + } + + return true; +} + +static bool intel_idle_acpi_cst_extract(void) +{ + unsigned int cpu; + + if (no_acpi) { + pr_debug("Not allowed to use ACPI _CST\n"); + return false; + } + + for_each_possible_cpu(cpu) { + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (!pr) + continue; + + if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) + continue; + + acpi_state_table.count++; + + if (!intel_idle_cst_usable()) + continue; + + if (!acpi_processor_claim_cst_control()) { + acpi_state_table.count = 0; + return false; + } + + return true; + } + + pr_debug("ACPI _CST not found or not usable\n"); + return false; +} + +static void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) +{ + int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + + if (intel_idle_max_cstate_reached(cstate)) + break; + + cx = &acpi_state_table.states[cstate]; + + state = &drv->states[drv->state_count++]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + /* + * For C1-type C-states use the same number for both the exit + * latency and target residency, because that is the case for + * C1 in the majority of the static C-states tables above. + * For the other types of C-states, however, set the target + * residency to 3 times the exit latency which should lead to + * a reasonable balance between energy-efficiency and + * performance in the majority of interesting cases. + */ + state->target_residency = cx->latency; + if (cx->type > ACPI_STATE_C1) + state->target_residency *= 3; + + state->flags = MWAIT2flg(cx->address); + if (cx->type > ACPI_STATE_C2) + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + + state->enter = intel_idle; + state->enter_s2idle = intel_idle_s2idle; + } +} + +static bool intel_idle_off_by_default(u32 mwait_hint) +{ + int cstate, limit; + + /* + * If there are no _CST C-states, do not disable any C-states by + * default. + */ + if (!acpi_state_table.count) + return false; + + limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. 
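+ * A state from the static tables is thus left enabled only when some
+ * _CST entry advertises the same MWAIT hint. E.g. (hypothetical hint
+ * values) a table state with hint 0x20 stays enabled if _CST lists
+ * address 0x20, while a state with hint 0x60 absent from _CST is set up
+ * with CPUIDLE_FLAG_OFF in intel_idle_init_cstates_icpu(), unless it is
+ * marked CPUIDLE_FLAG_ALWAYS_ENABLE.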
+ */ + for (cstate = 1; cstate < limit; cstate++) { + if (acpi_state_table.states[cstate].address == mwait_hint) + return false; + } + return true; +} +#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static inline bool intel_idle_acpi_cst_extract(void) { return false; } +static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } +static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ + /* * intel_idle_probe() */ @@ -1109,17 +1354,15 @@ static int __init intel_idle_probe(void) } id = x86_match_cpu(intel_idle_ids); - if (!id) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) - pr_debug("does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; - } - - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { - pr_debug("Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; + if (id) { + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + } else { + id = x86_match_cpu(intel_mwait_ids); + if (!id) + return -ENODEV; } if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) @@ -1135,7 +1378,13 @@ static int __init intel_idle_probe(void) pr_debug("MWAIT substates: 0x%x\n", mwait_substates); icpu = (const struct idle_cpu *)id->driver_data; - cpuidle_state_table = icpu->state_table; + if (icpu) { + cpuidle_state_table = icpu->state_table; + if (icpu->use_acpi) + intel_idle_acpi_cst_extract(); + } else if (!intel_idle_acpi_cst_extract()) { + return -ENODEV; + } pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); @@ -1317,60 +1566,39 @@ static void intel_idle_state_table_update(void) } } -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states - */ -static void __init intel_idle_cpuidle_driver_init(void) +static void intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) { int cstate; - struct cpuidle_driver *drv = &intel_idle_driver; - - intel_idle_state_table_update(); - - cpuidle_poll_state_init(drv); - drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { - int num_substates, mwait_hint, mwait_cstate; + unsigned int mwait_hint; - if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_s2idle == NULL)) + if (intel_idle_max_cstate_reached(cstate)) break; - if (cstate + 1 > max_cstate) { - pr_info("max_cstate %d reached\n", max_cstate); + if (!cpuidle_state_table[cstate].enter && + !cpuidle_state_table[cstate].enter_s2idle) break; - } - mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); - - /* number of sub-states for this state in CPUID.MWAIT */ - num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) - & MWAIT_SUBSTATE_MASK; - - /* if NO sub-states for this state in CPUID, skip it */ - if (num_substates == 0) - continue; - - /* if state marked as disabled, skip it */ + /* If marked as unusable, skip this state. */ if (cpuidle_state_table[cstate].disabled != 0) { pr_debug("state %s is disabled\n", cpuidle_state_table[cstate].name); continue; } + mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + if (!intel_idle_verify_cstate(mwait_hint)) + continue; - if (((mwait_cstate + 1) > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); + /* Structure copy. 
*/ + drv->states[drv->state_count] = cpuidle_state_table[cstate]; - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; - drv->state_count += 1; + drv->state_count++; } if (icpu->byt_auto_demotion_disable_flag) { @@ -1379,6 +1607,24 @@ static void __init intel_idle_cpuidle_driver_init(void) } } +/* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static void __init intel_idle_cpuidle_driver_init(void) +{ + struct cpuidle_driver *drv = &intel_idle_driver; + + intel_idle_state_table_update(); + + cpuidle_poll_state_init(drv); + drv->state_count = 1; + + if (icpu) + intel_idle_init_cstates_icpu(drv); + else + intel_idle_init_cstates_acpi(drv); +} /* * intel_idle_cpu_init() @@ -1397,6 +1643,9 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } + if (!icpu) + return 0; + if (icpu->auto_demotion_disable_flags) auto_demotion_disable(); diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index c4810c73b2a2..2de45587569a 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = { ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), - ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, + ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), IIO_CHAN_SOFT_TIMESTAMP(7) }; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 121b4e89f038..bcdf25f32e22 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -189,6 +189,14 @@ struct bmc150_accel_data { struct mutex mutex; u8 fifo_mode, watermark; s16 buffer[8]; + /* + * Ensure there is sufficient space and correct alignment for + * the timestamp if enabled + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; u8 bw_bits; u32 slope_dur; u32 slope_thres; @@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, * now. 
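 * Each FIFO frame holds one 16-bit sample per axis (hence the i * 3
 * indexing below); only the axes set in active_scan_mask are copied
 * into data->scan, so the 8-byte aligned timestamp member can safely
 * follow the channel data when the scan is pushed to the IIO buffer.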
*/ for (i = 0; i < count; i++) { - u16 sample[8]; int j, bit; j = 0; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) - memcpy(&sample[j++], &buffer[i * 3 + bit], 2); + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], + sizeof(data->scan.channels[0])); - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + tstamp); tstamp += sample_period; } diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index fee535d6e45b..da9452e81105 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -126,6 +126,12 @@ enum kx_chipset { KX_MAX_CHIPS /* this must be last */ }; +enum kx_acpi_type { + ACPI_GENERIC, + ACPI_SMO8500, + ACPI_KIOX010A, +}; + struct kxcjk1013_data { struct i2c_client *client; struct iio_trigger *dready_trig; @@ -142,7 +148,7 @@ struct kxcjk1013_data { bool motion_trigger_on; int64_t timestamp; enum kx_chipset chipset; - bool is_smo8500_device; + enum kx_acpi_type acpi_type; }; enum kxcjk1013_axis { @@ -269,6 +275,32 @@ static const struct { {19163, 1, 0}, {38326, 0, 1} }; +#ifdef CONFIG_ACPI +enum kiox010a_fn_index { + KIOX010A_SET_LAPTOP_MODE = 1, + KIOX010A_SET_TABLET_MODE = 2, +}; + +static int kiox010a_dsm(struct device *dev, int fn_index) +{ + acpi_handle handle = ACPI_HANDLE(dev); + guid_t kiox010a_dsm_guid; + union acpi_object *obj; + + if (!handle) + return -ENODEV; + + guid_parse("1f339696-d475-4e26-8cad-2e9f8e6d7a91", &kiox010a_dsm_guid); + + obj = acpi_evaluate_dsm(handle, &kiox010a_dsm_guid, 1, fn_index, NULL); + if (!obj) + return -EIO; + + ACPI_FREE(obj); + return 0; +} +#endif + static int kxcjk1013_set_mode(struct kxcjk1013_data *data, enum kxcjk1013_mode mode) { @@ -346,6 +378,13 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data) { int ret; +#ifdef CONFIG_ACPI + if (data->acpi_type == ACPI_KIOX010A) { + /* Make sure the kbd and touchpad on 2-in-1s using 2 KXCJ91008-s work */ + kiox010a_dsm(&data->client->dev, KIOX010A_SET_LAPTOP_MODE); + } +#endif + ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I); if (ret < 0) { dev_err(&data->client->dev, "Error reading who_am_i\n"); @@ -1233,7 +1272,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private) static const char *kxcjk1013_match_acpi_device(struct device *dev, enum kx_chipset *chipset, - bool *is_smo8500_device) + enum kx_acpi_type *acpi_type) { const struct acpi_device_id *id; @@ -1242,7 +1281,9 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev, return NULL; if (strcmp(id->id, "SMO8500") == 0) - *is_smo8500_device = true; + *acpi_type = ACPI_SMO8500; + else if (strcmp(id->id, "KIOX010A") == 0) + *acpi_type = ACPI_KIOX010A; *chipset = (enum kx_chipset)id->driver_data; @@ -1278,7 +1319,7 @@ static int kxcjk1013_probe(struct i2c_client *client, } else if (ACPI_HANDLE(&client->dev)) { name = kxcjk1013_match_acpi_device(&client->dev, &data->chipset, - &data->is_smo8500_device); + &data->acpi_type); } else return -ENODEV; @@ -1296,7 +1337,7 @@ static int kxcjk1013_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &kxcjk1013_info; - if (client->irq > 0 && !data->is_smo8500_device) { + if (client->irq > 0 && data->acpi_type != ACPI_SMO8500) { ret = devm_request_threaded_irq(&client->dev, client->irq, kxcjk1013_data_rdy_trig_poll, kxcjk1013_event_handler, diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 0b876b2dc5bd..76429e2a6fb8 100644 --- 
a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) const struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct kxsd9_state *st = iio_priv(indio_dev); + /* + * Ensure correct positioning and alignment of timestamp. + * No need to zero initialize as all elements written. + */ + struct { + __be16 chan[4]; + s64 ts __aligned(8); + } hw_values; int ret; - /* 4 * 16bit values AND timestamp */ - __be16 hw_values[8]; ret = regmap_bulk_read(st->map, KXSD9_REG_X, - &hw_values, - 8); + hw_values.chan, + sizeof(hw_values.chan)); if (ret) { dev_err(st->dev, "error reading data\n"); @@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, - hw_values, + &hw_values, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index 8b5a6aff9bf4..70ec3490bdb8 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -52,6 +52,14 @@ struct mma7455_data { struct regmap *regmap; + /* + * Used to reorganize data. Will ensure correct alignment of + * the timestamp if present + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static int mma7455_drdy(struct mma7455_data *mma7455) @@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma7455_data *mma7455 = iio_priv(indio_dev); - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ int ret; ret = mma7455_drdy(mma7455); if (ret) goto done; - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, - sizeof(__le16) * 3); + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, + mma7455->scan.channels, + sizeof(mma7455->scan.channels)); if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 00e100fc845a..85d453b3f5ec 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -110,6 +110,12 @@ struct mma8452_data { int sleep_val; struct regulator *vdd_reg; struct regulator *vddio_reg; + + /* Ensure correct alignment of time stamp when present */ + struct { + __be16 channels[3]; + s64 ts __aligned(8); + } buffer; }; /** @@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma8452_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ int ret; - ret = mma8452_read(data, (__be16 *)buffer); + ret = mma8452_read(data, data->buffer.channels); if (ret < 0) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); done: @@ -1685,10 +1690,13 @@ static int mma8452_probe(struct i2c_client *client, ret = mma8452_set_freefall_mode(data, false); if (ret < 0) - goto buffer_cleanup; + goto unregister_device; return 0; +unregister_device: + iio_device_unregister(indio_dev); + buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 66d768d971e1..6e429072e44a 100644 --- a/drivers/iio/accel/sca3000.c +++ 
b/drivers/iio/accel/sca3000.c @@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st, st->tx[0] = SCA3000_READ_REG(reg_address_high); ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); if (ret) { - dev_err(get_device(&st->us->dev), "problem reading register"); + dev_err(&st->us->dev, "problem reading register\n"); return ret; } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index f0af3a42f53c..cb5788084299 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -784,6 +784,7 @@ config STM32_ADC_CORE depends on ARCH_STM32 || COMPILE_TEST depends on OF depends on REGULATOR + depends on HAS_IOMEM select IIO_BUFFER select MFD_STM32_TIMERS select IIO_STM32_TIMER_TRIGGER diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index 217a5a5c3c6d..7e741294de7b 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -309,7 +309,7 @@ static int ad7780_probe(struct spi_device *spi) ret = ad7780_init_gpios(&spi->dev, st); if (ret) - goto error_cleanup_buffer_and_trigger; + return ret; st->reg = devm_regulator_get(&spi->dev, "avdd"); if (IS_ERR(st->reg)) diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index bbc41ecf0d2f..6ed6d1410201 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -541,7 +541,7 @@ static const struct iio_info ad7797_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, - .attrs = &ad7793_attribute_group, + .attrs = &ad7797_attribute_group, .validate_trigger = ad_sd_validate_trigger, }; diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index 6b51bfcad0d0..325cd7d18b91 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, int ret; int i; int bits_per_word = ad7949_adc->resolution; - int mask = GENMASK(ad7949_adc->resolution, 0); + int mask = GENMASK(ad7949_adc->resolution - 1, 0); struct spi_message msg; struct spi_transfer tx[] = { { diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index bdd7cba6f6b0..d3e9ec00ef95 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -146,6 +146,11 @@ struct ina2xx_chip_info { int range_vbus; /* Bus voltage maximum in V */ int pga_gain_vshunt; /* Shunt voltage PGA gain */ bool allow_async_readout; + /* data buffer needs space for channel data and timestamp */ + struct { + u16 chan[4]; + u64 ts __aligned(8); + } scan; }; static const struct ina2xx_config ina2xx_config[] = { @@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) static int ina2xx_work_buffer(struct iio_dev *indio_dev) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); - /* data buffer needs space for channel data and timestap */ - unsigned short data[4 + sizeof(s64)/sizeof(short)]; int bit, ret, i = 0; s64 time; @@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) if (ret < 0) return ret; - data[i++] = val; + chip->scan.chan[i++] = val; } - iio_push_to_buffers_with_timestamp(indio_dev, data, time); + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); return 0; }; diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 3b6f3b9a6c5b..a1b66f92e1bf 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -35,6 +35,11 @@ struct max1118 { struct spi_device *spi; struct mutex lock; struct regulator *reg; + /* Ensure natural alignment 
of buffer elements */ + struct { + u8 channels[2]; + s64 ts __aligned(8); + } scan; u8 data ____cacheline_aligned; }; @@ -159,7 +164,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1118 *adc = iio_priv(indio_dev); - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ int scan_index; int i = 0; @@ -177,10 +181,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) goto out; } - data[i] = ret; + adc->scan.channels[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index ea24d7c58b12..8ae4cf135157 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -95,16 +95,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) { int ret; - mutex_lock(&adc->lock); - ret = i2c_master_send(adc->i2c, &newconfig, 1); if (ret > 0) { adc->config = newconfig; ret = 0; } - mutex_unlock(&adc->lock); - return ret; } @@ -137,6 +133,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, u8 config; u8 req_channel = channel->channel; + mutex_lock(&adc->lock); + if (req_channel != MCP3422_CHANNEL(adc->config)) { config = adc->config; config &= ~MCP3422_CHANNEL_MASK; @@ -144,12 +142,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, config &= ~MCP3422_PGA_MASK; config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); ret = mcp3422_update_config(adc, config); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&adc->lock); return ret; + } msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } - return mcp3422_read(adc, value, &config); + ret = mcp3422_read(adc, value, &config); + + mutex_unlock(&adc->lock); + + return ret; } static int mcp3422_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c index 7bbb64ca3b32..2449d91e4766 100644 --- a/drivers/iio/adc/mt6577_auxadc.c +++ b/drivers/iio/adc/mt6577_auxadc.c @@ -9,9 +9,9 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/iopoll.h> #include <linux/io.h> #include <linux/iio/iio.h> @@ -279,6 +279,8 @@ static int mt6577_auxadc_probe(struct platform_device *pdev) goto err_disable_clk; } + adc_dev->dev_comp = device_get_match_data(&pdev->dev); + mutex_init(&adc_dev->lock); mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC, diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c index 21fdcde77883..56e7696aa3c0 100644 --- a/drivers/iio/adc/qcom-spmi-adc5.c +++ b/drivers/iio/adc/qcom-spmi-adc5.c @@ -786,7 +786,7 @@ static int adc5_probe(struct platform_device *pdev) static struct platform_driver adc5_driver = { .driver = { - .name = "qcom-spmi-adc5.c", + .name = "qcom-spmi-adc5", .of_match_table = adc5_match_table, }, .probe = adc5_probe, diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 203ad59da336..71c455a27097 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -598,7 +598,7 @@ static const struct vadc_channels vadc_chans[] = { VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1) VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0) - VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 
0) + VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT) VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0) VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0) VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0) diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index c37f201294b2..b1fb1fd763fc 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3); break; default: - return -EINVAL; + goto err_e_inval; } /* @@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Failed to get child reg property of ADC \"%pOFn\".\n", child); - return ret; + goto err_of_node_put; } /* Channel number is too high. */ @@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Only %i channels supported with %pOFn, but reg = <%i>.\n", num_channels, child, reg); - return -EINVAL; + goto err_e_inval; } } @@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Channel %i uses different ADC mode than the rest.\n", reg); - return -EINVAL; + goto err_e_inval; } /* Channel is valid, grab the regulator. */ @@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) if (IS_ERR(vref)) { dev_dbg(dev, "Channel %i 'vref' supply not connected.\n", reg); - return PTR_ERR(vref); + ret = PTR_ERR(vref); + goto err_of_node_put; } priv->vref[reg] = vref; @@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) * attached to the GyroADC at a time, so if we found it, * we can stop parsing here. */ - if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) + if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) { + of_node_put(child); break; + } } if (first) { @@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) } return 0; + +err_e_inval: + ret = -EINVAL; +err_of_node_put: + of_node_put(child); + return ret; } static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev) diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 582ba047c4a6..cddc3dfd2ab7 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -372,7 +372,7 @@ static int rockchip_saradc_resume(struct device *dev) ret = clk_prepare_enable(info->clk); if (ret) - return ret; + clk_disable_unprepare(info->pclk); return ret; } diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 93a096a91f8c..14d6a537289c 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -65,12 +65,14 @@ struct stm32_adc_priv; * @clk_sel: clock selection routine * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) * @has_syscfg: SYSCFG capability flags + * @num_irqs: number of interrupt lines */ struct stm32_adc_priv_cfg { const struct stm32_adc_common_regs *regs; int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *); u32 max_clk_rate_hz; unsigned int has_syscfg; + unsigned int num_irqs; }; /** @@ -372,21 +374,15 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, struct device_node *np = pdev->dev.of_node; unsigned int i; - for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { + /* + * Interrupt(s) must be provided, depending on the compatible: + * - stm32f4/h7 shares a common interrupt line. 
+ + * - stm32mp1 has one line per ADC + */ + for (i = 0; i < priv->cfg->num_irqs; i++) { priv->irq[i] = platform_get_irq(pdev, i); - if (priv->irq[i] < 0) { - /* - * At least one interrupt must be provided, make others - * optional: - * - stm32f4/h7 shares a common interrupt. - * - stm32mp1, has one line per ADC (either for ADC1, - * ADC2 or both). - */ - if (i && priv->irq[i] == -ENXIO) - continue; - + if (priv->irq[i] < 0) return priv->irq[i]; - } } priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0, @@ -397,9 +393,7 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, return -ENOMEM; } - for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { - if (priv->irq[i] < 0) - continue; + for (i = 0; i < priv->cfg->num_irqs; i++) { irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler); irq_set_handler_data(priv->irq[i], priv); } @@ -417,11 +411,8 @@ static void stm32_adc_irq_remove(struct platform_device *pdev, irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq)); irq_domain_remove(priv->domain); - for (i = 0; i < STM32_ADC_MAX_ADCS; i++) { - if (priv->irq[i] < 0) - continue; + for (i = 0; i < priv->cfg->num_irqs; i++) irq_set_chained_handler(priv->irq[i], NULL); - } } static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv, @@ -789,6 +780,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev) { return stm32_adc_core_hw_start(dev); } + +static int stm32_adc_core_runtime_idle(struct device *dev) +{ + pm_runtime_mark_last_busy(dev); + + return 0; +} #endif static const struct dev_pm_ops stm32_adc_core_pm_ops = { @@ -796,13 +794,14 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = { pm_runtime_force_resume) SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend, stm32_adc_core_runtime_resume, - NULL) + stm32_adc_core_runtime_idle) }; static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { .regs = &stm32f4_adc_common_regs, .clk_sel = stm32f4_adc_clk_sel, .max_clk_rate_hz = 36000000, + .num_irqs = 1, }; static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { @@ -810,6 +809,7 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { .clk_sel = stm32h7_adc_clk_sel, .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER, + .num_irqs = 1, }; static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { @@ -817,6 +817,7 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { .clk_sel = stm32h7_adc_clk_sel, .max_clk_rate_hz = 40000000, .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, + .num_irqs = 2, }; static const struct of_device_id stm32_adc_of_match[] = { diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 73aee5949b6b..94fde39d9ff7 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1367,8 +1367,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc) static void stm32_adc_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; + struct stm32_adc *adc = iio_priv(indio_dev); + int residue = stm32_adc_dma_residue(adc); - iio_trigger_poll_chained(indio_dev->trig); + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling the irq handler associated with the hardware trigger is not + * relevant as the conversions have already been done. Data + * transfers are performed directly in the DMA callback instead. + * This implementation avoids calling the trigger irq handler, which + * may sleep, in an atomic context (DMA irq handler context).
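+	 * The loop below therefore drains the DMA ring buffer directly:
+	 * one scan of scan_bytes is pushed per iteration starting at
+	 * adc->bufi, which wraps back to 0 at rx_buf_sz. E.g. (hypothetical
+	 * sizes) residue = 12 with scan_bytes = 4 pushes three scans.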
+ */ + dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi); + + while (residue >= indio_dev->scan_bytes) { + u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi]; + + iio_push_to_buffers(indio_dev, buffer); + + residue -= indio_dev->scan_bytes; + adc->bufi += indio_dev->scan_bytes; + if (adc->bufi >= adc->rx_buf_sz) + adc->bufi = 0; + } } static int stm32_adc_dma_start(struct iio_dev *indio_dev) @@ -1735,15 +1757,27 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) return 0; } -static int stm32_adc_dma_request(struct iio_dev *indio_dev) +static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct dma_slave_config config; int ret; - adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); - if (!adc->dma_chan) + adc->dma_chan = dma_request_chan(dev, "rx"); + if (IS_ERR(adc->dma_chan)) { + ret = PTR_ERR(adc->dma_chan); + if (ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + dev_err(dev, + "DMA channel request failed with %d\n", + ret); + return ret; + } + + /* DMA is optional: fall back to IRQ mode */ + adc->dma_chan = NULL; return 0; + } adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, STM32_DMA_BUFFER_SIZE, @@ -1778,6 +1812,7 @@ static int stm32_adc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct device *dev = &pdev->dev; + irqreturn_t (*handler)(int irq, void *p) = NULL; struct stm32_adc *adc; int ret; @@ -1839,13 +1874,15 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = stm32_adc_dma_request(indio_dev); + ret = stm32_adc_dma_request(dev, indio_dev); if (ret < 0) return ret; + if (!adc->dma_chan) + handler = &stm32_adc_trigger_handler; + ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_adc_trigger_handler, + &iio_pollfunc_store_time, handler, &stm32_adc_buffer_setup_ops); if (ret) { dev_err(&pdev->dev, "buffer setup failed\n"); diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 3ae0366a7b58..c2948defa785 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -62,7 +62,7 @@ enum sd_converter_type { struct stm32_dfsdm_dev_data { int type; - int (*init)(struct iio_dev *indio_dev); + int (*init)(struct device *dev, struct iio_dev *indio_dev); unsigned int num_channels; const struct regmap_config *regmap_cfg; }; @@ -1359,13 +1359,18 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev) } } -static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev) +static int stm32_dfsdm_dma_request(struct device *dev, + struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); - adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); - if (!adc->dma_chan) - return -EINVAL; + adc->dma_chan = dma_request_chan(dev, "rx"); + if (IS_ERR(adc->dma_chan)) { + int ret = PTR_ERR(adc->dma_chan); + + adc->dma_chan = NULL; + return ret; + } adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE, @@ -1415,7 +1420,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, &adc->dfsdm->ch_list[ch->channel]); } -static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) +static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1442,10 +1447,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) indio_dev->num_channels = 1; indio_dev->channels 
= ch; - return stm32_dfsdm_dma_request(indio_dev); + return stm32_dfsdm_dma_request(dev, indio_dev); } -static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) +static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev) { struct iio_chan_spec *ch; struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1489,8 +1494,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) init_completion(&adc->completion); /* Optionally request DMA */ - if (stm32_dfsdm_dma_request(indio_dev)) { - dev_dbg(&indio_dev->dev, "No DMA support\n"); + ret = stm32_dfsdm_dma_request(dev, indio_dev); + if (ret) { + if (ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + dev_err(dev, + "DMA channel request failed with %d\n", + ret); + return ret; + } + + dev_dbg(dev, "No DMA support\n"); return 0; } @@ -1603,7 +1617,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev) adc->dfsdm->fl_list[adc->fl_id].sync_mode = val; adc->dev_data = dev_data; - ret = dev_data->init(iio); + ret = dev_data->init(dev, iio); if (ret < 0) return ret; diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 0235863ff77b..cc8cbffe2b7b 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -33,6 +33,12 @@ struct adc081c { /* 8, 10 or 12 */ int bits; + + /* Ensure natural alignment of buffer elements */ + struct { + u16 channel; + s64 ts __aligned(8); + } scan; }; #define REG_CONV_RES 0x00 @@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc081c *data = iio_priv(indio_dev); - u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ int ret; ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); if (ret < 0) goto out; - buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + data->scan.channel = ret; + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c index 6ea39f4bbb37..55abd2fd2ccc 100644 --- a/drivers/iio/adc/ti-adc0832.c +++ b/drivers/iio/adc/ti-adc0832.c @@ -28,6 +28,12 @@ struct adc0832 { struct regulator *reg; struct mutex lock; u8 mux_bits; + /* + * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp + * May be shorter if not all channels are enabled subject + * to the timestamp remaining 8 byte aligned. 
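+	 * E.g. with four channels enabled, only data[0..3] carry samples
+	 * and the IIO core stores the timestamp at the next 8-byte
+	 * boundary (offset 8 in that case), which the __aligned(8) below
+	 * keeps naturally aligned.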
+ */ + u8 data[24] __aligned(8); u8 tx_buf[2] ____cacheline_aligned; u8 rx_buf[2]; @@ -199,7 +205,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc0832 *adc = iio_priv(indio_dev); - u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */ int scan_index; int i = 0; @@ -217,10 +222,10 @@ goto out; } - data[i] = ret; + adc->data[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, adc->data, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index bdedf456ee05..fc053216d282 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -25,6 +25,11 @@ struct adc084s021 { struct spi_transfer spi_trans; struct regulator *reg; struct mutex lock; + /* Buffer used to align data */ + struct { + __be16 channels[4]; + s64 ts __aligned(8); + } scan; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache line. @@ -140,14 +145,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) struct iio_poll_func *pf = pollfunc; struct iio_dev *indio_dev = pf->indio_dev; struct adc084s021 *adc = iio_priv(indio_dev); - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ mutex_lock(&adc->lock); - if (adc084s021_adc_conversion(adc, &data) < 0) + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) dev_err(&adc->spi->dev, "Failed to read data\n"); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c index 68a9dcb8faa2..db476482cc75 100644 --- a/drivers/iio/adc/ti-adc12138.c +++ b/drivers/iio/adc/ti-adc12138.c @@ -47,6 +47,12 @@ struct adc12138 { struct completion complete; /* The number of cclk periods for the S/H's acquisition time */ unsigned int acquisition_time; + /* + * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp. + * Less may be needed if not all channels are enabled, as long as + * the 8 byte alignment of the timestamp is maintained. + */ + __be16 data[20] __aligned(8); u8 tx_buf[2] ____cacheline_aligned; u8 rx_buf[2]; @@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc12138 *adc = iio_priv(indio_dev); - __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */ __be16 trash; int ret; int scan_index; @@ -345,7 +350,7 @@ reinit_completion(&adc->complete); ret = adc12138_start_and_read_conv(adc, scan_chan, - i ?
&adc->data[i - 1] : &trash); if (ret) { dev_warn(&adc->spi->dev, "failed to start conversion\n"); @@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) } if (i) { - ret = adc12138_read_conv_data(adc, &data[i - 1]); + ret = adc12138_read_conv_data(adc, &adc->data[i - 1]); if (ret) { dev_warn(&adc->spi->dev, "failed to get conversion data\n"); @@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) } } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, adc->data, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index a550b132cfb7..871690a47661 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -309,6 +309,7 @@ static const struct iio_chan_spec ads1115_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +#ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -326,6 +327,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) return ret < 0 ? ret : 0; } +#else /* !CONFIG_PM */ + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + return 0; +} + +#endif /* !CONFIG_PM */ + static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c index 552c2be8d87a..4b706949a67f 100644 --- a/drivers/iio/adc/ti-ads124s08.c +++ b/drivers/iio/adc/ti-ads124s08.c @@ -97,6 +97,14 @@ struct ads124s_private { struct gpio_desc *reset_gpio; struct spi_device *spi; struct mutex lock; + /* + * Used to correctly align data. + * Ensure timestamp is naturally aligned. + * Note that the full buffer length may not be needed if not + * all channels are enabled, as long as the alignment of the + * timestamp is maintained. 
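+	 * E.g. two enabled u32 channels occupy 8 bytes, so the s64
+	 * timestamp appended by the IIO core starts at offset 8 and
+	 * remains naturally aligned thanks to the __aligned(8) below.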
+ */ + u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u32)] __aligned(8); u8 data[5] ____cacheline_aligned; }; @@ -270,7 +278,6 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ads124s_private *priv = iio_priv(indio_dev); - u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; int scan_index, j = 0; int ret; @@ -285,7 +292,7 @@ if (ret) dev_err(&priv->spi->dev, "Start ADC conversions failed\n"); - buffer[j] = ads124s_read(indio_dev, scan_index); + priv->buffer[j] = ads124s_read(indio_dev, scan_index); ret = ads124s_write_cmd(indio_dev, ADS124S08_STOP_CONV); if (ret) dev_err(&priv->spi->dev, "Stop ADC conversions failed\n"); @@ -293,7 +300,7 @@ j++; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c index 9a460807d46d..8a8792010c20 100644 --- a/drivers/iio/adc/ti-ads8344.c +++ b/drivers/iio/adc/ti-ads8344.c @@ -29,19 +29,20 @@ struct ads8344 { struct mutex lock; u8 tx_buf ____cacheline_aligned; - u16 rx_buf; + u8 rx_buf[3]; }; -#define ADS8344_VOLTAGE_CHANNEL(chan, si) \ +#define ADS8344_VOLTAGE_CHANNEL(chan, addr) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .address = addr, \ } -#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si) \ +#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ @@ -50,6 +51,7 @@ struct ads8344 { .differential = 1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .address = addr, \ } static const struct iio_chan_spec ads8344_channels[] = { @@ -89,11 +91,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel, udelay(9); - ret = spi_read(spi, &adc->rx_buf, 2); + ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf)); if (ret) return ret; - return adc->rx_buf; + return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7; } static int ads8344_read_raw(struct iio_dev *iio, @@ -105,7 +107,7 @@ switch (mask) { case IIO_CHAN_INFO_RAW: mutex_lock(&adc->lock); - *value = ads8344_adc_conversion(adc, channel->scan_index, + *value = ads8344_adc_conversion(adc, channel->address, channel->differential); mutex_unlock(&adc->lock); if (*value < 0) diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 4fd389678dba..3f0b88b13dd3 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500; #define XADC_FLAGS_BUFFERED BIT(0) +/* + * The XADC hardware supports a samplerate of up to 1 MSPS. Unfortunately it + * does not have a hardware FIFO, which means an interrupt is generated for + * each conversion sequence. At a 1 MSPS sample rate the CPU of the ZYNQ7000 is + * so completely overloaded by the interrupts that it soft-locks up. For this + * reason the driver limits the maximum samplerate to 150 kSPS. At this rate + * the CPU is fairly busy, but still responsive.
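+ * The achievable rate follows from the clocking used below: samplerate =
+ * DCLK / div / 26 with a minimum divider of 2 (see xadc_read_samplerate()),
+ * so e.g. a hypothetical 104 MHz DCLK tops out at 2 MSPS and needs
+ * div = 27 to stay under this 150 kSPS cap.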
+ */ +#define XADC_MAX_SAMPLERATE 150000 + static void xadc_write_reg(struct xadc *xadc, unsigned int reg, uint32_t val) { @@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state) spin_lock_irqsave(&xadc->lock, flags); xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); - xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); + xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS); if (state) val |= XADC_AXI_INT_EOS; else @@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode) { uint16_t val; + /* Power down the ADC-B when it is not needed. */ switch (seq_mode) { case XADC_CONF1_SEQ_SIMULTANEOUS: case XADC_CONF1_SEQ_INDEPENDENT: - val = XADC_CONF2_PD_ADC_B; + val = 0; break; default: - val = 0; + val = XADC_CONF2_PD_ADC_B; break; } @@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev) if (ret) goto err; + /* + * In simultaneous mode the upper and lower aux channels are sampled at + * the same time. In this mode the upper 8 bits in the sequencer + * register are "don't care" and the lower 8 bits control two channels + * each. As such we must set the bit if either the channel in the lower + * group or the upper group is enabled. + */ + if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS) + scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000; + ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); if (ret) goto err; @@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = { .postdisable = &xadc_postdisable, }; +static int xadc_read_samplerate(struct xadc *xadc) +{ + unsigned int div; + uint16_t val16; + int ret; + + ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); + if (ret) + return ret; + + div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; + if (div < 2) + div = 2; + + return xadc_get_dclk_rate(xadc) / div / 26; +} + static int xadc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct xadc *xadc = iio_priv(indio_dev); - unsigned int div; uint16_t val16; int ret; @@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val = -((273150 << 12) / 503975); return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: - ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); - if (ret) + ret = xadc_read_samplerate(xadc); + if (ret < 0) return ret; - div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; - if (div < 2) - div = 2; - - *val = xadc_get_dclk_rate(xadc) / div / 26; - + *val = ret; return IIO_VAL_INT; default: return -EINVAL; } } -static int xadc_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int val, int val2, long info) +static int xadc_write_samplerate(struct xadc *xadc, int val) { - struct xadc *xadc = iio_priv(indio_dev); unsigned long clk_rate = xadc_get_dclk_rate(xadc); unsigned int div; if (!clk_rate) return -EINVAL; - if (info != IIO_CHAN_INFO_SAMP_FREQ) - return -EINVAL; - if (val <= 0) return -EINVAL; /* Max. 150 kSPS */ - if (val > 150000) - val = 150000; + if (val > XADC_MAX_SAMPLERATE) + val = XADC_MAX_SAMPLERATE; val *= 26; @@ -927,7 +954,7 @@ * limit.
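 * E.g. with a hypothetical clk_rate of 100 MHz and a requested rate of
 * 150000 SPS, val is 3900000 after the multiply above and div = 25, but
 * 100 MHz / 25 / 26 is ~153.8 kSPS, above XADC_MAX_SAMPLERATE, so div
 * is bumped to 26 (~147.9 kSPS).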
*/ div = clk_rate / val; - if (clk_rate / div / 26 > 150000) + if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE) div++; if (div < 2) div = 2; @@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev, div << XADC_CONF2_DIV_OFFSET); } +static int xadc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, long info) +{ + struct xadc *xadc = iio_priv(indio_dev); + + if (info != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + return xadc_write_samplerate(xadc, val); +} + static const struct iio_event_spec xadc_temp_events[] = { { .type = IIO_EV_TYPE_THRESH, @@ -1225,6 +1263,21 @@ static int xadc_probe(struct platform_device *pdev) if (ret) goto err_free_samplerate_trigger; + /* + * Make sure not to exceed the maximum samplerate since otherwise the + * resulting interrupt storm will soft-lock the system. + */ + if (xadc->ops->flags & XADC_FLAGS_BUFFERED) { + ret = xadc_read_samplerate(xadc); + if (ret < 0) + goto err_free_samplerate_trigger; + if (ret > XADC_MAX_SAMPLERATE) { + ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE); + if (ret < 0) + goto err_free_samplerate_trigger; + } + } + ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); if (ret) diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 2ebdfc35bcda..7bf4e9a16a6a 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -75,6 +75,11 @@ struct ccs811_data { struct ccs811_reading buffer; struct iio_trigger *drdy_trig; bool drdy_trig_on; + /* Ensures correct alignment of timestamp if present */ + struct { + s16 channels[2]; + s64 ts __aligned(8); + } scan; }; static const struct iio_chan_spec ccs811_channels[] = { @@ -306,17 +311,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ccs811_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ int ret; - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, - (u8 *)&buf); + ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, + sizeof(data->scan.channels), + (u8 *)data->scan.channels); if (ret != 4) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c index 23c9ab252470..07bb90d72434 100644 --- a/drivers/iio/chemical/pms7003.c +++ b/drivers/iio/chemical/pms7003.c @@ -73,6 +73,11 @@ struct pms7003_state { struct pms7003_frame frame; struct completion frame_ready; struct mutex lock; /* must be held whenever state gets touched */ + /* Used to construct scan to push to the IIO buffer */ + struct { + u16 data[3]; /* PM1, PM2P5, PM10 */ + s64 ts; + } scan; }; static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd) @@ -104,7 +109,6 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct pms7003_state *state = iio_priv(indio_dev); struct pms7003_frame *frame = &state->frame; - u16 data[3 + 1 + 4]; /* PM1, PM2P5, PM10, padding, timestamp */ int ret; mutex_lock(&state->lock); @@ -114,12 +118,15 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p) goto err; } - data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET); - data[PM2P5] = 
pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET); - data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET); + state->scan.data[PM1] = + pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET); + state->scan.data[PM2P5] = + pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET); + state->scan.data[PM10] = + pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET); mutex_unlock(&state->lock); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &state->scan, iio_get_time_ns(indio_dev)); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index edbb956e81e8..c0845d892faa 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -230,15 +230,18 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct sps30_state *state = iio_priv(indio_dev); int ret; - s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */ + struct { + s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ + s64 ts; + } scan; mutex_lock(&state->lock); - ret = sps30_do_meas(state, data, 4); + ret = sps30_do_meas(state, scan.data, ARRAY_SIZE(scan.data)); mutex_unlock(&state->lock); if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index d2609e6feda4..b4f394f05863 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -57,10 +57,13 @@ static void get_default_min_max_freq(enum motionsensor_type type, { switch (type) { case MOTIONSENSE_TYPE_ACCEL: - case MOTIONSENSE_TYPE_GYRO: *min_freq = 12500; *max_freq = 100000; break; + case MOTIONSENSE_TYPE_GYRO: + *min_freq = 25000; + *max_freq = 100000; + break; case MOTIONSENSE_TYPE_MAG: *min_freq = 5000; *max_freq = 25000; diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 4a3064fb6cd9..364683783ae5 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -80,7 +80,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); - if (!sdata->sensor_settings->odr.addr) + if (!sdata->sensor_settings->odr.mask) return 0; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index c64e6898ff20..d1ffc8cb08ed 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -188,9 +188,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, return ret; if (pwr_down) - st->pwr_down_mask |= (1 << chan->channel); - else st->pwr_down_mask &= ~(1 << chan->channel); + else + st->pwr_down_mask |= (1 << chan->channel); ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 2d897e64c6a9..424922cad1e3 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, 
val2); - ret = IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_MICRO; } else { int mult; @@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, ret = IIO_VAL_INT; break; default: - ret = -EINVAL; + return -EINVAL; } unlock: diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c index 0ec4d2609ef9..364925d703db 100644 --- a/drivers/iio/dac/vf610_dac.c +++ b/drivers/iio/dac/vf610_dac.c @@ -225,6 +225,7 @@ static int vf610_dac_probe(struct platform_device *pdev) return 0; error_iio_device_register: + vf610_dac_exit(info); clk_disable_unprepare(info->clk); return ret; diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c index d3fbe9d86467..1c3c1bd53374 100644 --- a/drivers/iio/gyro/itg3200_buffer.c +++ b/drivers/iio/gyro/itg3200_buffer.c @@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct itg3200 *st = iio_priv(indio_dev); - __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)]; + /* + * Ensure correct alignment and padding including for the + * timestamp that may be inserted. + */ + struct { + __be16 buf[ITG3200_SCAN_ELEMENTS]; + s64 ts __aligned(8); + } scan; - int ret = itg3200_read_all_channels(st->i2c, buf); + int ret = itg3200_read_all_channels(st->i2c, scan.buf); if (ret < 0) goto error_ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 80154bca18b6..7046bca1d7eb 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -550,6 +550,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p) MPU3050_FIFO_R, &fifo_values[offset], toread); + if (ret) + goto out_trigger_unlock; dev_dbg(mpu3050->dev, "%04x %04x %04x %04x %04x\n", diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index dc22dc363a99..29104656a537 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -63,6 +63,7 @@ static const struct reg_field afe4403_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct data layout to push into IIO buffer. 
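+ *	    One s32 slot per enabled channel is filled in the trigger
+ *	    handler; the __aligned(8) on the array keeps the timestamp
+ *	    appended by iio_push_to_buffers_with_timestamp() naturally
+ *	    aligned.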
*/ struct afe4403_data { struct device *dev; @@ -72,6 +73,8 @@ struct afe4403_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + /* Ensure suitable alignment for timestamp */ + s32 buffer[8] __aligned(8); }; enum afe4403_chan_id { @@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4403_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[8]; u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; u8 rx[3]; @@ -326,9 +328,9 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - buffer[i++] = (rx[0] << 16) | - (rx[1] << 8) | - (rx[2]); + afe->buffer[i++] = (rx[0] << 16) | + (rx[1] << 8) | + (rx[2]); } /* Disable reading from the device */ @@ -337,7 +339,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index e728bbb21ca8..cebb1fd4d0b1 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct a scan to push to the iio buffer. */ struct afe4404_data { struct device *dev; @@ -91,6 +92,7 @@ struct afe4404_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + s32 buffer[10] __aligned(8); }; enum afe4404_chan_id { @@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[10]; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], - &buffer[i++]); + &afe->buffer[i++]); if (ret) goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index dcf5a5bdfaa8..7618cdf59efd 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -38,6 +38,11 @@ struct hdc100x_data { /* integration time of the sensor */ int adc_int_us[2]; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* integration time in us */ @@ -319,7 +324,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct i2c_client *client = data->client; int delay = data->adc_int_us[0] + data->adc_int_us[1]; int ret; - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ /* dual read starts at temp register */ mutex_lock(&data->lock); @@ -330,13 +334,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) } usleep_range(delay, delay + 1000); - ret = i2c_master_recv(client, (u8 *)buf, 4); + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); if (ret < 0) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, 
iio_get_time_ns(indio_dev)); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c index c99b54b0568d..969e16f5eeb6 100644 --- a/drivers/iio/humidity/hid-sensor-humidity.c +++ b/drivers/iio/humidity/hid-sensor-humidity.c @@ -17,7 +17,10 @@ struct hid_humidity_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info humidity_attr; - s32 humidity_data; + struct { + s32 humidity_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -127,9 +130,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev, struct hid_humidity_state *humid_st = iio_priv(indio_dev); if (atomic_read(&humid_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &humid_st->humidity_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -144,7 +146,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY: - humid_st->humidity_data = *(s32 *)raw_data; + humid_st->scan.humidity_data = *(s32 *)raw_data; return 0; default: diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index 7d6771f7cf47..b2eb5abeaccd 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -14,8 +14,6 @@ #include <linux/iio/iio.h> -#define HTS221_DATA_SIZE 2 - enum hts221_sensor_type { HTS221_SENSOR_H, HTS221_SENSOR_T, @@ -39,6 +37,11 @@ struct hts221_hw { bool enabled; u8 odr; + /* Ensure natural alignment of timestamp */ + struct { + __le16 channels[2]; + s64 ts __aligned(8); + } scan; }; extern const struct dev_pm_ops hts221_pm_ops; diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 81d50a861c22..49dcd36d8838 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -162,7 +162,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = { static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) { - u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)]; struct iio_poll_func *pf = p; struct iio_dev *iio_dev = pf->indio_dev; struct hts221_hw *hw = iio_priv(iio_dev); @@ -172,18 +171,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) /* humidity data */ ch = &iio_dev->channels[HTS221_SENSOR_H]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer, HTS221_DATA_SIZE); + &hw->scan.channels[0], + sizeof(hw->scan.channels[0])); if (err < 0) goto out; /* temperature data */ ch = &iio_dev->channels[HTS221_SENSOR_T]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE); + &hw->scan.channels[1], + sizeof(hw->scan.channels[1])); if (err < 0) goto out; - iio_push_to_buffers_with_timestamp(iio_dev, buffer, + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, iio_get_time_ns(iio_dev)); out: diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c index 0575ff706bd4..59e33302042c 100644 --- a/drivers/iio/imu/adis16400.c +++ b/drivers/iio/imu/adis16400.c @@ -464,8 +464,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev) if (ret) goto err_ret; - ret = sscanf(indio_dev->name, "adis%u\n", &device_id); - if (ret != 1) { + if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) { ret = -EINVAL; goto err_ret; } diff --git 
a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h index 621f5309d735..431f10c2b951 100644 --- a/drivers/iio/imu/bmi160/bmi160.h +++ b/drivers/iio/imu/bmi160/bmi160.h @@ -7,6 +7,13 @@ struct bmi160_data { struct regmap *regmap; struct iio_trigger *trig; + /* + * Ensure natural alignment for timestamp if present. + * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts. + * If fewer channels are enabled, less space may be needed, as + * long as the timestamp is still aligned to 8 bytes. + */ + __le16 buf[12] __aligned(8); }; extern const struct regmap_config bmi160_regmap_config; diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 6af65d6f1d28..088694c82327 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c @@ -411,8 +411,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct bmi160_data *data = iio_priv(indio_dev); - __le16 buf[16]; - /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */ int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L; __le16 sample; @@ -422,10 +420,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) &sample, sizeof(sample)); if (ret) goto done; - buf[j++] = sample; + data->buf[j++] = sample; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index b0f3da1976e4..d1f2109012ed 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -664,13 +664,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private) static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private) { struct st_lsm6dsx_hw *hw = private; - int count; + int fifo_len = 0, len; - mutex_lock(&hw->fifo_lock); - count = hw->settings->fifo_ops.read_fifo(hw); - mutex_unlock(&hw->fifo_lock); + /* + * If we are using edge IRQs, new samples can arrive while + * processing the current interrupt since there is no hw + * guarantee the irq line stays "low" long enough to properly + * detect the new interrupt. In this case the new sample will + * be missed. + * Polling the FIFO status register allows us to read new + * samples even if the interrupt arrives while processing + * previous data and the timeslot where the line is "low" is + * too short to be properly detected. + */ + do { + mutex_lock(&hw->fifo_lock); + len = hw->settings->fifo_ops.read_fifo(hw); + mutex_unlock(&hw->fifo_lock); - return count ? IRQ_HANDLED : IRQ_NONE; + if (len > 0) + fifo_len += len; + } while (len > 0); + + return fifo_len ?
IRQ_HANDLED : IRQ_NONE; } static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 112225c0e486..e099517283be 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -845,12 +845,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, indio_dev->masklength, in_ind + 1); while (in_ind != out_ind) { - in_ind = find_next_bit(indio_dev->active_scan_mask, - indio_dev->masklength, - in_ind + 1); length = iio_storage_bytes_for_si(indio_dev, in_ind); /* Make sure we are aligned */ in_loc = roundup(in_loc, length) + length; + in_ind = find_next_bit(indio_dev->active_scan_mask, + indio_dev->masklength, + in_ind + 1); } length = iio_storage_bytes_for_si(indio_dev, in_ind); out_loc = roundup(out_loc, length); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 524a686077ca..485b2e6748c5 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_PM2P5] = "pm2p5", [IIO_MOD_PM4] = "pm4", [IIO_MOD_PM10] = "pm10", + [IIO_MOD_ETHANOL] = "ethanol", + [IIO_MOD_H2] = "h2", }; /* relies on pairs of these shared then separate */ diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 4a1a883dc061..42b64b85d06c 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -485,6 +485,7 @@ config VCNL4000 config VCNL4035 tristate "VCNL4035 combined ALS and proximity sensor" + select IIO_BUFFER select IIO_TRIGGERED_BUFFER select REGMAP_I2C depends on I2C diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 7e1030af9ba3..70da928e0d16 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -25,6 +25,9 @@ struct prox_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info prox_attr; u32 human_presence; + int scale_pre_decml; + int scale_post_decml; + int scale_precision; }; /* Channel definitions */ @@ -95,8 +98,9 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->prox_attr.units; - ret_type = IIO_VAL_INT; + *val = prox_state->scale_pre_decml; + *val2 = prox_state->scale_post_decml; + ret_type = prox_state->scale_precision; break; case IIO_CHAN_INFO_OFFSET: *val = hid_sensor_convert_exponent( @@ -236,6 +240,11 @@ static int prox_parse_report(struct platform_device *pdev, HID_USAGE_SENSOR_HUMAN_PRESENCE, &st->common_attributes.sensitivity); + st->scale_precision = hid_sensor_format_scale( + hsdev->usage, + &st->prox_attr, + &st->scale_pre_decml, &st->scale_post_decml); + return ret; } diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 71f99d2a22c1..ceddb6a3b61b 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1242,13 +1242,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ltr501_data *data = iio_priv(indio_dev); - u16 buf[8]; + struct { + u16 channels[3]; + s64 ts __aligned(8); + } scan; __le16 als_buf[2]; u8 mask = 0; int j = 0; int ret, psdata; - memset(buf, 0, sizeof(buf)); + memset(&scan, 0, sizeof(scan)); /* figure out which data needs to be ready */ if (test_bit(0, indio_dev->active_scan_mask) || @@ -1267,9 +1270,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) 
if (ret < 0) return ret; if (test_bit(0, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[1]); + scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[0]); + scan.channels[j++] = le16_to_cpu(als_buf[0]); } if (mask & LTR501_STATUS_PS_RDY) { @@ -1277,10 +1280,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) &psdata, 2); if (ret < 0) goto done; - buf[j++] = psdata & LTR501_PS_DATA_MASK; + scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index d6d8007ba430..8cc619de2c3a 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -75,6 +75,11 @@ struct max44000_data { struct mutex lock; struct regmap *regmap; + /* Ensure naturally aligned timestamp */ + struct { + u16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ @@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max44000_data *data = iio_priv(indio_dev); - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ int index = 0; unsigned int regval; int ret; @@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) ret = max44000_read_alsval(data); if (ret < 0) goto out_unlock; - buf[index++] = ret; + data->scan.channels[index++] = ret; } if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval); if (ret < 0) goto out_unlock; - buf[index] = regval; + data->scan.channels[index] = regval; } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c index a0a7aeae5a82..254c5b0199d4 100644 --- a/drivers/iio/light/rpr0521.c +++ b/drivers/iio/light/rpr0521.c @@ -194,6 +194,17 @@ struct rpr0521_data { bool pxs_need_dis; struct regmap *regmap; + + /* + * Ensure a correctly aligned timestamp. + * Note that the read will put garbage data into + * the padding, but this should not be a problem. + */ + struct { + __le16 channels[3]; + u8 garbage; + s64 ts __aligned(8); + } scan; }; static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL); @@ -449,8 +460,6 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p) struct rpr0521_data *data = iio_priv(indio_dev); int err; - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ - /* Use irq timestamp when reasonable. */ if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) { pf->timestamp = data->irq_timestamp; @@ -461,11 +470,11 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p) pf->timestamp = iio_get_time_ns(indio_dev); err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA, - &buffer, + data->scan.channels, (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg.
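The trailing byte is absorbed by the scan.garbage field declared above, so it cannot clobber the timestamp.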
*/ if (!err) iio_push_to_buffers_with_timestamp(indio_dev, - buffer, pf->timestamp); + &data->scan, pf->timestamp); else dev_err(&data->client->dev, "Trigger consumer can't read from sensor.\n"); diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c index 015a21f0c2ef..9174ab928880 100644 --- a/drivers/iio/light/si1133.c +++ b/drivers/iio/light/si1133.c @@ -102,6 +102,9 @@ #define SI1133_INPUT_FRACTION_LOW 15 #define SI1133_LUX_OUTPUT_FRACTION 12 #define SI1133_LUX_BUFFER_SIZE 9 +#define SI1133_MEASURE_BUFFER_SIZE 3 + +#define SI1133_SIGN_BIT_INDEX 23 static const int si1133_scale_available[] = { 1, 2, 4, 8, 16, 32, 64, 128}; @@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = { } }; -static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag, +static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag, s8 shift) { return ((input << fraction) / mag) << shift; } -static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, +static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order, u8 input_fraction, s8 sign, const struct si1133_coeff *coeffs) { @@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, * The algorithm is from: * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716 */ -static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff, +static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff, const struct si1133_coeff *coeffs) { u8 x_order, y_order; @@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data, { int err; - __be16 resp; + u8 buffer[SI1133_MEASURE_BUFFER_SIZE]; err = si1133_set_adcmux(data, 0, chan->channel); if (err) @@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data, if (err) return err; - err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp), - (u8 *)&resp); + err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer), + buffer); if (err) return err; - *val = be16_to_cpu(resp); + *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); return err; } @@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val) { int err; int lux; - u32 high_vis; - u32 low_vis; - u32 ir; + s32 high_vis; + s32 low_vis; + s32 ir; u8 buffer[SI1133_LUX_BUFFER_SIZE]; /* Activate lux channels */ @@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val) if (err) return err; - high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2]; - low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5]; - ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8]; + high_vis = + sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); + + low_vis = + sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5], + SI1133_SIGN_BIT_INDEX); + + ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8], + SI1133_SIGN_BIT_INDEX); if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD) lux = si1133_calc_polynomial(high_vis, ir, diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index 982bba0c54e7..cb40345e7b51 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -169,6 +169,7 @@ struct si1145_part_info { * @part_info: Part information * @trig: Pointer to iio trigger * @meas_rate: Value of MEAS_RATE register. 
Only set in HW in auto mode + * @buffer: Used to pack data read from sensor. */ struct si1145_data { struct i2c_client *client; @@ -180,6 +181,14 @@ struct si1145_data { bool autonomous; struct iio_trigger *trig; int meas_rate; + /* + * Ensure timestamp will be naturally aligned if present. + * Maximum buffer size (may be only partly used if not all + * channels are enabled): + * 6*2 bytes channels data + 4 bytes alignment + + * 8 bytes timestamp + */ + u8 buffer[24] __aligned(8); }; /** @@ -441,12 +450,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) struct iio_poll_func *pf = private; struct iio_dev *indio_dev = pf->indio_dev; struct si1145_data *data = iio_priv(indio_dev); - /* - * Maximum buffer size: - * 6*2 bytes channels data + 4 bytes alignment + - * 8 bytes timestamp - */ - u8 buffer[24]; int i, j = 0; int ret; u8 irq_status = 0; @@ -479,7 +482,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) ret = i2c_smbus_read_i2c_block_data_or_emulated( data->client, indio_dev->channels[i].address, - sizeof(u16) * run, &buffer[j]); + sizeof(u16) * run, &data->buffer[j]); if (ret < 0) goto done; j += run * sizeof(u16); @@ -494,7 +497,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) goto done; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h index 78bc56aad129..283086887caf 100644 --- a/drivers/iio/light/st_uvis25.h +++ b/drivers/iio/light/st_uvis25.h @@ -27,6 +27,11 @@ struct st_uvis25_hw { struct iio_trigger *trig; bool enabled; int irq; + /* Ensure timestamp is naturally aligned */ + struct { + u8 chan; + s64 ts __aligned(8); + } scan; }; extern const struct dev_pm_ops st_uvis25_pm_ops; diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c index d262c254b895..9aaff35f49b0 100644 --- a/drivers/iio/light/st_uvis25_core.c +++ b/drivers/iio/light/st_uvis25_core.c @@ -234,17 +234,19 @@ static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = { static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p) { - u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)]; struct iio_poll_func *pf = p; struct iio_dev *iio_dev = pf->indio_dev; struct st_uvis25_hw *hw = iio_priv(iio_dev); + unsigned int val; int err; - err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer); + err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val); if (err < 0) goto out; - iio_push_to_buffers_with_timestamp(iio_dev, buffer, + hw->scan.chan = val; + + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, iio_get_time_ns(iio_dev)); out: diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index e5b00a6611ac..7384a3ffcac4 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -193,7 +193,6 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, u8 rdy_mask, u8 data_reg, int *val) { int tries = 20; - __be16 buf; int ret; mutex_lock(&data->vcnl4000_lock); @@ -220,13 +219,12 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, goto fail; } - ret = i2c_smbus_read_i2c_block_data(data->client, - data_reg, sizeof(buf), (u8 *) &buf); + ret = i2c_smbus_read_word_swapped(data->client, data_reg); if (ret < 0) goto fail; mutex_unlock(&data->vcnl4000_lock); - *val = be16_to_cpu(buf); + *val = ret; return 0; diff --git a/drivers/iio/magnetometer/ak8974.c 
b/drivers/iio/magnetometer/ak8974.c index d32996702110..87c15a63c1a4 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -185,6 +185,11 @@ struct ak8974 { bool drdy_irq; struct completion drdy_complete; bool drdy_active_low; + /* Ensure timestamp is naturally aligned */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static const char ak8974_reg_avdd[] = "avdd"; @@ -581,7 +586,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) { struct ak8974 *ak8974 = iio_priv(indio_dev); int ret; - __le16 hw_values[8]; /* Three axes + 64bit padding */ pm_runtime_get_sync(&ak8974->i2c->dev); mutex_lock(&ak8974->lock); @@ -591,13 +595,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) dev_err(&ak8974->i2c->dev, "error triggering measure\n"); goto out_unlock; } - ret = ak8974_getresult(ak8974, hw_values); + ret = ak8974_getresult(ak8974, ak8974->scan.channels); if (ret) { dev_err(&ak8974->i2c->dev, "error getting measures\n"); goto out_unlock; } - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, iio_get_time_ns(indio_dev)); out_unlock: @@ -764,19 +768,21 @@ static int ak8974_probe(struct i2c_client *i2c, ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); if (IS_ERR(ak8974->map)) { dev_err(&i2c->dev, "failed to allocate register map\n"); + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); return PTR_ERR(ak8974->map); } ret = ak8974_set_power(ak8974, AK8974_PWR_ON); if (ret) { dev_err(&i2c->dev, "could not power on\n"); - goto power_off; + goto disable_pm; } ret = ak8974_detect(ak8974); if (ret) { dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); - goto power_off; + goto disable_pm; } ret = ak8974_selftest(ak8974); @@ -786,14 +792,9 @@ static int ak8974_probe(struct i2c_client *i2c, ret = ak8974_reset(ak8974); if (ret) { dev_err(&i2c->dev, "AK8974 reset failed\n"); - goto power_off; + goto disable_pm; } - pm_runtime_set_autosuspend_delay(&i2c->dev, - AK8974_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&i2c->dev); - pm_runtime_put(&i2c->dev); - indio_dev->dev.parent = &i2c->dev; indio_dev->channels = ak8974_channels; indio_dev->num_channels = ARRAY_SIZE(ak8974_channels); @@ -846,6 +847,11 @@ no_irq: goto cleanup_buffer; } + pm_runtime_set_autosuspend_delay(&i2c->dev, + AK8974_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&i2c->dev); + pm_runtime_put(&i2c->dev); + return 0; cleanup_buffer: @@ -854,7 +860,6 @@ disable_pm: pm_runtime_put_noidle(&i2c->dev); pm_runtime_disable(&i2c->dev); ak8974_set_power(ak8974, AK8974_PWR_OFF); -power_off: regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); return ret; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 893bec5a0312..82af903a765b 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -368,6 +368,12 @@ struct ak8975_data { struct iio_mount_matrix orientation; struct regulator *vdd; struct regulator *vid; + + /* Ensure natural alignment of timestamp */ + struct { + s16 channels[3]; + s64 ts __aligned(8); + } scan; }; /* Enable attached power regulator if any. 
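This covers the optional vdd and vid supplies declared in ak8975_data above.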
*/ @@ -805,7 +811,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) const struct i2c_client *client = data->client; const struct ak_def *def = data->def; int ret; - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ __le16 fval[3]; mutex_lock(&data->lock); @@ -828,12 +833,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) mutex_unlock(&data->lock); /* Clamp to valid range. */ - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); + return; unlock: diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index fb16cfdd6fa6..b7b98dd4d2cb 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -56,6 +56,12 @@ struct mag3110_data { int sleep_val; struct regulator *vdd_reg; struct regulator *vddio_reg; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[3]; + u8 temperature; + s64 ts __aligned(8); + } scan; }; static int mag3110_request(struct mag3110_data *data) @@ -387,10 +393,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mag3110_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */ int ret; - ret = mag3110_read(data, (__be16 *) buffer); + ret = mag3110_read(data, data->scan.channels); if (ret < 0) goto done; @@ -399,10 +404,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) MAG3110_DIE_TEMP); if (ret < 0) goto done; - buffer[6] = ret; + data->scan.temperature = ret; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 8d0f15f27dc5..0a95afaa48fe 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -264,6 +264,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + (s32)2097152) * calib->H2 + 8192) >> 14); var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4; + var = clamp_val(var, 0, 419430400); + return var >> 12; }; @@ -706,7 +708,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) unsigned int ctrl; if (data->use_eoc) - init_completion(&data->done); + reinit_completion(&data->done); ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); if (ret) @@ -962,6 +964,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, "trying to enforce it\n"); irq_trig = IRQF_TRIGGER_RISING; } + + init_completion(&data->done); + ret = devm_request_threaded_irq(dev, irq, bmp085_eoc_irq, diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c index d066f3c5a8a6..822b9e19688e 100644 --- a/drivers/iio/pressure/mpl3115.c +++ b/drivers/iio/pressure/mpl3115.c @@ -144,7 +144,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p) struct 
iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mpl3115_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */ + /* + * 32-bit channel + 16-bit channel + padding + ts + * Note that it is possible for only one of the first 2 + * channels to be enabled. If that happens, the first element + * of the buffer may be either 16 or 32-bits. As such we cannot + * use a simple structure definition to express this data layout. + */ + u8 buffer[16] __aligned(8); int ret, pos = 0; mutex_lock(&data->lock); diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 2f598ad91621..f5db9fa086f3 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ms5611_state *st = iio_priv(indio_dev); - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + /* Ensure buffer elements are naturally aligned */ + struct { + s32 channels[2]; + s64 ts __aligned(8); + } scan; int ret; mutex_lock(&st->lock); - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], + &scan.channels[0]); mutex_unlock(&st->lock); if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 9d0d07930236..7f2e5a8942a4 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -664,8 +664,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) int err; err = pm_runtime_get_sync(indio_dev->dev.parent); - if (err < 0) + if (err < 0) { + pm_runtime_put(indio_dev->dev.parent); return err; + } if (err > 0) { /* diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c index 166b3e6d7db8..5254b1fbccfd 100644 --- a/drivers/iio/proximity/mb1232.c +++ b/drivers/iio/proximity/mb1232.c @@ -40,6 +40,11 @@ struct mb1232_data { */ struct completion ranging; int irqnr; + /* Ensure correct alignment of data to push to IIO buffer */ + struct { + s16 distance; + s64 ts __aligned(8); + } scan; }; static irqreturn_t mb1232_handle_irq(int irq, void *dev_id) @@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mb1232_data *data = iio_priv(indio_dev); - /* - * triggered buffer - * 16-bit channel + 48-bit padding + 64-bit timestamp - */ - s16 buffer[8] = { 0 }; - buffer[0] = mb1232_read_distance(data); - if (buffer[0] < 0) + data->scan.distance = mb1232_read_distance(data); + if (data->scan.distance < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c index eda55b9c1e9b..500b5cc4351a 100644 --- a/drivers/iio/temperature/hid-sensor-temperature.c +++ b/drivers/iio/temperature/hid-sensor-temperature.c @@ -17,7 +17,10 @@ struct temperature_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info temperature_attr; - s32 temperature_data; + struct { + 
s32 temperature_data; + u64 timestamp __aligned(8); + } scan; int scale_pre_decml; int scale_post_decml; int scale_precision; @@ -34,7 +37,7 @@ static const struct iio_chan_spec temperature_channels[] = { BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS), }, - IIO_CHAN_SOFT_TIMESTAMP(3), + IIO_CHAN_SOFT_TIMESTAMP(1), }; /* Adjust channel real bits based on report descriptor */ @@ -125,9 +128,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev, struct temperature_state *temp_st = iio_priv(indio_dev); if (atomic_read(&temp_st->common_attributes.data_ready)) - iio_push_to_buffers_with_timestamp(indio_dev, - &temp_st->temperature_data, - iio_get_time_ns(indio_dev)); + iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan, + iio_get_time_ns(indio_dev)); return 0; } @@ -142,7 +144,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev, switch (usage_id) { case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE: - temp_st->temperature_data = *(s32 *)raw_data; + temp_st->scan.temperature_data = *(s32 *)raw_data; return 0; default: return -EINVAL; } diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c index a5e670726717..58c1c30d5612 100644 --- a/drivers/iio/trigger/iio-trig-hrtimer.c +++ b/drivers/iio/trigger/iio-trig-hrtimer.c @@ -102,7 +102,7 @@ static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state) if (state) hrtimer_start(&trig_info->timer, trig_info->period, - HRTIMER_MODE_REL); + HRTIMER_MODE_REL_HARD); else hrtimer_cancel(&trig_info->timer); @@ -132,7 +132,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name) trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops; trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups; - hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); trig_info->timer.function = iio_hrtimer_trig_handler; trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index b44b1c322ec8..786ee0e4e885 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -80,6 +80,9 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS This allows the user to config the default GID type that the CM uses for each device, when initiating new connections. +config INFINIBAND_VIRT_DMA + def_bool !HIGHMEM + if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS source "drivers/infiniband/hw/mthca/Kconfig" source "drivers/infiniband/hw/qib/Kconfig" diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 1753a9801b70..c9e63c692b6e 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq; static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, - .len = sizeof(struct rdma_nla_ls_gid)}, + .len = sizeof(struct rdma_nla_ls_gid), + .validation_type = NLA_VALIDATE_MIN, + .min = sizeof(struct rdma_nla_ls_gid)}, }; static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) @@ -645,13 +647,12 @@ static void process_one_req(struct work_struct *_work) req->callback = NULL; spin_lock_bh(&lock); + /* + * Although the work will normally have been canceled by the workqueue, + * it can still be requeued as long as it is on the req_list.
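+ * Cancel it unconditionally, under the lock, so it cannot fire again
+ * once the request has been freed below.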
+ */ + cancel_delayed_work(&req->work); if (!list_empty(&req->list)) { - /* - * Although the work will normally have been canceled by the - * workqueue, it can still be requeued as long as it is on the - * req_list. - */ - cancel_delayed_work(&req->work); list_del_init(&req->list); kfree(req); } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 65b10efca2b8..7affe6b4ae21 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1542,8 +1542,11 @@ int ib_cache_setup_one(struct ib_device *device) if (err) return err; - rdma_for_each_port (device, p) - ib_cache_update(device, p, true); + rdma_for_each_port (device, p) { + err = ib_cache_update(device, p, true); + if (err) + return err; + } return 0; } diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 319e4b4ae639..c933c1c7ddd8 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -597,18 +597,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path, return 0; } -static int cm_alloc_id(struct cm_id_private *cm_id_priv) -{ - int err; - u32 id; - - err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv, - xa_limit_32b, &cm.local_id_next, GFP_KERNEL); - - cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; - return err; -} - static u32 cm_local_id(__be32 local_id) { return (__force u32) (local_id ^ cm.random_id_operand); @@ -862,6 +850,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, void *context) { struct cm_id_private *cm_id_priv; + u32 id; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); @@ -873,9 +862,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; - ret = cm_alloc_id(cm_id_priv); - if (ret) - goto error; spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); @@ -884,11 +870,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, INIT_LIST_HEAD(&cm_id_priv->altr_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); + + ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b, + &cm.local_id_next, GFP_KERNEL); + if (ret < 0) + goto error; + cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; + xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), + cm_id_priv, GFP_KERNEL); + return &cm_id_priv->id; error: kfree(cm_id_priv); - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } EXPORT_SYMBOL(ib_create_cm_id); @@ -1097,14 +1092,22 @@ retest: break; } - spin_lock_irq(&cm.lock); + spin_lock_irq(&cm_id_priv->lock); + spin_lock(&cm.lock); + /* Required for cleanup paths related cm_req_handler() */ + if (cm_id_priv->timewait_info) { + cm_cleanup_timewait(cm_id_priv->timewait_info); + kfree(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; + } if (!list_empty(&cm_id_priv->altr_list) && (!cm_id_priv->altr_send_port_not_ready)) list_del(&cm_id_priv->altr_list); if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock); cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); @@ -1421,7 +1424,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, /* Verify that we're not in timewait. 
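A leftover timewait_info here would mean an earlier REQ was never cleaned up; the WARN_ON added below makes that explicit.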
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_IDLE) { + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; @@ -1432,6 +1435,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; goto out; } @@ -1439,12 +1443,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, param->ppath_sgid_attr, &cm_id_priv->av, cm_id_priv); if (ret) - goto error1; + goto out; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) - goto error1; + goto out; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); @@ -1462,7 +1466,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) - goto error1; + goto out; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); @@ -1485,7 +1489,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, return 0; error2: cm_free_msg(cm_id_priv->msg); -error1: kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); @@ -1959,6 +1962,7 @@ static int cm_req_handler(struct cm_work *work) id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; goto destroy; } cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; @@ -1970,7 +1974,7 @@ static int cm_req_handler(struct cm_work *work) pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, be32_to_cpu(cm_id->local_id)); ret = -EINVAL; - goto free_timeinfo; + goto destroy; } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -2055,8 +2059,6 @@ static int cm_req_handler(struct cm_work *work) rejected: atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); -free_timeinfo: - kfree(cm_id_priv->timewait_info); destroy: ib_destroy_cm_id(cm_id); return ret; @@ -4334,7 +4336,7 @@ static void cm_add_one(struct ib_device *ib_device) unsigned long flags; int ret; int count = 0; - u8 i; + unsigned int i; cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), GFP_KERNEL); @@ -4346,7 +4348,7 @@ static void cm_add_one(struct ib_device *ib_device) cm_dev->going_down = 0; set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); - for (i = 1; i <= ib_device->phys_port_cnt; i++) { + rdma_for_each_port (ib_device, i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; @@ -4425,7 +4427,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; - int i; + unsigned int i; if (!cm_dev) return; @@ -4438,7 +4440,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) cm_dev->going_down = 1; spin_unlock_irq(&cm.lock); - for (i = 1; i <= ib_device->phys_port_cnt; i++) { + rdma_for_each_port (ib_device, i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8f776b7de45e..ecac62a7b59e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -530,6 +530,10 @@ static void cma_release_dev(struct rdma_id_private *id_priv) list_del(&id_priv->list); cma_deref_dev(id_priv->cma_dev); id_priv->cma_dev = NULL; + if 
(id_priv->id.route.addr.dev_addr.sgid_attr) { + rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); + id_priv->id.route.addr.dev_addr.sgid_attr = NULL; + } mutex_unlock(&lock); } @@ -1631,6 +1635,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1677,6 +1683,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. Therefore, perform netdevice @@ -1718,6 +1725,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -1799,19 +1807,30 @@ static void cma_release_port(struct rdma_id_private *id_priv) mutex_unlock(&lock); } -static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv, - struct cma_multicast *mc) +static void destroy_mc(struct rdma_id_private *id_priv, + struct cma_multicast *mc) { - struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; - struct net_device *ndev = NULL; - - if (dev_addr->bound_dev_if) - ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) { - cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false); - dev_put(ndev); + if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) { + ib_sa_free_multicast(mc->multicast.ib); + kfree(mc); + return; + } + + if (rdma_protocol_roce(id_priv->id.device, + id_priv->id.port_num)) { + struct rdma_dev_addr *dev_addr = + &id_priv->id.route.addr.dev_addr; + struct net_device *ndev = NULL; + + if (dev_addr->bound_dev_if) + ndev = dev_get_by_index(dev_addr->net, + dev_addr->bound_dev_if); + if (ndev) { + cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false); + dev_put(ndev); + } + kref_put(&mc->mcref, release_mc); } - kref_put(&mc->mcref, release_mc); } static void cma_leave_mc_groups(struct rdma_id_private *id_priv) @@ -1819,16 +1838,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) struct cma_multicast *mc; while (!list_empty(&id_priv->mc_list)) { - mc = container_of(id_priv->mc_list.next, - struct cma_multicast, list); + mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, + list); list_del(&mc->list); - if (rdma_cap_ib_mcast(id_priv->cma_dev->device, - id_priv->id.port_num)) { - ib_sa_free_multicast(mc->multicast.ib); - kfree(mc); - } else { - cma_leave_roce_mc_group(id_priv, mc); - } + destroy_mc(id_priv, mc); } } @@ -1870,9 +1883,6 @@ void rdma_destroy_id(struct rdma_cm_id *id) kfree(id_priv->id.route.path_rec); - if (id_priv->id.route.addr.dev_addr.sgid_attr) - rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); - put_net(id_priv->id.route.addr.dev_addr.net); kfree(id_priv); } @@ -2473,6 +2483,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -3245,6 +3257,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3273,6 +3287,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = 
kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3299,6 +3315,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3338,6 +3356,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3385,6 +3405,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3415,6 +3437,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; @@ -4164,16 +4188,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) else pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", status); - mutex_lock(&id_priv->qp_mutex); - if (!status && id_priv->id.qp) { - status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, - be16_to_cpu(multicast->rec.mlid)); - if (status) - pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n", - status); - } - mutex_unlock(&id_priv->qp_mutex); - event.status = status; event.param.ud.private_data = mc->context; if (!status) { @@ -4428,6 +4442,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, struct cma_multicast *mc; int ret; + /* Not supported for kernel QPs */ + if (WARN_ON(id->qp)) + return -EINVAL; + if (!id->device) return -EINVAL; @@ -4478,25 +4496,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irq(&id_priv->lock); list_for_each_entry(mc, &id_priv->mc_list, list) { - if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { - list_del(&mc->list); - spin_unlock_irq(&id_priv->lock); + if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) + continue; + list_del(&mc->list); + spin_unlock_irq(&id_priv->lock); - if (id->qp) - ib_detach_mcast(id->qp, - &mc->multicast.ib->rec.mgid, - be16_to_cpu(mc->multicast.ib->rec.mlid)); - - BUG_ON(id_priv->cma_dev->device != id->device); - - if (rdma_cap_ib_mcast(id->device, id->port_num)) { - ib_sa_free_multicast(mc->multicast.ib); - kfree(mc); - } else if (rdma_protocol_roce(id->device, id->port_num)) { - cma_leave_roce_mc_group(id_priv, mc); - } - return; - } + WARN_ON(id_priv->cma_dev->device != id->device); + destroy_mc(id_priv, mc); + return; } spin_unlock_irq(&id_priv->lock); } diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 8b0b5ae22e4c..726e70b68249 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -322,8 +322,21 @@ fail: return ERR_PTR(err); } +static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) +{ + struct config_group *group = + container_of(item, struct config_group, 
cg_item); + struct cma_dev_group *cma_dev_group = + container_of(group, struct cma_dev_group, device_group); + + configfs_remove_default_groups(&cma_dev_group->ports_group); + configfs_remove_default_groups(&cma_dev_group->device_group); + config_item_put(item); +} + static struct configfs_group_operations cma_subsys_group_ops = { .make_group = make_cma_dev, + .drop_item = drop_cma_dev, }; static const struct config_item_type cma_subsys_type = { diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 46dd50ff7c85..f454d63008d6 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -195,7 +195,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp) return ret; } -static void counter_history_stat_update(const struct rdma_counter *counter) +static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = counter->device; struct rdma_port_counter *port_counter; @@ -205,6 +205,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter) if (!port_counter->hstats) return; + rdma_counter_query_stats(counter); + for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } @@ -282,7 +284,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) struct rdma_counter *counter; int ret; - if (!qp->res.valid) + if (!qp->res.valid || rdma_is_kernel_res(&qp->res)) return 0; if (!rdma_is_port_valid(dev, port)) @@ -485,7 +487,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, goto err; } - if (counter->res.task != qp->res.task) { + if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) { ret = -EINVAL; goto err_task; } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 10ae6c6eab0a..256d379bba67 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1330,6 +1330,10 @@ out: return ret; } +static void prevent_dealloc_device(struct ib_device *ib_dev) +{ +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -1383,9 +1387,6 @@ int ib_register_device(struct ib_device *device, const char *name) } ret = enable_device_and_get(device); - dev_set_uevent_suppress(&device->dev, false); - /* Mark for userspace that device is ready */ - kobject_uevent(&device->dev.kobj, KOBJ_ADD); if (ret) { void (*dealloc_fn)(struct ib_device *); @@ -1397,16 +1398,20 @@ int ib_register_device(struct ib_device *device, const char *name) * possibility for a parallel unregistration along with this * error flow. Since we have a refcount here we know any * parallel flow is stopped in disable_device and will see the - * NULL pointers, causing the responsibility to + * special dealloc_driver pointer, causing the responsibility to * ib_dealloc_device() to revert back to this thread. 
*/ dealloc_fn = device->ops.dealloc_driver; - device->ops.dealloc_driver = NULL; + device->ops.dealloc_driver = prevent_dealloc_device; ib_device_put(device); __ib_unregister_device(device); device->ops.dealloc_driver = dealloc_fn; + dev_set_uevent_suppress(&device->dev, false); return ret; } + dev_set_uevent_suppress(&device->dev, false); + /* Mark for userspace that device is ready */ + kobject_uevent(&device->dev.kobj, KOBJ_ADD); ib_device_put(device); return 0; @@ -1449,7 +1454,8 @@ static void __ib_unregister_device(struct ib_device *ib_dev) * Drivers using the new flow may not call ib_dealloc_device except * in error unwind prior to registration success. */ - if (ib_dev->ops.dealloc_driver) { + if (ib_dev->ops.dealloc_driver && + ib_dev->ops.dealloc_driver != prevent_dealloc_device) { WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); ib_dealloc_device(ib_dev); } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9947d16edef2..2284930b5f91 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); - ib_cancel_rmpp_recvs(mad_agent_priv); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_cancel_rmpp_recvs(mad_agent_priv); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); @@ -2960,6 +2960,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index ef4b0c7061e4..e4905d9fecb0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -702,9 +702,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg, continue; qp = container_of(res, struct ib_qp, res); - if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) - continue; - if (!qp->counter || (qp->counter->id != counter->id)) continue; @@ -1248,10 +1245,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); ret = fe->fill_res_func(msg, has_cap_net_admin, res, port); - rdma_restrack_put(res); if (ret) goto err_free; + rdma_restrack_put(res); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index ccf4d069c25c..d0580eed3bcb 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -160,9 +160,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, uobj->context = NULL; /* - * For DESTROY the usecnt is held write locked, the caller is expected - * to put it unlock and put the object when done with it. Only DESTROY - * can remove the IDR handle. + * For DESTROY the usecnt is not changed, the caller is expected to + * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR + * handle. */ if (reason != RDMA_REMOVE_DESTROY) atomic_set(&uobj->usecnt, 0); @@ -194,7 +194,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj, /* * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY * sequence. It should only be used from command callbacks. 
On success the - * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This + * caller must pair this with uobj_put_destroy(). This * version requires the caller to have already obtained an * LOOKUP_DESTROY uobject kref. */ @@ -205,6 +205,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) down_read(&ufile->hw_destroy_rwsem); + /* + * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left + * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY. + * This is because any other concurrent thread can still see the object + * in the xarray due to RCU. Leaving it locked ensures nothing else will + * touch it. + */ ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE); if (ret) goto out_unlock; @@ -223,7 +230,7 @@ out_unlock: /* * uobj_get_destroy destroys the HW object and returns a handle to the uobj * with a NULL object pointer. The caller must pair this with - * uverbs_put_destroy. + * uobj_put_destroy(). */ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, u32 id, struct uverbs_attr_bundle *attrs) @@ -257,8 +264,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, uobj = __uobj_get_destroy(obj, id, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); - - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); + uobj_put_destroy(uobj); return 0; } @@ -362,7 +368,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj, * and the caller is expected to ensure that uverbs_close_fd is never * done while a call top lookup is possible. */ - if (f->f_op != fd_type->fops) { + if (f->f_op != fd_type->fops || uobject->ufile != ufile) { fput(f); return ERR_PTR(-EBADF); } @@ -689,7 +695,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { assert_uverbs_usecnt(uobj, mode); - uobj->uapi_object->type_class->lookup_put(uobj, mode); /* * In order to unlock an object, either decrease its usecnt for * read access or zero it in case of exclusive access. See @@ -706,6 +711,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, break; } + uobj->uapi_object->type_class->lookup_put(uobj, mode); /* Pairs with the kref obtained by type->lookup_get */ uverbs_uobject_put(uobj); } diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index a07665f7ef8c..f1b4db80913f 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -234,6 +234,7 @@ static void rdma_restrack_add(struct rdma_restrack_entry *res) } else { ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b, &rt->next_id, GFP_KERNEL); + ret = (ret < 0) ? 
ret : 0; } if (!ret) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index bddb5434fbed..d2d70c89193f 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) return len; } -static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) +static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; void *data; struct ib_sa_mad *mad; int len; + unsigned long flags; + unsigned long delay; + gfp_t gfp_flag; + int ret; + + INIT_LIST_HEAD(&query->list); + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); mad = query->mad_buf->mad; len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); @@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); -} + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC : + GFP_NOWAIT; -static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) -{ - unsigned long flags; - unsigned long delay; - int ret; - - INIT_LIST_HEAD(&query->list); - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); - - /* Put the request on the list first.*/ spin_lock_irqsave(&ib_nl_request_lock, flags); + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); + + if (ret) + goto out; + + /* Put the request on the list.*/ delay = msecs_to_jiffies(sa_local_svc_timeout_ms); query->timeout = delay + jiffies; list_add_tail(&query->list, &ib_nl_request_list); /* Start the timeout if this is the only request */ if (ib_nl_request_list.next == &query->list) queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - ret = ib_nl_send_msg(query, gfp_mask); - if (ret) { - ret = -EIO; - /* Remove the request */ - spin_lock_irqsave(&ib_nl_request_lock, flags); - list_del(&query->list); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - } +out: + spin_unlock_irqrestore(&ib_nl_request_lock, flags); return ret; } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 7a50cedcef1f..091cca9d88ed 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1060,8 +1060,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) coredev->ports_kobj, "%d", port_num); if (ret) { - kfree(p); - return ret; + goto err_put; } p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL); @@ -1074,8 +1073,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type, &p->kobj, "gid_attrs"); if (ret) { - kfree(p->gid_attr_group); - goto err_put; + goto err_put_gid_attrs; } if (device->ops.process_mad && is_full_dev) { @@ -1406,8 +1404,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num, ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s", name); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } } return 0; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f4f79f1292b9..ef4be14af3bb 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -581,6 +581,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) list_move_tail(&uevent->list, &list); } list_del(&ctx->list); + 
events_reported = ctx->events_reported; mutex_unlock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &list, list) { @@ -590,7 +591,6 @@ static int ucma_free_ctx(struct ucma_context *ctx) kfree(uevent); } - events_reported = ctx->events_reported; mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; @@ -1473,7 +1473,9 @@ static ssize_t ucma_process_join(struct ucma_file *file, return 0; err3: + mutex_lock(&ctx->mutex); rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err2: xa_erase(&multicast_table, mc->id); @@ -1639,7 +1641,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, cur_file = ctx->file; if (cur_file == new_file) { + mutex_lock(&cur_file->mut); resp.events_reported = ctx->events_reported; + mutex_unlock(&cur_file->mut); goto response; } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 0d42ba8c0b69..650f71dd4ab9 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, dma_addr_t mask; int i; + /* rdma_for_each_block() has a bug if the page size is smaller than the + * page size used to build the umem. For now prevent smaller page sizes + * from being returned. + */ + pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); + /* At minimum, drivers must support PAGE_SIZE or smaller */ if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0)))) return 0; va = virt; - /* max page size not to exceed MR length */ - mask = roundup_pow_of_two(umem->length); + /* The best result is the smallest page size that results in the minimum + * number of required pages. Compute the largest page size that could + * work based on VA address bits that don't change. 
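The comment above reduces page-size selection to bit arithmetic: XORing the VA of the first and last byte of the range exposes exactly the address bits that change across it, so the smallest page whose offset field covers those bits already yields the minimum page count. A standalone sketch of that arithmetic; bits_needed() is a hypothetical stand-in for the kernel's bits_per(), and the VA and length are made-up inputs:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the kernel's bits_per(): number of
     * bits needed to represent x. */
    static unsigned int bits_needed(uint64_t x)
    {
        unsigned int n = 0;

        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    int main(void)
    {
        uint64_t virt = 0x7f1234567000ULL; /* assumed MR start VA */
        uint64_t length = 0x6000;          /* assumed MR length, 24 KiB */

        /* Bits that differ between the first and last byte's VA: these
         * must all land inside a single page's offset field. */
        uint64_t changing = (length - 1 + virt) ^ virt;
        unsigned int shift = bits_needed(changing);

        printf("changing bits 0x%llx -> smallest covering page 2^%u (%llu KiB)\n",
               (unsigned long long)changing, shift, (1ULL << shift) / 1024);
        return 0;
    }

For the assumed inputs the range crosses a 32 KiB boundary, so the computation lands on a 64 KiB page, matching what counting frames by hand gives.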
+ */ + mask = pgsz_bitmap & + GENMASK(BITS_PER_LONG - 1, + bits_per((umem->length - 1 + virt) ^ virt)); /* offset into first SGL */ pgoff = umem->address & ~PAGE_MASK; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index da229eab5903..ad3a092b8b5c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, mutex_lock(&file->mutex); + if (file->agents_dead) { + mutex_unlock(&file->mutex); + return -EIO; + } + while (list_empty(&file->recv_list)) { mutex_unlock(&file->mutex); @@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, mutex_lock(&file->mutex); } + if (file->agents_dead) { + mutex_unlock(&file->mutex); + return -EIO; + } + packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); list_del(&packet->list); @@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, agent = __get_agent(file, packet->mad.hdr.id); if (!agent) { - ret = -EINVAL; + ret = -EIO; goto err_up; } @@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait) /* we will always be able to post a MAD send */ __poll_t mask = EPOLLOUT | EPOLLWRNORM; + mutex_lock(&file->mutex); poll_wait(filp, &file->recv_wait, wait); if (!list_empty(&file->recv_list)) mask |= EPOLLIN | EPOLLRDNORM; + if (file->agents_dead) + mask = EPOLLERR; + mutex_unlock(&file->mutex); return mask; } @@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port) list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); file->agents_dead = 1; + wake_up_interruptible(&file->recv_wait); mutex_unlock(&file->mutex); for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e2ddcb0dc4ee..c398d1a64614 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -757,6 +757,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->res.type = RDMA_RESTRACK_MR; + mr->iova = cmd.hca_va; rdma_restrack_uadd(&mr->res); uobj->object = mr; @@ -847,6 +848,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) atomic_dec(&old_pd->usecnt); } + if (cmd.flags & IB_MR_REREG_TRANS) + mr->iova = cmd.hca_va; + memset(&resp, 0, sizeof(resp)); resp.lkey = mr->lkey; resp.rkey = mr->rkey; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index f2a2d1246c19..adb08c3fc085 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -307,6 +307,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, spin_lock_irq(&ev_queue->lock); if (!list_empty(&ev_queue->event_list)) pollflags = EPOLLIN | EPOLLRDNORM; + else if (ev_queue->is_closed) + pollflags = EPOLLERR; spin_unlock_irq(&ev_queue->lock); return pollflags; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 6c4093d0a91d..5d896f6b2b61 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1648,7 +1648,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, if (!(rdma_protocol_ib(qp->device, attr->alt_ah_attr.port_num) && rdma_protocol_ib(qp->device, port))) { - ret = EINVAL; + ret = -EINVAL; goto out; } } @@ -1749,7 +1749,7 @@ int 
ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) dev_put(netdev); - if (!rc) { + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ebc3e3d4a6e2..58c021648b7c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1793,6 +1793,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, goto out; } qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); + qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state); qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); qp_attr->pkey_index = qplib_qp->pkey_index; @@ -2973,6 +2974,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So dont report the vlan id. + */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3041,9 +3055,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 27e2df44d043..cfe5f47d9890 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -789,7 +789,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bdbde8e22420..4041b35ea3da 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -736,6 +736,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, unmap_io: pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); + dpit->dbr_bar_reg_iomem = NULL; return -ENOMEM; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 40296b97d21e..079aaaaffec7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -152,7 +152,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git 
a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 13d9432d5ce2..194f5ef45ca6 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index d82e0589cfd2..535ee41ee421 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) srqidx = ABORT_RSS_SRQIDX_G( be32_to_cpu(req->srqidx_status)); if (srqidx) { - complete_cached_srq_buffers(ep, - req->srqidx_status); + complete_cached_srq_buffers(ep, srqidx); } else { /* Hold ep ref until finish_peer_abort() */ c4iw_get_ep(&ep->com); @@ -3383,7 +3382,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { err = pick_local_ipaddrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ @@ -3405,7 +3404,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { err = pick_local_ip6addrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ @@ -3611,13 +3610,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], false); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], 0); + ep->com.dev->rdev.lldi.rxq_ids[0], + ep->com.local_addr.ss_family == AF_INET6); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, @@ -3878,8 +3878,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } - ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W, - TCB_RQ_START_S); + ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, + TCB_RQ_START_S); cleanup: pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index b1bb61c65f4f..16b74591a68d 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -1007,6 +1007,9 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (attr->flags) return -EINVAL; + if (entries < 1 || entries > ibdev->attrs.max_cqe) + return -EINVAL; + if (vector >= rhp->rdev.lldi.nciq) return -EINVAL; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 599340c1f0b8..541dbcf22d0e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx) static void c4iw_remove(struct uld_ctx *ctx) { pr_debug("c4iw_dev %p\n", ctx->dev); + debugfs_remove_recursive(ctx->dev->debugfs_root); c4iw_unregister_device(ctx->dev); c4iw_dealloc(ctx); } diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 35c284af574d..dcb58cef336d 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ 
-399,7 +399,6 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; mhp->ibmr.length = mhp->attr.len; - mhp->ibmr.iova = mhp->attr.va_fbo; mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12); pr_debug("mmid 0x%x mhp %p\n", mmid, mhp); return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 89ac2f9ae6dd..e7472f0da59d 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2471,7 +2471,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; - init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; + init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 5c95c789f302..e800e8e8bed5 100644 --- a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c @@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->cqids); - for (i = qid; i & rdev->qpmask; i++) { + for (i = qid + 1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out; diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index d268bf9c42ee..c29da2f4e339 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int i2c1_debugfs_open(struct inode *in, struct file *fp) @@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } @@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp) static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int qsfp1_debugfs_open(struct inode *in, struct file *fp) @@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index 2b57ba70ddd6..c09080712485 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ 
-1924,6 +1924,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n", __func__, (ptr - (u32 *)dd->platform_config.data)); + ret = -EINVAL; goto bail; } /* Jump the CRC DWORD */ diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 26b792bb1027..fbff6b2f00e7 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -844,6 +844,29 @@ wq_error: return -ENOMEM; } +/** + * destroy_workqueues - destroy per port workqueues + * @dd: the hfi1_ib device + */ +static void destroy_workqueues(struct hfi1_devdata *dd) +{ + int pidx; + struct hfi1_pportdata *ppd; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + if (ppd->hfi1_wq) { + destroy_workqueue(ppd->hfi1_wq); + ppd->hfi1_wq = NULL; + } + if (ppd->link_wq) { + destroy_workqueue(ppd->link_wq); + ppd->link_wq = NULL; + } + } +} + /** * enable_general_intr() - Enable the IRQs that will be handled by the * general interrupt handler. @@ -1117,15 +1140,10 @@ static void shutdown_device(struct hfi1_devdata *dd) * We can't count on interrupts since we are stopping. */ hfi1_quiet_serdes(ppd); - - if (ppd->hfi1_wq) { - destroy_workqueue(ppd->hfi1_wq); - ppd->hfi1_wq = NULL; - } - if (ppd->link_wq) { - destroy_workqueue(ppd->link_wq); - ppd->link_wq = NULL; - } + if (ppd->hfi1_wq) + flush_workqueue(ppd->hfi1_wq); + if (ppd->link_wq) + flush_workqueue(ppd->link_wq); } sdma_exit(dd); } @@ -1814,6 +1832,7 @@ static void remove_one(struct pci_dev *pdev) * clear dma engines, etc. */ shutdown_device(dd); + destroy_workqueues(dd); stop_timers(dd); diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index f8e733aa3bb8..acd4400b0092 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -381,7 +381,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if (dd->flags & HFI1_SHUTDOWN) + return true; return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 8a2e0d9351e9..c018fc633cca 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); @@ -5406,7 +5407,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if ((dd->flags & HFI1_SHUTDOWN)) + return true; return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? 
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 13e4203497b3..a92346e88628 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); pq->state = SDMA_PKT_Q_ACTIVE; - /* Send the first N packets in the request to buy us some time */ - ret = user_sdma_send_pkts(req, pcount); - if (unlikely(ret < 0 && ret != -EBUSY)) - goto free_req; /* * This is a somewhat blocking send implementation. diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index e36d31569081..3e68ba9dab45 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -657,7 +657,7 @@ struct hns_roce_qp { u8 rdb_en; u8 sdb_en; u32 doorbell_qpn; - u32 sq_signal_bits; + enum ib_sig_type sq_signal_bits; struct hns_roce_wq sq; struct ib_umem *umem; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index a79fa67df871..a405c64d2a82 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; break; case IB_WR_LOCAL_INV: - break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_LSO: diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 4540b00ccee9..d01e3222c00c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1009,7 +1009,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, u32 timeout = 0; int handle = 0; u16 desc_ret; - int ret = 0; + int ret; int ntc; spin_lock_bh(&csq->lock); @@ -1054,15 +1054,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, if (hns_roce_cmq_csq_done(hr_dev)) { complete = true; handle = 0; + ret = 0; while (handle < num) { /* get the result of hardware write back */ desc_to_use = &csq->desc[ntc]; desc[handle] = *desc_to_use; dev_dbg(hr_dev->dev, "Get cmq desc:\n"); desc_ret = le16_to_cpu(desc[handle].retval); - if (desc_ret == CMD_EXEC_SUCCESS) - ret = 0; - else + if (unlikely(desc_ret != CMD_EXEC_SUCCESS)) ret = -EIO; priv->cmq.last_status = desc_ret; ntc++; @@ -1349,34 +1348,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) { struct hns_roce_pf_timer_res_a *req_a; - struct hns_roce_cmq_desc desc[2]; - int ret, i; + struct hns_roce_cmq_desc desc; + int ret; - for (i = 0; i < 2; i++) { - hns_roce_cmq_setup_basic_desc(&desc[i], - HNS_ROCE_OPC_QUERY_PF_TIMER_RES, - true); + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES, + true); - if (i == 0) - desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - else - desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - } - - ret = hns_roce_cmq_send(hr_dev, desc, 2); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; - req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data; + req_a = (struct hns_roce_pf_timer_res_a *)desc.data; hr_dev->caps.qpc_timer_bt_num = - roce_get_field(req_a->qpc_timer_bt_idx_num, - PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, - PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); + roce_get_field(req_a->qpc_timer_bt_idx_num, + 
PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); hr_dev->caps.cqc_timer_bt_num = - roce_get_field(req_a->cqc_timer_bt_idx_num, - PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, - PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); + roce_get_field(req_a->cqc_timer_bt_idx_num, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); return 0; } @@ -2431,6 +2422,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); @@ -4564,7 +4556,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->path_mig_state = IB_MIG_ARMED; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; if (hr_qp->ibqp.qp_type == IB_QPT_UD) - qp_attr->qkey = V2_QKEY_VAL; + qp_attr->qkey = le32_to_cpu(context.qkey_xrcd); qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_EPSN_M, @@ -4622,9 +4614,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S); qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn, - V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S); - qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer); + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S); + qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S); done: qp_attr->cur_qp_state = qp_attr->qp_state; @@ -4640,6 +4634,7 @@ done: } qp_init_attr->cap = qp_attr->cap; + qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits; out: mutex_unlock(&hr_qp->mutex); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index b5d196c119ee..f23a341400c0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -848,8 +848,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) return 0; err_qp_table_free: - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) - hns_roce_cleanup_qp_table(hr_dev); + hns_roce_cleanup_qp_table(hr_dev); err_cq_table_free: hns_roce_cleanup_cq_table(hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 8dd2d666f687..730e50c87a76 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1181,8 +1181,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, mutex_lock(&hr_qp->mutex); - cur_state = attr_mask & IB_QP_CUR_STATE ? - attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; + if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) + goto out; + + cur_state = hr_qp->state; new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; if (ibqp->uobject && diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 8feec35f95a7..6d6719fa7e46 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -398,8 +398,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp) } /* i40iw.c */ -void i40iw_add_ref(struct ib_qp *); -void i40iw_rem_ref(struct ib_qp *); +void i40iw_qp_add_ref(struct ib_qp *ibqp); +void i40iw_qp_rem_ref(struct ib_qp *ibqp); struct ib_qp *i40iw_get_qp(struct ib_device *, int); void i40iw_flush_wqes(struct i40iw_device *iwdev, @@ -543,9 +543,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev, bool wait); void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf); void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp); -void i40iw_free_qp_resources(struct i40iw_device *iwdev, - struct i40iw_qp *iwqp, - u32 qp_num); +void i40iw_free_qp_resources(struct i40iw_qp *iwqp); + enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, struct i40iw_dma_mem *memptr, u32 size, u32 mask); diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 2d6a378e8560..56c1e9abc52d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, struct rtable *rt; struct neighbour *neigh; int rc = arpindex; - struct net_device *netdev = iwdev->netdev; __be32 dst_ipaddr = htonl(dst_ip); __be32 src_ipaddr = htonl(src_ip); @@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, return rc; } - if (netif_is_bond_slave(netdev)) - netdev = netdev_master_upper_dev_get(netdev); - neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); rcu_read_lock(); @@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, { struct neighbour *neigh; int rc = arpindex; - struct net_device *netdev = iwdev->netdev; struct dst_entry *dst; struct sockaddr_in6 dst_addr; struct sockaddr_in6 src_addr; @@ -2079,16 +2074,13 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); if (!dst || dst->error) { if (dst) { - dst_release(dst); i40iw_pr_err("ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return rc; } - if (netif_is_bond_slave(netdev)) - netdev = netdev_master_upper_dev_get(netdev); - neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); rcu_read_lock(); @@ -2330,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) iwqp = cm_node->iwqp; if (iwqp) { iwqp->cm_node = NULL; - i40iw_rem_ref(&iwqp->ibqp); + i40iw_qp_rem_ref(&iwqp->ibqp); cm_node->iwqp = NULL; } else if (cm_node->qhash_set) { i40iw_get_addr_info(cm_node, &nfo); @@ -3460,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp) kfree(work); return; } - i40iw_add_ref(&iwqp->ibqp); + i40iw_qp_add_ref(&iwqp->ibqp); spin_unlock_irqrestore(&iwdev->qptable_lock, flags); work->iwqp = iwqp; @@ -3631,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work) kfree(dwork); i40iw_cm_disconn_true(iwqp); - i40iw_rem_ref(&iwqp->ibqp); + i40iw_qp_rem_ref(&iwqp->ibqp); } /** @@ -3753,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_node->lsmm_size = accept.size + conn_param->private_data_len; i40iw_cm_init_tsa_conn(iwqp, cm_node); 
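The i40iw rework running through these hunks converts the QP to refcount_t and moves freeing out of the put path: the final i40iw_qp_rem_ref() now only signals the free_qp completion, and the destroy path waits for it before tearing resources down. A pthread-based sketch of that teardown idiom, with hypothetical userspace analogues for refcount_t and struct completion:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct qp {
        atomic_int refcount;
        pthread_mutex_t lock;
        pthread_cond_t last_ref;  /* stands in for completion free_qp */
        int freed;
    };

    static void qp_init(struct qp *q)
    {
        atomic_init(&q->refcount, 1);   /* creation reference */
        pthread_mutex_init(&q->lock, NULL);
        pthread_cond_init(&q->last_ref, NULL);
        q->freed = 0;
    }

    static void qp_add_ref(struct qp *q)
    {
        atomic_fetch_add(&q->refcount, 1);
    }

    static void qp_rem_ref(struct qp *q)
    {
        if (atomic_fetch_sub(&q->refcount, 1) != 1)
            return;
        pthread_mutex_lock(&q->lock);
        q->freed = 1;                   /* complete(&iwqp->free_qp) analogue */
        pthread_cond_signal(&q->last_ref);
        pthread_mutex_unlock(&q->lock);
    }

    static void qp_destroy(struct qp *q)
    {
        qp_rem_ref(q);                  /* drop the creation reference */
        pthread_mutex_lock(&q->lock);
        while (!q->freed)               /* wait_for_completion() analogue */
            pthread_cond_wait(&q->last_ref, &q->lock);
        pthread_mutex_unlock(&q->lock);
        /* only now is it safe to destroy HW state and free memory */
    }

    int main(void)
    {
        struct qp q;

        qp_init(&q);
        qp_add_ref(&q);   /* e.g. an async event handler takes a ref */
        qp_rem_ref(&q);   /* ...and drops it when done */
        qp_destroy(&q);
        printf("qp torn down after last reference\n");
        return 0;
    }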
cm_id->add_ref(cm_id); - i40iw_add_ref(&iwqp->ibqp); + i40iw_qp_add_ref(&iwqp->ibqp); attr.qp_state = IB_QPS_RTS; cm_node->qhash_set = false; @@ -3916,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; iwqp->cm_id = cm_id; - i40iw_add_ref(&iwqp->ibqp); + i40iw_qp_add_ref(&iwqp->ibqp); if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { cm_node->state = I40IW_CM_STATE_SYN_SENT; diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index 55a1fbf0e670..a7512508f7e6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) __func__, info->qp_cq_id); continue; } - i40iw_add_ref(&iwqp->ibqp); + i40iw_qp_add_ref(&iwqp->ibqp); spin_unlock_irqrestore(&iwdev->qptable_lock, flags); qp = &iwqp->sc_qp; spin_lock_irqsave(&iwqp->lock, flags); @@ -427,7 +427,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) break; } if (info->qp) - i40iw_rem_ref(&iwqp->ibqp); + i40iw_qp_rem_ref(&iwqp->ibqp); } while (1); if (aeqcnt) @@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev, int arp_index; arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action); - if (arp_index == -1) + if (arp_index < 0) return; cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); if (!cqp_request) diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 238614370927..f1b0290da92d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -54,10 +54,6 @@ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD) -static int push_mode; -module_param(push_mode, int, 0644); -MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)"); - static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all"); @@ -1588,7 +1584,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, if (status) goto exit; iwdev->obj_next = iwdev->obj_mem; - iwdev->push_mode = push_mode; init_waitqueue_head(&iwdev->vchnl_waitq); init_waitqueue_head(&dev->vf_reqs); diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c index 540aab5e502d..3fafc5424e76 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_pble.c +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c @@ -392,12 +392,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n", pble_rsrc->next_fpm_addr, chunk->size, chunk->size); pble_rsrc->unallocated_pble -= (chunk->size >> 3); - list_add(&chunk->list, &pble_rsrc->pinfo.clist); sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ? 
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; - if (sd_entry->valid) - return 0; - if (dev->is_pf) { + if (dev->is_pf && !sd_entry->valid) { ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val, idx->sd_idx, sd_entry->entry_type, true); @@ -408,6 +405,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, } sd_entry->valid = true; + list_add(&chunk->list, &pble_rsrc->pinfo.clist); return 0; error: kfree(chunk); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 016524683e17..72db7c1dc299 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -479,25 +479,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev) } } -/** - * i40iw_free_qp - callback after destroy cqp completes - * @cqp_request: cqp request for destroy qp - * @num: not used - */ -static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num) -{ - struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param; - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; - struct i40iw_device *iwdev; - u32 qp_num = iwqp->ibqp.qp_num; - - iwdev = iwqp->iwdev; - - i40iw_rem_pdusecount(iwqp->iwpd, iwdev); - i40iw_free_qp_resources(iwdev, iwqp, qp_num); - i40iw_rem_devusecount(iwdev); -} - /** * i40iw_wait_event - wait for completion * @iwdev: iwarp device @@ -618,26 +599,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev) } /** - * i40iw_add_ref - add refcount for qp + * i40iw_qp_add_ref - add refcount for qp * @ibqp: iqarp qp */ -void i40iw_add_ref(struct ib_qp *ibqp) +void i40iw_qp_add_ref(struct ib_qp *ibqp) { struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp; - atomic_inc(&iwqp->refcount); + refcount_inc(&iwqp->refcount); } /** - * i40iw_rem_ref - rem refcount for qp and free if 0 + * i40iw_qp_rem_ref - rem refcount for qp and free if 0 * @ibqp: iqarp qp */ -void i40iw_rem_ref(struct ib_qp *ibqp) +void i40iw_qp_rem_ref(struct ib_qp *ibqp) { struct i40iw_qp *iwqp; - enum i40iw_status_code status; - struct i40iw_cqp_request *cqp_request; - struct cqp_commands_info *cqp_info; struct i40iw_device *iwdev; u32 qp_num; unsigned long flags; @@ -645,7 +623,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp) iwqp = to_iwqp(ibqp); iwdev = iwqp->iwdev; spin_lock_irqsave(&iwdev->qptable_lock, flags); - if (!atomic_dec_and_test(&iwqp->refcount)) { + if (!refcount_dec_and_test(&iwqp->refcount)) { spin_unlock_irqrestore(&iwdev->qptable_lock, flags); return; } @@ -653,25 +631,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp) qp_num = iwqp->ibqp.qp_num; iwdev->qp_table[qp_num] = NULL; spin_unlock_irqrestore(&iwdev->qptable_lock, flags); - cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); - if (!cqp_request) - return; + complete(&iwqp->free_qp); - cqp_request->callback_fcn = i40iw_free_qp; - cqp_request->param = (void *)&iwqp->sc_qp; - cqp_info = &cqp_request->info; - cqp_info->cqp_cmd = OP_QP_DESTROY; - cqp_info->post_sq = 1; - cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp; - cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; - cqp_info->in.u.qp_destroy.remove_hash_idx = true; - status = i40iw_handle_cqp_op(iwdev, cqp_request); - if (!status) - return; - - i40iw_rem_pdusecount(iwqp->iwpd, iwdev); - i40iw_free_qp_resources(iwdev, iwqp, qp_num); - i40iw_rem_devusecount(iwdev); } /** @@ -938,7 +899,7 @@ static void i40iw_terminate_timeout(struct timer_list *t) struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp; i40iw_terminate_done(qp, 
1); - i40iw_rem_ref(&iwqp->ibqp); + i40iw_qp_rem_ref(&iwqp->ibqp); } /** @@ -950,7 +911,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp) struct i40iw_qp *iwqp; iwqp = (struct i40iw_qp *)qp->back_qp; - i40iw_add_ref(&iwqp->ibqp); + i40iw_qp_add_ref(&iwqp->ibqp); timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0); iwqp->terminate_timer.expires = jiffies + HZ; add_timer(&iwqp->terminate_timer); @@ -966,7 +927,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp) iwqp = (struct i40iw_qp *)qp->back_qp; if (del_timer(&iwqp->terminate_timer)) - i40iw_rem_ref(&iwqp->ibqp); + i40iw_qp_rem_ref(&iwqp->ibqp); } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index cd9ee1664a69..7e9c1a40f040 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -168,38 +168,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context) */ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { - struct i40iw_ucontext *ucontext; - u64 db_addr_offset; - u64 push_offset; + struct i40iw_ucontext *ucontext = to_ucontext(context); + u64 dbaddr; - ucontext = to_ucontext(context); - if (ucontext->iwdev->sc_dev.is_pf) { - db_addr_offset = I40IW_DB_ADDR_OFFSET; - push_offset = I40IW_PUSH_OFFSET; - if (vma->vm_pgoff) - vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1; - } else { - db_addr_offset = I40IW_VF_DB_ADDR_OFFSET; - push_offset = I40IW_VF_PUSH_OFFSET; - if (vma->vm_pgoff) - vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1; - } + if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; - vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT; + dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0); - if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_private_data = ucontext; - } else { - if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - else - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - } - - if (io_remap_pfn_range(vma, vma->vm_start, - vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), - PAGE_SIZE, vma->vm_page_prot)) + if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot))) return -EAGAIN; return 0; @@ -366,11 +344,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va, * @iwqp: qp ptr (user or kernel) * @qp_num: qp number assigned */ -void i40iw_free_qp_resources(struct i40iw_device *iwdev, - struct i40iw_qp *iwqp, - u32 qp_num) +void i40iw_free_qp_resources(struct i40iw_qp *iwqp) { struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + struct i40iw_device *iwdev = iwqp->iwdev; + u32 qp_num = iwqp->ibqp.qp_num; i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); @@ -404,6 +382,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq) static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct ib_qp_attr attr; + struct i40iw_device *iwdev = iwqp->iwdev; + + memset(&attr, 0, sizeof(attr)); iwqp->destroyed = 1; @@ -418,7 +400,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) } } - i40iw_rem_ref(&iwqp->ibqp); + attr.qp_state = IB_QPS_ERR; + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + 
i40iw_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp); + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwqp); + i40iw_rem_devusecount(iwdev); + return 0; } @@ -579,6 +569,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, qp->back_qp = (void *)iwqp; qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; + iwqp->iwdev = iwdev; iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info; if (i40iw_allocate_dma_mem(dev->hw, @@ -603,7 +594,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, goto error; } - iwqp->iwdev = iwdev; iwqp->iwpd = iwpd; iwqp->ibqp.qp_num = qp_num; qp = &iwqp->sc_qp; @@ -717,7 +707,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, goto error; } - i40iw_add_ref(&iwqp->ibqp); + refcount_set(&iwqp->refcount, 1); spin_lock_init(&iwqp->lock); iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; iwdev->qp_table[qp_num] = iwqp; @@ -739,10 +729,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, } init_completion(&iwqp->sq_drained); init_completion(&iwqp->rq_drained); + init_completion(&iwqp->free_qp); return &iwqp->ibqp; error: - i40iw_free_qp_resources(iwdev, iwqp, qp_num); + i40iw_free_qp_resources(iwqp); return ERR_PTR(err_code); } @@ -2654,13 +2645,13 @@ static const struct ib_device_ops i40iw_dev_ops = { .get_hw_stats = i40iw_get_hw_stats, .get_port_immutable = i40iw_port_immutable, .iw_accept = i40iw_accept, - .iw_add_ref = i40iw_add_ref, + .iw_add_ref = i40iw_qp_add_ref, .iw_connect = i40iw_connect, .iw_create_listen = i40iw_create_listen, .iw_destroy_listen = i40iw_destroy_listen, .iw_get_qp = i40iw_get_qp, .iw_reject = i40iw_reject, - .iw_rem_ref = i40iw_rem_ref, + .iw_rem_ref = i40iw_qp_rem_ref, .map_mr_sg = i40iw_map_mr_sg, .mmap = i40iw_mmap, .modify_qp = i40iw_modify_qp, diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h index 3a413752ccc3..ad7d81041bc9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h @@ -140,7 +140,7 @@ struct i40iw_qp { struct i40iw_qp_host_ctx_info ctx_info; struct i40iwarp_offload_info iwarp_info; void *allocated_buffer; - atomic_t refcount; + refcount_t refcount; struct iw_cm_id *cm_id; void *cm_node; struct ib_mr *lsmm_mr; @@ -175,5 +175,6 @@ struct i40iw_qp { struct i40iw_dma_mem ietf_mem; struct completion sq_drained; struct completion rq_drained; + struct completion free_qp; }; #endif diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index b591861934b3..81d6a3460b55 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) if (!sriov->is_going_down && !id->scheduled_delete) { id->scheduled_delete = 1; schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); + } else if (id->scheduled_delete) { + /* Adjust timeout if already scheduled */ + mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); } spin_unlock_irqrestore(&sriov->going_down_lock, flags); spin_unlock(&sriov->id_map_lock); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 57079110af9b..08eccf2b6967 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1307,6 +1307,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } +static 
void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg) +{ + unsigned long flags; + struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; + struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); + + spin_lock_irqsave(&dev->sriov.going_down_lock, flags); + if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) + queue_work(ctx->wi_wq, &ctx->work); + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); +} + static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, struct mlx4_ib_demux_pv_qp *tun_qp, int index) @@ -2009,7 +2021,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, cq_size *= 2; cq_attr.cqe = cq_size; - ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, + ctx->cq = ib_create_cq(ctx->ib_dev, + create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler, NULL, ctx, &cq_attr); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); @@ -2046,6 +2059,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; + ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); if (ret) { @@ -2189,7 +2203,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, goto err_mcg; } - snprintf(name, sizeof name, "mlx4_ibt%d", port); + snprintf(name, sizeof(name), "mlx4_ibt%d", port); ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->wq) { pr_err("Failed to create tunnelling WQ for port %d\n", port); @@ -2197,7 +2211,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, goto err_wq; } - snprintf(name, sizeof name, "mlx4_ibud%d", port); + snprintf(name, sizeof(name), "mlx4_ibwi%d", port); + ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + if (!ctx->wi_wq) { + pr_err("Failed to create wire WQ for port %d\n", port); + ret = -ENOMEM; + goto err_wiwq; + } + + snprintf(name, sizeof(name), "mlx4_ibud%d", port); ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); if (!ctx->ud_wq) { pr_err("Failed to create up/down WQ for port %d\n", port); @@ -2208,6 +2230,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, return 0; err_udwq: + destroy_workqueue(ctx->wi_wq); + ctx->wi_wq = NULL; + +err_wiwq: destroy_workqueue(ctx->wq); ctx->wq = NULL; @@ -2255,12 +2281,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; } flush_workqueue(ctx->wq); + flush_workqueue(ctx->wi_wq); for (i = 0; i < dev->dev->caps.sqp_demux; i++) { destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); free_pv_object(dev, i, ctx->port); } kfree(ctx->tun); destroy_workqueue(ctx->ud_wq); + destroy_workqueue(ctx->wi_wq); destroy_workqueue(ctx->wq); } } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 369a203332a2..b8274c6fc43e 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -781,7 +781,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->ip_gids = true; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; - props->pkey_tbl_len = 1; + if (mdev->dev->caps.pkey_table_len[port]) + props->pkey_tbl_len = 1; props->max_mtu = IB_MTU_4096; props->max_vl_num = 2; props->state = IB_PORT_DOWN; @@ -1492,8 +1493,9 @@ static int __mlx4_ib_create_default_rules( int i; for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) 
{ + union ib_flow_spec ib_spec = {}; int ret; - union ib_flow_spec ib_spec; + switch (pdefault_rules->rules_create_list[i]) { case 0: /* no rule */ diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index eb53bb4c0c91..0173e3931cc7 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -459,6 +459,7 @@ struct mlx4_ib_demux_pv_ctx { struct ib_pd *pd; struct work_struct work; struct workqueue_struct *wq; + struct workqueue_struct *wi_wq; struct mlx4_ib_demux_pv_qp qp[2]; }; @@ -466,6 +467,7 @@ struct mlx4_ib_demux_ctx { struct ib_device *ib_dev; int port; struct workqueue_struct *wq; + struct workqueue_struct *wi_wq; struct workqueue_struct *ud_wq; spinlock_t ud_lock; atomic64_t subnet_prefix; diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 6ae503cfc526..9114cb730769 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->ibmr.length = length; - mr->ibmr.iova = virt_addr; mr->ibmr.page_size = 1U << shift; return &mr->ibmr; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index bd4aa04416c6..6e2b3e2f83f1 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, int send_size; int header_size; int spc; + int err; int i; if (wr->wr.opcode != IB_WR_SEND) @@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); - ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + if (err) + return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); @@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr, } sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) - ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, + &pkey); else - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, + &pkey); + if (err) + return err; + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index ff664355de55..73d5b8dc74d8 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -167,7 +167,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, { enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1); struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); - struct mlx5_ib_srq *srq; + struct mlx5_ib_srq *srq = NULL; struct mlx5_ib_wq *wq; u16 wqe_ctr; u8 roce_packet_type; @@ -179,7 +179,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, if (qp->ibqp.xrcd) { msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); - srq = to_mibsrq(msrq); + if (msrq) + srq = 
to_mibsrq(msrq); } else { srq = to_msrq(qp->ibqp.srq); } diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index d609f4659afb..664e0f374ac0 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -489,6 +489,10 @@ static u64 devx_get_obj_id(const void *in) obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, MLX5_GET(rst2init_qp_in, in, qpn)); break; + case MLX5_CMD_OP_INIT2INIT_QP: + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, + MLX5_GET(init2init_qp_in, in, qpn)); + break; case MLX5_CMD_OP_INIT2RTR_QP: obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, MLX5_GET(init2rtr_qp_in, in, qpn)); @@ -814,6 +818,7 @@ static bool devx_is_obj_modify_cmd(const void *in) case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_RST2INIT_QP: case MLX5_CMD_OP_INIT2RTR_QP: + case MLX5_CMD_OP_INIT2INIT_QP: case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP: @@ -1113,7 +1118,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT); break; case MLX5_CMD_OP_CREATE_TIR: - MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR); + *obj_id = MLX5_GET(create_tir_out, out, tirn); + MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, din, tirn, *obj_id); break; case MLX5_CMD_OP_CREATE_TIS: MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS); @@ -2015,8 +2022,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)( num_alloc_xa_entries++; event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL); - if (!event_sub) + if (!event_sub) { + err = -ENOMEM; goto err; + } list_add_tail(&event_sub->event_list, &sub_list); if (use_eventfd) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4f44a731a48e..e2656b68ec22 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -517,7 +517,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, mdev_port_num); if (err) goto out; - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; @@ -888,7 +888,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, /* We support 'Gappy' memory registration too */ props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; } - props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + /* IB_WR_REG_MR always requires changing the entity size with UMR */ + if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (MLX5_CAP_GEN(mdev, sho)) { props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; /* At this stage no support for signature handover */ @@ -6171,7 +6173,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) err = set_has_smi_cap(dev); if (err) - return err; + goto err_mp; if (!mlx5_core_mp_enabled(mdev)) { for (i = 1; i <= dev->num_ports; i++) { @@ -6211,8 +6213,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) err_mp: mlx5_ib_cleanup_multiport_master(dev); - - return -ENOMEM; + return err; } static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev) @@ -6624,7 +6625,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); if (err) - 
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); + mlx5_free_bfreg(dev->mdev, &dev->bfreg); return err; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 881decb1309a..09e29c6cb66d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1463,6 +1463,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u16 uid = to_mpd(pd)->uid; u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; + if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt) + return -EINVAL; if (qp->sq.wqe_cnt) { err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); if (err) @@ -5496,7 +5498,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); rdma_ah_set_static_rate(ah_attr, path->static_rate ? path->static_rate - 5 : 0); - if (path->grh_mlid & (1 << 7)) { + + if (path->grh_mlid & (1 << 7) || + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); rdma_ah_set_grh(ah_attr, NULL, diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 4e7fde86c96b..c29c1f7da4a1 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, srq->msrq.event = mlx5_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; - if (udata) - if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { + if (udata) { + struct mlx5_ib_create_srq_resp resp = { + .srqn = srq->msrq.srqn, + }; + + if (ib_copy_to_udata(udata, &resp, min(udata->outlen, + sizeof(resp)))) { mlx5_ib_dbg(dev, "copy to user failed\n"); err = -EFAULT; goto err_core; } + } init_attr->attr.max_wr = srq->msrq.max - 1; diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index 8fc3630a9d4c..0224231a2e6f 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn) struct mlx5_srq_table *table = &dev->srq_table; struct mlx5_core_srq *srq; - xa_lock(&table->array); + xa_lock_irq(&table->array); srq = xa_load(&table->array, srqn); if (srq) refcount_inc(&srq->common.refcount); - xa_unlock(&table->array); + xa_unlock_irq(&table->array); return srq; } diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index c3cfea243af8..26c3408dcaca 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -604,7 +604,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev, entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; break; default: - entry->opcode = MTHCA_OPCODE_INVALID; + entry->opcode = 0xFF; break; } } else { @@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); - if (IS_ERR(mailbox)) + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); goto err_out_arm; + } cq_context = mailbox->buf; @@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, } spin_lock_irq(&dev->cq_table.lock); - if (mthca_array_set(&dev->cq_table.cq, - cq->cqn & (dev->limits.num_cqs - 1), - cq)) { + err = mthca_array_set(&dev->cq_table.cq, + cq->cqn & (dev->limits.num_cqs - 1), cq); + if (err) { spin_unlock_irq(&dev->cq_table.lock); goto err_out_free_mr; } diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index bfd4eebc1182..58d46449b0e8 100644 --- 
a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -105,7 +105,6 @@ enum { MTHCA_OPCODE_ATOMIC_CS = 0x11, MTHCA_OPCODE_ATOMIC_FA = 0x12, MTHCA_OPCODE_BIND_MW = 0x18, - MTHCA_OPCODE_INVALID = 0xff }; enum { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e8267e590772..55bd8873da46 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -442,9 +442,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) pr_err("%s(%d) Freeing in use pdid=0x%x.\n", __func__, dev->id, pd->id); } - kfree(uctx->cntxt_pd); uctx->cntxt_pd = NULL; _ocrdma_dealloc_pd(dev, pd); + kfree(pd); } static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index b462eaca1ee3..93040c994e2e 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -360,7 +360,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ); if (IS_IWARP(dev)) { - xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ); + xa_init(&dev->qps); dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); } @@ -601,7 +601,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); /* Part 2 - check capabilities */ - page_size = ~dev->attr.page_size_caps + 1; + page_size = ~qed_attr->page_size_caps + 1; if (page_size > PAGE_SIZE) { DP_ERR(dev, "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 0cfd849b13d6..ed56df319d2d 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -40,6 +40,7 @@ #include <linux/qed/qed_rdma_if.h> #include <linux/qed/qede_rdma.h> #include <linux/qed/roce_common.h> +#include <linux/completion.h> #include "qedr_hsi_rdma.h" #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" @@ -348,10 +349,10 @@ struct qedr_srq_hwq_info { u32 wqe_prod; u32 sge_prod; u32 wr_prod_cnt; - u32 wr_cons_cnt; + atomic_t wr_cons_cnt; u32 num_elems; - u32 *virt_prod_pair_addr; + struct rdma_srq_producers *virt_prod_pair_addr; dma_addr_t phy_prod_pair_addr; }; @@ -377,10 +378,20 @@ enum qedr_qp_err_bitmap { QEDR_QP_ERR_RQ_PBL_FULL = 32, }; +enum qedr_qp_create_type { + QEDR_QP_CREATE_NONE, + QEDR_QP_CREATE_USER, + QEDR_QP_CREATE_KERNEL, +}; + +enum qedr_iwarp_cm_flags { + QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0), + QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1), +}; + struct qedr_qp { struct ib_qp ibqp; /* must be first */ struct qedr_dev *dev; - struct qedr_iw_ep *ep; struct qedr_qp_hwq_info sq; struct qedr_qp_hwq_info rq; @@ -395,6 +406,7 @@ struct qedr_qp { u32 id; struct qedr_pd *pd; enum ib_qp_type qp_type; + enum qedr_qp_create_type create_type; struct qed_rdma_qp *qed_qp; u32 qp_id; u16 icid; @@ -437,8 +449,11 @@ struct qedr_qp { /* Relevant to qps created from user space only (applications) */ struct qedr_userq usq; struct qedr_userq urq; - atomic_t refcnt; - bool destroyed; + + /* synchronization objects used with iwarp ep */ + struct kref refcnt; + struct completion iwarp_cm_comp; + unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */ }; struct qedr_ah { @@ -531,7 +546,7 @@ struct qedr_iw_ep { struct iw_cm_id *cm_id; struct qedr_qp *qp; void *qed_context; - u8 during_connect; + struct kref refcnt; }; 
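[Editor's aside] The qedr changes above swap the old during_connect flag for a kref on both the QP and the iWARP endpoint, plus an iwarp_cm_comp completion guarded by the WAIT_FOR_CONNECT/WAIT_FOR_DISCONNECT bits. For readers less familiar with the put-on-last-reference pattern the qedr_iw_cm.c hunks below rely on, here is a minimal userspace sketch of it; the uref/demo_* names and the C11-atomics stand-in for struct kref are illustrative assumptions, not kernel API:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's struct kref. */
struct uref {
	atomic_int count;
};

static void uref_init(struct uref *r)
{
	atomic_init(&r->count, 1);
}

static void uref_get(struct uref *r)
{
	atomic_fetch_add(&r->count, 1);
}

/* Like kref_put(): run release() only when the last reference drops. */
static void uref_put(struct uref *r, void (*release)(struct uref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);
}

struct demo_qp {
	struct uref refcnt;
	int qp_id;
};

struct demo_ep {
	struct uref refcnt;
	struct demo_qp *qp;	/* the ep owns one reference on its QP */
};

static void demo_free_qp(struct uref *ref)
{
	/* open-coded container_of(ref, struct demo_qp, refcnt) */
	struct demo_qp *qp = (struct demo_qp *)((char *)ref -
				offsetof(struct demo_qp, refcnt));

	printf("qp %d freed\n", qp->qp_id);
	free(qp);
}

static void demo_free_ep(struct uref *ref)
{
	struct demo_ep *ep = (struct demo_ep *)((char *)ref -
				offsetof(struct demo_ep, refcnt));

	if (ep->qp)
		uref_put(&ep->qp->refcnt, demo_free_qp); /* drop the ep's QP ref */
	printf("ep freed\n");
	free(ep);
}

int main(void)
{
	struct demo_qp *qp = calloc(1, sizeof(*qp));	/* alloc checks elided */
	struct demo_ep *ep = calloc(1, sizeof(*ep));

	uref_init(&qp->refcnt);
	qp->qp_id = 7;

	uref_init(&ep->refcnt);
	uref_get(&qp->refcnt);	/* ep takes its own QP reference */
	ep->qp = qp;

	uref_put(&qp->refcnt, demo_free_qp);	/* creator drops its QP ref */
	uref_put(&ep->refcnt, demo_free_ep);	/* last ep ref releases both */
	return 0;
}

In this shape, whichever of the QP puts runs last is the one that actually frees the object, which the old atomic_dec_and_test()/during_connect scheme had to approximate with polling (see the msleep() loop removed from qedr_destroy_qp() further below).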
static inline diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 22881d4442b9..b9b46a0c4803 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info, } } +static void qedr_iw_free_qp(struct kref *ref) +{ + struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt); + + kfree(qp); +} + +static void +qedr_iw_free_ep(struct kref *ref) +{ + struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt); + + if (ep->qp) + kref_put(&ep->qp->refcnt, qedr_iw_free_qp); + + if (ep->cm_id) + ep->cm_id->rem_ref(ep->cm_id); + + kfree(ep); +} + static void qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params) { @@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params) ep->dev = dev; ep->qed_context = params->ep_context; + kref_init(&ep->refcnt); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REQUEST; @@ -128,8 +150,17 @@ qedr_iw_issue_event(void *context, if (params->cm_info) { event.ird = params->cm_info->ird; event.ord = params->cm_info->ord; - event.private_data_len = params->cm_info->private_data_len; - event.private_data = (void *)params->cm_info->private_data; + /* Only connect_request and reply have valid private data; + * for the rest of the events it may be left over from + * connection establishment. CONNECT_REQUEST is issued via + * qedr_iw_mpa_request. + */ + if (event_type == IW_CM_EVENT_CONNECT_REPLY) { + event.private_data_len = + params->cm_info->private_data_len; + event.private_data = + (void *)params->cm_info->private_data; + } } if (ep->cm_id) @@ -141,12 +172,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params) { struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; - if (ep->cm_id) { + if (ep->cm_id) qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE); - ep->cm_id->rem_ref(ep->cm_id); - ep->cm_id = NULL; - } + kref_put(&ep->refcnt, qedr_iw_free_ep); } static void @@ -186,11 +215,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) struct qedr_qp *qp = ep->qp; struct iw_cm_event event; - if (qp->destroyed) { - kfree(dwork); - qedr_iw_qp_rem_ref(&qp->ibqp); - return; - } + /* The qp won't be released until we release the ep. 
+ * The ep's refcnt was increased before calling this + * function, therefore it is safe to access qp + */ + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, + &qp->iwarp_cm_flags)) + goto out; memset(&event, 0, sizeof(event)); event.status = dwork->status; @@ -204,7 +235,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) else qp_params.new_state = QED_ROCE_QP_STATE_SQD; - kfree(dwork); if (ep->cm_id) ep->cm_id->event_handler(ep->cm_id, &event); @@ -214,7 +244,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work) dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); - qedr_iw_qp_rem_ref(&qp->ibqp); + complete(&ep->qp->iwarp_cm_comp); +out: + kfree(dwork); + kref_put(&ep->refcnt, qedr_iw_free_ep); } static void @@ -224,13 +257,17 @@ qedr_iw_disconnect_event(void *context, struct qedr_discon_work *work; struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; struct qedr_dev *dev = ep->dev; - struct qedr_qp *qp = ep->qp; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; - qedr_iw_qp_add_ref(&qp->ibqp); + /* We can't get a close event before disconnect, but since + * we're scheduling a work queue we need to make sure close + * won't delete the ep, so we increase the refcnt + */ + kref_get(&ep->refcnt); + work->ep = ep; work->event = params->event; work->status = params->status; @@ -252,16 +289,30 @@ qedr_iw_passive_complete(void *context, if ((params->status == -ECONNREFUSED) && (!ep->qp)) { DP_DEBUG(dev, QEDR_MSG_IWARP, "PASSIVE connection refused releasing ep...\n"); - kfree(ep); + kref_put(&ep->refcnt, qedr_iw_free_ep); return; } + complete(&ep->qp->iwarp_cm_comp); qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED); if (params->status < 0) qedr_iw_close_event(context, params); } +static void +qedr_iw_active_complete(void *context, + struct qed_iwarp_cm_event_params *params) +{ + struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; + + complete(&ep->qp->iwarp_cm_comp); + qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY); + + if (params->status < 0) + kref_put(&ep->refcnt, qedr_iw_free_ep); +} + static int qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params) { @@ -288,27 +339,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params) qedr_iw_mpa_reply(context, params); break; case QED_IWARP_EVENT_PASSIVE_COMPLETE: - ep->during_connect = 0; qedr_iw_passive_complete(context, params); break; - case QED_IWARP_EVENT_ACTIVE_COMPLETE: - ep->during_connect = 0; qedr_iw_issue_event(context, - params, - IW_CM_EVENT_CONNECT_REPLY); - if (params->status < 0) { - struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context; - - ep->cm_id->rem_ref(ep->cm_id); - ep->cm_id = NULL; - } + qedr_iw_active_complete(context, params); break; case QED_IWARP_EVENT_DISCONNECT: qedr_iw_disconnect_event(context, params); break; case QED_IWARP_EVENT_CLOSE: - ep->during_connect = 0; qedr_iw_close_event(context, params); break; case QED_IWARP_EVENT_RQ_EMPTY: @@ -451,10 +490,10 @@ qedr_addr6_resolve(struct qedr_dev *dev, if ((!dst) || dst->error) { if (dst) { - dst_release(dst); DP_ERR(dev, "ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return -EINVAL; } @@ -476,6 +515,19 @@ qedr_addr6_resolve(struct qedr_dev *dev, return rc; } +struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn) +{ + struct qedr_qp *qp; + + xa_lock(&dev->qps); + qp = xa_load(&dev->qps, qpn); + if (qp) + kref_get(&qp->refcnt); + xa_unlock(&dev->qps); + + return qp; +} + int 
qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct qedr_dev *dev = get_qedr_dev(cm_id->device); @@ -491,10 +543,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int rc = 0; int i; - qp = xa_load(&dev->qps, conn_param->qpn); - if (unlikely(!qp)) - return -EINVAL; - laddr = (struct sockaddr_in *)&cm_id->m_local_addr; raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; @@ -516,8 +564,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) return -ENOMEM; ep->dev = dev; + kref_init(&ep->refcnt); + + qp = qedr_iw_load_qp(dev, conn_param->qpn); + if (!qp) { + rc = -EINVAL; + goto err; + } + ep->qp = qp; - qp->ep = ep; cm_id->add_ref(cm_id); ep->cm_id = cm_id; @@ -580,16 +635,22 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) in_params.qp = qp->qed_qp; memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN); - ep->during_connect = 1; + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, + &qp->iwarp_cm_flags)) { + rc = -ENODEV; + goto err; /* QP already being destroyed */ + } + rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params); - if (rc) + if (rc) { + complete(&qp->iwarp_cm_comp); goto err; + } return rc; err: - cm_id->rem_ref(cm_id); - kfree(ep); + kref_put(&ep->refcnt, qedr_iw_free_ep); return rc; } @@ -668,6 +729,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) listener->qed_handle); cm_id->rem_ref(cm_id); + kfree(listener); return rc; } @@ -681,14 +743,13 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn); - qp = xa_load(&dev->qps, conn_param->qpn); + qp = qedr_iw_load_qp(dev, conn_param->qpn); if (!qp) { DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn); return -EINVAL; } ep->qp = qp; - qp->ep = ep; cm_id->add_ref(cm_id); ep->cm_id = cm_id; @@ -700,15 +761,23 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) params.ird = conn_param->ird; params.ord = conn_param->ord; - ep->during_connect = 1; + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, + &qp->iwarp_cm_flags)) { + rc = -EINVAL; + goto err; /* QP already destroyed */ + } + rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params); - if (rc) + if (rc) { + complete(&qp->iwarp_cm_comp); goto err; + } return rc; + err: - ep->during_connect = 0; - cm_id->rem_ref(cm_id); + kref_put(&ep->refcnt, qedr_iw_free_ep); + return rc; } @@ -731,17 +800,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp) { struct qedr_qp *qp = get_qedr_qp(ibqp); - atomic_inc(&qp->refcnt); + kref_get(&qp->refcnt); } void qedr_iw_qp_rem_ref(struct ib_qp *ibqp) { struct qedr_qp *qp = get_qedr_qp(ibqp); - if (atomic_dec_and_test(&qp->refcnt)) { - xa_erase_irq(&qp->dev->qps, qp->qp_id); - kfree(qp); - } + kref_put(&qp->refcnt, qedr_iw_free_qp); } struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index a7ccca3c4f89..4408d3364664 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -51,6 +51,7 @@ #include "verbs.h" #include <rdma/qedr-abi.h> #include "qedr_roce_cm.h" +#include "qedr_iw_cm.h" #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm) #define RDMA_MAX_SGE_PER_SRQ (4) @@ -1193,7 +1194,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev, struct ib_qp_init_attr 
*attrs) { spin_lock_init(&qp->q_lock); - atomic_set(&qp->refcnt, 1); + if (rdma_protocol_iwarp(&dev->ibdev, 1)) { + kref_init(&qp->refcnt); + init_completion(&qp->iwarp_cm_comp); + } qp->pd = pd; qp->qp_type = attrs->qp_type; qp->max_inline_data = attrs->cap.max_inline_data; @@ -1600,6 +1604,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1); int rc = -EINVAL; + qp->create_type = QEDR_QP_CREATE_USER; memset(&ureq, 0, sizeof(ureq)); rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq)); if (rc) { @@ -1813,6 +1818,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev, u32 n_sq_entries; memset(&in_params, 0, sizeof(in_params)); + qp->create_type = QEDR_QP_CREATE_KERNEL; /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in * the ring. The ring should allow at least a single WR, even if the @@ -1926,7 +1932,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, qp->ibqp.qp_num = qp->qp_id; if (rdma_protocol_iwarp(&dev->ibdev, 1)) { - rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL); + rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL); if (rc) goto err; } @@ -2399,7 +2405,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; - qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; + qp_attr->cap.max_inline_data = dev->attr.max_inline; qp_init_attr->cap = qp_attr->cap; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; @@ -2445,7 +2451,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp, return rc; } - if (udata) + if (qp->create_type == QEDR_QP_CREATE_USER) qedr_cleanup_user(dev, qp); else qedr_cleanup_kernel(dev, qp); @@ -2475,34 +2481,46 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) qedr_modify_qp(ibqp, &attr, attr_mask, NULL); } } else { - /* Wait for the connect/accept to complete */ - if (qp->ep) { - int wait_count = 1; + /* If connection establishment started, the WAIT_FOR_CONNECT + * bit will be on and we need to wait for the establishment + * to complete before destroying the qp. + */ + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT, + &qp->iwarp_cm_flags)) + wait_for_completion(&qp->iwarp_cm_comp); - while (qp->ep->during_connect) { - DP_DEBUG(dev, QEDR_MSG_QP, - "Still in during connect/accept\n"); - - msleep(100); - if (wait_count++ > 200) { - DP_NOTICE(dev, - "during connect timeout\n"); - break; - } - } - } + /* If graceful disconnect started, the WAIT_FOR_DISCONNECT + * bit will be on, and we need to wait for the disconnect to + * complete before continuing. We can use the same completion, + * iwarp_cm_comp, since this is the only place that waits for + * this completion and it is sequential. In addition, + * disconnect can't occur before the connection is fully + * established, therefore if WAIT_FOR_DISCONNECT is on it + * means WAIT_FOR_CONNECT is also on and the completion for + * CONNECT already occurred. 
+ */ + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT, + &qp->iwarp_cm_flags)) + wait_for_completion(&qp->iwarp_cm_comp); } if (qp->qp_type == IB_QPT_GSI) qedr_destroy_gsi_qp(dev); + /* We need to remove the entry from the xarray before we release the + * qp_id to avoid a race of the qp_id being reallocated and failing + * on xa_insert + */ + if (rdma_protocol_iwarp(&dev->ibdev, 1)) + xa_erase(&dev->qps, qp->qp_id); + qedr_free_qp_resources(dev, qp, udata); - if (atomic_dec_and_test(&qp->refcnt) && - rdma_protocol_iwarp(&dev->ibdev, 1)) { - xa_erase_irq(&dev->qps, qp->qp_id); + if (rdma_protocol_iwarp(&dev->ibdev, 1)) + qedr_iw_qp_rem_ref(&qp->ibqp); + else kfree(qp); - } + return 0; } @@ -3444,7 +3462,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq) * count and consumer count and subtract it from max * work request supported so that we get elements left. */ - used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt; + used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt); return hw_srq->max_wr - used; } @@ -3459,7 +3477,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, unsigned long flags; int status = 0; u32 num_sge; - u32 offset; spin_lock_irqsave(&srq->lock, flags); @@ -3472,7 +3489,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, if (!qedr_srq_elem_left(hw_srq) || wr->num_sge > srq->hw_srq.max_sges) { DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n", - hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt, + hw_srq->wr_prod_cnt, + atomic_read(&hw_srq->wr_cons_cnt), wr->num_sge, srq->hw_srq.max_sges); status = -ENOMEM; *bad_wr = wr; @@ -3506,22 +3524,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, hw_srq->sge_prod++; } - /* Flush WQE and SGE information before + /* Update WQE and SGE information before * updating producer. */ - wmb(); + dma_wmb(); /* SRQ producer is 8 bytes. Need to update SGE producer index * in first 4 bytes and need to update WQE producer in * next 4 bytes. */ - *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod; - offset = offsetof(struct rdma_srq_producers, wqe_prod); - *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) = - hw_srq->wqe_prod; + srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod; + /* Make sure sge producer is updated first */ + dma_wmb(); + srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod; - /* Flush producer after updating it. 
*/ - wmb(); wr = wr->next; } @@ -3940,7 +3956,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, } else { __process_resp_one(dev, qp, cq, wc, resp, wr_id); } - srq->hw_srq.wr_cons_cnt++; + atomic_inc(&srq->hw_srq.wr_cons_cnt); return 1; } diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 568b21eb6ea1..021df0654ba7 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping linkcontrol sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + goto bail_link; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping sl2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_link; + goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping diag_counters sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); @@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) &cc_table_bin_attr); kobject_put(&ppd->pport_cc_kobj); } + kobject_put(&ppd->diagc_kobj); kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->pport_kobj); } diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 556b8e44a51c..a102a5d8769f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, } usnic_uiom_free_dev_list(dev_list); + dev_list = NULL; } /* Try to find resources on an unused vf */ @@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, qp_grp_check: if (IS_ERR_OR_NULL(qp_grp)) { usnic_err("Failed to allocate qp_grp\n"); + if (usnic_ib_share_vf) + usnic_uiom_free_dev_list(dev_list); return ERR_PTR(qp_grp ? 
PTR_ERR(qp_grp) : -ENOMEM); } return qp_grp; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e580ae9cc55a..10e67283b9db 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -266,7 +266,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) } ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1); if (ret) - return ret; + goto err_srq_free; spin_lock_init(&dev->srq_tbl_lock); rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group); @@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); ret = -ENOMEM; - goto err_free_device; + goto err_disable_pdev; } ret = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig index 1f2759c72108..a297f13eb666 100644 --- a/drivers/infiniband/sw/rdmavt/Kconfig +++ b/drivers/infiniband/sw/rdmavt/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_RDMAVT tristate "RDMA verbs transport library" - depends on X86_64 && ARCH_DMA_ADDR_T_64BIT + depends on INFINIBAND_VIRT_DMA + depends on X86_64 depends on PCI select DMA_VIRT_OPS ---help--- diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 0fee3c87776b..bd729aa1d510 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, */ if (udata && udata->outlen >= sizeof(__u64)) { cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); - if (!cq->ip) { - err = -ENOMEM; + if (IS_ERR(cq->ip)) { + err = PTR_ERR(cq->ip); goto bail_wc; } diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index 652f4a7efc1b..37853aa3bcf7 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c @@ -154,7 +154,7 @@ done: * @udata: user data (must be valid!) 
* @obj: opaque pointer to a cq, wq etc * - * Return: rvt_mmap struct on success + * Return: rvt_mmap struct on success, ERR_PTR on failure */ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, struct ib_udata *udata, void *obj) @@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); if (!ip) - return ip; + return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 799254a049ba..d14ad523f96c 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; - if (qp->r_rq.kwq) - qp->r_rq.kwq->count = qp->r_rq.size; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } @@ -1196,7 +1194,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, err = alloc_ud_wq_attr(qp, rdi->dparms.node); if (err) { ret = (ERR_PTR(err)); - goto bail_driver_priv; + goto bail_rq_rvt; } err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, @@ -1244,8 +1242,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, qp->ip = rvt_create_mmap_info(rdi, s, udata, qp->r_rq.wq); - if (!qp->ip) { - ret = ERR_PTR(-ENOMEM); + if (IS_ERR(qp->ip)) { + ret = ERR_CAST(qp->ip); goto bail_qpn; } @@ -1300,9 +1298,11 @@ bail_qpn: rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: - rvt_free_rq(&qp->r_rq); free_ud_wq_attr(qp); +bail_rq_rvt: + rvt_free_rq(&qp->r_rq); + bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); @@ -2350,31 +2350,6 @@ bad_lkey: return 0; } -/** - * get_count - count numbers of request work queue entries - * in circular buffer - * @rq: data structure for request queue entry - * @tail: tail indices of the circular buffer - * @head: head indices of the circular buffer - * - * Return - total number of entries in the circular buffer - */ -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) -{ - u32 count; - - count = head; - - if (count >= rq->size) - count = 0; - if (count < tail) - count += rq->size - tail; - else - count -= tail; - - return count; -} - /** * get_rvt_head - get head indices of the circular buffer * @rq: data structure for request queue entry @@ -2449,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); - kwq->count = get_count(rq, tail, head); + kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; @@ -2484,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) * the number of remaining WQEs. */ if (kwq->count < srq->limit) { - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip)); + kwq->count = + rvt_get_rq_count(rq, + get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c index 890d7b760d2e..27415185d862 100644 --- a/drivers/infiniband/sw/rdmavt/rc.c +++ b/drivers/infiniband/sw/rdmavt/rc.c @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp) * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. 
*/ - credits = head - tail; - if ((int)credits < 0) - credits += qp->r_rq.size; + credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c index 24fef021d51d..f547c115af03 100644 --- a/drivers/infiniband/sw/rdmavt/srq.c +++ b/drivers/infiniband/sw/rdmavt/srq.c @@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr, u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); - if (!srq->ip) { - ret = -ENOMEM; + if (IS_ERR(srq->ip)) { + ret = PTR_ERR(srq->ip); goto bail_wq; } diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 18da1e1ea979..833f3f1b87f5 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports) if (!rdi) return rdi; - rdi->ports = kcalloc(nports, - sizeof(struct rvt_ibport **), - GFP_KERNEL); + rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL); if (!rdi->ports) ib_dealloc_device(&rdi->ibdev); diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index d9bcfe740588..0e8f1d05dfb2 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -2,8 +2,9 @@ config RDMA_RXE tristate "Software RDMA over Ethernet (RoCE) driver" depends on INET && PCI && INFINIBAND - depends on !64BIT || ARCH_DMA_ADDR_T_64BIT + depends on INFINIBAND_VIRT_DMA select NET_UDP_TUNNEL + select CRYPTO select CRYPTO_CRC32 select DMA_VIRT_OPS ---help--- diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index a8c11b5e1e94..de5f3efe9fcb 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -48,6 +48,8 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe) } +bool rxe_initialized; + /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -116,6 +118,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; + addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, + rxe->ndev->dev_addr); rxe->max_ucontext = RXE_MAX_UCONTEXT; } @@ -157,9 +161,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) rxe_init_port_param(port); - if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) - return -EINVAL; - port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, sizeof(*port->pkey_tbl), GFP_KERNEL); @@ -358,6 +359,7 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); + rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -369,6 +371,7 @@ static void __exit rxe_module_exit(void) rxe_net_exit(); rxe_cache_exit(); + rxe_initialized = false; pr_info("unloaded\n"); } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index fb07eed9e402..cae1b0a24c85 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -67,6 +67,8 @@ #define RXE_ROCE_V2_SPORT (0xc000) +extern bool rxe_initialized; + static inline u32 rxe_crc32(struct rxe_dev *rxe, u32 crc, void *next, size_t len) { diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index 48f48122ddcb..6a413d73b95d 100644 --- 
a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c @@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size, ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) - return NULL; + return ERR_PTR(-ENOMEM); size = PAGE_ALIGN(size); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index ea6a819b7167..ffbc50341a55 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -207,6 +207,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, vaddr = page_address(sg_page_iter_page(&sg_iter)); if (!vaddr) { pr_warn("null vaddr\n"); + ib_umem_release(umem); err = -ENOMEM; goto err1; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 312c2fc961c0..d41135682891 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -453,6 +453,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb) void rxe_loopback(struct sk_buff *skb) { + if (skb->protocol == htons(ETH_P_IP)) + skb_pull(skb, sizeof(struct iphdr)); + else + skb_pull(skb, sizeof(struct ipv6hdr)); + rxe_rcv(skb); } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index e2c6d1cedf41..f85273883794 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -592,15 +592,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, int err; if (mask & IB_QP_MAX_QP_RD_ATOMIC) { - int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); + int max_rd_atomic = attr->max_rd_atomic ? + roundup_pow_of_two(attr->max_rd_atomic) : 0; qp->attr.max_rd_atomic = max_rd_atomic; atomic_set(&qp->req.rd_atomic, max_rd_atomic); } if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { - int max_dest_rd_atomic = - __roundup_pow_of_two(attr->max_dest_rd_atomic); + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? 
+ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index ff92704de32f..245040c3a35d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, if (outbuf) { ip = rxe_create_mmap_info(rxe, buf_size, udata, buf); - if (!ip) + if (IS_ERR(ip)) { + err = PTR_ERR(ip); goto err1; + } - err = copy_to_user(outbuf, &ip->info, sizeof(ip->info)); - if (err) + if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) { + err = -EFAULT; goto err2; + } spin_lock_bh(&rxe->pending_lock); list_add(&ip->pending_mmaps, &rxe->pending_mmaps); @@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf, err2: kfree(ip); err1: - return -EINVAL; + return err; } inline void rxe_queue_reset(struct rxe_queue *q) diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 831ad578a7b2..369ba76f1605 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -36,21 +36,26 @@ #include "rxe.h" #include "rxe_loc.h" +/* check that QP matches packet opcode type and is in a valid state */ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) { + unsigned int pkt_type; + if (unlikely(!qp->valid)) goto err1; + pkt_type = pkt->opcode & 0xe0; + switch (qp_type(qp)) { case IB_QPT_RC: - if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) { + if (unlikely(pkt_type != IB_OPCODE_RC)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } break; case IB_QPT_UC: - if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) { + if (unlikely(pkt_type != IB_OPCODE_UC)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } @@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: - if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) { + if (unlikely(pkt_type != IB_OPCODE_UD)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } @@ -281,6 +286,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) struct rxe_mc_elem *mce; struct rxe_qp *qp; union ib_gid dgid; + struct sk_buff *per_qp_skb; + struct rxe_pkt_info *per_qp_pkt; int err; if (skb->protocol == htons(ETH_P_IP)) @@ -298,7 +305,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) list_for_each_entry(mce, &mcg->qp_list, qp_list) { qp = mce->qp; - pkt = SKB_TO_PKT(skb); /* validate qp for incoming packet */ err = check_type_state(rxe, pkt, qp); @@ -309,15 +315,27 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) if (err) continue; - /* if *not* the last qp in the list - * increase the users of the skb then post to the next qp + /* for all but the last qp create a new clone of the + * skb and pass to the qp. If an error occurs in the + * checks for the last qp in the list we need to + * free the skb since it hasn't been passed on to + * rxe_rcv_pkt() which would free it later. 
*/ - if (mce->qp_list.next != &mcg->qp_list) - skb_get(skb); + if (mce->qp_list.next != &mcg->qp_list) { + per_qp_skb = skb_clone(skb, GFP_ATOMIC); + } else { + per_qp_skb = skb; + /* show we have consumed the skb */ + skb = NULL; + } - pkt->qp = qp; + if (unlikely(!per_qp_skb)) + continue; + + per_qp_pkt = SKB_TO_PKT(per_qp_skb); + per_qp_pkt->qp = qp; rxe_add_ref(qp); - rxe_rcv_pkt(pkt, skb); + rxe_rcv_pkt(per_qp_pkt, per_qp_skb); } spin_unlock_bh(&mcg->mcg_lock); @@ -325,15 +343,20 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */ err1: + /* free skb if not consumed */ kfree_skb(skb); } static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb) { + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); const struct ib_gid_attr *gid_attr; union ib_gid dgid; union ib_gid *pdgid; + if (pkt->mask & RXE_LOOPBACK_MASK) + return 0; + if (skb->protocol == htons(ETH_P_IP)) { ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, (struct in6_addr *)&dgid); @@ -366,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb) if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES)) goto drop; - if (unlikely(rxe_match_dgid(rxe, skb) < 0)) { + if (rxe_match_dgid(rxe, skb) < 0) { pr_warn_ratelimited("failed matching dgid\n"); goto drop; } diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index e5031172c019..a4d6e0b7901e 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -664,7 +664,8 @@ next_wqe: } if (unlikely(qp_type(qp) == IB_QPT_RC && - qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) { + psn_compare(qp->req.psn, (qp->comp.psn + + RXE_MAX_UNACKED_PSNS)) > 0)) { qp->req.wait_psn = 1; goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c index ccda5f5a3bc0..2af31d421bfc 100644 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c @@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp) struct net_device *ndev; struct rxe_dev *exists; + if (!rxe_initialized) { + pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); + return -EAGAIN; + } + len = sanitize_arg(val, intf, sizeof(intf)); if (!len) { pr_err("add: invalid interface name\n"); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 623129f27f5a..d1fe57ac87f5 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -679,6 +679,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, unsigned int mask; unsigned int length = 0; int i; + struct ib_send_wr *next; while (wr) { mask = wr_opcode_mask(wr->opcode, qp); @@ -695,6 +696,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, break; } + next = wr->next; + length = 0; for (i = 0; i < wr->num_sge; i++) length += wr->sg_list[i].length; @@ -705,7 +708,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, *bad_wr = wr; break; } - wr = wr->next; + wr = next; } rxe_run_task(&qp->req.task, 1); @@ -1075,7 +1078,7 @@ static ssize_t parent_show(struct device *device, struct rxe_dev *rxe = rdma_device_to_drv_device(device, struct rxe_dev, ib_dev); - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); } static DEVICE_ATTR_RO(parent); diff --git a/drivers/infiniband/sw/siw/Kconfig 
b/drivers/infiniband/sw/siw/Kconfig index b622fc62f2cd..3450ba5081df 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -1,6 +1,7 @@ config RDMA_SIW tristate "Software RDMA over TCP/IP (iWARP) driver" depends on INET && INFINIBAND && LIBCRC32C + depends on INFINIBAND_VIRT_DMA select DMA_VIRT_OPS help This driver implements the iWARP RDMA transport over diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index dba4535494ab..4d8bc995b450 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -667,7 +667,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { struct siw_sqe *orq_e = orq_get_tail(qp); - if (orq_e && READ_ONCE(orq_e->flags) == 0) + if (READ_ONCE(orq_e->flags) == 0) return orq_e; return NULL; diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 130b1e31b978..dbbf8c6c16d3 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -66,12 +66,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name) static int dev_id = 1; int rv; + sdev->vendor_part_id = dev_id++; + rv = ib_register_device(base_dev, name); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; } - sdev->vendor_part_id = dev_id++; siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr); @@ -133,7 +134,7 @@ static struct { static int siw_init_cpulist(void) { - int i, num_nodes = num_possible_nodes(); + int i, num_nodes = nr_node_ids; memset(siw_tx_thread, 0, sizeof(siw_tx_thread)); diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index e99983f07663..8bffa6e013fc 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj, mem->perms = rights & IWARP_ACCESS_MASK; kref_init(&mem->ref); - mr->mem = mem; - get_random_bytes(&next, 4); next &= 0x00ffffff; @@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj, kfree(mem); return -ENOMEM; } + + mr->mem = mem; /* Set the STag index part */ mem->stag = id << 8; mr->base_mr.lkey = mr->base_mr.rkey = mem->stag; diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index b4317480cee7..5927ac5923dd 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk) static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size) { - irq_size = roundup_pow_of_two(irq_size); - orq_size = roundup_pow_of_two(orq_size); - + if (irq_size) { + irq_size = roundup_pow_of_two(irq_size); + qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe)); + if (!qp->irq) { + qp->attrs.irq_size = 0; + return -ENOMEM; + } + } + if (orq_size) { + orq_size = roundup_pow_of_two(orq_size); + qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe)); + if (!qp->orq) { + qp->attrs.orq_size = 0; + qp->attrs.irq_size = 0; + vfree(qp->irq); + return -ENOMEM; + } + } qp->attrs.irq_size = irq_size; qp->attrs.orq_size = orq_size; - - qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe)); - if (!qp->irq) { - siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size); - qp->attrs.irq_size = 0; - return -ENOMEM; - } - qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe)); - if (!qp->orq) { - siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size); - qp->attrs.orq_size = 0; - 
qp->attrs.irq_size = 0; - vfree(qp->irq); - return -ENOMEM; - } siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size); return 0; } @@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl) if (ctrl & MPA_V2_RDMA_WRITE_RTR) wqe->sqe.opcode = SIW_OP_WRITE; else if (ctrl & MPA_V2_RDMA_READ_RTR) { - struct siw_sqe *rreq; + struct siw_sqe *rreq = NULL; wqe->sqe.opcode = SIW_OP_READ; spin_lock(&qp->orq_lock); - rreq = orq_get_free(qp); + if (qp->attrs.orq_size) + rreq = orq_get_free(qp); if (rreq) { siw_read_to_orq(rreq, &wqe->sqe); qp->orq_put++; @@ -877,6 +878,96 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe) rreq->num_sge = 1; } +static int siw_activate_tx_from_sq(struct siw_qp *qp) +{ + struct siw_sqe *sqe; + struct siw_wqe *wqe = tx_wqe(qp); + int rv = 1; + + sqe = sq_get_next(qp); + if (!sqe) + return 0; + + memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); + wqe->wr_status = SIW_WR_QUEUED; + + /* First copy SQE to kernel private memory */ + memcpy(&wqe->sqe, sqe, sizeof(*sqe)); + + if (wqe->sqe.opcode >= SIW_NUM_OPCODES) { + rv = -EINVAL; + goto out; + } + if (wqe->sqe.flags & SIW_WQE_INLINE) { + if (wqe->sqe.opcode != SIW_OP_SEND && + wqe->sqe.opcode != SIW_OP_WRITE) { + rv = -EINVAL; + goto out; + } + if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) { + rv = -EINVAL; + goto out; + } + wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1]; + wqe->sqe.sge[0].lkey = 0; + wqe->sqe.num_sge = 1; + } + if (wqe->sqe.flags & SIW_WQE_READ_FENCE) { + /* A READ cannot be fenced */ + if (unlikely(wqe->sqe.opcode == SIW_OP_READ || + wqe->sqe.opcode == + SIW_OP_READ_LOCAL_INV)) { + siw_dbg_qp(qp, "cannot fence read\n"); + rv = -EINVAL; + goto out; + } + spin_lock(&qp->orq_lock); + + if (qp->attrs.orq_size && !siw_orq_empty(qp)) { + qp->tx_ctx.orq_fence = 1; + rv = 0; + } + spin_unlock(&qp->orq_lock); + + } else if (wqe->sqe.opcode == SIW_OP_READ || + wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) { + struct siw_sqe *rreq; + + if (unlikely(!qp->attrs.orq_size)) { + /* We negotiated not to send READ req's */ + rv = -EINVAL; + goto out; + } + wqe->sqe.num_sge = 1; + + spin_lock(&qp->orq_lock); + + rreq = orq_get_free(qp); + if (rreq) { + /* + * Make an immediate copy in ORQ to be ready + * to process loopback READ reply + */ + siw_read_to_orq(rreq, &wqe->sqe); + qp->orq_put++; + } else { + qp->tx_ctx.orq_fence = 1; + rv = 0; + } + spin_unlock(&qp->orq_lock); + } + + /* Clear SQE, can be re-used by application */ + smp_store_mb(sqe->flags, 0); + qp->sq_get++; +out: + if (unlikely(rv < 0)) { + siw_dbg_qp(qp, "error %d\n", rv); + wqe->wr_status = SIW_WR_IDLE; + } + return rv; +} + /* * Must be called with SQ locked. 
* To avoid complete SQ starvation by constant inbound READ requests, @@ -885,133 +976,55 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe) */ int siw_activate_tx(struct siw_qp *qp) { - struct siw_sqe *irqe, *sqe; + struct siw_sqe *irqe; struct siw_wqe *wqe = tx_wqe(qp); - int rv = 1; + + if (!qp->attrs.irq_size) + return siw_activate_tx_from_sq(qp); irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size]; - if (irqe->flags & SIW_WQE_VALID) { - sqe = sq_get_next(qp); + if (!(irqe->flags & SIW_WQE_VALID)) + return siw_activate_tx_from_sq(qp); - /* - * Avoid local WQE processing starvation in case - * of constant inbound READ request stream - */ - if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) { - qp->irq_burst = 0; - goto skip_irq; - } - memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); - wqe->wr_status = SIW_WR_QUEUED; - - /* start READ RESPONSE */ - wqe->sqe.opcode = SIW_OP_READ_RESPONSE; - wqe->sqe.flags = 0; - if (irqe->num_sge) { - wqe->sqe.num_sge = 1; - wqe->sqe.sge[0].length = irqe->sge[0].length; - wqe->sqe.sge[0].laddr = irqe->sge[0].laddr; - wqe->sqe.sge[0].lkey = irqe->sge[0].lkey; - } else { - wqe->sqe.num_sge = 0; - } - - /* Retain original RREQ's message sequence number for - * potential error reporting cases. - */ - wqe->sqe.sge[1].length = irqe->sge[1].length; - - wqe->sqe.rkey = irqe->rkey; - wqe->sqe.raddr = irqe->raddr; - - wqe->processed = 0; - qp->irq_get++; - - /* mark current IRQ entry free */ - smp_store_mb(irqe->flags, 0); - - goto out; + /* + * Avoid local WQE processing starvation in case + * of constant inbound READ request stream + */ + if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) { + qp->irq_burst = 0; + return siw_activate_tx_from_sq(qp); } - sqe = sq_get_next(qp); - if (sqe) { -skip_irq: - memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); - wqe->wr_status = SIW_WR_QUEUED; + memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); + wqe->wr_status = SIW_WR_QUEUED; - /* First copy SQE to kernel private memory */ - memcpy(&wqe->sqe, sqe, sizeof(*sqe)); - - if (wqe->sqe.opcode >= SIW_NUM_OPCODES) { - rv = -EINVAL; - goto out; - } - if (wqe->sqe.flags & SIW_WQE_INLINE) { - if (wqe->sqe.opcode != SIW_OP_SEND && - wqe->sqe.opcode != SIW_OP_WRITE) { - rv = -EINVAL; - goto out; - } - if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) { - rv = -EINVAL; - goto out; - } - wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1]; - wqe->sqe.sge[0].lkey = 0; - wqe->sqe.num_sge = 1; - } - if (wqe->sqe.flags & SIW_WQE_READ_FENCE) { - /* A READ cannot be fenced */ - if (unlikely(wqe->sqe.opcode == SIW_OP_READ || - wqe->sqe.opcode == - SIW_OP_READ_LOCAL_INV)) { - siw_dbg_qp(qp, "cannot fence read\n"); - rv = -EINVAL; - goto out; - } - spin_lock(&qp->orq_lock); - - if (!siw_orq_empty(qp)) { - qp->tx_ctx.orq_fence = 1; - rv = 0; - } - spin_unlock(&qp->orq_lock); - - } else if (wqe->sqe.opcode == SIW_OP_READ || - wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - struct siw_sqe *rreq; - - wqe->sqe.num_sge = 1; - - spin_lock(&qp->orq_lock); - - rreq = orq_get_free(qp); - if (rreq) { - /* - * Make an immediate copy in ORQ to be ready - * to process loopback READ reply - */ - siw_read_to_orq(rreq, &wqe->sqe); - qp->orq_put++; - } else { - qp->tx_ctx.orq_fence = 1; - rv = 0; - } - spin_unlock(&qp->orq_lock); - } - - /* Clear SQE, can be re-used by application */ - smp_store_mb(sqe->flags, 0); - qp->sq_get++; + /* start READ RESPONSE */ + wqe->sqe.opcode = SIW_OP_READ_RESPONSE; + wqe->sqe.flags = 0; + if (irqe->num_sge) { + wqe->sqe.num_sge = 1; + 
wqe->sqe.sge[0].length = irqe->sge[0].length; + wqe->sqe.sge[0].laddr = irqe->sge[0].laddr; + wqe->sqe.sge[0].lkey = irqe->sge[0].lkey; } else { - rv = 0; + wqe->sqe.num_sge = 0; } -out: - if (unlikely(rv < 0)) { - siw_dbg_qp(qp, "error %d\n", rv); - wqe->wr_status = SIW_WR_IDLE; - } - return rv; + + /* Retain original RREQ's message sequence number for + * potential error reporting cases. + */ + wqe->sqe.sge[1].length = irqe->sge[1].length; + + wqe->sqe.rkey = irqe->rkey; + wqe->sqe.raddr = irqe->raddr; + + wqe->processed = 0; + qp->irq_get++; + + /* mark current IRQ entry free */ + smp_store_mb(irqe->flags, 0); + + return 1; } /* diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index c0a887240325..c7c38f7fd29d 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, break; bytes = min(bytes, len); - if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) { + if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) == + bytes) { copied += bytes; offset += bytes; len -= bytes; @@ -679,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx) } spin_lock_irqsave(&qp->sq_lock, flags); + if (unlikely(!qp->attrs.irq_size)) { + run_sq = 0; + goto error_irq; + } if (tx_work->wr_status == SIW_WR_IDLE) { /* * immediately schedule READ response w/o @@ -711,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx) /* RRESP now valid as current TX wqe or placed into IRQ */ smp_store_mb(resp->flags, SIW_WQE_VALID); } else { - pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp), - qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size); +error_irq: + pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n", + qp_id(qp), qp->attrs.irq_size); siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP, RDMAP_ETYPE_REMOTE_OPERATION, @@ -739,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp) struct siw_sqe *orqe; struct siw_wqe *wqe = NULL; + if (unlikely(!qp->attrs.orq_size)) + return -EPROTO; + /* make sure ORQ indices are current */ smp_mb(); @@ -795,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp) */ rv = siw_orqe_start_rx(qp); if (rv) { - pr_warn("siw: [QP %u]: ORQ empty at idx %d\n", - qp_id(qp), qp->orq_get % qp->attrs.orq_size); + pr_warn("siw: [QP %u]: ORQ empty, size %d\n", + qp_id(qp), qp->attrs.orq_size); goto error_term; } rv = siw_rresp_check_ntoh(srx, frx); @@ -1289,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error) wc_status); siw_wqe_put_mem(wqe, SIW_OP_READ); - if (!error) + if (!error) { rv = siw_check_tx_fence(qp); - else - /* Disable current ORQ eleement */ - WRITE_ONCE(orq_get_current(qp)->flags, 0); + } else { + /* Disable current ORQ element */ + if (qp->attrs.orq_size) + WRITE_ONCE(orq_get_current(qp)->flags, 0); + } break; case RDMAP_RDMA_READ_REQ: diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 5d97bba0ce6d..424918eb1cd4 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) { struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct siw_device *sdev = to_siw_dev(pd->device); - struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); + struct siw_mem *mem; int rv = 0; siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); - if (unlikely(!mem || !base_mr)) { + if 
(unlikely(!base_mr)) { pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); return -EINVAL; } + if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); - rv = -EINVAL; - goto out; + return -EINVAL; } + + mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); + if (unlikely(!mem)) { + pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); + return -EINVAL; + } + if (unlikely(mem->pd != pd)) { pr_warn("siw: fastreg: PD mismatch\n"); rv = -EINVAL; @@ -1100,8 +1107,8 @@ next_wqe: /* * RREQ may have already been completed by inbound RRESP! */ - if (tx_type == SIW_OP_READ || - tx_type == SIW_OP_READ_LOCAL_INV) { + if ((tx_type == SIW_OP_READ || + tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) { /* Cleanup pending entry in ORQ */ qp->orq_put--; qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0; diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 1b1a40db529c..2c3704f0f10f 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -387,13 +387,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, if (rv) goto err_out; + num_sqe = attrs->cap.max_send_wr; + num_rqe = attrs->cap.max_recv_wr; + /* All queue indices are derived from modulo operations * on a free running 'get' (consumer) and 'put' (producer) * unsigned counter. Having queue sizes at power of two * avoids handling counter wrap around. */ - num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr); - num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr); + if (num_sqe) + num_sqe = roundup_pow_of_two(num_sqe); + else { + /* Zero sized SQ is not supported */ + rv = -EINVAL; + goto err_out; + } + if (num_rqe) + num_rqe = roundup_pow_of_two(num_rqe); if (qp->kernel_verbs) qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe)); @@ -401,7 +411,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe)); if (qp->sendq == NULL) { - siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe); rv = -ENOMEM; goto err_out_xa; } @@ -434,7 +443,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, vmalloc_user(num_rqe * sizeof(struct siw_rqe)); if (qp->recvq == NULL) { - siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe); rv = -ENOMEM; goto err_out_xa; } @@ -982,9 +990,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, unsigned long flags; int rv = 0; - if (qp->srq) { + if (qp->srq || qp->attrs.rq_size == 0) { *bad_wr = wr; - return -EOPNOTSUPP; /* what else from errno.h? 
*/ + return -EINVAL; } if (!qp->kernel_verbs) { siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n"); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 2aa3457a30ce..50a355738609 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -377,8 +377,12 @@ struct ipoib_dev_priv { struct ipoib_rx_buf *rx_ring; struct ipoib_tx_buf *tx_ring; + /* cyclic ring variables for managing tx_ring, for UD only */ unsigned int tx_head; unsigned int tx_tail; + /* cyclic ring variables for counting overall outstanding send WRs */ + unsigned int global_tx_head; + unsigned int global_tx_tail; struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; struct ib_ud_wr tx_wr; struct ib_wc send_wc[MAX_SEND_CQE]; @@ -511,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open_default(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +void ipoib_ib_dev_stop(struct net_device *dev); void ipoib_ib_dev_up(struct net_device *dev); void ipoib_ib_dev_down(struct net_device *dev); int ipoib_ib_dev_stop_default(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index c59e00a0881f..9bf0fa30df28 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ return; } - if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); netif_stop_queue(dev); @@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ } else { netif_trans_update(dev); ++tx->tx_head; - ++priv->tx_head; + ++priv->global_tx_head; } } @@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) netif_tx_lock(dev); ++tx->tx_tail; - ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -1232,8 +1234,9 @@ timeout: dev_kfree_skb_any(tx_req->skb); netif_tx_lock_bh(p->dev); ++p->tx_tail; - ++priv->tx_tail; - if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) && + ++priv->global_tx_tail; + if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && netif_queue_stopped(p->dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(p->dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index c332b4761816..494f413dc3c6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; if (unlikely(netif_queue_stopped(dev) && - ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) netif_wake_queue(dev); @@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, else 
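The ipoib hunks above replace per-path counters with one global head/tail pair for flow control, so UD and CM sends share a single occupancy count. The stop/wake policy they implement is a classic hysteresis; a distilled sketch, with hypothetical helper names:

/* Stop the net queue when only one free slot remains... */
static bool tx_ring_full(unsigned int head, unsigned int tail,
			 unsigned int sendq_size)
{
	return head - tail == sendq_size - 1;	/* free-running counters */
}

/* ...and wake it only once at least half the ring has drained, so the
 * queue does not thrash between stopped and running. */
static bool tx_ring_can_wake(unsigned int head, unsigned int tail,
			     unsigned int sendq_size)
{
	return head - tail <= sendq_size >> 1;
}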
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; /* increase the tx_head after send success, but use it for queue state */ - if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) { + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); netif_stop_queue(dev); } @@ -662,17 +665,17 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, rc = priv->tx_head; ++priv->tx_head; + ++priv->global_tx_head; } return rc; } -static void __ipoib_reap_ah(struct net_device *dev) +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); struct ipoib_ah *ah, *tah; unsigned long flags; - netif_tx_lock_bh(dev); + netif_tx_lock_bh(priv->dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) @@ -683,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev) } spin_unlock_irqrestore(&priv->lock, flags); - netif_tx_unlock_bh(dev); + netif_tx_unlock_bh(priv->dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); - struct net_device *dev = priv->dev; - __ipoib_reap_ah(dev); + ipoib_reap_dead_ahs(priv); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } -static void ipoib_flush_ah(struct net_device *dev) +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); - ipoib_reap_ah(&priv->ah_reap_task.work); + clear_bit(IPOIB_STOP_REAPER, &priv->flags); + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); } -static void ipoib_stop_ah(struct net_device *dev) +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - set_bit(IPOIB_STOP_REAPER, &priv->flags); - ipoib_flush_ah(dev); + cancel_delayed_work(&priv->ah_reap_task); + /* + * After ipoib_stop_ah_reaper() we always go through + * ipoib_reap_dead_ahs() which ensures the work is really stopped and + * does a final flush out of the dead_ah's list + */ } static int recvs_pending(struct net_device *dev) @@ -807,6 +810,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; + ++priv->global_tx_tail; } for (i = 0; i < ipoib_recvq_size; ++i) { @@ -841,18 +845,6 @@ timeout: return 0; } -int ipoib_ib_dev_stop(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - priv->rn_ops->ndo_stop(dev); - - clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_flush_ah(dev); - - return 0; -} - int ipoib_ib_dev_open_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -896,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev) return -1; } - clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, - round_jiffies_relative(HZ)); - + ipoib_start_ah_reaper(priv); if (priv->rn_ops->ndo_open(dev)) { pr_warn("%s: Failed to open dev\n", dev->name); goto dev_stop; @@ -910,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev) return 0; dev_stop: - set_bit(IPOIB_STOP_REAPER, &priv->flags); - cancel_delayed_work(&priv->ah_reap_task); - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_ib_dev_stop(dev); + 
ipoib_stop_ah_reaper(priv); return -1; } +void ipoib_ib_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->rn_ops->ndo_stop(dev); + + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + ipoib_stop_ah_reaper(priv); +} + void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -1227,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_mcast_dev_flush(dev); if (oper_up) set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); - ipoib_flush_ah(dev); + ipoib_reap_dead_ahs(priv); } if (level >= IPOIB_FLUSH_NORMAL) @@ -1302,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ - ipoib_stop_ah(dev); + ipoib_reap_dead_ahs(priv); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index ac0583ff280d..69ecf37053a8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1188,9 +1188,11 @@ static void ipoib_timeout(struct net_device *dev) ipoib_warn(priv, "transmit timeout: latency %d msecs\n", jiffies_to_msecs(jiffies - dev_trans_start(dev))); - ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", - netif_queue_stopped(dev), - priv->tx_head, priv->tx_tail); + ipoib_warn(priv, + "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", + netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, + priv->global_tx_head, priv->global_tx_tail); + /* XXX reset QP, etc. */ } @@ -1705,7 +1707,7 @@ static int ipoib_dev_init_default(struct net_device *dev) goto out_rx_ring_cleanup; } - /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ + /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ if (ipoib_transport_dev_init(dev, priv->ca)) { pr_warn("%s: ipoib_transport_dev_init failed\n", @@ -1977,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev) /* no more works over the priv->wq */ if (priv->wq) { + /* See ipoib_mcast_carrier_on_task() */ + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); priv->wq = NULL; @@ -2459,6 +2463,8 @@ static struct net_device *ipoib_add_port(const char *format, /* call event handler to ensure pkey in sync */ queue_work(ipoib_workqueue, &priv->flush_heavy); + ndev->rtnl_link_ops = ipoib_get_link_ops(); + result = register_netdev(ndev); if (result) { pr_warn("%s: couldn't register ipoib port %d; error %d\n", diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 38c984d16996..d5a90a66b45c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, return 0; } +static void ipoib_del_child_link(struct net_device *dev, struct list_head *head) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (!priv->parent) + return; + + unregister_netdevice_queue(dev, head); +} + static size_t ipoib_get_size(const struct net_device *dev) { return nla_total_size(2) + /* IFLA_IPOIB_PKEY */ @@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = { .priv_size = sizeof(struct ipoib_dev_priv), .setup = ipoib_setup_common, .newlink = ipoib_new_child_link, + .dellink = 
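The reaper rework above splits the old flush helper into explicit start/stop halves around a self-rearming delayed work. A generic sketch of that lifecycle, assuming a hypothetical struct and the system workqueue rather than ipoib's private one:

struct reaper {
	struct delayed_work task;
	unsigned long flags;
#define STOP_REAPER 0
};

static void reaper_fn(struct work_struct *work)
{
	struct reaper *r = container_of(work, struct reaper, task.work);

	/* ... free whatever is on the dead list ... */

	if (!test_bit(STOP_REAPER, &r->flags))	/* rearm ourselves */
		schedule_delayed_work(&r->task, round_jiffies_relative(HZ));
}

static void reaper_start(struct reaper *r)
{
	clear_bit(STOP_REAPER, &r->flags);
	schedule_delayed_work(&r->task, round_jiffies_relative(HZ));
}

static void reaper_stop(struct reaper *r)
{
	set_bit(STOP_REAPER, &r->flags);	/* forbid rearming */
	cancel_delayed_work(&r->task);
	/* the caller still performs one final synchronous reap */
}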
ipoib_del_child_link, .changelink = ipoib_changelink, .get_size = ipoib_get_size, .fill_info = ipoib_fill_info, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 8ac8e18fbe0c..58ca5e9c6079 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -192,6 +192,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) } priv = ipoib_priv(ndev); + ndev->rtnl_link_ops = ipoib_get_link_ops(); + result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD); if (result && ndev->reg_state == NETREG_UNINITIALIZED) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2cc89a9b9e9b..ea8e611397a3 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, { struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + struct iser_fr_desc *desc; + struct ib_mr_status mr_status; - if (!reg->mem_h) + desc = reg->mem_h; + if (!desc) return; - device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, - reg->mem_h); + /* + * The signature MR cannot be invalidated and reused without checking. + * libiscsi calls the check_protection transport handler only if + * SCSI-Response is received. And the signature MR is not checked if + * the task is completed for some other reason like a timeout or error + * handling. That's why we must check the signature MR here before + * putting it to the free pool. + */ + if (unlikely(desc->sig_protected)) { + desc->sig_protected = false; + ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS, + &mr_status); + } + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc); reg->mem_h = NULL; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..71268d61d2b8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -182,15 +182,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { - dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ib_dev, dma_addr)) goto dma_map_fail; rx_desc->dma_addr = dma_addr; rx_sg = &rx_desc->rx_sg; - rx_sg->addr = rx_desc->dma_addr; + rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); rx_sg->length = ISER_RX_PAYLOAD_SIZE; rx_sg->lkey = device->pd->local_dma_lkey; rx_desc->rx_cqe.done = isert_recv_done; @@ -202,7 +202,7 @@ dma_map_fail: rx_desc = isert_conn->rx_descs; for (j = 0; j < i; j++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); isert_conn->rx_descs = NULL; @@ -223,7 +223,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); @@ -408,10 +408,9 @@ isert_free_login_buf(struct isert_conn *isert_conn) ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); 
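The iser hunk above checks a signature MR before recycling it; there the return value is deliberately ignored because the MR is being retired anyway. For reference, a caller on the normal checking path would inspect mr_status; a hedged sketch of that pattern (error handling abbreviated):

struct ib_mr_status mr_status;
int ret;

ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)) {
	/* a guard/reftag/apptag mismatch was latched by the HCA */
	pr_err("sig error type %d at offset %llu\n",
	       mr_status.sig_err.err_type,
	       (unsigned long long)mr_status.sig_err.sig_err_offset);
}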
kfree(isert_conn->login_rsp_buf); - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, - DMA_FROM_DEVICE); - kfree(isert_conn->login_req_buf); + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); + kfree(isert_conn->login_desc); } static int @@ -420,25 +419,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, { int ret; - isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), + isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), GFP_KERNEL); - if (!isert_conn->login_req_buf) + if (!isert_conn->login_desc) return -ENOMEM; - isert_conn->login_req_dma = ib_dma_map_single(ib_dev, - isert_conn->login_req_buf, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); + isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, + isert_conn->login_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); if (ret) { - isert_err("login_req_dma mapping error: %d\n", ret); - isert_conn->login_req_dma = 0; - goto out_free_login_req_buf; + isert_err("login_desc dma mapping error: %d\n", ret); + isert_conn->login_desc->dma_addr = 0; + goto out_free_login_desc; } isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); if (!isert_conn->login_rsp_buf) { ret = -ENOMEM; - goto out_unmap_login_req_buf; + goto out_unmap_login_desc; } isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, @@ -455,11 +454,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, out_free_login_rsp_buf: kfree(isert_conn->login_rsp_buf); -out_unmap_login_req_buf: - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); -out_free_login_req_buf: - kfree(isert_conn->login_req_buf); +out_unmap_login_desc: + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); +out_free_login_desc: + kfree(isert_conn->login_desc); return ret; } @@ -578,7 +577,7 @@ isert_connect_release(struct isert_conn *isert_conn) ib_destroy_qp(isert_conn->qp); } - if (isert_conn->login_req_buf) + if (isert_conn->login_desc) isert_free_login_buf(isert_conn); isert_device_put(device); @@ -964,17 +963,18 @@ isert_login_post_recv(struct isert_conn *isert_conn) int ret; memset(&sge, 0, sizeof(struct ib_sge)); - sge.addr = isert_conn->login_req_dma; + sge.addr = isert_conn->login_desc->dma_addr + + isert_get_hdr_offset(isert_conn->login_desc); sge.length = ISER_RX_PAYLOAD_SIZE; sge.lkey = isert_conn->device->pd->local_dma_lkey; isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); - isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; + isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); - rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; + rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; @@ -1051,7 +1051,7 @@ post_send: static void isert_rx_login_req(struct isert_conn *isert_conn) { - struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; + struct iser_rx_desc *rx_desc = isert_conn->login_desc; int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = conn->conn_login; @@ -1063,7 +1063,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) if (login->first_request) { struct iscsi_login_req *login_req = - (struct iscsi_login_req *)&rx_desc->iscsi_header; + 
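The login buffer rework above keeps the usual kernel unwind discipline: one label per successfully acquired resource, released in reverse order. Reduced to its skeleton (identifiers follow the patch, surrounding context stripped):

desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
	return -ENOMEM;

dma = ib_dma_map_single(ib_dev, desc->buf, ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma)) {
	ret = -ENOMEM;
	goto out_free_desc;
}

rsp = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!rsp) {
	ret = -ENOMEM;
	goto out_unmap_desc;
}
return 0;

out_unmap_desc:
	ib_dma_unmap_single(ib_dev, dma, ISER_RX_SIZE, DMA_FROM_DEVICE);
out_free_desc:
	kfree(desc);
	return ret;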
(struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); /* * Setup the initial iscsi_login values from the leading * login request PDU. @@ -1082,13 +1082,13 @@ isert_rx_login_req(struct isert_conn *isert_conn) login->tsih = be16_to_cpu(login_req->tsih); } - memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); isert_dbg("Using login payload size: %d, rx_buflen: %d " "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, MAX_KEY_VALUE_PAIRS); - memcpy(login->req_buf, &rx_desc->data[0], size); + memcpy(login->req_buf, isert_get_data(rx_desc), size); if (login->first_request) { complete(&isert_conn->login_comp); @@ -1153,14 +1153,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (imm_data_len != data_len) { sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, - &rx_desc->data[0], imm_data_len); + isert_get_data(rx_desc), imm_data_len); isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", sg_nents, imm_data_len); } else { sg_init_table(&isert_cmd->sg, 1); cmd->se_cmd.t_data_sg = &isert_cmd->sg; cmd->se_cmd.t_data_nents = 1; - sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), + imm_data_len); isert_dbg("Transfer Immediate imm_data_len: %d\n", imm_data_len); } @@ -1229,9 +1230,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, } isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " "sg_nents: %u from %p %u\n", sg_start, sg_off, - sg_nents, &rx_desc->data[0], unsol_data_len); + sg_nents, isert_get_data(rx_desc), unsol_data_len); - sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), unsol_data_len); rc = iscsit_check_dataout_payload(cmd, hdr, false); @@ -1290,7 +1291,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd } cmd->text_in_ptr = text_in; - memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); + memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); return iscsit_process_text_cmd(conn, cmd, hdr); } @@ -1300,7 +1301,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, uint32_t read_stag, uint64_t read_va, uint32_t write_stag, uint64_t write_va) { - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); struct iscsi_conn *conn = isert_conn->conn; struct iscsi_cmd *cmd; struct isert_cmd *isert_cmd; @@ -1398,8 +1399,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct isert_conn *isert_conn = wc->qp->qp_context; struct ib_device *ib_dev = isert_conn->cm_id->device; struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; - struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); + struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); uint64_t read_va = 0, write_va = 0; uint32_t read_stag = 0, write_stag = 0; @@ -1413,7 +1414,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) rx_desc->in_use = true; ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, @@ -1448,7 +1449,7 @@ 
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) read_stag, read_va, write_stag, write_va); ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -1462,8 +1463,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } - ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; @@ -1478,8 +1479,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) complete(&isert_conn->login_req_comp); mutex_unlock(&isert_conn->mutex); - ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 3b296bac4f60..d267a6d60d87 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -59,9 +59,11 @@ ISERT_MAX_TX_MISC_PDUS + \ ISERT_MAX_RX_MISC_PDUS) -#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ - (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe) + sizeof(bool))) +/* + * RX size is default of 8k plus headers, but data needs to align to + * 512 boundary, so use 1024 to have the extra space for alignment. + */ +#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024) #define ISCSI_ISER_SG_TABLESIZE 256 @@ -80,21 +82,41 @@ enum iser_conn_state { }; struct iser_rx_desc { - struct iser_ctrl iser_header; - struct iscsi_hdr iscsi_header; - char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; + char buf[ISER_RX_SIZE]; u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; bool in_use; - char pad[ISER_RX_PAD_SIZE]; -} __packed; +}; static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) { return container_of(cqe, struct iser_rx_desc, rx_cqe); } +static void *isert_get_iser_hdr(struct iser_rx_desc *desc) +{ + return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN; +} + +static size_t isert_get_hdr_offset(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) - (void *)desc->buf; +} + +static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl); +} + +static void *isert_get_data(struct iser_rx_desc *desc) +{ + void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN; + + WARN_ON((uintptr_t)data & 511); + return data; +} + struct iser_tx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; @@ -141,9 +163,8 @@ struct isert_conn { u32 responder_resources; u32 initiator_depth; bool pi_support; - struct iser_rx_desc *login_req_buf; + struct iser_rx_desc *login_desc; char *login_rsp_buf; - u64 login_req_dma; int login_req_len; u64 login_rsp_dma; struct iser_rx_desc *rx_descs; diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 02b92e3cd9a8..2822ca5e8277 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2373,6 +2373,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n", dev_name(&sdev->device->dev), port_num); mutex_unlock(&sport->mutex); + ret = -EINVAL; goto 
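The new ib_isert.h helpers above place the iSER and iSCSI headers so that the payload following them lands on a 512-byte boundary inside the flat buf[]. The arithmetic, spelled out as a standalone sketch:

#include <linux/kernel.h>	/* PTR_ALIGN */

/* First address >= buf + hdrs for which (addr & 511) == 0, minus the
 * header length, so the headers end exactly on a 512-byte boundary. */
static void *hdr_start(char *buf, size_t hdrs)
{
	return PTR_ALIGN(buf + hdrs, 512) - hdrs;
}

/* Worst case the alignment skips 511 bytes on top of the headers
 * themselves; that is why ISER_RX_SIZE reserves a full 1024 bytes of
 * headroom over ISCSI_DEF_MAX_RECV_SEG_LEN rather than just
 * ISER_HEADERS_LEN. */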
reject; } diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index cb6e3a5f509c..0d57e51b8ba1 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &client->fasync); } -static int evdev_flush(struct file *file, fl_owner_t id) -{ - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; - - mutex_lock(&evdev->mutex); - - if (evdev->exist && !client->revoked) - input_flush_device(&evdev->handle, file); - - mutex_unlock(&evdev->mutex); - return 0; -} - static void evdev_free(struct device *dev) { struct evdev *evdev = container_of(dev, struct evdev, dev); @@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file) unsigned int i; mutex_lock(&evdev->mutex); + + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); + evdev_ungrab(evdev, client); mutex_unlock(&evdev->mutex); @@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush, .llseek = no_llseek, }; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index a2b5fbba2d3b..430dc6975004 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, if (IS_ERR(abspam)) return PTR_ERR(abspam); - for (i = 0; i < joydev->nabs; i++) { + for (i = 0; i < len && i < joydev->nabs; i++) { if (abspam[i] > ABS_MAX) { retval = -EINVAL; goto out; @@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, int i; int retval = 0; + if (len % sizeof(*keypam)) + return -EINVAL; + len = min(len, sizeof(joydev->keypam)); /* Validate the map. 
*/ @@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, if (IS_ERR(keypam)) return PTR_ERR(keypam); - for (i = 0; i < joydev->nkey; i++) { + for (i = 0; i < (len / 2) && i < joydev->nkey; i++) { if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) { retval = -EINVAL; goto out; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6b40a1c68f9f..e5f1e3cf9179 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -215,9 +215,17 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -241,6 +249,7 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, @@ -295,6 +304,10 @@ static const struct xpad_device { { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, + { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, + { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, @@ -418,6 +431,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance 
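The joydev fixes above tighten two independent bounds on ioctl-supplied maps: the byte count must be a whole number of entries, and the copy loop must respect both the user-supplied length and the device's own map size. A distilled sketch of those rules (function name hypothetical):

#include <linux/input.h>	/* KEY_MAX, BTN_MISC */

static int validate_btnmap(const __u16 *keypam, size_t len,
			   unsigned int nkey)
{
	size_t i;

	if (len % sizeof(*keypam))	/* reject truncated entries */
		return -EINVAL;

	/* iterate over min(user entries, device slots), never beyond */
	for (i = 0; i < len / sizeof(*keypam) && i < nkey; i++)
		if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC)
			return -EINVAL;

	return 0;
}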
pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ @@ -427,8 +441,12 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ + XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ { } }; @@ -458,6 +476,16 @@ static const u8 xboxone_fw2015_init[] = { 0x05, 0x20, 0x00, 0x01, 0x00 }; +/* + * This packet is required for Xbox One S (0x045e:0x02ea) + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to + * initialize the controller that was previously used in + * Bluetooth mode. + */ +static const u8 xboxone_s_init[] = { + 0x05, 0x20, 0x00, 0x0f, 0x06 +}; + /* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads @@ -516,6 +544,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index d38398526965..a4b7422de534 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -48,6 +48,7 @@ #include <linux/efi.h> #include <linux/input.h> #include <linux/input/mt.h> +#include <linux/ktime.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/spinlock.h> @@ -400,7 +401,7 @@ struct applespi_data { unsigned int cmd_msg_cntr; /* lock to protect the above parameters and flags below */ spinlock_t cmd_msg_lock; - bool cmd_msg_queued; + ktime_t cmd_msg_queued; enum applespi_evt_type cmd_evt_type; struct led_classdev backlight_info; @@ -716,7 +717,7 @@ static void applespi_msg_complete(struct applespi_data *applespi, wake_up_all(&applespi->drain_complete); if (is_write_msg) { - applespi->cmd_msg_queued = false; + applespi->cmd_msg_queued = 0; applespi_send_cmd_msg(applespi); } @@ -758,8 +759,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi) return 0; /* check whether send is in progress */ - if (applespi->cmd_msg_queued) - return 0; + if (applespi->cmd_msg_queued) { + if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000) + return 0; + + dev_warn(&applespi->spi->dev, "Command %d timed out\n", + applespi->cmd_evt_type); + + applespi->cmd_msg_queued = 0; + applespi->write_active = false; + } /* set up packet */ memset(packet, 0, APPLESPI_PACKET_SIZE); @@ -856,7 +865,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi) return sts; } - applespi->cmd_msg_queued = true; + applespi->cmd_msg_queued = ktime_get_coarse(); applespi->write_active = true; return 0; @@ -1908,7 +1917,7 @@ static int __maybe_unused applespi_resume(struct device *dev) applespi->drain = false; 
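The applespi change above turns the boolean "command queued" flag into a timestamp so a stuck transfer can be detected and retired. The core test, as a hedged standalone sketch:

#include <linux/ktime.h>

/* 0 means nothing in flight; otherwise the coarse time the write was
 * queued. Threshold of one second follows the patch. */
static bool cmd_timed_out(ktime_t queued_at)
{
	return queued_at && ktime_ms_delta(ktime_get(), queued_at) >= 1000;
}

/* on queueing:   queued_at = ktime_get_coarse();  (cheap, tick resolution)
 * on completion: queued_at = 0;
 * before a new send: if cmd_timed_out(queued_at), clear state and retry */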
applespi->have_cl_led_on = false; applespi->have_bl_level = 0; - applespi->cmd_msg_queued = false; + applespi->cmd_msg_queued = 0; applespi->read_active = false; applespi->write_active = false; diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 8d4d9786cc74..cae262b6ff39 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -183,6 +183,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev, "changed: [r%d c%d]: byte %02x\n", row, col, new_state); + input_event(idev, EV_MSC, MSC_SCAN, pos); input_report_key(idev, keycodes[pos], new_state); } diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c index b0ead7199c40..a69dcc3bd30c 100644 --- a/drivers/input/keyboard/dlink-dir685-touchkeys.c +++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c @@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match); static struct i2c_driver dir685_tk_i2c_driver = { .driver = { - .name = "dlin-dir685-touchkeys", + .name = "dlink-dir685-touchkeys", .of_match_table = of_match_ptr(dir685_tk_of_match), }, .probe = dir685_tk_probe, diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index 7c70492d9d6b..f831f01501d5 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) } keypad->irq = platform_get_irq(pdev, 0); - if (!keypad->irq) { - err = -ENXIO; + if (keypad->irq < 0) { + err = keypad->irq; goto failed_free; } diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index 63d5e488137d..e9fa1423f136 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int nspire_keypad_chip_init(struct nspire_keypad *keypad) +static int nspire_keypad_open(struct input_dev *input) { + struct nspire_keypad *keypad = input_get_drvdata(input); unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; + int error; + + error = clk_prepare_enable(keypad->clk); + if (error) + return error; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) @@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); - /* Disable GPIO interrupts to prevent hanging on touchpad */ - /* Possibly used to detect touchpad events */ - writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); - /* Acknowledge existing interrupts */ - writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); - - return 0; -} - -static int nspire_keypad_open(struct input_dev *input) -{ - struct nspire_keypad *keypad = input_get_drvdata(input); - int error; - - error = clk_prepare_enable(keypad->clk); - if (error) - return error; - - error = nspire_keypad_chip_init(keypad); - if (error) { - clk_disable_unprepare(keypad->clk); - return error; - } - return 0; } @@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + clk_disable_unprepare(keypad->clk); } @@ -210,6 +197,25 @@ static int 
nspire_keypad_probe(struct platform_device *pdev) return -ENOMEM; } + error = clk_prepare_enable(keypad->clk); + if (error) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return error; + } + + /* Disable interrupts */ + writel(0, keypad->reg_base + KEYPAD_INTMSK); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_INT); + + /* Disable GPIO interrupts to prevent hanging on touchpad */ + /* Possibly used to detect touchpad events */ + writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); + /* Acknowledge existing GPIO interrupts */ + writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); + + clk_disable_unprepare(keypad->clk); + input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 94c94d7f5155..dd16f7b3c7ef 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -186,12 +186,8 @@ static int omap4_keypad_open(struct input_dev *input) return 0; } -static void omap4_keypad_close(struct input_dev *input) +static void omap4_keypad_stop(struct omap4_keypad *keypad_data) { - struct omap4_keypad *keypad_data = input_get_drvdata(input); - - disable_irq(keypad_data->irq); - /* Disable interrupts and wake-up events */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, OMAP4_VAL_IRQDISABLE); @@ -200,7 +196,15 @@ static void omap4_keypad_close(struct input_dev *input) /* clear pending interrupts */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); +} +static void omap4_keypad_close(struct input_dev *input) +{ + struct omap4_keypad *keypad_data; + + keypad_data = input_get_drvdata(input); + disable_irq(keypad_data->irq); + omap4_keypad_stop(keypad_data); enable_irq(keypad_data->irq); pm_runtime_put_sync(input->dev.parent); @@ -223,13 +227,37 @@ static int omap4_keypad_parse_dt(struct device *dev, return 0; } +static int omap4_keypad_check_revision(struct device *dev, + struct omap4_keypad *keypad_data) +{ + unsigned int rev; + + rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION); + rev &= 0x03 << 30; + rev >>= 30; + switch (rev) { + case KBD_REVISION_OMAP4: + keypad_data->reg_offset = 0x00; + keypad_data->irqreg_offset = 0x00; + break; + case KBD_REVISION_OMAP5: + keypad_data->reg_offset = 0x10; + keypad_data->irqreg_offset = 0x0c; + break; + default: + dev_err(dev, "Keypad reports unsupported revision %d", rev); + return -EINVAL; + } + + return 0; +} + static int omap4_keypad_probe(struct platform_device *pdev) { struct omap4_keypad *keypad_data; struct input_dev *input_dev; struct resource *res; unsigned int max_keys; - int rev; int irq; int error; @@ -240,10 +268,8 @@ static int omap4_keypad_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "no keyboard irq assigned\n"); - return -EINVAL; - } + if (irq < 0) + return irq; keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL); if (!keypad_data) { @@ -271,41 +297,33 @@ static int omap4_keypad_probe(struct platform_device *pdev) goto err_release_mem; } + pm_runtime_enable(&pdev->dev); /* * Enable clocks for the keypad module so that we can read * revision register. 
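The nspire probe code above quiesces the block before the input device can ever be opened: briefly clock the IP, mask and acknowledge everything, then unclock it again. The sequence in isolation (register names hypothetical, variables assumed from the probe context):

struct clk *clk;	/* from devm_clk_get() */
void __iomem *base;	/* from ioremap of the keypad resource */
int err;

err = clk_prepare_enable(clk);
if (err)
	return err;

writel(0, base + REG_INTMSK);	/* mask all keypad interrupts */
writel(~0, base + REG_INTSTS);	/* ack anything already latched */

clk_disable_unprepare(clk);	/* leave the block quiet until open() */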
*/ - pm_runtime_enable(&pdev->dev); error = pm_runtime_get_sync(&pdev->dev); if (error) { dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n"); - goto err_unmap; - } - rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION); - rev &= 0x03 << 30; - rev >>= 30; - switch (rev) { - case KBD_REVISION_OMAP4: - keypad_data->reg_offset = 0x00; - keypad_data->irqreg_offset = 0x00; - break; - case KBD_REVISION_OMAP5: - keypad_data->reg_offset = 0x10; - keypad_data->irqreg_offset = 0x0c; - break; - default: - dev_err(&pdev->dev, - "Keypad reports unsupported revision %d", rev); - error = -EINVAL; - goto err_pm_put_sync; + pm_runtime_put_noidle(&pdev->dev); + } else { + error = omap4_keypad_check_revision(&pdev->dev, + keypad_data); + if (!error) { + /* Ensure device does not raise interrupts */ + omap4_keypad_stop(keypad_data); + } + pm_runtime_put_sync(&pdev->dev); } + if (error) + goto err_pm_disable; /* input device allocation */ keypad_data->input = input_dev = input_allocate_device(); if (!input_dev) { error = -ENOMEM; - goto err_pm_put_sync; + goto err_pm_disable; } input_dev->name = pdev->name; @@ -351,28 +369,25 @@ static int omap4_keypad_probe(struct platform_device *pdev) goto err_free_keymap; } - device_init_wakeup(&pdev->dev, true); - pm_runtime_put_sync(&pdev->dev); - error = input_register_device(keypad_data->input); if (error < 0) { dev_err(&pdev->dev, "failed to register input device\n"); - goto err_pm_disable; + goto err_free_irq; } + device_init_wakeup(&pdev->dev, true); platform_set_drvdata(pdev, keypad_data); + return 0; -err_pm_disable: - pm_runtime_disable(&pdev->dev); +err_free_irq: free_irq(keypad_data->irq, keypad_data); err_free_keymap: kfree(keypad_data->keymap); err_free_input: input_free_device(input_dev); -err_pm_put_sync: - pm_runtime_put_sync(&pdev->dev); -err_unmap: +err_pm_disable: + pm_runtime_disable(&pdev->dev); iounmap(keypad_data->base); err_release_mem: release_mem_region(res->start, resource_size(res)); diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index 27126e621eb6..d450f11b98a7 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c @@ -99,7 +99,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio, switch (data) { case SUNKBD_RET_RESET: - schedule_work(&sunkbd->tq); + if (sunkbd->enabled) + schedule_work(&sunkbd->tq); sunkbd->reset = -1; break; @@ -200,16 +201,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) } /* - * sunkbd_reinit() sets leds and beeps to a state the computer remembers they - * were in. + * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers + * they were in. */ -static void sunkbd_reinit(struct work_struct *work) +static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd) { - struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); - - wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); - serio_write(sunkbd->serio, SUNKBD_CMD_SETLED); serio_write(sunkbd->serio, (!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) | @@ -222,11 +219,39 @@ static void sunkbd_reinit(struct work_struct *work) SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd)); } + +/* + * sunkbd_reinit() waits for the keyboard reset to complete and restores the + * state of leds and beeps.
+ */ + +static void sunkbd_reinit(struct work_struct *work) +{ + struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); + + /* + * It is OK that we check sunkbd->enabled without pausing serio, + * as we only want to catch true->false transition that will + * happen once and we will be woken up for it. + */ + wait_event_interruptible_timeout(sunkbd->wait, + sunkbd->reset >= 0 || !sunkbd->enabled, + HZ); + + if (sunkbd->reset >= 0 && sunkbd->enabled) + sunkbd_set_leds_beeps(sunkbd); +} + static void sunkbd_enable(struct sunkbd *sunkbd, bool enable) { serio_pause_rx(sunkbd->serio); sunkbd->enabled = enable; serio_continue_rx(sunkbd->serio); + + if (!enable) { + wake_up_interruptible(&sunkbd->wait); + cancel_work_sync(&sunkbd->tq); + } } /* diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c index 14b55bacdd0f..fb078e049413 100644 --- a/drivers/input/keyboard/tm2-touchkey.c +++ b/drivers/input/keyboard/tm2-touchkey.c @@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = { .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF, }; +static const struct touchkey_variant tc360_touchkey_variant = { + .keycode_reg = 0x00, + .base_reg = 0x00, + .fixed_regulator = true, + .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON, + .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF, +}; + static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { @@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = { }, { .compatible = "cypress,aries-touchkey", .data = &aries_touchkey_variant, + }, { + .compatible = "coreriver,tc360-touchkey", + .data = &tc360_touchkey_variant, }, { }, }; diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index af3a6824f1a4..77e0743a3cf8 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -50,7 +50,7 @@ struct twl4030_keypad { bool autorepeat; unsigned int n_rows; unsigned int n_cols; - unsigned int irq; + int irq; struct device *dbg_dev; struct input_dev *input; @@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev) } kp->irq = platform_get_irq(pdev, 0); - if (!kp->irq) { - dev_err(&pdev->dev, "no keyboard irq assigned\n"); - return -EINVAL; - } + if (kp->irq < 0) + return kp->irq; error = matrix_keypad_build_keymap(keymap_data, NULL, TWL4030_MAX_ROWS, diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 5fe92d4ba3f0..4cc4e8ff42b3 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -696,7 +696,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, struct input_dev *input_dev; const struct adxl34x_platform_data *pdata; int err, range, i; - unsigned char revid; + int revid; if (!irq) { dev_err(dev, "no IRQ?\n"); diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 305f0160506a..8a36d78fed63 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(char *buffer, { pr_debug("%s()\n", __func__); - return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg); + return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg); } static int ati_remote2_set_mode_mask(const char *val, @@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(char *buffer, { pr_debug("%s()\n", __func__); - return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg); + return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg); } 
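The sunkbd teardown above is a small case study in stopping a worker that an ISR can requeue: flip the enable flag under the serio lock so the interrupt path stops scheduling, wake any sleeper, then cancel synchronously. As a sketch (helper name hypothetical, fields as in the driver):

static void sunkbd_disable(struct sunkbd *sunkbd)
{
	serio_pause_rx(sunkbd->serio);	/* fence against the interrupt path */
	sunkbd->enabled = false;
	serio_continue_rx(sunkbd->serio);

	wake_up_interruptible(&sunkbd->wait);	/* unblock sunkbd_reinit() */
	cancel_work_sync(&sunkbd->tq);	/* safe: ISR now refuses to requeue */
}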
static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK; diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index c09b9628ad34..c872ba579b03 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -568,12 +568,15 @@ static int cm109_input_open(struct input_dev *idev) dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->ctl_data->byte[HID_OR3] = 0x00; + dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); - if (error) + if (error) { + dev->ctl_urb_pending = 0; dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); - else + } else { dev->open = 1; + } mutex_unlock(&dev->pm_mutex); diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index c1b524ab4623..ba50f5713423 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa, memset(&cmd, 0, sizeof(cmd)); put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr); - put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2); + put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length); cmd.head.report_id = PIP_APP_CMD_REPORT_ID; cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE; put_unaligned_le16(read_offset, &cmd.read_offset); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 8719da540383..196e8505dd8d 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; bool contact_valid, hover_event; + pm_wakeup_event(&data->client->dev, 0); + hover_event = hover_info & 0x40; for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & (1U << (3 + i)); @@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; int x, y; + pm_wakeup_event(&data->client->dev, 0); + if (!data->tp_input) { dev_warn_once(&data->client->dev, "received a trackpoint report while no trackpoint device has been created. 
Please report upstream.\n"); @@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) static irqreturn_t elan_isr(int irq, void *dev_id) { struct elan_tp_data *data = dev_id; - struct device *dev = &data->client->dev; int error; u8 report[ETP_MAX_REPORT_LEN]; @@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id) if (error) goto out; - pm_wakeup_event(dev, 0); - switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: elan_report_absolute(data, report); @@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id) elan_report_trackpoint(data, report); break; default: - dev_err(dev, "invalid report id data (%x)\n", + dev_err(&data->client->dev, "invalid report id data (%x)\n", report[ETP_REPORT_ID_OFFSET]); } diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d8434b7b623..053fe2da1e08 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -89,6 +89,47 @@ static int elantech_ps2_command(struct psmouse *psmouse, return rc; } +/* + * Send an Elantech style special command to read 3 bytes from a register + */ +static int elantech_read_reg_params(struct psmouse *psmouse, u8 reg, u8 *param) +{ + if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) || + elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, reg) || + elantech_ps2_command(psmouse, param, PSMOUSE_CMD_GETINFO)) { + psmouse_err(psmouse, + "failed to read register %#02x\n", reg); + return -EIO; + } + + return 0; +} + +/* + * Send an Elantech style special command to write a register with a parameter + */ +static int elantech_write_reg_params(struct psmouse *psmouse, u8 reg, u8 *param) +{ + if (elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, ETP_REGISTER_READWRITE) || + elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, reg) || + elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, param[0]) || + elantech_ps2_command(psmouse, NULL, ETP_PS2_CUSTOM_COMMAND) || + elantech_ps2_command(psmouse, NULL, param[1]) || + elantech_ps2_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11)) { + psmouse_err(psmouse, + "failed to write register %#02x with value %#02x%#02x\n", + reg, param[0], param[1]); + return -EIO; + } + + return 0; +} + /* * Send an Elantech style special command to read a value from a register */ @@ -1529,19 +1570,35 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = { { } }; +/* + * Change Report id 0x5E to 0x5F. + */ +static int elantech_change_report_id(struct psmouse *psmouse) +{ + unsigned char param[2] = { 0x10, 0x03 }; + + if (elantech_write_reg_params(psmouse, 0x7, param) || + elantech_read_reg_params(psmouse, 0x7, param) || + param[0] != 0x10 || param[1] != 0x03) { + psmouse_err(psmouse, "Unable to change report ID to 0x5f.\n"); + return -EIO; + } + + return 0; +} /* * determine hardware version and set some properties according to it. */ static int elantech_set_properties(struct elantech_device_info *info) { /* This represents the version of IC body. */ - int ver = (info->fw_version & 0x0f0000) >> 16; + info->ic_version = (info->fw_version & 0x0f0000) >> 16; /* Early version of Elan touchpads doesn't obey the rule. 
*/ if (info->fw_version < 0x020030 || info->fw_version == 0x020600) info->hw_version = 1; else { - switch (ver) { + switch (info->ic_version) { case 2: case 4: info->hw_version = 2; @@ -1557,6 +1614,11 @@ static int elantech_set_properties(struct elantech_device_info *info) } } + /* Get information pattern for hw_version 4 */ + info->pattern = 0x00; + if (info->ic_version == 0x0f && (info->fw_version & 0xff) <= 0x02) + info->pattern = info->fw_version & 0xff; + /* decide which send_cmd we're gonna use early */ info->send_cmd = info->hw_version >= 3 ? elantech_send_cmd : synaptics_send_cmd; @@ -1598,6 +1660,7 @@ static int elantech_query_info(struct psmouse *psmouse, { unsigned char param[3]; unsigned char traces; + unsigned char ic_body[3]; memset(info, 0, sizeof(*info)); @@ -1640,6 +1703,21 @@ static int elantech_query_info(struct psmouse *psmouse, info->samples[2]); } + if (info->pattern > 0x00 && info->ic_version == 0xf) { + if (info->send_cmd(psmouse, ETP_ICBODY_QUERY, ic_body)) { + psmouse_err(psmouse, "failed to query ic body\n"); + return -EINVAL; + } + info->ic_version = be16_to_cpup((__be16 *)ic_body); + psmouse_info(psmouse, + "Elan ic body: %#04x, current fw version: %#02x\n", + info->ic_version, ic_body[2]); + } + + info->product_id = be16_to_cpup((__be16 *)info->samples); + if (info->pattern == 0x00) + info->product_id &= 0xff; + if (info->samples[1] == 0x74 && info->hw_version == 0x03) { /* * This module has a bug which makes absolute mode @@ -1654,6 +1732,23 @@ static int elantech_query_info(struct psmouse *psmouse, /* The MSB indicates the presence of the trackpoint */ info->has_trackpoint = (info->capabilities[0] & 0x80) == 0x80; + if (info->has_trackpoint && info->ic_version == 0x0011 && + (info->product_id == 0x08 || info->product_id == 0x09 || + info->product_id == 0x0d || info->product_id == 0x0e)) { + /* + * This module has a bug which makes trackpoint in SMBus + * mode return invalid data unless trackpoint is switched + * from using 0x5e reports to 0x5f. If we are not able to + * make the switch, let's abort initialization so we'll be + * using standard PS/2 protocol. 
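The IC-body query added above returns three bytes: a big-endian 16-bit IC version followed by a firmware byte. A hedged decode sketch using the unaligned helpers (the patch itself uses be16_to_cpup on a stack buffer, which is equivalent here):

#include <asm/unaligned.h>

static unsigned int decode_ic_body(const u8 resp[3], u8 *fw_byte)
{
	*fw_byte = resp[2];			/* current firmware version */
	return get_unaligned_be16(resp);	/* bytes 0..1, big endian */
}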
+ */ + if (elantech_change_report_id(psmouse)) { + psmouse_info(psmouse, + "Trackpoint report is broken, forcing standard PS/2 protocol\n"); + return -ENODEV; + } + } + info->x_res = 31; info->y_res = 31; if (info->hw_version == 4) { diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index e0a3e59d4f1b..571e6ca11d33 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -18,6 +18,7 @@ #define ETP_CAPABILITIES_QUERY 0x02 #define ETP_SAMPLE_QUERY 0x03 #define ETP_RESOLUTION_QUERY 0x04 +#define ETP_ICBODY_QUERY 0x05 /* * Command values for register reading or writing @@ -140,7 +141,10 @@ struct elantech_device_info { unsigned char samples[3]; unsigned char debug; unsigned char hw_version; + unsigned char pattern; unsigned int fw_version; + unsigned int ic_version; + unsigned int product_id; unsigned int x_min; unsigned int y_min; unsigned int x_max; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 527ae0b9a191..0b4a3039f312 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index e99d9bf1a267..e78c4c7eda34 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, fsp_reg_write_enable(psmouse, false); - return count; + return retval; } PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 4d2036209b45..4b81b2d0fe06 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { "LEN005b", /* P50 */ "LEN005e", /* T560 */ "LEN006c", /* T470s */ + "LEN007a", /* T470s */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ @@ -178,6 +179,7 @@ static const char * const smbus_pnp_ids[] = { "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN0099", /* X1 Extreme 1st */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 3eefee2ee2a1..ef2fa0905208 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -17,10 +17,12 @@ #include "trackpoint.h" static const char * const trackpoint_variants[] = { - [TP_VARIANT_IBM] = "IBM", - [TP_VARIANT_ALPS] = "ALPS", - [TP_VARIANT_ELAN] = "Elan", - [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics", + [TP_VARIANT_SYNAPTICS] = "Synaptics", }; /* @@ -280,6 +282,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, case TP_VARIANT_ALPS: case TP_VARIANT_ELAN: case TP_VARIANT_NXP: + case TP_VARIANT_JYT_SYNAPTICS: + case TP_VARIANT_SYNAPTICS: if (variant_id) *variant_id = param[0]; if (firmware_id) diff --git a/drivers/input/mouse/trackpoint.h 
b/drivers/input/mouse/trackpoint.h index 5cb93ed26085..eb5412904fe0 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -24,10 +24,12 @@ * 0x01 was the original IBM trackpoint, others implement very limited * subset of trackpoint features. */ -#define TP_VARIANT_IBM 0x01 -#define TP_VARIANT_ALPS 0x02 -#define TP_VARIANT_ELAN 0x03 -#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_JYT_SYNAPTICS 0x05 +#define TP_VARIANT_SYNAPTICS 0x06 /* * Commands diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 190b9974526b..258d5fe3d395 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) if (count) { kfree(attn_data.data); - attn_data.data = NULL; + drvdata->attn_data.data = NULL; } if (!kfifo_is_empty(&drvdata->attn_fifo)) @@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev) if (data->input) { rmi_driver_set_input_name(rmi_dev, data->input); if (!rmi_dev->xport->input) { - if (input_register_device(data->input)) { + retval = input_register_device(data->input); + if (retval) { dev_err(dev, "%s: Failed to register input device.\n", __func__); goto err_destroy_functions; diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index e1423f7648d6..4c039e4125d9 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister); static LIST_HEAD(hil_mlcs); static DEFINE_RWLOCK(hil_mlcs_lock); static struct timer_list hil_mlcs_kicker; -static int hil_mlcs_probe; +static int hil_mlcs_probe, hil_mlc_stop; static void hil_mlcs_process(unsigned long unused); static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0); @@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc) if (!mlc->ostarted) { mlc->ostarted = 1; mlc->opacket = pack; - mlc->out(mlc); + rc = mlc->out(mlc); nextidx = HILSEN_DOZE; write_unlock_irqrestore(&mlc->lock, flags); + if (rc) { + hil_mlc_stop = 1; + return 1; + } break; } mlc->ostarted = 0; @@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc) case HILSE_CTS: write_lock_irqsave(&mlc->lock, flags); - nextidx = mlc->cts(mlc) ? node->bad : node->good; + rc = mlc->cts(mlc); + nextidx = rc ? node->bad : node->good; write_unlock_irqrestore(&mlc->lock, flags); + if (rc) { + hil_mlc_stop = 1; + return 1; + } break; default: @@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused) static void hil_mlcs_timer(struct timer_list *unused) { + if (hil_mlc_stop) { + /* could not send packet - stop immediately. */ + pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n"); + return; + } + hil_mlcs_probe = 1; tasklet_schedule(&hil_mlcs_tasklet); /* Re-insert the periodic task. 
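	 * Once hil_mlc_stop is set, the early return above skips both the
	 * tasklet kick and this re-arm, so the kicker timer stays dead.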
*/ diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index 232d30c825bd..3e85e9039374 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -210,7 +210,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) priv->tseq[2] = 1; priv->tseq[3] = 0; priv->tseq[4] = 0; - __hp_sdc_enqueue_transaction(&priv->trans); + return __hp_sdc_enqueue_transaction(&priv->trans); busy: return 1; done: @@ -219,7 +219,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) return 0; } -static void hp_sdc_mlc_out(hil_mlc *mlc) +static int hp_sdc_mlc_out(hil_mlc *mlc) { struct hp_sdc_mlc_priv_s *priv; @@ -234,7 +234,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) do_data: if (priv->emtestmode) { up(&mlc->osem); - return; + return 0; } /* Shouldn't be sending commands when loop may be busy */ BUG_ON(down_trylock(&mlc->csem)); @@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) BUG_ON(down_trylock(&mlc->csem)); } enqueue: - hp_sdc_enqueue_transaction(&priv->trans); + return hp_sdc_enqueue_transaction(&priv->trans); } static int __init hp_sdc_mlc_init(void) diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h deleted file mode 100644 index 391f94d9e47d..000000000000 --- a/drivers/input/serio/i8042-ppcio.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _I8042_PPCIO_H -#define _I8042_PPCIO_H - - -#if defined(CONFIG_WALNUT) - -#define I8042_KBD_IRQ 25 -#define I8042_AUX_IRQ 26 - -#define I8042_KBD_PHYS_DESC "walnutps2/serio0" -#define I8042_AUX_PHYS_DESC "walnutps2/serio1" -#define I8042_MUX_PHYS_DESC "walnutps2/serio%d" - -extern void *kb_cs; -extern void *kb_data; - -#define I8042_COMMAND_REG (*(int *)kb_cs) -#define I8042_DATA_REG (*(int *)kb_data) - -static inline int i8042_read_data(void) -{ - return readb(kb_data); -} - -static inline int i8042_read_status(void) -{ - return readb(kb_cs); -} - -static inline void i8042_write_data(int val) -{ - writeb(val, kb_data); -} - -static inline void i8042_write_command(int val) -{ - writeb(val, kb_cs); -} - -static inline int i8042_platform_init(void) -{ - i8042_reset = I8042_RESET_ALWAYS; - return 0; -} - -static inline void i8042_platform_exit(void) -{ -} - -#else - -#include "i8042-io.h" - -#endif - -#endif /* _I8042_PPCIO_H */ diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index dc974c288e88..23442a144b83 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -220,6 +220,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), + }, + }, { } }; @@ -425,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, }, + { + /* Lenovo XiaoXin Air 12 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -530,6 +543,25 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), }, }, + { + /* + * Acer Aspire 5738z + * Touchpad stops working in mux mode when dis- + re-enabled + * with the touchpad enable/disable toggle hotkey + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + }, + }, + { + /* Entroware 
Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -556,6 +588,11 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, + }, { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + }, }, { } }; @@ -581,6 +618,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), + }, + }, { /* Advent 4211 */ .matches = { @@ -651,6 +730,21 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, }, + { + /* Lenovo ThinkPad Twist S230u */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -680,6 +774,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), }, }, + { + /* Acer Aspire 5 A515 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 20ff2bed3917..6ff6b5710dd4 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -121,6 +121,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600); MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]"); #endif +static bool i8042_present; static bool i8042_bypass_aux_irq_test; static char i8042_kbd_firmware_id[128]; static char i8042_aux_firmware_id[128]; @@ -341,6 +342,9 @@ int i8042_command(unsigned char *param, int command) unsigned long flags; int retval; + if (!i8042_present) + return -1; + spin_lock_irqsave(&i8042_lock, flags); retval = __i8042_command(param, command); spin_unlock_irqrestore(&i8042_lock, flags); @@ -1464,7 +1468,8 @@ static int __init i8042_setup_aux(void) if (error) goto err_free_ports; - if (aux_enable()) + error = aux_enable(); + if (error) goto err_free_irq; i8042_aux_irq_registered = true; @@ -1609,12 +1614,15 @@ static int __init i8042_init(void) err = i8042_platform_init(); if (err) - return err; + return (err == -ENODEV) ? 
0 : err; err = i8042_controller_check(); if (err) goto err_platform_exit; + /* Set this before creating the dev to allow i8042_command to work right away */ + i8042_present = true; + pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0); if (IS_ERR(pdev)) { err = PTR_ERR(pdev); @@ -1633,6 +1641,9 @@ static int __init i8042_init(void) static void __exit i8042_exit(void) { + if (!i8042_present) + return; + platform_device_unregister(i8042_platform_device); platform_driver_unregister(&i8042_driver); i8042_platform_exit(); diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h index 38dc27ad3c18..eb376700dfff 100644 --- a/drivers/input/serio/i8042.h +++ b/drivers/input/serio/i8042.h @@ -17,8 +17,6 @@ #include "i8042-ip22io.h" #elif defined(CONFIG_SNI_RM) #include "i8042-snirm.h" -#elif defined(CONFIG_PPC) -#include "i8042-ppcio.h" #elif defined(CONFIG_SPARC) #include "i8042-sparcio.h" #elif defined(CONFIG_X86) || defined(CONFIG_IA64) diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c index a681a2c04e39..f15ed3dcdb9b 100644 --- a/drivers/input/serio/sun4i-ps2.c +++ b/drivers/input/serio/sun4i-ps2.c @@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev) struct sun4i_ps2data *drvdata; struct serio *serio; struct device *dev = &pdev->dev; - unsigned int irq; int error; drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL); @@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev) writel(0, drvdata->reg_base + PS2_REG_GCTL); /* Get IRQ for the device */ - irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(dev, "no IRQ found\n"); - error = -ENXIO; + drvdata->irq = platform_get_irq(pdev, 0); + if (drvdata->irq < 0) { + error = drvdata->irq; goto err_disable_clk; } - drvdata->irq = irq; drvdata->serio = serio; drvdata->dev = dev; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 46ad9090493b..1e812a193ce7 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -96,6 +96,7 @@ config TOUCHSCREEN_AD7879_SPI config TOUCHSCREEN_ADC tristate "Generic ADC based resistive touchscreen" depends on IIO + select IIO_BUFFER select IIO_BUFFER_CB help Say Y here if you want to use the generic ADC diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 51ddb204ca1b..d247d0ae82d2 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -33,6 +33,7 @@ #include <linux/regulator/consumer.h> #include <linux/module.h> #include <asm/irq.h> +#include <asm/unaligned.h> /* * This code has been heavily tested on a Nokia 770, and lightly @@ -199,6 +200,26 @@ struct ads7846 { #define REF_ON (READ_12BIT_DFR(x, 1, 1)) #define REF_OFF (READ_12BIT_DFR(y, 0, 0)) +static int get_pendown_state(struct ads7846 *ts) +{ + if (ts->get_pendown_state) + return ts->get_pendown_state(); + + return !gpio_get_value(ts->gpio_pendown); +} + +static void ads7846_report_pen_up(struct ads7846 *ts) +{ + struct input_dev *input = ts->input; + + input_report_key(input, BTN_TOUCH, 0); + input_report_abs(input, ABS_PRESSURE, 0); + input_sync(input); + + ts->pendown = false; + dev_vdbg(&ts->spi->dev, "UP\n"); +} + /* Must be called with ts->lock held */ static void ads7846_stop(struct ads7846 *ts) { @@ -215,6 +236,10 @@ static void ads7846_stop(struct ads7846 *ts) static void ads7846_restart(struct ads7846 *ts) { if (!ts->disabled && !ts->suspended) { + /* Check if pen was released since last 
stop */ + if (ts->pendown && !get_pendown_state(ts)) + ads7846_report_pen_up(ts); + /* Tell IRQ thread that it may poll the device. */ ts->stopped = false; mb(); @@ -410,7 +435,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command) if (status == 0) { /* BE12 value, then padding */ - status = be16_to_cpu(*((u16 *)&req->sample[1])); + status = get_unaligned_be16(&req->sample[1]); status = status >> 3; status &= 0x0fff; } @@ -605,14 +630,6 @@ static const struct attribute_group ads784x_attr_group = { /*--------------------------------------------------------------------------*/ -static int get_pendown_state(struct ads7846 *ts) -{ - if (ts->get_pendown_state) - return ts->get_pendown_state(); - - return !gpio_get_value(ts->gpio_pendown); -} - static void null_wait_for_sync(void) { } @@ -785,10 +802,11 @@ static void ads7846_report_state(struct ads7846 *ts) /* compute touch pressure resistance using equation #2 */ Rt = z2; Rt -= z1; - Rt *= x; Rt *= ts->x_plate_ohms; + Rt = DIV_ROUND_CLOSEST(Rt, 16); + Rt *= x; Rt /= z1; - Rt = (Rt + 2047) >> 12; + Rt = DIV_ROUND_CLOSEST(Rt, 256); } else { Rt = 0; } @@ -867,16 +885,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle) msecs_to_jiffies(TS_POLL_PERIOD)); } - if (ts->pendown && !ts->stopped) { - struct input_dev *input = ts->input; - - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_PRESSURE, 0); - input_sync(input); - - ts->pendown = false; - dev_vdbg(&ts->spi->dev, "UP\n"); - } + if (ts->pendown && !ts->stopped) + ads7846_report_pen_up(ts); return IRQ_HANDLED; } diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 240e8de24cd2..b41b97c962ed 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -935,19 +935,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, error = device_property_read_u32(dev, "offset", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); + if (reg_addr->reg_offset != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset, val); tsdata->offset = val; } error = device_property_read_u32(dev, "offset-x", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val); + if (reg_addr->reg_offset_x != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_x, val); tsdata->offset_x = val; } error = device_property_read_u32(dev, "offset-y", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val); + if (reg_addr->reg_offset_y != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_y, val); tsdata->offset_y = val; } } diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index d6772a2c2d09..e396857cb4c1 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) switch (elo->id) { case 0: /* 10-byte protocol */ - if (elo_setup_10(elo)) + if (elo_setup_10(elo)) { + err = -EIO; goto fail3; + } break; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 0403102e807e..bfb945fc33a1 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -137,6 +137,18 @@ static const struct dmi_system_id rotated_screen[] = { DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"), }, }, + { + .ident = "Teclast X98 Pro", + .matches = { + /* + * Only match BIOS date, because the 
manufacturers + * BIOS does not report the board name at all + * (sometimes)... + */ + DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), + DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"), + }, + }, { .ident = "WinBook TW100", .matches = { @@ -168,6 +180,22 @@ static const struct dmi_system_id nine_bytes_report[] = { {} }; +/* + * Those tablets have their x coordinate inverted + */ +static const struct dmi_system_id inverted_x_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Cube I15-TC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Cube"), + DMI_MATCH(DMI_PRODUCT_NAME, "I15-TC") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. * @@ -780,6 +808,12 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) "Non-standard 9-bytes report format quirk\n"); } + if (dmi_check_system(inverted_x_screen)) { + ts->prop.invert_x = true; + dev_dbg(&ts->client->dev, + "Applying 'inverted x screen' quirk\n"); + } + error = input_mt_init_slots(ts->input_dev, ts->max_touch_num, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index f4ebdab06280..22839dde1d09 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -109,7 +109,7 @@ static bool ili210x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata, if (finger >= ILI210X_TOUCHES) return false; - if (touchdata[0] & BIT(finger)) + if (!(touchdata[0] & BIT(finger))) return false; *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0); diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index 9ed258854349..5e6ba5c4eca2 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) mutex_lock(&input_dev->mutex); - if (input_dev->users) { - retval = clk_prepare_enable(tsc->adc_clk); - if (retval) - goto out; + if (!input_dev->users) + goto out; - retval = clk_prepare_enable(tsc->tsc_clk); - if (retval) { - clk_disable_unprepare(tsc->adc_clk); - goto out; - } + retval = clk_prepare_enable(tsc->adc_clk); + if (retval) + goto out; - retval = imx6ul_tsc_init(tsc); + retval = clk_prepare_enable(tsc->tsc_clk); + if (retval) { + clk_disable_unprepare(tsc->adc_clk); + goto out; } + retval = imx6ul_tsc_init(tsc); + if (retval) { + clk_disable_unprepare(tsc->tsc_clk); + clk_disable_unprepare(tsc->adc_clk); + goto out; + } out: mutex_unlock(&input_dev->mutex); return retval; diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index a5ab774da4cc..fb28fd2d6f1c 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -54,6 +54,7 @@ enum mms_type { TYPE_MMS114 = 114, TYPE_MMS152 = 152, + TYPE_MMS345L = 345, }; struct mms114_data { @@ -91,15 +92,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg, if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) BUG(); - /* Write register: use repeated start */ + /* Write register */ xfer[0].addr = client->addr; - xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; + xfer[0].flags = client->flags & I2C_M_TEN; xfer[0].len = 1; xfer[0].buf = &buf; /* Read data */ xfer[1].addr = client->addr; - xfer[1].flags = I2C_M_RD; + xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; xfer[1].len = len; xfer[1].buf = val; @@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data 
*data) int error; switch (data->type) { + case TYPE_MMS345L: + error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); + if (error) + return error; + + dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n", + buf[0], buf[1], buf[2]); + break; + case TYPE_MMS152: error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); if (error) @@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data) if (error < 0) return error; - /* MMS152 has no configuration or power on registers */ - if (data->type == TYPE_MMS152) + /* Only MMS114 has configuration and power on registers */ + if (data->type != TYPE_MMS114) return 0; error = mms114_set_active(data, true); @@ -428,10 +438,8 @@ static int mms114_probe(struct i2c_client *client, const void *match_data; int error; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_PROTOCOL_MANGLING)) { - dev_err(&client->dev, - "Need i2c bus that supports protocol mangling\n"); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "Not supported I2C adapter\n"); return -ENODEV; } @@ -600,6 +608,9 @@ static const struct of_device_id mms114_dt_match[] = { }, { .compatible = "melfas,mms152", .data = (void *)TYPE_MMS152, + }, { + .compatible = "melfas,mms345l", + .data = (void *)TYPE_MMS345L, }, { } }; diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index fe245439adee..2c67f8eacc7c 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -410,6 +410,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, enum raydium_bl_ack state) { int error; + static const u8 cmd[] = { 0xFF, 0x39 }; error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len); if (error) { @@ -418,7 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, return error; } - error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0); + error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd)); if (error) { dev_err(&client->dev, "Ack obj command failed: %d\n", error); return error; diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c index b63d7fdf0cd2..85a1f465c097 100644 --- a/drivers/input/touchscreen/s6sy761.c +++ b/drivers/input/touchscreen/s6sy761.c @@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata, u8 major = event[4]; u8 minor = event[5]; u8 z = event[6] & S6SY761_MASK_Z; - u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4); - u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y); + u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4); + u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y); input_mt_slot(sdata->input, tid); diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index b6f95f20f924..cd8805d71d97 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev, mutex_lock(&sdata->mutex); - if (value & sdata->hover_enabled) + if (value && sdata->hover_enabled) goto out; if (sdata->running) diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 2e2ea5719c90..902522df0359 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -774,6 +774,7 @@ static int sur40_probe(struct usb_interface *interface, dev_err(&interface->dev, "Unable to register video controls."); 
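+		/* propagate the ctrl-handler failure so the probe does
+		 * not return success on this path */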
v4l2_ctrl_handler_free(&sur40->hdl); + error = sur40->hdl.error; goto err_unreg_v4l2; } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 16d70201de4a..397cb1d3f481 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = { #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index c498796adc07..e579b3633a84 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -704,6 +704,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst) GFP_KERNEL); if (new) src->links = new; + else + ret = -ENOMEM; out: mutex_unlock(&icc_lock); diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index 8e0735a87040..3a3ce6ea65ff 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -157,8 +157,8 @@ struct qcom_icc_desc { } DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); -DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); -DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); +DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); +DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0); DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV); DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d69d43b4355b..2aa340c63939 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -138,7 +138,7 @@ config AMD_IOMMU select PCI_PASID select IOMMU_API select IOMMU_IOVA - depends on X86_64 && PCI && ACPI + depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE ---help--- With this option you can enable support for AMD IOMMU hardware in your system. 
An IOMMU is a hardware component which provides @@ -205,7 +205,7 @@ config INTEL_IOMMU_DEBUGFS config INTEL_IOMMU_SVM bool "Support for Shared Virtual Memory with Intel IOMMU" - depends on INTEL_IOMMU && X86 + depends on INTEL_IOMMU && X86_64 select PCI_PASID select MMU_NOTIFIER help diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index bc7771498342..c392930253a3 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1469,25 +1469,27 @@ static bool increase_address_space(struct protection_domain *domain, bool ret = false; u64 *pte; + pte = (void *)get_zeroed_page(gfp); + if (!pte) + return false; + spin_lock_irqsave(&domain->lock, flags); if (address <= PM_LEVEL_SIZE(domain->mode) || WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) goto out; - pte = (void *)get_zeroed_page(gfp); - if (!pte) - goto out; - *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); domain->pt_root = pte; domain->mode += 1; + pte = NULL; ret = true; out: spin_unlock_irqrestore(&domain->lock, flags); + free_page((unsigned long)pte); return ret; } @@ -2386,6 +2388,7 @@ static void update_domain(struct protection_domain *domain) domain_flush_devices(domain); domain_flush_tlb_pde(domain); + domain_flush_complete(domain); } static int dir2prot(enum dma_data_direction direction) @@ -3872,6 +3875,7 @@ out: static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, struct amd_ir_data *data) { + bool ret; struct irq_remap_table *table; struct amd_iommu *iommu; unsigned long flags; @@ -3889,10 +3893,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, entry = (struct irte_ga *)table->table; entry = &entry[index]; - entry->lo.fields_remap.valid = 0; - entry->hi.val = irte->hi.val; - entry->lo.val = irte->lo.val; - entry->lo.fields_remap.valid = 1; + + ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, + entry->lo.val, entry->hi.val, + irte->lo.val, irte->hi.val); + /* + * We use cmpxchg16 to atomically update the 128-bit IRTE, + * and it cannot be updated by the hardware or other processors + * behind us, so the return value of cmpxchg16 should be the + * same as the old value. 
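+	 * On x86-64 cmpxchg_double() compiles down to cmpxchg16b, which
+	 * is why AMD_IOMMU now depends on HAVE_CMPXCHG_DOUBLE and why
+	 * init_iommu_one() checks X86_FEATURE_CX16 before enabling GA
+	 * mode.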
+ */ + WARN_ON(!ret); + if (data) data->ref = entry; @@ -4430,14 +4442,18 @@ int amd_iommu_deactivate_guest_mode(void *data) struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; struct irq_cfg *cfg = ir_data->cfg; + u64 valid; if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry || !entry->lo.fields_vapic.guest_mode) return 0; + valid = entry->lo.fields_remap.valid; + entry->lo.val = 0; entry->hi.val = 0; + entry->lo.fields_remap.valid = valid; entry->lo.fields_remap.dm = apic->irq_dest_mode; entry->lo.fields_remap.int_type = apic->irq_delivery_mode; entry->hi.fields.vector = cfg->vector; @@ -4574,9 +4590,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) if (!fn) return -ENOMEM; iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); - if (!iommu->ir_domain) + if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } iommu->ir_domain->parent = arch_get_ir_parent_domain(); iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 00319f404a30..be098f15131e 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -12,6 +12,7 @@ #include <linux/acpi.h> #include <linux/list.h> #include <linux/bitmap.h> +#include <linux/delay.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/interrupt.h> @@ -253,6 +254,8 @@ static enum iommu_init_state init_state = IOMMU_START_STATE; static int amd_iommu_enable_interrupts(void); static int __init iommu_go_to_state(enum iommu_init_state state); static void init_device_table_dma(void); +static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, + u8 fxn, u64 *value, bool is_write); static bool amd_iommu_pre_enabled = true; @@ -1331,8 +1334,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, } case IVHD_DEV_ACPI_HID: { u16 devid; - u8 hid[ACPIHID_HID_LEN] = {0}; - u8 uid[ACPIHID_UID_LEN] = {0}; + u8 hid[ACPIHID_HID_LEN]; + u8 uid[ACPIHID_UID_LEN]; int ret; if (h->type != 0x40) { @@ -1349,6 +1352,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + uid[0] = '\0'; switch (e->uidf) { case UID_NOT_PRESENT: @@ -1363,8 +1367,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case UID_IS_CHARACTER: - memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); - uid[ACPIHID_UID_LEN - 1] = '\0'; + memcpy(uid, &e->uid, e->uidl); + uid[e->uidl] = '\0'; break; default: @@ -1521,7 +1525,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. + * GAM also requires GA mode. Therefore, we need to + * check cmpxchg16b support before enabling it. + */ + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; case 0x11: @@ -1530,8 +1541,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. 
+ * XT, GAM also requires GA mode. Therefore, we need to + * check cmpxchg16b support before enabling them. + */ + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + break; + } + /* * Note: Since iommu_update_intcapxt() leverages * the IOMMU MMIO access to MSI capability block registers @@ -1654,13 +1675,11 @@ static int __init init_iommu_all(struct acpi_table_header *table) return 0; } -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, - u8 fxn, u64 *value, bool is_write); - -static void init_iommu_perf_ctr(struct amd_iommu *iommu) +static void __init init_iommu_perf_ctr(struct amd_iommu *iommu) { + int retry; struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0, save_reg = 0; + u64 val = 0xabcd, val2 = 0, save_reg, save_src; if (!iommu_feature(iommu, FEATURE_PC)) return; @@ -1668,17 +1687,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) amd_iommu_pc_present = true; /* save the value to restore, if writable */ - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) || + iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false)) + goto pc_false; + + /* + * Disable power gating by programing the performance counter + * source to 20 (i.e. counts the reads and writes from/to IOMMU + * Reserved Register [MMIO Offset 1FF8h] that are ignored.), + * which never get incremented during this init phase. + * (Note: The event is also deprecated.) + */ + val = 20; + if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true)) goto pc_false; /* Check if the performance counters can be written to */ - if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || - (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) - goto pc_false; + val = 0xabcd; + for (retry = 5; retry; retry--) { + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) || + iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) || + val2) + break; + + /* Wait about 20 msec for power gating to disable and retry. 
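+		 * The loop bound above caps this at five attempts
+		 * before the counters are treated as unwritable.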
*/ + msleep(20); + } /* restore */ - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) || + iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true)) + goto pc_false; + + if (val != val2) goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); @@ -2946,7 +2987,7 @@ static int __init parse_amd_iommu_intr(char *str) { for (; *str; ++str) { if (strncmp(str, "legacy", 6) == 0) { - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; break; } if (strncmp(str, "vapic", 5) == 0) { diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index daeabd98c60e..76e9d3e2f9f2 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -254,7 +254,7 @@ #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) -#define DTE_IRQ_TABLE_LEN (8ULL << 1) +#define DTE_IRQ_TABLE_LEN (9ULL << 1) #define DTE_IRQ_REMAP_ENABLE 1ULL #define PAGE_MODE_NONE 0x00 @@ -348,7 +348,7 @@ #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) -#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) +#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) #define DTE_GCR3_INDEX_A 0 #define DTE_GCR3_INDEX_B 1 @@ -406,7 +406,11 @@ extern bool amd_iommu_np_cache; /* Only true if all IOMMUs support device IOTLBs */ extern bool amd_iommu_iotlb_sup; -#define MAX_IRQS_PER_TABLE 256 +/* + * AMD IOMMU hardware only support 512 IRTEs despite + * the architectural limitation of 2048 entries. + */ +#define MAX_IRQS_PER_TABLE 512 #define IRQ_TABLE_ALIGNMENT 128 struct irq_remap_table { diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index d6d85debd01b..05f3d93cf480 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -741,6 +741,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) might_sleep(); + /* + * When memory encryption is active the device is likely not in a + * direct-mapped domain. Forbid using IOMMUv2 functionality for now. 
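+	 * (With memory encryption on, device DMA generally goes through
+	 * unencrypted SWIOTLB bounce buffers, which the shared process
+	 * page tables used by IOMMUv2 cannot accommodate.)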
+ */ + if (mem_encrypt_active()) + return -ENODEV; + if (!amd_iommu_v2_supported()) return -ENODEV; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7c503a6bc585..056a5b1d00c1 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -38,6 +38,10 @@ #include <linux/pm_runtime.h> #include <linux/slab.h> +#ifdef CONFIG_ARCH_PHYTIUM +#include <asm/machine_types.h> +#endif + #include <linux/amba/bus.h> #include <linux/fsl/mc.h> @@ -58,6 +62,8 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +#define SMR_MASK_SHIFT 16 + static int force_stage; /* * not really modular, but the easiest way to keep compat with existing @@ -1333,6 +1339,19 @@ static int arm_smmu_add_device(struct device *dev) return -ENODEV; } +#ifdef CONFIG_ARCH_PHYTIUM + /* ft2000+ */ + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + for (i = 0; i < num; i++) { +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + u32 fwid = FWID_READ(fwspec->ids[i]); + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]); @@ -1424,6 +1443,13 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) group != smmu->s2crs[idx].group) return ERR_PTR(-EINVAL); +#ifdef CONFIG_ARCH_PHYTIUM + if(typeof_s2500()) + break; + if(typeof_ft2000plus()&& !smmu->s2crs[idx].group) + continue; +#endif + group = smmu->s2crs[idx].group; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 9e393b9c5091..1b9795743276 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -898,7 +898,8 @@ int __init detect_intel_iommu(void) if (!ret) ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, &validate_drhd_cb); - if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret && !no_iommu && !iommu_detected && + (!dmar_disabled || dmar_platform_optin())) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); @@ -1019,8 +1020,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; u32 ver, sts; - int agaw = 0; - int msagaw = 0; + int agaw = -1; + int msagaw = -1; int err; if (!drhd->reg_base_addr) { @@ -1045,17 +1046,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } err = -EINVAL; - agaw = iommu_calculate_agaw(iommu); - if (agaw < 0) { - pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - goto err_unmap; + if (cap_sagaw(iommu->cap) == 0) { + pr_info("%s: No supported address widths. Not attempting DMA translation.\n", + iommu->name); + drhd->ignored = 1; } - msagaw = iommu_calculate_max_sagaw(iommu); - if (msagaw < 0) { - pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - goto err_unmap; + + if (!drhd->ignored) { + agaw = iommu_calculate_agaw(iommu); + if (agaw < 0) { + pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + drhd->ignored = 1; + } + } + if (!drhd->ignored) { + msagaw = iommu_calculate_max_sagaw(iommu); + if (msagaw < 0) { + pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + drhd->ignored = 1; + agaw = -1; + } } iommu->agaw = agaw; iommu->msagaw = msagaw; @@ -1082,7 +1094,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); - if (intel_iommu_enabled) { + /* + * This is only for hotplug; at boot time intel_iommu_enabled won't + * be set yet. 
When intel_iommu_init() runs, it registers the units + * present at boot time, then sets intel_iommu_enabled. + */ + if (intel_iommu_enabled && !drhd->ignored) { err = iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); @@ -1097,6 +1114,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } drhd->iommu = iommu; + iommu->drhd = drhd; return 0; @@ -1111,7 +1129,7 @@ error: static void free_iommu(struct intel_iommu *iommu) { - if (intel_iommu_enabled) { + if (intel_iommu_enabled && !iommu->drhd->ignored) { iommu_device_unregister(&iommu->iommu); iommu_device_sysfs_remove(&iommu->iommu); } diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 9c94e16fb127..55ed857f804f 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1299,13 +1299,17 @@ static int exynos_iommu_of_xlate(struct device *dev, return -ENODEV; data = platform_get_drvdata(sysmmu); - if (!data) + if (!data) { + put_device(&sysmmu->dev); return -ENODEV; + } if (!owner) { owner = kzalloc(sizeof(*owner), GFP_KERNEL); - if (!owner) + if (!owner) { + put_device(&sysmmu->dev); return -ENOMEM; + } INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index a386b83e0e34..f0fe5030acd3 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void) 0, IOAPIC_REMAPPING_ENTRY, fn, &hyperv_ir_domain_ops, NULL); - irq_domain_free_fwnode(fn); + if (!ioapic_ir_domain) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } /* * Hyper-V doesn't provide irq remapping function for diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 0d922eeae357..953d86ca6d2b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level) return (level - 1) * LEVEL_STRIDE; } -static inline int pfn_level_offset(unsigned long pfn, int level) +static inline int pfn_level_offset(u64 pfn, int level) { return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; } -static inline unsigned long level_mask(int level) +static inline u64 level_mask(int level) { - return -1UL << level_to_offset_bits(level); + return -1ULL << level_to_offset_bits(level); } -static inline unsigned long level_size(int level) +static inline u64 level_size(int level) { - return 1UL << level_to_offset_bits(level); + return 1ULL << level_to_offset_bits(level); } -static inline unsigned long align_to_level(unsigned long pfn, int level) +static inline u64 align_to_level(u64 pfn, int level) { return (pfn + level_size(level) - 1) & level_mask(level); } static inline unsigned long lvl_to_nr_pages(unsigned int lvl) { - return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); + return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); } /* VT-d pages must always be _smaller_ than MM pages. Otherwise things @@ -179,7 +179,7 @@ static int rwbf_quirk; * (used when kernel is launched w/ TXT) */ static int force_on = 0; -int intel_iommu_tboot_noforce; +static int intel_iommu_tboot_noforce; static int no_platform_optin; #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) @@ -611,6 +611,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) return g_iommus[iommu_id]; } +static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) +{ + return sm_supported(iommu) ? 
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); +} + static void domain_update_iommu_coherency(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; @@ -622,7 +628,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) for_each_domain_iommu(i, domain) { found = true; - if (!ecap_coherent(g_iommus[i]->ecap)) { + if (!iommu_paging_structure_coherency(g_iommus[i])) { domain->iommu_coherency = 0; break; } @@ -633,7 +639,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) /* No hardware attached; use lowest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { - if (!ecap_coherent(iommu->ecap)) { + if (!iommu_paging_structure_coherency(iommu)) { domain->iommu_coherency = 0; break; } @@ -2090,7 +2096,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, context_set_fault_enable(context); context_set_present(context); - domain_flush_cache(domain, context, sizeof(*context)); + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); /* * It's a non-present to present mapping. If hardware doesn't cache @@ -2553,14 +2560,14 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, } /* Setup the PASID entry for requests without PASID: */ - spin_lock(&iommu->lock); + spin_lock_irqsave(&iommu->lock, flags); if (hw_pass_through && domain_type_is_si(domain)) ret = intel_pasid_setup_pass_through(iommu, domain, dev, PASID_RID2PASID); else ret = intel_pasid_setup_second_level(iommu, domain, dev, PASID_RID2PASID); - spin_unlock(&iommu->lock); + spin_unlock_irqrestore(&iommu->lock, flags); if (ret) { dev_err(dev, "Setup RID2PASID failed\n"); dmar_remove_one_dev_info(dev); @@ -3278,6 +3285,12 @@ static int __init init_dmars(void) if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; + + if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { + pr_info("Disable batched IOTLB flush due to virtualization"); + intel_iommu_strict = 1; + } + #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_supported(iommu)) intel_svm_init(iommu); @@ -4335,7 +4348,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) struct dmar_atsr_unit *atsru; struct acpi_dmar_atsr *tmp; - list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list, + dmar_rcu_check()) { tmp = (struct acpi_dmar_atsr *)atsru->hdr; if (atsr->segment != tmp->segment) continue; @@ -4919,7 +4933,8 @@ int __init intel_iommu_init(void) * Intel IOMMU is required for a TXT/tboot launch or platform * opt in, so enforce that. */ - force_on = tboot_force_iommu() || platform_optin_force_iommu(); + force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) || + platform_optin_force_iommu(); if (iommu_init_mempool()) { if (force_on) @@ -5954,6 +5969,23 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; } +/* + * Check that the device does not live on an external facing PCI port that is + * marked as untrusted. Such devices should not be able to apply quirks and + * thus not be able to bypass the IOMMU restrictions. 
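+ * The pdev->untrusted flag tested below is set by the PCI core for
+ * devices behind a port that the platform firmware marks as
+ * external-facing.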
+ */ +static bool risky_device(struct pci_dev *pdev) +{ + if (pdev->untrusted) { + pci_info(pdev, + "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", + pdev->vendor, pdev->device); + pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n"); + return true; + } + return false; +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@ -5982,6 +6014,9 @@ const struct iommu_ops intel_iommu_ops = { static void quirk_iommu_igfx(struct pci_dev *dev) { + if (risky_device(dev)) + return; + pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; } @@ -6023,6 +6058,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); static void quirk_iommu_rwbf(struct pci_dev *dev) { + if (risky_device(dev)) + return; + /* * Mobile 4 Series Chipset neglects to set RWBF capability, * but needs it. Same seems to hold for the desktop versions. @@ -6053,6 +6091,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) { unsigned short ggc; + if (risky_device(dev)) + return; + if (pci_read_config_word(dev, GGC, &ggc)) return; @@ -6086,6 +6127,12 @@ static void __init check_tylersburg_isoch(void) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); if (!pdev) return; + + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + pci_dev_put(pdev); /* System Management Registers. Might be hidden, in which case @@ -6095,6 +6142,11 @@ static void __init check_tylersburg_isoch(void) if (!pdev) return; + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { pci_dev_put(pdev); return; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 518d0b2d12af..a3739f626629 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -99,8 +99,10 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) return 0; } -static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, - unsigned long address, unsigned long pages, int ih) +static void __flush_svm_range_dev(struct intel_svm *svm, + struct intel_svm_dev *sdev, + unsigned long address, + unsigned long pages, int ih) { struct qi_desc desc; @@ -151,6 +153,22 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d } } +static void intel_flush_svm_range_dev(struct intel_svm *svm, + struct intel_svm_dev *sdev, + unsigned long address, + unsigned long pages, int ih) +{ + unsigned long shift = ilog2(__roundup_pow_of_two(pages)); + unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift)); + unsigned long start = ALIGN_DOWN(address, align); + unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align); + + while (start < end) { + __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih); + start += align; + } +} + static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, unsigned long pages, int ih) { @@ -502,7 +520,7 @@ struct page_req_dsc { u64 priv_data[2]; }; -#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) +#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) { @@ -583,14 +601,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) * any faults on kernel addresses. */ if (!svm->mm) goto bad_req; - /* If the mm is already defunct, don't handle faults. 
*/ - if (!mmget_not_zero(svm->mm)) - goto bad_req; /* If address is not canonical, return invalid response */ if (!is_canonical_address(address)) goto bad_req; + /* If the mm is already defunct, don't handle faults. */ + if (!mmget_not_zero(svm->mm)) + goto bad_req; + down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); if (!vma || address < vma->vm_start) @@ -645,7 +664,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) resp.qw0 = QI_PGRP_PASID(req->pasid) | QI_PGRP_DID(req->rid) | QI_PGRP_PASID_P(req->pasid_present) | - QI_PGRP_PDP(req->pasid_present) | + QI_PGRP_PDP(req->priv_data_present) | QI_PGRP_RESP_CODE(result) | QI_PGRP_RESP_TYPE; resp.qw1 = QI_PGRP_IDX(req->prg_index) | diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 81e43c1df7ec..5dcc81b1df62 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -507,12 +507,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. compatibility) interrupts @@ -563,8 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } @@ -628,13 +634,21 @@ out_free_table: static void intel_teardown_irq_remapping(struct intel_iommu *iommu) { + struct fwnode_handle *fn; + if (iommu && iommu->ir_table) { if (iommu->ir_msi_domain) { + fn = iommu->ir_msi_domain->fwnode; + irq_domain_remove(iommu->ir_msi_domain); + irq_domain_free_fwnode(fn); iommu->ir_msi_domain = NULL; } if (iommu->ir_domain) { + fn = iommu->ir_domain->fwnode; + irq_domain_remove(iommu->ir_domain); + irq_domain_free_fwnode(fn); iommu->ir_domain = NULL; } free_pages((unsigned long)iommu->ir_table->base, @@ -1386,6 +1400,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, irq_data = irq_domain_get_irq_data(domain, virq + i); irq_cfg = irqd_cfg(irq_data); if (!irq_data || !irq_cfg) { + if (!i) + kfree(data); ret = -EINVAL; goto out_free_data; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b20fa3bb4939..3d7448e7cec9 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -492,7 +492,7 @@ struct iommu_group *iommu_group_alloc(void) NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 0e6a9536eca6..612cbf668adf 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) for (i = 0 ; i < mag->size; ++i) { struct iova *iova = private_find_iova(iovad, mag->pfns[i]); - BUG_ON(!iova); + if (WARN_ON(!iova)) + continue; + private_free_iova(iovad, iova); } 
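Aside on the intel-svm hunk above: the PASID-based IOTLB invalidation descriptor ignores address bits covered by its address-mask field, so a flush whose start address is not aligned to the flushed size can leave stale entries; intel_flush_svm_range_dev() therefore walks the range in naturally aligned power-of-two chunks. Below is a self-contained sketch of the same chunking arithmetic. PAGE_SHIFT_4K stands in for VTD_PAGE_SHIFT, roundup_pow_of_two() is a local stand-in for the kernel helper, and the sample address/pages values are arbitrary (user-space C, not the kernel code itself):

#include <stdio.h>

#define PAGE_SHIFT_4K 12UL

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long v = 1;

	while (v < x)
		v <<= 1;
	return v;
}

int main(void)
{
	unsigned long address = 0x12345000UL;	/* sample start VA     */
	unsigned long pages   = 5;		/* sample flush length */
	unsigned long shift   = __builtin_ctzl(roundup_pow_of_two(pages));
	unsigned long align   = 1UL << (PAGE_SHIFT_4K + shift);
	/* ALIGN_DOWN(address, align) */
	unsigned long start   = address & ~(align - 1);
	/* ALIGN(address + (pages << VTD_PAGE_SHIFT), align) */
	unsigned long end     = (address + (pages << PAGE_SHIFT_4K) +
				 align - 1) & ~(align - 1);

	while (start < end) {
		printf("invalidate %#010lx, %lu pages\n",
		       start, align >> PAGE_SHIFT_4K);
		start += align;
	}
	return 0;
}

With these values it prints two aligned eight-page invalidations covering 0x12340000-0x1234ffff: the superset over-flush the kernel accepts in exchange for never leaving a stale entry behind.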
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 8e19bfa94121..a99afb5d9011 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); bytes = omap_iommu_dump_ctx(obj, p, count); + if (bytes < 0) + goto err; bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); +err: mutex_unlock(&iommu_debug_lock); kfree(buf); diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index e0b3fa2bb7ab..280de92b332e 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -814,8 +814,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) qcom_iommu->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) + if (res) { qcom_iommu->local_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qcom_iommu->local_base)) + return PTR_ERR(qcom_iommu->local_base); + } qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); if (IS_ERR(qcom_iommu->iface_clk)) { diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 3ea9d7682999..60e659a24f90 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -454,7 +454,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, if (!region) return -ENOMEM; - list_add(&vdev->resv_regions, ®ion->list); + list_add(®ion->list, &vdev->resv_regions); return 0; } @@ -614,18 +614,20 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - vdomain->viommu = viommu; - vdomain->map_flags = viommu->map_flags; + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); + if (ret < 0) + return ret; + + vdomain->id = (unsigned int)ret; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, - viommu->last_domain, GFP_KERNEL); - if (ret >= 0) - vdomain->id = (unsigned int)ret; + vdomain->map_flags = viommu->map_flags; + vdomain->viommu = viommu; - return ret > 0 ? 
0 : ret; + return 0; } static void viommu_domain_free(struct iommu_domain *domain) diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index d246d74ec3a5..fdcf2bcae164 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to map driver user space!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); + res = -ENOMEM; goto out_release_mem8_space; } diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 97f9c001d8ff..6d04672975ef 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -56,6 +56,15 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select GENERIC_IRQ_MULTI_HANDLER + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index cc7c43932f16..8903d3c34916 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index 23a3b877f7f1..ede02dc2bcd0 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, return 0; err_sgi: - while (--i >= 0) - irq_domain_free_irqs_parent(domain, virq, i); + irq_domain_free_irqs_parent(domain, virq, i - 1); alpine_msix_free_sgi(priv, sgi, nr_irqs); return err; } diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 000000000000..7a77d9787f2e --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,4136 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng <wangyinfeng@phytium.com.cn> + * Chen Baozi <chenbaozi@phytium.com.cn> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/acpi.h> +#include <linux/acpi_iort.h> +#include <linux/bitmap.h> +#include <linux/cpu.h> +#include <linux/crash_dump.h> +#include <linux/delay.h> +#include <linux/dma-iommu.h> +#include <linux/efi.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/list.h> +#include <linux/log2.h> +#include <linux/memblock.h> +#include <linux/mm.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/percpu.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> + +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic-phytium-2500.h> +#include <linux/irqchip/arm-gic-v4.h> + +#include <asm/cputype.h> +#include <asm/exception.h> + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) + +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
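+ *
+ * When both locks are needed (e.g. in the MSI prepare path),
+ * dev_alloc_lock is taken first and the spinlock nests inside it.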
+ */
+struct its_node {
+ raw_spinlock_t lock;
+ struct mutex dev_alloc_lock;
+ struct list_head entry;
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct its_cmd_block *cmd_base;
+ struct its_cmd_block *cmd_write;
+ struct its_baser tables[GITS_BASER_NR_REGS];
+ struct its_collection *collections;
+ struct fwnode_handle *fwnode_handle;
+ u64 (*get_msi_base)(struct its_device *its_dev);
+ u64 cbaser_save;
+ u32 ctlr_save;
+ struct list_head its_device_list;
+ u64 flags;
+ unsigned long list_nr;
+ u32 ite_size;
+ u32 device_ids;
+ int numa_node;
+ unsigned int msi_domain_flags;
+ u32 pre_its_base; /* for Socionext Synquacer */
+ bool is_v4;
+ int vlpi_redist_offset;
+};
+
+#define ITS_ITT_ALIGN SZ_256
+
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
+
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
+struct event_lpi_map {
+ unsigned long *lpi_map;
+ u16 *col_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+ struct mutex vlpi_lock;
+ struct its_vm *vm;
+ struct its_vlpi_map *vlpi_maps;
+ int nr_vlpis;
+};
+
+/*
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
+ */
+struct its_device {
+ struct list_head entry;
+ struct its_node *its;
+ struct event_lpi_map event_map;
+ void *itt;
+ u32 nr_ites;
+ u32 device_id;
+ bool shared;
+};
+
+static struct {
+ raw_spinlock_t lock;
+ struct its_device *dev;
+ struct its_vpe **vpes;
+ int next_victim;
+} vpe_proxy;
+
+static LIST_HEAD(its_nodes);
+static DEFINE_RAW_SPINLOCK(its_lock);
+static struct rdists *gic_rdists;
+static struct irq_domain *its_parent;
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+
+static u16 get_its_list(struct its_vm *vm)
+{
+ struct its_node *its;
+ unsigned long its_list = 0;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (vm->vlpi_count[its->list_nr])
+ __set_bit(its->list_nr, &its_list);
+ }
+
+ return (u16)its_list;
+}
+
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+ u32 event)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->collections + its_dev->event_map.col_map[event];
+}
+
+static struct its_collection *valid_col(struct its_collection *col)
+{
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ return NULL;
+
+ return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+ if (valid_col(its->collections + vpe->col_idx))
+ return vpe;
+
+ return NULL;
+}
+
+/*
+ * ITS command descriptors - parameters to be encoded in a command
+ * block.
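+ * Each member of the union below corresponds to one GITS_CMD_*
+ * opcode and carries only the fields that this command encodes.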
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. + */ +struct its_cmd_block { + u64 raw_cmd[4]; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); +} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool 
db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection 
*its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr; + u64 target; + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, 
desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovp_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! */ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. 
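+ * e.g. with the 64KB queue, prev_idx == 0xffe0 and rd_idx == 0x0020
+ * yield delta = 0x0020 - 0xffe0 + 0x10000 = 0x40, i.e. the ITS has
+ * consumed two 32-byte commands across the wrap.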
+ */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... */ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + 
desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (!vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. 
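+ *
+ * In all of these, d->hwirq is the LPI INTID; subtracting the
+ * device's lpi_base (see its_get_event_id()) yields the per-device
+ * event ID that the ITS commands operate on.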
+ */
+
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->event_map.lpi_base;
+}
+
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
+{
+ irq_hw_number_t hwirq;
+ void *va;
+ u8 *cfg;
+
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
+
+ va = page_address(its_dev->event_map.vm->vprop_page);
+ map = &its_dev->event_map.vlpi_maps[event];
+ hwirq = map->vintid;
+
+ /* Remember the updated property */
+ map->properties &= ~clr;
+ map->properties |= set | LPI_PROP_GROUP1;
+ } else {
+ va = gic_rdists->prop_table_va;
+ hwirq = d->hwirq;
+ }
+
+ cfg = va + hwirq - 8192;
+ *cfg &= ~clr;
+ *cfg |= set | LPI_PROP_GROUP1;
+
+ /*
+ * Make the above write visible to the redistributors.
+ * And yes, we're flushing exactly: One. Single. Byte.
+ * Humpf...
+ */
+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
+ else
+ dsb(ishst);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ lpi_write_config(d, clr, set);
+ its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+ return;
+
+ its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+ /*
+ * More fun with the architecture:
+ *
+ * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+ * value or to 1023, depending on the enable bit. But that
+ * would be issuing a mapping for an /existing/ DevID+EventID
+ * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+ * to the /same/ vPE, using this opportunity to adjust the
+ * doorbell. Mouahahahaha. We loves it, Precious.
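+ *
+ * The VMOVI issued below targets the /same/ vPE and event; the only
+ * thing that changes is the doorbell, which its_build_vmovi_cmd()
+ * sets to the vPE's doorbell LPI or to 1023 (no doorbell).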
+ */
+ its_send_vmovi(its_dev, event);
+}
+
+static void its_mask_irq(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, false);
+
+ lpi_update_config(d, LPI_PROP_ENABLED, 0);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, true);
+
+ lpi_update_config(d, 0, LPI_PROP_ENABLED);
+}
+
+#define MAX_MARS3_SKT_COUNT 8
+
+static int its_cpumask_select(struct its_device *its_dev,
+ const struct cpumask *mask_val,
+ const struct cpumask *cpu_mask)
+{
+ unsigned int skt, skt_id, i;
+ phys_addr_t its_phys_base;
+ unsigned int cpu, cpus = 0;
+
+ unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ /* The socket ID lives in MPIDR Aff2; skt is unsigned */
+ skt = (cpu_logical_map(i) >> 16) & 0xff;
+ if (skt < MAX_MARS3_SKT_COUNT) {
+ skt_cpu_cnt[skt]++;
+ } else if (skt != 0xff) {
+ pr_err("socket address: %u is out of range\n", skt);
+ }
+ }
+
+ its_phys_base = its_dev->its->phys_base;
+ skt_id = (its_phys_base >> 41) & 0x7;
+
+ if (skt_id != 0) {
+ for (i = 0; i < skt_id; i++) {
+ cpus += skt_cpu_cnt[i];
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+ cpus += cpu % skt_cpu_cnt[skt_id];
+
+ if (is_kdump_kernel()) {
+ skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+
+ if (skt_id == skt)
+ return cpu;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ skt = (cpu_logical_map(i) >> 16) & 0xff;
+
+ if (skt < MAX_MARS3_SKT_COUNT) {
+ if (skt_id == skt)
+ return i;
+ } else if (skt != 0xff) {
+ pr_err("socket address: %u is out of range\n", skt);
+ }
+ }
+ }
+
+ return cpus;
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_collection *target_col;
+ u32 id = its_get_event_id(d);
+
+ /* A forwarded interrupt should use irq_set_vcpu_affinity */
+ if (irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ /* lpi cannot be routed to a redistributor that is on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = its_cpumask_select(its_dev, mask_val, cpu_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ /* don't set the affinity when the target cpu is the same as the current one */
+ if (cpu != its_dev->event_map.col_map[id]) {
+ target_col = &its_dev->its->collections[cpu];
+ its_send_movi(its_dev, target_col, id);
+ its_dev->event_map.col_map[id] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ }
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->phys_base + GITS_TRANSLATER;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+ u64 addr;
+
+ its = its_dev->its;
+ addr = its->get_msi_base(its_dev);
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = its_get_event_id(d);
+}
+
+static int its_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ if (which != IRQCHIP_STATE_PENDING) +
return -EINVAL; + + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + + return 0; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + mutex_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_KERNEL); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. 
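+ * From this point on, lpi_write_config() updates the VM's
+ * vprop_page instead of the physical LPI property table.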
+ */
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Write out the property to the prop table */
+ lpi_write_config(d, 0xff, info->map->properties);
+
+ /* Drop the physical mapping */
+ its_send_discard(its_dev, event);
+
+ /* and install the virtual one */
+ its_send_vmapti(its_dev, event);
+
+ /* Increment the number of VLPIs */
+ its_dev->event_map.nr_vlpis++;
+ }
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ mutex_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm ||
+ !its_dev->event_map.vlpi_maps[event].vm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Copy our mapping information to the incoming request */
+ *info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ mutex_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Drop the virtual mapping */
+ its_send_discard(its_dev, event);
+
+ /* and restore the physical one */
+ irqd_clr_forwarded_to_vcpu(d);
+ its_send_mapti(its_dev, d->hwirq, event);
+ lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+ LPI_PROP_ENABLED |
+ LPI_PROP_GROUP1));
+
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
+ /*
+ * Drop the refcount and make the device available again if
+ * this was the last VLPI.
+ */
+ if (!--its_dev->event_map.nr_vlpis) {
+ its_dev->event_map.vm = NULL;
+ kfree(its_dev->event_map.vlpi_maps);
+ }
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+ lpi_update_config(d, 0xff, info->config);
+ else
+ lpi_write_config(d, 0xff, info->config);
+ its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+ return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ /* Need a v4 ITS */
+ if (!its_dev->its->is_v4)
+ return -EINVAL;
+
+ /* Unmap request? */
+ if (!info)
+ return its_vlpi_unmap(d);
+
+ switch (info->cmd_type) {
+ case MAP_VLPI:
+ return its_vlpi_map(d, info);
+
+ case GET_VLPI:
+ return its_vlpi_get(d, info);
+
+ case PROP_UPDATE_VLPI:
+ case PROP_UPDATE_AND_INV_VLPI:
+ return its_vlpi_prop_update(d, info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+ .irq_set_irqchip_state = its_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
+ */
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+ struct lpi_range *range;
+
+ range = kmalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
+}
+
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+ if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+ return;
+ if (a->base_id + a->span != b->base_id)
+ return;
+ b->base_id = a->base_id;
+ b->span += a->span;
+ list_del(&a->entry);
+ kfree(a);
+}
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+ struct lpi_range *new, *old;
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new)
+ return -ENOMEM;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+ if (old->base_id < base)
+ break;
+ }
+ /*
+ * old is the last element with ->base_id smaller than base,
+ * so new goes right after it. If there are no elements with
+ * ->base_id smaller than base, &old->entry ends up pointing
+ * at the head of the list, and inserting new at the start of
+ * the list is the right thing to do in that case as well.
+ */
+ list_add(&new->entry, &old->entry);
+ /*
+ * Now check if we can merge with the preceding and/or
+ * following ranges.
+ */
+ merge_lpi_ranges(old, new);
+ merge_lpi_ranges(new, list_next_entry(new, entry));
+
+ mutex_unlock(&lpi_range_lock);
+ return 0;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
+
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
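+ * INTIDs 0-8191 are reserved for SGIs/PPIs/SPIs, which is why the
+ * range handed to the allocator starts at 8192.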
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + kfree(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_region(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... 
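+ * (the tables should have been left in a region reserved by the
+ * previous boot stage, see gic_reserve_range())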
*/ + pr_warn("GIC-2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char *its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 psz, u32 order, + bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages; + struct page *page; + void *base; + +retry_alloc_baser: + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz 
- 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this ITS
+ * supports. If that's zero, make it
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+ }
+ goto retry_baser;
+ }
+
+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+ /*
+ * Page size didn't stick. Let's try a smaller
+ * size and retry. If we reach 4K, then
+ * something is horribly wrong...
+ */
+ free_pages((unsigned long)base, order);
+ baser->base = NULL;
+
+ switch (psz) {
+ case SZ_16K:
+ psz = SZ_4K;
+ goto retry_alloc_baser;
+ case SZ_64K:
+ psz = SZ_16K;
+ goto retry_alloc_baser;
+ }
+ }
+
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+ &its->phys_base, its_base_type_string[type],
+ val, tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+ return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+ struct its_baser *baser,
+ u32 psz, u32 *order, u32 ids)
+{
+ u64 tmp = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(tmp);
+ u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+ u32 new_order = *order;
+ bool indirect = false;
+
+ /* No need to enable Indirection if memory requirement < (psz*2)bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level
+ * table by reading bit at offset '62' after writing
+ * '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
+ /*
+ * The size of a lvl2 table is equal to the ITS
+ * page size 'psz'. To compute the lvl1 table size,
+ * subtract the ID bits resolved by a lvl2 table
+ * from 'ids' (as reported by the ITS hardware);
+ * each lvl1 entry is GITS_LVL1_ENTRY_SIZE bytes.
+ */
+ ids -= ilog2(psz / (int)esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
+ }
+ }
+
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if the two-level device table
+ * feature is not supported by hardware.
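+ *
+ * e.g. with esz = 8 and ids = 20, a flat table needs 8MB, while a
+ * two-level layout with 64K pages needs a 1KB lvl1 table (20 - 13
+ * remaining ID bits, 8 bytes per entry) plus 64K per populated
+ * lvl2 page.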
+ */
+ new_order = max_t(u32, get_order(esz << ids), new_order);
+ if (new_order >= MAX_ORDER) {
+ new_order = MAX_ORDER - 1;
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ its->device_ids, ids);
+ }
+
+ *order = new_order;
+
+ return indirect;
+}
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
+ }
+ }
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_RaWaWb;
+ u32 psz = SZ_64K;
+ int err, i;
+
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(val);
+ u32 order = get_order(psz);
+ bool indirect = false;
+
+ switch (type) {
+ case GITS_BASER_TYPE_NONE:
+ continue;
+
+ case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order,
+ its->device_ids);
+ break;
+
+ case GITS_BASER_TYPE_VCPU:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order,
+ ITS_MAX_VPEID_BITS);
+ break;
+ }
+
+ err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+ if (err < 0) {
+ its_free_tables(its);
+ return err;
+ }
+
+ /* Update settings which will be used for next BASERn */
+ psz = baser->psz;
+ cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+ shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+ }
+
+ return 0;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
+ int i;
+
+ its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
+ GFP_KERNEL);
+ if (!its->collections)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ its->collections[i].target_address = ~0ULL;
+
+ return 0;
+}
+
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+ struct page *pend_page;
+
+ pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
+ if (!pend_page)
+ return NULL;
+
+ /* Make sure the GIC will observe the zero-ed page */
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+ return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+}
+
+/*
+ * Booting with kdump and LPIs enabled is generally fine. Any other
+ * case is wrong in the absence of firmware/EFI support.
+ */
+static bool enabled_lpis_allowed(void)
+{
+ phys_addr_t addr;
+ u64 val;
+
+ /* Allow a kdump kernel */
+ if (is_kdump_kernel())
+ return true;
+
+ /* Check whether the property table is in a reserved region */
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ addr = val & GENMASK_ULL(51, 12);
+
+ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
+}
+
+static int __init allocate_lpi_tables(void)
+{
+ u64 val;
+ int err, cpu;
+
+ /*
+ * If LPIs are enabled while we run this from the boot CPU,
+ * flag the RD tables as pre-allocated if the stars do align.
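+ * The stars align when GICR_CTLR.EnableLPIs is already set and the
+ * live property table sits in a region known to be reserved (see
+ * enabled_lpis_allowed()).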
+ */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->lpi_enabled) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. 
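+ * Mismatched attributes between the CPU's and the GIC's view of
+ * the table could otherwise lead to lost updates.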
+ */
+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+ GICR_PENDBASER_CACHEABILITY_MASK);
+ val |= GICR_PENDBASER_nC;
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ }
+
+ /* Enable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ if (gic_rdists->has_vlpis) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+ /*
+ * It's possible for a CPU to receive VLPIs before it is
+ * scheduled as a vPE, especially for the first CPU, and a
+ * VLPI with an INTID larger than 2^(IDbits+1) will be
+ * considered out of range and dropped by the GIC.
+ * So we initialize IDbits to a known value to avoid VLPI drops.
+ */
+ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+ smp_processor_id(), val);
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ /*
+ * Also clear Valid bit of GICR_VPENDBASER, in case some
+ * ancient programming gets left in and could corrupt memory.
+ */
+ val = its_clear_vpend_valid(vlpi_base);
+ WARN_ON(val & GICR_VPENDBASER_Dirty);
+ }
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+out:
+ gic_data_rdist()->lpi_enabled = true;
+ pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ &paddr);
+}
+
+static void its_cpu_init_collection(struct its_node *its)
+{
+ int cpu = smp_processor_id();
+ u64 target;
+
+ /* avoid cross-node collections and their mappings */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ return;
+ }
+
+ /*
+ * We now have to bind each collection to its target
+ * redistributor.
+ */
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ /*
+ * This ITS wants the physical address of the
+ * redistributor.
+ */
+ target = gic_data_rdist()->phys_base;
+ } else {
+ /* This ITS wants a linear CPU number.
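+ * The number is pre-shifted left by 16 so that its_encode_target(),
+ * which stores target_address >> 16, drops it into the RDbase
+ * field of the MAPC command.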
*/ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < its->device_ids); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). 
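+  * So we simply walk every v4 ITS and make sure a level-2
+  * entry exists for this vpe_id.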
+ */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!its->is_v4) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * its->ite_size; + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + kfree(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + mutex_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. */ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. 
Get yourself a better implementation */
+  WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
+     dev_id);
+  return -EINVAL;
+ }
+
+ mutex_lock(&its->dev_alloc_lock);
+ its_dev = its_find_device(its, dev_id);
+ if (its_dev) {
+  /*
+   * We have already seen this ID, probably through
+   * another alias (PCI bridge of some sort). No need to
+   * create the device.
+   */
+  its_dev->shared = true;
+  pr_debug("Reusing ITT for devID %x\n", dev_id);
+  goto out;
+ }
+
+ its_dev = its_create_device(its, dev_id, nvec, true);
+ if (!its_dev) {
+  err = -ENOMEM;
+  goto out;
+ }
+
+ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+out:
+ mutex_unlock(&its->dev_alloc_lock);
+ info->scratchpad[0].ptr = its_dev;
+ return err;
+}
+
+static struct msi_domain_ops its_msi_domain_ops = {
+ .msi_prepare = its_msi_prepare,
+};
+
+static int its_irq_gic_domain_alloc(struct irq_domain *domain,
+        unsigned int virq,
+        irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+
+ if (irq_domain_get_of_node(domain->parent)) {
+  fwspec.fwnode = domain->parent->fwnode;
+  fwspec.param_count = 3;
+  fwspec.param[0] = GIC_IRQ_TYPE_LPI;
+  fwspec.param[1] = hwirq;
+  fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
+  fwspec.fwnode = domain->parent->fwnode;
+  fwspec.param_count = 2;
+  fwspec.param[0] = hwirq;
+  fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ } else {
+  return -EINVAL;
+ }
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+    unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+ struct its_node *its = its_dev->its;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+
+ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+ if (err)
+  return err;
+
+ err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
+ if (err)
+  return err;
+
+ for (i = 0; i < nr_irqs; i++) {
+  err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+  if (err)
+   return err;
+
+  irq_domain_set_hwirq_and_chip(domain, virq + i,
+           hwirq + i, &its_irq_chip, its_dev);
+  irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
+  pr_debug("ID:%d pID:%d vID:%d\n",
+    (int)(hwirq + i - its_dev->event_map.lpi_base),
+    (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+static int its_cpumask_first(struct its_device *its_dev,
+        const struct cpumask *cpu_mask)
+{
+ unsigned int skt, skt_id, i;
+ phys_addr_t its_phys_base;
+ unsigned int cpu, cpus = 0;
+
+ unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+ /* Count the CPUs on each socket (socket ID lives in MPIDR Aff2). */
+ for (i = 0; i < nr_cpu_ids; i++) {
+  skt = (cpu_logical_map(i) >> 16) & 0xff;
+  if (skt < MAX_MARS3_SKT_COUNT) {
+   skt_cpu_cnt[skt]++;
+  } else if (skt != 0xff) {
+   pr_err("socket address: %u is out of range.\n", skt);
+  }
+ }
+
+ /* The ITS's home socket is encoded in bits [43:41] of its base address. */
+ its_phys_base = its_dev->its->phys_base;
+ skt_id = (its_phys_base >> 41) & 0x7;
+
+ if (skt_id != 0) {
+  for (i = 0; i < skt_id; i++)
+   cpus += skt_cpu_cnt[i];
+ }
+
+ cpu = cpumask_first(cpu_mask);
+ if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id])))
+  cpus = cpu;
+
+ if (is_kdump_kernel()) {
+  skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+
+  if (skt_id == skt)
+   return cpu;
+
+  for (i = 0; i < nr_cpu_ids; i++) {
+   skt = (cpu_logical_map(i) >> 16) & 0xff;
+
+   if (skt < MAX_MARS3_SKT_COUNT) {
+    if (skt_id == skt)
+     return i;
+   } else if (skt != 0xff) {
+    pr_err("socket address: %u is out of range.\n", skt);
+   }
+  }
+ }
+
+ /* Fall back to the first suitable CPU on the ITS's home socket. */
+ 
return cpus;
+}
+
+static int its_irq_domain_activate(struct irq_domain *domain,
+       struct irq_data *d, bool reserve)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+ int cpu;
+
+ /* Get the cpu_mask of the local node */
+ if (its_dev->its->numa_node >= 0)
+  cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ cpu = its_cpumask_first(its_dev, cpu_mask);
+ if (cpu >= nr_cpu_ids) {
+  if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+   return -EINVAL;
+
+  cpu = cpumask_first(cpu_online_mask);
+ }
+
+ its_dev->event_map.col_map[event] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+          struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ /* Stop the delivery of interrupts */
+ its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+    unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its = its_dev->its;
+ int i;
+
+ bitmap_release_region(its_dev->event_map.lpi_map,
+         its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+         get_count_order(nr_irqs));
+
+ for (i = 0; i < nr_irqs; i++) {
+  struct irq_data *data = irq_domain_get_irq_data(domain,
+        virq + i);
+  /* Nuke the entry in the domain */
+  irq_domain_reset_irq_data(data);
+ }
+
+ mutex_lock(&its->dev_alloc_lock);
+
+ /*
+  * If all interrupts have been freed, start mopping the
+  * floor. This is conditioned on the device not being shared.
+  */
+ if (!its_dev->shared &&
+     bitmap_empty(its_dev->event_map.lpi_map,
+    its_dev->event_map.nr_lpis)) {
+  its_lpi_free(its_dev->event_map.lpi_map,
+        its_dev->event_map.lpi_base,
+        its_dev->event_map.nr_lpis);
+  kfree(its_dev->event_map.col_map);
+
+  /* Unmap device/itt */
+  its_send_mapd(its_dev, 0);
+  its_free_device(its_dev);
+ }
+
+ mutex_unlock(&its->dev_alloc_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+ .alloc = its_irq_domain_alloc,
+ .free = its_irq_domain_free,
+ .activate = its_irq_domain_activate,
+ .deactivate = its_irq_domain_deactivate,
+};
+
+/*
+ * This is insane.
+ *
+ * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
+ */
+static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
+{
+ /* Already unmapped? */
+ if (vpe->vpe_proxy_event == -1)
+  return;
+
+ its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
+ vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
+
+ /*
+  * We don't track empty slots at all, so let's move the
+  * next_victim pointer if we can quickly reuse that slot
+  * instead of nuking an existing entry. Not clear that this is
+  * always a win though, and this might just generate a ripple
+  * effect... Let's just hope VPEs don't migrate too often.
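+  * (Each migration costs a DISCARD plus a MAPTI on the proxy
+  * device.)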
+ */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int cpu = cpumask_first(mask_val); + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + */ + if (vpe->col_idx != cpu) { + int from = vpe->col_idx; + + vpe->col_idx = cpu; + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + } + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_NonShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? 
GICR_VPENDBASER_IDAI : 0;
+ val |= GICR_VPENDBASER_Valid;
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_deschedule(struct its_vpe *vpe)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ val = its_clear_vpend_valid(vlpi_base);
+
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+  pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+  vpe->idai = false;
+  vpe->pending_last = true;
+ } else {
+  vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+  vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ }
+}
+
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+ struct its_node *its;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+  if (!its->is_v4)
+   continue;
+
+  if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+   continue;
+
+  /*
+   * Sending a VINVALL to a single ITS is enough, as all
+   * we need is to reach the redistributors.
+   */
+  its_send_vinvall(its, vpe);
+  return;
+ }
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+  its_vpe_schedule(vpe);
+  return 0;
+
+ case DESCHEDULE_VPE:
+  its_vpe_deschedule(vpe);
+  return 0;
+
+ case INVALL_VPE:
+  its_vpe_invall(vpe);
+  return 0;
+
+ default:
+  return -EINVAL;
+ }
+}
+
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+        void (*cmd)(struct its_device *, u32))
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+ its_vpe_db_proxy_map_locked(vpe);
+ cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ if (gic_rdists->has_direct_lpi) {
+  void __iomem *rdbase;
+
+  rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+  gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
+  while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+   cpu_relax();
+ } else {
+  its_vpe_send_cmd(vpe, its_send_inv);
+ }
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+ /*
+  * We need to mask the LPI, which is described by the parent
+  * irq_data. Instead of calling into the parent (which wouldn't
+  * do exactly the right thing), let's simply use the
+  * parent_data pointer. Yes, I'm naughty.
+  */
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+ /* Same hack as above...
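+  * we poke the parent irq_data's LPI configuration directly.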
*/ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = 
its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_vpe_irq_chip, vm->vpes[i]); + set_bit(i, bitmap); + } + + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i - 1); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* If we use the list map, we issue VMAPP on demand... */ + if (its_list_map) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we unmap the VPE once no VLPIs are + * associated with the VM. + */ + if (its_list_map) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, false); + } +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER<n> or + * GITS_CBASER to not have UNPREDICTABLE results. + */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size */ + its->device_ids = 0x14; /* 20 bits, 8MB */ + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->ite_size = 16; + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. 
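+ * The doorbell address for a given device is therefore
+ * pre_its_base + (device_id << 2).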
+ */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (its->device_ids > ids) + its->device_ids = ids; + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... + */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. 
+ */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER<n> + * registers is undefined according to the GIC v3 ITS + * Specification. + */ + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. 
+ */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + inner_domain->parent = its_parent; + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + inner_domain->flags |= its->msi_domain_flags; + info->ops = &its_msi_domain_ops; + info->data = its; + inner_domain->host_data = info; + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) { + pr_err("ITS: Can't allocate GICv4 proxy device array\n"); + return -ENOMEM; + } + + /* Use the last possible DevID */ + devid = GENMASK(its->device_ids - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init its_compute_its_list_map(struct resource *res, + void __iomem *its_base) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. 
+ */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &res->start); + return -EINVAL; + } + + ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its_base + GITS_CTLR); + ctlr = readl_relaxed(its_base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &res->start, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + struct its_node *its; + void __iomem *its_base; + u32 val, ctlr; + u64 baser, tmp, typer; + struct page *page; + int err; + + its_base = ioremap(res->start, resource_size(res)); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + return -ENOMEM; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + err = -ENODEV; + goto out_unmap; + } + + err = its_force_quiescent(its_base); + if (err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) { + err = -ENOMEM; + goto out_unmap; + } + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); + its->device_ids = GITS_TYPER_DEVBITS(typer); + its->is_v4 = !!(typer & GITS_TYPER_VLPIS); + if (its->is_v4) { + if (!(typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(res, its_base); + if (err < 0) + goto out_free_its; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &res->start, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + } + } + + its->numa_node = numa_node; + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_free_its; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + its->fwnode_handle = handle; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; + + its_enable_quirks(its); + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (its->is_v4) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + if (GITS_TYPER_HCC(typer)) + its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; + + err = its_init_domain(handle, its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_free_its: + kfree(its); +out_unmap: + iounmap(its_base); + pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-v3-its", }, + {}, +}; + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + node = acpi_map_pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) { + pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); + return; + } + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int 
__init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + err = its_probe_one(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!err) + return 0; + + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + int err; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + gic_rdists = rdists; + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) + has_v4 |= its->is_v4; + + if (has_v4 & rdists->has_vlpis) { + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 000000000000..0fea0846b1f6 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2374 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng <wangyinfeng@phytium.com.cn> + * Chen Baozi <chenbaozi@phytium.com.cn> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */
+
+#define pr_fmt(fmt) "GIC-2500: " fmt
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-common.h>
+#include <linux/irqchip/arm-gic-phytium-2500.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/virt.h>
+
+#include "irq-gic-common.h"
+
+#define MAX_MARS3_SOC_COUNT 8
+#define MARS3_ADDR_SKTID_SHIFT 41
+
+struct gic_dist_desc {
+ void __iomem *dist_base;
+ phys_addr_t phys_base;
+ unsigned long size;
+};
+
+static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly;
+
+static unsigned int mars3_sockets_bitmap = 0x1;
+
+#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8)
+
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+ bool single_redist;
+};
+
+struct gic_chip_data {
+ struct fwnode_handle *fwnode;
+ void __iomem *dist_base;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
+ struct irq_domain *domain;
+ u64 redist_stride;
+ u32 nr_redist_regions;
+ u64 flags;
+ bool has_rss;
+ unsigned int ppi_nr;
+ struct partition_desc **ppi_descs;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation, thus matching the priorities presented
+ * from the (re)distributor when security is enabled.
+ *
+ * See GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ *   priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ *   interrupt.
+ *
+ * For now, we only support pseudo-NMIs if we have non-secure view of
+ * priorities.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info;
+static DEFINE_PER_CPU(bool, has_rss);
+
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway.
*/ +#define DEFAULT_PMR_VALUE 0xf0 + +enum gic_intid_range { + PPI_RANGE, + SPI_RANGE, + EPPI_RANGE, + ESPI_RANGE, + LPI_RANGE, + __INVALID_RANGE__ +}; + +static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) +{ + switch (hwirq) { + case 16 ... 31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): + return EPPI_RANGE; + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): + return ESPI_RANGE; + case 8192 ... GENMASK(23, 0): + return LPI_RANGE; + default: + return __INVALID_RANGE__; + } +} + +static enum gic_intid_range get_intid_range(struct irq_data *d) +{ + return __get_intid_range(d->hwirq); +} + +static inline unsigned int gic_irq(struct irq_data *d) +{ + return d->hwirq; +} + +static inline int gic_irq_in_rdist(struct irq_data *d) +{ + enum gic_intid_range range = get_intid_range(d); + return range == PPI_RANGE || range == EPPI_RANGE; +} + +static inline void __iomem *gic_dist_base(struct irq_data *d) +{ + switch (get_intid_range(d)) { + case PPI_RANGE: + case EPPI_RANGE: + /* SGI+PPI -> SGI_base for this CPU */ + return gic_data_rdist_sgi_base(); + + case SPI_RANGE: + case ESPI_RANGE: + /* SPI -> dist_base */ + return gic_data.dist_base; + + default: + return NULL; + } +} + +static void gic_do_wait_for_rwp(void __iomem *base) +{ + u32 count = 1000000; /* 1s! */ + + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + count--; + if (!count) { + pr_err_ratelimited("RWP timeout, gone fishing\n"); + return; + } + cpu_relax(); + udelay(1); + } +} + +/* Wait for completion of a distributor change */ +static void gic_dist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data.dist_base); +} + +/* Wait for completion of a redistributor change */ +static void gic_redist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data_rdist_rd_base()); +} + +#ifdef CONFIG_ARM64 + +static u64 __maybe_unused gic_read_iar(void) +{ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); +} +#endif + +static void gic_enable_redist(bool enable) +{ + void __iomem *rbase; + u32 count = 1000000; /* 1s! */ + u32 val; + unsigned long mpidr; + int i; + + if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) + return; + + rbase = gic_data_rdist_rd_base(); + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + } + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? 
"wakeup" : "sleep"); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if(mpidr & 0xFFFF) // either Aff1 or Aff0 is not zero + return; + + rbase = rbase + 64*SZ_128K; // skip 64 Redistributors + + for(i = 0; i < 4; i++) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + count = 1000000; /* 1s! */ + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", mpidr, 64 + i, + enable ? "wakeup" : "sleep"); + + rbase = rbase + SZ_128K; // next redistributor + } +} + +/* + * Routines to disable, enable, EOI and route interrupts + */ +static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) +{ + switch (get_intid_range(d)) { + case PPI_RANGE: + case SPI_RANGE: + *index = d->hwirq; + return offset; + case EPPI_RANGE: + /* + * Contrary to the ESPI range, the EPPI range is contiguous + * to the PPI range in the registers, so let's adjust the + * displacement accordingly. Consistency is overrated. + */ + *index = d->hwirq - EPPI_BASE_INTID + 32; + return offset; + case ESPI_RANGE: + *index = d->hwirq - ESPI_BASE_INTID; + switch (offset) { + case GICD_ISENABLER: + return GICD_ISENABLERnE; + case GICD_ICENABLER: + return GICD_ICENABLERnE; + case GICD_ISPENDR: + return GICD_ISPENDRnE; + case GICD_ICPENDR: + return GICD_ICPENDRnE; + case GICD_ISACTIVER: + return GICD_ISACTIVERnE; + case GICD_ICACTIVER: + return GICD_ICACTIVERnE; + case GICD_IPRIORITYR: + return GICD_IPRIORITYRnE; + case GICD_ICFGR: + return GICD_ICFGRnE; + case GICD_IROUTER: + return GICD_IROUTERnE; + default: + break; + } + break; + default: + break; + } + + WARN_ON(1); + *index = d->hwirq; + return offset; +} + +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else { + unsigned int skt; + + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + } + + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); +} + +static void gic_poke_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + + unsigned long mpidr; + void __iomem *rbase; + int i; + unsigned int skt; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + + writel_relaxed(mask, base + offset + (index / 32) * 4); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero + rbase = base + 64*SZ_128K; // skip 64 Redistributors + + for(i = 0; i < 4; i++) { + writel_relaxed(mask, rbase + offset + (index / 32) * 4); + gic_do_wait_for_rwp(rbase - SZ_64K); // RD from SGI base + rbase = rbase + SZ_128K; + } + } // core 0 of each socket + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + writel_relaxed(mask, 
base + offset + (index / 32) * 4);
+  gic_do_wait_for_rwp(base);
+ }
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+  * When masking a forwarded interrupt, make sure it is
+  * deactivated as well.
+  *
+  * This ensures that an interrupt that is getting
+  * disabled/masked will not get "stuck", because there is
+  * no one to deactivate it (guest is being terminated).
+  */
+ if (irqd_is_forwarded_to_vcpu(d))
+  gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static inline bool gic_supports_nmi(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+        static_branch_likely(&supports_pseudo_nmis);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+         enum irqchip_irq_state which, bool val)
+{
+ u32 reg;
+
+ if (d->hwirq >= 8192) /* PPI/SPI only */
+  return -EINVAL;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+  reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+  break;
+
+ case IRQCHIP_STATE_ACTIVE:
+  reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
+  break;
+
+ case IRQCHIP_STATE_MASKED:
+  reg = val ? GICD_ICENABLER : GICD_ISENABLER;
+  break;
+
+ default:
+  return -EINVAL;
+ }
+
+ gic_poke_irq(d, reg);
+ return 0;
+}
+
+static int gic_irq_get_irqchip_state(struct irq_data *d,
+         enum irqchip_irq_state which, bool *val)
+{
+ if (d->hwirq >= 8192) /* PPI/SPI only */
+  return -EINVAL;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+  *val = gic_peek_irq(d, GICD_ISPENDR);
+  break;
+
+ case IRQCHIP_STATE_ACTIVE:
+  *val = gic_peek_irq(d, GICD_ISACTIVER);
+  break;
+
+ case IRQCHIP_STATE_MASKED:
+  *val = !gic_peek_irq(d, GICD_ISENABLER);
+  break;
+
+ default:
+  return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gic_irq_set_prio(struct irq_data *d, u8 prio)
+{
+ void __iomem *base = gic_dist_base(d);
+ u32 offset, index;
+
+ offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
+
+ writeb_relaxed(prio, base + offset + index);
+}
+
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case PPI_RANGE:
+  return d->hwirq - 16;
+ case EPPI_RANGE:
+  return d->hwirq - EPPI_BASE_INTID + 16;
+ default:
+  unreachable();
+ }
+}
+
+static int gic_irq_nmi_setup(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (!gic_supports_nmi())
+  return -EINVAL;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+  pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+  return -EINVAL;
+ }
+
+ /*
+  * A secondary irq_chip should be in charge of LPI requests;
+  * it should not be possible to get here.
+  */
+ if (WARN_ON(gic_irq(d) >= 8192))
+  return -EINVAL;
+
+ /* desc lock should already be held */
+ if (gic_irq_in_rdist(d)) {
+  u32 idx = gic_get_ppi_index(d);
+
+  /* Setting up PPI as NMI, only switch handler for first NMI */
+  if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
+   refcount_set(&ppi_nmi_refs[idx], 1);
+   desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
+  }
+ } else {
+  desc->handle_irq = handle_fasteoi_nmi;
+ }
+
+ gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+
+ return 0;
+}
+
+static void gic_irq_nmi_teardown(struct irq_data *d)
+{
+ struct irq_desc *desc = irq_to_desc(d->irq);
+
+ if (WARN_ON(!gic_supports_nmi()))
+  return;
+
+ if (gic_peek_irq(d, GICD_ISENABLER)) {
+  pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
+  return;
+ }
+
+ /*
+  * A secondary irq_chip should be in charge of LPI requests;
+  * it should not be possible
to get here + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + +static void gic_eoi_irq(struct irq_data *d) +{ + gic_write_eoir(gic_irq(d)); +} + +static void gic_eoimode1_eoi_irq(struct irq_data *d) +{ + /* + * No need to deactivate an LPI, or an interrupt that + * is getting forwarded to a vcpu. + */ + if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + return; + gic_write_dir(gic_irq(d)); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + enum gic_intid_range range; + unsigned int irq = gic_irq(d); + void __iomem *base; + u32 offset, index; + int ret; + + unsigned long mpidr; + int i; + void __iomem *rbase; + unsigned int skt; + + /* Interrupt configuration for SGIs can't be changed */ + if (irq < 16) + return -EINVAL; + + range = get_intid_range(d); + + /* SPIs have restrictions on the supported types */ + if ((range == SPI_RANGE || range == ESPI_RANGE) && + type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + offset = convert_offset_index(d, GICD_ICFGR, &index); + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero + rbase = base + 64 * SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + ret = gic_configure_irq(index, type, rbase + offset, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + ret = gic_configure_irq(index, type, base + offset, NULL); + gic_do_wait_for_rwp(base); + } + + if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); + ret = 0; + } + + return ret; +} + +static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) +{ + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); + return 0; +} + +static u64 gic_mpidr_to_affinity(unsigned long mpidr) +{ + u64 aff; + + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + return aff; +} + +static void gic_deactivate_unhandled(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + gic_write_eoir(irqnr); + } +} + +static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + bool irqs_enabled = interrupts_enabled(regs); + int err; + + if (irqs_enabled) + nmi_enter(); + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + /* + * Leave the PSR.I bit set to prevent other NMIs from being + * received while handling this one. + * PSR.I will be restored when we ERET to the + * interrupted context. 
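+ * + * (The NMI path is taken when the running priority read back from + * ICC_RPR_EL1 equals GICD_INT_NMI_PRI, i.e. the interrupt was + * programmed as an NMI by gic_irq_nmi_setup() above.) 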
+ */ + err = handle_domain_nmi(gic_data.domain, irqnr, regs); + if (err) + gic_deactivate_unhandled(irqnr); + + if (irqs_enabled) + nmi_exit(); +} + +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + u32 irqnr; + + irqnr = gic_read_iar(); + + if (gic_supports_nmi() && + unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) { + gic_handle_nmi(irqnr, regs); + return; + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + /* Check for special IDs first */ + if (irqnr >= 1020 && irqnr <= 1023) + return; + + /* Treat anything but SGIs in a uniform way */ + if (likely(irqnr > 15)) { + int err; + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + else + isb(); + + err = handle_domain_irq(gic_data.domain, irqnr, regs); + if (err) { + WARN_ONCE(true, "Unexpected interrupt received!\n"); + gic_deactivate_unhandled(irqnr); + } + return; + } + if (irqnr < 16) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); +#ifdef CONFIG_SMP + /* + * Unlike GICv2, we don't need an smp_rmb() here. + * The control dependency from gic_read_iar to + * the ISB in gic_write_eoir is enough to ensure + * that any shared data read by handle_IPI will + * be read after the ACK. + */ + handle_IPI(irqnr, regs); +#else + WARN_ONCE(true, "Unexpected SGI received!\n"); +#endif + } +} + +static u32 gic_get_pribits(void) +{ + u32 pribits; + + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * loses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; +} + +static void __init gic_dist_init(void) +{ + unsigned int i; + u64 affinity; + void __iomem *base = gic_data.dist_base; + u32 val; + + unsigned int skt; + + for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + base = mars3_gic_dists[skt].dist_base; + + /* Disable the distributor */ + writel_relaxed(0, base + GICD_CTLR); + gic_do_wait_for_rwp(base); + + /* + * Configure SPIs as non-secure Group-1. This will only matter + * if the GIC only has a single security state. This will not + * do the right thing if the kernel is running in secure mode, + * but that's not the intended use case anyway. 
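+ * + * Each GICD_IGROUPR bit covers one INTID and each 32-bit register + * covers 32 INTIDs, so the byte offset for INTID i below is i / 8 + * (INTIDs 32-63 live at GICD_IGROUPR + 4, for example). 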
+ */ + for (i = 32; i < GIC_LINE_NR; i += 32) + writel_relaxed(~0, base + GICD_IGROUPR + i / 8); + + /* Extended SPI range, not handled by the GICv2/GICv3 common code */ + for (i = 0; i < GIC_ESPI_NR; i += 32) { + writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); + writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); + } + + for (i = 0; i < GIC_ESPI_NR; i += 32) + writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); + + for (i = 0; i < GIC_ESPI_NR; i += 16) + writel_relaxed(0, base + GICD_ICFGRnE + i / 4); + + for (i = 0; i < GIC_ESPI_NR; i += 4) + writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); + + /* Now do the common stuff, and wait for the distributor to drain */ + gic_dist_config(base, GIC_LINE_NR, NULL); + gic_do_wait_for_rwp(base); // do sync outside of gic_dist_config + + val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; + + /* Enable distributor with ARE, Group1 */ + writel_relaxed(val, base + GICD_CTLR); + + /* + * Set all global interrupts to the boot CPU only. ARE must be + * enabled. + */ + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); + for (i = 32; i < GIC_LINE_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr = cpu_logical_map(smp_processor_id()); + u64 typer; + u32 aff; + u32 aff2_skt; + u32 redist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + redist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != redist_skt) { + return 1; + } + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... 
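+ * (gic_iterate_rdists() walked every redistributor region without + * finding a GICR_TYPER affinity match for this CPU on its socket.) 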
*/ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); + + return 1; +} + +static void gic_update_rdist_properties(void) +{ + gic_data.ppi_nr = UINT_MAX; + gic_iterate_rdists(__gic_update_rdist_properties); + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) + gic_data.ppi_nr = 0; + pr_info("%d PPIs implemented\n", gic_data.ppi_nr); + pr_info("%sVLPI support, %sdirect LPI support\n", + !gic_data.rdists.has_vlpis ? "no " : "", + !gic_data.rdists.has_direct_lpi ? "no " : ""); +} + +/* Check whether it's a single security state view */ +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = cpu_logical_map(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 pribits; + + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. + * + * Kindly inform the luser. + */ + if (!gic_enable_sre()) + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); + + pribits = gic_get_pribits(); + + group0 = gic_has_group0(); + + /* Set priority mask register */ + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } else { + /* + * Mismatched configuration with the boot CPU: the system is + * likely to die, as interrupt masking will not work properly + * on all CPUs + */ + WARN_ON(gic_supports_nmi() && group0 && + !gic_dist_security_disabled()); + } + + /* + * Some firmwares hand over to the kernel with the BPR changed from + * its reset value (and with a value large enough to prevent + * any pre-emptive interrupts from working at all). Writing a zero + * to BPR restores its reset value. + */ + gic_write_bpr1(0); + + if (static_branch_likely(&supports_deactivate_key)) { + /* EOI drops priority only (mode 1) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); + } else { + /* EOI deactivates interrupt too (mode 0) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); + } + + /* Always whack Group0 before Group1 */ + if (group0) { + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP0R3_EL1); + write_gicreg(0, ICC_AP0R2_EL1); + /* Fall through */ + case 6: + write_gicreg(0, ICC_AP0R1_EL1); + /* Fall through */ + case 5: + case 4: + write_gicreg(0, ICC_AP0R0_EL1); + } + + isb(); + } + + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP1R3_EL1); + write_gicreg(0, ICC_AP1R2_EL1); + /* Fall through */ + case 6: + write_gicreg(0, ICC_AP1R1_EL1); + /* Fall through */ + case 5: + case 4: + write_gicreg(0, ICC_AP1R0_EL1); + } + + isb(); + + /* ... and let's hit the road... 
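+ * (writing ICC_IGRPEN1_EL1 below enables Group-1 interrupt + * signalling at this CPU interface) 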
*/ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check that all the CPUs are capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); + + need_rss |= MPIDR_RS(cpu_logical_map(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)cpu_logical_map(i)); + } + + /* + * Per the GIC spec, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of: + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return strtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + int i; + unsigned long mpidr; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero + rbase = rbase + 64 * SZ_128K; // skip 64 Redistributors + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + + rbase = rbase + SZ_128K; + } + } + /* initialise system registers */ + gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if (gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr = cpu_logical_map(cpu); + u16 tlist = 0; + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = cpu_logical_map(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + 
irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + + if (WARN_ON(irq >= 16)) + return; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + wmb(); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, irq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void gic_smp_init(void) +{ + set_smp_cross_call(gic_raise_softirq); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gicv3:starting", + gic_starting_cpu, NULL); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if (skt < MAX_MARS3_SOC_COUNT) { + skt_cpu_cnt[skt]++; + } else if (skt != 0xff) { + pr_err("socket address: %d is out of range.\n", skt); + } + } + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + if (irq_skt != 0) { + for (i = 0; i < irq_skt; i++) { + cpus += skt_cpu_cnt[i]; + } + } + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + + if (irq_skt == skt) + return cpu; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + + if (skt < MAX_MARS3_SOC_COUNT) { + if (irq_skt == skt) + return i; + } else if (skt != 0xff) { + pr_err("socket address: %d is out of range.\n", skt); + } + } + } + + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + u32 offset, index; + void __iomem *reg; + int enabled; + u64 val; + unsigned int skt; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + offset = convert_offset_index(d, GICD_IROUTER, &index); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = mars3_gic_dists[skt].dist_base + offset + (index * 8); + val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + + gic_write_irouter(val, reg); + + /* + * If the interrupt was enabled, enable it again. Otherwise, + * just wait for the distributor to have digested our changes. 
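+ * + * For illustration: gic_mpidr_to_affinity() packs Aff3..Aff0 of the + * target CPU's MPIDR into the 64-bit GICD_IROUTER value, so a CPU + * with Aff2 = 1, Aff1 = 2, Aff0 = 3 is routed with the value 0x10203. 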
+ */ + if (enabled) + gic_unmask_irq(d); + else + gic_dist_wait_for_rwp(); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#else +#define gic_set_affinity NULL +#define gic_smp_init() do { } while(0) +#endif + +#ifdef CONFIG_CPU_PM +static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + if (cmd == CPU_PM_EXIT) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); + } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { + gic_write_grpen1(0); + gic_enable_redist(false); + } + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_pm_notifier_block = { + .notifier_call = gic_cpu_pm_notifier, +}; + +static void gic_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); +} + +#else +static inline void gic_cpu_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +static struct irq_chip gic_chip = { + .name = "GIC-phytium-2500", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static struct irq_chip gic_eoimode1_chip = { + .name = "GICv3-phytium-2500", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct irq_chip *chip = &gic_chip; + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; + + switch (__get_intid_range(hw)) { + case PPI_RANGE: + case EPPI_RANGE: + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); + irq_set_status_flags(irq, IRQ_NOAUTOEN); + break; + + case SPI_RANGE: + case ESPI_RANGE: + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + break; + + case LPI_RANGE: + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; + } + + return 0; +} + +#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case 
GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. + */ + if (fwspec->param_count >= 4 && + fwspec->param[0] == 1 && fwspec->param[3] != 0 && + gic_data.ppi_descs) + return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); + + return d == gic_data.domain; +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct device_node *np; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static bool gic_enable_quirk_msm8996(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + + return true; +} + +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. 
In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... */ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + if (gic_has_group0() && !gic_dist_security_disabled()) { + pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + static_branch_enable(&supports_pseudo_nmis); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + pr_info("Distributor has %sRange Selector support\n", + gic_data.has_rss ? 
"" : "no "); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + return; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + pr_cont("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + } + + pr_cont("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = 
gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + void __iomem *dist_base; + struct redist_region *rdist_regs; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + struct resource res; + unsigned long skt; + + dist_base = of_iomap(node, 0); + if (!dist_base) { + pr_err("%pOF: unable to map gic dist registers\n", node); + return -ENXIO; + } + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + err = -ENXIO; + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3_soc_bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + struct resource res; + int ret; + + ret = of_address_to_resource(node, 1 + i, &res); + rdist_regs[i].redist_base = of_iomap(node, 1 + i); + if (ret || !rdist_regs[i].redist_base) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + gic_enable_of_quirks(node, gic_quirks, &gic_data); + + err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, + redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_phyt_2500, "arm,gic-phytium-2500", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if (skt < MAX_MARS3_SOC_COUNT) { + skt_cpu_cnt[skt]++; + } else if (skt != 0xff) { + pr_err("socket address: %d is out of range.\n", skt); + } + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) { + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + } + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int 
count = 0; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem *redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + /* A GICC entry without ACPI_MADT_ENABLED set is not usable, so skip it */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (acpi_data.single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + + /* + * If the GICC entry is enabled and has a valid gicr base address, + * then the GICR base is presented via GICC. + */ + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + return 0; + + /* + * It's perfectly valid for firmware to pass a disabled GICC entry; + * the driver should not treat that as an error. Skip the entry + * instead of failing the probe. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + return -ENODEV; +} + +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. It is not allowed + * to mix redistributor descriptions: GICR and GICC subtables have + * to be mutually exclusive. 
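+ * A GICR subtable describes a redistributor region that may be + * shared by several CPUs, while a GICC subtable carries at most one + * per-CPU GICR base (the single_redist case), which is why the two + * styles must not be mixed. 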
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) + acpi_data.single_redist = true; + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static bool first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init +gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + struct fwnode_handle *domain_handle; + size_t size; + int i, err, skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + 
mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + + if (is_kdump_kernel()) + mars3_sockets_bitmap = 0x3; + + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket found, please check cpus MPIDR_AFFINITY_LEVEL!\n"); + } else { + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); + } + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, + acpi_data.nr_redist_regions, 0, domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 11f3b50dcdcb..22352846faa4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -35,12 +35,15 @@ #include <asm/cputype.h> #include <asm/exception.h> +#ifdef CONFIG_ARCH_PHYTIUM +#include <asm/machine_types.h> +#endif + #include "irq-gic-common.h" #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) -#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) @@ -1195,6 +1198,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); } @@ -2581,6 +2589,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; struct its_node *its = its_dev->its; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; @@ -2600,7 +2609,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 
&its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); @@ -2985,12 +2996,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; @@ -3358,9 +3375,6 @@ static int its_save_disable(void) list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; its->ctlr_save = readl_relaxed(base + GITS_CTLR); err = its_force_quiescent(base); @@ -3379,9 +3393,6 @@ err: list_for_each_entry_continue_reverse(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; writel_relaxed(its->ctlr_save, base + GITS_CTLR); } @@ -3401,9 +3412,6 @@ static void its_restore_enable(void) void __iomem *base; int i; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; /* @@ -3411,7 +3419,10 @@ static void its_restore_enable(void) * don't restore it since writing to CBASER or BASER<n> * registers is undefined according to the GIC v3 ITS * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. 
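+ * If GITS_CTLR.Enabled is still set at this point, CBASER and + * BASER<n> cannot be restored safely (see above), which is what + * the WARN_ON below flags. 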
*/ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); ret = its_force_quiescent(base); if (ret) { pr_err("ITS@%pa: failed to quiesce on resume: %d\n", @@ -3678,9 +3689,6 @@ static int __init its_probe_one(struct resource *res, ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - if (GITS_TYPER_HCC(typer)) - its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; - err = its_init_domain(handle, its); if (err) goto out_free_tables; diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index 563a9b366294..e81e89a81cb5 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) reg = of_get_property(np, "mbi-alias", NULL); if (reg) { mbi_phys_base = of_translate_address(np, reg); - if (mbi_phys_base == OF_BAD_ADDR) { + if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) { ret = -ENXIO; goto err_free_mbi; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 446603efbc90..9d0b42cb9903 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -621,6 +621,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs irqnr = gic_read_iar(); + /* Check for special IDs first */ + if ((irqnr >= 1020 && irqnr <= 1023)) + return; + if (gic_supports_nmi() && unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) { gic_handle_nmi(irqnr, regs); @@ -632,10 +636,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs gic_arch_enable_irqs(); } - /* Check for special IDs first */ - if ((irqnr >= 1020 && irqnr <= 1023)) - return; - /* Treat anything but SGIs in a uniform way */ if (likely(irqnr > 15)) { int err; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 30ab623343d3..882204d1ef4f 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; - u32 val, mask, bit; - unsigned long flags; + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + unsigned int cpu; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - gic_lock_irqsave(flags); - mask = 0xff << shift; - bit = gic_cpu_map[cpu] << shift; - val = readl_relaxed(reg) & ~mask; - writel_relaxed(val | bit, reg); - gic_unlock_irqrestore(flags); - + writeb_relaxed(gic_cpu_map[cpu], reg); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 6d05cefe9d79..02a82723a57a 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -179,4 +179,5 @@ err_free_tcu: } IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init); +IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init); IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init); diff --git a/drivers/irqchip/irq-ingenic.c 
b/drivers/irqchip/irq-ingenic.c index dda512dfe2c1..31bc11f15bfa 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -168,6 +168,7 @@ static int __init intc_2chip_of_init(struct device_node *node, { return ingenic_intc_of_init(node, 2); } +IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init); IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 6b566bba263b..ff7627b57772 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -220,10 +220,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, return 0; } +static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + platform_msi_domain_free(domain, virq, nr_irqs); +} + static const struct irq_domain_ops mbigen_domain_ops = { .translate = mbigen_domain_translate, .alloc = mbigen_irq_domain_alloc, - .free = irq_domain_free_irqs_common, + .free = mbigen_irq_domain_free, }; static int mbigen_of_create_domain(struct platform_device *pdev, diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 95d4fd8f7a96..0bbb0b2d0dd5 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; + ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq, + &mips_mt_cpu_irq_controller, + NULL); + + if (ret) + return ret; + ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH); if (ret) return ret; diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 73eae5966a40..6ff98b87e5c0 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -15,7 +15,7 @@ #include <linux/spinlock.h> struct mtk_sysirq_chip_data { - spinlock_t lock; + raw_spinlock_t lock; u32 nr_intpol_bases; void __iomem **intpol_bases; u32 *intpol_words; @@ -37,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) reg_index = chip_data->which_word[hwirq]; offset = hwirq & 0x1f; - spin_lock_irqsave(&chip_data->lock, flags); + raw_spin_lock_irqsave(&chip_data->lock, flags); value = readl_relaxed(base + reg_index * 4); if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) { if (type == IRQ_TYPE_LEVEL_LOW) @@ -53,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) data = data->parent_data; ret = data->chip->irq_set_type(data, type); - spin_unlock_irqrestore(&chip_data->lock, flags); + raw_spin_unlock_irqrestore(&chip_data->lock, flags); return ret; } @@ -212,7 +212,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, ret = -ENOMEM; goto out_free_which_word; } - spin_lock_init(&chip_data->lock); + raw_spin_lock_init(&chip_data->lock); return 0; diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c index 1d027623c776..abd011fcecf4 100644 --- a/drivers/irqchip/irq-sni-exiu.c +++ b/drivers/irqchip/irq-sni-exiu.c @@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain, if (fwspec->param_count != 2) return -EINVAL; *hwirq = fwspec->param[0]; - *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; } return 0; } diff --git 
a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index e00f2fa27f00..a8322a4e18d3 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -431,6 +431,16 @@ static void stm32_irq_ack(struct irq_data *d) irq_gc_unlock(gc); } +/* directly set the target bit without reading first. */ +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) { struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); @@ -464,9 +474,9 @@ static void stm32_exti_h_eoi(struct irq_data *d) raw_spin_lock(&chip_data->rlock); - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); + stm32_exti_write_bit(d, stm32_bank->rpr_ofst); if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); + stm32_exti_write_bit(d, stm32_bank->fpr_ofst); raw_spin_unlock(&chip_data->rlock); diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index ef4d625d2d80..0a35499c4672 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -37,6 +37,7 @@ #define VINT_ENABLE_SET_OFFSET 0x0 #define VINT_ENABLE_CLR_OFFSET 0x8 #define VINT_STATUS_OFFSET 0x18 +#define VINT_STATUS_MASKED_OFFSET 0x20 /** * struct ti_sci_inta_event_desc - Description of an event coming to @@ -116,7 +117,7 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc) chained_irq_enter(irq_desc_get_chip(desc), desc); val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 + - VINT_STATUS_OFFSET); + VINT_STATUS_MASKED_OFFSET); for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) { virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq); @@ -570,7 +571,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); inta->base = devm_ioremap_resource(dev, res); if (IS_ERR(inta->base)) - return -ENODEV; + return PTR_ERR(inta->base); domain = irq_domain_add_linear(dev_of_node(dev), ti_sci_get_num_resources(inta->vint), diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 928858dada75..f1386733d3bc 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -6,6 +6,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqchip/versatile-fpga.h> #include <linux/irqdomain.h> #include <linux/module.h> @@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d) static void fpga_irq_handle(struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - u32 status = readl(f->base + IRQ_STATUS); + u32 status; + chained_irq_enter(chip, desc); + + status = readl(f->base + IRQ_STATUS); if (status == 0) { do_bad_IRQ(desc); - return; + goto out; } do { @@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) status &= ~(1 << irq); generic_handle_irq(irq_find_mapping(f->domain, irq)); } while (status); + +out: + chained_irq_exit(chip, desc); } /* @@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, if (of_property_read_u32(node, "valid-mask", &valid_mask)) valid_mask = 0; + writel(clear_mask, base + IRQ_ENABLE_CLEAR); + 
writel(clear_mask, base + FIQ_ENABLE_CLEAR); + /* Some chips are cascaded from a parent IRQ */ parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { @@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); - writel(clear_mask, base + IRQ_ENABLE_CLEAR); - writel(clear_mask, base + FIQ_ENABLE_CLEAR); - /* * On Versatile AB/PB, some secondary interrupts have a direct * pass-thru to the primary controller for IRQs 20 and 22-31 which need diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index bca880213e91..51e3d45daaa7 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac) { if (isac->type & IPAC_TYPE_ISACX) WriteISAC(isac, ISACX_MASK, 0xff); - else + else if (isac->type != 0) WriteISAC(isac, ISAC_MASK, 0xff); if (isac->dch.timer.function != NULL) { del_timer(&isac->dch.timer); diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig index 26cf0ac9c4ad..c9a53c222472 100644 --- a/drivers/isdn/mISDN/Kconfig +++ b/drivers/isdn/mISDN/Kconfig @@ -13,6 +13,7 @@ if MISDN != n config MISDN_DSP tristate "Digital Audio Processing of transparent data" depends on MISDN + select BITREVERSE help Enable support for digital audio processing capability. diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 1988de1d64c0..0c62bc283aca 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -823,6 +823,13 @@ config LEDS_LM36274 Say Y to enable the LM36274 LED driver for TI LMU devices. This supports the LED device LM36274. +config LEDS_ALTRA_SPCI + bool "Altra LED support via SPCI" + depends on ARM64 && ACPI + help + If you say yes here, you get support for the Altra SPCI LED. + When in doubt, say N.
+ comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 41fb073a39c1..4926e0e045ba 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o +obj-$(CONFIG_LEDS_ALTRA_SPCI) += leds-altra-spci.o # LED SPI Drivers obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 647b1263c579..0a4823d9797a 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -172,6 +172,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev) { led_cdev->flags |= LED_SUSPENDED; led_set_brightness_nopm(led_cdev, 0); + flush_work(&led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_classdev_suspend); @@ -281,7 +282,7 @@ int led_classdev_register_ext(struct device *parent, if (ret) dev_warn(parent, "Led %s renamed to %s due to name collision", - led_cdev->name, dev_name(led_cdev->dev)); + proposed_name, dev_name(led_cdev->dev)); if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) { ret = led_add_brightness_hw_changed(led_cdev); diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 23963e5cb5d6..0d59763e40de 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -318,14 +318,15 @@ void led_trigger_event(struct led_trigger *trig, enum led_brightness brightness) { struct led_classdev *led_cdev; + unsigned long flags; if (!trig) return; - read_lock(&trig->leddev_list_lock); + read_lock_irqsave(&trig->leddev_list_lock, flags); list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) led_set_brightness(led_cdev, brightness); - read_unlock(&trig->leddev_list_lock); + read_unlock_irqrestore(&trig->leddev_list_lock, flags); } EXPORT_SYMBOL_GPL(led_trigger_event); @@ -336,11 +337,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, int invert) { struct led_classdev *led_cdev; + unsigned long flags; if (!trig) return; - read_lock(&trig->leddev_list_lock); + read_lock_irqsave(&trig->leddev_list_lock, flags); list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { if (oneshot) led_blink_set_oneshot(led_cdev, delay_on, delay_off, @@ -348,7 +350,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, else led_blink_set(led_cdev, delay_on, delay_off); } - read_unlock(&trig->leddev_list_lock); + read_unlock_irqrestore(&trig->leddev_list_lock, flags); } void led_trigger_blink(struct led_trigger *trig, diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index b3044c9a8120..465c3755cf2e 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -203,21 +203,33 @@ static int pm860x_led_probe(struct platform_device *pdev) data->cdev.brightness_set_blocking = pm860x_led_set; mutex_init(&data->lock); - ret = devm_led_classdev_register(chip->dev, &data->cdev); + ret = led_classdev_register(chip->dev, &data->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } pm860x_led_set(&data->cdev, 0); + + platform_set_drvdata(pdev, data); + return 0; } +static int pm860x_led_remove(struct platform_device *pdev) +{ + struct pm860x_led *data = platform_get_drvdata(pdev); + + led_classdev_unregister(&data->cdev); + + return 0; +} static struct platform_driver pm860x_led_driver = { .driver = { .name = "88pm860x-led", }, .probe = 
pm860x_led_probe, + .remove = pm860x_led_remove, }; module_platform_driver(pm860x_led_driver); diff --git a/drivers/leds/leds-altra-spci.c b/drivers/leds/leds-altra-spci.c new file mode 100644 index 000000000000..53d060e1894d --- /dev/null +++ b/drivers/leds/leds-altra-spci.c @@ -0,0 +1,440 @@ +/* LED driver for Altra SPCI + * + * Copyright (C) 2020 Ampere Computing LLC + * Author: Dung Cao <dcao@os.amperecompting.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * This driver supports controlling LEDs via the SPCI interface on Altra + * systems. Currently, it is a 2-LED system (Activity LED, Status LED). The + * Activity LED is completely controlled by hardware; the Status LED is + * controlled by this driver. + * There are 4 states of the Status LED: + * - ON + * - OFF + * - Locate (blink at 1 Hz) + * - Rebuild (blink at 4 Hz) + * Example: + * To turn the LED on slot 9 on or off: + * 1. Write the seg:bus_number corresponding to the LED on slot 9 to altra:led + * echo 000b:03 > /sys/class/leds/altra:led/address + * 2. Write 1 to the brightness attr to turn it on, 0 to turn it off + * echo 1 > /sys/class/leds/altra:led/brightness + * echo 0 > /sys/class/leds/altra:led/brightness + * To blink the LED on slot 9: + * 1. Write the seg:bus_number corresponding to the LED on slot 9 to altra:led + * echo 000b:03 > /sys/class/leds/altra:led/address + * 2. Write 1 to the blink attr to select Locate (blink at 1 Hz), or 4 to + * select Rebuild (blink at 4 Hz) + * echo 1 > /sys/class/leds/altra:led/blink + * echo 4 > /sys/class/leds/altra:led/blink + * 3. Write 1 to the shot attr to start blinking + * echo 1 > /sys/class/leds/altra:led/shot + */ + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/leds.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/arm-smccc.h> +#include <linux/types.h> +#include <uapi/linux/uleds.h> +#include <linux/bitfield.h> + +/* LED miscellaneous functions */ +#define LED_CMD 4 /* Control LED state */ +#define GETPORT_CMD 9 /* Get PCIE port */ +#define LED_FAULT 1 /* LED type - Fault */ +#define LED_ATT 2 /* LED type - Attention */ +#define LED_SET_ON 1 +#define LED_SET_OFF 2 +#define LED_SET_BLINK_1HZ 3 +#define LED_SET_BLINK_4HZ 4 +#define BLINK_1HZ 1 +#define BLINK_4HZ 4 +#define ARG_SEG_MASK GENMASK(3,0) +#define ARG_BUS_MASK GENMASK(7,4) + +/* Function id for SMC calling convention */ +#define FUNCID_TYPE_SHIFT 31 +#define FUNCID_CC_SHIFT 30 +#define FUNCID_OEN_SHIFT 24 +#define FUNCID_NUM_SHIFT 0 +#define FUNCID_TYPE_MASK 0x1 +#define FUNCID_CC_MASK 0x1 +#define FUNCID_OEN_MASK 0x3f +#define FUNCID_NUM_MASK 0xffff +#define FUNCID_TYPE_WIDTH 1 +#define FUNCID_CC_WIDTH 1 +#define FUNCID_OEN_WIDTH 6 +#define FUNCID_NUM_WIDTH 16 +#define SMC_64 1 +#define SMC_32 0 +#define SMC_TYPE_FAST 1 +#define SMC_TYPE_STD 0 + +/* SPCI error codes. 
*/ +#define SPCI_SUCCESS 0 +#define SPCI_NOT_SUPPORTED -1 +#define SPCI_INVALID_PARAMETER -2 +#define SPCI_NO_MEMORY -3 +#define SPCI_BUSY -4 +#define SPCI_QUEUED -5 +#define SPCI_DENIED -6 +#define SPCI_NOT_PRESENT -7 + +/* Client ID used for SPCI calls */ +#define SPCI_CLIENT_ID 0x0000ACAC + +/* This error code must be different to the ones used by SPCI */ +#define SPCI_ERROR -42 + +/* Definitions to build the complete SMC ID */ +#define SPCI_FID_MISC_FLAG (0 << 27) +#define SPCI_FID_MISC_SHIFT 20 +#define SPCI_FID_MISC_MASK 0x7F +#define SPCI_FID_TUN_FLAG BIT(27) +#define SPCI_FID_TUN_SHIFT 24 +#define SPCI_FID_TUN_MASK 0x7 +#define OEN_SPCI_START 0x30 +#define OEN_SPCI_END 0x3F +#define ARG_RES_MASK GENMASK(15,0) +#define ARG_HANDLE_MASK GENMASK(31,16) +#define SPCI_SMC(spci_fid) ((OEN_SPCI_START << FUNCID_OEN_SHIFT) | \ + (1 << 31) | (spci_fid)) +#define SPCI_MISC_32(misc_fid) ((SMC_32 << FUNCID_CC_SHIFT) | \ + SPCI_FID_MISC_FLAG | \ + SPCI_SMC((misc_fid) << SPCI_FID_MISC_SHIFT)) +#define SPCI_MISC_64(misc_fid) ((SMC_64 << FUNCID_CC_SHIFT) | \ + SPCI_FID_MISC_FLAG | \ + SPCI_SMC((misc_fid) << SPCI_FID_MISC_SHIFT)) +#define SPCI_TUN_32(tun_fid) ((SMC_32 << FUNCID_CC_SHIFT) | \ + SPCI_FID_TUN_FLAG | \ + SPCI_SMC((tun_fid) << SPCI_FID_TUN_SHIFT)) +#define SPCI_TUN_64(tun_fid) ((SMC_64 << FUNCID_CC_SHIFT) | \ + SPCI_FID_TUN_FLAG | \ + SPCI_SMC((tun_fid) << SPCI_FID_TUN_SHIFT)) + +/* SPCI miscellaneous functions */ +#define SPCI_FID_VERSION 0x0 +#define SPCI_FID_SERVICE_HANDLE_OPEN 0x2 +#define SPCI_FID_SERVICE_HANDLE_CLOSE 0x3 +#define SPCI_FID_SERVICE_MEM_REGISTER 0x4 +#define SPCI_FID_SERVICE_MEM_UNREGISTER 0x5 +#define SPCI_FID_SERVICE_MEM_PUBLISH 0x6 +#define SPCI_FID_SERVICE_REQUEST_BLOCKING 0x7 +#define SPCI_FID_SERVICE_REQUEST_START 0x8 +#define SPCI_FID_SERVICE_GET_RESPONSE 0x9 +#define SPCI_FID_SERVICE_RESET_CLIENT_STATE 0xA + +/* SPCI tunneling functions */ +#define SPCI_FID_SERVICE_TUN_REQUEST_START 0x0 +#define SPCI_FID_SERVICE_REQUEST_RESUME 0x1 +#define SPCI_FID_SERVICE_TUN_REQUEST_BLOCKING 0x2 + +/* Complete SMC IDs and associated values */ +#define SPCI_VERSION SPCI_MISC_32(SPCI_FID_VERSION) +#define SPCI_SERVICE_HANDLE_OPEN \ + SPCI_MISC_32(SPCI_FID_SERVICE_HANDLE_OPEN) +#define SPCI_SERVICE_HANDLE_OPEN_NOTIFY_BIT 1 +#define SPCI_SERVICE_HANDLE_CLOSE \ + SPCI_MISC_32(SPCI_FID_SERVICE_HANDLE_CLOSE) +#define SPCI_SERVICE_REQUEST_BLOCKING_AARCH32 \ + SPCI_MISC_32(SPCI_FID_SERVICE_REQUEST_BLOCKING) +#define SPCI_SERVICE_REQUEST_BLOCKING_AARCH64 \ + SPCI_MISC_64(SPCI_FID_SERVICE_REQUEST_BLOCKING) + +struct spci_led { + struct led_classdev cdev; + u8 seg; + u8 bus_num; + u8 blink; + u32 uuid[4]; +}; + +static int spci_service_handle_open(u16 client_id, u16 *handle, + u32 uuid1, u32 uuid2, u32 uuid3, u32 uuid4) +{ + int ret; + struct arm_smccc_res res; + u32 x1; + + arm_smccc_smc(SPCI_SERVICE_HANDLE_OPEN, uuid1, uuid2, uuid3, uuid4, + 0, 0, client_id, &res); + + ret = res.a0; + if (ret != SPCI_SUCCESS) { + return ret; + } + + x1 = res.a1; + + if (FIELD_GET(ARG_RES_MASK, x1) != 0) { + return SPCI_ERROR; + } + + /* The handle is returned in the top 16 bits */ + *handle = FIELD_GET(ARG_HANDLE_MASK, x1); + + return SPCI_SUCCESS; +} + +static int spci_service_handle_close(u16 client_id, u16 handle) +{ + struct arm_smccc_res res; + + arm_smccc_smc(SPCI_SERVICE_HANDLE_CLOSE, (client_id | (handle << 16)), + 0, 0, 0, 0, 0, 0, &res); + + return (int) res.a0; +} + +static int spci_service_request_locking(u64 x1, u64 x2, u64 x3, u64 x4, + u64 x5, u64 x6, u16 client_id, u16 handle, + u64 
*rx1, u64 *rx2, u64 *rx3) +{ + struct arm_smccc_res res; + int ret; + + arm_smccc_smc(SPCI_SERVICE_REQUEST_BLOCKING_AARCH64, x1, x2, x3, x4, + x5, x6, (client_id | (handle << 16)), &res); + + ret = res.a0; + if (ret != SPCI_SUCCESS) + return ret; + + if (rx1 != NULL) + *rx1 = res.a1; + if (rx2 != NULL) + *rx2 = res.a2; + if (rx3 != NULL) + *rx3 = res.a3; + + return SPCI_SUCCESS; +} + + +static const struct acpi_device_id altra_spci_leds_acpi_match[] = { + {"AMPC0008", 1}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, altra_spci_leds_acpi_match); + +static void spci_led_set(struct led_classdev *cdev, + enum led_brightness value) +{ + struct spci_led *led = container_of(cdev, struct spci_led, cdev); + u64 x1, x2, x3; + u16 handle; + int ret; + + if (!led) { + dev_err(cdev->dev, "Failed to get resource\n"); + return; + } + + ret = spci_service_handle_open(SPCI_CLIENT_ID, &handle, + led->uuid[0], led->uuid[1], + led->uuid[2], led->uuid[3]); + if (ret != SPCI_SUCCESS) { + dev_err(cdev->dev, "spci_service_handle_open failed: %d\n", ret); + return; + } + + x1 = value ? LED_SET_ON : LED_SET_OFF; + x2 = LED_ATT; + x3 = FIELD_PREP(ARG_SEG_MASK, led->seg) | + FIELD_PREP(ARG_BUS_MASK, led->bus_num); + ret = spci_service_request_locking(LED_CMD, x1, x2, + x3, 0, 0, SPCI_CLIENT_ID, handle, + NULL, NULL, NULL); + if (ret != SPCI_SUCCESS) { + dev_err(cdev->dev, "spci_service_request_locking failed: %d\n", ret); + return; + } + + ret = spci_service_handle_close(SPCI_CLIENT_ID, handle); + if (ret != SPCI_SUCCESS) { + dev_err(cdev->dev, "spci_service_handle_close failed: %d\n", ret); + return; + } +} + +static ssize_t led_shot(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct spci_led *led = dev_get_drvdata(dev); + u64 x1, x2, x3; + u16 handle; + int ret; + + ret = spci_service_handle_open(SPCI_CLIENT_ID, &handle, + led->uuid[0], led->uuid[1], + led->uuid[2], led->uuid[3]); + if (ret != SPCI_SUCCESS) { + dev_err(dev, "spci_service_handle_open failed: %d\n", ret); + return ret; + } + + if (led->blink == BLINK_1HZ) + x1 = LED_SET_BLINK_1HZ; + else if (led->blink == BLINK_4HZ) + x1 = LED_SET_BLINK_4HZ; + else { + dev_err(dev, "Invalid blink mode\n"); + spci_service_handle_close(SPCI_CLIENT_ID, handle); + return -EINVAL; + } + x2 = LED_ATT; + x3 = FIELD_PREP(ARG_SEG_MASK, led->seg) | + FIELD_PREP(ARG_BUS_MASK, led->bus_num); + ret = spci_service_request_locking(LED_CMD, x1, x2, + x3, 0, 0, SPCI_CLIENT_ID, handle, + NULL, NULL, NULL); + if (ret != SPCI_SUCCESS) { + dev_err(dev, "spci_service_request_locking failed: %d\n", ret); + return ret; + } + + ret = spci_service_handle_close(SPCI_CLIENT_ID, handle); + if (ret != SPCI_SUCCESS) { + dev_err(dev, "spci_service_handle_close failed: %d\n", ret); + return ret; + } + + return size; +} + +static ssize_t led_blink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spci_led *led = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", led->blink); +} + +static ssize_t led_blink_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct spci_led *led = dev_get_drvdata(dev); + unsigned long state; + int ret; + + ret = kstrtoul(buf, 0, &state); + if (ret) + return ret; + + led->blink = state; + + return size; +} + +static ssize_t led_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spci_led *led = dev_get_drvdata(dev); + + return sprintf(buf, "%04x:%02x\n", led->seg, led->bus_num); +} + +static ssize_t led_address_store(struct device *dev, + struct 
device_attribute *attr, const char *buf, size_t size) +{ + struct spci_led *led = dev_get_drvdata(dev); + char *t, *q; + char m[16]; + unsigned long seg, bus_num; + + if (strscpy(m, buf, sizeof(m)) < 0) { + dev_err(dev, "Address string too long\n"); + return -EINVAL; + } + q = m; + t = strchr(q, ':'); + if (t == NULL) { + dev_err(dev, "Invalid address format %s\n", q); + return -EINVAL; + } + *t = '\0'; + if (kstrtoul(q, 16, &seg)) { + dev_err(dev, "Failed to convert string %s to unsigned\n", q); + return -EINVAL; + } + q = t + 1; + if (kstrtoul(q, 16, &bus_num)) { + dev_err(dev, "Failed to convert string %s to unsigned\n", q); + return -EINVAL; + } + led->seg = seg; + led->bus_num = bus_num; + + return size; +} + +static DEVICE_ATTR(blink, 0644, led_blink_show, led_blink_store); +static DEVICE_ATTR(address, 0644, led_address_show, led_address_store); +static DEVICE_ATTR(shot, 0200, NULL, led_shot); + +static int spci_led_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct fwnode_handle *fwnode = NULL; + struct spci_led *led_data; + int ret = 0; + + led_data = devm_kzalloc(dev, sizeof(struct spci_led), GFP_KERNEL); + if (led_data == NULL) + return -ENOMEM; + + fwnode = acpi_fwnode_handle(ACPI_COMPANION(dev)); + ret = fwnode_property_read_u32_array(fwnode, "uuid", led_data->uuid, 4); + if (ret) { + dev_err(dev, "Failed to get uuid\n"); + return ret; + } + /* Use a static string: the class device keeps this pointer after probe returns */ + led_data->cdev.name = "altra:led"; + led_data->cdev.brightness_set = spci_led_set; + ret = devm_led_classdev_register(dev, &led_data->cdev); + if (ret < 0) { + dev_err(dev, "Failed to register %s: %d\n", led_data->cdev.name, ret); + return ret; + } + dev_set_drvdata(led_data->cdev.dev, led_data); + if (device_create_file(led_data->cdev.dev, &dev_attr_shot)) + dev_warn(dev, "Failed to create shot attribute\n"); + if (device_create_file(led_data->cdev.dev, &dev_attr_blink)) + dev_warn(dev, "Failed to create blink attribute\n"); + if (device_create_file(led_data->cdev.dev, &dev_attr_address)) + dev_warn(dev, "Failed to create address attribute\n"); + + dev_info(dev, "Altra SPCI LED driver probed\n"); + + return 0; +} + +static struct platform_driver spci_led_driver = { + .probe = spci_led_probe, + .driver = { + .name = "leds-altra-spci", + .acpi_match_table = ACPI_PTR(altra_spci_leds_acpi_match), + }, +}; + +module_platform_driver(spci_led_driver); + +MODULE_AUTHOR("Dung Cao <dcao@os.amperecompting.com>"); +MODULE_DESCRIPTION("Altra SPCI LED driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:leds-spci"); diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index c50d34e2b098..de932755bcb3 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c @@ -332,7 +332,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, led->cdev.brightness_set = bcm6328_led_set; led->cdev.blink_set = bcm6328_blink_set; - rc = led_classdev_register(dev, &led->cdev); + rc = devm_led_classdev_register(dev, &led->cdev); if (rc < 0) return rc; diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c index aec285fd21c0..dbb8953334d9 100644 --- a/drivers/leds/leds-bcm6358.c +++ b/drivers/leds/leds-bcm6358.c @@ -137,7 +137,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, led->cdev.brightness_set = bcm6358_led_set; - rc = led_classdev_register(dev, &led->cdev); + rc = devm_led_classdev_register(dev, &led->cdev); if (rc < 0) return rc; diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index ed1b303f699f..2b5fb00438a2 100644 --- 
a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -110,12 +110,23 @@ static int da903x_led_probe(struct platform_device *pdev) led->flags = pdata->flags; led->master = pdev->dev.parent; - ret = devm_led_classdev_register(led->master, &led->cdev); + ret = led_classdev_register(led->master, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", id); return ret; } + platform_set_drvdata(pdev, led); + + return 0; +} + +static int da903x_led_remove(struct platform_device *pdev) +{ + struct da903x_led *led = platform_get_drvdata(pdev); + + led_classdev_unregister(&led->cdev); + return 0; } @@ -124,6 +135,7 @@ static struct platform_driver da903x_led_driver = { .name = "da903x-led", }, .probe = da903x_led_probe, + .remove = da903x_led_remove, }; module_platform_driver(da903x_led_driver); diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 9504ad405aef..b3edee703193 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -694,7 +694,7 @@ static int lm3533_led_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev); + ret = led_classdev_register(pdev->dev.parent, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); return ret; @@ -704,13 +704,18 @@ static int lm3533_led_probe(struct platform_device *pdev) ret = lm3533_led_setup(led, pdata); if (ret) - return ret; + goto err_deregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) - return ret; + goto err_deregister; return 0; + +err_deregister: + led_classdev_unregister(&led->cdev); + + return ret; } static int lm3533_led_remove(struct platform_device *pdev) @@ -720,6 +725,7 @@ static int lm3533_led_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); + led_classdev_unregister(&led->cdev); return 0; } diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index a5abb499574b..129f475aebf2 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -165,18 +165,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip) /* input and output pins configuration */ switch (chip->type) { case CHIP_LM3554: - reg_val = pdata->pin_tx2 | pdata->ntc_pin; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin; ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val); if (ret < 0) goto out; - reg_val = pdata->pass_mode; + reg_val = (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val); if (ret < 0) goto out; break; case CHIP_LM3556: - reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin | + (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val); if (ret < 0) goto out; diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c index 836b60c9a2b8..db842eeb7ca2 100644 --- a/drivers/leds/leds-lm36274.c +++ b/drivers/leds/leds-lm36274.c @@ -133,7 +133,7 @@ static int lm36274_probe(struct platform_device *pdev) lm36274_data->pdev = pdev; lm36274_data->dev = lmu->dev; lm36274_data->regmap = lmu->regmap; - dev_set_drvdata(&pdev->dev, lm36274_data); + platform_set_drvdata(pdev, lm36274_data); ret = lm36274_parse_dt(lm36274_data); if (ret) { @@ -147,8 +147,16 @@ static int lm36274_probe(struct platform_device *pdev) return ret; } - return devm_led_classdev_register(lm36274_data->dev, - &lm36274_data->led_dev); + return led_classdev_register(lm36274_data->dev, 
&lm36274_data->led_dev); +} + +static int lm36274_remove(struct platform_device *pdev) +{ + struct lm36274 *lm36274_data = platform_get_drvdata(pdev); + + led_classdev_unregister(&lm36274_data->led_dev); + + return 0; } static const struct of_device_id of_lm36274_leds_match[] = { @@ -159,6 +167,7 @@ MODULE_DEVICE_TABLE(of, of_lm36274_leds_match); static struct platform_driver lm36274_driver = { .probe = lm36274_probe, + .remove = lm36274_remove, .driver = { .name = "lm36274-leds", }, diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c index cabe379071a7..82aea1cd0c12 100644 --- a/drivers/leds/leds-mlxreg.c +++ b/drivers/leds/leds-mlxreg.c @@ -228,8 +228,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv) brightness = LED_OFF; led_data->base_color = MLXREG_LED_GREEN_SOLID; } - sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg", - data->label); + snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name), + "mlxreg:%s", data->label); led_cdev->name = led_data->led_cdev_name; led_cdev->brightness = brightness; led_cdev->max_brightness = LED_ON; diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index 082df7f1dd90..67f4235cb28a 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -269,12 +269,23 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.blink_set = wm831x_status_blink_set; drvdata->cdev.groups = wm831x_status_groups; - ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev); + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } + platform_set_drvdata(pdev, drvdata); + + return 0; +} + +static int wm831x_status_remove(struct platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + led_classdev_unregister(&drvdata->cdev); + return 0; } @@ -283,6 +294,7 @@ static struct platform_driver wm831x_status_driver = { .name = "wm831x-status", }, .probe = wm831x_status_probe, + .remove = wm831x_status_remove, }; module_platform_driver(wm831x_status_driver); diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 8f39f9ba5c80..4c2ce210c123 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -19,6 +19,7 @@ if NVM config NVM_PBLK tristate "Physical Block Device Open-Channel SSD target" + select CRC32 help Allows an open-channel SSD to be exposed as a block device to the host. 
The target assumes the device exposes raw flash and must be diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 7543e395a2c6..4714aa936b2a 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -849,11 +849,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa) rqd.ppa_addr = generic_to_dev_addr(dev, ppa); ret = nvm_submit_io_sync_raw(dev, &rqd); + __free_page(page); if (ret) return ret; - __free_page(page); - return rqd.error; } @@ -1316,8 +1315,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) strlcpy(info->bmname, "gennvm", sizeof(info->bmname)); i++; - if (i > 31) { - pr_err("max 31 devices can be reported.\n"); + if (i >= ARRAY_SIZE(devices->info)) { + pr_err("max %zd devices can be reported.\n", + ARRAY_SIZE(devices->info)); break; } } diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index ac824d7b2dcf..6aa903529570 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -270,15 +270,12 @@ static int macii_autopoll(int devs) unsigned long flags; int err = 0; + local_irq_save(flags); + /* bit 1 == device 1, and so on. */ autopoll_devs = devs & 0xFFFE; - if (!autopoll_devs) - return 0; - - local_irq_save(flags); - - if (current_req == NULL) { + if (autopoll_devs && !current_req) { /* Send a Talk Reg 0. The controller will repeatedly transmit * this as long as it is idle. */ diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 4150301a89a5..e8377ce0a95a 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c @@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu) s32 tmax; int fmin; - /* Get PID params from the appropriate SAT */ - hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); - if (hdr == NULL) { - printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); - return -EINVAL; - } - piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; - /* Get FVT params to get Tmax; if not found, assume default */ hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); if (hdr) { @@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu) if (tmax < cpu_all_tmax) cpu_all_tmax = tmax; + kfree(hdr); + + /* Get PID params from the appropriate SAT */ + hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); + if (hdr == NULL) { + printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); + return -EINVAL; + } + piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; + /* * Darwin has a minimum fan speed of 1000 rpm for the 4-way and * 515 for the 2-way. 
That appears to be overkill, so for now, @@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu) pid.min = fmin; wf_cpu_pid_init(&cpu_pid[cpu], &pid); + + kfree(hdr); + return 0; } diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 0b821a5b2db8..3e7d4b20ab34 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan) exit: spin_unlock_irqrestore(&chan->lock, flags); - if (!err && (chan->txdone_method & TXDONE_BY_POLL)) - /* kick start the timer immediately to avoid delays */ - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + /* kick start the timer immediately to avoid delays */ + if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { + /* but only if not already active */ + if (!hrtimer_active(&chan->mbox->poll_hrt)) + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + } } static void tx_tick(struct mbox_chan *chan, int r) @@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) struct mbox_chan *chan = &mbox->chans[i]; if (chan->active_req && chan->cl) { + resched = true; txdone = chan->mbox->ops->last_tx_done(chan); if (txdone) tx_tick(chan, 0); - else - resched = true; } } diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 9a6ce9f5a7db..3c8b365ce635 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -70,7 +70,7 @@ struct cmdq_task { struct cmdq { struct mbox_controller mbox; void __iomem *base; - u32 irq; + int irq; u32 thread_nr; u32 irq_mask; struct cmdq_thread *thread; @@ -474,10 +474,8 @@ static int cmdq_probe(struct platform_device *pdev) } cmdq->irq = platform_get_irq(pdev, 0); - if (!cmdq->irq) { - dev_err(dev, "failed to get irq\n"); - return -EINVAL; - } + if (cmdq->irq < 0) + return cmdq->irq; cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev); cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0); diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index 86887c9a349a..f9cc674ba9b7 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret); @@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); @@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { 
dev_err(mdev, "Unmatched resource %s.\n", name); @@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index aa98953f4462..7dd6e98257c7 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -565,6 +565,7 @@ config DM_INTEGRITY select BLK_DEV_INTEGRITY select DM_BUFIO select CRYPTO + select CRYPTO_SKCIPHER select ASYNC_XOR ---help--- This device-mapper target emulates a block device that has diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 3d2b63585da9..36de6f7ddf22 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -264,7 +264,7 @@ struct bcache_device { #define BCACHE_DEV_UNLINK_DONE 2 #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 - unsigned int nr_stripes; + int nr_stripes; unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; @@ -585,6 +585,7 @@ struct cache_set { */ wait_queue_head_t btree_cache_wait; struct task_struct *btree_cache_alloc_lock; + spinlock_t btree_cannibalize_lock; /* * When we free a btree node, we increment the gen of the bucket the @@ -985,6 +986,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; extern struct workqueue_struct *bch_journal_wq; +extern struct workqueue_struct *bch_flush_wq; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; @@ -1026,5 +1028,7 @@ void bch_debug_exit(void); void bch_debug_init(void); void bch_request_exit(void); int bch_request_init(void); +void bch_btree_exit(void); +int bch_btree_init(void); #endif /* _BCACHE_H */ diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 08768796b543..fda68c00ddd5 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, b->page_order = page_order; - t->data = (void *) __get_free_pages(gfp, b->page_order); + t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); if (!t->data) goto err; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 46a8b5a91c38..5a33910aea78 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -99,6 +99,8 @@ #define PTR_HASH(c, k) \ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) +static struct workqueue_struct *btree_io_wq; + #define insert_lock(s, b) ((b)->level <= (s)->lock) /* @@ -366,7 +368,7 @@ static void __btree_node_write_done(struct closure *cl) btree_complete_write(b, w); if (btree_node_dirty(b)) - schedule_delayed_work(&b->work, 30 * HZ); + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); closure_return_with_destructor(cl, btree_node_write_unlock); } @@ -539,7 +541,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) BUG_ON(!i->keys); if (!btree_node_dirty(b)) - schedule_delayed_work(&b->work, 30 * HZ); + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); set_btree_node_dirty(b); @@ -840,7 +842,7 @@ int bch_btree_cache_alloc(struct cache_set *c) mutex_init(&c->verify_lock); c->verify_ondisk = (void *) - 
__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); + __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); @@ -886,15 +888,17 @@ out: static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) { - struct task_struct *old; - - old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); - if (old && old != current) { + spin_lock(&c->btree_cannibalize_lock); + if (likely(c->btree_cache_alloc_lock == NULL)) { + c->btree_cache_alloc_lock = current; + } else if (c->btree_cache_alloc_lock != current) { if (op) prepare_to_wait(&c->btree_cache_wait, &op->wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&c->btree_cannibalize_lock); return -EINTR; } + spin_unlock(&c->btree_cannibalize_lock); return 0; } @@ -929,10 +933,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, */ static void bch_cannibalize_unlock(struct cache_set *c) { + spin_lock(&c->btree_cannibalize_lock); if (c->btree_cache_alloc_lock == current) { c->btree_cache_alloc_lock = NULL; wake_up(&c->btree_cache_wait); } + spin_unlock(&c->btree_cannibalize_lock); } static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, @@ -1442,7 +1448,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__set_blocks(n1, n1->keys + n2->keys, block_bytes(b->c)) > btree_blocks(new_nodes[i])) - goto out_nocoalesce; + goto out_unlock_nocoalesce; keys = n2->keys; /* Take the key of the node we're getting rid of */ @@ -1471,7 +1477,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) - goto out_nocoalesce; + goto out_unlock_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); bch_keylist_add(&keylist, &new_nodes[i]->key); @@ -1517,6 +1523,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, /* Invalidated our iterator */ return -EINTR; +out_unlock_nocoalesce: + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); + out_nocoalesce: closure_sync(&cl); @@ -2651,3 +2661,18 @@ void bch_keybuf_init(struct keybuf *buf) spin_lock_init(&buf->lock); array_allocator_init(&buf->freelist); } + +void bch_btree_exit(void) +{ + if (btree_io_wq) + destroy_workqueue(btree_io_wq); +} + +int __init bch_btree_init(void) +{ + btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0); + if (!btree_io_wq) + return -ENOMEM; + + return 0; +} diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6730820780b0..b4fd923e0d40 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -958,8 +958,8 @@ atomic_t *bch_journal(struct cache_set *c, journal_try_write(c); } else if (!w->dirty) { w->dirty = true; - schedule_delayed_work(&c->journal.work, - msecs_to_jiffies(c->journal_delay_ms)); + queue_delayed_work(bch_flush_wq, &c->journal.work, + msecs_to_jiffies(c->journal_delay_ms)); spin_unlock(&c->journal.lock); } else { spin_unlock(&c->journal.lock); @@ -1002,8 +1002,8 @@ int bch_journal_alloc(struct cache_set *c) j->w[1].c = c; if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || - !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || - !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) + !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || + !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) return -ENOMEM; return 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c 
index 658b0f4a01f5..b0d569032dd4 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -48,6 +48,7 @@ static int bcache_major; static DEFINE_IDA(bcache_device_idx); static wait_queue_head_t unregister_wait; struct workqueue_struct *bcache_wq; +struct workqueue_struct *bch_flush_wq; struct workqueue_struct *bch_journal_wq; @@ -789,7 +790,9 @@ static void bcache_device_free(struct bcache_device *d) bcache_device_detach(d); if (disk) { - if (disk->flags & GENHD_FL_UP) + bool disk_added = (disk->flags & GENHD_FL_UP) != 0; + + if (disk_added) del_gendisk(disk); if (disk->queue) @@ -797,7 +800,8 @@ static void bcache_device_free(struct bcache_device *d) ida_simple_remove(&bcache_device_idx, first_minor_to_idx(disk->first_minor)); - put_disk(disk); + if (disk_added) + put_disk(disk); } bioset_exit(&d->bio_split); @@ -813,19 +817,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, SIZE_MAX / sizeof(atomic_t)); - size_t n; + uint64_t n; int idx; if (!d->stripe_size) d->stripe_size = 1 << 31; - d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); - - if (!d->nr_stripes || d->nr_stripes > max_stripes) { - pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", - (unsigned int)d->nr_stripes); + n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); + if (!n || n > max_stripes) { + pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n", + n); return -ENOMEM; } + d->nr_stripes = n; n = d->nr_stripes * sizeof(atomic_t); d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); @@ -1751,7 +1755,7 @@ void bch_cache_set_unregister(struct cache_set *c) } #define alloc_bucket_pages(gfp, c) \ - ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) + ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) { @@ -1795,6 +1799,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) sema_init(&c->sb_write_mutex, 1); mutex_init(&c->bucket_lock); init_waitqueue_head(&c->btree_cache_wait); + spin_lock_init(&c->btree_cannibalize_lock); init_waitqueue_head(&c->bucket_wait); init_waitqueue_head(&c->gc_wait); sema_init(&c->uuid_write_mutex, 1); @@ -2088,7 +2093,14 @@ found: sysfs_create_link(&c->kobj, &ca->kobj, buf)) goto err; - if (ca->sb.seq > c->sb.seq) { + /* + * A special case is when both ca->sb.seq and c->sb.seq are 0, which + * happens on a newly created cache device whose super block has + * never been flushed yet. In this case c->sb.version and other + * members should be updated too, otherwise we will end up with a + * mistaken super block version in the cache set. + */ + if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { c->sb.version = ca->sb.version; memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c->sb.flags = ca->sb.flags; @@ -2641,6 +2653,9 @@ static void bcache_exit(void) destroy_workqueue(bcache_wq); if (bch_journal_wq) destroy_workqueue(bch_journal_wq); + if (bch_flush_wq) + destroy_workqueue(bch_flush_wq); + bch_btree_exit(); if (bcache_major) unregister_blkdev(bcache_major, "bcache"); @@ -2696,10 +2711,26 @@ static int __init bcache_init(void) return bcache_major; } + if (bch_btree_init()) + goto err; + bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); if (!bcache_wq) goto err; + /* + * Let's not make this `WQ_MEM_RECLAIM` for the following reasons: + * + * 1. It used `system_wq` before which also does no memory reclaim. + * 2. 
With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and + reduced throughput can be observed. + * + * We still want to use our own queue so as not to congest the `system_wq`. + */ + bch_flush_wq = alloc_workqueue("bch_flush", 0, 0); + if (!bch_flush_wq) + goto err; + bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); if (!bch_journal_wq) goto err; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index d60268fe49e1..0b02210ab435 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -519,15 +519,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) { struct bcache_device *d = c->devices[inode]; - unsigned int stripe_offset, stripe, sectors_dirty; + unsigned int stripe_offset, sectors_dirty; + int stripe; if (!d) return; + stripe = offset_to_stripe(d, offset); + if (stripe < 0) + return; + if (UUID_FLASH_ONLY(&c->uuids[inode])) atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); - stripe = offset_to_stripe(d, offset); stripe_offset = offset & (d->stripe_size - 1); while (nr_sectors) { @@ -567,12 +571,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) static void refill_full_stripes(struct cached_dev *dc) { struct keybuf *buf = &dc->writeback_keys; - unsigned int start_stripe, stripe, next_stripe; + unsigned int start_stripe, next_stripe; + int stripe; bool wrapped = false; stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); - - if (stripe >= dc->disk.nr_stripes) + if (stripe < 0) stripe = 0; start_stripe = stripe; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 4e4c6810dc3c..c4ff76037227 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline unsigned int offset_to_stripe(struct bcache_device *d, +static inline int offset_to_stripe(struct bcache_device *d, uint64_t offset) { do_div(offset, d->stripe_size); + + /* d->nr_stripes is in range [1, INT_MAX] */ + if (unlikely(offset >= d->nr_stripes)) { + pr_err("Invalid stripe %llu (>= nr_stripes %d).\n", + offset, d->nr_stripes); + return -EINVAL; + } + + /* + * Here offset is definitely smaller than INT_MAX, so + * returning it as an int will never overflow. 
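+ * + * (Editorial sketch, not part of the original patch: do_div() leaves the + * quotient in offset, so with stripe_size 1 << 13 and an offset of + * 1 << 20 sectors this helper returns 128, provided nr_stripes is larger.)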
+ */ return offset; } @@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) { - unsigned int stripe = offset_to_stripe(&dc->disk, offset); + int stripe = offset_to_stripe(&dc->disk, offset); + + if (stripe < 0) + return false; while (1) { if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 2d519c223562..e8c37d9a652d 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1438,6 +1438,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) { sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT; + if (s >= c->start) + s -= c->start; + else + s = 0; if (likely(c->sectors_per_block_bits >= 0)) s >>= c->sectors_per_block_bits; else @@ -1446,6 +1450,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) } EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) +{ + return c->dm_io; +} +EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); + sector_t dm_bufio_get_block_number(struct dm_buffer *b) { return b->block; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 151aa95775be..af6d4f898e4c 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index 54e4fdd607e1..17712456fa63 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd, return (bit >= (start + nr_regions)); } -unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd) +unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd) { return bitmap_weight(cmd->region_map, cmd->nr_regions); } @@ -748,7 +748,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd) static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) { int r; - unsigned long word, flags; + unsigned long word; word = 0; do { @@ -772,9 +772,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) return r; /* Update the changed flag */ - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); dmap->changed = 0; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); return 0; } @@ -782,7 +782,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd) { int r = 0; - unsigned long flags; struct dirty_map *dmap, *next_dmap; down_write(&cmd->lock); @@ -808,9 +807,9 @@ int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd) } /* Swap dirty bitmaps */ - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->current_dmap = next_dmap; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); /* Set old dirty bitmap as currently committing */ 
cmd->committing_dmap = dmap; @@ -851,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re struct dirty_map *dmap; unsigned long word, flags; + if (unlikely(region_nr >= cmd->nr_regions)) { + DMERR("Region %lu out of range (total number of regions %lu)", + region_nr, cmd->nr_regions); + return -ERANGE; + } + word = region_nr / BITS_PER_LONG; spin_lock_irqsave(&cmd->bitmap_lock, flags); @@ -878,9 +883,16 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, { int r = 0; struct dirty_map *dmap; - unsigned long word, region_nr, flags; + unsigned long word, region_nr; - spin_lock_irqsave(&cmd->bitmap_lock, flags); + if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start || + (start + nr_regions) > cmd->nr_regions)) { + DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)", + start, nr_regions, cmd->nr_regions); + return -ERANGE; + } + + spin_lock_irq(&cmd->bitmap_lock); if (cmd->read_only) { r = -EPERM; @@ -898,7 +910,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, } } out: - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); return r; } @@ -965,13 +977,11 @@ out: void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd) { - unsigned long flags; - down_write(&cmd->lock); - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->read_only = 1; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); if (!cmd->fail_io) dm_bm_set_read_only(cmd->bm); @@ -981,13 +991,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd) void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd) { - unsigned long flags; - down_write(&cmd->lock); - spin_lock_irqsave(&cmd->bitmap_lock, flags); + spin_lock_irq(&cmd->bitmap_lock); cmd->read_only = 0; - spin_unlock_irqrestore(&cmd->bitmap_lock, flags); + spin_unlock_irq(&cmd->bitmap_lock); if (!cmd->fail_io) dm_bm_set_read_write(cmd->bm); diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h index c7848c49aef8..d848b8799c07 100644 --- a/drivers/md/dm-clone-metadata.h +++ b/drivers/md/dm-clone-metadata.h @@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re * @start: Starting region number * @nr_regions: Number of regions in the range * - * This function doesn't block, so it's safe to call it from interrupt context. + * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq() + * it's NOT safe to call it from any context where interrupts are disabled, e.g., + * from interrupt context. */ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, unsigned long nr_regions); @@ -154,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd, /* * Returns the number of hydrated regions. 
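 * For instance (an editorial illustration, not from the patch): with * nr_regions == 8 and bits 0, 1 and 4 set in cmd->region_map, * bitmap_weight() reports 3 hydrated regions.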
*/ -unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd); +unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd); /* * Returns the first unhydrated region with region_nr >= @start diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index e6e5d24a79f5..eb7a5d3ba81a 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio) /* Get the address of the region in sectors */ static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr) { - return (region_nr << clone->region_shift); + return ((sector_t)region_nr << clone->region_shift); } /* Get the region number of the bio */ @@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio) /* Get the region range covered by the bio */ static void bio_region_range(struct clone *clone, struct bio *bio, - unsigned long *rs, unsigned long *re) + unsigned long *rs, unsigned long *nr_regions) { + unsigned long end; + *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size); - *re = bio_end_sector(bio) >> clone->region_shift; + end = bio_end_sector(bio) >> clone->region_shift; + + if (*rs >= end) + *nr_regions = 0; + else + *nr_regions = end - *rs; } /* Check whether a bio overwrites a region */ @@ -338,8 +345,6 @@ static void submit_bios(struct bio_list *bios) */ static void issue_bio(struct clone *clone, struct bio *bio) { - unsigned long flags; - if (!bio_triggers_commit(clone, bio)) { generic_make_request(bio); return; @@ -358,9 +363,9 @@ static void issue_bio(struct clone *clone, struct bio *bio) * Batch together any bios that trigger commits and then issue a single * commit for them in process_deferred_flush_bios(). 
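 * (Editorial note, not part of the patch: the bio is parked on * clone->deferred_flush_bios just below and drained by the worker.)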
*/ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_add(&clone->deferred_flush_bios, bio); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); wake_worker(clone); } @@ -456,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len) static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success) { - unsigned long rs, re; + unsigned long rs, nr_regions; /* * If the destination device supports discards, remap and trim the @@ -465,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ */ if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) { remap_to_dest(clone, bio); - bio_region_range(clone, bio, &rs, &re); - trim_bio(bio, rs << clone->region_shift, - (re - rs) << clone->region_shift); + bio_region_range(clone, bio, &rs, &nr_regions); + trim_bio(bio, region_to_sector(clone, rs), + nr_regions << clone->region_shift); generic_make_request(bio); } else bio_endio(bio); @@ -475,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ static void process_discard_bio(struct clone *clone, struct bio *bio) { - unsigned long rs, re, flags; + unsigned long rs, nr_regions; - bio_region_range(clone, bio, &rs, &re); - BUG_ON(re > clone->nr_regions); + bio_region_range(clone, bio, &rs, &nr_regions); + if (!nr_regions) { + bio_endio(bio); + return; + } - if (unlikely(rs == re)) { + if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs || + (rs + nr_regions) > clone->nr_regions)) { + DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)", + clone_device_name(clone), rs, nr_regions, + clone->nr_regions, + (unsigned long long)bio->bi_iter.bi_sector, + bio_sectors(bio)); bio_endio(bio); return; } @@ -489,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio) * The covered regions are already hydrated so we just need to pass * down the discard. */ - if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) { + if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) { complete_discard_bio(clone, bio, true); return; } @@ -507,9 +521,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio) /* * Defer discard processing. 
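 * (Editorial note, not part of the patch: the bio is queued on * clone->deferred_discard_bios and later handled by * process_deferred_discards() in the worker.)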
*/ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_add(&clone->deferred_discard_bios, bio); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); wake_worker(clone); } @@ -784,11 +798,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr struct dm_io_region from, to; struct clone *clone = hd->clone; + if (WARN_ON(!nr_regions)) + return; + region_size = clone->region_size; region_start = hd->region_nr; region_end = region_start + nr_regions - 1; - total_size = (nr_regions - 1) << clone->region_shift; + total_size = region_to_sector(clone, nr_regions - 1); if (region_end == clone->nr_regions - 1) { /* @@ -1167,13 +1184,13 @@ static void process_deferred_discards(struct clone *clone) int r = -EPERM; struct bio *bio; struct blk_plug plug; - unsigned long rs, re, flags; + unsigned long rs, nr_regions; struct bio_list discards = BIO_EMPTY_LIST; - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&discards, &clone->deferred_discard_bios); bio_list_init(&clone->deferred_discard_bios); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&discards)) return; @@ -1183,14 +1200,13 @@ static void process_deferred_discards(struct clone *clone) /* Update the metadata */ bio_list_for_each(bio, &discards) { - bio_region_range(clone, bio, &rs, &re); + bio_region_range(clone, bio, &rs, &nr_regions); /* * A discard request might cover regions that have been already * hydrated. There is no need to update the metadata for these * regions. */ - r = dm_clone_cond_set_range(clone->cmd, rs, re - rs); - + r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions); if (unlikely(r)) break; } @@ -1203,13 +1219,12 @@ out: static void process_deferred_bios(struct clone *clone) { - unsigned long flags; struct bio_list bios = BIO_EMPTY_LIST; - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&bios, &clone->deferred_bios); bio_list_init(&clone->deferred_bios); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios)) return; @@ -1220,7 +1235,6 @@ static void process_deferred_bios(struct clone *clone) static void process_deferred_flush_bios(struct clone *clone) { struct bio *bio; - unsigned long flags; bool dest_dev_flushed; struct bio_list bios = BIO_EMPTY_LIST; struct bio_list bio_completions = BIO_EMPTY_LIST; @@ -1229,13 +1243,13 @@ static void process_deferred_flush_bios(struct clone *clone) * If there are any deferred flush bios, we must commit the metadata * before issuing them or signaling their completion. 
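 * (Editorial aside, not from the patch: all flush bios deferred since the * last worker pass are drained below under a single lock acquisition, so * one metadata commit covers them all.)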
*/ - spin_lock_irqsave(&clone->lock, flags); + spin_lock_irq(&clone->lock); bio_list_merge(&bios, &clone->deferred_flush_bios); bio_list_init(&clone->deferred_flush_bios); bio_list_merge(&bio_completions, &clone->deferred_flush_completions); bio_list_init(&clone->deferred_flush_completions); - spin_unlock_irqrestore(&clone->lock, flags); + spin_unlock_irq(&clone->lock); if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone))) @@ -1455,7 +1469,7 @@ static void clone_status(struct dm_target *ti, status_type_t type, goto error; } - DMEMIT("%u %llu/%llu %llu %lu/%lu %u ", + DMEMIT("%u %llu/%llu %llu %u/%lu %u ", DM_CLONE_METADATA_BLOCK_SIZE, (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks), (unsigned long long)nr_metadata_blocks, @@ -1775,6 +1789,7 @@ error: static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) { int r; + sector_t nr_regions; struct clone *clone; struct dm_arg_set as; @@ -1816,7 +1831,16 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto out_with_source_dev; clone->region_shift = __ffs(clone->region_size); - clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size); + nr_regions = dm_sector_div_up(ti->len, clone->region_size); + + /* Check for overflow */ + if (nr_regions != (unsigned long)nr_regions) { + ti->error = "Too many regions. Consider increasing the region size"; + r = -EOVERFLOW; + goto out_with_source_dev; + } + + clone->nr_regions = nr_regions; r = validate_nr_regions(clone->nr_regions, &ti->error); if (r) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index c4ef1fceead6..3fea121fcbcf 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -106,6 +106,10 @@ struct mapped_device { struct block_device *bdev; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + struct dm_stats stats; /* for blk-mq request-based DM support */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 492bbe0584d9..d85648b9c247 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -720,7 +720,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); struct skcipher_request *req; struct scatterlist src, dst; - struct crypto_wait wait; + DECLARE_CRYPTO_WAIT(wait); int err; req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); @@ -2737,6 +2737,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) wake_up_process(cc->write_thread); ti->num_flush_bios = 1; + ti->limit_swap_bios = true; return 0; @@ -2957,7 +2958,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_segment_size = PAGE_SIZE; limits->logical_block_size = - max_t(unsigned short, limits->logical_block_size, cc->sector_size); + max_t(unsigned, limits->logical_block_size, cc->sector_size); limits->physical_block_size = max_t(unsigned, limits->physical_block_size, cc->sector_size); limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index bdb84b8e7162..6b0b3a13ab4a 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -47,6 +47,7 @@ struct writeset { static void writeset_free(struct writeset *ws) { vfree(ws->bits); + ws->bits = NULL; } static int setup_on_disk_bitset(struct dm_disk_bitset *info, @@ -71,8 +72,6 @@ static size_t bitset_size(unsigned 
nr_bits) */ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) { - ws->md.nr_bits = nr_blocks; - ws->md.root = INVALID_WRITESET_ROOT; ws->bits = vzalloc(bitset_size(nr_blocks)); if (!ws->bits) { DMERR("%s: couldn't allocate in memory bitset", __func__); @@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) /* * Wipes the in-core bitset, and creates a new on disk bitset. */ -static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws, + dm_block_t nr_blocks) { int r; - memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + memset(ws->bits, 0, bitset_size(nr_blocks)); + ws->md.nr_bits = nr_blocks; r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); if (r) { DMERR("%s: setup_on_disk_bitset failed", __func__); @@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info, { int r; - if (!test_and_set_bit(block, ws->bits)) { + if (!test_bit(block, ws->bits)) { r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); if (r) { /* FIXME: fail mode */ @@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value) static int ws_eq(void *context, const void *value1, const void *value2) { - return !memcmp(value1, value2, sizeof(struct writeset_metadata)); + return !memcmp(value1, value2, sizeof(struct writeset_disk)); } /*----------------------------------------------------------------*/ @@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md) } disk = dm_block_data(sblock); + + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk->data_block_size) != md->block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk->data_block_size), md->block_size); + r = -EINVAL; + goto bad; + } + r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, disk->metadata_space_map_root, sizeof(disk->metadata_space_map_root), @@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md) setup_infos(md); - md->block_size = le32_to_cpu(disk->data_block_size); md->nr_blocks = le32_to_cpu(disk->nr_blocks); md->current_era = le32_to_cpu(disk->current_era); + ws_unpack(&disk->current_writeset, &md->current_writeset->md); md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); md->era_array_root = le64_to_cpu(disk->era_array_root); md->metadata_snap = le64_to_cpu(disk->metadata_snap); @@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md, ws_unpack(&disk, &d->writeset); d->value = cpu_to_le32(key); + /* + * We initialise another bitset info to avoid any caching side effects + * with the previous one. + */ + dm_disk_bitset_init(md->tm, &d->info); + d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); d->current_bit = 0; d->step = metadata_digest_transcribe_writeset; @@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d) return 0; memset(d, 0, sizeof(*d)); - - /* - * We initialise another bitset info to avoid any caching side - * effects with the previous one. 
- */ - dm_disk_bitset_init(md->tm, &d->info); d->step = metadata_digest_lookup_writeset; return 0; @@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev, static void metadata_close(struct era_metadata *md) { + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); destroy_persistent_data_objects(md); kfree(md); } @@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg) r = writeset_alloc(&md->writesets[1], *new_size); if (r) { DMERR("%s: writeset_alloc failed for writeset 1", __func__); + writeset_free(&md->writesets[0]); return r; } @@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg) &value, &md->era_array_root); if (r) { DMERR("%s: dm_array_resize failed", __func__); + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); return r; } @@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md) } ws_pack(&md->current_writeset->md, &value); - md->current_writeset->md.root = INVALID_WRITESET_ROOT; keys[0] = md->current_era; __dm_bless_for_disk(&value); @@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md) return r; } + md->current_writeset->md.root = INVALID_WRITESET_ROOT; md->archived_writesets = true; return 0; @@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md) int r; struct writeset *new_writeset = next_writeset(md); - r = writeset_init(&md->bitset_info, new_writeset); + r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks); if (r) { DMERR("%s: writeset_init failed", __func__); return r; @@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md) int r; struct dm_block *sblock; - if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { + if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, &md->current_writeset->md.root); if (r) { @@ -1226,8 +1241,10 @@ static void process_deferred_bios(struct era *era) int r; struct bio_list deferred_bios, marked_bios; struct bio *bio; + struct blk_plug plug; bool commit_needed = false; bool failed = false; + struct writeset *ws = era->md->current_writeset; bio_list_init(&deferred_bios); bio_list_init(&marked_bios); @@ -1237,9 +1254,11 @@ static void process_deferred_bios(struct era *era) bio_list_init(&era->deferred_bios); spin_unlock(&era->deferred_lock); + if (bio_list_empty(&deferred_bios)) + return; + while ((bio = bio_list_pop(&deferred_bios))) { - r = writeset_test_and_set(&era->md->bitset_info, - era->md->current_writeset, + r = writeset_test_and_set(&era->md->bitset_info, ws, get_block(era, bio)); if (r < 0) { /* @@ -1247,7 +1266,6 @@ static void process_deferred_bios(struct era *era) * FIXME: finish. */ failed = true; - } else if (r == 0) commit_needed = true; @@ -1263,9 +1281,19 @@ static void process_deferred_bios(struct era *era) if (failed) while ((bio = bio_list_pop(&marked_bios))) bio_io_error(bio); - else - while ((bio = bio_list_pop(&marked_bios))) + else { + blk_start_plug(&plug); + while ((bio = bio_list_pop(&marked_bios))) { + /* + * Only update the in-core writeset if the on-disk one + * was updated too. 
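+ * writeset_test_and_set() above no longer touches the in-core bitset
+ * (it only sets the bit in the on-disk bitset); setting the in-core bit
+ * here, once the on-disk update has gone through, keeps the two from
+ * drifting apart when a metadata update fails, which could otherwise
+ * make later writes skip marking the block in the current era.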
+ */ + if (commit_needed) + set_bit(get_block(era, bio), ws->bits); generic_make_request(bio); + } + blk_finish_plug(&plug); + } } static void process_rpc_calls(struct era *era) @@ -1486,15 +1514,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) } era->md = md; - era->nr_blocks = calc_nr_blocks(era); - - r = metadata_resize(era->md, &era->nr_blocks); - if (r) { - ti->error = "couldn't resize metadata"; - era_destroy(era); - return -ENOMEM; - } - era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); if (!era->wq) { ti->error = "could not create workqueue for metadata object"; @@ -1571,16 +1590,24 @@ static int era_preresume(struct dm_target *ti) dm_block_t new_size = calc_nr_blocks(era); if (era->nr_blocks != new_size) { - r = in_worker1(era, metadata_resize, &new_size); - if (r) + r = metadata_resize(era->md, &new_size); + if (r) { + DMERR("%s: metadata_resize failed", __func__); return r; + } + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); + return r; + } era->nr_blocks = new_size; } start_worker(era); - r = in_worker0(era, metadata_new_era); + r = in_worker0(era, metadata_era_rollover); if (r) { DMERR("%s: metadata_era_rollover failed", __func__); return r; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 145bc2e7eaf0..9f4d657dd36c 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -254,6 +254,7 @@ struct dm_integrity_c { bool journal_uptodate; bool just_formatted; bool recalculate_flag; + bool legacy_recalculate; struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; @@ -381,6 +382,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic) return READ_ONCE(ic->failed); } +static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) +{ + if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) && + !ic->legacy_recalculate) + return true; + return false; +} + static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, unsigned j, unsigned char seq) { @@ -1343,12 +1352,52 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se return 0; } -static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) +struct flush_request { + struct dm_io_request io_req; + struct dm_io_region io_reg; + struct dm_integrity_c *ic; + struct completion comp; +}; + +static void flush_notify(unsigned long error, void *fr_) +{ + struct flush_request *fr = fr_; + if (unlikely(error != 0)) + dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); + complete(&fr->comp); +} + +static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) { int r; + + struct flush_request fr; + + if (!ic->meta_dev) + flush_data = false; + if (flush_data) { + fr.io_req.bi_op = REQ_OP_WRITE, + fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + fr.io_req.mem.type = DM_IO_KMEM, + fr.io_req.mem.ptr.addr = NULL, + fr.io_req.notify.fn = flush_notify, + fr.io_req.notify.context = &fr; + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), + fr.io_reg.bdev = ic->dev->bdev, + fr.io_reg.sector = 0, + fr.io_reg.count = 0, + fr.ic = ic; + init_completion(&fr.comp); + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); + BUG_ON(r); + } + r = dm_bufio_write_dirty_buffers(ic->bufio); if (unlikely(r)) dm_integrity_io_error(ic, "writing tags", r); + + if (flush_data) + wait_for_completion(&fr.comp); } static void sleep_on_endio_wait(struct dm_integrity_c *ic) @@ -1514,7 +1563,7 @@ static void
integrity_metadata(struct work_struct *w) struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); char *checksums; unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; - char checksums_onstack[HASH_MAX_DIGESTSIZE]; + char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; unsigned sectors_to_process = dio->range.n_sectors; sector_t sector = dio->range.logical_sector; @@ -1743,7 +1792,7 @@ retry_kmap: } while (++s < ic->sectors_per_block); #ifdef INTERNAL_VERIFY if (ic->internal_hash) { - char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; + char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { @@ -2077,7 +2126,7 @@ static void integrity_commit(struct work_struct *w) flushes = bio_list_get(&ic->flush_bio_list); if (unlikely(ic->mode != 'J')) { spin_unlock_irq(&ic->endio_wait.lock); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); goto release_flush_bios; } @@ -2287,7 +2336,7 @@ skip_io: complete_journal_op(&comp); wait_for_completion_io(&comp.comp); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } static void integrity_writer(struct work_struct *w) @@ -2298,7 +2347,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2329,7 +2378,7 @@ static void recalc_write_super(struct dm_integrity_c *ic) { int r; - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); if (dm_integrity_failed(ic)) return; @@ -2359,12 +2408,13 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(dm_suspended(ic->ti))) + if (unlikely(dm_post_suspending(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { if (ic->mode == 'B') { + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); } @@ -2442,6 +2492,17 @@ next_chunk: goto err; } + if (ic->mode == 'B') { + sector_t start, end; + start = (range.logical_sector >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + end = ((range.logical_sector + range.n_sectors) >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); + } + advance_and_next: cond_resched(); @@ -2520,7 +2581,7 @@ static void bitmap_flush_work(struct work_struct *work) unsigned long limit; struct bio *bio; - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); range.logical_sector = 0; range.n_sectors = ic->provided_data_sectors; @@ -2529,7 +2590,7 @@ static void bitmap_flush_work(struct work_struct *work) add_new_range_and_wait(ic, &range); spin_unlock_irq(&ic->endio_wait.lock); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); if 
(ic->meta_dev) blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL); @@ -2800,11 +2861,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti) if (ic->meta_dev) queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } if (ic->mode == 'B') { - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); #if 1 /* set to 0 to test bitmap replay code */ init_journal(ic, 0, ic->journal_sections, 0); @@ -2946,6 +3007,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, arg_count += !!ic->internal_hash_alg.alg_string; arg_count += !!ic->journal_crypt_alg.alg_string; arg_count += !!ic->journal_mac_alg.alg_string; + arg_count += ic->legacy_recalculate; DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, ic->tag_size, ic->mode, arg_count); if (ic->meta_dev) @@ -2965,6 +3027,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); } + if (ic->legacy_recalculate) + DMEMIT(" legacy_recalculate"); #define EMIT_ALG(a, n) \ do { \ @@ -3573,7 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned extra_args; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 9, "Invalid number of feature args"}, + {0, 14, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; bool should_write_sb; @@ -3698,6 +3762,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) if (val >= (uint64_t)UINT_MAX * 1000 / HZ) { r = -EINVAL; ti->error = "Invalid bitmap_flush_interval argument"; + goto bad; } ic->bitmap_flush_interval = msecs_to_jiffies(val); } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { @@ -3717,6 +3782,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } else if (!strcmp(opt_string, "recalculate")) { ic->recalculate_flag = true; + } else if (!strcmp(opt_string, "legacy_recalculate")) { + ic->legacy_recalculate = true; } else { r = -EINVAL; ti->error = "Invalid argument"; @@ -4007,6 +4074,20 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } + } else { + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { + ti->error = "Recalculate can only be specified with internal_hash"; + r = -EINVAL; + goto bad; + } + } + + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && + dm_integrity_disable_recalculate(ic)) { + ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; + r = -EOPNOTSUPP; + goto bad; } ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index ac83f5002ce5..3f15d8dc2b71 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ * Grab our output buffer. 
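* "needed" only accounts for the device entries themselves; the check
* added below additionally rejects a buffer with no room for nl->dev,
* because even when no devices exist the terminating entry is still
* written and would otherwise land past the end of the buffer.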
*/ nl = orig_nl = get_result_buffer(param, param_size, &len); - if (len < needed) { + if (len < needed || len < sizeof(nl->dev)) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } @@ -1600,6 +1600,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para if (!argc) { DMWARN("Empty message received."); + r = -EINVAL; goto out_argv; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e0c32793c248..54ecfea2cf47 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -576,10 +576,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) /* Do we need to select a new pgpath? */ pgpath = READ_ONCE(m->current_pgpath); - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); - if (!pgpath || !queue_io) + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, bio->bi_iter.bi_size); + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */ + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); + if ((pgpath && queue_io) || (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { /* Queue for the daemon to resubmit */ @@ -1188,17 +1190,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m) static void flush_multipath_work(struct multipath *m) { if (m->hw_handler_name) { - set_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + unsigned long flags; + + if (!atomic_read(&m->pg_init_in_progress)) + goto skip; + + spin_lock_irqsave(&m->lock, flags); + if (atomic_read(&m->pg_init_in_progress) && + !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) { + spin_unlock_irqrestore(&m->lock, flags); - if (atomic_read(&m->pg_init_in_progress)) flush_workqueue(kmpath_handlerd); - multipath_wait_for_pg_init_completion(m); + multipath_wait_for_pg_init_completion(m); - clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + spin_lock_irqsave(&m->lock, flags); + clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); + } + spin_unlock_irqrestore(&m->lock, flags); } - +skip: if (m->queue_mode == DM_TYPE_BIO_BASED) flush_work(&m->process_queued_bios); flush_work(&m->trigger_event); @@ -1854,7 +1864,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, int r; current_pgpath = READ_ONCE(m->current_pgpath); - if (!current_pgpath) + if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) current_pgpath = choose_pgpath(m, 0); if (current_pgpath) { diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index b0aa595e4375..5e73cc6ad0ce 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1892,6 +1892,14 @@ static bool rs_takeover_requested(struct raid_set *rs) return rs->md.new_level != rs->md.level; } +/* True if layout is set to reshape. */ +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev) +{ + return (use_mddev ? 
rs->md.delta_disks : rs->delta_disks) || + rs->md.new_layout != rs->md.layout || + rs->md.new_chunk_sectors != rs->md.chunk_sectors; +} + /* True if @rs is requested to reshape by ctr */ static bool rs_reshape_requested(struct raid_set *rs) { @@ -1904,9 +1912,7 @@ static bool rs_reshape_requested(struct raid_set *rs) if (rs_is_raid0(rs)) return false; - change = mddev->new_layout != mddev->layout || - mddev->new_chunk_sectors != mddev->chunk_sectors || - rs->delta_disks; + change = rs_is_layout_change(rs, false); /* Historical case to support raid1 reshape without delta disks */ if (rs_is_raid1(rs)) { @@ -2843,7 +2849,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs) } /* - * + * Reshape: * - change raid layout * - change chunk size * - add disks @@ -2952,6 +2958,20 @@ static int rs_setup_reshape(struct raid_set *rs) return r; } +/* + * If the md resync thread has updated superblock with max reshape position + * at the end of a reshape but not (yet) reset the layout configuration + * changes -> reset the latter. + */ +static void rs_reset_inconclusive_reshape(struct raid_set *rs) +{ + if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) { + rs_set_cur(rs); + rs->md.delta_disks = 0; + rs->md.reshape_backwards = 0; + } +} + /* * Enable/disable discard support on RAID set depending on * RAID level and discard properties of underlying RAID members. @@ -3216,11 +3236,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; + /* Catch any inconclusive reshape superblock content. */ + rs_reset_inconclusive_reshape(rs); + /* Start raid set read-only and assumed clean to change in raid_resume() */ rs->md.ro = 1; rs->md.in_sync = 1; - /* Keep array frozen */ + /* Keep array frozen until resume. */ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); /* Has to be held on running the array */ @@ -3234,7 +3257,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) } r = md_start(&rs->md); - if (r) { ti->error = "Failed to start raid array"; mddev_unlock(&rs->md); @@ -3744,10 +3766,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); /* - * RAID1 and RAID10 personalities require bio splitting, - * RAID0/4/5/6 don't and process large discard bios properly. + * RAID0 and RAID10 personalities require bio splitting, + * RAID1/4/5/6 don't and process large discard bios properly. */ - if (rs_is_raid1(rs) || rs_is_raid10(rs)) { + if (rs_is_raid0(rs) || rs_is_raid10(rs)) { limits->discard_granularity = chunk_size_bytes; limits->max_discard_sectors = rs->md.chunk_sectors; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3f8577e2c13b..6bc61927d320 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q) void dm_stop_queue(struct request_queue *q) { - if (blk_mq_queue_stopped(q)) - return; - blk_mq_quiesce_queue(q); } @@ -575,6 +572,7 @@ out_tag_set: blk_mq_free_tag_set(md->tag_set); out_kfree_tag_set: kfree(md->tag_set); + md->tag_set = NULL; return err; } @@ -584,6 +582,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md) if (md->tag_set) { blk_mq_free_tag_set(md->tag_set); kfree(md->tag_set); + md->tag_set = NULL; } } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 4fb1a40e68a0..0164c9ca984b 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -141,6 +141,11 @@ struct dm_snapshot { * for them to be committed. 
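* The flush_bio below backs flush_data(), which merge_callback() issues
* once kcopyd has copied the merged chunks back to the origin: the
* copied data has to be durable on the origin device before
* commit_merge() drops the corresponding exceptions, otherwise a crash
* in between could expose stale origin data.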
*/ struct bio_list bios_queued_during_merge; + + /* + * Flush data after merge. + */ + struct bio flush_bio; }; /* @@ -1121,6 +1126,17 @@ shut: static void error_bios(struct bio *bio); +static int flush_data(struct dm_snapshot *s) +{ + struct bio *flush_bio = &s->flush_bio; + + bio_reset(flush_bio); + bio_set_dev(flush_bio, s->origin->bdev); + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + return submit_bio_wait(flush_bio); +} + static void merge_callback(int read_err, unsigned long write_err, void *context) { struct dm_snapshot *s = context; @@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context) goto shut; } + if (flush_data(s) < 0) { + DMERR("Flush after merge failed: shutting down merge"); + goto shut; + } + if (s->store->type->commit_merge(s->store, s->num_merging_chunks) < 0) { DMERR("Write error in exception store: shutting down merge"); @@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->first_merging_chunk = 0; s->num_merging_chunks = 0; bio_list_init(&s->bios_queued_during_merge); + bio_init(&s->flush_bio, NULL, 0); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); + bio_uninit(&s->flush_bio); + dm_put_device(ti, s->cow); dm_put_device(ti, s->origin); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 52e049554f5c..06b382304d92 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -428,14 +428,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, { int r; dev_t dev; + unsigned int major, minor; + char dummy; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; + if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EOVERFLOW; + } else { + dev = dm_get_dev_t(path); + if (!dev) + return -ENODEV; + } dd = find_device(&t->devices, dev); if (!dd) { @@ -879,20 +888,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) EXPORT_SYMBOL_GPL(dm_table_set_type); /* validate the dax capability of the target device span */ -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - int blocksize = *(int *) data; + int blocksize = *(int *) data, id; + bool rc; - return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, - start, len); + id = dax_read_lock(); + rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); + dax_read_unlock(id); + + return rc; } /* Check devices support synchronous DAX */ -static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - return dev->dax_dev && dax_synchronous(dev->dax_dev); + return !dev->dax_dev || !dax_synchronous(dev->dax_dev); } bool dm_table_supports_dax(struct dm_table *t, @@ -909,7 +922,7 @@ bool dm_table_supports_dax(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, iterate_fn, blocksize)) + ti->type->iterate_devices(ti, iterate_fn, blocksize)) return false; } @@ 
-918,21 +931,15 @@ bool dm_table_supports_dax(struct dm_table *t, static bool dm_table_does_not_support_partial_completion(struct dm_table *t); -struct verify_rq_based_data { - unsigned sq_count; - unsigned mq_count; -}; - -static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - struct verify_rq_based_data *v = data; + struct block_device *bdev = dev->bdev; + struct request_queue *q = bdev_get_queue(bdev); - if (queue_is_mq(q)) - v->mq_count++; - else - v->sq_count++; + /* request-based cannot stack on partitions! */ + if (bdev != bdev->bd_contains) + return false; return queue_is_mq(q); } @@ -941,7 +948,6 @@ static int dm_table_determine_type(struct dm_table *t) { unsigned i; unsigned bio_based = 0, request_based = 0, hybrid = 0; - struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0}; struct dm_target *tgt; struct list_head *devices = dm_table_get_devices(t); enum dm_queue_mode live_md_type = dm_get_md_type(t->md); @@ -990,7 +996,7 @@ static int dm_table_determine_type(struct dm_table *t) verify_bio_based: /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; - if (dm_table_supports_dax(t, device_supports_dax, &page_size) || + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) || (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { t->type = DM_TYPE_DAX_BIO_BASED; } else { @@ -1045,14 +1051,10 @@ verify_rq_based: /* Non-request-stackable devices can't be used for request-based dm */ if (!tgt->type->iterate_devices || - !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) { + !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) { DMERR("table load rejected: including non-request-stackable devices"); return -EINVAL; } - if (v.sq_count > 0) { - DMERR("table load rejected: not all devices are blk-mq request-stackable"); - return -EINVAL; - } return 0; } @@ -1327,12 +1329,6 @@ void dm_table_event_callback(struct dm_table *t, void dm_table_event(struct dm_table *t) { - /* - * You can no longer call dm_table_event() from interrupt - * context, use a bottom half instead. - */ - BUG_ON(in_interrupt()); - mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); @@ -1380,6 +1376,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +/* + * type->iterate_devices() should be called when the sanity check needs to + * iterate and check all underlying data devices. iterate_devices() will + * iterate all underlying data devices until it encounters a non-zero return + * code, returned either by the supplied iterate_devices_callout_fn or + * by iterate_devices() itself. + * + * For some target types (e.g. dm-stripe), one call of iterate_devices() may + * iterate over multiple underlying devices internally, in which case a non-zero + * return code returned by iterate_devices_callout_fn will stop the iteration + * in advance. + * + * Cases requiring _any_ underlying device to support some kind of attribute + * should use the iteration structure like dm_table_any_dev_attr(), or call + * it directly. @func should handle the semantics of positive examples, e.g. * capable of something.
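+ *
+ * For instance, the DAX write-cache handling later in this file does
+ *
+ *	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ *		dax_write_cache(t->md->dax_dev, true);
+ *
+ * i.e. one capable device is enough for the attribute to apply to the
+ * whole table.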
+ * Cases requiring _all_ underlying devices to support some kind of attribute + * should use the iteration structure like dm_table_supports_nowait() or + * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that + * uses an @anti_func that handles the semantics of counter examples, e.g. not + * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data); + */ +static bool dm_table_any_dev_attr(struct dm_table *t, + iterate_devices_callout_fn func, void *data) +{ + struct dm_target *ti; + unsigned int i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, func, data)) + return true; + } + + return false; +} + static int count_device(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1416,13 +1452,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table) return true; } -static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); enum blk_zoned_model *zoned_model = data; - return q && blk_queue_zoned_model(q) == *zoned_model; + return !q || blk_queue_zoned_model(q) != *zoned_model; } static bool dm_table_supports_zoned_model(struct dm_table *t, @@ -1439,37 +1475,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model)) + ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) return false; } return true; } -static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; - return q && blk_queue_zone_sectors(q) == *zone_sectors; -} - -static bool dm_table_matches_zone_sectors(struct dm_table *t, - unsigned int zone_sectors) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors)) - return false; - } - - return true; + return !q || blk_queue_zone_sectors(q) != *zone_sectors; } static int validate_hardware_zoned_model(struct dm_table *table, @@ -1489,7 +1508,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, if (!zone_sectors || !is_power_of_2(zone_sectors)) return -EINVAL; - if (!dm_table_matches_zone_sectors(table, zone_sectors)) { + if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { DMERR("%s: zone sectors is not consistent across all devices", dm_device_name(table->md)); return -EINVAL; } @@ -1679,29 +1698,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false;
-} - -static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_nonrot(q); + return q && !blk_queue_nonrot(q); } static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, @@ -1712,35 +1714,18 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return q && !blk_queue_add_random(q); } -static bool dm_table_all_devices_attribute(struct dm_table *t, - iterate_devices_callout_fn func) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, func, NULL)) - return false; - } - - return true; -} - -static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, +static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { char b[BDEVNAME_SIZE]; /* For now, NVMe devices are the only devices of this class */ - return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); + return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0); } static bool dm_table_does_not_support_partial_completion(struct dm_table *t) { - return dm_table_all_devices_attribute(t, device_no_partial_completion); + return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL); } static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1867,27 +1852,6 @@ static int device_requires_stable_pages(struct dm_target *ti, return q && bdi_cap_stable_pages_required(q->backing_dev_info); } -/* - * If any underlying device requires stable pages, a table must require - * them as well. Only targets that support iterate_devices are considered: - * don't want error, zero, etc to require stable pages. - */ -static bool dm_table_requires_stable_pages(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) - return true; - } - - return false; -} - void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1920,22 +1884,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); - if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) + if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) set_dax_synchronous(t->md->dax_dev); } else blk_queue_flag_clear(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax_write_cache(t)) + if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); /* Ensure that all underlying devices are non-rotational. 
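* Note the inverted predicate: device_is_rotational() returns true for a
* rotational device, so a true result from dm_table_any_dev_attr() means
* at least one member spins and QUEUE_FLAG_NONROT must be cleared; the
* flag is set only when no device matches.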
*/ - if (dm_table_all_devices_attribute(t, device_is_nonrot)) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else + if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + else + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; @@ -1947,8 +1911,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. + * If any underlying device requires stable pages, a table must require + * them as well. Only targets that support iterate_devices are considered: + * don't want error, zero, etc to require stable pages. */ - if (dm_table_requires_stable_pages(t)) + if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; else q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; @@ -1959,7 +1926,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not * have it set. */ - if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) + if (blk_queue_add_random(q) && + dm_table_any_dev_attr(t, device_is_not_random, NULL)) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 4cd8868f8004..a5ed59eafdc5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } @@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) } pmd_write_lock_in_core(pmd); - if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { + if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 3ceeb6b404ed..cea2b3789736 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -61,19 +61,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio, static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, unsigned *offset, struct dm_buffer **buf) { - u64 position, block; + u64 position, block, rem; u8 *res; position = (index + rsb) * v->fec->roots; - block = position >> v->data_dev_block_bits; - *offset = (unsigned)(position - (block << v->data_dev_block_bits)); + block = div64_u64_rem(position, v->fec->io_size, &rem); + *offset = (unsigned)rem; - res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf); + res = dm_bufio_read(v->fec->bufio, block, buf); if (IS_ERR(res)) { DMERR("%s: FEC %llu: parity read failed (block %llu): %ld", v->data_dev->name, (unsigned long long)rsb, - (unsigned long long)(v->fec->start + block), - PTR_ERR(res)); + (unsigned long long)block, PTR_ERR(res)); *buf = NULL; } @@ -155,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, /* read the next block when we run out of parity bytes */ offset += 
v->fec->roots; - if (offset >= 1 << v->data_dev_block_bits) { + if (offset >= v->fec->io_size) { dm_bufio_release(buf); par = fec_read_parity(v, rsb, block_offset, &offset, &buf); @@ -435,7 +434,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, fio->level++; if (type == DM_VERITY_BLOCK_TYPE_METADATA) - block += v->data_blocks; + block = block - v->hash_start + v->data_blocks; /* * For RS(M, N), the continuous FEC data is divided into blocks of N @@ -551,6 +550,7 @@ void verity_fec_dtr(struct dm_verity *v) mempool_exit(&f->rs_pool); mempool_exit(&f->prealloc_pool); mempool_exit(&f->extra_pool); + mempool_exit(&f->output_pool); kmem_cache_destroy(f->cache); if (f->data_bufio) @@ -673,7 +673,7 @@ int verity_fec_ctr(struct dm_verity *v) { struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti; - u64 hash_blocks; + u64 hash_blocks, fec_blocks; int ret; if (!verity_fec_is_enabled(v)) { @@ -742,16 +742,23 @@ int verity_fec_ctr(struct dm_verity *v) return -E2BIG; } + if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1)) + f->io_size = 1 << v->data_dev_block_bits; + else + f->io_size = v->fec->roots << SECTOR_SHIFT; + f->bufio = dm_bufio_client_create(f->dev->bdev, - 1 << v->data_dev_block_bits, + f->io_size, 1, 0, NULL, NULL); if (IS_ERR(f->bufio)) { ti->error = "Cannot initialize FEC bufio client"; return PTR_ERR(f->bufio); } - if (dm_bufio_get_device_size(f->bufio) < - ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) { + dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT)); + + fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT); + if (dm_bufio_get_device_size(f->bufio) < fec_blocks) { ti->error = "FEC device is too small"; return -E2BIG; } diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 42fbd3a7fc9f..3c46c8d61883 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -36,6 +36,7 @@ struct dm_verity_fec { struct dm_dev *dev; /* parity data device */ struct dm_bufio_client *data_bufio; /* for data dev access */ struct dm_bufio_client *bufio; /* for parity data access */ + size_t io_size; /* IO size for roots */ sector_t start; /* parity data start in blocks */ sector_t blocks; /* number of blocks covered */ sector_t rounds; /* number of interleaving rounds */ diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 4fb33e7562c5..711f101447e3 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -33,7 +33,7 @@ #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" -#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \ +#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \ DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; @@ -533,6 +533,15 @@ static int verity_verify_io(struct dm_verity_io *io) return 0; } +/* + * Skip verity work in response to I/O error when system is shutting down. + */ +static inline bool verity_is_system_shutting_down(void) +{ + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF + || system_state == SYSTEM_RESTART; +} + /* * End one "io" structure with a given error. 
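* With the verity_end_io() change below, an I/O error is passed through
* immediately when the system is halting, powering off or rebooting,
* even if FEC is configured: scheduling corrective work at that point
* would only delay shutdown on an already failing device.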
*/ @@ -560,7 +569,8 @@ static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (bio->bi_status && !verity_fec_is_enabled(io->v)) { + if (bio->bi_status && + (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) { verity_finish_io(io, bio->bi_status); return; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 184dabce1bad..4c2971835d33 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -142,6 +142,7 @@ struct dm_writecache { size_t metadata_sectors; size_t n_blocks; uint64_t seq_count; + sector_t data_device_sectors; void *block_start; struct wc_entry *entries; unsigned block_size; @@ -224,6 +225,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -242,9 +244,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -266,7 +275,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? daa : -EINVAL; @@ -279,6 +288,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) while (daa-- && i < p) { pages[i++] = pfn_t_to_page(pfn); pfn.val++; + if (!(i & 15)) + cond_resched(); } } while (i < p); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); @@ -306,7 +317,7 @@ err1: #else static int persistent_memory_claim(struct dm_writecache *wc) { - BUG(); + return -EOPNOTSUPP; } #endif @@ -805,6 +816,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ writecache_wait_for_ios(wc, WRITE); discarded_something = true; } + if (!writecache_entry_is_committed(wc, e)) + wc->uncommitted_blocks--; writecache_free_entry(wc, e); } @@ -872,11 +885,30 @@ static int writecache_alloc_entries(struct dm_writecache *wc) struct wc_entry *e = &wc->entries[b]; e->index = b; e->write_in_progress = false; + cond_resched(); } return 0; } +static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) +{ + struct dm_io_region region; + struct dm_io_request req; + + region.bdev = wc->ssd_dev->bdev; + region.sector = wc->start_sector; + region.count = n_sectors; + req.bi_op = REQ_OP_READ; + req.bi_op_flags = REQ_SYNC; + req.mem.type = DM_IO_VMA; + req.mem.ptr.vma = (char *)wc->memory_map; + req.client = wc->dm_io; + req.notify.fn = NULL; + + return dm_io(&req, 1, &region, NULL); +} + static void writecache_resume(struct dm_target *ti) { struct dm_writecache *wc = ti->private; @@ -887,8 +919,20 @@ static void writecache_resume(struct dm_target *ti) wc_lock(wc); - if (WC_MODE_PMEM(wc)) + wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT; + + if (WC_MODE_PMEM(wc)) { persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); + } else { + r = writecache_read_metadata(wc, wc->metadata_sectors); + if (r) { + size_t sb_entries_offset; + writecache_error(wc, r, "unable to read metadata: %d", r); + sb_entries_offset = offsetof(struct
wc_memory_superblock, entries); + memset((char *)wc->memory_map + sb_entries_offset, -1, + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); + } + } wc->tree = RB_ROOT; INIT_LIST_HEAD(&wc->lru); @@ -926,6 +970,7 @@ static void writecache_resume(struct dm_target *ti) e->original_sector = le64_to_cpu(wme.original_sector); e->seq_count = le64_to_cpu(wme.seq_count); } + cond_resched(); } #endif for (b = 0; b < wc->n_blocks; b++) { @@ -1446,6 +1491,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t void *address = memory_data(wc, e); persistent_memory_flush_cache(address, block_size); + + if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) + return true; + return bio_add_page(&wb->bio, persistent_memory_page(address), block_size, persistent_memory_page_offset(address)) != 0; } @@ -1517,6 +1566,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba if (writecache_has_error(wc)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); + } else if (unlikely(!bio_sectors(bio))) { + bio->bi_status = BLK_STS_OK; + bio_endio(bio); } else { submit_bio(bio); } @@ -1560,6 +1612,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac e = f; } + if (unlikely(to.sector + to.count > wc->data_device_sectors)) { + if (to.sector >= wc->data_device_sectors) { + writecache_copy_endio(0, 0, c); + continue; + } + from.count = to.count = wc->data_device_sectors - to.sector; + } + dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); __writeback_throttle(wc, wbl); @@ -1770,8 +1830,10 @@ static int init_memory(struct dm_writecache *wc) pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); - for (b = 0; b < wc->n_blocks; b++) + for (b = 0; b < wc->n_blocks; b++) { write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); + cond_resched(); + } writecache_flush_all_metadata(wc); writecache_commit_flushed(wc, false); @@ -1845,7 +1907,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) struct wc_memory_superblock s; static struct dm_arg _args[] = { - {0, 10, "Invalid number of feature args"}, + {0, 16, "Invalid number of feature args"}, }; as.argc = argc; @@ -1980,6 +2042,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Invalid block size"; goto bad; } + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { + r = -EINVAL; + ti->error = "Block size is smaller than device logical block size"; + goto bad; + } wc->block_size_bits = __ffs(wc->block_size); wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; @@ -2062,14 +2130,18 @@ invalid_optional: } if (WC_MODE_PMEM(wc)) { + if (!dax_synchronous(wc->ssd_dev->dax_dev)) { + r = -EOPNOTSUPP; + ti->error = "Asynchronous persistent memory not supported as pmem cache"; + goto bad; + } + r = persistent_memory_claim(wc); if (r) { ti->error = "Unable to map persistent memory for cache"; goto bad; } } else { - struct dm_io_region region; - struct dm_io_request req; size_t n_blocks, n_metadata_blocks; uint64_t n_bitmap_bits; @@ -2126,19 +2198,9 @@ invalid_optional: goto bad; } - region.bdev = wc->ssd_dev->bdev; - region.sector = wc->start_sector; - region.count = wc->metadata_sectors; - req.bi_op = REQ_OP_READ; - req.bi_op_flags = REQ_SYNC; - req.mem.type = DM_IO_VMA; - req.mem.ptr.vma = (char *)wc->memory_map; - req.client = wc->dm_io; - req.notify.fn = NULL; 
- - r = dm_io(&req, 1, &region, NULL); + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); if (r) { - ti->error = "Unable to read metadata"; + ti->error = "Unable to read first block of metadata"; goto bad; } } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 5205cf9bbfd9..e6b0039d07aa 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1107,7 +1107,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) { set_bit(DMZ_RND, &zone->flags); - zmd->nr_rnd_zones++; } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ || blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) { set_bit(DMZ_SEQ, &zone->flags); @@ -1590,7 +1589,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) return dzone; } - return ERR_PTR(-EBUSY); + return NULL; } /* @@ -1610,7 +1609,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } - return ERR_PTR(-EBUSY); + return NULL; } /* diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index e7ace908a9b7..d50817320e8e 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -349,8 +349,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); - if (IS_ERR(dzone)) - return PTR_ERR(dzone); + if (!dzone) + return -EBUSY; start = jiffies; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 03267609b515..6e4f3ef2dd50 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -790,7 +790,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dev->zone_nr_sectors << 9; + ti->max_io_len = dev->zone_nr_sectors; ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ed612dbc972d..aa2cb23dbd3c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> @@ -141,10 +142,21 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 +#define DMF_POST_SUSPENDING 8 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) +static int swap_bios = DEFAULT_SWAP_BIOS; +static int get_swap_bios(void) +{ + int latch = READ_ONCE(swap_bios); + if (unlikely(latch <= 0)) + latch = DEFAULT_SWAP_BIOS; + return latch; +} + /* * For mempools pre-allocation at the table loading time.
*/ @@ -454,8 +466,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, return -EAGAIN; map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; + if (!map) { + ret = -EIO; + goto out; + } tgt = dm_table_find_target(map, sector); if (!tgt) { @@ -492,7 +506,6 @@ out: static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) - __acquires(md->io_barrier) { struct dm_target *tgt; struct dm_table *map; @@ -526,7 +539,6 @@ retry: } static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) - __releases(md->io_barrier) { dm_put_live_table(md, srcu_idx); } @@ -547,7 +559,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, * subset of the parent bdev; require extra privileges. */ if (!capable(CAP_SYS_RAWIO)) { - DMWARN_LIMIT( + DMDEBUG_LIMIT( "%s: sending ioctl %x to DM device without required privilege.", current->comm, cmd); r = -ENOIOCTLCMD; @@ -978,6 +990,11 @@ void disable_write_zeroes(struct mapped_device *md) limits->max_write_zeroes_sectors = 0; } +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) +{ + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); +} + static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; @@ -1015,6 +1032,11 @@ static void clone_endio(struct bio *bio) } } + if (unlikely(swap_bios_limit(tio->ti, bio))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } + free_tio(tio); dec_pending(io, error); } @@ -1118,15 +1140,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd { struct mapped_device *md = dax_get_private(dax_dev); struct dm_table *map; + bool ret = false; int srcu_idx; - bool ret; map = dm_get_live_table(md, &srcu_idx); if (!map) - return false; + goto out; - ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); + ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize); +out: dm_put_live_table(md, srcu_idx); return ret; @@ -1268,6 +1291,22 @@ void dm_remap_zone_report(struct dm_target *ti, sector_t start, } EXPORT_SYMBOL_GPL(dm_remap_zone_report); +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) +{ + mutex_lock(&md->swap_bios_lock); + while (latch < md->swap_bios) { + cond_resched(); + down(&md->swap_bios_semaphore); + md->swap_bios--; + } + while (latch > md->swap_bios) { + cond_resched(); + up(&md->swap_bios_semaphore); + md->swap_bios++; + } + mutex_unlock(&md->swap_bios_lock); +} + static blk_qc_t __map_bio(struct dm_target_io *tio) { int r; @@ -1288,6 +1327,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) atomic_inc(&io->io_count); sector = clone->bi_iter.bi_sector; + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + int latch = get_swap_bios(); + if (unlikely(latch != md->swap_bios)) + __set_swap_bios_limit(md, latch); + down(&md->swap_bios_semaphore); + } + r = ti->type->map(ti, clone); switch (r) { case DM_MAPIO_SUBMITTED: @@ -1302,10 +1349,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) ret = generic_make_request(clone); break; case DM_MAPIO_KILL: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_IOERR); break; case DM_MAPIO_REQUEUE: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_DM_REQUEUE); break; @@ 
-1444,9 +1499,6 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); - - bio_disassociate_blkg(ci->bio); - return 0; } @@ -1634,6 +1686,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { ci.bio = bio; @@ -1708,6 +1761,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; @@ -1726,23 +1780,6 @@ out: return ret; } -static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) -{ - unsigned len, sector_count; - - sector_count = bio_sectors(*bio); - len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); - - if (sector_count > len) { - struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); - - bio_chain(split, *bio); - trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); - generic_make_request(*bio); - *bio = split; - } -} - static blk_qc_t dm_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { @@ -1770,14 +1807,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, if (current->bio_list) { if (is_abnormal_io(bio)) blk_queue_split(md->queue, &bio); - else - dm_queue_split(md, ti, &bio); + /* regular IO is split by __split_and_process_bio */ } if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) return __process_bio(md, map, bio, ti); - else - return __split_and_process_bio(md, map, bio); + return __split_and_process_bio(md, map, bio); } static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) @@ -1927,6 +1962,7 @@ static void cleanup_mapped_device(struct mapped_device *md) mutex_destroy(&md->suspend_lock); mutex_destroy(&md->type_lock); mutex_destroy(&md->table_devices_lock); + mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); } @@ -1996,6 +2032,10 @@ static struct mapped_device *alloc_dev(int minor) init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); + md->swap_bios = get_swap_bios(); + sema_init(&md->swap_bios_semaphore, md->swap_bios); + mutex_init(&md->swap_bios_lock); + md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; @@ -2406,6 +2446,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ @@ -2728,7 +2769,9 @@ retry: if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2825,7 +2868,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static 
void __dm_internal_resume(struct mapped_device *md) @@ -2902,17 +2947,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) @@ -2978,6 +3031,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -2994,6 +3052,12 @@ int dm_suspended(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(dm_table_get_md(ti->table)); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); @@ -3254,6 +3318,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); +module_param(swap_bios, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index d7c4f6606b5f..9fbf87e04019 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -74,7 +74,7 @@ void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn, int *blocksize); -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data); void dm_lock_md_type(struct mapped_device *md); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 3ad18246fcb3..d7eef5292ae2 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1372,7 +1372,7 @@ __acquires(bitmap->lock) if (bitmap->bp[page].hijacked || bitmap->bp[page].map == NULL) csize = ((sector_t)1) << (bitmap->chunkshift + - PAGE_COUNTER_SHIFT - 1); + PAGE_COUNTER_SHIFT); else csize = ((sector_t)1) << bitmap->chunkshift; *blocks = csize - (offset & (csize - 1)); @@ -1726,6 +1726,8 @@ void md_bitmap_flush(struct mddev *mddev) md_bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; md_bitmap_daemon_work(mddev); + if (mddev->bitmap_info.external) + md_super_wait(mddev); md_bitmap_update_sb(bitmap); } @@ -1954,6 +1956,7 @@ out: } EXPORT_SYMBOL_GPL(md_bitmap_load); +/* caller needs to free the returned bitmap with md_bitmap_free() */ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) { int rv = 0; @@ -2017,6 +2020,7 @@ int
md_bitmap_copy_from_slot(struct mddev *mddev, int slot, md_bitmap_unplug(mddev->bitmap); *low = lo; *high = hi; + md_bitmap_free(bitmap); return rv; } @@ -2620,4 +2624,3 @@ struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, }; - diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 813a99ffa86f..794e1d589104 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -664,9 +664,27 @@ out: * Takes the lock on the TOKEN lock resource so no other * node can communicate while the operation is underway. */ -static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) +static int lock_token(struct md_cluster_info *cinfo) { - int error, set_bit = 0; + int error; + + error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); + if (error) { + pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", + __func__, __LINE__, error); + } else { + /* Lock the receive sequence */ + mutex_lock(&cinfo->recv_mutex); + } + return error; +} + +/* lock_comm() + * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. + */ +static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) +{ + int rv, set_bit = 0; struct mddev *mddev = cinfo->mddev; /* @@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) */ if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state)) { - error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, + rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); - WARN_ON_ONCE(error); + WARN_ON_ONCE(rv); md_wakeup_thread(mddev->thread); set_bit = 1; } - error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); - if (set_bit) - clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); - if (error) - pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", - __func__, __LINE__, error); - - /* Lock the receive sequence */ - mutex_lock(&cinfo->recv_mutex); - return error; -} - -/* lock_comm() - * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. 
- */ -static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) -{ wait_event(cinfo->wait, !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state)); - - return lock_token(cinfo, mddev_locked); + rv = lock_token(cinfo); + if (set_bit) + clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); + return rv; } static void unlock_comm(struct md_cluster_info *cinfo) @@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg, { int ret; - lock_comm(cinfo, mddev_locked); - ret = __sendmsg(cinfo, cmsg); - unlock_comm(cinfo); + ret = lock_comm(cinfo, mddev_locked); + if (!ret) { + ret = __sendmsg(cinfo, cmsg); + unlock_comm(cinfo); + } return ret; } @@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev) return 0; } - ret = lock_token(cinfo, 1); + ret = lock_token(cinfo); clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); return ret; } @@ -1139,6 +1144,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz bitmap = get_bitmap_from_slot(mddev, i); if (IS_ERR(bitmap)) { pr_err("can't get bitmap from slot %d\n", i); + bitmap = NULL; goto out; } counts = &bitmap->counts; @@ -1165,6 +1171,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz * can't resize bitmap */ goto out; + md_bitmap_free(bitmap); } return 0; @@ -1253,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors) int raid_slot = -1; md_update_sb(mddev, 1); - lock_comm(cinfo, 1); + if (lock_comm(cinfo, 1)) { + pr_err("%s: lock_comm failed\n", __func__); + return; + } memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(METADATA_UPDATED); @@ -1405,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) cmsg.type = cpu_to_le32(NEWDISK); memcpy(cmsg.uuid, uuid, 16); cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); - lock_comm(cinfo, 1); + if (lock_comm(cinfo, 1)) + return -EAGAIN; ret = __sendmsg(cinfo, &cmsg); if (ret) { unlock_comm(cinfo); @@ -1518,6 +1529,7 @@ static void unlock_all_bitmaps(struct mddev *mddev) } } kfree(cinfo->other_bitmap_lockres); + cinfo->other_bitmap_lockres = NULL; } } diff --git a/drivers/md/md.c b/drivers/md/md.c index dc32437ebb56..a95af9aadd2a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -378,6 +378,13 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) struct blkcg *blkcg = bio_blkcg(bio); unsigned int sectors, cpu; + if (mddev == NULL || mddev->pers == NULL) { + bio_io_error(bio); + return BLK_QC_T_NONE; + } + + blk_queue_split(q, &bio); + if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { bio_io_error(bio); return BLK_QC_T_NONE; @@ -385,10 +392,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(q, &bio); - if (mddev == NULL || mddev->pers == NULL) { - bio_io_error(bio); - return BLK_QC_T_NONE; - } if (mddev->ro == 1 && unlikely(rw == WRITE)) { if (bio_sectors(bio) != 0) bio->bi_status = BLK_STS_IOERR; @@ -542,8 +545,10 @@ static void md_submit_flush_data(struct work_struct *ws) * could wait for this and below md_handle_request could wait for those * bios because of suspend check */ + spin_lock_irq(&mddev->lock); mddev->last_flush = mddev->start_flush; mddev->flush_bio = NULL; + spin_unlock_irq(&mddev->lock); wake_up(&mddev->sb_wait); if (bio->bi_iter.bi_size == 0) { @@ -649,7 +654,34 @@ void mddev_init(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_init); +static struct mddev *mddev_find_locked(dev_t unit) 
+{ + struct mddev *mddev; + + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == unit) + return mddev; + + return NULL; +} + static struct mddev *mddev_find(dev_t unit) +{ + struct mddev *mddev; + + if (MAJOR(unit) != MD_MAJOR) + unit &= ~((1 << MdpMinorShift) - 1); + + spin_lock(&all_mddevs_lock); + mddev = mddev_find_locked(unit); + if (mddev) + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + + return mddev; +} + +static struct mddev *mddev_find_or_alloc(dev_t unit) { struct mddev *mddev, *new = NULL; @@ -660,13 +692,13 @@ static struct mddev *mddev_find(dev_t unit) spin_lock(&all_mddevs_lock); if (unit) { - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == unit) { - mddev_get(mddev); - spin_unlock(&all_mddevs_lock); - kfree(new); - return mddev; - } + mddev = mddev_find_locked(unit); + if (mddev) { + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + kfree(new); + return mddev; + } if (new) { list_add(&new->all_mddevs, &all_mddevs); @@ -692,12 +724,7 @@ static struct mddev *mddev_find(dev_t unit) return NULL; } - is_free = 1; - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == dev) { - is_free = 0; - break; - } + is_free = !mddev_find_locked(dev); } new->unit = dev; new->md_minor = MINOR(dev); @@ -5438,7 +5465,7 @@ static int md_alloc(dev_t dev, char *name) * writing to /sys/module/md_mod/parameters/new_array. */ static DEFINE_MUTEX(disks_mutex); - struct mddev *mddev = mddev_find(dev); + struct mddev *mddev = mddev_find_or_alloc(dev); struct gendisk *disk; int partitioned; int shift; @@ -6045,7 +6072,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { md_bitmap_wait_behind_writes(mddev); - if (mddev->pers && mddev->pers->quiesce) { + if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } @@ -6314,11 +6341,9 @@ static void autorun_devices(int part) md_probe(dev, NULL, NULL); mddev = mddev_find(dev); - if (!mddev || !mddev->gendisk) { - if (mddev) - mddev_put(mddev); + if (!mddev) break; - } + if (mddev_lock(mddev)) pr_warn("md: %s locked, cannot run\n", mdname(mddev)); else if (mddev->raid_disks || mddev->major_version @@ -6725,8 +6750,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) goto busy; kick_rdev: - if (mddev_is_clustered(mddev)) - md_cluster_ops->remove_disk(mddev, rdev); + if (mddev_is_clustered(mddev)) { + if (md_cluster_ops->remove_disk(mddev, rdev)) + goto busy; + } md_kick_rdev_from_array(rdev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); @@ -7056,6 +7083,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks) return -EINVAL; if (mddev->sync_thread || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || mddev->reshape_position != MaxSector) return -EBUSY; @@ -7375,8 +7403,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, err = -EBUSY; goto out; } - WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); - set_bit(MD_CLOSING, &mddev->flags); + if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { + mutex_unlock(&mddev->open_mutex); + err = -EBUSY; + goto out; + } did_set_md_closing = true; mutex_unlock(&mddev->open_mutex); sync_blockdev(bdev); @@ -7612,9 +7643,9 @@ static int md_open(struct block_device *bdev, fmode_t mode) */ mddev_put(mddev); /* Wait until bdev->bd_disk is definitely gone */ - flush_workqueue(md_misc_wq); - /* Then retry the open from the top */ - return -ERESTARTSYS; + if 
(work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); + return -EBUSY; } BUG_ON(mddev != bdev->bd_disk->private_data); @@ -7947,7 +7978,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos) loff_t l = *pos; struct mddev *mddev; - if (l >= 0x10000) + if (l == 0x10000) { + ++*pos; + return (void *)2; + } + if (l > 0x10000) return NULL; if (!l--) /* header */ @@ -9044,11 +9079,11 @@ void md_check_recovery(struct mddev *mddev) } if (mddev_is_clustered(mddev)) { - struct md_rdev *rdev; + struct md_rdev *rdev, *tmp; /* kick the device if another node issued a * remove disk. */ - rdev_for_each(rdev, mddev) { + rdev_for_each_safe(rdev, tmp, mddev) { if (test_and_clear_bit(ClusterRemove, &rdev->flags) && rdev->raid_disk < 0) md_kick_rdev_from_array(rdev); @@ -9361,7 +9396,7 @@ err_wq: static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) { struct mdp_superblock_1 *sb = page_address(rdev->sb_page); - struct md_rdev *rdev2; + struct md_rdev *rdev2, *tmp; int role, ret; char b[BDEVNAME_SIZE]; @@ -9378,7 +9413,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) } /* Check for change of roles in the active devices */ - rdev_for_each(rdev2, mddev) { + rdev_for_each_safe(rdev2, tmp, mddev) { if (test_bit(Faulty, &rdev2->flags)) continue; @@ -9423,8 +9458,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) } } - if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) - update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); + if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { + ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); + if (ret) + pr_warn("md: updating array disks failed. %d\n", ret); + } /* * Since mddev->delta_disks has already updated in update_raid_disks, diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 749ec268d957..54c089a50b15 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); @@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); @@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock); int dm_bm_flush(struct dm_block_manager *bm) { - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; return dm_bufio_write_dirty_buffers(bm->bufio); @@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) bool dm_bm_is_read_only(struct dm_block_manager *bm) { - return bm->read_only; + return (bm ? 
bm->read_only : true); } EXPORT_SYMBOL_GPL(dm_bm_is_read_only); void dm_bm_set_read_only(struct dm_block_manager *bm) { - bm->read_only = true; + if (bm) + bm->read_only = true; } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); void dm_bm_set_read_write(struct dm_block_manager *bm) { - bm->read_only = false; + if (bm) + bm->read_only = false; } EXPORT_SYMBOL_GPL(dm_bm_set_read_write); diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index a240990a7f33..5673f8eb5f88 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -34,12 +34,12 @@ struct node_header { __le32 max_entries; __le32 value_size; __le32 padding; -} __packed; +} __attribute__((packed, aligned(8))); struct btree_node { struct node_header header; __le64 keys[0]; -} __packed; +} __attribute__((packed, aligned(8))); /* diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index d8b4125e338c..a213bf11738f 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, */ begin = do_div(index_begin, ll->entries_per_block); end = do_div(end, ll->entries_per_block); + if (end == 0) + end = ll->entries_per_block; for (i = index_begin; i < index_end; i++, begin = 0) { struct dm_block *blk; diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index 8de63ce39bdd..87e17909ef52 100644 --- a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -33,7 +33,7 @@ struct disk_index_entry { __le64 blocknr; __le32 nr_free; __le32 none_free_before; -} __packed; +} __attribute__ ((packed, aligned(8))); #define MAX_METADATA_BITMAPS 255 @@ -43,7 +43,7 @@ struct disk_metadata_index { __le64 blocknr; struct disk_index_entry index[MAX_METADATA_BITMAPS]; -} __packed; +} __attribute__ ((packed, aligned(8))); struct ll_disk; @@ -86,7 +86,7 @@ struct disk_sm_root { __le64 nr_allocated; __le64 bitmap_root; __le64 ref_count_root; -} __packed; +} __attribute__ ((packed, aligned(8))); #define ENTRIES_PER_BYTE 4 @@ -94,7 +94,7 @@ struct disk_bitmap_header { __le32 csum; __le32 not_used; __le64 blocknr; -} __packed; +} __attribute__ ((packed, aligned(8))); enum allocation_event { SM_NONE, diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c7137f50bd1d..a715cb88052c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -458,6 +458,8 @@ static void raid1_end_write_request(struct bio *bio) if (!test_bit(Faulty, &rdev->flags)) set_bit(R1BIO_WriteError, &r1_bio->state); else { + /* Fail the request */ + set_bit(R1BIO_Degraded, &r1_bio->state); /* Finished with this branch */ r1_bio->bios[mirror] = NULL; to_put = bio; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ec136e44aef7..a195a85cc366 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1145,7 +1145,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, struct md_rdev *err_rdev = NULL; gfp_t gfp = GFP_NOIO; - if (r10_bio->devs[slot].rdev) { + if (slot >= 0 && r10_bio->devs[slot].rdev) { /* * This is an error retry, but we cannot * safely dereference the rdev in the r10_bio, @@ -1510,6 +1510,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) r10_bio->mddev = mddev; r10_bio->sector = bio->bi_iter.bi_sector; 
r10_bio->state = 0; + r10_bio->read_slot = -1; memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); if (bio_data_dir(bio) == READ) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36cd7c2fbf40..08a7f97750f7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num) * of the P and Q blocks. */ static int scribble_alloc(struct raid5_percpu *percpu, - int num, int cnt, gfp_t flags) + int num, int cnt) { size_t obj_size = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); void *scribble; - scribble = kvmalloc_array(cnt, obj_size, flags); + /* + * If this is called in the raid array suspend context, it is also in + * the memalloc noio context, so there are no potential recursive + * memory reclaim I/Os with the GFP_KERNEL flag. + */ + scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL); if (!scribble) return -ENOMEM; @@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) percpu = per_cpu_ptr(conf->percpu, cpu); err = scribble_alloc(percpu, new_disks, - new_sectors / STRIPE_SECTORS, - GFP_NOIO); + new_sectors / STRIPE_SECTORS); if (err) break; } @@ -2403,8 +2407,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; - mutex_unlock(&conf->cache_size_mutex); - conf->slab_cache = sc; conf->active_name = 1-conf->active_name; @@ -2427,6 +2429,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!err) conf->pool_size = newsize; + mutex_unlock(&conf->cache_size_mutex); + return err; } @@ -3600,6 +3604,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, * is missing/faulty, then we need to read everything we can. */ if (sh->raid_conf->level != 6 && + sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; @@ -4835,7 +4840,7 @@ static void handle_stripe(struct stripe_head *sh) * or to load a block that is being partially written.
*/ if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) + || (s.to_write && s.failed) || (s.syncing && (s.uptodate + s.compute < disks)) || s.replacing || s.expanding) @@ -6765,8 +6770,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu conf->previous_raid_disks), max(conf->chunk_sectors, conf->prev_chunk_sectors) - / STRIPE_SECTORS, - GFP_KERNEL)) { + / STRIPE_SECTORS)) { free_scratch_buffer(conf, percpu); return -ENOMEM; } diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index b14c09cd9593..06383b26712b 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -1732,6 +1732,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap, unsigned j; log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; + if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { + dprintk(1, "unknown logical address type\n"); + return -EINVAL; + } if (type_mask & (1 << log_addrs->log_addr_type[i])) { dprintk(1, "duplicate logical address type\n"); return -EINVAL; @@ -1752,10 +1756,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap, dprintk(1, "invalid primary device type\n"); return -EINVAL; } - if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { - dprintk(1, "unknown logical address type\n"); - return -EINVAL; - } for (j = 0; j < feature_sz; j++) { if ((features[j] & 0x80) == 0) { if (op_is_dev_features) diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 12d676484472..ed75636a6fb3 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); - log_addrs = adap->log_addrs; + /* + * We use memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. 
+ */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index 88f90dfd368b..ae17407e477a 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -1169,12 +1169,15 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev, rc = dvb_create_media_graph(&client->adapter, true); if (rc < 0) { pr_err("dvb_create_media_graph failed %d\n", rc); - goto client_error; + goto media_graph_error; } pr_info("DVB interface registered.\n"); return 0; +media_graph_error: + smsdvb_debugfs_release(client); + client_error: dvb_unregister_frontend(&client->frontend); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 917fe034af37..197cf17b246f 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev) if (dvbdev->adapter->conn) { media_device_unregister_entity(dvbdev->adapter->conn); + kfree(dvbdev->adapter->conn); dvbdev->adapter->conn = NULL; kfree(dvbdev->adapter->conn_pads); dvbdev->adapter->conn_pads = NULL; @@ -707,9 +708,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap, } if (ntuner && ndemod) { - pad_source = media_get_pad_index(tuner, true, + /* NOTE: first found tuner source pad presumed correct */ + pad_source = media_get_pad_index(tuner, false, PAD_SIGNAL_ANALOG); - if (pad_source) + if (pad_source < 0) return -EINVAL; ret = media_create_pad_links(mdev, MEDIA_ENT_F_TUNER, diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 1953b00b3e48..685c0ac71819 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -470,10 +470,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) goto error; if (dev->delivery_system == SYS_DVBS) { - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; + u32 bit_error = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3] << 0; + + dev->dvbv3_ber = bit_error; + dev->post_bit_error += bit_error; c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; dev->block_error += buf[4] << 8 | buf[5] << 0; diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 97144734eb05..8a8585261bb8 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -272,6 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) name_len = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)); + if (name_len < 0) { + err = name_len; + goto fail_free; + } for (i = ARRAY_SIZE(model_names); --i; ) if (strlen(model_names[i]) <= name_len && strncmp(name, model_names[i], name_len) == 0) diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index 62763ec4cd07..809fa44ed988 100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client) adv7511_set_isr(sd, false); adv7511_init_setup(sd); - cancel_delayed_work(&state->edid_handler); + cancel_delayed_work_sync(&state->edid_handler); 
i2c_unregister_device(state->i2c_edid); i2c_unregister_device(state->i2c_cec); i2c_unregister_device(state->i2c_pktmem); diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 2dedd6ebb236..b887299ac195 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -3606,7 +3606,7 @@ static int adv76xx_remove(struct i2c_client *client) io_write(sd, 0x6e, 0); io_write(sd, 0x73, 0); - cancel_delayed_work(&state->delayed_work_enable_hotplug); + cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); adv76xx_unregister_clients(to_state(sd)); diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 885619841719..02cbab826d0b 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client) struct adv7842_state *state = to_state(sd); adv7842_irq_enable(sd, false); - cancel_delayed_work(&state->delayed_work_enable_hotplug); + cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); v4l2_device_unregister_subdev(sd); media_entity_cleanup(&sd->entity); adv7842_unregister_clients(sd); diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c index 159a3a604f0e..24659cb0d083 100644 --- a/drivers/media/i2c/imx214.c +++ b/drivers/media/i2c/imx214.c @@ -785,7 +785,7 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable) if (ret < 0) goto err_rpm_put; } else { - ret = imx214_start_streaming(imx214); + ret = imx214_stop_streaming(imx214); if (ret < 0) goto err_rpm_put; pm_runtime_put(imx214->dev); diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index 6011cec5e351..e6aa9f32b6a8 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -1235,6 +1235,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd, ret = imx274_set_frame_interval(imx274, fi->interval); if (!ret) { + fi->interval = imx274->frame_interval; + /* * exposure time range is decided by frame interval * need to update it after frame interval changes @@ -1730,9 +1732,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv, __func__, frame_interval.numerator, frame_interval.denominator); - if (frame_interval.numerator == 0) { - err = -EINVAL; - goto fail; + if (frame_interval.numerator == 0 || frame_interval.denominator == 0) { + frame_interval.denominator = IMX274_DEF_FRAME_RATE; + frame_interval.numerator = 1; } req_frame_rate = (u32)(frame_interval.denominator diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index de295114ca48..21666d705e37 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable) ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); if (ret) { - info->set_power(&client->dev, 0); + if (info->set_power) + info->set_power(&client->dev, 0); return ret; } diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c index 19a3ceea3bc2..a62d7e2ac356 100644 --- a/drivers/media/i2c/max2175.c +++ b/drivers/media/i2c/max2175.c @@ -503,7 +503,7 @@ static void max2175_set_bbfilter(struct max2175 *ctx) } } -static bool max2175_set_csm_mode(struct max2175 *ctx, +static int max2175_set_csm_mode(struct max2175 *ctx, enum max2175_csm_mode new_mode) { int ret = max2175_poll_csm_ready(ctx); diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c 
index a398ea81e422..be6c882dd1d5 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -34,6 +34,8 @@ #define OV5640_REG_SYS_RESET02 0x3002 #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006 #define OV5640_REG_SYS_CTRL0 0x3008 +#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42 +#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02 #define OV5640_REG_CHIP_ID 0x300a #define OV5640_REG_IO_MIPI_CTRL00 0x300e #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017 @@ -272,8 +274,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) /* YUV422 UYVY VGA@30fps */ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0}, - {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0}, - {0x3630, 0x36, 0, 0}, + {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0}, {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0}, {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0}, {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0}, @@ -740,7 +741,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, * +->| PLL Root Div | - reg 0x3037, bit 4 * +-+------------+ * | +---------+ - * +->| Bit Div | - reg 0x3035, bits 0-3 + * +->| Bit Div | - reg 0x3034, bits 0-3 * +-+-------+ * | +-------------+ * +->| SCLK Div | - reg 0x3108, bits 0-1 @@ -1109,6 +1110,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor, val = regs->val; mask = regs->mask; + /* remain in power down mode for DVP */ + if (regs->reg_addr == OV5640_REG_SYS_CTRL0 && + val == OV5640_REG_SYS_CTRL0_SW_PWUP && + sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY) + continue; + if (mask) ret = ov5640_mod_reg(sensor, reg_addr, mask, val); else @@ -1264,31 +1271,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) if (ret) return ret; - /* - * enable VSYNC/HREF/PCLK DVP control lines - * & D[9:6] DVP data lines - * - * PAD OUTPUT ENABLE 01 - * - 6: VSYNC output enable - * - 5: HREF output enable - * - 4: PCLK output enable - * - [3:0]: D[9:6] output enable - */ - ret = ov5640_write_reg(sensor, - OV5640_REG_PAD_OUTPUT_ENABLE01, - on ? 0x7f : 0); - if (ret) - return ret; - - /* - * enable D[5:0] DVP data lines - * - * PAD OUTPUT ENABLE 02 - * - [7:2]: D[5:0] output enable - */ - return ov5640_write_reg(sensor, - OV5640_REG_PAD_OUTPUT_ENABLE02, - on ? 0xfc : 0); + return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ? + OV5640_REG_SYS_CTRL0_SW_PWUP : + OV5640_REG_SYS_CTRL0_SW_PWDN); } static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on) @@ -1987,6 +1972,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor) clk_disable_unprepare(sensor->xclk); } +static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on) +{ + int ret; + + if (!on) { + /* Reset MIPI bus settings to their default values. 
*/ + ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58); + ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04); + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00); + return 0; + } + + /* + * Power up MIPI HS Tx and LS Rx; 2 data lanes mode + * + * 0x300e = 0x40 + * [7:5] = 010 : 2 data lanes mode (see FIXME note in + * "ov5640_set_stream_mipi()") + * [4] = 0 : Power up MIPI HS Tx + * [3] = 0 : Power up MIPI LS Rx + * [2] = 0 : MIPI interface disabled + */ + ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40); + if (ret) + return ret; + + /* + * Gate clock and set LP11 in 'no packets mode' (idle) + * + * 0x4800 = 0x24 + * [5] = 1 : Gate clock when 'no packets' + * [2] = 1 : MIPI bus in LP11 when 'no packets' + */ + ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24); + if (ret) + return ret; + + /* + * Set data lanes and clock in LP11 when 'sleeping' + * + * 0x3019 = 0x70 + * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' + * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' + * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' + */ + ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70); + if (ret) + return ret; + + /* Give lanes some time to coax into LP11 state. */ + usleep_range(500, 1000); + + return 0; +} + +static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) +{ + int ret; + + if (!on) { + /* Reset settings to their default values. */ + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00); + ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00); + return 0; + } + + /* + * enable VSYNC/HREF/PCLK DVP control lines + * & D[9:6] DVP data lines + * + * PAD OUTPUT ENABLE 01 + * - 6: VSYNC output enable + * - 5: HREF output enable + * - 4: PCLK output enable + * - [3:0]: D[9:6] output enable + */ + ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f); + if (ret) + return ret; + + /* + * enable D[5:0] DVP data lines + * + * PAD OUTPUT ENABLE 02 + * - [7:2]: D[5:0] output enable + */ + return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc); +} + static int ov5640_set_power(struct ov5640_dev *sensor, bool on) { int ret = 0; @@ -1999,68 +2073,18 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on) ret = ov5640_restore_mode(sensor); if (ret) goto power_off; - - /* We're done here for DVP bus, while CSI-2 needs setup. */ - if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY) - return 0; - - /* - * Power up MIPI HS Tx and LS Rx; 2 data lanes mode - * - * 0x300e = 0x40 - * [7:5] = 010 : 2 data lanes mode (see FIXME note in - * "ov5640_set_stream_mipi()") - * [4] = 0 : Power up MIPI HS Tx - * [3] = 0 : Power up MIPI LS Rx - * [2] = 0 : MIPI interface disabled - */ - ret = ov5640_write_reg(sensor, - OV5640_REG_IO_MIPI_CTRL00, 0x40); - if (ret) - goto power_off; - - /* - * Gate clock and set LP11 in 'no packets mode' (idle) - * - * 0x4800 = 0x24 - * [5] = 1 : Gate clock when 'no packets' - * [2] = 1 : MIPI bus in LP11 when 'no packets' - */ - ret = ov5640_write_reg(sensor, - OV5640_REG_MIPI_CTRL00, 0x24); - if (ret) - goto power_off; - - /* - * Set data lanes and clock in LP11 when 'sleeping' - * - * 0x3019 = 0x70 - * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' - * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' - * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' - */ - ret = ov5640_write_reg(sensor, - OV5640_REG_PAD_OUTPUT00, 0x70); - if (ret) - goto power_off; - - /* Give lanes some time to coax into LP11 state. 
*/ - usleep_range(500, 1000); - - } else { - if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) { - /* Reset MIPI bus settings to their default values. */ - ov5640_write_reg(sensor, - OV5640_REG_IO_MIPI_CTRL00, 0x58); - ov5640_write_reg(sensor, - OV5640_REG_MIPI_CTRL00, 0x04); - ov5640_write_reg(sensor, - OV5640_REG_PAD_OUTPUT00, 0x00); - } - - ov5640_set_power_off(sensor); } + if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) + ret = ov5640_set_power_mipi(sensor, on); + else + ret = ov5640_set_power_dvp(sensor, on); + if (ret) + goto power_off; + + if (!on) + ov5640_set_power_off(sensor); + return 0; power_off: @@ -3068,8 +3092,8 @@ static int ov5640_probe(struct i2c_client *client) free_ctrls: v4l2_ctrl_handler_free(&sensor->ctrls.handler); entity_cleanup: - mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); + mutex_destroy(&sensor->lock); return ret; } @@ -3079,9 +3103,9 @@ static int ov5640_remove(struct i2c_client *client) struct ov5640_dev *sensor = to_ov5640_dev(sd); v4l2_async_unregister_subdev(&sensor->sd); - mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); v4l2_ctrl_handler_free(&sensor->ctrls.handler); + mutex_destroy(&sensor->lock); return 0; } diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c index 041fcbb4eebd..79e608dba4b6 100644 --- a/drivers/media/i2c/ov5670.c +++ b/drivers/media/i2c/ov5670.c @@ -2081,7 +2081,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670) /* By default, V4L2_CID_PIXEL_RATE is read only */ ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, - V4L2_CID_PIXEL_RATE, 0, + V4L2_CID_PIXEL_RATE, + link_freq_configs[0].pixel_rate, link_freq_configs[0].pixel_rate, 1, link_freq_configs[0].pixel_rate); diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c index 34b7046d9702..1adcd1ed1664 100644 --- a/drivers/media/i2c/ov5695.c +++ b/drivers/media/i2c/ov5695.c @@ -971,16 +971,9 @@ unlock_and_return: return ret; } -/* Calculate the delay in us by clock rate and clock cycles */ -static inline u32 ov5695_cal_delay(u32 cycles) -{ - return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000); -} - static int __ov5695_power_on(struct ov5695 *ov5695) { - int ret; - u32 delay_us; + int i, ret; struct device *dev = &ov5695->client->dev; ret = clk_prepare_enable(ov5695->xvclk); @@ -991,21 +984,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695) gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators\n"); - goto disable_clk; + /* + * The hardware requires the regulators to be powered on in order, + * so enable them one by one. 
+ */ + for (i = 0; i < OV5695_NUM_SUPPLIES; i++) { + ret = regulator_enable(ov5695->supplies[i].consumer); + if (ret) { + dev_err(dev, "Failed to enable %s: %d\n", + ov5695->supplies[i].supply, ret); + goto disable_reg_clk; + } } gpiod_set_value_cansleep(ov5695->reset_gpio, 0); - /* 8192 cycles prior to first SCCB transaction */ - delay_us = ov5695_cal_delay(8192); - usleep_range(delay_us, delay_us * 2); + usleep_range(1000, 1200); return 0; -disable_clk: +disable_reg_clk: + for (--i; i >= 0; i--) + regulator_disable(ov5695->supplies[i].consumer); clk_disable_unprepare(ov5695->xvclk); return ret; @@ -1013,9 +1013,22 @@ disable_clk: static void __ov5695_power_off(struct ov5695 *ov5695) { + struct device *dev = &ov5695->client->dev; + int i, ret; + clk_disable_unprepare(ov5695->xvclk); gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies); + + /* + * The hardware requires the regulators to be powered off in order, + * so disable them one by one. + */ + for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) { + ret = regulator_disable(ov5695->supplies[i].consumer); + if (ret) + dev_err(dev, "Failed to disable %s: %d\n", + ov5695->supplies[i].supply, ret); + } } static int __maybe_unused ov5695_runtime_resume(struct device *dev) @@ -1285,7 +1298,7 @@ static int ov5695_probe(struct i2c_client *client, if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ) dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n"); - ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ov5695->reset_gpio)) { dev_err(dev, "Failed to get reset-gpios\n"); return -EINVAL; diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 42805dfbffeb..06edbe8749c6 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -2327,11 +2327,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr, if (rval < 0) { if (rval != -EBUSY && rval != -EAGAIN) pm_runtime_set_active(&client->dev); - pm_runtime_put(&client->dev); + pm_runtime_put_noidle(&client->dev); return -ENODEV; } if (smiapp_read_nvm(sensor, sensor->nvm)) { + pm_runtime_put(&client->dev); dev_err(&client->dev, "nvm read failed\n"); return -ENODEV; } diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index dbbab75f135e..114c084c4aec 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = { .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable, }; -static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, - bool *handled) +static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus, + bool *handled) { struct tc358743_state *state = to_state(sd); unsigned int cec_rxint, cec_txint; @@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, cec_transmit_attempt_done(state->cec_adap, CEC_TX_STATUS_ERROR); } - *handled = true; + if (handled) + *handled = true; } if ((intstatus & MASK_CEC_RINT) && (cec_rxint & MASK_CECRIEND)) { @@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, msg.msg[i] = v & 0xff; } cec_received_msg(state->cec_adap, &msg); - *handled = true; + if (handled) + *handled = true; } i2c_wr16(sd, INTSTATUS, intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); @@ -1432,7 +1434,7 @@ static int tc358743_isr(struct 
v4l2_subdev *sd, u32 status, bool *handled) #ifdef CONFIG_VIDEO_TC358743_CEC if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) { - tc358743_cec_isr(sd, intstatus, handled); + tc358743_cec_handler(sd, intstatus, handled); i2c_wr16(sd, INTSTATUS, intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT); @@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) static irqreturn_t tc358743_irq_handler(int irq, void *dev_id) { struct tc358743_state *state = dev_id; - bool handled; + bool handled = false; tc358743_isr(&state->sd, 0, &handled); @@ -2190,7 +2192,7 @@ static int tc358743_remove(struct i2c_client *client) del_timer_sync(&state->timer); flush_work(&state->work_i2c_poll); } - cancel_delayed_work(&state->delayed_work_enable_hotplug); + cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); cec_unregister_adapter(state->cec_adap); v4l2_async_unregister_subdev(sd); v4l2_device_unregister_subdev(sd); diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index 5e68182001ec..e43d8327b810 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client) media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(&state->hdl); regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies); - cancel_delayed_work(&state->delayed_work_enable_hpd); + cancel_delayed_work_sync(&state->delayed_work_enable_hpd); mutex_destroy(&state->page_lock); mutex_destroy(&state->lock); diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index 078141712c88..0b977e73ceb2 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -255,7 +255,7 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on) return amg88xx_set_power_off(data); } -#if IS_ENABLED(CONFIG_HWMON) +#if IS_REACHABLE(CONFIG_HWMON) static const u32 amg88xx_temp_config[] = { HWMON_T_INPUT, diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c index e19df5165e78..da8088351135 100644 --- a/drivers/media/mc/mc-device.c +++ b/drivers/media/mc/mc-device.c @@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode) dev_dbg(devnode->parent, "Media device released\n"); } +static void __media_device_unregister_entity(struct media_entity *entity) +{ + struct media_device *mdev = entity->graph_obj.mdev; + struct media_link *link, *tmp; + struct media_interface *intf; + unsigned int i; + + ida_free(&mdev->entity_internal_idx, entity->internal_idx); + + /* Remove all interface links pointing to this entity */ + list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { + list_for_each_entry_safe(link, tmp, &intf->links, list) { + if (link->entity == entity) + __media_remove_intf_link(link); + } + } + + /* Remove all data links that belong to this entity */ + __media_entity_remove_links(entity); + + /* Remove all pads that belong to this entity */ + for (i = 0; i < entity->num_pads; i++) + media_gobj_destroy(&entity->pads[i].graph_obj); + + /* Remove the entity */ + media_gobj_destroy(&entity->graph_obj); + + /* invoke entity_notify callbacks to handle entity removal?? 
*/ + + entity->graph_obj.mdev = NULL; +} + /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device @@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, */ ret = media_graph_walk_init(&new, mdev); if (ret) { + __media_device_unregister_entity(entity); mutex_unlock(&mdev->graph_mutex); return ret; } @@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev, } EXPORT_SYMBOL_GPL(media_device_register_entity); -static void __media_device_unregister_entity(struct media_entity *entity) -{ - struct media_device *mdev = entity->graph_obj.mdev; - struct media_link *link, *tmp; - struct media_interface *intf; - unsigned int i; - - ida_free(&mdev->entity_internal_idx, entity->internal_idx); - - /* Remove all interface links pointing to this entity */ - list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { - list_for_each_entry_safe(link, tmp, &intf->links, list) { - if (link->entity == entity) - __media_remove_intf_link(link); - } - } - - /* Remove all data links that belong to this entity */ - __media_entity_remove_links(entity); - - /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); - - /* Remove the entity */ - media_gobj_destroy(&entity->graph_obj); - - /* invoke entity_notify callbacks to handle entity removal?? */ - - entity->graph_obj.mdev = NULL; -} - void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->graph_obj.mdev; diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c index e3fca436c75b..c0782fd96c59 100644 --- a/drivers/media/mc/mc-request.c +++ b/drivers/media/mc/mc-request.c @@ -296,9 +296,18 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free)) return -ENOMEM; + if (mdev->ops->req_alloc) + req = mdev->ops->req_alloc(mdev); + else + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) - return fd; + if (fd < 0) { + ret = fd; + goto err_free_req; + } filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); if (IS_ERR(filp)) { @@ -306,15 +315,6 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) goto err_put_fd; } - if (mdev->ops->req_alloc) - req = mdev->ops->req_alloc(mdev); - else - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) { - ret = -ENOMEM; - goto err_fput; - } - filp->private_data = req; req->mdev = mdev; req->state = MEDIA_REQUEST_STATE_IDLE; @@ -336,12 +336,15 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) return 0; -err_fput: - fput(filp); - err_put_fd: put_unused_fd(fd); +err_free_req: + if (mdev->ops->req_free) + mdev->ops->req_free(req); + else + kfree(req); + return ret; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index a359da7773a9..ff2962cea616 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) btv->id = dev->device; if (pci_enable_device(dev)) { pr_warn("%d: Can't enable device\n", btv->c.nr); - return -EIO; + result = -EIO; + goto free_mem; } if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { pr_warn("%d: No suitable DMA available\n", btv->c.nr); - return -EIO; + result = -EIO; + goto free_mem; 
} if (!request_mem_region(pci_resource_start(dev,0), pci_resource_len(dev,0), @@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) pr_warn("%d: can't request iomem (0x%llx)\n", btv->c.nr, (unsigned long long)pci_resource_start(dev, 0)); - return -EBUSY; + result = -EBUSY; + goto free_mem; } pci_set_master(dev); pci_set_command(dev); @@ -4211,6 +4214,10 @@ fail0: release_mem_region(pci_resource_start(btv->c.pci,0), pci_resource_len(btv->c.pci,0)); pci_disable_device(btv->c.pci); + +free_mem: + bttvs[btv->c.nr] = NULL; + kfree(btv); return result; } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 7e0b0b7cc2a3..ead0acb7807c 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2074,6 +2074,10 @@ static struct { * 0x1451 is PCI ID for the IOMMU found on Ryzen */ { PCI_VENDOR_ID_AMD, 0x1451 }, + /* According to sudo lspci -nn, + * 0x1423 is the PCI ID for the IOMMU found on Kaveri + */ + { PCI_VENDOR_ID_AMD, 0x1423 }, }; static bool cx23885_does_need_dma_reset(void) diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index e880afe37f15..d59ca3601785 100644 --- a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) return -ENOMEM; spin_lock_init(&state->rx_kfifo_lock); - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, + GFP_KERNEL)) { + kfree(state); return -ENOMEM; + } state->dev = dev; sd = &state->sd; diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 1adfdc7ab0db..7808ec1052bf 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -799,6 +799,7 @@ static void cio2_vb2_return_all_buffers(struct cio2_queue *q, atomic_dec(&q->bufs_queued); vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf, state); + q->bufs[i] = NULL; } } } @@ -1243,29 +1244,15 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_format *fmt) { struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); - struct v4l2_subdev_format format; - int ret; - if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + mutex_lock(&q->subdev_lock); + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad); - return 0; - } + else + fmt->format = q->subdev_fmt; - if (fmt->pad == CIO2_PAD_SINK) { - format.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, - &format); - - if (ret) - return ret; - /* update colorspace etc */ - q->subdev_fmt.colorspace = format.format.colorspace; - q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc; - q->subdev_fmt.quantization = format.format.quantization; - q->subdev_fmt.xfer_func = format.format.xfer_func; - } - - fmt->format = q->subdev_fmt; + mutex_unlock(&q->subdev_lock); return 0; } @@ -1282,6 +1269,9 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_format *fmt) { struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); + struct v4l2_mbus_framefmt *mbus; + u32 mbus_code = fmt->format.code; + unsigned int i; /* * Only allow setting sink pad format; @@ -1290,16 +1280,29 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, if (fmt->pad == CIO2_PAD_SOURCE) return cio2_subdev_get_fmt(sd, cfg, fmt); - if 
(fmt->which == V4L2_SUBDEV_FORMAT_TRY) { - *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; - } else { - /* It's the sink, allow changing frame size */ - q->subdev_fmt.width = fmt->format.width; - q->subdev_fmt.height = fmt->format.height; - q->subdev_fmt.code = fmt->format.code; - fmt->format = q->subdev_fmt; + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + else + mbus = &q->subdev_fmt; + + fmt->format.code = formats[0].mbus_code; + + for (i = 0; i < ARRAY_SIZE(formats); i++) { + if (formats[i].mbus_code == mbus_code) { + fmt->format.code = mbus_code; + break; + } } + fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH); + fmt->format.height = min_t(u32, fmt->format.height, + CIO2_IMAGE_MAX_LENGTH); + fmt->format.field = V4L2_FIELD_NONE; + + mutex_lock(&q->subdev_lock); + *mbus = fmt->format; + mutex_unlock(&q->subdev_lock); + return 0; } @@ -1558,6 +1561,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) /* Initialize miscellaneous variables */ mutex_init(&q->lock); + mutex_init(&q->subdev_lock); /* Initialize formats to default values */ fmt = &q->subdev_fmt; @@ -1676,6 +1680,7 @@ fail_vdev_media_entity: fail_subdev_media_entity: cio2_fbpt_exit(q, &cio2->pci_dev->dev); fail_fbpt: + mutex_destroy(&q->subdev_lock); mutex_destroy(&q->lock); return r; @@ -1689,6 +1694,7 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q) v4l2_device_unregister_subdev(&q->subdev); media_entity_cleanup(&q->subdev.entity); cio2_fbpt_exit(q, &cio2->pci_dev->dev); + mutex_destroy(&q->subdev_lock); mutex_destroy(&q->lock); } diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h index 7caab9b8c2b9..af5855662112 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h @@ -332,6 +332,7 @@ struct cio2_queue { /* Subdev, /dev/v4l-subdevX */ struct v4l2_subdev subdev; + struct mutex subdev_lock; /* Serialise access to subdev_fmt field */ struct media_pad subdev_pads[CIO2_PADS]; struct v4l2_mbus_framefmt subdev_fmt; atomic_t frame_sequence; diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c index d4f12c250f91..526042d8afae 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c @@ -175,7 +175,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) struct spi_master *master; struct netup_spi *nspi; - master = spi_alloc_master(&ndev->pci_dev->dev, + master = devm_spi_alloc_master(&ndev->pci_dev->dev, sizeof(struct netup_spi)); if (!master) { dev_err(&ndev->pci_dev->dev, @@ -208,6 +208,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) ndev->pci_slot, ndev->pci_func); if (!spi_new_device(master, &netup_spi_board)) { + spi_unregister_master(master); ndev->spi = NULL; dev_err(&ndev->pci_dev->dev, "%s(): unable to create SPI device\n", __func__); @@ -226,13 +227,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) if (!spi) return; + spi_unregister_master(spi->master); spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); reg = readw(&spi->regs->control_stat); writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); spin_unlock_irqrestore(&spi->lock, flags); - spi_unregister_master(spi->master); ndev->spi = NULL; } diff --git a/drivers/media/pci/saa7134/saa7134-empress.c
b/drivers/media/pci/saa7134/saa7134-empress.c index cb65d345fd3e..e2666d1c6896 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev) q->lock = &dev->lock; q->dev = &dev->pci->dev; err = vb2_queue_init(q); - if (err) + if (err) { + video_device_release(dev->empress_dev); + dev->empress_dev = NULL; return err; + } dev->empress_dev->queue = q; dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c index 79e1afb71075..5cc4ef21f9d3 100644 --- a/drivers/media/pci/saa7134/saa7134-tvaudio.c +++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c @@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value) { int err; - audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value); + audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", + (reg << 2) & 0xffffffff, value); err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR); if (err < 0) return err; diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index e6a71c17566d..952ea250feda 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -641,16 +641,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio * struct mxb *mxb = (struct mxb *)dev->ext_priv; DEB_D("VIDIOC_S_AUDIO %d\n", a->index); - if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) { - if (mxb->cur_audinput != a->index) { - mxb->cur_audinput = a->index; - tea6420_route(mxb, a->index); - if (mxb->cur_audinput == 0) - mxb_update_audmode(mxb); - } - return 0; + if (a->index >= 32 || + !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index))) + return -EINVAL; + + if (mxb->cur_audinput != a->index) { + mxb->cur_audinput = a->index; + tea6420_route(mxb, a->index); + if (mxb->cur_audinput == 0) + mxb_update_audmode(mxb); } - return -EINVAL; + return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c index 3fca7257a720..df494644b5b6 100644 --- a/drivers/media/pci/saa7164/saa7164-encoder.c +++ b/drivers/media/pci/saa7164/saa7164-encoder.c @@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port) printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n", __func__, result); result = -ENOMEM; - goto failed; + goto fail_pci; } /* Establish encoder defaults here */ @@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port) 100000, ENCODER_DEF_BITRATE); if (hdl->error) { result = hdl->error; - goto failed; + goto fail_hdl; } port->std = V4L2_STD_NTSC_M; @@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port) printk(KERN_INFO "%s: can't allocate mpeg device\n", dev->name); result = -ENOMEM; - goto failed; + goto fail_hdl; } port->v4l_device->ctrl_handler = hdl; @@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port) if (result < 0) { printk(KERN_INFO "%s: can't register mpeg device\n", dev->name); - /* TODO: We're going to leak here if we don't dealloc - The buffers above. The unreg function can't deal wit it. 
- */ - goto failed; + goto fail_reg; } printk(KERN_INFO "%s: registered device video%d [mpeg]\n", @@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port) saa7164_api_set_encoder(port); saa7164_api_get_encoder(port); + return 0; - result = 0; -failed: +fail_reg: + video_device_release(port->v4l_device); + port->v4l_device = NULL; +fail_hdl: + v4l2_ctrl_handler_free(hdl); +fail_pci: return result; } diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c index 9445d792bfc9..731aa702e2b7 100644 --- a/drivers/media/pci/smipcie/smipcie-ir.c +++ b/drivers/media/pci/smipcie/smipcie-ir.c @@ -60,39 +60,45 @@ static void smi_ir_decode(struct smi_rc *ir) { struct smi_dev *dev = ir->dev; struct rc_dev *rc_dev = ir->rc_dev; - u32 dwIRControl, dwIRData; - u8 index, ucIRCount, readLoop; + u32 control, data; + u8 index, ir_count, read_loop; - dwIRControl = smi_read(IR_Init_Reg); + control = smi_read(IR_Init_Reg); - if (dwIRControl & rbIRVld) { - ucIRCount = (u8) smi_read(IR_Data_Cnt); + dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control); - readLoop = ucIRCount/4; - if (ucIRCount % 4) - readLoop += 1; - for (index = 0; index < readLoop; index++) { - dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4)); + if (control & rbIRVld) { + ir_count = (u8)smi_read(IR_Data_Cnt); - ir->irData[index*4 + 0] = (u8)(dwIRData); - ir->irData[index*4 + 1] = (u8)(dwIRData >> 8); - ir->irData[index*4 + 2] = (u8)(dwIRData >> 16); - ir->irData[index*4 + 3] = (u8)(dwIRData >> 24); + dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count); + + read_loop = ir_count / 4; + if (ir_count % 4) + read_loop += 1; + for (index = 0; index < read_loop; index++) { + data = smi_read(IR_DATA_BUFFER_BASE + (index * 4)); + dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data); + + ir->irData[index * 4 + 0] = (u8)(data); + ir->irData[index * 4 + 1] = (u8)(data >> 8); + ir->irData[index * 4 + 2] = (u8)(data >> 16); + ir->irData[index * 4 + 3] = (u8)(data >> 24); } - smi_raw_process(rc_dev, ir->irData, ucIRCount); - smi_set(IR_Init_Reg, rbIRVld); + smi_raw_process(rc_dev, ir->irData, ir_count); } - if (dwIRControl & rbIRhighidle) { + if (control & rbIRhighidle) { struct ir_raw_event rawir = {}; + dev_dbg(&rc_dev->dev, "high idle\n"); + rawir.pulse = 0; rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN); ir_raw_event_store_with_filter(rc_dev, &rawir); - smi_set(IR_Init_Reg, rbIRhighidle); } + smi_set(IR_Init_Reg, rbIRVld); ir_raw_event_handle(rc_dev); } @@ -151,7 +157,7 @@ int smi_ir_init(struct smi_dev *dev) rc_dev->dev.parent = &dev->pci_dev->dev; rc_dev->map_name = dev->info->rc_map; - rc_dev->timeout = MS_TO_NS(100); + rc_dev->timeout = US_TO_NS(SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN); rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD); ir->rc_dev = rc_dev; @@ -174,7 +180,7 @@ void smi_ir_exit(struct smi_dev *dev) struct smi_rc *ir = &dev->ir; struct rc_dev *rc_dev = ir->rc_dev; - smi_ir_stop(ir); rc_unregister_device(rc_dev); + smi_ir_stop(ir); ir->rc_dev = NULL; } diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c index 30c8f2ec9c3c..809e4e65bb6e 100644 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c +++ b/drivers/media/pci/solo6x10/solo6x10-g723.c @@ -399,7 +399,7 @@ int solo_g723_init(struct solo_dev *solo_dev) ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev)); if (ret < 0) - return ret; + goto snd_error; ret = solo_snd_pcm_init(solo_dev); if (ret < 0) diff --git a/drivers/media/pci/sta2x11/Kconfig 
b/drivers/media/pci/sta2x11/Kconfig index 011b766f0bff..d613feee8176 100644 --- a/drivers/media/pci/sta2x11/Kconfig +++ b/drivers/media/pci/sta2x11/Kconfig @@ -2,6 +2,7 @@ config STA2X11_VIP tristate "STA2X11 VIP Video For Linux" depends on STA2X11 || COMPILE_TEST + select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT select VIDEOBUF2_DMA_CONTIG depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index d0cdee1c6eb0..bf36b1e22b63 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -406,14 +406,15 @@ static void debiirq(unsigned long cookie) case DATA_CI_GET: { u8 *data = av7110->debi_virt; + u8 data_0 = data[0]; - if ((data[0] < 2) && data[2] == 0xff) { + if (data_0 < 2 && data[2] == 0xff) { int flags = 0; if (data[5] > 0) flags |= CA_CI_MODULE_PRESENT; if (data[5] > 5) flags |= CA_CI_MODULE_READY; - av7110->ci_slot[data[0]].flags = flags; + av7110->ci_slot[data_0].flags = flags; } else ci_get_data(&av7110->ci_rbuffer, av7110->debi_virt, diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c index fadbdeeb4495..293867b9e796 100644 --- a/drivers/media/pci/ttpci/budget-core.c +++ b/drivers/media/pci/ttpci/budget-core.c @@ -369,20 +369,25 @@ static int budget_register(struct budget *budget) ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; budget->mem_frontend.source = DMX_MEMORY_FE; ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); if (ret < 0) - return ret; + goto err_release_dmx; ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); return 0; + +err_release_dmx: + dvb_dmxdev_release(&budget->dmxdev); + dvb_dmx_release(&budget->demux); + return ret; } static void budget_unregister(struct budget *budget) diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c index 09732eed7eb4..656142c7a2cc 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c @@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv, fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE; ret = tw5864_frameinterval_get(input, &frameinterval); + if (ret) + return ret; + fintv->stepwise.step = frameinterval; fintv->stepwise.min = frameinterval; fintv->stepwise.max = frameinterval; @@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv, cp->capability = V4L2_CAP_TIMEPERFRAME; ret = tw5864_frameinterval_get(input, &cp->timeperframe); + if (ret) + return ret; + cp->timeperframe.numerator *= input->frame_interval; cp->capturemode = 0; cp->readbuffers = 2; diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index 4eaaf39b9223..6dde49d9aa4c 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -491,8 +491,8 @@ static void aspeed_video_off(struct aspeed_video *video) aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff); /* Turn off the relevant clocks */ - clk_disable(video->vclk); clk_disable(video->eclk); + clk_disable(video->vclk); clear_bit(VIDEO_CLOCKS_ON, &video->flags); } @@ -503,8 +503,8 @@ static void aspeed_video_on(struct aspeed_video *video) return; /* Turn on the relevant clocks */ - 
clk_enable(video->eclk); clk_enable(video->vclk); + clk_enable(video->eclk); set_bit(VIDEO_CLOCKS_ON, &video->flags); } @@ -1529,12 +1529,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video) V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask, V4L2_JPEG_CHROMA_SUBSAMPLING_444); - if (video->ctrl_handler.error) { + rc = video->ctrl_handler.error; + if (rc) { v4l2_ctrl_handler_free(&video->ctrl_handler); v4l2_device_unregister(v4l2_dev); - dev_err(video->dev, "Failed to init controls: %d\n", - video->ctrl_handler.error); + dev_err(video->dev, "Failed to init controls: %d\n", rc); return rc; } @@ -1684,8 +1684,11 @@ static int aspeed_video_probe(struct platform_device *pdev) return rc; rc = aspeed_video_setup_video(video); - if (rc) + if (rc) { + clk_unprepare(video->vclk); + clk_unprepare(video->eclk); return rc; + } return 0; } diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c index 4a3b3810fd89..31390ce2dbf2 100644 --- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c +++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c @@ -278,11 +278,7 @@ static int cros_ec_cec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cros_ec_cec); cros_ec_cec->cros_ec = cros_ec; - ret = device_init_wakeup(&pdev->dev, 1); - if (ret) { - dev_err(&pdev->dev, "failed to initialize wakeup\n"); - return ret; - } + device_init_wakeup(&pdev->dev, 1); cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec, DRV_NAME, diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 71f4fe882d13..74f68ac3c9a7 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1482,8 +1482,6 @@ probe_out: /* Unregister video device */ video_unregister_device(&ch->video_dev); } - kfree(vpif_obj.sd); - v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c index d38d2bbb6f0f..7000f0bf0b35 100644 --- a/drivers/media/platform/davinci/vpss.c +++ b/drivers/media/platform/davinci/vpss.c @@ -505,19 +505,31 @@ static void vpss_exit(void) static int __init vpss_init(void) { + int ret; + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) return -EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); if (unlikely(!oper_cfg.vpss_regs_base2)) { - release_mem_region(VPSS_CLK_CTRL, 4); - return -ENOMEM; + ret = -ENOMEM; + goto err_ioremap; } writel(VPSS_CLK_CTRL_VENCCLKEN | - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); - return platform_driver_register(&vpss_driver); + ret = platform_driver_register(&vpss_driver); + if (ret) + goto err_pd_register; + + return 0; + +err_pd_register: + iounmap(oper_cfg.vpss_regs_base2); +err_ioremap: + release_mem_region(VPSS_CLK_CTRL, 4); + return ret; } subsys_initcall(vpss_init); module_exit(vpss_exit); diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index cde0d254ec1c..a77c49b18511 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on) if (on) { ret = pm_runtime_get_sync(&is->pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(&is->pdev->dev); return ret; + } set_bit(IS_ST_PWR_ON, &is->state); ret = fimc_is_start_firmware(is); diff --git 
a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index e87c6a09205b..efd06621951c 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -470,7 +470,7 @@ static int fimc_lite_open(struct file *file) set_bit(ST_FLITE_IN_USE, &fimc->state); ret = pm_runtime_get_sync(&fimc->pdev->dev); if (ret < 0) - goto unlock; + goto err_pm; ret = v4l2_fh_open(file); if (ret < 0) diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 9aaf3b8060d5..a07d796f63df 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) return -ENXIO; ret = pm_runtime_get_sync(fmd->pmf); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(fmd->pmf); return ret; + } fmd->num_sensors = 0; @@ -1268,6 +1270,7 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) if (IS_ERR(pctl->state_default)) return PTR_ERR(pctl->state_default); + /* PINCTRL_STATE_IDLE is optional */ pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_IDLE); return 0; diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 540151bbf58f..1aac167abb17 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable) if (enable) { s5pcsis_clear_counters(state); ret = pm_runtime_get_sync(&state->pdev->dev); - if (ret && ret != 1) + if (ret && ret != 1) { + pm_runtime_put_noidle(&state->pdev->dev); return ret; + } } mutex_lock(&state->lock); diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 803baf97f06e..6de8b3d99fb9 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1940,6 +1940,7 @@ int mccic_register(struct mcam_camera *cam) out: v4l2_async_notifier_unregister(&cam->notifier); v4l2_device_unregister(&cam->v4l2_dev); + v4l2_async_notifier_cleanup(&cam->notifier); return ret; } EXPORT_SYMBOL_GPL(mccic_register); @@ -1961,6 +1962,7 @@ void mccic_shutdown(struct mcam_camera *cam) v4l2_ctrl_handler_free(&cam->ctrl_handler); v4l2_async_notifier_unregister(&cam->notifier); v4l2_device_unregister(&cam->v4l2_dev); + v4l2_async_notifier_cleanup(&cam->notifier); } EXPORT_SYMBOL_GPL(mccic_shutdown); diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index ee802fc3bcdf..9fa1bc5514f3 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -571,6 +571,13 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q, if (!q_data) return -EINVAL; + if (*num_planes) { + for (i = 0; i < *num_planes; i++) + if (sizes[i] < q_data->sizeimage[i]) + return -EINVAL; + return 0; + } + *num_planes = q_data->fmt->colplanes; for (i = 0; i < q_data->fmt->colplanes; i++) { sizes[i] = q_data->sizeimage[i]; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c index 5a6ec8fb52da..f9bbd0000bf3 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c @@ -48,11 +48,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev 
*mtkdev) dec_clk->clk_info = devm_kcalloc(&pdev->dev, dec_clk->clk_num, sizeof(*clk_info), GFP_KERNEL); - if (!dec_clk->clk_info) - return -ENOMEM; + if (!dec_clk->clk_info) { + ret = -ENOMEM; + goto put_device; + } } else { mtk_v4l2_err("Failed to get vdec clock count"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } for (i = 0; i < dec_clk->clk_num; i++) { @@ -61,25 +64,29 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) "clock-names", i, &clk_info->clk_name); if (ret) { mtk_v4l2_err("Failed to get clock name id = %d", i); - return ret; + goto put_device; } clk_info->vcodec_clk = devm_clk_get(&pdev->dev, clk_info->clk_name); if (IS_ERR(clk_info->vcodec_clk)) { mtk_v4l2_err("devm_clk_get (%d)%s fail", i, clk_info->clk_name); - return PTR_ERR(clk_info->vcodec_clk); + ret = PTR_ERR(clk_info->vcodec_clk); + goto put_device; } } pm_runtime_enable(&pdev->dev); - + return 0; +put_device: + put_device(pm->larbvdec); return ret; } void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev) { pm_runtime_disable(dev->pm.dev); + put_device(dev->pm.larbvdec); } void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm) diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c index 3e2bfded79a6..e682bdb1ed45 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c @@ -49,14 +49,16 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) node = of_parse_phandle(dev->of_node, "mediatek,larb", 1); if (!node) { mtk_v4l2_err("no mediatek,larb found"); - return -ENODEV; + ret = -ENODEV; + goto put_larbvenc; } pdev = of_find_device_by_node(node); of_node_put(node); if (!pdev) { mtk_v4l2_err("no mediatek,larb device found"); - return -ENODEV; + ret = -ENODEV; + goto put_larbvenc; } pm->larbvenclt = &pdev->dev; @@ -69,11 +71,14 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) enc_clk->clk_info = devm_kcalloc(&pdev->dev, enc_clk->clk_num, sizeof(*clk_info), GFP_KERNEL); - if (!enc_clk->clk_info) - return -ENOMEM; + if (!enc_clk->clk_info) { + ret = -ENOMEM; + goto put_larbvenclt; + } } else { mtk_v4l2_err("Failed to get venc clock count"); - return -EINVAL; + ret = -EINVAL; + goto put_larbvenclt; } for (i = 0; i < enc_clk->clk_num; i++) { @@ -82,17 +87,24 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) "clock-names", i, &clk_info->clk_name); if (ret) { mtk_v4l2_err("venc failed to get clk name %d", i); - return ret; + goto put_larbvenclt; } clk_info->vcodec_clk = devm_clk_get(&pdev->dev, clk_info->clk_name); if (IS_ERR(clk_info->vcodec_clk)) { mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i, clk_info->clk_name); - return PTR_ERR(clk_info->vcodec_clk); + ret = PTR_ERR(clk_info->vcodec_clk); + goto put_larbvenclt; } } + return 0; + +put_larbvenclt: + put_device(pm->larbvenclt); +put_larbvenc: + put_device(pm->larbvenc); return ret; } diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 27779b75df54..ac112cf06ab3 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcdev); irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto rel_vdev; + } + ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, dev_name(&pdev->dev), pcdev); if (ret) diff --git a/drivers/media/platform/omap3isp/isp.c 
b/drivers/media/platform/omap3isp/isp.c index 327c5716922a..dce6b3685e13 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -2330,8 +2330,10 @@ static int isp_probe(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, i); isp->mmio_base[map_idx] = devm_ioremap_resource(isp->dev, mem); - if (IS_ERR(isp->mmio_base[map_idx])) - return PTR_ERR(isp->mmio_base[map_idx]); + if (IS_ERR(isp->mmio_base[map_idx])) { + ret = PTR_ERR(isp->mmio_base[map_idx]); + goto error; + } } ret = isp_get_clocks(isp); diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 97d660606d98..b8c2b8bba826 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2287,7 +2287,7 @@ static int preview_init_entities(struct isp_prev_device *prev) me->ops = &preview_media_ops; ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); if (ret < 0) - return ret; + goto error_handler_free; preview_init_formats(sd, NULL); @@ -2320,6 +2320,8 @@ error_video_out: omap3isp_video_cleanup(&prev->video_in); error_video_in: media_entity_cleanup(&prev->subdev.entity); +error_handler_free: + v4l2_ctrl_handler_free(&prev->ctrls); return ret; } diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 8d47ea0c33f8..6e04e3ec61ba 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -1447,6 +1447,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb) struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue); struct pxa_buffer *buf = vb2_to_pxa_buffer(vb); int ret = 0; +#ifdef DEBUG + int i; +#endif switch (pcdev->channels) { case 1: diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c index 008afb85023b..3c5b9082ad72 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c @@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_sync(dev); return ret; + } ret = csiphy_set_clock_rates(csiphy); if (ret < 0) { diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 1d50dfbbb762..4c2675b43718 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -901,6 +901,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, video->nformats = ARRAY_SIZE(formats_rdi_8x96); } } else { + ret = -EINVAL; goto error_video_register; } diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index 3fdc9f964a3c..2483641799df 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss) return num_subdevs; err_cleanup: - v4l2_async_notifier_cleanup(&camss->notifier); of_node_put(node); return ret; } @@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev) camss->csid_num = 4; camss->vfe_num = 2; } else { - return -EINVAL; + ret = -EINVAL; + goto err_free; } camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, sizeof(*camss->csiphy), GFP_KERNEL); - if (!camss->csiphy) - return -ENOMEM; + if (!camss->csiphy) { + ret = -ENOMEM; + goto err_free; + } camss->csid = 
devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), GFP_KERNEL); - if (!camss->csid) - return -ENOMEM; + if (!camss->csid) { + ret = -ENOMEM; + goto err_free; + } camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); - if (!camss->vfe) - return -ENOMEM; + if (!camss->vfe) { + ret = -ENOMEM; + goto err_free; + } v4l2_async_notifier_init(&camss->notifier); num_subdevs = camss_of_parse_ports(camss); - if (num_subdevs < 0) - return num_subdevs; + if (num_subdevs < 0) { + ret = num_subdevs; + goto err_cleanup; + } ret = camss_init_subdevices(camss); if (ret < 0) @@ -936,6 +944,8 @@ err_register_entities: v4l2_device_unregister(&camss->v4l2_dev); err_cleanup: v4l2_async_notifier_cleanup(&camss->notifier); +err_free: + kfree(camss); return ret; } diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 84e982f259a0..bbc430a00344 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -316,8 +316,10 @@ static int venus_probe(struct platform_device *pdev) goto err_core_deinit; ret = pm_runtime_put_sync(dev); - if (ret) + if (ret) { + pm_runtime_get_noresume(dev); goto err_dev_unregister; + } return 0; @@ -328,6 +330,7 @@ err_core_deinit: err_venus_shutdown: venus_shutdown(core); err_runtime_disable: + pm_runtime_put_noidle(dev); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); hfi_destroy(core); diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index d3d1748a7ef6..33f70e1def94 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -44,8 +44,14 @@ static void venus_reset_cpu(struct venus_core *core) int venus_set_hw_state(struct venus_core *core, bool resume) { - if (core->use_tz) - return qcom_scm_set_remote_state(resume, 0); + int ret; + + if (core->use_tz) { + ret = qcom_scm_set_remote_state(resume, 0); + if (resume && ret == -EINVAL) + ret = 0; + return ret; + } if (resume) venus_reset_cpu(core); diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index 2293d936e49c..7f515a4b9bd1 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data) if (IS_V1(core)) { core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC; core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; + core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; } } diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 59ae7a1e63bc..658825b4c4e8 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -987,11 +987,10 @@ static int vdec_stop_capture(struct venus_inst *inst) ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT); vdec_cancel_dst_buffers(inst); inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP; - INIT_LIST_HEAD(&inst->registeredbufs); venus_helper_free_dpb_bufs(inst); break; default: - return 0; + break; } return ret; @@ -1090,6 +1089,14 @@ static int vdec_buf_init(struct vb2_buffer *vb) static void vdec_buf_cleanup(struct vb2_buffer *vb) { struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_buffer *buf = to_venus_buffer(vbuf); + + mutex_lock(&inst->lock); + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (!list_empty(&inst->registeredbufs)) + 
list_del_init(&buf->reg_list); + mutex_unlock(&inst->lock); inst->buf_count--; if (!inst->buf_count) diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c index 43c78620c9d8..05c712e00a2a 100644 --- a/drivers/media/platform/rcar-fcp.c +++ b/drivers/media/platform/rcar-fcp.c @@ -8,6 +8,7 @@ */ #include <linux/device.h> +#include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mod_devicetable.h> @@ -21,6 +22,7 @@ struct rcar_fcp_device { struct list_head list; struct device *dev; + struct device_dma_parameters dma_parms; }; static LIST_HEAD(fcp_devices); @@ -101,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp) return 0; ret = pm_runtime_get_sync(fcp->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(fcp->dev); return ret; + } return 0; } @@ -136,6 +140,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) fcp->dev = &pdev->dev; + fcp->dev->dma_parms = &fcp->dma_parms; + dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); + pm_runtime_enable(&pdev->dev); mutex_lock(&fcp_lock); diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c index c14af1b929df..d27eccfa57ca 100644 --- a/drivers/media/platform/rcar-vin/rcar-csi2.c +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c @@ -361,7 +361,6 @@ struct rcar_csi2 { struct media_pad pads[NR_OF_RCAR_CSI2_PAD]; struct v4l2_async_notifier notifier; - struct v4l2_async_subdev asd; struct v4l2_subdev *remote; struct v4l2_mbus_framefmt mf; @@ -810,6 +809,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv, static int rcsi2_parse_dt(struct rcar_csi2 *priv) { + struct v4l2_async_subdev *asd; + struct fwnode_handle *fwnode; struct device_node *ep; struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 }; int ret; @@ -833,24 +834,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv) return ret; } - priv->asd.match.fwnode = - fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep)); - priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; - + fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep)); of_node_put(ep); + dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode)); + v4l2_async_notifier_init(&priv->notifier); - - ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd); - if (ret) { - fwnode_handle_put(priv->asd.match.fwnode); - return ret; - } - priv->notifier.ops = &rcar_csi2_notify_ops; - dev_dbg(priv->dev, "Found '%pOF'\n", - to_of_node(priv->asd.match.fwnode)); + asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode, + sizeof(*asd)); + fwnode_handle_put(fwnode); + if (IS_ERR(asd)) + return PTR_ERR(asd); ret = v4l2_async_subdev_notifier_register(&priv->subdev, &priv->notifier); diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c index 3cb29b2e0b2b..e5f636080108 100644 --- a/drivers/media/platform/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/rcar-vin/rcar-dma.c @@ -1334,8 +1334,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) int ret; ret = pm_runtime_get_sync(vin->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(vin->dev); return ret; + } /* Make register writes take effect immediately. 
*/ vnmc = rvin_read(vin, VNMC_REG); diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c index 0f267a237b42..af3c8d405509 100644 --- a/drivers/media/platform/rcar_drif.c +++ b/drivers/media/platform/rcar_drif.c @@ -185,7 +185,6 @@ struct rcar_drif_frame_buf { /* OF graph endpoint's V4L2 async data */ struct rcar_drif_graph_ep { struct v4l2_subdev *subdev; /* Async matched subdev */ - struct v4l2_async_subdev asd; /* Async sub-device descriptor */ }; /* DMA buffer */ @@ -1105,12 +1104,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier, struct rcar_drif_sdr *sdr = container_of(notifier, struct rcar_drif_sdr, notifier); - if (sdr->ep.asd.match.fwnode != - of_fwnode_handle(subdev->dev->of_node)) { - rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name); - return -EINVAL; - } - v4l2_set_subdev_hostdata(subdev, sdr); sdr->ep.subdev = subdev; rdrif_dbg(sdr, "bound asd %s\n", subdev->name); @@ -1214,7 +1207,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr) { struct v4l2_async_notifier *notifier = &sdr->notifier; struct fwnode_handle *fwnode, *ep; - int ret; + struct v4l2_async_subdev *asd; v4l2_async_notifier_init(notifier); @@ -1223,26 +1216,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr) if (!ep) return 0; - fwnode = fwnode_graph_get_remote_port_parent(ep); - if (!fwnode) { - dev_warn(sdr->dev, "bad remote port parent\n"); - fwnode_handle_put(ep); - return -EINVAL; - } - - sdr->ep.asd.match.fwnode = fwnode; - sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE; - ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd); - if (ret) { - fwnode_handle_put(fwnode); - return ret; - } - /* Get the endpoint properties */ rcar_drif_get_ep_properties(sdr, ep); - fwnode_handle_put(fwnode); + fwnode = fwnode_graph_get_remote_port_parent(ep); fwnode_handle_put(ep); + if (!fwnode) { + dev_warn(sdr->dev, "bad remote port parent\n"); + return -EINVAL; + } + + asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, + sizeof(*asd)); + fwnode_handle_put(fwnode); + if (IS_ERR(asd)) + return PTR_ERR(asd); return 0; } diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index cb93a13e1777..97bed45360f0 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev) dprintk(fdp1, "FDP1 Version R-Car H3\n"); break; case FD1_IP_M3N: - dprintk(fdp1, "FDP1 Version R-Car M3N\n"); + dprintk(fdp1, "FDP1 Version R-Car M3-N\n"); break; case FD1_IP_E3: dprintk(fdp1, "FDP1 Version R-Car E3\n"); diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c index 36b821ccc1db..bf9a75b75083 100644 --- a/drivers/media/platform/rockchip/rga/rga-buf.c +++ b/drivers/media/platform/rockchip/rga/rga-buf.c @@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count) ret = pm_runtime_get_sync(rga->dev); if (ret < 0) { + pm_runtime_put_noidle(rga->dev); rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED); return ret; } diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 4be6dcf292ff..aaa96f256356 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_info.data.format = ctx->out.fmt->hw_format; dst_info.data.swap = ctx->out.fmt->color_swap; - if 
(ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { - if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) { - switch (ctx->in.colorspace) { - case V4L2_COLORSPACE_REC709: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT709_R0; - break; - default: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT601_R0; - break; - } + /* + * CSC mode must only be set when the colorspace families differ between + * input and output. It must remain unset (zeroed) if both are the same. + */ + + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) { + switch (ctx->in.colorspace) { + case V4L2_COLORSPACE_REC709: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; + break; + default: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0; + break; } } - if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { + if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { switch (ctx->out.colorspace) { case V4L2_COLORSPACE_REC709: dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h index 96cb0314dfa7..e8917e5630a4 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.h +++ b/drivers/media/platform/rockchip/rga/rga-hw.h @@ -95,6 +95,11 @@ #define RGA_COLOR_FMT_CP_8BPP 15 #define RGA_COLOR_FMT_MASK 15 +#define RGA_COLOR_FMT_IS_YUV(fmt) \ + (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP)) +#define RGA_COLOR_FMT_IS_RGB(fmt) \ + ((fmt) < RGA_COLOR_FMT_YUV422SP) + #define RGA_COLOR_NONE_SWAP 0 #define RGA_COLOR_RB_SWAP 1 #define RGA_COLOR_ALPHA_SWAP 2 diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c index c6fbcd7036d6..ee624804862e 100644 --- a/drivers/media/platform/s3c-camif/camif-core.c +++ b/drivers/media/platform/s3c-camif/camif-core.c @@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev) ret = camif_media_dev_init(camif); if (ret < 0) - goto err_alloc; + goto err_pm; ret = camif_register_sensor(camif); if (ret < 0) @@ -498,10 +498,9 @@ err_sens: media_device_unregister(&camif->media_dev); media_device_cleanup(&camif->media_dev); camif_unregister_media_entities(camif); -err_alloc: +err_pm: pm_runtime_put(dev); pm_runtime_disable(dev); -err_pm: camif_clk_put(camif); err_clk: s3c_camif_unregister_subdev(camif); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c index 7d52431c2c83..62d2320a7218 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c @@ -79,8 +79,10 @@ int s5p_mfc_power_on(void) int i, ret = 0; ret = pm_runtime_get_sync(pm->device); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(pm->device); return ret; + } /* clock control */ for (i = 0; i < pm->num_clocks; i++) { diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c index 77ca7517fa3e..bae62af82643 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c @@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data) int ret; unsigned int i; - ret = pm_runtime_get_sync(bdisp->dev); + ret = pm_runtime_resume_and_get(bdisp->dev); if (ret < 0) { seq_puts(s, "Cannot wake up IP\n"); return 0; diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 675b5f2b4c2e..a55ddf8d185d 
100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -1367,7 +1367,7 @@ static int bdisp_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "failed to set PM\n"); - goto err_dbg; + goto err_pm; } /* Filters */ @@ -1395,7 +1395,6 @@ err_filter: bdisp_hw_free_filters(bdisp->dev); err_pm: pm_runtime_put(dev); -err_dbg: bdisp_debugfs_remove(bdisp); err_v4l2: v4l2_device_unregister(&bdisp->v4l2_dev); diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c index 91369fb3ffaa..2791107e641b 100644 --- a/drivers/media/platform/sti/delta/delta-v4l2.c +++ b/drivers/media/platform/sti/delta/delta-v4l2.c @@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work) /* enable the hardware */ if (!dec->pm) { ret = delta_get_sync(ctx); - if (ret) + if (ret) { + delta_put_autosuspend(ctx); goto err; + } } /* decode this access unit */ diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c index 401aaafa1710..43f279e2a6a3 100644 --- a/drivers/media/platform/sti/hva/hva-hw.c +++ b/drivers/media/platform/sti/hva/hva-hw.c @@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva) if (pm_runtime_get_sync(dev) < 0) { dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX); + pm_runtime_put_noidle(dev); mutex_unlock(&hva->protect_mutex); return -EFAULT; } @@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "%s failed to set PM\n", HVA_PREFIX); - goto err_clk; + goto err_pm; } /* check IP hardware version */ @@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s) if (pm_runtime_get_sync(dev) < 0) { seq_puts(s, "Cannot wake up IP\n"); + pm_runtime_put_noidle(dev); mutex_unlock(&hva->protect_mutex); return; } diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 9392e3409fba..d41475f56ab5 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) if (ret < 0) { dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n", __func__, ret); - goto err_release_buffers; + goto err_pm_put; } ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline); @@ -837,8 +837,6 @@ err_media_pipeline_stop: err_pm_put: pm_runtime_put(dcmi->dev); - -err_release_buffers: spin_lock_irq(&dcmi->irqlock); /* * Return all buffers to vb2 in QUEUED state. 
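The runtime-PM fixes repeated throughout this series (fimc-isp, camss, s5p-mfc, bdisp, hva and dcmi above; ti-vpe's vpe_runtime_get() below) all close the same hole: pm_runtime_get_sync() increments the device's usage counter even when the resume fails, so an error path that simply returns leaks a reference and the device can never runtime-suspend again. A minimal sketch of the balanced idiom follows; the helper name resume_and_get is illustrative only, and this is essentially the pattern later kernels export as pm_runtime_resume_and_get():

#include <linux/pm_runtime.h>

/*
 * Resume @dev while keeping the usage count balanced on failure.
 * pm_runtime_get_sync() bumps the counter even on error, so the
 * error path must drop that reference with pm_runtime_put_noidle().
 */
static int resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ret == 1 only means the device was already active: success. */
	return 0;
}

A caller that gets 0 back owns exactly one reference and pairs it with a single pm_runtime_put() once the hardware work is done.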
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index f0dfe68486d1..c43a35df25de 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count) } subdev = sun6i_video_remote_subdev(video, NULL); - if (!subdev) + if (!subdev) { + ret = -EINVAL; goto stop_media_pipeline; + } config.pixelformat = video->fmt.fmt.pix.pixelformat; config.code = video->mbus_code; diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index 223161f9c403..f06408009a9c 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -266,8 +266,6 @@ struct cal_ctx { struct v4l2_subdev *sensor; struct v4l2_fwnode_endpoint endpoint; - struct v4l2_async_subdev asd; - struct v4l2_fh fh; struct cal_dev *dev; struct cc_data *cc; @@ -537,16 +535,16 @@ static void enable_irqs(struct cal_ctx *ctx) static void disable_irqs(struct cal_ctx *ctx) { + u32 val; + /* Disable IRQ_WDMA_END 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(2), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val); /* Disable IRQ_WDMA_START 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(3), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val); /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); } @@ -680,12 +678,13 @@ static void pix_proc_config(struct cal_ctx *ctx) } static void cal_wr_dma_config(struct cal_ctx *ctx, - unsigned int width) + unsigned int width, unsigned int height) { u32 val; val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); + set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, CAL_WR_DMA_CTRL_DTAG_MASK); set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, @@ -1308,7 +1307,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) csi2_lane_config(ctx); csi2_ctx_config(ctx); pix_proc_config(ctx); - cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, + ctx->v_fmt.fmt.pix.height); cal_wr_dma_addr(ctx, addr); csi2_ppi_enable(ctx); @@ -1648,7 +1648,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) parent = pdev->dev.of_node; - asd = &ctx->asd; endpoint = &ctx->endpoint; ep_node = NULL; @@ -1695,8 +1694,6 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) ctx_dbg(3, ctx, "can't get remote parent\n"); goto cleanup_exit; } - asd->match_type = V4L2_ASYNC_MATCH_FWNODE; - asd->match.fwnode = of_fwnode_handle(sensor_node); v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); @@ -1726,9 +1723,17 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst) v4l2_async_notifier_init(&ctx->notifier); + asd = kzalloc(sizeof(*asd), GFP_KERNEL); + if (!asd) + goto cleanup_exit; + + asd->match_type = V4L2_ASYNC_MATCH_FWNODE; + asd->match.fwnode = of_fwnode_handle(sensor_node); + ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd); if (ret) { ctx_err(ctx, "Error adding asd\n"); + 
kfree(asd); goto cleanup_exit; } diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index 8b14ba4a3d9e..817bd13370ef 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -2435,6 +2435,8 @@ static int vpe_runtime_get(struct platform_device *pdev) r = pm_runtime_get_sync(&pdev->dev); WARN_ON(r < 0); + if (r < 0) + pm_runtime_put_noidle(&pdev->dev); return r < 0 ? r : 0; } diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 82350097503e..c77281d43f89 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -2052,6 +2052,7 @@ static int vicodec_request_validate(struct media_request *req) } ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, vicodec_ctrl_stateless_state.id); + v4l2_ctrl_request_hdl_put(hdl); if (!ctrl) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); @@ -2172,16 +2173,19 @@ static int vicodec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); - if (register_instance(dev, &dev->stateful_enc, - "stateful-encoder", true)) + ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder", + true); + if (ret) goto unreg_dev; - if (register_instance(dev, &dev->stateful_dec, - "stateful-decoder", false)) + ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder", + false); + if (ret) goto unreg_sf_enc; - if (register_instance(dev, &dev->stateless_dec, - "stateless-decoder", false)) + ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder", + false); + if (ret) goto unreg_sf_dec; #ifdef CONFIG_MEDIA_CONTROLLER diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index f6a5cdbd74e7..cc71aa425597 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -174,13 +174,13 @@ static const u8 vivid_hdmi_edid[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b, - 0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f, + 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f, 0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21, 0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00, 0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4, - 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3, + 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3, 0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d, 0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, @@ -189,7 +189,7 @@ static const u8 vivid_hdmi_edid[256] = { 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51, 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, }; static int vidioc_querycap(struct file *file, void *priv, diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index a0364ac497f9..54bb3a59bf17 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -1025,7 +1025,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh, return -EINVAL; } dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags); - dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags); + dev->fbuf_out_flags |= a->flags & (chroma_flags |
alpha_flags); return 0; } diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index d7b43037e500..e07b135613eb 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type, if (!pool) return NULL; + pool->vsp1 = vsp1; + spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index a4a45d68a6ef..1c00688c71c2 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, brx = &vsp1->bru->entity; else if (pipe->brx && !drm_pipe->force_brx_release) brx = pipe->brx; - else if (!vsp1->bru->entity.pipe) + else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe) brx = &vsp1->bru->entity; else brx = &vsp1->brs->entity; @@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, * make sure it is present in the pipeline's list of entities if it * wasn't already. */ - if (!use_uif) { + if (drm_pipe->uif && !use_uif) { drm_pipe->uif->pipe = NULL; - } else if (!drm_pipe->uif->pipe) { + } else if (drm_pipe->uif && !drm_pipe->uif->pipe) { drm_pipe->uif->pipe = pipe; list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities); } diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index c650e45bb0ad..aa66e4f5f3f3 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1) int ret; ret = pm_runtime_get_sync(vsp1->dev); - return ret < 0 ? ret : 0; + if (ret < 0) { + pm_runtime_put_noidle(vsp1->dev); + return ret; + } + + return 0; } /* @@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev) /* Configure device parameters based on the version register. */ pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = vsp1_device_get(vsp1); if (ret < 0) goto done; vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION); - pm_runtime_put_sync(&pdev->dev); + vsp1_device_put(vsp1); for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) { if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) == @@ -877,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev) } done: - if (ret) + if (ret) { pm_runtime_disable(&pdev->dev); + rcar_fcp_put(vsp1->fcp); + } return ret; } diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 48d23433b3c0..caeb51def782 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -5,6 +5,7 @@ obj-y += keymaps/ obj-$(CONFIG_RC_CORE) += rc-core.o rc-core-y := rc-main.o rc-ir-raw.o rc-core-$(CONFIG_LIRC) += lirc_dev.o +rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c index 9cdef17b4793..c12dda73cdd5 100644 --- a/drivers/media/rc/ati_remote.c +++ b/drivers/media/rc/ati_remote.c @@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface, err("%s: endpoint_in message size==0? 
\n", __func__); return -ENODEV; } + if (!usb_endpoint_is_int_out(endpoint_out)) { + err("%s: Unexpected endpoint_out\n", __func__); + return -ENODEV; + } ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL); rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 0a0ce620e4a2..a2d74f957e61 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -113,7 +113,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_trace_printk: - if (capable(CAP_SYS_ADMIN)) + if (perfmon_capable()) return bpf_get_trace_printk_proto(); /* fall through */ default: diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index 18ca12d78314..66703989ae18 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -79,13 +79,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, // space edge = ktime_add_us(edge, txbuf[i]); delta = ktime_us_delta(edge, ktime_get()); - if (delta > 10) { - spin_unlock_irqrestore(&gpio_ir->lock, flags); - usleep_range(delta, delta + 10); - spin_lock_irqsave(&gpio_ir->lock, flags); - } else if (delta > 0) { + if (delta > 0) udelay(delta); - } } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index 3ab6cec0dc3b..4b8aee390518 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data) /* read the interrupt flags */ iflags = dev->params.get_irq_causes(dev); + /* Check for RX overflow */ + if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) { + dev_warn(&dev->rdev->dev, "receive overflow\n"); + ir_raw_event_reset(dev->rdev); + } + /* check for the receive interrupt */ - if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) { + if (iflags & ITE_IRQ_RX_FIFO) { /* read the FIFO bytes */ rx_bytes = dev->params.get_rx_bytes(dev, rx_buf, diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile index a56fc634d2d6..d89dcc448122 100644 --- a/drivers/media/rc/keymaps/Makefile +++ b/drivers/media/rc/keymaps/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-behold.o \ rc-behold-columbus.o \ rc-budget-ci-old.o \ - rc-cec.o \ rc-cinergy-1400.o \ rc-cinergy.o \ rc-d680-dmb.o \ @@ -117,6 +116,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-videomate-m1f.o \ rc-videomate-s350.o \ rc-videomate-tv-pvr.o \ + rc-videostrong-kii-pro.o \ rc-wetek-hub.o \ rc-wetek-play2.o \ rc-winfast.o \ diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c index 3e3bd11092b4..068e22aeac8c 100644 --- a/drivers/media/rc/keymaps/rc-cec.c +++ b/drivers/media/rc/keymaps/rc-cec.c @@ -1,5 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Keytable for the CEC remote control + * + * This keymap is unusual in that it can't be built as a module, + * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC + * is set. This is because it can be called from drm_dp_cec_set_edid() via + * cec_register_adapter() in an asynchronous context, and it is not + * allowed to use request_module() to load rc-cec.ko in that case. + * + * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we + * just compile this keymap into the rc-core module and never as a + * separate module. 
* * Copyright (c) 2015 by Kamil Debski */ @@ -152,7 +162,7 @@ static struct rc_map_table cec[] = { /* 0x77-0xff: Reserved */ }; -static struct rc_map_list cec_map = { +struct rc_map_list cec_map = { .map = { .scan = cec, .size = ARRAY_SIZE(cec), @@ -160,19 +170,3 @@ static struct rc_map_list cec_map = { .name = RC_MAP_CEC, } }; - -static int __init init_rc_map_cec(void) -{ - return rc_map_register(&cec_map); -} - -static void __exit exit_rc_map_cec(void) -{ - rc_map_unregister(&cec_map); -} - -module_init(init_rc_map_cec); -module_exit(exit_rc_map_cec); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kamil Debski"); diff --git a/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c new file mode 100644 index 000000000000..414d4d231e7e --- /dev/null +++ b/drivers/media/rc/keymaps/rc-videostrong-kii-pro.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (C) 2019 Mohammad Rasim <mohammad.rasim96@gmail.com> + +#include <media/rc-map.h> +#include <linux/module.h> + +// +// Keytable for the Videostrong KII Pro STB remote control +// + +static struct rc_map_table kii_pro[] = { + { 0x59, KEY_POWER }, + { 0x19, KEY_MUTE }, + { 0x42, KEY_RED }, + { 0x40, KEY_GREEN }, + { 0x00, KEY_YELLOW }, + { 0x03, KEY_BLUE }, + { 0x4a, KEY_BACK }, + { 0x48, KEY_FORWARD }, + { 0x08, KEY_PREVIOUSSONG}, + { 0x0b, KEY_NEXTSONG}, + { 0x46, KEY_PLAYPAUSE }, + { 0x44, KEY_STOP }, + { 0x1f, KEY_FAVORITES}, //KEY_F5? + { 0x04, KEY_PVR }, + { 0x4d, KEY_EPG }, + { 0x02, KEY_INFO }, + { 0x09, KEY_SUBTITLE }, + { 0x01, KEY_AUDIO }, + { 0x0d, KEY_HOMEPAGE }, + { 0x11, KEY_TV }, // DTV ? + { 0x06, KEY_UP }, + { 0x5a, KEY_LEFT }, + { 0x1a, KEY_ENTER }, // KEY_OK ? + { 0x1b, KEY_RIGHT }, + { 0x16, KEY_DOWN }, + { 0x45, KEY_MENU }, + { 0x05, KEY_ESC }, + { 0x13, KEY_VOLUMEUP }, + { 0x17, KEY_VOLUMEDOWN }, + { 0x58, KEY_APPSELECT }, + { 0x12, KEY_VENDOR }, // mouse + { 0x55, KEY_PAGEUP }, // KEY_CHANNELUP ? + { 0x15, KEY_PAGEDOWN }, // KEY_CHANNELDOWN ? 
+ { 0x52, KEY_1 }, + { 0x50, KEY_2 }, + { 0x10, KEY_3 }, + { 0x56, KEY_4 }, + { 0x54, KEY_5 }, + { 0x14, KEY_6 }, + { 0x4e, KEY_7 }, + { 0x4c, KEY_8 }, + { 0x0c, KEY_9 }, + { 0x18, KEY_WWW }, // KEY_F7 + { 0x0f, KEY_0 }, + { 0x51, KEY_BACKSPACE }, +}; + +static struct rc_map_list kii_pro_map = { + .map = { + .scan = kii_pro, + .size = ARRAY_SIZE(kii_pro), + .rc_proto = RC_PROTO_NEC, + .name = RC_MAP_KII_PRO, + } +}; + +static int __init init_rc_map_kii_pro(void) +{ + return rc_map_register(&kii_pro_map); +} + +static void __exit exit_rc_map_kii_pro(void) +{ + rc_map_unregister(&kii_pro_map); +} + +module_init(init_rc_map_kii_pro) +module_exit(exit_rc_map_kii_pro) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>"); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index f9616158bcf4..c68e52c17ae1 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -701,11 +701,18 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, data[0], data[1]); break; case MCE_RSP_EQIRCFS: + if (!data[0] && !data[1]) { + dev_dbg(dev, "%s: no carrier", inout); + break; + } + // prescaler should make sense + if (data[0] > 8) + break; period = DIV_ROUND_CLOSEST((1U << data[0] * 2) * (data[1] + 1), 10); if (!period) break; - carrier = (1000 * 1000) / period; + carrier = USEC_PER_SEC / period; dev_dbg(dev, "%s carrier of %u Hz (period %uus)", inout, carrier, period); break; @@ -1169,7 +1176,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in) switch (subcmd) { /* the one and only 5-byte return value command */ case MCE_RSP_GETPORTSTATUS: - if (buf_in[5] == 0) + if (buf_in[5] == 0 && *hi < 8) ir->txports_cabled |= 1 << *hi; break; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 6f80c251f641..ee80f38970bc 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1256,6 +1256,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1394,6 +1398,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } new_filter = *filter; if (fattr->mask) @@ -1508,6 +1516,10 @@ static ssize_t store_wakeup_protocols(struct device *device, int i; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } allowed = dev->allowed_wakeup_protocols; @@ -1565,25 +1577,25 @@ static void rc_dev_release(struct device *device) kfree(dev); } -#define ADD_HOTPLUG_VAR(fmt, val...) 
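\ - do { \ int err = add_uevent_var(env, fmt, val); \ if (err) \ return err; \ } while (0) -

The rc-main.c hunks above and below close a race between sysfs writes and device unregistration: each store handler now rechecks dev->registered under dev->lock before touching the device. A minimal sketch of that guard, with hypothetical names (struct my_rc, my_store_attr):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_rc {			/* hypothetical device state */
	struct mutex lock;
	bool registered;
};

static ssize_t my_store_attr(struct my_rc *dev, const char *buf, size_t len)
{
	mutex_lock(&dev->lock);
	if (!dev->registered) {
		/* unregistration won the race; refuse the write */
		mutex_unlock(&dev->lock);
		return -ENODEV;
	}
	/* ... parse buf and update state while still holding the lock ... */
	mutex_unlock(&dev->lock);
	return len;
}

The same flag is cleared under the lock in rc_unregister_device(), so a writer either observes registered == false and bails out with -ENODEV, or completes before teardown proceeds.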
static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); + int ret = 0; - if (dev->rc_map.name) - ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); - if (dev->driver_name) - ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name); - if (dev->device_name) - ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name); + mutex_lock(&dev->lock); - return 0; + if (!dev->registered) + ret = -ENODEV; + if (ret == 0 && dev->rc_map.name) + ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); + if (ret == 0 && dev->driver_name) + ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); + if (ret == 0 && dev->device_name) + ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); + + mutex_unlock(&dev->lock); + + return ret; } /* @@ -1880,6 +1892,8 @@ int rc_register_device(struct rc_dev *dev) goto out_raw; } + dev->registered = true; + rc = device_add(&dev->dev); if (rc) goto out_rx_free; @@ -1889,8 +1903,6 @@ int rc_register_device(struct rc_dev *dev) dev->device_name ?: "Unspecified device", path ?: "N/A"); kfree(path); - dev->registered = true; - /* * once the input device is registered in rc_setup_rx_device, * userspace can open the input device and rc_open() will be called @@ -1975,14 +1987,14 @@ void rc_unregister_device(struct rc_dev *dev) del_timer_sync(&dev->timer_keyup); del_timer_sync(&dev->timer_repeat); - rc_free_rx_device(dev); - mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); + rc_free_rx_device(dev); + /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. @@ -2021,6 +2033,9 @@ static int __init rc_core_init(void) led_trigger_register_simple("rc-feedback", &led_feedback); rc_map_register(&empty_map); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_register(&cec_map); +#endif return 0; } @@ -2030,6 +2045,9 @@ static void __exit rc_core_exit(void) lirc_dev_exit(); class_unregister(&rc_class); led_trigger_unregister_simple(led_feedback); +#ifdef CONFIG_MEDIA_CEC_RC + rc_map_unregister(&cec_map); +#endif rc_map_unregister(&empty_map); } diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c index e222b4c98be4..78c5a4d0f279 100644 --- a/drivers/media/rc/sunxi-cir.c +++ b/drivers/media/rc/sunxi-cir.c @@ -137,6 +137,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id) } else if (status & REG_RXSTA_RPE) { ir_raw_event_set_idle(ir->rc, true); ir_raw_event_handle(ir->rc); + } else { + ir_raw_event_handle(ir->rc); } spin_unlock(&ir->ir_lock); diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c index b3505f402476..8647c50b66e5 100644 --- a/drivers/media/tuners/m88rs6000t.c +++ b/drivers/media/tuners/m88rs6000t.c @@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength) PGA2_cri = PGA2_GC >> 2; PGA2_crf = PGA2_GC & 0x03; - for (i = 0; i <= RF_GC; i++) + for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++) RFG += RFGS[i]; if (RF_GC == 0) @@ -537,12 +537,12 @@ if (RF_GC == 3) RFG += 100; - for (i = 0; i <= IF_GC; i++) + for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++) IFG += IFGS[i]; TIAG = TIA_GC * TIA_GS; - for (i = 0; i <= BB_GC; i++) + for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++) BBG += BBGS[i]; PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf *
PGA2_crf_GS; diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c index 83ca5dc047ea..baa9950783b6 100644 --- a/drivers/media/tuners/qm1d1c0042.c +++ b/drivers/media/tuners/qm1d1c0042.c @@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe) if (val == reg_initval[reg_index][0x00]) break; } - if (reg_index >= QM1D1C0042_NUM_REG_ROWS) + if (reg_index >= QM1D1C0042_NUM_REG_ROWS) { + ret = -EINVAL; goto failed; + } memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS); usleep_range(2000, 3000); diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index e87040d6eca7..a39e1966816b 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -75,24 +75,23 @@ static int si2157_init(struct dvb_frontend *fe) struct si2157_cmd cmd; const struct firmware *fw; const char *fw_name; - unsigned int uitmp, chip_id; + unsigned int chip_id, xtal_trim; dev_dbg(&client->dev, "\n"); - /* Returned IF frequency is garbage when firmware is not running */ - memcpy(cmd.args, "\x15\x00\x06\x07", 4); + /* Try to get Xtal trim property, to verify tuner still running */ + memcpy(cmd.args, "\x15\x00\x04\x02", 4); cmd.wlen = 4; cmd.rlen = 4; ret = si2157_cmd_execute(client, &cmd); - if (ret) - goto err; - uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; - dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); + xtal_trim = cmd.args[2] | (cmd.args[3] << 8); - if (uitmp == dev->if_frequency / 1000) + if (ret == 0 && xtal_trim < 16) goto warm; + dev->if_frequency = 0; /* we no longer know current tuner state */ + /* power up */ if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c index b6e70fada3fb..8fb186b25d6a 100644 --- a/drivers/media/tuners/tuner-simple.c +++ b/drivers/media/tuners/tuner-simple.c @@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer) case TUNER_TENA_9533_DI: case TUNER_YMEC_TVF_5533MF: tuner_dbg("This tuner doesn't have FM. 
Most cards have a TEA5767 for FM\n"); - return 0; + return -EINVAL; case TUNER_PHILIPS_FM1216ME_MK3: case TUNER_PHILIPS_FM1236_MK3: case TUNER_PHILIPS_FMD1216ME_MK3: @@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe, TUNER_RATIO_SELECT_50; /* 50 kHz step */ /* Bandswitch byte */ - simple_radio_bandswitch(fe, &buffer[0]); + if (simple_radio_bandswitch(fe, &buffer[0])) + return 0; /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 62d3566bf7ee..5ac1a6af8782 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -391,7 +391,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) - lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), + lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC); info("INT Interrupt Service Started"); diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig index 1a3e5f965ae4..2d7a5c1c84af 100644 --- a/drivers/media/usb/dvb-usb/Kconfig +++ b/drivers/media/usb/dvb-usb/Kconfig @@ -150,6 +150,7 @@ config DVB_USB_CXUSB config DVB_USB_CXUSB_ANALOG bool "Analog support for the Conexant USB2.0 hybrid reference design" depends on DVB_USB_CXUSB && VIDEO_V4L2 + depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_USB_CXUSB select VIDEO_CX25840 select VIDEOBUF2_VMALLOC help diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c index d4ea72bf09c5..5131c8d4c632 100644 --- a/drivers/media/usb/dvb-usb/dibusb-mb.c +++ b/drivers/media/usb/dvb-usb/dibusb-mb.c @@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { err("tuner i2c write failed."); - ret = -EREMOTEIO; + return -EREMOTEIO; } if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 16a0b4a359ea..f57c4627624f 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) } } - if ((ret = dvb_usb_adapter_stream_init(adap)) || - (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) || - (ret = dvb_usb_adapter_frontend_init(adap))) { + ret = dvb_usb_adapter_stream_init(adap); + if (ret) return ret; - } + + ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs); + if (ret) + goto dvb_init_err; + + ret = dvb_usb_adapter_frontend_init(adap); + if (ret) + goto frontend_init_err; /* use exclusive FE lock if there is multiple shared FEs */ if (adap->fe_adap[1].fe) @@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) } return 0; + +frontend_init_err: + dvb_usb_adapter_dvb_exit(adap); +dvb_init_err: + dvb_usb_adapter_stream_exit(adap); + return ret; } static int dvb_usb_adapter_exit(struct dvb_usb_device *d) @@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums) if (d->props.priv_init != NULL) { ret = d->props.priv_init(d); - if (ret != 0) { - kfree(d->priv); - d->priv = NULL; - return ret; - } + if (ret != 0) + goto err_priv_init; } } /* check the capabilities and set appropriate 
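variables */ dvb_usb_device_power_ctrl(d, 1);

Both dvb-usb-init.c functions in this group trade the chained "if ((ret = ...) || (ret = ...))" style for the kernel's usual goto-unwind ladder: one label per acquired resource, released in reverse order of acquisition. A compact sketch of the shape, with hypothetical init_a/init_b/init_c helpers standing in for the stream/dvb/frontend init calls:

struct my_ctx { int placeholder; };	/* hypothetical driver state */

static int init_a(struct my_ctx *ctx) { return 0; }
static int init_b(struct my_ctx *ctx) { return 0; }
static int init_c(struct my_ctx *ctx) { return 0; }
static void exit_a(struct my_ctx *ctx) { }
static void exit_b(struct my_ctx *ctx) { }

static int my_init(struct my_ctx *ctx)
{
	int ret;

	ret = init_a(ctx);
	if (ret)
		return ret;	/* nothing acquired yet, nothing to undo */

	ret = init_b(ctx);
	if (ret)
		goto err_a;

	ret = init_c(ctx);
	if (ret)
		goto err_b;

	return 0;

err_b:
	exit_b(ctx);	/* undo init_b() */
err_a:
	exit_a(ctx);	/* undo init_a() */
	return ret;
}

The payoff is visible in the hunk itself: the old code called a blanket dvb_usb_exit(d) on any failure, tearing down resources that were never set up, while the labels unwind exactly what succeeded.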
- if ((ret = dvb_usb_i2c_init(d)) || - (ret = dvb_usb_adapter_init(d, adapter_nums))) { - dvb_usb_exit(d); - return ret; - } + ret = dvb_usb_i2c_init(d); + if (ret) + goto err_i2c_init; + ret = dvb_usb_adapter_init(d, adapter_nums); + if (ret) + goto err_adapter_init; if ((ret = dvb_usb_remote_init(d))) err("could not initialize remote control."); @@ -181,6 +191,17 @@ dvb_usb_device_power_ctrl(d, 0); return 0; + +err_adapter_init: + dvb_usb_adapter_exit(d); +err_i2c_init: + dvb_usb_i2c_exit(d); + if (d->priv && d->props.priv_destroy) + d->props.priv_destroy(d); +err_priv_init: + kfree(d->priv); + d->priv = NULL; + return ret; } /* determine the name and the state of the just found USB device */ @@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf, if (du != NULL) *du = NULL; - if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) { + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + err("no memory for 'struct dvb_usb_device'"); + return -ENOMEM; + } + + memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties)); + + desc = dvb_usb_find_device(udev, &d->props, &cold); + if (!desc) { deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n"); - return -ENODEV; + ret = -ENODEV; + goto error; } if (cold) { info("found a '%s' in cold state, will try to load a firmware", desc->name); ret = dvb_usb_download_firmware(udev, props); if (!props->no_reconnect || ret != 0) - return ret; + goto error; } info("found a '%s' in warm state.", desc->name); - d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL); - if (d == NULL) { - err("no memory for 'struct dvb_usb_device'"); - return -ENOMEM; - } - d->udev = udev; - memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties)); d->desc = desc; d->owner = owner; usb_set_intfdata(intf, d); - if (du != NULL) + ret = dvb_usb_init(d, adapter_nums); + if (ret) { + info("%s error while loading driver (%d)", desc->name, ret); + goto error; + } + + if (du) *du = d; - ret = dvb_usb_init(d, adapter_nums); + info("%s successfully initialized and connected.", desc->name); + return 0; - if (ret == 0) - info("%s successfully initialized and connected.", desc->name); - else - info("%s error while loading driver (%d)", desc->name, ret); + error: + usb_set_intfdata(intf, NULL); + kfree(d); return ret; } EXPORT_SYMBOL(dvb_usb_device_init); diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index 2eb0e24e8943..20830b365f39 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -485,7 +485,7 @@ extern int __must_check dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16); /* commonly used remote control parsing */ -extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *); +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *); /* commonly used firmware download types and function */ struct hexline { diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index 1282f701f185..ac8b8bf6ee1d 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c @@ -182,7 +182,7 @@ out_rel_fw: static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) { - u8 status, buf; + u8 status = 0, buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); if (onoff) { diff --git
a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index e6088b5d1b80..3daa64bb1e1d 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -956,14 +956,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk, usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL); if (!usb_bufs->buf[i]) { - em28xx_uninit_usb_xfer(dev, mode); - for (i--; i >= 0; i--) kfree(usb_bufs->buf[i]); - kfree(usb_bufs->buf); - usb_bufs->buf = NULL; - + em28xx_uninit_usb_xfer(dev, mode); return -ENOMEM; } diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index a73faf12f7e4..e1946237ac8c 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -1924,6 +1924,7 @@ ret: return result; out_free: + em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE); kfree(dvb); dev->dvb = NULL; goto ret; diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index ff2aa057c1fb..f889c9d740cd 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf, struct go7007_usb *usb; const struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_host_endpoint *ep; unsigned num_i2c_devs; char *name; int video_pipe, i, v_urb_len; @@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf, if (usb->intr_urb->transfer_buffer == NULL) goto allocfail; - if (go->board_id == GO7007_BOARDID_SENSORAY_2250) + ep = usb->usbdev->ep_in[4]; + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16), diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c index b05fa227ffb2..95756cbb722f 100644 --- a/drivers/media/usb/go7007/snd-go7007.c +++ b/drivers/media/usb/go7007/snd-go7007.c @@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go) gosnd->capturing = 0; ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, &gosnd->card); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_snd; + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, &go7007_snd_device_ops); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname)); strscpy(gosnd->card->longname, gosnd->card->shortname, @@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go) &go7007_snd_capture_ops); ret = snd_card_register(gosnd->card); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; gosnd->substream = NULL; go->snd_context = gosnd; @@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go) ++dev; return 0; + +free_card: + snd_card_free(gosnd->card); +free_snd: + kfree(gosnd); + return ret; } EXPORT_SYMBOL(go7007_snd_init); diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index c1b307bbe540..e4d78e676e74 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c 
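The em28xx_alloc_urbs() hunk above fixes a double-free by giving each allocation exactly one owner: buffers from earlier loop iterations are released in the rollback loop, and only then does the shared uninit helper run. The general shape of partial-allocation rollback, as a sketch with a hypothetical helper:

#include <linux/slab.h>

/* Allocate num buffers of size bytes; on failure, undo all partial work. */
static void **alloc_buf_array(unsigned int num, size_t size)
{
	void **bufs = kcalloc(num, sizeof(*bufs), GFP_KERNEL);
	unsigned int i;

	if (!bufs)
		return NULL;

	for (i = 0; i < num; i++) {
		bufs[i] = kzalloc(size, GFP_KERNEL);
		if (!bufs[i]) {
			while (i--)	/* free iterations 0..i-1 exactly once */
				kfree(bufs[i]);
			kfree(bufs);
			return NULL;
		}
	}
	return bufs;
}

The bug class being removed is an element freed twice, once by the rollback loop and once by a central cleanup helper; the snd-go7007 rework above applies the same single-owner discipline with goto labels.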
@@ -1575,6 +1575,9 @@ out: input_unregister_device(gspca_dev->input_dev); #endif v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); + v4l2_device_unregister(&gspca_dev->v4l2_dev); + if (sd_desc->probe_error) + sd_desc->probe_error(gspca_dev); kfree(gspca_dev->usb_buf); kfree(gspca_dev); return ret; diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h index b0ced2e14006..a6554d5e9e1a 100644 --- a/drivers/media/usb/gspca/gspca.h +++ b/drivers/media/usb/gspca/gspca.h @@ -105,6 +105,7 @@ struct sd_desc { cam_cf_op config; /* called on probe */ cam_op init; /* called on probe and resume */ cam_op init_controls; /* called on probe */ + cam_v_op probe_error; /* called if probe failed, do cleanup here */ cam_op start; /* called on stream on after URBs creation */ cam_pkt_op pkt_scan; /* optional operations */ diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c index 863c485f4275..65a74060986a 100644 --- a/drivers/media/usb/gspca/sq905.c +++ b/drivers/media/usb/gspca/sq905.c @@ -158,7 +158,7 @@ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; - int act_len; + int act_len = 0; gspca_dev->usb_buf[0] = '\0'; if (need_lock) diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 95673fc0a99c..d9bc2aacc885 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); +static void stv06xx_probe_error(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *)gspca_dev; + + kfree(sd->sensor_priv); + sd->sensor_priv = NULL; +} + /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = stv06xx_config, .init = stv06xx_init, .init_controls = stv06xx_init_controls, + .probe_error = stv06xx_probe_error, .start = stv06xx_start, .stopN = stv06xx_stopN, .pkt_scan = stv06xx_pkt_scan, diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index 65be6f140fe8..1c60dfb647e5 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -1230,7 +1230,7 @@ static int msi2500_probe(struct usb_interface *intf, } dev->master = master; - master->bus_num = 0; + master->bus_num = -1; master->num_chipselect = 1; master->transfer_one_message = msi2500_transfer_one_message; spi_master_set_devdata(master, dev); diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 9b76cf133d52..3df7ca2357da 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -147,16 +147,17 @@ static const struct video_device pwc_template = { /***************************************************************************/ /* Private functions */ -static void *pwc_alloc_urb_buffer(struct device *dev, +static void *pwc_alloc_urb_buffer(struct usb_device *dev, size_t size, dma_addr_t *dma_handle) { + struct device *dmadev = dev->bus->sysdev; void *buffer = kmalloc(size, GFP_KERNEL); if (!buffer) return NULL; - *dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_handle)) { + *dma_handle = dma_map_single(dmadev, buffer, size, DMA_FROM_DEVICE); + if (dma_mapping_error(dmadev, *dma_handle)) { kfree(buffer); return NULL; } @@ -164,12 +165,14 @@ static void *pwc_alloc_urb_buffer(struct device *dev, 
return buffer; } -static void pwc_free_urb_buffer(struct device *dev, +static void pwc_free_urb_buffer(struct usb_device *dev, size_t size, void *buffer, dma_addr_t dma_handle) { - dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE); + struct device *dmadev = dev->bus->sysdev; + + dma_unmap_single(dmadev, dma_handle, size, DMA_FROM_DEVICE); kfree(buffer); } @@ -274,6 +277,7 @@ static void pwc_frame_complete(struct pwc_device *pdev) static void pwc_isoc_handler(struct urb *urb) { struct pwc_device *pdev = (struct pwc_device *)urb->context; + struct device *dmadev = urb->dev->bus->sysdev; int i, fst, flen; unsigned char *iso_buf = NULL; @@ -320,7 +324,7 @@ static void pwc_isoc_handler(struct urb *urb) /* Reset ISOC error counter. We did get here, after all. */ pdev->visoc_errors = 0; - dma_sync_single_for_cpu(&urb->dev->dev, + dma_sync_single_for_cpu(dmadev, urb->transfer_dma, urb->transfer_buffer_length, DMA_FROM_DEVICE); @@ -371,7 +375,7 @@ static void pwc_isoc_handler(struct urb *urb) pdev->vlast_packet_size = flen; } - dma_sync_single_for_device(&urb->dev->dev, + dma_sync_single_for_device(dmadev, urb->transfer_dma, urb->transfer_buffer_length, DMA_FROM_DEVICE); @@ -453,7 +457,7 @@ retry: urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer_length = ISO_BUFFER_SIZE; - urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev, + urb->transfer_buffer = pwc_alloc_urb_buffer(udev, urb->transfer_buffer_length, &urb->transfer_dma); if (urb->transfer_buffer == NULL) { @@ -516,7 +520,7 @@ static void pwc_iso_free(struct pwc_device *pdev) if (urb) { PWC_DEBUG_MEMORY("Freeing URB\n"); if (urb->transfer_buffer) - pwc_free_urb_buffer(&urb->dev->dev, + pwc_free_urb_buffer(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 19c90fa9e443..293a460f4616 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev) if (ret < 0) { printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n", ret, __func__); + + kfree(dvb->bulk_urb->transfer_buffer); + usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; return ret; } else printk(KERN_ERR "tm6000: pipe reset\n"); diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index c07a81a6cbe2..c46cbcfafab3 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -461,11 +461,12 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev) if (dev->urb_buffer) return 0; - dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL); + dev->urb_buffer = kmalloc_array(num_bufs, sizeof(*dev->urb_buffer), + GFP_KERNEL); if (!dev->urb_buffer) return -ENOMEM; - dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *), + dev->urb_dma = kmalloc_array(num_bufs, sizeof(*dev->urb_dma), GFP_KERNEL); if (!dev->urb_dma) return -ENOMEM; diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c index 6f108996142d..bbfaec2e6ef6 100644 --- a/drivers/media/usb/usbtv/usbtv-audio.c +++ b/drivers/media/usb/usbtv/usbtv-audio.c @@ -399,7 +399,7 @@ void usbtv_audio_free(struct usbtv *usbtv) cancel_work_sync(&usbtv->snd_trigger); if (usbtv->snd && usbtv->udev) { - snd_card_free(usbtv->snd); + snd_card_free_when_closed(usbtv->snd); usbtv->snd = 
NULL; } } diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index ee9c656d121f..2308c0b4f5e7 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf, usbtv_audio_fail: /* we must not free at this point */ - usb_get_dev(usbtv->udev); + v4l2_device_get(&usbtv->v4l2_dev); + /* this will undo the v4l2_device_get() */ usbtv_video_free(usbtv); usbtv_video_fail: diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index e399b9fad757..36abe47997b0 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, offset &= 7; mask = ((1LL << bits) - 1) << offset; - for (; bits > 0; data++) { + while (1) { u8 byte = *data & mask; value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); bits -= 8 - (offset > 0 ? offset : 0); + if (bits <= 0) + break; + offset -= 8; mask = (1 << bits) - 1; + data++; } /* Sign-extend the value if needed. */ @@ -1844,30 +1848,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain, { struct uvc_entity *entity; struct uvc_control *ctrl; - unsigned int i, found = 0; + unsigned int i; + bool found; u32 reqflags; u16 size; u8 *data = NULL; int ret; /* Find the extension unit. */ + found = false; list_for_each_entry(entity, &chain->entities, chain) { if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT && - entity->id == xqry->unit) + entity->id == xqry->unit) { + found = true; break; + } } - if (entity->id != xqry->unit) { + if (!found) { uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n", xqry->unit); return -ENOENT; } /* Find the control and perform delayed initialization if needed. */ + found = false; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (ctrl->index == xqry->selector - 1) { - found = 1; + found = true; break; } } @@ -2024,13 +2033,6 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, goto done; } - /* - * Retrieve control flags from the device. Ignore errors and work with - * default flag values from the uvc_ctrl array when the device doesn't - * properly implement GET_INFO on standard controls. - */ - uvc_ctrl_get_flags(dev, ctrl, &ctrl->info); - ctrl->initialized = 1; uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s " @@ -2253,6 +2255,13 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl) if (uvc_entity_match_guid(ctrl->entity, info->entity) && ctrl->index == info->index) { uvc_ctrl_add_info(dev, ctrl, info); + /* + * Retrieve control flags from the device. Ignore errors + * and work with default flag values from the uvc_ctrl + * array when the device doesn't properly implement + * GET_INFO on standard controls. + */ + uvc_ctrl_get_flags(dev, ctrl, &ctrl->info); break; } } diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 99883550375e..40ca1d4e0348 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -967,7 +967,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, unsigned int i; extra_size = roundup(extra_size, sizeof(*entity->pads)); - num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; + if (num_pads) + num_inputs = type & UVC_TERM_OUTPUT ? 
num_pads : num_pads - 1; + else + num_inputs = 0; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; entity = kzalloc(size, GFP_KERNEL); @@ -983,7 +986,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, for (i = 0; i < num_inputs; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; - if (!UVC_ENTITY_IS_OTERM(entity)) + if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; entity->bNrInPins = num_inputs; diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c index b4499cddeffe..ca3a9c2eec27 100644 --- a/drivers/media/usb/uvc/uvc_entity.c +++ b/drivers/media/usb/uvc/uvc_entity.c @@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain, int ret; if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { + u32 function; + v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); strscpy(entity->subdev.name, entity->name, sizeof(entity->subdev.name)); + switch (UVC_ENTITY_TYPE(entity)) { + case UVC_VC_SELECTOR_UNIT: + function = MEDIA_ENT_F_VID_MUX; + break; + case UVC_VC_PROCESSING_UNIT: + case UVC_VC_EXTENSION_UNIT: + /* For lack of a better option. */ + function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + break; + case UVC_COMPOSITE_CONNECTOR: + case UVC_COMPONENT_CONNECTOR: + function = MEDIA_ENT_F_CONN_COMPOSITE; + break; + case UVC_SVIDEO_CONNECTOR: + function = MEDIA_ENT_F_CONN_SVIDEO; + break; + case UVC_ITT_CAMERA: + function = MEDIA_ENT_F_CAM_SENSOR; + break; + case UVC_TT_VENDOR_SPECIFIC: + case UVC_ITT_VENDOR_SPECIFIC: + case UVC_ITT_MEDIA_TRANSPORT_INPUT: + case UVC_OTT_VENDOR_SPECIFIC: + case UVC_OTT_DISPLAY: + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: + case UVC_EXTERNAL_VENDOR_SPECIFIC: + default: + function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; + break; + } + + entity->subdev.entity.function = function; + ret = media_entity_pads_init(&entity->subdev.entity, entity->num_pads, entity->pads); diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 0335e69b70ab..7d60dd3b0bd8 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, if (ret < 0) goto done; + /* After the probe, update fmt with the values returned from + * negotiation with the device. Some devices return invalid bFormatIndex + * and bFrameIndex values, in which case we can only assume they have + * accepted the requested format as-is. 
+ */ + for (i = 0; i < stream->nformats; ++i) { + if (probe->bFormatIndex == stream->format[i].index) { + format = &stream->format[i]; + break; + } + } + + if (i == stream->nformats) + uvc_trace(UVC_TRACE_FORMAT, + "Unknown bFormatIndex %u, using default\n", + probe->bFormatIndex); + + for (i = 0; i < format->nframes; ++i) { + if (probe->bFrameIndex == format->frame[i].bFrameIndex) { + frame = &format->frame[i]; + break; + } + } + + if (i == format->nframes) + uvc_trace(UVC_TRACE_FORMAT, + "Unknown bFrameIndex %u, using default\n", + probe->bFrameIndex); + fmt->fmt.pix.width = frame->wWidth; fmt->fmt.pix.height = frame->wHeight; fmt->fmt.pix.field = V4L2_FIELD_NONE; fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame); fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize; + fmt->fmt.pix.pixelformat = format->fcc; fmt->fmt.pix.colorspace = format->colorspace; if (uvc_format != NULL) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index cd84dbbf6a89..7ac7a5063fb2 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1795,7 +1795,8 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, case V4L2_CTRL_TYPE_INTEGER_MENU: if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum) return -ERANGE; - if (ctrl->menu_skip_mask & (1ULL << ptr.p_s32[idx])) + if (ptr.p_s32[idx] < BITS_PER_LONG_LONG && + (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx]))) return -EINVAL; if (ctrl->type == V4L2_CTRL_TYPE_MENU && ctrl->qmenu[ptr.p_s32[idx]][0] == '\0') @@ -2154,7 +2155,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) if (hdl == NULL || hdl->buckets == NULL) return; - if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { + /* + * If the main handler is freed and it is used by handler objects in + * outstanding requests, then unbind and put those objects before + * freeing the main handler. + * + * The main handler can be identified by having a NULL ops pointer in + * the request object. + */ + if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) { struct v4l2_ctrl_handler *req, *next_req; list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { @@ -3185,8 +3194,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj) container_of(obj, struct v4l2_ctrl_handler, req_obj); struct v4l2_ctrl_handler *main_hdl = obj->priv; - list_del_init(&hdl->requests); mutex_lock(main_hdl->lock); + list_del_init(&hdl->requests); if (hdl->request_is_queued) { list_del_init(&hdl->requests_queued); hdl->request_is_queued = false; @@ -3245,8 +3254,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req, if (!ret) { ret = media_request_object_bind(req, &req_ops, from, false, &hdl->req_obj); - if (!ret) + if (!ret) { + mutex_lock(from->lock); list_add_tail(&hdl->requests, &from->requests); + mutex_unlock(from->lock); + } } return ret; } diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 3bd1888787eb..48c3b9f72722 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -93,7 +93,7 @@ v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type) const struct v4l2_fwnode_bus_conv *conv = get_v4l2_fwnode_bus_conv_by_fwnode_bus(type); - return conv ? conv->mbus_type : V4L2_MBUS_UNKNOWN; + return conv ? 
conv->mbus_type : V4L2_MBUS_INVALID; } static const char * @@ -436,6 +436,10 @@ static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, v4l2_fwnode_mbus_type_to_string(vep->bus_type), vep->bus_type); mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type); + if (mbus_type == V4L2_MBUS_INVALID) { + pr_debug("unsupported bus type %u\n", bus_type); + return -EINVAL; + } if (vep->bus_type != V4L2_MBUS_UNKNOWN) { if (mbus_type != V4L2_MBUS_UNKNOWN && diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 58868d7129eb..24db33f803c0 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -3016,7 +3016,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; - void *mbuf = NULL; + void *mbuf = NULL, *array_buf = NULL; void *parg = (void *)arg; long err = -EINVAL; bool has_array_args; @@ -3075,20 +3075,14 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, has_array_args = err; if (has_array_args) { - /* - * When adding new types of array args, make sure that the - * parent argument to ioctl (which contains the pointer to the - * array) fits into sbuf (so that mbuf will still remain - * unused up to here). - */ - mbuf = kvmalloc(array_size, GFP_KERNEL); + array_buf = kvmalloc(array_size, GFP_KERNEL); err = -ENOMEM; - if (NULL == mbuf) + if (array_buf == NULL) goto out_array_args; err = -EFAULT; - if (copy_from_user(mbuf, user_ptr, array_size)) + if (copy_from_user(array_buf, user_ptr, array_size)) goto out_array_args; - *kernel_ptr = mbuf; + *kernel_ptr = array_buf; } /* Handles IOCTL */ @@ -3107,7 +3101,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; - if (copy_to_user(user_ptr, mbuf, array_size)) + if (copy_to_user(user_ptr, array_buf, array_size)) err = -EFAULT; goto out_array_args; } @@ -3129,6 +3123,7 @@ out_array_args: } out: + kvfree(array_buf); kvfree(mbuf); return err; } diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 402c6bc8e621..af296b6fcbbd 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -163,35 +163,12 @@ static const struct file_operations emif_mr4_fops = { static int __init_or_module emif_debugfs_init(struct emif_data *emif) { - struct dentry *dentry; - int ret; - - dentry = debugfs_create_dir(dev_name(emif->dev), NULL); - if (!dentry) { - ret = -ENOMEM; - goto err0; - } - emif->debugfs_root = dentry; - - dentry = debugfs_create_file("regcache_dump", S_IRUGO, - emif->debugfs_root, emif, &emif_regdump_fops); - if (!dentry) { - ret = -ENOMEM; - goto err1; - } - - dentry = debugfs_create_file("mr4", S_IRUGO, - emif->debugfs_root, emif, &emif_mr4_fops); - if (!dentry) { - ret = -ENOMEM; - goto err1; - } - + emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL); + debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, + &emif_regdump_fops); + debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, + &emif_mr4_fops); return 0; -err1: - debugfs_remove_recursive(emif->debugfs_root); -err0: - return ret; } static void __exit emif_debugfs_exit(struct emif_data *emif) diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c index 0b0ed72016da..0309bd5a1800 100644 --- a/drivers/memory/fsl-corenet-cf.c +++ b/drivers/memory/fsl-corenet-cf.c @@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, ccf); irq = 
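platform_get_irq(pdev, 0);

The fsl-corenet-cf hunk continuing below adopts the platform_get_irq() convention: the call returns a negative errno on failure, so checking "if (!irq)" both misses real errors and mishandles deferred probing. A sketch of the idiom with a hypothetical probe function and handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_irq(int irq, void *data)
{
	return IRQ_HANDLED;	/* hypothetical handler body */
}

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	return devm_request_irq(&pdev->dev, irq, my_irq, 0,
				pdev->name, NULL);
}

Returning the errno unchanged also drops the redundant dev_err(), since platform_get_irq() already logs failures other than -EPROBE_DEFER.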
- if (!irq) { - dev_err(&pdev->dev, "%s: no irq\n", __func__); - return -ENXIO; - } + if (irq < 0) + return irq; ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf); if (ret) { diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index a113e811faab..da1ce7fd4cf5 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -127,7 +127,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi) int mtk_smi_larb_get(struct device *larbdev) { - int ret = pm_runtime_get_sync(larbdev); + int ret = pm_runtime_resume_and_get(larbdev); return (ret < 0) ? ret : 0; } @@ -336,7 +336,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev) int ret; /* Power on smi-common. */ - ret = pm_runtime_get_sync(larb->smi_common_dev); + ret = pm_runtime_resume_and_get(larb->smi_common_dev); if (ret < 0) { dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret); return ret; } diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index eff26c1b1394..332ffd7cf8b0 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -949,7 +949,7 @@ static int gpmc_cs_remap(int cs, u32 base) int ret; u32 old_base, size; - if (cs > gpmc_cs_num) { + if (cs >= gpmc_cs_num) { pr_err("%s: requested chip-select is disabled\n", __func__); return -ENODEV; } @@ -984,7 +984,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) struct resource *res = &gpmc->mem; int r = -1; - if (cs > gpmc_cs_num) { + if (cs >= gpmc_cs_num) { pr_err("%s: requested chip-select is disabled\n", __func__); return -ENODEV; } @@ -1026,8 +1026,8 @@ EXPORT_SYMBOL(gpmc_cs_request); void gpmc_cs_free(int cs) { - struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; - struct resource *res = &gpmc->mem; + struct gpmc_cs_data *gpmc; + struct resource *res; spin_lock(&gpmc_mem_lock); if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { @@ -1036,6 +1036,9 @@ void gpmc_cs_free(int cs) spin_unlock(&gpmc_mem_lock); return; } + gpmc = &gpmc_cs[cs]; + res = &gpmc->mem; + gpmc_cs_disable_mem(cs); if (res->flags) release_resource(res); @@ -2274,6 +2277,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev) } } #else +void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) +{ + memset(p, 0, sizeof(*p)); +} static int gpmc_probe_dt(struct platform_device *pdev) { return 0; diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index 73bd3023202f..b42804b1801e 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -63,7 +63,7 @@ /* ECC memory config register specific constants */ #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0xC +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3 #define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \ (2 << 21)) /* UpdateRegs operation */ diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index db526dbf71ee..94219d2a2773 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c @@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev) */ for_each_available_child_of_node(np, child_np) { ret = of_aemif_parse_abus_config(pdev, child_np); - if (ret < 0) + if (ret < 0) { + of_node_put(child_np); goto error; + } } } else if (pdata && pdata->num_abus_data > 0) { for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) { @@ -405,8 +407,10 @@
for_each_available_child_of_node(np, child_np) { ret = of_platform_populate(child_np, NULL, dev_lookup, dev); - if (ret < 0) + if (ret < 0) { + of_node_put(child_np); goto error; + } } } else if (pdata) { for (i = 0; i < pdata->num_sub_devices; i++) { diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 693ee73eb291..12bc3f5a6cbb 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -441,6 +441,9 @@ static void memstick_check(struct work_struct *work) } else if (host->card->stop) host->card->stop(host->card); + if (host->removing) + goto out_power_off; + card = memstick_alloc_card(host); if (!card) { @@ -465,7 +468,6 @@ static void memstick_check(struct work_struct *work) host->card = card; if (device_register(&card->dev)) { put_device(&card->dev); - kfree(host->card); host->card = NULL; } } else @@ -545,6 +547,7 @@ EXPORT_SYMBOL(memstick_add_host); */ void memstick_remove_host(struct memstick_host *host) { + host->removing = 1; flush_workqueue(workqueue); mutex_lock(&host->lock); if (host->card) diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index dd3a1f3dcc19..d2ef46337191 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -759,8 +759,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto error3; dev->mmio = pci_ioremap_bar(pdev, 0); - if (!dev->mmio) + if (!dev->mmio) { + error = -ENOMEM; goto error4; + } dev->irq = pdev->irq; spin_lock_init(&dev->irq_lock); @@ -786,12 +788,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) &dev->dummy_dma_page_physical_address, GFP_KERNEL); r592_stop_dma(dev , 0); - if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, - DRV_NAME, dev)) + error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, + DRV_NAME, dev); + if (error) goto error6; r592_update_card_detect(dev); - if (memstick_add_host(host)) + error = memstick_add_host(host); + if (error) goto error7; message("driver successfully loaded"); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index f0737c57ed5f..4bc43d8eeb9e 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); int mptscsih_resume(struct pci_dev *pdev); #endif -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -1178,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev) MPT_SCSI_HOST *hd; int sz1; - if((hd = shost_priv(host)) == NULL) - return; + if (host == NULL) + hd = NULL; + else + hd = shost_priv(host); mptscsih_shutdown(pdev); @@ -1195,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev) "Free'd ScsiLookup (%d) memory\n", ioc->name, sz1)); - kfree(hd->info_kbuf); + if (hd) + kfree(hd->info_kbuf); /* NULL the Scsi_Host pointer */ ioc->sh = NULL; - scsi_host_put(host); - + if (host) + scsi_host_put(host); mpt_detach(pdev); } @@ -2422,7 +2423,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); /* Log SMART data (asc = 0x5D, non-IM case only) if required. 
*/ diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 4a31907a4525..3ff872c205ee 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1430,6 +1430,15 @@ err_irq: arizona_irq_exit(arizona); err_pm: pm_runtime_disable(arizona->dev); + + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); @@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona) regulator_disable(arizona->dcvdd); regulator_put(arizona->dcvdd); + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 077d9ab112b7..d919ae9691e2 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data) unsigned int val; int ret; - ret = pm_runtime_get_sync(arizona->dev); + ret = pm_runtime_resume_and_get(arizona->dev); if (ret < 0) { dev_err(arizona->dev, "Failed to resume device: %d\n", ret); return IRQ_NONE; diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index fab3cdc27ed6..19d57a45134c 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c @@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client, return ret; } - ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells, - ARRAY_SIZE(bd9571mwv_cells), NULL, 0, - regmap_irq_get_domain(bd->irq_data)); + ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, + bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells), + NULL, 0, regmap_irq_get_domain(bd->irq_data)); if (ret) { regmap_del_irq_chip(bd->irq, bd->irq_data); return ret; diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 7841c11411d0..707f4287ab4a 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -90,6 +90,11 @@ struct dln2_mod_rx_slots { spinlock_t lock; }; +enum dln2_endpoint { + DLN2_EP_OUT = 0, + DLN2_EP_IN = 1, +}; + struct dln2_dev { struct usb_device *usb_dev; struct usb_interface *interface; @@ -282,7 +287,11 @@ static void dln2_rx(struct urb *urb) len = urb->actual_length - sizeof(struct dln2_header); if (handle == DLN2_HANDLE_EVENT) { + unsigned long flags; + + spin_lock_irqsave(&dln2->event_cb_lock, flags); dln2_run_event_callbacks(dln2, id, echo, data, len); + spin_unlock_irqrestore(&dln2->event_cb_lock, flags); } else { /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ if (dln2_transfer_complete(dln2, urb, handle, echo)) @@ -733,10 +742,10 @@ static int dln2_probe(struct usb_interface *interface, hostif->desc.bNumEndpoints < 2) return -ENODEV; - epin = &hostif->endpoint[0].desc; - epout = &hostif->endpoint[1].desc; + epout = &hostif->endpoint[DLN2_EP_OUT].desc; if (!usb_endpoint_is_bulk_out(epout)) return -ENODEV; + epin = &hostif->endpoint[DLN2_EP_IN].desc; if (!usb_endpoint_is_bulk_in(epin)) return -ENODEV; diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index b33030e3385c..da91965b8f7b 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -196,6 +196,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 
0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* EBG */ + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, @@ -225,6 +228,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info }, + /* TGL-H */ + { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info }, /* EHL */ { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info }, diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index bfe4ff337581..b0f0781a6b9c 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -384,7 +384,7 @@ int intel_lpss_probe(struct device *dev, if (!lpss) return -ENOMEM; - lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET, + lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE); if (!lpss->priv) return -ENOMEM; diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 23276a80e3b4..96d02b6f06fd 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -28,6 +28,11 @@ int mfd_cell_enable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->enable) { + dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); + return 0; + } + /* only call enable hook if the cell wasn't previously enabled */ if (atomic_inc_return(cell->usage_count) == 1) err = cell->enable(pdev); @@ -45,6 +50,11 @@ int mfd_cell_disable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->disable) { + dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); + return 0; + } + /* only disable if no other clients are using it */ if (atomic_dec_return(cell->usage_count) == 0) err = cell->disable(pdev); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 154270f8d8d7..bbcde58e2a11 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1424,8 +1424,14 @@ static int sm501_plat_probe(struct platform_device *dev) goto err_claim; } - return sm501_init_dev(sm); + ret = sm501_init_dev(sm); + if (ret) + goto err_unmap; + return 0; + + err_unmap: + iounmap(sm->regs); 
err_claim: release_resource(sm->regs_claim); kfree(sm->regs_claim); diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index c0529a1cd5ea..20529ff48f00 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -204,7 +204,7 @@ static int sprd_pmic_probe(struct spi_device *spi) } ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq, - IRQF_ONESHOT | IRQF_NO_SUSPEND, 0, + IRQF_ONESHOT, 0, &ddata->irq_chip, &ddata->irq_data); if (ret) { dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret); @@ -220,9 +220,34 @@ static int sprd_pmic_probe(struct spi_device *spi) return ret; } + device_init_wakeup(&spi->dev, true); return 0; } +#ifdef CONFIG_PM_SLEEP +static int sprd_pmic_suspend(struct device *dev) +{ + struct sprd_pmic *ddata = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(ddata->irq); + + return 0; +} + +static int sprd_pmic_resume(struct device *dev) +{ + struct sprd_pmic *ddata = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(ddata->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume); + static const struct of_device_id sprd_pmic_match[] = { { .compatible = "sprd,sc2731", .data = &sc2731_data }, {}, @@ -234,6 +259,7 @@ static struct spi_driver sprd_pmic_driver = { .name = "sc27xx-pmic", .bus = &spi_bus_type, .of_match_table = sprd_pmic_match, + .pm = &sprd_pmic_pm_ops, }, .probe = sprd_pmic_probe, }; diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c index efcd4b980c94..1adba6a46dcb 100644 --- a/drivers/mfd/stm32-timers.c +++ b/drivers/mfd/stm32-timers.c @@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = { static void stm32_timers_get_arr_size(struct stm32_timers *ddata) { + u32 arr; + + /* Backup ARR to restore it after getting the maximum value */ + regmap_read(ddata->regmap, TIM_ARR, &arr); + /* * Only the available bits will be written so when readback * we get the maximum value of auto reload register */ regmap_write(ddata->regmap, TIM_ARR, ~0L); regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr); - regmap_write(ddata->regmap, TIM_ARR, 0x0); + regmap_write(ddata->regmap, TIM_ARR, arr); } static void stm32_timers_dma_probe(struct device *dev, diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c index 857991cb3cbb..711979afd90a 100644 --- a/drivers/mfd/stmfx.c +++ b/drivers/mfd/stmfx.c @@ -287,14 +287,21 @@ static int stmfx_irq_init(struct i2c_client *client) ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin); if (ret) - return ret; + goto irq_exit; ret = devm_request_threaded_irq(stmfx->dev, client->irq, NULL, stmfx_irq_handler, irqtrigger | IRQF_ONESHOT, "stmfx", stmfx); if (ret) - stmfx_irq_exit(client); + goto irq_exit; + + stmfx->irq = client->irq; + + return 0; + +irq_exit: + stmfx_irq_exit(client); return ret; } @@ -481,6 +488,8 @@ static int stmfx_suspend(struct device *dev) if (ret) return ret; + disable_irq(stmfx->irq); + if (stmfx->vdd) return regulator_disable(stmfx->vdd); @@ -501,6 +510,13 @@ static int stmfx_resume(struct device *dev) } } + /* Reset STMFX - supply has been stopped during suspend */ + ret = stmfx_chip_reset(stmfx); + if (ret) { + dev_err(stmfx->dev, "Failed to reset chip: %d\n", ret); + return ret; + } + ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL, &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl)); if (ret) @@ -517,6 +533,8 @@ static int stmfx_resume(struct device *dev) if (ret) return ret; + 
enable_irq(stmfx->irq); + return 0; } #endif diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c index 8a7cc0f86958..65b98f3fbd92 100644 --- a/drivers/mfd/wm831x-auxadc.c +++ b/drivers/mfd/wm831x-auxadc.c @@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x, wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); mutex_lock(&wm831x->auxadc_lock); - - list_del(&req->list); ret = req->val; out: + list_del(&req->list); mutex_unlock(&wm831x->auxadc_lock); kfree(req); diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 1e9fe7d92597..737dede4a95c 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -690,3 +690,4 @@ module_i2c_driver(wm8994_i2c_driver); MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); +MODULE_SOFTDEP("pre: wm8994_regulator"); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index ab4144ea1f11..d6cd5537126c 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -10,7 +10,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/atmel-ssc.h> #include <linux/slab.h> #include <linux/module.h> @@ -20,7 +20,7 @@ #include "../../sound/soc/atmel/atmel_ssc_dai.h" /* Serialize access to ssc_list and user count */ -static DEFINE_SPINLOCK(user_lock); +static DEFINE_MUTEX(user_lock); static LIST_HEAD(ssc_list); struct ssc_device *ssc_request(unsigned int ssc_num) @@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) int ssc_valid = 0; struct ssc_device *ssc; - spin_lock(&user_lock); + mutex_lock(&user_lock); list_for_each_entry(ssc, &ssc_list, list) { if (ssc->pdev->dev.of_node) { if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") @@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) } if (!ssc_valid) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); pr_err("ssc: ssc%d platform device is missing\n", ssc_num); return ERR_PTR(-ENODEV); } if (ssc->user) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); dev_dbg(&ssc->pdev->dev, "module busy\n"); return ERR_PTR(-EBUSY); } ssc->user++; - spin_unlock(&user_lock); + mutex_unlock(&user_lock); clk_prepare(ssc->clk); @@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc) { bool disable_clk = true; - spin_lock(&user_lock); + mutex_lock(&user_lock); if (ssc->user) ssc->user--; else { disable_clk = false; dev_dbg(&ssc->pdev->dev, "device already free\n"); } - spin_unlock(&user_lock); + mutex_unlock(&user_lock); if (disable_clk) clk_unprepare(ssc->clk); @@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - spin_lock(&user_lock); + mutex_lock(&user_lock); list_add_tail(&ssc->list, &ssc_list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); platform_set_drvdata(pdev, ssc); @@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev) ssc_sound_dai_remove(ssc); - spin_lock(&user_lock); + mutex_lock(&user_lock); list_del(&ssc->list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); return 0; } diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 3a9467aaa435..c3e3907a0c2f 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -338,6 +338,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr) { rts5227_extra_init_hw(pcr); + /* Power down OCP for power consumption */ + 
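The atmel-ssc conversion above swaps a spinlock for a mutex: the list and use counts are only ever touched from process context, and keeping the section sleep-capable avoids any risk of calling sleeping helpers such as clk_prepare() under a spinlock. A reduced sketch of the resulting shape (all example_* names are illustrative):

static DEFINE_MUTEX(example_lock);	/* serializes example_list and user counts */
static LIST_HEAD(example_list);

struct example_device *example_request(int id)
{
	struct example_device *found = NULL, *ex;

	mutex_lock(&example_lock);	/* may sleep; never taken in IRQ context */
	list_for_each_entry(ex, &example_list, list) {
		if (ex->id == id && !ex->user) {
			ex->user++;
			found = ex;
			break;
		}
	}
	mutex_unlock(&example_lock);

	if (!found)
		return ERR_PTR(-ENODEV);

	clk_prepare(found->clk);	/* sleeping call, kept outside the lock */
	return found;
}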
if (!pcr->card_exist) + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, + OC_POWER_DOWN); + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04); diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index b4a66b64f742..4c707d8dc3eb 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -143,6 +143,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) rtsx_disable_aspm(pcr); + /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */ + msleep(1); + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); @@ -1186,10 +1189,6 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) rtsx_pci_write_register(pcr, REG_OCPGLITCH, SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch); rtsx_pci_enable_ocp(pcr); - } else { - /* OC power down */ - rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, - OC_POWER_DOWN); } } } @@ -1531,12 +1530,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); if (ret < 0) - goto disable_irq; + goto free_slots; schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); return 0; +free_slots: + kfree(pcr->slots); disable_irq: free_irq(pcr->irq, (void *)pcr); disable_msi: diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 25a9dd9c0c1b..2ba899f5659f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -393,8 +393,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, *capp_unit_id = get_capp_unit_id(np, *phb_index); of_node_put(np); if (!*capp_unit_id) { - pr_err("cxl: invalid capp unit id (phb_index: %d)\n", - *phb_index); + pr_err("cxl: No capp unit found for PHB[%lld,%d]. 
Make sure the adapter is on a capi-compatible slot\n", + *chipid, *phb_index); return -ENODEV; } diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index f0263d1a1fdf..d97a243ad30c 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, &afu->dev.kobj, "cr%i", cr->cr); if (rc) - goto err; + goto err1; rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); if (rc) diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index cde9a2fc1325..490ff49d11ed 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi) at25->nvmem_config.reg_read = at25_ee_read; at25->nvmem_config.reg_write = at25_ee_write; at25->nvmem_config.priv = at25; - at25->nvmem_config.stride = 4; + at25->nvmem_config.stride = 1; at25->nvmem_config.word_size = 1; at25->nvmem_config.size = chip.byte_len; diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 94cfb675fe4e..8b355fc0607b 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = { EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH, }; +static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = { + .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE, +}; + struct eeprom_93xx46_dev { struct spi_device *spi; struct eeprom_93xx46_platform_data *pdata; @@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; } +static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev) +{ + return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE; +} + static int eeprom_93xx46_read(void *priv, unsigned int off, void *val, size_t count) { @@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", cmd_addr, edev->spi->max_speed_hz); + if (has_quirk_extra_read_cycle(edev)) { + cmd_addr <<= 1; + bits += 1; + } + spi_message_init(&m); t[0].tx_buf = (char *)&cmd_addr; @@ -363,6 +377,7 @@ static void select_deassert(void *context) static const struct of_device_id eeprom_93xx46_of_table[] = { { .compatible = "eeprom-93xx46", }, { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, + { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, }, {} }; MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); @@ -511,3 +526,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); MODULE_ALIAS("spi:93xx46"); +MODULE_ALIAS("spi:eeprom-93xx46"); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 842f2210dc7e..beaf15807f78 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -886,6 +886,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, struct fastrpc_channel_ctx *cctx; struct fastrpc_user *fl = ctx->fl; struct fastrpc_msg *msg = &ctx->msg; + int ret; cctx = fl->cctx; msg->pid = fl->tgid; @@ -901,7 +902,13 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, msg->size = roundup(ctx->msg_sz, PAGE_SIZE); fastrpc_context_get(ctx); - return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); + ret = 
rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); + + if (ret) + fastrpc_context_put(ctx); + + return ret; + } static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, @@ -917,6 +924,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (!fl->cctx->rpdev) return -EPIPE; + if (handle == FASTRPC_INIT_HANDLE && !kernel) { + dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); + return -EPERM; + } + ctx = fastrpc_context_alloc(fl, kernel, sc, args); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1434,8 +1446,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) domains[domain_id]); data->miscdev.fops = &fastrpc_fops; err = misc_register(&data->miscdev); - if (err) + if (err) { + kfree(data); return err; + } kref_init(&data->refcount); diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index a7a4fed4d899..e3d943c65419 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -108,6 +108,8 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_list_lock); + put_pid(hpriv->taskpid); + kfree(hpriv); return 0; @@ -229,16 +231,16 @@ delete_cdev_device: static void device_cdev_sysfs_del(struct hl_device *hdev) { - /* device_release() won't be called so must free devices explicitly */ - if (!hdev->cdev_sysfs_created) { - kfree(hdev->dev_ctrl); - kfree(hdev->dev); - return; - } + if (!hdev->cdev_sysfs_created) + goto put_devices; hl_sysfs_fini(hdev); cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); cdev_device_del(&hdev->cdev, hdev->dev); + +put_devices: + put_device(hdev->dev); + put_device(hdev->dev_ctrl); } /* @@ -959,6 +961,7 @@ again: GFP_KERNEL); if (!hdev->kernel_ctx) { rc = -ENOMEM; + hl_mmu_fini(hdev); goto out_err; } @@ -970,6 +973,7 @@ again: "failed to init kernel ctx in hard reset\n"); kfree(hdev->kernel_ctx); hdev->kernel_ctx = NULL; + hl_mmu_fini(hdev); goto out_err; } } @@ -1285,9 +1289,9 @@ sw_fini: early_fini: device_early_fini(hdev); free_dev_ctrl: - kfree(hdev->dev_ctrl); + put_device(hdev->dev_ctrl); free_dev: - kfree(hdev->dev); + put_device(hdev->dev); out_disabled: hdev->disabled = true; if (add_cdev_sysfs_on_err) diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index ea2ca67fbfbf..153858475abc 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -11,6 +11,7 @@ #include <linux/genalloc.h> #include <linux/io-64-nonatomic-lo-hi.h> +#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ /** * hl_fw_push_fw_to_device() - Push FW code to device. * @hdev: pointer to hl_device structure. 
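The firmware_if.c change introduces FW_FILE_MAX_SIZE above and, in the following hunk, rejects oversized blobs before they are copied to device memory: a file dropped into /lib/firmware must never size an unchecked memcpy_toio(). A sketch of the validate-then-copy pattern (EXAMPLE_FW_MAX and example_load_fw() are illustrative):

#define EXAMPLE_FW_MAX	0x1400000	/* 20 MB cap, mirroring the patch */

static int example_load_fw(struct device *dev, void __iomem *dst)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, "example.bin", dev);
	if (rc)
		return rc;

	if (fw->size > EXAMPLE_FW_MAX) {	/* validate before touching dst */
		dev_err(dev, "FW file size %zu exceeds maximum of %u bytes\n",
			fw->size, EXAMPLE_FW_MAX);
		rc = -EINVAL;
		goto out;
	}

	memcpy_toio(dst, fw->data, fw->size);
out:
	release_firmware(fw);
	return rc;
}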
@@ -43,6 +44,14 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name, dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + if (fw_size > FW_FILE_MAX_SIZE) { + dev_err(hdev->dev, + "FW file size %zu exceeds maximum of %u bytes\n", + fw_size, FW_FILE_MAX_SIZE); + rc = -EINVAL; + goto out; + } + fw_data = (const u64 *) fw->data; memcpy_toio(dst, fw_data, fw_size); diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c index d6ec12b3e692..08fc89ea0a0c 100644 --- a/drivers/misc/habanalabs/goya/goya_security.c +++ b/drivers/misc/habanalabs/goya/goya_security.c @@ -695,7 +695,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); - mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); @@ -875,6 +874,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE); + pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -882,6 +891,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1057,6 +1070,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE); + pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1064,6 +1087,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask 
|= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1239,6 +1266,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE); + pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1246,6 +1283,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1421,6 +1462,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE); + pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1428,6 +1479,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1603,6 +1658,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE); + pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1610,6 +1675,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << 
((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1785,6 +1854,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE); + pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1792,6 +1871,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1967,6 +2050,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE); + pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1974,6 +2067,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 75862be53c60..30addffd76f5 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -23,7 +23,7 @@ #define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT) -#define HL_PENDING_RESET_PER_SEC 5 +#define HL_PENDING_RESET_PER_SEC 30 #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 8c342fb499ca..ae50bd55f30a 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -443,6 +443,7 @@ static struct pci_driver hl_pci_driver = { .id_table = ids, .probe = 
hl_pci_probe, .remove = hl_pci_remove, + .shutdown = hl_pci_remove, .driver.pm = &hl_pm_ops, }; diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 22566b75ca50..acfccf32be6b 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -67,6 +67,11 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; total_size = num_pgs << page_shift; + if (!total_size) { + dev_err(hdev->dev, "Cannot allocate 0 bytes\n"); + return -EINVAL; + } + contiguous = args->flags & HL_MEM_CONTIGUOUS; if (contiguous) { @@ -94,7 +99,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, phys_pg_pack->contiguous = contiguous; phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto pages_arr_err; } @@ -689,7 +694,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto page_pack_arr_mem_err; } diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c index 176c315836f1..d66e16de4cda 100644 --- a/drivers/misc/habanalabs/mmu.c +++ b/drivers/misc/habanalabs/mmu.c @@ -422,7 +422,7 @@ int hl_mmu_init(struct hl_device *hdev) hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, prop->mmu_hop_table_size, GFP_KERNEL | __GFP_ZERO); - if (!hdev->mmu_shadow_hop0) { + if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) { rc = -ENOMEM; goto err_pool_add; } diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 057d7bbde402..d98174eced5a 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000}; static int lis3_3dlh_rates[4] = {50, 100, 400, 1000}; /* ODR is Output Data Rate */ -static int lis3lv02d_get_odr(struct lis3lv02d *lis3) +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3) { u8 ctrl; int shift; @@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3) lis3->read(lis3, CTRL_REG1, &ctrl); ctrl &= lis3->odr_mask; shift = ffs(lis3->odr_mask) - 1; - return lis3->odrs[(ctrl >> shift)]; + return (ctrl >> shift); } static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) { - int div = lis3lv02d_get_odr(lis3); + int odr_idx = lis3lv02d_get_odr_index(lis3); + int div = lis3->odrs[odr_idx]; - if (WARN_ONCE(div == 0, "device returned spurious data")) + if (div == 0) { + if (odr_idx == 0) { + /* Power-down mode, not sampling, so no need to sleep */ + return 0; + } + + dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx); return -ENXIO; + } /* LIS3 power on delay is quite long */ msleep(lis3->pwron_delay / div); @@ -807,9 +815,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lis3lv02d *lis3 = dev_get_drvdata(dev); + int odr_idx; lis3lv02d_sysfs_poweron(lis3); - return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3)); + + odr_idx = lis3lv02d_get_odr_index(lis3); + return sprintf(buf, "%d\n", lis3->odrs[odr_idx]); } static ssize_t lis3lv02d_rate_set(struct device *dev, diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index c70b3822013f..30c8ac24635d 100644 --- 
a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c index 58d180af72cf..baacb876d1d9 100644 --- a/drivers/misc/lkdtm/rodata.c +++ b/drivers/misc/lkdtm/rodata.c @@ -5,7 +5,7 @@ */ #include "lkdtm.h" -void notrace lkdtm_rodata_do_nothing(void) +void noinstr lkdtm_rodata_do_nothing(void) { /* Does nothing. We just want an architecture agnostic "return". */ } diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 53bb394ccba6..cef97a7eb8b6 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev) mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); - dev->driver = NULL; - return ret; + return ret; } static ssize_t name_show(struct device *dev, struct device_attribute *a, diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1e3edbbacb1e..c6b163060c76 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid(dev, uuid); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } @@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index c1f9e810cf81..030d0e7b148b 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -128,11 +128,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl) * * @cl: host client * - * Return: mtu + * Return: mtu or 0 if client is not connected */ static inline size_t mei_cl_mtu(const struct mei_cl *cl) { - return cl->me_cl->props.max_msg_length; + return cl->me_cl ? cl->me_cl->props.max_msg_length : 0; } /** diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index a44094cdbc36..d20b2b99c6f2 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1300,7 +1300,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - dev->dev_state = MEI_DEV_POWER_DOWN; + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); dev_info(dev->dev, "hbm: stop response: resetting.\n"); /* force the reset */ return -EPROTO; diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index 01e27682ea30..a486c6c7f407 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -1381,6 +1381,8 @@ retry: (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0, pinned_pages->pages); if (nr_pages != pinned_pages->nr_pages) { + if (pinned_pages->nr_pages < 0) + pinned_pages->nr_pages = 0; if (try_upgrade) { if (ulimit) __scif_dec_pinned_vm_lock(mm, nr_pages); @@ -1400,7 +1402,6 @@ retry: if (pinned_pages->nr_pages < nr_pages) { err = -EFAULT; - pinned_pages->nr_pages = nr_pages; goto dec_pinned; } @@ -1413,7 +1414,6 @@ dec_pinned: __scif_dec_pinned_vm_lock(mm, nr_pages); /* Something went wrong! 
Rollback */ error_unmap: - pinned_pages->nr_pages = nr_pages; scif_destroy_pinned_pages(pinned_pages); *pages = NULL; dev_dbg(scif_info.mdev.this_device, diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 85942f6717c5..8aadc6055df1 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, /* First assign the vring's allocated in host memory */ vqconfig = _vop_vq_config(vdev->desc) + index; memcpy_fromio(&config, vqconfig, sizeof(config)); - _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); + _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4); vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size); if (!va) diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c index 30eac172f017..7014ffe88632 100644 --- a/drivers/misc/mic/vop/vop_vringh.c +++ b/drivers/misc/mic/vop/vop_vringh.c @@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, num = le16_to_cpu(vqconfig[i].num); mutex_init(&vvr->vr_mutex); - vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + + vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) + sizeof(struct _mic_vring_info)); vr->va = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, @@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, goto err; } vr->len = vr_size; - vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); + vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4); vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i); vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size, DMA_BIDIRECTIONAL); @@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, size_t partlen; bool dma = VOP_USE_DMA && vi->dma_ch; int err = 0; + size_t offset = 0; if (dma) { dma_alignment = 1 << vi->dma_ch->device->copy_align; @@ -655,13 +656,20 @@ memcpy: * We are copying to IO below and should ideally use something * like copy_from_user_toio(..) if it existed. 
*/ - if (copy_from_user((void __force *)dbuf, ubuf, len)) { - err = -EFAULT; - dev_err(vop_dev(vdev), "%s %d err %d\n", - __func__, __LINE__, err); - goto err; + while (len) { + partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE); + + if (copy_from_user(vvr->buf, ubuf + offset, partlen)) { + err = -EFAULT; + dev_err(vop_dev(vdev), "%s %d err %d\n", + __func__, __LINE__, err); + goto err; + } + memcpy_toio(dbuf + offset, vvr->buf, partlen); + offset += partlen; + vdev->out_bytes += partlen; + len -= partlen; } - vdev->out_bytes += len; err = 0; err: vpdev->hw_ops->unmap(vpdev, dbuf); diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index 95ff7c5a1dfb..0a5e5b841aeb 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c @@ -166,6 +166,7 @@ static const struct of_device_id pvpanic_mmio_match[] = { { .compatible = "qemu,pvpanic-mmio", }, {} }; +MODULE_DEVICE_TABLE(of, pvpanic_mmio_match); static struct platform_driver pvpanic_mmio_driver = { .driver = { diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 16695366ec92..26ff49fdf0f7 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, return VMCI_ERROR_MORE_DATA; } - dbells = kmalloc(data_size, GFP_ATOMIC); + dbells = kzalloc(data_size, GFP_ATOMIC); if (!dbells) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index 345addd9306d..fa8a7fce4481 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn) { int result; - struct vmci_notify_bm_set_msg bitmap_set_msg; + struct vmci_notify_bm_set_msg bitmap_set_msg = { }; bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_SET_NOTIFY_BITMAP); diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 7a84a48c75da..543768551866 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev) VMCI_UTIL_NUM_RESOURCES * sizeof(u32); struct vmci_datagram *check_msg; - check_msg = kmalloc(msg_size, GFP_KERNEL); + check_msg = kzalloc(msg_size, GFP_KERNEL); if (!check_msg) { dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); return -ENOMEM; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 8531ae781195..c2338750313c 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -537,6 +537,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); + if (queue_size + queue_page_size > KMALLOC_MAX_SIZE) + return NULL; + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); if (queue) { queue->q_header = NULL; @@ -630,7 +633,7 @@ static void qp_release_pages(struct page **pages, for (i = 0; i < num_pages; i++) { if (dirty) - set_page_dirty(pages[i]); + set_page_dirty_lock(pages[i]); put_page(pages[i]); pages[i] = NULL; @@ -657,8 +660,9 @@ static int qp_host_get_user_memory(u64 produce_uva, if (retval < (int)produce_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(produce) failed (retval=%d)", retval); - 
qp_release_pages(produce_q->kernel_if->u.h.header_page, - retval, false); + if (retval > 0) + qp_release_pages(produce_q->kernel_if->u.h.header_page, + retval, false); err = VMCI_ERROR_NO_MEM; goto out; } @@ -670,8 +674,9 @@ static int qp_host_get_user_memory(u64 produce_uva, if (retval < (int)consume_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(consume) failed (retval=%d)", retval); - qp_release_pages(consume_q->kernel_if->u.h.header_page, - retval, false); + if (retval > 0) + qp_release_pages(consume_q->kernel_if->u.h.header_page, + retval, false); qp_release_pages(produce_q->kernel_if->u.h.header_page, produce_q->kernel_if->num_pages, false); err = VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 48ba7e02bed7..d4c14b617201 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, const u32 depth) { u32 reg = 0; - u32 res; - u32 n, i; + int res, i, nr_pages; + u32 n; u32 *addr = NULL; - struct page *page[MAX_NUM_PAGES]; + struct page *pages[MAX_NUM_PAGES]; /* * Writes that go beyond the length of @@ -622,15 +622,22 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) n += 1; - res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page); - if (res < n) { - for (i = 0; i < res; i++) - put_page(page[i]); + if (WARN_ON_ONCE(n > INT_MAX)) + return -EINVAL; + + nr_pages = n; + + res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages); + if (res < nr_pages) { + if (res > 0) { + for (i = 0; i < res; i++) + put_page(pages[i]); + } return -EINVAL; } - for (i = 0; i < n; i++) { - addr = kmap(page[i]); + for (i = 0; i < nr_pages; i++) { + addr = kmap(pages[i]); do { xsdfec_regwrite(xsdfec, base_addr + ((offset + reg) * @@ -639,7 +646,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, reg++; } while ((reg < len) && ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); - put_page(page[i]); + put_page(pages[i]); } return reg; } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 95b41c0891d0..39f2864455a6 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -621,6 +621,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; } + /* + * Make sure to update CACHE_CTRL in case it was changed. The cache + * will get turned back on if the card is re-initialized, e.g. + * suspend/resume or hw reset in recovery. + */ + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) && + (cmd.opcode == MMC_SWITCH)) { + u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1; + + card->ext_csd.cache_ctrl = value; + } + /* * According to the SD specs, some commands require a delay after * issuing the command. @@ -630,7 +642,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) { + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Ensure RPMB/R1B command has completed by polling CMD13 * "Send Status". 
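In the block.c hunk above, the R1B test is tightened from (cmd.flags & MMC_RSP_R1B) to (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B because MMC_RSP_R1B is a composite of several response bits; a bare & also fires for any response type that merely shares one of those bits. A self-contained illustration, with hypothetical flag values standing in for the real MMC_RSP_* definitions:

#define RSP_PRESENT	(1 << 0)
#define RSP_CRC		(1 << 1)
#define RSP_BUSY	(1 << 2)
#define RSP_R1		(RSP_PRESENT | RSP_CRC)		/* no busy signalling */
#define RSP_R1B		(RSP_PRESENT | RSP_CRC | RSP_BUSY)

static int is_r1b(unsigned int flags)
{
	/* Wrong: a plain R1 command also satisfies "flags & RSP_R1B". */
	/* return flags & RSP_R1B; */

	/* Right: every bit of the composite must be set. */
	return (flags & RSP_R1B) == RSP_R1B;
}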
@@ -1417,6 +1429,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) struct mmc_request *mrq = &mqrq->brq.mrq; struct request_queue *q = req->q; struct mmc_host *host = mq->card->host; + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); unsigned long flags; bool put_card; int err; @@ -1446,7 +1459,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -2214,6 +2227,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) case MMC_ISSUE_ASYNC: switch (req_op(req)) { case REQ_OP_FLUSH: + if (!mmc_cache_enabled(host)) { + blk_mq_end_request(req, BLK_STS_OK); + return MMC_REQ_FINISHED; + } ret = mmc_blk_cqe_issue_flush(mq, req); break; case REQ_OP_READ: @@ -2474,8 +2491,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, struct mmc_rpmb_data, chrdev); - put_device(&rpmb->dev); mmc_blk_put(rpmb->md); + put_device(&rpmb->dev); return 0; } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 74de3f2dda38..28501f165d45 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -373,11 +373,6 @@ void mmc_remove_card(struct mmc_card *card) mmc_remove_card_debugfs(card); #endif - if (host->cqe_enabled) { - host->cqe_ops->cqe_disable(host); - host->cqe_enabled = false; - } - if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", @@ -390,6 +385,10 @@ void mmc_remove_card(struct mmc_card *card) of_node_put(card->dev.of_node); } + if (host->cqe_enabled) { + host->cqe_ops->cqe_disable(host); + host->cqe_enabled = false; + } + put_device(&card->dev); } - diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 26644b7ec13e..460a456bcdd2 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1221,7 +1221,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) err = mmc_wait_for_cmd(host, &cmd, 0); if (err) - return err; + goto power_cycle; if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) return -EIO; @@ -2392,80 +2392,6 @@ void mmc_stop_host(struct mmc_host *host) mmc_release_host(host); } -#ifdef CONFIG_PM_SLEEP -/* Do the card removal on suspend if card is assumed removeable - * Do that in pm notifier while userspace isn't yet frozen, so we will be able - to sync the card. 
-*/ -static int mmc_pm_notify(struct notifier_block *notify_block, - unsigned long mode, void *unused) -{ - struct mmc_host *host = container_of( - notify_block, struct mmc_host, pm_notify); - unsigned long flags; - int err = 0; - - switch (mode) { - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - case PM_RESTORE_PREPARE: - spin_lock_irqsave(&host->lock, flags); - host->rescan_disable = 1; - spin_unlock_irqrestore(&host->lock, flags); - cancel_delayed_work_sync(&host->detect); - - if (!host->bus_ops) - break; - - /* Validate prerequisites for suspend */ - if (host->bus_ops->pre_suspend) - err = host->bus_ops->pre_suspend(host); - if (!err) - break; - - if (!mmc_card_is_removable(host)) { - dev_warn(mmc_dev(host), - "pre_suspend failed for non-removable host: " - "%d\n", err); - /* Avoid removing non-removable hosts */ - break; - } - - /* Calling bus_ops->remove() with a claimed host can deadlock */ - host->bus_ops->remove(host); - mmc_claim_host(host); - mmc_detach_bus(host); - mmc_power_off(host); - mmc_release_host(host); - host->pm_flags = 0; - break; - - case PM_POST_SUSPEND: - case PM_POST_HIBERNATION: - case PM_POST_RESTORE: - - spin_lock_irqsave(&host->lock, flags); - host->rescan_disable = 0; - spin_unlock_irqrestore(&host->lock, flags); - _mmc_detect_change(host, 0, false); - - } - - return 0; -} - -void mmc_register_pm_notifier(struct mmc_host *host) -{ - host->pm_notify.notifier_call = mmc_pm_notify; - register_pm_notifier(&host->pm_notify); -} - -void mmc_unregister_pm_notifier(struct mmc_host *host) -{ - unregister_pm_notifier(&host->pm_notify); -} -#endif - static int __init mmc_init(void) { int ret; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 575ac0257af2..db3c9c68875d 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -29,6 +29,7 @@ struct mmc_bus_ops { int (*shutdown)(struct mmc_host *); int (*hw_reset)(struct mmc_host *); int (*sw_reset)(struct mmc_host *); + bool (*cache_enabled)(struct mmc_host *); }; void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); @@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card); int mmc_hs200_to_hs400(struct mmc_card *card); int mmc_hs400_to_hs200(struct mmc_card *card); -#ifdef CONFIG_PM_SLEEP -void mmc_register_pm_notifier(struct mmc_host *host); -void mmc_unregister_pm_notifier(struct mmc_host *host); -#else -static inline void mmc_register_pm_notifier(struct mmc_host *host) { } -static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { } -#endif - void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq); bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); @@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, host->ops->post_req(host, mrq, err); } +static inline bool mmc_cache_enabled(struct mmc_host *host) +{ + if (host->bus_ops->cache_enabled) + return host->bus_ops->cache_enabled(host); + + return false; +} + #endif diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index b3484def0a8b..f7339590a84c 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -33,6 +33,42 @@ static DEFINE_IDA(mmc_host_ida); +#ifdef CONFIG_PM_SLEEP +static int mmc_host_class_prepare(struct device *dev) +{ + struct mmc_host *host = cls_dev_to_mmc_host(dev); + + /* + * It's safe to access the bus_ops pointer, as both userspace and the + * workqueue for detecting cards are frozen at this point. 
+ */ + if (!host->bus_ops) + return 0; + + /* Validate conditions for system suspend. */ + if (host->bus_ops->pre_suspend) + return host->bus_ops->pre_suspend(host); + + return 0; +} + +static void mmc_host_class_complete(struct device *dev) +{ + struct mmc_host *host = cls_dev_to_mmc_host(dev); + + _mmc_detect_change(host, 0, false); +} + +static const struct dev_pm_ops mmc_host_class_dev_pm_ops = { + .prepare = mmc_host_class_prepare, + .complete = mmc_host_class_complete, +}; + +#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops) +#else +#define MMC_HOST_CLASS_DEV_PM_OPS NULL +#endif + static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); @@ -43,6 +79,7 @@ static void mmc_host_classdev_release(struct device *dev) static struct class mmc_host_class = { .name = "mmc_host", .dev_release = mmc_host_classdev_release, + .pm = MMC_HOST_CLASS_DEV_PM_OPS, }; int mmc_register_host_class(void) @@ -477,8 +514,6 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - mmc_register_pm_notifier(host); - return 0; } @@ -494,7 +529,6 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - mmc_unregister_pm_notifier(host); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b7159e243323..ed939bb2f700 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) } } -static void mmc_part_add(struct mmc_card *card, unsigned int size, +static void mmc_part_add(struct mmc_card *card, u64 size, unsigned int part_cfg, char *name, int idx, bool ro, int area_type) { @@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) { int idx; u8 hc_erase_grp_sz, hc_wp_grp_sz; - unsigned int part_size; + u64 part_size; /* * General purpose partition feature support -- @@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] << 8) + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; - part_size *= (size_t)(hc_erase_grp_sz * - hc_wp_grp_sz); + part_size *= (hc_erase_grp_sz * hc_wp_grp_sz); mmc_part_add(card, part_size << 19, EXT_CSD_PART_CONFIG_ACC_GP0 + idx, "gp%d", idx, false, @@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) { int err = 0, idx; - unsigned int part_size; + u64 part_size; struct device_node *np; bool broken_hpi = false; @@ -424,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) /* EXT_CSD value is in units of 10ms, but we store in ms */ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; - /* Some eMMC set the value too low so set a minimum */ - if (card->ext_csd.part_time && - card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) - card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) @@ -617,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.data_sector_size = 512; } + /* + * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined + * when accessing a specific field", so use it here if there is no + * PARTITION_SWITCH_TIME. 
+ */ + if (!card->ext_csd.part_time) + card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; + /* Some eMMC set the value too low so set a minimum */ + if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + /* eMMC v5 or later */ if (card->ext_csd.rev >= 7) { memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], @@ -2013,6 +2019,12 @@ static void mmc_detect(struct mmc_host *host) } } +static bool _mmc_cache_enabled(struct mmc_host *host) +{ + return host->card->ext_csd.cache_size > 0 && + host->card->ext_csd.cache_ctrl & 1; +} + static int _mmc_suspend(struct mmc_host *host, bool is_suspend) { int err = 0; @@ -2191,6 +2203,7 @@ static const struct mmc_bus_ops mmc_ops = { .alive = mmc_alive, .shutdown = mmc_shutdown, .hw_reset = _mmc_hw_reset, + .cache_enabled = _mmc_cache_enabled, }; /* diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 18a7afb2a5b2..09311c2bd858 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -959,9 +959,7 @@ int mmc_flush_cache(struct mmc_card *card) { int err = 0; - if (mmc_card_mmc(card) && - (card->ext_csd.cache_size > 0) && - (card->ext_csd.cache_ctrl & 1)) { + if (mmc_cache_enabled(card->host)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_FLUSH_CACHE, 1, 0); if (err) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 9edc08685e86..9cee10e44e2f 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req) case MMC_ISSUE_DCMD: if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) { if (recovery_needed) - __mmc_cqe_recovery_notifier(mq); + mmc_cqe_recovery_notifier(mrq); return BLK_EH_RESET_TIMER; } - /* No timeout (XXX: huh? comment doesn't make much sense) */ - blk_mq_complete_request(req); + /* The request has gone already */ return BLK_EH_DONE; default: /* Timeout is handled by mmc core */ @@ -125,18 +124,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req, struct request_queue *q = req->q; struct mmc_queue *mq = q->queuedata; unsigned long flags; - int ret; + bool ignore_tout; spin_lock_irqsave(&mq->lock, flags); - - if (mq->recovery_needed || !mq->use_cqe) - ret = BLK_EH_RESET_TIMER; - else - ret = mmc_cqe_timed_out(req); - + ignore_tout = mq->recovery_needed || !mq->use_cqe; spin_unlock_irqrestore(&mq->lock, flags); - return ret; + return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req); } static void mmc_mq_recovery_handler(struct work_struct *work) @@ -190,7 +184,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. 
discard */ if (card->pref_erase > max_discard) - q->limits.discard_granularity = 0; + q->limits.discard_granularity = SECTOR_SIZE; if (mmc_can_secure_erase_trim(card)) blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } @@ -376,8 +370,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) "merging was advertised but not possible"); blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); - if (mmc_card_mmc(card)) + if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) { block_size = card->ext_csd.data_sector_size; + WARN_ON(block_size != 512 && block_size != 4096); + } blk_queue_logical_block_size(mq->queue, block_size); /* diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index fe914ff5f5d6..4f7c08e68f8c 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card) csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1; csd->erase_size <<= csd->write_blkbits - 9; } + + if (UNSTUFF_BITS(resp, 13, 1)) + mmc_card_set_readonly(card); break; case 1: /* @@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card) csd->write_blkbits = 9; csd->write_partial = 0; csd->erase_size = 1; + + if (UNSTUFF_BITS(resp, 13, 1)) + mmc_card_set_readonly(card); break; default: pr_err("%s: unrecognised CSD structure version %d\n", diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index ebb387aa5158..0bf33786fc5c 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -584,7 +584,7 @@ try_again: */ err = mmc_send_io_op_cond(host, ocr, &rocr); if (err) - goto err; + return err; /* * For SPI, enable CRC as appropriate. @@ -592,17 +592,15 @@ try_again: if (mmc_host_is_spi(host)) { err = mmc_spi_set_crc(host, use_spi_crc); if (err) - goto err; + return err; } /* * Allocate card structure. */ card = mmc_alloc_card(host, NULL); - if (IS_ERR(card)) { - err = PTR_ERR(card); - goto err; - } + if (IS_ERR(card)) + return PTR_ERR(card); if ((rocr & R4_MEMORY_PRESENT) && mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { @@ -610,19 +608,15 @@ try_again: if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) { - mmc_remove_card(card); - pr_debug("%s: Perhaps the card was replaced\n", - mmc_hostname(host)); - return -ENOENT; + err = -ENOENT; + goto mismatch; } } else { card->type = MMC_TYPE_SDIO; if (oldcard && oldcard->type != MMC_TYPE_SDIO) { - mmc_remove_card(card); - pr_debug("%s: Perhaps the card was replaced\n", - mmc_hostname(host)); - return -ENOENT; + err = -ENOENT; + goto mismatch; } } @@ -677,7 +671,7 @@ try_again: if (!oldcard && card->type == MMC_TYPE_SD_COMBO) { err = mmc_sd_get_csd(host, card); if (err) - return err; + goto remove; mmc_decode_cid(card); } @@ -704,7 +698,12 @@ try_again: mmc_set_timing(card->host, MMC_TIMING_SD_HS); } - goto finish; + if (oldcard) + mmc_remove_card(card); + else + host->card = card; + + return 0; } /* @@ -718,9 +717,8 @@ try_again: /* Retry init sequence, but without R4_18V_PRESENT. 
*/ retries = 0; goto try_again; - } else { - goto remove; } + return err; } /* @@ -731,16 +729,14 @@ try_again: goto remove; if (oldcard) { - int same = (card->cis.vendor == oldcard->cis.vendor && - card->cis.device == oldcard->cis.device); - mmc_remove_card(card); - if (!same) { - pr_debug("%s: Perhaps the card was replaced\n", - mmc_hostname(host)); - return -ENOENT; + if (card->cis.vendor == oldcard->cis.vendor && + card->cis.device == oldcard->cis.device) { + mmc_remove_card(card); + card = oldcard; + } else { + err = -ENOENT; + goto mismatch; } - - card = oldcard; } card->ocr = ocr_card; mmc_fixup_device(card, sdio_fixup_methods); @@ -801,16 +797,15 @@ try_again: err = -EINVAL; goto remove; } -finish: - if (!oldcard) - host->card = card; + + host->card = card; return 0; +mismatch: + pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host)); remove: - if (!oldcard) + if (oldcard != card) mmc_remove_card(card); - -err: return err; } @@ -929,21 +924,37 @@ out: */ static int mmc_sdio_pre_suspend(struct mmc_host *host) { - int i, err = 0; + int i; for (i = 0; i < host->card->sdio_funcs; i++) { struct sdio_func *func = host->card->sdio_func[i]; if (func && sdio_func_present(func) && func->dev.driver) { const struct dev_pm_ops *pmops = func->dev.driver->pm; - if (!pmops || !pmops->suspend || !pmops->resume) { + if (!pmops || !pmops->suspend || !pmops->resume) /* force removal of entire card in that case */ - err = -ENOSYS; - break; - } + goto remove; } } - return err; + return 0; + +remove: + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "missing suspend/resume ops for non-removable SDIO card\n"); + /* Don't remove a non-removable card - we can't re-detect it. */ + return 0; + } + + /* Remove the SDIO card and let it be re-detected later on. */ + mmc_sdio_remove(host); + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_power_off(host); + mmc_release_host(host); + host->pm_flags = 0; + + return 0; } /* diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index e0655278c5c3..9a5aaac29099 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -20,12 +20,17 @@ #include "sdio_cis.h" #include "sdio_ops.h" +#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { unsigned i, nr_strings; char **buffer, *string; + if (size < 2) + return 0; + /* Find all null-terminated (including zero length) strings in the TPLLV1_INFO field. Trailing garbage is ignored. 
*/ buf += 2; @@ -263,6 +268,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) do { unsigned char tpl_code, tpl_link; + unsigned long timeout = jiffies + + msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); if (ret) @@ -315,6 +322,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) prev = &this->next; if (ret == -ENOENT) { + if (time_after(jiffies, timeout)) + break; /* warn about unknown tuples */ pr_warn_ratelimited("%s: queuing unknown" " CIS tuple 0x%02x (%u bytes)\n", diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index 93d346c01110..4c229dd2b6e5 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, struct sg_table sgtable; unsigned int nents, left_size, i; unsigned int seg_size = card->host->max_seg_size; + int err; WARN_ON(blksz == 0); @@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, mmc_set_data_timeout(&data, card); + mmc_pre_req(card->host, &mrq); + mmc_wait_for_req(card->host, &mrq); + if (cmd.error) + err = cmd.error; + else if (data.error) + err = data.error; + else if (mmc_host_is_spi(card->host)) + /* host driver already reported errors */ + err = 0; + else if (cmd.resp[0] & R5_ERROR) + err = -EIO; + else if (cmd.resp[0] & R5_FUNCTION_NUMBER) + err = -EINVAL; + else if (cmd.resp[0] & R5_OUT_OF_RANGE) + err = -ERANGE; + else + err = 0; + + mmc_post_req(card->host, &mrq, err); + if (nents > 1) sg_free_table(&sgtable); - if (cmd.error) - return cmd.error; - if (data.error) - return data.error; - - if (mmc_host_is_spi(card->host)) { - /* host driver already reported errors */ - } else { - if (cmd.resp[0] & R5_ERROR) - return -EIO; - if (cmd.resp[0] & R5_FUNCTION_NUMBER) - return -EINVAL; - if (cmd.resp[0] & R5_OUT_OF_RANGE) - return -ERANGE; - } - - return 0; + return err; } int sdio_reset(struct mmc_host *host) diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index 1aee485d56d4..026ca9194ce5 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to get irq for data line\n"); - return ret; + goto free_host; } mutex_init(&host->cmd_mutex); @@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, host); mmc_add_host(mmc); return 0; + +free_host: + mmc_free_host(mmc); + return ret; } static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 5047f7343ffc..2d65b32d205a 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -5,6 +5,7 @@ #include <linux/delay.h> #include <linux/highmem.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/slab.h> @@ -298,16 +299,16 @@ static void __cqhci_disable(struct cqhci_host *cq_host) cq_host->activated = false; } -int cqhci_suspend(struct mmc_host *mmc) +int cqhci_deactivate(struct mmc_host *mmc) { struct cqhci_host *cq_host = mmc->cqe_private; - if (cq_host->enabled) + if (cq_host->enabled && cq_host->activated) __cqhci_disable(cq_host); return 0; } -EXPORT_SYMBOL(cqhci_suspend); +EXPORT_SYMBOL(cqhci_deactivate); int cqhci_resume(struct mmc_host *mmc) { @@ -343,12 +344,16 @@ static int 
cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) /* CQHCI is idle and should halt immediately, so set a small timeout */ #define CQHCI_OFF_TIMEOUT 100 +static u32 cqhci_read_ctl(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL); +} + static void cqhci_off(struct mmc_host *mmc) { struct cqhci_host *cq_host = mmc->cqe_private; - ktime_t timeout; - bool timed_out; u32 reg; + int err; if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) return; @@ -358,15 +363,9 @@ static void cqhci_off(struct mmc_host *mmc) cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); - timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT); - while (1) { - timed_out = ktime_compare(ktime_get(), timeout) > 0; - reg = cqhci_readl(cq_host, CQHCI_CTL); - if ((reg & CQHCI_HALT) || timed_out) - break; - } - - if (timed_out) + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); + if (err < 0) pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); else pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h index def76e9b5cac..437700179de4 100644 --- a/drivers/mmc/host/cqhci.h +++ b/drivers/mmc/host/cqhci.h @@ -230,7 +230,11 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, int data_error); int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64); struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev); -int cqhci_suspend(struct mmc_host *mmc); +int cqhci_deactivate(struct mmc_host *mmc); +static inline int cqhci_suspend(struct mmc_host *mmc) +{ + return cqhci_deactivate(mmc); +} int cqhci_resume(struct mmc_host *mmc); #endif diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..545c3f2f8a06 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1151,9 +1151,11 @@ static int meson_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_CMD23; if (host->dram_access_quirk) { + /* Limit segments to 1 due to low available sram memory */ + mmc->max_segs = 1; /* Limit to the available sram memory */ - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size; - mmc->max_blk_count = mmc->max_segs; + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / + mmc->max_blk_size; } else { mmc->max_blk_count = CMD_CFG_LENGTH_MASK; mmc->max_segs = SD_EMMC_DESC_BUF_LEN / diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index ba9a63db73da..360d523132bd 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) mrq = host->mrq; + if (host->cmd->error) + meson_mx_mmc_soft_reset(host); + host->mrq = NULL; host->cmd = NULL; @@ -357,14 +360,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) meson_mx_mmc_start_cmd(mmc, mrq->cmd); } -static int meson_mx_mmc_card_busy(struct mmc_host *mmc) -{ - struct meson_mx_mmc_host *host = mmc_priv(mmc); - u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); - - return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); -} - static void meson_mx_mmc_read_response(struct mmc_host *mmc, struct mmc_command *cmd) { @@ -506,7 +501,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t) static struct mmc_host_ops meson_mx_mmc_ops = { .request = meson_mx_mmc_request, .set_ios = meson_mx_mmc_set_ios, - .card_busy = meson_mx_mmc_card_busy, .get_cd = mmc_gpio_get_cd, 
.get_ro = mmc_gpio_get_ro, }; @@ -570,7 +564,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) mmc->f_max = clk_round_rate(host->cfg_div_clk, clk_get_rate(host->parent_clk)); - mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; mmc->ops = &meson_mx_mmc_ops; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index c37e70dbe250..7e4bc9124efd 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -168,6 +168,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -201,6 +203,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -260,6 +264,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .init = sdmmc_variant_init, }; @@ -279,6 +284,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -447,10 +453,11 @@ void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -515,7 +522,9 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -822,6 +831,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * some power of two blocks (such as 64 bytes) are sent regularly + * during SDIO traffic and those work fine so for these we enable DMA + * transfers. 
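Reduced to its essentials, the gating added below is a two-input predicate; a sketch under the assumption that a false result sends the transfer down the PIO path (pow2() re-implements is_power_of_2() for illustration):

	#include <stdbool.h>

	static bool pow2(unsigned int n)
	{
		return n && !(n & (n - 1));
	}

	/* True when this block size may use DMA on a variant with the
	 * dma_power_of_2 restriction; false maps to the -EINVAL below,
	 * which makes the core fall back to PIO. */
	static bool dma_allowed(bool dma_power_of_2, unsigned int blksz)
	{
		return !dma_power_of_2 || pow2(blksz);
	}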
+ */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -872,9 +893,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 833236ecb31e..89ab73343cf3 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -278,7 +278,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if any block size is accepted by the + * hardware, such as with some SDIO traffic that sends + * odd-sized packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2. * @datactrl_first: true if data must be setup before send command * @datacnt_useless: true if you could not use datacnt register to read * remaining data @@ -323,6 +327,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 8e83ae6920ae..0953bd8a4f79 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) { writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); + + if (!data->host_cookie) + sdmmc_idma_unprep_data(host, data, 0); } static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired) diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 010fe29a4888..1254a5650cff 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/reset.h> #include <linux/mmc/card.h> #include <linux/mmc/core.h> @@ -412,6 +413,7 @@ struct msdc_host { struct pinctrl_state *pins_uhs; struct delayed_work req_timeout; int irq; /* host interrupt */ + struct reset_control *reset; struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ @@ -1018,13 +1020,13 @@ static void msdc_track_cmd_data(struct msdc_host *host, static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) { unsigned long flags; - bool ret; - ret = cancel_delayed_work(&host->req_timeout); - if (!ret) { - /* delay work already running */ - return; - } + /* + * No need to check the return value of cancel_delayed_work(), as + * only one path can reach this point.
+ */ + cancel_delayed_work(&host->req_timeout); + spin_lock_irqsave(&host->lock, flags); host->mrq = NULL; spin_unlock_irqrestore(&host->lock, flags); @@ -1044,7 +1046,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, bool done = false; bool sbc_error; unsigned long flags; - u32 *rsp = cmd->resp; + u32 *rsp; if (mrq->sbc && cmd == mrq->cmd && (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR @@ -1065,6 +1067,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, if (done) return true; + rsp = cmd->resp; sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); @@ -1252,7 +1255,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, struct mmc_request *mrq, struct mmc_data *data) { - struct mmc_command *stop = data->stop; + struct mmc_command *stop; unsigned long flags; bool done; unsigned int check_data = events & @@ -1268,6 +1271,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, if (done) return true; + stop = data->stop; if (check_data || (stop && stop->error)) { dev_dbg(host->dev, "DMA status: 0x%8X\n", @@ -1474,6 +1478,12 @@ static void msdc_init_hw(struct msdc_host *host) u32 val; u32 tune_reg = host->dev_comp->pad_tune_reg; + if (host->reset) { + reset_control_assert(host->reset); + usleep_range(10, 50); + reset_control_deassert(host->reset); + } + /* Configure to MMC/SD mode, clock free running */ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); @@ -2232,6 +2242,11 @@ static int msdc_drv_probe(struct platform_device *pdev) if (IS_ERR(host->src_clk_cg)) host->src_clk_cg = NULL; + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "hrst"); + if (IS_ERR(host->reset)) + return PTR_ERR(host->reset); + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = -EINVAL; diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4031217d21c3..52054931c350 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -644,7 +644,7 @@ static int mxs_mmc_probe(struct platform_device *pdev) ret = mmc_of_parse(mmc); if (ret) - goto out_clk_disable; + goto out_free_dma; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index b2bbcb09a49e..953e7457137a 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -729,6 +729,7 @@ static int pxamci_probe(struct platform_device *pdev) host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); if (IS_ERR(host->power)) { + ret = PTR_ERR(host->power); dev_err(dev, "Failed requesting gpio_power\n"); goto out; } diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 234551a68739..689eb119d44f 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -874,6 +874,7 @@ int renesas_sdhi_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); renesas_sdhi_clk_disable(host); + tmio_mmc_host_free(host); return 0; } diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index a66f8d6d61d1..f54d0427e9c0 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -186,8 +186,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, mmc_get_dma_dir(data))) goto force_pio; - /* This DMAC cannot handle if buffer is not 8-bytes alignment */ - if (!IS_ALIGNED(sg_dma_address(sg), 8)) + /* 
This DMAC cannot handle buffers that are not 128-byte aligned */ + if (!IS_ALIGNED(sg_dma_address(sg), 128)) goto force_pio_with_unmap; if (data->flags & MMC_DATA_READ) { @@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) DTRAN_CTRL_DM_START); } -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; enum dma_data_direction dir; - spin_lock_irq(&host->lock); - if (!host->data) - goto out; + return false; if (host->data->flags & MMC_DATA_READ) dir = DMA_FROM_DEVICE; @@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) if (dir == DMA_FROM_DEVICE) clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + return true; +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + if (!renesas_sdhi_internal_dmac_complete(host)) + goto out; + + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 1604f512c7bd..0dc8eafdc81d 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -532,6 +532,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { .caps = MMC_CAP_NONREMOVABLE, }; +struct amd_sdhci_host { + bool tuned_clock; + bool dll_enabled; +}; + /* AMD sdhci reset dll register. */ #define SDHCI_AMD_RESET_DLL_REGISTER 0x908 @@ -542,39 +547,96 @@ static int amd_select_drive_strength(struct mmc_card *card, return MMC_SET_DRIVER_TYPE_A; } -static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host) +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) { + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + /* AMD Platform requires dll setting */ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); usleep_range(10, 20); - sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + if (enable) + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + + amd_host->dll_enabled = enable; } /* - * For AMD Platform it is required to disable the tuning - * bit first controller to bring to HS Mode from HS200 - * mode, later enable to tune to HS400 mode. + * The initialization sequence for HS400 is: + * HS->HS200->Perform Tuning->HS->HS400 + * + * The re-tuning sequence is: + * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400 + * + * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400 + * mode. If we switch to a different mode, we need to disable the tuned clock. + * If we have previously performed tuning and switch back to HS200 or + * HS400, we can re-enable the tuned clock.
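The rule the comment describes reduces to a small predicate; a hedged sketch (the enum and names are illustrative, not the driver's types):

	#include <stdbool.h>

	enum timing { T_HS, T_DDR52, T_HS200, T_HS400 };

	/* The tuned clock is usable only in HS200/HS400, and only after
	 * a tuning pass has actually succeeded. */
	static bool want_tuned_clk(enum timing t, bool tuned_clock)
	{
		return tuned_clock && (t == T_HS200 || t == T_HS400);
	}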
+ * */ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); unsigned int old_timing = host->timing; + u16 val; sdhci_set_ios(mmc, ios); - if (old_timing == MMC_TIMING_MMC_HS200 && - ios->timing == MMC_TIMING_MMC_HS) - sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2); - if (old_timing != MMC_TIMING_MMC_HS400 && - ios->timing == MMC_TIMING_MMC_HS400) { - sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2); - sdhci_acpi_amd_hs400_dll(host); + + if (old_timing != host->timing && amd_host->tuned_clock) { + if (host->timing == MMC_TIMING_MMC_HS400 || + host->timing == MMC_TIMING_MMC_HS200) { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val |= SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } else { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } + + /* DLL is only required for HS400 */ + if (host->timing == MMC_TIMING_MMC_HS400 && + !amd_host->dll_enabled) + sdhci_acpi_amd_hs400_dll(host, true); } } +static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + amd_host->tuned_clock = false; + + err = sdhci_execute_tuning(mmc, opcode); + + if (!err && !host->tuning_err) + amd_host->tuned_clock = true; + + return err; +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + if (mask & SDHCI_RESET_ALL) { + amd_host->tuned_clock = false; + sdhci_acpi_amd_hs400_dll(host, false); + } + + sdhci_reset(host, mask); +} + static const struct sdhci_ops sdhci_acpi_ops_amd = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = amd_sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; @@ -596,17 +658,58 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, (host->mmc->caps & MMC_CAP_1_8V_DDR)) host->mmc->caps2 = MMC_CAP2_HS400_1_8V; + /* + * There are two types of presets out in the wild: + * 1) Default/broken presets. + * These presets have two sets of problems: + * a) The clock divisor for SDR12, SDR25, and SDR50 is too small. + * This results in clock frequencies that are 2x higher than + * acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 = + * 100 MHz.x + * b) The HS200 and HS400 driver strengths don't match. + * By default, the SDR104 preset register has a driver strength of + * A, but the (internal) HS400 preset register has a driver + * strength of B. As part of initializing HS400, HS200 tuning + * needs to be performed. Having different driver strengths + * between tuning and operation is wrong. It results in different + * rise/fall times that lead to incorrect sampling. + * 2) Firmware with properly initialized presets. + * These presets have proper clock divisors. i.e., SDR12 => 12MHz, + * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and + * HS400 preset driver strengths match. + * + * Enabling presets for HS400 doesn't work for the following reasons: + * 1) sdhci_set_ios has a hard coded list of timings that are used + * to determine if presets should be enabled. 
+ * 2) sdhci_get_preset_value is using a non-standard register to + * read out HS400 presets. The AMD controller doesn't support this + * non-standard register. In fact, it doesn't expose the HS400 + * preset register anywhere in the SDHCI memory map. This results + * in reading a garbage value and using the wrong presets. + * + * Since HS400 and HS200 presets must be identical, we could + * instead use the SDR104 preset register. + * + * If the above issues are resolved, we could remove this quirk for + * firmware that has valid presets (i.e., SDR12 <= 12 MHz). + */ + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; host->mmc_host_ops.set_ios = amd_set_ios; + host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning; return 0; } static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { - .chip = &sdhci_acpi_chip_amd, - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, - .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | - SDHCI_QUIRK_32BIT_ADMA_SIZE, + .chip = &sdhci_acpi_chip_amd, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .probe_slot = sdhci_acpi_emmc_amd_probe_slot, + .priv_size = sizeof(struct amd_sdhci_host), }; struct sdhci_acpi_uid_slot { diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 5f2e9696ee4d..0c2489446bd7 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -194,6 +194,79 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv) return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp); } +static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) +{ + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); + void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; + u32 tmp; + int i, ret; + + if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) + return -EINVAL; + + tmp = readl(reg); + tmp &= ~SDHCI_CDNS_HRS06_TUNE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); + + /* + * Workaround for IP errata: + * The IP6116 SD/eMMC PHY design has a timing issue on receive data + * path. Send tune request twice. + */ + for (i = 0; i < 2; i++) { + tmp |= SDHCI_CDNS_HRS06_TUNE_UP; + writel(tmp, reg); + + ret = readl_poll_timeout(reg, tmp, + !(tmp & SDHCI_CDNS_HRS06_TUNE_UP), + 0, 1); + if (ret) + return ret; + } + + return 0; +} + +/* + * In SD mode, software must not use the hardware tuning and instead perform + * an almost identical procedure to eMMC. + */ +static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int cur_streak = 0; + int max_streak = 0; + int end_of_streak = 0; + int i; + + /* + * Do not execute tuning for UHS_SDR50 or UHS_DDR50. + * The delay is set by probe, based on the DT properties.
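The double tune-request workaround above can also be read in isolation; in this sketch a crude bounded spin stands in for readl_poll_timeout(), and the register is modeled as a plain volatile word:

	#include <stdint.h>

	/* Pulse a self-clearing TUNE_UP bit twice, waiting for the
	 * hardware to clear it after each pulse. */
	static int send_tune_request_twice(volatile uint32_t *reg,
					   uint32_t tune_up)
	{
		int pass, spin;

		for (pass = 0; pass < 2; pass++) {
			*reg |= tune_up;
			for (spin = 0; spin < 1000 && (*reg & tune_up); spin++)
				;	/* bounded busy-wait */
			if (*reg & tune_up)
				return -1;	/* never completed */
		}
		return 0;
	}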
+ */ + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + return 0; + + for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) { + if (sdhci_cdns_set_tune_val(host, i) || + mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */ + cur_streak = 0; + } else { /* good */ + cur_streak++; + if (cur_streak > max_streak) { + max_streak = cur_streak; + end_of_streak = i; + } + } + } + + if (!max_streak) { + dev_err(mmc_dev(host->mmc), "no tuning point found\n"); + return -EIO; + } + + return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); +} + static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { @@ -233,6 +306,7 @@ static const struct sdhci_ops sdhci_cdns_ops = { .get_timeout_clock = sdhci_cdns_get_timeout_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, + .platform_execute_tuning = sdhci_cdns_execute_tuning, .set_uhs_signaling = sdhci_cdns_set_uhs_signaling, }; @@ -245,78 +319,6 @@ static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = { .ops = &sdhci_cdns_ops, }; -static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) -{ - struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); - void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; - u32 tmp; - int i, ret; - - if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) - return -EINVAL; - - tmp = readl(reg); - tmp &= ~SDHCI_CDNS_HRS06_TUNE; - tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); - - /* - * Workaround for IP errata: - * The IP6116 SD/eMMC PHY design has a timing issue on receive data - * path. Send tune request twice. - */ - for (i = 0; i < 2; i++) { - tmp |= SDHCI_CDNS_HRS06_TUNE_UP; - writel(tmp, reg); - - ret = readl_poll_timeout(reg, tmp, - !(tmp & SDHCI_CDNS_HRS06_TUNE_UP), - 0, 1); - if (ret) - return ret; - } - - return 0; -} - -static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode) -{ - struct sdhci_host *host = mmc_priv(mmc); - int cur_streak = 0; - int max_streak = 0; - int end_of_streak = 0; - int i; - - /* - * This handler only implements the eMMC tuning that is specific to - * this controller. Fall back to the standard method for SD timing. 
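The loop above keeps a running streak of passing tune values and programs the midpoint of the longest one. Standalone, with the same end - max/2 arithmetic:

	#include <stdbool.h>

	/* Return the midpoint of the longest passing streak over n
	 * consecutive tune values, or -1 if every value failed. */
	static int longest_streak_midpoint(const bool *ok, int n)
	{
		int cur = 0, max = 0, end = 0, i;

		for (i = 0; i < n; i++) {
			if (!ok[i]) {
				cur = 0;
				continue;
			}
			if (++cur > max) {
				max = cur;
				end = i;
			}
		}

		return max ? end - max / 2 : -1;
	}

For example, with pass/fail results 0 1 1 1 1 0 1 1 the longest streak spans indices 1-4, so the value programmed is 4 - 4/2 = 2.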
- */ - if (host->timing != MMC_TIMING_MMC_HS200) - return sdhci_execute_tuning(mmc, opcode); - - if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200)) - return -EINVAL; - - for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) { - if (sdhci_cdns_set_tune_val(host, i) || - mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */ - cur_streak = 0; - } else { /* good */ - cur_streak++; - if (cur_streak > max_streak) { - max_streak = cur_streak; - end_of_streak = i; - } - } - } - - if (!max_streak) { - dev_err(mmc_dev(host->mmc), "no tuning point found\n"); - return -EIO; - } - - return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); -} - static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -377,7 +379,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev) priv->hrs_addr = host->ioaddr; priv->enhanced_strobe = false; host->ioaddr += SDHCI_CDNS_SRS_BASE; - host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; host->mmc_host_ops.hs400_enhanced_strobe = sdhci_cdns_hs400_enhanced_strobe; sdhci_enable_v4_mode(host); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index dccb4df46512..771676209005 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -87,7 +87,7 @@ #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 -#define ESDHC_TUNING_START_TAP_MASK 0xff +#define ESDHC_TUNING_START_TAP_MASK 0x7f #define ESDHC_TUNING_STEP_MASK 0x00070000 #define ESDHC_TUNING_STEP_SHIFT 16 @@ -1589,9 +1589,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); - int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + int dead; pm_runtime_get_sync(&pdev->dev); + dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3d0bb5e2e09b..8bed81cf03ad 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1096,7 +1096,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - int tuning_seq_cnt = 3; + int tuning_seq_cnt = 10; u8 phase, tuned_phases[16], tuned_phase_cnt = 0; int rc; struct mmc_ios ios = host->mmc->ios; @@ -1112,6 +1112,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ msm_host->use_cdr = true; + /* + * Clear tuning_done flag before tuning to ensure proper + * HS400 settings. + */ + msm_host->tuning_done = 0; + /* * For HS400 tuning in HS200 timing requires: * - select MCLK/2 in VENDOR_SPEC @@ -1146,6 +1152,22 @@ retry: } while (++phase < ARRAY_SIZE(tuned_phases)); if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably all phases are not really reliable + * but we didn't detect where the unreliable place is. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. 
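The retry policy in the msm hunk above follows from that reasoning: a sweep where every phase passes carries no information about where the bad region sits, so the sweep is repeated up to a bound. A sketch with hypothetical names (tune_once() stands in for one full 16-phase sweep):

	#define NUM_PHASES	16
	#define MAX_TUNING_RUNS	10

	/* tune_once() returns how many phases passed; NUM_PHASES passing
	 * is treated as an uninformative result and retried. */
	static int sweep_until_informative(int (*tune_once)(void))
	{
		int runs, cnt = 0;

		for (runs = 0; runs < MAX_TUNING_RUNS; runs++) {
			cnt = tune_once();
			if (cnt > 0 && cnt < NUM_PHASES)
				break;	/* some pass, some fail: usable */
		}
		return cnt;
	}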
+ */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + rc = msm_find_most_appropriate_phase(host, tuned_phases, tuned_phase_cnt); if (rc < 0) @@ -1736,7 +1758,9 @@ static const struct sdhci_ops sdhci_msm_ops = { static const struct sdhci_pltfm_data sdhci_msm_pdata = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &sdhci_msm_ops, }; @@ -1944,6 +1968,8 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 8962f6664381..47ddded57000 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if (WARN_ON(clock > host->max_clk)) clock = host->max_clk; - for (div = 1; div < 256; div *= 2) { + for (div = 2; div < 256; div *= 2) { if ((parent / div) <= clock) break; } diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index a5137845a1c7..6793fb8fe976 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -58,6 +58,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = { static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { .ops = &sdhci_dwcmshc_ops, .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; static int dwcmshc_probe(struct platform_device *pdev) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index fcfb50f84c8b..5922ae021d86 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -81,6 +81,7 @@ struct sdhci_esdhc { bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; bool quirk_delay_before_data_reset; + bool quirk_trans_complete_erratum; bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; @@ -734,23 +735,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u32 val; + u32 val, bus_width = 0; + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ if (esdhc->quirk_delay_before_data_reset && (mask & SDHCI_RESET_DATA) && (host->flags & SDHCI_REQ_USE_DMA)) mdelay(5); + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } + sdhci_reset(host, mask); - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. 
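The save/restore being added reduces to a generic pattern: read the field, perform the reset that clobbers the register, then merge the saved bits back. A standalone sketch over a plain memory word (the real code goes through sdhci_readl()/sdhci_writel()):

	#include <stdint.h>

	static void reset_preserving_field(volatile uint32_t *reg,
					   uint32_t mask,
					   void (*do_reset)(void))
	{
		uint32_t saved = *reg & mask;

		do_reset();		/* clobbers the whole register */
		*reg = (*reg & ~mask) | saved;
	}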
+ */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); - if (mask & SDHCI_RESET_ALL) { + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. + */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; sdhci_writel(host, val, ESDHC_TBCTL); + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. + */ if (esdhc->quirk_unreliable_pulse_detection) { val = sdhci_readl(host, ESDHC_DLLCFG1); val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; @@ -968,6 +1004,17 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) esdhc_tuning_block_enable(host, true); + /* + * The eSDHC controller takes the data timeout value into account + * during tuning. If the SD card is too slow sending the response, the + * timer will expire and a "Buffer Read Ready" interrupt without data + * is triggered. This leads to tuning errors. + * + * Just set the timeout to the maximum value because the core will + * already take care of it in sdhci_send_tuning(). + */ + sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL); + hs400_tuning = host->flags & SDHCI_HS400_TUNING; do { @@ -1047,10 +1094,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, static u32 esdhc_irq(struct sdhci_host *host, u32 intmask) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 command; - if (of_find_compatible_node(NULL, NULL, - "fsl,p2020-esdhc")) { + if (esdhc->quirk_trans_complete_erratum) { command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); if (command == MMC_WRITE_MULTIPLE_BLOCK && @@ -1164,6 +1212,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = { static struct soc_device_attribute soc_unreliable_pulse_detection[] = { { .family = "QorIQ LX2160A", .revision = "1.0", }, + { .family = "QorIQ LX2160A", .revision = "2.0", }, + { .family = "QorIQ LS1028A", .revision = "1.0", }, { }, }; @@ -1204,8 +1254,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { esdhc->quirk_delay_before_data_reset = true; + esdhc->quirk_trans_complete_erratum = true; + } clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 5091e2c1c0e5..a9151bd27211 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -24,6 +24,8 @@ #include <linux/iopoll.h> #include <linux/gpio.h> #include <linux/pm_runtime.h> +#include <linux/pm_qos.h> +#include <linux/debugfs.h> #include <linux/mmc/slot-gpio.h> #include <linux/mmc/sdhci-pci-data.h> #include <linux/acpi.h> @@ -232,6 +234,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc) sdhci_dumpregs(mmc_priv(mmc)); } +static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + 
/*****************************************************************************\ * * * Hardware specific quirk handling * @@ -510,8 +520,11 @@ struct intel_host { int drv_strength; bool d3_retune; bool rpm_retune_ok; + bool needs_pwr_off; u32 glk_rx_ctrl1; u32 glk_tun_val; + u32 active_ltr; + u32 idle_ltr; }; static const guid_t intel_dsm_guid = @@ -601,6 +614,9 @@ static int intel_select_drive_strength(struct mmc_card *card, struct sdhci_pci_slot *slot = sdhci_priv(host); struct intel_host *intel_host = sdhci_pci_priv(slot); + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) + return 0; + return intel_host->drv_strength; } @@ -632,9 +648,25 @@ out: static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) { + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); int cntr; u8 reg; + /* + * Bus power may control card power, but a full reset still may not + * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can. + * That might be needed to initialize correctly, if the card was left + * powered on previously. + */ + if (intel_host->needs_pwr_off) { + intel_host->needs_pwr_off = false; + if (mode != MMC_POWER_OFF) { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + usleep_range(10000, 12500); + } + } + sdhci_set_power(host, mode, vdd); if (mode == MMC_POWER_OFF) @@ -654,6 +686,15 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, } } +static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + /* Set UHS timing to SDR25 for High Speed mode */ + if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS) + timing = MMC_TIMING_UHS_SDR25; + sdhci_set_uhs_signaling(host, timing); +} + #define INTEL_HS400_ES_REG 0x78 #define INTEL_HS400_ES_BIT BIT(0) @@ -710,7 +751,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = { .enable_dma = sdhci_pci_enable_dma, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_uhs_signaling = sdhci_intel_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, }; @@ -719,8 +760,8 @@ static const struct sdhci_ops sdhci_intel_glk_ops = { .set_power = sdhci_intel_set_power, .enable_dma = sdhci_pci_enable_dma, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, - .set_uhs_signaling = sdhci_set_uhs_signaling, + .reset = sdhci_cqhci_reset, + .set_uhs_signaling = sdhci_intel_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, .irq = sdhci_cqhci_irq, }; @@ -753,6 +794,108 @@ static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) return 0; } +#define INTEL_ACTIVELTR 0x804 +#define INTEL_IDLELTR 0x808 + +#define INTEL_LTR_REQ BIT(15) +#define INTEL_LTR_SCALE_MASK GENMASK(11, 10) +#define INTEL_LTR_SCALE_1US (2 << 10) +#define INTEL_LTR_SCALE_32US (3 << 10) +#define INTEL_LTR_VALUE_MASK GENMASK(9, 0) + +static void intel_cache_ltr(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + + intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR); + intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR); +} + +static void intel_ltr_set(struct device *dev, s32 val) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + struct sdhci_pci_slot *slot = chip->slots[0]; + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + u32 ltr; + + pm_runtime_get_sync(dev); + + /* + * Program latency tolerance 
(LTR) according to what has been asked + * by the PM QoS layer, or disable it in case we were passed a + * negative value or PM_QOS_LATENCY_ANY. + */ + ltr = readl(host->ioaddr + INTEL_ACTIVELTR); + + if (val == PM_QOS_LATENCY_ANY || val < 0) { + ltr &= ~INTEL_LTR_REQ; + } else { + ltr |= INTEL_LTR_REQ; + ltr &= ~INTEL_LTR_SCALE_MASK; + ltr &= ~INTEL_LTR_VALUE_MASK; + + if (val > INTEL_LTR_VALUE_MASK) { + val >>= 5; + if (val > INTEL_LTR_VALUE_MASK) + val = INTEL_LTR_VALUE_MASK; + ltr |= INTEL_LTR_SCALE_32US | val; + } else { + ltr |= INTEL_LTR_SCALE_1US | val; + } + } + + if (ltr == intel_host->active_ltr) + goto out; + + writel(ltr, host->ioaddr + INTEL_ACTIVELTR); + writel(ltr, host->ioaddr + INTEL_IDLELTR); + + /* Cache the values into lpss structure */ + intel_cache_ltr(slot); +out: + pm_runtime_put_autosuspend(dev); +} + +static bool intel_use_ltr(struct sdhci_pci_chip *chip) +{ + switch (chip->pdev->device) { + case PCI_DEVICE_ID_INTEL_BYT_EMMC: + case PCI_DEVICE_ID_INTEL_BYT_EMMC2: + case PCI_DEVICE_ID_INTEL_BYT_SDIO: + case PCI_DEVICE_ID_INTEL_BYT_SD: + case PCI_DEVICE_ID_INTEL_BSW_EMMC: + case PCI_DEVICE_ID_INTEL_BSW_SDIO: + case PCI_DEVICE_ID_INTEL_BSW_SD: + return false; + default: + return true; + } +} + +static void intel_ltr_expose(struct sdhci_pci_chip *chip) +{ + struct device *dev = &chip->pdev->dev; + + if (!intel_use_ltr(chip)) + return; + + dev->power.set_latency_tolerance = intel_ltr_set; + dev_pm_qos_expose_latency_tolerance(dev); +} + +static void intel_ltr_hide(struct sdhci_pci_chip *chip) +{ + struct device *dev = &chip->pdev->dev; + + if (!intel_use_ltr(chip)) + return; + + dev_pm_qos_hide_latency_tolerance(dev); + dev->power.set_latency_tolerance = NULL; +} + static void byt_probe_slot(struct sdhci_pci_slot *slot) { struct mmc_host_ops *ops = &slot->host->mmc_host_ops; @@ -767,6 +910,43 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot) ops->start_signal_voltage_switch = intel_start_signal_voltage_switch; device_property_read_u32(dev, "max-frequency", &mmc->f_max); + + if (!mmc->slotno) { + slot->chip->slots[mmc->slotno] = slot; + intel_ltr_expose(slot->chip); + } +} + +static void byt_add_debugfs(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct mmc_host *mmc = slot->host->mmc; + struct dentry *dir = mmc->debugfs_root; + + if (!intel_use_ltr(slot->chip)) + return; + + debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr); + debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr); + + intel_cache_ltr(slot); +} + +static int byt_add_host(struct sdhci_pci_slot *slot) +{ + int ret = sdhci_add_host(slot->host); + + if (!ret) + byt_add_debugfs(slot); + return ret; +} + +static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead) +{ + struct mmc_host *mmc = slot->host->mmc; + + if (!mmc->slotno) + intel_ltr_hide(slot->chip); } static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) @@ -787,7 +967,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && - dmi_match(DMI_BIOS_VENDOR, "LENOVO"); + (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || + dmi_match(DMI_SYS_VENDOR, "IRBIS")); } static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) @@ -847,6 +1028,8 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot) if (ret) goto cleanup; + byt_add_debugfs(slot); + return 0; cleanup: @@ -973,6 +1156,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot
*slot) return 0; } +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL); + + intel_host->needs_pwr_off = reg & SDHCI_POWER_ON; +} + static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) { byt_probe_slot(slot); @@ -990,6 +1181,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3) slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V; + byt_needs_pwr_off(slot); + return 0; } @@ -1024,6 +1217,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { #endif .allow_runtime_pm = true, .probe_slot = byt_emmc_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_NO_LED, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | @@ -1037,6 +1232,7 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { .allow_runtime_pm = true, .probe_slot = glk_emmc_probe_slot, .add_host = glk_emmc_add_host, + .remove_slot = byt_remove_slot, #ifdef CONFIG_PM_SLEEP .suspend = sdhci_cqhci_suspend, .resume = sdhci_cqhci_resume, @@ -1067,6 +1263,8 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .allow_runtime_pm = true, .probe_slot = ni_byt_sdio_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, .ops = &sdhci_intel_byt_ops, .priv_size = sizeof(struct intel_host), }; @@ -1084,6 +1282,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .allow_runtime_pm = true, .probe_slot = byt_sdio_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, .ops = &sdhci_intel_byt_ops, .priv_size = sizeof(struct intel_host), }; @@ -1103,6 +1303,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .allow_runtime_pm = true, .own_cd_for_runtime_pm = true, .probe_slot = byt_sd_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, .ops = &sdhci_intel_byt_ops, .priv_size = sizeof(struct intel_host), }; @@ -1732,6 +1934,8 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd), SDHCI_PCI_DEVICE(O2, 8120, o2), SDHCI_PCI_DEVICE(O2, 8220, o2), SDHCI_PCI_DEVICE(O2, 8221, o2), diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index ce15a05f23d4..fd76aa672e02 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -26,6 +26,9 @@ #define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26) #define GLI_9750_DRIVING_1_VALUE 0xFFF #define GLI_9750_DRIVING_2_VALUE 0x3 +#define SDHCI_GLI_9750_SEL_1 BIT(29) +#define SDHCI_GLI_9750_SEL_2 BIT(31) +#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30)) #define SDHCI_GLI_9750_PLL 0x864 #define SDHCI_GLI_9750_PLL_TX2_INV BIT(23) @@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host) GLI_9750_DRIVING_1_VALUE); driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2, GLI_9750_DRIVING_2_VALUE); + driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST); + driving_value |= SDHCI_GLI_9750_SEL_2; sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING); sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4; @@ -334,6 +339,18 @@ static u32 
sdhci_gl9750_readl(struct sdhci_host *host, int reg) return value; } +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + + pci_free_irq_vectors(slot->chip->pdev); + gli_pcie_enable_msi(slot); + + return sdhci_pci_resume_host(chip); +} +#endif + static const struct sdhci_ops sdhci_gl9755_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, @@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = { .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, .probe_slot = gli_probe_slot_gl9755, .ops = &sdhci_gl9755_ops, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_gli_resume, +#endif }; static const struct sdhci_ops sdhci_gl9750_ops = { @@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = { .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, .probe_slot = gli_probe_slot_gl9750, .ops = &sdhci_gl9750_ops, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_gli_resume, +#endif }; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index fa8105087d68..41a2394313dd 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -561,6 +561,12 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; } + if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) { + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; + host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning; if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 981bbbe63aff..779156ce1ee1 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -57,6 +57,8 @@ #define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5 #define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4 #define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4 +#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 #define PCI_DEVICE_ID_VIA_95D0 0x95d0 diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index d07b9793380f..10705e5fa90e 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -665,14 +665,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); - struct mmc_host *mmc = host->mmc; - mmc_remove_host(mmc); + sdhci_remove_host(host, 0); + clk_disable_unprepare(sprd_host->clk_sdio); clk_disable_unprepare(sprd_host->clk_enable); clk_disable_unprepare(sprd_host->clk_2x_enable); - mmc_free_host(mmc); + sdhci_pltfm_free(pdev); return 0; } diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index a25c3a4d3f6c..c105356ad4cb 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -100,6 +100,12 @@ #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9) +/* + * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra + * SDMMC hardware data timeout. 
+ */ +#define NVQUIRK_HAS_TMCLK BIT(10) + /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 @@ -130,6 +136,7 @@ struct sdhci_tegra_autocal_offsets { struct sdhci_tegra { const struct sdhci_tegra_soc_data *soc_data; struct gpio_desc *power_gpio; + struct clk *tmclk; bool ddr_signaling; bool pad_calib_required; bool pad_control_available; @@ -1370,7 +1377,6 @@ static const struct sdhci_ops tegra210_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1386,7 +1392,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 106, .max_tap_delay = 185, }; @@ -1407,7 +1414,6 @@ static const struct sdhci_ops tegra186_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1424,6 +1430,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = { NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK | NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING, .min_tap_delay = 84, .max_tap_delay = 136, @@ -1436,7 +1443,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 96, .max_tap_delay = 139, }; @@ -1564,6 +1572,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev) goto err_power_req; } + /* + * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host + * timeout clock, and SW can choose TMCLK or SDCLK for hardware + * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of + * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL. + * + * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses + * the 12 MHz TMCLK, which is advertised in the host capability + * register. With a 12 MHz TMCLK, the maximum achievable data timeout + * period is 11 s, which is better than what SDCLK can provide. + * + * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs + * supporting a separate TMCLK.
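The 11 s figure is consistent with the standard SDHCI data timeout encoding, under which the counter expires after 2^(count + 13) timeout-clock cycles and the largest count value is 14; a quick standalone check (plain userspace C, not driver code):

	#include <stdio.h>

	int main(void)
	{
		const double tmclk_hz = 12e6;		/* 12 MHz TMCLK */
		const unsigned int max_count = 14;	/* largest DTCV */
		unsigned long long cycles = 1ULL << (max_count + 13);

		/* 2^27 cycles / 12 MHz ~= 11.2 s */
		printf("max data timeout: %.1f s\n",
		       (double)cycles / tmclk_hz);
		return 0;
	}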
+ */ + + if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { + clk = devm_clk_get(&pdev->dev, "tmclk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_power_req; + + dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); + clk = NULL; + } + + clk_set_rate(clk, 12000000); + rc = clk_prepare_enable(clk); + if (rc) { + dev_err(&pdev->dev, + "failed to enable tmclk: %d\n", rc); + goto err_power_req; + } + + tegra_host->tmclk = clk; + } + clk = devm_clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { rc = PTR_ERR(clk); @@ -1607,6 +1652,7 @@ err_add_host: err_rst_get: clk_disable_unprepare(pltfm_host->clk); err_clk_get: + clk_disable_unprepare(tegra_host->tmclk); err_power_req: err_parse_dt: sdhci_pltfm_free(pdev); @@ -1624,6 +1670,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev) reset_control_assert(tegra_host->rst); usleep_range(2000, 4000); clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(tegra_host->tmclk); sdhci_pltfm_free(pdev); diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 1dea1ba66f7b..5f57e78e5f13 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -167,7 +167,12 @@ static void xenon_reset_exit(struct sdhci_host *host, /* Disable tuning request and auto-retuning again */ xenon_retune_setup(host); - xenon_set_acg(host, true); + /* + * The ACG should be turned off at early init time, in order + * to solve possible issues with 1.8V regulator stabilization. + * The feature is re-enabled at a later stage. + */ + xenon_set_acg(host, false); xenon_set_sdclk_off_idle(host, sdhc_id, false); @@ -235,6 +240,16 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5 ms after setting the 1.8V signal enable bit */ usleep_range(5000, 5500); + + /* + * For some reason the controller's Host Control2 register reports + * the bit representing 1.8V signaling as 0 when read after it was + * written as 1. A subsequent read reports 1. + * + * Since this may cause some issues, do an empty read of the Host + * Control2 register here to circumvent this.
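The write-then-dummy-read idiom the comment calls for, shown in isolation (a volatile pointer stands in for the sdhci_readw()/sdhci_writew() accessors):

	#include <stdint.h>

	static void enable_1v8_signaling(volatile uint16_t *host_ctrl2,
					 uint16_t sig_1v8_bit)
	{
		*host_ctrl2 |= sig_1v8_bit;
		(void)*host_ctrl2;	/* discarded read; settles the bit */
	}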
+ */ + sdhci_readw(host, SDHCI_HOST_CONTROL2); } static const struct sdhci_ops sdhci_xenon_ops = { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 4478b94d4791..92709232529a 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -152,7 +152,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) u32 present; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || - !mmc_card_is_removable(host->mmc)) + !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) return; if (enable) { @@ -981,7 +981,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) { if (enable) host->ier |= SDHCI_INT_DATA_TIMEOUT; @@ -990,28 +990,31 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); + +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); + } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); +} +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - u8 count; - - if (host->ops->set_timeout) { + if (host->ops->set_timeout) host->ops->set_timeout(host, cmd); - } else { - bool too_big = false; - - count = sdhci_calc_timeout(host, cmd, &too_big); - - if (too_big && - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { - sdhci_calc_sw_timeout(host, cmd); - sdhci_set_data_timeout_irq(host, false); - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { - sdhci_set_data_timeout_irq(host, true); - } - - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); - } + else + __sdhci_set_timeout(host, cmd); } static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) @@ -1159,9 +1162,11 @@ static inline void sdhci_auto_cmd_select(struct sdhci_host *host, /* * In case of Version 4.10 or later, use of 'Auto CMD Auto * Select' is recommended rather than use of 'Auto CMD12 - * Enable' or 'Auto CMD23 Enable'. + * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode + * here because some controllers (e.g sdhci-of-dwmshc) expect it. */ - if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) { + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (use_cmd12 || use_cmd23)) { *mode |= SDHCI_TRNS_AUTO_SEL; ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); @@ -2642,6 +2647,37 @@ static bool sdhci_request_done(struct sdhci_host *host) return true; } + /* + * The controller needs a reset of internal state machines + * upon error conditions. + */ + if (sdhci_needs_reset(host, mrq)) { + /* + * Do not finish until command and data lines are available for + * reset. Note there can only be one other mrq, so it cannot + * also be in mrqs_done, otherwise host->cmd and host->data_cmd + * would both be null. 
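Reduced to control flow, the deferral that follows works as in this sketch; the callbacks stand in for the CMD/DATA reset writes, which are kept separate because some controllers misbehave when both reset bits are set at once:

	#include <stdbool.h>

	/* True when the errored request can be finished now; false means
	 * the caller must retry from the next completion. */
	static bool finish_errored_request(bool cmd_busy, bool data_busy,
					   void (*reset_cmd)(void),
					   void (*reset_data)(void))
	{
		if (cmd_busy || data_busy)
			return false;

		reset_cmd();
		reset_data();
		return true;
	}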
+ */ + if (host->cmd || host->data_cmd) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + + /* Some controllers need this kick or reset won't work here */ + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) + /* This is to force an update */ + host->ops->set_clock(host, host->clock); + + /* + * Spec says we should do both at the same time, but Ricoh + * controllers do not like that. + */ + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + + host->pending_reset = false; + } + /* * Always unmap the data buffers if they were mapped by * sdhci_prepare_data() whenever we finish with a request. @@ -2694,35 +2730,6 @@ static bool sdhci_request_done(struct sdhci_host *host) } } - /* - * The controller needs a reset of internal state machines - * upon error conditions. - */ - if (sdhci_needs_reset(host, mrq)) { - /* - * Do not finish until command and data lines are available for - * reset. Note there can only be one other mrq, so it cannot - * also be in mrqs_done, otherwise host->cmd and host->data_cmd - * would both be null. - */ - if (host->cmd || host->data_cmd) { - spin_unlock_irqrestore(&host->lock, flags); - return true; - } - - /* Some controllers need this kick or reset won't work here */ - if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) - /* This is to force an update */ - host->ops->set_clock(host, host->clock); - - /* Spec says we should do both at the same time, but Ricoh - controllers do not like that. */ - sdhci_do_reset(host, SDHCI_RESET_CMD); - sdhci_do_reset(host, SDHCI_RESET_DATA); - - host->pending_reset = false; - } - host->mrqs_done[i] = NULL; spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index fe83ece6965b..76e69288632d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -795,5 +795,7 @@ void sdhci_end_tuning(struct sdhci_host *host); void sdhci_reset_tuning(struct sdhci_host *host); void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index dec5a99f52cf..25083f010a7a 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1285,12 +1285,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) cancel_work_sync(&host->done); cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); - pm_runtime_dont_use_autosuspend(&pdev->dev); if (host->native_hotplug) pm_runtime_put_noidle(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); } EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 0c72ec5546c3..b2e28ecae4a5 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -614,11 +614,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) } } - ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, - dev_name(dev), host); - if (ret) - goto free_host; - if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) host->dma_ops = &uniphier_sd_internal_dma_ops; else @@ -644,10 +639,19 @@ static int 
uniphier_sd_probe(struct platform_device *pdev) ret = tmio_mmc_host_probe(host); if (ret) - goto free_host; + goto disable_clk; + + ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, + dev_name(dev), host); + if (ret) + goto remove_host; return 0; +remove_host: + tmio_mmc_host_remove(host); +disable_clk: + uniphier_sd_clk_disable(host); free_host: tmio_mmc_host_free(host); @@ -660,6 +664,7 @@ static int uniphier_sd_remove(struct platform_device *pdev) tmio_mmc_host_remove(host); uniphier_sd_clk_disable(host); + tmio_mmc_host_free(host); return 0; } diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b11ac2314328..6eba2441c7ef 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1860,10 +1860,12 @@ static int usdhi6_probe(struct platform_device *pdev) ret = mmc_add_host(mmc); if (ret < 0) - goto e_clk_off; + goto e_release_dma; return 0; +e_release_dma: + usdhi6_dma_release(host); e_clk_off: clk_disable_unprepare(host->clk); e_free_mmc: diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index f4ac064ff471..d12a068b0f9e 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -319,6 +319,8 @@ struct via_crdr_mmc_host { /* some devices need a very long delay for power to stabilize */ #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 +#define VIA_CMD_TIMEOUT_MS 1000 + static const struct pci_device_id via_ids[] = { {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, @@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host, { void __iomem *addrbase; struct mmc_data *data; + unsigned int timeout_ms; u32 cmdctrl = 0; WARN_ON(host->cmd); data = cmd->data; - mod_timer(&host->timer, jiffies + HZ); host->cmd = cmd; + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); + /*Command index*/ cmdctrl = cmd->opcode << 8; @@ -1254,11 +1259,14 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) { struct via_crdr_mmc_host *host; + unsigned long flags; host = pci_get_drvdata(pcidev); + spin_lock_irqsave(&host->lock, flags); via_save_pcictrlreg(host); via_save_sdcreg(host); + spin_unlock_irqrestore(&host->lock, flags); pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a4f2d8cdca12..c8b9ab40a102 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -794,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) kfree(mtd->eraseregions); kfree(mtd); kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); return NULL; } diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 931e5c2481b5..b50ec7ecd10c 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -243,22 +243,25 @@ static int phram_setup(const char *val) ret = parse_num64(&start, token[1]); if (ret) { - kfree(name); parse_err("illegal start address\n"); + goto error; } ret = parse_num64(&len, token[2]); if (ret) { - kfree(name); parse_err("illegal device length\n"); + goto error; } ret = register_device(name, start, len); - if (!ret) - pr_info("%s device: %#llx at %#llx\n", name, len, start); - else - kfree(name); + if (ret) + goto error; + pr_info("%s device: %#llx at %#llx\n", name, len, start); + return 0; + +error: + kfree(name); return ret; } diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c index 0f1547f09d08..72f5c7b30079 100644 --- a/drivers/mtd/lpddr/lpddr2_nvm.c +++ b/drivers/mtd/lpddr/lpddr2_nvm.c @@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add, return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); } +static const struct mtd_info lpddr2_nvm_mtd_info = { + .type = MTD_RAM, + .writesize = 1, + .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), + ._read = lpddr2_nvm_read, + ._write = lpddr2_nvm_write, + ._erase = lpddr2_nvm_erase, + ._unlock = lpddr2_nvm_unlock, + ._lock = lpddr2_nvm_lock, +}; + /* * lpddr2_nvm driver probe method */ @@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) .pfow_base = OW_BASE_ADDRESS, .fldrv_priv = pcm_data, }; + if (IS_ERR(map->virt)) return PTR_ERR(map->virt); @@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) return PTR_ERR(pcm_data->ctl_regs); /* Populate mtd_info data structure */ - *mtd = (struct mtd_info) { - .dev = { .parent = &pdev->dev }, - .name = pdev->dev.init_name, - .type = MTD_RAM, - .priv = map, - .size = resource_size(add_range), - .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width, - .writesize = 1, - .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width, - .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), - ._read = lpddr2_nvm_read, - ._write = lpddr2_nvm_write, - ._erase = lpddr2_nvm_erase, - ._unlock = lpddr2_nvm_unlock, - ._lock = lpddr2_nvm_lock, - }; + *mtd = lpddr2_nvm_mtd_info; + mtd->dev.parent = &pdev->dev; + mtd->name = pdev->dev.init_name; + mtd->priv = map; + mtd->size = resource_size(add_range); + mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width; + mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width; /* 
Verify the presence of the device looking for PFOW string */ if (!lpddr2_nvm_pfow_present(map)) { diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index 1efc643c9871..9341a8a592e8 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared), GFP_KERNEL); if (!shared) { - kfree(lpddr); kfree(mtd); return NULL; } diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 975aed94f06c..ee437af41f11 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -354,9 +354,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint32_t retlen; int ret = 0; - if (!(file->f_mode & FMODE_WRITE)) - return -EPERM; - if (length > 4096) return -EINVAL; @@ -641,6 +638,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) pr_debug("MTD_ioctl\n"); + /* + * Check the file mode to require "dangerous" commands to have write + * permissions. + */ + switch (cmd) { + /* "safe" commands */ + case MEMGETREGIONCOUNT: + case MEMGETREGIONINFO: + case MEMGETINFO: + case MEMREADOOB: + case MEMREADOOB64: + case MEMISLOCKED: + case MEMGETOOBSEL: + case MEMGETBADBLOCK: + case OTPSELECT: + case OTPGETREGIONCOUNT: + case OTPGETREGIONINFO: + case ECCGETLAYOUT: + case ECCGETSTATS: + case MTDFILEMODE: + case BLKPG: + case BLKRRPART: + break; + + /* "dangerous" commands */ + case MEMERASE: + case MEMERASE64: + case MEMLOCK: + case MEMUNLOCK: + case MEMSETBADBLOCK: + case MEMWRITEOOB: + case MEMWRITEOOB64: + case MEMWRITE: + case OTPLOCK: + if (!(file->f_mode & FMODE_WRITE)) + return -EPERM; + break; + + default: + return -ENOTTY; + } + switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) @@ -688,9 +727,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct erase_info *erase; - if(!(file->f_mode & FMODE_WRITE)) - return -EPERM; - erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; @@ -983,9 +1019,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) ret = 0; break; } - - default: - ret = -ENOTTY; } return ret; @@ -1029,6 +1062,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, struct mtd_oob_buf32 buf; struct mtd_oob_buf32 __user *buf_user = argp; + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EPERM; + break; + } + if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 6cc7ecb0c788..32a76b8feaa5 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -563,7 +563,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) config.id = -1; config.dev = &mtd->dev; - config.name = mtd->name; + config.name = dev_name(&mtd->dev); config.owner = THIS_MODULE; config.reg_read = mtd_nvmem_reg_read; config.size = mtd->size; @@ -825,6 +825,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, /* Prefer parsed partitions over driver-provided fallback */ ret = parse_mtd_partitions(mtd, types, parser_data); + if (ret == -EPROBE_DEFER) + goto out; + if (ret > 0) ret = 0; else if (nr_parts) diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 4ced68be7ed7..774970bfcf85 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, kmsg_dump_get_buffer(dumper, true, 
cxt->oops_buf + MTDOOPS_HEADER_SIZE, record_size - MTDOOPS_HEADER_SIZE, NULL); - /* Panics must be written immediately */ - if (reason != KMSG_DUMP_OOPS) + if (reason != KMSG_DUMP_OOPS) { + /* Panics must be written immediately */ mtdoops_write(cxt, 1); - - /* For other cases, schedule work to write it "nicely" */ - schedule_work(&cxt->work_write); + } else { + /* For other cases, schedule work to write it "nicely" */ + schedule_work(&cxt->work_write); + } } static void mtdoops_notify_add(struct mtd_info *mtd) diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 8d6be90a6fe8..23d11e8b5644 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -813,10 +813,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf, NULL, 0, chip->ecc.strength); - if (ret >= 0) + if (ret >= 0) { + mtd->ecc_stats.corrected += ret; max_bitflips = max(ret, max_bitflips); - else + } else { mtd->ecc_stats.failed++; + } databuf += chip->ecc.size; eccbuf += chip->ecc.bytes; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 15ef30b368a5..0f3c09fb9c34 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -537,8 +537,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } else { ctrl->cs_offsets = brcmnand_cs_offsets; - /* v5.0 and earlier has a different CS0 offset layout */ - if (ctrl->nand_version <= 0x0500) + /* v3.3-5.0 have a different CS0 offset layout */ + if (ctrl->nand_version >= 0x0303 && + ctrl->nand_version <= 0x0500) ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; } @@ -1019,11 +1020,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, if (!section) { /* * Small-page NAND uses byte 6 for BBI while large-page - * NAND use byte 0. + * NAND uses bytes 0 and 1.
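The ooblayout change here reserves two bytes rather than one because the factory bad block indicator (BBI/BBM) on large-page NAND occupies OOB bytes 0 and 1. As a rough illustration of why those bytes must stay out of the "free" OOB region, this is how a raw OOB dump is typically tested for the marker (a sketch, not brcmnand code):

    /* Sketch: factory bad-block marker check on a raw OOB dump. */
    static bool oob_has_factory_bbm(const u8 *oob, unsigned int page_size)
    {
        if (page_size > 512)        /* large page: OOB bytes 0 and 1 */
            return oob[0] != 0xff || oob[1] != 0xff;
        return oob[6] != 0xff;      /* small page: OOB byte 6 */
    }

If user data were allowed to land on those bytes, any non-0xff value written there would later read back as a bad-block marker.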
*/ - if (cfg->page_size > 512) - oobregion->offset++; - oobregion->length--; + if (cfg->page_size > 512) { + oobregion->offset += 2; + oobregion->length -= 2; + } else { + oobregion->length--; + } } } @@ -1787,28 +1791,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, struct nand_chip *chip, void *buf, u64 addr) { - int i, sas; - void *oob = chip->oob_poi; + struct mtd_oob_region ecc; + int i; int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_bytes; void *ecc_chunk; if (!buf) buf = nand_get_data_buf(chip); - sas = mtd->oobsize / chip->ecc.steps; - /* read without ecc for verification */ ret = chip->ecc.read_page_raw(chip, buf, true, page); if (ret) return ret; - for (i = 0; i < chip->ecc.steps; i++, oob += sas) { + for (i = 0; i < chip->ecc.steps; i++) { ecc_chunk = buf + chip->ecc.size * i; - ret = nand_check_erased_ecc_chunk(ecc_chunk, - chip->ecc.size, - oob, sas, NULL, 0, + + mtd_ooblayout_ecc(mtd, i, &ecc); + ecc_bytes = chip->oob_poi + ecc.offset; + + ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size, + ecc_bytes, ecc.length, + NULL, 0, chip->ecc.strength); if (ret < 0) return ret; @@ -2357,6 +2364,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip) ret = brcmstb_choose_ecc_layout(host); + /* If OOB is written with ECC enabled it will cause ECC errors */ + if (is_hamming_ecc(host->ctrl, &host->hwcfg)) { + chip->ecc.write_oob = brcmnand_write_oob_raw; + chip->ecc.read_oob = brcmnand_read_oob_raw; + } + return ret; } diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index c0e1a8ebe820..522390b99d3c 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1609,13 +1609,10 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_release is necessary here, as + /* DBB note: I believe nand_cleanup is necessary here, as buffers may have been allocated in nand_base. Check with Thomas. FIX ME! */ - /* nand_release will call mtd_device_unregister, but we haven't yet added it. This is handled without incident by mtd_device_unregister, as far as I can tell.
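The diskonchip hunk here is the first of many identical conversions in this section (ingenic, marvell, mtk, orion, plat_nand, pasemi, sharpsl, socrates, sunxi, tmio and xway all follow). The rule they encode: nand_release() is mtd_device_unregister() plus nand_cleanup(), so it is only valid once the MTD device has actually been registered. A sketch of a probe path under that rule:

    err = nand_scan(chip, 1);
    if (err)
        return err;

    err = mtd_device_register(nand_to_mtd(chip), NULL, 0);
    if (err) {
        /* Never registered: unregistering would be bogus, so only
         * undo the scan state. */
        nand_cleanup(chip);
        return err;
    }

    /* From here on, teardown goes through nand_release() (or an
     * explicit mtd_device_unregister() + nand_cleanup() pair). */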
*/ - nand_release(nand); + nand_cleanup(nand); goto fail; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 1054cc070747..20b0ee174dc6 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip) static void fun_wait_rnb(struct fsl_upm_nand *fun) { if (fun->rnb_gpio[fun->mchip_number] >= 0) { - struct mtd_info *mtd = nand_to_mtd(&fun->chip); int cnt = 1000000; while (--cnt && !fun_chip_ready(&fun->chip)) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index a6964feeec77..81e4b0f46662 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -1066,11 +1066,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) host->read_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->read_dma_chan) { dev_err(&pdev->dev, "Unable to get read dma channel\n"); + ret = -ENODEV; goto disable_clk; } host->write_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->write_dma_chan) { dev_err(&pdev->dev, "Unable to get write dma channel\n"); + ret = -ENODEV; goto release_dma_read_chan; } } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index b9d5d55a5edb..60f146920b9f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -149,8 +149,10 @@ static int gpmi_init(struct gpmi_nand_data *this) int ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) @@ -540,8 +542,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this) return ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(this->dev); return ret; + } /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this @@ -2404,7 +2408,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, void *buf_read = NULL; const void *buf_write = NULL; bool direct = false; - struct completion *completion; + struct completion *dma_completion, *bch_completion; unsigned long to; this->ntransfers = 0; @@ -2412,8 +2416,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->transfers[i].direction = DMA_NONE; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } /* * This driver currently supports only one NAND chip. 
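The gpmi fixes here all address the same pitfall: pm_runtime_get_sync() increments the usage counter even when it fails, so the error path must drop that reference or the device may never runtime-suspend again. The canonical shape of the fix (later kernels wrap it as pm_runtime_resume_and_get(), mentioned only for orientation; it may not exist in the tree at hand):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        /* The usage counter was bumped even though resume failed. */
        pm_runtime_put_noidle(dev);
        return ret;
    }

    /* ... device is powered, do the work ... */

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);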
Plus, dies share @@ -2496,22 +2502,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); } + desc->callback = dma_irq_callback; + desc->callback_param = this; + dma_completion = &this->dma_done; + bch_completion = NULL; + + init_completion(dma_completion); + if (this->bch && buf_read) { writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_SET); - completion = &this->bch_done; - } else { - desc->callback = dma_irq_callback; - desc->callback_param = this; - completion = &this->dma_done; + bch_completion = &this->bch_done; + init_completion(bch_completion); } - init_completion(completion); - dmaengine_submit(desc); dma_async_issue_pending(get_dma_chan(this)); - to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); + to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000)); if (!to) { dev_err(this->dev, "DMA timeout, last DMA\n"); gpmi_dump_info(this); @@ -2519,6 +2527,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, goto unmap; } + if (this->bch && buf_read) { + to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000)); + if (!to) { + dev_err(this->dev, "BCH timeout, last DMA\n"); + gpmi_dump_info(this); + ret = -ETIMEDOUT; + goto unmap; + } + } + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_CLR); gpmi_clear_bch(this); @@ -2580,7 +2598,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) this->bch_geometry.auxiliary_size = 128; ret = gpmi_alloc_dma_buffer(this); if (ret) - goto err_out; + return ret; nand_controller_init(&this->base); this->base.ops = &gpmi_nand_controller_ops; diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 49afebee50db..4b7c399d4f4b 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -376,7 +376,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = mtd_device_register(mtd, NULL, 0); if (ret) { - nand_release(chip); + nand_cleanup(chip); return ret; } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index fc49e13d81ec..ee4afa17d8a3 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) * In case the interrupt was not served in the required time frame, * check if the ISR was not served or if something went actually wrong. 
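The gpmi_nfc_exec_op() rework above stops multiplexing one completion between the DMA and BCH interrupts: a read that goes through the BCH engine raises two distinct events, and waiting on a single shared completion could return after only the first one had fired. Reduced to its shape (function and parameter names illustrative, timeout as in the patch):

    /* Sketch: wait for the DMA interrupt, then the BCH interrupt. */
    static int foo_wait_dma_then_bch(struct completion *dma_done,
                                     struct completion *bch_done,
                                     bool uses_bch)
    {
        if (!wait_for_completion_timeout(dma_done, msecs_to_jiffies(1000)))
            return -ETIMEDOUT;      /* DMA never signalled */

        /* Only a BCH-assisted read raises the second interrupt. */
        if (uses_bch &&
            !wait_for_completion_timeout(bch_done, msecs_to_jiffies(1000)))
            return -ETIMEDOUT;      /* ECC engine never signalled */

        return 0;
    }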
*/ - if (ret && !pending) { + if (!ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } @@ -2664,7 +2664,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(chip); + nand_cleanup(chip); return ret; } @@ -2673,6 +2673,16 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, return 0; } +static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) +{ + struct marvell_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + nand_release(&entry->chip); + list_del(&entry->node); + } +} + static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) { struct device_node *np = dev->of_node; @@ -2707,21 +2717,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) ret = marvell_nand_chip_init(dev, nfc, nand_np); if (ret) { of_node_put(nand_np); - return ret; + goto cleanup_chips; } } return 0; -} -static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) -{ - struct marvell_nand_chip *entry, *temp; +cleanup_chips: + marvell_nand_chips_cleanup(nfc); - list_for_each_entry_safe(entry, temp, &nfc->chips, node) { - nand_release(&entry->chip); - list_del(&entry->node); - } + return ret; } static int marvell_nfc_init_dma(struct marvell_nfc *nfc) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 1b82b687e5a5..ab7ab6a279aa 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf, } static void meson_nfc_dma_buffer_release(struct nand_chip *nand, - int infolen, int datalen, + int datalen, int infolen, enum dma_data_direction dir) { struct meson_nfc *nfc = nand_get_controller_data(nand); @@ -1041,9 +1041,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc) ret = clk_set_rate(nfc->device_clk, 24000000); if (ret) - goto err_phase_rx; + goto err_disable_rx; return 0; + +err_disable_rx: + clk_disable_unprepare(nfc->phase_rx); err_phase_rx: clk_disable_unprepare(nfc->phase_tx); err_phase_tx: diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 373d47d1ba4c..08008c844a47 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "mtd parse partition error\n"); - nand_release(nand); + nand_cleanup(nand); return ret; } diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index f64e3b6605c6..db66c1be6e5f 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -731,8 +731,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy); int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms) { - /* Wait until R/B pin indicates chip is ready or timeout occurs */ - timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); + + /* + * Wait until R/B pin indicates chip is ready or timeout occurs. + * The +1 below is necessary because if we are now in the last fraction + * of a jiffy and msecs_to_jiffies() returns 1, we would wait only that + * small jiffy fraction - possibly leading to a false timeout.
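A worked case of the rounding hazard this comment describes, assuming HZ=100 (one jiffy = 10 ms): msecs_to_jiffies(1) returns 1, and if the current jiffy is 9.9 ms old when the wait starts, a deadline of jiffies + 1 is reached after roughly 0.1 ms instead of the requested 1 ms. The +1 trades a slightly longer worst case for a guaranteed minimum wait; the same guard, reduced to a self-contained loop with the patch's own GPIO predicate:

    /* Guarantees at least timeout_ms of real time before giving up. */
    unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms) + 1;

    do {
        if (gpiod_get_value_cansleep(gpiod))
            return 0;
        cond_resched();
    } while (time_before(jiffies, deadline));

    return -ETIMEDOUT;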
+ */ + timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { if (gpiod_get_value_cansleep(gpiod)) return 0; @@ -5907,6 +5913,8 @@ void nand_cleanup(struct nand_chip *chip) chip->ecc.algo == NAND_ECC_BCH) nand_bch_free((struct nand_bch_control *)chip->ecc.priv); + nanddev_cleanup(&chip->base); + /* Free bad block table memory */ kfree(chip->bbt); kfree(chip->data_buf); diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 0b879bd0a68c..8fe8d7bdd203 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -173,7 +173,7 @@ int nand_onfi_detect(struct nand_chip *chip) } if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == - le16_to_cpu(p->crc)) { + le16_to_cpu(p[i].crc)) { if (i) memcpy(p, &p[i], sizeof(*p)); break; diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index f64b06a71dfa..f12b7a7844c9 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -314,10 +314,9 @@ int onfi_fill_data_interface(struct nand_chip *chip, /* microseconds -> picoseconds */ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX; timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - timings->tR_max = 1000000ULL * 200000000ULL; - /* nanoseconds -> picoseconds */ - timings->tCCS_min = 1000UL * 500000; + timings->tR_max = 200000000; + timings->tCCS_min = 500000; } return 0; diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 5502ffbdd1e6..6e0e31eab7cc 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret; diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index d27b39a7223c..a3dcdf25f5f2 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); if (ret) { - nand_release(nc); + nand_cleanup(nc); goto no_dev; } diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index c43cb4d92d3d..23c222b6c40e 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -32,6 +32,7 @@ struct oxnas_nand_ctrl { void __iomem *io_base; struct clk *clk; struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; + unsigned int nchips; }; static uint8_t oxnas_nand_read_byte(struct nand_chip *chip) @@ -79,9 +80,9 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; - int nchips = 0; int count = 0; int err = 0; + int i; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), @@ -140,17 +141,15 @@ static int oxnas_nand_probe(struct platform_device *pdev) goto err_release_child; err = mtd_device_register(mtd, NULL, 0); - if (err) { - nand_release(chip); - goto err_release_child; - } + if (err) + goto err_cleanup_nand; - oxnas->chips[nchips] = chip; - ++nchips; + oxnas->chips[oxnas->nchips] = chip; + ++oxnas->nchips; } /* Exit if no chips found */ - if (!nchips) { + if (!oxnas->nchips) { err = -ENODEV; goto err_clk_unprepare; } @@ -159,8 +158,17 @@ static 
int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_cleanup_nand: + nand_cleanup(chip); err_release_child: of_node_put(nand_np); + + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } + err_clk_unprepare: clk_disable_unprepare(oxnas->clk); return err; @@ -169,9 +177,13 @@ err_clk_unprepare: static int oxnas_nand_remove(struct platform_device *pdev) { struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + struct nand_chip *chip; + int i; - if (oxnas->chips[0]) - nand_release(oxnas->chips[0]); + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + nand_release(chip); + } clk_disable_unprepare(oxnas->clk); diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 9cfe7395172a..066ff6dc9a23 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; - goto out_lpc; + goto out_cleanup_nand; } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, @@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; + out_cleanup_nand: + nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index dc0f3074ddbf..3a495b233443 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_release(&data->chip); + nand_cleanup(&data->chip); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 7bb9a7e8e1e7..c10995ca624a 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -459,11 +459,13 @@ struct qcom_nand_host { * among different NAND controllers. 
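The oxnas rework above is the general fix for multi-chip probe loops: record how many chips were successfully registered in the controller state, so that both the probe error path and remove() can unwind all of them instead of a hard-coded chips[0]. A sketch of the remove side under that scheme (the foo_* names are illustrative):

    static int foo_nand_remove(struct platform_device *pdev)
    {
        struct foo_nand_ctrl *ctrl = platform_get_drvdata(pdev);
        int i;

        /* Every chip counted by probe() gets the full teardown;
         * releasing only chips[0] would leak the rest. */
        for (i = 0; i < ctrl->nchips; i++)
            nand_release(ctrl->chips[i]);

        clk_disable_unprepare(ctrl->clk);
        return 0;
    }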
* @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM + * @is_qpic - whether NAND CTRL is part of qpic IP * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; + bool is_qpic; u32 dev_cmd_reg_start; }; @@ -1568,6 +1570,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int i; + nandc_read_buffer_sync(nandc, true); + for (i = 0; i < cw_cnt; i++) { u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); @@ -2751,7 +2755,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) u32 nand_ctrl; /* kill onenand */ - nandc_write(nandc, SFLASHC_BURST_CFG, 0); + if (!nandc->props->is_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), NAND_DEV_CMD_VLD_VAL); @@ -2845,7 +2850,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) struct device *dev = nandc->dev; struct device_node *dn = dev->of_node, *child; struct qcom_nand_host *host; - int ret; + int ret = -ENODEV; for_each_available_child_of_node(dn, child) { host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); @@ -2863,10 +2868,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) list_add_tail(&host->node, &nandc->host_list); } - if (list_empty(&nandc->host_list)) - return -ENODEV; - - return 0; + return ret; } /* parse custom DT properties here */ @@ -3007,12 +3009,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { static const struct qcom_nandc_props ipq4019_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x0, }; static const struct qcom_nandc_props ipq8074_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x7000, }; diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index b47a9eaff89b..d8c52a016080 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_release(this); + nand_cleanup(this); err_scan: iounmap(sharpsl->io); diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 20f40c0e812c..7c94fc51a611 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_release(nand_chip); + nand_cleanup(nand_chip); out: iounmap(host->io_base); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 89773293c64d..45c376fc571a 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2003,7 +2003,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index db030f1701ee..4e9a6d94f6e8 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_release(nand_chip); + nand_cleanup(nand_chip); err_irq: tmio_hw_stop(dev, 
tmio); diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index 6b399a75f9ae..b6f114da5714 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -850,8 +850,10 @@ static int vf610_nfc_probe(struct platform_device *pdev) } of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev); - if (!of_id) - return -ENODEV; + if (!of_id) { + err = -ENODEV; + goto err_disable_clk; + } nfc->variant = (enum vf610_nfc_variant)of_id->data; diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 834f794816a9..018311dc8fe1 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev) err = mtd_device_register(mtd, NULL, 0); if (err) - nand_release(&data->chip); + nand_cleanup(&data->chip); return err; } diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 89f6beefb01c..55e636efcc8b 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -568,18 +568,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) { struct spinand_device *spinand = nand_to_spinand(nand); + u8 marker[2] = { }; struct nand_page_io_req req = { .pos = *pos, - .ooblen = 2, + .ooblen = sizeof(marker), .ooboffs = 0, - .oobbuf.in = spinand->oobbuf, + .oobbuf.in = marker, .mode = MTD_OPS_RAW, }; - memset(spinand->oobbuf, 0, 2); spinand_select_target(spinand, pos->target); spinand_read_page(spinand, &req, false); - if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff) + if (marker[0] != 0xff || marker[1] != 0xff) return true; return false; @@ -603,15 +603,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs) static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) { struct spinand_device *spinand = nand_to_spinand(nand); + u8 marker[2] = { }; struct nand_page_io_req req = { .pos = *pos, .ooboffs = 0, - .ooblen = 2, - .oobbuf.out = spinand->oobbuf, + .ooblen = sizeof(marker), + .oobbuf.out = marker, + .mode = MTD_OPS_RAW, }; int ret; - /* Erase block before marking it bad. 
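Two details in the spinand_isbad()/spinand_markbad() changes above deserve a note: the marker is now accessed in MTD_OPS_RAW mode, so on-die ECC cannot "correct" the bad-block bytes behind the caller's back, and an on-stack two-byte buffer replaces the shared spinand->oobbuf scratch area, so concurrent users cannot clobber each other. The request, reduced to its essentials:

    u8 marker[2] = { };
    struct nand_page_io_req req = {
        .pos = *pos,
        .ooboffs = 0,
        .ooblen = sizeof(marker),
        .oobbuf.in = marker,
        .mode = MTD_OPS_RAW,    /* bypass ECC: read/write the BBM verbatim */
    };

The erase that used to precede the marker write is dropped in the next hunk, presumably because erasing a block that is going bad may itself fail or hang, and the marker write alone is sufficient.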
*/ ret = spinand_select_target(spinand, pos->target); if (ret) return ret; @@ -620,9 +621,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) if (ret) return ret; - spinand_erase_op(spinand, pos); - - memset(spinand->oobbuf, 0, 2); return spinand_write_page(spinand, &req); } @@ -1051,6 +1049,10 @@ static int spinand_init(struct spinand_device *spinand) mtd->oobavail = ret; + /* Propagate ECC information to mtd_info */ + mtd->ecc_strength = nand->eccreq.strength; + mtd->ecc_step_size = nand->eccreq.step_size; + return 0; err_cleanup_nanddev: @@ -1131,12 +1133,14 @@ static const struct spi_device_id spinand_ids[] = { { .name = "spi-nand" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(spi, spinand_ids); #ifdef CONFIG_OF static const struct of_device_id spinand_of_ids[] = { { .compatible = "spi-nand" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, spinand_of_ids); #endif static struct spi_mem_driver spinand_drv = { diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e99d425aa93f..b13b39763a40 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -21,7 +21,7 @@ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), @@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_f, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), @@ -201,7 +201,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - 0, + SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), SPINAND_INFO("GD5F2GQ4xA", 0xF2, @@ -210,7 +210,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - 0, + SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), SPINAND_INFO("GD5F4GQ4xA", 0xF4, @@ -219,7 +219,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - 0, + SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), SPINAND_INFO("GD5F1GQ4UExxG", 0xd1, @@ -228,7 +228,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - 0, + SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, gd5fxgq4uexxg_ecc_get_status)), SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148, @@ -237,7 +237,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, &write_cache_variants, &update_cache_variants), - 0, + SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, gd5fxgq4ufxxg_ecc_get_status)), }; diff --git 
a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c index 752b6cf005f7..8fd61767af83 100644 --- a/drivers/mtd/parsers/afs.c +++ b/drivers/mtd/parsers/afs.c @@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd, return i; out_free_parts: - while (i >= 0) { + while (--i >= 0) kfree(parts[i].name); - i--; - } kfree(parts); *pparts = NULL; return ret; diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index c86f2db8c882..0dca51549128 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -218,12 +218,41 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id; + char *p, *mtd_id, *semicol, *open_parenth; + + /* + * Replace the first ';' with a NUL char so that strrchr() can work + * properly. + */ + semicol = strchr(s, ';'); + if (semicol) + *semicol = '\0'; + + /* + * make sure that partition names containing ':' are not mistaken + * for part of the mtd-id + */ + open_parenth = strchr(s, '('); + if (open_parenth) + *open_parenth = '\0'; mtd_id = s; - /* fetch <mtd-id> */ - p = strchr(s, ':'); + /* + * fetch <mtd-id>. We use strrchr() to ignore all ':' that could + * be present in the MTD name; only the last one is interpreted + * as the <mtd-id>/<part-definition> separator. + */ + p = strrchr(s, ':'); + + /* Restore the '(' now. */ + if (open_parenth) + *open_parenth = '('; + + /* Restore the ';' now. */ + if (semicol) + *semicol = ';'; + if (!p) { pr_err("no mtd-id\n"); return -EINVAL; diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c index d69607b48227..fab0949aabba 100644 --- a/drivers/mtd/parsers/parser_imagetag.c +++ b/drivers/mtd/parsers/parser_imagetag.c @@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master, pr_err("invalid rootfs address: %*ph\n", (int)sizeof(buf->flash_image_start), buf->flash_image_start); + ret = -EINVAL; goto out; } @@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master, pr_err("invalid kernel address: %*ph\n", (int)sizeof(buf->kernel_address), buf->kernel_address); + ret = -EINVAL; goto out; } @@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master, pr_err("invalid kernel length: %*ph\n", (int)sizeof(buf->kernel_length), buf->kernel_length); + ret = -EINVAL; goto out; } @@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master, pr_err("invalid total length: %*ph\n", (int)sizeof(buf->total_length), buf->total_length); + ret = -EINVAL; goto out; } diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 7bef63947b29..97a5e1eaeefd 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -475,7 +475,7 @@ static int cqspi_read_setup(struct spi_nor *nor) /* Setup dummy clock cycles */ dummy_clk = nor->read_dummy; if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) - dummy_clk = CQSPI_DUMMY_CLKS_MAX; + return -EOPNOTSUPP; if (dummy_clk / 8) { reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index 6dac9dd8bf42..8fcc48056a8b 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -396,8 +396,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host) for_each_available_child_of_node(dev->of_node, np) { ret = hisi_spi_nor_register(np, host); - if
(ret) + if (ret) { + of_node_put(np); goto fail; + } if (host->num_chip == HIFMC_MAX_CHIP_NUM) { dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f417fb680cd8..dd6963e4af2c 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1011,14 +1011,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, erase = &map->erase_type[i]; + /* Alignment is not mandatory for overlaid regions */ + if (region->offset & SNOR_OVERLAID_REGION && + region->size <= len) + return erase; + /* Don't erase more than what the user has asked for. */ if (erase->size > len) continue; - /* Alignment is not mandatory for overlaid regions */ - if (region->offset & SNOR_OVERLAID_REGION) - return erase; - spi_nor_div_by_erase_size(erase, addr, &rem); if (rem) continue; @@ -1152,6 +1153,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor, goto destroy_erase_cmd_list; if (prev_erase != erase || + erase->size != cmd->size || region->offset & SNOR_OVERLAID_REGION) { cmd = spi_nor_init_erase_cmd(region, erase); if (IS_ERR(cmd)) { @@ -3700,7 +3702,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region, int i; for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { - if (!(erase_type & BIT(i))) + if (!(erase[i].size && erase_type & BIT(erase[i].idx))) continue; if (region->size & erase[i].size_mask) { spi_nor_region_mark_overlay(region); @@ -3770,6 +3772,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + region[i].size; } + spi_nor_region_mark_end(®ion[i - 1]); save_uniform_erase_type = map->uniform_erase_type; map->uniform_erase_type = spi_nor_sort_erase_mask(map, @@ -3793,8 +3796,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, if (!(regions_erase_type & BIT(erase[i].idx))) spi_nor_set_erase_type(&erase[i], 0, 0xFF); - spi_nor_region_mark_end(®ion[i - 1]); - return 0; } @@ -4444,11 +4445,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor) memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params)); - if (spi_nor_parse_sfdp(nor, &sfdp_params)) { + if (spi_nor_parse_sfdp(nor, &nor->params)) { + memcpy(&nor->params, &sfdp_params, sizeof(nor->params)); nor->addr_width = 0; nor->flags &= ~SNOR_F_4B_OPCODES; - } else { - memcpy(&nor->params, &sfdp_params, sizeof(nor->params)); } } diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index a1dff92ceedf..8a83072401a7 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -392,9 +392,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos) { struct ubi_device *ubi = s->private; - if (*pos == 0) - return SEQ_START_TOKEN; - if (*pos < ubi->peb_count) return pos; @@ -408,8 +405,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct ubi_device *ubi = s->private; - if (v == SEQ_START_TOKEN) - return pos; (*pos)++; if (*pos < ubi->peb_count) @@ -431,11 +426,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter) int err; /* If this is the start, print a header */ - if (iter == SEQ_START_TOKEN) { - seq_puts(s, - "physical_block_number\terase_count\tblock_status\tread_status\n"); - return 0; - } + if (*block_number == 0) + seq_puts(s, "physical_block_number\terase_count\n"); err = ubi_io_is_bad(ubi, *block_number); if (err) diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index c44c8470247e..b486250923c5 100644 --- 
a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) return victim; } +static inline void return_unused_peb(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->free); + ubi->free_count++; +} + /** * return_unused_pool_pebs - returns unused PEB to the free tree. * @ubi: UBI device description object @@ -52,23 +59,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, for (i = pool->used; i < pool->size; i++) { e = ubi->lookuptbl[pool->pebs[i]]; - wl_tree_add(e, &ubi->free); - ubi->free_count++; + return_unused_peb(ubi, e); } } -static int anchor_pebs_available(struct rb_root *root) -{ - struct rb_node *p; - struct ubi_wl_entry *e; - - ubi_rb_for_each_entry(p, e, root, u.rb) - if (e->pnum < UBI_FM_MAX_START) - return 1; - - return 0; -} - /** * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. * @ubi: UBI device description object @@ -277,8 +271,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); + + /* Do we already have an anchor? */ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* No luck, trigger wear leveling to produce a new anchor PEB */ + ubi->fm_do_produce_anchor = 1; if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -294,7 +306,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) return -ENOMEM; } - wrk->anchor = 1; wrk->func = &wear_leveling_worker; __schedule_ubi_work(ubi, wrk); return 0; @@ -356,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi) return_unused_pool_pebs(ubi, &ubi->fm_pool); return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); + if (ubi->fm_anchor) { + return_unused_peb(ubi, ubi->fm_anchor); + ubi->fm_anchor = NULL; + } + if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 604772fc4a96..53f448e7433a 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1543,14 +1543,6 @@ int ubi_update_fastmap(struct ubi_device *ubi) return 0; } - ret = ubi_ensure_anchor_pebs(ubi); - if (ret) { - up_write(&ubi->fm_eba_sem); - up_write(&ubi->work_sem); - up_write(&ubi->fm_protect); - return ret; - } - new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { up_write(&ubi->fm_eba_sem); @@ -1621,7 +1613,8 @@ int ubi_update_fastmap(struct ubi_device *ubi) } spin_lock(&ubi->wl_lock); - tmp_e = ubi_wl_get_fm_peb(ubi, 1); + tmp_e = ubi->fm_anchor; + ubi->fm_anchor = NULL; spin_unlock(&ubi->wl_lock); if (old_fm) { @@ -1673,6 +1666,9 @@ out_unlock: up_write(&ubi->work_sem); up_write(&ubi->fm_protect); kfree(old_fm); + + ubi_ensure_anchor_pebs(ubi); + return ret; err: diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 721b6aa7936c..a173eb707bdd 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -491,6 +491,8 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap + * @fm_anchor: The next anchor PEB to use for fastmap + * @fm_do_produce_anchor: If true produce 
an anchor PEB in wl * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -599,6 +601,8 @@ struct ubi_device { struct work_struct fm_work; int fm_work_scheduled; int fast_attach; + struct ubi_wl_entry *fm_anchor; + int fm_do_produce_anchor; /* Wear-leveling sub-system's stuff */ struct rb_root used; @@ -789,7 +793,6 @@ struct ubi_attach_info { * @vol_id: the volume ID on which this erasure is being performed * @lnum: the logical eraseblock number * @torture: if the physical eraseblock has to be tortured - * @anchor: produce a anchor PEB to by used by fastmap * * The @func pointer points to the worker function. If the @shutdown argument is * not zero, the worker has to free the resources and exit immediately as the @@ -805,7 +808,6 @@ struct ubi_work { int vol_id; int lnum; int torture; - int anchor; }; #include "debug.h" diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 3fcdefe2714d..7def041bbe48 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, } } - /* If no fastmap has been written and this WL entry can be used - * as anchor PEB, hold it back and return the second best WL entry - * such that fastmap can use the anchor PEB later. */ - if (prev_e && !ubi->fm_disabled && - !ubi->fm && e->pnum < UBI_FM_MAX_START) - return prev_e; - return e; } @@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int erase = 0, keep = 0, vol_id = -1, lnum = -1; -#ifdef CONFIG_MTD_UBI_FASTMAP - int anchor = wrk->anchor; -#endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; @@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } #ifdef CONFIG_MTD_UBI_FASTMAP - /* Check whether we need to produce an anchor PEB */ - if (!anchor) - anchor = !anchor_pebs_available(&ubi->free); - - if (anchor) { + if (ubi->fm_do_produce_anchor) { e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; @@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); + ubi->fm_do_produce_anchor = 0; } else if (!ubi->scrub.rb_node) { #else if (!ubi->scrub.rb_node) { @@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) goto out_cancel; } - wrk->anchor = 0; wrk->func = &wear_leveling_worker; if (nested) __schedule_ubi_work(ubi, wrk); @@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { spin_lock(&ubi->wl_lock); - wl_tree_add(e, &ubi->free); - ubi->free_count++; + + if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { + ubi->fm_anchor = e; + ubi->fm_do_produce_anchor = 0; + } else { + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } + spin_unlock(&ubi->wl_lock); /* @@ -1636,6 +1629,19 @@ int ubi_thread(void *u) !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&ubi->wl_lock); + + /* + * Check kthread_should_stop() after we set the task + * state to guarantee that we either see the stop bit + * and exit or the task state is reset to runnable such + * that it's not scheduled out indefinitely and detects + * 
the stop bit at kthread_should_stop(). + */ + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } + schedule(); continue; } @@ -1882,6 +1888,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (err) goto out_free; +#ifdef CONFIG_MTD_UBI_FASTMAP + ubi_ensure_anchor_pebs(ubi); +#endif return 0; out_free: diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h index a9e2d669acd8..c93a53293786 100644 --- a/drivers/mtd/ubi/wl.h +++ b/drivers/mtd/ubi/wl.h @@ -2,7 +2,6 @@ #ifndef UBI_WL_H #define UBI_WL_H #ifdef CONFIG_MTD_UBI_FASTMAP -static int anchor_pebs_available(struct rb_root *root); static void update_fastmap_work_fn(struct work_struct *wrk); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a7059e018c47..8ce9b1d45e69 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1163,6 +1163,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev, bond_dev->type = slave_dev->type; bond_dev->hard_header_len = slave_dev->hard_header_len; + bond_dev->needed_headroom = slave_dev->needed_headroom; bond_dev->addr_len = slave_dev->addr_len; memcpy(bond_dev->broadcast, slave_dev->broadcast, @@ -1307,29 +1308,9 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) slave->dev->flags &= ~IFF_SLAVE; } -static struct slave *bond_alloc_slave(struct bonding *bond) -{ - struct slave *slave = NULL; - - slave = kzalloc(sizeof(*slave), GFP_KERNEL); - if (!slave) - return NULL; - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), - GFP_KERNEL); - if (!SLAVE_AD_INFO(slave)) { - kfree(slave); - return NULL; - } - } - INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); - - return slave; -} - -static void bond_free_slave(struct slave *slave) +static void slave_kobj_release(struct kobject *kobj) { + struct slave *slave = to_slave(kobj); struct bonding *bond = bond_get_bond_by_slave(slave); cancel_delayed_work_sync(&slave->notify_work); @@ -1339,6 +1320,53 @@ static void bond_free_slave(struct slave *slave) kfree(slave); } +static struct kobj_type slave_ktype = { + .release = slave_kobj_release, +#ifdef CONFIG_SYSFS + .sysfs_ops = &slave_sysfs_ops, +#endif +}; + +static int bond_kobj_init(struct slave *slave) +{ + int err; + + err = kobject_init_and_add(&slave->kobj, &slave_ktype, + &(slave->dev->dev.kobj), "bonding_slave"); + if (err) + kobject_put(&slave->kobj); + + return err; +} + +static struct slave *bond_alloc_slave(struct bonding *bond, + struct net_device *slave_dev) +{ + struct slave *slave = NULL; + + slave = kzalloc(sizeof(*slave), GFP_KERNEL); + if (!slave) + return NULL; + + slave->bond = bond; + slave->dev = slave_dev; + + if (bond_kobj_init(slave)) + return NULL; + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), + GFP_KERNEL); + if (!SLAVE_AD_INFO(slave)) { + kobject_put(&slave->kobj); + return NULL; + } + } + INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); + + return slave; +} + static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) { info->bond_mode = BOND_MODE(bond); @@ -1522,14 +1550,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_undo_flags; } - new_slave = bond_alloc_slave(bond); + new_slave = 
bond_alloc_slave(bond, slave_dev); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } - new_slave->bond = bond; - new_slave->dev = slave_dev; /* Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. */ @@ -1851,7 +1877,7 @@ err_restore_mtu: dev_set_mtu(slave_dev, new_slave->original_mtu); err_free: - bond_free_slave(new_slave); + kobject_put(&new_slave->kobj); err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ @@ -2031,7 +2057,7 @@ static int __bond_release_one(struct net_device *bond_dev, if (!netif_is_bond_master(slave_dev)) slave_dev->priv_flags &= ~IFF_BONDING; - bond_free_slave(slave); + kobject_put(&slave->kobj); return 0; } @@ -2052,7 +2078,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond\n"); bond_remove_proc_entry(bond); @@ -2801,6 +2828,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_time_in_interval(bond, last_rx, 1)) { bond_propose_link_state(slave, BOND_LINK_UP); commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; } continue; } @@ -2909,6 +2939,19 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. 
+ */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: slave_err(bond->dev, slave->dev, "impossible: link_new_state %d on slave\n", @@ -2959,8 +3002,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; @@ -4287,13 +4328,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4305,8 +4356,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; @@ -4905,15 +4961,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b43b51646b11..f0f9138e967f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 09718c763be5..471ab6c5056a 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -190,13 +190,14 @@ static void bond_info_show_slave(struct seq_file *seq, slave->dev->addr_len, slave->perm_hwaddr); seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); seq_printf(seq, "Slave active: %d\n", !slave->backup); - seq_printf(seq, "Slave sm_vars: 0x%x\n", - SLAVE_AD_INFO(slave)->port.sm_vars); if (BOND_MODE(bond) == BOND_MODE_8023AD) { const struct port *port = &SLAVE_AD_INFO(slave)->port; const struct aggregator *agg = port->aggregator; + seq_printf(seq, "Slave sm_vars: 0x%x\n", + port->sm_vars); + if (agg) { seq_printf(seq, "Aggregator ID: %d\n", agg->aggregator_identifier); diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 007481557191..fd07561da034 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ 
b/drivers/net/bonding/bond_sysfs_slave.c @@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = { }; #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr) -#define to_slave(obj) container_of(obj, struct slave, kobj) static ssize_t slave_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -132,26 +131,15 @@ static ssize_t slave_show(struct kobject *kobj, return slave_attr->show(slave, buf); } -static const struct sysfs_ops slave_sysfs_ops = { +const struct sysfs_ops slave_sysfs_ops = { .show = slave_show, }; -static struct kobj_type slave_ktype = { -#ifdef CONFIG_SYSFS - .sysfs_ops = &slave_sysfs_ops, -#endif -}; - int bond_sysfs_slave_add(struct slave *slave) { const struct slave_attribute **a; int err; - err = kobject_init_and_add(&slave->kobj, &slave_ktype, - &(slave->dev->dev.kobj), "bonding_slave"); - if (err) - return err; - for (a = slave_attrs; *a; ++a) { err = sysfs_create_file(&slave->kobj, &((*a)->attr)); if (err) { @@ -169,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave) for (a = slave_attrs; *a; ++a) sysfs_remove_file(&slave->kobj, &((*a)->attr)); - - kobject_put(&slave->kobj); } diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 17c166cc8482..e4d944770cca 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3 config CAN_KVASER_PCIEFD depends on PCI tristate "Kvaser PCIe FD cards" + select CRC32 help This is a driver for the Kvaser PCI Express CAN FD family. diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 22164300122d..a2b4463d8480 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_VXCAN) += vxcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o -obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y += dev.o -can-dev-y += rx-offload.o - -can-dev-$(CONFIG_CAN_LEDS) += led.o - +obj-y += dev/ obj-y += rcar/ obj-y += spi/ obj-y += usb/ diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 8e9f5620c9a2..f14e739ba3f4 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; -static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_enable(priv->device); -} - -static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) -{ - if (priv->device) - pm_runtime_disable(priv->device); -} - static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -1334,7 +1322,6 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); int err; /* Deactivate pins to prevent DRA7 DCAN IP from being @@ -1344,28 +1331,19 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); - c_can_pm_runtime_enable(priv); - dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; err = register_candev(dev); - if (err) - c_can_pm_runtime_disable(priv); - else + if (!err) devm_can_led_init(dev); - return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { - struct c_can_priv *priv = netdev_priv(dev); - unregister_candev(dev); - - c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git 
a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 406b4847e5dc..7efb60b50876 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); + void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, priv->base); + pci_iounmap(pdev, addr); pci_disable_msi(pdev); pci_clear_master(pdev); pci_release_regions(pdev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index b5145a7f874c..f2b0408ce87d 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,6 +29,7 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> @@ -385,6 +386,7 @@ static int c_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", @@ -397,6 +399,7 @@ static int c_can_plat_probe(struct platform_device *pdev) return 0; exit_free_device: + pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); @@ -407,9 +410,10 @@ exit: static int c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - + pm_runtime_disable(priv->device); free_c_can_dev(dev); return 0; diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile new file mode 100644 index 000000000000..cba92e6bcf6f --- /dev/null +++ b/drivers/net/can/dev/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_DEV) += can-dev.o +can-dev-y += dev.o +can-dev-y += rx-offload.o + +can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev/dev.c similarity index 98% rename from drivers/net/can/dev.c rename to drivers/net/can/dev/dev.c index 3a33fb503400..322da89cb9c6 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev/dev.c @@ -486,9 +486,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) */ struct sk_buff *skb = priv->echo_skb[idx]; struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u8 len = cf->len; - *len_ptr = len; + /* get the real payload length for netdev statistics */ + if (cf->can_id & CAN_RTR_FLAG) + *len_ptr = 0; + else + *len_ptr = cf->len; + priv->echo_skb[idx] = NULL; return skb; @@ -512,7 +516,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) if (!skb) return 0; - netif_rx(skb); + skb_get(skb); + if (netif_rx(skb) == NET_RX_SUCCESS) + dev_consume_skb_any(skb); + else + dev_kfree_skb_any(skb); return len; } @@ -559,11 +567,11 @@ static void can_restart(struct net_device *dev) } cf->can_id |= CAN_ERR_RESTARTED; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx_ni(skb); + restart: netdev_dbg(dev, "restarted\n"); priv->can_stats.restarts++; @@ -710,6 +718,7 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb); struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs) { + struct can_ml_priv *can_ml; struct net_device *dev; 
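/* Note on the can_get_echo_skb() hunk above: when netif_rx() drops a
 * frame it frees the skb internally with kfree_skb(), which must not run
 * in hard-IRQ context -- and echo skbs are typically looped back from TX
 * completion interrupts. Taking an extra reference first turns that
 * internal free into a plain refcount drop, and the driver then releases
 * its own reference with the IRQ-safe *_any() helpers. Sketch of the
 * idiom (illustration only, assumes <linux/netdevice.h>):
 *
 *	skb_get(skb);				// refcount: ours + the stack's
 *	if (netif_rx(skb) == NET_RX_SUCCESS)
 *		dev_consume_skb_any(skb);	// delivered: drop ours quietly
 *	else
 *		dev_kfree_skb_any(skb);		// rejected: safe in any context
 */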
struct can_priv *priv; int size; @@ -741,7 +750,8 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, priv = netdev_priv(dev); priv->dev = dev; - dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; @@ -1126,7 +1136,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct can_ctrlmode cm = {.flags = priv->ctrlmode}; - struct can_berr_counter bec; + struct can_berr_counter bec = { }; enum can_state state = priv->state; if (priv->do_get_state) @@ -1218,6 +1228,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", + .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/dev/rx-offload.c similarity index 99% rename from drivers/net/can/rx-offload.c rename to drivers/net/can/dev/rx-offload.c index 84cae167e42f..7e75a87a8a6a 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -272,7 +272,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return -ENOBUFS; } @@ -317,7 +317,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, { if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return -ENOBUFS; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index e5c207ad3c77..7ec15cb356c0 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -321,8 +321,7 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = { static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -545,11 +544,17 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + unsigned int timeout; + u32 bitrate = priv->can.bittiming.bitrate; u32 reg; + if (bitrate) + timeout = 1000 * 1000 * 10 / bitrate; + else + timeout = FLEXCAN_TIMEOUT_US / 10; + reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_HALT; + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, ®s->mcr); while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) @@ -615,8 +620,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev, int err; err = pm_runtime_get_sync(priv->dev); - if (err < 0) + if (err < 0) { + pm_runtime_put_noidle(priv->dev); return err; + } err = __flexcan_get_berr_counter(dev, bec); @@ -1056,10 +1063,13 @@ static int flexcan_chip_start(struct net_device *dev) flexcan_set_bittiming(dev); + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + /* MCR * - * enable freeze - * halt now * only supervisor access * enable warning int * enable individual RX 
masking @@ -1068,9 +1078,8 @@ static int flexcan_chip_start(struct net_device *dev) */ reg_mcr = priv->read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | - FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); /* MCR * @@ -1201,14 +1210,10 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(reg_mecr, ®s->mecr); } - err = flexcan_transceiver_enable(priv); - if (err) - goto out_chip_disable; - /* synchronize with the can bus */ err = flexcan_chip_unfreeze(priv); if (err) - goto out_transceiver_disable; + goto out_chip_disable; priv->can.state = CAN_STATE_ERROR_ACTIVE; @@ -1225,25 +1230,28 @@ static int flexcan_chip_start(struct net_device *dev) return 0; - out_transceiver_disable: - flexcan_transceiver_disable(priv); out_chip_disable: flexcan_chip_disable(priv); return err; } -/* flexcan_chip_stop +/* __flexcan_chip_stop * - * this functions is entered with clocks enabled + * this function is entered with clocks enabled */ -static void flexcan_chip_stop(struct net_device *dev) +static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; + int err; /* freeze + disable module */ - flexcan_chip_freeze(priv); - flexcan_chip_disable(priv); + err = flexcan_chip_freeze(priv); + if (err && !disable_on_error) + return err; + err = flexcan_chip_disable(priv); + if (err && !disable_on_error) + goto out_chip_unfreeze; /* Disable all interrupts */ priv->write(0, ®s->imask2); @@ -1251,8 +1259,24 @@ static void flexcan_chip_stop(struct net_device *dev) priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, ®s->ctrl); - flexcan_transceiver_disable(priv); priv->can.state = CAN_STATE_STOPPED; + + return 0; + + out_chip_unfreeze: + flexcan_chip_unfreeze(priv); + + return err; +} + +static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev) +{ + return __flexcan_chip_stop(dev, true); +} + +static inline int flexcan_chip_stop(struct net_device *dev) +{ + return __flexcan_chip_stop(dev, false); } static int flexcan_open(struct net_device *dev) @@ -1261,17 +1285,23 @@ static int flexcan_open(struct net_device *dev) int err; err = pm_runtime_get_sync(priv->dev); - if (err < 0) + if (err < 0) { + pm_runtime_put_noidle(priv->dev); return err; + } err = open_candev(dev); if (err) goto out_runtime_put; - err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + err = flexcan_transceiver_enable(priv); if (err) goto out_close; + err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_transceiver_disable; + priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + (sizeof(priv->regs->mb[1]) / priv->mb_size); @@ -1327,6 +1357,8 @@ static int flexcan_open(struct net_device *dev) can_rx_offload_del(&priv->offload); out_free_irq: free_irq(dev->irq, dev); + out_transceiver_disable: + flexcan_transceiver_disable(priv); out_close: close_candev(dev); out_runtime_put: @@ -1341,10 +1373,11 @@ static int flexcan_close(struct net_device *dev) netif_stop_queue(dev); can_rx_offload_disable(&priv->offload); - flexcan_chip_stop(dev); + flexcan_chip_stop_disable_on_error(dev); can_rx_offload_del(&priv->offload); free_irq(dev->irq, 
dev); + flexcan_transceiver_disable(priv); close_candev(dev); pm_runtime_put(priv->dev); @@ -1407,10 +1440,14 @@ static int register_flexcandev(struct net_device *dev) if (err) goto out_chip_disable; - /* set freeze, halt and activate FIFO, restrict register access */ + /* set freeze, halt */ + err = flexcan_chip_freeze(priv); + if (err) + goto out_chip_disable; + + /* activate FIFO, restrict register access */ reg = priv->read(®s->mcr); - reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | - FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; priv->write(reg, ®s->mcr); /* Currently we only support newer versions of this core @@ -1655,6 +1692,8 @@ static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + device_set_wakeup_enable(&pdev->dev, false); + device_set_wakeup_capable(&pdev->dev, false); unregister_flexcandev(dev); pm_runtime_disable(&pdev->dev); free_candev(dev); @@ -1681,8 +1720,6 @@ static int __maybe_unused flexcan_suspend(struct device *device) err = flexcan_chip_disable(priv); if (err) return err; - - err = pm_runtime_force_suspend(device); } netif_stop_queue(dev); netif_device_detach(dev); @@ -1708,10 +1745,6 @@ static int __maybe_unused flexcan_resume(struct device *device) if (err) return err; } else { - err = pm_runtime_force_resume(device); - if (err) - return err; - err = flexcan_chip_enable(priv); } } @@ -1742,8 +1775,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - if (netif_running(dev) && device_may_wakeup(device)) - flexcan_enable_wakeup_irq(priv, true); + if (netif_running(dev)) { + int err; + + if (device_may_wakeup(device)) + flexcan_enable_wakeup_irq(priv, true); + + err = pm_runtime_force_suspend(device); + if (err) + return err; + } return 0; } @@ -1753,8 +1794,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - if (netif_running(dev) && device_may_wakeup(device)) - flexcan_enable_wakeup_irq(priv, false); + if (netif_running(dev)) { + int err; + + err = pm_runtime_force_resume(device); + if (err) + return err; + + if (device_may_wakeup(device)) + flexcan_enable_wakeup_irq(priv, false); + } return 0; } diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 6f766918211a..e7a26ec9bdc1 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 +#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 /* Loopback control register */ @@ -287,12 +288,12 @@ struct kvaser_pciefd_tx_packet { static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { .name = KVASER_PCIEFD_DRV_NAME, .tseg1_min = 1, - .tseg1_max = 255, + .tseg1_max = 512, .tseg2_min = 1, .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, - .brp_max = 4096, + .brp_max = 8192, .brp_inc = 1, }; @@ -947,6 +948,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); + /* Disable Bus load reporting */ + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); + 
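/* Recurring fix in the flexcan hunks above (and in xilinx_can further
 * down): pm_runtime_get_sync() increments the device usage counter even
 * when it fails, so a failing call must still be balanced with
 * pm_runtime_put_noidle() or the counter leaks and the device can never
 * runtime-suspend again. Sketch of the idiom (assumes
 * <linux/pm_runtime.h>):
 *
 *	err = pm_runtime_get_sync(dev);
 *	if (err < 0) {
 *		pm_runtime_put_noidle(dev);	// undo the counter bump only
 *		return err;
 *	}
 *	// ... device is powered, use it ...
 *	pm_runtime_put(dev);
 */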
tx_npackets = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG); if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) & diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index 1ff0b7fe81d6..c10932a7f1fe 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -16,7 +16,8 @@ config CAN_M_CAN_PLATFORM config CAN_M_CAN_TCAN4X5X depends on CAN_M_CAN - depends on REGMAP_SPI + depends on SPI + select REGMAP_SPI tristate "TCAN4X5X M_CAN device" ---help--- Say Y here if you want support for Texas Instruments TCAN4x5x diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 562c8317e3aa..b2224113987c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -379,10 +379,6 @@ void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) cccr &= ~CCCR_CSR; if (enable) { - /* Clear the Clock stop request if it was set */ - if (cccr & CCCR_CSR) - cccr &= ~CCCR_CSR; - /* enable m_can configuration */ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); @@ -505,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - if (rxfs & RXFS_RFL) - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - m_can_read_fifo(dev, rxfs); quota--; @@ -664,7 +657,7 @@ static int m_can_handle_state_change(struct net_device *dev, unsigned int ecr; switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: /* error warning state */ cdev->can.can_stats.error_warning++; cdev->can.state = CAN_STATE_ERROR_WARNING; @@ -693,7 +686,7 @@ static int m_can_handle_state_change(struct net_device *dev, __m_can_get_berr_counter(dev, &bec); switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? @@ -846,7 +839,7 @@ static int m_can_rx_peripheral(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); - m_can_rx_handler(dev, 1); + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); m_can_enable_all_interrupts(cdev); @@ -913,6 +906,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) struct net_device_stats *stats = &dev->stats; u32 ir; + if (pm_runtime_suspended(cdev->dev)) + return IRQ_NONE; ir = m_can_read(cdev, M_CAN_IR); if (!ir) return IRQ_NONE; @@ -988,7 +983,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 256, - .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, @@ -1335,6 +1330,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev) &m_can_data_bittiming_const_31X; break; case 32: + case 33: + /* Support both MCAN version v3.2.x and v3.3.0 */ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? 
m_can_dev->bit_timing : &m_can_bittiming_const_31X; @@ -1366,6 +1363,9 @@ static void m_can_stop(struct net_device *dev) /* disable all interrupts */ m_can_disable_all_interrupts(cdev); + /* Set init mode to disengage from the network */ + m_can_config_endisable(cdev, true); + /* set the state as STOPPED */ cdev->can.state = CAN_STATE_STOPPED; } @@ -1600,7 +1600,7 @@ static int m_can_open(struct net_device *dev) INIT_WORK(&cdev->tx_work, m_can_tx_work_queue); err = request_threaded_irq(dev->irq, NULL, m_can_isr, - IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + IRQF_ONESHOT, dev->name, dev); } else { err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, @@ -1764,6 +1764,12 @@ out: } EXPORT_SYMBOL_GPL(m_can_class_allocate_dev); +void m_can_class_free_dev(struct net_device *net) +{ + free_candev(net); +} +EXPORT_SYMBOL_GPL(m_can_class_free_dev); + int m_can_class_register(struct m_can_classdev *m_can_dev) { int ret; @@ -1859,8 +1865,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev) { unregister_candev(m_can_dev->net); - m_can_clk_stop(m_can_dev); - free_candev(m_can_dev->net); } EXPORT_SYMBOL_GPL(m_can_class_unregister); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 49f42b50627a..b2699a7c9997 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -99,6 +99,7 @@ struct m_can_classdev { }; struct m_can_classdev *m_can_class_allocate_dev(struct device *dev); +void m_can_class_free_dev(struct net_device *net); int m_can_class_register(struct m_can_classdev *cdev); void m_can_class_unregister(struct m_can_classdev *cdev); int m_can_class_get_clocks(struct m_can_classdev *cdev); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 38ea5e600fb8..e6d0cb9ee02f 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct m_can_classdev *mcan_class = netdev_priv(ndev); - m_can_class_suspend(dev); - clk_disable_unprepare(mcan_class->cclk); clk_disable_unprepare(mcan_class->hclk); diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index b233756345f8..0d66582bd356 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -88,7 +88,7 @@ #define TCAN4X5X_MRAM_START 0x8000 #define TCAN4X5X_MCAN_OFFSET 0x1000 -#define TCAN4X5X_MAX_REGISTER 0x8fff +#define TCAN4X5X_MAX_REGISTER 0x8ffc #define TCAN4X5X_CLEAR_ALL_INT 0xffffffff #define TCAN4X5X_SET_ALL_INT 0xffffffff @@ -126,30 +126,6 @@ struct tcan4x5x_priv { int reg_offset; }; -static struct can_bittiming_const tcan4x5x_bittiming_const = { - .name = DEVICE_NAME, - .tseg1_min = 2, - .tseg1_max = 31, - .tseg2_min = 2, - .tseg2_max = 16, - .sjw_max = 16, - .brp_min = 1, - .brp_max = 32, - .brp_inc = 1, -}; - -static struct can_bittiming_const tcan4x5x_data_bittiming_const = { - .name = DEVICE_NAME, - .tseg1_min = 1, - .tseg1_max = 32, - .tseg2_min = 1, - .tseg2_max = 16, - .sjw_max = 16, - .brp_min = 1, - .brp_max = 32, - .brp_inc = 1, -}; - static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) { int wake_state = 0; @@ -349,14 +325,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) if (ret) return ret; + /* Zero out the MCAN buffers */ + m_can_init_ram(cdev); + ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); if (ret) return ret; - /* 
Zero out the MCAN buffers */ - m_can_init_ram(cdev); - return ret; } @@ -449,8 +425,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) mcan_class->dev = &spi->dev; mcan_class->ops = &tcan4x5x_ops; mcan_class->is_peripheral = true; - mcan_class->bit_timing = &tcan4x5x_bittiming_const; - mcan_class->data_timing = &tcan4x5x_data_bittiming_const; mcan_class->net->irq = spi->irq; spi_set_drvdata(spi, priv); @@ -467,6 +441,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, &spi->dev, &tcan4x5x_regmap); + if (IS_ERR(priv->regmap)) { + ret = PTR_ERR(priv->regmap); + goto out_clk; + } tcan4x5x_power_enable(priv->power, 1); @@ -497,10 +475,10 @@ static int tcan4x5x_can_remove(struct spi_device *spi) { struct tcan4x5x_priv *priv = spi_get_drvdata(spi); - tcan4x5x_power_enable(priv->power, 0); - m_can_class_unregister(priv->mcan_dev); + tcan4x5x_power_enable(priv->power, 0); + return 0; } diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 6b0c6a99fc8d..91b156b2123a 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -248,8 +248,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); /* if this frame is an echo, */ - if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && - !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { + if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); @@ -263,7 +262,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, netif_wake_queue(priv->ndev); spin_unlock_irqrestore(&priv->echo_lock, flags); - return 0; + + /* if this frame is only an echo, stop here. Otherwise, + * continue to push this application self-received frame into + * its own rx queue. 
+ */ + if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) + return 0; } /* otherwise, it should be pushed into rx fifo */ diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 4dfa459ef5c7..95fefb1eef36 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -519,6 +519,7 @@ static struct slcan *slc_alloc(void) int i; char name[IFNAMSIZ]; struct net_device *dev = NULL; + struct can_ml_priv *can_ml; struct slcan *sl; int size; @@ -541,7 +542,8 @@ static struct slcan *slc_alloc(void) dev->base_addr = i; sl = netdev_priv(dev); - dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN); + can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); /* Initialize channel control data */ sl->magic = SLCAN_MAGIC; diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 8242fb287cbb..16e3f55efa31 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev) /* check or determine and set bittime */ ret = open_candev(ndev); - if (!ret) - ret = softing_startstop(ndev, 1); + if (ret) + return ret; + + ret = softing_startstop(ndev, 1); + if (ret < 0) + close_candev(ndev); + return ret; } diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 31ad364a89bb..d0a05fc5cc32 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -873,7 +873,8 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { dev_err(&pdev->dev, "hecc ioremap failed\n"); - return PTR_ERR(priv->base); + err = PTR_ERR(priv->base); + goto probe_exit_candev; } /* handle hecc-ram memory */ @@ -886,7 +887,8 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->hecc_ram)) { dev_err(&pdev->dev, "hecc-ram ioremap failed\n"); - return PTR_ERR(priv->hecc_ram); + err = PTR_ERR(priv->hecc_ram); + goto probe_exit_candev; } /* handle mbx memory */ @@ -899,13 +901,14 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->mbx = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->mbx)) { dev_err(&pdev->dev, "mbx ioremap failed\n"); - return PTR_ERR(priv->mbx); + err = PTR_ERR(priv->mbx); + goto probe_exit_candev; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "No irq resource\n"); - goto probe_exit; + goto probe_exit_candev; } priv->ndev = ndev; @@ -936,7 +939,7 @@ static int ti_hecc_probe(struct platform_device *pdev) err = clk_prepare_enable(priv->clk); if (err) { dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); - goto probe_exit_clk; + goto probe_exit_release_clk; } priv->offload.mailbox_read = ti_hecc_mailbox_read; @@ -945,7 +948,7 @@ static int ti_hecc_probe(struct platform_device *pdev) err = can_rx_offload_add_timestamp(ndev, &priv->offload); if (err) { dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); - goto probe_exit_clk; + goto probe_exit_disable_clk; } err = register_candev(ndev); @@ -963,11 +966,13 @@ static int ti_hecc_probe(struct platform_device *pdev) probe_exit_offload: can_rx_offload_del(&priv->offload); -probe_exit_clk: +probe_exit_disable_clk: + clk_disable_unprepare(priv->clk); +probe_exit_release_clk: clk_put(priv->clk); probe_exit_candev: free_candev(ndev); -probe_exit: + return err; } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c 
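The gs_usb diff that follows is a systematic endianness cleanup: every
structure exchanged with the device over USB is re-declared with __le32
fields, and each access site converts via cpu_to_le32()/le32_to_cpu(), so
the driver works on big-endian hosts as well (per the comment added below,
the candleLight firmware always talks little endian). A minimal sketch of
the convention, hypothetical names only (assumes the kernel headers
<linux/types.h> and <asm/byteorder.h>):

	struct wire_msg {
		__le32 id;			/* little endian on the wire */
		__le32 flags;
	} __packed;

	static void wire_msg_fill(struct wire_msg *m, u32 id, u32 flags)
	{
		m->id = cpu_to_le32(id);	/* host -> wire */
		m->flags = cpu_to_le32(flags);
	}

	static u32 wire_msg_id(const struct wire_msg *m)
	{
		return le32_to_cpu(m->id);	/* wire -> host */
	}

With __le32 types in place, sparse (make C=1) flags any direct mixing of
host-order and wire-order values, which is how omissions like the ones
fixed here are usually caught.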
index a4b4b742c80c..0ad13d78815c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -63,21 +63,27 @@ enum gs_can_identify_mode { }; /* data types passed between host and device */ -struct gs_host_config { - u32 byte_order; -} __packed; -/* All data exchanged between host and device is exchanged in host byte order, - * thanks to the struct gs_host_config byte_order member, which is sent first - * to indicate the desired byte order. + +/* The firmware on the original USB2CAN by Geschwister Schneider + * Technologie Entwicklungs- und Vertriebs UG exchanges all data + * between the host and the device in host byte order. This is done + * with the struct gs_host_config::byte_order member, which is sent + * first to indicate the desired byte order. + * + * The widely used open source firmware candleLight doesn't support + * this feature and exchanges the data in little endian byte order. */ +struct gs_host_config { + __le32 byte_order; +} __packed; struct gs_device_config { u8 reserved1; u8 reserved2; u8 reserved3; u8 icount; - u32 sw_version; - u32 hw_version; + __le32 sw_version; + __le32 hw_version; } __packed; #define GS_CAN_MODE_NORMAL 0 @@ -87,26 +93,26 @@ struct gs_device_config { #define GS_CAN_MODE_ONE_SHOT BIT(3) struct gs_device_mode { - u32 mode; - u32 flags; + __le32 mode; + __le32 flags; } __packed; struct gs_device_state { - u32 state; - u32 rxerr; - u32 txerr; + __le32 state; + __le32 rxerr; + __le32 txerr; } __packed; struct gs_device_bittiming { - u32 prop_seg; - u32 phase_seg1; - u32 phase_seg2; - u32 sjw; - u32 brp; + __le32 prop_seg; + __le32 phase_seg1; + __le32 phase_seg2; + __le32 sjw; + __le32 brp; } __packed; struct gs_identify_mode { - u32 mode; + __le32 mode; } __packed; #define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) @@ -117,23 +123,23 @@ struct gs_identify_mode { #define GS_CAN_FEATURE_IDENTIFY BIT(5) struct gs_device_bt_const { - u32 feature; - u32 fclk_can; - u32 tseg1_min; - u32 tseg1_max; - u32 tseg2_min; - u32 tseg2_max; - u32 sjw_max; - u32 brp_min; - u32 brp_max; - u32 brp_inc; + __le32 feature; + __le32 fclk_can; + __le32 tseg1_min; + __le32 tseg1_max; + __le32 tseg2_min; + __le32 tseg2_max; + __le32 sjw_max; + __le32 brp_min; + __le32 brp_max; + __le32 brp_inc; } __packed; #define GS_CAN_FLAG_OVERFLOW 1 struct gs_host_frame { u32 echo_id; - u32 can_id; + __le32 can_id; u8 can_dlc; u8 channel; @@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) if (!skb) return; - cf->can_id = hf->can_id; + cf->can_id = le32_to_cpu(hf->can_id); cf->can_dlc = get_can_dlc(hf->can_dlc); memcpy(cf->data, hf->data, 8); /* ERROR frames tell us information about the controller */ - if (hf->can_id & CAN_ERR_FLAG) + if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG) gs_update_state(dev, cf); netdev->stats.rx_packets++; @@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev) if (!dbt) return -ENOMEM; - dbt->prop_seg = bt->prop_seg; - dbt->phase_seg1 = bt->phase_seg1; - dbt->phase_seg2 = bt->phase_seg2; - dbt->sjw = bt->sjw; - dbt->brp = bt->brp; + dbt->prop_seg = cpu_to_le32(bt->prop_seg); + dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1); + dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2); + dbt->sjw = cpu_to_le32(bt->sjw); + dbt->brp = cpu_to_le32(bt->brp); /* request bit timings */ rc = usb_control_msg(interface_to_usbdev(intf), @@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, cf = (struct can_frame *)skb->data; - hf->can_id = cf->can_id; + hf->can_id = cpu_to_le32(cf->can_id); hf->can_dlc 
= cf->can_dlc; memcpy(hf->data, cf->data, cf->can_dlc); @@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev) int rc, i; struct gs_device_mode *dm; u32 ctrlmode; + u32 flags = 0; rc = open_candev(netdev); if (rc) @@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev) /* flags */ ctrlmode = dev->can.ctrlmode; - dm->flags = 0; if (ctrlmode & CAN_CTRLMODE_LOOPBACK) - dm->flags |= GS_CAN_MODE_LOOP_BACK; + flags |= GS_CAN_MODE_LOOP_BACK; else if (ctrlmode & CAN_CTRLMODE_LISTENONLY) - dm->flags |= GS_CAN_MODE_LISTEN_ONLY; + flags |= GS_CAN_MODE_LISTEN_ONLY; /* Controller is not allowed to retry TX * this mode is unavailable on atmels uc3c hardware */ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) - dm->flags |= GS_CAN_MODE_ONE_SHOT; + flags |= GS_CAN_MODE_ONE_SHOT; if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) - dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE; + flags |= GS_CAN_MODE_TRIPLE_SAMPLE; /* finally start device */ - dm->mode = GS_CAN_MODE_START; + dm->mode = cpu_to_le32(GS_CAN_MODE_START); + dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_MODE, @@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) return -ENOMEM; if (do_identify) - imode->mode = GS_CAN_IDENTIFY_ON; + imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON); else - imode->mode = GS_CAN_IDENTIFY_OFF; + imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF); rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), @@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct net_device *netdev; int rc; struct gs_device_bt_const *bt_const; + u32 feature; bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL); if (!bt_const) @@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, /* dev settup */ strcpy(dev->bt_const.name, "gs_usb"); - dev->bt_const.tseg1_min = bt_const->tseg1_min; - dev->bt_const.tseg1_max = bt_const->tseg1_max; - dev->bt_const.tseg2_min = bt_const->tseg2_min; - dev->bt_const.tseg2_max = bt_const->tseg2_max; - dev->bt_const.sjw_max = bt_const->sjw_max; - dev->bt_const.brp_min = bt_const->brp_min; - dev->bt_const.brp_max = bt_const->brp_max; - dev->bt_const.brp_inc = bt_const->brp_inc; + dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min); + dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max); + dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min); + dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max); + dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max); + dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min); + dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max); + dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc); dev->udev = interface_to_usbdev(intf); dev->iface = intf; @@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel, /* can settup */ dev->can.state = CAN_STATE_STOPPED; - dev->can.clock.freq = bt_const->fclk_can; + dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can); dev->can.bittiming_const = &dev->bt_const; dev->can.do_set_bittiming = gs_usb_set_bittiming; dev->can.ctrlmode_supported = 0; - if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY) + feature = le32_to_cpu(bt_const->feature); + if (feature & GS_CAN_FEATURE_LISTEN_ONLY) dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; - if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK) + if (feature & GS_CAN_FEATURE_LOOP_BACK) dev->can.ctrlmode_supported |= 
CAN_CTRLMODE_LOOPBACK; - if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) + if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; - if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT) + if (feature & GS_CAN_FEATURE_ONE_SHOT) dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; SET_NETDEV_DEV(netdev, &intf->dev); - if (dconf->sw_version > 1) - if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) + if (le32_to_cpu(dconf->sw_version) > 1) + if (feature & GS_CAN_FEATURE_IDENTIFY) netdev->ethtool_ops = &gs_usb_ethtool_ops; kfree(bt_const); @@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf, if (!hconf) return -ENOMEM; - hconf->byte_order = 0x0000beef; + hconf->byte_order = cpu_to_le32(0x0000beef); /* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 7ab87a758754..218fadc91155 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, - .brp_max = 4096, + .brp_max = 8192, .brp_inc = 1, }; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 21faa2ec4632..8f785c199e22 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -326,8 +326,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, if (!ctx) return NETDEV_TX_BUSY; - can_put_echo_skb(skb, priv->netdev, ctx->ndx); - if (cf->can_id & CAN_EFF_FLAG) { /* SIDH | SIDL | EIDH | EIDL * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 @@ -357,6 +355,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) usb_msg.dlc |= MCBA_DLC_RTR_MASK; + can_put_echo_skb(skb, priv->netdev, ctx->ndx); + err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx); if (err) goto xmit_failed; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 0b7766b715fd..4b18f37beb4c 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) /* protect from getting time before setting now */ if (ktime_to_ns(time_ref->tv_host)) { u64 delta_us; + s64 delta_ts = 0; - delta_us = ts - time_ref->ts_dev_2; - if (ts < time_ref->ts_dev_2) - delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; + /* General case: dev_ts_1 < dev_ts_2 < ts, with: + * + * - dev_ts_1 = previous sync timestamp + * - dev_ts_2 = last sync timestamp + * - ts = event timestamp + * - ts_period = known sync period (theoretical) + * ~ dev_ts2 - dev_ts1 + * *but*: + * + * - time counters wrap (see adapter->ts_used_bits) + * - sometimes, dev_ts_1 < ts < dev_ts2 + * + * "normal" case (sync time counters increase): + * must take into account case when ts wraps (tsw) + * + * < ts_period > < > + * | | | + * ---+--------+----+-------0-+--+--> + * ts_dev_1 | ts_dev_2 | + * ts tsw + */ + if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { + /* case when event time (tsw) wraps */ + if (ts < time_ref->ts_dev_1) + delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits); - delta_us += time_ref->ts_total; + /* Otherwise, sync time counter (ts_dev_2) has wrapped: + * handle case when event time (tsn) hasn't. 
+ * + * < ts_period > < > + * | | | + * ---+--------+--0-+---------+--+--> + * ts_dev_1 | ts_dev_2 | + * tsn ts + */ + } else if (time_ref->ts_dev_1 < ts) { + delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits); + } - delta_us *= time_ref->adapter->us_per_ts_scale; + /* add delay between last sync and event timestamps */ + delta_ts += (signed int)(ts - time_ref->ts_dev_2); + + /* add time from beginning to last sync */ + delta_ts += time_ref->ts_total; + + /* convert ticks number into microseconds */ + delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *time = ktime_add_us(time_ref->tv_host_0, delta_us); @@ -815,7 +856,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_unregister_candev; + goto adap_dev_free; } /* get device number early */ @@ -827,6 +868,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, return 0; +adap_dev_free: + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + lbl_unregister_candev: unregister_candev(netdev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 47cc1ff5b88e..96bbdef672bc 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct canfd_frame *cfd; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(rm->flags); + if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_msg_get_channel(rm)]; + netdev = dev->netdev; + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(netdev, &cfd); @@ -506,11 +512,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, else memcpy(cfd->data, rm->d, cfd->len); - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); - netdev->stats.rx_packets++; netdev->stats.rx_bytes += cfd->len; + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); + return 0; } @@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; enum can_state rx_state, tx_state; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); + netdev = dev->netdev; + /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; @@ -566,11 +578,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, if (!skb) return -ENOMEM; - peak_usb_netif_rx(skb, 
&usb_if->time_ref, le32_to_cpu(sm->ts_low)); - netdev->stats.rx_packets++; netdev->stats.rx_bytes += cf->can_dlc; + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); + return 0; } @@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; + struct peak_usb_device *dev; + + if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pucan_ermsg_get_channel(er)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* keep a trace of tx and rx error counters for later use */ pdev->bec.txerr = er->tx_err_cnt; @@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pufd_omsg_get_channel(ov)]; + netdev = dev->netdev; + /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); if (!skb) @@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, u16 tx_msg_size, tx_msg_flags; u8 can_dlc; + if (cfd->len > CANFD_MAX_DLEN) + return -EINVAL; + tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); tx_msg->size = cpu_to_le16(tx_msg_size); tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 39ca14b0585d..067705e2850b 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; - dev->ml_priv = netdev_priv(dev); + can_set_ml_priv(dev, netdev_priv(dev)); /* set flags according to driver capabilities */ if (echo) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index d6ba9426be4d..7000c6cd1e48 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *peer; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *peerstats, *srcstats = &dev->stats; + u8 len; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = peer; skb->ip_summed = CHECKSUM_UNNECESSARY; + len = cfd->len; if (netif_rx_ni(skb) == NET_RX_SUCCESS) { srcstats->tx_packets++; - srcstats->tx_bytes += cfd->len; + srcstats->tx_bytes += len; peerstats = &peer->stats; peerstats->rx_packets++; - peerstats->rx_bytes += cfd->len; + peerstats->rx_bytes += len; } out_unlock: @@ -139,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = { static void vxcan_setup(struct net_device *dev) { + struct can_ml_priv *can_ml; + dev->type = ARPHRD_CAN; dev->mtu = CANFD_MTU; dev->hard_header_len = 0; @@ -147,7 +151,9 @@ static void vxcan_setup(struct net_device *dev) dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = 
&vxcan_netdev_ops; dev->needs_free_netdev = true; - dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); + + can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); + can_set_ml_priv(dev, can_ml); } /* forward declaration for rtnl_create_link() */ diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 2be846ee627d..0de39ebb3566 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1384,7 +1384,7 @@ static int xcan_open(struct net_device *ndev) if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); - return ret; + goto err; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, @@ -1468,6 +1468,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev, if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); + pm_runtime_put(priv->dev); return ret; } @@ -1783,7 +1784,7 @@ static int xcan_probe(struct platform_device *pdev) if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); - goto err_pmdisable; + goto err_disableclks; } if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { @@ -1818,7 +1819,6 @@ static int xcan_probe(struct platform_device *pdev) err_disableclks: pm_runtime_put(priv->dev); -err_pmdisable: pm_runtime_disable(&pdev->dev); err_free: free_candev(ndev); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index d618650533b6..e78b683f7305 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -514,6 +514,19 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) } EXPORT_SYMBOL(b53_imp_vlan_setup); +static void b53_port_set_learning(struct b53_device *dev, int port, + bool learning) +{ + u16 reg; + + b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); + if (learning) + reg &= ~BIT(port); + else + reg |= BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); +} + int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; @@ -527,6 +540,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = ds->ports[port].cpu_dp->index; b53_br_egress_floods(ds, port, true, true); + b53_port_set_learning(dev, port, false); if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); @@ -645,6 +659,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_brcm_hdr_setup(dev->ds, port); b53_br_egress_floods(dev->ds, port, true, true); + b53_port_set_learning(dev, port, false); } static void b53_enable_mib(struct b53_device *dev) @@ -981,13 +996,6 @@ static int b53_setup(struct dsa_switch *ds) b53_disable_port(ds, port); } - /* Let DSA handle the case were multiple bridges span the same switch - * device and different VLAN awareness settings are requested, which - * would be breaking filtering semantics for any of the other bridge - * devices. 
(not hardware supported) - */ - ds->vlan_filtering_is_global = true; - return ret; } @@ -1330,7 +1338,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) return -EOPNOTSUPP; - if (vlan->vid_end > dev->num_vlans) + if (vlan->vid_end >= dev->num_vlans) return -ERANGE; b53_enable_vlan(dev, true, ds->vlan_filtering); @@ -1441,6 +1449,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) reg |= ARLTBL_RW; else reg &= ~ARLTBL_RW; + if (dev->vlan_enabled) + reg &= ~ARLTBL_IVL_SVL_SELECT; + else + reg |= ARLTBL_IVL_SVL_SELECT; b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); return b53_arl_op_wait(dev); @@ -1450,6 +1462,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, u16 vid, struct b53_arl_entry *ent, u8 *idx, bool is_valid) { + DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); unsigned int i; int ret; @@ -1457,6 +1470,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, if (ret) return ret; + bitmap_zero(free_bins, dev->num_arl_entries); + /* Read the bins */ for (i = 0; i < dev->num_arl_entries; i++) { u64 mac_vid; @@ -1468,13 +1483,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); b53_arl_to_entry(ent, mac_vid, fwd_entry); - if (!(fwd_entry & ARLTBL_VALID)) + if (!(fwd_entry & ARLTBL_VALID)) { + set_bit(i, free_bins); continue; + } if ((mac_vid & ARLTBL_MAC_MASK) != mac) continue; + if (dev->vlan_enabled && + ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) + continue; *idx = i; + return 0; } + if (bitmap_weight(free_bins, dev->num_arl_entries) == 0) + return -ENOSPC; + + *idx = find_first_bit(free_bins, dev->num_arl_entries); + return -ENOENT; } @@ -1504,15 +1530,27 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, if (op) return ret; - /* We could not find a matching MAC, so reset to a new entry */ - if (ret) { + switch (ret) { + case -ETIMEDOUT: + return ret; + case -ENOSPC: + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", + addr, vid); + return is_valid ? ret : 0; + case -ENOENT: + /* We could not find a matching MAC, so reset to a new entry */ + dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", + addr, vid, idx); fwd_entry = 0; - idx = 1; + break; + default: + dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", + addr, vid, idx); + break; } memset(&ent, 0, sizeof(ent)); ent.port = port; - ent.is_valid = is_valid; ent.vid = vid; ent.is_static = true; memcpy(ent.mac, addr, ETH_ALEN); @@ -1674,6 +1712,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; + b53_port_set_learning(dev, port, true); + return 0; } EXPORT_SYMBOL(b53_br_join); @@ -1721,6 +1761,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) vl->untag |= BIT(port) | BIT(cpu_port); b53_set_vlan_entry(dev, pvid, vl); } + b53_port_set_learning(dev, port, false); } EXPORT_SYMBOL(b53_br_leave); @@ -2370,6 +2411,13 @@ struct b53_device *b53_switch_alloc(struct device *base, dev->priv = priv; dev->ops = ops; ds->ops = &b53_switch_ops; + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which + * would be breaking filtering semantics for any of the other bridge + * devices. 
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 2a9f421680aa..b2c539a42154 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -115,6 +115,7 @@ #define B53_UC_FLOOD_MASK 0x32 #define B53_MC_FLOOD_MASK 0x34 #define B53_IPMC_FLOOD_MASK 0x36 +#define B53_DIS_LEARNING 0x3c /* * Override Ports 0-7 State on devices with xMII interfaces (8 bit) @@ -292,6 +293,7 @@ /* ARL Table Read/Write Register (8 bit) */ #define B53_ARLTBL_RW_CTRL 0x00 #define ARLTBL_RW BIT(0) +#define ARLTBL_IVL_SVL_SELECT BIT(6) #define ARLTBL_START_DONE BIT(7) /* MAC Address Index Register (48 bit) */ @@ -304,7 +306,7 @@ * * BCM5325 and BCM5365 share most definitions below */ -#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) +#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 #define ARLTBL_VID_MASK_25 0xff @@ -316,13 +318,16 @@ #define ARLTBL_VALID_25 BIT(63) /* ARL Table Data Entry N Registers (32 bit) */ -#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08) +#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18) #define ARLTBL_DATA_PORT_ID_MASK 0x1ff #define ARLTBL_TC(tc) ((3 & tc) << 11) #define ARLTBL_AGE BIT(14) #define ARLTBL_STATIC BIT(15) #define ARLTBL_VALID BIT(16) +/* Maximum number of bin entries in the ARL for all switches */ +#define B53_ARLTBL_MAX_BIN_ENTRIES 4 + /* ARL Search Control Register (8 bit) */ #define B53_ARL_SRCH_CTL 0x50 #define B53_ARL_SRCH_CTL_25 0x20 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9502db66092e..0ee1c0a7b165 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -172,11 +172,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - /* Enable learning */ - reg = core_readl(priv, CORE_DIS_LEARN); - reg &= ~BIT(port); - core_writel(priv, reg, CORE_DIS_LEARN); - /* Enable Broadcom tags for that port if requested */ if (priv->brcm_tag_mask & BIT(port)) b53_brcm_hdr_setup(ds, port); @@ -421,15 +416,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) /* Find our integrated MDIO bus node */ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); priv->master_mii_bus = of_mdio_find_bus(dn); - if (!priv->master_mii_bus) + if (!priv->master_mii_bus) { + of_node_put(dn); return -EPROBE_DEFER; + } get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); - if (!priv->slave_mii_bus) + if (!priv->slave_mii_bus) { + of_node_put(dn); return -ENOMEM; + } priv->slave_mii_bus->priv = priv; priv->slave_mii_bus->name = "sf2 slave mii"; @@ -480,8 +479,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is.
*/ - - return priv->hw_params.gphy_rev; + if (priv->int_phy_mask & BIT(port)) + return priv->hw_params.gphy_rev; + else + return 0; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, @@ -1116,6 +1117,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); + /* Balance of_node_put() done by of_find_node_by_name() */ + of_node_get(dn); ports = of_find_node_by_name(dn, "ports"); if (ports) { bcm_sf2_identify_ports(priv, ports); diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 471837cf0b21..e15d18bb981e 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -882,17 +882,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, fs->m_ext.data[1])) return -EINVAL; - if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) return -EINVAL; if (fs->location != RX_CLS_LOC_ANY && test_bit(fs->location, priv->cfp.used)) return -EBUSY; - if (fs->location != RX_CLS_LOC_ANY && - fs->location > bcm_sf2_cfp_rule_size(priv)) - return -EINVAL; - ret = bcm_sf2_cfp_rule_cmp(priv, port, fs); if (ret == 0) return -EEXIST; @@ -973,7 +970,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc) struct cfp_rule *rule; int ret; - if (loc >= CFP_NUM_RULES) + if (loc > bcm_sf2_cfp_rule_size(priv)) return -EINVAL; /* Refuse deleting unused rules, and those that are not unique since diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 925ed135a4d9..0df6c2b9484a 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -356,6 +356,7 @@ static void __exit dsa_loop_exit(void) } module_exit(dsa_loop_exit); +MODULE_SOFTDEP("pre: dsa_loop_bdinfo"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Fainelli"); MODULE_DESCRIPTION("DSA loopback driver"); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index a69c9b9878b7..dc75e798dbff 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -26,6 +26,7 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/if_bridge.h> @@ -91,11 +92,13 @@ GSWIP_MDIO_PHY_FDUP_MASK) /* GSWIP MII Registers */ -#define GSWIP_MII_CFG0 0x00 -#define GSWIP_MII_CFG1 0x02 -#define GSWIP_MII_CFG5 0x04 +#define GSWIP_MII_CFGp(p) (0x2 * (p)) +#define GSWIP_MII_CFG_RESET BIT(15) #define GSWIP_MII_CFG_EN BIT(14) +#define GSWIP_MII_CFG_ISOLATE BIT(13) #define GSWIP_MII_CFG_LDCLKDIS BIT(12) +#define GSWIP_MII_CFG_RGMII_IBS BIT(8) +#define GSWIP_MII_CFG_RMII_CLK BIT(7) #define GSWIP_MII_CFG_MODE_MIIP 0x0 #define GSWIP_MII_CFG_MODE_MIIM 0x1 #define GSWIP_MII_CFG_MODE_RMIIP 0x2 @@ -191,6 +194,23 @@ #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA)) #define GSWIP_MAC_FLEN 0x8C5 +#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_0_PADEN BIT(8) +#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7) +#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070 +#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010 +#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020 +#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030 +#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040 +#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C +#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004 +#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C +#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003 +#define 
GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 +#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */ @@ -391,17 +411,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, int port) { - switch (port) { - case 0: - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0); - break; - case 1: - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1); - break; - case 5: - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5); - break; - } + /* There's no MII_CFG register for the CPU port */ + if (!dsa_is_cpu_port(priv->ds, port)) + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port)); } static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, @@ -662,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, GSWIP_SDMA_PCTRLp(port)); if (!dsa_is_cpu_port(ds, port)) { - u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | - GSWIP_MDIO_PHY_SPEED_AUTO | - GSWIP_MDIO_PHY_FDUP_AUTO | - GSWIP_MDIO_PHY_FCONTX_AUTO | - GSWIP_MDIO_PHY_FCONRX_AUTO | - (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK); + u32 mdio_phy = 0; - gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port)); - /* Activate MDIO auto polling */ - gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0); + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); } return 0; @@ -684,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port) if (!dsa_is_user_port(ds, port)) return; - if (!dsa_is_cpu_port(ds, port)) { - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, - GSWIP_MDIO_PHY_LINK_MASK, - GSWIP_MDIO_PHYp(port)); - /* Deactivate MDIO auto polling */ - gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0); - } - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, GSWIP_FDMA_PCTRLp(port)); gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, @@ -799,15 +800,32 @@ static int gswip_setup(struct dsa_switch *ds) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3); - /* disable PHY auto polling */ + /* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have an + * interoperability problem with this auto polling mechanism because + * their status registers think that the link is in a different state + * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set + * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the + * auto polling state machine consider the link to be negotiated at + * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads + * to the switch port being completely dead (RX and TX are both not + * working). + * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F + * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes + * it would work fine for a few minutes to hours and then stop; on + * other devices no traffic could be sent or received at all. + * Testing shows that when PHY auto polling is disabled these problems + * go away. + */ gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
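The GSWIP_MII_CFGp(p) macro introduced above makes the register layout explicit: each port's MII_CFG register appears to sit at a stride of 0x2 per port. Assuming that stride, the old fixed GSWIP_MII_CFG5 value of 0x04 would actually have addressed port 2's register, while port 5's register lives at 0x0A — presumably why the rewritten gswip_mii_mask_cfg() computes the offset instead of special-casing ports. A quick check of the mapping:

	#include <stdio.h>

	#define GSWIP_MII_CFGp(p)	(0x2 * (p))	/* per-port MII_CFG offset */

	int main(void)
	{
		int port;

		for (port = 0; port <= 5; port++)
			printf("port %d -> MII_CFG at 0x%02x\n",
			       port, GSWIP_MII_CFGp(port));
		/* port 5 -> 0x0a, not the 0x04 the old GSWIP_MII_CFG5 used */
		return 0;
	}

Note also that the CPU port has no MII_CFG register at all, hence the dsa_is_cpu_port() guard.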
+ /* Configure the MDIO clock to 2.5 MHz */ gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); - /* Disable the xMII link */ - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0); - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1); - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5); + /* Disable the xMII interface and clear its isolation bit */ + for (i = 0; i < priv->hw_info->max_ports; i++) + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, + 0, i); /* enable special tag insertion on cpu port */ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, @@ -1429,11 +1447,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); - /* With the exclusion of MII and Reverse MII, we support Gigabit, - * including Half duplex + /* With the exclusion of MII, Reverse MII and Reduced MII, we + * support Gigabit, including Half duplex */ if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII) { + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_RMII) { phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseT_Half); } @@ -1451,10 +1470,117 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, unsupported: bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); + dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", + phy_modes(state->interface), port); return; } +static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +{ + u32 mdio_phy; + + if (link) + mdio_phy = GSWIP_MDIO_PHY_LINK_UP; + else + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, + phy_interface_t interface) +{ + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; + + switch (speed) { + case SPEED_10: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M2P5; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_100: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M25; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_1000: + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; + + mii_cfg = GSWIP_MII_CFG_RATE_M125; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; + break; + } + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); +} + +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) +{ + u32 mac_ctrl_0, mdio_phy; + + if (duplex == DUPLEX_FULL) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_pause(struct gswip_priv *priv, int port, + bool tx_pause, bool rx_pause)
+{ + u32 mac_ctrl_0, mdio_phy; + + if (tx_pause && rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_EN; + } else if (tx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_DIS; + } else if (rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK, + mac_ctrl_0, GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, + GSWIP_MDIO_PHY_FCONTX_MASK | + GSWIP_MDIO_PHY_FCONRX_MASK, + mdio_phy, GSWIP_MDIO_PHYp(port)); +} + static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) @@ -1474,6 +1600,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, break; case PHY_INTERFACE_MODE_RMII: miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + + /* Configure the RMII clock as output: */ + miicfg |= GSWIP_MII_CFG_RMII_CLK; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: @@ -1486,7 +1615,16 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, "Unsupported interface: %d\n", state->interface); return; } - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port); + + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | + GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, + miicfg, port); + + gswip_port_set_speed(priv, port, state->speed, state->interface); + gswip_port_set_duplex(priv, port, state->duplex); + gswip_port_set_pause(priv, port, !!(state->pause & MLO_PAUSE_TX), + !!(state->pause & MLO_PAUSE_RX)); switch (state->interface) { case PHY_INTERFACE_MODE_RGMII_ID: @@ -1511,6 +1649,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, struct gswip_priv *priv = ds->priv; gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); + + if (!dsa_is_cpu_port(ds, port)) + gswip_port_set_link(priv, port, false); } static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -1520,9 +1661,10 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, { struct gswip_priv *priv = ds->priv; - /* Enable the xMII interface only for the external PHY */ - if (interface != PHY_INTERFACE_MODE_INTERNAL) - gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); + if (!dsa_is_cpu_port(ds, port)) + gswip_port_set_link(priv, port, true); + + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); } static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, @@ -1817,6 +1959,16 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, i++; } + /* The standalone PHY11G requires 300ms to be fully + * initialized and ready for any MDIO communication after being + * taken out of reset. For the SoC-internal GPHY variant there + * is no (known) documentation for the minimum time after a + * reset. Use the same value as for the standalone variant as + * some users have reported internal PHYs not being detected + * without any delay. 
+ */ + msleep(300); + return 0; remove_gphy: diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 24a5e99f7fd5..84c4319e3b31 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1267,6 +1267,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 50ffc63d6231..49ab1346dc3f 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -973,23 +973,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz9477_phy_setup(struct ksz_device *dev, int port, - struct phy_device *phy) -{ - /* Only apply to port with PHY. */ - if (port >= dev->phy_port_cnt) - return; - - /* The MAC actually cannot run in 1000 half-duplex mode. */ - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* PHY does not support gigabit. */ - if (!(dev->features & GBIT_SUPPORT)) - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); -} - static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) { bool gbit; @@ -1587,6 +1570,9 @@ static int ksz9477_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } @@ -1599,7 +1585,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, - .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, @@ -1613,7 +1598,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { int ksz9477_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz9477_dev_ops); + int ret, i; + struct phy_device *phydev; + + ret = ksz_switch_register(dev, &ksz9477_dev_ops); + if (ret) + return ret; + + for (i = 0; i < dev->phy_port_cnt; ++i) { + if (!dsa_is_user_port(dev->ds, i)) + continue; + + phydev = dsa_to_port(dev->ds, i)->slave->phydev; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. */ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + } + return ret; } EXPORT_SYMBOL(ksz9477_switch_register); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fe47180c908b..7fabc0e3d807 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -366,8 +366,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - if (dev->dev_ops->phy_setup) - dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. 
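One general point behind the ksz9477 rework above: phy_remove_link_mode() operates on a live phy_device, so the fixups can only run once the user ports' PHYs have been attached — which is why they moved from a dev_ops hook into ksz9477_switch_register(), after ksz_switch_register() completes. A hedged sketch of the same pattern in isolation (the "no_gbit" capability flag is hypothetical, standing in for whatever the hardware actually reports):

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	/* Sketch: strip link modes a MAC cannot honour from an attached PHY. */
	static void fixup_phy_modes(struct phy_device *phydev, bool no_gbit)
	{
		/* The MAC cannot run gigabit half-duplex. */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

		/* Fast-Ethernet-only variants must not advertise gigabit. */
		if (no_gbit)
			phy_remove_link_mode(phydev,
					     ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
	}

Removing the modes before the link first comes up keeps them out of both the supported and advertised masks, so autonegotiation never offers them.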
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a20ebb749377..061142b183cb 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -120,8 +120,6 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8071c3fa3fb7..00d680cb4441 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -66,58 +66,6 @@ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(1, 0xb8, "RxArlDrop"), }; -static int -mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val) -{ - int ret; - - ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val); - if (ret < 0) - dev_err(priv->dev, - "failed to priv write register\n"); - return ret; -} - -static u32 -mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg) -{ - int ret; - u32 val; - - ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val); - if (ret < 0) { - dev_err(priv->dev, - "failed to priv read register\n"); - return ret; - } - - return val; -} - -static void -mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg, - u32 mask, u32 set) -{ - u32 val; - - val = mt7623_trgmii_read(priv, reg); - val &= ~mask; - val |= set; - mt7623_trgmii_write(priv, reg, val); -} - -static void -mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val) -{ - mt7623_trgmii_rmw(priv, reg, 0, val); -} - -static void -mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val) -{ - mt7623_trgmii_rmw(priv, reg, val, 0); -} - static int core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) { @@ -530,27 +478,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode) for (i = 0 ; i < NUM_TRGMII_CTRL; i++) mt7530_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_MASK, RD_TAP(16)); - else - if (priv->id != ID_MT7621) - mt7623_trgmii_set(priv, GSW_INTF_MODE, - INTF_MODE_TRGMII); - - return 0; -} - -static int -mt7623_pad_clk_setup(struct dsa_switch *ds) -{ - struct mt7530_priv *priv = ds->priv; - int i; - - for (i = 0 ; i < NUM_TRGMII_CTRL; i++) - mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i), - TD_DM_DRVP(8) | TD_DM_DRVN(8)); - - mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL); - mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST); - return 0; } @@ -712,11 +639,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, mt7530_write(priv, MT7530_PVC_P(port), PORT_SPEC_TAG); - /* Disable auto learning on the cpu port */ - mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); - - /* Unknown unicast frame fordwarding to the cpu port */ - mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); + /* Unknown multicast frame forwarding to the cpu port */ + mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); /* Set CPU port number */ if (priv->id == ID_MT7621) @@ -857,8 +781,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) */ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, MT7530_PORT_MATRIX_MODE); - mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, - VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK 
| PVC_EG_TAG_MASK, + VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); for (i = 0; i < MT7530_NUM_PORTS; i++) { if (dsa_is_user_port(ds, i) && @@ -874,8 +799,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) if (all_user_ports_removed) { mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT), PCR_MATRIX(dsa_user_ports(priv->ds))); - mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), - PORT_SPEC_TAG); + mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG + | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } } @@ -893,16 +818,22 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS)); /* Trapped into security mode allows packet forwarding through VLAN - * table lookup. + * table lookup. CPU port is set to fallback mode to let untagged + * frames pass through. */ - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, - MT7530_PORT_SECURITY_MODE); + if (dsa_is_cpu_port(ds, port)) + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_FALLBACK_MODE); + else + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_SECURITY_MODE); /* Set the port as a user port which is to be able to recognize VID * from incoming packets before fetching entry within the VLAN table. */ - mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, - VLAN_ATTR(MT7530_VLAN_USER)); + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK, + VLAN_ATTR(MT7530_VLAN_USER) | + PVC_EG_TAG(MT7530_VLAN_EG_DISABLED)); } static void @@ -1255,10 +1186,6 @@ mt7530_setup(struct dsa_switch *ds) dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent; if (priv->id == ID_MT7530) { - priv->ethernet = syscon_node_to_regmap(dn); - if (IS_ERR(priv->ethernet)) - return PTR_ERR(priv->ethernet); - regulator_set_voltage(priv->core_pwr, 1000000, 1000000); ret = regulator_enable(priv->core_pwr); if (ret < 0) { @@ -1321,8 +1248,6 @@ mt7530_setup(struct dsa_switch *ds) /* Enable and reset MIB counters */ mt7530_mib_reset(ds); - mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); - for (i = 0; i < MT7530_NUM_PORTS; i++) { /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, @@ -1332,6 +1257,10 @@ mt7530_setup(struct dsa_switch *ds) mt7530_cpu_port_enable(priv, i); else mt7530_port_disable(ds, i); + + /* Enable consistent egress tag */ + mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } /* Setup port 5 */ @@ -1416,14 +1345,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, /* Setup TX circuit incluing relevant PAD and driving */ mt7530_pad_clk_setup(ds, state->interface); - if (priv->id == ID_MT7530) { - /* Setup RX circuit, relevant PAD and driving on the - * host which must be placed after the setup on the - * device side is all finished. - */ - mt7623_pad_clk_setup(ds); - } - priv->p6_interface = state->interface; break; default: @@ -1535,7 +1456,7 @@ unsupported: phylink_set(mask, 100baseT_Full); if (state->interface != PHY_INTERFACE_MODE_MII) { - phylink_set(mask, 1000baseT_Half); + /* This switch only supports 1G full-duplex. 
*/ phylink_set(mask, 1000baseT_Full); if (port == 5) phylink_set(mask, 1000baseX_Full); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index ccb9da8cad0d..3ef7b5a6fc22 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -31,6 +31,7 @@ enum { #define MT7530_MFC 0x10 #define BC_FFP(x) (((x) & 0xff) << 24) #define UNM_FFP(x) (((x) & 0xff) << 16) +#define UNM_FFP_MASK UNM_FFP(~0) #define UNU_FFP(x) (((x) & 0xff) << 8) #define UNU_FFP_MASK UNU_FFP(~0) #define CPU_EN BIT(7) @@ -147,6 +148,12 @@ enum mt7530_port_mode { /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */ MT7530_PORT_MATRIX_MODE = PORT_VLAN(0), + /* Fallback Mode: Forward frames received on ingress ports that are + * not members of the VLAN. Frames whose VID is not listed in + * the VLAN table are forwarded by the PCR_MATRIX members. + */ + MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1), + /* Security Mode: Discard any frame due to ingress membership * violation or VID missed on the VLAN table. */ @@ -167,9 +174,16 @@ enum mt7530_port_mode { /* Register for port vlan control */ #define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100)) #define PORT_SPEC_TAG BIT(5) +#define PVC_EG_TAG(x) (((x) & 0x7) << 8) +#define PVC_EG_TAG_MASK PVC_EG_TAG(7) #define VLAN_ATTR(x) (((x) & 0x3) << 6) #define VLAN_ATTR_MASK VLAN_ATTR(3) +enum mt7530_vlan_port_eg_tag { + MT7530_VLAN_EG_DISABLED = 0, + MT7530_VLAN_EG_CONSISTENT = 1, +}; + enum mt7530_vlan_port_attr { MT7530_VLAN_USER = 0, MT7530_VLAN_TRANSPARENT = 3, @@ -268,7 +282,6 @@ enum mt7530_vlan_port_attr { /* Registers for TRGMII on the both side */ #define MT7530_TRGMII_RCK_CTRL 0x7a00 -#define GSW_TRGMII_RCK_CTRL 0x300 #define RX_RST BIT(31) #define RXC_DQSISEL BIT(30) #define DQSI1_TAP_MASK (0x7f << 8) @@ -277,31 +290,24 @@ enum mt7530_vlan_port_attr { #define DQSI0_TAP(x) ((x) & 0x7f) #define MT7530_TRGMII_RCK_RTT 0x7a04 -#define GSW_TRGMII_RCK_RTT 0x304 #define DQS1_GATE BIT(31) #define DQS0_GATE BIT(30) #define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8) -#define GSW_TRGMII_RD(x) (0x310 + (x) * 8) #define BSLIP_EN BIT(31) #define EDGE_CHK BIT(30) #define RD_TAP_MASK 0x7f #define RD_TAP(x) ((x) & 0x7f) -#define GSW_TRGMII_TXCTRL 0x340 #define MT7530_TRGMII_TXCTRL 0x7a40 #define TRAIN_TXEN BIT(31) #define TXC_INV BIT(30) #define TX_RST BIT(28) #define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i)) -#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i)) #define TD_DM_DRVP(x) ((x) & 0xf) #define TD_DM_DRVN(x) (((x) & 0xf) << 4) -#define GSW_INTF_MODE 0x390 -#define INTF_MODE_TRGMII BIT(1) - #define MT7530_TRGMII_TCK_CTRL 0x7a78 #define TCK_TAP(x) (((x) & 0xf) << 8) @@ -434,7 +440,6 @@ static const char *p5_intf_modes(unsigned int p5_interface) * @ds: The pointer to the dsa core structure * @bus: The bus used for the device and built-in PHY * @rstc: The pointer to reset control used by MCM - * @ethernet: The regmap used for access TRGMII-based registers * @core_pwr: The power supplied into the core * @io_pwr: The power supplied into the I/O * @reset: The descriptor for GPIO line tied to its reset pin @@ -451,7 +456,6 @@ struct mt7530_priv { struct dsa_switch *ds; struct mii_bus *bus; struct reset_control *rstc; - struct regmap *ethernet; struct regulator *core_pwr; struct regulator *io_pwr; struct gpio_desc *reset; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6787d560e9e3..446eb06e50b4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1517,7 +1517,11 @@ static int 
mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (!entry.portvec) entry.state = 0; } else { - entry.portvec |= BIT(port); + if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) + entry.portvec = BIT(port); + else + entry.portvec |= BIT(port); + entry.state = state; } @@ -2143,6 +2147,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); + + mv88e6xxx_g1_wait_eeprom_done(chip); } } @@ -2760,10 +2766,17 @@ unlock: return err; } +/* prod_id for switch families which do not have a PHY model number */ +static const u16 family_prod_id_table[] = { + [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, + [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390, +}; + static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; struct mv88e6xxx_chip *chip = mdio_bus->chip; + u16 prod_id; u16 val; int err; @@ -2774,23 +2787,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); mv88e6xxx_reg_unlock(chip); - if (reg == MII_PHYSID2) { - /* Some internal PHYs don't have a model number. */ - if (chip->info->family != MV88E6XXX_FAMILY_6165) - /* Then there is the 6165 family. It gets is - * PHYs correct. But it can also have two - * SERDES interfaces in the PHY address - * space. And these don't have a model - * number. But they are not PHYs, so we don't - * want to give them something a PHY driver - * will recognise. - * - * Use the mv88e6390 family model number - * instead, for anything which really could be - * a PHY, - */ - if (!(val & 0x3f0)) - val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; + /* Some internal PHYs don't have a model number. */ + if (reg == MII_PHYSID2 && !(val & 0x3f0) && + chip->info->family < ARRAY_SIZE(family_prod_id_table)) { + prod_id = family_prod_id_table[chip->info->family]; + if (prod_id) + val |= prod_id >> 4; } return err ? err : val; @@ -3063,7 +3065,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 8a903624fdd7..938dd146629f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } +void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) +{ + const unsigned long timeout = jiffies + 1 * HZ; + u16 val; + int err; + + /* Wait up to 1 second for the switch to finish reading the + * EEPROM. + */ + while (time_before(jiffies, timeout)) { + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); + if (err) { + dev_err(chip->dev, "Error reading status"); + return; + } + + /* If the switch is still resetting, it may not + * respond on the bus, and so MDIO read returns + * 0xffff. Differentiate between that, and waiting for + * the EEPROM to be done by bit 0 being set. 
+ */ + if (val != 0xffff && + val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)) + return; + + usleep_range(1000, 2000); + } + + dev_err(chip->dev, "Timeout waiting for EEPROM done"); +} + /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 0ae96a1e919b..08d66ef6aace 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -277,6 +277,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); +void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 33056a609e96..fcfda1a1cecf 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -125,11 +125,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip, * Offset 0x08: VTU/STU Data Register 2 * Offset 0x09: VTU/STU Data Register 3 */ - -static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip, + u16 *regs) { - u16 regs[3]; int i; /* Read all 3 VTU/STU Data registers */ @@ -142,12 +140,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip, return err; } - /* Extract MemberTag and PortState data */ + return 0; +} + +static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 regs[3]; + int err; + int i; + + err = mv88e6185_g1_vtu_stu_data_read(chip, regs); + if (err) + return err; + + /* Extract MemberTag data */ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { unsigned int member_offset = (i % 4) * 4; - unsigned int state_offset = member_offset + 2; entry->member[i] = (regs[i / 4] >> member_offset) & 0x3; + } + + return 0; +} + +static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 regs[3]; + int err; + int i; + + err = mv88e6185_g1_vtu_stu_data_read(chip, regs); + if (err) + return err; + + /* Extract PortState data */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + unsigned int state_offset = (i % 4) * 4 + 2; + entry->state[i] = (regs[i / 4] >> state_offset) & 0x3; } @@ -320,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip, if (err) return err; + err = mv88e6185_g1_stu_data_read(chip, entry); + if (err) + return err; + /* VTU DBNum[3:0] are located in VTU Operation 3:0 * VTU DBNum[5:4] are located in VTU Operation 9:8 */ @@ -349,6 +384,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, if (err) return err; + err = mv88e6185_g1_stu_data_read(chip, entry); + if (err) + return err; + /* VTU DBNum[3:0] are located in VTU Operation 3:0 * VTU DBNum[7:4] are located in VTU Operation 11:8 */ @@ -374,11 +413,6 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip, return err; if (entry->valid) { - /* Fetch (and mask) VLAN PortState data from the STU */ - err = mv88e6xxx_g1_vtu_stu_get(chip, entry); - if (err) - return err; - err = mv88e6185_g1_vtu_data_read(chip, entry); if 
(err) return err; @@ -386,6 +420,15 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip, err = mv88e6xxx_g1_vtu_fid_read(chip, entry); if (err) return err; + + /* Fetch VLAN PortState data from the STU */ + err = mv88e6xxx_g1_vtu_stu_get(chip, entry); + if (err) + return err; + + err = mv88e6185_g1_stu_data_read(chip, entry); + if (err) + return err; } return 0; diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h index 9a63b51e1d82..6f2dab7e33d6 100644 --- a/drivers/net/dsa/realtek-smi-core.h +++ b/drivers/net/dsa/realtek-smi-core.h @@ -25,6 +25,9 @@ struct rtl8366_mib_counter { const char *name; }; +/** + * struct rtl8366_vlan_mc - Virtual LAN member configuration + */ struct rtl8366_vlan_mc { u16 vid; u16 untag; @@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi); int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, u32 untag, u32 fid); -int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, unsigned int vid); int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index ac88caca5ad4..49c626a33680 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -36,113 +36,66 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) } EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); -int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, - u32 untag, u32 fid) +/** + * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration + * @smi: the Realtek SMI device instance + * @vid: the VLAN ID to look up or allocate + * @vlanmc: the structure pointed to is filled in with a valid member config + * if successful + * @return: index of the existing or newly created member config, or a + * negative error number + */ +static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid, + struct rtl8366_vlan_mc *vlanmc) { struct rtl8366_vlan_4k vlan4k; int ret; int i; - /* Update the 4K table */ - ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); - if (ret) - return ret; - - vlan4k.member = member; - vlan4k.untag = untag; - vlan4k.fid = fid; - ret = smi->ops->set_vlan_4k(smi, &vlan4k); - if (ret) - return ret; - - /* Try to find an existing MC entry for this VID */ + /* Try to find an existing member config entry for this VID */ for (i = 0; i < smi->num_vlan_mc; i++) { - struct rtl8366_vlan_mc vlanmc; - - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); - if (ret) - return ret; - - if (vid == vlanmc.vid) { - /* update the MC entry */ - vlanmc.member = member; - vlanmc.untag = untag; - vlanmc.fid = fid; - - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); - break; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(rtl8366_set_vlan); - -int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) -{ - struct rtl8366_vlan_mc vlanmc; - int ret; - int index; - - ret = smi->ops->get_mc_index(smi, port, &index); - if (ret) - return ret; - - ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); - if (ret) - return ret; - - *val = vlanmc.vid; - return 0; -} -EXPORT_SYMBOL_GPL(rtl8366_get_pvid); - -int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, - unsigned int vid) -{ - struct rtl8366_vlan_mc vlanmc; - struct rtl8366_vlan_4k vlan4k; - int ret; - int i; - - /* Try to find an existing MC entry for this VID */ - for (i = 0; i < smi->num_vlan_mc; i++) { - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); - if 
(ret) - return ret; - - if (vid == vlanmc.vid) { - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); - if (ret) - return ret; - - ret = smi->ops->set_mc_index(smi, port, i); + ret = smi->ops->get_vlan_mc(smi, i, vlanmc); + if (ret) { + dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", + i, vid); return ret; } + + if (vid == vlanmc->vid) + return i; } /* We have no MC entry for this VID, try to find an empty one */ for (i = 0; i < smi->num_vlan_mc; i++) { - ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); - if (ret) + ret = smi->ops->get_vlan_mc(smi, i, vlanmc); + if (ret) { + dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", + i, vid); return ret; + } - if (vlanmc.vid == 0 && vlanmc.member == 0) { + if (vlanmc->vid == 0 && vlanmc->member == 0) { /* Update the entry from the 4K table */ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); - if (ret) + if (ret) { + dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n", + i, vid); return ret; + } - vlanmc.vid = vid; - vlanmc.member = vlan4k.member; - vlanmc.untag = vlan4k.untag; - vlanmc.fid = vlan4k.fid; - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); - if (ret) + vlanmc->vid = vid; + vlanmc->member = vlan4k.member; + vlanmc->untag = vlan4k.untag; + vlanmc->fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, vlanmc); + if (ret) { + dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", + i, vid); return ret; + } - ret = smi->ops->set_mc_index(smi, port, i); - return ret; + dev_dbg(smi->dev, "created new MC at index %d for VID %d\n", + i, vid); + return i; } } @@ -160,24 +113,110 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, if (ret) return ret; - vlanmc.vid = vid; - vlanmc.member = vlan4k.member; - vlanmc.untag = vlan4k.untag; - vlanmc.fid = vlan4k.fid; - ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); - if (ret) + vlanmc->vid = vid; + vlanmc->member = vlan4k.member; + vlanmc->untag = vlan4k.untag; + vlanmc->fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, vlanmc); + if (ret) { + dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", + i, vid); return ret; - - ret = smi->ops->set_mc_index(smi, port, i); - return ret; + } + dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n", + i, vid); + return i; } } - dev_err(smi->dev, - "all VLAN member configurations are in use\n"); - + dev_err(smi->dev, "all VLAN member configurations are in use\n"); return -ENOSPC; } + +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) +{ + struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; + int mc; + int ret; + + if (!smi->ops->is_vlan_valid(smi, vid)) + return -EINVAL; + + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, member, untag); + + /* Update the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlan4k.member |= member; + vlan4k.untag |= untag; + vlan4k.fid = fid; + ret = smi->ops->set_vlan_4k(smi, &vlan4k); + if (ret) + return ret; + + dev_dbg(smi->dev, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + + /* Find or allocate a member config for this VID */ + ret = rtl8366_obtain_mc(smi, vid, &vlanmc); + if (ret < 0) + return ret; + mc = ret; + + /* Update the MC entry */ + vlanmc.member |= member; + vlanmc.untag |= untag; + vlanmc.fid = fid; + + /* Commit updates to the MC entry */ + ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc); + if (ret) + dev_err(smi->dev, "failed to commit changes to VLAN MC 
index %d for VID %d\n", + mc, vid); + else + dev_dbg(smi->dev, + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", + vid, vlanmc.member, vlanmc.untag); + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) +{ + struct rtl8366_vlan_mc vlanmc; + int mc; + int ret; + + if (!smi->ops->is_vlan_valid(smi, vid)) + return -EINVAL; + + /* Find or allocate a member config for this VID */ + ret = rtl8366_obtain_mc(smi, vid, &vlanmc); + if (ret < 0) + return ret; + mc = ret; + + ret = smi->ops->set_mc_index(smi, port, mc); + if (ret) { + dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n", + mc, port); + return ret; + } + + dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n", + port, vid, mc); + + return 0; +} EXPORT_SYMBOL_GPL(rtl8366_set_pvid); int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable) @@ -376,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (!smi->ops->is_vlan_valid(smi, vid)) return; - dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", + dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", + vlan->vid_begin, port, untagged ? "untagged" : "tagged", pvid ? " PVID" : "no PVID"); @@ -384,36 +424,31 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) dev_err(smi->dev, "port is DSA or CPU port\n"); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - int pvid_val = 0; - - dev_info(smi->dev, "add VLAN %04x\n", vid); + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { member |= BIT(port); if (untagged) untag |= BIT(port); - /* To ensure that we have a valid MC entry for this VLAN, - * initialize the port VLAN ID here. - */ - ret = rtl8366_get_pvid(smi, port, &pvid_val); - if (ret < 0) { - dev_err(smi->dev, "could not lookup PVID for port %d\n", - port); - return; - } - if (pvid_val == 0) { - ret = rtl8366_set_pvid(smi, port, vid); - if (ret < 0) - return; - } - } + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); - ret = rtl8366_set_vlan(smi, port, member, untag, 0); - if (ret) - dev_err(smi->dev, - "failed to set up VLAN %04x", - vid); + if (!pvid) + continue; + + ret = rtl8366_set_pvid(smi, port, vid); + if (ret) + dev_err(smi->dev, + "failed to set PVID on port %d to VLAN %04x", + port, vid); + + if (!ret) + dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n", + vid, port); + } } EXPORT_SYMBOL_GPL(rtl8366_vlan_add); @@ -439,13 +474,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, return ret; if (vid == vlanmc.vid) { - /* clear VLAN member configurations */ - vlanmc.vid = 0; - vlanmc.priority = 0; - vlanmc.member = 0; - vlanmc.untag = 0; - vlanmc.fid = 0; - + /* Remove this port from the VLAN */ + vlanmc.member &= ~BIT(port); + vlanmc.untag &= ~BIT(port); + /* + * If no ports are members of this VLAN + * anymore then clear the whole member + * config so it can be reused. 
+ */ + if (!vlanmc.member && !vlanmc.untag) { + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.fid = 0; + } ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); if (ret) { dev_err(smi->dev, diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index f5cc8b0a7c74..7f731bf36998 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -1269,7 +1269,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) if (smi->vlan4k_enabled) max = RTL8366RB_NUM_VIDS - 1; - if (vlan == 0 || vlan >= max) + if (vlan == 0 || vlan > max) return false; return true; diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 0537df06a9b5..95155a1f9f9d 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -432,7 +432,7 @@ static void emac_timeout(struct net_device *dev) /* Hardware start transmission. * Send a packet to media from the upper layer. */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); unsigned long channel; @@ -440,7 +440,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) channel = db->tx_fifo_stat & 3; if (channel == 3) - return 1; + return NETDEV_TX_BUSY; channel = (channel == 1 ? 1 : 0); @@ -845,13 +845,13 @@ static int emac_probe(struct platform_device *pdev) db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); - goto out_iounmap; + goto out_dispose_mapping; } ret = clk_prepare_enable(db->clk); if (ret) { dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); - goto out_iounmap; + goto out_dispose_mapping; } ret = sunxi_sram_claim(&pdev->dev); @@ -910,6 +910,8 @@ out_release_sram: sunxi_sram_release(&pdev->dev); out_clk_disable_unprepare: clk_disable_unprepare(db->clk); +out_dispose_mapping: + irq_dispose_mapping(ndev->irq); out_iounmap: iounmap(db->membase); out: @@ -928,6 +930,7 @@ static int emac_remove(struct platform_device *pdev) unregister_netdev(ndev); sunxi_sram_release(&pdev->dev); clk_disable_unprepare(db->clk); + irq_dispose_mapping(ndev->irq); iounmap(db->membase); free_netdev(ndev); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 48de4bee209e..9225733f4fec 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2349,6 +2349,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, rss->hash_key; int rc; + if (unlikely(!func)) + return -EINVAL; + rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, @@ -2361,8 +2364,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (rss->hash_func) rss->hash_func--; - if (func) - *func = rss->hash_func; + *func = rss->hash_func; if (key) memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 26325f7b3c1f..2e5348ec2a2e 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2622,16 +2622,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, goto err_mmio_read_less; } - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); if (rc) { - dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); - goto err_mmio_read_less; - } - - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); - if (rc) { - dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", - rc); + dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); goto err_mmio_read_less; }
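The ena_device_init() hunk just above replaces the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair with a single dma_set_mask_and_coherent() call, which configures the streaming and coherent DMA masks together and reports one combined result. A minimal sketch of the modern idiom (function name and width handling illustrative):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_set_dma_width(struct device *dev, int dma_width)
	{
		int rc;

		/* Sets both the streaming and the coherent DMA mask in one call. */
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
		if (rc)
			dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);

		return rc;
	}

The same API is used a few hunks later in ena_probe() against &pdev->dev, so both call sites end up on one code path.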
@@ -2835,16 +2828,14 @@ static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); - struct pci_dev *pdev = adapter->pdev; - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } rtnl_lock(); - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + ena_destroy_device(adapter, false); + ena_restore_device(adapter); + } + rtnl_unlock(); } @@ -2926,7 +2917,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, } u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx = missed_tx; + tx_ring->tx_stats.missed_tx += missed_tx; u64_stats_update_end(&tx_ring->syncp); return rc; @@ -3452,6 +3443,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); + if (rc) { + dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); + goto err_disable_device; + } + pci_set_master(pdev); ena_dev = vzalloc(sizeof(*ena_dev)); @@ -3675,8 +3672,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - del_timer_sync(&adapter->timer_service); + /* Make sure timer and reset routine won't be called after + * freeing device resources. + */ + del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); rtnl_lock(); /* lock released inside the below if-else block */ @@ -3847,6 +3847,9 @@ static void ena_keep_alive_wd(void *adapter_data, rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; u64_stats_update_begin(&adapter->syncp); + /* These stats are accumulated by the device, so the counters indicate + * all drops since last reset. + */ adapter->dev_stats.rx_drops = rx_drops; u64_stats_update_end(&adapter->syncp); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc02950a96b8..28412f11a9ca 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -68,7 +68,7 @@ * 16kB.
*/ #if PAGE_SIZE > SZ_16K -#define ENA_PAGE_SIZE SZ_16K +#define ENA_PAGE_SIZE (_AC(SZ_16K, UL)) #else #define ENA_PAGE_SIZE PAGE_SIZE #endif diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index f5ad12c10934..da84660ceae1 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1548,8 +1548,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - ioaddr = pci_resource_start(pdev, 0); - if (!ioaddr) { + if (!pci_resource_len(pdev, 0)) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("card has no PCI IO resources, aborting\n"); err = -ENODEV; @@ -1562,6 +1561,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); goto err_disable_dev; } + + ioaddr = pci_resource_start(pdev, 0); if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("io address range already allocated\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b40d4377cc71..b2cd3bdba9f8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1279,10 +1279,18 @@ #define MDIO_PMA_10GBR_FECCTRL 0x00ab #endif +#ifndef MDIO_PMA_RX_CTRL1 +#define MDIO_PMA_RX_CTRL1 0x8051 +#endif + #ifndef MDIO_PCS_DIG_CTRL #define MDIO_PCS_DIG_CTRL 0x8000 #endif +#ifndef MDIO_PCS_DIGITAL_STAT +#define MDIO_PCS_DIGITAL_STAT 0x8010 +#endif + #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif @@ -1358,6 +1366,8 @@ #define XGBE_KR_TRAINING_ENABLE BIT(1) #define XGBE_PCS_CL37_BP BIT(12) +#define XGBE_PCS_PSEQ_STATE_MASK 0x1c +#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10 #define XGBE_AN_CL37_INT_CMPLT BIT(0) #define XGBE_AN_CL37_INT_MASK 0x01 @@ -1375,6 +1385,10 @@ #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 #define XGBE_PMA_CDR_TRACK_EN_ON 0x01 +#define XGBE_PMA_RX_RST_0_MASK BIT(4) +#define XGBE_PMA_RX_RST_0_RESET_ON 0x10 +#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 98f8f2033154..da8c2c4aca7e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -514,7 +514,7 @@ static void xgbe_isr_task(unsigned long data) xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule_irqoff(&pdata->napi); + __napi_schedule(&pdata->napi); } } else { /* Don't clear Rx/Tx status if doing per channel DMA @@ -1443,6 +1443,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) return; netif_tx_stop_all_queues(netdev); + netif_carrier_off(pdata->netdev); xgbe_stop_timers(pdata); flush_workqueue(pdata->dev_workqueue); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 8a3a60bb2688..156a0bc8ab01 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) &an_restart); if (an_restart) { xgbe_phy_config_aneg(pdata); - return; + goto adjust_link; } if (pdata->phy.link) { @@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) pdata->phy_if.phy_impl.stop(pdata); pdata->phy.link = 0; - netif_carrier_off(pdata->netdev); 
xgbe_phy_adjust_link(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 128cd648ba99..d6f6afb67bcc 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -921,6 +921,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) if ((phy_id & 0xfffffff0) != 0x03625d10) return false; + /* Reset PHY - wait for self-clearing reset bit to clear */ + genphy_soft_reset(phy_data->phydev); + /* Disable RGMII mode */ phy_write(phy_data->phydev, 0x18, 0x7007); reg = phy_read(phy_data->phydev, 0x18); @@ -1948,6 +1951,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata) xgbe_phy_put_comm_ownership(pdata); } +static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata) +{ + int reg; + + reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT, + XGBE_PCS_PSEQ_STATE_MASK); + if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) { + /* Mailbox command timed out, reset of RX block is required. + * This can be done by asserting the reset bit and waiting + * for its completion. + */ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1, + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON); + ndelay(20); + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1, + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF); + usleep_range(40, 50); + netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n"); + } +} + static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, unsigned int sub_cmd) { @@ -1955,9 +1979,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int wait; /* Log if a previous command did not complete */ - if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) { netif_dbg(pdata, link, pdata->netdev, "firmware mailbox not ready for command\n"); + xgbe_phy_rx_reset(pdata); + } /* Construct the command */ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd); @@ -1979,6 +2005,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, netif_dbg(pdata, link, pdata->netdev, "firmware mailbox command did not complete\n"); + + /* Reset on error */ + xgbe_phy_rx_reset(pdata); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) @@ -2575,6 +2604,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) if (reg & MDIO_STAT1_LSTATUS) return 1; + if (pdata->phy.autoneg == AUTONEG_ENABLE && + phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) { + if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { + netif_carrier_off(pdata->netdev); + *an_restart = 1; + } + } + /* No link, attempt a receiver reset cycle */ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 47bcbcf58048..0c93a552b921 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -181,9 +181,9 @@ #define XGBE_DMA_SYS_AWCR 0x30303030 /* DMA cache settings - PCI device */ -#define XGBE_DMA_PCI_ARCR 0x00000003 -#define XGBE_DMA_PCI_AWCR 0x13131313 -#define XGBE_DMA_PCI_AWARCR 0x00000313 +#define XGBE_DMA_PCI_ARCR 0x000f0f0f +#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f +#define XGBE_DMA_PCI_AWARCR 0x00000f0f /* DMA channel interrupt modes */ #define XGBE_IRQ_MODE_EDGE 0 diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index a58185b1d8bf..3e3711b60d01
100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea) int i; unsigned short data; - for (i = 0; i < 6; i++) + for (i = 0; i < 3; i++) { reset_and_select_srom(dev); data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index bb65dd39f847..72c7404ae6c5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -66,8 +66,10 @@ static int aq_ndev_open(struct net_device *ndev) goto err_exit; err = aq_nic_start(aq_nic); - if (err < 0) + if (err < 0) { + aq_nic_stop(aq_nic); goto err_exit; + } err_exit: if (err < 0) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 12949f1ec1ea..145334fb18f4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -690,6 +690,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) u32 *regs_buff = p; int err = 0; + if (unlikely(!self->aq_hw_ops->hw_get_regs)) + return -EOPNOTSUPP; + regs->version = 1; err = self->aq_hw_ops->hw_get_regs(self->aq_hw, @@ -704,6 +707,9 @@ err_exit: int aq_nic_get_regs_count(struct aq_nic_s *self) { + if (unlikely(!self->aq_hw_ops->hw_get_regs)) + return 0; + return self->aq_nic_cfg.aq_hw_caps->mac_regs_count; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 74b9f3f1da81..0e8264c0b308 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -56,7 +56,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, - { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, }, { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, }, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 359a4d387185..9a0db70c1143 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -776,7 +776,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (self->aq_nic_cfg->mc_list_count = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 6f340695e6bd..774e48b3f904 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1597,7 +1597,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location + i), - ipv6_src[i]); + ipv6_src[3 - i]); } void 
hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, @@ -1608,7 +1608,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location + i), - ipv6_dest[i]); + ipv6_dest[3 - i]); } u32 hw_atl_sem_ram_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 35887ad89025..dd8d591404be 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2564,7 +2564,7 @@ */ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ -#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4) +#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) /* Bitmask for bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu /* Inverted bitmask for bitfield l3_da0[1F:0] */ diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 0187dbf3b87d..54cdafdd067d 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 1b1a09095c0d..58a002dd758c 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -222,8 +222,6 @@ #define AG71XX_REG_RX_SM 0x01b0 #define AG71XX_REG_TX_SM 0x01b4 -#define ETH_SWITCH_HEADER_LEN 2 - #define AG71XX_DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV \ | NETIF_MSG_PROBE \ @@ -553,7 +551,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - return PTR_ERR(ag->mdio_reset); + err = PTR_ERR(ag->mdio_reset); + goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; @@ -783,7 +782,7 @@ static void ag71xx_hw_setup(struct ag71xx *ag) static unsigned int ag71xx_max_frame_len(unsigned int mtu) { - return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; + return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; } static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index d4bbcdfd691a..bde8ec75ac4e 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1249,8 +1249,12 @@ out_disable_adv_intr: static void __alx_stop(struct alx_priv *alx) { - alx_halt(alx); alx_free_irq(alx); + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } @@ -1858,9 +1862,6 @@ static void alx_remove(struct pci_dev *pdev) struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; - cancel_work_sync(&alx->link_check_wk); - cancel_work_sync(&alx->reset_wk); - /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); @@ -1896,13 +1897,16 @@ static int alx_resume(struct device *dev) if (!netif_running(alx->dev)) return 0; - netif_device_attach(alx->dev); rtnl_lock(); err = __alx_open(alx, true); rtnl_unlock(); + 
if (err) + return err; - return err; + netif_device_attach(alx->dev); + + return 0; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 1a7710c399d7..3e8bd454ec50 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2391,7 +2391,8 @@ static int b44_init_one(struct ssb_device *sdev, goto err_out_free_dev; } - if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) { + err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30)); + if (err) { dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n"); goto err_out_powerdown; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index ad86a186ddc5..470d12e30881 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -666,7 +666,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, dma_addr_t mapping; /* Allocate a new SKB for a new packet */ - skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); @@ -2452,8 +2453,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + } priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; @@ -2517,6 +2520,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; + dev->max_mtu = UMAC_MAX_MTU_SIZE; /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index cb97ec56fdec..e29ac84c6b3f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,6 +1,943 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_BNXT) += bnxt_en.o +#!/usr/bin/make +# Makefile for building Linux Broadcom Gigabit ethernet driver as a module. +# $id$ +KVER= +ifeq ($(KVER),) + KVER=$(shell uname -r) +endif -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o -bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o -bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o +KVER_MAJ=$(shell echo $(KVER) | cut -d "." -f1) + +__ARCH=$(shell uname -m) + +# PREFIX may be set by the RPM build to set the effective root. 
+PREFIX= +ifeq ($(shell ls /lib/modules/$(KVER)/build > /dev/null 2>&1 && echo build),) +# SuSE source RPMs + _KVER=$(shell echo $(KVER) | cut -d "-" -f1,2) + _KFLA=$(shell echo $(KVER) | cut -d "-" -f3) + _ARCH=$(shell file -b /lib/modules/$(shell uname -r)/build | cut -d "/" -f5) + ifeq ($(_ARCH),) + _ARCH=$(__ARCH) + endif + ifeq ($(shell ls /usr/src/linux-$(_KVER)-obj > /dev/null 2>&1 && echo linux),) + ifeq ($(shell ls /usr/src/kernels/$(KVER)-$(__ARCH) > /dev/null 2>&1 && echo linux),) + LINUX= + else + LINUX=/usr/src/kernels/$(KVER)-$(__ARCH) + LINUXSRC=$(LINUX) + endif + else + LINUX=/usr/src/linux-$(_KVER)-obj/$(_ARCH)/$(_KFLA) + LINUXSRC=/usr/src/linux-$(_KVER) + endif +else + LINUX=/lib/modules/$(KVER)/build + ifeq ($(shell ls /lib/modules/$(KVER)/source > /dev/null 2>&1 && echo source),) + LINUXSRC=$(LINUX) + else + LINUXSRC=/lib/modules/$(KVER)/source + endif +endif + +KDIR ?= $(srctree) +ifneq ($(KDIR),) + LINUX=$(KDIR) + LINUXSRC=$(LINUX) +endif + +ifeq ($(shell ls $(LINUXSRC)/include/uapi/linux > /dev/null 2>&1 && echo uapi),) + UAPI= +else + UAPI=uapi +endif + +ifeq ($(BCMMODDIR),) + ifeq ($(shell ls /lib/modules/$(KVER)/updates > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.conf > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.d/* > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell expr $(KVER_MAJ) \>= 3), 1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + BCMMODDIR=/lib/modules/$(KVER)/kernel/drivers/net + endif + endif + endif + endif +endif + +ifneq ($(shell grep -o "pci_enable_msix_range" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG = -DHAVE_MSIX_RANGE +else + DISTRO_CFLAG = +endif + +ifneq ($(shell grep -o "msix_cap" $(LINUXSRC)/include/linux/pci.h),) + ifeq ($(shell grep -o "pci_dev_rh1" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_MSIX_CAP + endif +endif + +ifneq ($(shell grep -so "GENL_ID_GENERATE" $(LINUXSRC)/include/uapi/linux/genetlink.h),) + DISTRO_CFLAG += -DHAVE_GENL_ID_GENERATE +endif + +ifneq ($(shell grep "genl_register_family_with_ops" $(LINUXSRC)/include/net/genetlink.h),) + DISTRO_CFLAG += -DHAVE_GENL_REG_FAMILY_WITH_OPS +endif + +ifneq ($(shell grep -A 8 "genl_family {" $(LINUXSRC)/include/net/genetlink.h | grep -o "struct nla_policy"),) + DISTRO_CFLAG += -DHAVE_GENL_POLICY +endif + +ifneq ($(shell grep "hlist_for_each_entry_safe" $(LINUXSRC)/include/linux/list.h | grep "tpos" > /dev/null 2>&1 && echo tpos),) + DISTRO_CFLAG += -DHAVE_OLD_HLIST +endif + +ifneq ($(shell grep -o "csum_level" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_CSUM_LEVEL +endif + +ifneq ($(shell grep -o "build_skb" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_BUILD_SKB + ifneq ($(shell grep "build_skb" $(LINUXSRC)/include/linux/skbuff.h | grep "int frag_size" > /dev/null 2>&1 && echo frag_size),) + DISTRO_CFLAG += -DHAVE_NEW_BUILD_SKB + endif +endif + +ifneq ($(shell grep -o "inner_network_offset" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_INNER_NETWORK_OFFSET +endif + +ifeq ($(shell grep -o "skb_frag_size" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DNO_SKB_FRAG_SIZE +endif + +ifneq ($(shell grep -so "n_proto" $(LINUXSRC)/include/net/flow_keys.h),) + DISTRO_CFLAG += -DHAVE_N_PROTO +endif + +ifneq ($(shell grep -so "flow_keys" 
$(LINUXSRC)/include/net/flow_keys.h),) + DISTRO_CFLAG += -DHAVE_FLOW_KEYS +endif + +ifneq ($(shell grep -o "skb_get_hash_raw" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_GET_HASH_RAW +endif + +ifneq ($(shell grep -o "PKT_HASH_TYPE" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_HASH_TYPE +endif + +ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL_CSUM" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL_CSUM +else +ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL +endif +endif + +ifneq ($(shell grep -o "skb_frag_page" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FRAG_PAGE +endif + +ifneq ($(shell grep -o "skb_frag_off_add" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FRAG_ACCESSORS +endif + +ifneq ($(shell grep "skb_checksum_none_assert" $(LINUXSRC)/include/linux/skbuff.h > /dev/null 2>&1 && echo skb_cs_none_assert),) + DISTRO_CFLAG += -DHAVE_SKB_CHECKSUM_NONE_ASSERT +endif + +ifneq ($(shell grep -o "xmit_more" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_XMIT_MORE +endif + +ifneq ($(shell grep -so "min_tx_rate" $(LINUXSRC)/include/$(UAPI)/linux/if_link.h),) + DISTRO_CFLAG += -DHAVE_IFLA_TX_RATE +endif + +ifneq ($(shell grep -so "IFLA_XDP_PROG_ID" $(LINUXSRC)/include/$(UAPI)/linux/if_link.h),) + DISTRO_CFLAG += -DHAVE_IFLA_XDP_PROG_ID +endif + +ifneq ($(shell grep -o "dma_set_mask_and_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_SET_MASK_AND_COHERENT +endif + +ifneq ($(shell grep -o "dma_set_coherent_mask" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_SET_COHERENT_MASK +endif + +ifneq ($(shell grep "dma_buf_export" $(LINUXSRC)/include/linux/dma-buf.h | grep "exp_info"),) + DISTRO_CFLAG += -DHAVE_DMA_EXPORT +endif + +ifneq ($(shell grep -w "dma_buf_export" $(LINUXSRC)/include/linux/dma-buf.h | grep "define"),) + ifneq ($(shell grep "dma_buf_export" $(LINUXSRC)/include/linux/dma-buf.h | grep "resv"),) + DISTRO_CFLAG += -DHAVE_DMA_EXPORT_FLAG_RESV + else + DISTRO_CFLAG += -DHAVE_DMA_EXPORT_FLAG + endif +endif + +ifneq ($(shell grep -w "(*kmap_atomic)" $(LINUXSRC)/include/linux/dma-buf.h),) + DISTRO_CFLAG += -DHAVE_DMABUF_KMAP_ATOMIC +endif + +ifneq ($(shell grep -w "(*kmap)" $(LINUXSRC)/include/linux/dma-buf.h),) + DISTRO_CFLAG += -DHAVE_DMABUF_KMAP +endif + +ifneq ($(shell grep -w "(*map_atomic)" $(LINUXSRC)/include/linux/dma-buf.h),) + DISTRO_CFLAG += -DHAVE_DMABUF_MAP_ATOMIC +endif + +ifneq ($(shell grep -w "(*map)" $(LINUXSRC)/include/linux/dma-buf.h),) + DISTRO_CFLAG += -DHAVE_DMABUF_MAP +endif + +ifneq ($(shell grep -w "attach" $(LINUXSRC)/include/linux/dma-buf.h | grep "struct device"),) + DISTRO_CFLAG += -DHAVE_DMABUF_ATTACH_DEV +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/dma-attrs.h > /dev/null 2>&1 && echo dma_attrs),) + DISTRO_CFLAG += -DHAVE_DMA_ATTRS_H +endif + +ifneq ($(shell grep -o "dma_map_page_attrs" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_DMA_MAP_PAGE_ATTRS +else + ifneq ($(shell grep -so "dma_map_page_attrs" $(LINUXSRC)/include/asm-generic/dma-mapping-common.h),) + DISTRO_CFLAG += -DHAVE_DMA_MAP_PAGE_ATTRS + endif +endif + +ifneq ($(shell grep -o "dma_zalloc_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_DMA_ZALLOC_COHERENT +endif + +ifneq ($(shell grep -o "ndo_udp_tunnel_add" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += 
-DHAVE_NDO_UDP_TUNNEL + ifneq ($(shell grep -A 24 "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h | grep -o "ndo_udp_tunnel_add"),) + DISTRO_CFLAG += -DHAVE_NDO_UDP_TUNNEL_RH + endif +else + ifneq ($(shell grep -o "ndo_add_vxlan_port" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_ADD_VXLAN + ifneq ($(shell grep -so "vxlan_get_rx_port" $(LINUXSRC)/include/net/vxlan.h),) + DISTRO_CFLAG += -DHAVE_VXLAN_GET_RX_PORT + endif + endif +endif + +ifneq ($(shell grep -o "struct dev_addr_list" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_DEV_ADDR_LIST +endif + +ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo real_tx),) + DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_TX +else + DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX +endif + +ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h | grep void > /dev/null 2>&1 && echo netif_set_real),) + DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX +endif + +ifneq ($(shell grep -o "netdev_tx_sent_queue" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_TX_QUEUE_CTRL +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_dissector.h > /dev/null 2>&1 && echo flow),) + DISTRO_CFLAG += -DNEW_FLOW_KEYS -DHAVE_FLOW_KEYS + ifneq ($(shell grep -so "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h),) + ifneq ($(shell grep -A 2 "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h | grep -o "unsigned int flags"),) + DISTRO_CFLAG += -DHAVE_NEW_FLOW_DISSECTOR_WITH_FLAGS + endif + ifneq ($(shell grep -o "FLOW_DIS_ENCAPSULATION" $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_NEW_FLOW_DISSECTOR + endif + endif + ifneq ($(shell grep -o "flow_hash_from_keys" $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_FLOW_HASH_FROM_KEYS + endif +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_offload.h > /dev/null 2>&1 && echo flow_offload),) + DISTRO_CFLAG += -DHAVE_FLOW_OFFLOAD_H + ifneq ($(shell grep -so "struct flow_cls_offload" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_TC_FLOW_CLS_OFFLOAD + endif + ifneq ($(shell grep -o "flow_block_cb_setup_simple" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_SETUP_TC_BLOCK_HELPER + endif + ifneq ($(shell grep -o "__flow_indr_block_cb_register" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CB + endif + ifneq ($(shell grep -o "FLOW_ACTION_POLICE" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_POLICE + endif + ifneq ($(shell grep -o "flow_action_basic_hw_stats_check" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK + endif +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/udp_tunnel.h > /dev/null 2>&1 && echo udp_tunnel),) + DISTRO_CFLAG += -DHAVE_UDP_TUNNEL_H +endif + +ifneq ($(shell grep -o "ether_addr_equal" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETHER_ADDR_EQUAL +endif + +ifneq ($(shell grep -o "ether_addr_copy" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETHER_ADDR_COPY +endif + +ifneq ($(shell grep -o "eth_broadcast_addr" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_BROADCAST_ADDR +endif + +ifneq ($(shell grep -o "eth_get_headlen" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_GET_HEADLEN +endif + +ifneq ($(shell 
grep -o "eth_hw_addr_random" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_HW_ADDR_RANDOM +endif + +ifneq ($(shell grep -s "eth_get_headlen" $(LINUXSRC)/include/linux/etherdevice.h | grep -o "struct net_device"),) + DISTRO_CFLAG += -DHAVE_ETH_GET_HEADLEN_NEW +endif + +ifneq ($(shell grep -o "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_RXNFC + ifneq ($(shell grep -A 2 "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_RXNFC_VOID + endif +endif + +ifneq ($(shell grep -o "get_rxfh_key_size" $(LINUXSRC)/include/linux/ethtool.h),) + ifneq ($(shell grep -o "ETH_RSS_HASH_TOP" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_GET_RXFH_KEY_SIZE + endif +endif + +ifneq ($(shell grep -o "(\*set_rxfh)" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_SET_RXFH +endif + +ifneq ($(shell grep -o "get_rxfh_indir_size" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_RXFH_INDIR_SIZE +endif + +ifneq ($(shell grep -o "set_phys_id" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_SET_PHYS_ID +endif + +ifneq ($(shell grep -so "ETHTOOL_LINK_MODE_25000baseCR_Full_BIT" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + ifeq ($(shell grep -o "^struct new_ethtool_ops {" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_GLINKSETTINGS_25G + endif +endif + +ifneq ($(shell grep -so "ETHTOOL_LINK_MODE_50000baseCR_Full_BIT" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_GLINKSETTINGS_PAM4 +endif + +ifneq ($(shell grep -so "ethtool_tcpip6_spec" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_IP6_SPEC +endif + +ifeq ($(shell grep -o "rx_cpu_rmap" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP +else + ifneq ($(shell grep -o "irq_run_affinity_notifiers" $(LINUXSRC)/include/linux/interrupt.h),) + DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP + endif +endif + +ifneq ($(shell grep -o "hw_features" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "get_netdev_hw_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_HW_FEATURES + endif +endif + +ifneq ($(shell grep -o "netdev_notifier_info_to_dev" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_NOTIFIER_INFO_TO_DEV +endif + +ifneq ($(shell grep "register_netdevice_notifier_rh" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_REGISTER_NETDEVICE_NOTIFIER_RH +endif + +ifneq ($(shell grep -o "hw_enc_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_HW_ENC_FEATURES +endif + +ifneq ($(shell grep -o "sriov_configure" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DPCIE_SRIOV_CONFIGURE + ifneq ($(shell grep -A 2 "pci_driver_rh" $(LINUXSRC)/include/linux/pci.h | \ + grep -o "sriov_configure"),) + DISTRO_CFLAG += -DSRIOV_CONF_DEF_IN_PCI_DRIVER_RH + endif +endif + +ifneq ($(shell grep -o "pci_vfs_assigned" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_VFS_ASSIGNED +endif + +ifneq ($(shell grep -o "pci_num_vf" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_NUM_VF +endif + +ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + 
DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL + endif +endif + +ifneq ($(shell grep -o "ndo_rx_flow_steer" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "netdev_rfs_info" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_RX_FLOW_STEER + endif +endif + +ifneq ($(shell grep -o "ndo_busy_poll" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_BUSY_POLL + endif +endif + +ifneq ($(shell grep -o "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h | grep -o "void"),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64_VOID + endif +endif + +ifneq ($(shell grep -o "ndo_get_vf_config" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_GET_VF_CONFIG +endif + +ifneq ($(shell grep -A 2 "ndo_bridge_getlink" $(LINUXSRC)/include/linux/netdevice.h | grep -o "nlflags"),) + ifneq ($(shell grep -A 3 "ndo_dflt_bridge_getlink" $(LINUXSRC)/include/linux/rtnetlink.h | grep -o "filter_mask"),) + DISTRO_CFLAG += -DHAVE_NDO_BRIDGE_GETLINK + endif +endif + +ifneq ($(shell grep -A 4 "ndo_bridge_setlink" $(LINUXSRC)/include/linux/netdevice.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_NDO_BRIDGE_SETLINK_EXTACK +endif + +ifneq ($(shell grep -o "ndo_set_vf_link_state" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_spoofchk" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK + endif +endif + +ifneq ($(shell grep -A 1 "ndo_set_vf_vlan" $(LINUXSRC)/include/linux/netdevice.h | grep -o "proto"),) + ifeq ($(shell grep -o "RH_KABI_EXTEND(struct net_device_ops_extended extended)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNEW_NDO_SET_VF_VLAN + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_vlan_rh73" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_VLAN_RH73 +endif + +ifneq ($(shell grep -o "ndo_set_vf_trust" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_TRUST + ifneq ($(shell grep -A 3 "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h | grep -o "ndo_set_vf_trust"),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_TRUST_RH + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_queues" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_QUEUES + endif +endif + +ifneq ($(shell grep -o "ndo_change_mtu_rh74" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_CHANGE_MTU_RH74 +endif + +ifneq ($(shell grep -o "RH_KABI_USE_P(16, struct net_device_extended 
\*extended)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NET_DEVICE_EXT +endif + +ifneq ($(shell grep -o "RH_KABI_EXTEND(struct net_device_ops_extended extended)" $(LINUXSRC)/include/linux/netdevice.h),) + ifneq ($(shell grep -o "ndo_get_phys_port_name" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_EXT_GET_PHYS_PORT_NAME + endif +endif + +ifneq ($(shell grep -o "ndo_setup_tc_rh72" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SETUP_TC_RH72 +endif + +ifneq ($(shell grep -o "(\*ndo_setup_tc_rh)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SETUP_TC_RH +endif + +ifneq ($(shell grep -o "tc_setup_cb_egdev_call" $(LINUXSRC)/include/net/act_api.h),) + DISTRO_CFLAG += -DHAVE_TC_CB_EGDEV +endif + +ifneq ($(shell grep -o "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_SETUP_TC + ifneq ($(shell grep -o "struct tc_to_netdev" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_TO_NETDEV + ifneq ($(shell grep -o "struct tc_mqprio_qopt" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_MQPRIO_QOPT + endif + ifneq ($(shell grep -A 1 "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h | grep -o "u32 chain_index"),) + DISTRO_CFLAG += -DHAVE_CHAIN_INDEX + endif + endif + ifneq ($(shell grep -o "enum tc_setup_type" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_TYPE + endif + ifneq ($(shell grep -so "tc_cls_flower_offload" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_CLS_FLOWER_OFFLOAD + endif + ifneq ($(shell grep -o "TC_SETUP_BLOCK" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_BLOCK + endif + ifneq ($(shell grep -o "TC_SETUP_QDISC_MQPRIO" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_QDISC_MQPRIO + endif + ifneq ($(shell grep -so "tcf_mirred_dev" $(LINUXSRC)/include/net/tc_act/tc_mirred.h),) + DISTRO_CFLAG += -DHAVE_TCF_MIRRED_DEV + endif + ifneq ($(shell grep -so "tc_cls_can_offload_and_chain0" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 + endif + ifneq ($(shell grep -s -A 2 "tcf_block_cb_register" $(LINUXSRC)/include/net/pkt_cls.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_TC_CB_REG_EXTACK + endif + ifneq ($(shell grep -so "tcf_exts_for_each_action" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_EXTS_FOR_ACTION + endif + ifneq ($(shell grep -s -A 3 "struct tc_cls_matchall_offload" $(LINUXSRC)/include/net/pkt_cls.h | grep -o "flow_rule"),) + DISTRO_CFLAG += -DHAVE_TC_MATCHALL_FLOW_RULE + endif +endif + +ifneq ($(shell grep -so "FLOW_DISSECTOR_KEY_ICMP" $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR_KEY_ICMP +endif + +ifneq ($(shell grep -so "rhashtable" $(LINUXSRC)/include/linux/rhashtable.h),) + DISTRO_CFLAG += -DHAVE_RHASHTABLE +endif + +ifneq ($(shell grep -so "tcf_exts_to_list" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_EXTS_TO_LIST +endif + +ifneq ($(shell grep -so "tcf_exts_stats_update" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_STATS_UPDATE +endif + +ifneq ($(shell grep -so "tcf_exts_has_actions" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_EXTS_HAS_ACTIONS +endif + +ifneq ($(shell grep -so "is_tcf_tunnel_set" $(LINUXSRC)/include/net/tc_act/tc_tunnel_key.h),) + DISTRO_CFLAG += -DHAVE_TCF_TUNNEL +endif + +ifneq ($(shell grep -o "netdev_get_num_tc" 
$(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_GET_NUM_TC +endif + +ifneq ($(shell grep -so "netdev_features_t" $(LINUXSRC)/include/linux/netdev_features.h || \ + grep -o "netdev_features_t" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_FEATURES_T +endif + +ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_FIX_FEATURES +endif + +ifneq ($(shell grep -o "netif_set_real_num_rx" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_RX +endif + +ifneq ($(shell grep -o "netif_get_num_default_rss_queues" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_GET_DEFAULT_RSS +endif + +ifneq ($(shell grep -o "ndo_vlan_rx_register" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VLAN_RX_REGISTER +endif + +ifneq ($(shell grep -o "ndo_get_port_parent_id" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_GET_PORT_PARENT_ID +endif + +ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + ifneq ($(shell grep -o "ndo_xdp_xmit" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_EXT_NDO_XDP_XMIT + endif +else ifneq ($(shell grep -o "ndo_xdp" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_XDP + ifneq ($(shell grep -o "ndo_bpf" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_BPF + endif + ifneq ($(shell ls $(LINUXSRC)/include/net/bpf_trace.h > /dev/null 2>&1 && echo bpf_trace),) + DISTRO_CFLAG += -DHAVE_BPF_TRACE + endif + ifneq ($(shell grep -o "xdp_set_data_meta_invalid" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_SET_DATA_META_INVALID + endif + ifneq ($(shell grep -o "void bpf_prog_add" $(LINUXSRC)/include/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_VOID_BPF_PROG_ADD + endif +endif + +ifneq ($(shell grep -so "enum xdp_action" $(LINUXSRC)/include/uapi/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_XDP_ACTION +endif +ifneq ($(shell grep -so "XDP_REDIRECT" $(LINUXSRC)/include/uapi/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_XDP_REDIRECT +endif +ifneq ($(shell grep -so "struct xdp_frame" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_FRAME +endif +ifneq ($(shell grep -so "enum xdp_mem_type" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_MEM_TYPE +endif +ifneq ($(shell grep -so "struct xdp_rxq_info" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_RXQ_INFO + ifneq ($(shell grep -o "xdp_rxq_info_is_reg" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_RXQ_INFO_IS_REG + endif +endif + +ifneq ($(shell grep -so "xdp_data_hard_end" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_FRAME_SZ +endif + +ifneq ($(shell grep -so "page_pool_release_page" $(LINUXSRC)/include/net/page_pool.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_RELEASE_PAGE +endif + +ifneq ($(shell grep -o "netdev_name" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_NAME +endif + +ifneq ($(shell grep -o "netdev_update_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_UPDATE_FEATURES +endif + +ifneq ($(shell grep -o "napi_hash_add" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NAPI_HASH_ADD +endif + +ifneq ($(shell grep -o "napi_hash_del" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NAPI_HASH_DEL +endif + +ifneq ($(shell grep "napi_complete_done" $(LINUXSRC)/include/linux/netdevice.h | grep -o "bool"),) + 
DISTRO_CFLAG += -DHAVE_NEW_NAPI_COMPLETE_DONE +endif + +ifneq ($(shell grep -o "min_mtu" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_MIN_MTU +endif + +ifneq ($(shell grep -o "prog_attached" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_PROG_ATTACHED +endif + +ifneq ($(shell grep -o "netdev_xmit_more" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_XMIT_MORE +endif + +ifneq ($(shell grep -A 1 "ndo_tx_timeout" $(LINUXSRC)/include/linux/netdevice.h | grep -o txqueue),) + DISTRO_CFLAG += -DHAVE_NDO_TX_TIMEOUT_QUEUE +endif + +ifneq ($(shell grep -o "netpoll_poll_dev" $(LINUXSRC)/include/linux/netpoll.h),) + DISTRO_CFLAG += -DHAVE_NETPOLL_POLL_DEV +endif + +ifneq ($(shell grep -o "prandom_bytes" $(LINUXSRC)/include/linux/random.h),) + DISTRO_CFLAG += -DHAVE_PRANDOM_BYTES +endif + +ifneq ($(shell grep -o "tcp_v6_check" $(LINUXSRC)/include/net/ip6_checksum.h),) + DISTRO_CFLAG += -DHAVE_TCP_V6_CHECK +endif + +ifneq ($(shell grep -o "usleep_range" $(LINUXSRC)/include/linux/delay.h),) + DISTRO_CFLAG += -DHAVE_USLEEP_RANGE +endif + +ifneq ($(shell grep -o "vzalloc" $(LINUXSRC)/include/linux/vmalloc.h),) + DISTRO_CFLAG += -DHAVE_VZALLOC +endif + +ifneq ($(shell grep -o "kmalloc_array" $(LINUXSRC)/include/linux/slab.h),) + DISTRO_CFLAG += -DHAVE_KMALLOC_ARRAY +endif + +ifneq ($(shell grep -o "pcie_capability_read_word" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_CAPABILITY_READ_WORD +endif + +ifneq ($(shell grep -o "pcie_link_width" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_LINK_WIDTH +endif + +ifneq ($(shell grep -o "PCIE_SPEED_2_5GT" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_BUS_SPEED +endif + +ifneq ($(shell grep -o "pci_is_bridge" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_IS_BRIDGE +endif + +ifneq ($(shell grep -o "pci_upstream_bridge" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_UPSTREAM_BRIDGE +endif + +ifneq ($(shell grep -o "pcie_print_link_status" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_PRINT_LINK_STATUS +endif + +ifneq ($(shell grep -o "_genl_register_family_with_ops_grps" $(LINUXSRC)/include/net/genetlink.h),) + DISTRO_CFLAG += -DHAVE_GENL_REG_OPS_GRPS +endif + +ifneq ($(shell grep -o "pci_physfn" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_PHYSFN +endif + +ifneq ($(shell grep -o "pcie_flr" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_FLR +endif + +ifneq ($(shell grep -o "pci_get_dsn" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_GET_DSN +endif + +ifneq ($(shell ls $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h > /dev/null 2>&1 && echo net_tstamp),) + ifneq ($(shell ls $(LINUXSRC)/include/linux/timecounter.h > /dev/null 2>&1 && echo timecounter),) + ifneq ($(shell ls $(LINUXSRC)/include/linux/timekeeping.h > /dev/null 2>&1 && echo timekeeping),) + ifneq ($(shell grep -o "HWTSTAMP_FILTER_PTP_V2_EVENT" $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h),) + DISTRO_CFLAG += -DHAVE_IEEE1588_SUPPORT + endif + endif + endif +endif + +ifneq ($(shell grep -so "gettimex64" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_PTP_GETTIMEX64 +endif + +ifneq ($(shell grep -o "time64_to_tm" $(LINUXSRC)/include/linux/time.h),) + DISTRO_CFLAG += -DHAVE_TIME64 +endif + +ifneq ($(shell grep -o "timer_setup" $(LINUXSRC)/include/linux/timer.h),) + DISTRO_CFLAG += -DHAVE_TIMER_SETUP +endif + +ifneq ($(shell grep -s "devlink_ops" 
$(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK +endif + +ifneq ($(shell grep -s "devlink_param" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PARAM + ifneq ($(shell grep -s -A 2 "int (\*validate)" $(LINUXSRC)/include/net/devlink.h | grep "struct netlink_ext_ack \*extack"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_VALIDATE_NEW + endif +endif + +ifneq ($(shell grep -s -A 7 "devlink_port_attrs" $(LINUXSRC)/include/net/devlink.h | grep -o "netdev_phys_item_id"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_ATTRS +endif + +ifneq ($(shell grep -s "ndo_get_devlink_port" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_NDO_DEVLINK_PORT +endif + +ifneq ($(shell grep -s "devlink_port_params_register" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_PARAM +endif + +ifneq ($(shell grep -s "devlink_params_publish" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PARAM_PUBLISH +endif + +ifneq ($(shell grep -s -A 1 "eswitch_mode_set" $(LINUXSRC)/include/net/devlink.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_ESWITCH_MODE_SET_EXTACK +endif + +ifneq ($(shell grep -so "DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_IGNORE_ARI +endif + +ifneq ($(shell grep -so "info_get" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_INFO + ifneq ($(shell grep -so "devlink_info_board_serial_number_put" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_INFO_BSN_PUT + endif +endif + +ifneq ($(shell grep -so "flash_update" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_UPDATE +endif + +ifneq ($(shell grep -so "devlink_flash_update_status_notify" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_UPDATE_STATUS +endif + +ifneq ($(shell grep -so "struct devlink_health_reporter" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORT +endif + +ifneq ($(shell grep -so "devlink_health_reporter_state_update" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORTER_STATE_UPDATE +endif + +ifneq ($(shell grep -s -A 1 "(*recover)" $(LINUXSRC)/include/net/devlink.h | grep netlink_ext_ack),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORT_EXTACK +endif + +ifneq ($(shell grep -so "devlink_health_reporter_recovery_done" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE +endif + +ifneq ($(shell grep -s -A 2 "devlink_health_reporter_create" $(LINUXSRC)/include/net/devlink.h | grep auto_recover),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_AUTO_RECOVER +endif + +# Check for switchdev_ops; grep -s stays quiet if switchdev.h does not exist +ifneq ($(shell grep -s "switchdev_ops" $(LINUXSRC)/include/net/switchdev.h),) + DISTRO_CFLAG += -DHAVE_SWITCHDEV +endif + +ifneq ($(shell grep -s "METADATA_HW_PORT_MUX" $(LINUXSRC)/include/net/dst_metadata.h),) + DISTRO_CFLAG += -DHAVE_METADATA_HW_PORT_MUX +endif + +ifneq ($(shell grep -so "(*ieee_delapp)" $(LINUXSRC)/include/net/dcbnl.h),) + DISTRO_CFLAG += -DHAVE_IEEE_DELAPP +endif + +ifneq ($(shell grep -so "dcb_ieee_getapp_prio_dscp_mask_map" $(LINUXSRC)/include/net/dcbnl.h),) + DISTRO_CFLAG += -DHAVE_DSCP_MASK_MAP +endif + +ifneq ($(shell grep -o cpumask_local_spread $(LINUXSRC)/include/linux/cpumask.h),) + DISTRO_CFLAG += -DHAVE_CPUMASK_LOCAL_SPREAD +endif + +ifneq ($(shell grep -o cpumask_set_cpu_local_first $(LINUXSRC)/include/linux/cpumask.h),) + DISTRO_CFLAG += -DHAVE_CPUMASK_LOCAL_FIRST
+endif + +ifeq ($(shell grep -so "ETH_RESET_AP" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DNO_ETH_RESET_AP +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/dim.h > /dev/null 2>&1 && echo dim),) + DISTRO_CFLAG += -DHAVE_DIM +endif + +ifneq ($(shell grep -o simple_open $(LINUXSRC)/include/linux/fs.h),) + DISTRO_CFLAG += -DHAVE_SIMPLE_OPEN +endif + +ifneq ($(shell grep -o hwmon_device_register_with_groups $(LINUXSRC)/include/linux/hwmon.h),) + DISTRO_CFLAG += -DHAVE_NEW_HWMON_API +endif + +ifneq ($(shell grep -o ETH_RESET_CRASHDUMP $(LINUXSRC)/include/uapi/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_RESET_CRASHDUMP +endif + +override EXTRA_CFLAGS += ${DISTRO_CFLAG} -g -DCHIMP_FW -D__LINUX -DCONFIG_BNXT_SRIOV -DCONFIG_BNXT_DCB -DCONFIG_BNXT_FLASHDEV -DHSI_DBG_DISABLE -DCONFIG_BNXT_LFC + +cflags-y += $(EXTRA_CFLAGS) + +BCM_DRV = bnxt_en.ko +ifneq ($(KERNELRELEASE),) + +ifeq ($(shell expr $(KVER_MAJ) \>= 5), 1) + BNXT_DBGFS_OBJ = bnxt_debugfs.o +else + BNXT_DBGFS_OBJ = bnxt_debugfs_cpt.o +endif + +obj-m += bnxt_en.o +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_ethtool.o bnxt_sriov.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_tc.o bnxt_devlink.o bnxt_lfc.o bnxt_netlink.o bnxt_dim.o bnxt_eem.o $(BNXT_DBGFS_OBJ) #decode_hsi.o + +else + +default: +ifeq ($(CROSS_COMPILE),) + make -C $(LINUX) M=$(shell pwd) modules +else ifneq ($(CROSS_COMPILE),) + make -C $(LINUXSRC) M=$(shell pwd) modules CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH) +endif + +yocto_all: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) + +modules_install: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) modules_install + +endif + +install: default + @if [ "$(KDIR)" != "" ]; then \ + echo "Cannot use install with KDIR option"; exit 2;\ + fi + mkdir -p $(PREFIX)$(BCMMODDIR); + install -m 444 $(BCM_DRV) $(PREFIX)$(BCMMODDIR); + @if [ "$(PREFIX)" = "" ]; then /sbin/depmod -a ;\ + else echo " *** Run '/sbin/depmod -a' to update the module database.";\ + fi + +.PHONY: all clean install + +clean: + -rm -f bnxt.o bnxt.mod.c bnxt.mod.o .bnxt.*.cmd *.cmd *.markers *.order *.symvers decode_hsi.o .decode_* + -rm -rf .tmp_versions + -rm -rf bnxt_en.o bnxt_en.ko bnxt_en.mod.o bnxt_en.mod.c .bnxt_en.* bnxt_sriov.o .bnxt_sriov.* bnxt_ethtool.o .bnxt_ethtool.* bnxt_dcb.o .bnxt_dcb.* bnxt_ulp.o .bnxt_ulp.* bnxt_ptp.o .bnxt_ptp.* bnxt_xdp.o .bnxt_xdp.* bnxt_lfc.o .bnxt_lfc.* bnxt_hwrm.o .bnxt_hwrm.* + -rm -rf bnxt_vfr.o .bnxt_vfr.* bnxt_tc.o .bnxt_tc.* bnxt_devlink.o .bnxt_devlink.* bnxt_netlink.o .bnxt_netlink.* + -rm -rf bnxt_dim.o .bnxt_dim.* bnxt_debugfs*.o .bnxt_debugfs* bnxt_eem.o .bnxt_eem* + -rm -f Module.markers Module.symvers modules.order diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6862594b49ab..241c8905a33b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,14 +1,17 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2019 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation.
*/ +#include <linux/version.h> + #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/kernel.h> @@ -36,37 +39,68 @@ #include <linux/if_vlan.h> #include <linux/if_bridge.h> #include <linux/rtc.h> +#ifdef HAVE_NDO_XDP #include <linux/bpf.h> +#endif #include <net/ip.h> #include <net/tcp.h> #include <net/udp.h> #include <net/checksum.h> #include <net/ip6_checksum.h> +#if defined(HAVE_UDP_TUNNEL_H) #include <net/udp_tunnel.h> +#endif +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) +#ifdef HAVE_NDO_ADD_VXLAN +#include <net/vxlan.h> +#endif +#endif +#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS) +#include <net/flow_keys.h> +#endif #include <linux/workqueue.h> #include <linux/prefetch.h> #include <linux/cache.h> #include <linux/log2.h> #include <linux/aer.h> #include <linux/bitmap.h> +#ifndef NO_NETDEV_CPU_RMAP #include <linux/cpu_rmap.h> +#endif +#ifdef HAVE_IEEE1588_SUPPORT +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> +#endif #include <linux/cpumask.h> +#ifdef HAVE_TC_SETUP_TYPE #include <net/pkt_cls.h> +#endif #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -#include <net/page_pool.h> +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" +#include "bnxt_eem.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" #include "bnxt_dcb.h" #include "bnxt_xdp.h" +#include "bnxt_ptp.h" +#ifndef HSI_DBG_DISABLE +#include "decode_hsi.h" +#endif +#include "bnxt_netlink.h" #include "bnxt_vfr.h" #include "bnxt_tc.h" #include "bnxt_devlink.h" +#include "bnxt_lfc.h" #include "bnxt_debugfs.h" +#ifdef CONFIG_PAGE_POOL +#include <net/page_pool.h> +#endif #define BNXT_TX_TIMEOUT (5 * HZ) @@ -82,6 +116,13 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define BNXT_RX_COPY_THRESH 256 #define BNXT_TX_PUSH_THRESH 164 +#define BNXT_TX_PUSH_THRESH_PPP 208 + +#ifndef PCIE_SRIOV_CONFIGURE +static unsigned int num_vfs; +module_param(num_vfs, uint, 0); +MODULE_PARM_DESC(num_vfs, " Number of supported virtual functions (0 means sriov is disabled)"); +#endif enum board_idx { BCM57301, @@ -122,9 +163,16 @@ enum board_idx { BCM58802, BCM58804, BCM58808, +#ifdef BNXT_FPGA + BCM58812, + BCM58814, + BCM58818, +#endif NETXTREME_E_VF, NETXTREME_C_VF, NETXTREME_S_VF, + NETXTREME_C_VF_HV, + NETXTREME_E_VF_HV, NETXTREME_E_P5_VF, }; @@ -170,13 +218,20 @@ static const struct { [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, +#ifdef BNXT_FPGA + [BCM58812] = { "Broadcom BCM58812 NetXtreme-S 2x50G Ethernet" }, + [BCM58814] = { "Broadcom BCM58814 NetXtreme-S 2x100G Ethernet" }, + [BCM58818] = { "Broadcom BCM58818 NetXtreme-S 2x200G Ethernet" }, +#endif [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, - [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, }; -static const struct pci_device_id bnxt_pci_tbl[] = { +const struct pci_device_id bnxt_pci_tbl[] = { 
{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, @@ -223,15 +278,28 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, +#ifdef BNXT_FPGA + { PCI_VDEVICE(BROADCOM, 0xd812), .driver_data = BCM58812 }, + { PCI_VDEVICE(BROADCOM, 0xd814), .driver_data = BCM58814 }, + { PCI_VDEVICE(BROADCOM, 0xd818), .driver_data = BCM58818 }, +#endif #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, @@ -246,16 +314,22 @@ static const u16 bnxt_vf_req_snif[] = { HWRM_FUNC_VF_CFG, HWRM_PORT_PHY_QCFG, HWRM_CFA_L2_FILTER_ALLOC, + HWRM_OEM_CMD, }; static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, + ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, }; static struct workqueue_struct *bnxt_pf_wq; @@ -263,7 +337,8 @@ static struct workqueue_struct *bnxt_pf_wq; static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || - idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -274,16 +349,18 @@ static bool bnxt_vf_pciid(enum board_idx idx) writel(DB_CP_IRQ_DIS_FLAGS, db) #define BNXT_DB_CQ(db, idx) \ - writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) + writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) #define BNXT_DB_NQ_P5(db, idx) \ - writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) + writeq((db)->db_key64 | DBR_TYPE_NQ | 
DB_RING_IDX(db, idx), \ + (db)->doorbell) #define BNXT_DB_CQ_ARM(db, idx) \ - writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) + writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) #define BNXT_DB_NQ_ARM_P5(db, idx) \ - writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) + writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | DB_RING_IDX(db, idx), \ + (db)->doorbell) static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) { @@ -304,7 +381,7 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) { if (bp->flags & BNXT_FLAG_CHIP_P5) - writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), + writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_RING_IDX(db, idx), db->doorbell); else BNXT_DB_CQ(db, idx); @@ -334,12 +411,200 @@ const u16 bnxt_lhint_arr[] = { static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) { +#ifdef CONFIG_VF_REPS struct metadata_dst *md_dst = skb_metadata_dst(skb); if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) return 0; return md_dst->u.port_info.port_id; +#else + return 0; +#endif +} + +static int bnxt_push_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct netdev_queue *txq, struct sk_buff *skb, + u32 vlan_tag_flags, u32 cfa_action) +{ + struct tx_push_buffer *tx_push_buf = txr->tx_push; + struct tx_push_bd *tx_push = &tx_push_buf->push_bd; + struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void __iomem *db = txr->tx_db.doorbell; + void *pdata = tx_push_buf->data; + struct bnxt_sw_tx_bd *tx_buf; + u16 prod, last_frag; + unsigned int length; + struct tx_bd *txbd; + int i, push_len; + u64 *end; + u32 len; + + prod = txr->tx_prod; + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; + last_frag = skb_shinfo(skb)->nr_frags; + length = skb->len; + len = skb_headlen(skb); + + /* Set COAL_NOW to be ready quickly for the next push */ + tx_push->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_push1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + tx_push1->tx_bd_hsize_lflags = 0; + + tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + tx_push1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); + + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + + skb_copy_from_linear_data(skb, pdata, len); + pdata += len; + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + return -EFAULT; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; + txbd->tx_bd_haddr = txr->data_mapping; + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + memcpy(txbd, tx_push1, sizeof(*txbd)); + prod = NEXT_TX(prod); + tx_push->doorbell = cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | + DB_RING_IDX(&txr->tx_db, prod)); + txr->tx_prod = prod; + + tx_buf->is_push = 1; + netdev_tx_sent_queue(txq, length); + wmb(); /* Sync is_push and byte queue before pushing data */ + + push_len = (length + sizeof(*tx_push) + 7) / 8; + if (push_len > 16) { + __iowrite64_copy(db, tx_push_buf, 16); + __iowrite32_copy(db + 
4, tx_push_buf + 1, (push_len - 16) << 1); + } else { + __iowrite64_copy(db, tx_push_buf, push_len); + } + return 0; +} + +static int bnxt_push_xmit_p5(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct netdev_queue *txq, struct sk_buff *skb, + u32 vlan_tag_flags, u32 cfa_action) +{ + struct bnxt_db_info *db = &txr->tx_push_db; + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd_ext *txbd1; + int i, push_len, bds; + u16 prod, last_frag; + unsigned int length; + struct tx_bd *txbd; + void *pdata; + u64 *end; + u32 len; + + if (unlikely(!db->doorbell)) + return -EOPNOTSUPP; + + length = skb->len; + push_len = TX_PUSH_LEN(length); + len = skb_headlen(skb); + prod = txr->tx_prod; + + bds = TX_INLINE_BDS(push_len); + if (bds > (TX_DESC_CNT - TX_IDX(prod))) + return -E2BIG; + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; + last_frag = skb_shinfo(skb)->nr_frags; + + /* Set COAL_NOW to be ready quickly for the next push */ + txbd->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD_INLINE | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | + (bds << TX_BD_FLAGS_BD_CNT_SHIFT)); + txbd->tx_bd_opaque = prod; + txbd->tx_bd_haddr = cpu_to_le64(0); + txbd1 = (struct tx_bd_ext *) (txbd + 1); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + txbd1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + txbd1->tx_bd_hsize_lflags = 0; + + txbd1->tx_bd_mss = cpu_to_le32(0); + txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + txbd1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); + + pdata = txbd1 + 1; + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + + skb_copy_from_linear_data(skb, pdata, len); + pdata += len; + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + return -EFAULT; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + txr->tx_prod = prod + bds; + + tx_buf->is_push = 1; + tx_buf->inline_data_bds = bds - 2; + netdev_tx_sent_queue(txq, length); + wmb(); /* Sync is_push and byte queue before pushing data */ + + push_len = DIV_ROUND_UP(push_len, 8); + + if (bp->tx_push_mode == BNXT_PUSH_MODE_WCB) { + writeq(db->db_key64 | DBR_TYPE_PUSH_START | + DB_RING_IDX(db, prod), db->doorbell); + __iowrite64_copy(txr->tx_push_wcb, txbd, push_len); + writeq(db->db_key64 | DBR_TYPE_PUSH_END | DBR_PATH_L2 | + DB_RING_IDX(db, txr->tx_prod), db->doorbell); + } else { + bnxt_db_write_relaxed(bp, db, txr->tx_prod); + writeq_relaxed(DB_PUSH_INFO(db, push_len, prod), + db->doorbell + sizeof(struct dbc_dbc)); + __iowrite64_copy(txr->tx_push_wcb, txbd, push_len); + /* flip buffers */ + db->doorbell = (void *)((uintptr_t)db->doorbell ^ DB_PPP_SIZE); + txr->tx_push_wcb = (void *)((uintptr_t)txr->tx_push_wcb ^ DB_PPP_SIZE); + } + + return 0; } static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -356,10 +621,13 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) struct pci_dev *pdev = bp->pdev; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; + __le32 lflags = 0; i = skb_get_queue_mapping(skb); if (unlikely(i >= bp->tx_nr_rings)) { dev_kfree_skb_any(skb); + netdev_warn(dev, "TX packet queue %d exceeds maximum %d\n", + i, bp->tx_nr_rings - 1); return NETDEV_TX_OK; } @@ -381,7 +649,7 @@ static netdev_tx_t bnxt_start_xmit(struct 
sk_buff *skb, struct net_device *dev) txbd->tx_bd_opaque = prod; - tx_buf = &txr->tx_buf_ring[prod]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; tx_buf->skb = skb; tx_buf->nr_frags = last_frag; @@ -393,88 +661,53 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Currently supports 8021Q, 8021AD vlan offloads * QINQ1, QINQ2, QINQ3 vlan headers are deprecated */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) if (skb->vlan_proto == htons(ETH_P_8021Q)) +#endif vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; } +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && + atomic_dec_if_positive(&ptp->tx_avail) >= 0) { + lflags = cpu_to_le32(TX_BD_FLAGS_STAMP); + goto normal_tx; + } + } +#endif + if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { - struct tx_push_buffer *tx_push_buf = txr->tx_push; - struct tx_push_bd *tx_push = &tx_push_buf->push_bd; - struct tx_bd_ext *tx_push1 = &tx_push->txbd2; - void __iomem *db = txr->tx_db.doorbell; - void *pdata = tx_push_buf->data; - u64 *end; - int j, push_len; - - /* Set COAL_NOW to be ready quickly for the next push */ - tx_push->tx_bd_len_flags_type = - cpu_to_le32((length << TX_BD_LEN_SHIFT) | - TX_BD_TYPE_LONG_TX_BD | - TX_BD_FLAGS_LHINT_512_AND_SMALLER | - TX_BD_FLAGS_COAL_NOW | - TX_BD_FLAGS_PACKET_END | - (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); - - if (skb->ip_summed == CHECKSUM_PARTIAL) - tx_push1->tx_bd_hsize_lflags = - cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); - else - tx_push1->tx_bd_hsize_lflags = 0; - - tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); - tx_push1->tx_bd_cfa_action = - cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); - - end = pdata + length; - end = PTR_ALIGN(end, 8) - 1; - *end = 0; - - skb_copy_from_linear_data(skb, pdata, len); - pdata += len; - for (j = 0; j < last_frag; j++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; - void *fptr; - - fptr = skb_frag_address_safe(frag); - if (!fptr) - goto normal_tx; - - memcpy(pdata, fptr, skb_frag_size(frag)); - pdata += skb_frag_size(frag); + switch (bp->tx_push_mode) { + case BNXT_PUSH_MODE_WCB: + fallthrough; + case BNXT_PUSH_MODE_PPP: + if (!bnxt_push_xmit_p5(bp, txr, txq, skb, + vlan_tag_flags, cfa_action)) + goto tx_done; + break; + case BNXT_PUSH_MODE_LEGACY: + if (!bnxt_push_xmit(bp, txr, txq, skb, vlan_tag_flags, + cfa_action)) + goto tx_done; + break; + default: + break; } - - txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; - txbd->tx_bd_haddr = txr->data_mapping; - prod = NEXT_TX(prod); - txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - memcpy(txbd, tx_push1, sizeof(*txbd)); - prod = NEXT_TX(prod); - tx_push->doorbell = - cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); - txr->tx_prod = prod; - - tx_buf->is_push = 1; - netdev_tx_sent_queue(txq, skb->len); - wmb(); /* Sync is_push and byte queue before pushing data */ - - push_len = (length + sizeof(*tx_push) + 7) / 8; - if (push_len > 16) { - __iowrite64_copy(db, tx_push_buf, 16); - __iowrite32_copy(db + 4, tx_push_buf + 1, - (push_len - 16) << 1); - } else { - __iowrite64_copy(db, tx_push_buf, push_len); - } - - goto tx_done; + /* Continue normal TX if push fails. */ } +#ifdef HAVE_IEEE1588_SUPPORT normal_tx: +#endif if (length < BNXT_MIN_PKT_SIZE) { pad = BNXT_MIN_PKT_SIZE - length; if (skb_pad(skb, pad)) { /* SKB already freed. 
*/ tx_buf->skb = NULL; + txr->bnapi->cp_ring.sw_stats.tx.tx_sw_errors++; return NETDEV_TX_OK; } length = BNXT_MIN_PKT_SIZE; @@ -485,6 +718,7 @@ normal_tx: if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { dev_kfree_skb_any(skb); tx_buf->skb = NULL; + txr->bnapi->cp_ring.sw_stats.tx.tx_sw_errors++; return NETDEV_TX_OK; } @@ -498,15 +732,17 @@ normal_tx: txbd1 = (struct tx_bd_ext *) &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - txbd1->tx_bd_hsize_lflags = 0; + txbd1->tx_bd_hsize_lflags = lflags; if (skb_is_gso(skb)) { u32 hdr_len; +#ifdef HAVE_INNER_NETWORK_OFFSET if (skb->encapsulation) hdr_len = skb_inner_network_offset(skb) + skb_inner_network_header_len(skb) + inner_tcp_hdrlen(skb); else +#endif hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); @@ -517,7 +753,7 @@ normal_tx: txbd1->tx_bd_mss = cpu_to_le32(length); length += hdr_len; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - txbd1->tx_bd_hsize_lflags = + txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); txbd1->tx_bd_mss = 0; } @@ -548,7 +784,7 @@ normal_tx: if (unlikely(dma_mapping_error(&pdev->dev, mapping))) goto tx_dma_error; - tx_buf = &txr->tx_buf_ring[prod]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; dma_unmap_addr_set(tx_buf, mapping, mapping); txbd->tx_bd_haddr = cpu_to_le64(mapping); @@ -564,20 +800,32 @@ normal_tx: netdev_tx_sent_queue(txq, skb->len); +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(lflags)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_tx_timestamp(skb); +#endif + /* Sync BD data before updating doorbell */ wmb(); prod = NEXT_TX(prod); txr->tx_prod = prod; +#if defined(HAVE_NETDEV_XMIT_MORE) || defined(HAVE_SKB_XMIT_MORE) if (!netdev_xmit_more() || netif_xmit_stopped(txq)) +#endif bnxt_db_write(bp, &txr->tx_db, prod); tx_done: + mmiowb(); + if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { +#if defined(HAVE_NETDEV_XMIT_MORE) || defined(HAVE_SKB_XMIT_MORE) if (netdev_xmit_more() && !tx_buf->is_push) bnxt_db_write(bp, &txr->tx_db, prod); +#endif netif_tx_stop_queue(txq); @@ -593,11 +841,14 @@ tx_done: return NETDEV_TX_OK; tx_dma_error: + if (lflags) + atomic_inc(&bp->ptp_cfg->tx_avail); + last_frag = i; /* start back at beginning and unmap skb */ prod = txr->tx_prod; - tx_buf = &txr->tx_buf_ring[prod]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; tx_buf->skb = NULL; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); @@ -606,13 +857,14 @@ tx_dma_error: /* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { prod = NEXT_TX(prod); - tx_buf = &txr->tx_buf_ring[prod]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), PCI_DMA_TODEVICE); } dev_kfree_skb_any(skb); + txr->bnapi->cp_ring.sw_stats.tx.tx_sw_errors++; return NETDEV_TX_OK; } @@ -627,16 +879,18 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) for (i = 0; i < nr_pkts; i++) { struct bnxt_sw_tx_bd *tx_buf; + bool compl_deferred = false; struct sk_buff *skb; int j, last; - tx_buf = &txr->tx_buf_ring[cons]; + tx_buf = &txr->tx_buf_ring[RING_TX(cons)]; cons = NEXT_TX(cons); skb = tx_buf->skb; tx_buf->skb = NULL; if (tx_buf->is_push) { tx_buf->is_push = 0; + cons += tx_buf->inline_data_bds; goto next_tx_int; } @@ -646,7 +900,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) for (j = 0; j < last; j++) { cons = NEXT_TX(cons); - tx_buf = &txr->tx_buf_ring[cons]; + tx_buf = 
&txr->tx_buf_ring[RING_TX(cons)]; dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), @@ -654,11 +908,38 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) PCI_DMA_TODEVICE); } +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!bnxt_get_tx_ts_p5(bp, skb)) + compl_deferred = true; + else + atomic_inc(&bp->ptp_cfg->tx_avail); + } else { + u64 ts; + + if (!bnxt_get_tx_ts(bp, &ts)) { + struct skb_shared_hwtstamps timestamp; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 ns; + + memset(&timestamp, 0, + sizeof(timestamp)); + ns = timecounter_cyc2time(&ptp->tc, ts); + timestamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &timestamp); + } + atomic_inc(&bp->ptp_cfg->tx_avail); + } + } +#endif + next_tx_int: cons = NEXT_TX(cons); tx_bytes += skb->len; - dev_kfree_skb_any(skb); + if (!compl_deferred) + dev_kfree_skb_any(skb); } netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); @@ -682,21 +963,29 @@ next_tx_int: } } +#ifdef HAVE_BUILD_SKB static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, gfp_t gfp) { struct device *dev = &bp->pdev->dev; struct page *page; - +#ifndef CONFIG_PAGE_POOL + page = alloc_page(gfp); +#else page = page_pool_dev_alloc_pages(rxr->page_pool); +#endif if (!page) return NULL; *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(dev, *mapping)) { +#ifndef CONFIG_PAGE_POOL + __free_page(page); +#else page_pool_recycle_direct(rxr->page_pool, page); +#endif return NULL; } *mapping += bp->rx_dma_offset; @@ -723,12 +1012,47 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, } return data; } +#else + +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, + gfp_t gfp) +{ + return NULL; +} + +static inline struct sk_buff *__bnxt_alloc_rx_data(struct bnxt *bp, + dma_addr_t *mapping, + gfp_t gfp) +{ + struct sk_buff *skb; + u8 *data; + struct pci_dev *pdev = bp->pdev; + + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + if (skb == NULL) + return NULL; + + data = skb->data; + + *mapping = dma_map_single_attrs(&pdev->dev, + data + bp->rx_dma_offset, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + if (dma_mapping_error(&pdev->dev, *mapping)) { + dev_kfree_skb(skb); + skb = NULL; + } + return skb; +} +#endif int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 prod, gfp_t gfp) { struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; - struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(prod)]; dma_addr_t mapping; if (BNXT_RX_PAGE_MODE(bp)) { @@ -741,13 +1065,21 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rx_buf->data = page; rx_buf->data_ptr = page_address(page) + bp->rx_offset; } else { +#ifdef HAVE_BUILD_SKB u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); +#else + struct sk_buff *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); +#endif if (!data) return -ENOMEM; rx_buf->data = data; +#ifdef HAVE_BUILD_SKB rx_buf->data_ptr = data + bp->rx_offset; +#else + rx_buf->data_ptr = data->data + bp->rx_offset; +#endif } rx_buf->mapping = mapping; @@ -759,9 +1091,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) { u16 prod = rxr->rx_prod; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + 
struct bnxt *bp = rxr->bnapi->bp; struct rx_bd *cons_bd, *prod_bd; - prod_rx_buf = &rxr->rx_buf_ring[prod]; + prod_rx_buf = &rxr->rx_buf_ring[RING_RX(prod)]; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf->data = data; @@ -790,7 +1123,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, u16 prod, gfp_t gfp) { struct rx_bd *rxbd = - &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + &rxr->rx_agg_desc_ring[RX_AGG_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_agg_bd *rx_agg_buf; struct pci_dev *pdev = bp->pdev; struct page *page; @@ -832,7 +1165,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, __set_bit(sw_prod, rxr->rx_agg_bmap); rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; - rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); + rxr->rx_sw_agg_prod = RING_RX_AGG(NEXT_RX_AGG(sw_prod)); rx_agg_buf->page = page; rx_agg_buf->offset = offset; @@ -842,9 +1175,8 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, return 0; } -static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - u16 cp_cons, u16 curr) +struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) { struct rx_agg_cmp *agg; @@ -854,9 +1186,9 @@ static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, return agg; } -static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr, - u16 agg_id, u16 curr) +struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) { struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; @@ -908,18 +1240,21 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, prod_rx_buf->mapping = cons_rx_buf->mapping; - prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + prod_bd = + &rxr->rx_agg_desc_ring[RX_AGG_RING(prod)][RX_IDX(prod)]; prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); prod_bd->rx_bd_opaque = sw_prod; prod = NEXT_RX_AGG(prod); - sw_prod = NEXT_RX_AGG(sw_prod); + sw_prod = RING_RX_AGG(NEXT_RX_AGG(sw_prod)); } rxr->rx_agg_prod = prod; rxr->rx_sw_agg_prod = sw_prod; } +#ifdef HAVE_BUILD_SKB +#ifdef BNXT_RX_PAGE_MODE_SUPPORT static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, void *data, u8 *data_ptr, @@ -966,6 +1301,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return skb; } +#endif static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, @@ -995,6 +1331,30 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, skb_put(skb, offset_and_len & 0xffff); return skb; } +#else +static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, u16 cons, + void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + struct sk_buff *skb = data; + u16 prod = rxr->rx_prod; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, skb); + return NULL; + } + + dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + skb_reserve(skb, bp->rx_offset); + skb_put(skb, offset_and_len & 0xffff); + return skb; +} +#endif static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -1169,12 +1529,42 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { if (!rxr->bnapi->in_reset) { rxr->bnapi->in_reset = true; - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + if (bp->flags & 
BNXT_FLAG_CHIP_P5) + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + else + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } rxr->rx_next_cons = 0xffff; } +static void bnxt_set_netdev_mtu(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + u16 dflt_mtu = bp->fw_dflt_mtu; + +#ifdef HAVE_MIN_MTU + /* MTU range: 60 - FW defined max */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = bp->max_mtu; + + /* qcfg hwrm provides user configured 'default mtu'. + * Configure it on netdev if it is valid mtu. + */ + if (dflt_mtu) { + dev->mtu = dflt_mtu; + if (bp->fw_cap & BNXT_FW_CAP_ADMIN_MTU) { + bp->max_mtu = dflt_mtu; + dev->min_mtu = dflt_mtu; + dev->max_mtu = dflt_mtu; + } + } +#else + if (dflt_mtu) + dev->mtu = dflt_mtu; +#endif +} + static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) { struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; @@ -1221,7 +1611,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, cons = tpa_start->rx_tpa_start_cmp_opaque; prod = rxr->rx_prod; cons_rx_buf = &rxr->rx_buf_ring[cons]; - prod_rx_buf = &rxr->rx_buf_ring[prod]; + prod_rx_buf = &rxr->rx_buf_ring[RING_RX(prod)]; tpa_info = &rxr->rx_tpa[agg_id]; if (unlikely(cons != rxr->rx_next_cons || @@ -1276,8 +1666,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->agg_count = 0; rxr->rx_prod = NEXT_RX(prod); - cons = NEXT_RX(cons); - rxr->rx_next_cons = NEXT_RX(cons); + cons = RING_RX(NEXT_RX(cons)); + rxr->rx_next_cons = RING_RX(NEXT_RX(cons)); cons_rx_buf = &rxr->rx_buf_ring[cons]; bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); @@ -1503,6 +1893,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u32 *raw_cons, struct rx_tpa_end_cmp *tpa_end, struct rx_tpa_end_cmp_ext *tpa_end1, +#ifdef OLD_VLAN + u32 *vlan, +#endif u8 *event) { struct bnxt_napi *bnapi = cpr->bnapi; @@ -1513,7 +1906,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, dma_addr_t mapping; struct sk_buff *skb; u16 idx = 0, agg_id; +#ifdef HAVE_BUILD_SKB void *data; +#else + struct sk_buff *data; +#endif bool gro; if (unlikely(bnapi->in_reset)) { @@ -1574,7 +1971,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } } else { +#ifdef HAVE_BUILD_SKB u8 *new_data; +#else + struct sk_buff *new_data; +#endif dma_addr_t new_mapping; new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); @@ -1584,20 +1985,29 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } tpa_info->data = new_data; +#ifdef HAVE_BUILD_SKB tpa_info->data_ptr = new_data + bp->rx_offset; +#else + tpa_info->data_ptr = new_data->data + bp->rx_offset; +#endif tpa_info->mapping = new_mapping; +#ifdef HAVE_BUILD_SKB skb = build_skb(data, 0); +#else + skb = data; +#endif dma_unmap_single_attrs(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - +#ifdef HAVE_BUILD_SKB if (!skb) { kfree(data); bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); +#endif skb_put(skb, len); } @@ -1616,19 +2026,29 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u16 vlan_proto = tpa_info->metadata >> RX_CMP_FLAGS2_METADATA_TPID_SFT; u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; +#ifdef OLD_VLAN + if (vlan_proto == 
ETH_P_8021Q) + *vlan = vtag | OLD_VLAN_VALID; +#else __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); +#endif } skb_checksum_none_assert(skb); if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifdef HAVE_CSUM_LEVEL skb->csum_level = (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; +#elif defined(HAVE_INNER_NETWORK_OFFSET) + skb->encapsulation = + (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; +#endif } if (gro) @@ -1650,15 +2070,125 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, } static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, - struct sk_buff *skb) + u32 vlan, struct sk_buff *skb) { if (skb->dev != bp->dev) { /* this packet belongs to a vf-rep */ bnxt_vf_rep_rx(bp, skb); return; } + skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); +#ifdef BNXT_PRIV_RX_BUSY_POLL + skb_mark_napi_id(skb, &bnapi->napi); +#endif +#ifdef OLD_VLAN + if (vlan && bp->vlgrp) + vlan_gro_receive(&bnapi->napi, bp->vlgrp, (u16)vlan, skb); +#else + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); +#endif + else + napi_gro_receive(&bnapi->napi, skb); +} + +#ifdef OLD_VLAN +static u32 bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, struct rx_cmp_ext *rxcmp1) +{ + u16 vtag, vlan_proto; + u32 meta_data; + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + __le32 flags2 = rxcmp1->rx_cmp_flags2; + + if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) + return 0; + + meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; + vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + if (vlan_proto == ETH_P_8021Q) + return vtag | OLD_VLAN_VALID; + } else if (cmp_type == CMP_TYPE_RX_L2_V2_CMP) { + if (RX_CMP_VLAN_VALID(rxcmp)) { + u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); + + if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) { + vlan_proto = ETH_P_8021Q; + vtag = RX_CMP_METADATA0_VID(rxcmp1); + return vtag | OLD_VLAN_VALID; + } + } + } + return 0; +} +#else +static void bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, struct rx_cmp_ext *rxcmp1) +{ + u16 vtag, vlan_proto; + u32 meta_data; + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + __le32 flags2 = rxcmp1->rx_cmp_flags2; + + if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) + return; + + meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; + vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + } else if (cmp_type == CMP_TYPE_RX_L2_V2_CMP) { + if (RX_CMP_VLAN_VALID(rxcmp)) { + u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); + + if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) + vlan_proto = ETH_P_8021Q; + else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) + vlan_proto = ETH_P_8021AD; + else + return; + vtag = RX_CMP_METADATA0_VID(rxcmp1); + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + } + } +} +#endif + +static bool bnxt_rx_csum_err(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, struct rx_cmp_ext *rxcmp1) +{ + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + if (RX_CMP_L4_CS_OK(rxcmp1)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifdef HAVE_CSUM_LEVEL + skb->csum_level = RX_CMP_ENCAP(rxcmp1); +#elif defined(HAVE_INNER_NETWORK_OFFSET) + skb->encapsulation = RX_CMP_ENCAP(rxcmp1); +#endif + return false; + } + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) + return true; + } else if 
(cmp_type == CMP_TYPE_RX_L2_V2_CMP) { + u32 csum_count = RX_CMP_V2_L4_CS_OK_CNT(rxcmp1); + + if (csum_count) { + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifdef HAVE_CSUM_LEVEL + skb->csum_level = csum_count - 1; +#elif defined(HAVE_INNER_NETWORK_OFFSET) + skb->encapsulation = csum_count > 1; +#endif + return false; + } + if (RX_CMP_V2_L4_CS_ERRS(rxcmp1)) + return true; + } + return false; } /* returns the following: @@ -1680,12 +2210,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; +#ifdef HAVE_BUILD_SKB u8 *data_ptr, agg_bufs, cmp_type; + void *data; +#else + struct sk_buff *data; + u8 *data_ptr, agg_bufs, cmp_type; +#endif dma_addr_t dma_addr; struct sk_buff *skb; - void *data; int rc = 0; - u32 misc; + u32 vlan = 0; + u32 misc, flags; +#ifdef HAVE_IEEE1588_SUPPORT + u64 ts = 0; +#endif rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; @@ -1717,14 +2256,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, (struct rx_tpa_end_cmp *)rxcmp, - (struct rx_tpa_end_cmp_ext *)rxcmp1, event); + (struct rx_tpa_end_cmp_ext *)rxcmp1, +#ifdef OLD_VLAN + &vlan, +#endif + event); if (IS_ERR(skb)) return -EBUSY; rc = -ENOMEM; if (likely(skb)) { - bnxt_deliver_skb(bp, bnapi, skb); + bnxt_deliver_skb(bp, bnapi, vlan, skb); rc = 1; } *event |= BNXT_RX_EVENT; @@ -1763,24 +2306,34 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, - false); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, false); rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - bnapi->cp_ring.rx_buf_errors++; + bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { - netdev_warn(bp->dev, "RX buffer error %x\n", - rx_err); + netdev_warn_once(bp->dev, "RX buffer error %x\n", + rx_err); bnxt_sched_reset(bp, rxr); } } goto next_rx_no_len; } - len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; + flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); + len = flags >> RX_CMP_LEN_SHIFT; dma_addr = rx_buf->mapping; +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == + RX_CMP_FLAGS_ITYPE_PTP_W_TS)) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + bnxt_get_rx_ts_p5(bp, &ts, rxcmp1->rx_cmp_unused3); + else + bnxt_get_rx_ts(bp, &ts); + } +#endif + if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { rc = 1; goto next_rx; @@ -1832,30 +2385,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cfa_code = RX_CMP_CFA_CODE(rxcmp1); skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); - if ((rxcmp1->rx_cmp_flags2 & - cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); - u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; - - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); - } - + if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) +#ifdef OLD_VLAN + vlan = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); +#else + bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); +#endif skb_checksum_none_assert(skb); - if (RX_CMP_L4_CS_OK(rxcmp1)) { - if (dev->features & 
NETIF_F_RXCSUM) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = RX_CMP_ENCAP(rxcmp1); - } - } else { - if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { - if (dev->features & NETIF_F_RXCSUM) - bnapi->cp_ring.rx_l4_csum_errors++; - } + if (dev->features & NETIF_F_RXCSUM) { + if (bnxt_rx_csum_err(skb, cmp_type, rxcmp, rxcmp1)) + bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; } - bnxt_deliver_skb(bp, bnapi, skb); +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(ts)) { + u64 ns; + + ns = timecounter_cyc2time(&bp->ptp_cfg->tc, ts); + memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb))); + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + } +#endif + + bnxt_deliver_skb(bp, bnapi, vlan, skb); rc = 1; next_rx: @@ -1864,7 +2416,7 @@ next_rx: next_rx_no_len: rxr->rx_prod = NEXT_RX(prod); - rxr->rx_next_cons = NEXT_RX(cons); + rxr->rx_next_cons = RING_RX(NEXT_RX(cons)); next_rx_no_prod_no_len: *raw_cons = tmp_raw_cons; @@ -1925,7 +2477,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) break; case BNXT_FW_HEALTH_REG_TYPE_GRC: reg_off = fw_health->mapped_regs[reg_idx]; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: val = readl(bp->bar0 + reg_off); break; @@ -1939,8 +2491,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) } #define BNXT_GET_EVENT_PORT(data) \ - ((data) & \ - ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) + (data & ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) @@ -1968,7 +2519,11 @@ static int bnxt_async_event_process(struct bnxt *bp, } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); } - /* fall through */ + fallthrough; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: + set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; @@ -2045,6 +2600,39 @@ static int bnxt_async_event_process(struct bnxt *bp, bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); goto async_event_process_exit; } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + struct bnxt_pf_info *pf = &bp->pf; + u32 pf_id, vf_idx, vf_state; + + pf_id = EVENT_DATA1_VNIC_CHNG_PF_ID(data1); + vf_idx = EVENT_DATA1_VNIC_CHNG_VF_ID(data1) - pf->first_vf_id; + vf_state = EVENT_DATA1_VNIC_CHNG_VNIC_STATE(data1); + if (BNXT_PF(bp) && pf->active_vfs && pf_id == pf->fw_fid && + vf_idx < pf->active_vfs) { + bnxt_update_vf_vnic(bp, vf_idx, vf_state); + set_bit(BNXT_VF_VNIC_CHANGE_SP_EVENT, &bp->sp_event); + break; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: { + if (netif_msg_hw(bp)) { + u32 data1 = le32_to_cpu(cmpl->event_data1); + u32 data2 = le32_to_cpu(cmpl->event_data2); + + netdev_dbg(bp->dev, + "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", + data1, data2); + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { + u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; + + hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -2064,10 +2652,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) switch (cmpl_type) { case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); - if (seq_id == 
bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; - else - netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); break; case CMPL_BASE_TYPE_HWRM_FWD_REQ: @@ -2160,7 +2745,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct tx_cmp *txcmp; cpr->has_more_work = 0; + cpr->had_work_done = 1; while (1) { + u8 cmp_type; int rc; cons = RING_CMP(raw_cons); @@ -2173,8 +2760,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, * reading any further. */ dma_rmb(); - cpr->had_work_done = 1; - if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { + cmp_type = TX_CMP_TYPE(txcmp); + if (cmp_type == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ if (unlikely(tx_pkts > bp->tx_wake_thresh)) { @@ -2184,7 +2771,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->has_more_work = 1; break; } - } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + } else if (cmp_type >= CMP_TYPE_RX_L2_TPA_START_V2_CMP && + cmp_type <= CMP_TYPE_RX_TPA_AGG_CMP) { if (likely(budget)) rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); else @@ -2201,12 +2789,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; - } else if (unlikely((TX_CMP_TYPE(txcmp) == - CMPL_BASE_TYPE_HWRM_DONE) || - (TX_CMP_TYPE(txcmp) == - CMPL_BASE_TYPE_HWRM_FWD_REQ) || - (TX_CMP_TYPE(txcmp) == - CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { + } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || + cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || + cmp_type == + CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { bnxt_hwrm_handler(bp, txcmp); } raw_cons = NEXT_RAW_CMP(raw_cons); @@ -2243,7 +2829,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) bnapi->tx_pkts = 0; } - if (bnapi->events & BNXT_RX_EVENT) { + if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; if (bnapi->events & BNXT_AGG_EVENT) @@ -2332,7 +2918,11 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE napi_complete_done(napi, rx_pkts); +#else + napi_complete(napi); +#endif BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); } return rx_pkts; @@ -2345,6 +2935,9 @@ static int bnxt_poll(struct napi_struct *napi, int budget) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int work_done = 0; + if (!bnxt_lock_napi(bnapi)) + return budget; + while (1) { work_done += bnxt_poll_work(bp, cpr, budget - work_done); @@ -2355,8 +2948,13 @@ static int bnxt_poll(struct napi_struct *napi, int budget) } if (!bnxt_has_work(bp, cpr)) { +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE if (napi_complete_done(napi, work_done)) BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); +#else + napi_complete(napi); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); +#endif break; } } @@ -2369,6 +2967,8 @@ static int bnxt_poll(struct napi_struct *napi, int budget) &dim_sample); net_dim(&cpr->dim, dim_sample); } + mmiowb(); + bnxt_unlock_napi(bnapi); return work_done; } @@ -2390,7 +2990,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) } static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, - u64 dbr_type, bool all) + u64 dbr_type) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int i; @@ 
-2399,10 +2999,11 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; struct bnxt_db_info *db; - if (cpr2 && (all || cpr2->had_work_done)) { + if (cpr2 && cpr2->had_work_done) { db = &cpr2->cp_db; writeq(db->db_key64 | dbr_type | - RING_CMP(cpr2->cp_raw_cons), db->doorbell); + DB_RING_IDX(db, cpr2->cp_raw_cons), + db->doorbell); cpr2->had_work_done = 0; } } @@ -2419,30 +3020,32 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) int work_done = 0; u32 cons; + if (!bnxt_lock_napi(bnapi)) + return budget; + if (cpr->has_more_work) { cpr->has_more_work = 0; work_done = __bnxt_poll_cqs(bp, bnapi, budget); - if (cpr->has_more_work) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); - return work_done; - } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); - if (napi_complete_done(napi, work_done)) - BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); - return work_done; } while (1) { cons = RING_CMP(raw_cons); nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; if (!NQ_CMP_VALID(nqcmp, raw_cons)) { - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, - false); + if (cpr->has_more_work) + break; + + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); cpr->cp_raw_cons = raw_cons; +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); - return work_done; +#else + napi_complete(napi); + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); +#endif + goto poll_done; } /* The valid test of the entry must be done first before @@ -2457,19 +3060,59 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) cpr2 = cpr->cp_ring_arr[idx]; work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); - cpr->has_more_work = cpr2->has_more_work; + cpr->has_more_work |= cpr2->has_more_work; } else { bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); } raw_cons = NEXT_RAW_CMP(raw_cons); - if (cpr->has_more_work) - break; } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); - cpr->cp_raw_cons = raw_cons; + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + if (raw_cons != cpr->cp_raw_cons) { + cpr->cp_raw_cons = raw_cons; + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); + } +poll_done: + bnxt_unlock_napi(bnapi); return work_done; } +#ifdef BNXT_PRIV_RX_BUSY_POLL +static int bnxt_busy_poll(struct napi_struct *napi) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt *bp = bnapi->bp; + int rx_work = 0, budget = 4; + + if (atomic_read(&bp->intr_sem) != 0) + return LL_FLUSH_FAILED; + + if (!bp->link_info.link_up) + return LL_FLUSH_FAILED; + + if (!bnxt_lock_poll(bnapi)) + return LL_FLUSH_BUSY; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + struct bnxt_cp_ring_info *cpr2; + + cpr2 = cpr->cp_ring_arr[0]; + if (cpr2) + rx_work = bnxt_poll_work(bp, cpr2, budget); + cpr2 = cpr->cp_ring_arr[1]; + if (cpr2 && rx_work < budget) + rx_work += bnxt_poll_work(bp, cpr2, budget - rx_work); + } else { + rx_work = bnxt_poll_work(bp, cpr, budget); + } + + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + + bnxt_unlock_poll(bnapi); + return rx_work; +} +#endif + static void bnxt_free_tx_skbs(struct bnxt *bp) { int i, max_idx; @@ -2494,7 +3137,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), PCI_DMA_TODEVICE); +#ifdef HAVE_XDP_FRAME xdp_return_frame(tx_buf->xdpf); +#endif tx_buf->action = 0; tx_buf->xdpf = NULL; 
j++; @@ -2538,93 +3183,114 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) } } +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct pci_dev *pdev = bp->pdev; + struct bnxt_tpa_idx_map *map; + int i, max_idx, max_agg_idx; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + if (!rxr->rx_tpa) + goto skip_rx_tpa_free; + + for (i = 0; i < bp->max_tpa; i++) { + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; +#ifdef HAVE_BUILD_SKB + u8 *data = tpa_info->data; +#else + struct sk_buff *data = tpa_info->data; +#endif + + if (!data) + continue; + + dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + tpa_info->data = NULL; + +#ifdef HAVE_BUILD_SKB + kfree(data); +#else + dev_kfree_skb_any(data); +#endif + } + +skip_rx_tpa_free: + for (i = 0; i < max_idx; i++) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + dma_addr_t mapping = rx_buf->mapping; +#ifdef HAVE_BUILD_SKB + void *data = rx_buf->data; +#else + struct sk_buff *data = rx_buf->data; +#endif + + if (!data) + continue; + +#ifdef HAVE_BUILD_SKB + if (BNXT_RX_PAGE_MODE(bp)) { + mapping -= bp->rx_dma_offset; + dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, + bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); +#ifndef CONFIG_PAGE_POOL + __free_page(data); +#else + page_pool_recycle_direct(rxr->page_pool, data); +#endif + } else { + dma_unmap_single_attrs(&pdev->dev, mapping, + bp->rx_buf_use_size, + bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + kfree(data); + } +#else + dma_unmap_single_attrs(&pdev->dev, mapping, bp->rx_buf_use_size, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dev_kfree_skb_any(data); +#endif + rx_buf->data = NULL; + } + for (i = 0; i < max_agg_idx; i++) { + struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + DMA_ATTR_WEAK_ORDERING); + + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); + + __free_page(page); + } + if (rxr->rx_page) { + __free_page(rxr->rx_page); + rxr->rx_page = NULL; + } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); +} + static void bnxt_free_rx_skbs(struct bnxt *bp) { - int i, max_idx, max_agg_idx; - struct pci_dev *pdev = bp->pdev; + int i; if (!bp->rx_ring) return; - max_idx = bp->rx_nr_pages * RX_DESC_CNT; - max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_tpa_idx_map *map; - int j; - - if (rxr->rx_tpa) { - for (j = 0; j < bp->max_tpa; j++) { - struct bnxt_tpa_info *tpa_info = - &rxr->rx_tpa[j]; - u8 *data = tpa_info->data; - - if (!data) - continue; - - dma_unmap_single_attrs(&pdev->dev, - tpa_info->mapping, - bp->rx_buf_use_size, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - tpa_info->data = NULL; - - kfree(data); - } - } - - for (j = 0; j < max_idx; j++) { - struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; - dma_addr_t mapping = rx_buf->mapping; - void *data = rx_buf->data; - - if (!data) - continue; - - rx_buf->data = NULL; - - if (BNXT_RX_PAGE_MODE(bp)) { - mapping -= bp->rx_dma_offset; - dma_unmap_page_attrs(&pdev->dev, mapping, - PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - page_pool_recycle_direct(rxr->page_pool, data); - } else { - 
dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - kfree(data); - } - } - - for (j = 0; j < max_agg_idx; j++) { - struct bnxt_sw_rx_agg_bd *rx_agg_buf = - &rxr->rx_agg_ring[j]; - struct page *page = rx_agg_buf->page; - - if (!page) - continue; - - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, - PCI_DMA_FROMDEVICE, - DMA_ATTR_WEAK_ORDERING); - - rx_agg_buf->page = NULL; - __clear_bit(j, rxr->rx_agg_bmap); - - __free_page(page); - } - if (rxr->rx_page) { - __free_page(rxr->rx_page); - rxr->rx_page = NULL; - } - map = rxr->rx_tpa_idx_map; - if (map) - memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); - } + for (i = 0; i < bp->rx_nr_rings; i++) + bnxt_free_one_rx_ring_skbs(bp, i); } static void bnxt_free_skbs(struct bnxt *bp) @@ -2692,6 +3358,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) if (!rmem->pg_arr[i]) return -ENOMEM; + if (rmem->init_val) + memset(rmem->pg_arr[i], rmem->init_val, + rmem->page_size); if (rmem->nr_pages > 1 || rmem->depth > 0) { if (i == rmem->nr_pages - 2 && (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) @@ -2779,15 +3448,19 @@ static void bnxt_free_rx_rings(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; +#ifdef HAVE_NDO_XDP if (rxr->xdp_prog) bpf_prog_put(rxr->xdp_prog); +#endif +#ifdef HAVE_XDP_RXQ_INFO if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) xdp_rxq_info_unreg(&rxr->xdp_rxq); - +#endif +#ifdef CONFIG_PAGE_POOL page_pool_destroy(rxr->page_pool); rxr->page_pool = NULL; - +#endif kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -2799,8 +3472,8 @@ static void bnxt_free_rx_rings(struct bnxt *bp) } } -static int bnxt_alloc_rx_page_pool(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr) +#ifdef CONFIG_PAGE_POOL +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { struct page_pool_params pp = { 0 }; @@ -2812,12 +3485,18 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, rxr->page_pool = page_pool_create(&pp); if (IS_ERR(rxr->page_pool)) { int err = PTR_ERR(rxr->page_pool); - rxr->page_pool = NULL; return err; } return 0; } +#else +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + return 0; +} +#endif + static int bnxt_alloc_rx_rings(struct bnxt *bp) { @@ -2839,17 +3518,22 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (rc) return rc; +#ifdef HAVE_XDP_RXQ_INFO rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); if (rc < 0) return rc; rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, - MEM_TYPE_PAGE_POOL, - rxr->page_pool); +#ifndef CONFIG_PAGE_POOL + MEM_TYPE_PAGE_SHARED, NULL); +#else + MEM_TYPE_PAGE_POOL, rxr->page_pool); +#endif if (rc) { xdp_rxq_info_unreg(&rxr->xdp_rxq); return rc; } +#endif rc = bnxt_alloc_ring(bp, &ring->ring_mem); if (rc) @@ -2907,7 +3591,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) struct pci_dev *pdev = bp->pdev; bp->tx_push_size = 0; - if (bp->tx_push_thresh) { + if (bp->tx_push_mode == BNXT_PUSH_MODE_LEGACY) { int push_size; push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + @@ -2915,7 +3599,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) if (push_size > 256) { push_size = 0; - bp->tx_push_thresh = 0; + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; } bp->tx_push_size = push_size; @@ -2950,6 +3634,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) mapping = txr->tx_push_mapping + sizeof(struct tx_push_bd); txr->data_mapping = cpu_to_le64(mapping); + + memset(txr->tx_push, 0, 
sizeof(struct tx_push_bd)); } qidx = bp->tc_to_qidx[j]; ring->queue_id = bp->q_info[qidx].queue_id; @@ -3130,7 +3816,7 @@ skip_rx: ring = &txr->tx_ring_struct; rmem = &ring->ring_mem; rmem->nr_pages = bp->tx_nr_pages; - rmem->page_size = HW_RXBD_RING_SIZE; + rmem->page_size = HW_TXBD_RING_SIZE; rmem->pg_arr = (void **)txr->tx_desc_ring; rmem->dma_arr = txr->tx_desc_mapping; rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; @@ -3160,13 +3846,68 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) } } +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct net_device *dev = bp->dev; + u32 prod; + int i; + + prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX(prod); + } + rxr->rx_prod = prod; + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + prod = rxr->rx_agg_prod; + for (i = 0; i < bp->rx_agg_ring_size; i++) { + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX_AGG(prod); + } + rxr->rx_agg_prod = prod; + + if (rxr->rx_tpa) { + dma_addr_t mapping; +#ifdef HAVE_BUILD_SKB + u8 *data; +#else + struct sk_buff *data; +#endif + + for (i = 0; i < bp->max_tpa; i++) { + data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rxr->rx_tpa[i].data = data; +#ifdef HAVE_BUILD_SKB + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; +#else + rxr->rx_tpa[i].data_ptr = data->data + bp->rx_offset; +#endif + rxr->rx_tpa[i].mapping = mapping; + } + } + return 0; +} + static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) { - struct net_device *dev = bp->dev; struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; - u32 prod, type; - int i; + u32 type; type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; @@ -3178,7 +3919,12 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); +#ifdef HAVE_NDO_XDP if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { +#ifdef HAVE_VOID_BPF_PROG_ADD + bpf_prog_add(bp->xdp_prog, 1); + rxr->xdp_prog = bp->xdp_prog; +#else rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); if (IS_ERR(rxr->xdp_prog)) { int rc = PTR_ERR(rxr->xdp_prog); @@ -3186,63 +3932,23 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) rxr->xdp_prog = NULL; return rc; } +#endif } - prod = rxr->rx_prod; - for (i = 0; i < bp->rx_ring_size; i++) { - if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", - ring_nr, i, bp->rx_ring_size); - break; - } - prod = NEXT_RX(prod); - } - rxr->rx_prod = prod; +#endif + ring->fw_ring_id = INVALID_HW_RING_ID; ring = &rxr->rx_agg_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; - if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) - return 0; + if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; - type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | - RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; - - bnxt_init_rxbd_pages(ring, type); - - prod = rxr->rx_agg_prod; - for (i = 0; i < bp->rx_agg_ring_size; i++) { - if (bnxt_alloc_rx_page(bp, rxr, prod, 
GFP_KERNEL) != 0) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", - ring_nr, i, bp->rx_ring_size); - break; - } - prod = NEXT_RX_AGG(prod); - } - rxr->rx_agg_prod = prod; - - if (bp->flags & BNXT_FLAG_TPA) { - if (rxr->rx_tpa) { - u8 *data; - dma_addr_t mapping; - - for (i = 0; i < bp->max_tpa; i++) { - data = __bnxt_alloc_rx_data(bp, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; - - rxr->rx_tpa[i].data = data; - rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; - rxr->rx_tpa[i].mapping = mapping; - } - } else { - netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); - return -ENOMEM; - } + bnxt_init_rxbd_pages(ring, type); } - return 0; + return bnxt_alloc_one_rx_ring(bp, ring_nr); } static void bnxt_init_cp_rings(struct bnxt *bp) @@ -3377,11 +4083,17 @@ static void bnxt_init_vnics(struct bnxt *bp) vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; - if (bp->vnic_info[i].rss_hash_key) { - if (i == 0) - prandom_bytes(vnic->rss_hash_key, - HW_HASH_KEY_SIZE); + if (i == 0) { + if (bp->flags & BNXT_FLAG_USR_RSS_HASH_KEY) { + memcpy(vnic->rss_hash_key, + bp->usr_rss_hash_key, + HW_HASH_KEY_SIZE); + } + else + prandom_bytes(vnic->rss_hash_key, + HW_HASH_KEY_SIZE); + } else memcpy(vnic->rss_hash_key, bp->vnic_info[0].rss_hash_key, @@ -3414,8 +4126,14 @@ void bnxt_set_tpa_flags(struct bnxt *bp) return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) +#ifdef HAVE_NETIF_F_GRO_HW else if (bp->dev->features & NETIF_F_GRO_HW) +#else + if ((bp->dev->features & NETIF_F_GRO) && BNXT_SUPPORTS_TPA(bp)) +#endif bp->flags |= BNXT_FLAG_GRO; +#endif } /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must @@ -3423,7 +4141,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -3479,7 +4197,15 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. 
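+ * For example, a 1024-entry RX ring with a max_tpa of 256 must leave room for up to 1280 outstanding RX-type completions.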
+ */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -3499,18 +4225,26 @@ void bnxt_set_ring_params(struct bnxt *bp) int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { if (page_mode) { +#ifdef BNXT_RX_PAGE_MODE_SUPPORT if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) return -EOPNOTSUPP; +#ifdef HAVE_MIN_MTU bp->dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); +#endif bp->flags &= ~BNXT_FLAG_AGG_RINGS; bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_BIDIRECTIONAL; bp->rx_skb_func = bnxt_rx_page_skb; /* Disable LRO or GRO_HW */ netdev_update_features(bp->dev); +#else + return -EOPNOTSUPP; +#endif } else { +#ifdef HAVE_MIN_MTU bp->dev->max_mtu = bp->max_mtu; +#endif bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; @@ -3543,12 +4277,11 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp) } if (vnic->rss_table) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, + dma_free_coherent(&pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); vnic->rss_table = NULL; } - vnic->rss_hash_key = NULL; vnic->flags = 0; } @@ -3608,7 +4341,13 @@ vnic_skip_grps: continue; /* Allocate rss table and hash key */ - vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + if (bp->flags & BNXT_FLAG_CHIP_P5) + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&pdev->dev, + vnic->rss_table_size, &vnic->rss_table_dma_addr, GFP_KERNEL); if (!vnic->rss_table) { @@ -3616,8 +4355,6 @@ vnic_skip_grps: goto out; } - size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); - vnic->rss_hash_key = ((void *)vnic->rss_table) + size; vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; } @@ -3629,142 +4366,218 @@ out: static void bnxt_free_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; + struct hlist_node __maybe_unused *dummy; + struct bnxt_hwrm_wait_token *token; - if (bp->hwrm_cmd_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, - bp->hwrm_cmd_resp_dma_addr); - bp->hwrm_cmd_resp_addr = NULL; - } + dma_pool_destroy(bp->hwrm_dma_pool); + bp->hwrm_dma_pool = NULL; - if (bp->hwrm_cmd_kong_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, - bp->hwrm_cmd_kong_resp_addr, - bp->hwrm_cmd_kong_resp_dma_addr); - bp->hwrm_cmd_kong_resp_addr = NULL; - } -} - -static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_cmd_kong_resp_addr) - return 0; - - bp->hwrm_cmd_kong_resp_addr = - dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_kong_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_kong_resp_addr) - return -ENOMEM; - - return 0; + rcu_read_lock(); + __hlist_for_each_entry_rcu(token, dummy, &bp->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); + rcu_read_unlock(); } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_resp_addr) + bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, + 
BNXT_HWRM_DMA_SIZE, + BNXT_HWRM_DMA_ALIGN, 0); + if (!bp->hwrm_dma_pool) return -ENOMEM; + INIT_HLIST_HEAD(&bp->hwrm_pending_list); + return 0; } -static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) +static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) { - if (bp->hwrm_short_cmd_req_addr) { - struct pci_dev *pdev = bp->pdev; - - dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - bp->hwrm_short_cmd_req_addr, - bp->hwrm_short_cmd_req_dma_addr); - bp->hwrm_short_cmd_req_addr = NULL; + kfree(stats->hw_masks); + stats->hw_masks = NULL; + kfree(stats->sw_stats); + stats->sw_stats = NULL; + if (stats->hw_stats) { + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; } } -static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, + bool alloc_masks) { - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_short_cmd_req_addr) - return 0; - - bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - &bp->hwrm_short_cmd_req_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_short_cmd_req_addr) + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) return -ENOMEM; + memset(stats->hw_stats, 0, stats->len); + + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); + if (!stats->sw_stats) + goto stats_mem_err; + + if (alloc_masks) { + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); + if (!stats->hw_masks) + goto stats_mem_err; + } return 0; + +stats_mem_err: + bnxt_free_stats_mem(bp, stats); + return -ENOMEM; +} + +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = mask; +} + +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); +} + +static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, + struct bnxt_stats_mem *stats) +{ + struct hwrm_func_qstats_ext_output *resp; + struct hwrm_func_qstats_ext_input *req; + __le64 *hw_masks; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || + !(bp->flags & BNXT_FLAG_CHIP_P5)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); + +static void bnxt_init_stats(struct bnxt *bp) +{ + struct bnxt_napi *bnapi = bp->bnapi[0]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + __le64 *rx_stats, *tx_stats; + int rc, rx_count, tx_count; + u64 *rx_masks, *tx_masks; + u64 mask; + u8 flags; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + rc = bnxt_hwrm_func_qstat_ext(bp, stats); + if (rc) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + mask = (1ULL << 48) - 1; + else + mask = -1ULL; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + stats = &bp->port_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats) / 8; 
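+ /* TX port stats and masks follow the RX block at a fixed byte offset inside the same buffers; the pointers advance in 8-byte units. */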
+ tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_count = sizeof(struct tx_port_stats) / 8; + + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); + bnxt_hwrm_port_qstats(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + stats = &bp->rx_port_stats_ext; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats_ext) / 8; + stats = &bp->tx_port_stats_ext; + tx_stats = stats->hw_stats; + tx_masks = stats->hw_masks; + tx_count = sizeof(struct tx_port_stats_ext) / 8; + + flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats_ext(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + if (tx_stats) + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + if (tx_stats) + bnxt_copy_hw_masks(tx_masks, tx_stats, + tx_count); + bnxt_hwrm_port_qstats_ext(bp, 0); + } + } } static void bnxt_free_port_stats(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - bp->flags &= ~BNXT_FLAG_PORT_STATS; bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; - if (bp->hw_rx_port_stats) { - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, - bp->hw_rx_port_stats, - bp->hw_rx_port_stats_map); - bp->hw_rx_port_stats = NULL; - } - - if (bp->hw_tx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), - bp->hw_tx_port_stats_ext, - bp->hw_tx_port_stats_ext_map); - bp->hw_tx_port_stats_ext = NULL; - } - - if (bp->hw_rx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - bp->hw_rx_port_stats_ext, - bp->hw_rx_port_stats_ext_map); - bp->hw_rx_port_stats_ext = NULL; - } - - if (bp->hw_pcie_stats) { - dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - bp->hw_pcie_stats, bp->hw_pcie_stats_map); - bp->hw_pcie_stats = NULL; - } + bnxt_free_stats_mem(bp, &bp->port_stats); + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); } static void bnxt_free_ring_stats(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - int size, i; + int i; if (!bp->bnapi) return; - size = bp->hw_ring_stats_size; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - if (cpr->hw_stats) { - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, - cpr->hw_stats_map); - cpr->hw_stats = NULL; - } + bnxt_free_stats_mem(bp, &cpr->stats); } } static int bnxt_alloc_stats(struct bnxt *bp) { u32 size, i; - struct pci_dev *pdev = bp->pdev; + int rc; size = bp->hw_ring_stats_size; @@ -3772,11 +4585,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, - &cpr->hw_stats_map, - GFP_KERNEL); - if (!cpr->hw_stats) - return -ENOMEM; + cpr->stats.len = size; + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); + if (rc) + return rc; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } @@ -3784,22 +4596,17 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) return 0; - if 
(bp->hw_rx_port_stats) + if (!BNXT_ASIC(bp) && !(bp->flags & BNXT_FLAG_CHIP_SR2)) + return 0; + + if (bp->port_stats.hw_stats) goto alloc_ext_stats; - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + - sizeof(struct tx_port_stats) + 1024; + bp->port_stats.len = BNXT_PORT_STATS_SIZE; + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); + if (rc) + return rc; - bp->hw_rx_port_stats = - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, - &bp->hw_rx_port_stats_map, - GFP_KERNEL); - if (!bp->hw_rx_port_stats) - return -ENOMEM; - - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; alloc_ext_stats: @@ -3808,41 +4615,28 @@ alloc_ext_stats: if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) return 0; - if (bp->hw_rx_port_stats_ext) + if (bp->rx_port_stats_ext.hw_stats) goto alloc_tx_ext_stats; - bp->hw_rx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); - if (!bp->hw_rx_port_stats_ext) + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) return 0; alloc_tx_ext_stats: - if (bp->hw_tx_port_stats_ext) - goto alloc_pcie_stats; + if (bp->tx_port_stats_ext.hw_stats) + return 0; if (bp->hwrm_spec_code >= 0x10902 || (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { - bp->hw_tx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, - sizeof(struct tx_port_stats_ext), - &bp->hw_tx_port_stats_ext_map, - GFP_KERNEL); + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + return 0; } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; - -alloc_pcie_stats: - if (bp->hw_pcie_stats || - !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) - return 0; - - bp->hw_pcie_stats = - dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - &bp->hw_pcie_stats_map, GFP_KERNEL); - if (!bp->hw_pcie_stats) - return 0; - - bp->flags |= BNXT_FLAG_PCIE_STATS; return 0; } @@ -3881,9 +4675,8 @@ static void bnxt_clear_ring_indices(struct bnxt *bp) } } -static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) +static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) { -#ifdef CONFIG_RFS_ACCEL int i; /* Under rtnl_lock and all our NAPIs have been disabled. 
It's @@ -3891,46 +4684,77 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) */ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { struct hlist_head *head; - struct hlist_node *tmp; + struct hlist_node *tmp, __maybe_unused *nxt; struct bnxt_ntuple_filter *fltr; head = &bp->ntp_fltr_hash_tbl[i]; - hlist_for_each_entry_safe(fltr, tmp, head, hash) { - hlist_del(&fltr->hash); + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { + if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) + continue; + hlist_del(&fltr->base.hash); + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; kfree(fltr); } } - if (irq_reinit) { - kfree(bp->ntp_fltr_bmap); - bp->ntp_fltr_bmap = NULL; - } + if (!all) + return; + + kfree(bp->ntp_fltr_bmap); + bp->ntp_fltr_bmap = NULL; bp->ntp_fltr_count = 0; -#endif } static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) { -#ifdef CONFIG_RFS_ACCEL int i, rc = 0; - if (!(bp->flags & BNXT_FLAG_RFS)) + if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) return 0; for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); bp->ntp_fltr_count = 0; - bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), - sizeof(long), + bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(bp->max_fltr), sizeof(long), GFP_KERNEL); if (!bp->ntp_fltr_bmap) rc = -ENOMEM; return rc; -#else - return 0; -#endif +} + +static void bnxt_free_l2_filters(struct bnxt *bp, bool all) +{ + int i; + + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp, __maybe_unused *nxt; + struct bnxt_l2_filter *fltr; + + head = &bp->l2_fltr_hash_tbl[i]; + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { + if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) + continue; + hlist_del(&fltr->base.hash); + if (fltr->base.flags) { + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; + } + kfree(fltr); + } + } +} + +static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) +{ + int i; + + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) + INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); + prandom_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); } static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) @@ -3939,7 +4763,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_tx_rings(bp); bnxt_free_rx_rings(bp); bnxt_free_cp_rings(bp); - bnxt_free_ntp_fltrs(bp, irq_re_init); + bnxt_free_ntp_fltrs(bp, false); + bnxt_free_l2_filters(bp, false); if (irq_re_init) { bnxt_free_ring_stats(bp); bnxt_free_ring_grps(bp); @@ -4045,6 +4870,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; + bnxt_init_stats(bp); rc = bnxt_alloc_ntp_fltrs(bp); if (rc) @@ -4053,6 +4879,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) rc = bnxt_alloc_vnics(bp); if (rc) goto alloc_mem_err; + } bnxt_init_ring_struct(bp); @@ -4134,277 +4961,62 @@ static void bnxt_enable_int(struct bnxt *bp) } } -void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, - u16 cmpl_ring, u16 target_id) +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, + bool async_only) { - struct input *req = request; - - req->req_type = cpu_to_le16(req_type); - req->cmpl_ring = cpu_to_le16(cmpl_ring); - req->target_id = cpu_to_le16(target_id); - if (bnxt_kong_hwrm_message(bp, req)) - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - else - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); -} 
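The ntuple and L2 filter tables above share a single ID bitmap (bp->ntp_fltr_bmap, sized by bp->max_fltr) from which each filter's sw_id is carved; freeing a filter returns the bit with clear_bit(). A toy userspace model of that bookkeeping follows; the kernel side uses bitmap_find_free_region(), and everything here is illustrative:

```c
#include <stdio.h>

/* Toy model of the sw_id bitmap: IDs are handed out from a fixed-size
 * bitmap and returned on free.  MAX_FLTR stands in for bp->max_fltr.
 */
#define MAX_FLTR 128

static unsigned long bmap[MAX_FLTR / (8 * sizeof(unsigned long))];

static int alloc_id(void)
{
	for (int i = 0; i < MAX_FLTR; i++) {
		unsigned long *w = &bmap[i / (8 * sizeof(*w))];
		unsigned long bit = 1UL << (i % (8 * sizeof(*w)));

		if (!(*w & bit)) {
			*w |= bit;
			return i;
		}
	}
	return -1;	/* table full, like -ENOMEM in the driver */
}

static void free_id(int id)
{
	bmap[id / (8 * sizeof(unsigned long))] &=
		~(1UL << (id % (8 * sizeof(unsigned long))));
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	free_id(a);
	printf("%d %d %d\n", a, b, alloc_id());	/* 0 1 0: freed ID reused */
	return 0;
}
```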
- -static int bnxt_hwrm_to_stderr(u32 hwrm_err) -{ - switch (hwrm_err) { - case HWRM_ERR_CODE_SUCCESS: - return 0; - case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: - return -EACCES; - case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: - return -ENOSPC; - case HWRM_ERR_CODE_INVALID_PARAMS: - case HWRM_ERR_CODE_INVALID_FLAGS: - case HWRM_ERR_CODE_INVALID_ENABLES: - case HWRM_ERR_CODE_UNSUPPORTED_TLV: - case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: - return -EINVAL; - case HWRM_ERR_CODE_NO_BUFFER: - return -ENOMEM; - case HWRM_ERR_CODE_HOT_RESET_PROGRESS: - return -EAGAIN; - case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: - return -EOPNOTSUPP; - default: - return -EIO; - } -} - -static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, - int timeout, bool silent) -{ - int i, intr_process, rc, tmo_count; - struct input *req = msg; - u32 *data = msg; - __le32 *resp_len; - u8 *valid; - u16 cp_ring_id, len = 0; - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; - struct hwrm_short_input short_input = {0}; - u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; - u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; - u16 dst = BNXT_HWRM_CHNL_CHIMP; - - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - - if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { - if (msg_len > bp->hwrm_max_ext_req_len || - !bp->hwrm_short_cmd_req_addr) - return -EINVAL; - } - - if (bnxt_hwrm_kong_chnl(bp, req)) { - dst = BNXT_HWRM_CHNL_KONG; - bar_offset = BNXT_GRCPF_REG_KONG_COMM; - doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; - resp = bp->hwrm_cmd_kong_resp_addr; - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; - } - - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - - req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - msg_len > BNXT_HWRM_MAX_REQ_LEN) { - void *short_cmd_req = bp->hwrm_short_cmd_req_addr; - u16 max_msg_len; - - /* Set boundary for maximum extended request length for short - * cmd format. If passed up from device use the max supported - * internal req length. - */ - max_msg_len = bp->hwrm_max_ext_req_len; - - memcpy(short_cmd_req, req, msg_len); - if (msg_len < max_msg_len) - memset(short_cmd_req + msg_len, 0, - max_msg_len - msg_len); - - short_input.req_type = req->req_type; - short_input.signature = - cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); - short_input.size = cpu_to_le16(msg_len); - short_input.req_addr = - cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); - - data = (u32 *)&short_input; - msg_len = sizeof(short_input); - - /* Sync memory write before updating doorbell */ - wmb(); - - max_req_len = BNXT_HWRM_SHORT_REQ_LEN; - } - - /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); - - for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + bar_offset + i); - - /* Ring channel doorbell */ - writel(1, bp->bar0 + doorbell_offset); - - if (!pci_is_enabled(bp->pdev)) - return 0; - - if (!timeout) - timeout = DFLT_HWRM_CMD_TIMEOUT; - /* convert timeout to usec */ - timeout *= 1000; - - i = 0; - /* Short timeout for the first few iterations: - * number of loops = number of loops for short timeout + - * number of loops for standard timeout. 
- */ - tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; - timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; - tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); - - if (intr_process) { - u16 seq_id = bp->hwrm_intr_seq_id; - - /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != (u16)~seq_id && - i++ < tmo_count) { - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - else - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - - if (bp->hwrm_intr_seq_id != (u16)~seq_id) { - if (!silent) - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -EBUSY; - } - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; - valid = resp_addr + len - 1; - } else { - int j; - - /* Check if response len is updated */ - for (i = 0; i < tmo_count; i++) { - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> - HWRM_RESP_LEN_SFT; - if (len) - break; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - else - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - - if (i >= tmo_count) { - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -EBUSY; - } - - /* Last byte of resp contains valid bit */ - valid = resp_addr + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { - /* make sure we read from updated DMA memory */ - dma_rmb(); - if (*valid) - break; - usleep_range(1, 5); - } - - if (j >= HWRM_VALID_BIT_DELAY_USEC) { - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, - *valid); - return -EBUSY; - } - } - - /* Zero valid bit for compatibility. Valid bit in an older spec - * may become a new field in a newer spec. We must make sure that - * a new field not implemented by old spec will read zero. 
- */ - *valid = 0; - rc = le16_to_cpu(resp->error_code); - if (rc && !silent) - netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - le16_to_cpu(resp->req_type), - le16_to_cpu(resp->seq_id), rc); - return bnxt_hwrm_to_stderr(rc); -} - -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); -} - -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); -} - -int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, msg, msg_len, timeout); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, - int bmap_size) -{ - struct hwrm_func_drv_rgtr_input req = {0}; DECLARE_BITMAP(async_events_bmap, 256); u32 *events = (u32 *)async_events_bmap; - int i; + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; + u32 flags = 0; + int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; + req->flags = cpu_to_le32(flags); + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + + if (BNXT_PF(bp)) { + u32 data[8]; + int i; + + memset(data, 0, sizeof(data)); + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { + u16 cmd = bnxt_vf_req_snif[i]; + unsigned int bit, idx; + + if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) && + (cmd == HWRM_PORT_PHY_QCFG)) + continue; + + idx = cmd / 32; + bit = cmd % 32; + data[idx] |= 1 << bit; + } + + for (i = 0; i < 8; i++) + req->vf_req_fwd[i] = cpu_to_le32(data[i]); + + req->enables |= + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); + } + + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->flags |= cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); memset(async_events_bmap, 0, sizeof(async_events_bmap)); for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { @@ -4421,102 +5033,65 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, __set_bit(i, async_events_bmap); } } - for (i = 0; i < 8; i++) - req.async_event_fwd[i] |= cpu_to_le32(events[i]); + req->async_event_fwd[i] |= cpu_to_le32(events[i]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -} + if (async_only) + req->enables = + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); -static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) -{ - struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_rgtr_input req = {0}; - u32 flags; - 
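Every firmware call converted in this patch follows the same shape: hwrm_req_init() allocates and prepares a typed request, hwrm_req_hold() pins the response buffer across the send, and hwrm_req_drop() releases both, which is what lets the old hwrm_cmd_lock mutex and the shared per-device response buffer disappear. A sketch of the calling convention, where HWRM_FOO_QUERY and its structures are hypothetical placeholders:

```c
/* Pattern sketch only: HWRM_FOO_QUERY and its input/output structures
 * are hypothetical placeholders; the hwrm_req_*() calls are the real
 * API this patch introduces.
 */
static int bnxt_hwrm_foo_query(struct bnxt *bp, u32 *val)
{
	struct hwrm_foo_query_output *resp;
	struct hwrm_foo_query_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FOO_QUERY);
	if (rc)
		return rc;	/* nothing to release if init fails */

	resp = hwrm_req_hold(bp, req);	/* keep resp valid after send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*val = le32_to_cpu(resp->foo_value);
	hwrm_req_drop(bp, req);		/* releases request and response */
	return rc;
}
```

Requests that never read the response skip the hold/drop pair entirely and simply return hwrm_req_send(), as the converted bnxt_hwrm_cfa_l2_set_rx_mask() below does.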
int rc; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); - - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | - FUNC_DRV_RGTR_REQ_ENABLES_VER); - - req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; - if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) - flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; - if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) - flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT; - req.flags = cpu_to_le32(flags); - req.ver_maj_8b = DRV_VER_MAJ; - req.ver_min_8b = DRV_VER_MIN; - req.ver_upd_8b = DRV_VER_UPD; - req.ver_maj = cpu_to_le16(DRV_VER_MAJ); - req.ver_min = cpu_to_le16(DRV_VER_MIN); - req.ver_upd = cpu_to_le16(DRV_VER_UPD); - - if (BNXT_PF(bp)) { - u32 data[8]; - int i; - - memset(data, 0, sizeof(data)); - for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { - u16 cmd = bnxt_vf_req_snif[i]; - unsigned int bit, idx; - - idx = cmd / 32; - bit = cmd % 32; - data[idx] |= 1 << bit; - } - - for (i = 0; i < 8; i++) - req.vf_req_fwd[i] = cpu_to_le32(data[i]); - - req.enables |= - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); + if (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; } - - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.flags |= cpu_to_le32( - FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc && (resp->flags & - cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))) - bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { - struct hwrm_func_drv_unrgtr_input req = {0}; + struct hwrm_func_drv_unrgtr_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_input *req; + u32 rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); - req.tunnel_type = tunnel_type; + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); + if (rc) + return rc; + + req->tunnel_type = tunnel_type; switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + req->tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + req->tunnel_dst_port_id = bp->nge_fw_dst_port_id; break; default: break; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", rc); @@ -4526,17 +5101,19 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_alloc_input req = {0}; - struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_tunnel_dst_port_alloc_output *resp; + struct hwrm_tunnel_dst_port_alloc_input *req; + u32 rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); + if (rc) + return rc; - req.tunnel_type = tunnel_type; - req.tunnel_dst_port_val = port; + req->tunnel_type = tunnel_type; + req->tunnel_dst_port_val = port; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", rc); @@ -4555,39 +5132,286 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, } err_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) { - struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct hwrm_cfa_l2_set_rx_mask_input *req; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; - req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); - req.mask = cpu_to_le32(vnic->rx_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); +} + +void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + if (!atomic_dec_and_test(&fltr->refcnt)) + return; + spin_lock_bh(&bp->ntp_fltr_lock); + if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return; + } + hlist_del_rcu(&fltr->base.hash); + if (fltr->base.flags) { + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; + } + spin_unlock_bh(&bp->ntp_fltr_lock); + kfree_rcu(fltr, base.rcu); +} + +static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u32 idx) +{ + struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; + struct hlist_node __maybe_unused *node; + struct bnxt_l2_filter *fltr; + + __hlist_for_each_entry_rcu(fltr, node, head, base.hash) { + struct bnxt_l2_key *l2_key = &fltr->l2_key; + + if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && + l2_key->vlan == key->vlan) + return fltr; + } + return NULL; +} + +static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u32 idx) +{ + struct bnxt_l2_filter *fltr = NULL; + + rcu_read_lock(); + fltr = __bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) + atomic_inc(&fltr->refcnt); + rcu_read_unlock(); + return fltr; } #ifdef CONFIG_RFS_ACCEL -static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, - struct bnxt_ntuple_filter *fltr) +static struct bnxt_l2_filter *bnxt_lookup_l2_filter_from_key(struct bnxt *bp, 
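bnxt_del_l2_filter() and the lookup helpers above implement the usual RCU-plus-refcount lifetime: lookups run under rcu_read_lock() and take a reference with atomic_inc(), the hash table holds one reference of its own, and the object is unlinked with hlist_del_rcu() and freed through kfree_rcu() only when the last reference drops. A userspace model of just the reference counting, assuming C11 atomics (plain free() stands in for the RCU-deferred free):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the filter lifetime rules: the table owns one reference,
 * each lookup takes another, and the object is freed only when the
 * last reference goes away.
 */
struct filter {
	atomic_int refcnt;
	int id;
};

static struct filter *filter_get(struct filter *f)
{
	atomic_fetch_add(&f->refcnt, 1);
	return f;
}

static void filter_put(struct filter *f)
{
	if (atomic_fetch_sub(&f->refcnt, 1) == 1)
		free(f);	/* kfree_rcu() in the driver */
}

int main(void)
{
	struct filter *f = calloc(1, sizeof(*f));

	atomic_init(&f->refcnt, 1);	/* the table's reference */
	filter_get(f);			/* a lookup takes its own */
	filter_put(f);			/* lookup done */
	filter_put(f);			/* table unlinks: object freed */
	puts("freed after last put");
	return 0;
}
```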
+ struct bnxt_l2_key *key) { - struct hwrm_cfa_ntuple_filter_free_input req = {0}; + struct bnxt_l2_filter *fltr; + u32 idx; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); - req.ntuple_filter_id = fltr->filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + fltr = bnxt_lookup_l2_filter(bp, key, idx); + return fltr; +} +#endif + +static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, + struct bnxt_l2_key *key, u32 idx) +{ + struct hlist_head *head; + + ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); + fltr->l2_key.vlan = key->vlan; + fltr->base.type = BNXT_FLTR_TYPE_L2; + if (fltr->base.flags) { + int bit_id; + + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, + bp->max_fltr, 0); + if (bit_id < 0) + return -ENOMEM; + fltr->base.sw_id = (u16)bit_id; + bp->ntp_fltr_count++; + } + head = &bp->l2_fltr_hash_tbl[idx]; + hlist_add_head_rcu(&fltr->base.hash, head); + set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); + atomic_set(&fltr->refcnt, 1); + return 0; +} + +static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + gfp_t gfp) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + int rc; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + fltr = bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) + return fltr; + + fltr = kzalloc(sizeof(*fltr), gfp); + if (!fltr) + return ERR_PTR(-ENOMEM); + spin_lock_bh(&bp->ntp_fltr_lock); + rc = bnxt_init_l2_filter(bp, fltr, key, idx); + spin_unlock_bh(&bp->ntp_fltr_lock); + if (rc) { + bnxt_del_l2_filter(bp, fltr); + fltr = ERR_PTR(rc); + } + return fltr; +} + +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + int rc; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + spin_lock_bh(&bp->ntp_fltr_lock); + fltr = __bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) { + fltr = ERR_PTR(-EEXIST); + goto l2_filter_exit; + } + fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); + if (!fltr) { + fltr = ERR_PTR(-ENOMEM); + goto l2_filter_exit; + } + fltr->base.flags = flags; + rc = bnxt_init_l2_filter(bp, fltr, key, idx); + if (rc) { + spin_unlock_bh(&bp->ntp_fltr_lock); + bnxt_del_l2_filter(bp, fltr); + return ERR_PTR(rc); + } + +l2_filter_exit: + spin_unlock_bh(&bp->ntp_fltr_lock); + return fltr; +} + +static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) +{ + u16 fid = INVALID_HW_RING_ID; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) + fid = vf[vf_idx].fw_fid; + rcu_read_unlock(); + return fid; +} + +int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + struct hwrm_cfa_l2_filter_free_input *req; + u16 target_id = 0xffff; + int rc; + + if (fltr->base.flags & BNXT_ACT_FUNC_DST) { + struct bnxt_pf_info *pf = &bp->pf; + + if (fltr->base.vf_idx >= pf->active_vfs) + return -EINVAL; + + target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); + if (target_id == INVALID_HW_RING_ID) + return -EINVAL; + } + + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + + req->target_id = cpu_to_le16(target_id); + req->l2_filter_id = fltr->base.filter_id; + return hwrm_req_send(bp, req); +} + +int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + struct 
hwrm_cfa_l2_filter_alloc_output *resp; + struct hwrm_cfa_l2_filter_alloc_input *req; + u16 target_id = 0xffff; + int rc; + + if (fltr->base.flags & BNXT_ACT_FUNC_DST) { + struct bnxt_pf_info *pf = &bp->pf; + + if (fltr->base.vf_idx >= pf->active_vfs) + return -EINVAL; + + target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); + } + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); + if (rc) + return rc; + + req->target_id = cpu_to_le16(target_id); + req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + req->flags |= + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); + req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); + req->enables = + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); + ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); + eth_broadcast_addr(req->l2_addr_mask); + if (fltr->l2_key.vlan) { + req->enables |= + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); + req->num_vlans = 1; + req->l2_ivlan = fltr->l2_key.vlan; + req->l2_ivlan_mask = 0xfff; + } + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + fltr->base.filter_id = resp->l2_filter_id; + set_bit(BNXT_FLTR_VALID, &fltr->base.state); + } + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + struct hwrm_cfa_ntuple_filter_free_input *req; + int rc; + + set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); + if (rc) + return rc; + + req->ntuple_filter_id = fltr->base.filter_id; + return hwrm_req_send(bp, req); } #define BNXT_NTP_FLTR_FLAGS \ (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ - CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ @@ -4603,107 +5427,128 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, #define BNXT_NTP_TUNNEL_FLTR_FLAG \ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE -static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, - struct bnxt_ntuple_filter *fltr) +void bnxt_fill_ipv6_mask(__be32 mask[4]) +{ + int i; + + for (i = 0; i < 4; i++) + mask[i] = cpu_to_be32(~0); +} + +int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_output *resp; + struct hwrm_cfa_ntuple_filter_alloc_input *req; struct flow_keys *keys = &fltr->fkeys; + struct bnxt_l2_filter *l2_fltr; struct bnxt_vnic_info *vnic; - u32 dst_ena = 0; - int rc = 0; + u32 flags = 0; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); + if (rc) + return rc; - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) { - dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; - req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq); - vnic = &bp->vnic_info[0]; + l2_fltr = fltr->l2_fltr; + req->l2_filter_id = l2_fltr->base.filter_id; + + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { + flags = 
CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req->dst_id = cpu_to_le16(fltr->base.rxq); } else { - vnic = &bp->vnic_info[fltr->rxq + 1]; + vnic = &bp->vnic_info[fltr->base.rxq + 1]; + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); - req.ethertype = htons(ETH_P_IP); - memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.ip_protocol = keys->basic.ip_proto; + req->ethertype = htons(ETH_P_IP); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; +#ifdef NEW_FLOW_KEYS + req->ip_protocol = keys->basic.ip_proto; if (keys->basic.n_proto == htons(ETH_P_IPV6)) { - int i; - - req.ethertype = htons(ETH_P_IPV6); - req.ip_addr_type = + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; - *(struct in6_addr *)&req.src_ipaddr[0] = - keys->addrs.v6addrs.src; - *(struct in6_addr *)&req.dst_ipaddr[0] = - keys->addrs.v6addrs.dst; - for (i = 0; i < 4; i++) { - req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); - req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { + *(struct in6_addr *)&req->src_ipaddr[0] = + keys->addrs.v6addrs.src; + bnxt_fill_ipv6_mask(req->src_ipaddr_mask); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { + *(struct in6_addr *)&req->dst_ipaddr[0] = + keys->addrs.v6addrs.dst; + bnxt_fill_ipv6_mask(req->dst_ipaddr_mask); } } else { - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + } } +#ifdef HAVE_NEW_FLOW_DISSECTOR if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { - req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); - req.tunnel_type = + req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req->tunnel_type = CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; } +#endif - req.src_port = keys->ports.src; - req.src_port_mask = cpu_to_be16(0xffff); - req.dst_port = keys->ports.dst; - req.dst_port_mask = cpu_to_be16(0xffff); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); - fltr->filter_id = resp->ntuple_filter_id; + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { + req->src_port = keys->ports.src; + req->src_port_mask = cpu_to_be16(0xffff); } - mutex_unlock(&bp->hwrm_cmd_lock); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { + req->dst_port = keys->ports.dst; + req->dst_port_mask = cpu_to_be16(0xffff); + } +#else + req->ip_protocol = keys->ip_proto; + + req->src_ipaddr[0] = keys->src; + req->src_ipaddr_mask[0] = 0xffffffff; + req->dst_ipaddr[0] = keys->dst; + req->dst_ipaddr_mask[0] = 0xffffffff; + + req->src_port = keys->port16[0]; + req->src_port_mask = 0xffff; + req->dst_port = keys->port16[1]; + req->dst_port_mask = 0xffff; +#endif + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); 
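The ntuple allocation above only populates the address and port fields whose BNXT_NTUPLE_MATCH_* bit is set, pairing each value with an all-ones mask (bnxt_fill_ipv6_mask() for IPv6); fields left unset stay zero and act as wildcards. A compact model of that flag-gated construction, with illustrative flag names and struct:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Flag names mirror the driver's BNXT_NTUPLE_MATCH_* bits; the struct
 * is illustrative, not the firmware request layout.
 */
#define MATCH_SRC_IP	0x1
#define MATCH_DST_IP	0x2

struct match {
	uint32_t src_ip, src_ip_mask;
	uint32_t dst_ip, dst_ip_mask;
};

static void build_match(struct match *m, uint32_t flags,
			uint32_t src, uint32_t dst)
{
	memset(m, 0, sizeof(*m));	/* unset fields match anything */
	if (flags & MATCH_SRC_IP) {
		m->src_ip = src;
		m->src_ip_mask = 0xffffffff;	/* exact match */
	}
	if (flags & MATCH_DST_IP) {
		m->dst_ip = dst;
		m->dst_ip_mask = 0xffffffff;
	}
}

int main(void)
{
	struct match m;

	build_match(&m, MATCH_DST_IP, 0x0a000001, 0x0a000002);
	printf("src mask %#x, dst mask %#x\n", m.src_ip_mask, m.dst_ip_mask);
	return 0;	/* src mask 0: source address is a wildcard */
}
```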
+ if (!rc) { + fltr->base.filter_id = resp->ntuple_filter_id; + } + hwrm_req_drop(bp, req); return rc; } -#endif static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, u8 *mac_addr) { - u32 rc = 0; - struct hwrm_cfa_l2_filter_alloc_input req = {0}; - struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_l2_filter *fltr; + struct bnxt_l2_key key; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); - if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) - req.flags |= - cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); - req.enables = - cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | - CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | - CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); - memcpy(req.l2_addr, mac_addr, ETH_ALEN); - req.l2_addr_mask[0] = 0xff; - req.l2_addr_mask[1] = 0xff; - req.l2_addr_mask[2] = 0xff; - req.l2_addr_mask[3] = 0xff; - req.l2_addr_mask[4] = 0xff; - req.l2_addr_mask[5] = 0xff; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = - resp->l2_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + ether_addr_copy(key.dst_mac_addr, mac_addr); + key.vlan = 0; + fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; + rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); + if (rc) + bnxt_del_l2_filter(bp, fltr); + else + bp->vnic_info[vnic_id].l2_filters[idx] = fltr; return rc; } @@ -4713,24 +5558,17 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) int rc = 0; /* Any associated ntuple filters will also be cleared by firmware. 
*/ - mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_of_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; for (j = 0; j < vnic->uc_filter_count; j++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; + struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; - bnxt_hwrm_cmd_hdr_init(bp, &req, - HWRM_CFA_L2_FILTER_FREE, -1, -1); - - req.l2_filter_id = vnic->fw_l2_filter_id[j]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + bnxt_hwrm_l2_filter_free(bp, fltr); + bnxt_del_l2_filter(bp, fltr); } vnic->uc_filter_count = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -4739,12 +5577,15 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; - struct hwrm_vnic_tpa_cfg_input req = {0}; + struct hwrm_vnic_tpa_cfg_input *req; + int rc; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); + if (rc) + return rc; if (tpa_flags) { u16 mss = bp->dev->mtu - 40; @@ -4758,9 +5599,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) if (tpa_flags & BNXT_FLAG_GRO) flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; - req.flags = cpu_to_le32(flags); + req->flags = cpu_to_le32(flags); - req.enables = + req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); @@ -4784,14 +5625,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) } else { segs = ilog2(nsegs); } - req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(max_aggs); + req->max_agg_segs = cpu_to_le16(segs); + req->max_aggs = cpu_to_le16(max_aggs); - req.min_agg_len = cpu_to_le32(512); + req->min_agg_len = cpu_to_le32(512); } - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) @@ -4828,118 +5669,201 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) } } +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +{ + int entries; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; + else + entries = HW_HASH_INDEX_SIZE; + + bp->rss_indir_tbl_entries = entries; + bp->rss_indir_tbl = kcalloc(entries, sizeof(*bp->rss_indir_tbl), + GFP_KERNEL); + if (!bp->rss_indir_tbl) + return -ENOMEM; + return 0; +} + +static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +{ + u16 max_rings, max_entries, pad, i; + + if (!bp->rx_nr_rings) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + max_entries = (max_rings + BNXT_RSS_TABLE_ENTRIES_P5 - 1) & + ~(BNXT_RSS_TABLE_ENTRIES_P5 - 1); + else + max_entries = HW_HASH_INDEX_SIZE; + + for (i = 0; i < max_entries; i++) + bp->rss_indir_tbl[i] = i % max_rings; + + pad = bp->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); +} + +static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); + u16 i, j; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < 
HW_HASH_INDEX_SIZE; i++) { + if (!no_rss) + j = bp->rss_indir_tbl[i]; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } +} + +static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + __le16 *ring_tbl = vnic->rss_table; + struct bnxt_rx_ring_info *rxr; + u16 tbl_size, i; + + tbl_size = (bp->rx_nr_rings + BNXT_RSS_TABLE_ENTRIES_P5 - 1) & + ~(BNXT_RSS_TABLE_ENTRIES_P5 - 1); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + j = bp->rss_indir_tbl[i]; + rxr = &bp->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + __bnxt_fill_hw_rss_tbl_p5(bp, vnic); + else + __bnxt_fill_hw_rss_tbl(bp, vnic); +} + +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 2; + return 1; +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { - u32 i, j, max_rings; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; + int rc; if ((bp->flags & BNXT_FLAG_CHIP_P5) || - vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) + (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + if (set_rss) { - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - if (vnic->flags & BNXT_VNIC_RSS_FLAG) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - max_rings = bp->rx_nr_rings - 1; - else - max_rings = bp->rx_nr_rings; - } else { - max_rings = 1; - } - - /* Fill the RSS indirection table with ring group ids */ - for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { - if (j == max_rings) - j = 0; - vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); - } - - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); - req.hash_key_tbl_addr = + bnxt_fill_hw_rss_tbl(bp, vnic); + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; + dma_addr_t ring_tbl_map; + u32 i, nr_ctxs; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); - if (!set_rss) { - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return 0; - } - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = 
cpu_to_le64(vnic->rss_table_dma_addr); - req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); - for (i = 0, k = 0; i < nr_ctxs; i++) { - __le16 *ring_tbl = vnic->rss_table; - int rc; + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; - req.ring_table_pair_index = i; - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - for (j = 0; j < 64; j++) { - u16 ring_id; + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) + return hwrm_req_send(bp, req); - ring_id = rxr->rx_ring_struct.fw_ring_id; - *ring_tbl++ = cpu_to_le16(ring_id); - ring_id = bnxt_cp_ring_for_rx(bp, rxr); - *ring_tbl++ = cpu_to_le16(ring_id); - rxr++; - k++; - if (k == max_rings) { - k = 0; - rxr = &bp->rx_ring[0]; - } - } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bnxt_fill_hw_rss_tbl(bp, vnic); + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + ring_tbl_map = vnic->rss_table_dma_addr; + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + + hwrm_req_hold(bp, req); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req->ring_table_pair_index = i; + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_req_send(bp, req); if (rc) - return rc; + goto exit; } - return 0; + +exit: + hwrm_req_drop(bp, req); + return rc; } static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_plcmodes_cfg_input req = {0}; + struct hwrm_vnic_plcmodes_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); - req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); /* thresholds not implemented in firmware yet */ - req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { - struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); - req.rss_cos_lb_ctx_id = + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; + + req->rss_cos_lb_ctx_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } @@ -4960,20 +5884,20 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 
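The new bnxt_set_dflt_rss_indir_tbl() shown earlier spreads the rx rings round-robin across the RSS indirection table; on P5-class chips the populated length is the ring count rounded up to a multiple of BNXT_RSS_TABLE_ENTRIES_P5 (64 entries per context) and the tail of the table is zeroed. A runnable sketch of that fill, with illustrative constants:

```c
#include <stdio.h>

/* ENTRIES_P5 and TBL_SIZE are illustrative constants; nr_rings is
 * assumed non-zero, as in the driver's early-return guard.
 */
#define ENTRIES_P5	64
#define TBL_SIZE	512

static unsigned short tbl[TBL_SIZE];

static void fill_dflt_indir(unsigned int nr_rings)
{
	unsigned int max_entries, i;

	/* round up to a whole number of 64-entry RSS contexts */
	max_entries = (nr_rings + ENTRIES_P5 - 1) & ~(ENTRIES_P5 - 1);
	for (i = 0; i < max_entries; i++)
		tbl[i] = i % nr_rings;
	for (; i < TBL_SIZE; i++)
		tbl[i] = 0;	/* pad, like the memset in the driver */
}

int main(void)
{
	fill_dflt_indir(6);
	for (unsigned int i = 0; i < 8; i++)
		printf("%u ", (unsigned int)tbl[i]);
	printf("\n");	/* prints: 0 1 2 3 4 5 0 1 */
	return 0;
}
```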
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; int rc; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = - bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, - -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -4987,47 +5911,50 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_cfg_input req = {0}; + struct hwrm_vnic_cfg_input *req; + unsigned int ring = 0, grp_idx; u16 def_vlan = 0; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; - req.default_rx_ring_id = + req->default_rx_ring_id = cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); - req.default_cmpl_ring_id = + req->default_cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); - req.enables = + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); goto vnic_mru; } - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | - VNIC_CFG_REQ_ENABLES_MRU); + req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req.rss_rule = + req->rss_rule = cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | - VNIC_CFG_REQ_ENABLES_MRU); - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { - req.rss_rule = cpu_to_le16(0xffff); + req->rss_rule = cpu_to_le16(0xffff); } if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { - req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); } else { - req.cos_rule = cpu_to_le16(0xffff); + req->cos_rule = cpu_to_le16(0xffff); } if (vnic->flags & BNXT_VNIC_RSS_FLAG) @@ -5038,40 +5965,38 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); + 
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN); + req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); + req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } -static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { - u32 rc = 0; - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { - struct hwrm_vnic_free_input req = {0}; + struct hwrm_vnic_free_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); - req.vnic_id = + if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } - return rc; } static void bnxt_hwrm_vnic_free(struct bnxt *bp) @@ -5086,11 +6011,15 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int start_rx_ring_idx, unsigned int nr_rings) { - int rc = 0; unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct hwrm_vnic_alloc_input req = {0}; - struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) goto vnic_no_ring_grps; @@ -5109,23 +6038,22 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; + if (vnic_id == 0) - req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) { - struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_vnic_qcaps_input req = {0}; + struct hwrm_vnic_qcaps_output *resp; + struct hwrm_vnic_qcaps_input *req; int rc; bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); @@ -5133,9 +6061,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10600) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = 
hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u32 flags = le32_to_cpu(resp->flags); @@ -5145,100 +6076,105 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || + (BNXT_CHIP_P5_THOR(bp) && + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); if (bp->max_tpa_v2) bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats_ext); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { + struct hwrm_ring_grp_alloc_output *resp; + struct hwrm_ring_grp_alloc_input *req; + int rc; u16 i; - u32 rc = 0; if (bp->flags & BNXT_FLAG_CHIP_P5) return 0; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->rx_nr_rings; i++) { - struct hwrm_ring_grp_alloc_input req = {0}; - struct hwrm_ring_grp_alloc_output *resp = - bp->hwrm_cmd_resp_addr; unsigned int grp_idx = bp->rx_ring[i].bnapi->index; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); - req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); + rc = hwrm_req_send(bp, req); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); if (rc) break; bp->grp_info[grp_idx].fw_grp_id = le32_to_cpu(resp->ring_group_id); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) +static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { + struct hwrm_ring_grp_free_input *req; u16 i; - u32 rc = 0; - struct hwrm_ring_grp_free_input req = {0}; if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) - return 0; + return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) + return; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) continue; - req.ring_group_id = + req->ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; + hwrm_req_drop(bp, req); } static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, u32 map_index) { - int rc = 0, err = 0; - struct hwrm_ring_alloc_input req = {0}; - struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; + int rc, err = 0; u16 ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); + if (rc) + goto exit; - req.enables = 0; + 
req->enables = 0; if (rmem->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); + req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ - req.page_size = BNXT_PAGE_SHIFT; - req.page_tbl_depth = 1; + req->page_size = BNXT_PAGE_SHIFT; + req->page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } - req.fbo = 0; + req->fbo = 0; /* Association of ring index with doorbell index and MSIX number */ - req.logical_id = cpu_to_le16(map_index); + req->logical_id = cpu_to_le16(map_index); switch (ring_type) { case HWRM_RING_ALLOC_TX: { @@ -5246,67 +6182,64 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); - req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); - req.length = cpu_to_le32(bp->tx_ring_mask + 1); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.queue_id = cpu_to_le16(ring->queue_id); + req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); + req->length = cpu_to_le32(bp->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); break; } case HWRM_RING_ALLOC_RX: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - req.length = cpu_to_le32(bp->rx_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 flags = 0; /* Association of rx ring with stats context */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( - RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); if (NET_IP_ALIGN == 2) flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; - req.flags = cpu_to_le16(flags); + req->flags = cpu_to_le16(flags); } break; case HWRM_RING_ALLOC_AGG: if (bp->flags & BNXT_FLAG_CHIP_P5) { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; /* Association of agg ring with rx ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); - req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( - RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | - RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); } else { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; } - req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + 
req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { /* Association of cp ring with nq */ grp_info = &bp->grp_info[map_index]; - req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); - req.cq_handle = cpu_to_le64(ring->handle); - req.enables |= cpu_to_le32( - RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); } else if (bp->flags & BNXT_FLAG_USING_MSIX) { - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; } break; case HWRM_RING_ALLOC_NQ: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", @@ -5314,12 +6247,12 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return -1; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); err = le16_to_cpu(resp->error_code); ring_id = le16_to_cpu(resp->ring_id); - mutex_unlock(&bp->hwrm_cmd_lock); - + hwrm_req_drop(bp, req); +exit: if (rc || err) { netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", ring_type, rc, err); @@ -5334,33 +6267,60 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) int rc; if (BNXT_PF(bp)) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } else { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); + } +} + +static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, + u32 ring_type) +{ + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_ring_mask = bp->tx_ring_mask; + break; + case HWRM_RING_ALLOC_RX: + db->db_ring_mask = bp->rx_ring_mask; + break; + case HWRM_RING_ALLOC_AGG: + db->db_ring_mask = bp->rx_agg_ring_mask; + break; + case HWRM_RING_ALLOC_CMPL: + case HWRM_RING_ALLOC_NQ: + db->db_ring_mask = bp->cp_ring_mask; + break; + } + if (bp->flags & BNXT_FLAG_CHIP_SR2) { + db->db_epoch_mask = db->db_ring_mask + 1; + db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); } - return rc; } static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, u32 map_idx, u32 xid) { if (bp->flags & 
BNXT_FLAG_CHIP_P5) { - if (BNXT_PF(bp)) - db->doorbell = bp->bar1 + 0x10000; - else - db->doorbell = bp->bar1 + 0x4000; + int legacy_db_size = DB_PF_OFFSET_P5; + switch (ring_type) { case HWRM_RING_ALLOC_TX: db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; @@ -5377,6 +6337,14 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, break; } db->db_key64 |= (u64)xid << DBR_XID_SFT; + + if (bp->flags & BNXT_FLAG_CHIP_SR2) { + db->db_key64 |= DBR_VALID; + legacy_db_size = bp->legacy_db_size; + } else if (BNXT_VF(bp)) { + legacy_db_size = DB_VF_OFFSET_P5; + } + db->doorbell = bp->bar1 + legacy_db_size; } else { db->doorbell = bp->bar1 + map_idx * 0x80; switch (ring_type) { @@ -5392,6 +6360,47 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, break; } } + bnxt_set_db_mask(bp, db, ring_type); +} + +static void bnxt_set_push_db(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u32 map_idx, u32 xid) +{ + struct bnxt_db_info *db; + u32 offset; + u64 dpi; + + db = &txr->tx_push_db; + db->doorbell = NULL; + if (!(bp->flags & BNXT_FLAG_CHIP_P5) || !bp->db_base_wc) + return; + + switch (bp->tx_push_mode) { + case BNXT_PUSH_MODE_WCB: + dpi = (map_idx / DB_WCB_PER_PAGE) + 1; + offset = map_idx % DB_WCB_PER_PAGE; + if ((dpi * DB_WCB_PAGE_SIZE) > (bp->db_size - bp->db_size_nc)) + return; + + db->doorbell = bp->bar1 + DB_WCB_FIRST_OFFSET + (offset * 8); + db->db_key64 |= (((dpi & 0xff) << DBR_PI_LO_SFT) | + ((dpi & 0xf00) >> 8) << DBR_PI_HI_SFT); + txr->tx_push_wcb = bp->db_base_wc + + ((dpi - 1) * DB_WCB_PAGE_SIZE) + + ((offset + 1) * DB_WCB_BUFFER_SIZE); + break; + case BNXT_PUSH_MODE_PPP: + /* two buffers per idx for ping pong page mode */ + db->doorbell = bp->db_base_wc + map_idx * 2 * DB_PPP_SIZE; + db->db_key64 |= DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID; + bnxt_set_db_mask(bp, db, HWRM_RING_ALLOC_TX); + txr->tx_push_wcb = db->doorbell + DB_PPP_BD_OFFSET; + break; + default: + return; + } + + db->db_key64 |= (u64)xid << DBR_XID_SFT; } static int bnxt_hwrm_ring_alloc(struct bnxt *bp) @@ -5459,6 +6468,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) if (rc) goto err_out; bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); + bnxt_set_push_db(bp, txr, map_idx, ring->fw_ring_id); } type = HWRM_RING_ALLOC_RX; @@ -5521,23 +6531,28 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, int cmpl_ring_id) { + struct hwrm_ring_free_output *resp; + struct hwrm_ring_free_input *req; + u16 error_code = 0; int rc; - struct hwrm_ring_free_input req = {0}; - struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - u16 error_code; if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); - req.ring_type = ring_type; - req.ring_id = cpu_to_le16(ring->fw_ring_id); + rc = hwrm_req_init(bp, req, HWRM_RING_FREE); + if (rc) + goto exit; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - error_code = le16_to_cpu(resp->error_code); - mutex_unlock(&bp->hwrm_cmd_lock); + req->cmpl_ring = cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + error_code = le16_to_cpu(resp->error_code); + hwrm_req_drop(bp, req); +exit: if (rc || error_code) { netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", ring_type, rc, error_code); @@ -5652,23 +6667,31 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, static int bnxt_hwrm_get_rings(struct bnxt *bp) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + u16 flags; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } + flags = le16_to_cpu(resp->flags); + if (!(flags & FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED)) + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); if (BNXT_NEW_RM(bp)) { u16 cp, stats; @@ -5699,42 +6722,50 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_cp_rings = cp; hw_resc->resv_stat_ctxs = stats; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } -/* Caller must hold bp->hwrm_cmd_lock */ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(fid); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + hwrm_req_drop(bp, req); return rc; } static bool bnxt_rfs_supported(struct bnxt *bp); -static void -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, - int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int stats, int vnics) +static struct hwrm_func_cfg_input * +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + return NULL; + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; req->num_tx_rings = cpu_to_le16(tx_rings); + if (bp->tx_push_mode == BNXT_PUSH_MODE_PPP) + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE); if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; enables |= stats ? 
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; @@ -5772,17 +6803,19 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); + return req; } -static void -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, - struct hwrm_func_vf_cfg_input *req, int tx_rings, - int rx_rings, int ring_grps, int cp_rings, - int stats, int vnics) +static struct hwrm_func_vf_cfg_input * +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_vf_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) + return NULL; + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -5801,6 +6834,8 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req->num_tx_rings = cpu_to_le16(tx_rings); + if (bp->tx_push_mode == BNXT_PUSH_MODE_PPP) + req->flags |= cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE); req->num_rx_rings = cpu_to_le16(rx_rings); if (bp->flags & BNXT_FLAG_CHIP_P5) { req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); @@ -5814,36 +6849,41 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); + return req; } static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; int rc; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - if (!req.enables) - return 0; + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!req->enables) { + hwrm_req_drop(bp, req); + return 0; + } + + rc = hwrm_req_send(bp, req); if (rc) return rc; if (bp->hwrm_spec_code < 0x10601) bp->hw_resc.resv_tx_rings = tx_rings; - rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); } static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { @@ -5851,14 +6891,16 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + rc = hwrm_req_send(bp, req); if (rc) return rc; - rc = bnxt_hwrm_get_rings(bp); - return rc; + return bnxt_hwrm_get_rings(bp); } static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, @@ -6003,6 +7045,10 @@ static int __bnxt_reserve_rings(struct bnxt *bp) rx = rx_rings << 1; cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; bp->tx_nr_rings = tx; + + /* Reset the RSS indirection if we cannot reserve all the RX rings */ + if (rx_rings != bp->rx_nr_rings) + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; @@ -6016,15 +7062,17 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; u32 flags; - int rc; if (!BNXT_NEW_RM(bp)) return 0; - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -6034,37 +7082,38 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!(bp->flags & BNXT_FLAG_CHIP_P5)) flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; - req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; u32 flags; - int rc; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; - if (bp->flags & BNXT_FLAG_CHIP_P5) + if (bp->flags & BNXT_FLAG_CHIP_P5) { flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; - else + } else { flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; + } } - req.flags = cpu_to_le32(flags); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -6085,9 +7134,9 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) { - struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; - struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp; + struct hwrm_ring_aggint_qcaps_input *req; int rc; coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; @@ -6103,9 +7152,11 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10902) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); coal_cap->nq_params = le32_to_cpu(resp->nq_params); @@ 
-6125,7 +7176,7 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) le16_to_cpu(resp->num_cmpl_aggr_int_max); coal_cap->timer_units = le16_to_cpu(resp->timer_units); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) @@ -6193,37 +7244,40 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); } -/* Caller holds bp->hwrm_cmd_lock */ static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_coal *hw_coal) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; u32 nq_params = coal_cap->nq_params; u16 tmr; + int rc; if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); - req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); - req.flags = + rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req->flags = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); - req.int_lat_tmr_min = cpu_to_le16(tmr); - req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); - return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->int_lat_tmr_min = cpu_to_le16(tmr); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return hwrm_req_send(bp, req); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; + int rc; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. 
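+ * (bufs_per_record thus converts between buffer counts and completion-record counts)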
@@ -6236,48 +7290,51 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) if (!bnapi->rx_ring) return -ENODEV; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, req_rx); - req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); + req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); - return hwrm_send_message(bp, &req_rx, sizeof(req_rx), - HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req_rx); } int bnxt_hwrm_set_coal(struct bnxt *bp) { - int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, - req_tx = {0}, *req; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx, + *req; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req_tx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); + rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) { + hwrm_req_drop(bp, req_rx); + return rc; + } - mutex_lock(&bp->hwrm_cmd_lock); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); + + hwrm_req_hold(bp, req_rx); + hwrm_req_hold(bp, req_tx); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_coal *hw_coal; u16 ring_id; - req = &req_rx; + req = req_rx; if (!bnapi->rx_ring) { ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); - req = &req_tx; + req = req_tx; } else { ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); } req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6285,11 +7342,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) continue; if (bnapi->rx_ring && bnapi->tx_ring) { - req = &req_tx; + req = req_tx; ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; } @@ -6299,64 +7355,77 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) hw_coal = &bp->tx_coal; __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_rx); + hwrm_req_drop(bp, req_tx); return rc; } -static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) +static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - int rc = 0, i; - struct hwrm_stat_ctx_free_input req = {0}; + struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; + struct hwrm_stat_ctx_free_input *req; + int i; if (!bp->bnapi) - return 0; + return; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - return 0; + return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); + if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) + return; + if (BNXT_FW_MAJ(bp) <= 20) { + if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { + hwrm_req_drop(bp, req); + return; + } + hwrm_req_hold(bp, req0); + } + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; if (cpr->hw_stats_ctx_id != 
INVALID_STATS_CTX_ID) { - req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + if (req0) { + req0->stat_ctx_id = req->stat_ctx_id; + hwrm_req_send(bp, req0); + } + hwrm_req_send(bp, req); cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; + hwrm_req_drop(bp, req); + if (req0) + hwrm_req_drop(bp, req0); } static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) { - int rc = 0, i; - struct hwrm_stat_ctx_alloc_input req = {0}; - struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + int rc, i; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; - req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); - req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); + req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); - mutex_lock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); + req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6364,21 +7433,25 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { - struct hwrm_func_qcfg_input req = {0}; - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - u16 flags; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + u32 min_db_offset = 0; + u16 flags, dflt_mtu; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto func_qcfg_exit; @@ -6400,6 +7473,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) } if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; + if (flags & FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED) + bp->fw_cap |= BNXT_FW_CAP_SECURE_MODE; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -6420,44 +7495,129 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (!bp->max_mtu) bp->max_mtu = BNXT_MAX_MTU; + dflt_mtu = le16_to_cpu(resp->mtu); + if (dflt_mtu >= ETH_ZLEN && dflt_mtu <= bp->max_mtu) { + bp->fw_dflt_mtu = dflt_mtu; + if ((bp->fw_cap & BNXT_FW_CAP_SECURE_MODE) && + !(bp->fw_cap & BNXT_FW_CAP_ADMIN_PF)) + bp->fw_cap |= BNXT_FW_CAP_ADMIN_MTU; + } else { + bp->fw_dflt_mtu = 0; + } + + if (bp->db_size) + goto func_qcfg_exit; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (BNXT_PF(bp)) + min_db_offset = DB_PF_OFFSET_P5; + else + min_db_offset = DB_VF_OFFSET_P5; + } + bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * + 
1024); + if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || + bp->db_size <= min_db_offset) + bp->db_size = pci_resource_len(bp->pdev, 2); + + bp->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; + func_qcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); + return rc; +} + +static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) +{ + struct hwrm_port_mac_ptp_qcfg_output *resp; + struct hwrm_port_mac_ptp_qcfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int rc; + + if (bp->hwrm_spec_code < 0x10801) { + kfree(ptp); + bp->ptp_cfg = NULL; + return 0; + } + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) + goto exit; + } else { + if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) + goto exit; + } + + if (!ptp) + ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); + if (!ptp) { + rc = -ENOMEM; + goto exit; + } + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { + ptp->rx_regs[BNXT_PTP_RX_TS_L] = + le32_to_cpu(resp->rx_ts_reg_off_lower); + ptp->rx_regs[BNXT_PTP_RX_TS_H] = + le32_to_cpu(resp->rx_ts_reg_off_upper); + ptp->rx_regs[BNXT_PTP_RX_SEQ] = + le32_to_cpu(resp->rx_ts_reg_off_seq_id); + ptp->rx_regs[BNXT_PTP_RX_FIFO] = + le32_to_cpu(resp->rx_ts_reg_off_fifo); + ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] = + le32_to_cpu(resp->rx_ts_reg_off_fifo_adv); + ptp->tx_regs[BNXT_PTP_TX_TS_L] = + le32_to_cpu(resp->tx_ts_reg_off_lower); + ptp->tx_regs[BNXT_PTP_TX_TS_H] = + le32_to_cpu(resp->tx_ts_reg_off_upper); + ptp->tx_regs[BNXT_PTP_TX_SEQ] = + le32_to_cpu(resp->tx_ts_reg_off_seq_id); + ptp->tx_regs[BNXT_PTP_TX_FIFO] = + le32_to_cpu(resp->tx_ts_reg_off_fifo); + } + + ptp->bp = bp; + bp->ptp_cfg = ptp; + +exit: + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) { - struct hwrm_func_backing_store_qcaps_input req = {0}; - struct hwrm_func_backing_store_qcaps_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_func_backing_store_qcaps_output *resp; + struct hwrm_func_backing_store_qcaps_input *req; int rc; if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; - int i; + int i, tqm_rings; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto ctx_err; } - ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); - if (!ctx_pg) { - kfree(ctx); - rc = -ENOMEM; - goto ctx_err; - } - for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) - ctx->tqm_mem[i] = ctx_pg; - - bp->ctx = ctx; ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); @@ -6489,11 +7649,26 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) le16_to_cpu(resp->mrav_num_entries_units); ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); + 
ctx->ctx_kind_initializer = resp->ctx_kind_initializer; + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; + if (!ctx->tqm_fp_rings_count) + ctx->tqm_fp_rings_count = bp->max_q; + + tqm_rings = ctx->tqm_fp_rings_count + 1; + ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); + if (!ctx_pg) { + kfree(ctx); + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < tqm_rings; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + bp->ctx = ctx; } else { rc = 0; } ctx_err: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6528,103 +7703,104 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) { - struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_input *req; struct bnxt_ctx_mem_info *ctx = bp->ctx; struct bnxt_ctx_pg_info *ctx_pg; __le32 *num_entries; + u32 ena, flags = 0; __le64 *pg_dir; - u32 flags = 0; u8 *pg_attr; int i, rc; - u32 ena; if (!ctx) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); - req.enables = cpu_to_le32(enables); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG); + if (rc) + return rc; + req->enables = cpu_to_le32(enables); if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { ctx_pg = &ctx->qp_mem; - req.qp_num_entries = cpu_to_le32(ctx_pg->entries); - req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); - req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); - req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + req->qp_num_entries = cpu_to_le32(ctx_pg->entries); + req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.qpc_pg_size_qpc_lvl, - &req.qpc_page_dir); + &req->qpc_pg_size_qpc_lvl, + &req->qpc_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { ctx_pg = &ctx->srq_mem; - req.srq_num_entries = cpu_to_le32(ctx_pg->entries); - req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); - req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + req->srq_num_entries = cpu_to_le32(ctx_pg->entries); + req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.srq_pg_size_srq_lvl, - &req.srq_page_dir); + &req->srq_pg_size_srq_lvl, + &req->srq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { ctx_pg = &ctx->cq_mem; - req.cq_num_entries = cpu_to_le32(ctx_pg->entries); - req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); - req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); - bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, - &req.cq_page_dir); - } - if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { - ctx_pg = &ctx->vnic_mem; - req.vnic_num_vnic_entries = - cpu_to_le16(ctx->vnic_max_vnic_entries); - req.vnic_num_ring_table_entries = - cpu_to_le16(ctx->vnic_max_ring_table_entries); - req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + req->cq_num_entries = cpu_to_le32(ctx_pg->entries); + req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.vnic_pg_size_vnic_lvl, - &req.vnic_page_dir); - } - if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { - ctx_pg = &ctx->stat_mem; - 
req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); - req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); - bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.stat_pg_size_stat_lvl, - &req.stat_page_dir); + &req->cq_pg_size_cq_lvl, + &req->cq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { ctx_pg = &ctx->mrav_mem; - req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); if (ctx->mrav_num_entries_units) flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; - req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.mrav_pg_size_mrav_lvl, - &req.mrav_page_dir); + &req->mrav_pg_size_mrav_lvl, + &req->mrav_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { ctx_pg = &ctx->tim_mem; - req.tim_num_entries = cpu_to_le32(ctx_pg->entries); - req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + req->tim_num_entries = cpu_to_le32(ctx_pg->entries); + req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.tim_pg_size_tim_lvl, - &req.tim_page_dir); + &req->tim_pg_size_tim_lvl, + &req->tim_page_dir); } - for (i = 0, num_entries = &req.tqm_sp_num_entries, - pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, - pg_dir = &req.tqm_sp_page_dir, + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { + ctx_pg = &ctx->vnic_mem; + req->vnic_num_vnic_entries = + cpu_to_le16(ctx->vnic_max_vnic_entries); + req->vnic_num_ring_table_entries = + cpu_to_le16(ctx->vnic_max_ring_table_entries); + req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->vnic_pg_size_vnic_lvl, + &req->vnic_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { + ctx_pg = &ctx->stat_mem; + req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->stat_pg_size_stat_lvl, + &req->stat_page_dir); + } + for (i = 0, num_entries = &req->tqm_sp_num_entries, + pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req->tqm_sp_page_dir, ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { if (!(enables & ena)) continue; - req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); ctx_pg = ctx->tqm_mem[i]; *num_entries = cpu_to_le32(ctx_pg->entries); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } - req.flags = cpu_to_le32(flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + req->flags = cpu_to_le32(flags); + return hwrm_req_send(bp, req); } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@ -6641,15 +7817,15 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, return bnxt_alloc_ring(bp, rmem); } -static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, - struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, - u8 depth) +int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth, bool use_init_val) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; int rc; if (!mem_size) - return 0; + return -EINVAL; ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { @@ -6660,8 +7836,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, int nr_tbls, i; rmem->depth = 2; - 
ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), - GFP_KERNEL); + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), GFP_KERNEL); if (!ctx_pg->ctx_pg_tbl) return -ENOMEM; nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); @@ -6681,6 +7857,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; rmem->depth = 1; rmem->nr_pages = MAX_CTX_PAGES; + if (use_init_val) + rmem->init_val = bp->ctx->ctx_kind_initializer; if (i == (nr_tbls - 1)) { int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; @@ -6695,13 +7873,15 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); if (rmem->nr_pages > 1 || depth) rmem->depth = 1; + if (use_init_val) + rmem->init_val = bp->ctx->ctx_kind_initializer; rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); } return rc; } -static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, - struct bnxt_ctx_pg_info *ctx_pg) +void bnxt_free_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; @@ -6738,7 +7918,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) return; if (ctx->tqm_mem[0]) { - for (i = 0; i < bp->max_q + 1; i++) + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); kfree(ctx->tqm_mem[0]); ctx->tqm_mem[0] = NULL; @@ -6759,6 +7939,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; u32 mem_size, ena, entries; + u32 entries_sp, min; u32 num_mr, num_ah; u32 extra_srqs = 0; u32 extra_qps = 0; @@ -6777,29 +7958,34 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { pg_lvl = 2; - extra_qps = 65536; - extra_srqs = 8192; + extra_qps = min_t(u32, 65536, + ctx->qp_max_entries - + ctx->qp_max_l2_entries - + ctx->qp_min_qp1_entries); + extra_srqs = min_t(u32, 8192, + ctx->srq_max_entries - + ctx->srq_max_l2_entries); } ctx_pg = &ctx->qp_mem; ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + extra_qps; mem_size = ctx->qp_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); if (rc) return rc; ctx_pg = &ctx->srq_mem; ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; mem_size = ctx->srq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); if (rc) return rc; ctx_pg = &ctx->cq_mem; ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; mem_size = ctx->cq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); if (rc) return rc; @@ -6807,14 +7993,14 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ctx_pg->entries = ctx->vnic_max_vnic_entries + ctx->vnic_max_ring_table_entries; mem_size = ctx->vnic_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); if (rc) return rc; ctx_pg = &ctx->stat_mem; ctx_pg->entries = ctx->stat_max_entries; mem_size = ctx->stat_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); if (rc) return rc; @@ -6823,14 +8009,14 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) goto skip_rdma; ctx_pg = &ctx->mrav_mem;
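+ /* MRAV backing store holds RoCE MR (memory region) and AH (address handle) contexts; it is sized below as num_mr + num_ah. */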
/* 128K extra is needed to accommodate static AH context * allocation by f/w. */ - num_mr = 1024 * 256; - num_ah = 1024 * 128; + num_mr = min_t(u32, ctx->mrav_max_entries / 2, 1024 * 256); + num_ah = min_t(u32, num_mr, 1024 * 128); ctx_pg->entries = num_mr + num_ah; mem_size = ctx->mrav_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); if (rc) return rc; ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; @@ -6842,21 +8028,24 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ctx_pg = &ctx->tim_mem; ctx_pg->entries = ctx->qp_mem.entries; mem_size = ctx->tim_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); if (rc) return rc; ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; skip_rdma: - entries = ctx->qp_max_l2_entries + extra_qps; + min = ctx->tqm_min_entries_per_ring; + entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + + 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; + entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); + entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries; entries = roundup(entries, ctx->tqm_entries_multiple); - entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, - ctx->tqm_max_entries_per_ring); - for (i = 0; i < bp->max_q + 1; i++) { + entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { ctx_pg = ctx->tqm_mem[i]; - ctx_pg->entries = entries; - mem_size = ctx->tqm_entry_size * entries; - rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + ctx_pg->entries = i ? 
entries : entries_sp; + mem_size = ctx->tqm_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, false); if (rc) return rc; ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; @@ -6872,19 +8061,71 @@ skip_rdma: return 0; } +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) +{ + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; + u8 i, j, *qptr; + bool no_rdma; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto qportcfg_exit; + + if (!resp->max_configurable_queues) { + rc = -EINVAL; + goto qportcfg_exit; + } + bp->max_tc = resp->max_configurable_queues; + bp->max_lltc = resp->max_configurable_lossless_queues; + if (bp->max_tc > BNXT_MAX_QUEUE) + bp->max_tc = BNXT_MAX_QUEUE; + + no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); + qptr = &resp->queue_id0; + for (i = 0, j = 0; i < bp->max_tc; i++) { + bp->q_info[j].queue_id = *qptr; + bp->q_ids[i] = *qptr++; + bp->q_info[j].queue_profile = *qptr++; + bp->tc_to_qidx[j] = j; + if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || + (no_rdma && BNXT_PF(bp))) + j++; + } + bp->max_q = bp->max_tc; + bp->max_tc = max_t(u8, j, 1); + + if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) + bp->max_tc = 1; + + if (bp->max_lltc > bp->max_tc) + bp->max_lltc = bp->max_tc; + +qportcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { - struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_resource_qcaps_input req = {0}; + struct hwrm_func_resource_qcaps_output *resp; + struct hwrm_func_resource_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc) goto hwrm_func_resc_qcaps_exit; @@ -6923,25 +8164,31 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) le16_to_cpu(resp->vf_reservation_strategy); if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; + + if (resp->flags & + cpu_to_le16(FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED)) + bp->fw_cap |= BNXT_FW_CAP_VF_RES_MIN_GUARANTEED; } hwrm_func_resc_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; + u32 flags, flags_ext; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_func_qcaps_exit; @@ -6950,18 +8197,42 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 
bp->flags |= BNXT_FLAG_ROCEV1_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV2_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN; + if (flags & FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_ADMIN_PF; + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; - if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) - bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + if (flags & FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY; + if (flags & FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CRASHDUMP; + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; - bp->tx_push_thresh = 0; - if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) - bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + flags_ext = le32_to_cpu(resp->flags_ext); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; + + /* TODO: enable BNXT_PUSH_MODE_WCB */ + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + if (BITS_PER_LONG == 64 && + (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED)) { + bp->tx_push_mode = BNXT_PUSH_MODE_PPP; + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH_PPP; + } else if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && + BNXT_FW_MAJ(bp) > 217) { + bp->tx_push_mode = BNXT_PUSH_MODE_LEGACY; + } hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); @@ -6974,24 +8245,26 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); + hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); + hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); + hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); + hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); + hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); - bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); pf->first_vf_id = le16_to_cpu(resp->first_vf_id); pf->max_vfs = le16_to_cpu(resp->max_vfs); - pf->max_encap_records = le32_to_cpu(resp->max_encap_records); - pf->max_decap_records = le32_to_cpu(resp->max_decap_records); - pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); - pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); - pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); - pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; + if (flags & 
FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) + __bnxt_hwrm_ptp_qcfg(bp); } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -7002,12 +8275,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) } hwrm_func_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); - static int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc; @@ -7017,7 +8288,8 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) return rc; rc = bnxt_hwrm_queue_qportcfg(bp); if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", + rc); return rc; } if (bp->hwrm_spec_code >= 0x10803) { @@ -7033,32 +8305,104 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) { - struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; - int rc = 0; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; u32 flags; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) return 0; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_cfa_adv_qcaps_exit; flags = le32_to_cpu(resp->flags); if (flags & - CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED) - bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX; + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } +static int __bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return 0; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_fw_health(struct bnxt *bp) +{ + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + rc = __bnxt_alloc_fw_health(bp); + if (rc) { + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; + } + + return 0; +} + +static inline void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) +{ + writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + BNXT_FW_HEALTH_WIN_MAP_OFF); +} + +static void bnxt_try_map_fw_health_reg(struct bnxt *bp) +{ + void __iomem *hs; + u32 status_loc; + u32 reg_type; + u32 sig; + + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); + hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); + + sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); + if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { + if (bp->fw_health) + bp->fw_health->status_reliable = false; + return; + } + + if (__bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware status checks\n"); + return; + } + + status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc)); + bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; + reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { + __bnxt_map_fw_health_reg(bp, status_loc); + bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = + 
BNXT_FW_HEALTH_WIN_OFF(status_loc); + } + + bp->fw_health->status_reliable = true; +} + static int bnxt_map_fw_health_regs(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -7075,30 +8419,31 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) reg_base = reg & BNXT_GRC_BASE_MASK; if ((reg & BNXT_GRC_BASE_MASK) != reg_base) return -ERANGE; - fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + - (reg & BNXT_GRC_OFFSET_MASK); + fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); } if (reg_base == 0xffffffff) return 0; - writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + - BNXT_FW_HEALTH_WIN_MAP_OFF); + __bnxt_map_fw_health_reg(bp, reg_base); return 0; } static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { - struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_error_recovery_qcfg_input req = {0}; + struct hwrm_error_recovery_qcfg_output *resp; + struct hwrm_error_recovery_qcfg_input *req; int rc, i; if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto err_recovery_out; fw_health->flags = le32_to_cpu(resp->flags); @@ -7137,10 +8482,10 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) fw_health->fw_reset_seq_vals[i] = le32_to_cpu(resp->reset_reg_val[i]); fw_health->fw_reset_seq_delay_msec[i] = - resp->delay_after_reset[i]; + le32_to_cpu(resp->delay_after_reset[i]); } err_recovery_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!rc) rc = bnxt_map_fw_health_regs(bp); if (rc) @@ -7150,87 +8495,58 @@ err_recovery_out: static int bnxt_hwrm_func_reset(struct bnxt *bp) { - struct hwrm_func_reset_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); - req.enables = 0; - - return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); -} - -static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) -{ - int rc = 0; - struct hwrm_queue_qportcfg_input req = {0}; - struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; - u8 i, j, *qptr; - bool no_rdma; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - goto qportcfg_exit; - - if (!resp->max_configurable_queues) { - rc = -EINVAL; - goto qportcfg_exit; - } - bp->max_tc = resp->max_configurable_queues; - bp->max_lltc = resp->max_configurable_lossless_queues; - if (bp->max_tc > BNXT_MAX_QUEUE) - bp->max_tc = BNXT_MAX_QUEUE; - - no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); - qptr = &resp->queue_id0; - for (i = 0, j = 0; i < bp->max_tc; i++) { - bp->q_info[j].queue_id = *qptr; - bp->q_ids[i] = *qptr++; - bp->q_info[j].queue_profile = *qptr++; - bp->tc_to_qidx[j] = j; - if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || - (no_rdma && BNXT_PF(bp))) - j++; - } - bp->max_q = bp->max_tc; - bp->max_tc = max_t(u8, j, 1); - - if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) - bp->max_tc = 1; - - if (bp->max_lltc > bp->max_tc) - bp->max_lltc = bp->max_tc; - -qportcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) -{ - 
struct hwrm_ver_get_input req = {0}; + struct hwrm_func_reset_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); - req.hwrm_intf_maj = HWRM_VERSION_MAJOR; - req.hwrm_intf_min = HWRM_VERSION_MINOR; - req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); + if (rc) + return rc; - rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, - silent); - return rc; + req->enables = 0; + hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_poll(struct bnxt *bp) +{ + struct hwrm_ver_get_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + hwrm_req_silence(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + return -ENODEV; + + return 0; } static int bnxt_hwrm_ver_get(struct bnxt *bp) { - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; - u32 dev_caps_cfg; - int rc; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; + u16 fw_maj, fw_min, fw_bld, fw_rsv; + u32 dev_caps_cfg, hwrm_ver; + int rc, len; + + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_hwrm_ver_get(bp, false); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_ver_get_exit; @@ -7245,9 +8561,35 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_upd_8b); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", - resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, - resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + + hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | + HWRM_VERSION_UPDATE; + + if (bp->hwrm_spec_code > hwrm_ver) + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, + HWRM_VERSION_UPDATE); + else + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + len = FW_VER_STR_LEN; + } else { + fw_maj = resp->hwrm_fw_maj_8b; + fw_min = resp->hwrm_fw_min_8b; + fw_bld = resp->hwrm_fw_bld_8b; + fw_rsv = resp->hwrm_fw_rsvd_8b; + len = BC_HWRM_STR_LEN; + } + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, + fw_rsv); if (strlen(resp->active_pkg_name)) { int fw_ver_len = strlen(bp->fw_ver_str); @@ -7270,10 +8612,19 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; bp->chip_num = le16_to_cpu(resp->chip_num); + bp->chip_rev = resp->chip_rev; if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && !resp->chip_metal) bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; +#ifdef BNXT_FPGA + bp->chip_platform_type = resp->chip_platform_type; + if (!BNXT_ASIC(bp) && !BNXT_CHIP_SR2(bp) && bp->pdev->devfn != 0) { + dev_err(&bp->pdev->dev, "Skipping over FPGA function %d\n", 
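bnxt_hwrm_ver_get() above folds major/minor/update into a single comparable integer: major in bits 16 and up, minor in bits 8-15, update in bits 0-7. A standalone model of that comparison with made-up version numbers (the real values come from the HWRM_VERSION_* macros and the device response):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t drv_maj = 1, drv_min = 10, drv_upd = 1;        /* example values */
        uint32_t hwrm_ver = drv_maj << 16 | drv_min << 8 | drv_upd;
        uint32_t spec_code = 0x10b02;   /* hypothetical device-reported spec */

        /* same rule as the patch: advertise whichever side is older */
        if (spec_code > hwrm_ver)
                printf("supported: %u.%u.%u (driver)\n", drv_maj, drv_min, drv_upd);
        else
                printf("supported: device-reported interface 0x%05x\n", spec_code);
        return 0;
}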
bp->pdev->devfn); + rc = -ENODEV; + goto hwrm_ver_get_exit; + } +#endif dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) @@ -7294,54 +8645,162 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_EEM; hwrm_ver_get_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_fw_set_time(struct bnxt *bp) { - struct hwrm_fw_set_time_input req = {0}; + struct hwrm_fw_set_time_input *req; +#if defined(HAVE_TIME64) struct tm tm; time64_t now = ktime_get_real_seconds(); +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + struct rtc_time tm; + struct timeval tv; +#endif + int rc; if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; +#if defined(HAVE_TIME64) time64_to_tm(now, 0, &tm); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); - req.year = cpu_to_le16(1900 + tm.tm_year); - req.month = 1 + tm.tm_mon; - req.day = tm.tm_mday; - req.hour = tm.tm_hour; - req.minute = tm.tm_min; - req.second = tm.tm_sec; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + do_gettimeofday(&tv); + rtc_time_to_tm(tv.tv_sec, &tm); +#else + return -EOPNOTSUPP; +#endif + rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return hwrm_req_send(bp, req); } -static int bnxt_hwrm_port_qstats(struct bnxt *bp) +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) { - int rc; + u64 sw_tmp; + + sw_tmp = (*sw & ~mask) | hw; + if (hw < (*sw & mask)) + sw_tmp += mask + 1; + WRITE_ONCE(*sw, sw_tmp); +} + +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count, bool ignore_zero) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (ignore_zero && !hw) + continue; + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); + } +} + +static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) +{ + if (!stats->hw_stats) + return; + + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + stats->hw_masks, stats->len / 8, false); +} + +static void bnxt_accumulate_all_stats(struct bnxt *bp) +{ + struct bnxt_stats_mem *ring0_stats; + bool ignore_zero = false; + int i; + + /* Chip bug. Counter intermittently becomes 0. 
*/ + if (bp->flags & BNXT_FLAG_CHIP_P5) + ignore_zero = true; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + if (!i) + ring0_stats = stats; + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct bnxt_stats_mem *stats = &bp->port_stats; + __le64 *hw_stats = stats->hw_stats; + u64 *sw_stats = stats->sw_stats; + u64 *masks = stats->hw_masks; + int cnt; + + cnt = sizeof(struct rx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + cnt = sizeof(struct tx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + bnxt_accumulate_stats(&bp->rx_port_stats_ext); + bnxt_accumulate_stats(&bp->tx_port_stats_ext); + } +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) +{ + struct hwrm_port_qstats_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_port_qstats_input req = {0}; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + BNXT_TX_PORT_STATS_BYTE_OFFSET); + req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_req_send(bp, req); } -static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { - struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; - struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_queue_pri2cos_qcfg_output *resp_qc; + struct hwrm_queue_pri2cos_qcfg_input *req_qc; + struct hwrm_port_qstats_ext_output *resp_qs; + struct hwrm_port_qstats_ext_input *req_qs; struct bnxt_pf_info *pf = &bp->pf; u32 tx_stat_size; int rc; @@ -7349,42 +8808,59 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - tx_stat_size = bp->hw_tx_port_stats_ext ? 
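bnxt_add_one_ctr() above widens a narrow hardware counter into a monotonically increasing 64-bit software counter: each sample replaces the low `mask` bits, and a wrap is detected whenever the new sample is smaller than the previous low bits. A standalone model (the 24-bit mask is only an example; the driver gets the real masks from firmware):

#include <stdio.h>
#include <stdint.h>

static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
        uint64_t sw_tmp = (*sw & ~mask) | hw;

        if (hw < (*sw & mask))          /* hardware counter wrapped */
                sw_tmp += mask + 1;
        *sw = sw_tmp;
}

int main(void)
{
        uint64_t sw = 0, mask = 0xffffff;       /* 24-bit hw counter */

        add_one_ctr(0xfffff0, &sw, mask);       /* sample near the top */
        add_one_ctr(0x000010, &sw, mask);       /* wrapped sample */
        printf("accumulated: 0x%llx\n", (unsigned long long)sw); /* 0x1000010 */
        return 0;
}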
- sizeof(*bp->hw_tx_port_stats_ext) : 0; - req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); + if (rc) + return rc; + + req_qs->flags = flags; + req_qs->port_id = cpu_to_le16(pf->port_id); + req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req_qs->rx_stat_host_addr = + cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? + sizeof(struct tx_port_stats_ext) : 0; + req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); + req_qs->tx_stat_host_addr = + cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + + resp_qs = hwrm_req_hold(bp, req_qs); + rc = hwrm_req_send(bp, req_qs); if (!rc) { - bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_rx_stats_ext_size = + le16_to_cpu(resp_qs->rx_stat_size) / 8; bp->fw_tx_stats_ext_size = tx_stat_size ? - le16_to_cpu(resp->tx_stat_size) / 8 : 0; + le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + hwrm_req_drop(bp, req_qs); + + if (flags) + return rc; + if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { - mutex_unlock(&bp->hwrm_cmd_lock); - bp->pri2cos_valid = 0; + bp->pri2cos_valid = false; return rc; } - bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; - rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + + resp_qc = hwrm_req_hold(bp, req_qc); + rc = hwrm_req_send(bp, req_qc); if (!rc) { - struct hwrm_queue_pri2cos_qcfg_output *resp2; u8 *pri2cos; int i, j; - resp2 = bp->hwrm_cmd_resp_addr; - pri2cos = &resp2->pri0_cos_queue_id; + pri2cos = &resp_qc->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; u8 queue_idx; @@ -7393,33 +8869,21 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) queue_idx = queue_id % 10; if (queue_idx > BNXT_MAX_QUEUE) { bp->pri2cos_valid = false; - goto qstats_done; + hwrm_req_drop(bp, req_qc); + return rc; } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) bp->pri2cos_idx[i] = queue_idx; } } - bp->pri2cos_valid = 1; + bp->pri2cos_valid = true; } -qstats_done: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_qc); + return rc; } -static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) -{ - struct hwrm_pcie_qstats_input req = {0}; - - if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) - return 0; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); - req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -} - static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@ -7434,6 +8898,9 @@ static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) bp->nge_port_cnt = 0; } +/* TODO: remove this once min aggregate packet size workaround is removed */ +static int bnxt_dbg_hwrm_wr_reg(struct bnxt *, u32, u32); + static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) { int rc, i; 
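In the PRI2COS parsing above, `queue_id % 10` recovers a per-port queue index, which suggests firmware numbers per-port CoS queues in strides of ten (an inference from the arithmetic, not stated in the hunk). A standalone illustration with a hypothetical mapping:

#include <stdio.h>

int main(void)
{
        unsigned char pri2cos[8] = { 10, 10, 11, 11, 12, 13, 14, 17 };
        int i;

        for (i = 0; i < 8; i++)
                printf("priority %d -> queue id %d, queue index %d\n",
                       i, pri2cos[i], pri2cos[i] % 10);
        return 0;
}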
@@ -7493,41 +8960,50 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, } } +#ifdef HAVE_NDO_BRIDGE_GETLINK static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + u8 evb_mode; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); if (br_mode == BRIDGE_MODE_VEB) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; else if (br_mode == BRIDGE_MODE_VEPA) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + req->evb_mode = evb_mode; + return hwrm_req_send(bp, req); } +#endif static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; int rc; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; - if (size == 128) - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; + if (size == 128) + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + + return hwrm_req_send(bp, req); } static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@ -7590,7 +9066,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) { int rc, i, nr_ctxs; - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); if (rc) { @@ -7635,7 +9111,6 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { -#ifdef CONFIG_RFS_ACCEL int i, rc = 0; if (bp->flags & BNXT_FLAG_CHIP_P5) @@ -7664,9 +9139,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) break; } return rc; -#else - return 0; -#endif } /* Allow PF and VF with default VLAN to be in promiscuous mode */ @@ -7834,11 +9306,15 @@ static int bnxt_set_real_num_queues(struct bnxt *bp) int rc; struct net_device *dev = bp->dev; +#ifdef VOID_NETIF_SET_NUM_TX + netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - + bp->tx_nr_rings_xdp); +#else rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - bp->tx_nr_rings_xdp); if (rc) return rc; - +#endif rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); if (rc) return rc; @@ -7934,7 +9410,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp) return rc; } -#ifdef CONFIG_RFS_ACCEL static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) { return bp->hw_resc.max_rsscos_ctxs; @@ -7944,7 +9419,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) { return bp->hw_resc.max_vnics; } -#endif unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { @@ -8152,6 +9626,9 @@ int 
bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); } + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); + if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; @@ -8182,11 +9659,13 @@ static void bnxt_free_irq(struct bnxt *bp) irq = &bp->irq_tbl[map_idx]; if (irq->requested) { +#if defined(HAVE_CPUMASK_LOCAL_FIRST) || defined(HAVE_CPUMASK_LOCAL_SPREAD) if (irq->have_cpumask) { irq_set_affinity_hint(irq->vector, NULL); free_cpumask_var(irq->cpu_mask); irq->have_cpumask = 0; } +#endif free_irq(irq->vector, bp->bnapi[i]); } @@ -8234,12 +9713,22 @@ static int bnxt_request_irq(struct bnxt *bp) irq->requested = 1; +#if defined(HAVE_CPUMASK_LOCAL_FIRST) || defined(HAVE_CPUMASK_LOCAL_SPREAD) if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { int numa_node = dev_to_node(&bp->pdev->dev); irq->have_cpumask = 1; +#ifdef HAVE_CPUMASK_LOCAL_SPREAD cpumask_set_cpu(cpumask_local_spread(i, numa_node), irq->cpu_mask); +#else + rc = cpumask_set_cpu_local_first(i, numa_node, + irq->cpu_mask); + if (rc) { + netdev_warn(bp->dev, "Set CPU mask failed\n"); + break; + } +#endif rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); if (rc) { netdev_warn(bp->dev, @@ -8248,6 +9737,7 @@ static int bnxt_request_irq(struct bnxt *bp) break; } } +#endif } return rc; } @@ -8287,15 +9777,18 @@ static void bnxt_init_napi(struct bnxt *bp) for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); + napi_hash_add(&bnapi->napi); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bnapi = bp->bnapi[cp_nr_rings]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0, 64); + napi_hash_add(&bnapi->napi); } } else { bnapi = bp->bnapi[0]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); + napi_hash_add(&bnapi->napi); } } @@ -8313,6 +9806,7 @@ static void bnxt_disable_napi(struct bnxt *bp) cancel_work_sync(&cpr->dim.work); napi_disable(&bp->bnapi[i]->napi); + bnxt_disable_poll(bp->bnapi[i]); } } @@ -8321,14 +9815,19 @@ static void bnxt_enable_napi(struct bnxt *bp) int i; for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; - bp->bnapi[i]->in_reset = false; + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - if (bp->bnapi[i]->rx_ring) { + if (bnapi->in_reset) + cpr->sw_stats.rx.rx_resets++; + bnapi->in_reset = false; + + if (bnapi->rx_ring) { INIT_WORK(&cpr->dim.work, bnxt_dim_work); cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } - napi_enable(&bp->bnapi[i]->napi); + bnxt_enable_poll(bnapi); + napi_enable(&bnapi->napi); } } @@ -8345,7 +9844,10 @@ void bnxt_tx_disable(struct bnxt *bp) } /* Stop all TX queues */ netif_tx_disable(bp->dev); - netif_carrier_off(bp->dev); +#ifdef BNXT_SKIP_CARRIER_OFF + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) +#endif + netif_carrier_off(bp->dev); } void bnxt_tx_enable(struct bnxt *bp) @@ -8404,21 +9906,24 @@ static void bnxt_report_link(struct bnxt *bp) static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_port_phy_qcaps_input req = {0}; - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; + int rc; bp->flags &= ~BNXT_FLAG_EEE_CAP; if (bp->test_info) - bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK; + bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | + 
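The IRQ request path above pins each ring's vector to one CPU through cpumask_local_spread() (or cpumask_set_cpu_local_first() on older kernels). A userspace model of the spreading policy; the real helpers additionally prefer CPUs on the device's NUMA node, which this sketch ignores:

#include <stdio.h>

int main(void)
{
        int ncpus = 6, nrings = 8;      /* hypothetical topology */
        int i;

        for (i = 0; i < nrings; i++)
                printf("ring %d -> cpu %d\n", i, i % ncpus);
        return 0;
}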
BNXT_TEST_FL_AN_PHY_LPBK); if (bp->hwrm_spec_code < 0x10201) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_phy_qcaps_exit; @@ -8437,32 +9942,52 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (bp->test_info) bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { + if (bp->test_info) + bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; + } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { + if (BNXT_PF(bp)) + bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; + } if (resp->supported_speeds_auto_mode) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); + if (resp->supported_pam4_speeds_auto_mode) + link_info->support_pam4_auto_speeds = + le16_to_cpu(resp->supported_pam4_speeds_auto_mode); bp->port_count = resp->port_cnt; hwrm_phy_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } +static bool bnxt_support_dropped(u16 advertising, u16 supported) +{ + u16 diff = advertising ^ supported; + + return ((supported | diff) != supported); +} + static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { - int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - struct hwrm_port_phy_qcfg_input req = {0}; - struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcfg_output *resp; + struct hwrm_port_phy_qcfg_input *req; u8 link_up = link_info->link_up; - u16 diff; + bool support_changed = false; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -8482,10 +10007,17 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) else link_info->link_speed = 0; link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); + link_info->force_pam4_link_speed = + le16_to_cpu(resp->force_pam4_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); + link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); + link_info->auto_pam4_link_speeds = + le16_to_cpu(resp->auto_pam4_link_speed_mask); link_info->lp_auto_link_speeds = le16_to_cpu(resp->link_partner_adv_speeds); + link_info->lp_auto_pam4_link_speeds = + le16_to_cpu(resp->link_partner_pam4_adv_speeds); link_info->preemphasis = le32_to_cpu(resp->preemphasis); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; @@ -8549,22 +10081,26 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) /* alwasy link down if not require to update link state */ link_info->link_up = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return 0; - diff = link_info->support_auto_speeds ^ link_info->advertising; - if ((link_info->support_auto_speeds | diff) != - link_info->support_auto_speeds) { - /* An advertised speed is no longer 
supported, so we need to - * update the advertisement settings. Caller holds RTNL - * so we can modify link settings. - */ + /* Check if any advertised speeds are no longer supported. The caller + * holds the link_lock mutex, so we can modify link_info settings. + */ + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds)) { link_info->advertising = link_info->support_auto_speeds; - if (link_info->autoneg & BNXT_AUTONEG_SPEED) - bnxt_hwrm_set_link_setting(bp, true, false); + support_changed = true; } + if (bnxt_support_dropped(link_info->advertising_pam4, + link_info->support_pam4_auto_speeds)) { + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; + support_changed = true; + } + if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); return 0; } @@ -8574,6 +10110,9 @@ static void bnxt_get_port_module_status(struct bnxt *bp) struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; u8 module_status; + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return; + if (bnxt_update_link(bp, true)) return; @@ -8623,27 +10162,30 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) } } -static void bnxt_hwrm_set_link_common(struct bnxt *bp, - struct hwrm_port_phy_cfg_input *req) +static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { - u8 autoneg = bp->link_info.autoneg; - u16 fw_link_speed = bp->link_info.req_link_speed; - u16 advertising = bp->link_info.advertising; - - if (autoneg & BNXT_AUTONEG_SPEED) { - req->auto_mode |= - PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; - - req->enables |= cpu_to_le32( - PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); - req->auto_link_speed_mask = cpu_to_le16(advertising); - + if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { + req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; + if (bp->link_info.advertising) { + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); + req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); + } + if (bp->link_info.advertising_pam4) { + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); + req->auto_link_pam4_speed_mask = + cpu_to_le16(bp->link_info.advertising_pam4); + } req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); - req->flags |= - cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); } else { - req->force_link_speed = cpu_to_le16(fw_link_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); + if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { + req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); + } else { + req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + } } /* tell chimp that the setting takes effect immediately */ @@ -8652,18 +10194,20 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, int bnxt_hwrm_set_pause(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - bnxt_hwrm_set_pause_common(bp, &req); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + bnxt_hwrm_set_pause_common(bp, req); if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || bp->link_info.force_link_chng) - bnxt_hwrm_set_link_common(bp, &req); + 
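bnxt_support_dropped() above answers "does `advertising` contain any bit that `supported` lost?": OR-ing the XOR difference into `supported` changes it only when `advertising` carries an extra bit. A standalone check with hypothetical speed bitmaps:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool support_dropped(uint16_t advertising, uint16_t supported)
{
        uint16_t diff = advertising ^ supported;

        return (supported | diff) != supported;
}

int main(void)
{
        /* hypothetical speed bitmaps */
        printf("%d\n", support_dropped(0x000f, 0x00ff)); /* 0: still supported */
        printf("%d\n", support_dropped(0x010f, 0x00ff)); /* 1: bit 8 was dropped */
        return 0;
}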
bnxt_hwrm_set_link_common(bp, req); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { /* since changing of pause setting doesn't trigger any link * change event, the driver needs to update the current pause @@ -8676,7 +10220,6 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) bnxt_report_link(bp); } bp->link_info.force_link_chng = false; - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -8705,22 +10248,30 @@ static void bnxt_hwrm_set_eee(struct bnxt *bp, int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); if (set_pause) - bnxt_hwrm_set_pause_common(bp, &req); + bnxt_hwrm_set_pause_common(bp, req); - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); if (set_eee) - bnxt_hwrm_set_eee(bp, &req); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bnxt_hwrm_set_eee(bp, req); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return 0; if (!BNXT_SINGLE_PF(bp)) return 0; @@ -8728,32 +10279,38 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) if (pci_num_vf(bp->pdev)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); + return hwrm_req_send(bp, req); } static int bnxt_fw_init_one(struct bnxt *bp); static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { - struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_if_change_input req = {0}; + struct hwrm_func_drv_if_change_output *resp; bool resc_reinit = false, fw_reset = false; + struct hwrm_func_drv_if_change_input *req; u32 flags = 0; int rc; if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); + if (rc) + return rc; + if (up) - req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) flags = le32_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (rc) return rc; @@ -8771,9 +10328,12 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) } if (resc_reinit || fw_reset) { if (fw_reset) { + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_ulp_stop(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + bnxt_dcb_free(bp, true); rc = bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@ -8809,8 +10369,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) { - struct hwrm_port_led_qcaps_output *resp = 
bp->hwrm_cmd_resp_addr; - struct hwrm_port_led_qcaps_input req = {0}; + struct hwrm_port_led_qcaps_output *resp; + struct hwrm_port_led_qcaps_input *req; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -8818,12 +10378,15 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { @@ -8843,54 +10406,63 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_alloc_input req = {0}; - struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_alloc_output *resp; + struct hwrm_wol_filter_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; - req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); - memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->wol_filter_id = resp->wol_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_free_input req = {0}; + struct hwrm_wol_filter_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); - req.wol_filter_id = bp->wol_filter_id; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req->wol_filter_id = bp->wol_filter_id; + return hwrm_req_send(bp, req); } static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) { - struct hwrm_wol_filter_qcfg_input req = {0}; - struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_qcfg_output *resp; + struct hwrm_wol_filter_qcfg_input *req; u16 next_handle = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.handle = cpu_to_le16(handle); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); + if (rc) + return rc; + + req->port_id = 
cpu_to_le16(bp->pf.port_id); + req->handle = cpu_to_le16(handle); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { next_handle = le16_to_cpu(resp->next_handle); if (next_handle != 0) { @@ -8901,7 +10473,7 @@ static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return next_handle; } @@ -8922,17 +10494,17 @@ static void bnxt_get_wol_settings(struct bnxt *bp) static ssize_t bnxt_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; struct bnxt *bp = dev_get_drvdata(dev); u32 temp = 0; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) - temp = resp->temp * 1000; /* display millidegree */ - mutex_unlock(&bp->hwrm_cmd_lock); + if (!hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY)) { + resp = hwrm_req_hold(bp, req); + if (!hwrm_req_send(bp, req)) + temp = resp->temp * 1000; /* display millidegree */ + hwrm_req_drop(bp, req); + } return sprintf(buf, "%u\n", temp); } @@ -8967,14 +10539,17 @@ static void bnxt_hwmon_open(struct bnxt *bp) dev_warn(&pdev->dev, "Cannot register hwmon device\n"); } } + #else -static void bnxt_hwmon_close(struct bnxt *bp) + +static inline void bnxt_hwmon_close(struct bnxt *bp) { } -static void bnxt_hwmon_open(struct bnxt *bp) +static inline void bnxt_hwmon_open(struct bnxt *bp) { } + #endif static bool bnxt_eee_config_ok(struct bnxt *bp) @@ -9009,6 +10584,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp) bool update_eee = false; struct bnxt_link_info *link_info = &bp->link_info; + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return 0; + rc = bnxt_update_link(bp, true); if (rc) { netdev_err(bp->dev, "failed to update link (rc: %x)\n", @@ -9028,21 +10606,26 @@ static int bnxt_update_phy_setting(struct bnxt *bp) if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; - if (link_info->req_link_speed != link_info->force_link_speed) + if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && + link_info->req_link_speed != link_info->force_link_speed) + update_link = true; + else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && + link_info->req_link_speed != link_info->force_pam4_link_speed) update_link = true; if (link_info->req_duplex != link_info->duplex_setting) update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; - if (link_info->advertising != link_info->auto_link_speeds) + if (link_info->advertising != link_info->auto_link_speeds || + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) update_link = true; } /* The last close may have shutdown the link, so need to call * PHY_CFG to bring it back up. */ - if (!netif_carrier_ok(bp->dev)) + if (!bp->link_info.link_up) update_link = true; if (!bnxt_eee_config_ok(bp)) @@ -9082,7 +10665,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) int rc = 0; bnxt_preset_reg_win(bp); - netif_carrier_off(bp->dev); +#ifdef BNXT_SKIP_CARRIER_OFF + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) +#endif + netif_carrier_off(bp->dev); if (irq_re_init) { /* Reserve rings now if none were reserved at driver probe. 
*/ rc = bnxt_init_dflt_ring_mode(bp); @@ -9119,6 +10705,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_enable_napi(bp); bnxt_debug_dev_init(bp); + bnxt_ptp_start(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); @@ -9139,8 +10726,19 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - if (irq_re_init) + if (irq_re_init) { +#if defined(HAVE_NDO_UDP_TUNNEL) udp_tunnel_get_rx_info(bp->dev); +#elif defined(HAVE_NDO_ADD_VXLAN) +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) + vxlan_get_rx_port(bp->dev); +#endif + if (!bnxt_hwrm_tunnel_dst_port_alloc( + bp, htons(0x17c1), + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) + bp->nge_port_cnt = 1; +#endif + } set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -9148,7 +10746,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); /* Poll link status and check for SFP+ module status */ + mutex_lock(&bp->link_lock); bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) @@ -9219,30 +10819,50 @@ void bnxt_half_close_nic(struct bnxt *bp) bnxt_free_mem(bp, false); } +static void bnxt_reenable_sriov(struct bnxt *bp) +{ + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + int n = pf->active_vfs; + + if (n) + bnxt_cfg_hw_sriov(bp, &n, true); + } +} + static int bnxt_open(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - int rc; + int rc, timeout = 0; if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); return -ENODEV; } + clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); +retry_if_change: rc = bnxt_hwrm_if_change(bp, true); - if (rc) + if (rc == -EAGAIN && timeout < 40) { + msleep(50); + timeout++; + goto retry_if_change; + } else if (rc) { return rc; + } rc = __bnxt_open_nic(bp, true, true); +#ifndef PCIE_SRIOV_CONFIGURE + if (!rc) + bnxt_start_sriov(bp, num_vfs); +#endif if (rc) { bnxt_hwrm_if_change(bp, false); } else { - if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) && - BNXT_PF(bp)) { - struct bnxt_pf_info *pf = &bp->pf; - int n = pf->active_vfs; - - if (n) - bnxt_cfg_hw_sriov(bp, &n, true); + if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); + } } bnxt_hwmon_open(bp); } @@ -9256,11 +10876,12 @@ static bool bnxt_drv_busy(struct bnxt *bp) test_bit(BNXT_STATE_READ_STATS, &bp->state)); } +#ifdef NETDEV_GET_STATS64 static void bnxt_get_ring_stats(struct bnxt *bp, struct rtnl_link_stats64 *stats); +#endif -static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, - bool link_re_init) +void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { /* Close the VF-reps before closing PF */ if (BNXT_PF(bp)) @@ -9284,9 +10905,11 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, del_timer_sync(&bp->timer); bnxt_free_skbs(bp); +#ifdef NETDEV_GET_STATS64 /* Save ring stats before shutdown */ - if (bp->bnapi) + if (bp->bnapi && irq_re_init) bnxt_get_ring_stats(bp, &bp->net_stats_prev); +#endif if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -9337,53 +10960,60 @@ static int bnxt_close(struct net_device *dev) static int 
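bnxt_open() above now tolerates firmware that is still initializing: on -EAGAIN it sleeps 50 ms and retries the IF_CHANGE up to 40 times, roughly two seconds, before failing the open. A compact userspace model of that bounded retry (fake_if_change() stands in for the firmware call):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* stand-in for bnxt_hwrm_if_change(): fails a few times, then succeeds */
static int fake_if_change(void)
{
        static int calls;

        return ++calls < 5 ? -EAGAIN : 0;
}

int main(void)
{
        int rc, timeout = 0;

        do {
                rc = fake_if_change();
                if (rc != -EAGAIN)
                        break;
                usleep(50 * 1000);      /* 50 ms, as in the patch */
        } while (++timeout < 40);       /* ~2 s worst case */

        printf("rc=%d after %d retries\n", rc, timeout);
        return 0;
}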
bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, u16 *val) { - struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_mdio_read_input req = {0}; + struct hwrm_port_phy_mdio_read_output *resp; + struct hwrm_port_phy_mdio_read_input *req; int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) - *val = le16_to_cpu(resp->reg_data); - mutex_unlock(&bp->hwrm_cmd_lock); + *val = le16_to_cpu(resp->reg_data); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, u16 val) { - struct hwrm_port_phy_mdio_write_input req = {0}; + struct hwrm_port_phy_mdio_write_input *req; + int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); - if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); - } - req.reg_data = cpu_to_le16(val); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); + if (rc) + return rc; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); + if (mdio_phy_id_is_c45(phy_addr)) { + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); + } + req->reg_data = cpu_to_le16(val); + + return hwrm_req_send(bp, req); } /* rtnl_lock held */ @@ -9417,6 +11047,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, mdio->val_in); +#ifdef HAVE_IEEE1588_SUPPORT + case SIOCSHWTSTAMP: + return bnxt_hwtstamp_set(dev, ifr); + + case SIOCGHWTSTAMP: + return bnxt_hwtstamp_get(dev, ifr); +#endif + default: /* do nothing */ break; @@ -9424,39 +11062,40 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +#ifdef NETDEV_GET_STATS64 + static void bnxt_get_ring_stats(struct bnxt *bp, struct rtnl_link_stats64 *stats) { int i; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw_stats = cpr->hw_stats; + u64 *sw = cpr->stats.sw_stats; - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); - stats->rx_packets += 
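The MDIO ioctl helpers above switch between clause-22 and clause-45 framing by unpacking the phy_id with the mdio_phy_id_*() helpers. A standalone model of that encoding; the bit layout follows linux/mdio.h, and the port/device numbers are examples:

#include <stdio.h>
#include <stdint.h>

#define MDIO_PHY_ID_C45         0x8000
#define MDIO_PHY_ID_PRTAD       0x03e0
#define MDIO_PHY_ID_DEVAD       0x001f

int main(void)
{
        uint16_t phy_id = MDIO_PHY_ID_C45 | (3 << 5) | 1;   /* port 3, device 1 */

        printf("c45=%d prtad=%d devad=%d\n",
               !!(phy_id & MDIO_PHY_ID_C45),
               (phy_id & MDIO_PHY_ID_PRTAD) >> 5,
               phy_id & MDIO_PHY_ID_DEVAD);
        return 0;
}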
le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); stats->rx_missed_errors += - le64_to_cpu(hw_stats->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); } } @@ -9474,7 +11113,11 @@ static void bnxt_add_prev_stats(struct bnxt *bp, stats->tx_dropped += prev_stats->tx_dropped; } +#ifdef NETDEV_GET_STATS64_VOID static void +#else +static struct rtnl_link_stats64 * +#endif bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct bnxt *bp = netdev_priv(dev); @@ -9487,35 +11130,117 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { clear_bit(BNXT_STATE_READ_STATS, &bp->state); *stats = bp->net_stats_prev; +#ifdef NETDEV_GET_STATS64_VOID return; +#else + return stats; +#endif } bnxt_get_ring_stats(bp, stats); bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { - struct rx_port_stats *rx = bp->hw_rx_port_stats; - struct tx_port_stats *tx = bp->hw_tx_port_stats; + u64 *rx = bp->port_stats.sw_stats; + u64 *tx = bp->port_stats.sw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + - le64_to_cpu(rx->rx_ovrsz_frames) + - le64_to_cpu(rx->rx_runt_frames); - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + - le64_to_cpu(rx->rx_jbr_frames); - stats->collisions = le64_to_cpu(tx->tx_total_collisions); - stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); - stats->tx_errors = le64_to_cpu(tx->tx_err); + stats->rx_crc_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + stats->rx_frame_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + stats->rx_length_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + + 
BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); + stats->rx_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + stats->collisions = + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); + stats->tx_fifo_errors = + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); } clear_bit(BNXT_STATE_READ_STATS, &bp->state); +#ifndef NETDEV_GET_STATS64_VOID + return stats; +#endif } +#else +static struct net_device_stats *bnxt_get_stats(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + int i; + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + return stats; + } + + memset(stats, 0, sizeof(struct net_device_stats)); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct ctx_hw_stats *hw_stats = cpr->stats.hw_stats; + + stats->rx_packets += GET_NET_STATS(hw_stats->rx_ucast_pkts); + stats->rx_packets += GET_NET_STATS(hw_stats->rx_mcast_pkts); + stats->rx_packets += GET_NET_STATS(hw_stats->rx_bcast_pkts); + + stats->tx_packets += GET_NET_STATS(hw_stats->tx_ucast_pkts); + stats->tx_packets += GET_NET_STATS(hw_stats->tx_mcast_pkts); + stats->tx_packets += GET_NET_STATS(hw_stats->tx_bcast_pkts); + + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_ucast_bytes); + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_mcast_bytes); + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_bcast_bytes); + + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_ucast_bytes); + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_mcast_bytes); + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_bcast_bytes); + + stats->rx_missed_errors += + GET_NET_STATS(hw_stats->rx_discard_pkts); + stats->multicast += GET_NET_STATS(hw_stats->rx_mcast_pkts); + stats->tx_dropped += GET_NET_STATS(hw_stats->tx_error_pkts); + } + + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct rx_port_stats *rx = bp->port_stats.hw_stats; + struct tx_port_stats *tx = bp->port_stats.hw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET; + + stats->rx_crc_errors = GET_NET_STATS(rx->rx_fcs_err_frames); + stats->rx_frame_errors = GET_NET_STATS(rx->rx_align_err_frames); + stats->rx_length_errors = GET_NET_STATS(rx->rx_undrsz_frames) + + GET_NET_STATS(rx->rx_ovrsz_frames) + + GET_NET_STATS(rx->rx_runt_frames); + stats->rx_errors = GET_NET_STATS(rx->rx_false_carrier_frames) + + GET_NET_STATS(rx->rx_jbr_frames); + stats->collisions = GET_NET_STATS(tx->tx_total_collisions); + stats->tx_fifo_errors = GET_NET_STATS(tx->tx_fifo_underruns); + stats->tx_errors = GET_NET_STATS(tx->tx_err); + } + + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + return &dev->stats; +} +#endif static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; +#ifdef HAVE_DEV_ADDR_LIST + struct dev_addr_list *ha; +#else struct netdev_hw_addr *ha; +#endif u8 *haddr; int mc_count = 0; bool update = false; @@ -9527,7 +11252,11 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) vnic->mc_list_count = 0; return false; } +#ifdef HAVE_DEV_ADDR_LIST + haddr = ha->da_addr; +#else haddr = ha->addr; +#endif if (!ether_addr_equal(haddr, vnic->mc_list + off)) { memcpy(vnic->mc_list + off, haddr, ETH_ALEN); update = true; @@ -9619,19 +11348,12 @@ static int 
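The stats64 path above stops reading __le64 hardware counters directly and instead indexes the pre-accumulated sw_stats array through offsetof()-based macros, so a byte offset divided by 8 becomes a u64 index. A standalone model with a hypothetical three-counter layout (the real macros and structs live in the driver headers):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ring_stats {             /* hypothetical; mirrors the idea only */
        uint64_t rx_ucast_pkts;
        uint64_t rx_mcast_pkts;
        uint64_t tx_ucast_pkts;
};

#define GET_RING_STATS64(sw, ctr) \
        ((sw)[offsetof(struct ring_stats, ctr) / 8])

int main(void)
{
        uint64_t sw[3] = { 100, 20, 55 };       /* accumulated sw_stats image */

        printf("rx packets: %llu\n",
               (unsigned long long)(GET_RING_STATS64(sw, rx_ucast_pkts) +
                                    GET_RING_STATS64(sw, rx_mcast_pkts)));
        printf("tx packets: %llu\n",
               (unsigned long long)GET_RING_STATS64(sw, tx_ucast_pkts));
        return 0;
}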
bnxt_cfg_rx_mode(struct bnxt *bp) if (!uc_update) goto skip_uc; - mutex_lock(&bp->hwrm_cmd_lock); for (i = 1; i < vnic->uc_filter_count; i++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; + struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, - -1); - - req.l2_filter_id = vnic->fw_l2_filter_id[i]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + bnxt_hwrm_l2_filter_free(bp, fltr); + bnxt_del_l2_filter(bp, fltr); } - mutex_unlock(&bp->hwrm_cmd_lock); vnic->uc_filter_count = 1; @@ -9696,7 +11418,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp) static bool bnxt_rfs_supported(struct bnxt *bp) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) return true; return false; } @@ -9710,7 +11432,6 @@ static bool bnxt_rfs_supported(struct bnxt *bp) /* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { -#ifdef CONFIG_RFS_ACCEL int vnics, max_vnics, max_rss_ctxs; if (bp->flags & BNXT_FLAG_CHIP_P5) @@ -9746,11 +11467,9 @@ static bool bnxt_rfs_capable(struct bnxt *bp) netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); return false; -#else - return false; -#endif } +#ifdef NETDEV_FEATURE_CONTROL static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { @@ -9771,22 +11490,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. */ - if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - else - features |= NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX; + if ((features & BNXT_HW_FEATURE_VLAN_ALL_RX) != + BNXT_HW_FEATURE_VLAN_ALL_RX) { + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; + else if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; } #ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp)) { - if (bp->vf.vlan) { - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - } - } + if (BNXT_VF(bp) && bp->vf.vlan) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; #endif return features; } @@ -9801,15 +11514,23 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) +#ifdef HAVE_NETIF_F_GRO_HW if (features & NETIF_F_GRO_HW) +#else + if ((features & NETIF_F_GRO) && BNXT_SUPPORTS_TPA(bp)) +#endif flags |= BNXT_FLAG_GRO; else if (features & NETIF_F_LRO) +#else + if (features & NETIF_F_LRO) +#endif flags |= BNXT_FLAG_LRO; if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) flags &= ~BNXT_FLAG_TPA; - if (features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) flags |= BNXT_FLAG_STRIP_VLAN; if (features & NETIF_F_NTUPLE) @@ -9856,27 +11577,320 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) } return rc; } +#endif +static int bnxt_dbg_hwrm_wr_reg(struct bnxt *bp, u32 reg_off, u32 reg_val) +{ + struct hwrm_dbg_write_direct_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_WRITE_DIRECT); + if 
(rc) + return rc; + + req->write_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + /* TODO: support reg write to one register for now */ + req->write_len32 = cpu_to_le32(1); + req->write_data[0] = cpu_to_le32(reg_val); + return hwrm_req_send(bp, req); +} + +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf) +{ + struct hwrm_dbg_read_direct_output *resp; + struct hwrm_dbg_read_direct_input *req; + dma_addr_t dma_handle; + __le32 *dbg_reg_buf; + int rc, i, j; + + rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); + if (rc) + return rc; + + dbg_reg_buf = hwrm_req_dma_slice(bp, req, HWRM_DBG_REG_BUF_SIZE, + &dma_handle); + if (!dbg_reg_buf) { + rc = -ENOMEM; + goto dbg_rd_reg_exit; + } + req->host_dest_addr = cpu_to_le64(dma_handle); + + resp = hwrm_req_hold(bp, req); + for (i = 0; i < num_words; ) { + u16 words; + + req->read_addr = cpu_to_le32(reg_off + i * 4 + + CHIMP_REG_VIEW_ADDR); + words = num_words - i; + words = min_t(u16, words, HWRM_DBG_REG_BUF_SIZE / 4); + req->read_len32 = cpu_to_le32(words); + /* holding response is sufficient for repeated commands too */ + rc = hwrm_req_send(bp, req); + if (rc || resp->error_code) { + rc = -EIO; + goto dbg_rd_reg_exit; + } + for (j = 0; j < words; j++) + reg_buf[i + j] = le32_to_cpu(dbg_reg_buf[j]); + + i += words; + } + +dbg_rd_reg_exit: + hwrm_req_drop(bp, req); + return rc; +} static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { - struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_dbg_ring_info_get_input req = {0}; + struct hwrm_dbg_ring_info_get_output *resp; + struct hwrm_dbg_ring_info_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); - req.ring_type = ring_type; - req.fw_ring_id = cpu_to_le32(ring_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); + if (rc) + return rc; + + req->ring_type = ring_type; + req->fw_ring_id = cpu_to_le32(ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { - *prod = le32_to_cpu(resp->producer_index); - *cons = le32_to_cpu(resp->consumer_index); + *prod = le32_to_cpu(resp->producer_index); + *cons = le32_to_cpu(resp->consumer_index); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } +static void bnxt_dbg_dump_hw_ring(struct bnxt *bp, u32 index) +{ + u32 val[15] = {0xDEADDEAD}; + u32 fw_ring_id; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr, *cpr2; + + if (!netif_msg_hw(bp)) + return; + + bnapi = bp->bnapi[index]; + txr = bnapi->tx_ring; + rxr = bnapi->rx_ring; + cpr = &bnapi->cp_ring; + + if (!txr) + goto skip_txr; + + /* TBD prod/cons */ + fw_ring_id = txr->tx_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_TX, + fw_ring_id, &val[0], &val[1]); + cpr2 = cpr->cp_ring_arr[1]; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[2], &val[3]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[0]); + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[1]); + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, 
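bnxt_dbg_hwrm_rd_reg() above reads an arbitrarily long register range through one fixed-size DMA slice, issuing a firmware command per chunk of HWRM_DBG_REG_BUF_SIZE/4 words. A userspace model of just the chunking loop (buffer size and register contents are invented):

#include <stdio.h>
#include <stdint.h>

#define BUF_WORDS 16    /* hypothetical per-command buffer, in 32-bit words */

/* stand-in for the firmware read: returns fake data for `words` registers */
static void read_chunk(uint32_t addr, uint16_t words, uint32_t *out)
{
        uint16_t j;

        for (j = 0; j < words; j++)
                out[j] = addr + j * 4;  /* pretend each register holds its address */
}

int main(void)
{
        uint32_t buf[BUF_WORDS], reg_buf[40];
        uint16_t num_words = 40, i, words, j;

        for (i = 0; i < num_words; ) {
                words = num_words - i;
                if (words > BUF_WORDS)
                        words = BUF_WORDS;
                read_chunk(0x1000 + i * 4, words, buf);
                for (j = 0; j < words; j++)
                        reg_buf[i + j] = buf[j];
                i += words;
        }
        printf("last register: 0x%x\n", reg_buf[39]);   /* 0x109c */
        return 0;
}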
&val[3]); + } + } + +skip_txr: + if (!rxr) + goto skip_rxr; + + /* RBD prod/cons */ + fw_ring_id = rxr->rx_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_RX, + fw_ring_id, &val[4], &val[5]); + cpr2 = cpr->cp_ring_arr[0]; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[6], &val[7]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[4]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[5]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[7]); + } + } + /* AGG RBD prod/cons */ + fw_ring_id = rxr->rx_agg_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_RX, + fw_ring_id, &val[8], &val[9]); + cpr2 = cpr->cp_ring_arr[0]; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[10], &val[11]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[8]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[9]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[11]); + } + } + +skip_rxr: + /* CAG prod/cons/vector ctrl */ + fw_ring_id = cpr->cp_ring_struct.fw_ring_id; + if (fw_ring_id < 1024) { + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_PRODUCER_INDEX_REG + fw_ring_id * 4, 1, + &val[12]); + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG + fw_ring_id * 4, 1, + &val[13]); + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_VECTOR_CTRL + fw_ring_id * 4, 1, &val[14]); + } else if (fw_ring_id != INVALID_HW_RING_ID) { + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET, fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_PRODUCER_INDEX_REG, 1, + &val[12]); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET, 0); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET, fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_CONSUMER_INDEX_REG, 1, + &val[13]); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET, 0); + bnxt_dbg_hwrm_wr_reg(bp, CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET, + fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_VECTOR_CTRL, 1, &val[14]); + bnxt_dbg_hwrm_wr_reg(bp, CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET, + 0); + } + netdev_info(bp->dev, "[%d]: TBD{prod: %x cons: %x cp prod: %x cp cons: %x} " + "RBD{prod: %x cons: %x cp prod: %x cp cons: %x} " + "RBD AGG{prod: %x cons: %x cp prod: %x cp cons: %x} " + "CAG{prod: %x cons: %x vec: %x}\n", index, + val[0], val[1], val[2], val[3], + val[4], val[5], val[6], val[7], + val[8], val[9], val[10], val[11], + val[12], val[13], val[14]); +} + +static void bnxt_dbg_dump_hw_states(struct bnxt *bp) +{ + int rc, i; + u32 val[32] = {0xDEADDEAD}; + u32 dbg_sel; + + if (!netif_msg_hw(bp)) + return; + + /* dump tdc interrupt status */ + rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_INT_STS_0, 1, val); + if (!rc) + netdev_info(bp->dev, "TDC_REG_INT_STS_0: %x\n", val[0]); + /* dump tdc debug bus */ + netdev_info(bp->dev, "TDC debug bus dump:\n"); + dbg_sel = 0x80000000; + for (i = 0; i < 5; i++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, 
TDC_REG_TDC_DEBUG_CNTL, dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_TDC_DEBUG_STATUS, 1, val); + if (rc) + break; + netdev_info(bp->dev, "\tdbg_sel %08x: %08x\n", dbg_sel, val[0]); + dbg_sel++; + } + if (bp->flags & BNXT_FLAG_CHIP_P5) + return; + + /* dump tdi debug bus */ + netdev_info(bp->dev, "TDI debug bus dump:\n"); + dbg_sel = 0xf; + rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_DWORD_ENABLE, dbg_sel); + if (!rc) { + rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA, 1, val); + if (!rc) + netdev_info(bp->dev, "\tTDI_REG_DBG_DWORD_ENABLE (%x): " + "%08x\n", dbg_sel, val[0]); + for (dbg_sel = 2; dbg_sel < 0x12; dbg_sel++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_SELECT, + dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA, + 8, val); + if (rc) + break; + netdev_info(bp->dev, "\tTDI_REG_DBG_OUT_DATA: " + "%08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + val[0], val[1], val[2], val[3], + val[4], val[5], val[6], val[7]); + } + } + /* dump te_dec port and cmd credits */ + rc = bnxt_dbg_hwrm_rd_reg(bp, TE_DEC_REG_PORT_CURRENT_CREDIT_REG, + HWRM_DBG_REG_BUF_SIZE/4, val); + if (!rc) { + netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CREDIT_REG: " + "%x %x %x\n", val[0], val[1], val[2]); + netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CMD_CREDIT_REG: " + "%x %x %x\n", val[16], val[17], val[18]); + } + /* dump partial RDI debug bus */ + netdev_info(bp->dev, "RDI debug bus dump:\n"); + dbg_sel = 0x80000000; + for (i = 0; i < 3; i++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG, + dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG, + 1, val); + if (rc) + break; + netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]); + dbg_sel++; + } + dbg_sel = 0x80001000; + rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG, + dbg_sel); + if (!rc) + rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG, + 1, val); + if (!rc) + netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]); +} + static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; @@ -9906,11 +11920,18 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - int i = bnapi->index; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2; + int i = bnapi->index, j; netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); + for (j = 0; j < 2; j++) { + cpr2 = cpr->cp_ring_arr[j]; + if (!cpr2) + continue; + netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, j, cpr2->cp_ring_struct.fw_ring_id, cpr2->cp_raw_cons); + } } static void bnxt_dbg_dump_states(struct bnxt *bp) @@ -9925,34 +11946,83 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) bnxt_dump_rx_sw_state(bnapi); bnxt_dump_cp_sw_state(bnapi); } + bnxt_dbg_dump_hw_ring(bp, i); } + bnxt_dbg_dump_hw_states(bp); +} + +static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct hwrm_ring_reset_input *req; + struct bnxt_napi *bnapi = rxr->bnapi; + struct bnxt_cp_ring_info *cpr; + u16 cp_ring_id; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_RING_RESET); + if (rc) + return rc; + + cpr = &bnapi->cp_ring; + cp_ring_id = cpr->cp_ring_struct.fw_ring_id; + req->cmpl_ring = cpu_to_le16(cp_ring_id); + req->ring_type = 
RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_req_send_silent(bp, req); } static void bnxt_reset_task(struct bnxt *bp, bool silent) { if (!silent) bnxt_dbg_dump_states(bp); + if (netif_running(bp->dev)) { int rc; - if (!silent) + if (silent) { + bnxt_close_nic(bp, false, false); + bnxt_open_nic(bp, false, false); + } else { bnxt_ulp_stop(bp); - bnxt_close_nic(bp, false, false); - rc = bnxt_open_nic(bp, false, false); - if (!silent && !rc) - bnxt_ulp_start(bp); + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + bnxt_ulp_start(bp, rc); + } } } -static void bnxt_tx_timeout(struct net_device *dev) +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnxt *bp = netdev_priv(dev); +#ifdef BNXT_SKIP_CARRIER_OFF + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; +#endif netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bnxt_poll_controller(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int i; + + /* Only process tx rings/combined rings in netpoll mode. */ + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + napi_schedule(&txr->bnapi->napi); + } +} +#endif + static void bnxt_fw_health_check(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -9984,12 +12054,18 @@ fw_reset: bnxt_queue_sp_work(bp); } +#ifdef HAVE_TIMER_SETUP static void bnxt_timer(struct timer_list *t) { struct bnxt *bp = from_timer(bp, t, timer); +#else +static void bnxt_timer(unsigned long data) +{ + struct bnxt *bp = (struct bnxt *)data; +#endif struct net_device *dev = bp->dev; - if (!netif_running(dev)) + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) return; if (atomic_read(&bp->intr_sem) != 0) @@ -9998,8 +12074,7 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && - bp->stats_coal_ticks) { + if (bp->link_info.link_up && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -10009,9 +12084,19 @@ static void bnxt_timer(struct timer_list *t) bnxt_queue_sp_work(bp); } + if (bp->ptp_cfg && (bp->flags & BNXT_FLAG_CHIP_P5)) + set_bit(BNXT_PTP_CURRENT_TIME_EVENT, &bp->sp_event); + +#ifdef CONFIG_RFS_ACCEL + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } +#endif /*CONFIG_RFS_ACCEL*/ + if (bp->link_info.phy_retry) { if (time_after(jiffies, bp->link_info.phy_retry_expires)) { - bp->link_info.phy_retry = 0; + bp->link_info.phy_retry = false; netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); } else { set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); @@ -10019,7 +12104,8 @@ static void bnxt_timer(struct timer_list *t) } } - if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) { + if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && + netif_carrier_ok(dev)) { set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -10053,15 +12139,64 @@ static void bnxt_reset(struct bnxt *bp, bool silent) bnxt_rtnl_unlock_sp(bp); } +/* Only called from
bnxt_sp_task() */ +static void bnxt_rx_ring_reset(struct bnxt *bp) +{ + int i; + + bnxt_rtnl_lock_sp(bp); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bnxt_rtnl_unlock_sp(bp); + return; + } + /* Disable and flush TPA before resetting the RX ring */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_cp_ring_info *cpr; + int rc; + + if (!rxr->bnapi->in_reset) + continue; + + rc = bnxt_hwrm_rx_ring_reset(bp, i); + if (rc) { + if (rc == -EINVAL || rc == -EOPNOTSUPP) + netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); + else + netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", + rc); + bnxt_reset_task(bp, false); + break; + } + bnxt_free_one_rx_ring_skbs(bp, i); + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + rxr->rx_next_cons = 0; + rxr->bnapi->in_reset = false; + bnxt_alloc_one_rx_ring(bp, i); + cpr = &rxr->bnapi->cp_ring; + cpr->sw_stats.rx.rx_resets++; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, true); + bnxt_rtnl_unlock_sp(bp); +} + static void bnxt_fw_reset_close(struct bnxt *bp) { - __bnxt_close_nic(bp, true, false); - bnxt_ulp_irq_stop(bp); + bnxt_ulp_stop(bp); /* When firmware is fatal state, disable PCI device to prevent * any potential bad DMAs before freeing kernel memory. */ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) pci_disable_device(bp->pdev); + __bnxt_close_nic(bp, true, false); bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); if (pci_is_enabled(bp->pdev)) @@ -10121,6 +12256,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp) void bnxt_fw_exception(struct bnxt *bp) { + netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); bnxt_rtnl_lock_sp(bp); bnxt_force_fw_reset(bp); @@ -10225,13 +12361,58 @@ static void bnxt_chk_missed_irq(struct bnxt *bp) bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, fw_ring_id, &val[0], &val[1]); - cpr->missed_irqs++; + cpr->sw_stats.cmn.missed_irqs++; } } } static void bnxt_cfg_ntp_filters(struct bnxt *); +static void bnxt_vf_vnic_change(struct bnxt *bp) +{ + struct bnxt_pf_info *pf = &bp->pf; + int i, num_vfs = pf->active_vfs; + + if (!num_vfs) + return; + + for (i = 0; i < num_vfs; i++) + bnxt_commit_vf_vnic(bp, i); + bnxt_cfg_ntp_filters(bp); +} + +static void bnxt_init_ethtool_link_settings(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + link_info->autoneg = BNXT_AUTONEG_SPEED; + if (bp->hwrm_spec_code >= 0x10201) { + if (link_info->auto_pause_setting & + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } else { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } +
link_info->advertising = link_info->auto_link_speeds; + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; + } else { + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + if (link_info->force_pam4_link_speed) { + link_info->req_link_speed = + link_info->force_pam4_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + } + link_info->req_duplex = link_info->duplex_setting; + } + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->req_flow_ctrl = + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; + else + link_info->req_flow_ctrl = link_info->force_pause_setting; +} + static void bnxt_sp_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, sp_task); @@ -10246,6 +12427,8 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_cfg_rx_mode(bp); + if (test_and_clear_bit(BNXT_VF_VNIC_CHANGE_SP_EVENT, &bp->sp_event)) + bnxt_vf_vnic_change(bp); if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) @@ -10268,10 +12451,13 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!\n"); + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_port_qstats(bp); - bnxt_hwrm_port_qstats_ext(bp); - bnxt_hwrm_pcie_qstats(bp); + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); + bnxt_accumulate_all_stats(bp); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { @@ -10283,10 +12469,14 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_phy_qcaps(bp); rc = bnxt_update_link(bp, true); - mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); } if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { int rc; @@ -10322,6 +12512,9 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, true); + if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) + bnxt_rx_ring_reset(bp); + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); @@ -10331,6 +12524,9 @@ static void bnxt_sp_task(struct work_struct *work) BNXT_FW_EXCEPTION_SP_EVENT); } + if (test_and_clear_bit(BNXT_PTP_CURRENT_TIME_EVENT, &bp->sp_event)) + bnxt_ptp_get_current_time(bp); + smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } @@ -10376,6 +12572,11 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) { + if (bp->db_base_wc) { + iounmap(bp->db_base_wc); + bp->db_base_wc = NULL; + } + if (bp->bar2) { pci_iounmap(pdev, bp->bar2); bp->bar2 = NULL; @@ -10408,8 +12609,8 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) * 1 coal_buf x bufs_per_record = 1 completion record. 
*/ coal = &bp->rx_coal; - coal->coal_ticks = 10; - coal->coal_bufs = 30; + coal->coal_ticks = 6; + coal->coal_bufs = 12; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; coal->idle_thresh = 50; @@ -10426,20 +12627,60 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } -static void bnxt_alloc_fw_health(struct bnxt *bp) +static int bnxt_init_mac_addr(struct bnxt *bp) { - if (bp->fw_health) - return; + int rc = 0; - if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && - !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) - return; + if (BNXT_PF(bp)) { + memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + bool strict_approval = true; - bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); - if (!bp->fw_health) { - netdev_warn(bp->dev, "Failed to allocate fw_health\n"); - bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; - bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + if (is_valid_ether_addr(vf->mac_addr)) { + /* overwrite netdev dev_addr with admin VF MAC */ + memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + /* Older PF driver or firmware may not approve this + * correctly. + */ + strict_approval = false; + } else { + eth_hw_addr_random(bp->dev); + } + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); +#endif + } + return rc; +} + +static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { + bp->flags |= BNXT_FLAG_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } +} + +static void bnxt_set_dflt_rfs(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + + dev->hw_features &= ~NETIF_F_NTUPLE; + dev->features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + if (bnxt_rfs_supported(bp)) { + dev->hw_features |= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } } } @@ -10449,21 +12690,16 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp) bp->fw_cap = 0; rc = bnxt_hwrm_ver_get(bp); - if (rc) + bnxt_try_map_fw_health_reg(bp); + if (rc) { + if (bp->fw_health && bp->fw_health->status_reliable) + netdev_err(bp->dev, + "Firmware not responding, status: 0x%x\n", + bnxt_fw_health_readl(bp, + BNXT_FW_HEALTH_REG)); return rc; - - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; } - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - return rc; - } rc = bnxt_hwrm_func_reset(bp); if (rc) return -ENODEV; @@ -10489,20 +12725,20 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", rc); - bnxt_alloc_fw_health(bp); - rc = bnxt_hwrm_error_recovery_qcfg(bp); - if (rc) - netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", - rc); + if (bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware error recovery\n"); + } else { + rc = bnxt_hwrm_error_recovery_qcfg(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", + rc); + } - rc = bnxt_hwrm_func_drv_rgtr(bp); - if (rc) - return 
-ENODEV; - - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); + rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); if (rc) return -ENODEV; + bnxt_hwrm_query_eem_caps(bp); bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); @@ -10511,36 +12747,6 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) return 0; } -static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) -{ - bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; - bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | - VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { - bp->flags |= BNXT_FLAG_UDP_RSS_CAP; - bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | - VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; - } -} - -static void bnxt_set_dflt_rfs(struct bnxt *bp) -{ - struct net_device *dev = bp->dev; - - dev->hw_features &= ~NETIF_F_NTUPLE; - dev->features &= ~NETIF_F_NTUPLE; - bp->flags &= ~BNXT_FLAG_RFS; - if (bnxt_rfs_supported(bp)) { - dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - dev->features |= NETIF_F_NTUPLE; - } - } -} - static void bnxt_fw_init_one_p3(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; @@ -10558,6 +12764,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp) bnxt_hwrm_coal_params_qcaps(bp); } +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); + static int bnxt_fw_init_one(struct bnxt *bp) { int rc; @@ -10572,6 +12780,9 @@ static int bnxt_fw_init_one(struct bnxt *bp) netdev_err(bp->dev, "Firmware init phase 2 failed\n"); return rc; } + rc = bnxt_probe_phy(bp, false); + if (rc) + return rc; rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); if (rc) return rc; @@ -10603,7 +12814,7 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) writel(reg_off & BNXT_GRC_BASE_MASK, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: writel(val, bp->bar0 + reg_off); break; @@ -10620,21 +12831,32 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) static void bnxt_reset_all(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; - int i; + int i, rc; + + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { +#ifdef CONFIG_TEE_BNXT_FW + rc = tee_bnxt_fw_load(); + if (rc) + netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc); + bp->fw_reset_timestamp = jiffies; +#endif + return; + } if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) bnxt_fw_reset_writel(bp, i); } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { - struct hwrm_fw_reset_input req = {0}; - int rc; + struct hwrm_fw_reset_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (!rc) { + req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); + req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_req_send(bp, req); + } if (rc) 
netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } @@ -10675,16 +12897,20 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_timestamp = jiffies; rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + rtnl_unlock(); + goto fw_reset_abort; + } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; tmo = HZ / 10; } else { bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; tmo = bp->fw_reset_min_dsecs * HZ / 10; } - rtnl_unlock(); bnxt_queue_fw_reset_work(bp, tmo); + rtnl_unlock(); return; } case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { @@ -10707,7 +12933,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; } - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_RESET_FW: bnxt_reset_all(bp); bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; @@ -10730,10 +12956,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } pci_set_master(bp->pdev); bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (rc) { if (time_after(jiffies, bp->fw_reset_timestamp + (bp->fw_reset_max_dsecs * HZ / 10))) { @@ -10745,10 +12971,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_OPENING: while (!rtnl_trylock()) { bnxt_queue_fw_reset_work(bp, HZ / 10); return; } rc = bnxt_open(bp->dev); @@ -10757,19 +12983,24 @@ static void bnxt_fw_reset_task(struct work_struct *work) clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); dev_close(bp->dev); } - bnxt_ulp_irq_restart(bp, rc); - rtnl_unlock(); bp->fw_reset_state = 0; /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - break; + bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); + bnxt_dl_health_recovery_done(bp); + bnxt_dl_health_status_update(bp, true); + rtnl_unlock(); } return; fw_reset_abort: clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) + bnxt_dl_health_status_update(bp, false); bp->fw_reset_state = 0; rtnl_lock(); dev_close(bp->dev); @@ -10814,6 +13044,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->dev = dev; bp->pdev = pdev; + /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() * determines the BAR size.
+ */ bp->bar0 = pci_ioremap_bar(pdev, 0); if (!bp->bar0) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); @@ -10821,13 +13054,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) goto init_err_release; } - bp->bar1 = pci_ioremap_bar(pdev, 2); - if (!bp->bar1) { - dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); - rc = -ENOMEM; - goto init_err_release; - } - bp->bar2 = pci_ioremap_bar(pdev, 4); if (!bp->bar2) { dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); @@ -10850,7 +13076,11 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bnxt_init_dflt_coal(bp); +#ifdef HAVE_TIMER_SETUP timer_setup(&bp->timer, bnxt_timer, 0); +#else + setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp); +#endif bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); @@ -10898,6 +13128,17 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) { struct bnxt *bp = netdev_priv(dev); +#ifndef HAVE_MIN_MTU + if (new_mtu < ETH_ZLEN || new_mtu > bp->max_mtu) + return -EINVAL; + + if (BNXT_RX_PAGE_MODE(bp) && new_mtu > BNXT_MAX_PAGE_MODE_MTU) + return -EINVAL; + + if (bp->fw_cap & BNXT_FW_CAP_ADMIN_MTU) + return -EPERM; +#endif + if (netif_running(dev)) bnxt_close_nic(bp, true, false); @@ -10910,6 +13151,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) return 0; } +#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) { struct bnxt *bp = netdev_priv(dev); @@ -10953,6 +13195,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) return 0; } +#endif /* defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) */ + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_BLOCK +LIST_HEAD(bnxt_block_cb_list); static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) @@ -10965,25 +13212,59 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); +#endif default: return -EOPNOTSUPP; } } -static LIST_HEAD(bnxt_block_cb_list); +#else /* HAVE_TC_SETUP_BLOCK */ -static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) +static int bnxt_setup_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { struct bnxt *bp = netdev_priv(dev); + if (!bnxt_tc_flower_enabled(bp)) + return -EOPNOTSUPP; + +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower); +#endif +} +#endif +#endif + +#if (defined(HAVE_NDO_SETUP_TC_RH) || !defined(HAVE_NDO_SETUP_TC_RH72)) +#ifdef HAVE_TC_SETUP_TYPE +static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && defined(HAVE_TC_SETUP_BLOCK) + struct bnxt *bp = netdev_priv(dev); +#endif + switch (type) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_BLOCK case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &bnxt_block_cb_list, bnxt_setup_tc_block_cb, bp, bp, true); +#else + case TC_SETUP_CLSFLOWER: + return bnxt_setup_flower(dev, type_data); +#endif +#endif case TC_SETUP_QDISC_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; @@ -10996,38 +13277,158 @@ static int 
bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -#ifdef CONFIG_RFS_ACCEL +#else /* !HAVE_TC_SETUP_TYPE */ +#ifdef HAVE_TC_TO_NETDEV +#ifdef HAVE_CHAIN_INDEX +static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, + __be16 proto, struct tc_to_netdev *ntc) +#else /* !HAVE_CHAIN_INDEX */ +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) +#endif +{ + switch (ntc->type) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + case TC_SETUP_CLSFLOWER: + return bnxt_setup_flower(dev, ntc->cls_flower); +#endif + case TC_SETUP_MQPRIO: +#ifdef HAVE_MQPRIO_QOPT + ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); +#else + return bnxt_setup_mq_tc(dev, ntc->tc); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_TO_NETDEV */ +#endif /* HAVE_TC_SETUP_TYPE */ +#endif /* HAVE_NDO_SETUP_TC_RH72 */ + +#ifdef NEW_FLOW_KEYS static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct bnxt_ntuple_filter *f2) { struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; + if (f1->ntuple_flags != f2->ntuple_flags) + return false; + if (keys1->basic.n_proto != keys2->basic.n_proto || keys1->basic.ip_proto != keys2->basic.ip_proto) return false; if (keys1->basic.n_proto == htons(ETH_P_IP)) { - if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || - keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && + keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) || + ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)) return false; } else { - if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, - sizeof(keys1->addrs.v6addrs.src)) || - memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, - sizeof(keys1->addrs.v6addrs.dst))) + if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && + memcmp(&keys1->addrs.v6addrs.src, + &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src))) || + ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && + memcmp(&keys1->addrs.v6addrs.dst, + &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst)))) return false; } - if (keys1->ports.ports == keys2->ports.ports && - keys1->control.flags == keys2->control.flags && - ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && - ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) + if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) && + keys1->ports.src != keys2->ports.src) || + ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) && + keys1->ports.dst != keys2->ports.dst)) + return false; + +#ifdef HAVE_NEW_FLOW_DISSECTOR + if (keys1->control.flags == keys2->control.flags && + f1->l2_fltr == f2->l2_fltr) +#else + if (f1->l2_fltr == f2->l2_fltr) +#endif return true; return false; } +#else + +static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, + struct bnxt_ntuple_filter *f2) +{ + struct flow_keys *keys1 = &f1->fkeys; + struct flow_keys *keys2 = &f2->fkeys; + + if (keys1->src == keys2->src && + keys1->dst == keys2->dst && + keys1->ports == keys2->ports && + keys1->ip_proto == keys2->ip_proto && +#ifdef HAVE_N_PROTO + keys1->n_proto == keys2->n_proto && +#endif + f1->l2_fltr == f2->l2_fltr) + return true; + + return false; +} +#endif /* NEW_FLOW_KEYS */ + +struct bnxt_ntuple_filter * +bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr, u32 idx) +{ + struct hlist_node __maybe_unused *node; + struct 
bnxt_ntuple_filter *f; + struct hlist_head *head; + + head = &bp->ntp_fltr_hash_tbl[idx]; + __hlist_for_each_entry_rcu(f, node, head, base.hash) { + if (bnxt_fltr_match(f, fltr)) + return f; + } + return NULL; +} + +#ifdef HAVE_FLOW_HASH_FROM_KEYS +u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys) +{ + return jhash2((u32 *)&fkeys->FLOW_KEYS_HASH_START_FIELD, + BNXT_NTUPLE_KEY_SIZE, bp->hash_seed) & + BNXT_NTP_FLTR_HASH_MASK; +} +#endif + +int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, + u32 idx) +{ + struct hlist_head *head; + int bit_id; + + spin_lock_bh(&bp->ntp_fltr_lock); + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); + if (bit_id < 0) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return -ENOMEM; + } + + fltr->base.sw_id = (u16)bit_id; + fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; + fltr->base.flags |= BNXT_ACT_RING_DST; + head = &bp->ntp_fltr_hash_tbl[idx]; + hlist_add_head_rcu(&fltr->base.hash, head); + set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); + bp->ntp_fltr_count++; + spin_unlock_bh(&bp->ntp_fltr_lock); + return 0; +} + +#ifdef CONFIG_RFS_ACCEL static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { @@ -11035,30 +13436,46 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct bnxt_ntuple_filter *fltr, *new_fltr; struct flow_keys *fkeys; struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); - int rc = 0, idx, bit_id, l2_idx = 0; - struct hlist_head *head; + struct bnxt_l2_filter *l2_fltr; + int rc = 0, idx; + struct hlist_node __maybe_unused *node; +#ifdef HAVE_NEW_FLOW_DISSECTOR + u32 flags; +#endif - if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; - int off = 0, j; + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + !(bp->flags & BNXT_FLAG_RFS)) + return -EOPNOTSUPP; - netif_addr_lock_bh(dev); - for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { - if (ether_addr_equal(eth->h_dest, - vnic->uc_list + off)) { - l2_idx = j + 1; - break; - } - } - netif_addr_unlock_bh(dev); - if (!l2_idx) +#ifdef HAVE_INNER_NETWORK_OFFSET + if (skb->encapsulation) + return -EPROTONOSUPPORT; +#endif + + if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { + l2_fltr = bp->vnic_info[0].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + } else { + struct bnxt_l2_key key; + + ether_addr_copy(key.dst_mac_addr, eth->h_dest); + key.vlan = 0; + l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); + if (!l2_fltr) return -EINVAL; + if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { + bnxt_del_l2_filter(bp, l2_fltr); + return -EINVAL; + } } new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); - if (!new_fltr) + if (!new_fltr) { + bnxt_del_l2_filter(bp, l2_fltr); return -ENOMEM; + } fkeys = &new_fltr->fkeys; +#ifdef NEW_FLOW_KEYS if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { rc = -EPROTONOSUPPORT; goto err_free; @@ -11076,108 +13493,188 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rc = -EPROTONOSUPPORT; goto err_free; } - if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && - bp->hwrm_spec_code < 0x10601) { +#ifdef HAVE_NEW_FLOW_DISSECTOR + flags = fkeys->control.flags; + if (((flags & FLOW_DIS_ENCAPSULATION) && + bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { + rc = -EPROTONOSUPPORT; + goto err_free; + } +#endif +#else + if (!skb_flow_dissect(skb, fkeys)) { rc = -EPROTONOSUPPORT; goto err_free; } - memcpy(new_fltr->dst_mac_addr, 
eth->h_dest, ETH_ALEN); - memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); +#ifdef HAVE_N_PROTO + if ((fkeys->n_proto != htons(ETH_P_IP)) || + ((fkeys->ip_proto != IPPROTO_TCP) && + (fkeys->ip_proto != IPPROTO_UDP))) { + rc = -EPROTONOSUPPORT; + goto err_free; + } +#else + if ((skb->protocol != htons(ETH_P_IP)) || + ((fkeys->ip_proto != IPPROTO_TCP) && + (fkeys->ip_proto != IPPROTO_UDP))) { + rc = -EPROTONOSUPPORT; + goto err_free; + } +#endif +#endif + new_fltr->l2_fltr = l2_fltr; + new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL; + +#ifdef HAVE_FLOW_HASH_FROM_KEYS + idx = bnxt_get_ntp_filter_idx(bp, fkeys); +#else idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; - head = &bp->ntp_fltr_hash_tbl[idx]; +#endif rcu_read_lock(); - hlist_for_each_entry_rcu(fltr, head, hash) { - if (bnxt_fltr_match(fltr, new_fltr)) { - rcu_read_unlock(); - rc = 0; - goto err_free; - } + fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); + if (fltr) { + rcu_read_unlock(); + rc = 0; + goto err_free; } rcu_read_unlock(); - spin_lock_bh(&bp->ntp_fltr_lock); - bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, - BNXT_NTP_FLTR_MAX_FLTR, 0); - if (bit_id < 0) { - spin_unlock_bh(&bp->ntp_fltr_lock); - rc = -ENOMEM; - goto err_free; + new_fltr->flow_id = flow_id; + new_fltr->base.rxq = rxq_index; + rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); + if (!rc) { + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + return new_fltr->base.sw_id; } - new_fltr->sw_id = (u16)bit_id; - new_fltr->flow_id = flow_id; - new_fltr->l2_fltr_idx = l2_idx; - new_fltr->rxq = rxq_index; - hlist_add_head_rcu(&new_fltr->hash, head); - bp->ntp_fltr_count++; - spin_unlock_bh(&bp->ntp_fltr_lock); - - set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - - return new_fltr->sw_id; - err_free: + bnxt_del_l2_filter(bp, l2_fltr); kfree(new_fltr); return rc; } +#endif /* CONFIG_RFS_ACCEL */ + +void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) +{ + spin_lock_bh(&bp->ntp_fltr_lock); + if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return; + } + hlist_del_rcu(&fltr->base.hash); + bp->ntp_fltr_count--; + spin_unlock_bh(&bp->ntp_fltr_lock); + bnxt_del_l2_filter(bp, fltr->l2_fltr); + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + kfree_rcu(fltr, base.rcu); +} static void bnxt_cfg_ntp_filters(struct bnxt *bp) { int i; + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp, __maybe_unused *nxt; + struct bnxt_l2_filter *fltr; + + head = &bp->l2_fltr_hash_tbl[i]; + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { + if (fltr->base.flags & BNXT_ACT_FUNC_DST) { + u16 vf_idx = fltr->base.vf_idx; + + if (bnxt_vf_vnic_state_is_up(bp, vf_idx)) + continue; + + bnxt_del_l2_filter(bp, fltr); + } + } + } for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { struct hlist_head *head; - struct hlist_node *tmp; + struct hlist_node *tmp, __maybe_unused *nxt; struct bnxt_ntuple_filter *fltr; int rc; head = &bp->ntp_fltr_hash_tbl[i]; - hlist_for_each_entry_safe(fltr, tmp, head, hash) { + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { bool del = false; - if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { - if (rps_may_expire_flow(bp->dev, fltr->rxq, + if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { + if (fltr->base.flags & BNXT_ACT_NO_AGING) + continue; +#ifdef CONFIG_RFS_ACCEL + if (rps_may_expire_flow(bp->dev, fltr->base.rxq, fltr->flow_id, - 
fltr->sw_id)) { + fltr->base.sw_id)) { bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr); del = true; } +#endif /* CONFIG_RFS_ACCEL */ } else { rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, fltr); if (rc) del = true; else - set_bit(BNXT_FLTR_VALID, &fltr->state); + set_bit(BNXT_FLTR_VALID, + &fltr->base.state); } - if (del) { - spin_lock_bh(&bp->ntp_fltr_lock); - hlist_del_rcu(&fltr->hash); - bp->ntp_fltr_count--; - spin_unlock_bh(&bp->ntp_fltr_lock); - synchronize_rcu(); - clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); - kfree(fltr); - } + if (del) + bnxt_del_ntp_filter(bp, fltr); } } - if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!"); } -#else - -static void bnxt_cfg_ntp_filters(struct bnxt *bp) +#ifdef HAVE_NDO_ADD_VXLAN +static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) { + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port != port) + return; + + bp->vxlan_port_cnt++; + if (bp->vxlan_port_cnt == 1) { + bp->vxlan_port = port; + set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } } -#endif /* CONFIG_RFS_ACCEL */ +static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct bnxt *bp = netdev_priv(dev); + if (!netif_running(dev)) + return; + + if (sa_family != AF_INET6 && sa_family != AF_INET) + return; + + if (bp->vxlan_port_cnt && bp->vxlan_port == port) { + bp->vxlan_port_cnt--; + + if (bp->vxlan_port_cnt == 0) { + set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } + } +} +#elif defined(HAVE_NDO_UDP_TUNNEL) static void bnxt_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) { @@ -11256,7 +13753,27 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, bnxt_queue_sp_work(bp); } +#endif +#ifdef OLD_VLAN +static void bnxt_vlan_rx_register(struct net_device *dev, + struct vlan_group *vlgrp) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!netif_running(dev)) { + bp->vlgrp = vlgrp; + return; + } + bnxt_disable_napi(bp); + bnxt_disable_int_sync(bp); + bp->vlgrp = vlgrp; + bnxt_enable_napi(bp); + bnxt_enable_int(bp); +} +#endif + +#ifdef HAVE_NDO_BRIDGE_GETLINK static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) @@ -11267,14 +13784,19 @@ static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, nlflags, filter_mask, NULL); } +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, struct netlink_ext_ack *extack) +#else +static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) +#endif { struct bnxt *bp = netdev_priv(dev); struct nlattr *attr, *br_spec; int rem, rc = 0; if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); @@ -11301,10 +13823,32 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, } return rc; } +#endif +#ifdef CONFIG_VF_REPS +#ifndef HAVE_NDO_DEVLINK_PORT +static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + /* The PF and it's VF-reps only support the switchdev
framework */ + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + rc = snprintf(buf, len, "p%d", bp->pf.port_id); + + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} +#endif + +#ifdef HAVE_NDO_GET_PORT_PARENT_ID int bnxt_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { struct bnxt *bp = netdev_priv(dev); if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) @@ -11314,52 +13858,160 @@ int bnxt_get_port_parent_id(struct net_device *dev, if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; - ppid->id_len = sizeof(bp->switch_id); - memcpy(ppid->id, bp->switch_id, ppid->id_len); + ppid->id_len = sizeof(bp->dsn); + memcpy(ppid->id, bp->dsn, ppid->id_len); return 0; } +#else + +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) +{ + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return -EOPNOTSUPP; + + /* The PF and it's VF-reps only support the switchdev framework */ + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) + return -EOPNOTSUPP; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(bp->dsn); + memcpy(attr->u.ppid.id, bp->dsn, attr->u.ppid.id_len); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int bnxt_swdev_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + return bnxt_port_attr_get(netdev_priv(dev), attr); +} + +static const struct switchdev_ops bnxt_switchdev_ops = { + .switchdev_port_attr_get = bnxt_swdev_port_attr_get +}; +#endif /* HAVE_NDO_GET_PORT_PARENT_ID */ +#endif /* CONFIG_VF_REPS */ + +#ifdef HAVE_NDO_DEVLINK_PORT static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); return &bp->dl_port; } +#endif static const struct net_device_ops bnxt_netdev_ops = { +#ifdef HAVE_NDO_SIZE + .ndo_size = sizeof(const struct net_device_ops), +#endif .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, .ndo_stop = bnxt_close, +#ifdef NETDEV_GET_STATS64 .ndo_get_stats64 = bnxt_get_stats64, +#else + .ndo_get_stats = bnxt_get_stats, +#endif .ndo_set_rx_mode = bnxt_set_rx_mode, .ndo_do_ioctl = bnxt_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnxt_change_mac_addr, .ndo_change_mtu = bnxt_change_mtu, +#ifdef NETDEV_FEATURE_CONTROL .ndo_fix_features = bnxt_fix_features, .ndo_set_features = bnxt_set_features, +#endif .ndo_tx_timeout = bnxt_tx_timeout, #ifdef CONFIG_BNXT_SRIOV +#ifdef HAVE_NDO_GET_VF_CONFIG .ndo_get_vf_config = bnxt_get_vf_config, .ndo_set_vf_mac = bnxt_set_vf_mac, .ndo_set_vf_vlan = bnxt_set_vf_vlan, .ndo_set_vf_rate = bnxt_set_vf_bw, +#ifdef HAVE_NDO_SET_VF_LINK_STATE .ndo_set_vf_link_state = bnxt_set_vf_link_state, +#endif +#ifdef HAVE_VF_SPOOFCHK .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST .ndo_set_vf_trust = bnxt_set_vf_trust, #endif - .ndo_setup_tc = bnxt_setup_tc, +#ifdef HAVE_NDO_SET_VF_QUEUES + .ndo_set_vf_queues = bnxt_set_vf_queues, +#endif +#endif +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bnxt_poll_controller, +#endif + +#ifdef HAVE_SETUP_TC + +#if (defined(HAVE_TC_TO_NETDEV) || defined(HAVE_TC_SETUP_TYPE)) + +#if defined(HAVE_NDO_SETUP_TC_RH) + .extended.ndo_setup_tc_rh = bnxt_setup_tc, +#elif defined(HAVE_NDO_SETUP_TC_RH72) + .ndo_setup_tc = bnxt_setup_mq_tc, +#else /* !HAVE_NDO_SETUP_TC_RH && !HAVE_NDO_SETUP_TC_RH72 */ + .ndo_setup_tc = bnxt_setup_tc, +#endif + +#else /* !HAVE_TC_TO_NETDEV && !HAVE_TC_SETUP_TYPE */ + .ndo_setup_tc =
bnxt_setup_mq_tc, +#endif + +#endif /* HAVE_SETUP_TC */ + #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif +#if defined(HAVE_NDO_ADD_VXLAN) + .ndo_add_vxlan_port = bnxt_add_vxlan_port, + .ndo_del_vxlan_port = bnxt_del_vxlan_port, +#elif defined(HAVE_NDO_UDP_TUNNEL) .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, +#endif +#ifdef BNXT_PRIV_RX_BUSY_POLL + .ndo_busy_poll = bnxt_busy_poll, +#endif +#ifdef OLD_VLAN + .ndo_vlan_rx_register = bnxt_vlan_rx_register, +#endif +#ifdef HAVE_NDO_XDP .ndo_bpf = bnxt_xdp, +#endif +#if defined(HAVE_XDP_FRAME) && !defined(HAVE_EXT_NDO_XDP_XMIT) .ndo_xdp_xmit = bnxt_xdp_xmit, +#endif +#ifdef HAVE_NDO_BRIDGE_GETLINK .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, - .ndo_get_devlink_port = bnxt_get_devlink_port, +#endif +#ifdef CONFIG_VF_REPS +#if !defined(HAVE_DEVLINK_PORT_ATTRS) && defined(HAVE_NDO_GET_PORT_PARENT_ID) + .ndo_get_port_parent_id = bnxt_get_port_parent_id, +#endif +#ifndef HAVE_NDO_DEVLINK_PORT +#ifdef HAVE_EXT_GET_PHYS_PORT_NAME + .extended.ndo_get_phys_port_name = bnxt_get_phys_port_name, +#else + .ndo_get_phys_port_name = bnxt_get_phys_port_name, +#endif +#endif /* HAVE_NDO_DEVLINK_PORT */ +#endif /* CONFIG_VF_REPS */ +#ifdef HAVE_NDO_DEVLINK_PORT + .ndo_get_devlink_port = bnxt_get_devlink_port, +#endif }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -11371,9 +14023,16 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_sriov_disable(bp); bnxt_dl_fw_reporters_destroy(bp, true); +#ifdef HAVE_DEVLINK_PORT_PARAM + if (BNXT_PF(bp)) + devlink_port_type_clear(&bp->dl_port); +#endif + bnxt_ptp_free(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); bnxt_dl_unregister(bp); + bnxt_free_l2_filters(bp, true); + bnxt_free_ntp_fltrs(bp, true); bnxt_shutdown_tc(bp); bnxt_cancel_sp_work(bp); bp->sp_event = 0; @@ -11381,17 +14040,26 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_ethtool_free(bp); - bnxt_dcb_free(bp); + bnxt_dcb_free(bp, false); kfree(bp->edev); bp->edev = NULL; + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; +#if defined(HAVE_NDO_XDP) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) + if (bp->xdp_prog) + bpf_prog_put(bp->xdp_prog); +#endif kfree(bp->fw_health); bp->fw_health = NULL; + kfree(bp->eem_info); + bp->eem_info = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; bnxt_free_port_stats(bp); free_netdev(dev); } @@ -11401,14 +14069,24 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) { + link_info->link_up = 1; + return 0; + } + rc = bnxt_hwrm_phy_qcaps(bp); if (rc) { netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", rc); return rc; } + if (!fw_dflt) + return 0; + + mutex_lock(&bp->link_lock); rc = bnxt_update_link(bp, false); if (rc) { + mutex_unlock(&bp->link_lock); netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", rc); return rc; @@ -11420,40 +14098,28 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) if (link_info->auto_link_speeds && !link_info->support_auto_speeds) link_info->support_auto_speeds = link_info->support_speeds; - if (!fw_dflt) - return 0; - - /*initialize the ethool setting copy with NVM settings */ - if 
(BNXT_AUTO_MODE(link_info->auto_mode)) { - link_info->autoneg = BNXT_AUTONEG_SPEED; - if (bp->hwrm_spec_code >= 0x10201) { - if (link_info->auto_pause_setting & - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - } else { - link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - } - link_info->advertising = link_info->auto_link_speeds; - } else { - link_info->req_link_speed = link_info->force_link_speed; - link_info->req_duplex = link_info->duplex_setting; - } - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - link_info->req_flow_ctrl = - link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; - else - link_info->req_flow_ctrl = link_info->force_pause_setting; + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); return 0; } static int bnxt_get_max_irq(struct pci_dev *pdev) { u16 ctrl; +#ifndef HAVE_MSIX_CAP + int msix_cap; + msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (!msix_cap) + return 1; + + pci_read_config_word(pdev, msix_cap + PCI_MSIX_FLAGS, &ctrl); +#else if (!pdev->msix_cap) return 1; pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); +#endif return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; } @@ -11663,56 +14329,116 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) return rc; } -static int bnxt_init_mac_addr(struct bnxt *bp) +#define BNXT_VPD_LEN 512 +static void bnxt_vpd_read_info(struct bnxt *bp) { - int rc = 0; + struct pci_dev *pdev = bp->pdev; + int i, len, pos, ro_size; + ssize_t vpd_size; + u8 *vpd_data; - if (BNXT_PF(bp)) { - memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); - } else { -#ifdef CONFIG_BNXT_SRIOV - struct bnxt_vf_info *vf = &bp->vf; - bool strict_approval = true; + vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); + if (!vpd_data) + return; - if (is_valid_ether_addr(vf->mac_addr)) { - /* overwrite netdev dev_addr with admin VF MAC */ - memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); - /* Older PF driver or firmware may not approve this - * correctly. 
- */ - strict_approval = false; - } else { - eth_hw_addr_random(bp->dev); - } - rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); -#endif + vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); + if (vpd_size <= 0) { + netdev_err(bp->dev, "Unable to read VPD\n"); + goto exit; } - return rc; + + i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + netdev_err(bp->dev, "VPD READ-Only not found\n"); + goto exit; + } + + ro_size = pci_vpd_lrdt_size(&vpd_data[i]); + i += PCI_VPD_LRDT_TAG_SIZE; + if (i + ro_size > vpd_size) + goto exit; + + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, + PCI_VPD_RO_KEYWORD_PARTNO); + if (pos < 0) + goto read_sn; + + len = pci_vpd_info_field_size(&vpd_data[pos]); + pos += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len + pos > vpd_size) + goto read_sn; + + strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN)); + +read_sn: + pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, "SN"); + if (pos < 0) + goto exit; + + len = pci_vpd_info_field_size(&vpd_data[pos]); + pos += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len + pos > vpd_size) + goto exit; + + strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN)); +exit: + kfree(vpd_data); } static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) { struct pci_dev *pdev = bp->pdev; - int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - u32 dw; + u64 qword; - if (!pos) { - netdev_info(bp->dev, "Unable do read adapter's DSN"); + qword = pci_get_dsn(pdev); + if (!qword) { + netdev_info(bp->dev, "Unable to read adapter's DSN\n"); return -EOPNOTSUPP; } - /* DSN (two dw) is at an offset of 4 from the cap pos */ - pos += 4; - pci_read_config_dword(pdev, pos, &dw); - put_unaligned_le32(dw, &dsn[0]); - pci_read_config_dword(pdev, pos + 4, &dw); - put_unaligned_le32(dw, &dsn[4]); + put_unaligned_le64(qword, dsn); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; } +static int bnxt_map_db_bar(struct bnxt *bp) +{ + if (!bp->db_size) + return -ENODEV; + + bp->db_size_nc = bp->db_size; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && + bp->tx_push_mode > BNXT_PUSH_MODE_LEGACY) { + if (bp->tx_push_mode == BNXT_PUSH_MODE_PPP) + bp->db_size_nc = PAGE_SIZE; + else if (BNXT_PF(bp)) + bp->db_size_nc = DB_PF_OFFSET_P5 + PAGE_SIZE; + else + bp->db_size_nc = DB_VF_OFFSET_P5 + PAGE_SIZE; + if (bp->db_size <= bp->db_size_nc) { + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + bp->db_size_nc = bp->db_size; + } else { + bp->db_base_wc = + ioremap_wc(pci_resource_start(bp->pdev, 2) + + bp->db_size_nc, + bp->db_size - bp->db_size_nc); + if (!bp->db_base_wc) { + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + netdev_warn(bp->dev, "Failed to map WCB pages, TX push not supported.\n"); + } + } + } + bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size_nc); + if (!bp->bar1) + return -ENOMEM; + return 0; +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct bnxt_hw_resc *hw_resc; static int version_printed; struct net_device *dev; struct bnxt *bp; @@ -11743,7 +14469,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (bnxt_vf_pciid(ent->driver_data)) bp->flags |= BNXT_FLAG_VF; +#ifndef HAVE_MSIX_CAP + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) +#else if (pdev->msix_cap) +#endif bp->flags |= BNXT_FLAG_MSIX_CAP; rc = bnxt_init_board(pdev, dev); @@ -11753,8 +14483,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; 
dev->ethtool_ops = &bnxt_ethtool_ops; +#ifdef CONFIG_VF_REPS +#ifndef HAVE_NDO_GET_PORT_PARENT_ID + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); +#endif +#endif pci_set_drvdata(pdev, dev); + if (BNXT_PF(bp)) + bnxt_vpd_read_info(bp); + rc = bnxt_alloc_hwrm_resources(bp); if (rc) goto init_err_pci_clean; @@ -11766,13 +14504,23 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; - if (BNXT_CHIP_P5(bp)) + if (BNXT_CHIP_P5(bp)) { bp->flags |= BNXT_FLAG_CHIP_P5; + if (BNXT_CHIP_SR2(bp)) + bp->flags |= BNXT_FLAG_CHIP_SR2; + } rc = bnxt_fw_init_one_p2(bp); if (rc) goto init_err_pci_clean; + rc = bnxt_map_db_bar(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", + rc); + goto init_err_pci_clean; + } + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -11781,20 +14529,29 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_GRO; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_LRO; +#endif +#ifdef NETDEV_HW_ENC_FEATURES dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; +#endif +#ifdef HAVE_GSO_PARTIAL_FEATURES dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; +#endif + dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; @@ -11816,27 +14573,35 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; + bp->ulp_version = BNXT_ULP_VERSION; bp->ulp_probe = bnxt_ulp_probe; + rc = bnxt_probe_phy(bp, true); + if (rc) + goto init_err_pci_clean; + + hw_resc = &bp->hw_resc; + bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + + BNXT_L2_FLTR_MAX_FLTR; + /* Older firmware may not report these filters properly */ + if (bp->max_fltr < BNXT_MAX_FLTR) + bp->max_fltr = BNXT_MAX_FLTR; + bnxt_init_l2_fltr_tbl(bp); rc = bnxt_init_mac_addr(bp); if (rc) { - dev_err(&pdev->dev, "Unable to initialize mac address.\n"); + netdev_err(bp->dev, "Unable to initialize mac address.\n"); rc = -EADDRNOTAVAIL; goto init_err_pci_clean; } if (BNXT_PF(bp)) { - /* Read the adapter's DSN to use as the eswitch switch_id */ - bnxt_pcie_dsn_get(bp, bp->switch_id); + /* Read the adapter's DSN to use as the eswitch id */ + rc = bnxt_pcie_dsn_get(bp, bp->dsn); + if (rc) + netdev_warn(dev, "Failed to read DSN.\n"); } - /* MTU range: 60 - FW defined max */ - dev->min_mtu = ETH_ZLEN; - dev->max_mtu = bp->max_mtu; - - rc = bnxt_probe_phy(bp, true); - if (rc) - goto init_err_pci_clean; + bnxt_set_netdev_mtu(bp); bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); @@ -11850,7 +14615,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct 
pci_device_id *ent) bnxt_fw_init_one_p3(bp); - if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; rc = bnxt_init_int_mode(bp); @@ -11862,6 +14627,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + rc = bnxt_alloc_rss_indir_tbl(bp); + if (rc) + goto init_err_pci_clean; + bnxt_set_dflt_rss_indir_tbl(bp); + if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -11880,14 +14650,22 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_cleanup; +#ifdef HAVE_DEVLINK_PORT_PARAM if (BNXT_PF(bp)) devlink_port_type_eth_set(&bp->dl_port, bp->dev); +#endif bnxt_dl_fw_reporters_create(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, (long)pci_resource_start(pdev, 0), dev->dev_addr); + pcie_print_link_status(pdev); + if (bnxt_ptp_init(bp)) { + netdev_warn(dev, "PTP initialization failed.\n"); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } return 0; @@ -11897,7 +14675,7 @@ init_err_cleanup: bnxt_clear_int_mode(bp); init_err_pci_clean: - bnxt_free_hwrm_short_cmd_req(bp); + bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); kfree(bp->fw_health); bp->fw_health = NULL; @@ -11905,6 +14683,8 @@ init_err_pci_clean: bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; init_err_free: free_netdev(dev); @@ -11931,7 +14711,8 @@ static void bnxt_shutdown(struct pci_dev *pdev) bnxt_clear_int_mode(bp); pci_disable_device(pdev); - if (system_state == SYSTEM_POWER_OFF) { + if (system_state == SYSTEM_POWER_OFF && + pdev->pm_cap) { pci_wake_from_d3(pdev, bp->wol); pci_set_power_state(pdev, PCI_D3hot); } @@ -11948,11 +14729,16 @@ static int bnxt_suspend(struct device *device) int rc = 0; rtnl_lock(); + bnxt_ulp_stop(bp); if (netif_running(dev)) { netif_device_detach(dev); rc = bnxt_close(dev); } bnxt_hwrm_func_drv_unrgtr(bp); + pci_disable_device(bp->pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rtnl_unlock(); return rc; } @@ -11964,7 +14750,14 @@ static int bnxt_resume(struct device *device) int rc = 0; rtnl_lock(); - if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) { + rc = pci_enable_device(bp->pdev); + if (rc) { + netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", + rc); + goto resume_exit; + } + pci_set_master(bp->pdev); + if (bnxt_hwrm_ver_get(bp)) { rc = -ENODEV; goto resume_exit; } @@ -11973,12 +14766,25 @@ static int bnxt_resume(struct device *device) rc = -EBUSY; goto resume_exit; } + + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) + goto resume_exit; + + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { + rc = -ENODEV; + goto resume_exit; + } + bnxt_get_wol_settings(bp); if (netif_running(dev)) { rc = bnxt_open(dev); if (!rc) netif_device_attach(dev); } + bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); resume_exit: rtnl_unlock(); @@ -12024,6 +14830,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, bnxt_close(netdev); pci_disable_device(pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rtnl_unlock(); /* Request a slot reset.
*/ @@ -12041,6 +14850,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, */ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) { + struct net_device *netdev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(netdev); int err = 0; @@ -12057,21 +14867,35 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); err = bnxt_hwrm_func_reset(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); - if (!err) { + err = bnxt_hwrm_func_qcaps(bp); + if (!err && netif_running(netdev)) + err = bnxt_open(netdev); + } + bnxt_ulp_start(bp, err); + if (!err) { + bnxt_reenable_sriov(bp); result = PCI_ERS_RESULT_RECOVERED; - bnxt_ulp_start(bp); } } - if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) - dev_close(netdev); + if (result != PCI_ERS_RESULT_RECOVERED) { + if (netif_running(netdev)) + dev_close(netdev); + pci_disable_device(pdev); + } rtnl_unlock(); - return PCI_ERS_RESULT_RECOVERED; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } +#endif + return result; } /** @@ -12098,6 +14922,13 @@ static const struct pci_error_handlers bnxt_err_handler = { .resume = bnxt_io_resume }; +#if defined(CONFIG_BNXT_SRIOV) && \ + defined(SRIOV_CONF_DEF_IN_PCI_DRIVER_RH) +static struct pci_driver_rh bnxt_pci_driver_rh = { + .sriov_configure = bnxt_sriov_configure +}; +#endif + static struct pci_driver bnxt_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnxt_pci_tbl, @@ -12106,19 +14937,39 @@ static struct pci_driver bnxt_pci_driver = { .shutdown = bnxt_shutdown, .driver.pm = BNXT_PM_OPS, .err_handler = &bnxt_err_handler, -#if defined(CONFIG_BNXT_SRIOV) +#if defined(CONFIG_BNXT_SRIOV) && defined(PCIE_SRIOV_CONFIGURE) +#ifndef SRIOV_CONF_DEF_IN_PCI_DRIVER_RH .sriov_configure = bnxt_sriov_configure, +#else + .rh_reserved = &bnxt_pci_driver_rh, +#endif #endif }; static int __init bnxt_init(void) { + int rc; + +#ifndef PCIE_SRIOV_CONFIGURE + bnxt_sriov_init(num_vfs); +#endif + bnxt_lfc_init(); + + rc = bnxt_nl_register(); + if (rc) + pr_info("%s : failed to load\n", BNXT_NL_NAME); + bnxt_debug_init(); return pci_register_driver(&bnxt_pci_driver); } static void __exit bnxt_exit(void) { +#ifndef PCIE_SRIOV_CONFIGURE + bnxt_sriov_exit(); +#endif + bnxt_lfc_exit(); + bnxt_nl_unregister(); pci_unregister_driver(&bnxt_pci_driver); if (bnxt_pf_wq) destroy_workqueue(bnxt_pf_wq); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index cda7ba31095a..939635a625aa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -11,28 +11,51 @@ #ifndef BNXT_H #define BNXT_H +#include "bnxt_extra_ver.h" + #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.10.0" +#define DRV_MODULE_VERSION "1.10.1" DRV_MODULE_EXTRA_VER #define DRV_VER_MAJ 1 #define DRV_VER_MIN 10 -#define DRV_VER_UPD 0 +#define DRV_VER_UPD 1 #include <linux/interrupt.h> -#include <linux/rhashtable.h> #include <linux/crash_dump.h> +#ifdef HAVE_DEVLINK #include <net/devlink.h> +#endif +#ifdef HAVE_METADATA_HW_PORT_MUX #include <net/dst_metadata.h> +#endif +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#include <linux/rhashtable.h> +#endif +#if defined(HAVE_SWITCHDEV) && !defined(HAVE_NDO_GET_PORT_PARENT_ID) +#include <net/switchdev.h> +#endif +#ifdef HAVE_XDP_RXQ_INFO #include <net/xdp.h> 
+#endif +#ifdef HAVE_DIM #include <linux/dim.h> +#else +#include "bnxt_dim.h" +#endif +#ifdef CONFIG_TEE_BNXT_FW +#include <linux/firmware/broadcom/tee_bnxt_fw.h> +#endif +#ifdef CONFIG_PAGE_POOL struct page_pool; +#endif struct tx_bd { __le32 tx_bd_len_flags_type; #define TX_BD_TYPE (0x3f << 0) #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0) #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0) + #define TX_BD_TYPE_LONG_TX_BD_INLINE (0x11 << 0) #define TX_BD_FLAGS_PACKET_END (1 << 6) #define TX_BD_FLAGS_NO_CMPL (1 << 7) #define TX_BD_FLAGS_BD_CNT (0x1f << 8) @@ -109,6 +132,10 @@ struct tx_cmp { __le32 tx_cmp_flags_type; #define CMP_TYPE (0x3f << 0) #define CMP_TYPE_TX_L2_CMP 0 + #define CMP_TYPE_TX_L2_COAL_CMP 2 + #define CMP_TYPE_TX_L2_PTP_CMP 3 + #define CMP_TYPE_RX_L2_TPA_START_V2_CMP 13 + #define CMP_TYPE_RX_L2_V2_CMP 15 #define CMP_TYPE_RX_L2_CMP 17 #define CMP_TYPE_RX_AGG_CMP 18 #define CMP_TYPE_RX_L2_TPA_START_CMP 19 @@ -151,6 +178,7 @@ struct rx_cmp { #define RX_CMP_FLAGS_RSS_VALID (1 << 10) #define RX_CMP_FLAGS_UNUSED (1 << 11) #define RX_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_CMP_FLAGS_ITYPES_MASK 0xf000 #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) #define RX_CMP_FLAGS_ITYPE_IP (1 << 12) #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12) @@ -171,6 +199,12 @@ struct rx_cmp { #define RX_CMP_RSS_HASH_TYPE_SHIFT 9 #define RX_CMP_PAYLOAD_OFFSET (0xff << 16) #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16 + #define RX_CMP_METADATA1 (0xf << 28) + #define RX_CMP_METADATA1_SHIFT 28 + #define RX_CMP_METADATA1_TPID_SEL (0x7 << 28) + #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28) + #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28) + #define RX_CMP_METADATA1_VALID (0x8 << 28) __le32 rx_cmp_rss_hash; }; @@ -184,13 +218,23 @@ struct rx_cmp { (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) +#define RX_CMP_VLAN_VALID(rxcmp) \ + ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID)) + +#define RX_CMP_VLAN_TPID_SEL(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL) + struct rx_cmp_ext { __le32 rx_cmp_flags2; #define RX_CMP_FLAGS2_IP_CS_CALC 0x1 #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_CMP_FLAGS2_CS_ALL_OK_MODE (0x1 << 3) #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) + #define RX_CMP_FLAGS2_CS_OK_MASK (0x3f << 10) + #define RX_CMP_FLAGS2_L4_CS_OK_MASK (0x38 << 10) + #define RX_CMP_FLAGS2_CS_OK_SFT 10 __le32 rx_cmp_meta_data; #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff @@ -229,8 +273,18 @@ struct rx_cmp_ext { #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12) #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12) + #define RX_CMPL_V2_ERRORS_OT_PKT_ERROR_MASK (0x7 << 4) + #define RX_CMPL_V2_ERRORS_OT_PKT_L4_CS_ERROR (0x7 << 4) + #define RX_CMPL_V2_ERRORS_T_PKT_ERROR_MASK (0x7 << 9) + #define RX_CMPL_V2_ERRORS_T_PKT_L4_CS_ERROR (0x7 << 9) + #define RX_CMPL_V2_ERRORS_PKT_ERROR_MASK (0xf << 12) + #define RX_CMPL_V2_ERRORS_PKT_L4_CS_ERROR (0xa << 12) + #define RX_CMPL_CFA_CODE_MASK (0xffff << 16) #define RX_CMPL_CFA_CODE_SFT 16 + #define RX_CMPL_METADATA0_MASK (0xffff << 16) + #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16) + #define RX_CMPL_METADATA0_SFT 16 __le32 rx_cmp_unused3; }; @@ -252,10 +306,37 @@ struct rx_cmp_ext { ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) +#define RX_CMP_V2_L4_CS_OK(rxcmp1) \ + 
(le32_to_cpu((rxcmp1)->rx_cmp_flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) + +#define RX_CMP_V2_L4_CS_OK_CNT(rxcmp1) \ + (RX_CMP_V2_L4_CS_OK(rxcmp1) >> RX_CMP_FLAGS2_CS_OK_SFT) + +#define RX_CMP_V2_OT_L4_CS_ERR(err) \ + (((err) & RX_CMPL_V2_ERRORS_OT_PKT_ERROR_MASK) == \ + RX_CMPL_V2_ERRORS_OT_PKT_L4_CS_ERROR) + +#define RX_CMP_V2_T_L4_CS_ERR(err) \ + (((err) & RX_CMPL_V2_ERRORS_T_PKT_ERROR_MASK) == \ + RX_CMPL_V2_ERRORS_T_PKT_L4_CS_ERROR) + +#define RX_CMP_V2_L4_CS_ERR(err) \ + (((err) & RX_CMPL_V2_ERRORS_PKT_ERROR_MASK) == \ + RX_CMPL_V2_ERRORS_PKT_L4_CS_ERROR) + +#define RX_CMP_V2_L4_CS_ERRS(rxcmp1) ({ \ + u32 __err = le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2); \ + (RX_CMP_V2_OT_L4_CS_ERR(__err) || RX_CMP_V2_T_L4_CS_ERR(__err) ||\ + RX_CMP_V2_L4_CS_ERR(__err)) ? 1 : 0; }) + #define RX_CMP_CFA_CODE(rxcmpl1) \ ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) +#define RX_CMP_METADATA0_VID(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_METADATA0_VID_MASK) >> RX_CMPL_METADATA0_SFT) + struct rx_agg_cmp { __le32 rx_agg_cmp_len_flags_type; #define RX_AGG_CMP_TYPE (0x3f << 0) @@ -264,7 +345,7 @@ struct rx_agg_cmp { u32 rx_agg_cmp_opaque; __le32 rx_agg_cmp_v; #define RX_AGG_CMP_V (1 << 0) - #define RX_AGG_CMP_AGG_ID (0xffff << 16) + #define RX_AGG_CMP_AGG_ID (0x0fff << 16) #define RX_AGG_CMP_AGG_ID_SHIFT 16 __le32 rx_agg_cmp_unused; }; @@ -300,8 +381,10 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 - #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16) #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 + #define RX_TPA_START_CMP_METADATA1 (0xf << 28) + #define RX_TPA_START_CMP_METADATA1_SHIFT 28 __le32 rx_tpa_start_cmp_rss_hash; }; @@ -333,6 +416,10 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_TPA_START_CMP_FLAGS2_CS_ALL_OK_MODE (0x1 << 3) + #define RX_TPA_START_CMP_FLAGS2_CS_OK_MASK (0x3f << 10) + #define RX_TPA_START_CMP_FLAGS2_L4_CS_OK_MASK (0x38 << 10) + #define RX_TPA_START_CMP_FLAGS2_CS_OK_SFT 10 #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9) #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10) @@ -350,6 +437,8 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 + #define RX_TPA_START_CMP_METADATA0_MASK (0xffff << 16) + #define RX_TPA_START_CMP_METADATA0_SFT 16 __le32 rx_tpa_start_cmp_hdr_info; }; @@ -485,6 +574,23 @@ struct rx_tpa_end_cmp_ext { !!((data1) & \ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED) +#define EVENT_DATA1_VNIC_CHNG_PF_ID(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK) >>\ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT) + +#define EVENT_DATA1_VNIC_CHNG_VF_ID(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>\ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT) + +#define EVENT_DATA1_VNIC_CHNG_VNIC_STATE(data1) \ + ((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK) + +#define 
EVENT_DATA1_VNIC_CHNG_VNIC_STATE_ALLOC 1 +#define EVENT_DATA1_VNIC_CHNG_VNIC_STATE_FREE 2 + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -508,14 +614,18 @@ struct nqe_cn { #define DB_KEY_TX_PUSH (0x4 << 28) #define DB_LONG_TX_PUSH (0x2 << 24) -#define BNXT_MIN_ROCE_CP_RINGS 2 -#define BNXT_MIN_ROCE_STAT_CTXS 1 - /* 64-bit doorbell */ #define DBR_INDEX_MASK 0x0000000000ffffffULL +#define DBR_PI_LO_MASK 0xff000000UL +#define DBR_PI_LO_SFT 24 +#define DBR_EPOCH_MASK 0x01000000UL +#define DBR_EPOCH_SFT 24 #define DBR_XID_MASK 0x000fffff00000000ULL #define DBR_XID_SFT 32 +#define DBR_PI_HI_MASK 0xf0000000000000ULL +#define DBR_PI_HI_SFT 52 #define DBR_PATH_L2 (0x1ULL << 56) +#define DBR_VALID (0x1ULL << 58) #define DBR_TYPE_SQ (0x0ULL << 60) #define DBR_TYPE_RQ (0x1ULL << 60) #define DBR_TYPE_SRQ (0x2ULL << 60) @@ -528,8 +638,21 @@ struct nqe_cn { #define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60) #define DBR_TYPE_NQ (0xaULL << 60) #define DBR_TYPE_NQ_ARM (0xbULL << 60) +#define DBR_TYPE_PUSH_START (0xcULL << 60) +#define DBR_TYPE_PUSH_END (0xdULL << 60) #define DBR_TYPE_NULL (0xfULL << 60) +#define DB_PF_OFFSET_P5 0x10000 +#define DB_VF_OFFSET_P5 0x4000 + +#define DB_WCB_FIRST_OFFSET 16 +#define DB_WCB_PER_PAGE 15 +#define DB_WCB_PAGE_SIZE 4096 +#define DB_WCB_BUFFER_SIZE 256 + +#define DB_PPP_SIZE 256 +#define DB_PPP_BD_OFFSET 16 + #define INVALID_HW_RING_ID ((u16)-1) /* The hardware supports certain page sizes. Use the supported page sizes @@ -601,10 +724,11 @@ struct nqe_cn { #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) -#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define RX_RING(x) (((x) & bp->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4)) +#define RX_AGG_RING(x) (((x) & bp->rx_agg_ring_mask) >> (BNXT_PAGE_SHIFT - 4)) #define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) -#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define TX_RING(x) (((x) & bp->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4)) #define TX_IDX(x) ((x) & (TX_DESC_CNT - 1)) #define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) @@ -631,48 +755,26 @@ struct nqe_cn { #define RX_CMP_TYPE(rxcmp) \ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE) -#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask) +#define RING_RX(idx) ((idx) & bp->rx_ring_mask) +#define NEXT_RX(idx) ((idx) + 1) -#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask) +#define RING_RX_AGG(idx) ((idx) & bp->rx_agg_ring_mask) +#define NEXT_RX_AGG(idx) ((idx) + 1) -#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask) +#define RING_TX(idx) ((idx) & bp->tx_ring_mask) +#define NEXT_TX(idx) ((idx) + 1) + +#define TX_PUSH_LEN(len) \ + ((len) + sizeof(struct tx_bd) + sizeof(struct tx_bd_ext)) + +#define TX_INLINE_BDS(len) (DIV_ROUND_UP(len, sizeof(struct tx_bd))) #define ADV_RAW_CMP(idx, n) ((idx) + (n)) #define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) -#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define DFLT_HWRM_CMD_TIMEOUT 500 -#define SHORT_HWRM_CMD_TIMEOUT 20 -#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) -#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) -#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define HWRM_RESP_ERR_CODE_MASK 0xffff -#define HWRM_RESP_LEN_OFFSET 4 -#define HWRM_RESP_LEN_MASK 0xffff0000 
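A worked example helps with the ring-index change in the hunk above: the software producer indices now run monotonically (NEXT_RX()/NEXT_TX() no longer mask), and the mask is applied only at use time via RING_RX()/RING_TX() and the DB_RING_IDX()/DB_EPOCH() macros defined just below, which fold in an epoch bit that flips on every ring wrap. A self-contained sketch under hypothetical sizes (a 1024-entry ring, with the epoch landing on doorbell bit 24 per DBR_EPOCH_SFT); the function and constants are illustrative, not from the patch:

static u32 example_db_ring_idx(u32 raw_idx)
{
	const u32 ring_mask = 0x3ff;	/* 1024-entry ring (hypothetical) */
	const u32 epoch_mask = 0x400;	/* first bit above the ring mask */
	const u32 epoch_shift = 14;	/* bit 10 shifted to bit 24, DBR_EPOCH */

	/* raw index 1025 (0x401) -> slot 1 with the epoch bit set: the
	 * producer has wrapped once, so P5 hardware can distinguish this
	 * doorbell from a pre-wrap write to the same slot.
	 */
	return (raw_idx & ring_mask) | ((raw_idx & epoch_mask) << epoch_shift);
}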
-#define HWRM_RESP_LEN_SFT 16 -#define HWRM_RESP_VALID_MASK 0xff000000 -#define BNXT_HWRM_REQ_MAX_SIZE 128 -#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ - BNXT_HWRM_REQ_MAX_SIZE) -#define HWRM_SHORT_MIN_TIMEOUT 3 -#define HWRM_SHORT_MAX_TIMEOUT 10 -#define HWRM_SHORT_TIMEOUT_COUNTER 5 - -#define HWRM_MIN_TIMEOUT 25 -#define HWRM_MAX_TIMEOUT 40 - -#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ - ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ - (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ - ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) - -#define HWRM_VALID_BIT_DELAY_USEC 150 - -#define BNXT_HWRM_CHNL_CHIMP 0 -#define BNXT_HWRM_CHNL_KONG 1 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -688,6 +790,7 @@ struct bnxt_sw_tx_bd { DEFINE_DMA_UNMAP_LEN(len); u8 is_gso; u8 is_push; + u8 inline_data_bds; u8 action; union { unsigned short nr_frags; @@ -716,6 +819,7 @@ struct bnxt_ring_mem_info { #define BNXT_RMEM_USE_FULL_PAGE_FLAG 4 u16 depth; + u8 init_val; void **pg_arr; dma_addr_t *dma_arr; @@ -757,8 +861,23 @@ struct bnxt_db_info { u64 db_key64; u32 db_key32; }; + u32 db_ring_mask; + u32 db_epoch_mask; + u8 db_epoch_shift; }; +#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \ + ((db)->db_epoch_shift)) +#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \ + DB_EPOCH(db, idx)) + +#define DB_PUSH_LEN(len) (DB_PUSH_INFO_PUSH_SIZE_MASK & \ + (((sizeof(struct db_push_info) + \ + sizeof(struct dbc_dbc)) / 8 + \ + (len)) << DB_PUSH_INFO_PUSH_SIZE_SFT)) + +#define DB_PUSH_INFO(db, len, idx) (DB_PUSH_LEN(len) | DB_RING_IDX(db, idx)) + struct bnxt_tx_ring_info { struct bnxt_napi *bnapi; u16 tx_prod; @@ -771,6 +890,8 @@ struct bnxt_tx_ring_info { dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; + struct bnxt_db_info tx_push_db; + void __iomem *tx_push_wcb; struct tx_push_buffer *tx_push; dma_addr_t tx_push_mapping; __le64 data_mapping; @@ -873,7 +994,9 @@ struct bnxt_rx_ring_info { struct bnxt_db_info rx_db; struct bnxt_db_info rx_agg_db; +#ifdef HAVE_NDO_XDP struct bpf_prog *xdp_prog; +#endif struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; struct bnxt_sw_rx_bd *rx_buf_ring; @@ -895,8 +1018,40 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; +#ifdef HAVE_XDP_RXQ_INFO struct xdp_rxq_info xdp_rxq; - struct page_pool *page_pool; +#endif +#ifdef CONFIG_PAGE_POOL + struct page_pool *page_pool; +#endif +}; + +struct bnxt_rx_sw_stats { + u64 rx_l4_csum_errors; + u64 rx_resets; + u64 rx_buf_errors; +}; + +struct bnxt_tx_sw_stats { + u64 tx_sw_errors; +}; + +struct bnxt_cmn_sw_stats { + u64 missed_irqs; +}; + +struct bnxt_sw_stats { + struct bnxt_rx_sw_stats rx; + struct bnxt_tx_sw_stats tx; + struct bnxt_cmn_sw_stats cmn; +}; + +struct bnxt_stats_mem { + u64 *sw_stats; + u64 *hw_masks; + void *hw_stats; + dma_addr_t hw_stats_map; + int len; }; struct bnxt_cp_ring_info { @@ -923,12 +1078,10 @@ struct bnxt_cp_ring_info { dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; - struct ctx_hw_stats *hw_stats; - dma_addr_t hw_stats_map; + struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; - u64 rx_l4_csum_errors; - u64 rx_buf_errors; - u64 missed_irqs; + + struct bnxt_sw_stats sw_stats; struct bnxt_ring_struct cp_ring_struct; @@ -954,9 +1107,21 @@ struct bnxt_napi { u32 flags; #define BNXT_NAPI_FLAG_XDP 0x1 +#ifdef BNXT_PRIV_RX_BUSY_POLL + atomic_t poll_state; +#endif bool in_reset; }; +#ifdef BNXT_PRIV_RX_BUSY_POLL +enum bnxt_poll_state_t { + BNXT_STATE_IDLE = 0, + BNXT_STATE_NAPI, + BNXT_STATE_POLL, + 
BNXT_STATE_DISABLE, +}; +#endif + struct bnxt_irq { irq_handler_t handler; unsigned int vector; @@ -988,7 +1153,7 @@ struct bnxt_vnic_info { u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; u16 fw_l2_ctx_id; #define BNXT_MAX_UC_ADDRS 4 - __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; + struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS]; /* index 0 always dev_addr */ u16 uc_filter_count; u8 *uc_list; @@ -998,6 +1163,16 @@ struct bnxt_vnic_info { __le16 *rss_table; dma_addr_t rss_hash_key_dma_addr; u64 *rss_hash_key; + int rss_table_size; +#define BNXT_RSS_TABLE_ENTRIES_P5 64 +#define BNXT_RSS_TABLE_SIZE_P5 (BNXT_RSS_TABLE_ENTRIES_P5 * 4) +#define BNXT_RSS_TABLE_MAX_TBL_P5 8 +#define BNXT_MAX_RSS_TABLE_SIZE_P5 \ + (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) + +#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \ + (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) + u32 rx_mask; u8 *mc_list; @@ -1041,6 +1216,12 @@ struct bnxt_hw_resc { u16 max_nqs; u16 max_irqs; u16 resv_irqs; + u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; }; #if defined(CONFIG_BNXT_SRIOV) @@ -1050,6 +1231,8 @@ struct bnxt_vf_info { u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only * stored by PF. */ + u8 vnic_state_pending; + u8 vnic_state; u16 vlan; u16 func_qcfg_flags; u32 flags; @@ -1058,11 +1241,19 @@ struct bnxt_vf_info { #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 #define BNXT_VF_TRUST 0x10 - u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; + u16 min_tx_rings; + u16 max_tx_rings; + u16 min_rx_rings; + u16 max_rx_rings; + u16 min_cp_rings; + u16 min_stat_ctxs; + u16 min_ring_grps; + u16 min_vnics; void *hwrm_cmd_req_addr; dma_addr_t hwrm_cmd_req_dma_addr; + unsigned long police_id; }; #endif @@ -1076,36 +1267,75 @@ struct bnxt_pf_info { u16 active_vfs; u16 registered_vfs; u16 max_vfs; - u32 max_encap_records; - u32 max_decap_records; - u32 max_tx_em_flows; - u32 max_tx_wm_flows; - u32 max_rx_em_flows; - u32 max_rx_wm_flows; + u16 vf_hwrm_cmd_req_page_shift; unsigned long *vf_event_bmap; u16 hwrm_cmd_req_pages; u8 vf_resv_strategy; #define BNXT_VF_RESV_STRATEGY_MAXIMAL 0 #define BNXT_VF_RESV_STRATEGY_MINIMAL 1 #define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2 - void *hwrm_cmd_req_addr[4]; - dma_addr_t hwrm_cmd_req_dma_addr[4]; - struct bnxt_vf_info *vf; + +#define BNXT_MAX_VF_CMD_FWD_PAGES 4 + void *hwrm_cmd_req_addr[BNXT_MAX_VF_CMD_FWD_PAGES]; + dma_addr_t hwrm_cmd_req_dma_addr[BNXT_MAX_VF_CMD_FWD_PAGES]; + struct bnxt_vf_info __rcu *vf; +}; + +struct bnxt_filter_base { + struct hlist_node hash; + __le64 filter_id; + u8 type; +#define BNXT_FLTR_TYPE_NTUPLE 1 +#define BNXT_FLTR_TYPE_L2 2 + u8 flags; +#define BNXT_ACT_DROP 1 +#define BNXT_ACT_RING_DST 2 +#define BNXT_ACT_FUNC_DST 4 +#define BNXT_ACT_NO_AGING 8 + u16 sw_id; + u16 rxq; + u16 fw_vnic_id; + u16 vf_idx; + unsigned long state; +#define BNXT_FLTR_VALID 0 +#define BNXT_FLTR_INSERTED 1 +#define BNXT_FLTR_FW_DELETED 2 + + struct rcu_head rcu; }; struct bnxt_ntuple_filter { - struct hlist_node hash; - u8 dst_mac_addr[ETH_ALEN]; - u8 src_mac_addr[ETH_ALEN]; + struct bnxt_filter_base base; struct flow_keys fkeys; - __le64 filter_id; - u16 sw_id; - u8 l2_fltr_idx; - u16 rxq; + struct bnxt_l2_filter *l2_fltr; + u32 ntuple_flags; +#define BNXT_NTUPLE_MATCH_SRC_IP 1 +#define BNXT_NTUPLE_MATCH_DST_IP 2 +#define BNXT_NTUPLE_MATCH_SRC_PORT 4 +#define BNXT_NTUPLE_MATCH_DST_PORT 8 +#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \ + 
BNXT_NTUPLE_MATCH_DST_IP | \ + BNXT_NTUPLE_MATCH_SRC_PORT | \ + BNXT_NTUPLE_MATCH_DST_PORT) u32 flow_id; - unsigned long state; -#define BNXT_FLTR_VALID 0 -#define BNXT_FLTR_UPDATE 1 +}; + +struct bnxt_l2_key { + union { + u8 dst_mac_addr[ETH_ALEN]; + u32 filter_key; + } __packed; + u16 vlan; +}; + +#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4) +#define BNXT_NTUPLE_KEY_SIZE ((sizeof(struct flow_keys) - \ + FLOW_KEYS_HASH_OFFSET) / 4) + +struct bnxt_l2_filter { + struct bnxt_filter_base base; + struct bnxt_l2_key l2_key; + atomic_t refcnt; }; struct bnxt_link_info { @@ -1153,7 +1383,9 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB #define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB #define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB +#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB u16 support_speeds; + u16 support_pam4_speeds; u16 auto_link_speeds; /* fw adv setting */ #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB @@ -1165,9 +1397,16 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB #define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB + u16 auto_pam4_link_speeds; +#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G +#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G +#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G u16 support_auto_speeds; + u16 support_pam4_auto_speeds; u16 lp_auto_link_speeds; + u16 lp_auto_pam4_link_speeds; u16 force_link_speed; + u16 force_pam4_link_speed; u32 preemphasis; u8 module_status; u16 fec_cfg; @@ -1179,10 +1418,14 @@ struct bnxt_link_info { u8 autoneg; #define BNXT_AUTONEG_SPEED 1 #define BNXT_AUTONEG_FLOW_CTRL 2 + u8 req_signal_mode; +#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ +#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; u16 advertising; /* user adv setting */ + u16 advertising_pam4; bool force_link_chng; bool phy_retry; @@ -1220,28 +1463,48 @@ struct bnxt_led_info { struct bnxt_test_info { u8 offline_mask; u8 flags; -#define BNXT_TEST_FL_EXT_LPBK 0x1 +#define BNXT_TEST_FL_EXT_LPBK 0x1 +#define BNXT_TEST_FL_AN_PHY_LPBK 0x2 u16 timeout; char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; }; +#define CHIMP_REG_VIEW_ADDR \ + ((bp->flags & BNXT_FLAG_CHIP_P5) ? 
0x80000000 : 0xb1000000) + #define BNXT_GRCPF_REG_CHIMP_COMM 0x0 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 -#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 -#define BNXT_CAG_REG_BASE 0x300000 +#define BNXT_GRCPF_REG_SYNC_TIME 0x480 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ 0x488 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK 0xffffffUL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_SFT 0 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_MSK 0x1f000000UL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT 24 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_MSK 0x20000000UL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT 29 #define BNXT_GRCPF_REG_KONG_COMM 0xA00 #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00 +#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 +#define BNXT_CAG_REG_BASE 0x300000 + #define BNXT_GRC_BASE_MASK 0xfffff000 #define BNXT_GRC_OFFSET_MASK 0x00000ffc +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD struct bnxt_tc_flow_stats { u64 packets; u64 bytes; }; +struct bnxt_flower_indr_block_cb_priv { + struct net_device *tunnel_netdev; + struct bnxt *bp; + struct list_head list; +}; + struct bnxt_tc_info { bool enabled; @@ -1279,7 +1542,9 @@ struct bnxt_tc_info { u64 bytes_mask; u64 packets_mask; }; +#endif +#ifdef CONFIG_VF_REPS struct bnxt_vf_rep_stats { u64 packets; u64 bytes; @@ -1297,6 +1562,7 @@ struct bnxt_vf_rep { struct bnxt_vf_rep_stats rx_stats; struct bnxt_vf_rep_stats tx_stats; }; +#endif #define PTU_PTE_VALID 0x1UL #define PTU_PTE_LAST 0x2UL @@ -1339,6 +1605,8 @@ struct bnxt_ctx_mem_info { u32 tim_max_entries; u16 mrav_num_entries_units; u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + u8 tqm_fp_rings_count; u32 flags; #define BNXT_CTX_FLAG_INITED 0x01 @@ -1371,6 +1639,8 @@ struct bnxt_fw_health { u32 last_fw_reset_cnt; u8 enabled:1; u8 master:1; + u8 fatal:1; + u8 status_reliable:1; u8 tmr_multiplier; u8 tmr_counter; u8 fw_reset_seq_cnt; @@ -1378,8 +1648,8 @@ struct bnxt_fw_health { u32 fw_reset_seq_vals[16]; u32 fw_reset_seq_delay_msec[16]; struct devlink_health_reporter *fw_reporter; - struct devlink_health_reporter *fw_reset_reporter; struct devlink_health_reporter *fw_fatal_reporter; + struct devlink_health_reporter *fw_reset_reporter; }; struct bnxt_fw_reporter_ctx { @@ -1398,9 +1668,31 @@ struct bnxt_fw_reporter_ctx { #define BNXT_FW_HEALTH_WIN_BASE 0x3000 #define BNXT_FW_HEALTH_WIN_MAP_OFF 8 +#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \ + ((reg) & BNXT_GRC_OFFSET_MASK)) + #define BNXT_FW_STATUS_HEALTHY 0x8000 #define BNXT_FW_STATUS_SHUTDOWN 0x100000 +struct bnxt_oem_eem_req { + __le32 entry_num; +}; + +struct bnxt_oem_cmd_req { + __le32 req_type; + #define BNXT_OEM_CMD_TYPE_EEM_SYS_MEMORY 0x1 + union { + struct bnxt_oem_eem_req eem_req; + } req; +}; + +enum bnxt_push_mode { + BNXT_PUSH_MODE_NONE = 0, + BNXT_PUSH_MODE_LEGACY, /* legacy silicon operation mode */ + BNXT_PUSH_MODE_WCB, /* write combining supported on P5 silicon */ + BNXT_PUSH_MODE_PPP, /* double buffered mode supported on SR2 */ +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1429,6 +1721,8 @@ struct bnxt { #define CHIP_NUM_57414L 0x16db #define CHIP_NUM_5745X 0xd730 +#define CHIP_NUM_57452 0xc452 +#define CHIP_NUM_57454 0xc454 #define CHIP_NUM_57508 0x1750 #define CHIP_NUM_57504 0x1751 @@ -1438,6 +1732,8 @@ struct bnxt { #define CHIP_NUM_58804 0xd804 #define CHIP_NUM_58808 0xd808 +#define CHIP_NUM_58818 0xd818 + #define BNXT_CHIP_NUM_5730X(chip_num) \ ((chip_num) >= CHIP_NUM_57301 && \ (chip_num) <= CHIP_NUM_57304) @@ -1461,7 +1757,10 @@ struct bnxt { 
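The ulp_version handshake documented inside struct bnxt below is easiest to read from the consumer's side: an out-of-tree RoCE (bnxt_re) driver first checks the version word and only then calls through ulp_probe. A hedged sketch, not taken from the patch; example_ulp_attach and the calling convention around it are illustrative:

static struct bnxt_en_dev *example_ulp_attach(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* Refuse to attach on an interface mismatch; fields laid out
	 * after ulp_probe cannot be trusted if the version differs.
	 */
	if (bp->ulp_version != BNXT_ULP_VERSION)
		return NULL;
	return bp->ulp_probe ? bp->ulp_probe(dev) : NULL;
}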
((chip_num) == CHIP_NUM_58700) #define BNXT_CHIP_NUM_5745X(chip_num) \ - ((chip_num) == CHIP_NUM_5745X) + ((chip_num) == CHIP_NUM_5745X || \ + (chip_num) == CHIP_NUM_57452 || \ + (chip_num) == CHIP_NUM_57454) + #define BNXT_CHIP_NUM_57X0X(chip_num) \ (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num)) @@ -1474,6 +1773,25 @@ struct bnxt { (chip_num) == CHIP_NUM_58804 || \ (chip_num) == CHIP_NUM_58808) + u8 chip_rev; +#ifdef BNXT_FPGA + u8 chip_platform_type; + +#define BNXT_ASIC(bp) \ + ((bp)->chip_platform_type == VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC) +#else +#define BNXT_ASIC(bp) true +#endif +#define BNXT_VPD_FLD_LEN 32 + char board_partno[BNXT_VPD_FLD_LEN]; + + /* Must remain NULL for old bnxt_re upstream/ + * inbox driver. + */ + void *reserved_ulp_kabi_0; + + char board_serialno[BNXT_VPD_FLD_LEN]; + struct net_device *dev; struct pci_dev *pdev; @@ -1509,15 +1827,16 @@ struct bnxt { BNXT_FLAG_ROCEV2_CAP) #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 + #define BNXT_FLAG_CHIP_SR2 0x80000 #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 - #define BNXT_FLAG_DIM 0x2000000 + #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 - #define BNXT_FLAG_PCIE_STATS 0x40000000 - + #define BNXT_FLAG_DIM 0x20000000 + #define BNXT_FLAG_USR_RSS_HASH_KEY 0x80000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ BNXT_FLAG_STRIP_VLAN) @@ -1527,18 +1846,26 @@ struct bnxt { #define BNXT_NPAR(bp) ((bp)->port_partition_type) #define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) +#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \ + ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ (bp)->max_tpa_v2) && !is_kdump_kernel()) -/* Chip class phase 5 */ -#define BNXT_CHIP_P5(bp) \ +#define BNXT_CHIP_SR2(bp) \ + ((bp)->chip_num == CHIP_NUM_58818) + +#define BNXT_CHIP_P5_THOR(bp) \ ((bp)->chip_num == CHIP_NUM_57508 || \ (bp)->chip_num == CHIP_NUM_57504 || \ (bp)->chip_num == CHIP_NUM_57502) +/* Chip class phase 5 */ +#define BNXT_CHIP_P5(bp) \ + (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp)) + /* Chip class phase 4.x */ #define BNXT_CHIP_P4(bp) \ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \ @@ -1550,11 +1877,41 @@ struct bnxt { #define BNXT_CHIP_P4_PLUS(bp) \ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp)) +#define BNXT_CHIP_SUPPORTS_PHY(bp) (BNXT_ASIC(bp) || BNXT_CHIP_SR2(bp)) + /* Must remain NULL for new bnxt_re upstream/ + * inbox driver. + */ + void *reserved_ulp_kabi_1; + struct bnxt_en_dev *edev; + /* The following 2 fields are for + * OOT compatibility checking only. + */ + void *reserved; /* Old bnxt_re sees that the + * original ulp_probe pointer + * is NULL and will not call. + */ + u32 ulp_version; /* New bnxt_re checks that + * the ulp_version is correct + * before calling the new + * ulp_probe. + */ +#define BNXT_ULP_VERSION 0x695a0004 /* Change this when any interface + * structure or API changes between + * bnxt_en and bnxt_re. + */ struct bnxt_en_dev * (*ulp_probe)(struct net_device *); + /* Do not add any fields before + * ulp_probe in both OOT and + * upstream/inbox drivers. 
+ */ struct bnxt_napi **bnapi; +#ifdef OLD_VLAN + struct vlan_group *vlgrp; +#endif + struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; u16 *tx_ring_map; @@ -1592,6 +1949,7 @@ struct bnxt { int tx_nr_rings_xdp; int tx_wake_thresh; + enum bnxt_push_mode tx_push_mode; int tx_push_thresh; int tx_push_size; @@ -1606,8 +1964,12 @@ struct bnxt { struct bnxt_vnic_info *vnic_info; int nr_vnics; u32 rss_hash_cfg; + u16 *rss_indir_tbl; + u16 rss_indir_tbl_entries; + u8 usr_rss_hash_key[HW_HASH_KEY_SIZE]; u16 max_mtu; + u16 fw_dflt_mtu; u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; @@ -1621,13 +1983,14 @@ struct bnxt { struct timer_list timer; unsigned long state; -#define BNXT_STATE_OPEN 0 -#define BNXT_STATE_IN_SP_TASK 1 -#define BNXT_STATE_READ_STATS 2 -#define BNXT_STATE_FW_RESET_DET 3 -#define BNXT_STATE_IN_FW_RESET 4 -#define BNXT_STATE_ABORT_ERR 5 +#define BNXT_STATE_OPEN 0 +#define BNXT_STATE_IN_SP_TASK 1 +#define BNXT_STATE_READ_STATS 2 +#define BNXT_STATE_FW_RESET_DET 3 +#define BNXT_STATE_IN_FW_RESET 4 +#define BNXT_STATE_ABORT_ERR 5 #define BNXT_STATE_FW_FATAL_COND 6 +#define BNXT_STATE_DRV_REGISTERED 7 struct bnxt_irq *irq_tbl; int total_irqs; @@ -1649,47 +2012,49 @@ struct bnxt { #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 #define BNXT_FW_CAP_NEW_RM 0x00000008 #define BNXT_FW_CAP_IF_CHANGE 0x00000010 + #define BNXT_FW_CAP_LINK_ADMIN 0x00000020 + #define BNXT_FW_CAP_VF_RES_MIN_GUARANTEED 0x00000040 #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 + #define BNXT_FW_CAP_ADMIN_MTU 0x00000100 + #define BNXT_FW_CAP_ADMIN_PF 0x00000200 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 #define BNXT_FW_CAP_TRUSTED_VF 0x00000800 + #define BNXT_FW_CAP_VF_VNIC_NOTIFY 0x00001000 #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000 #define BNXT_FW_CAP_PKG_VER 0x00004000 #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000 - #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000 + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000 #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 + #define BNXT_FW_CAP_SECURE_MODE 0x00080000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_FW_CAP_HOT_RESET 0x00200000 + #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000 + #define BNXT_FW_CAP_CRASHDUMP 0x00800000 + #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 + #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 + #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 + #define BNXT_FW_CAP_CFA_EEM 0x08000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; - u16 hwrm_intr_seq_id; - void *hwrm_short_cmd_req_addr; - dma_addr_t hwrm_short_cmd_req_dma_addr; - void *hwrm_cmd_resp_addr; - dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_cmd_kong_resp_addr; - dma_addr_t hwrm_cmd_kong_resp_dma_addr; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; +#define HWRM_DBG_REG_BUF_SIZE 128 +#ifdef NETDEV_GET_STATS64 struct rtnl_link_stats64 net_stats_prev; - struct rx_port_stats *hw_rx_port_stats; - struct tx_port_stats *hw_tx_port_stats; - struct rx_port_stats_ext *hw_rx_port_stats_ext; - struct tx_port_stats_ext *hw_tx_port_stats_ext; - struct pcie_ctx_hw_stats *hw_pcie_stats; - dma_addr_t hw_rx_port_stats_map; - dma_addr_t hw_tx_port_stats_map; - dma_addr_t hw_rx_port_stats_ext_map; - dma_addr_t hw_tx_port_stats_ext_map; - dma_addr_t hw_pcie_stats_map; - int hw_port_stats_size; +#endif + struct bnxt_stats_mem port_stats; + struct bnxt_stats_mem 
rx_port_stats_ext; + struct bnxt_stats_mem tx_port_stats_ext; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; u8 pri2cos_idx[8]; - u8 pri2cos_valid; + bool pri2cos_valid; u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; @@ -1700,6 +2065,12 @@ struct bnxt { #define BC_HWRM_STR_LEN 21 #define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) char fw_ver_str[FW_VER_STR_LEN]; + char hwrm_ver_supp[FW_VER_STR_LEN]; + u64 fw_ver_code; +#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ + ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) +#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) + __be16 vxlan_port; u8 vxlan_port_cnt; __le16 vxlan_fw_dst_port_id; @@ -1741,6 +2112,9 @@ struct bnxt { #define BNXT_RING_COAL_NOW_SP_EVENT 17 #define BNXT_FW_RESET_NOTIFY_SP_EVENT 18 #define BNXT_FW_EXCEPTION_SP_EVENT 19 +#define BNXT_VF_VNIC_CHANGE_SP_EVENT 20 +#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 +#define BNXT_PTP_CURRENT_TIME_EVENT 22 struct delayed_work fw_reset_task; int fw_reset_state; @@ -1750,7 +2124,6 @@ struct bnxt { #define BNXT_FW_RESET_STATE_POLL_FW 4 #define BNXT_FW_RESET_STATE_OPENING 5 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 - u16 fw_reset_min_dsecs; #define BNXT_DFLT_FW_RST_MIN_DSECS 20 u16 fw_reset_max_dsecs; @@ -1765,6 +2138,7 @@ struct bnxt { #ifdef CONFIG_BNXT_SRIOV int nr_vfs; struct bnxt_vf_info vf; + struct hwrm_func_vf_resource_cfg_input vf_resc_cfg_input; wait_queue_head_t sriov_cfg_wait; bool sriov_cfg; #define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) @@ -1780,8 +2154,12 @@ struct bnxt { /* ensure atomic 64-bit doorbell writes on 32-bit systems. */ spinlock_t db_lock; #endif + int legacy_db_size; + int db_size; + int db_size_nc; + void __iomem *db_base_wc; -#define BNXT_NTP_FLTR_MAX_FLTR 4096 +#define BNXT_NTP_FLTR_MAX_FLTR 8192 #define BNXT_NTP_FLTR_HASH_SIZE 512 #define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1) struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE]; @@ -1789,10 +2167,16 @@ struct bnxt { unsigned long *ntp_fltr_bmap; int ntp_fltr_count; + int max_fltr; + +#define BNXT_L2_FLTR_MAX_FLTR 1024 +#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR) +#define BNXT_L2_FLTR_HASH_SIZE 32 +#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1) + struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE]; + + u32 hash_seed; - /* To protect link related settings during link changes and - * ethtool settings changes. 
- */ struct mutex link_lock; struct bnxt_link_info link_info; struct ethtool_eee eee; @@ -1807,27 +2191,66 @@ struct bnxt { u8 num_leds; struct bnxt_led_info leds[BNXT_MAX_LED]; + u16 dump_flag; +#define BNXT_DUMP_LIVE 0 +#define BNXT_DUMP_CRASH 1 +#ifdef HAVE_NDO_XDP struct bpf_prog *xdp_prog; +#endif + struct bnxt_ptp_cfg *ptp_cfg; + +#ifndef PCIE_SRIOV_CONFIGURE + int req_vfs; + struct work_struct iov_task; +#endif + +#ifdef CONFIG_VF_REPS /* devlink interface and vf-rep structs */ struct devlink *dl; +#ifdef HAVE_DEVLINK_PORT_PARAM struct devlink_port dl_port; +#endif enum devlink_eswitch_mode eswitch_mode; struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */ - u8 switch_id[8]; +#endif + u8 dsn[8]; + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD struct bnxt_tc_info *tc_info; + struct list_head tc_indr_block_list; + struct notifier_block tc_netdev_nb; +#endif + struct bnxt_eem_info *eem_info; + void *dmabuf_data; struct dentry *debugfs_pdev; + struct dentry *debugfs_dim; struct device *hwmon_dev; }; +#define BNXT_GET_RING_STATS64(sw, counter) \ + (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8)) + +#define BNXT_GET_RX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct rx_port_stats, counter) / 8)) + +#define BNXT_GET_TX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct tx_port_stats, counter) / 8)) + +#define BNXT_PORT_STATS_SIZE \ + (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024) + +#define BNXT_TX_PORT_STATS_BYTE_OFFSET \ + (sizeof(struct rx_port_stats) + 512) + #define BNXT_RX_STATS_OFFSET(counter) \ (offsetof(struct rx_port_stats, counter) / 8) #define BNXT_TX_STATS_OFFSET(counter) \ ((offsetof(struct tx_port_stats, counter) + \ - sizeof(struct rx_port_stats) + 512) / 8) + BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8) #define BNXT_RX_STATS_EXT_OFFSET(counter) \ (offsetof(struct rx_port_stats_ext, counter) / 8) @@ -1835,8 +2258,97 @@ struct bnxt { #define BNXT_TX_STATS_EXT_OFFSET(counter) \ (offsetof(struct tx_port_stats_ext, counter) / 8) -#define BNXT_PCIE_STATS_OFFSET(counter) \ - (offsetof(struct pcie_ctx_hw_stats, counter) / 8) +#define BNXT_HW_FEATURE_VLAN_ALL_RX \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) +#define BNXT_HW_FEATURE_VLAN_ALL_TX \ + (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX) + +#ifdef BNXT_PRIV_RX_BUSY_POLL +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the NAPI poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_NAPI); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the busy poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_POLL); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ + int old; + + while (1) { + old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_DISABLE); + if (old == 
BNXT_STATE_IDLE) + break; + usleep_range(500, 5000); + } +} + +#else + +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + return true; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ +} + +#endif #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 @@ -1847,6 +2359,28 @@ struct bnxt { #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +#define BDETBD_REG_BD_PRODUCER_IDX 0x90000UL +#define BDETBD_REG_BD_REQ_CONSUMER_IDX 0x91000UL +#define BDETBD_REG_BD_CMPL_CONSUMER_IDX 0x92000UL +#define BDERBD_REG_BD_PRODUCER_IDX 0x410000UL +#define BDERBD_REG_BD_REQ_CONSUMER_IDX 0x411000UL +#define BDERBD_REG_BD_CMPL_CONSUMER_IDX 0x412000UL +#define CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET 0x30003cUL +#define CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET 0x300040UL +#define CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET 0x300044UL +#define CAG_REG_CAG_PRODUCER_INDEX_REG 0x302000UL +#define CAG_REG_CAG_CONSUMER_INDEX_REG 0x303000UL +#define CAG_REG_CAG_VECTOR_CTRL 0x301000UL +#define TDC_REG_INT_STS_0 0x180020UL +#define TDC_REG_TDC_DEBUG_CNTL 0x180014UL +#define TDC_REG_TDC_DEBUG_STATUS 0x180018UL +#define TDI_REG_DBG_DWORD_ENABLE 0x100104UL +#define TDI_REG_DBG_OUT_DATA 0x100120UL +#define TDI_REG_DBG_SELECT 0x100100UL +#define TE_DEC_REG_PORT_CURRENT_CREDIT_REG 0x2401300UL +#define RDI_REG_RDI_DEBUG_CONTROL_REG 0x27001cUL +#define RDI_REG_RDI_DEBUG_STATUS_REG 0x270020UL + static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) { /* Tell compiler to fetch tx indices from memory. 
*/ @@ -1864,8 +2398,6 @@ do { \ writel((val64) >> 32, (db) + 4); \ spin_unlock(&bp->db_lock); \ } while (0) - -#define writeq_relaxed writeq #endif /* For TX and RX ring doorbells with no ordering guarantee*/ @@ -1873,9 +2405,10 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - writeq_relaxed(db->db_key64 | idx, db->doorbell); + writeq_relaxed(db->db_key64 | DB_RING_IDX(db, idx), + db->doorbell); } else { - u32 db_val = db->db_key32 | idx; + u32 db_val = db->db_key32 | DB_RING_IDX(db, idx); writel_relaxed(db_val, db->doorbell); if (bp->flags & BNXT_FLAG_DOUBLE_DB) @@ -1888,9 +2421,9 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - writeq(db->db_key64 | idx, db->doorbell); + writeq(db->db_key64 | DB_RING_IDX(db, idx), db->doorbell); } else { - u32 db_val = db->db_key32 | idx; + u32 db_val = db->db_key32 | DB_RING_IDX(db, idx); writel(db_val, db->doorbell); if (bp->flags & BNXT_FLAG_DOUBLE_DB) @@ -1898,79 +2431,32 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } -static inline bool bnxt_cfa_hwrm_message(u16 req_type) -{ - switch (req_type) { - case HWRM_CFA_ENCAP_RECORD_ALLOC: - case HWRM_CFA_ENCAP_RECORD_FREE: - case HWRM_CFA_DECAP_FILTER_ALLOC: - case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_EM_FLOW_ALLOC: - case HWRM_CFA_EM_FLOW_FREE: - case HWRM_CFA_EM_FLOW_CFG: - case HWRM_CFA_FLOW_ALLOC: - case HWRM_CFA_FLOW_FREE: - case HWRM_CFA_FLOW_INFO: - case HWRM_CFA_FLOW_FLUSH: - case HWRM_CFA_FLOW_STATS: - case HWRM_CFA_METER_PROFILE_ALLOC: - case HWRM_CFA_METER_PROFILE_FREE: - case HWRM_CFA_METER_PROFILE_CFG: - case HWRM_CFA_METER_INSTANCE_ALLOC: - case HWRM_CFA_METER_INSTANCE_FREE: - return true; - default: - return false; - } -} - -static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); -} - -static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); -} - -static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) -{ - if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) - return bp->hwrm_cmd_kong_resp_addr; - else - return bp->hwrm_cmd_resp_addr; -} - -static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) -{ - u16 seq_id; - - if (dst == BNXT_HWRM_CHNL_CHIMP) - seq_id = bp->hwrm_cmd_seq++; - else - seq_id = bp->hwrm_cmd_kong_seq++; - return seq_id; -} - extern const u16 bnxt_lhint_arr[]; +extern const struct pci_device_id bnxt_pci_tbl[]; +int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, struct bnxt_ctx_pg_info *ctx_pg, + u32 mem_size, u8 depth, bool use_init_val); +void bnxt_free_ctx_pg_tbls(struct bnxt *bp, struct bnxt_ctx_pg_info *ctx_pg); int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 prod, gfp_t gfp); void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); -u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); -void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); -int _hwrm_send_message(struct bnxt *, void *, u32, int); -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); -int 
hwrm_send_message(struct bnxt *, void *, u32, int); -int hwrm_send_message_silent(struct bnxt *, void *, u32, int); -int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, - int bmap_size); +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, + int bmap_size, bool async_only); +void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr); +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags); +int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr); +int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr); +int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr); +int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr); +void bnxt_fill_ipv6_mask(__be32 mask[4]); +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_nq_rings_in_use(struct bnxt *bp); @@ -1997,12 +2483,31 @@ void bnxt_fw_exception(struct bnxt *bp); void bnxt_fw_reset(struct bnxt *bp); int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp); +#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); +#endif +struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr, u32 idx); +#ifdef HAVE_FLOW_HASH_FROM_KEYS +u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys); +#endif +int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, + u32 idx); +void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_restore_pf_fw_resources(struct bnxt *bp); + +#ifdef CONFIG_VF_REPS +#ifdef HAVE_NDO_GET_PORT_PARENT_ID int bnxt_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid); +#else +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +#endif +#endif void bnxt_dim_work(struct work_struct *work); int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); - +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_compat.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_compat.h new file mode 100644 index 000000000000..64f058829031 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_compat.h @@ -0,0 +1,1774 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <linux/pci.h> +#include <linux/version.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS) +#include <net/flow_keys.h> +#endif +#include <linux/sched.h> +#include <linux/dma-buf.h> +#ifdef HAVE_IEEE1588_SUPPORT +#include <linux/ptp_clock_kernel.h> +#endif +#if defined(HAVE_TC_FLOW_CLS_OFFLOAD) || defined(HAVE_TC_CLS_FLOWER_OFFLOAD) +#include <net/pkt_cls.h> +#endif +#ifdef HAVE_DIM +#include <linux/dim.h> +#endif +#ifdef HAVE_DEVLINK +#include <net/devlink.h> +#endif + +#ifndef IS_ENABLED +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) +#endif + +#if !IS_ENABLED(CONFIG_NET_DEVLINK) +#undef HAVE_DEVLINK +#undef HAVE_DEVLINK_INFO +#undef HAVE_DEVLINK_PARAM +#undef HAVE_NDO_DEVLINK_PORT +#undef HAVE_DEVLINK_PORT_PARAM +#undef HAVE_DEVLINK_FLASH_UPDATE +#undef HAVE_DEVLINK_HEALTH_REPORT +#endif + +/* Reconcile all dependencies for VF reps: + * SRIOV, Devlink, Switchdev and HW port info in metadata_dst + */ +#if defined(CONFIG_BNXT_SRIOV) && defined(HAVE_DEVLINK) && \ + defined(CONFIG_NET_SWITCHDEV) && defined(HAVE_METADATA_HW_PORT_MUX) && \ + (LINUX_VERSION_CODE >= 0x030a00) +#define CONFIG_VF_REPS 1 +#endif +/* DEVLINK code has dependencies on VF reps */ +#ifdef HAVE_DEVLINK_PARAM +#define CONFIG_VF_REPS 1 +#endif +#ifdef CONFIG_VF_REPS +#ifndef SWITCHDEV_SET_OPS +#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops)) +#endif +#endif + +/* Reconcile all dependencies for TC Flower offload + * Need the following to be defined to build TC flower offload + * HAVE_TC_FLOW_CLS_OFFLOAD OR HAVE_TC_CLS_FLOWER_OFFLOAD + * HAVE_RHASHTABLE + * HAVE_FLOW_DISSECTOR_KEY_ICMP + * CONFIG_NET_SWITCHDEV + * HAVE_TCF_EXTS_TO_LIST (it's possible to do without this but + * the code gets a bit complicated. So, for now depend on this.) + * HAVE_TCF_TUNNEL + * Instead of checking for all of the above defines, enable one + * define when all are enabled. + */ +#if (defined(HAVE_TC_FLOW_CLS_OFFLOAD) || \ + defined(HAVE_TC_CLS_FLOWER_OFFLOAD)) && \ + (defined(HAVE_TCF_EXTS_TO_LIST) || \ + defined(HAVE_TC_EXTS_FOR_ACTION)) && \ + defined(HAVE_RHASHTABLE) && defined(HAVE_FLOW_DISSECTOR_KEY_ICMP) && \ + defined(HAVE_TCF_TUNNEL) && defined(CONFIG_NET_SWITCHDEV) && \ + (LINUX_VERSION_CODE >= 0x030a00) +#define CONFIG_BNXT_FLOWER_OFFLOAD 1 +#ifndef HAVE_NDO_GET_PORT_PARENT_ID +#define netdev_port_same_parent_id(a, b) switchdev_port_same_parent_id(a, b) +#endif +#else +#undef CONFIG_BNXT_FLOWER_OFFLOAD +#endif + +/* With upstream kernels >= v5.2.0, struct tc_cls_flower_offload has been + * replaced by struct flow_cls_offload. For older kernels (< v5.2.0), rename + * the respective definitions here.
+ */
+#ifndef HAVE_TC_FLOW_CLS_OFFLOAD
+#ifdef HAVE_TC_CLS_FLOWER_OFFLOAD
+#define flow_cls_offload tc_cls_flower_offload
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#endif
+#endif
+
+#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE)
+#if defined(HAVE_NEW_HWMON_API)
+#define CONFIG_BNXT_HWMON 1
+#endif
+#endif
+
+#ifndef SPEED_20000
+#define SPEED_20000 20000
+#endif
+
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+
+#ifndef SPEED_40000
+#define SPEED_40000 40000
+#endif
+
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
+
+#ifndef SPEED_200000
+#define SPEED_200000 200000
+#endif
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+
+#ifndef PORT_DA
+#define PORT_DA 0x05
+#endif
+
+#ifndef PORT_NONE
+#define PORT_NONE 0xef
+#endif
+
+#if !defined(SUPPORTED_40000baseCR4_Full)
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+
+#define ADVERTISED_40000baseCR4_Full (1 << 24)
+#endif
+
+#if !defined(ETH_TEST_FL_EXTERNAL_LB)
+#define ETH_TEST_FL_EXTERNAL_LB 0
+#define ETH_TEST_FL_EXTERNAL_LB_DONE 0
+#endif
+
+#if !defined(IPV4_FLOW)
+#define IPV4_FLOW 0x10
+#endif
+
+#if !defined(IPV6_FLOW)
+#define IPV6_FLOW 0x11
+#endif
+
+#if defined(HAVE_ETH_GET_HEADLEN) || (LINUX_VERSION_CODE > 0x040900)
+#define BNXT_RX_PAGE_MODE_SUPPORT 1
+#endif
+
+#if !defined(ETH_P_8021AD)
+#define ETH_P_8021AD 0x88A8
+#endif
+
+#if !defined(ETH_P_ROCE)
+#define ETH_P_ROCE 0x8915
+#endif
+
+#if !defined(ROCE_V2_UDP_DPORT)
+#define ROCE_V2_UDP_DPORT 4791
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL
+#define NETIF_F_GSO_UDP_TUNNEL 0
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#endif
+
+#ifndef NETIF_F_GSO_GRE
+#define NETIF_F_GSO_GRE 0
+#endif
+
+#ifndef NETIF_F_GSO_GRE_CSUM
+#define NETIF_F_GSO_GRE_CSUM 0
+#endif
+
+#ifndef NETIF_F_GSO_IPIP
+#define NETIF_F_GSO_IPIP 0
+#endif
+
+#ifndef NETIF_F_GSO_SIT
+#define NETIF_F_GSO_SIT 0
+#endif
+
+#ifndef NETIF_F_GSO_IPXIP4
+#define NETIF_F_GSO_IPXIP4 (NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT)
+#endif
+
+#ifndef NETIF_F_GSO_PARTIAL
+#define NETIF_F_GSO_PARTIAL 0
+#else
+#define HAVE_GSO_PARTIAL_FEATURES 1
+#endif
+
+/* Tie rx checksum offload to tx checksum offload for older kernels.
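+ * Kernels without NETIF_F_RXCSUM cannot toggle receive checksumming
+ * separately, so mapping it to NETIF_F_IP_CSUM keeps RX and TX in step.
+ * The zero-valued NETIF_F_GSO_* fallbacks above work the same way: a
+ * hypothetical feature setup line such as
+ *
+ *	dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ *
+ * compiles everywhere and degrades to a harmless no-op on kernels that
+ * lack the capability.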
*/ +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM NETIF_F_IP_CSUM +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE 0 +#endif + +#ifndef NETIF_F_RXHASH +#define NETIF_F_RXHASH 0 +#else +#define HAVE_NETIF_F_RXHASH +#endif + +#ifdef NETIF_F_GRO_HW +#define HAVE_NETIF_F_GRO_HW 1 +#else +#define NETIF_F_GRO_HW 0 +#endif + +#ifndef HAVE_SKB_GSO_UDP_TUNNEL_CSUM +#ifndef HAVE_SKB_GSO_UDP_TUNNEL +#define SKB_GSO_UDP_TUNNEL 0 +#endif +#define SKB_GSO_UDP_TUNNEL_CSUM SKB_GSO_UDP_TUNNEL +#endif + +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 +#endif + +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 +#endif + +#ifndef BRIDGE_MODE_UNDEF +#define BRIDGE_MODE_UNDEF 0xffff +#endif + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(mapping) DECLARE_PCI_UNMAP_ADDR(mapping) +#endif + +#ifndef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(len) DECLARE_PCI_UNMAP_LEN(len) +#endif + +#ifndef dma_unmap_addr_set +#define dma_unmap_addr_set pci_unmap_addr_set +#endif + +#ifndef dma_unmap_addr +#define dma_unmap_addr pci_unmap_addr +#endif + +#ifndef dma_unmap_len +#define dma_unmap_len pci_unmap_len +#endif + +#ifdef HAVE_DMA_ATTRS_H +#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ + dma_map_single_attrs(dev, cpu_addr, size, dir, NULL) + +#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_single_attrs(dev, dma_addr, size, dir, NULL) + +#ifdef HAVE_DMA_MAP_PAGE_ATTRS +#define dma_map_page_attrs(dev, page, offset, size, dir, attrs) \ + dma_map_page_attrs(dev, page, offset, size, dir, NULL) + +#define dma_unmap_page_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_page_attrs(dev, dma_addr, size, dir, NULL) +#endif +#endif + +#ifndef HAVE_DMA_MAP_PAGE_ATTRS +#define dma_map_page_attrs(dev, page, offset, size, dir, attrs) \ + dma_map_page(dev, page, offset, size, dir) + +#define dma_unmap_page_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_page(dev, dma_addr, size, dir) +#endif + +static inline struct dma_buf *dma_buf_export_attr(void *priv, int pg_count, + const struct dma_buf_ops *ops) +{ +#ifdef HAVE_DMA_EXPORT + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = ops; + exp_info.size = pg_count << PAGE_SHIFT; + exp_info.flags = O_RDWR; + exp_info.priv = priv; + exp_info.exp_name = "eem_dma_buf"; + return dma_buf_export(&exp_info); +#endif + +#ifdef HAVE_DMA_EXPORT_FLAG_RESV + return dma_buf_export(priv, ops, pg_count << PAGE_SHIFT, O_RDWR, NULL); +#endif + +#ifdef HAVE_DMA_EXPORT_FLAG + return dma_buf_export(priv, ops, pg_count << PAGE_SHIFT, O_RDWR); +#endif +} + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) 0 +#endif + +#if defined(RHEL_RELEASE_CODE) && (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,3)) +#if defined(CONFIG_X86_64) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif +#endif + +#ifdef HAVE_NDO_SET_VF_VLAN_RH73 +#define ndo_set_vf_vlan ndo_set_vf_vlan_rh73 +#endif + +#ifdef HAVE_NDO_CHANGE_MTU_RH74 +#define ndo_change_mtu ndo_change_mtu_rh74 +#undef 
HAVE_MIN_MTU +#endif + +#ifdef HAVE_NDO_SETUP_TC_RH72 +#define ndo_setup_tc ndo_setup_tc_rh72 +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST_RH +#define ndo_set_vf_trust extended.ndo_set_vf_trust +#endif + +#ifdef HAVE_NDO_UDP_TUNNEL_RH +#define ndo_udp_tunnel_add extended.ndo_udp_tunnel_add +#define ndo_udp_tunnel_del extended.ndo_udp_tunnel_del +#endif + +#ifndef HAVE_TC_SETUP_QDISC_MQPRIO +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif + +#if defined(HAVE_NDO_SETUP_TC_RH) || defined(HAVE_EXT_GET_PHYS_PORT_NAME) || defined(HAVE_NDO_SET_VF_TRUST_RH) +#define HAVE_NDO_SIZE 1 +#endif + +#ifndef HAVE_FLOW_KEYS +struct flow_keys { + __be32 src; + __be32 dst; + union { + __be32 ports; + __be16 port16[2]; + }; + u16 thoff; + u8 ip_proto; +}; +#endif + +#ifndef FLOW_RSS +#define FLOW_RSS 0x20000000 +#endif + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif + +#ifndef ETHTOOL_RX_FLOW_SPEC_RING +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif + +#ifndef ETHTOOL_GEEE +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; +#endif + +#ifndef HAVE_ETHTOOL_RESET_CRASHDUMP +enum compat_ethtool_reset_flags { ETH_RESET_CRASHDUMP = 1 << 9 }; +#endif + +#ifndef HAVE_SKB_FRAG_PAGE +static inline struct page *skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} + +static inline void *skb_frag_address_safe(const skb_frag_t *frag) +{ + void *ptr = page_address(skb_frag_page(frag)); + if (unlikely(!ptr)) + return NULL; + + return ptr + frag->page_offset; +} + +static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) +{ + frag->page = page; +} + +#define skb_frag_dma_map(x, frag, y, len, z) \ + pci_map_page(bp->pdev, (frag)->page, \ + (frag)->page_offset, (len), PCI_DMA_TODEVICE) +#endif + +#ifndef HAVE_SKB_FRAG_ACCESSORS +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif + +#ifndef HAVE_PCI_VFS_ASSIGNED +static inline int pci_vfs_assigned(struct pci_dev *dev) +{ + return 0; +} +#endif + +#ifndef HAVE_PCI_NUM_VF +#include <../drivers/pci/pci.h> + +static inline int pci_num_vf(struct pci_dev *dev) +{ + if (!dev->is_physfn) + return 0; + + return dev->sriov->nr_virtfn; +} +#endif + +#ifndef SKB_ALLOC_NAPI +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + struct sk_buff *skb; + + length += NET_SKB_PAD + NET_IP_ALIGN; + skb = netdev_alloc_skb(napi->dev, length); + + if (likely(skb)) + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + return skb; +} +#endif + +#ifndef HAVE_SKB_HASH_TYPE + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, /* Undefined type */ + PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ + PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ + PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ +}; + +static inline void +skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) +{ +#ifdef HAVE_NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} + +#endif + +#define 
GET_NET_STATS(x) (unsigned long)le64_to_cpu(x) + +#if !defined(NETDEV_RX_FLOW_STEER) || !defined(HAVE_FLOW_KEYS) || (LINUX_VERSION_CODE < 0x030300) || defined(NO_NETDEV_CPU_RMAP) +#undef CONFIG_RFS_ACCEL +#endif + +#ifdef CONFIG_RFS_ACCEL +#if !defined(HAVE_FLOW_HASH_FROM_KEYS) && !defined(HAVE_GET_HASH_RAW) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ + return skb->rxhash; +} +#endif +#endif /* CONFIG_RFS_ACCEL */ + +#if !defined(IEEE_8021QAZ_APP_SEL_DGRAM) || !defined(CONFIG_DCB) || !defined(HAVE_IEEE_DELAPP) +#undef CONFIG_BNXT_DCB +#endif + +#ifdef CONFIG_BNXT_DCB +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif +#endif + +#ifndef NETDEV_HW_FEATURES +#define hw_features features +#endif + +#ifndef HAVE_NETDEV_FEATURES_T +#ifdef HAVE_NDO_FIX_FEATURES +typedef u32 netdev_features_t; +#else +typedef unsigned long netdev_features_t; +#endif +#endif + +#if !defined(IFF_UNICAST_FLT) +#define IFF_UNICAST_FLT 0 +#endif + +#ifndef HAVE_NEW_BUILD_SKB +#define build_skb(data, frag) build_skb(data) +#endif + +#ifndef __rcu +#define __rcu +#endif + +#ifndef rcu_dereference_protected +#define rcu_dereference_protected(p, c) \ + rcu_dereference((p)) +#endif + +#ifndef rcu_access_pointer +#define rcu_access_pointer rcu_dereference +#endif + +#ifndef rtnl_dereference +#define rtnl_dereference(p) \ + rcu_dereference_protected(p, lockdep_rtnl_is_held()) +#endif + +#ifndef RCU_INIT_POINTER +#define RCU_INIT_POINTER(p, v) \ + p = (typeof(*v) __force __rcu *)(v) +#endif + +#ifdef HAVE_OLD_HLIST +#define __hlist_for_each_entry_rcu(f, n, h, m) \ + hlist_for_each_entry_rcu(f, n, h, m) +#define __hlist_for_each_entry_safe(f, n, t, h, m) \ + hlist_for_each_entry_safe(f, n, t, h, m) +#else +#define __hlist_for_each_entry_rcu(f, n, h, m) \ + hlist_for_each_entry_rcu(f, h, m) +#define __hlist_for_each_entry_safe(f, n, t, h, m) \ + hlist_for_each_entry_safe(f, t, h, m) +#endif + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef NETIF_F_HW_VLAN_CTAG_TX +#define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX +#define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX +/* 802.1AD not supported on older kernels */ +#define NETIF_F_HW_VLAN_STAG_TX 0 +#define NETIF_F_HW_VLAN_STAG_RX 0 + +#define __vlan_hwaccel_put_tag(skb, proto, tag) \ + if (proto == ntohs(ETH_P_8021Q)) \ + __vlan_hwaccel_put_tag(skb, tag) + +#define vlan_proto protocol + +#if defined(HAVE_VLAN_RX_REGISTER) +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define OLD_VLAN 1 +#define OLD_VLAN_VALID (1 << 31) +#endif +#endif + +#endif + +#ifndef HAVE_NETDEV_NOTIFIER_INFO_TO_DEV +#ifndef netdev_notifier_info_to_dev +static inline struct net_device * +netdev_notifier_info_to_dev(void *ptr) +{ + return (struct net_device *)ptr; +} +#endif +#endif + +static inline int bnxt_en_register_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = register_netdevice_notifier_rh(nb); +#else + rc = register_netdevice_notifier(nb); +#endif + return rc; +} + +static inline int bnxt_en_unregister_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = unregister_netdevice_notifier_rh(nb); +#else + rc = unregister_netdevice_notifier(nb); +#endif + return rc; +} + +#ifndef HAVE_NETDEV_UPDATE_FEATURES +static inline void netdev_update_features(struct net_device *dev) +{ + /* Do nothing, since we can't set default VLAN on these old kernels. 
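+	 * Given the hw_features fallback earlier in this header
+	 * (hw_features aliased to features), a hypothetical caller such as
+	 *
+	 *	dev->hw_features |= NETIF_F_RXHASH;
+	 *	netdev_update_features(dev);
+	 *
+	 * stays source-compatible; the bit is applied directly and this
+	 * stub intentionally does nothing further.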
*/ +} +#endif + +#if !defined(netdev_printk) && (LINUX_VERSION_CODE < 0x020624) + +#ifndef HAVE_NETDEV_NAME +static inline const char *netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#endif + +#define NET_PARENT_DEV(netdev) ((netdev)->dev.parent) + +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, NET_PARENT_DEV(netdev), \ + "%s: " format, \ + netdev_name(netdev), ##args) + +#endif + +#ifndef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#endif + +#ifndef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#endif + +#ifndef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#endif + +#ifndef dev_warn_ratelimited +#define dev_warn_ratelimited(dev, format, args...) \ + dev_warn(dev, format, ##args) +#endif + +#ifndef netdev_level_once +#define netdev_level_once(level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +#define netdev_info_once(dev, fmt, ...) \ + netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) +#define netdev_warn_once(dev, fmt, ...) \ + netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) +#endif + +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif + +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif + +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() smp_mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef writel_relaxed +#define writel_relaxed(v, a) writel(v, a) +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed(v, a) writeq(v, a) +#endif + +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) (x = val) +#endif + +#ifndef READ_ONCE +#define READ_ONCE(val) (val) +#endif + +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#if defined(HAVE_NAPI_HASH_ADD) && defined(NETDEV_BUSY_POLL) +#define BNXT_PRIV_RX_BUSY_POLL 1 +#endif +#endif + +#if defined(HAVE_NETPOLL_POLL_DEV) +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#if !defined(CONFIG_PTP_1588_CLOCK) && !defined(CONFIG_PTP_1588_CLOCK_MODULE) +#undef HAVE_IEEE1588_SUPPORT +#endif + +#if !defined(HAVE_PTP_GETTIMEX64) +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) +{ +} + +static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) +{ +} +#endif + +#if defined(RHEL_RELEASE_CODE) && (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,0)) +#undef CONFIG_CRASH_DUMP +#endif + +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present(skb) vlan_tx_tag_present(skb) +#define skb_vlan_tag_get(skb) vlan_tx_tag_get(skb) +#endif + +#if !defined(HAVE_NAPI_HASH_DEL) +static inline void napi_hash_del(struct napi_struct *napi) +{ +} +#endif + +#if !defined(LL_FLUSH_FAILED) || !defined(HAVE_NAPI_HASH_ADD) +static inline void napi_hash_add(struct napi_struct *napi) +{ +} +#endif + 
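+/* Usage sketch for the netdev_*_once() fallbacks above (hypothetical
+ * warning site): the static flag lives at each expansion site, so every
+ * call site prints at most once for the lifetime of the module:
+ *
+ *	if (fw_does_not_support_feature)
+ *		netdev_warn_once(bp->dev,
+ *				 "feature not supported by firmware\n");
+ */
+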
+#ifndef HAVE_SET_COHERENT_MASK +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + + return pci_set_consistent_dma_mask(pdev, mask); +} +#endif + +#ifndef HAVE_SET_MASK_AND_COHERENT +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +#ifndef HAVE_DMA_ZALLOC_COHERENT +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} +#endif + +#ifndef HAVE_IFLA_TX_RATE +#define ndo_set_vf_rate ndo_set_vf_tx_rate +#endif + +#ifndef HAVE_PRANDOM_BYTES +#define prandom_bytes get_random_bytes +#endif + +#ifndef rounddown +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) +#endif + +#ifdef NO_SKB_FRAG_SIZE +static inline unsigned int skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif + +#ifdef NO_ETH_RESET_AP +#define ETH_RESET_AP (1<<8) +#endif + +#ifndef HAVE_SKB_CHECKSUM_NONE_ASSERT +static inline void skb_checksum_none_assert(struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; +} +#endif + +#ifndef HAVE_NEW_FLOW_DISSECTOR_WITH_FLAGS +#define skb_flow_dissect_flow_keys(skb, fkeys, flags) \ + skb_flow_dissect_flow_keys(skb, fkeys) +#endif + +#ifndef HAVE_ETHER_ADDR_EQUAL +static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#endif + +#ifndef HAVE_ETHER_ADDR_COPY +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ + memcpy(dst, src, ETH_ALEN); +} +#endif + +#ifndef HAVE_ETH_BROADCAST_ADDR +static inline void eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef HAVE_ETH_HW_ADDR_RANDOM +static inline void eth_hw_addr_random(struct net_device *dev) +{ +#if defined(NET_ADDR_RANDOM) + dev->addr_assign_type = NET_ADDR_RANDOM; +#endif + random_ether_addr(dev->dev_addr); +} +#endif + +#ifndef HAVE_NETDEV_TX_QUEUE_CTRL +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{ +} + +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned int pkts, unsigned int bytes) +{ +} + +static inline void netdev_tx_reset_queue(struct netdev_queue *q) +{ +} +#endif + +#ifndef HAVE_NETIF_SET_REAL_NUM_RX +static inline int netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#endif + +#ifndef HAVE_NETIF_SET_REAL_NUM_TX +static inline void netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->real_num_tx_queues = txq; +} +#endif + +#ifndef HAVE_NETIF_GET_DEFAULT_RSS +static inline int netif_get_num_default_rss_queues(void) +{ + return min_t(int, 8, num_online_cpus()); +} +#endif + +#ifndef IFF_RXFH_CONFIGURED +#define IFF_RXFH_CONFIGURED 0 +#undef HAVE_SET_RXFH +static inline bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return false; +} +#endif + +#if !defined(HAVE_TCP_V6_CHECK) +static __inline__ __sum16 tcp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); +} +#endif + +#ifndef HAVE_USLEEP_RANGE +static inline void usleep_range(unsigned long min, unsigned long max) +{ + if (min < 1000) + udelay(min); + else 
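+		/* no hrtimer-backed range sleep on these kernels: busy-wait
+		 * for sub-millisecond requests (udelay above), otherwise
+		 * fall back to a millisecond sleep; precision is only
+		 * approximate with respect to [min, max]
+		 */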
+		msleep(min / 1000);
+}
+#endif
+
+#ifndef HAVE_GET_NUM_TC
+static inline int netdev_get_num_tc(struct net_device *dev)
+{
+	return 0;
+}
+
+static inline void netdev_reset_tc(struct net_device *dev)
+{
+}
+
+static inline int netdev_set_tc_queue(struct net_device *dev, u8 tc,
+				      u16 count, u16 offset)
+{
+	return 0;
+}
+#endif
+
+#ifndef HAVE_VZALLOC
+static inline void *vzalloc(size_t size)
+{
+	void *ret = vmalloc(size);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+#endif
+
+#ifndef HAVE_KMALLOC_ARRAY
+static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
+{
+	return kmalloc(n * s, gfp);
+}
+#endif
+
+#ifndef ETH_MODULE_SFF_8436
+#define ETH_MODULE_SFF_8436 0x4
+#endif
+
+#ifndef ETH_MODULE_SFF_8436_LEN
+#define ETH_MODULE_SFF_8436_LEN 256
+#endif
+
+#ifndef ETH_MODULE_SFF_8636
+#define ETH_MODULE_SFF_8636 0x3
+#endif
+
+#ifndef ETH_MODULE_SFF_8636_LEN
+#define ETH_MODULE_SFF_8636_LEN 256
+#endif
+
+#ifndef HAVE_MSIX_RANGE
+static inline int
+pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+		      int minvec, int maxvec)
+{
+	int rc;
+
+	while (maxvec >= minvec) {
+		rc = pci_enable_msix(dev, entries, maxvec);
+		if (!rc || rc < 0)
+			return rc;
+		maxvec = rc;
+	}
+	return -ENOSPC;	/* could not allocate at least minvec vectors */
+}
+#endif /* HAVE_MSIX_RANGE */
+
+#ifndef HAVE_PCI_PHYSFN
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+	if (dev->is_virtfn)
+		dev = dev->physfn;
+#endif
+
+	return dev;
+}
+#endif
+
+#ifndef HAVE_PCI_PRINT_LINK_STATUS
+#ifndef HAVE_PCI_LINK_WIDTH
+enum pcie_link_width {
+	PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+#endif
+
+#ifndef HAVE_PCIE_BUS_SPEED
+enum pci_bus_speed {
+	PCIE_SPEED_2_5GT = 0x14,
+	PCIE_SPEED_5_0GT = 0x15,
+	PCIE_SPEED_8_0GT = 0x16,
+#ifndef PCIE_SPEED_16_0GT
+	PCIE_SPEED_16_0GT = 0x17,
+#endif
+	PCI_SPEED_UNKNOWN = 0xFF,
+};
+#endif
+
+#ifndef PCIE_SPEED_16_0GT
+#define PCIE_SPEED_16_0GT 0x17
+#endif
+
+static const unsigned char pcie_link_speed[] = {
+	PCI_SPEED_UNKNOWN,	/* 0 */
+	PCIE_SPEED_2_5GT,	/* 1 */
+	PCIE_SPEED_5_0GT,	/* 2 */
+	PCIE_SPEED_8_0GT,	/* 3 */
+	PCIE_SPEED_16_0GT,	/* 4 */
+	PCI_SPEED_UNKNOWN,	/* 5 */
+	PCI_SPEED_UNKNOWN,	/* 6 */
+	PCI_SPEED_UNKNOWN,	/* 7 */
+	PCI_SPEED_UNKNOWN,	/* 8 */
+	PCI_SPEED_UNKNOWN,	/* 9 */
+	PCI_SPEED_UNKNOWN,	/* A */
+	PCI_SPEED_UNKNOWN,	/* B */
+	PCI_SPEED_UNKNOWN,	/* C */
+	PCI_SPEED_UNKNOWN,	/* D */
+	PCI_SPEED_UNKNOWN,	/* E */
+	PCI_SPEED_UNKNOWN	/* F */
+};
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef PCI_EXP_LNKCAP2
+#define PCI_EXP_LNKCAP2 44	/* Link Capabilities 2 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP2_SLS_2_5GB
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+#endif
+
+#ifndef PCI_EXP_LNKCAP2_SLS_16_0GB
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_2_5GB
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_8_0GB
+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
+#endif
+
+#ifndef PCI_EXP_LNKCAP_SLS_16_0GB
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
+#endif
+
+#ifndef PCIE_SPEED2STR
+/* PCIe link information */
+#define PCIE_SPEED2STR(speed) \
+	((speed) == PCIE_SPEED_16_0GT ?
"16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000 * 128 / 130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000 * 128 / 130 : \ + (speed) == PCIE_SPEED_5_0GT ? 5000 * 8 / 10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500 * 8 / 10 : \ + 0) +#endif /* PCIE_SPEED2STR */ + +#define BNXT_PCIE_CAP 0xAC +#ifndef HAVE_PCI_UPSTREAM_BRIDGE +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +static u32 +pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, enum pcie_link_width *width) +{ + enum pcie_link_width next_width; + enum pci_bus_speed next_speed; + u32 bw, next_bw; + u16 lnksta; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } +#else + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKSTA, &lnksta); + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; +#endif /* HAVE_PCIE_CAPABILITY_READ_WORD */ + + return bw; +} + +static enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +{ + /* + * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported, falling + * back to Max Link Speed in Link Capabilities otherwise. 
+ */ +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + u32 lnkcap2, lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); +#else + u16 lnkcap2, lnkcap; + + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP2, &lnkcap2); +#endif + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); +#else + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP, &lnkcap); +#endif + if (lnkcap) { + if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + } + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) +{ +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); +#else + u16 lnkcap; + + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP, &lnkcap); +#endif + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = pcie_get_speed_cap(dev); + *width = pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +static inline void pcie_print_link_status(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = pcie_bandwidth_capable(pdev, &speed_cap, &width_cap); + bw_avail = pcie_bandwidth_available(pdev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + netdev_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + netdev_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? pci_name(limiting_dev) : "<unknown>", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* HAVE_PCI_PRINT_LINK_STATUS */ + +#ifndef HAVE_PCI_IS_BRIDGE +static inline bool pci_is_bridge(struct pci_dev *dev) +{ + return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; +} +#endif + +#ifndef HAVE_PCI_GET_DSN +static inline u64 pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. 
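+	 *
+	 * For example, with made-up readbacks of 0x12345678 (first dword)
+	 * and 0x9abcdef0 (second dword), the DSN assembled below is
+	 * 0x9abcdef012345678.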
+ */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif + +#ifndef HAVE_NDO_XDP +struct netdev_bpf; +struct xdp_buff; +#elif !defined(HAVE_NDO_BPF) +#define ndo_bpf ndo_xdp +#define netdev_bpf netdev_xdp +#endif + +#ifndef XDP_PACKET_HEADROOM +#define XDP_PACKET_HEADROOM 0 +#endif + +#ifndef HAVE_XDP_FRAME +#define xdp_do_flush_map() +#ifndef HAVE_XDP_REDIRECT +struct bpf_prog; +static inline int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *prog) +{ + return 0; +} +#endif +#endif + +#ifndef HAVE_XDP_ACTION +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP, + XDP_PASS, + XDP_TX, +#ifndef HAVE_XDP_REDIRECT + XDP_REDIRECT, +#endif +}; +#else +#ifndef HAVE_XDP_REDIRECT +#define XDP_REDIRECT 4 +#endif +#endif + +#ifndef HAVE_BPF_TRACE +#define trace_xdp_exception(dev, xdp_prog, act) +#define bpf_warn_invalid_xdp_action(act) +#endif + +#ifndef HAVE_XDP_SET_DATA_META_INVALID +#define xdp_set_data_meta_invalid(xdp) +#endif + +#ifdef HAVE_XDP_RXQ_INFO +#ifndef HAVE_XDP_RXQ_INFO_IS_REG + +#define REG_STATE_REGISTERED 0x1 + +static inline bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq) +{ + return (xdp_rxq->reg_state == REG_STATE_REGISTERED); +} +#endif +#else +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; +}; +#endif + +#ifndef HAVE_XDP_MEM_TYPE +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ + MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ + MEM_TYPE_PAGE_POOL, + MEM_TYPE_ZERO_COPY, + MEM_TYPE_MAX, +}; +static inline int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, + enum xdp_mem_type type, void *allocator) +{ + return 0; +} +#endif + +#if !defined(CONFIG_PAGE_POOL) || !defined(HAVE_PAGE_POOL_RELEASE_PAGE) +#define page_pool_release_page(page_pool, page) +#endif + +#ifndef HAVE_TCF_EXTS_HAS_ACTIONS +#define tcf_exts_has_actions(x) (!tc_no_actions(x)) +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && !defined(HAVE_TCF_STATS_UPDATE) +static inline void +tcf_exts_stats_update(const struct tcf_exts *exts, + u64 bytes, u64 packets, u64 lastuse) +{ +#ifdef CONFIG_NET_CLS_ACT + int i; + + preempt_disable(); + + for (i = 0; i < exts->nr_actions; i++) { + struct tc_action *a = exts->actions[i]; + + tcf_action_stats_update(a, bytes, packets, lastuse); + } + + preempt_enable(); +#endif +} +#endif + +#ifndef HAVE_TC_CB_REG_EXTACK +#define tcf_block_cb_register(block, cb, cb_ident, cb_priv, extack) \ + tcf_block_cb_register(block, cb, cb_ident, cb_priv) +#endif + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#if !defined(HAVE_TC_CLS_CAN_OFFLOAD_AND_CHAIN0) && defined(HAVE_TC_SETUP_BLOCK) +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + return true; +} +#endif + +#ifdef HAVE_TC_CB_EGDEV + +static inline void bnxt_reg_egdev(const struct net_device *dev, + void *cb, void *cb_priv, int vf_idx) +{ + if (tc_setup_cb_egdev_register(dev, (tc_setup_cb_t *)cb, cb_priv)) + netdev_warn(dev, + "Failed to register egdev for VF-Rep: %d", vf_idx); +} + +static inline void bnxt_unreg_egdev(const struct net_device *dev, + void *cb, void *cb_priv) +{ + tc_setup_cb_egdev_unregister(dev, (tc_setup_cb_t *)cb, cb_priv); +} + +#else + +static inline void bnxt_reg_egdev(const struct net_device *dev, + void *cb, void 
*cb_priv, int vf_idx) +{ +} + +static inline void bnxt_unreg_egdev(const struct net_device *dev, + void *cb, void *cb_priv) +{ +} + +#endif /* HAVE_TC_CB_EGDEV */ + +#ifdef HAVE_TC_SETUP_BLOCK +#ifndef HAVE_SETUP_TC_BLOCK_HELPER + +static inline int +flow_block_cb_setup_simple(struct tc_block_offload *f, + struct list_head *driver_block_list, + tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} + +#endif /* !HAVE_SETUP_TC_BLOCK_HELPER */ +#endif /* HAVE_TC_SETUP_BLOCK */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +#ifndef BIT_ULL +#define BIT_ULL(nr) (1ULL << (nr)) +#endif + +#ifndef HAVE_SIMPLE_OPEN +static inline int simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + return 0; +} +#endif + +#if !defined(HAVE_DEVLINK_PARAM_PUBLISH) && defined(HAVE_DEVLINK_PARAM) +static inline void devlink_params_publish(struct devlink *devlink) +{ +} +#endif + +#ifdef HAVE_DEVLINK_HEALTH_REPORT +#ifndef HAVE_DEVLINK_HEALTH_REPORTER_STATE_UPDATE + +#define DEVLINK_HEALTH_REPORTER_STATE_HEALTHY 0 +#define DEVLINK_HEALTH_REPORTER_STATE_ERROR 1 + +static inline void +devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, + int state) +{ +} +#endif + +#ifndef HAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE +static inline void +devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter) +{ +} +#endif + +#ifndef HAVE_DEVLINK_HEALTH_REPORT_EXTACK +#define bnxt_fw_reporter_diagnose(reporter, priv_ctx, extack) \ + bnxt_fw_reporter_diagnose(reporter, priv_ctx) +#define bnxt_fw_reset_recover(reporter, priv_ctx, extack) \ + bnxt_fw_reset_recover(reporter, priv_ctx) +#define bnxt_fw_fatal_recover(reporter, priv_ctx, extack) \ + bnxt_fw_fatal_recover(reporter, priv_ctx) +#endif /* HAVE_DEVLINK_HEALTH_REPORT_EXTACK */ +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ + +#ifndef mmiowb +#define mmiowb() do {} while (0) +#endif + +#ifndef HAVE_ETH_GET_HEADLEN_NEW +#define eth_get_headlen(dev, data, len) eth_get_headlen(data, len) +#endif + +#ifndef HAVE_NETDEV_XMIT_MORE +#define netdev_xmit_more() skb->xmit_more +#endif + +#ifndef HAVE_NDO_TX_TIMEOUT_QUEUE +#define bnxt_tx_timeout(dev, queue) bnxt_tx_timeout(dev) +#endif + +#ifdef HAVE_NDO_ADD_VXLAN +#ifndef HAVE_VXLAN_GET_RX_PORT +static inline void vxlan_get_rx_port(struct net_device *netdev) +{ +} +#endif +#endif + +#ifndef HAVE_DEVLINK_VALIDATE_NEW +#define bnxt_dl_msix_validate(dl, id, val, extack) \ + bnxt_dl_msix_validate(dl, id, val) +#endif + +#ifndef kfree_rcu +#define kfree_rcu(ptr, rcu_head) \ + do { \ + synchronize_rcu(); \ + kfree(ptr); \ + } while (0) +#endif + +#if defined(HAVE_DEVLINK_FLASH_UPDATE) && \ + !defined(HAVE_DEVLINK_FLASH_UPDATE_STATUS) +static inline void devlink_flash_update_begin_notify(struct devlink *devlink) +{ +} + +static inline void devlink_flash_update_end_notify(struct devlink *devlink) +{ +} + +static inline void devlink_flash_update_status_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total) +{ +} +#endif + +#ifdef HAVE_DEVLINK_INFO +#ifndef DEVLINK_INFO_VERSION_GENERIC_ASIC_ID +#define 
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id" +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev" +#define DEVLINK_INFO_VERSION_GENERIC_FW "fw" +#endif +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_PSID +#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid" +#endif +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_ROCE +#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce" +#endif +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API +#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api" +#endif + +#ifndef HAVE_DEVLINK_INFO_BSN_PUT +static inline int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return 0; +} +#endif +#endif /* HAVE_DEVLINK_INFO */ + +#ifndef HAVE_PCIE_FLR +static inline int pcie_flr(struct pci_dev *dev) +{ + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); + + return 0; +} +#endif /* pcie_flr */ + +#if defined(HAVE_FLOW_OFFLOAD_H) && \ + !defined(HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK) +static inline bool +flow_action_basic_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + return true; +} + +#define flow_stats_update(flow_stats, bytes, pkts, last_used, used_hw_stats) \ + flow_stats_update(flow_stats, bytes, pkts, last_used) +#endif + +#ifndef fallthrough +#define fallthrough do {} while (0) /* fall through */ +#endif + +#ifndef __ALIGN_KERNEL_MASK +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#ifndef __ALIGN_KERNEL +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#endif +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif + +struct bnxt_compat_dma_pool { + struct dma_pool *pool; + size_t size; +}; + +static inline struct bnxt_compat_dma_pool* +bnxt_compat_dma_pool_create(const char *name, struct device *dev, size_t size, + size_t align, size_t allocation) +{ + struct bnxt_compat_dma_pool *wrapper; + + wrapper = kmalloc_node(sizeof(*wrapper), GFP_KERNEL, dev_to_node(dev)); + if (!wrapper) + return NULL; + + wrapper->pool = dma_pool_create(name, dev, size, align, allocation); + wrapper->size = size; + + return wrapper; +} + +static inline void +bnxt_compat_dma_pool_destroy(struct bnxt_compat_dma_pool *wrapper) +{ + dma_pool_destroy(wrapper->pool); + kfree(wrapper); +} + +static inline void * +bnxt_compat_dma_pool_alloc(struct bnxt_compat_dma_pool *wrapper, + gfp_t mem_flags, dma_addr_t *handle) +{ + void *mem; + + mem = dma_pool_alloc(wrapper->pool, mem_flags & ~__GFP_ZERO, handle); + if (mem_flags & __GFP_ZERO) + memset(mem, 0, wrapper->size); + return mem; +} + +static inline void +bnxt_compat_dma_pool_free(struct bnxt_compat_dma_pool *wrapper, void *vaddr, + dma_addr_t addr) +{ + dma_pool_free(wrapper->pool, vaddr, addr); +} + +#define dma_pool_create bnxt_compat_dma_pool_create +#define dma_pool_destroy bnxt_compat_dma_pool_destroy +#define dma_pool_alloc bnxt_compat_dma_pool_alloc +#define dma_pool_free bnxt_compat_dma_pool_free +#define dma_pool bnxt_compat_dma_pool diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h index 09c22f8fe399..072f1ac98b92 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h @@ -10,6 +10,7 @@ #ifndef BNXT_COREDUMP_H #define BNXT_COREDUMP_H +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) struct bnxt_coredump_segment_hdr { __u8 signature[4]; __le32 component_id; @@ -63,4 +64,5 @@ struct 
bnxt_coredump_record { __u8 ioctl_high_version; __le16 rsvd3[313]; }; +#endif /* defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) */ #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index b1511bcffb1b..3dcbe80f14ad 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -15,9 +15,11 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/etherdevice.h> -#include <rdma/ib_verbs.h> +#include <linux/log2.h> +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB @@ -38,39 +40,42 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_cfg_input req = {0}; - int rc = 0, i; + struct hwrm_queue_pri2cos_cfg_input *req; u8 *pri2cos; + int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | - QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG); + if (rc) + return rc; - pri2cos = &req.pri0_cos_queue_id; + req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | + QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + + pri2cos = &req->pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 qidx; - req.enables |= cpu_to_le32( - QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); + req->enables |= cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); qidx = bp->tc_to_qidx[ets->prio_tc[i]]; pri2cos[i] = bp->q_info[qidx].queue_id; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req = {0}; - int rc = 0; + struct hwrm_queue_pri2cos_qcfg_output *resp; + struct hwrm_queue_pri2cos_qcfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i; @@ -84,25 +89,27 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->prio_tc[i] = tc; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, u8 max_tc) { - struct hwrm_queue_cos2bw_cfg_input req = {0}; + struct hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; - int rc = 0, i; void *data; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); for (i = 0; i < max_tc; i++) { u8 qidx = bp->tc_to_qidx[i]; - req.enables |= cpu_to_le32( - QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << - qidx); + req->enables |= + cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx); memset(&cos2bw, 0, sizeof(cos2bw)); 
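+		/* stage each TC's queue id and bandwidth parameters in
+		 * cos2bw, then copy them into the request at the matching
+		 * per-queue offset below
+		 */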
cos2bw.queue_id = bp->q_info[qidx].queue_id; @@ -121,31 +128,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); + data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { - req.queue_id0 = cos2bw.queue_id; - req.unused_0 = 0; + req->queue_id0 = cos2bw.queue_id; + req->unused_0 = 0; } } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_cos2bw_qcfg_input req = {0}; + struct hwrm_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -169,7 +177,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } @@ -231,7 +239,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_cfg_input req = {0}; + struct hwrm_queue_pfcenable_cfg_input *req; struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; @@ -267,44 +275,45 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) } if (need_q_remap) - rc = bnxt_queue_remap(bp, tc_mask); + bnxt_queue_remap(bp, tc_mask); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG); if (rc) return rc; - return rc; + req->flags = cpu_to_le32(pri_mask); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pfcenable_qcfg_input req = {0}; + struct hwrm_queue_pfcenable_qcfg_output *resp; + struct hwrm_queue_pfcenable_qcfg_input *req; u8 pri_mask; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_fw_set_structured_data_input set = {0}; - struct hwrm_fw_get_structured_data_input get = {0}; + struct 
hwrm_fw_set_structured_data_input *set; + struct hwrm_fw_get_structured_data_input *get; struct hwrm_struct_data_dcbx_app *fw_app; struct hwrm_struct_hdr *data; dma_addr_t mapping; @@ -314,19 +323,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10601) return 0; + rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA); + if (rc) + return rc; + + hwrm_req_hold(bp, get); + hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO); + n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; + data = hwrm_req_dma_slice(bp, get, data_len, &mapping); + if (!data) { + rc = -ENOMEM; + goto set_app_exit; + } - bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); - get.dest_data_addr = cpu_to_le64(mapping); - get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); - get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - get.count = 0; - rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT); + get->dest_data_addr = cpu_to_le64(mapping); + get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get->count = 0; + rc = hwrm_req_send(bp, get); if (rc) goto set_app_exit; @@ -372,44 +388,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, data->len = cpu_to_le16(sizeof(*fw_app) * n); data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1); - set.src_data_addr = cpu_to_le64(mapping); - set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); - set.hdr_cnt = 1; - rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA); + if (rc) + goto set_app_exit; + + set->src_data_addr = cpu_to_le64(mapping); + set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set->hdr_cnt = 1; + rc = hwrm_req_send(bp, set); set_app_exit: - dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); + hwrm_req_drop(bp, get); /* dropping get request and associated slice */ return rc; } static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) { - struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_dscp_qcaps_input req = {0}; + struct hwrm_queue_dscp_qcaps_output *resp; + struct hwrm_queue_dscp_qcaps_input *req; int rc; bp->max_dscp_value = 0; if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; if (bp->max_dscp_value < 0x3f) bp->max_dscp_value = 0; } - - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_queue_dscp2pri_cfg_input req = {0}; + struct hwrm_queue_dscp2pri_cfg_input *req; struct bnxt_dscp2pri_entry *dscp2pri; dma_addr_t mapping; int rc; @@ -417,23 +438,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10800) 
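+		/* DSCP-to-priority mapping needs HWRM spec 1.8.0 (0x10800)
+		 * or newer; treat older firmware as a no-op success
+		 */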
return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1); - dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri), - &mapping, GFP_KERNEL); - if (!dscp2pri) - return -ENOMEM; + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG); + if (rc) + return rc; - req.src_data_addr = cpu_to_le64(mapping); + dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping); + if (!dscp2pri) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->src_data_addr = cpu_to_le64(mapping); dscp2pri->dscp = app->protocol; if (add) dscp2pri->mask = 0x3f; else dscp2pri->mask = 0; dscp2pri->pri = app->priority; - req.entry_cnt = cpu_to_le16(1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, - mapping); + req->entry_cnt = cpu_to_le16(1); + rc = hwrm_req_send(bp, req); return rc; } @@ -551,7 +574,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct bnxt *bp = netdev_priv(dev); - __le64 *stats = (__le64 *)bp->hw_rx_port_stats; + __le64 *stats = bp->port_stats.hw_stats; struct ieee_pfc *my_pfc = bp->ieee_pfc; long rx_off, tx_off; int i, rc; @@ -640,7 +663,7 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) return rc; if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_IBOE) || + app->protocol == ETH_P_ROCE) || (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM && app->protocol == ROCE_V2_UDP_DPORT)) rc = bnxt_hwrm_set_dcbx_app(bp, app, true); @@ -668,7 +691,7 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) if (rc) return rc; if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_IBOE) || + app->protocol == ETH_P_ROCE) || (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM && app->protocol == ROCE_V2_UDP_DPORT)) rc = bnxt_hwrm_set_dcbx_app(bp, app, false); @@ -679,6 +702,59 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) return rc; } +static void __bnxt_del_roce_app(struct bnxt *bp, struct dcb_app *app) +{ + u32 prio_mask = dcb_ieee_getapp_mask(bp->dev, app); + + if (!prio_mask) + return; + + app->priority = ilog2(prio_mask); + dcb_ieee_delapp(bp->dev, app); +} + +static void bnxt_del_roce_apps(struct bnxt *bp) +{ + struct dcb_app app; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return; + + app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; + app.protocol = ETH_P_ROCE; + __bnxt_del_roce_app(bp, &app); + + app.selector = IEEE_8021QAZ_APP_SEL_DGRAM; + app.protocol = ROCE_V2_UDP_DPORT; + __bnxt_del_roce_app(bp, &app); +} + +static void bnxt_del_dscp_apps(struct bnxt *bp) +{ +#ifdef HAVE_DSCP_MASK_MAP + struct dcb_ieee_app_prio_map dscp_map; + struct dcb_app app; + int i, j; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return; + + app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + dcb_ieee_getapp_prio_dscp_mask_map(bp->dev, &dscp_map); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + for (j = 0; j < 64; j++) { + if (dscp_map.map[i] & (1ULL << j)) { + app.protocol = j; + app.priority = i; + dcb_ieee_delapp(bp->dev, &app); + } + } + } +#endif +} + static u8 bnxt_dcbnl_getdcbx(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -737,12 +813,16 @@ void bnxt_dcb_init(struct bnxt *bp) bp->dev->dcbnl_ops = &dcbnl_ops; } -void bnxt_dcb_free(struct bnxt 
*bp) +void bnxt_dcb_free(struct bnxt *bp, bool reset) { kfree(bp->ieee_pfc); kfree(bp->ieee_ets); bp->ieee_pfc = NULL; bp->ieee_ets = NULL; + if (reset) { + bnxt_del_roce_apps(bp); + bnxt_del_dscp_apps(bp); + } } #else @@ -751,7 +831,7 @@ void bnxt_dcb_init(struct bnxt *bp) { } -void bnxt_dcb_free(struct bnxt *bp) +void bnxt_dcb_free(struct bnxt *bp, bool reset) { } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index 6eed231de565..d6d0c18cf5c1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -40,15 +40,12 @@ struct bnxt_dscp2pri_entry { }; #define BNXT_LLQ(q_profile) \ - ((q_profile) == \ - QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) - + ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) #define BNXT_CNPQ(q_profile) \ - ((q_profile) == \ - QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP) + ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP) #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300 void bnxt_dcb_init(struct bnxt *bp); -void bnxt_dcb_free(struct bnxt *bp); +void bnxt_dcb_free(struct bnxt *bp, bool reset); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c index 156c2404854f..fc6be50349ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c @@ -11,9 +11,15 @@ #include <linux/module.h> #include <linux/pci.h> #include "bnxt_hsi.h" +#include "bnxt_compat.h" +#ifdef HAVE_DIM #include <linux/dim.h> +#else +#include "bnxt_dim.h" +#endif #include "bnxt.h" -#include "bnxt_debugfs.h" + +#ifdef CONFIG_DEBUG_FS static struct dentry *bnxt_debug_mnt; @@ -105,3 +111,5 @@ void bnxt_debug_exit(void) { debugfs_remove_recursive(bnxt_debug_mnt); } + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs_cpt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs_cpt.c new file mode 100644 index 000000000000..1bde89dc913d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs_cpt.c @@ -0,0 +1,132 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#ifdef HAVE_DIM +#include <linux/dim.h> +#else +#include "bnxt_dim.h" +#endif +#include "bnxt.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *bnxt_debug_mnt; + +static ssize_t debugfs_dim_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct dim *dim = filep->private_data; + int len; + char *buf; + + if (*ppos) + return 0; + if (!dim) + return -ENODEV; + buf = kasprintf(GFP_KERNEL, + "state = %d\n" \ + "profile_ix = %d\n" \ + "mode = %d\n" \ + "tune_state = %d\n" \ + "steps_right = %d\n" \ + "steps_left = %d\n" \ + "tired = %d\n", + dim->state, + dim->profile_ix, + dim->mode, + dim->tune_state, + dim->steps_right, + dim->steps_left, + dim->tired); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_dim_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dim_read, +}; + +static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx, + struct dentry *dd) +{ + static char qname[16]; + + snprintf(qname, 10, "%d", ring_idx); + return debugfs_create_file(qname, 0600, dd, + dim, &debugfs_dim_fops); +} + +void bnxt_debug_dev_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct dentry *pdevf; + int i; + + bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); + if (bp->debugfs_pdev) { + pdevf = debugfs_create_dir("dim", bp->debugfs_pdev); + if (!pdevf) { + pr_err("failed to create debugfs entry %s/dim\n", + pname); + return; + } + bp->debugfs_dim = pdevf; + /* create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) { + pdevf = debugfs_dim_ring_init(&cpr->dim, i, + bp->debugfs_dim); + if (!pdevf) + pr_err("failed to create debugfs entry %s/dim/%d\n", + pname, i); + } + } + } else { + pr_err("failed to create debugfs entry %s\n", pname); + } +} + +void bnxt_debug_dev_exit(struct bnxt *bp) +{ + if (bp) { + debugfs_remove_recursive(bp->debugfs_pdev); + bp->debugfs_pdev = NULL; + } +} + +void bnxt_debug_init(void) +{ + bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); + if (!bnxt_debug_mnt) + pr_err("failed to init bnxt_en debugfs\n"); +} + +void bnxt_debug_exit(void) +{ + debugfs_remove_recursive(bnxt_debug_mnt); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 2d817ba0602c..75f94b7e0218 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -9,14 +9,228 @@ #include <linux/pci.h> #include <linux/netdevice.h> +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) #include <net/devlink.h> +#endif +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" +#include "bnxt_ethtool.h" +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) +#ifdef HAVE_DEVLINK_INFO +static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, + union bnxt_nvm_data *src, + int nvm_num_bits, int dl_num_bytes); + +static int bnxt_hwrm_nvm_get_var(struct bnxt *bp, dma_addr_t data_dma_addr, + u16 offset, u16 num_bits) +{ + 
struct hwrm_nvm_get_variable_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->option_num = cpu_to_le16(offset); + req->data_len = cpu_to_le16(num_bits); + return hwrm_req_send_silent(bp, req); +} + +static int bnxt_get_nvm_cfg_ver(struct bnxt *bp, + union devlink_param_value *nvm_cfg_ver) +{ + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + int rc; + + data = dma_zalloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rc = bnxt_hwrm_nvm_get_var(bp, data_dma_addr, NVM_OFF_NVM_CFG_VER, + BNXT_NVM_CFG_VER_BITS); + if (!rc) + bnxt_copy_from_nvm_data(nvm_cfg_ver, data, + BNXT_NVM_CFG_VER_BITS, + BNXT_NVM_CFG_VER_BYTES); + + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + return rc; +} + +static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + union devlink_param_value nvm_cfg_ver; + struct hwrm_ver_get_output *ver_resp; + char mgmt_ver[FW_VER_STR_LEN]; + char roce_ver[FW_VER_STR_LEN]; + char fw_ver[FW_VER_STR_LEN]; + char buf[32]; + int rc; + + rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME); + if (rc) + return rc; + + if (strlen(bp->board_partno)) { + rc = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + bp->board_partno); + if (rc) + return rc; + } + + if (strlen(bp->board_serialno)) { + rc = devlink_info_board_serial_number_put(req, bp->board_serialno); + if (rc) + return rc; + } + + sprintf(buf, "%X", bp->chip_num); + rc = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf); + if (rc) + return rc; + + ver_resp = &bp->ver_resp; + sprintf(buf, "%X", ver_resp->chip_rev); + rc = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf); + if (rc) + return rc; + + if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) { + sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X", + bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4], + bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]); + rc = devlink_info_serial_number_put(req, buf); + if (rc) + return rc; + } + + if (strlen(bp->board_serialno)) { + rc = devlink_info_version_fixed_put(req, "vpd.serialno", + bp->board_serialno); + if (rc) + return rc; + } + + if (strlen(ver_resp->active_pkg_name)) { + rc = + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + ver_resp->active_pkg_name); + if (rc) + return rc; + } + + if (BNXT_PF(bp) && !bnxt_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) { + u32 ver = nvm_cfg_ver.vu32; + + sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF, + ver & 0xF); + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf); + if (rc) + return rc; + } + + if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) { + snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor, + ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch); + + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor, + ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_major, ver_resp->roce_fw_minor, + ver_resp->roce_fw_build, ver_resp->roce_fw_patch); + } else { + snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b, + 
ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b); + + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b, + ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b, + ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b); + } + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, fw_ver); + if (rc) + return rc; + + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + bp->hwrm_ver_supp); + if (rc) + return rc; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, mgmt_ver); + if (rc) + return rc; + + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + return rc; + } + return 0; +} +#endif /* HAVE_DEVLINK_INFO */ + +#ifdef HAVE_DEVLINK_FLASH_UPDATE +static int +bnxt_dl_flash_update(struct devlink *dl, const char *filename, + const char *region, struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; + + if (region) + return -EOPNOTSUPP; + + if (!BNXT_PF(bp)) { + NL_SET_ERR_MSG_MOD(extack, + "flash update not supported from a VF"); + return -EPERM; + } + + devlink_flash_update_begin_notify(dl); + devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0, + 0); + rc = bnxt_flash_package_from_file(bp->dev, filename, 0); + if (!rc) + devlink_flash_update_status_notify(dl, "Flashing done", region, + 0, 0); + else + devlink_flash_update_status_notify(dl, "Flashing failed", + region, 0, 0); + devlink_flash_update_end_notify(dl); + return rc; +} +#endif /* HAVE_DEVLINK_FLASH_UPDATE */ + +#ifdef HAVE_DEVLINK_HEALTH_REPORT static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg) + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); u32 val, health_status; @@ -60,7 +274,8 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { }; static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter, - void *priv_ctx) + void *priv_ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); @@ -68,7 +283,11 @@ static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter, return -EOPNOTSUPP; bnxt_fw_reset(bp); +#ifdef HAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE + return -EINPROGRESS; +#else return 0; +#endif } static const @@ -78,7 +297,8 @@ struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = { }; static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, - void *priv_ctx) + void *priv_ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; @@ -87,13 +307,18 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, if (!priv_ctx) return -EOPNOTSUPP; + bp->fw_health->fatal = true; event = fw_reporter_ctx->sp_event; if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT) bnxt_fw_reset(bp); else if (event == BNXT_FW_EXCEPTION_SP_EVENT) bnxt_fw_exception(bp); +#ifdef HAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE + return -EINPROGRESS; +#else return 0; +#endif } static const @@ -102,6 +327,27 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { .recover = 
bnxt_fw_fatal_recover, }; +static struct devlink_health_reporter * +__bnxt_dl_reporter_create(struct bnxt *bp, + const struct devlink_health_reporter_ops *ops) +{ + struct devlink_health_reporter *reporter; + +#ifndef HAVE_DEVLINK_HEALTH_AUTO_RECOVER + reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp); +#else + reporter = devlink_health_reporter_create(bp->dl, ops, 0, + !!ops->recover, bp); +#endif + if (IS_ERR(reporter)) { + netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n", + ops->name, PTR_ERR(reporter)); + return NULL; + } + + return reporter; +} + void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; @@ -113,15 +359,9 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp) goto err_recovery; health->fw_reset_reporter = - devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_reset_reporter_ops, - 0, true, bp); - if (IS_ERR(health->fw_reset_reporter)) { - netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", - PTR_ERR(health->fw_reset_reporter)); - health->fw_reset_reporter = NULL; + __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reset_reporter_ops); + if (!health->fw_reset_reporter) bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; - } err_recovery: if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) @@ -129,13 +369,8 @@ err_recovery: if (!health->fw_reporter) { health->fw_reporter = - devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_reporter_ops, - 0, false, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; + __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops); + if (!health->fw_reporter) { bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; return; } @@ -145,15 +380,9 @@ err_recovery: return; health->fw_fatal_reporter = - devlink_health_reporter_create(bp->dl, - &bnxt_dl_fw_fatal_reporter_ops, - 0, true, bp); - if (IS_ERR(health->fw_fatal_reporter)) { - netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", - PTR_ERR(health->fw_fatal_reporter)); - health->fw_fatal_reporter = NULL; + __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_fatal_reporter_ops); + if (!health->fw_fatal_reporter) bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; - } } void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) @@ -219,15 +448,59 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) } } +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) +{ + struct bnxt_fw_health *health = bp->fw_health; + u8 state; + + if (healthy) + state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; + else + state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; + + if (health->fatal) + devlink_health_reporter_state_update(health->fw_fatal_reporter, + state); + else + devlink_health_reporter_state_update(health->fw_reset_reporter, + state); + + health->fatal = false; +} + +void bnxt_dl_health_recovery_done(struct bnxt *bp) +{ + struct bnxt_fw_health *hlth = bp->fw_health; + + if (hlth->fatal) + devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter); + else + devlink_health_reporter_recovery_done(hlth->fw_reset_reporter); +} +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ + static const struct devlink_ops bnxt_dl_ops = { +#ifdef CONFIG_VF_REPS #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, .eswitch_mode_get = bnxt_dl_eswitch_mode_get, #endif /* CONFIG_BNXT_SRIOV */ +#endif /* CONFIG_VF_REPS */ +#ifdef HAVE_DEVLINK_INFO + .info_get = bnxt_dl_info_get, +#endif /* 
HAVE_DEVLINK_INFO */ +#ifdef HAVE_DEVLINK_FLASH_UPDATE + .flash_update = bnxt_dl_flash_update, +#endif /* HAVE_DEVLINK_FLASH_UPDATE */ }; -static const struct devlink_ops bnxt_vf_dl_ops; +static const struct devlink_ops bnxt_vf_dl_ops = { +#ifdef HAVE_DEVLINK_INFO + .info_get = bnxt_dl_info_get, +#endif /* HAVE_DEVLINK_INFO */ +}; +#ifdef HAVE_DEVLINK_PARAM enum bnxt_dl_param_id { BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, @@ -236,21 +509,18 @@ enum bnxt_dl_param_id { static const struct bnxt_dl_nvm_param nvm_params[] = { {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, BNXT_NVM_SHARED_CFG, 1, 1}, +#ifdef HAVE_IGNORE_ARI {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, BNXT_NVM_SHARED_CFG, 1, 1}, {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4}, {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4}, +#endif {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, BNXT_NVM_SHARED_CFG, 1, 1}, }; -union bnxt_nvm_data { - u8 val8; - __le32 val32; -}; - static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst, union devlink_param_value *src, int nvm_num_bits, int dl_num_bytes) @@ -290,17 +560,20 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, } static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, - int msg_len, union devlink_param_value *val) + union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; struct bnxt_dl_nvm_param nvm_param; + struct hwrm_err_output *resp; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int idx = 0, rc, i; /* Get/Set NVM CFG parameter is supported only on PFs */ - if (BNXT_VF(bp)) + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); return -EPERM; + } for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { if (nvm_params[i].id == param_id) { @@ -309,18 +582,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } } - if (i == ARRAY_SIZE(nvm_params)) + if (i == ARRAY_SIZE(nvm_params)) { + hwrm_req_drop(bp, req); return -EOPNOTSUPP; + } if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + + if (!data) { + hwrm_req_drop(bp, req); return -ENOMEM; + } req->dest_data_addr = cpu_to_le64(data_dma_addr); req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); @@ -329,26 +606,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); + resp = hwrm_req_hold(bp, req); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, msg); } else { - rc = hwrm_send_message_silent(bp, msg, msg_len, - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, msg); if (!rc) { bnxt_copy_from_nvm_data(val, data, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); } else { - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - if (resp->cmd_err == NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) rc = -EOPNOTSUPP; } } - dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + hwrm_req_drop(bp, req); if (rc == -EACCES) 
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); return rc; @@ -357,33 +632,38 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_input *req; struct bnxt *bp = bnxt_get_bp_from_dl(dl); int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); - if (!rc) - if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) - ctx->val.vbool = !ctx->val.vbool; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); + if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; return rc; } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_set_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + struct hwrm_nvm_set_variable_input *req; + int rc; if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) ctx->val.vbool = !ctx->val.vbool; - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE); + if (rc) + return rc; + + return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); } +#ifdef HAVE_IGNORE_ARI static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) @@ -403,12 +683,14 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, return 0; } +#endif static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, NULL), +#ifdef HAVE_IGNORE_ARI DEVLINK_PARAM_GENERIC(IGNORE_ARI, BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, @@ -421,6 +703,7 @@ static const struct devlink_param bnxt_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, bnxt_dl_msix_validate), +#endif DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_PERMANENT), @@ -428,78 +711,114 @@ static const struct devlink_param bnxt_dl_params[] = { NULL), }; +#ifdef HAVE_DEVLINK_PORT_PARAM static const struct devlink_param bnxt_dl_port_params[] = { }; +#endif +#endif /* HAVE_DEVLINK_PARAM */ + +static int bnxt_dl_params_register(struct bnxt *bp) +{ +#ifdef HAVE_DEVLINK_PARAM + int rc; + + if (bp->hwrm_spec_code < 0x10600) + return 0; + + rc = devlink_params_register(bp->dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + if (rc) { + netdev_warn(bp->dev, "devlink_params_register failed. 
rc=%d\n", + rc); + return rc; + } +#ifdef HAVE_DEVLINK_PORT_PARAM + rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + if (rc) { + netdev_err(bp->dev, "devlink_port_params_register failed\n"); + devlink_params_unregister(bp->dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + return rc; + } +#endif /* HAVE_DEVLINK_PORT_PARAM */ + devlink_params_publish(bp->dl); +#endif /* HAVE_DEVLINK_PARAM */ + + return 0; +} + +static void bnxt_dl_params_unregister(struct bnxt *bp) +{ +#ifdef HAVE_DEVLINK_PARAM + if (bp->hwrm_spec_code < 0x10600) + return; + + devlink_params_unregister(bp->dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); +#ifdef HAVE_DEVLINK_PORT_PARAM + devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); +#endif /* HAVE_DEVLINK_PORT_PARAM */ +#endif /* HAVE_DEVLINK_PARAM */ +} int bnxt_dl_register(struct bnxt *bp) { struct devlink *dl; int rc; - if (bp->hwrm_spec_code < 0x10600) { - netdev_warn(bp->dev, "Firmware does not support NVM params"); - return -ENOTSUPP; - } - if (BNXT_PF(bp)) dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); else dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { - netdev_warn(bp->dev, "devlink_alloc failed"); + netdev_warn(bp->dev, "devlink_alloc failed\n"); return -ENOMEM; } bnxt_link_bp_to_dl(bp, dl); +#ifdef CONFIG_VF_REPS /* Add switchdev eswitch mode setting, if SRIOV supported */ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; +#endif rc = devlink_register(dl, &bp->pdev->dev); if (rc) { - netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc); goto err_dl_free; } if (!BNXT_PF(bp)) return 0; - rc = devlink_params_register(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - if (rc) { - netdev_warn(bp->dev, "devlink_params_register failed. 
rc=%d", - rc); - goto err_dl_unreg; - } - +#ifdef HAVE_DEVLINK_PORT_PARAM +#ifdef HAVE_DEVLINK_PORT_ATTRS devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - bp->pf.port_id, false, 0, - bp->switch_id, sizeof(bp->switch_id)); + bp->pf.port_id, false, 0, bp->dsn, + sizeof(bp->dsn)); +#endif rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { - netdev_err(bp->dev, "devlink_port_register failed"); - goto err_dl_param_unreg; + netdev_err(bp->dev, "devlink_port_register failed\n"); + goto err_dl_unreg; } +#endif /* HAVE_DEVLINK_PORT_PARAM */ - rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed"); + rc = bnxt_dl_params_register(bp); + if (rc) goto err_dl_port_unreg; - } - - devlink_params_publish(dl); return 0; err_dl_port_unreg: +#ifdef HAVE_DEVLINK_PORT_PARAM devlink_port_unregister(&bp->dl_port); -err_dl_param_unreg: - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); err_dl_unreg: +#endif /* HAVE_DEVLINK_PORT_PARAM */ devlink_unregister(dl); err_dl_free: bnxt_link_bp_to_dl(bp, NULL); @@ -515,13 +834,35 @@ void bnxt_dl_unregister(struct bnxt *bp) return; if (BNXT_PF(bp)) { - devlink_port_params_unregister(&bp->dl_port, - bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); + bnxt_dl_params_unregister(bp); +#ifdef HAVE_DEVLINK_PORT_PARAM devlink_port_unregister(&bp->dl_port); - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); +#endif } devlink_unregister(dl); devlink_free(dl); } +#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +#ifndef HAVE_DEVLINK_HEALTH_REPORT +void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) +{ + switch (event) { + case BNXT_FW_RESET_NOTIFY_SP_EVENT: + bnxt_fw_reset(bp); + break; + case BNXT_FW_EXCEPTION_SP_EVENT: + bnxt_fw_exception(bp); + break; + } +} + +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) +{ +} + +void bnxt_dl_health_recovery_done(struct bnxt *bp) +{ +} + +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 689c47ab2155..a510924ea793 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -10,6 +10,7 @@ #ifndef BNXT_DEVLINK_H #define BNXT_DEVLINK_H +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) /* Struct to hold housekeeping info needed by devlink interface */ struct bnxt_dl { struct bnxt *bp; /* back ptr to the controlling dev */ @@ -33,15 +34,25 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#ifdef HAVE_DEVLINK_PARAM #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 #define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 #define NVM_OFF_IGNORE_ARI 164 #define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define NVM_OFF_NVM_CFG_VER 602 -#define BNXT_MSIX_VEC_MAX 1280 +#define BNXT_NVM_CFG_VER_BITS 24 +#define BNXT_NVM_CFG_VER_BYTES 4 + +#define BNXT_MSIX_VEC_MAX 512 #define BNXT_MSIX_VEC_MIN_MAX 128 +union bnxt_nvm_data { + u8 val8; + __le32 val32; +}; + enum bnxt_nvm_dir_type { BNXT_NVM_SHARED_CFG = 40, BNXT_NVM_PORT_CFG, @@ -55,11 +66,36 @@ struct bnxt_dl_nvm_param { u16 nvm_num_bits; u8 dl_num_bytes; }; +#endif /* HAVE_DEVLINK_PARAM */ -void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); -void bnxt_dl_fw_reporters_create(struct bnxt *bp); -void 
bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); +#else /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +static inline int bnxt_dl_register(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_dl_unregister(struct bnxt *bp) +{ +} + +#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ +void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_health_recovery_done(struct bnxt *bp); +#ifdef HAVE_DEVLINK_HEALTH_REPORT +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); +#else +static inline void bnxt_dl_fw_reporters_create(struct bnxt *bp) +{ +} + +static inline void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) +{ +} +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ #endif /* BNXT_DEVLINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c index 6f6576dc417a..a2a816782293 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c @@ -7,7 +7,14 @@ * the Free Software Foundation. */ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "bnxt_compat.h" +#ifdef HAVE_DIM #include <linux/dim.h> +#else +#include "bnxt_dim.h" +#endif #include "bnxt_hsi.h" #include "bnxt.h" @@ -29,3 +36,32 @@ void bnxt_dim_work(struct work_struct *work) bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); dim->state = DIM_START_MEASURE; } + +#ifndef HAVE_DIM +void net_dim(struct dim *dim, struct dim_sample end_sample) +{ + struct dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < DIM_NEVENTS) + break; + dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + fallthrough; + case DIM_START_MEASURE: + dim->state = DIM_MEASURE_IN_PROGRESS; + break; + case DIM_APPLY_NEW_PROFILE: + break; + } +} +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.h new file mode 100644 index 000000000000..bb263444e965 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.h @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include <linux/module.h> + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct dim { /* Adaptive Moderation */ + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +enum { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +enum { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +enum { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ +#define NET_DIM_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {4, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {32, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct dim_cq_moder +profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_EQE_PROFILES, + NET_DIM_CQE_PROFILES, +}; + +static inline struct dim_cq_moder +net_dim_get_rx_moderation(u8 cq_period_mode, int ix) +{ + struct dim_cq_moder cq_moder = profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct dim_cq_moder +net_dim_get_def_rx_moderation(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = NET_DIM_DEF_PROFILE_CQE; + else /* DIM_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_rx_moderation(rx_cq_period_mode, default_profile_ix); +} + +static inline bool dim_on_top(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + return true; + case DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void dim_turn(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case 
DIM_GOING_RIGHT: + dim->tune_state = DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case DIM_GOING_LEFT: + dim->tune_state = DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return DIM_TOO_TIRED; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return DIM_STEPPED; +} + +static inline void dim_park_on_top(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = DIM_PARKING_ON_TOP; +} + +static inline void dim_park_tired(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct dim *dim) +{ + dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : + DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct dim_stats *curr, + struct dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->epms) + return DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
DIM_STATS_BETTER : + DIM_STATS_WORSE; + + return DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct dim_stats *curr_stats, + struct dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case DIM_GOING_RIGHT: + case DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != DIM_STATS_BETTER) + dim_turn(dim); + + if (dim_on_top(dim)) { + dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case DIM_ON_EDGE: + dim_park_on_top(dim); + break; + case DIM_TOO_TIRED: + dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != DIM_PARKING_ON_TOP) || + (dim->tune_state != DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void dim_update_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define DIM_NEVENTS 320 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void dim_calc_stats(struct dim_sample *start, + struct dim_sample *end, + struct dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +void net_dim(struct dim *dim, struct dim_sample end_sample); + +#endif /* NET_DIM_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.c new file mode 100644 index 000000000000..e0e3ebdab505 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.c @@ -0,0 +1,497 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_eem.h" + +static int bnxt_hwrm_cfa_eem_qcaps(struct bnxt *bp, enum bnxt_eem_dir dir) +{ + struct bnxt_eem_ctx_mem_info *ctxp = &bp->eem_info->eem_ctx_info[dir]; + struct bnxt_eem_pg_misc *tbl = &ctxp->eem_misc[KEY0_TABLE]; + struct bnxt_eem_caps *cap = &bp->eem_info->eem_caps[dir]; + struct hwrm_cfa_eem_qcaps_output *resp; + struct hwrm_cfa_eem_qcaps_input *req; + u32 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_EEM_QCAPS); + if (!rc) { + flags = dir == BNXT_EEM_DIR_TX ? 
+ CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX : + CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX; + req->flags = cpu_to_le32(flags); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + } + if (rc) { + hwrm_req_drop(bp, req); + netdev_warn(bp->dev, "Failed to read CFA %s EEM Capabilities\n", + BNXT_EEM_DIR(dir)); + return rc; + } + + cap->flags = le32_to_cpu(resp->flags); + cap->supported = le32_to_cpu(resp->supported); + cap->max_entries_supported = le32_to_cpu(resp->max_entries_supported); + cap->key_entry_size = le16_to_cpu(resp->key_entry_size); + cap->record_entry_size = le16_to_cpu(resp->record_entry_size); + cap->efc_entry_size = le16_to_cpu(resp->efc_entry_size); + hwrm_req_drop(bp, req); + + netdev_dbg(bp->dev, "%s CFA EEM Capabilities:\n", BNXT_EEM_DIR(dir)); + netdev_dbg(bp->dev, "supported: 0x%x max_entries_supported: 0x%x\n", + resp->supported, resp->max_entries_supported); + netdev_dbg(bp->dev, "key_entry_size: 0x%x record_entry_size: 0x%x\n", + resp->key_entry_size, resp->record_entry_size); + + tbl[KEY0_TABLE].type = KEY0_TABLE; + tbl[KEY1_TABLE].type = KEY1_TABLE; + tbl[RECORD_TABLE].type = RECORD_TABLE; + tbl[EFC_TABLE].type = EFC_TABLE; + + tbl[KEY0_TABLE].entry_size = cap->key_entry_size; + tbl[KEY1_TABLE].entry_size = cap->key_entry_size; + tbl[RECORD_TABLE].entry_size = cap->record_entry_size; + tbl[EFC_TABLE].entry_size = cap->efc_entry_size; + + if (BNXT_CHIP_P4(bp) && (dir == BNXT_EEM_DIR_TX)) + cap->record_entry_size = BNXT_EEM_TX_RECORD_SIZE; + + tbl[KEY0_TABLE].ctx_id = BNXT_EEM_CTX_ID_INVALID; + tbl[KEY1_TABLE].ctx_id = BNXT_EEM_CTX_ID_INVALID; + tbl[RECORD_TABLE].ctx_id = BNXT_EEM_CTX_ID_INVALID; + tbl[EFC_TABLE].ctx_id = BNXT_EEM_CTX_ID_INVALID; + + return rc; +} + +void bnxt_hwrm_query_eem_caps(struct bnxt *bp) +{ + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_EEM)) + return; + + bp->eem_info = kzalloc(sizeof(*bp->eem_info), GFP_KERNEL); + if (!bp->eem_info) + return; + + /* Query tx eem caps */ + bnxt_hwrm_cfa_eem_qcaps(bp, BNXT_EEM_DIR_TX); + + /* Query rx eem caps */ + bnxt_hwrm_cfa_eem_qcaps(bp, BNXT_EEM_DIR_RX); +} + +static int bnxt_hwrm_cfa_eem_op(struct bnxt *bp, enum bnxt_eem_dir dir, u16 op) +{ + struct hwrm_cfa_eem_op_input *req; + u32 flags; + int rc; + + if (op != CFA_EEM_OP_REQ_OP_EEM_ENABLE && + op != CFA_EEM_OP_REQ_OP_EEM_DISABLE && + op != CFA_EEM_OP_REQ_OP_EEM_CLEANUP) + return -EINVAL; + + rc = hwrm_req_init(bp, req, HWRM_CFA_EEM_OP); + if (!rc) { + flags = (dir == BNXT_EEM_DIR_TX) ? 
+ CFA_EEM_OP_REQ_FLAGS_PATH_TX : + CFA_EEM_OP_REQ_FLAGS_PATH_RX; + req->flags = cpu_to_le32(flags); + req->op = cpu_to_le16(op); + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_warn(bp->dev, "%s CFA EEM OP:%d failed, rc=%d", + BNXT_EEM_DIR(dir), op, rc); + else + netdev_dbg(bp->dev, "%s CFA EEM OP:%d succeeded", + BNXT_EEM_DIR(dir), op); + return rc; +} + +static int bnxt_hwrm_cfa_eem_mem_unrgtr(struct bnxt *bp, u16 *ctx_id) +{ + struct hwrm_cfa_ctx_mem_unrgtr_input *req; + int rc = 0; + + if (*ctx_id == BNXT_EEM_CTX_ID_INVALID) + return rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_CTX_MEM_UNRGTR); + if (!rc) { + req->ctx_id = cpu_to_le16(*ctx_id); + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_warn(bp->dev, "Failed to Unregister CFA EEM Mem\n"); + else + *ctx_id = BNXT_EEM_CTX_ID_INVALID; + return rc; +} + +static void bnxt_eem_ctx_unreg(struct bnxt *bp, enum bnxt_eem_dir dir) +{ + struct bnxt_eem_ctx_mem_info *ctxp = &bp->eem_info->eem_ctx_info[dir]; + struct bnxt_eem_pg_misc *tbl_misc; + struct bnxt_ctx_pg_info *tbl; + int tbl_type; + + for (tbl_type = KEY0_TABLE; tbl_type < MAX_TABLE; tbl_type++) { + if (tbl_type == EFC_TABLE) + continue; + tbl = &ctxp->eem_tables[tbl_type]; + tbl_misc = &ctxp->eem_misc[tbl_type]; + bnxt_hwrm_cfa_eem_mem_unrgtr(bp, &tbl_misc->ctx_id); + bnxt_free_ctx_pg_tbls(bp, tbl); + } +} + +static void bnxt_unconfig_eem(struct bnxt *bp, enum bnxt_eem_dir dir) +{ + bool configured = false; + + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_EEM) || + dir < BNXT_EEM_DIR_TX || dir > BNXT_EEM_DIR_RX) + return; + + if (dir == BNXT_EEM_DIR_TX) { + configured = bp->eem_info->eem_cfg_tx_dir ? true : false; + bp->eem_info->eem_cfg_tx_dir = false; + } else { + configured = bp->eem_info->eem_cfg_rx_dir ? true : false; + bp->eem_info->eem_cfg_rx_dir = false; + } + + if (configured) { + bnxt_hwrm_cfa_eem_op(bp, dir, CFA_EEM_OP_REQ_OP_EEM_DISABLE); + bnxt_hwrm_cfa_eem_op(bp, dir, CFA_EEM_OP_REQ_OP_EEM_CLEANUP); + } + + if (bp->eem_info->eem_primary) + bnxt_eem_ctx_unreg(bp, dir); + + if (!bp->eem_info->eem_cfg_tx_dir && !bp->eem_info->eem_cfg_rx_dir) { + bp->eem_info->eem_primary = false; + bp->eem_info->eem_group = -1; + } +} + +static int bnxt_eem_validate_num_entries(struct bnxt *bp, + enum bnxt_eem_dir dir, + u32 num_entries) +{ + struct bnxt_eem_caps *cap = &bp->eem_info->eem_caps[dir]; + + if (num_entries < BNXT_EEM_MIN_ENTRIES || + num_entries > cap->max_entries_supported || + num_entries != roundup_pow_of_two(num_entries)) { + netdev_warn(bp->dev, "EEM: Invalid num_entries requested: %u\n", + num_entries); + return -EINVAL; + } + + bp->eem_info->eem_ctx_info[dir].num_entries = num_entries; + + return 0; +} + +static void bnxt_eem_pg_size_table(struct bnxt_eem_caps *cap, int tbl_type, + u32 num_entries, u32 *size, u8 *depth, + struct bnxt_eem_pg_misc *tbl_misc) +{ + u32 mem_size = 0; + + if (tbl_type == KEY0_TABLE || tbl_type == KEY1_TABLE) { + mem_size = cap->key_entry_size * num_entries; + tbl_misc->entry_size = cap->key_entry_size; + } + + if (tbl_type == RECORD_TABLE) { + mem_size = cap->record_entry_size * num_entries * 2; + tbl_misc->entry_size = cap->record_entry_size; + } + + if (tbl_type == EFC_TABLE) { + mem_size = cap->efc_entry_size * num_entries; + tbl_misc->entry_size = cap->efc_entry_size; + } + + tbl_misc->type = tbl_type; + *size = mem_size; + *depth = (DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE) < MAX_CTX_PAGES) ? 
+ 1 : 2; +} + +static u8 bnxt_eem_page_level_code(int pg_lvl) +{ + u8 page_level; + + switch (pg_lvl) { + case PT_LVL_2: + page_level = CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2; + break; + case PT_LVL_1: + page_level = CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_1; + break; + case PT_LVL_0: + page_level = CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_0; + break; + default: + page_level = 0xff; + break; + } + return page_level; +} + +static u8 bnxt_eem_page_size_code(u32 page_size) +{ + u8 pg_sz_code = 0xff; + + switch (page_size) { + case BNXT_EEM_PG_SZ_4K: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4K; + break; + case BNXT_EEM_PG_SZ_8K: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_8K; + break; + case BNXT_EEM_PG_SZ_64K: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_64K; + break; + case BNXT_EEM_PG_SZ_256K: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_256K; + break; + case BNXT_EEM_PG_SZ_1M: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1M; + break; + case BNXT_EEM_PG_SZ_2M: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_2M; + break; + case BNXT_EEM_PG_SZ_4M: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4M; + break; + case BNXT_EEM_PG_SZ_1G: + pg_sz_code = CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1G; + break; + default: + break; + } + + return pg_sz_code; +} + +static int bnxt_hwrm_cfa_eem_mem_rgtr(struct bnxt *bp, int page_level, + u32 page_size, u64 page_dir, u16 *ctx_id) +{ + struct hwrm_cfa_ctx_mem_rgtr_output *resp; + struct hwrm_cfa_ctx_mem_rgtr_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_CTX_MEM_RGTR); + if (!rc) { + req->flags = 0; + req->page_level = bnxt_eem_page_level_code(page_level); + req->page_size = bnxt_eem_page_size_code(page_size); + req->page_dir = cpu_to_le64(page_dir); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + } + if (rc) { + hwrm_req_drop(bp, req); + netdev_warn(bp->dev, "Failed to register CFA EEM Mem\n"); + return rc; + } + + *ctx_id = le16_to_cpu(resp->ctx_id); + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_eem_ctx_reg(struct bnxt *bp, enum bnxt_eem_dir dir, + u16 grp_id, u32 num_entries) +{ + struct bnxt_eem_ctx_mem_info *ctxp = &bp->eem_info->eem_ctx_info[dir]; + struct bnxt_eem_caps *cap = &bp->eem_info->eem_caps[dir]; + struct bnxt_eem_pg_misc *tbl_misc; + struct bnxt_ctx_pg_info *tbl; + int tbl_type; + u32 mem_size; + u8 depth; + int rc; + + ctxp->num_entries = num_entries; + if (!num_entries) + return 0; + + for (tbl_type = KEY0_TABLE; tbl_type < MAX_TABLE; tbl_type++) { + tbl = &ctxp->eem_tables[tbl_type]; + tbl_misc = &ctxp->eem_misc[tbl_type]; + + /* No EFC table support, skip EFC */ + if (tbl_type == EFC_TABLE) { + tbl_misc->ctx_id = 0; + tbl_misc->entry_size = 0; + continue; + } + + bnxt_eem_pg_size_table(cap, tbl_type, num_entries, + &mem_size, &depth, tbl_misc); + rc = bnxt_alloc_ctx_pg_tbls(bp, tbl, mem_size, depth, false); + if (rc) { + netdev_err(bp->dev, + "alloc failed size 0x%x, depth %d\n", + mem_size, depth); + goto cleanup; + } + + rc = bnxt_hwrm_cfa_eem_mem_rgtr(bp, depth, + BNXT_EEM_PAGE_SIZE, + tbl->ring_mem.pg_tbl_map, + &tbl_misc->ctx_id); + if (rc) + goto cleanup; + } + return rc; +cleanup: + bnxt_eem_ctx_unreg(bp, dir); + return rc; +} + +static int bnxt_hwrm_cfa_eem_cfg(struct bnxt *bp, enum bnxt_eem_dir dir, + u32 cfg_flags, + u16 grp_id) +{ + struct bnxt_eem_ctx_mem_info *ctxp = &bp->eem_info->eem_ctx_info[dir]; + struct bnxt_eem_pg_misc *tbl = &ctxp->eem_misc[KEY0_TABLE]; + struct hwrm_cfa_eem_cfg_input *req; + u32 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_EEM_CFG); + if (!rc) { + 
flags = (dir == BNXT_EEM_DIR_TX) ? + CFA_EEM_CFG_REQ_FLAGS_PATH_TX : + CFA_EEM_CFG_REQ_FLAGS_PATH_RX; + flags |= CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD; + + if (cfg_flags) { + req->key0_ctx_id = cpu_to_le16(tbl[KEY0_TABLE].ctx_id); + req->key1_ctx_id = cpu_to_le16(tbl[KEY1_TABLE].ctx_id); + req->record_ctx_id = cpu_to_le16(tbl[RECORD_TABLE].ctx_id); + req->efc_ctx_id = cpu_to_le16(tbl[EFC_TABLE].ctx_id); + req->num_entries = cpu_to_le32(ctxp->num_entries); + } else { + flags |= CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF; + } + req->flags = cpu_to_le32(flags); + req->group_id = cpu_to_le16(grp_id); + + rc = hwrm_req_send(bp, req); + } + if (rc) { + netdev_warn(bp->dev, "Failed to configure %s CFA EEM: rc=%d", + BNXT_EEM_DIR(dir), rc); + return rc; + } + netdev_dbg(bp->dev, "Configured %s EEM\n", BNXT_EEM_DIR(dir)); + return rc; +} + +static int bnxt_config_eem(struct bnxt *bp, enum bnxt_eem_dir dir, u32 flags, + u32 num_entries, u16 grp_id) +{ + int rc = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_EEM) || dir < BNXT_EEM_DIR_TX || + dir > BNXT_EEM_DIR_RX || + bnxt_eem_validate_num_entries(bp, dir, num_entries)) + return -EINVAL; + + if ((bp->eem_info->eem_cfg_tx_dir && (dir == BNXT_EEM_DIR_TX)) || + (bp->eem_info->eem_cfg_rx_dir && (dir == BNXT_EEM_DIR_RX))) { + netdev_warn(bp->dev, + "already config the EEM in dir %d\n", + dir); + return 0; + } + + bp->eem_info->eem_primary = flags ? true : false; + if (bp->eem_info->eem_primary) { + rc = bnxt_eem_ctx_reg(bp, dir, grp_id, num_entries); + if (rc) + goto cleanup; + } + + rc = bnxt_hwrm_cfa_eem_cfg(bp, dir, flags, grp_id); + if (rc) + goto cleanup; + + rc = bnxt_hwrm_cfa_eem_op(bp, dir, CFA_EEM_OP_REQ_OP_EEM_ENABLE); + if (rc) + goto cleanup; + + if (dir == BNXT_EEM_DIR_TX) + bp->eem_info->eem_cfg_tx_dir = true; + else + bp->eem_info->eem_cfg_rx_dir = true; + bp->eem_info->eem_group = grp_id; + + return rc; +cleanup: + bnxt_unconfig_eem(bp, dir); + return rc; +} + +int bnxt_eem_clear_cfg_system_memory(struct bnxt *bp) +{ + int dir; + + netdev_dbg(bp->dev, "call undo system memory\n"); + for (dir = BNXT_EEM_DIR_TX; dir < BNXT_EEM_NUM_DIR; dir++) + bnxt_unconfig_eem(bp, dir); + + return 0; +} + +int bnxt_eem_cfg_system_memory(struct bnxt *bp, u32 entry_num) +{ + int dir; + int rc; + + netdev_dbg(bp->dev, + "call cfg system memory, entry num: %d\n", + entry_num); + + for (dir = BNXT_EEM_DIR_TX; dir < BNXT_EEM_NUM_DIR; dir++) { + rc = bnxt_config_eem(bp, dir, true, entry_num, 0); + if (rc) { + netdev_err(bp->dev, + "config EEM fail. dir %d entry_num %d\n", + dir, entry_num); + goto cleanup; + } + } + return rc; +cleanup: + for (dir = BNXT_EEM_DIR_TX; dir < BNXT_EEM_NUM_DIR; dir++) + bnxt_unconfig_eem(bp, dir); + + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.h new file mode 100644 index 000000000000..6095581c6a91 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_eem.h @@ -0,0 +1,108 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_EEM_H +#define BNXT_EEM_H + +#include <linux/bitops.h> + +#if (PAGE_SHIFT < 12) /* < 4K >> 4K */ +#define BNXT_EEM_PAGE_SHIFT 12 +#elif (PAGE_SHIFT <= 13) /* 4K, 8K */ +#define BNXT_EEM_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT < 16) /* 16K, 32K >> 8K */ +#define BNXT_EEM_PAGE_SHIFT 13 +#elif (PAGE_SHIFT <= 17) /* 64K, 128K >> 64K */ +#define BNXT_EEM_PAGE_SHIFT 16 +#elif (PAGE_SHIFT <= 19) /* 256K, 512K >> 256K */ +#define BNXT_EEM_PAGE_SHIFT 18 +#elif (PAGE_SHIFT <= 22) /* 1M, 2M, 4M */ +#define BNXT_EEM_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT <= 29) /* 8M ... 512M >> 4M */ +#define BNXT_EEM_PAGE_SHIFT 22 +#else /* >= 1G >> 1G */ +#define BNXT_EEM_PAGE_SHIFT 30 +#endif + +#define BNXT_EEM_PAGE_SIZE BIT(BNXT_EEM_PAGE_SHIFT) + +#define BNXT_EEM_PG_SZ_4K BIT(12) +#define BNXT_EEM_PG_SZ_8K BIT(13) +#define BNXT_EEM_PG_SZ_64K BIT(16) +#define BNXT_EEM_PG_SZ_256K BIT(18) +#define BNXT_EEM_PG_SZ_1M BIT(20) +#define BNXT_EEM_PG_SZ_2M BIT(21) +#define BNXT_EEM_PG_SZ_4M BIT(22) +#define BNXT_EEM_PG_SZ_1G BIT(30) + +#define BNXT_EEM_MIN_ENTRIES BIT(15) /* 32K */ +#define BNXT_EEM_MAX_ENTRIES BIT(27) /* 128M */ + +#define BNXT_EEM_TX_RECORD_SIZE 64 +#define BNXT_EEM_CTX_ID_INVALID 0xFFFF + +enum bnxt_eem_dir { + BNXT_EEM_DIR_TX, + BNXT_EEM_DIR_RX, + BNXT_EEM_NUM_DIR +}; + +#define BNXT_EEM_DIR(dir) ((dir) == BNXT_EEM_DIR_TX ? "TX" : "RX") + +enum bnxt_eem_table_type { + KEY0_TABLE, + KEY1_TABLE, + RECORD_TABLE, + EFC_TABLE, + MAX_TABLE +}; + +enum bnxt_pg_tbl_lvl { + PT_LVL_0, + PT_LVL_1, + PT_LVL_2, + PT_LVL_MAX +}; + +/* CFA EEM Caps saved from the response to HWRM_CFA_EEM_QCAPS */ +struct bnxt_eem_caps { + u32 flags; + u32 supported; + u32 max_entries_supported; + u16 key_entry_size; + u16 record_entry_size; + u16 efc_entry_size; +}; + +struct bnxt_eem_pg_misc { + int type; + u16 ctx_id; + u32 entry_size; +}; + +struct bnxt_eem_ctx_mem_info { + u32 num_entries; + struct bnxt_eem_pg_misc eem_misc[MAX_TABLE]; + struct bnxt_ctx_pg_info eem_tables[MAX_TABLE]; +}; + +struct bnxt_eem_info { + u8 eem_cfg_tx_dir:1; + u8 eem_cfg_rx_dir:1; + u8 eem_primary:1; + u16 eem_group; + struct bnxt_eem_caps eem_caps[BNXT_EEM_NUM_DIR]; + struct bnxt_eem_ctx_mem_info + eem_ctx_info[BNXT_EEM_NUM_DIR]; +}; + +void bnxt_hwrm_query_eem_caps(struct bnxt *bp); +int bnxt_eem_clear_cfg_system_memory(struct bnxt *bp); +int bnxt_eem_cfg_system_memory(struct bnxt *bp, u32 entry_num); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fb1ab58da9fa..9bb193b6d276 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -16,18 +16,33 @@ #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/firmware.h> +#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS) +#include <net/flow_keys.h> +#endif +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/timecounter.h> +#endif +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) #include <linux/utsname.h> -#include <linux/time.h> +#include "bnxt_coredump.h" +#endif +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_xdp.h" +#include "bnxt_ptp.h" #include "bnxt_ethtool.h" +#include "bnxt_sriov.h" +#ifdef CONFIG_BNXT_FLASHDEV #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ #include "bnxt_fw_hdr.h" /* Firmware hdr constant 
and structure defs */ -#include "bnxt_coredump.h" #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) +#endif static u32 bnxt_get_msglevel(struct net_device *dev) { @@ -64,7 +79,7 @@ static int bnxt_get_coalesce(struct net_device *dev, hw_coal = &bp->tx_coal; mult = hw_coal->bufs_per_record; coal->tx_coalesce_usecs = hw_coal->coal_ticks; - coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->tx_max_coalesced_frames = hw_coal->coal_bufs/ mult; coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; @@ -137,33 +152,36 @@ reset_coalesce: return rc; } -static const char * const bnxt_ring_stats_str[] = { +static const char *const bnxt_ring_rx_stats_str[] = { "rx_ucast_packets", "rx_mcast_packets", "rx_bcast_packets", "rx_discards", - "rx_drops", + "rx_errors", "rx_ucast_bytes", "rx_mcast_bytes", "rx_bcast_bytes", +}; + +static const char *const bnxt_ring_tx_stats_str[] = { "tx_ucast_packets", "tx_mcast_packets", "tx_bcast_packets", + "tx_errors", "tx_discards", - "tx_drops", "tx_ucast_bytes", "tx_mcast_bytes", "tx_bcast_bytes", }; -static const char * const bnxt_ring_tpa_stats_str[] = { +static const char *bnxt_ring_tpa_stats_str[] = { "tpa_packets", "tpa_bytes", "tpa_events", "tpa_aborts", }; -static const char * const bnxt_ring_tpa2_stats_str[] = { +static const char *bnxt_ring_tpa2_stats_str[] = { "rx_tpa_eligible_pkt", "rx_tpa_eligible_bytes", "rx_tpa_pkt", @@ -171,9 +189,17 @@ static const char * const bnxt_ring_tpa2_stats_str[] = { "rx_tpa_errors", }; -static const char * const bnxt_ring_sw_stats_str[] = { +static const char *const bnxt_rx_sw_stats_str[] = { "rx_l4_csum_errors", + "rx_resets", "rx_buf_errors", +}; + +static const char *const bnxt_tx_sw_stats_str[] = { + "tx_sw_errors", +}; + +static const char *const bnxt_cmn_sw_stats_str[] = { "missed_irqs", }; @@ -287,9 +313,6 @@ static const char * const bnxt_ring_sw_stats_str[] = { BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ BNXT_TX_STATS_PRI_ENTRY(counter, 7) -#define BNXT_PCIE_STATS_ENTRY(counter) \ - { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) } - enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, @@ -303,6 +326,17 @@ static struct { {0, "tx_total_discard_pkts"}, }; +struct stats_entry { + long offset; + char string[ETH_GSTRING_LEN]; +}; + +#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) +#define NUM_RING_TX_SW_STATS ARRAY_SIZE(bnxt_tx_sw_stats_str) +#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) +#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) +#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -443,24 +477,6 @@ static const struct { BNXT_TX_STATS_PRI_ENTRIES(tx_packets), }; -static const struct { - long offset; - char string[ETH_GSTRING_LEN]; -} bnxt_pcie_stats_arr[] = { - BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_link_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate), - BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate), - BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics), - BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics), - BNXT_PCIE_STATS_ENTRY(pcie_equalization_time), - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]), - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]), - 
BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram), -}; - #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_STATS_PRI \ @@ -468,7 +484,6 @@ static const struct { ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) -#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) { @@ -482,17 +497,27 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) static int bnxt_get_num_ring_stats(struct bnxt *bp) { - int num_stats; + int rx, tx, cmn; + bool sh = false; - num_stats = ARRAY_SIZE(bnxt_ring_stats_str) + - ARRAY_SIZE(bnxt_ring_sw_stats_str) + - bnxt_get_num_tpa_ring_stats(bp); - return num_stats * bp->cp_nr_rings; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; + + rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + + bnxt_get_num_tpa_ring_stats(bp); + tx = NUM_RING_TX_HW_STATS + NUM_RING_TX_SW_STATS; + cmn = NUM_RING_CMN_SW_STATS; + if (sh) + return (rx + tx + cmn) * bp->cp_nr_rings; + else + return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + + cmn * bp->cp_nr_rings; } static int bnxt_get_num_stats(struct bnxt *bp) { int num_stats = bnxt_get_num_ring_stats(bp); + int len; num_stats += BNXT_NUM_SW_FUNC_STATS; @@ -500,15 +525,16 @@ static int bnxt_get_num_stats(struct bnxt *bp) num_stats += BNXT_NUM_PORT_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - num_stats += bp->fw_rx_stats_ext_size + - bp->fw_tx_stats_ext_size; + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + num_stats += len; + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + num_stats += len; if (bp->pri2cos_valid) num_stats += BNXT_NUM_STATS_PRI; } - if (bp->flags & BNXT_FLAG_PCIE_STATS) - num_stats += BNXT_NUM_PCIE_STATS; - return num_stats; } @@ -528,13 +554,32 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset) } } +static bool is_rx_ring(struct bnxt *bp, int ring_num) +{ + return ring_num < bp->rx_nr_rings; +} + +static bool is_tx_ring(struct bnxt *bp, int ring_num) +{ + int tx_base = 0; + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + tx_base = bp->rx_nr_rings; + + if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) + return true; + return false; +} + static void bnxt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) + - bnxt_get_num_tpa_ring_stats(bp); + int buf_size = bnxt_get_num_stats(bp) * sizeof(u64); + u32 tpa_stats; + + memset(buf, 0, buf_size); if (!bp->bnapi) { j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; @@ -544,22 +589,53 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) bnxt_sw_func_stats[i].counter = 0; + tpa_stats = bnxt_get_num_tpa_ring_stats(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - __le64 *hw_stats = (__le64 *)cpr->hw_stats; + u64 *sw_stats = cpr->stats.sw_stats; + u64 *sw; int k; - for (k = 0; k < stat_fields; j++, k++) - buf[j] = le64_to_cpu(hw_stats[k]); - buf[j++] = cpr->rx_l4_csum_errors; - buf[j++] = cpr->rx_buf_errors; - buf[j++] = cpr->missed_irqs; + if (is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) + buf[j] = sw_stats[k]; + } + if (is_tx_ring(bp, i)) { + k = 
NUM_RING_RX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + j++, k++) + buf[j] = sw_stats[k]; + } + if (!tpa_stats || !is_rx_ring(bp, i)) + goto skip_tpa_ring_stats; + + k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + + tpa_stats; j++, k++) + buf[j] = sw_stats[k]; + +skip_tpa_ring_stats: + sw = (u64 *)&cpr->sw_stats.rx; + if (is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) + buf[j] = sw[k]; + } + + sw = (u64 *)&cpr->sw_stats.tx; + if (is_tx_ring(bp, i)) { + for (k = 0; k < NUM_RING_TX_SW_STATS; j++, k++) + buf[j] = sw[k]; + } + + sw = (u64 *)&cpr->sw_stats.cmn; + for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) + buf[j] = sw[k]; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += - le64_to_cpu(cpr->hw_stats->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += - le64_to_cpu(cpr->hw_stats->tx_discard_pkts); + BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -567,96 +643,117 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS) { - __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; + u64 *port_stats = bp->port_stats.sw_stats; - for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) { - buf[j] = le64_to_cpu(*(port_stats + - bnxt_port_stats_arr[i].offset)); - } + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) + buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); } - if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext; - __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext; - for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { - buf[j] = le64_to_cpu(*(rx_port_stats_ext + - bnxt_port_stats_ext_arr[i].offset)); + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; + u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { + buf[j] = *(rx_port_stats_ext + + bnxt_port_stats_ext_arr[i].offset); } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { - buf[j] = le64_to_cpu(*(tx_port_stats_ext + - bnxt_tx_port_stats_ext_arr[i].offset)); + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { + buf[j] = *(tx_port_stats_ext + + bnxt_tx_port_stats_ext_arr[i].offset); } if (bp->pri2cos_valid) { for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_bytes_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + buf[j] = *(rx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_pkts_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + buf[j] = *(rx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_bytes_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + buf[j] = *(tx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_pkts_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + buf[j] = *(tx_port_stats_ext + n); } } } - if (bp->flags & BNXT_FLAG_PCIE_STATS) { - __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats; - - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) { - buf[j] = le64_to_cpu(*(pcie_stats + - 
bnxt_pcie_stats_arr[i].offset)); - } - } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnxt *bp = netdev_priv(dev); static const char * const *str; u32 i, j, num_str; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < bp->cp_nr_rings; i++) { - num_str = ARRAY_SIZE(bnxt_ring_stats_str); - for (j = 0; j < num_str; j++) { - sprintf(buf, "[%d]: %s", i, - bnxt_ring_stats_str[j]); - buf += ETH_GSTRING_LEN; + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_HW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_rx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } } - if (!BNXT_SUPPORTS_TPA(bp)) + if (is_tx_ring(bp, i)) { + num_str = NUM_RING_TX_HW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_tx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = bnxt_get_num_tpa_ring_stats(bp); + if (!num_str || !is_rx_ring(bp, i)) goto skip_tpa_stats; - if (bp->max_tpa_v2) { - num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + if (bp->max_tpa_v2) str = bnxt_ring_tpa2_stats_str; - } else { - num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str); + else str = bnxt_ring_tpa_stats_str; - } + for (j = 0; j < num_str; j++) { sprintf(buf, "[%d]: %s", i, str[j]); buf += ETH_GSTRING_LEN; } skip_tpa_stats: - num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str); + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_SW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_rx_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + if (is_tx_ring(bp, i)) { + num_str = NUM_RING_TX_SW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_tx_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = NUM_RING_CMN_SW_STATS; for (j = 0; j < num_str; j++) { sprintf(buf, "[%d]: %s", i, - bnxt_ring_sw_stats_str[j]); + bnxt_cmn_sw_stats_str[j]); buf += ETH_GSTRING_LEN; } } @@ -672,11 +769,17 @@ skip_tpa_stats: } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_tx_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; @@ -704,12 +807,6 @@ skip_tpa_stats: } } } - if (bp->flags & BNXT_FLAG_PCIE_STATS) { - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) { - strcpy(buf, bnxt_pcie_stats_arr[i].string); - buf += ETH_GSTRING_LEN; - } - } break; case ETH_SS_TEST: if (bp->num_tests) @@ -760,6 +857,7 @@ static int bnxt_set_ringparam(struct net_device *dev, return 0; } +#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT) static void bnxt_get_channels(struct net_device *dev, struct ethtool_channels *channel) { @@ -792,6 +890,10 @@ static void bnxt_get_channels(struct net_device *dev, channel->max_rx = max_rx_rings; channel->max_tx = max_tx_rings; channel->max_other = 0; + + if (!bp->tx_nr_rings) + return; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) { channel->combined_count = bp->rx_nr_rings; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) @@ -848,6 +950,10 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } + if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != + bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings)) + bp->dev->priv_flags &= 
~IFF_RXFH_CONFIGURED; + if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -894,30 +1000,71 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } +#endif + +#ifdef HAVE_RXNFC +#ifdef HAVE_FLOW_KEYS +static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[], + int tbl_size, u32 *ids, u32 start, + u32 id_cnt) +{ + int i, j = start; + + if (j >= id_cnt) + return j; + for (i = 0; i < tbl_size; i++) { + struct hlist_head *head; + struct hlist_node __maybe_unused *node; + struct bnxt_filter_base *fltr; + + head = &tbl[i]; + __hlist_for_each_entry_rcu(fltr, node, head, hash) { + if (!fltr->flags || + test_bit(BNXT_FLTR_FW_DELETED, &fltr->state)) + continue; + ids[j++] = fltr->sw_id; + if (j == id_cnt) + return j; + } + } + return j; +} + +static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp, + struct hlist_head tbl[], + int tbl_size, u32 id) +{ + int i; + + for (i = 0; i < tbl_size; i++) { + struct hlist_head *head; + struct hlist_node __maybe_unused *node; + struct bnxt_filter_base *fltr; + + head = &tbl[i]; + __hlist_for_each_entry_rcu(fltr, node, head, hash) { + if (fltr->flags && fltr->sw_id == id) + return fltr; + } + } + return NULL; +} -#ifdef CONFIG_RFS_ACCEL static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - int i, j = 0; + u32 count; cmd->data = bp->ntp_fltr_count; - for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { - struct hlist_head *head; - struct bnxt_ntuple_filter *fltr; - - head = &bp->ntp_fltr_hash_tbl[i]; - rcu_read_lock(); - hlist_for_each_entry_rcu(fltr, head, hash) { - if (j == cmd->rule_cnt) - break; - rule_locs[j++] = fltr->sw_id; - } - rcu_read_unlock(); - if (j == cmd->rule_cnt) - break; - } - cmd->rule_cnt = j; + rcu_read_lock(); + count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0, + cmd->rule_cnt); + cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + rule_locs, count, + cmd->rule_cnt); + rcu_read_unlock(); return 0; } @@ -925,28 +1072,55 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct bnxt_filter_base *fltr_base; struct bnxt_ntuple_filter *fltr; struct flow_keys *fkeys; - int i, rc = -EINVAL; + int rc = -EINVAL; - if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= bp->max_fltr) return rc; - for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { - struct hlist_head *head; + rcu_read_lock(); + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, + fs->location); + if (fltr_base) { + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *l2_fltr; + struct bnxt_l2_key *l2_key; - head = &bp->ntp_fltr_hash_tbl[i]; - rcu_read_lock(); - hlist_for_each_entry_rcu(fltr, head, hash) { - if (fltr->sw_id == fs->location) - goto fltr_found; + l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); + l2_key = &l2_fltr->l2_key; + fs->flow_type = ETHER_FLOW; + ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr); + eth_broadcast_addr(m_ether->h_dest); + if (l2_key->vlan) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + fs->flow_type |= FLOW_EXT; + m_ext->vlan_tci = htons(0xfff); + h_ext->vlan_tci = htons(l2_key->vlan); } + if (fltr_base->flags & BNXT_ACT_RING_DST) + fs->ring_cookie = 
fltr_base->rxq; + if (fltr_base->flags & BNXT_ACT_FUNC_DST) + fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) << + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; rcu_read_unlock(); + return 0; } - return rc; - -fltr_found: + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + fs->location); + if (!fltr_base) { + rcu_read_unlock(); + return rc; + } + fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); fkeys = &fltr->fkeys; +#ifdef NEW_FLOW_KEYS if (fkeys->basic.n_proto == htons(ETH_P_IP)) { if (fkeys->basic.ip_proto == IPPROTO_TCP) fs->flow_type = TCP_V4_FLOW; @@ -955,20 +1129,24 @@ fltr_found: else goto fltr_err; - fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; - fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); - - fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; - fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); - - fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); - - fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + } } else { - int i; - +#ifdef HAVE_ETHTOOL_IP6_SPEC if (fkeys->basic.ip_proto == IPPROTO_TCP) fs->flow_type = TCP_V6_FLOW; else if (fkeys->basic.ip_proto == IPPROTO_UDP) @@ -976,22 +1154,49 @@ fltr_found: else goto fltr_err; - *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = - fkeys->addrs.v6addrs.src; - *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = - fkeys->addrs.v6addrs.dst; - for (i = 0; i < 4; i++) { - fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0); - fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src); } - fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); - - fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { + fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); + } + if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { + fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + } +#endif } - fs->ring_cookie = fltr->rxq; +#else + if (fkeys->ip_proto == IPPROTO_TCP) + fs->flow_type = TCP_V4_FLOW; + else if (fkeys->ip_proto == IPPROTO_UDP) + fs->flow_type = UDP_V4_FLOW; + else + goto fltr_err; + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->src; + fs->m_u.tcp_ip4_spec.ip4src = (__be32) ~0; + + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->dst; + fs->m_u.tcp_ip4_spec.ip4dst = (__be32) ~0; + + fs->h_u.tcp_ip4_spec.psrc 
= fkeys->port16[0]; + fs->m_u.tcp_ip4_spec.psrc = (__be16) ~0; + + fs->h_u.tcp_ip4_spec.pdst = fkeys->port16[1]; + fs->m_u.tcp_ip4_spec.pdst = (__be16) ~0; +#endif + + fs->ring_cookie = fltr->base.rxq; rc = 0; fltr_err: @@ -999,8 +1204,291 @@ fltr_err: return rc; } + +static int bnxt_add_l2_cls_rule(struct bnxt *bp, + struct ethtool_rx_flow_spec *fs) +{ + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *fltr; + struct bnxt_l2_key key; + u16 vnic_id; + u8 flags; + int rc; + + if (!is_broadcast_ether_addr(m_ether->h_dest)) + return -EINVAL; + ether_addr_copy(key.dst_mac_addr, h_ether->h_dest); + key.vlan = 0; + if (fs->flow_type & FLOW_EXT) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci) + return -EINVAL; + key.vlan = ntohs(h_ext->vlan_tci); + } + + if (vf) { + flags = BNXT_ACT_FUNC_DST; + vnic_id = 0xffff; + vf--; + if (!bnxt_vf_vnic_state_is_up(bp, vf)) + return -ENODEV; + } else { + flags = BNXT_ACT_RING_DST; + if (bp->flags & BNXT_FLAG_CHIP_P5) + vnic_id = bp->vnic_info[0].fw_vnic_id; + else + vnic_id = bp->vnic_info[ring + 1].fw_vnic_id; + } + fltr = bnxt_alloc_new_l2_filter(bp, &key, flags); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + + fltr->base.fw_vnic_id = vnic_id; + fltr->base.rxq = ring; + fltr->base.vf_idx = vf; + rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); + if (rc) + bnxt_del_l2_filter(bp, fltr); + else + fs->location = fltr->base.sw_id; + return rc; +} + +#if defined(NEW_FLOW_KEYS) && defined(HAVE_ETHTOOL_IP6_SPEC) && defined(HAVE_FLOW_HASH_FROM_KEYS) +#define IPV4_ALL_MASK ((__force __be32)~0) +#define L4_PORT_ALL_MASK ((__force __be16)~0) + +static bool ipv6_mask_is_full(__be32 mask[4]) +{ + return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK; +} + +static bool ipv6_mask_is_zero(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + #endif +static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, + struct ethtool_rx_flow_spec *fs) +{ +#if defined(NEW_FLOW_KEYS) && defined(HAVE_ETHTOOL_IP6_SPEC) && defined(HAVE_FLOW_HASH_FROM_KEYS) + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + struct bnxt_ntuple_filter *new_fltr, *fltr; + struct bnxt_l2_filter *l2_fltr; + u32 flow_type = fs->flow_type; + struct flow_keys *fkeys; + u32 idx; + int rc; + + if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) + return -EOPNOTSUPP; + + new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL); + if (!new_fltr) + return -ENOMEM; + + l2_fltr = bp->vnic_info[0].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + new_fltr->l2_fltr = l2_fltr; + fkeys = &new_fltr->fkeys; + + rc = -EOPNOTSUPP; + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: { + struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec; + + fkeys->basic.ip_proto = IPPROTO_TCP; + if (flow_type == UDP_V4_FLOW) + fkeys->basic.ip_proto = IPPROTO_UDP; + fkeys->basic.n_proto = htons(ETH_P_IP); + + if (ip_mask->ip4src == IPV4_ALL_MASK) { + fkeys->addrs.v4addrs.src = ip_spec->ip4src; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; + } else if (ip_mask->ip4src) { + goto ntuple_err; + } + if (ip_mask->ip4dst == IPV4_ALL_MASK) { + fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; + 
new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP; + } else if (ip_mask->ip4dst) { + goto ntuple_err; + } + + if (ip_mask->psrc == L4_PORT_ALL_MASK) { + fkeys->ports.src = ip_spec->psrc; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT; + } else if (ip_mask->psrc) { + goto ntuple_err; + } + if (ip_mask->pdst == L4_PORT_ALL_MASK) { + fkeys->ports.dst = ip_spec->pdst; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT; + } else if (ip_mask->pdst) { + goto ntuple_err; + } + break; + } + case TCP_V6_FLOW: + case UDP_V6_FLOW: { + struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec; + + fkeys->basic.ip_proto = IPPROTO_TCP; + if (flow_type == UDP_V6_FLOW) + fkeys->basic.ip_proto = IPPROTO_UDP; + fkeys->basic.n_proto = htons(ETH_P_IPV6); + + if (ipv6_mask_is_full(ip_mask->ip6src)) { + fkeys->addrs.v6addrs.src = + *(struct in6_addr *)&ip_spec->ip6src; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; + } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) { + goto ntuple_err; + } + if (ipv6_mask_is_full(ip_mask->ip6dst)) { + fkeys->addrs.v6addrs.dst = + *(struct in6_addr *)&ip_spec->ip6dst; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP; + } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) { + goto ntuple_err; + } + + if (ip_mask->psrc == L4_PORT_ALL_MASK) { + fkeys->ports.src = ip_spec->psrc; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT; + } else if (ip_mask->psrc) { + goto ntuple_err; + } + if (ip_mask->pdst == L4_PORT_ALL_MASK) { + fkeys->ports.dst = ip_spec->pdst; + new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT; + } else if (ip_mask->pdst) { + goto ntuple_err; + } + break; + } + default: + goto ntuple_err; + } + if (!new_fltr->ntuple_flags) + goto ntuple_err; + + idx = bnxt_get_ntp_filter_idx(bp, fkeys); + rcu_read_lock(); + fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); + if (fltr) { + rcu_read_unlock(); + rc = -EEXIST; + goto ntuple_err; + } + rcu_read_unlock(); + + new_fltr->base.rxq = ring; + new_fltr->base.flags = BNXT_ACT_NO_AGING; + __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state); + rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); + if (!rc) { + rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr); + if (rc) { + bnxt_del_ntp_filter(bp, new_fltr); + return rc; + } + fs->location = new_fltr->base.sw_id; + return 0; + } + +ntuple_err: + atomic_dec(&l2_fltr->refcnt); + kfree(new_fltr); + return rc; +#else + return -EOPNOTSUPP; +#endif +} + +static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = &cmd->fs; + u32 ring, flow_type; + int rc; + u8 vf; + + if (!netif_running(bp->dev)) + return -EAGAIN; + if (!(bp->flags & BNXT_FLAG_RFS)) + return -EPERM; + if (fs->location != RX_CLS_LOC_ANY) + return -EINVAL; + + ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + if (BNXT_VF(bp) && vf) + return -EINVAL; + if (BNXT_PF(bp) && vf > bp->pf.active_vfs) + return -EINVAL; + if (!vf && ring >= bp->rx_nr_rings) + return -EINVAL; + + flow_type = fs->flow_type; + if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) + return -EINVAL; + flow_type &= ~FLOW_EXT; + if (flow_type == ETHER_FLOW) + rc = bnxt_add_l2_cls_rule(bp, fs); + else + rc = bnxt_add_ntuple_cls_rule(bp, fs); + return rc; +} + +static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = &cmd->fs; + struct bnxt_filter_base *fltr_base; + u32 id = fs->location; + + rcu_read_lock(); + 
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, id); + if (fltr_base) { + struct bnxt_l2_filter *l2_fltr; + + l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); + rcu_read_unlock(); + bnxt_hwrm_l2_filter_free(bp, l2_fltr); + bnxt_del_l2_filter(bp, l2_fltr); + return 0; + } + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + fs->location); + if (fltr_base) { + struct bnxt_ntuple_filter *fltr; + + fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); + rcu_read_unlock(); + if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) + return -EINVAL; + bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr); + bnxt_del_ntp_filter(bp, fltr); + return 0; + } + + rcu_read_unlock(); + return -ENOENT; +} +#endif /* HAVE_FLOW_KEYS */ + static u64 get_ethtool_ipv4_rss(struct bnxt *bp) { if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) @@ -1029,7 +1517,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -1048,7 +1536,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -1141,20 +1629,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) } static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_RXNFC_VOID + void *rule_locs) +#else u32 *rule_locs) +#endif { struct bnxt *bp = netdev_priv(dev); int rc = 0; switch (cmd->cmd) { -#ifdef CONFIG_RFS_ACCEL +#ifdef HAVE_FLOW_KEYS case ETHTOOL_GRXRINGS: cmd->data = bp->rx_nr_rings; break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = bp->ntp_fltr_count; - cmd->data = BNXT_NTP_FLTR_MAX_FLTR; + cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; break; case ETHTOOL_GRXCLSRLALL: @@ -1164,7 +1656,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRULE: rc = bnxt_grxclsrule(bp, cmd); break; -#endif +#endif /* HAVE_FLOW_KEYS */ case ETHTOOL_GRXFH: rc = bnxt_grxfh(bp, cmd); @@ -1188,6 +1680,16 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) rc = bnxt_srxfh(bp, cmd); break; +#ifdef HAVE_FLOW_KEYS + case ETHTOOL_SRXCLSRLINS: + rc = bnxt_srxclsrlins(bp, cmd); + break; + + case ETHTOOL_SRXCLSRLDEL: + rc = bnxt_srxclsrldel(bp, cmd); + break; +#endif /* HAVE_FLOW_KEYS */ + default: rc = -EOPNOTSUPP; break; @@ -1195,11 +1697,22 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return rc; } +#endif /* HAVE_RXNFC */ + +#if defined(HAVE_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT) static u32 bnxt_get_rxfh_indir_size(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + return (bp->rx_nr_rings + BNXT_RSS_TABLE_ENTRIES_P5 - 1) & + ~(BNXT_RSS_TABLE_ENTRIES_P5 - 1); + } return HW_HASH_INDEX_SIZE; } +#endif +#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT) static u32 bnxt_get_rxfh_key_size(struct net_device *dev) { return HW_HASH_KEY_SIZE; @@ -1210,7 +1723,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, { struct bnxt *bp = netdev_priv(dev); struct bnxt_vnic_info *vnic; - int 
i = 0; + u32 i, tbl_size; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -1219,9 +1732,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; vnic = &bp->vnic_info[0]; - if (indir && vnic->rss_table) { - for (i = 0; i < HW_HASH_INDEX_SIZE; i++) - indir[i] = le16_to_cpu(vnic->rss_table[i]); + if (indir && bp->rss_indir_tbl) { + tbl_size = bnxt_get_rxfh_indir_size(dev); + for (i = 0; i < tbl_size; i++) + indir[i] = bp->rss_indir_tbl[i]; } if (key && vnic->rss_hash_key) @@ -1229,7 +1743,41 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; } +#endif +#if defined(HAVE_SET_RXFH) && defined(ETH_RSS_HASH_TOP) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + + if (hfunc && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!key && !indir) + return -EOPNOTSUPP; + + if (key) { + memcpy(bp->usr_rss_hash_key, key, HW_HASH_KEY_SIZE); + bp->flags |= BNXT_FLAG_USR_RSS_HASH_KEY; + } + if (indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + + for (i = 0; i < tbl_size; i++) + bp->rss_indir_tbl[i] = indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } + + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + bnxt_open_nic(bp, false, false); + } + return 0; +} +#endif static void bnxt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1247,6 +1795,60 @@ static void bnxt_get_drvinfo(struct net_device *dev, info->regdump_len = 0; } +static int bnxt_get_regs_len(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int reg_len; + + reg_len = BNXT_PXP_REG_LEN; + + if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) + reg_len += sizeof(struct pcie_ctx_hw_stats); + + return reg_len; +} + +static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *_p) +{ + struct pcie_ctx_hw_stats *hw_pcie_stats; + struct bnxt *bp = netdev_priv(dev); + struct hwrm_pcie_qstats_input *req; + dma_addr_t hw_pcie_stats_addr; + int rc; + + regs->version = 0; + bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); + + if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) + return; + + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) + return; + + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) { + hwrm_req_drop(bp, req); + return; + } + + hwrm_req_hold(bp, req); /* hold on to slice */ + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); + if (!rc) { + u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); + __le64 *src = (__le64 *)hw_pcie_stats; + int i; + + regs->version = 1; + for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) + dst[i] = le64_to_cpu(src[i]); + } + hwrm_req_drop(bp, req); +} + static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bnxt *bp = netdev_priv(dev); @@ -1313,6 +1915,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) return speed_mask; } +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ { \ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ @@ -1377,6 +1980,37 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \ } +#ifdef 
HAVE_ETHTOOL_GLINKSETTINGS_PAM4 +#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ +{ \ + if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 50000baseCR_Full); \ + if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 100000baseCR2_Full);\ + if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 200000baseCR4_Full);\ +} + +#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ +{ \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 50000baseCR_Full)) \ + (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100000baseCR2_Full)) \ + (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 200000baseCR4_Full)) \ + (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \ +} +#else +#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) +#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) +#endif + static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, struct ethtool_link_ksettings *lk_ksettings) { @@ -1387,6 +2021,8 @@ static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, fw_pause = link_info->auto_pause_setting; BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); + fw_speeds = link_info->advertising_pam4; + BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising); } static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, @@ -1400,6 +2036,8 @@ static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, lp_advertising); + fw_speeds = link_info->lp_auto_pam4_link_speeds; + BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising); } static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, @@ -1408,16 +2046,64 @@ static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, u16 fw_speeds = link_info->support_speeds; BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); + fw_speeds = link_info->support_pam4_speeds; + BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Asym_Pause); - if (link_info->support_auto_speeds) + if (link_info->support_auto_speeds || + link_info->support_pam4_auto_speeds) ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Autoneg); } +#else + +static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->advertising; + u8 fw_pause = 0; + + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->auto_pause_setting; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + +static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->lp_auto_link_speeds; + u8 fw_pause = 0; + + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->lp_pause; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + +static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_speeds; + u32 supported; + + supported = 
_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; +} + +static u32 bnxt_fw_to_ethtool_support_adv_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_auto_speeds; + u32 supported; + + supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + if (supported) + supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + return supported; +} +#endif + u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -1439,11 +2125,14 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) return SPEED_50000; case BNXT_LINK_SPEED_100GB: return SPEED_100000; + case BNXT_LINK_SPEED_200GB: + return SPEED_200000; default: return SPEED_UNKNOWN; } } +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G static int bnxt_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *lk_ksettings) { @@ -1462,15 +2151,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev, ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, Autoneg); base->autoneg = AUTONEG_ENABLE; - if (link_info->phy_link_status == BNXT_LINK_LINK) + base->duplex = DUPLEX_UNKNOWN; + if (link_info->phy_link_status == BNXT_LINK_LINK) { bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + else + base->duplex = DUPLEX_HALF; + } ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); - if (!netif_carrier_ok(dev)) - base->duplex = DUPLEX_UNKNOWN; - else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; - else - base->duplex = DUPLEX_HALF; } else { base->autoneg = AUTONEG_DISABLE; ethtool_speed = @@ -1506,55 +2195,149 @@ static int bnxt_get_link_ksettings(struct net_device *dev, return 0; } -static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed) +#else + +static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; + u32 ethtool_speed; + + mutex_lock(&bp->link_lock); + cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); + + if (link_info->support_auto_speeds) + cmd->supported |= SUPPORTED_Autoneg; + + if (link_info->autoneg) { + cmd->advertising = + bnxt_fw_to_ethtool_advertised_spds(link_info); + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + cmd->duplex = DUPLEX_UNKNOWN; + if (link_info->phy_link_status == BNXT_LINK_LINK) { + cmd->lp_advertising = + bnxt_fw_to_ethtool_lp_adv(link_info); + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + } + ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising = 0; + ethtool_speed = + bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + cmd->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + } + ethtool_cmd_speed_set(cmd, ethtool_speed); + + cmd->port = PORT_NONE; + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + cmd->port = PORT_TP; + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + cmd->port = PORT_DA; + else if (link_info->media_type == + PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) + cmd->port = PORT_FIBRE; + } + + if (link_info->transceiver == + 
PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + cmd->phy_address = link_info->phy_addr; + mutex_unlock(&bp->link_lock); + + return 0; +} +#endif + +static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 support_pam4_spds = link_info->support_pam4_speeds; u16 support_spds = link_info->support_speeds; - u32 fw_speed = 0; + u8 sig_mode = BNXT_SIG_MODE_NRZ; + u16 fw_speed = 0; switch (ethtool_speed) { case SPEED_100: if (support_spds & BNXT_LINK_SPEED_MSK_100MB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; break; case SPEED_1000: if (support_spds & BNXT_LINK_SPEED_MSK_1GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; break; case SPEED_2500: if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; break; case SPEED_10000: if (support_spds & BNXT_LINK_SPEED_MSK_10GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; break; case SPEED_20000: if (support_spds & BNXT_LINK_SPEED_MSK_20GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; break; case SPEED_25000: if (support_spds & BNXT_LINK_SPEED_MSK_25GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; break; case SPEED_40000: if (support_spds & BNXT_LINK_SPEED_MSK_40GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; break; case SPEED_50000: - if (support_spds & BNXT_LINK_SPEED_MSK_50GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + if (support_spds & BNXT_LINK_SPEED_MSK_50GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; + sig_mode = BNXT_SIG_MODE_PAM4; + } break; case SPEED_100000: - if (support_spds & BNXT_LINK_SPEED_MSK_100GB) - fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB; + if (support_spds & BNXT_LINK_SPEED_MSK_100GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; + sig_mode = BNXT_SIG_MODE_PAM4; + } break; - default: - netdev_err(dev, "unsupported speed!\n"); + case SPEED_200000: + if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; + sig_mode = BNXT_SIG_MODE_PAM4; + } break; } - return fw_speed; + + if (!fw_speed) { + netdev_err(dev, "unsupported speed!\n"); + return -EINVAL; + } + + link_info->req_link_speed = fw_speed; + link_info->req_signal_mode = sig_mode; + link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; + link_info->autoneg = 0; + link_info->advertising = 0; + link_info->advertising_pam4 = 0; + + return 0; } u16 bnxt_get_fw_auto_link_speeds(u32 advertising) @@ -1579,6 +2362,7 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising) return fw_speed_mask; } +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G static int bnxt_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *lk_ksettings) { @@ -1586,33 +2370,35 @@ static int bnxt_set_link_ksettings(struct 
net_device *dev, struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; bool set_pause = false; - u16 fw_advertising = 0; u32 speed; int rc = 0; - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; mutex_lock(&bp->link_lock); if (base->autoneg == AUTONEG_ENABLE) { - BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, + BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings, advertising); + BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4, + lk_ksettings, advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; - if (!fw_advertising) + if (!link_info->advertising && !link_info->advertising_pam4) { link_info->advertising = link_info->support_auto_speeds; - else - link_info->advertising = fw_advertising; + link_info->advertising_pam4 = + link_info->support_pam4_auto_speeds; + } /* any change to autoneg will cause link change, therefore the * driver should put back the original pause setting in autoneg */ set_pause = true; } else { - u16 fw_speed; u8 phy_type = link_info->phy_type; if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + netdev_err(dev, "10GBase-T devices must autoneg\n"); rc = -EINVAL; goto set_setting_exit; @@ -1623,15 +2409,83 @@ static int bnxt_set_link_ksettings(struct net_device *dev, goto set_setting_exit; } speed = base->speed; - fw_speed = bnxt_get_fw_speed(dev, speed); - if (!fw_speed) { + rc = bnxt_force_link_speed(dev, speed); + if (rc) + goto set_setting_exit; + } + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); + +set_setting_exit: + mutex_unlock(&bp->link_lock); + return rc; +} + +#else + +static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + bool set_pause = false; + u16 fw_advertising = 0; + u32 speed; + + if (!BNXT_PHY_CFG_ABLE(bp)) + return -EOPNOTSUPP; + + mutex_lock(&bp->link_lock); + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 supported_spds = + bnxt_fw_to_ethtool_support_adv_spds(link_info); + + if (!supported_spds) { + netdev_err(dev, "Autoneg not supported\n"); rc = -EINVAL; goto set_setting_exit; } - link_info->req_link_speed = fw_speed; - link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; - link_info->autoneg = 0; - link_info->advertising = 0; + if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | + ADVERTISED_TP | ADVERTISED_FIBRE)) { + netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", + cmd->advertising); + rc = -EINVAL; + goto set_setting_exit; + } + fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); + link_info->autoneg |= BNXT_AUTONEG_SPEED; + if (!fw_advertising) + link_info->advertising = link_info->support_auto_speeds; + else + link_info->advertising = fw_advertising; + /* any change to autoneg will cause link change, therefore the + * driver should put back the original pause setting in autoneg + */ + set_pause = true; + } else { + u8 phy_type = link_info->phy_type; + + if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || + phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || + link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + + netdev_err(dev, "10GBase-T devices must autoneg\n"); + rc = -EINVAL; + goto set_setting_exit; + } + /* TODO: currently don't support half duplex */ + if (cmd->duplex == DUPLEX_HALF) { + netdev_err(dev, 
"HALF DUPLEX is not supported!\n"); + rc = -EINVAL; + goto set_setting_exit; + } + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + speed = ethtool_cmd_speed(cmd); + rc = bnxt_force_link_speed(dev, speed); + if (rc) + goto set_setting_exit; } if (netif_running(dev)) @@ -1641,6 +2495,7 @@ set_setting_exit: mutex_unlock(&bp->link_lock); return rc; } +#endif static void bnxt_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) @@ -1662,7 +2517,7 @@ static int bnxt_set_pauseparam(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; if (epause->autoneg) { @@ -1688,8 +2543,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } @@ -1701,63 +2559,88 @@ static u32 bnxt_get_link(struct net_device *dev) return bp->link_info.link_up; } +#ifdef CONFIG_BNXT_FLASHDEV static void bnxt_print_admin_err(struct bnxt *bp) { - netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); + netdev_info(bp->dev, "PF does not have admin privileges to flash the device\n"); } static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); -static int bnxt_flash_nvram(struct net_device *dev, - u16 dir_type, - u16 dir_ordinal, - u16 dir_ext, - u16 dir_attr, - const u8 *data, +static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, size_t data_len) { struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_write_input *req; int rc; - struct hwrm_nvm_write_input req = {0}; - dma_addr_t dma_handle; - u8 *kmem; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); + if (rc) + return rc; - req.dir_type = cpu_to_le16(dir_type); - req.dir_ordinal = cpu_to_le16(dir_ordinal); - req.dir_ext = cpu_to_le16(dir_ext); - req.dir_attr = cpu_to_le16(dir_attr); - req.dir_data_length = cpu_to_le32(data_len); + if (data_len && data) { + dma_addr_t dma_handle; + u8 *kmem; - kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, - GFP_KERNEL); - if (!kmem) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)data_len); - return -ENOMEM; + kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->dir_data_length = cpu_to_le32(data_len); + + memcpy(kmem, data, data_len); + req->host_src_addr = cpu_to_le64(dma_handle); } - memcpy(kmem, data, data_len); - req.host_src_addr = cpu_to_le64(dma_handle); - rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + req->dir_type = cpu_to_le16(dir_type); + req->dir_ordinal = cpu_to_le16(dir_ordinal); + req->dir_ext = cpu_to_le16(dir_ext); + req->dir_attr = cpu_to_le16(dir_attr); + req->dir_item_length = cpu_to_le32(dir_item_len); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } -static int bnxt_firmware_reset(struct net_device 
*dev, - u16 dir_type) +static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags) { - struct hwrm_fw_reset_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (rc) + return rc; + + req->embedded_proc_type = proc_type; + req->selfrst_status = self_reset; + req->flags = flags; + + if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { + rc = hwrm_req_send_silent(bp, req); + } else { + rc = hwrm_req_send(bp, req); + if (rc == -EACCES) + bnxt_print_admin_err(bp); + } + return rc; +} + +static int bnxt_firmware_reset(struct net_device *dev, + enum bnxt_nvm_directory_type dir_type) +{ + u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; + u8 proc_type, flags = 0; /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ /* (e.g. when firmware isn't already running) */ @@ -1765,40 +2648,50 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNX_DIR_TYPE_CHIMP_PATCH: case BNX_DIR_TYPE_BOOTCODE: case BNX_DIR_TYPE_BOOTCODE_2: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; /* Self-reset ChiMP upon next PCIe reset: */ - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; /* Self-reset APE upon next PCIe reset: */ - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: - req.embedded_proc_type = - FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; break; case BNX_DIR_TYPE_BONO_FW: case BNX_DIR_TYPE_BONO_PATCH: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; - break; - case BNXT_FW_RESET_CHIP: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - break; - case BNXT_FW_RESET_AP: - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP; + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; break; default: return -EINVAL; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc == -EACCES) - bnxt_print_admin_err(bp); - return rc; + return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); +} + +static int bnxt_firmware_reset_chip(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + u8 flags = 0; + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + + return bnxt_hwrm_firmware_reset(dev, + FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, + flags); +} + +static int bnxt_firmware_reset_ap(struct net_device *dev) +{ + return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, + 0); } static int bnxt_flash_firmware(struct net_device *dev, @@ -1874,7 +2767,7 @@ static int bnxt_flash_firmware(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); if (rc == 0) /* Firmware update successful */ rc = bnxt_firmware_reset(dev, dir_type); @@ 
-1927,7 +2820,7 @@ static int bnxt_flash_microcode(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); return rc; } @@ -1987,109 +2880,125 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, rc, filename); return rc; } - if (bnxt_dir_type_is_ape_bin_format(dir_type) == true) + if (bnxt_dir_type_is_ape_bin_format(dir_type)) rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); - else if (bnxt_dir_type_is_other_exec_format(dir_type) == true) + else if (bnxt_dir_type_is_other_exec_format(dir_type)) rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); else rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw->data, fw->size); + 0, 0, 0, fw->data, fw->size); release_firmware(fw); return rc; } -static int bnxt_flash_package_from_file(struct net_device *dev, - char *filename, u32 install_type) +int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, + u32 install_type) { + struct hwrm_nvm_install_update_input *install; + struct hwrm_nvm_install_update_output *resp; + struct hwrm_nvm_modify_input *modify; struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_nvm_install_update_input install = {0}; + bool defrag_attempted = false; const struct firmware *fw; + dma_addr_t dma_handle; + u8 *kmem = NULL; u32 item_len; - int rc = 0; u16 index; + int rc; bnxt_hwrm_fw_set_time(bp); - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, &item_len, NULL) != 0) { - netdev_err(dev, "PKG update area not created in nvram\n"); - return -ENOBUFS; - } - rc = request_firmware(&fw, filename, &dev->dev); - if (rc != 0) { + if (rc) { netdev_err(dev, "PKG error %d requesting file: %s\n", rc, filename); return rc; } - if (fw->size > item_len) { - netdev_err(dev, "PKG insufficient update area in nvram: %lu", - (unsigned long)fw->size); - rc = -EFBIG; - } else { - dma_addr_t dma_handle; - u8 *kmem; - struct hwrm_nvm_modify_input modify = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); - - modify.dir_idx = cpu_to_le16(index); - modify.len = cpu_to_le32(fw->size); - - kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size, - &dma_handle, GFP_KERNEL); - if (!kmem) { - netdev_err(dev, - "dma_alloc_coherent failure, length = %u\n", - (unsigned int)fw->size); - rc = -ENOMEM; - } else { - memcpy(kmem, fw->data, fw->size); - modify.host_src_addr = cpu_to_le64(dma_handle); - - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, fw->size, kmem, - dma_handle); - } - } - release_firmware(fw); + rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); if (rc) - goto err_exit; + return rc; + kmem = hwrm_req_dma_slice(bp, modify, fw->size, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, modify); + return -ENOMEM; + } + + rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); + if (rc) { + hwrm_req_drop(bp, modify); + return rc; + } + + hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); + + hwrm_req_hold(bp, modify); + modify->len = cpu_to_le32(fw->size); + modify->host_src_addr = cpu_to_le64(dma_handle); + memcpy(kmem, fw->data, fw->size); + + resp = hwrm_req_hold(bp, install); if ((install_type & 0xffff) == 0) install_type >>= 16; - bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); - 
install.install_type = cpu_to_le32(install_type); + install->install_type = cpu_to_le32(install_type); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; - - if (resp->error_code && error_code == - NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { - install.flags |= cpu_to_le16( - NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + do { + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + BNX_DIR_EXT_NONE, + &index, &item_len, NULL); + if (rc) { + netdev_err(dev, "PKG update area not created in nvram\n"); + break; } + if (fw->size > item_len) { + netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", + (unsigned long)fw->size); + rc = -EFBIG; + break; + } + + modify->dir_idx = cpu_to_le16(index); + + rc = hwrm_req_send(bp, modify); if (rc) - goto flash_pkg_exit; - } + break; + + rc = hwrm_req_send(bp, install); + + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + install->flags |= + cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); + + rc = hwrm_req_send(bp, install); + + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { + /* FW has cleared NVM area, driver will create + * UPDATE directory and try the flash again + */ + defrag_attempted = true; + rc = bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, 0); + } + } + } while (defrag_attempted && !rc); + + hwrm_req_drop(bp, modify); + hwrm_req_drop(bp, install); + release_firmware(fw); if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", (s8)resp->result, (int)resp->problem_item); rc = -ENOPKG; } -flash_pkg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); -err_exit: + if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; @@ -2113,20 +3022,22 @@ static int bnxt_flash_device(struct net_device *dev, static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) { + struct hwrm_nvm_get_dir_info_output *output; + struct hwrm_nvm_get_dir_info_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_get_dir_info_input req = {0}; - struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *entries = le32_to_cpu(output->entries); *length = le32_to_cpu(output->entry_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2145,14 +3056,14 @@ static int bnxt_get_eeprom_len(struct net_device *dev) static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) { + struct hwrm_nvm_get_dir_entries_input *req; struct bnxt *bp = netdev_priv(dev); - int rc; - u32 dir_entries; - u32 entry_length; - u8 *buf; - size_t buflen; dma_addr_t dma_handle; - struct hwrm_nvm_get_dir_entries_input req = {0}; + u32 entry_length; + u32 dir_entries; + size_t buflen; + u8 *buf; + int rc; rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); if (rc != 0) @@ -2167,20 +3078,23 @@ static int bnxt_get_nvram_directory(struct net_device 
*dev, u32 len, u8 *data) len -= 2; memset(data, 0xff, len); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); + if (rc) + return rc; + buflen = dir_entries * entry_length; - buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, - GFP_KERNEL); + buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)buflen); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); - dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2188,31 +3102,34 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, u32 length, u8 *data) { struct bnxt *bp = netdev_priv(dev); - int rc; - u8 *buf; + struct hwrm_nvm_read_input *req; dma_addr_t dma_handle; - struct hwrm_nvm_read_input req = {0}; + u8 *buf; + int rc; if (!length) return -EINVAL; - buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, - GFP_KERNEL); + rc = hwrm_req_init(bp, req, HWRM_NVM_READ); + if (rc) + return rc; + + buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)length); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); - req.dir_idx = cpu_to_le16(index); - req.offset = cpu_to_le32(offset); - req.len = cpu_to_le32(length); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + req->dir_idx = cpu_to_le16(index); + req->offset = cpu_to_le32(offset); + req->len = cpu_to_le32(length); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, length); - dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2220,20 +3137,23 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length) { + struct hwrm_nvm_find_dir_entry_output *output; + struct hwrm_nvm_find_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_find_dir_entry_input req = {0}; - struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); - req.enables = 0; - req.dir_idx = 0; - req.dir_type = cpu_to_le16(type); - req.dir_ordinal = cpu_to_le16(ordinal); - req.dir_ext = cpu_to_le16(ext); - req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); + if (rc) + return rc; + + req->enables = 0; + req->dir_idx = 0; + req->dir_type = cpu_to_le16(type); + req->dir_ordinal = cpu_to_le16(ordinal); + req->dir_ext = cpu_to_le16(ext); + req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -2242,7 +3162,7 @@ static int 
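
For requests that need host DMA memory, hwrm_req_dma_slice() replaces the dma_alloc_coherent()/dma_free_coherent() pairs of the old code: the slice is carved out of memory owned by the request and is released together with it by hwrm_req_drop(). A minimal sketch restating bnxt_get_nvram_item() from the hunk above; example_nvm_read() is a hypothetical name:

static int example_nvm_read(struct bnxt *bp, u16 index, u32 offset,
			    u32 length, u8 *data)
{
	struct hwrm_nvm_read_input *req;
	dma_addr_t dma_handle;
	u8 *buf;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);	/* request exists even if the slice failed */
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req);	/* keep the slice valid across the send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);	/* frees the request and the slice together */
	return rc;
}
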
bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2318,8 +3238,16 @@ static int bnxt_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { - u32 index; - u32 offset; + struct hwrm_nvm_get_dev_info_output *dev_info_resp; + struct hwrm_nvm_get_dev_info_input *dev_info_req; + struct hwrm_fw_qstatus_output *fw_status_resp; + struct hwrm_fw_qstatus_input *fw_status_req; + struct bnxt *bp = netdev_priv(dev); + u32 size, index, offset; + int rc, i; + + if (eeprom->len < 1) + return -EINVAL; if (eeprom->offset == 0) /* special offset value to get directory */ return bnxt_get_nvram_directory(dev, eeprom->len, data); @@ -2327,22 +3255,66 @@ static int bnxt_get_eeprom(struct net_device *dev, index = eeprom->offset >> 24; offset = eeprom->offset & 0xffffff; - if (index == 0) { - netdev_err(dev, "unsupported index value: %d\n", index); - return -EINVAL; - } + if (index != 0) + return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, + data); - return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); + switch (offset) { + case 1: /* Query firmware reset status */ + if (eeprom->len < 5) + return -EINVAL; + size = 4; /* procs: BOOT, MGMT, NETCTRL, and ROCE */ + *(data++) = size; + rc = hwrm_req_init(bp, fw_status_req, HWRM_FW_QSTATUS); + if (rc) + return rc; + + fw_status_resp = hwrm_req_hold(bp, fw_status_req); + for (i = 0; i < size; i++) { + fw_status_req->embedded_proc_type = i; + rc = hwrm_req_send(bp, fw_status_req); + if (rc == 0) + *(data++) = fw_status_resp->selfrst_status; + else + break; + } + hwrm_req_drop(bp, fw_status_req); + return rc; + case 2: /* Query firmware version information */ + size = sizeof(bp->ver_resp); + *(data++) = size; + memcpy(data, &bp->ver_resp, min(size, eeprom->len - 1)); + return 0; + case 3: /* Query NVM device information */ + rc = hwrm_req_init(bp, dev_info_req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + dev_info_resp = hwrm_req_hold(bp, dev_info_req); + rc = hwrm_req_send(bp, dev_info_req); + if (rc == 0) { + size = sizeof(*dev_info_resp); + *(data++) = size; + memcpy(data, dev_info_resp, min(size, eeprom->len - 1)); + } + hwrm_req_drop(bp, dev_info_req); + return rc; + } + return -EINVAL; } static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) { + struct hwrm_nvm_erase_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_erase_dir_entry_input req = {0}; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); - req.dir_idx = cpu_to_le16(index); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); + if (rc) + return rc; + + req->dir_idx = cpu_to_le16(index); + return hwrm_req_send(bp, req); } static int bnxt_set_eeprom(struct net_device *dev, @@ -2376,16 +3348,19 @@ static int bnxt_set_eeprom(struct net_device *dev, } /* Create or re-write an NVM item: */ - if (bnxt_dir_type_is_executable(type) == true) + if (bnxt_dir_type_is_executable(type)) return -EOPNOTSUPP; ext = eeprom->magic & 0xffff; ordinal = eeprom->offset >> 16; attr = eeprom->offset & 0xffff; - return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, eeprom->len); } +#endif + +#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) static int bnxt_set_eee(struct net_device *dev, 
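
bnxt_get_eeprom() above overloads the 32-bit eeprom->offset: offset 0 returns the NVM directory, a non-zero top byte selects directory item (index - 1) with the low 24 bits as a byte offset into it, and a zero top byte turns the low bits into a query selector (1 = firmware reset status, 2 = firmware version, 3 = NVM device information). A sketch of how a caller would compose these values, assuming only the decode logic shown above; the macro names and helper are hypothetical:

#define EXAMPLE_EEPROM_DIRECTORY	0x0	/* whole NVM directory */
#define EXAMPLE_EEPROM_FW_RESET_STATUS	0x1	/* index 0, selector 1 */
#define EXAMPLE_EEPROM_FW_VERSION	0x2	/* index 0, selector 2 */
#define EXAMPLE_EEPROM_NVM_DEV_INFO	0x3	/* index 0, selector 3 */

/* Read NVM directory item 'dir_idx' (0-based) at byte offset 'byte_off':
 * the driver subtracts one from the top byte, so encode dir_idx + 1. */
static inline u32 example_eeprom_item_offset(u8 dir_idx, u32 byte_off)
{
	return ((u32)(dir_idx + 1) << 24) | (byte_off & 0xffffff);
}
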
struct ethtool_eee *edata) { struct bnxt *bp = netdev_priv(dev); @@ -2395,7 +3370,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); int rc = 0; - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; if (!(bp->flags & BNXT_FLAG_EEE_CAP)) @@ -2459,36 +3434,40 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +#endif +#if defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT) static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, u16 page_number, u16 start_addr, u16 data_length, u8 *buf) { - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_output *output; + struct hwrm_port_phy_i2c_read_input *req; int rc, byte_offset = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = i2c_addr; - req.page_number = cpu_to_le16(page_number); - req.port_id = cpu_to_le16(bp->pf.port_id); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + req->i2c_slave_addr = i2c_addr; + req->page_number = cpu_to_le16(page_number); + req->port_id = cpu_to_le16(bp->pf.port_id); do { u16 xfer_size; xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); data_length -= xfer_size; - req.page_offset = cpu_to_le16(start_addr + byte_offset); - req.data_length = xfer_size; - req.enables = cpu_to_le32(start_addr + byte_offset ? + req->page_offset = cpu_to_le16(start_addr + byte_offset); + req->data_length = xfer_size; + req->enables = cpu_to_le32(start_addr + byte_offset ? PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(buf + byte_offset, output->data, xfer_size); - mutex_unlock(&bp->hwrm_cmd_lock); byte_offset += xfer_size; } while (!rc && data_length > 0); + hwrm_req_drop(bp, req); return rc; } @@ -2562,7 +3541,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev, if (rc) return rc; start += length; data += length; length = eeprom->len - length; } @@ -2574,6 +3553,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev, } return rc; } +#endif static int bnxt_nway_reset(struct net_device *dev) { @@ -2582,7 +3562,7 @@ static int bnxt_nway_reset(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - if (!BNXT_SINGLE_PF(bp)) + if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) @@ -2594,17 +3574,68 @@ static int bnxt_nway_reset(struct net_device *dev) return rc; } -static int bnxt_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) +#if (LINUX_VERSION_CODE < 0x30000) +static int bnxt_phys_id(struct net_device *dev, u32 data) { - struct hwrm_port_led_cfg_input req = {0}; + struct hwrm_port_led_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_pf_info *pf = &bp->pf; struct bnxt_led_cfg *led_cfg; u8 led_state; - __le16 duration; + u16 duration; int i, rc; + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (!data) + data = 2; + + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = 500; + + while (1) { + /* reinit, don't hold onto the HWRM resources during delay */ + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
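
bnxt_get_module_eeprom() above splits one ethtool request across the two SFP EEPROM pages: bytes below ETH_MODULE_SFF_8436_LEN (256) come from page A0h, the rest from page A2h, and between the two reads the output pointer advances by the bytes already copied (the data += length line in the hunk), not by the absolute offset. A kernel-style sketch of that arithmetic, assuming the usual SFF-8436 page layout; example_module_split() is a hypothetical helper:

static void example_module_split(u16 offset, u16 len,
				 u16 *a0_len, u16 *a2_off, u16 *a2_len)
{
	u16 a0 = 0;

	if (offset < ETH_MODULE_SFF_8436_LEN)
		a0 = min_t(u16, len, ETH_MODULE_SFF_8436_LEN - offset);

	*a0_len = a0;		/* read from A0h at 'offset' into data[0..a0) */
	*a2_len = len - a0;	/* remainder read from A2h into data + a0 */
	/* Only meaningful when *a2_len != 0: A2h offsets restart at zero
	 * once the 256-byte A0h page is exhausted. */
	*a2_off = offset + a0 - ETH_MODULE_SFF_8436_LEN;
}
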
+ if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req->enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = cpu_to_le16(duration); + led_cfg->led_blink_off = cpu_to_le16(duration); + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_req_send(bp, req); + + if (!duration || rc) + break; + + msleep_interruptible(data * 1000); + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = 0; + } + return rc; +} + +#else +#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input *req; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + __le16 duration; + u8 led_state; + int rc, i; + if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@ -2617,28 +3648,38 @@ static int bnxt_set_phys_id(struct net_device *dev, } else { return -EINVAL; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.num_leds = bp->num_leds; - led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; for (i = 0; i < bp->num_leds; i++, led_cfg++) { - req.enables |= BNXT_LED_DFLT_ENABLES(i); + req->enables |= BNXT_LED_DFLT_ENABLES(i); led_cfg->led_id = bp->leds[i].led_id; led_cfg->led_state = led_state; led_cfg->led_blink_on = duration; led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return rc; + return hwrm_req_send(bp, req); } +#endif +#endif static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) { - struct hwrm_selftest_irq_input req = {0}; + struct hwrm_selftest_irq_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); + if (rc) + return rc; + + req->cmpl_ring = cpu_to_le16(cmpl_ring); + return hwrm_req_send(bp, req); } static int bnxt_test_irq(struct bnxt *bp) @@ -2658,31 +3699,37 @@ static int bnxt_test_irq(struct bnxt *bp) static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) { - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); if (enable) - req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; else - req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return hwrm_req_send(bp, req); } static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) { - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp; + struct 
hwrm_port_phy_qcaps_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2694,7 +3741,8 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, u16 fw_speed; int rc; - if (!link_info->autoneg) + if (!link_info->autoneg || + (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK)) return 0; rc = bnxt_query_force_speeds(bp, &fw_advertising); @@ -2702,7 +3750,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, return rc; fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; - if (netif_carrier_ok(bp->dev)) + if (bp->link_info.link_up) fw_speed = bp->link_info.link_speed; else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; @@ -2716,7 +3764,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, req->force_link_speed = cpu_to_le16(fw_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); req->flags = 0; req->force_link_speed = cpu_to_le16(0); return rc; @@ -2724,21 +3772,29 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ + hwrm_req_hold(bp, req); if (enable) { - bnxt_disable_an_for_lpbk(bp, &req); + bnxt_disable_an_for_lpbk(bp, req); if (ext) - req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; else - req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; } else { - req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; } - req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); + rc = hwrm_req_send(bp, req); + hwrm_req_drop(bp, req); + return rc; } static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -2856,17 +3912,21 @@ static int bnxt_run_loopback(struct bnxt *bp) static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) { - struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_exec_input req = {0}; + struct hwrm_selftest_exec_output *resp; + struct hwrm_selftest_exec_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - resp->test_success = 0; - req.flags = test_mask; - rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, bp->test_info->timeout); + req->flags = test_mask; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); *test_results = resp->test_success; - mutex_unlock(&bp->hwrm_cmd_lock); + 
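
The comment in bnxt_hwrm_phy_loopback() above points at a subtlety of the new API: hwrm_req_send() consumes an unheld request, so a request that a helper such as bnxt_disable_an_for_lpbk() will send must be held first if the caller intends to reconfigure and send it again. A minimal sketch of the hold-and-resend pattern, using only helpers from this patch; example_send_twice() is a hypothetical name:

static int example_send_twice(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);		/* survive the first send */

	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	rc = hwrm_req_send(bp, req);

	if (!rc) {
		/* the held request is still valid: reconfigure and resend */
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
		rc = hwrm_req_send(bp, req);
	}

	hwrm_req_drop(bp, req);		/* release the hold */
	return rc;
}
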
hwrm_req_drop(bp, req); return rc; } @@ -2971,74 +4031,151 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } } +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) +static int bnxt_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ptp_cfg *ptp; + + ptp = bp->ptp_cfg; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + info->phc_index = -1; + if (!ptp) + return 0; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (ptp->ptp_clock) + info->phc_index = ptp_clock_index(ptp->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + return 0; +} +#endif + +#if defined(ETHTOOL_RESET) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_hwrm_crashdump_erase(struct net_device *dev, u8 scope) +{ + struct hwrm_dbg_crashdump_erase_input *req; + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_ERASE); + if (rc) + return rc; + + req->scope = scope; + return hwrm_req_send(bp, req); +} + static int bnxt_reset(struct net_device *dev, u32 *flags) { struct bnxt *bp = netdev_priv(dev); - int rc = 0; + bool reload = false; + u32 req = *flags; + + if (!req) + return -EINVAL; if (!BNXT_PF(bp)) { netdev_err(dev, "Reset is not supported from a VF\n"); return -EOPNOTSUPP; } - if (pci_vfs_assigned(bp->pdev)) { + if (req & BNXT_FW_RESET_CRASHDUMP) { + if (bp->fw_cap & BNXT_FW_CAP_CRASHDUMP) { + u8 scope = DBG_CRASHDUMP_ERASE_REQ_SCOPE_INVALIDATE; + + if (!bnxt_hwrm_crashdump_erase(dev, scope)) { + netdev_info(dev, "Crashdump data erased.\n"); + *flags &= ~BNXT_FW_RESET_CRASHDUMP; + if (!*flags) + return 0; /* done, skip VF check */ + } + } else if (req == BNXT_FW_RESET_CRASHDUMP) { + return -EOPNOTSUPP; /* only request, fail hard */ + } + } + + if (pci_vfs_assigned(bp->pdev) && + !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { netdev_err(dev, "Reset not allowed when VFs are assigned to VMs\n"); return -EBUSY; } - if (*flags == ETH_RESET_ALL) { + if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { /* This feature is not supported in older firmware versions */ - if (bp->hwrm_spec_code < 0x10803) - return -EOPNOTSUPP; - - rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); - if (!rc) { - netdev_info(dev, "Reset request successful. 
Reload driver to complete reset\n"); - *flags = 0; + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_chip(dev)) { + netdev_info(dev, "Firmware reset request successful.\n"); + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) + reload = true; + *flags &= ~BNXT_FW_RESET_CHIP; + } + } else if (req == BNXT_FW_RESET_CHIP) { + return -EOPNOTSUPP; /* only request, fail hard */ } - } else if (*flags == ETH_RESET_AP) { - /* This feature is not supported in older firmware versions */ - if (bp->hwrm_spec_code < 0x10803) - return -EOPNOTSUPP; - - rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP); - if (!rc) { - netdev_info(dev, "Reset Application Processor request successful.\n"); - *flags = 0; - } - } else { - rc = -EINVAL; } - return rc; -} + if (req & BNXT_FW_RESET_AP) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_ap(dev)) { + netdev_info(dev, "Reset application processor successful.\n"); + reload = true; + *flags &= ~BNXT_FW_RESET_AP; + } + } else if (req == BNXT_FW_RESET_AP) { + return -EOPNOTSUPP; /* only request, fail hard */ + } + } -static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, + if (reload) + netdev_info(dev, "Reload driver to complete reset\n"); + + return 0; +} +#endif + +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, struct bnxt_hwrm_dbg_dma_info *info) { - struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; struct hwrm_dbg_cmn_input *cmn_req = msg; __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; u16 seq = 0, len, segs_off; - void *resp = cmn_resp; dma_addr_t dma_handle; + void *dma_buf, *resp; int rc, off = 0; - void *dma_buf; - dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, - GFP_KERNEL); - if (!dma_buf) + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; segs_off = offsetof(struct hwrm_dbg_coredump_list_output, total_segments); cmn_req->host_dest_addr = cpu_to_le64(dma_handle); cmn_req->host_buf_len = cpu_to_le32(info->dma_len); - mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, msg); if (rc) break; @@ -3082,26 +4219,27 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, seq++; off += len; } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); + hwrm_req_drop(bp, msg); return rc; } static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, struct bnxt_coredump *coredump) { - struct hwrm_dbg_coredump_list_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; info.dma_len = COREDUMP_LIST_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, data_len); - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) { coredump->data = info.dest_buf; coredump->data_size = info.dest_buf_size; @@ 
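
bnxt_hwrm_dbg_dma_data() above drives a chunked retrieval protocol: the same request is resent with an incrementing sequence number, firmware fills the DMA slice one chunk at a time, and iteration continues while the common response header advertises more data (HWRM_DBG_CMN_FLAGS_MORE, defined in the bnxt_ethtool.h hunk further below). A minimal sketch of the loop skeleton, assuming the setup done in the function above; example_dbg_dma_loop() is hypothetical and the per-chunk copy-out (carried by the elided context lines of the hunk) is reduced to a comment:

static int example_dbg_dma_loop(struct bnxt *bp, void *msg,
				struct hwrm_dbg_cmn_output *cmn_resp,
				__le16 *seq_ptr)
{
	u16 seq = 0;
	int rc;

	do {
		*seq_ptr = cpu_to_le16(seq);	/* sequence field inside the request */
		rc = hwrm_req_send(bp, msg);
		if (rc)
			break;
		/* ... copy this chunk out of the DMA slice here ... */
		seq++;
	} while (cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE);

	return rc;
}
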
-3113,26 +4251,34 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, u16 segment_id) { - struct hwrm_dbg_coredump_initiate_input req = {0}; + struct hwrm_dbg_coredump_initiate_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); + + return hwrm_req_send(bp, req); } static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, void *buf, u32 buf_len, u32 offset) { - struct hwrm_dbg_coredump_retrieve_input req = {0}; + struct hwrm_dbg_coredump_retrieve_input *req; struct bnxt_hwrm_dbg_dma_info info = {NULL}; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, @@ -3145,7 +4291,7 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, info.seg_start = offset; } - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) *seg_len = info.dest_buf_size; @@ -3165,6 +4311,7 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp, seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; seg_hdr->low_version = seg_rec->version_low; seg_hdr->high_version = seg_rec->version_hi; + seg_hdr->flags = seg_rec->compress_flags; } else { /* For hwrm_ver_get response Component id = 2 * and Segment id = 0 @@ -3180,16 +4327,34 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp, seg_hdr->instance = cpu_to_le32(instance); } +static struct bnxt_time +bnxt_get_current_time(struct bnxt *bp) +{ + struct bnxt_time time; +#if defined(HAVE_TIME64) + time64_t now = ktime_get_real_seconds(); + + time64_to_tm(now, 0, &time.tm); +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + struct timeval tv; + + do_gettimeofday(&tv); + rtc_time_to_tm(tv.tv_sec, &time.tm); +#endif + time.tm.tm_mon += 1; + time.tm.tm_year += 1900; + + return time; +} + static void bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, - time64_t start, s16 start_utc, u16 total_segs, + struct bnxt_time start, s16 start_utc, u16 total_segs, int status) { - time64_t end = ktime_get_real_seconds(); + struct bnxt_time end = bnxt_get_current_time(bp); u32 os_ver_major = 0, os_ver_minor = 0; - struct tm tm; - time64_to_tm(start, 0, &tm); memset(record, 0, sizeof(*record)); memcpy(record->signature, "cOrE", 4); record->flags = 0; @@ -3198,12 +4363,12 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->asic_state = 0; strlcpy(record->system_name, utsname()->nodename, sizeof(record->system_name)); - record->year = cpu_to_le16(tm.tm_year + 1900); - record->month = cpu_to_le16(tm.tm_mon + 1); - record->day = cpu_to_le16(tm.tm_mday); - record->hour = 
cpu_to_le16(tm.tm_hour); - record->minute = cpu_to_le16(tm.tm_min); - record->second = cpu_to_le16(tm.tm_sec); + record->year = cpu_to_le16(start.tm.tm_year); + record->month = cpu_to_le16(start.tm.tm_mon); + record->day = cpu_to_le16(start.tm.tm_mday); + record->hour = cpu_to_le16(start.tm.tm_hour); + record->minute = cpu_to_le16(start.tm.tm_min); + record->second = cpu_to_le16(start.tm.tm_sec); record->utc_bias = cpu_to_le16(start_utc); strcpy(record->commandline, "ethtool -w"); record->total_segments = cpu_to_le32(total_segs); @@ -3213,13 +4378,12 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->os_ver_minor = cpu_to_le32(os_ver_minor); strlcpy(record->os_name, utsname()->sysname, 32); - time64_to_tm(end, 0, &tm); - record->end_year = cpu_to_le16(tm.tm_year + 1900); - record->end_month = cpu_to_le16(tm.tm_mon + 1); - record->end_day = cpu_to_le16(tm.tm_mday); - record->end_hour = cpu_to_le16(tm.tm_hour); - record->end_minute = cpu_to_le16(tm.tm_min); - record->end_second = cpu_to_le16(tm.tm_sec); + record->end_year = cpu_to_le16(end.tm.tm_year); + record->end_month = cpu_to_le16(end.tm.tm_mon); + record->end_day = cpu_to_le16(end.tm.tm_mday); + record->end_hour = cpu_to_le16(end.tm.tm_hour); + record->end_minute = cpu_to_le16(end.tm.tm_min); + record->end_second = cpu_to_le16(end.tm.tm_sec); record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | bp->ver_resp.chip_rev << 8 | @@ -3237,14 +4401,14 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) struct coredump_segment_record *seg_record = NULL; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; - time64_t start_time; + struct bnxt_time start_time; u16 start_utc; int rc = 0, i; if (buf) buf_len = *dump_len; - start_time = ktime_get_real_seconds(); + start_time = bnxt_get_current_time(bp); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -3329,10 +4493,28 @@ err: kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); if (rc == -ENOBUFS) - netdev_err(bp->dev, "Firmware returned large coredump buffer"); + netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); return rc; } +static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) +{ + struct bnxt *bp = netdev_priv(dev); + + if (dump->flag > BNXT_DUMP_CRASH) { + netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n"); + return -EINVAL; + } + + if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) { + netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); + return -EOPNOTSUPP; + } + + bp->dump_flag = dump->flag; + return 0; +} + static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) { struct bnxt *bp = netdev_priv(dev); @@ -3345,7 +4527,12 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) bp->ver_resp.hwrm_fw_bld_8b << 8 | bp->ver_resp.hwrm_fw_rsvd_8b; - return bnxt_get_coredump(bp, NULL, &dump->len); + dump->flag = bp->dump_flag; + if (bp->dump_flag == BNXT_DUMP_CRASH) + dump->len = BNXT_CRASH_DUMP_LEN; + else + bnxt_get_coredump(bp, NULL, &dump->len); + return 0; } static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, @@ -3358,13 +4545,23 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, memset(buf, 0, dump->len); - return bnxt_get_coredump(bp, buf, &dump->len); + dump->flag = 
bp->dump_flag; + if (dump->flag == BNXT_DUMP_CRASH) { +#ifdef CONFIG_TEE_BNXT_FW + return tee_bnxt_copy_coredump(buf, 0, dump->len); +#endif + } else { + return bnxt_get_coredump(bp, buf, &dump->len); + } + + return 0; } +#endif /* ETHTOOL_GET_DUMP_FLAG */ void bnxt_ethtool_init(struct bnxt *bp) { - struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_qlist_input req = {0}; + struct hwrm_selftest_qlist_output *resp; + struct hwrm_selftest_qlist_input *req; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; int i, rc; @@ -3376,19 +4573,22 @@ void bnxt_ethtool_init(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + test_info = bp->test_info; + if (!test_info) { + test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); + if (!test_info) + return; + bp->test_info = test_info; + } + + if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto ethtool_init_exit; - test_info = bp->test_info; - if (!test_info) - test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); - if (!test_info) - goto ethtool_init_exit; - - bp->test_info = test_info; bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; if (bp->num_tests > BNXT_MAX_TEST) bp->num_tests = BNXT_MAX_TEST; @@ -3422,7 +4622,7 @@ void bnxt_ethtool_init(struct bnxt *bp) } ethtool_init_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } void bnxt_ethtool_free(struct bnxt *bp) @@ -3432,11 +4632,26 @@ void bnxt_ethtool_free(struct bnxt *bp) } const struct ethtool_ops bnxt_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, +#endif +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, +#else + .get_settings = bnxt_get_settings, + .set_settings = bnxt_set_settings, +#endif .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, + .get_regs_len = bnxt_get_regs_len, + .get_regs = bnxt_get_regs, .get_wol = bnxt_get_wol, .set_wol = bnxt_set_wol, .get_coalesce = bnxt_get_coalesce, @@ -3448,26 +4663,57 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_ethtool_stats = bnxt_get_ethtool_stats, .set_ringparam = bnxt_set_ringparam, .get_ringparam = bnxt_get_ringparam, +#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT) .get_channels = bnxt_get_channels, .set_channels = bnxt_set_channels, +#endif +#ifdef HAVE_RXNFC .get_rxnfc = bnxt_get_rxnfc, .set_rxnfc = bnxt_set_rxnfc, +#endif +#if defined(HAVE_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT) .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, +#endif +#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT) .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, +#endif +#if defined(HAVE_SET_RXFH) && defined(ETH_RSS_HASH_TOP) && !defined(GET_ETHTOOL_OP_EXT) + .set_rxfh = bnxt_set_rxfh, +#endif +#ifdef CONFIG_BNXT_FLASHDEV .flash_device = bnxt_flash_device, .get_eeprom_len = bnxt_get_eeprom_len, .get_eeprom = bnxt_get_eeprom, .set_eeprom = bnxt_set_eeprom, +#endif .get_link = 
bnxt_get_link, +#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) .get_eee = bnxt_get_eee, .set_eee = bnxt_set_eee, +#endif +#if defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT) .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, +#endif .nway_reset = bnxt_nway_reset, +#if (LINUX_VERSION_CODE < 0x30000) + .phys_id = bnxt_phys_id, +#else +#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT) .set_phys_id = bnxt_set_phys_id, +#endif +#endif .self_test = bnxt_self_test, +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) + .get_ts_info = bnxt_get_ts_info, +#endif +#if defined(ETHTOOL_RESET) && !defined(GET_ETHTOOL_OP_EXT) .reset = bnxt_reset, +#endif +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) + .set_dump = bnxt_set_dump, .get_dump_flag = bnxt_get_dump_flag, .get_dump_data = bnxt_get_dump_data, +#endif }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 3998f6e809a9..694403f13c2d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -11,6 +11,11 @@ #ifndef BNXT_ETHTOOL_H #define BNXT_ETHTOOL_H +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) +#include <linux/time.h> +#include <linux/rtc.h> +#endif + struct bnxt_led_cfg { u8 led_id; u8 led_state; @@ -22,6 +27,7 @@ struct bnxt_led_cfg { u8 rsvd; }; +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) #define COREDUMP_LIST_BUF_LEN 2048 #define COREDUMP_RETRIEVE_BUF_LEN 4096 @@ -31,6 +37,14 @@ struct bnxt_coredump { u16 total_segs; }; +struct bnxt_time { +#if defined(HAVE_TIME64) + struct tm tm; +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + struct rtc_time tm; +#endif +}; + #define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) struct bnxt_hwrm_dbg_dma_info { @@ -63,6 +77,9 @@ struct hwrm_dbg_cmn_output { #define HWRM_DBG_CMN_FLAGS_MORE 1 }; +#define BNXT_CRASH_DUMP_LEN (8 << 20) +#endif /* defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) */ + #define BNXT_LED_DFLT_ENA \ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ @@ -75,14 +92,23 @@ struct hwrm_dbg_cmn_output { #define BNXT_LED_DFLT_ENABLES(x) \ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) -#define BNXT_FW_RESET_AP 0xfffe -#define BNXT_FW_RESET_CHIP 0xffff +#define BNXT_FW_RESET_CRASHDUMP (ETH_RESET_CRASHDUMP << ETH_RESET_SHARED_SHIFT) +#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT) +#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) \ + << ETH_RESET_SHARED_SHIFT) + +#define BNXT_PXP_REG_LEN 0x3110 extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); u32 bnxt_fw_to_ethtool_speed(u16); u16 bnxt_get_fw_auto_link_speeds(u32); +int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, + u32 install_type); void bnxt_ethtool_init(struct bnxt *bp); void bnxt_ethtool_free(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_extra_ver.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_extra_ver.h new file mode 100644 index 000000000000..d120d32f730f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_extra_ver.h @@ -0,0 +1,15 @@ +/* Broadcom NetXtreme-C/E network driver. 
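
The BNXT_FW_RESET_* values defined above are compositions of the ETH_RESET_* component bits from <linux/ethtool.h>, shifted by ETH_RESET_SHARED_SHIFT to request the "shared across functions" scope that bnxt_reset() handles; bnxt_reset() clears each bit it satisfies, so a non-zero *flags on return reports the unserved remainder. A sketch of how such a reset request word is composed; example_reset_request() is a hypothetical helper:

static u32 example_reset_request(bool chip, bool ap, bool crashdump)
{
	u32 flags = 0;

	if (chip)		/* every shared chip component at once */
		flags |= (ETH_RESET_MGMT | ETH_RESET_IRQ | ETH_RESET_DMA |
			  ETH_RESET_FILTER | ETH_RESET_OFFLOAD |
			  ETH_RESET_MAC | ETH_RESET_PHY | ETH_RESET_RAM)
			 << ETH_RESET_SHARED_SHIFT;
	if (ap)			/* application processor only */
		flags |= ETH_RESET_AP << ETH_RESET_SHARED_SHIFT;
	if (crashdump)		/* erase stored crashdump data first */
		flags |= ETH_RESET_CRASHDUMP << ETH_RESET_SHARED_SHIFT;

	return flags;
}

Note also that BNXT_CRASH_DUMP_LEN above, (8 << 20), fixes the reported crash-dump size at 8 MiB.
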
+ * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_EXTRA_VER_H +#define BNXT_EXTRA_VER_H + +#define DRV_MODULE_EXTRA_VER "-216.1.192.4" + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h index cad30ddc6936..b94d804c2adb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 03b197eb793b..50dfc9893961 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2019 Broadcom Inc. + * Copyright (c) 2018-2020 Broadcom Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -169,13 +169,21 @@ struct cmd_nums { #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_SQ_ALLOC 0x55UL + #define HWRM_RING_SQ_CFG 0x56UL + #define HWRM_RING_SQ_FREE 0x57UL #define HWRM_RING_RESET 0x5eUL #define HWRM_RING_GRP_ALLOC 0x60UL #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RING_CFG 0x62UL + #define HWRM_RING_QCFG 0x63UL #define HWRM_RESERVED5 0x64UL #define HWRM_RESERVED6 0x65UL #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_QUEUE_MPLS_QCAPS 0x80UL + #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL + #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL #define HWRM_CFA_L2_FILTER_FREE 0x91UL #define HWRM_CFA_L2_FILTER_CFG 0x92UL @@ -204,11 +212,13 @@ struct cmd_nums { #define HWRM_PORT_PHY_MDIO_READ 0xb6UL #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL + #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL + #define HWRM_PORT_ECN_QSTATS 0xbaUL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL #define HWRM_FW_SYNC 0xc3UL - #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL + #define HWRM_FW_STATE_QCAPS 0xc4UL #define HWRM_FW_STATE_QUIESCE 0xc5UL #define HWRM_FW_STATE_BACKUP 0xc6UL #define HWRM_FW_STATE_RESTORE 0xc7UL @@ -217,6 +227,8 @@ struct cmd_nums { #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_FW_ECN_CFG 0xcdUL + #define HWRM_FW_ECN_QCFG 0xceUL #define HWRM_EXEC_FWD_RESP 0xd0UL #define HWRM_REJECT_FWD_RESP 0xd1UL #define HWRM_FWD_RESP 0xd2UL @@ -225,8 +237,12 @@ struct cmd_nums { #define HWRM_PORT_PRBS_TEST 0xd5UL #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL + #define HWRM_FW_STATE_UNQUIESCE 0xd8UL + #define HWRM_PORT_DSC_DUMP 0xd9UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_REG_POWER_QUERY 0xe1UL + #define 
HWRM_CORE_FREQUENCY_QUERY 0xe2UL + #define HWRM_REG_POWER_HISTOGRAM 0xe3UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL @@ -308,6 +324,7 @@ struct cmd_nums { #define HWRM_ENGINE_STATS_CONFIG 0x155UL #define HWRM_ENGINE_STATS_CLEAR 0x156UL #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL #define HWRM_ENGINE_RQ_ALLOC 0x15eUL #define HWRM_ENGINE_RQ_FREE 0x15fUL #define HWRM_ENGINE_CQ_ALLOC 0x160UL @@ -324,6 +341,8 @@ struct cmd_nums { #define HWRM_FUNC_VF_BW_CFG 0x195UL #define HWRM_FUNC_VF_BW_QCFG 0x196UL #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL + #define HWRM_FUNC_QSTATS_EXT 0x198UL + #define HWRM_STAT_EXT_CTX_QUERY 0x199UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -334,6 +353,37 @@ struct cmd_nums { #define HWRM_MFG_OTP_CFG 0x207UL #define HWRM_MFG_OTP_QCFG 0x208UL #define HWRM_MFG_HDMA_TEST 0x209UL + #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL + #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL + #define HWRM_TF 0x2bcUL + #define HWRM_TF_VERSION_GET 0x2bdUL + #define HWRM_TF_SESSION_OPEN 0x2c6UL + #define HWRM_TF_SESSION_ATTACH 0x2c7UL + #define HWRM_TF_SESSION_REGISTER 0x2c8UL + #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL + #define HWRM_TF_SESSION_CLOSE 0x2caUL + #define HWRM_TF_SESSION_QCFG 0x2cbUL + #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL + #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL + #define HWRM_TF_TBL_TYPE_GET 0x2daUL + #define HWRM_TF_TBL_TYPE_SET 0x2dbUL + #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL + #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL + #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL + #define HWRM_TF_EXT_EM_OP 0x2e7UL + #define HWRM_TF_EXT_EM_CFG 0x2e8UL + #define HWRM_TF_EXT_EM_QCFG 0x2e9UL + #define HWRM_TF_EM_INSERT 0x2eaUL + #define HWRM_TF_EM_DELETE 0x2ebUL + #define HWRM_TF_TCAM_SET 0x2f8UL + #define HWRM_TF_TCAM_GET 0x2f9UL + #define HWRM_TF_TCAM_MOVE 0x2faUL + #define HWRM_TF_TCAM_FREE 0x2fbUL + #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL + #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL + #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL @@ -349,6 +399,11 @@ struct cmd_nums { #define HWRM_DBG_RING_INFO_GET 0xff1cUL #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL + #define HWRM_DBG_DRV_TRACE 0xff1fUL + #define HWRM_DBG_QCAPS 0xff20UL + #define HWRM_DBG_QCFG 0xff21UL + #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL + #define HWRM_NVM_REQ_ARBITRATION 0xffedUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_FLUSH 0xfff0UL @@ -390,6 +445,7 @@ struct ret_codes { #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_BUSY 0x10UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -420,9 +476,9 @@ struct hwrm_err_output { #define HWRM_TARGET_ID_TOOLS 0xFFFD #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 -#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 100 -#define HWRM_VERSION_STR "1.10.0.100" +#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_RSVD 49 +#define HWRM_VERSION_STR "1.10.1.49" /* hwrm_ver_get_input (size:192b/24B) */ struct 
hwrm_ver_get_input { @@ -474,6 +530,7 @@ struct hwrm_ver_get_output { #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -637,6 +694,9 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -680,6 +740,97 @@ struct hwrm_async_event_cmpl_link_status_change { #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 }; +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 
1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +}; + /* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ struct hwrm_async_event_cmpl_port_conn_not_allowed { __le16 type; @@ -709,6 +860,28 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed { #define 
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN }; +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + /* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_link_speed_cfg_change { __le16 type; @@ -733,6 +906,31 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL +}; + /* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ struct hwrm_async_event_cmpl_reset_notify { __le16 type; @@ -790,6 +988,187 @@ struct hwrm_async_event_cmpl_error_recovery { #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL }; +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define 
ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + __le32 event_data2; + u8 opaque_v; + #define 
ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -815,6 +1194,37 @@ struct hwrm_async_event_cmpl_vf_cfg_change { #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL }; +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK 0x3UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC 0x2UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK 0x1cUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT 2 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK 0x1fffe0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT 5 +}; + /* hwrm_async_event_cmpl_default_vnic_change 
(size:128b/16B) */ struct hwrm_async_event_cmpl_default_vnic_change { __le16 type; @@ -914,6 +1324,248 @@ struct hwrm_async_event_cmpl_eem_cache_flush_done { #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0 }; +/* hwrm_async_event_cmpl_tcp_flag_action_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_tcp_flag_action_change { + __le16 type; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_eem_flow_active (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_flow_active { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_MASK 0x3fffffffUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION 0x40000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_RX (0x0UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX (0x1UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_MASK 0x3fffffffUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION 0x40000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE 0x80000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_0 (0x0UL << 31) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 (0x1UL << 31) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_LAST 
ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 +}; + +/* hwrm_async_event_cmpl_eem_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_TX_ENABLE 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_RX_ENABLE 0x2UL +}; + +/* hwrm_async_event_cmpl_quiesce_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_quiesce_done { + __le16 type; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SFT 0 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SUCCESS 0x0UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_TIMEOUT 0x1UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR 0x2UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_SFT 8 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_SFT 16 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_INCOMPLETE_NQ 0x10000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_1 0x20000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_2 0x40000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_3 0x80000UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_V 0x1UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */ +struct hwrm_async_event_cmpl_deferred_response { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define 
ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_pfc_watchdog_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pfc_watchdog_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_SFT 0 + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS0 0x1UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS1 0x2UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS2 0x4UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS3 0x8UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS4 0x10UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS5 0x20UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS6 0x40UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS7 0x80UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff00UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 8 +}; + +/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */ +struct hwrm_async_event_cmpl_fw_trace_msg { + __le16 type; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG + __le32 event_data2; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_MASK 0xffUL + #define 
ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_SFT 8 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_SFT 16 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_SFT 24 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_V 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_SFT 1 + u8 timestamp_lo; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_COMPLETE 0x0UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE 0x2UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_PRIMARY (0x0UL << 1) + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY (0x1UL << 1) + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY + __le16 timestamp_hi; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_MASK 0xffUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_SFT 8 + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_MASK 0xffUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_SFT 8 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_SFT 16 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_SFT 24 +}; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define 
ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -1047,6 +1699,8 @@ struct hwrm_func_vf_cfg_input { #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL __le16 num_rsscos_ctxs; __le16 num_cmpl_rings; __le16 num_tx_rings; @@ -1079,7 +1733,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_qcaps_output (size:640b/80B) */ +/* hwrm_func_qcaps_output (size:704b/88B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1115,6 +1769,11 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1135,7 +1794,18 @@ struct hwrm_func_qcaps_output { __le32 max_flow_id; __le32 max_hw_ring_grps; __le16 max_sp_tx_rings; - u8 unused_0; + u8 unused_0[2]; + __le32 flags_ext; + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SQ_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL + u8 max_sqs; + u8 unused_1[2]; u8 valid; }; @@ -1150,7 +1820,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:704b/88B) */ +/* hwrm_func_qcfg_output (size:768b/96B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -1169,6 +1839,8 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL + #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL + #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1255,11 +1927,16 @@ struct hwrm_func_qcfg_output { u8 unused_1; u8 always_1; __le32 reset_addr_poll; - u8 unused_2[3]; + __le16 legacy_l2_db_size_kb; + __le16 svif_info; + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0 + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL + u8 unused_2[7]; u8 valid; }; -/* hwrm_func_cfg_input (size:704b/88B) */ +/* hwrm_func_cfg_input (size:768b/96B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1290,30 +1967,35 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 
0x2000000UL + #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL __le32 enables; - #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL - #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL - #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL - #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL - #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL - #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL - #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL - #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL - #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL - #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL - #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL - #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL - #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL - #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL - #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL - #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL - #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL - #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL - #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL - #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL - #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL - #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL - #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL + #define FUNC_CFG_REQ_ENABLES_SQ_ID 0x1000000UL __le16 mtu; __le16 mru; __le16 num_rsscos_ctxs; @@ -1387,6 +2069,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 __le16 num_mcast_filters; + __le16 sq_id; + u8 unused_0[6]; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -1408,9 +2092,10 @@ struct hwrm_func_qstats_input { __le64 resp_addr; __le16 fid; u8 flags; - #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL - #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL - #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY + #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK u8 unused_0[5]; }; @@ -1444,6 +2129,58 @@ struct 
hwrm_func_qstats_output { u8 valid; }; +/* hwrm_func_qstats_ext_input (size:256b/32B) */ +struct hwrm_func_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + u8 unused_0[1]; + __le32 enables; + #define FUNC_QSTATS_EXT_REQ_ENABLES_SQ_ID 0x1UL + __le16 sq_id; + __le16 traffic_class; + u8 unused_1[4]; +}; + +/* hwrm_func_qstats_ext_output (size:1472b/184B) */ +struct hwrm_func_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_func_clr_stats_input (size:192b/24B) */ struct hwrm_func_clr_stats_input { __le16 req_type; @@ -1500,6 +2237,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1615,6 +2353,29 @@ struct hwrm_func_buf_rgtr_output { u8 valid; }; +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_UNRGTR_REQ_ENABLES_VF_ID 0x1UL + __le16 vf_id; + u8 unused_0[2]; +}; + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_func_drv_qver_input (size:192b/24B) */ struct hwrm_func_drv_qver_input { __le16 req_type; @@ -1762,7 +2523,7 @@ struct hwrm_func_backing_store_qcaps_input { __le64 resp_addr; }; -/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */ struct hwrm_func_backing_store_qcaps_output { __le16 error_code; __le16 req_type; @@ -1792,6 +2553,10 @@ struct hwrm_func_backing_store_qcaps_output { __le32 tim_max_entries; __le16 mrav_num_entries_units; u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + __le32 rsvd; + __le16 rsvd1; + u8 tqm_fp_rings_count; u8 valid; }; @@ -2135,6 +2900,338 @@ struct hwrm_func_backing_store_cfg_output { u8 valid; }; +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define 
FUNC_BACKING_STORE_QCFG_RESP_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL + u8 unused_0[4]; + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_QP 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_CQ 0x4UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_VNIC 0x8UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_STAT 0x10UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_MRAV 0x4000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_1 0x1UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) 
+ #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 
4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le16 srq_num_l2_entries; + __le16 cq_num_l2_entries; + __le32 cq_num_entries; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + u8 unused_1[7]; + u8 valid; +}; + /* hwrm_error_recovery_qcfg_input (size:192b/24B) */ struct hwrm_error_recovery_qcfg_input { __le16 req_type; @@ -2214,7 +3311,203 @@ struct hwrm_error_recovery_qcfg_output { #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 __le32 reset_reg_val[16]; u8 delay_after_reset[16]; - u8 unused_1[7]; + __le32 err_recovery_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2 + u8 unused_1[3]; + u8 valid; +}; + 
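/*
 * Illustrative, consumer-side sketch -- not part of the patch above.
 * err_recovery_cnt_reg in struct hwrm_error_recovery_qcfg_output packs a
 * 2-bit address-space selector (PCIE_CFG/GRC/BAR0/BAR1) into bits 1:0 and
 * the 4-byte-aligned register address into bits 31:2, following the
 * header's usual MASK/SFT idiom.  The helper name below is hypothetical;
 * a driver would first convert the little-endian field with le32_to_cpu().
 */
static inline void bnxt_parse_recovery_cnt_reg(u32 reg, u32 *addr_space, u32 *addr)
{
	/* Bits 1:0 select the address space of the heartbeat counter. */
	*addr_space = (reg & ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK) >>
		      ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT;
	/* Masking zeroes the low two bits, leaving the aligned byte offset. */
	*addr = reg & ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK;
}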
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
+struct hwrm_func_vlan_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
+struct hwrm_func_vlan_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 unused_0;
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd2;
+ __le32 rsvd3;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
+struct hwrm_func_vlan_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+struct hwrm_func_vlan_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */
+struct hwrm_func_vf_bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_vfs;
+ __le16 unused[3];
+ __le16 vfn[48];
+ #define FUNC_VF_BW_CFG_REQ_VFN_VFID_MASK 0xfffUL
+ #define FUNC_VF_BW_CFG_REQ_VFN_VFID_SFT 0
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_MASK 0xf000UL
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_SFT 12
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_0 (0x0UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_6_66 (0x1UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_13_33 (0x2UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_20 (0x3UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_26_66 (0x4UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_33_33 (0x5UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_40 (0x6UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_46_66 (0x7UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_53_33 (0x8UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_60 (0x9UL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_66_66 (0xaUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_73_33 (0xbUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_80 (0xcUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_86_66 (0xdUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_93_33 (0xeUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 (0xfUL << 12)
+ #define FUNC_VF_BW_CFG_REQ_VFN_RATE_LAST FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100
+};
+
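/*
 * Illustrative, caller-side sketch -- not part of the patch above.  Each
 * vfn[] entry in struct hwrm_func_vf_bw_cfg_input packs a 12-bit VF ID
 * into bits 11:0 and a 4-bit bandwidth-rate code into bits 15:12.  The
 * helper name is hypothetical; the entry is little-endian on the wire.
 */
static inline __le16 bnxt_vf_bw_entry(u16 vf_id, u16 rate_code)
{
	u16 val = (vf_id << FUNC_VF_BW_CFG_REQ_VFN_VFID_SFT) &
		  FUNC_VF_BW_CFG_REQ_VFN_VFID_MASK;

	val |= (rate_code << FUNC_VF_BW_CFG_REQ_VFN_RATE_SFT) &
	       FUNC_VF_BW_CFG_REQ_VFN_RATE_MASK;
	/* e.g. rate_code 0xc corresponds to ..._RATE_PCT_80, i.e. 80%. */
	return cpu_to_le16(val);
}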
+/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */
+struct hwrm_func_vf_bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_vfs;
+ __le16 unused[3];
+ __le16 vfn[48];
+ #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_MASK 0xfffUL
+ #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_SFT 0
+};
+
+/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */
+struct hwrm_func_vf_bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 num_vfs;
+ __le16 unused[3];
+ __le16 vfn[48];
+ #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_MASK 0xfffUL
+ #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_SFT 0
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_MASK 0xf000UL
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_SFT 12
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_0 (0x0UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_6_66 (0x1UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_13_33 (0x2UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_20 (0x3UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_26_66 (0x4UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_33_33 (0x5UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_40 (0x6UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_46_66 (0x7UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_53_33 (0x8UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_60 (0x9UL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_66_66 (0xaUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_73_33 (0xbUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_80 (0xcUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_86_66 (0xdUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_93_33 (0xeUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 (0xfUL << 12)
+ #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_LAST FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100
+ u8 unused_0[7];
 u8 valid;
 };
@@ -2243,6 +3536,55 @@ struct hwrm_func_drv_if_change_output {
 u8 valid;
 };

+/* hwrm_func_host_pf_ids_query_input (size:192b/24B) */
+struct hwrm_func_host_pf_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 host;
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_SOC 0x1UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_0 0x2UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_1 0x4UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_2 0x8UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_3 0x10UL
+ u8 filter;
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ALL 0x0UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_L2 0x1UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ROCE 0x2UL
+ #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_LAST FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ROCE
+ u8 unused_1[6];
+};
+
+/* hwrm_func_host_pf_ids_query_output (size:128b/16B) */
+struct hwrm_func_host_pf_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_pf_id;
+ __le16 pf_ordinal_mask;
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_0 0x1UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_1 0x2UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_2 0x4UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_3 0x8UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_4 0x10UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_5 0x20UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_6 0x40UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_7 0x80UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_8 0x100UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_9 0x200UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_10 0x400UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_11 0x800UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_12 0x1000UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_13 0x2000UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_14 0x4000UL
+ #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_15 0x8000UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
 /* hwrm_port_phy_cfg_input (size:448b/56B) */
 struct hwrm_port_phy_cfg_input {
 __le16 req_type;
@@ -2251,33 +3593,39 @@ struct hwrm_port_phy_cfg_input {
 __le16 target_id;
 __le64 resp_addr;
 __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
 __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL __le16 port_id; __le16 force_link_speed; #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL @@ -2290,7 +3638,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -2321,7 +3668,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -2339,7 +3685,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL @@ -2363,11 +3708,19 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL - u8 unused_2[2]; + __le16 force_pam4_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB __le32 tx_lpi_timer; #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 - __le32 unused_3; + __le16 auto_link_pam4_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL + u8 unused_2[2]; }; /* hwrm_port_phy_cfg_output (size:128b/16B) */ @@ -2401,7 +3754,7 @@ struct hwrm_port_phy_qcfg_input { u8 unused_0[6]; }; -/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +/* hwrm_port_phy_qcfg_output (size:832b/104B) */ struct hwrm_port_phy_qcfg_output { __le16 error_code; __le16 req_type; @@ -2412,7 +3765,10 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK - u8 unused_0; + u8 
link_signal_mode; + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 __le16 link_speed; #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL @@ -2449,7 +3805,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL - #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL __le16 force_link_speed; #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL @@ -2461,7 +3816,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -2486,7 +3840,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -2504,7 +3857,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL @@ -2524,6 +3876,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE __le32 preemphasis; @@ -2637,13 +3990,21 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 __le16 fec_cfg; - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define 
PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL u8 duplex_state; #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL @@ -2652,7 +4013,24 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - u8 unused_2[7]; + __le16 support_pam4_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL + __le16 force_pam4_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB + __le16 auto_pam4_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL + __le16 link_partner_pam4_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL + u8 unused_0[7]; u8 valid; }; @@ -2742,6 +4120,70 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_qcfg_output (size:256b/32B) */ +struct hwrm_port_mac_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 flags; + #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL + #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL + #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define 
PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_1; + __le16 port_svif_info; + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_MASK 0x7fffUL + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_SFT 0 + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_VALID 0x8000UL + u8 unused_2[5]; + u8 valid; +}; + /* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ struct hwrm_port_mac_ptp_qcfg_input { __le16 req_type; @@ -2761,8 +4203,8 @@ struct hwrm_port_mac_ptp_qcfg_output { __le16 resp_len; u8 flags; #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL u8 unused_0[3]; __le32 rx_ts_reg_off_lower; __le32 rx_ts_reg_off_upper; @@ -2916,7 +4358,11 @@ struct hwrm_port_qstats_input { __le16 target_id; __le64 resp_addr; __le16 port_id; - u8 unused_0[6]; + u8 flags; + #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL + #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK + u8 unused_0[5]; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; }; @@ -3030,6 +4476,90 @@ struct rx_port_stats_ext { __le64 rx_discard_packets_cos7; }; +/* rx_port_stats_ext_pfc_wd (size:5120b/640B) */ +struct rx_port_stats_ext_pfc_wd { + __le64 rx_pfc_watchdog_storms_detected_pri0; + __le64 rx_pfc_watchdog_storms_detected_pri1; + __le64 rx_pfc_watchdog_storms_detected_pri2; + __le64 rx_pfc_watchdog_storms_detected_pri3; + __le64 rx_pfc_watchdog_storms_detected_pri4; + __le64 rx_pfc_watchdog_storms_detected_pri5; + __le64 rx_pfc_watchdog_storms_detected_pri6; + __le64 rx_pfc_watchdog_storms_detected_pri7; + __le64 rx_pfc_watchdog_storms_reverted_pri0; + __le64 rx_pfc_watchdog_storms_reverted_pri1; + __le64 rx_pfc_watchdog_storms_reverted_pri2; + __le64 rx_pfc_watchdog_storms_reverted_pri3; + __le64 rx_pfc_watchdog_storms_reverted_pri4; + __le64 rx_pfc_watchdog_storms_reverted_pri5; + __le64 rx_pfc_watchdog_storms_reverted_pri6; + __le64 rx_pfc_watchdog_storms_reverted_pri7; + __le64 rx_pfc_watchdog_storms_rx_packets_pri0; + __le64 rx_pfc_watchdog_storms_rx_packets_pri1; + __le64 rx_pfc_watchdog_storms_rx_packets_pri2; + __le64 rx_pfc_watchdog_storms_rx_packets_pri3; + __le64 rx_pfc_watchdog_storms_rx_packets_pri4; + __le64 rx_pfc_watchdog_storms_rx_packets_pri5; + __le64 rx_pfc_watchdog_storms_rx_packets_pri6; + __le64 
rx_pfc_watchdog_storms_rx_packets_pri7; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri0; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri1; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri2; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri3; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri4; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri5; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri6; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri7; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri0; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri1; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri2; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri3; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri4; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri5; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri6; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri7; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri0; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri1; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri2; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri3; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri4; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri5; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri6; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri7; +}; + /* hwrm_port_qstats_ext_input (size:320b/40B) */ struct hwrm_port_qstats_ext_input { __le16 req_type; @@ -3040,7 +4570,11 @@ struct hwrm_port_qstats_ext_input { __le16 port_id; __le16 tx_stat_size; __le16 rx_stat_size; - u8 unused_0[2]; + u8 flags; + #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL + #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL + #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + u8 
unused_0; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; }; @@ -3059,6 +4593,31 @@ struct hwrm_port_qstats_ext_output { u8 valid; }; +/* hwrm_port_qstats_ext_pfc_wd_input (size:256b/32B) */ +struct hwrm_port_qstats_ext_pfc_wd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 pfc_wd_stat_size; + u8 unused_0[4]; + __le64 pfc_wd_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_pfc_wd_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_pfc_wd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pfc_wd_stat_size; + u8 flags; + u8 valid; + u8 unused_0[4]; +}; + /* hwrm_port_lpbk_qstats_input (size:128b/16B) */ struct hwrm_port_lpbk_qstats_input { __le16 req_type; @@ -3088,6 +4647,36 @@ struct hwrm_port_lpbk_qstats_output { u8 valid; }; +/* hwrm_port_ecn_qstats_input (size:192b/24B) */ +struct hwrm_port_ecn_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_ecn_qstats_output (size:384b/48B) */ +struct hwrm_port_ecn_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 mark_cnt_cos0; + __le32 mark_cnt_cos1; + __le32 mark_cnt_cos2; + __le32 mark_cnt_cos3; + __le32 mark_cnt_cos4; + __le32 mark_cnt_cos5; + __le32 mark_cnt_cos6; + __le32 mark_cnt_cos7; + u8 mark_en; + u8 unused_0[6]; + u8 valid; +}; + /* hwrm_port_clr_stats_input (size:192b/24B) */ struct hwrm_port_clr_stats_input { __le16 req_type; @@ -3170,17 +4759,19 @@ struct hwrm_port_phy_qcaps_input { u8 unused_0[6]; }; -/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +/* hwrm_port_phy_qcaps_output (size:256b/32B) */ struct hwrm_port_phy_qcaps_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4 u8 port_cnt; #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL @@ -3203,7 +4794,6 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL __le16 supported_speeds_auto_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL @@ -3219,7 +4809,6 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL __le16 supported_speeds_eee_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL @@ -3236,8 +4825,48 @@ struct hwrm_port_phy_qcaps_output { __le32 valid_tx_lpi_timer_high; #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 - #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL - #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 + #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24 + __le16 supported_pam4_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL + __le16 supported_pam4_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_phy_i2c_write_input (size:832b/104B) */ +struct hwrm_port_phy_i2c_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; + __le32 data[16]; +}; + +/* hwrm_port_phy_i2c_write_output (size:128b/16B) */ +struct hwrm_port_phy_i2c_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; }; /* hwrm_port_phy_i2c_read_input (size:320b/40B) */ @@ -3656,6 +5285,212 @@ struct hwrm_port_led_qcaps_output { u8 valid; }; +/* hwrm_port_prbs_test_input (size:384b/48B) */ +struct hwrm_port_prbs_test_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le16 data_len; + __le16 unused_0; + __le32 unused_1; + __le16 port_id; + __le16 poly; + #define PORT_PRBS_TEST_REQ_POLY_PRBS7 0x0UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS9 0x1UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS11 0x2UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS15 0x3UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS23 0x4UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS31 0x5UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS58 0x6UL + #define PORT_PRBS_TEST_REQ_POLY_INVALID 0xffUL + #define PORT_PRBS_TEST_REQ_POLY_LAST PORT_PRBS_TEST_REQ_POLY_INVALID + __le16 prbs_config; + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_START_STOP 0x1UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_TX_LANE_MAP_VALID 0x2UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_RX_LANE_MAP_VALID 0x4UL + __le16 timeout; + __le32 tx_lane_map; + __le32 rx_lane_map; +}; + +/* hwrm_port_prbs_test_output (size:128b/16B) */ +struct hwrm_port_prbs_test_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 unused_0; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_dsc_dump_input (size:320b/40B) */ +struct hwrm_port_dsc_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le16 data_len; + __le16 unused_0; + __le32 unused_1; + __le16 port_id; + __le16 diag_level; + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_LANE 0x0UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_CORE 0x1UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EVENT 0x2UL + #define 
PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EYE 0x3UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_REG_CORE 0x4UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_REG_LANE 0x5UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_UC_CORE 0x6UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_UC_LANE 0x7UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_LANE_DEBUG 0x8UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_BER_VERT 0x9UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_BER_HORZ 0xaUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EVENT_SAFE 0xbUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP 0xcUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_LAST PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP + __le16 lane_number; + __le16 dsc_dump_config; + #define PORT_DSC_DUMP_REQ_DSC_DUMP_CONFIG_START_RETRIEVE 0x1UL +}; + +/* hwrm_port_dsc_dump_output (size:128b/16B) */ +struct hwrm_port_dsc_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 unused_0; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_sfp_sideband_cfg_input (size:256b/32B) */ +struct hwrm_port_sfp_sideband_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le32 enables; + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RS0 0x1UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RS1 0x2UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_TX_DIS 0x4UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_MOD_SEL 0x8UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RESET_L 0x10UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_LP_MODE 0x20UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_PWR_DIS 0x40UL + __le32 flags; + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RS0 0x1UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RS1 0x2UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_TX_DIS 0x4UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_MOD_SEL 0x8UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RESET_L 0x10UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_LP_MODE 0x20UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_PWR_DIS 0x40UL +}; + +/* hwrm_port_sfp_sideband_cfg_output (size:128b/16B) */ +struct hwrm_port_sfp_sideband_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused[7]; + u8 valid; +}; + +/* hwrm_port_sfp_sideband_qcfg_input (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_sfp_sideband_qcfg_output (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 supported_mask; + __le32 sideband_signals; + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_MOD_ABS 0x1UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RX_LOS 0x2UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RS0 0x4UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RS1 0x8UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_TX_DIS 0x10UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_TX_FAULT 0x20UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_MOD_SEL 0x40UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RESET_L 0x80UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_LP_MODE 0x100UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_PWR_DIS 0x200UL + u8 unused[7]; + u8 valid; +}; + +/* 
hwrm_port_phy_mdio_bus_acquire_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_acquire_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 client_id; + __le16 mdio_bus_timeout; + u8 unused_0[2]; +}; + +/* hwrm_port_phy_mdio_bus_acquire_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_acquire_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 unused_0; + __le16 client_id; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_bus_release_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_release_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 client_id; + u8 unused_0[4]; +}; + +/* hwrm_port_phy_mdio_bus_release_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_release_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 unused_0; + __le16 clients_id; + u8 unused_1[3]; + u8 valid; +}; + /* hwrm_queue_qportcfg_input (size:192b/24B) */ struct hwrm_queue_qportcfg_input { __le16 req_type; @@ -3765,6 +5600,39 @@ struct hwrm_queue_qportcfg_output { u8 valid; }; +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX + __le32 queue_id; +}; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 queue_len; + u8 service_profile; + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN + u8 queue_cfg_info; + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0; + u8 valid; +}; + /* hwrm_queue_cfg_input (size:320b/40B) */ struct hwrm_queue_cfg_input { __le16 req_type; @@ -3820,14 +5688,22 @@ struct hwrm_queue_pfcenable_qcfg_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + 
#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL u8 unused_0[3]; u8 valid; }; @@ -3840,14 +5716,22 @@ struct hwrm_queue_pfcenable_cfg_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL __le16 port_id; u8 unused_0[2]; }; @@ -4718,6 +6602,96 @@ struct hwrm_queue_dscp2pri_cfg_output { u8 valid; }; +/* hwrm_queue_mpls_qcaps_input (size:192b/24B) */ +struct hwrm_queue_mpls_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_mpls_qcaps_output (size:128b/16B) */ +struct hwrm_queue_mpls_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_mplstc2pri_cfg_allowed; + u8 hw_default_pri; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_queue_mplstc2pri_qcfg_input (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_mplstc2pri_qcfg_output (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 tc0_pri_queue_id; + u8 tc1_pri_queue_id; + u8 tc2_pri_queue_id; + u8 tc3_pri_queue_id; + u8 tc4_pri_queue_id; + u8 tc5_pri_queue_id; + u8 tc6_pri_queue_id; + u8 tc7_pri_queue_id; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_mplstc2pri_cfg_input 
(size:256b/32B) */ +struct hwrm_queue_mplstc2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC0_PRI_QUEUE_ID 0x1UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC1_PRI_QUEUE_ID 0x2UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC2_PRI_QUEUE_ID 0x4UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC3_PRI_QUEUE_ID 0x8UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC4_PRI_QUEUE_ID 0x10UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC5_PRI_QUEUE_ID 0x20UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC6_PRI_QUEUE_ID 0x40UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC7_PRI_QUEUE_ID 0x80UL + u8 port_id; + u8 unused_0[3]; + u8 tc0_pri_queue_id; + u8 tc1_pri_queue_id; + u8 tc2_pri_queue_id; + u8 tc3_pri_queue_id; + u8 tc4_pri_queue_id; + u8 tc5_pri_queue_id; + u8 tc6_pri_queue_id; + u8 tc7_pri_queue_id; +}; + +/* hwrm_queue_mplstc2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_mplstc2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_vnic_alloc_input (size:192b/24B) */ struct hwrm_vnic_alloc_input { __le16 req_type; @@ -4786,6 +6760,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL + #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; @@ -4795,7 +6770,12 @@ struct hwrm_vnic_cfg_input { __le16 default_rx_ring_id; __le16 default_cmpl_ring_id; __le16 queue_id; - u8 unused0[6]; + u8 rx_csum_v2_mode; + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX + u8 unused0[5]; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -4808,6 +6788,50 @@ struct hwrm_vnic_cfg_output { u8 valid; }; +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define VNIC_QCFG_REQ_ENABLES_VF_ID_VALID 0x1UL + __le32 vnic_id; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCFG_RESP_FLAGS_DEFAULT 0x1UL + #define VNIC_QCFG_RESP_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_QCFG_RESP_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_QCFG_RESP_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + __le16 queue_id; + u8 rx_csum_v2_mode; + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_LAST VNIC_QCFG_RESP_RX_CSUM_V2_MODE_MAX + u8 unused_1[4]; + u8 valid; +}; + /* hwrm_vnic_qcaps_input (size:192b/24B) */ struct hwrm_vnic_qcaps_input { __le16 req_type; @@ -4837,6 +6861,7 @@ struct hwrm_vnic_qcaps_output { #define 
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -4980,6 +7005,51 @@ struct hwrm_vnic_rss_cfg_output { u8 valid; }; +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + u8 code; + #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + u8 unused_0[7]; +}; + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 unused_1[6]; + u8 valid; +}; + /* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ struct hwrm_vnic_plcmodes_cfg_input { __le16 req_type; @@ -4994,15 +7064,18 @@ struct hwrm_vnic_plcmodes_cfg_input { #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL __le32 enables; #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL __le32 vnic_id; __le16 jumbo_thresh; __le16 hds_offset; __le16 hds_threshold; - u8 unused_0[6]; + __le16 max_bds; + u8 unused_0[4]; }; /* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ @@ -5015,6 +7088,40 @@ struct hwrm_vnic_plcmodes_cfg_output { u8 valid; }; +/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_DFLT_VNIC 0x40UL + #define 
VNIC_PLCMODES_QCFG_RESP_FLAGS_VIRTIO_PLACEMENT 0x80UL + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 max_bds; + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { __le16 req_type; @@ -5070,6 +7177,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SQ_ID 0x200UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL @@ -5085,7 +7193,7 @@ struct hwrm_ring_alloc_input { __le32 fbo; u8 page_size; u8 page_tbl_depth; - u8 unused_1[2]; + __le16 sq_id; __le32 length; __le16 logical_id; __le16 cmpl_ring_id; @@ -5183,11 +7291,12 @@ struct hwrm_ring_reset_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_RESET_REQ_RING_TYPE_TX 0x1UL - #define RING_RESET_REQ_RING_TYPE_RX 0x2UL - #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL - #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP u8 unused_0; __le16 ring_id; u8 unused_1[4]; @@ -5204,6 +7313,75 @@ struct hwrm_ring_reset_output { u8 valid; }; +/* hwrm_ring_cfg_input (size:256b/32B) */ +struct hwrm_ring_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_CFG_REQ_RING_TYPE_TX 0x1UL + #define RING_CFG_REQ_RING_TYPE_RX 0x2UL + #define RING_CFG_REQ_RING_TYPE_LAST RING_CFG_REQ_RING_TYPE_RX + u8 unused_0; + __le16 ring_id; + __le16 enables; + #define RING_CFG_REQ_ENABLES_RX_SOP_PAD_ENABLE 0x1UL + #define RING_CFG_REQ_ENABLES_PROXY_MODE_ENABLE 0x2UL + #define RING_CFG_REQ_ENABLES_TX_PROXY_SRC_INTF_OVERRIDE 0x4UL + #define RING_CFG_REQ_ENABLES_SQ_ID 0x8UL + #define RING_CFG_REQ_ENABLES_CMPL_RING_ID_UPDATE 0x10UL + __le16 proxy_fid; + __le16 sq_id; + __le16 cmpl_ring_id; + u8 rx_sop_pad_bytes; + u8 unused_1[3]; +}; + +/* hwrm_ring_cfg_output (size:128b/16B) */ +struct hwrm_ring_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_qcfg_input (size:192b/24B) */ +struct hwrm_ring_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_QCFG_REQ_RING_TYPE_TX 0x1UL + #define RING_QCFG_REQ_RING_TYPE_RX 0x2UL + #define RING_QCFG_REQ_RING_TYPE_LAST RING_QCFG_REQ_RING_TYPE_RX + u8 unused_0[5]; + __le16 ring_id; +}; + +/* hwrm_ring_qcfg_output (size:192b/24B) */ +struct hwrm_ring_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 enables; + #define RING_QCFG_RESP_ENABLES_RX_SOP_PAD_ENABLE 0x1UL + #define RING_QCFG_RESP_ENABLES_PROXY_MODE_ENABLE 0x2UL + #define RING_QCFG_RESP_ENABLES_TX_PROXY_SRC_INTF_OVERRIDE 0x4UL + __le16 proxy_fid; + __le16 sq_id; + __le16 cmpl_ring_id; + u8 rx_sop_pad_bytes; + u8 unused_0[6]; + u8 valid; +}; + /* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ struct hwrm_ring_aggint_qcaps_input { __le16 
req_type; @@ -5258,7 +7436,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input { __le16 target_id; __le64 resp_addr; __le16 ring_id; - u8 unused_0[6]; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0 + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + u8 unused_0[4]; }; /* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ @@ -5364,6 +7546,246 @@ struct hwrm_ring_grp_free_output { u8 unused_0[7]; u8 valid; }; + +/* hwrm_ring_sq_alloc_input (size:1088b/136B) */ +struct hwrm_ring_sq_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING0 0x1UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING1 0x2UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING2 0x4UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING3 0x8UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING4 0x10UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING5 0x20UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING6 0x40UL + #define RING_SQ_ALLOC_REQ_ENABLES_TQM_RING7 0x80UL + __le32 reserved; + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING0_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING0_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING1_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING1_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING2_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING2_LVL_LVL_2 + #define 
RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING3_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING3_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING4_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING4_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING5_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING5_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 
tqm_ring6_pg_size_tqm_ring6_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING6_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING6_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_MASK 0xfUL + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_SFT 0 + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define RING_SQ_ALLOC_REQ_TQM_RING7_LVL_LAST RING_SQ_ALLOC_REQ_TQM_RING7_LVL_LVL_2 + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_LAST RING_SQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_1G + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le16 tqm_entry_size; + u8 unused_0[6]; +}; + +/* hwrm_ring_sq_alloc_output (size:128b/16B) */ +struct hwrm_ring_sq_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 sq_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_ring_sq_cfg_input (size:768b/96B) */ +struct hwrm_ring_sq_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 sq_id; + u8 tc_enabled; + u8 unused_0; + __le32 flags; + #define RING_SQ_CFG_REQ_FLAGS_TC_MAX_BW_ENABLED 0x1UL + #define RING_SQ_CFG_REQ_FLAGS_TC_MIN_BW_ENABLED 0x2UL + __le32 max_bw_tc0; + __le32 max_bw_tc1; + __le32 max_bw_tc2; + __le32 max_bw_tc3; + __le32 max_bw_tc4; + __le32 max_bw_tc5; + __le32 max_bw_tc6; + __le32 max_bw_tc7; + __le32 min_bw_tc0; + __le32 min_bw_tc1; + __le32 min_bw_tc2; + __le32 min_bw_tc3; + __le32 min_bw_tc4; + __le32 min_bw_tc5; + __le32 min_bw_tc6; + __le32 min_bw_tc7; + __le32 max_bw; + u8 unused_1[4]; +}; + +/* hwrm_ring_sq_cfg_output (size:128b/16B) */ +struct hwrm_ring_sq_cfg_output { + __le16 error_code; + __le16 req_type; 
+ __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_sq_free_input (size:192b/24B) */ +struct hwrm_ring_sq_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 sq_id; + u8 unused_0[6]; +}; + +/* hwrm_ring_sq_free_output (size:128b/16B) */ +struct hwrm_ring_sq_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; #define DEFAULT_FLOW_ID 0xFFFFFFFFUL #define ROCEV1_FLOW_ID 0xFFFFFFFEUL #define ROCEV2_FLOW_ID 0xFFFFFFFDUL @@ -5594,6 +8016,53 @@ struct hwrm_cfa_l2_set_rx_mask_cmd_err { u8 unused_0[7]; }; +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 num_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 max_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 num_vlan_entries; + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ struct hwrm_cfa_tunnel_filter_alloc_input { __le16 req_type; @@ -5689,6 +8158,120 @@ struct hwrm_cfa_tunnel_filter_free_output { u8 valid; }; +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 flags; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_FLAGS_MODIFY_DST 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dest_fid; + u8 unused_0[5]; + u8 valid; +}; + /* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ struct hwrm_vxlan_ipv4_hdr { u8 ver_hlen; @@ -5807,7 +8390,7 @@ struct hwrm_cfa_encap_record_free_output { u8 valid; }; -/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */ +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ struct 
hwrm_cfa_ntuple_filter_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -5815,10 +8398,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -5887,8 +8472,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __be16 dst_port; __be16 dst_port_mask; __le64 ntuple_filter_id_hint; - __le16 rfs_ring_tbl_idx; - u8 unused_0[6]; }; /* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ @@ -5954,7 +8537,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input { #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL __le32 flags; - #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -5974,6 +8558,513 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_LAST CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_BYTE_CTR 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PKT_CTR 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DECAP 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_ENCAP 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DROP 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_METER 0x40UL + __le32 enables; + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_ID 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_MACADDR 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_OVLAN_VID 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IVLAN_VID 0x40UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ETHERTYPE 0x80UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_IPADDR 0x100UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_IPADDR 0x200UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x400UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x800UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_PORT 0x1000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_PORT 0x2000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_ID 0x4000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x8000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ENCAP_RECORD_ID 0x10000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_METER_INSTANCE_ID 0x20000UL + __le64 
l2_filter_id; + u8 tunnel_type; + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[3]; + __le32 tunnel_id; + u8 src_macaddr[6]; + __le16 meter_instance_id; + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_LAST CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID + u8 dst_macaddr[6]; + __le16 ovlan_vid; + __le16 ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_LAST CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP + u8 unused_1[2]; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 mirror_vnic_id; + __le32 encap_record_id; + u8 unused_2[4]; +}; + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 em_filter_id; + __le32 flow_id; + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 em_filter_id; +}; + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + 
#define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 em_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_meter_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_meter_qcaps_output (size:320b/40B) */ +struct hwrm_cfa_meter_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_MASK 0xfUL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_SFT 0 + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_375MHZ 0x0UL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_625MHZ 0x1UL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_LAST CFA_METER_QCAPS_RESP_FLAGS_CLOCK_625MHZ + u8 unused_0[4]; + __le16 min_tx_profile; + __le16 max_tx_profile; + __le16 min_rx_profile; + __le16 max_rx_profile; + __le16 min_tx_instance; + __le16 max_tx_instance; + __le16 min_rx_instance; + __le16 max_rx_instance; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_LAST CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 + __le16 reserved1; + __le32 reserved2; + __le32 commit_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define 
CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW + __le32 commit_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define 
CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_LAST CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_LAST CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 + __le16 meter_profile_id; + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID + __le32 reserved; + __le32 commit_rate; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_LAST 
CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW + __le32 commit_burst; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define 
CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_LAST CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_cfg_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_LAST CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_INVALID + __le16 meter_instance_id; + u8 unused_1[2]; +}; + +/* hwrm_cfa_meter_instance_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + 
__le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ struct hwrm_cfa_decap_filter_alloc_input { __le16 req_type; @@ -6228,6 +9319,120 @@ struct hwrm_cfa_flow_free_output { u8 valid; }; +/* hwrm_cfa_flow_action_data (size:960b/120B) */ +struct hwrm_cfa_flow_action_data { + __le16 action_flags; + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL_IP 0x20UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TTL_DECREMENT 0x40UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FLOW_AGING_ENABLED 0x80UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_ENCAP 0x100UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DECAP 0x200UL + __le16 act_meter_id; + __le16 vnic_id; + __le16 vport_id; + __be16 nat_port; + __le16 unused_0[3]; + __be32 nat_ip_address[4]; + u8 encap_type; + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPIP 0x4UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_MPLS 0x6UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VLAN 0x7UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_LAST CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE_V6 + u8 unused[7]; + __le32 encap_data[20]; +}; + +/* hwrm_cfa_flow_tunnel_hdr_data (size:64b/8B) */ +struct hwrm_cfa_flow_tunnel_hdr_data { + u8 tunnel_type; + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define 
CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_LAST CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL + u8 unused[3]; + __le32 tunnel_id; +}; + +/* hwrm_cfa_flow_l4_key_data (size:64b/8B) */ +struct hwrm_cfa_flow_l4_key_data { + __be16 l4_src_port; + __be16 l4_dst_port; + __le32 unused; +}; + +/* hwrm_cfa_flow_l3_key_data (size:512b/64B) */ +struct hwrm_cfa_flow_l3_key_data { + u8 ip_protocol; + u8 unused_0[7]; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be32 nat_ip_address[4]; + __le32 unused[2]; +}; + +/* hwrm_cfa_flow_l2_key_data (size:448b/56B) */ +struct hwrm_cfa_flow_l2_key_data { + __be16 dmac[3]; + __le16 unused_0; + __be16 smac[3]; + __le16 unused_1; + __be16 l2_rewrite_dmac[3]; + __le16 unused_2; + __be16 l2_rewrite_smac[3]; + __le16 ethertype; + __le16 num_vlan_tags; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + u8 unused_3[2]; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + u8 unused[8]; +}; + +/* hwrm_cfa_flow_key_data (size:4160b/520B) */ +struct hwrm_cfa_flow_key_data { + __le32 t_l2_key_data[14]; + __le32 t_l2_key_mask[14]; + __le32 t_l3_key_data[16]; + __le32 t_l3_key_mask[16]; + __le32 t_l4_key_data[2]; + __le32 t_l4_key_mask[2]; + __le32 tunnel_hdr[2]; + __le32 l2_key_data[14]; + __le32 l2_key_mask[14]; + __le32 l3_key_data[16]; + __le32 l3_key_mask[16]; + __le32 l4_key_data[2]; + __le32 l4_key_mask[2]; +}; + /* hwrm_cfa_flow_info_input (size:256b/32B) */ struct hwrm_cfa_flow_info_input { __le16 req_type; @@ -6273,6 +9478,52 @@ struct hwrm_cfa_flow_info_output { u8 valid; }; +/* hwrm_cfa_flow_flush_input (size:256b/32B) */ +struct hwrm_cfa_flow_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_TABLE_VALID 0x1UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_RESET_ALL 0x2UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_RESET_PORT 0x4UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_INCL_FC 0x8000000UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_MASK 0xc0000000UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_SFT 30 + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_16BIT (0x0UL << 30) + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT (0x1UL << 30) + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_LAST CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT + u8 page_size; + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_4K 0x0UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_8K 0x1UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_64K 0x4UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_256K 0x6UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1M 0x8UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_2M 0x9UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_4M 0xaUL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1G 0x12UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_LAST CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1G + u8 page_level; + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LAST CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_2 + __le16 num_flows; + __le64 page_dir; +}; + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { 
+ __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_cfa_flow_stats_input (size:640b/80B) */ struct hwrm_cfa_flow_stats_input { __le16 req_type; @@ -6334,6 +9585,363 @@ struct hwrm_cfa_flow_stats_output { u8 valid; }; +/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_timer_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[2]; + __le32 flow_timer; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_timer_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_cfg_input (size:384b/48B) */ +struct hwrm_cfa_flow_aging_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FLOW_TIMER 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FIN_TIMER 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_UDP_FLOW_TIMER 0x4UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_DMA_INTERVAL 0x8UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_NOTICE_INTERVAL 0x10UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_MAX_ENTRIES 0x20UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_ID 0x40UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_MEM_TYPE 0x80UL + u8 flags; + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_DISABLE (0x0UL << 1) + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_ENABLE (0x1UL << 1) + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_ENABLE + u8 unused_0; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + __le16 eem_dma_interval; + __le16 eem_notice_interval; + __le32 eem_ctx_max_entries; + __le16 eem_ctx_id; + __le16 eem_ctx_mem_type; + #define CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_EJECTION_DATA 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_LAST CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_EJECTION_DATA + u8 unused_1[4]; +}; + +/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */ +struct hwrm_cfa_flow_aging_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + __le16 eem_dma_interval; + __le16 eem_notice_interval; + __le32 eem_ctx_max_entries; + __le16 eem_ctx_id; + __le16 eem_ctx_mem_type; + u8 unused_0[7]; + u8 
valid; +}; + +/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_flow_aging_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 max_tcp_flow_timer; + __le32 max_tcp_fin_timer; + __le32 max_udp_flow_timer; + __le32 max_aging_flows; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_tcp_flag_process_qcfg_input (size:128b/16B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_ar_id_port0; + __le16 rx_ar_id_port1; + __le16 tx_ar_id_port0; + __le16 tx_ar_id_port1; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_a_id; + __le16 vf_b_id; + u8 unused_0[4]; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_VF_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + __le16 vf_pair_index; + u8 unused_0[2]; + char vf_pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */ +struct hwrm_cfa_vf_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_vf_pair_index; + __le16 vf_a_fid; + __le16 vf_a_index; + __le16 vf_b_fid; + __le16 vf_b_index; + u8 pair_state; + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + u8 unused_0[5]; + char pair_name[32]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_alloc_input (size:576b/72B) */ +struct hwrm_cfa_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 pair_mode; + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PROXY 0x3UL + #define 
CFA_PAIR_ALLOC_REQ_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MOD 0x5UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL 0x6UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_LAST CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL + u8 unused_0; + __le16 vf_a_id; + u8 host_b_id; + u8 pf_b_id; + __le16 vf_b_id; + u8 port_id; + u8 pri; + __le16 new_pf_fid; + __le32 enables; + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_AB_VALID 0x1UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_BA_VALID 0x2UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_AB_VALID 0x4UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_BA_VALID 0x8UL + char pair_name[32]; + u8 q_ab; + u8 q_ba; + u8 fc_ab; + u8 fc_ba; + u8 unused_1[4]; +}; + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_REPRE 0x2UL + __le16 pair_index; + u8 pair_pfid; + u8 pair_vfid; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_info_output (size:576b/72B) */ +struct hwrm_cfa_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_pair_index; + __le16 a_fid; + u8 host_a_index; + u8 pf_a_index; + __le16 vf_a_index; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 b_fid; + u8 host_b_index; + u8 pf_b_index; + __le16 vf_b_index; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 pair_mode; + #define CFA_PAIR_INFO_RESP_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_LAST CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR + u8 pair_state; + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + char pair_name[32]; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ struct hwrm_cfa_vfr_alloc_input { __le16 req_type; @@ -6379,6 +9987,308 @@ struct hwrm_cfa_vfr_free_output { u8 valid; }; +/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */ +struct hwrm_cfa_redirect_query_tunnel_type_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 unused_0[6]; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */ +struct hwrm_cfa_redirect_query_tunnel_type_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tunnel_mask; + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NONTUNNEL 0x1UL + 
#define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN 0x2UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NVGRE 0x4UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2GRE 0x8UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPIP 0x10UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_GENEVE 0x20UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_MPLS 0x40UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_STT 0x80UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE 0x100UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_V4 0x200UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE_V1 0x400UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_ANYTUNNEL 0x800UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2_ETYPE 0x1000UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_GPE_V6 0x2000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_rgtr_input (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + u8 page_level; + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LAST CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 + u8 page_size; + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4K 0x0UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_8K 0x1UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_64K 0x4UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_256K 0x6UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1M 0x8UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_2M 0x9UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4M 0xaUL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1G 0x12UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_LAST CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1G + __le32 unused_0; + __le64 page_dir; +}; + +/* hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_unrgtr_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_qctx_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_qctx_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_qctx_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + u8 page_level; + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LAST CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_2 + u8 page_size; + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_4K 0x0UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_8K 0x1UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_64K 0x4UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_256K 0x6UL + 
#define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1M 0x8UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_2M 0x9UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_4M 0xaUL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1G 0x12UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_LAST CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1G + u8 unused_0[4]; + __le64 page_dir; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_entries; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_counter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_counter_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_counter_qcaps_output (size:576b/72B) */ +struct hwrm_cfa_counter_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT 0x1UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_NONE 0x0UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_64_BIT 0x1UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_LAST CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_64_BIT + __le32 unused_0; + __le32 min_rx_fc; + __le32 max_rx_fc; + __le32 min_tx_fc; + __le32 max_tx_fc; + __le32 min_rx_efc; + __le32 max_rx_efc; + __le32 min_tx_efc; + __le32 max_tx_efc; + __le32 min_rx_mdc; + __le32 max_rx_mdc; + __le32 min_tx_mdc; + __le32 max_tx_mdc; + __le32 max_flow_alloc_fc; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_cfa_counter_cfg_input (size:256b/32B) */ +struct hwrm_cfa_counter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE 0x1UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_DISABLE 0x0UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_ENABLE 0x1UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_LAST CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_ENABLE + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH 0x2UL + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 1) + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 1) + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH_LAST CFA_COUNTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_MASK 0xcUL + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_SFT 2 + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PUSH (0x0UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL (0x1UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC (0x2UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_LAST CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC + __le16 counter_type; + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_FC 0x0UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_EFC 0x1UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_MDC 0x2UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_LAST CFA_COUNTER_CFG_REQ_COUNTER_TYPE_MDC + __le16 ctx_id; + __le16 update_tmr_ms; + __le32 num_entries; + __le32 unused_0; +}; + +/* hwrm_cfa_counter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_counter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_counter_qcfg_input 
(size:192b/24B) */ +struct hwrm_cfa_counter_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_LAST CFA_COUNTER_QCFG_REQ_FLAGS_PATH_RX + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_MASK 0x6UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_SFT 1 + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PUSH (0x0UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL (0x1UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC (0x2UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_LAST CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC + __le16 counter_type; + __le32 unused_0; +}; + +/* hwrm_cfa_counter_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_counter_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ctx_id; + __le16 update_tmr_ms; + __le32 num_entries; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_counter_qstats_input (size:320b/40B) */ +struct hwrm_cfa_counter_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH 0x1UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_LAST CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_RX + __le16 counter_type; + __le16 input_flow_ctx_id; + __le16 num_entries; + __le16 delta_time_ms; + __le16 meter_instance_id; + __le16 mdc_ctx_id; + u8 unused_0[2]; + __le64 expected_count; +}; + +/* hwrm_cfa_counter_qstats_output (size:128b/16B) */ +struct hwrm_cfa_counter_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_cfa_eem_qcaps_input (size:192b/24B) */ struct hwrm_cfa_eem_qcaps_input { __le16 req_type; @@ -6534,22 +10444,895 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL + #define 
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL u8 unused_0[3]; u8 valid; }; +/* hwrm_cfa_tflib_input (size:1024b/128B) */ +struct hwrm_cfa_tflib_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 tf_type; + __le16 tf_subtype; + u8 unused0[4]; + __le32 tf_req[26]; +}; + +/* hwrm_cfa_tflib_output (size:5632b/704B) */ +struct hwrm_cfa_tflib_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tf_type; + __le16 tf_subtype; + __le32 tf_resp_code; + __le32 tf_resp[170]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_input (size:1024b/128B) */ +struct hwrm_tf_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + __le16 subtype; + u8 unused0[4]; + __le32 req[26]; +}; + +/* hwrm_tf_output (size:5632b/704B) */ +struct hwrm_tf_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + __le16 subtype; + __le32 resp_code; + __le32 resp[170]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_version_get_input (size:128b/16B) */ +struct hwrm_tf_version_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_tf_version_get_output (size:128b/16B) */ +struct hwrm_tf_version_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 major; + u8 minor; + u8 update; + u8 unused0[4]; + u8 valid; +}; + +/* hwrm_tf_session_open_input (size:640b/80B) */ +struct hwrm_tf_session_open_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 session_name[64]; +}; + +/* hwrm_tf_session_open_output (size:192b/24B) */ +struct hwrm_tf_session_open_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_session_id; + __le32 fw_session_client_id; + __le32 unused0; + u8 unused1[3]; + u8 valid; +}; + +/* hwrm_tf_session_attach_input (size:704b/88B) */ +struct hwrm_tf_session_attach_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 attach_fw_session_id; + __le32 unused0; + u8 session_name[64]; +}; + +/* hwrm_tf_session_attach_output (size:128b/16B) */ +struct hwrm_tf_session_attach_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_session_id; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_session_register_input (size:704b/88B) */ +struct hwrm_tf_session_register_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 unused0; + u8 
session_client_name[64]; +}; + +/* hwrm_tf_session_register_output (size:128b/16B) */ +struct hwrm_tf_session_register_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_session_client_id; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_session_unregister_input (size:192b/24B) */ +struct hwrm_tf_session_unregister_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 fw_session_client_id; +}; + +/* hwrm_tf_session_unregister_output (size:128b/16B) */ +struct hwrm_tf_session_unregister_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_close_input (size:192b/24B) */ +struct hwrm_tf_session_close_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + u8 unused0[4]; +}; + +/* hwrm_tf_session_close_output (size:128b/16B) */ +struct hwrm_tf_session_close_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_qcfg_input (size:192b/24B) */ +struct hwrm_tf_session_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + u8 unused0[4]; +}; + +/* hwrm_tf_session_qcfg_output (size:128b/16B) */ +struct hwrm_tf_session_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 rx_act_flags; + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_ABCR_GFID_EN 0x1UL + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_ABCR_VTAG_DLT_BOTH 0x2UL + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_TECT_SMAC_OVR_RUTNSL2 0x4UL + u8 tx_act_flags; + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_ABCR_VEB_EN 0x1UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_GRE_SET_K 0x2UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_IPV6_TC_IH 0x4UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_IPV4_TOS_IH 0x8UL + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_session_resc_qcaps_input (size:256b/32B) */ +struct hwrm_tf_session_resc_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_TX + __le16 qcaps_size; + __le64 qcaps_addr; +}; + +/* hwrm_tf_session_resc_qcaps_output (size:192b/24B) */ +struct hwrm_tf_session_resc_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_MASK 0x3UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_SFT 0 + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_STATIC 0x0UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_1 0x1UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_2 0x2UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_3 0x3UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_LAST TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_3 + __le16 size; + __le16 unused0; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_session_resc_alloc_input (size:320b/40B) */ +struct hwrm_tf_session_resc_alloc_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_TX + __le16 req_size; + __le64 req_addr; + __le64 resc_addr; +}; + +/* hwrm_tf_session_resc_alloc_output (size:128b/16B) */ +struct hwrm_tf_session_resc_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 size; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_session_resc_free_input (size:256b/32B) */ +struct hwrm_tf_session_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_FREE_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_FREE_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_FREE_REQ_FLAGS_DIR_TX + __le16 free_size; + __le64 free_addr; +}; + +/* hwrm_tf_session_resc_free_output (size:128b/16B) */ +struct hwrm_tf_session_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_resc_flush_input (size:256b/32B) */ +struct hwrm_tf_session_resc_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_TX + __le16 flush_size; + __le64 flush_addr; +}; + +/* hwrm_tf_session_resc_flush_output (size:128b/16B) */ +struct hwrm_tf_session_resc_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* tf_rm_resc_req_entry (size:64b/8B) */ +struct tf_rm_resc_req_entry { + __le32 type; + __le16 min; + __le16 max; +}; + +/* tf_rm_resc_entry (size:64b/8B) */ +struct tf_rm_resc_entry { + __le32 type; + __le16 start; + __le16 stride; +}; + +/* hwrm_tf_tbl_type_get_input (size:256b/32B) */ +struct hwrm_tf_tbl_type_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_GET_REQ_FLAGS_DIR_TX + u8 unused0[2]; + __le32 type; + __le32 index; +}; + +/* hwrm_tf_tbl_type_get_output (size:1216b/152B) */ +struct hwrm_tf_tbl_type_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 size; + __le16 unused0; + u8 data[128]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */ +struct hwrm_tf_tbl_type_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR_TX 0x1UL + #define 
TF_TBL_TYPE_SET_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_SET_REQ_FLAGS_DIR_TX + u8 unused0[2]; + __le32 type; + __le32 index; + __le16 size; + u8 unused1[6]; + u8 data[88]; +}; + +/* hwrm_tf_tbl_type_set_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_ctxt_mem_rgtr_input (size:256b/32B) */ +struct hwrm_tf_ctxt_mem_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + u8 page_level; + #define TF_CTXT_MEM_RGTR_REQ_PAGE_LEVEL_LVL_0 0x0UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_LEVEL_LVL_1 0x1UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 0x2UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_LEVEL_LAST TF_CTXT_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 + u8 page_size; + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_4K 0x0UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_8K 0x1UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_64K 0x4UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_256K 0x6UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_1M 0x8UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_2M 0x9UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_4M 0xaUL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_1G 0x12UL + #define TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_LAST TF_CTXT_MEM_RGTR_REQ_PAGE_SIZE_1G + __le32 unused0; + __le64 page_dir; +}; + +/* hwrm_tf_ctxt_mem_rgtr_output (size:128b/16B) */ +struct hwrm_tf_ctxt_mem_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ctx_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_ctxt_mem_unrgtr_input (size:192b/24B) */ +struct hwrm_tf_ctxt_mem_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ctx_id; + u8 unused0[6]; +}; + +/* hwrm_tf_ctxt_mem_unrgtr_output (size:128b/16B) */ +struct hwrm_tf_ctxt_mem_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_ext_em_qcaps_input (size:192b/24B) */ +struct hwrm_tf_ext_em_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define TF_EXT_EM_QCAPS_REQ_FLAGS_DIR 0x1UL + #define TF_EXT_EM_QCAPS_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EXT_EM_QCAPS_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EXT_EM_QCAPS_REQ_FLAGS_DIR_LAST TF_EXT_EM_QCAPS_REQ_FLAGS_DIR_TX + #define TF_EXT_EM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x2UL + __le32 unused0; +}; + +/* hwrm_tf_ext_em_qcaps_output (size:320b/40B) */ +struct hwrm_tf_ext_em_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define TF_EXT_EM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x1UL + #define TF_EXT_EM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x2UL + __le32 unused0; + __le32 supported; + #define TF_EXT_EM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL + #define TF_EXT_EM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL + #define TF_EXT_EM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL + #define TF_EXT_EM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + #define TF_EXT_EM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL + __le32 max_entries_supported; + __le16 key_entry_size; + __le16 record_entry_size; + __le16 efc_entry_size; + __le16 fid_entry_size; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_ext_em_op_input (size:192b/24B) */ +struct hwrm_tf_ext_em_op_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 
target_id; + __le64 resp_addr; + __le16 flags; + #define TF_EXT_EM_OP_REQ_FLAGS_DIR 0x1UL + #define TF_EXT_EM_OP_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EXT_EM_OP_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EXT_EM_OP_REQ_FLAGS_DIR_LAST TF_EXT_EM_OP_REQ_FLAGS_DIR_TX + __le16 unused0; + __le16 op; + #define TF_EXT_EM_OP_REQ_OP_RESERVED 0x0UL + #define TF_EXT_EM_OP_REQ_OP_EXT_EM_DISABLE 0x1UL + #define TF_EXT_EM_OP_REQ_OP_EXT_EM_ENABLE 0x2UL + #define TF_EXT_EM_OP_REQ_OP_EXT_EM_CLEANUP 0x3UL + #define TF_EXT_EM_OP_REQ_OP_LAST TF_EXT_EM_OP_REQ_OP_EXT_EM_CLEANUP + __le16 unused1; +}; + +/* hwrm_tf_ext_em_op_output (size:128b/16B) */ +struct hwrm_tf_ext_em_op_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_ext_em_cfg_input (size:384b/48B) */ +struct hwrm_tf_ext_em_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define TF_EXT_EM_CFG_REQ_FLAGS_DIR 0x1UL + #define TF_EXT_EM_CFG_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EXT_EM_CFG_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EXT_EM_CFG_REQ_FLAGS_DIR_LAST TF_EXT_EM_CFG_REQ_FLAGS_DIR_TX + #define TF_EXT_EM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x2UL + #define TF_EXT_EM_CFG_REQ_FLAGS_SECONDARY_PF 0x4UL + __le16 group_id; + u8 flush_interval; + u8 unused0; + __le32 num_entries; + __le32 unused1; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + __le16 unused2; + __le32 unused3; +}; + +/* hwrm_tf_ext_em_cfg_output (size:128b/16B) */ +struct hwrm_tf_ext_em_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_ext_em_qcfg_input (size:192b/24B) */ +struct hwrm_tf_ext_em_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define TF_EXT_EM_QCFG_REQ_FLAGS_DIR 0x1UL + #define TF_EXT_EM_QCFG_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EXT_EM_QCFG_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EXT_EM_QCFG_REQ_FLAGS_DIR_LAST TF_EXT_EM_QCFG_REQ_FLAGS_DIR_TX + __le32 unused0; +}; + +/* hwrm_tf_ext_em_qcfg_output (size:256b/32B) */ +struct hwrm_tf_ext_em_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define TF_EXT_EM_QCFG_RESP_FLAGS_DIR 0x1UL + #define TF_EXT_EM_QCFG_RESP_FLAGS_DIR_RX 0x0UL + #define TF_EXT_EM_QCFG_RESP_FLAGS_DIR_TX 0x1UL + #define TF_EXT_EM_QCFG_RESP_FLAGS_DIR_LAST TF_EXT_EM_QCFG_RESP_FLAGS_DIR_TX + #define TF_EXT_EM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x2UL + __le32 num_entries; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_em_insert_input (size:832b/104B) */ +struct hwrm_tf_em_insert_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_INSERT_REQ_FLAGS_DIR 0x1UL + #define TF_EM_INSERT_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_INSERT_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EM_INSERT_REQ_FLAGS_DIR_LAST TF_EM_INSERT_REQ_FLAGS_DIR_TX + __le16 strength; + __le32 action_ptr; + __le32 em_record_idx; + __le64 em_key[8]; + __le16 em_key_bitlen; + __le16 unused0[3]; +}; + +/* hwrm_tf_em_insert_output (size:128b/16B) */ +struct hwrm_tf_em_insert_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rptr_index; + u8 rptr_entry; + 
u8 num_of_entries; + __le32 unused0; +}; + +/* hwrm_tf_em_delete_input (size:832b/104B) */ +struct hwrm_tf_em_delete_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_DELETE_REQ_FLAGS_DIR 0x1UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_LAST TF_EM_DELETE_REQ_FLAGS_DIR_TX + __le16 unused0; + __le64 flow_handle; + __le64 em_key[8]; + __le16 em_key_bitlen; + __le16 unused1[3]; +}; + +/* hwrm_tf_em_delete_output (size:128b/16B) */ +struct hwrm_tf_em_delete_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 em_index; + __le16 unused0[3]; +}; + +/* hwrm_tf_tcam_set_input (size:1024b/128B) */ +struct hwrm_tf_tcam_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_SET_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_LAST TF_TCAM_SET_REQ_FLAGS_DIR_TX + #define TF_TCAM_SET_REQ_FLAGS_DMA 0x2UL + __le32 type; + __le16 idx; + u8 key_size; + u8 result_size; + u8 mask_offset; + u8 result_offset; + u8 unused0[6]; + u8 dev_data[88]; +}; + +/* hwrm_tf_tcam_set_output (size:128b/16B) */ +struct hwrm_tf_tcam_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_get_input (size:256b/32B) */ +struct hwrm_tf_tcam_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_GET_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_GET_REQ_FLAGS_DIR_LAST TF_TCAM_GET_REQ_FLAGS_DIR_TX + __le32 type; + __le16 idx; + __le16 unused0; +}; + +/* hwrm_tf_tcam_get_output (size:2368b/296B) */ +struct hwrm_tf_tcam_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 key_size; + u8 result_size; + u8 mask_offset; + u8 result_offset; + u8 unused0[4]; + u8 dev_data[272]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_move_input (size:1024b/128B) */ +struct hwrm_tf_tcam_move_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_MOVE_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_LAST TF_TCAM_MOVE_REQ_FLAGS_DIR_TX + __le32 type; + __le16 count; + __le16 unused0; + __le16 idx_pairs[48]; +}; + +/* hwrm_tf_tcam_move_output (size:128b/16B) */ +struct hwrm_tf_tcam_move_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_free_input (size:1024b/128B) */ +struct hwrm_tf_tcam_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_FREE_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_LAST TF_TCAM_FREE_REQ_FLAGS_DIR_TX + __le32 type; + __le16 count; + __le16 unused0; + __le16 idx_list[48]; +}; + 
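+};

A minimal usage sketch for the hwrm_tf_tcam_free_input message above (illustrative only; the send_hwrm_request() helper and the HWRM_TF_TCAM_FREE request-type constant are assumptions, not defined in this hunk). The caller selects the direction with the DIR flag bit, sets count, and packs little-endian entry indices into idx_list:

	static int tf_tcam_free_tx(u32 fw_session_id, u32 tcam_type,
				   const u16 *indices, u16 count)
	{
		struct hwrm_tf_tcam_free_input req = {0};
		u16 i;

		if (count > ARRAY_SIZE(req.idx_list))
			return -EINVAL;

		req.req_type = cpu_to_le16(HWRM_TF_TCAM_FREE);	/* assumed constant */
		req.fw_session_id = cpu_to_le32(fw_session_id);
		/* flags bit 0 is DIR: 0 = RX, 1 = TX */
		req.flags = cpu_to_le32(TF_TCAM_FREE_REQ_FLAGS_DIR_TX);
		req.type = cpu_to_le32(tcam_type);
		req.count = cpu_to_le16(count);
		for (i = 0; i < count; i++)
			req.idx_list[i] = cpu_to_le16(indices[i]);

		return send_hwrm_request(&req, sizeof(req));	/* hypothetical helper */
	}

The matching hwrm_tf_tcam_free_output below carries only the standard response header plus the trailing valid byte.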
+/* hwrm_tf_tcam_free_output (size:128b/16B) */ +struct hwrm_tf_tcam_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_global_cfg_set_input (size:448b/56B) */ +struct hwrm_tf_global_cfg_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR 0x1UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_LAST TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_TX + __le32 type; + __le32 offset; + __le16 size; + u8 unused0[6]; + u8 data[16]; +}; + +/* hwrm_tf_global_cfg_set_output (size:128b/16B) */ +struct hwrm_tf_global_cfg_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_global_cfg_get_input (size:320b/40B) */ +struct hwrm_tf_global_cfg_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR 0x1UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_LAST TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_TX + __le32 type; + __le32 offset; + __le16 size; + u8 unused0[6]; +}; + +/* hwrm_tf_global_cfg_get_output (size:256b/32B) */ +struct hwrm_tf_global_cfg_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 size; + u8 unused0[6]; + u8 data[16]; +}; + /* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_query_input { __le16 req_type; @@ -6647,15 +11430,15 @@ struct ctx_hw_stats { __le64 rx_mcast_pkts; __le64 rx_bcast_pkts; __le64 rx_discard_pkts; - __le64 rx_drop_pkts; + __le64 rx_error_pkts; __le64 rx_ucast_bytes; __le64 rx_mcast_bytes; __le64 rx_bcast_bytes; __le64 tx_ucast_pkts; __le64 tx_mcast_pkts; __le64 tx_bcast_pkts; + __le64 tx_error_pkts; __le64 tx_discard_pkts; - __le64 tx_drop_pkts; __le64 tx_ucast_bytes; __le64 tx_mcast_bytes; __le64 tx_bcast_bytes; @@ -6671,15 +11454,15 @@ struct ctx_hw_stats_ext { __le64 rx_mcast_pkts; __le64 rx_bcast_pkts; __le64 rx_discard_pkts; - __le64 rx_drop_pkts; + __le64 rx_error_pkts; __le64 rx_ucast_bytes; __le64 rx_mcast_bytes; __le64 rx_bcast_bytes; __le64 tx_ucast_pkts; __le64 tx_mcast_pkts; __le64 tx_bcast_pkts; + __le64 tx_error_pkts; __le64 tx_discard_pkts; - __le64 tx_drop_pkts; __le64 tx_ucast_bytes; __le64 tx_mcast_bytes; __le64 tx_bcast_bytes; @@ -6690,6 +11473,18 @@ struct ctx_hw_stats_ext { __le64 rx_tpa_errors; }; +/* ctx_eng_stats (size:512b/64B) */ +struct ctx_eng_stats { + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; +}; + /* hwrm_stat_ctx_alloc_input (size:256b/32B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; @@ -6746,7 +11541,9 @@ struct hwrm_stat_ctx_query_input { __le16 target_id; __le64 resp_addr; __le32 stat_ctx_id; - u8 unused_0[4]; + u8 flags; + #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; }; /* hwrm_stat_ctx_query_output (size:1408b/176B) */ @@ -6779,6 +11576,79 @@ struct hwrm_stat_ctx_query_output { u8 valid; }; +/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ext_ctx_query_input { 
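+	/*
+	 * The first five fields are the standard HWRM request header shared
+	 * by every command in this file: req_type selects the command,
+	 * cmpl_ring and seq_id sequence the completion, target_id selects
+	 * the target function, and resp_addr is the host DMA address to
+	 * which firmware writes the response structure.
+	 */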
+ __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 flags; + #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */ +struct hwrm_stat_ext_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_eng_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */ +struct hwrm_stat_ctx_eng_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ struct hwrm_stat_ctx_clr_stats_input { __le16 req_type; @@ -6953,6 +11823,39 @@ struct hwrm_fw_set_time_output { u8 valid; }; +/* hwrm_fw_get_time_input (size:128b/16B) */ +struct hwrm_fw_get_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_get_time_output (size:192b/24B) */ +struct hwrm_fw_get_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 year; + #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL + #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_GET_TIME_RESP_ZONE_UTC 0 + #define FW_GET_TIME_RESP_ZONE_UNKNOWN 65535 + #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN + u8 unused_1[3]; + u8 valid; +}; + /* hwrm_struct_hdr (size:128b/16B) */ struct hwrm_struct_hdr { __le16 struct_id; @@ -6977,6 +11880,54 @@ struct hwrm_struct_hdr { u8 unused_0[6]; }; +/* hwrm_struct_data_dcbx_ets (size:256b/32B) */ +struct hwrm_struct_data_dcbx_ets { + u8 destination; + #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION + u8 max_tcs; + __le16 unused1; + u8 pri0_to_tc_map; + u8 pri1_to_tc_map; + u8 pri2_to_tc_map; + u8 pri3_to_tc_map; + u8 pri4_to_tc_map; + u8 pri5_to_tc_map; + u8 pri6_to_tc_map; + u8 pri7_to_tc_map; + u8 tc0_to_bw_map; + u8 tc1_to_bw_map; + u8 tc2_to_bw_map; + u8 tc3_to_bw_map; + u8 tc4_to_bw_map; + u8 tc5_to_bw_map; + u8 tc6_to_bw_map; + u8 tc7_to_bw_map; + u8 tc0_to_tsa_map; + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL + #define 
STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC + u8 tc1_to_tsa_map; + u8 tc2_to_tsa_map; + u8 tc3_to_tsa_map; + u8 tc4_to_tsa_map; + u8 tc5_to_tsa_map; + u8 tc6_to_tsa_map; + u8 tc7_to_tsa_map; + u8 unused_0[4]; +}; + +/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */ +struct hwrm_struct_data_dcbx_pfc { + u8 pfc_priority_bitmap; + u8 max_pfc_tcs; + u8 mbc; + u8 unused_0[5]; +}; + /* hwrm_struct_data_dcbx_app (size:64b/8B) */ struct hwrm_struct_data_dcbx_app { __be16 protocol_id; @@ -6991,6 +11942,128 @@ struct hwrm_struct_data_dcbx_app { u8 unused_0[3]; }; +/* hwrm_struct_data_dcbx_feature_state (size:64b/8B) */ +struct hwrm_struct_data_dcbx_feature_state { + u8 dcbx_mode; + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE + u8 ets_state; + u8 pfc_state; + u8 app_state; + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS + u8 unused[3]; + u8 resets; + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE +}; + +/* hwrm_struct_data_lldp (size:64b/8B) */ +struct hwrm_struct_data_lldp { + u8 admin_state; + #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE + u8 port_description_state; + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE + u8 system_name_state; + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE + u8 system_desc_state; + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE + u8 system_cap_state; + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE + u8 mgmt_addr_state; + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE + u8 async_event_notification_state; + 
#define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE + u8 unused_0; +}; + +/* hwrm_struct_data_lldp_generic (size:2112b/264B) */ +struct hwrm_struct_data_lldp_generic { + u8 tlv_type; + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION + u8 subtype; + u8 length; + u8 unused1[5]; + __le32 tlv_value[64]; +}; + +/* hwrm_struct_data_lldp_device (size:1472b/184B) */ +struct hwrm_struct_data_lldp_device { + __le16 ttl; + u8 mgmt_addr_len; + u8 mgmt_addr_type; + u8 unused_3[4]; + __le32 mgmt_addr[8]; + __le32 system_caps; + u8 intf_num_type; + u8 mgmt_addr_oid_length; + u8 unused_4[2]; + __le32 intf_num; + u8 unused_5[4]; + __le32 mgmt_addr_oid[32]; +}; + +/* hwrm_struct_data_port_description (size:64b/8B) */ +struct hwrm_struct_data_port_description { + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_struct_data_rss_v2 (size:128b/16B) */ +struct hwrm_struct_data_rss_v2 { + __le16 flags; + #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL + __le16 rss_ctx_id; + __le16 num_ring_groups; + __le16 hash_type; + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL + __le64 hash_key_ring_group_ids; +}; + +/* hwrm_struct_data_power_information (size:192b/24B) */ +struct hwrm_struct_data_power_information { + __le32 bkup_power_info_ver; + __le32 platform_bkup_power_count; + __le32 load_milli_watt; + __le32 bkup_time_milli_seconds; + __le32 bkup_power_status; + __le32 bkup_power_charge_time; +}; + /* hwrm_fw_set_structured_data_input (size:256b/32B) */ struct hwrm_fw_set_structured_data_input { __le16 req_type; @@ -7070,6 +12143,354 @@ struct hwrm_fw_get_structured_data_cmd_err { u8 unused_0[7]; }; +/* hwrm_fw_ipc_msg_input (size:320b/40B) */ +struct hwrm_fw_ipc_msg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL + #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL + #define FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL + #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL + __le16 command_id; + #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL + #define FW_IPC_MSG_REQ_COMMAND_ID_MHB_HOST 0x2UL + #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_MHB_HOST + u8 src_processor; + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG + u8 unused_0; + __le32 data_offset; + __le16 length; + u8 unused_1[2]; + __le64 opaque; +}; + +/* hwrm_fw_ipc_msg_output (size:256b/32B) */ +struct hwrm_fw_ipc_msg_output { 
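+	/*
+	 * HWRM responses mirror the request header: error_code, req_type,
+	 * seq_id and resp_len always come first, and the trailing 'valid'
+	 * byte is written by firmware last, so a driver polling the
+	 * response buffer can tell when the DMA'd response is complete.
+	 */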
+ __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 msg_data_1; + __le32 msg_data_2; + __le64 reserved64; + u8 reserved48[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */ +struct hwrm_fw_ipc_mailbox_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + u8 unused; + u8 event_id; + u8 port_id; + __le32 event_data1; + __le32 event_data2; + u8 unused_0[4]; +}; + +/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */ +struct hwrm_fw_ipc_mailbox_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */ +struct hwrm_fw_ipc_mailbox_cmd_err { + u8 code; + #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_ecn_cfg_input (size:192b/24B) */ +struct hwrm_fw_ecn_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define FW_ECN_CFG_REQ_FLAGS_ENABLE_ECN 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_fw_ecn_cfg_output (size:128b/16B) */ +struct hwrm_fw_ecn_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ecn_qcfg_input (size:128b/16B) */ +struct hwrm_fw_ecn_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_ecn_qcfg_output (size:128b/16B) */ +struct hwrm_fw_ecn_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define FW_ECN_QCFG_RESP_FLAGS_ENABLE_ECN 0x1UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_fw_health_check_input (size:128b/16B) */ +struct hwrm_fw_health_check_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_health_check_output (size:128b/16B) */ +struct hwrm_fw_health_check_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_status; + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_FASTBOOTED 0x80UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_sync_input (size:192b/24B) */ +struct hwrm_fw_sync_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 sync_action; + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL + #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL + u8 unused_0[4]; +}; + +/* hwrm_fw_sync_output (size:128b/16B) */ +struct hwrm_fw_sync_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 sync_status; + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0 + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL + #define 
FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_state_qcaps_input (size:128b/16B) */ +struct hwrm_fw_state_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_state_qcaps_output (size:256b/32B) */ +struct hwrm_fw_state_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 backup_memory; + __le32 quiesce_timeout; + __le32 fw_status_blackout; + __le32 fw_status_max_wait; + u8 unused_0[4]; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_fw_state_quiesce_input (size:192b/24B) */ +struct hwrm_fw_state_quiesce_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define FW_STATE_QUIESCE_REQ_FLAGS_ERROR_RECOVERY 0x1UL + u8 unused_0[7]; +}; + +/* hwrm_fw_state_quiesce_output (size:192b/24B) */ +struct hwrm_fw_state_quiesce_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 quiesce_status; + #define FW_STATE_QUIESCE_RESP_QUIESCE_STATUS_INITIATED 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_unquiesce_input (size:128b/16B) */ +struct hwrm_fw_state_unquiesce_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_state_unquiesce_output (size:192b/24B) */ +struct hwrm_fw_state_unquiesce_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unquiesce_status; + #define FW_STATE_UNQUIESCE_RESP_UNQUIESCE_STATUS_COMPLETE 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_backup_input (size:256b/32B) */ +struct hwrm_fw_state_backup_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 backup_pg_size_backup_lvl; + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_MASK 0xfUL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_SFT 0 + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_0 0x0UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_1 0x1UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 0x2UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LAST FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_MASK 0xf0UL + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_SFT 4 + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_4K (0x0UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8K (0x1UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_64K (0x2UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_2M (0x3UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8M (0x4UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G (0x5UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_LAST FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G + u8 unused_0[7]; + __le64 backup_page_dir; +}; + +/* hwrm_fw_state_backup_output (size:192b/24B) */ +struct hwrm_fw_state_backup_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 backup_status; + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_MASK 0xffUL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SFT 0 + 
#define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_QUIESCE_ERROR 0x1UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_LAST FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_RESET_REQUIRED 0x40000000UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_COMPLETE 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_restore_input (size:256b/32B) */ +struct hwrm_fw_state_restore_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 restore_pg_size_restore_lvl; + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_MASK 0xfUL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_SFT 0 + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_0 0x0UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_1 0x1UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 0x2UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LAST FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_MASK 0xf0UL + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_SFT 4 + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_4K (0x0UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8K (0x1UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_64K (0x2UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_2M (0x3UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8M (0x4UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G (0x5UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_LAST FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G + u8 unused_0[7]; + __le64 restore_page_dir; +}; + +/* hwrm_fw_state_restore_output (size:128b/16B) */ +struct hwrm_fw_state_restore_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 restore_status; + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_MASK 0xffUL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SFT 0 + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_GENERAL 0x1UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_FORMAT_PARSE 0x2UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK 0x3UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_LAST FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_FAILURE_ROLLBACK_COMPLETED 0x40000000UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_exec_fwd_resp_input (size:1024b/128B) */ struct hwrm_exec_fwd_resp_input { __le16 req_type; @@ -7189,6 +12610,110 @@ struct hwrm_temp_monitor_query_output { u8 valid; }; +/* hwrm_reg_power_query_input (size:128b/16B) */ +struct hwrm_reg_power_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_reg_power_query_output (size:192b/24B) */ +struct hwrm_reg_power_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define REG_POWER_QUERY_RESP_FLAGS_IN_POWER_AVAILABLE 0x1UL + #define REG_POWER_QUERY_RESP_FLAGS_OUT_POWER_AVAILABLE 0x2UL + __le32 in_power_mw; + __le32 out_power_mw; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_core_frequency_query_input (size:128b/16B) */ +struct hwrm_core_frequency_query_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_core_frequency_query_output (size:128b/16B) */ +struct hwrm_core_frequency_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 core_frequency_hz; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_reg_power_histogram_input (size:192b/24B) */ +struct hwrm_reg_power_histogram_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define REG_POWER_HISTOGRAM_REQ_FLAGS_CLEAR_HISTOGRAM 0x1UL + __le32 unused_0; +}; + +/* hwrm_reg_power_histogram_output (size:1088b/136B) */ +struct hwrm_reg_power_histogram_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT 0x1UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_INPUT 0x0UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT 0x1UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_LAST REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT + u8 unused_0[2]; + __le32 sampling_period; + __le64 sample_count; + __le32 power_hist[26]; + u8 unused_1[7]; + u8 valid; +}; + +#define BUCKET_NO_DATA_FOR_SAMPLE 0x0UL +#define BUCKET_RANGE_8W_OR_LESS 0x1UL +#define BUCKET_RANGE_8W_TO_9W 0x2UL +#define BUCKET_RANGE_9W_TO_10W 0x3UL +#define BUCKET_RANGE_10W_TO_11W 0x4UL +#define BUCKET_RANGE_11W_TO_12W 0x5UL +#define BUCKET_RANGE_12W_TO_13W 0x6UL +#define BUCKET_RANGE_13W_TO_14W 0x7UL +#define BUCKET_RANGE_14W_TO_15W 0x8UL +#define BUCKET_RANGE_15W_TO_16W 0x9UL +#define BUCKET_RANGE_16W_TO_18W 0xaUL +#define BUCKET_RANGE_18W_TO_20W 0xbUL +#define BUCKET_RANGE_20W_TO_22W 0xcUL +#define BUCKET_RANGE_22W_TO_24W 0xdUL +#define BUCKET_RANGE_24W_TO_26W 0xeUL +#define BUCKET_RANGE_26W_TO_28W 0xfUL +#define BUCKET_RANGE_28W_TO_30W 0x10UL +#define BUCKET_RANGE_30W_TO_32W 0x11UL +#define BUCKET_RANGE_32W_TO_34W 0x12UL +#define BUCKET_RANGE_34W_TO_36W 0x13UL +#define BUCKET_RANGE_36W_TO_38W 0x14UL +#define BUCKET_RANGE_38W_TO_40W 0x15UL +#define BUCKET_RANGE_40W_TO_42W 0x16UL +#define BUCKET_RANGE_42W_TO_44W 0x17UL +#define BUCKET_RANGE_44W_TO_50W 0x18UL +#define BUCKET_RANGE_OVER_50W 0x19UL +#define BUCKET_LAST BUCKET_RANGE_OVER_50W + + /* hwrm_wol_filter_alloc_input (size:512b/64B) */ struct hwrm_wol_filter_alloc_input { __le16 req_type; @@ -7328,6 +12853,423 @@ struct hwrm_wol_reason_qcfg_output { u8 valid; }; +/* hwrm_dbg_read_direct_input (size:256b/32B) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* hwrm_dbg_read_direct_output (size:128b/16B) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 crc32; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_write_direct_input (size:448b/56B) */ +struct hwrm_dbg_write_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 write_addr; + __le32 write_len32; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_direct_output (size:128b/16B) */ +struct hwrm_dbg_write_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_read_indirect_input (size:640b/80B) */ +struct hwrm_dbg_read_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 
seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_dest_addr_len; + u8 indirect_access_type; + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_PRIVATE 0x13UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA 0x14UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + __le32 opaque[10]; +}; + +/* hwrm_dbg_read_indirect_output (size:128b/16B) */ +struct hwrm_dbg_read_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_indirect_input (size:832b/104B) */ +struct hwrm_dbg_write_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 indirect_access_type; + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define 
DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_PRIVATE 0x13UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA 0x14UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + u8 unused_1[4]; + __le32 write_data[8]; + __le32 opaque[10]; +}; + +/* hwrm_dbg_write_indirect_output (size:128b/16B) */ +struct hwrm_dbg_write_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_dump_input (size:320b/40B) */ +struct hwrm_dbg_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 handle; + u8 unused_0[4]; + __le64 host_dbg_dump_addr; + __le64 host_dbg_dump_addr_len; +}; + +/* hwrm_dbg_dump_output (size:192b/24B) */ +struct hwrm_dbg_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nexthandle; + __le32 dbg_data_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_erase_nvm_input (size:192b/24B) */ +struct hwrm_dbg_erase_nvm_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define DBG_ERASE_NVM_REQ_FLAGS_ERASE_ALL 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_dbg_erase_nvm_output (size:128b/16B) */ +struct hwrm_dbg_erase_nvm_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_cfg_input (size:192b/24B) */ +struct hwrm_dbg_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define DBG_CFG_REQ_FLAGS_UART_LOG 0x1UL + #define DBG_CFG_REQ_FLAGS_UART_LOG_SECONDARY 0x2UL + #define DBG_CFG_REQ_FLAGS_FW_TRACE 0x4UL + #define DBG_CFG_REQ_FLAGS_FW_TRACE_SECONDARY 0x8UL + #define DBG_CFG_REQ_FLAGS_DEBUG_NOTIFY 0x10UL + #define DBG_CFG_REQ_FLAGS_JTAG_DEBUG 0x20UL + __le16 async_cmpl_ring; + u8 unused_0[2]; +}; + +/* hwrm_dbg_cfg_output (size:128b/16B) */ +struct hwrm_dbg_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_header_input (size:192b/24B) */ +struct hwrm_dbg_crashdump_header_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; +}; + +/* hwrm_dbg_crashdump_header_output (size:512b/64B) */ +struct hwrm_dbg_crashdump_header_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 version_hi; + u8 version_low; + __le16 header_len; + __le32 dump_size; + __le32 crash_time; + s8 utc_offset; + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_UTC 0 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMSTERDAM 4 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_EGYPT 8 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_EUROPE_MOSCOW 12 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_IRAN 14 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_DUBAI 16 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_KABUL 18 + 
#define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_MAWSON 20 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_COLOMBO 22 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_KATHMANDU 23 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_INDIAN_CHAGOS 24 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_INDIAN_COCOS 26 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_BANGKOK 28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_HONG_KONG 32 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_PYONGYANG 34 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_EUCLA 35 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_TOKYO 36 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_ADELAIDE 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_BROKEN_HILL 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_DARWIN 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_SYDNEY 40 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_LORD_HOWE 42 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_MACQUARIE 44 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_SOUTH_POLE 48 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_CHATHAM 51 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_APIA 52 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_KIRITIMATIS 56 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ATLANTIC_CAPE_VERDE -4 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ATLANTIC_SOUTH_GEORGIA -8 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_ARGENTINA_BUENOS_AIRES -12 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_SAO_PAULO -12 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_NEWFOUNDLAND -14 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_BARBADOS -16 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_CANCUN -20 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_COSTA_RICA -24 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_PHOENIX -28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_ARIZONA -28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_PACIFIC -32 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_ALASKA -36 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MARQUESAS -38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_HAWAII -40 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MIDWAY -44 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_LAST DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MIDWAY + u8 crash_cntr; + __le16 dev_uid_length; + u8 dev_uid[32]; + __le32 power_on_count; + u8 unused_2[3]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_erase_input (size:192b/24B) */ +struct hwrm_dbg_crashdump_erase_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 scope; + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_INVALIDATE 0x0UL + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_REINIT 0x1UL + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_LAST DBG_CRASHDUMP_ERASE_REQ_SCOPE_REINIT + u8 unused_0[3]; + __le32 unused_1; +}; + +/* hwrm_dbg_crashdump_erase_output (size:128b/16B) */ +struct hwrm_dbg_crashdump_erase_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_dbg_qcaps_input (size:192b/24B) */ +struct hwrm_dbg_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_dbg_qcaps_output (size:192b/24B) */ +struct hwrm_dbg_qcaps_output { + __le16 error_code; + __le16 req_type; + 
__le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[2]; + __le32 coredump_component_disable_caps; + #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_dbg_qcfg_input (size:192b/24B) */ +struct hwrm_dbg_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 coredump_component_disable_flags; + #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL +}; + +/* hwrm_dbg_qcfg_output (size:192b/24B) */ +struct hwrm_dbg_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[2]; + __le32 coredump_size; + __le32 flags; + #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL + #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL + #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL + #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL + #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL + #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL + __le16 async_cmpl_ring; + u8 unused_1[1]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */ +struct hwrm_dbg_crashdump_medium_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 output_dest_flags; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL + __le16 pg_size_lvl; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5 + __le32 size; + __le32 coredump_component_disable_flags; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL + __le32 unused_0; + __le64 pbl; +}; + +/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */ +struct hwrm_dbg_crashdump_medium_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + /* coredump_segment_record (size:128b/16B) */ struct coredump_segment_record { __le16 component_id; @@ -7440,6 +13382,64 @@ struct hwrm_dbg_coredump_retrieve_output { u8 valid; }; +/* hwrm_dbg_i2c_cmd_input (size:320b/40B) */ +struct hwrm_dbg_i2c_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 read_size; + __le16 write_size; + u8 chnl_id; + u8 options; + #define DBG_I2C_CMD_REQ_OPTIONS_10_BIT_ADDRESSING 0x1UL + #define DBG_I2C_CMD_REQ_OPTIONS_FAST_MODE 0x2UL + __le16 slave_addr; + u8 xfer_mode; + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_READ 0x0UL + #define 
DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE 0x1UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ 0x2UL + #define DBG_I2C_CMD_REQ_XFER_MODE_LAST DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ + u8 unused_1[7]; +}; + +/* hwrm_dbg_i2c_cmd_output (size:128b/16B) */ +struct hwrm_dbg_i2c_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_fw_cli_input (size:1024b/128B) */ +struct hwrm_dbg_fw_cli_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 cli_cmd_len; + u8 unused_0[2]; + u8 cli_cmd[96]; +}; + +/* hwrm_dbg_fw_cli_output (size:128b/16B) */ +struct hwrm_dbg_fw_cli_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cli_data_len; + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_dbg_ring_info_get_input (size:192b/24B) */ struct hwrm_dbg_ring_info_get_input { __le16 req_type; @@ -7451,7 +13451,8 @@ struct hwrm_dbg_ring_info_get_input { #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL - #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX + #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ u8 unused_0[3]; __le32 fw_ring_id; }; @@ -7464,6 +13465,58 @@ struct hwrm_dbg_ring_info_get_output { __le16 resp_len; __le32 producer_index; __le32 consumer_index; + __le32 cag_vector_ctrl; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_drv_trace_input (size:1024b/128B) */ +struct hwrm_dbg_drv_trace_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 severity; + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_FATAL 0x0UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_ERROR 0x1UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_WARNING 0x2UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_INFO 0x3UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_DEBUG 0x4UL + #define DBG_DRV_TRACE_REQ_SEVERITY_LAST DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_DEBUG + u8 write_len; + u8 unused_0[6]; + char trace_data[104]; +}; + +/* hwrm_dbg_drv_trace_output (size:128b/16B) */ +struct hwrm_dbg_drv_trace_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; +}; + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; u8 unused_0[7]; u8 valid; }; @@ -7493,6 +13546,28 @@ struct hwrm_nvm_read_output { u8 valid; }; +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 offset; + __le32 len; +}; + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ struct 
hwrm_nvm_get_dir_entries_input { __le16 req_type; @@ -7585,7 +13660,9 @@ struct hwrm_nvm_modify_input { __le64 resp_addr; __le64 host_src_addr; __le16 dir_idx; - u8 unused_0[2]; + __le16 flags; + #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL + #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL __le32 offset; __le32 len; u8 unused_1[4]; @@ -7751,6 +13828,7 @@ struct hwrm_nvm_install_update_input { #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL u8 unused_0[2]; }; @@ -7787,6 +13865,34 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { + u8 code; + #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL + #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL + u8 unused_0[7]; +}; + /* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { __le16 req_type; @@ -7889,6 +13995,114 @@ struct hwrm_nvm_set_variable_cmd_err { u8 unused_0[7]; }; +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_LAST NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 unused_0[2]; +}; + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_VALIDATE_OPTION_RESP_RESULT_NOT_MATCH 0x0UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_MATCH 0x1UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_LAST NVM_VALIDATE_OPTION_RESP_RESULT_MATCH + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { + u8 code; + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ +struct hwrm_nvm_factory_defaults_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mode; + #define NVM_FACTORY_DEFAULTS_REQ_MODE_RESTORE 0x0UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE 0x1UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_LAST NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_OK 0x0UL + #define 
NVM_FACTORY_DEFAULTS_RESP_RESULT_RESTORE_OK 0x1UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY 0x2UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_LAST NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + u8 code; + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG 0x1UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG 0x2UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + u8 unused_0[7]; +}; + +/* hwrm_nvm_req_arbitration_input (size:192b/24B) */ +struct hwrm_nvm_req_arbitration_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 type; + #define NVM_REQ_ARBITRATION_REQ_TYPE_STATUS 0x0UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_ACQUIRE 0x1UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_RELEASE 0x2UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_LAST NVM_REQ_ARBITRATION_REQ_TYPE_RELEASE + u8 unused_0[7]; +}; + +/* hwrm_nvm_req_arbitration_output (size:128b/16B) */ +struct hwrm_nvm_req_arbitration_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 acquired; + u8 unused_0[6]; + u8 valid; +}; + /* hwrm_selftest_qlist_input (size:128b/16B) */ struct hwrm_selftest_qlist_input { __le16 req_type; @@ -7930,7 +14144,14 @@ struct hwrm_selftest_qlist_output { char test5_name[32]; char test6_name[32]; char test7_name[32]; - u8 unused_2[7]; + u8 eyescope_target_BER_support; + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED + u8 unused_2[6]; u8 valid; }; @@ -7994,4 +14215,335 @@ struct hwrm_selftest_irq_output { u8 valid; }; +/* hwrm_selftest_retrieve_serdes_data_input (size:320b/40B) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 data_len; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0x7UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_EYE_PROJECTION 0x8UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 options; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_HORIZONTAL (0x0UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL (0x1UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE 0x20UL + #define 
SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LEFT_TOP (0x0UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (0x1UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_MASK 0xc0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_SFT 6 + u8 targetBER; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E8 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E9 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E10 0x2UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E11 0x3UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E12 0x4UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E12 + u8 action; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_SYNCHRONOUS 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_START 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_PROGRESS 0x2UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_STOP 0x3UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_STOP + u8 unused[6]; +}; + +/* hwrm_selftest_retrieve_serdes_data_output (size:192b/24B) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + __le16 progress_percent; + __le16 timeout; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_TOTAL 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_RSVD_MASK 0xfeUL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_RSVD_SFT 1 + u8 unused_0; + __le16 hdr_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_oem_cmd_input (size:1024b/128B) */ +struct hwrm_oem_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[26]; +}; + +/* hwrm_oem_cmd_output (size:768b/96B) */ +struct hwrm_oem_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[18]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_sv_input (size:1152b/144B) */ +struct hwrm_sv_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 opaque[32]; +}; + +/* hwrm_sv_output (size:1088b/136B) */ +struct hwrm_sv_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque[32]; +}; + +/* dbc_dbc (size:64b/8B) */ +struct dbc_dbc { + u32 index; + #define DBC_DBC_INDEX_MASK 0xffffffUL + #define DBC_DBC_INDEX_SFT 0 + #define DBC_DBC_EPOCH 0x1000000UL + #define DBC_DBC_RESIZE_TOGGLE 0x2000000UL + u32 type_path_xid; + #define DBC_DBC_XID_MASK 0xfffffUL + #define DBC_DBC_XID_SFT 0 + #define DBC_DBC_PATH_MASK 0x3000000UL + #define DBC_DBC_PATH_SFT 24 + #define DBC_DBC_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_PATH_L2 (0x1UL << 24) + #define DBC_DBC_PATH_ENGINE (0x2UL << 24) + #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE + 
#define DBC_DBC_VALID 0x4000000UL + #define DBC_DBC_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_TYPE_MASK 0xf0000000UL + #define DBC_DBC_TYPE_SFT 28 + #define DBC_DBC_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_DBC_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL +}; + +/* dbc_dbc32 (size:32b/4B) */ +struct dbc_dbc32 { + u32 type_abs_incr_xid; + #define DBC_DBC32_XID_MASK 0xfffffUL + #define DBC_DBC32_XID_SFT 0 + #define DBC_DBC32_PATH_MASK 0xc00000UL + #define DBC_DBC32_PATH_SFT 22 + #define DBC_DBC32_PATH_ROCE (0x0UL << 22) + #define DBC_DBC32_PATH_L2 (0x1UL << 22) + #define DBC_DBC32_PATH_LAST DBC_DBC32_PATH_L2 + #define DBC_DBC32_INCR_MASK 0xf000000UL + #define DBC_DBC32_INCR_SFT 24 + #define DBC_DBC32_ABS 0x10000000UL + #define DBC_DBC32_TYPE_MASK 0xe0000000UL + #define DBC_DBC32_TYPE_SFT 29 + #define DBC_DBC32_TYPE_SQ (0x0UL << 29) + #define DBC_DBC32_TYPE_LAST DBC_DBC32_TYPE_SQ +}; + +/* db_push_start (size:64b/8B) */ +struct db_push_start { + u64 db; + #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_START_DB_INDEX_SFT 0 + #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_START_DB_PI_LO_SFT 24 + #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_START_DB_XID_SFT 32 + #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_START_DB_PI_HI_SFT 52 + #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_START_DB_TYPE_SFT 60 + #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60) + #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END +}; + +/* db_push_end (size:64b/8B) */ +struct db_push_end { + u64 db; + #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_END_DB_INDEX_SFT 0 + #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_END_DB_PI_LO_SFT 24 + #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_END_DB_XID_SFT 32 + #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_END_DB_PI_HI_SFT 52 + #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL + #define DB_PUSH_END_DB_PATH_SFT 56 + #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56) + #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56) + #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56) + #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE + #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL + #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_END_DB_TYPE_SFT 60 + #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60) + #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END +}; + +/* db_push_info (size:64b/8B) */ +struct db_push_info { + u32 push_size_push_index; + #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL + #define DB_PUSH_INFO_PUSH_INDEX_SFT 0 + #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL + #define DB_PUSH_INFO_PUSH_SIZE_SFT 24 + u32 reserved32; +}; + +/* dbc_absolute_db_32 
(size:32b/4B) */ +struct dbc_absolute_db_32 { + u32 index; + #define DBC_ABSOLUTE_DB_32_INDEX_MASK 0xffffUL + #define DBC_ABSOLUTE_DB_32_INDEX_SFT 0 + #define DBC_ABSOLUTE_DB_32_EPOCH 0x10000UL + #define DBC_ABSOLUTE_DB_32_RESIZE_TOGGLE 0x20000UL + #define DBC_ABSOLUTE_DB_32_MXID_MASK 0xfc0000UL + #define DBC_ABSOLUTE_DB_32_MXID_SFT 18 + #define DBC_ABSOLUTE_DB_32_PATH_MASK 0x3000000UL + #define DBC_ABSOLUTE_DB_32_PATH_SFT 24 + #define DBC_ABSOLUTE_DB_32_PATH_ROCE (0x0UL << 24) + #define DBC_ABSOLUTE_DB_32_PATH_L2 (0x1UL << 24) + #define DBC_ABSOLUTE_DB_32_PATH_LAST DBC_ABSOLUTE_DB_32_PATH_L2 + #define DBC_ABSOLUTE_DB_32_VALID 0x4000000UL + #define DBC_ABSOLUTE_DB_32_DEBUG_TRACE 0x8000000UL + #define DBC_ABSOLUTE_DB_32_TYPE_MASK 0xf0000000UL + #define DBC_ABSOLUTE_DB_32_TYPE_SFT 28 + #define DBC_ABSOLUTE_DB_32_TYPE_SQ (0x0UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_RQ (0x1UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_SRQ (0x2UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ (0x4UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ (0xaUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NULL (0xfUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_LAST DBC_ABSOLUTE_DB_32_TYPE_NULL +}; + +/* dbc_relative_db_32 (size:32b/4B) */ +struct dbc_relative_db_32 { + u32 xid; + #define DBC_RELATIVE_DB_32_XID_MASK 0xfffffUL + #define DBC_RELATIVE_DB_32_XID_SFT 0 + #define DBC_RELATIVE_DB_32_PATH_MASK 0xc00000UL + #define DBC_RELATIVE_DB_32_PATH_SFT 22 + #define DBC_RELATIVE_DB_32_PATH_ROCE (0x0UL << 22) + #define DBC_RELATIVE_DB_32_PATH_L2 (0x1UL << 22) + #define DBC_RELATIVE_DB_32_PATH_LAST DBC_RELATIVE_DB_32_PATH_L2 + #define DBC_RELATIVE_DB_32_INCR_MASK 0x1f000000UL + #define DBC_RELATIVE_DB_32_INCR_SFT 24 + #define DBC_RELATIVE_DB_32_TYPE_MASK 0xe0000000UL + #define DBC_RELATIVE_DB_32_TYPE_SFT 29 + #define DBC_RELATIVE_DB_32_TYPE_SQ (0x0UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_SRQ (0x1UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_CQ (0x2UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_CQ_ARMALL (0x3UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ (0x4UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ_ARM (0x5UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ_MASK (0x6UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_LAST DBC_RELATIVE_DB_32_TYPE_NQ_MASK +}; + +/* dbc_drk (size:128b/16B) */ +struct dbc_drk { + u32 db_format_linked_last_valid; + #define DBC_DRK_VALID 0x1UL + #define DBC_DRK_LAST 0x2UL + #define DBC_DRK_LINKED 0x4UL + #define DBC_DRK_DB_FORMAT 0x8UL + #define DBC_DRK_DB_FORMAT_B64 (0x0UL << 3) + #define DBC_DRK_DB_FORMAT_B32A (0x1UL << 3) + #define DBC_DRK_DB_FORMAT_LAST DBC_DRK_DB_FORMAT_B32A + u32 pi; + #define DBC_DRK_PI_MASK 0xffffUL + #define DBC_DRK_PI_SFT 0 + u64 memptr; +}; + +/* fw_status_reg (size:32b/4B) */ +struct fw_status_reg { + u32 fw_status; + #define FW_STATUS_REG_CODE_MASK 0xffffUL + #define FW_STATUS_REG_CODE_SFT 0 + #define FW_STATUS_REG_CODE_READY 0x8000UL + #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY + #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL + #define FW_STATUS_REG_RECOVERABLE 0x20000UL + #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL + #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL + #define FW_STATUS_REG_SHUTDOWN 0x100000UL +}; + +/* hcomm_status (size:64b/8B) */ +struct hcomm_status { + u32 sig_ver; + 
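+	/* Editorial note: sig_ver packs an 8-bit interface version in the
+	 * low byte with a 24-bit ASCII "HCS" signature above it (0x484353),
+	 * as described by the masks below.
+	 */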
#define HCOMM_STATUS_VER_MASK 0xffUL + #define HCOMM_STATUS_VER_SFT 0 + #define HCOMM_STATUS_VER_LATEST 0x1UL + #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST + #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL + #define HCOMM_STATUS_SIGNATURE_SFT 8 + #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8) + #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL + u32 fw_status_loc; + #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL + #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0 + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 + #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL + #define HCOMM_STATUS_TRUE_OFFSET_SFT 2 +}; +#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL + #endif /* _BNXT_HSI_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c new file mode 100644 index 000000000000..5cb4c86b2a08 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -0,0 +1,566 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type) +{ + return (((u64)ctx) + req_type) ^ BNXT_HWRM_SENTINEL; +} + +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len) +{ + struct bnxt_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnxt_hwrm_ctx 
*ctx = ctx_addr; + u64 sentinel; + + if (!req) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "null HWRM request"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +void hwrm_req_silence(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->flags |= BNXT_HWRM_CTX_SILENT; +} + +void hwrm_req_timeout(struct bnxt *bp, void *req, int timeout) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->timeout = timeout; +} + +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->gfp = gfp; +} + +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *internal_req = req; + u16 req_type; + + if (!ctx) + return -EINVAL; + + if (len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + /* free any existing slices */ + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + if (ctx->slice_addr) { + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + ctx->slice_addr = NULL; + } + ctx->gfp = GFP_KERNEL; + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) { + memcpy(internal_req, new_req, len); + } else { + internal_req->req_type = ((struct input *)new_req)->req_type; + ctx->req = new_req; + } + + ctx->req_len = len; + ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle + + BNXT_HWRM_RESP_OFFSET); + + /* update sentinel for potentially new request type */ + req_type = le16_to_cpu(internal_req->req_type); + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + + return 0; +} + +void hwrm_req_capture_ts(struct bnxt *bp, void *req, + struct ptp_system_timestamp *sts) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->sts = sts; +} + +void *hwrm_req_hold(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNXT_HWRM_CTX_OWNED) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNXT_HWRM_CTX_OWNED; + return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle); +} + +/* void because failure is irrecoverable */ +void hwrm_req_drop(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + __hwrm_ctx_drop(bp, ctx); +} + 
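+/* Illustrative usage sketch (editorial note, not part of the patch): a
+ * typical caller pairs hwrm_req_init() with a consuming hwrm_req_send(),
+ * or holds the request to read the response and then releases everything
+ * with hwrm_req_drop(). The example function below is hypothetical; the
+ * HWRM API calls are the ones defined in this file, and the HWRM_VER_GET
+ * response field names are assumed from recent bnxt_hsi.h revisions.
+ *
+ *	static int bnxt_example_query_ver(struct bnxt *bp)
+ *	{
+ *		struct hwrm_ver_get_output *resp;
+ *		struct hwrm_ver_get_input *req;
+ *		int rc;
+ *
+ *		rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+ *		if (rc)
+ *			return rc;
+ *
+ *		resp = hwrm_req_hold(bp, req);
+ *		rc = hwrm_req_send(bp, req);
+ *		if (!rc)
+ *			netdev_info(bp->dev, "HWRM interface %d.%d\n",
+ *				    resp->hwrm_intf_maj_8b,
+ *				    resp->hwrm_intf_min_8b);
+ *		hwrm_req_drop(bp, req);
+ *		return rc;
+ *	}
+ *
+ * Because the request is held, hwrm_req_send() does not consume it and the
+ * response remains valid until the final hwrm_req_drop().
+ */
+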
+static int __hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +static struct bnxt_hwrm_wait_token * +__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst) + __acquires(&bp->hwrm_cmd_lock) +{ + struct bnxt_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bp->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNXT_HWRM_PENDING; + if (dst == BNXT_HWRM_CHNL_CHIMP) { + token->seq_id = bp->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list); + } else { + token->seq_id = bp->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void +__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token) + __releases(&bp->hwrm_cmd_lock) +{ + if (token->dst == BNXT_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +void +hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) +{ + struct hlist_node __maybe_unused *dummy; + struct bnxt_hwrm_wait_token *token; + + rcu_read_lock(); + __hlist_for_each_entry_rcu(token, dummy, &bp->hwrm_pending_list, node) { + if (token->seq_id == seq_id) { + WRITE_ONCE(token->state, state); + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); +} + +static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + struct bnxt_hwrm_wait_token *token = NULL; + struct hwrm_short_input short_input = {0}; + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; + int i, timeout, tmo_count, rc = -EBUSY; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + u16 len = 0; + u8 *valid; + +#ifndef HSI_DBG_DISABLE + decode_hwrm_req(ctx->req); +#endif + + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN && + msg_len > bp->hwrm_max_ext_req_len) { + rc = -E2BIG; + goto exit; + } + + if (hwrm_req_kong(bp, ctx->req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n", + (u32)le16_to_cpu(ctx->req->req_type)); + rc = -EINVAL; + goto exit; + } + } + + if (ctx->flags & BNXT_HWRM_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + token = __hwrm_acquire_token(bp, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { + short_input.req_type = ctx->req->req_type; + short_input.signature = + cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); + 
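+		/* The short request carries no command body; it conveys only
+		 * the size and DMA address of the full request, which the
+		 * firmware fetches by DMA on its own.
+		 */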
short_input.size = cpu_to_le16(msg_len); + short_input.req_addr = cpu_to_le64(ctx->dma_handle); + + data = (u32 *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bp->bar0 + bar_offset + i); + + ptp_read_system_prets(ctx->sts); + + /* Ring channel doorbell */ + writel(1, bp->bar0 + doorbell_offset); + + if (!pci_is_enabled(bp->pdev)) { + rc = 0; /* don't wait for response during error recovery */ + goto exit; + } + + /* convert timeout to usec */ + timeout = ctx->timeout * 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. + */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE && + i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + + if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(ctx->req->req_type)); + goto exit; + } + len = le16_to_cpu(ctx->resp->resp_len); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + + if (token && + READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) { + __hwrm_release_token(bp, token); + token = NULL; + } + + len = le16_to_cpu(ctx->resp->resp_len); + if (len) + break; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + + if (i >= tmo_count) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len); + goto exit; + } + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; + usleep_range(1, 5); + } + + if (j >= HWRM_VALID_BIT_DELAY_USEC) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len, + *valid); + goto exit; + } + } + + /* Zero valid bit for compatibility. Valid bit in an older spec + * may become a new field in a newer spec. 
We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; + rc = le16_to_cpu(ctx->resp->error_code); + if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) { + if (rc == HWRM_ERR_CODE_BUSY) + netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n", + le16_to_cpu(ctx->resp->req_type)); + else + netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + le16_to_cpu(ctx->resp->req_type), + le16_to_cpu(ctx->resp->seq_id), rc); + } +#ifndef HSI_DBG_DISABLE + decode_hwrm_resp(ctx->resp); +#endif + rc = __hwrm_to_stderr(rc); +exit: + ptp_read_system_postts(ctx->sts); + if (token) + __hwrm_release_token(bp, token); + if (ctx->flags & BNXT_HWRM_CTX_OWNED) + ctx->flags |= BNXT_HWRM_RESP_DIRTY; + else + __hwrm_ctx_drop(bp, ctx); + return rc; +} + +int hwrm_req_send(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (!ctx) + return -EINVAL; + + return __hwrm_send(bp, ctx); +} + +int hwrm_req_send_silent(struct bnxt *bp, void *req) +{ + hwrm_req_silence(bp, req); + return hwrm_req_send(bp, req); +} + +void * +hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + /* could not suballocate from ctx buffer, try create a new mapping */ + if (ctx->slice_addr) { + /* if one exists, can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp); + + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000000..55f71e81a22f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -0,0 +1,357 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_HWRM_H +#define BNXT_HWRM_H + +#include "bnxt_hsi.h" + +enum bnxt_hwrm_ctx_flags { + BNXT_HWRM_CTX_OWNED = BIT(0), /* caller owns the context */ + BNXT_HWRM_CTX_SILENT = BIT(1), /* squelch firmware errors */ + BNXT_HWRM_RESP_DIRTY = BIT(2), /* response contains data */ +}; + +struct bnxt_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + struct ptp_system_timestamp *sts; + u32 slice_size; + u32 req_len; + enum bnxt_hwrm_ctx_flags flags; + int timeout; + u32 allocated; + gfp_t gfp; +}; + +enum bnxt_hwrm_wait_state { + BNXT_HWRM_PENDING, + BNXT_HWRM_DEFERRED, + BNXT_HWRM_COMPLETE, + BNXT_HWRM_CANCELLED, +}; + +enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG }; + +struct bnxt_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnxt_hwrm_wait_state state; + enum bnxt_hwrm_chnl dst; + u16 seq_id; +}; + +void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); + +#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define SHORT_HWRM_CMD_TIMEOUT 20 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) +#define BNXT_HWRM_TARGET 0xffff +#define BNXT_HWRM_NO_CMPL_RING -1 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE +#define BNXT_HWRM_RESP_OFFSET (BNXT_HWRM_DMA_SIZE - \ + BNXT_HWRM_RESP_RESERVED) +#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \ + sizeof(struct bnxt_hwrm_ctx)) +#define BNXT_HWRM_DMA_ALIGN 16 +#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +static inline int hwrm_total_timeout(int n) +{ + return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT : + HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + + (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT; +} + +#define HWRM_VALID_BIT_DELAY_USEC 150 + +static inline bool hwrm_req_type_cfa(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + case HWRM_CFA_EEM_QCAPS: + case HWRM_CFA_EEM_OP: + case HWRM_CFA_EEM_CFG: + case HWRM_CFA_EEM_QCFG: + case HWRM_CFA_CTX_MEM_RGTR: + case HWRM_CFA_CTX_MEM_UNRGTR: + return true; + default: + return false; + } +} + +static inline bool hwrm_req_kong(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + (hwrm_req_type_cfa(le16_to_cpu(req->req_type)) || + le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); +} + +/** + * __hwrm_req_init() - Initialize an HWRM request. + * @bp: The driver context. 
+ * @req: A pointer to the request pointer to initialize.
+ * @req_type: The request type. This will be converted to little endian
+ *	before being written to the req_type field of the returned request.
+ * @req_len: The length of the request to be allocated.
+ *
+ * Allocate DMA resources and initialize a new HWRM request object of the
+ * given type. The response address field in the request is configured with
+ * the DMA bus address that has been mapped for the response and the passed
+ * request is pointed to kernel virtual memory mapped for the request (such
+ * that short_input indirection can be accomplished without copying). The
+ * request's target and completion ring are initialized to default values and
+ * can be overridden by writing to the returned request object directly.
+ *
+ * The initialized request can be further customized by writing to its fields
+ * directly, taking care to convert such fields to little endian. The request
+ * object will be consumed (and all its associated resources released) upon
+ * passing it to hwrm_req_send() unless ownership of the request has been
+ * claimed by the caller via a call to hwrm_req_hold(). If the request is not
+ * consumed, either because it is never sent or because ownership has been
+ * claimed, then it must be released by a call to hwrm_req_drop().
+ *
+ * Return: zero on success, negative error code otherwise:
+ *	E2BIG: the request is too large to fit in the request buffer.
+ *	ENOMEM: an allocation failure occurred.
+ */
+int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len);
+#define hwrm_req_init(bp, req, req_type) \
+	__hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req)))
+
+/**
+ * hwrm_req_hold() - Claim ownership of the request's resources.
+ * @bp: The driver context.
+ * @req: A pointer to the request to own. The request will no longer be
+ *	consumed by calls to hwrm_req_send().
+ *
+ * Take ownership of the request. Ownership places responsibility on the
+ * caller to free the resources associated with the request via a call to
+ * hwrm_req_drop(). The caller taking ownership implies that a subsequent
+ * call to hwrm_req_send() will not consume the request (ie. sending will
+ * not free the associated resources if the request is owned by the caller).
+ * Taking ownership returns a reference to the response. Retaining and
+ * accessing the response data is the most common reason to take ownership
+ * of the request. Ownership can also be acquired in order to reuse the same
+ * request object across multiple invocations of hwrm_req_send().
+ *
+ * Return: A pointer to the response object.
+ *
+ * The resources associated with the response will remain available to the
+ * caller until ownership of the request is relinquished via a call to
+ * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
+ * a valid request is provided. A returned NULL value would imply a driver
+ * bug and the implementation will complain loudly in the logs to aid in
+ * detection. It should not be necessary to check the result for NULL.
+ */
+void *hwrm_req_hold(struct bnxt *bp, void *req);
+
+/**
+ * hwrm_req_drop() - Release all resources associated with the request.
+ * @bp: The driver context.
+ * @req: The request to consume, releasing the associated resources. The
+ *	request object, any slices, and its associated response are no
+ *	longer valid.
+ *
+ * It is legal to call hwrm_req_drop() on an unowned request, provided it
+ * has not already been consumed by hwrm_req_send() (for example, to release
+ * an aborted request). A given request should not be dropped more than once,
+ * nor should it be dropped after having been consumed by hwrm_req_send(). To
+ * do so is an error (the context will not be found and a stack trace will be
+ * rendered in the kernel log).
+ */
+void hwrm_req_drop(struct bnxt *bp, void *req);
+
+/**
+ * hwrm_req_silence() - Do not log HWRM errors.
+ * @bp: The driver context.
+ * @req: The request to silence.
+ *
+ * Do not print HWRM errors to the kernel log for the associated request
+ * during a subsequent call to hwrm_req_send(). Some requests are expected
+ * to fail in certain scenarios and, as such, errors should be suppressed.
+ */
+void hwrm_req_silence(struct bnxt *bp, void *req);
+
+/**
+ * hwrm_req_timeout() - Set the completion timeout for the request.
+ * @bp: The driver context.
+ * @req: The request for which the timeout will be set.
+ * @timeout: The timeout in milliseconds.
+ *
+ * Set the timeout associated with the request for subsequent calls to
+ * hwrm_req_send(). Some requests are long running and require a different
+ * timeout than the default.
+ */
+void hwrm_req_timeout(struct bnxt *bp, void *req, int timeout);
+
+/**
+ * hwrm_req_send() - Execute an HWRM command.
+ * @bp: The driver context.
+ * @req: A pointer to the request to send. The DMA resources associated with
+ *	the request will be released (ie. the request will be consumed) unless
+ *	ownership of the request has been assumed by the caller via a call to
+ *	hwrm_req_hold().
+ *
+ * Send an HWRM request to the device and wait for a response. The request is
+ * consumed if it is not owned by the caller. This function will block until
+ * the request has either completed or timed out due to an error.
+ *
+ * Return: A result code.
+ *
+ * The result is zero on success, otherwise the negative error code indicates
+ * one of the following errors:
+ *	E2BIG: The request was too large.
+ *	EBUSY: The firmware is in a fatal state or the request timed out.
+ *	EACCES: HWRM access denied.
+ *	ENOSPC: HWRM resource allocation error.
+ *	EINVAL: Request parameters are invalid.
+ *	ENOMEM: HWRM has no buffers.
+ *	EAGAIN: HWRM busy or reset in progress.
+ *	EOPNOTSUPP: Invalid request type.
+ *	EIO: Any other error.
+ * Error handling is orthogonal to request ownership. An unowned request will
+ * still be consumed on error. If the caller owns the request, then the caller
+ * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
+ * always consume the request.
+ */
+int hwrm_req_send(struct bnxt *bp, void *req);
+
+/**
+ * hwrm_req_send_silent() - A silent version of hwrm_req_send().
+ * @bp: The driver context.
+ * @req: The request to send without logging.
+ *
+ * The same as hwrm_req_send(), except that the request is silenced using
+ * hwrm_req_silence() prior to the call. This version of the function is
+ * provided solely to preserve the legacy API's flavor for this functionality.
+ *
+ * Return: A result code, see hwrm_req_send().
+ */
+int hwrm_req_send_silent(struct bnxt *bp, void *req);
+
+/**
+ * hwrm_req_replace() - Replace request data.
+ * @bp: The driver context.
+ * @req: The request to modify. A call to hwrm_req_replace() is conceptually
+ *	an assignment of new_req to req.
+ *	Subsequent calls to HWRM API functions, such as hwrm_req_send(),
+ *	should thus use req and not new_req (in fact, calls to HWRM API
+ *	functions will fail if non-managed request objects are passed).
+ * @len: The length of new_req.
+ * @new_req: The pre-built request to copy or reference.
+ *
+ * Replaces the request data in req with that of new_req. This is useful in
+ * scenarios where a request object has already been constructed by a third
+ * party prior to creating a resource managed request using hwrm_req_init().
+ * Depending on the length, hwrm_req_replace() will either copy the new
+ * request data into the DMA memory allocated for req, or it will simply
+ * reference the new request and use it in lieu of req during subsequent
+ * calls to hwrm_req_send(). The resource management is associated with
+ * req and is independent of and does not apply to new_req. The caller must
+ * ensure that the lifetime of new_req is at least as long as req. Any slices
+ * that may have been associated with the original request are released.
+ *
+ * Return: zero on success, negative error code otherwise:
+ *	E2BIG: Request is too large.
+ *	EINVAL: Invalid request to modify.
+ */
+int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
+
+/**
+ * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
+ * @bp: The driver context.
+ * @req: The request for which calls to hwrm_req_dma_slice() will have altered
+ *	allocation flags.
+ * @flags: A bitmask of GFP flags. These flags are passed to
+ *	dma_alloc_coherent() whenever it is used to allocate backing memory
+ *	for slices. Note that calls to hwrm_req_dma_slice() will not always
+ *	result in new allocations, however, memory suballocated from the
+ *	request buffer is already __GFP_ZERO.
+ *
+ * Sets the GFP allocation flags associated with the request for subsequent
+ * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
+ * for slice allocations.
+ */
+void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
+
+/**
+ * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
+ * @bp: The driver context.
+ * @req: The request for which indirect data will be associated.
+ * @size: The size of the allocation.
+ * @dma: The bus address associated with the allocation. The HWRM API has no
+ *	knowledge about the type of the request and so cannot infer how the
+ *	caller intends to use the indirect data. Thus, the caller is
+ *	responsible for configuring the request object appropriately to
+ *	point to the associated indirect memory. Note that the DMA handle has
+ *	the same definition as in dma_alloc_coherent(); the caller is
+ *	responsible for endian conversions via cpu_to_le64() before assigning
+ *	this address.
+ *
+ * Allocates DMA mapped memory for indirect data related to a request. The
+ * lifetime of the DMA resources will be bound to that of the request (ie.
+ * they will be automatically released when the request is either consumed by
+ * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
+ * efficiently suballocated out of the request buffer space, hence the name
+ * slice, while larger requests are satisfied via an underlying call to
+ * dma_alloc_coherent(). Multiple suballocations are supported, however, only
+ * one externally mapped region is.
+ *
+ * Return: The kernel virtual address of the DMA mapping.
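+ *	NULL is returned on failure: when the backing dma_alloc_coherent()
+ *	fails or when a second externally mapped slice is requested for the
+ *	same request (only one is supported).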
+ */
+void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+
+/**
+ * hwrm_req_capture_ts() - Get system time into PTP provided time structure
+ * @bp: The driver context.
+ * @req: The request used to query the free running timer of the NIC
+ * @sts: System provided ptp_system_timestamp structure
+ *
+ * PTP's gettimex64() optionally provides a system timestamp structure that
+ * the driver should update. This function sets the sts member of the
+ * bnxt_hwrm_ctx associated with the request. __hwrm_send() will then record
+ * the pre and post system times in this structure when executing the
+ * request on behalf of gettimex64().
+ */
+void hwrm_req_capture_ts(struct bnxt *bp, void *req, struct ptp_system_timestamp *sts);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.c
new file mode 100644
index 000000000000..e2e501b6778b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.c
@@ -0,0 +1,1234 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-buf.h>
+
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_eem.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
+#include "bnxt_lfc.h"
+#include "bnxt_lfc_ioctl.h"
+
+#ifdef CONFIG_BNXT_LFC
+
+#define MAX_LFC_OPEN_ULP_DEVICES 32
+#define PRIME_1 29
+#define PRIME_2 31
+
+static struct bnxt_gloabl_dev blfc_global_dev;
+static struct bnxt_lfc_dev_array blfc_array[MAX_LFC_OPEN_ULP_DEVICES];
+
+static bool bnxt_lfc_inited;
+static bool is_domain_available;
+static int domain_no;
+static void bnxt_lfc_unreg(struct bnxt_lfc_dev *blfc_dev);
+
+static int lfc_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	u32 i;
+	struct bnxt_lfc_dev *blfc_dev;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		for (i = 0; i < MAX_LFC_OPEN_ULP_DEVICES; i++) {
+			mutex_lock(&blfc_global_dev.bnxt_lfc_lock);
+			blfc_dev = blfc_array[i].bnxt_lfc_dev;
+			if (blfc_dev && blfc_dev->ndev == dev) {
+				bnxt_lfc_unreg(blfc_dev);
+				dev_put(blfc_dev->ndev);
+				kfree(blfc_dev);
+				blfc_array[i].bnxt_lfc_dev = NULL;
+				blfc_array[i].taken = 0;
+				mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
+				break;
+			}
+			mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
+		}
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	return 0;
+}
+
+struct notifier_block lfc_device_notifier = {
+	.notifier_call = lfc_device_event
+};
+
+static u32 bnxt_lfc_get_hash_key(u32 bus, u32 devfn)
+{
+	return ((bus * PRIME_1 + devfn) * PRIME_2) % MAX_LFC_OPEN_ULP_DEVICES;
+}
+
+static int32_t bnxt_lfc_probe_and_reg(struct bnxt_lfc_dev *blfc_dev)
+{
+	int32_t rc;
+	struct pci_dev *pdev = blfc_dev->pdev;
+
+	blfc_dev->bp = netdev_priv(blfc_dev->ndev);
+	if (!blfc_dev->bp->ulp_probe) {
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Probe routine not valid\n");
+		return -EINVAL;
+	}
+
+	blfc_dev->en_dev = blfc_dev->bp->ulp_probe(blfc_dev->ndev);
+	if (IS_ERR(blfc_dev->en_dev)) {
+		BNXT_LFC_ERR(&pdev->dev,
probing failed\n"); + return -EINVAL; + } + + rtnl_lock(); + rc = blfc_dev->en_dev->en_ops->bnxt_register_device(blfc_dev->en_dev, + BNXT_OTHER_ULP, + &blfc_dev->uops, + blfc_dev); + if (rc) { + BNXT_LFC_ERR(&pdev->dev, + "Device registration failed\n"); + rtnl_unlock(); + return rc; + } + rtnl_unlock(); + return 0; +} + +static void bnxt_lfc_unreg(struct bnxt_lfc_dev *blfc_dev) +{ + int32_t rc; + + rc = blfc_dev->en_dev->en_ops->bnxt_unregister_device( + blfc_dev->en_dev, + BNXT_OTHER_ULP); + if (rc) + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "netdev %p unregister failed!", + blfc_dev->ndev); +} + +static bool bnxt_lfc_is_valid_pdev(struct pci_dev *pdev) +{ + int32_t idx; + + if (!pdev) { + BNXT_LFC_ERR(NULL, "No such PCI device\n"); + return false; + } + + if (pdev->vendor != PCI_VENDOR_ID_BROADCOM) { + pci_dev_put(pdev); + BNXT_LFC_ERR(NULL, "Not a Broadcom PCI device\n"); + return false; + } + + for (idx = 0; bnxt_pci_tbl[idx].device != 0; idx++) { + if (pdev->device == bnxt_pci_tbl[idx].device) { + BNXT_LFC_DEBUG(&pdev->dev, "Found valid PCI device\n"); + return true; + } + } + pci_dev_put(pdev); + BNXT_LFC_ERR(NULL, "PCI device not supported\n"); + return false; +} + +static void bnxt_lfc_init_req_hdr(struct input *req_hdr, u16 req_type, + u16 cpr_id, u16 tgt_id) +{ + req_hdr->req_type = cpu_to_le16(req_type); + req_hdr->cmpl_ring = cpu_to_le16(cpr_id); + req_hdr->target_id = cpu_to_le16(tgt_id); +} + +static void bnxt_lfc_prep_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, + int32_t msg_len, void *resp, + int32_t resp_max_len, + int32_t timeout) +{ + fw_msg->msg = msg; + fw_msg->msg_len = msg_len; + fw_msg->resp = resp; + fw_msg->resp_max_len = resp_max_len; + fw_msg->timeout = timeout; +} + +static int32_t bnxt_lfc_process_nvm_flush(struct bnxt_lfc_dev *blfc_dev) +{ + int32_t rc = 0; + struct bnxt_en_dev *en_dev = blfc_dev->en_dev; + + struct hwrm_nvm_flush_input req = {0}; + struct hwrm_nvm_flush_output resp = {0}; + struct bnxt_fw_msg fw_msg; + + bnxt_lfc_init_req_hdr((void *)&req, + HWRM_NVM_FLUSH, -1, -1); + bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_NVM_FLUSH_TIMEOUT); + rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_OTHER_ULP, &fw_msg); + if (rc) + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to send NVM_FLUSH FW msg, rc = 0x%x", rc); + return rc; + +} +static int32_t bnxt_lfc_process_nvm_get_var_req(struct bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_nvm_get_var_req + *nvm_get_var_req) +{ + int32_t rc; + uint16_t len_in_bytes; + struct pci_dev *pdev = blfc_dev->pdev; + struct bnxt_en_dev *en_dev = blfc_dev->en_dev; + + struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_output resp = {0}; + struct bnxt_fw_msg fw_msg; + void *dest_data_addr = NULL; + dma_addr_t dest_data_dma_addr; + + if (nvm_get_var_req->len_in_bits == 0) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n"); + return -ENOMEM; + } + + len_in_bytes = (nvm_get_var_req->len_in_bits + 7) / 8; + dest_data_addr = dma_alloc_coherent(&pdev->dev, + len_in_bytes, + &dest_data_dma_addr, + GFP_KERNEL); + + if (dest_data_addr == NULL) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to alloc mem for data\n"); + return -ENOMEM; + } + + bnxt_lfc_init_req_hdr((void *)&req, + HWRM_NVM_GET_VARIABLE, -1, -1); + req.dest_data_addr = cpu_to_le64(dest_data_dma_addr); + req.data_len = cpu_to_le16(nvm_get_var_req->len_in_bits); + req.option_num = cpu_to_le16(nvm_get_var_req->option_num); + req.dimensions = cpu_to_le16(nvm_get_var_req->dimensions); + 
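+	/* Note: data_len is expressed in bits on this HWRM interface,
+	 * while the DMA buffer above is sized in whole bytes
+	 * (len_in_bits rounded up).
+	 */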
req.index_0 = cpu_to_le16(nvm_get_var_req->index_0);
+	req.index_1 = cpu_to_le16(nvm_get_var_req->index_1);
+	req.index_2 = cpu_to_le16(nvm_get_var_req->index_2);
+	req.index_3 = cpu_to_le16(nvm_get_var_req->index_3);
+
+	bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+			     sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+
+	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_OTHER_ULP, &fw_msg);
+	if (rc) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to send NVM_GET_VARIABLE FW msg, rc = 0x%x",
+			     rc);
+		goto done;
+	}
+
+	rc = copy_to_user(nvm_get_var_req->out_val, dest_data_addr,
+			  len_in_bytes);
+	if (rc != 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to copy %d bytes to the user\n", rc);
+		rc = -EFAULT;
+	}
+done:
+	dma_free_coherent(&pdev->dev, len_in_bytes,
+			  dest_data_addr,
+			  dest_data_dma_addr);
+	return rc;
+}
+
+static int32_t bnxt_lfc_process_nvm_set_var_req(struct bnxt_lfc_dev *blfc_dev,
+						struct bnxt_lfc_nvm_set_var_req
+						*nvm_set_var_req)
+{
+	int32_t rc;
+	uint16_t len_in_bytes;
+	struct pci_dev *pdev = blfc_dev->pdev;
+	struct bnxt_en_dev *en_dev = blfc_dev->en_dev;
+
+	struct hwrm_nvm_set_variable_input req = {0};
+	struct hwrm_nvm_set_variable_output resp = {0};
+	struct bnxt_fw_msg fw_msg;
+	void *src_data_addr = NULL;
+	dma_addr_t src_data_dma_addr;
+
+	if (nvm_set_var_req->len_in_bits == 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n");
+		return -ENOMEM;
+	}
+
+	len_in_bytes = (nvm_set_var_req->len_in_bits + 7) / 8;
+	src_data_addr = dma_alloc_coherent(&pdev->dev,
+					   len_in_bytes,
+					   &src_data_dma_addr,
+					   GFP_KERNEL);
+
+	if (src_data_addr == NULL) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to alloc mem for data\n");
+		return -ENOMEM;
+	}
+
+	rc = copy_from_user(src_data_addr,
+			    nvm_set_var_req->in_val,
+			    len_in_bytes);
+
+	if (rc != 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to copy %d bytes from the user\n", rc);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	bnxt_lfc_init_req_hdr((void *)&req,
+			      HWRM_NVM_SET_VARIABLE, -1, -1);
+	req.src_data_addr = cpu_to_le64(src_data_dma_addr);
+	req.data_len = cpu_to_le16(nvm_set_var_req->len_in_bits);
+	req.option_num = cpu_to_le16(nvm_set_var_req->option_num);
+	req.dimensions = cpu_to_le16(nvm_set_var_req->dimensions);
+	req.index_0 = cpu_to_le16(nvm_set_var_req->index_0);
+	req.index_1 = cpu_to_le16(nvm_set_var_req->index_1);
+	req.index_2 = cpu_to_le16(nvm_set_var_req->index_2);
+	req.index_3 = cpu_to_le16(nvm_set_var_req->index_3);
+
+	bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+			     sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+
+	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_OTHER_ULP, &fw_msg);
+	if (rc)
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to send NVM_SET_VARIABLE FW msg, rc = 0x%x", rc);
+done:
+	dma_free_coherent(&pdev->dev, len_in_bytes,
+			  src_data_addr,
+			  src_data_dma_addr);
+	return rc;
+}
+
+static int32_t bnxt_lfc_fill_fw_msg(struct pci_dev *pdev,
+				    struct bnxt_fw_msg *fw_msg,
+				    struct blfc_fw_msg *msg)
+{
+	int32_t rc = 0;
+
+	if (copy_from_user(fw_msg->msg,
+			   (void __user *)((unsigned long)msg->usr_req),
+			   msg->len_req)) {
+		BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n");
+		return -EFAULT;
+	}
+
+	fw_msg->msg_len = msg->len_req;
+	fw_msg->resp_max_len = msg->len_resp;
+	if (!msg->timeout)
+		fw_msg->timeout = DFLT_HWRM_CMD_TIMEOUT;
+	else
+		fw_msg->timeout = msg->timeout;
+	return rc;
+}
+
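+/* Editor's sketch (not part of the patch): how a userspace caller is
+ * expected to describe an indirect buffer for the generic HWRM request
+ * handled below. The struct layout comes from bnxt_lfc_ioctl.h; the
+ * buffer names, sizes and offset are hypothetical:
+ *
+ *	struct blfc_fw_msg *m;
+ *
+ *	m = calloc(1, sizeof(*m) + 1 * sizeof(struct dma_info));
+ *	m->usr_req = (__u64)(unsigned long)hwrm_req;	(HWRM input)
+ *	m->usr_resp = (__u64)(unsigned long)hwrm_resp;	(HWRM output)
+ *	m->len_req = req_len;
+ *	m->len_resp = resp_len;
+ *	m->num_dma_indications = 1;
+ *	m->dma[0].data = (__u64)(unsigned long)buf;	(user data buffer)
+ *	m->dma[0].length = buf_len;	(at most MAX_DMA_MEM_SIZE)
+ *	m->dma[0].offset = addr_off;	(8-byte aligned offset of the DMA
+ *					 address field within the request)
+ *	m->dma[0].read_or_write = 1;	(1: copy back to user after send)
+ */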
+static int32_t bnxt_lfc_prepare_dma_operations(struct bnxt_lfc_dev *blfc_dev,
+					       struct blfc_fw_msg *msg,
+					       struct bnxt_fw_msg *fw_msg)
+{
+	int32_t rc = 0;
+	uint8_t i, num_allocated = 0;
+	void *dma_ptr;
+
+	for (i = 0; i < msg->num_dma_indications; i++) {
+		if (msg->dma[i].length == 0 ||
+		    msg->dma[i].length > MAX_DMA_MEM_SIZE) {
+			BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+				     "Invalid DMA memory length\n");
+			rc = -EINVAL;
+			goto err;
+		}
+		blfc_dev->dma_virt_addr[i] = dma_alloc_coherent(
+						&blfc_dev->pdev->dev,
+						msg->dma[i].length,
+						&blfc_dev->dma_addr[i],
+						GFP_KERNEL);
+		if (!blfc_dev->dma_virt_addr[i]) {
+			BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+				     "Failed to allocate memory for data_addr[%d]\n",
+				     i);
+			rc = -ENOMEM;
+			goto err;
+		}
+		num_allocated++;
+		if (!(msg->dma[i].read_or_write)) {
+			if (copy_from_user(blfc_dev->dma_virt_addr[i],
+					   (void __user *)(
+					   (unsigned long)(msg->dma[i].data)),
+					   msg->dma[i].length)) {
+				BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+					     "Failed to copy data from user for data_addr[%d]\n",
+					     i);
+				rc = -EFAULT;
+				goto err;
+			}
+		}
+		dma_ptr = fw_msg->msg + msg->dma[i].offset;
+
+		if ((PTR_ALIGN(dma_ptr, 8) == dma_ptr) &&
+		    (msg->dma[i].offset < msg->len_req)) {
+			__le64 *dmap = dma_ptr;
+
+			*dmap = cpu_to_le64(blfc_dev->dma_addr[i]);
+		} else {
+			BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+				     "Wrong input parameter\n");
+			rc = -EINVAL;
+			goto err;
+		}
+	}
+	return rc;
+err:
+	for (i = 0; i < num_allocated; i++)
+		dma_free_coherent(&blfc_dev->pdev->dev,
+				  msg->dma[i].length,
+				  blfc_dev->dma_virt_addr[i],
+				  blfc_dev->dma_addr[i]);
+	return rc;
+}
+
+static int32_t bnxt_lfc_process_hwrm(struct bnxt_lfc_dev *blfc_dev,
+				     struct bnxt_lfc_req *lfc_req)
+{
+	int32_t rc = 0, i, hwrm_err = 0;
+	struct bnxt_en_dev *en_dev = blfc_dev->en_dev;
+	struct pci_dev *pdev = blfc_dev->pdev;
+	struct bnxt_fw_msg fw_msg;
+	struct blfc_fw_msg msg, *msg2 = NULL;
+
+	if (copy_from_user(&msg,
+			   (void __user *)((unsigned long)lfc_req->req.hreq),
+			   sizeof(msg))) {
+		BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n");
+		return -EFAULT;
+	}
+
+	if (msg.len_req > BNXT_LFC_MAX_HWRM_REQ_LENGTH ||
+	    msg.len_resp > BNXT_LFC_MAX_HWRM_RESP_LENGTH) {
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Invalid length\n");
+		return -EINVAL;
+	}
+
+	fw_msg.msg = kmalloc(msg.len_req, GFP_KERNEL);
+	if (!fw_msg.msg) {
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Failed to allocate input req memory\n");
+		return -ENOMEM;
+	}
+
+	fw_msg.resp = kmalloc(msg.len_resp, GFP_KERNEL);
+	if (!fw_msg.resp) {
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Failed to allocate resp memory\n");
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	rc = bnxt_lfc_fill_fw_msg(pdev, &fw_msg, &msg);
+	if (rc) {
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Failed to fill the FW data\n");
+		goto err1;
+	}
+
+	if (msg.num_dma_indications) {
+		if (msg.num_dma_indications > MAX_NUM_DMA_INDICATIONS) {
+			BNXT_LFC_ERR(&pdev->dev,
+				     "Invalid DMA indications\n");
+			rc = -EINVAL;
+			goto err1;
+		}
+		msg2 = kmalloc((sizeof(struct blfc_fw_msg) +
+			       (msg.num_dma_indications * sizeof(struct dma_info))),
+			       GFP_KERNEL);
+		if (!msg2) {
+			BNXT_LFC_ERR(&pdev->dev,
+				     "Failed to allocate memory\n");
+			rc = -ENOMEM;
+			goto err1;
+		}
+		if (copy_from_user((void *)msg2,
+				   (void __user *)((unsigned long)lfc_req->req.hreq),
+				   (sizeof(struct blfc_fw_msg) +
+				   (msg.num_dma_indications *
+				   sizeof(struct dma_info))))) {
+			BNXT_LFC_ERR(&pdev->dev,
+				     "Failed to copy data from user\n");
+			rc = -EFAULT;
+			goto err1;
+		}
+		rc = bnxt_lfc_prepare_dma_operations(blfc_dev, msg2, &fw_msg);
+		if (rc) {
+			/* prepare_dma_operations() frees its own partial
+			 * allocations on failure, so skip the DMA cleanup.
+			 */
+			BNXT_LFC_ERR(&pdev->dev,
+				     "Failed to perform DMA operations\n");
+			goto err1;
+		}
+	}
+
+	hwrm_err = en_dev->en_ops->bnxt_send_fw_msg(en_dev,
+						    BNXT_OTHER_ULP, &fw_msg);
+	if (hwrm_err) {
+		struct input *req = fw_msg.msg;
+
+		BNXT_LFC_ERR(&pdev->dev,
+			     "Failed to send FW msg type = 0x%x, error = 0x%x",
+			     le16_to_cpu(req->req_type), hwrm_err);
+		goto err;
+	}
+
+	for (i = 0; i < msg.num_dma_indications; i++) {
+		if (msg2->dma[i].read_or_write) {
+			if (copy_to_user((void __user *)
+					 ((unsigned long)msg2->dma[i].data),
+					 blfc_dev->dma_virt_addr[i],
+					 msg2->dma[i].length)) {
+				BNXT_LFC_ERR(&pdev->dev,
+					     "Failed to copy data to user\n");
+				rc = -EFAULT;
+				goto err;
+			}
+		}
+	}
+err:
+	for (i = 0; i < msg.num_dma_indications; i++)
+		dma_free_coherent(&pdev->dev, msg2->dma[i].length,
+				  blfc_dev->dma_virt_addr[i],
+				  blfc_dev->dma_addr[i]);
+
+	if (hwrm_err != -EBUSY && hwrm_err != -E2BIG) {
+		if (copy_to_user((void __user *)((unsigned long)msg.usr_resp),
+				 fw_msg.resp,
+				 msg.len_resp)) {
+			BNXT_LFC_ERR(&pdev->dev,
+				     "Failed to copy data to user\n");
+			rc = -EFAULT;
+		}
+	}
+err1:
+	kfree(msg2);
+	kfree(fw_msg.msg);
+	fw_msg.msg = NULL;
+	kfree(fw_msg.resp);
+	fw_msg.resp = NULL;
+	/* If HWRM command fails, return the response error code */
+	if (hwrm_err)
+		return hwrm_err;
+	return rc;
+}
+
+static void bnxt_eem_dmabuf_release(struct bnxt *bp,
+				    struct bnxt_eem_dmabuf_info *dmabuf_priv)
+{
+	struct bnxt_eem_dmabuf_info *priv;
+	int tbl_type;
+	int dir;
+
+	for (dir = BNXT_EEM_DIR_TX; dir < BNXT_EEM_NUM_DIR; dir++) {
+		for (tbl_type = 0; tbl_type < MAX_TABLE; tbl_type++) {
+			if (tbl_type == EFC_TABLE)
+				continue;
+
+			priv = dmabuf_priv + (dir * MAX_TABLE + tbl_type);
+			netdev_dbg(bp->dev,
+				   "Dir:%d, tbl_type:%d, priv->fd %d\n",
+				   dir, priv->type, priv->fd);
+
+			if (priv->type != MAX_TABLE) {
+				if (priv->buf)
+					dma_buf_put(priv->buf);
+
+				if (priv->pages) {
+					sg_free_table(&priv->sg);
+					kfree(priv->pages);
+				}
+
+				priv->buf = NULL;
+				priv->fd = 0;
+				priv->pages = NULL;
+				priv->type = MAX_TABLE;
+			}
+		}
+		dmabuf_priv->dir &= ~(1 << dir);
+	}
+}
+
+static int bnxt_eem_dmabuf_close(struct bnxt *bp)
+{
+	struct bnxt_eem_dmabuf_info *dmabuf_priv;
+
+	if (!bp->dmabuf_data)
+		return 0;
+
+	dmabuf_priv = bp->dmabuf_data;
+	bnxt_eem_dmabuf_release(bp, dmabuf_priv);
+
+	if (!dmabuf_priv->dir) {
+		kfree(dmabuf_priv);
+		bp->dmabuf_data = NULL;
+		netdev_dbg(bp->dev, "release dmabuf data\n");
+	}
+
+	return 0;
+}
+
+static int32_t bnxt_lfc_process_dmabuf_close(struct bnxt_lfc_dev *blfc_dev)
+{
+	struct bnxt *bp = blfc_dev->bp;
+
+	return bnxt_eem_dmabuf_close(bp);
+}
+
+static int bnxt_eem_dmabuf_reg(struct bnxt *bp)
+{
+	struct bnxt_eem_dmabuf_info *priv;
+	int i;
+
+	if (bp->dmabuf_data) {
+		bnxt_eem_dmabuf_close(bp);
+		netdev_warn(bp->dev, "cleared existing dmabuf data\n");
+	}
+
+	priv = kcalloc(BNXT_EEM_NUM_DIR * MAX_TABLE, sizeof(*priv),
+		       GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	bp->dmabuf_data = priv;
+	for (i = 0; i < BNXT_EEM_NUM_DIR * MAX_TABLE; i++) {
+		priv[i].dev = &bp->pdev->dev;
+		priv[i].type = MAX_TABLE;
+	}
+
+	return 0;
+}
+
+#ifdef HAVE_DMABUF_ATTACH_DEV
+static int bnxt_dmabuf_attach(struct dma_buf *buf, struct device *dev,
+			      struct dma_buf_attachment *attach)
+#else
+static int bnxt_dmabuf_attach(struct dma_buf *buf,
+			      struct dma_buf_attachment *attach)
+#endif
+{
+	return 0;
+}
+
+static void bnxt_dmabuf_detach(struct dma_buf *buf,
+			       struct dma_buf_attachment *attach)
+{
+}
+
+static
+struct sg_table *bnxt_dmabuf_map_dma_buf(struct dma_buf_attachment *attach,
+					 enum dma_data_direction dir)
+{
+	return NULL;
+}
+
+static void bnxt_dmabuf_unmap_dma_buf(struct dma_buf_attachment *attach,
+				      struct sg_table *sgt,
+				      enum dma_data_direction dir)
+{
+}
+
+static void
bnxt_dmabuf_release(struct dma_buf *buf) +{ +} + +#if defined(HAVE_DMABUF_KMAP_ATOMIC) || defined(HAVE_DMABUF_KMAP) || \ + defined(HAVE_DMABUF_MAP_ATOMIC) || defined(HAVE_DMABUF_MAP) +static void *bnxt_dmabuf_kmap(struct dma_buf *buf, unsigned long page) +{ + return NULL; +} + +static void bnxt_dmabuf_kunmap(struct dma_buf *buf, unsigned long page, + void *vaddr) +{ +} +#endif + +static void bnxt_dmabuf_vm_open(struct vm_area_struct *vma) +{ +} + +static void bnxt_dmabuf_vm_close(struct vm_area_struct *vma) +{ +} + +static const struct vm_operations_struct dmabuf_vm_ops = { + .open = bnxt_dmabuf_vm_open, + .close = bnxt_dmabuf_vm_close, +}; + +static int bnxt_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma) +{ + pgprot_t prot = vm_get_page_prot(vma->vm_flags); + struct bnxt_eem_dmabuf_info *priv = buf->priv; + int page_block = vma->vm_pgoff / MAX_CTX_PAGES; + int page_no = vma->vm_pgoff % MAX_CTX_PAGES; + struct bnxt_ctx_pg_info *ctx_pg; + dma_addr_t pg_phys; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND; + vma->vm_ops = &dmabuf_vm_ops; + vma->vm_private_data = priv; + vma->vm_page_prot = pgprot_writecombine(prot); + + ctx_pg = priv->tbl->ctx_pg_tbl[page_block]; + pg_phys = ctx_pg->ctx_dma_arr[page_no]; + return remap_pfn_range(vma, vma->vm_start, pg_phys >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + +static void *bnxt_dmabuf_vmap(struct dma_buf *buf) +{ + return NULL; +} + +static void bnxt_dmabuf_vunmap(struct dma_buf *buf, void *vaddr) +{ +} + +static const struct dma_buf_ops bnxt_dmabuf_ops = { + .attach = bnxt_dmabuf_attach, + .detach = bnxt_dmabuf_detach, + .map_dma_buf = bnxt_dmabuf_map_dma_buf, + .unmap_dma_buf = bnxt_dmabuf_unmap_dma_buf, + .release = bnxt_dmabuf_release, +#ifdef HAVE_DMABUF_KMAP_ATOMIC + .kmap_atomic = bnxt_dmabuf_kmap, + .kunmap_atomic = bnxt_dmabuf_kunmap, +#endif +#ifdef HAVE_DMABUF_KMAP + .kmap = bnxt_dmabuf_kmap, + .kunmap = bnxt_dmabuf_kunmap, +#endif +#ifdef HAVE_DMABUF_MAP_ATOMIC + .map_atomic = bnxt_dmabuf_kmap, + .unmap_atomic = bnxt_dmabuf_kunmap, +#endif +#ifdef HAVE_DMABUF_MAP + .map = bnxt_dmabuf_kmap, + .unmap = bnxt_dmabuf_kunmap, +#endif + .mmap = bnxt_dmabuf_mmap, + .vmap = bnxt_dmabuf_vmap, + .vunmap = bnxt_dmabuf_vunmap, +}; + +static int bnxt_dmabuf_page_map(struct bnxt *bp, int dir, int tbl_type, + struct bnxt_eem_dmabuf_info *priv) +{ + struct bnxt_ctx_pg_info *pg_tbl = NULL; + struct bnxt_ctx_pg_info *ctx_pg; + int page_no = 0; + int ctx_block; + int i, j; + + priv->tbl = &bp->eem_info->eem_ctx_info[dir].eem_tables[tbl_type]; + ctx_pg = &bp->eem_info->eem_ctx_info[dir].eem_tables[tbl_type]; + + if (!ctx_pg->nr_pages) + return -EINVAL; + + priv->pg_count = ctx_pg->nr_pages; + priv->pages = kcalloc(priv->pg_count, sizeof(*priv->pages), GFP_KERNEL); + if (!priv->pages) + return -ENOMEM; + + ctx_block = priv->pg_count / MAX_CTX_PAGES; + for (i = 0; i < ctx_block; i++) { + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (pg_tbl) { + for (j = 0; j < MAX_CTX_PAGES; j++) { + priv->pages[page_no] = + virt_to_page(pg_tbl->ctx_pg_arr[j]); + page_no++; + if (page_no == priv->pg_count) + break; + } + } + } + + if (sg_alloc_table_from_pages(&priv->sg, priv->pages, priv->pg_count, + 0, (priv->pg_count << PAGE_SHIFT), + GFP_KERNEL)) { + kfree(priv->pages); + return -ENOMEM; + } + + if (!dma_map_sg(priv->dev, priv->sg.sgl, + priv->sg.nents, DMA_BIDIRECTIONAL)) { + sg_free_table(&priv->sg); + kfree(priv->pages); + return -ENOMEM; + } + + return 0; +} + +static int bnxt_dmabuf_export(struct bnxt_eem_dmabuf_info *priv) +{ + 
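+	/* dma_buf_export_attr() is presumably a compat wrapper around
+	 * dma_buf_export() that fills a struct dma_buf_export_info with
+	 * the ops table above and priv as private data (editor's
+	 * assumption; the wrapper is not defined in this patch).
+	 */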
int rc = 0; + + priv->buf = dma_buf_export_attr(priv, priv->pg_count, &bnxt_dmabuf_ops); + if (!priv->buf) + rc = -EBUSY; + + if (IS_ERR(priv->buf)) + rc = PTR_ERR(priv->buf); + + return rc; +} + +static int bnxt_eem_dmabuf_export(struct bnxt *bp, u32 flags) +{ + struct bnxt_eem_dmabuf_info *dmabuf_priv; + struct bnxt_eem_dmabuf_info *priv; + int tbl_type; + int dir; + int rc; + + dmabuf_priv = bp->dmabuf_data; + for (dir = BNXT_EEM_DIR_TX; dir < BNXT_EEM_NUM_DIR; dir++) { + for (tbl_type = KEY0_TABLE; tbl_type < MAX_TABLE; tbl_type++) { + if (tbl_type == EFC_TABLE) + continue; + + priv = dmabuf_priv + (dir * MAX_TABLE + tbl_type); + netdev_dbg(bp->dev, + "export: dir %d, tbl:%d\n", dir, tbl_type); + + if (priv->type != MAX_TABLE && priv->fd) { + rc = -EIO; + return rc; + } + + rc = bnxt_dmabuf_page_map(bp, dir, tbl_type, priv); + if (rc) + return rc; + + rc = bnxt_dmabuf_export(priv); + if (rc) + return rc; + + priv->type = tbl_type; + } + dmabuf_priv->dir |= (1 << dir); + } + return 0; +} + +static int bnxt_eem_dmabuf_fd(struct bnxt *bp) +{ + struct bnxt_eem_dmabuf_info *priv; + int fd; + int i; + + if (!bp->dmabuf_data) { + netdev_err(bp->dev, "Failed to export dmabuf file"); + return -EINVAL; + } + + for (priv = bp->dmabuf_data, i = 0; + i < BNXT_EEM_NUM_DIR * MAX_TABLE; i++, priv++) { + if (!priv->buf) { + priv->fd = 0; + continue; + } + + get_dma_buf(priv->buf); + fd = dma_buf_fd(priv->buf, O_ACCMODE); + if (fd < 0) { + dma_buf_put(priv->buf); + return -EINVAL; + } + priv->fd = fd; + } + + return 0; +} + +int bnxt_eem_dmabuf_export_fd(struct bnxt *bp) +{ + int rc; + + rc = bnxt_eem_dmabuf_reg(bp); + if (rc) { + netdev_err(bp->dev, "dmabuf private data initial failed\n"); + return rc; + } + + rc = bnxt_eem_dmabuf_export(bp, O_ACCMODE); + if (rc) { + netdev_err(bp->dev, "prepare dmabuf fail.\n"); + goto clean; + } + + rc = bnxt_eem_dmabuf_fd(bp); + if (rc) { + netdev_err(bp->dev, "dmabuf fd fail.\n"); + goto clean; + } + + return rc; +clean: + bnxt_eem_dmabuf_close(bp); + return rc; +} + +static +int32_t bnxt_lfc_process_dmabuf_export(struct bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_req *lfc_req) +{ + struct bnxt_lfc_dmabuf_fd dmabuf_fd; + struct bnxt_eem_dmabuf_info *priv; + struct bnxt *bp = blfc_dev->bp; + int32_t rc; + int i; + + rc = bnxt_eem_dmabuf_export_fd(bp); + if (rc) + return rc; + + for (priv = bp->dmabuf_data, i = 0; + i < BNXT_LFC_EEM_MAX_TABLE; i++, priv++) + dmabuf_fd.fd[i] = priv->fd; + + rc = copy_to_user(lfc_req->req.eem_dmabuf_export_fd_req.dma_fd, + &dmabuf_fd, sizeof(dmabuf_fd)); + if (rc) + rc = -EFAULT; + + return rc; +} + +static int32_t bnxt_lfc_process_req(struct bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_req *lfc_req) +{ + int32_t rc; + + switch (lfc_req->hdr.req_type) { + case BNXT_LFC_NVM_GET_VAR_REQ: + rc = bnxt_lfc_process_nvm_get_var_req(blfc_dev, + &lfc_req->req.nvm_get_var_req); + break; + case BNXT_LFC_NVM_SET_VAR_REQ: + rc = bnxt_lfc_process_nvm_set_var_req(blfc_dev, + &lfc_req->req.nvm_set_var_req); + break; + case BNXT_LFC_NVM_FLUSH_REQ: + rc = bnxt_lfc_process_nvm_flush(blfc_dev); + break; + case BNXT_LFC_GENERIC_HWRM_REQ: + rc = bnxt_lfc_process_hwrm(blfc_dev, lfc_req); + break; + case BNXT_LFC_DMABUF_CLOSE_REQ: + rc = bnxt_lfc_process_dmabuf_close(blfc_dev); + break; + case BNXT_LFC_EEM_DMABUF_EXPORT_FD_REQ: + rc = bnxt_lfc_process_dmabuf_export(blfc_dev, lfc_req); + break; + default: + BNXT_LFC_DEBUG(&blfc_dev->pdev->dev, + "No valid request found\n"); + return -EINVAL; + } + return rc; +} + +static int32_t bnxt_lfc_open(struct inode *inode, 
struct file *flip) +{ + BNXT_LFC_DEBUG(NULL, "open is called"); + return 0; +} + +static ssize_t bnxt_lfc_read(struct file *filp, char __user *buff, + size_t length, loff_t *offset) +{ + return -EINVAL; +} + +static ssize_t bnxt_lfc_write(struct file *filp, const char __user *ubuff, + size_t len, loff_t *offset) +{ + struct bnxt_lfc_generic_msg kbuff; + + if (len != sizeof(kbuff)) { + BNXT_LFC_ERR(NULL, "Invalid length provided (%zu)\n", len); + return -EINVAL; + } + + if (copy_from_user(&kbuff, (void __user *)ubuff, len)) { + BNXT_LFC_ERR(NULL, "Failed to copy data from user application\n"); + return -EFAULT; + } + + switch (kbuff.key) { + case BNXT_LFC_KEY_DOMAIN_NO: + is_domain_available = true; + domain_no = kbuff.value; + break; + default: + BNXT_LFC_ERR(NULL, "Invalid Key provided (%u)\n", kbuff.key); + return -EINVAL; + } + return len; +} + +static loff_t bnxt_lfc_seek(struct file *filp, loff_t offset, int32_t whence) +{ + return -EINVAL; +} + +static void bnxt_lfc_stop(void *handle) +{ + struct bnxt_lfc_dev *blfc_dev_stop = handle; + struct bnxt_lfc_dev *blfc_dev = NULL; + int i; + + for (i = 0; i < MAX_LFC_OPEN_ULP_DEVICES; i++) { + blfc_dev = blfc_array[i].bnxt_lfc_dev; + if(blfc_dev == blfc_dev_stop) { + bnxt_lfc_unreg(blfc_dev); + dev_put(blfc_dev->ndev); + kfree(blfc_dev); + blfc_array[i].bnxt_lfc_dev = NULL; + blfc_array[i].taken = 0; + break; + } + } +} + +static long bnxt_lfc_ioctl(struct file *flip, unsigned int cmd, + unsigned long args) +{ + int32_t rc; + struct bnxt_lfc_req temp; + struct bnxt_lfc_req *lfc_req; + u32 index; + struct bnxt_lfc_dev *blfc_dev = NULL; + + rc = copy_from_user(&temp, (void __user *)args, sizeof(struct bnxt_lfc_req)); + if (rc) { + BNXT_LFC_ERR(NULL, + "Failed to send %d bytes from the user\n", rc); + return -EINVAL; + } + lfc_req = &temp; + + if (!lfc_req) + return -EINVAL; + + switch (cmd) { + case BNXT_LFC_REQ: + BNXT_LFC_DEBUG(NULL, "BNXT_LFC_REQ called"); + mutex_lock(&blfc_global_dev.bnxt_lfc_lock); + index = bnxt_lfc_get_hash_key(lfc_req->hdr.bus, lfc_req->hdr.devfn); + if (blfc_array[index].taken) { + if (lfc_req->hdr.devfn != blfc_array[index].bnxt_lfc_dev->devfn || + lfc_req->hdr.bus != blfc_array[index].bnxt_lfc_dev->bus) { + /* we have a false hit. Free the older blfc device + store the new one */ + rtnl_lock(); + bnxt_lfc_unreg(blfc_array[index].bnxt_lfc_dev); + dev_put(blfc_array[index].bnxt_lfc_dev->ndev); + kfree(blfc_array[index].bnxt_lfc_dev); + blfc_array[index].bnxt_lfc_dev = NULL; + blfc_array[index].taken = 0; + rtnl_unlock(); + goto not_taken; + } + blfc_dev = blfc_array[index].bnxt_lfc_dev; + } + else { +not_taken: + blfc_dev = kzalloc(sizeof(struct bnxt_lfc_dev), GFP_KERNEL); + if (!blfc_dev) { + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + return -EINVAL; + } + blfc_dev->pdev = + pci_get_domain_bus_and_slot( + ((is_domain_available == true) ? 
+ domain_no : 0), lfc_req->hdr.bus, + lfc_req->hdr.devfn); + + if (bnxt_lfc_is_valid_pdev(blfc_dev->pdev) != true) { + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + kfree(blfc_dev); + return -EINVAL; + } + + rtnl_lock(); + blfc_dev->ndev = pci_get_drvdata(blfc_dev->pdev); + if (!blfc_dev->ndev) { + printk("Driver with provided BDF doesn't exist\n"); + pci_dev_put(blfc_dev->pdev); + rtnl_unlock(); + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + kfree(blfc_dev); + return -EINVAL; + } + + dev_hold(blfc_dev->ndev); + rtnl_unlock(); + if (try_module_get(blfc_dev->pdev->driver->driver.owner)) { + blfc_dev->uops.ulp_stop = bnxt_lfc_stop; + rc = bnxt_lfc_probe_and_reg(blfc_dev); + module_put(blfc_dev->pdev->driver->driver.owner); + } else { + rc = -EINVAL; + } + pci_dev_put(blfc_dev->pdev); + + if (rc) { + dev_put(blfc_dev->ndev); + kfree(blfc_dev); + is_domain_available = false; + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + return -EINVAL; + } + + blfc_dev->bus = lfc_req->hdr.bus; + blfc_dev->devfn = lfc_req->hdr.devfn; + rtnl_lock(); + blfc_array[index].bnxt_lfc_dev = blfc_dev; + blfc_array[index].taken = 1; + rtnl_unlock(); + } + + rc = bnxt_lfc_process_req(blfc_dev, lfc_req); + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + break; + + default: + BNXT_LFC_ERR(NULL, "No Valid IOCTL found\n"); + return -EINVAL; + +} + return rc; +} + +static int32_t bnxt_lfc_release(struct inode *inode, struct file *filp) +{ + BNXT_LFC_DEBUG(NULL, "release is called"); + return 0; +} + +int32_t __init bnxt_lfc_init(void) +{ + int32_t rc; + + rc = alloc_chrdev_region(&blfc_global_dev.d_dev, 0, 1, BNXT_LFC_DEV_NAME); + if (rc < 0) { + BNXT_LFC_ERR(NULL, "Allocation of char dev region is failed\n"); + return rc; + } + + blfc_global_dev.d_class = class_create(THIS_MODULE, BNXT_LFC_DEV_NAME); + if (IS_ERR(blfc_global_dev.d_class)) { + BNXT_LFC_ERR(NULL, "Class creation is failed\n"); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + + if (IS_ERR(device_create(blfc_global_dev.d_class, NULL, blfc_global_dev.d_dev, NULL, + BNXT_LFC_DEV_NAME))) { + BNXT_LFC_ERR(NULL, "Device creation is failed\n"); + class_destroy(blfc_global_dev.d_class); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + + blfc_global_dev.fops.owner = THIS_MODULE; + blfc_global_dev.fops.open = bnxt_lfc_open; + blfc_global_dev.fops.read = bnxt_lfc_read; + blfc_global_dev.fops.write = bnxt_lfc_write; + blfc_global_dev.fops.llseek = bnxt_lfc_seek; + blfc_global_dev.fops.unlocked_ioctl = bnxt_lfc_ioctl; + blfc_global_dev.fops.release = bnxt_lfc_release; + + cdev_init(&blfc_global_dev.c_dev, &blfc_global_dev.fops); + if (cdev_add(&blfc_global_dev.c_dev, blfc_global_dev.d_dev, 1) == -1) { + BNXT_LFC_ERR(NULL, "Char device addition is failed\n"); + device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev); + class_destroy(blfc_global_dev.d_class); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + mutex_init(&blfc_global_dev.bnxt_lfc_lock); + bnxt_lfc_inited = true; + + memset(blfc_array, 0, sizeof(struct bnxt_lfc_dev_array) + * MAX_LFC_OPEN_ULP_DEVICES); + + rc = bnxt_en_register_netdevice_notifier(&lfc_device_notifier); + if (rc) { + BNXT_LFC_ERR(NULL, "Error on register NETDEV event notifier\n"); + return -1; + } + return 0; +} + +void __exit bnxt_lfc_exit(void) +{ + struct bnxt_lfc_dev *blfc_dev; + u32 i; + if (!bnxt_lfc_inited) + return; + for (i = 0; i < MAX_LFC_OPEN_ULP_DEVICES; i++) { + mutex_lock(&blfc_global_dev.bnxt_lfc_lock); + blfc_dev = blfc_array[i].bnxt_lfc_dev; + 
if (blfc_dev) {
+			rtnl_lock();
+			bnxt_lfc_unreg(blfc_dev);
+			blfc_array[i].bnxt_lfc_dev = NULL;
+			blfc_array[i].taken = 0;
+			rtnl_unlock();
+			dev_put(blfc_dev->ndev);
+			kfree(blfc_dev);
+		}
+		mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
+	}
+	bnxt_en_unregister_netdevice_notifier(&lfc_device_notifier);
+	cdev_del(&blfc_global_dev.c_dev);
+	device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev);
+	class_destroy(blfc_global_dev.d_class);
+	unregister_chrdev_region(blfc_global_dev.d_dev, 1);
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.h
new file mode 100644
index 000000000000..706b551db772
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc.h
@@ -0,0 +1,110 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_LFC_H
+#define BNXT_LFC_H
+
+#ifdef CONFIG_BNXT_LFC
+
+/* Assuming that no HWRM command requires more than 10 DMA addresses
+ * in its input request.
+ */
+#define MAX_NUM_DMA_INDICATIONS 10
+#define MAX_DMA_MEM_SIZE 0x10000 /*64K*/
+
+/* To prevent mismatch between bnxtnvm user application and bnxt_lfc,
+ * keeping the max. size as 512.
+ */
+#define BNXT_LFC_MAX_HWRM_REQ_LENGTH HWRM_MAX_REQ_LEN
+#define BNXT_LFC_MAX_HWRM_RESP_LENGTH (512)
+
+#define BNXT_NVM_FLUSH_TIMEOUT ((DFLT_HWRM_CMD_TIMEOUT) * 100)
+#define BNXT_LFC_DEV_NAME "bnxt_lfc"
+#define DRV_NAME BNXT_LFC_DEV_NAME
+
+#define BNXT_LFC_ERR(dev, fmt, arg...) \
+	dev_err(dev, "%s: %s:%d: "fmt "\n", \
+		DRV_NAME, __func__, \
+		__LINE__, ##arg) \
+
+#define BNXT_LFC_WARN(dev, fmt, arg...) \
+	dev_warn(dev, "%s: %s:%d: "fmt "\n", \
+		 DRV_NAME, __func__, \
+		 __LINE__, ##arg) \
+
+#define BNXT_LFC_INFO(dev, fmt, arg...) \
+	dev_info(dev, "%s: %s:%d: "fmt "\n", \
+		 DRV_NAME, __func__, \
+		 __LINE__, ##arg) \
+
+#define BNXT_LFC_DEBUG(dev, fmt, arg...) \
+	dev_dbg(dev, "%s: %s:%d: "fmt "\n", \
+		DRV_NAME, __func__, \
+		__LINE__, ##arg) \
+
+struct bnxt_lfc_dev_array {
+	u32 taken;
+	struct bnxt_lfc_dev *bnxt_lfc_dev;
+};
+
+struct bnxt_lfc_dev {
+	struct pci_dev *pdev;
+	struct net_device *ndev;
+
+	struct bnxt *bp;
+	struct bnxt_en_dev *en_dev;
+
+	struct bnxt_ulp_ops uops;
+	u32 bus;
+	u32 devfn;
+
+	/* dma_virt_addr to hold the virtual address
+	 * of the DMA memory.
+	 */
+	void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
+	/* dma_addr to hold the DMA addresses*/
+	dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
+};
+
+struct bnxt_gloabl_dev {
+	dev_t d_dev;
+	struct class *d_class;
+	struct cdev c_dev;
+
+	struct file_operations fops;
+
+	struct mutex bnxt_lfc_lock;
+};
+
+struct bnxt_eem_dmabuf_info {
+	int fd;
+	u32 dir;
+	int type;
+	int pg_count;
+	struct dma_buf *buf;
+	struct sg_table sg;
+	struct device *dev;
+	struct page **pages;
+	struct bnxt_ctx_pg_info *tbl;
+};
+
+int32_t bnxt_lfc_init(void);
+void bnxt_lfc_exit(void);
+
+#else
+
+static inline int32_t bnxt_lfc_init(void)
+{
+	return 0;
+}
+
+static inline void bnxt_lfc_exit(void)
+{
+}
+#endif
+#endif /*BNXT_LFC_H*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc_ioctl.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc_ioctl.h
new file mode 100644
index 000000000000..5f1c6c73a69c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_lfc_ioctl.h
@@ -0,0 +1,120 @@
+/* Broadcom NetXtreme-C/E network driver.
+ * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_LFC_IOCTL_H +#define BNXT_LFC_IOCTL_H + +#define BNXT_LFC_IOCTL_MAGIC 0x98 +#define BNXT_LFC_VER 1 + +enum bnxt_lfc_req_type { + BNXT_LFC_NVM_GET_VAR_REQ = 1, + BNXT_LFC_NVM_SET_VAR_REQ, + BNXT_LFC_NVM_FLUSH_REQ, + BNXT_LFC_GENERIC_HWRM_REQ, + BNXT_LFC_EEM_DMABUF_EXPORT_FD_REQ, + BNXT_LFC_DMABUF_CLOSE_REQ, +}; + +struct bnxt_lfc_req_hdr { + uint32_t ver; + uint32_t bus; + uint32_t devfn; + enum bnxt_lfc_req_type req_type; +}; + +struct bnxt_lfc_nvm_get_var_req { + uint16_t option_num; + uint16_t dimensions; + uint16_t index_0; + uint16_t index_1; + uint16_t index_2; + uint16_t index_3; + uint16_t len_in_bits; + uint8_t __user *out_val; +}; + +struct bnxt_lfc_nvm_set_var_req { + uint16_t option_num; + uint16_t dimensions; + uint16_t index_0; + uint16_t index_1; + uint16_t index_2; + uint16_t index_3; + uint16_t len_in_bits; + uint8_t __user *in_val; +}; + +struct dma_info { + __u64 data; + /* Based on read_or_write parameter + * LFC will either fill or read the + * data to or from the user memory + */ + __u32 length; + /* Length of the data for read/write */ + __u16 offset; + /* Offset at which HWRM input structure needs DMA address*/ + __u8 read_or_write; + /* It should be 0 for write and 1 for read */ + __u8 unused; +}; + +struct blfc_fw_msg { + __u64 usr_req; + /* HWRM input structure */ + __u64 usr_resp; + /* HWRM output structure */ + __u32 len_req; + /* HWRM input structure length*/ + __u32 len_resp; + /* HWRM output structure length*/ + __u32 timeout; + /* HWRM command timeout. If 0 then + * LFC will provide default timeout + */ + __u32 num_dma_indications; + /* Number of DMA addresses used in HWRM command */ + struct dma_info dma[0]; + /* User should allocate it with + * (sizeof(struct dma_info) * num_dma_indications) + */ +}; + + +struct bnxt_lfc_generic_msg { + __u8 key; + #define BNXT_LFC_KEY_DOMAIN_NO 1 + __u8 reserved[3]; + __u32 value; +}; + +#define BNXT_LFC_EEM_MAX_TABLE 8 + +struct bnxt_lfc_dmabuf_fd { + int fd[BNXT_LFC_EEM_MAX_TABLE]; +}; + +struct bnxt_lfc_eem_dmabuf_export_fd_req { + void __user *dma_fd; +}; + +struct bnxt_lfc_req { + struct bnxt_lfc_req_hdr hdr; + union { + struct bnxt_lfc_nvm_get_var_req nvm_get_var_req; + struct bnxt_lfc_nvm_set_var_req nvm_set_var_req; + struct bnxt_lfc_eem_dmabuf_export_fd_req + eem_dmabuf_export_fd_req; + __u64 hreq; /* Pointer to "struct blfc_fw_msg" */ + } req; +}; + +#define BNXT_LFC_REQ _IOW(BNXT_LFC_IOCTL_MAGIC, 1, struct bnxt_lfc_req) +#endif /*BNXT_LFC_IOCTL_H*/ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.c new file mode 100644 index 000000000000..8679046a1d67 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.c @@ -0,0 +1,232 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ +#include <linux/netdevice.h> +#include <linux/pci.h> +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt_netlink.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +/* attribute policy */ +static struct nla_policy bnxt_netlink_policy[BNXT_NUM_ATTRS] = { + [BNXT_ATTR_PID] = { .type = NLA_U32 }, + [BNXT_ATTR_IF_INDEX] = { .type = NLA_U32 }, + [BNXT_ATTR_REQUEST] = { .type = NLA_BINARY }, + [BNXT_ATTR_RESPONSE] = { .type = NLA_BINARY }, +}; + +static struct genl_family bnxt_netlink_family; + +static int bnxt_parse_attrs(struct nlattr **a, struct bnxt **bp, + struct net_device **dev) +{ + pid_t pid; + struct net *ns = NULL; + const char *drivername; + + if (!a[BNXT_ATTR_PID]) { + netdev_err(*dev, "No process ID specified\n"); + goto err_inval; + } + pid = nla_get_u32(a[BNXT_ATTR_PID]); + ns = get_net_ns_by_pid(pid); + if (IS_ERR(ns)) { + netdev_err(*dev, "Invalid net namespace for pid %d (err: %ld)\n", + pid, PTR_ERR(ns)); + goto err_inval; + } + + if (!a[BNXT_ATTR_IF_INDEX]) { + netdev_err(*dev, "No interface index specified\n"); + goto err_inval; + } + *dev = dev_get_by_index(ns, nla_get_u32(a[BNXT_ATTR_IF_INDEX])); + + put_net(ns); + ns = NULL; + if (!*dev) { + netdev_err(*dev, "Invalid network interface index %d (err: %ld)\n", + nla_get_u32(a[BNXT_ATTR_IF_INDEX]), PTR_ERR(ns)); + goto err_inval; + } + if (!(*dev)->dev.parent || !(*dev)->dev.parent->driver || + !(*dev)->dev.parent->driver->name) { + netdev_err(*dev, "Unable to get driver name for device %s\n", + (*dev)->name); + goto err_inval; + } + drivername = (*dev)->dev.parent->driver->name; + if (strcmp(drivername, DRV_MODULE_NAME)) { + netdev_err(*dev, "Device %s (%s) is not a %s device!\n", + (*dev)->name, drivername, DRV_MODULE_NAME); + goto err_inval; + } + *bp = netdev_priv(*dev); + if (!*bp) { + netdev_warn((*bp)->dev, "No private data\n"); + goto err_inval; + } + + return 0; + +err_inval: + if (ns && !IS_ERR(ns)) + put_net(ns); + return -EINVAL; +} + +/* handler */ +static int bnxt_netlink_hwrm(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **a = info->attrs; + struct net_device *dev = NULL; + struct sk_buff *reply = NULL; + struct input *req, *nl_req; + struct bnxt *bp = NULL; + struct output *resp; + int len, rc; + void *hdr; + + rc = bnxt_parse_attrs(a, &bp, &dev); + if (rc) + goto err_rc; + + if (!bp) { + rc = -EINVAL; + goto err_rc; + } + + if (!bp->hwrm_dma_pool) { + netdev_warn(bp->dev, "HWRM interface not currently available on %s\n", + dev->name); + rc = -EINVAL; + goto err_rc; + } + + if (!a[BNXT_ATTR_REQUEST]) { + netdev_warn(bp->dev, "No request specified\n"); + rc = -EINVAL; + goto err_rc; + } + len = nla_len(a[BNXT_ATTR_REQUEST]); + nl_req = nla_data(a[BNXT_ATTR_REQUEST]); + + reply = genlmsg_new(PAGE_SIZE, GFP_KERNEL); + if (!reply) { + netdev_warn(bp->dev, "Error: genlmsg_new failed\n"); + rc = -ENOMEM; + goto err_rc; + } + + rc = hwrm_req_init(bp, req, nl_req->req_type); + if (rc) + goto err_rc; + + rc = hwrm_req_replace(bp, req, nl_req, len); + if (rc) + goto err_rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) { + /* + * Indicate success for the hwrm transport, while letting + * the hwrm error be passed back to the netlink caller in + * the response message. Caller is responsible for handling + * any errors. + * + * no kernel warnings are logged in this case. 
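+		 * No firmware error is lost in the process: the error_code
+		 * field in the HWRM response header carries the firmware
+		 * status for the caller to decode.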
+		 */
+		rc = 0;
+	}
+	hdr = genlmsg_put_reply(reply, info, &bnxt_netlink_family, 0,
+				BNXT_CMD_HWRM);
+	if (!hdr) {
+		netdev_warn(bp->dev, "No space for response header\n");
+		hwrm_req_drop(bp, req);
+		rc = -ENOMEM;
+		goto err_rc;
+	}
+	if (nla_put(reply, BNXT_ATTR_RESPONSE, le16_to_cpu(resp->resp_len),
+		    resp)) {
+		netdev_warn(bp->dev, "No space for response attribute\n");
+		hwrm_req_drop(bp, req);
+		rc = -ENOMEM;
+		goto err_rc;
+	}
+	genlmsg_end(reply, hdr);
+	hwrm_req_drop(bp, req);
+
+	dev_put(dev);
+	dev = NULL;
+
+	return genlmsg_reply(reply, info);
+
+err_rc:
+	if (reply && !IS_ERR(reply))
+		kfree_skb(reply);
+	if (dev && !IS_ERR(dev))
+		dev_put(dev);
+
+	if (bp)
+		netdev_warn(bp->dev, "returning with error code %d\n", rc);
+
+	return rc;
+}
+
+/* handlers */
+#if defined(HAVE_GENL_REG_OPS_GRPS) || !defined(HAVE_GENL_REG_FAMILY_WITH_OPS)
+static const struct genl_ops bnxt_netlink_ops[] = {
+#else
+static struct genl_ops bnxt_netlink_ops[] = {
+#endif
+	{
+		.cmd = BNXT_CMD_HWRM,
+		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege */
+#ifndef HAVE_GENL_POLICY
+		.policy = bnxt_netlink_policy,
+#endif
+		.doit = bnxt_netlink_hwrm,
+		.dumpit = NULL,
+	},
+};
+
+/* family definition */
+static struct genl_family bnxt_netlink_family = {
+#ifdef HAVE_GENL_ID_GENERATE
+	.id = GENL_ID_GENERATE,
+#endif
+	.hdrsize = 0,
+	.name = BNXT_NL_NAME,
+	.version = BNXT_NL_VER,
+	.maxattr = BNXT_NUM_ATTRS,
+#ifdef HAVE_GENL_POLICY
+	.policy = bnxt_netlink_policy,
+#endif
+#ifndef HAVE_GENL_REG_FAMILY_WITH_OPS
+	.ops = bnxt_netlink_ops,
+	.n_ops = ARRAY_SIZE(bnxt_netlink_ops)
+#endif
+};
+
+int bnxt_nl_register(void)
+{
+#ifndef HAVE_GENL_REG_FAMILY_WITH_OPS
+	return genl_register_family(&bnxt_netlink_family);
+#elif defined(HAVE_GENL_REG_OPS_GRPS)
+	return genl_register_family_with_ops(&bnxt_netlink_family,
+					     bnxt_netlink_ops);
+#else
+	return genl_register_family_with_ops(&bnxt_netlink_family,
+					     bnxt_netlink_ops,
+					     ARRAY_SIZE(bnxt_netlink_ops));
+#endif
+}
+
+int bnxt_nl_unregister(void)
+{
+	return genl_unregister_family(&bnxt_netlink_family);
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.h
new file mode 100644
index 000000000000..6939cd4c8e71
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_netlink.h
@@ -0,0 +1,41 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef __BNXT_NETLINK_H__
+#define __BNXT_NETLINK_H__
+
+#include <net/genetlink.h>
+#include <net/netlink.h>
+
+/* commands */
+enum {
+	BNXT_CMD_UNSPEC,
+	BNXT_CMD_HWRM,
+	BNXT_NUM_CMDS
+};
+#define BNXT_CMD_MAX (BNXT_NUM_CMDS - 1)
+
+/* attributes */
+enum {
+	BNXT_ATTR_UNSPEC,
+	BNXT_ATTR_PID,
+	BNXT_ATTR_IF_INDEX,
+	BNXT_ATTR_REQUEST,
+	BNXT_ATTR_RESPONSE,
+	BNXT_NUM_ATTRS
+};
+#define BNXT_ATTR_MAX (BNXT_NUM_ATTRS - 1)
+
+#define BNXT_NL_NAME "bnxt_netlink"
+#define BNXT_NL_VER 1
+
+int bnxt_nl_register(void);
+int bnxt_nl_unregister(void);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 83444811d3c6..6a1ba9bd9b77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
* * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c new file mode 100644 index 000000000000..6663353b0bc5 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -0,0 +1,645 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#ifdef HAVE_IEEE1588_SUPPORT +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/timecounter.h> +#include <linux/timekeeping.h> +#endif +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ptp.h" + +#ifdef HAVE_IEEE1588_SUPPORT +static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u64 ns = timespec64_to_ns(ts); + + timecounter_init(&ptp->tc, &ptp->cc, ns); + return 0; +} + +static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, + struct ptp_system_timestamp *sts) +{ + struct hwrm_port_ts_query_output *resp; + struct hwrm_port_ts_query_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); + resp = hwrm_req_hold(bp, req); + if (sts) + hwrm_req_capture_ts(bp, req, sts); + + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + *ts = le64_to_cpu(resp->ptp_msg_ts); + hwrm_req_drop(bp, req); + return 0; +} + +int bnxt_ptp_get_current_time(struct bnxt *bp) +{ + u32 flags = PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return 0; + ptp->old_time = ptp->current_time; + return bnxt_hwrm_port_ts_query(bp, flags, &ptp->current_time, NULL); +} + +#ifdef HAVE_PTP_GETTIMEX64 +static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u32 flags = PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME; + u64 ns, cycles; + int rc; + + rc = bnxt_hwrm_port_ts_query(ptp->bp, flags, &cycles, sts); + if (rc) + return rc; + + ns = timecounter_cyc2time(&ptp->tc, cycles); + *ts = ns_to_timespec64(ns); + + return 0; +} +#else +static int bnxt_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u64 ns; + + ns = timecounter_read(&ptp->tc); + *ts = ns_to_timespec64(ns); + return 0; +} +#endif +static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + + timecounter_adjtime(&ptp->tc, delta); + return 0; +} + +static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + s32 period, period1, period2, dif, 
dif1, dif2; + s32 step, best_step = 0, best_period = 0; + s32 best_dif = BNXT_MAX_PHC_DRIFT; + u32 drift_sign = 1; + + if (ptp->bp->flags & BNXT_FLAG_CHIP_P5) { + struct hwrm_port_mac_cfg_input *req; + int rc; + + rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = ppb; + req->enables = + cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(ptp->bp, req); + if (rc) + netdev_err(ptp->bp->dev, + "ptp adjfreq failed. rc = %d\n", rc); + return rc; + } + + /* Frequency adjustment requires programming 3 values: + * 1-bit direction + * 5-bit adjustment step in 1 ns unit + * 24-bit period in 1 us unit between adjustments + */ + if (ppb < 0) { + ppb = -ppb; + drift_sign = 0; + } + + if (ppb == 0) { + /* no adjustment */ + best_step = 0; + best_period = 0xFFFFFF; + } else if (ppb >= BNXT_MAX_PHC_DRIFT) { + /* max possible adjustment */ + best_step = 31; + best_period = 1; + } else { + /* Find the best possible adjustment step and period */ + for (step = 0; step <= 31; step++) { + period1 = step * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (step * 1000000 / period1); + else + dif1 = BNXT_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (step * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_step = step; + best_period = period; + } + } + } + writel((drift_sign << BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT) | + (best_step << BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT) | + (best_period & BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK), + ptp->bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME_ADJ); + + return 0; +} + +static int bnxt_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -ENOTSUPP; +} + +static void bnxt_clr_rx_ts(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pf_info *pf = &bp->pf; + u16 port_id; + int i = 0; + u32 fifo; + + if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) + return; + + port_id = pf->port_id; + fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]); + while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < 10)) { + writel(1 << port_id, bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); + fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]); + i++; + } +} + +static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; + u32 flags = 0; + int rc; + + if (!ptp) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + if (ptp->rx_filter) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (ptp->tx_tstamp_en) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + + return hwrm_req_send(bp, req); +} + +int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + struct bnxt_ptp_cfg *ptp; + u16 old_rxctl, new_rxctl; + int old_rx_filter, rc; + u8 old_tx_tstamp_en; + + ptp = bp->ptp_cfg; + if (!ptp) + return -EOPNOTSUPP; + + if (copy_from_user(&stmpconf, ifr->ifr_data, 
sizeof(stmpconf))) + return -EFAULT; + + if (stmpconf.flags) + return -EINVAL; + + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) + return -ERANGE; + + old_rx_filter = ptp->rx_filter; + old_rxctl = ptp->rxctl; + old_tx_tstamp_en = ptp->tx_tstamp_en; + switch (stmpconf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + new_rxctl = 0; + ptp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + new_rxctl = BNXT_PTP_MSG_EVENTS; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + new_rxctl = BNXT_PTP_MSG_SYNC; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + break; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + new_rxctl = BNXT_PTP_MSG_DELAY_REQ; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + break; + default: + return -ERANGE; + } + + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + ptp->tx_tstamp_en = 1; + else + ptp->tx_tstamp_en = 0; + + if (!old_rxctl && new_rxctl) { + rc = bnxt_hwrm_ptp_cfg(bp); + if (rc) + goto ts_set_err; + ptp->rxctl = new_rxctl; + bnxt_clr_rx_ts(bp); + } + + rc = bnxt_hwrm_ptp_cfg(bp); + if (rc) + goto ts_set_err; + + stmpconf.rx_filter = ptp->rx_filter; + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? + -EFAULT : 0; + +ts_set_err: + ptp->rx_filter = old_rx_filter; + ptp->rxctl = old_rxctl; + ptp->tx_tstamp_en = old_tx_tstamp_en; + return rc; +} + +int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + struct bnxt_ptp_cfg *ptp; + + ptp = bp->ptp_cfg; + if (!ptp) + return -EOPNOTSUPP; + + stmpconf.flags = 0; + stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + stmpconf.rx_filter = ptp->rx_filter; + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
+ -EFAULT : 0; +} + +static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win) +{ + u32 reg_base = *reg_arr & 0xfffff000; + u32 win_off; + int i; + + for (i = 0; i < count; i++) { + if ((reg_arr[i] & 0xfffff000) != reg_base) + return -ERANGE; + } + win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; + writel(reg_base, bp->bar0 + win_off); + return 0; +} + +static int bnxt_map_ptp_regs(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 *reg_arr, reg_base; + int rc, i; + + reg_arr = ptp->rx_regs; + rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); + if (rc) + return rc; + + reg_arr = ptp->tx_regs; + rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); + if (rc) + return rc; + + reg_base = ptp->rx_regs[BNXT_PTP_RX_TS_L] & 0xfffff000; + for (i = 0; i < BNXT_PTP_RX_REGS; i++) + ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); + + reg_base = ptp->tx_regs[BNXT_PTP_TX_TS_L] & 0xfffff000; + for (i = 0; i < BNXT_PTP_TX_REGS; i++) + ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); + + return 0; +} + +static void bnxt_unmap_ptp_regs(struct bnxt *bp) +{ + writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); + writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); +} + +static u64 bnxt_cc_read(const struct cyclecounter *cc) +{ + struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc); + struct bnxt *bp = ptp->bp; + u64 ns; + + if (bp->flags & BNXT_FLAG_CHIP_P5) { + u32 flags = PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME; + int rc; + + rc = bnxt_hwrm_port_ts_query(bp, flags, &ns, NULL); + if (rc) + netdev_err(bp->dev, "TS query for cc_read failed rc = %x\n", + rc); + } else { + ns = readl(bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME); + ns |= (u64)readl(bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME + 4) << 32; + } + + return ns; +} + +int bnxt_get_tx_ts(struct bnxt *bp, u64 *ts) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 fifo; + + fifo = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]); + if (fifo & BNXT_PTP_TX_FIFO_EMPTY) + return -EAGAIN; + + fifo = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]); + *ts = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]); + *ts |= (u64)readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H]) << + 32; + readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); + return 0; +} + +int bnxt_get_rx_ts(struct bnxt *bp, u64 *ts) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pf_info *pf = &bp->pf; + u16 port_id; + u32 fifo; + + if (!ptp) + return -ENODEV; + + fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]); + if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) + return -EAGAIN; + + port_id = pf->port_id; + writel(1 << port_id, bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); + + fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]); + if (fifo & BNXT_PTP_RX_FIFO_PENDING) { + bnxt_clr_rx_ts(bp); + return -EBUSY; + } + + *ts = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]); + *ts |= (u64)readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H]) << + 32; + + return 0; +} + +void bnxt_ptp_ts_task(struct work_struct *work) +{ + struct bnxt_ptp_cfg *ptp = container_of(work, struct bnxt_ptp_cfg, + ptp_ts_task); + u32 flags = PORT_TS_QUERY_REQ_FLAGS_PATH_TX; + struct skb_shared_hwtstamps timestamp; + struct bnxt *bp = ptp->bp; + u64 ts = 0, ns = 0; + int rc; + + if (!ptp->tx_skb) + return; + + rc = bnxt_hwrm_port_ts_query(bp, flags, &ts, NULL); + if (rc) { + netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n", + rc); + } else { + 
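+		/* Convert the raw PHC cycle count to nanoseconds with the
+		 * timecounter before handing the timestamp to the stack.
+		 */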
+		memset(&timestamp, 0, sizeof(timestamp));
+		ns = timecounter_cyc2time(&ptp->tc, ts);
+		timestamp.hwtstamp = ns_to_ktime(ns);
+		skb_tstamp_tx(ptp->tx_skb, &timestamp);
+	}
+
+	dev_kfree_skb_any(ptp->tx_skb);
+	ptp->tx_skb = NULL;
+	atomic_inc(&bp->ptp_cfg->tx_avail);
+}
+
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (ptp->tx_skb) {
+		netdev_err(bp->dev, "deferring skb: one SKB is still outstanding\n");
+		return -EAGAIN;
+	}
+	ptp->tx_skb = skb;
+	schedule_work(&ptp->ptp_ts_task);
+	return 0;
+}
+
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	u64 time;
+
+	time = READ_ONCE(ptp->old_time);
+	*ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
+	if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
+		*ts += BNXT_LO_TIMER_MASK + 1;
+
+	return 0;
+}
+
+int bnxt_ptp_start(struct bnxt *bp)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	int rc = 0;
+
+	if (!ptp)
+		return 0;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+		u32 flags = PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME;
+
+		rc = bnxt_hwrm_port_ts_query(bp, flags, &ptp->current_time,
+					     NULL);
+		ptp->old_time = ptp->current_time;
+	}
+	return rc;
+}
+
+static const struct ptp_clock_info bnxt_ptp_caps = {
+	.owner = THIS_MODULE,
+	.name = "bnxt clock",
+	.max_adj = BNXT_MAX_PHC_DRIFT,
+	.n_alarm = 0,
+	.n_ext_ts = 0,
+	.n_per_out = 1,
+	.n_pins = 0,
+	.pps = 0,
+	.adjfreq = bnxt_ptp_adjfreq,
+	.adjtime = bnxt_ptp_adjtime,
+#ifdef HAVE_PTP_GETTIMEX64
+	.gettimex64 = bnxt_ptp_gettimex,
+#else
+	.gettime64 = bnxt_ptp_gettime,
+#endif
+	.settime64 = bnxt_ptp_settime,
+	.enable = bnxt_ptp_enable,
+};
+
+int bnxt_ptp_init(struct bnxt *bp)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (!ptp)
+		return 0;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+		int rc;
+
+		rc = bnxt_map_ptp_regs(bp);
+		if (rc)
+			return rc;
+	}
+
+	atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+
+	memset(&ptp->cc, 0, sizeof(ptp->cc));
+	ptp->cc.read = bnxt_cc_read;
+	ptp->cc.mask = CYCLECOUNTER_MASK(64);
+	ptp->cc.shift = 0;
+	ptp->cc.mult = 1;
+
+	timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+	ptp->ptp_info = bnxt_ptp_caps;
+	ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
+	if (IS_ERR(ptp->ptp_clock))
+		ptp->ptp_clock = NULL;
+
+	INIT_WORK(&ptp->ptp_ts_task, bnxt_ptp_ts_task);
+	return 0;
+}
+
+void bnxt_ptp_free(struct bnxt *bp)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (!ptp)
+		return;
+
+	if (ptp->ptp_clock)
+		ptp_clock_unregister(ptp->ptp_clock);
+
+	cancel_work_sync(&ptp->ptp_ts_task);
+	ptp->ptp_clock = NULL;
+	if (ptp->tx_skb) {
+		dev_kfree_skb_any(ptp->tx_skb);
+		ptp->tx_skb = NULL;
+	}
+	bnxt_unmap_ptp_regs(bp);
+}
+
+#else
+
+int bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+	return 0;
+}
+
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+
+int bnxt_ptp_start(struct bnxt *bp)
+{
+	return 0;
+}
+
+int bnxt_ptp_init(struct bnxt *bp)
+{
+	return 0;
+}
+
+void bnxt_ptp_free(struct bnxt *bp)
+{
+}
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
new file mode 100644
index 000000000000..e9d4468428f4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -0,0 +1,81 @@
+/* Broadcom NetXtreme-C/E network driver.
+ * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_PTP_H +#define BNXT_PTP_H + +#define BNXT_MAX_PHC_DRIFT 31000000 +#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL +#define BNXT_HI_TIMER_MASK 0xffff00000000UL + +struct bnxt_ptp_cfg { +#ifdef HAVE_IEEE1588_SUPPORT + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct cyclecounter cc; + struct timecounter tc; + struct sk_buff *tx_skb; + u64 current_time; + u64 old_time; + struct work_struct ptp_ts_task; +#endif + struct bnxt *bp; + atomic_t tx_avail; +#define BNXT_MAX_TX_TS 1 + u16 rxctl; +#define BNXT_PTP_MSG_SYNC (1 << 0) +#define BNXT_PTP_MSG_DELAY_REQ (1 << 1) +#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2) +#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3) +#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8) +#define BNXT_PTP_MSG_DELAY_RESP (1 << 9) +#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10) +#define BNXT_PTP_MSG_ANNOUNCE (1 << 11) +#define BNXT_PTP_MSG_SIGNALING (1 << 12) +#define BNXT_PTP_MSG_MANAGEMENT (1 << 13) +#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \ + BNXT_PTP_MSG_DELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_RESP) + u8 tx_tstamp_en:1; + int rx_filter; + +#define BNXT_PTP_RX_TS_L 0 +#define BNXT_PTP_RX_TS_H 1 +#define BNXT_PTP_RX_SEQ 2 +#define BNXT_PTP_RX_FIFO 3 +#define BNXT_PTP_RX_FIFO_PENDING 0x1 +#define BNXT_PTP_RX_FIFO_ADV 4 +#define BNXT_PTP_RX_REGS 5 + +#define BNXT_PTP_TX_TS_L 0 +#define BNXT_PTP_TX_TS_H 1 +#define BNXT_PTP_TX_SEQ 2 +#define BNXT_PTP_TX_FIFO 3 +#define BNXT_PTP_TX_FIFO_EMPTY 0x2 +#define BNXT_PTP_TX_REGS 4 + u32 rx_regs[BNXT_PTP_RX_REGS]; + u32 rx_mapped_regs[BNXT_PTP_RX_REGS]; + u32 tx_regs[BNXT_PTP_TX_REGS]; + u32 tx_mapped_regs[BNXT_PTP_TX_REGS]; +}; + +int bnxt_ptp_get_current_time(struct bnxt *bp); +int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); +int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); +int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); +int bnxt_get_tx_ts(struct bnxt *bp, u64 *ts); +int bnxt_get_rx_ts(struct bnxt *bp, u64 *ts); +int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); +int bnxt_ptp_start(struct bnxt *bp); +int bnxt_ptp_init(struct bnxt *bp); +void bnxt_ptp_free(struct bnxt *bp); +void bnxt_ptp_ts_task(struct work_struct *work); + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f6f3454d6059..9d1a832d2f31 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,118 +12,152 @@ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> +#include <linux/rtnetlink.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_vfr.h" #include "bnxt_ethtool.h" +#include "bnxt_eem.h" #ifdef CONFIG_BNXT_SRIOV static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, - struct bnxt_vf_info *vf, u16 event_id) + struct bnxt_vf_info *vf, + u16 event_id) { - struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_input *req; struct hwrm_async_event_cmpl *async_cmpl; - int rc = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); + if (rc) + goto exit; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); if (vf) - req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); else /* broadcast this async event to all VFs */ - req.encap_async_event_target_id = cpu_to_le16(0xffff); - async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + req->encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = + (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl; async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); async_cmpl->event_id = cpu_to_le16(event_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); +exit: if (rc) netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. 
rc:%d\n", rc); return rc; } -static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) +#ifdef HAVE_NDO_GET_VF_CONFIG +static struct bnxt_vf_info *bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) + __acquires(&bp->sriov_lock) { + struct bnxt_vf_info *vf; + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { netdev_err(bp->dev, "vf ndo called though PF is down\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } + mutex_lock(&bp->sriov_lock); if (!bp->pf.active_vfs) { + mutex_unlock(&bp->sriov_lock); netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } if (vf_id >= bp->pf.active_vfs) { + mutex_unlock(&bp->sriov_lock); netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); - return -EINVAL; + return ERR_PTR(-EINVAL); } - return 0; + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + netdev_warn(bp->dev, "VF structure freed\n"); + return ERR_PTR(-ENODEV); + } + return &vf[vf_id]; } +static void bnxt_vf_ndo_end(struct bnxt *bp) + __releases(&bp->sriov_lock) +{ + mutex_unlock(&bp->sriov_lock); +} + +#ifdef HAVE_VF_SPOOFCHK int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); - struct bnxt_vf_info *vf; + struct hwrm_func_cfg_input *req; bool old_setting = false; + struct bnxt_vf_info *vf; u32 func_flags; int rc; if (bp->hwrm_spec_code < 0x10701) return -ENOTSUPP; - rc = bnxt_vf_ndo_prep(bp, vf_id); - if (rc) - return rc; + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); - vf = &bp->pf.vf[vf_id]; if (vf->flags & BNXT_VF_SPOOFCHK) old_setting = true; - if (old_setting == setting) + if (old_setting == setting) { + bnxt_vf_ndo_end(bp); return 0; + } - func_flags = vf->func_flags; if (setting) - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; else - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(func_flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - vf->func_flags = func_flags; - if (setting) - vf->flags |= BNXT_VF_SPOOFCHK; - else - vf->flags &= ~BNXT_VF_SPOOFCHK; + req->fid = cpu_to_le16(vf->fw_fid); + req->flags = cpu_to_le32(func_flags); + rc = hwrm_req_send(bp, req); + if (!rc) { + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } } + bnxt_vf_ndo_end(bp); return rc; } +#endif static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) return rc; - } - vf->func_qcfg_flags = le16_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - return 0; + + 
+	req->fid = cpu_to_le16(vf->fw_fid);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc)
+		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
+	hwrm_req_drop(bp, req);
+	return rc;
 }
 
 static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -135,22 +169,25 @@ static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
 }
 
+#ifdef HAVE_NDO_SET_VF_TRUST
 static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
 	int rc;
 
 	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(vf->fw_fid);
 	if (vf->flags & BNXT_VF_TRUST)
-		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 	else
-		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	return rc;
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -158,89 +195,288 @@
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_vf_info *vf;
 
-	if (bnxt_vf_ndo_prep(bp, vf_id))
+	vf = bnxt_vf_ndo_prep(bp, vf_id);
+	if (IS_ERR(vf))
 		return -EINVAL;
 
-	vf = &bp->pf.vf[vf_id];
 	if (trusted)
 		vf->flags |= BNXT_VF_TRUST;
 	else
 		vf->flags &= ~BNXT_VF_TRUST;
 
 	bnxt_hwrm_set_trusted_vf(bp, vf);
+	bnxt_vf_ndo_end(bp);
 	return 0;
 }
+#endif
+
+#ifdef HAVE_NDO_SET_VF_QUEUES
+static bool bnxt_param_ok(int new, u16 curr, u16 avail)
+{
+	int delta;
+
+	if (new <= curr)
+		return true;
+
+	delta = new - curr;
+	if (delta <= avail)
+		return true;
+	return false;
+}
+
+static void bnxt_adjust_ring_resc(struct bnxt *bp, struct bnxt_vf_info *vf,
+				  struct hwrm_func_vf_resource_cfg_input *req)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 cp = 0, grp = 0, stat = 0, vnic = 0;
+	u16 min_l2, max_l2, min_rss, max_rss;
+	u16 min_tx, max_tx, min_rx, max_rx;
+
+	min_tx = le16_to_cpu(req->min_tx_rings);
+	max_tx = le16_to_cpu(req->max_tx_rings);
+	min_rx = le16_to_cpu(req->min_rx_rings);
+	max_rx = le16_to_cpu(req->max_rx_rings);
+	min_rss = le16_to_cpu(req->min_rsscos_ctx);
+	max_rss = le16_to_cpu(req->max_rsscos_ctx);
+	min_l2 = le16_to_cpu(req->min_l2_ctxs);
+	max_l2 = le16_to_cpu(req->max_l2_ctxs);
+	if (!min_tx && !max_tx && !min_rx && !max_rx) {
+		min_rss = 0;
+		max_rss = 0;
+		min_l2 = 0;
+		max_l2 = 0;
+	} else if (bp->pf.vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MAXIMAL) {
+		u16 avail_cp_rings, avail_stat_ctx;
+		u16 avail_vnics, avail_ring_grps;
+
+		avail_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
+		avail_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
+		avail_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+		avail_vnics = hw_resc->max_vnics - bp->nr_vnics;
+
+		cp = max_t(u16, 2 * min_tx, min_rx);
+		if (cp > vf->min_cp_rings)
+			cp = min_t(u16, cp, avail_cp_rings + vf->min_cp_rings);
+		grp = min_tx;
+		if (grp > vf->min_ring_grps)
+			grp = min_t(u16, avail_ring_grps + vf->min_ring_grps,
+				    grp);
+		stat = min_rx;
+		if (stat > vf->min_stat_ctxs)
+			stat = min_t(u16, avail_stat_ctx + vf->min_stat_ctxs,
+				     stat);
+		vnic = min_rx;
+		if (vnic > vf->min_vnics)
+			vnic = min_t(u16, vnic, avail_vnics + vf->min_vnics);
+
+	} else {
+		return;
+	}
+	req->min_cmpl_rings = cpu_to_le16(cp);
+	req->max_cmpl_rings = cpu_to_le16(cp);
+	req->min_hw_ring_grps = cpu_to_le16(grp);
+	req->max_hw_ring_grps = cpu_to_le16(grp);
+	req->min_stat_ctx = cpu_to_le16(stat);
+	req->max_stat_ctx = cpu_to_le16(stat);
+	req->min_vnics = cpu_to_le16(vnic);
+	req->max_vnics = cpu_to_le16(vnic);
+	req->min_rsscos_ctx = cpu_to_le16(min_rss);
+	req->max_rsscos_ctx = cpu_to_le16(max_rss);
+	req->min_l2_ctxs = cpu_to_le16(min_l2);
+	req->max_l2_ctxs = cpu_to_le16(max_l2);
+}
+
+static void bnxt_record_ring_resc(struct bnxt *bp, struct bnxt_vf_info *vf,
+				  struct hwrm_func_vf_resource_cfg_input *req)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+	hw_resc->max_tx_rings += vf->min_tx_rings;
+	hw_resc->max_rx_rings += vf->min_rx_rings;
+	vf->min_tx_rings = le16_to_cpu(req->min_tx_rings);
+	vf->max_tx_rings = le16_to_cpu(req->max_tx_rings);
+	vf->min_rx_rings = le16_to_cpu(req->min_rx_rings);
+	vf->max_rx_rings = le16_to_cpu(req->max_rx_rings);
+	hw_resc->max_tx_rings -= vf->min_tx_rings;
+	hw_resc->max_rx_rings -= vf->min_rx_rings;
+	if (bp->pf.vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MAXIMAL) {
+		hw_resc->max_cp_rings += vf->min_cp_rings;
+		hw_resc->max_hw_ring_grps += vf->min_ring_grps;
+		hw_resc->max_stat_ctxs += vf->min_stat_ctxs;
+		hw_resc->max_vnics += vf->min_vnics;
+		vf->min_cp_rings = le16_to_cpu(req->min_cmpl_rings);
+		vf->min_ring_grps = le16_to_cpu(req->min_hw_ring_grps);
+		vf->min_stat_ctxs = le16_to_cpu(req->min_stat_ctx);
+		vf->min_vnics = le16_to_cpu(req->min_vnics);
+		hw_resc->max_cp_rings -= vf->min_cp_rings;
+		hw_resc->max_hw_ring_grps -= vf->min_ring_grps;
+		hw_resc->max_stat_ctxs -= vf->min_stat_ctxs;
+		hw_resc->max_vnics -= vf->min_vnics;
+	}
+}
+
+int bnxt_set_vf_queues(struct net_device *dev, int vf_id, int min_txq,
+		       int max_txq, int min_rxq, int max_rxq)
+{
+	struct hwrm_func_vf_resource_cfg_input *req;
+	struct bnxt *bp = netdev_priv(dev);
+	u16 avail_tx_rings, avail_rx_rings;
+	struct bnxt_hw_resc *hw_resc;
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	vf = bnxt_vf_ndo_prep(bp, vf_id);
+	if (IS_ERR(vf))
+		return -EINVAL;
+
+	if (!BNXT_NEW_RM(bp) ||
+	    !(bp->fw_cap & BNXT_FW_CAP_VF_RES_MIN_GUARANTEED)) {
+		bnxt_vf_ndo_end(bp);
+		return -EOPNOTSUPP;
+	}
+
+	hw_resc = &bp->hw_resc;
+
+	avail_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		avail_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+	else
+		avail_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+
+	if (!bnxt_param_ok(min_txq, vf->min_tx_rings, avail_tx_rings) ||
+	    !bnxt_param_ok(min_rxq, vf->min_rx_rings, avail_rx_rings) ||
+	    !bnxt_param_ok(max_txq, vf->max_tx_rings, avail_tx_rings) ||
+	    !bnxt_param_ok(max_rxq, vf->max_rx_rings, avail_rx_rings)) {
+		bnxt_vf_ndo_end(bp);
+		return -ENOBUFS;
+	}
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
+	if (rc) {
+		bnxt_vf_ndo_end(bp);
+		return rc;
+	}
+
+	rc = hwrm_req_replace(bp, req, &bp->vf_resc_cfg_input, sizeof(*req));
+	if (rc) {
+		bnxt_vf_ndo_end(bp);
+		return rc;
+	}
+
+	req->vf_id = cpu_to_le16(vf->fw_fid);
+	req->min_tx_rings = cpu_to_le16(min_txq);
+	req->min_rx_rings = cpu_to_le16(min_rxq);
+	req->max_tx_rings = cpu_to_le16(max_txq);
+	req->max_rx_rings = cpu_to_le16(max_rxq);
+	req->flags = cpu_to_le16(FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED);
+
+	bnxt_adjust_ring_resc(bp, vf, req);
+
+	hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc)
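+		/* Descriptive note (not in the original patch): FW accepted
+		 * the new reservations, so record them in the VF structure
+		 * and adjust the PF's resource accounting.
+		 */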
+		bnxt_record_ring_resc(bp, vf, req);
+	hwrm_req_drop(bp, req);
+	bnxt_vf_ndo_end(bp);
+	return rc;
+}
+#endif
 
 int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 		       struct ifla_vf_info *ivi)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_vf_info *vf;
-	int rc;
 
-	rc = bnxt_vf_ndo_prep(bp, vf_id);
-	if (rc)
-		return rc;
+	vf = bnxt_vf_ndo_prep(bp, vf_id);
+	if (IS_ERR(vf))
+		return PTR_ERR(vf);
 
 	ivi->vf = vf_id;
-	vf = &bp->pf.vf[vf_id];
 	if (is_valid_ether_addr(vf->mac_addr))
 		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
 	else
 		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
+#ifdef HAVE_IFLA_TX_RATE
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
+#else
+	ivi->tx_rate = vf->max_tx_rate;
+#endif
 	ivi->vlan = vf->vlan;
 	if (vf->flags & BNXT_VF_QOS)
 		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 	else
 		ivi->qos = 0;
+#ifdef HAVE_VF_SPOOFCHK
 	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
+#endif
+#ifdef HAVE_NDO_SET_VF_TRUST
 	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
+#endif
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
 	if (!(vf->flags & BNXT_VF_LINK_FORCED))
 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 	else if (vf->flags & BNXT_VF_LINK_UP)
 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
 	else
 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+#endif
+#ifdef HAVE_NDO_SET_VF_QUEUES
+	ivi->min_tx_queues = vf->min_tx_rings;
+	ivi->max_tx_queues = vf->max_tx_rings;
+	ivi->min_rx_queues = vf->min_rx_rings;
+	ivi->max_rx_queues = vf->max_rx_rings;
+#endif
+	bnxt_vf_ndo_end(bp);
 	return 0;
 }
 
 int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
+	u16 fw_fid;
 	int rc;
 
-	rc = bnxt_vf_ndo_prep(bp, vf_id);
-	if (rc)
-		return rc;
+	vf = bnxt_vf_ndo_prep(bp, vf_id);
+	if (IS_ERR(vf))
+		return PTR_ERR(vf);
 	/* reject bc or mc mac addr, zero mac addr means allow
 	 * VF to use its own mac addr
 	 */
 	if (is_multicast_ether_addr(mac)) {
+		bnxt_vf_ndo_end(bp);
 		netdev_err(dev, "Invalid VF ethernet address\n");
 		return -EINVAL;
 	}
-	vf = &bp->pf.vf[vf_id];
 
 	memcpy(vf->mac_addr, mac, ETH_ALEN);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	fw_fid = vf->fw_fid;
+	bnxt_vf_ndo_end(bp);
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(fw_fid);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+	return hwrm_req_send(bp, req);
 }
 
+#ifdef NEW_NDO_SET_VF_VLAN
 int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 		     __be16 vlan_proto)
+#else
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+#endif
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	u16 vlan_tag;
 	int rc;
@@ -248,89 +484,147 @@
 	if (bp->hwrm_spec_code < 0x10201)
 		return -ENOTSUPP;
 
+#ifdef NEW_NDO_SET_VF_VLAN
 	if (vlan_proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
+#endif
 
-	rc = bnxt_vf_ndo_prep(bp, vf_id);
-	if (rc)
-		return rc;
+	vf = bnxt_vf_ndo_prep(bp, vf_id);
+	if (IS_ERR(vf))
+		return PTR_ERR(vf);
 
 	/* TODO: needed to implement proper handling
of user priority, * currently fail the command if there is valid priority */ - if (vlan_id > 4095 || qos) + if (vlan_id > 4095 || qos) { + bnxt_vf_ndo_end(bp); return -EINVAL; + } - vf = &bp->pf.vf[vf_id]; vlan_tag = vlan_id; - if (vlan_tag == vf->vlan) + if (vlan_tag == vf->vlan) { + bnxt_vf_ndo_end(bp); return 0; + } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); - req.dflt_vlan = cpu_to_le16(vlan_tag); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - vf->vlan = vlan_tag; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->dflt_vlan = cpu_to_le16(vlan_tag); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->vlan = vlan_tag; + } + bnxt_vf_ndo_end(bp); return rc; } +#ifdef HAVE_IFLA_TX_RATE int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, int max_tx_rate) +#else +int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int max_tx_rate) +#endif { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u32 pf_link_speed; int rc; - rc = bnxt_vf_ndo_prep(bp, vf_id); - if (rc) - return rc; + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); - vf = &bp->pf.vf[vf_id]; pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); if (max_tx_rate > pf_link_speed) { + bnxt_vf_ndo_end(bp); netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", max_tx_rate, vf_id); return -EINVAL; } +#ifdef HAVE_IFLA_TX_RATE if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { + bnxt_vf_ndo_end(bp); netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", min_tx_rate, vf_id); return -EINVAL; } - if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) + if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) { + bnxt_vf_ndo_end(bp); return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(max_tx_rate); - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(min_tx_rate); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - vf->min_tx_rate = min_tx_rate; - vf->max_tx_rate = max_tx_rate; } +#else + if (max_tx_rate == vf->max_tx_rate) { + bnxt_vf_ndo_end(bp); + return 0; + } +#endif + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req->max_bw = cpu_to_le32(max_tx_rate); +#ifdef HAVE_IFLA_TX_RATE + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req->min_bw = cpu_to_le32(min_tx_rate); +#endif + rc = hwrm_req_send(bp, req); + if (!rc) { +#ifdef HAVE_IFLA_TX_RATE + vf->min_tx_rate = min_tx_rate; +#endif + vf->max_tx_rate = max_tx_rate; + } + } + bnxt_vf_ndo_end(bp); return rc; } +static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id) +{ + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) + return 0; + + vf = &bp->pf.vf[vf_id]; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = 
cpu_to_le16(vf->fw_fid); + switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) { + case BNXT_VF_LINK_FORCED: + req->options = + FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN; + break; + case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP): + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP; + break; + default: + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + break; + } + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + return hwrm_req_send(bp, req); +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) { struct bnxt *bp = netdev_priv(dev); struct bnxt_vf_info *vf; int rc; - rc = bnxt_vf_ndo_prep(bp, vf_id); - if (rc) - return rc; - - vf = &bp->pf.vf[vf_id]; + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); switch (link) { @@ -345,14 +639,23 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) break; default: netdev_err(bp->dev, "Invalid link option\n"); - rc = -EINVAL; - break; + bnxt_vf_ndo_end(bp); + return -EINVAL; } - if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) + + if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); + else + rc = bnxt_set_vf_link_admin_state(bp, vf_id); + + if (rc) + rc = -EIO; + bnxt_vf_ndo_end(bp); return rc; } +#endif +#endif static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) { @@ -368,21 +671,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) { - int i, rc = 0; + struct hwrm_func_vf_resc_free_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_func_vf_resc_free_input req = {0}; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { - req.vf_id = cpu_to_le16(i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(i); + rc = hwrm_req_send(bp, req); if (rc) break; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -391,48 +695,58 @@ static void bnxt_free_vf_resources(struct bnxt *bp) struct pci_dev *pdev = bp->pdev; int i; + RCU_INIT_POINTER(bp->pf.vf, NULL); + synchronize_rcu(); + kfree(bp->pf.vf); + kfree(bp->pf.vf_event_bmap); bp->pf.vf_event_bmap = NULL; - for (i = 0; i < 4; i++) { + for (i = 0; i < BNXT_MAX_VF_CMD_FWD_PAGES; i++) { if (bp->pf.hwrm_cmd_req_addr[i]) { - dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE, + dma_free_coherent(&pdev->dev, 1 << bp->pf.vf_hwrm_cmd_req_page_shift, bp->pf.hwrm_cmd_req_addr[i], bp->pf.hwrm_cmd_req_dma_addr[i]); bp->pf.hwrm_cmd_req_addr[i] = NULL; } } - - kfree(bp->pf.vf); - bp->pf.vf = NULL; } static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) { struct pci_dev *pdev = bp->pdev; u32 nr_pages, size, i, j, k = 0; + u32 page_size, reqs_per_page; + void *p; - bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); - if (!bp->pf.vf) + p = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); + if (!p) return -ENOMEM; + rcu_assign_pointer(bp->pf.vf, p); bnxt_set_vf_attr(bp, num_vfs); size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; - nr_pages = size / BNXT_PAGE_SIZE; - if (size & (BNXT_PAGE_SIZE - 1)) - 
nr_pages++; + page_size = BNXT_PAGE_SIZE; + bp->pf.vf_hwrm_cmd_req_page_shift = BNXT_PAGE_SHIFT; + /* Adjust the page size to make sure we fit all VFs to up to 4 chunks*/ + while (size > page_size * BNXT_MAX_VF_CMD_FWD_PAGES) { + page_size *= 2; + bp->pf.vf_hwrm_cmd_req_page_shift++; + } + nr_pages = DIV_ROUND_UP(size, page_size); + reqs_per_page = page_size / BNXT_HWRM_REQ_MAX_SIZE; for (i = 0; i < nr_pages; i++) { bp->pf.hwrm_cmd_req_addr[i] = - dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE, + dma_alloc_coherent(&pdev->dev, page_size, &bp->pf.hwrm_cmd_req_dma_addr[i], GFP_KERNEL); if (!bp->pf.hwrm_cmd_req_addr[i]) return -ENOMEM; - for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) { + for (j = 0; j < reqs_per_page && k < num_vfs; j++) { struct bnxt_vf_info *vf = &bp->pf.vf[k]; vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + @@ -444,8 +758,7 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) } } - /* Max 128 VF's */ - bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); + bp->pf.vf_event_bmap = kzalloc(ALIGN(DIV_ROUND_UP(num_vfs, 8), sizeof(long)), GFP_KERNEL); if (!bp->pf.vf_event_bmap) return -ENOMEM; @@ -455,52 +768,57 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) { - struct hwrm_func_buf_rgtr_input req = {0}; + struct hwrm_func_buf_rgtr_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); + if (rc) + return rc; - req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); - req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); - req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); - req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); - req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); - req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); - req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req->req_buf_page_size = cpu_to_le16(bp->pf.vf_hwrm_cmd_req_page_shift); + req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } -/* Caller holds bp->hwrm_cmd_lock mutex lock */ -static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; vf = &bp->pf.vf[vf_id]; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); + req->fid = cpu_to_le16(vf->fw_fid); if (is_valid_ether_addr(vf->mac_addr)) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN); } if (vf->vlan) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - req.dflt_vlan = cpu_to_le16(vf->vlan); 
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req->dflt_vlan = cpu_to_le16(vf->vlan); } if (vf->max_tx_rate) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(vf->max_tx_rate); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req->max_bw = cpu_to_le32(vf->max_tx_rate); #ifdef HAVE_IFLA_TX_RATE - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(vf->min_tx_rate); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req->min_bw = cpu_to_le32(vf->min_tx_rate); #endif } if (vf->flags & BNXT_VF_TRUST) - req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* Only called by PF to reserve resources for VFs, returns actual number of @@ -508,15 +826,18 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) */ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) { - struct hwrm_func_vf_resource_cfg_input req = {0}; + struct hwrm_func_vf_resource_cfg_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; u16 vf_stat_ctx, vf_vnics, vf_ring_grps; struct bnxt_pf_info *pf = &bp->pf; - int i, rc = 0, min = 1; + int i, rc, min = 1; u16 vf_msix = 0; + u16 vf_rss; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); @@ -533,23 +854,23 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; vf_vnics = hw_resc->max_vnics - bp->nr_vnics; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); + vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; - req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); - req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { min = 0; - req.min_rsscos_ctx = cpu_to_le16(min); + req->min_rsscos_ctx = cpu_to_le16(min); } if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { - req.min_cmpl_rings = cpu_to_le16(min); - req.min_tx_rings = cpu_to_le16(min); - req.min_rx_rings = cpu_to_le16(min); - req.min_l2_ctxs = cpu_to_le16(min); - req.min_vnics = cpu_to_le16(min); - req.min_stat_ctx = cpu_to_le16(min); + req->min_cmpl_rings = cpu_to_le16(min); + req->min_tx_rings = cpu_to_le16(min); + req->min_rx_rings = cpu_to_le16(min); + req->min_l2_ctxs = cpu_to_le16(min); + req->min_vnics = cpu_to_le16(min); + req->min_stat_ctx = cpu_to_le16(min); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) - req.min_hw_ring_grps = cpu_to_le16(min); + req->min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; @@ -557,55 +878,78 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_vnics /= num_vfs; vf_stat_ctx /= num_vfs; vf_ring_grps /= num_vfs; + vf_rss /= num_vfs; - req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.min_tx_rings = cpu_to_le16(vf_tx_rings); - req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.min_vnics = cpu_to_le16(vf_vnics); - req.min_stat_ctx = 
cpu_to_le16(vf_stat_ctx); - req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->min_tx_rings = cpu_to_le16(vf_tx_rings); + req->min_rx_rings = cpu_to_le16(vf_rx_rings); + req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->min_vnics = cpu_to_le16(vf_vnics); + req->min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_rsscos_ctx = cpu_to_le16(vf_rss); } - req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.max_tx_rings = cpu_to_le16(vf_tx_rings); - req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.max_vnics = cpu_to_le16(vf_vnics); - req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->max_tx_rings = cpu_to_le16(vf_tx_rings); + req->max_rx_rings = cpu_to_le16(vf_rx_rings); + req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->max_vnics = cpu_to_le16(vf_vnics); + req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_rsscos_ctx = cpu_to_le16(vf_rss); if (bp->flags & BNXT_FLAG_CHIP_P5) - req.max_msix = cpu_to_le16(vf_msix / num_vfs); + req->max_msix = vf_msix / num_vfs; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { - if (reset) - __bnxt_set_vf_params(bp, i); + struct bnxt_vf_info *vf = &pf->vf[i]; - req.vf_id = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + vf->fw_fid = pf->first_vf_id + i; + if (bnxt_set_vf_link_admin_state(bp, i)) { + rc = -EIO; + break; + } + + if (reset) { + rc = __bnxt_set_vf_params(bp, i); + if (rc) + break; + } + + req->vf_id = cpu_to_le16(vf->fw_fid); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; - pf->vf[i].fw_fid = pf->first_vf_id + i; + vf->min_tx_rings = le16_to_cpu(req->min_tx_rings); + vf->max_tx_rings = vf_tx_rings; + vf->min_rx_rings = le16_to_cpu(req->min_rx_rings); + vf->max_rx_rings = vf_rx_rings; + vf->min_cp_rings = le16_to_cpu(req->min_cmpl_rings); + vf->min_stat_ctxs = le16_to_cpu(req->min_stat_ctx); + vf->min_ring_grps = le16_to_cpu(req->min_hw_ring_grps); + vf->min_vnics = le16_to_cpu(req->min_vnics); } - mutex_unlock(&bp->hwrm_cmd_lock); + if (pf->active_vfs) { u16 n = pf->active_vfs; - hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; - hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; - hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * - n; - hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; - hw_resc->max_rsscos_ctxs -= pf->active_vfs; - hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; - hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; + hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= + le16_to_cpu(req->min_hw_ring_grps) * n; + hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n; + hw_resc->max_rsscos_ctxs -= + le16_to_cpu(req->min_rsscos_ctx) * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; if (bp->flags & BNXT_FLAG_CHIP_P5) - hw_resc->max_irqs -= vf_msix * n; + hw_resc->max_nqs -= vf_msix * n; + memcpy(&bp->vf_resc_cfg_input, req, sizeof(*req)); rc = pf->active_vfs; } + hwrm_req_drop(bp, req); return rc; } @@ -614,15 +958,17 @@ static int 
bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) */ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { - u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_cfg_input *req; int total_vf_tx_rings = 0; u16 vf_ring_grps; + u32 rc, mtu, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; /* Remaining rings are distributed equally amongs VF's for now */ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; @@ -638,50 +984,59 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | - FUNC_CFG_REQ_ENABLES_MRU | - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); - mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - req.mru = cpu_to_le16(mtu); - req.mtu = cpu_to_le16(mtu); + if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) { + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + req->enables |= + cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + } - req.num_rsscos_ctxs = cpu_to_le16(1); - req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.num_tx_rings = cpu_to_le16(vf_tx_rings); - req.num_rx_rings = cpu_to_le16(vf_rx_rings); - req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.num_l2_ctxs = cpu_to_le16(4); + mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + req->mru = cpu_to_le16(mtu); + req->mtu = cpu_to_le16(mtu); - req.num_vnics = cpu_to_le16(vf_vnics); + req->num_rsscos_ctxs = cpu_to_le16(1); + req->num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->num_tx_rings = cpu_to_le16(vf_tx_rings); + req->num_rx_rings = cpu_to_le16(vf_rx_rings); + req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->num_l2_ctxs = cpu_to_le16(4); + + req->num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ - req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { + struct bnxt_vf_info *vf = &pf->vf[i]; int vf_tx_rsvd = vf_tx_rings; - req.fid = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; - pf->vf[i].fw_fid = le16_to_cpu(req.fid); - rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, - &vf_tx_rsvd); + vf->fw_fid = le16_to_cpu(req->fid); + rc = __bnxt_hwrm_get_tx_rings(bp, vf->fw_fid, &vf_tx_rsvd); if (rc) break; total_vf_tx_rings += vf_tx_rsvd; + vf->min_tx_rings = vf_tx_rsvd; + 
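+		/* Descriptive note (not in the original patch): rings are
+		 * statically partitioned among VFs in this legacy
+		 * (non resource-manager) path, so the reserved count serves
+		 * as both the minimum and the maximum.
+		 */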
vf->max_tx_rings = vf_tx_rsvd; + vf->min_rx_rings = vf_rx_rings; + vf->max_rx_rings = vf_rx_rings; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (pf->active_vfs) { hw_resc->max_tx_rings -= total_vf_tx_rings; hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; @@ -720,8 +1075,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) *num_vfs = 0; return rc; } - netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", - rc); + netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); *num_vfs = rc; } @@ -761,12 +1115,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } - if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings || - avail_cp < min_rx_rings) + if ((hw_resc->max_vnics - bp->nr_vnics < min_rx_rings) || + (avail_cp < min_rx_rings)) rx_ok = 0; - if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings && - avail_cp >= min_tx_rings) + if ((hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings) && + (avail_cp >= min_tx_rings)) tx_ok = 1; if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= @@ -821,7 +1175,9 @@ void bnxt_sriov_disable(struct bnxt *bp) if (!num_vfs) return; - /* synchronize VF and VF-rep create and destroy */ + /* synchronize VF and VF-rep create and destroy + * and to protect the array of VF structures + */ mutex_lock(&bp->sriov_lock); bnxt_vf_reps_destroy(bp); @@ -835,11 +1191,12 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Free the HW resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, num_vfs); } - mutex_unlock(&bp->sriov_lock); bnxt_free_vf_resources(bp); bp->pf.active_vfs = 0; + mutex_unlock(&bp->sriov_lock); + /* Reclaim all resources for the PF. */ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); @@ -896,27 +1253,80 @@ sriov_cfg_exit: return num_vfs; } +#ifndef PCIE_SRIOV_CONFIGURE + +static struct workqueue_struct *bnxt_iov_wq; + +void bnxt_sriov_init(unsigned int num_vfs) +{ + if (num_vfs) + bnxt_iov_wq = create_singlethread_workqueue("bnxt_iov_wq"); +} + +void bnxt_sriov_exit(void) +{ + if (bnxt_iov_wq) + destroy_workqueue(bnxt_iov_wq); + bnxt_iov_wq = NULL; +} + +static void bnxt_iov_task(struct work_struct *work) +{ + struct bnxt *bp; + + bp = container_of(work, struct bnxt, iov_task); + bnxt_sriov_configure(bp->pdev, bp->req_vfs); +} + +void bnxt_start_sriov(struct bnxt *bp, int num_vfs) +{ + int pos, req_vfs; + + if (!num_vfs || !BNXT_PF(bp)) + return; + + pos = pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + return; + } else { + u16 t_vf = 0; + + pci_read_config_word(bp->pdev, pos + PCI_SRIOV_TOTAL_VF, &t_vf); + req_vfs = min_t(int, num_vfs, (int)t_vf); + } + + if (!bnxt_iov_wq) { + netdev_warn(bp->dev, "Work queue not available to start SRIOV\n"); + return; + } + bp->req_vfs = req_vfs; + INIT_WORK(&bp->iov_task, bnxt_iov_task); + queue_work(bnxt_iov_wq, &bp->iov_task); +} +#endif + static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, void *encap_resp, __le64 encap_resp_addr, __le16 encap_resp_cpr, u32 msg_size) { - int rc = 0; - struct hwrm_fwd_resp_input req = {0}; + struct hwrm_fwd_resp_input *req; + int rc; if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_len = cpu_to_le16(msg_size); + req->encap_resp_addr = 
encap_resp_addr; + req->encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req->encap_resp, encap_resp, msg_size); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_len = cpu_to_le16(msg_size); - req.encap_resp_addr = encap_resp_addr; - req.encap_resp_cmpl_ring = encap_resp_cpr; - memcpy(req.encap_resp, encap_resp, msg_size); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); return rc; @@ -925,19 +1335,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_input *req; + int rc; - if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) - return -EINVAL; + rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); + if (!rc) { + if (msg_size > sizeof(req->encap_request)) + msg_size = sizeof(req->encap_request); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); return rc; @@ -946,19 +1358,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_input *req; + int rc; if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) - return -EINVAL; + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); return rc; @@ -1021,6 +1435,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) } if (mac_ok) return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); } @@ -1038,10 +1453,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) phy_qcfg_req = (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; - mutex_lock(&bp->hwrm_cmd_lock); + mutex_lock(&bp->link_lock); memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; phy_qcfg_resp.valid = 1; @@ -1078,6 +1493,80 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) return rc; } +static int bnxt_process_vf_msg(struct bnxt *bp, struct hwrm_oem_cmd_input *req) +{ + struct bnxt_oem_cmd_req *oem_cmd = + (struct bnxt_oem_cmd_req *)req->oem_data; + int rc = -EINVAL; + u32 entry_num; + + switch (le32_to_cpu(oem_cmd->req_type)) { + case BNXT_OEM_CMD_TYPE_EEM_SYS_MEMORY: + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_EEM) || + !bp->eem_info) + return -EACCES; + + entry_num = le32_to_cpu(oem_cmd->req.eem_req.entry_num); + if (entry_num) + rc = bnxt_eem_cfg_system_memory(bp, entry_num); + else + rc = bnxt_eem_clear_cfg_system_memory(bp); + break; + default: + break; + } + + return rc; +} + +static int bnxt_clear_vf_message(struct bnxt *bp, + struct hwrm_oem_cmd_input *req) +{ + struct bnxt_oem_cmd_req *oem_cmd = + (struct bnxt_oem_cmd_req *)req->oem_data; + int rc = 0; + + switch (le32_to_cpu(oem_cmd->req_type)) { + case BNXT_OEM_CMD_TYPE_EEM_SYS_MEMORY: + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_EEM) || + !bp->eem_info) + return -EACCES; + + rc = bnxt_eem_clear_cfg_system_memory(bp); + break; + default: + break; + } + + return rc; +} + +static int bnxt_hwrm_oem_cmd(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_oem_cmd_input *req = vf->hwrm_cmd_req_addr; + struct hwrm_oem_cmd_output resp; + int rc = 0; + + rc = bnxt_process_vf_msg(bp, req); + if (rc) + return bnxt_hwrm_fwd_err_resp(bp, vf, sizeof(*req)); + + resp.error_code = 0; + resp.IANA = PCI_VENDOR_ID_BROADCOM; + resp.req_type = cpu_to_le16(req->req_type); + resp.resp_len = cpu_to_le16(sizeof(struct hwrm_oem_cmd_output)); + resp.valid = 1; + + rc = bnxt_hwrm_fwd_resp(bp, vf, &resp, req->resp_addr, + req->cmpl_ring, sizeof(resp)); + if (rc) { + bnxt_clear_vf_message(bp, req); + netdev_err(bp->dev, "hwrm_fwd_resp failed. 
rc:%d\n", rc); + } + + return rc; +} + static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) { int rc = 0; @@ -1101,7 +1590,11 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) case HWRM_PORT_PHY_QCFG: rc = bnxt_vf_set_link(bp, vf); break; + case HWRM_OEM_CMD: + rc = bnxt_hwrm_oem_cmd(bp, vf); + break; default: + rc = bnxt_hwrm_fwd_err_resp(bp, vf, bp->hwrm_max_req_len); break; } return rc; @@ -1125,14 +1618,16 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) void bnxt_update_vf_mac(struct bnxt *bp) { - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) + return; - mutex_lock(&bp->hwrm_cmd_lock); - if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + req->fid = cpu_to_le16(0xffff); + + resp = hwrm_req_hold(bp, req); + if (hwrm_req_send(bp, req)) goto update_vf_mac_exit; /* Store MAC address from the firmware. There are 2 cases: @@ -1149,12 +1644,12 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (is_valid_ether_addr(bp->vf.mac_addr)) memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc = 0; if (!BNXT_VF(bp)) @@ -1165,10 +1660,14 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) rc = -EADDRNOTAVAIL; goto mac_done; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + goto mac_done; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + rc = hwrm_req_send(bp, req); mac_done: if (rc && strict) { rc = -EADDRNOTAVAIL; @@ -1178,6 +1677,52 @@ mac_done: } return 0; } + +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) { + vf = &vf[vf_idx]; + if (state == EVENT_DATA1_VNIC_CHNG_VNIC_STATE_ALLOC) + vf->vnic_state_pending = 1; + else if (state == EVENT_DATA1_VNIC_CHNG_VNIC_STATE_FREE) + vf->vnic_state_pending = 0; + } + rcu_read_unlock(); +} + +void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) { + vf = &vf[vf_idx]; + vf->vnic_state = vf->vnic_state_pending; + } + rcu_read_unlock(); +} + +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf = vf; + bool up = false; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) + up = !!vf[vf_idx].vnic_state; + rcu_read_unlock(); + return up; +} + #else int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) @@ -1204,4 +1749,17 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { return 0; } + +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state) +{ +} + +void bnxt_commit_vf_vnic(struct bnxt 
*bp, u32 vf_idx) +{ +} + +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx) +{ + return false; +} #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index 629641bf6fc5..5bab2246ee0a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,26 +19,50 @@ ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\ offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id)) -#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \ - ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\ - offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id)) - #define BNXT_VF_MIN_RSS_CTX 1 #define BNXT_VF_MAX_RSS_CTX 1 #define BNXT_VF_MIN_L2_CTX 1 #define BNXT_VF_MAX_L2_CTX 4 +#ifdef HAVE_NDO_GET_VF_CONFIG int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); int bnxt_set_vf_mac(struct net_device *, int, u8 *); +#ifdef NEW_NDO_SET_VF_VLAN int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); +#else +int bnxt_set_vf_vlan(struct net_device *, int, u16, u8); +#endif +#ifdef HAVE_IFLA_TX_RATE int bnxt_set_vf_bw(struct net_device *, int, int, int); +#else +int bnxt_set_vf_bw(struct net_device *, int, int); +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE int bnxt_set_vf_link_state(struct net_device *, int, int); +#endif +#ifdef HAVE_VF_SPOOFCHK int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); +#endif +#ifdef HAVE_NDO_SET_VF_QUEUES +int bnxt_set_vf_queues(struct net_device *dev, int vf_id, int min_txq, + int max_txq, int min_rxq, int max_rxq); +#endif +#endif int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); +#ifndef PCIE_SRIOV_CONFIGURE +void bnxt_start_sriov(struct bnxt *, int); +void bnxt_sriov_init(unsigned int); +void bnxt_sriov_exit(void); +#endif int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); int bnxt_approve_mac(struct bnxt *, u8 *, bool); +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state); +void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx); +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index c8062d020d1e..2e5b454086da 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -10,20 +10,30 @@ #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/if_vlan.h> +#if defined(HAVE_TC_FLOW_CLS_OFFLOAD) || defined(HAVE_TC_CLS_FLOWER_OFFLOAD) #include <net/flow_dissector.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_skbedit.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_vlan.h> +#include <net/tc_act/tc_pedit.h> +#ifdef HAVE_TCF_TUNNEL #include <net/tc_act/tc_tunnel_key.h> +#endif +#include <net/vxlan.h> +#endif /* HAVE_TC_FLOW_CLS_OFFLOAD || HAVE_TC_CLS_FLOWER_OFFLOAD 
*/ +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_sriov.h" #include "bnxt_tc.h" #include "bnxt_vfr.h" +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + #define BNXT_FID_INVALID 0xffff #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) @@ -36,6 +46,8 @@ #define is_vid_exactmatch(vlan_tci_mask) \ ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) +static bool is_wildcard(void *mask, int len); +static bool is_exactmatch(void *mask, int len); /* Return the dst fid of the func for flow forwarding * For PFs: src_fid is the fid of the PF * For VF-reps: src_fid the fid of the VF @@ -46,19 +58,22 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) /* check if dev belongs to the same switch */ if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { - netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n", dev->ifindex); return BNXT_FID_INVALID; } +#ifdef CONFIG_VF_REPS /* Is dev a VF-rep? */ if (bnxt_dev_is_vf_rep(dev)) return bnxt_vf_rep_get_fid(dev); +#endif bp = netdev_priv(dev); return bp->pf.fw_fid; } +#ifdef HAVE_FLOW_OFFLOAD_H static int bnxt_tc_parse_redir(struct bnxt *bp, struct bnxt_tc_actions *actions, const struct flow_action_entry *act) @@ -66,7 +81,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, struct net_device *dev = act->dev; if (!dev) { - netdev_info(bp->dev, "no dev in mirred action"); + netdev_info(bp->dev, "no dev in mirred action\n"); return -EINVAL; } @@ -75,6 +90,37 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, return 0; } +#else + +static int bnxt_tc_parse_redir(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(tc_act); + + if (!dev) { + netdev_info(bp->dev, "no dev in mirred action"); + return -EINVAL; + } +#else + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); + if (!dev) { + netdev_info(bp->dev, "no dev for ifindex=%d", ifindex); + return -EINVAL; + } +#endif + + actions->flags |= BNXT_TC_ACTION_FLAG_FWD; + actions->dst_dev = dev; + return 0; +} +#endif + +#ifdef HAVE_FLOW_OFFLOAD_H static int bnxt_tc_parse_vlan(struct bnxt *bp, struct bnxt_tc_actions *actions, const struct flow_action_entry *act) @@ -94,6 +140,29 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp, return 0; } +#else + +static int bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + switch (tcf_vlan_action(tc_act)) { + case TCA_VLAN_ACT_POP: + actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; + break; + case TCA_VLAN_ACT_PUSH: + actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; + actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); + actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} +#endif + +#ifdef HAVE_FLOW_OFFLOAD_H static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, struct bnxt_tc_actions *actions, const struct flow_action_entry *act) @@ -101,8 +170,17 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, const struct ip_tunnel_info *tun_info = act->tunnel; const struct ip_tunnel_key *tun_key = &tun_info->key; +#else +static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); + struct 
ip_tunnel_key *tun_key = &tun_info->key; +#endif + if (ip_tunnel_info_af(tun_info) != AF_INET) { - netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); + netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n"); return -EOPNOTSUPP; } @@ -111,18 +189,197 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, return 0; } + +/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes + * each(u32). + * This routine consolidates such multiple unaligned values into one + * field each for Key & Mask (for src and dst macs separately) + * For example, + * Mask/Key Offset Iteration + * ========== ====== ========= + * dst mac 0xffffffff 0 1 + * dst mac 0x0000ffff 4 2 + * + * src mac 0xffff0000 4 1 + * src mac 0xffffffff 8 2 + * + * The above combination coming from the stack will be consolidated as + * Mask/Key + * ============== + * src mac: 0xffffffffffff + * dst mac: 0xffffffffffff + */ +static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask, + u8 *actual_key, u8 *actual_mask) +{ + u32 key = get_unaligned((u32 *)actual_key); + u32 mask = get_unaligned((u32 *)actual_mask); + + part_key &= part_mask; + part_key |= key & ~part_mask; + + put_unaligned(mask | part_mask, (u32 *)actual_mask); + put_unaligned(part_key, (u32 *)actual_key); +} + +static int +bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions, + u16 *eth_addr, u16 *eth_addr_mask) +{ + u16 *p; + int j; + + if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask))) + return -EINVAL; + + if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) { + if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN)) + return -EINVAL; + /* FW expects dmac to be in u16 array format */ + p = eth_addr; + for (j = 0; j < 3; j++) + actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j)); + } + + if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) { + if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) + return -EINVAL; + /* FW expects smac to be in u16 array format */ + p = &eth_addr[ETH_ALEN / 2]; + for (j = 0; j < 3; j++) + actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j)); + } + + return 0; +} + +#ifdef HAVE_FLOW_OFFLOAD_H +static int +bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions, + struct flow_action_entry *act, int act_idx, u8 *eth_addr, + u8 *eth_addr_mask) +{ + size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr); + size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr); + u32 mask, val, offset, idx; + u8 htype; + + offset = act->mangle.offset; + htype = act->mangle.htype; + mask = ~act->mangle.mask; + val = act->mangle.val; + + switch (htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) { + netdev_err(bp->dev, + "%s: eth_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE; + + bnxt_set_l2_key_mask(val, mask, &eth_addr[offset], + &eth_addr_mask[offset]); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = true; + if (offset == offsetof(struct iphdr, saddr)) { + actions->nat.src_xlate = true; + actions->nat.l3.ipv4.saddr.s_addr = htonl(val); + } else if (offset == offsetof(struct iphdr, daddr)) { + actions->nat.src_xlate = false; + actions->nat.l3.ipv4.daddr.s_addr = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv4_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + + netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n", + actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr, + &actions->nat.l3.ipv4.daddr); + break; +
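A worked example of the consolidation that bnxt_set_l2_key_mask() above performs, assuming a little-endian host and the two-chunk dmac rewrite from the table in its comment; the values here are illustrative only:

	u8 key[2 * ETH_ALEN] = { 0 };	/* dmac (6 bytes) then smac (6 bytes) */
	u8 mask[2 * ETH_ALEN] = { 0 };

	/* pedit iteration 1: offset 0, mask 0xffffffff, bytes 00:11:22:33 */
	bnxt_set_l2_key_mask(0x33221100, 0xffffffff, &key[0], &mask[0]);
	/* pedit iteration 2: offset 4, mask 0x0000ffff, bytes 44:55 */
	bnxt_set_l2_key_mask(0x00005544, 0x0000ffff, &key[4], &mask[4]);

	/* key[0..5] now holds 00:11:22:33:44:55 and mask[0..5] is all-ones;
	 * bnxt_fill_l2_rewrite_fields() then packs key[] into the three
	 * __be16 words of l2_rewrite_dmac[] that the firmware expects.
	 */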
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = false; + if (offset >= offsetof(struct ipv6hdr, saddr) && + offset < offset_of_ip6_daddr) { + /* 16 byte IPv6 address comes in 4 iterations of + * 4byte chunks each + */ + actions->nat.src_xlate = true; + idx = (offset - offset_of_ip6_saddr) / 4; + /* First 4bytes will be copied to idx 0 and so on */ + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); + } else if (offset >= offset_of_ip6_daddr && + offset < offset_of_ip6_daddr + 16) { + actions->nat.src_xlate = false; + idx = (offset - offset_of_ip6_daddr) / 4; + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv6_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* HW does not support L4 rewrite alone without L3 + * rewrite + */ + if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { + netdev_err(bp->dev, + "Need to specify L3 rewrite as well\n"); + return -EINVAL; + } + if (actions->nat.src_xlate) + actions->nat.l4.ports.sport = htons(val); + else + actions->nat.l4.ports.dport = htons(val); + netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n", + actions->nat.l4.ports.sport, + actions->nat.l4.ports.dport); + break; + default: + netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EINVAL; + } + return 0; +} + static int bnxt_tc_parse_actions(struct bnxt *bp, struct bnxt_tc_actions *actions, - struct flow_action *flow_action) + struct flow_action *flow_action, + struct netlink_ext_ack *extack) { + /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by + * smac (6 bytes) if rewrite of both is specified, otherwise either + * dmac or smac + */ + u16 eth_addr_mask[ETH_ALEN] = { 0 }; + /* Used to store the L2 rewrite key for dmac (6 bytes) followed by + * smac (6 bytes) if rewrite of both is specified, otherwise either + * dmac or smac + */ + u16 eth_addr[ETH_ALEN] = { 0 }; struct flow_action_entry *act; int i, rc; if (!flow_action_has_entries(flow_action)) { - netdev_info(bp->dev, "no actions"); + netdev_info(bp->dev, "no actions\n"); return -EINVAL; } + if (!flow_action_basic_hw_stats_check(flow_action, extack)) + return -EOPNOTSUPP; + flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP: @@ -148,11 +405,26 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, case FLOW_ACTION_TUNNEL_DECAP: actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; break; + /* Packet edit: L2 rewrite, NAT, NAPT */ + case FLOW_ACTION_MANGLE: + rc = bnxt_tc_parse_pedit(bp, actions, act, i, + (u8 *)eth_addr, + (u8 *)eth_addr_mask); + if (rc) + return rc; + break; default: break; } } + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr, + eth_addr_mask); + if (rc) + return rc; + } + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { /* dst_fid is PF's fid */ @@ -169,6 +441,220 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, return 0; } +#else + +static int +bnxt_tc_parse_pedit(struct bnxt *bp, const struct tc_action *tc_act, + struct bnxt_tc_actions *actions, + u8 *eth_addr, u8 *eth_addr_mask) +{ + size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr); + size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr); + u32 mask, val, 
offset, idx; + u8 cmd, htype; + int nkeys, j; + + nkeys = tcf_pedit_nkeys(tc_act); + for (j = 0; j < nkeys; j++) { + cmd = tcf_pedit_cmd(tc_act, j); + /* L2 rewrite comes as TCA_PEDIT_KEY_EX_CMD_SET type from TC. + * Return error, if the TC pedit cmd is not of this type. + */ + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { + netdev_err(bp->dev, "%s: pedit cmd not supported\n", + __func__); + return -EINVAL; + } + + offset = tcf_pedit_offset(tc_act, j); + htype = tcf_pedit_htype(tc_act, j); + mask = ~tcf_pedit_mask(tc_act, j); + val = tcf_pedit_val(tc_act, j); + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) { + netdev_err(bp->dev, + "%s: eth_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + actions->flags |= + BNXT_TC_ACTION_FLAG_L2_REWRITE; + + bnxt_set_l2_key_mask(val, mask, + &eth_addr[offset], + &eth_addr_mask[offset]); + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = true; + if (offset == offsetof(struct iphdr, saddr)) { + actions->nat.src_xlate = true; + actions->nat.l3.ipv4.saddr.s_addr = htonl(val); + } else if (offset == offsetof(struct iphdr, daddr)) { + actions->nat.src_xlate = false; + actions->nat.l3.ipv4.daddr.s_addr = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv4_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = false; + + if (offset >= offsetof(struct ipv6hdr, saddr) && + offset < offset_of_ip6_daddr) { + /* 16 byte IPv6 address comes in 4 iterations of + * 4byte chunks each + */ + actions->nat.src_xlate = true; + idx = (offset - offset_of_ip6_saddr) / 4; + /* First 4bytes will be copied to idx 0 and so on */ + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = + htonl(val); + } else if (offset >= offset_of_ip6_daddr && + offset < offset_of_ip6_daddr + 16) { + actions->nat.src_xlate = false; + idx = (offset - offset_of_ip6_daddr) / 4; + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = + htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv6_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + /* HW does not support L4 rewrite alone without L3 + * rewrite + */ + if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { + netdev_err(bp->dev, + "Need to specify L3 rewrite as well\n"); + return -EINVAL; + } + if (actions->nat.src_xlate) + actions->nat.l4.ports.sport = htons(val); + else + actions->nat.l4.ports.dport = htons(val); + break; + /* Return, if the packet edit is not for L2/L3/L4 */ + default: + netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EINVAL; + } + } + + return 0; +} + +static int bnxt_tc_parse_actions(struct bnxt *bp, + struct bnxt_tc_actions *actions, + struct tcf_exts *tc_exts) +{ + u16 eth_addr_mask[ETH_ALEN] = { 0 }; + u16 eth_addr[ETH_ALEN] = { 0 }; + const struct tc_action *tc_act; +#ifndef HAVE_TC_EXTS_FOR_ACTION + LIST_HEAD(tc_actions); + int rc; +#else + int i, rc; +#endif + + if (!tcf_exts_has_actions(tc_exts)) { + netdev_info(bp->dev, "no actions"); + return -EINVAL; + } + +#ifndef HAVE_TC_EXTS_FOR_ACTION + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { +#else + tcf_exts_for_each_action(i, tc_act, tc_exts) { +#endif + /* Drop action */ + if (is_tcf_gact_shot(tc_act)) { + actions->flags |= 
BNXT_TC_ACTION_FLAG_DROP; + return 0; /* don't bother with other actions */ + } + + /* Redirect action */ + if (is_tcf_mirred_egress_redirect(tc_act)) { + rc = bnxt_tc_parse_redir(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Push/pop VLAN */ + if (is_tcf_vlan(tc_act)) { + rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel encap */ + if (is_tcf_tunnel_set(tc_act)) { + rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel decap */ + if (is_tcf_tunnel_release(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; + continue; + } + + /* Packet edit: L2 rewrite, NAT, NAPT */ + if (is_tcf_pedit(tc_act)) { + rc = bnxt_tc_parse_pedit(bp, tc_act, actions, + (u8 *)eth_addr, + (u8 *)eth_addr_mask); + if (rc) + return rc; + + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + rc = bnxt_fill_l2_rewrite_fields(actions, + eth_addr, + eth_addr_mask); + if (rc) + return rc; + } + } + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + /* dst_fid is PF's fid */ + actions->dst_fid = bp->pf.fw_fid; + } else { + /* find the FID from dst_dev */ + actions->dst_fid = + bnxt_flow_get_dst_fid(bp, actions->dst_dev); + if (actions->dst_fid == BNXT_FID_INVALID) + return -EINVAL; + } + } + + return 0; +} +#endif + +#ifdef HAVE_FLOW_OFFLOAD_H static int bnxt_tc_parse_flow(struct bnxt *bp, struct flow_cls_offload *tc_flow_cmd, struct bnxt_tc_flow *flow) @@ -179,7 +665,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { - netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -300,24 +786,189 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, flow->tun_mask.tp_src = match.mask->src; } - return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action); + return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action, + tc_flow_cmd->common.extack); } +#else +#define GET_KEY(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->key) +#define GET_MASK(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->mask) + +static int bnxt_tc_parse_flow(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct bnxt_tc_flow *flow) +{ + struct flow_dissector *dissector = tc_flow_cmd->dissector; + + /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ + if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + struct flow_dissector_key_basic *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + + flow->l2_key.ether_type = key->n_proto; + flow->l2_mask.ether_type = mask->n_proto; + + if (key->n_proto == htons(ETH_P_IP) || + key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = key->ip_proto; + flow->l4_mask.ip_proto = mask->ip_proto; + } + } + + if 
(dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + struct flow_dissector_key_eth_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; + ether_addr_copy(flow->l2_key.dmac, key->dst); + ether_addr_copy(flow->l2_mask.dmac, mask->dst); + ether_addr_copy(flow->l2_key.smac, key->src); + ether_addr_copy(flow->l2_mask.smac, mask->src); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + struct flow_dissector_key_vlan *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + + flow->l2_key.inner_vlan_tci = + cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); + flow->l2_mask.inner_vlan_tci = + cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); + flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); + flow->l2_mask.inner_vlan_tpid = htons(0xffff); + flow->l2_key.num_vlans = 1; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; + flow->l3_key.ipv4.daddr.s_addr = key->dst; + flow->l3_mask.ipv4.daddr.s_addr = mask->dst; + flow->l3_key.ipv4.saddr.s_addr = key->src; + flow->l3_mask.ipv4.saddr.s_addr = mask->src; + } else if (dissector_uses_key(dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_dissector_key_ipv6_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + struct flow_dissector_key_ipv6_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; + flow->l3_key.ipv6.daddr = key->dst; + flow->l3_mask.ipv6.daddr = mask->dst; + flow->l3_key.ipv6.saddr = key->src; + flow->l3_mask.ipv6.saddr = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; + flow->l4_key.ports.dport = key->dst; + flow->l4_mask.ports.dport = mask->dst; + flow->l4_key.ports.sport = key->src; + flow->l4_mask.ports.sport = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_dissector_key_icmp *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + struct flow_dissector_key_icmp *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; + flow->l4_key.icmp.type = key->type; + flow->l4_key.icmp.code = key->code; + flow->l4_mask.icmp.type = mask->type; + flow->l4_mask.icmp.code = mask->code; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; + flow->tun_key.u.ipv4.dst = key->dst; + flow->tun_mask.u.ipv4.dst = mask->dst; + flow->tun_key.u.ipv4.src = key->src; + flow->tun_mask.u.ipv4.src = mask->src; + } else if (dissector_uses_key(dissector, + 
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + struct flow_dissector_key_keyid *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; + flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; + flow->tun_key.tp_dst = key->dst; + flow->tun_mask.tp_dst = mask->dst; + flow->tun_key.tp_src = key->src; + flow->tun_mask.tp_src = mask->src; + } + + return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); +} +#endif + static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { - struct hwrm_cfa_flow_free_input req = { 0 }; + struct hwrm_cfa_flow_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.ext_flow_handle = flow_node->ext_flow_handle; - else - req.flow_handle = flow_node->flow_handle; + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); + if (!rc) { + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->ext_flow_handle = flow_node->ext_flow_handle; + else + req->flow_handle = flow_node->flow_handle; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -391,29 +1042,101 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; struct bnxt_tc_l3_key *l3_key = &flow->l3_key; - struct hwrm_cfa_flow_alloc_input req = { 0 }; struct hwrm_cfa_flow_alloc_output *resp; + struct hwrm_cfa_flow_alloc_input *req; u16 flow_flags = 0, action_flags = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); + if (rc) + return rc; - req.src_fid = cpu_to_le16(flow->src_fid); - req.ref_flow_handle = ref_flow_handle; + req->src_fid = cpu_to_le16(flow->src_fid); + req->ref_flow_handle = ref_flow_handle; + + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, + ETH_ALEN); + memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, + ETH_ALEN); + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) { + if (actions->nat.l3_is_ipv4) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS; + + if (actions->nat.src_xlate) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; + /* L3 source rewrite */ + req->nat_ip_address[0] = + actions->nat.l3.ipv4.saddr.s_addr; + /* L4 source port */ + if (actions->nat.l4.ports.sport) + req->nat_port = + actions->nat.l4.ports.sport; + } else { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; + /* L3 destination rewrite */ + req->nat_ip_address[0] = + actions->nat.l3.ipv4.daddr.s_addr; + /* L4 destination 
port */ + if (actions->nat.l4.ports.dport) + req->nat_port = + actions->nat.l4.ports.dport; + } + netdev_dbg(bp->dev, + "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); + } else { + if (actions->nat.src_xlate) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; + /* L3 source rewrite */ + memcpy(req->nat_ip_address, + actions->nat.l3.ipv6.saddr.s6_addr32, + sizeof(req->nat_ip_address)); + /* L4 source port */ + if (actions->nat.l4.ports.sport) + req->nat_port = + actions->nat.l4.ports.sport; + } else { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; + /* L3 destination rewrite */ + memcpy(req->nat_ip_address, + actions->nat.l3.ipv6.daddr.s6_addr32, + sizeof(req->nat_ip_address)); + /* L4 destination port */ + if (actions->nat.l4.ports.dport) + req->nat_port = + actions->nat.l4.ports.dport; + } + netdev_dbg(bp->dev, + "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); + } + } if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { - req.tunnel_handle = tunnel_handle; + req->tunnel_handle = tunnel_handle; flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; } - req.ethertype = flow->l2_key.ether_type; - req.ip_proto = flow->l4_key.ip_proto; + req->ethertype = flow->l2_key.ether_type; + req->ip_proto = flow->l4_key.ip_proto; if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { - memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); - memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); } if (flow->l2_key.num_vlans > 0) { @@ -422,7 +1145,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, * in outer_vlan_tci when num_vlans is 1 (which is * always the case in TC.) 
*/ - req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ @@ -435,68 +1158,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { - req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; - req.ip_dst_mask_len = + req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req->ip_dst_mask_len = inet_mask_len(l3_mask->ipv4.daddr.s_addr); - req.ip_src[0] = l3_key->ipv4.saddr.s_addr; - req.ip_src_mask_len = + req->ip_src[0] = l3_key->ipv4.saddr.s_addr; + req->ip_src_mask_len = inet_mask_len(l3_mask->ipv4.saddr.s_addr); } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { - memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, - sizeof(req.ip_dst)); - req.ip_dst_mask_len = + memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req->ip_dst)); + req->ip_dst_mask_len = ipv6_mask_len(&l3_mask->ipv6.daddr); - memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, - sizeof(req.ip_src)); - req.ip_src_mask_len = + memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req->ip_src)); + req->ip_src_mask_len = ipv6_mask_len(&l3_mask->ipv6.saddr); } } if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { - req.l4_src_port = flow->l4_key.ports.sport; - req.l4_src_port_mask = flow->l4_mask.ports.sport; - req.l4_dst_port = flow->l4_key.ports.dport; - req.l4_dst_port_mask = flow->l4_mask.ports.dport; + req->l4_src_port = flow->l4_key.ports.sport; + req->l4_src_port_mask = flow->l4_mask.ports.sport; + req->l4_dst_port = flow->l4_key.ports.dport; + req->l4_dst_port_mask = flow->l4_mask.ports.dport; } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { /* l4 ports serve as type/code when ip_proto is ICMP */ - req.l4_src_port = htons(flow->l4_key.icmp.type); - req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); - req.l4_dst_port = htons(flow->l4_key.icmp.code); - req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + req->l4_src_port = htons(flow->l4_key.icmp.type); + req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req->l4_dst_port = htons(flow->l4_key.icmp.code); + req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); } - req.flags = cpu_to_le16(flow_flags); + req->flags = cpu_to_le16(flow_flags); if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; } else { if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; - req.dst_fid = cpu_to_le16(actions->dst_fid); + req->dst_fid = cpu_to_le16(actions->dst_fid); } if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; - req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid; - req.l2_rewrite_vlan_tci = actions->push_vlan_tci; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req->l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; /* Rewrite config with tpid = 0 implies vlan pop */ - req.l2_rewrite_vlan_tpid = 0; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = 0; + 
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } } - req.action_flags = cpu_to_le16(action_flags); + req->action_flags = cpu_to_le16(action_flags); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); /* CFA_FLOW_ALLOC response interpretation: * fw with fw with * 16-bit 64-bit @@ -512,7 +1234,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, flow_node->flow_id = resp->flow_id; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -522,67 +1244,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; struct hwrm_cfa_decap_filter_alloc_output *resp; struct ip_tunnel_key *tun_key = &flow->tun_key; + struct hwrm_cfa_decap_filter_alloc_input *req; u32 enables = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC); + if (rc) + goto exit; - req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; - req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; /* tunnel_id is wrongly defined in hsi defn. as __le32 */ - req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR; - ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req->dst_macaddr, l2_info->dmac); } if (l2_info->num_vlans) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; - req.t_ivlan_vid = l2_info->inner_vlan_tci; + req->t_ivlan_vid = l2_info->inner_vlan_tci; } enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; - req.ethertype = htons(ETH_P_IP); + req->ethertype = htons(ETH_P_IP); if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; - req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.dst_ipaddr[0] = tun_key->u.ipv4.dst; - req.src_ipaddr[0] = tun_key->u.ipv4.src; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->dst_ipaddr[0] = tun_key->u.ipv4.dst; + req->src_ipaddr[0] = tun_key->u.ipv4.src; } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; - req.dst_port = tun_key->tp_dst; + req->dst_port = tun_key->tp_dst; } /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. 
*/ - req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; - req.enables = cpu_to_le32(enables); + req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req->enables = cpu_to_le32(enables); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) *decap_filter_handle = resp->decap_filter_id; - } else { - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -590,15 +1314,16 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, static int hwrm_cfa_decap_filter_free(struct bnxt *bp, __le32 decap_filter_handle) { - struct hwrm_cfa_decap_filter_free_input req = { 0 }; + struct hwrm_cfa_decap_filter_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); - req.decap_filter_id = decap_filter_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE); + if (!rc) { + req->decap_filter_id = decap_filter_handle; + rc = hwrm_req_send(bp, req); + } if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -608,17 +1333,20 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - struct hwrm_cfa_encap_record_alloc_input req = { 0 }; struct hwrm_cfa_encap_record_alloc_output *resp; - struct hwrm_cfa_encap_data_vxlan *encap = - (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; - struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = - (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + struct hwrm_cfa_encap_record_alloc_input *req; + struct hwrm_cfa_encap_data_vxlan *encap; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC); + if (rc) + goto exit; - req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; + encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + + req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); ether_addr_copy(encap->src_mac_addr, l2_info->smac); @@ -639,15 +1367,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->dst_port = encap_key->tp_dst; encap->vni = tunnel_id_to_key32(encap_key->tun_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) *encap_record_handle = resp->encap_record_id; - } else { - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -655,15 +1382,16 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, static int hwrm_cfa_encap_record_free(struct bnxt *bp, __le32 encap_record_handle) { - struct hwrm_cfa_encap_record_free_input req = { 0 }; + struct hwrm_cfa_encap_record_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); 
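These hunks apply one repeated conversion: an on-stack request initialized with bnxt_hwrm_cmd_hdr_init() and sent with hwrm_send_message() becomes a managed request object. A minimal sketch of the new calling convention, using the hwrm_req_* helpers this patch introduces (the function name is hypothetical; error handling abbreviated):

	static int example_encap_record_free(struct bnxt *bp, __le32 id)
	{
		struct hwrm_cfa_encap_record_free_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE);
		if (rc)
			return rc;	/* init failed, nothing to release */

		req->encap_record_id = id;
		return hwrm_req_send(bp, req);	/* send releases an unheld req */
	}

When response fields must be read, the send is bracketed with resp = hwrm_req_hold(bp, req) and hwrm_req_drop(bp, req), as the alloc conversions above do; resp is only guaranteed valid while the request is held.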
- req.encap_record_id = encap_record_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE); + if (!rc) { + req->encap_record_id = encap_record_handle; + rc = hwrm_req_send(bp, req); + } if (rc) - netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); return rc; } @@ -682,7 +1410,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp, tc_info->l2_ht_params); if (rc) netdev_err(bp->dev, - "Error: %s: rhashtable_remove_fast: %d", + "Error: %s: rhashtable_remove_fast: %d\n", __func__, rc); kfree_rcu(l2_node, rcu); } @@ -711,7 +1439,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, if (rc) { kfree_rcu(l2_node, rcu); netdev_err(bp->dev, - "Error: %s: rhashtable_insert_fast: %d", + "Error: %s: rhashtable_insert_fast: %d\n", __func__, rc); return NULL; } @@ -770,11 +1498,18 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && (flow->l4_key.ip_proto != IPPROTO_TCP && flow->l4_key.ip_proto != IPPROTO_UDP)) { - netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports", + netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n", flow->l4_key.ip_proto); return false; } + if (is_multicast_ether_addr(flow->l2_key.dmac) || + is_broadcast_ether_addr(flow->l2_key.dmac)) { + netdev_info(bp->dev, + "Broadcast/Multicast flow offload unsupported\n"); + return false; + } + /* Currently source/dest MAC cannot be partial wildcard */ if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) && !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) { @@ -813,7 +1548,8 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) return true; } -/* Returns the final refcount of the node on success +/* + * Returns the final refcount of the node on success * or a -ve error code on failure */ static int bnxt_tc_put_tunnel_node(struct bnxt *bp, @@ -827,7 +1563,7 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp, rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, *ht_params); if (rc) { - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); rc = -1; } kfree_rcu(tunnel_node, rcu); @@ -837,7 +1573,8 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp, } } -/* Get (or add) either encap or decap tunnel node from/to the supplied +/* + * Get (or add) either encap or decap tunnel node from/to the supplied * hash table. 
*/ static struct bnxt_tc_tunnel_node * @@ -868,7 +1605,7 @@ bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table, tunnel_node->refcount++; return tunnel_node; err: - netdev_info(bp->dev, "error rc=%d", rc); + netdev_info(bp->dev, "error rc=%d\n", rc); return NULL; } @@ -926,7 +1663,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, &decap_l2_node->node, tc_info->decap_l2_ht_params); if (rc) - netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); kfree_rcu(decap_l2_node, rcu); } } @@ -966,7 +1703,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, rt = ip_route_output_key(dev_net(real_dst_dev), &flow); if (IS_ERR(rt)) { - netdev_info(bp->dev, "no route to %pI4b", &flow.daddr); + netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); return -EOPNOTSUPP; } @@ -980,7 +1717,7 @@ if (vlan->real_dev != real_dst_dev) { netdev_info(bp->dev, - "dst_dev(%s) doesn't use PF-if(%s)", + "dst_dev(%s) doesn't use PF-if(%s)\n", netdev_name(dst_dev), netdev_name(real_dst_dev)); rc = -EOPNOTSUPP; @@ -992,7 +1729,7 @@ #endif } else if (dst_dev != real_dst_dev) { netdev_info(bp->dev, - "dst_dev(%s) for %pI4b is not PF-if(%s)", + "dst_dev(%s) for %pI4b is not PF-if(%s)\n", netdev_name(dst_dev), &flow.daddr, netdev_name(real_dst_dev)); rc = -EOPNOTSUPP; @@ -1001,7 +1738,7 @@ nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); if (!nbr) { - netdev_info(bp->dev, "can't lookup neighbor for %pI4b", + netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n", &flow.daddr); rc = -EOPNOTSUPP; goto put_rt; @@ -1053,7 +1790,8 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) goto done; - /* Resolve the L2 fields for tunnel decap + /* + * Resolve the L2 fields for tunnel decap * Resolve the route for remote vtep (saddr) of the decap key * Find it's next-hop mac addrs */ @@ -1116,7 +1854,8 @@ static void bnxt_tc_put_encap_handle(struct bnxt *bp, hwrm_cfa_encap_record_free(bp, encap_handle); } -/* Lookup the tunnel encap table and check if there's an encap_handle +/* + * Lookup the tunnel encap table and check if there's an encap_handle * alloc'd already. * If not, query L2 info via a route lookup and issue an encap_record_alloc * cmd to FW. @@ -1211,7 +1950,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, tc_info->flow_ht_params); if (rc) - netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d", + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", __func__, rc); kfree_rcu(flow_node, rcu); @@ -1224,7 +1963,7 @@ static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; } -static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, +static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, u16 src_fid) { if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) @@ -1246,8 +1985,14 @@ static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, * * The hash-tables are already protected by the rhashtable API. 
*/ +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, struct flow_cls_offload *tc_flow_cmd) +#endif { struct bnxt_tc_flow_node *new_node, *old_node; struct bnxt_tc_info *tc_info = bp->tc_info; @@ -1263,6 +2008,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, goto done; } new_node->cookie = tc_flow_cmd->cookie; +#ifdef HAVE_TC_CB_EGDEV + new_node->tc_dev_dir = tc_dev_dir; +#endif flow = &new_node->flow; rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); @@ -1274,15 +2022,28 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, if (!bnxt_tc_can_offload(bp, flow)) { rc = -EOPNOTSUPP; - goto free_node; + kfree_rcu(new_node, rcu); + return rc; } /* If a flow exists with the same cookie, delete it */ old_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); - if (old_node) + if (old_node) { +#ifdef HAVE_TC_CB_EGDEV + if (old_node->tc_dev_dir != tc_dev_dir) { + /* This happens when TC invokes flow-add for the same + * flow a second time through egress dev (e.g, in the + * case of VF-VF, VF-Uplink flows). Ignore it and + * return success. + */ + kfree_rcu(new_node, rcu); + return 0; + } +#endif __bnxt_tc_del_flow(bp, old_node); + } /* Check if the L2 part of the flow has been offloaded already. * If so, bump up it's refcnt and get it's reference handle. @@ -1325,13 +2086,19 @@ unlock: free_node: kfree_rcu(new_node, rcu); done: - netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n", __func__, tc_flow_cmd->cookie, rc); return rc; } +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_del_flow(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else static int bnxt_tc_del_flow(struct bnxt *bp, struct flow_cls_offload *tc_flow_cmd) +#endif { struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow_node *flow_node; @@ -1339,14 +2106,24 @@ static int bnxt_tc_del_flow(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node || flow_node->tc_dev_dir != tc_dev_dir) +#else if (!flow_node) +#endif return -EINVAL; return __bnxt_tc_del_flow(bp, flow_node); } +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_get_flow_stats(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else static int bnxt_tc_get_flow_stats(struct bnxt *bp, struct flow_cls_offload *tc_flow_cmd) +#endif { struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; struct bnxt_tc_info *tc_info = bp->tc_info; @@ -1357,7 +2134,11 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node || flow_node->tc_dev_dir != tc_dev_dir) +#else if (!flow_node) +#endif return -1; flow = &flow_node->flow; @@ -1371,8 +2152,13 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); +#ifdef HAVE_FLOW_OFFLOAD_H flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, - lastused); + lastused, FLOW_ACTION_HW_STATS_DELAYED); +#else + tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, + lastused); +#endif return 0; } @@ -1406,14 +2192,20 @@ static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, struct 
bnxt_tc_stats_batch stats_batch[]) { - struct hwrm_cfa_flow_stats_input req = { 0 }; struct hwrm_cfa_flow_stats_output *resp; - __le16 *req_flow_handles = &req.flow_handle_0; - __le32 *req_flow_ids = &req.flow_id_0; + struct hwrm_cfa_flow_stats_input *req; + __le16 *req_flow_handles; + __le32 *req_flow_ids; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); - req.num_flows = cpu_to_le16(num_flows); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS); + if (rc) + goto exit; + + req_flow_handles = &req->flow_handle_0; + req_flow_ids = &req->flow_id_0; + + req->num_flows = cpu_to_le16(num_flows); for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; @@ -1421,13 +2213,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, &req_flow_handles[i], &req_flow_ids[i]); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *resp_packets; __le64 *resp_bytes; - resp = bnxt_get_hwrm_resp_addr(bp, &req); resp_packets = &resp->packet_0; resp_bytes = &resp->byte_0; @@ -1437,15 +2228,17 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, stats_batch[i].hw_stats.bytes = le64_to_cpu(resp_bytes[i]); } - } else { - netdev_info(bp->dev, "error rc=%d", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "error rc=%d\n", rc); return rc; } -/* Add val to accum while handling a possible wraparound +/* + * Add val to accum while handling a possible wraparound * of val. Eventhough val is of type u64, its actual width * is denoted by mask and will wrap-around beyond that width. */ @@ -1565,9 +2358,40 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp) rhashtable_walk_exit(&tc_info->iter); } +#ifdef HAVE_TC_CB_EGDEV +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower, + int tc_dev_dir) +{ +#ifdef HAVE_TC_SETUP_TYPE +#ifndef HAVE_TC_SETUP_BLOCK + if (!is_classid_clsact_ingress(cls_flower->common.classid)) + return -EOPNOTSUPP; +#endif +#endif + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return bnxt_tc_add_flow(bp, src_fid, cls_flower, tc_dev_dir); + case FLOW_CLS_DESTROY: + return bnxt_tc_del_flow(bp, cls_flower, tc_dev_dir); + case FLOW_CLS_STATS: + return bnxt_tc_get_flow_stats(bp, cls_flower, tc_dev_dir); + default: + return -EOPNOTSUPP; + } +} + +#else + int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct flow_cls_offload *cls_flower) { +#ifdef HAVE_TC_SETUP_TYPE +#ifndef HAVE_TC_SETUP_BLOCK + if (!is_classid_clsact_ingress(cls_flower->common.classid)) + return -EOPNOTSUPP; +#endif +#endif switch (cls_flower->command) { case FLOW_CLS_REPLACE: return bnxt_tc_add_flow(bp, src_fid, cls_flower); @@ -1579,6 +2403,250 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, return -EOPNOTSUPP; } } +#endif /* HAVE_TC_CB_EGDEV */ + +#ifdef HAVE_TC_SETUP_TYPE +#ifdef HAVE_TC_SETUP_BLOCK +#ifdef HAVE_FLOW_INDR_BLOCK_CB + +static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; + struct flow_cls_offload *flower = type_data; + struct bnxt *bp = priv->bp; + + if (flower->common.chain_index) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower); + default: + return -EOPNOTSUPP; + } +} + 
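The accumulate comment in the flow-stats hunk above refers to counters that are u64 in software but narrower in hardware. A sketch of the idea, not necessarily the driver's exact helper: the delta since the last sample is taken modulo the counter width given by mask:

	/* illustrative only; mask is the hw counter width, e.g. (1ULL << 40) - 1 */
	static void accumulate_val(u64 *accum, u64 val, u64 mask)
	{
		u64 low = *accum & mask;	/* hw value at the last sample */

		if (val < low)			/* hw counter wrapped since then */
			*accum += (mask + 1) - low + val;
		else
			*accum += val - low;
	}

With mask = 0xff, *accum = 0x1fe and a new hardware reading val = 0x02, the wrap branch adds 0x100 - 0xfe + 0x02 = 4, giving 0x202, whose low byte again matches the hardware counter.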
+static struct bnxt_flower_indr_block_cb_priv * +bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) +{ + struct bnxt_flower_indr_block_cb_priv *cb_priv; + + /* All callback list access should be protected by RTNL. */ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) + if (cb_priv->tunnel_netdev == netdev) + return cb_priv; + + return NULL; +} + +static void bnxt_tc_setup_indr_rel(void *cb_priv) +{ + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; + + list_del(&priv->list); + kfree(priv); +} + +extern struct list_head bnxt_block_cb_list; +static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, + struct flow_block_offload *f) +{ + struct bnxt_flower_indr_block_cb_priv *cb_priv; + struct flow_block_cb *block_cb; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case FLOW_BLOCK_BIND: + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); + if (!cb_priv) + return -ENOMEM; + + cb_priv->tunnel_netdev = netdev; + cb_priv->bp = bp; + list_add(&cb_priv->list, &bp->tc_indr_block_list); + + block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb, + cb_priv, cb_priv, + bnxt_tc_setup_indr_rel); + if (IS_ERR(block_cb)) { + list_del(&cb_priv->list); + kfree(cb_priv); + return PTR_ERR(block_cb); + } + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list); + break; + case FLOW_BLOCK_UNBIND: + cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev); + if (!cb_priv) + return -ENOENT; + + block_cb = flow_block_cb_lookup(f->block, + bnxt_tc_setup_indr_block_cb, + cb_priv); + if (!block_cb) + return -ENOENT; + + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) +{ + return netif_is_vxlan(netdev); +} + +static int bnxt_tc_indr_block_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *netdev; + struct bnxt *bp; + int rc; + + netdev = netdev_notifier_info_to_dev(ptr); + if (!bnxt_is_netdev_indr_offload(netdev)) + return NOTIFY_OK; + + bp = container_of(nb, struct bnxt, tc_netdev_nb); + + switch (event) { + case NETDEV_REGISTER: + rc = __flow_indr_block_cb_register(netdev, bp, + bnxt_tc_setup_indr_cb, + bp); + if (rc) + netdev_info(bp->dev, + "Failed to register indirect blk: dev: %s\n", + netdev->name); + break; + case NETDEV_UNREGISTER: + __flow_indr_block_cb_unregister(netdev, + bnxt_tc_setup_indr_cb, + bp); + break; + } + + return NOTIFY_DONE; +} + +#endif /* HAVE_FLOW_INDR_BLOCK_CB */ + +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) + +static inline int bnxt_tc_find_vf_by_fid(struct bnxt *bp, u16 fid) +{ + int num_vfs = pci_num_vf(bp->pdev); + int i; + + for (i = 0; i < num_vfs; i++) { + if (bp->pf.vf[i].fw_fid == fid) + break; + } + if (i >= num_vfs) + return -EINVAL; + return i; +} + +static int bnxt_tc_del_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *matchall_cmd) +{ + int vf_idx; + + vf_idx = bnxt_tc_find_vf_by_fid(bp, src_fid); + if (vf_idx < 0) + return vf_idx; + + if (bp->pf.vf[vf_idx].police_id != matchall_cmd->cookie) + return 
-ENOENT; + + bnxt_set_vf_bw(bp->dev, vf_idx, 0, 0); + bp->pf.vf[vf_idx].police_id = 0; + return 0; +} + +static int bnxt_tc_add_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *matchall_cmd) +{ + struct flow_action_entry *action; + int vf_idx; + s64 burst; + u64 rate; + int rc; + + vf_idx = bnxt_tc_find_vf_by_fid(bp, src_fid); + if (vf_idx < 0) + return vf_idx; + + action = &matchall_cmd->rule->action.entries[0]; + if (action->id != FLOW_ACTION_POLICE) { + netdev_err(bp->dev, "%s: Unsupported matchall action: %d\n", + __func__, action->id); + return -EOPNOTSUPP; + } + if (bp->pf.vf[vf_idx].police_id && bp->pf.vf[vf_idx].police_id != + matchall_cmd->cookie) { + netdev_err(bp->dev, + "%s: Policer is already configured for VF: %d\n", + __func__, vf_idx); + return -EEXIST; + } + + rate = (u32)div_u64(action->police.rate_bytes_ps, 1024 * 1000) * 8; + burst = (u32)div_u64(action->police.rate_bytes_ps * + PSCHED_NS2TICKS(action->police.burst), + PSCHED_TICKS_PER_SEC); + burst = (u32)PSCHED_TICKS2NS(burst) / (1 << 20); + + rc = bnxt_set_vf_bw(bp->dev, vf_idx, burst, rate); + if (rc) { + netdev_err(bp->dev, + "Error: %s: VF: %d rate: %llu burst: %lld rc: %d\n", + __func__, vf_idx, rate, burst, rc); + return rc; + } + + bp->pf.vf[vf_idx].police_id = matchall_cmd->cookie; + return 0; +} + +int bnxt_tc_setup_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *cls_matchall) +{ + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return bnxt_tc_add_matchall(bp, src_fid, cls_matchall); + case TC_CLSMATCHALL_DESTROY: + return bnxt_tc_del_matchall(bp, src_fid, cls_matchall); + default: + return -EOPNOTSUPP; + } +} + +#endif /* HAVE_TC_MATCHALL_FLOW_RULE && HAVE_FLOW_ACTION_POLICE */ +#endif /* HAVE_TC_SETUP_BLOCK */ +#endif /* HAVE_TC_SETUP_TYPE */ static const struct rhashtable_params bnxt_tc_flow_ht_params = { .head_offset = offsetof(struct bnxt_tc_flow_node, node), @@ -1616,7 +2684,7 @@ int bnxt_init_tc(struct bnxt *bp) struct bnxt_tc_info *tc_info; int rc; - if (bp->hwrm_spec_code < 0x10803) { + if (bp->hwrm_spec_code < 0x10800) { netdev_warn(bp->dev, "Firmware does not support TC flower offload.\n"); return -ENOTSUPP; @@ -1663,7 +2731,19 @@ int bnxt_init_tc(struct bnxt *bp) bp->dev->hw_features |= NETIF_F_HW_TC; bp->dev->features |= NETIF_F_HW_TC; bp->tc_info = tc_info; + +#ifndef HAVE_FLOW_INDR_BLOCK_CB return 0; +#else + /* init indirect block notifications */ + INIT_LIST_HEAD(&bp->tc_indr_block_list); + bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event; + rc = register_netdevice_notifier(&bp->tc_netdev_nb); + if (!rc) + return 0; + + rhashtable_destroy(&tc_info->encap_table); +#endif destroy_decap_table: rhashtable_destroy(&tc_info->decap_table); @@ -1685,6 +2765,9 @@ void bnxt_shutdown_tc(struct bnxt *bp) if (!bnxt_tc_flower_enabled(bp)) return; +#ifdef HAVE_FLOW_INDR_BLOCK_CB + unregister_netdevice_notifier(&bp->tc_netdev_nb); +#endif rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->l2_table); rhashtable_destroy(&tc_info->decap_l2_table); @@ -1693,3 +2776,5 @@ void bnxt_shutdown_tc(struct bnxt *bp) kfree(tc_info); bp->tc_info = NULL; } + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 4f05305052f2..6ff3ba330f6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -62,6 +62,12 @@ struct bnxt_tc_tunnel_key { __be32 id; }; +#define 
bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \ + ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \ + is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \ + (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \ + is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN))) + struct bnxt_tc_actions { u32 flags; #define BNXT_TC_ACTION_FLAG_FWD BIT(0) @@ -71,6 +77,8 @@ struct bnxt_tc_actions { #define BNXT_TC_ACTION_FLAG_DROP BIT(5) #define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6) #define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7) +#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8) +#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9) u16 dst_fid; struct net_device *dst_dev; @@ -79,6 +87,18 @@ struct bnxt_tc_actions { /* tunnel encap */ struct ip_tunnel_key tun_encap_key; +#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8 + __be16 l2_rewrite_dmac[3]; + __be16 l2_rewrite_smac[3]; + struct { + bool src_xlate; /* true => translate src, + * false => translate dst + * Mutually exclusive, i.e cannot set both + */ + bool l3_is_ipv4; /* false means L3 is ipv6 */ + struct bnxt_tc_l3_key l3; + struct bnxt_tc_l4_key l4; + } nat; }; struct bnxt_tc_flow { @@ -123,7 +143,8 @@ struct bnxt_tc_flow { spinlock_t stats_lock; }; -/* Tunnel encap/decap hash table +/* + * Tunnel encap/decap hash table * This table is used to maintain a list of flows that use * the same tunnel encap/decap params (ip_daddrs, vni, udp_dport) * and the FW returned handle. @@ -144,7 +165,8 @@ struct bnxt_tc_tunnel_node { struct rcu_head rcu; }; -/* L2 hash table +/* + * L2 hash table * The same data-struct is used for L2-flow table and L2-tunnel table. * The L2 part of a flow or tunnel is stored in a hash table. * A flow that shares the same L2 key/mask with an @@ -166,6 +188,12 @@ struct bnxt_tc_l2_node { struct rcu_head rcu; }; +/* Track if the TC offload API is invoked on an ingress or egress device. 
*/ +enum { + BNXT_TC_DEV_INGRESS = 1, + BNXT_TC_DEV_EGRESS = 2 +}; + struct bnxt_tc_flow_node { /* hash key: provided by TC */ unsigned long cookie; @@ -176,6 +204,7 @@ struct bnxt_tc_flow_node { __le64 ext_flow_handle; __le16 flow_handle; __le32 flow_id; + int tc_dev_dir; /* L2 node in l2 hashtable that shares flow's l2 key */ struct bnxt_tc_l2_node *l2_node; @@ -195,11 +224,23 @@ struct bnxt_tc_flow_node { struct rcu_head rcu; }; +#ifdef HAVE_TC_CB_EGDEV +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower, + int tc_dev_dir); +#else int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct flow_cls_offload *cls_flower); +#endif + int bnxt_init_tc(struct bnxt *bp); void bnxt_shutdown_tc(struct bnxt *bp); void bnxt_tc_flow_stats_work(struct bnxt *bp); +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) +int bnxt_tc_setup_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *cls_matchall); +#endif + static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) { @@ -208,12 +249,6 @@ static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) #else /* CONFIG_BNXT_FLOWER_OFFLOAD */ -static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, - struct flow_cls_offload *cls_flower) -{ - return -EOPNOTSUPP; -} - static inline int bnxt_init_tc(struct bnxt *bp) { return 0; @@ -232,4 +267,5 @@ static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) return false; } #endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + #endif /* BNXT_TC_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 30816ec4fa91..6d5c5ca48b1b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -1,12 +1,13 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. 
*/ +#include <linux/version.h> #include <linux/module.h> #include <linux/kernel.h> @@ -20,8 +21,10 @@ #include <asm/byteorder.h> #include <linux/bitmap.h> +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, @@ -81,7 +84,7 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) edev->en_ops->bnxt_free_msix(edev, ulp_id); if (ulp->max_async_event_id) - bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); RCU_INIT_POINTER(ulp->ulp_ops, NULL); synchronize_rcu(); @@ -104,7 +107,17 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) for (i = 0; i < num_msix; i++) { ent[i].vector = bp->irq_tbl[idx + i].vector; ent[i].ring_idx = idx + i; - ent[i].db_offset = (idx + i) * 0x80; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (bp->flags & BNXT_FLAG_CHIP_SR2) { + ent[i].db_offset = bp->legacy_db_size; + } else { + ent[i].db_offset = DB_PF_OFFSET_P5; + if (BNXT_VF(bp)) + ent[i].db_offset = DB_VF_OFFSET_P5; + } + } else { + ent[i].db_offset = (idx + i) * 0x80; + } } } @@ -186,7 +199,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) edev->ulp_tbl[ulp_id].msix_requested = 0; edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; - if (netif_running(dev)) { + if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) { bnxt_close_nic(bp, true, false); bnxt_open_nic(bp, true, false); } @@ -216,8 +229,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) { - if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - return BNXT_MIN_ROCE_STAT_CTXS; + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_en_dev *edev = bp->edev; + + if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested) + return BNXT_MIN_ROCE_STAT_CTXS; + } return 0; } @@ -227,19 +244,25 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct output *resp; struct input *req; int rc; - if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state) + if ((ulp_id != BNXT_ROCE_ULP) && bp->fw_reset_state) return -EBUSY; - mutex_lock(&bp->hwrm_cmd_lock); - req = fw_msg->msg; - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); - rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len, - fw_msg->timeout); - if (!rc) { - struct output *resp = bp->hwrm_cmd_resp_addr; + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; + + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc || rc == -EAGAIN) { u32 len = le16_to_cpu(resp->resp_len); if (fw_msg->resp_max_len < len) @@ -247,7 +270,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, memcpy(fw_msg->resp, resp, len); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -270,6 +293,7 @@ void bnxt_ulp_stop(struct bnxt *bp) if (!edev) return; + edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; for (i = 0; i < BNXT_MAX_ULP; i++) { struct bnxt_ulp *ulp = &edev->ulp_tbl[i]; @@ -280,7 +304,7 @@ void bnxt_ulp_stop(struct bnxt *bp) } } -void bnxt_ulp_start(struct bnxt *bp) +void bnxt_ulp_start(struct bnxt *bp, int err) { struct bnxt_en_dev *edev = bp->edev; struct bnxt_ulp_ops *ops; @@ -289,6 +313,11 @@ void bnxt_ulp_start(struct bnxt *bp) if (!edev) return; 
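The bnxt_send_msg() rewrite above replaces the old mutex-guarded _hwrm_send_message() flow with the request-based HWRM API. A minimal sketch of that lifecycle, using only the helpers visible in this patch (hwrm_req_init/hold/send/drop from bnxt_hwrm.h); the specific query and field access are illustrative:

static int example_query_mtu(struct bnxt *bp, u16 *mtu)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);	/* allocate and build the request header */
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);	/* keep the response buffer valid after send */
	rc = hwrm_req_send(bp, req);	/* issue the command and wait for completion */
	if (!rc)
		*mtu = le16_to_cpu(resp->max_mtu_configured);

	hwrm_req_drop(bp, req);		/* release the request and held response */
	return rc;
}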
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; + + if (err) + return; + for (i = 0; i < BNXT_MAX_ULP; i++) { struct bnxt_ulp *ulp = &edev->ulp_tbl[i]; @@ -439,7 +468,7 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id, /* Make sure bnxt_ulp_async_events() sees this order */ smp_wmb(); ulp->max_async_event_id = max_id; - bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1); + bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true); return 0; } @@ -457,6 +486,10 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); struct bnxt_en_dev *edev; +#if (BITS_PER_LONG == 64) + BUILD_BUG_ON(offsetof(struct bnxt, reserved_ulp_kabi_0) != 0x40); + BUILD_BUG_ON(offsetof(struct bnxt, reserved_ulp_kabi_1) != 0x80); +#endif edev = bp->edev; if (!edev) { edev = kzalloc(sizeof(*edev), GFP_KERNEL); @@ -469,6 +502,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; + edev->l2_db_size = bp->db_size; + edev->l2_db_size_nc = bp->db_size_nc; bp->edev = edev; } return bp->edev; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index cd78453d0bf0..0705676e59bb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -10,6 +10,8 @@ #ifndef BNXT_ULP_H #define BNXT_ULP_H +#include <linux/version.h> + #define BNXT_ROCE_ULP 0 #define BNXT_OTHER_ULP 1 #define BNXT_MAX_ULP 2 @@ -26,6 +28,9 @@ struct bnxt_msix_entry { u32 db_offset; }; +/* Change BNXT_ULP_VERSION in bnxt.h if any structure or API exposed to + * bnxt_re is modified. + */ struct bnxt_ulp_ops { /* async_notifier() cannot sleep (in BH context) */ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *); @@ -64,8 +69,17 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ BNXT_EN_FLAG_ROCEV2_CAP) #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 + #define BNXT_EN_FLAG_ULP_STOPPED 0x8 const struct bnxt_en_ops *en_ops; struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP]; + int l2_db_size; /* Doorbell BAR size in + * bytes mapped by L2 + * driver. + */ + int l2_db_size_nc; /* Doorbell BAR size in + * bytes mapped as non- + * cacheable. 
+ */ }; struct bnxt_en_ops { @@ -92,12 +106,11 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp); int bnxt_get_ulp_msix_base(struct bnxt *bp); int bnxt_get_ulp_stat_ctxs(struct bnxt *bp); void bnxt_ulp_stop(struct bnxt *bp); -void bnxt_ulp_start(struct bnxt *bp); +void bnxt_ulp_start(struct bnxt *bp, int err); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); void bnxt_ulp_shutdown(struct bnxt *bp); void bnxt_ulp_irq_stop(struct bnxt *bp); void bnxt_ulp_irq_restart(struct bnxt *bp, int err); void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl); struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev); - #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b010b34cdaf8..68d58574c4fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -11,15 +11,19 @@ #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/jhash.h> +#ifdef HAVE_TC_SETUP_TYPE #include <net/pkt_cls.h> +#endif +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_tc.h" -#ifdef CONFIG_BNXT_SRIOV +#ifdef CONFIG_VF_REPS #define CFA_HANDLE_INVALID 0xffff #define VF_IDX_INVALID 0xffff @@ -27,57 +31,60 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, u16 *tx_cfa_action, u16 *rx_cfa_code) { - struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_cfa_vfr_alloc_input req = { 0 }; + struct hwrm_cfa_vfr_alloc_output *resp; + struct hwrm_cfa_vfr_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); - req.vf_id = cpu_to_le16(vf_idx); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC); if (!rc) { - *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); - *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); - netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", - *tx_cfa_action, *rx_cfa_code); - } else { - netdev_info(bp->dev, "%s error rc=%d", __func__, rc); - } + req->vf_id = cpu_to_le16(vf_idx); + sprintf(req->vfr_name, "vfr%d", vf_idx); - mutex_unlock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } + hwrm_req_drop(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) { - struct hwrm_cfa_vfr_free_input req = { 0 }; + struct hwrm_cfa_vfr_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); + if (!rc) { + sprintf(req->vfr_name, "vfr%d", vf_idx); + rc = hwrm_req_send(bp, req); + } if (rc) - netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, u16 *max_mtu) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct 
hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u16 mtu; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { mtu = le16_to_cpu(resp->max_mtu_configured); if (!mtu) @@ -85,7 +92,8 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, else *max_mtu = mtu; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); + return rc; } @@ -141,6 +149,11 @@ bnxt_vf_rep_get_stats64(struct net_device *dev, stats->tx_bytes = vf_rep->tx_stats.bytes; } +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_TYPE +#ifdef HAVE_TC_SETUP_BLOCK +static LIST_HEAD(bnxt_vf_block_cb_list); + static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) @@ -155,13 +168,23 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else return bnxt_tc_setup_flower(bp, vf_fid, type_data); +#endif + +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) + case TC_SETUP_CLSMATCHALL: + return bnxt_tc_setup_matchall(bp, vf_fid, type_data); +#endif default: return -EOPNOTSUPP; } } -static LIST_HEAD(bnxt_vf_block_cb_list); +#endif /* HAVE_TC_SETUP_BLOCK */ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) @@ -169,16 +192,100 @@ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, struct bnxt_vf_rep *vf_rep = netdev_priv(dev); switch (type) { +#ifdef HAVE_TC_SETUP_BLOCK case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &bnxt_vf_block_cb_list, - bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep, true); + bnxt_vf_rep_setup_tc_block_cb, vf_rep, vf_rep, true); +#else + case TC_SETUP_CLSFLOWER: { + struct bnxt *bp = vf_rep->bp; + int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, vf_fid, type_data); +#endif + } +#endif default: return -EOPNOTSUPP; } } +#else /* HAVE_TC_SETUP_TYPE */ + +#ifdef HAVE_CHAIN_INDEX +static int bnxt_vf_rep_setup_tc(struct net_device *dev, u32 handle, + u32 chain_index, __be16 proto, + struct tc_to_netdev *ntc) +#else +static int bnxt_vf_rep_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *ntc) +#endif +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + if (!bnxt_tc_flower_enabled(vf_rep->bp)) + return -EOPNOTSUPP; + + switch (ntc->type) { + case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(vf_rep->bp, + bnxt_vf_rep_get_fid(dev), + ntc->cls_flower, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(vf_rep->bp, + bnxt_vf_rep_get_fid(dev), + ntc->cls_flower); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_SETUP_TYPE */ + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_vf_rep_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct bnxt_vf_rep *vf_rep = cb_priv; + struct bnxt *bp = vf_rep->bp; + int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + + 
if (!bnxt_tc_flower_enabled(vf_rep->bp) || + !tc_cls_can_offload_and_chain0(bp->dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_EGRESS); + default: + return -EOPNOTSUPP; + } +} +#else + +#ifdef HAVE_TC_SETUP_TYPE +static int bnxt_vf_rep_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +#else +static int bnxt_vf_rep_tc_cb_egdev(int type, void *type_data, void *cb_priv) +#endif /* HAVE_TC_SETUP_TYPE */ +{ + return 0; +} +#endif /* HAVE_TC_CB_EGDEV */ + +#define bnxt_cb_egdev ((void *)bnxt_vf_rep_tc_cb_egdev) + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) { u16 vf_idx; @@ -222,6 +329,7 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } +#ifdef HAVE_NDO_GET_PORT_PARENT_ID static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { @@ -233,18 +341,51 @@ static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev, return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); } +#else + +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. + */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; +#endif + static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { .get_drvinfo = bnxt_vf_rep_get_drvinfo }; static const struct net_device_ops bnxt_vf_rep_netdev_ops = { +#ifdef HAVE_NDO_SETUP_TC_RH + .ndo_size = sizeof(const struct net_device_ops), +#endif .ndo_open = bnxt_vf_rep_open, .ndo_stop = bnxt_vf_rep_close, .ndo_start_xmit = bnxt_vf_rep_xmit, .ndo_get_stats64 = bnxt_vf_rep_get_stats64, +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_NDO_SETUP_TC_RH + .extended.ndo_setup_tc_rh = bnxt_vf_rep_setup_tc, +#else .ndo_setup_tc = bnxt_vf_rep_setup_tc, +#endif +#endif +#ifdef HAVE_NDO_GET_PORT_PARENT_ID .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id, +#endif +#ifdef HAVE_EXT_GET_PHYS_PORT_NAME + .extended.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +#else .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +#endif }; bool bnxt_dev_is_vf_rep(struct net_device *dev) @@ -307,8 +448,14 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp) /* if register_netdev failed, then netdev_ops * would have been set to NULL */ - if (vf_rep->dev->netdev_ops) + if (vf_rep->dev->netdev_ops) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + bnxt_unreg_egdev(vf_rep->dev, + bnxt_cb_egdev, + (void *)vf_rep); +#endif unregister_netdev(vf_rep->dev); + } free_netdev(vf_rep->dev); } } @@ -374,6 +521,9 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; +#ifndef HAVE_NDO_GET_PORT_PARENT_ID + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); +#endif /* Just inherit all the featues of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ @@ -387,8 +537,13 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, ether_addr_copy(dev->dev_addr, dev->perm_addr); /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */ if 
(!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) +#ifdef HAVE_NET_DEVICE_EXT + dev->extended->max_mtu = max_mtu; + dev->extended->min_mtu = ETH_ZLEN; +#else dev->max_mtu = max_mtu; dev->min_mtu = ETH_ZLEN; +#endif } static int bnxt_vf_reps_create(struct bnxt *bp) @@ -456,6 +611,10 @@ static int bnxt_vf_reps_create(struct bnxt *bp) dev->netdev_ops = NULL; goto err; } +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + bnxt_reg_egdev(vf_rep->dev, bnxt_cb_egdev, (void *)vf_rep, + vf_rep->vf_idx); +#endif } /* publish cfa_code_map only after all VF-reps have been initialized */ @@ -465,7 +624,7 @@ static int bnxt_vf_reps_create(struct bnxt *bp) return 0; err: - netdev_info(bp->dev, "%s error=%d", __func__, rc); + netdev_info(bp->dev, "%s error=%d\n", __func__, rc); kfree(cfa_code_map); __bnxt_vf_reps_destroy(bp); return rc; @@ -480,15 +639,19 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) return 0; } +#ifdef HAVE_ESWITCH_MODE_SET_EXTACK int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) +#else +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +#endif { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); int rc = 0; mutex_lock(&bp->sriov_lock); if (bp->eswitch_mode == mode) { - netdev_info(bp->dev, "already in %s eswitch mode", + netdev_info(bp->dev, "already in %s eswitch mode\n", mode == DEVLINK_ESWITCH_MODE_LEGACY ? "legacy" : "switchdev"); rc = -EINVAL; @@ -508,7 +671,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, } if (pci_num_vf(bp->pdev) == 0) { - netdev_info(bp->dev, "Enable VFs before setting switchdev mode"); + netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n"); rc = -EPERM; goto done; } @@ -524,4 +687,4 @@ done: return rc; } -#endif +#endif /* CONFIG_VF_REPS */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index d7287651422f..7daebda77fbb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -10,7 +10,7 @@ #ifndef BNXT_VFR_H #define BNXT_VFR_H -#ifdef CONFIG_BNXT_SRIOV +#ifdef CONFIG_VF_REPS #define MAX_CFA_CODE 65536 @@ -30,11 +30,19 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) bool bnxt_dev_is_vf_rep(struct net_device *dev); int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); +#ifdef HAVE_ESWITCH_MODE_SET_EXTACK int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); +#else +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); +#endif #else +static inline void bnxt_vf_reps_destroy(struct bnxt *bp) +{ +} + static inline void bnxt_vf_reps_close(struct bnxt *bp) { } @@ -61,5 +69,5 @@ static inline bool bnxt_dev_is_vf_rep(struct net_device *dev) { return false; } -#endif /* CONFIG_BNXT_SRIOV */ +#endif /* CONFIG_VF_REPS */ #endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index c6f6f2033880..9d211d5fdaff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -12,13 +12,20 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#ifdef HAVE_NDO_XDP #include <linux/bpf.h> +#ifdef HAVE_BPF_TRACE #include <linux/bpf_trace.h> +#endif #include <linux/filter.h> -#include <net/page_pool.h> +#endif +#include "bnxt_compat.h" #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_xdp.h" +#ifdef CONFIG_PAGE_POOL +#include 
<net/page_pool.h> +#endif struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, @@ -30,7 +37,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, u16 prod; prod = txr->tx_prod; - tx_buf = &txr->tx_buf_ring[prod]; + tx_buf = &txr->tx_buf_ring[RING_TX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) | @@ -44,6 +51,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, return tx_buf; } +#ifdef HAVE_NDO_XDP static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len, u16 rx_prod) { @@ -54,6 +62,7 @@ static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, tx_buf->action = XDP_TX; } +#ifdef HAVE_XDP_FRAME static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len, @@ -67,6 +76,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, dma_unmap_addr_set(tx_buf, mapping, mapping); dma_unmap_len_set(tx_buf, len, 0); } +#endif void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) { @@ -79,16 +89,18 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) int i; for (i = 0; i < nr_pkts; i++) { - tx_buf = &txr->tx_buf_ring[tx_cons]; + tx_buf = &txr->tx_buf_ring[RING_TX(tx_cons)]; if (tx_buf->action == XDP_REDIRECT) { struct pci_dev *pdev = bp->pdev; dma_unmap_single(&pdev->dev, - dma_unmap_addr(tx_buf, mapping), - dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + dma_unmap_addr(tx_buf, mapping), + dma_unmap_len(tx_buf, len), + PCI_DMA_TODEVICE); +#ifdef HAVE_XDP_FRAME xdp_return_frame(tx_buf->xdpf); +#endif tx_buf->action = 0; tx_buf->xdpf = NULL; } else if (tx_buf->action == XDP_TX) { @@ -99,7 +111,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } txr->tx_cons = tx_cons; if (rx_doorbell_needed) { - tx_buf = &txr->tx_buf_ring[last_tx_cons]; + tx_buf = &txr->tx_buf_ring[RING_TX(last_tx_cons)]; bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); } } @@ -133,11 +145,18 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); txr = rxr->bnapi->tx_ring; +#if XDP_PACKET_HEADROOM xdp.data_hard_start = *data_ptr - offset; +#endif xdp.data = *data_ptr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = *data_ptr + *len; +#ifdef HAVE_XDP_RXQ_INFO xdp.rxq = &rxr->xdp_rxq; +#endif +#ifdef HAVE_XDP_FRAME_SZ + xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ +#endif orig_data = xdp.data; rcu_read_lock(); @@ -151,10 +170,14 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (tx_avail != bp->tx_ring_size) *event &= ~BNXT_RX_EVENT; +#if XDP_PACKET_HEADROOM *len = xdp.data_end - xdp.data; +#endif if (orig_data != xdp.data) { +#if XDP_PACKET_HEADROOM offset = xdp.data - xdp.data_hard_start; *data_ptr = xdp.data_hard_start + offset; +#endif } switch (act) { case XDP_PASS: @@ -171,7 +194,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, bp->rx_dir); __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, - NEXT_RX(rxr->rx_prod)); + NEXT_RX(rxr->rx_prod)); bnxt_reuse_rx_data(rxr, cons, page); return true; case XDP_REDIRECT: @@ -192,7 +215,11 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) { trace_xdp_exception(bp->dev, 
xdp_prog, act); +#ifndef CONFIG_PAGE_POOL + __free_page(page); +#else page_pool_recycle_direct(rxr->page_pool, page); +#endif return true; } @@ -211,6 +238,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, return true; } +#ifdef HAVE_XDP_FRAME int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags) { @@ -260,7 +288,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, return num_frames - drops; } - +#endif /* Under rtnl_lock */ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) { @@ -330,7 +358,12 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) rc = bnxt_xdp_set(bp, xdp->prog); break; case XDP_QUERY_PROG: +#ifdef HAVE_PROG_ATTACHED + xdp->prog_attached = !!bp->xdp_prog; +#endif +#ifdef HAVE_IFLA_XDP_PROG_ID xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; +#endif rc = 0; break; default: @@ -339,3 +372,14 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) } return rc; } +#else +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +{ +} + +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + void *page, u8 **data_ptr, unsigned int *len, u8 *event) +{ + return false; +} +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0df40c3beb05..9a4feabea675 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -14,11 +14,18 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); +#ifdef HAVE_NDO_XDP bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); +#else +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + void *page, u8 **data_ptr, unsigned int *len, + u8 *event); +#endif int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); +#ifdef HAVE_XDP_FRAME int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags); - +#endif #endif diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 8f909d57501f..b27da024aa9d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -69,6 +69,9 @@ #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ TOTAL_DESC * DMA_DESC_SIZE) +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + static inline void bcmgenet_writel(u32 value, void __iomem *offset) { /* MIPS chips strapped for BE will automagically configure the @@ -995,6 +998,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, if (netif_running(dev)) bcmgenet_update_mib_counters(priv); + dev->netdev_ops->ndo_get_stats(dev); + for (i = 0; i < BCMGENET_STATS_LEN; i++) { const struct bcmgenet_stats *s; char *p; @@ -1586,11 +1591,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -1639,6 +1639,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | 
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -1695,7 +1698,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, dma_addr_t mapping; /* Allocate a new Rx skb */ - skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, priv->dev, @@ -2849,6 +2853,7 @@ static void bcmgenet_netif_start(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ + bcmgenet_set_rx_mode(dev); bcmgenet_enable_rx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); @@ -3204,6 +3209,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) dev->stats.rx_packets = rx_packets; dev->stats.rx_errors = rx_errors; dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; return &dev->stats; } @@ -3578,8 +3584,10 @@ static int bcmgenet_probe(struct platform_device *pdev) clk_disable_unprepare(priv->clk); err = register_netdev(dev); - if (err) + if (err) { + bcmgenet_mii_exit(dev); goto err; + } return err; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbc69d8fa05f..5b7c2f9241d0 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -14,6 +14,7 @@ #include <linux/if_vlan.h> #include <linux/phy.h> #include <linux/dim.h> +#include <linux/ethtool.h> /* total number of Buffer Descriptors, same for Rx/Tx */ #define TOTAL_DESC 256 @@ -674,6 +675,7 @@ struct bcmgenet_priv { /* WOL */ struct clk *clk_wol; u32 wolopts; + u8 sopass[SOPASS_MAX]; struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index ea20d94bd050..a41f82379369 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -41,18 +41,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; wol->wolopts = priv->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass)); - if (wol->wolopts & WAKE_MAGICSECURE) { - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } /* ethtool function - set WOL (Wake on LAN) settings. 
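The bcmgenet WoL rework here stops writing the SecureOn password to the UMAC registers at ethtool time and instead caches it in priv->sopass, programming the hardware only at suspend via the new bcmgenet_set_mpd_password() added below. A short sketch of the byte-to-register mapping that helper uses (register and field names are from this patch; the wrapper function is illustrative):

static void example_program_sopass(struct bcmgenet_priv *priv)
{
	/* bytes 0-1 of the 6-byte password, big-endian, into the 16-bit
	 * MS register: priv->sopass[0] << 8 | priv->sopass[1]
	 */
	bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			     UMAC_MPD_PW_MS);
	/* bytes 2-5, big-endian, into the 32-bit LS register */
	bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			     UMAC_MPD_PW_LS);
}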
@@ -62,7 +57,6 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; - u32 reg; if (!device_can_wakeup(kdev)) return -ENOTSUPP; @@ -70,17 +64,8 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - if (wol->wolopts & WAKE_MAGICSECURE) { - bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_MPD_PW_MS); - bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_MPD_PW_LS); - reg |= MPD_PW_EN; - } else { - reg &= ~MPD_PW_EN; - } - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -120,6 +105,14 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) return retries; } +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode) { @@ -140,13 +133,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Do not leave UniMAC in MPD mode only */ retries = bcmgenet_poll_wol_status(priv); if (retries < 0) { reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~MPD_EN; + reg &= ~(MPD_EN | MPD_PW_EN); bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); return retries; } @@ -185,7 +182,7 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (!(reg & MPD_EN)) return; /* already powered up so skip the rest */ - reg &= ~MPD_EN; + reg &= ~(MPD_EN | MPD_PW_EN); bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Disable CRC Forward */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ca3aa1250dd1..70bd79dc43f2 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7227,8 +7227,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -11219,18 +11219,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. 
+ */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } @@ -18176,8 +18185,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 234c13ebbc41..377668465535 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) int status; status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); goto mdio_pm_exit; + } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -367,8 +369,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, int status; status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); goto mdio_pm_exit; + } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -1714,7 +1718,8 @@ static inline int macb_clear_csum(struct sk_buff *skb) static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) { - bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || + skb_is_nonlinear(*skb); int padlen = ETH_ZLEN - (*skb)->len; int headroom = skb_headroom(*skb); int tailroom = skb_tailroom(*skb); @@ -2910,6 +2915,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) bool cmp_b = false; bool cmp_c = false; + if (!macb_is_gem(bp)) + return; + tp4sp_v = &(fs->h_u.tcp_ip4_spec); tp4sp_m = &(fs->m_u.tcp_ip4_spec); @@ -3281,6 +3289,7 @@ static void macb_restore_features(struct macb *bp) { struct net_device *netdev = bp->dev; netdev_features_t features = netdev->features; + struct ethtool_rx_fs_item *item; /* TX checksum offload */ macb_set_txcsum_feature(bp, features); @@ -3289,6 +3298,9 @@ static void macb_restore_features(struct macb *bp) macb_set_rxcsum_feature(bp, features); /* RX Flow Filters */ + list_for_each_entry(item, &bp->rx_fs_list.list, list) + gem_prog_cmp_regs(bp, &item->fs); + macb_set_rxflow_feature(bp, features); } @@ -3578,6 +3590,7 @@ static int macb_init(struct platform_device *pdev) reg = gem_readl(bp, DCFG8); bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), GEM_BFEXT(T2SCR, reg)); + INIT_LIST_HEAD(&bp->rx_fs_list.list); if (bp->max_tuples > 0) { /* also needs one ethtype match to check IPv4 */ if (GEM_BFEXT(SCR2ETH, reg) > 0) { @@ -3588,7 +3601,6 @@ static int macb_init(struct platform_device *pdev) /* Filtering is supported in hw but don't enable it in kernel now */ dev->hw_features |= NETIF_F_NTUPLE; /* init Rx flow definitions */ - INIT_LIST_HEAD(&bp->rx_fs_list.list); bp->rx_fs_list.count = 0; spin_lock_init(&bp->rx_fs_lock); } else @@ -3691,8 +3703,10 @@ static int at91ether_open(struct net_device *dev) int ret; ret = pm_runtime_get_sync(&lp->pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&lp->pdev->dev); return ret; + } /* Clear internal statistics */ ctl = macb_readl(lp, NCR); @@ -3702,7 
+3716,7 @@ static int at91ether_open(struct net_device *dev) ret = at91ether_start(dev); if (ret) - return ret; + goto pm_exit; /* Enable MAC interrupts */ macb_writel(lp, IER, MACB_BIT(RCOMP) | @@ -3719,6 +3733,10 @@ static int at91ether_open(struct net_device *dev) netif_start_queue(dev); return 0; + +pm_exit: + pm_runtime_put_sync(&lp->pdev->dev); + return ret; } /* Close the interface */ @@ -4048,15 +4066,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, static int fu540_c000_init(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) - return -ENODEV; - - mgmt->reg = ioremap(res->start, resource_size(res)); - if (!mgmt->reg) - return -ENOMEM; + mgmt->reg = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mgmt->reg)) + return PTR_ERR(mgmt->reg); return macb_init(pdev); } @@ -4260,7 +4272,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); @@ -4453,7 +4465,8 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); - pm_runtime_force_suspend(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_suspend(dev); return 0; } @@ -4468,7 +4481,8 @@ static int __maybe_unused macb_resume(struct device *dev) if (!netif_running(netdev)) return 0; - pm_runtime_force_resume(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); @@ -4507,7 +4521,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -4523,7 +4537,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 43d11c38b38a..4cddd628d41b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. 
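The cn23xx_get_pf_num() change above ("ret = EINVAL" becoming "ret = -EINVAL") follows the kernel convention that failures are reported as negative errno values; a positive EINVAL looks like success to any caller that tests for rc < 0. A hypothetical call site (not from this patch) showing why the sign matters:

static int example_setup(struct octeon_device *oct)
{
	int ret = cn23xx_get_pf_num(oct);

	if (ret < 0) {
		/* Only a negative errno reaches this branch; with the old
		 * positive "ret = EINVAL" the error was silently ignored.
		 */
		dev_err(&oct->pci_dev->dev, "failed to read PF number\n");
		return ret;
	}
	return 0;
}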
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h index e6d4ad99cc38..3f1c189646f4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h @@ -521,7 +521,7 @@ #define CN23XX_BAR1_INDEX_OFFSET 3 #define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \ - (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \ + (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \ ((idx) << CN23XX_BAR1_INDEX_OFFSET)) /*############################ DPI #########################*/ diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index b248966837b4..7aad40b2aa73 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -412,7 +412,7 @@ | CN6XXX_INTR_M0UNWI_ERR \ | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UPWI_ERR \ - | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UNB0_ERR \ | CN6XXX_INTR_M1UNWI_ERR \ | CN6XXX_INTR_INSTR_DB_OF_ERR \ | CN6XXX_INTR_SLIST_DB_OF_ERR \ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index cdd7e5da4a74..4fa9d485e209 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -235,6 +235,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) /* Put it in the ring. */ p->rx_ring[p->rx_next_fill] = re.d64; + /* Make sure there is no reorder of filling the ring and ringing + * the bell + */ + wmb(); + dma_sync_single_for_device(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); @@ -1217,7 +1222,7 @@ static int octeon_mgmt_open(struct net_device *netdev) */ if (netdev->phydev) { netif_carrier_off(netdev); - phy_start_aneg(netdev->phydev); + phy_start(netdev->phydev); } netif_wake_queue(netdev); @@ -1245,8 +1250,10 @@ static int octeon_mgmt_stop(struct net_device *netdev) napi_disable(&p->napi); netif_stop_queue(netdev); - if (netdev->phydev) + if (netdev->phydev) { + phy_stop(netdev->phydev); phy_disconnect(netdev->phydev); + } netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 40a44dcb3d9b..afeefbed4ebd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) if (nic->xdp_prog) { /* Attach BPF program */ - nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); - if (!IS_ERR(nic->xdp_prog)) { - bpf_attached = true; - } else { - ret = PTR_ERR(nic->xdp_prog); - nic->xdp_prog = NULL; - } + bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); + bpf_attached = true; } /* Calculate Tx queues needed for XDP and network stack */ @@ -2047,11 +2042,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). 
*/ - spin_lock(&nic->rx_mode_wq_lock); + spin_lock_bh(&nic->rx_mode_wq_lock); mode = vf_work->mode; mc = vf_work->mc; vf_work->mc = NULL; - spin_unlock(&nic->rx_mode_wq_lock); + spin_unlock_bh(&nic->rx_mode_wq_lock); __nicvf_set_rx_mode_task(mode, mc, nic); } @@ -2185,6 +2180,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->max_queues *= 2; nic->ptp_clock = ptp_clock; + /* Initialize mutex that serializes usage of VF's mailbox */ + mutex_init(&nic->rx_mode_mtx); + /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!nic->reg_base) { @@ -2261,7 +2259,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); - mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) { diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 4ab57d33a87e..6bd6fb5a3613 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; - mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | + mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 6dabbf1502c7..c0e96bf5dd1a 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -3176,6 +3176,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, GFP_KERNEL | __GFP_COMP); if (!avail) { CH_ALERT(adapter, "free list queue 0 initialization failed\n"); + ret = -ENOMEM; goto err; } if (avail < q->fl[0].size) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c2e92786608b..7801425e2726 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1054,9 +1054,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, } } -static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, - struct cudbg_error *cudbg_err, - u8 mem_type) +static int cudbg_mem_region_size(struct cudbg_init *pdbg_init, + struct cudbg_error *cudbg_err, + u8 mem_type, unsigned long *region_size) { struct adapter *padap = pdbg_init->adap; struct cudbg_meminfo mem_info; @@ -1065,15 +1065,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, memset(&mem_info, 0, sizeof(struct cudbg_meminfo)); rc = cudbg_fill_meminfo(padap, &mem_info); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } cudbg_t4_fwcache(pdbg_init, cudbg_err); rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } - return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base; + if (region_size) + *region_size = mem_info.avail[mc_idx].limit - + mem_info.avail[mc_idx].base; + + return 0; } static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, @@ -1081,7 +1089,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, struct cudbg_error 
*cudbg_err, u8 mem_type) { - unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type); + unsigned long size = 0; + int rc; + + rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size); + if (rc) + return rc; return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size, cudbg_err); @@ -1380,11 +1393,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer temp_buff = { 0 }; struct sge_qbase_reg_field *sge_qbase; struct ireg_buf *ch_sge_dbg; + u8 padap_running = 0; int i, rc; + u32 size; - rc = cudbg_get_buff(pdbg_init, dbg_buff, - sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase), - &temp_buff); + /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can + * lead to SGE missing doorbells under heavy traffic. So, only + * collect them when adapter is idle. + */ + for_each_port(padap, i) { + padap_running = netif_running(padap->port[i]); + if (padap_running) + break; + } + + size = sizeof(*ch_sge_dbg) * 2; + if (!padap_running) + size += sizeof(*sge_qbase); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1406,7 +1433,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, ch_sge_dbg++; } - if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 && + !padap_running) { sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; /* 1 addr reg SGE_QBASE_INDEX and 4 data reg * SGE_QBASE_MAP[0-3] @@ -1967,7 +1995,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ch_cntxt *buff; - u64 *dst_off, *src_off; u8 *ctx_buf; u8 i, k; int rc; @@ -2036,8 +2063,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, } for (j = 0; j < max_ctx_qid; j++) { + __be64 *dst_off; + u64 *src_off; + src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); - dst_off = (u64 *)buff->data; + dst_off = (__be64 *)buff->data; /* The data is stored in 64-bit cpu order. Convert it * to big endian before parsing. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 43b0f8c57da7..64a2453e06ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) int err; /* do a set-tcb for smac-sel and CWR bit.. 
*/ - err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); - if (err) - goto smac_err; - err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W, TCB_SMAC_SEL_V(TCB_SMAC_SEL_M), TCB_SMAC_SEL_V(f->smt->idx), 1); + if (err) + goto smac_err; + + err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); if (!err) return 0; @@ -165,37 +165,40 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, unsigned int tid, bool dip, bool sip, bool dp, bool sp) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + if (dip) { if (f->fs.type) { set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, WORD_MASK, f->fs.nat_lip[15] | f->fs.nat_lip[14] << 8 | f->fs.nat_lip[13] << 16 | - f->fs.nat_lip[12] << 24, 1); + (u64)f->fs.nat_lip[12] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1, WORD_MASK, f->fs.nat_lip[11] | f->fs.nat_lip[10] << 8 | f->fs.nat_lip[9] << 16 | - f->fs.nat_lip[8] << 24, 1); + (u64)f->fs.nat_lip[8] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2, WORD_MASK, f->fs.nat_lip[7] | f->fs.nat_lip[6] << 8 | f->fs.nat_lip[5] << 16 | - f->fs.nat_lip[4] << 24, 1); + (u64)f->fs.nat_lip[4] << 24, 1); set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3, WORD_MASK, f->fs.nat_lip[3] | f->fs.nat_lip[2] << 8 | f->fs.nat_lip[1] << 16 | - f->fs.nat_lip[0] << 24, 1); + (u64)f->fs.nat_lip[0] << 24, 1); } else { set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W, WORD_MASK, f->fs.nat_lip[3] | f->fs.nat_lip[2] << 8 | f->fs.nat_lip[1] << 16 | - f->fs.nat_lip[0] << 24, 1); + (u64)f->fs.nat_lip[0] << 24, 1); } } @@ -205,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, WORD_MASK, f->fs.nat_fip[15] | f->fs.nat_fip[14] << 8 | f->fs.nat_fip[13] << 16 | - f->fs.nat_fip[12] << 24, 1); + (u64)f->fs.nat_fip[12] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1, WORD_MASK, f->fs.nat_fip[11] | f->fs.nat_fip[10] << 8 | f->fs.nat_fip[9] << 16 | - f->fs.nat_fip[8] << 24, 1); + (u64)f->fs.nat_fip[8] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2, WORD_MASK, f->fs.nat_fip[7] | f->fs.nat_fip[6] << 8 | f->fs.nat_fip[5] << 16 | - f->fs.nat_fip[4] << 24, 1); + (u64)f->fs.nat_fip[4] << 24, 1); set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3, WORD_MASK, f->fs.nat_fip[3] | f->fs.nat_fip[2] << 8 | f->fs.nat_fip[1] << 16 | - f->fs.nat_fip[0] << 24, 1); + (u64)f->fs.nat_fip[0] << 24, 1); } else { set_tcb_field(adap, f, tid, @@ -231,13 +234,14 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, WORD_MASK, f->fs.nat_fip[3] | f->fs.nat_fip[2] << 8 | f->fs.nat_fip[1] << 16 | - f->fs.nat_fip[0] << 24, 1); + (u64)f->fs.nat_fip[0] << 24, 1); } } set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, - (dp ? f->fs.nat_lport : 0) | - (sp ? f->fs.nat_fport << 16 : 0), 1); + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | + (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0), + 1); } /* Validate filter spec against configuration done on the card.
*/ @@ -608,6 +612,7 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | FW_FILTER_WR_DMAC_V(f->fs.newdmac) | + FW_FILTER_WR_SMAC_V(f->fs.newsmac) | FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || @@ -625,7 +630,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); - fwr->smac_sel = 0; + if (f->fs.newsmac) + fwr->smac_sel = f->smt->idx; fwr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_CHAN_V(0) | FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); @@ -656,6 +662,9 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->fpm = htons(f->fs.mask.fport); if (adapter->params.filter2_wr_support) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + fwr->natmode_to_ulp_type = FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? ULP_MODE_TCPDDP : @@ -663,8 +672,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); - fwr->newlport = htons(f->fs.nat_lport); - fwr->newfport = htons(f->fs.nat_fport); + fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8); + fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8); } /* Mark the filter as "pending" and ship off the Filter Work Request. @@ -832,16 +841,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr *addr; addr = (struct in_addr *)ipmask; - if (addr->s_addr == 0xffffffff) + if (addr->s_addr == htonl(0xffffffff)) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - if (addr6->s6_addr32[0] == 0xffffffff && - addr6->s6_addr32[1] == 0xffffffff && - addr6->s6_addr32[2] == 0xffffffff && - addr6->s6_addr32[3] == 0xffffffff) + if (addr6->s6_addr32[0] == htonl(0xffffffff) && + addr6->s6_addr32[1] == htonl(0xffffffff) && + addr6->s6_addr32[2] == htonl(0xffffffff) && + addr6->s6_addr32[3] == htonl(0xffffffff)) return true; } return false; @@ -1041,11 +1050,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb, TX_QUEUE_V(f->fs.nat_mode) | T5_OPT_2_VALID_F | RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) | - CONG_CNTRL_V((f->fs.action == FILTER_DROP) | - (f->fs.dirsteer << 1)) | PACE_V((f->fs.maskhash) | - ((f->fs.dirsteerhash) << 1)) | - CCTRL_ECN_V(f->fs.action == FILTER_SWITCH)); + ((f->fs.dirsteerhash) << 1))); } static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb, @@ -1081,11 +1087,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb, TX_QUEUE_V(f->fs.nat_mode) | T5_OPT_2_VALID_F | RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) | - CONG_CNTRL_V((f->fs.action == FILTER_DROP) | - (f->fs.dirsteer << 1)) | PACE_V((f->fs.maskhash) | - ((f->fs.dirsteerhash) << 1)) | - CCTRL_ECN_V(f->fs.action == FILTER_SWITCH)); + ((f->fs.dirsteerhash) << 1))); } static int cxgb4_set_hash_filter(struct net_device *dev, @@ -1610,13 +1613,16 @@ out: static int configure_filter_tcb(struct adapter *adap, unsigned int tid, struct filter_entry *f) { - if (f->fs.hitcnts) + if (f->fs.hitcnts) { set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, - TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M), + TCB_TIMESTAMP_V(0ULL), + 1); + 
set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), - TCB_TIMESTAMP_V(0ULL) | TCB_RTT_TS_RECENT_AGE_V(0ULL), 1); + } if (f->fs.newdmac) set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, @@ -1738,6 +1744,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl) } return; } + switch (f->fs.action) { + case FILTER_PASS: + if (f->fs.dirsteer) + set_tcb_tflag(adap, f, tid, + TF_DIRECT_STEER_S, 1, 1); + break; + case FILTER_DROP: + set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1); + break; + case FILTER_SWITCH: + set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1); + break; + } + break; default: @@ -1798,22 +1818,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) if (ctx) ctx->result = 0; } else if (ret == FW_FILTER_WR_FLT_ADDED) { - int err = 0; - - if (f->fs.newsmac) - err = configure_filter_smac(adap, f); - - if (!err) { - f->pending = 0; /* async setup completed */ - f->valid = 1; - if (ctx) { - ctx->result = 0; - ctx->tid = idx; - } - } else { - clear_filter(adap, f); - if (ctx) - ctx->result = err; + f->pending = 0; /* async setup completed */ + f->valid = 1; + if (ctx) { + ctx->result = 0; + ctx->tid = idx; } } else { /* Something went wrong. Issue a warning about the diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 069a51847885..deb1c1f30107 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2504,7 +2504,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, /* Clear out filter specifications */ memset(&f->fs, 0, sizeof(struct ch_filter_specification)); - f->fs.val.lport = cpu_to_be16(sport); + f->fs.val.lport = be16_to_cpu(sport); f->fs.mask.lport = ~0; val = (u8 *)&sip; if ((val[0] | val[1] | val[2] | val[3]) != 0) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c index b1a073eea60b..a020e8490681 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c @@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap) { struct mps_entries_ref *mps_entry, *tmp; - if (!list_empty(&adap->mps_ref)) + if (list_empty(&adap->mps_ref)) return; spin_lock(&adap->mps_ref_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 58a039c3224a..f5bc996ac77d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) FW_PTP_CMD_PORTID_V(0)); c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; + c.u.ts.sign = (delta < 0) ? 
1 : 0; + if (delta < 0) + delta = -delta; c.u.ts.tm = cpu_to_be64(delta); err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); @@ -308,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) */ static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct adapter *adapter = (struct adapter *)container_of(ptp, - struct adapter, ptp_clock_info); - struct fw_ptp_cmd c; + struct adapter *adapter = container_of(ptp, struct adapter, + ptp_clock_info); u64 ns; - int err; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_READ_F | - FW_PTP_CMD_PORTID_V(0)); - c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); - c.u.ts.sc = FW_PTP_SC_GET_TIME; - - err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c); - if (err < 0) { - dev_err(adapter->pdev_dev, - "PTP: %s error %d\n", __func__, -err); - return err; - } + ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A)); + ns |= (u64)t4_read_reg(adapter, + T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32; /* convert to timespec*/ - ns = be64_to_cpu(c.u.ts.tm); *ts = ns_to_timespec64(ns); - - return err; + return 0; } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index e447976bdd3e..22d634111b81 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -58,12 +58,91 @@ static struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), - PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), - PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), }; +static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = { + /* Default supported NAT modes */ + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_NONE, + .natmode = NAT_MODE_NONE, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP, + .natmode = NAT_MODE_DIP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT, + .natmode = NAT_MODE_DIP_DP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT | + CXGB4_ACTION_NATMODE_SIP, + .natmode = NAT_MODE_DIP_DP_SIP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT | + CXGB4_ACTION_NATMODE_SPORT, + .natmode = NAT_MODE_DIP_DP_SP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT, + .natmode = NAT_MODE_SIP_SP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP | + CXGB4_ACTION_NATMODE_SPORT, + .natmode = NAT_MODE_DIP_SIP_SP, + }, + { + .chip = CHELSIO_T5, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP | + CXGB4_ACTION_NATMODE_DPORT | + CXGB4_ACTION_NATMODE_SPORT, + .natmode = NAT_MODE_ALL, + }, + /* T6+ can ignore L4 ports when they're disabled. 
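*/

/*
 * Aside -- illustrative sketch only: cxgb4_ptp_gettime() above now
 * reads the nanosecond counter straight from the two 32-bit
 * MAC_PORT_PTP_SUM registers instead of a firmware mailbox round
 * trip.  The 64-bit composition it performs:
 */
static u64 ptp_read_sum_ns(struct adapter *adapter)
{
	u64 ns;

	/* low word first, then fold in the high word */
	ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
	ns |= (u64)t4_read_reg(adapter,
			       T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
	return ns;
}

/*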
*/ + { + .chip = CHELSIO_T6, + .flags = CXGB4_ACTION_NATMODE_SIP, + .natmode = NAT_MODE_SIP_SP, + }, + { + .chip = CHELSIO_T6, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT, + .natmode = NAT_MODE_DIP_DP_SP, + }, + { + .chip = CHELSIO_T6, + .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP, + .natmode = NAT_MODE_ALL, + }, +}; + +static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs, + u8 natmode_flags) +{ + u8 i = 0; + + /* Translate the enabled NAT 4-tuple fields to one of the + * hardware supported NAT mode configurations. This ensures + * that we pick a valid combination, where the disabled fields + * do not get overwritten to 0. + */ + for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) { + if (cxgb4_natmode_config_array[i].flags == natmode_flags) { + fs->nat_mode = cxgb4_natmode_config_array[i].natmode; + return; + } + } +} + static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); @@ -156,14 +235,14 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - fs->val.lport = cpu_to_be16(match.key->dst); - fs->mask.lport = cpu_to_be16(match.mask->dst); - fs->val.fport = cpu_to_be16(match.key->src); - fs->mask.fport = cpu_to_be16(match.mask->src); + fs->val.lport = be16_to_cpu(match.key->dst); + fs->mask.lport = be16_to_cpu(match.mask->dst); + fs->val.fport = be16_to_cpu(match.key->src); + fs->mask.fport = be16_to_cpu(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(match.key->dst); - fs->nat_fport = cpu_to_be16(match.key->src); + fs->nat_lport = fs->val.lport; + fs->nat_fport = fs->val.fport; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { @@ -291,7 +370,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask, } static void process_pedit_field(struct ch_filter_specification *fs, u32 val, - u32 mask, u32 offset, u8 htype) + u32 mask, u32 offset, u8 htype, + u8 *natmode_flags) { switch (htype) { case FLOW_ACT_MANGLE_HDR_TYPE_ETH: @@ -316,74 +396,104 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case PEDIT_IP4_SRC: offload_pedit(fs, val, mask, IP4_SRC); + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; break; case PEDIT_IP4_DST: offload_pedit(fs, val, mask, IP4_DST); + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; } - fs->nat_mode = NAT_MODE_ALL; break; case FLOW_ACT_MANGLE_HDR_TYPE_IP6: switch (offset) { case PEDIT_IP6_SRC_31_0: offload_pedit(fs, val, mask, IP6_SRC_31_0); + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; break; case PEDIT_IP6_SRC_63_32: offload_pedit(fs, val, mask, IP6_SRC_63_32); + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; break; case PEDIT_IP6_SRC_95_64: offload_pedit(fs, val, mask, IP6_SRC_95_64); + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; break; case PEDIT_IP6_SRC_127_96: offload_pedit(fs, val, mask, IP6_SRC_127_96); + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; break; case PEDIT_IP6_DST_31_0: offload_pedit(fs, val, mask, IP6_DST_31_0); + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; case PEDIT_IP6_DST_63_32: offload_pedit(fs, val, mask, IP6_DST_63_32); + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; case PEDIT_IP6_DST_95_64: offload_pedit(fs, val, mask, IP6_DST_95_64); + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; case PEDIT_IP6_DST_127_96: offload_pedit(fs, val, mask, IP6_DST_127_96); + *natmode_flags |= 
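/*
 * Aside -- sketch of the translation idiom introduced above, not part
 * of the patch.  Each pedit-rewritten 4-tuple field sets one flag
 * bit; the exact bit set is then looked up in
 * cxgb4_natmode_config_array, so fields a rule leaves untouched are
 * never silently rewritten to zero (the old code forced NAT_MODE_ALL
 * for any rewrite).  Hypothetical usage:
 */
static void example_natmode_lookup(struct ch_filter_specification *fs)
{
	u8 flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT;

	/* selects the T5 entry with .natmode = NAT_MODE_DIP_DP */
	cxgb4_action_natmode_tweak(fs, flags);
}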
CXGB4_ACTION_NATMODE_DIP; } - fs->nat_mode = NAT_MODE_ALL; break; case FLOW_ACT_MANGLE_HDR_TYPE_TCP: switch (offset) { case PEDIT_TCP_SPORT_DPORT: - if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - TCP_SPORT); - else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), TCP_DPORT); + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) { + fs->nat_fport = val; + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; + } else { + fs->nat_lport = val >> 16; + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; + } } - fs->nat_mode = NAT_MODE_ALL; break; case FLOW_ACT_MANGLE_HDR_TYPE_UDP: switch (offset) { case PEDIT_UDP_SPORT_DPORT: - if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - UDP_SPORT); - else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), UDP_DPORT); + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) { + fs->nat_fport = val; + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; + } else { + fs->nat_lport = val >> 16; + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; + } } - fs->nat_mode = NAT_MODE_ALL; + break; } } +static int cxgb4_action_natmode_validate(struct net_device *dev, + struct adapter *adap, u8 natmode_flags) +{ + u8 i = 0; + + /* Extract the NAT mode to enable based on what 4-tuple fields + * are enabled to be overwritten. This ensures that the + * disabled fields don't get overwritten to 0. + */ + for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) { + const struct cxgb4_natmode_config *c; + + c = &cxgb4_natmode_config_array[i]; + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip && + natmode_flags == c->flags) + return 0; + } + netdev_err(dev, "%s: Unsupported NAT mode 4-tuple combination\n", + __func__); + return -EOPNOTSUPP; +} + static void cxgb4_process_flow_actions(struct net_device *in, struct flow_cls_offload *cls, struct ch_filter_specification *fs) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_action_entry *act; + u8 natmode_flags = 0; int i; flow_action_for_each(i, act, &rule->action) { @@ -434,13 +544,17 @@ static void cxgb4_process_flow_actions(struct net_device *in, val = act->mangle.val; offset = act->mangle.offset; - process_pedit_field(fs, val, mask, offset, htype); + process_pedit_field(fs, val, mask, offset, htype, + &natmode_flags); } break; default: break; } } + if (natmode_flags) + cxgb4_action_natmode_tweak(fs, natmode_flags); + } static bool valid_l4_mask(u32 mask) @@ -457,7 +571,8 @@ static bool valid_l4_mask(u32 mask) } static bool valid_pedit_action(struct net_device *dev, - const struct flow_action_entry *act) + const struct flow_action_entry *act, + u8 *natmode_flags) { u32 mask, offset; u8 htype; @@ -482,7 +597,10 @@ static bool valid_pedit_action(struct net_device *dev, case FLOW_ACT_MANGLE_HDR_TYPE_IP4: switch (offset) { case PEDIT_IP4_SRC: + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; case PEDIT_IP4_DST: + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; default: netdev_err(dev, "%s: Unsupported pedit field\n", @@ -496,10 +614,13 @@ static bool valid_pedit_action(struct net_device *dev, case PEDIT_IP6_SRC_63_32: case PEDIT_IP6_SRC_95_64: case PEDIT_IP6_SRC_127_96: + *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; case PEDIT_IP6_DST_31_0: case PEDIT_IP6_DST_63_32: case PEDIT_IP6_DST_95_64: case PEDIT_IP6_DST_127_96: + *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; default: netdev_err(dev, "%s: Unsupported pedit field\n", @@ -515,6 +636,10 @@ static bool valid_pedit_action(struct 
net_device *dev, __func__); return false; } + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; + else + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; break; default: netdev_err(dev, "%s: Unsupported pedit field\n", @@ -530,6 +655,10 @@ static bool valid_pedit_action(struct net_device *dev, __func__); return false; } + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) + *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; + else + *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; break; default: netdev_err(dev, "%s: Unsupported pedit field\n", @@ -548,10 +677,12 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, struct flow_cls_offload *cls) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct adapter *adap = netdev2adap(dev); struct flow_action_entry *act; bool act_redir = false; bool act_pedit = false; bool act_vlan = false; + u8 natmode_flags = 0; int i; flow_action_for_each(i, act, &rule->action) { @@ -561,7 +692,6 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, /* Do nothing */ break; case FLOW_ACTION_REDIRECT: { - struct adapter *adap = netdev2adap(dev); struct net_device *n_dev, *target_dev; unsigned int i; bool found = false; @@ -611,7 +741,8 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, } break; case FLOW_ACTION_MANGLE: { - bool pedit_valid = valid_pedit_action(dev, act); + bool pedit_valid = valid_pedit_action(dev, act, + &natmode_flags); if (!pedit_valid) return -EOPNOTSUPP; @@ -630,6 +761,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, return -EINVAL; } + if (act_pedit) { + int ret; + + ret = cxgb4_action_natmode_validate(dev, adap, natmode_flags); + if (ret) + return ret; + } + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index eb4c95248baf..c905debe6f7a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -108,6 +108,21 @@ struct ch_tc_pedit_fields { #define PEDIT_TCP_SPORT_DPORT 0x0 #define PEDIT_UDP_SPORT_DPORT 0x0 +enum cxgb4_action_natmode_flags { + CXGB4_ACTION_NATMODE_NONE = 0, + CXGB4_ACTION_NATMODE_DIP = (1 << 0), + CXGB4_ACTION_NATMODE_SIP = (1 << 1), + CXGB4_ACTION_NATMODE_DPORT = (1 << 2), + CXGB4_ACTION_NATMODE_SPORT = (1 << 3), +}; + +/* TC PEDIT action to NATMODE translation entry */ +struct cxgb4_natmode_config { + enum chip_type chip; + u8 flags; + u8 natmode; +}; + int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls); int cxgb4_tc_flower_destroy(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 02fc63fa7f25..b3a342561a96 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap, bool next_header) { unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off, err; bool found; @@ -216,7 +216,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) const struct cxgb4_next_header *next; bool found = false; unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off; if (t->table[link_uhtid - 1].link_handle) { @@ -230,10 +230,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) /* Try to find matches that allow jumps to next header. 
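*/

/*
 * Aside -- not part of the patch: tc pedit delivers both L4 ports in
 * a single 32-bit word, and bits that are *clear* in the mask are the
 * ones being rewritten.  Checking ~mask & PEDIT_TCP_UDP_SPORT_MASK
 * therefore selects the source-port half, while the destination port
 * sits in the upper half of val -- which is why the handlers above
 * split on the mask.  A sketch, assuming the driver's usual
 * source-port mask covers the low 16 bits:
 */
static void example_split_pedit_ports(u32 val, u32 mask,
				      u16 *sport, u16 *dport)
{
	if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
		*sport = val;		/* low 16 bits rewritten */
	else
		*dport = val >> 16;	/* high 16 bits rewritten */
}

/*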
*/ for (i = 0; next[i].jump; i++) { - if (next[i].offoff != cls->knode.sel->offoff || - next[i].shift != cls->knode.sel->offshift || - next[i].mask != cls->knode.sel->offmask || - next[i].offset != cls->knode.sel->off) + if (next[i].sel.offoff != cls->knode.sel->offoff || + next[i].sel.offshift != cls->knode.sel->offshift || + next[i].sel.offmask != cls->knode.sel->offmask || + next[i].sel.off != cls->knode.sel->off) continue; /* Found a possible candidate. Find a key that @@ -245,9 +245,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) val = cls->knode.sel->keys[j].val; mask = cls->knode.sel->keys[j].mask; - if (next[i].match_off == off && - next[i].match_val == val && - next[i].match_mask == mask) { + if (next[i].key.off == off && + next[i].key.val == val && + next[i].key.mask == mask) { found = true; break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index a4b99edcc339..141085e159e5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -38,12 +38,12 @@ struct cxgb4_match_field { int off; /* Offset from the beginning of the header to match */ /* Fill the value/mask pair in the spec if matched */ - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); }; /* IPv4 match fields */ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 16) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { u32 mask_val; u8 frag_val; @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 16) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { /* IPv6 match fields */ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 20) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 8) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; @@ -129,7 +129,7 @@ static inline 
int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[4], &val, sizeof(u32)); memcpy(&f->mask.fip[4], &mask, sizeof(u32)); @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[8], &val, sizeof(u32)); memcpy(&f->mask.fip[8], &mask, sizeof(u32)); @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[12], &val, sizeof(u32)); memcpy(&f->mask.fip[12], &mask, sizeof(u32)); @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[4], &val, sizeof(u32)); memcpy(&f->mask.lip[4], &mask, sizeof(u32)); @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[8], &val, sizeof(u32)); memcpy(&f->mask.lip[8], &mask, sizeof(u32)); @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[12], &val, sizeof(u32)); memcpy(&f->mask.lip[12], &mask, sizeof(u32)); @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { /* TCP/UDP match */ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.fport = ntohl(val) >> 16; f->mask.fport = ntohl(mask) >> 16; @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { }; struct cxgb4_next_header { - unsigned int offset; /* Offset to next header */ - /* offset, shift, and mask added to offset above + /* Offset, shift, and mask added to beginning of the header * to get to next header. Useful when using a header * field's value to jump to next header such as IHL field * in IPv4 header. */ - unsigned int offoff; - u32 shift; - u32 mask; - /* match criteria to make this jump */ - unsigned int match_off; - u32 match_val; - u32 match_mask; + struct tc_u32_sel sel; + struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; }; @@ -258,26 +252,74 @@ struct cxgb4_next_header { * IPv4 header. 
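*/

/*
 * Aside -- illustrative only: embedding tc_u32_sel/tc_u32_key in
 * cxgb4_next_header lets the driver compare its jump descriptors
 * field-for-field against exactly what the TC core hands down,
 * instead of keeping a parallel set of offset/shift/mask members.
 * The .val/.mask constants in the tables below are pre-converted
 * with cpu_to_be32() so they compare directly against the rule's
 * big-endian keys -- e.g. 0x00060000/0x00ff0000 picks out
 * IPPROTO_TCP (6) in the IPv4 protocol byte.  The comparison then
 * reads:
 */
static bool example_jump_matches(const struct cxgb4_next_header *next,
				 const struct tc_cls_u32_offload *cls)
{
	return next->sel.offoff == cls->knode.sel->offoff &&
	       next->sel.offshift == cls->knode.sel->offshift &&
	       next->sel.offmask == cls->knode.sel->offmask &&
	       next->sel.off == cls->knode.sel->off;
}

/*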
*/ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, - .jump = cxgb4_tcp_fields }, - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00060000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00110000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header * to get to transport layer header. */ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, - .jump = cxgb4_tcp_fields }, - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00000600), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00001100), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; struct cxgb4_link { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c index 3de8a5e83b6c..d7fefdbf3e57 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = { int cxgb4_thermal_init(struct adapter *adap) { struct ch_thermal *ch_thermal = &adap->ch_thermal; + char ch_tz_name[THERMAL_NAME_LENGTH]; int num_trip = CXGB4_NUM_TRIPS; u32 param, val; int ret; @@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap) ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; } - ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name); + ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip, 0, adap, &cxgb4_thermal_ops, NULL, 0, 0); @@ -97,7 +99,9 @@ int cxgb4_thermal_init(struct adapter *adap) int cxgb4_thermal_remove(struct adapter *adap) { - if (adap->ch_thermal.tzdev) + if (adap->ch_thermal.tzdev) { thermal_zone_device_unregister(adap->ch_thermal.tzdev); + adap->ch_thermal.tzdev = NULL; + } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index cee582e36134..6b71ec33bf14 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -44,6 +44,9 @@ #define MAX_ULD_QSETS 16 +/* ulp_mem_io + ulptx_idata + payload + padding */ +#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8) + /* CPL message priority levels */ enum { CPL_PRIORITY_DATA = 0, /* data messages 
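*/

/*
 * Aside -- a sketch, not part of the patch: the new ceiling above is
 * just the sum of the fixed headers plus the largest payload a ULP_TX
 * work request can carry inline,
 *
 *	32 (ulp_mem_io) + 8 (ulptx_idata) + 256 (payload) + 8 (pad)
 *		= 304 bytes,
 *
 * so an immediate-data check for such a WR reduces to:
 */
static inline bool ulptx_fits_immediate(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_ULPTX_WR_LEN;	/* 304 */
}

/*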
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index e6fe2870137b..a440c1cf0b61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -506,41 +506,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev, } EXPORT_SYMBOL(cxgb4_select_ntuple); -/* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the device. - */ -static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&e->arpq)) != NULL) { - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - spin_unlock(&e->lock); - if (cb->arp_err_handler) - cb->arp_err_handler(cb->handle, skb); - else - t4_ofld_send(adap, skb); - spin_lock(&e->lock); - } -} - /* * Called when the host's neighbor layer makes a change to some entry that is * loaded into the HW L2 table. */ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { - struct l2t_entry *e; - struct sk_buff_head *arpq = NULL; - struct l2t_data *d = adap->l2t; unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(d, addr, addr_len, ifidx); + int hash, ifidx = neigh->dev->ifindex; + struct sk_buff_head *arpq = NULL; + struct l2t_data *d = adap->l2t; + struct l2t_entry *e; + hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (!addreq(e, addr) && e->ifindex == ifidx) { @@ -573,8 +552,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - if (arpq) - handle_failed_resolution(adap, e); + if (arpq) { + struct sk_buff *skb; + + /* Called when address resolution fails for an L2T + * entry to handle packets on the arpq head. If a + * packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + spin_unlock(&e->lock); + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + spin_lock(&e->lock); + } + } spin_unlock_bh(&e->lock); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 3a45ac8f0e01..57bf10b4d80c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2158,17 +2158,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) * @skb: the packet * * Returns true if a packet can be sent as an offload WR with immediate - * data. We currently use the same limit as for Ethernet packets. + * data. + * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field. + * However, FW_ULPTX_WR commands have a 256 byte immediate only + * payload limit. 
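*/

/*
 * Aside -- sketch of the arpq-drain loop folded into t4_l2t_update()
 * above (illustrative, not part of the patch): the entry lock is
 * dropped around each callback so the handler runs without e->lock
 * held, then retaken before the next dequeue.
 */
static void example_drain_arpq(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		spin_unlock(&e->lock);
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
		spin_lock(&e->lock);
	}
}

/*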
*/ static inline int is_ofld_imm(const struct sk_buff *skb) { struct work_request_hdr *req = (struct work_request_hdr *)skb->data; unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); - if (opcode == FW_CRYPTO_LOOKASIDE_WR) + if (unlikely(opcode == FW_ULPTX_WR)) + return skb->len <= MAX_IMM_ULPTX_WR_LEN; + else if (opcode == FW_CRYPTO_LOOKASIDE_WR) return skb->len <= SGE_MAX_WR_LEN; else - return skb->len <= MAX_IMM_TX_PKT_LEN; + return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; } /** @@ -2441,6 +2446,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } @@ -2816,7 +2822,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, hwtstamps = skb_hwtstamps(skb); memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); return RX_PTP_PKT_SUC; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3f6813daf3c1..42374859b9d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2093,7 +2093,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1274, + 0x11fc, 0x123c, + 0x1254, 0x1274, 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, @@ -3499,7 +3500,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3528,8 +3529,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3560,7 +3561,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } @@ -3748,7 +3749,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); - if (ret < 0) + if (ret) return ret; *phy_fw_ver = val; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 0c5373462ced..0b1b5f9c67d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a957a6e4d4c4..b0519c326692 100644 
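/*
 * Aside -- not part of the patch: the t4_prep_fw()/t4_phy_fw_ver()
 * fixes above all restore one kernel convention -- helpers already
 * return a negative errno, so negating the result again
 * (ret = -t4_read_flash(...)) or returning a bare positive EINVAL
 * hands callers a nonsense positive "error".  With hypothetical
 * helper names, the corrected shape is:
 */
static int example_errno_shape(struct adapter *adap)
{
	int ret;

	ret = helper_returning_negative_errno(adap);	/* hypothetical */
	if (ret)
		return ret;		/* pass through; don't negate */

	if (version_mismatch(adap))	/* hypothetical */
		return -EINVAL;		/* -EINVAL, never bare EINVAL */

	return 0;
}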
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1900,6 +1900,9 @@ #define MAC_PORT_CFG2_A 0x818 +#define MAC_PORT_PTP_SUM_LO_A 0x990 +#define MAC_PORT_PTP_SUM_HI_A 0x994 + #define MPS_CMN_CTL_A 0x9000 #define COUNTPAUSEMCRX_S 5 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h index 1b9afb192f7f..358f0fe40270 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h @@ -50,6 +50,10 @@ #define TCB_RQ_START_M 0x3ffffffULL #define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S) +#define TF_DROP_S 22 +#define TF_DIRECT_STEER_S 23 +#define TF_LPBK_S 59 + #define TF_CCTRL_ECE_S 60 #define TF_CCTRL_CWR_S 61 #define TF_CCTRL_RFR_S 62 diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 0dd64acd2a3f..08cac1bfacaf 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -171,6 +171,7 @@ struct enic { u16 num_vfs; #endif spinlock_t enic_api_lock; + bool enic_api_busy; struct enic_port_profile *pp; /* work queue cache line section */ diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c index b161f24522b8..b028ea2dec2b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_api.c +++ b/drivers/net/ethernet/cisco/enic/enic_api.c @@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, struct vnic_dev *vdev = enic->vdev; spin_lock(&enic->enic_api_lock); + while (enic->enic_api_busy) { + spin_unlock(&enic->enic_api_lock); + cpu_relax(); + spin_lock(&enic->enic_api_lock); + } + spin_lock_bh(&enic->devcmd_lock); vnic_dev_cmd_proxy_by_index_start(vdev, vf); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 6e2ab10ad2e6..8314102002b0 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2142,8 +2142,6 @@ static int enic_dev_wait(struct vnic_dev *vdev, int done; int err; - BUG_ON(in_interrupt()); - err = start(vdev, arg); if (err) return err; @@ -2331,6 +2329,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic) rss_hash_bits, rss_base_cpu, rss_enable); } +static void enic_set_api_busy(struct enic *enic, bool busy) +{ + spin_lock(&enic->enic_api_lock); + enic->enic_api_busy = busy; + spin_unlock(&enic->enic_api_lock); +} + static void enic_reset(struct work_struct *work) { struct enic *enic = container_of(work, struct enic, reset); @@ -2340,7 +2345,9 @@ static void enic_reset(struct work_struct *work) rtnl_lock(); - spin_lock(&enic->enic_api_lock); + /* Stop any activity from infiniband */ + enic_set_api_busy(enic, true); + enic_stop(enic->netdev); enic_dev_soft_reset(enic); enic_reset_addr_lists(enic); @@ -2348,7 +2355,10 @@ static void enic_reset(struct work_struct *work) enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); - spin_unlock(&enic->enic_api_lock); + + /* Allow infiniband to fiddle with the device again */ + enic_set_api_busy(enic, false); + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); rtnl_unlock(); @@ -2360,7 +2370,9 @@ static void enic_tx_hang_reset(struct work_struct *work) rtnl_lock(); - spin_lock(&enic->enic_api_lock); + /* Stop any activity from infiniband */ + enic_set_api_busy(enic, true); + enic_dev_hang_notify(enic); enic_stop(enic->netdev); enic_dev_hang_reset(enic); @@ -2369,7 +2381,10 @@ static void 
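/*
 * Aside -- sketch of the handshake introduced above, not part of the
 * patch: the reset paths cannot keep enic_api_lock held across
 * enic_stop()/enic_open(), so they publish a busy flag under the lock
 * instead, and the devcmd proxy spins until it clears:
 */
static void example_wait_while_busy(struct enic *enic)
{
	spin_lock(&enic->enic_api_lock);
	while (enic->enic_api_busy) {
		spin_unlock(&enic->enic_api_lock);
		cpu_relax();
		spin_lock(&enic->enic_api_lock);
	}
	/* ... issue the proxied devcmd with the lock held ... */
	spin_unlock(&enic->enic_api_lock);
}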
enic_tx_hang_reset(struct work_struct *work) enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); - spin_unlock(&enic->enic_api_lock); + + /* Allow infiniband to fiddle with the device again */ + enic_set_api_busy(enic, false); + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 2814b96751b4..c9fb1ec625d8 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2388,7 +2388,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; @@ -2445,7 +2445,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); - return PTR_ERR(port->reset); + ret = PTR_ERR(port->reset); + goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); @@ -2501,23 +2502,24 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port_names[port->id], port); if (ret) - return ret; + goto unprepare; ret = register_netdev(netdev); - if (!ret) { - netdev_info(netdev, - "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", - port->irq, &dmares->start, - &gmacres->start); - ret = gmac_setup_phy(netdev); - if (ret) - netdev_info(netdev, - "PHY init failed, deferring to ifup time\n"); - return 0; - } + if (ret) + goto unprepare; - port->netdev = NULL; - free_netdev(netdev); + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) + netdev_info(netdev, + "PHY init failed, deferring to ifup time\n"); + return 0; + +unprepare: + clk_disable_unprepare(port->pclk); return ret; } @@ -2526,7 +2528,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 70060c51854f..1d5d8984b49a 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -134,6 +134,8 @@ struct board_info { u32 wake_state; int ip_summed; + + struct regulator *power_supply; }; /* debug code */ @@ -1454,7 +1456,7 @@ dm9000_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "failed to request reset gpio %d: %d\n", reset_gpios, ret); - return -ENODEV; + goto out_regulator_disable; } /* According to manual PWRST# Low Period Min 1ms */ @@ -1466,14 +1468,18 @@ dm9000_probe(struct platform_device *pdev) if (!pdata) { pdata = dm9000_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto out_regulator_disable; + } } /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + ret = -ENOMEM; + goto out_regulator_disable; + } SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1484,6 +1490,8 @@ dm9000_probe(struct platform_device *pdev) db->dev = &pdev->dev; db->ndev = ndev; + if (!IS_ERR(power)) + db->power_supply = power; 
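/*
 * Aside -- not part of the patch: switching gemini above to
 * devm_alloc_etherdev_mqs() ties the net_device's lifetime to the
 * struct device, which is why the error paths and the remove hook no
 * longer call free_netdev().  The usual shape:
 */
static int example_devm_probe(struct platform_device *pdev)
{
	struct net_device *netdev;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct gemini_ethernet_port),
					 TX_QUEUE_NUM, TX_QUEUE_NUM);
	if (!netdev)
		return -ENOMEM;

	/* devres frees the netdev when the device is unbound */
	return 0;
}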
spin_lock_init(&db->lock); mutex_init(&db->addr_lock); @@ -1506,7 +1514,7 @@ dm9000_probe(struct platform_device *pdev) goto out; } - db->irq_wake = platform_get_irq(pdev, 1); + db->irq_wake = platform_get_irq_optional(pdev, 1); if (db->irq_wake >= 0) { dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); @@ -1708,6 +1716,10 @@ out: dm9000_release_board(pdev, db); free_netdev(ndev); +out_regulator_disable: + if (!IS_ERR(power)) + regulator_disable(power); + return ret; } @@ -1765,10 +1777,13 @@ static int dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct board_info *dm = to_dm9000_board(ndev); unregister_netdev(ndev); - dm9000_release_board(pdev, netdev_priv(ndev)); + dm9000_release_board(pdev, dm); free_netdev(ndev); /* free device structure */ + if (dm->power_supply) + regulator_disable(dm->power_supply); dev_dbg(&pdev->dev, "released and freed device\n"); return 0; diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index f1a2da15dd0a..b14d93da242f 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi #define DSL CONFIG_DE2104X_DSL #endif -#define DE_RX_RING_SIZE 64 +#define DE_RX_RING_SIZE 128 #define DE_TX_RING_SIZE 64 #define DE_RING_BYTES \ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ea4f17f5cce7..590d20ca891c 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1207,7 +1207,7 @@ static int ethoc_probe(struct platform_device *pdev) ret = mdiobus_register(priv->mdio); if (ret) { dev_err(&netdev->dev, "failed to register MDIO bus\n"); - goto free2; + goto free3; } ret = ethoc_mdio_probe(netdev); @@ -1239,6 +1239,7 @@ error2: netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); +free3: mdiobus_free(priv->mdio); free2: clk_disable_unprepare(priv->clk); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 96e9565f1e08..4050f81f788c 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1307,6 +1307,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) */ if (unlikely(priv->need_mac_restart)) { ftgmac100_start_hw(priv); + priv->need_mac_restart = false; /* Re-enable "bad" interrupts */ iowrite32(FTGMAC100_INT_BAD, @@ -1807,6 +1808,11 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; + /* Disable ast2600 problematic HW arbitration */ + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) { + iowrite32(FTGMAC100_TM_DEFAULT, + priv->base + FTGMAC100_OFFSET_TM); + } } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); @@ -1886,6 +1892,8 @@ static int ftgmac100_probe(struct platform_device *pdev) return 0; err_ncsi_dev: + if (priv->ndev) + ncsi_unregister_dev(priv->ndev); err_register_netdev: ftgmac100_destroy_mdio(netdev); err_setup_mdio: @@ -1906,6 +1914,8 @@ static int ftgmac100_remove(struct platform_device *pdev) netdev = platform_get_drvdata(pdev); priv = netdev_priv(netdev); + if (priv->ndev) + ncsi_unregister_dev(priv->ndev); unregister_netdev(netdev); clk_disable_unprepare(priv->clk); diff --git a/drivers/net/ethernet/faraday/ftgmac100.h 
b/drivers/net/ethernet/faraday/ftgmac100.h index e5876a3fda91..63b3e02fab16 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.h +++ b/drivers/net/ethernet/faraday/ftgmac100.h @@ -169,6 +169,14 @@ #define FTGMAC100_MACCR_FAST_MODE (1 << 19) #define FTGMAC100_MACCR_SW_RST (1 << 31) +/* + * test mode control register + */ +#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28) +#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27) +#define FTGMAC100_TM_DEFAULT \ + (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV) + /* * PHY control register */ diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 6a7e8993119f..941c7e667afc 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -77,6 +77,7 @@ config UCC_GETH depends on QUICC_ENGINE select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY ---help--- This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. @@ -90,6 +91,7 @@ config GIANFAR depends on HAS_DMA select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY select CRC32 ---help--- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 3b325733a4f8..0a54c7e0e4ae 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH tristate "DPAA Ethernet" depends on FSL_DPAA && FSL_FMAN select PHYLIB + select FIXED_PHY select FSL_FMAN_MAC ---help--- Data Path Acceleration Architecture Ethernet driver, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index bf5add954181..7b2d033a71a0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); free_pages((unsigned long)sg_vaddr, 0); @@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* Get the address and length from the S/G entry */ sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); - dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, sg_addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); @@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, (page_address(page) - page_address(head_page)); skb_add_rx_frag(skb, i - 1, head_page, page_offset, - sg_length, DPAA2_ETH_RX_BUF_SIZE); + sg_length, priv->rx_buf_size); } if (dpaa2_sg_is_final(sge)) @@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, DMA_BIDIRECTIONAL); free_pages((unsigned long)vaddr, 0); } @@ -331,7 +331,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, break; case XDP_REDIRECT: dma_unmap_page(priv->net_dev->dev.parent, addr, - DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); + priv->rx_buf_size, DMA_BIDIRECTIONAL); 
ch->buf_count--; xdp.data_hard_start = vaddr; err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); @@ -370,7 +370,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, trace_dpaa2_rx_fd(priv->net_dev, fd); vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); fas = dpaa2_get_fas(vaddr, false); @@ -389,13 +389,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, return; } - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); - dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); free_pages((unsigned long)vaddr, 0); @@ -963,7 +963,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, if (!page) goto err_alloc; - addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -973,7 +973,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* tracing point */ trace_dpaa2_eth_buf_seed(priv->net_dev, page, DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, DPAA2_ETH_RX_BUF_SIZE, + addr, priv->rx_buf_size, bpid); } @@ -1680,7 +1680,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) int mfl, linear_mfl; mfl = DPAA2_ETH_L2_MAX_FRM(mtu); - linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - + linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; if (mfl > linear_mfl) { @@ -1772,11 +1772,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) if (prog && !xdp_mtu_valid(priv, dev->mtu)) return -EINVAL; - if (prog) { - prog = bpf_prog_add(prog, priv->num_channels); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, priv->num_channels); up = netif_running(dev); need_update = (!!priv->xdp_prog != !!prog); @@ -1981,7 +1978,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev, int i; if (type != TC_SETUP_QDISC_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_queues = dpaa2_eth_queue_count(priv); @@ -1993,7 +1990,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev, if (num_tc > dpaa2_eth_tc_count(priv)) { netdev_err(net_dev, "Max %d traffic classes supported\n", dpaa2_eth_tc_count(priv)); - return -EINVAL; + return -EOPNOTSUPP; } if (!num_tc) { @@ -2090,7 +2087,7 @@ close: free: fsl_mc_object_free(dpcon); - return NULL; + return ERR_PTR(err); } static void free_dpcon(struct dpaa2_eth_priv *priv, @@ -2114,8 +2111,8 @@ alloc_channel(struct dpaa2_eth_priv *priv) return NULL; channel->dpcon = setup_dpcon(priv); - if (IS_ERR_OR_NULL(channel->dpcon)) { - err = PTR_ERR_OR_ZERO(channel->dpcon); + if (IS_ERR(channel->dpcon)) { + err = PTR_ERR(channel->dpcon); goto err_setup; } @@ -2432,6 +2429,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) else rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + /* We need to ensure that the buffer size seen by WRIOP is a multiple + * of 64 or 256 bytes depending on the WRIOP version. 
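*/

/*
 * Aside -- a worked example, not part of the patch: for a
 * power-of-two alignment a, ALIGN_DOWN(x, a) is just x & ~(a - 1),
 * e.g. (with a hypothetical raw size of 2112 bytes)
 *
 *	ALIGN_DOWN(2112, 256) == 2048
 *	ALIGN_DOWN(2112,  64) == 2112
 */
static inline u32 example_align_down(u32 x, u32 a)
{
	return x & ~(a - 1);	/* a must be a power of two */
}

/*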
+ */ + priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); + /* tx buffer */ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; buf_layout.pass_timestamp = true; @@ -2649,8 +2651,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); - if (!priv->cls_rules) + if (!priv->cls_rules) { + err = -ENOMEM; goto close; + } return 0; @@ -3096,7 +3100,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) pools_params.num_dpbp = 1; pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; pools_params.pools[0].backup_pool = 0; - pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + pools_params.pools[0].buffer_size = priv->rx_buf_size; err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); if (err) { dev_err(dev, "dpni_set_pools() failed\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 8a0e65b3267f..4570ed53c6c7 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -373,6 +373,7 @@ struct dpaa2_eth_priv { u16 tx_data_offset; struct fsl_mc_device *dpbp_dev; + u16 rx_buf_size; u16 bpid; struct iommu_domain *iommu_domain; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index dc9a6c36cac0..e4d9fb0e72bf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -590,7 +590,7 @@ static int num_rules(struct dpaa2_eth_priv *priv) static int update_cls_rule(struct net_device *net_dev, struct ethtool_rx_flow_spec *new_fs, - int location) + unsigned int location) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_cls_rule *rule; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index b6ff89307409..b77eaf31bd4e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -254,7 +254,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg(v->rbier, 0); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); napi_schedule_irqoff(&v->napi); @@ -290,7 +290,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) /* enable interrupts */ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), ENETC_TBIER_TXTIE); @@ -942,7 +942,7 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) enetc_free_tx_ring(priv->tx_ring[i]); } -static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) { int size = cbdr->bd_count * sizeof(struct enetc_cbd); @@ -963,7 +963,7 @@ static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) return 0; } -static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) { int size = cbdr->bd_count * sizeof(struct enetc_cbd); @@ -971,7 +971,7 @@ static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) cbdr->bd_base = NULL; } -static void 
enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) { /* set CBDR cache attributes */ enetc_wr(hw, ENETC_SICAR2, @@ -991,7 +991,7 @@ static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) cbdr->cir = hw->reg + ENETC_SICBDRCIR; } -static void enetc_clear_cbdr(struct enetc_hw *hw) +void enetc_clear_cbdr(struct enetc_hw *hw) { enetc_wr(hw, ENETC_SICBDRMR, 0); } @@ -1016,13 +1016,12 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) return 0; } -static int enetc_configure_si(struct enetc_ndev_priv *priv) +int enetc_configure_si(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; struct enetc_hw *hw = &si->hw; int err; - enetc_setup_cbdr(hw, &si->cbd_ring); /* set SI cache attributes */ enetc_wr(hw, ENETC_SICAR0, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); @@ -1068,6 +1067,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) if (err) return err; + enetc_setup_cbdr(&si->hw, &si->cbd_ring); + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), GFP_KERNEL); if (!priv->cls_rules) { @@ -1075,14 +1076,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) goto err_alloc_cls; } - err = enetc_configure_si(priv); - if (err) - goto err_config_si; - return 0; -err_config_si: - kfree(priv->cls_rules); err_alloc_cls: enetc_clear_cbdr(&si->hw); enetc_free_cbdr(priv->dev, &si->cbd_ring); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 541b4e2073fe..b8801a2b6a02 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -221,6 +221,7 @@ void enetc_get_si_caps(struct enetc_si *si); void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); void enetc_free_si_resources(struct enetc_ndev_priv *priv); +int enetc_configure_si(struct enetc_ndev_priv *priv); int enetc_open(struct net_device *ndev); int enetc_close(struct net_device *ndev); @@ -236,6 +237,10 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void enetc_set_ethtool_ops(struct net_device *ndev); /* control buffer descriptor ring (CBDR) */ +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr); +void enetc_clear_cbdr(struct enetc_hw *hw); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index fcb52efec075..89121d7ce3e6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -141,8 +141,8 @@ static const struct { { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, - { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" }, - { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, + { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" }, + { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" }, { ENETC_PM0_ROVR, "MAC rx oversized packets" }, { ENETC_PM0_RJBR, "MAC rx jabber packets" }, { ENETC_PM0_RFRG, "MAC rx fragment packets" }, @@ 
-161,9 +161,13 @@ static const struct { { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, { ENETC_PM0_TPKT, "MAC tx packets" }, { ENETC_PM0_TUND, "MAC tx undersized packets" }, + { ENETC_PM0_T64, "MAC tx 64 byte packets" }, { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, + { ENETC_PM0_T255, "MAC tx 128-255 byte packets" }, + { ENETC_PM0_T511, "MAC tx 256-511 byte packets" }, { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, - { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, + { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" }, + { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" }, { ENETC_PM0_TCNP, "MAC tx control packets" }, { ENETC_PM0_TDFR, "MAC tx deferred packets" }, { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 88276299f447..fac80831d532 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -181,6 +181,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ #define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ #define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSCAPR 0x1404 +#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) #define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ #define ENETC_PSIVLANFMR 0x1700 #define ENETC_PSIVLANFMR_VS BIT(0) @@ -239,8 +241,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_R255 0x8180 #define ENETC_PM0_R511 0x8188 #define ENETC_PM0_R1023 0x8190 -#define ENETC_PM0_R1518 0x8198 -#define ENETC_PM0_R1519X 0x81A0 +#define ENETC_PM0_R1522 0x8198 +#define ENETC_PM0_R1523X 0x81A0 #define ENETC_PM0_ROVR 0x81A8 #define ENETC_PM0_RJBR 0x81B0 #define ENETC_PM0_RFRG 0x81B8 @@ -259,9 +261,13 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_TBCA 0x8250 #define ENETC_PM0_TPKT 0x8260 #define ENETC_PM0_TUND 0x8268 +#define ENETC_PM0_T64 0x8270 #define ENETC_PM0_T127 0x8278 +#define ENETC_PM0_T255 0x8280 +#define ENETC_PM0_T511 0x8288 #define ENETC_PM0_T1023 0x8290 -#define ENETC_PM0_T1518 0x8298 +#define ENETC_PM0_T1522 0x8298 +#define ENETC_PM0_T1523X 0x82A0 #define ENETC_PM0_TCNP 0x82C0 #define ENETC_PM0_TDFR 0x82D0 #define ENETC_PM0_TMCOL 0x82D8 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index b73421c3e25b..ac62464e0416 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -809,6 +809,71 @@ static void enetc_of_put_phy(struct enetc_ndev_priv *priv) of_node_put(priv->phy_node); } +/* Initialize the entire shared memory for the flow steering entries + * of this port (PF + VFs) + */ +static int enetc_init_port_rfs_memory(struct enetc_si *si) +{ + struct enetc_cmd_rfse rfse = {0}; + struct enetc_hw *hw = &si->hw; + int num_rfs, i, err = 0; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val); + + for (i = 0; i < num_rfs; i++) { + err = enetc_set_fs_entry(si, &rfse, i); + if (err) + break; + } + + return err; +} + +static int enetc_init_port_rss_memory(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + int num_rss, err; + int *rss_table; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRSSCAPR); + num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val); + if (!num_rss) + return 0; + + rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + err = enetc_set_rss_table(si, rss_table, num_rss); + 
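/*
 * Aside -- not part of the patch: ENETC_PRSSCAPR_GET_NUM_RSS()
 * decodes the port's RSS capability as 2^(PRSSCAPR[3:0]) * 32 table
 * entries, and the kcalloc() above deliberately leaves the table
 * zeroed so every hash bucket initially maps to group 0.  Decode
 * sketch:
 */
static unsigned int example_num_rss(struct enetc_hw *hw)
{
	u32 val = enetc_port_rd(hw, ENETC_PRSSCAPR);

	/* BIT(val & 0xf) * 32: a low nibble of 1 yields 64 entries */
	return ENETC_PRSSCAPR_GET_NUM_RSS(val);
}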
+ kfree(rss_table); + + return err; +} + +static void enetc_init_unused_port(struct enetc_si *si) +{ + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + int err; + + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; + err = enetc_alloc_cbdr(dev, &si->cbd_ring); + if (err) + return; + + enetc_setup_cbdr(hw, &si->cbd_ring); + + enetc_init_port_rfs_memory(si); + enetc_init_port_rss_memory(si); + + enetc_clear_cbdr(hw); + enetc_free_cbdr(dev, &si->cbd_ring); +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -818,11 +883,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, struct enetc_pf *pf; int err; - if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { - dev_info(&pdev->dev, "device is disabled, skipping\n"); - return -ENODEV; - } - err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); if (err) { dev_err(&pdev->dev, "PCI probing failed\n"); @@ -836,6 +896,13 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_map_pf_space; } + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + enetc_init_unused_port(si); + dev_info(&pdev->dev, "device is disabled, skipping\n"); + err = -ENODEV; + goto err_device_disabled; + } + pf = enetc_si_priv(si); pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); @@ -863,6 +930,24 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto err_init_port_rfs; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto err_init_port_rss; + } + + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + err = enetc_alloc_msix(priv); if (err) { dev_err(&pdev->dev, "MSIX alloc failed\n"); @@ -885,14 +970,19 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: + enetc_mdio_remove(pf); enetc_of_put_phy(priv); enetc_free_msix(priv); +err_config_si: +err_init_port_rss: +err_init_port_rfs: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: +err_device_disabled: err_map_pf_space: enetc_pci_remove(pdev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index ebd21bf4cfa1..3a8c2049b417 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -189,6 +189,12 @@ static int enetc_vf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + err = enetc_alloc_msix(priv); if (err) { dev_err(&pdev->dev, "MSIX alloc failed\n"); @@ -208,6 +214,7 @@ static int enetc_vf_probe(struct pci_dev *pdev, err_reg_netdev: enetc_free_msix(priv); +err_config_si: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f79e57f735b3..d89568f810bc 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q { struct sk_buff *rx_skbuff[RX_RING_SIZE]; }; +struct fec_stop_mode_gpr { + struct regmap *gpr; + u8 reg; + u8 bit; +}; + /* The FEC buffer descriptors track the ring buffers. 
The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. @@ -562,6 +568,7 @@ struct fec_enet_private { int hwts_tx_en; struct delayed_work time_keep; struct regulator *reg_phy; + struct fec_stop_mode_gpr stop_gpr; unsigned int tx_align; unsigned int rx_align; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3fc8a66e4f41..fd7fc6f20c9d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -62,6 +62,8 @@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #include <soc/imx/cpuidle.h> #include <asm/cacheflush.h> @@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_OPD_V 0xFFF0 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ +struct fec_devinfo { + u32 quirks; + u8 stop_gpr_reg; + u8 stop_gpr_bit; +}; + +static const struct fec_devinfo fec_imx25_info = { + .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | + FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx27_info = { + .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx28_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx6q_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC, + .stop_gpr_reg = 0x34, + .stop_gpr_bit = 27, +}; + +static const struct fec_devinfo fec_mvf600_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, +}; + +static const struct fec_devinfo fec_imx6x_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, +}; + +static const struct fec_devinfo fec_imx6ul_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | + FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_COALESCE, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | - FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx25_info, }, { .name = "imx27-fec", - .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx27_info, }, { .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx28_info, }, { .name = "imx6q-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | - FEC_QUIRK_HAS_RACC, + .driver_data = (kernel_ulong_t)&fec_imx6q_info, }, { .name = "mvf600-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, + .driver_data = (kernel_ulong_t)&fec_mvf600_info, }, { .name = "imx6sx-fec", - .driver_data = 
FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + .driver_data = (kernel_ulong_t)&fec_imx6x_info, }, { .name = "imx6ul-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | - FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_COALESCE, + .driver_data = (kernel_ulong_t)&fec_imx6ul_info, }, { /* sentinel */ } @@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev) } +static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) +{ + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; + + if (stop_gpr->gpr) { + if (enabled) + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), + BIT(stop_gpr->bit)); + else + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), 0); + } else if (pdata && pdata->sleep_mode_enable) { + pdata->sleep_mode_enable(enabled); + } +} + static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); u32 val; @@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev) val = readl(fep->hwp + FEC_ECNTRL); val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); - - if (pdata && pdata->sleep_mode_enable) - pdata->sleep_mode_enable(true); + fec_enet_stop_mode(fep, true); } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); @@ -1892,6 +1945,27 @@ out: return ret; } +static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; + + if (phy_dev) { + phy_reset_after_clk_enable(phy_dev); + } else if (fep->phy_node) { + /* + * If the PHY still is not bound to the MAC, but there is an + * OF PHY node and a matching PHY device instance already, + * use the OF PHY node to obtain the PHY device instance, + * and then use that PHY device instance when triggering + * the PHY reset. + */ + phy_dev = of_phy_find_device(fep->phy_node); + phy_reset_after_clk_enable(phy_dev); + put_device(&phy_dev->mdio.dev); + } +} + static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1918,7 +1992,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) if (ret) goto failed_clk_ref; - phy_reset_after_clk_enable(ndev->phydev); + fec_enet_phy_reset_after_clk_enable(ndev); } else { clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -2929,16 +3003,16 @@ fec_enet_open(struct net_device *ndev) /* Init MAC prior to mii bus probe */ fec_restart(ndev); - /* Probe and connect to PHY when open the interface */ - ret = fec_enet_mii_probe(ndev); - if (ret) - goto err_enet_mii_probe; - /* Call phy_reset_after_clk_enable() again if it failed during * phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/ if (reset_again) - phy_reset_after_clk_enable(ndev->phydev); + fec_enet_phy_reset_after_clk_enable(ndev); + + /* Probe and connect to PHY when opening the interface */ + ret = fec_enet_mii_probe(ndev); + if (ret) + goto err_enet_mii_probe; if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_used(); @@ -3398,6 +3472,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev) return irq_cnt; } +static int fec_enet_init_stop_mode(struct fec_enet_private *fep, + struct fec_devinfo *dev_info, + struct device_node *np) +{ + struct device_node *gpr_np; + int ret = 0; + + if (!dev_info) + return 0; + + gpr_np = of_parse_phandle(np, "gpr", 0); + if (!gpr_np) + return 0; + + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); + if (IS_ERR(fep->stop_gpr.gpr)) { + dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); + ret = PTR_ERR(fep->stop_gpr.gpr); + fep->stop_gpr.gpr = NULL; + goto out; + } + + fep->stop_gpr.reg = dev_info->stop_gpr_reg; + fep->stop_gpr.bit = dev_info->stop_gpr_bit; + +out: + of_node_put(gpr_np); + + return ret; +} + static int fec_probe(struct platform_device *pdev) { @@ -3412,6 +3517,7 @@ fec_probe(struct platform_device *pdev) int num_rx_qs; char irq_name[8]; int irq_cnt; + struct fec_devinfo *dev_info; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -3429,7 +3535,9 @@ fec_probe(struct platform_device *pdev) of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; - fep->quirks = pdev->id_entry->driver_data; + dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; + if (dev_info) + fep->quirks = dev_info->quirks; fep->netdev = ndev; fep->num_rx_queues = num_rx_qs; @@ -3463,6 +3571,10 @@ fec_probe(struct platform_device *pdev) if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + ret = fec_enet_init_stop_mode(fep, dev_info, np); + if (ret) + goto failed_stop_mode; + phy_node = of_parse_phandle(np, "phy-handle", 0); if (!phy_node && of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); @@ -3616,11 +3728,11 @@ failed_mii_init: failed_irq: failed_init: fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: @@ -3631,6 +3743,7 @@ failed_clk: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(phy_node); +failed_stop_mode: failed_phy: dev_id--; failed_ioremap: @@ -3708,7 +3821,6 @@ static int __maybe_unused fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; int ret; int val; @@ -3726,8 +3838,8 @@ static int __maybe_unused fec_resume(struct device *dev) goto failed_clk; } if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { - if (pdata && pdata->sleep_mode_enable) - pdata->sleep_mode_enable(false); + fec_enet_stop_mode(fep, false); + val = readl(fep->hwp + FEC_ECNTRL); val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 945643c02615..49fad118988b 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -382,9 +382,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns;
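/* Editor's annotation (sketch, not part of the patch): the hunk below adds a
 * clock-gating guard to fec_ptp_gettime(). timecounter_read() ends up reading
 * FEC registers, which is presumably only safe while the ipg clock is running,
 * so the added lines serialize on ptp_clk_mutex and fail fast while the clock
 * is off, taking the tmreg spinlock only around the actual counter read:
 *
 *	mutex_lock(&adapter->ptp_clk_mutex);
 *	if (!adapter->ptp_clk_on) {
 *		mutex_unlock(&adapter->ptp_clk_mutex);
 *		return -EINVAL;			// clock gated, no register access
 *	}
 *	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 *	ns = timecounter_read(&adapter->tc);	// may touch hardware
 *	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 *	mutex_unlock(&adapter->ptp_clk_mutex);
 */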
unsigned long flags; + mutex_lock(&adapter->ptp_clk_mutex); + /* Check the ptp clock */ + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); + return -EINVAL; + } spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); *ts = ns_to_timespec64(ns); diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 4c2fa13a7dd7..c8e434c8ab98 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 1ca543ac8f2c..d2de9ea80c43 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1..19f327efdaff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f..9088b4f4b4b8 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); @@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ee82ee1384eb..47f6fee1f396 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1756,6 +1756,7 
@@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index f75b9c11b2d2..ac5a281e0ec3 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index c8e5d889bd81..21de56345503 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = { }; module_platform_driver(fs_enet_bb_mdio_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 1582d82483ec..4e6a9c5d8af5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = { }; module_platform_driver(fs_enet_fec_mdio_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2580bcd85025..382a45d84cc3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -366,7 +366,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, static int gfar_set_mac_addr(struct net_device *dev, void *p) { - eth_mac_addr(dev, p); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; gfar_set_mac_for_addr(dev, 0, dev->dev_addr); @@ -751,8 +755,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) continue; err = gfar_parse_group(child, priv, model); - if (err) + if (err) { + of_node_put(child); goto err_grp_init; + } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); @@ -1824,20 +1830,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* make space for additional header when fcb is needed */ - if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { - struct sk_buff *skb_new; - - skb_new = skb_realloc_headroom(skb, fcb_len); - if (!skb_new) { + if (fcb_len) { + if (unlikely(skb_cow_head(skb, fcb_len))) { dev->stats.tx_errors++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - - if (skb->sk) - skb_set_owner_w(skb_new, skb->sk); - dev_consume_skb_any(skb); - skb = skb_new; } /* total number of fragments in the SKB */ @@ -2394,6 +2392,10 @@ static bool 
gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, if (lstatus & BD_LFLAG(RXBD_LAST)) size -= skb->len; + WARN(size < 0, "gianfar: rx fragment size underflow"); + if (size < 0) + return false; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rxb->page_offset + RXBUF_ALIGNMENT, size, GFAR_RXB_TRUESIZE); @@ -2556,6 +2558,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, if (lstatus & BD_LFLAG(RXBD_EMPTY)) break; + /* lost RXBD_LAST descriptor due to overrun */ + if (skb && + (lstatus & BD_LFLAG(RXBD_FIRST))) { + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + + /* can continue normally */ + } + /* order rx buffer descriptor reads */ rmb(); @@ -3375,7 +3388,7 @@ static int gfar_probe(struct platform_device *ofdev) if (dev->features & NETIF_F_IP_CSUM || priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - dev->needed_headroom = GMAC_FCB_LEN; + dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index f839fa94ebdd..beaf35c585d2 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -42,6 +42,7 @@ #include <soc/fsl/qe/ucc.h> #include <soc/fsl/qe/ucc_fast.h> #include <asm/machdep.h> +#include <net/sch_generic.h> #include "ucc_geth.h" @@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) static void ugeth_quiesce(struct ucc_geth_private *ugeth) { - /* Prevent any further xmits, plus detach the device. */ - netif_device_detach(ugeth->ndev); - - /* Wait for any current xmits to finish. */ - netif_tx_disable(ugeth->ndev); + /* Prevent any further xmits */ + netif_tx_stop_all_queues(ugeth->ndev); /* Disable the interrupt to avoid NAPI rescheduling. 
*/ disable_irq(ugeth->ug_info->uf_info.irq); @@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) { napi_enable(&ugeth->napi); enable_irq(ugeth->ug_info->uf_info.irq); - netif_device_attach(ugeth->ndev); + + /* allow to xmit again */ + netif_tx_wake_all_queues(ugeth->ndev); + __netdev_watchdog_up(ugeth->ndev); } /* Called every time the controller might need to be made @@ -3889,6 +3890,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); dev->mtu = 1500; + dev->max_mtu = 1518; ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); ugeth->phy_interface = phy_interface; @@ -3934,12 +3936,12 @@ static int ucc_geth_remove(struct platform_device* ofdev) struct device_node *np = ofdev->dev.of_node; unregister_netdev(dev); - free_netdev(dev); ucc_geth_memclean(ugeth); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(ugeth->ug_info->tbi_node); of_node_put(ugeth->ug_info->phy_node); + free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index a86a42131fc7..b00fbef612cf 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -576,7 +576,14 @@ struct ucc_geth_tx_global_pram { u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ u32 tqptr; /* a base pointer to the Tx Queues Memory Region */ - u8 res2[0x80 - 0x74]; + u8 res2[0x78 - 0x74]; + u64 snums_en; + u32 l2l3baseptr; /* top byte consists of a few other bit fields */ + + u16 mtu[8]; + u8 res3[0xa8 - 0x94]; + u32 wrrtablebase; /* top byte is reserved */ + u8 res4[0xc0 - 0xac]; } __packed; /* structure representing Extended Filtering Global Parameters in PRAM */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index eb69e5c81a4d..7516f6823090 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1677,8 +1677,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev) for (j = 0; j < fetch_num; j++) { /* alloc one skb and init */ skb = hns_assemble_skb(ndev); - if (!skb) + if (!skb) { + ret = -ENOMEM; goto out; + } rd = &tx_ring_data(priv, skb->queue_mapping); hns_nic_net_xmit_hw(ndev, skb, rd); @@ -2296,8 +2298,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2313,7 +2317,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 717fccc2efba..78b2f4e01bd8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, /* for mutl buffer*/ new_skb = skb_copy(skb, GFP_ATOMIC); dev_kfree_skb_any(skb); + if (!new_skb) { + netdev_err(ndev, "skb 
alloc failed\n"); + return; + } skb = new_skb; check_ok = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index f8a87f8ca983..148e53812d89 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -123,7 +123,7 @@ struct hclgevf_mbx_arq_ring { #define hclge_mbx_ring_ptr_move_crq(crq) \ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) #define hclge_mbx_tail_ptr_move_arq(arq) \ - (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) #define hclge_mbx_head_ptr_move_arq(arq) \ - (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index a0998937727d..0db835d87d09 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -77,6 +77,7 @@ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, DESC_TYPE_PAGE, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 403e0f089f2a..506fffedefe5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,7 @@ #include <net/pkt_cls.h> #include <net/tcp.h> #include <net/vxlan.h> +#include <net/geneve.h> #include "hnae3.h" #include "hns3_enet.h" @@ -795,7 +796,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { @@ -804,7 +805,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) l4.hdr = skb_transport_header(skb); if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT)))) return false; skb_checksum_help(skb); @@ -1117,11 +1119,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, if (type == DESC_TYPE_SKB) { struct sk_buff *skb = (struct sk_buff *)priv; - int ret; - - ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret)) - return ret; dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); } else { @@ -1136,14 +1133,14 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return -ENOMEM; } + desc_cb->priv = priv; desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; if (likely(size <= HNS3_MAX_BD_SIZE)) { u16 bdtp_fe_sc_vld_ra_ri = 0; - desc_cb->priv = priv; - desc_cb->dma = dma; - desc_cb->type = type; desc->addr = cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); @@ -1155,19 +1152,13 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, } frag_buf_num = hns3_tx_bd_count(size); - sizeoflast = size & HNS3_TX_LAST_SIZE_M; + sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { u16 bdtp_fe_sc_vld_ra_ri = 0; - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; - desc_cb->type = (type == DESC_TYPE_SKB && !k) ? - DESC_TYPE_SKB : DESC_TYPE_PAGE; - /* now, fill the descriptor */ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? @@ -1181,7 +1172,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); - desc_cb = &ring->desc_cb[ring->next_to_use]; desc = &ring->desc[ring->next_to_use]; } @@ -1292,6 +1282,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1299,6 +1293,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) dma_unmap_single(dev, @@ -1313,6 +1310,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].length = 0; ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; } } @@ -1362,6 +1360,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + if (unlikely(ret < 0)) + goto fill_err; + ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 
1 : 0, DESC_TYPE_SKB); if (unlikely(ret)) @@ -3312,7 +3314,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int ret = 0; @@ -3344,6 +3345,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { + struct hnae3_ring_chain_node vector_ring_chain; + tqp_vector = &priv->tqp_vector[i]; tqp_vector->rx_group.total_bytes = 0; @@ -3993,9 +3996,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } @@ -4007,8 +4009,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5d468ed404a6..424614d2d027 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -186,8 +186,6 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) -#define HNS3_TX_LAST_SIZE_M 0xffff - #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 52c9d204fe3d..34e5448d59f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -174,18 +174,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + skb->data, len, true); dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 1426eb5ddf3d..e34e0854635c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1018,16 +1018,16 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_DROP_B 0 #define HCLGE_FD_AD_DIRECT_QID_B 1 #define HCLGE_FD_AD_QID_S 2 -#define HCLGE_FD_AD_QID_M GENMASK(12, 2) +#define HCLGE_FD_AD_QID_M GENMASK(11, 2) #define HCLGE_FD_AD_USE_COUNTER_B 12 #define HCLGE_FD_AD_COUNTER_NUM_S 13 #define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) #define HCLGE_FD_AD_NXT_STEP_B 20 #define HCLGE_FD_AD_NXT_KEY_S 21 -#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) #define HCLGE_FD_AD_WR_RULE_ID_B 0 #define HCLGE_FD_AD_RULE_ID_S 1 -#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1) struct hclge_fd_ad_config_cmd { u8 stage; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index 38b79321c4c4..de69ebf68857 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -35,8 +35,6 @@ #define HCLGE_DBG_DFX_SSU_2_OFFSET 12 -#pragma pack(1) - struct hclge_qos_pri_map_cmd { u8 pri0_tc : 4, pri1_tc : 4; @@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info { struct hclge_dbg_reg_common_msg reg_msg; }; -#pragma pack() - static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { {false, "Reserved"}, {true, "BP_CPU_STATE"}, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d4652dea4569..93f3865b679b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -746,7 +746,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; - if (hdev->hw.mac.phydev) { + if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) { count += 1; handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } @@ -4907,9 +4908,9 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, case BIT(INNER_SRC_MAC): for (i = 0; i < ETH_ALEN; i++) { calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples.src_mac[i]); + rule->tuples_mask.src_mac[i]); calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], - rule->tuples.src_mac[i]); + rule->tuples_mask.src_mac[i]); } return true; @@ -5627,9 +5628,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, /* to avoid rule conflict, when user configure rule by ethtool, * we need to clear all arfs rules */ + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); - spin_lock_bh(&hdev->fd_rule_lock); ret = hclge_fd_config_rule(hdev, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -5672,6 +5673,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, return ret; } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bool clear_list) { @@ -5684,7 +5686,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -5701,8 +5702,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } - - spin_unlock_bh(&hdev->fd_rule_lock); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -5940,8 +5939,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); fs->m_ext.vlan_tci = rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
- cpu_to_be16(VLAN_VID_MASK) : - cpu_to_be16(rule->tuples_mask.vlan_tag1); + 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -6069,7 +6067,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_fd_rule_tuples new_tuples; + struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; u16 tmp_queue_id; @@ -6079,20 +6077,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - memset(&new_tuples, 0, sizeof(new_tuples)); - hclge_fd_get_flow_tuples(fkeys, &new_tuples); - - spin_lock_bh(&hdev->fd_rule_lock); - /* when there is already fd rule existed add by user, * arfs should not work */ + spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + /* check is there flow director filter existed for this flow, * if not, create a new filter for it; * if filter exist with different queue id, modify the filter; @@ -6177,6 +6173,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) #endif } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) { #ifdef CONFIG_RFS_ACCEL @@ -6221,10 +6218,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hdev->fd_en = enable; clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); hclge_del_all_fd_entries(handle, clear); - else + spin_unlock_bh(&hdev->fd_rule_lock); + } else { hclge_restore_fd_entries(handle); + } } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -6678,8 +6679,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); - + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, * so it only need to stop phy here. 
@@ -8560,12 +8562,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) { + struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; int reset_try_times = 0; int reset_status; u16 queue_gid; int ret; + if (queue_id >= handle->kinfo.num_tqps) { + dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n", + queue_id); + return; + } + queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 7d7e712691b9..fc275d4f484c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2140,14 +2140,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + hclgevf_reset_tqp_stats(handle); hclgevf_request_link_info(hdev); hclgevf_update_link_mode(hdev); - clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); - return 0; } @@ -2782,8 +2782,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_uninit_msi(hdev); } - hclgevf_pci_uninit(hdev); hclgevf_cmd_uninit(hdev); + hclgevf_pci_uninit(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c5be4ebd8437..a20d9147d5f2 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1317,6 +1317,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) int offset = ibmveth_rxq_frame_offset(adapter); int csum_good = ibmveth_rxq_csum_good(adapter); int lrg_pkt = ibmveth_rxq_large_packet(adapter); + __sum16 iph_check = 0; skb = ibmveth_rxq_get_buffer(adapter); @@ -1353,16 +1354,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); + /* PHYP without PLSO support places a -1 in the ip + * checksum for large send frames. + */ + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + iph_check = iph->check; + } + + if ((length > netdev->mtu + ETH_HLEN) || + lrg_pkt || iph_check == 0xffff) { + ibmveth_rx_mss_helper(skb, mss, lrg_pkt); + adapter->rx_large_packets++; + } + if (csum_good) { skb->ip_summed = CHECKSUM_UNNECESSARY; ibmveth_rx_csum_helper(skb, adapter); } - if (length > netdev->mtu + ETH_HLEN) { - ibmveth_rx_mss_helper(skb, mss, lrg_pkt); - adapter->rx_large_packets++; - } - napi_gro_receive(napi, skb); /* send it up */ netdev->stats.rx_packets++; @@ -1682,7 +1693,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index e1ab2feeae53..a2b7b982ee29 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -202,8 +202,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, if (!ltb->buff) return; + /* VIOS automatically unmaps the long term buffer at remote + * end for the following resets: + * FAILOVER, MOBILITY, TIMEOUT. 
+ */ if (adapter->reset_reason != VNIC_RESET_FAILOVER && - adapter->reset_reason != VNIC_RESET_MOBILITY) + adapter->reset_reason != VNIC_RESET_MOBILITY && + adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); } @@ -416,6 +421,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) int i, j, rc; u64 *size_array; + if (!adapter->rx_pool) + return -1; + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); @@ -586,6 +594,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) int tx_scrqs; int i, rc; + if (!adapter->tx_pool) + return -1; + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); @@ -779,12 +790,13 @@ static int ibmvnic_login(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); int retry_count = 0; + int retries = 10; bool retry; int rc; do { retry = false; - if (retry_count > IBMVNIC_MAX_QUEUES) { + if (retry_count > retries) { netdev_warn(netdev, "Login attempts exceeded\n"); return -1; } @@ -799,11 +811,23 @@ static int ibmvnic_login(struct net_device *netdev) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - netdev_warn(netdev, "Login timed out\n"); - return -1; + netdev_warn(netdev, "Login timed out, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + continue; } - if (adapter->init_done_rc == PARTIALSUCCESS) { + if (adapter->init_done_rc == ABORTED) { + netdev_warn(netdev, "Login aborted, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + /* FW or device may be busy, so + * wait a bit before retrying login + */ + msleep(500); + } else if (adapter->init_done_rc == PARTIALSUCCESS) { retry_count++; release_sub_crqs(adapter, 1); @@ -1057,19 +1081,13 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); + ibmvnic_napi_disable(adapter); release_resources(adapter); return rc; } netif_tx_start_all_queues(netdev); - if (prev_state == VNIC_CLOSED) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - } - adapter->state = VNIC_OPEN; return rc; } @@ -1090,18 +1108,27 @@ static int ibmvnic_open(struct net_device *netdev) if (adapter->state != VNIC_CLOSED) { rc = ibmvnic_login(netdev); if (rc) - return rc; + goto out; rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); - return rc; + goto out; } } rc = __ibmvnic_open(netdev); +out: + /* + * If open fails due to a pending failover, set device state and + * return. Device operation will be handled by reset routine. 
+ */ + if (rc && adapter->failover_pending) { + adapter->state = VNIC_OPEN; + rc = 0; + } return rc; } @@ -1226,10 +1253,8 @@ static int __ibmvnic_close(struct net_device *netdev) adapter->state = VNIC_CLOSING; rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); - if (rc) - return rc; adapter->state = VNIC_CLOSED; - return 0; + return rc; } static int ibmvnic_close(struct net_device *netdev) @@ -1492,6 +1517,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) skb_copy_from_linear_data(skb, dst, skb->len); } + /* post changes to long_term_buff *dst before VIOS accessing it */ + dma_wmb(); + tx_pool->consumer_index = (tx_pool->consumer_index + 1) % tx_pool->num_buffers; @@ -1716,6 +1744,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) int rc; rc = 0; + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + ether_addr_copy(adapter->mac_addr, addr->sa_data); if (adapter->state != VNIC_PROBED) rc = __ibmvnic_set_mac(netdev, addr->sa_data); @@ -1813,12 +1844,19 @@ static int do_reset(struct ibmvnic_adapter *adapter, u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; struct net_device *netdev = adapter->netdev; - int i, rc; + int rc; netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", rwi->reset_reason); rtnl_lock(); + /* + * Now that we have the rtnl lock, clear any pending failover. + * This will ensure ibmvnic_open() has either completed or will + * block until failover is complete. + */ + if (rwi->reset_reason == VNIC_RESET_FAILOVER) + adapter->failover_pending = false; netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -1865,13 +1903,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_sub_crqs(adapter, 1); } else { rc = ibmvnic_reset_crq(adapter); - if (!rc) + if (rc == H_CLOSED || rc == H_SUCCESS) { rc = vio_enable_interrupts(adapter->vdev); + if (rc) + netdev_err(adapter->netdev, + "Reset failed to enable interrupts. rc=%d\n", + rc); + } } if (rc) { netdev_err(adapter->netdev, - "Couldn't initialize crq. rc=%d\n", rc); + "Reset couldn't initialize crq. 
rc=%d\n", rc); goto out; } @@ -1900,7 +1943,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, adapter->req_rx_add_entries_per_subcrq != old_num_rx_slots || adapter->req_tx_entries_per_subcrq != - old_num_tx_slots) { + old_num_tx_slots || + !adapter->rx_pool || + !adapter->tso_pool || + !adapter->tx_pool) { release_rx_pools(adapter); release_tx_pools(adapter); release_napi(adapter); @@ -1912,12 +1958,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, } else { rc = reset_tx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + rc); goto out; + } rc = reset_rx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + rc); goto out; + } } ibmvnic_disable_irqs(adapter); } @@ -1937,12 +1989,11 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* refresh device's multicast list */ ibmvnic_set_multi(netdev); - /* kick napi */ - for (i = 0; i < adapter->req_rx_queues; i++) - napi_schedule(&adapter->napi[i]); - - if (adapter->reset_reason != VNIC_RESET_FAILOVER) + if (adapter->reset_reason == VNIC_RESET_FAILOVER || + adapter->reset_reason == VNIC_RESET_MOBILITY) { call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); + call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev); + } rc = 0; @@ -2012,6 +2063,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, if (rc) return IBMVNIC_OPEN_FAILED; + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); + call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev); + return 0; } @@ -2075,6 +2129,13 @@ static void __ibmvnic_reset(struct work_struct *work) /* CHANGE_PARAM requestor holds rtnl_lock */ rc = do_change_param_reset(adapter, rwi, reset_state); } else if (adapter->force_reset_recovery) { + /* + * Since we are doing a hard reset now, clear the + * failover_pending flag so we don't ignore any + * future MOBILITY or other resets. + */ + adapter->failover_pending = false; + /* Transport event occurred during previous reset */ if (adapter->wait_for_reset) { /* Previous was CHANGE_PARAM; caller locked */ @@ -2138,9 +2199,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, unsigned long flags; int ret; + /* + * If failover is pending don't schedule any other reset. + * Instead let the failover complete. If there is already a + * a failover reset scheduled, we will detect and drop the + * duplicate reset when walking the ->rwi_list below. + */ if (adapter->state == VNIC_REMOVING || adapter->state == VNIC_REMOVED || - adapter->failover_pending) { + (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { ret = EBUSY; netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); goto err; @@ -2193,6 +2260,12 @@ static void ibmvnic_tx_timeout(struct net_device *dev) { struct ibmvnic_adapter *adapter = netdev_priv(dev); + if (test_bit(0, &adapter->resetting)) { + netdev_err(adapter->netdev, + "Adapter is resetting, skip timeout reset\n"); + return; + } + ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); } @@ -2234,6 +2307,12 @@ restart_poll: if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) break; + /* The queue entry at the current index is peeked at above + * to determine that there is a valid descriptor awaiting + * processing. We want to be sure that the current slot + * holds a valid descriptor before reading its contents. 
+ */ + dma_rmb(); next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); rx_buff = (struct ibmvnic_rx_buff *)be64_to_cpu(next-> @@ -2258,6 +2337,8 @@ restart_poll: offset = be16_to_cpu(next->rx_comp.off_frame_data); flags = next->rx_comp.flags; skb = rx_buff->skb; + /* load long_term_buff before copying to skb */ + dma_rmb(); skb_copy_to_linear_data(skb, rx_buff->data + offset, length); @@ -2694,6 +2775,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) { int i, rc; + if (!adapter->tx_scrq || !adapter->rx_scrq) + return -EINVAL; + for (i = 0; i < adapter->req_tx_queues; i++) { netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); @@ -2912,13 +2996,18 @@ restart_loop: unsigned int pool = scrq->pool_index; int num_entries = 0; + /* The queue entry at the current index is peeked at above + * to determine that there is a valid descriptor awaiting + * processing. We want to be sure that the current slot + * holds a valid descriptor before reading its contents. + */ + dma_rmb(); + next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { - if (next->tx_comp.rcs[i]) { + if (next->tx_comp.rcs[i]) dev_err(dev, "tx error %x\n", next->tx_comp.rcs[i]); - continue; - } index = be32_to_cpu(next->tx_comp.correlators[i]); if (index & IBMVNIC_TSO_POOL_MASK) { tx_pool = &adapter->tso_pool[pool]; @@ -3067,7 +3156,7 @@ req_rx_irq_failed: req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; @@ -3312,6 +3401,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, } spin_unlock_irqrestore(&scrq->lock, flags); + /* Ensure that the entire buffer descriptor has been + * loaded before reading its contents + */ + dma_rmb(); + return entry; } @@ -4040,8 +4134,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); goto out; } + /* crq->change_mac_addr.mac_addr is the requested one + * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
+ */ ether_addr_copy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0]); + ether_addr_copy(adapter->mac_addr, + &crq->change_mac_addr_rsp.mac_addr[0]); out: complete(&adapter->fw_done); return rc; @@ -4438,7 +4537,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, case IBMVNIC_1GBPS: adapter->speed = SPEED_1000; break; - case IBMVNIC_10GBP: + case IBMVNIC_10GBPS: adapter->speed = SPEED_10000; break; case IBMVNIC_25GBPS: @@ -4453,6 +4552,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, case IBMVNIC_100GBPS: adapter->speed = SPEED_100000; break; + case IBMVNIC_200GBPS: + adapter->speed = SPEED_200000; + break; default: if (netif_carrier_ok(netdev)) netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); @@ -4486,12 +4588,26 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case IBMVNIC_CRQ_INIT: dev_info(dev, "Partner initialized\n"); adapter->from_passive_init = true; - adapter->failover_pending = false; if (!completion_done(&adapter->init_done)) { complete(&adapter->init_done); adapter->init_done_rc = -EIO; } - ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); + rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); + if (rc && rc != -EBUSY) { + /* We were unable to schedule the failover + * reset either because the adapter was still + * probing (eg: during kexec) or we could not + * allocate memory. Clear the failover_pending + * flag since no one else will. We ignore + * EBUSY because it means either FAILOVER reset + * is already scheduled or the adapter is + * being removed. + */ + netdev_err(netdev, + "Error %ld scheduling failover reset\n", + rc); + adapter->failover_pending = false; + } break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); @@ -4535,12 +4651,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); break; } - dev_info(dev, "Partner protocol version is %d\n", - crq->version_exchange_rsp.version); - if (be16_to_cpu(crq->version_exchange_rsp.version) < - ibmvnic_version) - ibmvnic_version = + ibmvnic_version = be16_to_cpu(crq->version_exchange_rsp.version); + dev_info(dev, "Partner protocol version is %d\n", + ibmvnic_version); send_cap_queries(adapter); break; case QUERY_CAPABILITY_RSP: @@ -4650,6 +4764,12 @@ static void ibmvnic_tasklet(void *data) while (!done) { /* Pull all the valid messages off the CRQ */ while ((crq = ibmvnic_next_crq(adapter)) != NULL) { + /* This barrier makes sure ibmvnic_next_crq()'s + * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded + * before ibmvnic_handle_crq()'s + * switch(gen_crq->first) and switch(gen_crq->cmd). 
+ */ + dma_rmb(); ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } @@ -4696,6 +4816,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); /* Clean out the queue */ + if (!crq->msgs) + return -EINVAL; + memset(crq->msgs, 0, PAGE_SIZE); crq->cur = 0; crq->active = false; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index ebc39248b334..0da20f19bb17 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -373,7 +373,7 @@ struct ibmvnic_phys_parms { #define IBMVNIC_10MBPS 0x40000000 #define IBMVNIC_100MBPS 0x20000000 #define IBMVNIC_1GBPS 0x10000000 -#define IBMVNIC_10GBP 0x08000000 +#define IBMVNIC_10GBPS 0x08000000 #define IBMVNIC_40GBPS 0x04000000 #define IBMVNIC_100GBPS 0x02000000 #define IBMVNIC_25GBPS 0x01000000 diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 86493fea56e4..a2ee28e487a6 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); } @@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT; - while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000); - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter); @@ -3140,8 +3150,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (skb->data_len && hdr_len == len) { switch (hw->mac_type) { + case e1000_82544: { unsigned int pull_size; - case e1000_82544: + /* Make sure we have room to chop off 4 bytes, * and that the end alignment will work out to * this hardware's requirements @@ -3162,6 +3173,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } len = skb_headlen(skb); break; + } default: /* do nothing */ break; diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 2c1bab377b2a..1fd4406173a8 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. 
During driver activity we should enable diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 37a2314d3e6b..944abd5eae11 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) #define er32(reg) __er32(hw, E1000_##reg) -s32 __ew32_prepare(struct e1000_hw *hw); void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index eff75bd8a8f0..11fdc27faa82 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -86,6 +86,12 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0 #define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1 #define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2 +#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E +#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F +#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C +#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D +#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53 +#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8c4507838325..c2feedfd321d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. **/ -s32 __ew32_prepare(struct e1000_hw *hw) +static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); - - return i; } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) @@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, rx_ring->tail); - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); @@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, tx_ring->tail); - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); @@ -5289,6 +5287,10 @@ static void e1000_watchdog_task(struct work_struct *work) /* oops */ break; } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } } /* enable transmits in the hardware, need to do this @@ -5951,15 +5953,19 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + rtnl_lock(); /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) + if (test_bit(__E1000_DOWN, &adapter->state)) { + rtnl_unlock(); return; + } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter 
unexpectedly\n"); } e1000e_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -6343,11 +6349,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) + wufc = E1000_WUFC_LNKC; + else if (device_may_wakeup(&pdev->dev)) + wufc = adapter->wol; + else + wufc = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6404,7 +6416,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. */ @@ -7560,6 +7572,12 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 96bc531ac1da..514f17d75fee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2612,6 +2612,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; + xdp_set_data_meta_invalid(&xdp); xdp.data_hard_start = (void *)((u8 *)xdp.data - i40e_rx_offset(rx_ring)); xdp.data_end = (void *)((u8 *)xdp.data + size); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 5a3122e0b825..2b62d8dddbe3 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3265,9 +3265,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); @@ -3291,6 +3288,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + struct iavf_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return 
iavf_setup_tc_cls_flower(cb_priv, type_data); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 026d17344a93..af646336b00d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1698,6 +1698,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) if (rx_buffer) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; + xdp_set_data_meta_invalid(&xdp); xdp.data_hard_start = (void *)((u8 *)xdp.data - iavf_rx_offset(rx_ring)); xdp.data_end = (void *)((u8 *)xdp.data + size); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 171f0b625407..d68b8aa31b19 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -436,6 +436,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; + enum ice_status status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); @@ -446,7 +447,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - return ice_init_def_sw_recp(hw); + status = ice_init_def_sw_recp(hw); + if (status) { + devm_kfree(ice_hw_to_dev(hw), hw->switch_info); + return status; + } + return 0; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index c68709c7ef81..2e9c97bad3c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -199,7 +199,9 @@ unwind_alloc_rq_bufs: cq->rq.r.rq_bi[i].pa = 0; cq->rq.r.rq_bi[i].size = 0; } + cq->rq.r.rq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); + cq->rq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -245,7 +247,9 @@ unwind_alloc_sq_bufs: cq->sq.r.sq_bi[i].pa = 0; cq->sq.r.sq_bi[i].size = 0; } + cq->sq.r.sq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); + cq->sq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -304,6 +308,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) return 0; } +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + if ((qi)->ring.r.ring##_bi) \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) { \ + dmam_free_coherent(ice_hw_to_dev(hw), \ + (qi)->ring.r.ring##_bi[i].size, \ + (qi)->ring.r.ring##_bi[i].va, \ + (qi)->ring.r.ring##_bi[i].pa); \ + (qi)->ring.r.ring##_bi[i].va = NULL;\ + (qi)->ring.r.ring##_bi[i].pa = 0;\ + (qi)->ring.r.ring##_bi[i].size = 0;\ + } \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ + /* free DMA head */ \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ +} while (0) + /** * ice_init_sq - main initialization routine for Control ATQ * @hw: pointer to the hardware structure @@ -357,6 +383,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: @@ -416,33 +443,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } -#define 
ICE_FREE_CQ_BUFS(hw, qi, ring) \ -do { \ - int i; \ - /* free descriptors */ \ - for (i = 0; i < (qi)->num_##ring##_entries; i++) \ - if ((qi)->ring.r.ring##_bi[i].pa) { \ - dmam_free_coherent(ice_hw_to_dev(hw), \ - (qi)->ring.r.ring##_bi[i].size,\ - (qi)->ring.r.ring##_bi[i].va,\ - (qi)->ring.r.ring##_bi[i].pa);\ - (qi)->ring.r.ring##_bi[i].va = NULL; \ - (qi)->ring.r.ring##_bi[i].pa = 0; \ - (qi)->ring.r.ring##_bi[i].size = 0; \ - } \ - /* free the buffer info list */ \ - if ((qi)->ring.cmd_buf) \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ - /* free DMA head */ \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ -} while (0) - /** * ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 4df9da359135..3b1d35365ef0 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -31,8 +31,8 @@ enum ice_ctl_q { ICE_CTL_Q_MAILBOX, }; -/* Control Queue timeout settings - max delay 250ms */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +/* Control Queue timeout settings - max delay 1s */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ struct ice_ctl_q_ring { diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 62673e27af0e..fc9ff985a62b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2635,14 +2635,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", vsi->tx_rings[0]->count, new_tx_cnt); - tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; } - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_cnt; @@ -2667,14 +2667,14 @@ process_rx: netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", vsi->rx_rings[0]->count, new_rx_cnt); - rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); if (!rx_rings) { err = -ENOMEM; goto done; } - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_cnt; @@ -2712,7 +2712,7 @@ process_link: ice_down(vsi); if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { ice_free_tx_ring(vsi->tx_rings[i]); *vsi->tx_rings[i] = tx_rings[i]; } @@ -2720,7 +2720,7 @@ process_link: } if (rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { ice_free_rx_ring(vsi->rx_rings[i]); /* copy the real tail offset */ rx_rings[i].tail = vsi->rx_rings[i]->tail; @@ -2744,7 +2744,7 @@ process_link: free_tx: /* error cleanup if the Rx allocations failed after getting Tx */ if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) + ice_for_each_txq(vsi, i) ice_free_tx_ring(&tx_rings[i]); devm_kfree(&pf->pdev->dev, tx_rings); } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index cbd53b586c36..6cfe8eb7f47d 
100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1535,10 +1535,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->ref_count), GFP_KERNEL); + if (!es->ref_count) + goto err; es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); - if (!es->ref_count) + if (!es->written) goto err; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2408f0de95fc..d0ccb7ad447b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2900,7 +2900,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_unroll; + goto err_init_vsi_unroll; } /* Driver is mostly up */ @@ -2986,6 +2986,7 @@ err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); +err_init_vsi_unroll: devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1acdd43a2edd..7ff2e07f6d38 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1279,6 +1279,9 @@ ice_add_update_vsi_list(struct ice_hw *hw, ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); + if (!m_entry->vsi_list_info) + return ICE_ERR_NO_MEMORY; + /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list */ @@ -2266,6 +2269,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && fm_entry->fltr_info.vsi_handle == vsi_handle) || (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + fm_entry->vsi_list_info && (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); } @@ -2338,14 +2342,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, return ICE_ERR_PARAM; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { - struct ice_fltr_info *fi; - - fi = &fm_entry->fltr_info; - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, - vsi_list_head, fi); + vsi_list_head, + &fm_entry->fltr_info); if (status) return status; } @@ -2663,7 +2665,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, &remove_list_head); mutex_unlock(rule_lock); if (status) - return; + goto free_fltr_list; switch (lkup) { case ICE_SW_LKUP_MAC: @@ -2686,6 +2688,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, break; } +free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 8959418776f6..f80933320fd3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, u32 speed; u32 supported, advertising; - status = rd32(E1000_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? 
+ 0 : rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ed7e667d7eb2..7a4e2b014dd6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4326,8 +4326,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) else mrqc |= E1000_MRQC_ENABLE_VMDQ; } else { - if (hw->mac.type != e1000_i211) - mrqc |= E1000_MRQC_ENABLE_RSS_MQ; + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; } igb_vmm_control(adapter); @@ -6194,9 +6193,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index ac98f1d96892..cbcb8611ab50 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1670,12 +1670,18 @@ static int igc_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = hw->phy.addr; /* advertising link modes */ - ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); /* set autoneg settings */ if (hw->mac.autoneg == 1) { @@ -1684,6 +1690,9 @@ static int igc_get_link_ksettings(struct net_device *netdev, Autoneg); } + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + switch (hw->fc.requested_mode) { case igc_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -1698,12 +1707,11 @@ static int igc_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; default: - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); + break; } - status = rd32(IGC_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? 
+ 0 : rd32(IGC_STATUS); if (status & IGC_STATUS_LU) { if (status & IGC_STATUS_SPEED_1000) { @@ -1786,6 +1794,12 @@ static int igc_set_link_ksettings(struct net_device *netdev, ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. + */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index c25f555aaf82..ed5d09c11c38 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, u16 *data) { struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; u32 attempts = 100000; u32 i, k, eewr = 0; - s32 ret_val = 0; /* A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. @@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || words == 0) { hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -IGC_ERR_NVM; goto out; } diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 5eeb4c8caf4a..08adf103e90b 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -647,7 +647,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) } out: - return 0; + return ret_val; } /** diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 24888676f69b..6b43e1c5b1c3 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2222,21 +2222,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) } /** - * igc_get_stats - Get System Network Statistics + * igc_get_stats64 - Get System Network Statistics * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer * * Returns the address of the device statistics structure. * The statistics are updated here and also from the timer callback. 
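The igc conversion that follows swaps ndo_get_stats for ndo_get_stats64, and the general shape of such a handler is: take the lock guarding the aggregated counters, refresh them unless a reset currently owns the hardware, then copy into the caller-supplied buffer. A hedged sketch of that shape; demo_adapter, demo_update_stats and DEMO_RESETTING are invented names:

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/bitops.h>

    #define DEMO_RESETTING 0

    struct demo_adapter {
        spinlock_t stats_lock;              /* guards stats64 */
        struct rtnl_link_stats64 stats64;   /* aggregated HW counters */
        unsigned long state;
    };

    static void demo_update_stats(struct demo_adapter *ad)
    {
        /* read hardware counters into ad->stats64 */
    }

    static void demo_get_stats64(struct net_device *netdev,
                                 struct rtnl_link_stats64 *stats)
    {
        struct demo_adapter *ad = netdev_priv(netdev);

        spin_lock(&ad->stats_lock);
        /* Don't touch the HW mid-reset; hand out the last snapshot. */
        if (!test_bit(DEMO_RESETTING, &ad->state))
            demo_update_stats(ad);
        memcpy(stats, &ad->stats64, sizeof(*stats));
        spin_unlock(&ad->stats_lock);
    }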
*/ -static struct net_device_stats *igc_get_stats(struct net_device *netdev) +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct igc_adapter *adapter = netdev_priv(netdev); + spin_lock(&adapter->stats64_lock); if (!test_bit(__IGC_RESETTING, &adapter->state)) igc_update_stats(adapter); - - /* only return the current stats */ - return &netdev->stats; + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); } static netdev_features_t igc_fix_features(struct net_device *netdev, @@ -3984,7 +3986,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_start_xmit = igc_xmit_frame, .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, - .ndo_get_stats = igc_get_stats, + .ndo_get_stats64 = igc_get_stats64, .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 79ed5068bb08..1d710f80c11d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -200,7 +200,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index cb9e3fee75d9..ea2f2bc4967c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1007,7 +1007,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -1034,7 +1034,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -1078,7 +1078,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1107,13 +1107,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; #ifdef HAVE_NDO_BUSY_POLL diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1123fe5b12b1..79cdffdc7336 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2169,7 +2169,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, rx_buffer->page_offset ^= truesize; #else unsigned int 
truesize = ring_uses_build_skb(rx_ring) ? - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); rx_buffer->page_offset += truesize; @@ -8241,7 +8242,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; @@ -8263,15 +8267,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index ae195f8adff5..9f804e2aba35 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } } @@ -1113,7 +1113,7 @@ out: return rc; probe_err_register: - kfree(lp->td_ring); + kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); probe_err_td_ring: iounmap(lp->tx_dma_regs); probe_err_dma_tx: @@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev) iounmap(lp->eth_regs); iounmap(lp->rx_dma_regs); iounmap(lp->tx_dma_regs); + kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); unregister_netdev(bif->dev); free_netdev(bif->dev); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 900affbdcc0e..4e44a39267eb 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget) } if (rx < budget) { - napi_complete(&ch->napi); - ltq_dma_enable_irq(&ch->dma); + if (napi_complete_done(&ch->napi, rx)) + ltq_dma_enable_irq(&ch->dma); } return rx; @@ -245,6 +245,7 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) int pkts = 0; int bytes = 0; + netif_tx_lock(net_dev); while (pkts < budget) { struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free]; @@ -268,9 +269,13 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) net_dev->stats.tx_bytes += bytes; netdev_completed_queue(ch->priv->net_dev, pkts, bytes); + netif_tx_unlock(net_dev); + if (netif_queue_stopped(net_dev)) + netif_wake_queue(net_dev); + if (pkts < budget) { - napi_complete(&ch->napi); - ltq_dma_enable_irq(&ch->dma); + if (napi_complete_done(&ch->napi, pkts)) + ltq_dma_enable_irq(&ch->dma); } return pkts; @@ -341,10 +346,12 @@ static irqreturn_t xrx200_dma_irq(int irq, 
void *ptr) { struct xrx200_chan *ch = ptr; - ltq_dma_disable_irq(&ch->dma); - ltq_dma_ack_irq(&ch->dma); + if (napi_schedule_prep(&ch->napi)) { + __napi_schedule(&ch->napi); + ltq_dma_disable_irq(&ch->dma); + } - napi_schedule(&ch->napi); + ltq_dma_ack_irq(&ch->dma); return IRQ_HANDLED; } @@ -498,7 +505,7 @@ static int xrx200_probe(struct platform_device *pdev) /* setup NAPI */ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); - netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); + netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a10ae28ebc8a..7b0543056b10 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -104,9 +104,11 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c +/* Only exists on Armada XP and Armada 370 */ #define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -3025,7 +3027,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, } /* Setup XPS mapping */ - if (txq_number > 1) + if (pp->neta_armada3700) + cpu = 0; + else if (txq_number > 1) cpu = txq->id % num_present_cpus(); else cpu = pp->rxq_def % num_present_cpus(); @@ -3164,26 +3168,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static int mvneta_comphy_init(struct mvneta_port *pp) +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) { int ret; - if (!pp->comphy) - return 0; - - ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, - pp->phy_interface); + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(pp->comphy); } +static int mvneta_config_interface(struct mvneta_port *pp, + phy_interface_t interface) +{ + int ret = 0; + + if (pp->comphy) { + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + ret = mvneta_comphy_init(pp, interface); + } + } else { + switch (interface) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_QSGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_SGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_HSGMII_SERDES_PROTO); + break; + default: + break; + } + } + + pp->phy_interface = interface; + + return ret; +} + static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - WARN_ON(mvneta_comphy_init(pp)); + WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3558,17 +3596,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* When at 2.5G, the link partner can send frames with shortened * preambles. 
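The lantiq xrx200 changes just above are an instance of a general NAPI rule: re-arm device interrupts only when napi_complete_done() reports that polling really finished, and mask them in the IRQ handler only when napi_schedule_prep() wins the right to schedule. A sketch of that pairing with invented demo_* helpers standing in for the driver's register accessors:

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    struct demo_chan {
        struct napi_struct napi;
    };

    static void demo_irq_enable(struct demo_chan *ch)  { /* write mask reg */ }
    static void demo_irq_disable(struct demo_chan *ch) { /* write mask reg */ }

    static int demo_rx(struct demo_chan *ch, int budget)
    {
        return 0; /* clean up to 'budget' descriptors, return count */
    }

    static int demo_poll(struct napi_struct *napi, int budget)
    {
        struct demo_chan *ch = container_of(napi, struct demo_chan, napi);
        int done = demo_rx(ch, budget);

        if (done < budget) {
            /* Re-arm only if nobody re-scheduled us meanwhile. */
            if (napi_complete_done(napi, done))
                demo_irq_enable(ch);
        }
        return done;
    }

    static irqreturn_t demo_isr(int irq, void *ptr)
    {
        struct demo_chan *ch = ptr;

        /* Mask the source only when we actually won scheduling. */
        if (napi_schedule_prep(&ch->napi)) {
            __napi_schedule(&ch->napi);
            demo_irq_disable(ch);
        }
        /* the real handler still acks the interrupt unconditionally */
        return IRQ_HANDLED;
    }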
*/ - if (state->speed == SPEED_2500) + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->comphy && pp->phy_interface != state->interface && - (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX)) { - pp->phy_interface = state->interface; - - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_comphy_init(pp)); + if (pp->phy_interface != state->interface) { + if (pp->comphy) + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_config_interface(pp, state->interface)); } if (new_ctrl0 != gmac_ctrl0) @@ -3732,6 +3766,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) node_online); struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts + * are routed to CPU 0, so we don't need all the cpu-hotplug support + */ + if (pp->neta_armada3700) + return 0; spin_lock(&pp->lock); /* @@ -4469,12 +4508,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - if (phy_mode == PHY_INTERFACE_MODE_QSGMII) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_8023z(phy_mode)) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - else if (!phy_interface_mode_is_rgmii(phy_mode)) + if (phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(phy_mode) && + !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; return 0; @@ -4661,7 +4698,7 @@ static int mvneta_probe(struct platform_device *pdev) if (err < 0) goto err_netdev; - err = mvneta_port_power_up(pp, phy_mode); + err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); goto err_netdev; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 35478cba2aa5..6122057d60c0 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } @@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, struct mvpp2_ethtool_fs *efs; int ret; + if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) + return -EINVAL; + efs = port->rfs_rules[info->fs.location]; if (!efs) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ef44c6979a31..491bcfd36ac2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1073,7 +1073,7 @@ static void mvpp2_interrupts_unmask(void *arg) u32 val; /* If the thread isn't used, don't do anything */ - if (smp_processor_id() > port->priv->nthreads) + if (smp_processor_id() >= port->priv->nthreads) return; val = MVPP2_CAUSE_MISC_SUM_MASK | @@ -1129,7 +1129,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) regmap_read(priv->sysctrl_base, GENCONF_CTRL0, 
&val); if (port->gop_id == 2) - val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; + val |= GENCONF_CTRL0_PORT0_RGMII; else if (port->gop_id == 3) val |= GENCONF_CTRL0_PORT1_RGMII_MII; regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); @@ -1541,7 +1541,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->ntxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - MVPP22_CTRS_TX_CTR(port->id, i), + MVPP22_CTRS_TX_CTR(port->id, q), mvpp2_ethtool_txq_regs[i].offset); /* Rxqs are numbered from 0 from the user standpoint, but not from the @@ -1550,7 +1550,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->nrxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - port->first_rxq + i, + port->first_rxq + q, mvpp2_ethtool_rxq_regs[i].offset); } @@ -2078,7 +2078,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg) int queue; /* If the thread isn't used, don't do anything */ - if (smp_processor_id() > port->priv->nthreads) + if (smp_processor_id() >= port->priv->nthreads) return; for (queue = 0; queue < port->ntxqs; queue++) { @@ -2161,17 +2161,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned int thread; u32 val; if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); - - put_cpu(); + /* PKT-coalescing registers are per-queue + per-thread */ + for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); + } } static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) @@ -3696,6 +3697,7 @@ static int mvpp2_open(struct net_device *dev) if (!valid) { netdev_err(port->dev, "invalid configuration: no dt or link IRQ"); + err = -ENOENT; goto err_free_irq; } @@ -4319,6 +4321,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, if (!mvpp22_rss_is_supported()) return -EOPNOTSUPP; + if (rss_context >= MVPP22_N_RSS_TABLES) + return -EINVAL; if (hfunc) *hfunc = ETH_RSS_HASH_CRC32; @@ -4541,7 +4545,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; unsigned int thread; - int queue, err; + int queue, err, val; /* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@ -4555,6 +4559,18 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_egress_disable(port); mvpp2_port_disable(port); + if (mvpp2_is_xlg(port->phy_interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), @@ -4742,12 +4758,16 @@ static void 
mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, eth_hw_addr_random(dev); } +static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) +{ + return container_of(config, struct mvpp2_port, phylink_config); +} + static void mvpp2_phylink_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; /* Invalid combinations */ @@ -4770,8 +4790,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config, phylink_set(mask, Autoneg); phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); switch (state->interface) { case PHY_INTERFACE_MODE_10GKR: @@ -4874,8 +4892,7 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port, static int mvpp2_phylink_mac_link_state(struct phylink_config *config, struct phylink_link_state *state) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); @@ -4893,8 +4910,7 @@ static int mvpp2_phylink_mac_link_state(struct phylink_config *config, static void mvpp2_mac_an_restart(struct phylink_config *config) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, @@ -5082,13 +5098,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); bool change_interface = port->phy_interface != state->interface; /* Check for invalid configuration */ if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { - netdev_err(dev, "Invalid mode on %s\n", dev->name); + netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); return; } @@ -5125,8 +5140,7 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; if (!phylink_autoneg_inband(mode)) { @@ -5147,14 +5161,13 @@ static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode, mvpp2_egress_enable(port); mvpp2_ingress_enable(port); - netif_tx_wake_all_queues(dev); + netif_tx_wake_all_queues(port->dev); } static void mvpp2_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; if (!phylink_autoneg_inband(mode)) { @@ -5171,7 +5184,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config, } } - netif_tx_stop_all_queues(dev); + netif_tx_stop_all_queues(port->dev); 
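The mvpp2_phylink_to_port() helper introduced above replaces repeated container_of() boilerplate across the phylink callbacks. The same accessor pattern applies to any callback config struct embedded in a private structure; a sketch with an invented demo_port:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/phylink.h>

    struct demo_port {
        struct net_device *dev;
        struct phylink_config phylink_config;  /* passed to phylink_create() */
    };

    /* One helper instead of open-coding container_of() in every op. */
    static struct demo_port *demo_phylink_to_port(struct phylink_config *config)
    {
        return container_of(config, struct demo_port, phylink_config);
    }

    static void demo_mac_link_down(struct phylink_config *config,
                                   unsigned int mode, phy_interface_t interface)
    {
        struct demo_port *port = demo_phylink_to_port(config);

        netif_tx_stop_all_queues(port->dev);
    }

Besides being shorter, going through container_of() avoids assuming config->dev is a net_device, an assumption phylink itself does not make.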
mvpp2_egress_disable(port); mvpp2_ingress_disable(port); @@ -5923,8 +5936,8 @@ static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; + int i = 0, poolnum = MVPP2_BM_POOLS_NUM; struct fwnode_handle *port_fwnode; - int i = 0; mvpp2_dbgfs_cleanup(priv); @@ -5938,7 +5951,10 @@ static int mvpp2_remove(struct platform_device *pdev) destroy_workqueue(priv->stats_queue); - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + for (i = 0; i < poolnum; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 5692c6087bbb..dd590086fe6a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) /* Clear entry invalidation bit */ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); - /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); + return 0; } @@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, return -EINVAL; } +/* Drop flow control pause frames */ +static void mvpp2_prs_drop_fc(struct mvpp2 *priv) +{ + unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 }; + struct mvpp2_prs_entry pe; + unsigned int len; + + memset(&pe, 0, sizeof(pe)); + + /* For all ports - drop flow control frames */ + pe.index = MVPP2_PE_FC_DROP; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); +} + /* Enable/disable dropping all mac da's */ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) { @@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); /* Create dummy entries for drop all and promiscuous modes */ + mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); @@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes 
of IPv6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + /* Jump to DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index e22f6c85d380..4b68dd374733 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -129,7 +129,7 @@ #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -/* reserved */ +#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4) #define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3) #define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 6d55e3d0b7ea..54e9f6dc24ea 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -725,8 +725,10 @@ static int cgx_lmac_init(struct cgx *cgx) if (!lmac) return -ENOMEM; lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); - if (!lmac->name) - return -ENOMEM; + if (!lmac->name) { + err = -ENOMEM; + goto err_lmac_free; + } sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); lmac->lmac_id = i; lmac->cgx = cgx; @@ -737,7 +739,7 @@ static int cgx_lmac_init(struct cgx *cgx) CGX_LMAC_FWI + i * 9), cgx_fwi_event_handler, 0, lmac->name, lmac); if (err) - return err; + goto err_irq; /* Enable interrupt */ cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, @@ -748,6 +750,12 @@ static int cgx_lmac_init(struct cgx *cgx) } return cgx_lmac_verify_fwi_version(cgx); + +err_irq: + kfree(lmac->name); +err_lmac_free: + kfree(lmac); + return err; } static int cgx_lmac_exit(struct cgx *cgx) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 413c3f254cf8..c881a573da66 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -43,7 +43,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u8 entry_sz; + u16 entry_sz; u8 align; u32 qsize; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index e581091c09c4..02b4620f7368 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1980,8 +1980,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu) INTR_MASK(rvu->hw->total_pfs) & ~1ULL); for (irq = 0; irq < rvu->num_vec; irq++) { - if (rvu->irq_allocated[irq]) + if (rvu->irq_allocated[irq]) { free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + rvu->irq_allocated[irq] = false; + } } pci_free_irq_vectors(rvu->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 15f70273e29c..d82a519a0cd9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1967,10 +1967,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index 
>= mcam->bmap_entries) break; + entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 51b77c2de400..235bbb2940a8 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1554,8 +1554,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); + unregister_netdev(dev); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5f56ee83e3b1..df7c23cd3360 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -203,7 +203,7 @@ io_error: static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 703adb96429e..d01b3a1b40f4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -65,6 +65,17 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg) return __raw_readl(eth->base + reg); } +u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) +{ + u32 val; + + val = mtk_r32(eth, reg); + val &= ~mask; + val |= set; + mtk_w32(eth, val, reg); + return reg; +} + static int mtk_mdio_busy_wait(struct mtk_eth *eth) { unsigned long t_start = jiffies; @@ -160,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, return 0; } -static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) { u32 val; int ret; + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + val = (speed == SPEED_1000) ? 
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; mtk_w32(eth, val, INTF_MODE); @@ -193,7 +214,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new, sid; + u32 mcr_cur, mcr_new, sid, i; int val, ge_mode, err; /* MT76x8 has no hardware settings between for the MAC */ @@ -251,10 +272,20 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - if (state->interface != - PHY_INTERFACE_MODE_TRGMII) - mtk_gmac0_rgmii_adjust(mac->hw, - state->speed); + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); + + /* mt7623_pad_clk_setup */ + for (i = 0 ; i < NUM_TRGMII_CTRL; i++) + mtk_w32(mac->hw, + TD_DM_DRVP(8) | TD_DM_DRVN(8), + TRGMII_TD_ODT(i)); + + /* Assert/release MT7623 RXC reset */ + mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, + TRGMII_RCK_CTRL); + mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); } } @@ -2847,6 +2878,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 76bd12cb8150..1e787f3577aa 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -350,10 +350,13 @@ #define DQSI0(x) ((x << 0) & GENMASK(6, 0)) #define DQSI1(x) ((x << 8) & GENMASK(14, 8)) #define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) +#define RXC_RST BIT(31) #define RXC_DQSISEL BIT(30) #define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16)) #define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2) +#define NUM_TRGMII_CTRL 5 + /* TRGMII RXC control register */ #define TRGMII_TCK_CTRL 0x10340 #define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) @@ -361,6 +364,11 @@ #define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2) #define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2)) +/* TRGMII TX Drive Strength */ +#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i)) +#define TD_DM_DRVP(x) ((x) & 0xf) +#define TD_DM_DRVN(x) (((x) & 0xf) << 4) + /* TRGMII Interface mode register */ #define INTF_MODE 0x10390 #define TRGMII_INTF_DIS BIT(0) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a1202e53710c..5582fba2f582 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -47,7 +47,7 @@ #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) -static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) +int mlx4_en_moderation_update(struct mlx4_en_priv *priv) { int i, t; int err = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 70fd246840e2..13e46e296194 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1383,8 +1383,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } priv->port_stats.tx_timeout++; - en_dbg(DRV, priv, "Scheduling watchdog\n"); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { + en_dbg(DRV, priv, "Scheduling port restart\n"); + queue_work(mdev->workqueue, 
&priv->restart_task); + } } @@ -1738,6 +1740,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_deactivate_cq(priv, cq); goto tx_err; } + clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state); if (t != TX_XDP) { tx_ring->tx_queue = netdev_get_tx_queue(dev, i); tx_ring->recycle_ring = NULL; @@ -1834,6 +1837,7 @@ int mlx4_en_start_port(struct net_device *dev) local_bh_enable(); } + clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -2004,7 +2008,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - watchdog_task); + restart_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; @@ -2295,11 +2299,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, lockdep_is_held(&priv->mdev->state_lock)); if (xdp_prog && carry_xdp_prog) { - xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num); - if (IS_ERR(xdp_prog)) { - mlx4_en_free_resources(tmp); - return PTR_ERR(xdp_prog); - } + bpf_prog_add(xdp_prog, tmp->rx_ring_num); for (i = 0; i < tmp->rx_ring_num; i++) rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, xdp_prog); @@ -2386,7 +2386,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { mutex_lock(&mdev->state_lock); if (!mdev->device_up) { - /* NIC is probably restarting - let watchdog task reset + /* NIC is probably restarting - let restart task reset * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { @@ -2395,7 +2395,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, + &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } mutex_unlock(&mdev->state_lock); @@ -2791,11 +2793,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) * program for a new one. 
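 *
 * A side note on the error handling dropped below (illustrative, not part
 * of this patch): after the BPF refcount rework this series relies on,
 * bpf_prog_add() takes a batch of references and can no longer fail, so
 * the IS_ERR() unwinding becomes dead code. The surviving pattern is
 * simply:
 *
 *	if (prog)
 *		bpf_prog_add(prog, priv->rx_ring_num - 1);
 *
 * one reference per RX ring, so each ring's RCU-assigned pointer pins the
 * program independently of the others.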
*/ if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { - if (prog) { - prog = bpf_prog_add(prog, priv->rx_ring_num - 1); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, priv->rx_ring_num - 1); + mutex_lock(&mdev->state_lock); for (i = 0; i < priv->rx_ring_num; i++) { old_prog = rcu_dereference_protected( @@ -2816,13 +2816,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (!tmp) return -ENOMEM; - if (prog) { - prog = bpf_prog_add(prog, priv->rx_ring_num - 1); - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto out; - } - } + if (prog) + bpf_prog_add(prog, priv->rx_ring_num - 1); mutex_lock(&mdev->state_lock); memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); @@ -2865,13 +2860,13 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (err) { en_err(priv, "Failed starting port %d for XDP change\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } unlock_out: mutex_unlock(&mdev->state_lock); -out: kfree(tmp); return err; } @@ -3263,7 +3258,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->restart_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); @@ -3650,6 +3645,8 @@ int mlx4_en_reset_config(struct net_device *dev, en_err(priv, "Failed starting port\n"); } + if (!err) + err = mlx4_en_moderation_update(priv); out: mutex_unlock(&mdev->state_lock); kfree(tmp); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index db3552f2d087..f9797e503884 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -942,6 +942,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) bool clean_complete = true; int done; + if (!budget) + return 0; + if (priv->tx_ring_num[TX_XDP]) { xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; if (xdp_tx_cq->xdp_busy) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4d5ca302c067..605c079d4841 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -43,6 +43,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/moduleparam.h> +#include <linux/indirect_call_wrapper.h> #include "mlx4_en.h" @@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, } } +INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u64 timestamp, + int napi_mode)); u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, @@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, return tx_info->nr_txbb; } +INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u64 timestamp, + int napi_mode)); + u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u64 timestamp, @@ -340,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv 
*priv, .dma = tx_info->map0_dma, }; - if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { + if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { dma_unmap_page(priv->ddev, tx_info->map0_dma, PAGE_SIZE, priv->dma_dir); put_page(tx_info->page); @@ -382,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } +static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe, + u16 cqe_index, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info; + struct mlx4_en_tx_desc *tx_desc; + u16 wqe_index; + int desc_size; + + en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n", + ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), + false); + + wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; + tx_info = &ring->tx_info[wqe_index]; + desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE; + en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, + wqe_index, desc_size); + tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); + + if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + return; + + en_err(priv, "Scheduling port restart\n"); + queue_work(mdev->workqueue, &priv->restart_task); +} + bool mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int napi_budget) { @@ -428,13 +467,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, dma_rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == - MLX4_CQE_OPCODE_ERROR)) { - struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; - - en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", - cqe_err->vendor_err_syndrome, - cqe_err->syndrome); - } + MLX4_CQE_OPCODE_ERROR)) + if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) + mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index, + ring); /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; @@ -449,7 +485,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - last_nr_txbb = ring->free_tx_desc( + last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc, + mlx4_en_free_tx_desc, + mlx4_en_recycle_tx_desc, priv, ring, ring_index, timestamp, napi_budget); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 6e501af0e532..f6cfec81ccc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) #define INIT_HCA_MCAST_OFFSET 0x0c0 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) -#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) -#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13) +#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17) #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 @@ -1873,7 +1873,7 @@ int 
mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_DRIVER_VERSION_SZ 0x40 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) -#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) +#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13) #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) @@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work) if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); - return; + goto out; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 650ae08c71de..8f020f26ebf5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -182,8 +182,8 @@ struct mlx4_init_hca_param { u64 cmpt_base; u64 mtt_base; u64 global_caps; - u16 log_mc_entry_sz; - u16 log_mc_hash_sz; + u8 log_mc_entry_sz; + u8 log_mc_hash_sz; u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ u8 log_num_qps; u8 log_num_srqs; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 58546231416b..53829f4c3335 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2552,6 +2552,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2602,7 +2603,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); @@ -4354,12 +4356,14 @@ end: static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 630f15977f09..17a5bd4c68b2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -271,6 +271,10 @@ struct mlx4_en_page_cache { } buf[MLX4_EN_CACHE_SIZE]; }; +enum { + MLX4_EN_TX_RING_STATE_RECOVERING, +}; + struct mlx4_en_priv; struct mlx4_en_tx_ring { @@ -317,6 +321,7 @@ struct mlx4_en_tx_ring { * Only queue_stopped might be used if BQL is not properly working. 
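 *
 * Aside on the "state" field added just below (a simplified sketch using
 * this series' names): the RECOVERING/RESTARTING bits implement a
 * schedule-once pattern, so concurrent error paths queue the restart
 * work at most once until the bit is cleared by a successful restart:
 *
 *	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
 *		queue_work(mdev->workqueue, &priv->restart_task);
 *
 *	// later, once mlx4_en_start_port() succeeds:
 *	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);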
*/ unsigned long queue_stopped; + unsigned long state; struct mlx4_hwq_resources sp_wqres; struct mlx4_qp sp_qp; struct mlx4_qp_context sp_context; @@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap { struct mutex mutex; /* for mutual access to stats bitmap */ }; +enum { + MLX4_EN_STATE_FLAG_RESTARTING, +}; + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -595,7 +604,7 @@ struct mlx4_en_priv { struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct watchdog_task; + struct work_struct restart_task; struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; @@ -643,6 +652,7 @@ struct mlx4_en_priv { u32 pflags; u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; u8 rss_hash_fn; + unsigned long state; }; enum mlx4_en_wol { @@ -787,6 +797,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ ((dev->features & feature) ^ (new_features & feature)) +int mlx4_en_moderation_update(struct mlx4_en_priv *priv); int mlx4_en_reset_config(struct net_device *dev, struct hwtstamp_config ts_config, netdev_features_t new_features); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 1a11bc0e1612..cfa0bba3940f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1187ef1375e2..cb341372d5a3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule if (!fs_rule->mirr_mbox) { mlx4_err(dev, "rule mirroring mailbox is null\n"); + mlx4_free_cmd_mailbox(dev, mailbox); return -EINVAL; } memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index ea934cd02448..76547d35cd0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -69,12 +69,10 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; -static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, - struct mlx5_cmd_msg *in, - struct mlx5_cmd_msg *out, - void *uout, int uout_size, - mlx5_cmd_cbk_t cbk, - void *context, int page_queue) +static struct mlx5_cmd_work_ent * +cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t cbk, void *context, int page_queue) { gfp_t alloc_flags = cbk ? 
GFP_ATOMIC : GFP_KERNEL; struct mlx5_cmd_work_ent *ent; @@ -83,6 +81,7 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, if (!ent) return ERR_PTR(-ENOMEM); + ent->idx = -EINVAL; ent->in = in; ent->out = out; ent->uout = uout; @@ -91,10 +90,16 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, ent->context = context; ent->cmd = cmd; ent->page_queue = page_queue; + refcount_set(&ent->refcnt, 1); return ent; } +static void cmd_free_ent(struct mlx5_cmd_work_ent *ent) +{ + kfree(ent); +} + static u8 alloc_token(struct mlx5_cmd *cmd) { u8 token; @@ -109,7 +114,7 @@ static u8 alloc_token(struct mlx5_cmd *cmd) return token; } -static int alloc_ent(struct mlx5_cmd *cmd) +static int cmd_alloc_index(struct mlx5_cmd *cmd) { unsigned long flags; int ret; @@ -123,7 +128,7 @@ static int alloc_ent(struct mlx5_cmd *cmd) return ret < cmd->max_reg_cmds ? ret : -ENOMEM; } -static void free_ent(struct mlx5_cmd *cmd, int idx) +static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { unsigned long flags; @@ -132,6 +137,22 @@ static void free_ent(struct mlx5_cmd *cmd, int idx) spin_unlock_irqrestore(&cmd->alloc_lock, flags); } +static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) +{ + refcount_inc(&ent->refcnt); +} + +static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) +{ + if (!refcount_dec_and_test(&ent->refcnt)) + return; + + if (ent->idx >= 0) + cmd_free_index(ent->cmd, ent->idx); + + cmd_free_ent(ent); +} + static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) { return cmd->cmd_buf + (idx << cmd->log_stride); @@ -219,11 +240,6 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) ent->ret = -ETIMEDOUT; } -static void free_cmd(struct mlx5_cmd_work_ent *ent) -{ - kfree(ent); -} - static int verify_signature(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd_mailbox *next = ent->out->next; @@ -837,17 +853,55 @@ static void cb_timeout_handler(struct work_struct *work) struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); + mlx5_cmd_eq_recover(dev); + + /* Maybe got handled by eq recover ? */ + if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + goto out; /* phew, already handled */ + } + ent->ret = -ETIMEDOUT; - mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", - mlx5_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", + ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + +out: + cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) +{ + if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) + return true; + + return cmd->allowed_opcode == opcode; +} + +static int cmd_alloc_index_retry(struct mlx5_cmd *cmd) +{ + unsigned long alloc_end = jiffies + msecs_to_jiffies(1000); + int idx; + +retry: + idx = cmd_alloc_index(cmd); + if (idx < 0 && time_before(jiffies, alloc_end)) { + /* Index allocation can fail on heavy load of commands. 
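 *
 * (This goto-based retry is equivalent to the following bounded
 * busy-wait, shown only as a structured restatement:
 *
 *	unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
 *
 *	do {
 *		idx = cmd_alloc_index(cmd);
 *		if (idx >= 0)
 *			break;
 *		cpu_relax();
 *	} while (time_before(jiffies, alloc_end));
 *	return idx;
 *
 * i.e. spin for at most one second before giving up.)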
This is a temporary + * situation as the current command already holds the semaphore, meaning that + * another command completion is being handled and it is expected to release + * the entry index soon. + */ + cpu_relax(); + goto retry; + } + return idx; +} + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); @@ -861,17 +915,18 @@ static void cmd_work_handler(struct work_struct *work) int alloc_ret; int cmd_mode; + complete(&ent->handling); sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - alloc_ret = alloc_ent(cmd); + alloc_ret = cmd_alloc_index_retry(cmd); if (alloc_ret < 0) { mlx5_core_err(dev, "failed to allocate command entry\n"); if (ent->callback) { ent->callback(-EAGAIN, ent->context); mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); - free_cmd(ent); + cmd_ent_put(ent); } else { ent->ret = -EAGAIN; complete(&ent->done); @@ -888,7 +943,6 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -908,12 +962,14 @@ static void cmd_work_handler(struct work_struct *work) ent->ts1 = ktime_get_ns(); cmd_mode = cmd->mode; - if (ent->callback) - schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout)) + cmd_ent_get(ent); + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); /* Skip sending command to fw if internal error */ if (pci_channel_offline(dev->pdev) || - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + !opcode_allowed(&dev->cmd, ent->op)) { u8 status = 0; u32 drv_synd; @@ -925,6 +981,7 @@ static void cmd_work_handler(struct work_struct *work) return; } + cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -968,25 +1025,62 @@ static const char *deliv_status_to_str(u8 status) } } +enum { + MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000, +}; + +static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, + struct mlx5_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC); + + mlx5_cmd_eq_recover(dev); + + /* Re-wait on the ent->done after executing the recovery flow. If the + * recovery flow (or any other recovery flow running simultaneously) + * has recovered an EQE, it should cause the entry to be completed by + * the command interface. 
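 *
 * (Illustrative rationale, simplified.) The re-wait gives a completion
 * recovered by the EQ poll a chance to arrive through the normal path
 * before the entry is failed by hand; only when this second wait also
 * times out is a completion forced:
 *
 *	ent->ret = -ETIMEDOUT;
 *	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);	// forced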
+ */ + if (wait_for_completion_timeout(&ent->done, timeout)) { + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + return; + } + + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + + ent->ret = -ETIMEDOUT; + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); +} + static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd *cmd = &dev->cmd; int err; - if (cmd->mode == CMD_MODE_POLLING || ent->polling) { - wait_for_completion(&ent->done); - } else if (!wait_for_completion_timeout(&ent->done, timeout)) { - ent->ret = -ETIMEDOUT; - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + if (!wait_for_completion_timeout(&ent->handling, timeout) && + cancel_work_sync(&ent->work)) { + ent->ret = -ECANCELED; + goto out_err; } + if (cmd->mode == CMD_MODE_POLLING || ent->polling) + wait_for_completion(&ent->done); + else if (!wait_for_completion_timeout(&ent->done, timeout)) + wait_func_handle_exec_timeout(dev, ent); +out_err: err = ent->ret; if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + } else if (err == -ECANCELED) { + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1014,14 +1108,20 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (callback && page_queue) return -EINVAL; - ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, - page_queue); + ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, + callback, context, page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); + /* put for this ent is when consumed, depending on the use case + * 1) (!callback) blocking flow: by caller after wait_func completes + * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled + */ + ent->token = token; ent->polling = force_polling; + init_completion(&ent->handling); if (!callback) init_completion(&ent->done); @@ -1036,11 +1136,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, } if (callback) - goto out; + goto out; /* mlx5_cmd_comp_handler() will put(ent) */ err = wait_func(dev, ent); - if (err == -ETIMEDOUT) - goto out; + if (err == -ETIMEDOUT || err == -ECANCELED) + goto out_free; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); @@ -1057,7 +1157,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, *status = ent->status; out_free: - free_cmd(ent); + cmd_ent_put(ent); out: return err; } @@ -1387,6 +1487,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) mlx5_cmdif_debugfs_init(dev); } +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + down(&cmd->pages_sem); + + cmd->allowed_opcode = opcode; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1472,14 +1588,19 @@ static 
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force if (!forced) { mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", ent->idx); - free_ent(cmd, ent->idx); - free_cmd(ent); + cmd_ent_put(ent); } continue; } - if (ent->callback) - cancel_delayed_work(&ent->cb_timeout_work); + if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work)) + cmd_ent_put(ent); /* timeout work was canceled */ + + if (!forced || /* Real FW completion */ + pci_channel_offline(dev->pdev) || /* FW is inaccessible */ + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + cmd_ent_put(ent); + if (ent->page_queue) sem = &cmd->pages_sem; else @@ -1501,10 +1622,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force ent->ret, deliv_status_to_str(ent->status), ent->status); } - /* only real completion will free the entry slot */ - if (!forced) - free_ent(cmd, ent->idx); - if (ent->callback) { ds = ent->ts2 - ent->ts1; if (ent->op < ARRAY_SIZE(cmd->stats)) { @@ -1532,10 +1649,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force free_msg(dev, ent->in); err = err ? err : ent->status; - if (!forced) - free_cmd(ent); + /* final consumer is done, release ent */ + cmd_ent_put(ent); callback(err, context); } else { + /* release wait_func() so mlx5_cmd_invoke() + * can make the final ent_put() + */ complete(&ent->done); } up(sem); @@ -1545,8 +1665,11 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) { + struct mlx5_cmd *cmd = &dev->cmd; + unsigned long bitmask; unsigned long flags; u64 vector; + int i; /* wait for pending handlers to complete */ mlx5_eq_synchronize_cmd_irq(dev); @@ -1555,11 +1678,20 @@ void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) if (!vector) goto no_trig; + bitmask = vector; + /* we must increment the allocated entries refcount before triggering the completions + * to guarantee pending commands will not get freed in the meanwhile. + * For that reason, it also has to be done inside the alloc_lock. 
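 *
 * (Aside, summarizing the refcount scheme introduced above as described
 * by this series' own comments.) An entry can be pinned by up to four
 * holders: the creator's reference from cmd_alloc_ent(), dropped by the
 * final consumer; the timeout work's reference, dropped in
 * cb_timeout_handler() or when that work is canceled; the reference taken
 * for the real FW completion, dropped in mlx5_cmd_comp_handler(); and the
 * transient reference taken here, so a concurrent real completion cannot
 * free the entry while it is being force-completed:
 *
 *	cmd_ent_get(ent);	// refcount_inc(&ent->refcnt)
 *	...			// forced completion runs safely
 *	cmd_ent_put(ent);	// last put frees the index and the entry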
+ */ + for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + cmd_ent_get(cmd->ent_arr[i]); vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); mlx5_cmd_comp_handler(dev, vector, true); + for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + cmd_ent_put(cmd->ent_arr[i]); return; no_trig: @@ -1663,12 +1795,13 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int err; u8 status = 0; u32 drv_synd; + u16 opcode; u8 token; + opcode = MLX5_GET(mbox_in, in, opcode); if (pci_channel_offline(dev->pdev) || - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); - + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + !opcode_allowed(&dev->cmd, opcode)) { err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); MLX5_SET(mbox_out, out, status, status); MLX5_SET(mbox_out, out, syndrome, drv_synd); @@ -1970,6 +2103,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); cmd->mode = CMD_MODE_POLLING; + cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; create_msg_cache(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 381925c90d94..d63ce3feb65c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink, if (err) return err; - return mlx5_firmware_flash(dev, fw, extack); + err = mlx5_firmware_flash(dev, fw, extack); + release_firmware(fw); + + return err; } static u8 mlx5_fw_ver_major(u32 version) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 94d7b69a95c7..eb2e57ff08a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) return NULL; } - tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL); if (!tracer) return ERR_PTR(-ENOMEM); @@ -982,7 +982,7 @@ destroy_workqueue: tracer->dev = NULL; destroy_workqueue(tracer->work_queue); free_tracer: - kfree(tracer); + kvfree(tracer); return ERR_PTR(err); } @@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); - kfree(tracer); + kvfree(tracer); } static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 55ceabf077b2..b5c8afe8cd10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -92,7 +92,12 @@ struct page_pool; #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) -#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between + * WQEs, This page will absorb write overflow by the hardware, when + * receiving packets larger than MTU. These oversize packets are + * dropped by the driver at a later stage. 
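 *
 * Worked example (illustrative, with a hypothetical 16 pages per WQE):
 * the old value ALIGN(16, 8) == 16 grows to ALIGN(16 + 1, 8) == 24, so
 * the 8 trailing MTT slots - the overflow page plus alignment padding -
 * all map the shared filler page set up at mkey creation:
 *
 *	ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)	// 17 -> 24 in this example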
+ */ +#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)) #define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ @@ -367,6 +372,7 @@ enum { MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, + MLX5E_SQ_STATE_PENDING_XSK_TX, }; struct mlx5e_sq_wqe_info { @@ -693,6 +699,7 @@ struct mlx5e_rq { u32 rqn; struct mlx5_core_dev *mdev; struct mlx5_core_mkey umr_mkey; + struct mlx5e_dma_info wqe_overflow; /* XDP read-mostly */ struct xdp_rxq_info xdp_rxq; @@ -948,7 +955,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); -void mlx5e_poll_ico_cq(struct mlx5e_cq *cq); +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); @@ -1035,14 +1042,15 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs); void mlx5e_close_channels(struct mlx5e_channels *chs); -/* Function pointer to be used to modify WH settings while +/* Function pointer to be used to modify HW or kernel settings while * switching channels */ -typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); +typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv); int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify); + mlx5e_fp_preactivate preactivate); +int mlx5e_num_channels_changed(struct mlx5e_priv *priv); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); @@ -1101,7 +1109,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index fce6eccdcf8b..fa81a97f6ba9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_400GAUI_8] = 400000, }; +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) +{ + struct mlx5e_port_eth_proto eproto; + int err; + + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) + return true; + + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); + if (err) + return false; + + return !!eproto.cap; +} + static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, const u32 **arr, u32 *size, bool force_legacy) { - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); *size = ext ? 
ARRAY_SIZE(mlx5e_ext_link_speed) : ARRAY_SIZE(mlx5e_link_speed); @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) bool ext; int err; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) goto out; @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) int err; int i; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index 4a7f4497692b..e196888f7056 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool force_legacy); - +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index 951ea26d96bc..6d27f69cc7fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, option_key = (struct geneve_opt *)&enc_opts.key->data[0]; option_mask = (struct geneve_opt *)&enc_opts.mask->data[0]; + if (option_mask->opt_class == 0 && option_mask->type == 0 && + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4)) + return 0; + if (option_key->length > max_tlv_option_data_len) { NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options: unsupported option len"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index c28cbae42331..2c80205dc939 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) mlx5e_close_cq(&c->xskicosq.cq); mlx5e_close_xdpsq(&c->xsksq); mlx5e_close_cq(&c->xsksq.cq); + + memset(&c->xskrq, 0, sizeof(c->xskrq)); + memset(&c->xsksq, 0, sizeof(c->xsksq)); + memset(&c->xskicosq, 0, sizeof(c->xskicosq)); } void mlx5e_activate_xsk(struct mlx5e_channel *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index fe2d596cb361..3bcdb5b2fc20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) return 0; + if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state)) + return 0; + spin_lock(&c->xskicosq_lock); mlx5e_trigger_irq(&c->xskicosq); spin_unlock(&c->xskicosq_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 46725cd743a3..7d1985fa0d4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev, struct mlx5e_ktls_offload_context_tx *tx_priv = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); - mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); + mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); kvfree(tx_priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c index 01468ec27446..b949b9a7538b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -35,7 +35,6 @@ #include <net/sock.h> #include "en.h" -#include "accel/tls.h" #include "fpga/sdk.h" #include "en_accel/tls.h" @@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) +static bool is_tls_atomic_stats(struct mlx5e_priv *priv) +{ + return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev); +} + int mlx5e_tls_get_count(struct mlx5e_priv *priv) { - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; return NUM_TLS_SW_COUNTERS; @@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { unsigned int i, idx = 0; - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) @@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { int i, idx = 0; - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c6776f308d5e..e3dc2cbdc9f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : @@ -445,9 +445,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { *cur_params = new_channels.params; - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); + mlx5e_num_channels_changed(priv); goto out; } @@ -455,12 +453,8 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, if (arfs_enabled) mlx5e_arfs_disable(priv); - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); - /* Switch to new channels, set new parameters and close old ones */ - err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed); if (arfs_enabled) { int err2 = mlx5e_arfs_enable(priv); @@ -706,11 +700,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, return 0; } -static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, - u32 eth_proto_cap, - u8 connector_type, bool ext) +static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev, + struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap, u8 connector_type) { - if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { + if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) @@ -842,9 +836,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = { [MLX5E_PORT_OTHER] = PORT_OTHER, }; -static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext) +static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type) { - if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) return ptys2connector_type[connector_type]; if (eth_proto & @@ -877,7 +871,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } @@ -906,7 +900,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, @@ -943,11 +937,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; - - link_ksettings->base.port = get_connector_port(eth_proto_oper, - connector_type, ext); - ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, - connector_type, ext); + connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ? 
+ connector_type : MLX5E_PORT_UNKNOWN; + link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type); + ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin, + connector_type); get_lp_advertising(mdev, eth_proto_lp, link_ksettings); if (an_status == MLX5_AN_COMPLETE) @@ -982,6 +976,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); } +static int mlx5e_speed_validate(struct net_device *netdev, bool ext, + const unsigned long link_modes, u8 autoneg) +{ + /* Extended link-mode has no speed limitations. */ + if (ext) + return 0; + + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && + autoneg != AUTONEG_ENABLE) { + netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n", + __func__); + return -EINVAL; + } + return 0; +} + static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; @@ -1058,7 +1068,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext_supported = mlx5e_ptys_ext_supported(mdev); ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; @@ -1074,13 +1084,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : mlx5e_port_speed2linkmodes(mdev, speed, !ext); - if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && - autoneg != AUTONEG_ENABLE) { - netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", - __func__); - err = -EINVAL; + err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg); + if (err) goto out; - } link_modes = link_modes & eproto.cap; if (!link_modes) { @@ -1554,6 +1560,10 @@ static int mlx5e_set_fecparam(struct net_device *netdev, int mode; int err; + if (bitmap_weight((unsigned long *)&fecparam->fec, + ETHTOOL_FEC_BASER_BIT + 1) > 1) + return -EOPNOTSUPP; + for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) { if (!(pplm_fec_2_ethtool[mode] & fecparam->fec)) continue; @@ -1801,6 +1811,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; @@ -1810,7 +1821,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return -EINVAL; } - mlx5e_modify_rx_cqe_compression_locked(priv, enable); + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + if (err) + return err; + priv->channels.params.rx_cqe_compress_def = enable; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 73d3dc07331f..c4ac7a9968d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; } + if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type)) + return 0; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { @@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if 
(priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } @@ -415,8 +417,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + + /* must be called after DESTROY bit is set and + * set_rx_mode is called and flushed + */ + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); } @@ -921,6 +927,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); + ft->g = NULL; return -ENOMEM; } @@ -1061,6 +1068,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); + ft->g = NULL; return -ENOMEM; } @@ -1340,6 +1348,7 @@ err_destroy_groups: ft->g[ft->num_groups] = NULL; mlx5e_destroy_groups(ft); kvfree(in); + kfree(ft->g); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ee7c753e9ea0..e9bae659b742 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -266,12 +266,17 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, u64 npages, u8 page_shift, - struct mlx5_core_mkey *umr_mkey) + struct mlx5_core_mkey *umr_mkey, + dma_addr_t filler_addr) { - int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + struct mlx5_mtt *mtt; + int inlen; void *mkc; u32 *in; int err; + int i; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) @@ -291,6 +296,18 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, translations_octword_size, MLX5_MTT_OCTW(npages)); MLX5_SET(mkc, mkc, log_page_size, page_shift); + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + MLX5_MTT_OCTW(npages)); + + /* Initialize the mkey with all MTTs pointing to a default + * page (filler_addr). When the channels are activated, UMR + * WQEs will redirect the RX WQEs to the actual memory from + * the RQ's pool, while the gaps (wqe_overflow) remain mapped + * to the default page. 
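 *
 * (Aside, illustrative.) The filler must be a real, DMA-mapped page so
 * that hardware overflow writes land somewhere harmless rather than in
 * unrelated memory. Its lifetime, simplified from the hunks below:
 *
 *	err = mlx5e_alloc_mpwqe_rq_drop_page(rq);  // alloc_page + dma_map_page
 *	err = mlx5e_create_rq_umr_mkey(mdev, rq);  // MTTs prefilled with its addr
 *	...
 *	mlx5e_free_mpwqe_rq_drop_page(rq);         // dma_unmap_page + __free_page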
+ */ + mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0 ; i < npages ; i++) + mtt[i].ptag = cpu_to_be64(filler_addr); err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); @@ -302,7 +319,8 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq { u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); - return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); + return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey, + rq->wqe_overflow.addr); } static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) @@ -370,6 +388,28 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work) mlx5e_reporter_rq_cqe_err(rq); } +static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq) +{ + rq->wqe_overflow.page = alloc_page(GFP_KERNEL); + if (!rq->wqe_overflow.page) + return -ENOMEM; + + rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0, + PAGE_SIZE, rq->buff.map_dir); + if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) { + __free_page(rq->wqe_overflow.page); + return -ENOMEM; + } + return 0; +} + +static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) +{ + dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE, + rq->buff.map_dir); + __free_page(rq->wqe_overflow.page); +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, @@ -408,12 +448,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->stats = &c->priv->channel_stats[c->ix].rq; INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); - rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; - if (IS_ERR(rq->xdp_prog)) { - err = PTR_ERR(rq->xdp_prog); - rq->xdp_prog = NULL; - goto err_rq_wq_destroy; - } + if (params->xdp_prog) + bpf_prog_inc(params->xdp_prog); + rq->xdp_prog = params->xdp_prog; rq_xdp_ix = rq->ix; if (xsk) @@ -432,7 +469,11 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; + + err = mlx5e_alloc_mpwqe_rq_drop_page(rq); + if (err) + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -474,7 +515,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) - goto err_rq_wq_destroy; + goto err_rq_drop_page; rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); err = mlx5e_rq_alloc_mpwqe_info(rq, c); @@ -485,7 +526,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -622,6 +663,8 @@ err_free: case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); +err_rq_drop_page: + mlx5e_free_mpwqe_rq_drop_page(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ kvfree(rq->wqe.frags); @@ -649,6 +692,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); + mlx5e_free_mpwqe_rq_drop_page(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ kvfree(rq->wqe.frags); @@ -2318,8 +2362,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, { switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return order_base_2(MLX5E_UMR_WQEBBS) + - 
mlx5e_get_rq_log_wq_sz(rqp->rqc); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, + order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc)); default: /* MLX5_WQ_TYPE_CYCLIC */ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } @@ -2758,7 +2803,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); } - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + /* Verify inner tirs resources allocated */ + if (!priv->inner_indir_tir[0].tirn) return; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { @@ -2897,6 +2943,28 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } +static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv) +{ + int num_txqs = priv->channels.num * priv->channels.params.num_tc; + int num_rxqs = priv->channels.num * priv->profile->rq_groups; + struct net_device *netdev = priv->netdev; + + mlx5e_netdev_set_tcs(netdev); + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, num_rxqs); +} + +int mlx5e_num_channels_changed(struct mlx5e_priv *priv) +{ + u16 count = priv->channels.params.num_channels; + + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); + + return 0; +} + static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) { int i, ch; @@ -2918,13 +2986,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { - int num_txqs = priv->channels.num * priv->channels.params.num_tc; - int num_rxqs = priv->channels.num * priv->profile->rq_groups; - struct net_device *netdev = priv->netdev; - - mlx5e_netdev_set_tcs(netdev); - netif_set_real_num_tx_queues(netdev, num_txqs); - netif_set_real_num_rx_queues(netdev, num_rxqs); + mlx5e_update_netdev_queues(priv); mlx5e_build_txq_maps(priv); mlx5e_activate_channels(&priv->channels); @@ -2960,7 +3022,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify) + mlx5e_fp_preactivate preactivate) { struct net_device *netdev = priv->netdev; int new_num_txqs; @@ -2979,9 +3041,11 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, priv->channels = *new_chs; - /* New channels are ready to roll, modify HW settings if needed */ - if (hw_modify) - hw_modify(priv); + /* New channels are ready to roll, call the preactivate hook if needed + * to modify HW settings or update kernel parameters. 
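 *
 * (Aside, illustrative.) Any int (*)(struct mlx5e_priv *) can serve as
 * the preactivate hook; for example, the ethtool path in this series
 * passes the helper that rebuilds the default RSS indirection table:
 *
 *	err = mlx5e_safe_switch_channels(priv, &new_channels,
 *					 mlx5e_num_channels_changed);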
+ */ + if (preactivate) + preactivate(priv); priv->profile->update_rx(priv); mlx5e_activate_priv_channels(priv); @@ -2993,7 +3057,7 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify) + mlx5e_fp_preactivate preactivate) { int err; @@ -3001,7 +3065,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, if (err) return err; - mlx5e_switch_priv_channels(priv, new_chs, hw_modify); + mlx5e_switch_priv_channels(priv, new_chs, preactivate); return 0; } @@ -3019,6 +3083,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; } +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, + enum mlx5_port_status state) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int vport_admin_state; + + mlx5_set_port_admin_status(mdev, state); + + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + return; + + if (state == MLX5_PORT_UP) + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; + else + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; + + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3051,7 +3134,7 @@ int mlx5e_open(struct net_device *netdev) mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); if (!err) - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); if (mlx5_vxlan_allowed(priv->mdev->vxlan)) @@ -3088,7 +3171,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -3387,14 +3470,15 @@ out: return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { int i; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) + /* Verify inner tirs resources allocated */ + if (!priv->inner_indir_tir[0].tirn) return; for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) @@ -3561,7 +3645,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; - if (!mlx5e_monitor_counter_supported(priv)) { + /* In switchdev mode, monitor counters doesn't monitor + * rx/tx stats of 802_3. 
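mlx5e_num_channels_changed() above rebuilds the default RSS indirection table only when the user has not pinned one via ethtool (netif_is_rxfh_configured()). Upstream, mlx5e_build_default_indir_rqt() is a plain round-robin spread over the active channels; a self-contained userspace rendition:

    #include <stdio.h>

    #define INDIR_RQT_SIZE 8 /* the real table is 128 entries; shortened here */

    /* Round-robin spread of indirection slots over the active channels. */
    static void build_default_indir_rqt(unsigned int *tbl, int len, int nch)
    {
            for (int i = 0; i < len; i++)
                    tbl[i] = i % nch;
    }

    int main(void)
    {
            unsigned int tbl[INDIR_RQT_SIZE];

            build_default_indir_rqt(tbl, INDIR_RQT_SIZE, 3);
            for (int i = 0; i < INDIR_RQT_SIZE; i++)
                    printf("slot %d -> channel %u\n", i, tbl[i]);
            return 0;
    }

With 3 channels the 8 slots map 0,1,2,0,1,2,0,1, so traffic keeps spreading evenly whatever the channel count becomes.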
The update stats mechanism + * should keep the 802_3 layout counters updated + */ + if (!mlx5e_monitor_counter_supported(priv) || + mlx5e_is_uplink_rep(priv)) { /* update HW stats in background for next time */ mlx5e_queue_update_stats(priv); } @@ -4237,6 +4326,21 @@ void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); } +static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, + struct sk_buff *skb) +{ + switch (skb->inner_protocol) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + case htons(ETH_P_TEB): + return true; + case htons(ETH_P_MPLS_UC): + case htons(ETH_P_MPLS_MC): + return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre); + } + return false; +} + static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, struct sk_buff *skb, netdev_features_t features) @@ -4259,7 +4363,9 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, switch (proto) { case IPPROTO_GRE: - return features; + if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb)) + return features; + break; case IPPROTO_IPIP: case IPPROTO_IPV6: if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) @@ -4406,16 +4512,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* no need for full reset when exchanging programs */ reset = (!priv->channels.params.xdp_prog || !prog); - if (was_opened && !reset) { + if (was_opened && !reset) /* num_channels is invariant here, so we can take the * batched reference right upfront. */ - prog = bpf_prog_add(prog, priv->channels.num); - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto unlock; - } - } + bpf_prog_add(prog, priv->channels.num); if (was_opened && reset) { struct mlx5e_channels new_channels = {}; @@ -5096,7 +5197,7 @@ err_destroy_xsk_rqts: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -5115,7 +5216,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); @@ -5147,7 +5248,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); @@ -5308,9 +5409,10 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) max_nch = mlx5e_get_max_num_channels(priv->mdev); if (priv->channels.params.num_channels > max_nch) { mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); + /* Reducing the number of channels - RXFH has to be reset. 
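mlx5e_gre_tunnel_inner_proto_offload_supported() above gates GRE checksum/TSO offload on the encapsulated EtherType, with MPLS-over-GRE additionally gated on a device capability. A userspace model of the decision; the ETH_P_* values are the standard ones, and the capability bit is passed in as a plain bool here:

    #include <stdbool.h>
    #include <stdio.h>
    #include <arpa/inet.h> /* htons */

    #define ETH_P_IP      0x0800
    #define ETH_P_IPV6    0x86DD
    #define ETH_P_TEB     0x6558
    #define ETH_P_MPLS_UC 0x8847
    #define ETH_P_MPLS_MC 0x8848

    /* Plain IP/IPv6/transparent-Ethernet inside GRE is always offloadable;
     * MPLS-over-GRE only when the device advertises the capability. */
    static bool gre_inner_offload_ok(unsigned short inner_proto_be, bool mpls_cap)
    {
            if (inner_proto_be == htons(ETH_P_IP) ||
                inner_proto_be == htons(ETH_P_IPV6) ||
                inner_proto_be == htons(ETH_P_TEB))
                    return true;
            if (inner_proto_be == htons(ETH_P_MPLS_UC) ||
                inner_proto_be == htons(ETH_P_MPLS_MC))
                    return mpls_cap;
            return false;
    }

    int main(void)
    {
            printf("IPv4 in GRE: %d\n", gre_inner_offload_ok(htons(ETH_P_IP), false));
            printf("MPLS in GRE, no cap: %d\n",
                   gre_inner_offload_ok(htons(ETH_P_MPLS_UC), false));
            return 0;
    }

When the check fails, the features_check path above falls through and strips the offload flags instead of returning them wholesale.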
*/ + priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; priv->channels.params.num_channels = max_nch; - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, max_nch); + mlx5e_num_channels_changed(priv); } err = profile->init_tx(priv); @@ -5330,6 +5432,8 @@ err_cleanup_tx: profile->cleanup_tx(priv); out: + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + cancel_work_sync(&priv->update_stats_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index cd9bb7c7b341..88b51f64a64e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1597,7 +1597,7 @@ err_destroy_ttc_table: err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, false); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -1614,7 +1614,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) mlx5_del_flow_rules(rpriv->vport_rx_rule); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, false); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); @@ -1736,6 +1736,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work, mlx5e_tc_reoffload_flows_work); + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); @@ -1814,29 +1816,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep = rpriv->rep; struct netdev_phys_item_id ppid = {}; unsigned int dl_port_index = 0; + u16 pfnum; if (!is_devlink_port_supported(dev, rpriv)) return 0; mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); + pfnum = PCI_FUNC(dev->pdev->devfn); if (rep->vport == MLX5_VPORT_UPLINK) { devlink_port_attrs_set(&rpriv->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - PCI_FUNC(dev->pdev->devfn), false, 0, + pfnum, false, 0, &ppid.id[0], ppid.id_len); dl_port_index = vport_to_devlink_port_index(dev, rep->vport); } else if (rep->vport == MLX5_VPORT_PF) { devlink_port_attrs_pci_pf_set(&rpriv->dl_port, &ppid.id[0], ppid.id_len, - dev->pdev->devfn); + pfnum); dl_port_index = rep->vport; } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) { devlink_port_attrs_pci_vf_set(&rpriv->dl_port, &ppid.id[0], ppid.id_len, - dev->pdev->devfn, - rep->vport - 1); + pfnum, rep->vport - 1); dl_port_index = vport_to_devlink_port_index(dev, rep->vport); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d295a7afc8c..066bada4ccd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -587,7 +587,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !!err; } -void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5_cqe64 *cqe; @@ -595,11 +595,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) int i; if 
(unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return; + return 0; cqe = mlx5_cqwq_get_cqe(&cq->wq); if (likely(!cqe)) - return; + return 0; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), * otherwise a cq overrun may occur @@ -646,6 +646,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) sq->cc = sqcc; mlx5_cqwq_update_db_record(&cq->wq); + + return i; } bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) @@ -1426,6 +1428,7 @@ out: #ifdef CONFIG_MLX5_CORE_IPOIB +#define MLX5_IB_GRH_SGID_OFFSET 8 #define MLX5_IB_GRH_DGID_OFFSET 24 #define MLX5_GID_SIZE 16 @@ -1439,6 +1442,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; + u32 flags_rqpn; u32 qpn; u8 *dgid; u8 g; @@ -1460,7 +1464,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, tstamp = &priv->tstamp; stats = &priv->channel_stats[rq->ix].rq; - g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + flags_rqpn = be32_to_cpu(cqe->flags_rqpn); + g = (flags_rqpn >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) skb->pkt_type = PACKET_HOST; @@ -1469,9 +1474,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, else skb->pkt_type = PACKET_MULTICAST; - /* TODO: IB/ipoib: Allow mcast packets from other VFs - * 68996a6e760e5c74654723eeb57bf65628ae87f4 + /* Drop packets that this interface sent, ie multicast packets + * that the HCA has replicated. */ + if (g && (qpn == (flags_rqpn & 0xffffff)) && + (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET, + MLX5_GID_SIZE) == 0)) { + skb->dev = NULL; + return; + } skb_pull(skb, MLX5_IB_GRH_BYTES); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1f9107d83848..6495c26d9596 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -57,6 +57,7 @@ #include "lib/devcom.h" #include "lib/geneve.h" #include "diag/en_tc_tracepoint.h" +#include <asm/div64.h> struct mlx5_nic_flow_attr { u32 action; @@ -1837,8 +1838,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); - netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", - dissector->used_keys); + netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", + dissector->used_keys); return -EOPNOTSUPP; } @@ -3181,12 +3182,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, u32 *action) { - int nest_level = attr->parse_attr->filter_dev->lower_level; struct flow_action_entry vlan_act = { .id = FLOW_ACTION_VLAN_POP, }; - int err = 0; + int nest_level, err = 0; + nest_level = attr->parse_attr->filter_dev->lower_level - + priv->netdev->lower_level; while (nest_level--) { err = parse_tc_vlan_action(priv, &vlan_act, attr, action); if (err) @@ -3942,13 +3944,13 @@ errout: return err; } -static int apply_police_params(struct mlx5e_priv *priv, u32 rate, +static int apply_police_params(struct mlx5e_priv *priv, u64 rate, struct netlink_ext_ack *extack) { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch *esw; + u32 rate_mbps = 0; u16 vport_num; - u32 rate_mbps; int err; vport_num = rpriv->rep->vport; @@ -3965,7 +3967,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * Moreover, if rate is non zero we choose to configure to a minimum of * 1 mbit/sec. 
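The IPoIB hunk above drops a completion when it is the HCA's own multicast echo: the source QPN carried in flags_rqpn matches the receive QP, and the GRH source GID equals the GID embedded in the 20-byte IPoIB hardware address (the GID sits at offset 4, after the QPN bytes). The test in miniature:

    #include <stdio.h>
    #include <string.h>

    #define GID_SIZE 16

    /* Returns 1 when a received multicast is our own replicated send:
     * same QPN and same 16-byte source GID. dev_addr is the 20-byte
     * IPoIB hardware address; the GID starts at offset 4. */
    static int is_own_multicast(const unsigned char *dev_addr,
                                const unsigned char *grh_sgid,
                                unsigned int my_qpn, unsigned int src_qpn)
    {
            return my_qpn == src_qpn &&
                   memcmp(dev_addr + 4, grh_sgid, GID_SIZE) == 0;
    }

    int main(void)
    {
            unsigned char addr[20] = { 0 }, sgid[16] = { 0 };

            printf("%d\n", is_own_multicast(addr, sgid, 0x1234, 0x1234)); /* 1 */
            printf("%d\n", is_own_multicast(addr, sgid, 0x1234, 0x4321)); /* 0 */
            return 0;
    }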
*/ - rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; + if (rate) { + rate = (rate * BITS_PER_BYTE) + 500000; + rate_mbps = max_t(u32, do_div(rate, 1000000), 1); + } + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index dee12f17f9c2..d9e0fc146741 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -537,10 +537,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; + u32 dma_fifo_cc, nbytes = 0; + u16 ci, sqcc, npkts = 0; struct sk_buff *skb; - u32 dma_fifo_cc; - u16 sqcc; - u16 ci; int i; sqcc = sq->cc; @@ -565,11 +564,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } dev_kfree_skb_any(skb); + npkts++; + nbytes += wi->num_bytes; sqcc += wi->num_wqebbs; } sq->dma_fifo_cc = dma_fifo_cc; sq->cc = sqcc; + + netdev_tx_completed_queue(sq->txq, npkts, nbytes); } #ifdef CONFIG_MLX5_CORE_IPOIB diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 800d34ed8a96..76efa9579215 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -145,7 +145,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) busy |= rq->post_wqes(rq); if (xsk_open) { - mlx5e_poll_ico_cq(&c->xskicosq.cq); + if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) + /* Don't clear the flag if nothing was polled to prevent + * queueing more WQEs and overflowing XSKICOSQ. + */ + clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state); busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 580c71cb9dfa..30a2ee3c40a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -190,6 +190,29 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) return count_eqe; } +static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags) + __acquires(&eq->lock) +{ + if (in_irq()) + spin_lock(&eq->lock); + else + spin_lock_irqsave(&eq->lock, *flags); +} + +static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags) + __releases(&eq->lock) +{ + if (in_irq()) + spin_unlock(&eq->lock); + else + spin_unlock_irqrestore(&eq->lock, *flags); +} + +enum async_eq_nb_action { + ASYNC_EQ_IRQ_HANDLER = 0, + ASYNC_EQ_RECOVER = 1, +}; + static int mlx5_eq_async_int(struct notifier_block *nb, unsigned long action, void *data) { @@ -199,11 +222,14 @@ static int mlx5_eq_async_int(struct notifier_block *nb, struct mlx5_eq_table *eqt; struct mlx5_core_dev *dev; struct mlx5_eqe *eqe; + unsigned long flags; int num_eqes = 0; dev = eq->dev; eqt = dev->priv.eq_table; + mlx5_eq_async_int_lock(eq_async, &flags); + eqe = next_eqe_sw(eq); if (!eqe) goto out; @@ -224,8 +250,19 @@ static int mlx5_eq_async_int(struct notifier_block *nb, out: eq_update_ci(eq, 1); + mlx5_eq_async_int_unlock(eq_async, &flags); - return 0; + return unlikely(action == ASYNC_EQ_RECOVER) ? 
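The policer hunk above moves the byte-to-Mbit conversion to 64-bit math and rounds to the nearest Mbit before dividing with do_div(). One subtlety worth remembering when reading it: do_div(n, base) from <asm/div64.h> leaves the quotient in n and returns the remainder. A runnable model of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_PER_BYTE 8

    /* Userspace stand-in for the kernel's do_div(n, base): n is replaced
     * by the quotient and the remainder is returned. */
    static uint32_t do_div_demo(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);

            *n /= base;
            return rem;
    }

    int main(void)
    {
            uint64_t rate = 125000000; /* bytes/sec == 1 Gbit/s */
            uint32_t rate_mbps = 0;

            if (rate) {
                    rate = rate * BITS_PER_BYTE + 500000; /* round to nearest Mbit */
                    do_div_demo(&rate, 1000000);          /* quotient left in rate */
                    rate_mbps = rate < 1 ? 1 : (uint32_t)rate;
            }
            printf("rate_mbps = %u\n", rate_mbps); /* 1000 */
            return 0;
    }

Keeping the intermediate in u64 is the point of the fix: a u32 would overflow as soon as rate * 8 exceeds 4 Gbit/s.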
num_eqes : 0; +} + +void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq; + int eqes; + + eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL); + if (eqes) + mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes); } static void init_eq_buf(struct mlx5_eq *eq) @@ -563,6 +600,40 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) gather_user_async_events(dev, mask); } +static int +setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq, + struct mlx5_eq_param *param, const char *name) +{ + int err; + + eq->irq_nb.notifier_call = mlx5_eq_async_int; + spin_lock_init(&eq->lock); + + err = create_async_eq(dev, &eq->core, param); + if (err) { + mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err); + return err; + } + err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); + if (err) { + mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err); + destroy_async_eq(dev, &eq->core); + } + return err; +} + +static void cleanup_async_eq(struct mlx5_core_dev *dev, + struct mlx5_eq_async *eq, const char *name) +{ + int err; + + mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); + err = destroy_async_eq(dev, &eq->core); + if (err) + mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n", + name, err); +} + static int create_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; @@ -572,77 +643,48 @@ static int create_async_eqs(struct mlx5_core_dev *dev) MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); mlx5_eq_notifier_register(dev, &table->cq_err_nb); - table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; param = (struct mlx5_eq_param) { .irq_index = 0, .nent = MLX5_NUM_CMD_EQE, + .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; - - param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD; - err = create_async_eq(dev, &table->cmd_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); - goto err0; - } - err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err); + mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); + err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd"); + if (err) goto err1; - } - mlx5_cmd_use_events(dev); - table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int; + mlx5_cmd_use_events(dev); + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); + param = (struct mlx5_eq_param) { .irq_index = 0, .nent = MLX5_NUM_ASYNC_EQE, }; gather_async_events_mask(dev, param.mask); - err = create_async_eq(dev, &table->async_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + err = setup_async_eq(dev, &table->async_eq, ¶m, "async"); + if (err) goto err2; - } - err = mlx5_eq_enable(dev, &table->async_eq.core, - &table->async_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable async EQ %d\n", err); - goto err3; - } - table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; param = (struct mlx5_eq_param) { .irq_index = 0, .nent = /* TODO: sriov max_vf + */ 1, + .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, }; - param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST; - err = create_async_eq(dev, &table->pages_eq.core, ¶m); - if (err) { - mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); - goto err4; - } - err = mlx5_eq_enable(dev, &table->pages_eq.core, - &table->pages_eq.irq_nb); - if (err) { - mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err); - goto err5; - } + err = 
setup_async_eq(dev, &table->pages_eq, ¶m, "pages"); + if (err) + goto err3; - return err; + return 0; -err5: - destroy_async_eq(dev, &table->pages_eq.core); -err4: - mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); err3: - destroy_async_eq(dev, &table->async_eq.core); + cleanup_async_eq(dev, &table->async_eq, "async"); err2: mlx5_cmd_use_polling(dev); - mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); + cleanup_async_eq(dev, &table->cmd_eq, "cmd"); err1: - destroy_async_eq(dev, &table->cmd_eq.core); -err0: + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); return err; } @@ -650,28 +692,11 @@ err0: static void destroy_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; - int err; - - mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb); - err = destroy_async_eq(dev, &table->pages_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", - err); - - mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); - err = destroy_async_eq(dev, &table->async_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", - err); + cleanup_async_eq(dev, &table->pages_eq, "pages"); + cleanup_async_eq(dev, &table->async_eq, "async"); mlx5_cmd_use_polling(dev); - - mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); - err = destroy_async_eq(dev, &table->cmd_eq.core); - if (err) - mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", - err); - + cleanup_async_eq(dev, &table->cmd_eq, "cmd"); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); } @@ -901,13 +926,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) mutex_unlock(&table->lock); } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +#define MLX5_MAX_ASYNC_EQS 4 +#else +#define MLX5_MAX_ASYNC_EQS 3 +#endif + int mlx5_eq_table_create(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? 
+ MLX5_CAP_GEN(dev, max_num_eqs) : + 1 << MLX5_CAP_GEN(dev, log_max_eq); int err; eq_table->num_comp_eqs = - mlx5_irq_get_num_comp(eq_table->irq_table); + min_t(int, + mlx5_irq_get_num_comp(eq_table->irq_table), + num_eqs - MLX5_MAX_ASYNC_EQS); err = create_async_eqs(dev); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c6ed4b7f4f97..009d383d83f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1592,6 +1592,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (!vport->qos.enabled) + return -EOPNOTSUPP; + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); return mlx5_modify_scheduling_element_cmd(esw->dev, @@ -1919,7 +1923,7 @@ abort: mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } - + esw_destroy_tsar(esw); return err; } @@ -2094,6 +2098,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; + int other_vport = 1; int err = 0; if (!ESW_ALLOWED(esw)) @@ -2101,15 +2107,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, if (IS_ERR(evport)) return PTR_ERR(evport); + if (vport == MLX5_VPORT_UPLINK) { + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; + other_vport = 0; + vport = 0; + } mutex_lock(&esw->state_lock); - err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, 1, link_state); + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { - mlx5_core_warn(esw->dev, - "Failed to set vport %d link state, err = %d", - vport, err); + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", + vport, opmod, err); goto unlock; } @@ -2151,8 +2159,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; - if (!ESW_ALLOWED(esw)) - return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); if (vlan > 4095 || qos > 7) @@ -2180,6 +2186,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u8 set_flags = 0; int err; + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; @@ -2364,12 +2373,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) max_guarantee = evport->info.min_rate; } - return max_t(u32, max_guarantee / fw_max_bw_share, 1); + if (max_guarantee) + return max_t(u32, max_guarantee / fw_max_bw_share, 1); + return 0; } -static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +static int normalize_vports_min_rate(struct mlx5_eswitch *esw) { u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + u32 divider = calculate_vports_min_rate_divider(esw); struct mlx5_vport *evport; u32 vport_max_rate; u32 vport_min_rate; @@ -2382,9 +2394,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) continue; vport_min_rate = evport->info.min_rate; vport_max_rate = evport->info.max_rate; - bw_share = MLX5_MIN_BW_SHARE; + bw_share = 0; - if (vport_min_rate) + if (divider) bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, divider, fw_max_bw_share); @@ -2409,7 +2421,6 @@ int 
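mlx5_eq_table_create() above now budgets completion EQs against the device EQ limit so the async EQs (command, async events, page requests, plus one for ODP when compiled in) always have contexts left. The arithmetic, isolated:

    #include <stdio.h>

    #define MAX_ASYNC_EQS 4 /* cmd + async + pages, plus one for ODP */

    /* The device either reports max_num_eqs directly or a log2 limit. */
    static int max_dev_eqs(int max_num_eqs, int log_max_eq)
    {
            return max_num_eqs ? max_num_eqs : 1 << log_max_eq;
    }

    /* Completion EQs get whatever is left after the async reservation. */
    static int num_comp_eqs(int num_irqs, int dev_eqs)
    {
            int budget = dev_eqs - MAX_ASYNC_EQS;

            return num_irqs < budget ? num_irqs : budget;
    }

    int main(void)
    {
            printf("%d\n", num_comp_eqs(64, max_dev_eqs(0, 5)));   /* 28: EQ count limits */
            printf("%d\n", num_comp_eqs(8, max_dev_eqs(1024, 0))); /* 8: IRQ vectors limit */
            return 0;
    }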
mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); u32 fw_max_bw_share; u32 previous_min_rate; - u32 divider; bool min_rate_supported; bool max_rate_supported; int err = 0; @@ -2434,8 +2445,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, previous_min_rate = evport->info.min_rate; evport->info.min_rate = min_rate; - divider = calculate_vports_min_rate_divider(esw); - err = normalize_vports_min_rate(esw, divider); + err = normalize_vports_min_rate(esw); if (err) { evport->info.min_rate = previous_min_rate; goto unlock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 6bd6f5895244..0ddbae1e64fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -606,6 +606,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { ret static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +static inline +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 5acfdea3a75a..7cc80dc4e6d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1143,36 +1143,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) } esw->fdb_table.offloads.send_to_vport_grp = g; - /* create peer esw miss group */ - memset(flow_group_in, 0, inlen); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + /* create peer esw miss group */ + memset(flow_group_in, 0, inlen); - esw_set_flow_group_source_port(esw, flow_group_in); + esw_set_flow_group_source_port(esw, flow_group_in); - if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { - match_criteria = MLX5_ADDR_OF(create_flow_group_in, - flow_group_in, - match_criteria); + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { + match_criteria = MLX5_ADDR_OF(create_flow_group_in, + flow_group_in, + match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - misc_parameters.source_eswitch_owner_vhca_id); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); - MLX5_SET(create_flow_group_in, flow_group_in, - source_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + } + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ix + esw->total_vports - 1); + ix += esw->total_vports; + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); + goto peer_miss_err; + } + esw->fdb_table.offloads.peer_miss_grp = g; } - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, - ix + esw->total_vports - 1); - ix += esw->total_vports; - - 
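normalize_vports_min_rate() above now computes the divider itself and hands out a zero bw_share when no vport has a guarantee at all. A rough userspace model of the normalization; the clamp between MLX5_MIN_BW_SHARE and fw_max_bw_share is an assumption about what MLX5_RATE_TO_BW_SHARE expands to, so treat this as a sketch, not the exact macro:

    #include <stdio.h>

    #define MIN_BW_SHARE 1

    /* Largest guarantee scaled so the busiest vport maps to fw_max;
     * zero when nobody asked for a guarantee. */
    static unsigned int divider(unsigned int max_guarantee, unsigned int fw_max)
    {
            unsigned int d;

            if (!max_guarantee)
                    return 0;
            d = max_guarantee / fw_max;
            return d ? d : 1;
    }

    static unsigned int bw_share(unsigned int min_rate, unsigned int div,
                                 unsigned int fw_max)
    {
            unsigned int share;

            if (!div)
                    return 0; /* no guarantees configured anywhere */
            share = min_rate / div;
            if (share < MIN_BW_SHARE)
                    share = MIN_BW_SHARE;
            return share > fw_max ? fw_max : share;
    }

    int main(void)
    {
            unsigned int d = divider(10000, 100); /* 10G guarantee, fw max 100 */

            printf("share(10000) = %u\n", bw_share(10000, d, 100)); /* 100 */
            printf("share(2500)  = %u\n", bw_share(2500, d, 100));  /* 25 */
            return 0;
    }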
g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); - goto peer_miss_err; - } - esw->fdb_table.offloads.peer_miss_grp = g; - /* create miss group */ memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -1206,7 +1208,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) miss_rule_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); miss_err: - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); peer_miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: @@ -1229,7 +1232,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 8bcf3426b9c6..3ce17c3d7a00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev) events->dev = dev; dev->priv.events = events; events->wq = create_singlethread_workqueue("mlx5_events"); - if (!events->wq) + if (!events->wq) { + kfree(events); return -ENOMEM; + } INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 86e6bbb57482..11e12761b0a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -496,6 +496,13 @@ static void del_sw_hw_rule(struct fs_node *node) goto out; } + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT && + --fte->dests_size) { + fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; + goto out; + } + if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { fte->modify_mask |= @@ -629,7 +636,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, fte->action = *flow_act; fte->flow_context = spec->flow_context; - tree_init_node(&fte->node, NULL, del_sw_fte); + tree_init_node(&fte->node, del_hw_fte, del_sw_fte); return fte; } @@ -809,18 +816,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root = find_root(&prio->node); struct mlx5_flow_table *iter; - int i = 0; int err; fs_for_each_ft(iter, prio) { - i++; err = root->cmds->modify_flow_table(root, iter, ft); if (err) { - mlx5_core_warn(dev, "Failed to modify flow table %d\n", - iter->id); + mlx5_core_err(dev, + "Failed to modify flow table id %d, type %d, err %d\n", + iter->id, iter->type, err); /* The driver is out of sync with the FW */ - if (i > 1) - WARN_ON(true); return err; } } @@ -1064,6 +1068,7 @@ static struct mlx5_flow_table 
*__mlx5_create_flow_table(struct mlx5_flow_namespa destroy_ft: root->cmds->destroy_flow_table(root, ft); free_ft: + rhltable_destroy(&ft->fgs_hash); kfree(ft); unlock_root: mutex_unlock(&root->chain_lock); @@ -1692,6 +1697,7 @@ search_again_locked: if (!fte_tmp) continue; rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); + /* No error check needed here, because insert_fte() is not called */ up_write_ref_node(&fte_tmp->node, false); tree_put_node(&fte_tmp->node, false); kmem_cache_free(steering->ftes_cache, fte); @@ -1740,7 +1746,8 @@ skip_search: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); - tree_put_node(&fte->node, false); + if (IS_ERR(rule)) + tree_put_node(&fte->node, false); return rule; } rule = ERR_PTR(-ENOENT); @@ -1840,7 +1847,8 @@ search_again_locked: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); - tree_put_node(&fte->node, false); + if (IS_ERR(rule)) + tree_put_node(&fte->node, false); tree_put_node(&g->node, false); return rule; @@ -1928,12 +1936,15 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) down_write_ref_node(&fte->node, false); for (i = handle->num_rules - 1; i >= 0; i--) tree_remove_node(&handle->rule[i]->node, true); - if (fte->modify_mask && fte->dests_size) { - modify_fte(fte); + if (fte->dests_size) { + if (fte->modify_mask) + modify_fte(fte); up_write_ref_node(&fte->node, false); - } else { + } else if (list_empty(&fte->node.children)) { del_hw_fte(&fte->node); - up_write(&fte->node.lock); + /* Avoid double call to del_hw_fte */ + fte->node.del_hw_func = NULL; + up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); } kfree(handle); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index c07f3154437c..f628887d8af8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -193,15 +193,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { + bool err_detected = false; + + /* Mark the device as fatal in order to abort FW commands */ + if ((check_fatal_sensors(dev) || force) && + dev->state == MLX5_DEVICE_STATE_UP) { + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + err_detected = true; + } mutex_lock(&dev->intf_state_mutex); - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) - goto unlock; + if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto unlock;/* a previous error is still being handled */ if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; goto unlock; } - if (check_fatal_sensors(dev) || force) { + if (check_fatal_sensors(dev) || force) { /* protected state setting */ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mlx5_cmd_flush(dev); } @@ -243,7 +251,7 @@ recover_from_sw_reset: if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) break; - cond_resched(); + msleep(20); } while (!time_after(jiffies, end)); if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 3ed8ab2d703d..0fed2419623d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -396,7 +396,7 @@ static int 
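Both add_rule_fg() call sites above now drop the FTE's creation reference only on failure; on success that reference is deliberately handed over to the returned rule. The ownership discipline as a self-contained demo (generic names, not mlx5 API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct node { int refs; };

    static struct node *node_new(void)
    {
            struct node *n = malloc(sizeof(*n)); /* demo: no NULL check */

            n->refs = 1; /* creation reference */
            return n;
    }

    static void node_put(struct node *n)
    {
            if (--n->refs == 0) {
                    printf("node freed\n");
                    free(n);
            }
    }

    /* On success the rule keeps the creation reference; only a failed
     * attach drops it -- the same discipline the fs_core fix applies. */
    static bool attach_rule(struct node *n, bool ok)
    {
            if (!ok)
                    node_put(n);
            return ok;
    }

    int main(void)
    {
            struct node *n = node_new();

            if (!attach_rule(n, false)) /* failure path: node freed here */
                    printf("attach failed\n");
            return 0;
    }

Dropping the reference unconditionally, as the old code did, would let a successful rule point at an FTE whose refcount it no longer holds.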
mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv, priv->direct_tir); err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv, priv->direct_tir); err_destroy_indirect_rqts: @@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv, priv->direct_tir); - mlx5e_destroy_indirect_tirs(priv, true); + mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv, priv->direct_tir); mlx5e_destroy_rqt(priv, &priv->indir_rqt); mlx5e_close_drop_rq(&priv->drop_rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 43f97601b500..492ff2ef9a40 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -388,10 +388,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { @@ -477,8 +498,9 @@ static int mlx5_pps_event(struct notifier_block *nb, switch (clock->ptp_info.pin_config[pin].func) { case PTP_PF_EXTTS: ptp_event.index = pin; - ptp_event.timestamp = timecounter_cyc2time(&clock->tc, - be64_to_cpu(eqe->data.pps.time_stamp)); + ptp_event.timestamp = + mlx5_timecounter_cyc2time(clock, + be64_to_cpu(eqe->data.pps.time_stamp)); if (clock->pps_info.enabled) { ptp_event.type = PTP_CLOCK_PPSUSR; ptp_event.pps_times.ts_real = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 4be4d2d36218..9aaf0eab7c2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -38,6 +38,7 @@ struct mlx5_eq { struct mlx5_eq_async { struct mlx5_eq core; struct notifier_block irq_nb; + spinlock_t lock; /* To avoid irq EQ handle races with resiliency flows */ }; struct mlx5_eq_comp { @@ -82,6 +83,7 @@ void mlx5_cq_tasklet_cb(unsigned long data); struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq); +void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev); void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev); void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c96a0e501007..f2657cd3ffa4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -50,6 +50,7 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif +#include <linux/version.h> #include <net/devlink.h> #include 
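The main.c hunks beginning here replace the hardcoded DRIVER_VERSION string with one derived from LINUX_VERSION_CODE (hence the new <linux/version.h> include), as the snprintf() in the hunk just below shows. The classic packing is (major << 16) + (minor << 8) + sublevel; encoding and decoding it:

    #include <stdio.h>

    /* The classic kernel version packing of this era; newer kernels clamp
     * the sublevel at 255 before packing. */
    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    int main(void)
    {
            unsigned int code = KERNEL_VERSION(5, 4, 120);

            printf("%u.%u.%u\n",
                   (code >> 16) & 0xff, /* version */
                   (code >> 8) & 0xff,  /* patchlevel */
                   code & 0xff);        /* sublevel (low byte) */
            return 0;
    }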
"mlx5_core.h" #include "lib/eq.h" @@ -227,7 +228,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_VERSION, remaining_size); + + snprintf(string + strlen(string), remaining_size, "%u.%u.%u", + (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), + (u16)(LINUX_VERSION_CODE & 0xffff)); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -794,6 +798,11 @@ err_disable: static void mlx5_pci_close(struct mlx5_core_dev *dev) { + /* health work might still be active, and it needs pci bar in + * order to know the NIC state. Therefore, drain the health WQ + * before removing the pci bars + */ + mlx5_drain_health_wq(dev); iounmap(dev->iseg); pci_clear_master(dev->pdev); release_bar(dev->pdev); @@ -1183,7 +1192,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) err = mlx5_function_setup(dev, boot); if (err) - goto out; + goto err_function; if (boot) { err = mlx5_init_once(dev); @@ -1229,6 +1238,7 @@ err_load: mlx5_cleanup_once(dev); function_teardown: mlx5_function_teardown(dev, boot); +err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mutex_unlock(&dev->intf_state_mutex); @@ -1553,6 +1563,22 @@ static void shutdown(struct pci_dev *pdev) mlx5_pci_disable_device(dev); } +static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + mlx5_unload_one(dev, false); + + return 0; +} + +static int mlx5_resume(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + return mlx5_load_one(dev, false); +} + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ @@ -1596,6 +1622,8 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, + .suspend = mlx5_suspend, + .resume = mlx5_resume, .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 91bd258ecf1b..db76c92b75e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -339,6 +339,24 @@ out_free: return err; } +static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, + u32 npages) +{ + u32 pages_set = 0; + unsigned int n; + + for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) { + MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set, + fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE)); + pages_set++; + + if (!--npages) + break; + } + + return pages_set; +} + static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { @@ -362,8 +380,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, if (fwp->func_id != func_id) continue; - MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); - i++; + i += fwp_fill_manage_pages_out(fwp, out, i, npages - i); } MLX5_SET(manage_pages_out, out, output_num_entries, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 373981a659c7..6fd974920394 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -115,7 +115,7 @@ static int request_irqs(struct mlx5_core_dev *dev, int nvec) return 0; err_request_irq: - for (; i >= 0; i--) { + while (i--) { struct mlx5_irq *irq = mlx5_irq_get(dev, i); int irqn = pci_irq_vector(dev->pdev, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index cc262b30aed5..dc589322940c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } -static int mlx5_eeprom_page(int offset) +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, + u8 *module_id) +{ + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + int err, status; + u8 *ptr; + + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, device_address, 0); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, size, 1); + MLX5_SET(mcia_reg, in, l, 0); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + *module_id = ptr[0]; + + return 0; +} + +static int mlx5_qsfp_eeprom_page(u16 offset) { if (offset < MLX5_EEPROM_PAGE_LENGTH) /* Addresses between 0-255 - page 00 */ @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset) MLX5_EEPROM_HIGH_PAGE_LENGTH); } -static int mlx5_eeprom_high_page_offset(int page_num) +static int mlx5_qsfp_eeprom_high_page_offset(int page_num) { if (!page_num) /* Page 0 always start from low page */ return 0; @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num) return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; } +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = mlx5_qsfp_eeprom_page(*offset); + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); +} + +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < MLX5_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = MLX5_I2C_ADDR_HIGH; + *offset -= MLX5_EEPROM_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { - int module_num, page_num, status, err; + int module_num, status, err, page_num = 0; + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr; - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + u16 i2c_addr = 0; + u8 module_id; + void *ptr; err = mlx5_query_module_num(dev, &module_num); if (err) return err; - memset(in, 0, sizeof(in)); - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); + err = mlx5_query_module_id(dev, module_num, &module_id); + if (err) + return err; - /* Get the page number related to the given offset */ - page_num = mlx5_eeprom_page(offset); - - /* Set the right offset according to the page number, - * For page_num > 0, relative offset is always >= 128 (high page). 
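The request_irqs() unwind fix above is a classic off-by-one: after a failure at index i, only vectors 0..i-1 were actually requested. `while (i--)` releases exactly those, whereas the old `for (; i >= 0; i--)` also touched the vector that failed. Standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            int i;

            for (i = 0; i < 5; i++) {
                    if (i == 3) /* pretend request_irq() failed here */
                            goto err;
            }
            return 0;
    err:
            while (i--)
                    printf("releasing irq %d\n", i); /* prints 2, 1, 0 */
            return 1;
    }

The post-decrement both skips the failed index and terminates cleanly at zero, which is why it is the standard unwind idiom.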
- */ - offset -= mlx5_eeprom_high_page_offset(page_num); + switch (module_id) { + case MLX5_MODULE_ID_SFP: + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - i2c_addr = MLX5_I2C_ADDR_LOW; + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, ptr, size); return size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 0fc7de4aa572..8e0dddc6383f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -116,7 +116,7 @@ free: static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) { mlx5_core_roce_gid_set(dev, 0, 0, 0, - NULL, NULL, false, 0, 0); + NULL, NULL, false, 0, 1); } static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 41662c4e2664..64f6f529f6eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager); caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); + caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) { caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 5b24732b18c0..56bf900eb753 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (ret) return ret; + if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) { + mlx5dr_err(dmn, "SW steering is not supported on this device\n"); + return -EOPNOTSUPP; + } + ret = dr_domain_query_fdb_caps(mdev, dmn); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 095ec7b1399d..f012aac83b10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -181,7 +181,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, in, pas)); err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen); - kfree(in); + kvfree(in); if (err) { mlx5_core_warn(mdev, " Can't create QP\n"); @@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq, pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); } +static void dr_cq_complete(struct mlx5_core_cq 
*mcq, + struct mlx5_eqe *eqe) +{ + pr_err("CQ completion CQ: #%u\n", mcq->cqn); +} + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, struct mlx5_uars_page *uar, size_t ncqe) @@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); cq->mcq.event = dr_cq_event; + cq->mcq.comp = dr_cq_complete; err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); kvfree(in); @@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, cq->mcq.set_ci_db = cq->wq_ctrl.db.db; cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; *cq->mcq.set_ci_db = 0; - *cq->mcq.arm_db = 0; + + /* Set a non-zero value to prevent the HW from running db-recovery on + * a CQ that is used in polling mode. + */ + *cq->mcq.arm_db = cpu_to_be32(2 << 28); + cq->mcq.vector = 0; cq->mcq.irqn = irqn; cq->mcq.uar = uar; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 31737dfca4ea..c360d08af67d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -613,6 +613,7 @@ struct mlx5dr_cmd_caps { u8 max_ft_level; u16 roce_min_src_udp; u8 num_esw_ports; + u8 sw_format_ver; bool eswitch_manager; bool rx_sw_owner; bool tx_sw_owner; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 1e32e2443f73..348f02e336f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -247,29 +247,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, /* The order of the actions must be kept; only the following * order is supported by SW steering: - * TX: push vlan -> modify header -> encap + * TX: modify header -> push vlan -> encap * RX: decap -> pop vlan -> modify header */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { enum mlx5dr_action_reformat_type decap_type = DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; @@ -322,6 +302,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = fte->action.modify_hdr->action.dr_action; + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + if (delay_encap_set) actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 0a0884d86d44..2d39bade89e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -444,7 +444,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) if (trans->core->fw_flash_in_progress) timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS); - queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); + queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, + timeout << trans->retries); } static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, @@ -493,6 +494,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_transmit(trans->core, trans); if (err == 0) return; + + if (!atomic_dec_and_test(&trans->active)) + return; } else { err = -EIO; } @@ -592,7 +596,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -604,6 +608,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -1280,6 +1285,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, if (!reload) devlink_resources_unregister(devlink, NULL); mlxsw_core->bus->fini(mlxsw_core->bus_priv); + if (!reload) + devlink_free(devlink); return; @@ -1576,7 +1583,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -1801,11 +1808,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index c51b2adfc1e1..2cbfa5cfefab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&block->resource_list); block->afa = mlxsw_afa; @@ -344,7 +344,7 @@ err_second_set_create: mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); - return NULL; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(mlxsw_afa_block_create); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 35a1dc89c28a..c3fc1cc4bb2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -19,7 +19,7 @@ #define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */ #define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */ #define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ -#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */ +#define 
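Shifting the timeout by trans->retries in the mlxsw EMAD hunk above gives each retransmission exponentially more time before the timeout work fires. The base timeout and retry cap below are assumed values for illustration, not quoted from this patch:

    #include <stdio.h>

    #define EMAD_TIMEOUT_MS  200 /* assumed base timeout */
    #define EMAD_MAX_RETRIES 5   /* assumed retry cap */

    /* Doubling per retry: 200, 400, 800, 1600, 3200, 6400 ms. */
    int main(void)
    {
            for (int retries = 0; retries <= EMAD_MAX_RETRIES; retries++)
                    printf("retry %d: timeout %d ms\n",
                           retries, EMAD_TIMEOUT_MS << retries);
            return 0;
    }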
MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) #define MLXSW_THERMAL_ZONE_MAX_NAME 16 @@ -177,6 +177,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, if (err) return err; + if (crit_temp > emerg_temp) { + dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n", + tz->tzdev->type, crit_temp, emerg_temp); + return 0; + } + /* According to the system thermal requirements, the thermal zones are * defined with four trip points. The critical and emergency * temperature thresholds, provided by QSFP module are set as "active" @@ -191,11 +197,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp; tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp; - if (emerg_temp > crit_temp) - tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp + + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp + MLXSW_THERMAL_MODULE_TEMP_SHIFT; - else - tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp; return 0; } @@ -390,8 +393,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, int trip, enum thermal_trend *trend) { - struct mlxsw_thermal_module *tz = tzdev->devdata; - struct mlxsw_thermal *thermal = tz->parent; + struct mlxsw_thermal *thermal = tzdev->devdata; if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) return -EINVAL; @@ -592,6 +594,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, return 0; } +static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, + int trip, enum thermal_trend *trend) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + if (tzdev == thermal->tz_highest_dev) + return 1; + + *trend = THERMAL_TREND_STABLE; + return 0; +} + static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, @@ -603,7 +621,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .set_trip_temp = mlxsw_thermal_module_trip_temp_set, .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, - .get_trend = mlxsw_thermal_trend_get, + .get_trend = mlxsw_thermal_module_trend_get, }; static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, @@ -642,7 +660,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { .set_trip_temp = mlxsw_thermal_module_trip_temp_set, .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, - .get_trend = mlxsw_thermal_trend_get, + .get_trend = mlxsw_thermal_module_trend_get, }; static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index f3d1f9411d10..aa4fef789084 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1401,23 +1401,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, u16 num_pages; int err; - mutex_init(&mlxsw_pci->cmd.lock); - 
init_waitqueue_head(&mlxsw_pci->cmd.wait); - mlxsw_pci->core = mlxsw_core; mbox = mlxsw_cmd_mbox_alloc(); if (!mbox) return -ENOMEM; - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); - if (err) - goto mbox_put; - - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - if (err) - goto err_out_mbox_alloc; - err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); if (err) goto err_sw_reset; @@ -1524,9 +1513,6 @@ err_query_fw: mlxsw_pci_free_irq_vectors(mlxsw_pci); err_alloc_irq: err_sw_reset: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); -err_out_mbox_alloc: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); mbox_put: mlxsw_cmd_mbox_free(mbox); return err; @@ -1540,8 +1526,6 @@ static void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } static struct mlxsw_pci_queue * @@ -1755,6 +1739,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto err_in_mbox_alloc; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + return 0; + +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +err_in_mbox_alloc: + mutex_destroy(&mlxsw_pci->cmd.lock); + return err; +} + +static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + mutex_destroy(&mlxsw_pci->cmd.lock); +} + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1810,6 +1825,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); + err = mlxsw_pci_cmd_init(mlxsw_pci); + if (err) + goto err_pci_cmd_init; + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1827,6 +1846,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: + mlxsw_pci_cmd_fini(mlxsw_pci); +err_pci_cmd_init: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1844,6 +1865,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_cmd_fini(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a806c6190bb1..6a432bb93dbb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3932,6 +3932,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_cpu_port_remove(mlxsw_sp); kfree(mlxsw_sp->port_to_module); kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; } static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) @@ -3986,6 
+3987,7 @@ err_cpu_port_create: kfree(mlxsw_sp->port_to_module); err_port_to_module_alloc: kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; return err; } @@ -4040,6 +4042,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } } +static struct mlxsw_sp_port * +mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) + return mlxsw_sp->ports[local_port]; + return NULL; +} + static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count, struct netlink_ext_ack *extack) @@ -4058,7 +4068,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); @@ -4136,7 +4146,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X); local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X); - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 6c66a0f1b79e..ad69913f19c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, * to be written using PEFA register to all indexes for all regions. 
*/ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) { - err = -ENOMEM; + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); goto err_afa_block; } err = mlxsw_afa_block_continue(afa_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 3d3cca596116..d77cdcb5c642 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -444,7 +444,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl, rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) - return NULL; + return ERR_PTR(-ENOMEM); if (afa_block) { rulei->act_block = afa_block; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index e993159e8e4c..ec0d5a4a60a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -290,13 +290,14 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam, int err; group->tcam = tcam; - mutex_init(&group->lock); INIT_LIST_HEAD(&group->region_list); err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); if (err) return err; + mutex_init(&group->lock); + return 0; } @@ -986,8 +987,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, unsigned int priority, struct mlxsw_afk_element_usage *elusage) { + struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2; struct mlxsw_sp_acl_tcam_vregion *vregion; - struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct list_head *pos; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) @@ -1025,7 +1027,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); - list_add_tail(&vchunk->list, &vregion->vchunk_list); + + /* Position the vchunk inside the list according to priority */ + list_for_each(pos, &vregion->vchunk_list) { + vchunk2 = list_entry(pos, typeof(*vchunk2), list); + if (vchunk2->priority > priority) + break; + } + list_add_tail(&vchunk->list, pos); mutex_unlock(&vregion->lock); return vchunk; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 336e5ecc68f8..f1874578aa14 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -524,6 +524,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table, u16 erif_index = 0; int err; + /* Add the eRIF */ + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) { + erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif); + err = mr->mr_ops->route_erif_add(mlxsw_sp, + rve->mr_route->route_priv, + erif_index); + if (err) + return err; + } + /* Update the route action, as the new eVIF can be a tunnel or a pimreg * device which will require updating the action. 
*/ @@ -533,17 +543,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table, rve->mr_route->route_priv, route_action); if (err) - return err; - } - - /* Add the eRIF */ - if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) { - erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif); - err = mr->mr_ops->route_erif_add(mlxsw_sp, - rve->mr_route->route_priv, - erif_index); - if (err) - goto err_route_erif_add; + goto err_route_action_update; } /* Update the minimum MTU */ @@ -561,14 +561,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table, return 0; err_route_min_mtu_update: - if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) - mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, - erif_index); -err_route_erif_add: if (route_action != rve->mr_route->route_action) mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv, rve->mr_route->route_action); +err_route_action_update: + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) + mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, + erif_index); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 346f4a5fe053..221aa6a474eb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, int err; afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) - return ERR_PTR(-ENOMEM); + if (IS_ERR(afa_block)) + return afa_block; err = mlxsw_afa_block_append_allocated_counter(afa_block, counter_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index efdf8cb5114c..2f013fc71698 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6287,7 +6287,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 63e7a058b7c6..059cc1600890 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1258,6 +1258,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; } static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) @@ -1292,6 +1293,7 @@ err_port_module_info_get: if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; return err; } @@ -1375,6 +1377,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port, u8 module, width; int err; + if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { + dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + if (new_type == DEVLINK_PORT_TYPE_AUTO) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 52c41d11f565..c3a6edc0ddf6 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device 
*spi) if (unlikely(ret)) { netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", ret); - goto out_free; + goto out_stop; } eidled = encx24j600_read_reg(priv, EIDLED); @@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) out_unregister: unregister_netdev(priv->ndev); +out_stop: + kthread_stop(priv->kworker_task); out_free: free_netdev(ndev); @@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi) struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); unregister_netdev(priv->ndev); + kthread_stop(priv->kworker_task); free_netdev(priv->ndev); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 3a0b289d9771..eedec1346078 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, wol->supported = 0; wol->wolopts = 0; - phy_ethtool_get_wol(netdev->phydev, wol); + + if (netdev->phydev) + phy_ethtool_get_wol(netdev->phydev, wol); wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | WAKE_MAGIC | WAKE_PHY | WAKE_ARP; @@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); - phy_ethtool_set_wol(netdev->phydev, wol); - - return 0; + return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol) + : -ENETDOWN; } #endif /* CONFIG_PM */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index a43140f7b5eb..4bbdc53eaf3f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -145,7 +145,8 @@ static void lan743x_intr_software_isr(void *context) int_sts = lan743x_csr_read(adapter, INT_STS); if (int_sts & INT_BIT_SW_GP_) { - lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_); + /* disable the interrupt to prevent repeated re-triggering */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); intr->software_isr_flag = 1; } } @@ -672,14 +673,12 @@ clean_up: static int lan743x_dp_write(struct lan743x_adapter *adapter, u32 select, u32 addr, u32 length, u32 *buf) { - int ret = -EIO; u32 dp_sel; int i; - mutex_lock(&adapter->dp_lock); if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, 1, 40, 100, 100)) - goto unlock; + return -EIO; dp_sel = lan743x_csr_read(adapter, DP_SEL); dp_sel &= ~DP_SEL_MASK_; dp_sel |= select; @@ -691,13 +690,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter, lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, 1, 40, 100, 100)) - goto unlock; + return -EIO; } - ret = 0; -unlock: - mutex_unlock(&adapter->dp_lock); - return ret; + return 0; } static u32 lan743x_mac_mii_access(u16 id, u16 index, int read) @@ -1247,13 +1243,13 @@ clean_up_data_descriptor: goto clear_active; if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) { - dev_kfree_skb(buffer_info->skb); + dev_kfree_skb_any(buffer_info->skb); goto clear_skb; } if (cleanup) { lan743x_ptp_unrequest_tx_timestamp(tx->adapter); - dev_kfree_skb(buffer_info->skb); + dev_kfree_skb_any(buffer_info->skb); } else { ignore_sync = (buffer_info->flags & TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0; @@ -1563,7 +1559,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, if (required_number_of_descriptors > lan743x_tx_get_avail_desc(tx)) { if 
(required_number_of_descriptors > (tx->ring_size - 1)) { - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); } else { /* save to overflow buffer */ tx->overflow_skb = skb; @@ -1596,7 +1592,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, start_frame_length, do_timestamp, skb->ip_summed == CHECKSUM_PARTIAL)) { - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); goto unlock; } @@ -1615,7 +1611,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, * frame assembler clean up was performed inside * lan743x_tx_frame_add_fragment */ - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); goto unlock; } } @@ -1903,6 +1899,14 @@ static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx) length, GFP_ATOMIC | GFP_DMA); } +static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index) +{ + /* update the tail once per 8 descriptors */ + if ((index & 7) == 7) + lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number), + index); +} + static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, struct sk_buff *skb) { @@ -1933,6 +1937,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, descriptor->data0 = (RX_DESC_DATA0_OWN_ | (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)); skb_reserve(buffer_info->skb, RX_HEAD_PADDING); + lan743x_rx_update_tail(rx, index); return 0; } @@ -1951,6 +1956,7 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) descriptor->data0 = (RX_DESC_DATA0_OWN_ | ((buffer_info->buffer_length) & RX_DESC_DATA0_BUF_LENGTH_MASK_)); + lan743x_rx_update_tail(rx, index); } static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) @@ -2162,6 +2168,7 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) { struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); struct lan743x_adapter *adapter = rx->adapter; + int result = RX_PROCESS_RESULT_NOTHING_TO_DO; u32 rx_tail_flags = 0; int count; @@ -2170,27 +2177,19 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) lan743x_csr_write(adapter, DMAC_INT_STS, DMAC_INT_BIT_RXFRM_(rx->channel_number)); } - count = 0; - while (count < weight) { - int rx_process_result = lan743x_rx_process_packet(rx); - - if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { - count++; - } else if (rx_process_result == - RX_PROCESS_RESULT_NOTHING_TO_DO) { + for (count = 0; count < weight; count++) { + result = lan743x_rx_process_packet(rx); + if (result == RX_PROCESS_RESULT_NOTHING_TO_DO) break; - } else if (rx_process_result == - RX_PROCESS_RESULT_PACKET_DROPPED) { - continue; - } } rx->frame_count += count; - if (count == weight) - goto done; + if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED) + return weight; if (!napi_complete_done(napi, count)) - goto done; + return count; + /* re-arm interrupts, must write to rx tail on some chip variants */ if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { @@ -2200,10 +2199,10 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) INT_BIT_DMA_RX_(rx->channel_number)); } - /* update RX_TAIL */ - lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), - rx_tail_flags | rx->last_tail); -done: + if (rx_tail_flags) + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx_tail_flags | rx->last_tail); + return count; } @@ -2348,7 +2347,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx) 
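/* Editorial aside - illustrative sketch, not part of the patch series.
 * lan743x_rx_update_tail() above batches doorbell writes: the RX tail
 * register is written only once per 8 descriptors, trading a little
 * latency for far fewer MMIO writes. A standalone userspace model of the
 * policy (names and ring size are hypothetical):
 */
#include <stdio.h>

#define RING_SIZE 128

static unsigned int mmio_writes;

static void rx_update_tail(int index)
{
	/* update the tail once per 8 descriptors */
	if ((index & 7) == 7)
		mmio_writes++;	/* stands in for csr_write(RX_TAIL, index) */
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++)
		rx_update_tail(i);
	/* prints "16 tail writes for 128 descriptors" */
	printf("%u tail writes for %d descriptors\n", mmio_writes, RING_SIZE);
	return 0;
}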
netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll, - rx->ring_size - 1); + NAPI_POLL_WEIGHT); lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_RX_SWR_(rx->channel_number)); @@ -2674,7 +2673,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, adapter->intr.irq = adapter->pdev->irq; lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); - mutex_init(&adapter->dp_lock); ret = lan743x_gpio_init(adapter); if (ret) diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 3b02eeae5f45..1fbcef391098 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -706,9 +706,6 @@ struct lan743x_adapter { struct lan743x_csr csr; struct lan743x_intr intr; - /* lock, used to prevent concurrent access to data port */ - struct mutex dp_lock; - struct lan743x_gpio gpio; struct lan743x_ptp ptp; diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index e1651756bf9d..f70bb81e1ed6 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); + devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index da1fd0e08c36..6030c90d50cc 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1716,10 +1716,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; - if (!ocelot_netdevice_dev_check(dev)) - return 0; - if (event == NETDEV_PRECHANGEUPPER && + ocelot_netdevice_dev_check(dev) && netif_is_lag_master(info->upper_dev)) { struct netdev_lag_upper_info *lag_upper_info = info->upper_info; struct netlink_ext_ack *extack; diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index 51fa82b429a3..40970352d208 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev) err = register_netdev(dev); if (err) - goto out1; + goto undo_probe1; return 0; -out1: +undo_probe1: + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 0937fc2a928e..23c9394cd5d2 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -540,10 +540,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev) err = register_netdev(dev); if (err) - goto out; + goto undo_probe; return 0; +undo_probe: + dma_free_coherent(lp->device, + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); out: free_netdev(dev); @@ -618,12 +622,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board) err = register_netdev(ndev); if (err) - goto out; + goto undo_probe; nubus_set_drvdata(board, ndev); return 0; +undo_probe: + dma_free_coherent(lp->device, + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, 
lp->descriptors_laddr); out: free_netdev(ndev); return err; diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index e1b886e87a76..44171d7bb434 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -265,11 +265,14 @@ int xtsonic_probe(struct platform_device *pdev) sonic_msg_init(dev); if ((err = register_netdev(dev))) - goto out1; + goto undo_probe1; return 0; -out1: +undo_probe1: + dma_free_coherent(lp->device, + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); release_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index e678ba379598..628fa9b2f741 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ if ((mask & VXGE_DEBUG_MASK) == mask) \ - printk(fmt "\n", __VA_ARGS__); \ + printk(fmt "\n", ##__VA_ARGS__); \ } while (0) #else #define vxge_debug_ll(level, mask, fmt, ...) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 59a57ff5e96a..9c86f4f9cd42 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) #define vxge_debug_ll_config(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) #else #define vxge_debug_ll_config(level, fmt, ...) #endif #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) #define vxge_debug_init(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_init(level, fmt, ...) #endif #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) #define vxge_debug_tx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) #else #define vxge_debug_tx(level, fmt, ...) #endif #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) #define vxge_debug_rx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) #else #define vxge_debug_rx(level, fmt, ...) #endif #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) #define vxge_debug_mem(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) #else #define vxge_debug_mem(level, fmt, ...) #endif #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) #define vxge_debug_entryexit(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_entryexit(level, fmt, ...) #endif #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) #define vxge_debug_intr(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) #else #define vxge_debug_intr(level, fmt, ...) 
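/* Editorial aside - illustrative sketch, not part of the patch series.
 * The vxge hunks above convert __VA_ARGS__ to ##__VA_ARGS__. With the
 * plain form, invoking a debug macro with no variadic arguments expands
 * to printk(fmt "\n", ) - a dangling comma and a compile error. The GNU
 * "##" extension (also honored by clang) swallows that comma. Demo:
 */
#include <stdio.h>

/* #define vxge_dbg(fmt, ...) printf(fmt "\n", __VA_ARGS__)   <- broken:
 * vxge_dbg("hello") would expand to printf("hello" "\n", );
 */
#define vxge_dbg(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
	vxge_dbg("no arguments");		/* legal only thanks to ## */
	vxge_dbg("one argument: %d", 42);
	return 0;
}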
#endif diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 9183b3e85d21..bdbf0726145e 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, if (!nfp_nsp_has_hwinfo_lookup(nsp)) { nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); eth_hw_addr_random(nn->dp.netdev); + nfp_nsp_close(nsp); return; } @@ -332,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) goto err_free_alink; alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL); - if (!alink->prio_map) + if (!alink->prio_map) { + err = -ENOMEM; goto err_free_alink; + } /* This is a multi-host app, make sure MAC/PHY is up, but don't * make the MAC/PHY state follow the state of any of the ports. diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 0e2db6ea79e9..2ec62c8d86e1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) dev_consume_skb_any(skb); else dev_kfree_skb_any(skb); + return; } nfp_ccm_rx(&bpf->ccm, skb); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 88fab6a82acf..06927ba5a3ae 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -46,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, /* Grab a single ref to the map for our record. The prog destroy ndo * happens after free_used_maps(). */ - map = bpf_map_inc(map, false); - if (IS_ERR(map)) - return PTR_ERR(map); + bpf_map_inc(map); record = kmalloc(sizeof(*record), GFP_KERNEL); if (!record) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 31d94592a7c0..2d99533ad3e0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -164,6 +164,7 @@ struct nfp_fl_internal_ports { * @qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded + * @merge_table: Hash table to store merged flows */ struct nfp_flower_priv { struct nfp_app *app; @@ -196,6 +197,7 @@ struct nfp_flower_priv { unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ int pre_tun_rule_cnt; + struct rhashtable merge_table; }; /** @@ -310,6 +312,12 @@ struct nfp_fl_payload_link { }; extern const struct rhashtable_params nfp_flower_table_params; +extern const struct rhashtable_params merge_table_params; + +struct nfp_merge_info { + u64 parent_ctx; + struct rhash_head ht_node; +}; struct nfp_fl_stats_frame { __be32 stats_con_id; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 5defd31d481c..327bb56b3ef5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_free_ctx_entry; }
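/* Editorial aside - illustrative sketch, not part of the patch series.
 * The merge tracking added by these nfp/flower hunks keys the new
 * priv->merge_table on a u64 built by packing the two sub-flows' 32-bit
 * host context ids. A standalone model of the packing (example ids are
 * made up); note the order matters, so (a, b) and (b, a) yield distinct
 * keys:
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t parent_ctx(uint32_t ctx1, uint32_t ctx2)
{
	return ((uint64_t)ctx1 << 32) | (uint64_t)ctx2;
}

int main(void)
{
	/* prints "key = 0x0000001100000022" */
	printf("key = 0x%016llx\n",
	       (unsigned long long)parent_ctx(0x11, 0x22));
	return 0;
}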
+ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to + * configure the pre_tun table and are never actually sent to the + * firmware as an add-flow message. This causes the mask-id allocation + * on the firmware to get out of sync if allocated here. + */ new_mask_id = 0; - if (!nfp_check_mask_add(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_add(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); @@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, goto err_remove_mask; } - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, + if (!nfp_flow->pre_tun_rule.dev && + !nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, NULL, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); @@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app, return 0; err_remove_mask: - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, - NULL, &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, + NULL, &new_mask_id); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, &ctx_entry->ht_node, @@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app, __nfp_modify_flow_metadata(priv, nfp_flow); - nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, &nfp_flow->meta.flags, - &new_mask_id); + if (!nfp_flow->pre_tun_rule.dev) + nfp_check_mask_remove(app, nfp_flow->mask_data, + nfp_flow->meta.mask_len, &nfp_flow->meta.flags, + &new_mask_id); /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; @@ -480,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = { .automatic_shrinking = true, }; +const struct rhashtable_params merge_table_params = { + .key_offset = offsetof(struct nfp_merge_info, parent_ctx), + .head_offset = offsetof(struct nfp_merge_info, ht_node), + .key_len = sizeof(u64), +}; + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_num_mems) { @@ -496,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_flow_table; + err = rhashtable_init(&priv->merge_table, &merge_table_params); + if (err) + goto err_free_stats_ctx_table; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids.
*/ @@ -503,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_stats_ctx_table; + goto err_free_merge_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -540,6 +560,8 @@ err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_merge_table: + rhashtable_destroy(&priv->merge_table); err_free_stats_ctx_table: rhashtable_destroy(&priv->stats_ctx_table); err_free_flow_table: @@ -558,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) nfp_check_rhashtable_empty, NULL); rhashtable_free_and_destroy(&priv->stats_ctx_table, nfp_check_rhashtable_empty, NULL); + rhashtable_free_and_destroy(&priv->merge_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 987ae221f6be..f57e7f337012 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -923,6 +923,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *merge_flow; struct nfp_fl_key_ls merge_key_ls; + struct nfp_merge_info *merge_info; + u64 parent_ctx = 0; int err; ASSERT_RTNL(); @@ -933,6 +935,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, nfp_flower_is_merge_flow(sub_flow2)) return -EINVAL; + /* check if the two flows are already merged */ + parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32; + parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id)); + if (rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, merge_table_params)) { + nfp_flower_cmsg_warn(app, "The two flows are already merged.\n"); + return 0; + } + err = nfp_flower_can_merge(sub_flow1, sub_flow2); if (err) return err; @@ -974,16 +985,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, if (err) goto err_release_metadata; + merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL); + if (!merge_info) { + err = -ENOMEM; + goto err_remove_rhash; + } + merge_info->parent_ctx = parent_ctx; + err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node, + merge_table_params); + if (err) + goto err_destroy_merge_info; + err = nfp_flower_xmit_flow(app, merge_flow, NFP_FLOWER_CMSG_TYPE_FLOW_MOD); if (err) - goto err_remove_rhash; + goto err_remove_merge_info; merge_flow->in_hw = true; sub_flow1->in_hw = false; return 0; +err_remove_merge_info: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); +err_destroy_merge_info: + kfree(merge_info); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, &merge_flow->fl_node, @@ -1211,7 +1239,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload_link *link, *temp; + struct nfp_merge_info *merge_info; struct nfp_fl_payload *origin; + u64 parent_ctx = 0; bool mod = false; int err; @@ -1248,8 +1278,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, err_free_links: /* Clean any links connected with the merged flow. 
*/ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, - merge_flow.list) + merge_flow.list) { + u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); + + parent_ctx = (parent_ctx << 32) | (u64)(ctx_id); nfp_flower_unlink_flow(link); + } + + merge_info = rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, + merge_table_params); + if (merge_info) { + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); + kfree(merge_info); + } kfree(merge_flow->action_data); kfree(merge_flow->mask_data); @@ -1368,7 +1412,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); priv->stats[ctx_id].pkts += pkts; priv->stats[ctx_id].bytes += bytes; - max_t(u64, priv->stats[ctx_id].used, used); + priv->stats[ctx_id].used = max_t(u64, used, + priv->stats[ctx_id].used); } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1b840ee47339..17b91ed39369 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -731,8 +731,8 @@ nfp_port_get_fecparam(struct net_device *netdev, struct nfp_eth_table_port *eth_port; struct nfp_port *port; - param->active_fec = ETHTOOL_FEC_NONE_BIT; - param->fec = ETHTOOL_FEC_NONE_BIT; + param->active_fec = ETHTOOL_FEC_NONE; + param->fec = ETHTOOL_FEC_NONE; port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2761f3a3ae50..56f285985b43 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1318,19 +1318,21 @@ static int nixge_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); err = nixge_of_get_resources(pdev); if (err) - return err; + goto free_netdev; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); if (priv->tx_irq < 0) { netdev_err(ndev, "could not find 'tx' irq"); - return priv->tx_irq; + err = priv->tx_irq; + goto free_netdev; } priv->rx_irq = platform_get_irq_byname(pdev, "rx"); if (priv->rx_irq < 0) { netdev_err(ndev, "could not find 'rx' irq"); - return priv->rx_irq; + err = priv->rx_irq; + goto free_netdev; } priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 544012a67221..1d59ef367a85 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -815,7 +815,8 @@ static int lpc_mii_init(struct netdata_local *pldat) if (mdiobus_register(pldat->mii_bus)) goto err_out_unregister_bus; - if (lpc_mii_probe(pldat->ndev) != 0) + err = lpc_mii_probe(pldat->ndev); + if (err) goto err_out_unregister_bus; return 0; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index be6660128b55..040a15a828b4 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev) mac->tx = pasemi_mac_setup_tx_resources(dev); - if (!mac->tx) + if (!mac->tx) { + ret = -ENOMEM; goto out_tx_ring; + } /* We might already have allocated rings in case mtu was changed * before interface was brought up. 
*/ if (dev->mtu > 1500 && !mac->num_cs) { pasemi_mac_setup_csrings(mac); - if (!mac->num_cs) + if (!mac->num_cs) { + ret = -ENOMEM; goto out_tx_ring; + } } /* Zero out rmon counters */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 7d10265f782a..b8de3784d976 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -102,15 +102,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); } static int ionic_get_link_ksettings(struct net_device *netdev, @@ -122,6 +125,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(ks, supported); + if (!idev->port_info) { + netdev_err(netdev, "port_info not initialized\n"); + return -EOPNOTSUPP; + } + /* The port_info data is found in a DMA space that the NIC keeps * up-to-date, so there's no need to request the data from the * NIC, we already have it in our memory space. diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 134640412d7b..e66002251596 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -666,7 +666,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, eid = le64_to_cpu(comp->event.eid); /* Have we run out of new completions to process? 
*/ - if (eid <= lif->last_eid) + if ((s64)(eid - lif->last_eid) <= 0) return false; lif->last_eid = eid; @@ -809,8 +809,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) if (f) return 0; - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, - ctx.comp.rx_filter_add.filter_id); + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); err = ionic_adminq_post_wait(lif, &ctx); @@ -839,6 +838,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return -ENOENT; } + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", + addr, f->filter_id); + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); spin_unlock_bh(&lif->rx_filters.lock); @@ -847,9 +849,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) if (err) return err; - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, - ctx.cmd.rx_filter_del.filter_id); - return 0; } @@ -1187,6 +1186,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev->hw_features |= netdev->hw_enc_features; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -1290,13 +1290,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, }; int err; + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; - netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, - ctx.comp.rx_filter_add.filter_id); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); } @@ -1321,8 +1319,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, return -ENOENT; } - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", + vid, f->filter_id); ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 7a093f148ee5..60cb77e2bab4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -36,10 +36,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif) spin_lock_init(&lif->rx_filters.lock); + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); } + spin_unlock_bh(&lif->rx_filters.lock); return 0; } @@ -51,11 +53,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) struct hlist_node *tmp; unsigned int i; + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { head = &lif->rx_filters.by_id[i]; hlist_for_each_entry_safe(f, tmp, head, by_id) ionic_rx_filter_free(lif, f); } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, @@ -91,6 +95,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); diff --git 
a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index ab6663d94f42..c818d24a8b24 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -257,7 +257,7 @@ void ionic_rx_fill(struct ionic_queue *q) unsigned int len; unsigned int i; - len = netdev->mtu + ETH_HLEN; + len = netdev->mtu + ETH_HLEN + VLAN_HLEN; for (i = ionic_q_space_avail(q); i; i--) { skb = ionic_rx_skb_alloc(q, len, &dma_addr); diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 55a29ec76680..58eac2471d53 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -78,6 +78,7 @@ config QED depends on PCI select ZLIB_INFLATE select CRC8 + select CRC32 select NET_DEVLINK ---help--- This enables the support for ... diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c692a41e4548..25b6f2ee2beb 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = { .ndo_set_features = netxen_set_features, }; -static inline bool netxen_function_zero(struct pci_dev *pdev) -{ - return (PCI_FUNC(pdev->devfn) == 0) ? true : false; -} - static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, u32 mode) { @@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter) netxen_initialize_interrupt_registers(adapter); netxen_set_msix_bit(pdev, 0); - if (netxen_function_zero(pdev)) { + if (adapter->portnum == 0) { if (!netxen_setup_msi_interrupts(adapter, num_msix)) netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); else diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 8e1bdf58b9e7..8ea46b81b739 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -2073,8 +2073,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 859caa6c1a1f..8e7be214f959 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -8197,6 +8197,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } + /* Re-populate nvm attribute info */ + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_populate(p_hwfn); + /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index a1ebc2b1ca0b..a923c6553270 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) void qed_resc_free(struct qed_dev *cdev) { + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { @@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev) qed_llh_free(cdev); for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); @@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_ooo_free(p_hwfn); } - if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); + } qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); @@ -3087,7 +3092,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -4137,7 +4142,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | - BIT(QED_MF_INTER_PF_SWITCH); + BIT(QED_MF_INTER_PF_SWITCH) | + BIT(QED_MF_DISABLE_ARFS); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | @@ -4150,6 +4156,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", cdev->mf_bits); + + /* In CMT the PF is unknown when the GFS block processes the + * packet. Therefore cannot use searcher as it has a per PF + * database, and thus ARFS must be disabled. 
+ * + */ + if (QED_IS_CMT(cdev)) + cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", @@ -4418,12 +4432,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4508,7 +4516,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -4569,7 +4577,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4603,7 +4611,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, @@ -4648,26 +4656,20 @@ static void qed_chain_free_single(struct qed_dev *cdev, static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) { - void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl; u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; - if (!pp_virt_addr_tbl) + if (!pp_addr_tbl) return; - if (!p_pbl_virt) - goto out; - for (i = 0; i < page_cnt; i++) { - if (!pp_virt_addr_tbl[i]) + if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map) break; dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, - pp_virt_addr_tbl[i], - *(dma_addr_t *)p_pbl_virt); - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + pp_addr_tbl[i].virt_addr, + pp_addr_tbl[i].dma_map); } pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -4677,9 +4679,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) pbl_size, p_chain->pbl_sp.p_virt_table, p_chain->pbl_sp.p_phys_table); -out: - vfree(p_chain->pbl.pp_virt_addr_tbl); - p_chain->pbl.pp_virt_addr_tbl = NULL; + + vfree(p_chain->pbl.pp_addr_tbl); + p_chain->pbl.pp_addr_tbl = NULL; } void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) @@ -4780,19 +4782,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, { u32 page_cnt = p_chain->page_cnt, size, i; dma_addr_t p_phys = 0, p_pbl_phys = 0; - void **pp_virt_addr_tbl = NULL; + struct addr_tbl_entry *pp_addr_tbl; u8 *p_pbl_virt = NULL; void *p_virt = NULL; - size = page_cnt * sizeof(*pp_virt_addr_tbl); - pp_virt_addr_tbl = vzalloc(size); - if (!pp_virt_addr_tbl) + size = page_cnt * sizeof(*pp_addr_tbl); + pp_addr_tbl = vzalloc(size); + if (!pp_addr_tbl) return -ENOMEM; /* The allocation of the PBL table is done with its full size, since it * is expected to be successive. * qed_chain_init_pbl_mem() is called even in a case of an allocation - * failure, since pp_virt_addr_tbl was previously allocated, and it + * failure, since tbl was previously allocated, and it * should be saved to allow its freeing during the error flow. 
*/ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -4806,8 +4808,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, p_chain->b_external_pbl = true; } - qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, - pp_virt_addr_tbl); + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl); if (!p_pbl_virt) return -ENOMEM; @@ -4826,7 +4827,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, /* Fill the PBL table with the physical address of the page */ *(dma_addr_t *)p_pbl_virt = p_phys; /* Keep the virtual address of the page */ - p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt; + p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys; p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 9f5113639eaf..666e43748a5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -256,9 +256,10 @@ out: #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) { + char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); @@ -272,22 +273,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_NOTICE(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -320,8 +322,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); - if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { @@ -360,7 +368,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) @@ -1172,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index d473b522afc5..ba5cfebf2d0d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -431,7 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt); +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 65ec16a31658..9adbaccd0c5e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -2742,14 +2742,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, sizeof(*iwarp_info->partial_fpdus), GFP_KERNEL); - if (!iwarp_info->partial_fpdus) + if (!iwarp_info->partial_fpdus) { + rc = -ENOMEM; goto err; + } iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); - if (!iwarp_info->mpa_intermediate_buf) + if (!iwarp_info->mpa_intermediate_buf) { + rc = -ENOMEM; goto err; + } /* The mpa_bufs array serves for pending RX packets received on the * mpa ll2 that don't have place on the tx ring and require later @@ -2759,8 +2763,10 
@@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, sizeof(*iwarp_info->mpa_bufs), GFP_KERNEL); - if (!iwarp_info->mpa_bufs) + if (!iwarp_info->mpa_bufs) { + rc = -ENOMEM; goto err; + } INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); @@ -2832,8 +2838,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn) if (rc) return rc; - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); - return qed_iwarp_ll2_stop(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 1a5fc2ae351c..8a73482cb7a8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2001,6 +2001,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params) { + if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits)) + return; + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_cfg_params->tcp, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 38f7f40b3a4d..bc1f5b36b5bf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -280,6 +280,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, &cdev->mf_bits); + if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) + dev_info->b_arfs_capable = true; dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) @@ -1087,9 +1089,6 @@ static void qed_update_pf_params(struct qed_dev *cdev, #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 #define QED_PERIODIC_DB_REC_INTERVAL \ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) -#define QED_PERIODIC_DB_REC_WAIT_COUNT 10 -#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ - (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, enum qed_slowpath_wq_flag wq_flag, @@ -1123,7 +1122,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) static void qed_slowpath_wq_stop(struct qed_dev *cdev) { - int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; + int i; if (IS_VF(cdev)) return; @@ -1135,13 +1134,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev) /* Stop queuing new delayed works */ cdev->hwfns[i].slowpath_wq_active = false; - /* Wait until the last periodic doorbell recovery is executed */ - while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, - &cdev->hwfns[i].slowpath_task_flags) && - sleep_count--) - msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); - - flush_workqueue(cdev->hwfns[i].slowpath_wq); + cancel_delayed_work(&cdev->hwfns[i].slowpath_task); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 36ddb89856a8..9401b49275f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3149,6 +3149,13 @@ err0: return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 
9c4c2763de8d..e38297383b00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1192,6 +1192,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); +/** + * @brief Delete nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + /** * @brief Get the engine affinity configuration. * diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index e49fada85410..83817bb50e9f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn) break; } } - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index dcb5c917f373..fb9c3ca5d36c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 856051f50eb7..adc2c8f3d48e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. 
*/ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 1f27f9866b80..61b5aa3e5b98 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -574,12 +574,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_KDUMP_MIN 63 #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) #define TX_RING_SIZE_POW 13 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) #define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_KDUMP_MIN 63 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX #define QEDE_MIN_PKT_LEN 64 diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index c8bdbf057d5a..5041994bf03f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -336,6 +336,9 @@ int qede_alloc_arfs(struct qede_dev *edev) { int i; + if (!edev->dev_info.common.b_arfs_capable) + return -EINVAL; + edev->arfs = vzalloc(sizeof(*edev->arfs)); if (!edev->arfs) return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 004c0bfec41d..f310a94e0489 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1737,6 +1737,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb, ntohs(udp_hdr(skb)->dest) != gnv_port)) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } else if (l4_proto == IPPROTO_IPIP) { + /* IPIP tunnels are unknown to the device or at least unsupported natively, + * offloads for them can't be done trivially, so disable them for such skb. + */ + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index ba53612ae0df..25c1795889d4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include <linux/crash_dump.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/version.h> @@ -707,8 +708,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->dp_module = dp_module; edev->dp_level = dp_level; edev->ops = qed_ops; - edev->q_num_rx_buffers = NUM_RX_BDS_DEF; - edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + + if (is_kdump_kernel()) { + edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; + edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; + } else { + edev->q_num_rx_buffers = NUM_RX_BDS_DEF; + edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + } DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", info->num_queues, info->num_queues); @@ -763,7 +770,7 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) + if (edev->dev_info.common.b_arfs_capable) hw_features |= NETIF_F_NTUPLE; if (edev->dev_info.common.vxlan_enable || @@ -1151,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); + qede_ptp_enable(edev); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1240,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + edev->cdev = NULL; /* Since this can happen out-of-sync with other flows, * don't release the netdevice until after slowpath stop @@ -2115,12 +2123,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) goto out; - fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); - if (IS_ERR(fp->rxq->xdp_prog)) { - rc = PTR_ERR(fp->rxq->xdp_prog); - fp->rxq->xdp_prog = NULL; - goto out; - } + bpf_prog_add(edev->xdp_prog, 1); + fp->rxq->xdp_prog = edev->xdp_prog; } if (fp->type & QEDE_FASTPATH_TX) { @@ -2203,7 +2207,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { + if (edev->dev_info.common.b_arfs_capable) { qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } @@ -2270,10 +2274,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { - rc = qede_alloc_arfs(edev); - if (rc) - DP_NOTICE(edev, "aRFS memory allocation failed\n"); + if (qede_alloc_arfs(edev)) { + edev->ndev->features &= ~NETIF_F_NTUPLE; + edev->dev_info.common.b_arfs_capable = false; } qede_napi_add_enable(edev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index f815435cf106..2d3b2fa92df5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -411,6 +411,7 @@ void qede_ptp_disable(struct qede_dev *edev) if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); } /* Disable PTP in HW */ @@ -422,7 +423,7 @@ void qede_ptp_disable(struct qede_dev *edev) edev->ptp = NULL; } -static int qede_ptp_init(struct qede_dev *edev, bool init_tc) +static int qede_ptp_init(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -443,25 +444,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc) /* Init work 
queue for Tx timestamping */ INIT_WORK(&ptp->work, qede_ptp_task); - /* Init cyclecounter and timecounter. This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. - */ - if (init_tc) { - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; + /* Init cyclecounter and timecounter */ + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); - return rc; + return 0; } -int qede_ptp_enable(struct qede_dev *edev, bool init_tc) +int qede_ptp_enable(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -482,7 +477,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) edev->ptp = ptp; - rc = qede_ptp_init(edev, init_tc); + rc = qede_ptp_init(edev); if (rc) goto err1; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 691a14c4b2c5..89c7f3cf3ee2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); -int qede_ptp_enable(struct qede_dev *edev, bool init_tc); +int qede_ptp_enable(struct qede_dev *edev); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 2d873ae8a234..668ccc9d49f8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); + edev->rdma_info.rdma_wq = NULL; } int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) @@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (edev->rdma_info.exp_recovery) return; - if (!edev->rdma_info.qedr_dev) + if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq) return; /* We don't want the cleanup flow to start while we're allocating and diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 2a533280b124..29b9c728a65e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) - goto fail_diag_irq; + goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 
07f9067affc6..10286215092f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); - if (p_dev->ahw->reset.buff == NULL) + if (ahw->reset.buff == NULL) return -ENOMEM; p_buff = p_dev->ahw->reset.buff; @@ -2251,7 +2251,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) /* Boot either flash image or firmware image from host file system */ if (qlcnic_load_fw_file == 1) { - if (qlcnic_83xx_load_fw_image_from_host(adapter)) + err = qlcnic_83xx_load_fw_image_from_host(adapter); + if (err) return err; } else { QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index c07438db30ba..f2e5f494462b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2509,6 +2509,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) qlcnic_sriov_vf_register_map(ahw); break; default: + err = -EINVAL; goto err_out_free_hw_res; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index f34ae8c75bc5..61a39d167c8b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1426,6 +1426,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); @@ -1444,6 +1445,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; + if (!hdr) + return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index bebe38d74d66..ab1b4da5d5be 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1439,6 +1439,7 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, { struct emac_tpd tpd; u32 prod_idx; + int len; memset(&tpd, 0, sizeof(tpd)); @@ -1458,9 +1459,10 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, if (skb_network_offset(skb) != ETH_HLEN) TPD_TYP_SET(&tpd, 1); + len = skb->len; emac_tx_fill_tpd(adpt, tx_q, skb, &tpd); - netdev_sent_queue(adpt->netdev, skb->len); + netdev_sent_queue(adpt->netdev, len); /* Make sure the are enough free descriptors to hold one * maximum-sized SKB. 
We need one desc for each fragment, diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index c84ab052ef26..3eee8df359a1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); if (ret) - return ret; + goto disable_clk_axi; ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); if (ret) - return ret; + goto disable_clk_cfg_ahb; - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + if (ret) + goto disable_clk_cfg_ahb; + + return 0; + +disable_clk_cfg_ahb: + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); +disable_clk_axi: + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); + + return ret; } /* Enable clocks; needs emac_clks_phase1_init to be called before */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index fbf4cbcf1a65..18d88b424828 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) return 0; } -static int rmnet_register_real_device(struct net_device *real_dev) +static int rmnet_register_real_device(struct net_device *real_dev, + struct netlink_ext_ack *extack) { struct rmnet_port *port; int rc, entry; ASSERT_RTNL(); - if (rmnet_is_real_dev_registered(real_dev)) + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + if (port->rmnet_mode != RMNET_EPMODE_VND) { + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + return -EINVAL; + } + return 0; + } port = kzalloc(sizeof(*port), GFP_ATOMIC); if (!port) @@ -134,7 +142,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - err = rmnet_register_real_device(real_dev); + err = rmnet_register_real_device(real_dev, extack); if (err) goto err0; @@ -279,7 +287,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], { struct rmnet_priv *priv = netdev_priv(dev); struct net_device *real_dev; - struct rmnet_endpoint *ep; struct rmnet_port *port; u16 mux_id; @@ -294,19 +301,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_RMNET_MUX_ID]) { mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - if (rmnet_get_endpoint(port, mux_id)) { - NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); - return -EINVAL; + + if (mux_id != priv->mux_id) { + struct rmnet_endpoint *ep; + + ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; + + if (rmnet_get_endpoint(port, mux_id)) { + NL_SET_ERR_MSG_MOD(extack, + "MUX ID already exists"); + return -EINVAL; + } + + hlist_del_init_rcu(&ep->hlnode); + hlist_add_head_rcu(&ep->hlnode, + &port->muxed_ep[mux_id]); + + ep->mux_id = mux_id; + priv->mux_id = mux_id; } - ep = rmnet_get_endpoint(port, priv->mux_id); - if (!ep) - return -ENODEV; - - hlist_del_init_rcu(&ep->hlnode); - hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); - - ep->mux_id = mux_id; - priv->mux_id = mux_id; } if (data[IFLA_RMNET_FLAGS]) { @@ -409,13 +424,10 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (port->nr_rmnet_devs > 1) return -EINVAL; - if (port->rmnet_mode != RMNET_EPMODE_VND) - return -EINVAL; - if 
(rmnet_is_real_dev_registered(slave_dev)) return -EBUSY; - err = rmnet_register_real_device(slave_dev); + err = rmnet_register_real_device(slave_dev, extack); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 29a7bfa2584d..3d7d3ab383f8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) dev = skb->dev; port = rmnet_get_port_rcu(dev); + if (unlikely(!port)) { + atomic_long_inc(&skb->dev->rx_nohandler); + kfree_skb(skb); + goto done; + } switch (port->rmnet_mode) { case RMNET_EPMODE_VND: diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 3bc6d1ef29ec..8ff178fc2670 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -742,12 +742,6 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); } -static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) -{ - pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_READRQ, force); -} - static bool rtl_is_8125(struct rtl8169_private *tp) { return tp->mac_version >= RTL_GIGA_MAC_VER_60; @@ -2202,6 +2196,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp) { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + /* RTL8401, reportedly works if treated as RTL8101e */ + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 }, { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, @@ -3956,7 +3952,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) } switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: @@ -3985,7 +3983,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) static void rtl_pll_power_up(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... 
RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: @@ -4051,14 +4051,12 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); } static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) @@ -4073,104 +4071,80 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, MaxTxPacketSize, 0x24); RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); } static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x0c); + RTL_W8(tp, MaxTxPacketSize, 0x3f); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); -} - -static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) -{ - rtl_tx_performance_tweak(tp, - PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); -} - -static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) -{ - rtl_tx_performance_tweak(tp, - PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); } static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) { - r8168b_0_hw_jumbo_enable(tp); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); } static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) { - r8168b_0_hw_jumbo_disable(tp); - RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); } -static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +static void rtl_jumbo_config(struct rtl8169_private *tp) { + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + rtl_unlock_config_regs(tp); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - r8168b_0_hw_jumbo_enable(tp); - break; case RTL_GIGA_MAC_VER_12: case RTL_GIGA_MAC_VER_17: - r8168b_1_hw_jumbo_enable(tp); + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } break; case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: - r8168c_hw_jumbo_enable(tp); + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } break; case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: - r8168dp_hw_jumbo_enable(tp); + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); break; case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: - r8168e_hw_jumbo_enable(tp); + if (jumbo) { + pcie_set_readrq(tp->pci_dev, 512); + r8168e_hw_jumbo_enable(tp); + } else { + r8168e_hw_jumbo_disable(tp); + } break; default: break; } rtl_lock_config_regs(tp); -} -static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) -{ - rtl_unlock_config_regs(tp); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - r8168b_0_hw_jumbo_disable(tp); - break; - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - r8168b_1_hw_jumbo_disable(tp); - break; - case RTL_GIGA_MAC_VER_18 ... 
RTL_GIGA_MAC_VER_26: - r8168c_hw_jumbo_disable(tp); - break; - case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: - r8168dp_hw_jumbo_disable(tp); - break; - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33: - r8168e_hw_jumbo_disable(tp); - break; - default: - break; - } - rtl_lock_config_regs(tp); -} + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); -static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu) -{ - if (mtu > ETH_DATA_LEN) - rtl_hw_jumbo_enable(tp); - else - rtl_hw_jumbo_disable(tp); + /* Chip doesn't support pause in jumbo mode */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising, !jumbo); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising, !jumbo); + phy_start_aneg(tp->phydev); } DECLARE_RTL_COND(rtl_chipcmd_cond) @@ -4569,18 +4543,12 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); rtl_disable_clock_request(tp); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { rtl_set_def_aspm_entry_latency(tp); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_disable_clock_request(tp); } @@ -4595,8 +4563,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_ephy_init(tp, e_info_8168d_4); rtl_enable_clock_request(tp); @@ -4671,8 +4637,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) { rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); @@ -4699,7 +4663,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) { 0x08, 0x0001, 0x0002 }, { 0x09, 0x0000, 0x0080 }, { 0x19, 0x0000, 0x0224 }, - { 0x00, 0x0000, 0x0004 }, + { 0x00, 0x0000, 0x0008 }, { 0x0c, 0x3df0, 0x0200 }, }; @@ -4716,7 +4680,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) { 0x06, 0x00c0, 0x0020 }, { 0x0f, 0xffff, 0x5200 }, { 0x19, 0x0000, 0x0224 }, - { 0x00, 0x0000, 0x0004 }, + { 0x00, 0x0000, 0x0008 }, { 0x0c, 0x3df0, 0x0200 }, }; @@ -4735,8 +4699,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_reset_packet_filter(tp); rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); @@ -4973,8 +4935,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_reset_packet_filter(tp); rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4)); @@ -5032,8 +4992,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_reset_packet_filter(tp); rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80); @@ -5136,8 +5094,6 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) RTL_W8(tp, DBG_REG, FIX_NAK_1); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W8(tp, Config1, LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5153,8 +5109,6 @@ static void rtl_hw_start_8102e_2(struct 
rtl8169_private *tp) { rtl_set_def_aspm_entry_latency(tp); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); } @@ -5215,8 +5169,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8402); - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); rtl_reset_packet_filter(tp); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); @@ -5432,10 +5384,18 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) static void rtl_hw_start_8168(struct rtl8169_private *tp) { - if (tp->mac_version == RTL_GIGA_MAC_VER_13 || - tp->mac_version == RTL_GIGA_MAC_VER_16) + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN); + break; + default: + break; + } if (rtl_is_8168evl_up(tp)) RTL_W8(tp, MaxTxPacketSize, EarlySize); @@ -5492,7 +5452,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); - rtl_jumbo_config(tp, tp->dev->mtu); + rtl_jumbo_config(tp); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R16(tp, CPlusCmd); @@ -5507,10 +5467,13 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { struct rtl8169_private *tp = netdev_priv(dev); - rtl_jumbo_config(tp, new_mtu); - dev->mtu = new_mtu; netdev_update_features(dev); + rtl_jumbo_config(tp); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); return 0; } @@ -5840,7 +5803,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, opts[1] |= transport_offset << TCPHO_SHIFT; } else { if (unlikely(rtl_test_hw_pad_bug(tp, skb))) - return !eth_skb_pad(skb); + /* eth_skb_pad would free the skb on error */ + return !__skb_put_padto(skb, ETH_ZLEN, false); } return true; @@ -6254,12 +6218,11 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (unlikely(status & RxFIFOOver && tp->mac_version == RTL_GIGA_MAC_VER_11)) { netif_stop_queue(tp->dev); - /* XXX - Hack alert. See rtl_task(). 
*/ - set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } rtl_irq_disable(tp); - napi_schedule_irqoff(&tp->napi); + napi_schedule(&tp->napi); out: rtl_ack_events(tp, status); @@ -6358,8 +6321,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp) if (!tp->supports_gmii) phy_set_max_speed(phydev, SPEED_100); - phy_support_asym_pause(phydev); - phy_attached_info(phydev); return 0; @@ -6411,9 +6372,9 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - phy_disconnect(tp->phydev); + free_irq(pci_irq_vector(pdev, 0), tp); - pci_free_irq(pdev, 0, tp); + phy_disconnect(tp->phydev); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@ -6464,8 +6425,8 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); - retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, - dev->name); + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + IRQF_SHARED, dev->name, tp); if (retval < 0) goto err_release_fw_2; @@ -6498,7 +6459,7 @@ out: return retval; err_free_irq: - pci_free_irq(pdev, 0, tp); + free_irq(pci_irq_vector(pdev, 0), tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 3f165c137236..9208a72fe17e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget) int q = napi - priv->napi; int mask = BIT(q); int quota = budget; - u32 ris0, tis; - for (;;) { - tis = ravb_read(ndev, TIS); - ris0 = ravb_read(ndev, RIS0); - if (!((ris0 & mask) || (tis & mask))) - break; + /* Processing RX Descriptor Ring */ + /* Clear RX interrupt */ + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); + if (ravb_rx(ndev, &quota, q)) + goto out; - /* Processing RX Descriptor Ring */ - if (ris0 & mask) { - /* Clear RX interrupt */ - ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (ravb_rx(ndev, &quota, q)) - goto out; - } - /* Processing TX Descriptor Ring */ - if (tis & mask) { - spin_lock_irqsave(&priv->lock, flags); - /* Clear TX interrupt */ - ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); - ravb_tx_free(ndev, q, true); - netif_wake_subqueue(ndev, q); - spin_unlock_irqrestore(&priv->lock, flags); - } - } + /* Processing TX Descriptor Ring */ + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); + ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + spin_unlock_irqrestore(&priv->lock, flags); napi_complete(napi); @@ -1444,6 +1433,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1452,15 +1442,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure.
+ */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); @@ -1719,12 +1730,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) config.flags = 0; config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT) + switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { + case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL) + break; + case RAVB_RXTSTAMP_TYPE_ALL: config.rx_filter = HWTSTAMP_FILTER_ALL; - else + break; + default: config.rx_filter = HWTSTAMP_FILTER_NONE; + } return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8aa1b1bda96d..a042f4607b0d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -610,6 +610,8 @@ static struct sh_eth_cpu_data r7s72100_data = { EESR_TDE, .fdr_value = 0x0000070f, + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .no_psr = 1, .apr = 1, .mpr = 1, @@ -828,6 +830,8 @@ static struct sh_eth_cpu_data r7s9210_data = { .fdr_value = 0x0000070f, + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, + .apr = 1, .mpr = 1, .tpauser = 1, @@ -1131,6 +1135,9 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_CEEFIP | EESIPR_CELFIP | EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, + + .trscer_err_mask = DESC_I_RINT8, + .tsu = 1, .dual_port = 1, }; @@ -2640,10 +2647,10 @@ static int sh_eth_close(struct net_device *ndev) /* Free all the skbuffs in the Rx queue and the DMA buffer. */ sh_eth_ring_free(ndev); - pm_runtime_put_sync(&mdp->pdev->dev); - mdp->is_opened = 0; + pm_runtime_put(&mdp->pdev->dev); + return 0; } diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 786b158bd305..5abb3f9684ff 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) err_dma_event_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->event_ring); err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_waits_alloc: - rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 3a6761131f4c..2248d2674612 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured? 
Minimum 100 ns reset needed @@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 38068fc34141..c7bdada4d1b9 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2502,20 +2502,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev) retval = smsc911x_init(dev); if (retval < 0) - goto out_disable_resources; + goto out_init_fail; netif_carrier_off(dev); retval = smsc911x_mii_init(pdev, dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_disable_resources; + goto out_init_fail; } retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_disable_resources; + goto out_init_fail; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); @@ -2556,9 +2556,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) return 0; -out_disable_resources: +out_init_fail: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); +out_disable_resources: (void)smsc911x_disable_resources(pdev); out_enable_resources_fail: smsc911x_free_resources(pdev); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 8bd2912bf713..33d7c2940ba9 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1693,14 +1693,17 @@ static int netsec_netdev_init(struct net_device *ndev) goto err1; /* set phy power down */ - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | - BMCR_PDOWN; - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, + data | BMCR_PDOWN); ret = netsec_reset_hardware(priv, true); if (ret) goto err2; + /* Restore phy power state */ + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 38d39c4b5ac8..603d54f83399 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev) ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) - return ret; + goto out_reset_assert; ave_global_reset(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 0d21082ceb93..826626e870d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -318,6 +318,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) /* Enable PTP clock */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); + break; + case PHY_INTERFACE_MODE_SGMII: + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | + 
NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); + break; + default: + /* We don't get here; the switch above will have errored out */ + unreachable(); + } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { @@ -337,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->multicast_filter_bins = 0; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 33ce139f090f..2788d4c5b192 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -29,7 +29,6 @@ #define PRG_ETH0_EXT_RMII_MODE 4 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ -#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) #define PRG_ETH0_TXDLY_SHIFT 5 @@ -119,6 +118,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { .div = 5, .val = 5, }, { .div = 6, .val = 6, }, { .div = 7, .val = 7, }, + { /* end of array */ } }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); @@ -142,8 +142,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) } clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; - clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; - clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; + clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK); + clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >> + clk_configs->m250_mux.shift; clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names, MUX_CLK_NUM_PARENTS, &clk_mux_ops, &clk_configs->m250_mux.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e0a5fe83d8e0..bfc4a92f1d92 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -75,6 +75,11 @@ struct ethqos_emac_por { unsigned int value; }; +struct ethqos_emac_driver_data { + const struct ethqos_emac_por *por; + unsigned int num_por; +}; + struct qcom_ethqos { struct platform_device *pdev; void __iomem *rgmii_base; @@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = { { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 }, }; +static const struct ethqos_emac_driver_data emac_v2_3_0_data = { + .por = emac_v2_3_0_por, + .num_por = ARRAY_SIZE(emac_v2_3_0_por), +}; + static int ethqos_dll_configure(struct qcom_ethqos *ethqos) { unsigned int val; @@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; + const struct ethqos_emac_driver_data *data; struct qcom_ethqos *ethqos; struct resource *res; int ret; @@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) goto err_mem; } - ethqos->por = of_device_get_match_data(&pdev->dev); + data = of_device_get_match_data(&pdev->dev); + ethqos->por = data->por; + ethqos->num_por = data->num_por; ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii"); if (IS_ERR(ethqos->rgmii_clk)) { @@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev) } static const struct of_device_id 
qcom_ethqos_match[] = { - { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por}, + { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data}, { } }; MODULE_DEVICE_TABLE(of, qcom_ethqos_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index e0212d2fc2a1..70d41783329d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) switch (phymode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; break; case PHY_INTERFACE_MODE_MII: @@ -289,16 +291,19 @@ static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) phymode == PHY_INTERFACE_MODE_MII || phymode == PHY_INTERFACE_MODE_GMII || phymode == PHY_INTERFACE_MODE_SGMII) { - ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, &module); module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2)); regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, module); - } else { - ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); } + if (dwmac->f2h_ptp_ref_clk) + ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); + else + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << + (reg_shift / 2)); + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); /* Deassert reset for the phy configuration to be sampled by diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index e9e0867ec139..2f6258ca9515 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -64,6 +64,7 @@ struct emac_variant { * @variant: reference to the current board variant * @regmap: regmap for using the syscon * @internal_phy_powered: Does the internal PHY is enabled + * @use_internal_phy: Is the internal PHY selected for use * @mux_handle: Internal pointer used by mdio-mux lib */ struct sunxi_priv_data { @@ -74,6 +75,7 @@ struct sunxi_priv_data { const struct emac_variant *variant; struct regmap_field *regmap_field; bool internal_phy_powered; + bool use_internal_phy; void *mux_handle; }; @@ -523,8 +525,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { .dma_interrupt = sun8i_dwmac_dma_interrupt, }; +static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv); + static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) { + struct net_device *ndev = platform_get_drvdata(pdev); struct sunxi_priv_data *gmac = priv; int ret; @@ -538,13 +543,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) ret = clk_prepare_enable(gmac->tx_clk); if (ret) { - if (gmac->regulator) - regulator_disable(gmac->regulator); dev_err(&pdev->dev, "Could not enable AHB clock\n"); - return ret; + goto err_disable_regulator; + } + + if (gmac->use_internal_phy) { + ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev)); + if (ret) + goto err_disable_clk; } return 0; + +err_disable_clk: + clk_disable_unprepare(gmac->tx_clk); +err_disable_regulator: + if (gmac->regulator) + regulator_disable(gmac->regulator); + + return ret; } static void sun8i_dwmac_core_init(struct mac_device_info *hw, @@ -815,7 +832,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int 
desired_child, struct sunxi_priv_data *gmac = priv->plat->bsp_priv; u32 reg, val; int ret = 0; - bool need_power_ephy = false; if (current_child ^ desired_child) { regmap_field_read(gmac->regmap_field, &reg); @@ -823,13 +839,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: dev_info(priv->device, "Switch mux to internal PHY"); val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT; - - need_power_ephy = true; + gmac->use_internal_phy = true; break; case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID: dev_info(priv->device, "Switch mux to external PHY"); val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN; - need_power_ephy = false; + gmac->use_internal_phy = false; break; default: dev_err(priv->device, "Invalid child ID %x\n", @@ -837,7 +852,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, return -EINVAL; } regmap_field_write(gmac->regmap_field, val); - if (need_power_ephy) { + if (gmac->use_internal_phy) { ret = sun8i_dwmac_power_internal_phy(priv); if (ret) return ret; @@ -988,17 +1003,12 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) struct sunxi_priv_data *gmac = priv; if (gmac->variant->soc_has_internal_phy) { - /* sun8i_dwmac_exit could be called with mdiomux uninit */ - if (gmac->mux_handle) - mdio_mux_uninit(gmac->mux_handle); if (gmac->internal_phy_powered) sun8i_dwmac_unpower_internal_phy(gmac); } sun8i_dwmac_unset_syscon(gmac); - reset_control_put(gmac->rst_ephy); - clk_disable_unprepare(gmac->tx_clk); if (gmac->regulator) regulator_disable(gmac->regulator); @@ -1196,6 +1206,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; plat_dat->setup = sun8i_dwmac_setup; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); if (ret) @@ -1227,12 +1239,32 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) return ret; dwmac_mux: + reset_control_put(gmac->rst_ephy); + clk_put(gmac->ephy_clk); sun8i_dwmac_unset_syscon(gmac); dwmac_exit: stmmac_pltfr_remove(pdev); return ret; } +static int sun8i_dwmac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + + if (gmac->variant->soc_has_internal_phy) { + mdio_mux_uninit(gmac->mux_handle); + sun8i_dwmac_unpower_internal_phy(gmac); + reset_control_put(gmac->rst_ephy); + clk_put(gmac->ephy_clk); + } + + stmmac_pltfr_remove(pdev); + + return 0; +} + static const struct of_device_id sun8i_dwmac_match[] = { { .compatible = "allwinner,sun8i-h3-emac", .data = &emac_variant_h3 }, @@ -1252,7 +1284,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); static struct platform_driver sun8i_dwmac_driver = { .probe = sun8i_dwmac_probe, - .remove = stmmac_pltfr_remove, + .remove = sun8i_dwmac_remove, .driver = { .name = "dwmac-sun8i", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 102d637bc84a..65a3e3b5face 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -146,6 +146,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev) plat_dat->init = sun7i_gmac_init; plat_dat->exit = sun7i_gmac_exit; plat_dat->fix_mac_speed = sun7i_fix_speed; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun7i_gmac_init(pdev, 
plat_dat->bsp_priv); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index bc9b01376e80..1d0b64bd1e1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF; } else if (dev->flags & IFF_ALLMULTI) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 68c157979b94..a41ac13cc4e5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -116,6 +116,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, ioaddr + DMA_CHAN_INTR_ENA(chan)); } +static void dwmac410_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + static void dwmac4_dma_init(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, int atds) { @@ -462,7 +479,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { const struct stmmac_dma_ops dwmac410_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, - .init_chan = dwmac4_dma_init_channel, + .init_chan = dwmac410_dma_init_channel, .init_rx_chan = dwmac4_dma_init_rx_chan, .init_tx_chan = dwmac4_dma_init_tx_chan, .axi = dwmac4_dma_axi, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index f2a29a90e085..afdea015f4b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -60,10 +60,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) value &= ~DMA_CONTROL_ST; writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); - - value = readl(ioaddr + GMAC_CONFIG); - value &= ~GMAC_CONFIG_TE; - writel(value, ioaddr + GMAC_CONFIG); } void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1a768837ca72..ce1346c14b05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -662,23 +662,16 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); int ret; - if (!edata->eee_enabled) { + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + if (!edata->eee_enabled) stmmac_disable_eee_mode(priv); - } else { - /* We are asking for enabling the EEE but it is safe - * to verify all by invoking the eee_init function. - * In case of failure it will return an error. 
- */ - edata->eee_enabled = stmmac_eee_init(priv); - if (!edata->eee_enabled) - return -EOPNOTSUPP; - } ret = phylink_ethtool_set_eee(priv->phylink, edata); if (ret) return ret; - priv->eee_enabled = edata->eee_enabled; priv->tx_lpi_timer = edata->tx_lpi_timer; return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 020159622559..e5d9007c8090 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -26,12 +26,16 @@ static void config_sub_second_increment(void __iomem *ioaddr, unsigned long data; u32 reg_value; - /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second - * formula = (1/ptp_clock) * 1000000000 - * where ptp_clock is 50MHz if fine method is used to update system + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. */ if (value & PTP_TCR_TSCFUPDT) - data = (1000000000ULL / 50000000); + data = (2000000000ULL / ptp_clock); else data = (1000000000ULL / ptp_clock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 89a6ae2b17e3..8e7c60e02fa0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -175,32 +175,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) } } -/** - * stmmac_stop_all_queues - Stop all queues - * @priv: driver private structure - */ -static void stmmac_stop_all_queues(struct stmmac_priv *priv) -{ - u32 tx_queues_cnt = priv->plat->tx_queues_to_use; - u32 queue; - - for (queue = 0; queue < tx_queues_cnt; queue++) - netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); -} - -/** - * stmmac_start_all_queues - Start all queues - * @priv: driver private structure - */ -static void stmmac_start_all_queues(struct stmmac_priv *priv) -{ - u32 tx_queues_cnt = priv->plat->tx_queues_to_use; - u32 queue; - - for (queue = 0; queue < tx_queues_cnt; queue++) - netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); -} - static void stmmac_service_event_schedule(struct stmmac_priv *priv) { if (!test_bit(STMMAC_DOWN, &priv->state) && @@ -630,7 +604,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - ts_event_en = PTP_TCR_TSEVNTENA; + if (priv->synopsys_id != DWMAC_CORE_5_10) + ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; @@ -1466,6 +1441,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) stmmac_free_tx_buffer(priv, queue, i); } +/** + * stmmac_free_tx_skbufs - free TX skb buffers + * @priv: private structure + */ +static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) +{ + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queue_cnt; queue++) + dma_free_tx_skbufs(priv, queue); +} + /** * free_dma_rx_desc_resources - free RX dma desc resources * @priv: 
private structure @@ -2736,7 +2724,7 @@ static int stmmac_open(struct net_device *dev) } stmmac_enable_all_queues(priv); - stmmac_start_all_queues(priv); + netif_tx_start_all_queues(priv->dev); return 0; @@ -2770,15 +2758,10 @@ static int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; - if (priv->eee_enabled) - del_timer_sync(&priv->eee_ctrl_timer); - /* Stop and disconnect the PHY */ phylink_stop(priv->phylink); phylink_disconnect_phy(priv->phylink); - stmmac_stop_all_queues(priv); - stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) @@ -2791,6 +2774,11 @@ static int stmmac_release(struct net_device *dev) if (priv->lpi_irq > 0) free_irq(priv->lpi_irq, dev); + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + /* Stop TX/RX DMA and clear the descriptors */ stmmac_stop_all_dma(priv); @@ -3751,6 +3739,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); int txfifosz = priv->plat->tx_fifo_size; + const int mtu = new_mtu; if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; @@ -3768,7 +3757,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) return -EINVAL; - dev->mtu = new_mtu; + dev->mtu = mtu; netdev_update_features(dev); @@ -3832,7 +3821,7 @@ static int stmmac_set_features(struct net_device *netdev, /** * stmmac_interrupt - main ISR * @irq: interrupt number. - * @dev_id: to pass the net device pointer. + * @dev_id: to pass the net device pointer (must be valid). * Description: this is the main driver interrupt service routine. * It can call: * o DMA service routine (to manage incoming frame reception and transmission @@ -3856,11 +3845,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (priv->irq_wake) pm_wakeup_event(priv->device, 0); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -4774,13 +4758,17 @@ int stmmac_suspend(struct device *dev) mutex_lock(&priv->lock); netif_device_detach(ndev); - stmmac_stop_all_queues(priv); stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) del_timer_sync(&priv->tx_queue[chan].txtimer); + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + /* Stop TX/RX DMA */ stmmac_stop_all_dma(priv); @@ -4833,6 +4821,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv) tx_q->cur_tx = 0; tx_q->dirty_tx = 0; tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); } } @@ -4879,6 +4869,7 @@ int stmmac_resume(struct device *dev) stmmac_reset_queues_param(priv); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); stmmac_hw_setup(ndev, false); @@ -4887,8 +4878,6 @@ int stmmac_resume(struct device *dev) stmmac_enable_all_queues(priv); - stmmac_start_all_queues(priv); - mutex_unlock(&priv->lock); if (!device_may_wakeup(priv->device)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5150551c28be..508325cc105d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct 
platform_device *pdev, * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + stmmac_res->wol_irq = + platform_get_irq_byname_optional(pdev, "eth_wake_irq"); if (stmmac_res->wol_irq < 0) { if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n"); stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; + stmmac_res->lpi_irq = + platform_get_irq_byname_optional(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq < 0) { + if (stmmac_res->lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ eth_lpi not found\n"); + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1d135b02ea02..52b453b60597 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -332,7 +332,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv, priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; } else if (!qopt->enable) { - return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, + MTL_QUEUE_DCB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; } /* Port Transmit Rate and Speed Divider */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index c91876f8c536..6e78a33aa5e4 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4971,7 +4971,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); - goto err_write_cacheline; + goto err_out_free_res; } } #endif @@ -5144,7 +5144,6 @@ err_out_iounmap: err_out_free_res: pci_release_regions(pdev); -err_write_cacheline: /* Try to restore it in case the error occurred after we * set it. 
*/ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f5fd1f3c07cc..2911740af706 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3931,8 +3931,6 @@ static void niu_xmac_interrupt(struct niu *np) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; - if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) - mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8b94d9ad9e2b..f87e135a8aef 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1353,27 +1353,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (vio_version_after_eq(&port->vio, 1, 3)) localmtu -= VLAN_HLEN; - if (skb->protocol == htons(ETH_P_IP)) { - struct flowi4 fl4; - struct rtable *rt = NULL; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = dev->ifindex; - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); - fl4.daddr = ip_hdr(skb)->daddr; - fl4.saddr = ip_hdr(skb)->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - if (!IS_ERR(rt)) { - skb_dst_set(skb, &rt->dst); - icmp_send(skb, ICMP_DEST_UNREACH, - ICMP_FRAG_NEEDED, - htonl(localmtu)); - } - } + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(localmtu)); #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); #endif goto out_dropped; } diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 0f8a924fc60c..c6c1bb15557f 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2052,6 +2052,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /*bdx_hw_reset(priv); */ if (bdx_read_mac(priv)) { pr_err("load MAC address failed\n"); + err = -EFAULT; goto err_out_iomap; } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d7a953c647b4..33eca554424a 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2209,7 +2209,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, HOST_PORT_NUM, ALE_VLAN, vid); ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); - ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); + ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid); err: pm_runtime_put(cpsw->dev); return ret; @@ -2876,6 +2876,7 @@ static int cpsw_probe(struct platform_device *pdev) CPSW_MAX_QUEUES, CPSW_MAX_QUEUES); if (!ndev) { dev_err(dev, "error allocating net_device\n"); + ret = -ENOMEM; goto clean_cpts; } @@ -2999,11 +3000,15 @@ static int cpsw_suspend(struct device *dev) struct cpsw_common *cpsw = dev_get_drvdata(dev); int i; + rtnl_lock(); + for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev) if (netif_running(cpsw->slaves[i].ndev)) cpsw_ndo_stop(cpsw->slaves[i].ndev); + rtnl_unlock(); + /* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 61136428e2c0..26cfe3f7ed8d 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c 
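The sunvnet conversion a few hunks above replaces an open-coded ip_route_output_key()/skb_dst_set()/icmp_send() sequence with icmp_ndo_send(), which exists so tunnel-style drivers can send "fragmentation needed" errors from their transmit path. A minimal sketch of the pattern, assuming a hypothetical driver (example_tunnel_xmit and the dev->mtu limit are illustrative, not part of this series):

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>

static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	unsigned int mtu = dev->mtu;	/* assumed tunnel MTU */

	if (skb->len > mtu) {
		/* icmp_ndo_send() re-validates the headers and saves and
		 * restores the IP control block itself, so no manual route
		 * lookup or skb_dst_set() is needed before replying.
		 */
		if (skb->protocol == htons(ETH_P_IP))
			icmp_ndo_send(skb, ICMP_DEST_UNREACH,
				      ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* ... hand the frame to the real transmit path here ... */
	return NETDEV_TX_OK;
}

The ndo variants matter because in ndo_start_xmit() context the skb control block may still carry data from other layers, which plain icmp_send()/icmpv6_send() would misread as IP options; the helpers clear and restore it around the reply.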
@@ -485,6 +485,7 @@ void cpts_unregister(struct cpts *cpts) ptp_clock_unregister(cpts->clock); cpts->clock = NULL; + cpts->phc_index = -1; cpts_write32(cpts, 0, int_enable); cpts_write32(cpts, 0, control); @@ -667,6 +668,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, cpts->cc.read = cpts_systim_read; cpts->cc.mask = CLOCKSOURCE_MASK(32); cpts->info = cpts_info; + cpts->phc_index = -1; cpts_calc_mult_shift(cpts); /* save cc.mult original value as it can be modified diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae27be85e363..7cc09a6f9f9a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; /* EMAC mac_status register */ #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) -#define EMAC_MACSTATUS_TXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000) #define EMAC_MACSTATUS_TXERRCH_SHIFT (16) #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) -#define EMAC_MACSTATUS_RXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_RXERRCH_MASK (0x700) #define EMAC_MACSTATUS_RXERRCH_SHIFT (8) /* EMAC RX register masks */ diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 38b7f6d35759..702fdc393da0 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; data->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!data->regs) return -ENOMEM; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 538e70810d3d..a99c7c95de5c 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 12466a72cefc..aab0cf57c654 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -644,7 +644,7 @@ static int tc_mii_probe(struct net_device *dev) linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); } - linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_andnot(phydev->supported, phydev->supported, mask); linkmode_copy(phydev->advertising, phydev->supported); lp->link = 0; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index eb480204cdbe..5b8451c58aa4 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1425,9 +1425,7 @@ static int temac_probe(struct platform_device *pdev) of_node_put(dma_np); } else if (pdata) { /* 2nd memory resource specifies DMA registers */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); 
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, "could not map DMA registers\n"); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 2dacfc85b3ba..04e51af32178 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -435,7 +435,7 @@ struct axienet_local { void __iomem *regs; void __iomem *dma_regs; - struct tasklet_struct dma_err_tasklet; + struct work_struct dma_err_task; int tx_irq; int rx_irq; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 479325eeaf8a..f98318d93ce7 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -437,9 +437,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) lp->options |= options; } -static void __axienet_device_reset(struct axienet_local *lp) +static int __axienet_device_reset(struct axienet_local *lp) { u32 timeout; + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending * commands/transfers will be flushed or completed during this @@ -455,9 +456,11 @@ static void __axienet_device_reset(struct axienet_local *lp) if (--timeout == 0) { netdev_err(lp->ndev, "%s: DMA reset timeout!\n", __func__); - break; + return -ETIMEDOUT; } } + + return 0; } /** @@ -470,13 +473,17 @@ static void __axienet_device_reset(struct axienet_local *lp) * areconnected to Axi Ethernet reset lines, this in turn resets the Axi * Ethernet core. No separate hardware reset is done for the Axi Ethernet * core. + * Returns 0 on success or a negative error number otherwise. */ -static void axienet_device_reset(struct net_device *ndev) +static int axienet_device_reset(struct net_device *ndev) { u32 axienet_status; struct axienet_local *lp = netdev_priv(ndev); + int ret; - __axienet_device_reset(lp); + ret = __axienet_device_reset(lp); + if (ret) + return ret; lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; lp->options |= XAE_OPTION_VLAN; @@ -491,9 +498,11 @@ static void axienet_device_reset(struct net_device *ndev) lp->options |= XAE_OPTION_JUMBO; } - if (axienet_dma_bd_init(ndev)) { + ret = axienet_dma_bd_init(ndev); + if (ret) { netdev_err(ndev, "%s: descriptor allocation failed\n", __func__); + return ret; } axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); @@ -518,6 +527,8 @@ static void axienet_device_reset(struct net_device *ndev) axienet_setoptions(ndev, lp->options); netif_trans_update(ndev); + + return 0; } /** @@ -806,7 +817,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) /* Write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - tasklet_schedule(&lp->dma_err_tasklet); + schedule_work(&lp->dma_err_task); axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); } out: @@ -855,7 +866,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) /* write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - tasklet_schedule(&lp->dma_err_tasklet); + schedule_work(&lp->dma_err_task); axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); } out: @@ -891,7 +902,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev) return IRQ_HANDLED; } -static void axienet_dma_err_handler(unsigned long data); +static void axienet_dma_err_handler(struct work_struct *work); /** * axienet_open - Driver open routine. 
@@ -921,8 +932,9 @@ static int axienet_open(struct net_device *ndev) */ mutex_lock(&lp->mii_bus->mdio_lock); axienet_mdio_disable(lp); - axienet_device_reset(ndev); - ret = axienet_mdio_enable(lp); + ret = axienet_device_reset(ndev); + if (ret == 0) + ret = axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); if (ret < 0) return ret; @@ -935,9 +947,8 @@ static int axienet_open(struct net_device *ndev) phylink_start(lp->phylink); - /* Enable tasklets for Axi DMA error handling */ - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, - (unsigned long) lp); + /* Enable worker thread for Axi DMA error handling */ + INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, @@ -966,7 +977,7 @@ err_rx_irq: err_tx_irq: phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); - tasklet_kill(&lp->dma_err_tasklet); + cancel_work_sync(&lp->dma_err_task); dev_err(lp->dev, "request_irq() failed\n"); return ret; } @@ -1025,7 +1036,7 @@ static int axienet_stop(struct net_device *ndev) axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); - tasklet_kill(&lp->dma_err_tasklet); + cancel_work_sync(&lp->dma_err_task); if (lp->eth_irq > 0) free_irq(lp->eth_irq, ndev); @@ -1505,17 +1516,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = { }; /** - * axienet_dma_err_handler - Tasklet handler for Axi DMA Error - * @data: Data passed + * axienet_dma_err_handler - Work queue task for Axi DMA Error + * @work: pointer to work_struct * * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the * Tx/Rx BDs. */ -static void axienet_dma_err_handler(unsigned long data) +static void axienet_dma_err_handler(struct work_struct *work) { u32 axienet_status; u32 cr, i; - struct axienet_local *lp = (struct axienet_local *) data; + struct axienet_local *lp = container_of(work, struct axienet_local, + dma_err_task); struct net_device *ndev = lp->ndev; struct axidma_bd *cur_p; @@ -1677,6 +1689,18 @@ static int axienet_probe(struct platform_device *pdev) lp->options = XAE_OPTION_DEFAULTS; lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; + + lp->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(lp->clk)) { + ret = PTR_ERR(lp->clk); + goto free_netdev; + } + ret = clk_prepare_enable(lp->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret); + goto free_netdev; + } + /* Map device registers */ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); lp->regs = devm_ioremap_resource(&pdev->dev, ethres); @@ -1824,20 +1848,6 @@ static int axienet_probe(struct platform_device *pdev) lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (lp->phy_node) { - lp->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(lp->clk)) { - dev_warn(&pdev->dev, "Failed to get clock: %ld\n", - PTR_ERR(lp->clk)); - lp->clk = NULL; - } else { - ret = clk_prepare_enable(lp->clk); - if (ret) { - dev_err(&pdev->dev, "Unable to enable clock: %d\n", - ret); - goto free_netdev; - } - } - ret = axienet_mdio_setup(lp); if (ret) dev_warn(&pdev->dev, diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 56b7791911bf..c866f58dab70 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = { .ndo_set_mac_address = dfx_ctl_set_mac_address, }; +static void dfx_register_res_alloc_err(const char *print_name, bool mmio, + bool eisa) +{ + pr_err("%s: Cannot 
use %s, no address set, aborting\n", + print_name, mmio ? "MMIO" : "I/O"); + pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n", + print_name, mmio ? 'n' : 'y'); + if (eisa && mmio) + pr_err("%s: Or run ECU and set adapter's MMIO location\n", + print_name); +} + +static void dfx_register_res_err(const char *print_name, bool mmio, + unsigned long start, unsigned long len) +{ + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n", + print_name, mmio ? "MMIO" : "I/O", len, start); +} + /* * ================ * = dfx_register = @@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev) dev_set_drvdata(bdev, dev); dfx_get_bars(bdev, bar_start, bar_len); - if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { - pr_err("%s: Cannot use MMIO, no address set, aborting\n", - print_name); - pr_err("%s: Run ECU and set adapter's MMIO location\n", - print_name); - pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" - "\n", print_name); + if (bar_len[0] == 0 || + (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) { + dfx_register_res_alloc_err(print_name, dfx_use_mmio, + dfx_bus_eisa); err = -ENXIO; - goto err_out; + goto err_out_disable; } if (dfx_use_mmio) @@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev) else region = request_region(bar_start[0], bar_len[0], print_name); if (!region) { - pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " - "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name, - (long)bar_len[0], (long)bar_start[0]); + dfx_register_res_err(print_name, dfx_use_mmio, + bar_start[0], bar_len[0]); err = -EBUSY; goto err_out_disable; } if (bar_start[1] != 0) { region = request_region(bar_start[1], bar_len[1], print_name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[1], (long)bar_start[1]); + dfx_register_res_err(print_name, 0, + bar_start[1], bar_len[1]); err = -EBUSY; goto err_out_csr_region; } @@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev) if (bar_start[2] != 0) { region = request_region(bar_start[2], bar_len[2], print_name); if (!region) { - pr_err("%s: Cannot reserve I/O resource " - "0x%lx @ 0x%lx, aborting\n", print_name, - (long)bar_len[2], (long)bar_start[2]); + dfx_register_res_err(print_name, 0, + bar_start[2], bar_len[2]); err = -EBUSY; goto err_out_bh_region; } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 730ab57201bd..84f5717c01e2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -221,8 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; - flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT | - (gnvh->oam ? TUNNEL_OAM : 0) | + flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? 
TUNNEL_CRIT_OPT : 0); tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, @@ -773,7 +772,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -789,6 +789,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; tos = info->key.tos; if ((tos == 1) && !geneve->collect_md) { @@ -823,7 +825,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -839,6 +842,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; + fl6->fl6_dport = dport; + fl6->fl6_sport = sport; + prio = info->key.tos; if ((prio == 1) && !geneve->collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); @@ -885,14 +891,18 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + if (!pskb_inet_may_pull(skb)) + return -EINVAL; + + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -947,13 +957,17 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + if (!pskb_inet_may_pull(skb)) + return -EINVAL; + + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -987,9 +1001,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (geneve->collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { - err = -EINVAL; netdev_dbg(dev, "no tunnel metadata\n"); - goto tx_error; + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } } else { info = &geneve->info; @@ -1006,7 +1021,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!err)) return NETDEV_TX_OK; -tx_error: + dev_kfree_skb(skb); if (err == -ELOOP) @@ -1033,13 +1048,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); + __be16 
sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; - struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); + + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -1049,9 +1069,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; - struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); + + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1062,8 +1086,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; } - info->key.tp_src = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = geneve->info.key.tp_dst; return 0; } @@ -1207,7 +1230,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); if (df < 0 || df > GENEVE_DF_MAX) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF], "Invalid DF attribute"); return -EINVAL; } @@ -1614,11 +1637,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); + enum ifla_geneve_df df = geneve->df; struct geneve_sock *gs4, *gs6; struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; - enum ifla_geneve_df df; bool ttl_inherit; int err; @@ -1648,6 +1671,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; geneve_unquiesce(geneve, gs4, gs6); return 0; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 3a53d222bfcc..d0653babab92 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -545,9 +545,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); goto err_rt; } @@ -663,10 +662,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gtp = netdev_priv(dev); - err = gtp_encap_enable(gtp, data); - if (err < 0) - return err; - if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; } else { @@ -677,12 +672,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, err = gtp_hashtable_new(gtp, hashsize); if (err < 0) - goto out_encap; + return err; + + err = gtp_encap_enable(gtp, data); + if (err < 0) + goto out_hashtable; err = register_netdevice(dev); if (err < 0) { netdev_dbg(dev, "failed to register new netdev %d\n", err); - goto out_hashtable; + goto out_encap; } gn = net_generic(dev_net(dev), 
gtp_net_id); @@ -693,11 +692,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, return 0; +out_encap: + gtp_encap_disable(gtp); out_hashtable: kfree(gtp->addr_hash); kfree(gtp->tid_hash); -out_encap: - gtp_encap_disable(gtp); return err; } @@ -1172,16 +1171,17 @@ out_unlock: static struct genl_family gtp_genl_family; static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, - u32 type, struct pdp_ctx *pctx) + int flags, u32 type, struct pdp_ctx *pctx) { void *genlh; - genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0, + genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, type); if (genlh == NULL) goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; @@ -1230,8 +1230,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) goto err_unlock; } - err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, - info->snd_seq, info->nlhdr->nlmsg_type, pctx); + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, + 0, info->nlhdr->nlmsg_type, pctx); if (err < 0) goto err_unlock_free; @@ -1274,6 +1274,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, gtp_genl_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + NLM_F_MULTI, cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; cb->args[1] = j; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index fbea6f232819..e2ad3c2e8df5 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) { struct bpqdev *bpq; - list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) { + list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list, + lockdep_rtnl_is_held()) { if (bpq->ethdev == dev) return bpq->axdev; } diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 71cdef9fb56b..5ab53e9942f3 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1133,6 +1133,7 @@ static int __init yam_init_driver(void) err = register_netdev(dev); if (err) { printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); + free_netdev(dev); goto error; } yam_devs[i] = dev; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 2a6ec5394966..a4b3fce69ecd 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ca16ae8c8332..362b7ca6f3b2 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -366,7 +366,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } rcu_read_unlock(); - while (unlikely(txq >= ndev->real_num_tx_queues)) + while (txq >= ndev->real_num_tx_queues) txq -= ndev->real_num_tx_queues; return txq; @@ -501,7 +501,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, int rc; skb->dev = vf_netdev; - skb->queue_mapping = 
qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); rc = dev_queue_xmit(skb); if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { @@ -531,12 +531,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; - /* if VF is present and up then redirect packets - * already called with rcu_read_lock_bh + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. */ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); if (vf_netdev && netif_running(vf_netdev) && - !netpoll_tx_running(net)) + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 5a37514e4234..5945ac5f38ee 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -882,7 +882,9 @@ static int adf7242_rx(struct adf7242_local *lp) int ret; u8 lqi, len_u8, *data; - adf7242_read_reg(lp, 0, &len_u8); + ret = adf7242_read_reg(lp, 0, &len_u8); + if (ret) + return ret; len = len_u8; @@ -1262,7 +1264,7 @@ static int adf7242_probe(struct spi_device *spi) WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; - goto err_hw_init; + goto err_alloc_wq; } ret = adf7242_hw_init(lp); @@ -1294,6 +1296,8 @@ static int adf7242_probe(struct spi_device *spi) return ret; err_hw_init: + destroy_workqueue(lp->wqueue); +err_alloc_wq: mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 0dd0ba915ab9..23ee0b14cbfa 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n) return -ENOMEM; } usb_anchor_urb(urb, &atusb->idle_urbs); + usb_free_urb(urb); n--; } return 0; diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 430c93786153..25dbea302fb6 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2924,6 +2924,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv) ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); + destroy_workqueue(priv->mlme_workqueue); return -ENOMEM; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index b805abc9ec3b..5fbabae2909e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | 
(1<<__LINK_STATE_DORMANT)) @@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; @@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 32c627702ac5..f233a6933a97 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1080,6 +1080,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct macsec_rx_sa *rx_sa; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; + unsigned int len; sci_t sci; u32 pn; bool cbit; @@ -1236,9 +1237,10 @@ deliver: macsec_rxsc_put(rx_sc); skb_orphan(skb); + len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) - count_rx(dev, skb->len); + count_rx(dev, len); else macsec->secy.netdev->stats.rx_dropped++; @@ -1309,7 +1311,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + /* Pick a sync gcm(aes) cipher to ensure order is preserved. 
*/ + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; @@ -3226,11 +3229,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); - struct net_device *real_dev; - int err; - sci_t sci; - u8 icv_len = DEFAULT_ICV_LEN; rx_handler_func_t *rx_handler; + u8 icv_len = DEFAULT_ICV_LEN; + struct net_device *real_dev; + int err, mtu; + sci_t sci; if (!tb[IFLA_LINK]) return -EINVAL; @@ -3246,7 +3249,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + if (mtu < 0) + dev->mtu = 0; + else + dev->mtu = mtu; rx_handler = rtnl_dereference(real_dev->rx_handler); if (rx_handler && rx_handler != macsec_handle_frame) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 26f6be4796c7..07622cf8765a 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -447,6 +447,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) int ret; rx_handler_result_t handle_res; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; @@ -1255,6 +1259,9 @@ static void macvlan_port_destroy(struct net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1302,6 +1309,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1358,10 +1379,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) @@ -1704,7 +1721,7 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_dev, list); - if (macvlan_sync_address(vlan->dev, dev->dev_addr)) + if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index b16a1221d19b..fb182bec8f06 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev) return 0; err_standby_open: - dev_close(primary_dev); + if (primary_dev) + dev_close(primary_dev); err_primary_open: netif_tx_disable(dev); return err; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 55f57f76d01b..917f37c17630 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c 
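The macsec hunk above picks a synchronous gcm(aes) transform: with the type argument left at 0, putting CRYPTO_ALG_ASYNC in the mask tells the crypto API to match only implementations whose ASYNC flag is clear, so crypto_aead_encrypt() completes inline instead of through a completion callback that could reorder frames. A minimal sketch of the allocation pattern (example_alloc_sync_gcm is a hypothetical helper, not part of this series):

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/types.h>

static struct crypto_aead *example_alloc_sync_gcm(const u8 *key,
						  unsigned int key_len)
{
	struct crypto_aead *tfm;
	int err;

	/* type = 0, mask = CRYPTO_ALG_ASYNC: only synchronous
	 * implementations of gcm(aes) are eligible.
	 */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_aead_setkey(tfm, key, key_len);
	if (err) {
		crypto_free_aead(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}

The same type/mask convention applies to the other crypto_alloc_*() helpers: the mask selects which algorithm flag bits must match the corresponding type bits exactly.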
@@ -292,6 +292,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) ns = netdev_priv(dev); ns->netdev = dev; + u64_stats_init(&ns->syncp); ns->nsim_dev = nsim_dev; ns->nsim_dev_port = nsim_dev_port; ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; @@ -301,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_rtnl_unlock; nsim_ipsec_init(ns); @@ -315,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); +err_rtnl_unlock: rtnl_unlock(); -err_free_netdev: free_netdev(dev); return ERR_PTR(err); } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index fe602648b99f..dcf2051ef2c0 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -193,6 +193,7 @@ config MDIO_THUNDER depends on 64BIT depends on PCI select MDIO_CAVIUM + select MDIO_DEVRES help This driver supports the MDIO interfaces found on Cavium ThunderX SoCs when the MDIO bus device appears as a PCI diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index e0d3310957ff..c99883120556 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd); int bcm_phy_set_eee(struct phy_device *phydev, bool enable) { - int val; + int val, mask = 0; /* Enable EEE at PHY level */ val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL); @@ -209,10 +209,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable) if (val < 0) return val; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_1000T; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_100TX; + if (enable) - val |= (MDIO_EEE_100TX | MDIO_EEE_1000T); + val |= mask; else - val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T); + val &= ~mask; phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 8f241b57fcf6..7d845117abb0 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1119,7 +1119,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) goto out; } dp83640_clock_init(clock, bus); - list_add_tail(&phyter_clocks, &clock->list); + list_add_tail(&clock->list, &phyter_clocks); out: mutex_unlock(&phyter_clocks_lock); @@ -1348,6 +1348,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1355,6 +1356,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1362,6 +1364,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1369,6 +1372,7 @@ static int dp83640_hwtstamp(struct 
phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index b7875b36097f..574a8bca1ec4 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -11,6 +11,18 @@ #define XWAY_MDIO_IMASK 0x19 /* interrupt mask */ #define XWAY_MDIO_ISTAT 0x1A /* interrupt status */ +#define XWAY_MDIO_LED 0x1B /* led control */ + +/* bit 15:12 are reserved */ +#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */ +#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */ +#define XWAY_MDIO_LED_LED1_EN BIT(9) /* Enable the integrated function of LED1 */ +#define XWAY_MDIO_LED_LED0_EN BIT(8) /* Enable the integrated function of LED0 */ +/* bit 7:4 are reserved */ +#define XWAY_MDIO_LED_LED3_DA BIT(3) /* Direct Access to LED3 */ +#define XWAY_MDIO_LED_LED2_DA BIT(2) /* Direct Access to LED2 */ +#define XWAY_MDIO_LED_LED1_DA BIT(1) /* Direct Access to LED1 */ +#define XWAY_MDIO_LED_LED0_DA BIT(0) /* Direct Access to LED0 */ #define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */ #define XWAY_MDIO_INIT_MSRE BIT(14) @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev) /* Clear all pending interrupts */ phy_read(phydev, XWAY_MDIO_ISTAT); + /* Ensure that integrated led function is enabled for all leds */ + err = phy_write(phydev, XWAY_MDIO_LED, + XWAY_MDIO_LED_LED0_EN | + XWAY_MDIO_LED_LED1_EN | + XWAY_MDIO_LED_LED2_EN | + XWAY_MDIO_LED_LED3_EN); + if (err) + return err; + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH, XWAY_MMD_LEDCH_NACS_NONE | XWAY_MMD_LEDCH_SBF_F02HZ | diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a7796134e3be..9dbe625ad447 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -358,7 +358,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) return marvell_config_aneg(phydev); } -#ifdef CONFIG_OF_MDIO +#if IS_ENABLED(CONFIG_OF_MDIO) /* Set and/or override some configuration registers based on the * marvell,reg-init property stored in the of_node for the phydev. 
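The marvell.c hunk above swaps a bare #ifdef for IS_ENABLED(). A minimal sketch of the difference, assuming the usual <linux/kconfig.h> semantics: building CONFIG_OF_MDIO as a module defines CONFIG_OF_MDIO_MODULE rather than CONFIG_OF_MDIO, so the #ifdef form silently compiles the OF reg-init helpers out of that configuration, while IS_ENABLED() covers both:

#ifdef CONFIG_OF_MDIO			/* true only for CONFIG_OF_MDIO=y */
/* OF helpers built */
#endif

#if IS_ENABLED(CONFIG_OF_MDIO)		/* true for CONFIG_OF_MDIO=y or =m */
/* OF helpers built */
#endif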
* @@ -2401,9 +2401,31 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, }, { - .phy_id = MARVELL_PHY_ID_88E6390, + .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, - .name = "Marvell 88E6390", + .name = "Marvell 88E6341 Family", + /* PHY_GBIT_FEATURES */ + .probe = m88e1510_probe, + .config_init = &marvell_config_init, + .config_aneg = &m88e6390_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .resume = &genphy_resume, + .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, + { + .phy_id = MARVELL_PHY_ID_88E6390_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6390 Family", /* PHY_GBIT_FEATURES */ .probe = m88e6390_probe, .config_init = &marvell_config_init, @@ -2441,7 +2463,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, - { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 51b64f087717..663c68ed6ef9 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1154,7 +1154,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, - .read_status = ksz9031_read_status, + .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 001def4509c2..fed3e395f18e 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -3,9 +3,21 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/delay.h> #include <linux/mii.h> #include <linux/phy.h> +/* External Register Control Register */ +#define LAN87XX_EXT_REG_CTL (0x14) +#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000) +#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800) + +/* External Register Read Data Register */ +#define LAN87XX_EXT_REG_RD_DATA (0x15) + +/* External Register Write Data Register */ +#define LAN87XX_EXT_REG_WR_DATA (0x16) + /* Interrupt Source Register */ #define LAN87XX_INTERRUPT_SOURCE (0x18) @@ -14,9 +26,160 @@ #define LAN87XX_MASK_LINK_UP (0x0004) #define LAN87XX_MASK_LINK_DOWN (0x0002) +/* phyaccess nested types */ +#define PHYACC_ATTR_MODE_READ 0 +#define PHYACC_ATTR_MODE_WRITE 1 +#define PHYACC_ATTR_MODE_MODIFY 2 + +#define PHYACC_ATTR_BANK_SMI 0 +#define PHYACC_ATTR_BANK_MISC 1 +#define PHYACC_ATTR_BANK_PCS 2 +#define PHYACC_ATTR_BANK_AFE 3 +#define PHYACC_ATTR_BANK_MAX 7 + #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>" #define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" +struct access_ereg_val { + u8 mode; + u8 bank; + u8 offset; + u16 val; + u16 mask; +}; + +static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank, + u8 offset, u16 val) +{ + u16 ereg = 0; + int rc = 0; + + if (mode > PHYACC_ATTR_MODE_WRITE || 
bank > PHYACC_ATTR_BANK_MAX) + return -EINVAL; + + if (bank == PHYACC_ATTR_BANK_SMI) { + if (mode == PHYACC_ATTR_MODE_WRITE) + rc = phy_write(phydev, offset, val); + else + rc = phy_read(phydev, offset); + return rc; + } + + if (mode == PHYACC_ATTR_MODE_WRITE) { + ereg = LAN87XX_EXT_REG_CTL_WR_CTL; + rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val); + if (rc < 0) + return rc; + } else { + ereg = LAN87XX_EXT_REG_CTL_RD_CTL; + } + + ereg |= (bank << 8) | offset; + + rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg); + if (rc < 0) + return rc; + + if (mode == PHYACC_ATTR_MODE_READ) + rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA); + + return rc; +} + +static int access_ereg_modify_changed(struct phy_device *phydev, + u8 bank, u8 offset, u16 val, u16 mask) +{ + int new = 0, rc = 0; + + if (bank > PHYACC_ATTR_BANK_MAX) + return -EINVAL; + + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val); + if (rc < 0) + return rc; + + new = val | (rc & (mask ^ 0xFFFF)); + rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new); + + return rc; +} + +static int lan87xx_phy_init(struct phy_device *phydev) +{ + static const struct access_ereg_val init[] = { + /* TX Amplitude = 5 */ + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B, + 0x000A, 0x001E}, + /* Clear SMI interrupts */ + {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18, + 0, 0}, + /* Clear MISC interrupts */ + {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08, + 0, 0}, + /* Turn on TC10 Ring Oscillator (ROSC) */ + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20, + 0x0020, 0x0020}, + /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20, + 0x283C, 0}, + /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21, + 0x274F, 0}, + /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep, + * and Wake_In to wake PHY + */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20, + 0x80A7, 0}, + /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer + * to 128 uS + */ + {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24, + 0xF110, 0}, + /* Enable HW Init */ + {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A, + 0x0100, 0x0100}, + }; + int rc, i; + + /* Start manual initialization procedures in Managed Mode */ + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, + 0x1a, 0x0000, 0x0100); + if (rc < 0) + return rc; + + /* Soft Reset the SMI block */ + rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI, + 0x00, 0x8000, 0x8000); + if (rc < 0) + return rc; + + /* Check to see if the self-clearing bit is cleared */ + usleep_range(1000, 2000); + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_SMI, 0x00, 0); + if (rc < 0) + return rc; + if ((rc & 0x8000) != 0) + return -ETIMEDOUT; + + /* PHY Initialization */ + for (i = 0; i < ARRAY_SIZE(init); i++) { + if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) { + rc = access_ereg_modify_changed(phydev, init[i].bank, + init[i].offset, + init[i].val, + init[i].mask); + } else { + rc = access_ereg(phydev, init[i].mode, init[i].bank, + init[i].offset, init[i].val); + } + if (rc < 0) + return rc; + } + + return 0; +} + static int lan87xx_phy_config_intr(struct phy_device *phydev) { int rc, val = 0; @@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev) return rc < 0 ? 
rc : 0; } +static int lan87xx_config_init(struct phy_device *phydev) +{ + int rc = lan87xx_phy_init(phydev); + + return rc < 0 ? rc : 0; +} + static struct phy_driver microchip_t1_phy_driver[] = { { .phy_id = 0x0007c150, @@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = { .features = PHY_BASIC_T1_FEATURES, + .config_init = lan87xx_config_init, .config_aneg = genphy_config_aneg, .ack_interrupt = lan87xx_phy_ack_interrupt, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ea890d802ffe..b0b8a3ce82b6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -345,15 +345,16 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, phydev->autoneg = autoneg; - phydev->speed = speed; + if (autoneg == AUTONEG_DISABLE) { + phydev->speed = speed; + phydev->duplex = duplex; + } linkmode_copy(phydev->advertising, advertising); linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising, autoneg == AUTONEG_ENABLE); - phydev->duplex = duplex; - phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; /* Restart the PHY */ @@ -834,7 +835,7 @@ EXPORT_SYMBOL(phy_free_interrupt); */ void phy_stop(struct phy_device *phydev) { - if (!phy_is_started(phydev)) { + if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); return; @@ -1160,9 +1161,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) /* Restart autonegotiation so the new modes get sent to the * link partner. */ - ret = phy_restart_aneg(phydev); - if (ret < 0) - return ret; + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = phy_restart_aneg(phydev); + if (ret < 0) + return ret; + } } return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0907c3d8d94a..9d0a306f0562 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, if (c45_ids) dev->c45_ids = *c45_ids; dev->irq = bus->irq[addr]; + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); + device_initialize(&mdiodev->dev); dev->state = PHY_DOWN; @@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, ret = phy_request_driver_module(dev, phy_id); } - if (!ret) { - device_initialize(&mdiodev->dev); - } else { - kfree(dev); + if (ret) { + put_device(&mdiodev->dev); dev = ERR_PTR(ret); } @@ -797,8 +797,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); - if (phy_reg < 0) - return -EIO; + if (phy_reg < 0) { + /* returning -ENODEV doesn't stop bus scanning */ + return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; + } *phy_id |= phy_reg; @@ -1419,7 +1421,8 @@ void phy_detach(struct phy_device *phydev) phy_led_triggers_unregister(phydev); - module_put(phydev->mdio.dev.driver->owner); + if (phydev->mdio.dev.driver) + module_put(phydev->mdio.dev.driver->owner); /* If the device had no specific driver before (i.e. 
- it * was using the generic driver), we unbind the device diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index b23fc41896ef..816e59fe68f5 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -9,6 +9,12 @@ #include "sfp.h" +struct sfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); +}; + /** * struct sfp_bus - internal representation of a sfp bus */ @@ -21,6 +27,7 @@ struct sfp_bus { const struct sfp_socket_ops *socket_ops; struct device *sfp_dev; struct sfp *sfp; + const struct sfp_quirk *sfp_quirk; const struct sfp_upstream_ops *upstream_ops; void *upstream; @@ -30,6 +37,71 @@ struct sfp_bus { bool started; }; +static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, + unsigned long *modes) +{ + phylink_set(modes, 2500baseX_Full); +} + +static const struct sfp_quirk sfp_quirks[] = { + { + // Alcatel Lucent G-010S-P can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "G010SP", + .modes = sfp_quirk_2500basex, + }, { + // Alcatel Lucent G-010S-A can operate at 2500base-X, but + // report 3.2GBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "3FE46541AA", + .modes = sfp_quirk_2500basex, + }, { + // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd + // NRZ in their EEPROM + .vendor = "HUAWEI", + .part = "MA5671A", + .modes = sfp_quirk_2500basex, + }, +}; + +static size_t sfp_strlen(const char *str, size_t maxlen) +{ + size_t size, i; + + /* Trailing characters should be filled with space chars */ + for (i = 0, size = 0; i < maxlen; i++) + if (str[i] != ' ') + size = i + 1; + + return size; +} + +static bool sfp_match(const char *qs, const char *str, size_t len) +{ + if (!qs) + return true; + if (strlen(qs) != len) + return false; + return !strncmp(qs, str, len); +} + +static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id) +{ + const struct sfp_quirk *q; + unsigned int i; + size_t vs, ps; + + vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name)); + ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn)); + + for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++) + if (sfp_match(q->vendor, id->base.vendor_name, vs) && + sfp_match(q->part, id->base.vendor_pn, ps)) + return q; + + return NULL; +} /** * sfp_parse_port() - Parse the EEPROM base ID, setting the port type * @bus: a pointer to the &struct sfp_bus structure for the sfp module @@ -233,6 +305,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, phylink_set(modes, 1000baseX_Full); } + if (bus->sfp_quirk) + bus->sfp_quirk->modes(id, modes); + bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); phylink_set(support, Autoneg); @@ -553,6 +628,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); int ret = 0; + bus->sfp_quirk = sfp_lookup_quirk(id); + if (ops && ops->module_insert) ret = ops->module_insert(bus->upstream, id); @@ -566,6 +643,8 @@ void sfp_module_remove(struct sfp_bus *bus) if (ops && ops->module_remove) ops->module_remove(bus->upstream); + + bus->sfp_quirk = NULL; } EXPORT_SYMBOL_GPL(sfp_module_remove); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 272d5773573e..27b67f12ec45 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1970,7 +1970,8 @@ static int sfp_probe(struct platform_device 
*pdev) continue; sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]); - if (!sfp->gpio_irq[i]) { + if (sfp->gpio_irq[i] < 0) { + sfp->gpio_irq[i] = 0; poll = true; continue; } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index a44dd3c8af63..087b01684135 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -492,6 +492,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb->pkt_type != PACKET_HOST) + goto abort; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 3ae70c7e6860..f285422a8071 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1095,10 +1095,9 @@ static long tap_ioctl(struct file *file, unsigned int cmd, return -ENOLINK; } ret = 0; - u = tap->dev->type; + dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name); if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) || - copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) || - put_user(u, &ifr->ifr_hwaddr.sa_family)) + copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa))) ret = -EFAULT; tap_put_tap_dev(tap); rtnl_unlock(); @@ -1113,7 +1112,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, rtnl_unlock(); return -ENOLINK; } - ret = dev_set_mac_address(tap->dev, &sa, NULL); + ret = dev_set_mac_address_user(tap->dev, &sa, NULL); tap_put_tap_dev(tap); rtnl_unlock(); return ret; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4004f98e50d9..0eb894b7c0bd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -287,7 +287,7 @@ inst_rollback: for (i--; i >= 0; i--) __team_option_inst_del_option(team, dst_opts[i]); - i = option_count - 1; + i = option_count; alloc_rollback: for (i--; i >= 0; i--) kfree(dst_opts[i]); @@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind) struct team_mode_item *mitem; const struct team_mode *mode = NULL; + if (!try_module_get(THIS_MODULE)) + return NULL; + spin_lock(&mode_list_lock); mitem = __find_mode(kind); if (!mitem) { @@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind) } spin_unlock(&mode_list_lock); + module_put(THIS_MODULE); return mode; } @@ -987,7 +991,8 @@ static void __team_compute_features(struct team *team) unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; - list_for_each_entry(port, &team->port_list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { vlan_features = netdev_increment_features(vlan_features, port->dev->vlan_features, TEAM_VLAN_FEATURES); @@ -1001,6 +1006,7 @@ static void __team_compute_features(struct team *team) if (port->dev->hard_header_len > max_hard_header_len) max_hard_header_len = port->dev->hard_header_len; } + rcu_read_unlock(); team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | @@ -1016,9 +1022,7 @@ static void __team_compute_features(struct team *team) static void team_compute_features(struct team *team) { - mutex_lock(&team->lock); __team_compute_features(team); - mutex_unlock(&team->lock); netdev_change_features(team->dev); } @@ -2107,6 +2111,7 @@ static void team_setup_by_port(struct net_device *dev, dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; + dev->needed_headroom = port_dev->needed_headroom; dev->addr_len = port_dev->addr_len; dev->mtu = port_dev->mtu; memcpy(dev->broadcast, 
port_dev->broadcast, port_dev->addr_len); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 16f5cb249ed5..7c40ae058e6d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -68,6 +68,14 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> +#include <linux/ieee802154.h> +#include <linux/if_ltalk.h> +#include <uapi/linux/if_fddi.h> +#include <uapi/linux/if_hippi.h> +#include <uapi/linux/if_fc.h> +#include <net/ax25.h> +#include <net/rose.h> +#include <net/6lowpan.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> @@ -1469,7 +1477,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, int i; if (it->nr_segs > MAX_SKB_FRAGS + 1) - return ERR_PTR(-ENOMEM); + return ERR_PTR(-EMSGSIZE); local_bh_disable(); skb = napi_get_frags(&tfile->napi); @@ -1908,8 +1916,11 @@ drop: skb->dev = tun->dev; break; case IFF_TAP: - if (!frags) - skb->protocol = eth_type_trans(skb, tun->dev); + if (frags && !pskb_may_pull(skb, ETH_HLEN)) { + err = -ENOMEM; + goto drop; + } + skb->protocol = eth_type_trans(skb, tun->dev); break; } @@ -1925,6 +1936,7 @@ drop: skb_reset_network_header(skb); skb_probe_transport_header(skb); + skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { struct bpf_prog *xdp_prog; @@ -1965,9 +1977,12 @@ drop: } if (frags) { + u32 headlen; + /* Exercise flow dissector code path. */ - u32 headlen = eth_get_headlen(tun->dev, skb->data, - skb_headlen(skb)); + skb_push(skb, ETH_HLEN); + headlen = eth_get_headlen(tun->dev, skb->data, + skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { this_cpu_inc(tun->pcpu_stats->rx_dropped); @@ -2021,12 +2036,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t result; + int noblock = 0; if (!tun) return -EBADFD; - result = tun_get_user(tun, tfile, NULL, from, - file->f_flags & O_NONBLOCK, false); + if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) + noblock = 1; + + result = tun_get_user(tun, tfile, NULL, from, noblock, false); tun_put(tun); return result; @@ -2247,10 +2265,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t len = iov_iter_count(to), ret; + int noblock = 0; if (!tun) return -EBADFD; - ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); + + if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) + noblock = 1; + + ret = tun_do_read(tun, tfile, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; @@ -2492,6 +2515,7 @@ build: skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); skb_probe_transport_header(skb); + skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { err = do_xdp_generic(xdp_prog, skb); @@ -2503,7 +2527,6 @@ build: !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); - skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); /* No need for get_cpu_ptr() here since this function is @@ -3028,6 +3051,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, return __tun_set_ebpf(tun, prog_p, prog); } +/* Return correct value for tun->dev->addr_len based on tun->dev->type. 
*/ +static unsigned char tun_get_addr_len(unsigned short type) +{ + switch (type) { + case ARPHRD_IP6GRE: + case ARPHRD_TUNNEL6: + return sizeof(struct in6_addr); + case ARPHRD_IPGRE: + case ARPHRD_TUNNEL: + case ARPHRD_SIT: + return 4; + case ARPHRD_ETHER: + return ETH_ALEN; + case ARPHRD_IEEE802154: + case ARPHRD_IEEE802154_MONITOR: + return IEEE802154_EXTENDED_ADDR_LEN; + case ARPHRD_PHONET_PIPE: + case ARPHRD_PPP: + case ARPHRD_NONE: + return 0; + case ARPHRD_6LOWPAN: + return EUI64_ADDR_LEN; + case ARPHRD_FDDI: + return FDDI_K_ALEN; + case ARPHRD_HIPPI: + return HIPPI_ALEN; + case ARPHRD_IEEE802: + return FC_ALEN; + case ARPHRD_ROSE: + return ROSE_ADDR_LEN; + case ARPHRD_NETROM: + return AX25_ADDR_LEN; + case ARPHRD_LOCALTLK: + return LTALK_ALEN; + default: + return 0; + } +} + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { @@ -3183,6 +3245,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EBUSY; } else { tun->dev->type = (int) arg; + tun->dev->addr_len = tun_get_addr_len(tun->dev->type); tun_debug(KERN_INFO, tun, "linktype set to %d\n", tun->dev->type); ret = 0; @@ -3209,7 +3272,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, case SIOCGIFHWADDR: /* Get hw address */ memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); - ifr.ifr_hwaddr.sa_family = tun->dev->type; + dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; @@ -3219,7 +3282,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", ifr.ifr_hwaddr.sa_data); - ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL); + ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index e39f41efda3e..7bc6e8f856fe 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; } diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index af3994e0853b..6101d82102e7 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -198,6 +198,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index daa54486ab09..ec4b148b1e6c 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -295,12 +295,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, int ret; if (2 == size) { - u16 buf; + u16 buf = 0; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le16_to_cpus(&buf); *((u16 *)data) = buf; } else if (4 == size) { - u32 buf; + u32 buf = 0; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le32_to_cpus(&buf); *((u32 *)data) = buf; @@ -1387,10 +1387,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = 
pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1399,8 +1399,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); @@ -1718,6 +1719,7 @@ static const struct driver_info belkin_info = { .status = ax88179_status, .link_reset = ax88179_link_reset, .reset = ax88179_reset, + .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index bcabd39d136a..f778172356e6 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i err = register_netdev(dev); if (err) { + /* Set disconnected flag so that disconnect() returns early. */ + pnd->disconnected = 1; usb_driver_release_interface(&usbpn_driver, data_intf); goto out; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index fe630438f67b..8325f6d65dcc 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -787,6 +787,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM, @@ -808,14 +815,21 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, - /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index c2c82e6391b4..cbe7f35eac98 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1126,7 +1126,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * accordingly. Otherwise, we should check here. 
*/ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) - delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); + delayed_ndp_size = ctx->max_ndp_size + + max_t(u32, + ctx->tx_ndp_modulus, + ctx->tx_modulus + ctx->tx_remainder) - 1; else delayed_ndp_size = 0; @@ -1307,7 +1310,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && skb_out->len > ctx->min_tx_pkt) { padding_count = ctx->tx_curr_size - skb_out->len; - skb_put_zero(skb_out, padding_count); + if (!WARN_ON(padding_count > ctx->tx_curr_size)) + skb_put_zero(skb_out, padding_count); } else if (skb_out->len < ctx->tx_curr_size && (skb_out->len % dev->maxpacket) == 0) { skb_put_u8(skb_out, 0); /* force short packet */ @@ -1625,9 +1629,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ - netif_info(dev, link, dev->net, - "network connection: %sconnected\n", - !!event->wValue ? "" : "dis"); usbnet_link_change(dev, !!event->wValue, 0); break; diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index b91f92e4e5f2..915ac75b55fc 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -625,6 +625,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 74849da031fa..02de9480d3f0 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; } -static int get_free_serial_index(void) +static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; @@ -619,8 +619,10 @@ static int get_free_serial_index(void) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { + serial_table[index] = serial->parent; + serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return index; + return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -629,15 +631,12 @@ static int get_free_serial_index(void) return -1; } -static void set_serial_by_index(unsigned index, struct hso_serial *serial) +static void release_minor(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); - if (serial) - serial_table[index] = serial->parent; - else - serial_table[index] = NULL; + serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } @@ -1389,8 +1388,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); @@ -2229,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); + release_minor(serial); } static void 
hso_serial_common_free(struct hso_serial *serial) @@ -2252,22 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial) static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { - int minor; int i; tty_port_init(&serial->port); - minor = get_free_serial_index(); - if (minor < 0) - goto exit; + if (obtain_minor(serial)) + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, minor, &serial->parent->interface->dev, + tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) { + release_minor(serial); + goto exit2; + } - /* fill in specific data for later use */ - serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2309,6 +2310,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } @@ -2664,9 +2666,6 @@ static struct hso_device *hso_create_bulk_serial_device( serial->write_data = hso_std_serial_write_data; - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2723,9 +2722,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -3109,8 +3105,7 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); - kref_put(&serial_table[i]->ref, hso_serial_ref_free); - set_serial_by_index(i, NULL); + kref_put(&serial->parent->ref, hso_serial_ref_free); } } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8c01fbf68a89..345576f1a747 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -59,7 +59,7 @@ #define IPHETH_USBINTF_SUBCLASS 253 #define IPHETH_USBINTF_PROTO 1 -#define IPHETH_BUF_SIZE 1516 +#define IPHETH_BUF_SIZE 1514 #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0170a441208a..71cc5b63d8ce 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -377,10 +377,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2868,78 +2864,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch 
(e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3708,6 +3638,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3756,6 +3687,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3767,18 +3726,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3791,6 +3739,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6c738a271257..6508d70056b3 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -441,13 +441,6 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c goto err; } - /* we 
don't want to modify a running netdev */ - if (netif_running(dev->net)) { - netdev_err(dev->net, "Cannot change a running device\n"); - ret = -EBUSY; - goto err; - } - ret = qmimux_register_device(dev->net, mux_id); if (!ret) { info->flags |= QMI_WWAN_FLAG_MUX; @@ -477,13 +470,6 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c if (!rtnl_trylock()) return restart_syscall(); - /* we don't want to modify a running netdev */ - if (netif_running(dev->net)) { - netdev_err(dev->net, "Cannot change a running device\n"); - ret = -EBUSY; - goto err; - } - del_dev = qmimux_find_dev(dev, mux_id); if (!del_dev) { netdev_err(dev->net, "mux_id not present\n"); @@ -1058,6 +1044,7 @@ static const struct usb_device_id products[] = { {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ /* 3. Combined interface devices matching on interface number */ @@ -1092,7 +1079,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9011, 4)}, {QMI_FIXED_INTF(0x05c6, 0x9021, 1)}, {QMI_FIXED_INTF(0x05c6, 0x9022, 2)}, - {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */ + {QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */ {QMI_FIXED_INTF(0x05c6, 0x9026, 3)}, {QMI_FIXED_INTF(0x05c6, 0x902e, 5)}, {QMI_FIXED_INTF(0x05c6, 0x9031, 5)}, @@ -1279,6 +1266,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */ + {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, @@ -1324,12 +1312,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ @@ -1344,12 +1334,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ + {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ 
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ + {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -1359,6 +1351,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ @@ -1368,10 +1361,12 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ + {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 44ea5dcc43fd..f6d643ecaf39 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2836,29 +2836,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } -static void r8153_mac_clk_spd(struct r8152 *tp, bool enable) -{ - /* MAC clock speed down */ - if (enable) { - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, - ALDPS_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, - EEE_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, - PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | - U1U2_SPDWN_EN | L1_SPDWN_EN); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, - PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN | - TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN | - TP1000_SPDWN_EN); - } else { - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); - } -} - static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@ -3158,11 +3135,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); - r8153_mac_clk_spd(tp, true); rtl_runtime_suspend_enable(tp, true); } else { rtl_runtime_suspend_enable(tp, false); - r8153_mac_clk_spd(tp, false); switch (tp->version) { case RTL_VER_03: @@ -3727,7 +3702,6 @@ static void r8153_first_init(struct r8152 *tp) u32 ocp_data; int i; - r8153_mac_clk_spd(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); @@ -3789,8 +3763,6 @@ static void r8153_enter_oob(struct r8152 *tp) u32 ocp_data; int i; - r8153_mac_clk_spd(tp, true); - ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); @@ -4498,9 +4470,14 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); + /* MAC clock speed down */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); + r8153_power_cut_en(tp, false); r8153_u1u2en(tp, true); - r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); @@ -5552,7 +5529,10 @@ static int rtl_ops_init(struct r8152 *tp) ops->in_nway = rtl8153_in_nway; ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; - tp->rx_buf_sz = 32 * 1024; + if (tp->udev->speed < USB_SPEED_SUPER) + tp->rx_buf_sz = 16 * 1024; + else + tp->rx_buf_sz = 32 * 1024; tp->eee_en = true; tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; break; @@ -5837,6 +5817,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, @@ -5844,6 +5825,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index bd9c07888ebb..f9b359d4e293 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } - msleep(20); + msleep(40); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; @@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) reply_len = sizeof *phym; retval = rndis_query(dev, intf, u.buf, RNDIS_OID_GEN_PHYSICAL_MEDIUM, - 0, (void **) &phym, &reply_len); + reply_len, (void **)&phym, &reply_len); if (retval != 0 || !phym) { /* OID is optional so don't fail here. */ phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 13e51ccf0214..491625c1c308 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -274,12 +274,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) return 1; } -static inline void set_ethernet_addr(rtl8150_t * dev) +static void set_ethernet_addr(rtl8150_t *dev) { - u8 node_id[6]; + u8 node_id[ETH_ALEN]; + int ret; - get_registers(dev, IDR, sizeof(node_id), node_id); - memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); + ret = get_registers(dev, IDR, sizeof(node_id), node_id); + + if (ret == sizeof(node_id)) { + ether_addr_copy(dev->netdev->dev_addr, node_id); + } else { + eth_hw_addr_random(dev->netdev); + netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", + dev->netdev->dev_addr); + } } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 355be77f4241..bb4ccbda031a 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1324,7 +1331,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); if (pdata) { - cancel_delayed_work(&pdata->carrier_check); + cancel_delayed_work_sync(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9f3c839f9e5f..016bf8c04528 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -254,8 +254,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < 
rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - if (rcv_xdp) - skb_record_rx_queue(skb, rxq); + skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); @@ -389,6 +388,13 @@ static int veth_select_rxq(struct net_device *dev) return smp_processor_id() % dev->real_num_rx_queues; } +static struct net_device *veth_peer_dev(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + /* Callers must be under RCU read side. */ + return rcu_dereference(priv->peer); +} + static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -510,13 +516,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { void *hard_start = frame->data - frame->headroom; - void *head = hard_start - sizeof(struct xdp_frame); int len = frame->len, delta = 0; struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; unsigned int headroom; struct sk_buff *skb; + /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */ + hard_start -= sizeof(struct xdp_frame); + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { @@ -538,7 +546,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, break; case XDP_TX: orig_frame = *frame; - xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); @@ -550,7 +557,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; case XDP_REDIRECT: orig_frame = *frame; - xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { frame = &orig_frame; @@ -572,7 +578,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, rcu_read_unlock(); headroom = sizeof(struct xdp_frame) + frame->headroom - delta; - skb = veth_build_skb(head, headroom, len, 0); + skb = veth_build_skb(hard_start, headroom, len, 0); if (!skb) { xdp_return_frame(frame); goto err; @@ -1176,6 +1182,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_set_rx_headroom = veth_set_rx_headroom, .ndo_bpf = veth_xdp, .ndo_xdp_xmit = veth_xdp_xmit, + .ndo_get_peer_dev = veth_peer_dev, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5a635f028bdc..870806bd8d6d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -26,7 +26,7 @@ static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); -static bool csum = true, gso = true, napi_tx = true; +static bool csum = true, gso = true, napi_tx; module_param(csum, bool, 0444); module_param(gso, bool, 0444); module_param(napi_tx, bool, 0644); @@ -63,6 +63,11 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_CSUM }; +#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ + (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ + (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ + (1ULL << VIRTIO_NET_F_GUEST_UFO)) + struct virtnet_stat_desc { char desc[ETH_GSTRING_LEN]; size_t offset; @@ -190,6 +195,9 @@ struct virtnet_info { /* # of XDP queue pairs currently used by the driver */ u16 xdp_queue_pairs; + /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */ + bool xdp_enabled; + /* I like... big packets and I cannot lie! 
*/ bool big_packets; @@ -371,7 +379,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, - bool hdr_valid) + bool hdr_valid, unsigned int metasize) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -393,6 +401,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); + /* hdr_valid means no XDP, so we can copy the vnet header */ if (hdr_valid) memcpy(hdr, p, hdr_len); @@ -405,6 +414,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, copy = skb_tailroom(skb); skb_put_data(skb, p, copy); + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + len -= copy; offset += copy; @@ -450,10 +464,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct virtio_net_hdr_mrg_rxbuf *hdr; int err; - /* virtqueue want to use data area in-front of packet */ - if (unlikely(xdpf->metasize > 0)) - return -EOPNOTSUPP; - if (unlikely(xdpf->headroom < vi->hdr_len)) return -EOVERFLOW; @@ -474,12 +484,42 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, return 0; } -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) -{ - unsigned int qp; +/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on + * the current cpu, so it does not need to be locked. + * + * Here we use a macro instead of inline functions because we have to deal with + * three issues at the same time: 1. the choice of sq. 2. judge and execute the + * lock/unlock of txq 3. make sparse happy. It is difficult for two inline + * functions to perfectly solve these three problems at the same time. + */ +#define virtnet_xdp_get_sq(vi) ({ \ + int cpu = smp_processor_id(); \ + struct netdev_queue *txq; \ + typeof(vi) v = (vi); \ + unsigned int qp; \ + \ + if (v->curr_queue_pairs > nr_cpu_ids) { \ + qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ + qp += cpu; \ + txq = netdev_get_tx_queue(v->dev, qp); \ + __netif_tx_acquire(txq); \ + } else { \ + qp = cpu % v->curr_queue_pairs; \ + txq = netdev_get_tx_queue(v->dev, qp); \ + __netif_tx_lock(txq, cpu); \ + } \ + v->sq + qp; \ +}) - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - return &vi->sq[qp]; +#define virtnet_xdp_put_sq(vi, q) { \ + struct netdev_queue *txq; \ + typeof(vi) v = (vi); \ + \ + txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \ + if (v->curr_queue_pairs > nr_cpu_ids) \ + __netif_tx_release(txq); \ + else \ + __netif_tx_unlock(txq); \ } static int virtnet_xdp_xmit(struct net_device *dev, @@ -505,7 +545,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, if (!xdp_prog) return -ENXIO; - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { ret = -EINVAL; @@ -553,12 +593,13 @@ out: sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); + virtnet_xdp_put_sq(vi, sq); return ret; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { - return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; + return vi->xdp_enabled ?
VIRTIO_XDP_HEADROOM : 0; } /* We copy the packet for XDP in the following cases: @@ -582,8 +623,12 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, int page_off, unsigned int *len) { - struct page *page = alloc_page(GFP_ATOMIC); + struct page *page; + if (*len > PAGE_SIZE - page_off) + return NULL; + + page = alloc_page(GFP_ATOMIC); if (!page) return NULL; @@ -644,6 +689,7 @@ static struct sk_buff *receive_small(struct net_device *dev, unsigned int delta = 0; struct page *xdp_page; int err; + unsigned int metasize = 0; len -= vi->hdr_len; stats->bytes += len; @@ -683,8 +729,8 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; xdp.data = xdp.data_hard_start + xdp_headroom; - xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -695,6 +741,7 @@ static struct sk_buff *receive_small(struct net_device *dev, /* Recalculate length in case bpf program changed it */ delta = orig_data - xdp.data; len = xdp.data_end - xdp.data; + metasize = xdp.data - xdp.data_meta; break; case XDP_TX: stats->xdp_tx++; @@ -740,6 +787,9 @@ static struct sk_buff *receive_small(struct net_device *dev, memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); } /* keep zeroed vnet hdr since packet was changed by bpf */ + if (metasize) + skb_metadata_set(skb, metasize); + err: return skb; @@ -760,8 +810,8 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_rq_stats *stats) { struct page *page = buf; - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, - PAGE_SIZE, true); + struct sk_buff *skb = + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -793,6 +843,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); int err; + unsigned int metasize = 0; head_skb = NULL; stats->bytes += len - vi->hdr_len; @@ -839,8 +890,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, data = page_address(xdp_page) + offset; xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; xdp.data = data + vi->hdr_len; - xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + (len - vi->hdr_len); + xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -848,24 +899,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, switch (act) { case XDP_PASS: - /* recalculate offset to account for any header - * adjustments. Note other cases do not build an - * skb and avoid using offset - */ - offset = xdp.data - - page_address(xdp_page) - vi->hdr_len; + metasize = xdp.data - xdp.data_meta; - /* recalculate len if xdp.data or xdp.data_end were - * adjusted + /* recalculate offset to account for any header + * adjustments and minus the metasize to copy the + * metadata in page_to_skb(). Note other cases do not + * build an skb and avoid using offset */ - len = xdp.data_end - xdp.data + vi->hdr_len; + offset = xdp.data - page_address(xdp_page) - + vi->hdr_len - metasize; + + /* recalculate len if xdp.data, xdp.data_end or + * xdp.data_meta were adjusted + */ + len = xdp.data_end - xdp.data + vi->hdr_len + metasize; /* We can only create skb based on xdp_page. 
*/ if (unlikely(xdp_page != page)) { rcu_read_unlock(); put_page(page); - head_skb = page_to_skb(vi, rq, xdp_page, - offset, len, - PAGE_SIZE, false); + head_skb = page_to_skb(vi, rq, xdp_page, offset, + len, PAGE_SIZE, false, + metasize); return head_skb; } break; @@ -921,7 +975,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_skb; } - head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, + metasize); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1231,9 +1286,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, break; } while (rq->vq->num_free); if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { - u64_stats_update_begin(&rq->stats.syncp); + unsigned long flags; + + flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); rq->stats.kicks++; - u64_stats_update_end(&rq->stats.syncp); + u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } return !oom; @@ -1406,12 +1463,16 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) return; if (__netif_tx_trylock(txq)) { - free_old_xmit_skbs(sq, true); + do { + virtqueue_disable_cb(sq->vq); + free_old_xmit_skbs(sq, true); + } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); } - - if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) - netif_tx_wake_queue(txq); } static int virtnet_poll(struct napi_struct *napi, int budget) @@ -1435,12 +1496,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) xdp_do_flush_map(); if (xdp_xmit & VIRTIO_XDP_TX) { - sq = virtnet_xdp_sq(vi); + sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); } + virtnet_xdp_put_sq(vi, sq); } return received; @@ -1481,6 +1543,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) struct virtnet_info *vi = sq->vq->vdev->priv; unsigned int index = vq2txq(sq->vq); struct netdev_queue *txq; + int opaque; + bool done; if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { /* We don't need to enable cb for XDP */ @@ -1490,14 +1554,30 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(sq, true); - __netif_tx_unlock(txq); - - virtqueue_napi_complete(napi, sq->vq, 0); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) netif_tx_wake_queue(txq); + opaque = virtqueue_enable_cb_prepare(sq->vq); + + done = napi_complete_done(napi, 0); + if (!done) + virtqueue_disable_cb(sq->vq); + + __netif_tx_unlock(txq); + + if (done) { + if (unlikely(virtqueue_poll(sq->vq, opaque))) { + if (napi_schedule_prep(napi)) { + __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); + __netif_tx_unlock(txq); + __napi_schedule(napi); + } + } + } return 0; } @@ -1525,7 +1605,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (virtio_net_hdr_from_skb(skb, &hdr->hdr, virtio_is_little_endian(vi->vdev), false, 0)) - BUG(); + return -EPROTO; if (vi->mergeable_rx_bufs) hdr->num_buffers = 0; @@ -1559,10 +1639,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. 
*/ - free_old_xmit_skbs(sq, false); - - if (use_napi && kick) - virtqueue_enable_cb_delayed(sq->vq); + do { + if (use_napi) + virtqueue_disable_cb(sq->vq); + free_old_xmit_skbs(sq, false); + } while (use_napi && kick && + unlikely(!virtqueue_enable_cb_delayed(sq->vq))); /* timestamp packet in software */ skb_tx_timestamp(skb); @@ -1632,6 +1714,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, { struct scatterlist *sgs[4], hdr, stat; unsigned out_num = 0, tmp; + unsigned long timeout = jiffies + 3 * HZ; + int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); @@ -1651,7 +1735,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); - virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + if (ret < 0) { + dev_warn(&vi->vdev->dev, + "Failed to add sgs for command vq: %d\n", ret); + return false; + } if (unlikely(!virtqueue_kick(vi->cvq))) return vi->ctrl->status == VIRTIO_NET_OK; @@ -1660,8 +1749,14 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && - !virtqueue_is_broken(vi->cvq)) + !virtqueue_is_broken(vi->cvq)) { + if (time_after(jiffies, timeout)) { + dev_err(&vi->vdev->dev, + "%s: timeout waiting for response\n", __func__); + break; + } cpu_relax(); + } return vi->ctrl->status == VIRTIO_NET_OK; } @@ -2065,14 +2160,16 @@ static int virtnet_set_channels(struct net_device *dev, get_online_cpus(); err = _virtnet_set_queues(vi, queue_pairs); - if (!err) { - netif_set_real_num_tx_queues(dev, queue_pairs); - netif_set_real_num_rx_queues(dev, queue_pairs); - - virtnet_set_affinity(vi); + if (err) { + put_online_cpus(); + goto err; } + virtnet_set_affinity(vi); put_online_cpus(); + netif_set_real_num_tx_queues(dev, queue_pairs); + netif_set_real_num_rx_queues(dev, queue_pairs); + err: return err; } @@ -2414,7 +2511,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { - NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); return -EOPNOTSUPP; } @@ -2435,21 +2532,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { - NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); - netdev_warn(dev, "request %i queues but max is %i\n", - curr_qp + xdp_qp, vi->max_queue_pairs); - return -ENOMEM; + netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", + curr_qp + xdp_qp, vi->max_queue_pairs); + xdp_qp = 0; } old_prog = rtnl_dereference(vi->rq[0].xdp_prog); if (!prog && !old_prog) return 0; - if (prog) { - prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, vi->max_queue_pairs - 1); /* Make sure NAPI is not using any XDP TX queues for RX.
*/ if (netif_running(dev)) { @@ -2475,11 +2568,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, vi->xdp_queue_pairs = xdp_qp; if (prog) { + vi->xdp_enabled = true; for (i = 0; i < vi->max_queue_pairs; i++) { rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); } + } else { + vi->xdp_enabled = false; } for (i = 0; i < vi->max_queue_pairs; i++) { @@ -2563,14 +2659,15 @@ static int virtnet_set_features(struct net_device *dev, u64 offloads; int err; - if ((dev->features ^ features) & NETIF_F_LRO) { - if (vi->xdp_queue_pairs) + if ((dev->features ^ features) & NETIF_F_GRO_HW) { + if (vi->xdp_enabled) return -EBUSY; - if (features & NETIF_F_LRO) + if (features & NETIF_F_GRO_HW) offloads = vi->guest_offloads_capable; else - offloads = 0; + offloads = vi->guest_offloads_capable & + ~GUEST_OFFLOAD_GRO_HW_MASK; err = virtnet_set_guest_offloads(vi, offloads); if (err) @@ -3046,9 +3143,9 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_RXCSUM; if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) - dev->features |= NETIF_F_LRO; + dev->features |= NETIF_F_GRO_HW; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) - dev->hw_features |= NETIF_F_LRO; + dev->hw_features |= NETIF_F_GRO_HW; dev->vlan_features = dev->features; @@ -3106,6 +3203,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev_err(&vdev->dev, "device MTU appears to have changed it is now %d < %d", mtu, dev->min_mtu); + err = -EINVAL; goto free; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 216acf37ca7c..a06e6ab453f5 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -861,7 +861,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, switch (protocol) { case IPPROTO_TCP: - ctx->l4_hdr_size = tcp_hdrlen(skb); + ctx->l4_hdr_size = skb->encapsulation ? 
inner_tcp_hdrlen(skb) : + tcp_hdrlen(skb); break; case IPPROTO_UDP: ctx->l4_hdr_size = sizeof(struct udphdr); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 0a38c76688ab..5e2571d23ab9 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -702,6 +702,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!p) return 0; + if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) + return 0; while (n--) p[n] = rssConf->indTable[n]; return 0; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index b8228f50bc94..14dfb9278345 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, fl6.flowi6_proto = iph->nexthdr; fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; - dst = ip6_route_output(net, NULL, &fl6); - if (dst == dst_null) + dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst) || dst == dst_null) goto err; skb_dst_drop(skb); @@ -332,8 +332,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -static int vrf_finish_direct(struct net *net, struct sock *sk, - struct sk_buff *skb) +static void vrf_finish_direct(struct sk_buff *skb) { struct net_device *vrf_dev = skb->dev; @@ -352,7 +351,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk, skb_pull(skb, ETH_HLEN); } - return 1; + /* reset skb device */ + nf_reset_ct(skb); } #if IS_ENABLED(CONFIG_IPV6) @@ -431,15 +431,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, return skb; } +static int vrf_output6_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + vrf_finish_direct(skb); + + return vrf_ip6_local_out(net, sk, skb); +} + static int vrf_output6_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { + int err = 1; + skb->protocol = htons(ETH_P_IPV6); - return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb->dev, - vrf_finish_direct, - !(IPCB(skb)->flags & IPSKB_REROUTED)); + if (!(IPCB(skb)->flags & IPSKB_REROUTED)) + err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, + NULL, skb->dev, vrf_output6_direct_finish); + + if (likely(err == 1)) + vrf_finish_direct(skb); + + return err; +} + +static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + int err; + + err = vrf_output6_direct(net, sk, skb); + if (likely(err == 1)) + err = vrf_ip6_local_out(net, sk, skb); + + return err; } static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, @@ -452,18 +478,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, - skb, NULL, vrf_dev, vrf_output6_direct); + skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); if (likely(err == 1)) err = vrf_output6_direct(net, sk, skb); - /* reset skb device */ if (likely(err == 1)) - nf_reset_ct(skb); - else - skb = NULL; + return skb; - return skb; + return NULL; } static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, @@ -474,7 +497,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); return vrf_ip6_out_redirect(vrf_dev, skb); @@ -642,15 +666,41 @@ static struct sk_buff 
*vrf_ip_out_redirect(struct net_device *vrf_dev, return skb; } +static int vrf_output_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + vrf_finish_direct(skb); + + return vrf_ip_local_out(net, sk, skb); +} + static int vrf_output_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { + int err = 1; + skb->protocol = htons(ETH_P_IP); - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb->dev, - vrf_finish_direct, - !(IPCB(skb)->flags & IPSKB_REROUTED)); + if (!(IPCB(skb)->flags & IPSKB_REROUTED)) + err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, + NULL, skb->dev, vrf_output_direct_finish); + + if (likely(err == 1)) + vrf_finish_direct(skb); + + return err; +} + +static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + int err; + + err = vrf_output_direct(net, sk, skb); + if (likely(err == 1)) + err = vrf_ip_local_out(net, sk, skb); + + return err; } static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, @@ -663,18 +713,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, - skb, NULL, vrf_dev, vrf_output_direct); + skb, NULL, vrf_dev, vrf_ip_out_direct_finish); if (likely(err == 1)) err = vrf_output_direct(net, sk, skb); - /* reset skb device */ if (likely(err == 1)) - nf_reset_ct(skb); - else - skb = NULL; + return skb; - return skb; + return NULL; } static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, @@ -686,7 +733,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); return vrf_ip_out_redirect(vrf_dev, skb); @@ -990,11 +1038,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, int orig_iif = skb->skb_iif; bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool is_ndisc = ipv6_ndisc_frame(skb); + bool is_ll_src; /* loopback, multicast & non-ND link-local traffic; do not push through - * packet taps again. Reset pkt_type for upper layers to process skb + * packet taps again. Reset pkt_type for upper layers to process skb. 
+ * For packets with an lladdr src, however, skip so that the dst can be + * determined at input using the original ifindex in the case that daddr + * needs strict routing */ - if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { + is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL; + if (skb->pkt_type == PACKET_LOOPBACK || + (need_strict && !is_ndisc && !is_ll_src)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 93690f77ec9c..8443df79fabc 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1225,6 +1225,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -1237,12 +1238,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -1924,6 +1928,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, ns_olen = request->len - skb_network_offset(request) - sizeof(struct ipv6hdr) - sizeof(*ns); for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { + if (!ns->opt[i + 1]) { + kfree_skb(reply); + return NULL; + } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; @@ -2542,7 +2550,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2582,7 +2590,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ?
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), @@ -2859,8 +2867,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true, true); + if (is_zero_ether_addr(f->eth_addr) && + f->vni == vxlan->cfg.vni) + continue; + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } @@ -3144,7 +3154,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); if (id >= VXLAN_N_VID) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID], "VXLAN ID must be lower than 16777216"); return -ERANGE; } @@ -3155,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], = nla_data(data[IFLA_VXLAN_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE], "Invalid source port range"); return -EINVAL; } @@ -3165,7 +3175,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]); if (df < 0 || df > VXLAN_DF_MAX) { - NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF], + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF], "Invalid DF attribute"); return -EINVAL; } @@ -3528,6 +3538,9 @@ static void vxlan_config_apply(struct net_device *dev, dev->gso_max_segs = lowerdev->gso_max_segs; needed_headroom = lowerdev->hard_header_len; + needed_headroom += lowerdev->needed_headroom; + + dev->needed_tailroom = lowerdev->needed_tailroom; max_mtu = lowerdev->mtu - (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); @@ -3607,8 +3620,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, if (dst->remote_ifindex) { remote_dev = __dev_get_by_index(net, dst->remote_ifindex); - if (!remote_dev) + if (!remote_dev) { + err = -ENODEV; goto errout; + } err = netdev_upper_dev_link(remote_dev, dev, extack); if (err) @@ -4409,7 +4424,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan, *next; struct net_device *dev, *aux; - unsigned int h; for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &vxlan_link_ops) @@ -4423,14 +4437,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) unregister_netdevice_queue(vxlan->dev, head); } - for (h = 0; h < PORT_HASH_SIZE; ++h) - WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); } static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) { struct net *net; LIST_HEAD(list); + unsigned int h; rtnl_lock(); list_for_each_entry(net, net_list, exit_list) @@ -4438,6 +4451,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + + for (h = 0; h < PORT_HASH_SIZE; ++h) + WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); + } } static struct pernet_operations vxlan_net_ops = { diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index dd1a147f2971..0d6e1829e0ac 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -200,7 +200,7 @@ config WANXL_BUILD_FIRMWARE depends on WANXL && !PREVENT_FIRMWARE_BUILD help Allows you to rebuild firmware run by the QUICC processor. - It requires as68k, ld68k and hexdump programs. + It requires m68k toolchains and hexdump programs. You should never need this option, say N. @@ -282,6 +282,7 @@ config SLIC_DS26522 tristate "Slic Maxim ds26522 card support" depends on SPI depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST + select BITREVERSE help This module initializes and configures the slic maxim card in T1 or E1 mode. 
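Context for the "select BITREVERSE" hunk above: the ds26522 driver code (slic_ds26522.c) passes register values through bitrev8(), which is backed by byte_rev_table in lib/bitrev.c and is only built when CONFIG_BITREVERSE is enabled, so without the select the module fails to link. A minimal sketch of the dependency; the helper name here is hypothetical, not taken from the driver:

#include <linux/bitrev.h>

/* bitrev8() reverses the bit order of one byte via byte_rev_table,
 * e.g. 0x01 becomes 0x80; the table only exists with CONFIG_BITREVERSE=y.
 */
static u8 ds26522_bitswap(u8 val)
{
	return bitrev8(val);
}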
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 701f5d2fe3b6..cf7a0a65aae8 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -40,17 +40,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) - AS68K = $(AS) - LD68K = $(LD) + M68KCC = $(CC) + M68KLD = $(LD) else - AS68K = as68k - LD68K = ld68k + M68KCC = $(CROSS_COMPILE_M68K)gcc + M68KLD = $(CROSS_COMPILE_M68K)ld endif quiet_cmd_build_wanxlfw = BLD FW $@ cmd_build_wanxlfw = \ - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \ - $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ + $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \ + $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index af539151d663..61428076f32e 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file, chan->tx_status = 1; spin_unlock_irqrestore(&cosa->lock, flags); up(&chan->wsem); + kfree(kbuf); return -ERESTARTSYS; } } diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 4ad0a0c33d85..034eb6535ab7 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -204,14 +204,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->rx_skbuff = kcalloc(priv->rx_ring_size, sizeof(*priv->rx_skbuff), GFP_KERNEL); - if (!priv->rx_skbuff) + if (!priv->rx_skbuff) { + ret = -ENOMEM; goto free_ucc_pram; + } priv->tx_skbuff = kcalloc(priv->tx_ring_size, sizeof(*priv->tx_skbuff), GFP_KERNEL); - if (!priv->tx_skbuff) + if (!priv->tx_skbuff) { + ret = -ENOMEM; goto free_rx_skbuff; + } priv->skb_curtx = 0; priv->skb_dirtytx = 0; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index dfc16770458d..8b6598a3713d 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto; static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { - struct hdlc_device *hdlc = dev_to_hdlc(dev); + struct hdlc_device *hdlc; + + /* First make sure "dev" is an HDLC device */ + if (!(dev->priv_flags & IFF_WAN_HDLC)) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + hdlc = dev_to_hdlc(dev); if (!net_eq(dev_net(dev), &init_net)) { kfree_skb(skb); diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index a030f5aa6b95..50804d047308 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, skb_put(skb, sizeof(struct cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); @@ -370,6 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); dev->type = ARPHRD_CISCO; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); 
netif_dormant_on(dev); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 9acad651ea1f..3a44dad87602 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -273,63 +273,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc, static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) { - u16 head_len; struct sk_buff *skb = *skb_p; - switch (skb->protocol) { - case cpu_to_be16(NLPID_CCITT_ANSI_LMI): - head_len = 4; - skb_push(skb, head_len); - skb->data[3] = NLPID_CCITT_ANSI_LMI; - break; + if (!skb->dev) { /* Control packets */ + switch (dlci) { + case LMI_CCITT_ANSI_DLCI: + skb_push(skb, 4); + skb->data[3] = NLPID_CCITT_ANSI_LMI; + break; - case cpu_to_be16(NLPID_CISCO_LMI): - head_len = 4; - skb_push(skb, head_len); - skb->data[3] = NLPID_CISCO_LMI; - break; + case LMI_CISCO_DLCI: + skb_push(skb, 4); + skb->data[3] = NLPID_CISCO_LMI; + break; - case cpu_to_be16(ETH_P_IP): - head_len = 4; - skb_push(skb, head_len); - skb->data[3] = NLPID_IP; - break; + default: + return -EINVAL; + } - case cpu_to_be16(ETH_P_IPV6): - head_len = 4; - skb_push(skb, head_len); - skb->data[3] = NLPID_IPV6; - break; + } else if (skb->dev->type == ARPHRD_DLCI) { + switch (skb->protocol) { + case htons(ETH_P_IP): + skb_push(skb, 4); + skb->data[3] = NLPID_IP; + break; - case cpu_to_be16(ETH_P_802_3): - head_len = 10; - if (skb_headroom(skb) < head_len) { - struct sk_buff *skb2 = skb_realloc_headroom(skb, - head_len); + case htons(ETH_P_IPV6): + skb_push(skb, 4); + skb->data[3] = NLPID_IPV6; + break; + + default: + skb_push(skb, 10); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + /* OUI 00-00-00 indicates an Ethertype follows */ + skb->data[5] = 0x00; + skb->data[6] = 0x00; + skb->data[7] = 0x00; + /* This should be an Ethertype: */ + *(__be16 *)(skb->data + 8) = skb->protocol; + } + + } else if (skb->dev->type == ARPHRD_ETHER) { + if (skb_headroom(skb) < 10) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, 10); if (!skb2) return -ENOBUFS; dev_kfree_skb(skb); skb = *skb_p = skb2; } - skb_push(skb, head_len); + skb_push(skb, 10); skb->data[3] = FR_PAD; skb->data[4] = NLPID_SNAP; - skb->data[5] = FR_PAD; + /* OUI 00-80-C2 stands for the 802.1 organization */ + skb->data[5] = 0x00; skb->data[6] = 0x80; skb->data[7] = 0xC2; + /* PID 00-07 stands for Ethernet frames without FCS */ skb->data[8] = 0x00; - skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ - break; + skb->data[9] = 0x07; - default: - head_len = 10; - skb_push(skb, head_len); - skb->data[3] = FR_PAD; - skb->data[4] = NLPID_SNAP; - skb->data[5] = FR_PAD; - skb->data[6] = FR_PAD; - skb->data[7] = FR_PAD; - *(__be16*)(skb->data + 8) = skb->protocol; + } else { + return -EINVAL; } dlci_to_q922(skb->data, dlci); @@ -425,14 +431,16 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) skb_put(skb, pad); memset(skb->data + len, 0, pad); } - skb->protocol = cpu_to_be16(ETH_P_802_3); } + skb->dev = dev; if (!fr_hard_header(&skb, pvc->dlci)) { dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; if (pvc->state.fecn) /* TX Congestion counter */ dev->stats.tx_compressed++; skb->dev = pvc->frad; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); dev_queue_xmit(skb); return NETDEV_TX_OK; } @@ -492,10 +500,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) memset(skb->data, 0, len); skb_reserve(skb, 4); if (lmi == LMI_CISCO) { - skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); fr_hard_header(&skb, LMI_CISCO_DLCI); } else { - 
skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); } data = skb_tail_pointer(skb); @@ -555,6 +561,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); @@ -1041,7 +1048,7 @@ static void pvc_setup(struct net_device *dev) { dev->type = ARPHRD_DLCI; dev->flags = IFF_POINTOPOINT; - dev->hard_header_len = 10; + dev->hard_header_len = 0; dev->addr_len = 2; netif_keep_dst(dev); } @@ -1093,6 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dev->mtu = HDLC_MAX_MTU; dev->min_mtu = 68; dev->max_mtu = HDLC_MAX_MTU; + dev->needed_headroom = 10; dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 48ced3912576..261b53fc8e04 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } @@ -383,11 +384,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } for (opt = data; len; len -= opt[1], opt += opt[1]) { - if (len < 2 || len < opt[1]) { - dev->stats.rx_errors++; - kfree(out); - return; /* bad packet, drop silently */ - } + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; if (pid == PID_LCP) switch (opt[0]) { @@ -395,6 +393,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, continue; /* MRU always OK and > 1500 bytes? */ case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; @@ -406,6 +406,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } break; case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ @@ -424,6 +426,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); } static int ppp_rx(struct sk_buff *skb) @@ -562,6 +569,13 @@ static void ppp_timer(struct timer_list *t) unsigned long flags; spin_lock_irqsave(&ppp->lock, flags); + /* mod_timer could be called after we entered this function but + * before we got the lock. 
+ */ + if (timer_pending(&proto->timer)) { + spin_unlock_irqrestore(&ppp->lock, flags); + return; + } switch (proto->state) { case STOPPING: case REQ_SENT: diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 08e0a46501de..c70a518b8b47 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) old_qlen = dev->tx_queue_len; ether_setup(dev); dev->tx_queue_len = old_qlen; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; eth_hw_addr_random(dev); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_off(dev); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index bf78073ee7fd..e2a83f4cd3bb 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -62,8 +62,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } skb_push(skb, 1); skb_reset_network_header(skb); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0f1217b506ad..4e42954d8cbf 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -51,6 +51,8 @@ struct lapbethdev { struct list_head node; struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* lapbeth device (lapb#) */ + bool up; + spinlock_t up_lock; /* Protects "up" */ }; static LIST_HEAD(lapbeth_devices); @@ -98,8 +100,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe rcu_read_lock(); lapbeth = lapbeth_get_x25_dev(dev); if (!lapbeth) - goto drop_unlock; - if (!netif_running(lapbeth->axdev)) + goto drop_unlock_rcu; + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256; @@ -114,11 +117,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe goto drop_unlock; } out: + spin_unlock_bh(&lapbeth->up_lock); rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto out; +drop_unlock_rcu: + rcu_read_unlock(); drop: kfree_skb(skb); return 0; @@ -128,10 +134,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } + + skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -146,13 +154,17 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; - /* - * Just to be *really* sure not to send anything if the interface - * is down, the ethernet device may have gone. + spin_lock_bh(&lapbeth->up_lock); + if (!lapbeth->up) + goto drop; + + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. 
*/ - if (!netif_running(dev)) + if (skb->len < 1) goto drop; switch (skb->data[0]) { @@ -177,6 +189,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, goto drop; } out: + spin_unlock_bh(&lapbeth->up_lock); return NETDEV_TX_OK; drop: kfree_skb(skb); @@ -190,8 +203,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) struct net_device *dev; int size = skb->len; - skb->protocol = htons(ETH_P_X25); - ptr = skb_push(skb, 2); *ptr++ = size % 256; @@ -202,6 +213,10 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb->protocol = htons(ETH_P_DEC); + + skb_reset_network_header(skb); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); @@ -266,6 +281,7 @@ static const struct lapb_register_struct lapbeth_callbacks = { */ static int lapbeth_open(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) { @@ -273,15 +289,21 @@ static int lapbeth_open(struct net_device *dev) return -ENODEV; } - netif_start_queue(dev); + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = true; + spin_unlock_bh(&lapbeth->up_lock); + return 0; } static int lapbeth_close(struct net_device *dev) { + struct lapbethdev *lapbeth = netdev_priv(dev); int err; - netif_stop_queue(dev); + spin_lock_bh(&lapbeth->up_lock); + lapbeth->up = false; + spin_unlock_bh(&lapbeth->up_lock); if ((err = lapb_unregister(dev)) != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); @@ -303,7 +325,7 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; + dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } @@ -324,12 +346,25 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. + */ + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; dev_hold(dev); lapbeth->ethdev = dev; + lapbeth->up = false; + spin_lock_init(&lapbeth->up_lock); + rc = -EIO; if (register_netdevice(ndev)) goto fail; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 0e6a51525d91..f3deb2a2fa47 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -912,6 +912,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; default: printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); + unregister_hdlc_device(dev); + return -EIO; break; } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 914be5847386..cdcc380b4c26 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) netif_wake_queue(sl->dev); } -/* Send one completely decapsulated IP datagram to the IP layer. */ +/* Send an LAPB frame to the LAPB module to process. 
*/ static void x25_asy_bump(struct x25_asy *sl) { @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl) count = sl->rcount; dev->stats.rx_bytes += count; - skb = dev_alloc_skb(count+1); + skb = dev_alloc_skb(count); if (skb == NULL) { netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } - skb_push(skb, 1); /* LAPB internal control */ skb_put_data(skb, sl->rbuff, count); skb->protocol = x25_type_trans(skb, sl->dev); err = lapb_data_received(skb->dev, skb); @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl) kfree_skb(skb); printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); } else { - netif_rx(skb); dev->stats.rx_packets++; } } @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, */ /* - * Called when I frame data arrives. We did the work above - throw it - * at the net layer. + * Called when I frame data arrives. We add a one-byte pseudo header + * and pass the frame to the upper layers. */ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) { + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + skb_push(skb, 1); + skb->data[0] = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + return netif_rx(skb); } @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) switch (s) { case X25_END: if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 2) + sl->rcount >= 2) x25_asy_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 5c79f052cad2..34f81f16b5a0 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL); - cmd->hdr.length = sizeof(cmd->sw_rf); + cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf)); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION); cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status)); diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index 529ebca1e9e1..1f7709d24f35 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -354,6 +354,7 @@ out: usb_autopm_put_interface(i2400mu->usb_iface); d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n", i2400m, ack, ack_size, (long) result); + usb_put_urb(&notif_urb); return result; error_exceeded: diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index da2d179430ca..4c57e79e5779 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ + AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect + SMCWUSBT-G2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index eca87f7c5b6c..01e05af5ae08 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++
b/drivers/net/wireless/ath/ath10k/ce.c @@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); if (ret) { dma_free_coherent(ar->dev, - (nentries * sizeof(struct ce_desc_64) + + (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), src_ring->base_addr_owner_space_unaligned, base_addr); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index bd2b5628f850..04c50a26a4f4 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "No. Preamble Rate_code "); - for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i); @@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar) ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); + kfree(ar->debug.tpc_stats_final); } int ath10k_debug_register(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 30c080094af1..bd5fa4dbab9c 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -2033,6 +2033,7 @@ struct ath10k_htt_tx_ops { int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); + void (*htt_flush_tx)(struct ath10k_htt *htt); }; static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) @@ -2072,6 +2073,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt, return htt->tx_ops->htt_tx(htt, txmode, msdu); } +static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_flush_tx) + htt->tx_ops->htt_flush_tx(htt); +} + static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_alloc_txbuff) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 9f0e7b4943ec..04095f91d301 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); + + if (idx < 0 || idx >= htt->rx_ring.size) { + ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n"); + idx &= htt->rx_ring.size_mask; + ret = -ENOMEM; + goto fail; + } + while (num > 0) { skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); if (!skb) { @@ -941,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, u8 preamble = 0; u8 group_id; u32 info1, info2, info3; + u32 stbc, nsts_su; info1 = __le32_to_cpu(rxd->ppdu_start.info1); info2 = __le32_to_cpu(rxd->ppdu_start.info2); @@ -985,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, */ bw = info2 & 3; sgi = info3 & 1; + stbc = (info2 >> 3) & 1; group_id = (info2 >> 4) & 0x3F; if (GROUP_ID_IS_SU_MIMO(group_id)) { mcs = (info3 >> 4) & 0x0F; - nss = ((info2 >> 10) & 0x07) + 1; + nsts_su = ((info2 >> 10) & 0x07); + if (stbc) + nss = (nsts_su >> 2) + 1; + else + nss = (nsts_su + 1); } else { /* Hardware doesn't decode VHT-SIG-B into Rx descriptor * so it's impossible to decode MCS. 
Also since diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a182c0944cc7..c38e1963ebc0 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -529,9 +529,14 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) htt->tx_mem_allocated = false; } -void ath10k_htt_tx_stop(struct ath10k_htt *htt) +static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt) { idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); +} + +void ath10k_htt_tx_stop(struct ath10k_htt *htt) +{ + ath10k_htt_flush_tx_queue(htt); idr_destroy(&htt->pending_tx); } @@ -1535,7 +1540,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1742,7 +1749,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1774,6 +1783,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, .htt_tx = ath10k_htt_tx_hl, .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, + .htt_flush_tx = ath10k_htt_flush_tx_queue, }; void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index c415e971735b..004af89a02b8 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1145,6 +1145,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd) const struct ath10k_hw_ops qca99x0_ops = { .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error, + .is_rssi_enable = ath10k_htt_tx_rssi_enable, }; const struct ath10k_hw_ops qca6174_ops = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 2ae57c1de7b5..ae4c9edc445c 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -810,7 +810,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw, #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 0 +#define TARGET_10_4_DMA_BURST_SIZE 1 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 36d24ea126a2..47b733fdf4fc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3624,23 +3624,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) { struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; - int ret = 0; - spin_lock_bh(&ar->data_lock); - - if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { + if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) { ath10k_warn(ar, "wmi mgmt tx queue is full\n"); - ret = -ENOSPC; - goto unlock; + return -ENOSPC; } - __skb_queue_tail(q, skb); + skb_queue_tail(q, skb); ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 
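+ /* skb_queue_tail() takes the queue's own spinlock, so ar->data_lock
+ * is no longer needed here; the lockless length check above is a
+ * best-effort bound rather than an exact limit.
+ */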
-unlock: - spin_unlock_bh(&ar->data_lock); - - return ret; + return 0; } static enum ath10k_mac_tx_path @@ -3911,6 +3904,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) if (ret) { ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", ret); + /* remove this msdu from idr tracking */ + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); + dma_unmap_single(ar->dev, paddr, skb->len, DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, skb); @@ -7082,6 +7078,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ath10k_wmi_peer_flush(ar, arvif->vdev_id, arvif->bssid, bitmap); } + ath10k_htt_flush_tx(&ar->htt); } return; } @@ -7127,7 +7124,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar, struct ieee80211_channel *channel) { int ret; - enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; + enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; lockdep_assert_held(&ar->conf_mutex); @@ -8811,7 +8808,6 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { - ar->hw->wiphy->max_sched_scan_reqs = 1; ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0a727502d14c..fd49d3419e79 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2074,6 +2074,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_sync(ar); napi_synchronize(&ar->napi); napi_disable(&ar->napi); + cancel_work_sync(&ar_pci->dump_work); /* Most likely the device has HTT Rx ring configured. 
The only way to * prevent the device from accessing (and possible corrupting) host diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 9870d2d095c8..24b1927a0751 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -550,6 +550,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, le16_to_cpu(htc_hdr->len), ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH); ret = -ENOMEM; + + queue_work(ar->workqueue, &ar->restart_work); + ath10k_warn(ar, "exceeds length, start recovery\n"); + goto err; } @@ -1582,23 +1586,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { int ret; + void *mem; + + mem = kzalloc(buf_len, GFP_KERNEL); + if (!mem) + return -ENOMEM; /* set window register to start read cycle */ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address); if (ret) { ath10k_warn(ar, "failed to set mbox window read address: %d", ret); - return ret; + goto out; } /* read the data */ - ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len); + ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len); if (ret) { ath10k_warn(ar, "failed to read from mbox window data address: %d\n", ret); - return ret; + goto out; } - return 0; + memcpy(buf, mem, buf_len); + +out: + kfree(mem); + + return ret; } static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address, diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 63607c3b8e81..d4589b2ab3b6 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1039,12 +1039,13 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar, ret = ath10k_snoc_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); - goto err_wlan_enable; + goto err_free_rri; } return 0; -err_wlan_enable: +err_free_rri: + ath10k_ce_free_rri(ar); ath10k_snoc_wlan_disable(ar); return ret; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 39abf8b12903..f46b9083bbf1 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); + rcu_read_lock(); if (txq && txq->sta && skb_cb->airtime_est) ieee80211_sta_register_airtime(txq->sta, txq->tid, skb_cb->airtime_est, 0); + rcu_read_unlock(); if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 1e0343081be9..05c0d5e92475 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1009,6 +1009,8 @@ static int ath10k_usb_probe(struct usb_interface *interface, ar_usb = ath10k_usb_priv(ar); ret = ath10k_usb_create(ar, interface); + if (ret) + goto err; ar_usb->ar = ar; ar->dev_id = product_id; @@ -1021,7 +1023,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); - goto err; + goto err_usb_destroy; } /* TODO: remove this once USB support is fully implemented */ @@ -1029,6 +1031,9 @@ static int ath10k_usb_probe(struct usb_interface *interface, return 0; +err_usb_destroy: + ath10k_usb_destroy(ar); + err: ath10k_core_destroy(ar); diff --git 
a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 1491c25518bb..edccabc667e8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -133,6 +133,7 @@ struct wmi_ops { struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr); + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); @@ -441,6 +442,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) return ar->wmi.ops->get_txbf_conf_scheme(ar); } +static inline int +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) +{ + if (!ar->wmi.ops->cleanup_mgmt_tx_send) + return -EOPNOTSUPP; + + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); +} + static inline int ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, dma_addr_t paddr) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index eb0c963d9fd5..315d20f5c8eb 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -445,13 +445,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) case WMI_TDLS_TEARDOWN_REASON_TX: case WMI_TDLS_TEARDOWN_REASON_RSSI: case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + rcu_read_lock(); station = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); if (!station) { ath10k_warn(ar, "did not find station from tdls peer event"); - kfree(tb); - return; + goto exit; } arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id)); ieee80211_tdls_oper_request( @@ -461,7 +461,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb) GFP_ATOMIC ); break; + default: + kfree(tb); + return; } + +exit: + rcu_read_unlock(); kfree(tb); } @@ -1260,13 +1266,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, switch (tag) { case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: + arg->service_map_ext_valid = true; arg->service_map_ext_len = *(__le32 *)ptr; arg->service_map_ext = ptr + sizeof(__le32); return 0; default: break; } - return -EPROTO; + + return 0; } static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, @@ -2837,6 +2845,18 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) return skb; } +static int +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, + struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_wmi *wmi = &ar->wmi; + + idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + + return 0; +} + static int ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr) @@ -2911,6 +2931,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if (desc_id < 0) goto err_free_skb; + cb->msdu_id = desc_id; + ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); @@ -4339,6 +4361,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = 
ath10k_wmi_tlv_op_gen_pktlog_disable, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 90f1197a6ad8..91604a14a8f4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4668,16 +4668,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, rate_code[i], type); @@ -4790,7 +4787,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_config_event *ev; @@ -4806,6 +4803,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; } + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", + rate_max, WMI_TPC_RATE_MAX); + rate_max = WMI_TPC_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; @@ -4822,8 +4826,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, @@ -5018,16 +5022,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, rate_code[i], type, pream_idx); @@ -5043,7 +5044,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_final_table_event *ev; @@ -5051,12 +5052,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_FINAL_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", + 
rate_max, WMI_TPC_FINAL_RATE_MAX); + rate_max = WMI_TPC_FINAL_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); @@ -5069,8 +5082,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, @@ -5646,8 +5659,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) ret); } - ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, - __le32_to_cpu(arg.service_map_ext_len)); + /* + * Initialization of "arg.service_map_ext_valid" to ZERO is necessary + * for the below logic to work. + */ + if (arg.service_map_ext_valid) + ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, + __le32_to_cpu(arg.service_map_ext_len)); } static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index e80dbe7e8f4c..761bc4a7064d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6857,6 +6857,7 @@ struct wmi_svc_rdy_ev_arg { }; struct wmi_svc_avail_ev_arg { + bool service_map_ext_valid; __le32 service_map_ext_len; const __le32 *service_map_ext; }; diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 5e7ea838a921..814131a0680a 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); + if (aid < 1 || aid > AP_MAX_NUM_STA) + return; + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) assoc_info; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 2382c6c46851..c610fe21c85c 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, return -EINVAL; } + if (tsid >= 16) { + ath6kl_err("invalid tsid: %d\n", tsid); + return -EINVAL; + } + skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index a412b352182c..d50022d26464 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -177,7 +177,8 @@ struct ath_frame_info { s8 txq; u8 keyix; u8 rtscts_rate; - u8 retries : 7; + u8 retries : 6; + u8 dyn_smps : 1; u8 baw_tracked : 1; u8 tx_power; enum ath9k_key_type keytype:2; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 26ea51a72156..859a865c5995 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1223,8 +1223,11 @@ static ssize_t 
write_file_nf_override(struct file *file, ah->nf_override = val; - if (ah->curchan) + if (ah->curchan) { + ath9k_ps_wakeup(sc); ath9k_hw_loadnf(ah, ah->curchan); + ath9k_ps_restore(sc); + } return count; } diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index dd0c32379375..2ed98aaed6fb 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle) spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); /* The pending URBs have to be canceled. */ + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_pending, list) { + usb_get_urb(tx_buf->urb); + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_urb(tx_buf->urb); + list_del(&tx_buf->list); + usb_free_urb(tx_buf->urb); + kfree(tx_buf->buf); + kfree(tx_buf); + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); } @@ -612,6 +621,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, hif_dev->remain_skb = nskb; spin_unlock(&hif_dev->rx_lock); } else { + if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: over RX MAX_PKT_NUM\n"); + goto err; + } nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, @@ -638,9 +652,9 @@ err: static void ath9k_hif_usb_rx_cb(struct urb *urb) { - struct sk_buff *skb = (struct sk_buff *) urb->context; - struct hif_device_usb *hif_dev = - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; + struct hif_device_usb *hif_dev = rx_buf->hif_dev; + struct sk_buff *skb = rx_buf->skb; int ret; if (!skb) @@ -680,14 +694,15 @@ resubmit: return; free: kfree_skb(skb); + kfree(rx_buf); } static void ath9k_hif_usb_reg_in_cb(struct urb *urb) { - struct sk_buff *skb = (struct sk_buff *) urb->context; + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; + struct hif_device_usb *hif_dev = rx_buf->hif_dev; + struct sk_buff *skb = rx_buf->skb; struct sk_buff *nskb; - struct hif_device_usb *hif_dev = - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); int ret; if (!skb) @@ -727,11 +742,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: @@ -745,6 +762,7 @@ resubmit: return; free: kfree_skb(skb); + kfree(rx_buf); urb->context = NULL; } @@ -753,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; unsigned long flags; + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { + usb_get_urb(tx_buf->urb); + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_dev->tx.flags |= HIF_USB_TX_FLUSH; spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, 
tx_buf_tmp, &hif_dev->tx.tx_pending, list) { + usb_get_urb(tx_buf->urb); + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); } @@ -790,7 +818,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) init_usb_anchor(&hif_dev->mgmt_submitted); for (i = 0; i < MAX_TX_URB_NUM; i++) { - tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); + tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); if (!tx_buf) goto err; @@ -827,8 +855,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) { - struct urb *urb = NULL; + struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; + struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->rx_submitted); @@ -836,6 +865,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) for (i = 0; i < MAX_RX_URB_NUM; i++) { + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto err_rxb; + } + /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { @@ -850,11 +885,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) goto err_skb; } + rx_buf->hif_dev = hif_dev; + rx_buf->skb = skb; + usb_fill_bulk_urb(urb, hif_dev->udev, usb_rcvbulkpipe(hif_dev->udev, USB_WLAN_RX_PIPE), skb->data, MAX_RX_BUF_SIZE, - ath9k_hif_usb_rx_cb, skb); + ath9k_hif_usb_rx_cb, rx_buf); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->rx_submitted); @@ -880,6 +918,8 @@ err_submit: err_skb: usb_free_urb(urb); err_urb: + kfree(rx_buf); +err_rxb: ath9k_hif_usb_dealloc_rx_urbs(hif_dev); return ret; } @@ -891,14 +931,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) { - struct urb *urb = NULL; + struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; + struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->reg_in_submitted); for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto err_rxb; + } + /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { @@ -913,11 +960,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) goto err_skb; } + rx_buf->hif_dev = hif_dev; + rx_buf->skb = skb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, skb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->reg_in_submitted); @@ -943,6 +993,8 @@ err_submit: err_skb: usb_free_urb(urb); err_urb: + kfree(rx_buf); +err_rxb: ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); return ret; } @@ -973,7 +1025,7 @@ err: return -ENOMEM; } -static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) { usb_kill_anchored_urbs(&hif_dev->regout_submitted); ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); @@ -1341,8 +1393,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) if (hif_dev->flags & HIF_USB_READY) { ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); - ath9k_htc_hw_free(hif_dev->htc_handle); 
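The hif_usb.c rework above stops storing a bare skb in urb->context and re-deriving hif_dev via usb_ifnum_to_if(); instead each URB carries a small struct rx_buf holding both pointers (see the hif_usb.h hunk below). A stripped-down sketch of the idea, with all type names hypothetical:

struct hif_state;
struct buffer;
void process(struct hif_state *hif, struct buffer *buf);

struct rx_ctx {
	struct hif_state *hif;	/* owner; cf. rx_buf->hif_dev */
	struct buffer *buf;	/* payload; cf. rx_buf->skb */
};

/* completion handler: everything it needs travels in one allocation,
 * instead of being re-derived from interface data that teardown may
 * already have cleared */
static void rx_complete(void *context)
{
	struct rx_ctx *ctx = context;

	process(ctx->hif, ctx->buf);
}
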
ath9k_hif_usb_dev_deinit(hif_dev); + ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); + ath9k_htc_hw_free(hif_dev->htc_handle); } usb_set_intfdata(interface, NULL); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 7846916aa01d..5985aa15ca93 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -86,6 +86,11 @@ struct tx_buf { struct list_head list; }; +struct rx_buf { + struct sk_buff *skb; + struct hif_device_usb *hif_dev; +}; + #define HIF_USB_TX_STOP BIT(0) #define HIF_USB_TX_FLUSH BIT(1) @@ -133,5 +138,6 @@ struct hif_device_usb { int ath9k_hif_usb_init(void); void ath9k_hif_usb_exit(void); +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); #endif /* HTC_USB_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index d961095ab01f..11054c17a9b5 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) if (unlikely(r)) { ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); - return -EIO; + return -1; } return be32_to_cpu(val); @@ -931,8 +931,9 @@ err_init: int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, u16 devid, char *product, u32 drv_info) { - struct ieee80211_hw *hw; + struct hif_device_usb *hif_dev; struct ath9k_htc_priv *priv; + struct ieee80211_hw *hw; int ret; hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); @@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, return 0; err_init: - ath9k_deinit_wmi(priv); + ath9k_stop_wmi(priv); + hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; + ath9k_hif_usb_dealloc_urbs(hif_dev); + ath9k_destoy_wmi(priv); err_free: ieee80211_free_hw(hw); return ret; @@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; ath9k_deinit_device(htc_handle->drv_priv); - ath9k_deinit_wmi(htc_handle->drv_priv); + ath9k_stop_wmi(htc_handle->drv_priv); ieee80211_free_hw(htc_handle->drv_priv->hw); } } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 9cec5c216e1f..628f45c8c06f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -973,7 +973,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, struct ath_htc_rx_status *rxstatus; struct ath_rx_status rx_stats; bool decrypt_error = false; - __be16 rs_datalen; + u16 rs_datalen; bool is_phyerr; if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { @@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, * which are not PHY_ERROR (short radar pulses have a length of 3) */ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { - ath_warn(common, - "Short RX data len, dropping (dlen: %d)\n", - rs_datalen); + ath_dbg(common, ANY, + "Short RX data len, dropping (dlen: %d)\n", + rs_datalen); goto rx_next; } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index d091c8ebdcf0..510e61e97dbc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target, if (svc_rspmsg->status == 
HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; + if (epid < 0 || epid >= ENDPOINT_MAX) + return; + service_id = be16_to_cpu(svc_rspmsg->service_id); max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); endpoint = &target->endpoint[epid]; @@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); - kfree_skb(skb); return -ETIMEDOUT; } @@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); - kfree_skb(skb); return -ETIMEDOUT; } @@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target, if (!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); - kfree_skb(skb); return -ETIMEDOUT; } @@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, if (skb) { htc_hdr = (struct htc_frame_hdr *) skb->data; + if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint)) + goto ret; endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; skb_pull(skb, sizeof(struct htc_frame_hdr)); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 052deffb4c9d..9fd8e64288ff 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -287,7 +287,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) srev = REG_READ(ah, AR_SREV); - if (srev == -EIO) { + if (srev == -1) { ath_err(ath9k_hw_common(ah), "Failed to read SREV register"); return false; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 34121fbf32e3..bd7e757a0f92 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); } + if (changed & IEEE80211_CONF_CHANGE_POWER) + ath9k_set_txpower(sc, NULL); + mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index cdc146091194..e7a3127395be 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) return wmi; } -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) +void ath9k_stop_wmi(struct ath9k_htc_priv *priv) { struct wmi *wmi = priv->wmi; mutex_lock(&wmi->op_mutex); wmi->stopped = true; mutex_unlock(&wmi->op_mutex); +} +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) +{ kfree(priv->wmi); } @@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); - kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h index 380175d5ecd7..d8b912206232 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.h +++ b/drivers/net/wireless/ath/ath9k/wmi.h @@ -179,7 +179,6 @@ struct wmi { }; struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, enum htc_endpoint_id *wmi_ctrl_epid); int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, @@ 
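Both htc_hst.c hunks above add the same defense: an id parsed out of device-supplied data is range-checked before it indexes the fixed-size endpoint array. A minimal sketch of that pattern (names and sizes illustrative only):

#include <stddef.h>

#define EP_MAX 22	/* illustrative table size */

struct endpoint { int service_id; };
static struct endpoint endpoints[EP_MAX];

/* epid comes from the device, so treat it as untrusted input; with an
 * unsigned type a single upper-bound check covers both directions */
static struct endpoint *lookup_endpoint(unsigned int epid)
{
	if (epid >= EP_MAX)
		return NULL;
	return &endpoints[epid];
}
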
-189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, void ath9k_wmi_event_tasklet(unsigned long data); void ath9k_fatal_work(struct work_struct *work); void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); +void ath9k_stop_wmi(struct ath9k_htc_priv *priv); +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); #define WMI_CMD(_wmi_cmd) \ do { \ diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 31e7b108279c..14e6871a1405 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, is_40, is_sgi, is_sp); if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; + if (rix >= 8 && fi->dyn_smps) { + info->rates[i].RateFlags |= + ATH9K_RATESERIES_RTS_CTS; + info->flags |= ATH9K_TXDESC_CTSENA; + } info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, is_40, false); @@ -2111,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, fi->keyix = an->ps_key; else fi->keyix = ATH9K_TXKEYIX_INVALID; + fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC; fi->keytype = keytype; fi->framelen = framelen; fi->tx_power = txpower; diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 51934d191f33..1ab09e1c9ec5 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { - if_comb_types |= - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO); + if_comb_types |= BIT(NL80211_IFTYPE_AP); #ifdef CONFIG_MAC80211_MESH if_comb_types |= diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 40a8054f8aa6..21ca62b06214 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && (vif->type != NL80211_IFTYPE_AP)); - /* While the driver supports HW offload in a single - * P2P client configuration, it doesn't support HW - * offload in the favourit, concurrent P2P GO+CLIENT - * configuration. Hence, HW offload will always be - * disabled for P2P. + /* The driver used to have P2P GO+CLIENT support, + * but since this was dropped and we don't know if + * there are any gremlins lurking in the shadows, + * so best we keep HW offload disabled for P2P. */ ar->disable_offload |= vif->p2p; @@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) break; - /* P2P GO [master] use-case - * Because the P2P GO station is selected dynamically - * by all participating peers of a WIFI Direct network, - * the driver has be able to change the main interface - * operating mode on the fly. 
- */ - if (main_vif->p2p && vif->p2p && - vif->type == NL80211_IFTYPE_AP) { - old_main = main_vif; - break; - } - err = -EBUSY; rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 79998a3ddb7a..46ae4ec4ad47 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = { .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, .mcs = { .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - .rx_highest = cpu_to_le16(72), + .rx_highest = cpu_to_le16(150), .tx_params = IEEE80211_HT_MCS_TX_DEFINED, } } @@ -1341,7 +1341,7 @@ static int wcn36xx_probe(struct platform_device *pdev) if (addr && ret != ETH_ALEN) { wcn36xx_err("invalid local-mac-address\n"); ret = -EINVAL; - goto out_wq; + goto out_destroy_ept; } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); @@ -1349,7 +1349,7 @@ static int wcn36xx_probe(struct platform_device *pdev) ret = wcn36xx_platform_get_resources(wcn, pdev); if (ret) - goto out_wq; + goto out_destroy_ept; wcn36xx_init_ieee80211(wcn); ret = ieee80211_register_hw(wcn->hw); @@ -1361,6 +1361,8 @@ static int wcn36xx_probe(struct platform_device *pdev) out_unmap: iounmap(wcn->ccu_base); iounmap(wcn->dxe_base); +out_destroy_ept: + rpmsg_destroy_ept(wcn->smd_channel); out_wq: ieee80211_free_hw(hw); out_err: diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 0d1a8dab30ed..32e1c036f3ac 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -2,6 +2,7 @@ config WIL6210 tristate "Wilocity 60g WiFi card wil6210 support" select WANT_DEV_COREDUMP + select CRC32 depends on CFG80211 depends on PCI default n diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b85603e91c7a..3432dfe1ddb4 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); - + ieee80211_hw_set(hw, MFP_CAPABLE); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index d3c001fa8eb4..32ce1b42ce08 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -5308,7 +5308,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) for (i = 0; i < 4; i++) { if (dev->phy.rev >= 3) - table[i] = coef[i]; + coef[i] = table[i]; else coef[i] = 0; } diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 8b6b657c4b85..5208a39fd6f7 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c index e9b23c2e5bd4..efd63f4ce74f 100644 --- 
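The wcn36xx_probe() fix above shows how the goto-unwind idiom scales: when a new resource (here the rpmsg endpoint) joins the init sequence, failures in later steps must jump to a label that releases it, in reverse order of acquisition. A schematic sketch with hypothetical acquire/release pairs:

int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

int probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();	/* cf. the rpmsg endpoint above */
	if (ret)
		goto out_release_a;
	ret = acquire_c();	/* a later step failing must undo a and b */
	if (ret)
		goto out_release_b;
	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return ret;
}
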
a/drivers/net/wireless/broadcom/b43legacy/xmit.c +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c @@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, default: b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", chanstat); + goto drop; } memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index e3ebb7abbdae..cd813c69a178 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -82,6 +82,8 @@ #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) +#define BRCMF_PS_MAX_TIMEOUT_MS 2000 + #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) @@ -2789,6 +2791,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, else bphy_err(drvr, "error (%d)\n", err); } + + err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret", + min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS)); + if (err) + bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err); + done: brcmf_dbg(TRACE, "Exit\n"); return err; @@ -5374,7 +5382,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif, return false; } -static bool brcmf_is_linkdown(const struct brcmf_event_msg *e) +static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif, + const struct brcmf_event_msg *e) { u32 event = e->event_code; u16 flags = e->flags; @@ -5383,6 +5392,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e) (event == BRCMF_E_DISASSOC_IND) || ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) { brcmf_dbg(CONN, "Processing link down\n"); + clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); + clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state); return true; } return false; @@ -5675,7 +5686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, } else brcmf_bss_connect_done(cfg, ndev, e, true); brcmf_net_setcarrier(ifp, true); - } else if (brcmf_is_linkdown(e)) { + } else if (brcmf_is_linkdown(ifp->vif, e)) { brcmf_dbg(CONN, "Linkdown\n"); if (!brcmf_is_ibssmode(ifp->vif)) { brcmf_bss_connect_done(cfg, ndev, e, false); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 85cf96461dde..e9bb8dbdc9aa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -483,7 +483,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, ret = brcmf_proto_hdrpull(drvr, true, skb, ifp); if (ret || !(*ifp) || !(*ifp)->ndev) { - if (ret != -ENODATA && *ifp) + if (ret != -ENODATA && *ifp && (*ifp)->ndev) (*ifp)->ndev->stats.rx_errors++; brcmu_pkt_buf_free_skb(skb); return -ENODATA; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c index 4aa2561934d7..6d5188b78f2d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -40,6 +40,18 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = { BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data" }; +static const struct brcmf_dmi_data predia_basic_data = { + BRCM_CC_43341_CHIP_ID, 2, "predia-basic" +}; + +/* Note the Voyo winpad A15 tablet uses the same Ampak AP6330 module, with the + * exact same nvram file as the Prowise-PT301 tablet. 
Since the nvram for the + * Prowise-PT301 is already in linux-firmware we just point to that here. + */ +static const struct brcmf_dmi_data voyo_winpad_a15_data = { + BRCM_CC_4330_CHIP_ID, 4, "Prowise-PT301" +}; + static const struct dmi_system_id dmi_platform_data[] = { { /* ACEPC T8 Cherry Trail Z8350 mini PC */ @@ -111,6 +123,26 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&pov_tab_p1006w_data, }, + { + /* Predia Basic tablet (+ with keyboard dock) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + /* Mx.WT107.KUBNGEA02 with the version-nr dropped */ + DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"), + }, + .driver_data = (void *)&predia_basic_data, + }, + { + /* Voyo winpad A15 tablet */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"), + }, + .driver_data = (void *)&voyo_winpad_a15_data, + }, {} }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 2c3526aeca6f..545015610cf8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -283,13 +283,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) if (!err) ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + if (drvr->settings->feature_disable) { brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", ifp->drvr->feat_flags, drvr->settings->feature_disable); ifp->drvr->feat_flags &= ~drvr->settings->feature_disable; } - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); brcmf_feat_firmware_overrides(drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 79c8a858b6d6..a30fcfbf2ee7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr) { struct brcmf_fweh_info *fweh = &drvr->fweh; - /* cancel the worker */ - cancel_work_sync(&fweh->event_work); - WARN_ON(!list_empty(&fweh->event_q)); - memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); + /* cancel the worker if initialized */ + if (fweh->event_work.func) { + cancel_work_sync(&fweh->event_work); + WARN_ON(!list_empty(&fweh->event_q)); + memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); + } } /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 37c512036e0e..ce18433aaefb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -19,7 +19,7 @@ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ #define BRCMF_STA_WME 0x00000002 /* WMM association */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 2bd892df83cc..3d36b6ee158b 100644 --- 
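The dmi.c additions above extend a quirk table keyed on firmware identity strings; when those strings are too generic (the Voyo board), an extra field such as the BIOS date disambiguates. A flattened sketch of the lookup, not the kernel's dmi_system_id machinery (all values illustrative):

#include <stddef.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
	const char *bios_date;	/* NULL = don't care */
	int nvram_variant;	/* stand-in for per-board driver data */
};

static const struct quirk quirk_table[] = {
	{ "Insyde", "CherryTrail", NULL, 2 },
	{ "AMI Corporation", "Aptio CRB", "11/20/2014", 4 },
};

static const struct quirk *quirk_lookup(const char *vendor,
					const char *product,
					const char *bios_date)
{
	size_t i;

	for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++) {
		const struct quirk *q = &quirk_table[i];

		if (strcmp(q->vendor, vendor) || strcmp(q->product, product))
			continue;
		if (q->bios_date && strcmp(q->bios_date, bios_date))
			continue;
		return q;
	}
	return NULL;	/* no quirk: fall back to chip defaults */
}
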
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -643,6 +643,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, int ifidx) { + struct brcmf_fws_hanger_item *hi; bool (*matchfn)(struct sk_buff *, void *) = NULL; struct sk_buff *skb; int prec; @@ -654,6 +655,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); while (skb) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + hi = &fws->hanger.items[hslot]; + WARN_ON(skb != hi->pkt); + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); brcmu_pkt_buf_free_skb(skb); @@ -2145,8 +2149,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); brcmf_fws_schedule_deq(fws); } else { - bphy_err(drvr, "drop skb: no hanger slot\n"); - brcmf_txfinalize(ifp, skb, false); + bphy_err(drvr, "no hanger slot available\n"); rc = -ENOMEM; } brcmf_fws_unlock(fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index e3dd8623be4e..c2705d7a4247 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1619,6 +1619,8 @@ fail: BRCMF_TX_IOCTL_MAX_MSG_SIZE, msgbuf->ioctbuf, msgbuf->ioctbuf_handle); + if (msgbuf->txflow_wq) + destroy_workqueue(msgbuf->txflow_wq); kfree(msgbuf); } return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 3be60aef5465..cb68f54a9c56 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1936,16 +1936,18 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) fwreq = brcmf_pcie_prepare_fw_request(devinfo); if (!fwreq) { ret = -ENOMEM; - goto fail_bus; + goto fail_brcmf; } ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup); if (ret < 0) { kfree(fwreq); - goto fail_bus; + goto fail_brcmf; } return 0; +fail_brcmf: + brcmf_free(&devinfo->pdev->dev); fail_bus: kfree(bus->msgbuf); kfree(bus); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index d43247a95ce5..ef5521b9b357 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3685,7 +3685,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func1); - brcmf_sdio_wd_timer(bus, false); +#ifdef DEBUG + if (!BRCMF_FWCON_ON() || + bus->console_interval == 0) +#endif + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func1); @@ -4429,6 +4433,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_sdiod_intr_unregister(bus->sdiodev); brcmf_detach(bus->sdiodev->dev); + brcmf_free(bus->sdiodev->dev); cancel_work_sync(&bus->datawork); if (bus->brcmf_wq) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 7ef36234a25d..66797dc5e90d 
100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft; pi->pi_fptr.detach = wlc_phy_detach_lcnphy; - if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) + if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) { + kfree(pi->u.pi_lcnphy); return false; + } if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { if (pi_lcn->lcnphy_tempsense_option == 3) { diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index c4c8f1b62e1e..da0d3834b5f0 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!",__func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } npacks = skb_queue_len (&ai->txq); if (npacks >= MAXTXQ - 1) { @@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); @@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index 3d558b47168b..456330296b73 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, } if (ext->alg != IW_ENCODE_ALG_NONE) { - memcpy(sec.keys[idx], ext->key, ext->key_len); - sec.key_sizes[idx] = ext->key_len; + int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN); + + memcpy(sec.keys[idx], ext->key, key_len); + sec.key_sizes[idx] = key_len; sec.flags |= (1 << idx); if (ext->alg == IW_ENCODE_ALG_WEP) { sec.encode_alg[idx] = SEC_ALG_WEP; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index 6209f85a71dd..0af9e997c9f6 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -374,7 +374,7 @@ out: } static void * -il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +il3945_rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 7c6e2c863497..0a02d8aca320 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, } static void * -il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +il4965_rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 746749f37996..1107b96a8a88 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ 
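The libipw_wx_set_encodeext() fix above applies the standard rule for copying user-controlled data into a fixed buffer: clamp the supplied length to the destination size before the memcpy. A minimal sketch of the same shape (buffer size and struct layout illustrative):

#include <stddef.h>
#include <string.h>

#define KEY_LEN_MAX 32	/* illustrative, cf. SCM_KEY_LEN above */

struct key_slot {
	unsigned char key[KEY_LEN_MAX];
	size_t len;
};

static void set_key(struct key_slot *slot, const unsigned char *key,
		    size_t key_len)
{
	/* equivalent of the clamp_val() above for an unsigned length:
	 * only the upper bound matters */
	size_t len = key_len > KEY_LEN_MAX ? KEY_LEN_MAX : key_len;

	memcpy(slot->key, key, len);
	slot->len = len;
}
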
b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4286,8 +4286,8 @@ il_apm_init(struct il_priv *il) * power savings, even without L1. */ if (il->cfg->set_l0s) { - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { /* L1-ASPM enabled; disable(!) L0S */ il_set_bit(il, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 74229fcb63a9..e68a13c33c45 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, cpu_to_le16(priv->lib->bt_params->agg_time_limit); } -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 73196cbc7fbe..75d958bab0e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -8,7 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, IWL_MVM_DQA_MIN_DATA_QUEUE = 10, - IWL_MVM_DQA_MAX_DATA_QUEUE = 31, + IWL_MVM_DQA_MAX_DATA_QUEUE = 30, }; enum iwl_mvm_tx_fifo { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 695bbaa86273..12ef3a042051 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -147,6 +147,16 @@ #define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC) #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF +/* LTR control (since IWL_DEVICE_FAMILY_22000) */ +#define CSR_LTR_LONG_VAL_AD (CSR_BASE + 0x0D4) +#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000 +#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE 0x1c000000 +#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL 0x03ff0000 +#define CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000 +#define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE 0x00001c00 +#define CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff +#define CSR_LTR_LONG_VAL_AD_SCALE_USEC 2 + /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5d546dac7814..022f2faccab4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -525,8 +525,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | - IEEE80211_HE_MAC_CAP2_ACK_EN, + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, @@ -610,8 +609,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_BSR | - IEEE80211_HE_MAC_CAP2_ACK_EN, + IEEE80211_HE_MAC_CAP2_BSR, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f043eefabb4e..7b1d2dac6ceb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -514,7 +514,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, const size_t bufsz = sizeof(buf); int pos = 0; + mutex_lock(&mvm->mutex); iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); + mutex_unlock(&mvm->mutex); + do_div(curr_os, NSEC_PER_USEC); diff = curr_os - curr_gp2; pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index ad18c2f1a806..524f9dd2323d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -478,6 +476,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; + /* only change from debug set <-> debug unset */ + if ((amsdu_len && mvmsta->orig_amsdu_len) || + (!!amsdu_len && mvmsta->orig_amsdu_len)) + return -EBUSY; + if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index c54fe6650018..7272d8522a9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; - /* Do not configure default queue, it is configured via context info */ + /* + * The default queue is configured via context info, so if we + * have a single queue, there's nothing to do here. + */ + if (mvm->trans->num_rx_queues == 1) + return 0; + + /* skip the default queue */ num_queues = mvm->trans->num_rx_queues - 1; size = struct_size(cmd, data, num_queues); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6ca087ffd163..fc6430edd110 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1193,14 +1193,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_rm_aux_sta(mvm); + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ - /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_del_aux_sta(mvm); - /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 @@ -3023,7 +3022,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* this would be a mac80211 bug ... but don't crash */ if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return -EINVAL; + return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 
0 : -EINVAL; /* * If we are in a STA removal flow and in DQA mode: @@ -3070,6 +3069,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, goto out_unlock; } + if (vif->type == NL80211_IFTYPE_STATION) + vif->bss_conf.he_support = sta->he_cap.has_he; + if (sta->tdls && (vif->p2p || iwl_mvm_tdls_sta_count(mvm, NULL) == @@ -3651,9 +3653,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, - "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", - channel->hw_value, req_dur, duration, delay, - dtim_interval); + "ROC: Requesting to remain on channel %u for %ums\n", + channel->hw_value, req_dur); + IWL_DEBUG_TE(mvm, + "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", + duration, delay, dtim_interval); + /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); @@ -4164,6 +4169,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, iwl_mvm_binding_remove_vif(mvm, vif); out: + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && + switching_chanctx) + return; mvmvif->phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index ed367b0a185c..f49887379c43 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -281,7 +281,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) int regulatory_type; /* Checking for required sections */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { + if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 3acbd5b7ab4b..8b0576cde797 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -316,6 +316,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, + iwl_mvm_probe_resp_data_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF, + iwl_mvm_channel_switch_noa_notif, + RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -832,6 +838,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (!mvm->scan_cmd) goto out_free; + /* invalidate ids to prevent accidental removal of sta_id 0 */ + mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; + mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; + /* Set EBS as successful as long as not stated otherwise by the FW. 
*/ mvm->last_ebs_successful = true; @@ -1232,6 +1242,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); + put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } @@ -1282,7 +1293,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) module_put(THIS_MODULE); return; } - reprobe->dev = mvm->trans->dev; + reprobe->dev = get_device(mvm->trans->dev); INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 24df3182ec9e..be8bc0601d7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -195,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, { u16 supp; int i, highest_mcs; + u8 nss = sta->rx_nss; - for (i = 0; i < sta->rx_nss; i++) { - if (i == IWL_TLC_NSS_MAX) - break; + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); if (!highest_mcs) continue; @@ -245,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; + u8 nss = sta->rx_nss; - for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) { + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + nss = 1; + + for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; @@ -307,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = - cpu_to_le16(ht_cap->mcs.rx_mask[1]); + + /* the station support only a single receive chain */ + if (sta->smps_mode == IEEE80211_SMPS_STATIC) + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + 0; + else + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } @@ -354,14 +367,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, u16 size = le32_to_cpu(notif->amsdu_size); int i; - /* - * In debug sta->max_amsdu_len < size - * so also check with orig_amsdu_len which holds the original - * data before debugfs changed the value - */ - if (WARN_ON(sta->max_amsdu_len < size && - mvmsta->orig_amsdu_len < size)) + if (sta->max_amsdu_len < size) { + /* + * In debug sta->max_amsdu_len < size + * so also check 
with orig_amsdu_len which holds the + * original data before debugfs changed the value + */ + WARN_ON(mvmsta->orig_amsdu_len < size); goto out; + } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 42d525e46e80..9af657820b38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3663,7 +3663,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); } -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rs_alloc(struct ieee80211_hw *hw) { return hw->priv; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 5ee33c8ae9d2..77b8def26edb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_stat_data { struct iwl_mvm *mvm; + __le32 flags; __le32 mac_id; u8 beacon_filter_average_energy; void *general; @@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, -general->beacon_average_energy[vif_id]; } + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + if (mvmvif->id != id) return; @@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, flags = stats->flag; } + data.flags = flags; iwl_mvm_rx_stats_check_trigger(mvm, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 8ad2d889179c..40cafcf40ccf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, lockdep_assert_held(&mvm->mutex); + if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, + "max queue %d >= num_of_queues (%d)", maxq, + mvm->trans->trans_cfg->base_params->num_of_queues)) + maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; + /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; @@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) inactive_tid_bitmap, &unshare_queues, &changetid_queues); - if (ret >= 0 && free_queue < 0) { + if (ret && free_queue < 0) { queue_owner = sta; - free_queue = ret; 
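The rs-fw.c changes above all enforce one rule: a peer in static SMPS receives on a single chain, so rate entries beyond one spatial stream must not be populated. The clamp reduces to a few lines (sketch; bounds illustrative):

/* effective number of spatial streams to fill rates for */
static unsigned int effective_nss(unsigned int rx_nss, int smps_static,
				  unsigned int nss_max)
{
	unsigned int nss = smps_static ? 1u : rx_nss;	/* cf. IEEE80211_SMPS_STATIC */

	return nss < nss_max ? nss : nss_max;	/* cf. IWL_TLC_NSS_MAX */
}
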
+ free_queue = i; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); @@ -1179,17 +1184,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); + rcu_read_unlock(); + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) { - rcu_read_unlock(); + if (ret) return ret; - } } - rcu_read_unlock(); - return free_queue; } @@ -2067,6 +2070,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) @@ -2075,18 +2081,29 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + + return ret; +} + void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) -{ - lockdep_assert_held(&mvm->mutex); - - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); -} - /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 8d70093847cb..da2d1ac01229 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 74980382e64c..eab159205e48 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -164,8 +164,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, /* Allocate IML */ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); - if (!iml_img) - return -ENOMEM; + if (!iml_img) { + ret = -ENOMEM; + goto err_free_ctxt_info; + } memcpy(iml_img, trans->iml, trans->iml_len); @@ -180,6 +182,26 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + /* + * The firmware initializes this again later (to a smaller + * value), but for the boot process initialize the LTR to + * ~250 usec. + */ + u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | + u32_encode_bits(250, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | + CSR_LTR_LONG_VAL_AD_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | + u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); + + iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val); + } + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else @@ -187,6 +209,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, return 0; +err_free_ctxt_info: + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_gen3 = NULL; err_free_prph_info: dma_free_coherent(trans->dev, sizeof(*prph_info), diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b0b7eca1754e..f34297fd453c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -968,6 +968,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)}, + {IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index c76d26708e65..0581f082301e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2178,18 +2178,38 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { unsigned long flags; - int offs, ret = 0; + int offs = 0; u32 *vals = buf; - if 
(iwl_trans_grab_nic_access(trans, &flags)) { - iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); - for (offs = 0; offs < dwords; offs++) - vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); - iwl_trans_release_nic_access(trans, &flags); - } else { - ret = -EBUSY; + while (offs < dwords) { + /* limit the time we spin here under lock to 1/2s */ + unsigned long end = jiffies + HZ / 2; + bool resched = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, + addr + 4 * offs); + + while (offs < dwords) { + vals[offs] = iwl_read32(trans, + HBUS_TARG_MEM_RDAT); + offs++; + + if (time_after(jiffies, end)) { + resched = true; + break; + } + } + iwl_trans_release_nic_access(trans, &flags); + + if (resched) + cond_resched(); + } else { + return -EBUSY; + } } - return ret; + + return 0; } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index ff4c34d7b74f..92fbef5d760c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -705,6 +705,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_tfh_tfd *tfd; + unsigned long flags2; copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); @@ -773,14 +774,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - spin_lock_bh(&txq->lock); + spin_lock_irqsave(&txq->lock, flags2); idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags2); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -915,7 +916,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags2); free_dup_buf: if (idx < 0) kfree(dup_buf); @@ -1283,6 +1284,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) iwl_pcie_gen2_txq_unmap(trans, queue); + iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); + trans_pcie->txq[queue] = NULL; + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index d3b58334e13e..c8531d6a0592 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -657,6 +657,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; + if (!txq) { + IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); + return; + } + spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", @@ -1539,6 +1544,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; + unsigned long flags2; if (WARN(!trans->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, @@ -1622,10 +1628,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - spin_lock_bh(&txq->lock); + spin_lock_irqsave(&txq->lock, flags2); if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags2); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -1786,7 +1792,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: - spin_unlock_bh(&txq->lock); + spin_unlock_irqrestore(&txq->lock, flags2); free_dup_buf: if (idx < 0) kfree(dup_buf); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index e753f43e0162..e2368bfe3e46 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -1234,13 +1234,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->len < ETH_HLEN) goto drop; - ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); - if (!ctx) - goto busy; - - memset(ctx->buf, 0, BULK_BUF_SIZE); - buf = ctx->buf->data; - tx_control = 0; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, @@ -1248,6 +1241,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (err) goto drop; + ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); + if (!ctx) + goto drop; + + memset(ctx->buf, 0, BULK_BUF_SIZE); + buf = ctx->buf->data; + { __le16 *tx_cntl = (__le16 *)buf; *tx_cntl = cpu_to_le16(tx_control); diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c index 80ad0b7eaef4..f8c6027cab6b 100644 --- a/drivers/net/wireless/intersil/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -329,10 +329,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) struct p54p_desc *desc; dma_addr_t mapping; u32 idx, i; + __le32 device_addr; spin_lock_irqsave(&priv->lock, flags); idx = le32_to_cpu(ring_control->host_idx[1]); i = idx % ARRAY_SIZE(ring_control->tx_data); + device_addr = ((struct p54_hdr *)skb->data)->req_id; mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); @@ -346,7 +348,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) desc = &ring_control->tx_data[i]; desc->host_addr = cpu_to_le32(mapping); - desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; + desc->device_addr = device_addr; desc->len = cpu_to_le16(skb->len); desc->flags = 0; diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index b94764c88750..ff0e30c0c14c 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = { {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 14f562cd715c..c48c68090d76 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3575,9 +3575,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) } if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { - hwname = kasprintf(GFP_KERNEL, "%.*s", - 
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), - (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + GFP_KERNEL); if (!hwname) return -ENOMEM; param.hwname = hwname; @@ -3597,9 +3597,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { - hwname = kasprintf(GFP_KERNEL, "%.*s", - nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), - (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + GFP_KERNEL); if (!hwname) return -ENOMEM; } else diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 25ac9db35dbf..bedc09215088 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf) lbtf_deb_enter(LBTF_DEB_MAIN); - if_usb_reset_device(priv); - - if (priv) + if (priv) { + if_usb_reset_device(priv); lbtf_remove_card(priv); + } /* Unlink and free urb */ if_usb_free(cardp); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index d89684168500..9e6dc289ec3e 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1496,7 +1496,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - static struct mwifiex_sta_node *node; + struct mwifiex_sta_node *node; + int i; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && priv->media_connected && idx == 0) { @@ -1506,13 +1507,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, HostCmd_ACT_GEN_GET, 0, NULL, true); - if (node && (&node->list == &priv->sta_list)) { - node = NULL; - return -ENOENT; - } - - node = list_prepare_entry(node, &priv->sta_list, list); - list_for_each_entry_continue(node, &priv->sta_list, list) { + i = 0; + list_for_each_entry(node, &priv->sta_list, list) { + if (i++ != idx) + continue; ether_addr_copy(mac, node->mac_addr); return mwifiex_dump_station_info(priv, node, sinfo); } diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 1fb76d2f5d3f..8b9d0809daf6 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -953,7 +953,7 @@ struct mwifiex_tkip_param { struct mwifiex_aes_param { u8 pn[WPA_PN_SIZE]; __le16 key_len; - u8 key[WLAN_KEY_LEN_CCMP]; + u8 key[WLAN_KEY_LEN_CCMP_256]; } __packed; struct mwifiex_wapi_param { diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index d87aeff70cef..c2cb1e711c06 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); + if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) + req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; memcpy(adhoc_start->ssid, 
req_ssid->ssid, req_ssid->ssid_len); mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index d14e55e3c9da..5894566ec480 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1469,6 +1469,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); mwifiex_deauthenticate(priv, NULL); + mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); + mwifiex_uninit_sw(adapter); adapter->is_up = false; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index fc1706d0647d..58c9623c3a91 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -377,6 +377,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); + + card->pci_reset_ongoing = true; } /* @@ -405,6 +407,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev) dev_err(&pdev->dev, "reinit failed: %d\n", ret); else mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); + + card->pci_reset_ongoing = false; } static const struct pci_error_handlers mwifiex_pcie_err_handler = { @@ -2995,7 +2999,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) int ret; u32 fw_status; - cancel_work_sync(&card->work); + /* Perform the cancel_work_sync() only when we're not resetting + * the card, because that function never returns while we're in + * the reset path. If we're here when resetting the card, it means + * that we failed to reset the card (reset failure path).
+ */ + if (!card->pci_reset_ongoing) { + mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n"); + cancel_work_sync(&card->work); + mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n"); + } else { + mwifiex_dbg(adapter, MSG, + "skipped cancel_work_sync() because we're in card reset failure path\n"); + } ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); if (fw_status == FIRMWARE_READY_PCIE) { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index f7ce9b6db6b4..72d0c01ff359 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -391,6 +391,8 @@ struct pcie_service_card { struct mwifiex_msix_context share_irq_ctx; struct work_struct work; unsigned long work_flags; + + bool pci_reset_ongoing; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 59f0651d148b..629af26675cf 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1891,7 +1891,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, chan, CFG80211_BSS_FTYPE_UNKNOWN, bssid, timestamp, cap_info_bitmap, beacon_period, - ie_buf, ie_len, rssi, GFP_KERNEL); + ie_buf, ie_len, rssi, GFP_ATOMIC); if (bss) { bss_priv = (struct mwifiex_bss_priv *)bss->priv; bss_priv->band = band; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index fec38b6e86ff..b322c2755e9a 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1996,6 +1996,8 @@ error: kfree(card->mpa_rx.buf); card->mpa_tx.buf_size = 0; card->mpa_rx.buf_size = 0; + card->mpa_tx.buf = NULL; + card->mpa_rx.buf = NULL; } return ret; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index f672bdf52cc1..2d9ec225aead 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -36,9 +36,9 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" -#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin" +#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" -#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" +#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 20c206da0631..4eaa493e3325 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_key_material *key = &resp->params.key_material; + int len; + + len = le16_to_cpu(key->key_param_set.key_len); + if (len > sizeof(key->key_param_set.key)) + return -EINVAL; if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { @@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, memset(priv->aes_key.key_param_set.key, 0, sizeof(key->key_param_set.key)); - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; - 
memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, - le16_to_cpu(priv->aes_key.key_param_set.key_len)); + priv->aes_key.key_param_set.key_len = cpu_to_le16(len); + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); return 0; } @@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_key_material_v2 *key_v2; - __le16 len; + int len; key_v2 = &resp->params.key_material_v2; + + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); + if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) + return -EINVAL; + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); @@ -626,12 +635,11 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, return 0; memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, - WLAN_KEY_LEN_CCMP); + sizeof(key_v2->key_param_set.key_params.aes.key)); priv->aes_key_v2.key_param_set.key_params.aes.key_len = - key_v2->key_param_set.key_params.aes.key_len; - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; + cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); + key_v2->key_param_set.key_params.aes.key, len); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index c2365eeb7016..528107d70c1c 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter) skb_dequeue(&port->tx_aggr.aggr_list))) mwifiex_write_data_complete(adapter, skb_tmp, 0, -1); - del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer); + if (port->tx_aggr.timer_cnxt.hold_timer.function) + del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer); port->tx_aggr.timer_cnxt.is_hold_timer_set = false; port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; } diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index c4db6417748f..1b76b2419186 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -1469,6 +1469,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); + txq->txd = NULL; return -ENOMEM; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 8f3d36a15e17..f8441fd65400 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -143,8 +143,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; - u16 seqno, head, size; - u8 ackp, idx; + u16 seqno, head, size, idx; + u8 ackp; __skb_queue_tail(frames, skb); @@ -230,7 +230,7 @@ out: } int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno, - u16 ssn, u8 size) + u16 ssn, u16 size) { struct mt76_rx_tid *tid; @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start); static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) { - u8 size = tid->size; + u16 size = tid->size; int i; 
cancel_delayed_work(&tid->reorder_work); @@ -268,6 +268,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) if (!skb) continue; + tid->reorder_buf[i] = NULL; tid->nframes--; dev_kfree_skb(skb); } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6249a46c1976..d3efcbd48ee1 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -261,10 +261,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, struct mt76_queue_buf buf; dma_addr_t addr; + if (q->queued + 1 >= q->ndesc - 1) + goto error; + addr = dma_map_single(dev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev, addr))) - return -ENOMEM; + goto error; buf.addr = addr; buf.len = skb->len; @@ -275,6 +278,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, spin_unlock_bh(&q->lock); return 0; + +error: + dev_kfree_skb(skb); + return -ENOMEM; } static int @@ -445,22 +452,27 @@ static void mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, int len, bool more) { - struct page *page = virt_to_head_page(data); - int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; - if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { - offset += q->buf_offset; - skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, - q->buf_size); + if (nr_frags < ARRAY_SIZE(shinfo->frags)) { + struct page *page = virt_to_head_page(data); + int offset = data - page_address(page) + q->buf_offset; + + skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); + } else { + skb_free_frag(data); } if (more) return; q->rx_head = NULL; - dev->drv->rx_skb(dev, q - dev->q_rx, skb); + if (nr_frags < ARRAY_SIZE(shinfo->frags)) + dev->drv->rx_skb(dev, q - dev->q_rx, skb); + else + dev_kfree_skb(skb); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 1a2c143b34d0..8bd191347b9f 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -105,7 +105,15 @@ static int mt76_led_init(struct mt76_dev *dev) dev->led_al = of_property_read_bool(np, "led-active-low"); } - return devm_led_classdev_register(dev->dev, &dev->led_cdev); + return led_classdev_register(dev->dev, &dev->led_cdev); +} + +static void mt76_led_cleanup(struct mt76_dev *dev) +{ + if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set) + return; + + led_classdev_unregister(&dev->led_cdev); } static void mt76_init_stream_cap(struct mt76_dev *dev, @@ -360,6 +368,8 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; + if (IS_ENABLED(CONFIG_MT76_LEDS)) + mt76_led_cleanup(dev); mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 502814c26b33..52a16b42dfd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -240,8 +240,8 @@ struct mt76_rx_tid { struct delayed_work reorder_work; u16 head; - u8 size; - u8 nframes; + u16 size; + u16 nframes; u8 started:1, stopped:1, timer_pending:1; @@ -723,7 +723,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx, void mt76_set_stream_caps(struct mt76_dev *dev, bool vht); int mt76_rx_aggr_start(struct mt76_dev 
*dev, struct mt76_wcid *wcid, u8 tid, - u16 ssn, u8 size); + u16 ssn, u16 size); void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 25d5b1608bc9..0a5695c3d924 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -561,6 +561,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq = (struct mt76_txq *)txq->drv_priv; + mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -590,6 +591,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 87c748715b5d..38183aef0eb9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -455,6 +455,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq = (struct mt76_txq *)txq->drv_priv; + mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -485,6 +486,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 842cd81704db..b6867d93c0e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -119,8 +119,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; int ret = 0; - if (seq != rxd->seq) - return -EAGAIN; + if (seq != rxd->seq) { + ret = -EAGAIN; + goto out; + } switch (cmd) { case -MCU_CMD_PATCH_SEM_CONTROL: @@ -134,6 +136,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, default: break; } +out: dev_kfree_skb(skb); return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index e858bba8c8ff..0075fba93546 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -212,6 +212,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev) static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || + mt76_chip(&dev->mt76) == 0x7632 || mt76_chip(&dev->mt76) == 0x7662 || mt76_chip(&dev->mt76) == 0x7602; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index aec73a0295e8..de0d6f21c621 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -371,6 +371,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq = (struct mt76_txq *)txq->drv_priv; + mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, @@ -400,6 +401,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 8b26c6108186..96a2b7ba6764 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ + { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */ { }, }; diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index f6a0454abe04..6f2172be7b66 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) if (new_p) { /* we have one extra ref from the allocator */ - __free_pages(e->p, MT_RX_ORDER); - + put_page(e->p); e->p = new_p; } } @@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, } e = &q->e[q->end]; - e->skb = skb; usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, mt7601u_complete_tx, q); ret = usb_submit_urb(e->urb, GFP_ATOMIC); @@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, q->end = (q->end + 1) % q->entries; q->used++; + e->skb = skb; if (q->used >= q->entries) ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index c868582c5d22..aa3b64902cf9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); - return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); + return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); } static void diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index c0c32805fb8d..106f1a846f49 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -834,6 +834,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) default: pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid, vif->vifid, vif->wdev.iftype); + dev_kfree_skb(cmd_skb); ret = -EINVAL; goto out; } @@ -1996,6 +1997,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, break; default: pr_err("unsupported iftype %d\n", vif->wdev.iftype); + dev_kfree_skb(cmd_skb); ret = -EINVAL; goto out; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 4824be0c6231..2b8db3f73d00 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -299,19 +299,19 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR); if (IS_ERR(sysctl_bar)) { pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); - return ret; + return PTR_ERR(sysctl_bar); } dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR); if (IS_ERR(dmareg_bar)) { pr_err("failed to map BAR%u\n", QTN_DMA_BAR); - 
return ret; + return PTR_ERR(dmareg_bar); } epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR); if (IS_ERR(epmem_bar)) { pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); - return ret; + return PTR_ERR(epmem_bar); } chipid = qtnf_chip_id_get(sysctl_bar); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 3499b211dad5..048984ca81fd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5447,7 +5447,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); - usb_free_urb(urb); goto error; } @@ -5456,6 +5455,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) rtl8xxxu_write32(priv, REG_USB_HIMR, val32); error: + usb_free_urb(urb); return ret; } @@ -5781,6 +5781,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) struct rtl8xxxu_priv *priv = hw->priv; struct rtl8xxxu_rx_urb *rx_urb; struct rtl8xxxu_tx_urb *tx_urb; + struct sk_buff *skb; unsigned long flags; int ret, i; @@ -5831,6 +5832,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) rx_urb->hw = hw; ret = rtl8xxxu_submit_rx_urb(priv, rx_urb); + if (ret) { + if (ret != -ENOMEM) { + skb = (struct sk_buff *)rx_urb->urb.context; + dev_kfree_skb(skb); + } + rtl8xxxu_queue_rx_urb(priv, rx_urb); + } } exit: /* diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 0c7d74902d33..4b5ea0ec9109 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv, { } -static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *rtl_rate_alloc(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); return rtlpriv; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 85093b3e5373..ed72a2aeb6c8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x824, 0x00030FE0, 0x828, 0x00000000, 0x82C, 0x002081DD, - 0x830, 0x2AAA8E24, + 0x830, 0x2AAAEEC8, 0x834, 0x0037A706, 0x838, 0x06489B44, 0x83C, 0x0000095B, @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x9D8, 0x00000000, 0x9DC, 0x00000000, 0x9E0, 0x00005D00, - 0x9E4, 0x00000002, + 0x9E4, 0x00000003, 0x9E8, 0x00000001, 0xA00, 0x00D047C8, - 0xA04, 0x01FF000C, + 0xA04, 0x01FF800C, 0xA08, 0x8C8A8300, 0xA0C, 0x2E68000F, 0xA10, 0x9500BB78, @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x083, 0x00021800, 0x084, 0x00028000, 0x085, 0x00048000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x0009483A, + 0xA0000000, 0x00000000, 0x086, 0x00094838, + 0xB0000000, 0x00000000, 0x087, 0x00044980, 0x088, 0x00048000, 0x089, 0x0000D480, @@ -1409,26 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x03C, 0x000CA000, 0x0EF, 0x00000000, 0x0EF, 0x00001100, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF3, 0x034, 0x00049DF0, - 0xFF0F0404, 0xCDEF, - 0x034, 0x0004ADF3, - 0x034, 0x00049DF0, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004ADF5, 0x034, 0x00049DF2, - 0xFF0F02C0, 0xCDEF, + 
0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004A0F3, 0x034, 0x000490B1, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xA0000000, 0x00000000, 0x034, 0x0004ADF7, 0x034, 0x00049DF3, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1438,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00048DED, 0x034, 0x00047DEA, 0x034, 0x00046DE7, @@ -1448,17 +1458,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042886, 0x034, 0x00041486, 0x034, 0x00040447, - 0xFF0F0404, 0xCDEF, - 0x034, 0x00048DED, - 0x034, 0x00047DEA, - 0x034, 0x00046DE7, - 0x034, 0x00045CE9, - 0x034, 0x00044CE6, - 0x034, 0x000438C6, - 0x034, 0x00042886, - 0x034, 0x00041486, - 0x034, 0x00040447, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000480AE, 0x034, 0x000470AB, 0x034, 0x0004608B, @@ -1468,7 +1468,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00042026, 0x034, 0x00041023, 0x034, 0x00040002, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xA0000000, 0x00000000, 0x034, 0x00048DEF, 0x034, 0x00047DEC, 0x034, 0x00046DE9, @@ -1478,28 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x0004248A, 0x034, 0x0004108D, 0x034, 0x0004008A, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002ADF4, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0F3, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0F3, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002ADF4, + 0xA0000000, 0x00000000, 0x034, 0x0002ADF7, - 0xFF0F0200, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF4, - 0xFF0F0404, 0xCDEF, - 0x034, 0x00029DF4, - 0xFF0F0200, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00029DF1, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000290F0, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000290F0, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF1, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0xA0000000, 0x00000000, 0x034, 0x00029DF2, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1509,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 
0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00028DF1, 0x034, 0x00027DEE, 0x034, 0x00026DEB, @@ -1519,17 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022889, 0x034, 0x00021489, 0x034, 0x0002044A, - 0xFF0F0404, 0xCDEF, - 0x034, 0x00028DF1, - 0x034, 0x00027DEE, - 0x034, 0x00026DEB, - 0x034, 0x00025CEC, - 0x034, 0x00024CE9, - 0x034, 0x000238CA, - 0x034, 0x00022889, - 0x034, 0x00021489, - 0x034, 0x0002044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x000280AF, 0x034, 0x000270AC, 0x034, 0x0002608B, @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022026, 0x034, 0x00021023, 0x034, 0x00020002, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xA0000000, 0x00000000, 0x034, 0x00028DEE, 0x034, 0x00027DEB, 0x034, 0x00026CCD, @@ -1549,19 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00022849, 0x034, 0x00021449, 0x034, 0x0002004D, - 0xFF0F0104, 0xDEAD, - 0xFF0F02C0, 0xABCD, + 0xB0000000, 0x00000000, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0D7, 0x034, 0x000090D3, 0x034, 0x000080B1, 0x034, 0x000070AE, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0xA0000000, 0x00000000, 0x034, 0x0000ADF7, 0x034, 0x00009DF4, 0x034, 0x00008DF1, 0x034, 0x00007DEE, - 0xFF0F02C0, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1569,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x00006DEB, 0x034, 0x00005CEC, 0x034, 0x00004CE9, @@ -1577,15 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002889, 0x034, 0x00001489, 0x034, 0x0000044A, - 0xFF0F0404, 0xCDEF, - 0x034, 0x00006DEB, - 0x034, 0x00005CEC, - 0x034, 0x00004CE9, - 0x034, 0x000038CA, - 0x034, 0x00002889, - 0x034, 0x00001489, - 0x034, 0x0000044A, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000608D, 0x034, 0x0000506B, 0x034, 0x0000404A, @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002044, 0x034, 0x00001025, 0x034, 0x00000004, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xA0000000, 0x00000000, 0x034, 0x00006DCD, 0x034, 0x00005CCD, 0x034, 0x00004CCA, @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x034, 0x00002888, 0x034, 0x00001488, 0x034, 0x00000486, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, - 
0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000187, 0x035, 0x00008187, 0x035, 0x00010187, @@ -1625,17 +1676,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x00040188, 0x035, 0x00048188, 0x035, 0x00050188, - 0xFF0F0404, 0xCDEF, - 0x035, 0x00000187, - 0x035, 0x00008187, - 0x035, 0x00010187, - 0x035, 0x00020188, - 0x035, 0x00028188, - 0x035, 0x00030188, - 0x035, 0x00040188, - 0x035, 0x00048188, - 0x035, 0x00050188, - 0xCDCDCDCD, 0xCDCD, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x035, 0x00000145, 0x035, 0x00008145, 0x035, 0x00010145, @@ -1645,11 +1696,41 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x035, 0x000401C7, 0x035, 0x000481C7, 0x035, 0x000501C7, - 0xFF0F0104, 0xDEAD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xA0000000, 0x00000000, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x00085733, 0x036, 0x0008D733, 0x036, 0x00095733, @@ -1675,20 +1756,20 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x000CE4B4, 0x036, 0x000D64B4, 0x036, 0x000DE4B4, - 0xFF0F0404, 0xCDEF, - 0x036, 0x00085733, - 0x036, 0x0008D733, - 0x036, 0x00095733, - 0x036, 0x0009D733, - 0x036, 0x000A64B4, - 0x036, 0x000AE4B4, - 0x036, 0x000B64B4, - 0x036, 0x000BE4B4, - 0x036, 0x000C64B4, - 0x036, 0x000CE4B4, - 0x036, 0x000D64B4, - 0x036, 0x000DE4B4, - 0xCDCDCDCD, 0xCDCD, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x036, 0x000056B3, 0x036, 0x0000D6B3, 0x036, 0x000156B3, @@ -1701,103 +1782,201 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x036, 0x0004E7B4, 0x036, 0x000567B4, 0x036, 0x0005E7B4, - 0xFF0F0104, 0xDEAD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 
0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xA0000000, 0x00000000, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000001C8, 0x03C, 0x00000492, - 0xFF0F0404, 0xCDEF, - 0x03C, 0x000001C8, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, 0x03C, 0x00000492, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x0000022A, 0x03C, 0x00000594, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xA0000000, 0x00000000, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000800, - 0xFF0F02C0, 0xCDEF, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x00000820, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000820, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0xA0000000, 0x00000000, 0x03C, 0x00000900, - 0xFF0F0104, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, - 0xFF0F0104, 0xABCD, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x0004E400, - 0xFF0F0404, 0xCDEF, - 0x008, 0x0004E400, - 0xCDCDCDCD, 0xCDCD, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x008, 0x00002000, - 0xFF0F0104, 0xDEAD, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x0004E400, + 0xA0000000, 0x00000000, + 0x008, 0x00002000, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0DF, 0x000000C0, - 0x01F, 0x00040064, - 0xFF0F0104, 0xABCD, + 0x01F, 0x00000064, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x000A7284, 0x059, 0x000600EC, - 0xFF0F0404, 0xCDEF, - 0x058, 0x000A7284, - 0x059, 0x000600EC, - 0xCDCDCDCD, 0xCDCD, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x058, 0x00081184, 0x059, 0x0006016C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000200, 0x00000000, 0x40000000, 
0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xA0000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000E8D73, 0x062, 0x00093FC5, - 0xFF0F0404, 0xCDEF, - 0x061, 0x000E8D73, - 0x062, 0x00093FC5, - 0xCDCDCDCD, 0xCDCD, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000EAD53, 0x062, 0x00093BC4, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xA0000000, 0x00000000, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xFF0F0200, 0xCDEF, - 0x063, 0x000710E9, - 0xFF0F02C0, 0xCDEF, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x063, 0x000110E9, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0xA0000000, 0x00000000, 0x063, 0x000714E9, - 0xFF0F0104, 0xDEAD, - 0xFF0F0104, 0xABCD, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0204, 0xCDEF, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xFF0F0404, 0xCDEF, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C27C, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, 0x064, 0x0001C67C, - 0xFF0F0104, 0xDEAD, - 0xFF0F0200, 0xABCD, - 0x065, 0x00093016, - 0xFF0F02C0, 0xCDEF, - 0x065, 0x00093015, - 0xCDCDCDCD, 0xCDCD, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0xA0000000, 0x00000000, + 0x064, 0x0001C67C, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, 0x065, 0x00091016, - 0xFF0F0200, 0xDEAD, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0xA0000000, 0x00000000, + 0x065, 0x00091016, + 0xB0000000, 0x00000000, 0x018, 0x00000006, 0x0EF, 0x00002000, 0x03B, 0x0003824B, @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x0B4, 0x0001214C, 0x0B7, 0x0003000C, 0x01C, 0x000539D2, + 0x0C4, 0x000AFE00, 0x018, 0x0001F12A, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, 0x018, 0x0001712A, }; @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY); u32 RTL8821AE_MAC_REG_ARRAY[] = { + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -2485,7 +2666,7 @@ u32 
RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0xA6360001, 0x81C, 0xA5380001, 0x81C, 0xA43A0001, - 0x81C, 0xA33C0001, + 0x81C, 0x683C0001, 0x81C, 0x673E0001, 0x81C, 0x66400001, 0x81C, 0x65420001, @@ -2519,7 +2700,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x017A0001, 0x81C, 0x017C0001, 0x81C, 0x017E0001, - 0xFF0F02C0, 0xABCD, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFB000101, 0x81C, 0xFA020101, 0x81C, 0xF9040101, @@ -2578,7 +2759,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x016E0101, 0x81C, 0x01700101, 0x81C, 0x01720101, - 0xCDCDCDCD, 0xCDCD, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0xA0000000, 0x00000000, 0x81C, 0xFF000101, 0x81C, 0xFF020101, 0x81C, 0xFE040101, @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x046E0101, 0x81C, 0x03700101, 0x81C, 0x02720101, - 0xFF0F02C0, 0xDEAD, + 0xB0000000, 0x00000000, 0x81C, 0x01740101, 0x81C, 0x01760101, 0x81C, 0x01780101, diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 348b0072cdd6..bad06939a247 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -718,8 +718,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); - if (err) + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); goto err_out; + } usb_free_urb(urb); } return 0; @@ -881,10 +884,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!_urb) { - kfree_skb(skb); + if (!_urb) return NULL; - } _rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); @@ -898,7 +899,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; - struct sk_buff *_skb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { @@ -907,8 +907,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; - _skb = skb; - _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); + _urb = 
_rtl_usb_tx_urb_setup(hw, skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. Drop skb!\n"); kfree_skb(skb); diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 3e95ad198912..853ac1c2ed73 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -1923,7 +1923,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) if (coex_stat->wl_under_ips) return; - if (coex->freeze && !coex_stat->bt_setup_link) + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && + !coex_stat->bt_setup_link) return; coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 6ad985e98e42..5a906101498d 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -146,6 +146,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size, { int tmp_len; + memset(tmp, 0, size); + if (count < num) return -EFAULT; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 35dbdb3c4f1e..8efaee7571f3 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -340,7 +340,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); - SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en); + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 88e2252bf8a2..15c7a6fc37b9 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -553,8 +553,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = VHT_STBC_EN; if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) - is_support_sgi = true; } else if (sta->ht_cap.ht_supported) { ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | (sta->ht_cap.mcs.rx_mask[0] << 12); @@ -562,9 +560,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = HT_STBC_EN; if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 || - sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) - is_support_sgi = true; } if (efuse->hw_cap.nss == 1) @@ -606,12 +601,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; + is_support_sgi = sta->vht_cap.vht_supported && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 77a2bdee50fa..4a43c4fa716d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -974,6 +974,7 @@ 
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, len = pci_resource_len(pdev, bar_id); rtwpci->mmap = pci_iomap(pdev, bar_id, len); if (!rtwpci->mmap) { + pci_release_regions(pdev); rtw_err(rtwdev, "failed to map pci memory\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 87824a4caba9..a47d871ae506 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -13,8 +13,8 @@ #define RTK_BEQ_TX_DESC_NUM 256 #define RTK_MAX_RX_DESC_NUM 512 -/* 8K + rx desc size */ -#define RTK_PCI_RX_BUF_SIZE (8192 + 24) +/* 11K + rx desc size */ +#define RTK_PCI_RX_BUF_SIZE (11454 + 24) #define RTK_PCI_CTRL 0x300 #define BIT_RST_TRXDMA_INTF BIT(20) diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 47d199d2e7dc..02da69e9dfe7 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -1451,7 +1451,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev) } } -static u8 rtw_get_channel_group(u8 channel) +static u8 rtw_get_channel_group(u8 channel, u8 rate) { switch (channel) { default: @@ -1495,6 +1495,7 @@ static u8 rtw_get_channel_group(u8 channel) case 106: return 4; case 14: + return rate <= DESC_RATE11M ? 5 : 4; case 108: case 110: case 112: @@ -1744,7 +1745,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, s8 *limit = &pwr_param->pwr_limit; pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; - group = rtw_get_channel_group(ch); + group = rtw_get_channel_group(ch, rate); /* base power index for 2.4G/5G */ if (ch <= 14) { diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 6f8d5f9a9f7e..a07304405b2c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -248,7 +248,8 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - if ((skb->len - header_size) == EAPOL4_PACKET_LEN) { + if (((skb->len - header_size) == EAPOL4_PACKET_LEN) || + ((skb->len - header_size) == EAPOL4_PACKET_LEN - 2)) { data_desc->misc_flags |= RSI_DESC_REQUIRE_CFM_TO_HOST; xtend_desc->confirm_frame_type = EAPOL4_CONFIRM; diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 1bebba4e8527..d51ec7104fb7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -153,9 +153,7 @@ static void rsi_handle_interrupt(struct sdio_func *function) if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; - dev->sdio_irq_task = current; - rsi_interrupt_handler(adapter); - dev->sdio_irq_task = NULL; + rsi_set_event(&dev->rx_thread.event); } /** @@ -1059,8 +1057,6 @@ static int rsi_probe(struct sdio_func *pfunction, rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__); goto fail_kill_thread; } - skb_queue_head_init(&sdev->rx_q.head); - sdev->rx_q.num_rx_pkts = 0; sdio_claim_host(pfunction); if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) { @@ -1515,7 +1511,7 @@ static int rsi_restore(struct device *dev) } static const struct dev_pm_ops rsi_pm_ops = { .suspend = rsi_suspend, - .resume = rsi_resume, + .resume_noirq = rsi_resume, .freeze = rsi_freeze, .thaw = rsi_thaw, .restore = rsi_restore, diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 
449f6d23c5e3..7c77b09240da 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -60,39 +60,20 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) return status; } +static void rsi_rx_handler(struct rsi_hw *adapter); + void rsi_sdio_rx_thread(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; struct rsi_91x_sdiodev *sdev = adapter->rsi_dev; - struct sk_buff *skb; - int status; do { rsi_wait_event(&sdev->rx_thread.event, EVENT_WAIT_FOREVER); rsi_reset_event(&sdev->rx_thread.event); + rsi_rx_handler(adapter); + } while (!atomic_read(&sdev->rx_thread.thread_done)); - while (true) { - if (atomic_read(&sdev->rx_thread.thread_done)) - goto out; - - skb = skb_dequeue(&sdev->rx_q.head); - if (!skb) - break; - if (sdev->rx_q.num_rx_pkts > 0) - sdev->rx_q.num_rx_pkts--; - status = rsi_read_pkt(common, skb->data, skb->len); - if (status) { - rsi_dbg(ERR_ZONE, "Failed to read the packet\n"); - dev_kfree_skb(skb); - break; - } - dev_kfree_skb(skb); - } - } while (1); - -out: rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); - skb_queue_purge(&sdev->rx_q.head); atomic_inc(&sdev->rx_thread.thread_done); complete_and_exit(&sdev->rx_thread.completion, 0); } @@ -113,10 +94,6 @@ static int rsi_process_pkt(struct rsi_common *common) u32 rcv_pkt_len = 0; int status = 0; u8 value = 0; - struct sk_buff *skb; - - if (dev->rx_q.num_rx_pkts >= RSI_MAX_RX_PKTS) - return 0; num_blks = ((adapter->interrupt_status & 1) | ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); @@ -144,22 +121,19 @@ static int rsi_process_pkt(struct rsi_common *common) rcv_pkt_len = (num_blks * 256); - skb = dev_alloc_skb(rcv_pkt_len); - if (!skb) - return -ENOMEM; - - status = rsi_sdio_host_intf_read_pkt(adapter, skb->data, rcv_pkt_len); + status = rsi_sdio_host_intf_read_pkt(adapter, dev->pktbuffer, + rcv_pkt_len); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n", __func__); - dev_kfree_skb(skb); return status; } - skb_put(skb, rcv_pkt_len); - skb_queue_tail(&dev->rx_q.head, skb); - dev->rx_q.num_rx_pkts++; - rsi_set_event(&dev->rx_thread.event); + status = rsi_read_pkt(common, dev->pktbuffer, rcv_pkt_len); + if (status) { + rsi_dbg(ERR_ZONE, "Failed to read the packet\n"); + return status; + } return 0; } @@ -251,12 +225,12 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) } /** - * rsi_interrupt_handler() - This function read and process SDIO interrupts. + * rsi_rx_handler() - Read and process SDIO interrupts. * @adapter: Pointer to the adapter structure. * * Return: None. 
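* * Context: runs from rsi_sdio_rx_thread() after rsi_handle_interrupt() * kicks rx_thread.event, so the SDIO bus is accessed from thread context * rather than from the hard interrupt handler.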
*/ -void rsi_interrupt_handler(struct rsi_hw *adapter) +static void rsi_rx_handler(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; struct rsi_91x_sdiodev *dev = diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 4b9e406b8461..a296f4e0d324 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -733,24 +733,24 @@ static int rsi_reset_card(struct rsi_hw *adapter) if (ret < 0) goto fail; } else { - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_INTERRUPT_TIMER, - NWP_WWD_INT_TIMER_CLKS, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_INTERRUPT_TIMER, + NWP_WWD_INT_TIMER_CLKS, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_SYSTEM_RESET_TIMER, - NWP_WWD_SYS_RESET_TIMER_CLKS, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_SYSTEM_RESET_TIMER, + NWP_WWD_SYS_RESET_TIMER_CLKS, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_MODE_AND_RSTART, - NWP_WWD_TIMER_DISABLE, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_MODE_AND_RSTART, + NWP_WWD_TIMER_DISABLE, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } } rsi_dbg(INFO_ZONE, "Reset card done\n"); diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index c5cfb6238f73..ce6cf65a577a 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -111,11 +111,6 @@ struct receive_info { u32 buf_available_counter; }; -struct rsi_sdio_rx_q { - u8 num_rx_pkts; - struct sk_buff_head head; -}; - struct rsi_91x_sdiodev { struct sdio_func *pfunction; struct task_struct *sdio_irq_task; @@ -128,11 +123,10 @@ struct rsi_91x_sdiodev { u16 tx_blk_size; u8 write_fail; bool buff_status_updated; - struct rsi_sdio_rx_q rx_q; struct rsi_thread rx_thread; + u8 pktbuffer[8192] __aligned(4); }; -void rsi_interrupt_handler(struct rsi_hw *adapter); int rsi_init_sdio_slave_regs(struct rsi_hw *adapter); int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data); int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length); diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index f7fe56affbcd..326b1cc1d2bc 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -381,6 +381,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, CW1200_LINK_ID_MAX, cw1200_skb_dtor, priv)) { + destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } @@ -392,6 +393,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, for (; i > 0; i--) cw1200_queue_deinit(&priv->tx_queue[i - 1]); cw1200_queue_stats_deinit(&priv->tx_queue_stats); + destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index 850864dbafa1..e6d426edab56 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -70,7 +70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, break; } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index 3c9c623bb428..9d7dbfe7fe0c 100644 --- 
a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -635,7 +635,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | - WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; wl->mr_fw_name = WL127X_FW_NAME_MULTI; @@ -659,7 +658,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | - WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->plt_fw_name = WL127X_PLT_FW_NAME; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; @@ -688,7 +686,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | - WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 547ad538d8b6..be0ed19f9356 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -2862,21 +2862,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (is_ibss) ret = wl12xx_cmd_role_start_ibss(wl, wlvif); - else { - if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) { - /* - * TODO: this is an ugly workaround for wl12xx fw - * bug - we are not able to tx/rx after the first - * start_sta, so make dummy start+stop calls, - * and then call start_sta again. - * this should be fixed in the fw. - */ - wl12xx_cmd_role_start_sta(wl, wlvif); - wl12xx_cmd_role_stop_sta(wl, wlvif); - } - + else ret = wl12xx_cmd_role_start_sta(wl, wlvif); - } return ret; } @@ -3658,8 +3645,10 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(wl->dev); goto out; + } ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 90e56d4c3df3..e20e18cd04ae 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work) ret = wlcore_tx_work_locked(wl); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); wl12xx_queue_recovery_work(wl); goto out; } diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index b7821311ac75..81c94d390623 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -547,9 +547,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, /* Each RX/TX transaction requires an end-of-transaction transfer */ #define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) -/* the first start_role(sta) sometimes doesn't work on wl12xx */ -#define WLCORE_QUIRK_START_STA_FAILS BIT(1) - /* wl127x and SPI don't support SDIO block size alignment */ #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 01305ba2d3aa..9d04ca53229b 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -12,6 +12,7 @@ #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <linux/etherdevice.h> +#include <linux/math64.h> #include <linux/module.h> static struct wiphy *common_wiphy; @@ -168,11 +169,11 @@ static 
void virt_wifi_scan_result(struct work_struct *work) scan_result.work); struct wiphy *wiphy = priv_to_wiphy(priv); struct cfg80211_scan_info scan_info = { .aborted = false }; + u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, CFG80211_BSS_FTYPE_PRESP, - fake_router_bssid, - ktime_get_boottime_ns(), + fake_router_bssid, tsf, WLAN_CAPABILITY_ESS, 0, (void *)&ssid, sizeof(ssid), DBM_TO_MBM(-50), GFP_KERNEL); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 05847eb91a1b..32fe131ba366 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct xenvif *vif; /* Parent VIF */ + /* + * TX/RX common EOI handling. + * When feature-split-event-channels = 0, interrupt handler sets + * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set + * by the RX and TX interrupt handlers. + * RX and TX handler threads will issue an EOI when either + * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or + * NETBK_TX_EOI) are set and they will reset those bits. + */ + atomic_t eoi_pending; +#define NETBK_RX_EOI 0x01 +#define NETBK_TX_EOI 0x02 +#define NETBK_COMMON_EOI 0x04 + /* Use NAPI for guest TX */ struct napi_struct napi; /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ @@ -375,6 +389,7 @@ int xenvif_dealloc_kthread(void *data); irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); void xenvif_rx_action(struct xenvif_queue *queue); void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 103ed00775eb..809089587301 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif) !vif->disabled; } +static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) +{ + bool rc; + + rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); + if (rc) + napi_schedule(&queue->napi); + return rc; +} + static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; + int old; - if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) - napi_schedule(&queue->napi); + old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); + WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n"); + + if (!xenvif_handle_tx_interrupt(queue)) { + atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } @@ -116,19 +132,48 @@ static int xenvif_poll(struct napi_struct *napi, int budget) return work_done; } +static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) +{ + bool rc; + + rc = xenvif_have_rx_work(queue, false); + if (rc) + xenvif_kick_thread(queue); + return rc; +} + static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; + int old; - xenvif_kick_thread(queue); + old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); + WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n"); + + if (!xenvif_handle_rx_interrupt(queue)) { + atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } irqreturn_t xenvif_interrupt(int irq, void *dev_id) { - xenvif_tx_interrupt(irq, dev_id); - 
xenvif_rx_interrupt(irq, dev_id); + struct xenvif_queue *queue = dev_id; + int old; + bool has_rx, has_tx; + + old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); + WARN(old, "Interrupt while EOI pending\n"); + + has_tx = xenvif_handle_tx_interrupt(queue); + has_rx = xenvif_handle_rx_interrupt(queue); + + if (!has_rx && !has_tx) { + atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } @@ -595,7 +640,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, shared = (struct xen_netif_ctrl_sring *)addr; BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); - err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn); + err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn); if (err < 0) goto err_unmap; @@ -653,7 +698,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, if (tx_evtchn == rx_evtchn) { /* feature-split-event-channels == 0 */ - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, queue->name, queue); if (err < 0) @@ -664,7 +709,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, /* feature-split-event-channels == 1 */ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, queue->tx_irq_name, queue); if (err < 0) @@ -674,7 +719,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, queue->rx_irq_name, queue); if (err < 0) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 0020b2e8c279..c213f2b81269 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) if (more_to_do) napi_schedule(&queue->napi); + else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI, + &queue->eoi_pending) & + (NETBK_TX_EOI | NETBK_COMMON_EOI)) + xen_irq_lateeoi(queue->tx_irq, 0); } static void tx_add_credit(struct xenvif_queue *queue) @@ -1336,7 +1340,15 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) NULL, queue->pages_to_map, nr_mops); - BUG_ON(ret); + if (ret) { + unsigned int i; + + netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", + nr_mops, ret); + for (i = 0; i < nr_mops; ++i) + WARN_ON_ONCE(queue->tx_map_ops[i].status == + GNTST_okay); + } } work_done = xenvif_tx_submit(queue); @@ -1622,9 +1634,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif) irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data) { struct xenvif *vif = data; + unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS; - while (xenvif_ctrl_work_todo(vif)) + while (xenvif_ctrl_work_todo(vif)) { xenvif_ctrl_action(vif); + eoi_flag = 0; + } + + xen_irq_lateeoi(irq, eoi_flag); return IRQ_HANDLED; } diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index ef5887037b22..48e2006f96ce 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) RING_IDX prod, cons; struct sk_buff *skb; int needed; + unsigned long flags; 
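+ /* + * The rx_queue is also written from other contexts (see + * xenvif_rx_queue_tail()), so peek the head skb and compute its slot + * requirements under the queue lock with interrupts disabled. + */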
+ + spin_lock_irqsave(&queue->rx_queue.lock, flags); skb = skb_peek(&queue->rx_queue); - if (!skb) + if (!skb) { + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); return false; + } needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); if (skb_is_gso(skb)) @@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) if (skb->sw_hash) needed++; + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + do { prod = queue->rx.sring->req_prod; cons = queue->rx.req_cons; @@ -490,13 +497,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) return queue->stalled && prod - cons >= 1; } -static bool xenvif_have_rx_work(struct xenvif_queue *queue) +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) { return xenvif_rx_ring_slots_available(queue) || (queue->vif->stall_timeout && (xenvif_rx_queue_stalled(queue) || xenvif_rx_queue_ready(queue))) || - kthread_should_stop() || + (test_kthread && kthread_should_stop()) || queue->vif->disabled; } @@ -527,15 +534,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) { DEFINE_WAIT(wait); - if (xenvif_have_rx_work(queue)) + if (xenvif_have_rx_work(queue, true)) return; for (;;) { long ret; prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); - if (xenvif_have_rx_work(queue)) + if (xenvif_have_rx_work(queue, true)) break; + if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI, + &queue->eoi_pending) & + (NETBK_RX_EOI | NETBK_COMMON_EOI)) + xen_irq_lateeoi(queue->rx_irq, 0); + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); if (!ret) break; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index f533b7372d59..416305e6d093 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -713,12 +713,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev, return -ENOMEM; snprintf(node, maxlen, "%s/rate", dev->nodename); vif->credit_watch.node = node; + vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = xen_net_rate_changed; err = register_xenbus_watch(&vif->credit_watch); if (err) { pr_err("Failed to set watcher %s\n", vif->credit_watch.node); kfree(node); vif->credit_watch.node = NULL; + vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = NULL; } return err; @@ -765,6 +767,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, snprintf(node, maxlen, "%s/request-multicast-control", dev->otherend); vif->mcast_ctrl_watch.node = node; + vif->mcast_ctrl_watch.will_handle = NULL; vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed; err = register_xenbus_watch(&vif->mcast_ctrl_watch); if (err) { @@ -772,6 +775,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, vif->mcast_ctrl_watch.node); kfree(node); vif->mcast_ctrl_watch.node = NULL; + vif->mcast_ctrl_watch.will_handle = NULL; vif->mcast_ctrl_watch.callback = NULL; } return err; @@ -975,11 +979,15 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - hotplug_status_changed, - "%s/%s", dev->nodename, "hotplug-status"); - if (!err) + if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + NULL, hotplug_status_changed, + "%s/%s", dev->nodename, + "hotplug-status"); + if (err) + goto err; be->have_hotplug_status_watch = 1; + } netif_tx_wake_all_queues(be->vif->dev); diff --git 
a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7..88280057e032 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ +static void xennet_bus_close(struct xenbus_device *dev) +{ + int ret; + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { + xenbus_switch_state(dev, XenbusStateClosing); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + + do { + xenbus_switch_state(dev, XenbusStateClosed); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); - dev_dbg(&dev->dev, "%s\n", dev->nodename); - - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { - xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - - xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } - + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index a172a32aa9d9..3ea38ce86cc9 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -680,6 +680,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) return false; + if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE) + return false; + return true; } diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 91d4d5b28a7d..ba6c486d6465 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } diff 
--git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index be110d9cef02..310773a4ca66 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -293,8 +293,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info) if (ret < 0) return ret; - if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) + if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) { + release_firmware(fw->fw); return -EINVAL; + } memcpy(fw->date, fw->fw->data + 0x00, 12); fw->date[12] = '\0'; diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index e4f7fa00862d..2505abc8ef28 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -26,8 +26,8 @@ struct s3fwrn5_i2c_phy { struct i2c_client *i2c_dev; struct nci_dev *ndev; - unsigned int gpio_en; - unsigned int gpio_fw_wake; + int gpio_en; + int gpio_fw_wake; struct mutex mutex; diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index 60acdfd1cb8c..856a10c293f8 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, memcpy(atr_res->gbi, atr_req->gbi, gb_len); r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, gb_len); - if (r < 0) + if (r < 0) { + kfree_skb(skb); return r; + } } info->dep_info.curr_nfc_dep_pni = 0; diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 9642971e89ce..457854765983 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); - return rc; + goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c index 2581ab724c34..f8f75a504a58 100644 --- a/drivers/ntb/core.c +++ b/drivers/ntb/core.c @@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb) case NTB_TOPO_B2B_DSD: return NTB_PORT_SEC_DSD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_port_number); @@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) case NTB_TOPO_B2B_DSD: return NTB_PORT_PRI_USD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_peer_port_number); @@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void) bus_unregister(&ntb_bus); } module_exit(ntb_driver_exit); - diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 156c2a18a239..abb37659de34 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -1036,6 +1036,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, err_dma_mask: pci_clear_master(pdev); + pci_release_regions(pdev); err_pci_regions: pci_disable_device(pdev); err_pci_enable: diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index e9b7c2dfc730..5ce4766a6c9e 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -158,6 +158,8 @@ struct perf_peer { /* NTB connection setup service */ struct work_struct service; unsigned long sts; + + struct completion init_comp; }; #define to_peer_service(__work) \ container_of(__work, struct perf_peer, service) @@ -546,6 +548,7 @@ static int perf_setup_outbuf(struct perf_peer *peer) /* Initialization is finally done */ set_bit(PERF_STS_DONE, &peer->sts); + complete_all(&peer->init_comp); return 0; } @@ -556,7 
+559,7 @@ static void perf_free_inbuf(struct perf_peer *peer) return; (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); - dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size, + dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size, peer->inbuf, peer->inbuf_xlat); peer->inbuf = NULL; } @@ -585,8 +588,9 @@ static int perf_setup_inbuf(struct perf_peer *peer) perf_free_inbuf(peer); - peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size, - &peer->inbuf_xlat, GFP_KERNEL); + peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev, + peer->inbuf_size, &peer->inbuf_xlat, + GFP_KERNEL); if (!peer->inbuf) { dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n", &peer->inbuf_size); @@ -636,6 +640,7 @@ static void perf_service_work(struct work_struct *work) perf_setup_outbuf(peer); if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) { + init_completion(&peer->init_comp); clear_bit(PERF_STS_DONE, &peer->sts); if (test_bit(0, &peer->perf->busy_flag) && peer == peer->perf->test_peer) { @@ -652,7 +657,7 @@ static int perf_init_service(struct perf_ctx *perf) { u64 mask; - if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) { + if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) { dev_err(&perf->ntb->dev, "Not enough memory windows\n"); return -EINVAL; } @@ -1051,8 +1056,9 @@ static int perf_submit_test(struct perf_peer *peer) struct perf_thread *pthr; int tidx, ret; - if (!test_bit(PERF_STS_DONE, &peer->sts)) - return -ENOLINK; + ret = wait_for_completion_interruptible(&peer->init_comp); + if (ret < 0) + return ret; if (test_and_set_bit_lock(0, &perf->busy_flag)) return -EBUSY; @@ -1418,10 +1424,21 @@ static int perf_init_peers(struct perf_ctx *perf) peer->gidx = pidx; } INIT_WORK(&peer->service, perf_service_work); + init_completion(&peer->init_comp); } if (perf->gidx == -1) perf->gidx = pidx; + /* + * Hardware with only two ports may not have unique port + * numbers. In this case, the gidxs should all be zero. 
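+ * A back-to-back link where both ntb_port_number() and + * ntb_peer_port_number() return 0 is the typical case; forcing the + * global indexes to zero keeps both peers consistent.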
+ */ + if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 && + ntb_peer_port_number(perf->ntb, 0) == 0) { + perf->gidx = 0; + perf->peers[0].gidx = 0; + } + for (pidx = 0; pidx < perf->pcnt; pidx++) { ret = perf_setup_peer_mw(&perf->peers[pidx]); if (ret) @@ -1517,4 +1534,3 @@ static void __exit perf_exit(void) destroy_workqueue(perf_wq); } module_exit(perf_exit); - diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 65865e460ab8..18d00eec7b02 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp) link = ntb_link_is_up(pp->ntb, NULL, NULL); /* Find next available peer */ - if (link & pp->nmask) { + if (link & pp->nmask) pidx = __ffs64(link & pp->nmask); - out_db = BIT_ULL(pidx + 1); - } else if (link & pp->pmask) { + else if (link & pp->pmask) pidx = __ffs64(link & pp->pmask); - out_db = BIT_ULL(pidx); - } else { + else return -ENODEV; - } + + out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx)); spin_lock(&pp->lock); pp->out_pidx = pidx; @@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp) break; } - pp->in_db = BIT_ULL(pidx); + pp->in_db = BIT_ULL(lport); pp->pmask = GENMASK_ULL(pidx, 0) >> 1; pp->nmask = GENMASK_ULL(pcnt - 1, pidx); @@ -435,4 +434,3 @@ static void __exit pp_exit(void) debugfs_remove_recursive(pp_dbgfs_topdir); } module_exit(pp_exit); - diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index d592c0ffbd19..311d6ab8d016 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf, buf[1] = '\n'; buf[2] = '\0'; - return simple_read_from_buffer(ubuf, size, offp, buf, 3); + return simple_read_from_buffer(ubuf, size, offp, buf, 2); } static TOOL_FOPS_RDWR(tool_peer_link_fops, @@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, inmw->size = min_t(resource_size_t, req_size, size); inmw->size = round_up(inmw->size, addr_align); inmw->size = round_up(inmw->size, size_align); - inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size, + inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size, &inmw->dma_base, GFP_KERNEL); if (!inmw->mm_base) return -ENOMEM; @@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, return 0; err_free_dma: - dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base, + dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base, inmw->dma_base); inmw->mm_base = NULL; inmw->dma_base = 0; @@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx) if (inmw->mm_base != NULL) { ntb_mw_clear_trans(tc->ntb, pidx, widx); - dma_free_coherent(&tc->ntb->dev, inmw->size, + dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base, inmw->dma_base); } @@ -1690,4 +1690,3 @@ static void __exit tool_exit(void) debugfs_remove_recursive(tool_dbgfs_topdir); } module_exit(tool_exit); - diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index d47412dcdf38..5e5c6aafc070 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1010,8 +1010,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, return -EFAULT; } - if (!desc || (desc->out_num + desc->in_num == 0) || - !test_bit(cmd, &cmd_mask)) + if (!desc || + (desc->out_num + desc->in_num == 0) || + cmd > ND_CMD_CALL || + !test_bit(cmd, &cmd_mask)) return -ENOTTY; /* fail write commands (when 
read-only) */ diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 196aa44c4936..e0f411021c59 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -344,16 +344,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(state); -static ssize_t available_slots_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) { - struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); + struct device *dev; ssize_t rc; u32 nfree; if (!ndd) return -ENXIO; + dev = ndd->dev; nvdimm_bus_lock(dev); nfree = nd_label_nfree(ndd); if (nfree - 1 > nfree) { @@ -365,6 +365,18 @@ static ssize_t available_slots_show(struct device *dev, nvdimm_bus_unlock(dev); return rc; } + +static ssize_t available_slots_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t rc; + + nd_device_lock(dev); + rc = __available_slots_show(dev_get_drvdata(dev), buf); + nd_device_unlock(dev); + + return rc; +} static DEVICE_ATTR_RO(available_slots); __weak ssize_t security_show(struct device *dev, diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 47a4828b8b31..9251441fd8a3 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -980,6 +980,15 @@ static int __blk_label_update(struct nd_region *nd_region, } } + /* release slots associated with any invalidated UUIDs */ + mutex_lock(&nd_mapping->lock); + list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) + if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) { + reap_victim(nd_mapping, label_ent); + list_move(&label_ent->list, &list); + } + mutex_unlock(&nd_mapping->lock); + /* * Find the resource associated with the first label in the set * per the v1.2 namespace specification. 
@@ -999,8 +1008,10 @@ static int __blk_label_update(struct nd_region *nd_region, if (is_old_resource(res, old_res_list, old_num_resources)) continue; /* carry-over */ slot = nd_label_alloc_slot(ndd); - if (slot == UINT_MAX) + if (slot == UINT_MAX) { + rc = -ENXIO; goto abort; + } dev_dbg(ndd->dev, "allocated: %d\n", slot); nd_label = to_label(ndd, slot); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index ef423ba1a711..b8236a9e8750 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1142,6 +1142,11 @@ int nvdimm_has_flush(struct nd_region *nd_region) || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) return -ENXIO; + /* Test if an explicit flush function is defined */ + if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) + return 1; + + /* Test if any flush hints for the region are available */ for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; @@ -1152,8 +1157,8 @@ int nvdimm_has_flush(struct nd_region *nd_region) } /* - * The platform defines dimm devices without hints, assume - * platform persistence mechanism like ADR + * The platform defines dimm devices without hints or an explicit flush; + * assume a platform persistence mechanism like ADR */ return 0; } diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 89b85970912d..35d265014e1e 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) else dev_dbg(&nvdimm->dev, "overwrite completed\n"); - if (nvdimm->sec.overwrite_state) - sysfs_notify_dirent(nvdimm->sec.overwrite_state); + /* + * Mark the overwrite work done and update dimm security flags, + * then send a sysfs event notification to wake up userspace + * poll threads to pick up the changed state.
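+ * The notification goes out only after the flags are refreshed so a + * woken poller reads the final state; put_device() comes last since it + * may drop the final reference to the dimm device.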
+ */ nvdimm->sec.overwrite_tmo = 0; clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags); clear_bit(NDD_WORK_PENDING, &nvdimm->flags); - put_device(&nvdimm->dev); nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + if (nvdimm->sec.overwrite_state) + sysfs_notify_dirent(nvdimm->sec.overwrite_state); + put_device(&nvdimm->dev); } void nvdimm_security_overwrite_query(struct work_struct *work) diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 2b36f052bfb9..7b3f6555e67b 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -64,6 +64,7 @@ config NVME_TCP depends on INET depends on BLK_DEV_NVME select NVME_FABRICS + select CRYPTO select CRYPTO_CRC32C help This provides support for the NVMe over Fabrics protocol using diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9615ba4f8b1c..cd7521fb50ab 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -6,6 +6,7 @@ #include <linux/blkdev.h> #include <linux/blk-mq.h> +#include <linux/compat.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/hdreg.h> @@ -287,11 +288,8 @@ void nvme_complete_rq(struct request *req) nvme_req(req)->ctrl->comp_seen = true; if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - } if (!blk_queue_dying(req->q)) { nvme_retry_req(req); @@ -314,11 +312,32 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; blk_mq_complete_request(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); +void nvme_cancel_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->tagset) { + blk_mq_tagset_busy_iter(ctrl->tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_tagset); + +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) +{ + if (ctrl->admin_tagset) { + blk_mq_tagset_busy_iter(ctrl->admin_tagset, + nvme_cancel_request, ctrl); + blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); + } +} +EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); + bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state) { @@ -437,7 +456,6 @@ static void nvme_free_ns_head(struct kref *ref) nvme_mpath_remove_disk(head); ida_simple_remove(&head->subsys->ns_ida, head->instance); - list_del_init(&head->entry); cleanup_srcu_struct(&head->srcu); nvme_put_subsystem(head->subsys); kfree(head); @@ -633,7 +651,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, } __rq_for_each_bio(bio, req) { - u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); + u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; if (n < segments) { @@ -674,7 +692,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); cmnd->write_zeroes.slba = - cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); + cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); 
cmnd->write_zeroes.control = 0; @@ -698,7 +716,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); - cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); + cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) @@ -1037,6 +1055,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); +/* + * In NVMe 1.0 the CNS field was just a binary controller or namespace + * flag, thus sending any new CNS opcodes has a big chance of not working. + * Qemu unfortunately had that bug after reporting a 1.1 version compliance + * (but not for any later version). + */ +static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +{ + if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) + return ctrl->vs < NVME_VS(1, 2, 0); + return ctrl->vs < NVME_VS(1, 1, 0); +} + static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) { struct nvme_command c = { }; @@ -1066,6 +1097,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -1076,8 +1110,11 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, NVME_IDENTIFY_DATA_SIZE); - if (status) + if (status) { + dev_warn(ctrl->device, + "Identify Descriptors failed (%d)\n", status); goto free_data; + } for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { struct nvme_ns_id_desc *cur = data + pos; @@ -1250,6 +1287,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); } +/* + * Convert integer values from ioctl structures to user pointers, silently + * ignoring the upper bits in the compat case to match behaviour of 32-bit + * kernels. + */ +static void __user *nvme_to_user_ptr(uintptr_t ptrval) +{ + if (in_compat_syscall()) + ptrval = (compat_uptr_t)ptrval; + return (void __user *)ptrval; +} + static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) { struct nvme_user_io io; @@ -1272,8 +1321,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) } length = (io.nblocks + 1) << ns->lba_shift; - meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(uintptr_t)io.metadata; + + if ((io.control & NVME_RW_PRINFO_PRACT) && + ns->ms == sizeof(struct t10_pi_tuple)) { + /* + * Protection information is stripped/inserted by the + * controller. 
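+ * With PRACT set and a metadata size equal to the PI tuple size, no + * metadata buffer crosses the host interface, so a user-supplied + * metadata pointer has nothing to map and is rejected below.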
+ */ + if (nvme_to_user_ptr(io.metadata)) + return -EINVAL; + meta_len = 0; + metadata = NULL; + } else { + meta_len = (io.nblocks + 1) * ns->ms; + metadata = nvme_to_user_ptr(io.metadata); + } if (ns->ext) { length += meta_len; @@ -1296,7 +1358,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.appmask = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, - (void __user *)(uintptr_t)io.addr, length, + nvme_to_user_ptr(io.addr), length, metadata, meta_len, lower_32_bits(io.slba), NULL, 0); } @@ -1416,9 +1478,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, - (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, - cmd.metadata_len, 0, &result, timeout); + nvme_to_user_ptr(cmd.addr), cmd.data_len, + nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, + 0, &result, timeout); nvme_passthru_end(ctrl, effects); if (status >= 0) { @@ -1463,8 +1525,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, - (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, + nvme_to_user_ptr(cmd.addr), cmd.data_len, + nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 0, &cmd.result, timeout); nvme_passthru_end(ctrl, effects); @@ -1657,12 +1719,6 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) } #endif /* CONFIG_BLK_DEV_INTEGRITY */ -static void nvme_set_chunk_size(struct nvme_ns *ns) -{ - u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9)); - blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); -} - static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) { struct nvme_ctrl *ctrl = ns->ctrl; @@ -1694,55 +1750,32 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) +/* + * Even though the NVMe spec explicitly states that MDTS is not applicable to + * Write Zeroes, we are cautious and limit the size to the controller's + * max_hw_sectors value, which is based on the MDTS field and possibly other + * limiting factors. + */ +static void nvme_config_write_zeroes(struct request_queue *q, + struct nvme_ctrl *ctrl) { - u32 max_sectors; - unsigned short bs = 1 << ns->lba_shift; - - if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || - (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) - return; - /* - * Even though NVMe spec explicitly states that MDTS is not - * applicable to the write-zeroes:- "The restriction does not apply to - * commands that do not transfer data between the host and the - * controller (e.g., Write Uncorrectable ro Write Zeroes command).". - * In order to be more cautious use controller's max_hw_sectors value - * to configure the maximum sectors for the write-zeroes which is - * configured based on the controller's MDTS field in the - * nvme_init_identify() if available. 
- */ - if (ns->ctrl->max_hw_sectors == UINT_MAX) - max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9; - else - max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; - - blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors); + if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && + !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) + blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors); } static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, struct nvme_id_ns *id, struct nvme_ns_ids *ids) { - int ret = 0; - memset(ids, 0, sizeof(*ids)); if (ctrl->vs >= NVME_VS(1, 1, 0)) memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); if (ctrl->vs >= NVME_VS(1, 2, 0)) memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); - if (ctrl->vs >= NVME_VS(1, 3, 0)) { - /* Don't treat error as fatal we potentially - * already have a NGUID or EUI-64 - */ - ret = nvme_identify_ns_descs(ctrl, nsid, ids); - if (ret) - dev_warn(ctrl->device, - "Identify Descriptors failed (%d)\n", ret); - if (ret > 0) - ret = 0; - } - return ret; + if (ctrl->vs >= NVME_VS(1, 3, 0)) + return nvme_identify_ns_descs(ctrl, nsid, ids); + return 0; } static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) @@ -1762,7 +1795,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) static void nvme_update_disk_info(struct gendisk *disk, struct nvme_ns *ns, struct nvme_id_ns *id) { - sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9); + sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); unsigned short bs = 1 << ns->lba_shift; u32 atomic_bs, phys_bs, io_opt; @@ -1815,7 +1848,7 @@ static void nvme_update_disk_info(struct gendisk *disk, set_capacity(disk, capacity); nvme_config_discard(disk, ns); - nvme_config_write_zeroes(disk, ns); + nvme_config_write_zeroes(disk->queue, ns->ctrl); if (id->nsattr & (1 << 0)) set_disk_ro(disk, true); @@ -1828,6 +1861,7 @@ static void nvme_update_disk_info(struct gendisk *disk, static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) { struct nvme_ns *ns = disk->private_data; + u32 iob; /* * If identify namespace failed, use default 512 byte block size so @@ -1836,7 +1870,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; if (ns->lba_shift == 0) ns->lba_shift = 9; - ns->noiob = le16_to_cpu(id->noiob); + + if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && + is_power_of_2(ns->ctrl->max_hw_sectors)) + iob = ns->ctrl->max_hw_sectors; + else + iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); + ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); /* the PI implementation requires metadata equal t10 pi tuple size */ @@ -1845,14 +1885,14 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) else ns->pi_type = 0; - if (ns->noiob) - nvme_set_chunk_size(ns); + if (iob) + blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob)); nvme_update_disk_info(disk, ns, id); #ifdef CONFIG_NVME_MULTIPATH if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); + nvme_mpath_update_disk_size(ns->head->disk); } #endif } @@ -2197,9 +2237,6 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } - if 
((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && - is_power_of_2(ctrl->max_hw_sectors)) - blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); blk_queue_virt_boundary(q, ctrl->page_size - 1); if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) vwc = true; @@ -2921,10 +2958,26 @@ static int nvme_dev_open(struct inode *inode, struct file *file) return -EWOULDBLOCK; } + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) { + nvme_put_ctrl(ctrl); + return -EINVAL; + } + file->private_data = ctrl; return 0; } +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; @@ -2987,6 +3040,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, + .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = nvme_dev_ioctl, }; @@ -3185,6 +3239,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + /* Can't delete non-created controllers */ + if (!ctrl->created) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3310,7 +3368,6 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, list_for_each_entry(h, &subsys->nsheads, entry) { if (nvme_ns_ids_valid(&new->ids) && - !list_empty(&h->list) && nvme_ns_ids_equal(&new->ids, &h->ids)) return -EINVAL; } @@ -3405,6 +3462,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, "IDs don't match for shared namespace %d\n", nsid); ret = -EINVAL; + nvme_put_ns_head(head); goto out_unlock; } } @@ -3559,10 +3617,14 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) return 0; out_put_disk: + /* prevent double queue cleanup */ + ns->disk->queue = NULL; put_disk(ns->disk); out_unlink_ns: mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) + list_del_init(&ns->head->entry); mutex_unlock(&ctrl->subsys->lock); nvme_put_ns_head(ns->head); out_free_id: @@ -3585,7 +3647,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) + list_del_init(&ns->head->entry); mutex_unlock(&ns->ctrl->subsys->lock); + synchronize_rcu(); /* guarantee not available in head->list */ nvme_mpath_clear_current_path(ns); synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ @@ -3733,8 +3798,7 @@ static void nvme_scan_work(struct work_struct *work) mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { + if (!nvme_ctrl_limited_cns(ctrl)) { if (!nvme_scan_ns_list(ctrl, nn)) goto out_free_id; } @@ -3979,6 +4043,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) nvme_queue_scan(ctrl); nvme_start_queues(ctrl); } + ctrl->created = true; } EXPORT_SYMBOL_GPL(nvme_start_ctrl); @@ -3996,7 +4061,7 @@ static void nvme_free_ctrl(struct device *dev) container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; - if (subsys && ctrl->instance != subsys->instance) + if (!subsys || ctrl->instance != subsys->instance) ida_simple_remove(&nvme_instance_ida, ctrl->instance); kfree(ctrl->effects); @@ -4069,6 +4134,7 @@ int 
nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, if (ret) goto out_release_instance; + nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); ctrl->cdev.owner = ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); @@ -4087,6 +4153,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, return 0; out_free_name: + nvme_put_ctrl(ctrl); kfree_const(ctrl->device->kobj.name); out_release_instance: ida_simple_remove(&nvme_instance_ida, ctrl->instance); @@ -4132,7 +4199,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_unfreeze); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; @@ -4143,6 +4210,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) break; } up_read(&ctrl->namespaces_rwsem); + return timeout; } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); @@ -4190,8 +4258,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_start_queues); - -void nvme_sync_queues(struct nvme_ctrl *ctrl) +void nvme_sync_io_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; @@ -4199,7 +4266,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl) list_for_each_entry(ns, &ctrl->namespaces, list) blk_sync_queue(ns->queue); up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_sync_io_queues); +void nvme_sync_queues(struct nvme_ctrl *ctrl) +{ + nvme_sync_io_queues(ctrl); if (ctrl->admin_q) blk_sync_queue(ctrl->admin_q); } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 74b8818ac9a1..3bb71f177dfd 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, struct nvme_request *req = nvme_req(rq); /* - * If we are in some state of setup or teardown only allow - * internally generated commands. + * Currently we have a problem sending passthru commands + * on the admin_q if the controller is not LIVE because we can't + * make sure that they are going out after the admin connect, + * controller enable and/or other commands in the initialization + * sequence. Until the controller is LIVE, fail with + * BLK_STS_RESOURCE so that they will be rescheduled. */ - if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD)) + if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) return false; /* @@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * which is required to set the queue live in the appropriate states.
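	 * For example, while CONNECTING only the Fabrics Connect command 	 * itself is allowed through by the switch below.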
*/ switch (ctrl->state) { - case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: - if (nvme_is_fabrics(req->cmd) && + if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && req->cmd->fabrics.fctype == nvme_fabrics_type_connect) return true; break; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 59474bd0c728..0d2c22cf12a0 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary || - !template->module) { + !template->max_dif_sgl_segments || !template->dma_boundary) { ret = -EINVAL; goto out_reghost_failed; } @@ -1609,7 +1608,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) sizeof(op->rsp_iu), DMA_FROM_DEVICE); if (opstate == FCPOP_STATE_ABORTED) - status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); + status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); else if (freq->status) { status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); dev_info(ctrl->ctrl.device, @@ -1741,7 +1740,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -1751,7 +1750,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); @@ -1821,6 +1820,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (!aen_op->fcp_req.private) @@ -2016,7 +2016,6 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); - struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2043,7 +2042,6 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); - module_put(lport->ops->module); } static void @@ -3071,15 +3069,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } - if (!try_module_get(lport->ops->module)) { - ret = -EUNATCH; - goto out_free_ctrl; - } - idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_mod_put; + goto out_free_ctrl; } ctrl->ctrl.opts = opts; @@ -3178,10 +3171,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto fail_ctrl; } - nvme_get_ctrl(&ctrl->ctrl); - if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { - nvme_put_ctrl(&ctrl->ctrl); dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule initial connect\n", ctrl->cnum); @@ -3206,6 +3196,7 @@ fail_ctrl: /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); /* Remove core ctrl ref. 
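	 * (The additional put added above presumably balances the 	 * reference that nvme_init_ctrl() now takes before cdev_device_add.)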
*/ nvme_put_ctrl(&ctrl->ctrl); @@ -3232,8 +3223,6 @@ out_free_queues: out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); -out_mod_put: - module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: @@ -3330,12 +3319,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != laddr.nn || - lport->localport.port_name != laddr.pn) + lport->localport.port_name != laddr.pn || + lport->localport.port_state != FC_OBJSTATE_ONLINE) continue; list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != raddr.nn || - rport->remoteport.port_name != raddr.pn) + rport->remoteport.port_name != raddr.pn || + rport->remoteport.port_state != FC_OBJSTATE_ONLINE) continue; /* if fail to get reference fall through. Will error */ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index aed6354cb271..855edc4e1244 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -3,6 +3,7 @@ * Copyright (c) 2017-2018 Christoph Hellwig. */ +#include <linux/backing-dev.h> #include <linux/moduleparam.h> #include <trace/events/block.h> #include "nvme.h" @@ -64,17 +65,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags; - spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -103,15 +99,17 @@ void nvme_failover_req(struct request *req) nvme_mpath_clear_current_path(ns); break; default: - /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. - */ - nvme_reset_ctrl(ns->ctrl); - break; + /* This was a non-ANA error so follow the normal error path. */ + return false; } + spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) @@ -235,7 +233,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, } for (ns = nvme_next_ns(head, old); - ns != old; + ns && ns != old; ns = nvme_next_ns(head, ns)) { if (nvme_path_is_disabled(ns)) continue; @@ -248,6 +246,17 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, fallback = ns; } + /* + * The loop above skips the current path for round-robin semantics. + * Fall back to the current path if either: + * - no other optimized path found and current is optimized, + * - no other usable path found and current is usable. 
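+	 * Example: if the current path is optimized and every sibling path +	 * is disabled, the loop finds no candidate and we return the +	 * current path instead of NULL.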
+ */ + if (!nvme_path_is_disabled(old) && + (old->ana_state == NVME_ANA_OPTIMIZED || + (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED))) + return old; + if (!fallback) return NULL; found = fallback; @@ -268,10 +277,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) struct nvme_ns *ns; ns = srcu_dereference(head->current_path[node], &head->srcu); - if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns) - ns = nvme_round_robin_path(head, node, ns); - if (unlikely(!ns || !nvme_path_is_optimized(ns))) - ns = __nvme_find_path(head, node); + if (unlikely(!ns)) + return __nvme_find_path(head, node); + + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) + return nvme_round_robin_path(head, node, ns); + if (unlikely(!nvme_path_is_optimized(ns))) + return __nvme_find_path(head, node); return ns; } @@ -318,7 +330,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, trace_block_bio_remap(bio->bi_disk->queue, bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); - ret = direct_make_request(bio); + ret = generic_make_request(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); @@ -413,15 +425,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - lockdep_assert_held(&ns->head->lock); - if (!head->disk) return; - if (!(head->disk->flags & GENHD_FL_UP)) + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); + mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; @@ -430,9 +441,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) __nvme_find_path(head, node); srcu_read_unlock(&head->srcu, srcu_idx); } + mutex_unlock(&head->lock); - synchronize_srcu(&ns->head->srcu); - kblockd_schedule_work(&ns->head->requeue_work); + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, @@ -483,14 +495,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - mutex_lock(&ns->head->lock); ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, @@ -510,7 +520,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (!nr_nsids) return 0; - down_write(&ctrl->namespaces_rwsem); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { unsigned nsid = le32_to_cpu(desc->nsids[n]); @@ -521,7 +531,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (++n == nr_nsids) break; } - up_write(&ctrl->namespaces_rwsem); + up_read(&ctrl->namespaces_rwsem); return 0; } @@ -640,31 +650,49 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { - struct nvme_ns *ns = data; + struct nvme_ana_group_desc *dst = data; - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { - nvme_update_ns_ana_state(desc, ns); - return -ENXIO; /* just break out of the loop */ - } + if (desc->grpid != dst->grpid) + 
return 0; - return 0; + *dst = *desc; + return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) { if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = id->anagrpid, + .state = 0, + }; + mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(id->anagrpid); - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } else { + /* group desc not found: trigger a re-read */ + set_bit(NVME_NS_ANA_PENDING, &ns->flags); + queue_work(nvme_wq, &ns->ctrl->ana_work); + } } else { - mutex_lock(&ns->head->lock); ns->ana_state = NVME_ANA_OPTIMIZED; nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); + } + + if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { + struct gendisk *disk = ns->head->disk; + + if (disk) + disk->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; } } @@ -679,6 +707,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); blk_cleanup_queue(head->disk->queue); + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * If device_add_disk wasn't called, prevent the + * disk release from putting a bogus reference on the + * request queue + */ + head->disk->queue = NULL; + } put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 2b51bb294081..1ab962f4db33 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -115,6 +115,13 @@ enum nvme_quirks { * Prevent tag overlap between queues */ NVME_QUIRK_SHARED_TAGS = (1 << 13), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. + */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* @@ -247,6 +254,7 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; + bool created; #ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */ @@ -346,6 +354,8 @@ struct nvme_ns_head { spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 struct nvme_ns __rcu *current_path[]; #endif }; @@ -375,7 +385,6 @@ struct nvme_ns { #define NVME_NS_REMOVING 0 #define NVME_NS_DEAD 1 #define NVME_NS_ANA_PENDING 2 - u16 noiob; struct nvme_fault_inject fault_inject; @@ -420,9 +429,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); } -static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) +/* + * Convert a 512B sector number to a device logical block number. + */ +static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector) { - return (sector >> (ns->lba_shift - 9)); + return sector >> (ns->lba_shift - SECTOR_SHIFT); +} + +/* + * Convert a device logical block number to a 512B sector number.
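+ * e.g. with a 4096-byte LBA format (lba_shift == 12) this shifts left + * by three, so LBA 1 maps to 512-byte sector 8.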
+ */ +static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba) +{ + return lba << (ns->lba_shift - SECTOR_SHIFT); } static inline void nvme_end_request(struct request *req, __le16 status, @@ -449,6 +469,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl) void nvme_complete_rq(struct request *req); bool nvme_cancel_request(struct request *req, void *data, bool reserved); +void nvme_cancel_tagset(struct nvme_ctrl *ctrl); +void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state); bool nvme_wait_reset(struct nvme_ctrl *ctrl); @@ -475,9 +497,10 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl); +void nvme_sync_io_queues(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze(struct nvme_ctrl *ctrl); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 @@ -522,7 +545,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -552,6 +575,16 @@ static inline void nvme_trace_bio_complete(struct request *req, req->bio, status); } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute subsys_attr_iopolicy; @@ -571,8 +604,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { @@ -626,6 +660,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 737072a44a4a..6d25aa4e3622 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -128,6 +128,9 @@ struct nvme_dev { dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) @@ -210,25 +213,14 @@ struct nvme_iod { struct scatterlist *sg; }; -static unsigned int max_io_queues(void) +static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) { - return 
num_possible_cpus() + write_queues + poll_queues; -} - -static unsigned int max_queue_count(void) -{ - /* IO queues + admin queue */ - return 1 + max_io_queues(); -} - -static inline unsigned int nvme_dbbuf_size(u32 stride) -{ - return (max_queue_count() * 8 * stride); + return dev->nr_allocated_queues * 8 * dev->db_stride; } static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) { - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); + unsigned int mem_size = nvme_dbbuf_size(dev); if (dev->dbbuf_dbs) return 0; @@ -253,7 +245,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) static void nvme_dbbuf_dma_free(struct nvme_dev *dev) { - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); + unsigned int mem_size = nvme_dbbuf_size(dev); if (dev->dbbuf_dbs) { dma_free_coherent(dev->dev, mem_size, @@ -279,9 +271,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev, nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; } +static void nvme_dbbuf_free(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return; + + nvmeq->dbbuf_sq_db = NULL; + nvmeq->dbbuf_cq_db = NULL; + nvmeq->dbbuf_sq_ei = NULL; + nvmeq->dbbuf_cq_ei = NULL; +} + static void nvme_dbbuf_set(struct nvme_dev *dev) { struct nvme_command c; + unsigned int i; if (!dev->dbbuf_dbs) return; @@ -295,6 +299,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev) dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); /* Free memory and continue on */ nvme_dbbuf_dma_free(dev); + + for (i = 1; i <= dev->online_queues; i++) + nvme_dbbuf_free(&dev->queues[i]); } } @@ -521,50 +528,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) return true; } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_free_prps(struct nvme_dev *dev, struct request *req) { - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1; - dma_addr_t dma_addr = iod->first_dma, next_dma_addr; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; int i; - if (iod->dma_len) { - dma_unmap_page(dev->dev, dma_addr, iod->dma_len, - rq_dma_dir(req)); - return; + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; } - WARN_ON_ONCE(!iod->nents); +} + +static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) +{ + const int last_sg = SGES_PER_PAGE - 1; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + + for (i = 0; i < iod->npages; i++) { + struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); + + dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); + dma_addr = next_dma_addr; + } + +} + +static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); if (is_pci_p2pdma_page(sg_page(iod->sg))) pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); else dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); +} +static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - if (iod->npages == 0) - dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], - dma_addr); - - for (i = 0; i < iod->npages; i++) { - void *addr = nvme_pci_iod_list(req)[i]; - - if 
(iod->use_sgl) { - struct nvme_sgl_desc *sg_list = addr; - - next_dma_addr = - le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); - } else { - __le64 *prp_list = addr; - - next_dma_addr = le64_to_cpu(prp_list[last_prp]); - } - - dma_pool_free(dev->prp_page_pool, addr, dma_addr); - dma_addr = next_dma_addr; + if (iod->dma_len) { + dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; } + WARN_ON_ONCE(!iod->nents); + + nvme_unmap_sg(dev, req); + if (iod->npages == 0) + dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], + iod->first_dma); + else if (iod->use_sgl) + nvme_free_sgls(dev, req); + else + nvme_free_prps(dev, req); mempool_free(iod->sg, dev->iod_mempool); } @@ -641,7 +669,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) - return BLK_STS_RESOURCE; + goto free_prps; list[iod->npages++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); @@ -661,14 +689,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } - done: cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); - return BLK_STS_OK; - - bad_sgl: +free_prps: + nvme_free_prps(dev, req); + return BLK_STS_RESOURCE; +bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), iod->nents); @@ -740,7 +768,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); if (!sg_list) - return BLK_STS_RESOURCE; + goto free_sgls; i = 0; nvme_pci_iod_list(req)[iod->npages++] = sg_list; @@ -753,6 +781,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, } while (--entries > 0); return BLK_STS_OK; +free_sgls: + nvme_free_sgls(dev, req); + return BLK_STS_RESOURCE; } static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, @@ -807,7 +838,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, return nvme_setup_prp_simple(dev, req, &cmnd->rw, &bv); - if (iod->nvmeq->qid && + if (iod->nvmeq->qid && sgl_threshold && dev->ctrl.sgls & ((1 << 0) | (1 << 1))) return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv); @@ -821,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); iod->nents = blk_rq_map_sg(req->q, req, iod->sg); if (!iod->nents) - goto out; + goto out_free_sg; if (is_pci_p2pdma_page(sg_page(iod->sg))) nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, @@ -830,16 +861,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); if (!nr_mapped) - goto out; + goto out_free_sg; iod->use_sgl = nvme_pci_use_sgls(dev, req); if (iod->use_sgl) ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); else ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); -out: if (ret != BLK_STS_OK) - nvme_unmap_data(dev, req); + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + nvme_unmap_sg(dev, req); +out_free_sg: + mempool_free(iod->sg, dev->iod_mempool); return ret; } @@ -1011,13 +1047,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; struct request *req; - if 
(unlikely(cqe->command_id >= nvmeq->q_depth)) { - dev_warn(nvmeq->dev->ctrl.device, - "invalid id %d completed on queue %d\n", - cqe->command_id, le16_to_cpu(cqe->sq_id)); - return; - } - /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to @@ -1032,6 +1061,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) } req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); + if (unlikely(!req)) { + dev_warn(nvmeq->dev->ctrl.device, + "invalid id %d completed on queue %d\n", + cqe->command_id, le16_to_cpu(cqe->sq_id)); + return; + } + nvme_eh_io_error(req, le16_to_cpu(cqe->status) >> 1); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); nvme_end_request(req, cqe->status, cqe->result); @@ -1348,8 +1384,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, true); nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, true); return BLK_EH_DONE; case NVME_CTRL_RESETTING: return BLK_EH_RESET_TIMER; @@ -1366,10 +1402,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, false); nvme_reset_ctrl(&dev->ctrl); - nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; } @@ -1692,7 +1728,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; dev->admin_tagset.timeout = ADMIN_TIMEOUT; - dev->admin_tagset.numa_node = dev_to_node(dev->dev); + dev->admin_tagset.numa_node = dev->ctrl.numa_node; dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; dev->admin_tagset.driver_data = dev; @@ -1768,6 +1804,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) if (result) return result; + dev->ctrl.numa_node = dev_to_node(dev->dev); + nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -2096,7 +2134,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) { struct nvme_dev *dev = affd->priv; - unsigned int nr_read_queues; + unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; /* * If there is no interrupt available for queues, ensure that @@ -2112,12 +2150,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) if (!nrirqs) { nrirqs = 1; nr_read_queues = 0; - } else if (nrirqs == 1 || !write_queues) { + } else if (nrirqs == 1 || !nr_write_queues) { nr_read_queues = 0; - } else if (write_queues >= nrirqs) { + } else if (nr_write_queues >= nrirqs) { nr_read_queues = 1; } else { - nr_read_queues = nrirqs - write_queues; + nr_read_queues = nrirqs - nr_write_queues; } dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; @@ -2141,7 +2179,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) * Poll queues don't need interrupts, but we need at least one IO * queue left over for non-polled IO.
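	 * e.g. if dev->nr_poll_queues covers all I/O queues, the code below 	 * clamps the polled set to nr_io_queues - 1 and keeps one 	 * interrupt-driven queue.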
*/ - this_p_queues = poll_queues; + this_p_queues = dev->nr_poll_queues; if (this_p_queues >= nr_io_queues) { this_p_queues = nr_io_queues - 1; irq_queues = 1; @@ -2171,14 +2209,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) __nvme_disable_io_queues(dev, nvme_admin_delete_cq); } +static unsigned int nvme_max_io_queues(struct nvme_dev *dev) +{ + return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; +} + static int nvme_setup_io_queues(struct nvme_dev *dev) { struct nvme_queue *adminq = &dev->queues[0]; struct pci_dev *pdev = to_pci_dev(dev->dev); - int result, nr_io_queues; + unsigned int nr_io_queues; unsigned long size; + int result; - nr_io_queues = max_io_queues(); + /* + * Sample the module parameters once at reset time so that we have + * stable values to work with. + */ + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; /* * If tags are shared with admin queue (Apple bug), then @@ -2186,6 +2235,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) nr_io_queues = 1; + else + nr_io_queues = min(nvme_max_io_queues(dev), + dev->nr_allocated_queues - 1); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) @@ -2342,7 +2394,7 @@ static void nvme_dev_add(struct nvme_dev *dev) if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; dev->tagset.timeout = NVME_IO_TIMEOUT; - dev->tagset.numa_node = dev_to_node(dev->dev); + dev->tagset.numa_node = dev->ctrl.numa_node; dev->tagset.queue_depth = min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; dev->tagset.cmd_size = sizeof(struct nvme_iod); @@ -2860,8 +2912,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!dev) return -ENOMEM; - dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), - GFP_KERNEL, node); + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; + dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; + dev->queues = kcalloc_node(dev->nr_allocated_queues, + sizeof(struct nvme_queue), GFP_KERNEL, node); if (!dev->queues) goto free; @@ -2907,7 +2962,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); nvme_reset_ctrl(&dev->ctrl); - nvme_get_ctrl(&dev->ctrl); async_schedule(nvme_async_probe, dev); return 0; @@ -3031,9 +3085,15 @@ static int nvme_suspend(struct device *dev) * the PCI bus layer to put it into D3 in order to take the PCIe link * down, so as to allow the platform to achieve its minimum low-power * state (which may not be possible if the link is up). + * + * If a host memory buffer is enabled, shut down the device as the NVMe + * specification allows the device to access the host memory buffer in + * host DRAM from all power states, but hosts will fail access to DRAM + * during S3. 
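+	 * Hence the ndev->nr_host_mem_descs check below sends HMB-enabled +	 * devices down the full disable/reset path instead.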
*/ if (pm_suspend_via_firmware() || !ctrl->npss || !pcie_aspm_enabled(pdev) || + ndev->nr_host_mem_descs || (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) return nvme_disable_prepare_reset(ndev, true); @@ -3165,14 +3225,18 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | - NVME_QUIRK_MEDIUM_PRIO_SQ }, + NVME_QUIRK_MEDIUM_PRIO_SQ | + NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ @@ -3182,7 +3246,14 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_DISABLE_WRITE_ZEROES| + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ @@ -3194,7 +3265,11 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 73e8475ddc8a..b8c0f75bfb7b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -451,7 +451,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) * Spread I/O queues completion vectors according their queue index. * Admin queues can always go on completion vector 0. */ - comp_vector = idx == 0 ? idx : idx - 1; + comp_vector = (idx == 0 ? 
idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ if (nvme_rdma_poll_queue(queue)) @@ -666,8 +666,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) return ret; ctrl->ctrl.queue_count = nr_io_queues + 1; - if (ctrl->ctrl.queue_count < 2) - return 0; + if (ctrl->ctrl.queue_count < 2) { + dev_err(ctrl->ctrl.device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); @@ -768,6 +771,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; @@ -834,12 +838,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, error = nvme_init_identify(&ctrl->ctrl); if (error) - goto out_stop_queue; + goto out_quiesce_queue; return 0; +out_quiesce_queue: + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); out_stop_queue: nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->ctrl.admin_q); @@ -890,18 +898,36 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) ret = PTR_ERR(ctrl->ctrl.connect_q); goto out_free_tag_set; } - } else { - blk_mq_update_nr_hw_queues(&ctrl->tag_set, - ctrl->ctrl.queue_count - 1); } ret = nvme_rdma_start_io_queues(ctrl); if (ret) goto out_cleanup_connect_q; + if (!new) { + nvme_start_queues(&ctrl->ctrl); + if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. 
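+			 * The out_wait_freeze_timed_out label below unwinds the +			 * queues that were already started before the error is +			 * returned.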
+ */ + ret = -ENODEV; + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, + ctrl->ctrl.queue_count - 1); + nvme_unfreeze(&ctrl->ctrl); + } + return 0; +out_wait_freeze_timed_out: + nvme_stop_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); out_cleanup_connect_q: + nvme_cancel_tagset(&ctrl->ctrl); if (new) blk_cleanup_queue(ctrl->ctrl.connect_q); out_free_tag_set: @@ -916,6 +942,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) { blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, @@ -931,7 +958,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { + nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); if (ctrl->ctrl.tagset) { blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, @@ -1034,10 +1063,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) return 0; destroy_io: - if (ctrl->ctrl.queue_count > 1) + if (ctrl->ctrl.queue_count > 1) { + nvme_stop_queues(&ctrl->ctrl); + nvme_sync_io_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + nvme_cancel_tagset(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, new); + } destroy_admin: + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_cancel_admin_tagset(&ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl, new); return ret; } @@ -1090,6 +1127,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); queue_work(nvme_reset_wq, &ctrl->err_work); } @@ -1496,6 +1534,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } + /* sanity checking for received data length */ + if (unlikely(wc->byte_len < len)) { + dev_err(queue->ctrl->ctrl.device, + "Unexpected nvme completion length(%d)\n", wc->byte_len); + nvme_rdma_error_recovery(queue->ctrl); + return; + } + ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); /* * AEN requests are special as they don't time out and can @@ -1655,7 +1701,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, complete(&queue->cm_done); return 0; case RDMA_CM_EVENT_REJECTED: - nvme_rdma_destroy_queue_ib(queue); cm_error = nvme_rdma_conn_rejected(queue, ev); break; case RDMA_CM_EVENT_ROUTE_ERROR: @@ -1693,6 +1738,18 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +static void nvme_rdma_complete_timed_out(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + + nvme_rdma_stop_queue(queue); + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } +} + static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq, bool reserved) { @@ -1703,29 +1760,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved) dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", rq->tag, nvme_rdma_queue_idx(queue)); - /* - * Restart the timer if a controller reset is already scheduled. 
Any - * timed out commands would be handled before entering the connecting - * state. - */ - if (ctrl->ctrl.state == NVME_CTRL_RESETTING) - return BLK_EH_RESET_TIMER; - if (ctrl->ctrl.state != NVME_CTRL_LIVE) { /* - * Teardown immediately if controller times out while starting - * or we are already started error recovery. all outstanding - * requests are completed on shutdown, so we return BLK_EH_DONE. + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. */ - flush_work(&ctrl->err_work); - nvme_rdma_teardown_io_queues(ctrl, false); - nvme_rdma_teardown_admin_queue(ctrl, false); + nvme_rdma_complete_timed_out(rq); return BLK_EH_DONE; } - dev_warn(ctrl->ctrl.device, "starting error recovery\n"); + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. + */ nvme_rdma_error_recovery(ctrl); - return BLK_EH_RESET_TIMER; } @@ -2047,8 +2104,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); - nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); @@ -2058,6 +2113,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 244984420b41..718152adc625 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -164,16 +164,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req) static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req) { struct request *rq; - unsigned int bytes; if (unlikely(nvme_tcp_async_req(req))) return false; /* async events don't have a request */ rq = blk_mq_rq_from_pdu(req); - bytes = blk_rq_payload_bytes(rq); - return rq_data_dir(rq) == WRITE && bytes && - bytes <= nvme_tcp_inline_data_size(req->queue); + return rq_data_dir(rq) == WRITE && req->data_len && + req->data_len <= nvme_tcp_inline_data_size(req->queue); } static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req) @@ -188,7 +186,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req) static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req) { - return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset, + return min_t(size_t, iov_iter_single_seg_count(&req->iter), req->pdu_len - req->pdu_sent); } @@ -422,6 +420,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->device, "starting error recovery\n"); queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); } @@ -513,6 +512,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, req->pdu_len = le32_to_cpu(pdu->r2t_length); req->pdu_sent = 0; + if (unlikely(!req->pdu_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a 
bug...\n", + rq->tag, req->pdu_len); + return -EPROTO; + } + if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { dev_err(queue->ctrl->ctrl.device, "req %d r2t len %u exceeded data len %u (%zu sent)\n", @@ -786,11 +792,11 @@ static void nvme_tcp_data_ready(struct sock *sk) { struct nvme_tcp_queue *queue; - read_lock(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue && queue->rd_enabled)) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } static void nvme_tcp_write_space(struct sock *sk) @@ -810,7 +816,7 @@ static void nvme_tcp_state_change(struct sock *sk) { struct nvme_tcp_queue *queue; - read_lock(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (!queue) goto done; @@ -832,7 +838,7 @@ static void nvme_tcp_state_change(struct sock *sk) queue->state_change(sk); done: - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) @@ -861,12 +867,11 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) else flags |= MSG_MORE; - /* can't zcopy slab pages */ - if (unlikely(PageSlab(page))) { - ret = sock_no_sendpage(queue->sock, page, offset, len, + if (sendpage_ok(page)) { + ret = kernel_sendpage(queue->sock, page, offset, len, flags); } else { - ret = kernel_sendpage(queue->sock, page, offset, len, + ret = sock_no_sendpage(queue->sock, page, offset, len, flags); } if (ret <= 0) @@ -1321,6 +1326,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } } + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + queue->sock->sk->sk_allocation = GFP_ATOMIC; if (!qid) n = 0; @@ -1437,7 +1445,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) return; - __nvme_tcp_stop_queue(queue); } @@ -1505,6 +1512,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) { if (to_tcp_ctrl(ctrl)->async_req.pdu) { + cancel_work_sync(&ctrl->async_event_work); nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); to_tcp_ctrl(ctrl)->async_req.pdu = NULL; } @@ -1642,8 +1650,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) return ret; ctrl->queue_count = nr_io_queues + 1; - if (ctrl->queue_count < 2) - return 0; + if (ctrl->queue_count < 2) { + dev_err(ctrl->device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } dev_info(ctrl->device, "creating %d I/O queues.\n", nr_io_queues); @@ -1683,18 +1694,36 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - } else { - blk_mq_update_nr_hw_queues(ctrl->tagset, - ctrl->queue_count - 1); } ret = nvme_tcp_start_io_queues(ctrl); if (ret) goto out_cleanup_connect_q; + if (!new) { + nvme_start_queues(ctrl); + if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. 
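+			 * nvme_wait_freeze_timeout() bounds the wait by +			 * NVME_IO_TIMEOUT, so an unresponsive controller cannot +			 * stall the reconnect forever.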
+ */ + ret = -ENODEV; + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->tagset, + ctrl->queue_count - 1); + nvme_unfreeze(ctrl); + } + return 0; +out_wait_freeze_timed_out: + nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); out_cleanup_connect_q: + nvme_cancel_tagset(ctrl); if (new) blk_cleanup_queue(ctrl->connect_q); out_free_tag_set: @@ -1756,12 +1785,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) error = nvme_init_identify(ctrl); if (error) - goto out_stop_queue; + goto out_quiesce_queue; return 0; +out_quiesce_queue: + blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); out_stop_queue: nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->admin_q); @@ -1780,6 +1813,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, bool remove) { blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); if (ctrl->admin_tagset) { blk_mq_tagset_busy_iter(ctrl->admin_tagset, @@ -1796,7 +1830,10 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, { if (ctrl->queue_count <= 1) return; + blk_mq_quiesce_queue(ctrl->admin_q); + nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); if (ctrl->tagset) { blk_mq_tagset_busy_iter(ctrl->tagset, @@ -1871,10 +1908,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) return 0; destroy_io: - if (ctrl->queue_count > 1) + if (ctrl->queue_count > 1) { + nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); + nvme_cancel_tagset(ctrl); nvme_tcp_destroy_io_queues(ctrl, new); + } destroy_admin: + blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); + nvme_cancel_admin_tagset(ctrl); nvme_tcp_destroy_admin_queue(ctrl, new); return ret; } @@ -2044,40 +2089,52 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) nvme_tcp_queue_request(&ctrl->async_req); } +static void nvme_tcp_complete_timed_out(struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; + + nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } +} + static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq, bool reserved) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; struct nvme_tcp_cmd_pdu *pdu = req->pdu; - /* - * Restart the timer if a controller reset is already scheduled. Any - * timed out commands would be handled before entering the connecting - * state. - */ - if (ctrl->ctrl.state == NVME_CTRL_RESETTING) - return BLK_EH_RESET_TIMER; - - dev_warn(ctrl->ctrl.device, + dev_warn(ctrl->device, "queue %d: timeout request %#x type %d\n", nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); - if (ctrl->ctrl.state != NVME_CTRL_LIVE) { + if (ctrl->state != NVME_CTRL_LIVE) { /* - * Teardown immediately if controller times out while starting - * or we are already started error recovery. all outstanding - * requests are completed on shutdown, so we return BLK_EH_DONE. 
+ * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. */ - flush_work(&ctrl->err_work); - nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); - nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); + nvme_tcp_complete_timed_out(rq); return BLK_EH_DONE; } - dev_warn(ctrl->ctrl.device, "starting error recovery\n"); - nvme_tcp_error_recovery(&ctrl->ctrl); - + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. + */ + nvme_tcp_error_recovery(ctrl); return BLK_EH_RESET_TIMER; } @@ -2090,7 +2147,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, c->common.flags |= NVME_CMD_SGL_METABUF; - if (rq_data_dir(rq) == WRITE && req->data_len && + if (!blk_rq_nr_phys_segments(rq)) + nvme_tcp_set_sg_null(c); + else if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(queue)) nvme_tcp_set_sg_inline(queue, c, req->data_len); else @@ -2117,7 +2176,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; - req->data_len = blk_rq_payload_bytes(rq); + req->data_len = blk_rq_nr_phys_segments(rq) ? + blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; if (rq_data_dir(rq) == WRITE && @@ -2359,8 +2419,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); - nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); mutex_unlock(&nvme_tcp_ctrl_mutex); @@ -2370,6 +2428,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 57a4062cbb59..ee81d94fe810 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -369,6 +369,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -378,6 +381,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); @@ -872,8 +878,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->error_loc = NVMET_NO_ERROR_LOC; req->error_slba = 0; - trace_nvmet_req_init(req, req->cmd); - /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); @@ -907,6 +911,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, if (status) goto fail; + trace_nvmet_req_init(req, req->cmd); + if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { status = 
NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; @@ -1025,9 +1031,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) { lockdep_assert_held(&ctrl->lock); - if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || - nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || - nvmet_cc_mps(ctrl->cc) != 0 || + /* + * Only I/O controllers should verify iosqes,iocqes. + * Strictly speaking, the spec says a discovery controller + * should verify iosqes,iocqes are zeroed, however that + * would break backwards compatibility, so don't enforce it. + */ + if (ctrl->subsys->type != NVME_NQN_DISC && + (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || + nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { + ctrl->csts = NVME_CSTS_CFS; + return; + } + + if (nvmet_cc_mps(ctrl->cc) != 0 || nvmet_cc_ams(ctrl->cc) != 0 || nvmet_cc_css(ctrl->cc) != 0) { ctrl->csts = NVME_CSTS_CFS; @@ -1042,7 +1059,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) * in case a host died before it enabled the controller. Hence, simply * reset the keep alive timer when the controller is enabled. */ - mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); + if (ctrl->kato) + mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index ce8d819f86cc..fc35f7ae67b0 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1994,9 +1994,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { - spin_lock(&fod->flock); + spin_lock_irqsave(&fod->flock, flags); fod->abort = true; - spin_unlock(&fod->flock); + spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1c50af6219f3..b50b53db3746 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -850,7 +850,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { - .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 11f5aea97d1b..82b87a4c50f6 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -619,8 +619,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); - nvme_get_ctrl(&ctrl->ctrl); - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); @@ -638,6 +636,7 @@ out_free_queues: kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 36d906a7f70d..50e2007092bc 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -75,6 +75,7 @@ enum nvmet_rdma_queue_state { struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; + struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; @@ -464,7 +465,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, if (ndev->srq) ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL); else - ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, 
NULL); + ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); if (unlikely(ret)) pr_err("post_recv cmd failed\n"); @@ -503,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); if (rsp->n_rdma) { - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); } @@ -587,7 +588,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); rsp->n_rdma = 0; @@ -742,7 +743,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) } if (nvmet_rdma_need_data_in(rsp)) { - if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, + if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { @@ -1025,6 +1026,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } + queue->qp = queue->cm_id->qp; atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); @@ -1053,11 +1055,10 @@ err_destroy_cq: static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { - struct ib_qp *qp = queue->cm_id->qp; - - ib_drain_qp(qp); - rdma_destroy_id(queue->cm_id); - ib_destroy_qp(qp); + ib_drain_qp(queue->qp); + if (queue->cm_id) + rdma_destroy_id(queue->cm_id); + ib_destroy_qp(queue->qp); ib_free_cq(queue->cq); } @@ -1291,9 +1292,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { - schedule_work(&queue->release_work); - /* Destroying rdma_cm id is not needed here */ - return 0; + /* + * Don't destroy the cm_id in free path, as we implicitly + * destroy the cm_id here with non-zero ret code. 
+ */ + queue->cm_id = NULL; + goto free_queue; } mutex_lock(&nvmet_rdma_queue_mutex); @@ -1302,6 +1306,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, return 0; +free_queue: + nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); @@ -1345,6 +1351,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) spin_lock_irqsave(&queue->state_lock, flags); switch (queue->state) { case NVMET_RDMA_Q_CONNECTING: + while (!list_empty(&queue->rsp_wait_list)) { + struct nvmet_rdma_rsp *rsp; + + rsp = list_first_entry(&queue->rsp_wait_list, + struct nvmet_rdma_rsp, + wait_list); + list_del(&rsp->wait_list); + nvmet_rdma_put_rsp(rsp); + } + fallthrough; case NVMET_RDMA_Q_LIVE: queue->state = NVMET_RDMA_Q_DISCONNECTING; disconnect = true; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 2fe34fd4c3f3..9bfd92b6677b 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -150,6 +150,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd); static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd) { + if (unlikely(!queue->nr_cmds)) { + /* We didn't allocate cmds yet, send 0xffff */ + return USHRT_MAX; + } + return cmd - queue->cmds; } @@ -287,7 +292,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length = cmd->pdu_len; cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); offset = cmd->rbytes_done; - cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); + cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; sg = &cmd->req.sg[cmd->sg_idx]; @@ -300,6 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length -= iov_len; sg = sg_next(sg); iov++; + sg_offset = 0; } iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, @@ -794,7 +800,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) icresp->hdr.pdo = 0; icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); - icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */ + icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */ icresp->cpda = 0; if (queue->hdr_digest) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; @@ -847,7 +853,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; - cmd = &queue->cmds[data->ttag]; + if (likely(queue->nr_cmds)) + cmd = &queue->cmds[data->ttag]; + else + cmd = &queue->connect; if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", @@ -1393,7 +1402,7 @@ static void nvmet_tcp_state_change(struct sock *sk) { struct nvmet_tcp_queue *queue; - write_lock_bh(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (!queue) goto done; @@ -1411,7 +1420,7 @@ static void nvmet_tcp_state_change(struct sock *sk) queue->idx, sk->sk_state); } done: - write_unlock_bh(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h index e645caa882dd..3f61b6657175 100644 --- a/drivers/nvme/target/trace.h +++ b/drivers/nvme/target/trace.h @@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req) return req->sq->ctrl; } -static inline void __assign_disk_name(char *name, struct nvmet_req *req, - bool init) +static
inline void __assign_req_name(char *name, struct nvmet_req *req) { - struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req); - struct nvmet_ns *ns; - - if ((init && req->sq->qid) || (!init && req->cq->qid)) { - ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid); - strncpy(name, ns->device_path, DISK_NAME_LEN); - return; - } - - memset(name, 0, DISK_NAME_LEN); + if (req->ns) + strncpy(name, req->ns->device_path, DISK_NAME_LEN); + else + memset(name, 0, DISK_NAME_LEN); } #endif @@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init, TP_fast_assign( __entry->cmd = cmd; __entry->ctrl = nvmet_req_to_ctrl(req); - __assign_disk_name(__entry->disk, req, true); + __assign_req_name(__entry->disk, req); __entry->qid = req->sq->qid; __entry->cid = cmd->common.command_id; __entry->opcode = cmd->common.opcode; @@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete, __entry->cid = req->cqe->command_id; __entry->result = le64_to_cpu(req->cqe->result.u64); __entry->status = le16_to_cpu(req->cqe->status) >> 1; - __assign_disk_name(__entry->disk, req, false); + __assign_req_name(__entry->disk, req); ), TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x", __print_ctrl_name(__entry->ctrl), diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 960542dea5ad..3ba68baeed1d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -130,16 +130,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell) blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell); } -static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, - const struct nvmem_cell_info *info, - struct nvmem_cell *cell) +static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) { cell->nvmem = nvmem; cell->offset = info->offset; cell->bytes = info->bytes; - cell->name = kstrdup_const(info->name, GFP_KERNEL); - if (!cell->name) - return -ENOMEM; + cell->name = info->name; cell->bit_offset = info->bit_offset; cell->nbits = info->nbits; @@ -151,13 +149,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, if (!IS_ALIGNED(cell->offset, nvmem->stride)) { dev_err(&nvmem->dev, "cell %s unaligned to nvmem stride %d\n", - cell->name, nvmem->stride); + cell->name ?: "<unknown>", nvmem->stride); return -EINVAL; } return 0; } +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + int err; + + err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell); + if (err) + return err; + + cell->name = kstrdup_const(info->name, GFP_KERNEL); + if (!cell->name) + return -ENOMEM; + + return 0; +} + /** * nvmem_add_cells() - Add cell information to an nvmem device * @@ -299,7 +314,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) for_each_child_of_node(parent, child) { addr = of_get_property(child, "reg", &len); - if (!addr || (len < 2 * sizeof(u32))) { + if (!addr) + continue; + if (len < 2 * sizeof(u32)) { dev_err(dev, "nvmem: invalid reg on %pOF\n", child); return -EINVAL; } @@ -330,6 +347,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) cell->name, nvmem->stride); /* Cells already added will be freed later. 
*/ kfree_const(cell->name); + of_node_put(cell->np); kfree(cell); return -EINVAL; } @@ -1174,7 +1192,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, if (!nvmem) return -EINVAL; - rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); if (rc) return rc; @@ -1204,7 +1222,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, if (!nvmem) return -EINVAL; - rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); if (rc) return rc; diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index d057f1bfb2e9..8a91717600be 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -27,25 +27,11 @@ static int qfprom_reg_read(void *context, return 0; } -static int qfprom_reg_write(void *context, - unsigned int reg, void *_val, size_t bytes) -{ - struct qfprom_priv *priv = context; - u8 *val = _val; - int i = 0, words = bytes; - - while (words--) - writeb(*val++, priv->base + reg + i++); - - return 0; -} - static struct nvmem_config econfig = { .name = "qfprom", .stride = 1, .word_size = 1, .reg_read = qfprom_reg_read, - .reg_write = qfprom_reg_write, }; static int qfprom_probe(struct platform_device *pdev) diff --git a/drivers/of/address.c b/drivers/of/address.c index 8f74c4626e0e..5abb056b2b51 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -1003,11 +1003,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range); */ bool of_dma_is_coherent(struct device_node *np) { - struct device_node *node = of_node_get(np); + struct device_node *node; if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT)) return true; + node = of_node_get(np); + while (node) { if (of_property_read_bool(node, "dma-coherent")) { of_node_put(node); diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c index c72eef988041..a32e60b024b8 100644 --- a/drivers/of/kobj.c +++ b/drivers/of/kobj.c @@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np) if (!name) return -ENOMEM; - of_node_get(np); - rc = kobject_add(&np->kobj, parent, "%s", name); kfree(name); if (rc) @@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np) for_each_property_of_node(np, pp) __of_add_property_sysfs(np, pp); + of_node_get(np); return 0; } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index c34a6df712ad..26ddb4cc675a 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -265,10 +265,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) child, addr); if (of_mdiobus_child_is_phy(child)) { + /* -ENODEV is the return code that PHYLIB has + * standardized on to indicate that bus + * scanning should continue. + */ rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc && rc != -ENODEV) + if (!rc) + break; + if (rc != -ENODEV) goto unregister; - break; } } } diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 6bd610ee2cd7..3fb5d8caffd5 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -200,6 +200,16 @@ static int __init __rmem_cmp(const void *a, const void *b) if (ra->base > rb->base) return 1; + /* + * Put the dynamic allocations (address == 0, size == 0) before static + * allocations at address 0x0 so that overlap detection works + * correctly. 
+ */ + if (ra->size < rb->size) + return -1; + if (ra->size > rb->size) + return 1; + return 0; } @@ -217,8 +227,7 @@ static void __init __rmem_check_for_overlap(void) this = &reserved_mem[i]; next = &reserved_mem[i + 1]; - if (!(this->base && next->base)) - continue; + if (this->base + this->size > next->base) { phys_addr_t this_end, next_end; diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 9617b7df7c4d..1688f576ee8a 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop( of_property_set_flag(new_prop, OF_DYNAMIC); + kfree(target_path); + return new_prop; err_free_new_prop: diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index ca7823eef2b4..5707c309a754 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -776,6 +776,10 @@ static void __init of_unittest_changeset(void) unittest(!of_changeset_revert(&chgset), "revert failed\n"); of_changeset_destroy(&chgset); + + of_node_put(n1); + of_node_put(n2); + of_node_put(n21); #endif } @@ -1061,10 +1065,13 @@ static void __init of_unittest_platform_populate(void) of_platform_populate(np, match, NULL, &test_bus->dev); for_each_child_of_node(np, child) { - for_each_child_of_node(child, grandchild) - unittest(of_find_device_by_node(grandchild), + for_each_child_of_node(child, grandchild) { + pdev = of_find_device_by_node(grandchild); + unittest(pdev, "Could not create device for node '%pOFn'\n", grandchild); + of_dev_put(pdev); + } } of_platform_depopulate(&test_bus->dev); @@ -2474,8 +2481,11 @@ static __init void of_unittest_overlay_high_level(void) goto err_unlock; } if (__of_add_property(of_symbols, new_prop)) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); /* "name" auto-generated by unflatten */ - if (!strcmp(new_prop->name, "name")) + if (!strcmp(prop->name, "name")) continue; unittest(0, "duplicate property '%s' in overlay_base node __symbols__", prop->name); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 9ff0538ee83a..088c93dc0085 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -843,10 +843,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) /* Return early if nothing to do */ if (old_freq == freq) { - dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", - __func__, freq); - ret = 0; - goto put_opp_table; + if (!opp_table->required_opp_tables && !opp_table->regulators) { + dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", + __func__, freq); + ret = 0; + goto put_opp_table; + } } temp_freq = old_freq; @@ -988,7 +990,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); - kref_init(&opp_table->list_kref); /* Secure the device table modification */ list_add(&opp_table->node, &opp_tables); @@ -1045,6 +1046,10 @@ static void _opp_table_kref_release(struct kref *kref) struct opp_table *opp_table = container_of(kref, struct opp_table, kref); struct opp_device *opp_dev, *temp; + /* Drop the lock as soon as we can */ + list_del(&opp_table->node); + mutex_unlock(&opp_table_lock); + _of_clear_opp_table(opp_table); /* Release clk */ @@ -1066,37 +1071,7 @@ static void _opp_table_kref_release(struct kref *kref) mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); - list_del(&opp_table->node); kfree(opp_table); - - mutex_unlock(&opp_table_lock); -} - -void 
_opp_remove_all_static(struct opp_table *opp_table) -{ - struct dev_pm_opp *opp, *tmp; - - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { - if (!opp->dynamic) - dev_pm_opp_put(opp); - } - - opp_table->parsed_static_opps = false; -} - -static void _opp_table_list_kref_release(struct kref *kref) -{ - struct opp_table *opp_table = container_of(kref, struct opp_table, - list_kref); - - _opp_remove_all_static(opp_table); - mutex_unlock(&opp_table_lock); -} - -void _put_opp_list_kref(struct opp_table *opp_table) -{ - kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, - &opp_table_lock); } void dev_pm_opp_put_opp_table(struct opp_table *opp_table) @@ -1202,6 +1177,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); +void _opp_remove_all_static(struct opp_table *opp_table) +{ + struct dev_pm_opp *opp, *tmp; + + mutex_lock(&opp_table->lock); + + if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) + goto unlock; + + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { + if (!opp->dynamic) + dev_pm_opp_put_unlocked(opp); + } + +unlock: + mutex_unlock(&opp_table->lock); +} + /** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation @@ -1804,6 +1797,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table) { int index; + if (!opp_table->genpd_virt_devs) + return; + for (index = 0; index < opp_table->required_opp_count; index++) { if (!opp_table->genpd_virt_devs[index]) continue; @@ -1850,6 +1846,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, if (!opp_table) return ERR_PTR(-ENOMEM); + if (opp_table->genpd_virt_devs) + return opp_table; + /* * If the genpd's OPP table isn't already initialized, parsing of the * required-opps fail for dev. We should retry this after genpd's OPP @@ -2207,7 +2206,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) return; } - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); /* Drop reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1e5fcdee043c..249738e1e0b7 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ + mutex_lock(&opp_table->lock); if (opp_table->parsed_static_opps) { - kref_get(&opp_table->list_kref); + opp_table->parsed_static_opps++; + mutex_unlock(&opp_table->lock); return 0; } - /* - * Re-initialize list_kref every time we add static OPPs to the OPP - * table as the reference count may be 0 after the last tie static OPPs - * were removed. 
- */ - kref_init(&opp_table->list_kref); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -678,7 +676,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); of_node_put(np); - goto put_list_kref; + goto remove_static_opp; } else if (opp) { count++; } @@ -687,7 +685,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) /* There should be one of more OPP defined */ if (WARN_ON(!count)) { ret = -ENOENT; - goto put_list_kref; + goto remove_static_opp; } list_for_each_entry(opp, &opp_table->opp_list, node) @@ -698,18 +696,16 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); ret = -ENOENT; - goto put_list_kref; + goto remove_static_opp; } if (pstate_count) opp_table->genpd_performance_state = true; - opp_table->parsed_static_opps = true; - return 0; -put_list_kref: - _put_opp_list_kref(opp_table); +remove_static_opp: + _opp_remove_all_static(opp_table); return ret; } @@ -737,6 +733,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) return -EINVAL; } + mutex_lock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); + val = prop->value; while (nr) { unsigned long freq = be32_to_cpup(val++) * 1000; @@ -746,7 +746,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); return ret; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 01a500e2c40a..d14e27102730 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -127,11 +127,10 @@ enum opp_table_access { * @dev_list: list of devices that share these OPPs * @opp_list: table of opps * @kref: for reference count of the table. - * @list_kref: for reference count of the OPP list. * @lock: mutex protecting the opp_list and dev_list. * @np: struct device_node pointer for opp's DT node. * @clock_latency_ns_max: Max clock latency in nanoseconds. - * @parsed_static_opps: True if OPPs are initialized from DT. + * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. @@ -167,7 +166,6 @@ struct opp_table { struct list_head dev_list; struct list_head opp_list; struct kref kref; - struct kref list_kref; struct mutex lock; struct device_node *np; @@ -176,7 +174,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1; - bool parsed_static_opps; + unsigned int parsed_static_opps; enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index de8e4e347249..e410033b6df0 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. 
*/ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 2fccb5762c76..9793f17fa184 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. */ @@ -355,7 +351,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT; } -static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) +bool pcie_cap_has_rtctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 8e40b3e6da77..3cef835b375f 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index b927a92e3463..8c9f88704874 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -301,11 +301,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp) meson_cfg_writel(mp, val, PCIE_CFG0); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val &= ~LINK_CAPABLE_MASK; + val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE); meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val |= LINK_CAPABLE_X1 | FAST_LINK_MODE; + val |= LINK_CAPABLE_X1; meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 8615f1548882..fbcb211cceb4 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -263,6 +263,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) return -ENOMEM; } + irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); + pp->msi_domain = pci_msi_create_irq_domain(fwnode, &dw_pcie_msi_domain_info, pp->irq_domain); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 7e581748ee9f..a8eab4e67af1 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -45,7 +45,13 @@ #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define 
PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -76,6 +82,18 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 @@ -85,11 +103,14 @@ struct qcom_pcie_resources_2_1_0 { struct clk *iface_clk; struct clk *core_clk; struct clk *phy_clk; + struct clk *aux_clk; + struct clk *ref_clk; struct reset_control *pci_reset; struct reset_control *axi_reset; struct reset_control *ahb_reset; struct reset_control *por_reset; struct reset_control *phy_reset; + struct reset_control *ext_reset; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; @@ -235,6 +256,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->phy_clk)) return PTR_ERR(res->phy_clk); + res->aux_clk = devm_clk_get_optional(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->ref_clk = devm_clk_get_optional(dev, "ref"); + if (IS_ERR(res->ref_clk)) + return PTR_ERR(res->ref_clk); + res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) return PTR_ERR(res->pci_reset); @@ -251,6 +280,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } @@ -259,14 +292,20 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + clk_disable_unprepare(res->phy_clk); reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); reset_control_assert(res->ahb_reset); reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); clk_disable_unprepare(res->iface_clk); clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->ref_clk); + + writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } @@ -275,9 +314,20 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; + /* reset the PCIe interface as U-Boot can leave it in an undefined state */ + reset_control_assert(res->pci_reset); + reset_control_assert(res->axi_reset); + reset_control_assert(res->ahb_reset); + reset_control_assert(res->por_reset); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); + + writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); + ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies),
res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); @@ -296,32 +346,66 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) goto err_assert_ahb; } - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - ret = clk_prepare_enable(res->core_clk); if (ret) { dev_err(dev, "cannot prepare/enable core clock\n"); goto err_clk_core; } + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->ref_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ref clock\n"); + goto err_clk_ref; + } + ret = reset_control_deassert(res->ahb_reset); if (ret) { dev_err(dev, "cannot deassert ahb reset\n"); goto err_deassert_ahb; } + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot deassert ext reset\n"); + goto err_deassert_ahb; + } + /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + /* enable external reference clock */ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); + /* USE_PAD is required only for ipq806x */ + if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); ret = reset_control_deassert(res->phy_reset); @@ -348,6 +432,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return ret; } + ret = clk_prepare_enable(res->phy_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_deassert_ahb; + } + /* wait for clock acquisition */ usleep_range(1000, 1500); @@ -361,10 +451,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return 0; err_deassert_ahb: + clk_disable_unprepare(res->ref_clk); +err_clk_ref: + clk_disable_unprepare(res->aux_clk); +err_clk_aux: clk_disable_unprepare(res->core_clk); err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: clk_disable_unprepare(res->iface_clk); err_assert_ahb: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); @@ -1289,7 +1381,13 @@ static void qcom_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index f89f5acee72d..c06b05ab9f78 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1395,7 +1395,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) ret = pinctrl_pm_select_default_state(dev); if (ret < 0) { dev_err(dev, "Failed to configure sideband pins: %d\n", ret); - goto fail_pinctrl; + goto fail_pm_get_sync; } tegra_pcie_init_controller(pcie); @@ -1422,9 +1422,8 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) fail_host_init: tegra_pcie_deinit_controller(pcie); -fail_pinctrl: - pm_runtime_put_sync(dev); fail_pm_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 97245e076548..d0e60441dc8f 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -344,10 +344,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_pcie_wait_for_link(pcie); - reg = PCIE_CORE_LINK_L0S_ENTRY | - (1 << PCIE_CORE_LINK_WIDTH_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | PCIE_CORE_CMD_IO_ACCESS_EN | @@ -507,7 +503,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { * Initialize the configuration space of the PCI-to-PCI bridge * associated with the given PCIe interface. */ -static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) +static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) { struct pci_bridge_emul *bridge = &pcie->bridge; @@ -531,8 +527,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) bridge->data = pcie; bridge->ops = &advk_pci_bridge_emul_ops; - pci_bridge_emul_init(bridge, 0); - + return pci_bridge_emul_init(bridge, 0); } static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, @@ -1031,7 +1026,11 @@ static int advk_pcie_probe(struct platform_device *pdev) advk_pcie_setup_hw(pcie); - advk_sw_pci_bridge_init(pcie); + ret = advk_sw_pci_bridge_init(pcie); + if (ret) { + dev_err(dev, "Failed to register emulated root PCI bridge\n"); + return ret; + } ret = advk_pcie_init_irq_domain(pcie); if (ret) { diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index ac93f5a0398e..cfa3c83d6cc7 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -181,13 +181,6 @@ #define AFI_PEXBIAS_CTRL_0 0x168 -#define RP_PRIV_XP_DL 0x00000494 -#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) - -#define RP_RX_HDR_LIMIT 0x00000e00 -#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) -#define RP_RX_HDR_LIMIT_PW (0x0e << 8) - #define RP_ECTL_2_R1 0x00000e84 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff @@ -323,7 +316,6 @@ struct tegra_pcie_soc { bool program_uphy; bool update_clamp_threshold; bool program_deskew_time; - bool raw_violation_fixup; bool update_fc_timer; bool has_cache_bars; struct { @@ -669,23 +661,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) writel(value, port->base + RP_VEND_CTL0); } - /* Fixup for read after write violation. 
*/ - if (soc->raw_violation_fixup) { - value = readl(port->base + RP_RX_HDR_LIMIT); - value &= ~RP_RX_HDR_LIMIT_PW_MASK; - value |= RP_RX_HDR_LIMIT_PW; - writel(value, port->base + RP_RX_HDR_LIMIT); - - value = readl(port->base + RP_PRIV_XP_DL); - value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; - writel(value, port->base + RP_PRIV_XP_DL); - - value = readl(port->base + RP_VEND_XP); - value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; - value |= soc->update_fc_threshold; - writel(value, port->base + RP_VEND_XP); - } - if (soc->update_fc_timer) { value = readl(port->base + RP_VEND_XP); value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; @@ -2511,7 +2486,6 @@ static const struct tegra_pcie_soc tegra20_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = true, .ectl.enable = false, @@ -2541,7 +2515,6 @@ static const struct tegra_pcie_soc tegra30_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2554,8 +2527,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, - /* FC threshold is bit[25:18] */ - .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2565,7 +2536,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = false, - .raw_violation_fixup = true, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2589,7 +2559,6 @@ static const struct tegra_pcie_soc tegra210_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = true, - .raw_violation_fixup = false, .update_fc_timer = true, .has_cache_bars = false, .ectl = { @@ -2631,7 +2600,6 @@ static const struct tegra_pcie_soc tegra186_pcie = { .program_uphy = false, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2800,7 +2768,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); - goto teardown_msi; + goto pm_runtime_put; } err = tegra_pcie_request_resources(pcie); @@ -2840,7 +2808,6 @@ free_resources: pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); -teardown_msi: tegra_pcie_msi_teardown(pcie); put_resources: tegra_pcie_put_resources(pcie); diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index d219404bad92..9a86bb7448ac 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -743,7 +743,7 @@ static int v3_pci_probe(struct platform_device *pdev) int ret; LIST_HEAD(res); - host = pci_alloc_host_bridge(sizeof(*v3)); + host = devm_pci_alloc_host_bridge(dev, sizeof(*v3)); if (!host) return -ENOMEM; diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index f4c02da84e59..0bfa5065b440 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu) if (!msi_group->gic_irq) continue; - 
irq_set_chained_handler(msi_group->gic_irq, - xgene_msi_isr); - err = irq_set_handler_data(msi_group->gic_irq, msi_group); - if (err) { - pr_err("failed to register GIC IRQ handler\n"); - return -EINVAL; - } + irq_set_chained_handler_and_data(msi_group->gic_irq, + xgene_msi_isr, msi_group); + /* * Statically allocate MSI GIC IRQs to each CPU core. * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c index 97e251090b4f..0dfc778f40a7 100644 --- a/drivers/pci/controller/pcie-cadence-host.c +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 0a3f61be5625..a1298f6784ac 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data, struct iproc_msi *msi = irq_data_get_irq_chip_data(data); int target_cpu = cpumask_first(mask); int curr_cpu; + int ret; curr_cpu = hwirq_to_cpu(msi, data->hwirq); if (curr_cpu == target_cpu) - return IRQ_SET_MASK_OK_DONE; + ret = IRQ_SET_MASK_OK_DONE; + else { + /* steer MSI to the target CPU */ + data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; + ret = IRQ_SET_MASK_OK; + } - /* steer MSI to the target CPU */ - data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; + irq_data_update_effective_affinity(data, cpumask_of(target_cpu)); - return IRQ_SET_MASK_OK; + return ret; } static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 933a4346ae5d..c6b1c18165e5 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -307,7 +307,7 @@ enum iproc_pcie_reg { }; /* iProc PCIe PAXB BCMA registers */ -static const u16 iproc_pcie_reg_paxb_bcma[] = { +static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -318,7 +318,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = { }; /* iProc PCIe PAXB registers */ -static const u16 iproc_pcie_reg_paxb[] = { +static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -334,7 +334,7 @@ static const u16 iproc_pcie_reg_paxb[] = { }; /* iProc PCIe PAXB v2 registers */ -static const u16 iproc_pcie_reg_paxb_v2[] = { +static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -363,7 +363,7 @@ static const 
u16 iproc_pcie_reg_paxb_v2[] = { }; /* iProc PCIe PAXC v1 registers */ -static const u16 iproc_pcie_reg_paxc[] = { +static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, @@ -372,7 +372,7 @@ static const u16 iproc_pcie_reg_paxc[] = { }; /* iProc PCIe PAXC v2 registers */ -static const u16 iproc_pcie_reg_paxc_v2[] = { +static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_MSI_GIC_MODE] = 0x050, [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 626a7c352dfd..728a59655825 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1063,14 +1063,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) err = of_pci_get_devfn(child); if (err < 0) { dev_err(dev, "failed to parse devfn: %d\n", err); - return err; + goto error_put_node; } slot = PCI_SLOT(err); err = mtk_pcie_parse_port(pcie, child, slot); if (err) - return err; + goto error_put_node; } err = mtk_pcie_subsys_powerup(pcie); @@ -1086,6 +1086,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) mtk_pcie_subsys_powerdown(pcie); return 0; +error_put_node: + of_node_put(child); + return err; } static int mtk_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 1ad0b56f11b4..04114352d0e7 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -335,11 +335,12 @@ static struct pci_ops rcar_pcie_ops = { }; static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, - struct resource *res) + struct resource_entry *window) { /* Setup PCIe address space mappings for each resource */ resource_size_t size; resource_size_t res_start; + struct resource *res = window->res; u32 mask; rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); @@ -353,9 +354,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); if (res->flags & IORESOURCE_IO) - res_start = pci_pio_to_address(res->start); + res_start = pci_pio_to_address(res->start) - window->offset; else - res_start = res->start; + res_start = res->start - window->offset; rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, @@ -384,7 +385,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) switch (resource_type(res)) { case IORESOURCE_IO: case IORESOURCE_MEM: - rcar_pcie_setup_window(i, pci, res); + rcar_pcie_setup_window(i, pci, win); i++; break; case IORESOURCE_BUS: diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index a35d3f3996d7..9966dcf1d112 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -593,9 +593,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + MB2_SHADOW_OFFSET); + (readq(membar2 + MB2_SHADOW_OFFSET) & + PCI_BASE_ADDRESS_MEM_MASK); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + MB2_SHADOW_OFFSET + 8); + (readq(membar2 + MB2_SHADOW_OFFSET + 8) & + PCI_BASE_ADDRESS_MEM_MASK); pci_iounmap(vmd->dev, membar2); } } @@ -678,9 +680,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) 
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) + if (!vmd->irq_domain) { + irq_domain_free_fwnode(fn); return -ENODEV; + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); @@ -691,6 +694,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); return -ENODEV; } @@ -805,6 +809,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd) static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); + struct fwnode_handle *fn = vmd->irq_domain->fwnode; sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); @@ -813,6 +818,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); } #ifdef CONFIG_PM_SLEEP @@ -854,6 +860,8 @@ static const struct pci_device_id vmd_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), + .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 1a81af0ba961..fcad9af6df04 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -164,4 +164,13 @@ struct pci_ecam_ops pci_32b_ops = { .write = pci_generic_config_write32, } }; + +struct pci_ecam_ops pci_32b_read_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write, + } +}; #endif diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index 2bf8bd1f0563..0471643cf536 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, mem->page_size = page_size; mem->pages = pages; mem->size = size; + mutex_init(&mem->lock); epc->mem = mem; @@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { int pageno; - void __iomem *virt_addr; + void __iomem *virt_addr = NULL; struct pci_epc_mem *mem = epc->mem; unsigned int page_shift = ilog2(mem->page_size); int order; @@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno < 0) - return NULL; + goto ret; *phys_addr = mem->phys_base + (pageno << page_shift); virt_addr = ioremap(*phys_addr, size); if (!virt_addr) bitmap_release_region(mem->bitmap, pageno, order); +ret: + mutex_unlock(&mem->lock); return virt_addr; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); @@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, pageno = (phys_addr - mem->phys_base) >> page_shift; size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); bitmap_release_region(mem->bitmap, pageno, order); + mutex_unlock(&mem->lock); } EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); diff --git a/drivers/pci/hotplug/acpiphp_glue.c 
b/drivers/pci/hotplug/acpiphp_glue.c index b3869951c0eb..6e60b4b1bf53 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) struct acpiphp_context *context; acpi_lock_hp_context(); + context = acpiphp_get_context(adev); - if (!context || context->func.parent->is_going_away) { - acpi_unlock_hp_context(); - return NULL; + if (!context) + goto unlock; + + if (context->func.parent->is_going_away) { + acpiphp_put_context(context); + context = NULL; + goto unlock; } + get_bridge(context->func.parent); acpiphp_put_context(context); + +unlock: acpi_unlock_hp_context(); return context; } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 882ce82c4699..aa61d4c219d7 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -174,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn); void pciehp_get_latch_status(struct controller *ctrl, u8 *status); int pciehp_query_power_fault(struct controller *ctrl); -bool pciehp_card_present(struct controller *ctrl); -bool pciehp_card_present_or_link_active(struct controller *ctrl); +int pciehp_card_present(struct controller *ctrl); +int pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); -bool pciehp_check_link_active(struct controller *ctrl); +int pciehp_check_link_active(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 56daad828c9e..312cc45c44c7 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; + int ret; pci_config_pm_runtime_get(pdev); - *value = pciehp_card_present_or_link_active(ctrl); + ret = pciehp_card_present_or_link_active(ctrl); pci_config_pm_runtime_put(pdev); + if (ret < 0) + return ret; + + *value = ret; return 0; } @@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) */ static void pciehp_check_presence(struct controller *ctrl) { - bool occupied; + int occupied; down_read(&ctrl->reset_lock); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); - if ((occupied && (ctrl->state == OFF_STATE || + if ((occupied > 0 && (ctrl->state == OFF_STATE || ctrl->state == BLINKINGON_STATE)) || (!occupied && (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE))) diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index dd8e4a5fb282..6503d15effbb 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl) void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) { - bool present, link_active; + int present, link_active; /* * If the slot is on and presence or link has changed, turn it off. 
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) mutex_lock(&ctrl->state_lock); present = pciehp_card_present(ctrl); link_active = pciehp_check_link_active(ctrl); - if (!present && !link_active) { + if (present <= 0 && link_active <= 0) { mutex_unlock(&ctrl->state_lock); return; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 86d97f3112f0..88b996764ff9 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -201,17 +201,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask) pcie_do_write_cmd(ctrl, cmd, mask, false); } -bool pciehp_check_link_active(struct controller *ctrl) +/** + * pciehp_check_link_active() - Is the link active + * @ctrl: PCIe hotplug controller + * + * Check whether the downstream link is currently active. Note it is + * possible that the card is removed immediately after this, so the + * caller may need to take it into account. + * + * If the hotplug controller itself is not available anymore, returns + * %-ENODEV. + */ +int pciehp_check_link_active(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 lnk_status; - bool ret; + int ret; + + ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); + if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0) + return -ENODEV; - pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); - - if (ret) - ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); + ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); return ret; } @@ -373,13 +385,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status) *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); } -bool pciehp_card_present(struct controller *ctrl) +/** + * pciehp_card_present() - Is the card present + * @ctrl: PCIe hotplug controller + * + * Checks whether the card is currently present in the slot and in that + * case returns %1. Note it is possible that the card is removed + * immediately after the check, so the caller may need to take this + * into account. + * + * If the hotplug controller itself is not available anymore, returns + * %-ENODEV. + */ +int pciehp_card_present(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; + int ret; - pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); - return slot_status & PCI_EXP_SLTSTA_PDS; + ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); + if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0) + return -ENODEV; + + return !!(slot_status & PCI_EXP_SLTSTA_PDS); } /** @@ -390,10 +418,19 @@ bool pciehp_card_present(struct controller *ctrl) * Presence Detect State bit, this helper also returns true if the Link Active * bit is set. This is a concession to broken hotplug ports which hardwire * Presence Detect State to zero, such as Wilocity's [1ae9:0200]. + * + * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug + * port is not present anymore, returns %-ENODEV.
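Since pciehp_card_present(), pciehp_check_link_active() and pciehp_card_present_or_link_active() are converted from bool to a tri-state int here, callers must check for a negative value before treating the result as a boolean. A minimal sketch of the resulting calling pattern; example_activate_slot() is a hypothetical helper, not part of the driver:

	static int example_handle_presence(struct controller *ctrl)
	{
		int occupied = pciehp_card_present_or_link_active(ctrl);

		if (occupied < 0)	/* -ENODEV: controller unreachable */
			return occupied;
		if (occupied)		/* 1: card present or link active */
			return example_activate_slot(ctrl);
		return 0;		/* 0: slot genuinely empty */
	}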
*/ -bool pciehp_card_present_or_link_active(struct controller *ctrl) +int pciehp_card_present_or_link_active(struct controller *ctrl) { - return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl); + int ret; + + ret = pciehp_card_present(ctrl); + if (ret) + return ret; + + return pciehp_check_link_active(ctrl); } int pciehp_query_power_fault(struct controller *ctrl) @@ -492,7 +529,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; - u16 status, events; + u16 status, events = 0; /* * Interrupts only occur in D3hot or shallower and only if enabled @@ -517,6 +554,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } } +read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (status == (u16) ~0) { ctrl_info(ctrl, "%s: no response from device\n", __func__); @@ -529,24 +567,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. */ - events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | - PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | - PCI_EXP_SLTSTA_DLLSC); + status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | + PCI_EXP_SLTSTA_DLLSC; /* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) - events &= ~PCI_EXP_SLTSTA_PFD; + status &= ~PCI_EXP_SLTSTA_PFD; + events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; } - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + if (status) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + + /* + * In MSI mode, all event bits must be zero before the port + * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). + * So re-read the Slot Status register in case a bit was set + * between read and write. 
+ */ + if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) + goto read_status; + } + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent); @@ -590,17 +641,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) { ret = pciehp_isr(irq, dev_id); enable_irq(irq); - if (ret != IRQ_WAKE_THREAD) { - pci_config_pm_runtime_put(pdev); - return ret; - } + if (ret != IRQ_WAKE_THREAD) + goto out; } synchronize_hardirq(irq); events = atomic_xchg(&ctrl->pending_events, 0); if (!events) { - pci_config_pm_runtime_put(pdev); - return IRQ_NONE; + ret = IRQ_NONE; + goto out; } /* Check Attention Button Pressed */ @@ -629,10 +678,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) pciehp_handle_presence_or_link_change(ctrl, events); up_read(&ctrl->reset_lock); + ret = IRQ_HANDLED; +out: pci_config_pm_runtime_put(pdev); ctrl->ist_running = false; wake_up(&ctrl->requester); - return IRQ_HANDLED; + return ret; } static int pciehp_poll(void *data) diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index cdbfa5df3a51..dbfa0b55d31a 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) @@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 32d94ed3b855..b3b802bcefa0 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -158,6 +158,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) virtfn->device = iov->vf_device; virtfn->is_virtfn = 1; virtfn->physfn = pci_dev_get(dev); + virtfn->no_command_memory = 1; if (id == 0) pci_read_vf_config_common(virtfn); @@ -253,8 +254,14 @@ static ssize_t sriov_numvfs_show(struct device *dev, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + u16 num_vfs; - return sprintf(buf, "%u\n", pdev->sriov->num_VFs); + /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */ + device_lock(&pdev->dev); + num_vfs = pdev->sriov->num_VFs; + device_unlock(&pdev->dev); + + return sprintf(buf, "%u\n", num_vfs); } /* diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 0c02d500158f..e30c2a78a88f 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -944,6 +944,16 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev) if (!dev->is_hotplug_bridge) return false; + /* Assume D3 support if the bridge is power-manageable by ACPI. */ + adev = ACPI_COMPANION(&dev->dev); + if (!adev && !pci_dev_is_added(dev)) { + adev = acpi_pci_find_companion(&dev->dev); + ACPI_COMPANION_SET(&dev->dev, adev); + } + + if (adev && acpi_device_power_manageable(adev)) + return true; + /* * Look for a special _DSD property for the root port and if it * is set we know the hierarchy behind it supports D3 just fine. 
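The acpi_pci_bridge_d3() hunk above reduces to: resolve the ACPI companion early when the device has not been added yet, then assume D3 support whenever ACPI can power-manage the bridge. A condensed sketch of just that decision, with the _DSD property fallback elided:

	static bool bridge_d3_by_acpi(struct pci_dev *dev)
	{
		struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

		if (!adev && !pci_dev_is_added(dev)) {
			/* Not enumerated yet: look up the companion ourselves */
			adev = acpi_pci_find_companion(&dev->dev);
			ACPI_COMPANION_SET(&dev->dev, adev);
		}

		return adev && acpi_device_power_manageable(adev);
	}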
@@ -1050,7 +1060,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) { while (bus->parent) { if (acpi_pm_device_can_wakeup(&bus->self->dev)) - return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable); + return acpi_pm_set_device_wakeup(&bus->self->dev, enable); bus = bus->parent; } @@ -1058,7 +1068,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) /* We have reached the root bus. */ if (bus->bridge) { if (acpi_pm_device_can_wakeup(bus->bridge)) - return acpi_pm_set_bridge_wakeup(bus->bridge, enable); + return acpi_pm_set_device_wakeup(bus->bridge, enable); } return 0; } diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 5fd90105510d..d3b6b9a05618 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -195,8 +195,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { * RO, the rest is reserved */ .w1c = GENMASK(19, 16), - .ro = GENMASK(20, 19), - .rsvd = GENMASK(31, 21), + .ro = GENMASK(21, 20), + .rsvd = GENMASK(31, 22), }, [PCI_EXP_LNKCAP / 4] = { @@ -236,7 +236,7 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16, .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS | PCI_EXP_SLTSTA_EIS) << 16, - .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16), + .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16), }, [PCI_EXP_RTCTL / 4] = { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 729eca6e01a2..5ea612a15550 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -26,8 +26,6 @@ struct pci_dynid { struct pci_device_id id; }; -static int network_strict_sort; - /** * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices * @drv: target pci driver @@ -366,32 +364,6 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, return error; } -#define STORAGE_PROBE_DELAY (5*HZ) -static void -storage_probe_delay(struct device *dev) { - static long lastprobe = 0; - long delta = lastprobe + STORAGE_PROBE_DELAY - jiffies; - if(lastprobe && delta > 0 && delta < STORAGE_PROBE_DELAY) { - dev_info(dev, "probe delay %ld ms\n", delta * 1000 / HZ); - while(delta > 0) - delta = schedule_timeout_interruptible(delta); - } - lastprobe = jiffies; -} - -#define NETWORK_PROBE_DELAY (5*HZ) -static void -network_probe_delay(struct device *dev) { - static long lastprobe = 0; - long delta = lastprobe + NETWORK_PROBE_DELAY - jiffies; - if(lastprobe && delta > 0 && delta < NETWORK_PROBE_DELAY) { - dev_info(dev, "probe delay %ld ms\n", delta * 1000 / HZ); - while(delta > 0) - delta = schedule_timeout_interruptible(delta); - } - lastprobe = jiffies; -} - /** * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device @@ -409,13 +381,8 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) error = -ENODEV; id = pci_match_device(drv, pci_dev); - if (id) { - if((pci_dev->class>>16) == PCI_BASE_CLASS_STORAGE && pci_dev->vendor != 0x1af4) - storage_probe_delay(&pci_dev->dev); - if((pci_dev->class>>16) == PCI_BASE_CLASS_NETWORK && network_strict_sort) - network_probe_delay(&pci_dev->dev); + if (id) error = pci_call_probe(drv, pci_dev, id); - } } return error; } @@ -952,6 +919,8 @@ static int pci_pm_resume_noirq(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; + pci_power_t prev_state = 
pci_dev->current_state; + bool skip_bus_pm = pci_dev->skip_bus_pm; if (dev_pm_may_skip_resume(dev)) return 0; @@ -970,12 +939,15 @@ static int pci_pm_resume_noirq(struct device *dev) * configuration here and attempting to put them into D0 again is * pointless, so avoid doing that. */ - if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform())) + if (!(skip_bus_pm && pm_suspend_no_platform())) pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); pcie_pme_root_status_cleanup(pci_dev); + if (!skip_bus_pm && prev_state == PCI_D3cold) + pci_bridge_wait_for_secondary_bus(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -1366,6 +1338,7 @@ static int pci_pm_runtime_resume(struct device *dev) int rc = 0; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + pci_power_t prev_state = pci_dev->current_state; /* * Restoring config space is necessary even if the device is not bound @@ -1381,6 +1354,9 @@ static int pci_pm_runtime_resume(struct device *dev) pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); + if (prev_state == PCI_D3cold) + pci_bridge_wait_for_secondary_bus(pci_dev); + if (pm && pm->runtime_resume) rc = pm->runtime_resume(dev); @@ -1738,10 +1714,3 @@ static int __init pci_driver_init(void) return 0; } postcore_initcall(pci_driver_init); - -static int parse_network_strict_sort(char *arg) -{ - network_strict_sort = 1; - return 0; -} -early_param("network-strict-sort", parse_network_strict_sort); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 981ae16f935b..436bedbaf40b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -802,7 +802,9 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev) static inline bool platform_pci_bridge_d3(struct pci_dev *dev) { - return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false; + if (pci_platform_pm && pci_platform_pm->bridge_d3) + return pci_platform_pm->bridge_d3(dev); + return false; } /** @@ -1019,8 +1021,6 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) * because have already delayed for the bridge. */ if (dev->runtime_d3cold) { - if (dev->d3cold_delay && !dev->imm_ready) - msleep(dev->d3cold_delay); /* * When powering on a bridge from D3cold, the * whole hierarchy may be powered on into @@ -1359,6 +1359,7 @@ int pci_save_state(struct pci_dev *dev) pci_save_ltr_state(dev); pci_save_dpc_state(dev); + pci_save_aer_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); @@ -1472,6 +1473,7 @@ void pci_restore_state(struct pci_dev *dev) pci_restore_dpc_state(dev); pci_cleanup_aer_error_status_regs(dev); + pci_restore_aer_state(dev); pci_restore_config_space(dev); @@ -1666,20 +1668,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) int err; int i, bars = 0; - /* - * Power state could be unknown at this point, either due to a fresh - * boot or a device removal call. So get the current power state - * so that things like MSI message writing will behave as expected - * (e.g. if the device really is in D0 at enable time). 
- */ - if (dev->pm_cap) { - u16 pmcsr; - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - } - - if (atomic_inc_return(&dev->enable_cnt) > 1) + if (atomic_inc_return(&dev->enable_cnt) > 1) { + pci_update_current_state(dev, dev->current_state); return 0; /* already enabled */ + } bridge = pci_upstream_bridge(dev); if (bridge) @@ -3471,7 +3463,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar) return 0; pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap); - return (cap & PCI_REBAR_CAP_SIZES) >> 4; + cap &= PCI_REBAR_CAP_SIZES; + + /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */ + if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f && + bar == 0 && cap == 0x7000) + cap = 0x3f000; + + return cap >> 4; } /** @@ -3896,6 +3895,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, ret = logic_pio_register_range(range); if (ret) kfree(range); + + /* Ignore duplicates due to deferred probing */ + if (ret == -EEXIST) + ret = 0; #endif return ret; @@ -4605,14 +4608,17 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); } + /** - * pcie_wait_for_link - Wait until link is active or inactive + * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? + * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ -bool pcie_wait_for_link(struct pci_dev *pdev, bool active) +static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, + int delay) { int timeout = 1000; bool ret; @@ -4620,10 +4626,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) /* * Some controllers might not implement link active reporting. In this - * case, we wait for 1000 + 100 ms. + * case, we wait for 1000 ms + any delay requested by the caller. */ if (!pdev->link_active_reporting) { - msleep(1100); + msleep(timeout + delay); return true; } @@ -4649,13 +4655,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) timeout -= 10; } if (active && ret) - msleep(100); + msleep(delay); else if (ret != active) pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", active ? "set" : "cleared"); return ret == active; } +/** + * pcie_wait_for_link - Wait until link is active or inactive + * @pdev: Bridge device + * @active: waiting for active or inactive? + * + * Use this to wait till link becomes active or inactive. + */ +bool pcie_wait_for_link(struct pci_dev *pdev, bool active) +{ + return pcie_wait_for_link_delay(pdev, active, 100); +} + +/* + * Find maximum D3cold delay required by all the devices on the bus. The + * spec says 100 ms, but firmware can lower it and we allow drivers to + * increase it as well. + * + * Called with @pci_bus_sem locked for reading. 
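The max(min_delay, max_delay) reduction in the function that follows is subtle: min_delay only drops below the 100 ms spec default when firmware lowered it for a device, while max_delay only exceeds the default when a driver raised a device's requirement. A worked illustration with hypothetical per-device d3cold_delay values (in ms):

	/*
	 *   {  10, 100, 300 } -> min 10,  max 300 -> max(10, 300)  = 300 ms
	 *   {  10,  10,  10 } -> min 10,  max  10 -> max(10, 10)   =  10 ms
	 *   { 100, 100, 100 } -> min 100, max 100 -> max(100, 100) = 100 ms
	 *
	 * The wait only drops below 100 ms when firmware lowered it for
	 * every device; any driver-raised value wins outright.
	 */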
+ */ +static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) +{ + const struct pci_dev *pdev; + int min_delay = 100; + int max_delay = 0; + + list_for_each_entry(pdev, &bus->devices, bus_list) { + if (pdev->d3cold_delay < min_delay) + min_delay = pdev->d3cold_delay; + if (pdev->d3cold_delay > max_delay) + max_delay = pdev->d3cold_delay; + } + + return max(min_delay, max_delay); +} + +/** + * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible + * @dev: PCI bridge + * + * Handle necessary delays before access to the devices on the secondary + * side of the bridge are permitted after D3cold to D0 transition. + * + * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For + * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section + * 4.3.2. + */ +void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) +{ + struct pci_dev *child; + int delay; + + if (pci_dev_is_disconnected(dev)) + return; + + if (!pci_is_bridge(dev) || !dev->bridge_d3) + return; + + down_read(&pci_bus_sem); + + /* + * We only deal with devices that are currently present on the bus. + * For any hot-added devices the access delay is handled in pciehp + * board_added(). In case of ACPI hotplug the firmware is expected + * to configure the devices before the OS is notified. + */ + if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { + up_read(&pci_bus_sem); + return; + } + + /* Take d3cold_delay requirements into account */ + delay = pci_bus_max_d3cold_delay(dev->subordinate); + if (!delay) { + up_read(&pci_bus_sem); + return; + } + + child = list_first_entry(&dev->subordinate->devices, struct pci_dev, + bus_list); + up_read(&pci_bus_sem); + + /* + * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before + * accessing the device after reset (that is 1000 ms + 100 ms). In + * practice this should not be needed because we don't do power + * management for them (see pci_bridge_d3_possible()). + */ + if (!pci_is_pcie(dev)) { + pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); + msleep(1000 + delay); + return; + } + + /* + * PCIe downstream and root ports that do not support speeds + * greater than 5 GT/s need to wait a minimum of 100 ms. For higher + * speeds (gen3) we need to wait first for the data link layer to + * become active. + * + * However, 100 ms is the minimum and the PCIe spec says the + * software must allow at least 1s before it can determine that the + * device that did not respond is a broken device. There is + * evidence that 100 ms is not always enough, for example a certain + * Titan Ridge xHCI controller does not always respond to + * configuration requests if we only wait for 100 ms (see + * https://bugzilla.kernel.org/show_bug.cgi?id=203885). + * + * Therefore we wait for 100 ms and check for the device presence. + * If it is still not present, give it an additional 100 ms.
+ */ + if (!pcie_downstream_port(dev)) + return; + + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); + msleep(delay); + } else { + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { + /* Did not train, no need to wait any further */ + return; + } + } + + if (!pci_device_is_present(child)) { + pci_dbg(child, "waiting additional %d ms to become accessible\n", delay); + msleep(delay); + } +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -5995,19 +6132,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, while (*p) { count = 0; if (sscanf(p, "%d%n", &align_order, &count) == 1 && - p[count] == '@') { + p[count] == '@') { p += count + 1; + if (align_order > 63) { + pr_err("PCI: Invalid requested alignment (order %d)\n", + align_order); + align_order = PAGE_SHIFT; + } } else { - align_order = -1; + align_order = PAGE_SHIFT; } ret = pci_dev_str_match(dev, p, &p); if (ret == 1) { *resize = true; - if (align_order == -1) - align = PAGE_SIZE; - else - align = 1 << align_order; + align = 1ULL << align_order; break; } else if (ret < 0) { pr_err("PCI: Can't parse resource_alignment parameter: %s\n", diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 273d60cb0762..b66ffea134dc 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -15,6 +15,7 @@ extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; bool pcie_cap_has_lnkctl(const struct pci_dev *dev); +bool pcie_cap_has_rtctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ @@ -107,6 +108,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev); void pci_free_cap_save_buffers(struct pci_dev *dev); bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); +void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev); static inline void pci_wakeup_event(struct pci_dev *dev) { @@ -337,6 +339,11 @@ struct pci_sriov { u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /** diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 362eb8cfa53b..a5a174d70e05 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -35,6 +35,7 @@ config PCIEAER config PCIEAER_INJECT tristate "PCI Express error injection support" depends on PCIEAER + select GENERIC_IRQ_INJECTION help This enables PCI Express Root Port Advanced Error Reporting (AER) software error injector. 
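The resource_alignment parsing change above also closes an undefined-behavior corner: with a plain int, 1 << align_order is undefined once align_order reaches 32, so out-of-range orders are now rejected up front and the shift is done on a 64-bit constant. A standalone userspace illustration of the fixed shift (the input value is hypothetical):

	#include <stdio.h>

	int main(void)
	{
		int align_order = 40;	/* hypothetical user-supplied order */

		/* "1 << align_order" would be UB here (int shift, order >= 32);
		 * a 64-bit shift is valid for any order in 0..63.
		 */
		unsigned long long align = 1ULL << align_order;

		printf("alignment = %llu bytes\n", align);	/* 1099511627776 */
		return 0;
	}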
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 271aecfbc3bf..4a818b07a1af 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) "AER: " fmt #define dev_fmt pr_fmt +#include <linux/bitops.h> #include <linux/cper.h> #include <linux/pci.h> #include <linux/pci-acpi.h> @@ -36,7 +37,7 @@ #define AER_ERROR_SOURCES_MAX 128 #define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */ -#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/ +#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/ struct aer_err_source { unsigned int status; @@ -201,6 +202,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev) /** * pcie_ecrc_get_policy - parse kernel command-line ecrc option + * @str: ECRC policy from kernel command line to use */ void pcie_ecrc_get_policy(char *str) { @@ -448,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) return 0; } +void pci_save_aer_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + u32 *cap; + int pos; + + pos = dev->aer_cap; + if (!pos) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); + if (!save_state) + return; + + cap = &save_state->cap.data[0]; + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++); + if (pcie_cap_has_rtctl(dev)) + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++); +} + +void pci_restore_aer_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + u32 *cap; + int pos; + + pos = dev->aer_cap; + if (!pos) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); + if (!save_state) + return; + + cap = &save_state->cap.data[0]; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++); + if (pcie_cap_has_rtctl(dev)) + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++); +} + void pci_aer_init(struct pci_dev *dev) { - dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + int n; - if (dev->aer_cap) - dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); + dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (!dev->aer_cap) + return; + + dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); + + /* + * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER, + * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event + * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec + * 7.8.4). + */ + n = pcie_cap_has_rtctl(dev) ? 
5 : 4; + pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n); pci_cleanup_aer_error_status_regs(dev); } @@ -560,6 +620,7 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { "BlockedTLP", /* Bit Position 23 */ "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ + "PoisonTLPBlocked", /* Bit Position 26 */ }; static const char *aer_agent_string[] = { @@ -657,7 +718,8 @@ const struct attribute_group aer_stats_attr_group = { static void pci_dev_aer_stats_incr(struct pci_dev *pdev, struct aer_err_info *info) { - int status, i, max = -1; + unsigned long status = info->status & ~info->mask; + int i, max = -1; u64 *counter = NULL; struct aer_stats *aer_stats = pdev->aer_stats; @@ -682,10 +744,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev, break; } - status = (info->status & ~info->mask); - for (i = 0; i < max; i++) - if (status & (1 << i)) - counter[i]++; + for_each_set_bit(i, &status, max) + counter[i]++; } static void pci_rootport_aer_stats_incr(struct pci_dev *pdev, @@ -717,14 +777,11 @@ static void __print_tlp_header(struct pci_dev *dev, static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { - int i, status; + unsigned long status = info->status & ~info->mask; const char *errmsg = NULL; - status = (info->status & ~info->mask); - - for (i = 0; i < 32; i++) { - if (!(status & (1 << i))) - continue; + int i; + for_each_set_bit(i, &status, 32) { if (info->severity == AER_CORRECTABLE) errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? aer_correctable_error_string[i] : NULL; @@ -1204,7 +1261,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc, /** * aer_isr - consume errors detected by root port - * @work: definition of this work item + * @irq: IRQ assigned to Root Port + * @context: pointer to Root Port data structure * * Invoked, as DPC, when root port records new detected error */ diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 6988fe7389b9..21cc3d3387f7 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -16,7 +16,7 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/irq.h> +#include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/slab.h> @@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj) } pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", einj->cor_status, einj->uncor_status, pci_name(dev)); - local_irq_disable(); - generic_handle_irq(edev->irq); - local_irq_enable(); + ret = irq_inject_interrupt(edev->irq); } else { pci_err(rpdev, "AER device not found\n"); ret = -ENODEV; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 652ef23bba35..7624c71011c6 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -64,6 +64,7 @@ struct pcie_link_state { u32 clkpm_capable:1; /* Clock PM capable? 
*/ u32 clkpm_enabled:1; /* Current Clock PM state */ u32 clkpm_default:1; /* Default Clock PM state by BIOS */ + u32 clkpm_disable:1; /* Clock PM disabled */ /* Exit latencies */ struct aspm_latency latency_up; /* Upstream direction exit latency */ @@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { - /* Don't enable Clock PM if the link is not Clock PM capable */ - if (!link->clkpm_capable) + /* + * Don't enable Clock PM if the link is not Clock PM capable + * or Clock PM is disabled + */ + if (!link->clkpm_capable || link->clkpm_disable) enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) @@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) } link->clkpm_enabled = enabled; link->clkpm_default = enabled; - link->clkpm_capable = (blacklist) ? 0 : capable; + link->clkpm_capable = capable; + link->clkpm_disable = blacklist ? 1 : 0; } static bool pcie_retrain_link(struct pcie_link_state *link) @@ -623,16 +628,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Setup initial capable state. Will be updated later */ link->aspm_capable = link->aspm_support; - /* - * If the downstream component has pci bridge function, don't - * do ASPM for now. - */ - list_for_each_entry(child, &linkbus->devices, bus_list) { - if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { - link->aspm_disable = ASPM_STATE_ALL; - break; - } - } /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { @@ -742,9 +737,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) /* Enable what we need to enable */ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) @@ -1097,10 +1092,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) link->aspm_disable |= ASPM_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); - if (state & PCIE_LINK_STATE_CLKPM) { - link->clkpm_capable = 0; - pcie_set_clkpm(link, 0); - } + if (state & PCIE_LINK_STATE_CLKPM) + link->clkpm_disable = 1; + pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); if (sem) up_read(&pci_bus_sem); @@ -1163,6 +1157,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index a32ec3487a8d..e06f42f58d3d 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -291,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev) int status; u16 ctl, cap; - if (pcie_aer_get_firmware_first(pdev)) + if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native) return -ENOTSUPP; dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL); diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index b0e6048a9208..c37c8daa83c6 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -199,11 +199,14 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state, bus = 
dev->subordinate; pci_dbg(dev, "broadcast error_detected message\n"); - if (state == pci_channel_io_frozen) + if (state == pci_channel_io_frozen) { pci_walk_bus(bus, report_frozen_detected, &status); - else + status = reset_link(dev, service); + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; + } else { pci_walk_bus(bus, report_normal_detected, &status); - + } if (state == pci_channel_io_frozen && reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED) goto failed; diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 944827a8c7d3..1e673619b101 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -25,6 +25,8 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 5 +extern bool pcie_ports_dpc_native; + #ifdef CONFIG_PCIEAER int pcie_aer_init(void); #else diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 1b330129089f..5075cb9e850c 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -250,8 +250,13 @@ static int get_port_device_capability(struct pci_dev *dev) pcie_pme_interrupt_enable(dev, false); } + /* + * With dpc-native, allow Linux to use DPC even if it doesn't have + * permission to use AER. + */ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) && - pci_aer_available() && services & PCIE_PORT_SERVICE_AER) + pci_aer_available() && + (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER))) services |= PCIE_PORT_SERVICE_DPC; if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 0a87091a0800..160d67c59310 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -29,12 +29,20 @@ bool pcie_ports_disabled; */ bool pcie_ports_native; +/* + * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe + * service even if the platform hasn't given us permission. + */ +bool pcie_ports_dpc_native; + static int __init pcie_port_setup(char *str) { if (!strncmp(str, "compat", 6)) pcie_ports_disabled = true; else if (!strncmp(str, "native", 6)) pcie_ports_native = true; + else if (!strncmp(str, "dpc-native", 10)) + pcie_ports_dpc_native = true; return 1; } diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 9361f3aa26ab..357a454cafa0 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev) if (!pci_is_pcie(dev)) return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!pos) - return; - /* * Enable PTM only on interior devices (root ports, switch ports, * etc.) on the assumption that it causes no link traffic until an @@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev) pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) return; + /* + * Switch Downstream Ports are not permitted to have a PTM + * capability; their PTM behavior is controlled by the Upstream + * Port (PCIe r5.0, sec 7.9.16). + */ + ups = pci_upstream_bridge(dev); + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && + ups && ups->ptm_enabled) { + dev->ptm_granularity = ups->ptm_granularity; + dev->ptm_enabled = 1; + return; + } + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); + if (!pos) + return; + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; @@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev) * the spec recommendation (PCIe r3.1, sec 7.32.3), select the * furthest upstream Time Source as the PTM Root. 
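The pci_ptm_init() reordering above amounts to: a Switch Downstream Port never carries a PTM capability of its own, so it inherits the Upstream Port's state before the capability lookup would otherwise bail out. A condensed sketch of that inheritance branch alone:

	static void ptm_inherit_from_upstream(struct pci_dev *dev)
	{
		struct pci_dev *ups = pci_upstream_bridge(dev);

		if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
		    ups && ups->ptm_enabled) {
			/* PCIe r5.0, sec 7.9.16: the Upstream Port controls
			 * PTM here, so mirror its granularity and state.
			 */
			dev->ptm_granularity = ups->ptm_granularity;
			dev->ptm_enabled = 1;
		}
	}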
*/ - ups = pci_upstream_bridge(dev); if (ups && ups->ptm_enabled) { ctrl = PCI_PTM_CTRL_ENABLE; if (ups->ptm_granularity == 0) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5c68a0bffd20..89c261c159f1 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -871,9 +871,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) goto free; err = device_register(&bridge->dev); - if (err) + if (err) { put_device(&bridge->dev); - + goto free; + } bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -1781,7 +1782,7 @@ int pci_setup_device(struct pci_dev *dev) /* Device class may be changed after fixup */ class = dev->class >> 8; - if (dev->non_compliant_bars) { + if (dev->non_compliant_bars && !dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); @@ -1893,13 +1894,33 @@ static void pci_configure_mps(struct pci_dev *dev) struct pci_dev *bridge = pci_upstream_bridge(dev); int mps, mpss, p_mps, rc; - if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) + if (!pci_is_pcie(dev)) return; /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ if (dev->is_virtfn) return; + /* + * For Root Complex Integrated Endpoints, program the maximum + * supported value unless limited by the PCIE_BUS_PEER2PEER case. + */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { + if (pcie_bus_config == PCIE_BUS_PEER2PEER) + mps = 128; + else + mps = 128 << dev->pcie_mpss; + rc = pcie_set_mps(dev, mps); + if (rc) { + pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", + mps); + } + return; + } + + if (!bridge || !pci_is_pcie(bridge)) + return; + mps = pcie_get_mps(dev); p_mps = pcie_get_mps(bridge); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 2fdceaab7307..5632bab94c24 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1970,26 +1970,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk /* * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no * 300641-004US, section 5.7.3. + * + * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003. + * Core IO on Xeon E5 v2, see Intel order no 329188-003. + * Core IO on Xeon E7 v2, see Intel order no 329595-002. + * Core IO on Xeon E5 v3, see Intel order no 330784-003. + * Core IO on Xeon E7 v3, see Intel order no 332315-001US. + * Core IO on Xeon E5 v4, see Intel order no 333810-002US. + * Core IO on Xeon E7 v4, see Intel order no 332315-001US. + * Core IO on Xeon D-1500, see Intel order no 332051-001. + * Core IO on Xeon Scalable, see Intel order no 610950. 
*/ -#define INTEL_6300_IOAPIC_ABAR 0x40 +#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */ #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) +#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */ +#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25) + static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; + u32 pci_config_dword; if (noioapicquirk) return; - pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); - pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; - pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); - + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_ESB_10: + pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, + &pci_config_word); + pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; + pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, + pci_config_word); + break; + case 0x3c28: /* Xeon E5 1600/2600/4600 */ + case 0x0e28: /* Xeon E5/E7 V2 */ + case 0x2f28: /* Xeon E5/E7 V3,V4 */ + case 0x6f28: /* Xeon D-1500 */ + case 0x2034: /* Xeon Scalable Family */ + pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, + &pci_config_dword); + pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH; + pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, + pci_config_dword); + break; + default: + return; + } pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +/* + * Device 29 Func 5 Device IDs of IO-APIC + * containing ABAR (APIC1 Alternate Base Address Register) + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, + quirk_disable_intel_boot_interrupt); + +/* + * Device 5 Func 0 Device IDs of Core IO modules/hubs + * containing Coherent Interface Protocol Interrupt Control + * + * Device IDs obtained from the volume 2 datasheets of the + * families commented above.
+ */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034, + quirk_disable_intel_boot_interrupt); /* Disable boot interrupts on HT-1000 */ #define BC_HT1000_FEATURE_REG 0x64 @@ -2264,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this @@ -3976,6 +4055,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, quirk_dma_func1_alias); @@ -4286,6 +4368,47 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +/* + * pci_acs_ctrl_enabled - compare desired ACS controls with those provided + * by a device + * @acs_ctrl_req: Bitmask of desired ACS controls + * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by + * the hardware design + * + * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included + * in @acs_ctrl_ena, i.e., the device provides all the access controls the + * caller desires. Return 0 otherwise. + */ +static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) +{ + if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req) + return 1; + return 0; +} + +/* + * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability. + * But the implementation could block peer-to-peer transactions between them + * and provide ACS-like functionality. 
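pci_acs_ctrl_enabled(), introduced above, is a plain subset test: every control the caller asked for must be in the set the device provides, whether through real ACS or equivalent hardware behavior. A small illustration with the flag set most of these quirks pass in (values are hypothetical):

	static void acs_subset_example(void)
	{
		u16 ena = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

		pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_CR, ena); /* -> 1 */
		pci_acs_ctrl_enabled(PCI_ACS_TB, ena);              /* -> 0 */
		pci_acs_ctrl_enabled(0, ena);                       /* -> 1 */
	}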
+ */ +static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) +{ + if (!pci_is_pcie(dev) || + ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) + return -ENOTTY; + + switch (dev->device) { + case 0x0710 ... 0x071e: + case 0x0721: + case 0x0723 ... 0x0732: + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + return false; +} + /* * AMD has indicated that the devices below do not support peer-to-peer * in any system where they are found in the southbridge with an AMD @@ -4326,10 +4449,12 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); - return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR); #else return -ENODEV; #endif @@ -4356,20 +4481,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { + if (!pci_quirk_cavium_acs_match(dev)) + return -ENOTTY; + /* - * Cavium root ports don't advertise an ACS capability. However, + * Cavium Root Ports don't advertise an ACS capability. However, * the RTL internally implements similar protection as if ACS had - * Request Redirection, Completion Redirection, Source Validation, + * Source Validation, Request Redirection, Completion Redirection, * and Upstream Forwarding features enabled. Assert that the * hardware implements and enables equivalent ACS functionality for * these flags. */ - acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - - if (!pci_quirk_cavium_acs_match(dev)) - return -ENOTTY; - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) @@ -4379,13 +4503,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) * transactions with others, allowing masking out these bits as if they * were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* - * Many Intel PCH root ports do provide ACS-like features to disable peer + * Many Intel PCH Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. This is the list of device IDs known to fall * into that category as provided by Intel in Red Hat bugzilla 1037684. @@ -4433,37 +4556,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) return false; } -#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV) - static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ? - INTEL_PCH_ACS_FLAGS : 0; - if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; - return acs_flags & ~flags ? 
0 : 1; + if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK) + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return pci_acs_ctrl_enabled(acs_flags, 0); } /* - * These QCOM root ports do provide ACS-like features to disable peer + * These QCOM Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. Hardware supports source validation but it * will report the issue as Completer Abort instead of ACS Violation. - * Hardware doesn't support peer-to-peer and each root port is a root - * complex with unique segment numbers. It is not possible for one root - * port to pass traffic to another root port. All PCIe transactions are - * terminated inside the root port. + * Hardware doesn't support peer-to-peer and each Root Port is a Root + * Complex with unique segment numbers. It is not possible for one Root + * Port to pass traffic to another Root Port. All PCIe transactions are + * terminated inside the Root Port. */ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); - int ret = acs_flags & ~flags ? 0 : 1; - - pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret); - - return ret; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) @@ -4564,7 +4682,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); - return acs_flags & ~ctrl ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, ctrl); } static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) @@ -4578,10 +4696,23 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) * perform peer-to-peer with other functions, allowing us to mask out * these bits as if they were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); +} - return acs_flags ? 0 : 1; +static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * Intel RCiEP's are required to allow p2p only on translated + * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, + * "Root-Complex Peer to Peer Considerations". + */ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) + return -ENOTTY; + + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) @@ -4592,9 +4723,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) * Allow each Root Port to be in a separate IOMMU group by masking * SV/RR/CR/UF bits. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); - - return acs_flags ? 
0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static const struct pci_dev_acs_enabled { @@ -4667,6 +4797,7 @@ static const struct pci_dev_acs_enabled { /* I219 */ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, /* QCOM QDF2xxx root ports */ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, @@ -4690,12 +4821,33 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, + /* because the PLX switch Vendor ID is 0x10b5 on Phytium CPUs */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* because the Root Complex Vendor ID is 0x17cd on Phytium CPUs */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, + /* Zhaoxin multi-function devices */ + { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, { 0 } }; +/* + * pci_dev_specific_acs_enabled - check whether device provides ACS controls + * @dev: PCI device + * @acs_flags: Bitmask of desired ACS controls + * + * Returns: + * -ENOTTY: No quirk applies to this device; we can't tell whether the + * device provides the desired controls + * 0: Device does not provide all the desired controls + * >0: Device provides all the controls in @acs_flags + */ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { const struct pci_dev_acs_enabled *i; @@ -5015,13 +5167,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); -/* FLR may cause some 82579 devices to hang */ -static void quirk_intel_no_flr(struct pci_dev *dev) +/* + * FLR may cause the following devices to hang: + * + * AMD Starship/Matisse HD Audio Controller 0x1487 + * AMD Starship USB 3.0 Host Controller 0x148c + * AMD Matisse USB 3.0 Host Controller 0x149c + * Intel 82579LM Gigabit Ethernet Controller 0x1502 + * Intel 82579V Gigabit Ethernet Controller 0x1503 + * + */ +static void quirk_no_flr(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); static void quirk_no_ext_tags(struct pci_dev *pdev) { @@ -5051,7 +5215,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision !=
0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5062,6 +5227,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ @@ -5424,3 +5591,43 @@ out_disable: DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, PCI_CLASS_DISPLAY_VGA, 8, quirk_reset_lenovo_thinkpad_p50_nvgpu); + +/* + * Device [1b21:2142] + * When in D0, PME# doesn't get asserted when plugging in a USB 3.0 device. + */ +static void pci_fixup_no_d0_pme(struct pci_dev *dev) +{ + pci_info(dev, "PME# does not work under D0, disabling it\n"); + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); + +/* + * Devices [12d8:400e] and [12d8:400f] + * These devices advertise PME# support in all power states but don't + * reliably assert it. + */ +static void pci_fixup_no_pme(struct pci_dev *dev) +{ + pci_info(dev, "PME# is unreliable, disabling it\n"); + dev->pme_support = 0; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme); + +static void apex_pci_fixup_class(struct pci_dev *pdev) +{ + pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; +} +DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a, + PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); + +#ifdef CONFIG_ALTRA_ERRATUM_82288 +static void quirk_altra_erratum_82288(struct pci_dev *dev) +{ + pr_info_once("Write combining PCI maps disabled due to hardware erratum\n"); + have_altra_erratum_82288 = true; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMPERE, 0xe100, quirk_altra_erratum_82288); +#endif diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897..8fc9a4e911e3 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom); - -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} -EXPORT_SYMBOL(pci_platform_rom); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 5356630e0e48..44f4866d95d8 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) } /* - * Helper function for sizing routines: find first available bus resource - * of a given type. Note: we intentionally skip the bus resources which - * have already been assigned (that is, have non-NULL parent resource). + * Helper function for sizing routines. Assigned resources have non-NULL + * parent resource. + * + * Return first unassigned resource of the correct type. If there is none, + * return first assigned resource of the correct type. If none of the + * above, return NULL.
+ * + * Returning an assigned resource of the correct type allows the caller to + * distinguish between already assigned and no resource of the correct type. */ -static struct resource *find_free_bus_resource(struct pci_bus *bus, - unsigned long type_mask, - unsigned long type) +static struct resource *find_bus_resource_of_type(struct pci_bus *bus, + unsigned long type_mask, + unsigned long type) { + struct resource *r, *r_assigned = NULL; int i; - struct resource *r; pci_bus_for_each_resource(bus, r, i) { if (r == &ioport_resource || r == &iomem_resource) continue; if (r && (r->flags & type_mask) == type && !r->parent) return r; + if (r && (r->flags & type_mask) == type && !r_assigned) + r_assigned = r; } - return NULL; + return r_assigned; } static resource_size_t calculate_iosize(resource_size_t size, @@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, struct list_head *realloc_head) { struct pci_dev *dev; - struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, - IORESOURCE_IO); + struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO, + IORESOURCE_IO); resource_size_t size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; resource_size_t min_align, align; @@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, if (!b_res) return; + /* If resource is already assigned, nothing more to do */ + if (b_res->parent) + return; + min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { int i; @@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, resource_size_t min_align, align, size, size0, size1; resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */ int order, max_order; - struct resource *b_res = find_free_bus_resource(bus, + struct resource *b_res = find_bus_resource_of_type(bus, mask | IORESOURCE_PREFETCH, type); resource_size_t children_add_size = 0; resource_size_t children_add_align = 0; @@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (!b_res) return -ENOSPC; + /* If resource is already assigned, nothing more to do */ + if (b_res->parent) + return 0; + memset(aligns, 0, sizeof(aligns)); max_order = 0; size = 0; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d8ca40a97693..da5023e27951 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -409,10 +409,16 @@ EXPORT_SYMBOL(pci_release_resource); int pci_resize_resource(struct pci_dev *dev, int resno, int size) { struct resource *res = dev->resource + resno; + struct pci_host_bridge *host; int old, ret; u32 sizes; u16 cmd; + /* Check if we must preserve the firmware's resource assignment */ + host = pci_find_host_bridge(dev->bus); + if (host->preserve_config) + return -ENOTSUPP; + /* Make sure the resource isn't assigned before resizing it. */ if (!(res->flags & IORESOURCE_UNSET)) return -EBUSY; @@ -439,10 +445,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) res->end = res->start + pci_rebar_size_to_bytes(size) - 1; /* Check if the new config works by trying to assign everything. 
*/ - ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); - if (ret) - goto error_resize; - + if (dev->bus->self) { + ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); + if (ret) + goto error_resize; + } return 0; error_resize: diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index ae4aa0e1f2f4..1e3ed6ec0a4a 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -304,17 +304,20 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } - err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, - "%s", slot_name); - if (err) - goto err; - INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", slot_name); + if (err) { + kobject_put(&slot->kobj); + goto err; + } + down_read(&pci_bus_sem); list_for_each_entry(dev, &parent->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot_nr) @@ -329,7 +332,6 @@ out: mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index cc43c855452f..2c9c3061894b 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -175,7 +175,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); - init_completion(&stuser->comp); + reinit_completion(&stuser->comp); list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index 31e39558d49d..8b003c890b87 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, u16 word; u32 dword; long err; - long cfg_ret; + int cfg_ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, } err = -EIO; - if (cfg_ret != PCIBIOS_SUCCESSFUL) + if (cfg_ret) goto error; switch (len) { @@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_byte(dev, off, byte); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; @@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_word(dev, off, word); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; @@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_dword(dev, off, dword); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 7915d10f9aa1..bd549070c011 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); /* * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. 
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 8f8606b9bc9e..aca4570f78a8 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1720,6 +1720,7 @@ static struct platform_driver cci_pmu_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, .remove = cci_pmu_remove, diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 6fc0273b6129..336948b41bd1 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = { .driver = { .name = "arm-ccn", .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, .remove = arm_ccn_remove, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 70968c8c09d7..4594e2ed13d5 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -759,6 +759,7 @@ static struct platform_driver dsu_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(dsu_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, .remove = dsu_pmu_device_remove, diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 933bd8410fc2..e35cb76c8d10 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -236,7 +236,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, ret = armpmu_register(pmu); if (ret) - goto out_free; + goto out_free_irqs; return 0; diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 2f8787276d9b..9cdd89b29334 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu_pmu); smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .pmu_enable = smmu_pmu_enable, .pmu_disable = smmu_pmu_disable, @@ -815,7 +816,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - return err; + goto out_clear_affinity; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ -834,6 +835,8 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); +out_clear_affinity: + irq_set_affinity_hint(smmu_pmu->irq, NULL); return err; } @@ -843,6 +846,7 @@ static int smmu_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&smmu_pmu->pmu); cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); + irq_set_affinity_hint(smmu_pmu->irq, NULL); return 0; } @@ -857,6 +861,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev) static struct platform_driver smmu_pmu_driver = { .driver = { .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, .remove = smmu_pmu_remove, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 4e4984a55cd1..079701e8de18 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1228,6 +1228,7 @@ static struct platform_driver arm_spe_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = arm_spe_pmu_device_probe, .remove = arm_spe_pmu_device_remove, diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 6eef47de8fcc..09f44c6e2eaf 
100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -451,6 +451,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, { *pmu = (struct ddr_pmu) { .pmu = (struct pmu) { + .module = THIS_MODULE, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_invalid_context, .attr_groups = attr_groups, @@ -645,6 +646,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .driver = { .name = "imx-ddr-pmu", .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = true, }, .probe = ddr_perf_probe, .remove = ddr_perf_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index e42d4464c2cf..b79c96b14328 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -381,6 +381,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) ddrc_pmu->sccl_id, ddrc_pmu->index_id); ddrc_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -419,6 +420,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .driver = { .name = "hisi_ddrc_pmu", .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, .remove = hisi_ddrc_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index f28063873e11..78865b4ac4a6 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -285,7 +285,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = { HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), - HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), HISI_PMU_EVENT_ATTR(spill_num, 0x20), @@ -392,6 +392,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) hha_pmu->sccl_id, hha_pmu->index_id); hha_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .driver = { .name = "hisi_hha_pmu", .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, .remove = hisi_hha_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 078b8dc57250..9dd50c3bc74e 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -35,7 +35,7 @@ /* L3C has 8-counters */ #define L3C_NR_COUNTERS 0x8 -#define L3C_PERF_CTRL_EN 0x20000 +#define L3C_PERF_CTRL_EN 0x10000 #define L3C_EVTYPE_NONE 0xff /* @@ -382,6 +382,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) l3c_pmu->sccl_id, l3c_pmu->index_id); l3c_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .driver = { .name = "hisi_l3c_pmu", .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = 
hisi_l3c_pmu_probe, .remove = hisi_l3c_pmu_remove, diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 21d6991dbe0b..4da37f650f98 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = { .driver = { .name = "qcom-l2cache-pmu", .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, .remove = l2_cache_pmu_remove, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 656e830798d9..9ddb577c542b 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = { .driver = { .name = "qcom-l3cache-pmu", .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = qcom_l3_cache_pmu_probe, }; diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 43d76c85da56..170ccb164c60 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -627,14 +627,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev, list_for_each_entry(rentry, &list, node) { if (resource_type(rentry->res) == IORESOURCE_MEM) { res = *rentry->res; + rentry = NULL; break; } } - - if (!rentry->res) - return NULL; - acpi_dev_free_resource_list(&list); + + if (rentry) { + dev_err(dev, "PMU type %d: Fail to find resource\n", type); + return NULL; + } + base = devm_ioremap_resource(dev, &res); if (IS_ERR(base)) { dev_err(dev, "PMU type %d: Fail to map resource\n", type); @@ -816,6 +819,7 @@ static struct platform_driver tx2_uncore_driver = { .driver = { .name = "tx2-uncore-pmu", .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), + .suppress_bind_attrs = true, }, .probe = tx2_uncore_probe, .remove = tx2_uncore_remove, diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 7e328d6385c3..50b37f8f5c7f 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1459,17 +1459,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) } #if defined(CONFIG_ACPI) -static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data) -{ - struct resource *res = data; - - if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) - acpi_dev_resource_memory(ares, res); - - /* Always tell the ACPI core to skip this resource */ - return 1; -} - static struct xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, struct acpi_device *adev, u32 type) @@ -1481,6 +1470,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, struct hw_pmu_info *inf; void __iomem *dev_csr; struct resource res; + struct resource_entry *rentry; int enable_bit; int rc; @@ -1489,11 +1479,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, return NULL; INIT_LIST_HEAD(&resource_list); - rc = acpi_dev_get_resources(adev, &resource_list, - acpi_pmu_dev_add_resource, &res); + rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); + if (rc <= 0) { + dev_err(dev, "PMU type %d: No resources found\n", type); + return NULL; + } + + list_for_each_entry(rentry, &resource_list, node) { + if (resource_type(rentry->res) == IORESOURCE_MEM) { + res = *rentry->res; + rentry = NULL; + break; + } + } acpi_dev_free_resource_list(&resource_list); - if (rc < 0) { - dev_err(dev, "PMU type %d: No resource address found\n", type); + + if (rentry) { + dev_err(dev, "PMU type %d: No memory resource found\n", type); 
return NULL; } @@ -1981,6 +1983,7 @@ static struct platform_driver xgene_pmu_driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 856927382248..e5842e48a5e0 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); + struct sun4i_usb_phy *phy; bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; - if (phy0 == NULL) + if (!phy0) return; + phy = phy_get_drvdata(phy0); id_det = sun4i_usb_phy0_get_id_det(data); vbus_det = sun4i_usb_phy0_get_vbus_det(data); diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c index fe6c58910e4c..7c7862b4f41f 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-usb.c +++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c @@ -16,8 +16,6 @@ enum bcm_usb_phy_version { }; enum bcm_usb_phy_reg { - PLL_NDIV_FRAC, - PLL_NDIV_INT, PLL_CTRL, PHY_CTRL, PHY_PLL_CTRL, @@ -31,18 +29,11 @@ static const u8 bcm_usb_combo_phy_ss[] = { }; static const u8 bcm_usb_combo_phy_hs[] = { - [PLL_NDIV_FRAC] = 0x04, - [PLL_NDIV_INT] = 0x08, [PLL_CTRL] = 0x0c, [PHY_CTRL] = 0x10, }; -#define HSPLL_NDIV_INT_VAL 0x13 -#define HSPLL_NDIV_FRAC_VAL 0x1005 - static const u8 bcm_usb_hs_phy[] = { - [PLL_NDIV_FRAC] = 0x0, - [PLL_NDIV_INT] = 0x4, [PLL_CTRL] = 0x8, [PHY_CTRL] = 0xc, }; @@ -52,7 +43,6 @@ enum pll_ctrl_bits { SSPLL_SUSPEND_EN, PLL_SEQ_START, PLL_LOCK, - PLL_PDIV, }; static const u8 u3pll_ctrl[] = { @@ -66,29 +56,17 @@ static const u8 u3pll_ctrl[] = { #define HSPLL_PDIV_VAL 0x1 static const u8 u2pll_ctrl[] = { - [PLL_PDIV] = 1, [PLL_RESETB] = 5, [PLL_LOCK] = 6, }; enum bcm_usb_phy_ctrl_bits { CORERDY, - AFE_LDO_PWRDWNB, - AFE_PLL_PWRDWNB, - AFE_BG_PWRDWNB, - PHY_ISO, PHY_RESETB, PHY_PCTL, }; #define PHY_PCTL_MASK 0xffff -/* - * 0x0806 of PCTL_VAL has below bits set - * BIT-8 : refclk divider 1 - * BIT-3:2: device mode; mode is not effect - * BIT-1: soft reset active low - */ -#define HSPHY_PCTL_VAL 0x0806 #define SSPHY_PCTL_VAL 0x0006 static const u8 u3phy_ctrl[] = { @@ -98,10 +76,6 @@ static const u8 u3phy_ctrl[] = { static const u8 u2phy_ctrl[] = { [CORERDY] = 0, - [AFE_LDO_PWRDWNB] = 1, - [AFE_PLL_PWRDWNB] = 2, - [AFE_BG_PWRDWNB] = 3, - [PHY_ISO] = 4, [PHY_RESETB] = 5, [PHY_PCTL] = 6, }; @@ -186,38 +160,13 @@ static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg) int ret = 0; void __iomem *regs = phy_cfg->regs; const u8 *offset; - u32 rd_data; offset = phy_cfg->offset; - writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]); - writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]); - - rd_data = readl(regs + offset[PLL_CTRL]); - rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]); - rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]); - writel(rd_data, regs + offset[PLL_CTRL]); - - /* Set Core Ready high */ - bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], - BIT(u2phy_ctrl[CORERDY])); - - /* Maximum timeout for Core Ready done */ - msleep(30); - + bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL], + BIT(u2pll_ctrl[PLL_RESETB])); bcm_usb_reg32_setbits(regs + offset[PLL_CTRL], BIT(u2pll_ctrl[PLL_RESETB])); - 
bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], - BIT(u2phy_ctrl[PHY_RESETB])); - - - rd_data = readl(regs + offset[PHY_CTRL]); - rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]); - rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]); - writel(rd_data, regs + offset[PHY_CTRL]); - - /* Maximum timeout for PLL reset done */ - msleep(30); ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL], BIT(u2pll_ctrl[PLL_LOCK])); diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig index 4053ba6cd0fb..6e3e5d67e816 100644 --- a/drivers/phy/marvell/Kconfig +++ b/drivers/phy/marvell/Kconfig @@ -3,8 +3,8 @@ # Phy drivers for Marvell platforms # config ARMADA375_USBCLUSTER_PHY - def_bool y - depends on MACH_ARMADA_375 || COMPILE_TEST + bool "Armada 375 USB cluster PHY support" if COMPILE_TEST + default y if MACH_ARMADA_375 depends on OF && HAS_IOMEM select GENERIC_PHY diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c index 6960dfd8ad8c..0fe408964334 100644 --- a/drivers/phy/marvell/phy-armada38x-comphy.c +++ b/drivers/phy/marvell/phy-armada38x-comphy.c @@ -41,6 +41,7 @@ struct a38x_comphy_lane { struct a38x_comphy { void __iomem *base; + void __iomem *conf; struct device *dev; struct a38x_comphy_lane lane[MAX_A38X_COMPHY]; }; @@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = { { 0, 0, 3 }, }; +static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable) +{ + struct a38x_comphy *priv = lane->priv; + u32 conf; + + if (priv->conf) { + conf = readl_relaxed(priv->conf); + if (enable) + conf |= BIT(lane->port); + else + conf &= ~BIT(lane->port); + writel(conf, priv->conf); + } +} + static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane, unsigned int offset, u32 mask, u32 value) { @@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) { struct a38x_comphy_lane *lane = phy_get_drvdata(phy); unsigned int gen; + int ret; if (mode != PHY_MODE_ETHERNET) return -EINVAL; @@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) return -EINVAL; } + a38x_set_conf(lane, false); + a38x_comphy_set_speed(lane, gen, gen); - return a38x_comphy_poll(lane, COMPHY_STAT1, - COMPHY_STAT1_PLL_RDY_TX | - COMPHY_STAT1_PLL_RDY_RX, - COMPHY_STAT1_PLL_RDY_TX | - COMPHY_STAT1_PLL_RDY_RX); + ret = a38x_comphy_poll(lane, COMPHY_STAT1, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX); + + if (ret == 0) + a38x_set_conf(lane, true); + + return ret; } static const struct phy_ops a38x_comphy_ops = { @@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); priv->dev = &pdev->dev; priv->base = base; + /* Optional */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf"); + if (res) { + priv->conf = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->conf)) + return PTR_ERR(priv->conf); + } + for_each_available_child_of_node(pdev->dev.of_node, child) { struct phy *phy; int ret; diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c index 1a138be8bd6a..810f25a47632 100644 --- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c @@ -26,7 +26,6 
@@ #define COMPHY_SIP_POWER_ON 0x82000001 #define COMPHY_SIP_POWER_OFF 0x82000002 #define COMPHY_SIP_PLL_LOCK 0x82000003 -#define COMPHY_FW_NOT_SUPPORTED (-1) #define COMPHY_FW_MODE_SATA 0x1 #define COMPHY_FW_MODE_SGMII 0x2 @@ -112,10 +111,19 @@ static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, unsigned long mode) { struct arm_smccc_res res; + s32 ret; arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res); + ret = res.a0; - return res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return 0; + case SMCCC_RET_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EINVAL; + } } static int mvebu_a3700_comphy_get_fw_mode(int lane, int port, @@ -220,7 +228,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy) } ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param); - if (ret == COMPHY_FW_NOT_SUPPORTED) + if (ret == -EOPNOTSUPP) dev_err(lane->dev, "unsupported SMC call, try updating your firmware\n"); diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index e3b87c94aaf6..849351b4805f 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -123,7 +123,6 @@ #define COMPHY_SIP_POWER_ON 0x82000001 #define COMPHY_SIP_POWER_OFF 0x82000002 -#define COMPHY_FW_NOT_SUPPORTED (-1) /* * A lane is described by the following bitfields: @@ -273,10 +272,19 @@ static int mvebu_comphy_smc(unsigned long function, unsigned long phys, unsigned long lane, unsigned long mode) { struct arm_smccc_res res; + s32 ret; arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res); + ret = res.a0; - return res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return 0; + case SMCCC_RET_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EINVAL; + } } static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port, @@ -819,7 +827,7 @@ static int mvebu_comphy_power_on(struct phy *phy) if (!ret) return ret; - if (ret == COMPHY_FW_NOT_SUPPORTED) + if (ret == -EOPNOTSUPP) dev_err(priv->dev, "unsupported SMC call, try updating your firmware\n"); diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index 5baf64dfb24d..1bebad36bf2e 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -625,35 +625,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev) generic_phy = devm_phy_create(ddata->dev, NULL, &ops); if (IS_ERR(generic_phy)) { error = PTR_ERR(generic_phy); - return PTR_ERR(generic_phy); + goto out_reg_disable; } phy_set_drvdata(generic_phy, ddata); phy_provider = devm_of_phy_provider_register(ddata->dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) - return PTR_ERR(phy_provider); + if (IS_ERR(phy_provider)) { + error = PTR_ERR(phy_provider); + goto out_reg_disable; + } error = cpcap_usb_init_optional_pins(ddata); if (error) - return error; + goto out_reg_disable; cpcap_usb_init_optional_gpios(ddata); error = cpcap_usb_init_iio(ddata); if (error) - return error; + goto out_reg_disable; error = cpcap_usb_init_interrupts(pdev, ddata); if (error) - return error; + goto out_reg_disable; usb_add_phy_dev(&ddata->phy); atomic_set(&ddata->active, 1); schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1)); return 0; + +out_reg_disable: + regulator_disable(ddata->vusb); + + return error; } static int cpcap_usb_phy_remove(struct platform_device *pdev) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 
27dd20a7fe13..5ddbf9a1f328 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -402,8 +402,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), @@ -429,7 +429,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), @@ -438,7 +437,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), - QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7), }; static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { @@ -446,6 +444,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36), + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), }; static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { @@ -456,7 +456,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), - QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4), }; static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { @@ -1107,6 +1106,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .pwrdn_ctrl = SW_PWRDN, }; +static const char * const ipq8074_pciephy_clk_l[] = { + "aux", "cfg_ahb", +}; /* list of resets */ static const char * const ipq8074_pciephy_reset_l[] = { "phy", "common", @@ -1124,8 +1126,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), .pcs_tbl = ipq8074_pcie_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), - .clk_list = NULL, - .num_clks = 0, + .clk_list = ipq8074_pciephy_clk_l, + .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), .vreg_list = NULL, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 335ea5d7ef40..f6b1e6359b8c 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -77,6 +77,8 @@ #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc /* Only for QMP V2 PHY - TX registers */ +#define QSERDES_TX_EMP_POST1_LVL 0x018 +#define QSERDES_TX_SLEW_CNTL 0x040 #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054 #define QSERDES_TX_DEBUG_BUS_SEL 0x064 #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068 diff --git 
a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index bfb22f868857..cfb98bba7715 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -111,6 +111,7 @@ struct rcar_gen3_chan { struct work_struct work; struct mutex lock; /* protects rphys[...].powered */ enum usb_dr_mode dr_mode; + int irq; bool extcon_host; bool is_otg_channel; bool uses_otg_pins; @@ -389,12 +390,38 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) rcar_gen3_device_recognition(ch); } +static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) +{ + struct rcar_gen3_chan *ch = _ch; + void __iomem *usb2_base = ch->base; + u32 status = readl(usb2_base + USB2_OBINTSTA); + irqreturn_t ret = IRQ_NONE; + + if (status & USB2_OBINT_BITS) { + dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } + + return ret; +} + static int rcar_gen3_phy_usb2_init(struct phy *p) { struct rcar_gen3_phy *rphy = phy_get_drvdata(p); struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; + int ret; + + if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(channel->dev), channel); + if (ret < 0) + dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); + } /* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); @@ -433,6 +460,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE); + if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) + free_irq(channel->irq, channel); + return 0; } @@ -503,23 +533,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = { .owner = THIS_MODULE, }; -static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) -{ - struct rcar_gen3_chan *ch = _ch; - void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); - irqreturn_t ret = IRQ_NONE; - - if (status & USB2_OBINT_BITS) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; - } - - return ret; -} - static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { { .compatible = "renesas,usb2-phy-r8a77470", @@ -598,7 +611,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct phy_provider *provider; struct resource *res; const struct phy_ops *phy_usb2_ops; - int irq, ret = 0, i; + int ret = 0, i; if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -614,16 +627,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (IS_ERR(channel->base)) return PTR_ERR(channel->base); - /* call request_irq for OTG */ - irq = platform_get_irq_optional(pdev, 0); - if (irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(dev), channel); - if (irq < 0) - dev_err(dev, "No irq handler (%d)\n", irq); - } - + /* get irq number here and request_irq for OTG in phy_init */ + channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { int ret; @@ -649,8 +654,10 @@ static int rcar_gen3_phy_usb2_probe(struct 
platform_device *pdev) */ pm_runtime_enable(dev); phy_usb2_ops = of_device_get_match_data(dev); - if (!phy_usb2_ops) - return -EINVAL; + if (!phy_usb2_ops) { + ret = -EINVAL; + goto error; + } mutex_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c index 2dc19ddd120f..a005fc58bbf0 100644 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c @@ -240,15 +240,17 @@ static int rockchip_emmc_phy_init(struct phy *phy) * - SDHCI driver to get the PHY * - SDHCI driver to init the PHY * - * The clock is optional, so upon any error we just set to NULL. + * The clock is optional, so use clk_get_optional() to get it; if an + * error is returned, report it and fall back to NULL * * NOTE: we don't do anything special for EPROBE_DEFER here. Given the * above expected use case, EPROBE_DEFER isn't sensible to expect, so * it's just like any other error. */ - rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk"); + rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk"); if (IS_ERR(rk_phy->emmcclk)) { - dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret); + ret = PTR_ERR(rk_phy->emmcclk); + dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret); rk_phy->emmcclk = NULL; } diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index e510732afb8b..7f6279fb4f8f 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - return exynos5420_usbdrd_phy_calibrate(phy_drd); + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) + return exynos5420_usbdrd_phy_calibrate(phy_drd); + return 0; } static const struct phy_ops exynos5_usbdrd_phy_ops = { diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c index 56a5083fe6f9..32be62e49804 100644 --- a/drivers/phy/samsung/phy-s5pv210-usb2.c +++ b/drivers/phy/samsung/phy-s5pv210-usb2.c @@ -139,6 +139,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) udelay(10); rst &= ~rstbits; writel(rst, drv->reg_phy + S5PV210_UPHYRST); + /* The following delay is necessary for the reset sequence to be + * completed + */ + udelay(80); } else { pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); pwr |= phypwr; diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index ec231e40ef2a..a7577e316baf 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -314,6 +314,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = { .compatible = "socionext,uniphier-pro4-usb3-ssphy", .data = &uniphier_pro4_data, }, + { + .compatible = "socionext,uniphier-pro5-usb3-ssphy", + .data = &uniphier_pro4_data, + }, { .compatible = "socionext,uniphier-pxs2-usb3-ssphy", .data = &uniphier_pxs2_data, diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 2ea8497af82a..bf5d80b97597 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -949,6 +949,7 @@ power_down: reset: reset_control_assert(padctl->rst); remove: + platform_set_drvdata(pdev, NULL); soc->ops->remove(padctl); return err; } diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 88a047b9fa6f..6ef12017ff4e 100644
--- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -625,8 +625,10 @@ static int serdes_am654_probe(struct platform_device *pdev) pm_runtime_enable(dev); phy = devm_phy_create(dev, NULL, &ops); - if (IS_ERR(phy)) - return PTR_ERR(phy); + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + goto clk_err; + } phy_set_drvdata(phy, am654_phy); phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate); diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 9887f908f540..812e5409d359 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) usb_remove_phy(&twl->phy); pm_runtime_get_sync(twl->dev); - cancel_delayed_work(&twl->id_workaround_work); + cancel_delayed_work_sync(&twl->id_workaround_work); device_remove_file(twl->dev, &dev_attr_vbus); /* set transceiver mode to power on defaults */ diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index bb07024d22ed..0a745769e712 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -334,7 +334,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24); #define D22 40 SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8)); -SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8)); +SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8)); PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8); GROUP_DECL(PWM8G0, D22); diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index 54933665b5f8..22aca6d182c0 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -277,13 +277,76 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) { /* - * The signal type is GPIO if the signal name has "GPIO" as a prefix. - * strncmp (rather than strcmp) is used to implement the prefix - * requirement. + * We need to differentiate between GPIO and non-GPIO signals to + * implement the gpio_request_enable() interface. For better or worse + * the ASPEED pinctrl driver uses the expression names to determine + * whether an expression will mux a pin for GPIO. * - * expr->signal might look like "GPIOT3" in the GPIO case. + * Generally we have the following - A GPIO such as B1 has: + * + * - expr->signal set to "GPIOB1" + * - expr->function set to "GPIOB1" + * + * Using this fact we can determine whether the provided expression is + * a GPIO expression by testing the signal name for the string prefix + * "GPIO". + * + * However, some GPIOs are input-only, and the ASPEED datasheets name + * them differently. An input-only GPIO such as T0 has: + * + * - expr->signal set to "GPIT0" + * - expr->function set to "GPIT0" + * + * It's tempting to generalise the prefix test from "GPIO" to "GPI" to + * account for both GPIOs and GPIs, but in doing so we run aground on + * another feature: + * + * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO + * function where the input state of one pin is replicated as the + * output state of another (as if they were shorted together - a mux + * configuration that is typically enabled by hardware strapping). + * This feature allows the BMC to pass e.g. 
power button state through + * to the host while the BMC is yet to boot, but take control of the + * button state once the BMC has booted by muxing each pin as a + * separate, pin-specific GPIO. + * + * Conceptually this pass-through mode is a form of GPIO and is named + * as such in the datasheets, e.g. "GPID0". This naming similarity + * trips us up with the simple GPI-prefixed-signal-name scheme + * discussed above, as the pass-through configuration is not what we + * want when muxing a pin as GPIO for the GPIO subsystem. + * + * On e.g. the AST2400, a pass-through function "GPID0" is grouped on + * balls A18 and D16, where we have: + * + * For ball A18: + * - expr->signal set to "GPID0IN" + * - expr->function set to "GPID0" + * + * For ball D16: + * - expr->signal set to "GPID0OUT" + * - expr->function set to "GPID0" + * + * By contrast, the pin-specific GPIO expressions for the same pins are + * as follows: + * + * For ball A18: + * - expr->signal looks like "GPIOD0" + * - expr->function looks like "GPIOD0" + * + * For ball D16: + * - expr->signal looks like "GPIOD1" + * - expr->function looks like "GPIOD1" + * + * Testing both the signal _and_ function names gives us the means to + * differentiate the pass-through GPIO pinmux configuration from the + * pin-specific configuration that the GPIO subsystem is after: An + * expression is a pin-specific (non-pass-through) GPIO configuration + * if the signal prefix is "GPI" and the signal name matches the + * function name. */ - return strncmp(expr->signal, "GPIO", 4) == 0; + return !strncmp(expr->signal, "GPI", 3) && + !strcmp(expr->signal, expr->function); } static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs) diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index 140c5ce9fbc1..0aaa20653536 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -452,10 +452,11 @@ struct aspeed_sig_desc { * evaluation of the descriptors. * * @signal: The signal name for the priority level on the pin. If the signal - * type is GPIO, then the signal name must begin with the string - * "GPIO", e.g. GPIOA0, GPIOT4 etc. + * type is GPIO, then the signal name must begin with the + * prefix "GPI", e.g. GPIOA0, GPIT0 etc. * @function: The name of the function the signal participates in for the - * associated expression + * associated expression. For pin-specific GPIO, the function + * name must match the signal name.
* @ndescs: The number of signal descriptors in the expression * @descs: Pointer to an array of signal descriptors that comprise the * function expression diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index dcf7df797af7..0ed14de0134c 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -23,6 +23,7 @@ config PINCTRL_BCM2835 select PINMUX select PINCONF select GENERIC_PINCONF + select GPIOLIB select GPIOLIB_IRQCHIP default ARCH_BCM2835 || ARCH_BRCMSTB help diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 9f42036c5fbb..1f81569c7ae3 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -774,16 +774,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, return 0; } -/* - * imx_free_resources() - free memory used by this driver - * @info: info driver instance - */ -static void imx_free_resources(struct imx_pinctrl *ipctl) -{ - if (ipctl->pctl) - pinctrl_unregister(ipctl->pctl); -} - int imx_pinctrl_probe(struct platform_device *pdev, const struct imx_pinctrl_soc_info *info) { @@ -874,23 +864,18 @@ int imx_pinctrl_probe(struct platform_device *pdev, &ipctl->pctl); if (ret) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); - goto free; + return ret; } ret = imx_pinctrl_probe_dt(pdev, ipctl); if (ret) { dev_err(&pdev->dev, "fail to probe dt properties\n"); - goto free; + return ret; } dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); return pinctrl_enable(ipctl->pctl); - -free: - imx_free_resources(ipctl); - - return ret; } static int __maybe_unused imx_pinctrl_suspend(struct device *dev) diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 7e29e3fecdb2..5bb183c0ce31 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); if (ret) { - pinctrl_unregister(ipctl->pctl); dev_err(&pdev->dev, "Failed to populate subdevices\n"); return ret; } diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 606fe216f902..d05f20ca90d7 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -811,6 +811,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev, pm_runtime_put(&vg->pdev->dev); } +static void byt_gpio_direct_irq_check(struct byt_gpio *vg, + unsigned int offset) +{ + void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + + /* + * Before making any direction modifications, do a check if gpio is set + * for direct IRQ. On Bay Trail, setting GPIO to output does not make + * sense, so let's at least inform the caller before they shoot + * themselves in the foot. 
+ if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) + dev_info_once(&vg->pdev->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); +} + static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, struct pinctrl_gpio_range *range, unsigned int offset, @@ -818,7 +833,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, { struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); - void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); unsigned long flags; u32 value; @@ -829,14 +843,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, if (input) value |= BYT_OUTPUT_EN; else - /* - * Before making any direction modifications, do a check if gpio - * is set for direct IRQ. On baytrail, setting GPIO to output - * does not make sense, so let's at least warn the caller before - * they shoot themselves in the foot. - */ - WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN, - "Potential Error: Setting GPIO with direct_irq_en to output"); + byt_gpio_direct_irq_check(vg, offset); + writel(value, val_reg); raw_spin_unlock_irqrestore(&byt_lock, flags); @@ -1052,7 +1060,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, break; case PIN_CONFIG_INPUT_DEBOUNCE: debounce = readl(db_reg); - debounce &= ~BYT_DEBOUNCE_PULSE_MASK; if (arg) conf |= BYT_DEBOUNCE_EN; @@ -1061,24 +1068,31 @@ switch (arg) { case 375: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_375US; break; case 750: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_750US; break; case 1500: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_1500US; break; case 3000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_3MS; break; case 6000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_6MS; break; case 12000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_12MS; break; case 24000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_24MS; break; default: @@ -1176,19 +1190,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { - return pinctrl_gpio_direction_input(chip->base + offset); + struct byt_gpio *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; + + raw_spin_lock_irqsave(&byt_lock, flags); + + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + reg |= BYT_OUTPUT_EN; + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); + return 0; } +/* + * Note: despite the temptation, this MUST NOT be converted into a call to + * pinctrl_gpio_direction_output() + byt_gpio_set(); that does not work, as + * this MUST be done as a single BYT_VAL_REG register write. + * See the commit message of the commit adding this comment for details.
+ */ static int byt_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { - int ret = pinctrl_gpio_direction_output(chip->base + offset); + struct byt_gpio *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; - if (ret) - return ret; + raw_spin_lock_irqsave(&byt_lock, flags); - byt_gpio_set(chip, offset, value); + byt_gpio_direct_irq_check(vg, offset); + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + if (value) + reg |= BYT_LEVEL; + else + reg &= ~BYT_LEVEL; + + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1297,6 +1342,7 @@ static const struct gpio_chip byt_gpio_chip = { .direction_output = byt_gpio_direction_output, .get = byt_gpio_get, .set = byt_gpio_set, + .set_config = gpiochip_generic_config, .dbg_show = byt_gpio_dbg_show, }; diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2c419fa5d1c1..8f06445a8e39 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1474,11 +1474,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) struct chv_pinctrl *pctrl = gpiochip_get_data(gc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; + unsigned long flags; u32 intr_line; chained_irq_enter(chip, desc); + raw_spin_lock_irqsave(&chv_lock, flags); pending = readl(pctrl->regs + CHV_INTSTAT); + raw_spin_unlock_irqrestore(&chv_lock, flags); + for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) { unsigned irq, offset; diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 83981ad66a71..4e89bbf6b76a 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -662,6 +662,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, value |= PADCFG1_TERM_UP; + /* Set default strength value in case none is given */ + if (arg == 1) + arg = 5000; + switch (arg) { case 20000: value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT; @@ -684,6 +688,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, case PIN_CONFIG_BIAS_PULL_DOWN: value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK); + /* Set default strength value in case none is given */ + if (arg == 1) + arg = 5000; + switch (arg) { case 20000: value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT; diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c index 7fdf4257df1e..ad4b446d588e 100644 --- a/drivers/pinctrl/intel/pinctrl-lewisburg.c +++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c @@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = { static const struct intel_community lbg_communities[] = { LBG_COMMUNITY(0, 0, 71), LBG_COMMUNITY(1, 72, 132), - LBG_COMMUNITY(3, 133, 144), - LBG_COMMUNITY(4, 145, 180), - LBG_COMMUNITY(5, 181, 246), + LBG_COMMUNITY(3, 133, 143), + LBG_COMMUNITY(4, 144, 178), + LBG_COMMUNITY(5, 179, 246), }; static const struct intel_pinctrl_soc_data lbg_soc_data = { diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 04ca8ae95df8..9e91d83b0138 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -741,6 +741,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; bits |= BUFCFG_PU_EN; + /* 
Set default strength value in case none is given */ + if (arg == 1) + arg = 20000; + switch (arg) { case 50000: bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; @@ -761,6 +765,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; bits |= BUFCFG_PD_EN; + /* Set default strength value in case none is given */ + if (arg == 1) + arg = 20000; + switch (arg) { case 50000: bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index d936e7aa74c4..7b7736abe9d8 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -15,17 +15,18 @@ #include "pinctrl-intel.h" -#define SPT_PAD_OWN 0x020 -#define SPT_PADCFGLOCK 0x0a0 -#define SPT_HOSTSW_OWN 0x0d0 -#define SPT_GPI_IS 0x100 -#define SPT_GPI_IE 0x120 +#define SPT_PAD_OWN 0x020 +#define SPT_H_PADCFGLOCK 0x090 +#define SPT_LP_PADCFGLOCK 0x0a0 +#define SPT_HOSTSW_OWN 0x0d0 +#define SPT_GPI_IS 0x100 +#define SPT_GPI_IE 0x120 #define SPT_COMMUNITY(b, s, e) \ { \ .barno = (b), \ .padown_offset = SPT_PAD_OWN, \ - .padcfglock_offset = SPT_PADCFGLOCK, \ + .padcfglock_offset = SPT_LP_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ @@ -47,7 +48,7 @@ { \ .barno = (b), \ .padown_offset = SPT_PAD_OWN, \ - .padcfglock_offset = SPT_PADCFGLOCK, \ + .padcfglock_offset = SPT_H_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c index a767a05fa3a0..48e2a6c56a83 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c @@ -414,7 +414,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = { MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)), MPP_MODE(15, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS), - MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)), + MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)), MPP_MODE(16, MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS), MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)), diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index eab078244a4c..a85c679b6276 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -153,7 +153,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); pin_reg &= ~BIT(DB_TMR_LARGE_OFF); } else if (debounce < 250000) { - time = debounce / 15600; + time = debounce / 15625; pin_reg |= time & DB_TMR_OUT_MASK; pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); pin_reg |= BIT(DB_TMR_LARGE_OFF); @@ -163,14 +163,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); pin_reg |= BIT(DB_TMR_LARGE_OFF); } else { - pin_reg &= ~DB_CNTRl_MASK; + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); ret = -EINVAL; } } else { pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); pin_reg &= ~BIT(DB_TMR_LARGE_OFF); pin_reg &= ~DB_TMR_OUT_MASK; - pin_reg &= ~DB_CNTRl_MASK; + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); } writel(pin_reg, gpio_dev->base + offset * 4); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); @@ -435,7 +435,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << 
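/*
 * A note on the two pinctrl-amd changes above, offered as a hedged sketch
 * rather than a statement of the hardware spec: the new divisor 15625 is
 * exactly 15.625 ms in microseconds (1/64 s, consistent with a binary
 * division of a 32.768 kHz clock), so debounce values below 250000 us now
 * map onto whole hardware ticks instead of the rounded 15600. The mask fix
 * matters because DB_CNTRl_MASK is a field-relative mask; clearing it
 * without the shift clears the wrong bits. Illustration, assuming for the
 * example DB_CNTRL_OFF = 5 and DB_CNTRl_MASK = 0x3 (the real values live
 * in pinctrl-amd.h and are not shown in this hunk):
 *
 *	u32 reg = 0xff;
 *	reg &= ~DB_CNTRl_MASK;				// 0xfc: bits 0-1 cleared
 *	reg = 0xff;
 *	reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);	// 0x9f: the field cleared
 */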
ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -443,7 +442,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -451,7 +449,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF; - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -459,8 +456,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; - pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); - pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_level_irq); break; @@ -468,8 +463,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; - pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); - pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_level_irq); break; diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 3e5760f1a715..d4a192df5fab 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = { { .name = "uart0", .pins = uart0_pins, - .npins = 9, + .npins = 5, }, { .name = "uart1", diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c index 62c02b969327..7521a924dffb 100644 --- a/drivers/pinctrl/pinctrl-falcon.c +++ b/drivers/pinctrl/pinctrl-falcon.c @@ -431,24 +431,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) /* load and remap the pad resources of the different banks */ for_each_compatible_node(np, NULL, "lantiq,pad-falcon") { - struct platform_device *ppdev = of_find_device_by_node(np); const __be32 *bank = of_get_property(np, "lantiq,bank", NULL); struct resource res; + struct platform_device *ppdev; u32 avail; int pins; if (!of_device_is_available(np)) continue; - if (!ppdev) { - dev_err(&pdev->dev, "failed to find pad pdev\n"); - continue; - } if (!bank || *bank >= PORTS) continue; if (of_address_to_resource(np, 0, &res)) continue; + + ppdev = of_find_device_by_node(np); + if (!ppdev) { + dev_err(&pdev->dev, "failed to find pad pdev\n"); + continue; + } + falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); + put_device(&ppdev->dev); if (IS_ERR(falcon_info.clk[*bank])) { dev_err(&ppdev->dev, "failed to get clock\n"); of_node_put(np); diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 6e2683016c1f..61e7d938d4c5 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -1378,7 +1378,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc, static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value) { - if (jzgc->jzpc->version >= ID_JZ4760) + if (jzgc->jzpc->version >= ID_JZ4770) 
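/*
 * Context for the ID_JZ4760 -> ID_JZ4770 changes that follow: these
 * comparisons rely on the ordering of the SoC version enum, and the
 * direction of the change suggests ID_JZ4760 does not sort between
 * ID_JZ4740 and ID_JZ4770 in this tree (an assumption here; the enum
 * itself is outside the hunk). A minimal sketch of the pitfall:
 *
 *	enum jz_version { ID_JZ4740, ID_JZ4770, ID_X1830, ID_JZ4760 };
 *
 *	// Appended last, "version >= ID_JZ4760" is *false* on JZ4770 and
 *	// X1830, silently routing newer SoCs to the JZ4740 code paths;
 *	// "version >= ID_JZ4770" keeps them on the new register layout.
 */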
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value); @@ -1389,7 +1389,7 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc, { u8 reg1, reg2; - if (jzgc->jzpc->version >= ID_JZ4760) { + if (jzgc->jzpc->version >= ID_JZ4770) { reg1 = JZ4760_GPIO_PAT1; reg2 = JZ4760_GPIO_PAT0; } else { @@ -1464,7 +1464,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd) struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); int irq = irqd->hwirq; - if (jzgc->jzpc->version >= ID_JZ4760) + if (jzgc->jzpc->version >= ID_JZ4770) ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true); @@ -1480,7 +1480,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd) ingenic_gpio_irq_mask(irqd); - if (jzgc->jzpc->version >= ID_JZ4760) + if (jzgc->jzpc->version >= ID_JZ4770) ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false); @@ -1500,12 +1500,12 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) */ high = ingenic_gpio_get_value(jzgc, irq); if (high) - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW); else - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH); } - if (jzgc->jzpc->version >= ID_JZ4760) + if (jzgc->jzpc->version >= ID_JZ4770) ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true); @@ -1538,7 +1538,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) */ bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq); - type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + type = high ? 
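/*
 * Why the ack path above switches to IRQ_TYPE_LEVEL_* when emulating
 * both-edge interrupts (a plausible reading of the change, not quoted from
 * the changelog): the trigger is re-armed from software after every
 * interrupt based on a fresh sample of the pin. With a *level* trigger, a
 * transition that sneaks in between the sample and the re-arm leaves the
 * line at the new level and fires immediately; an *edge* trigger armed the
 * same way would already have missed it:
 *
 *	bool high = ingenic_gpio_get_value(jzgc, irq);
 *	irq_set_type(jzgc, irq,
 *		     high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH);
 */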
IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH; } irq_set_type(jzgc, irqd->hwirq, type); @@ -1562,7 +1562,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc) chained_irq_enter(irq_chip, desc); - if (jzgc->jzpc->version >= ID_JZ4760) + if (jzgc->jzpc->version >= ID_JZ4770) flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG); else flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG); @@ -1643,8 +1643,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) struct ingenic_pinctrl *jzpc = jzgc->jzpc; unsigned int pin = gc->base + offset; - if (jzpc->version >= ID_JZ4760) - return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1); + if (jzpc->version >= ID_JZ4770) + return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) || + ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1); if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT)) return true; @@ -1675,7 +1676,7 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc, ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2); ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->version >= ID_JZ4760) { + } else if (jzpc->version >= ID_JZ4770) { ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); ingenic_config_pin(jzpc, pin, GPIO_MSK, false); ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2); @@ -1683,7 +1684,7 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc, } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2); - ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0); + ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1); } return 0; @@ -1733,7 +1734,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true); ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->version >= ID_JZ4760) { + } else if (jzpc->version >= ID_JZ4770) { ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); ingenic_config_pin(jzpc, pin, GPIO_MSK, true); ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input); @@ -1763,7 +1764,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, unsigned int offt = pin / PINS_PER_GPIO_CHIP; bool pull; - if (jzpc->version >= ID_JZ4760) + if (jzpc->version >= ID_JZ4770) pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN); else pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS); @@ -1795,7 +1796,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, unsigned int pin, bool enabled) { - if (jzpc->version >= ID_JZ4760) + if (jzpc->version >= ID_JZ4770) ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !enabled); else ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled); diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 3a235487e38d..d8bcbefcba89 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -122,7 +122,7 @@ static const struct regmap_config mcp23x08_regmap = { .max_register = MCP_OLAT, }; -static const struct reg_default mcp23x16_defaults[] = { +static const struct reg_default mcp23x17_defaults[] = { {.reg = MCP_IODIR << 1, .def = 0xffff}, {.reg = MCP_IPOL << 1, .def = 0x0000}, {.reg = MCP_GPINTEN << 1, .def = 0x0000}, @@ -133,23 +133,23 @@ static const 
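/*
 * Background for the mcp23x16 -> mcp23x17 regmap rename and range fix
 * below: in regmap terms a "precious" register is one whose read itself
 * has side effects, so the core must never read it behind the driver's
 * back (debugfs dumps, cache syncs). On these expanders reading INTCAP
 * (or GPIO) clears the captured interrupt state, hence the range is
 * widened to start at MCP_INTCAP instead of MCP_GPIO. The general shape,
 * using this driver's shifted register layout:
 *
 *	static const struct regmap_range example_precious_range = {
 *		.range_min = MCP_INTCAP << 1,	// read clears IRQ capture
 *		.range_max = MCP_GPIO << 1,	// read also clears INTCAP
 *	};
 */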
struct reg_default mcp23x16_defaults[] = { {.reg = MCP_OLAT << 1, .def = 0x0000}, }; -static const struct regmap_range mcp23x16_volatile_range = { +static const struct regmap_range mcp23x17_volatile_range = { .range_min = MCP_INTF << 1, .range_max = MCP_GPIO << 1, }; -static const struct regmap_access_table mcp23x16_volatile_table = { - .yes_ranges = &mcp23x16_volatile_range, +static const struct regmap_access_table mcp23x17_volatile_table = { + .yes_ranges = &mcp23x17_volatile_range, .n_yes_ranges = 1, }; -static const struct regmap_range mcp23x16_precious_range = { - .range_min = MCP_GPIO << 1, +static const struct regmap_range mcp23x17_precious_range = { + .range_min = MCP_INTCAP << 1, .range_max = MCP_GPIO << 1, }; -static const struct regmap_access_table mcp23x16_precious_table = { - .yes_ranges = &mcp23x16_precious_range, +static const struct regmap_access_table mcp23x17_precious_table = { + .yes_ranges = &mcp23x17_precious_range, .n_yes_ranges = 1, }; @@ -159,10 +159,10 @@ static const struct regmap_config mcp23x17_regmap = { .reg_stride = 2, .max_register = MCP_OLAT << 1, - .volatile_table = &mcp23x16_volatile_table, - .precious_table = &mcp23x16_precious_table, - .reg_defaults = mcp23x16_defaults, - .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults), + .volatile_table = &mcp23x17_volatile_table, + .precious_table = &mcp23x17_precious_table, + .reg_defaults = mcp23x17_defaults, + .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults), .cache_type = REGCACHE_FLAT, .val_format_endian = REGMAP_ENDIAN_LITTLE, }; diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index fb76fb2e9ea5..0a951a75c82b 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -711,11 +711,12 @@ static void ocelot_irq_handler(struct irq_desc *desc) struct irq_chip *parent_chip = irq_desc_get_chip(desc); struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct ocelot_pinctrl *info = gpiochip_get_data(chip); + unsigned int id_reg = OCELOT_GPIO_INTR_IDENT * info->stride; unsigned int reg = 0, irq, i; unsigned long irqs; for (i = 0; i < info->stride; i++) { - regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, ®); + regmap_read(info->map, id_reg + 4 * i, ®); if (!reg) continue; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index dc0bbf198cbc..59fe3204e965 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -506,8 +506,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, } map_num += grp->npins; - new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), - GFP_KERNEL); + + new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL); if (!new_map) return -ENOMEM; @@ -517,7 +517,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, /* create mux map */ parent = of_get_parent(np); if (!parent) { - devm_kfree(pctldev->dev, new_map); + kfree(new_map); return -EINVAL; } new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; @@ -544,6 +544,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, static void rockchip_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { + kfree(map); } static const struct pinctrl_ops rockchip_pctrl_ops = { @@ -2810,7 +2811,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset) if (!bank->domain) return -ENXIO; + clk_enable(bank->clk); virq = irq_create_mapping(bank->domain, offset); + clk_disable(bank->clk); return (virq) ? 
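/*
 * Two rockchip details in the hunks above worth spelling out (a reading of
 * the diffs, not of their changelogs). First, the maps returned by
 * .dt_node_to_map() are handed back to .dt_free_map() by the pinctrl core;
 * a devm_ allocation is only released at driver unbind, so each map/unmap
 * cycle leaked memory until then. The fix pairs a plain allocation with an
 * explicit free:
 *
 *	new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
 *	...
 *	static void example_dt_free_map(struct pinctrl_dev *pctldev,
 *					struct pinctrl_map *map,
 *					unsigned num_maps)
 *	{
 *		kfree(map);	// matches the kcalloc() in dt_node_to_map
 *	}
 *
 * Second, irq_create_mapping() may run the domain's mapping callbacks,
 * which touch bank registers, hence the clk_enable()/clk_disable() bracket
 * in rockchip_gpio_to_irq().
 */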
: -ENXIO; } @@ -3383,12 +3386,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev) static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) { struct rockchip_pinctrl *info = dev_get_drvdata(dev); - int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, - rk3288_grf_gpio6c_iomux | - GPIO6C6_SEL_WRITE_ENABLE); + int ret; - if (ret) - return ret; + if (info->ctrl->type == RK3288) { + ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, + rk3288_grf_gpio6c_iomux | + GPIO6C6_SEL_WRITE_ENABLE); + if (ret) + return ret; + } return pinctrl_force_default(info->pctl_dev); } diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c index 017fc6b3e27e..ca9da61cfc4e 100644 --- a/drivers/pinctrl/pinctrl-rza1.c +++ b/drivers/pinctrl/pinctrl-rza1.c @@ -418,7 +418,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = { }; static const struct rza1_swio_entry rza1l_swio_entries[] = { - [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins }, + [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins }, }; /* RZ/A1L (r7s72102x) pinmux flags table */ diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 1e0614daee9b..a9d511982780 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /* If pinconf isn't supported, don't parse properties in below. */ if (!PCS_HAS_PINCONF) - return 0; + return -ENOTSUPP; /* cacluate how much properties are supported in current node */ for (i = 0; i < ARRAY_SIZE(prop2); i++) { @@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, nconfs++; } if (!nconfs) - return 0; + return -ENOTSUPP; func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), @@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF && function) { res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (res == 0) + *num_maps = 2; + else if (res == -ENOTSUPP) + *num_maps = 1; + else goto free_pingroups; - *num_maps = 2; } else { *num_maps = 1; } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 763da0be10d6..44320322037d 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -688,7 +688,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl, pol = msm_readl_intr_cfg(pctrl, g); pol ^= BIT(g->intr_polarity_bit); - msm_writel_intr_cfg(val, pctrl, g); + msm_writel_intr_cfg(pol, pctrl, g); val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit); intstat = msm_readl_intr_status(pctrl, g); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index f1fece5b9c06..3769ad08eadf 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -170,6 +170,7 @@ struct pmic_gpio_state { struct regmap *map; struct pinctrl_dev *ctrl; struct gpio_chip chip; + struct irq_chip irq; }; static const struct pinconf_generic_params pmic_gpio_bindings[] = { @@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, return 0; } -static struct irq_chip pmic_gpio_irq_chip = { - .name = "spmi-gpio", - .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_set_type = irq_chip_set_type_parent, - .irq_set_wake = 
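/*
 * On the spmi-gpio change around this point: the static irq_chip is being
 * replaced by a copy embedded in the per-device state. The likely
 * motivation (an assumption, not quoted from the commit) is that gpiolib
 * may write to the irq_chip it is handed, so one static struct shared by
 * several PMIC instances is no longer safe. The resulting pattern:
 *
 *	struct pmic_gpio_state {
 *		...
 *		struct irq_chip irq;	// one irq_chip per instance
 *	};
 *
 *	state->irq.name = "spmi-gpio";
 *	...
 *	girq->chip = &state->irq;	// instead of &static_irq_chip
 */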
irq_chip_set_wake_parent, - .flags = IRQCHIP_MASK_ON_SUSPEND, -}; - static int pmic_gpio_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, @@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (!parent_domain) return -ENXIO; + state->irq.name = "spmi-gpio", + state->irq.irq_ack = irq_chip_ack_parent, + state->irq.irq_mask = irq_chip_mask_parent, + state->irq.irq_unmask = irq_chip_unmask_parent, + state->irq.irq_set_type = irq_chip_set_type_parent, + state->irq.irq_set_wake = irq_chip_set_wake_parent, + state->irq.flags = IRQCHIP_MASK_ON_SUSPEND, + girq = &state->chip.irq; - girq->chip = &pmic_gpio_irq_chip; + girq->chip = &state->irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; girq->fwnode = of_node_to_fwnode(state->dev->of_node); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 0599f5127b01..84501c785473 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -40,6 +40,8 @@ struct exynos_irq_chip { u32 eint_pend; u32 eint_wake_mask_value; u32 eint_wake_mask_reg; + void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip); }; static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip) @@ -265,6 +267,7 @@ struct exynos_eint_gpio_save { u32 eint_con; u32 eint_fltcon0; u32 eint_fltcon1; + u32 eint_mask; }; /* @@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) return 0; } +static void +exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip) +{ + struct regmap *pmu_regs; + + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { + dev_warn(drvdata->dev, + "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n"); + return; + } + + pmu_regs = drvdata->retention_ctrl->priv; + dev_info(drvdata->dev, + "Setting external wakeup interrupt mask: 0x%x\n", + irq_chip->eint_wake_mask_value); + + regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, + irq_chip->eint_wake_mask_value); +} + +static void +s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip) + +{ + void __iomem *clk_base; + + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { + dev_warn(drvdata->dev, + "No retention data configured bank with external wakeup interrupt. 
Wake-up mask will not be set.\n"); + return; + } + + + clk_base = (void __iomem *) drvdata->retention_ctrl->priv; + + __raw_writel(irq_chip->eint_wake_mask_value, + clk_base + irq_chip->eint_wake_mask_reg); +} + /* * irq_chip for wakeup interrupts */ @@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = { .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, - /* Only difference with exynos4210_wkup_irq_chip: */ + /* Only differences with exynos4210_wkup_irq_chip: */ .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask, }; static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { @@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { @@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; /* list of external wakeup controllers supported */ @@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } -static void -exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, - struct exynos_irq_chip *irq_chip) -{ - struct regmap *pmu_regs; - - if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { - dev_warn(drvdata->dev, - "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n"); - return; - } - - pmu_regs = drvdata->retention_ctrl->priv; - dev_info(drvdata->dev, - "Setting external wakeup interrupt mask: 0x%x\n", - irq_chip->eint_wake_mask_value); - - regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, - irq_chip->eint_wake_mask_value); -} - static void exynos_pinctrl_suspend_bank( struct samsung_pinctrl_drv_data *drvdata, struct samsung_pin_bank *bank) @@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank( + 2 * bank->eint_offset); save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) @@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) else if (bank->eint_type == EINT_TYPE_WKUP) { if (!irq_chip) { irq_chip = bank->irq_chip; - exynos_pinctrl_set_eint_wakeup_mask(drvdata, - irq_chip); + irq_chip->set_eint_wakeup_mask(drvdata, + irq_chip); } else if (bank->irq_chip != irq_chip) { dev_warn(drvdata->dev, "More than one external wakeup interrupt chip configured (bank: %s). 
This is not supported by hardware nor by driver.\n", @@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank( pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4), save->eint_fltcon1); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + bank->eint_offset); @@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank( + 2 * bank->eint_offset); writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); } void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 0cbca30b75dc..77783582080c 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1130,20 +1130,22 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) if (bank == pctl->desc->irq_banks) return; + chained_irq_enter(chip, desc); + reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank); val = readl(pctl->membase + reg); if (val) { int irqoffset; - chained_irq_enter(chip, desc); for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) { int pin_irq = irq_find_mapping(pctl->domain, bank * IRQ_PER_BANK + irqoffset); generic_handle_irq(pin_irq); } - chained_irq_exit(chip, desc); } + + chained_irq_exit(chip, desc); } static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl, diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index e9a7cbb9aa33..01bcef2c01bc 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -685,8 +685,8 @@ static int tegra_pinctrl_resume(struct device *dev) } const struct dev_pm_ops tegra_pinctrl_pm = { - .suspend = &tegra_pinctrl_suspend, - .resume = &tegra_pinctrl_resume + .suspend_noirq = &tegra_pinctrl_suspend, + .resume_noirq = &tegra_pinctrl_resume }; static bool gpio_node_has_range(const char *compatible) diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 25ca2c894b4d..ab0662a33b41 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -645,8 +645,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device) /* Register croc_ec_dev mfd */ rv = cros_ec_dev_init(client_data); - if (rv) + if (rv) { + down_write(&init_lock); goto end_cros_ec_dev_init_error; + } return 0; diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c index 0c3738c3244d..aa2059b33ba5 100644 --- a/drivers/platform/chrome/cros_ec_rpmsg.c +++ b/drivers/platform/chrome/cros_ec_rpmsg.c @@ -42,6 +42,8 @@ struct cros_ec_rpmsg { struct completion xfer_ack; struct work_struct host_event_work; struct rpmsg_endpoint *ept; + bool has_pending_host_event; + bool probe_done; }; /** @@ -186,7 +188,14 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data, memcpy(ec_dev->din, resp->data, len); complete(&ec_rpmsg->xfer_ack); } else if (resp->type == HOST_EVENT_MARK) { - schedule_work(&ec_rpmsg->host_event_work); + /* + * If the host event is sent before cros_ec_register is + * finished, queue the host event. 
+ */ + if (ec_rpmsg->probe_done) + schedule_work(&ec_rpmsg->host_event_work); + else + ec_rpmsg->has_pending_host_event = true; } else { dev_warn(ec_dev->dev, "rpmsg received invalid type = %d", resp->type); @@ -249,6 +258,11 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev) return ret; } + ec_rpmsg->probe_done = true; + + if (ec_rpmsg->has_pending_host_event) + schedule_work(&ec_rpmsg->host_event_work); + return 0; } diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index a831bd5a5b2f..5e4521b01428 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -739,7 +739,6 @@ static int cros_ec_spi_probe(struct spi_device *spi) int err; spi->bits_per_word = 8; - spi->mode = SPI_MODE_0; spi->rt = true; err = spi_setup(spi); if (err < 0) diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 190e4a6186ef..2db7113383fd 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c @@ -426,11 +426,8 @@ static int olpc_ec_probe(struct platform_device *pdev) /* get the EC revision */ err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1); - if (err) { - ec_priv = NULL; - kfree(ec); - return err; - } + if (err) + goto error; config.dev = pdev->dev.parent; config.driver_data = ec; @@ -439,11 +436,17 @@ static int olpc_ec_probe(struct platform_device *pdev) &config); if (IS_ERR(ec->dcon_rdev)) { dev_err(&pdev->dev, "failed to register DCON regulator\n"); - return PTR_ERR(ec->dcon_rdev); + err = PTR_ERR(ec->dcon_rdev); + goto error; } ec->dbgfs_dir = olpc_ec_setup_debugfs(); + return 0; + +error: + ec_priv = NULL; + kfree(ec); return err; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1cab99320514..000d5693fae7 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -269,6 +269,7 @@ config FUJITSU_LAPTOP depends on BACKLIGHT_CLASS_DEVICE depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP + select NEW_LEDS select LEDS_CLASS ---help--- This is a driver for laptops built by Fujitsu: diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 60c18f21588d..d27a564389a4 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -30,6 +30,7 @@ #include <linux/input/sparse-keymap.h> #include <acpi/video.h> +ACPI_MODULE_NAME(KBUILD_MODNAME); MODULE_AUTHOR("Carlos Corbacho"); MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); MODULE_LICENSE("GPL"); @@ -80,7 +81,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { WMID_HOTKEY_EVENT = 0x1, - WMID_ACCEL_EVENT = 0x5, + WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5, }; static const struct key_entry acer_wmi_keymap[] __initconst = { @@ -111,6 +112,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ {KE_IGNORE, 0x81, {KEY_SLEEP} }, {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */ + {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, @@ -127,7 +129,9 @@ struct event_return_value { u8 function; u8 key_num; u16 device_state; - u32 reserved; + u16 reserved1; + u8 kbd_dock_state; + u8 reserved2; } __attribute__((packed)); /* @@ -205,14 +209,13 @@ struct hotkey_function_type_aa { /* * Interface capability flags */ 
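/*
 * The hunk below rewrites the open-coded capability shifts with BIT(),
 * which expands to (1UL << (n)) and reads as a bit position rather than an
 * arithmetic expression, e.g.:
 *
 *	#define ACER_CAP_MAILLED	BIT(0)	// == (1UL << 0) == 0x01
 *	#define ACER_CAP_KBD_DOCK	BIT(6)	// == (1UL << 6) == 0x40
 *
 * Note the semantic change riding along: bits 5 and 6 are repurposed
 * (SET_FUNCTION_MODE and KBD_DOCK replace ACCEL and RFBTN), the catch-all
 * ACER_CAP_ANY is dropped, and the new force_caps module parameter (with
 * -1 meaning "no override") takes over forced-capability handling later in
 * the file.
 */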
-#define ACER_CAP_MAILLED (1<<0) -#define ACER_CAP_WIRELESS (1<<1) -#define ACER_CAP_BLUETOOTH (1<<2) -#define ACER_CAP_BRIGHTNESS (1<<3) -#define ACER_CAP_THREEG (1<<4) -#define ACER_CAP_ACCEL (1<<5) -#define ACER_CAP_RFBTN (1<<6) -#define ACER_CAP_ANY (0xFFFFFFFF) +#define ACER_CAP_MAILLED BIT(0) +#define ACER_CAP_WIRELESS BIT(1) +#define ACER_CAP_BLUETOOTH BIT(2) +#define ACER_CAP_BRIGHTNESS BIT(3) +#define ACER_CAP_THREEG BIT(4) +#define ACER_CAP_SET_FUNCTION_MODE BIT(5) +#define ACER_CAP_KBD_DOCK BIT(6) /* * Interface type flags @@ -235,6 +238,7 @@ static int mailled = -1; static int brightness = -1; static int threeg = -1; static int force_series; +static int force_caps = -1; static bool ec_raw_mode; static bool has_type_aa; static u16 commun_func_bitmap; @@ -244,11 +248,13 @@ module_param(mailled, int, 0444); module_param(brightness, int, 0444); module_param(threeg, int, 0444); module_param(force_series, int, 0444); +module_param(force_caps, int, 0444); module_param(ec_raw_mode, bool, 0444); MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); MODULE_PARM_DESC(force_series, "Force a different laptop series"); +MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value"); MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); struct acer_data { @@ -318,6 +324,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi) return 1; } +static int __init set_force_caps(const struct dmi_system_id *dmi) +{ + if (force_caps == -1) { + force_caps = (uintptr_t)dmi->driver_data; + pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps); + } + return 1; +} + static struct quirk_entry quirk_unknown = { }; @@ -496,6 +511,33 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, .driver_data = &quirk_acer_travelmate_2490, }, + { + .callback = set_force_caps, + .ident = "Acer Aspire Switch 10E SW3-016", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, + { + .callback = set_force_caps, + .ident = "Acer Aspire Switch 10 SW5-012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, + { + .callback = set_force_caps, + .ident = "Acer One 10 (S1003)", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, {} }; @@ -1252,10 +1294,8 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d) interface->capability |= ACER_CAP_THREEG; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) interface->capability |= ACER_CAP_BLUETOOTH; - if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) { - interface->capability |= ACER_CAP_RFBTN; + if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN; - } commun_fn_key_number = type_aa->commun_fn_key_number; } @@ -1519,7 +1559,7 @@ static int acer_gsensor_event(void) struct acpi_buffer output; union acpi_object out_obj[5]; - if (!has_cap(ACER_CAP_ACCEL)) + if (!acer_wmi_accel_dev) return -1; output.length = sizeof(out_obj); @@ -1542,6 +1582,71 @@ static int acer_gsensor_event(void) return 0; } +/* + * Switch series keyboard dock status + */ +static int acer_kbd_dock_state_to_sw_tablet_mode(u8 
kbd_dock_state) +{ + switch (kbd_dock_state) { + case 0x01: /* Docked, traditional clamshell laptop mode */ + return 0; + case 0x04: /* Stand-alone tablet */ + case 0x40: /* Docked, tent mode, keyboard not usable */ + return 1; + default: + pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state); + } + + return 0; +} + +static void acer_kbd_dock_get_initial_state(void) +{ + u8 *output, input[8] = { 0x05, 0x00, }; + struct acpi_buffer input_buf = { sizeof(input), input }; + struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + int sw_tablet_mode; + + status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status")); + return; + } + + obj = output_buf.pointer; + if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) { + pr_err("Unexpected output format getting keyboard-dock initial status\n"); + goto out_free_obj; + } + + output = obj->buffer.pointer; + if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) { + pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n", + output[0], output[3]); + goto out_free_obj; + } + + sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]); + input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode); + +out_free_obj: + kfree(obj); +} + +static void acer_kbd_dock_event(const struct event_return_value *event) +{ + int sw_tablet_mode; + + if (!has_cap(ACER_CAP_KBD_DOCK)) + return; + + sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state); + input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode); + input_sync(acer_wmi_input_dev); +} + /* * Rfkill devices */ @@ -1769,8 +1874,9 @@ static void acer_wmi_notify(u32 value, void *context) sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true); } break; - case WMID_ACCEL_EVENT: + case WMID_ACCEL_OR_KBD_DOCK_EVENT: acer_gsensor_event(); + acer_kbd_dock_event(&return_value); break; default: pr_warn("Unknown function number - %d - %d\n", @@ -1893,8 +1999,6 @@ static int __init acer_wmi_accel_setup(void) gsensor_handle = acpi_device_handle(adev); acpi_dev_put(adev); - interface->capability |= ACER_CAP_ACCEL; - acer_wmi_accel_dev = input_allocate_device(); if (!acer_wmi_accel_dev) return -ENOMEM; @@ -1920,11 +2024,6 @@ err_free_dev: return err; } -static void acer_wmi_accel_destroy(void) -{ - input_unregister_device(acer_wmi_accel_dev); -} - static int __init acer_wmi_input_setup(void) { acpi_status status; @@ -1942,6 +2041,9 @@ static int __init acer_wmi_input_setup(void) if (err) goto err_free_dev; + if (has_cap(ACER_CAP_KBD_DOCK)) + input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE); + status = wmi_install_notify_handler(ACERWMID_EVENT_GUID, acer_wmi_notify, NULL); if (ACPI_FAILURE(status)) { @@ -1949,6 +2051,9 @@ static int __init acer_wmi_input_setup(void) goto err_free_dev; } + if (has_cap(ACER_CAP_KBD_DOCK)) + acer_kbd_dock_get_initial_state(); + err = input_register_device(acer_wmi_input_dev); if (err) goto err_uninstall_notifier; @@ -2079,7 +2184,7 @@ static int acer_resume(struct device *dev) if (has_cap(ACER_CAP_BRIGHTNESS)) set_u32(data->brightness, ACER_CAP_BRIGHTNESS); - if (has_cap(ACER_CAP_ACCEL)) + if (acer_wmi_accel_dev) acer_gsensor_init(); return 0; @@ -2180,7 +2285,7 @@ static int __init acer_wmi_init(void) } /* WMID always provides brightness methods */ interface->capability 
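/*
 * acer_kbd_dock_get_initial_state() above follows the usual WMI calling
 * pattern, worth making explicit: with ACPI_ALLOCATE_BUFFER the firmware
 * reply is allocated by ACPICA and must be freed by the caller, and both
 * the object type and the buffer length have to be validated before the
 * payload is trusted. Reduced to its skeleton:
 *
 *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
 *	union acpi_object *obj;
 *
 *	status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &in, &out);
 *	if (ACPI_FAILURE(status))
 *		return;
 *	obj = out.pointer;
 *	if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8)
 *		goto out_free;		// reject malformed replies
 *	...
 * out_free:
 *	kfree(obj);			// caller owns the reply buffer
 */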
|= ACER_CAP_BRIGHTNESS; - } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { + } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) { pr_err("No WMID device detection method found\n"); return -ENODEV; } @@ -2210,7 +2315,14 @@ static int __init acer_wmi_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) interface->capability &= ~ACER_CAP_BRIGHTNESS; - if (wmi_has_guid(WMID_GUID3)) { + if (wmi_has_guid(WMID_GUID3)) + interface->capability |= ACER_CAP_SET_FUNCTION_MODE; + + if (force_caps != -1) + interface->capability = force_caps; + + if (wmi_has_guid(WMID_GUID3) && + (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) { if (ACPI_FAILURE(acer_wmi_enable_rf_button())) pr_warn("Cannot enable RF Button Driver\n"); @@ -2269,8 +2381,8 @@ error_device_alloc: error_platform_register: if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); - if (has_cap(ACER_CAP_ACCEL)) - acer_wmi_accel_destroy(); + if (acer_wmi_accel_dev) + input_unregister_device(acer_wmi_accel_dev); return err; } @@ -2280,8 +2392,8 @@ static void __exit acer_wmi_exit(void) if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); - if (has_cap(ACER_CAP_ACCEL)) - acer_wmi_accel_destroy(); + if (acer_wmi_accel_dev) + input_unregister_device(acer_wmi_accel_dev); remove_debugfs(); platform_device_unregister(acer_platform_device); diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index b361c73636a4..59b78a181723 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -110,6 +110,16 @@ static struct quirk_entry quirk_asus_forceals = { .wmi_force_als_set = true, }; +static struct quirk_entry quirk_asus_ga401i = { + .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, +}; + +static struct quirk_entry quirk_asus_ga502i = { + .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, +}; + static int dmi_matched(const struct dmi_system_id *dmi) { pr_info("Identified laptop model '%s'\n", dmi->ident); @@ -411,6 +421,78 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_forceals, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401II", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IVC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. 
GA502II", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"), + }, + .driver_data = &quirk_asus_ga502i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA502IU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"), + }, + .driver_data = &quirk_asus_ga502i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA502IV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"), + }, + .driver_data = &quirk_asus_ga502i, + }, {}, }; @@ -514,9 +596,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { .detect_quirks = asus_nb_wmi_quirks, }; +static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { + { + /* + * asus-nb-wm adds no functionality. The T100TA has a detachable + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading + * asus-nb-wm causes the camera LED to turn and _stay_ on. + */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), + }, + }, + { + /* The Asus T200TA has the same issue as the T100TA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} /* Terminating entry */ +}; static int __init asus_nb_wmi_init(void) { + if (dmi_check_system(asus_nb_wmi_blacklist)) + return -ENODEV; + return asus_wmi_register_driver(&asus_nb_wmi_driver); } diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 982f0cc8270c..ed83fb135bab 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -111,6 +111,8 @@ struct bios_args { u32 arg0; u32 arg1; u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */ + u32 arg4; + u32 arg5; } __packed; /* @@ -418,8 +420,12 @@ static int asus_wmi_battery_add(struct power_supply *battery) { /* The WMI method does not provide a way to specific a battery, so we * just assume it is the first battery. + * Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first + * battery is named BATT. 
*/ - if (strcmp(battery->desc->name, "BAT0") != 0) + if (strcmp(battery->desc->name, "BAT0") != 0 && + strcmp(battery->desc->name, "BAT1") != 0 && + strcmp(battery->desc->name, "BATT") != 0) return -ENODEV; if (device_create_file(&battery->dev, diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 74e988f839e8..4c1dd1d4e60b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -2204,10 +2204,13 @@ static int __init dell_init(void) dell_laptop_register_notifier(&dell_laptop_notifier); - micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); - ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); - if (ret < 0) - goto fail_led; + if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) && + dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) { + micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); + ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); + if (ret < 0) + goto fail_led; + } if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c index fe59b0ebff31..ceb8e701028d 100644 --- a/drivers/platform/x86/dell-smbios-base.c +++ b/drivers/platform/x86/dell-smbios-base.c @@ -594,6 +594,7 @@ static int __init dell_smbios_init(void) if (wmi && smm) { pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", wmi, smm); + ret = -ENODEV; goto fail_create_group; } diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index b471b86c28fe..5b516e4c2bfb 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -128,7 +128,7 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { - dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n", + dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n", temp_limits[i]); temp_limits[0] = TEMP_LIMIT0_DEFAULT; temp_limits[1] = TEMP_LIMIT1_DEFAULT; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index a881b709af25..63a530a3d9fe 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -32,6 +32,10 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C"); MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); +static int enable_tablet_mode_sw = -1; +module_param(enable_tablet_mode_sw, int, 0444); +MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)"); + #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" @@ -461,8 +465,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, static ssize_t als_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - u32 tmp = simple_strtoul(buf, NULL, 10); - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, + u32 tmp; + int ret; + + ret = kstrtou32(buf, 10, &tmp); + if (ret) + return ret; + + ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) return ret < 0 ? 
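/*
 * The hp-wmi als_store() conversion just above swaps simple_strtoul() for
 * kstrtou32(). The difference is not cosmetic: simple_strtoul() cannot
 * report failure and silently ignores trailing junk, while kstrtou32()
 * parses the whole string (allowing only the trailing newline sysfs
 * appends) or returns an error the writer actually sees:
 *
 *	u32 tmp;
 *	int ret = kstrtou32(buf, 10, &tmp);	// base 10
 *	if (ret)
 *		return ret;	// -EINVAL ("12abc") or -ERANGE (overflow)
 */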
ret : -EINVAL; @@ -648,10 +658,12 @@ static int __init hp_wmi_input_setup(void) } /* Tablet mode */ - val = hp_wmi_hw_state(HPWMI_TABLET_MASK); - if (!(val < 0)) { - __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + if (enable_tablet_mode_sw > 0) { + val = hp_wmi_hw_state(HPWMI_TABLET_MASK); + if (val >= 0) { + __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + } } err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL); diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c index ffb8d5d1eb5f..f85a5c720507 100644 --- a/drivers/platform/x86/i2c-multi-instantiate.c +++ b/drivers/platform/x86/i2c-multi-instantiate.c @@ -166,13 +166,29 @@ static const struct i2c_inst_data bsg2150_data[] = { {} }; -static const struct i2c_inst_data int3515_data[] = { - { "tps6598x", IRQ_RESOURCE_APIC, 0 }, - { "tps6598x", IRQ_RESOURCE_APIC, 1 }, - { "tps6598x", IRQ_RESOURCE_APIC, 2 }, - { "tps6598x", IRQ_RESOURCE_APIC, 3 }, - {} -}; +/* + * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt + * issues. The most common problem seen is interrupt flood. + * + * There are at least two known causes. Firstly, on some boards, the + * I2CSerialBus resource index does not match the Interrupt resource, i.e. they + * are not one-to-one mapped like in the array below. Secondly, on some boards + * the IRQ line from the PD controller is not actually connected at all. But the + * interrupt flood is also seen on some boards where those are not a problem, so + * there are some other problems as well. + * + * Because of the issues with the interrupt, the device is disabled for now. If + * you wish to debug the issues, uncomment the below, and add an entry for the + * INT3515 device to the i2c_multi_instance_ids table. + * + * static const struct i2c_inst_data int3515_data[] = { + * { "tps6598x", IRQ_RESOURCE_APIC, 0 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 1 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 2 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 3 }, + * { } + * }; + */ /* * Note new device-ids must also be added to i2c_multi_instantiate_ids in @@ -181,7 +197,6 @@ static const struct i2c_inst_data int3515_data[] = { static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = { { "BSG1160", (unsigned long)bsg1160_data }, { "BSG2150", (unsigned long)bsg2150_data }, - { "INT3515", (unsigned long)int3515_data }, { } }; MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 7598cd46cf60..5b81bafa5c16 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -92,6 +92,7 @@ struct ideapad_private { struct dentry *debug; unsigned long cfg; bool has_hw_rfkill_switch; + bool has_touchpad_switch; const char *fnesc_guid; }; @@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj, } else if (attr == &dev_attr_fn_lock.attr) { supported = acpi_has_method(priv->adev->handle, "HALS") && acpi_has_method(priv->adev->handle, "SALS"); - } else + } else if (attr == &dev_attr_touchpad.attr) + supported = priv->has_touchpad_switch; + else supported = true; return supported ? 
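/*
 * The ideapad ->is_visible() change above uses the standard sysfs idiom
 * for conditional attributes: instead of registering and unregistering
 * files by hand, the attribute_group's .is_visible callback returns the
 * attribute's normal mode to expose it, or 0 to hide it entirely:
 *
 *	static umode_t example_is_visible(struct kobject *kobj,
 *					  struct attribute *attr, int idx)
 *	{
 *		if (attr == &dev_attr_touchpad.attr && !has_touchpad_switch)
 *			return 0;	// file is not created at all
 *		return attr->mode;	// keep the declared permissions
 *	}
 *
 * (Names here are hypothetical; the driver keys this off
 * priv->has_touchpad_switch, derived from the ELAN0634 ACPI presence
 * check.)
 */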
attr->mode : 0; @@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv) { unsigned long value; + if (!priv->has_touchpad_switch) + return; + /* Without reading from EC touchpad LED doesn't switch state */ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { /* Some IdeaPads don't really turn off touchpad - they only @@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev) priv->platform_device = pdev; priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list); + /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */ + priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1); + ret = ideapad_sysfs_init(priv); if (ret) return ret; @@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (!priv->has_hw_rfkill_switch) write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1); + /* The same for Touchpad */ + if (!priv->has_touchpad_switch) + write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1); + for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) ideapad_register_rfkill(priv, i); diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ef6d4bd77b1a..164bebbfea21 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -77,6 +77,20 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"), }, }, + { + .ident = "HP Spectre x2 (2015)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), + }, + }, + { + .ident = "Lenovo ThinkPad X1 Tablet Gen 2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), + }, + }, { } }; @@ -563,7 +577,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-hid: created platform device\n"); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b74932307d69..6aaceef3326c 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -15,9 +15,13 @@ #include <linux/platform_device.h> #include <linux/suspend.h> +/* Returned when NOT in tablet mode on some HP Stream x360 11 models */ +#define VGBS_TABLET_MODE_FLAG_ALT 0x10 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */ -#define TABLET_MODE_FLAG 0x40 -#define DOCK_MODE_FLAG 0x80 +#define VGBS_TABLET_MODE_FLAG 0x40 +#define VGBS_DOCK_MODE_FLAG 0x80 + +#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT) MODULE_LICENSE("GPL"); MODULE_AUTHOR("AceLan Kao"); @@ -39,28 +43,59 @@ static const struct key_entry intel_vbtn_keymap[] = { { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */ { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */ - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ - { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ - { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ - { KE_END }, }; +static const struct key_entry intel_vbtn_switchmap[] = { + /* + * SW_DOCK should only be reported for docking 
stations, but DSDTs using the + * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0). + * This causes userspace to think the laptop is docked to a port-replicator + * and to disable suspend-on-lid-close, which is undesirable. + * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting. + */ + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ + { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ + { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ +}; + +#define KEYMAP_LEN \ + (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1) + struct intel_vbtn_priv { + struct key_entry keymap[KEYMAP_LEN]; struct input_dev *input_dev; + bool has_switches; bool wakeup_mode; }; static int intel_vbtn_input_setup(struct platform_device *device) { struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); - int ret; + int ret, keymap_len = 0; + + if (true) { + memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap, + ARRAY_SIZE(intel_vbtn_keymap) * + sizeof(struct key_entry)); + keymap_len += ARRAY_SIZE(intel_vbtn_keymap); + } + + if (priv->has_switches) { + memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap, + ARRAY_SIZE(intel_vbtn_switchmap) * + sizeof(struct key_entry)); + keymap_len += ARRAY_SIZE(intel_vbtn_switchmap); + } + + priv->keymap[keymap_len].type = KE_END; priv->input_dev = devm_input_allocate_device(&device->dev); if (!priv->input_dev) return -ENOMEM; - ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL); + ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL); if (ret) return ret; @@ -115,31 +150,86 @@ out_unknown: static void detect_tablet_mode(struct platform_device *device) { - const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); acpi_handle handle = ACPI_HANDLE(&device->dev); - struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; + unsigned long long vgbs; acpi_status status; int m; - if (!(chassis_type && strcmp(chassis_type, "31") == 0)) - goto out; - - status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); if (ACPI_FAILURE(status)) - goto out; + return; - obj = vgbs_output.pointer; - if (!(obj && obj->type == ACPI_TYPE_INTEGER)) - goto out; - - m = !(obj->integer.value & TABLET_MODE_FLAG); + m = !(vgbs & VGBS_TABLET_MODE_FLAGS); input_report_switch(priv->input_dev, SW_TABLET_MODE, m); - m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0; + m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0; input_report_switch(priv->input_dev, SW_DOCK, m); -out: - kfree(vgbs_output.pointer); +} + +/* + * There are several laptops (non 2-in-1) models out there which support VGBS, + * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in + * turn causes userspace (libinput) to suppress events from the builtin + * keyboard and touchpad, making the laptop essentially unusable. + * + * Since the problem of wrongly reporting SW_TABLET_MODE=1 in combination + * with libinput, leads to a non-usable system. Where as OTOH many people will + * not even notice when SW_TABLET_MODE is not being reported, a DMI based allow + * list is used here. This list mainly matches on the chassis-type of 2-in-1s. 
+ * + * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report + * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"); + * these are matched on a per-model basis, since many normal laptops with a + * possibly broken VGBS ACPI-method also use these chassis-types. + */ +static const struct dmi_system_id dmi_switches_allow_list[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */), + }, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"), + }, + }, + {} /* Array terminator */ +}; + +static bool intel_vbtn_has_switches(acpi_handle handle) +{ + unsigned long long vgbs; + acpi_status status; + + if (!dmi_check_system(dmi_switches_allow_list)) + return false; + + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); + return ACPI_SUCCESS(status); } static int intel_vbtn_probe(struct platform_device *device) @@ -160,13 +250,16 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + priv->has_switches = intel_vbtn_has_switches(handle); + err = intel_vbtn_input_setup(device); if (err) { pr_err("Failed to setup Intel Virtual Button\n"); return err; } - detect_tablet_mode(device); + if (priv->has_switches) + detect_tablet_mode(device); status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, @@ -251,7 +344,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 571b4754477c..4c1312f1616c 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -831,9 +831,15 @@ static const struct pci_device_id pmc_pci_ids[] = { * the platform BIOS enforces the 24MHz crystal to shut down * before PMC can assert SLP_S0#.
*/ +static bool xtal_ignore; static int quirk_xtal_ignore(const struct dmi_system_id *id) { - struct pmc_dev *pmcdev = &pmc; + xtal_ignore = true; + return 0; +} + +static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev) +{ u32 value; value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); @@ -842,7 +848,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id) /* Low Voltage Mode Enable */ value &= ~SPT_PMC_VRIC1_SLPS0LVEN; pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); - return 0; } static const struct dmi_system_id pmc_core_dmi_table[] = { @@ -857,6 +862,14 @@ static const struct dmi_system_id pmc_core_dmi_table[] = { {} }; +static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev) +{ + dmi_check_system(pmc_core_dmi_table); + + if (xtal_ignore) + pmc_core_xtal_ignore(pmcdev); +} + static int pmc_core_probe(struct platform_device *pdev) { static bool device_initialized; @@ -898,7 +911,7 @@ static int pmc_core_probe(struct platform_device *pdev) mutex_init(&pmcdev->lock); platform_set_drvdata(pdev, pmcdev); pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); - dmi_check_system(pmc_core_dmi_table); + pmc_core_do_dmi_quirks(pmcdev); pmc_core_dbgfs_register(pmcdev); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 3de5a3c66529..0c2aa22c7a12 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0x7F, 0x00, 0x0B}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, }; static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { @@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { {0xD0, 0x03, 0x08}, {0x7F, 0x02, 0x00}, {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, }; struct isst_cmd { diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h index 1409a5bb5582..4f6f7f0761fc 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h @@ -13,6 +13,9 @@ #define INTEL_RAPL_PRIO_DEVID_0 0x3451 #define INTEL_CFG_MBOX_DEVID_0 0x3459 +#define INTEL_RAPL_PRIO_DEVID_1 0x3251 +#define INTEL_CFG_MBOX_DEVID_1 0x3259 + /* * Validate maximum commands in a single request. * This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c index de4169d0796b..95f01e7a87d5 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c @@ -21,13 +21,12 @@ #define PUNIT_MAILBOX_BUSY_BIT 31 /* - * Commands has variable amount of processing time. Most of the commands will - * be done in 0-3 tries, but some takes up to 50. - * The real processing time was observed as 25us for the most of the commands - * at 2GHz. It is possible to optimize this count taking samples on customer - * systems. + * The average time to complete some commands is about 40us. The current + * retry count is enough to cover that, but when the firmware is very busy + * this occasionally causes timeouts. So increase the count to deal with + * worst-case scenarios. Most commands still complete in a few microseconds.
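+ * Only the retry count changes here; commands that complete quickly are + * unaffected by the larger bound.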
*/ -#define OS_MAILBOX_RETRY_COUNT 50 +#define OS_MAILBOX_RETRY_COUNT 100 struct isst_if_device { struct mutex mutex; @@ -148,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_mbox_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c index ad8c7c0df4d9..aa17fd7817f8 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c @@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_ids); @@ -126,7 +127,7 @@ static void isst_if_remove(struct pci_dev *pdev) struct isst_if_device *punit_dev; punit_dev = pci_get_drvdata(pdev); - isst_if_cdev_unregister(ISST_IF_DEV_MBOX); + isst_if_cdev_unregister(ISST_IF_DEV_MMIO); mutex_destroy(&punit_dev->mutex); } diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8fe51e43f1bc..54db334e52c1 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -234,24 +234,6 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; /* Platform hotplug devices */ -static struct i2c_board_info mlxplat_mlxcpld_psu[] = { - { - I2C_BOARD_INFO("24c02", 0x51), - }, - { - I2C_BOARD_INFO("24c02", 0x50), - }, -}; - -static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = { - { - I2C_BOARD_INFO("24c32", 0x51), - }, - { - I2C_BOARD_INFO("24c32", 0x50), - }, -}; - static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { { I2C_BOARD_INFO("dps460", 0x59), @@ -282,15 +264,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { .label = "psu1", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(0), - .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0], - .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "psu2", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(1), - .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], - .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, }; @@ -357,7 +337,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { .aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF, .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = MLXPLAT_CPLD_PSU_MASK, - .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data), .inversed = 1, .health = false, }, @@ -366,7 +346,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF, .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, .mask = MLXPLAT_CPLD_PWR_MASK, - .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data), .inversed = 0, .health = false, }, @@ -375,7 +355,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { .aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF, .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = MLXPLAT_CPLD_FAN_MASK, - .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data), .inversed = 1, 
.health = false, }, @@ -453,15 +433,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = { .label = "psu1", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(0), - .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0], - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "psu2", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(1), - .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, }; @@ -611,15 +589,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = { .label = "psu1", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(0), - .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0], - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "psu2", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(1), - .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1], - .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, }; diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 2b1a3a6ee8db..597cfabc0967 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = { }, { /* pmc_plt_clk* - are used for ethernet controllers */ - .ident = "Beckhoff CB3163", + .ident = "Beckhoff Baytrail", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), - DMI_MATCH(DMI_BOARD_NAME, "CB3163"), - }, - }, - { - /* pmc_plt_clk* - are used for ethernet controllers */ - .ident = "Beckhoff CB4063", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), - DMI_MATCH(DMI_BOARD_NAME, "CB4063"), - }, - }, - { - /* pmc_plt_clk* - are used for ethernet controllers */ - .ident = "Beckhoff CB6263", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), - DMI_MATCH(DMI_BOARD_NAME, "CB6263"), - }, - }, - { - /* pmc_plt_clk* - are used for ethernet controllers */ - .ident = "Beckhoff CB6363", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), - DMI_MATCH(DMI_BOARD_NAME, "CB6363"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"), }, }, { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index da794dcfdd92..f027609fdab6 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2587,7 +2587,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, */ static int hotkey_kthread(void *data) { - struct tp_nvram_state s[2]; + struct tp_nvram_state s[2] = { 0 }; u32 poll_mask, event_mask; unsigned int si, so; unsigned long t; @@ -3232,7 +3232,14 @@ static int hotkey_init_tablet_mode(void) in_tablet_mode = hotkey_gmms_get_tablet_mode(res, &has_tablet_mode); - if (has_tablet_mode) + /* + * The Yoga 11e series has 2 accelerometers described by a + * BOSC0200 ACPI node. This setup relies on a Windows service + * which calls special ACPI methods on this node to report + * the laptop/tent/tablet mode to the EC. The bmc150 iio driver + * does not support this, so skip the hotkey on these models. 
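+ * (The check below detects this setup by looking for a BOSC0200 ACPI + * device with _UID "1".)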
+ */ + if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1)) tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS; type = "GMMS"; } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) { @@ -4082,13 +4089,19 @@ static bool hotkey_notify_6xxx(const u32 hkey, case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: - case TP_HKEY_EV_KEY_FN_ESC: /* key press events, we just ignore them as long as the EC * is still reporting them in the normal keyboard stream */ *send_acpi_ev = false; *ignore_acpi_ev = true; return true; + case TP_HKEY_EV_KEY_FN_ESC: + /* Get the media key status to force the status LED to update */ + acpi_evalf(hkey_handle, NULL, "GMKS", "v"); + *send_acpi_ev = false; + *ignore_acpi_ev = true; + return true; + case TP_HKEY_EV_TABLET_CHANGED: tpacpi_input_send_tabletsw(); hotkey_tablet_mode_notify_change(); @@ -4238,6 +4251,7 @@ static void hotkey_resume(void) pr_err("error while attempting to reset the event firmware interface\n"); tpacpi_send_radiosw_update(); + tpacpi_input_send_tabletsw(); hotkey_tablet_mode_notify_change(); hotkey_wakeup_reason_notify_change(); hotkey_wakeup_hotunplug_complete_notify_change(); @@ -6270,6 +6284,7 @@ enum thermal_access_mode { enum { /* TPACPI_THERMAL_TPEC_* */ TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */ TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */ + TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */ TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */ @@ -6468,7 +6483,7 @@ static const struct attribute_group thermal_temp_input8_group = { static int __init thermal_init(struct ibm_init_struct *iibm) { - u8 t, ta1, ta2; + u8 t, ta1, ta2, ver = 0; int i; int acpi_tmp7; int res; @@ -6483,7 +6498,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm) * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for * non-implemented, thermal sensors return 0x80 when * not available + * The above rule is unfortunately flawed. This has been seen with + * 0xC2 (power supply ID) causing thermal control problems. + * The EC version can be determined by offset 0xEF and at least for + * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7 + * are not thermal registers. */ + if (!acpi_ec_read(TP_EC_FUNCREV, &ver)) + pr_warn("Thinkpad ACPI EC unable to access EC version\n"); ta1 = ta2 = 0; for (i = 0; i < 8; i++) { @@ -6493,11 +6515,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm) ta1 = 0; break; } - if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { - ta2 |= t; - } else { - ta1 = 0; - break; + if (ver < 3) { + if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { + ta2 |= t; + } else { + ta1 = 0; + break; + } } } if (ta1 == 0) { @@ -6510,9 +6534,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm) thermal_read_mode = TPACPI_THERMAL_NONE; } } else { - thermal_read_mode = - (ta2 != 0) ? - TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; + if (ver >= 3) + thermal_read_mode = TPACPI_THERMAL_TPEC_8; + else + thermal_read_mode = + (ta2 != 0) ?
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; } } else if (acpi_tmp7) { if (tpacpi_is_ibm() && @@ -6863,8 +6890,10 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) list_for_each_entry(child, &device->children, node) { acpi_status status = acpi_evaluate_object(child->handle, "_BCL", NULL, &buffer); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + buffer.length = ACPI_ALLOCATE_BUFFER; continue; + } obj = (union acpi_object *)buffer.pointer; if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { @@ -9687,6 +9716,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = { TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */ TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */ TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */ + TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */ }; static int __init tpacpi_battery_init(struct ibm_init_struct *ibm) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index a1e6569427c3..71a969fc3b20 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1485,7 +1485,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file)); char *buffer; char *cmd; - int lcd_out, crt_out, tv_out; + int lcd_out = -1, crt_out = -1, tv_out = -1; int remain = count; int value; int ret; @@ -1517,7 +1517,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, kfree(cmd); - lcd_out = crt_out = tv_out = -1; ret = get_video_status(dev, &video_out); if (!ret) { unsigned int new_video_out = video_out; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 1c7d8324ff5c..7ed1189a7200 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -231,6 +231,16 @@ static const struct ts_dmi_data digma_citi_e200_data = { .properties = digma_citi_e200_props, }; +static const struct property_entry estar_beauty_hd_props[] = { + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + { } +}; + +static const struct ts_dmi_data estar_beauty_hd_data = { + .acpi_name = "GDIX1001:00", + .properties = estar_beauty_hd_props, +}; + static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 960), PROPERTY_ENTRY_U32("touchscreen-size-y", 640), @@ -264,6 +274,21 @@ static const struct ts_dmi_data irbis_tw90_data = { .properties = irbis_tw90_props, }; +static const struct property_entry irbis_tw118_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 20), + PROPERTY_ENTRY_U32("touchscreen-min-y", 30), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1960), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1510), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data irbis_tw118_data = { + .acpi_name = "MSSL1680:00", + .properties = irbis_tw118_props, +}; + static const struct property_entry itworks_tw891_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 1), PROPERTY_ENTRY_U32("touchscreen-min-y", 5), @@ -732,6 +757,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), }, }, + { + /* Estar Beauty HD (MID 7316R) */ + .driver_data = (void *)&estar_beauty_hd_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Estar"), + DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"), + }, 
+ }, { /* GP-electronic T701 */ .driver_data = (void *)&gp_electronic_t701_data, @@ -758,6 +791,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TW90"), }, }, + { + /* Irbis TW118 */ + .driver_data = (void *)&irbis_tw118_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW118"), + }, + }, { /* I.T.Works TW891 */ .driver_data = (void *)&itworks_tw891_data, diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index e341cc5c0ea6..c84df27cd548 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -37,7 +37,7 @@ #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */ #define AT91_SHDW_WKUPDBC_SHIFT 24 -#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16) +#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24) #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \ & AT91_SHDW_WKUPDBC_MASK) diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 90cbaa8341e3..0bf9ab8653ae 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -143,6 +143,7 @@ static struct platform_driver vexpress_reset_driver = { .driver = { .name = "vexpress-reset", .of_match_table = vexpress_reset_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 5ca047b3f58f..23e7d6447ae9 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -433,7 +433,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) int ret; int data; int bat_remove; - int soc; + int soc = 0; /* measure enable on GPADC1 */ data = MEAS1_GP1; @@ -496,7 +496,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) } mutex_unlock(&info->lock); - calc_soc(info, OCV_MODE_ACTIVE, &soc); + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); + if (ret < 0) + goto out; data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); bat_remove = data & BAT_WU_LOG; diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index c84a7b1caeb6..d6fdc10c29f0 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -577,7 +577,7 @@ config CHARGER_BQ24257 tristate "TI BQ24250/24251/24257 battery charger driver" depends on I2C depends on GPIOLIB || COMPILE_TEST - depends on REGMAP_I2C + select REGMAP_I2C help Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery chargers. diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index cf4c67b2d235..7d09e49f04d3 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -548,14 +548,15 @@ out: /* * The HP Pavilion x2 10 series comes in a number of variants: - * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D" - * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E" - * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4" + * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021" + * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D" + * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E" + * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4" * - * The variants with the AXP288 PMIC are all kinds of special: + * The variants with the AXP288 + Type-C connector are all kinds of special: * - * 1. 
All variants use a Type-C connector which the AXP288 does not support, so - * when using a Type-C charger it is not recognized. Unlike most AXP288 devices, + * 1. They use a Type-C connector which the AXP288 does not support, so when + * using a Type-C charger it is not recognized. Unlike most AXP288 devices, * this model actually has mostly working ACPI AC / Battery code, the ACPI code * "solves" this by simply setting the input_current_limit to 3A. * There are still some issues with the ACPI code, so we use this native driver, @@ -578,12 +579,17 @@ out: */ static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = { { - /* - * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry - * Trail model has "HP", so we only match on product_name. - */ .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"), + }, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"), }, }, {} /* Terminating entry */ diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index e1bc4e6e6f30..f40fa0e63b6e 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { { /* Intel Cherry Trail Compute Stick, Windows version */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), }, }, { /* Intel Cherry Trail Compute Stick, version without an OS */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), }, }, diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 453d6332d43a..1ae5d6d42c9e 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -448,8 +448,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev, return -EINVAL; ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(bdi->dev); return ret; + } ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v); if (ret) @@ -1075,8 +1077,10 @@ static int bq24190_charger_get_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(bdi->dev); return ret; + } switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: @@ -1147,8 +1151,10 @@ static int bq24190_charger_set_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(bdi->dev); return ret; + } switch (psp) { case POWER_SUPPLY_PROP_ONLINE: @@ -1408,8 +1414,10 @@ static int bq24190_battery_get_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(bdi->dev); return ret; + } switch (psp) { case POWER_SUPPLY_PROP_STATUS: @@ -1454,8 +1462,10 @@ static int bq24190_battery_set_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + 
pm_runtime_put_noidle(bdi->dev); return ret; + } switch (psp) { case POWER_SUPPLY_PROP_ONLINE: diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 195c18c2f426..b1a37aa38880 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1499,27 +1499,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg) return tval * 60; } -/* - * Read an average power register. - * Return < 0 if something fails. - */ -static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di) -{ - int tval; - - tval = bq27xxx_read(di, BQ27XXX_REG_AP, false); - if (tval < 0) { - dev_err(di->dev, "error reading average power register %02x: %d\n", - BQ27XXX_REG_AP, tval); - return tval; - } - - if (di->opts & BQ27XXX_O_ZERO) - return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS; - else - return tval; -} - /* * Returns true if a battery over temperature condition is detected */ @@ -1604,8 +1583,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di) } if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) cache.cycle_count = bq27xxx_battery_read_cyct(di); - if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR) - cache.power_avg = bq27xxx_battery_read_pwr_avg(di); /* We only have to read charge design full once */ if (di->charge_design_full <= 0) @@ -1668,6 +1645,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di, return 0; } +/* + * Get the average power in µW + * Return < 0 if something fails. + */ +static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di, + union power_supply_propval *val) +{ + int power; + + power = bq27xxx_read(di, BQ27XXX_REG_AP, false); + if (power < 0) { + dev_err(di->dev, + "error reading average power register %02x: %d\n", + BQ27XXX_REG_AP, power); + return power; + } + + if (di->opts & BQ27XXX_O_ZERO) + val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS; + else + /* Other gauges return a signed value in units of 10mW */ + val->intval = (int)((s16)power) * 10000; + + return 0; +} + static int bq27xxx_battery_status(struct bq27xxx_device_info *di, union power_supply_propval *val) { @@ -1678,8 +1681,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, status = POWER_SUPPLY_STATUS_FULL; else if (di->cache.flags & BQ27000_FLAG_CHGS) status = POWER_SUPPLY_STATUS_CHARGING; - else if (power_supply_am_i_supplied(di->bat) > 0) - status = POWER_SUPPLY_STATUS_NOT_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; } else { @@ -1691,6 +1692,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, status = POWER_SUPPLY_STATUS_CHARGING; } + if ((status == POWER_SUPPLY_STATUS_DISCHARGING) && + (power_supply_am_i_supplied(di->bat) > 0)) + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + val->intval = status; return 0; @@ -1833,7 +1838,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, ret = bq27xxx_simple_value(di->cache.energy, val); break; case POWER_SUPPLY_PROP_POWER_AVG: - ret = bq27xxx_simple_value(di->cache.power_avg, val); + ret = bq27xxx_battery_pwr_avg(di, val); break; case POWER_SUPPLY_PROP_HEALTH: ret = bq27xxx_simple_value(di->cache.health, val); @@ -1885,7 +1890,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); if (IS_ERR(di->bat)) { - dev_err(di->dev, "failed to register battery\n"); + if (PTR_ERR(di->bat) == -EPROBE_DEFER) + dev_dbg(di->dev, "failed to register battery, deferring probe\n"); + else + 
dev_err(di->dev, "failed to register battery\n"); return PTR_ERR(di->bat); } diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c index bc462d1ec963..97b0e873e87d 100644 --- a/drivers/power/supply/generic-adc-battery.c +++ b/drivers/power/supply/generic-adc-battery.c @@ -382,7 +382,7 @@ static int gab_remove(struct platform_device *pdev) } kfree(adc_bat->psy_desc.properties); - cancel_delayed_work(&adc_bat->bat_work); + cancel_delayed_work_sync(&adc_bat->bat_work); return 0; } diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 84a206f42a8e..397e5a03b7d9 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev, ret = request_threaded_irq(virq, NULL, lp8788_charger_irq_thread, - 0, name, pchg); + IRQF_ONESHOT, name, pchg); if (ret) break; } @@ -572,27 +572,14 @@ static void lp8788_setup_adc_channel(struct device *dev, return; /* ADC channel for battery voltage */ - chan = iio_channel_get(dev, pdata->adc_vbatt); + chan = devm_iio_channel_get(dev, pdata->adc_vbatt); pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan; /* ADC channel for battery temperature */ - chan = iio_channel_get(dev, pdata->adc_batt_temp); + chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan; } -static void lp8788_release_adc_channel(struct lp8788_charger *pchg) -{ - int i; - - for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { - if (!pchg->chan[i]) - continue; - - iio_channel_release(pchg->chan[i]); - pchg->chan[i] = NULL; - } -} - static ssize_t lp8788_show_charger_status(struct device *dev, struct device_attribute *attr, char *buf) { @@ -735,7 +722,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) flush_work(&pchg->charger_work); lp8788_irq_unregister(pdev, pchg); lp8788_psy_unregister(pchg); - lp8788_release_adc_channel(pchg); return 0; } diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 62499018e68b..2e845045a3fc 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -105,7 +105,7 @@ static void max17040_get_vcell(struct i2c_client *client) vcell = max17040_read_reg(client, MAX17040_VCELL); - chip->vcell = vcell; + chip->vcell = (vcell >> 4) * 1250; } static void max17040_get_soc(struct i2c_client *client) diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c index 17749fc90e16..d2aff1cf4f79 100644 --- a/drivers/power/supply/pm2301_charger.c +++ b/drivers/power/supply/pm2301_charger.c @@ -1095,7 +1095,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client, ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), NULL, pm2xxx_charger_irq[0].isr, - pm2->pdata->irq_type, + pm2->pdata->irq_type | IRQF_ONESHOT, pm2xxx_charger_irq[0].name, pm2); if (ret != 0) { diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 75cf861ba492..2e7e2b73b012 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -144,7 +144,7 @@ static int power_supply_hwmon_read_string(struct device *dev, u32 attr, int channel, const char **str) { - *str = channel ? "temp" : "temp ambient"; + *str = channel ? 
"temp ambient" : "temp"; return 0; } @@ -304,7 +304,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) goto error; } - ret = devm_add_action(dev, power_supply_hwmon_bitmap_free, + ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free, psyhw->props); if (ret) goto error; diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c index 3d00b35cafc9..8be31f80035c 100644 --- a/drivers/power/supply/s3c_adc_battery.c +++ b/drivers/power/supply/s3c_adc_battery.c @@ -394,7 +394,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev) gpio_free(pdata->gpio_charge_finished); } - cancel_delayed_work(&bat_work); + cancel_delayed_work_sync(&bat_work); if (pdata->exit) pdata->exit(); diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index c1d124b8be0c..d102921b3ab2 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -1138,6 +1138,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case IRQSTAT_A: case IRQSTAT_C: + case IRQSTAT_D: case IRQSTAT_E: case IRQSTAT_F: case STAT_A: diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c index c3cad2b6daba..1139ca725195 100644 --- a/drivers/power/supply/test_power.c +++ b/drivers/power/supply/test_power.c @@ -341,6 +341,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp) static int param_get_ac_online(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } @@ -354,6 +355,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp) static int param_get_usb_online(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } @@ -368,6 +370,7 @@ static int param_set_battery_status(const char *key, static int param_get_battery_status(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_status, battery_status, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } @@ -382,6 +385,7 @@ static int param_set_battery_health(const char *key, static int param_get_battery_health(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_health, battery_health, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } @@ -397,6 +401,7 @@ static int param_get_battery_present(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_present, battery_present, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } @@ -414,6 +419,7 @@ static int param_get_battery_technology(char *buffer, { strcpy(buffer, map_get_key(map_technology, battery_technology, "unknown")); + strcat(buffer, "\n"); return strlen(buffer); } diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c index 6b0098e5a88b..0990b2fa6cd8 100644 --- a/drivers/power/supply/tps65090-charger.c +++ b/drivers/power/supply/tps65090-charger.c @@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev) if (irq != -ENXIO) { ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, - tps65090_charger_isr, 0, "tps65090-charger", cdata); + tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata); if (ret) { dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq, diff --git 
a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c index 814c2b81fdfe..ba33d1617e0b 100644 --- a/drivers/power/supply/tps65217_charger.c +++ b/drivers/power/supply/tps65217_charger.c @@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) for (i = 0; i < NUM_CHARGER_IRQS; i++) { ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL, tps65217_charger_irq, - 0, "tps65217-charger", + IRQF_ONESHOT, "tps65217-charger", charger); if (ret) { dev_err(charger->dev, diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index f808c5fa9838..3f0b8e2ef3d4 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -367,9 +367,9 @@ static void create_power_zone_common_attributes( &dev_attr_max_energy_range_uj.attr; if (power_zone->ops->get_energy_uj) { if (power_zone->ops->reset_energy_uj) - dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; + dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR; else - dev_attr_energy_uj.attr.mode = S_IRUGO; + dev_attr_energy_uj.attr.mode = S_IRUSR; power_zone->zone_dev_attrs[count++] = &dev_attr_energy_uj.attr; } diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c index 333ba83006e4..a12a1ad9b5fe 100644 --- a/drivers/ps3/ps3stor_lib.c +++ b/drivers/ps3/ps3stor_lib.c @@ -189,7 +189,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf)); dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf, dev->bounce_size, DMA_BIDIRECTIONAL); - if (!dev->bounce_dma) { + if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) { dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n", __func__, __LINE__); error = -ENODEV; diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 1f829edd8ee7..d392a828fc49 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, u64 tmp, multi, rate; u32 value, prescale; - rate = clk_get_rate(ip->clk); - value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, else state->polarity = PWM_POLARITY_INVERSED; + rate = clk_get_rate(ip->clk); + if (rate == 0) { + state->period = 0; + state->duty_cycle = 0; + return; + } + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET); prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); prescale &= IPROC_PWM_PRESCALE_MAX; diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c index 91e24f01b54e..d78f86f8e462 100644 --- a/drivers/pwm/pwm-bcm2835.c +++ b/drivers/pwm/pwm-bcm2835.c @@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &bcm2835_pwm_ops; + pc->chip.base = -1; pc->chip.npwm = 2; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index c9e57bd109fb..a34d95ed70b2 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -129,8 +129,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); ret = pm_runtime_get_sync(chip->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->dev); return ret; + } val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= 
~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); @@ -275,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev) return PTR_ERR(pwm->pwm_clk); } + platform_set_drvdata(pdev, pwm); + pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); @@ -311,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev) goto err_suspend; } - platform_set_drvdata(pdev, pwm); return 0; err_suspend: @@ -331,8 +334,10 @@ static int img_pwm_remove(struct platform_device *pdev) int ret; ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(&pdev->dev); return ret; + } for (i = 0; i < pwm_chip->chip.npwm; i++) { val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index 9d78cc21cb12..77c28313e95f 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -92,11 +92,12 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip); unsigned long long tmp; - unsigned long period, duty; + unsigned long rate, period, duty; unsigned int prescaler = 0; uint16_t ctrl; - tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period; + rate = clk_get_rate(jz4740->clk); + tmp = rate * state->period; do_div(tmp, 1000000000); period = tmp; @@ -108,8 +109,8 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (prescaler == 6) return -EINVAL; - tmp = (unsigned long long)period * state->duty_cycle; - do_div(tmp, state->period); + tmp = (unsigned long long)rate * state->duty_cycle; + do_div(tmp, NSEC_PER_SEC); duty = period - tmp; if (duty >= period) diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index 7551253ada32..bf3f14fb5f24 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -275,6 +275,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev) lp3943_pwm->chip.dev = &pdev->dev; lp3943_pwm->chip.ops = &lp3943_pwm_ops; lp3943_pwm->chip.npwm = LP3943_NUM_PWMS; + lp3943_pwm->chip.base = -1; platform_set_drvdata(pdev, lp3943_pwm); diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 75bbfe5f3bc2..d77cec2769b7 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, * The equation is: * base_unit = round(base_unit_range * freq / c) */ - base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; + base_unit_range = BIT(lpwm->info->base_unit_bits); freq *= base_unit_range; base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); + /* base_unit must not be 0 and we also want to avoid overflowing it */ + base_unit = clamp_val(base_unit, 1, base_unit_range - 1); on_time_div = 255ULL * duty_ns; do_div(on_time_div, period_ns); @@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, orig_ctrl = ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; - ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); - base_unit &= base_unit_range; + ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index b07bdca3d510..590375be5214 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -20,6 +20,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/pm_runtime.h> 
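+/* linux/bitmap.h provides bitmap_empty(), used by the pwms_inuse tracking below */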
+#include <linux/bitmap.h> /* * Because the PCA9685 has only one prescaler per chip, changing the period of @@ -74,6 +75,7 @@ struct pca9685 { #if IS_ENABLED(CONFIG_GPIOLIB) struct mutex lock; struct gpio_chip gpio; + DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1); #endif }; @@ -83,53 +85,53 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) } #if IS_ENABLED(CONFIG_GPIOLIB) +static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx) +{ + bool is_inuse; + + mutex_lock(&pca->lock); + if (pwm_idx >= PCA9685_MAXCHAN) { + /* + * "all LEDs" channel: + * pretend already in use if any of the PWMs are requested + */ + if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) { + is_inuse = true; + goto out; + } + } else { + /* + * regular channel: + * pretend already in use if the "all LEDs" channel is requested + */ + if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) { + is_inuse = true; + goto out; + } + } + is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse); +out: + mutex_unlock(&pca->lock); + return is_inuse; +} + +static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) +{ + mutex_lock(&pca->lock); + clear_bit(pwm_idx, pca->pwms_inuse); + mutex_unlock(&pca->lock); +} + static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) { struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm; - mutex_lock(&pca->lock); - - pwm = &pca->chip.pwms[offset]; - - if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) { - mutex_unlock(&pca->lock); + if (pca9685_pwm_test_and_set_inuse(pca, offset)) return -EBUSY; - } - - pwm_set_chip_data(pwm, (void *)1); - - mutex_unlock(&pca->lock); pm_runtime_get_sync(pca->chip.dev); return 0; } -static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) -{ - bool is_gpio = false; - - mutex_lock(&pca->lock); - - if (pwm->hwpwm >= PCA9685_MAXCHAN) { - unsigned int i; - - /* - * Check if any of the GPIOs are requested and in that case - * prevent using the "all LEDs" channel. 
- */ - for (i = 0; i < pca->gpio.ngpio; i++) - if (gpiochip_is_requested(&pca->gpio, i)) { - is_gpio = true; - break; - } - } else if (pwm_get_chip_data(pwm)) { - is_gpio = true; - } - - mutex_unlock(&pca->lock); - return is_gpio; -} - static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) { struct pca9685 *pca = gpiochip_get_data(gpio); @@ -162,6 +164,7 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) pca9685_pwm_gpio_set(gpio, offset, 0); pm_runtime_put(pca->chip.dev); + pca9685_pwm_clear_inuse(pca, offset); } static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, @@ -213,12 +216,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca) return devm_gpiochip_add_data(dev, &pca->gpio, pca); } #else -static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca, - struct pwm_device *pwm) +static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, + int pwm_idx) { return false; } +static inline void +pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) +{ +} + static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) { return 0; @@ -402,7 +410,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct pca9685 *pca = to_pca(chip); - if (pca9685_pwm_is_gpio(pca, pwm)) + if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm)) return -EBUSY; pm_runtime_get_sync(chip->dev); @@ -411,8 +419,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { + struct pca9685 *pca = to_pca(chip); + pca9685_pwm_disable(chip, pwm); pm_runtime_put(chip->dev); + pca9685_pwm_clear_inuse(pca, pwm->hwpwm); } static const struct pwm_ops pca9685_pwm_ops = { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 852eb2347954..b98ec8847b48 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -228,24 +228,28 @@ static int rcar_pwm_probe(struct platform_device *pdev) rcar_pwm->chip.base = -1; rcar_pwm->chip.npwm = 1; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&rcar_pwm->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret); + pm_runtime_disable(&pdev->dev); return ret; } - pm_runtime_enable(&pdev->dev); - return 0; } static int rcar_pwm_remove(struct platform_device *pdev) { struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&rcar_pwm->chip); pm_runtime_disable(&pdev->dev); - return pwmchip_remove(&rcar_pwm->chip); + return ret; } static const struct of_device_id rcar_pwm_of_table[] = { diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 4a855a21b782..8032acc84161 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -415,16 +415,17 @@ static int tpu_probe(struct platform_device *pdev) tpu->chip.base = -1; tpu->chip.npwm = TPU_CHANNEL_MAX; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&tpu->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip\n"); + pm_runtime_disable(&pdev->dev); return ret; } dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id); - pm_runtime_enable(&pdev->dev); - return 0; } @@ -434,12 +435,10 @@ static int tpu_remove(struct platform_device *pdev) int ret; ret = pwmchip_remove(&tpu->chip); - if (ret) - return ret; pm_runtime_disable(&pdev->dev); - return 0; + return ret; } #ifdef CONFIG_OF diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 
73352e6fbccb..6ad6aad215cf 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -361,7 +361,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&pc->chip); if (ret < 0) { - clk_unprepare(pc->clk); dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); goto err_pclk; } diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c index e2c21cc34a96..3763ce5311ac 100644 --- a/drivers/pwm/pwm-zx.c +++ b/drivers/pwm/pwm-zx.c @@ -238,6 +238,7 @@ static int zx_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&zpc->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); + clk_disable_unprepare(zpc->pclk); return ret; } diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 677d1aff61b7..788e7830771b 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS config RAPIDIO_DMA_ENGINE bool "DMA Engine support for RapidIO" depends on RAPIDIO - select DMADEVICES + depends on DMADEVICES select DMA_ENGINE help Say Y here if you want to use the DMA Engine framework for RapidIO data diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 8155f59ece38..2b08fdeb87c1 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -873,9 +873,15 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, rmcd_error("get_user_pages_unlocked err=%ld", pinned); nr_pages = 0; - } else + } else { rmcd_error("pinned %ld out of %ld pages", pinned, nr_pages); + /* + * Set nr_pages up to mean "how many pages to unpin" in + * the error handler. + */ + nr_pages = pinned; + } ret = -EFAULT; goto err_pg; } @@ -1677,6 +1683,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, struct rio_dev *rdev; struct rio_switch *rswitch = NULL; struct rio_mport *mport; + struct device *dev; size_t size; u32 rval; u32 swpinfo = 0; @@ -1691,8 +1698,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, dev_info.comptag, dev_info.destid, dev_info.hopcount); - if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { + dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); + if (dev) { rmcd_debug(RDEV, "device %s already exists", dev_info.name); + put_device(dev); return -EEXIST; } @@ -2379,13 +2388,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) cdev_init(&md->cdev, &mport_fops); md->cdev.owner = THIS_MODULE; - ret = cdev_device_add(&md->cdev, &md->dev); - if (ret) { - rmcd_error("Failed to register mport %d (err=%d)", - mport->id, ret); - goto err_cdev; - } - INIT_LIST_HEAD(&md->doorbells); spin_lock_init(&md->db_lock); INIT_LIST_HEAD(&md->portwrites); @@ -2405,6 +2407,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; #endif + + ret = cdev_device_add(&md->cdev, &md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } ret = rio_query_mport(mport, &attr); if (!ret) { md->properties.flags = attr.flags; diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index c09cf55e2d20..569d9ad2c594 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -309,7 +309,7 @@ static bool sanity_check(struct ce_array *ca) return ret; } -int cec_add_elem(u64 pfn) +static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; unsigned int to =
0; @@ -527,7 +527,33 @@ err: return 1; } -void __init cec_init(void) +static int cec_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *m = (struct mce *)data; + + if (!m) + return NOTIFY_DONE; + + /* We eat only correctable DRAM errors with usable addresses. */ + if (mce_is_memory_error(m) && + mce_is_correctable(m) && + mce_usable_address(m)) { + if (!cec_add_elem(m->addr >> PAGE_SHIFT)) { + m->kflags |= MCE_HANDLED_CEC; + return NOTIFY_OK; + } + } + + return NOTIFY_DONE; +} + +static struct notifier_block cec_nb = { + .notifier_call = cec_notifier, + .priority = MCE_PRIO_CEC, +}; + +static void __init cec_init(void) { if (ce_arr.disabled) return; @@ -546,8 +572,11 @@ void __init cec_init(void) INIT_DELAYED_WORK(&cec_work, cec_work_fn); schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); + mce_register_decode_chain(&cec_nb); + pr_info("Correctable Errors collector initialized.\n"); } +late_initcall(cec_init); int __init parse_cec_param(char *str) { diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 16f0c8570036..86a3c2dd0584 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -42,8 +42,9 @@ #define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0) #define AXP20X_DCDC3_V_OUT_MASK GENMASK(7, 0) -#define AXP20X_LDO24_V_OUT_MASK GENMASK(7, 4) +#define AXP20X_LDO2_V_OUT_MASK GENMASK(7, 4) #define AXP20X_LDO3_V_OUT_MASK GENMASK(6, 0) +#define AXP20X_LDO4_V_OUT_MASK GENMASK(3, 0) #define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4) #define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0) @@ -544,14 +545,14 @@ static const struct regulator_desc axp20x_regulators[] = { AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK), AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300), AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100, - AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, + AXP20X_LDO24_V_OUT, AXP20X_LDO2_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK), AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK), AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES, - AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, + AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK), AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK, @@ -595,7 +596,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), AXP_DESC(AXP22X, DLDO2, "dldo2", "dldoin", 700, 3300, 100, - AXP22X_DLDO2_V_OUT, AXP22X_PWR_OUT_DLDO2_MASK, + AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), AXP_DESC(AXP22X, DLDO3, "dldo3", "dldoin", 700, 3300, 100, AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, @@ -1071,7 +1072,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) static int axp20x_regulator_parse_dt(struct platform_device *pdev) { struct device_node *np, *regulators; - int ret; + int ret = 0; u32 dcdcfreq = 0; np = of_node_get(pdev->dev.parent->of_node); @@ -1086,13 +1087,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); if (ret < 0) { dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret); - return ret; } - of_node_put(regulators); } - return 0; + of_node_put(np); + 
return ret; } static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode) diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index bdab46a5c461..6c431456d298 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -15,6 +15,36 @@ #include <linux/regulator/of_regulator.h> #include <linux/slab.h> +/* Typical regulator startup times as per data sheet in uS */ +#define BD71847_BUCK1_STARTUP_TIME 144 +#define BD71847_BUCK2_STARTUP_TIME 162 +#define BD71847_BUCK3_STARTUP_TIME 162 +#define BD71847_BUCK4_STARTUP_TIME 240 +#define BD71847_BUCK5_STARTUP_TIME 270 +#define BD71847_BUCK6_STARTUP_TIME 200 +#define BD71847_LDO1_STARTUP_TIME 440 +#define BD71847_LDO2_STARTUP_TIME 370 +#define BD71847_LDO3_STARTUP_TIME 310 +#define BD71847_LDO4_STARTUP_TIME 400 +#define BD71847_LDO5_STARTUP_TIME 530 +#define BD71847_LDO6_STARTUP_TIME 400 + +#define BD71837_BUCK1_STARTUP_TIME 160 +#define BD71837_BUCK2_STARTUP_TIME 180 +#define BD71837_BUCK3_STARTUP_TIME 180 +#define BD71837_BUCK4_STARTUP_TIME 180 +#define BD71837_BUCK5_STARTUP_TIME 160 +#define BD71837_BUCK6_STARTUP_TIME 240 +#define BD71837_BUCK7_STARTUP_TIME 220 +#define BD71837_BUCK8_STARTUP_TIME 200 +#define BD71837_LDO1_STARTUP_TIME 440 +#define BD71837_LDO2_STARTUP_TIME 370 +#define BD71837_LDO3_STARTUP_TIME 310 +#define BD71837_LDO4_STARTUP_TIME 400 +#define BD71837_LDO5_STARTUP_TIME 310 +#define BD71837_LDO6_STARTUP_TIME 400 +#define BD71837_LDO7_STARTUP_TIME 530 + /* * BUCK1/2/3/4 * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting @@ -495,6 +525,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK1_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck1_set_hw_dvs_levels, }, @@ -519,6 +550,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK2_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck2_set_hw_dvs_levels, }, @@ -547,6 +579,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .linear_range_selectors = bd71847_buck3_volt_range_sel, .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK3_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -574,6 +607,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_range_mask = BD71847_BUCK4_RANGE_MASK, .linear_range_selectors = bd71847_buck4_volt_range_sel, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK4_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -596,6 +630,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK, .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK5_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -620,6 +655,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK, .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71847_BUCK6_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -646,6 +682,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .linear_range_selectors = bd718xx_ldo1_volt_range_sel, .enable_reg = 
BD718XX_REG_LDO1_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO1_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -668,6 +705,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .n_voltages = ARRAY_SIZE(ldo_2_volts), .enable_reg = BD718XX_REG_LDO2_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO2_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -691,6 +729,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = BD718XX_LDO3_MASK, .enable_reg = BD718XX_REG_LDO3_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO3_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -714,6 +753,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = BD718XX_LDO4_MASK, .enable_reg = BD718XX_REG_LDO4_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO4_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -740,6 +780,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .linear_range_selectors = bd71847_ldo5_volt_range_sel, .enable_reg = BD718XX_REG_LDO5_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO5_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -765,6 +806,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .vsel_mask = BD718XX_LDO6_MASK, .enable_reg = BD718XX_REG_LDO6_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71847_LDO6_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -791,6 +833,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK1_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck1_set_hw_dvs_levels, }, @@ -815,6 +858,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK2_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck2_set_hw_dvs_levels, }, @@ -839,6 +883,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD71837_REG_BUCK3_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK3_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck3_set_hw_dvs_levels, }, @@ -863,6 +908,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = DVS_BUCK_RUN_MASK, .enable_reg = BD71837_REG_BUCK4_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK4_STARTUP_TIME, .owner = THIS_MODULE, .of_parse_cb = buck4_set_hw_dvs_levels, }, @@ -891,6 +937,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .linear_range_selectors = bd71837_buck5_volt_range_sel, .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK5_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -915,6 +962,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD71837_BUCK6_MASK, .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK6_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -937,6 +985,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK, .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK7_STARTUP_TIME, .owner = 
THIS_MODULE, }, .init = { @@ -961,6 +1010,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK, .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL, .enable_mask = BD718XX_BUCK_EN, + .enable_time = BD71837_BUCK8_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -987,6 +1037,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .linear_range_selectors = bd718xx_ldo1_volt_range_sel, .enable_reg = BD718XX_REG_LDO1_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO1_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1009,6 +1060,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .n_voltages = ARRAY_SIZE(ldo_2_volts), .enable_reg = BD718XX_REG_LDO2_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO2_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1032,6 +1084,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD718XX_LDO3_MASK, .enable_reg = BD718XX_REG_LDO3_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO3_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1055,6 +1108,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD718XX_LDO4_MASK, .enable_reg = BD718XX_REG_LDO4_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO4_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1080,6 +1134,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD71837_LDO5_MASK, .enable_reg = BD718XX_REG_LDO5_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO5_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1107,6 +1162,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD718XX_LDO6_MASK, .enable_reg = BD718XX_REG_LDO6_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO6_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { @@ -1132,6 +1188,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .vsel_mask = BD71837_LDO7_MASK, .enable_reg = BD71837_REG_LDO7_VOLT, .enable_mask = BD718XX_LDO_EN, + .enable_time = BD71837_LDO7_STARTUP_TIME, .owner = THIS_MODULE, }, .init = { diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index e690c2ce5b3c..25e33028871c 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -124,7 +124,7 @@ static const struct regulator_ops vid_ops = { static const struct regulator_desc regulators[] = { BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf, 16, 1625000, 25000, 0), BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf, @@ -133,7 +133,7 @@ static const struct regulator_desc regulators[] = { 11, 2800000, 100000, 0), BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops, BD9571MWV_DVFS_MONIVDAC, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), }; #ifdef CONFIG_PM_SLEEP diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 0011bdc15afb..a31b6ae92a84 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -235,8 +235,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev) static void regulator_unlock_recursive(struct regulator_dev *rdev, unsigned int n_coupled) { - struct regulator_dev *c_rdev; - int i; + struct regulator_dev *c_rdev, *supply_rdev; + int i, 
supply_n_coupled; for (i = n_coupled; i > 0; i--) { c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1]; @@ -244,10 +244,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev, if (!c_rdev) continue; - if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) - regulator_unlock_recursive( - c_rdev->supply->rdev, - c_rdev->coupling_desc.n_coupled); + if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { + supply_rdev = c_rdev->supply->rdev; + supply_n_coupled = supply_rdev->coupling_desc.n_coupled; + + regulator_unlock_recursive(supply_rdev, + supply_n_coupled); + } regulator_unlock(c_rdev); } @@ -1272,7 +1275,6 @@ static int _regulator_do_enable(struct regulator_dev *rdev); /** * set_machine_constraints - sets regulator constraints * @rdev: regulator source - * @constraints: constraints to apply * * Allows platform initialisation code to define and constrain * regulator circuits e.g. valid voltage/current ranges, etc. NOTE: @@ -1280,21 +1282,11 @@ static int _regulator_do_enable(struct regulator_dev *rdev); * regulator operations to proceed i.e. set_voltage, set_current_limit, * set_mode. */ -static int set_machine_constraints(struct regulator_dev *rdev, - const struct regulation_constraints *constraints) +static int set_machine_constraints(struct regulator_dev *rdev) { int ret = 0; const struct regulator_ops *ops = rdev->desc->ops; - if (constraints) - rdev->constraints = kmemdup(constraints, sizeof(*constraints), - GFP_KERNEL); - else - rdev->constraints = kzalloc(sizeof(*constraints), - GFP_KERNEL); - if (!rdev->constraints) - return -ENOMEM; - ret = machine_constraints_voltage(rdev, rdev->constraints); if (ret != 0) return ret; @@ -1456,7 +1448,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { - struct regulator_map *node; + struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) @@ -1467,6 +1459,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, else has_dev = 0; + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); + if (new_node == NULL) + return -ENOMEM; + + new_node->regulator = rdev; + new_node->supply = supply; + + if (has_dev) { + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); + if (new_node->dev_name == NULL) { + kfree(new_node); + return -ENOMEM; + } + } + + mutex_lock(®ulator_list_mutex); list_for_each_entry(node, ®ulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) @@ -1484,26 +1492,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); - return -EBUSY; + goto fail; } - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); - if (node == NULL) - return -ENOMEM; + list_add(&new_node->list, ®ulator_map_list); + mutex_unlock(®ulator_list_mutex); - node->regulator = rdev; - node->supply = supply; - - if (has_dev) { - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); - if (node->dev_name == NULL) { - kfree(node); - return -ENOMEM; - } - } - - list_add(&node->list, ®ulator_map_list); return 0; + +fail: + mutex_unlock(®ulator_list_mutex); + kfree(new_node->dev_name); + kfree(new_node); + return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) @@ -1575,45 +1576,54 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, const char *supply_name) { struct regulator *regulator; - char buf[REG_STR_SIZE]; - int err, 
size; + int err = 0; + + if (dev) { + char buf[REG_STR_SIZE]; + int size; + + size = snprintf(buf, REG_STR_SIZE, "%s-%s", + dev->kobj.name, supply_name); + if (size >= REG_STR_SIZE) + return NULL; + + supply_name = kstrdup(buf, GFP_KERNEL); + if (supply_name == NULL) + return NULL; + } else { + supply_name = kstrdup_const(supply_name, GFP_KERNEL); + if (supply_name == NULL) + return NULL; + } regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); - if (regulator == NULL) + if (regulator == NULL) { + kfree(supply_name); return NULL; + } + + regulator->rdev = rdev; + regulator->supply_name = supply_name; regulator_lock(rdev); - regulator->rdev = rdev; list_add(®ulator->list, &rdev->consumer_list); + regulator_unlock(rdev); if (dev) { regulator->dev = dev; /* Add a link to the device sysfs entry */ - size = snprintf(buf, REG_STR_SIZE, "%s-%s", - dev->kobj.name, supply_name); - if (size >= REG_STR_SIZE) - goto overflow_err; - - regulator->supply_name = kstrdup(buf, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; - err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj, - buf); + supply_name); if (err) { rdev_dbg(rdev, "could not add device link %s err %d\n", dev->kobj.name, err); /* non-fatal */ } - } else { - regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; } - regulator->debugfs = debugfs_create_dir(regulator->supply_name, - rdev->debugfs); + if (err != -EEXIST) + regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs); if (!regulator->debugfs) { rdev_dbg(rdev, "Failed to create debugfs directory\n"); } else { @@ -1637,13 +1647,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, _regulator_is_enabled(rdev)) regulator->always_on = true; - regulator_unlock(rdev); return regulator; -overflow_err: - list_del(®ulator->list); - kfree(regulator); - regulator_unlock(rdev); - return NULL; } static int _regulator_get_enable_time(struct regulator_dev *rdev) @@ -1768,13 +1772,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) { struct regulator_dev *r; struct device *dev = rdev->dev.parent; - int ret; + int ret = 0; /* No supply to resolve? */ if (!rdev->supply_name) return 0; - /* Supply already resolved? */ + /* Supply already resolved? (fast-path without locking contention) */ if (rdev->supply) return 0; @@ -1784,7 +1788,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) /* Did the lookup explicitly defer for us? 
*/ if (ret == -EPROBE_DEFER) - return ret; + goto out; if (have_full_constraints()) { r = dummy_regulator_rdev; @@ -1792,10 +1796,22 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) } else { dev_err(dev, "Failed to resolve %s-supply for %s\n", rdev->supply_name, rdev->desc->name); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out; } } + if (r == rdev) { + dev_err(dev, "Supply for %s (%s) resolved to itself\n", + rdev->desc->name, rdev->supply_name); + if (!have_full_constraints()) { + ret = -EINVAL; + goto out; + } + r = dummy_regulator_rdev; + get_device(&r->dev); + } + /* * If the supply's parent device is not the same as the * regulator's parent device, then ensure the parent device @@ -1805,7 +1821,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (r->dev.parent && r->dev.parent != rdev->dev.parent) { if (!device_is_bound(r->dev.parent)) { put_device(&r->dev); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out; } } @@ -1813,15 +1830,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) ret = regulator_resolve_supply(r); if (ret < 0) { put_device(&r->dev); - return ret; + goto out; + } + + /* + * Recheck rdev->supply with rdev->mutex lock held to avoid a race + * between rdev->supply null check and setting rdev->supply in + * set_supply() from concurrent tasks. + */ + regulator_lock(rdev); + + /* Supply just resolved by a concurrent task? */ + if (rdev->supply) { + regulator_unlock(rdev); + put_device(&r->dev); + goto out; } ret = set_supply(rdev, r); if (ret < 0) { + regulator_unlock(rdev); put_device(&r->dev); - return ret; + goto out; } + regulator_unlock(rdev); + /* * In set_machine_constraints() we may have turned this regulator on * but we couldn't propagate to the supply if it hadn't been resolved @@ -1832,11 +1866,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (ret < 0) { _regulator_put(rdev->supply); rdev->supply = NULL; - return ret; + goto out; } } - return 0; +out: + return ret; } /* Internal regulator request function */ @@ -2217,10 +2252,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); static int regulator_ena_gpio_request(struct regulator_dev *rdev, const struct regulator_config *config) { - struct regulator_enable_gpio *pin; + struct regulator_enable_gpio *pin, *new_pin; struct gpio_desc *gpiod; gpiod = config->ena_gpiod; + new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL); + + mutex_lock(®ulator_list_mutex); list_for_each_entry(pin, ®ulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { @@ -2229,9 +2267,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, } } - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); - if (pin == NULL) + if (new_pin == NULL) { + mutex_unlock(®ulator_list_mutex); return -ENOMEM; + } + + pin = new_pin; + new_pin = NULL; pin->gpiod = gpiod; list_add(&pin->list, ®ulator_ena_gpio_list); @@ -2239,6 +2281,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, update_ena_gpio_to_rdev: pin->request_count++; rdev->ena_pin = pin; + + mutex_unlock(®ulator_list_mutex); + kfree(new_pin); + return 0; } @@ -4019,6 +4065,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) ret = rdev->desc->fixed_uV; } else if (rdev->supply) { ret = regulator_get_voltage_rdev(rdev->supply->rdev); + } else if (rdev->supply_name) { + return -EPROBE_DEFER; } else { return -EINVAL; } @@ -4857,13 +4905,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev) return; } - regulator_lock(c_rdev); 
- c_desc->coupled_rdevs[i] = c_rdev; c_desc->n_resolved++; - regulator_unlock(c_rdev); - regulator_resolve_coupling(c_rdev); } } @@ -4948,7 +4992,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev) if (!of_check_coupling_data(rdev)) return -EPERM; + mutex_lock(®ulator_list_mutex); rdev->coupling_desc.coupler = regulator_find_coupler(rdev); + mutex_unlock(®ulator_list_mutex); + if (IS_ERR(rdev->coupling_desc.coupler)) { err = PTR_ERR(rdev->coupling_desc.coupler); rdev_err(rdev, "failed to get coupler: %d\n", err); @@ -4987,14 +5034,12 @@ struct regulator_dev * regulator_register(const struct regulator_desc *regulator_desc, const struct regulator_config *cfg) { - const struct regulation_constraints *constraints = NULL; const struct regulator_init_data *init_data; struct regulator_config *config = NULL; static atomic_t regulator_no = ATOMIC_INIT(-1); struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; - bool reg_device_fail = false; struct device *dev; int ret, i; @@ -5044,6 +5089,7 @@ regulator_register(const struct regulator_desc *regulator_desc, ret = -ENOMEM; goto rinse; } + device_initialize(&rdev->dev); /* * Duplicate the config so the driver could override it after @@ -5051,9 +5097,8 @@ regulator_register(const struct regulator_desc *regulator_desc, */ config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); if (config == NULL) { - kfree(rdev); ret = -ENOMEM; - goto rinse; + goto clean; } init_data = regulator_of_get_init_data(dev, regulator_desc, config, @@ -5065,10 +5110,8 @@ regulator_register(const struct regulator_desc *regulator_desc, * from a gpio extender or something else. */ if (PTR_ERR(init_data) == -EPROBE_DEFER) { - kfree(config); - kfree(rdev); ret = -EPROBE_DEFER; - goto rinse; + goto clean; } /* @@ -5109,9 +5152,7 @@ regulator_register(const struct regulator_desc *regulator_desc, } if (config->ena_gpiod) { - mutex_lock(®ulator_list_mutex); ret = regulator_ena_gpio_request(rdev, config); - mutex_unlock(®ulator_list_mutex); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO: %d\n", ret); @@ -5127,49 +5168,59 @@ regulator_register(const struct regulator_desc *regulator_desc, rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%lu", (unsigned long) atomic_inc_return(®ulator_no)); + dev_set_drvdata(&rdev->dev, rdev); /* set regulator constraints */ if (init_data) - constraints = &init_data->constraints; + rdev->constraints = kmemdup(&init_data->constraints, + sizeof(*rdev->constraints), + GFP_KERNEL); + else + rdev->constraints = kzalloc(sizeof(*rdev->constraints), + GFP_KERNEL); + if (!rdev->constraints) { + ret = -ENOMEM; + goto wash; + } if (init_data && init_data->supply_regulator) rdev->supply_name = init_data->supply_regulator; else if (regulator_desc->supply_name) rdev->supply_name = regulator_desc->supply_name; - /* - * Attempt to resolve the regulator supply, if specified, - * but don't return an error if we fail because we will try - * to resolve it again later as more regulators are added. 
- */ - if (regulator_resolve_supply(rdev)) - rdev_dbg(rdev, "unable to resolve supply\n"); - - ret = set_machine_constraints(rdev, constraints); + ret = set_machine_constraints(rdev); + if (ret == -EPROBE_DEFER) { + /* Regulator might be in bypass mode and so needs its supply + * to set the constraints */ + /* FIXME: this currently triggers a chicken-and-egg problem + * when creating -SUPPLY symlink in sysfs to a regulator + * that is just being created */ + ret = regulator_resolve_supply(rdev); + if (!ret) + ret = set_machine_constraints(rdev); + else + rdev_dbg(rdev, "unable to resolve supply early: %pe\n", + ERR_PTR(ret)); + } if (ret < 0) goto wash; - mutex_lock(®ulator_list_mutex); ret = regulator_init_coupling(rdev); - mutex_unlock(®ulator_list_mutex); if (ret < 0) goto wash; /* add consumers devices */ if (init_data) { - mutex_lock(®ulator_list_mutex); for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { - mutex_unlock(®ulator_list_mutex); dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } - mutex_unlock(®ulator_list_mutex); } if (!rdev->desc->ops->get_voltage && @@ -5177,12 +5228,9 @@ regulator_register(const struct regulator_desc *regulator_desc, !rdev->desc->fixed_uV) rdev->is_switch = true; - dev_set_drvdata(&rdev->dev, rdev); - ret = device_register(&rdev->dev); - if (ret != 0) { - reg_device_fail = true; + ret = device_add(&rdev->dev); + if (ret != 0) goto unset_supplies; - } rdev_init_debugfs(rdev); @@ -5204,18 +5252,14 @@ unset_supplies: mutex_unlock(®ulator_list_mutex); wash: kfree(rdev->coupling_desc.coupled_rdevs); - kfree(rdev->constraints); mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(®ulator_list_mutex); clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - if (reg_device_fail) - put_device(&rdev->dev); - else - kfree(rdev); kfree(config); + put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 689537927f6f..44b1da7cc374 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { }; +static const struct regulator_ops pfuze3000_sw_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = pfuze100_set_ramp_delay, + +}; + #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \ [_chip ## _ ## _name] = { \ .desc = { \ @@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { .stby_mask = 0x20, \ } - -#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ - .desc = { \ - .name = #_name,\ - .n_voltages = ((max) - (min)) / (step) + 1, \ - .ops = &pfuze100_sw_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = _chip ## _ ## _name, \ - .owner = THIS_MODULE, \ - .min_uV = (min), \ - .uV_step = (step), \ - .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ - .vsel_mask = 0x7, \ - }, \ - .stby_reg = (base) + 
PFUZE100_STANDBY_OFFSET, \ - .stby_mask = 0x7, \ -} +/* No linear case for some switches of PFUZE3000 */ +#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \ + [_chip ## _ ## _name] = { \ + .desc = { \ + .name = #_name, \ + .n_voltages = ARRAY_SIZE(voltages), \ + .ops = &pfuze3000_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .volt_table = voltages, \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = (mask), \ + .enable_reg = (base) + PFUZE100_MODE_OFFSET, \ + .enable_mask = 0xf, \ + .enable_val = 0x8, \ + .enable_time = 500, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = (mask), \ + .sw_reg = true, \ + } #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ .desc = { \ @@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = { }; static struct pfuze_regulator pfuze3000_regulators[] = { - PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), - PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), @@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = { }; static struct pfuze_regulator pfuze3001_regulators[] = { - PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), - PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), @@ -815,11 +833,14 @@ static int pfuze100_regulator_probe(struct i2c_client *client, * the switched regulator till yet. 
*/ if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) { - if (pfuze_chip->regulator_descs[i].sw_reg) { - desc->ops = &pfuze100_sw_disable_regulator_ops; - desc->enable_val = 0x8; - desc->disable_val = 0x0; - desc->enable_time = 500; + if (pfuze_chip->chip_id == PFUZE100 || + pfuze_chip->chip_id == PFUZE200) { + if (pfuze_chip->regulator_descs[i].sw_reg) { + desc->ops = &pfuze100_sw_disable_regulator_ops; + desc->enable_val = 0x8; + desc->disable_val = 0x0; + desc->enable_time = 500; + } } } diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index e74e11101fc1..0a9d61a91f43 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, return ret; } - drvdata->state = -EINVAL; + drvdata->state = -ENOTRECOVERABLE; drvdata->duty_cycle_table = duty_cycle_table; drvdata->desc.ops = &pwm_regulator_voltage_table_ops; drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table); diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 0246b6f99fb5..2de7af13288e 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600), - .n_voltages = 5, + .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000), + .n_voltages = 236, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, }; @@ -832,11 +832,11 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = { RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"), RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), - RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"), + RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), - RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"), - RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"), + RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"), + RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"), RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"), {}, }; @@ -857,7 +857,7 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = { RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"), RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"), RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"), - RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8"), RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"), RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"), @@ -874,7 +874,7 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = { RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"), RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"), RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"), - RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"), {}, }; diff --git a/drivers/regulator/s5m8767.c 
b/drivers/regulator/s5m8767.c index 6ca27e9d5ef7..5276f8442f3c 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, rdata = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rdata), GFP_KERNEL); - if (!rdata) + if (!rdata) { + of_node_put(regulators_np); return -ENOMEM; + } rmode = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rmode), GFP_KERNEL); - if (!rmode) + if (!rmode) { + of_node_put(regulators_np); return -ENOMEM; + } pdata->regulators = rdata; pdata->opmode = rmode; @@ -574,10 +578,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, 0, GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE, "s5m8767"); - if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) + if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) { rdata->ext_control_gpiod = NULL; - else if (IS_ERR(rdata->ext_control_gpiod)) + } else if (IS_ERR(rdata->ext_control_gpiod)) { + of_node_put(reg_np); + of_node_put(regulators_np); return PTR_ERR(rdata->ext_control_gpiod); + } rdata->id = i; rdata->initdata = of_get_regulator_init_data( diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index 89b9314d64c9..016330f909c0 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -342,8 +342,17 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) return ret; } - /* If data is exactly the same, then just update index, no change */ info = &abb->info[sel]; + /* + * When the Linux kernel is starting up, we aren't sure of the + * bias configuration that the bootloader has configured. + * So, we get to know the actual setting the first time + * we are asked to transition. 
+ */ + if (abb->current_info_idx == -EINVAL) + goto just_set_abb; + + /* If data is exactly the same, then just update index, no change */ oinfo = &abb->info[abb->current_info_idx]; if (!memcmp(info, oinfo, sizeof(*info))) { dev_dbg(dev, "%s: Same data new idx=%d, old idx=%d\n", __func__, @@ -351,6 +360,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) goto out; } +just_set_abb: ret = ti_abb_set_opp(rdev, abb, info); out: diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index cb0f4a0be032..eaeb6aee6da5 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; + q6v5->running = false; + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index e953886b2eb7..24e8b7e27177 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -184,8 +184,10 @@ static int adsp_start(struct rproc *rproc) dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX); ret = pm_runtime_get_sync(adsp->dev); - if (ret) + if (ret) { + pm_runtime_put_noidle(adsp->dev); goto disable_xo_clk; + } ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks); if (ret) { @@ -345,15 +347,12 @@ static int adsp_init_mmio(struct qcom_adsp *adsp, struct platform_device *pdev) { struct device_node *syscon; - struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!adsp->qdsp6ss_base) { + adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(adsp->qdsp6ss_base)) { dev_err(adsp->dev, "failed to map QDSP6SS registers\n"); - return -ENOMEM; + return PTR_ERR(adsp->qdsp6ss_base); } syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index de919f2e8b94..5e54e6f5edb1 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -331,8 +331,11 @@ static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, for (i = 0; i < pd_count; i++) { dev_pm_genpd_set_performance_state(pds[i], INT_MAX); ret = pm_runtime_get_sync(pds[i]); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(pds[i]); + dev_pm_genpd_set_performance_state(pds[i], 0); goto unroll_pd_votes; + } } return 0; @@ -381,6 +384,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw) { struct q6v5 *qproc = rproc->priv; + /* MBA is restricted to a maximum size of 1M */ + if (fw->size > qproc->mba_size || fw->size > SZ_1M) { + dev_err(qproc->dev, "MBA firmware load failed\n"); + return -EINVAL; + } + memcpy(qproc->mba_region, fw->data, fw->size); return 0; @@ -875,11 +884,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); } - ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, - false, qproc->mpss_phys, - qproc->mpss_size); - WARN_ON(ret); - q6v5_reset_assert(qproc); q6v5_clk_disable(qproc->dev, qproc->reset_clks, @@ -909,6 +913,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) } } +static int q6v5_reload_mba(struct rproc *rproc) +{ + struct q6v5 *qproc = rproc->priv; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, rproc->firmware, qproc->dev); + if (ret < 0) + return ret; + + q6v5_load(rproc, 
fw); + ret = q6v5_mba_load(qproc); + release_firmware(fw); + + return ret; +} + static int q6v5_mpss_load(struct q6v5 *qproc) { const struct elf32_phdr *phdrs; @@ -969,6 +990,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); } + /** + * In case of a modem subsystem restart on secure devices, the modem + * memory can be reclaimed only after MBA is loaded. For modem cold + * boot this will be a nop + */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, + qproc->mpss_phys, qproc->mpss_size); + mpss_reloc = relocate ? min_addr : qproc->mpss_phys; qproc->mpss_reloc = mpss_reloc; /* Load firmware segments */ @@ -985,7 +1014,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } - ptr = qproc->mpss_region + offset; + ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); + if (!ptr) { + dev_err(qproc->dev, + "unable to map memory region: %pa+%zx-%x\n", + &qproc->mpss_phys, offset, phdr->p_memsz); + goto release_firmware; + } if (phdr->p_filesz && phdr->p_offset < fw->size) { /* Firmware is large enough to be non-split */ @@ -994,6 +1029,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) "failed to load segment %d from truncated file %s\n", i, fw_name); ret = -EINVAL; + iounmap(ptr); goto release_firmware; } @@ -1001,14 +1037,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) } else if (phdr->p_filesz) { /* Replace "xxx.xxx" with "xxx.bxx" */ sprintf(fw_name + fw_name_len - 3, "b%02d", i); - ret = request_firmware(&seg_fw, fw_name, qproc->dev); + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, + ptr, phdr->p_filesz); if (ret) { dev_err(qproc->dev, "failed to load %s\n", fw_name); + iounmap(ptr); goto release_firmware; } - memcpy(ptr, seg_fw->data, seg_fw->size); - release_firmware(seg_fw); } @@ -1016,6 +1052,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); } + iounmap(ptr); size += phdr->p_memsz; } @@ -1055,23 +1092,43 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, int ret = 0; struct q6v5 *qproc = rproc->priv; unsigned long mask = BIT((unsigned long)segment->priv); - void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); + int offset = segment->da - qproc->mpss_reloc; + void *ptr = NULL; /* Unlock mba before copying segments */ - if (!qproc->dump_mba_loaded) - ret = q6v5_mba_load(qproc); + if (!qproc->dump_mba_loaded) { + ret = q6v5_reload_mba(rproc); + if (!ret) { + /* Reset ownership back to Linux to copy segments */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + false, + qproc->mpss_phys, + qproc->mpss_size); + } + } - if (!ptr || ret) - memset(dest, 0xff, segment->size); - else + if (!ret) + ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); + + if (ptr) { memcpy(dest, ptr, segment->size); + iounmap(ptr); + } else { + memset(dest, 0xff, segment->size); + } qproc->dump_segment_mask |= mask; /* Reclaim mba after copying segments */ if (qproc->dump_segment_mask == qproc->dump_complete_mask) { - if (qproc->dump_mba_loaded) + if (qproc->dump_mba_loaded) { + /* Try to reset ownership back to Q6 */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + true, + qproc->mpss_phys, + qproc->mpss_size); q6v5_mba_reclaim(qproc); + } } } @@ -1111,10 +1168,6 @@ static int q6v5_start(struct rproc *rproc) return 0; reclaim_mpss: - xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, - false, qproc->mpss_phys, - qproc->mpss_size); - WARN_ON(xfermemop_ret); q6v5_mba_reclaim(qproc); 
return ret; @@ -1363,12 +1416,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); - qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); - if (!qproc->mpss_region) { - dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", - &r.start, qproc->mpss_size); - return -EBUSY; - } return 0; } @@ -1410,7 +1457,7 @@ static int q6v5_probe(struct platform_device *pdev) ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1, &qproc->hexagon_mdt_image); if (ret < 0 && ret != -EINVAL) - return ret; + goto free_rproc; platform_set_drvdata(pdev, qproc); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index b542debbc6f0..ce92ae227aa1 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -400,7 +400,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) void rproc_free_vring(struct rproc_vring *rvring) { struct rproc *rproc = rvring->rvdev->rproc; - int idx = rvring->rvdev->vring - rvring; + int idx = rvring - rvring->rvdev->vring; struct fw_rsc_vdev *rsc; idr_remove(&rproc->notifyids, rvring->notifyid); @@ -511,7 +511,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* Initialise vdev subdevice */ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); - rvdev->dev.parent = rproc->dev.parent; + rvdev->dev.parent = &rproc->dev; rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset; rvdev->dev.release = rproc_rvdev_release; dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); @@ -2036,6 +2036,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.type = &rproc_type; rproc->dev.class = &rproc_class; rproc->dev.driver_data = rproc; + idr_init(&rproc->notifyids); /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -2060,8 +2061,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, mutex_init(&rproc->lock); - idr_init(&rproc->notifyids); - INIT_LIST_HEAD(&rproc->carveouts); INIT_LIST_HEAD(&rproc->mappings); INIT_LIST_HEAD(&rproc->traces); diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 8c07cb2ca8ba..380d52672035 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -334,6 +334,13 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) struct rproc_mem_entry *mem; int ret; + if (rproc->ops->kick == NULL) { + ret = -EINVAL; + dev_err(dev, ".kick method not defined for %s", + rproc->name); + goto out; + } + /* Try to find dedicated vdev buffer carveout */ mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index); if (mem) { @@ -368,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) goto out; } } + } else { + struct device_node *np = rproc->dev.parent->of_node; + + /* + * If we don't have a dedicated buffer, just attempt to re-assign + * the reserved memory from our parent. A default memory-region + * at index 0 from the parent's memory-regions is assigned for + * the rvdev dev to allocate from. Failure is non-critical and + * the allocations will fall back to global pools, so don't + * check return value either. 
+ */ + of_reserved_mem_device_init_by_idx(dev, np, 0); } /* Allocate virtio device */ diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 1995f5b3ea67..d5114abcde19 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -970,7 +970,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid) return -EINVAL; } - complete(&channel->open_ack); + complete_all(&channel->open_ack); return 0; } @@ -1178,7 +1178,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev) __be32 *val = defaults; int size; - if (glink->intentless) + if (glink->intentless || !completion_done(&channel->open_ack)) return 0; prop = of_find_property(np, "qcom,intents", NULL); @@ -1413,7 +1413,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, channel->rcid = ret; spin_unlock_irqrestore(&glink->idr_lock, flags); - complete(&channel->open_req); + complete_all(&channel->open_req); if (create_device) { rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 4abbeea782fa..19903de6268d 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev, ret = of_property_read_u32(node, key, &edge->edge_id); if (ret) { dev_err(dev, "edge missing %s property\n", key); - return -EINVAL; + goto put_node; } edge->remote_pid = QCOM_SMEM_HOST_ANY; @@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev, edge->mbox_client.knows_txdone = true; edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0); if (IS_ERR(edge->mbox_chan)) { - if (PTR_ERR(edge->mbox_chan) != -ENODEV) - return PTR_ERR(edge->mbox_chan); + if (PTR_ERR(edge->mbox_chan) != -ENODEV) { + ret = PTR_ERR(edge->mbox_chan); + goto put_node; + } edge->mbox_chan = NULL; syscon_np = of_parse_phandle(node, "qcom,ipc", 0); if (!syscon_np) { dev_err(dev, "no qcom,ipc node\n"); - return -ENODEV; + ret = -ENODEV; + goto put_node; } edge->ipc_regmap = syscon_node_to_regmap(syscon_np); - if (IS_ERR(edge->ipc_regmap)) - return PTR_ERR(edge->ipc_regmap); + if (IS_ERR(edge->ipc_regmap)) { + ret = PTR_ERR(edge->ipc_regmap); + goto put_node; + } key = "qcom,ipc"; ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); if (ret < 0) { dev_err(dev, "no offset in %s\n", key); - return -EINVAL; + goto put_node; } ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); if (ret < 0) { dev_err(dev, "no bit in %s\n", key); - return -EINVAL; + goto put_node; } } @@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev, irq = irq_of_parse_and_map(node, 0); if (irq < 0) { dev_err(dev, "required smd interrupt missing\n"); - return -EINVAL; + ret = irq; + goto put_node; } ret = devm_request_irq(dev, irq, @@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev, node->name, edge); if (ret) { dev_err(dev, "failed to request smd irq\n"); - return ret; + goto put_node; } edge->irq = irq; return 0; + +put_node: + of_node_put(node); + edge->of_node = NULL; + + return ret; } /* diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index c5b980414086..9ae7ce3f5069 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -683,6 +683,7 @@ config RTC_DRV_S5M tristate "Samsung S2M/S5M series" depends on MFD_SEC_CORE || COMPILE_TEST select REGMAP_IRQ + select REGMAP_I2C help If you say yes here you will get support for the RTC of Samsung S2MPS14 and S5M PMIC series. 
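The qcom_glink_native.c hunks above switch complete() to complete_all() on the open_ack and open_req completions, so the completion stays in the "done" state: later waiters return immediately and the new completion_done() check in qcom_glink_announce_create() can see that the ack already arrived. A minimal sketch of the difference, using a standalone completion; ack_arrived() and already_acked() are illustrative names, not driver functions:

#include <linux/completion.h>

static DECLARE_COMPLETION(open_ack);

/*
 * complete_all() leaves the completion permanently "done": every current
 * and future wait_for_completion() returns immediately, and
 * completion_done() keeps reporting true. A plain complete() releases
 * only one waiter, and that waiter consumes the done count.
 */
static void ack_arrived(void)
{
	complete_all(&open_ack);
}

static bool already_acked(void)
{
	return completion_done(&open_ack);
}

Similarly, the qcom_smd_parse_edge() rework funnels every failure through a single put_node label, so the device_node reference taken at the top of the function is always dropped and edge->of_node is cleared. A minimal sketch of that goto-cleanup idiom, assuming a hypothetical parse_foo_edge() with a single "edge-id" property (struct foo_edge and the names are illustrative, not from the driver):

#include <linux/of.h>

struct foo_edge {
	struct device_node *of_node;
	u32 edge_id;
};

static int parse_foo_edge(struct device_node *node, struct foo_edge *edge)
{
	int ret;

	edge->of_node = of_node_get(node);	/* reference held on success */

	ret = of_property_read_u32(node, "edge-id", &edge->edge_id);
	if (ret)
		goto put_node;	/* every error path releases the reference */

	return 0;

put_node:
	of_node_put(node);
	edge->of_node = NULL;
	return ret;
}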
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 4743b16a8d84..1526402e126b 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -336,6 +336,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, info); + info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc_dev)) + return PTR_ERR(info->rtc_dev); + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, rtc_update_handler, IRQF_ONESHOT, "rtc", info); @@ -377,13 +381,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev) } } - info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", - &pm860x_rtc_ops, THIS_MODULE); - ret = PTR_ERR(info->rtc_dev); - if (IS_ERR(info->rtc_dev)) { - dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); + info->rtc_dev->ops = &pm860x_rtc_ops; + + ret = rtc_register_device(info->rtc_dev); + if (ret) return ret; - } /* * enable internal XO instead of internal 3.25MHz clock since it can diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 367497914c10..28eb96cbaf98 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client, if (!ds1374) return -ENOMEM; + ds1374->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(ds1374->rtc)) + return PTR_ERR(ds1374->rtc); + ds1374->client = client; i2c_set_clientdata(client, ds1374); @@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client, device_set_wakeup_capable(&client->dev, 1); } - ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, - &ds1374_rtc_ops, THIS_MODULE); - if (IS_ERR(ds1374->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(ds1374->rtc); - } + ds1374->rtc->ops = &ds1374_rtc_ops; + + ret = rtc_register_device(ds1374->rtc); + if (ret) + return ret; #ifdef CONFIG_RTC_DRV_DS1374_WDT save_client = client; diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 1766496385fe..4fd6afe2228e 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -33,7 +33,7 @@ struct ep93xx_rtc { static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, unsigned short *delete) { - struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); + struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); unsigned long comp; comp = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP); @@ -51,7 +51,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); + struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); unsigned long time; time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA); @@ -62,7 +62,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm) { - struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev); + struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev); unsigned long secs = rtc_tm_to_time64(tm); writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD); diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index 1a3420ee6a4d..d5083b013fbc 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev, rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC; 
writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); writel(rtc_alarm64, base + TIMER_ALARM_LOW); + writel(1, base + TIMER_IRQ_ENABLED); } else { /* * if this function was called with enabled=0 diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index afce2c0b4bd6..d6802e6191cb 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c @@ -308,8 +308,10 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev) mc13xxx_unlock(mc13xxx); ret = rtc_register_device(priv->rtc); - if (ret) + if (ret) { + mc13xxx_lock(mc13xxx); goto err_irq_request; + } return 0; diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 02b069caffd5..d1d37a204264 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -230,10 +230,8 @@ static int pcf2127_nvmem_read(void *priv, unsigned int offset, if (ret) return ret; - ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD, - val, bytes); - - return ret ?: bytes; + return regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD, + val, bytes); } static int pcf2127_nvmem_write(void *priv, unsigned int offset, @@ -248,10 +246,8 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset, if (ret) return ret; - ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD, - val, bytes); - - return ret ?: bytes; + return regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD, + val, bytes); } /* watchdog driver */ diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 180caebbd355..9566958476df 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -379,8 +379,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) device_init_wakeup(&adev->dev, true); ldata->rtc = devm_rtc_allocate_device(&adev->dev); - if (IS_ERR(ldata->rtc)) - return PTR_ERR(ldata->rtc); + if (IS_ERR(ldata->rtc)) { + ret = PTR_ERR(ldata->rtc); + goto out; + } ldata->rtc->ops = ops; diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index 2b316661a578..bbdfebd70644 100644 --- a/drivers/rtc/rtc-rv3028.c +++ b/drivers/rtc/rtc-rv3028.c @@ -625,6 +625,8 @@ static int rv3028_probe(struct i2c_client *client) return -ENOMEM; rv3028->regmap = devm_regmap_init_i2c(client, ®map_config); + if (IS_ERR(rv3028->regmap)) + return PTR_ERR(rv3028->regmap); i2c_set_clientdata(client, rv3028); diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index 8102469e27c0..0c436aea0da4 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -424,16 +424,26 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) } } -static struct rtc_class_ops rx8010_rtc_ops = { +static const struct rtc_class_ops rx8010_rtc_ops_default = { .read_time = rx8010_get_time, .set_time = rx8010_set_time, .ioctl = rx8010_ioctl, }; +static const struct rtc_class_ops rx8010_rtc_ops_alarm = { + .read_time = rx8010_get_time, + .set_time = rx8010_set_time, + .ioctl = rx8010_ioctl, + .read_alarm = rx8010_read_alarm, + .set_alarm = rx8010_set_alarm, + .alarm_irq_enable = rx8010_alarm_irq_enable, +}; + static int rx8010_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; + const struct rtc_class_ops *rtc_ops; struct rx8010_data *rx8010; int err = 0; @@ -464,16 +474,16 @@ static int rx8010_probe(struct i2c_client *client, if (err) { dev_err(&client->dev, "unable to request IRQ\n"); - client->irq = 0; - } else { - rx8010_rtc_ops.read_alarm = rx8010_read_alarm; - 
rx8010_rtc_ops.set_alarm = rx8010_set_alarm; - rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable; + return err; } + + rtc_ops = &rx8010_rtc_ops_alarm; + } else { + rtc_ops = &rx8010_rtc_ops_default; } rx8010->rtc = devm_rtc_device_register(&client->dev, client->name, - &rx8010_rtc_ops, THIS_MODULE); + rtc_ops, THIS_MODULE); if (IS_ERR(rx8010->rtc)) { dev_err(&client->dev, "unable to register the class device\n"); diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 86fa723b3b76..795273269d58 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -182,7 +182,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = { int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) { - struct rtc_device *rtc; int ret; spin_lock_init(&info->lock); @@ -211,15 +210,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) writel_relaxed(0, info->rcnr); } - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { - clk_disable_unprepare(info->clk); - return PTR_ERR(rtc); - } - info->rtc = rtc; + info->rtc->ops = &sa1100_rtc_ops; + info->rtc->max_user_freq = RTC_FREQ; - rtc->max_user_freq = RTC_FREQ; + ret = rtc_register_device(info->rtc); + if (ret) { + clk_disable_unprepare(info->clk); + return ret; + } /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). @@ -268,6 +266,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm; + info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", &pdev->dev); if (ret) { diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index fc32be687606..c41bc8084d7c 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -276,7 +276,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, 300000000); if (IS_ERR(rtc->int_osc)) { pr_crit("Couldn't register the internal oscillator\n"); - return; + goto err; } parents[0] = clk_hw_get_name(rtc->int_osc); @@ -292,7 +292,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, rtc->losc = clk_register(NULL, &rtc->hw); if (IS_ERR(rtc->losc)) { pr_crit("Couldn't register the LOSC clock\n"); - return; + goto err_register; } of_property_read_string_index(node, "clock-output-names", 1, @@ -303,7 +303,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, &rtc->lock); if (IS_ERR(rtc->ext_losc)) { pr_crit("Couldn't register the LOSC external gate\n"); - return; + goto err_register; } clk_data->num = 2; @@ -316,6 +316,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); return; +err_register: + clk_hw_unregister_fixed_rate(rtc->int_osc); err: kfree(clk_data); } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index cf87eb27879f..b577c8f7e346 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2980,6 +2980,12 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr) if (!block) return -EINVAL; + /* + * If the request is an ERP request there is nothing to requeue. + * This will be done with the remaining original request. 
+ */ + if (cqr->refers) + return 0; spin_lock_irq(&cqr->dq->lock); req = (struct request *) cqr->callback_data; blk_mq_requeue_request(req, false); @@ -3081,7 +3087,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, basedev = block->base; spin_lock_irq(&dq->lock); - if (basedev->state < DASD_STATE_READY) { + if (basedev->state < DASD_STATE_READY || + test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { DBF_DEV_EVENT(DBF_ERR, basedev, "device not ready for request %p", req); rc = BLK_STS_IOERR; @@ -3516,8 +3523,6 @@ void dasd_generic_remove(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; - cdev->handler = NULL; - device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) { dasd_remove_sysfs_files(cdev); @@ -3536,6 +3541,7 @@ void dasd_generic_remove(struct ccw_device *cdev) * no quite down yet. */ dasd_set_target_state(device, DASD_STATE_NEW); + cdev->handler = NULL; /* dasd_delete_device destroys the device reference. */ block = device->block; dasd_delete_device(device); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 99f86612f775..dc78a523a69f 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) return; device->discipline->get_uid(device, &uid); spin_lock_irqsave(&lcu->lock, flags); - list_del_init(&device->alias_list); /* make sure that the workers don't use this device */ if (device == lcu->suc_data.device) { spin_unlock_irqrestore(&lcu->lock, flags); @@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) spin_lock_irqsave(&aliastree.lock, flags); spin_lock(&lcu->lock); + list_del_init(&device->alias_list); if (list_empty(&lcu->grouplist) && list_empty(&lcu->active_devices) && list_empty(&lcu->inactive_devices)) { @@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device, spin_unlock_irqrestore(&lcu->lock, flags); rc = dasd_sleep_on(cqr); - if (rc && !suborder_not_supported(cqr)) { + if (!rc) + goto out; + + if (suborder_not_supported(cqr)) { + /* suborder not supported or device unusable for IO */ + rc = -EOPNOTSUPP; + } else { + /* IO failed but should be retried */ spin_lock_irqsave(&lcu->lock, flags); lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); } +out: dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) return rc; spin_lock_irqsave(&lcu->lock, flags); + /* + * If another update is needed, skip the remaining handling: + * the data might already be outdated. + * In particular, do not add the device to an LCU with a + * pending update. + */ + if (lcu->flags & NEED_UAC_UPDATE) + goto out; lcu->pav = NO_PAV; for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { switch (lcu->uac->unit[i].ua_type) { @@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) alias_list) { _add_device_to_lcu(lcu, device, refdev); } +out: spin_unlock_irqrestore(&lcu->lock, flags); return 0; } @@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device) } if (lcu->flags & UPDATE_PENDING) { list_move(&device->alias_list, &lcu->active_devices); + private->pavgroup = NULL; _schedule_lcu_update(lcu, device); } spin_unlock_irqrestore(&lcu->lock, flags); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 8d4971645cf1..f7ae03fd36cb 100644 ---
a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -319,7 +319,7 @@ dasd_diag_check_device(struct dasd_device *device) struct dasd_diag_characteristics *rdc_data; struct vtoc_cms_label *label; struct dasd_block *block; - struct dasd_diag_bio bio; + struct dasd_diag_bio *bio; unsigned int sb, bsize; blocknum_t end_block; int rc; @@ -395,29 +395,36 @@ dasd_diag_check_device(struct dasd_device *device) rc = -ENOMEM; goto out; } + bio = kzalloc(sizeof(*bio), GFP_KERNEL); + if (bio == NULL) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "No memory to allocate initialization bio"); + rc = -ENOMEM; + goto out_label; + } rc = 0; end_block = 0; /* try all sizes - needed for ECKD devices */ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { mdsk_init_io(device, bsize, 0, &end_block); - memset(&bio, 0, sizeof (struct dasd_diag_bio)); - bio.type = MDSK_READ_REQ; - bio.block_number = private->pt_block + 1; - bio.buffer = label; + memset(bio, 0, sizeof(*bio)); + bio->type = MDSK_READ_REQ; + bio->block_number = private->pt_block + 1; + bio->buffer = label; memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io)); private->iob.dev_nr = rdc_data->dev_nr; private->iob.key = 0; private->iob.flags = 0; /* do synchronous io */ private->iob.block_count = 1; private->iob.interrupt_params = 0; - private->iob.bio_list = &bio; + private->iob.bio_list = bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; rc = dia250(&private->iob, RW_BIO); if (rc == 3) { pr_warn("%s: A 64-bit DIAG call failed\n", dev_name(&device->cdev->dev)); rc = -EOPNOTSUPP; - goto out_label; + goto out_bio; } mdsk_term_io(device); if (rc == 0) @@ -427,7 +434,7 @@ dasd_diag_check_device(struct dasd_device *device) pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n", dev_name(&device->cdev->dev), rc); rc = -EIO; - goto out_label; + goto out_bio; } /* check for label block */ if (memcmp(label->label_id, DASD_DIAG_CMS1, @@ -457,6 +464,8 @@ dasd_diag_check_device(struct dasd_device *device) (rc == 4) ? 
", read-only device" : ""); rc = 0; } +out_bio: + kfree(bio); out_label: free_page((long) label); out: diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index cbb770824226..1a44e321b54e 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; +static void *dasd_fba_zero_page; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; @@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) ccw->cmd_code = DASD_FBA_CCW_WRITE; ccw->flags |= CCW_FLAG_SLI; ccw->count = count; - ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0)); + ccw->cda = (__u32) (addr_t) dasd_fba_zero_page; } /* @@ -830,6 +831,11 @@ dasd_fba_init(void) int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); + + dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!dasd_fba_zero_page) + return -ENOMEM; + ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); @@ -841,6 +847,7 @@ static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); + free_page((unsigned long)dasd_fba_zero_page); } module_init(dasd_fba_init); diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 427b2e24a8ce..cb466ed7eb5e 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -105,16 +105,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) return IRQ_HANDLED; } -static struct irqaction airq_interrupt = { - .name = "AIO", - .handler = do_airq_interrupt, -}; - void __init init_airq_interrupts(void) { irq_set_chip_and_handler(THIN_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(THIN_INTERRUPT, &airq_interrupt); + if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL)) + panic("Failed to register AIO interrupt\n"); } static inline unsigned long iv_size(unsigned long bits) diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 18f5458f90e8..6d716db2a46a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -563,16 +563,12 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) return IRQ_HANDLED; } -static struct irqaction io_interrupt = { - .name = "I/O", - .handler = do_cio_interrupt, -}; - void __init init_cio_interrupts(void) { irq_set_chip_and_handler(IO_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(IO_INTERRUPT, &io_interrupt); + if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL)) + panic("Failed to register I/O interrupt\n"); } #ifdef CONFIG_CCW_CONSOLE diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 831850435c23..5734a78dbb8e 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) rc = css_evaluate_known_subchannel(sch, 1); if (rc == -EAGAIN) css_schedule_eval(sch->schid); + /* + * The loop might take a long time for platforms with lots of + * known devices. Allow scheduling here. + */ + cond_resched(); } return 0; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 0c6245fc7706..23e9227e60fd 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev) * Now we know this subchannel will stay, we can throw * our delayed uevent.
*/ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } /* make it known to the system */ ret = ccw_device_add(cdev); if (ret) { @@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch) * Throw the delayed uevent for the subchannel, register * the ccw_device and exit. */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + /* should always be the case for the console */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } cdev = sch_get_cdev(sch); rc = ccw_device_add(cdev); if (rc) { @@ -1659,10 +1664,10 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev) struct io_subchannel_private *io_priv = to_io_private(sch); set_io_private(sch, NULL); - put_device(&sch->dev); - put_device(&cdev->dev); dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area), io_priv->dma_area, io_priv->dma_area_dma); + put_device(&sch->dev); + put_device(&cdev->dev); kfree(io_priv); } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a58b45df95d7..3b0a4483a252 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -372,7 +372,6 @@ static inline int multicast_outbound(struct qdio_q *q) extern u64 last_ai_time; /* prototypes for thin interrupt */ -void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index ee0b3c586211..9dc56aa3ae55 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) setup_queues(irq_ptr, init_data); setup_qib(irq_ptr, init_data); - qdio_setup_thinint(irq_ptr); set_impl_params(irq_ptr, init_data->qib_param_field_format, init_data->qib_param_field, init_data->input_slib_elements, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 93ee067c10ca..ddf780b12d40 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -268,17 +268,19 @@ int __init tiqdio_register_thinints(void) int qdio_establish_thinint(struct qdio_irq *irq_ptr) { + int rc; + if (!is_thinint_irq(irq_ptr)) return 0; - return set_subchannel_ind(irq_ptr, 0); -} -void qdio_setup_thinint(struct qdio_irq *irq_ptr) -{ - if (!is_thinint_irq(irq_ptr)) - return; irq_ptr->dsci = get_indicator(); DBF_HEX(&irq_ptr->dsci, sizeof(void *)); + + rc = set_subchannel_ind(irq_ptr, 0); + if (rc) + put_indicator(irq_ptr->dsci); + + return rc; } void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index e401a3d0aa57..339a6bc0339b 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_disable; + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } + VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no); diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 
f0d71ab77c50..15df0a5c03ec 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -506,7 +506,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_REGION_INFO: { @@ -524,7 +524,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (ret) return ret; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_GET_IRQ_INFO: { @@ -545,7 +545,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (info.count == -1) return -EINVAL; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } case VFIO_DEVICE_SET_IRQS: { diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 5c9898e934d9..0658aa5030c6 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -33,9 +33,6 @@ MODULE_DESCRIPTION("s390 protected key interface"); #define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ -/* mask of available pckmo subfunctions, fetched once at module init */ -static cpacf_mask_t pckmo_functions; - /* * debug feature data and functions */ @@ -78,6 +75,9 @@ static int pkey_clr2protkey(u32 keytype, const struct pkey_clrkey *clrkey, struct pkey_protkey *protkey) { + /* mask of available pckmo subfunctions */ + static cpacf_mask_t pckmo_functions; + long fc; int keysize; u8 paramblock[64]; @@ -101,11 +101,13 @@ static int pkey_clr2protkey(u32 keytype, return -EINVAL; } - /* - * Check if the needed pckmo subfunction is available. - * These subfunctions can be enabled/disabled by customers - * in the LPAR profile or may even change on the fly. - */ + /* Did we already check for PCKMO ? */ + if (!pckmo_functions.bytes[0]) { + /* no, so check now */ + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) + return -ENODEV; + } + /* check for the pckmo subfunction we need now */ if (!cpacf_test_func(&pckmo_functions, fc)) { DEBUG_ERR("%s pckmo functions not available\n", __func__); return -ENODEV; @@ -1504,7 +1506,7 @@ static struct miscdevice pkey_dev = { */ static int __init pkey_init(void) { - cpacf_mask_t kmc_functions; + cpacf_mask_t func_mask; /* * The pckmo instruction should be available - even if we don't @@ -1512,15 +1514,15 @@ static int __init pkey_init(void) * is also the minimum level for the kmc instructions which * are able to work with protected keys. 
*/ - if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) + if (!cpacf_query(CPACF_PCKMO, &func_mask)) return -ENODEV; /* check for kmc instructions available */ - if (!cpacf_query(CPACF_KMC, &kmc_functions)) + if (!cpacf_query(CPACF_KMC, &func_mask)) return -ENODEV; - if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || - !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || - !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) + if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) || + !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) || + !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256)) return -ENODEV; pkey_debug_init(); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index be2520cc010b..7dc72cb718b0 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) static void vfio_ap_queue_dev_remove(struct ap_device *apdev) { struct vfio_ap_queue *q; - int apid, apqi; mutex_lock(&matrix_dev->lock); q = dev_get_drvdata(&apdev->device); + vfio_ap_mdev_reset_queue(q, 1); dev_set_drvdata(&apdev->device, NULL); - apid = AP_QID_CARD(q->apqn); - apqi = AP_QID_QUEUE(q->apqn); - vfio_ap_mdev_reset_queue(apid, apqi, 1); - vfio_ap_irq_disable(q); kfree(q); mutex_unlock(&matrix_dev->lock); } diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 5c0f53c6dde7..1ec01148018f 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -25,6 +25,7 @@ #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); +static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static int match_apqn(struct device *dev, const void *data) { @@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue( int apqn) { struct vfio_ap_queue *q; - struct device *dev; if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) return NULL; if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) return NULL; - dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, - &apqn, match_apqn); - if (!dev) - return NULL; - q = dev_get_drvdata(dev); - q->matrix_mdev = matrix_mdev; - put_device(dev); + q = vfio_ap_find_queue(apqn); + if (q) + q->matrix_mdev = matrix_mdev; return q; } @@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn) */ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) { - if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev) + if (!q) + return; + if (q->saved_isc != VFIO_AP_ISC_INVALID && + !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); - if (q->saved_pfn && q->matrix_mdev) + q->saved_isc = VFIO_AP_ISC_INVALID; + } + if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &q->saved_pfn, 1); - q->saved_pfn = 0; - q->saved_isc = VFIO_AP_ISC_INVALID; + q->saved_pfn = 0; + } } /** @@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) * Returns if ap_aqic function failed with invalid, deconfigured or * checkstopped AP. 
*/ -struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) +static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) { struct ap_qirq_ctrl aqic_gisa = {}; struct ap_queue_status status; @@ -1114,48 +1115,70 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static void vfio_ap_irq_disable_apqn(int apqn) +static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) { struct device *dev; - struct vfio_ap_queue *q; + struct vfio_ap_queue *q = NULL; dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, &apqn, match_apqn); if (dev) { q = dev_get_drvdata(dev); - vfio_ap_irq_disable(q); put_device(dev); } + + return q; } -int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, +int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry) { struct ap_queue_status status; + int ret; int retry2 = 2; - int apqn = AP_MKQID(apid, apqi); - do { - status = ap_zapq(apqn); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - while (!status.queue_empty && retry2--) { - msleep(20); - status = ap_tapq(apqn, NULL); - } - WARN_ON_ONCE(retry2 <= 0); - return 0; - case AP_RESPONSE_RESET_IN_PROGRESS: - case AP_RESPONSE_BUSY: + if (!q) + return 0; + +retry_zapq: + status = ap_zapq(q->apqn); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + ret = 0; + break; + case AP_RESPONSE_RESET_IN_PROGRESS: + if (retry--) { msleep(20); - break; - default: - /* things are really broken, give up */ - return -EIO; + goto retry_zapq; } - } while (retry--); + ret = -EBUSY; + break; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + WARN_ON_ONCE(status.irq_enabled); + ret = -EBUSY; + goto free_resources; + default: + /* things are really broken, give up */ + WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n", + status.response_code); + return -EIO; + } - return -EBUSY; + /* wait for the reset to take effect */ + while (retry2--) { + if (status.queue_empty && !status.irq_enabled) + break; + msleep(20); + status = ap_tapq(q->apqn, NULL); + } + WARN_ON_ONCE(retry2 <= 0); + +free_resources: + vfio_ap_free_aqic_resources(q); + + return ret; } static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) @@ -1163,13 +1186,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) int ret; int rc = 0; unsigned long apid, apqi; + struct vfio_ap_queue *q; struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, matrix_mdev->matrix.apm_max + 1) { for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, matrix_mdev->matrix.aqm_max + 1) { - ret = vfio_ap_mdev_reset_queue(apid, apqi, 1); + q = vfio_ap_find_queue(AP_MKQID(apid, apqi)); + ret = vfio_ap_mdev_reset_queue(q, 1); /* * Regardless whether a queue turns out to be busy, or * is not operational, we need to continue resetting @@ -1177,7 +1202,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) */ if (ret) rc = ret; - vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi)); } } @@ -1255,7 +1279,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) info.num_regions = 0; info.num_irqs = 0; - return copy_to_user((void __user *)arg, &info, minsz); + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; } static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index f46dde56b464..28e9d9989768 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -88,11 +88,6 @@ struct ap_matrix_mdev { struct mdev_device *mdev; }; -extern int vfio_ap_mdev_register(void); -extern void vfio_ap_mdev_unregister(void); -int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, - unsigned int retry); - struct vfio_ap_queue { struct ap_matrix_mdev *matrix_mdev; unsigned long saved_pfn; @@ -100,5 +95,10 @@ struct vfio_ap_queue { #define VFIO_AP_ISC_INVALID 0xff unsigned char saved_isc; }; -struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); + +int vfio_ap_mdev_register(void); +void vfio_ap_mdev_unregister(void); +int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, + unsigned int retry); + #endif /* _VFIO_AP_PRIVATE_H_ */ diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 7fa0262e91af..ec41a8a76398 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -1419,7 +1419,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(u32) * AP_DEVICES)) rc = -EFAULT; kfree(reqcnt); return rc; diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 110fe9d0cb91..03999b06affd 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1684,9 +1684,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, *nr_apqns = 0; /* fetch status of all crypto cards */ - device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); + device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); if (!device_status) return -ENOMEM; zcrypt_device_status_mask_ext(device_status); @@ -1754,7 +1754,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, verify = 0; } - kfree(device_status); + kvfree(device_status); return rc; } EXPORT_SYMBOL(cca_findcard2); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 4fc2056bd227..e615dc240150 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, ISM_NR_DMBS); - if (!ism->smcd) + if (!ism->smcd) { + ret = -ENOMEM; goto err_resource; + } ism->smcd->priv = ism; ret = ism_dev_init(ism); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 820f2c29376c..93b4cb156b0b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -436,10 +436,13 @@ enum qeth_qdio_out_buffer_state { QETH_QDIO_BUF_EMPTY, /* Filled by driver; owned by hardware in order to be sent. */ QETH_QDIO_BUF_PRIMED, - /* Identified to be pending in TPQ. */ + /* Discovered by the TX completion code: */ QETH_QDIO_BUF_PENDING, - /* Found in completion queue. 
*/ - QETH_QDIO_BUF_IN_CQ, + /* Finished by the TX completion code: */ + QETH_QDIO_BUF_NEED_QAOB, + /* Received QAOB notification on CQ: */ + QETH_QDIO_BUF_QAOB_OK, + QETH_QDIO_BUF_QAOB_ERROR, /* Handled via transfer pending / completion queue. */ QETH_QDIO_BUF_HANDLED_DELAYED, }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index fe70e9875bde..fad1c46d4b0e 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -31,6 +31,7 @@ #include <net/iucv/af_iucv.h> #include <net/dsfield.h> +#include <net/sock.h> #include <asm/ebcdic.h> #include <asm/chpid.h> @@ -425,18 +426,13 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, } } - if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == - QETH_QDIO_BUF_HANDLED_DELAYED)) { - /* for recovery situations */ - qeth_init_qdio_out_buf(q, bidx); - QETH_CARD_TEXT(q->card, 2, "clprecov"); - } } static void qeth_qdio_handle_aob(struct qeth_card *card, unsigned long phys_aob_addr) { + enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK; struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; @@ -448,22 +444,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, buffer = (struct qeth_qdio_out_buffer *) aob->user1; QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); - if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, - QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { - notification = TX_NOTIFY_OK; - } else { - WARN_ON_ONCE(atomic_read(&buffer->state) != - QETH_QDIO_BUF_PENDING); - atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); - notification = TX_NOTIFY_DELAYED_OK; - } - - if (aob->aorc != 0) { - QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); - notification = qeth_compute_cq_notification(aob->aorc, 1); - } - qeth_notify_skbs(buffer->q, buffer, notification); - /* Free dangling allocations. The attached skbs are handled by * qeth_cleanup_handled_pending(). */ @@ -474,7 +454,33 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, kmem_cache_free(qeth_core_header_cache, (void *) aob->sba[i]); } - atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + + if (aob->aorc) { + QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); + new_state = QETH_QDIO_BUF_QAOB_ERROR; + } + + switch (atomic_xchg(&buffer->state, new_state)) { + case QETH_QDIO_BUF_PRIMED: + /* Faster than TX completion code. */ + notification = qeth_compute_cq_notification(aob->aorc, 0); + qeth_notify_skbs(buffer->q, buffer, notification); + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + break; + case QETH_QDIO_BUF_PENDING: + /* TX completion code is active and will handle the async + * completion for us. + */ + break; + case QETH_QDIO_BUF_NEED_QAOB: + /* TX completion code is already finished. 
*/ + notification = qeth_compute_cq_notification(aob->aorc, 1); + qeth_notify_skbs(buffer->q, buffer, notification); + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + break; + default: + WARN_ON_ONCE(1); + } qdio_release_aob(aob); } @@ -1083,7 +1089,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, skb_queue_walk(&buf->skb_list, skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); - if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) + if (skb->sk && skb->sk->sk_family == PF_IUCV) iucv_sk(skb->sk)->sk_txnotify(skb, notification); } } @@ -1094,9 +1100,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, struct qeth_qdio_out_q *queue = buf->q; struct sk_buff *skb; - /* release may never happen from within CQ tasklet scope */ - WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); - if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); @@ -4163,9 +4166,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); - if (cmd->hdr.return_code) - return -EIO; - qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_CARD_TEXT_(card, 2, "rc=%d", @@ -4175,7 +4175,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", access_ctrl_req->subcmd_code, CARD_DEVID(card), cmd->data.setadapterparms.hdr.return_code); - switch (cmd->data.setadapterparms.hdr.return_code) { + switch (qeth_setadpparms_inspect_rc(cmd)) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, @@ -5226,9 +5226,32 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, QETH_QDIO_BUF_PENDING) == - QETH_QDIO_BUF_PRIMED) + QETH_QDIO_BUF_PRIMED) { qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); + /* Handle race with qeth_qdio_handle_aob(): */ + switch (atomic_xchg(&buffer->state, + QETH_QDIO_BUF_NEED_QAOB)) { + case QETH_QDIO_BUF_PENDING: + /* No concurrent QAOB notification. */ + break; + case QETH_QDIO_BUF_QAOB_OK: + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_DELAYED_OK); + atomic_set(&buffer->state, + QETH_QDIO_BUF_HANDLED_DELAYED); + break; + case QETH_QDIO_BUF_QAOB_ERROR: + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_DELAYED_GENERALERROR); + atomic_set(&buffer->state, + QETH_QDIO_BUF_HANDLED_DELAYED); + break; + default: + WARN_ON_ONCE(1); + } + } + QETH_CARD_TEXT_(card, 5, "pel%u", bidx); /* prepare the queue slot for re-use: */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 92bace3b28fd..8c4613617ef1 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1168,12 +1168,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work) NULL }; - /* Role should not change by itself, but if it did, */ - /* information from the hardware is authoritative. */ - mutex_lock(&data->card->sbp_lock); - data->card->options.sbp.role = entry->role; - mutex_unlock(&data->card->sbp_lock); - snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); snprintf(env_role, sizeof(env_role), "ROLE=%s", (entry->role == QETH_SBP_ROLE_NONE) ? 
"none" : @@ -1199,6 +1193,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, int extrasize; QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index a1c23e998f97..8dee16aca421 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2114,7 +2114,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - if (qeth_get_ip_version(skb) != 4) + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) features &= ~NETIF_F_HW_VLAN_CTAG_TX; return qeth_features_check(skb, dev, features); } diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 96f0d34e9459..08dc2efb7d8a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -576,7 +576,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) ZFCP_STATUS_ERP_TIMEDOUT)) { req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_dbf_rec_run("erscf_1", act); - req->erp_action = NULL; + /* lock-free concurrent access with + * zfcp_erp_timeout_handler() + */ + WRITE_ONCE(req->erp_action, NULL); } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_dbf_rec_run("erscf_2", act); @@ -612,8 +615,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) void zfcp_erp_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); - struct zfcp_erp_action *act = fsf_req->erp_action; + struct zfcp_erp_action *act; + if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) + return; + /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */ + act = READ_ONCE(fsf_req->erp_action); + if (!act) + return; zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT); } @@ -725,7 +734,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) adapter->peer_d_id); if (IS_ERR(port)) /* error or port already attached */ return; - _zfcp_erp_port_reopen(port, 0, "ereptp1"); + zfcp_erp_port_reopen(port, 0, "ereptp1"); } static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf( diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index cf63916814cc..5c652deb6fed 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -409,7 +409,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer(&req->timer); + del_timer_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -762,7 +762,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer(&req->timer); + del_timer_sync(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 957889a42d2e..f6f03a349c3f 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -117,7 +117,7 @@ struct virtio_rev_info { }; /* the highest virtio-ccw revision we support */ -#define VIRTIO_CCW_REV_MAX 1 +#define VIRTIO_CCW_REV_MAX 2 
struct virtio_ccw_vq_info { struct virtqueue *vq; @@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev) u8 old_status = vcdev->dma_area->status; struct ccw1 *ccw; - if (vcdev->revision < 1) + if (vcdev->revision < 2) return vcdev->dma_area->status; ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 90cf4691b8c3..9ea30fcb4428 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -114,15 +114,6 @@ config BLK_DEV_SR <file:Documentation/scsi/scsi.txt>. The module will be called sr_mod. -config BLK_DEV_SR_VENDOR - bool "Enable vendor-specific extensions (for SCSI CDROM)" - depends on BLK_DEV_SR - help - This enables the usage of vendor specific SCSI commands. This is - required to support multisession CDs with old NEC/TOSHIBA cdrom - drives (and HP Writers). If you have such a drive and get the first - session only, try saying Y here; everybody else says N. - config CHR_DEV_SG tristate "SCSI generic support" depends on SCSI diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 0ed3f806ace5..2388143d59f5 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2467,13 +2467,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", @@ -2559,13 +2559,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 2142a649e865..90fb17c5dd69 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -728,7 +728,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; - } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) + } else return -EINVAL; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4a858789e6c5..1035f947f1bc 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -723,7 +723,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); - + if (status != -EINPROGRESS) { + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 secs for completion */ for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { @@ -902,11 +906,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if 
(info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host device reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -921,7 +925,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); - + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state == 0) { @@ -960,11 +969,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host target reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -981,6 +990,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) (fib_callback) aac_tmf_callback, (void *) info); + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state <= 0) { @@ -1033,7 +1049,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) } } - pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME); /* * Check the health of the controller @@ -1591,7 +1607,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost; struct aac_dev *aac; struct list_head *insert = &aac_devices; - int error = -ENODEV; + int error; int unique_id = 0; u64 dmamask; int mask_bits = 0; @@ -1616,7 +1632,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) error = pci_enable_device(pdev); if (error) goto out; - error = -ENODEV; if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -1648,8 +1663,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); - if (!shost) + if (!shost) { + error = -ENOMEM; goto out_disable_pdev; + } shost->irq = pdev->irq; shost->unique_id = unique_id; @@ -1674,8 +1691,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, sizeof(struct fib), GFP_KERNEL); - if (!aac->fibs) + if (!aac->fibs) { + error = -ENOMEM; goto out_free_host; + } + spin_lock_init(&aac->fib_lock); mutex_init(&aac->ioctl_mutex); diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index d12dd89538df..deab66598910 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2911,8 +2911,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); - if (!ashost->base || !ashost->fast) + if (!ashost->base || !ashost->fast) { + ret = -ENOMEM; goto out_put; + } host->irq = ec->irq; ashost->host = host; diff --git 
a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index a1f3e9ee4e63..14e1d001253c 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -450,7 +450,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index 134f040d58e2..f441ec8eb93d 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -571,7 +571,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_remove: fas216_remove(host); diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index c795537a671c..2dc0df005cb3 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -378,7 +378,7 @@ static int powertecscsi_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 2f9213b257a4..93e401180991 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); + rc = -ENODEV; goto out_release_region; } diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index e0ccb48ec961..40e9c9dc04bd 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig @@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE depends on (IPV6 || IPV6=n) depends on LIBFC depends on LIBFCOE + depends on MMU select NETDEVICES select ETHERNET select NET_VENDOR_BROADCOM diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 702dc82c9501..a0c0791abee6 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig @@ -4,6 +4,7 @@ config SCSI_BNX2_ISCSI depends on NET depends on PCI depends on (IPV6 || IPV6=n) + depends on MMU select SCSI_ISCSI_ATTRS select NETDEVICES select ETHERNET diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index e51923886475..1b6f9351b43f 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 524cdbcd29aa..ec7d01f6e2d5 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk) struct net_device *ndev = cdev->ports[csk->port_id]; struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; struct sk_buff *skb = NULL; + int ret; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); @@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk) csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); if (csk->atid < 0) { pr_err("NO atid available.\n"); - return -EINVAL; + ret = -EINVAL; + goto put_sock; } 
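Several fixes in this stretch (acornscsi, bfad, cxlflash, and the cxgb3i init_act_open() conversion continuing below) share one shape: an error discovered mid-setup must release exactly the resources acquired so far, which kernel C conventionally expresses as goto labels that unwind in reverse acquisition order. Here is a stripped-down user-space sketch of that convention; the two malloc() calls are hypothetical stand-ins for resources like the atid and the work-request skb.

/* build: cc -std=c11 sketch.c */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_setup(void)
{
	char *a, *b;
	int ret;

	a = malloc(16);            /* first resource, e.g. the atid */
	if (!a) {
		ret = -ENOMEM;
		goto out;          /* nothing acquired yet */
	}

	b = malloc(16);            /* second resource, e.g. the skb */
	if (!b) {
		ret = -ENOMEM;
		goto free_a;       /* undo only what was acquired */
	}

	/* success: a real driver would hand the resources onward here;
	 * the sketch just releases them and reports success */
	free(b);
	free(a);
	return 0;

free_a:
	free(a);                   /* labels run in reverse acquisition order */
out:
	return ret;
}

int main(void)
{
	printf("do_setup() = %d\n", do_setup());
	return 0;
}

The payoff is that each new failure point only has to jump to the right label, which is exactly how the cxgb3i fix reroutes its old early returns through the put_sock and free_atid labels.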
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); if (!skb) { - cxgb3_free_atid(t3dev, csk->atid); - cxgbi_sock_put(csk); - return -ENOMEM; + ret = -ENOMEM; + goto free_atid; } skb->sk = (struct sock *)csk; set_arp_failure_handler(skb, act_open_arp_failure); @@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); return 0; + +free_atid: + cxgb3_free_atid(t3dev, csk->atid); +put_sock: + cxgbi_sock_put(csk); + l2t_release(t3dev, csk->l2t); + csk->l2t = NULL; + + return ret; } cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index d1f1baba3285..d1bdd754c6a4 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI depends on PCI && INET && (IPV6 || IPV6=n) depends on THERMAL || !THERMAL depends on ETHERNET + depends on TLS || TLS=n select NET_VENDOR_CHELSIO select CHELSIO_T4 select CHELSIO_LIB diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 93ef97af22df..67d681c53c29 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3746,6 +3746,7 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->afu_cookie = cfg->ops->create_afu(pdev); if (unlikely(!cfg->afu_cookie)) { dev_err(dev, "%s: create_afu failed\n", __func__); + rc = -ENOMEM; goto out_remove; } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index f32da0ca529e..df5a3bbeba5e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -565,10 +565,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) * even though it shouldn't according to T10. * The retry without rtpg_ext_hdr_req set * handles this. + * Note: some arrays return a sense key of ILLEGAL_REQUEST + * with ASC 00h if they don't support the extended header. 
*/ if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && - sense_hdr.sense_key == ILLEGAL_REQUEST && - sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) { + sense_hdr.sense_key == ILLEGAL_REQUEST) { pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; goto retry; } @@ -658,8 +659,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) rcu_read_lock(); list_for_each_entry_rcu(h, &tmp_pg->dh_list, node) { - /* h->sdev should always be valid */ - BUG_ON(!h->sdev); + if (!h->sdev) + continue; h->sdev->access_state = desc[0]; } rcu_read_unlock(); @@ -705,7 +706,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) pg->expiry = 0; rcu_read_lock(); list_for_each_entry_rcu(h, &pg->dh_list, node) { - BUG_ON(!h->sdev); + if (!h->sdev) + continue; h->sdev->access_state = (pg->state & SCSI_ACCESS_STATE_MASK); if (pg->pref) @@ -1147,7 +1149,6 @@ static void alua_bus_detach(struct scsi_device *sdev) spin_lock(&h->pg_lock); pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); rcu_assign_pointer(h->pg, NULL); - h->sdev = NULL; spin_unlock(&h->pg_lock); if (pg) { spin_lock_irq(&pg->lock); @@ -1156,6 +1157,7 @@ static void alua_bus_detach(struct scsi_device *sdev) kref_put(&pg->kref, release_port_group); } sdev->handler_data = NULL; + synchronize_rcu(); kfree(h); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 1791a393795d..07a0dadc75bf 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -255,9 +255,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 18584ab27c32..3a2618bcce67 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -741,6 +741,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); + err = -ENOMEM; fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) goto err_out_free_resources; diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index e3f5c91d5e4f..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1027,7 +1027,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, atomic64_inc(&fnic_stats->io_stats.io_completions); - io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time); if(io_duration_time <= 10) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 522636e94628..c8bf8c7ada6a 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -444,7 +444,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev) fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ pr_err("error in devcmd2 init"); - return -ENODEV; + err = -ENODEV; + goto err_free_wq; } /* @@ -460,7 +461,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); if (err) - goto err_free_wq; + goto err_disable_wq; vdev->devcmd2->result = (struct devcmd2_result *) 
vdev->devcmd2->results_ring.descs; @@ -481,8 +482,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err_free_desc_ring: vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); -err_free_wq: +err_disable_wq: vnic_wq_disable(&vdev->devcmd2->wq); +err_free_wq: vnic_wq_free(&vdev->devcmd2->wq); err_free_devcmd2: kfree(vdev->devcmd2); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 849335d76cf6..031aa4043c5e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -904,8 +904,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct device *dev = hisi_hba->dev; + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + if (phy->phy_attached) + return; + if (!timer_pending(&phy->timer)) { - dev_dbg(dev, "phy%d OOB ready\n", phy_no); phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; add_timer(&phy->timer); } @@ -974,12 +977,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct hisi_sas_port *port; unsigned long flags; if (!sas_port) return; + port = to_hisi_sas_port(sas_port); spin_lock_irqsave(&hisi_hba->lock, flags); port->port_attached = 1; port->id = phy->port_id; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index b861a0f14c9d..3364ae0b9bfe 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1645,7 +1645,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) idx = i * HISI_SAS_PHY_INT_NR; for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) { irq = platform_get_irq(pdev, idx); - if (!irq) { + if (irq < 0) { dev_err(dev, "irq init: fail map phy interrupt %d\n", idx); return -ENOENT; @@ -1664,7 +1664,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR; for (i = 0; i < hisi_hba->queue_count; i++, idx++) { irq = platform_get_irq(pdev, idx); - if (!irq) { + if (irq < 0) { dev_err(dev, "irq init: could not map cq interrupt %d\n", idx); return -ENOENT; @@ -1682,7 +1682,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count; for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) { irq = platform_get_irq(pdev, idx); - if (!irq) { + if (irq < 0) { dev_err(dev, "irq init: could not map fatal interrupt %d\n", idx); return -ENOENT; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 216e557f703e..bac705990a96 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -504,6 +504,12 @@ static ssize_t host_store_rescan(struct device *dev, return count; } +static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1738,8 +1744,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, __func__, h->scsi_host->host_no, logical_drive->bus, logical_drive->target, logical_drive->lun); - logical_drive->offload_enabled = 0; - logical_drive->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } @@ -2499,8 +2504,7 @@ 
static void process_ioaccel2_completion(struct ctlr_info *h, IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { - dev->offload_enabled = 0; - dev->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(dev); } if (dev->in_reset) { @@ -3670,10 +3674,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { - this_device->offload_to_be_enabled = + bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); - if (hpsa_get_raid_map(h, scsi3addr, this_device)) - this_device->offload_to_be_enabled = 0; + /* + * Check to see if offload can be enabled. + */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } } out: @@ -3996,8 +4007,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; - this_device->offload_enabled = 0; - this_device->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; @@ -5230,8 +5240,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; @@ -5239,8 +5253,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, @@ -5265,7 +5283,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); - BUG_ON(r5or6_blocks_per_row == 0); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 @@ -8285,7 +8306,7 @@ static int detect_controller_lockup(struct ctlr_info *h) * * Called from monitor controller worker (hpsa_event_monitor_worker) * - * A Volume (or Volumes that comprise an Array set may be undergoing a + * A Volume (or Volumes that comprise an Array set) may be undergoing a * transformation, so we will be turning off ioaccel for all volumes that * make up the Array. */ @@ -8308,6 +8329,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) * Run through current device list used during I/O requests. 
*/ for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + device = h->dev[i]; if (!device) @@ -8325,25 +8349,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) continue; ioaccel_status = buf[IOACCEL_STATUS_BYTE]; - device->offload_config = + + /* + * Check if offload is still configured on + */ + offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); - if (device->offload_config) - device->offload_to_be_enabled = + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. + */ + if (offload_config) + offload_to_be_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + /* * Immediately turn off ioaccel for any volume the * controller tells us to. Some of the reasons could be: * transformation - change to the LVs of an Array. * degraded volume - component failure - * - * If ioaccel is to be re-enabled, re-enable later during the - * scan operation so the driver can get a fresh raidmap - * before turning ioaccel back on. - * */ - if (!device->offload_to_be_enabled) - device->offload_enabled = 0; + hpsa_turn_off_ioaccel_for_device(device); } kfree(buf); @@ -8820,7 +8854,7 @@ reinit_after_soft_reset: /* hook into SCSI subsystem */ rc = hpsa_scsi_add_host(h); if (rc) - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; @@ -8835,6 +8869,8 @@ reinit_after_soft_reset: HPSA_EVENT_MONITOR_INTERVAL); return 0; +clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->lastlogicals); clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ hpsa_free_performant_mode(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index df897df5cafe..5f8bdc5bde4b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -493,8 +493,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; + case IBMVFC_HOST_ACTION_REENABLE: + case IBMVFC_HOST_ACTION_RESET: + vhost->action = action; + break; case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_LOGO: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + case IBMVFC_HOST_ACTION_NONE: + default: switch (vhost->action) { case IBMVFC_HOST_ACTION_RESET: case IBMVFC_HOST_ACTION_REENABLE: @@ -504,15 +513,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, break; } break; - case IBMVFC_HOST_ACTION_LOGO: - case IBMVFC_HOST_ACTION_QUERY_TGTS: - case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: - case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_RESET: - case IBMVFC_HOST_ACTION_REENABLE: - default: - vhost->action = action; - break; } } @@ -2881,8 +2881,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); - if (sdev->type == TYPE_DISK) + if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + } spin_unlock_irqrestore(shost->host_lock, flags); return 0; } @@ -4337,26 +4339,45 @@ 
static void ibmvfc_do_work(struct ibmvfc_host *vhost) case IBMVFC_HOST_ACTION_INIT_WAIT: break; case IBMVFC_HOST_ACTION_RESET: - vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; spin_unlock_irqrestore(vhost->host->host_lock, flags); rc = ibmvfc_reset_crq(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); - if (rc == H_CLOSED) + if (!rc || rc == H_CLOSED) vio_enable_interrupts(to_vio_dev(vhost->dev)); - if (rc || (rc = ibmvfc_send_crq_init(vhost)) || - (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); + if (vhost->action == IBMVFC_HOST_ACTION_RESET) { + /* + * The only action we could have changed to would have + * been reenable, in which case, we skip the rest of + * this path and wait until we've done the re-enable + * before sending the crq init. + */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + + if (rc || (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); + } } break; case IBMVFC_HOST_ACTION_REENABLE: - vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; spin_unlock_irqrestore(vhost->host->host_lock, flags); rc = ibmvfc_reenable_crq_queue(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); - if (rc || (rc = ibmvfc_send_crq_init(vhost))) { - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); + if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) { + /* + * The only action we could have changed to would have + * been reset, in which case, we skip the rest of this + * path and wait until we've done the reset before + * sending the crq init. + */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + if (rc || (rc = ibmvfc_send_crq_init(vhost))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); + } } break; case IBMVFC_HOST_ACTION_LOGO: @@ -4788,6 +4809,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (IS_ERR(vhost->work_thread)) { dev_err(dev, "Couldn't create kernel thread: %ld\n", PTR_ERR(vhost->work_thread)); + rc = PTR_ERR(vhost->work_thread); goto free_host_mem; } diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 7f66a7783209..1ab0a61e3fb5 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, int rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); + set_adapter_info(hostdata); + /* Re-enable the CRQ */ do { if (rc) @@ -804,6 +806,22 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) spin_unlock_irqrestore(hostdata->host->host_lock, flags); } +/** + * ibmvscsi_set_request_limit - Set the adapter request_limit in response to + * an adapter failure, reset, or SRP Login. Done under host lock to prevent + * race with SCSI command submission. 
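+ *
+ * Because this helper takes host_lock itself, callers must not already
+ * hold it. A minimal sketch of the intended pattern, taken from the
+ * send_srp_login() hunk below, where the limit is set before the lock is
+ * acquired for the actual send:
+ *
+ *	ibmvscsi_set_request_limit(hostdata, 0);
+ *	spin_lock_irqsave(hostdata->host->host_lock, flags);
+ *	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
+ *	spin_unlock_irqrestore(hostdata->host->host_lock, flags);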
+ * @hostdata: adapter to adjust + * @limit: new request limit + */ +static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit) +{ + unsigned long flags; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + atomic_set(&hostdata->request_limit, limit); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +} + /** * ibmvscsi_reset_host - Reset the connection to the server * @hostdata: struct ibmvscsi_host_data to reset @@ -811,7 +829,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata) { scsi_block_requests(hostdata->host); - atomic_set(&hostdata->request_limit, 0); + ibmvscsi_set_request_limit(hostdata, 0); purge_requests(hostdata, DID_ERROR); hostdata->action = IBMVSCSI_HOST_ACTION_RESET; @@ -1144,13 +1162,13 @@ static void login_rsp(struct srp_event_struct *evt_struct) dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n", evt_struct->xfer_iu->srp.login_rej.reason); /* Login failed. */ - atomic_set(&hostdata->request_limit, -1); + ibmvscsi_set_request_limit(hostdata, -1); return; default: dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n", evt_struct->xfer_iu->srp.login_rsp.opcode); /* Login failed. */ - atomic_set(&hostdata->request_limit, -1); + ibmvscsi_set_request_limit(hostdata, -1); return; } @@ -1161,7 +1179,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) * This value is set rather than added to request_limit because * request_limit could have been set to -1 by this client. */ - atomic_set(&hostdata->request_limit, + ibmvscsi_set_request_limit(hostdata, be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); /* If we had any pending I/Os, kick them */ @@ -1193,13 +1211,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); - spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in * the login request we are just sending and login requests always * get sent by the driver regardless of request_limit. 
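 *
 * After this patch the limit always moves through
 * ibmvscsi_set_request_limit() as the login progresses:
 *
 *	send_srp_login():           0 (only the login itself may be sent)
 *	login_rsp(), success:       req_lim_delta advertised by the target
 *	login_rsp(), reject or bad: -1 (fail all I/O)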
*/ - atomic_set(&hostdata->request_limit, 0); + ibmvscsi_set_request_limit(hostdata, 0); + spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); @@ -1779,7 +1797,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, return; case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */ scsi_block_requests(hostdata->host); - atomic_set(&hostdata->request_limit, 0); + ibmvscsi_set_request_limit(hostdata, 0); if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); @@ -2135,12 +2153,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata) } hostdata->action = IBMVSCSI_HOST_ACTION_NONE; + spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc) { - atomic_set(&hostdata->request_limit, -1); + ibmvscsi_set_request_limit(hostdata, -1); dev_err(hostdata->dev, "error after %s\n", action); } - spin_unlock_irqrestore(hostdata->host->host_lock, flags); scsi_unblock_requests(hostdata->host); } @@ -2224,7 +2242,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_waitqueue_head(&hostdata->work_wait_q); hostdata->host = host; hostdata->dev = dev; - atomic_set(&hostdata->request_limit, -1); + ibmvscsi_set_request_limit(hostdata, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; if (map_persist_bufs(hostdata)) { @@ -2320,16 +2338,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - unsigned long flags; srp_remove_host(hostdata->host); scsi_remove_host(hostdata->host); purge_requests(hostdata, DID_ERROR); - - spin_lock_irqsave(hostdata->host->host_lock, flags); release_event_pool(&hostdata->pool, hostdata); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index e4857b728033..a64abe38db2d 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, boot_kobj->kobj.kset = boot_kset->kset; if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, NULL, name, index)) { - kfree(boot_kobj); + kobject_put(&boot_kobj->kobj); return NULL; } boot_kobj->data = data; diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index 7f683e42c798..f55b07aa1cca 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev) if (!esp->command_block) goto fail_unmap_regs; - host->irq = platform_get_irq(dev, 0); + host->irq = err = platform_get_irq(dev, 0); + if (err < 0) + goto fail_unmap_command_block; err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); if (err < 0) goto fail_unmap_command_block; diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 2b865c6423e2..589ddf003886 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; - if (IS_ERR(fp)) - goto redisc; + if (IS_ERR(fp)) { + 
mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) @@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, new_rdata->disc_id = disc->disc_id; fc_rport_login(new_rdata); } - goto out; + goto free_fp; } rdata->disc_id = disc->disc_id; mutex_unlock(&rdata->rp_mutex); @@ -626,10 +630,10 @@ redisc: fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } +free_fp: + fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); - if (!IS_ERR(fp)) - fc_frame_free(fp); } /** diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 52e866659853..e5b18e5d46da 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1619,8 +1619,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) rc = fc_exch_done_locked(ep); WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); - if (!rc) + if (!rc) { fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already, " + "hence skip calling the resp\n"); + goto skip_resp; + } } /* @@ -1639,6 +1644,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) if (!fc_invoke_resp(ep, sp, fp)) fc_frame_free(fp); +skip_resp: fc_exch_release(ep); return; rel: @@ -1895,10 +1901,16 @@ static void fc_exch_reset(struct fc_exch *ep) fc_exch_hold(ep); - if (!rc) + if (!rc) { fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already, " + "hence skip calling the resp\n"); + goto skip_resp; + } fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); +skip_resp: fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_release(ep); } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 684c5e361a28..9399e1455d59 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1729,7 +1729,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " - "lport->mfs:%hu\n", mfs, lport->mfs); + "lport->mfs:%u\n", mfs, lport->mfs); fc_lport_error(lport, fp); goto out; } diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index da6e97d8dc3b..64500417c22e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) lockdep_assert_held(&lport->disc.disc_mutex); rdata = fc_rport_lookup(lport, port_id); - if (rdata) + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); return rdata; + } if (lport->rport_priv_size > 0) rport_priv_size = lport->rport_priv_size; @@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, fc_rport_state_enter(rdata, RPORT_ST_DELETE); - kref_get(&rdata->kref); - if (rdata->event == RPORT_EV_NONE && - !queue_work(rport_event_queue, &rdata->event_work)) - kref_put(&rdata->kref, fc_rport_destroy); + if (rdata->event == RPORT_EV_NONE) { + kref_get(&rdata->kref); + if (!queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + } rdata->event = event; } @@ -1208,9 +1211,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, rjt = fc_frame_payload_get(fp, sizeof(*rjt)); if (!rjt) FC_RPORT_DBG(rdata, "PRLI bad response\n"); - else + else { FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n", rjt->er_reason,
rjt->er_explan); + if (rjt->er_reason == ELS_RJT_UNAB && + rjt->er_explan == ELS_EXPL_PLOGI_REQD) { + fc_rport_enter_plogi(rdata); + goto out; + } + } fc_rport_error_retry(rdata, FC_EX_ELS_RJT); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 70b99c0e2e67..c5b7d18513b6 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) if (conn->task == task) conn->task = NULL; - if (conn->ping_task == task) - conn->ping_task = NULL; + if (READ_ONCE(conn->ping_task) == task) + WRITE_ONCE(conn->ping_task, NULL); /* release get from queueing */ __iscsi_put_task(task); @@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } + if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK)) + WRITE_ONCE(conn->ping_task, task); + if (!ihost->workq) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; @@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) struct iscsi_nopout hdr; struct iscsi_task *task; - if (!rhdr && conn->ping_task) - return -EINVAL; + if (!rhdr) { + if (READ_ONCE(conn->ping_task)) + return -EINVAL; + WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK); + } memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); if (!task) { + if (!rhdr) + WRITE_ONCE(conn->ping_task, NULL); iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); return -EIO; } else if (!rhdr) { /* only track our nops */ - conn->ping_task = task; conn->last_ping = jiffies; } @@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task, struct iscsi_conn *conn = task->conn; int rc = 0; - if (conn->ping_task != task) { + if (READ_ONCE(conn->ping_task) != task) { /* * If this is not in response to one of our * nops then it must be from userspace. 
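Taken together, the ping_task changes above replace a lock-protected pointer with a small lockless hand-off done entirely through READ_ONCE()/WRITE_ONCE(). A sketch of the resulting life cycle; INVALID_SCSI_TASK is the reserved sentinel this series introduces in the iscsi headers, and its exact definition is assumed here rather than shown:

	/*
	 * conn->ping_task now holds one of three values:
	 *
	 *	NULL              - no driver-initiated ping outstanding
	 *	INVALID_SCSI_TASK - slot reserved in iscsi_send_nopout()
	 *	                    before the task exists; claimed by the
	 *	                    real task in __iscsi_conn_send_pdu()
	 *	task pointer      - ping in flight; cleared again in
	 *	                    iscsi_complete_task() or on send failure
	 */

Reserving the slot with the sentinel before the task is even allocated is what closes the window in which two driver-initiated pings could be started at once.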
@@ -1525,14 +1532,9 @@ check_mgmt: } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { - if (rc == -ENOMEM || rc == -EACCES) { - spin_lock_bh(&conn->taskqueuelock); - list_add_tail(&conn->task->running, - &conn->cmdqueue); - conn->task = NULL; - spin_unlock_bh(&conn->taskqueuelock); - goto done; - } else + if (rc == -ENOMEM || rc == -EACCES) + fail_scsi_task(conn->task, DID_IMM_RETRY); + else fail_scsi_task(conn->task, DID_ABORT); spin_lock_bh(&conn->taskqueuelock); continue; @@ -1923,7 +1925,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn) */ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) { - if (conn->ping_task && + if (READ_ONCE(conn->ping_task) && time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + (conn->ping_timeout * HZ), jiffies)) return 1; @@ -2058,7 +2060,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Checking the transport already or nop from a cmd timeout still * running */ - if (conn->ping_task) { + if (READ_ONCE(conn->ping_task)) { task->have_checked_conn = true; rc = BLK_EH_RESET_TIMER; goto done; @@ -3324,125 +3326,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, switch(param) { case ISCSI_PARAM_FAST_ABORT: - len = sprintf(buf, "%d\n", session->fast_abort); + len = sysfs_emit(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: - len = sprintf(buf, "%d\n", session->abort_timeout); + len = sysfs_emit(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: - len = sprintf(buf, "%d\n", session->lu_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: - len = sprintf(buf, "%d\n", session->tgt_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: - len = sprintf(buf, "%d\n", session->initial_r2t_en); + len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: - len = sprintf(buf, "%hu\n", session->max_r2t); + len = sysfs_emit(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: - len = sprintf(buf, "%d\n", session->imm_data_en); + len = sysfs_emit(buf, "%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: - len = sprintf(buf, "%u\n", session->first_burst); + len = sysfs_emit(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: - len = sprintf(buf, "%u\n", session->max_burst); + len = sysfs_emit(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: - len = sprintf(buf, "%d\n", session->pdu_inorder_en); + len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: - len = sprintf(buf, "%d\n", session->dataseq_inorder_en); + len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_DEF_TASKMGMT_TMO: - len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); + len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); break; case ISCSI_PARAM_ERL: - len = sprintf(buf, "%d\n", session->erl); + len = sysfs_emit(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: - len = sprintf(buf, "%s\n", session->targetname); + len = sysfs_emit(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TARGET_ALIAS: - len = sprintf(buf, "%s\n", session->targetalias); + len = sysfs_emit(buf, "%s\n", session->targetalias); break; case ISCSI_PARAM_TPGT: - len = sprintf(buf, "%d\n", session->tpgt); + len = sysfs_emit(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: - len = 
sprintf(buf, "%s\n", session->username); + len = sysfs_emit(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: - len = sprintf(buf, "%s\n", session->username_in); + len = sysfs_emit(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: - len = sprintf(buf, "%s\n", session->password); + len = sysfs_emit(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: - len = sprintf(buf, "%s\n", session->password_in); + len = sysfs_emit(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: - len = sprintf(buf, "%s\n", session->ifacename); + len = sysfs_emit(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", session->initiatorname); + len = sysfs_emit(buf, "%s\n", session->initiatorname); break; case ISCSI_PARAM_BOOT_ROOT: - len = sprintf(buf, "%s\n", session->boot_root); + len = sysfs_emit(buf, "%s\n", session->boot_root); break; case ISCSI_PARAM_BOOT_NIC: - len = sprintf(buf, "%s\n", session->boot_nic); + len = sysfs_emit(buf, "%s\n", session->boot_nic); break; case ISCSI_PARAM_BOOT_TARGET: - len = sprintf(buf, "%s\n", session->boot_target); + len = sysfs_emit(buf, "%s\n", session->boot_target); break; case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: - len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); + len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); break; case ISCSI_PARAM_DISCOVERY_SESS: - len = sprintf(buf, "%u\n", session->discovery_sess); + len = sysfs_emit(buf, "%u\n", session->discovery_sess); break; case ISCSI_PARAM_PORTAL_TYPE: - len = sprintf(buf, "%s\n", session->portal_type); + len = sysfs_emit(buf, "%s\n", session->portal_type); break; case ISCSI_PARAM_CHAP_AUTH_EN: - len = sprintf(buf, "%u\n", session->chap_auth_en); + len = sysfs_emit(buf, "%u\n", session->chap_auth_en); break; case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: - len = sprintf(buf, "%u\n", session->discovery_logout_en); + len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); break; case ISCSI_PARAM_BIDI_CHAP_EN: - len = sprintf(buf, "%u\n", session->bidi_chap_en); + len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); break; case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: - len = sprintf(buf, "%u\n", session->discovery_auth_optional); + len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); break; case ISCSI_PARAM_DEF_TIME2WAIT: - len = sprintf(buf, "%d\n", session->time2wait); + len = sysfs_emit(buf, "%d\n", session->time2wait); break; case ISCSI_PARAM_DEF_TIME2RETAIN: - len = sprintf(buf, "%d\n", session->time2retain); + len = sysfs_emit(buf, "%d\n", session->time2retain); break; case ISCSI_PARAM_TSID: - len = sprintf(buf, "%u\n", session->tsid); + len = sysfs_emit(buf, "%u\n", session->tsid); break; case ISCSI_PARAM_ISID: - len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", session->isid[0], session->isid[1], session->isid[2], session->isid[3], session->isid[4], session->isid[5]); break; case ISCSI_PARAM_DISCOVERY_PARENT_IDX: - len = sprintf(buf, "%u\n", session->discovery_parent_idx); + len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); break; case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: if (session->discovery_parent_type) - len = sprintf(buf, "%s\n", + len = sysfs_emit(buf, "%s\n", session->discovery_parent_type); else - len = sprintf(buf, "\n"); + len = sysfs_emit(buf, "\n"); break; default: return -ENOSYS; @@ -3474,16 +3476,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, case ISCSI_PARAM_CONN_ADDRESS: case 
ISCSI_HOST_PARAM_IPADDRESS: if (sin) - len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); + len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); else - len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); + len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: if (sin) - len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else - len = sprintf(buf, "%hu\n", + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin6->sin6_port)); break; default: @@ -3502,88 +3504,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_PING_TMO: - len = sprintf(buf, "%u\n", conn->ping_timeout); + len = sysfs_emit(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: - len = sprintf(buf, "%u\n", conn->recv_timeout); + len = sysfs_emit(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_recv_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_xmit_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: - len = sprintf(buf, "%d\n", conn->hdrdgst_en); + len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: - len = sprintf(buf, "%d\n", conn->datadgst_en); + len = sysfs_emit(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ifmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ofmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: - len = sprintf(buf, "%u\n", conn->exp_statsn); + len = sysfs_emit(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: - len = sprintf(buf, "%d\n", conn->persistent_port); + len = sysfs_emit(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - len = sprintf(buf, "%s\n", conn->persistent_address); + len = sysfs_emit(buf, "%s\n", conn->persistent_address); break; case ISCSI_PARAM_STATSN: - len = sprintf(buf, "%u\n", conn->statsn); + len = sysfs_emit(buf, "%u\n", conn->statsn); break; case ISCSI_PARAM_MAX_SEGMENT_SIZE: - len = sprintf(buf, "%u\n", conn->max_segment_size); + len = sysfs_emit(buf, "%u\n", conn->max_segment_size); break; case ISCSI_PARAM_KEEPALIVE_TMO: - len = sprintf(buf, "%u\n", conn->keepalive_tmo); + len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); break; case ISCSI_PARAM_LOCAL_PORT: - len = sprintf(buf, "%u\n", conn->local_port); + len = sysfs_emit(buf, "%u\n", conn->local_port); break; case ISCSI_PARAM_TCP_TIMESTAMP_STAT: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); break; case ISCSI_PARAM_TCP_NAGLE_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); break; case ISCSI_PARAM_TCP_WSF_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); break; case ISCSI_PARAM_TCP_TIMER_SCALE: - len = sprintf(buf, "%u\n", conn->tcp_timer_scale); + len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); break; case ISCSI_PARAM_TCP_TIMESTAMP_EN: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); 
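		/*
		 * Unlike sprintf(), sysfs_emit() knows it is writing into the
		 * one-page sysfs buffer: it warns and returns 0 when buf is
		 * not page aligned and clamps its output to PAGE_SIZE, while
		 * keeping sprintf()'s return convention (bytes written).
		 * That matching convention is why every conversion in these
		 * hunks is a mechanical one-for-one swap.
		 */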
break; case ISCSI_PARAM_IP_FRAGMENT_DISABLE: - len = sprintf(buf, "%u\n", conn->fragment_disable); + len = sysfs_emit(buf, "%u\n", conn->fragment_disable); break; case ISCSI_PARAM_IPV4_TOS: - len = sprintf(buf, "%u\n", conn->ipv4_tos); + len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); break; case ISCSI_PARAM_IPV6_TC: - len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); + len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); break; case ISCSI_PARAM_IPV6_FLOW_LABEL: - len = sprintf(buf, "%u\n", conn->ipv6_flow_label); + len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); break; case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: - len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); + len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); break; case ISCSI_PARAM_TCP_XMIT_WSF: - len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); break; case ISCSI_PARAM_TCP_RECV_WSF: - len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); break; case ISCSI_PARAM_LOCAL_IPADDR: - len = sprintf(buf, "%s\n", conn->local_ipaddr); + len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); break; default: return -ENOSYS; @@ -3601,13 +3603,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", ihost->netdev); + len = sysfs_emit(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - len = sprintf(buf, "%s\n", ihost->hwaddress); + len = sysfs_emit(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", ihost->initiatorname); + len = sysfs_emit(buf, "%s\n", ihost->initiatorname); break; default: return -ENOSYS; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e9e00740f7ca..5d28bb7f2ca4 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -200,15 +200,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; + task->data_dir = qc->dma_dir; + } else if (qc->tf.protocol == ATA_PROT_NODATA) { + task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; + task->data_dir = qc->dma_dir; } - - task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d7302c2052f9..10975f3f7ff6 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev) pr_warn("driver on host %s cannot handle device %llx, error:%d\n", dev_name(sas_ha->dev), SAS_ADDR(dev->sas_addr), res); + return res; } set_bit(SAS_DEV_FOUND, &dev->state); kref_get(&dev->kref); - return res; + return 0; } diff --git a/drivers/scsi/lpfc/COPYING b/drivers/scsi/lpfc/COPYING new file mode 100644 index 000000000000..1bcc46f53a63 --- /dev/null +++ b/drivers/scsi/lpfc/COPYING @@ -0,0 +1,342 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. 
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". 
+ +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. + + diff --git a/drivers/scsi/lpfc/ChangeLog b/drivers/scsi/lpfc/ChangeLog new file mode 100644 index 000000000000..d409d55d9fcc --- /dev/null +++ b/drivers/scsi/lpfc/ChangeLog @@ -0,0 +1,1624 @@ +Changes from 20131002 to 20131014 + + * Changed version number to 8.3.7.34 + * Fixed not processing task management IOCB response status + (149471) + * Fixed spinlock hang (149776) + * Fixed regression in the fcp_icmnd64 changes (149473) + * Fixed invalid Total_Data_Placed value received for els and ct + command responses (149471) + * Fixed invalid fcp_rsp length for FCP_ICMND (149473) + +Changes from 20130919 to 20131002 + + * Changed version number to 8.3.7.33 + * Fixed invalid mailbox timeouts (146132) + * Fixed spinlock inversion problem (148339) + +Changes from 20130822 to 20130919 + + * Changed version number to 8.3.7.32 + * Fixed support of kernels that no longer support the + initialization function macros + * Fix crash after xri limit is reached (145907) + * Fixed issue of task management commands having a fixed timeout + * Incorporated upstream patch "lseek(fd, n, SEEK_END) does *not* go + to eof - n" into lpfc driver + * Fixed inconsistent spin lock usage (146476) + * Fix driver's abort loop functionality to skip IOs already getting + aborted + * Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 + devices (146362) + * Fix WARN_ON when driver unloads (146328) + * Fix crash on driver load due to cpu affinity logic (144668) + * Avoided making pci bar ioremap call during dual-chute WQ/RQ pci + bar selection (146139) + +Changes from 20130814 to 20130822 + + * Changed version number to 8.3.7.31 + * Fixed driver iocbq structure's iocb_flag field running out of + space (145867) + * Fixed system crash on driver load due to using cpu affinity + without proper setup (144668) + +Changes from 20130801 to 20130814 + + * Changed version number to 8.3.7.30 + * Fixed logging format of setting driver sysfs attributes hard to + interpret (145397) + * Fixed back to back RSCNs discovery failure (140905) + * Fixed race condition between BSG I/O dispatch and timeout + handling (142743) + * Fixed function mode field defined too small for not recognizing + dual-chute mode (145031) + * Updated Copyright dates - reconcile with upstream + * Removed obsolete fcp_eq_count and fcp_wq_count driver attributes + +Changes from 20130725 to 20130801 + + * Changed version number to 8.3.7.29 + * Removed data counting (residual fcfi_parm) in the case of + misbehaving target (143476) + * Fixed mailbox memory leak (144016) + +Changes from 20130712 to 20130725 + + * Changed version number to 8.3.7.28 + * Fix random errors using first burst (143435) + +Changes from 20130703 to 20130712 + + * Changed version number to 8.3.7.27 + * Fixed not able to log informational messages at early stage of + driver init time (142741) + * Fixed using unsafe linked list macro for walking and deleting + linked list (139545) + * Fix Initiator
FirstBurst behavior (142153) + * Fixed the format of some log message fields (142334) + +Changes from 20130627 to 20130703 + + * Changed version number to 8.3.7.26 + * Fix Multipath hang for TURs with large timeouts (141052) + * Fixed regression driver build issue introduced by fix for incorrect + FCP Priority value. + * Fix driver's FirstBurst capability (142153) + * Fix for incorrect FCP Priority value detected on wire (142120) + +Changes from 20130617 to 20130627 + + * Changed version number to 8.3.7.25 + * Fixed not able to perform PCI function reset when board was not + in online mode (141906) + * Fixed failure in setting SLI3 board mode for performing certain + OCM commands (138124) + * Fixed SLI3 failing FCP write on check-condition no-sense with + residual zero (141400) + * Fixed support for 128 byte WQEs (141687) + * Fixed driver's setting of reserved bits in mailbox commands (125843) + * Fixed max value of lpfc_lun_queue_depth (141043) + * Fixed Receive Queue varied frame size handling (136867) + +Changes from 20130613 to 20130617 + + * Changed version number to 8.3.7.24 + * Fixed TUR not aborted by timeout (141052) + +Changes from 20130603 to 20130613 + + * Changed version number to 8.3.7.23 + * Fix to allow OCM to correctly display Service Processor and ULP FW + names on PPC (111718) + * Updated copyright dates + +Changes from 20130523 to 20130603 + + * Changed version number to 8.3.7.22 + * Fixed freeing of iocb when internal loopback times out (139995) + * Fixed crash when loopback is executed for sli4 adapters (139990) + * Reversed patches of SLI host handling FCF modified event to the + in-use FCF (122752) + +Changes from 20130508 to 20130523 + + * Changed version number to 8.3.7.21 + * Fixed issue mailbox wait routine failed to issue dump memory mbox + command (137589) + * Merged from upstream: Fixed typos in kernel messages (136212) + * Fixed system panic due to unsafe walking and deleting linked list + (139425) + * Fixed Makefile build issues introduced by support of a new OS kernel + * Fixed FCoE connection list record junk vlan identifier + stored/used/displayed (125900) + * Clarified the behavior of the lpfc_max_luns module parameter + (139188) + * Fix to allow OCM to report FEC status (137545) + * Fixed a missing return code in a logging message (138577) + * Fixed some logging message fields (138577) + +Changes from 20130425 to 20130508 + + * Changed version number to 8.3.7.20 + * Fixed list corruption when lpfc_drain_tx runs (138180) + * Fixed devloss timeout race condition caused null pointer + reference system crash (135839) + * Fixed SCSI target bus reset handlers not return error status due + to port timeout (135051) + * Fixed TUR not aborted by adding 2 seconds to mid-layer timeout + (124245) + * Fixed starting reference tag when calculating BG error (138001) + * Fixed inconsistent list removal causes crash (133672) + +Changes from 20130409 to 20130425 + + * Changed version number to 8.3.7.19 + * Fixed system panic during handling unsolicited receive buffer + error condition (137513) + * Fix BlockGuard error checking (124904) + * Fix driver build with di kernel (137148) + * Fixed crash during FCoE failover testing (137039) + * Fix lpfc_used_cpu to be more dynamic (136988) + +Changes from 20130327 to 20130409 + + * Changed version number to 8.3.7.17 + * Fixed driver handling of CLEAR_LA with NPIV enabled causing SID=0 + frames out (106135) + * Fixed lpfc build when wmb() is defined as mb() + * Reduced tmo value set to FLOGI WQE for quick recovery from FLOGI + 
sequence timeout (126169) + * Provided support for message logging when FLOGI completes with clean + address bit set to zero (136258) + * Fixed driver parameter lpfc_fcp_cpu_map = 0 (135548) + * Fixed build failures with irq_set_affinity_hint (135548) + * Fixed driver vector mapping to CPU affinity (135548) + * Fixed iocb flags not being reset for scsi commands (135965) + +Changes from 20130318 to 20130327 + + * Changed version number to 8.3.7.16 + * Fixed system panic during EEH recovery due to midlayer acting on + outstanding I/O (134408) + * Fixed not returning FAILED status when SCSI invoking host reset + handler failed (135051) + * Fixed bad bookkeeping in posting els sgls to port (135051) + * Fixed deadlock between hbalock and nlp_lock use (135524) + * Fixed BlockGuard to take advantage of rdprotect/wrprotect info + when available (124904) + +Changes from 20130304 to 20130318 + + * Changed version number to 8.3.7.15 + * Reduced spinlock contention on SCSI buffer list (134075) + +Changes from 20130221 to 20130304 + + * Changed version number to 8.3.7.14 + * Fixed crash when processing bsg's sg list with high memory pages + (126524) + +Changes from 20130207 to 20130221 + + * Changed version number to 8.3.7.13 + * Fix lpfc_fcp_look_ahead module parameter + * Fix driver issues with SCSI Host reset (126376) + * Provided support for logging doorbell information in dual-chute + WQ and RQ setup (126388) + * Fixed driver load issues on kernels that have disabled certain + T10-DIF APIs (126197) + * Fix driver issues with large s/g lists for BlockGuard (125839) + * Fix driver issues with large lpfc_sg_seg_cnt values (125839) + +Changes from 20130206 to 20130207 + + * Changed version number to 8.3.7.12 + * Fixed pt2pt and loop discovery problems on topology changes + (124392) + * Remove driver dependency on HZ (125653) + +Changes from 20130131 to 20130206 + + * Changed version number to 8.3.7.11 + * Fixed a race condition between SLI host and port failed FCF + rediscovery (125527) + * Fixed BlockGuard error reporting (124904) + * Fixed VPI allocation issues after firmware dump is performed + (124422) + +Changes from 20130116 to 20130131 + + * Changed version number to 8.3.7.10 + * Fixed potential mis-interpretation of READ_TOPOLOGY reserved + fields (106135) + * Fix default value for lpfc_enable_rrq (124850) + * Fixed circular locking dependency and inconsistent lock state + issues (124659) + * Fixed PT2PT bring up problem for FC SLI4 (124480) + +Changes from 20130111 to 20130116 + + * Changed version number to 8.3.7.9 + * Fixed OXID reuse issue (CR 124270) + * Fixed asynchronous FCF modified event to in-use FCF failure to + trigger recovery (CR 122752) + +Changes from 20121214 to 20130111 + + * Changed version number to 8.3.7.8 + * Fix potential NULL pointer dereference in lpfc_sli4_rq_put() + * Fixed deadlock condition in FCF round robin handling (123778) + * Fixed bsg timeout handling issues that would result in crashes + (123696) + * Fixed NMI watchdog panics when resetting the hba (123383) + * Fixed degraded performance after cable pulls (123431) + * Provided support for change_queue_type method in + scsi_host_template (111959) + +Changes from 20121127 to 20121214 + + * Changed version number to 8.3.7.7 + * Fixed infinite loop in lpfc_sli4_fcf_rr_next_index_get (112709) + * Fixed system crash due to SLI Port provisioning of invalid + resource count (113184) + +Changes from 20121026 to 20121127 + + * Changed version number to 8.3.7.6 + * Fixed potential memory corruption bug + * Provide
support for FCoE protocol dual-chute (ULP) operation + (110792) + * Fixed processing stale ndlp state when the node is marked for + deferred removal (111669) + * Fixed no-context ABTS received on unsolicited receive queue + failed with BA_RJT (111529) + * Removed use of NOP mailboxes for interrupt verification in + pci_probe_one_s4 (112375) + * Fixed exhausted retry for plogi to nameserver (109854) + * Fixed not properly handling ELS_REC received on the unsolicited + receive queue (111528) + * Provided support for change_queue_type method in + scsi_host_template (111959) + * Correct mask error for ct retry check + * Correct buffer length overrun for error messages + +Changes from 20121008 to 20121026 + + * Changed version number to 8.3.7.5 + * Reconcile spelling issues and whitespace fix with upstream + * Fixed setting sequential delivery bit in a service class that is + not valid (30847) + * Fix the value of DIF field in WQE (111168) + +Changes from 20120913 to 20121008 + + * Changed version number to 8.3.7.4 + * Fixed a bug when receiving Target Resets during bus reset function with FCP2 devices + * Fixed build issues from missing version.h include + * Fixed boot from san failure when SLI4 FC device presented on the + same PCI bus (107869) + * Fixed whitespace regression. + * Reconcile whitespace issues and includes with upstream + * Fixed not reporting logical link speed to SCSI midlayer when QoS + not on (109014) + +Changes from 20120910 to 20120913 + + * Changed version number to 8.3.7.3 + * Fix performance problem in pt2pt mode (108953) + * Fixed SCSI host create showing wrong link speed on SLI3 HBA ports + (93969) + +Changes from 20120801 to 20120910 + + * Changed version number to 8.3.7.2 + * Corrected text for misconfigured port messages (108240) + * Fixed not checking solicitation in progress bit when verifying FCF + record for use (108550) + * Fixed kernel warning on spinlock usage on some distributions + (108430) + * Fixed PRLI not being retried if a LS_RJT with a reason (93956) + * Fixed SLES11-SP2 taking up to 60 seconds longer to boot with SLI4 + board installed (33385) + * Fix FCP2 Retries for non-r/w commands (108188) + * Fixed Linux generic firmware download on SLI4 devices with longer + module names (108080) + * Expand I/O channel support for large systems + * Fixed SLI4 running heavy IO with function reset on PPC induced + PCI bus EEH error (81900) + * Fix incorrect comment in T10 DIF attributes + * Fixed checking BMBX register for RDY bit before writing the first + address in (107229) + * Fix imax to dmult conversion for eq_create (107074) + * Fix null pointer error for piocbq + * Corrected handling of jumps to mempool_free to fix potential memory leak + * Fixed leaking memory from pci dma pool (106836) + +Changes from 20120725 to 20120801 + + * Changed version number to 8.3.7.1 + * Cast uint64_t variables to unsigned long long to fix compile + warnings + * Fix long division for 32 bit environments (106454) + * Fixed unnecessary SCSI device reset escalation due to LLD + handling of I/O abort (106432) + * Fix lpfc_use_msi=0 for INTX (106448) + * Logged XRI of the SCSI command to be aborted on abort handler + timeout (29251) + * Fix bug with driver logging too many fcp underrun messages + (106279) + +Changes from 20120723 to 20120725 + + * Changed version number to 8.3.7.0 + * Fix number of IO channels to match CPUs + * Fix duplicate functionality between idiag/queInfo and + dumpSLIqinfo + * Remove LINUX_VERSION_CODE ifdefs + * Change default value of lpfc_fcp_imax + * Change
name of fcp_eq_count module parameter to fcp_io_channel
+ * Fixed queInfo to include queue stats
+ * Add lpfc_fcp_look_ahead module parameter
+ * Fix checking SLI rev lpfc_fcp_imax_store
+ * Make WQ distribution algorithm a module parameter
+ * Change naming convention for SLI4 Interrupt vector
+ * Change fcp_imax to be HBA centric
+ * Utilize all MSI-X vectors for FCP IO
+ * Remove lockless entry from scsi template
+ * Fix ABTS recovery to be FC spec compliant (75373)
+
+Changes from 20120716 to 20120723
+
+ * Changed version number to 8.3.5.82
+ * Fix BlockGuard lpfc_printf_vlog messages (103707)
+ * Fix parameter field in CQE to mask for LOCAL_REJECT status
+ (93992)
+ * Fix Resource and Capacity Descriptors to support SLI-4
+ interface change (96637)
+
+Changes from 20120629 to 20120716
+
+ * Changed version number to 8.3.5.81
+ * Fixed incomplete list of SLI4 commands with extended 300 second
+ timeout value (96156)
+ * Fix switching ports on Fabric causing additional fc_host rport
+ entries (95843)
+
+Changes from 20120621 to 20120629
+
+ * Changed version number to 8.3.5.80
+ * Fix discovery bug when swapping cables (95526)
+ * Revert unnecessary code change (95526)
+ * Fixed kernel panic after scsi_eh escalation by checking the
+ proper return status (CR 129611)
+ * Fix conflicts in log message numbers
+
+Changes from 20120611 to 20120621
+
+ * Changed version number to 8.3.5.79
+ * Fix driver not checking data transferred on write commands (CR
+ 127391)
+ * Fix bug with message 2520 appearing in the messages file (CR
+ 132548)
+ * Fix bug with rrq_pool not being destroyed during driver removal
+ (CR 132386)
+
+Changes from 20120525 to 20120611
+
+ * Changed version number to 8.3.5.78
+ * Fix Driver not attaching to OCe14000 adapters
+ * Fix bug with driver not setting the diag set valid bit for
+ loopback testing (CR 130864)
+ * Fix bug with driver not reporting misconfigured ports for
+ GM-LP-R (CR 131589)
+
+Changes from 20120524 to 20120525
+
+ * Changed version number to 8.3.5.77
+ * Fix System Panic During IO Test using Medusa tool (CR 129611)
+
+Changes from 20120511 to 20120524
+
+ * Changed version number to 8.3.5.76
+ * Fix fcp_imax module parameter to dynamically change FCP EQ delay
+ multiplier
+ * Fix successful aborts returning incorrect status (CR 131626)
+ * Fixed system hang when performing resource provisioning through
+ same PCI function (CR 131421)
+ * Fix system hang due to bad protection module parameters (CR
+ 130769)
+
+Changes from 20120503 to 20120511
+
+ * Changed version number to 8.3.5.75
+ * Fixed debug helper routine failing to dump CQ and EQ entries in
+ non-MSI-X mode (CR 130735)
+
+Changes from 20120427 to 20120503
+
+ * Changed version number to 8.3.5.74
+ * Fixed system crash due to not providing SCSI error-handling host
+ reset handler (CR 130121)
+ * Fix bug with driver using the wrong xritag when sending an els
+ echo (CR 130019)
+
+Changes from 20120413 to 20120427
+
+ * Changed version number to 8.3.5.73
+ * Debug helper utility routines for dumping various SLI4 queues (CR
+ 130735)
+ * Fix unsol abts xri lookup (CR 129215)
+ * Fixed issues with LPe16000 to LPe16000 discovery (CR 130446).
+ * Reregister VPI for SLI3 after cable moved to new Saturn port (CR
+ 129438)
+ * Fix driver crash during back-to-back ramp events (CR 128489)
+
+Changes from 20120403 to 20120413
+
+ * Changed version number to 8.3.5.72
+ * Fix log message 2597 displayed when no error is detected (CR
+ 129185)
+ * Update Makefile and lpfc_attr.c to build for OL UEK R2 kernel
+ * Fixed FCP LOG for easier Finisar trace correlation (CR 130098)
+ * Fix kernel panic when going into sleep state (CR 127161)
+
+Changes from 20120316 to 20120403
+
+ * Changed version number to 8.3.5.71
+ * Fix error message displayed even when not an error (CR 129683)
+ * Fix Read Link status data (CR 129681)
+ * Fixed system panic due to midlayer abort and driver complete race
+ on SCSI cmd (CR 128422)
+ * Fix unable to create vports on FCoE SLI4 adapter (CR 129170)
+ * Fix the case where an array controller does not apply FCP
+ priority rules (CR 129626)
+ * Fix initiator sending flogi after acking flogi from target (CR
+ 128501)
+
+Changes from 20120302 to 20120316
+
+ * Changed version number to 8.3.5.70
+ * Fix bug with driver not supporting the get controller attributes
+ command (CR 129259)
+ * Added support for handling SLI4-port XRI resource-provisioning
+ profile change (CR 127354)
+ * Add new BUILD_* variable to properly build for new UEK kernels,
+ rename old ones
+
+Changes from 20120217 to 20120302
+
+ * Changed version number to 8.3.5.69
+ * Sync up with upstream driver sources.
+ * Upstream patch sync
+ * Change default DA_ID support from disabled to enabled (CR 126653)
+ * Fix bug with driver unload leaving a scsi host for a vport around
+ (CR 128762)
+ * Update copyright date for files modified in 2012
+ * Fix bug with mailbox handling of REG_VFI and cable pull (CR
+ 127762)
+ * Enhance T10 DIF debugfs error injection (CR 123966)
+
+Changes from 20120210 to 20120217
+
+ * Changed version number to 8.3.5.68
+ * Fix compilation errors on some kernels due to debugfs
+ variable definitions.
+
+Changes from 20120130 to 20120210
+
+ * Changed version number to 8.3.5.67
+ * Used PCI config space read to flush PCI function reset
+ register write (CR 128101)
+ * Update copyright date for files modified in 2012
+ * Fixed system panic when extents enabled with large number of
+ small blocks (CR 128010)
+ * Fixed system panic during EEH recovery (CR 127062)
+ * Fix resource leak when acc fails for received plogi (CR 127847)
+
+Changes from 20120118 to 20120130
+
+ * Changed version number to 8.3.5.66
+ * Fix SLI4 driver module load and unload test in a loop crashing
+ the system (CR 126397)
+ * Fix missing CVL event causing round-robin FCF failover process
+ to stop (CR 123367)
+ * Fix deadlock during adapter offline request (CR 127217)
+ * Fix same RPI registered multiple times after HBA reset (CR
+ 127176)
+
+Changes from 20120113 to 20120118
+
+ * Changed version number to 8.3.5.65
+ * Fix driver handling of XRI Aborted CQE response (CR 127345)
+ * Fixed port and system failure in handling SLI4 FC port function
+ reset (CR 126551)
+ * Fix bug with driver not sending a LOGO with vport delete (CR
+ 126625)
+
+Changes from 20120106 to 20120113
+
+ * Changed version number to 8.3.5.64
+ * Make BA_ACC work on a fully qualified exchange (CR 126289)
+ * Fix for SLI4 Port delivery for BLS ABORT ACC (CR 126289)
+ * Fix KERNEL allocation while lock held
+ * Incorrect usage of bghm for BlockGuard errors (CR 127022)
+ * Fix warning on i386 system (CR 132966)
+ * Fix ndlp nodelist not empty wait timeout during driver
+ unloading (CR 127052)
+ * Fix mailbox and vpi memory leaks (CR 126818)
+
+Changes from 20111223 to 20120106
+
+ * Changed version number to 8.3.5.63
+ * Create character device to take a reference on the driver (CR
+ 126082)
+ * Fix for FDISC failures after firmware reset or link bounce (CR
+ 126779)
+ * Enhance debugfs for injecting T10 DIF errors (CR 123966)
+ * Fix SLI4 BlockGuard behavior when protection data is generated by
+ HBA (CR 121980)
+
+Changes from 20111216 to 20111223
+
+ * Changed version number to 8.3.5.62
+ * Fix for driver using duplicate RPIs after 16Gb/s adapter
+ port reset (CR 126723)
+ * Fix discovery problem when in pt2pt (CR 126887)
+ * Fixed failure in handling large CQ/EQ identifiers in an IOV
+ environment (CR 126856)
+ * Fix Locking code raising IRQ twice
+ * Fix driver not returning when bad ndlp found in abts error event
+ handling (CR 126209)
+ * Added more driver logs in area of SLI4 port error attention and
+ reset recovery (CR 126813)
+
+Changes from 20111208 to 20111216
+
+ * Changed version number to 8.3.5.61
+ * Fix bug with driver returning the wrong ndlp (CR 125743)
+ * Sync up with actual upstream lpfc driver code
+ * Added SLI4 16Gb FC Adapter support for T10 DIF / BlockGuard
+ (CR 121980)
+ * Merge from upstream: scsi: Fix up files implicitly depending on
+ module.h inclusion
+ * Fix driver behavior when receiving an ADISC (CR 126654)
+ * Fixed unbounded firmware revision string from port causing a
+ system panic (CR 126560)
+ * Fix bug with driver processing dump command type 4 using 16Gb FC
+ Adapter (CR 126406)
+ * Fix driver not resetting port when reset is needed during
+ fw_dump (CR 125807)
+
+Changes from 20111205 to 20111208
+
+ * Changed version number to 8.3.5.60
+ * Fix ELS FDISC failing with local reject / invalid RPI (CR 126350)
+ * Changed SLI4 FC port internal loopback from outer serdes to inner
+ internal (CR 126409)
+ * Fix REG_RPI failing on SLI4 HBA putting NPort into NPR state (CR
+ 126230)
+ * Fix bug with driver
processing an els command using 16Gb FC
+ Adapter (CR 126345)
+
+Changes from 20111201 to 20111205
+
+ * Changed version number to 8.3.5.59
+ * Fix NMI seen due to CQE starvation (CR 126149)
+ * Fixed SLI4 FC port obtained link type and number dependent on
+ link connection (CR 126264)
+ * Fixed SLI4 FC port internal loopback without SFP and external
+ link/loopback plug (CR 125843)
+ * Fix driver incorrectly building fcpCdb during scsi command prep
+ (CR 126209)
+
+Changes from 20111118 to 20111201
+
+ * Changed version number to 8.3.5.58
+ * Fixed FCP EQ memory check preventing initialization in the single
+ interrupt vector case (CR 125832)
+ * Fixed SLI4 16Gbit FC port internal and external diagnostic
+ loopback tests (CR 125843)
+ * Fixed system panic during EEH recovery on SLI4 FC port (CR
+ 125832)
+ * Fix FLOGI fails with invalid RPI (0x04) after connecting to an
+ arbitrated loop (CR 126094)
+ * Fix ELS SCR failing with local reject / invalid RPI (CR 126070)
+ * Fix SLI4 port failures after firmware reset or dump (CR 125888)
+ * Fix port reset log messages indicating error when no error is seen
+ (CR 125989)
+ * Fixed an issue where HBQ buffers are uninitialized in the
+ base driver (CR 125663)
+ * Fixed missing shost lock protection on setting/clearing bit in
+ vport->fc_flag (CR 126002)
+ * Fixed missing hbalock protection on setting/clearing bit in
+ phba->link_flag (CR 125994)
+ * Fixed illegal state transition during driver unload (CR 124191)
+ * Fixed system crash on SLI4 FC port loopback diagnostic test with
+ large buffers (CR 124951)
+ * Fixed a potential error path that might try to free DMA memory
+ not allocated (CR 125766)
+ * Fixed SLI4 FC port Internal and External loopback diagnostic
+ tests (CR 124951)
+
+Changes from 20111114 to 20111118
+
+ * Changed version number to 8.3.5.57
+ * Fix a crash while deleting 256 vports (CR 124996)
+ * Fix vport never finishing discovery if switch runs out of
+ resources (CR 124996)
+ * Fix kernel build warnings (CR 125622)
+ * Fix kernel panic when putting board in offline state twice (CR
+ 125572)
+ * Fix memory leak when running hba resets to SLI-4 board (CR
+ 125124)
+
+Changes from 20111110 to 20111114
+
+ * Changed version number to 8.3.5.56
+ * Fix driver message 3092 and 3116 to KERN_WARNING (CR 124768)
+ * Fix PCI read failure looking like an over-temperature event (CR
+ 125403)
+ * Fixed driver event reporting to mgmt on error attention due to
+ forced dump (CR 122986)
+ * Fix not displaying firmware rev for BE UCNA boards (CR 125369)
+ * Fixed ABTS failure logic not rediscovering targets (CR 124578)
+ * Fix bug with driver not byte swapping extended mailbox data (CR
+ 125081)
+
+Changes from 20111103 to 20111110
+
+ * Changed version number to 8.3.5.55
+ * Fix premature ndlp cleanup after second target LOGO (CR 123924)
+ * Fix Link pull causing I/O failures (CR 124668)
+ * Add loop support for sli4 fc (CR 124721)
+
+Changes from 20111017 to 20111103
+
+ * Changed version number to 8.3.5.54
+ * Fixed OCM failing COMN_READ_OBJECT and COMN_WRITE_OBJECT mailbox
+ pass-through (CR 124771)
+ * No longer read fcoe parameters if board is not fcoe (CR 124731)
+ * Fix bug with driver passing a wrong count value (CR 122550)
+ * Added restore state and error log when sysfs board_mode attribute
+ access failed (CR 124158)
+ * Fixed a typo in two of the log messages just introduced (CR
+ 124466)
+ * Added support for SLI4_CONFIG non-embedded
+ COMN_GET_CNTL_ADDL_ATTR pass-through (CR 124466)
+ * Added support for SLI4 FC port persistent linkdown
through config
+ region 23 (CR 124534)
+ * Fix for ABTS. Do not free original IOCB whenever ABTS fails (CR
+ 115829)
+ * Fixed Linux driver inconsistency in reporting FC host port-state
+ through OCM (CR 120018)
+ * Return an error if the mbox sysfs is called (CR 124210)
+ * Fix firmware upgrade attempt on unsupported adapter (CR 124406)
+ * Fix incomplete message number 2889 (CR 124385)
+ * Fix system crash when Lancer fails to initialize (CR 124390)
+
+Changes from 20111007 to 20111017
+
+ * Changed version number to 8.3.5.53
+ * Merged non-functional updates from the upstream driver sources.
+ * Fix virtual fabrics bit set in FLOGI incorrectly (CR 124339)
+ * Fix bug with driver not returning extended mailbox data (CR
+ 122523)
+ * Fix unsupported link speed accepted by SLI4 port (CR 124185
+ 124122)
+ * Incremented minor management version to reflect additional
+ "protocol" sysfs attribute (CR 124102)
+ * Modify WQ handling to use entry_repost (CR 123981)
+ * Moved link type definitions to apply to all driver streams
+ (CR 124102)
+ * Added a sysfs attribute "protocol" to report SLI4 port link
+ protocol type (CR 124102)
+ * Added SLI4 INTF_TYPE and SLI_FAMILY as sub-field to the fwrev
+ sysfs attribute (CR 124103)
+
+Changes from 20110927 to 20111007
+
+ * Changed version number to 8.3.5.52
+ * Added structure to support T10 DIF in SLI4 (CR 121980)
+ * Fix performance slowdown due to too many register accesses (CR
+ 123981)
+ * Fixed a warning when converting from simple_strtoul to
+ strict_strtoul
+ * Add Initial code to inject T10 DIF errors (CR 123966)
+ * Fixed a possible driver crash when cfg_fcp_eq_count is zero (CR
+ 123879)
+ * Fixed non-embedded COMMON_GET_CNTL_ATTR mbox command failing with
+ status 0x4 (CR 123792)
+
+Changes from 20110915 to 20110927
+
+ * Changed version number to 8.3.5.51
+ * Fixed null pointer dereference after clearing FCP policy rules
+ (CR 123378)
+ * Changed the way the SLI4 device detects physical port name (CR
+ 122006)
+ * Fixed FCP policy exchange management rule parsing (CR 123362)
+ * Add new check to lpfc_decode_firmware_rev to handle releases that
+ have an 'X' (CR 122167)
+ * Fixed RPI leaks in ELS protocol handling (CR 123150)
+ * Fix kernel crash during boot with SLI4 card installed (CR 122270)
+ * Fixed NPIV FDISC failure on SLI4 if-type 2 ports (CR 122788)
+ * Fixed driver failing to follow IP reset procedure in SLI4 error
+ attention handler (CR 122932 123052)
+
+Changes from 20110906 to 20110915
+
+ * Changed version number to 8.3.5.50
+ * Fixed warnings with OEL5 and OEL6 kernel builds (CR 122844)
+ * Fix sysfs listing fabric name for port that has been disconnected
+ from fabric (CR 93875 122918)
+ * Add FCP policy exchange management (CR 122912)
+ * Changed the timeout to 300 seconds for SLI_CONFIG (0x9B) mailbox
+ command (CR 122650)
+ * Fixed fcp underrun reporting
+
+Changes from 20110825 to 20110906
+
+ * Changed version number to 8.3.5.49
+ * Fixed a driver handling IP reset issue when PCI read returns error
+ (CR 121797)
+ * Fixed casting problem in calculating tx_words statistics
+ * Added proper error code return for libdfc calling API to make
+ proper decisions
+ * Fix sli4 mailbox status code.
Management software has
+ dependencies
+
+Changes from 20110819 to 20110825
+
+ * Changed version number to 8.3.5.48
+ * Fix compiler warning due to uninitialized local variable
+
+Changes from 20110711 to 20110819
+
+ * Changed version number to 8.3.5.47
+ * Fix bus reset handler failing with bad failure code (CR 121856)
+ * Made proper error code return for libdfc calling API to make
+ proper decisions
+ * Made error log include PCI BAR bitmap from kernel when enable PCI
+ device failed
+ * Fixed sysfs API mismatch for building against SLES11 SP2 beta
+ kernel
+ * Fixed scsi midlayer queuecommand API mismatch for SLES11 SP2 beta
+ kernel
+ * Fixed ctlreg write bug not checking key string and limiting bytes
+ to write (CR 121771)
+ * Fix default adapter name for the OCe15100
+ * Fixed incomplete scsi messages displayed
+ * Fix cable pull failure on interface type 2 SLI-4 adapters
+ * Fix enable_bg config parameter for SLI4
+ * Fixed not able to perform firmware reset through sysfs board_mode
+ attribute (CR 121468)
+ * Fixed not recovering SLI port in handling error attention with RN
+ bit set
+ * Fixed two crash cases when unsolicited ELS ECHO_CMD is received
+ * Fix direct connect not coming up for SLI4 FC ports
+ * Fixed long wait when firmware reset to a SLI port without
+ required privilege
+ * Fix request firmware support for little endian systems
+ * Fix driver firmware update to match new firmware image format
+ * Fixed SLI4 device firmware reset with SR-IOV virtual functions
+ * Fix SLI4 CT handling for sequences > 4K
+ * Fixed handling of unsolicited frames for vports
+ * Fixed handling of CVL for vports
+ * Fixed crash when aborting els IOs
+ * Fix up CT and oxid/rxid for unsol rcv frames
+ * Fixed mailbox double free panic
+ * Added fcf priority record selection for fcf failover
+ * Fixed compiler warning for putting large amount of memory on
+ stack
+
+Changes from 20110606 to 20110711
+
+ * Changed version number to 8.3.5.46
+ * Fixed driver build not building in debugfs even if kernel
+ CONFIG_DEBUG_FS=y is set
+ * Fix issue where the FC/FCoE Async Receive CQE did not scale for
+ 16G FC adapters
+ * FLOGI payload has Multiple N_port_ID set when lpfc_enable_npiv
+ clear (CR 119918)
+ * Removed unused argument passed to the lpfc_bsg_diag_mode_enter
+ routine
+ * Fixed new ASIC device-mgmt BSG pass-through failed multi-buffer
+ fw download (CR 120183)
+ * Add sysfs logging when writeable parameters are changed (CR
+ 120132)
+ * Used PCI config space capability for sysfs get PF supported SR-
+ IOV number of VFs
+ * Added wait for SLI port status register for readiness following
+ firmware reset
+ * iDiag added SLI4 device PCI BAR memory mapped register access
+ methods (CR 114450)
+ * Consolidated duplicated macro definitions
+ * Fixed failure to enable PCIe AER preventing driver initialization
+ and attach to device (CR 119658)
+ * Modified log messages for T10-PI processing
+ * iDiag fixed mailbox capture and dump overlapping bitmap macro
+ definition
+
+Changes from 20110519 to 20110606
+
+ * Changed version number to 8.3.5.44_x1
+ * iDiag extended debugfs setting up inline mailbox capture and dump
+ capability
+ * Fix bug in BlockGuard error path handling
+ * Fix EEH recovery so state is saved after every PCI reset for SLI4
+ * Fix FC Port swap on SLI3 adapters (CR 118947)
+ * Fixed Virtual link loss during Face plate failover test (CR
+ 118954)
+ * Relaxed the driver enforcement of extended mailbox command with
+ 2KB limitation (CR 118899)
+ * Fix vpi initialization in
lpfc_init_vfi
+ * Turn on MSI-X by default
+ * Resolved T10-PI implementation issues
+ * Fixed failure to show 16 Gbit from FC host supported_speeds sysfs
+ entry (CR 118863)
+ * Added setting specific pf_number in GET_PROFILE_CONFIG to get
+ maximum virtfn (CR 118717)
+ * Added driver capability of resetting new ASIC firmware and device
+ from sysfs entry (CR 118735)
+ * Add firmware upgrade code to driver
+ * Added capability of inducing New ASIC firmware dump obj file to
+ flash filesystem (CR 118716)
+ * Added 100ms delay before driver action following IF_TYPE_2
+ function reset
+ * Added Linux driver SR-IOV management support for both SLI3 and
+ SLI4 devices
+ * Fragment ELS and SCSI SGE lists based on Extent regions
+ * Remove GET_ALLOCATED extent count response work-around (CR
+ 117595)
+ * Fixed potential missed SLI4 device initialization failure
+ conditions
+ * Fixed build warning on SLES11-SP1
+ * Fixed Back to back Flogis sent without a logo (CR 118272)
+ * Add model names for new hardware
+ * Fix bug with lpfc driver not updating the wwnn and wwpn after a
+ name change (CR 117517)
+ * New WWNs written successfully
+ * WWNs restored successfully
+ * Set the maximum SGE size to 0x80000000 when HBA does not have any
+ restriction
+ * Added New ASIC device link diagnostic test and link diagnostic
+ loopback test support (CR 117009)
+ * Fixed mix-and-match backward compatibility with the new ASIC
+ device management (CR 117009)
+ * Fix SLI3 and non-NPIV crashes with new extent code
+ * Fixed BSG failure in handling pass-through mailbox with multiple
+ external buffers (CR 117009)
+ * Fix CT command never completing on Big Endian system
+ * Refactor lpfc_sli4_alloc_extent some more
+ * Restore SLI4_PARAMETER mailbox can fail as nonerror functionality
+ * Do not post RPI Headers to SLI4 ports that support extents
+ * Fixed a race condition where the driver may invalidate an
+ RPI of an incoming PLOGI before responding to it (CR 117746)
+ * Refactor code in lpfc_sli4_alloc_extent
+ * Restore removed branch
+ * Fix port capabilities and get parameters mailbox calls
+ * Fix SLI2 crashes with new extent code
+ * Fixed EEH failure on PPC-P7 due to the platform required PCI
+ fundamental reset (CR 116845)
+ * Fixed incorrect size set into the sysfs binary file access
+ interface (CR 116845)
+ * Brought debugfs accessing New ASIC extents information into the
+ iDiag framework
+ * Fix mailbox processing to not overwrite mailbox status codes
+ * Changed enum members to start from a non-zero value (CR
+ 117009)
+ * Remove driver workaround for COMMON_ALLOC_RESOURCE_EXTENTS
+ endianness issue
+ * Fix lpfc_printf_log message numbers
+ * Implement debugfs support for resource extents
+ * iDiag method for read write bitset bitclear access to new ASIC
+ control registers (CR 114450)
+ * Call correct mailbox cleanup routine after extents are allocated
+ * Implement extent block list member cleanup and free memory
+ resources
+ * Fix memory leak in extent block lists
+ * Modified variables for XRIs to be unsigned variables
+
+Changes from 20110412 to 20110415
+
+ * Changed version number to 8.3.5.41_x1
+ * Fix crash in rpi clean when driver load fails
+ * iDiag method for dumping mailbox command from SLI4 issue
+ mailbox command routine (CR 114450)
+ * Implement support for nonembedded Extent mailbox IOCTLs.
+ * Update package README files to reflect first Phase4 kernel
+ and architecture support.
+ * Fixed race condition between driver multi-buffer session
+ reset and bsg job done.
+ * Fix Port Error detected during POST.
+ * Added protection on new ASIC specific mbox cmd passed to
+ other ASIC interface type.
+ * Added protection on non-embedded mailbox command for number
+ of external buffers.
+ * Implement extent block list member cleanup and free memory
+ resources.
+ * Fix memory leak in extent block lists.
+ * Modified variables for XRIs to be unsigned variables.
+ * Fix crash in rpi clean when driver load fails.
+ * Fixed race condition between driver multi-buffer session
+ reset and bsg job done (CR 116491)
+
+Changes from 20110329 to 20110412
+
+ * Changed version number to 8.3.5.41
+ * Fixed mask size for the wq_id mask
+ * Fixed mailbox command completion invoking BSG job_done while
+ holding spinlock (CR 117009)
+ * Fixed double byte swap on received RRQ
+ * Fixed no BSG data transfer size protection in mailbox command
+ pass-through path (CR 117007)
+ * Make adjustments for systems with Page Size Larger than 4k
+ * Fixed new ASIC mbox queue id collision with work queue id in
+ debugfs queue access (CR 116937)
+ * Fixed bug in BSG pass-through mailbox size check to non-embedded
+ external buffer
+
+Changes from 20110315 to 20110329
+
+ * Changed version number to 8.3.5.40
+ * Fixed ISO C90 build error
+ * Fix FCFI incorrect on received unsolicited frames (CR 115710)
+ * Add LOG_ELS message to NPIV LOGO
+ * Fixed Vports not sending FDISC after lips (CR 116365)
+ * Fix bug with lpfc driver causing a system crash during driver
+ unload (CR 116071)
+ * Added user-kernel shared sli-config mailbox header for new ASIC
+ device management
+
+Changes from 20110314 to 20110315
+
+ * Changed version number to 8.3.5.39
+ * Added iDiag driver support for debugfs SLI4 device doorbell
+ register access methods (CR 114450)
+ * Fixed driver sending FLOGI to a disconnected FCF (CR 115785)
+ * Fix RQ_CREATE version 1 failing (CR 115564)
+ * Fix FCFI incorrect on received unsolicited frames (CR 115710)
+ * Added iDiag driver support for debugfs SLI4 device display
+ host/port index in decimal (CR 114450)
+ * Do not limit RPI Count to a minimum of 64
+ * Do not override CT field in issue_els_flogi for SLI4 IF type 2
+ * Add Temporary RPI field to the ELS request WQE
+ * Allow SLI4 with FCOE_MODE not set for new SLI4 FC adapters
+ * Restrict driver to look at BAR2 or BAR4 only for if_type 0
+ * Added iDiag driver support for debugfs SLI4 device queue entry
+ access methods (CR 114450)
+
+Changes from 20110301 to 20110314
+
+ * Changed version number to 8.3.5.38
+ * Update copyright date for all changed files
+ * Merge from upstream: block target when port queuing
+ limit is hit
+ * Merge from upstream: force retry in queuecommand when port is
+ transitioning
+ * Fix bug with incorrect BLS Response to BLS Abort (CR 114886)
+
+Changes from 20110215 to 20110301
+
+ * Changed version number to 8.3.5.37
+ * Fixed crash when mailbox commands time out through BSG (CR
+ 115249)
+ * Modified existing driver code to handle PCI Link drop detection
+ failure (CR 110174)
+ * Fixed issue when BDEs cross 4K boundary
+ * Fixed an issue where OneConnect UCNA on PPC architecture was
+ unable to log in to Fabric
+ * Fix debugfs build error caused by undefined macro
+ * Add selective reset jump table entry
+
+Changes from 20110131 to 20110215
+
+ * Changed version number to 8.3.5.36
+ * Add new Queue create Mailbox versions for new ASIC
+ * Place LPFC driver module parameters in
+ /sys/module/<driver_name>/parameters (CR 26210 114646)
+ * Performance Hints support
+ * Add new driver interfaces for
encryption products (CR 114577)
+ * iDiag driver support debugfs queue information get (CR 114450)
+ * iDiag driver support debugfs PCI config space register bits
+ set/clear methods (CR 114450)
+ * iDiag driver support debugfs framework and read/write PCI config
+ space registers (CR 114450)
+
+Changes from 20110118 to 20110131
+
+ * Changed version number to 8.3.5.35
+ * Defined the right macro in the GNUmakefile for enabling lpfc
+ driver debugfs
+ * Fix rrq cleanup for vport delete (CR 113341)
+ * Configuration parameter lpfc_suppress_link_up is ignored for
+ SLI-4
+ * LOGO completion routine must invalidate both RPI and D_ID (CR
+ 113738)
+ * Do not take lock when clearing rrq active (CR 113252)
+ * Save IRQ level when taking host_lock in findnode_did
+ * Fixed hang in lpfc_get_scsi_buf_s4 (CR 113960)
+ * Fix xri lookup for received rrq
+ * Fixed missed setting of RRQ active for target aborted IOs
+ * Modified lpfc_delay_discovery implementation
+
+Changes from 20110107 to 20110118
+
+ * Changed version number to 8.3.5.34
+ * Fixed fdisc sent with invalid VPI (CR 111517)
+ * Print something out if the link_speed is not supported by this
+ adapter (CR 86577)
+ * Fixed UE error reported by OneConnect UCNA BE2 hba with
+ f/w 2.702.542.0 during reboot (CR 113607)
+
+Changes from 20101222 to 20110107
+
+ * Changed version number to 8.3.5.33
+ * Added support for clean address bit (CR 113373)
+ * Fixed XRI reuse issue (CR 111518)
+
+Changes from 20101216 to 20101222
+
+ * Changed version number to 8.3.5.32
+ * Unreg login when PLOGI received from logged in port (CR 113197)
+ * Fixed crashes for NULL vport dereference (CR 112630)
+
+Changes from 20101214 to 20101216
+
+ * Changed version number to 8.3.5.31
+ * Fix for kmalloc failures in lpfc_workq_post_event (CR 112404)
+ * Adjust payload_length and request_length for sli4_config mailbox
+ commands
+
+Changes from 20101129 to 20101214
+
+ * Changed version number to 8.3.5.30
+ * Turned parity and serr bits back on after performing sli4 board
+ reset PCI access
+ * Use VPI for ALL ELS commands and allocate RPIs at node creation
+ * Correct bit-definitions in SLI4 data structures
+
+Changes from 20101118 to 20101129
+
+ * Changed version number to 8.3.5.29
+ * Implement new SLI4 initialization procedures based on if_type
+ * Implement the FC and SLI async event handlers
+ * Fixed management command context setting
+ * Fixed panic in the __lpfc_sli_get_sglq
+
+Changes from 20101111 to 20101118
+
+ * Changed version number to 8.3.5.28
+ * Set heartbeat timer off by default
+ * Added support for ELS RRQ command (CR 110683)
+ * Init VFI and VPI for the physical port (Lancer SLI4 FC Support)
+
+Changes from 20101103 to 20101111
+
+ * Changed version number to 8.3.5.27
+ * Fixed a NULL pointer dereference during memory allocation failure
+ (CR 111997)
+ * Modified the return status of unsupported ELS commands
+ * Implement READ_TOPOLOGY mailbox command and add new speed definition
+ * Implement doorbell register changes for new hardware support
+ * Fix bug with remote SLI4 firmware download data not being
+ transmitted (CR 110883)
+ * Implement the new SLI 4 SLI_INTF register definitions
+ * Added PCI ID definitions for new hardware support
+ * Fix for failure to log into FDMI_DID after link bounce (CR
+ 110885)
+ * Updated driver to handle CVL after Nameserver PLOGI timeouts (CR
+ 111516)
+ * Prep for fc host dev loss tmo support
+ * Cleanup mailbox commands in mboxq_cmpl when CVL is received (CR
+ 111515)
+ * Add new SLI4 WQE support
+
+Changes from
20101006 to 20101103
+
+ * Changed version number to 8.3.5.26
+ * Prevent lock_irqsave from being called twice in a row
+ * Fix locking issue for security mailbox commands
+ * Fix regression error for handling SLI4 unsolicited ELS. Read
+ Timeout Value (RTV) (CR 110683)
+ * Properly handle devloss timeout during various phases of FIP
+ engine state transactions (CR 109955)
+ * Abort all I/Os and wait XRI exchange busy complete before
+ function reset ioctl in SLI4 driver unload (CR 110980)
+ * Fix regression error for handling ECHO response support
+ (CR 110683)
+ * Fix regression error for handling SLI4 unsolicited ELS. Read
+ Link Error Status Block (RLS) (CR 110683)
+ * Fix internal loopback causing kernel panic (CR 110438)
+ * Fixed crashes for NULL pnode dereference (CR 111422)
+
+Changes from 20100928 to 20101006
+
+ * Changed version number to 8.3.5.25
+ * Fixed a race condition that can cause driver to send FDISC to an
+ uninitialized VPI (CR 111250)
+ * Fix mailbox handling for UNREG_RPI_ALL case (CR 111249)
+
+Changes from 20100910 to 20100928
+
+ * Changed version number to 8.3.5.24
+ * Used PCI function reset ioctl mbox command to clean up CNA during
+ driver unload (CR 110521)
+ * Fixed crashes for BUG_ONs hit in the lpfc_abort_handler (CR
+ 110994)
+ * Fail I/O when adapter detects a lost frame and target reports a
+ check condition (CR 110825)
+ * Fixed abort WQEs for FIP frames
+
+Changes from 20100901 to 20100910
+
+ * Changed version number to 8.3.5.23
+ * Added unreg all rpi mailbox command before unreg vpi
+ * Make all error values negative (CR 99986)
+ * Clean up duplicate code in lpfc_els_retry
+ * Fixed circular spinlock dependency between low-level driver and
+ SCSI midlayer (CR 110305)
+
+Changes from 20100824 to 20100901
+
+ * Changed version number to 8.3.5.22
+ * Fixed FC-AL bit set issue in FLOGI rejected by FC switch in NPV
+ setup (CR 110237)
+ * Fixed cases of skipping possible roundrobin failover of multiple
+ eligible FCFs (CR 110048)
+ * Fixed driver not able to unregister unused FCF upon devloss
+ timeout to all nodes (CR 109954)
+ * Fix bug with external loopback testing not becoming ready (CR
+ 109853)
+ * Fixed heartbeat timeout during controller pause test
+
+Changes from 20100811 to 20100824
+
+ * Changed version number to 8.3.5.21
+ * Added support for OneSecure firmware download authentication and
+ SLI authentication (CR 106981)
+ * Add support for bsg MBX_SLI4_CONFIG (CR 107295)
+ * Remove owner field from attribute initialization
+ * Replaced some spin_lock_irqsave with spin_lock_irq
+ * Fixed lpfc_initial_flogi not returning failure in one failure
+ condition
+ * Fixed stray state update in case a new FCF matched in-use FCF
+ * Treated firmware matching FCF property with different index as
+ error condition
+
+Changes from 20100730 to 20100811
+
+ * Changed version number to 8.3.5.20
+ * Move Unload flag earlier in vport delete (CR 103830)
+ * Fix for IOCB leak on FDISC completion (CR 106902)
+ * Start looking at the return code for fc_block_scsi_eh
+ * Call the lpfc_drain_txq only for SLI4 hba (CR 105946)
+
+Changes from 20100721 to 20100730
+
+ * Changed version number to 8.3.5.19
+ * Remove whitespace at end of line
+ * Change log message 0318 from an error to a warning as it is not
+ an error
+ * Add Security Crypto support to CONFIG_PORT mailbox command
+ * Switch call from memcpy_toio to __write32_copy to prevent
+ unaligned 64 bit copy
+ * Fix nodelist not empty message on console after rmmod (CR 105401)
+ * Fix failure to roundrobin on all
available FCFs when FLOGI to
+ in-use FCF rejected by switch (CR 105402)
+ * Fixed heartbeat timeout during fabric reconfiguration
+
+Changes from 20100709 to 20100721
+
+ * Changed version number to 8.3.5.18
+ * Fixed failure to recover from back-to-back Clear Virtual Link
+ with a single FCF (CR 104974)
+ * Clear VFI_REGISTERED flag when UNREG_VFI completes
+ * Ignore the failure of REG_VPI mailbox with UPD bit set on older
+ OneConnect firmware (CR 104912)
+ * Use active ndlp for ct response after cable swap (CR 104032)
+ * Properly set ndlp after cable swap (CR 104706)
+ * Add support for UPD bit of REG_VPI mailbox command (CR
+ 104912)
+ * Properly send ct response data with sli4 (CR 103765)
+
+Changes from 20100707 to 20100709
+
+ * Changed version number to 8.3.5.17
+ * Fix endian conversion for BlockGuard in IOCB response
+ * Fix driver discovery issue where driver is unable to
+ discover a target after Eveready back link bounce test (CR
+ 104654)
+
+Changes from 20100625 to 20100707
+
+ * Changed version number to 8.3.5.16
+ * Fixed BlockGuard endian conversion problem for supporting PowerPC
+ EEH
+ * Fixed VLAN ID 0xFFF set to reg_fcfi mailbox command on FCF empty
+ FCF VLAN bitmap (CR 104503)
+
+Changes from 20100608 to 20100625
+
+ * Changed version number to 8.3.5.15
+ * Fix race condition causing FLOGI issued from dual processes
+ (CR 104288)
+ * Fix bug with ct response data not being sent with sli4 (CR
+ 103765)
+ * Fix RoundRobin FCF failover due to mis-interpretation of kernel
+ find_next_bit (CR 103435)
+ * Enhance round-robin FCF failover algorithm to re-start on new
+ FCF async event
+ * Clear Ignore Reg Login Flag when purging mailbox queue (CR
+ 104131)
+ * Fix for ELS commands stuck on txq (CR 103535)
+ * Add target queuedepth module parameter
+ * Fix bug with unsolicited CT event command not setting a flag (CR
+ 103648)
+
+Changes from 20100511 to 20100608
+
+ * Changed version number to 8.3.5.14
+ * Fix LPFC_READ_REV mailbox command for BE
+ * Added driver Advanced Error Reporting (AER) support for SLI4
+ devices
+ * Fixed FCF discovery failure after swapping FCoE port on
+ Brocade8000 FCoE switch (CR 102947)
+ * Fix RPI leak after a VFC shut/noshut storm (CR 102507)
+ * Allow enabling MSI-X interrupt with fewer vectors than requested
+ on SLI4 device
+ * Removed NOOP condition checking logic
+ * Prevent point to point discovery on a FCoE UCNA (CR 102844)
+ * Prevent unregistering of unused FCF when FLOGI is pending (CR
+ 102642)
+ * Restrict scsi buffer allocation (CR 102643)
+ * Fixed accounting of allocated SCSI buffers when post sgl fails (CR
+ 102644)
+
+Changes from 20100504 to 20100511
+
+ * Changed version number to 8.3.5.13
+ * Remove module ref counting for active bsg requests
+ * Add PCI-ID for Gb PCIe Shared I/O Fibre Channel Adapter
+ * Only send ADISCs to Targets (CR 101933)
+ * Fix memory leak (CR 102354)
+ * Add support for SLER errors
+ * Fix SLI4 RPI leaks (CR 102369)
+ * Change default FCF max receive size (CR 102087)
+ * Remove unused Interrupt Notification Block Code
+ * Fix lpfc_sli_issue_wait call to put iocb on txcmpq that should
+ have been removed
+ * Fixed Illegal State Transition (CR 10202)
+ * Additional check-in for high vport fix (CR 99636)
+
+Changes from 20100429 to 20100504
+
+ * Changed version number to 8.3.5.12
+ * Check whether index is within bounds before testing vport
+ * Change positive error return into negative
+ * Fixed Illegal State Transition after devloss tmo (CR 102025)
+ * Fixed Illegal State Transition
(CR 10202)
+ * Fixed heartbeat timeout failure (CR 10142)
+
+Changes from 20100427 to 20100429
+
+ * Changed version number to 8.3.5.11
+ * Fix bug with dma maps on nested scsi objects (CR 101174, 101425)
+
+Changes from 20100407 to 20100427
+
+ * Changed version number to 8.3.5.10
+ * Add module ref counting to prevent module removal with active bsg
+ requests (CR 101863)
+ * Fix swapping target's FC ports causing I/O failure due to target
+ rejecting PLOGI (CR 100464)
+ * Reduce hba_queue_depth of SLI4 HBAs to account for XRIs reserved
+ for ELS commands
+ * Fixed hang on hba resets (CR 100032)
+
+Changes from 20100311 to 20100407
+
+ * Changed version number to 8.3.5.9
+ * Fix for lpfc_get_hba_info to return correct value for max vpi
+ count
+ * Fix misspelled variable name in BSG interface API
+ * Add missing management support for MBX_READ_EVENT_LOG
+ * Check if mailbox command succeeded before copying data
+ * Make sure driver sets the "FCP Initiator" flag for remote
+ initiator ports (CR 100554)
+ * Add checks for lpfc_mbuf_alloc failure and sli4 dump zero bde
+ size
+ * Reset HBA when UNREG_VPI fails with busy status (CR 100218)
+ * Prevent failure of unreg_vpi mailbox command (CR 100219)
+ * Add additional warning message and refuse a mailbox command if
+ mgmt is blocked
+ * Add support for missing BSG mailbox commands dump, update cfg
+ and event log status
+ * Fix management regression bug with BSG BIU mailbox command not
+ copying application data
+ * Fixed hang on link transitions (CR 99675)
+ * Change mailbox commands to use SLI4 mailbox size instead of
+ PAGE_SIZE
+ * Fix UE and other issues seen after HBA resets with I/O (CR 97910)
+ * Fix crash with els reject data being formed (CR 99991)
+ * Rework the logical link speed (CR 99148 99149)
+ * Add ratov and edtov to the reg_vfi
+ * Enable required NPIV by default
+ * Fix crash while doing IO to a hornet
+ * Fix functionality regression for new PDE style for BlockGuard
+ * Prevent log message 1801 during vport delete
+ * Fix functionality regression in logical link speed event (CR 99148
+ 99149)
+ * Fix hang on ia64 systems
+
+Changes from 20100226 to 20100311
+
+ * Changed version number to 8.3.5.7
+ * Fix bug with copy in extended data to the extended buffer
+ * Add support for extended mailbox commands
+ * Enable NPIV by default
+ * Separate logging New FCF Found and FCF Parameter Modified
+ messages
+ * Fix devloss timeout running Hazard with AER error injection every
+ 30-60 seconds (CR 97468)
+ * Fix driver issue where the driver is unable to discover the
+ paths of luns on physical port of legacy adapter after uplink
+ failover on VC-FC module (CR 99120)
+ * Fix sli4 params read
+ * Add support for PCI loopback testing
+ * Made the FCF related informational log message only logged to
+ FCoE devices
+ * Made fast FCF failover randomly choose from eligible FCF records
+ with equal probability
+ * Fix Big Endian sli4 support
+
+Changes from 20100212 to 20100226
+
+ * Changed version number to 8.3.5.6
+ * Added round robin FCF failover on initial or FCF rediscovery
+ FLOGI failure
+ * Fixed issue where the driver was unable to discover the paths of
+ vport luns on HBAs after uplink failover (CR 99070)
+ * Changed magic numbers to manifests for suppress_link_up param
+ * Remove unused variable from lpfc_nodev_tmo_show
+ * Added CEE bridge chip management support
+ * LPSe12002-ML1-E EmulexSecure Fibre Channel Adapter changes for
+ the encryption IOCB special handling
+ * Add logging messages for critical errors (CR 98726)
+ *
Fixed not clearing LPFC_DRIVER_ABORTED bit in aborted FCP
+ cmdiocb->iocb_flag
+ * Removed fast FCF failover fabric name matching to allow failover
+ to any fit FCF
+ * Fixed SGL leak when XB bit is set (CR 98736)
+ * Added fast FCF failover in response to FCF DEAD event on current
+ FCF record
+ * Made SLI4 driver report ulpWord[4] status on aborted iocb same
+ as SLI3 (CR 98963)
+ * Fixed memory allocation failure in the read_sparam mailbox
+ handling (CR 98738)
+ * Fixed memory allocation issue in lpfc_config_port_post (CR 98739)
+ * Fixed vport discovery failure after CVL handling
+ * Fixed NULL pointer dereference when kernel is out of memory (CR
+ 98587)
+
+Changes from 20100202 to 20100212
+
+ * Changed version number to 8.3.5.5
+ * Relax Event Queue field checking to allow non-zero minor codes
+ * Complete fix for vport->fc_flag set outside of shost host_lock
+ * Implement the PORT_CAPABILITIES mailbox command
+ * Use asynchronous mailbox command for requesting FCF recovery
+ * LPSe12002-ML1-E EmulexSecure Fibre Channel Adapter changes for
+ delayed link up and fix a missing jump table initialization
+ * Fixed tab indent problem from previous commit
+ * Corrected comment to reflect lpfc_suppress_link_up instead of
+ lpfc_init_link
+ * Add Fast FCF failover support
+ * Change sysfs parameter macros to be uint instead of int (CR
+ 98283)
+ * Add support for new SLI features
+ * Added init_vpi mailbox command before re-registering VPI
+ * Add support for PCI BAR region 0 if BAR0 is a 64 bit register
+ * Call pci_save_state() after pci_restore_state() call to cope with
+ kernel change
+
+Changes from 20100112 to 20100202
+
+ * Changed version number to 8.3.5.4
+ * Remove DHCHAP support (r7328)
+ * Fix bug with bsg issue mbox not handling a busy mailbox status
+ * Submit abort WQE to same work queue as the command WQE
+ * Add missing function comments, fix comment errors
+ * Add missing functionality LPFC_BSG_VENDOR_GET_MGMT_REV
+ * Add missing functionality LPFC_BSG_VENDOR_MBOX
+ * Add timeout handler for EVT jobs, goes with r7236
+ * Add missing functionality LPFC_BSG_VENDOR_DIAG_MODE,
+ LPFC_BSG_VENDOR_DIAG_TEST
+ * Fix bug with mgmt response resetting cmdiocbq for checkin r7235
+ * Add missing functionality LPFC_BSG_VENDOR_SEND_MGMT_RESP
+ * Modify FC_BSG_RPT_ELS to operate asynchronously
+ * Fix bug with kzmalloc being called with ct_ev_lock held
+ * Falcon changes for IOCB command define values
+ * Modify LPFC_BSG_VENDOR_SET_CT_EVENT adding driver private data
+ * Modify FC_BSG_RPT_CT to operate asynchronously and handle a
+ timeout condition
+ * Update code infrastructure to add support for missing SGIO
+ functionality
+ * Do not check class specific parameters for FLOGI (CR 97788)
+
+Changes from 20091221 to 20100112
+
+ * Changed version number to 8.3.5.3
+ * Merge change from upstream scsi-misc-2.6 git tree to move blocking
+ scsi_eh code from FC LLDs to SCSI FC transport class.
+ * Clean up ELS commands when unregistering unused ELS commands (CR
+ 97048)
+ * Fix an issue in SLI4 when aborted els cmd's xri (SGL) was released
+ before HBA's abort XRI event (CR 97288)
+ * Support for Nport ID change after Clear Virtual Link (CR 97188)
+ * Fixed unsolicited CT command response on SLI4
+ * Add QOS Link Speed info (CR 96665)
+
+Changes from 20091023 to 20091221
+
+ * Changed version number to 8.3.5.2
+ * Fix Dead FCF not triggering discovery of other FCF (CR 97048
+ 97049)
+ * Fix vport->fc_flag set outside of lock caused I/O failure
+ during pci-func reset (CR 96975)
+ * Fix driver tries to process failed read fcf record
+ * Fixed hbq buff adds to receive queue
+ * Fixed hbq buff only for sli4
+ * Fix multi-frame sequence response frames go to wrong DID (CR
+ 96915)
+ * Fixed fc header seq_count checks (CR 97010)
+ * Fixed adapter reset and offline/online stress test failing with
+ I/O errors (CR 92904)
+ * Fix vport fails to register VPI after devloss timeout (CR 96740)
+ * Blocked all SCSI I/O requests from midlayer until target
+ rediscovery during EEH (CR 95889)
+ * Fix PCI device vendor for EmulexSecure HBA
+ * Fixed in-band remote firmware download (CR 96090)
+ * Made OneCommand CNA set up and use single FCP EQ only under
+ INTx interrupt mode (CR 96645)
+ * Fix vport not logging out when being deleted (CR 96339)
+ * Add new READ_FCF_RECORD failure code
+ * Remove temporary OneConnect CNA PCI Device IDs
+ * Fixed panic when unmapping luns (CR 94833)
+ * Added handling of ELS request for Reinstate Recovery Qualifier
+ (RRQ)
+ * Phase out use of ONLINE registers
+ * Made ABTS WQE go to the same WQ as the WQE to be aborted (CR
+ 96110)
+ * Add new READ_FCF_RECORD failure code
+ * Fixed Dead FCoE port after creating vports (CR 95449)
+ * Fix driver crash when fcauthd is started (CR 95584)
+ * Fix hbq pointer corruption (CR 95313)
+ * Fixed: panic during pci-hot-plug testing (CR 95246)
+ * Fix reg_vfi and reg_vpi routines to use little endian wwn
+ * Change default TC BE3 driver identification string to
+ include model number
+ * Migrate LUN queue depth ramp up code to scsi mid-layer
+ * Fix for lost MSI interrupt (CR 95404)
+ * Fix vport keep-alive does not contain the correct WWN
+ * Ported the fix on reporting of max_vpi to upper layer
+ * Added PCI read after EQarm doorbell PCI write in INTx mode to
+ flush PCI pipeline
+ * Fix driver unable to discover targets after FCF DEAD
+
+Changes from 8.3.5 to 20091023
+
+ * Changed version number to 8.3.5.1
+ * Fixed: lpfc_unreg_vfi failure after devloss timeout.
Fixed RPI
+ bit leak (CR 94542)
+ * Fix crash due to list corruption while unloading driver (CR
+ 94889)
+ * Fix Zeroed frame on wire after FLOGI (CR 94950)
+ * Fix Vport does not rediscover after FCF goes away
+ * Fixed the call from lpfc_new_scsi_buf_s3 to use
+ lpfc_release_scsi_buf_s3
+ * Fix memory leak found in lpfc_sli4_read_rev
+ * Fixed total_scsi_bufs counting
+ * Fix CVL received on Port 1 not processed by driver
+ * Fixed locking issue
+ * Added PCI ID for LPSe12002-ML1-E EmulexSecure Fibre Channel Adapter
+ * Add DHCHAP support
+ * Stop and abort all I/Os on HBA for AER uncorrectable non-fatal
+ error handling
+ * Made AER sysfs entry point return "Operation not permitted" to
+ OneConnect HBAs
+ * Removed the use of the locally defined FC transport layer related
+ macros
+ * SLI4_FIXED: Fixed Check for aborted els command
+ * Fix bug with missing new line characters in log messages (CR
+ 94092)
+ * Fix bug with probe_one routines not putting the Scsi_Host back
+ upon error (CR 94088)
+ * Fixed discovery failure during quick link bounce
+ * Add support for Clear Virtual Link
+ * Handling unsolicited CT exchange sequence abort (lower level
+ driver part)
+ * Fix switch name not used in the FCF record for FCoE HBAs
+ * SLI4: Enabled SLI4 HBA UE error polling error-condition action
+ code
+ * Fix Vports unable to be created on Port2 Tigershark HBA (CR
+ 90670)
+ * Implemented persistent port disable functionality
+ * Rewrite lpfc_sli4_scmd_to_wqidx_distr() to handle counter
+ rollover cleanly
+ * Added new state in link_state sysfs attribute for persistent link
+ down
+ * Fix for firmware dump failure (CR 90533)
+ * Use PCI config space register to determine SLI rev of the HBA
+ * Fixed mailbox timeout during HBA reset
+ * Modified resume_rpi mailbox data structure according to new spec
+ * Remove always true conditional in lpfc_sli_read_serdes_param()
+ * SLI4: Do not issue mailbox command when LPFC_HBA_ERROR with
+ MBX_POLL mode
+ * Fix accumulated total length not being filled in on SLI4
+ unsolicited IOCBs
+ * Fixed SLI3 inband remote management (CR 91042)
+ * Fix FCoE Parameters in region 23 not being read correctly
+ * Wait for HBA POST completion before checking Online and UE
+ registers (SLI4)
+ * Fixed static vport creation on SLI4 HBAs
+ * Fixed vport create not to send init_vpi before REG_VFI
+ * Remove cast when using pci_read_config_dword() to access
+ LPFC_SLIREV_CONF_WORD
+ * Fix use of first_iocbq pointer before checking for NULL
+
+Changes from 8.3.4 to 8.3.5
+
+ * Update driver version to 8.3.5
+ * Fixed panic during HBA reset.
+ * Fixed FCoE event tag passed in resume_rpi.
+ * Fix out of order ELS commands
+ * Fix UNREG_VPI failure on extended link pull
+ * Fixed crash while processing unsolicited FC frames.
+ * Clear retry count in the delayed ELS handler
+ * Fixed discovery failure during VLAN and quick link bounce.
+ * Fix mask size for CT field in WQE
+ * Fix VPI base not used when unregistering VPI on port 1.
+ * Fix UNREG_VPI mailbox command to unreg the correct VPI
+ * Fixed Check for aborted els command
+ * Fix error when trying to load driver with wrong firmware on FCoE HBA.
+ * Fix bug with probe_one routines not putting the Scsi_Host back upon + error + * Add support for Clear Virtual Link Async Events + * Add support for unsolicited CT exchange sequence abort + * Add 0x0714 OCeXXXXX PCI ID + * Fix crash when "error" is echoed to board_mode sysfs parameter + * Fix FCoE Parameter parsing in regions 23 + * Fix driver crash when creating vport with large number of targets on + SLI4 + * Fix bug with npiv message being logged when it is not supported by + the adapter + * Fix a potential dereferencing mailbox structure after free bug + * Fix firmware crash after vport create with high target count + * Error out requests to set board_mode to warm restart via sysfs on + SLI4 HBAs + * Fix a memory corruption issue during GID_FT IO prep + * Fix failed to allocate XRI message is not a critical failure + * Update and fix formatting in some log messages + * Removed the use of the locally defined FC transport layer related + macros + * Check the rsplen in lpfc_handle_fcp_err function before using rsplen + * Add AER support. + * Fixed Panic/Hang when using polling mode for fcp commands + * Added support for Read_rev mbox bits indicating FIP mode of HBA + * Optimize performance of slow-path handling of els responses + * Add code to cleanup orphaned unsolicited receive sequences + * Fixed Devloss timeout when multiple initiators are in same zone + +Changes from 8.3.3 to 8.3.4 + * Update driver version to 8.3.4 + * Remove spaces before newlines in several log messages + * Add bsg (SGIOv4) support for ELS/CT support + * NPIV vport fixes + * Fix a pair of FCoE issues + * Various SLI3 fixes + * Consistently Implement persistent port disable + * Various SLI4 fixes + + diff --git a/drivers/scsi/lpfc/GNUmakefile b/drivers/scsi/lpfc/GNUmakefile new file mode 100644 index 000000000000..afbd23dcf4cf --- /dev/null +++ b/drivers/scsi/lpfc/GNUmakefile @@ -0,0 +1,95 @@ +#/******************************************************************* +# * This file is part of the Emulex Linux Device Driver for * +# * Fibre Channel Host Bus Adapters. * +# * Copyright (C) 2020 Broadcom. All Rights Reserved. The term * +# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * +# * Copyright (C) 2004-2009 Emulex. All rights reserved. * +# * EMULEX and SLI are trademarks of Emulex. * +# * www.broadcom.com * +# * * +# * This program is free software; you can redistribute it and/or * +# * modify it under the terms of version 2 of the GNU General * +# * Public License as published by the Free Software Foundation. * +# * This program is distributed in the hope that it will be useful. * +# * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * +# * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * +# * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * +# * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * +# * TO BE LEGALLY INVALID. See the GNU General Public License for * +# * more details, a copy of which can be found in the file COPYING * +# * included with this package. 
* +# *******************************************************************/ +###################################################################### + +KERNELVERSION ?= $(shell uname -r) +BASEINCLUDE ?= /lib/modules/$(KERNELVERSION)/build +DRIVERDIR ?= $(PWD) +DEBUG_FS = $(shell grep CONFIG_DEBUG_FS=y $(BASEINCLUDE)/.config) + +ifeq ($(DEBUG_FS),CONFIG_DEBUG_FS=y) + EXTRA_CFLAGS += -DCONFIG_SCSI_LPFC_DEBUG_FS + export EXTRA_CFLAGS +endif + +ifneq ($(GCOV),) + EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage + EXTRA_CFLAGS += -O0 + export EXTRA_CFLAGS +endif + +clean-files += Modules.symvers Module.symvers Module.markers modules.order + +export clean-files + +$(info GNUmakefile setting EXTRA_CFLAGS to $(EXTRA_CFLAGS)) +default: + $(MAKE) -C $(BASEINCLUDE) M=$(DRIVERDIR) CONFIG_SCSI_LPFC=m modules + +brcmfcoe: + @cp lpfc_attr.c brcmfcoe_attr.c + @cp lpfc_bsg.c brcmfcoe_bsg.c + @cp lpfc_ct.c brcmfcoe_ct.c + @cp lpfc_debugfs.c brcmfcoe_debugfs.c + @cp lpfc_els.c brcmfcoe_els.c + @cp lpfc_hbadisc.c brcmfcoe_hbadisc.c + @cp lpfc_init.c brcmfcoe_init.c + @cp lpfc_mbox.c brcmfcoe_mbox.c + @cp lpfc_mem.c brcmfcoe_mem.c + @cp lpfc_nportdisc.c brcmfcoe_nportdisc.c + @cp lpfc_scsi.c brcmfcoe_scsi.c + @cp lpfc_sli.c brcmfcoe_sli.c + @cp lpfc_vport.c brcmfcoe_vport.c + @cp lpfc_nvme.c brcmfcoe_nvme.c + @cp lpfc_nvmet.c brcmfcoe_nvmet.c + $(MAKE) -C $(BASEINCLUDE) M=$(DRIVERDIR) CONFIG_SCSI_BRCMFCOE=m BUILD_FLAGS=-DBUILD_BRCMFCOE modules + @rm -f brcmfcoe_attr.c + @rm -f brcmfcoe_bsg.c + @rm -f brcmfcoe_ct.c + @rm -f brcmfcoe_debugfs.c + @rm -f brcmfcoe_els.c + @rm -f brcmfcoe_hbadisc.c + @rm -f brcmfcoe_init.c + @rm -f brcmfcoe_mbox.c + @rm -f brcmfcoe_mem.c + @rm -f brcmfcoe_nportdisc.c + @rm -f brcmfcoe_scsi.c + @rm -f brcmfcoe_sli.c + @rm -f brcmfcoe_vport.c + @rm -f brcmfcoe_nvme.c + @rm -f brcmfcoe_nvmet.c + + +install: + @rm -f /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/lpfc.ko + install -d /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/lpfc + install -c lpfc.ko /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/lpfc + depmod -a + +brcmfcoeinstall: + @rm -f /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/brcmfcoe.ko + install -d /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/brcmfcoe + install -c brcmfcoe.ko /lib/modules/$(KERNELVERSION)/kernel/drivers/scsi/brcmfcoe + depmod -a + +clean: + $(MAKE) -C $(BASEINCLUDE) M=$(DRIVERDIR) CONFIG_SCSI_LPFC=m clean diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 092a971d066b..54e30da2b917 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,8 +1,8 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * -# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * +# * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * +# * Broadcom refers to Broadcom Inc. and/or its subsidiaries. * # * Copyright (C) 2004-2012 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. 
* # * www.broadcom.com * @@ -21,16 +21,116 @@ # *******************************************************************/ ###################################################################### -ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage -ccflags-$(GCOV) += -O0 +SUBLEVEL = $(shell echo ${KERNELVERSION} | \ +sed -e 's/[0-9]*.[0-9]*.\([0-9]*\).[0-9]*.*/\1/') +EXTRAVERSION = $(shell echo ${KERNELRELEASE} | \ +sed -e 's/[0-9]*.[0-9]*.[0-9]*.\([0-9]*\).*/\1/') +MAJORVERSION = $(shell echo ${KERNELVERSION} | \ +sed -e 's/\([0-9]*\).*/\1/') +MINORVERSION = $(shell echo ${KERNELVERSION} | \ +sed -e 's/[0-9]*.\([0-9]*\).*/\1/') -ifdef WARNINGS_BECOME_ERRORS -ccflags-y += -Werror +EXTRA_CFLAGS += -Werror -DKERNEL_MAJOR=${MAJORVERSION} -DKERNEL_MINOR=${MINORVERSION} + +ifneq ($(GCOV),) + EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage + EXTRA_CFLAGS += -O0 endif +# +# DISTRO KERNELVERSION SUBLEVEL EXTRAVERSION +# sles15-sp3 5.3.18-37 18 37 +# sles15-sp2 5.3.18-22 18 22 +# sles15-sp1 4.12.14-195 14 195 +# sles15-sp0 4.12.14-23 14 23 +# sles12-sp5 4.12.14-100 14 100 +# sles12-sp4 4.12.14-94.4 14 94 +# rhel8.4 4.18.0-259.el8 Alpha-1.0 +# rhel8.3 4.18.0-240.el8 +# rhel8.2 4.18.0-193.el8 +# rhel8.1 4.18.0-147.el8 +# rhel8.0 4.18.0-80.el8 +# + +ifeq (,$(findstring -DBUILD_BRCMFCOE,$(BUILD_FLAGS))) +ifeq ("$(wildcard /etc/debian_version)","") + EXTRA_CFLAGS += -DBUILD_NVME +endif +endif + +# This will pick out a Citrix kernel, xs5.6 / xs6.0 / xs6.1 +EXTRAXSINFO = $(shell echo ${KERNELVERSION} | \ +grep 'xs[156].[016]' | sed -e 's/.*xs.*/xs/') + +ifeq ($(EXTRAXSINFO),xs) + EXTRA_CFLAGS += -DBUILD_CITRIX_XS +endif +EXTRAXSINFO = $(shell echo ${KERNELVERSION} | \ +grep 'el8' | sed -e 's/.*el8.*/el8/') + +ifeq ($(EXTRAXSINFO),el8) + EXTRA_CFLAGS += -DBUILD_RHEL8 + RHEL8_NZ = $(shell echo "${KERNELVERSION} 4.18.0-83.el8.x86_64" | \ + tr " " "\n" | sort -V | head -n1) +ifeq ($(RHEL8_NZ), 4.18.0-83.el8.x86_64) + EXTRA_CFLAGS += -DBUILD_RHEL8_NZ +endif +ifeq ($(shell test $(EXTRAVERSION) -ge 211; echo $$?), 0) + EXTRA_CFLAGS += -DBUILD_RHEL83 +endif +# RHEL 8.4 adds NVME1+ Addendum +ifeq ($(shell test $(EXTRAVERSION) -ge 259; echo $$?), 0) + EXTRA_CFLAGS += -DBUILD_NVME_PLUS +endif +endif + +ifneq ("$(wildcard /etc/os-release)","") + SLES15SP = $(shell grep -q 'sles:15:sp' /etc/os-release && echo 1) +ifeq ($(SLES15SP),1) + EXTRA_CFLAGS += -DBUILD_SLES15SP +endif + SLES12SP5 = $(shell grep -q 'sles:12:sp5' /etc/os-release && echo 1) +ifeq ($(SLES12SP5),1) + EXTRA_CFLAGS += -DBUILD_SLES12SP5 +endif + SLES15SP2 = $(shell grep -q 'sles:15:sp2' /etc/os-release && echo 1) +ifeq ($(SLES15SP2),1) + EXTRA_CFLAGS += -DBUILD_SLES15SP2 -DBUILD_NVME_PLUS +endif + SLES15SP3 = $(shell grep -q 'sles:15:sp3' /etc/os-release && echo 1) +ifeq ($(SLES15SP3),1) + EXTRA_CFLAGS += -DBUILD_SLES15SP2 -DBUILD_NVME_PLUS +endif +endif + +# This will pick out an openEuler kernel +EXTRAOE1INFO = $(shell echo ${KERNELVERSION} | grep oe1 | \ + sed -e 's/.*oe1.*/oe1/') +ifeq ($(EXTRAOE1INFO),oe1) +ifneq ("$(wildcard /etc/os-release)","") +OE1_20 = $(shell grep -q 'VERSION="20.' 
/etc/os-release && echo 1) +ifeq ($(OE1_20),1) + EXTRA_CFLAGS += -DBUILD_OE1_20 +endif +endif +endif + +# This will pick out a xen kernel +EXTRAXENINFO = $(shell echo ${KERNELVERSION} | \ +grep 'xen' | sed -e 's/.*xen.*/xen/') + +EXTRA_CFLAGS += ${BUILD_FLAGS} + obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \ lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \ lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \ - lpfc_nvme.o lpfc_nvmet.o + lpfc_nvme.o lpfc_nvmet.o lpfc_auth.o + +obj-$(CONFIG_SCSI_BRCMFCOE) := brcmfcoe.o + +brcmfcoe-objs := brcmfcoe_mem.o brcmfcoe_sli.o brcmfcoe_ct.o brcmfcoe_els.o brcmfcoe_hbadisc.o \ + brcmfcoe_init.o brcmfcoe_mbox.o brcmfcoe_nportdisc.o brcmfcoe_scsi.o brcmfcoe_attr.o \ + brcmfcoe_vport.o brcmfcoe_debugfs.o brcmfcoe_bsg.o \ + brcmfcoe_nvme.o brcmfcoe_nvmet.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 691acbdcc46d..3cf85cc2995e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -22,6 +22,7 @@ *******************************************************************/ #include <scsi/scsi_host.h> +#include <linux/version.h> #include <linux/ktime.h> #include <linux/workqueue.h> @@ -29,6 +30,15 @@ #define CONFIG_SCSI_LPFC_DEBUG_FS #endif +/* define XS6.5 and XS7.0 kernel version since + * XenServer build environment does not pass + * these values in + */ +#if defined(XS_API) +#define KERNEL_MAJOR 3 +#define KERNEL_MINOR 10 +#endif + struct lpfc_sli2_slim; #define ELX_MODEL_NAME_SIZE 80 @@ -55,6 +65,7 @@ struct lpfc_sli2_slim; #define LPFC_MAX_SG_TABLESIZE 0xffff #define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ #define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */ +#define LPFC_MAX_ED_SLI4_SEG_CNT_DIF 256 /* sg element count for ExternalDIF */ #define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */ @@ -69,6 +80,9 @@ struct lpfc_sli2_slim; #define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */ #define LPFC_MIN_TGT_QDEPTH 10 #define LPFC_MAX_TGT_QDEPTH 0xFFFF +#define LPFC_MIN_LUN_QDEPTH 1 +#define LPFC_DEF_LUN_QDEPTH 64 +#define LPFC_MAX_LUN_QDEPTH 512 #define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data collection. 
*/ @@ -114,6 +128,12 @@ struct lpfc_sli2_slim; #define LPFC_MBX_NO_WAIT 0 #define LPFC_MBX_WAIT 1 +#define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005 +#define LPFC_PORT_CFG_NAME "/cfg/port.cfg" + +#define lpfc_rangecheck(val, min, max) \ + ((uint)(val) >= (uint)(min) && (val) <= (max)) + enum lpfc_polling_flags { ENABLE_FCP_RING_POLLING = 0x1, DISABLE_FCP_RING_INT = 0x2 @@ -207,8 +227,7 @@ typedef struct lpfc_vpd { } rev; struct { #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd3 :19; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 :20; /* Reserved */ uint32_t rsvd2 : 3; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ @@ -230,8 +249,7 @@ typedef struct lpfc_vpd { uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t rsvd2 : 3; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd3 :19; /* Reserved */ + uint32_t rsvd3 :20; /* Reserved */ #endif } sli3Feat; } lpfc_vpd_t; @@ -262,13 +280,15 @@ struct lpfc_stats { uint32_t elsRcvPRLI; uint32_t elsRcvLIRR; uint32_t elsRcvRLS; - uint32_t elsRcvRPS; uint32_t elsRcvRPL; uint32_t elsRcvRRQ; uint32_t elsRcvRTV; uint32_t elsRcvECHO; uint32_t elsRcvLCB; uint32_t elsRcvRDP; + uint32_t elsRcvAUTH; + uint32_t elsRcvFPIN; + uint32_t elsRcvEDC; uint32_t elsXmitFLOGI; uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; @@ -282,6 +302,8 @@ struct lpfc_stats { uint32_t elsXmitFARPR; uint32_t elsXmitACC; uint32_t elsXmitLSRJT; + uint32_t elsXmitRDF; + uint32_t elsXmitEDC; uint32_t frameRcvBcast; uint32_t frameRcvMulti; @@ -335,6 +357,52 @@ enum hba_state { LPFC_HBA_ERROR = -1 }; +/* Structure used to identify all devices controlled + * by the External DIF logic. + */ +struct lpfc_external_dif_support { + struct list_head listentry; + struct lpfc_name portName; + uint64_t lun; + uint16_t sid; + uint8_t dif_info; + uint8_t state; +#define LPFC_EDSM_IGNORE 0 +#define LPFC_EDSM_INIT 1 +#define LPFC_EDSM_NEEDS_INQUIRY 2 +#define LPFC_EDSM_PATH_STANDBY 3 +#define LPFC_EDSM_PATH_READY 4 +#define LPFC_EDSM_PATH_CHECK 5 + uint16_t inq_retry; +#define LPFC_EDSM_MAX_RETRY 3 + uint16_t inq_cnt; + uint32_t err3f_cnt; + uint32_t err4b_cnt; +}; + +/* Struct used to identify External DIF vendor specific info */ +struct lpfc_vendor_dif { + uint8_t length; + uint8_t version; + uint8_t dif_info; +#define LPFC_FDIF_ATO 0x01 +#define LPFC_FDIF_REFCHK 0x02 +#define LPFC_FDIF_APPCHK 0x04 +#define LPFC_FDIF_GRDCHK 0x08 +#define LPFC_FDIF_SPTMASK 0x70 +#define LPFC_FDIF_PROTECT 0x80 + uint8_t reserved1; +}; + +/* Defines used to identify a External DIF device */ +#define LPFC_INQ_VID_OFFSET 8 +#define LPFC_INQ_VDIF_OFFSET 168 +#define LPFC_INQ_FDIF_SZ (LPFC_INQ_VDIF_OFFSET + 4) +#define LPFC_INQ_FDIF_VENDOR "3PARdata" /* Vendor Identification */ +#define LPFC_INQ_FDIF_VERSION 1 +#define LPFC_INQ_FDIF_SIZE 2 +#define LPFC_FDIF_CDB_PROTECT 0x20 /* Set RD/WR PROTECT = 001 */ + struct lpfc_trunk_link_state { enum hba_state state; uint8_t fault; @@ -347,6 +415,168 @@ struct lpfc_trunk_link { link3; }; +/* Format of congestion module parameters */ +struct lpfc_cgn_param { + uint32_t cgn_param_magic; + uint8_t cgn_param_version; /* version 1 */ + uint8_t cgn_param_mode; /* 0=off 1=managed 2=monitor only */ +#define LPFC_CFG_OFF 0 +#define LPFC_CFG_MANAGED 1 +#define LPFC_CFG_MONITOR 2 + uint8_t cgn_rsvd1; /* Unused for CMF */ + uint8_t cgn_rsvd2; /* Unused for CMF */ + uint8_t cgn_param_acr; /* 0=conserve 1=moderate 2=severe */ +#define 
LPFC_CFG_CONSERVE 0 +#define LPFC_CFG_MODERATE 1 +#define LPFC_CFG_SEVERE 2 + uint8_t cgn_param_rxburst; /* Burst in 512K units */ +#define LPFC_CFG_BURST_MIN 0 +#define LPFC_CFG_BURST_MAX 32 +#define LPFC_CFG_BURST_DEF 4 + uint8_t cgn_param_rxbw; /* BW in units of 5% */ +#define LPFC_CFG_BW_MIN 1 +#define LPFC_CFG_BW_MAX 20 +#define LPFC_CFG_BW_DEF 18 + uint8_t cgn_param_acr_min_rxbw; /* in units of 5%, must be < acrrxbw */ +#define LPFC_CFG_DEFAULT_MIN_BW 8 + uint8_t cgn_param_acr_scale; /* used in recovery and ramp up/down */ +#define LPFC_CFG_LOW_SCALE 1 +#define LPFC_CFG_DEFAULT_SCALE 10 /* 100% */ +#define LPFC_CFG_HIGH_SCALE 255 + uint8_t cgn_param_acr_max_rxburst; /* Burst in 512K units */ + uint8_t cgn_param_acr_max_rxbw; /* BW in units of 5% */ + uint8_t cgn_param_acr_min_rxburst; /* units of 5%, must be < acrrxburst */ +}; + +/* Max number of days of congestion data */ +#define LPFC_MAX_CGN_DAYS 10 + +/* Format of congestion buffer info + * This structure defines memory that's allocated and registered with + * the HBA firmware. When adding or removing fields from this structure + * the alignment must match the HBA firmware. + */ + +struct lpfc_cgn_info { + /* Header */ + __le16 cgn_info_size; /* is sizeof(struct lpfc_cgn_info) */ + uint8_t cgn_info_version; /* represents format of structure */ +#define LPFC_CGN_INFO_V1 1 +#define LPFC_CGN_INFO_V2 2 +#define LPFC_CGN_INFO_V3 3 + uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */ + uint8_t cgn_info_detect; /* 1=driver 2=fabric 3=both */ + uint8_t cgn_info_action; /* 0=fixed 1=adaptive */ + uint8_t cgn_info_acr; /* 0=conserve 1=moderate 2=aggressive */ + uint8_t cgn_info_rxburst; /* Burst in 512K units */ + uint8_t cgn_info_rxbw; /* BW in units of 5% */ + + /* Start Time */ + uint8_t cgn_info_month; + uint8_t cgn_info_day; + uint8_t cgn_info_year; + uint8_t cgn_info_hour; + uint8_t cgn_info_minute; + uint8_t cgn_info_second; + + /* minute / hours / daily indices */ + uint8_t cgn_index_minute; + uint8_t cgn_index_hour; + uint8_t cgn_index_day; + + __le16 cgn_warn_freq; + __le16 cgn_alarm_freq; + __le16 cgn_lunq; + uint8_t cgn_pad1[8]; + + /* Driver Information */ + __le16 cgn_drvr_min[60]; + __le32 cgn_drvr_hr[24]; + __le32 cgn_drvr_day[LPFC_MAX_CGN_DAYS]; + + /* Congestion Warnings */ + __le16 cgn_warn_min[60]; + __le32 cgn_warn_hr[24]; + __le32 cgn_warn_day[LPFC_MAX_CGN_DAYS]; + + /* Latency Information */ + __le32 cgn_latency_min[60]; + __le32 cgn_latency_hr[24]; + __le32 cgn_latency_day[LPFC_MAX_CGN_DAYS]; + + /* Bandwidth Information */ + __le16 cgn_bw_min[60]; + __le16 cgn_bw_hr[24]; + __le16 cgn_bw_day[LPFC_MAX_CGN_DAYS]; + + /* Congestion Alarms */ + __le16 cgn_alarm_min[60]; + __le32 cgn_alarm_hr[24]; + __le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS]; + + /* Start of congestion statistics */ + uint8_t cgn_stat_npm; /* Notifications per minute */ + + /* Start Time */ + uint8_t cgn_stat_month; + uint8_t cgn_stat_day; + uint8_t cgn_stat_year; + uint8_t cgn_stat_hour; + uint8_t cgn_stat_minute; + uint8_t cgn_pad2[2]; + + __le32 cgn_notification; + __le32 cgn_peer_notification; + __le32 link_integ_notification; + __le32 delivery_notification; + + uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */ + uint8_t cgn_stat_cgn_day; + uint8_t cgn_stat_cgn_year; + uint8_t cgn_stat_cgn_hour; + uint8_t cgn_stat_cgn_min; + uint8_t cgn_stat_cgn_sec; + + uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */ + uint8_t cgn_stat_peer_day; + uint8_t cgn_stat_peer_year; + uint8_t cgn_stat_peer_hour; + uint8_t cgn_stat_peer_min; +
uint8_t cgn_stat_peer_sec; + + uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */ + uint8_t cgn_stat_lnk_day; + uint8_t cgn_stat_lnk_year; + uint8_t cgn_stat_lnk_hour; + uint8_t cgn_stat_lnk_min; + uint8_t cgn_stat_lnk_sec; + + uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */ + uint8_t cgn_stat_del_day; + uint8_t cgn_stat_del_year; + uint8_t cgn_stat_del_hour; + uint8_t cgn_stat_del_min; + uint8_t cgn_stat_del_sec; +#define LPFC_CGN_STAT_SIZE 48 +#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \ + LPFC_CGN_STAT_SIZE - sizeof(uint32_t)) + + __le32 cgn_info_crc; +#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41 +#define LPFC_CGN_CRC32_SEED 0xFFFFFFFF +}; + +#define LPFC_CGN_INFO_SZ (sizeof(struct lpfc_cgn_info) - \ + sizeof(uint32_t)) + +struct lpfc_cgn_stat { + atomic64_t total_bytes; + atomic64_t rcv_bytes; + atomic64_t rx_latency; +#define LPFC_CGN_NOT_SENT 0xFFFFFFFFFFFFFFFFLL + atomic_t rx_io_cnt; +}; + struct lpfc_vport { struct lpfc_hba *phba; struct list_head listentry; @@ -388,6 +618,7 @@ struct lpfc_vport { #define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ #define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ #define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */ +#define FC_CT_CGN_RPA 0x4000000 /* CGN RPA sent to switch */ uint32_t ct_flags; #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ @@ -397,6 +628,7 @@ struct lpfc_vport { #define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */ struct list_head fc_nodes; + uint32_t throttle_cnt; /* Keep counters for the number of entries in each list. */ uint16_t fc_plogi_cnt; @@ -479,10 +711,11 @@ struct lpfc_vport { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *debug_disc_trc; struct dentry *debug_nodelist; + struct dentry *debug_ediflist; struct dentry *debug_nvmestat; struct dentry *debug_scsistat; - struct dentry *debug_nvmektime; - struct dentry *debug_cpucheck; + struct dentry *debug_ioktime; + struct dentry *debug_hdwqstat; struct dentry *vport_debugfs_root; struct lpfc_debugfs_trc *disc_trc; atomic_t disc_trc_cnt; @@ -496,6 +729,11 @@ struct lpfc_vport { #define FAWWPN_SET 2 #define FAWWPN_PARAM_CHG 4 + struct throttle_history log; /* Throttle log history for phba */ + /* Used to discover External DIF devices */ + struct list_head external_dif_list; + spinlock_t external_dif_lock; /* lock for external_dif_list */ + uint16_t fdmi_num_disc; uint32_t fdmi_hba_mask; uint32_t fdmi_port_mask; @@ -605,6 +843,12 @@ struct lpfc_epd_pool { spinlock_t lock; /* lock for expedite pool */ }; +enum ras_state { + INACTIVE, + REG_INPROGRESS, + ACTIVE +}; + struct lpfc_ras_fwlog { uint8_t *fwlog_buff; uint32_t fw_buffcount; /* Buffer size posted to FW */ @@ -621,7 +865,28 @@ struct lpfc_ras_fwlog { bool ras_enabled; /* Ras Enabled for the function */ #define LPFC_RAS_DISABLE_LOGGING 0x00 #define LPFC_RAS_ENABLE_LOGGING 0x01 - bool ras_active; /* RAS logging running state */ + enum ras_state state; /* RAS logging running state */ +}; + +#define DBG_LOG_STR_SZ 256 +#define DBG_LOG_SZ 256 + +struct dbg_log_ent { + char log[DBG_LOG_STR_SZ]; + u64 t_ns; +}; + +enum lpfc_irq_chann_mode { + /* Assign IRQs to all possible cpus that have hardware queues */ + NORMAL_MODE, + + /* Assign IRQs only to cpus on the same numa node as HBA */ + NUMA_MODE, + + /* Assign IRQs only on non-hyperthreaded CPUs. This is the + * same as normal_mode, but assign IRQS only on physical CPUs. 
+ */ + NHT_MODE, }; struct lpfc_hba { @@ -637,7 +902,7 @@ struct lpfc_hba { (struct lpfc_hba *, struct lpfc_io_buf *); void (*lpfc_rampdown_queue_depth) (struct lpfc_hba *); - void (*lpfc_scsi_prep_cmnd) + int (*lpfc_scsi_prep_cmnd) (struct lpfc_vport *, struct lpfc_io_buf *, struct lpfc_nodelist *); @@ -674,9 +939,9 @@ struct lpfc_hba { void (*lpfc_stop_port) (struct lpfc_hba *); int (*lpfc_hba_init_link) - (struct lpfc_hba *, uint32_t); + (struct lpfc_hba *); int (*lpfc_hba_down_link) - (struct lpfc_hba *, uint32_t); + (struct lpfc_hba *); int (*lpfc_selective_reset) (struct lpfc_hba *); @@ -693,6 +958,9 @@ struct lpfc_hba { struct workqueue_struct *wq; struct delayed_work eq_delay_work; +#define LPFC_IDLE_STAT_DELAY 1000 + struct delayed_work idle_stat_delay_work; + struct lpfc_sli sli; uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ @@ -725,7 +993,8 @@ struct lpfc_hba { #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ -#define ELS_XRI_ABORT_EVENT 0x40 +#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ +#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ @@ -742,6 +1011,12 @@ struct lpfc_hba { * capability */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ +#define HBA_CGN_BOTTOM 0x200000 /* HBA Congestion mgmt hits bottom */ +#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ +#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ +#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ +#define HBA_HBEAT_INP 0x2000000 /* mbox HBEAT is in progress */ +#define HBA_HBEAT_TMO 0x4000000 /* HBEAT initiated after timeout */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -804,6 +1079,9 @@ struct lpfc_hba { uint32_t cfg_xpsgl; uint32_t cfg_enable_npiv; uint32_t cfg_enable_rrq; +#ifndef NO_APEX + uint32_t cfg_enable_fcp_priority; +#endif uint32_t cfg_topology; uint32_t cfg_link_speed; #define LPFC_FCF_FOV 1 /* Fast fcf failover */ @@ -823,9 +1101,9 @@ struct lpfc_hba { uint32_t cfg_use_msi; uint32_t cfg_auto_imax; uint32_t cfg_fcp_imax; - uint32_t cfg_force_rscn; uint32_t cfg_cq_poll_threshold; uint32_t cfg_cq_max_proc_limit; + uint32_t cfg_force_rscn; uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_mq_threshold; uint32_t cfg_hdw_queue; @@ -843,8 +1121,6 @@ struct lpfc_hba { uint32_t cfg_nvme_seg_cnt; uint32_t cfg_scsi_seg_cnt; uint32_t cfg_sg_dma_buf_size; - uint64_t cfg_soft_wwnn; - uint64_t cfg_soft_wwpn; uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; @@ -866,24 +1142,27 @@ struct lpfc_hba { uint32_t cfg_enable_bg; uint32_t cfg_prot_mask; uint32_t cfg_prot_guard; + uint32_t cfg_external_dif; uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_enable_fc4_type; uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; - uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; + uint32_t cfg_throttle_log_cnt; + uint32_t cfg_throttle_log_time; + struct throttle_history log; /* Throttle log history for phba */ uint32_t cfg_rrq_xri_bitmap_sz; uint32_t cfg_delay_discovery; uint32_t cfg_sli_mode; #define 
LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ - uint32_t cfg_enable_dss; uint32_t cfg_fdmi_on; #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ + uint32_t cfg_enable_e2e; uint32_t cfg_enable_SmartSAN; uint32_t cfg_enable_mds_diags; uint32_t cfg_ras_fwlog_level; @@ -895,7 +1174,13 @@ struct lpfc_hba { #define LPFC_ENABLE_NVME 2 #define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; + uint32_t cfg_enable_scsi_mq; + uint32_t cfg_enable_auth; +#define LPFC_AUTH_DISABLE 0 +#define LPFC_AUTH_ENABLE 1 +#if defined(BUILD_NVME) struct nvmet_fc_target_port *targetport; +#endif lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -905,6 +1190,15 @@ struct lpfc_hba { uint32_t work_hs; /* HS stored in case of ERRAT */ uint32_t work_status[2]; /* Extra status from SLIM */ + uint32_t nvme_pendq_cnt; + struct list_head nvme_pendq; + spinlock_t nvme_pendq_lock; + struct list_head nvme_abts_pendq; + atomic_t nvme_abts_cnt; + + struct workqueue_struct *nvme_io; + struct work_struct nvme_defer_work; + wait_queue_head_t work_waitq; struct task_struct *worker_thread; unsigned long data_flags; @@ -962,8 +1256,6 @@ struct lpfc_hba { #define VPD_PORT 0x8 /* valid vpd port data */ #define VPD_MASK 0xf /* mask for any vpd data */ - uint8_t soft_wwn_enable; - struct timer_list fcp_poll_timer; struct timer_list eratt_poll; uint32_t eratt_poll_interval; @@ -982,6 +1274,7 @@ struct lpfc_hba { uint32_t total_iocbq_bufs; struct list_head active_rrq_list; spinlock_t hbalock; + struct work_struct unblock_request_work; /* SCSI layer unblock IOs */ /* dma_mem_pools */ struct dma_pool *lpfc_sg_dma_buf_pool; @@ -990,7 +1283,6 @@ struct lpfc_hba { struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */ struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ - struct dma_pool *txrdy_payload_pool; struct dma_pool *lpfc_cmd_rsp_buf_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; @@ -1000,9 +1292,11 @@ struct lpfc_hba { mempool_t *active_rrq_pool; struct fc_host_statistics link_stats; + enum lpfc_irq_chann_mode irq_chann_mode; enum intr_type_t intr_type; uint32_t intr_mode; #define LPFC_INTR_ERROR 0xFFFFFFFF + struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; struct list_head port_list; spinlock_t port_list_lock; /* lock for port_list mutations */ struct lpfc_vport *pport; /* physical lpfc_vport pointer */ @@ -1022,6 +1316,8 @@ struct lpfc_hba { uint16_t vpi_count; struct list_head lpfc_vpi_blk_list; + struct list_head lpfc_auth_active_cfg_list; + /* Data structure used by fabric iocb scheduler */ struct list_head fabric_iocb_list; atomic_t fabric_iocb_count; @@ -1055,6 +1351,9 @@ struct lpfc_hba { #ifdef LPFC_HDWQ_LOCK_STAT struct dentry *debug_lockstat; #endif + struct dentry *debug_cgn_buffer; + struct dentry *debug_rx_monitor; + struct dentry *debug_ras_log; atomic_t nvmeio_trc_cnt; uint32_t nvmeio_trc_size; uint32_t nvmeio_trc_output_idx; @@ -1098,7 +1397,6 @@ struct lpfc_hba { unsigned long last_completion_time; unsigned long skipped_hb; struct timer_list hb_tmofunc; - uint8_t hb_outstanding; struct timer_list rrq_tmr; enum hba_temp_state over_temp_state; /* ndlp reference management */ @@ -1149,8 +1447,10 @@ struct lpfc_hba { uint32_t iocb_cnt; uint32_t iocb_max; atomic_t sdev_cnt; - uint8_t fips_spec_rev; - uint8_t fips_level; + 
uint8_t dif_wqidx; +#ifndef NO_APEX + struct lpfc_fcp_pri_rules *fcp_priority_rules; +#endif spinlock_t devicelock; /* lock for luns list */ mempool_t *device_data_mem_pool; struct list_head luns; @@ -1168,12 +1468,11 @@ struct lpfc_hba { uint16_t sfp_warning; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint16_t cpucheck_on; + uint16_t hdwqstat_on; #define LPFC_CHECK_OFF 0 #define LPFC_CHECK_NVME_IO 1 -#define LPFC_CHECK_NVMET_RCV 2 -#define LPFC_CHECK_NVMET_IO 4 -#define LPFC_CHECK_SCSI_IO 8 +#define LPFC_CHECK_NVMET_IO 2 +#define LPFC_CHECK_SCSI_IO 4 uint16_t ktime_on; uint64_t ktime_data_samples; uint64_t ktime_status_samples; @@ -1209,6 +1508,106 @@ struct lpfc_hba { uint64_t ktime_seg10_min; uint64_t ktime_seg10_max; #endif + /* CMF objects */ + struct lpfc_cgn_stat __percpu *cmf_stat; + uint32_t cmf_interval_rate; /* timer interval limit in ms */ + uint32_t cmf_timer_cnt; +#define LPFC_CMF_INTERVAL 90 + uint64_t cmf_link_byte_count; + uint64_t cmf_max_line_rate; + uint64_t cmf_max_bytes_per_interval; + uint64_t cmf_last_sync_bw; +#define LPFC_CMF_BLK_SIZE 512 + struct hrtimer cmf_timer; + atomic_t cmf_bw_wait; + atomic_t cmf_busy; + atomic_t cmf_stop_io; /* To block request and stop IO's */ + uint32_t cmf_active_mode; + uint32_t cmf_info_per_interval; +#define LPFC_MAX_CMF_INFO 32 + struct timespec64 cmf_latency; /* Interval congestion timestamp */ + uint32_t cmf_last_ts; /* Interval congestion time (ms) */ + uint32_t cmf_active_info; + + /* Signal / FPIN handling for Congestion Mgmt */ + u8 cgn_reg_fpin; /* Negotiated value from RDF */ + u8 cgn_init_reg_fpin; /* Initial value from READ_CONFIG */ +#define LPFC_CGN_FPIN_NONE 0x0 +#define LPFC_CGN_FPIN_WARN 0x1 +#define LPFC_CGN_FPIN_ALARM 0x2 +#define LPFC_CGN_FPIN_BOTH (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM) + u8 cgn_reg_signal; /* Negotiated value from EDC */ + u8 cgn_init_reg_signal; /* Initial value from READ_CONFIG */ +#define LPFC_CGN_SIGNAL_NO_SPT 0x0 +#define LPFC_CGN_SIGNAL_WARN 0x1 +#define LPFC_CGN_SIGNAL_BOTH 0x2 + u16 cgn_fpin_frequency; +#define LPFC_FPIN_INIT_FREQ 0xffff + u32 cgn_sig_freq; + u32 cgn_acqe_cnt; + + /* RX monitor handling for CMF */ + struct rxtable_entry *rxtable; /* RX_monitor information */ + atomic_t rxtable_idx_head; +#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73) + atomic_t rxtable_idx_tail; + atomic_t rx_max_read_cnt; /* Maximum read bytes */ + uint64_t rx_block_cnt; + + /* Congestion parameters from flash */ + struct lpfc_cgn_param cgn_p; + + /* Congestion buffer information */ + struct lpfc_dmabuf *cgn_i; /* Congestion Info buffer */ + atomic_t cgn_fabric_warn_cnt; /* Total warning cgn events for info */ + atomic_t cgn_fabric_alarm_cnt; /* Total alarm cgn events for info */ + atomic_t cgn_sync_warn_cnt; /* Total warning events for SYNC wqe */ + atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */ + atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */ + atomic_t cgn_latency_evt_cnt; + struct timespec64 cgn_daily_ts; + atomic64_t cgn_latency_evt; /* Avg latency per minute */ + unsigned long cgn_evt_timestamp; +#define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */ + uint32_t cgn_evt_minute; +#define LPFC_SEC_MIN 60 +#define LPFC_MIN_HOUR 60 +#define LPFC_HOUR_DAY 24 +#define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY) + + struct hlist_node cpuhp; /* used for cpuhp per hba callback */ + struct timer_list cpuhp_poll_timer; + struct list_head poll_list; /* slowpath eq polling list */ +#define LPFC_POLL_HB 1 /* slowpath heartbeat */ +#define 
LPFC_POLL_FASTPATH 0 /* called from fastpath */ +#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ + + char os_host_name[MAXHOSTNAMELEN]; + + /* SCSI host template information - for physical port */ + struct scsi_host_template port_template; + /* SCSI host template information - for all vports */ + struct scsi_host_template vport_template; + atomic_t dbg_log_idx; + atomic_t dbg_log_cnt; + atomic_t dbg_log_dmping; + struct dbg_log_ent dbg_log[DBG_LOG_SZ]; +}; + +#define LPFC_MAX_RXMONITOR_ENTRY 800 +#define LPFC_MAX_RXMONITOR_DUMP 32 +struct rxtable_entry { + uint64_t total_bytes; /* Total no of read bytes requested */ + uint64_t rcv_bytes; /* Total no of read bytes completed */ + uint64_t avg_io_size; + uint64_t avg_io_latency;/* Average io latency in microseconds */ + uint64_t max_read_cnt; /* Maximum read bytes */ + uint64_t max_bytes_per_interval; + uint32_t cmf_busy; + uint32_t cmf_info; /* CMF_SYNC_WQE info */ + uint32_t io_cnt; + uint32_t timer_utilization; + uint32_t timer_interval; }; static inline struct Scsi_Host * @@ -1317,3 +1716,24 @@ lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq, writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr); eq->q_mode = delay; } + +/** + * lpfc_next_online_cpu - Finds next online CPU on cpumask + * @mask: Pointer to phba's cpumask member. + * @start: starting cpu index + * + * Note: If no valid cpu found, then nr_cpu_ids is returned. + * + **/ +static inline unsigned int +lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start) +{ + unsigned int cpu_it; + + for_each_cpu_wrap(cpu_it, mask, start) { + if (cpu_online(cpu_it)) + break; + } + + return cpu_it; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 25aa7a53d255..94def7d5be78 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -28,7 +28,6 @@ #include <linux/module.h> #include <linux/aer.h> #include <linux/gfp.h> -#include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -36,8 +35,11 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -55,14 +57,16 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_attr.h" +#ifndef BUILD_BRCMFCOE +#include "lpfc_auth.h" +#endif #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 #define LPFC_MAX_DEVLOSS_TMO 255 -#define LPFC_DEF_MRQ_POST 512 -#define LPFC_MIN_MRQ_POST 512 -#define LPFC_MAX_MRQ_POST 2048 +#define LPFC_MAX_INFO_TMP_LEN 100 +#define LPFC_INFO_MORE_STR "\nCould be more info...\n" /* * Write key size should be multiple of 4. 
If write key is changed @@ -119,6 +123,422 @@ lpfc_jedec_to_ascii(int incr, char hdw[]) return; } +static ssize_t +lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_cgn_info *cp = NULL; + struct lpfc_cgn_stat *cgs; + int len = 0; + int cpu; + u64 rcv, total; + char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; + + if (phba->cgn_i) + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + scnprintf(tmp, sizeof(tmp), + "Congestion Mgmt Info: E2Eattr %d Ver %d " + "CMF %d cnt %d\n", + phba->sli4_hba.pc_sli4_params.E2E_attr, + cp ? cp->cgn_info_version : 0, + phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt); + + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + if (!phba->sli4_hba.pc_sli4_params.cmf) + goto buffer_done; + + switch (phba->cgn_init_reg_signal) { + case LPFC_CGN_SIGNAL_WARN: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:WARN "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_SIGNAL_BOTH: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:WARN|ALARM "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), + "Register: Init: Signal:NONE "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + } + + switch (phba->cgn_init_reg_fpin) { + case LPFC_CGN_FPIN_WARN: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_FPIN_ALARM: + scnprintf(tmp, sizeof(tmp), + "FPIN:ALARM\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_FPIN_BOTH: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN|ALARM\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), + "FPIN:NONE\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + } + + switch (phba->cgn_reg_signal) { + case LPFC_CGN_SIGNAL_WARN: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:WARN "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_SIGNAL_BOTH: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:WARN|ALARM "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), + " Current: Signal:NONE "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + } + + switch (phba->cgn_reg_fpin) { + case LPFC_CGN_FPIN_WARN: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_FPIN_ALARM: + scnprintf(tmp, sizeof(tmp), + "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CGN_FPIN_BOTH: + scnprintf(tmp, sizeof(tmp), + "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), + "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + } + + if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) { + switch (phba->cmf_active_mode) { + case LPFC_CFG_OFF: + 
scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CFG_MANAGED: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CFG_MONITOR: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n"); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + } + + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Off "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CFG_MANAGED: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + case LPFC_CFG_MONITOR: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + break; + default: + scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown "); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + + scnprintf(tmp, sizeof(tmp), "Ver:%d ACR:%d Scale:%d\n", + phba->cgn_p.cgn_param_version, + phba->cgn_p.cgn_param_acr, + phba->cgn_p.cgn_param_acr_scale); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "Config: BW:%d Burst:%d " + "ACR: BW:%d(min %d) Burst:%d(min %d)\n", + phba->cgn_p.cgn_param_rxbw, + phba->cgn_p.cgn_param_rxburst, + phba->cgn_p.cgn_param_acr_max_rxbw, + phba->cgn_p.cgn_param_acr_min_rxbw, + phba->cgn_p.cgn_param_acr_max_rxburst, + phba->cgn_p.cgn_param_acr_min_rxburst); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + total = 0; + rcv = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_read(&cgs->total_bytes); + rcv += atomic64_read(&cgs->rcv_bytes); + } + + scnprintf(tmp, sizeof(tmp), + "IObusy:%d NVMEBsyCnt:%d NVMEAbortPendCnt:%d\n" + "Info:%d Bytes: Rcv:x%llx Total:x%llx\n", + atomic_read(&phba->cmf_busy), + phba->nvme_pendq_cnt, + (u32)atomic_read(&phba->nvme_abts_cnt), + phba->cmf_active_info, rcv, total); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), + "Port_speed:%d Link_byte_cnt:%ld " + "Max_byte_per_interval:%ld\n", + lpfc_sli_port_speed_get(phba), + (unsigned long)phba->cmf_link_byte_count, + (unsigned long)phba->cmf_max_bytes_per_interval); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + +buffer_done: + len = strnlen(buf, PAGE_SIZE); + + if (unlikely(len >= (PAGE_SIZE - 1))) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6312 Catching potential buffer " + "overflow > PAGE_SIZE = %lu bytes\n", + PAGE_SIZE); + strlcpy(buf + PAGE_SIZE - 1 - + strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1), + LPFC_INFO_MORE_STR, + strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1) + + 1); + } + return len; +} + +static ssize_t +lpfc_cmf_info_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t payload[64]; + int len, status = 0; + int flashit = 0; + unsigned long val; + + /* fpin_warn Simulates a Fabric detected warning + * fpin_alarm Simulates a Fabric detected 
alarm + * alarm_sig Forces an alarm signal detection + * warn_sig <cnt> Forces cnt warning signal detections + * alarm_fpin Forces an alarm FPIN detection + * warn_fpin Forces cnt warning FPIN detections + * mode <value> Switches to off(0), managed (1) or monitor(2) mode + * scale <value> Updates ACR scalar parameter (1 - 255) + * acr <value> Updates ACR param conserve(0) moderate(1) severe(2) + * BW <value> Updates rxbw parameter (5% units, 1-20) + * minBurst <value> Updates acr_min_rxburst param (512k units, 0-32) + * minBW <value> Updates acr_min_rxbw param (5% units, 1-20) + * maxBurst <value> Updates acr_max_rxburst param (512k units, 0-32) + * maxBW <value> Updates acr_max_rxbw param (5% units, 1-20) + * wobjmt Delete congestion params from flash + * rxmonitor Dump rxmonitor table + * set_frequency <val> Sets signal frequency (10 sec default) + */ + if ((strncmp(buf, "fpin_warn", strlen("fpin_warn")) == 0)) { + /* Simulate a FPIN warning from Fabric */ + payload[0] = cpu_to_be32(0x16000000); + payload[1] = cpu_to_be32(0x00000014); + payload[2] = cpu_to_be32(0x00020004); + payload[3] = cpu_to_be32(0x0000000c); + payload[4] = cpu_to_be32(0x00030000); + payload[5] = cpu_to_be32(0x0000ea60); + payload[6] = cpu_to_be32(0xf1000000); + len = 28; + lpfc_els_rcv_fpin(vport, payload, len); + } else if ((strncmp(buf, "fpin_alarm", strlen("fpin_alarm")) == 0)) { + /* Simulate a FPIN alarm from Fabric */ + payload[0] = cpu_to_be32(0x16000000); + payload[1] = cpu_to_be32(0x00000014); + payload[2] = cpu_to_be32(0x00020004); + payload[3] = cpu_to_be32(0x0000000c); + payload[4] = cpu_to_be32(0x00030000); + payload[5] = cpu_to_be32(0x0000ea60); + payload[6] = cpu_to_be32(0xf7000000); + len = 28; + lpfc_els_rcv_fpin(vport, payload, len); + } else if ((strncmp(buf, "alarm_sig", strlen("alarm_sig")) == 0)) { + atomic_inc(&phba->cgn_sync_alarm_cnt); + } else if ((strncmp(buf, "warn_sig", strlen("warn_sig")) == 0)) { + val = 0; + status = kstrtoul(buf + 9, 0, &val); + if (!status && val) + atomic_set(&phba->cgn_sync_warn_cnt, (u32)val); + } else if ((strncmp(buf, "alarm_fpin", strlen("alarm_fpin")) == 0)) { + atomic_inc(&phba->cgn_sync_alarm_cnt); + } else if ((strncmp(buf, "warn_fpin", strlen("warn_fpin")) == 0)) { + atomic_inc(&phba->cgn_sync_warn_cnt); + } else if ((strncmp(buf, "mode", strlen("mode")) == 0)) { + val = 0; + status = kstrtoul(buf + 5, 0, &val); + if (status) + return -EINVAL; + switch (val) { + case LPFC_CFG_OFF: + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6326 Disable CMF\n"); + break; + case LPFC_CFG_MANAGED: + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6327 Enable CMF\n"); + break; + case LPFC_CFG_MONITOR: + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6328 Monitor CMF\n"); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6329 Invalid CMF mode: %ld\n", val); + return -EINVAL; + } + phba->cgn_p.cgn_param_mode = val; + flashit = 1; + } else if ((strncmp(buf, "scale", strlen("scale")) == 0)) { + val = 0; + status = kstrtoul(buf + 6, 0, &val); + if (status) + return -EINVAL; + if (val >= LPFC_CFG_LOW_SCALE && + val <= LPFC_CFG_HIGH_SCALE) { + phba->cgn_p.cgn_param_acr_scale = val; + flashit = 1; + status = 0; + } else { + status = -EINVAL; + } + } else if ((strncmp(buf, "acr", strlen("acr")) == 0)) { + val = 0; + status = kstrtoul(buf + 4, 0, &val); + if (status) + return -EINVAL; + if (val >= LPFC_CFG_CONSERVE && + val <= LPFC_CFG_SEVERE) { + phba->cgn_p.cgn_param_acr = val; + flashit = 1; + status = 0; + } else { + status = -EINVAL; + } + } else if 
((strncmp(buf, "BW", strlen("BW")) == 0)) { + val = 0; + status = kstrtoul(buf + 3, 0, &val); + if (status) + return -EINVAL; + phba->cgn_p.cgn_param_rxbw = val; + flashit = 1; + } else if ((strncmp(buf, "maxBurst", strlen("maxBurst")) == 0)) { + val = 0; + status = kstrtoul(buf + 9, 0, &val); + if (status) + return -EINVAL; + phba->cgn_p.cgn_param_acr_max_rxburst = val; + flashit = 1; + } else if ((strncmp(buf, "maxBW", strlen("maxBW")) == 0)) { + val = 0; + status = kstrtoul(buf + 6, 0, &val); + if (status) + return -EINVAL; + phba->cgn_p.cgn_param_acr_max_rxbw = val; + flashit = 1; + } else if ((strncmp(buf, "minBurst", strlen("minBurst")) == 0)) { + val = 0; + status = kstrtoul(buf + 9, 0, &val); + if (status) + return -EINVAL; + phba->cgn_p.cgn_param_acr_min_rxburst = val; + flashit = 1; + } else if ((strncmp(buf, "minBW", strlen("minBW")) == 0)) { + val = 0; + status = kstrtoul(buf + 6, 0, &val); + if (status) + return -EINVAL; + phba->cgn_p.cgn_param_acr_min_rxbw = val; + flashit = 1; + } else if ((strncmp(buf, "set_frequency", + strlen("set_frequency")) == 0)) { + val = 0; + status = kstrtoul(buf + 14, 0, &val); + if (status) + return -EINVAL; + lpfc_acqe_cgn_frequency = val; + lpfc_config_cgn_signal(phba); + } else if ((strncmp(buf, "wobjmt", strlen("wobjmt")) == 0)) { + /* Delete the port.cfg store. */ + (void)lpfc_del_object(phba, (char *)LPFC_PORT_CFG_NAME); + } else if ((strncmp(buf, "rxmonitor", strlen("rxmonitor")) == 0)) { + lpfc_cgn_dump_rxmonitor(phba); + } else { + return -EINVAL; + } + + if (flashit) { + if (phba->sli4_hba.pc_sli4_params.cmf) { + /* Update FW flash with driver parameters */ + status = lpfc_write_object + (phba, (char *)LPFC_PORT_CFG_NAME, + (uint32_t *)&phba->cgn_p, + sizeof(struct lpfc_cgn_param)); + if (status <= 0) + status = -EACCES; + } + } + + if (status == 0) + return strlen(buf); + return status; +} /** * lpfc_drvr_version_show - Return the Emulex driver string with version number * @dev: class unused variable. 
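The lpfc_cmf_info_store() handler added above implements a small keyword interface: user space writes commands such as "mode 2", "scale 10" or "set_frequency 30" to the cmf_info attribute, and any parameter change that sets flashit is persisted to the adapter flash through lpfc_write_object(). Below is a minimal user-space sketch of exercising that interface; the sysfs path and the host0 instance are assumptions based on the usual scsi_host class layout, not something this patch defines.

    /* Hedged sketch: drive the cmf_info sysfs attribute from user space.
     * Assumes the attribute appears at /sys/class/scsi_host/hostN/cmf_info
     * and that host0 is an lpfc port; adjust for the actual host number.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/class/scsi_host/host0/cmf_info";
        const char *cmd = "mode 2";     /* LPFC_CFG_MONITOR per the handler above */
        char info[4096];
        ssize_t n;
        int fd;

        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
            perror("cmf_info write");
        if (fd >= 0)
            close(fd);

        fd = open(path, O_RDONLY);      /* read back the CMF status report */
        if (fd >= 0) {
            n = read(fd, info, sizeof(info) - 1);
            if (n > 0) {
                info[n] = '\0';
                fputs(info, stdout);
            }
            close(fd);
        }
        return 0;
    }

Reading the attribute returns the report built by lpfc_cmf_info_show(), so a write followed by a read confirms the mode change took effect.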
@@ -156,10 +576,13 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, return scnprintf(buf, PAGE_SIZE, "0\n"); } +#if defined(BUILD_NVME) static ssize_t lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, char *buf) { + int len = 0; +#if (IS_ENABLED(CONFIG_NVME_FC)) struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = shost_priv(shost); struct lpfc_hba *phba = vport->phba; @@ -174,9 +597,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, uint64_t totin, totout, tot; char *statep; int i; - int len = 0; - char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; - unsigned long iflags = 0; + char tmp[LPFC_MAX_INFO_TMP_LEN] = {0}; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); @@ -194,6 +615,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, statep = "REGISTERED"; else statep = "INIT"; + scnprintf(tmp, sizeof(tmp), "NVME Target Enabled State %s\n", statep); @@ -333,6 +755,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.nvmet_io_wait_total, tot); strlcat(buf, tmp, PAGE_SIZE); + goto buffer_done; } @@ -347,7 +770,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; - rcu_read_lock(); scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, @@ -355,7 +777,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -371,15 +793,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; + + spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; - spin_lock_irqsave(&vport->phba->hbalock, iflags); + spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; - spin_unlock_irqrestore(&vport->phba->hbalock, iflags); + spin_unlock(&vport->phba->hbalock); if (!nrport) continue; @@ -398,39 +822,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; /* An NVME rport can have multiple roles. 
*/ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -438,14 +862,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } - rcu_read_unlock(); + spin_unlock_irq(shost->host_lock); if (!lport) goto buffer_done; @@ -505,11 +929,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); - /* RCU is already unlocked. */ + /* host_lock is already unlocked. */ goto buffer_done; - rcu_unlock_buf_done: - rcu_read_unlock(); + unlock_buf_done: + spin_unlock_irq(shost->host_lock); buffer_done: len = strnlen(buf, PAGE_SIZE); @@ -520,14 +944,19 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, "overflow > PAGE_SIZE = %lu bytes\n", PAGE_SIZE); strlcpy(buf + PAGE_SIZE - 1 - - strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1), - LPFC_NVME_INFO_MORE_STR, - strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1) + strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1), + LPFC_INFO_MORE_STR, + strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1) + 1); } +#else + len = scnprintf(buf, PAGE_SIZE, + "NVME not Supported in kernel\n"); +#endif return len; } +#endif static ssize_t lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, @@ -587,17 +1016,29 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + int len = 0; if (phba->cfg_enable_bg) { if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) - return scnprintf(buf, PAGE_SIZE, - "BlockGuard Enabled\n"); + len += scnprintf(buf, PAGE_SIZE, + "BlockGuard Enabled\n"); else - return scnprintf(buf, PAGE_SIZE, + len += scnprintf(buf, PAGE_SIZE, "BlockGuard Not Supported\n"); - } else - return scnprintf(buf, PAGE_SIZE, + } else { + len += scnprintf(buf, PAGE_SIZE, "BlockGuard Disabled\n"); + } + + if (phba->cfg_external_dif) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + len += scnprintf(buf + len, PAGE_SIZE, + "External DIF Enabled\n"); + else + len += scnprintf(buf + len, PAGE_SIZE, + "External DIF Not Supported\n"); + } + return len; } static ssize_t @@ -650,7 +1091,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *host = class_to_shost(dev); - return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); + return scnprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); } /** @@ -669,7 +1110,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); + 
return scnprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); } /** @@ -691,7 +1132,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); + return scnprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); } /** @@ -710,7 +1151,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); + return scnprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); } /** @@ -729,7 +1170,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); + return scnprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); } /** @@ -748,7 +1189,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); + return scnprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); } /** @@ -786,7 +1227,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); + return scnprintf(buf, PAGE_SIZE, "%s\n",phba->Port); } /** @@ -864,7 +1305,7 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, if (phba->sli_rev < LPFC_SLI_REV4) return scnprintf(buf, PAGE_SIZE, "%s\n", - phba->OptionROMVersion); + phba->OptionROMVersion); lpfc_decode_firmware_rev(phba, fwrev, 1); return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); @@ -898,20 +1339,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: if (phba->hba_flag & LINK_DISABLED) - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Link Down - User disabled\n"); else - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: - len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); + len += scnprintf(buf + len, PAGE_SIZE - len, "Link Up - "); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Configuring Link\n"); break; case LPFC_FDISC: @@ -926,12 +1367,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, break; case LPFC_VPORT_READY: len += scnprintf(buf + len, PAGE_SIZE - len, - "Ready\n"); + "Ready\n"); break; case LPFC_VPORT_FAILED: len += scnprintf(buf + len, PAGE_SIZE - len, - "Failed\n"); + "Failed\n"); break; case LPFC_VPORT_UNKNOWN: @@ -940,21 +1381,21 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, break; } if (phba->sli.sli_flag & LPFC_MENLO_MAINT) - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, " Menlo Maint Mode\n"); else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) - len += 
scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, " Public Loop\n"); else - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, " Private Loop\n"); } else { if (vport->fc_flag & FC_FABRIC) - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, " Fabric\n"); else - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, " Point-2-Point\n"); } } @@ -1071,10 +1512,10 @@ lpfc_link_state_store(struct device *dev, struct device_attribute *attr, if ((strncmp(buf, "up", sizeof("up") - 1) == 0) && (phba->link_state == LPFC_LINK_DOWN)) - status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + status = phba->lpfc_hba_init_link(phba); else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) && (phba->link_state >= LPFC_LINK_UP)) - status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT); + status = phba->lpfc_hba_down_link(phba); if (status == 0) return strlen(buf); @@ -1119,7 +1560,7 @@ lpfc_num_discovered_ports_show(struct device *dev, * -EIO error sending the mailbox command * zero for success **/ -static int +int lpfc_issue_lip(struct Scsi_Host *shost) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; @@ -1185,7 +1626,7 @@ lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0466 %s %s\n", "Outstanding IO when ", - "bringing Adapter offline\n"); + "waiting for IO to drain\n"); return 0; } spin_lock_irq(lock); @@ -1352,8 +1793,14 @@ lpfc_reset_pci_bus(struct lpfc_hba *phba) } } +#if defined(BUILD_RHEL8) || defined(BUILD_SLES15SP) || \ +defined(BUILD_SLES12SP5) || (KERNEL_VERSION(4, 18, 0) < LINUX_VERSION_CODE) /* Issue PCI bus reset */ res = pci_reset_bus(pdev); +#else + /* Issue PCI bus reset */ + res = pci_reset_bus(pdev->bus); +#endif if (res) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "8350 PCI reset bus failed: %d\n", res); @@ -1475,8 +1922,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) int i; msleep(100); - lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, - &portstat_reg.word0); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + return -EIO; /* verify if privileged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && @@ -1486,8 +1934,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) /* wait for the SLI port firmware ready after firmware reset */ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { msleep(10); - lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, - &portstat_reg.word0); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + continue; if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) @@ -1792,6 +2241,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1) == 0) status = lpfc_reset_pci_bus(phba); + else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0) + lpfc_issue_hb_tmo(phba); else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0) status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk")); else @@ -2228,66 +2679,6 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, return strlen(buf); } -/** - * lpfc_fips_level_show - Return the current FIPS level for the HBA - * @dev: class unused variable. - * @attr: device attribute, not used. 
- * @buf: on return contains the module description text. - * - * Returns: size of formatted string. - **/ -static ssize_t -lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - - return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); -} - -/** - * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA - * @dev: class unused variable. - * @attr: device attribute, not used. - * @buf: on return contains the module description text. - * - * Returns: size of formatted string. - **/ -static ssize_t -lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - - return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); -} - -/** - * lpfc_dss_show - Return the current state of dss and the configured state - * @dev: class converted to a Scsi_host structure. - * @attr: device attribute, not used. - * @buf: on return contains the formatted text. - * - * Returns: size of formatted string. - **/ -static ssize_t -lpfc_dss_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - - return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n", - (phba->cfg_enable_dss) ? "Enabled" : "Disabled", - (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? - "" : "Not "); -} - /** * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions * @dev: class converted to a Scsi_host structure. @@ -2314,11 +2705,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } -static inline bool lpfc_rangecheck(uint val, uint min, uint max) -{ - return val >= min && val <= max; -} - /** * lpfc_enable_bbcr_set: Sets an attribute value. * @phba: pointer the the adapter structure. 
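The hunks below rework the stringized log messages in the LPFC_ATTR init/set generator macros to build on LPFC_DRIVER_NAME rather than a hard-coded "lpfc" prefix, matching the dual lpfc/brcmfcoe build added in the Makefile, and the lpfc_rangecheck() inline removed above now lives in lpfc.h as a macro. As a rough, self-contained illustration of this generator idiom only (the attribute name "foo", the stand-in struct, and the trimmed body are hypothetical; the real macros also emit the numbered 0449/0450-style lpfc_printf_log messages):

    /* Hedged sketch of the per-attribute generator pattern used by the
     * LPFC_ATTR macros: one macro stamps out a range-checked init routine
     * for each config attribute. Everything here is illustrative.
     */
    #include <errno.h>

    typedef unsigned int uint;

    struct hba_sketch {         /* stand-in for struct lpfc_hba */
        uint cfg_foo;
    };

    #define lpfc_rangecheck(val, min, max) \
        ((uint)(val) >= (uint)(min) && (val) <= (max))

    #define LPFC_ATTR_INIT(attr, default, minval, maxval)              \
    static int lpfc_##attr##_init(struct hba_sketch *phba, uint val)   \
    {                                                                  \
        if (lpfc_rangecheck(val, minval, maxval)) {                    \
            phba->cfg_##attr = val;                                    \
            return 0;                                                  \
        }                                                              \
        phba->cfg_##attr = default; /* out of range, use default */    \
        return -EINVAL;                                                \
    }

    /* Expands to lpfc_foo_init(): stores val into cfg_foo when it lies
     * in [0, 4], otherwise falls back to the default of 1.
     */
    LPFC_ATTR_INIT(foo, 1, 0, 4)

    int main(void)
    {
        struct hba_sketch h = { 0 };

        return lpfc_foo_init(&h, 3);   /* returns 0, h.cfg_foo == 3 */
    }

Because attr is pasted and stringized, renaming the prefix via LPFC_DRIVER_NAME, as the hunk below does, keeps the generated message text in step with whichever driver name the sources are built under.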
@@ -2431,8 +2817,9 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ - "0449 lpfc_"#attr" attribute cannot be set to %d, "\ - "allowed range is ["#minval", "#maxval"]\n", val); \ + "0449 "LPFC_DRIVER_NAME"_"#attr" attribute " \ + "cannot be set to %d, allowed range is [" \ + #minval", "#maxval"]\n", val); \ phba->cfg_##attr = default;\ return -EINVAL;\ } @@ -2463,14 +2850,15 @@ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ - "3052 lpfc_" #attr " changed from %d to %d\n", \ - phba->cfg_##attr, val); \ + "3052 "LPFC_DRIVER_NAME"_" #attr " changed from " \ + "%d to %d\n", phba->cfg_##attr, val); \ phba->cfg_##attr = val;\ return 0;\ }\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ - "0450 lpfc_"#attr" attribute cannot be set to %d, "\ - "allowed range is ["#minval", "#maxval"]\n", val); \ + "0450 "LPFC_DRIVER_NAME"_"#attr" attribute " \ + "cannot be set to %d, allowed range is [" \ + #minval", "#maxval"]\n", val); \ return -EINVAL;\ } @@ -2590,8 +2978,9 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "0423 lpfc_"#attr" attribute cannot be set to %d, "\ - "allowed range is ["#minval", "#maxval"]\n", val); \ + "0423 "LPFC_DRIVER_NAME"_"#attr" attribute " \ + "cannot be set to %d, allowed range is [" \ + #minval", "#maxval"]\n", val); \ vport->cfg_##attr = default;\ return -EINVAL;\ } @@ -2619,7 +3008,7 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "3053 lpfc_" #attr \ + "3053 "LPFC_DRIVER_NAME"_" #attr \ " changed from %d (x%x) to %d (x%x)\n", \ vport->cfg_##attr, vport->cfg_##attr, \ val, val); \ @@ -2627,8 +3016,9 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ return 0;\ }\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "0424 lpfc_"#attr" attribute cannot be set to %d, "\ - "allowed range is ["#minval", "#maxval"]\n", val); \ + "0424 "LPFC_DRIVER_NAME"_"#attr" attribute " \ + "cannot be set to %d, allowed range is [" \ + #minval", "#maxval"]\n", val); \ return -EINVAL;\ } @@ -2668,7 +3058,9 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ } +#ifdef BUILD_NVME static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL); +#endif static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); @@ -2690,8 +3082,8 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO, lpfc_num_discovered_ports_show, NULL); static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); -static DEVICE_ATTR_RO(lpfc_drvr_version); -static DEVICE_ATTR_RO(lpfc_enable_fip); +static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); +static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); @@ -2702,16 +3094,15 @@ static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); static DEVICE_ATTR(npiv_info, 
S_IRUGO, lpfc_npiv_info_show, NULL); -static DEVICE_ATTR_RO(lpfc_temp_sensor); -static DEVICE_ATTR_RO(lpfc_fips_level); -static DEVICE_ATTR_RO(lpfc_fips_rev); -static DEVICE_ATTR_RO(lpfc_dss); -static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn); +static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); +static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, + lpfc_sriov_hw_max_virtfn_show, NULL); static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); +static DEVICE_ATTR(cmf_info, S_IRUGO | S_IWUSR, lpfc_cmf_info_show, + lpfc_cmf_info_store); -static char *lpfc_soft_wwn_key = "C99G71SL8032A"; #define WWN_SZ 8 /** * lpfc_wwn_set - Convert string to the 8 byte WWN value. @@ -2755,228 +3146,6 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) } return 0; } -/** - * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: containing the string lpfc_soft_wwn_key. - * @count: must be size of lpfc_soft_wwn_key. - * - * Returns: - * -EINVAL if the buffer does not contain lpfc_soft_wwn_key - * length of buf indicates success - **/ -static ssize_t -lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - unsigned int cnt = count; - uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; - u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0]; - - /* - * We're doing a simple sanity check for soft_wwpn setting. - * We require that the user write a specific key to enable - * the soft_wwpn attribute to be settable. Once the attribute - * is written, the enable key resets. If further updates are - * desired, the key must be written again to re-enable the - * attribute. - * - * The "key" is not secret - it is a hardcoded string shown - * here. The intent is to protect against the random user or - * application that is just writing attributes. - */ - if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0051 "LPFC_DRIVER_NAME" soft wwpn can not" - " be enabled: fawwpn is enabled\n"); - return -EINVAL; - } - - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if ((cnt != strlen(lpfc_soft_wwn_key)) || - (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0)) - return -EINVAL; - - phba->soft_wwn_enable = 1; - - dev_printk(KERN_WARNING, &phba->pcidev->dev, - "lpfc%d: soft_wwpn assignment has been enabled.\n", - phba->brd_no); - dev_printk(KERN_WARNING, &phba->pcidev->dev, - " The soft_wwpn feature is not supported by Broadcom."); - - return count; -} -static DEVICE_ATTR_WO(lpfc_soft_wwn_enable); - -/** - * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: on return contains the wwpn in hexadecimal. - * - * Returns: size of formatted string. 
- **/ -static ssize_t -lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - - return scnprintf(buf, PAGE_SIZE, "0x%llx\n", - (unsigned long long)phba->cfg_soft_wwpn); -} - -/** - * lpfc_soft_wwpn_store - Set the ww port name of the adapter - * @dev class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: contains the wwpn in hexadecimal. - * @count: number of wwpn bytes in buf - * - * Returns: - * -EACCES hba reset not enabled, adapter over temp - * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid - * -EIO error taking adapter offline or online - * value of count on success - **/ -static ssize_t -lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; - struct completion online_compl; - int stat1 = 0, stat2 = 0; - unsigned int cnt = count; - u8 wwpn[WWN_SZ]; - int rc; - - if (!phba->cfg_enable_hba_reset) - return -EACCES; - spin_lock_irq(&phba->hbalock); - if (phba->over_temp_state == HBA_OVER_TEMP) { - spin_unlock_irq(&phba->hbalock); - return -EACCES; - } - spin_unlock_irq(&phba->hbalock); - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if (!phba->soft_wwn_enable) - return -EINVAL; - - /* lock setting wwpn, wwnn down */ - phba->soft_wwn_enable = 0; - - rc = lpfc_wwn_set(buf, cnt, wwpn); - if (rc) { - /* not able to set wwpn, unlock it */ - phba->soft_wwn_enable = 1; - return rc; - } - - phba->cfg_soft_wwpn = wwn_to_u64(wwpn); - fc_host_port_name(shost) = phba->cfg_soft_wwpn; - if (phba->cfg_soft_wwnn) - fc_host_node_name(shost) = phba->cfg_soft_wwnn; - - dev_printk(KERN_NOTICE, &phba->pcidev->dev, - "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); - - stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (stat1) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0463 lpfc_soft_wwpn attribute set failed to " - "reinit adapter - %d\n", stat1); - init_completion(&online_compl); - rc = lpfc_workq_post_event(phba, &stat2, &online_compl, - LPFC_EVT_ONLINE); - if (rc == 0) - return -ENOMEM; - - wait_for_completion(&online_compl); - if (stat2) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0464 lpfc_soft_wwpn attribute set failed to " - "reinit adapter - %d\n", stat2); - return (stat1 || stat2) ? -EIO : count; -} -static DEVICE_ATTR_RW(lpfc_soft_wwpn); - -/** - * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter - * @dev: class device that is converted into a Scsi_host. - * @attr: device attribute, not used. - * @buf: on return contains the wwnn in hexadecimal. - * - * Returns: size of formatted string. - **/ -static ssize_t -lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return scnprintf(buf, PAGE_SIZE, "0x%llx\n", - (unsigned long long)phba->cfg_soft_wwnn); -} - -/** - * lpfc_soft_wwnn_store - sets the ww node name of the adapter - * @cdev: class device that is converted into a Scsi_host. - * @buf: contains the ww node name in hexadecimal. - * @count: number of wwnn bytes in buf. 
- * - * Returns: - * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid - * value of count on success - **/ -static ssize_t -lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - unsigned int cnt = count; - u8 wwnn[WWN_SZ]; - int rc; - - /* count may include a LF at end of string */ - if (buf[cnt-1] == '\n') - cnt--; - - if (!phba->soft_wwn_enable) - return -EINVAL; - - rc = lpfc_wwn_set(buf, cnt, wwnn); - if (rc) { - /* Allow wwnn to be set many times, as long as the enable - * is set. However, once the wwpn is set, everything locks. - */ - return rc; - } - - phba->cfg_soft_wwnn = wwn_to_u64(wwnn); - - dev_printk(KERN_NOTICE, &phba->pcidev->dev, - "lpfc%d: soft_wwnn set. Value will take effect upon " - "setting of the soft_wwpn\n", phba->brd_no); - - return count; -} -static DEVICE_ATTR_RW(lpfc_soft_wwnn); /** * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for @@ -3100,10 +3269,7 @@ lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, if (ret || (val > 0x7f)) return -EINVAL; - if (val) - phba->cfg_oas_priority = (uint8_t)val; - else - phba->cfg_oas_priority = phba->cfg_XLanePriority; + phba->cfg_oas_priority = (uint8_t)val; return count; } static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, @@ -3413,7 +3579,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, if (oas_lun != NOT_OAS_ENABLED_LUN) phba->cfg_oas_flags |= OAS_LUN_VALID; - len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + len += scnprintf(buf + len, PAGE_SIZE - len, "0x%llx", oas_lun); return len; } @@ -3459,9 +3625,6 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, return -EINVAL; pri = phba->cfg_oas_priority; - if (pri == 0) - pri = phba->cfg_XLanePriority; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " "priority 0x%x with oas state %d\n", @@ -3480,12 +3643,14 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, lpfc_oas_lun_show, lpfc_oas_lun_store); +#ifdef BUILD_NVME int lpfc_enable_nvmet_cnt; unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444); MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target"); +#endif static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); @@ -3494,7 +3659,8 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 1 - poll with interrupts enabled" " 3 - poll and disable FCP ring interrupts"); -static DEVICE_ATTR_RW(lpfc_poll); +static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, + lpfc_poll_show, lpfc_poll_store); int lpfc_no_hba_reset_cnt; unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = { @@ -3514,6 +3680,11 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1, LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); +#ifndef NO_APEX +LPFC_ATTR_RW(enable_fcp_priority, 0, 0, 1, + "Enable/disable, (1,0) FCP Priority"); +#endif + /* # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures # 0x0 = disabled, XRI/OXID use not tracked. 
@@ -3533,6 +3704,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2, LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, LPFC_DELAY_INIT_LINK_INDEFINITELY, "Suppress Link Up at initialization"); +
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, S_IRUGO,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, S_IRUGO,
+ lpfc_pt_show, NULL);
+ /* # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS # 1 - (1024) @@ -3580,9 +3776,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); -LPFC_ATTR_R(iocb_cnt, 2, 1, 5, - "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); - /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear # until the timer expires. Value range is [0,255]. Default value is 30. @@ -3634,9 +3827,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) vport->cfg_nodev_tmo = vport->cfg_devloss_tmo; if (val != LPFC_DEF_DEVLOSS_TMO) lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0407 Ignoring lpfc_nodev_tmo module " - "parameter because lpfc_devloss_tmo " - "is set.\n"); + "0407 Ignoring "LPFC_DRIVER_NAME + "_nodev_tmo module parameter because " + LPFC_DRIVER_NAME"_devloss_tmo is set.\n"); return 0; } @@ -3646,8 +3839,8 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val) return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0400 lpfc_nodev_tmo attribute cannot be set to" - " %d, allowed range is [%d, %d]\n", + "0400 "LPFC_DRIVER_NAME"_nodev_tmo attribute cannot " + "be set to %d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; return -EINVAL; @@ -3665,7 +3858,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) { struct Scsi_Host *shost; struct lpfc_nodelist *ndlp; -#if (IS_ENABLED(CONFIG_NVME_FC)) +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *rport; struct nvme_fc_remote_port *remoteport = NULL; #endif @@ -3677,7 +3870,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) continue; if (ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; -#if (IS_ENABLED(CONFIG_NVME_FC)) +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC)) spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) @@ -3712,31 +3905,27 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) if (vport->dev_loss_tmo_changed || (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0401 Ignoring change to lpfc_nodev_tmo " - "because lpfc_devloss_tmo is set.\n"); + "0401 Ignoring change to "LPFC_DRIVER_NAME + "_nodev_tmo because devloss_tmo is set.\n"); return 0; } if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; - /* - * For compat: set the fc_host dev loss so new rports - * will get the value.
- */ - fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0403 lpfc_nodev_tmo attribute cannot be set to " - "%d, allowed range is [%d, %d]\n", + "0403 "LPFC_DRIVER_NAME"_nodev_tmo attribute cannot " + "be set to %d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; } lpfc_vport_param_store(nodev_tmo) -static DEVICE_ATTR_RW(lpfc_nodev_tmo); +static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, + lpfc_nodev_tmo_show, lpfc_nodev_tmo_store); /* # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that @@ -3772,20 +3961,20 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; vport->dev_loss_tmo_changed = 1; - fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0404 lpfc_devloss_tmo attribute cannot be set to " - "%d, allowed range is [%d, %d]\n", + "0404 "LPFC_DRIVER_NAME"_devloss_tmo attribute " + "cannot be set to %d, allowed range is [%d, %d]\n", val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); return -EINVAL; } lpfc_vport_param_store(devloss_tmo) -static DEVICE_ATTR_RW(lpfc_devloss_tmo); +static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, + lpfc_devloss_tmo_show, lpfc_devloss_tmo_store); /* * lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it @@ -3796,6 +3985,7 @@ static DEVICE_ATTR_RW(lpfc_devloss_tmo); LPFC_ATTR_R(suppress_rsp, 1, 0, 1, "Enable suppress rsp feature is firmware supports it"); +#ifdef BUILD_NVME /* * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds * lpfc_nvmet_mrq = 0 driver will calcualte optimal number of RQ pairs @@ -3815,7 +4005,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST, LPFC_NVMET_RQE_DEF_COUNT, "Specify number of RQ buffers to initially post"); +#endif +#ifdef BUILD_NVME /* * lpfc_enable_fc4_type: Defines what FC4 types are supported. * Supported Values: 1 - register just FCP @@ -3825,6 +4017,12 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, "Enable FC4 Protocol support - FCP / NVME"); +#else +LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, + LPFC_ENABLE_FCP, LPFC_ENABLE_FCP, + "Enable FC4 Protocol support - FCP"); + +#endif /* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being @@ -3845,19 +4043,84 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, /* # lun_queue_depth: This parameter is used to limit the number of outstanding # commands per FCP LUN. Value range is [1,512]. Default value is 30. -# If this parameter value is greater than 1/8th the maximum number of exchanges -# supported by the HBA port, then the lun queue depth will be reduced to -# 1/8th the maximum number of exchanges. 
*/ -LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512, - "Max number of FCP commands we can queue to a specific LUN"); +static u16 lpfc_lun_queue_depth = LPFC_DEF_LUN_QDEPTH; +module_param(lpfc_lun_queue_depth, ushort, 0644); +MODULE_PARM_DESC(lpfc_lun_queue_depth, + "Max number of FCP commands we can queue to a specific LUN"); +lpfc_vport_param_show(lun_queue_depth); +lpfc_vport_param_init(lun_queue_depth, LPFC_DEF_LUN_QDEPTH, LPFC_MIN_LUN_QDEPTH, + LPFC_MAX_LUN_QDEPTH); + +static ssize_t +lpfc_lun_queue_depth_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct scsi_device *sdev; + struct lpfc_cgn_info *cp; + u16 new_queue_depth; + u32 crc; + int rc; + + rc = kstrtou16(buf, 0, &new_queue_depth); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "4385 lpfc_lun_queue_depth: failed to convert " + "\"%s\" to u16 (%d)\n", + buf, rc); + return -EINVAL; + } + + /* Check allowed range */ + if (new_queue_depth < LPFC_MIN_LUN_QDEPTH || + new_queue_depth > LPFC_MAX_LUN_QDEPTH) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "4386 lpfc_lun_queue_depth: %d outside of " + "allowed range [%u, %u]\n", + new_queue_depth, LPFC_MIN_LUN_QDEPTH, + LPFC_MAX_LUN_QDEPTH); + return -EINVAL; + } + + /* Check if previously set same value */ + if (new_queue_depth == vport->cfg_lun_queue_depth) + goto buffer_done; + + /* Store for future calls to slave_configure */ + vport->cfg_lun_queue_depth = new_queue_depth; + + /* Change all the current LUNs' queue depths */ + shost_for_each_device(sdev, shost) { + sdev->max_queue_depth = new_queue_depth; + scsi_change_queue_depth(sdev, new_queue_depth); + } + + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + new_queue_depth = (uint16_t)(phba->pport->cfg_lun_queue_depth); + + /* The location of cgn_info (cp) is registered with + * firmware and the contents maintained in LE format. + */ + cp->cgn_lunq = cpu_to_le16(new_queue_depth); + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } +buffer_done: + return strlen(buf); +} +static DEVICE_ATTR_RW(lpfc_lun_queue_depth); /* # tgt_queue_depth: This parameter is used to limit the number of outstanding # commands per target port. Value range is [10,65535]. Default value is 65535. 
*/ static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH; -module_param(lpfc_tgt_queue_depth, uint, 0444); +module_param(lpfc_tgt_queue_depth, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth"); lpfc_vport_param_show(tgt_queue_depth); lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH, @@ -3960,9 +4223,9 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0422 lpfc_restrict_login attribute cannot " - "be set to %d, allowed range is [0, 1]\n", - val); + "0422 "LPFC_DRIVER_NAME"_restrict_login " + "attribute cannot be set to %d, allowed " + "range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } @@ -3995,16 +4258,16 @@ lpfc_restrict_login_set(struct lpfc_vport *vport, int val) { if (val < 0 || val > 1) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0425 lpfc_restrict_login attribute cannot " - "be set to %d, allowed range is [0, 1]\n", - val); + "0425 "LPFC_DRIVER_NAME"_restrict_login " + "attribute cannot be set to %d, allowed " + "range is [0, 1]\n", val); vport->cfg_restrict_login = 1; return -EINVAL; } if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0468 lpfc_restrict_login must be 0 for " - "Physical ports.\n"); + "0468 "LPFC_DRIVER_NAME"_restrict_login " + "must be 0 for Physical ports.\n"); vport->cfg_restrict_login = 0; return 0; } @@ -4012,7 +4275,8 @@ lpfc_restrict_login_set(struct lpfc_vport *vport, int val) return 0; } lpfc_vport_param_store(restrict_login); -static DEVICE_ATTR_RW(lpfc_restrict_login); +static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR, + lpfc_restrict_login_show, lpfc_restrict_login_store); /* # Some disk devices have a "select ID" or "select Target" capability. @@ -4096,8 +4360,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, val); return -EINVAL; } - if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + /* + * The 'topology' is not a configurable parameter if : + * - persistent topology enabled + * - G7/G6 with no private loop support + */ + + if ((phba->hba_flag & HBA_PERSISTENT_TOPO || + (!phba->sli4_hba.pc_sli4_params.pls && + (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || + phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); @@ -4127,7 +4399,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, } lpfc_param_show(topology) -static DEVICE_ATTR_RW(lpfc_topology); +static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, + lpfc_topology_show, lpfc_topology_store); /** * lpfc_static_vport_show: Read callback function for @@ -4157,7 +4430,8 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, /* * Sysfs attribute to control the statistical data collection. */ -static DEVICE_ATTR_RO(lpfc_static_vport); +static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, + lpfc_static_vport_show, NULL); /** * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file @@ -4384,7 +4658,8 @@ lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, /* * Sysfs attribute to control the statistical data collection. 
*/ -static DEVICE_ATTR_RW(lpfc_stat_data_ctrl); +static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR, + lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store); /* * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. @@ -4476,7 +4751,7 @@ sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj, static struct bin_attribute sysfs_drvr_stat_data_attr = { .attr = { - .name = "lpfc_drvr_stat_data", + .name = LPFC_DRIVER_NAME"_drvr_stat_data", .mode = S_IRUSR, }, .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, @@ -4619,7 +4894,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val) { if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3111 lpfc_link_speed of %d cannot " + "3111 "LPFC_DRIVER_NAME"_link_speed of %d cannot " "support loop mode, setting topology to default.\n", val); phba->cfg_topology = 0; @@ -4646,7 +4921,8 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val) } } -static DEVICE_ATTR_RW(lpfc_link_speed); +static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, + lpfc_link_speed_show, lpfc_link_speed_store); /* # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) @@ -4739,7 +5015,8 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, return rc; } -static DEVICE_ATTR_RW(lpfc_aer_support); +static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR, + lpfc_aer_support_show, lpfc_aer_support_store); /** * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device @@ -4751,8 +5028,8 @@ static DEVICE_ATTR_RW(lpfc_aer_support); * Description: * If the @buf contains 1 and the device currently has the AER support * enabled, then invokes the kernel AER helper routine - * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable - * error status register. + * pci_aer_clear_nonfatal_status to clean up the uncorrectable error + * status register. 
* * Notes: * @@ -4777,7 +5054,11 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, return -EINVAL; if (phba->hba_flag & HBA_AER_ENABLED) +#if defined(BUILD_SLES15SP2) || defined(BUILD_RHEL83) + rc = pci_aer_clear_nonfatal_status(phba->pcidev); +#else rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev); +#endif if (rc == 0) return strlen(buf); @@ -4886,7 +5167,8 @@ LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN, "Enable PCIe device SR-IOV virtual fn"); lpfc_param_show(sriov_nr_virtfn) -static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn); +static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR, + lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store); /** * lpfc_request_firmware_store - Request for Linux generic firmware upgrade @@ -4996,8 +5278,8 @@ lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr, * * Value range is any ascii value */ -static int lpfc_force_rscn; -module_param(lpfc_force_rscn, int, 0644); +static int lpfc_force_rscn = 0; +module_param(lpfc_force_rscn, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(lpfc_force_rscn, "Force an RSCN to be sent to all remote NPorts"); lpfc_param_show(force_rscn) @@ -5015,6 +5297,7 @@ lpfc_force_rscn_init(struct lpfc_hba *phba, int val) { return 0; } + static DEVICE_ATTR_RW(lpfc_force_rscn); /** @@ -5128,14 +5411,25 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3016 lpfc_fcp_imax: %d out of range, using default\n", - val); + "3016 "LPFC_DRIVER_NAME"_fcp_imax: " + "%d out of range, using default\n", val); phba->cfg_fcp_imax = LPFC_DEF_IMAX; return 0; } -static DEVICE_ATTR_RW(lpfc_fcp_imax); +static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, + lpfc_fcp_imax_show, lpfc_fcp_imax_store); + +/* + * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a + * single handler call which should request a polled completion rather + * than re-enabling interrupts. + */ +LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, + LPFC_CQ_MIN_THRESHOLD_TO_POLL, + LPFC_CQ_MAX_THRESHOLD_TO_POLL, + "CQE Processing Threshold to enable Polling"); /** * lpfc_cq_max_proc_limit_store @@ -5205,16 +5499,6 @@ MODULE_PARM_DESC(lpfc_cq_max_proc_limit, "CQ processing"); lpfc_param_show(cq_max_proc_limit) -/* - * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a - * single handler call which should request a polled completion rather - * than re-enabling interrupts. - */ -LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, - LPFC_CQ_MIN_THRESHOLD_TO_POLL, - LPFC_CQ_MAX_THRESHOLD_TO_POLL, - "CQE Processing Threshold to enable Polling"); - /** * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit * @phba: lpfc_hba pointer. 
@@ -5250,7 +5534,8 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) return 0; } -static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); +static DEVICE_ATTR(lpfc_cq_max_proc_limit, S_IRUGO | S_IWUSR, + lpfc_cq_max_proc_limit_show, lpfc_cq_max_proc_limit_store); /** * lpfc_state_show - Display current driver CPU affinity @@ -5276,12 +5561,12 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, switch (phba->cfg_fcp_cpu_map) { case 0: - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "fcp_cpu_map: No mapping (%d)\n", phba->cfg_fcp_cpu_map); return len; case 1: - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "fcp_cpu_map: HBA centric mapping (%d): " "%d of %d CPUs online from %d possible CPUs\n", phba->cfg_fcp_cpu_map, num_online_cpus(), @@ -5290,15 +5575,15 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, break; } - while (phba->sli4_hba.curr_disp_cpu < - phba->sli4_hba.num_possible_cpu) { + while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_possible_cpu) { cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; + /* margin should fit in this and the truncated message */ if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) len += scnprintf(buf + len, PAGE_SIZE - len, "CPU %02d not present\n", phba->sli4_hba.curr_disp_cpu); - else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) { + else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) len += scnprintf( buf + len, PAGE_SIZE - len, @@ -5311,10 +5596,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, else len += scnprintf( buf + len, PAGE_SIZE - len, - "CPU %02d EQ %04d hdwq %04d " + "CPU %02d EQ None hdwq %04d " "physid %d coreid %d ht %d ua %d\n", phba->sli4_hba.curr_disp_cpu, - cpup->eq, cpup->hdwq, cpup->phys_id, + cpup->hdwq, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); @@ -5329,7 +5614,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), - cpup->irq); + lpfc_get_irq(cpup->eq)); else len += scnprintf( buf + len, PAGE_SIZE - len, @@ -5340,7 +5625,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), - cpup->irq); + lpfc_get_irq(cpup->eq)); } phba->sli4_hba.curr_disp_cpu++; @@ -5421,14 +5706,15 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) } lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3326 lpfc_fcp_cpu_map: %d out of range, using " - "default\n", val); + "3326 "LPFC_DRIVER_NAME"_fcp_cpu_map: %d out " + "of range, using default\n", val); phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP; return 0; } -static DEVICE_ATTR_RW(lpfc_fcp_cpu_map); +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. @@ -5452,6 +5738,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536, "First burst size for Targets that support first burst"); +#ifdef BUILD_NVME /* * lpfc_nvmet_fb_size: NVME Target mode supported first burst size. 
* When the driver is configured as an NVME target, this value is @@ -5474,6 +5761,7 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, */ LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, "Enable First Burst feature for NVME Initiator."); +#endif /* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue @@ -5511,7 +5799,9 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val) return 0; } lpfc_vport_param_store(max_scsicmpl_time); -static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); +static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR, + lpfc_max_scsicmpl_time_show, + lpfc_max_scsicmpl_time_store); /* # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value @@ -5628,6 +5918,18 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); */ LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); +/*
+ * lpfc_enable_e2e: Enable E2E for ESM support
+ * 0 = disabled
+ * 1 = enabled (default)
+ * This will affect whether the driver registers a vendor specific
+ * E2EM port parameter with the Fabric Mgmt server.
+ * Value range is [0,1].
+ */
+LPFC_ATTR_R(enable_e2e, 1, 0, 1,
+ "E2E: 1=enable 0=disable. Change only under direction of "
+ "Emulex Tech. Support");
+ /* # Specifies the maximum number of ELS cmds we can have outstanding (for # discovery). Value range is [1,64]. Default value = 32. @@ -5683,6 +5985,7 @@ LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); +#ifdef BUILD_NVME /* * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs * @@ -5705,13 +6008,14 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1, */ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); +#endif /* * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues * the driver will advertise it supports to the SCSI layer. * * 0 = Set nr_hw_queues by the number of CPUs or HW queues. - * 1,128 = Manually specify the maximum nr_hw_queue value to be set, + * 1,256 = Manually specify the maximum nr_hw_queue value to be set, * * Value range is [0,256]. Default value is 8. */ @@ -5729,30 +6033,192 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. * * 0 = Configure the number of hdw queues to the number of active CPUs. - * 1,128 = Manually specify how many hdw queues to use. + * 1,256 = Manually specify how many hdw queues to use. * - * Value range is [0,128]. Default value is 0. + * Value range is [0,256]. Default value is 0. */ LPFC_ATTR_R(hdw_queue, LPFC_HBA_HDWQ_DEF, LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, "Set the number of I/O Hardware Queues"); +#if IS_ENABLED(CONFIG_X86) +/** + * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on + * irq_chann_mode + * @phba: Pointer to HBA context object.
+ **/ +static void +lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba) +{ + unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE; + const struct cpumask *sibling_mask; + struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask; + + cpumask_clear(aff_mask); + + if (phba->irq_chann_mode == NUMA_MODE) { + /* Check if we're a NUMA architecture */ + numa_node = dev_to_node(&phba->pcidev->dev); + if (numa_node == NUMA_NO_NODE) { + phba->irq_chann_mode = NORMAL_MODE; + return; + } + } + + for_each_possible_cpu(cpu) { + switch (phba->irq_chann_mode) { + case NUMA_MODE: + if (cpu_to_node(cpu) == numa_node) + cpumask_set_cpu(cpu, aff_mask); + break; + case NHT_MODE: + sibling_mask = topology_sibling_cpumask(cpu); + first_cpu = cpumask_first(sibling_mask); + if (first_cpu < nr_cpu_ids) + cpumask_set_cpu(first_cpu, aff_mask); + break; + default: + break; + } + } +} +#endif + +static void +lpfc_assign_default_irq_chann(struct lpfc_hba *phba) +{ +#if IS_ENABLED(CONFIG_X86) + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + /* If AMD architecture, then default is NUMA_MODE */ + phba->irq_chann_mode = NUMA_MODE; + break; + case X86_VENDOR_INTEL: + /* If Intel architecture, then default is no hyperthread mode */ + phba->irq_chann_mode = NHT_MODE; + break; + default: + phba->irq_chann_mode = NORMAL_MODE; + break; + } + lpfc_cpumask_irq_mode_init(phba); +#else + phba->irq_chann_mode = NORMAL_MODE; +#endif +} + /* * lpfc_irq_chann: Set the number of IRQ vectors that are available * for Hardware Queues to utilize. This also will map to the number * of EQ / MSI-X vectors the driver will create. This should never be * more than the number of Hardware Queues * - * 0 = Configure number of IRQ Channels to the number of active CPUs. - * 1,128 = Manually specify how many IRQ Channels to use. + * 0 = Configure number of IRQ Channels to: + * if AMD architecture, number of CPUs on HBA's NUMA node + * if Intel architecture, number of physical CPUs. + * otherwise, number of active CPUs. + * [1,256] = Manually specify how many IRQ Channels to use. * - * Value range is [0,128]. Default value is 0. + * Value range is [0,256]. Default value is [0]. */ -LPFC_ATTR_R(irq_chann, - LPFC_HBA_HDWQ_DEF, - LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, - "Set the number of I/O IRQ Channels"); +static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; +module_param(lpfc_irq_chann, uint, 0444); +MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); + +/* lpfc_irq_chann_init - Set the hba irq_chann initial value + * @phba: lpfc_hba pointer. + * @val: contains the initial value + * + * Description: + * Validates the initial value is within range and assigns it to the + * adapter. If not in range, an error message is posted and the + * default value is assigned. 
+ * + * Returns: + * zero if value is in range and is set + * -EINVAL if value was out of range + **/ +static int +lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) +{ + const struct cpumask *aff_mask; + + if (phba->cfg_use_msi != 2) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8532 use_msi = %u ignoring cfg_irq_numa\n", + phba->cfg_use_msi); + phba->irq_chann_mode = NORMAL_MODE; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + return 0; + } + + /* Check if default setting was passed */ + if (val == LPFC_IRQ_CHANN_DEF) + lpfc_assign_default_irq_chann(phba); + + if (phba->irq_chann_mode != NORMAL_MODE) { + aff_mask = &phba->sli4_hba.irq_aff_mask; + + if (cpumask_empty(aff_mask)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8533 Could not identify CPUS for " + "mode %d, ignoring\n", + phba->irq_chann_mode); + phba->irq_chann_mode = NORMAL_MODE; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + } else { + phba->cfg_irq_chann = cpumask_weight(aff_mask); + + /* If no hyperthread mode, then set hdwq count to + * aff_mask weight as well + */ + if (phba->irq_chann_mode == NHT_MODE) + phba->cfg_hdw_queue = phba->cfg_irq_chann; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8543 lpfc_irq_chann set to %u " + "(mode: %d)\n", phba->cfg_irq_chann, + phba->irq_chann_mode); + } + } else { + if (val > LPFC_IRQ_CHANN_MAX) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8545 lpfc_irq_chann attribute cannot " + "be set to %u, allowed range is " + "[%u,%u]\n", + val, + LPFC_IRQ_CHANN_MIN, + LPFC_IRQ_CHANN_MAX); + phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF; + return -EINVAL; + } + phba->cfg_irq_chann = val; + } + + return 0; +} + +/** + * lpfc_irq_chann_show - Display value of irq_chann + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains a string with the list sizes + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); +} + +static DEVICE_ATTR_RO(lpfc_irq_chann); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. @@ -5786,6 +6252,30 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); */ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); +
+/*
+ * For T10 DIF / protection data support, the driver supports 4 modes
+ * of operation.
+ *
+ * Mode 1: (lpfc_enable_bg=1 lpfc_external_dif=1)
+ * All normal T10 DIF devices are supported.
+ * External DIF devices are supported.
+ *
+ * Mode 2: (lpfc_enable_bg=0 lpfc_external_dif=1)
+ * If you don't want to have the extra overhead of the upper SCSI Layer
+ * supporting T10-DIF, but you still want to support External DIF devices.
+ * Normal T10 DIF devices are NOT supported.
+ * External DIF devices are supported.
+ *
+ * Mode 3: (lpfc_enable_bg=1 lpfc_external_dif=0)
+ * All normal T10 DIF devices are supported.
+ * External DIF devices are NOT supported.
+ *
+ * Mode 4: (lpfc_enable_bg=0 lpfc_external_dif=0)
+ * No normal T10-DIF and no external DIF devices are supported.
+ * These would be the driver default values for these module parameters.
+ */ + /* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) # 0 = BlockGuard disabled (default) @@ -5794,11 +6284,21 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); +/*
+# lpfc_external_dif: Enable External DIF support on select HBAs
+# Only supported on SLI4 FC - if_type 2 HBAs
+# 0 = External DIF disabled (default)
+# 1 = External DIF enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(external_dif, 0, 0, 1,
+ "External T10-DIF Support, on select HBAs");
+ /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the # SCSI mid-layer -# - Only meaningful if BG is turned on (lpfc_enable_bg=1). +# - Only meaningful if BG is turned on, lpfc_enable_bg = 1. # - Allows you to ultimately specify which profiles to use # - Default will result in registering capabilities for all profiles. # - SHOST_DIF_TYPE1_PROTECTION 1 @@ -5824,6 +6324,7 @@ LPFC_ATTR(prot_mask, # - Bit mask of protection guard types to register with the SCSI mid-layer # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum # - Allows you to ultimately specify which profiles to use +# - Only meaningful if BG is turned on, lpfc_enable_bg = 1. # - Default will result in registering capabilities for all guard types # */ @@ -5888,7 +6389,7 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr, return len; } -static DEVICE_ATTR_RO(lpfc_sg_seg_cnt); +static DEVICE_ATTR(lpfc_sg_seg_cnt, S_IRUGO, lpfc_sg_seg_cnt_show, NULL); /** * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value @@ -5919,6 +6420,20 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) return -EINVAL; } +#define THROTTLE_CNT_MIN 1
+#define THROTTLE_CNT_MAX 1000
+#define THROTTLE_CNT_DEF 10
+LPFC_ATTR_RW(throttle_log_cnt, THROTTLE_CNT_DEF, THROTTLE_CNT_MIN,
+ THROTTLE_CNT_MAX, "Do not exceed this number of messages "
+ "logged within throttle_log_time");
+
+#define THROTTLE_TIME_MIN 1
+#define THROTTLE_TIME_MAX 60
+#define THROTTLE_TIME_DEF 1
+LPFC_ATTR_RW(throttle_log_time, THROTTLE_TIME_DEF, THROTTLE_TIME_MIN,
+ THROTTLE_TIME_MAX, "Do not exceed throttle_log_cnt within "
+ "this limit (seconds)");
+ /* * lpfc_enable_mds_diags: Enable MDS Diagnostics * 0 = MDS Diagnostics disabled (default) @@ -5933,7 +6448,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); * [1-4] = Multiple of 1/4th Mb of host memory for FW logging * Value range [0..4]. Default value is 0 */ -LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); +LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); +lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For ras_fwlog_buffsize size change we still need to free and
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+ */ + phba->cfg_ras_fwlog_buffsize = val; + if (state == ACTIVE) { + lpfc_ras_stop_fwlog(phba); + lpfc_sli4_ras_dma_free(phba); + } + + lpfc_sli4_ras_init(phba); + if (phba->ras_fwlog.ras_enabled) + ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, + LPFC_RAS_ENABLE_LOGGING); + return ret; +} + +lpfc_param_store(ras_fwlog_buffsize); +static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); /* * lpfc_ras_fwlog_level: Firmware logging verbosity level @@ -5960,16 +6521,362 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function"); */ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery"); +/* Hidden module parameter */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling primitive freq");
+
+/* Hidden module parameter */
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+/* Hidden module parameter */
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
+#ifndef BUILD_BRCMFCOE /* * lpfc_enable_dpp: Enable DPP on G7 * 0 = DPP on G7 disabled * 1 = DPP on G7 enabled (default) - * Value range is [0,1]. Default value is 1. + * Value range is [0,1]. Default value is 1 on X86, 0 on other architectures. */ +#ifdef CONFIG_X86 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); +#else +LPFC_ATTR_RW(enable_dpp, 0, 0, 1, "Enable Direct Packet Push"); +#endif
+
+/*
+ * lpfc_enable_scsi_mq: Enable SCSI MQ support in lpfc
+ * 0 = disabled
+ * 1 = enabled (default)
+ * Value range is [0,1]. Default value is 1.
+ */
+LPFC_ATTR_R(enable_scsi_mq, 1, 0, 1, "Enable SCSI MQ support in LPFC");
+
+/*
+ * lpfc_enable_auth: Enable DH-CHAP Authentication
+ * 0 = Authentication is disabled (default)
+ * 1 = Authentication is enabled
+ * Value range is [0,1]. Default value is 0.
+ */
+static uint lpfc_enable_auth = LPFC_AUTH_DISABLE;
+module_param(lpfc_enable_auth, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_enable_auth, "Enable DH-CHAP Authentication");
+lpfc_param_show(enable_auth);
+lpfc_param_init(enable_auth, LPFC_AUTH_DISABLE, LPFC_AUTH_DISABLE,
+ LPFC_AUTH_ENABLE);
+
+/**
+ * lpfc_enable_auth_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description: Sets the parameter to the new value. If the transition
+ * is to enable authentication, the fabric node's authentication state
+ * is reset and a LIP may be issued to restart discovery.
+ * + * Returns: + * zero on success + * -EINVAL if val is invalid + */ +static int +lpfc_enable_auth_set(struct lpfc_hba *phba, uint val) +{ + struct lpfc_vport *vport = phba->pport; + struct lpfc_nodelist *ndlp; + + if (!lpfc_rangecheck(val, LPFC_AUTH_DISABLE, LPFC_AUTH_ENABLE) || + phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) + return -EINVAL; + + if (val == phba->cfg_enable_auth) + return 0; + if (val == LPFC_AUTH_DISABLE) { + phba->cfg_enable_auth = val; + return 0; + } else if (val == LPFC_AUTH_ENABLE) { + phba->cfg_enable_auth = val; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) + ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN; + + if (!phba->fc_fabparam.cmn.fcsp) + lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); + + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3033 lpfc_enable_auth attribute cannot be set to " + "%d, allowed range is [0, 1]\n", val); + return -EINVAL; +} + +lpfc_param_store(enable_auth); +static DEVICE_ATTR(lpfc_enable_auth, S_IRUGO | S_IWUSR, + lpfc_enable_auth_show, lpfc_enable_auth_store); + +static ssize_t +lpfc_authenticate_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + struct lpfc_name wwpn; + int status; + unsigned int cnt = count; + + if (!phba->cfg_enable_auth) + return -EACCES; + + if (lpfc_wwn_set(buf, cnt, wwpn.u.wwn)) + return -EINVAL; + + if (vport->port_state == LPFC_VPORT_FAILED) + return -ENXIO; + + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || + (!phba->cfg_enable_auth)) + return -EPERM; + + if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN) + ndlp = lpfc_findnode_did(vport, Fabric_DID); + else + ndlp = lpfc_findnode_wwpn(vport, &wwpn); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return -EPERM; + + /* If vport already in the middle of authentication do not restart */ + if ((ndlp->dhc_cfg.msg == LPFC_AUTH_NEGOTIATE) || + (ndlp->dhc_cfg.msg == LPFC_DHCHAP_CHALLENGE) || + (ndlp->dhc_cfg.msg == LPFC_DHCHAP_REPLY)) + return -EAGAIN; + + status = lpfc_auth_start(vport, ndlp); + if (status) + return status; + return strlen(buf); +} +static DEVICE_ATTR(lpfc_authenticate, S_IWUSR, + NULL, lpfc_authenticate_store); + +static ssize_t +lpfc_auth_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_nodelist *ndlp; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + switch (ndlp->dhc_cfg.state) { + case LPFC_AUTH_UNKNOWN: + if (ndlp->dhc_cfg.msg == LPFC_AUTH_NEGOTIATE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_CHALLENGE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_REPLY || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_SUCCESS_REPLY) + return scnprintf(buf, PAGE_SIZE, "Authenticating\n"); + else + return scnprintf(buf, PAGE_SIZE, "Not Authenticated\n"); + case LPFC_AUTH_FAIL: + return scnprintf(buf, PAGE_SIZE, "Failed\n"); + case LPFC_AUTH_SUCCESS: + if (ndlp->dhc_cfg.msg == LPFC_AUTH_NEGOTIATE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_CHALLENGE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_REPLY || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_SUCCESS_REPLY) + return 
scnprintf(buf, PAGE_SIZE, "Authenticating\n"); + else if (ndlp->dhc_cfg.msg == LPFC_DHCHAP_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "Authenticated\n"); + } + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_auth_dir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + if (!phba->cfg_enable_auth || + ndlp->dhc_cfg.state != LPFC_AUTH_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + if (ndlp->dhc_cfg.direction == AUTH_DIRECTION_LOCAL) + return scnprintf(buf, PAGE_SIZE, + "Local Authentication complete\n"); + else if (ndlp->dhc_cfg.direction == AUTH_DIRECTION_REMOTE) + return scnprintf(buf, PAGE_SIZE, + "Remote Authentication complete\n"); + else if (ndlp->dhc_cfg.direction == AUTH_DIRECTION_BIDI) + return scnprintf(buf, PAGE_SIZE, + "Bidirectional Authentication complete\n"); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_auth_protocol_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + if (phba->cfg_enable_auth && + ndlp->dhc_cfg.state == LPFC_AUTH_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "1 (DH-CHAP)\n"); + else + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_auth_dhgroup_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + if (!phba->cfg_enable_auth || + ndlp->dhc_cfg.state != LPFC_AUTH_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + switch (ndlp->dhc_cfg.dh_grp_id) { + case DH_GROUP_NULL: + return scnprintf(buf, PAGE_SIZE, "0 (NULL)\n"); + case DH_GROUP_1024: + return scnprintf(buf, PAGE_SIZE, "1 (1024)\n"); + case DH_GROUP_1280: + return scnprintf(buf, PAGE_SIZE, "2 (1280)\n"); + case DH_GROUP_1536: + return scnprintf(buf, PAGE_SIZE, "3 (1536)\n"); + case DH_GROUP_2048: + return scnprintf(buf, PAGE_SIZE, "4 (2048)\n"); + } + return scnprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n", + ndlp->dhc_cfg.dh_grp_id); +} + +static ssize_t +lpfc_auth_hash_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, 
"Unknown\n"); + + if (!phba->cfg_enable_auth || + ndlp->dhc_cfg.state != LPFC_AUTH_SUCCESS) + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); + + switch (ndlp->dhc_cfg.hash_id) { + case HASH_MD5: + return scnprintf(buf, PAGE_SIZE, "5 (MD5)\n"); + case HASH_SHA1: + return scnprintf(buf, PAGE_SIZE, "6 (SHA1)\n"); + } + return scnprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n", + ndlp->dhc_cfg.hash_id); +} + +static ssize_t +lpfc_auth_last_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + unsigned long last_time; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "%d\n", -1); + + if (!phba->cfg_enable_auth || ndlp->dhc_cfg.last_auth == 0) + return scnprintf(buf, PAGE_SIZE, "%d\n", -1); + + last_time = jiffies_to_msecs((long)jiffies - + (long)ndlp->dhc_cfg.last_auth) / 1000; + return scnprintf(buf, PAGE_SIZE, "%ld\n", last_time); +} + +static ssize_t +lpfc_auth_next_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + unsigned long next_time; + + /* for now assume we're only authenticating the fabric */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return scnprintf(buf, PAGE_SIZE, "%d\n", -1); + + if (!phba->cfg_enable_auth || + ndlp->dhc_cfg.last_auth == 0 || + ndlp->dhc_cfg.reauth_interval == 0) + return scnprintf(buf, PAGE_SIZE, "%d\n", -1); + + next_time = ndlp->dhc_cfg.last_auth + + msecs_to_jiffies(ndlp->dhc_cfg.reauth_interval * 60000); + if (time_after(jiffies, next_time)) + return scnprintf(buf, PAGE_SIZE, "%d\n", -1); + + next_time = jiffies_to_msecs((long)next_time - (long)jiffies) / 1000; + return scnprintf(buf, PAGE_SIZE, "%ld\n", next_time); +} +static DEVICE_ATTR(auth_state, S_IRUGO, lpfc_auth_state_show, NULL); +static DEVICE_ATTR(auth_dir, S_IRUGO, lpfc_auth_dir_show, NULL); +static DEVICE_ATTR(auth_protocol, S_IRUGO, lpfc_auth_protocol_show, NULL); +static DEVICE_ATTR(auth_dhgroup, S_IRUGO, lpfc_auth_dhgroup_show, NULL); +static DEVICE_ATTR(auth_hash, S_IRUGO, lpfc_auth_hash_show, NULL); +static DEVICE_ATTR(auth_last, S_IRUGO, lpfc_auth_last_show, NULL); +static DEVICE_ATTR(auth_next, S_IRUGO, lpfc_auth_next_show, NULL); + +#endif struct device_attribute *lpfc_hba_attrs[] = { +#ifdef BUILD_NVME &dev_attr_nvme_info, +#endif &dev_attr_scsi_stat, &dev_attr_bg_info, &dev_attr_bg_guard_err, @@ -6015,10 +6922,14 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_multi_ring_rctl, &dev_attr_lpfc_multi_ring_type, &dev_attr_lpfc_fdmi_on, + &dev_attr_lpfc_enable_e2e, &dev_attr_lpfc_enable_SmartSAN, &dev_attr_lpfc_max_luns, &dev_attr_lpfc_enable_npiv, &dev_attr_lpfc_fcf_failover_policy, +#ifndef NO_APEX + &dev_attr_lpfc_enable_fcp_priority, +#endif &dev_attr_lpfc_enable_rrq, &dev_attr_nport_evt_cnt, &dev_attr_board_mode, @@ -6034,25 +6945,27 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, +#ifdef BUILD_NVME &dev_attr_lpfc_nvme_oas, &dev_attr_lpfc_nvme_embed_cmd, +#endif &dev_attr_lpfc_fcp_imax, - 
&dev_attr_lpfc_force_rscn, &dev_attr_lpfc_cq_poll_threshold, &dev_attr_lpfc_cq_max_proc_limit, + &dev_attr_lpfc_force_rscn, &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_mq_threshold, &dev_attr_lpfc_hdw_queue, &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, +#ifdef BUILD_NVME &dev_attr_lpfc_nvmet_mrq, &dev_attr_lpfc_nvmet_mrq_post, &dev_attr_lpfc_nvme_enable_fb, &dev_attr_lpfc_nvmet_fb_size, +#endif &dev_attr_lpfc_enable_bg, - &dev_attr_lpfc_soft_wwnn, - &dev_attr_lpfc_soft_wwpn, - &dev_attr_lpfc_soft_wwn_enable, + &dev_attr_lpfc_external_dif, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, &dev_attr_lpfc_EnableXLane, @@ -6071,22 +6984,36 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_sriov_nr_virtfn, &dev_attr_lpfc_req_fw_upgrade, &dev_attr_lpfc_suppress_link_up, - &dev_attr_lpfc_iocb_cnt, &dev_attr_iocb_hw, + &dev_attr_pls, + &dev_attr_pt, &dev_attr_txq_hw, &dev_attr_txcmplq_hw, - &dev_attr_lpfc_fips_level, - &dev_attr_lpfc_fips_rev, - &dev_attr_lpfc_dss, &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, &dev_attr_lpfc_xlane_supported, + &dev_attr_lpfc_throttle_log_cnt, + &dev_attr_lpfc_throttle_log_time, &dev_attr_lpfc_enable_mds_diags, &dev_attr_lpfc_ras_fwlog_buffsize, &dev_attr_lpfc_ras_fwlog_level, &dev_attr_lpfc_ras_fwlog_func, &dev_attr_lpfc_enable_bbcr, +#ifndef BUILD_BRCMFCOE &dev_attr_lpfc_enable_dpp, + &dev_attr_lpfc_enable_scsi_mq, + &dev_attr_lpfc_enable_auth, + &dev_attr_lpfc_authenticate, + + &dev_attr_auth_state, + &dev_attr_auth_dir, + &dev_attr_auth_protocol, + &dev_attr_auth_dhgroup, + &dev_attr_auth_hash, + &dev_attr_auth_last, + &dev_attr_auth_next, +#endif + &dev_attr_cmf_info, NULL, }; @@ -6113,8 +7040,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_static_vport, - &dev_attr_lpfc_fips_level, - &dev_attr_lpfc_fips_rev, + &dev_attr_cmf_info, NULL, }; @@ -6510,11 +7436,17 @@ lpfc_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } - } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { + } else if ((lpfc_is_link_up(phba)) && (phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_1GBPS: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; case LPFC_ASYNC_LINK_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + fc_host_speed(shost) = FC_PORTSPEED_20GBIT; + break; case LPFC_ASYNC_LINK_SPEED_25GBPS: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; @@ -6583,6 +7515,7 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; + unsigned long seconds; int rc = 0; /* @@ -6683,7 +7616,12 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->dumped_frames = -1; - hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; + seconds = get_seconds(); + if (seconds < psli->stats_start) + hs->seconds_since_last_reset = seconds + + ((unsigned long)-1 - psli->stats_start); + else + hs->seconds_since_last_reset = seconds - psli->stats_start; mempool_free(pmboxq, phba->mbox_mem_pool); @@ -6762,7 +7700,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) else lso->link_events = (phba->fc_eventTag >> 1); - psli->stats_start = ktime_get_seconds(); + psli->stats_start = get_seconds(); mempool_free(pmboxq, phba->mbox_mem_pool); @@ -6857,9 +7795,9 @@ lpfc_get_starget_port_name(struct scsi_target *starget) static void 
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { +#if (IS_ENABLED(CONFIG_NVME_FC)) && defined(BUILD_NVME) struct lpfc_rport_data *rdata = rport->dd_data; struct lpfc_nodelist *ndlp = rdata->pnode; -#if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *nrport = NULL; #endif @@ -6868,6 +7806,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) else rport->dev_loss_tmo = 1; +#if (IS_ENABLED(CONFIG_NVME_FC)) && defined(BUILD_NVME) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { dev_info(&rport->dev, "Cannot find remote node to " "set rport dev loss tmo, port_id x%x\n", @@ -6875,7 +7814,6 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) return; } -#if (IS_ENABLED(CONFIG_NVME_FC)) nrport = lpfc_ndlp_get_nrport(ndlp); if (nrport && nrport->remoteport) @@ -7099,6 +8037,9 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba) void lpfc_get_cfgparam(struct lpfc_hba *phba) { + uint32_t if_type; + + lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); lpfc_ns_query_init(phba, lpfc_ns_query); lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); @@ -7116,22 +8057,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); +#ifndef NO_APEX + lpfc_enable_fcp_priority_init(phba, lpfc_enable_fcp_priority); +#endif lpfc_fdmi_on_init(phba, lpfc_fdmi_on); + lpfc_enable_e2e_init(phba, lpfc_enable_e2e); lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); +#ifdef BUILD_NVME lpfc_nvme_oas_init(phba, lpfc_nvme_oas); lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd); +#endif lpfc_fcp_imax_init(phba, lpfc_fcp_imax); - lpfc_force_rscn_init(phba, lpfc_force_rscn); lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold); lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit); + lpfc_force_rscn_init(phba, lpfc_force_rscn); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_EnableXLane_init(phba, lpfc_EnableXLane); - if (phba->sli_rev != LPFC_SLI_REV4) - phba->cfg_EnableXLane = 0; lpfc_XLanePriority_init(phba, lpfc_XLanePriority); memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); @@ -7143,38 +8088,60 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_bg_init(phba, lpfc_enable_bg); lpfc_prot_mask_init(phba, lpfc_prot_mask); lpfc_prot_guard_init(phba, lpfc_prot_guard); - if (phba->sli_rev == LPFC_SLI_REV4) - phba->cfg_poll = 0; - else + lpfc_external_dif_init(phba, lpfc_external_dif); + + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_EnableXLane = 0; + phba->cfg_external_dif = 0; + phba->cfg_enable_e2e = 0; phba->cfg_poll = lpfc_poll; + } else { + /* Only supported on SLI4 FC - if_type 2/6 */ + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + if (if_type < LPFC_SLI_INTF_IF_TYPE_2) + phba->cfg_external_dif = 0; + phba->cfg_poll = 0; + } +#if defined(BUILD_CITRIX_XS) + phba->cfg_enable_bg = 0; +#endif /* Get the function mode */ lpfc_get_hba_function_mode(phba); /* BlockGuard allowed for FC only. 
*/ - if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { + if ((phba->cfg_enable_bg || phba->cfg_external_dif) && + phba->hba_flag & HBA_FCOE_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0581 BlockGuard feature not supported\n"); /* If set, clear the BlockGuard support param */ phba->cfg_enable_bg = 0; - } else if (phba->cfg_enable_bg) { + } else if (phba->cfg_enable_bg || phba->cfg_external_dif) phba->sli3_options |= LPFC_SLI3_BG_ENABLED; - } - lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); +#ifdef BUILD_NVME lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); +#endif lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); +#ifndef BUILD_BRCMFCOE lpfc_enable_dpp_init(phba, lpfc_enable_dpp); + lpfc_enable_scsi_mq_init(phba, lpfc_enable_scsi_mq); + lpfc_enable_auth_init(phba, lpfc_enable_auth); +#endif + + phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF; + phba->cmf_active_mode = LPFC_CFG_OFF; if (phba->sli_rev != LPFC_SLI_REV4) { /* NVME only supported on SLI4 */ @@ -7183,6 +8150,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; phba->cfg_enable_bbcr = 0; phba->cfg_xri_rebalancing = 0; +#ifndef BUILD_BRCMFCOE + phba->cfg_enable_auth = LPFC_AUTH_DISABLE; +#endif } else { /* We MUST have FCP support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) @@ -7190,10 +8160,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) } phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 
0 : 1; - phba->cfg_enable_pbde = 0; /* A value of 0 means use the number of CPUs found in the system */ + if (phba->cfg_fcp_mq_threshold == 0) + phba->cfg_fcp_mq_threshold = phba->sli4_hba.num_present_cpu; if (phba->cfg_hdw_queue == 0) phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; if (phba->cfg_irq_chann == 0) @@ -7201,19 +8172,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) if (phba->cfg_irq_chann > phba->cfg_hdw_queue) phba->cfg_irq_chann = phba->cfg_hdw_queue; - phba->cfg_soft_wwnn = 0L; - phba->cfg_soft_wwpn = 0L; +#ifdef BUILD_NVME +#endif lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); - lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); - lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); lpfc_delay_discovery_init(phba, lpfc_delay_discovery); lpfc_sli_mode_init(phba, lpfc_sli_mode); - phba->cfg_enable_dss = 1; + lpfc_throttle_log_cnt_init(phba, lpfc_throttle_log_cnt); + lpfc_throttle_log_time_init(phba, lpfc_throttle_log_time); lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize); lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level); @@ -7230,12 +8200,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) { - if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) + int logit = 0; + + if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) { phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; - if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) + logit = 1; + } + if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) { phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; - if (phba->cfg_irq_chann > phba->cfg_hdw_queue) + logit = 1; + } + if (phba->cfg_irq_chann > phba->cfg_hdw_queue) { phba->cfg_irq_chann = phba->cfg_hdw_queue; + logit = 1; + } + if (logit) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2006 Reducing Queues - CPU limitation: " + "IRQ %d HDWQ %d\n", + phba->cfg_irq_chann, + phba->cfg_hdw_queue); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && phba->nvmet_support) { @@ -7256,11 +8240,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) } if (!phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ - if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) { - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) { + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6018 Adjust lpfc_nvmet_mrq to %d\n", phba->cfg_nvmet_mrq); diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index 9659a8fff971..a19c6312f9f8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -21,6 +21,8 @@ * included with this package. * *******************************************************************/ +#ifndef BUILD_BRCMFCOE + #define LPFC_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ module_param(lpfc_##name, uint, S_IRUGO);\ @@ -126,3 +128,251 @@ lpfc_vport_param_set(name, defval, minval, maxval)\ lpfc_vport_param_store(name)\ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ lpfc_##name##_show, lpfc_##name##_store) + +#else + +#define LPFC_ATTR(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_init(name, defval, minval, maxval) + +#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO , lpfc_##name##_show, NULL) + +#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO , lpfc_##name##_show, NULL) + +#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_init(name, defval, minval, maxval) + +#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO , lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ +static uint64_t brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, ullong, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, 
defval, minval, maxval)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO , lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO , lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint brcmfcoe_##name = defval;\ +module_param(brcmfcoe_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(brcmfcoe_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(brcmfcoe_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define lpfc_drvr_version brcmfcoe_drvr_version +#define lpfc_enable_fip brcmfcoe_enable_fip +#define lpfc_temp_sensor brcmfcoe_temp_sensor +#define lpfc_sriov_hw_max_virtfn brcmfcoe_sriov_hw_max_virtfn +#define lpfc_xlane_supported brcmfcoe_xlane_supported + +#define dev_attr_lpfc_log_verbose dev_attr_brcmfcoe_log_verbose +#define dev_attr_lpfc_lun_queue_depth dev_attr_brcmfcoe_lun_queue_depth +#define dev_attr_lpfc_tgt_queue_depth dev_attr_brcmfcoe_tgt_queue_depth +#define dev_attr_lpfc_hba_queue_depth dev_attr_brcmfcoe_hba_queue_depth +#define dev_attr_lpfc_peer_port_login dev_attr_brcmfcoe_peer_port_login +#define dev_attr_lpfc_fcp_class dev_attr_brcmfcoe_fcp_class +#define dev_attr_lpfc_use_adisc dev_attr_brcmfcoe_use_adisc +#define dev_attr_lpfc_first_burst_size dev_attr_brcmfcoe_first_burst_size +#define dev_attr_lpfc_ack0 dev_attr_brcmfcoe_ack0 +#define dev_attr_lpfc_xri_rebalancing dev_attr_brcmfcoe_xri_rebalancing +#define dev_attr_lpfc_scan_down dev_attr_brcmfcoe_scan_down +#define dev_attr_lpfc_fcp_io_sched dev_attr_brcmfcoe_fcp_io_sched +#define dev_attr_lpfc_ns_query dev_attr_brcmfcoe_ns_query +#define dev_attr_lpfc_fcp2_no_tgt_reset dev_attr_brcmfcoe_fcp2_no_tgt_reset +#define dev_attr_lpfc_cr_delay dev_attr_brcmfcoe_cr_delay +#define dev_attr_lpfc_cr_count dev_attr_brcmfcoe_cr_count +#define dev_attr_lpfc_multi_ring_support dev_attr_brcmfcoe_multi_ring_support +#define dev_attr_lpfc_multi_ring_rctl dev_attr_brcmfcoe_multi_ring_rctl +#define dev_attr_lpfc_multi_ring_type dev_attr_brcmfcoe_multi_ring_type +#define dev_attr_lpfc_fdmi_on dev_attr_brcmfcoe_fdmi_on +#define dev_attr_lpfc_enable_SmartSAN dev_attr_brcmfcoe_enable_SmartSAN +#define dev_attr_lpfc_max_luns dev_attr_brcmfcoe_max_luns +#define dev_attr_lpfc_fcf_failover_policy dev_attr_brcmfcoe_fcf_failover_policy +#define dev_attr_lpfc_enable_fcp_priority dev_attr_brcmfcoe_enable_fcp_priority +#define dev_attr_lpfc_poll_tmo dev_attr_brcmfcoe_poll_tmo +#define dev_attr_lpfc_task_mgmt_tmo dev_attr_brcmfcoe_task_mgmt_tmo +#define dev_attr_lpfc_enable_npiv dev_attr_brcmfcoe_enable_npiv +#define dev_attr_lpfc_use_msi dev_attr_brcmfcoe_use_msi 
+#define dev_attr_lpfc_cq_max_proc_limit dev_attr_brcmfcoe_cq_max_proc_limit +#define dev_attr_lpfc_fcp_imax dev_attr_brcmfcoe_fcp_imax +#define dev_attr_lpfc_cq_poll_threshold dev_attr_brcmfcoe_cq_poll_threshold +#define dev_attr_lpfc_fcp_mq_threshold dev_attr_brcmfcoe_fcp_mq_threshold +#define dev_attr_lpfc_hdw_queue dev_attr_brcmfcoe_hdw_queue +#define dev_attr_lpfc_irq_chann dev_attr_brcmfcoe_irq_chann +#define dev_attr_lpfc_enable_bg dev_attr_brcmfcoe_enable_bg +#define dev_attr_lpfc_external_dif dev_attr_brcmfcoe_external_dif +#define dev_attr_lpfc_enable_hba_reset dev_attr_brcmfcoe_enable_hba_reset +#define dev_attr_lpfc_enable_hba_heartbeat \ + dev_attr_brcmfcoe_enable_hba_heartbeat +#define dev_attr_lpfc_EnableXLane dev_attr_brcmfcoe_EnableXLane +#define dev_attr_lpfc_XLanePriority dev_attr_brcmfcoe_XLanePriority +#define dev_attr_lpfc_sg_seg_cnt dev_attr_brcmfcoe_sg_seg_cnt +#define dev_attr_lpfc_enable_rrq dev_attr_brcmfcoe_enable_rrq +#define dev_attr_lpfc_suppress_link_up dev_attr_brcmfcoe_suppress_link_up +#define dev_attr_lpfc_throttle_log_cnt dev_attr_brcmfcoe_throttle_log_cnt +#define dev_attr_lpfc_throttle_log_time dev_attr_brcmfcoe_throttle_log_time +#define dev_attr_lpfc_enable_mds_diags dev_attr_brcmfcoe_enable_mds_diags +#define lpfc_enable_bbcr brcmfcoe_enable_bbcr +#define dev_attr_lpfc_enable_da_id dev_attr_brcmfcoe_enable_da_id +#define dev_attr_lpfc_suppress_rsp dev_attr_brcmfcoe_suppress_rsp +#define dev_attr_lpfc_enable_fc4_type dev_attr_brcmfcoe_enable_fc4_type +#define dev_attr_lpfc_xri_split dev_attr_brcmfcoe_xri_split +#define dev_attr_lpfc_ras_fwlog_buffsize dev_attr_brcmfcoe_ras_fwlog_buffsize +#define dev_attr_lpfc_ras_fwlog_level dev_attr_brcmfcoe_ras_fwlog_level +#define dev_attr_lpfc_ras_fwlog_func dev_attr_brcmfcoe_ras_fwlog_func + +#define lpfc_fcp_io_sched brcmfcoe_fcp_io_sched +#define lpfc_ns_query brcmfcoe_ns_query +#define lpfc_fcp2_no_tgt_reset brcmfcoe_fcp2_no_tgt_reset +#define lpfc_cr_delay brcmfcoe_cr_delay +#define lpfc_cr_count brcmfcoe_cr_count +#define lpfc_multi_ring_support brcmfcoe_multi_ring_support +#define lpfc_multi_ring_rctl brcmfcoe_multi_ring_rctl +#define lpfc_multi_ring_type brcmfcoe_multi_ring_type +#define lpfc_ack0 brcmfcoe_ack0 +#define lpfc_xri_rebalancing brcmfcoe_xri_rebalancing +#define lpfc_topology brcmfcoe_topology +#define lpfc_link_speed brcmfcoe_link_speed +#define lpfc_poll_tmo brcmfcoe_poll_tmo +#define lpfc_task_mgmt_tmo brcmfcoe_task_mgmt_tmo +#define lpfc_enable_npiv brcmfcoe_enable_npiv +#define lpfc_fcf_failover_policy brcmfcoe_fcf_failover_policy +#define lpfc_enable_rrq brcmfcoe_enable_rrq +#define lpfc_enable_fcp_priority brcmfcoe_enable_fcp_priority +#define lpfc_fdmi_on brcmfcoe_fdmi_on +#define lpfc_enable_SmartSAN brcmfcoe_enable_SmartSAN +#define lpfc_use_msi brcmfcoe_use_msi +#define lpfc_cq_max_proc_limit brcmfcoe_cq_max_proc_limit +#define lpfc_fcp_imax brcmfcoe_fcp_imax +#define lpfc_cq_poll_threshold brcmfcoe_cq_poll_threshold +#define lpfc_fcp_cpu_map brcmfcoe_fcp_cpu_map +#define lpfc_fcp_mq_threshold brcmfcoe_fcp_mq_threshold +#define lpfc_hdw_queue brcmfcoe_hdw_queue +#define lpfc_irq_chann brcmfcoe_irq_chann +#define lpfc_enable_hba_reset brcmfcoe_enable_hba_reset +#define lpfc_enable_hba_heartbeat brcmfcoe_enable_hba_heartbeat +#define lpfc_EnableXLane brcmfcoe_EnableXLane +#define lpfc_XLanePriority brcmfcoe_XLanePriority +#define lpfc_enable_bg brcmfcoe_enable_bg +#define lpfc_prot_mask brcmfcoe_prot_mask +#define lpfc_prot_guard brcmfcoe_prot_guard +#define lpfc_external_dif 
brcmfcoe_external_dif +#define lpfc_sg_seg_cnt brcmfcoe_sg_seg_cnt +#define lpfc_hba_queue_depth brcmfcoe_hba_queue_depth +#define lpfc_log_verbose brcmfcoe_log_verbose +#define lpfc_aer_support brcmfcoe_aer_support +#define lpfc_sriov_nr_virtfn brcmfcoe_sriov_nr_virtfn +#define lpfc_req_fw_upgrade brcmfcoe_req_fw_upgrade +#define lpfc_suppress_link_up brcmfcoe_suppress_link_up +#define lpfc_throttle_log_cnt brcmfcoe_throttle_log_cnt +#define lpfc_throttle_log_time brcmfcoe_throttle_log_time +#define lpfc_enable_mds_diags brcmfcoe_enable_mds_diags +#define lpfc_lun_queue_depth brcmfcoe_lun_queue_depth +#define lpfc_tgt_queue_depth brcmfcoe_tgt_queue_depth +#define lpfc_peer_port_login brcmfcoe_peer_port_login +#define lpfc_fcp_class brcmfcoe_fcp_class +#define lpfc_use_adisc brcmfcoe_use_adisc +#define lpfc_first_burst_size brcmfcoe_first_burst_size +#define lpfc_discovery_threads brcmfcoe_discovery_threads +#define lpfc_max_luns brcmfcoe_max_luns +#define lpfc_scan_down brcmfcoe_scan_down +#define lpfc_enable_da_id brcmfcoe_enable_da_id +#define lpfc_sli_mode brcmfcoe_sli_mode +#define lpfc_delay_discovery brcmfcoe_delay_discovery +#define lpfc_devloss_tmo brcmfcoe_devloss_tmo +#define lpfc_drvr_stat_data brcmfcoe_drvr_stat_data +#define lpfc_max_scsicmpl_time brcmfcoe_max_scsicmpl_time +#define lpfc_nodev_tmo brcmfcoe_nodev_tmo +#define lpfc_poll brcmfcoe_poll +#define lpfc_stat_data_ctrl brcmfcoe_stat_data_ctrl +#define lpfc_xlane_lun brcmfcoe_xlane_lun +#define lpfc_xlane_lun_state brcmfcoe_xlane_lun_state +#define lpfc_xlane_lun_status brcmfcoe_xlane_lun_status +#define lpfc_xlane_priority brcmfcoe_xlane_priority +#define lpfc_xlane_tgt brcmfcoe_xlane_tgt +#define lpfc_xlane_vpt brcmfcoe_xlane_vpt +#define lpfc_enable_fc4_type brcmfcoe_enable_fc4_type +#define lpfc_xri_split brcmfcoe_xri_split +#define lpfc_suppress_rsp brcmfcoe_suppress_rsp +#define lpfc_ras_fwlog_buffsize brcmfcoe_ras_fwlog_buffsize +#define lpfc_ras_fwlog_level brcmfcoe_ras_fwlog_level +#define lpfc_ras_fwlog_func brcmfcoe_ras_fwlog_func +#endif diff --git a/drivers/scsi/lpfc/lpfc_auth.c b/drivers/scsi/lpfc/lpfc_auth.c new file mode 100644 index 000000000000..c66fe750c13a --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_auth.c @@ -0,0 +1,2296 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2018-2020 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ +/* See Fibre Channel protocol T11 FC-SP-2 for details */ +#include <linux/blkdev.h> +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/mpi.h> +#include <crypto/hash.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_fc.h> + +#include "lpfc_hw4.h" +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_sli4.h" +#include "lpfc_nl.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" +#include "lpfc_auth.h" + +struct dh_group dh_group_array[5] = { + { + DH_GROUP_NULL, 0, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + } + }, + { + DH_GROUP_1024, 128, + { + 0xEE, 0xAF, 0x0A, 0xB9, 0xAD, 0xB3, 0x8D, 0xD6, + 0x9C, 0x33, 0xF8, 0x0A, 0xFA, 0x8F, 0xC5, 0xE8, + 0x60, 0x72, 0x61, 0x87, 0x75, 0xFF, 0x3C, 0x0B, + 0x9E, 0xA2, 0x31, 0x4C, 0x9C, 0x25, 0x65, 0x76, + 0xD6, 0x74, 0xDF, 0x74, 0x96, 0xEA, 0x81, 0xD3, + 0x38, 0x3B, 0x48, 0x13, 0xD6, 0x92, 0xC6, 0xE0, + 0xE0, 0xD5, 0xD8, 0xE2, 0x50, 0xB9, 0x8B, 0xE4, + 0x8E, 0x49, 0x5C, 0x1D, 0x60, 0x89, 0xDA, 0xD1, + 0x5D, 0xC7, 0xD7, 0xB4, 0x61, 0x54, 0xD6, 0xB6, + 0xCE, 0x8E, 0xF4, 0xAD, 0x69, 0xB1, 0x5D, 0x49, + 0x82, 0x55, 0x9B, 0x29, 0x7B, 0xCF, 0x18, 0x85, + 0xC5, 0x29, 0xF5, 0x66, 0x66, 0x0E, 0x57, 0xEC, + 0x68, 0xED, 0xBC, 0x3C, 0x05, 0x72, 0x6C, 0xC0, + 0x2F, 0xD4, 0xCB, 0xF4, 0x97, 0x6E, 0xAA, 0x9A, + 0xFD, 0x51, 0x38, 0xFE, 0x83, 0x76, 0x43, 0x5B, + 0x9F, 0xC6, 0x1D, 0x2F, 0xC0, 0xEB, 0x06, 0xE3, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + } + }, + { + DH_GROUP_1280, 160, + { + 0xD7, 0x79, 0x46, 0x82, 0x6E, 0x81, 0x19, 0x14, + 0xB3, 0x94, 0x01, 0xD5, 0x6A, 0x0A, 0x78, 0x43, + 0xA8, 0xE7, 0x57, 0x5D, 0x73, 0x8C, 0x67, 0x2A, + 0x09, 0x0A, 0xB1, 0x18, 0x7D, 0x69, 0x0D, 0xC4, + 0x38, 0x72, 0xFC, 0x06, 0xA7, 0xB6, 0xA4, 0x3F, + 0x3B, 0x95, 0xBE, 0xAE, 0xC7, 0xDF, 0x04, 0xB9, + 0xD2, 0x42, 0xEB, 0xDC, 0x48, 0x11, 0x11, 0x28, + 0x32, 0x16, 0xCE, 0x81, 0x6E, 0x00, 0x4B, 0x78, + 0x6C, 0x5F, 0xCE, 0x85, 0x67, 0x80, 0xD4, 0x18, + 0x37, 0xD9, 0x5A, 0xD7, 0x87, 0xA5, 0x0B, 0xBE, + 0x90, 0xBD, 0x3A, 0x9C, 0x98, 0xAC, 0x0F, 0x5F, + 0xC0, 0xDE, 0x74, 0x4B, 0x1C, 0xDE, 0x18, 0x91, + 0x69, 0x08, 0x94, 0xBC, 0x1F, 0x65, 0xE0, 0x0D, + 0xE1, 0x5B, 0x4B, 0x2A, 0xA6, 0xD8, 0x71, 0x00, + 0xC9, 0xEC, 0xC2, 0x52, 0x7E, 0x45, 0xEB, 0x84, + 0x9D, 0xEB, 0x14, 0xBB, 0x20, 0x49, 0xB1, 0x63, + 0xEA, 0x04, 0x18, 0x7F, 0xD2, 0x7C, 0x1B, 0xD9, + 0xC7, 0x95, 0x8C, 0xD4, 0x0C, 0xE7, 0x06, 0x7A, + 0x9C, 0x02, 0x4F, 0x9B, 0x7C, 0x5A, 0x0B, 0x4F, + 0x50, 0x03, 0x68, 0x61, 0x61, 0xF0, 0x60, 0x5B, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + } + }, + { + DH_GROUP_1536, 192, + { + 0x9D, 0xEF, 0x3C, 0xAF, 0xB9, 0x39, 0x27, 0x7A, + 0xB1, 0xF1, 0x2A, 0x86, 0x17, 0xA4, 0x7B, 0xBB, + 0xDB, 0xA5, 0x1D, 0xF4, 0x99, 0xAC, 0x4C, 0x80, + 0xBE, 0xEE, 0xA9, 0x61, 0x4B, 0x19, 0xCC, 0x4D, + 0x5F, 0x4F, 0x5F, 0x55, 0x6E, 0x27, 0xCB, 0xDE, + 0x51, 0xC6, 0xA9, 0x4B, 0xE4, 0x60, 0x7A, 0x29, + 0x15, 0x58, 0x90, 0x3B, 0xA0, 0xD0, 0xF8, 0x43, + 0x80, 0xB6, 0x55, 0xBB, 0x9A, 0x22, 0xE8, 0xDC, + 0xDF, 0x02, 0x8A, 0x7C, 0xEC, 0x67, 0xF0, 0xD0, + 0x81, 0x34, 0xB1, 0xC8, 0xB9, 0x79, 0x89, 0x14, + 0x9B, 0x60, 0x9E, 0x0B, 0xE3, 0xBA, 0xB6, 0x3D, + 0x47, 0x54, 0x83, 0x81, 0xDB, 0xC5, 0xB1, 0xFC, + 0x76, 0x4E, 0x3F, 0x4B, 0x53, 0xDD, 0x9D, 0xA1, + 0x15, 0x8B, 0xFD, 0x3E, 0x2B, 0x9C, 0x8C, 0xF5, + 0x6E, 0xDF, 0x01, 0x95, 0x39, 0x34, 0x96, 0x27, + 0xDB, 0x2F, 0xD5, 0x3D, 0x24, 0xB7, 0xC4, 0x86, + 0x65, 0x77, 0x2E, 0x43, 0x7D, 0x6C, 0x7F, 0x8C, + 0xE4, 0x42, 0x73, 0x4A, 0xF7, 0xCC, 0xB7, 0xAE, + 0x83, 0x7C, 0x26, 0x4A, 0xE3, 0xA9, 0xBE, 0xB8, + 0x7F, 0x8A, 0x2F, 0xE9, 0xB8, 0xB5, 0x29, 0x2E, + 0x5A, 0x02, 0x1F, 0xFF, 0x5E, 0x91, 0x47, 0x9E, + 0x8C, 0xE7, 0xA2, 0x8C, 0x24, 0x42, 0xC6, 0xF3, + 0x15, 0x18, 0x0F, 0x93, 0x49, 0x9A, 0x23, 0x4D, + 0xCF, 0x76, 0xE3, 0xFE, 0xD1, 0x35, 0xF9, 0xBB, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + } + }, + { + DH_GROUP_2048, 256, + { + 0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B, + 0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F, + 0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07, + 0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50, + 0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED, + 0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D, + 0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D, + 0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50, + 0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0, + 0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3, + 0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8, + 0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8, + 0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA, + 0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74, + 0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7, + 0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B, + 0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16, + 0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81, + 0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A, + 0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48, + 0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D, + 0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA, + 0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78, + 0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6, + 0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29, + 0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8, + 0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82, + 0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6, + 0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4, + 0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75, + 0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2, + 0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73, + } + }, +}; + +/** + * lpfc_auth_cancel_tmr + * @ptr: pointer to a node-list data structure. + * + * This routine cancels any pending authentication timer. + * + **/ +void +lpfc_auth_cancel_tmr(struct lpfc_nodelist *ndlp) +{ + struct lpfc_vport *vport = ndlp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (!(ndlp->nlp_flag & NLP_AUTH_TMO)) + return; + + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_AUTH_TMO; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&ndlp->dhc_cfg.nlp_reauthfunc); +} + +/** + * lpfc_auth_start_tmr + * @ndlp: pointer to a node-list data structure. + * @value: timer value (in msecs). + * + * This routine cancels any pending reauthentication timer and updates + * the timer with a new value. This timer may also be used during the + * authentication process to check for auth_tmo. + * + **/ +void +lpfc_auth_start_tmr(struct lpfc_nodelist *ndlp, uint32_t value) +{ + struct lpfc_vport *vport = ndlp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (ndlp->nlp_flag & NLP_AUTH_TMO) + lpfc_auth_cancel_tmr(ndlp); + + mod_timer(&ndlp->dhc_cfg.nlp_reauthfunc, + jiffies + msecs_to_jiffies(value)); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_AUTH_TMO; + spin_unlock_irq(shost->host_lock); +} + +/** + * lpfc_auth_start_reauth_tmr. + * @ndlp: pointer to a node-list data structure. + * + * Set-up timeout value for scheduled re-authentication. + **/ +void +lpfc_auth_start_reauth_tmr(struct lpfc_nodelist *ndlp) +{ + u32 msecs; + + msecs = ndlp->dhc_cfg.reauth_interval * 60000; + lpfc_auth_start_tmr(ndlp, msecs); +} + +/** + * lpfc_auth_findnode - Search for a nodelist item by wwpn pair. + * @vport: pointer to a vport structure. + * @lwwpn: pointer to a local portname. 
+ * @rwwpn: pointer to a remote portname.
+ *
+ * This routine returns a node list pointer for authentication. The search
+ * uses the local and remote portnames provided, with special accommodation
+ * for the wwpn used to identify the fabric. The routine returns a pointer
+ * to the matching ndlp or NULL if no matching entry is found.
+ **/
+struct lpfc_nodelist *
+lpfc_auth_findnode(struct lpfc_vport *vport, struct lpfc_name *lwwpn,
+		   struct lpfc_name *rwwpn)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+	struct lpfc_name wwpn;
+
+	wwpn.u.name = AUTH_FABRIC_WWN;
+	if (!memcmp(&vport->fc_portname, lwwpn->u.wwn, 8)) {
+		if (!memcmp(wwpn.u.wwn, rwwpn->u.wwn, 8))
+			ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		else
+			ndlp = lpfc_findnode_wwpn(vport, rwwpn);
+	}
+	return ndlp;
+}
+
+/**
+ * lpfc_auth_find_cfg_item - Search for a configuration entry by wwpn pair.
+ * @phba: pointer to a host.
+ * @lwwpn: pointer to a local portname.
+ * @rwwpn: pointer to a remote portname.
+ * @valid: address of entry valid flag.
+ *
+ * This routine returns a pointer to an active configuration entry matching
+ * the local and remote portnames provided, or NULL if no matching entry is
+ * found.
+ **/
+struct lpfc_auth_cfg_entry *
+lpfc_auth_find_cfg_item(struct lpfc_hba *phba, uint8_t *lwwpn, uint8_t *rwwpn,
+			uint8_t *valid)
+{
+	struct lpfc_auth_cfg *pcfg = NULL;
+	struct lpfc_auth_cfg_entry *entry = NULL;
+	uint8_t *ptmp = NULL;
+	uint16_t entry_cnt, i;
+
+	*valid = 0;
+	pcfg = lpfc_auth_get_active_cfg_ptr(phba);
+	if (!pcfg)
+		return entry;
+
+	entry_cnt = pcfg->hdr.entry_cnt;
+	if (entry_cnt > MAX_AUTH_CONFIG_ENTRIES)
+		entry_cnt = MAX_AUTH_CONFIG_ENTRIES;
+
+	ptmp = (uint8_t *)&pcfg->entry[0];
+	for (i = 0; i < entry_cnt; i++) {
+		if (i == AUTH_CONFIG_ENTRIES_PER_PAGE) {
+			ptmp = (uint8_t *)lpfc_auth_get_active_cfg_p2(phba);
+			if (!ptmp)
+				return entry;
+		}
+		if ((memcmp(ptmp + 8, rwwpn, 8)) ||
+		    (memcmp(ptmp, lwwpn, 8))) {
+			ptmp += sizeof(struct lpfc_auth_cfg_entry);
+		} else {
+			entry = (struct lpfc_auth_cfg_entry *)ptmp;
+			/* match found, check validity */
+			*valid = entry->auth_flags.valid;
+			break;
+		}
+	}
+	return entry;
+}
+
+/**
+ * lpfc_auth_lookup_cfg - Set-up auth configuration information for an ndlp.
+ * @ndlp: pointer to lpfc_nodelist instance.
+ *
+ * This routine will initiate a search for a configuration entry matching the
+ * local wwpn and remote wwpn for the ndlp. The initial implementation is
+ * limited to authentication with the fabric. If a configuration entry is
+ * found, the dh-chap configuration in the ndlp structure is populated.
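+ *
+ * The entry's hash and DH group priority lists are translated from their
+ * configuration encodings (LPFC_HASH_*, LPFC_DH_GROUP_*) to the protocol
+ * values (HASH_*, DH_GROUP_*) carried in the AUTH ELS payloads; e.g. a
+ * configured list of { LPFC_HASH_MD5, LPFC_HASH_SHA1 } is stored as
+ * { HASH_MD5, HASH_SHA1 } in dhc_cfg.hash_pri[].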
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - Failure
+ **/
+int
+lpfc_auth_lookup_cfg(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_auth_cfg_entry *entry = NULL;
+	uint8_t rwwpn[8];
+	uint8_t valid;
+	int i, rc = 0;
+
+	/* limited to authentication with the fabric */
+	u64_to_wwn(AUTH_FABRIC_WWN, rwwpn);
+	entry = lpfc_auth_find_cfg_item(phba, (uint8_t *)&vport->fc_portname,
+					rwwpn, &valid);
+	if (!entry || !valid)
+		return 1;
+
+	ndlp->dhc_cfg.auth_tmo = entry->auth_tmo;
+	ndlp->dhc_cfg.auth_mode = entry->auth_mode;
+	ndlp->dhc_cfg.bidirectional = entry->auth_flags.bi_direction;
+	ndlp->dhc_cfg.reauth_interval = entry->reauth_interval;
+	ndlp->dhc_cfg.local_passwd_len = entry->pwd.local_pw_length;
+	ndlp->dhc_cfg.remote_passwd_len = entry->pwd.remote_pw_length;
+	ndlp->dhc_cfg.direction = AUTH_DIRECTION_NONE;
+	memcpy(ndlp->dhc_cfg.local_passwd, &entry->pwd.local_pw,
+	       entry->pwd.local_pw_length);
+	memcpy(ndlp->dhc_cfg.remote_passwd, &entry->pwd.remote_pw,
+	       entry->pwd.remote_pw_length);
+	for (i = 0; i < 4; i++) {
+		switch (entry->hash_priority[i]) {
+		case LPFC_HASH_MD5:
+			ndlp->dhc_cfg.hash_pri[i] = HASH_MD5;
+			break;
+		case LPFC_HASH_SHA1:
+			ndlp->dhc_cfg.hash_pri[i] = HASH_SHA1;
+			break;
+		default:
+			ndlp->dhc_cfg.hash_pri[i] = 0;
+		}
+	}
+
+	ndlp->dhc_cfg.dh_grp_cnt = 0;
+	for (i = 0; i < 8; i++) {
+		switch (entry->dh_grp_priority[i]) {
+		case LPFC_DH_GROUP_2048:
+			ndlp->dhc_cfg.dh_grp_pri[i] = DH_GROUP_2048;
+			break;
+		case LPFC_DH_GROUP_1536:
+			ndlp->dhc_cfg.dh_grp_pri[i] = DH_GROUP_1536;
+			break;
+		case LPFC_DH_GROUP_1280:
+			ndlp->dhc_cfg.dh_grp_pri[i] = DH_GROUP_1280;
+			break;
+		case LPFC_DH_GROUP_1024:
+			ndlp->dhc_cfg.dh_grp_pri[i] = DH_GROUP_1024;
+			break;
+		case LPFC_DH_GROUP_NULL:
+			ndlp->dhc_cfg.dh_grp_pri[i] = DH_GROUP_NULL;
+			break;
+		default:
+			return rc;
+		}
+		ndlp->dhc_cfg.dh_grp_cnt++;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_auth_free_active_cfg_list - Free authentication config resources.
+ * @phba: pointer to a host.
+ *
+ * This routine frees any resources allocated to hold the driver's active
+ * copy of the authentication configuration object.
+ **/
+void
+lpfc_auth_free_active_cfg_list(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf, *next;
+
+	if (!list_empty(&phba->lpfc_auth_active_cfg_list)) {
+		list_for_each_entry_safe(dmabuf, next,
+					 &phba->lpfc_auth_active_cfg_list,
+					 list) {
+			list_del(&dmabuf->list);
+			dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+					  dmabuf->virt, dmabuf->phys);
+			kfree(dmabuf);
+		}
+	}
+}
+
+/**
+ * lpfc_auth_alloc_active_cfg_list - Allocate authentication config resources.
+ * @phba: pointer to a host.
+ *
+ * This routine attempts to allocate resources to hold the driver's active
+ * copy of the authentication configuration object.
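+ *
+ * Two DMA-coherent buffers of SLI4_PAGE_SIZE bytes each are allocated and
+ * chained on lpfc_auth_active_cfg_list, one for each page of the
+ * configuration object; on any allocation failure the partial list is
+ * freed again.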
+ *
+ * Return codes
+ * 0 - Success
+ * negative errno - Failure
+ **/
+int
+lpfc_auth_alloc_active_cfg_list(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	uint8_t i;
+	int rc = 0;
+
+	if (!list_empty(&phba->lpfc_auth_active_cfg_list))
+		return -EINVAL;
+
+	for (i = 0; i < 2; i++) {
+		dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL);
+		if (!dmabuf) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  SLI4_PAGE_SIZE,
+						  &dmabuf->phys,
+						  GFP_KERNEL);
+		if (!dmabuf->virt) {
+			kfree(dmabuf);
+			rc = -ENOMEM;
+			goto out;
+		}
+		list_add_tail(&dmabuf->list, &phba->lpfc_auth_active_cfg_list);
+	}
+out:
+	if (rc)
+		lpfc_auth_free_active_cfg_list(phba);
+	return rc;
+}
+
+/**
+ * lpfc_auth_get_active_cfg_ptr - Returns the base address of the
+ * authentication config.
+ * @phba: pointer to a host.
+ *
+ * This routine returns a pointer to the first page of the driver's active
+ * copy of the authentication configuration for the port. The routine will
+ * return NULL if there is no active configuration.
+ *
+ **/
+struct lpfc_auth_cfg *
+lpfc_auth_get_active_cfg_ptr(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_auth_cfg *p = NULL;
+
+	if (!list_empty(&phba->lpfc_auth_active_cfg_list)) {
+		dmabuf = list_first_entry(&phba->lpfc_auth_active_cfg_list,
+					  struct lpfc_dmabuf, list);
+
+		p = (struct lpfc_auth_cfg *)dmabuf->virt;
+	}
+	return p;
+}
+
+/**
+ * lpfc_auth_get_active_cfg_p2 - Returns the base address for the second page
+ * of the active authentication config.
+ * @phba: pointer to a host.
+ *
+ * This routine returns a pointer to the second page of the driver's active
+ * copy of the authentication configuration for the port. The routine will
+ * return NULL if there is no active configuration.
+ *
+ **/
+struct lpfc_auth_cfg_entry *
+lpfc_auth_get_active_cfg_p2(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_auth_cfg_entry *p = NULL;
+
+	if (!list_empty(&phba->lpfc_auth_active_cfg_list)) {
+		dmabuf = list_entry((&phba->lpfc_auth_active_cfg_list)->prev,
+				    struct lpfc_dmabuf, list);
+
+		p = (struct lpfc_auth_cfg_entry *)dmabuf->virt;
+	}
+	return p;
+}
+
+/**
+ * lpfc_auth_cmpl_rd_object - Completion handler for the read object mailbox
+ * command.
+ * @phba: pointer to a host.
+ * @mboxq: pointer to lpfc mailbox command context.
+ *
+ * This routine handles the completion of a read object mailbox command
+ * issued to populate the driver's active copy of the authentication
+ * configuration.
+ **/
+void
+lpfc_auth_cmpl_rd_object(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_mbx_auth_rd_object *rd_object;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	int mb_sts = mboxq->u.mb.mbxStatus;
+
+	rd_object = (struct lpfc_mbx_auth_rd_object *)mboxq->sge_array->addr[0];
+	shdr = &rd_object->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	if (shdr_status || shdr_add_status || mb_sts) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_AUTH,
+				"3040 Read Object mailbox cmd failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, mb_sts);
+
+		if (mboxq->ctx_buf == &phba->lpfc_auth_active_cfg_list)
+			lpfc_auth_free_active_cfg_list(phba);
+	}
+
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_auth_read_cfg_object - Read the authentication configuration object.
+ * @phba: pointer to a host.
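+ *
+ * Each port's object occupies one 8 KB region (two SLI4_PAGE_SIZE pages),
+ * so the read offset is lnk_no * 8192; e.g. link 0 reads from offset 0
+ * and link 1 from offset 8192.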
+ * + * This routine issues a read object mailbox command to populate the driver's + * active copy of the authentication configuration. + * + * Return codes + * 0 - Success + * 1 - Failure + **/ +int +lpfc_auth_read_cfg_object(struct lpfc_hba *phba) +{ + uint32_t dma_size; + uint32_t offset; + char *obj_str = {"/driver/auth.cfg"}; + int rc = 0; + + if (list_empty(&phba->lpfc_auth_active_cfg_list)) + rc = lpfc_auth_alloc_active_cfg_list(phba); + if (rc) + return rc; + + /* 8k per port */ + dma_size = SLI4_PAGE_SIZE * 2; + offset = phba->sli4_hba.lnk_info.lnk_no * dma_size; + rc = lpfc_auth_issue_rd_object(phba, &phba->lpfc_auth_active_cfg_list, + dma_size, &offset, obj_str); + if (rc) { + /* MBX_BUSY is acceptable, no reason to alarm caller */ + if (rc == MBX_BUSY) + rc = MBX_SUCCESS; + else + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_AUTH, + "3041 Read Object abnormal exit:%x.\n", + rc); + } + if (rc != MBX_SUCCESS) + lpfc_auth_free_active_cfg_list(phba); + + return rc; +} + +/** + * lpfc_auth_compute_hash - Calculate the hash + * @vport: pointer to a host virtual N_Port data structure. + * + * Return value + * Pointer to hash + **/ +static uint8_t * +lpfc_auth_compute_hash(uint32_t hash_id, + uint8_t *val1, + uint32_t val1_len, + uint8_t *val2, + uint32_t val2_len, + uint8_t *val3, + uint32_t val3_len) +{ + struct crypto_shash *tfm = NULL; + struct shash_desc *desc = NULL; + uint8_t *digest = NULL; + uint32_t chal_len = 0; + int rc = 0; + + switch (hash_id) { + case HASH_MD5: + tfm = crypto_alloc_shash("md5", 0, 0); + chal_len = MD5_CHAL_LEN; + break; + case HASH_SHA1: + tfm = crypto_alloc_shash("sha1", 0, 0); + chal_len = SHA1_CHAL_LEN; + break; + default: + return NULL; + } + + if (!tfm) + return NULL; + + digest = kzalloc(chal_len, GFP_KERNEL); + if (!digest) + goto out; + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!desc) { + rc = 1; + goto out; + } + desc->tfm = tfm; +#if (KERNEL_MAJOR < 5) + desc->flags = 0; +#endif + + rc = crypto_shash_init(desc); + if (rc < 0) + goto out; + if (val1 && val1_len) { + rc = crypto_shash_update(desc, val1, val1_len); + if (rc < 0) + goto out; + } + if (val2 && val2_len) { + rc = crypto_shash_update(desc, val2, val2_len); + if (rc < 0) + goto out; + } + if (val3 && val3_len) { + rc = crypto_shash_update(desc, val3, val3_len); + if (rc < 0) + goto out; + } + rc = crypto_shash_final(desc, digest); + +out: + if (rc) { + kfree(digest); + digest = NULL; + } + kfree(desc); + crypto_free_shash(tfm); + return digest; +} + +/** + * lpfc_auth_compute_dhkey - Calculate the dhkey + * @base: base value + * @base_len: base value length + * @expo: exponent value + * @expo_len: exponent value length + * @mod: modulo value + * @mod_len: modulo value length + * @key: resulting key value + * @key_len: resulting key value length + * + * Return value + * Success / Failure + **/ +static int +lpfc_auth_compute_dhkey(uint8_t *base, + uint32_t base_len, + uint8_t *expo, + uint32_t expo_len, + uint8_t *mod, + uint32_t mod_len, + uint8_t *key, + uint32_t key_len) +{ + int i, rc; + uint8_t *temp = NULL; + uint32_t temp_len = 0; + MPI k, b, e, m; + + /* + * key = ((base)^expo modulo mod) + */ + b = mpi_read_raw_data(base, base_len); + e = mpi_read_raw_data(expo, expo_len); + m = mpi_read_raw_data(mod, mod_len); + + k = mpi_alloc(mpi_get_nlimbs(m) * 2); + + rc = mpi_powm(k, b, e, m); + if (!rc) { + temp = mpi_get_buffer(k, &temp_len, NULL); + if (!temp) { + rc = 1; + goto out; + } + + if (key_len < temp_len) { + rc = 1; + goto out; + } + 
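+	/*
+	 * mpi_get_buffer() strips leading zero bytes, so the computed
+	 * secret may come back shorter than the DH group length; restore
+	 * the zeros below so the key always fills the fixed key_len the
+	 * caller hashes over.
+	 */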
+ /* Pad the key with leading zeros */ + memset(key, 0, key_len); + i = key_len - temp_len; + memcpy(key + i, temp, temp_len); + } + +out: + mpi_free(b); + mpi_free(e); + mpi_free(m); + mpi_free(k); + kfree(temp); + return rc; +} + +/** + * lpfc_auth_cmpl_negotiate - Completion callback routine + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + **/ +static void +lpfc_auth_cmpl_negotiate(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = phba->pport; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_nodelist *ndlp = cmdiocb->context1; + + if (irsp->ulpStatus) + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3043 ELS_AUTH cmpl, ulpstatus=x%x/%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4]); + lpfc_els_free_iocb(phba, cmdiocb); + + if ((irsp->ulpStatus == IOSTAT_LS_RJT) && + ndlp && NLP_CHK_NODE_ACT(ndlp) && + (ndlp->dhc_cfg.auth_mode == LPFC_AUTH_MODE_ACTIVE)) { + ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN; + /* logout should be sent on LS REJECT */ + lpfc_issue_els_logo(vport, ndlp, 0); + } +} + +/** + * lpfc_auth_issue_negotiate - Issue AUTH_NEGOTIATE command + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine issues AUTH_NEGOTIATE ELS command to the remote + * port as the authentication initiator. + * + * Return codes + * 0 - Success + * 1 - Failure + **/ +static int +lpfc_auth_issue_negotiate(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + struct lpfc_auth_hdr *hdr; + struct lpfc_auth_negotiate_cmn *negotiate; + struct lpfc_dhchap_param_cmn *dhchap_param; + struct lpfc_auth_param_hdr *param_hdr; + uint32_t param_len; + uint32_t *hash; + uint32_t *dh_group; + uint16_t cmdsize; + uint8_t i, dh_grp_cnt; + int rc; + + /* Support 2 Hash Functions and 5 DH-Groups */ + cmdsize = sizeof(struct lpfc_auth_hdr) + + sizeof(struct lpfc_auth_negotiate_cmn) + + sizeof(struct lpfc_dhchap_param_cmn) + + sizeof(struct lpfc_auth_param_hdr) + 2 * sizeof(uint32_t) + + sizeof(struct lpfc_auth_param_hdr) + 5 * sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, ELS_CMD_AUTH); + + if (!elsiocb) + return 1; + + /* Save the auth details for later use */ + if (ndlp->dhc_cfg.state != LPFC_AUTH_SUCCESS) + ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN; + ndlp->dhc_cfg.msg = LPFC_AUTH_NEGOTIATE; + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + ndlp->dhc_cfg.tran_id += 1; + + /* Build the AUTH_NEGOTIATE header */ + hdr = (struct lpfc_auth_hdr *)pcmd; + hdr->auth_els_code = (uint8_t) ELS_CMD_AUTH; + hdr->auth_els_flags = 0; + hdr->auth_msg_code = AUTH_NEGOTIATE; + hdr->protocol_ver = AUTH_PROTOCOL_VER_1; + hdr->msg_len = be32_to_cpu(cmdsize - sizeof(struct lpfc_auth_hdr)); + hdr->tran_id = be32_to_cpu(ndlp->dhc_cfg.tran_id); + pcmd += sizeof(struct lpfc_auth_hdr); + + /* Set up AUTH_NEGOTIATE common data */ + negotiate = (struct lpfc_auth_negotiate_cmn *)pcmd; + negotiate->name_tag = be16_to_cpu(AUTH_NAME_TAG); + negotiate->name_len = be16_to_cpu(sizeof(struct lpfc_name)); + memcpy(&negotiate->port_name, &vport->fc_portname, + sizeof(struct lpfc_name)); + negotiate->num_protocol = be32_to_cpu(1); /* Only support DHCHAP */ + pcmd += sizeof(struct lpfc_auth_negotiate_cmn); + + /* Set up DH-CHAP parameters 
common data */
+	dh_grp_cnt = ndlp->dhc_cfg.dh_grp_cnt;
+	if (dh_grp_cnt > 5)
+		dh_grp_cnt = 5;
+	dhchap_param = (struct lpfc_dhchap_param_cmn *)pcmd;
+	param_len = sizeof(uint32_t) +
+		    sizeof(struct lpfc_auth_param_hdr) + 2 * sizeof(uint32_t) +
+		    sizeof(struct lpfc_auth_param_hdr) +
+		    dh_grp_cnt * sizeof(uint32_t);
+	dhchap_param->param_len = be32_to_cpu(param_len);
+	dhchap_param->protocol_id = be32_to_cpu(PROTOCOL_DHCHAP);
+	pcmd += sizeof(struct lpfc_dhchap_param_cmn);
+
+	/* Set up Hash Functions */
+	param_hdr = (struct lpfc_auth_param_hdr *)pcmd;
+	param_hdr->tag = be16_to_cpu(DHCHAP_TAG_HASHLIST);
+	param_hdr->wcnt = be16_to_cpu(2);	/* 2 HASH Functions */
+	pcmd += sizeof(struct lpfc_auth_param_hdr);
+	hash = (uint32_t *)pcmd;
+	hash[0] = be32_to_cpu(ndlp->dhc_cfg.hash_pri[0]);
+	hash[1] = be32_to_cpu(ndlp->dhc_cfg.hash_pri[1]);
+	pcmd += 2 * sizeof(uint32_t);
+
+	/* Setup DH Groups */
+	param_hdr = (struct lpfc_auth_param_hdr *)pcmd;
+	param_hdr->tag = be16_to_cpu(DHCHAP_TAG_GRP_IDLIST);
+	param_hdr->wcnt = be16_to_cpu(dh_grp_cnt);	/* max 5 DH Groups */
+	pcmd += sizeof(struct lpfc_auth_param_hdr);
+	dh_group = (uint32_t *)pcmd;
+	for (i = 0; i < dh_grp_cnt; i++)
+		dh_group[i] = be32_to_cpu(ndlp->dhc_cfg.dh_grp_pri[i]);
+
+	elsiocb->iocb_cmpl = lpfc_auth_cmpl_negotiate;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* Set up authentication timer */
+	if (ndlp->dhc_cfg.auth_tmo)
+		lpfc_auth_start_tmr(ndlp, ndlp->dhc_cfg.auth_tmo * 1000);
+
+	return 0;
+}
+
+/**
+ * lpfc_auth_issue_dhchap_challenge - Issue DHCHAP_CHALLENGE command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a DHCHAP_CHALLENGE ELS command to the remote
+ * port as the authentication responder.
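+ *
+ * The challenge is a random string sized to the selected hash digest
+ * (MD5_CHAL_LEN or SHA1_CHAL_LEN bytes). For a non-NULL DH group the
+ * payload also carries the local DH public value, computed as
+ * 2^nonce mod p using a random 16-byte nonce as the private exponent.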
+ * + * Return codes + * 0 - Success + * 1 - Failure + **/ +static int +lpfc_auth_issue_dhchap_challenge(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_auth_hdr *hdr; + struct lpfc_auth_challenge *challenge; + uint8_t *pcmd; + uint16_t cmdsize; + uint32_t chal_len; + uint32_t hash_id = ndlp->dhc_cfg.hash_id; + uint32_t dh_grp_id = ndlp->dhc_cfg.dh_grp_id; + uint32_t dh_val_len = dh_group_array[dh_grp_id].length; + uint8_t nonce[16], key[256]; + uint8_t *rand_num = NULL; + uint32_t key_len; + uint8_t key_base = 2; + int rc = 0; + + switch (hash_id) { + case HASH_MD5: + chal_len = MD5_CHAL_LEN; + break; + case HASH_SHA1: + chal_len = SHA1_CHAL_LEN; + break; + default: + return 1; + } + + rand_num = kzalloc(chal_len, GFP_KERNEL); + if (!rand_num) { + rc = 1; + goto out; + } + get_random_bytes(rand_num, chal_len); + + cmdsize = sizeof(struct lpfc_auth_hdr) + + sizeof(struct lpfc_auth_challenge) + + chal_len + sizeof(uint32_t) + dh_val_len; + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, ELS_CMD_AUTH); + + if (!elsiocb) { + rc = 1; + goto out; + } + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + /* Build the DHCHAP_REPLY payload */ + hdr = (struct lpfc_auth_hdr *)pcmd; + hdr->auth_els_code = (uint8_t) ELS_CMD_AUTH; + hdr->auth_els_flags = 0; + hdr->auth_msg_code = DHCHAP_CHALLENGE; + hdr->protocol_ver = AUTH_PROTOCOL_VER_1; + hdr->msg_len = be32_to_cpu(cmdsize - sizeof(struct lpfc_auth_hdr)); + hdr->tran_id = be32_to_cpu(ndlp->dhc_cfg.tran_id); + pcmd += sizeof(struct lpfc_auth_hdr); + + /* Set up DHCHAP_CHALLENGE payload */ + challenge = (struct lpfc_auth_challenge *)pcmd; + challenge->name_tag = be16_to_cpu(AUTH_NAME_TAG); + challenge->name_len = be16_to_cpu(sizeof(struct lpfc_name)); + memcpy(&challenge->port_name, &vport->fc_portname, + sizeof(struct lpfc_name)); + challenge->hash_id = be32_to_cpu(ndlp->dhc_cfg.hash_id); + challenge->dh_grp_id = be32_to_cpu(ndlp->dhc_cfg.dh_grp_id); + challenge->chal_len = be32_to_cpu(chal_len); + pcmd += sizeof(struct lpfc_auth_challenge); + memcpy(pcmd, rand_num, chal_len); + pcmd += chal_len; + *((uint32_t *)pcmd) = be32_to_cpu(dh_val_len); + pcmd += sizeof(uint32_t); + + /* Save the challenge */ + memcpy(ndlp->dhc_cfg.rand_num, rand_num, chal_len); + + if (dh_grp_id) { + key_len = dh_group_array[dh_grp_id].length; + get_random_bytes(nonce, sizeof(nonce)); + + rc = lpfc_auth_compute_dhkey(&key_base, sizeof(key_base), + nonce, sizeof(nonce), + dh_group_array[dh_grp_id].value, + dh_group_array[dh_grp_id].length, + key, key_len); + if (rc) + goto out; + + memcpy(pcmd, key, key_len); + + /* Save the nonce */ + memcpy(ndlp->dhc_cfg.nonce, nonce, sizeof(nonce)); + } + + elsiocb->iocb_cmpl = lpfc_auth_cmpl_negotiate; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + +out: + kfree(rand_num); + return rc; +} + +/** + * lpfc_auth_issue_dhchap_reply - Issue DHCHAP_REPLY cmd + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. 
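+ * @tran_id: transaction identifier for this authentication exchange.
+ * @dh_grp_id: DH group selected by the challenger.
+ * @hash_id: hash function selected by the challenger.
+ * @hash_value: computed response digest to send.
+ * @dh_value: local DH public value, when a non-NULL group is in use.
+ * @dh_val_len: length of @dh_value in bytes.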
+ * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_issue_dhchap_reply(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + uint32_t tran_id, uint32_t dh_grp_id, + uint32_t hash_id, uint8_t *hash_value, + uint8_t *dh_value, uint32_t dh_val_len) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_auth_hdr *hdr; + uint8_t *pcmd; + uint16_t cmdsize; + uint32_t chal_len = 0; + uint8_t *rand_num = NULL; + int rc = 0; + + switch (hash_id) { + case HASH_MD5: + /* Check for bi_directional support */ + if (ndlp->dhc_cfg.bidirectional) + chal_len = MD5_CHAL_LEN; + cmdsize = sizeof(struct lpfc_auth_hdr) + + sizeof(uint32_t) + MD5_CHAL_LEN + + sizeof(uint32_t) + dh_val_len + + sizeof(uint32_t) + chal_len; + break; + case HASH_SHA1: + /* Check for bi_directional support */ + if (ndlp->dhc_cfg.bidirectional) + chal_len = SHA1_CHAL_LEN; + cmdsize = sizeof(struct lpfc_auth_hdr) + + sizeof(uint32_t) + SHA1_CHAL_LEN + + sizeof(uint32_t) + dh_val_len + + sizeof(uint32_t) + chal_len; + break; + default: + return 1; + } + + ndlp->dhc_cfg.msg = LPFC_DHCHAP_REPLY; + + /* Check for bi_directional support before allocating rand_num */ + if (ndlp->dhc_cfg.bidirectional) { + rand_num = kzalloc(chal_len, GFP_KERNEL); + if (!rand_num) { + rc = 1; + goto out; + } + get_random_bytes(rand_num, chal_len); + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, ELS_CMD_AUTH); + + if (!elsiocb) { + rc = 1; + goto out; + } + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + /* Build the DHCHAP_REPLY payload */ + hdr = (struct lpfc_auth_hdr *)pcmd; + hdr->auth_els_code = (uint8_t) ELS_CMD_AUTH; + hdr->auth_els_flags = 0; + hdr->auth_msg_code = DHCHAP_REPLY; + hdr->protocol_ver = AUTH_PROTOCOL_VER_1; + hdr->msg_len = be32_to_cpu(cmdsize - sizeof(struct lpfc_auth_hdr)); + hdr->tran_id = be32_to_cpu(tran_id); + + pcmd += sizeof(struct lpfc_auth_hdr); + if (hash_id == HASH_MD5) { + *((uint32_t *)pcmd) = be32_to_cpu(MD5_CHAL_LEN); + pcmd += sizeof(uint32_t); + memcpy(pcmd, hash_value, MD5_CHAL_LEN); + pcmd += MD5_CHAL_LEN; + } else if (hash_id == HASH_SHA1) { + *((uint32_t *)pcmd) = be32_to_cpu(SHA1_CHAL_LEN); + pcmd += sizeof(uint32_t); + memcpy(pcmd, hash_value, SHA1_CHAL_LEN); + pcmd += SHA1_CHAL_LEN; + } + + *((uint32_t *)pcmd) = be32_to_cpu(dh_val_len); + pcmd += sizeof(uint32_t); + if (dh_val_len) { + memcpy(pcmd, dh_value, dh_val_len); + pcmd += dh_val_len; + } + *((uint32_t *)pcmd) = be32_to_cpu(chal_len); + if (chal_len) { + pcmd += sizeof(uint32_t); + memcpy(pcmd, rand_num, chal_len); + } + + /* Save the auth details for later use */ + ndlp->dhc_cfg.hash_id = hash_id; + ndlp->dhc_cfg.tran_id = tran_id; + ndlp->dhc_cfg.dh_grp_id = dh_grp_id; + memcpy(ndlp->dhc_cfg.rand_num, rand_num, chal_len); + + elsiocb->iocb_cmpl = lpfc_auth_cmpl_negotiate; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + + /* Set up authentication timer */ + if (ndlp->dhc_cfg.auth_tmo) + lpfc_auth_start_tmr(ndlp, ndlp->dhc_cfg.auth_tmo * 1000); +out: + kfree(rand_num); + return rc; +} + +/** + * lpfc_auth_issue_dhchap_success - Issue DHCHAP_SUCCESS cmd + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. 
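+ * @hash_value: response to the remote challenge when bidirectional
+ *              authentication is in use, or NULL to send no response.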
+ * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_issue_dhchap_success(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + uint8_t *hash_value) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_auth_hdr *hdr; + uint8_t *pcmd; + uint16_t cmdsize; + uint32_t chal_len = 0; + int rc = 0; + + if (ndlp->dhc_cfg.hash_id == HASH_MD5) + chal_len = MD5_CHAL_LEN; + else if (ndlp->dhc_cfg.hash_id == HASH_SHA1) + chal_len = SHA1_CHAL_LEN; + + cmdsize = sizeof(struct lpfc_auth_hdr) + sizeof(uint32_t); + if (hash_value) + cmdsize += chal_len; + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, ELS_CMD_AUTH); + + if (!elsiocb) { + rc = 1; + goto out; + } + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + /* Build the DHCHAP_SUCCESS payload */ + hdr = (struct lpfc_auth_hdr *)pcmd; + hdr->auth_els_code = (uint8_t) ELS_CMD_AUTH; + hdr->auth_els_flags = 0; + hdr->auth_msg_code = DHCHAP_SUCCESS; + hdr->protocol_ver = AUTH_PROTOCOL_VER_1; + hdr->msg_len = be32_to_cpu(cmdsize - sizeof(struct lpfc_auth_hdr)); + hdr->tran_id = be32_to_cpu(ndlp->dhc_cfg.tran_id); + pcmd += sizeof(struct lpfc_auth_hdr); + + if (hash_value) { + *((uint32_t *)pcmd) = be32_to_cpu(chal_len); + pcmd += sizeof(uint32_t); + memcpy(pcmd, hash_value, chal_len); + } else { + *((uint32_t *)pcmd) = 0; + } + + elsiocb->iocb_cmpl = lpfc_auth_cmpl_negotiate; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + + ndlp->dhc_cfg.state = LPFC_AUTH_SUCCESS; + ndlp->dhc_cfg.msg = LPFC_DHCHAP_SUCCESS_REPLY; + ndlp->dhc_cfg.direction = AUTH_DIRECTION_LOCAL; + ndlp->dhc_cfg.last_auth = jiffies; +out: + return rc; +} + +/** + * lpfc_auth_issue_reject - Issue AUTH_REJECT cmd + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. 
+ * @tran_id: transaction id + * @rsn_code: reason code + * @rsn_expl: reason code explanation + * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_issue_reject(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + uint32_t tran_id, + uint8_t rsn_code, + uint8_t rsn_expl) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_auth_hdr *hdr; + struct lpfc_auth_reject *rjt; + uint8_t *pcmd; + uint16_t cmdsize; + int rc = 0; + + cmdsize = sizeof(struct lpfc_auth_hdr) + sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, + ndlp->nlp_DID, ELS_CMD_AUTH); + + if (!elsiocb) { + rc = 1; + goto out; + } + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + /* Build the AUTH_REJECT payload */ + hdr = (struct lpfc_auth_hdr *)pcmd; + hdr->auth_els_code = (uint8_t) ELS_CMD_AUTH; + hdr->auth_els_flags = 0; + hdr->auth_msg_code = AUTH_REJECT; + hdr->protocol_ver = AUTH_PROTOCOL_VER_1; + hdr->msg_len = be32_to_cpu(cmdsize - sizeof(struct lpfc_auth_hdr)); + hdr->tran_id = be32_to_cpu(tran_id); + + rjt = (struct lpfc_auth_reject *)(pcmd + sizeof(struct lpfc_auth_hdr)); + rjt->rsn_code = rsn_code; + rjt->rsn_expl = rsn_expl; + + elsiocb->iocb_cmpl = lpfc_auth_cmpl_negotiate; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + + ndlp->dhc_cfg.msg = LPFC_AUTH_REJECT; + ndlp->dhc_cfg.direction = AUTH_DIRECTION_NONE; +out: + return rc; +} + +/** + * lpfc_auth_handle_reject - Handle AUTH Reject for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @payload: pointer to AUTH Reject cmd payload. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_handle_reject(struct lpfc_vport *vport, uint8_t *payload, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_auth_reject *rjt; + uint8_t rsn_code; + uint8_t rsn_expl; + + rjt = (struct lpfc_auth_reject *) + (payload + sizeof(struct lpfc_auth_hdr)); + rsn_code = rjt->rsn_code; + rsn_expl = rjt->rsn_expl; + + if (rsn_code == AUTHRJT_LOGICAL_ERR && + rsn_expl == AUTHEXPL_RESTART_AUTH) { + /* Restart authentication */ + lpfc_auth_issue_negotiate(vport, ndlp); + } else { + ndlp->dhc_cfg.state = LPFC_AUTH_FAIL; + ndlp->dhc_cfg.msg = LPFC_AUTH_REJECT; + } + ndlp->dhc_cfg.direction = AUTH_DIRECTION_NONE; + + return 0; +} + +/** + * lpfc_auth_handle_negotiate - Handle AUTH Negotiate for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @payload: pointer to AUTH Negotiate cmd payload. + * @ndlp: pointer to a node-list data structure. 
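+ *
+ * The negotiate payload scanned below is, roughly:
+ *
+ *   name_tag, name_len, port_name, num_protocol, then per protocol a
+ *   { param_len, protocol_id } header followed, for DH-CHAP, by a hash
+ *   list and a DH group list, each a { tag, wcnt } header plus wcnt
+ *   32-bit ids.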
+ * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_handle_negotiate(struct lpfc_vport *vport, uint8_t *payload, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_auth_hdr *hdr; + struct lpfc_auth_negotiate_cmn *negotiate; + struct lpfc_dhchap_param_cmn *dhchap_param; + struct lpfc_auth_param_hdr *param_hdr; + uint32_t protocol_id, hash = 0, dh_group = 0; + uint8_t found = 0; + int i; + int rc = 0; + + hdr = (struct lpfc_auth_hdr *)payload; + + /* Nx_Port with the higher port_name shall remain the initiator */ + if (memcmp(&vport->fc_portname, &ndlp->nlp_portname, + sizeof(struct lpfc_name)) > 0) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_LOGICAL_ERR, + AUTHEXPL_AUTH_STARTED); + return 0; + } + + /* Sanity check the AUTH message header */ + if (hdr->protocol_ver != AUTH_PROTOCOL_VER_1) { + rc = 1; + goto out; + } + + /* Sanity check the Negotiate payload */ + payload += sizeof(struct lpfc_auth_hdr); + negotiate = (struct lpfc_auth_negotiate_cmn *)payload; + if (be16_to_cpu(negotiate->name_tag) != AUTH_NAME_TAG || + be16_to_cpu(negotiate->name_len) != sizeof(struct lpfc_name) || + be32_to_cpu(negotiate->num_protocol) == 0) { + rc = 1; + goto out; + } + + payload += sizeof(struct lpfc_auth_negotiate_cmn); + + /* Scan the payload for Protocol support */ + for (i = 0; i < be32_to_cpu(negotiate->num_protocol); i++) { + dhchap_param = (struct lpfc_dhchap_param_cmn *)payload; + protocol_id = be32_to_cpu(dhchap_param->protocol_id); + + if (protocol_id == PROTOCOL_DHCHAP) { + found = 1; + break; + } + + payload += be32_to_cpu(dhchap_param->param_len); + } + + if (!found) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_LOGICAL_ERR, + AUTHEXPL_MECH_UNUSABLE); + return 0; + } + + payload += sizeof(struct lpfc_dhchap_param_cmn); + param_hdr = (struct lpfc_auth_param_hdr *)payload; + payload += sizeof(struct lpfc_auth_param_hdr); + + /* Scan the payload for Hash support */ + found = 0; + for (i = 0; i < be32_to_cpu(param_hdr->wcnt); i++) { + hash = be32_to_cpu(*((uint32_t *)payload)); + + if (hash == HASH_MD5 || hash == HASH_SHA1) { + found = 1; + break; + } + + payload += sizeof(uint32_t); + } + + if (!found) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_LOGICAL_ERR, + AUTHEXPL_HASH_UNUSABLE); + return 0; + } + + param_hdr = (struct lpfc_auth_param_hdr *)payload; + payload += sizeof(struct lpfc_auth_param_hdr); + + /* Scan the payload for DH Group support */ + found = 0; + for (i = 0; i < be32_to_cpu(param_hdr->wcnt); i++) { + dh_group = be32_to_cpu(*((uint32_t *)payload)); + + if (dh_group >= DH_GROUP_NULL && dh_group <= DH_GROUP_2048) { + found = 1; + break; + } + + payload += sizeof(uint32_t); + } + + if (!found) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_LOGICAL_ERR, + AUTHEXPL_DHGRP_UNUSABLE); + return 0; + } + + /* Save the Authentication settings for this node */ + ndlp->dhc_cfg.tran_id = be32_to_cpu(hdr->tran_id); + ndlp->dhc_cfg.hash_id = hash; + ndlp->dhc_cfg.dh_grp_id = dh_group; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "6041 AUTH_NEGOTIATE " + "tran_id: x%x hash_id: x%x dh_grp: x%x\n", + be32_to_cpu(hdr->tran_id), hash, dh_group); + + /* Issue a DH-CHAP Challenge to the initiator */ + lpfc_auth_issue_dhchap_challenge(vport, ndlp); + return 0; + +out: + if (rc) + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + return 0; +} + +/** + * lpfc_auth_handle_dhchap_chal - Handle DHCHAP 
Challenge for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @payload: pointer to DHCHAP Challenge cmd payload. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_handle_dhchap_chal(struct lpfc_vport *vport, uint8_t *payload, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_auth_hdr *hdr; + struct lpfc_auth_challenge *chal; + uint8_t *hash_value = NULL; + uint32_t dh_val_len = 0; + uint8_t *dh_value = NULL; + uint8_t *aug_chal = NULL; + uint8_t challenge[20]; + uint8_t tran_id; + uint32_t hash_id; + uint32_t chal_len; + uint32_t dh_grp_id; + uint8_t nonce[16], key[256], rsp_key[256]; + uint32_t key_len; + uint32_t rsp_key_len; + uint8_t rsp_key_base = 2; + char *passwd = ndlp->dhc_cfg.local_passwd; + uint32_t passwd_len = ndlp->dhc_cfg.local_passwd_len; + int rc; + + hdr = (struct lpfc_auth_hdr *)payload; + /* Sanity check the AUTH message header */ + if (hdr->protocol_ver != AUTH_PROTOCOL_VER_1 || + be32_to_cpu(hdr->tran_id) != ndlp->dhc_cfg.tran_id) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + return 0; + } + + payload += sizeof(struct lpfc_auth_hdr); + chal = (struct lpfc_auth_challenge *)payload; + + tran_id = be32_to_cpu(hdr->tran_id) & 0xff; + hash_id = be32_to_cpu(chal->hash_id); + chal_len = be32_to_cpu(chal->chal_len); + dh_grp_id = be32_to_cpu(chal->dh_grp_id); + + payload += sizeof(struct lpfc_auth_challenge); + + ndlp->dhc_cfg.msg = LPFC_DHCHAP_CHALLENGE; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3044 AUTH_CHALLENGE " + "tran_id:x%x hash_id:x%x chal_len=x%x dh_grp=x%x\n", + tran_id, hash_id, chal_len, dh_grp_id); + + if (hash_id == HASH_MD5 && chal_len == MD5_CHAL_LEN) { + memcpy(challenge, payload, MD5_CHAL_LEN); + } else if (hash_id == HASH_SHA1 && chal_len == SHA1_CHAL_LEN) { + memcpy(challenge, payload, SHA1_CHAL_LEN); + } else { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + return 0; + } + + payload += chal_len; + + switch (dh_grp_id) { + case DH_GROUP_2048: + case DH_GROUP_1536: + case DH_GROUP_1280: + case DH_GROUP_1024: + key_len = dh_group_array[dh_grp_id].length; + rsp_key_len = dh_group_array[dh_grp_id].length; + /* Save the dh value */ + dh_val_len = be32_to_cpu(*((uint32_t *)payload)); + payload += sizeof(uint32_t); + + dh_value = kzalloc(dh_val_len, GFP_KERNEL); + if (!dh_value) + goto out; + memcpy(dh_value, payload, dh_val_len); + + /* Compute Ephemeral DH Key */ + get_random_bytes(nonce, sizeof(nonce)); + + rc = lpfc_auth_compute_dhkey(dh_value, dh_val_len, nonce, + sizeof(nonce), + dh_group_array[dh_grp_id].value, + dh_group_array[dh_grp_id].length, + key, key_len); + if (rc) + break; + + /* Save this key, (((g)^x)^y mod p) */ + memcpy(ndlp->dhc_cfg.dhkey, key, key_len); + ndlp->dhc_cfg.dhkey_len = key_len; + + rc = lpfc_auth_compute_dhkey(&rsp_key_base, + sizeof(rsp_key_base), + nonce, sizeof(nonce), + dh_group_array[dh_grp_id].value, + dh_group_array[dh_grp_id].length, + rsp_key, rsp_key_len); + if (rc) + break; + + /* + * Compute the augmented challenge + * aug_chal = hash(Challenge || Ephemeral DH Key) + */ + aug_chal = lpfc_auth_compute_hash(hash_id, challenge, chal_len, + key, key_len, 0, 0); + if (!aug_chal) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3045 Null aug_chal\n"); + break; + } + + /* Calculate the hash response */ + hash_value = lpfc_auth_compute_hash(hash_id, + &tran_id, + sizeof(tran_id), + 
passwd,
+						    passwd_len,
+						    aug_chal,
+						    chal_len);
+		break;
+	case DH_GROUP_NULL:
+		/* Calculate the hash response */
+		hash_value = lpfc_auth_compute_hash(hash_id,
+						    &tran_id,
+						    sizeof(tran_id),
+						    passwd,
+						    passwd_len,
+						    challenge,
+						    chal_len);
+		rsp_key_len = 0;
+		break;
+	default:
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE,
+				       AUTHEXPL_BAD_PAYLOAD);
+		return 0;
+	}
+
+	if (!hash_value) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH,
+				 "3046 Null hash_value\n");
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE, AUTHEXPL_AUTH_FAILED);
+		goto out;
+	}
+
+	/* Issue DHCHAP reply */
+	lpfc_auth_issue_dhchap_reply(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				     dh_grp_id, hash_id, hash_value, rsp_key,
+				     rsp_key_len);
+
+out:
+	kfree(dh_value);
+	kfree(aug_chal);
+	kfree(hash_value);
+	return 0;
+}
+
+/**
+ * lpfc_auth_handle_dhchap_reply - Handle DHCHAP Reply for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @payload: pointer to DHCHAP Reply cmd payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Validates the reply and verifies the hash response, conceptually:
+ *
+ *   K      = (g^y)^x mod p              (non-NULL DH groups only)
+ *   expect = hash(tran_id || remote_secret || hash(C || K))
+ *
+ * where C is the challenge we issued; for the NULL group C is used in
+ * place of hash(C || K). On a match, our response to any bidirectional
+ * challenge is computed the same way with the local secret, and
+ * DHCHAP_SUCCESS is issued.
+ *
+ * Return code
+ *   0 - Always; failures are reported to the remote port via AUTH_REJECT
+ **/
+static int
+lpfc_auth_handle_dhchap_reply(struct lpfc_vport *vport, uint8_t *payload,
+			      struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_auth_hdr *hdr;
+	uint8_t tran_id = ndlp->dhc_cfg.tran_id & 0xff;
+	uint32_t hash_id = ndlp->dhc_cfg.hash_id;
+	uint32_t dh_grp_id = ndlp->dhc_cfg.dh_grp_id;
+	uint32_t dh_val_len;
+	uint8_t dh_value[256], key[256];
+	uint32_t resp_len;
+	uint32_t key_len = 0;
+	uint8_t resp_value[20];
+	uint8_t *aug_chal = NULL;
+	uint8_t *hash_value = NULL;
+	uint8_t challenge[20];
+	uint32_t chal_len;
+	char *local_passwd = ndlp->dhc_cfg.local_passwd;
+	uint32_t local_passwd_len = ndlp->dhc_cfg.local_passwd_len;
+	char *remote_passwd = ndlp->dhc_cfg.remote_passwd;
+	uint32_t remote_passwd_len = ndlp->dhc_cfg.remote_passwd_len;
+	int rc = 0;
+
+	hdr = (struct lpfc_auth_hdr *)payload;
+	/* Sanity check the AUTH message header */
+	if (hdr->protocol_ver != AUTH_PROTOCOL_VER_1 ||
+	    be32_to_cpu(hdr->tran_id) != ndlp->dhc_cfg.tran_id) {
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE,
+				       AUTHEXPL_BAD_PAYLOAD);
+		return 0;
+	}
+
+	payload += sizeof(struct lpfc_auth_hdr);
+	resp_len = be32_to_cpu(*(uint32_t *)payload);
+	payload += sizeof(uint32_t);
+
+	if (hash_id == HASH_MD5 && resp_len == MD5_CHAL_LEN) {
+		memcpy(resp_value, payload, MD5_CHAL_LEN);
+	} else if (hash_id == HASH_SHA1 && resp_len == SHA1_CHAL_LEN) {
+		memcpy(resp_value, payload, SHA1_CHAL_LEN);
+	} else {
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD);
+		return 0;
+	}
+
+	payload += resp_len;
+	dh_val_len = be32_to_cpu(*(uint32_t *)payload);
+	payload += sizeof(uint32_t);
+
+	if (dh_val_len != dh_group_array[dh_grp_id].length) {
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD);
+		return 0;
+	}
+
+	memcpy(dh_value, payload, dh_val_len);
+	payload += dh_val_len;
+
+	/* Check for bidirectional challenge */
+	chal_len = be32_to_cpu(*(uint32_t *)payload);
+	payload += sizeof(uint32_t);
+	if (chal_len) {
+		if (chal_len != resp_len) {
+			lpfc_auth_issue_reject(vport, ndlp,
+					       be32_to_cpu(hdr->tran_id),
+					       AUTHRJT_FAILURE,
+					       AUTHEXPL_BAD_PAYLOAD);
+			return 0;
+		}
+
+		memcpy(challenge, payload, chal_len);
+		/* reject a challenge that merely echoes the one we issued */
+		if (!memcmp(challenge, ndlp->dhc_cfg.rand_num, chal_len)) {
+
lpfc_auth_issue_reject(vport, ndlp,
+					       be32_to_cpu(hdr->tran_id),
+					       AUTHRJT_FAILURE,
+					       AUTHEXPL_BAD_PAYLOAD);
+			return 0;
+		}
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH,
+			 "6043 AUTH_REPLY tran_id: x%x hash_id: x%x "
+			 "resp_len: x%x chal_len: x%x dh_grp: x%x\n",
+			 tran_id, hash_id, resp_len, chal_len, dh_grp_id);
+
+	switch (dh_grp_id) {
+	case DH_GROUP_2048:
+	case DH_GROUP_1536:
+	case DH_GROUP_1280:
+	case DH_GROUP_1024:
+		key_len = dh_group_array[dh_grp_id].length;
+
+		/* Compute Ephemeral DH Key */
+		rc = lpfc_auth_compute_dhkey(dh_value, dh_val_len,
+					     ndlp->dhc_cfg.nonce,
+					     sizeof(ndlp->dhc_cfg.nonce),
+					     dh_group_array[dh_grp_id].value,
+					     dh_group_array[dh_grp_id].length,
+					     key, key_len);
+		if (rc)
+			break;
+
+		/*
+		 * Compute the augmented challenge
+		 * aug_chal = hash(Challenge || Ephemeral DH Key)
+		 */
+		aug_chal = lpfc_auth_compute_hash(hash_id,
+						  ndlp->dhc_cfg.rand_num,
+						  resp_len, key, key_len,
+						  0, 0);
+		if (!aug_chal) {
+			rc = 1;
+			goto out;
+		}
+
+		/* Calculate the hash response */
+		hash_value = lpfc_auth_compute_hash(hash_id,
+						    &tran_id,
+						    sizeof(tran_id),
+						    remote_passwd,
+						    remote_passwd_len,
+						    aug_chal,
+						    resp_len);
+		break;
+	case DH_GROUP_NULL:
+		hash_value = lpfc_auth_compute_hash(hash_id,
+						    &tran_id, sizeof(tran_id),
+						    remote_passwd,
+						    remote_passwd_len,
+						    ndlp->dhc_cfg.rand_num,
+						    resp_len);
+		break;
+	default:
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD);
+		return 0;
+	}
+
+	if (!hash_value) {
+		rc = 1;
+		goto out;
+	}
+
+	/* compare the hash with hash received from the remote port */
+	if (memcmp(hash_value, resp_value, resp_len)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH,
+				 "3047 Hash verification failed\n");
+		rc = 1;
+		goto out;
+	}
+
+	kfree(aug_chal);
+	aug_chal = NULL;
+	kfree(hash_value);
+	hash_value = NULL;
+
+	/* Compute the response hash */
+	if (dh_grp_id) {
+		/*
+		 * Compute the augmented challenge
+		 * aug_chal = hash(Challenge || Ephemeral DH Key)
+		 */
+		aug_chal = lpfc_auth_compute_hash(hash_id,
+						  challenge, chal_len,
+						  key, key_len, 0, 0);
+
+		if (!aug_chal) {
+			rc = 1;
+			goto out;
+		}
+		hash_value = lpfc_auth_compute_hash(hash_id,
+						    &tran_id,
+						    sizeof(tran_id),
+						    local_passwd,
+						    local_passwd_len,
+						    aug_chal,
+						    chal_len);
+	} else {
+		hash_value = lpfc_auth_compute_hash(hash_id,
+						    &tran_id,
+						    sizeof(tran_id),
+						    local_passwd,
+						    local_passwd_len,
+						    challenge,
+						    chal_len);
+	}
+
+	if (!hash_value) {
+		rc = 1;
+		goto out;
+	}
+
+	lpfc_auth_issue_dhchap_success(vport, ndlp, hash_value);
+
+out:
+	kfree(aug_chal);
+	kfree(hash_value);
+	if (rc)
+		lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id),
+				       AUTHRJT_FAILURE, AUTHEXPL_AUTH_FAILED);
+	return 0;
+}
+
+/**
+ * lpfc_auth_handle_dhchap_success - Handle DHCHAP Success for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @payload: pointer to DHCHAP Success cmd payload.
+ * @ndlp: pointer to a node-list data structure.
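+ *
+ * A non-zero response length means the remote port answered our
+ * bidirectional challenge; the response is verified just like a reply
+ * (see lpfc_auth_handle_dhchap_reply()), using the challenge and the
+ * ephemeral DH key that were saved when our DHCHAP_REPLY was built.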
+ * + * Return code + * 0 - + * 1 - + **/ +static int +lpfc_auth_handle_dhchap_success(struct lpfc_vport *vport, uint8_t *payload, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_auth_hdr *hdr; + uint32_t resp_len; + uint8_t *hash_value = NULL; + uint8_t *resp_value = NULL; + uint8_t *aug_chal = NULL; + uint8_t tran_id; + uint32_t hash_id; + uint32_t dh_grp_id; + uint32_t chal_len = 0; + char *local_passwd = ndlp->dhc_cfg.local_passwd; + uint32_t local_passwd_len = ndlp->dhc_cfg.local_passwd_len; + char *remote_passwd = ndlp->dhc_cfg.remote_passwd; + uint32_t remote_passwd_len = ndlp->dhc_cfg.remote_passwd_len; + int rc = 0; + + hdr = (struct lpfc_auth_hdr *)payload; + /* Sanity check the AUTH message header */ + if (hdr->protocol_ver != AUTH_PROTOCOL_VER_1 || + be32_to_cpu(hdr->tran_id) != ndlp->dhc_cfg.tran_id) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + return 0; + } + payload += sizeof(struct lpfc_auth_hdr); + + tran_id = be32_to_cpu(hdr->tran_id) & 0xff; + resp_len = be32_to_cpu(*((uint32_t *)payload)); + resp_value = payload + sizeof(uint32_t); + + hash_id = ndlp->dhc_cfg.hash_id; + if (hash_id == HASH_MD5) { + chal_len = MD5_CHAL_LEN; + } else if (hash_id == HASH_SHA1) { + chal_len = SHA1_CHAL_LEN; + } else { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + return 0; + } + + dh_grp_id = ndlp->dhc_cfg.dh_grp_id; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3048 DHCHAP_SUCCESS resp_len = x%x, tran_id=x%x " + "hash_id=x%x\n", + resp_len, tran_id, hash_id); + if (resp_len) { + if (resp_len != chal_len) { + lpfc_auth_issue_reject(vport, ndlp, + be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, + AUTHEXPL_BAD_PAYLOAD); + return 0; + } + + if (dh_grp_id) { + /* + * Compute the augmented challenge + * aug_chal = hash(Challenge || Ephemeral DH Key) + */ + aug_chal = + lpfc_auth_compute_hash(hash_id, + ndlp->dhc_cfg.rand_num, + chal_len, + ndlp->dhc_cfg.dhkey, + ndlp->dhc_cfg.dhkey_len, + 0, 0); + if (!aug_chal) { + rc = 1; + goto out; + } + + /* Calculate the hash response */ + hash_value = lpfc_auth_compute_hash(hash_id, + &tran_id, + sizeof(tran_id), + remote_passwd, + remote_passwd_len, + aug_chal, + chal_len); + } else { + /* Calculate the hash response */ + hash_value = + lpfc_auth_compute_hash(hash_id, + &tran_id, + sizeof(tran_id), + remote_passwd, + remote_passwd_len, + ndlp->dhc_cfg.rand_num, + chal_len); + } + + if (!hash_value) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3049 Null hash_value\n"); + rc = 1; + goto out; + } + + /* compare the hash with hash received from the remote port */ + if (memcmp(hash_value, resp_value, resp_len)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_AUTH, + "3100 Hash verification failed\n"); + rc = 1; + goto out; + } + + if (!dh_grp_id) { + /* test for identical local and remote secrets */ + hash_value = + lpfc_auth_compute_hash(hash_id, + &tran_id, + sizeof(tran_id), + local_passwd, + local_passwd_len, + ndlp->dhc_cfg.rand_num, + chal_len); + if (!hash_value) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_AUTH, + "3014 Null hash_value\n"); + rc = 1; + goto out; + } + if (!memcmp(hash_value, resp_value, resp_len)) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_AUTH, + "3101 Local and Remote " + "secrets must differ.\n"); + rc = 1; + goto out; + } + } + + lpfc_auth_issue_dhchap_success(vport, ndlp, NULL); + + } else { + ndlp->dhc_cfg.state = LPFC_AUTH_SUCCESS; + 
ndlp->dhc_cfg.last_auth = jiffies; + } + ndlp->dhc_cfg.msg = LPFC_DHCHAP_SUCCESS; + ndlp->dhc_cfg.direction |= AUTH_DIRECTION_REMOTE; +out: + kfree(aug_chal); + kfree(hash_value); + if (rc) { + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_AUTH_FAILED); + lpfc_issue_els_logo(vport, ndlp, 0); + } + return 0; +} + +/** + * lpfc_auth_handle_cmd - Handle unsolicited auth cmd for a vport + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - + * 1 - + **/ +int +lpfc_auth_handle_cmd(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + uint8_t *pcmd = NULL; + uint32_t size, size1 = 0, size2 = 0; + struct lpfc_auth_hdr *hdr; + struct ulp_bde64 *pbde; + int rc = 0; + + size = cmdiocb->iocb.unsli3.rcvsli3.acc_len; + pcmd = kzalloc(size, GFP_KERNEL); + if (!pcmd) + goto out; + + size1 = cmdiocb->iocb.un.cont64[0].tus.f.bdeSize; + memcpy(pcmd, ((struct lpfc_dmabuf *)cmdiocb->context2)->virt, size1); + if (cmdiocb->iocb.ulpBdeCount == 2) { + pbde = (struct ulp_bde64 *)&cmdiocb->iocb.unsli3.sli3Words[4]; + size2 = pbde->tus.f.bdeSize; + memcpy(pcmd + size1, + ((struct lpfc_dmabuf *)cmdiocb->context3)->virt, size2); + } + + lpfc_auth_cancel_tmr(ndlp); + hdr = (struct lpfc_auth_hdr *)pcmd; + + switch (hdr->auth_msg_code) { + case AUTH_REJECT: + rc = lpfc_auth_handle_reject(vport, pcmd, ndlp); + break; + case AUTH_NEGOTIATE: + rc = lpfc_auth_handle_negotiate(vport, pcmd, ndlp); + break; + case AUTH_DONE: + /* Nothing to do */ + break; + case DHCHAP_CHALLENGE: + rc = lpfc_auth_handle_dhchap_chal(vport, pcmd, ndlp); + break; + case DHCHAP_REPLY: + rc = lpfc_auth_handle_dhchap_reply(vport, pcmd, ndlp); + break; + case DHCHAP_SUCCESS: + rc = lpfc_auth_handle_dhchap_success(vport, pcmd, ndlp); + if (!rc) { + /* Authentication complete */ + if (vport->port_state < LPFC_VPORT_READY) { + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } + + /* Set up reauthentication timer */ + if (ndlp->dhc_cfg.reauth_interval) + lpfc_auth_start_reauth_tmr(ndlp); + } + break; + default: + lpfc_auth_issue_reject(vport, ndlp, be32_to_cpu(hdr->tran_id), + AUTHRJT_FAILURE, AUTHEXPL_BAD_PAYLOAD); + break; + } + +out: + kfree(pcmd); + return rc; +} + +/** + * lpfc_auth_start - Start DH-CHAP authentication + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine initiates authentication on the @vport. + * + * Return codes + * 0 - Authentication required before logins continue + * 1 - No Authentication will be done + **/ +int +lpfc_auth_start(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + uint8_t fcsp_en; + int rc = 0; + + if (!phba->cfg_enable_auth) + return 1; + + if (phba->sli4_hba.fawwpn_in_use) + return 1; + + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return 1; + + /* + * If FC_DISC_DELAYED is set, delay the authentication. 
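+	 * The code below only re-arms delayed_disc_tmo for another
+	 * fc_ratov seconds and returns without starting authentication.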
+	 */
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_flag & FC_DISC_DELAYED) {
+		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"3435 Delay authentication for %d seconds\n",
+				phba->fc_ratov);
+		mod_timer(&vport->delayed_disc_tmo,
+			  jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+		return 0;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	fcsp_en = (ndlp->nlp_DID == Fabric_DID) ? phba->fc_fabparam.cmn.fcsp :
+		  ndlp->fc_sparam.cmn.fcsp;
+
+	/* Determine the dh-chap configuration for port pair */
+	rc = lpfc_auth_lookup_cfg(ndlp);
+	if (rc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_AUTH,
+				 "3042 No Authentication configuration found "
+				 "for request.\n");
+
+		if (fcsp_en) {
+			ndlp->dhc_cfg.state = LPFC_AUTH_FAIL;
+			lpfc_issue_els_logo(vport, ndlp, 0);
+			return 0;
+		}
+		return 1;
+	}
+
+	switch (ndlp->dhc_cfg.auth_mode) {
+	case LPFC_AUTH_MODE_PASSIVE:
+		if (!fcsp_en)
+			return 1;
+		/* fall through */
+	case LPFC_AUTH_MODE_ACTIVE:
+		rc = lpfc_auth_issue_negotiate(vport, ndlp);
+		if (rc)
+			ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN;
+		break;
+	case LPFC_AUTH_MODE_DISABLE:
+		if (!fcsp_en)
+			return 1;
+		/* fall through */
+	default:
+		ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN;
+		/* logout should be sent */
+		lpfc_issue_els_logo(vport, ndlp, 0);
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_auth_reauth - Restart DH-CHAP authentication
+ * @ptr: pointer to a node-list data structure.
+ *
+ * This routine queues a reauthentication event for the worker thread
+ * on the node's vport.
+ *
+ **/
+void
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+lpfc_auth_reauth(unsigned long ptr)
+#else
+lpfc_auth_reauth(struct timer_list *t)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)ptr;
+#else
+	struct lpfc_nodelist *ndlp = from_timer(ndlp, t,
+						dhc_cfg.nlp_reauthfunc);
+#endif
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_work_evt *evtp = &ndlp->reauth_evt;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (!list_empty(&evtp->evt_listp)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	/* We need to hold the node by incrementing the reference
+	 * count until the queued work is done
+	 */
+	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_REAUTH;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_AUTH,
+			 "3102 Start Reauthentication\n");
+}
+
+/**
+ * lpfc_auth_timeout_handler - process authentication timer expiry
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine will clean up an incomplete authentication or start
+ * reauthentication.
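+ * If the last message seen was DHCHAP_SUCCESS, authentication had
+ * completed and lpfc_auth_start() is called to reauthenticate;
+ * otherwise the node is marked LPFC_AUTH_FAIL and any delayed ELS
+ * retry held for it is cancelled.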
+ * + **/ +void +lpfc_auth_timeout_handler(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_work_evt *evtp; + + /* determine whether previous authentication completed */ + if (ndlp->dhc_cfg.msg == LPFC_DHCHAP_SUCCESS) { + lpfc_auth_start(vport, ndlp); + } else { + ndlp->dhc_cfg.state = LPFC_AUTH_FAIL; + ndlp->dhc_cfg.msg = LPFC_AUTH_REJECT; + ndlp->dhc_cfg.direction = AUTH_DIRECTION_NONE; + + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) + return; + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&ndlp->nlp_delayfunc); + ndlp->nlp_last_elscmd = 0; + if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { + list_del_init(&ndlp->els_retry_evt.evt_listp); + /* Decrement ndlp ref count held for delayed retry */ + evtp = &ndlp->els_retry_evt; + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); + } + } +} + diff --git a/drivers/scsi/lpfc/lpfc_auth.h b/drivers/scsi/lpfc/lpfc_auth.h new file mode 100644 index 000000000000..9edd01f23d01 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_auth.h @@ -0,0 +1,222 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.broadcom.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ +/* See Fibre Channel protocol T11 FC-SP-2 for details */ + +/* AUTH_ELS Message header */ +struct lpfc_auth_hdr { + uint8_t auth_els_code; /* 0x90 */ + uint8_t auth_els_flags; + uint8_t auth_msg_code; + uint8_t protocol_ver; /* version = 1 */ + uint32_t msg_len; /* msg payload len */ + uint32_t tran_id; /* transaction id */ +}; + +/* protocol_ver defines */ +#define AUTH_PROTOCOL_VER_1 0x1 + +/* auth_msg_code defines */ +#define AUTH_REJECT 0x0A +#define AUTH_NEGOTIATE 0x0B +#define AUTH_DONE 0x0C +#define DHCHAP_CHALLENGE 0x10 +#define DHCHAP_REPLY 0x11 +#define DHCHAP_SUCCESS 0x12 + +/* Auth Parameter Header */ +struct lpfc_auth_param_hdr { + uint16_t tag; + uint16_t wcnt; +}; + +struct lpfc_dhchap_param_cmn { + uint32_t param_len; + uint32_t protocol_id; /* DH-CHAP = 1 */ + /* DH-CHAP specific */ + /* variable length data */ +}; + +/* AUTH_ELS Negotiate payload */ +struct lpfc_auth_negotiate_cmn { + uint16_t name_tag; /* tag = 1 */ + uint16_t name_len; /* len = 8 */ + uint32_t port_name[2]; /* WWPN */ + uint32_t num_protocol; /* only 1 protocol for now */ + /* Protocol specific */ + /* variable length data */ +}; + +/* name_tag defines */ +#define AUTH_NAME_TAG 1 + +/* protocol_id defines */ +#define PROTOCOL_DHCHAP 1 +#define PROTOCOL_FCAP 2 +#define PROTOCOL_FCPAP 3 +#define PROTOCOL_KERBEROS 4 + +/* DHCHAP param tag defines */ +#define DHCHAP_TAG_HASHLIST 1 +#define DHCHAP_TAG_GRP_IDLIST 2 + +/* DHCHAP Group ID defines */ +#define DH_GROUP_NULL 0x00 +#define DH_GROUP_1024 0x01 +#define DH_GROUP_1280 0x02 +#define DH_GROUP_1536 0x03 +#define DH_GROUP_2048 0x04 + +/* hash function defines */ +#define HASH_MD5 5 +#define HASH_SHA1 6 + +#define MD5_CHAL_LEN 16 +#define SHA1_CHAL_LEN 20 + +#define AUTH_DIRECTION_NONE 0 +#define AUTH_DIRECTION_REMOTE 1 +#define AUTH_DIRECTION_LOCAL 2 +#define AUTH_DIRECTION_BIDI (AUTH_DIRECTION_LOCAL | AUTH_DIRECTION_REMOTE) + +#define AUTH_FABRIC_WWN 0xFFFFFFFFFFFFFFFFLL + +enum auth_state { + LPFC_AUTH_UNKNOWN = 0, + LPFC_AUTH_SUCCESS = 1, + LPFC_AUTH_FAIL = 2, +}; + +enum auth_msg_state { + LPFC_AUTH_NONE = 0, + LPFC_AUTH_REJECT = 1, /* Sent a Reject */ + LPFC_AUTH_NEGOTIATE = 2, /* Auth Negotiate */ + LPFC_DHCHAP_CHALLENGE = 3, /* Challenge */ + LPFC_DHCHAP_REPLY = 4, /* Reply */ + LPFC_DHCHAP_SUCCESS_REPLY = 5, /* Success with Reply */ + LPFC_DHCHAP_SUCCESS = 6, /* Success */ + LPFC_AUTH_DONE = 7, +}; + +#define LPFC_AUTH_STATE_UNKNOWN 0 +#define LPFC_AUTH_STATE_SUCCESS 1 +#define LPFC_AUTH_STATE_FAIL 2 + +#define DHC_STATE_AUTH_NEGO 1 +#define DHC_STATE_AUTH_CHAL 2 +#define DHC_STATE_AUTH_REPLY 3 +#define DHC_STATE_AUTH_SUCCESS 4 +#define DHC_STATE_AUTH_FAIL 5 + +/* AUTH_ELS Challenge payload */ +struct lpfc_auth_challenge { + uint16_t name_tag; /* tag = 1 */ + uint16_t name_len; /* len = 8 */ + uint32_t port_name[2]; /* WWPN */ + uint32_t hash_id; /* Hash function id */ + uint32_t dh_grp_id; /* DH_Group id */ + uint32_t chal_len; /* Challenge length */ + /* Variable length data */ +}; + +/* AUTH Reject reason code defines */ +#define AUTHRJT_FAILURE 1 +#define AUTHRJT_LOGICAL_ERR 2 + +/* AUTH Reject reason code explanation defines */ +#define AUTHEXPL_MECH_UNUSABLE 1 +#define AUTHEXPL_DHGRP_UNUSABLE 2 +#define AUTHEXPL_HASH_UNUSABLE 3 +#define AUTHEXPL_AUTH_STARTED 4 +#define AUTHEXPL_AUTH_FAILED 5 +#define AUTHEXPL_BAD_PAYLOAD 6 +#define AUTHEXPL_BAD_PROTOCOL 7 +#define AUTHEXPL_RESTART_AUTH 8 + +/* AUTH_ELS Reject payload */ +struct lpfc_auth_reject { + uint8_t rsn_code; + 
uint8_t rsn_expl; + uint8_t rsvd[2]; +}; + +struct dh_group { + uint32_t groupid; + uint32_t length; + uint8_t value[256]; +}; + +#define LPFC_AUTH_PSSWD_MAX_LEN 128 +struct lpfc_auth_pwd_entry { + uint8_t local_pw_length; + uint8_t local_pw_mode; + uint8_t remote_pw_length; + uint8_t remote_pw_mode; + uint8_t local_pw[LPFC_AUTH_PSSWD_MAX_LEN]; + uint8_t remote_pw[LPFC_AUTH_PSSWD_MAX_LEN]; +}; + +struct lpfc_auth_cfg_hdr { + uint32_t signature; + uint8_t version; + uint8_t size; + uint16_t entry_cnt; +}; + +struct lpfc_auth_cfg_entry { + uint8_t local_wwn[8]; + uint8_t remote_wwn[8]; + uint16_t auth_tmo; +#define LPFC_AUTH_MODE_DISABLE 1 +#define LPFC_AUTH_MODE_ACTIVE 2 +#define LPFC_AUTH_MODE_PASSIVE 3 + uint8_t auth_mode; +#define LPFC_AUTH_BIDIR_DISABLE 0 +#define LPFC_AUTH_BIDIR_ENABLE 1 + struct { + uint8_t bi_direction : 1; + uint8_t rsvd : 6; + uint8_t valid : 1; + } auth_flags; + uint8_t auth_priority[4]; +#define LPFC_HASH_MD5 1 +#define LPFC_HASH_SHA1 2 + uint8_t hash_priority[4]; +#define LPFC_DH_GROUP_NULL 1 +#define LPFC_DH_GROUP_1024 2 +#define LPFC_DH_GROUP_1280 3 +#define LPFC_DH_GROUP_1536 4 +#define LPFC_DH_GROUP_2048 5 + uint8_t dh_grp_priority[8]; + uint32_t reauth_interval; + struct lpfc_auth_pwd_entry pwd; +}; + +#define LPFC_AUTH_OBJECT_MAX_SIZE 0x8000 +#define MAX_AUTH_CONFIG_ENTRIES 16 +#define AUTH_CONFIG_ENTRIES_PER_PAGE ((SLI4_PAGE_SIZE - \ + sizeof(struct lpfc_auth_cfg_hdr)) / sizeof(struct lpfc_auth_cfg_entry)) + +struct lpfc_auth_cfg { + struct lpfc_auth_cfg_hdr hdr; + struct lpfc_auth_cfg_entry entry[MAX_AUTH_CONFIG_ENTRIES]; +}; + diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 6c2b03415a2c..2d7c882570cc 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -26,7 +26,6 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> -#include <linux/bsg-lib.h> #include <linux/vmalloc.h> #include <scsi/scsi.h> @@ -34,6 +33,9 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_bsg_fc.h> #include <scsi/fc/fc_fs.h> +#ifndef NO_APEX +#include <scsi/scsi_device.h> +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -49,6 +51,9 @@ #include "lpfc_debugfs.h" #include "lpfc_vport.h" #include "lpfc_version.h" +#ifndef BUILD_BRCMFCOE +#include "lpfc_auth.h" +#endif struct lpfc_bsg_event { struct list_head node; @@ -278,6 +283,328 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, return bytes_copied; } +#ifndef NO_APEX +/** + * lpfc_chk_param - memcmp two fields. + * @prta: uint8_t pointer. + * @prtb: uint8_t pointer. + * @size: size to compare. + * returns 1 if equal, 0 if not equal + **/ +static int +lpfc_chk_params(uint8_t *ptra, uint8_t *ptrb, int size) +{ + return memcmp(ptra, ptrb, size) == 0; +} + +/** + * lpfc_chk_wwpn - memcmp two port names. + * @prta: uint8_t pointer to portname. + * @prtb: uint8_t pointer to portname. + * @size: size to compare. 
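+ *
+ * Callers treat a wwpn of all 0xff bytes (copied from (uint64_t)-1) as
+ * a wildcard, so a compare against such a buffer implements "match any".
+ *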
+ * returns 1 if equal, 0 if not equal
+ **/
+static int
+lpfc_chk_wwpn(uint8_t *ptra, uint8_t *ptrb, int size)
+{
+	return lpfc_chk_params(ptra, ptrb, size);
+}
+
+/**
+ * lpfc_chk_lun - memcmp two luns.
+ * @ptra: uint8_t pointer to 1st lun.
+ * @ptrb: uint8_t pointer to 2nd lun.
+ * @size: size to compare.
+ * returns 1 if equal, 0 if not equal
+ **/
+static int
+lpfc_chk_lun(uint8_t *ptra, uint8_t *ptrb, int size)
+{
+	return lpfc_chk_params(ptra, ptrb, size);
+}
+
+/**
+ * lpfc_conv_lun - convert fcp lun to scsi lun.
+ * @fcp_lun: uint8_t pointer to fcp lun.
+ * returns lun as an integer.
+ **/
+static int
+lpfc_conv_lun(uint8_t *fcp_lun)
+{
+	return scsilun_to_int((struct scsi_lun *)fcp_lun);
+}
+
+/**
+ * lpfc_set_ndlp_fcp_pri - set the fcp priority for the lun range.
+ * @vport: lpfc_vport pointer.
+ * @ndlp: lpfc_nodelist pointer.
+ * @first_lun: 1st lun.
+ * @last_lun: last lun in the range.
+ * @fcp_priority: fcp priority level
+ * This function sets the priority level for the affected lun range in
+ * the ndlp's fcp_priority array.
+ **/
+static void
+lpfc_set_ndlp_fcp_pri(struct lpfc_vport *vport,
+		      struct lpfc_nodelist *ndlp,
+		      int first_lun, int last_lun,
+		      uint8_t fcp_priority)
+{
+	int i = 0;
+	uint8_t pri = 0;
+	int lun_offset = 0;
+
+	for (i = first_lun; i <= last_lun; i++) {
+		if (i == 0)
+			lun_offset = 0;
+		else
+			lun_offset = i >> 1;
+		pri = ndlp->fcp_priority[lun_offset];
+		if (i & 1)
+			pri = ((pri & 0x0f) | (fcp_priority << 4));
+		else
+			pri = ((pri & 0xf0) | fcp_priority);
+		ndlp->fcp_priority[lun_offset] = pri;
+	}
+}
+
+/**
+ * lpfc_apply_fcp_priority_rule - set the fcp priority I/T nexus.
+ * @vport: lpfc_vport pointer.
+ * @rule: fcp_priority rule.
+ * This function walks the vport's ndlp list and calls lpfc_set_ndlp_fcp_pri
+ * to apply the rule to the ndlp
+ **/
+static void
+lpfc_apply_fcp_priority_rule(struct lpfc_vport *vport,
+			     struct lpfc_fcp_pri_rule *rule)
+{
+	int wild_src = 0;
+	int wild_tgt = 0;
+	int wild_first_lun = 0;
+	int wild_last_lun = 0;
+	uint64_t wild_card;
+	uint8_t *wild;
+	int first_lun = 0;
+	int last_lun = 0;
+	struct lpfc_nodelist *ndlp;
+
+	wild_card = (uint64_t) -1;
+	wild = (uint8_t *) &wild_card;
+	wild_src = lpfc_chk_wwpn(rule->src_wwpn, wild,
+				 sizeof(rule->src_wwpn));
+	if (!wild_src && !lpfc_chk_wwpn(rule->src_wwpn,
+					(uint8_t *)&vport->fc_portname,
+					sizeof(rule->src_wwpn)))
+		return;	/* this rule does not apply to this vport */
+	wild_tgt = lpfc_chk_wwpn(rule->tgt_wwpn, wild,
+				 sizeof(rule->tgt_wwpn));
+	wild_first_lun = lpfc_chk_lun(rule->first_lun, wild,
+				      sizeof(rule->first_lun));
+	wild_last_lun = lpfc_chk_lun(rule->last_lun, wild,
+				     sizeof(rule->last_lun));
+	if (!wild_first_lun) {
+		first_lun = lpfc_conv_lun(rule->first_lun);
+		if (first_lun > vport->cfg_max_luns) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+					 "3032 Bad 1st lun:%d in FCP rule\n",
+					 first_lun);
+			return;
+		}
+	}
+
+	if (!wild_last_lun)
+		last_lun = lpfc_conv_lun(rule->last_lun);
+	else
+		last_lun = vport->cfg_max_luns;
+	if (last_lun > vport->cfg_max_luns)
+		last_lun = vport->cfg_max_luns;
+
+	if (!wild_tgt) {
+		ndlp = lpfc_findnode_wwpn(vport,
+					  (struct lpfc_name *)rule->tgt_wwpn);
+		if ((!ndlp) || !(ndlp->nlp_type & NLP_FCP_TARGET))
+			return;	/* The target has not been discovered yet */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
+				 "3072 Applying fcp_priority:%d to"
+				 " I=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 " T=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
+				 " luns:0x%x - 0x%x\n",
+				 rule->fcp_priority,
+				 vport->fc_portname.u.wwn[0],
+				 vport->fc_portname.u.wwn[1],
+
vport->fc_portname.u.wwn[2],
+				 vport->fc_portname.u.wwn[3],
+				 vport->fc_portname.u.wwn[4],
+				 vport->fc_portname.u.wwn[5],
+				 vport->fc_portname.u.wwn[6],
+				 vport->fc_portname.u.wwn[7],
+				 ndlp->nlp_portname.u.wwn[0],
+				 ndlp->nlp_portname.u.wwn[1],
+				 ndlp->nlp_portname.u.wwn[2],
+				 ndlp->nlp_portname.u.wwn[3],
+				 ndlp->nlp_portname.u.wwn[4],
+				 ndlp->nlp_portname.u.wwn[5],
+				 ndlp->nlp_portname.u.wwn[6],
+				 ndlp->nlp_portname.u.wwn[7],
+				 first_lun, last_lun);
+		lpfc_set_ndlp_fcp_pri(vport, ndlp, first_lun, last_lun,
+				      rule->fcp_priority);
+	} else {
+		/* walk the vports list of ndlp's and set the priority. */
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+			if (!(ndlp->nlp_type & NLP_FCP_TARGET))
+				continue;
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
+				"3090 Applying fcp_priority:%d to"
+				" I=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				" T=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
+				" luns:0x%x - 0x%x\n",
+				rule->fcp_priority,
+				vport->fc_portname.u.wwn[0],
+				vport->fc_portname.u.wwn[1],
+				vport->fc_portname.u.wwn[2],
+				vport->fc_portname.u.wwn[3],
+				vport->fc_portname.u.wwn[4],
+				vport->fc_portname.u.wwn[5],
+				vport->fc_portname.u.wwn[6],
+				vport->fc_portname.u.wwn[7],
+				ndlp->nlp_portname.u.wwn[0],
+				ndlp->nlp_portname.u.wwn[1],
+				ndlp->nlp_portname.u.wwn[2],
+				ndlp->nlp_portname.u.wwn[3],
+				ndlp->nlp_portname.u.wwn[4],
+				ndlp->nlp_portname.u.wwn[5],
+				ndlp->nlp_portname.u.wwn[6],
+				ndlp->nlp_portname.u.wwn[7],
+				first_lun, last_lun);
+
+			lpfc_set_ndlp_fcp_pri(vport, ndlp, first_lun, last_lun,
+					      rule->fcp_priority);
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_set_fcp_priority - set the fcp priority for each rule.
+ * @phba: lpfc_hba pointer.
+ * @rules: fcp_priority rules list.
+ * This function walks the rules list and attempts to apply them to every vport
+ * and ndlp.
+ **/
+static void
+lpfc_set_fcp_priority(struct lpfc_hba *phba, struct lpfc_fcp_pri_rules *rules)
+{
+	struct lpfc_vport **vports;
+	int i = 0;
+	int j = 0;
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (j = rules->number_of_entries - 1; j >= 0; j--) {
+			for (i = 0;
+			     i <= phba->max_vports && vports[i] != NULL;
+			     i++) {
+				lpfc_apply_fcp_priority_rule(vports[i],
+							&rules->rule_entry[j]);
+			}
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+}
+
+/**
+ * lpfc_set_ndlps_fcp_priority - apply the fcp priority rules to one ndlp.
+ * @ndlp: lpfc_nodelist pointer.
+ * This function walks the rules list and applies each rule whose source
+ * and target wwpns match this ndlp's vport and port name.
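+ *
+ * Note: the rules are walked from the last entry to the first, so when
+ * two rules overlap the earlier entry is applied last and wins.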
+ **/
+void
+lpfc_set_ndlps_fcp_priority(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_pri_rules *rules;
+	int j = 0;
+	int istgt = 0;
+	int isinit = 0;
+	uint64_t wild_card;
+	uint8_t *wild;
+
+	if (!vport)
+		return;
+	phba = vport->phba;
+	if (!phba)
+		return;
+	if (!phba->fcp_priority_rules)
+		return;
+	rules = phba->fcp_priority_rules;
+	if (!rules)
+		return;
+	wild_card = (uint64_t) -1;
+	wild = (uint8_t *) &wild_card;
+	for (j = rules->number_of_entries - 1; j >= 0; j--) {
+		isinit = lpfc_chk_wwpn(rules->rule_entry[j].src_wwpn, wild,
+				       sizeof(rules->rule_entry[j].src_wwpn));
+		if (!isinit)
+			isinit = lpfc_chk_wwpn(rules->rule_entry[j].src_wwpn,
+					(uint8_t *)&vport->fc_portname,
+					sizeof(rules->rule_entry[j].src_wwpn));
+
+		istgt = lpfc_chk_wwpn(rules->rule_entry[j].tgt_wwpn, wild,
+				      sizeof(rules->rule_entry[j].tgt_wwpn));
+		if (!istgt)
+			istgt = lpfc_chk_wwpn(rules->rule_entry[j].tgt_wwpn,
+					(uint8_t *)&ndlp->nlp_portname,
+					sizeof(rules->rule_entry[j].tgt_wwpn));
+		if (istgt && isinit)
+			lpfc_apply_fcp_priority_rule(vport,
+						     &rules->rule_entry[j]);
+	}
+}
+
+/**
+ * lpfc_clr_ndlps_pri - clear the fcp priority for each target.
+ * @vport: lpfc_vport pointer.
+ * This function walks the ndlp list for this vport and clears
+ * all of the fcp_priority.
+ **/
+static void
+lpfc_clr_ndlps_pri(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	/* walk the vport's list of ndlps and clear the priority. */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "3070 lpfc_clr_ndlps_pri: fcp_priority sz = %d\n",
+			 (int)sizeof(ndlp->fcp_priority));
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		memset(ndlp->fcp_priority, 0, sizeof(ndlp->fcp_priority));
+	}
+	return;
+}
+
+/**
+ * lpfc_clr_vports_fcp_prority - clear the fcp priority for each vport.
+ * @phba: lpfc_hba pointer.
+ * This function walks the list of vports and calls lpfc_clr_ndlps_pri.
+ **/
+static void
+lpfc_clr_vports_fcp_prority(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i = 0;
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+		     i++) {
+			lpfc_clr_ndlps_pri(vports[i]);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+}
+#endif
+
 /**
  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  * @phba: Pointer to HBA context object.
@@ -641,6 +968,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, } lpfc_nlp_put(ndlp); + cmdiocbq->iocb_flag &= ~LPFC_IO_RAW; lpfc_els_free_iocb(phba, cmdiocbq); kfree(dd_data); @@ -675,6 +1003,9 @@ lpfc_bsg_rport_els(struct bsg_job *job) unsigned long flags; uint32_t creg_val; int rc = 0; + uint32_t did; + uint32_t *ptr; + uint32_t raw_data[2]; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; @@ -698,11 +1029,46 @@ lpfc_bsg_rport_els(struct bsg_job *job) } elscmd = bsg_request->rqst_data.r_els.els_code; - cmdsize = job->request_payload.payload_len; - if (!lpfc_nlp_get(ndlp)) { - rc = -ENODEV; - goto free_dd_data; + /* Check for raw ELS data as opposed to a specific command */ + if (elscmd == ELS_CMD_RAW) { + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + raw_data, sizeof(raw_data)); + + /* The first word indicates raw data, the second is the did */ + did = raw_data[1]; + + /* Now find an appropriate ndlp */ + if (did != ndlp->nlp_DID) { + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, did); + if (!ndlp) { + rc = -ENODEV; + goto free_dd_data; + } + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if (!ndlp) { + rc = -ENODEV; + goto free_dd_data; + } + } + } + + /* The raw data that forms the ELS command starts at word 2 */ + cmdsize = job->request_payload.payload_len - + (2 * sizeof(uint32_t)); + } else { + did = ndlp->nlp_DID; + if (!lpfc_nlp_get(ndlp)) { + rc = -ENODEV; + goto free_dd_data; + } + cmdsize = job->request_payload.payload_len; } /* We will use the allocated dma buffers by prep els iocb for command @@ -712,20 +1078,35 @@ lpfc_bsg_rport_els(struct bsg_job *job) */ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, - ndlp->nlp_DID, elscmd); + did, elscmd); if (!cmdiocbq) { rc = -EIO; goto release_ndlp; } - rpi = ndlp->nlp_rpi; - /* Transfer the request payload to allocated command dma buffer */ - sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt, - cmdsize); + job->request_payload.payload_len); + + /* Check for raw ELS data as opposed to a specific command */ + if (elscmd == ELS_CMD_RAW) { + cmdiocbq->iocb_flag |= LPFC_IO_RAW; + + /* The raw data for the ELS command starts ar word 2, + * so shift the entire payload back 2 words. 
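+		 * E.g. for payload words { ELS_CMD_RAW, did, w0, w1, ... }
+		 * the loop below leaves { w0, w1, ... } at offset 0,
+		 * cmdsize bytes in all.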
+ */ + ptr = (uint32_t *) + ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt; + while (cmdsize > 0) { + *ptr = *(ptr + 2); + ptr++; + cmdsize -= sizeof(uint32_t); + } + } + + rpi = ndlp->nlp_rpi; if (phba->sli_rev == LPFC_SLI_REV4) cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi]; @@ -939,28 +1320,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, INIT_LIST_HEAD(&head); list_add_tail(&head, &piocbq->list); - if (piocbq->iocb.ulpBdeCount == 0 || - piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) - goto error_ct_unsol_exit; - - if (phba->link_state == LPFC_HBA_ERROR || - (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) - goto error_ct_unsol_exit; - - if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) - dmabuf = bdeBuf1; - else { - dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, - piocbq->iocb.un.cont64[0].addrLow); - dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); - } - if (dmabuf == NULL) - goto error_ct_unsol_exit; - ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; + ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; evt_req_id = ct_req->FsType; cmd = ct_req->CommandResponse.bits.CmdRsp; - if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { @@ -1618,6 +1980,231 @@ no_dd_data: return rc; } +#ifndef NO_APEX +/** + * lpfc_bsg_set_fcp_priority - process a bsg vendor command + * @job: BSG_SET_FCP_PRIORITY fc_bsg_job + **/ +static int +lpfc_bsg_set_fcp_priority(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct set_lpfc_fcp_pri_rules *command; + int len; + int copied; + int rc = 0; + struct lpfc_fcp_pri_rules *myrule; + + + /* in case no data is transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + /* check request len */ + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct set_lpfc_fcp_pri_rules)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3078 Received GET_FCP_PRI request below " + "minimum size\n"); + rc = -EINVAL; + goto set_fcp_priority_exit_1; + } + + /* check reply len */ + if (job->reply_len < sizeof(*bsg_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3079 Received GET_FCP_PRI reply below " + "minimum size\n"); + rc = -EINVAL; + goto set_fcp_priority_exit_1; + } + + /* point to the vendor command */ + command = (struct set_lpfc_fcp_pri_rules *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* app adding an extra beyond what we already have */ + if (command->number_of_entries > LPFC_MAX_FCP_SET_RULES) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3074 Received GET_FCP_PRI reply below " + "minimum size\n"); + rc = -EINVAL; + goto set_fcp_priority_exit_1; + } + + /* get size of rules data, add one extra for adds */ + len = (int)sizeof(struct lpfc_fcp_pri_rules) + + (command->number_of_entries * + sizeof(struct lpfc_fcp_pri_rule)); + myrule = kmalloc(len, GFP_KERNEL); + if (!myrule) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "9009 No memory for rules \n"); + goto set_fcp_priority_exit_1; + } + if (command->number_of_entries > 0) { + len = (int)sizeof(struct lpfc_fcp_pri_rules) + + (command->number_of_entries * + sizeof(struct lpfc_fcp_pri_rule)); + copied = sg_copy_to_buffer(job->request_payload.sg_list, + 
job->request_payload.sg_cnt, myrule, len); + if (len != copied) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3075 Received GET_FCP_PRI reply below " + "minimum size\n"); + rc = -EINVAL; + goto set_fcp_priority_exit_2; + } + + } else + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3069 Clearing FCP rules\n"); + if (phba->fcp_priority_rules) { + lpfc_clr_vports_fcp_prority(phba); + kfree(phba->fcp_priority_rules); + phba->fcp_priority_rules = NULL; + } + if (command->number_of_entries > 0) { + phba->fcp_priority_rules = myrule; + lpfc_set_fcp_priority(phba, myrule); + } else + kfree(myrule); + + job->dd_data = NULL; + bsg_reply->result = 0; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; + +set_fcp_priority_exit_2: + kfree(myrule); +set_fcp_priority_exit_1: + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + return rc; +} + +/** + * lpfc_bsg_get_fcp_priority - process a bsg vendor command + * @job: BSG_GET_FCP_PRIORITY fc_bsg_job + **/ +static int +lpfc_bsg_get_fcp_priority(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + int rc = 0; + struct get_lpfc_fcp_pri_rules *command; + int len; + struct lpfc_fcp_pri_rules *myrule; + + /* get size of dummy data */ + len = (int)sizeof(struct lpfc_fcp_pri_rules); + myrule = kmalloc(len, GFP_KERNEL); + if (!myrule) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "3071 no memory for priority rules\n"); + goto get_fcp_priority_exit1; + } + if (phba->fcp_priority_rules) + myrule->number_of_entries = + phba->fcp_priority_rules->number_of_entries; + else + myrule->number_of_entries = 0; + + /* in case no data is transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + /* check request len */ + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct get_lpfc_fcp_pri_rules)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3073 Received GET_FCP_PRI request below " + "minimum size\n"); + rc = -EINVAL; + goto get_fcp_priority_exit; + } + + /* check reply len */ + if (job->reply_len < sizeof(*bsg_reply) + + sizeof(struct get_lpfc_fcp_pri_rules_reply)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3076 Received GET_FCP_PRI reply below " + "minimum size\n"); + rc = -EINVAL; + goto get_fcp_priority_exit; + } + + /* point to the vendor command */ + command = (struct get_lpfc_fcp_pri_rules *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* app wants more rules than we have */ + if (phba->fcp_priority_rules && (command->number_of_entries > + phba->fcp_priority_rules->number_of_entries)) + len = (int)sizeof(struct lpfc_fcp_pri_rules) + + (phba->fcp_priority_rules->number_of_entries * + sizeof(struct lpfc_fcp_pri_rule)); + /* app wants some or all of our rules */ + else if (phba->fcp_priority_rules && command->number_of_entries > 0) + len = (int)sizeof(struct lpfc_fcp_pri_rules) + + (command->number_of_entries * + sizeof(struct lpfc_fcp_pri_rule)); + /* app is fishing for how many rules we have */ + else + /* return just the count of rules */ + len = (int)sizeof(struct lpfc_fcp_pri_rules); + if (len == (int)sizeof(struct lpfc_fcp_pri_rules)) { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + myrule, len); + } else { + bsg_reply->reply_payload_rcv_len = + 
sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + phba->fcp_priority_rules, len); + } + + if (bsg_reply->reply_payload_rcv_len != len) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3077 Received GET_FCP_PRI reply below " + "minimum size data:%d %d %d %d %d\n", + bsg_reply->reply_payload_rcv_len, + len, command->number_of_entries, + job->request_payload.sg_cnt, + myrule->number_of_entries); + bsg_reply->reply_payload_rcv_len = 0; + rc = -EINVAL; + goto get_fcp_priority_exit; + } + + kfree(myrule); + job->dd_data = NULL; + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return 0; + +get_fcp_priority_exit: + kfree(myrule); +get_fcp_priority_exit1: + /* make error code available to userspace */ + bsg_reply->result = rc; + job->dd_data = NULL; + return rc; +} +#endif + /** * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command * @job: SEND_MGMT_RESP fc_bsg_job @@ -2404,33 +2991,27 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; struct diag_status *diag_status_reply; - int mbxstatus, rc = 0; + int mbxstatus, rc = -ENODEV, rc1 = 0; shost = fc_bsg_to_shost(job); - if (!shost) { - rc = -ENODEV; + if (!shost) goto job_error; - } - vport = shost_priv(shost); - if (!vport) { - rc = -ENODEV; - goto job_error; - } - phba = vport->phba; - if (!phba) { - rc = -ENODEV; - goto job_error; - } - if (phba->sli_rev < LPFC_SLI_REV4) { - rc = -ENODEV; + vport = shost_priv(shost); + if (!vport) goto job_error; - } + + phba = vport->phba; + if (!phba) + goto job_error; + + + if (phba->sli_rev < LPFC_SLI_REV4) + goto job_error; + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < - LPFC_SLI_INTF_IF_TYPE_2) { - rc = -ENODEV; + LPFC_SLI_INTF_IF_TYPE_2) goto job_error; - } if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct sli4_link_diag)) { @@ -2457,16 +3038,20 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) goto job_error; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmboxq) + if (!pmboxq) { + rc = -ENOMEM; goto link_diag_test_exit; + } req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, req_len, LPFC_SLI4_MBX_EMBED); - if (alloc_len != req_len) + if (alloc_len != req_len) { + rc = -ENOMEM; goto link_diag_test_exit; + } run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, @@ -2498,13 +3083,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) diag_status_reply = (struct diag_status *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3012 Received Run link diag test reply " "below minimum size (%d): reply_len:%d\n", - (int)(sizeof(struct fc_bsg_request) + - sizeof(struct diag_status)), + (int)(sizeof(*bsg_reply) + + sizeof(*diag_status_reply)), job->reply_len); rc = -EINVAL; goto job_error; @@ -2515,7 +3099,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) diag_status_reply->shdr_add_status = shdr_add_status; link_diag_test_exit: - rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); + rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0); if (pmboxq) 
mempool_free(pmboxq, phba->mbox_mem_pool); @@ -2524,6 +3108,8 @@ link_diag_test_exit: job_error: /* make error code available to userspace */ + if (rc1 && !rc) + rc = rc1; bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) @@ -2916,6 +3502,9 @@ diag_cmd_data_alloc(struct lpfc_hba *phba, if (nocopydata) { bpl->tus.f.bdeFlags = 0; + pci_dma_sync_single_for_device(phba->pcidev, + dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); + } else { memset((uint8_t *)dmp->dma.virt, 0, cnt); bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; @@ -3099,8 +3688,8 @@ static int lpfc_bsg_diag_loopback_run(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); - struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; + struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_bsg_event *evt; struct event_data *evdat; struct lpfc_sli *psli = &phba->sli; @@ -3403,8 +3992,8 @@ static int lpfc_bsg_get_dfc_rev(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); - struct fc_bsg_reply *bsg_reply = job->reply; struct lpfc_hba *phba = vport->phba; + struct fc_bsg_reply *bsg_reply = job->reply; struct get_mgmt_rev_reply *event_reply; int rc = 0; @@ -3420,8 +4009,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job) event_reply = (struct get_mgmt_rev_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2741 Received GET_DFC_REV reply below " "minimum size\n"); @@ -3454,8 +4042,8 @@ static void lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; - struct fc_bsg_reply *bsg_reply; struct bsg_job *job; + struct fc_bsg_reply *bsg_reply; uint32_t size; unsigned long flags; uint8_t *pmb, *pmb_buf; @@ -3691,7 +4279,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) bsg_reply->result = 0; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, - "2937 SLI_CONFIG ext-buffer maibox command " + "2937 SLI_CONFIG ext-buffer mailbox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, size); @@ -3702,7 +4290,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); } else { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, - "2938 SLI_CONFIG ext-buffer maibox " + "2938 SLI_CONFIG ext-buffer mailbox " "command (x%x/x%x) failure, rc:x%x\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, rc); @@ -3736,7 +4324,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, - "2939 SLI_CONFIG ext-buffer rd maibox command " + "2939 SLI_CONFIG ext-buffer rd mailbox command " "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); @@ -3752,6 +4340,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); } + return; } @@ -3776,7 +4365,7 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, - "2940 SLI_CONFIG ext-buffer wr maibox command " + "2940 SLI_CONFIG ext-buffer wr mailbox command " "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); @@ -4058,12 +4647,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2947 Issued SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2948 Failed to issue SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); rc = -EPIPE; job_error: @@ -4217,12 +4806,12 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2955 Issued SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2956 Failed to issue SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); rc = -EPIPE; goto job_error; } @@ -4306,6 +4895,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: case COMN_OPCODE_GET_CNTL_ATTRIBUTES: case COMN_OPCODE_GET_PROFILE_CONFIG: + case COMN_OPCODE_SET_FEATURES: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", @@ -4489,6 +5079,12 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4525,13 +5121,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); - - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4563,12 +5152,12 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2969 Issued SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); return SLI_CONFIG_HANDLED; } lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2970 Failed to issue SLI_CONFIG ext-buffer " - "maibox command, rc:x%x\n", rc); + "mailbox command, rc:x%x\n", rc); rc = -EPIPE; goto job_error; } @@ -4580,8 +5169,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, return SLI_CONFIG_HANDLED; job_error: - if (pmboxq) - mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); @@ -5017,9 +5604,9 @@ static int lpfc_bsg_mbox_cmd(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; - struct lpfc_hba *phba = vport->phba; struct dfc_mbox_req *mbox_req; int rc = 0; @@ -5177,9 +5764,9 @@ static int lpfc_menlo_cmd(struct bsg_job *job) { struct lpfc_vport *vport =
shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; - struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocbq; IOCB_t *cmd; int rc = 0; @@ -5203,8 +5790,8 @@ lpfc_menlo_cmd(struct bsg_job *job) goto no_dd_data; } - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { + if (job->reply_len < sizeof(*bsg_reply) + + sizeof(struct menlo_response)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2785 Received MENLO_CMD reply below " "minimum size\n"); @@ -5340,8 +5927,7 @@ no_dd_data: static int lpfc_forced_link_speed(struct bsg_job *job) { - struct Scsi_Host *shost = fc_bsg_to_shost(job); - struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_hba *phba = vport->phba; struct fc_bsg_reply *bsg_reply = job->reply; struct forced_link_speed_support_reply *forced_reply; @@ -5360,9 +5946,7 @@ lpfc_forced_link_speed(struct bsg_job *job) forced_reply = (struct forced_link_speed_support_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + - sizeof(struct forced_link_speed_support_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "0049 Received FORCED_LINK_SPEED reply below " "minimum size\n"); @@ -5381,6 +5965,811 @@ job_error: return rc; } +#ifndef BUILD_BRCMFCOE +/** + * lpfc_manage_auth_update_obj - Update the auth configuration in flash. + * @phba: Pointer to HBA context object. + * @buf - Pointer to the object name to be updated. + * + * This function reads the existing flash configuration and rewrites + * the object, saving the active configuration for the port. 
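
As a rough sketch of the flash layout this routine assumes (illustrative only, with hypothetical names; the two-pages-per-port arrangement and SLI4_PAGE_SIZE come from the patch itself), each link owns two consecutive pages inside the shared object, so the buffer list read back from flash is patched at the port's byte offset and again one page later before being rewritten:

#include <stdint.h>

#define EX_SLI4_PAGE_SIZE 4096u	/* assumed; the driver uses SLI4_PAGE_SIZE */

/* Byte offset of link (port) lnk_no inside the shared auth object. */
static inline uint32_t ex_auth_port_offset(uint32_t lnk_no)
{
	return lnk_no * EX_SLI4_PAGE_SIZE * 2;
}
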
+ **/ +static int +lpfc_manage_auth_update_obj(struct lpfc_hba *phba, uint8_t *buf) +{ + struct list_head dma_buffer_list; + struct lpfc_dmabuf *dmabuf, *next; + struct lpfc_auth_cfg *dcfg = NULL; + struct lpfc_auth_cfg_entry *dcfg_p2 = NULL; + int i, bcnt; + uint32_t offset, temp_offset; + char *obj_name = (char *)buf; + int rc = 0; + + /* create buffer list and allocate buffers */ + bcnt = LPFC_AUTH_OBJECT_MAX_SIZE / SLI4_PAGE_SIZE; + INIT_LIST_HEAD(&dma_buffer_list); + for (i = 0; i < bcnt; i++) { + dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto release_out; + } + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + rc = -ENOMEM; + goto release_out; + } + list_add_tail(&dmabuf->list, &dma_buffer_list); + } + + /* read in the entire object */ + offset = 0; + rc = lpfc_auth_rd_object(phba, &dma_buffer_list, + LPFC_AUTH_OBJECT_MAX_SIZE, &offset, obj_name); + if (rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC | LOG_AUTH, + "3108 Read Object failed, status:%x.\n", rc); + rc = LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + goto release_out; + } + + /* update with driver config for this port */ + offset = phba->sli4_hba.lnk_info.lnk_no * SLI4_PAGE_SIZE * 2; + dcfg = lpfc_auth_get_active_cfg_ptr(phba); + dcfg_p2 = lpfc_auth_get_active_cfg_p2(phba); + if (!dcfg || !dcfg_p2) { + rc = LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + goto release_out; + } + + temp_offset = 0; + list_for_each_entry(dmabuf, &dma_buffer_list, list) { + if (temp_offset == offset) + memcpy(dmabuf->virt, dcfg, SLI4_PAGE_SIZE); + else if (temp_offset == offset + SLI4_PAGE_SIZE) + memcpy(dmabuf->virt, dcfg_p2, SLI4_PAGE_SIZE); + + temp_offset += SLI4_PAGE_SIZE; + } + + /* write back the entire object */ + offset = 0; + rc = lpfc_auth_wr_object(phba, &dma_buffer_list, + LPFC_AUTH_OBJECT_MAX_SIZE, &offset, obj_name); + if (rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC | LOG_AUTH, + "3109 Write Object failed, status:%x.\n", rc); + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + } +release_out: + list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { + list_del(&dmabuf->list); + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + return rc; +} + +/** + * lpfc_manage_auth_pwd_fetch - Return password information. + * @phba: Pointer to HBA context object. + * @buf - Pointer to a password descriptor. + * @src - Argument specifying the password information source. + * 0 - active (driver) configuration. + * 1 - saved (flash) configuration. + * + * This function returns password information, if available, for + * a configuration entry matching the wwpn pair described by the + * password descriptor. The src parameter specifies whether the + * saved or active configuration values are to be used.
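
Both this routine and the object update above build a short list of DMA-coherent pages and unwind it on failure. A stripped-down sketch of that pattern (illustrative, not part of the patch; it borrows struct lpfc_dmabuf and SLI4_PAGE_SIZE from the driver's headers):

static int ex_alloc_page_list(struct lpfc_hba *phba,
			      struct list_head *head, int pages)
{
	struct lpfc_dmabuf *d, *next;
	int i;

	INIT_LIST_HEAD(head);
	for (i = 0; i < pages; i++) {
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			goto unwind;
		d->virt = dma_alloc_coherent(&phba->pcidev->dev,
					     SLI4_PAGE_SIZE, &d->phys,
					     GFP_KERNEL);
		if (!d->virt) {
			kfree(d);
			goto unwind;
		}
		list_add_tail(&d->list, head);
	}
	return 0;

unwind:	/* free whatever was queued before the failure */
	list_for_each_entry_safe(d, next, head, list) {
		list_del(&d->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  d->virt, d->phys);
		kfree(d);
	}
	return -ENOMEM;
}
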
+ **/ +static int +lpfc_manage_auth_pwd_fetch(struct lpfc_hba *phba, uint8_t *buf, + uint8_t src) +{ + struct lpfc_bsg_auth_pwd_item *pbuf; + struct list_head dmabuf_list; + struct lpfc_dmabuf *dmabuf, *next; + struct lpfc_auth_cfg *pcfg = NULL; + struct lpfc_auth_cfg_entry *ptmp; + struct lpfc_auth_cfg_entry *ptmp_p2 = NULL; + struct lpfc_auth_cfg_entry *entry = NULL; + uint32_t offset; + uint8_t i, valid; + char *obj_name = {"/driver/auth.cfg"}; + int rc = 0; + + pbuf = (struct lpfc_bsg_auth_pwd_item *)buf; + if (!pbuf) + return LPFC_BSG_AUTH_STS_GENERAL_ERROR; + + if (src) { + /* use the configuration from flash */ + INIT_LIST_HEAD(&dmabuf_list); + for (i = 0; i < 2; i++) { + dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto release_out; + } + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + rc = -ENOMEM; + goto release_out; + } + if (i) + ptmp_p2 = (struct lpfc_auth_cfg_entry *) + dmabuf->virt; + else + pcfg = (struct lpfc_auth_cfg *)dmabuf->virt; + + list_add_tail(&dmabuf->list, &dmabuf_list); + } + + offset = phba->sli4_hba.lnk_info.lnk_no * SLI4_PAGE_SIZE * 2; + rc = lpfc_auth_rd_object(phba, &dmabuf_list, + SLI4_PAGE_SIZE * 2, &offset, + obj_name); + if (rc || offset < sizeof(struct lpfc_auth_cfg_hdr)) { + rc = LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + goto release_out; + } + + ptmp = &pcfg->entry[0]; + for (i = 0; i < pcfg->hdr.entry_cnt; i++, ptmp++) { + if (i == AUTH_CONFIG_ENTRIES_PER_PAGE) + ptmp = ptmp_p2; + + /* look for matching wwpn pair */ + if (!memcmp(&ptmp->local_wwn[0], + &pbuf->lwwpn[0], 8) && + !memcmp(&ptmp->remote_wwn[0], + &pbuf->rwwpn[0], 8)) { + entry = ptmp; + break; + } + } + } else + entry = lpfc_auth_find_cfg_item(phba, &pbuf->lwwpn[0], + &pbuf->rwwpn[0], &valid); + + if (entry) { + if ((entry->pwd.local_pw_mode == 0) || + (entry->pwd.local_pw_mode > 2) || + (entry->pwd.local_pw_length == 0) || + (entry->pwd.local_pw[0] == 0)) + pbuf->local.type = 3; + else { + pbuf->local.type = entry->pwd.local_pw_mode; + pbuf->local.length = entry->pwd.local_pw_length; + memcpy(&pbuf->local.pwd[0], + &entry->pwd.local_pw[0], + LPFC_AUTH_PSSWD_MAX_LEN); + } + if ((entry->pwd.remote_pw_mode == 0) || + (entry->pwd.remote_pw_mode > 2) || + (entry->pwd.remote_pw_length == 0) || + (entry->pwd.remote_pw[0] == 0)) + pbuf->remote.type = 3; + else { + pbuf->remote.type = entry->pwd.remote_pw_mode; + pbuf->remote.length = entry->pwd.remote_pw_length; + memcpy(&pbuf->remote.pwd[0], + &entry->pwd.remote_pw[0], + LPFC_AUTH_PSSWD_MAX_LEN); + } + } else { + rc = LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + } + +release_out: + if (src) + list_for_each_entry_safe(dmabuf, next, &dmabuf_list, list) { + list_del(&dmabuf->list); + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + + return rc; +} + +/** + * lpfc_manage_auth_pwd_update - Update password information. + * @vport: Pointer to virtual port object. + * @buf - Pointer to a password information structure. + * + * This function updates password information for a lwwpn/rwwpn pair + * described in the password information structure.
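
The mode and length screening repeated in the update below (and in the fetch above) amounts to one predicate. A hypothetical helper, not part of the patch, stating the rule: a secret is usable only when its mode is 1 or 2, its length is nonzero and its first byte is set; anything else is reported back as type 3, meaning not configured:

#include <stdbool.h>
#include <stdint.h>

static inline bool ex_auth_pwd_is_set(uint8_t mode, uint8_t len,
				      const uint8_t *pwd)
{
	return mode >= 1 && mode <= 2 && len != 0 && pwd[0] != 0;
}
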
+ **/ +static int +lpfc_manage_auth_pwd_update(struct lpfc_vport *vport, uint8_t *buf) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_auth_pwd_item *pbuf; + struct lpfc_auth_cfg *pcfg = NULL; + struct lpfc_auth_cfg_entry *entry = NULL; + struct lpfc_nodelist *ndlp = NULL; + uint8_t valid; + int rc = 0; + + pbuf = (struct lpfc_bsg_auth_pwd_item *)buf; + if (!pbuf) + return LPFC_BSG_AUTH_STS_GENERAL_ERROR; + + if (list_empty(&phba->lpfc_auth_active_cfg_list)) { + rc = lpfc_auth_alloc_active_cfg_list(phba); + if (rc) + return rc; + + pcfg = lpfc_auth_get_active_cfg_ptr(phba); + + pcfg->hdr.signature = cpu_to_le32(0x61757468); + pcfg->hdr.version = 0; + pcfg->hdr.size = sizeof(struct lpfc_auth_cfg_hdr); + pcfg->hdr.entry_cnt = 0; + } + + entry = lpfc_auth_find_cfg_item(phba, &pbuf->lwwpn[0], + &pbuf->rwwpn[0], &valid); + if (!entry) { + pcfg = lpfc_auth_get_active_cfg_ptr(phba); + if (pcfg->hdr.entry_cnt == MAX_AUTH_CONFIG_ENTRIES) + return LPFC_BSG_AUTH_STS_OUT_OF_RESOURCES; + + /* Do not allow matching local and remote passwords */ + if (!memcmp(&pbuf->local.pwd[0], &pbuf->remote.pwd[0], + LPFC_AUTH_PSSWD_MAX_LEN)) + return LPFC_BSG_AUTH_STS_PSSWDS_SAME; + + if (pcfg->hdr.entry_cnt < AUTH_CONFIG_ENTRIES_PER_PAGE) { + entry = &pcfg->entry[pcfg->hdr.entry_cnt]; + } else { + entry = lpfc_auth_get_active_cfg_p2(phba); + entry += pcfg->hdr.entry_cnt - + AUTH_CONFIG_ENTRIES_PER_PAGE; + } + memcpy(&entry->local_wwn[0], &pbuf->lwwpn[0], 8); + memcpy(&entry->remote_wwn[0], &pbuf->rwwpn[0], 8); + pcfg->hdr.entry_cnt++; + } + + if (entry) { + /* Do not allow matching passwords */ + if (pbuf->local.type && pbuf->local.type < 3) { + if (pbuf->remote.type && pbuf->remote.type < 3) { + if (!memcmp(&pbuf->local.pwd[0], + &pbuf->remote.pwd[0], + LPFC_AUTH_PSSWD_MAX_LEN)) + return LPFC_BSG_AUTH_STS_PSSWDS_SAME; + } else { + if (!memcmp(&pbuf->local.pwd[0], + &entry->pwd.remote_pw[0], + LPFC_AUTH_PSSWD_MAX_LEN)) + return LPFC_BSG_AUTH_STS_PSSWDS_SAME; + } + } else + if (pbuf->remote.type && pbuf->remote.type < 3) { + if (!memcmp(&pbuf->remote.pwd[0], + &entry->pwd.local_pw[0], + LPFC_AUTH_PSSWD_MAX_LEN)) + return LPFC_BSG_AUTH_STS_PSSWDS_SAME; + } + /* Authentication with the fabric may be active. */ + ndlp = lpfc_auth_findnode(vport, + (struct lpfc_name *)pbuf->lwwpn, + (struct lpfc_name *)pbuf->rwwpn); + /* Check if authentication is in progress. 
*/ + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + (ndlp->dhc_cfg.msg == LPFC_AUTH_NEGOTIATE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_CHALLENGE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_REPLY)) + return LPFC_BSG_AUTH_STS_IN_PROGRESS; + + if (pbuf->local.type && pbuf->local.type < 3) { + entry->pwd.local_pw_length = + (uint8_t)pbuf->local.length; + entry->pwd.local_pw_mode = + (uint8_t)pbuf->local.type; + memcpy(&entry->pwd.local_pw[0], + &pbuf->local.pwd[0], + LPFC_AUTH_PSSWD_MAX_LEN); + } + if (pbuf->remote.type && pbuf->remote.type < 3) { + entry->pwd.remote_pw_length = + (uint8_t)pbuf->remote.length; + entry->pwd.remote_pw_mode = + (uint8_t)pbuf->remote.type; + memcpy(&entry->pwd.remote_pw[0], + &pbuf->remote.pwd[0], + LPFC_AUTH_PSSWD_MAX_LEN); + } + + /* if ndlp found, refresh auth cfg */ + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + ndlp->dhc_cfg.local_passwd_len = + entry->pwd.local_pw_length; + ndlp->dhc_cfg.remote_passwd_len = + entry->pwd.remote_pw_length; + memcpy(ndlp->dhc_cfg.local_passwd, + &entry->pwd.local_pw, + entry->pwd.local_pw_length); + memcpy(ndlp->dhc_cfg.remote_passwd, + &entry->pwd.remote_pw, + entry->pwd.remote_pw_length); + } + } + return rc; +} + +/** + * lpfc_manage_auth_drvr_cfg_update - Update configuration information. + * @phba: Pointer to HBA context object. + * @buf - Pointer to a list of configuration descriptors. + * + * This function updates configuration information, for one or more lwwpn/rwwpn + * pairs described in the list of configuration items provided. + **/ +static int +lpfc_manage_auth_drvr_cfg_update(struct lpfc_vport *vport, uint8_t *buf) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_auth_cfg_list *clist; + struct lpfc_bsg_auth_cfg_item *ci; + struct lpfc_auth_cfg_entry *entry = NULL; + uint8_t i, item_cnt, valid; + struct lpfc_nodelist *ndlp = NULL; + uint8_t wwpn[8]; + int rc = LPFC_BSG_AUTH_STS_SUCCESS; + + u64_to_wwn(AUTH_FABRIC_WWN, wwpn); + clist = (struct lpfc_bsg_auth_cfg_list *)buf; + if (!clist) + return LPFC_BSG_AUTH_STS_GENERAL_ERROR; + + item_cnt = clist->entry_cnt; + for (i = 0; i < item_cnt; i++) { + entry = lpfc_auth_find_cfg_item(phba, + &clist->cfg_item[i].lwwpn[0], + &clist->cfg_item[i].rwwpn[0], + &valid); + if (entry) { + ci = &clist->cfg_item[i]; + if ((entry->pwd.local_pw_length == 0) || + (entry->pwd.local_pw_mode == 0) || + (entry->pwd.local_pw_mode > 2) || + (entry->pwd.local_pw[0] == 0)) { + ci->status = + LPFC_BSG_AUTH_STS_LOCAL_SECRET_NOT_SET; + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + continue; + } + + /* Authentication with the fabric may be active. */ + ndlp = lpfc_auth_findnode(vport, + (struct lpfc_name *)ci->lwwpn, + (struct lpfc_name *)ci->rwwpn + ); + /* Check if authentication is in progress. 
*/ + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + (ndlp->dhc_cfg.msg == LPFC_AUTH_NEGOTIATE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_CHALLENGE || + ndlp->dhc_cfg.msg == LPFC_DHCHAP_REPLY)) { + ci->status = + LPFC_BSG_AUTH_STS_IN_PROGRESS; + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + continue; + } + + /* update existing entry */ + memcpy(entry, ci->lwwpn, + sizeof(struct lpfc_auth_cfg_entry) - + sizeof(struct lpfc_auth_pwd_entry)); + + if (ci->reauth_interval > REAUTH_MAX_INTERVAL) + entry->reauth_interval = REAUTH_MAX_INTERVAL; + + entry->auth_flags.valid = 1; + ci->status = 0; + + /* if ndlp found, refresh auth cfg */ + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + ndlp->dhc_cfg.auth_tmo = entry->auth_tmo; + ndlp->dhc_cfg.auth_mode = entry->auth_mode; + ndlp->dhc_cfg.bidirectional = + entry->auth_flags.bi_direction; + ndlp->dhc_cfg.reauth_interval = + entry->reauth_interval; + ndlp->dhc_cfg.direction = AUTH_DIRECTION_NONE; + + /* [re]start re-authentication */ + if (ndlp->dhc_cfg.reauth_interval && + ndlp->dhc_cfg.state == LPFC_AUTH_SUCCESS) + lpfc_auth_start_reauth_tmr(ndlp); + } + } else { + clist->cfg_item[i].status = + LPFC_BSG_AUTH_STS_LOCAL_SECRET_NOT_SET; + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + } + } + return rc; +} + +/** + * lpfc_manage_auth_cfg_fetch - Return configuration information. + * @phba: Pointer to HBA context object. + * @buf - Pointer to a list of configuration descriptors. + * @src - Argument specifying the configuration information source. + * 0 - active (driver) configuration. + * 1 - saved (flash) configuration. + * + * This function returns configuration information, if available, for one or + * more configuration entries matching the wwpn pairs described in the list. + * The src parameter specifies whether the saved or active configuration + * values are to be used. 
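
The fetch loop below recognizes three match modes for the caller-supplied wwpn. A hypothetical predicate (not in the patch) spelling them out: a wwpn equal to the AUTH_FABRIC_WWN wildcard matches every entry, an all-zero wwpn requests an exact lwwpn/rwwpn match against the first descriptor, and anything else matches on the local port name alone:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool ex_entry_matches(const uint8_t *wwpn, const uint8_t *wwn_fabric,
			     const struct lpfc_auth_cfg_entry *e,
			     const struct lpfc_bsg_auth_cfg_item *key)
{
	uint64_t w;

	if (!memcmp(wwpn, wwn_fabric, 8))
		return true;			/* wildcard: match all */
	memcpy(&w, wwpn, sizeof(w));		/* avoids the unaligned cast */
	if (w == 0)				/* exact pair match */
		return !memcmp(e->local_wwn, key->lwwpn, 8) &&
		       !memcmp(e->remote_wwn, key->rwwpn, 8);
	return !memcmp(wwpn, e->local_wwn, 8);	/* local-wwpn match */
}
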
+ **/ +static int +lpfc_manage_auth_cfg_fetch(struct lpfc_hba *phba, uint8_t *buf, + uint8_t *wwpn, uint8_t src) +{ + struct lpfc_bsg_auth_cfg_list *clist; + struct lpfc_bsg_auth_cfg_item *cfg_item; + uint8_t wwn_match[8]; + struct list_head dmabuf_list; + struct lpfc_dmabuf *dmabuf, *next; + struct lpfc_auth_cfg *pcfg = NULL; + struct lpfc_auth_cfg_entry *ptmp_p2 = NULL; + struct lpfc_auth_cfg_entry *entry = NULL; + uint32_t offset; + uint8_t i, j, item_cnt; + char *obj_name = {"/driver/auth.cfg"}; + int rc = 0; + + if (src) { + /* use the saved configuration */ + INIT_LIST_HEAD(&dmabuf_list); + for (i = 0; i < 2; i++) { + dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + SLI4_PAGE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + rc = -ENOMEM; + goto release_out; + } + if (i) + ptmp_p2 = (struct lpfc_auth_cfg_entry *) + dmabuf->virt; + else + pcfg = (struct lpfc_auth_cfg *)dmabuf->virt; + + list_add_tail(&dmabuf->list, &dmabuf_list); + } + + offset = phba->sli4_hba.lnk_info.lnk_no * SLI4_PAGE_SIZE * 2; + rc = lpfc_auth_rd_object(phba, &dmabuf_list, + SLI4_PAGE_SIZE * 2, &offset, + obj_name); + if (rc || offset < sizeof(struct lpfc_auth_cfg_hdr)) { + rc = LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; + goto release_out; + } + + } else { + /* use active configuration */ + pcfg = lpfc_auth_get_active_cfg_ptr(phba); + if (!pcfg) + return LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; + + ptmp_p2 = lpfc_auth_get_active_cfg_p2(phba); + } + + clist = (struct lpfc_bsg_auth_cfg_list *)buf; + if (!clist) { + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + goto release_out; + } + u64_to_wwn(AUTH_FABRIC_WWN, wwn_match); + item_cnt = clist->entry_cnt; + if (!item_cnt) { + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + goto release_out; + } + + if (!pcfg->hdr.entry_cnt) + return LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; + + if (item_cnt > MAX_AUTH_CONFIG_ENTRIES) + item_cnt = MAX_AUTH_CONFIG_ENTRIES; + + entry = &pcfg->entry[0]; + cfg_item = &clist->cfg_item[0]; /* only used for exact match search */ + for (i = 0, j = 0; i < pcfg->hdr.entry_cnt; i++, entry++) { + if (i == AUTH_CONFIG_ENTRIES_PER_PAGE) + entry = ptmp_p2; /* switching to second page */ + + if (memcmp(wwpn, &wwn_match, 8)) { /* match specified */ + if (*(uint64_t *)wwpn == 0) { /* match exact */ + if ((memcmp(&entry->local_wwn, + &cfg_item->lwwpn, 8)) || + (memcmp(&entry->remote_wwn, + &cfg_item->rwwpn, 8))) + continue; + } else if (memcmp(wwpn, &entry->local_wwn, 8)) { + continue; + } + } + if (j < item_cnt) { + memcpy(&clist->cfg_item[j], entry, + sizeof(struct lpfc_bsg_auth_cfg_item)); + /* check for a config */ + if (entry->auth_flags.valid) + clist->cfg_item[j].status = 0; + else { + clist->cfg_item[j].status = + LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + rc = LPFC_BSG_AUTH_STS_NOT_CONFIGURED; + } + } + j++; + } + clist->entry_cnt = j; + if (clist->entry_cnt > item_cnt) + rc = LPFC_BSG_AUTH_STS_MORE_DATA_AVAILABLE; + else if (!rc && !clist->entry_cnt) + rc = LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; +release_out: + if (src) + list_for_each_entry_safe(dmabuf, next, &dmabuf_list, list) { + list_del(&dmabuf->list); + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + + return rc; +} + +/** + * lpfc_manage_auth_cfg_delete - remove configuration information. + * @phba: Pointer to HBA context object. + * @buf - Pointer to a list of configuration descriptors. + * @flag - argument to specify scope of the wwpn matching. 
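
The active table walked by the fetch loop above (and by the delete path documented here) spans two SLI4 pages, with entries past AUTH_CONFIG_ENTRIES_PER_PAGE continuing on the second page. A hypothetical accessor, not in the patch, showing the indexing those loops perform by hand:

static struct lpfc_auth_cfg_entry *
ex_auth_entry_at(struct lpfc_auth_cfg *pcfg,
		 struct lpfc_auth_cfg_entry *page2, uint32_t i)
{
	if (i < AUTH_CONFIG_ENTRIES_PER_PAGE)
		return &pcfg->entry[i];
	return page2 + (i - AUTH_CONFIG_ENTRIES_PER_PAGE);
}
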
+ * + * This function deletes configuration information for one or more lwwpn/rwwpn + * pairs described in the list of configuration items provided. + **/ +static int +lpfc_manage_auth_cfg_delete(struct lpfc_vport *vport, uint8_t *buf, + uint8_t flag) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_bsg_auth_cfg_list *plist; + struct lpfc_bsg_auth_cfg_item *cfg_item; + struct lpfc_auth_cfg *d_cfg = NULL; + struct lpfc_auth_cfg_entry *d_item, *d_last; + struct lpfc_nodelist *ndlp = NULL; + uint8_t i, j, item_cnt_p, item_cnt_d, *p2; + int rc = 0; + + plist = (struct lpfc_bsg_auth_cfg_list *)buf; + if (plist) + item_cnt_p = plist->entry_cnt; + + d_cfg = lpfc_auth_get_active_cfg_ptr(phba); + if (!d_cfg) { + /* no driver config found. */ + if (plist) { + for (i = 0; i < item_cnt_p; i++) + plist->cfg_item[i].status = + LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; + } + return LPFC_BSG_AUTH_STS_GENERAL_ERROR; + } + + if (flag == 1) { + /* delete all entries */ + memset(&d_cfg->entry[0], 0, + SLI4_PAGE_SIZE - sizeof(struct lpfc_auth_cfg_hdr)); + + if (d_cfg->hdr.entry_cnt > AUTH_CONFIG_ENTRIES_PER_PAGE) { + p2 = (uint8_t *)lpfc_auth_get_active_cfg_p2(phba); + if (p2) + memset(p2, 0, SLI4_PAGE_SIZE); + } + d_cfg->hdr.entry_cnt = 0; + return rc; + } + + if (!plist) + return LPFC_BSG_AUTH_STS_GENERAL_ERROR; + + item_cnt_d = d_cfg->hdr.entry_cnt; + if (item_cnt_d > MAX_AUTH_CONFIG_ENTRIES) + item_cnt_d = MAX_AUTH_CONFIG_ENTRIES; + + /* delete items described in list */ + for (i = 0; i < item_cnt_p; i++) { + cfg_item = &plist->cfg_item[i]; + cfg_item->status = LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND; + + if (d_cfg->hdr.entry_cnt == 0) { + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + continue; + } + d_item = &d_cfg->entry[0]; + if (d_cfg->hdr.entry_cnt < AUTH_CONFIG_ENTRIES_PER_PAGE) { + d_last = &d_cfg->entry[d_cfg->hdr.entry_cnt - 1]; + } else { + d_last = lpfc_auth_get_active_cfg_p2(phba); + d_last += d_cfg->hdr.entry_cnt - + AUTH_CONFIG_ENTRIES_PER_PAGE; + } + + for (j = 0; j < d_cfg->hdr.entry_cnt; j++) { + if (j == AUTH_CONFIG_ENTRIES_PER_PAGE) + d_item = (struct lpfc_auth_cfg_entry *) + lpfc_auth_get_active_cfg_p2(phba); + + if ((memcmp(&d_item->local_wwn[0], + &cfg_item->lwwpn[0], 8) == 0) && + (memcmp(&d_item->remote_wwn[0], + &cfg_item->rwwpn[0], 8) == 0)) { + /* match found */ + ndlp = lpfc_auth_findnode(vport, + (struct lpfc_name *) + d_item->local_wwn, + (struct lpfc_name *) + d_item->remote_wwn); + + /* check for authentication active */ + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + (ndlp->dhc_cfg.msg == + LPFC_AUTH_NEGOTIATE || + ndlp->dhc_cfg.msg == + LPFC_DHCHAP_CHALLENGE || + ndlp->dhc_cfg.msg == + LPFC_DHCHAP_REPLY)) { + cfg_item->status = + LPFC_BSG_AUTH_STS_IN_PROGRESS; + rc = LPFC_BSG_AUTH_STS_GENERAL_ERROR; + continue; + } + + if (d_last != d_item) + memcpy(d_item, d_last, + sizeof(struct + lpfc_auth_cfg_entry)); + + memset(d_last, 0, + sizeof(struct lpfc_auth_cfg_entry)); + d_cfg->hdr.entry_cnt--; + cfg_item->status = 0; /* success */ + break; + } + d_item++; + } + } + return rc; +} + +/** + * lpfc_manage_auth_cfg - Provides processing of various auth cfg changes. + * @job: fc_bsg_job to handle + * + * This function updates configuration information described in the job. 
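
For orientation, a hypothetical userspace fill of the vendor request this handler consumes (structure and constant names are from lpfc_bsg.h in this patch; the assumption that the command field carries the vendor opcode matches the other lpfc BSG requests, and the surrounding FC BSG plumbing is omitted):

#include <string.h>

static void ex_fill_get_cfg(struct auth_cfg_mgmt_req *req)
{
	memset(req, 0, sizeof(*req));
	req->command = LPFC_BSG_VENDOR_AUTH_CFG_MGMT;	/* vendor opcode 15 */
	req->cfg_op = AUTH_MGMT_OP_GET_CFG;		/* fetch entries */
	req->param = 0;			/* 0 = active config, 1 = flash copy */
	/* wwpn left all-zero: exact lwwpn/rwwpn match against cfg_item[0] */
}
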
+ **/ +static int +lpfc_manage_auth_cfg(struct bsg_job *job) + +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct auth_cfg_mgmt_req *auth_req; + uint32_t xfr_size; + uint8_t *xfr_data = NULL; + int rc = 0; + + /* in case no data transferred */ + bsg_reply->reply_payload_rcv_len = 0; + + if (job->request_len < (sizeof(struct fc_bsg_request) + + sizeof(struct auth_cfg_mgmt_req))) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "3149 Received AUTH_MGMT request below " + "minimum size\n"); + rc = -EINVAL; + goto update_auth_cfg_exit; + } + auth_req = (struct auth_cfg_mgmt_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + if (!auth_req) { + rc = -EINVAL; + goto update_auth_cfg_exit; + } + + xfr_size = job->request_payload.payload_len; + if (xfr_size) { + if (xfr_size > BUF_SZ_4K) + xfr_size = BUF_SZ_4K; + xfr_data = kmalloc(xfr_size, GFP_KERNEL); + if (!xfr_data) { + rc = -ENOMEM; + goto update_auth_cfg_exit; + } + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + xfr_data, xfr_size); + } + + switch (auth_req->cfg_op) { + case AUTH_MGMT_OP_GET_CFG: + rc = lpfc_manage_auth_cfg_fetch(phba, xfr_data, + auth_req->wwpn, + (uint8_t)auth_req->param); + break; + case AUTH_MGMT_OP_SET_CFG: + rc = lpfc_manage_auth_drvr_cfg_update(vport, xfr_data); + break; + case AUTH_MGMT_OP_DEL_CFG: + rc = lpfc_manage_auth_cfg_delete(vport, xfr_data, + (uint8_t)auth_req->param); + break; + case AUTH_MGMT_OP_SET_PWD: + rc = lpfc_manage_auth_pwd_update(vport, xfr_data); + break; + case AUTH_MGMT_OP_GET_PWD: + rc = lpfc_manage_auth_pwd_fetch(phba, xfr_data, + (uint8_t)auth_req->param); + break; + case AUTH_MGMT_OP_DEL_OBJ: + rc = lpfc_del_object(phba, xfr_data); + break; + case AUTH_MGMT_OP_RMW_OBJ: + rc = lpfc_manage_auth_update_obj(phba, xfr_data); + break; + default: + rc = -EINVAL; + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC | LOG_AUTH, + "3171 Unknown auth mgmt cmd:%x op:%x " + "wwpn: %llx flags x%x\n", + auth_req->command, auth_req->cfg_op, + *(uint64_t *)&auth_req->wwpn, auth_req->param); + break; + } + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + xfr_data, xfr_size); + + kfree(xfr_data); +update_auth_cfg_exit: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return rc; +} +#endif + /** * lpfc_check_fwlog_support: Check FW log support on the adapter * @phba: Pointer to HBA context object. 
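
The handler above bounces the scatter/gather payload through a kernel buffer capped at BUF_SZ_4K in both directions. A stripped-down sketch of that in/out pattern (illustrative only, not part of the patch):

static int ex_bounce_request(struct bsg_job *job, uint8_t **out,
			     uint32_t *out_len)
{
	uint32_t len = job->request_payload.payload_len;
	uint8_t *buf;

	*out = NULL;
	*out_len = 0;
	if (!len)
		return 0;		/* nothing to transfer */
	if (len > BUF_SZ_4K)
		len = BUF_SZ_4K;	/* silently capped, as above */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, buf, len);
	*out = buf;
	*out_len = len;
	return 0;
}
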
@@ -5438,10 +6827,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job) bsg_reply->reply_data.vendor_reply.vendor_rsp; /* Current logging state */ - if (ras_fwlog->ras_active == true) + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) ras_reply->state = LPFC_RASLOG_STATE_RUNNING; else ras_reply->state = LPFC_RASLOG_STATE_STOPPED; + spin_unlock_irq(&phba->hbalock); ras_reply->log_level = phba->ras_fwlog.fw_loglevel; ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; @@ -5498,10 +6889,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) if (action == LPFC_RASACTION_STOP_LOGGING) { /* Check if already disabled */ - if (ras_fwlog->ras_active == false) { + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); rc = -ESRCH; goto ras_job_error; } + spin_unlock_irq(&phba->hbalock); /* Disable logging */ lpfc_ras_stop_fwlog(phba); @@ -5512,8 +6906,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) * FW-logging with new log-level. Return status * "Logging already Running" to caller. **/ - if (ras_fwlog->ras_active) + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != INACTIVE) action_status = -EINPROGRESS; + spin_unlock_irq(&phba->hbalock); /* Enable logging */ rc = lpfc_sli4_ras_fwlog_init(phba, log_level, @@ -5629,10 +7025,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) goto ras_job_error; /* Logging to be stopped before reading */ - if (ras_fwlog->ras_active == true) { + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) { + spin_unlock_irq(&phba->hbalock); rc = -EINPROGRESS; goto ras_job_error; } + spin_unlock_irq(&phba->hbalock); if (job->request_len < sizeof(struct fc_bsg_request) + @@ -5706,8 +7105,7 @@ lpfc_get_trunk_info(struct bsg_job *job) event_reply = (struct lpfc_trunk_info *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2728 Received GET TRUNK _INFO reply below " "minimum size\n"); @@ -5758,6 +7156,92 @@ job_error: } +static int +lpfc_get_cgnbuf_info(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct get_cgnbuf_info_req *cgnbuf_req; + struct lpfc_cgn_info *cp; + uint8_t *cgn_buff; + int size, cinfosz; + int rc = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct get_cgnbuf_info_req)) { + rc = -ENOMEM; + goto job_exit; + } + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + rc = -ENOENT; + goto job_exit; + } + + if (!phba->cgn_i || !phba->cgn_i->virt) { + rc = -ENOENT; + goto job_exit; + } + + cp = phba->cgn_i->virt; + if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { + rc = -EPERM; + goto job_exit; + } + + cgnbuf_req = (struct get_cgnbuf_info_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* For reset or size == 0 */ + bsg_reply->reply_payload_rcv_len = 0; + + if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { + lpfc_init_congestion_stat(phba); + goto job_exit; + } + + /* We don't want to include the CRC at the end */ + cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); + + size = cgnbuf_req->read_size; + if (!size) + goto job_exit; + + if (size < cinfosz) { + /* Just copy back what we can */ + cinfosz = size; + rc = -E2BIG; + } + + /* Allocate memory to read congestion info */ 
+ cgn_buff = vmalloc(cinfosz); + if (!cgn_buff) { + rc = -ENOMEM; + goto job_exit; + } + + memcpy(cgn_buff, cp, cinfosz); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + cgn_buff, cinfosz); + + vfree(cgn_buff); + +job_exit: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + else + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2724 GET CGNBUF error: %d\n", rc); + return rc; +} + /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle @@ -5802,9 +7286,22 @@ lpfc_bsg_hst_vendor(struct bsg_job *job) case LPFC_BSG_VENDOR_MENLO_DATA: rc = lpfc_menlo_cmd(job); break; +#ifndef NO_APEX + case LPFC_BSG_VENDOR_SET_FCP_PRIORITY: + rc = lpfc_bsg_set_fcp_priority(job); + break; + case LPFC_BSG_VENDOR_GET_FCP_PRIORITY: + rc = lpfc_bsg_get_fcp_priority(job); + break; +#endif case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: rc = lpfc_forced_link_speed(job); break; +#ifndef BUILD_BRCMFCOE + case LPFC_BSG_VENDOR_AUTH_CFG_MGMT: + rc = lpfc_manage_auth_cfg(job); + break; +#endif case LPFC_BSG_VENDOR_RAS_GET_LWPD: rc = lpfc_bsg_get_ras_lwpd(job); break; @@ -5820,6 +7317,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job) case LPFC_BSG_VENDOR_GET_TRUNK_INFO: rc = lpfc_get_trunk_info(job); break; + case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: + rc = lpfc_get_cgnbuf_info(job); + break; default: rc = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; @@ -5987,3 +7487,4 @@ lpfc_bsg_timeout(struct bsg_job *job) */ return rc; } + diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index d1708133fd54..af6bd925e571 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2010-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
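
Hypothetical userspace sizing for the congestion-buffer read handled by lpfc_get_cgnbuf_info above: the driver copies at most sizeof(struct lpfc_cgn_info) minus the trailing 32-bit CRC, and flags -E2BIG when asked for less (request layout from this patch; struct lpfc_cgn_info itself lives in the driver's headers and is assumed here):

#include <string.h>
#include <stdint.h>

static void ex_fill_cgnbuf_req(struct get_cgnbuf_info_req *req, int reset)
{
	memset(req, 0, sizeof(*req));
	req->command = LPFC_BSG_VENDOR_GET_CGNBUF_INFO;
	if (reset) {
		req->reset = LPFC_BSG_CGN_RESET_STAT;	/* clear stats only */
	} else {
		/* full snapshot, excluding the trailing CRC word */
		req->read_size = sizeof(struct lpfc_cgn_info) -
				 sizeof(uint32_t);
	}
}
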
* @@ -37,12 +37,18 @@ #define LPFC_BSG_VENDOR_MENLO_DATA 9 #define LPFC_BSG_VENDOR_DIAG_MODE_END 10 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11 +#ifndef NO_APEX +#define LPFC_BSG_VENDOR_GET_FCP_PRIORITY 12 +#define LPFC_BSG_VENDOR_SET_FCP_PRIORITY 13 +#endif #define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14 +#define LPFC_BSG_VENDOR_AUTH_CFG_MGMT 15 #define LPFC_BSG_VENDOR_RAS_GET_LWPD 16 #define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17 #define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18 #define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19 #define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20 +#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO 21 struct set_ct_event { uint32_t command; @@ -105,8 +111,13 @@ struct get_mgmt_rev { uint32_t command; }; +#ifndef NO_APEX +#define MANAGEMENT_MAJOR_REV 2 +#define MANAGEMENT_MINOR_REV 0 +#else #define MANAGEMENT_MAJOR_REV 1 #define MANAGEMENT_MINOR_REV 1 +#endif /* the MgmtRevInfo structure */ struct MgmtRevInfo { @@ -225,6 +236,10 @@ struct lpfc_sli_config_hdr { uint32_t reserved5; }; +#define LPFC_CSF_BOOT_DEV 0x1D +#define LPFC_CSF_QUERY 0 +#define LPFC_CSF_SAVE 1 + struct lpfc_sli_config_emb0_subsys { struct lpfc_sli_config_hdr sli_config_hdr; #define LPFC_MBX_SLI_CONFIG_MAX_MSE 19 @@ -243,6 +258,15 @@ struct lpfc_sli_config_emb0_subsys { #define FCOE_OPCODE_ADD_FCF 0x09 #define FCOE_OPCODE_SET_DPORT_MODE 0x27 #define FCOE_OPCODE_GET_DPORT_RESULTS 0x28 + uint32_t timeout; /* comn_set_feature timeout */ + uint32_t request_length; /* comn_set_feature request len */ + uint32_t version; /* comn_set_feature version */ + uint32_t csf_feature; /* comn_set_feature feature */ + uint32_t word69; /* comn_set_feature parameter len */ + uint32_t word70; /* comn_set_feature parameter val0 */ +#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0 +#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3 +#define lpfc_emb0_subcmnd_csf_p0_WORD word70 }; struct lpfc_sli_config_emb1_subsys { @@ -261,6 +285,7 @@ struct lpfc_sli_config_emb1_subsys { #define COMN_OPCODE_WRITE_OBJECT 0xAC #define COMN_OPCODE_READ_OBJECT_LIST 0xAD #define COMN_OPCODE_DELETE_OBJECT 0xAE +#define COMN_OPCODE_SET_FEATURES 0xBF #define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79 #define COMN_OPCODE_GET_CNTL_ATTRIBUTES 0x20 uint32_t timeout; @@ -294,6 +319,40 @@ struct lpfc_sli_config_mbox { } un; }; +#ifndef NO_APEX +struct lpfc_fcp_pri_rule { + uint8_t src_wwpn[8]; + uint8_t tgt_wwpn[8]; + uint8_t first_lun[8]; + uint8_t last_lun[8]; + uint8_t fcp_priority; + uint8_t rsvrd[3]; +}; + +struct lpfc_fcp_pri_rules { + uint32_t number_of_entries; /* Number of rules in the array */ + struct lpfc_fcp_pri_rule rule_entry[0]; +}; + +struct get_lpfc_fcp_pri_rules { + uint32_t command; + uint32_t number_of_entries; /* number of rules to return */ +}; + +struct get_lpfc_fcp_pri_rules_reply { + struct lpfc_fcp_pri_rules rule; +}; + +struct set_lpfc_fcp_pri_rules { + uint32_t command; + uint32_t number_of_entries; /* number of rules passed to driver */ +}; +/* limits for fcp priority rules */ +#define LPFC_MAX_FCP_RULES 999 /* number of rules */ +/* so we can add 1 more than the get */ +#define LPFC_MAX_FCP_SET_RULES (LPFC_MAX_FCP_RULES + 1) +#endif + #define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED 0 #define LPFC_FORCED_LINK_SPEED_SUPPORTED 1 struct get_forced_link_speed_support { @@ -303,6 +362,73 @@ struct forced_link_speed_support_reply { uint8_t supported; }; +struct lpfc_bsg_auth_cfg_item { + uint8_t lwwpn[8]; + uint8_t rwwpn[8]; + uint16_t auth_tmo; + uint8_t auth_mode; + struct { + uint8_t bi_direction : 1; + uint8_t rsvd : 6; + uint8_t valid : 1; + } auth_flags; + uint8_t 
auth_priority[4]; + uint8_t hash_priority[4]; + uint8_t dh_grp_priority[8]; + uint32_t reauth_interval; +#define REAUTH_MAX_INTERVAL 3600 /* minutes */ + uint32_t status; + uint32_t rsvd; +}; + +struct lpfc_bsg_auth_cfg_list { + uint32_t version; + uint32_t entry_cnt; + uint32_t rsvd; + struct lpfc_bsg_auth_cfg_item cfg_item[0]; +}; + +#define AUTH_MGMT_OP_SET_CFG 1 +#define AUTH_MGMT_OP_GET_CFG 2 +#define AUTH_MGMT_OP_DEL_CFG 3 +#define AUTH_MGMT_OP_SET_PWD 4 +#define AUTH_MGMT_OP_GET_PWD 5 +#define AUTH_MGMT_OP_RMW_OBJ 6 +#define AUTH_MGMT_OP_DEL_OBJ 7 + +struct auth_cfg_mgmt_req { + uint32_t command; + uint32_t cfg_op; + uint8_t wwpn[8]; + uint32_t param; +}; + +struct lpfc_bsg_auth_pwd_info { + uint16_t length; + uint16_t type; + uint8_t pwd[128]; +}; + +struct lpfc_bsg_auth_pwd_item { + uint8_t lwwpn[8]; + uint8_t rwwpn[8]; + struct lpfc_bsg_auth_pwd_info local; + struct lpfc_bsg_auth_pwd_info remote; +}; + +#define LPFC_BSG_AUTH_STS_SUCCESS 0x0000 +#define LPFC_BSG_AUTH_STS_NOT_CONFIGURED 0x8001 +#define LPFC_BSG_AUTH_STS_MORE_DATA_AVAILABLE 0x8007 +#define LPFC_BSG_AUTH_STS_INVALID_OPERATION 0x8009 +#define LPFC_BSG_AUTH_STS_OUT_OF_RESOURCES 0x800a +#define LPFC_BSG_AUTH_STS_IN_PROGRESS 0x800b +#define LPFC_BSG_AUTH_STS_PSSWDS_SAME 0x8011 +#define LPFC_BSG_AUTH_STS_DRVTYPE_NOT_SUPPORTED 0x8030 +#define LPFC_BSG_AUTH_STS_AUTHENTICATION_NOT_SUPPORTED 0x8031 +#define LPFC_BSG_AUTH_STS_GENERAL_ERROR 0x8032 +#define LPFC_BSG_AUTH_STS_CONFIG_NOT_FOUND 0x8034 +#define LPFC_BSG_AUTH_STS_LOCAL_SECRET_NOT_SET 0x8050 + struct lpfc_bsg_ras_req { uint32_t command; }; @@ -372,6 +498,13 @@ struct get_trunk_info_req { uint32_t command; }; +struct get_cgnbuf_info_req { + uint32_t command; + uint32_t read_size; + uint32_t reset; +#define LPFC_BSG_CGN_RESET_STAT 1 +}; + /* driver only */ #define SLI_CONFIG_NOT_HANDLED 0 #define SLI_CONFIG_HANDLED 1 diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index 43cf46a3a71f..ef128fea4afb 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b2ad8c750486..564474b63a08 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
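
Because lpfc_fcp_pri_rules above ends in a zero-length rule_entry[] array, callers size their buffers as header plus entries; the GET handler's fishing path (a first request with number_of_entries set to 0) returns just the rule count, which then feeds arithmetic like this hypothetical helper (illustrative only; LPFC_MAX_FCP_RULES bounds n):

#include <stddef.h>
#include <stdint.h>

static inline size_t ex_fcp_rules_bytes(uint32_t n)
{
	return sizeof(struct lpfc_fcp_pri_rules) +
	       (size_t)n * sizeof(struct lpfc_fcp_pri_rule);
}
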
* @@ -23,6 +23,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; +bool lpfc_throttler(struct lpfc_vport *, struct throttle_history *); struct fc_frame_header; struct lpfc_nvmet_rcv_ctx; void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -62,6 +63,8 @@ int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *, uint16_t, uint16_t, bool); int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_reg_congestion_buf(struct lpfc_hba *); +int lpfc_unreg_congestion_buf(struct lpfc_hba *phba); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); @@ -78,6 +81,20 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); void lpfc_free_iocb_list(struct lpfc_hba *phba); int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq, int count, int idx); +uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba); +void lpfc_cmf_signal_init(struct lpfc_hba *phba); +void lpfc_cmf_start(struct lpfc_hba *phba); +void lpfc_cmf_stop(struct lpfc_hba *phba); +void lpfc_init_congestion_stat(struct lpfc_hba *phba); +void lpfc_init_congestion_buf(struct lpfc_hba *phba); +int lpfc_sli4_cgn_params_read(struct lpfc_hba *); +uint32_t lpfc_cgn_calc_crc32(void *, uint32_t, uint32_t); +int lpfc_config_cgn_signal(struct lpfc_hba *); +int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total); +void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba); +void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag); +void lpfc_unblock_requests(struct lpfc_hba *); +void lpfc_block_requests(struct lpfc_hba *); void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -106,13 +123,50 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did); struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); int lpfc_nlp_put(struct lpfc_nodelist *); +void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 len); int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); void lpfc_disc_list_loopmap(struct lpfc_vport *); void lpfc_disc_start(struct lpfc_vport *); void lpfc_cleanup_discovery_resources(struct lpfc_vport *); void lpfc_cleanup(struct lpfc_vport *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_disc_timeout(unsigned long); +#else void lpfc_disc_timeout(struct timer_list *); +#endif +#ifndef BUILD_BRCMFCOE +struct lpfc_nodelist * +lpfc_auth_findnode(struct lpfc_vport *, struct lpfc_name *, struct lpfc_name *); +int lpfc_auth_alloc_active_cfg_list(struct lpfc_hba *); +void lpfc_auth_free_active_cfg_list(struct lpfc_hba *); +int lpfc_auth_read_cfg_object(struct lpfc_hba *); +int lpfc_auth_issue_rd_object(struct lpfc_hba *, struct list_head *, + uint32_t, uint32_t *, char *); +void lpfc_auth_cmpl_rd_object(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_del_object(struct lpfc_hba *, char *); +int lpfc_auth_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, + uint32_t *, char *); +int lpfc_auth_rd_object(struct lpfc_hba *, struct list_head *, uint32_t, + uint32_t *, char *); +struct lpfc_auth_cfg *lpfc_auth_get_active_cfg_ptr(struct lpfc_hba *phba); +struct lpfc_auth_cfg_entry 
*lpfc_auth_get_active_cfg_p2(struct lpfc_hba *phba); +struct lpfc_auth_cfg_entry *lpfc_auth_find_cfg_item(struct lpfc_hba *phba, + uint8_t *lwwpn, + uint8_t *rwwpn, + uint8_t *valid); +int lpfc_auth_start(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_auth_start_reauth_tmr(struct lpfc_nodelist *ndlp); +int lpfc_auth_handle_cmd(struct lpfc_vport *, struct lpfc_iocbq *, + struct lpfc_nodelist *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_auth_reauth(unsigned long ptr); +#else +void lpfc_auth_reauth(struct timer_list *); +#endif +void lpfc_auth_timeout_handler(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); +#endif int lpfc_unregister_fcf_prep(struct lpfc_hba *); struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); @@ -140,9 +194,11 @@ int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); -int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); -int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry); +int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry); +int lpfc_issue_els_rscn(struct lpfc_vport *, uint8_t); int lpfc_issue_fabric_reglogin(struct lpfc_vport *); +int lpfc_issue_els_rdf(struct lpfc_vport *, uint8_t); +int lpfc_issue_els_edc(struct lpfc_vport *phba, uint8_t retry); int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, @@ -154,7 +210,11 @@ int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *, int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, struct lpfc_nodelist *); void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_els_retry_delay(unsigned long); +#else void lpfc_els_retry_delay(struct timer_list *); +#endif void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); @@ -165,7 +225,11 @@ void lpfc_els_flush_all_cmd(struct lpfc_hba *); void lpfc_els_flush_cmd(struct lpfc_vport *); int lpfc_els_disc_adisc(struct lpfc_vport *); int lpfc_els_disc_plogi(struct lpfc_vport *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_els_timeout(unsigned long); +#else void lpfc_els_timeout(struct timer_list *); +#endif void lpfc_els_timeout_handler(struct lpfc_vport *); struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, uint8_t, struct lpfc_nodelist *, @@ -175,13 +239,17 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); -int lpfc_issue_gidpt(struct lpfc_vport *vport); -int lpfc_issue_gidft(struct lpfc_vport *vport); +uint32_t lpfc_issue_gidpt(struct lpfc_vport *vport); +uint32_t lpfc_issue_gidft(struct lpfc_vport *vport); int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); -void lpfc_fdmi_num_disc_check(struct lpfc_vport *); +void 
lpfc_fdmi_change_check(struct lpfc_vport *vport); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_delayed_disc_tmo(unsigned long); +#else void lpfc_delayed_disc_tmo(struct timer_list *); +#endif void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); int lpfc_config_port_prep(struct lpfc_hba *); @@ -192,6 +260,7 @@ int lpfc_hba_down_post(struct lpfc_hba *); void lpfc_hba_init(struct lpfc_hba *, uint32_t *); int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int); void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); +int lpfc_issue_lip(struct Scsi_Host *); int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *, int); @@ -215,6 +284,19 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *); irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); +int lpfc_read_object(struct lpfc_hba *, char *, uint32_t *, uint32_t); +int lpfc_write_object(struct lpfc_hba *, char *, uint32_t *, uint32_t); + +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba); +int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_sli4_poll_hbtimer(unsigned long ptr); +#else +void lpfc_sli4_poll_hbtimer(struct timer_list *t); +#endif +void lpfc_sli4_start_polling(struct lpfc_queue *q); +void lpfc_sli4_stop_polling(struct lpfc_queue *q); + void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -251,8 +333,10 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr); void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq); -void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba); -void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba); +void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *); +#if defined(BUILD_NVME) +void lpfc_nvme_issue_pend_io(struct work_struct *work); +#endif void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, uint16_t); int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, @@ -278,9 +362,17 @@ void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_poll_timeout(unsigned long ptr); +#else void lpfc_poll_timeout(struct timer_list *t); +#endif void lpfc_poll_start_timer(struct lpfc_hba *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_poll_eratt(unsigned long); +#else void lpfc_poll_eratt(struct timer_list *); +#endif int lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); @@ -296,7 +388,7 @@ int lpfc_selective_reset(struct lpfc_hba *); void lpfc_reset_barrier(struct lpfc_hba *); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); int lpfc_sli_brdkill(struct lpfc_hba *); -int lpfc_sli_chipset_init(struct lpfc_hba *phba); +int lpfc_sli_chipset_init(struct lpfc_hba *); int lpfc_sli_brdreset(struct lpfc_hba *); int lpfc_sli_brdrestart(struct lpfc_hba *); int lpfc_sli_hba_setup(struct lpfc_hba *); @@ -350,13 +442,19 @@ int lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, uint64_t, lpfc_ctx_cmd); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_mbox_timeout(unsigned long); +#else void 
lpfc_mbox_timeout(struct timer_list *t); +#endif void lpfc_mbox_timeout_handler(struct lpfc_hba *); +int lpfc_issue_hb_mbox(struct lpfc_hba *phba); +void lpfc_issue_hb_tmo(struct lpfc_hba *phba); struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, struct lpfc_name *); -struct lpfc_nodelist *lpfc_findnode_mapped(struct lpfc_vport *vport); +struct lpfc_nodelist *lpfc_findnode_mapped(struct lpfc_vport *); int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); @@ -380,7 +478,7 @@ void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp); int lpfc_link_reset(struct lpfc_vport *vport); /* Function prototypes. */ -int lpfc_check_pci_resettable(const struct lpfc_hba *phba); +int lpfc_check_pci_resettable(struct lpfc_hba *phba); const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); @@ -397,16 +495,14 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *); extern struct device_attribute *lpfc_hba_attrs[]; extern struct device_attribute *lpfc_vport_attrs[]; extern struct scsi_host_template lpfc_template; -extern struct scsi_host_template lpfc_template_no_hr; extern struct scsi_host_template lpfc_template_nvme; -extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); void lpfc_terminate_rport_io(struct fc_rport *); -void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); +void lpfc_dev_loss_tmo_callbk(struct fc_rport *); struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *); int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); @@ -433,10 +529,19 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t, int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t, uint16_t *, uint16_t *); +extern void lpfc_edsm_validate_target(struct lpfc_vport *, + struct lpfc_nodelist *); +extern void lpfc_edsm_set_state(struct lpfc_vport *, + struct lpfc_external_dif_support *, uint32_t); + /* Interface exported by fabric iocb scheduler */ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); void lpfc_fabric_abort_hba(struct lpfc_hba *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_fabric_block_timeout(unsigned long); +#else void lpfc_fabric_block_timeout(struct timer_list *); +#endif void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); void lpfc_rampdown_queue_depth(struct lpfc_hba *); void lpfc_ramp_down_queue_handler(struct lpfc_hba *); @@ -450,6 +555,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); void lpfc_create_static_vport(struct lpfc_hba *); void lpfc_stop_hba_timers(struct lpfc_hba *); void lpfc_stop_port(struct lpfc_hba *); +int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz); +int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz, + struct Scsi_Host *shost); void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *); void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); @@ -457,6 +565,7 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); void lpfc_start_fdiscs(struct lpfc_hba *phba); struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); struct lpfc_sglq 
*__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); +#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | (scsi_code)) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 @@ -474,6 +583,9 @@ struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *, int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); uint32_t lpfc_drain_txq(struct lpfc_hba *); +#ifndef NO_APEX +void lpfc_set_ndlps_fcp_priority(struct lpfc_nodelist *); +#endif void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *); int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t); void lpfc_handle_rrq_active(struct lpfc_hba *); @@ -496,9 +608,9 @@ int lpfc_sli4_queue_create(struct lpfc_hba *); void lpfc_sli4_queue_destroy(struct lpfc_hba *); void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *, struct sli4_wcqe_xri_aborted *); +int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t); void lpfc_sli_abts_recover_port(struct lpfc_vport *, struct lpfc_nodelist *); -int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t); int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); @@ -583,10 +695,25 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, int); void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd, struct lpfc_sli4_hdw_queue *qp); +void lpfc_mode_sense_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *p_io_in, + struct lpfc_iocbq *p_io_out); +int lpfc_issue_scsi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_scsi_io_req *cmd_req); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd); +#endif +#if defined(BUILD_NVME) void lpfc_nvme_cmd_template(void); void lpfc_nvmet_cmd_template(void); -void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn); +void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt); extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; +#endif +void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + uint32_t stat, uint32_t param); extern int lpfc_no_hba_reset_cnt; extern unsigned long lpfc_no_hba_reset[]; +extern int lpfc_acqe_cgn_frequency; +extern int lpfc_fabric_cgn_frequency; +extern int lpfc_use_cgn_signal; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index f81d1453eefb..fe114d4abe21 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -100,21 +100,274 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); } +/** + * lpfc_ct_unsol_cmpl - Completion callback function for unsolicited CT commands + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. 
+ * + * This routine is the completion callback for an unsolicited CT reject command. + * The memory allocated in the reject command path is freed up here. + **/ +static void +lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp, *bmp; + + ndlp = (struct lpfc_nodelist *)cmdiocb->context1; + if (ndlp) + lpfc_nlp_put(ndlp); + + mp = cmdiocb->context2; + bmp = cmdiocb->context3; + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + cmdiocb->context2 = NULL; + } + + if (bmp) { + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); + cmdiocb->context3 = NULL; + } + + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_ct_reject_event - Issue reject for unhandled E2E commands + * @ndlp: pointer to a node-list data structure. + * @ct_req: pointer to the CT request data structure. + * @ulp_context: ulp_context (rx_id) of the received unsolicited CT command + * @ox_id: ox_id of the unsolicited CT command + * + * This routine is invoked by lpfc_ct_handle_e2e to send a reject + * response for any unhandled E2E command. + **/ +static void +lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, + struct lpfc_sli_ct_request *ct_req, + u16 ulp_context, u16 ox_id) +{ + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ct_request *ct_rsp; + struct lpfc_iocbq *cmdiocbq = NULL; + struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *mp = NULL; + struct ulp_bde64 *bpl; + IOCB_t *icmd; + u8 rc = 0; + + /* fill in BDEs for command */ + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (!mp) { + rc = 1; + goto ct_e2e_exit; + } + + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys); + if (!mp->virt) { + rc = 2; + goto ct_e2e_free_mp; + } + + /* Allocate buffer for Buffer ptr list */ + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); + if (!bmp) { + rc = 3; + goto ct_e2e_free_mpvirt; + } + + bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys); + if (!bmp->virt) { + rc = 4; + goto ct_e2e_free_bmp; + } + + INIT_LIST_HEAD(&mp->list); + INIT_LIST_HEAD(&bmp->list); + + bpl = (struct ulp_bde64 *)bmp->virt; + memset(bpl, 0, sizeof(struct ulp_bde64)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); + bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64; + bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); + bpl->tus.w = le32_to_cpu(bpl->tus.w); + + ct_rsp = (struct lpfc_sli_ct_request *)mp->virt; + memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request)); + + ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION; + ct_rsp->RevisionId.bits.InId = 0; + ct_rsp->FsType = ct_req->FsType; + ct_rsp->FsSubType = ct_req->FsSubType; + ct_rsp->CommandResponse.bits.Size = 0; + ct_rsp->CommandResponse.bits.CmdRsp = + cpu_to_be16(SLI_CT_RESPONSE_FS_RJT); + ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED; + ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL; + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = 5; + goto ct_e2e_free_bmpvirt; + } + + icmd = &cmdiocbq->iocb; + icmd->un.genreq64.bdl.ulpIoTag32 = 0; + icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64); + icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA); + icmd->un.genreq64.w5.hcsw.Dfctl = 0; + icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL; + icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; 
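+ /* Send the FS_RJT as an XMIT_SEQUENCE64 on the ELS ring, reusing + * the exchange (ulp_context/ox_id) from the unsolicited request so + * the reject completes the originator's open exchange. + */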
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; + icmd->ulpBdeCount = 1; + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + + /* Save for completion so we can release these resources */ + cmdiocbq->context1 = lpfc_nlp_get(ndlp); + cmdiocbq->context2 = (uint8_t *)mp; + cmdiocbq->context3 = (uint8_t *)bmp; + cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl; + icmd->ulpContext = ulp_context; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = ox_id; + icmd->un.ulpWord[3] = + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + icmd->ulpTimeout = (3 * phba->fc_ratov); + + cmdiocbq->retry = 0; + cmdiocbq->vport = vport; + cmdiocbq->context_un.ndlp = NULL; + cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + if (!rc) + return; + + rc = 6; + lpfc_nlp_put(ndlp); + lpfc_sli_release_iocbq(phba, cmdiocbq); +ct_e2e_free_bmpvirt: + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); +ct_e2e_free_bmp: + kfree(bmp); +ct_e2e_free_mpvirt: + lpfc_mbuf_free(phba, mp->virt, mp->phys); +ct_e2e_free_mp: + kfree(mp); +ct_e2e_exit: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6440 UNSOL CT Rsp err %d Data: x%x\n", + rc, vport->fc_flag); +} + +/** + * lpfc_ct_handle_e2e - Process an unsolicited E2E event data buffer + * @phba: pointer to lpfc hba data structure. + * @ctiocbq: pointer to lpfc E2E CT command iocb data structure. + * + * This routine is used for processing the IOCB associated with an unsolicited + * E2E event. It first determines whether there is an existing ndlp that matches + * the DID from the unsolicited IOCB. If not, it will return. The E2E-CT command + * from the unsolicited IOCB is then used to invoke the proper routine and to + * set up the proper state of the discovery state machine. + * + **/ +static void +lpfc_ct_handle_e2e(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) +{ + struct lpfc_sli_ct_request *ct_req; + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_vport *vport = NULL; + IOCB_t *icmd = &ctiocbq->iocb; + u32 did = 0, vpi; + u32 e2e_cmd; + + vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi; + vport = lpfc_find_vport_by_vpid(phba, vpi); + if (!vport) { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "6437 Unsol CT : VPORT NULL vpi : x%x\n", + vpi); + return; + } + + did = ctiocbq->iocb.un.rcvels.remoteID; + if (icmd->ulpStatus) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6438 Unsol CT: status:x%x/x%x did : x%x\n", + icmd->ulpStatus, icmd->un.ulpWord[4], did); + return; + } + + /* Ignore traffic received during vport shutdown */ + if (vport->fc_flag & FC_UNLOADING) + return; + + ndlp = lpfc_findnode_did(vport, did); + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6439 Unsol CT: NDLP not found for DID x%x\n", + did); + return; + } + + ct_req = ((struct lpfc_sli_ct_request *) + (((struct lpfc_dmabuf *)ctiocbq->context2)->virt)); + + e2e_cmd = ct_req->CommandResponse.bits.CmdRsp; + switch (e2e_cmd) { + case 0: + default: + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "6442 : E2E Cmd : x%x Not Supported\n", + e2e_cmd); + lpfc_ct_reject_event(ndlp, ct_req, + ctiocbq->iocb.ulpContext, + ctiocbq->iocb.unsli3.rcvsli3.ox_id); + return; + } +} + +/** + * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring + * @phba: pointer to lpfc hba data structure. + * @pring: pointer to a SLI ring. + * @ctiocbq: pointer to lpfc ct iocb data structure. + * + * This routine is used to process an unsolicited event received from a SLI + * (Service Level Interface) ring. 
The actual processing of the data buffer + * associated with the unsolicited event is done by invoking the appropriate routine + * after properly setting up the iocb buffer from the SLI ring on which the + * unsolicited event was received. + **/ void lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct lpfc_iocbq *piocbq) + struct lpfc_iocbq *ctiocbq) { struct lpfc_dmabuf *mp = NULL; - IOCB_t *icmd = &piocbq->iocb; + IOCB_t *icmd = &ctiocbq->iocb; int i; struct lpfc_iocbq *iocbq; - dma_addr_t paddr; + dma_addr_t dma_addr; uint32_t size; struct list_head head; - struct lpfc_dmabuf *bdeBuf; + struct lpfc_sli_ct_request *ct_req; + struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2; + struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3; - if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0) - return; + ctiocbq->context1 = NULL; + ctiocbq->context2 = NULL; + ctiocbq->context3 = NULL; if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); @@ -128,46 +381,76 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return; } - /* If there are no BDEs associated with this IOCB, - * there is nothing to do. + /* If there are no BDEs associated + * with this IOCB, there is nothing to do. */ if (icmd->ulpBdeCount == 0) return; + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + ctiocbq->context2 = bdeBuf1; + if (icmd->ulpBdeCount == 2) + ctiocbq->context3 = bdeBuf2; + } else { + dma_addr = getPaddr(icmd->un.cont64[0].addrHigh, + icmd->un.cont64[0].addrLow); + ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring, + dma_addr); + if (icmd->ulpBdeCount == 2) { + dma_addr = getPaddr(icmd->un.cont64[1].addrHigh, + icmd->un.cont64[1].addrLow); + ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba, + pring, + dma_addr); + } + } + + ct_req = ((struct lpfc_sli_ct_request *) + (((struct lpfc_dmabuf *)ctiocbq->context2)->virt)); + + /* Check if request is for E2E */ + if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE && + ct_req->FsSubType == SLI_CT_E2E_Subtypes) { + lpfc_ct_handle_e2e(phba, ctiocbq); + } else { + if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq)) + return; + } + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { INIT_LIST_HEAD(&head); - list_add_tail(&head, &piocbq->list); + list_add_tail(&head, &ctiocbq->list); list_for_each_entry(iocbq, &head, list) { icmd = &iocbq->iocb; if (icmd->ulpBdeCount == 0) continue; - bdeBuf = iocbq->context2; + bdeBuf1 = iocbq->context2; iocbq->context2 = NULL; size = icmd->un.cont64[0].tus.f.bdeSize; - lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size); - lpfc_in_buf_free(phba, bdeBuf); + lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size); + lpfc_in_buf_free(phba, bdeBuf1); if (icmd->ulpBdeCount == 2) { - bdeBuf = iocbq->context3; + bdeBuf2 = iocbq->context3; iocbq->context3 = NULL; size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize; - lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, + lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2, size); - lpfc_in_buf_free(phba, bdeBuf); + lpfc_in_buf_free(phba, bdeBuf2); } } list_del(&head); } else { INIT_LIST_HEAD(&head); - list_add_tail(&head, &piocbq->list); + list_add_tail(&head, &ctiocbq->list); list_for_each_entry(iocbq, &head, list) { icmd = &iocbq->iocb; if (icmd->ulpBdeCount == 0) lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0); for (i = 0; i < icmd->ulpBdeCount; i++) { - paddr = getPaddr(icmd->un.cont64[i].addrHigh, - icmd->un.cont64[i].addrLow); + dma_addr = getPaddr(icmd->un.cont64[i].addrHigh, + icmd->un.cont64[i].addrLow); mp = 
lpfc_sli_ringpostbuf_get(phba, pring, - paddr); + dma_addr); size = icmd->un.cont64[i].tus.f.bdeSize; lpfc_ct_unsol_buffer(phba, iocbq, mp, size); lpfc_in_buf_free(phba, mp); @@ -462,7 +745,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) struct lpfc_nodelist *ndlp; if ((vport->port_type != LPFC_NPIV_PORT) || - (fc4_type == FC_TYPE_FCP) || !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { ndlp = lpfc_setup_disc_node(vport, Did); @@ -715,7 +997,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* This is a GID_FT completing so the gidft_inp counter was * incremented before the GID_FT was issued to the wire. */ - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; /* * Skip processing the NS response @@ -743,16 +1026,19 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; /* CT command is being retried */ - vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, type); if (rc == 0) goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0257 GID_FT Query error: 0x%x 0x%x\n", irsp->ulpStatus, vport->fc_ns_retry); } else { @@ -763,9 +1049,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0208 NameServer Rsp Data: x%x x%x " - "sz x%x\n", + "x%x x%x sz x%x\n", vport->fc_flag, CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, irsp->un.genreq64.bdl.bdeSize); lpfc_ns_rsp(vport, @@ -811,7 +1099,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } else { /* NameServer Rsp Error */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0241 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", CTrsp->CommandResponse.bits.CmdRsp, @@ -825,7 +1113,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -918,7 +1207,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* This is a GID_PT completing so the gidft_inp counter was * incremented before the GID_PT was issued to the wire. 
*/ - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; /* * Skip processing the NS response @@ -942,16 +1232,19 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_ns_retry++; /* CT command is being retried */ - vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, vport->fc_ns_retry, GID_PT_N_PORT); if (rc == 0) goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4103 GID_FT Query error: 0x%x 0x%x\n", irsp->ulpStatus, vport->fc_ns_retry); } else { @@ -961,9 +1254,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "4105 NameServer Rsp Data: x%x x%x\n", + "4105 NameServer Rsp Data: x%x x%x " + "x%x x%x sz x%x\n", vport->fc_flag, - CTreq->un.gid.Fc4Type); + CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, + irsp->un.genreq64.bdl.bdeSize); lpfc_ns_rsp(vport, outp, @@ -1008,7 +1305,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } else { /* NameServer Rsp Error */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4109 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", CTrsp->CommandResponse.bits.CmdRsp, @@ -1023,8 +1320,14 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6450 GID_PT cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { @@ -1134,7 +1437,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } } - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0267 NameServer GFF Rsp " "x%x Error (%d %d) Data: x%x x%x\n", did, irsp->ulpStatus, irsp->un.ulpWord[4], @@ -1159,6 +1462,11 @@ out: /* Link up / RSCN discovery */ if (vport->num_disc_nodes) vport->num_disc_nodes--; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6451 GFF_ID cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + if (vport->num_disc_nodes == 0) { /* * The driver has cycled through all Nports in the RSCN payload. 
@@ -1227,9 +1535,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "3064 Setting ndlp x%px, DID x%06x " - "with FC4 x%08x, Data: x%08x x%08x " - "%d\n", + "3064 Setting ndlp x%px, DID x%06x with " + "FC4 x%08x, Data: x%08x x%08x %d\n", ndlp, did, ndlp->nlp_fc4_type, FC_TYPE_FCP, FC_TYPE_NVME, ndlp->nlp_state); @@ -1257,7 +1564,7 @@ } } } else - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); lpfc_ct_free_iocb(phba, cmdiocb); @@ -1306,7 +1613,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode); if (irsp->ulpStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0268 NS cmd x%x Error (x%x x%x)\n", cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); @@ -1495,7 +1802,7 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, if (strlcat(symbol, tmp, size) >= size) goto buffer_done; - scnprintf(tmp, sizeof(tmp), " HN:%s", init_utsname()->nodename); + scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); if (strlcat(symbol, tmp, size) >= size) goto buffer_done; @@ -1829,7 +2136,7 @@ ns_cmd_free_mpvirt: ns_cmd_free_mp: kfree(mp); ns_cmd_exit: - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n", cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt); return 1; @@ -1942,6 +2249,15 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return; case SLI_MGMT_RPA: + if (vport->fc_flag & FC_CT_CGN_RPA) { + vport->fc_flag &= ~FC_CT_CGN_RPA; + if (phba->cmf_active_mode == LPFC_CFG_OFF) + return; + lpfc_vlog_msg(vport, KERN_WARNING, + LOG_DISCOVERY | LOG_CGN_MGMT, + "6435 E2E FDMI RPA failure\n"); + return; + } if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { /* Fallback to FDMI-1 */ vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; @@ -1978,20 +2294,54 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); break; + case SLI_MGMT_RPA: + if (vport->port_type == LPFC_PHYSICAL_PORT && + phba->sli4_hba.pc_sli4_params.E2E_attr) { + if (vport->fc_flag & FC_CT_CGN_RPA) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_CGN_MGMT, + "6436 E2E FDMI RPA Success\n"); + vport->fc_flag &= ~FC_CT_CGN_RPA; + return; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6210 Issue Vendor E2E FDMI %x\n", + phba->sli4_hba.pc_sli4_params.E2E_attr); + + /* CGN is only for the physical port, no vports */ + if (lpfc_fdmi_cmd(vport, ndlp, cmd, + LPFC_FDMI_VENDOR_ATTR_cgn) == 0) + vport->fc_flag |= FC_CT_CGN_RPA; + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_ELS, + "6458 Send E2E FDMI:%x Flag x%x\n", + phba->sli4_hba.pc_sli4_params. + E2E_attr, + phba->link_flag); + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY | LOG_ELS, + "6459 No FDMI E2E support - " + "RPA Success\n"); + } + break; } return; } /** - * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to + * lpfc_fdmi_change_check - Check for changed FDMI parameters * @vport: pointer to a host virtual N_Port data structure. 
* - * Called from hbeat timeout routine to check if the number of discovered - * ports has changed. If so, re-register thar port Attribute. + * Check how many mapped NPorts we are connected to. + * Check if our hostname changed. + * Called from the hbeat timeout routine to check if any FDMI parameters + * changed. If so, re-register those attributes. */ void -lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) +lpfc_fdmi_change_check(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; @@ -2004,17 +2354,44 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) if (!(vport->fc_flag & FC_FABRIC)) return; + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return; + + /* Check if system hostname changed */ + if (strcmp(phba->os_host_name, init_utsname()->nodename)) { + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); + + /* Since this affects multiple HBA and PORT attributes, we need + * to de-register and go through the whole FDMI registration cycle. + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + if (vport->port_type == LPFC_PHYSICAL_PORT) { + /* For extra ELXE2EM RPA */ + vport->fc_flag &= ~FC_CT_CGN_RPA; + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } else { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + } + + /* Since this code path registers all the port attributes + * we can just return without further checking. + */ + return; + } + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) return; + /* Check if the number of mapped NPorts changed */ cnt = lpfc_find_map_node(vport); if (cnt == vport->fdmi_num_disc) return; - ndlp = lpfc_findnode_did(vport, FDMI_DID); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) - return; - if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, LPFC_FDMI_PORT_ATTR_num_disc); @@ -2025,14 +2402,14 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) } /* Routines for all individual HBA attributes */ -static int +int lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2041,15 +2418,15 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) ad->AttrType = cpu_to_be16(RHBA_NODENAME); return size; } -static int +int lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* This string MUST be consistent with other FC platforms * supported by Broadcom. 
@@ -2066,15 +2443,15 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_hba *phba = vport->phba; struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->SerialNumber, sizeof(ae->un.AttrString)); @@ -2087,7 +2464,7 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) return size; } -static int +int lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2095,8 +2472,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2108,7 +2485,7 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2116,8 +2493,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelDesc, sizeof(ae->un.AttrString)); @@ -2130,7 +2507,7 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2139,8 +2516,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t i, j, incr, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* Convert JEDEC ID to ascii for hardware version */ incr = vp->rev.biuRev; @@ -2162,15 +2539,15 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, lpfc_release_version, sizeof(ae->un.AttrString)); @@ -2183,7 +2560,7 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2191,8 +2568,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); if (phba->sli_rev == LPFC_SLI_REV4) lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); @@ -2208,7 +2585,7 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2216,8 +2593,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); 
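+ /* Zero the whole attribute entry so the 4-byte pad that follows + * the revision string below is never sent uninitialized. + */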
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -2229,15 +2606,15 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", init_utsname()->sysname, @@ -2252,14 +2629,14 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); size = FOURBYTES + sizeof(uint32_t); @@ -2268,15 +2645,15 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_node_name(vport, ae->un.AttrString, 256); @@ -2287,14 +2664,14 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Nothing is defined for this currently */ ae->un.AttrInt = cpu_to_be32(0); @@ -2304,14 +2681,14 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Each driver instance corresponds to a single port */ ae->un.AttrInt = cpu_to_be32(1); @@ -2321,15 +2698,15 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, sizeof(struct lpfc_name)); @@ -2339,7 +2716,7 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2347,8 +2724,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strlcat(ae->un.AttrString, phba->BIOSVersion, sizeof(ae->un.AttrString)); @@ -2361,14 +2738,14 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = 
&ad->AttrValue; /* Driver doesn't have access to this information */ ae->un.AttrInt = cpu_to_be32(0); @@ -2378,15 +2755,15 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "EMULEX", sizeof(ae->un.AttrString)); @@ -2400,7 +2777,7 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, } /* Routines for all individual PORT attributes */ -static int +int lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2408,10 +2785,9 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); - ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ @@ -2426,7 +2802,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2434,7 +2810,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -2480,7 +2856,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2488,7 +2864,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { @@ -2550,7 +2926,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2558,7 +2934,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; hsp = (struct serv_parm *)&vport->fc_sparam; ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) | @@ -2570,7 +2946,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2578,8 +2954,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "/sys/class/scsi_host/host%d", shost->host_no); @@ -2592,18 +2968,18 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + 
memset(ae, 0, sizeof(*ae)); - snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", - init_utsname()->nodename); + scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", + vport->phba->os_host_name); len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); len += (len & 3) ? (4 - (len & 3)) : 4; @@ -2613,15 +2989,15 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2631,15 +3007,15 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); @@ -2649,15 +3025,15 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); len += (len & 3) ? (4 - (len & 3)) : 4; @@ -2667,7 +3043,7 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2675,7 +3051,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); else @@ -2686,14 +3062,14 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2701,15 +3077,15 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_portname, sizeof(struct lpfc_name)); @@ -2719,17 +3095,16 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, 
sizeof(*ae)); - ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ @@ -2743,14 +3118,14 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Link Up - operational */ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); size = FOURBYTES + sizeof(uint32_t); @@ -2759,14 +3134,14 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; vport->fdmi_num_disc = lpfc_find_map_node(vport); ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); size = FOURBYTES + sizeof(uint32_t); @@ -2775,14 +3150,14 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2790,15 +3165,15 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "Smart SAN Initiator", sizeof(ae->un.AttrString)); @@ -2811,15 +3186,15 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2832,17 +3207,18 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); - strncpy(ae->un.AttrString, "Smart SAN Version 2.0", + strncpy(ae->un.AttrString, + "Smart SAN Version 2.0", sizeof(ae->un.AttrString)); len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); @@ -2853,7 +3229,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { @@ -2861,8 +3237,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2874,34 
+3250,34 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* SRIOV (type 3) is not supported */ if (vport->vpi) - ae->un.AttrInt = cpu_to_be32(2); /* NPIV */ + ae->un.AttrInt = cpu_to_be32(2); /* NPIV */ else - ae->un.AttrInt = cpu_to_be32(1); /* Physical */ + ae->un.AttrInt = cpu_to_be32(1); /* Physical */ size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO); return size; } -static int +int lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(0); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2909,14 +3285,14 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, return size; } -static int +int lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) { struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(1); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2924,6 +3300,30 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, return size; } +int +lpfc_fdmi_vendor_attr_cgn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + char cgnrevision[16]; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + sprintf(cgnrevision, "ELXE2EM:%04d", + phba->sli4_hba.pc_sli4_params.E2E_attr); + strncpy(ae->un.AttrString, &cgnrevision[0], + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_VENDOR_CGN); + return size; +} + /* RHBA attribute jump table */ int (*lpfc_fdmi_hba_action[]) (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = { @@ -2975,6 +3375,7 @@ int (*lpfc_fdmi_port_action[]) lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */ lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */ lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */ + lpfc_fdmi_vendor_attr_cgn, /* bit23 RPRT_VENDOR_CGN */ }; /** @@ -3004,7 +3405,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_fdmi_attr_block *ab = NULL; int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad); void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, - struct lpfc_iocbq *); + struct lpfc_iocbq *); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) return 0; @@ -3064,7 +3465,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Registered Port List */ /* One entry (port) per adapter */ rh->rpl.EntryCnt = cpu_to_be32(1); - memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, + memcpy(&rh->rpl.pe.PortName, + &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); /* point to the HBA attribute block */ @@ -3229,9 +3631,17 @@ fdmi_cmd_exit: * the worker thread. 
**/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_delayed_disc_tmo(unsigned long ptr) +#else lpfc_delayed_disc_tmo(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_vport *vport = (struct lpfc_vport *)ptr; +#else struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo); +#endif struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long iflag; @@ -3258,6 +3668,10 @@ void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); +#ifndef BUILD_BRCMFCOE + struct lpfc_nodelist *ndlp = NULL; + int rc = 0; +#endif spin_lock_irq(shost->host_lock); if (!(vport->fc_flag & FC_DISC_DELAYED)) { @@ -3267,7 +3681,18 @@ lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) vport->fc_flag &= ~FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); +#ifndef BUILD_BRCMFCOE + ndlp = lpfc_findnode_did(vport, Fabric_DID); + /* check for authentication requirement */ + rc = lpfc_auth_start(vport, ndlp); + if (rc) { + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(vport->phba); + lpfc_do_scr_ns_plogi(vport->phba, vport); + } +#else lpfc_do_scr_ns_plogi(vport->phba, vport); +#endif } void diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 8d34be60d379..475551aab351 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.broadcom.com * @@ -31,14 +31,18 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> +#include <linux/vmalloc.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -107,10 +111,12 @@ MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); /* This MUST be a power of 2 */ +#ifdef BUILD_NVME static int lpfc_debugfs_max_nvmeio_trc; module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444); MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc, "Set debugfs NVME IO trace depth"); +#endif static int lpfc_debugfs_mask_disc_trc; module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); @@ -170,7 +176,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += scnprintf(buf+len, size-len, buffer, + len += scnprintf(buf + len, size - len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { @@ -181,7 +187,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += scnprintf(buf+len, size-len, buffer, + len += scnprintf(buf + len, size - len, buffer, dtp->data1, dtp->data2, dtp->data3); } @@ -236,7 +242,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += scnprintf(buf+len, size-len, buffer, + len += scnprintf(buf + len, size - len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { @@ -247,7 +253,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += scnprintf(buf+len, size-len, buffer, + len += scnprintf(buf + len, size - len, buffer, dtp->data1, dtp->data2, dtp->data3); } @@ -307,7 +313,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) i = lpfc_debugfs_last_hbq; - len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i); + len += scnprintf(buf + len, size - len, "HBQ %d Info\n", i); hbqs = &phba->hbqs[i]; posted = 0; @@ -315,21 +321,21 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) posted++; hip = lpfc_hbq_defs[i]; - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n", hip->hbq_index, hip->profile, hip->rn, hip->buffer_count, hip->init_count, hip->add_count, posted); raw_index = phba->hbq_get[i]; getidx = le32_to_cpu(raw_index); - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; for (j=0; j<hbqs->entry_count; j++) { - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "%03d: %08x %04x %05x ", j, le32_to_cpu(hbqe->bde.addrLow), le32_to_cpu(hbqe->bde.tus.w), @@ -341,16 +347,16 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, 
int size) low = hbqs->hbqPutIdx - posted; if (low >= 0) { if ((j >= hbqs->hbqPutIdx) || (j < low)) { - len += scnprintf(buf + len, size - len, - "Unused\n"); + len += scnprintf(buf + len, + size - len, "Unused\n"); goto skipit; } } else { if ((j >= hbqs->hbqPutIdx) && (j < (hbqs->entry_count+low))) { - len += scnprintf(buf + len, size - len, - "Unused\n"); + len += scnprintf(buf + len, + size - len, "Unused\n"); goto skipit; } } @@ -360,7 +366,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); if (phys == le32_to_cpu(hbqe->bde.addrLow)) { - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "Buf%d: x%px %06x\n", i, hbq_buf->dbuf.virt, hbq_buf->tag); found = 1; @@ -369,7 +375,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) i++; } if (!found) { - len += scnprintf(buf+len, size-len, "No DMAinfo?\n"); + len += scnprintf(buf + len, + size - len, "No DMAinfo?\n"); } skipit: hbqe++; @@ -568,6 +575,71 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) return strnlen(buf, size); } +/** + * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics + * @file: The file pointer to write to. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start writing from. + * + * Description: + * This routine clears multi-XRI pools statistics when buf contains "clear". + * + * Return Value: + * It returns the @nbytes passed in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + **/ +static ssize_t +lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char mybuf[64]; + char *pbuf; + u32 i; + u32 hwq_count; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + + if (nbytes > 64) + nbytes = 64; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) { + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + + qp->empty_io_bufs = 0; + multixri_pool->pbl_empty_count = 0; +#ifdef LPFC_MXP_STAT + multixri_pool->above_limit_count = 0; + multixri_pool->below_limit_count = 0; + multixri_pool->stat_max_hwm = 0; + multixri_pool->local_pbl_hit_count = 0; + multixri_pool->other_pbl_hit_count = 0; + + multixri_pool->stat_pbl_count = 0; + multixri_pool->stat_pvt_count = 0; + multixri_pool->stat_busy_count = 0; + multixri_pool->stat_snapshot_taken = 0; +#endif + } + return strlen(pbuf); + } + + return -EINVAL; +} #ifdef LPFC_HDWQ_LOCK_STAT static int lpfc_debugfs_last_lock; @@ -676,7 +748,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) off = 0; spin_lock_irq(&phba->hbalock); - len += scnprintf(buf+len, size-len, "HBA SLIM\n"); + len += scnprintf(buf + len, size - len, "HBA SLIM\n"); lpfc_memcpy_from_slim(buffer, phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024); @@ -690,7 +762,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) i = 1024; while (i > 0) { - len 
+= scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -734,11 +806,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off = 0; spin_lock_irq(&phba->hbalock); - len += scnprintf(buf+len, size-len, "SLIM Mailbox\n"); + len += scnprintf(buf + len, size - len, "SLIM Mailbox\n"); ptr = (uint32_t *)phba->slim2p.virt; i = sizeof(MAILBOX_t); while (i > 0) { - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -747,11 +819,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off += (8 * sizeof(uint32_t)); } - len += scnprintf(buf+len, size-len, "SLIM PCB\n"); + len += scnprintf(buf + len, size - len, "SLIM PCB\n"); ptr = (uint32_t *)phba->pcb; i = sizeof(PCB_t); while (i > 0) { - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -764,7 +836,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) for (i = 0; i < 4; i++) { pgpp = &phba->port_gp[i]; pring = &psli->sli3_ring[i]; - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "Ring %d: CMD GetInx:%d " "(Max:%d Next:%d " "Local:%d flg:x%x) " @@ -781,7 +853,8 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) word1 = readl(phba->CAregaddr); word2 = readl(phba->HSregaddr); word3 = readl(phba->HCregaddr); - len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " + len += scnprintf(buf + len, + size - len, "HA:%08x CA:%08x HS:%08x " "HC:%08x\n", word0, word1, word2, word3); } spin_unlock_irq(&phba->hbalock); @@ -809,22 +882,26 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) int len = 0; int i, iocnt, outio, cnt; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; unsigned char *statep; +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_hba *phba = vport->phba; struct nvme_fc_local_port *localport; struct nvme_fc_remote_port *nrport = NULL; struct lpfc_nvme_rport *rport; +#endif +#endif cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); outio = 0; - len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); + len += scnprintf(buf + len, size - len, "\nFCP Nodelist Entries ...\n"); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { iocnt = 0; if (!cnt) { - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "Missing Nodelist Entries\n"); break; } @@ -862,34 +939,37 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) default: statep = "UNKNOWN"; } - len += scnprintf(buf+len, size-len, "%s DID:x%06x ", + len += scnprintf(buf + len, size - len, "%s DID:x%06x ", statep, ndlp->nlp_DID); - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "WWPN x%llx ", wwn_to_u64(ndlp->nlp_portname.u.wwn)); - len += scnprintf(buf+len, size-len, + len += scnprintf(buf + len, size - len, "WWNN x%llx ", wwn_to_u64(ndlp->nlp_nodename.u.wwn)); if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - len += scnprintf(buf+len, 
size-len, "RPI:%03d ", + len += scnprintf(buf + len, size - len, "RPI:%03d ", ndlp->nlp_rpi); else - len += scnprintf(buf+len, size-len, "RPI:none "); - len += scnprintf(buf+len, size-len, "flag:x%08x ", + len += scnprintf(buf + len, size - len, "RPI:none "); + len += scnprintf(buf + len, size - len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) - len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); + len += scnprintf(buf + len, size - len, + "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) - len += scnprintf(buf+len, size-len, "FC_NODE "); + len += scnprintf(buf + len, size - len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) { - len += scnprintf(buf+len, size-len, "FABRIC "); + len += scnprintf(buf + len, size - len, "FABRIC "); iocnt = 0; } if (ndlp->nlp_type & NLP_FCP_TARGET) - len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ", - ndlp->nlp_sid); + len += scnprintf(buf + len, + size - len, "FCP_TGT sid:%d ", + ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) - len += scnprintf(buf+len, size-len, "FCP_INITIATOR "); + len += scnprintf(buf + len, + size - len, "FCP_INITIATOR "); if (ndlp->nlp_type & NLP_NVME_TARGET) len += scnprintf(buf + len, size - len, "NVME_TGT sid:%d ", @@ -897,9 +977,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) if (ndlp->nlp_type & NLP_NVME_INITIATOR) len += scnprintf(buf + len, size - len, "NVME_INITIATOR "); - len += scnprintf(buf+len, size-len, "usgmap:%x ", + len += scnprintf(buf + len, size - len, "usgmap:%x ", ndlp->nlp_usg_map); - len += scnprintf(buf+len, size-len, "refcnt:%x", + len += scnprintf(buf + len, size - len, "refcnt:%x", kref_read(&ndlp->kref)); if (iocnt) { i = atomic_read(&ndlp->cmd_pending); @@ -910,13 +990,15 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) } len += scnprintf(buf + len, size - len, "defer:%x ", ndlp->nlp_defer_did); - len += scnprintf(buf+len, size-len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); } spin_unlock_irq(shost->host_lock); len += scnprintf(buf + len, size - len, "\nOutstanding IO x%x\n", outio); +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { len += scnprintf(buf + len, size - len, "\nNVME Targetport Entry ...\n"); @@ -993,26 +1075,29 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) - len += scnprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "INITIATOR "); if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) - len += scnprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "TARGET "); if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) - len += scnprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "DISCSRVC "); if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | FC_PORT_ROLE_NVME_DISCOVERY)) - len += scnprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "UNKNOWN ROLE x%x", nrport->port_role); /* Terminate the string. 
*/ - len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); } spin_unlock_irq(shost->host_lock); + out_exit: +#endif +#endif return len; } @@ -1032,6 +1117,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) static int lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) { + int len = 0; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_hba *phba = vport->phba; struct lpfc_nvmet_tgtport *tgtp; struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; @@ -1041,7 +1128,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) uint64_t data1, data2, data3; uint64_t tot, totin, totout; int cnt, i; - int len = 0; if (phba->nvmet_support) { if (!phba->targetport) @@ -1123,7 +1209,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); - len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); cnt = 0; spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); @@ -1138,8 +1224,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) "ABORT: %d ctx entries\n", cnt); spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, - &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, - list) { + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) break; len += scnprintf(buf + len, size - len, @@ -1234,6 +1320,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&lport->cmpl_fcp_err)); } +#endif return len; } @@ -1299,8 +1386,88 @@ buffer_done: return len; } +void +lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + uint64_t seg1, seg2, seg3, seg4; + uint64_t segsum; + + if (!lpfc_cmd->ts_last_cmd || + !lpfc_cmd->ts_cmd_start || + !lpfc_cmd->ts_cmd_wqput || + !lpfc_cmd->ts_isr_cmpl || + !lpfc_cmd->ts_data_io) + return; + + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start) + return; + if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd) + return; + if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start) + return; + if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput) + return; + if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl) + return; + /* + * Segment 1 - Time from Last FCP command cmpl is handed + * off to NVME Layer to start of next command. + * Segment 2 - Time from Driver receives an IO cmd start + * from NVME Layer to WQ put is done on IO cmd. + * Segment 3 - Time from Driver WQ put is done on IO cmd + * to MSI-X ISR for IO cmpl. + * Segment 4 - Time from MSI-X ISR for IO cmpl to when + * cmpl is handed off to the NVME Layer.
+ */ + seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd; + if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ + seg1 = 0; + + /* Calculate times relative to start of IO */ + seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start); + segsum = seg2; + seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start; + if (segsum > seg3) + return; + seg3 -= segsum; + segsum += seg3; + + seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start; + if (segsum > seg4) + return; + seg4 -= segsum; + + phba->ktime_data_samples++; + phba->ktime_seg1_total += seg1; + if (seg1 < phba->ktime_seg1_min) + phba->ktime_seg1_min = seg1; + else if (seg1 > phba->ktime_seg1_max) + phba->ktime_seg1_max = seg1; + phba->ktime_seg2_total += seg2; + if (seg2 < phba->ktime_seg2_min) + phba->ktime_seg2_min = seg2; + else if (seg2 > phba->ktime_seg2_max) + phba->ktime_seg2_max = seg2; + phba->ktime_seg3_total += seg3; + if (seg3 < phba->ktime_seg3_min) + phba->ktime_seg3_min = seg3; + else if (seg3 > phba->ktime_seg3_max) + phba->ktime_seg3_max = seg3; + phba->ktime_seg4_total += seg4; + if (seg4 < phba->ktime_seg4_min) + phba->ktime_seg4_min = seg4; + else if (seg4 > phba->ktime_seg4_max) + phba->ktime_seg4_max = seg4; + + lpfc_cmd->ts_last_cmd = 0; + lpfc_cmd->ts_cmd_start = 0; + lpfc_cmd->ts_cmd_wqput = 0; + lpfc_cmd->ts_isr_cmpl = 0; + lpfc_cmd->ts_data_io = 0; +} + /** - * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer + * lpfc_debugfs_ioktime_data - Dump IO ktime statistics to a buffer * @vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. @@ -1313,13 +1480,13 @@ buffer_done: * not exceed @size. **/ static int -lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) +lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; int len = 0; if (phba->nvmet_support == 0) { - /* NVME Initiator */ + /* Initiator */ len += scnprintf(buf + len, PAGE_SIZE - len, "ktime %s: Total Samples: %lld\n", (phba->ktime_on ? "Enabled" : "Disabled"), @@ -1329,8 +1496,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) len += scnprintf( buf + len, PAGE_SIZE - len, - "Segment 1: Last NVME Cmd cmpl " - "done -to- Start of next NVME cnd (in driver)\n"); + "Segment 1: Last Cmd cmpl " + "done -to- Start of next Cmd (in driver)\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", @@ -1340,7 +1507,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) phba->ktime_seg1_max); len += scnprintf( buf + len, PAGE_SIZE - len, - "Segment 2: Driver start of NVME cmd " + "Segment 2: Driver start of Cmd " "-to- Firmware WQ doorbell\n"); len += scnprintf( buf + len, PAGE_SIZE - len, @@ -1363,7 +1530,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 4: MSI-X ISR cmpl -to- " - "NVME cmpl done\n"); + "Cmd cmpl done\n"); len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", @@ -1383,7 +1550,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) } /* NVME Target */ - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "ktime %s: Total Samples: %lld %lld\n", (phba->ktime_on ?
"Enabled" : "Disabled"), phba->ktime_data_samples, @@ -1391,46 +1558,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) if (phba->ktime_data_samples == 0) return len; - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 1: MSI-X ISR Rcv cmd -to- " "cmd pass to NVME Layer\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg1_total, phba->ktime_data_samples), phba->ktime_seg1_min, phba->ktime_seg1_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 2: cmd pass to NVME Layer- " "-to- Driver rcv cmd OP (action)\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg2_total, phba->ktime_data_samples), phba->ktime_seg2_min, phba->ktime_seg2_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 3: Driver rcv cmd OP -to- " "Firmware WQ doorbell: cmd\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg3_total, phba->ktime_data_samples), phba->ktime_seg3_min, phba->ktime_seg3_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 4: Firmware WQ doorbell: cmd " "-to- MSI-X ISR for cmd cmpl\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg4_total, phba->ktime_data_samples), phba->ktime_seg4_min, phba->ktime_seg4_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 5: MSI-X ISR for cmd cmpl " "-to- NVME layer passed cmd done\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg5_total, phba->ktime_data_samples), @@ -1438,10 +1605,10 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) phba->ktime_seg5_max); if (phba->ktime_status_samples == 0) { - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Total: cmd received by MSI-X ISR " "-to- cmd completed on wire\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld " "max %08lld\n", div_u64(phba->ktime_seg10_total, @@ -1451,46 +1618,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) return len; } - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 6: NVME layer passed cmd done " "-to- Driver rcv rsp status OP\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg6_total, phba->ktime_status_samples), phba->ktime_seg6_min, phba->ktime_seg6_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 7: Driver rcv rsp status OP " "-to- Firmware WQ doorbell: status\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg7_total, phba->ktime_status_samples), phba->ktime_seg7_min, phba->ktime_seg7_max); 
- len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 8: Firmware WQ doorbell: status" " -to- MSI-X ISR for status cmpl\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg8_total, phba->ktime_status_samples), phba->ktime_seg8_min, phba->ktime_seg8_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Segment 9: MSI-X ISR for status cmpl " "-to- NVME layer passed status done\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg9_total, phba->ktime_status_samples), phba->ktime_seg9_min, phba->ktime_seg9_max); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Total: cmd received by MSI-X ISR -to- " "cmd completed on wire\n"); - len += scnprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg10_total, phba->ktime_status_samples), @@ -1547,7 +1714,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) if (!dtp->fmt) continue; - len += scnprintf(buf + len, size - len, dtp->fmt, + len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { @@ -1576,7 +1743,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) if (!dtp->fmt) continue; - len += scnprintf(buf + len, size - len, dtp->fmt, + len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { @@ -1602,42 +1769,50 @@ out: } /** - * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer + * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer * @vport: The vport to gather target node info from. * @buf: The buffer to dump log into. * @size: The maximum amount of data to process. * * Description: - * This routine dumps the NVME statistics associated with @vport + * This routine dumps the NVME + SCSI statistics associated with @vport * * Return Value: * This routine returns the amount of bytes that were dumped into @buf and will * not exceed @size. **/ static int -lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) +lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; struct lpfc_sli4_hdw_queue *qp; - int i, j, max_cnt; - int len = 0; + struct lpfc_hdwq_stat *c_stat; + int i, j, len; uint32_t tot_xmt; uint32_t tot_rcv; uint32_t tot_cmpl; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; - len += scnprintf(buf + len, PAGE_SIZE - len, - "CPUcheck %s ", - (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? - "Enabled" : "Disabled")); - if (phba->nvmet_support) { - len += scnprintf(buf + len, PAGE_SIZE - len, - "%s\n", - (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? - "Rcv Enabled\n" : "Rcv Disabled\n")); - } else { - len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); - } - max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ; + scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n"); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ", + (phba->hdwqstat_on & + (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ? 
+ "Enabled" : "Disabled")); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ", + (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ? + "Enabled" : "Disabled")); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "\n\n"); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; @@ -1645,46 +1820,149 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) tot_rcv = 0; tot_xmt = 0; tot_cmpl = 0; - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { - tot_xmt += qp->cpucheck_xmt_io[j]; - tot_cmpl += qp->cpucheck_cmpl_io[j]; + + for_each_present_cpu(j) { + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j); + + /* Only display for this HDWQ */ + if (i != c_stat->hdwq_no) + continue; + + /* Only display non-zero counters */ + if (!c_stat->xmt_io && !c_stat->cmpl_io && + !c_stat->rcv_io) + continue; + + if (!tot_xmt && !tot_cmpl && !tot_rcv) { + /* Print HDWQ string only the first time */ + scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + + tot_xmt += c_stat->xmt_io; + tot_cmpl += c_stat->cmpl_io; if (phba->nvmet_support) - tot_rcv += qp->cpucheck_rcv_io[j]; + tot_rcv += c_stat->rcv_io; + + scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + if (phba->nvmet_support) { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x RCV 0x%x |", + c_stat->xmt_io, c_stat->cmpl_io, + c_stat->rcv_io); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } else { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x |", + c_stat->xmt_io, c_stat->cmpl_io); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } } - /* Only display Hardware Qs with something */ + /* Check if nothing to display */ if (!tot_xmt && !tot_cmpl && !tot_rcv) continue; - len += scnprintf(buf + len, PAGE_SIZE - len, - "HDWQ %03d: ", i); - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { - /* Only display non-zero counters */ - if (!qp->cpucheck_xmt_io[j] && - !qp->cpucheck_cmpl_io[j] && - !qp->cpucheck_rcv_io[j]) - continue; - if (phba->nvmet_support) { - len += scnprintf(buf + len, PAGE_SIZE - len, - "CPU %03d: %x/%x/%x ", j, - qp->cpucheck_rcv_io[j], - qp->cpucheck_xmt_io[j], - qp->cpucheck_cmpl_io[j]); - } else { - len += scnprintf(buf + len, PAGE_SIZE - len, - "CPU %03d: %x/%x ", j, - qp->cpucheck_xmt_io[j], - qp->cpucheck_cmpl_io[j]); - } - } - len += scnprintf(buf + len, PAGE_SIZE - len, - "Total: %x\n", tot_xmt); - if (len >= max_cnt) { - len += scnprintf(buf + len, PAGE_SIZE - len, - "Truncated ...\n"); - return len; + scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: "); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + if (phba->nvmet_support) { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n", + tot_xmt, tot_cmpl, tot_rcv); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } else { + scnprintf(tmp, sizeof(tmp), + "XMT 0x%x CMPL 0x%x]\n\n", + tot_xmt, tot_cmpl); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; } } + +buffer_done: + len = strnlen(buf, size); + return len; +} + +/** + * lpfc_debugfs_ediflist_data - Dump target External DIF list to a buffer + * @vport: The vport to gather target External DIF info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. 
+ * + * Description: + * This routine dumps the current target External DIF list associated with @vport + * to @buf up to @size bytes of data. Each entry in the dump will contain the + * device state, SID and LUN, WWPN, dif_info, and inquiry/error counters. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_ediflist_data(struct lpfc_vport *vport, char *buf, int size) +{ + int len = 0; + int cnt; + struct lpfc_external_dif_support *edifp; + unsigned char *statep, *name; + + cnt = (LPFC_EDIFLIST_SIZE / LPFC_EDIFLIST_ENTRY_SIZE); + + spin_lock_irq(&vport->external_dif_lock); + list_for_each_entry(edifp, &vport->external_dif_list, listentry) { + if (!cnt) { + len += scnprintf(buf + len, size - len, + "Missing External DIF Entries\n"); + break; + } + cnt--; + switch (edifp->state) { + case LPFC_EDSM_IGNORE: + statep = "IGNORE "; + break; + case LPFC_EDSM_INIT: + statep = "INIT "; + break; + case LPFC_EDSM_NEEDS_INQUIRY: + statep = "NEEDS_INQ"; + break; + case LPFC_EDSM_PATH_STANDBY: + statep = "STANDBY "; + break; + case LPFC_EDSM_PATH_READY: + statep = "READY "; + break; + case LPFC_EDSM_PATH_CHECK: + statep = "CHECK "; + break; + default: + statep = "UNKNOWN "; + } + len += scnprintf(buf + len, + size - len, "%s Device (x%x/x%llx) ", + statep, edifp->sid, edifp->lun); + name = (unsigned char *)&edifp->portName; + len += scnprintf(buf + len, size - len, + "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7)); + len += scnprintf(buf + len, size - len, + "dif_info:x%x inq:%d err3f:%d err4b:%d", + edifp->dif_info, edifp->inq_cnt, edifp->err3f_cnt, + edifp->err4b_cnt); + len += scnprintf(buf + len, size - len, "\n"); + } + spin_unlock_irq(&vport->external_dif_lock); return len; } @@ -2047,10 +2325,6 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, char *pbuf; int i; - /* Protect copy from user */ - if (!access_ok(buf, nbytes)) - return -EFAULT; - memset(mybuf, 0, sizeof(mybuf)); if (copy_from_user(mybuf, buf, nbytes)) @@ -2078,6 +2352,103 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, } #endif +int +lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, char *buffer, int size) +{ + int copied = 0; + struct lpfc_dmabuf *dmabuf, *next; + + memset(buffer, 0, size); + + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(dmabuf, next, + &phba->ras_fwlog.fwlog_buff_list, list) { + /* Check if copying will go over size and a '\0' char */ + if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) { + memcpy(buffer + copied, dmabuf->virt, + size - copied - 1); + copied += size - copied - 1; + break; + } + memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE); + copied += LPFC_RAS_MAX_ENTRY_SIZE; + } + return copied; +} + +static int +lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer + * @inode: The inode pointer that contains a hba pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation.
It gets + * the hba from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory firmware log for this hba, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int size; + int rc = -ENOMEM; + + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + rc = -EINVAL; + goto out; + } + spin_unlock_irq(&phba->hbalock); + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize; + debug->buffer = vmalloc(size); + if (!debug->buffer) + goto free_debug; + + debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size); + if (debug->len < 0) { + rc = -EINVAL; + goto free_buffer; + } + file->private_data = debug; + + return 0; + +free_buffer: + vfree(debug->buffer); +free_debug: + kfree(debug); +out: + return rc; +} + /** * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. @@ -2162,6 +2533,13 @@ out: return rc; } +static int +lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + static ssize_t lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) @@ -2186,7 +2564,7 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); else if (dent == phba->debug_InjErrNPortID) cnt = scnprintf(cbuf, 32, "0x%06x\n", - phba->lpfc_injerr_nportid); + phba->lpfc_injerr_nportid); else if (dent == phba->debug_InjErrWWPN) { memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); tmp = cpu_to_be64(tmp); @@ -2220,7 +2598,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, return 0; if (dent == phba->debug_InjErrLBA) { - if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) + if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && + (dstbuf[2] == 'f')) tmp = (uint64_t)(-1); } @@ -2301,6 +2680,48 @@ out: return rc; } +/** + * lpfc_debugfs_ediflist_open - Open the ediflist debugfs file + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the dump, fills the buffer from the External DIF list for this vport, and then + * returns a pointer to that dump in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value.
+ **/ +static int +lpfc_debugfs_ediflist_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_EDIFLIST_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_ediflist_data(vport, debug->buffer, + LPFC_EDIFLIST_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + /** * lpfc_debugfs_lseek - Seek through a debugfs file * @file: The file pointer to seek through. @@ -2321,8 +2742,22 @@ out: static loff_t lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) { - struct lpfc_debug *debug = file->private_data; - return fixed_size_llseek(file, off, whence, debug->len); + struct lpfc_debug *debug; + loff_t pos = -1; + + debug = file->private_data; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = debug->len + off; + } + return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); } /** @@ -2374,76 +2809,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file) return 0; } -/** - * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics - * @file: The file pointer to read from. - * @buf: The buffer to copy the user data from. - * @nbytes: The number of bytes to get. - * @ppos: The position in the file to start reading from. - * - * Description: - * This routine clears multi-XRI pools statistics when buf contains "clear". - * - * Return Value: - * It returns the @nbytges passing in from debugfs user space when successful. - * In case of error conditions, it returns proper error code back to the user - * space. 
- **/ -static ssize_t -lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct lpfc_debug *debug = file->private_data; - struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; - char mybuf[64]; - char *pbuf; - u32 i; - u32 hwq_count; - struct lpfc_sli4_hdw_queue *qp; - struct lpfc_multixri_pool *multixri_pool; - - if (nbytes > 64) - nbytes = 64; - - /* Protect copy from user */ - if (!access_ok(buf, nbytes)) - return -EFAULT; - - memset(mybuf, 0, sizeof(mybuf)); - - if (copy_from_user(mybuf, buf, nbytes)) - return -EFAULT; - pbuf = &mybuf[0]; - - if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) { - hwq_count = phba->cfg_hdw_queue; - for (i = 0; i < hwq_count; i++) { - qp = &phba->sli4_hba.hdwq[i]; - multixri_pool = qp->p_multixri_pool; - if (!multixri_pool) - continue; - - qp->empty_io_bufs = 0; - multixri_pool->pbl_empty_count = 0; -#ifdef LPFC_MXP_STAT - multixri_pool->above_limit_count = 0; - multixri_pool->below_limit_count = 0; - multixri_pool->stat_max_hwm = 0; - multixri_pool->local_pbl_hit_count = 0; - multixri_pool->other_pbl_hit_count = 0; - - multixri_pool->stat_pbl_count = 0; - multixri_pool->stat_pvt_count = 0; - multixri_pool->stat_busy_count = 0; - multixri_pool->stat_snapshot_taken = 0; -#endif - } - return strlen(pbuf); - } - - return -EINVAL; -} - static int lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) { @@ -2477,6 +2842,7 @@ static ssize_t lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; @@ -2529,6 +2895,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, atomic_set(&tgtp->xmt_abort_rsp, 0); atomic_set(&tgtp->xmt_abort_rsp_error, 0); } +#endif return nbytes; } @@ -2571,10 +2938,6 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf, char mybuf[6] = {0}; int i; - /* Protect copy from user */ - if (!access_ok(buf, nbytes)) - return -EFAULT; - if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ? 
(sizeof(mybuf) - 1) : nbytes)) return -EFAULT; @@ -2591,7 +2954,7 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf, } static int -lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) +lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; @@ -2602,14 +2965,14 @@ lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL); + debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } - debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer, - LPFC_NVMEKTIME_SIZE); + debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer, + LPFC_IOKTIME_SIZE); debug->i_private = inode->i_private; file->private_data = debug; @@ -2620,8 +2983,8 @@ out: } static ssize_t -lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) +lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; @@ -2807,7 +3170,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, kfree(phba->nvmeio_trc); /* Allocate new trace buffer and initialize */ - phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * sz), GFP_KERNEL); if (!phba->nvmeio_trc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -2815,6 +3178,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, "nvmeio_trc buffer\n"); return -ENOMEM; } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz)); atomic_set(&phba->nvmeio_trc_cnt, 0); phba->nvmeio_trc_on = 0; phba->nvmeio_trc_output_idx = 0; @@ -2823,7 +3188,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, } static int -lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) +lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file) { struct lpfc_vport *vport = inode->i_private; struct lpfc_debug *debug; @@ -2834,14 +3199,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL); + debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } - debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer, - LPFC_CPUCHECK_SIZE); + debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer, + LPFC_SCSISTAT_SIZE); debug->i_private = inode->i_private; file->private_data = debug; @@ -2852,16 +3217,16 @@ out: } static ssize_t -lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, +lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; - struct lpfc_sli4_hdw_queue *qp; + struct lpfc_hdwq_stat *c_stat; char mybuf[64]; char *pbuf; - int i, j; + int i; if (nbytes > 64) nbytes = 64; @@ -2874,41 +3239,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { if (phba->nvmet_support) - phba->cpucheck_on |= 
LPFC_CHECK_NVMET_IO; + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; else - phba->cpucheck_on |= (LPFC_CHECK_NVME_IO | + phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO | LPFC_CHECK_SCSI_IO); return strlen(pbuf); } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) { if (phba->nvmet_support) - phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; + phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO; else - phba->cpucheck_on |= LPFC_CHECK_NVME_IO; + phba->hdwqstat_on |= LPFC_CHECK_NVME_IO; return strlen(pbuf); } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) { - phba->cpucheck_on |= LPFC_CHECK_SCSI_IO; + if (!phba->nvmet_support) + phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO; return strlen(pbuf); - } else if ((strncmp(pbuf, "rcv", - sizeof("rcv") - 1) == 0)) { - if (phba->nvmet_support) - phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV; - else - return -EINVAL; + } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) { + phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO | + LPFC_CHECK_NVMET_IO); + return strlen(pbuf); + } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) { + phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO; return strlen(pbuf); } else if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) { - phba->cpucheck_on = LPFC_CHECK_OFF; + phba->hdwqstat_on = LPFC_CHECK_OFF; return strlen(pbuf); } else if ((strncmp(pbuf, "zero", sizeof("zero") - 1) == 0)) { - for (i = 0; i < phba->cfg_hdw_queue; i++) { - qp = &phba->sli4_hba.hdwq[i]; - - for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { - qp->cpucheck_rcv_io[j] = 0; - qp->cpucheck_xmt_io[j] = 0; - qp->cpucheck_cmpl_io[j] = 0; - } + for_each_present_cpu(i) { + c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i); + c_stat->xmt_io = 0; + c_stat->cmpl_io = 0; + c_stat->rcv_io = 0; } return strlen(pbuf); } @@ -3129,17 +3492,17 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, switch (count) { case SIZE_U8: /* byte (8 bits) */ pci_read_config_byte(pdev, where, &u8val); - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "%03x: %02x\n", where, u8val); break; case SIZE_U16: /* word (16 bits) */ pci_read_config_word(pdev, where, &u16val); - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "%03x: %04x\n", where, u16val); break; case SIZE_U32: /* double word (32 bits) */ pci_read_config_dword(pdev, where, &u32val); - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "%03x: %08x\n", where, u32val); break; case LPFC_PCI_CFG_BROWSE: /* browse all */ @@ -3159,25 +3522,25 @@ pcicfg_browse: offset = offset_label; /* Read PCI config space */ - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "%03x: ", offset_label); while (index > 0) { pci_read_config_dword(pdev, offset, &u32val); - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "%08x ", u32val); offset += sizeof(uint32_t); if (offset >= LPFC_PCI_CFG_SIZE) { - len += scnprintf(pbuffer+len, - LPFC_PCI_CFG_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, + LPFC_PCI_CFG_SIZE - len, "\n"); break; } index -= sizeof(uint32_t); if (!index) - len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); - len += scnprintf(pbuffer+len, 
LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_CFG_SIZE - len, "\n%03x: ", offset_label); } } @@ -3448,7 +3811,7 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, if (acc_range == SINGLE_WORD) { offset_run = offset; u32val = readl(mem_mapped_bar + offset_run); - len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_BAR_RD_BUF_SIZE - len, "%05x: %08x\n", offset_run, u32val); } else goto baracc_browse; @@ -3462,36 +3825,36 @@ baracc_browse: offset_run = offset_label; /* Read PCI bar memory mapped space */ - len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_BAR_RD_BUF_SIZE - len, "%05x: ", offset_label); index = LPFC_PCI_BAR_RD_SIZE; while (index > 0) { u32val = readl(mem_mapped_bar + offset_run); - len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_PCI_BAR_RD_BUF_SIZE - len, "%08x ", u32val); offset_run += sizeof(uint32_t); if (acc_range == LPFC_PCI_BAR_BROWSE) { if (offset_run >= bar_size) { - len += scnprintf(pbuffer+len, - LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, + LPFC_PCI_BAR_RD_BUF_SIZE - len, "\n"); break; } } else { if (offset_run >= offset + (acc_range * sizeof(uint32_t))) { - len += scnprintf(pbuffer+len, - LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, + LPFC_PCI_BAR_RD_BUF_SIZE - len, "\n"); break; } } index -= sizeof(uint32_t); if (!index) - len += scnprintf(pbuffer+len, - LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, + LPFC_PCI_BAR_RD_BUF_SIZE - len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); - len += scnprintf(pbuffer+len, - LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer + len, + LPFC_PCI_BAR_RD_BUF_SIZE - len, "\n%05x: ", offset_label); } } @@ -3675,7 +4038,7 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->notify_interval); - len += scnprintf(pbuffer + len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3719,8 +4082,7 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, qp->entry_size, qp->host_index, qp->notify_interval, qp->max_proc_limit); - len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\n"); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3817,8 +4179,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->notify_interval, qp->max_proc_limit, qp->chann); - len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\n"); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3872,9 +4233,9 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, phba->lpfc_idiag_last_eq = 0; len += scnprintf(pbuffer + len, - LPFC_QUE_INFO_GET_BUF_SIZE - len, - "HDWQ %d out of %d HBA HDWQs\n", - x, phba->cfg_hdw_queue); + LPFC_QUE_INFO_GET_BUF_SIZE - len, + "HDWQ %d out of %d HBA HDWQs\n", + x, phba->cfg_hdw_queue); /* Fast-path EQ */ qp = phba->sli4_hba.hdwq[x].hba_eq; @@ -3956,7 +4317,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); too_big: - len += scnprintf(pbuffer + len, + len += scnprintf(pbuffer + 
len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); out: spin_unlock_irq(&phba->hbalock); @@ -4012,22 +4373,22 @@ lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, return 0; esize = pque->entry_size; - len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_QUE_ACC_BUF_SIZE - len, "QE-INDEX[%04d]:\n", index); offset = 0; pentry = lpfc_sli4_qe(pque, index); while (esize > 0) { - len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_QUE_ACC_BUF_SIZE - len, "%08x ", *pentry); pentry++; offset += sizeof(uint32_t); esize -= sizeof(uint32_t); if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) - len += scnprintf(pbuffer+len, - LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, + LPFC_QUE_ACC_BUF_SIZE - len, "\n"); } - len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer + len, LPFC_QUE_ACC_BUF_SIZE - len, "\n"); return len; } @@ -4208,17 +4569,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_cq; goto pass_check; } - /* NVME LS complete queue */ - if (phba->sli4_hba.nvmels_cq && - phba->sli4_hba.nvmels_cq->queue_id == queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.nvmels_cq, index, count); - if (rc) - goto error_out; - idiag.ptr_private = phba->sli4_hba.nvmels_cq; - goto pass_check; - } /* FCP complete queue */ if (phba->sli4_hba.hdwq) { for (qidx = 0; qidx < phba->cfg_hdw_queue; @@ -4263,17 +4613,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.els_wq; goto pass_check; } - /* NVME LS work queue */ - if (phba->sli4_hba.nvmels_wq && - phba->sli4_hba.nvmels_wq->queue_id == queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.nvmels_wq, index, count); - if (rc) - goto error_out; - idiag.ptr_private = phba->sli4_hba.nvmels_wq; - goto pass_check; - } if (phba->sli4_hba.hdwq) { /* FCP/SCSI work queue */ @@ -4290,7 +4629,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, } } } - goto error_out; break; case LPFC_IDIAG_RQ: @@ -4378,7 +4716,7 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, switch (drbregid) { case LPFC_DRB_EQ: - len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "EQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.EQDBregaddr)); break; @@ -4388,17 +4726,17 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, readl(phba->sli4_hba.CQDBregaddr)); break; case LPFC_DRB_MQ: - len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "MQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.MQDBregaddr)); break; case LPFC_DRB_WQ: - len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "WQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.WQDBregaddr)); break; case LPFC_DRB_RQ: - len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "RQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.RQDBregaddr)); break; @@ -4588,37 +4926,37 @@ lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer, switch (ctlregid) { case LPFC_CTL_PORT_SEM: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "Port SemReg: 
0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET)); break; case LPFC_CTL_PORT_STA: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "Port StaReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET)); break; case LPFC_CTL_PORT_CTL: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "Port CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET)); break; case LPFC_CTL_PORT_ER1: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "Port Er1Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET)); break; case LPFC_CTL_PORT_ER2: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "Port Er2Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET)); break; case LPFC_CTL_PDEV_CTL: - len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_CTL_ACC_BUF_SIZE - len, "PDev CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET)); @@ -4811,13 +5149,13 @@ lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer) mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; - len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_MBX_ACC_BUF_SIZE - len, "mbx_dump_map: 0x%08x\n", mbx_dump_map); - len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_MBX_ACC_BUF_SIZE - len, "mbx_dump_cnt: %04d\n", mbx_dump_cnt); - len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_MBX_ACC_BUF_SIZE - len, "mbx_word_cnt: %04d\n", mbx_word_cnt); - len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_MBX_ACC_BUF_SIZE - len, "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd); return len; @@ -4966,35 +5304,35 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len) { uint16_t ext_cnt, ext_size; - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\nAvailable Extents Information:\n"); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tPort Available VPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tPort Available VFI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tPort Available RPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); - len += 
scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tPort Available XRI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Count %3d, Size %3d\n", ext_cnt, ext_size); return len; @@ -5018,55 +5356,55 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len) uint16_t ext_cnt, ext_size; int rc; - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\nAllocated Extents Information:\n"); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tHost Allocated VPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); if (!rc) - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "N/A\n"); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tHost Allocated VFI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); if (!rc) - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "N/A\n"); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tHost Allocated RPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); if (!rc) - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "N/A\n"); - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tHost Allocated XRI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); if (!rc) - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "N/A\n"); return len; @@ -5090,49 +5428,49 @@ lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len) struct lpfc_rsrc_blks *rsrc_blks; int index; - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\nDriver Extents Information:\n"); - len += scnprintf(pbuffer+len, 
LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tVPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) { - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tVFI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list, list) { - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tRPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list, list) { - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\tXRI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list, list) { - len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_EXT_ACC_BUF_SIZE - len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); @@ -5237,6 +5575,180 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes, return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } +static int +lpfc_cgn_buffer_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *buffer = debug->buffer; + uint32_t *ptr; + int cnt, len = 0; + + if (!phba->sli4_hba.pc_sli4_params.E2E_attr || !phba->cgn_i) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "E2E Congestion Mgmt is not supported\n"); + goto out; + } + ptr = (uint32_t *)phba->cgn_i->virt; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Congestion Buffer Header\n"); + /* Dump the first 32 bytes */ + cnt = 32; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "000: %08x %08x %08x %08x %08x %08x %08x %08x\n", + *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), + *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); + ptr += 8; + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Congestion Buffer Data\n"); + while (cnt < sizeof(struct lpfc_cgn_info)) { + if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) { + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "Truncated . . 
.\n"); + break; + } + len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len, + "%03x: %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + cnt, *ptr, *(ptr + 1), *(ptr + 2), + *(ptr + 3), *(ptr + 4), *(ptr + 5), + *(ptr + 6), *(ptr + 7)); + cnt += 32; + ptr += 8; + } +out: + return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); +} + +int +lpfc_cgn_buffer_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + +static int +lpfc_rx_monitor_open(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char *buffer = debug->buffer; + struct rxtable_entry *entry; + int i, len = 0, head, tail, last, start; + + head = atomic_read(&phba->rxtable_idx_head); + while (head == LPFC_RXMONITOR_TABLE_IN_USE) { + /* Table is getting updated */ + msleep(10); + head = atomic_read(&phba->rxtable_idx_head); + } + + tail = atomic_xchg(&phba->rxtable_idx_tail, head); + if (head == tail) { + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + "Rxtable is empty\n"); + goto out; + } + last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY; + start = tail; + + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + " MaxBPI\t Total Data Cmd Total Data Cmpl" + " Latency(us) Avg IO Size\tMax IO Size IO cnt" + " Info BWutil(ms)\n"); +get_table: + for (i = start; i < last; i++) { + entry = &phba->rxtable[i]; + len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len, + "%3d:%12lld %12lld\t%12lld\t" + "%8lldus\t%8lld\t%10lld " + "%8d %2d %2d(%2d)\n", + i, entry->max_bytes_per_interval, + entry->total_bytes, + entry->rcv_bytes, + entry->avg_io_latency, + entry->avg_io_size, + entry->max_read_cnt, + entry->io_cnt, + entry->cmf_info, + entry->timer_utilization, + entry->timer_interval); + } + + if (head != last) { + start = 0; + last = head; + goto get_table; + } +out: + return simple_read_from_buffer(buf, nbytes, ppos, buffer, len); +} + +int +lpfc_rx_monitor_release(struct inode *inode, struct file *file) +{ + struct lpfc_rx_monitor_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -5255,6 +5767,15 @@ static const struct file_operations lpfc_debugfs_op_nodelist = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_ediflist +static const struct file_operations lpfc_debugfs_op_ediflist = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_ediflist_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_multixripools static const struct file_operations lpfc_debugfs_op_multixripools = { .owner = THIS_MODULE, @@ -5324,13 +5845,13 @@ static const struct file_operations lpfc_debugfs_op_scsistat = { .release = lpfc_debugfs_release, }; -#undef lpfc_debugfs_op_nvmektime -static 
const struct file_operations lpfc_debugfs_op_nvmektime = { +#undef lpfc_debugfs_op_ioktime +static const struct file_operations lpfc_debugfs_op_ioktime = { .owner = THIS_MODULE, - .open = lpfc_debugfs_nvmektime_open, + .open = lpfc_debugfs_ioktime_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, - .write = lpfc_debugfs_nvmektime_write, + .write = lpfc_debugfs_ioktime_write, .release = lpfc_debugfs_release, }; @@ -5344,20 +5865,20 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = { .release = lpfc_debugfs_release, }; -#undef lpfc_debugfs_op_cpucheck -static const struct file_operations lpfc_debugfs_op_cpucheck = { +#undef lpfc_debugfs_op_hdwqstat +static const struct file_operations lpfc_debugfs_op_hdwqstat = { .owner = THIS_MODULE, - .open = lpfc_debugfs_cpucheck_open, + .open = lpfc_debugfs_hdwqstat_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_read, - .write = lpfc_debugfs_cpucheck_write, + .write = lpfc_debugfs_hdwqstat_write, .release = lpfc_debugfs_release, }; #undef lpfc_debugfs_op_dif_err static const struct file_operations lpfc_debugfs_op_dif_err = { .owner = THIS_MODULE, - .open = simple_open, + .open = lpfc_debugfs_dif_err_open, .llseek = lpfc_debugfs_lseek, .read = lpfc_debugfs_dif_err_read, .write = lpfc_debugfs_dif_err_write, @@ -5457,6 +5978,32 @@ static const struct file_operations lpfc_idiag_op_extAcc = { .release = lpfc_idiag_cmd_release, }; +#undef lpfc_cgn_buffer_op +static const struct file_operations lpfc_cgn_buffer_op = { + .owner = THIS_MODULE, + .open = lpfc_cgn_buffer_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_cgn_buffer_read, + .release = lpfc_cgn_buffer_release, +}; + +#undef lpfc_rx_monitor_op +static const struct file_operations lpfc_rx_monitor_op = { + .owner = THIS_MODULE, + .open = lpfc_rx_monitor_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_rx_monitor_read, + .release = lpfc_rx_monitor_release, +}; + +#undef lpfc_debugfs_ras_log +static const struct file_operations lpfc_debugfs_ras_log = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_ras_log_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_ras_log_release, +}; #endif /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command @@ -5538,11 +6085,12 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if (i != 0) pr_err("%s\n", line_buf); len = 0; - len += scnprintf(line_buf+len, - LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%03d: ", i); } - len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%08x ", (uint32_t)*pword); pword++; } @@ -5605,11 +6153,12 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); - len += scnprintf(line_buf+len, - LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%03d: ", i); } - len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%08x ", ((uint32_t)*pword) & 0xffffffff); pword++; @@ -5628,19 +6177,19 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); - len += scnprintf(line_buf+len, - LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%03d: ", i); } for (j = 0; j < 
4; j++) { - len += scnprintf(line_buf+len, - LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, "%02x", ((uint8_t)*pbyte) & 0xff); pbyte++; } - len += scnprintf(line_buf+len, - LPFC_MBX_ACC_LBUF_SZ-len, " "); + len += scnprintf(line_buf + len, + LPFC_MBX_ACC_LBUF_SZ - len, " "); } if ((i - 1) % 8) pr_err("%s\n", line_buf); @@ -5681,6 +6230,11 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) if (!lpfc_debugfs_root) { lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL); atomic_set(&lpfc_debugfs_hba_count, 0); + if (!lpfc_debugfs_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0408 Cannot create debugfs root\n"); + goto debug_failed; + } } if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; @@ -5691,6 +6245,11 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) pport_setup = true; phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); + if (!phba->hba_debugfs_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0412 Cannot create debugfs hba\n"); + goto debug_failed; + } atomic_inc(&lpfc_debugfs_hba_count); atomic_set(&phba->debugfs_vport_count, 0); @@ -5707,12 +6266,56 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + /* E2E Congestion Info Buffer */ + snprintf(name, sizeof(name), "cgn_buffer"); + phba->debug_cgn_buffer = + debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_cgn_buffer_op); + if (!phba->debug_cgn_buffer) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6527 Cannot create debugfs" + " cgn_buffer\n"); + goto debug_failed; + } + + /* RX Monitor */ + snprintf(name, sizeof(name), "rx_monitor"); + phba->debug_rx_monitor = + debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_rx_monitor_op); + if (!phba->debug_rx_monitor) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6528 Cannot create debugfs" + " rx_monitor\n"); + goto debug_failed; + } + + /* RAS log */ + snprintf(name, sizeof(name), "ras_log"); + phba->debug_ras_log = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_ras_log); + if (!phba->debug_ras_log) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6148 Cannot create debugfs" + " ras_log\n"); + goto debug_failed; + } + /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_hbqinfo); + if (!phba->debug_hbqinfo) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0411 Cant create debugfs hbqinfo\n"); + goto debug_failed; + } #ifdef LPFC_HDWQ_LOCK_STAT /* Setup lockstat */ @@ -5736,6 +6339,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHBASlim); + if (!phba->debug_dumpHBASlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0413 Cannot create debugfs " + "dumpHBASlim\n"); + goto debug_failed; + } } else phba->debug_dumpHBASlim = NULL; @@ -5747,6 +6356,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHostSlim); + if (!phba->debug_dumpHostSlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0414 Cannot create debugfs " + "dumpHostSlim\n"); + goto debug_failed; + } } else phba->debug_dumpHostSlim = NULL; @@ -5756,6 +6371,11 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, 
&lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrLBA) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0807 Cannot create debugfs InjErrLBA\n"); + goto debug_failed; + } phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; snprintf(name, sizeof(name), "InjErrNPortID"); @@ -5763,48 +6383,88 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrNPortID) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0809 Cannot create debugfs InjErrNPortID\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "InjErrWWPN"); phba->debug_InjErrWWPN = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrWWPN) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0810 Cannot create debugfs InjErrWWPN\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "writeGuardInjErr"); phba->debug_writeGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeGuard) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0802 Cannot create debugfs writeGuard\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "writeAppInjErr"); phba->debug_writeApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0803 Cannot create debugfs writeApp\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "writeRefInjErr"); phba->debug_writeRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0804 Cannot create debugfs writeRef\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "readGuardInjErr"); phba->debug_readGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readGuard) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0808 Cannot create debugfs readGuard\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "readAppInjErr"); phba->debug_readApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0805 Cannot create debugfs readApp\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "readRefInjErr"); phba->debug_readRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0806 Cannot create debugfs readRef\n"); + goto debug_failed; + } /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { @@ -5828,6 +6488,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_slow_ring_trc); + if (!phba->debug_slow_ring_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0415 Cannot create debugfs " + "slow_ring_trace\n"); + goto debug_failed; + } if (!phba->slow_ring_trc) { phba->slow_ring_trc = kmalloc( (sizeof(struct lpfc_debugfs_trc) * @@ -5845,11 +6511,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) lpfc_debugfs_max_slow_ring_trc)); } +#ifdef BUILD_NVME
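+	/* NVME IO tracing below is compiled in only when BUILD_NVME is
+	 * set; the trace ring is sized by the lpfc_debugfs_max_nvmeio_trc
+	 * module parameter and is dumped through the nvmeio_trc debugfs
+	 * file.
+	 */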
"nvmeio_trc"); phba->debug_nvmeio_trc = debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_nvmeio_trc); + if (!phba->debug_nvmeio_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0574 No create debugfs nvmeio_trc\n"); + goto debug_failed; + } atomic_set(&phba->nvmeio_trc_cnt, 0); if (lpfc_debugfs_max_nvmeio_trc) { @@ -5871,7 +6543,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; /* Allocate trace buffer and initialize */ - phba->nvmeio_trc = kzalloc( + phba->nvmeio_trc = kmalloc( (sizeof(struct lpfc_debugfs_nvmeio_trc) * phba->nvmeio_trc_size), GFP_KERNEL); @@ -5881,6 +6553,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) "nvmeio_trc buffer\n"); goto nvmeio_off; } + memset(phba->nvmeio_trc, 0, + (sizeof(struct lpfc_debugfs_nvmeio_trc) * + phba->nvmeio_trc_size)); phba->nvmeio_trc_on = 1; phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc = NULL; @@ -5891,12 +6566,18 @@ nvmeio_off: phba->nvmeio_trc_output_idx = 0; phba->nvmeio_trc = NULL; } +#endif } snprintf(name, sizeof(name), "vport%d", vport->vpi); if (!vport->vport_debugfs_root) { vport->vport_debugfs_root = debugfs_create_dir(name, phba->hba_debugfs_root); + if (!vport->vport_debugfs_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0417 Can't create debugfs\n"); + goto debug_failed; + } atomic_inc(&phba->debugfs_vport_count); } @@ -5933,17 +6614,43 @@ nvmeio_off: debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_disc_trc); + if (!vport->debug_disc_trc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0419 Cannot create debugfs " + "discovery_trace\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "nodelist"); vport->debug_nodelist = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nodelist); + if (!vport->debug_nodelist) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2985 Can't create debugfs nodelist\n"); + goto debug_failed; + } + snprintf(name, sizeof(name), "ediflist"); + vport->debug_ediflist = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_ediflist); + if (!vport->debug_ediflist) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2987 Can't create debugfs ediflist\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "nvmestat"); vport->debug_nvmestat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nvmestat); + if (!vport->debug_nvmestat) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0811 Cannot create debugfs nvmestat\n"); + goto debug_failed; + } snprintf(name, sizeof(name), "scsistat"); vport->debug_scsistat = @@ -5956,17 +6663,27 @@ nvmeio_off: goto debug_failed; } - snprintf(name, sizeof(name), "nvmektime"); - vport->debug_nvmektime = + snprintf(name, sizeof(name), "ioktime"); + vport->debug_ioktime = debugfs_create_file(name, 0644, vport->vport_debugfs_root, - vport, &lpfc_debugfs_op_nvmektime); + vport, &lpfc_debugfs_op_ioktime); + if (!vport->debug_ioktime) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0815 Cannot create debugfs ioktime\n"); + goto debug_failed; + } - snprintf(name, sizeof(name), "cpucheck"); - vport->debug_cpucheck = + snprintf(name, sizeof(name), "hdwqstat"); + vport->debug_hdwqstat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, - vport, &lpfc_debugfs_op_cpucheck); + vport, &lpfc_debugfs_op_hdwqstat); + if (!vport->debug_hdwqstat) { + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0819 Cannot create debugfs hdwqstat\n"); + goto debug_failed; + } /* * The following section is for additional directories/files for the @@ -5986,6 +6703,11 @@ nvmeio_off: if (!phba->idiag_root) { phba->idiag_root = debugfs_create_dir(name, phba->hba_debugfs_root); + if (!phba->idiag_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2922 Can't create idiag debugfs\n"); + goto debug_failed; + } /* Initialize iDiag data structure */ memset(&idiag, 0, sizeof(idiag)); } @@ -5996,6 +6718,11 @@ nvmeio_off: phba->idiag_pci_cfg = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); + if (!phba->idiag_pci_cfg) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2923 Can't create idiag debugfs\n"); + goto debug_failed; + } idiag.offset.last_rd = 0; } @@ -6005,6 +6732,11 @@ nvmeio_off: phba->idiag_bar_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_barAcc); + if (!phba->idiag_bar_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "3056 Can't create idiag debugfs\n"); + goto debug_failed; + } idiag.offset.last_rd = 0; } @@ -6014,6 +6746,11 @@ nvmeio_off: phba->idiag_que_info = debugfs_create_file(name, S_IFREG|S_IRUGO, phba->idiag_root, phba, &lpfc_idiag_op_queInfo); + if (!phba->idiag_que_info) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2924 Can't create idiag debugfs\n"); + goto debug_failed; + } } /* iDiag access PCI function queue */ @@ -6022,6 +6759,11 @@ nvmeio_off: phba->idiag_que_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_queAcc); + if (!phba->idiag_que_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2926 Can't create idiag debugfs\n"); + goto debug_failed; + } } /* iDiag access PCI function doorbell registers */ @@ -6030,6 +6772,11 @@ nvmeio_off: phba->idiag_drb_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); + if (!phba->idiag_drb_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2927 Can't create idiag debugfs\n"); + goto debug_failed; + } } /* iDiag access PCI function control registers */ @@ -6038,6 +6785,11 @@ nvmeio_off: phba->idiag_ctl_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc); + if (!phba->idiag_ctl_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2981 Can't create idiag debugfs\n"); + goto debug_failed; + } } /* iDiag access mbox commands */ @@ -6046,6 +6798,11 @@ nvmeio_off: phba->idiag_mbx_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc); + if (!phba->idiag_mbx_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2980 Can't create idiag debugfs\n"); + goto debug_failed; + } } /* iDiag extents access commands */ @@ -6057,6 +6814,12 @@ nvmeio_off: S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_extAcc); + if (!phba->idiag_ext_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2986 Cant create " + "idiag debugfs\n"); + goto debug_failed; + } } } @@ -6082,8 +6845,13 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct lpfc_hba *phba = vport->phba; - kfree(vport->disc_trc); - vport->disc_trc = NULL; + if (vport->disc_trc) { + kfree(vport->disc_trc); + vport->disc_trc = NULL; + } + + debugfs_remove(vport->debug_ediflist); /* ediflist */ + vport->debug_ediflist = NULL; debugfs_remove(vport->debug_disc_trc); /* discovery_trace 
*/ vport->debug_disc_trc = NULL; @@ -6097,11 +6865,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(vport->debug_scsistat); /* scsistat */ vport->debug_scsistat = NULL; - debugfs_remove(vport->debug_nvmektime); /* nvmektime */ - vport->debug_nvmektime = NULL; + debugfs_remove(vport->debug_ioktime); /* ioktime */ + vport->debug_ioktime = NULL; - debugfs_remove(vport->debug_cpucheck); /* cpucheck */ - vport->debug_cpucheck = NULL; + debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */ + vport->debug_hdwqstat = NULL; if (vport->vport_debugfs_root) { debugfs_remove(vport->vport_debugfs_root); /* vportX */ @@ -6117,6 +6885,15 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; + debugfs_remove(phba->debug_cgn_buffer); + phba->debug_cgn_buffer = NULL; + + debugfs_remove(phba->debug_rx_monitor); + phba->debug_rx_monitor = NULL; + + debugfs_remove(phba->debug_ras_log); + phba->debug_ras_log = NULL; + #ifdef LPFC_HDWQ_LOCK_STAT debugfs_remove(phba->debug_lockstat); /* lockstat */ phba->debug_lockstat = NULL; @@ -6154,8 +6931,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_readRef); /* readRef */ phba->debug_readRef = NULL; - kfree(phba->slow_ring_trc); - phba->slow_ring_trc = NULL; + if (phba->slow_ring_trc) { + kfree(phba->slow_ring_trc); + phba->slow_ring_trc = NULL; + } /* slow_ring_trace */ debugfs_remove(phba->debug_slow_ring_trc); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 20f2537af511..c89c2c4f26b1 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -32,6 +32,10 @@ #define LPFC_NODELIST_SIZE 8192 #define LPFC_NODELIST_ENTRY_SIZE 120 +/* ediflist output buffer size */ +#define LPFC_EDIFLIST_SIZE 8192 +#define LPFC_EDIFLIST_ENTRY_SIZE 120 + /* dumpHBASlim output buffer size */ #define LPFC_DUMPHBASLIM_SIZE 4096 @@ -44,15 +48,27 @@ /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 +/* multixripool output buffer size */ +#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 + +enum { + DUMP_IO, + DUMP_MBX, + DUMP_ELS, + DUMP_NVMELS, +}; + /* nvmestat output buffer size */ #define LPFC_NVMESTAT_SIZE 8192 -#define LPFC_NVMEKTIME_SIZE 8192 -#define LPFC_CPUCHECK_SIZE 8192 +#define LPFC_IOKTIME_SIZE 8192 #define LPFC_NVMEIO_TRC_SIZE 8192 /* scsistat output buffer size */ #define LPFC_SCSISTAT_SIZE 8192 +/* E2E Congestion Info Buffer size */ +#define LPFC_CGN_BUF_SIZE 8192 + #define LPFC_DEBUG_OUT_LINE_SZ 80 /* @@ -279,22 +295,12 @@ struct lpfc_idiag { struct lpfc_idiag_offset offset; void *ptr_private; }; - -#else - -#define lpfc_nvmeio_data(phba, fmt, arg...) 
\ - no_printk(fmt, ##arg) - #endif -/* multixripool output buffer size */ -#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 - -enum { - DUMP_IO, - DUMP_MBX, - DUMP_ELS, - DUMP_NVMELS, +#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY) +struct lpfc_rx_monitor_debug { + char *i_private; + char *buffer; }; /* Mask for discovery_trace */ diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 482e4a888dae..34ce065d081a 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -41,6 +41,10 @@ enum lpfc_work_type { LPFC_EVT_DEV_LOSS, LPFC_EVT_FASTPATH_MGMT_EVT, LPFC_EVT_RESET_HBA, +#ifndef BUILD_BRCMFCOE + LPFC_EVT_REAUTH, +#endif + LPFC_EVT_RECOVER_PORT }; /* structure used to queue event to the discovery tasklet */ @@ -76,8 +80,46 @@ struct lpfc_node_rrqs { unsigned long xri_bitmap[XRI_BITMAP_ULONGS]; }; +struct throttle_history { + /* Throttle log defaults if more than 10 messages per second */ + #define ONE_SEC 1000000 /* one second */ + bool logit; /* True means throttle logging on */ + uint32_t log_messages_lost; /* Messages discarded due to throttle */ + uint32_t log_messages; /* Log messages count within second */ + uint64_t log_start; /* start of message burst in usec */ + char announcement[16]; /* announcement string */ +}; + +struct dhchap_config { + uint16_t auth_tmo; + uint8_t auth_mode; + uint8_t bidirectional; + uint32_t tran_id; + uint32_t hash_id; + uint32_t dh_grp_id; + uint32_t reauth_interval; + uint8_t local_passwd[128]; + uint32_t local_passwd_len; + uint8_t remote_passwd[128]; + uint32_t remote_passwd_len; + uint8_t state; + uint8_t msg; + uint8_t direction; + uint8_t dh_grp_cnt; + uint8_t hash_pri[4]; + uint8_t dh_grp_pri[8]; + uint8_t rand_num[20]; + uint8_t nonce[16]; + uint8_t dhkey[256]; + uint32_t dhkey_len; + unsigned long last_auth; + struct timer_list nlp_reauthfunc; +}; + struct lpfc_nodelist { struct list_head nlp_listp; + struct throttle_history log; /* used to record throttle */ + struct serv_parm fc_sparam; /* buffer for service params */ struct lpfc_name nlp_portname; struct lpfc_name nlp_nodename; uint32_t nlp_flag; /* entry flags */ @@ -115,6 +157,7 @@ struct lpfc_nodelist { u8 nlp_nvme_info; /* NVME NSLER Support */ #define NLP_NVME_NSLER 0x1 /* NVME NSLER device */ + uint32_t first_burst; /* First Burst size for ndlp */ uint16_t nlp_usg_map; /* ndlp management usage bitmap */ #define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ #define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ @@ -128,19 +171,28 @@ struct lpfc_nodelist { struct lpfc_vport *vport; struct lpfc_work_evt els_retry_evt; struct lpfc_work_evt dev_loss_evt; + struct lpfc_work_evt reauth_evt; + struct lpfc_work_evt recovery_evt; struct kref kref; atomic_t cmd_pending; uint32_t cmd_qdepth; +#ifndef NO_APEX +#define MAX_FCP_PRI_LUN 256 + uint8_t fcp_priority[MAX_FCP_PRI_LUN]; /* 512 lun priority */ +#endif unsigned long last_change_time; unsigned long *active_rrqs_xri_bitmap; struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 
uint32_t fc4_prli_sent; uint32_t upcall_flags; #define NLP_WAIT_FOR_UNREG 0x1 +#define NLP_WAIT_FOR_LOGO 0x2 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ + struct dhchap_config dhc_cfg; uint32_t nlp_defer_did; + wait_queue_head_t *logo_waitq; }; struct lpfc_node_rrq { struct list_head list; @@ -159,7 +211,8 @@ struct lpfc_node_rrq { /* Defines for nlp_flag (uint32) */ #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ +#define NLP_SKIP_EXT_DIF 0x00000004 /* Target could support External DIF */ +#define NLP_RELEASE_RPI 0x00000008 /* Release RPI to free pool */ #define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ @@ -169,6 +222,7 @@ struct lpfc_node_rrq { #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ #define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ #define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */ +#define NLP_AUTH_TMO 0x00004000 /* authentication timer is active */ #define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ @@ -185,7 +239,7 @@ struct lpfc_node_rrq { #define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_EXTERNAL_DIF 0x20000000 /* Target could support External DIF */ #define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 66f8867dd837..10c491b0d713 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -55,8 +55,12 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry); static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb); +static void +lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); static int lpfc_max_els_tries = 3; +extern int lpfc_fabric_cgn_frequency; /** * lpfc_els_chk_latt - Check host link attention event for a vport @@ -100,7 +104,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) return 0; /* Pending Link Event during Discovery */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0237 Pending Link Event during " "Discovery: State x%x\n", phba->pport->port_state); @@ -440,8 +444,9 @@ fail_free_mbox: fail: lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0249 Cannot issue Register Fabric login: Err %d\n", err); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0249 Cannot issue Register Fabric login: Err %d\n", + err); return -ENXIO; } @@ -524,8 +529,8 @@ fail: } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0289 Issue Register VFI failed: Err %d\n", rc); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0289 Issue Register VFI failed: Err %d\n", rc); return rc; } @@ -550,7 +555,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport) mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2556 UNREG_VFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return -ENOMEM; @@ -562,7 +567,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2557 UNREG_VFI issue mbox failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); @@ -1041,18 +1046,18 @@ stop_rr_fcf_flogi: if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "2858 FLOGI failure Status:x%x/x%x " - "TMO:x%x Data x%x x%x\n", - irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout, phba->hba_flag, - phba->fcf.fcf_flag); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2858 FLOGI failure Status:x%x/x%x TMO" + ":x%x Data x%x x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout, phba->hba_flag, + phba->fcf.fcf_flag); /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; - lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, "0150 FLOGI failure Status:x%x/x%x " "xri x%x TMO:x%x\n", irsp->ulpStatus, irsp->un.ulpWord[4], @@ -1061,8 +1066,15 @@ stop_rr_fcf_flogi: /* If this is not a loop open failure, bail out */ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == - IOERR_LOOP_OPEN_FAILURE))) + IOERR_LOOP_OPEN_FAILURE))) { + /* FLOGI failure */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0100 FLOGI failure Status:x%x/x%x " + "TMO:x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout); goto flogifail; + } /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); @@ -1132,8 +1144,7 @@ stop_rr_fcf_flogi: else if (!(phba->hba_flag & 
HBA_FCOE_MODE)) rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); else { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_FIP | LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2831 FLOGI response with cleared Fabric " "bit fcf_index 0x%x " "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " @@ -1314,6 +1325,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Can't do SLI4 class2 without support sequence coalescing */ sp->cls2.classValid = 0; sp->cls2.seqDelivery = 0; + + if (phba->cfg_enable_auth) + sp->cmn.fcsp = 1; } else { /* Historical, setting sequential-delivery bit for SLI3 */ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; @@ -1426,6 +1440,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) lpfc_sli_issue_abort_iotag(phba, pring, iocb); } } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + spin_unlock_irq(&phba->hbalock); return 0; @@ -1473,6 +1490,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport) return 0; } + /* Reset the Fabric flag; topology change may have happened */ + vport->fc_flag &= ~FC_FABRIC; if (lpfc_issue_els_flogi(vport, ndlp, 0)) { /* This decrement of reference count to node shall kick off * the release of the node. @@ -1748,7 +1767,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, keep_nlp_state = new_ndlp->nlp_state; lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); - /* interchange the nvme remoteport structs */ + /* For nvme, swap the nrport. */ keep_nrport = new_ndlp->nrport; new_ndlp->nrport = ndlp->nrport; @@ -1934,7 +1953,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2882 RRQ completes to NPort x%x " "with no ndlp. Data: x%x x%x x%x\n", irsp->un.elsreq64.remoteID, @@ -1957,10 +1976,11 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "2881 RRQ failure DID:%06X Status:x%x/x%x\n", - ndlp->nlp_DID, irsp->ulpStatus, - irsp->un.ulpWord[4]); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2881 RRQ failure DID:%06X Status:" + "x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); } out: if (rrq) @@ -2010,7 +2030,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0136 PLOGI completes to NPort x%x " "with no ndlp. 
Data: x%x x%x x%x\n", irsp->un.elsreq64.remoteID, @@ -2059,7 +2079,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || (phba)->pport->cfg_log_verbose & LOG_ELS) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); @@ -2237,6 +2257,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp; char *mode; + u32 loglevel; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -2278,13 +2299,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * could be expected. */ if ((vport->fc_flag & FC_FABRIC) || - (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { mode = KERN_ERR; - else + loglevel = LOG_TRACE_EVENT; + } else { mode = KERN_INFO; + loglevel = LOG_ELS; + } /* PRLI failed */ - lpfc_printf_vlog(vport, mode, LOG_ELS, + lpfc_printf_vlog(vport, mode, loglevel, "2754 PRLI failure DID:%06X Status:x%x/x%x, " "data: x%x\n", ndlp->nlp_DID, irsp->ulpStatus, @@ -2425,7 +2449,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } npr->estabImagePair = 1; npr->readXferRdyDis = 1; - if (vport->cfg_first_burst_size) + if (phba->sli_rev == LPFC_SLI_REV4 && + !(phba->hba_flag & HBA_FCOE_MODE) && + vport->cfg_first_burst_size) npr->writeXferRdyDis = 1; /* For FCP support */ @@ -2477,25 +2503,22 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitPRLI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_PRLI_SND; + + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } /* The vport counters are used for lpfc_scan_finished, but * the ndlp is used to track outstanding PRLIs for different * FC4 types. */ + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; - spin_unlock_irq(shost->host_lock); - lpfc_els_free_iocb(phba, elsiocb); - return 1; - } - /* The driver supports 2 FC4 types. Make sure * a PRLI is issued for all types before exiting. 
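The PRLI hunk above moves the NLP_PRLI_SND flag and counter updates to after a successful lpfc_sli_issue_iocb() call, so the error path no longer has to undo them under the host lock. A minimal sketch of the pattern (hypothetical names, not the driver's API):

	/* Submit first; account only on success, so the error path
	 * has nothing to unwind.
	 */
	if (issue_cmd(hw, cmd) == SUBMIT_ERROR) {
		free_cmd(hw, cmd);	/* no flags or counters to roll back */
		return 1;
	}
	spin_lock_irq(lock);
	node->flags |= CMD_PENDING;	/* set state only after submit succeeds */
	node->cmds_sent++;
	spin_unlock_irq(lock);

The trade-off is that the completion handler can now run before the flag is set, so it must tolerate completing a command whose pending flag is not yet visible.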
@@ -2695,7 +2718,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* ADISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2755 ADISC failure DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); @@ -2806,9 +2829,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = ndlp->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; - struct lpfcMboxq *mbox; unsigned long flags; uint32_t skip_recovery = 0; + int wake_up_waiter = 0; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -2816,6 +2839,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp = &(rspiocb->iocb); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; + if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { + wake_up_waiter = 1; + ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + } spin_unlock_irq(shost->host_lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, @@ -2853,7 +2880,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ if (irsp->ulpStatus) { /* LOGO failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); @@ -2869,32 +2896,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: lpfc_els_free_iocb(phba, cmdiocb); - /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ - if ((vport->fc_flag & FC_PT2PT) && - !(vport->fc_flag & FC_PT2PT_PLOGI)) { - phba->pport->fc_myDID = 0; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { - if (phba->nvmet_support) - lpfc_nvmet_update_targetport(phba); - else - lpfc_nvme_update_localport(phba->pport); - } - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (mbox) { - lpfc_config_link(phba, mbox); - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == - MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); - skip_recovery = 1; - } - } - } + /* At this point, the LOGO processing is complete. NOTE: For a + * pt2pt topology, we are assuming the NPortID will only change + * on link up processing. For a LOGO / PLOGI initiated by the + * Initiator, we are assuming the NPortID is not going to change. + */ + if (wake_up_waiter && ndlp->logo_waitq) + wake_up(ndlp->logo_waitq); /* * If the node is a target, the handling attempts to recover the port. * For any other port type, the rpi is unregistered as an implicit @@ -3008,10 +3018,9 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * This routine is a generic completion callback function for ELS commands. * Specifically, it is the callback function which does not need to perform * any command specific operations. It is currently used by the ELS command - * issuing routines for the ELS State Change Request (SCR), - * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution - * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 
Other than - * certain debug loggings, this callback function simply invokes the + * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel + * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). + * Other than certain debug loggings, this callback function simply invokes the * lpfc_els_chk_latt() routine to check whether link went down during the * discovery process. **/ @@ -3025,109 +3034,134 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp = &rspiocb->iocb; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "ELS cmd cmpl: status:x%x/x%x did:x%x", - irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->un.elsreq64.remoteID); + "ELS cmd cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.elsreq64.remoteID); + /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); + + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(vport); + lpfc_els_free_iocb(phba, cmdiocb); +} + +/** + * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is a generic completion callback function for Discovery ELS cmd. + * Currently used by the ELS command issuing routines for the ELS State Change + * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC), + * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf(). + * These commands will be retried once only for ELS timeout errors. 
+ **/ +static void +lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; + struct lpfc_els_rdf_rsp *prdf; + struct lpfc_dmabuf *pcmd, *prsp; + u32 *pdata; + u32 cmd; + + irsp = &rspiocb->iocb; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ELS cmd cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.elsreq64.remoteID); + + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", + irsp->ulpIoTag, irsp->ulpStatus, + irsp->un.ulpWord[4], irsp->ulpTimeout, + cmdiocb->retry); + + pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; + if (!pcmd) + goto out; + + pdata = (u32 *)pcmd->virt; + if (!pdata) + goto out; + cmd = *pdata; + + /* Only 1 retry for ELS Timeout only */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_SEQUENCE_TIMEOUT)) { + cmdiocb->retry++; + if (cmdiocb->retry <= 1) { + switch (cmd) { + case ELS_CMD_SCR: + lpfc_issue_els_scr(vport, cmdiocb->retry); + break; + case ELS_CMD_EDC: + lpfc_issue_els_edc(vport, cmdiocb->retry); + break; + case ELS_CMD_RDF: + cmdiocb->context1 = NULL; /* save ndlp refcnt */ + lpfc_issue_els_rdf(vport, cmdiocb->retry); + break; + } + goto out; + } + phba->fc_stat.elsRetryExceeded++; + } + if (cmd == ELS_CMD_EDC) { + lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); + return; + } + if (irsp->ulpStatus) { + /* ELS discovery cmd completes with error */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, + "4203 ELS cmd x%x error: x%x x%X\n", cmd, + irsp->ulpStatus, irsp->un.ulpWord[4]); + goto out; + } + + /* The RDF response doesn't have any impact on the running driver + * but the notification descriptors are dumped here for support. + */ + if (cmd == ELS_CMD_RDF) { + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; + if (!prdf) + goto out; + + lpfc_sli_bemem_bcopy(prdf, prdf, sizeof(*prdf)); + + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4677 Fabric RDF Notification Grant Data: " + "%u 0x%08x 0x%08x 0x%08x 0x%08x Reg: %x %x\n", + prdf->rdf_desc.count, + prdf->rdf_desc.desc[0], + prdf->rdf_desc.desc[1], + prdf->rdf_desc.desc[2], + prdf->rdf_desc.desc[3], + phba->cgn_reg_signal, phba->cgn_reg_fpin); + } + + out: /* Check to see if link went down during discovery */ lpfc_els_chk_latt(vport); lpfc_els_free_iocb(phba, cmdiocb); return; } -/** - * lpfc_issue_els_scr - Issue a scr to an node on a vport - * @vport: pointer to a host virtual N_Port data structure. - * @nportid: N_Port identifier to the remote node. - * @retry: number of retries to the command IOCB. - * - * This routine issues a State Change Request (SCR) to a fabric node - * on a @vport. The remote node @nportid is passed into the function. It - * first search the @vport node list to find the matching ndlp. If no such - * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An - * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() - * routine is invoked to send the SCR IOCB. 
- * - * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp - * will be incremented by 1 for holding the ndlp and the reference to ndlp - * will be stored into the context1 field of the IOCB for the completion - * callback function to the SCR ELS command. - * - * Return code - * 0 - Successfully issued scr command - * 1 - Failed to issue scr command - **/ -int -lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) -{ - struct lpfc_hba *phba = vport->phba; - struct lpfc_iocbq *elsiocb; - uint8_t *pcmd; - uint16_t cmdsize; - struct lpfc_nodelist *ndlp; - - cmdsize = (sizeof(uint32_t) + sizeof(SCR)); - - ndlp = lpfc_findnode_did(vport, nportid); - if (!ndlp) { - ndlp = lpfc_nlp_init(vport, nportid); - if (!ndlp) - return 1; - lpfc_enqueue_node(vport, ndlp); - } else if (!NLP_CHK_NODE_ACT(ndlp)) { - ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); - if (!ndlp) - return 1; - } - - elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_SCR); - - if (!elsiocb) { - /* This will trigger the release of the node just - * allocated - */ - lpfc_nlp_put(ndlp); - return 1; - } - - pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); - - *((uint32_t *) (pcmd)) = ELS_CMD_SCR; - pcmd += sizeof(uint32_t); - - /* For SCR, remainder of payload is SCR parameter page */ - memset(pcmd, 0, sizeof(SCR)); - ((SCR *) pcmd)->Function = SCR_FUNC_FULL; - - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue SCR: did:x%x", - ndlp->nlp_DID, 0, 0); - - phba->fc_stat.elsXmitSCR++; - elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { - /* The additional lpfc_nlp_put will cause the following - * lpfc_els_free_iocb routine to trigger the rlease of - * the node. - */ - lpfc_nlp_put(ndlp); - lpfc_els_free_iocb(phba, elsiocb); - return 1; - } - /* This will cause the callback-function lpfc_cmpl_els_cmd to - * trigger the release of node. - */ - if (!(vport->fc_flag & FC_PT2PT)) - lpfc_nlp_put(ndlp); - return 0; -} - /** * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) * or the other nport (pt2pt). @@ -3205,9 +3239,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) event->rscn.rscn_cmd = ELS_RSCN; event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); event->rscn.rscn_plen = cpu_to_be16(cmdsize); - nportid = vport->fc_myDID; - /* appears that page flags must be 0 for fabric to broadcast RSCN */ event->portid.rscn_page_flags = 0; event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; @@ -3234,10 +3266,624 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) */ if (!(vport->fc_flag & FC_PT2PT)) lpfc_nlp_put(ndlp); - return 0; } +/** + * lpfc_issue_els_scr - Issue a scr to an node on a vport + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command IOCB. + * + * This routine issues a State Change Request (SCR) to a fabric node + * on a @vport. The remote node is Fabric Controller (0xfffffd). It + * first search the @vport node list to find the matching ndlp. If no such + * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An + * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() + * routine is invoked to send the SCR IOCB. 
+ * + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp + * will be incremented by 1 for holding the ndlp and the reference to ndlp + * will be stored into the context1 field of the IOCB for the completion + * callback function to the SCR ELS command. + * + * Return code + * 0 - Successfully issued scr command + * 1 - Failed to issue scr command + **/ +int +lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + struct lpfc_nodelist *ndlp; + + cmdsize = (sizeof(uint32_t) + sizeof(SCR)); + + ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); + if (!ndlp) + return 1; + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return 1; + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_SCR); + + if (!elsiocb) { + /* This will trigger the release of the node just + * allocated + */ + lpfc_nlp_put(ndlp); + return 1; + } + + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + + *((uint32_t *) (pcmd)) = ELS_CMD_SCR; + pcmd += sizeof(uint32_t); + + /* For SCR, remainder of payload is SCR parameter page */ + memset(pcmd, 0, sizeof(SCR)); + ((SCR *) pcmd)->Function = SCR_FUNC_FULL; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue SCR: did:x%x", + ndlp->nlp_DID, 0, 0); + + phba->fc_stat.elsXmitSCR++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node. + */ + lpfc_nlp_put(ndlp); + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + + /* Keep the ndlp just in case RDF is being sent */ + return 0; +} + +/** + * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command IOCB. + * + * This routine issues an ELS RDF to the Fabric Controller to register + * for diagnostic functions. + * + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp + * will be incremented by 1 for holding the ndlp and the reference to ndlp + * will be stored into the context1 field of the IOCB for the completion + * callback function to the RDF ELS command. + * + * Return code + * 0 - Successfully issued rdf command + * negative errno - Failed to issue rdf command + **/ +int +lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_els_rdf_req *prdf; + uint16_t cmdsize; + struct lpfc_nodelist *ndlp; + + cmdsize = sizeof(struct lpfc_els_rdf_req); + + ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); + if (!ndlp) { + ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); + if (!ndlp) + return -ENODEV; + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return -ENODEV; + } + + /* RDF ELS is not required on an NPIV VN_Port.
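+	 * Only the physical port registers with the Fabric Controller for
+	 * diagnostic functions; a VN_Port drops the ndlp reference taken
+	 * for the earlier SCR and bails out here.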
 */ + if (vport->port_type == LPFC_NPIV_PORT) { + lpfc_nlp_put(ndlp); /* Cleanup ndlp from SCR */ + return -EACCES; + } + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_RDF); + if (!elsiocb) { + /* This will trigger the release of the node just + * allocated + */ + lpfc_nlp_put(ndlp); + return -ENOMEM; + } + + /* Configure the payload for the supported FPIN events. */ + prdf = (struct lpfc_els_rdf_req *) + (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(prdf, 0, cmdsize); + prdf->word0 = ELS_CMD_RDF; + + /* The desc_list_len is the size of the supported descriptor + * payload (28 bytes). The rdf_desc_len is count + + * 4 notification descriptors. The driver sends all of them and only + * sets the supported types nonzero. + */ + prdf->rdf_desc.rdf_desc_tag = cpu_to_be32(ELS_FPIN_REG); + + /* Set the default supported descriptors and assume an initial + * descriptor length. + */ + /* Use the default list, 4 descriptors */ + prdf->desc_list_len = cpu_to_be32(sizeof(struct rdf_desc)); + prdf->rdf_desc.rdf_desc_len = + cpu_to_be32(sizeof(struct rdf_desc) - 8); + prdf->rdf_desc.desc[0] = cpu_to_be32(FPIN_NOTIFY_LINK_INTEG); + prdf->rdf_desc.desc[1] = cpu_to_be32(FPIN_NOTIFY_DELIVERY); + prdf->rdf_desc.desc[2] = cpu_to_be32(FPIN_NOTIFY_PEER_CGN); + prdf->rdf_desc.desc[3] = cpu_to_be32(FPIN_NOTIFY_CGN); + prdf->rdf_desc.count = cpu_to_be32(LPFC_MAX_NUM_OF_FPIN_TYPES); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue RDF: did:x%x", + ndlp->nlp_DID, 0, 0); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "6444 Xmit RDF to remote " + "NPORT x%x Reg: %x %x\n", + ndlp->nlp_DID, phba->cgn_reg_signal, + phba->cgn_reg_fpin); + + phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; + phba->fc_stat.elsXmitRDF++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node. + */ + lpfc_nlp_put(ndlp); + lpfc_els_free_iocb(phba, elsiocb); + return -EIO; + } + + /* An RDF was issued - this put ensures the ndlp is cleaned up + * when the RDF completes. + */ + lpfc_nlp_put(ndlp); + return 0; +} + +/** + * lpfc_least_capable_settings - helper function for EDC rsp processing + * @phba: pointer to lpfc hba data structure. + * @pcgndet: pointer to congestion detection descriptor in EDC rsp. + * + * This helper routine determines the least capable setting for + * congestion signals, signal freq, including scale, from the + * congestion detection descriptor in the EDC rsp. The routine + * sets @phba values in preparation for a set_features mailbox. + **/ +static void +lpfc_least_capable_settings(struct lpfc_hba *phba, struct cgn_det_desc *pcgndet) +{ + u32 rsp_sig_cap = 0, drv_sig_cap = 0; + u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; + struct lpfc_cgn_info *cp; + u32 crc; + u16 sig_freq; + + /* Get rsp signal and frequency capabilities. */ + rsp_sig_cap = bf_get(xmt_sig_cap, pcgndet); + rsp_sig_freq_cyc = bf_get(xmt_sig_freq_cyc, pcgndet); + rsp_sig_freq_scale = bf_get(xmt_sig_freq_scale, pcgndet); + + /* If the Fport does not support signals, set FPIN only */ + if (rsp_sig_cap == XMT_SIG_CAP_NO_SPT) { + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = RCV_SIG_FREQ_CYC_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | + LPFC_CGN_FPIN_WARN; + return; + } + + /* Apply the xmt scale to the xmt cycle to get the correct frequency.
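+ * For example, a cycle count of 5 at a seconds scale becomes
+ * 5000 ms, while micro- and nanosecond scales are clamped to 1 ms.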
+ * Adapter default is 100 milliseconds. Convert all xmt cycle values + * to milliseconds. + */ + switch (rsp_sig_freq_scale) { + case RCV_SIG_FREQ_SCALE_SEC: + rsp_sig_freq_cyc *= MSEC_PER_SEC; + break; + case RCV_SIG_FREQ_SCALE_USEC: + case RCV_SIG_FREQ_SCALE_NSEC: + rsp_sig_freq_cyc = 1; + break; + case RCV_SIG_FREQ_SCALE_NO_SPT: + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = RCV_SIG_FREQ_CYC_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + return; + default: + /* milliseconds - all good. */ + break; + } + + /* Convenient shorthand. */ + drv_sig_cap = phba->cgn_reg_signal; + + /* Choose the least capable frequency. */ + if (rsp_sig_freq_cyc > phba->cgn_sig_freq) + phba->cgn_sig_freq = rsp_sig_freq_cyc; + + /* There should be some common signal support. Settle on least capable + * signal and adjust FPIN values. Initialize defaults to ease the + * decision. + */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + if (rsp_sig_cap == XMT_SIG_CAP_WARN_SPT && + (drv_sig_cap == LPFC_CGN_SIGNAL_WARN || + drv_sig_cap == LPFC_CGN_SIGNAL_BOTH)) { + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_WARN; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + if (rsp_sig_cap == XMT_SIG_CAP_BOTH_SPT) { + if (drv_sig_cap == LPFC_CGN_SIGNAL_BOTH) { + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_BOTH; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; + } + if (drv_sig_cap == LPFC_CGN_SIGNAL_WARN) { + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_WARN; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + } + + if (!phba->cgn_i) + return; + + /* Update signal frequency in congestion info buffer */ + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + /* Frequency (in ms) at which Signal Warning/Signal Congestion + * Notifications are received by the HBA + */ + sig_freq = phba->cgn_sig_freq; + + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN) + cp->cgn_warn_freq = cpu_to_le16(sig_freq); + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + cp->cgn_alarm_freq = cpu_to_le16(sig_freq); + cp->cgn_warn_freq = cpu_to_le16(sig_freq); + } + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); +} + +/** + * lpfc_cmpl_els_edc - Completion callback function for EDC + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. + * + * This routine is the completion callback function for issuing the Exchange + * Diagnostic Capabilities (EDC) command. The driver issues an EDC to + * notify the FPort of its Congestion and Link Fault capabilities. This + * routine parses the FPort's response and decides on the least common + * values applicable to both FPort and NPort for Warnings and Alarms that + * are communicated via hardware signals.
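+ *
+ * On an error or descriptor-less EDC response, the routine falls
+ * back to FPIN-only congestion notification before updating the
+ * firmware with the final negotiated values.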
+ **/ +static void +lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + IOCB_t *irsp; + struct lpfc_els_edc_rsp *pedc; + struct cgn_det_desc *pcgndet; + struct link_flt_desc *plnkflt; + struct unknown_desc *punk; + struct lpfc_dmabuf *pcmd, *prsp; + u8 *payload; + u32 *pdata, desctag, bytes; + int desc_total_bytes; + bool rcv_cap_desc = false; + + irsp = &rspiocb->iocb; + + lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, + "EDC cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.elsreq64.remoteID); + + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", + irsp->ulpIoTag, irsp->ulpStatus, + irsp->un.ulpWord[4], irsp->ulpTimeout); + + pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; + if (!pcmd) + goto out; + + pdata = (u32 *)pcmd->virt; + if (!pdata) + goto out; + + /* Need to clear signal values, send features MB and RDF with FPIN. */ + if (irsp->ulpStatus) { + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, + "4202 EDC rsp error - sending RDF " + "for FPIN only.\n"); + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = RCV_SIG_FREQ_CYC_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + goto rsp_err; + } + + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + pedc = (struct lpfc_els_edc_rsp *)prsp->virt; + if (!pedc) + goto out; + + lpfc_sli_bemem_bcopy(pedc, pedc, sizeof(*pedc)); + + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "4676 Fabric EDC Rsp Data: " + "0x%08x, 0x%08x, 0x%08x, 0x%08x 0x%08x\n", + pedc->word0, pedc->desc_list_len, + pedc->lnk_srv_req_word2, + pedc->lnk_srv_req_word3, + pedc->lnk_srv_req_word4); + + /* Move to the Descriptors payload and start processing. */ + payload = (u8 *)(pedc) + sizeof(*pedc); + bytes = 0; + + /* Detect EDC response with no descriptors */ + if (pedc->desc_list_len <= 12) { + /* If there are no descriptors use FPIN only */ + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = RCV_SIG_FREQ_CYC_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + goto rsp_err; + } + /* Payload length in bytes is the response descriptor list + * length minus the 12 bytes of Link Service Request + * Information descriptor in the reply. + */ + desc_total_bytes = pedc->desc_list_len - 12; + + lpfc_sli_bemem_bcopy(payload, payload, desc_total_bytes); + while (bytes < desc_total_bytes) { + desctag = *((u32 *)payload); + if (desctag == CGN_DET_CAP_DESC_TAG) { + pcgndet = (struct cgn_det_desc *)payload; + lpfc_printf_log(phba, KERN_INFO, + LOG_ELS | LOG_CGN_MGMT, + "4616 CGN Desc Data: 0x%08x 0x%08x " + "0x%08x 0x%08x 0x%08x 0x%08x\n", + pcgndet->cgn_det_tag, + pcgndet->cgn_desc_len, + pcgndet->word2, + pcgndet->word3, + pcgndet->word4, + pcgndet->word5); + + /* Descriptor length does not account for 8 bytes of + * tag and length words. + */ + bytes += pcgndet->cgn_desc_len + 8; + payload += pcgndet->cgn_desc_len + 8; + + /* Compare driver and Fport capabilities and choose + * least common.
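+ * (e.g., illustratively, both sides advertising warning plus alarm but + * at different cycles settle on both signals at the slower of the two + * cycles).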
+ */ + lpfc_least_capable_settings(phba, pcgndet); + rcv_cap_desc = true; + } else if (desctag == LINK_FLT_CAP_DESC_TAG) { + plnkflt = (struct link_flt_desc *)payload; + lpfc_printf_log(phba, KERN_INFO, + LOG_ELS | LOG_CGN_MGMT, + "4617 Link Fault Desc Data: " + "0x%08x 0x%08x 0x%08x 0x%08x " + "0x%08x\n", + plnkflt->link_flt_cap_tag, + plnkflt->desc_len, + plnkflt->degrade_act_thld, + plnkflt->degrade_deact_thld, + plnkflt->fec_degrade_intvl); + + /* Descriptor length does not account for 8 bytes of + * tag and length words. + */ + bytes += plnkflt->desc_len + 8; + payload += plnkflt->desc_len + 8; + } else { + punk = (struct unknown_desc *)payload; + lpfc_printf_log(phba, KERN_ERR, + LOG_ELS | LOG_CGN_MGMT, + "4919 Unknown Desc Tag x%08x\n", + desctag); + + /* Descriptor length does not account for 8 bytes of + * tag and length words. + */ + bytes += punk->desc_len + 8; + payload += punk->desc_len + 8; + } + } + + if (!rcv_cap_desc) + goto out; + + /* The FW needs to be updated with the negotiated congestion signal + * values. + */ + rsp_err: + lpfc_config_cgn_signal(phba); + + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(phba->pport); + lpfc_els_free_iocb(phba, cmdiocb); + return; + + out: + /* Check to see if link went down during discovery */ + lpfc_els_chk_latt(phba->pport); + lpfc_els_free_iocb(phba, cmdiocb); + + /* If there is an EDC error, move on to RDF */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + lpfc_issue_els_rdf(phba->pport, 0); +} + + /** + * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @retry: retry counter for the command IOCB. + * + * This routine issues an ELS EDC to the F-Port Controller to communicate + * this N_Port's support of hardware signals in its Congestion + * Capabilities Descriptor. + * + * Note: This routine does not check if one or more signals are + * set in the cgn_reg_signal parameter. The caller makes the + * decision to enforce cgn_reg_signal as nonzero or zero depending + * on the conditions. During Fabric requests, the driver + * requires cgn_reg_signal to be nonzero. But a dynamic request + * to set the congestion mode to OFF from Monitor or Manage + * would correctly issue an EDC with no signals enabled to + * turn off switch functionality and then update the FW.
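+ * + * For example (illustrative): with cmf_active_mode set to LPFC_CFG_OFF + * the request advertises RCV_SIG_CAP_NO_SPT with a zero frequency, which + * asks the fabric to stop delivering congestion signals to this port.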
+ * + * Return code + * 0 - Successfully issued edc command + * 1 - Failed to issue edc command + **/ +int +lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_els_edc_req *edc_req; + struct cgn_det_desc *cgn_desc; + u16 cmdsize; + struct lpfc_nodelist *ndlp; + u8 *pcmd = NULL; + u32 edc_req_size, cgn_desc_size; + extern int lpfc_fabric_cgn_frequency; + int rc; + + if (vport->port_type == LPFC_NPIV_PORT) + return -EACCES; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENODEV; + + /* If HBA doesn't support signals, drop into RDF */ + if (!phba->cgn_init_reg_signal) + goto try_rdf; + + edc_req_size = sizeof(struct lpfc_els_edc_req); + cgn_desc_size = sizeof(struct cgn_det_desc); + cmdsize = edc_req_size + cgn_desc_size; + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_EDC); + if (!elsiocb) { + /* This will trigger the release of the node just + * allocated + */ + lpfc_nlp_put(ndlp); + goto try_rdf; + } + + /* Configure the payload for the supported Diagnostics capabilities. */ + pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); + edc_req = (struct lpfc_els_edc_req *)pcmd; + edc_req->word0 = ELS_CMD_EDC; + edc_req->desc_list_len = cpu_to_be32(cgn_desc_size); + + /* Configure the congestion detection capability */ + cgn_desc = ((struct cgn_det_desc *)(pcmd + edc_req_size)); + cgn_desc->cgn_det_tag = cpu_to_be32(CGN_DET_CAP_DESC_TAG); + + /* Descriptor len doesn't include the tag or len fields. */ + cgn_desc->cgn_desc_len = cpu_to_be32(cgn_desc_size - 8); + bf_set(xmt_sig_cap, cgn_desc, XMT_SIG_CAP_NO_SPT); + bf_set(xmt_sig_freq_cyc, cgn_desc, XMT_SIG_FREQ_CYC_NO_SPT); + bf_set(xmt_sig_freq_scale, cgn_desc, XMT_SIG_FREQ_SCALE_NO_SPT); + + if (phba->cmf_active_mode == LPFC_CFG_OFF) { + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_NO_SPT); + bf_set(rcv_sig_freq_cyc, cgn_desc, 0); + bf_set(rcv_sig_freq_scale, cgn_desc, 0); + phba->cgn_sig_freq = 0; + } else { + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN) + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_WARN_SPT); + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_BOTH_SPT); + bf_set(rcv_sig_freq_cyc, cgn_desc, lpfc_fabric_cgn_frequency); + + /* We start negotiation with lpfc_fabric_cgn_frequency; after + * the completion we settle on the higher frequency. + */ + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + bf_set(rcv_sig_freq_scale, cgn_desc, RCV_SIG_FREQ_SCALE_MSEC); + } + + cgn_desc->word2 = cpu_to_be32(cgn_desc->word2); + cgn_desc->word3 = cpu_to_be32(cgn_desc->word3); + cgn_desc->word4 = cpu_to_be32(cgn_desc->word4); + cgn_desc->word5 = cpu_to_be32(cgn_desc->word5); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT, + "4623 Xmit EDC to remote " + "NPORT x%x reg_sig x%x reg_fpin:x%x\n", + ndlp->nlp_DID, phba->cgn_reg_signal, + phba->cgn_reg_fpin); + + phba->fc_stat.elsXmitEDC++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { + /* The additional lpfc_nlp_put will cause the following + * lpfc_els_free_iocb routine to trigger the release of + * the node.
*/ + lpfc_nlp_put(ndlp); + lpfc_els_free_iocb(phba, elsiocb); + goto try_rdf; + } + return 0; +try_rdf: + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + rc = lpfc_issue_els_rdf(vport, 0); + return rc; +} + /** * lpfc_issue_els_farpr - Issue a farp to an node on a vport * @vport: pointer to a host virtual N_Port data structure. @@ -3413,9 +4059,17 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) * to the event associated with the ndlp. **/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_els_retry_delay(unsigned long ptr) +#else lpfc_els_retry_delay(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; +#else struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); +#endif struct lpfc_vport *vport = ndlp->vport; struct lpfc_hba *phba = vport->phba; unsigned long flags; @@ -3537,7 +4191,7 @@ lpfc_link_reset(struct lpfc_vport *vport) "2851 Attempt link reset\n"); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2852 Failed to allocate mbox memory"); return 1; } @@ -3559,7 +4213,7 @@ lpfc_link_reset(struct lpfc_vport *vport) mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2853 Failed to issue INIT_LINK " "mbox command, rc:x%x\n", rc); mempool_free(mbox, phba->mbox_mem_pool); @@ -3663,7 +4317,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; case IOERR_ILLEGAL_COMMAND: - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0124 Retry illegal cmd x%x " "retry:x%x delay:x%x\n", cmd, cmdiocb->retry, delay); @@ -3709,6 +4363,16 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, retry = 1; delay = 100; break; + + case IOERR_SLI_ABORTED: + /* Retry the ELS command if it's a PLOGI. Possibly the + * rport just wasn't ready. + */ + if (cmd == ELS_CMD_PLOGI) { + retry = 1; + maxretry = 2; + } + break; } break; @@ -3735,10 +4399,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has * rejected the PRLI - can't do it now. Delay - * for 1 second and try again - don't care about - * the explanation. + * for 1 second and try again. + * + * However, if the explanation is REQ_UNSUPPORTED there's + * no point in retrying PRLI. */ - if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { + if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && + stat.un.b.lsRjtRsnCodeExp != + LSEXP_REQ_UNSUPPORTED) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; @@ -3773,7 +4441,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0125 FDISC Failed (x%x).
" "Fabric out of resources\n", stat.un.lsRjtError); @@ -3812,7 +4481,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LSEXP_NOTHING_MORE) { vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; retry = 1; - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0820 FLOGI Failed (x%x). " "BBCredit Not Supported\n", stat.un.lsRjtError); @@ -3825,7 +4495,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) ) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0122 FDISC Failed (x%x). " "Fabric Detected Bad WWN\n", stat.un.lsRjtError); @@ -3846,12 +4517,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * on this rport. */ if (stat.un.b.lsRjtRsnCodeExp == - LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; - spin_unlock_irq(shost->host_lock); - retry = 0; - goto out_retry; + LSEXP_REQ_UNSUPPORTED) { + if (cmd == ELS_CMD_PRLI) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; + spin_unlock_irq(shost->host_lock); + retry = 0; + goto out_retry; + } } break; } @@ -4003,7 +4676,7 @@ out_retry: } /* No retry ELS command <elsCmd> to remote NPORT <did> */ if (logerr) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0137 No retry ELS command x%x to remote " "NPORT x%x: Out of Resources: Error:x%x/%x\n", cmd, did, irsp->ulpStatus, @@ -4153,14 +4826,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); - elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); - elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; @@ -4301,6 +4972,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp = &rspiocb->iocb; + if (!vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3177 ELS response failed\n"); + goto out; + } if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -4356,23 +5032,29 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { if (!lpfc_unreg_rpi(vport, ndlp) && - (!(vport->fc_flag & FC_PT2PT)) && - (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || - ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) { - lpfc_printf_vlog(vport, KERN_INFO, - LOG_DISCOVERY, - "0314 PLOGI recov DID x%x " - "Data: x%x x%x x%x\n", - ndlp->nlp_DID, ndlp->nlp_state, - ndlp->nlp_rpi, ndlp->nlp_flag); - mp = mbox->ctx_buf; - if (mp) { - lpfc_mbuf_free(phba, mp->virt, - mp->phys); - kfree(mp); + (!(vport->fc_flag & FC_PT2PT))) { + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + ndlp->nlp_state == + NLP_STE_REG_LOGIN_ISSUE) { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "0314 PLOGI recov " + "DID x%x " + "Data: x%x x%x x%x\n", + ndlp->nlp_DID, + ndlp->nlp_state, + ndlp->nlp_rpi, + ndlp->nlp_flag); + mp = (struct lpfc_dmabuf *) + mbox->ctx_buf; + if (mp) { + lpfc_mbuf_free(phba, mp->virt, + mp->phys); + kfree(mp); + } + mempool_free(mbox, phba->mbox_mem_pool); + goto out; } - mempool_free(mbox, phba->mbox_mem_pool); - goto out; } /* Increment reference count to ndlp to hold 
the @@ -4403,7 +5085,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; /* ELS rsp: Cannot issue reg_login for <NPortid> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -4442,7 +5124,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; spin_unlock_irq(shost->host_lock); /* If the node is not being used by another discovery thread, @@ -4635,6 +5319,15 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, lpfc_els_free_iocb(phba, elsiocb); return 1; } + + /* Xmit ELS ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x\n", + rc, elsiocb->iotag, elsiocb->sli4_xritag, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag); return 0; } @@ -4715,6 +5408,104 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 0; } + /** + * lpfc_issue_els_edc_rsp - Issue an EDC ACC response to the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: NPort to where rsp is directed + * + * This routine issues an EDC ACC RSP to the F-Port Controller to communicate + * this N_Port's support of hardware signals in its Congestion + * Capabilities Descriptor.
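+ * + * The ACC is returned on the originator's exchange: the rx_id and ox_id + * below are copied from the unsolicited EDC so that the response + * completes that same exchange.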
+ * + * Return code + * 0 - Successfully issued edc rsp command + * 1 - Failed to issue edc rsp command + **/ +static int +lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_els_edc_rsp *pedc; + struct cgn_det_desc *cgn_desc; + struct lpfc_iocbq *elsiocb; + IOCB_t *icmd, *cmd; + uint8_t *pcmd; + int cmdsize, rc, cnt; + + cmdsize = sizeof(struct lpfc_els_edc_rsp) + sizeof(struct cgn_det_desc); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + cmd = &cmdiocb->iocb; + icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; + pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + memset(pcmd, 0, cmdsize); + + *((uint32_t *)(pcmd)) = ELS_CMD_ACC; + + pedc = (struct lpfc_els_edc_rsp *)pcmd; + cnt = cmdsize - (2 * sizeof(u32)); + pedc->desc_list_len = cpu_to_be32(cnt); + pedc->lnk_srv_req_word2 = cpu_to_be32(1); + pedc->lnk_srv_req_word3 = cpu_to_be32(sizeof(u32)); + pedc->lnk_srv_req_word4 = ELS_CMD_EDC; + + cgn_desc = (struct cgn_det_desc *)(pcmd + + sizeof(struct lpfc_els_edc_rsp)); + cgn_desc->cgn_det_tag = cpu_to_be32(CGN_DET_CAP_DESC_TAG); + cnt = sizeof(struct cgn_det_desc) - (2 * sizeof(u32)); + cgn_desc->cgn_desc_len = cpu_to_be32(cnt); + bf_set(xmt_sig_cap, cgn_desc, XMT_SIG_CAP_NO_SPT); + bf_set(xmt_sig_freq_cyc, cgn_desc, XMT_SIG_FREQ_CYC_NO_SPT); + bf_set(xmt_sig_freq_scale, cgn_desc, XMT_SIG_FREQ_SCALE_NO_SPT); + + if (phba->cmf_active_mode == LPFC_CFG_OFF) { + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_NO_SPT); + bf_set(rcv_sig_freq_cyc, cgn_desc, 0); + bf_set(rcv_sig_freq_scale, cgn_desc, 0); + phba->cgn_sig_freq = 0; + } else { + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN) + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_WARN_SPT); + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + bf_set(rcv_sig_cap, cgn_desc, RCV_SIG_CAP_BOTH_SPT); + bf_set(rcv_sig_freq_scale, cgn_desc, RCV_SIG_FREQ_SCALE_MSEC); + bf_set(rcv_sig_freq_cyc, cgn_desc, lpfc_fabric_cgn_frequency); + } + + cgn_desc->word2 = cpu_to_be32(cgn_desc->word2); + cgn_desc->word3 = cpu_to_be32(cgn_desc->word3); + cgn_desc->word4 = cpu_to_be32(cgn_desc->word4); + cgn_desc->word5 = cpu_to_be32(cgn_desc->word5); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue EDC ACC: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + + phba->fc_stat.elsXmitACC++; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + + /* Xmit ELS ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x\n", + rc, elsiocb->iotag, elsiocb->sli4_xritag, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag); + + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + return rc; +} + /** * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd * @vport: pointer to a virtual N_Port data structure. 
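The EDC request, the EDC ACC built above, and the RDF payload all use the same FC TLV length convention: a descriptor's length word excludes its own 8-byte tag-plus-length header. A minimal standalone sketch of that accounting follows; the struct and helper names are hypothetical, not from the driver:

#include <linux/types.h>	/* __be32, u8, u32 */
#include <asm/byteorder.h>	/* be32_to_cpu */

struct tlv_hdr {
	__be32 tag;	/* descriptor tag, big-endian on the wire */
	__be32 len;	/* payload length; excludes this 8-byte header */
};

/* Total on-wire size of one descriptor: payload plus the 8-byte header. */
static inline u32 tlv_size(const struct tlv_hdr *hdr)
{
	return be32_to_cpu(hdr->len) + sizeof(*hdr);
}

/* Step to the next descriptor in a contiguous descriptor list. */
static inline const u8 *tlv_next(const u8 *desc)
{
	return desc + tlv_size((const struct tlv_hdr *)desc);
}

This is why the code above sets cgn_desc_len to sizeof(struct cgn_det_desc) - 8 when building a descriptor, and why the parsing loops advance by desc_len + 8.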
@@ -4786,15 +5577,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_els_free_iocb(phba, elsiocb); return 1; } - - /* Xmit ELS ACC response tag <ulpIoTag> */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " - "RPI: x%x, fc_flag x%x\n", - rc, elsiocb->iotag, elsiocb->sli4_xritag, - ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi, vport->fc_flag); return 0; } @@ -5270,6 +6052,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) } } } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6452 Discover PLOGI %d flag x%x\n", + sentplogi, vport->fc_flag); + if (sentplogi) { lpfc_set_disctmo(vport); } @@ -5281,7 +6068,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) return sentplogi; } -static uint32_t +uint32_t lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, uint32_t word0) { @@ -5293,7 +6080,7 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, return sizeof(struct fc_rdp_link_service_desc); } -static uint32_t +uint32_t lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, uint8_t *page_a0, uint8_t *page_a2) { @@ -5358,7 +6145,7 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, return sizeof(struct fc_rdp_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, READ_LNK_VAR *stat) { @@ -5387,7 +6174,7 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, return sizeof(struct fc_rdp_link_error_status_desc); } -static uint32_t +uint32_t lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, struct lpfc_vport *vport) { @@ -5412,7 +6199,7 @@ lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, return sizeof(struct fc_rdp_bbc_desc); } -static uint32_t +uint32_t lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { @@ -5440,7 +6227,7 @@ lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, return sizeof(struct fc_rdp_oed_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) @@ -5469,7 +6256,7 @@ lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, return sizeof(struct fc_rdp_oed_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) @@ -5498,7 +6285,7 @@ lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, return sizeof(struct fc_rdp_oed_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) @@ -5528,7 +6315,7 @@ lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, } -static uint32_t +uint32_t lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) @@ -5557,7 +6344,7 @@ lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, return sizeof(struct fc_rdp_oed_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, uint8_t *page_a0, struct lpfc_vport *vport) { @@ -5571,7 +6358,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, return sizeof(struct fc_rdp_opd_sfp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) { if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) @@ -5588,7 +6375,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR 
*stat) return sizeof(struct fc_fec_rdp_desc); } -static uint32_t +uint32_t lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) { uint16_t rdp_cap = 0; @@ -5657,7 +6444,7 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) return sizeof(struct fc_rdp_port_speed_desc); } -static uint32_t +uint32_t lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport) { @@ -5674,7 +6461,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, return sizeof(struct fc_rdp_port_name_desc); } -static uint32_t +uint32_t lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { @@ -5698,7 +6485,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, return sizeof(struct fc_rdp_port_name_desc); } -static void +void lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, int status) { @@ -5798,7 +6585,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, phba->fc_stat.elsXmitACC++; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); - if (rc == IOCB_ERROR) + if(rc == IOCB_ERROR) lpfc_els_free_iocb(phba, elsiocb); kfree(rdp_context); @@ -5829,9 +6616,10 @@ error: lpfc_els_free_iocb(phba, elsiocb); free_rdp_context: kfree(rdp_context); + return; } -static int +int lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) { LPFC_MBOXQ_t *mbox = NULL; @@ -5889,7 +6677,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_rdp_context *rdp_context; IOCB_t *cmd = NULL; struct ls_rjt stat; - if (phba->sli_rev < LPFC_SLI_REV4 || bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) { @@ -5956,7 +6743,6 @@ error: return 1; } - static void lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { @@ -5968,7 +6754,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct ls_rjt *stat; union lpfc_sli4_cfg_shdr *shdr; struct lpfc_lcb_context *lcb_context; - struct fc_lcb_res_frame *lcb_res; + struct fc_lcb_res_frame *lcb_res; uint32_t cmdsize, shdr_status, shdr_add_status; int rc; @@ -5984,14 +6770,14 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, - "0194 SET_BEACON_CONFIG mailbox " - "completed with status x%x add_status x%x," - " mbx status x%x\n", - shdr_status, shdr_add_status, mb->mbxStatus); + "0194 SET_BEACON_CONFIG mailbox " + "completed with status x%x add_status x%x," + " mbx status x%x\n", + shdr_status, shdr_add_status, mb->mbxStatus); - if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || - (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || - (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { + if (mb->mbxStatus != MBX_SUCCESS || shdr_status || + shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE || + shdr_add_status == ADD_STATUS_INVALID_REQUEST) { mempool_free(pmb, phba->mbox_mem_pool); goto error; } @@ -5999,8 +6785,8 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mempool_free(pmb, phba->mbox_mem_pool); cmdsize = sizeof(struct fc_lcb_res_frame); elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, - lpfc_max_els_tries, ndlp, - ndlp->nlp_DID, ELS_CMD_ACC); + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); /* Decrement the ndlp reference count from previous mbox command */ lpfc_nlp_put(ndlp); @@ -6060,6 +6846,7 @@ 
error: lpfc_els_free_iocb(phba, elsiocb); free_lcb_context: kfree(lcb_context); + return; } static int @@ -6067,7 +6854,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, struct lpfc_lcb_context *lcb_context, uint32_t beacon_state) { - struct lpfc_hba *phba = vport->phba; + struct lpfc_hba *phba = vport->phba; union lpfc_sli4_cfg_shdr *cfg_shdr; LPFC_MBOXQ_t *mbox = NULL; uint32_t len; @@ -6142,7 +6929,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, * * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. * First, the payload of the unsolicited LCB is checked. - * Then based on Subcommand beacon will either turn on or off. + * Then, based on the Subcommand, the beacon will either turn on or off. * * Return code * 0 - Sent the acc response @@ -6152,10 +6939,10 @@ static int lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { - struct lpfc_hba *phba = vport->phba; + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint8_t *lp; - struct fc_lcb_request_frame *beacon; + struct fc_lcb_request_frame *beacon; struct lpfc_lcb_context *lcb_context; uint8_t state, rjt_err; struct ls_rjt stat; @@ -6204,8 +6991,8 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lcb_context->rx_id = cmdiocb->iocb.ulpContext; lcb_context->ndlp = lpfc_nlp_get(ndlp); if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { - lpfc_printf_vlog(ndlp->vport, KERN_ERR, - LOG_ELS, "0193 failed to send mail box"); + lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, + "0193 failed to send mail box"); kfree(lcb_context); lpfc_nlp_put(ndlp); rjt_err = LSRJT_UNABLE_TPC; @@ -6414,7 +7201,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport, rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + payload_len, GFP_KERNEL); if (!rscn_event_data) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0147 Failed to allocate memory for RSCN event\n"); return; } @@ -6499,6 +7286,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (ndlp->nlp_fc4_type & NLP_FC4_NVME && ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) lpfc_nvme_rescan_port(vport, ndlp); + return 0; } @@ -6680,9 +7468,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) /* RSCN processed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0215 RSCN processed Data: x%x x%x x%x x%x\n", + "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", vport->fc_flag, 0, vport->fc_rscn_id_cnt, - vport->port_state); + vport->port_state, vport->num_disc_nodes, + vport->gidft_inp); /* To process RSCN, first compare RSCN data with NameServer */ vport->fc_ns_retry = 0; @@ -6790,7 +7579,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* An FLOGI ELS command <elsCmd> was received from DID <did> in Loop Mode */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0113 An FLOGI ELS command x%x was " "received from DID x%x in Loop Mode\n", cmd, did); @@ -7043,16 +7832,16 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * * This routine is the completion callback function for the MBX_READ_LNK_STAT * mailbox command. This callback function is to actually send the Accept - * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It + * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event.
It * collects the link statistics from the completion of the MBX_READ_LNK_STAT - * mailbox command, constructs the RPS response with the link statistics + * mailbox command, constructs the RLS response with the link statistics * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC - * response to the RPS. + * response to the RLS. * * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp * will be incremented by 1 for holding the ndlp and the reference to ndlp * will be stored into the context1 field of the IOCB for the completion - * callback function to the RPS Accept Response ELS IOCB command. + * callback function to the RLS Accept Response ELS IOCB command. * **/ static void @@ -7073,8 +7862,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); - pmb->ctx_buf = NULL; pmb->ctx_ndlp = NULL; + pmb->ctx_buf = NULL; if (mb->mbxStatus) { mempool_free(pmb, phba->mbox_mem_pool); @@ -7123,109 +7912,13 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_els_free_iocb(phba, elsiocb); } -/** - * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd - * @phba: pointer to lpfc hba data structure. - * @pmb: pointer to the driver internal queue element for mailbox command. - * - * This routine is the completion callback function for the MBX_READ_LNK_STAT - * mailbox command. This callback function is to actually send the Accept - * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It - * collects the link statistics from the completion of the MBX_READ_LNK_STAT - * mailbox command, constructs the RPS response with the link statistics - * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC - * response to the RPS. - * - * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp - * will be incremented by 1 for holding the ndlp and the reference to ndlp - * will be stored into the context1 field of the IOCB for the completion - * callback function to the RPS Accept Response ELS IOCB command. 
- * - **/ -static void -lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) -{ - MAILBOX_t *mb; - IOCB_t *icmd; - RPS_RSP *rps_rsp; - uint8_t *pcmd; - struct lpfc_iocbq *elsiocb; - struct lpfc_nodelist *ndlp; - uint16_t status; - uint16_t oxid; - uint16_t rxid; - uint32_t cmdsize; - - mb = &pmb->u.mb; - - ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; - rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); - oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); - pmb->ctx_ndlp = NULL; - pmb->ctx_buf = NULL; - - if (mb->mbxStatus) { - mempool_free(pmb, phba->mbox_mem_pool); - return; - } - - cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); - mempool_free(pmb, phba->mbox_mem_pool); - elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, - lpfc_max_els_tries, ndlp, - ndlp->nlp_DID, ELS_CMD_ACC); - - /* Decrement the ndlp reference count from previous mbox command */ - lpfc_nlp_put(ndlp); - - if (!elsiocb) - return; - - icmd = &elsiocb->iocb; - icmd->ulpContext = rxid; - icmd->unsli3.rcvsli3.ox_id = oxid; - - pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); - *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof(uint32_t); /* Skip past command */ - rps_rsp = (RPS_RSP *)pcmd; - - if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) - status = 0x10; - else - status = 0x8; - if (phba->pport->fc_flag & FC_FABRIC) - status |= 0x4; - - rps_rsp->rsvd1 = 0; - rps_rsp->portStatus = cpu_to_be16(status); - rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); - rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); - rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); - rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); - rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); - rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); - /* Xmit ELS RPS ACC response tag <ulpIoTag> */ - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, - "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", - elsiocb->iotag, elsiocb->iocb.ulpContext, - ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi); - elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) - lpfc_els_free_iocb(phba, elsiocb); - return; -} - /** * lpfc_els_rcv_rls - Process an unsolicited rls iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. * - * This routine processes Read Port Status (RPL) IOCB received as an + * This routine processes Read Link Status (RLS) IOCB received as an * ELS unsolicited event. It first checks the remote port state. 
If the * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE * state, it invokes the lpfc_els_rsl_reject() routine to send the reject @@ -7247,7 +7940,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) - /* reject the unsolicited RPS request and done with it */ + /* reject the unsolicited RLS request and done with it */ goto reject_out; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); @@ -7295,7 +7988,7 @@ reject_out: * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp * will be incremented by 1 for holding the ndlp and the reference to ndlp * will be stored into the context1 field of the IOCB for the completion - * callback function to the RPS Accept Response ELS IOCB command. + * callback function to the RTV Accept Response ELS IOCB command. * * Return codes * 0 - Successfully processed rtv iocb (currently always return 0) @@ -7314,7 +8007,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) - /* reject the unsolicited RPS request and done with it */ + /* reject the unsolicited RTV request and done with it */ goto reject_out; cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); @@ -7367,84 +8060,7 @@ reject_out: return 0; } -/* lpfc_els_rcv_rps - Process an unsolicited rps iocb - * @vport: pointer to a host virtual N_Port data structure. - * @cmdiocb: pointer to lpfc command iocb data structure. - * @ndlp: pointer to a node-list data structure. - * - * This routine processes Read Port Status (RPS) IOCB received as an - * ELS unsolicited event. It first checks the remote port state. If the - * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE - * state, it invokes the lpfc_els_rsp_reject() routine to send the reject - * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command - * for reading the HBA link statistics. It is for the callback function, - * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command - * to actually sending out RPS Accept (ACC) response. 
- * - * Return codes - * 0 - Successfully processed rps iocb (currently always return 0) - **/ -static int -lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, - struct lpfc_nodelist *ndlp) -{ - struct lpfc_hba *phba = vport->phba; - uint32_t *lp; - uint8_t flag; - LPFC_MBOXQ_t *mbox; - struct lpfc_dmabuf *pcmd; - RPS *rps; - struct ls_rjt stat; - - if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && - (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) - /* reject the unsolicited RPS request and done with it */ - goto reject_out; - - pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; - lp = (uint32_t *) pcmd->virt; - flag = (be32_to_cpu(*lp++) & 0xf); - rps = (RPS *) lp; - - if ((flag == 0) || - ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || - ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, - sizeof(struct lpfc_name)) == 0))) { - - printk("Fix me....\n"); - dump_stack(); - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); - if (mbox) { - lpfc_read_lnk_stat(phba, mbox); - mbox->ctx_buf = (void *)((unsigned long) - ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | - cmdiocb->iocb.ulpContext)); /* rx_id */ - mbox->ctx_ndlp = lpfc_nlp_get(ndlp); - mbox->vport = vport; - mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; - if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) - != MBX_NOT_FINISHED) - /* Mbox completion will send ELS Response */ - return 0; - /* Decrement reference count used for the failed mbox - * command. - */ - lpfc_nlp_put(ndlp); - mempool_free(mbox, phba->mbox_mem_pool); - } - } - -reject_out: - /* issue rejection response */ - stat.un.b.lsRjtRsvd0 = 0; - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; - stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; - stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - return 0; -} - -/* lpfc_issue_els_rrq - Process an unsolicited rps iocb +/* lpfc_issue_els_rrq - Issue an els rrq iocb * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. * @did: DID of the target. @@ -7525,10 +8141,7 @@ int lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, - rrq->nlp_DID); - if (!ndlp) - return 1; - + rrq->nlp_DID); if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) return lpfc_issue_els_rrq(rrq->vport, ndlp, rrq->nlp_DID, rrq); @@ -7842,6 +8455,137 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } +#ifndef BUILD_BRCMFCOE +/** + * lpfc_els_rcv_auth - Process an unsolicited auth iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure.
+ * + * Return code + * 0 - Successfully processed auth iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_auth(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct ls_rjt stat; + + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) + /* reject the unsolicited auth request and be done with it */ + goto reject_out; + + /* Send back ACC */ + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + + return lpfc_auth_handle_cmd(vport, cmdiocb, ndlp); + +reject_out: + /* issue rejection response */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_PORT_LOGIN_REQ; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} +#endif + +/** + * lpfc_els_rcv_edc - Process an unsolicited EDC iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - Successfully processed edc iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_els_edc_req *edc_req; + struct cgn_det_desc *pcgndet; + struct link_flt_desc *plnkflt; + struct unknown_desc *punk; + uint8_t *payload; + uint32_t *ptr; + int sz, cnt; + + payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; + /* Swap payload bytes back to cpu mode. */ + lpfc_sli_bemem_bcopy(payload, payload, LPFC_BPL_SIZE); + + edc_req = (struct lpfc_els_edc_req *)payload; + sz = edc_req->desc_list_len; + + ptr = (uint32_t *)payload; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, + "3319 Rcv EDC payload len %d: x%x x%x x%x\n", sz, + *ptr, *(ptr + 1), *(ptr + 2)); + + /* No signal support unless there is a congestion descriptor */ + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = RCV_SIG_FREQ_CYC_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; + + if (sz <= sizeof(struct lpfc_els_edc_req)) + goto out; + + /* now point to the first descriptor */ + sz -= sizeof(struct lpfc_els_edc_req); + payload += sizeof(struct lpfc_els_edc_req); + punk = (struct unknown_desc *)payload; + + while (sz > 0) { + switch (punk->unk_tag) { + case CGN_DET_CAP_DESC_TAG: + pcgndet = (struct cgn_det_desc *)payload; + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + + /* We start negotiation with lpfc_fabric_cgn_frequency. + * When we process the EDC, we will settle on the + * higher frequency.
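+ * (Illustrative numbers: if this port requests a 100 ms cycle and the + * peer asks for 500 ms, the 500 ms cycle wins as the less capable + * rate.)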
+ */ + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + /* process congestion descriptor */ + lpfc_least_capable_settings(phba, pcgndet); + + sz = 0; /* break from while loop */ + break; + case LINK_FLT_CAP_DESC_TAG: + plnkflt = (struct link_flt_desc *)payload; + /* No action for Link Fault descriptor for now */ + cnt = (2 * sizeof(u32) + plnkflt->desc_len); + sz -= cnt; + + /* goto the next descriptor, if any */ + payload += cnt; + punk = (struct unknown_desc *)payload; + break; + default: + /* A descriptor we are not interested in */ + cnt = (2 * sizeof(u32) + punk->desc_len); + sz -= cnt; + + /* goto the next descriptor, if any */ + payload += cnt; + punk = (struct unknown_desc *)payload; + break; + } + } +out: + /* Need to send back an ACC */ + lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); + + lpfc_config_cgn_signal(phba); + return 0; +} + /** * lpfc_els_timeout - Handler funciton to the els timer * @ptr: holder for the timer function associated data. @@ -7853,9 +8597,17 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. **/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_els_timeout(unsigned long ptr) +#else lpfc_els_timeout(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_vport *vport = (struct lpfc_vport *) ptr; +#else struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); +#endif struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long iflag; @@ -7901,19 +8653,13 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) if (unlikely(!pring)) return; - if ((phba->pport->load_flag & FC_UNLOADING)) + if (phba->pport->load_flag & FC_UNLOADING) return; + spin_lock_irq(&phba->hbalock); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); - if ((phba->pport->load_flag & FC_UNLOADING)) { - if (phba->sli_rev == LPFC_SLI_REV4) - spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); - return; - } - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { cmd = &piocb->iocb; @@ -7959,7 +8705,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { cmd = &piocb->iocb; - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0127 ELS timeout Data: x%x x%x x%x " "x%x\n", els_command, remote_ID, cmd->ulpCommand, cmd->ulpIoTag); @@ -7969,6 +8715,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) spin_unlock_irq(&phba->hbalock); } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + if (!list_empty(&pring->txcmplq)) if (!(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&vport->els_tmofunc, @@ -8068,8 +8817,11 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) lpfc_sli_issue_abort_iotag(phba, pring, piocb); spin_unlock_irqrestore(&phba->hbalock, iflags); } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + if (!list_empty(&abort_list)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3387 abort list for txq not empty\n"); INIT_LIST_HEAD(&abort_list); @@ -8240,7 +8992,7 @@ lpfc_send_els_event(struct lpfc_vport *vport, if (*payload == ELS_CMD_LOGO) { logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); if (!logo_data) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0148 Failed to allocate memory " "for LOGO event\n"); return; @@ -8250,7 +9002,7 @@ 
lpfc_send_els_event(struct lpfc_vport *vport, els_data = kmalloc(sizeof(struct lpfc_els_event_header), GFP_KERNEL); if (!els_data) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0149 Failed to allocate memory " "for ELS event\n"); return; @@ -8298,6 +9050,407 @@ lpfc_send_els_event(struct lpfc_vport *vport, return; } +DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, + FC_LS_TLV_DTAG_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, + FC_FPIN_LI_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, + FC_FPIN_DELI_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, + FC_FPIN_CONGN_EVT_TYPES_INIT); + +DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_sev_nm, + fc_fpin_congn_sev_types, + FC_FPIN_CONGN_SEV_INIT); + +/** + * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port + * @phba: Pointer to phba object. + * @wwnlist: Pointer to the WWPN list in the FPIN payload + * @cnt: count of WWPNs in FPIN payload + * + * This routine is called for LI and PC descriptors. + * The output is limited to 6 log messages, with up to 6 WWPNs per message. + */ +void +lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) +{ + char buf[LPFC_FPIN_WWPN_LINE_SZ]; + __be64 wwn; + u64 wwpn; + int i, len; + int line = 0; + int wcnt = 0; + bool endit = false; + + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); + for (i = 0; i < cnt; i++) { + /* Are we on the last WWPN */ + if (i == (cnt - 1)) + endit = true; + + /* Extract the next WWPN from the payload */ + wwn = *wwnlist++; + wwpn = be64_to_cpu(wwn); + len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ, + " %016llx", wwpn); + + /* Log a message if we are on the last WWPN + * or if we hit the max allowed per message. + */ + wcnt++; + if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { + buf[len] = 0; + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4686 %s\n", buf); + + /* Check if we reached the last WWPN */ + if (endit) + return; + + /* Limit the number of log messages displayed per FPIN */ + line++; + if (line == LPFC_FPIN_WWPN_NUM_LINE) { + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4687 %d WWPNs Truncated\n", + cnt - i - 1); + return; + } + + /* Start over with next log message */ + wcnt = 0; + len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, + "Additional WWPNs:"); + } + } +} + +/** + * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Link Integrity Notification Descriptor. + * + * This function processes a link integrity FPIN event by + * logging a message that includes the list of affected WWPNs. + **/ +static void +lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; + const char *li_evt_str; + u32 li_evt, cnt; + + li_evt = be16_to_cpu(li->event_type); + li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); + cnt = be32_to_cpu(li->pname_count); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4680 FPIN Link Integrity %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "Duration %d mSecs Count %d Port Cnt %d\n", + li_evt_str, li_evt, + be64_to_cpu(li->detecting_wwpn), + be64_to_cpu(li->attached_wwpn), + be32_to_cpu(li->event_threshold), + be32_to_cpu(li->event_count), cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
@phba: Pointer to hba object. + * @tlv: Pointer to the Delivery Notification Descriptor TLV + * + * This function processes a delivery FPIN event by + * logging a message. + **/ +static void +lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; + const char *del_rsn_str; + u32 del_rsn; + __be32 *frame; + + del_rsn = be16_to_cpu(del->deli_reason_code); + del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); + + /* Skip over desc_tag/desc_len header to payload */ + frame = (__be32 *)(del + 1); + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "4681 FPIN Delivery %s (x%x) " + "Detecting PN x%016llx Attached PN x%016llx " + "DiscHdr0 x%08x " + "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " + "DiscHdr4 x%08x DiscHdr5 x%08x\n", + del_rsn_str, del_rsn, + be64_to_cpu(del->detecting_wwpn), + be64_to_cpu(del->attached_wwpn), + be32_to_cpu(frame[0]), + be32_to_cpu(frame[1]), + be32_to_cpu(frame[2]), + be32_to_cpu(frame[3]), + be32_to_cpu(frame[4]), + be32_to_cpu(frame[5])); +} + +/** + * lpfc_els_rcv_fpin_peer_cgn - Process an FPIN Peer Congestion Event. + * @phba: Pointer to hba object. + * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV + * + * This function processes a Peer Congestion FPIN event by + * logging a message that includes the list of impacted WWPNs. + **/ +static void +lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; + const char *pc_evt_str; + u32 pc_evt, cnt; + + pc_evt = be16_to_cpu(pc->event_type); + pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); + cnt = be32_to_cpu(pc->pname_count); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, + "4684 FPIN Peer Congestion %s (x%x) " + "Duration %d mSecs " + "Detecting PN x%016llx Attached PN x%016llx " + "Impacted Port Cnt %d\n", + pc_evt_str, pc_evt, + be32_to_cpu(pc->event_period), + be64_to_cpu(pc->detecting_wwpn), + be64_to_cpu(pc->attached_wwpn), + cnt); + + lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); +} + +/** + * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification + * @phba: Pointer to hba object. + * @tlv: Pointer to the Congestion Notification Descriptor TLV + * + * This function processes an FPIN Congestion Notification. The notification + * could be an Alarm or Warning. This routine feeds that data into the + * driver's running congestion algorithm. It also processes the FPIN by + * logging a message. It returns 1 to indicate that the message should be + * delivered to the upper layer or 0 to indicate that it should not. + **/ +static int +lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) +{ + struct lpfc_cgn_info *cp; + struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; + const char *cgn_evt_str; + u32 cgn_evt; + const char *cgn_sev_str; + u32 cgn_sev; + uint16_t value; + u32 crc; + bool nm_log = false; + int rc = 1; + + cgn_evt = be16_to_cpu(cgn->event_type); + cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); + cgn_sev = cgn->severity; + cgn_sev_str = lpfc_get_fpin_congn_sev_nm(cgn_sev); + + /* The driver only takes action on a Credit Stall or Oversubscription + * event type to engage the IO algorithm. The driver prints an + * unmaskable message only for Lost Credit and Credit Stall.
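+ * (e.g., illustratively, a warning-severity Oversubscription FPIN + * received while CMF is active bumps the warning counters and is + * consumed here with rc = 0 rather than being delivered upward.)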
+ * TODO: Still need to have definition of host action on clear, + * lost credit and device specific event types. + */ + switch (cgn_evt) { + case FPIN_CONGN_LOST_CREDIT: + nm_log = true; + break; + case FPIN_CONGN_CREDIT_STALL: + nm_log = true; + /* fall through */ + case FPIN_CONGN_OVERSUBSCRIPTION: + if (cgn_evt == FPIN_CGN_EVT_OVERSUB) + nm_log = false; + switch (cgn_sev) { + case FPIN_CONGN_SEV_ERROR: + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { + /* Keep track of alarm cnt for cgn_info */ + atomic_inc(&phba->cgn_fabric_alarm_cnt); + /* Keep track of alarm cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_alarm_cnt); + } + goto cleanup; + } + break; + case FPIN_CONGN_SEV_WARNING: + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { + /* Keep track of warning cnt for cgn_info */ + atomic_inc(&phba->cgn_fabric_warn_cnt); + /* Keep track of warning cnt for SYNC_WQE */ + atomic_inc(&phba->cgn_sync_warn_cnt); + } +cleanup: + /* Save frequency in ms */ + phba->cgn_fpin_frequency = + be32_to_cpu(cgn->event_period); + value = phba->cgn_fpin_frequency; + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *) + phba->cgn_i->virt; + if (phba->cgn_reg_fpin & + LPFC_CGN_FPIN_ALARM) + cp->cgn_alarm_freq = + cpu_to_le16(value); + if (phba->cgn_reg_fpin & + LPFC_CGN_FPIN_WARN) + cp->cgn_warn_freq = + cpu_to_le16(value); + crc = lpfc_cgn_calc_crc32 + (cp, + LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + + /* Don't deliver to upper layer since + * driver took action on this tlv. + */ + rc = 0; + } + break; + } + break; + } + + /* Change the log level to unmaskable for the following event types. */ + lpfc_printf_log(phba, (nm_log ? KERN_ERR : KERN_INFO), + LOG_CGN_MGMT | LOG_ELS, + "4683 FPIN CONGESTION %s type %s (x%x) Event " + "Duration %d mSecs\n", + cgn_sev_str, cgn_evt_str, cgn_evt, + be32_to_cpu(cgn->event_period)); + return rc; +} + +void +lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_els_fpin *fpin = (struct lpfc_els_fpin *)p; + struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; + const char *dtag_nm; + int desc_cnt = 0, bytes_remain, cnt; + u32 dtag, deliver = 0; + int len; + + /* FPINs handled only if we are in the right discovery state */ + if (vport->port_state < LPFC_DISC_AUTH) + return; + + /* make sure there is the full fpin header */ + if (fpin_length < sizeof(struct lpfc_els_fpin)) + return; + + /* Sanity check descriptor length. The desc_len does not include + * 1 word for the ELS command and 1 word for the desc_len itself.
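+ * For example (sizes illustrative): a 16-byte descriptor list arrives + * with desc_len = 16 and must be carried in an FPIN of at least + * 16 + 8 = 24 bytes, or it is dropped below.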
+	 */
+	len = be32_to_cpu(fpin->desc_len);
+	if (fpin_length < len + (2 * sizeof(uint32_t))) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+				"4671 Bad FPIN list length %d: %d\n",
+				len, fpin_length);
+		return;
+	}
+
+	tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+	first_tlv = tlv;
+	bytes_remain = fpin_length - offsetof(struct lpfc_els_fpin, fpin_desc);
+	bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+	/* process each descriptor separately */
+	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+		dtag = be32_to_cpu(tlv->desc_tag);
+		switch (dtag) {
+		case ELS_DTAG_LNK_INTEGRITY:
+			lpfc_els_rcv_fpin_li(phba, tlv);
+			deliver = 1;
+			break;
+		case ELS_DTAG_DELIVERY:
+			lpfc_els_rcv_fpin_del(phba, tlv);
+			deliver = 1;
+			break;
+		case ELS_DTAG_PEER_CONGEST:
+			lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
+			deliver = 1;
+			break;
+		case ELS_DTAG_CONGESTION:
+			deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
+			break;
+		default:
+			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+					"4678 unknown FPIN descriptor[%d]: "
+					"tag x%x (%s)\n",
+					desc_cnt, dtag, dtag_nm);
+
+			/* If descriptor is bad, drop the rest of the data */
+			return;
+		}
+		lpfc_cgn_update_stat(phba, dtag);
+		cnt = be32_to_cpu(tlv->desc_len);
+
+		/* Sanity check descriptor length. The desc_len does not include
+		 * 1 word for the desc_tag and 1 word for the desc_len itself.
+		 */
+		len -= (cnt + (2 * sizeof(uint32_t)));
+		if (len < 0) {
+			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+					"4672 Bad FPIN descriptor length "
+					"%d: %d %d %s\n",
+					cnt, len, fpin_length, dtag_nm);
+			return;
+		}
+
+		current_tlv = tlv;
+		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+		tlv = fc_tlv_next_desc(tlv);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 2, 0) || defined(BUILD_RHEL83)
+		/* Format payload such that the FPIN delivered to the
+		 * upper layer is a single descriptor FPIN.
+		 */
+		if (desc_cnt)
+			memcpy(first_tlv, current_tlv,
+			       (cnt + sizeof(struct lpfc_els_fpin)));
+
+		/* Adjust the length so that it only reflects a
+		 * single descriptor FPIN.
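+		 * The descriptor was copied over the first TLV slot above,
+		 * so each fc_host_fpin_rcv() call below hands the transport
+		 * a well-formed one-descriptor FPIN.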
+		 */
+		fpin_length = cnt + sizeof(struct lpfc_els_fpin);
+		fpin->desc_len = cpu_to_be32(fpin_length);
+		fpin_length += sizeof(struct lpfc_els_fpin); /* entire FPIN */
+
+		/* Send every descriptor individually to the upper layer */
+		if (deliver)
+			fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
+					 fpin_length, (char *)fpin);
+#endif
+		desc_cnt++;
+	}
+}
 
 /**
  * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
@@ -8320,7 +9473,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct Scsi_Host *shost;
 	struct lpfc_nodelist *ndlp;
 	struct ls_rjt stat;
-	uint32_t *payload;
+	uint32_t *payload, payload_len;
 	uint32_t cmd, did, newnode;
 	uint8_t rjt_exp, rjt_err = 0, init_link = 0;
 	IOCB_t *icmd = &elsiocb->iocb;
@@ -8331,9 +9484,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 	newnode = 0;
 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+	payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
 	cmd = *payload;
-	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) {
 		lpfc_post_buffer(phba, pring, 1);
+		if (icmd->ulpBdeCount == 2)
+			lpfc_post_buffer(phba, pring, 1);
+	}
 
 	did = icmd->un.rcvels.remoteID;
 	if (icmd->ulpStatus) {
@@ -8401,7 +9558,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	elsiocb->context1 = lpfc_nlp_get(ndlp);
 	elsiocb->vport = vport;
-	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+	/*
+	 * Since RSCN and AUTH commands define
+	 * other bytes in the 4-byte payload, let's mask those off
+	 */
+	if (((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) ||
+	    ((cmd & ELS_CMD_MASK) == ELS_CMD_AUTH)) {
 		cmd &= ELS_CMD_MASK;
 	}
 	/* ELS command <elsCmd> received from NPORT <did> */
@@ -8621,16 +9783,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
-	case ELS_CMD_RPS:
-		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
-				      "RCV RPS: did:x%x/ste:x%x flg:x%x",
-				      did, vport->port_state, ndlp->nlp_flag);
-
-		phba->fc_stat.elsRcvRPS++;
-		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
-		if (newnode)
-			lpfc_nlp_put(ndlp);
-		break;
 	case ELS_CMD_RPL:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 				      "RCV RPL: did:x%x/ste:x%x flg:x%x",
@@ -8685,13 +9837,33 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		rjt_err = LSRJT_UNABLE_TPC;
 		rjt_exp = LSEXP_INVALID_OX_RX;
 		break;
+#ifndef BUILD_BRCMFCOE
+	case ELS_CMD_AUTH:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+				      "RCV AUTH: did:x%x/ste:x%x "
+				      "flg:x%x",
+				      did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvAUTH++;
+		lpfc_els_rcv_auth(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+#endif
 	case ELS_CMD_FPIN:
-		/*
-		 * Received FPIN from fabric - pass it to the
-		 * transport FPIN handler.
- */ - fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len, - (char *)payload); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FPIN: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvFPIN++; + + lpfc_els_rcv_fpin(vport, payload, payload_len); + + /* There are no replies, so no rjt codes */ + break; + case ELS_CMD_EDC: + phba->fc_stat.elsRcvEDC++; + lpfc_els_rcv_edc(vport, elsiocb, ndlp); break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -8703,7 +9875,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; /* Unknown ELS command <elsCmd> received from NPORT <did> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0115 Unknown ELS command x%x " "received from NPORT x%x\n", cmd, did); if (newnode) @@ -8748,7 +9920,7 @@ lsrjt: dropit: if (vport && !(vport->load_flag & FC_UNLOADING)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0111 Dropping received ELS cmd " "Data: x%x x%x x%x\n", icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); @@ -8814,37 +9986,39 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { elsiocb->context2 = bdeBuf1; + if (icmd->ulpBdeCount == 2) + elsiocb->context3 = bdeBuf2; } else { paddr = getPaddr(icmd->un.cont64[0].addrHigh, icmd->un.cont64[0].addrLow); elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, paddr); + if (icmd->ulpBdeCount == 2) { + paddr = getPaddr(icmd->un.cont64[1].addrHigh, + icmd->un.cont64[1].addrLow); + elsiocb->context3 = lpfc_sli_ringpostbuf_get(phba, + pring, + paddr); + } } lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); /* * The different unsolicited event handlers would tell us - * if they are done with "mp" by setting context2 to NULL. + * if they are done with "mp" by setting context2/context3 to NULL. 
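+	 * Any buffer still attached here was not consumed by the handler
+	 * and is returned to the driver's buffer pool below.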
*/ if (elsiocb->context2) { lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); elsiocb->context2 = NULL; } - /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ - if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && - icmd->ulpBdeCount == 2) { - elsiocb->context2 = bdeBuf2; - lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); - /* free mp if we are done with it */ - if (elsiocb->context2) { - lpfc_in_buf_free(phba, elsiocb->context2); - elsiocb->context2 = NULL; - } + if (elsiocb->context3) { + lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context3); + elsiocb->context3 = NULL; } } -static void +void lpfc_start_fdmi(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; @@ -8898,7 +10072,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3334 Delay fc port discovery for %d seconds\n", phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, @@ -8916,7 +10090,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0251 NameServer login: no memory\n"); return; } @@ -8928,7 +10102,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0348 NameServer login: node freed\n"); return; } @@ -8939,7 +10113,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0252 Cannot issue NameServer login\n"); return; } @@ -8976,7 +10150,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_unlock_irq(shost->host_lock); if (mb->mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0915 Register VPI failed : Status: x%x" " upd bit: x%x \n", mb->mbxStatus, mb->un.varRegVpi.upd); @@ -9006,8 +10180,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_vlog(vport, - KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "2732 Failed to issue INIT_VPI" " mailbox command\n"); } else { @@ -9044,6 +10218,21 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { +#ifndef BUILD_BRCMFCOE + ndlp = lpfc_findnode_did(vport, Fabric_DID); + /* check for authentication requirement */ + rc = lpfc_auth_start(vport, ndlp); + if (rc) { + /* + * If the physical port is + * instantiated using FDISC, + * do not start vport discovery. + */ + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } +#else /* * If the physical port is instantiated using * FDISC, do not start vport discovery. 
@@ -9051,6 +10240,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->port_state != LPFC_FDISC) lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); +#endif } } else lpfc_do_scr_ns_plogi(phba, vport); @@ -9095,12 +10285,12 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, lpfc_nlp_put(ndlp); mempool_free(mbox, phba->mbox_mem_pool); - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0253 Register VPI: Can't send mbox\n"); goto mbox_err_exit; } } else { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0254 Register VPI: no memory\n"); goto mbox_err_exit; } @@ -9262,7 +10452,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; /* FDISC failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0126 FDISC failed. (x%x/x%x)\n", irsp->ulpStatus, irsp->un.ulpWord[4]); goto fdisc_failed; @@ -9384,7 +10574,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ELS_CMD_FDISC); if (!elsiocb) { lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0255 Issue FDISC: no IOCB\n"); return 1; } @@ -9438,7 +10628,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0256 Issue FDISC: Cannot send IOCB\n"); return 1; } @@ -9568,9 +10758,17 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * posted event WORKER_FABRIC_BLOCK_TMO. 
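+ * On kernels older than v4.15 the timer core hands the callback its
+ * context as an unsigned long, so the handler casts it back to the
+ * lpfc_hba pointer; on newer kernels the hba is recovered from the
+ * embedded timer_list with from_timer().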
**/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_fabric_block_timeout(unsigned long ptr) +#else lpfc_fabric_block_timeout(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; +#else struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); +#endif unsigned long iflags; uint32_t tmo_posted; @@ -9703,7 +10901,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, { struct ls_rjt stat; - BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); + if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) + BUG(); switch (rspiocb->iocb.ulpStatus) { case IOSTAT_NPORT_RJT: @@ -9970,7 +11169,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (pring && !list_empty(&pring->txq)) + if (pring && (!(list_empty(&pring->txq)))) lpfc_worker_wake_up(phba); return; } @@ -10019,8 +11218,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, "rport in state 0x%x\n", ndlp->nlp_state); return; } - lpfc_printf_log(phba, KERN_ERR, - LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " "flags 0x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index ee70d14e7a9d..070b9c7e1c0e 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -27,15 +27,18 @@ #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> -#include <linux/lockdep.h> +#include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -50,6 +53,9 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" +#ifndef BUILD_BRCMFCOE +#include "lpfc_auth.h" +#endif /* AlpaArray for assignment of scsid for scan-down and bind_method */ static uint8_t lpfcAlpaArray[] = { @@ -72,6 +78,7 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); static int lpfc_fcf_inuse(struct lpfc_hba *); +static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -156,17 +163,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, - "6789 rport name %llx != node port name %llx", - rport->port_name, - wwn_to_u64(ndlp->nlp_portname.u.wwn)); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6789 rport name %llx != node port name %llx", + rport->port_name, + wwn_to_u64(ndlp->nlp_portname.u.wwn)); evtp = &ndlp->dev_loss_evt; if (!list_empty(&evtp->evt_listp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, - "6790 rport name %llx dev_loss_evt pending", - rport->port_name); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6790 rport name %llx dev_loss_evt pending", + rport->port_name); return; } @@ -296,7 +303,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) } if (warn_on) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x Data: x%x x%x x%x\n", @@ -305,7 +312,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } else { - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " "NPort x%06x Data: x%x x%x x%x\n", @@ -518,6 +525,9 @@ lpfc_work_list_done(struct lpfc_hba *phba) { struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; +#ifndef BUILD_BRCMFCOE + struct lpfc_vport *vport = phba->pport; +#endif int free_evt; int fcf_inuse; uint32_t nlp_did; @@ -552,6 +562,26 @@ lpfc_work_list_done(struct lpfc_hba *phba) fcf_inuse, nlp_did); break; +#ifndef BUILD_BRCMFCOE + case LPFC_EVT_REAUTH: + ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + lpfc_auth_timeout_handler(vport, ndlp); + free_evt = 0; + /* decrement the node reference count held for + * this queued work + */ + lpfc_nlp_put(ndlp); + break; +#endif + case LPFC_EVT_RECOVER_PORT: + ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + lpfc_sli_abts_recover_port(ndlp->vport, ndlp); + free_evt = 0; + /* decrement the node reference count held for + * this queued work + */ + lpfc_nlp_put(ndlp); + break; case LPFC_EVT_ONLINE: if (phba->link_state < LPFC_LINK_DOWN) *(int *) (evtp->evt_arg1) = lpfc_online(phba); @@ -756,7 +786,7 @@ lpfc_do_work(void *p) || 
kthread_should_stop())); /* Signal wakeup shall terminate the worker thread */ if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0433 Wakeup on signal: rc=x%x\n", rc); break; } @@ -824,6 +854,13 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) if ((phba->sli_rev < LPFC_SLI_REV4) && (!remove && ndlp->nlp_type & NLP_FABRIC)) continue; +#ifndef BUILD_BRCMFCOE + else if ((phba->sli_rev == LPFC_SLI_REV4) && + (ndlp->nlp_type & NLP_FABRIC) && + (ndlp->dhc_cfg.state == LPFC_AUTH_SUCCESS)) + ndlp->dhc_cfg.state = LPFC_AUTH_UNKNOWN; +#endif + lpfc_disc_state_machine(vport, ndlp, NULL, remove ? NLP_EVT_DEVICE_RM @@ -842,6 +879,7 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) void lpfc_port_link_failure(struct lpfc_vport *vport) { + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); /* Cleanup any outstanding received buffers */ @@ -1021,7 +1059,8 @@ lpfc_linkup_port(struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | - FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); + FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY | + FC_CT_CGN_RPA); vport->fc_flag |= FC_NDISC_ACTIVE; vport->fc_ns_retry = 0; spin_unlock_irq(shost->host_lock); @@ -1087,7 +1126,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Check for error */ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0320 CLEAR_LA mbxStatus error x%x hba " "state x%x\n", mb->mbxStatus, vport->port_state); @@ -1133,12 +1172,13 @@ out: return; } - void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - uint8_t bbscn = 0; + LPFC_MBOXQ_t *sparam_mb; + struct lpfc_dmabuf *sparam_mp; + int rc; if (pmb->u.mb.mbxStatus) goto out; @@ -1163,31 +1203,59 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } /* Start discovery by sending a FLOGI. port_state is identically - * LPFC_FLOGI while waiting for FLOGI cmpl + * LPFC_FLOGI while waiting for FLOGI cmpl. */ if (vport->port_state != LPFC_FLOGI) { - if (phba->bbcredit_support && phba->cfg_enable_bbcr) { - bbscn = bf_get(lpfc_bbscn_def, - &phba->sli4_hba.bbscn_params); - vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; - vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4); + /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if + * bb-credit recovery is in place. 
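+	 * The FLOGI itself is deferred by setting HBA_DEFER_FLOGI below;
+	 * lpfc_mbx_cmpl_read_sparam() issues it and clears the flag once
+	 * the updated service parameters have been read.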
+	 */
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+	    !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		sparam_mb = mempool_alloc(phba->mbox_mem_pool,
+					  GFP_KERNEL);
+		if (!sparam_mb)
+			goto sparam_out;
+
+		rc = lpfc_read_sparam(phba, sparam_mb, 0);
+		if (rc) {
+			mempool_free(sparam_mb, phba->mbox_mem_pool);
+			goto sparam_out;
+		}
+		sparam_mb->vport = vport;
+		sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+		rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			sparam_mp = (struct lpfc_dmabuf *)
+					sparam_mb->ctx_buf;
+			lpfc_mbuf_free(phba, sparam_mp->virt,
+				       sparam_mp->phys);
+			kfree(sparam_mp);
+			sparam_mb->ctx_buf = NULL;
+			mempool_free(sparam_mb, phba->mbox_mem_pool);
+			goto sparam_out;
+		}
+
+		phba->hba_flag |= HBA_DEFER_FLOGI;
+	} else {
+		lpfc_initial_flogi(vport);
 		}
-		lpfc_initial_flogi(vport);
-	} else if (vport->fc_flag & FC_PT2PT) {
-		lpfc_disc_start(vport);
+	} else {
+		if (vport->fc_flag & FC_PT2PT)
+			lpfc_disc_start(vport);
+	}
 	return;
 
 out:
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 			 "0306 CONFIG_LINK mbxStatus error x%x "
 			 "HBA state x%x\n",
 			 pmb->u.mb.mbxStatus, vport->port_state);
+sparam_out:
 	mempool_free(pmb, phba->mbox_mem_pool);
 
 	lpfc_linkdown(phba);
 
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 			 "0200 CONFIG_LINK bad hba state x%x\n",
 			 vport->port_state);
@@ -1223,10 +1291,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	struct lpfc_vport *vport = mboxq->vport;
 
 	if (mboxq->u.mb.mbxStatus) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
-				 "2017 REG_FCFI mbxStatus error x%x "
-				 "HBA state x%x\n",
-				 mboxq->u.mb.mbxStatus, vport->port_state);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "2017 REG_FCFI mbxStatus error x%x "
+				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
+				 vport->port_state);
 		goto fail_out;
 	}
 
@@ -1375,8 +1443,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
 {
 	struct lpfc_fcf_pri *fcf_pri;
 
-	lockdep_assert_held(&phba->hbalock);
-
 	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
 	fcf_pri->fcf_rec.fcf_index = fcf_index;
 	/* FCF record priority */
@@ -1461,8 +1527,6 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
 		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
 		       uint16_t vlan_id, uint32_t flag)
 {
-	lockdep_assert_held(&phba->hbalock);
-
 	/* Copy the fields from the HBA's FCF record */
 	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
 	/* Update other fields of driver FCF record */
@@ -1799,7 +1863,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 * use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manunipulation of random numbers with
 * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * from random32() are taken as the random number generated.
* * Returns true when outcome is for the newly read FCF record should be * chosen; otherwise, return false when outcome is for keeping the previously @@ -1811,7 +1875,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) uint32_t rand_num; /* Get 16-bit uniform random number */ - rand_num = 0xFFFF & prandom_u32(); + rand_num = (0xFFFF & prandom_u32()); /* Decision with probability 1/fcf_cnt */ if ((fcf_cnt * rand_num) < 0xFFFF) @@ -1849,7 +1913,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2524 Failed to get the non-embedded SGE " "virtual address\n"); return NULL; @@ -1865,11 +1929,12 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, if (shdr_status || shdr_add_status) { if (shdr_status == STATUS_FCF_TABLE_EMPTY || if_type == LPFC_SLI_INTF_IF_TYPE_2) - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); else - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2521 READ_FCF_RECORD mailbox failed " "with status x%x add_status x%x, " "mbx\n", shdr_status, shdr_add_status); @@ -2247,7 +2312,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ @@ -2291,7 +2356,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) new_fcf_record, LPFC_FCOE_IGNORE_VID)) { if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != phba->fcf.current_rec.fcf_indx) { - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2862 FCF (x%x) matches property " "of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, @@ -2361,7 +2427,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->pport->fc_flag); goto out; } else - lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2863 New FCF (x%x) matches " "property of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, @@ -2775,10 +2841,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0) && mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, - "2891 Init VFI mailbox failed 0x%x\n", - mboxq->u.mb.mbxStatus); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2891 Init VFI mailbox failed 0x%x\n", + mboxq->u.mb.mbxStatus); mempool_free(mboxq, phba->mbox_mem_pool); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; @@ -2806,7 +2871,7 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport) mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, "2892 Failed to allocate " + LOG_TRACE_EVENT, "2892 Failed to allocate " "init_vfi mailbox\n"); return; } @@ -2814,8 +2879,8 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport) mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; rc = lpfc_sli_issue_mbox(phba, mboxq, 
MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n"); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2893 Failed to issue init_vfi mailbox\n"); mempool_free(mboxq, vport->phba->mbox_mem_pool); } } @@ -2835,10 +2900,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (mboxq->u.mb.mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, - "2609 Init VPI mailbox failed 0x%x\n", - mboxq->u.mb.mbxStatus); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2609 Init VPI mailbox failed 0x%x\n", + mboxq->u.mb.mbxStatus); mempool_free(mboxq, phba->mbox_mem_pool); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; @@ -2852,7 +2916,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) lpfc_printf_vlog(vport, KERN_ERR, - LOG_DISCOVERY, + LOG_TRACE_EVENT, "2731 Cannot find fabric " "controller node\n"); else @@ -2865,7 +2929,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_initial_fdisc(vport); else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2606 No NPIV Fabric support\n"); } mempool_free(mboxq, phba->mbox_mem_pool); @@ -2888,8 +2952,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport) if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { vpi = lpfc_alloc_vpi(vport->phba); if (!vpi) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3303 Failed to obtain vport vpi\n"); lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; @@ -2900,7 +2963,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport) mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, "2607 Failed to allocate " + LOG_TRACE_EVENT, "2607 Failed to allocate " "init_vpi mailbox\n"); return; } @@ -2909,8 +2972,8 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport) mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n"); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2608 Failed to issue init_vpi mailbox\n"); mempool_free(mboxq, vport->phba->mbox_mem_pool); } } @@ -2954,7 +3017,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) lpfc_vport_set_state(vports[i], FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vports[i], KERN_ERR, - LOG_ELS, + LOG_TRACE_EVENT, "0259 No NPIV " "Fabric support\n"); } @@ -2969,6 +3032,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf; struct lpfc_vport *vport = mboxq->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); +#ifndef BUILD_BRCMFCOE + struct lpfc_nodelist *ndlp; + int rc; +#endif /* * VFI not supported for interface type 0, so ignore any mailbox @@ -2978,10 +3045,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0) && mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "2018 REG_VFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2018 REG_VFI 
mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { /* FLOGI failed, use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); @@ -3041,6 +3108,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) else lpfc_disc_start(vport); } else { +#ifndef BUILD_BRCMFCOE + ndlp = lpfc_findnode_did(vport, Fabric_DID); + rc = lpfc_auth_start(vport, ndlp); + if (!rc) + goto out_free_mem; + +#endif lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } @@ -3068,7 +3142,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Check for error */ if (mb->mbxStatus) { /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0319 READ_SPARAM mbxStatus error x%x " "hba state x%x>\n", mb->mbxStatus, vport->port_state); @@ -3076,8 +3150,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto out; } - memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, - sizeof (struct serv_parm)); + memcpy((uint8_t *)sp, (uint8_t *)mp->virt, sizeof(struct serv_parm)); ed_tov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ @@ -3100,6 +3173,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); + + /* Check if sending the FLOGI is being deferred to after we get + * up to date CSPs from MBX_READ_SPARAM. + */ + if (phba->hba_flag & HBA_DEFER_FLOGI) { + lpfc_initial_flogi(vport); + phba->hba_flag &= ~HBA_DEFER_FLOGI; + } return; out: @@ -3230,6 +3311,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) } lpfc_linkup(phba); + sparam_mbox = NULL; + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!sparam_mbox) goto out; @@ -3275,7 +3358,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) GFP_KERNEL); if (unlikely(!fcf_record)) { lpfc_printf_log(phba, KERN_ERR, - LOG_MBOX | LOG_SLI, + LOG_TRACE_EVENT, "2554 Could not allocate memory for " "fcf record\n"); rc = -ENODEV; @@ -3287,7 +3370,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) rc = lpfc_sli4_add_fcf_record(phba, fcf_record); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, - LOG_MBOX | LOG_SLI, + LOG_TRACE_EVENT, "2013 Could not manually add FCF " "record 0, status %d\n", rc); rc = -ENODEV; @@ -3322,10 +3405,14 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) lpfc_sli4_clear_fcf_rr_bmask(phba); } + /* Prepare for LINK up registrations */ + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", vport->port_state, sparam_mbox, cfglink_mbox); lpfc_issue_clear_la(phba, vport); @@ -3447,8 +3534,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->wait_4_mlo_maint_flg); } lpfc_mbx_process_link_up(phba, la); - } else if (attn_type == LPFC_ATT_LINK_DOWN || - attn_type == LPFC_ATT_UNEXP_WWPN) { + + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + + } else 
if ((attn_type == LPFC_ATT_LINK_DOWN) || + (attn_type == LPFC_ATT_UNEXP_WWPN)) { phba->fc_stat.LinkDown++; if (phba->link_flag & LS_LOOPBACK_MODE) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3459,8 +3550,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->pport->port_state, vport->fc_flag); else if (attn_type == LPFC_ATT_UNEXP_WWPN) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, - "1313 Link Down UNEXP WWPN Event x%x received " - "Data: x%x x%x x%x x%x x%x\n", + "1313 Link Down Unexpected FA WWPN Event x%x " + "received Data: x%x x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag, bf_get(lpfc_mbx_read_top_mm, la), @@ -3475,8 +3566,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) bf_get(lpfc_mbx_read_top_fa, la)); lpfc_mbx_issue_link_down(phba); } - if (phba->sli.sli_flag & LPFC_MENLO_MAINT && - attn_type == LPFC_ATT_LINK_UP) { + if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) && + (attn_type == LPFC_ATT_LINK_UP)) { if (phba->link_state != LPFC_LINK_DOWN) { phba->fc_stat.LinkDown++; lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3598,7 +3689,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) break; /* If VPI is busy, reset the HBA */ case 0x9700: - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", vport->vpi, mb->mbxStatus); if (!(phba->pport->load_flag & FC_UNLOADING)) @@ -3636,7 +3727,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1800 Could not issue unreg_vpi\n"); mempool_free(mbox, phba->mbox_mem_pool); vport->unreg_vpi_cmpl = VPORT_ERROR; @@ -3723,7 +3814,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba) pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0542 lpfc_create_static_vport failed to" " allocate mailbox memory\n"); return; @@ -3733,7 +3824,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba) vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); if (!vport_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0543 lpfc_create_static_vport failed to" " allocate vport_info\n"); mempool_free(pmb, phba->mbox_mem_pool); @@ -3794,11 +3885,12 @@ lpfc_create_static_vport(struct lpfc_hba *phba) if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) != VPORT_INFO_REV)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0545 lpfc_create_static_vport bad" - " information header 0x%x 0x%x\n", - le32_to_cpu(vport_info->signature), - le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0545 lpfc_create_static_vport bad" + " information header 0x%x 0x%x\n", + le32_to_cpu(vport_info->signature), + le32_to_cpu(vport_info->rev) & + VPORT_INFO_REV_MASK); goto out; } @@ -3862,7 +3954,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_buf = NULL; if (mb->mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0258 Register Fabric login error: 0x%x\n", 
 			 mb->mbxStatus);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -3921,11 +4013,18 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	return;
 }
 
-/*
- * This routine will issue a GID_FT for each FC4 Type supported
- * by the driver. ALL GID_FTs must complete before discovery is started.
- */
-int
+/**
+ * lpfc_issue_gidft - issue a GID_FT for each FC4 Type supported by the driver
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine will issue a GID_FT for each FC4 Type supported by the driver.
+ * ALL GID_FTs must complete before discovery is started.
+ *
+ * Return value:
+ * 0 - Failure to issue a GID_FT
+ * n - Count of GID_FTs issued and still outstanding
+ **/
+uint32_t
 lpfc_issue_gidft(struct lpfc_vport *vport)
 {
 	/* Good status, issue CT Request to NameServer */
@@ -3935,7 +4034,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
 			/* Cannot issue NameServer FCP Query, so finish up
 			 * discovery
 			 */
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_TRACE_EVENT,
 					 "0604 %s FC TYPE %x %s\n",
 					 "Failed to issue GID_FT to ",
 					 FC_TYPE_FCP,
@@ -3951,7 +4051,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
 			/* Cannot issue NameServer NVME Query, so finish up
 			 * discovery
 			 */
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_TRACE_EVENT,
 					 "0605 %s FC_TYPE %x %s %d\n",
 					 "Failed to issue GID_FT to ",
 					 FC_TYPE_NVME,
@@ -3975,7 +4076,7 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
  * 0 - Failure to issue a GID_PT
  * 1 - GID_PT issued
  **/
-int
+uint32_t
 lpfc_issue_gidpt(struct lpfc_vport *vport)
 {
 	/* Good status, issue CT Request to NameServer */
@@ -3983,7 +4084,7 @@ lpfc_issue_gidpt(struct lpfc_vport *vport)
 		/* Cannot issue NameServer FCP Query, so finish up
 		 * discovery
 		 */
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "0606 %s Port TYPE %x %s\n",
 				 "Failed to issue GID_PT to ",
 				 GID_PT_N_PORT,
@@ -4007,13 +4108,14 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport *vport = pmb->vport;
+	int rc;
 
 	pmb->ctx_buf = NULL;
 	pmb->ctx_ndlp = NULL;
 
 	vport->gidft_inp = 0;
 
 	if (mb->mbxStatus) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "0260 Register NameServer error: 0x%x\n",
 				 mb->mbxStatus);
@@ -4049,7 +4151,7 @@ out:
 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
 			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 			 kref_read(&ndlp->kref),
@@ -4072,7 +4174,25 @@ out:
 				    FC_TYPE_NVME);
 
 		/* Issue SCR just before NameServer GID_FT Query */
-		lpfc_issue_els_scr(vport, SCR_DID, 0);
+		lpfc_issue_els_scr(vport, 0);
+
+		/* Link was bounced or a Fabric LOGO occurred. Start EDC
+		 * with initial FW values provided the congestion mode is
+		 * not off. Note that signals may or may not be supported
+		 * by the adapter; FPIN delivery is requested by default
+		 * when one or both signals are not supported.
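+		 * If the congestion mode is off, an RDF is issued instead
+		 * so the port still registers for FPIN delivery.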
+ */ + if (phba->cmf_active_mode != LPFC_CFG_OFF) { + phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = phba->cgn_init_reg_signal; + rc = lpfc_issue_els_edc(vport, 0); + lpfc_printf_log(phba, KERN_INFO, + LOG_INIT | LOG_ELS | LOG_DISCOVERY, + "4220 EDC issue error x%x, Data: x%x\n", + rc, phba->cgn_init_reg_signal); + } else { + lpfc_issue_els_rdf(vport, 0); + } } vport->fc_ns_retry = 0; @@ -4141,6 +4261,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (vport->load_flag & FC_UNLOADING) return; + if (ndlp->nlp_flag & NLP_EXTERNAL_DIF) + lpfc_edsm_validate_target(vport, ndlp); + ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport || !get_device(&rport->dev)) { dev_printk(KERN_WARNING, &phba->pcidev->dev, @@ -4155,15 +4278,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) rdata->pnode = lpfc_nlp_get(ndlp); if (ndlp->nlp_type & NLP_FCP_TARGET) - rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; if (ndlp->nlp_type & NLP_FCP_INITIATOR) - rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; - if (ndlp->nlp_type & NLP_NVME_INITIATOR) - rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; - if (ndlp->nlp_type & NLP_NVME_TARGET) - rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; - if (ndlp->nlp_type & NLP_NVME_DISCOVERY) - rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; + rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); @@ -4176,6 +4293,32 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; } + + lpfc_throttle_vlog(ndlp->vport, &ndlp->log, LOG_NODE, + "3237 SCSI ID %d " + " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " + " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", + ndlp->nlp_sid, + ndlp->nlp_nodename.u.wwn[0], + ndlp->nlp_nodename.u.wwn[1], + ndlp->nlp_nodename.u.wwn[2], + ndlp->nlp_nodename.u.wwn[3], + ndlp->nlp_nodename.u.wwn[4], + ndlp->nlp_nodename.u.wwn[5], + ndlp->nlp_nodename.u.wwn[6], + ndlp->nlp_nodename.u.wwn[7], + ndlp->nlp_portname.u.wwn[0], + ndlp->nlp_portname.u.wwn[1], + ndlp->nlp_portname.u.wwn[2], + ndlp->nlp_portname.u.wwn[3], + ndlp->nlp_portname.u.wwn[4], + ndlp->nlp_portname.u.wwn[5], + ndlp->nlp_portname.u.wwn[6], + ndlp->nlp_portname.u.wwn[7]); + +#ifndef NO_APEX + lpfc_set_ndlps_fcp_priority(ndlp); +#endif return; } @@ -4184,6 +4327,8 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; struct lpfc_vport *vport = ndlp->vport; + struct lpfc_external_dif_support *dp; + unsigned long flags; if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) return; @@ -4198,6 +4343,22 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) fc_remote_port_delete(rport); + /* + * Cleanup all External DIF paths that match this ndlp. + * They will need to be revalidated if the NPort comes back. 
+ */ + if (ndlp->nlp_flag & NLP_EXTERNAL_DIF) { + + spin_lock_irqsave(&vport->external_dif_lock, flags); + list_for_each_entry(dp, &vport->external_dif_list, listentry) { + if (memcmp(&ndlp->nlp_portname, + (uint8_t *)&dp->portName, + sizeof(struct lpfc_name)) == 0) + lpfc_edsm_set_state(vport, dp, + LPFC_EDSM_NEEDS_INQUIRY); + } + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + } return; } @@ -4258,6 +4419,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || old_state == NLP_STE_UNMAPPED_NODE)) { + /* SCSI-FCP first burst needs to be renegotiated on the + * next successful PRLI completion. + */ + ndlp->first_burst = 0; if (ndlp->rport) { vport->phba->nport_event_cnt++; lpfc_unregister_remote_port(ndlp); @@ -4323,7 +4488,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, GFP_KERNEL); if (!ndlp->lat_data) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0286 lpfc_nlp_state_cleanup failed to " "allocate statistical data buffer DID " "0x%x\n", ndlp->nlp_DID); @@ -4377,8 +4542,8 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, char name1[16], name2[16]; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0904 NPort state transition x%06x, %s -> %s\n", - ndlp->nlp_DID, + "0904 NPort state transition %p x%06x x%x: %s -> %s\n", + ndlp, ndlp->nlp_DID, ndlp->nlp_flag, lpfc_nlp_state_name(name1, sizeof(name1), old_state), lpfc_nlp_state_name(name2, sizeof(name2), state)); @@ -4462,7 +4627,25 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, + (unsigned long)ndlp); +#else timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); +#endif + +#ifndef BUILD_BRCMFCOE + INIT_LIST_HEAD(&ndlp->reauth_evt.evt_listp); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&ndlp->dhc_cfg.nlp_reauthfunc, lpfc_auth_reauth, + (unsigned long)ndlp); +#else + timer_setup(&ndlp->dhc_cfg.nlp_reauthfunc, lpfc_auth_reauth, + 0); +#endif +#endif + INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp); + ndlp->nlp_DID = did; ndlp->vport = vport; ndlp->phba = vport->phba; @@ -4531,7 +4714,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* First preserve the orginal DID, xri_bitmap and some flags */ did = ndlp->nlp_DID; - flag = (ndlp->nlp_flag & NLP_UNREG_INP); + flag = (ndlp->nlp_flag & (NLP_EXTERNAL_DIF | NLP_UNREG_INP)); if (flag & NLP_UNREG_INP) defer_did = ndlp->nlp_defer_did; if (phba->sli_rev == LPFC_SLI_REV4) @@ -4578,8 +4761,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp; free_rpi: - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli4_free_rpi(vport->phba, rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } return NULL; } @@ -4838,6 +5023,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp->nlp_flag & NLP_RELEASE_RPI) { lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } ndlp->nlp_flag &= ~NLP_UNREG_INP; } @@ -4901,7 +5087,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_flag & NLP_RPI_REGISTERED || ndlp->nlp_flag & 
NLP_REG_LOGIN_SEND) { if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " "unregistered nlp_flag x%x " "did x%x\n", @@ -4912,7 +5099,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * no need to queue up another one. */ if (ndlp->nlp_flag & NLP_UNREG_INP) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%x " "Data: x%px\n", @@ -4942,7 +5130,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (!(vport->fc_flag & FC_OFFLINE_MODE))) ndlp->nlp_flag |= NLP_UNREG_INP; - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " "NPort x%x deferred flg x%x " "Data:x%px\n", @@ -4954,6 +5143,29 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; } + } else { + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "1444 Failed to allocate mempool " + "unreg_rpi UNREG x%x, " + "DID x%x, flag x%x, " + "ndlp x%px\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp); + + /* Because mempool_alloc failed, we + * will issue a LOGO here and keep the rpi alive if + * not unloading. + */ + if (!(vport->load_flag & FC_UNLOADING)) { + ndlp->nlp_flag &= ~NLP_UNREG_INP; + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } + + return 1; } lpfc_no_rpi(phba, ndlp); out: @@ -4986,8 +5198,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (!vports) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2884 Vport array allocation failed \n"); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2884 Vport array allocation failed \n"); return; } for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { @@ -5030,9 +5242,10 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport) mempool_free(mbox, phba->mbox_mem_pool); if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, - "1836 Could not issue " - "unreg_login(all_rpis) status %d\n", rc); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1836 Could not issue " + "unreg_login(all_rpis) status %d\n", + rc); } } @@ -5059,7 +5272,7 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport) mempool_free(mbox, phba->mbox_mem_pool); if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1815 Could not issue " "unreg_did (default rpis) status %d\n", rc); @@ -5156,6 +5369,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); + +#ifndef BUILD_BRCMFCOE + del_timer_sync(&ndlp->dhc_cfg.nlp_reauthfunc); + list_del_init(&ndlp->reauth_evt.evt_listp); +#endif + list_del_init(&ndlp->recovery_evt.evt_listp); + lpfc_cleanup_vports_rrqs(vport, ndlp); if (phba->sli_rev == LPFC_SLI_REV4) ndlp->nlp_flag |= NLP_RELEASE_RPI; @@ -5194,12 +5414,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if ((ndlp->nlp_flag & NLP_DEFER_RM) && !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && 
!(ndlp->nlp_flag & NLP_RPI_REGISTERED) && - phba->sli_rev != LPFC_SLI_REV4) { + (phba->sli_rev != LPFC_SLI_REV4)) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x " + "ref %d map:x%x ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -5236,8 +5458,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, "0940 removed node x%px DID x%x " - " rport not null x%px\n", - ndlp, ndlp->nlp_DID, ndlp->rport); + "rpi %d rport not null x%px\n", + ndlp, ndlp->nlp_DID, ndlp->nlp_rpi, + ndlp->rport); rport = ndlp->rport; rdata = rport->dd_data; rdata->pnode = NULL; @@ -5330,19 +5553,6 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) return NULL; } -struct lpfc_nodelist * -lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) -{ - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - struct lpfc_nodelist *ndlp; - unsigned long iflags; - - spin_lock_irqsave(shost->host_lock, iflags); - ndlp = __lpfc_findnode_did(vport, did); - spin_unlock_irqrestore(shost->host_lock, iflags); - return ndlp; -} - struct lpfc_nodelist * lpfc_findnode_mapped(struct lpfc_vport *vport) { @@ -5378,6 +5588,19 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) return NULL; } +struct lpfc_nodelist * +lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + unsigned long iflags; + + spin_lock_irqsave(shost->host_lock, iflags); + ndlp = __lpfc_findnode_did(vport, did); + spin_unlock_irqrestore(shost->host_lock, iflags); + return ndlp; +} + struct lpfc_nodelist * lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) { @@ -5395,6 +5618,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (!ndlp) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6453 Setup New Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5408,6 +5638,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) "0014 Could not enable ndlp\n"); return NULL; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6454 Setup Enabled Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5427,6 +5663,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) */ lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6455 Setup RSCN Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + /* NVME Target mode waits until rport is known to be * impacted by the RSCN before it transitions. 
No * active management - just go to NPR provided the @@ -5449,9 +5691,21 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); - } else + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6456 Skip Setup RSCN Node x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); ndlp = NULL; + } } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6457 Setup Active Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + /* If the initiator received a PLOGI from this NPort or if the * initiator is already in the process of discovery on it, * there's no need to try to discover it again. @@ -5603,10 +5857,10 @@ lpfc_disc_start(struct lpfc_vport *vport) /* Start Discovery state <hba_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0202 Start Discovery hba state x%x " - "Data: x%x x%x x%x\n", + "0202 Start Discovery port state x%x " + "flg x%x Data: x%x x%x x%x\n", vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, - vport->fc_adisc_cnt); + vport->fc_adisc_cnt, vport->fc_npr_cnt); /* First do ADISCs - if any */ num_sent = lpfc_els_disc_adisc(vport); @@ -5715,6 +5969,9 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) } spin_unlock_irq(&phba->hbalock); + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); @@ -5763,9 +6020,17 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) */ /*****************************************************************************/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_disc_timeout(unsigned long ptr) +#else lpfc_disc_timeout(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_vport *vport = (struct lpfc_vport *) ptr; +#else struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); +#endif struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long flags = 0; @@ -5846,7 +6111,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) case LPFC_FLOGI: /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ /* Initial FLOGI timeout */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0222 Initial %s timeout\n", vport->vpi ? 
"FDISC" : "FLOGI"); @@ -5864,7 +6130,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) case LPFC_FABRIC_CFG_LINK: /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0223 Timeout while waiting for " "NameServer login\n"); /* Next look for NameServer ndlp */ @@ -5877,7 +6144,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) case LPFC_NS_QRY: /* Check for wait for NameServer Rsp timeout */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0224 NameServer Query timeout " "Data: x%x x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY); @@ -5886,8 +6154,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Try it one more time */ vport->fc_ns_retry++; vport->gidft_inp = 0; - rc = lpfc_issue_gidft(vport); - if (rc == 0) + if (lpfc_issue_gidft(vport) != 0) break; } vport->fc_ns_retry = 0; @@ -5910,7 +6177,8 @@ restart_disc: /* Setup and issue mailbox INITIALIZE LINK command */ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0206 Device Discovery " "completion error\n"); phba->link_state = LPFC_HBA_ERROR; @@ -5932,7 +6200,8 @@ restart_disc: case LPFC_DISC_AUTH: /* Node Authentication timeout */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0227 Node Authentication timeout\n"); lpfc_disc_flush_list(vport); @@ -5952,7 +6221,8 @@ restart_disc: case LPFC_VPORT_READY: if (vport->fc_flag & FC_RSCN_MODE) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0231 RSCN timeout Data: x%x " "x%x\n", vport->fc_ns_retry, LPFC_MAX_NS_RETRY); @@ -5966,7 +6236,8 @@ restart_disc: break; default: - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0273 Unexpected discovery timeout, " "vport State x%x\n", vport->port_state); break; @@ -5975,7 +6246,8 @@ restart_disc: switch (phba->link_state) { case LPFC_CLEAR_LA: /* CLEAR LA timeout */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0228 CLEAR LA timeout\n"); clrlaerr = 1; break; @@ -5989,7 +6261,8 @@ restart_disc: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0230 Unexpected timeout, hba link " "state x%x\n", phba->link_state); clrlaerr = 1; @@ -6034,7 +6307,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), @@ -6045,10 +6318,12 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * DHBA -> DPRT -> RHBA -> RPA (physical port) * DPRT -> RPRT (vports) */ - if (vport->port_type == LPFC_PHYSICAL_PORT) + if (vport->port_type == LPFC_PHYSICAL_PORT) { + vport->fc_flag &= ~FC_CT_CGN_RPA; /* For extra ELXE2EM RPA */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); - else + } else { 
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + } /* decrement the node reference count held for this callback @@ -6085,17 +6360,19 @@ static struct lpfc_nodelist * __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { struct lpfc_nodelist *ndlp; + uint32_t data1; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (filter(ndlp, param)) { + data1 = (((uint32_t) ndlp->nlp_state << 24) | + ((uint32_t) ndlp->nlp_xri << 16) | + ((uint32_t) ndlp->nlp_type << 8) | + ((uint32_t) ndlp->nlp_rpi & 0xff)); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " - "xri x%x type x%x rpi x%x\n", + "Data: x%px x%x x%x x%x\n", filter, ndlp, ndlp->nlp_DID, - ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_xri, ndlp->nlp_type, - ndlp->nlp_rpi); + ndlp->nlp_flag, data1); return ndlp; } } @@ -6174,15 +6451,15 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) * Translate the physical vpi to the logical vpi. The * vport stores the logical vpi. */ - for (i = 0; i < phba->max_vpi; i++) { + for (i = 0; i <= phba->max_vpi; i++) { if (vpi == phba->vpi_ids[i]) break; } - if (i >= phba->max_vpi) { - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "2936 Could not find Vport mapped " - "to vpi %d\n", vpi); + if (i > phba->max_vpi) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2936 Could not find Vport mapped " + "to vpi %d\n", vpi); return NULL; } } @@ -6223,12 +6500,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0007 rpi:%x DID:%x flg:%x refcnt:%d " - "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, - ndlp->nlp_flag, - kref_read(&ndlp->kref), - ndlp->nlp_usg_map, ndlp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:%x " + "flg:x%x refcnt:%d map:x%x\n", + ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, kref_read(&ndlp->kref), + ndlp->nlp_usg_map); ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, @@ -6457,7 +6734,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) goto out; } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { ret = 1; - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "2624 RPI %x DID %x flag %x " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, @@ -6485,10 +6763,10 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (mboxq->u.mb.mbxStatus) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2555 UNREG_VFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2555 UNREG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~FC_VFI_REGISTERED; @@ -6510,10 +6788,10 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) struct lpfc_vport *vport = mboxq->vport; if (mboxq->u.mb.mbxStatus) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2550 UNREG_FCFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2550 UNREG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); } 
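The lpfc_disc_timeout() version shim a few hunks back only covers the callback side; the arming side switches APIs the same way. A minimal sketch of that pairing, assuming the timer is the vport->fc_disctmo field named in the from_timer() call (illustrative, not taken from the patch):

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
	/* Pre-4.15 API: the callback receives an opaque unsigned long. */
	setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
		    (unsigned long)vport);
#else
	/* 4.15+ API: the callback receives the struct timer_list pointer,
	 * and from_timer(), a container_of() wrapper, recovers the vport.
	 */
	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
#endif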
mempool_free(mboxq, phba->mbox_mem_pool); return; @@ -6602,7 +6880,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2551 UNREG_FCFI mbox allocation failed" "HBA state x%x\n", phba->pport->port_state); return -ENOMEM; @@ -6613,7 +6891,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2552 Unregister FCFI command failed rc x%x " "HBA state x%x\n", rc, phba->pport->port_state); @@ -6637,7 +6915,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) /* Preparation for unregistering fcf */ rc = lpfc_unregister_fcf_prep(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2748 Failed to prepare for unregistering " "HBA's FCF record: rc=%d\n", rc); return; @@ -6673,7 +6951,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2553 lpfc_unregister_unused_fcf failed " "to read FCF record HBA state x%x\n", phba->pport->port_state); @@ -6695,7 +6973,7 @@ lpfc_unregister_fcf(struct lpfc_hba *phba) /* Preparation for unregistering fcf */ rc = lpfc_unregister_fcf_prep(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2749 Failed to prepare for unregistering " "HBA's FCF record: rc=%d\n", rc); return; @@ -6782,9 +7060,9 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), GFP_KERNEL); if (!conn_entry) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2566 Failed to allocate connection" - " table entry\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2566 Failed to allocate connection" + " table entry\n"); return; } @@ -6928,7 +7206,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba, /* Check the region signature first */ if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2567 Config region 23 has bad signature\n"); return; } @@ -6937,8 +7215,8 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba, /* Check the data structure version */ if (buff[offset] != LPFC_REGION23_VERSION) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2568 Config region 23 has bad version\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2568 Config region 23 has bad version\n"); return; } offset += 4; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 436cdc8c5ef4..af1abc9d0b5f 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -22,7 +22,7 @@ #define FDMI_DID 0xfffffaU #define NameServer_DID 0xfffffcU -#define SCR_DID 0xfffffdU +#define Fabric_Cntl_DID 0xfffffdU #define Fabric_DID 0xfffffeU #define Bcast_DID 0xffffffU #define Mask_DID 0xffffffU @@ -404,7 +404,8 @@ struct csp { uint16_t huntgroup:1; /* FC Word 1, bit 23 */ uint16_t simplex:1; /* FC Word 1, bit 22 */ - uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ + uint16_t fcsp:1; /* FC Word 1, bit 21 */ + uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */ uint16_t dhd:1; /* FC Word 1, bit 18 */ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ uint16_t payloadlength:1; /* FC Word 1, bit 16 */ @@ -421,7 +422,8 @@ struct csp { uint16_t payloadlength:1; /* FC Word 1, bit 16 */ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ uint16_t dhd:1; /* FC Word 1, bit 18 */ - uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ + uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */ + uint16_t fcsp:1; /* FC Word 1, bit 21 */ uint16_t simplex:1; /* FC Word 1, bit 22 */ uint16_t huntgroup:1; /* FC Word 1, bit 23 */ #endif @@ -560,10 +562,10 @@ struct fc_vft_header { #define fc_vft_hdr_hopct_WORD word1 }; -#include <uapi/scsi/fc/fc_els.h> - /* * Extended Link Service LS_COMMAND codes (Payload Word 0) + * The supported payload size is also hard-coded into this word + * when necessary. */ #ifdef __BIG_ENDIAN_BITFIELD #define ELS_CMD_MASK 0xffff0000 @@ -587,7 +589,10 @@ struct fc_vft_header { #define ELS_CMD_TEST 0x11000000 #define ELS_CMD_RRQ 0x12000000 #define ELS_CMD_REC 0x13000000 +#define ELS_CMD_FPIN 0x16000000 +#define ELS_CMD_EDC 0x17000000 #define ELS_CMD_RDP 0x18000000 +#define ELS_CMD_RDF 0x19000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_NVMEPRLI 0x20140018 #define ELS_CMD_PRLO 0x21100014 @@ -597,16 +602,16 @@ struct fc_vft_header { #define ELS_CMD_ADISC 0x52000000 #define ELS_CMD_FARP 0x54000000 #define ELS_CMD_FARPR 0x55000000 -#define ELS_CMD_RPS 0x56000000 #define ELS_CMD_RPL 0x57000000 #define ELS_CMD_FAN 0x60000000 -#define ELS_CMD_RSCN 0x61040000 #define ELS_CMD_RSCN_XMT 0x61040008 +#define ELS_CMD_RSCN 0x61040000 #define ELS_CMD_SCR 0x62000000 #define ELS_CMD_RNID 0x78000000 #define ELS_CMD_LIRR 0x7A000000 #define ELS_CMD_LCB 0x81000000 -#define ELS_CMD_FPIN 0x16000000 +#define ELS_CMD_AUTH 0x90000000 +#define ELS_CMD_RAW 0xff000000 #else /* __LITTLE_ENDIAN_BITFIELD */ #define ELS_CMD_MASK 0xffff #define ELS_RSP_MASK 0xff @@ -629,7 +634,14 @@ struct fc_vft_header { #define ELS_CMD_TEST 0x11 #define ELS_CMD_RRQ 0x12 #define ELS_CMD_REC 0x13 +#if defined(FCH_EVT_LINK_FPIN) +#define ELS_CMD_FPIN ELS_FPIN +#else +#define ELS_CMD_FPIN 0x16 +#endif +#define ELS_CMD_EDC 0x17 #define ELS_CMD_RDP 0x18 +#define ELS_CMD_RDF 0x19 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_NVMEPRLI 0x18001420 #define ELS_CMD_PRLO 0x14001021 @@ -639,16 +651,16 @@ struct fc_vft_header { #define ELS_CMD_ADISC 0x52 #define ELS_CMD_FARP 0x54 #define ELS_CMD_FARPR 0x55 -#define ELS_CMD_RPS 0x56 #define ELS_CMD_RPL 0x57 #define ELS_CMD_FAN 0x60 -#define ELS_CMD_RSCN 0x0461 #define ELS_CMD_RSCN_XMT 0x08000461 +#define ELS_CMD_RSCN 0x0461 #define ELS_CMD_SCR 0x62 #define ELS_CMD_RNID 0x78 #define ELS_CMD_LIRR 0x7A #define ELS_CMD_LCB 0x81 -#define ELS_CMD_FPIN ELS_FPIN +#define ELS_CMD_AUTH 0x90 +#define ELS_CMD_RAW 0xff #endif /* @@ -919,24 +931,6 @@ typedef struct _RNID { /* Structure is in Big Endian format */ } un; } __packed RNID; -typedef struct _RPS { /* Structure is in Big Endian format */ - union { - uint32_t portNum; - struct lpfc_name portName; - } un; -} 
RPS; - -typedef struct _RPS_RSP { /* Structure is in Big Endian format */ - uint16_t rsvd1; - uint16_t portStatus; - uint32_t linkFailureCnt; - uint32_t lossSyncCnt; - uint32_t lossSignalCnt; - uint32_t primSeqErrCnt; - uint32_t invalidXmitWord; - uint32_t crcCnt; -} RPS_RSP; - struct RLS { /* Structure is in Big Endian format */ uint32_t rls; #define rls_rsvd_SHIFT 24 @@ -1099,6 +1093,11 @@ struct fc_lcb_res_frame { uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ }; +struct fc_RSCN { /* RSCN ELS frame */ +#define RSCN_EQ_PORT_ATTRIBUTE 2 + uint32_t NPort_ID; +}; + /* * Read Diagnostic Parameters (RDP) ELS frame. */ @@ -1340,25 +1339,8 @@ struct fc_rdp_res_frame { /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ -/* - * Registered Port List Format - */ -struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; - uint32_t pe; /* Variable-length array */ -}; - - /* Definitions for HBA / Port attribute entries */ -struct lpfc_fdmi_attr_def { /* Defined in TLV format */ - /* Structure is in Big Endian format */ - uint32_t AttrType:16; - uint32_t AttrLen:16; - uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ -}; - - /* Attribute Entry */ struct lpfc_fdmi_attr_entry { union { @@ -1369,7 +1351,13 @@ struct lpfc_fdmi_attr_entry { } un; }; -#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ + /* Structure is in Big Endian format */ + uint32_t AttrType:16; + uint32_t AttrLen:16; + /* Marks start of Value (ATTRIBUTE_ENTRY) */ + struct lpfc_fdmi_attr_entry AttrValue; +} __packed; /* * HBA Attribute Block @@ -1393,15 +1381,25 @@ struct lpfc_fdmi_hba_ident { struct lpfc_name PortName; }; +/* + * Registered Port List Format + */ +struct lpfc_fdmi_reg_port_list { + uint32_t EntryCnt; + struct lpfc_fdmi_port_entry pe; +} __packed; + /* * Register HBA(RHBA) */ struct lpfc_fdmi_reg_hba { struct lpfc_fdmi_hba_ident hi; - struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ -/* struct lpfc_fdmi_attr_block ab; */ + struct lpfc_fdmi_reg_port_list rpl; }; +/******** E2E ********/ +#define SLI_CT_E2E_Subtypes 0x11 + /* * Register HBA Attributes (RHAT) */ @@ -1505,6 +1503,7 @@ struct lpfc_fdmi_reg_portattr { #define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */ #define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */ #define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */ +#define RPRT_VENDOR_CGN 0xf047 /* canned ASCII string */ #define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */ #define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */ #define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */ @@ -1537,6 +1536,7 @@ struct lpfc_fdmi_reg_portattr { #define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */ #define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */ #define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */ +#define LPFC_FDMI_VENDOR_ATTR_cgn 0x00800000 /* Vendor specific */ /* Bit mask for FDMI-1 defined PORT attributes */ #define LPFC_FDMI1_PORT_ATTR 0x0000003f @@ -1591,8 +1591,10 @@ struct lpfc_fdmi_reg_portattr { #define PCI_DEVICE_ID_PROTEUS_PF 0xe180 #define PCI_DEVICE_ID_LANCER_FC 0xe200 #define PCI_DEVICE_ID_LANCER_FC_VF 0xe208 +#ifndef BUILD_RHEL8 #define PCI_DEVICE_ID_LANCER_FCOE 0xe260 #define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268 +#endif #define PCI_DEVICE_ID_LANCER_G6_FC 0xe300 #define PCI_DEVICE_ID_LANCER_G7_FC 0xf400 #define 
PCI_DEVICE_ID_SAT_SMB 0xf011 @@ -3284,8 +3286,7 @@ typedef struct { #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd1 : 19; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd1 : 20; /* Reserved */ uint32_t casabt : 1; /* Configure async abts status notice */ uint32_t rsvd2 : 2; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ @@ -3309,12 +3310,10 @@ typedef struct { uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t rsvd2 : 2; /* Reserved */ uint32_t casabt : 1; /* Configure async abts status notice */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd1 : 19; /* Reserved */ + uint32_t rsvd1 : 20; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd3 : 19; /* Reserved */ - uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 : 20; /* Reserved */ uint32_t gasabt : 1; /* Grant async abts status notice */ uint32_t rsvd4 : 2; /* Reserved */ uint32_t gbg : 1; /* Grant BlockGuard */ @@ -3338,8 +3337,7 @@ typedef struct { uint32_t gbg : 1; /* Grant BlockGuard */ uint32_t rsvd4 : 2; /* Reserved */ uint32_t gasabt : 1; /* Grant async abts status notice */ - uint32_t gdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd3 : 19; /* Reserved */ + uint32_t rsvd3 : 20; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -3361,15 +3359,11 @@ typedef struct { uint32_t rsvd6; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t fips_rev : 3; /* FIPS Spec Revision */ - uint32_t fips_level : 4; /* FIPS Level */ - uint32_t sec_err : 9; /* security crypto error */ + uint32_t rsvd7 : 16; uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - uint32_t sec_err : 9; /* security crypto error */ - uint32_t fips_level : 4; /* FIPS Level */ - uint32_t fips_rev : 3; /* FIPS Spec Revision */ + uint32_t rsvd7 : 16; #endif } CONFIG_PORT_VAR; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index bd533475c86a..de23721aa174 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -20,6 +20,9 @@ * included with this package. * *******************************************************************/ +#include <uapi/scsi/fc/fc_fs.h> +#include <uapi/scsi/fc/fc_els.h> + /* Macros to deal with bit fields. Each bit field must have 3 #defines * associated with it (_SHIFT, _MASK, and _WORD). * EG. 
For a bit field that is in the 7th bit of the "field4" field of a @@ -210,7 +213,6 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 0 -#define LPFC_IMAX_THRESHOLD 1000 #define LPFC_MAX_AUTO_EQ_DELAY 120 #define LPFC_EQ_DELAY_STEP 15 #define LPFC_EQD_ISR_TRIGGER 20000 @@ -388,6 +390,12 @@ struct lpfc_wcqe_complete { #define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF #define lpfc_wcqe_c_ersp0_WORD word0 uint32_t total_data_placed; +#define lpfc_wcqe_c_cmf_cg_SHIFT 31 +#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001 +#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed +#define lpfc_wcqe_c_cmf_bw_SHIFT 0 +#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF +#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed uint32_t parameter; #define lpfc_wcqe_c_bg_edir_SHIFT 5 #define lpfc_wcqe_c_bg_edir_MASK 0x00000001 @@ -649,6 +657,9 @@ struct lpfc_register { #define lpfc_sliport_status_oti_SHIFT 29 #define lpfc_sliport_status_oti_MASK 0x1 #define lpfc_sliport_status_oti_WORD word0 +#define lpfc_sliport_status_dip_SHIFT 25 +#define lpfc_sliport_status_dip_MASK 0x1 +#define lpfc_sliport_status_dip_WORD word0 #define lpfc_sliport_status_rn_SHIFT 24 #define lpfc_sliport_status_rn_MASK 0x1 #define lpfc_sliport_status_rn_WORD word0 @@ -679,6 +690,7 @@ struct lpfc_register { #define lpfc_sliport_eqdelay_id_MASK 0xfff #define lpfc_sliport_eqdelay_id_WORD word0 #define LPFC_SEC_TO_USEC 1000000 +#define LPFC_SEC_TO_MSEC 1000 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically * reside in BAR 2. @@ -951,6 +963,12 @@ union lpfc_sli4_cfg_shdr { #define lpfc_mbox_hdr_add_status_SHIFT 8 #define lpfc_mbox_hdr_add_status_MASK 0x000000FF #define lpfc_mbox_hdr_add_status_WORD word7 +#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2 +#define lpfc_mbox_hdr_add_status_2_SHIFT 16 +#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_2_WORD word7 +#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01 +#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02 uint32_t response_length; uint32_t actual_response_length; } response; @@ -1007,6 +1025,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D #define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73 #define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74 +#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C @@ -1115,6 +1134,12 @@ struct lpfc_mbx_sge { uint32_t length; }; +struct lpfc_mbx_host_buf { + uint32_t length; + uint32_t pa_lo; + uint32_t pa_hi; +}; + struct lpfc_mbx_nembed_cmd { struct lpfc_sli4_cfg_mhdr cfg_mhdr; #define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 @@ -1125,6 +1150,63 @@ struct lpfc_mbx_nembed_sge_virt { void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; }; +struct lpfc_mbx_read_object { /* Version 0 */ + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rd_object_rlen_SHIFT 0 +#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF +#define lpfc_mbx_rd_object_rlen_WORD word0 + uint32_t rd_object_offset; + uint32_t rd_object_name[26]; + uint32_t rd_object_cnt; + struct lpfc_mbx_host_buf rd_object_hbuf[4]; + } request; + struct { + uint32_t rd_object_actual_rlen; + uint32_t word1; +#define lpfc_mbx_rd_object_eof_SHIFT 31 +#define lpfc_mbx_rd_object_eof_MASK 0x1 +#define lpfc_mbx_rd_object_eof_WORD word1 + } response; + } u; +}; + +struct lpfc_mbx_write_object { /* Version 0 */ + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wr_object_rlen_SHIFT 
0 +#define lpfc_mbx_wr_object_rlen_MASK 0x00FFFFFF +#define lpfc_mbx_wr_object_rlen_WORD word0 +#define lpfc_mbx_wr_object_noc_SHIFT 30 +#define lpfc_mbx_wr_object_noc_MASK 0x1 +#define lpfc_mbx_wr_object_noc_WORD word0 +#define lpfc_mbx_wr_object_eof_SHIFT 31 +#define lpfc_mbx_wr_object_eof_MASK 0x1 +#define lpfc_mbx_wr_object_eof_WORD word0 + uint32_t wr_object_offset; + uint32_t wr_object_name[26]; +#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ + uint32_t wr_object_cnt; + struct lpfc_mbx_host_buf wr_object_hbuf[4]; + } request; + struct { + uint32_t wr_object_actual_rlen; + uint32_t word1; +#define lpfc_mbx_wr_object_reset_SHIFT 0 +#define lpfc_mbx_wr_object_reset_MASK 0xff +#define lpfc_mbx_wr_object_reset_WORD word1 +#define LPFC_NO_RESET_REQ 0x0 +#define LPFC_PHYS_DEVICE_RESET_REQ 0x1 +#define LPFC_FW_RESET_REQ 0x2 +#define LPFC_PROTOCOL_RESET_REQ 0x3 + } response; + } u; +}; + struct lpfc_mbx_eq_create { struct mbox_header header; union { @@ -1433,7 +1515,7 @@ struct lpfc_mbx_wq_create { #define lpfc_mbx_wq_create_page_size_SHIFT 0 #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF #define lpfc_mbx_wq_create_page_size_WORD word1 -#define LPFC_WQ_PAGE_SIZE_4096 0x1 +#define LPFC_WQ_PAGE_SIZE_4096 0x1 #define lpfc_mbx_wq_create_dpp_req_SHIFT 15 #define lpfc_mbx_wq_create_dpp_req_MASK 0x00000001 #define lpfc_mbx_wq_create_dpp_req_WORD word1 @@ -2320,6 +2402,8 @@ struct lpfc_mbx_redisc_fcf_tbl { #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB #define ADD_STATUS_INVALID_REQUEST 0x4B +#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0 +#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58 struct lpfc_mbx_sli4_config { struct mbox_header header; @@ -2791,6 +2875,15 @@ struct lpfc_mbx_read_rev { struct lpfc_mbx_read_config { uint32_t word1; +#define lpfc_mbx_rd_conf_acs_SHIFT 27 /* alarm signaling */ +#define lpfc_mbx_rd_conf_acs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_acs_WORD word1 +#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */ +#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001 +#define lpfc_mbx_rd_conf_wcs_WORD word1 +#define lpfc_mbx_rd_conf_fawwpn_inuse_SHIFT 30 +#define lpfc_mbx_rd_conf_fawwpn_inuse_MASK 0x00000001 +#define lpfc_mbx_rd_conf_fawwpn_inuse_WORD word1 #define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31 #define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001 #define lpfc_mbx_rd_conf_extnts_inuse_WORD word1 @@ -2809,6 +2902,15 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_trunk_SHIFT 12 #define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F #define lpfc_mbx_rd_conf_trunk_WORD word2 +#define lpfc_mbx_rd_conf_pt_SHIFT 20 +#define lpfc_mbx_rd_conf_pt_MASK 0x00000003 +#define lpfc_mbx_rd_conf_pt_WORD word2 +#define lpfc_mbx_rd_conf_tf_SHIFT 22 +#define lpfc_mbx_rd_conf_tf_MASK 0x00000001 +#define lpfc_mbx_rd_conf_tf_WORD word2 +#define lpfc_mbx_rd_conf_ptv_SHIFT 23 +#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ptv_WORD word2 #define lpfc_mbx_rd_conf_topology_SHIFT 24 #define lpfc_mbx_rd_conf_topology_MASK 0x000000FF #define lpfc_mbx_rd_conf_topology_WORD word2 @@ -3475,7 +3577,6 @@ struct lpfc_sli4_parameters { #define cfg_nosr_SHIFT 9 #define cfg_nosr_MASK 0x00000001 #define cfg_nosr_WORD word19 - #define cfg_bv1s_SHIFT 10 #define cfg_bv1s_MASK 0x00000001 #define cfg_bv1s_WORD word19 @@ -3483,14 +3584,28 @@ struct lpfc_sli4_parameters { #define cfg_nsler_SHIFT 12 #define cfg_nsler_MASK 0x00000001 #define cfg_nsler_WORD word19 +#define cfg_pvl_SHIFT 13 +#define cfg_pvl_MASK 0x00000001 +#define 
cfg_pvl_WORD word19 + +#define cfg_pbde_SHIFT 20 +#define cfg_pbde_MASK 0x00000001 +#define cfg_pbde_WORD word19 uint32_t word20; #define cfg_max_tow_xri_SHIFT 0 #define cfg_max_tow_xri_MASK 0x0000ffff #define cfg_max_tow_xri_WORD word20 - uint32_t word21; /* RESERVED */ - uint32_t word22; /* RESERVED */ + uint32_t word21; +#define cfg_e2em_attr_value_SHIFT 0 +#define cfg_e2em_attr_value_MASK 0x0000ffff +#define cfg_e2em_attr_value_WORD word21 +#define cfg_cmf_SHIFT 24 +#define cfg_cmf_MASK 0x000000ff +#define cfg_cmf_WORD word21 + + uint32_t cgn_buf_size; /* word 22 */ uint32_t word23; /* RESERVED */ uint32_t word24; @@ -3517,21 +3632,46 @@ struct lpfc_sli4_parameters { }; #define LPFC_SET_UE_RECOVERY 0x10 -#define LPFC_SET_MDS_DIAGS 0x11 +#define LPFC_SET_MDS_DIAGS 0x12 +#define LPFC_SET_CGN_SIGNAL 0x1f +#define LPFC_SET_DUAL_DUMP 0x1e +#define LPFC_SET_ENABLE_E2E 0x21 +#define LPFC_SET_ENABLE_CMF 0x24 struct lpfc_mbx_set_feature { struct mbox_header header; - uint32_t feature; - uint32_t param_len; - uint32_t word6; + u32 feature; + u32 param_len; + u32 word6; #define lpfc_mbx_set_feature_UER_SHIFT 0 #define lpfc_mbx_set_feature_UER_MASK 0x00000001 #define lpfc_mbx_set_feature_UER_WORD word6 -#define lpfc_mbx_set_feature_mds_SHIFT 0 +#define lpfc_mbx_set_feature_mds_SHIFT 2 #define lpfc_mbx_set_feature_mds_MASK 0x00000001 #define lpfc_mbx_set_feature_mds_WORD word6 #define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 #define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 #define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 +#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6 +#define lpfc_mbx_set_feature_dd_SHIFT 0 +#define lpfc_mbx_set_feature_dd_MASK 0x00000001 +#define lpfc_mbx_set_feature_dd_WORD word6 +#define lpfc_mbx_set_feature_ddquery_SHIFT 1 +#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001 +#define lpfc_mbx_set_feature_ddquery_WORD word6 +#define LPFC_DISABLE_DUAL_DUMP 0 +#define LPFC_ENABLE_DUAL_DUMP 1 +#define LPFC_QUERY_OP_DUAL_DUMP 2 +#define lpfc_mbx_set_feature_cmf_SHIFT 0 +#define lpfc_mbx_set_feature_cmf_MASK 0x00000001 +#define lpfc_mbx_set_feature_cmf_WORD word6 +#define lpfc_mbx_set_feature_e2e_SHIFT 0 +#define lpfc_mbx_set_feature_e2e_MASK 0x0000ffff +#define lpfc_mbx_set_feature_e2e_WORD word6 +#define lpfc_mbx_set_feature_e2elunq_SHIFT 16 +#define lpfc_mbx_set_feature_e2elunq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_e2elunq_WORD word6 uint32_t word7; #define lpfc_mbx_set_feature_UERP_SHIFT 0 #define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff @@ -3539,16 +3679,51 @@ struct lpfc_mbx_set_feature { #define lpfc_mbx_set_feature_UESR_SHIFT 16 #define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff #define lpfc_mbx_set_feature_UESR_WORD word7 +#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff +#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7 + u32 word8; +#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0 +#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff +#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8 }; #define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2 +#define LPFC_SET_HOST_DATE_TIME 0x4 + +struct lpfc_mbx_set_host_date_time { + uint32_t word6; +#define lpfc_mbx_set_host_month_WORD word6 +#define lpfc_mbx_set_host_month_SHIFT 16 +#define lpfc_mbx_set_host_month_MASK 0xFF +#define lpfc_mbx_set_host_day_WORD word6 +#define lpfc_mbx_set_host_day_SHIFT 8 +#define 
lpfc_mbx_set_host_day_MASK 0xFF +#define lpfc_mbx_set_host_year_WORD word6 +#define lpfc_mbx_set_host_year_SHIFT 0 +#define lpfc_mbx_set_host_year_MASK 0xFF + uint32_t word7; +#define lpfc_mbx_set_host_hour_WORD word7 +#define lpfc_mbx_set_host_hour_SHIFT 16 +#define lpfc_mbx_set_host_hour_MASK 0xFF +#define lpfc_mbx_set_host_min_WORD word7 +#define lpfc_mbx_set_host_min_SHIFT 8 +#define lpfc_mbx_set_host_min_MASK 0xFF +#define lpfc_mbx_set_host_sec_WORD word7 +#define lpfc_mbx_set_host_sec_SHIFT 0 +#define lpfc_mbx_set_host_sec_MASK 0xFF +}; + struct lpfc_mbx_set_host_data { #define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48 struct mbox_header header; uint32_t param_id; uint32_t param_len; - uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; + union { + uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE]; + struct lpfc_mbx_set_host_date_time tm; + } un; }; struct lpfc_mbx_set_trunk_mode { @@ -3566,6 +3741,21 @@ struct lpfc_mbx_get_sli4_parameters { struct lpfc_sli4_parameters sli4_parameters; }; +struct lpfc_mbx_reg_congestion_buf { + struct mbox_header header; + uint32_t word0; +#define lpfc_mbx_reg_cgn_buf_type_WORD word0 +#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0 +#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF +#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0 +#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16 +#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF + uint32_t word1; + uint32_t length; + uint32_t addr_lo; + uint32_t addr_hi; +}; + struct lpfc_rscr_desc_generic { #define LPFC_RSRC_DESC_WSIZE 22 uint32_t desc[LPFC_RSRC_DESC_WSIZE]; @@ -3731,6 +3921,9 @@ struct lpfc_controller_attribute { #define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 #define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff #define lpfc_cntl_attr_eprom_ver_hi_WORD word17 +#define lpfc_cntl_attr_flash_id_SHIFT 16 +#define lpfc_cntl_attr_flash_id_MASK 0x000000ff +#define lpfc_cntl_attr_flash_id_WORD word17 uint32_t mbx_da_struct_ver; uint32_t ep_fw_da_struct_ver; uint32_t ncsi_ver_str[3]; @@ -3872,6 +4065,90 @@ struct lpfc_mbx_get_port_name { #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 #define MB_CQE_STATUS_DMA_FAILED 0x5 +#define LPFC_MBX_OBJECT_NAME_LEN_DW 26 +#define LPFC_MBX_AUTH_RD_CONFIG_MAX_BDE 8 +struct lpfc_mbx_auth_rd_object { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word4; +#define lpfc_rd_object_read_length_SHIFT 0 +#define lpfc_rd_object_read_length_MASK 0x00FFFFFF +#define lpfc_rd_object_read_length_WORD word4 + uint32_t read_offset; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; + uint32_t bde_count; + struct ulp_bde64 bde[LPFC_MBX_AUTH_RD_CONFIG_MAX_BDE]; + } request; + struct { + uint32_t actual_read_length; + uint32_t word5; +#define lpfc_rd_object_eof_SHIFT 31 +#define lpfc_rd_object_eof_MASK 0x00000001 +#define lpfc_rd_object_eof_WORD word5 + } response; + } u; +}; + +#define LPFC_MBX_RD_CONFIG_MAX_BDE 1 +struct lpfc_mbx_rd_object { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_rd_object_read_length_SHIFT 0 +#define lpfc_rd_object_read_length_MASK 0x00FFFFFF +#define lpfc_rd_object_read_length_WORD word4 + uint32_t read_offset; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; + uint32_t bde_count; + struct ulp_bde64 bde[LPFC_MBX_RD_CONFIG_MAX_BDE]; + } request; + struct { + uint32_t actual_read_length; + uint32_t word5; +#define lpfc_rd_object_eof_SHIFT 31 +#define lpfc_rd_object_eof_MASK 0x00000001 +#define lpfc_rd_object_eof_WORD word5 + } response; + } u; +}; + +#define LPFC_MBX_AUTH_WR_CONFIG_MAX_BDE 8 +struct lpfc_mbx_auth_wr_object { + union lpfc_sli4_cfg_shdr 
cfg_shdr; + union { + struct { + uint32_t word4; +#define lpfc_wr_object_eof_SHIFT 31 +#define lpfc_wr_object_eof_MASK 0x00000001 +#define lpfc_wr_object_eof_WORD word4 +#define lpfc_wr_object_eas_SHIFT 29 +#define lpfc_wr_object_eas_MASK 0x00000001 +#define lpfc_wr_object_eas_WORD word4 +#define lpfc_wr_object_write_length_SHIFT 0 +#define lpfc_wr_object_write_length_MASK 0x00FFFFFF +#define lpfc_wr_object_write_length_WORD word4 + uint32_t write_offset; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; + uint32_t bde_count; + struct ulp_bde64 bde[LPFC_MBX_AUTH_WR_CONFIG_MAX_BDE]; + } request; + struct { + uint32_t actual_write_length; + uint32_t word5; +#define lpfc_wr_object_change_status_SHIFT 0 +#define lpfc_wr_object_change_status_MASK 0x000000FF +#define lpfc_wr_object_change_status_WORD word5 +#define LPFC_CHANGE_STATUS_NO_RESET_NEEDED 0x00 +#define LPFC_CHANGE_STATUS_PHYS_DEV_RESET 0x01 +#define LPFC_CHANGE_STATUS_FW_RESET 0x02 +#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04 +#define LPFC_CHANGE_STATUS_PCI_RESET 0x05 + } response; + } u; +}; + #define LPFC_MBX_WR_CONFIG_MAX_BDE 1 struct lpfc_mbx_wr_object { struct mbox_header header; @@ -3888,7 +4165,7 @@ struct lpfc_mbx_wr_object { #define lpfc_wr_object_write_length_MASK 0x00FFFFFF #define lpfc_wr_object_write_length_WORD word4 uint32_t write_offset; - uint32_t object_name[26]; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; uint32_t bde_count; struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE]; } request; @@ -3903,10 +4180,22 @@ struct lpfc_mbx_wr_object { #define LPFC_CHANGE_STATUS_FW_RESET 0x02 #define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04 #define LPFC_CHANGE_STATUS_PCI_RESET 0x05 +#define lpfc_wr_object_csf_SHIFT 8 +#define lpfc_wr_object_csf_MASK 0x00000001 +#define lpfc_wr_object_csf_WORD word5 } response; } u; }; +struct lpfc_mbx_del_object { + struct mbox_header header; + struct { + uint32_t word4; + uint32_t word5; + uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; + } request; +}; + /* mailbox queue entry structure */ struct lpfc_mqe { uint32_t word0; @@ -3934,6 +4223,8 @@ struct lpfc_mqe { struct lpfc_mbx_unreg_fcfi unreg_fcfi; struct lpfc_mbx_mq_create mq_create; struct lpfc_mbx_mq_create_ext mq_create_ext; + struct lpfc_mbx_read_object read_object; + struct lpfc_mbx_write_object write_object; struct lpfc_mbx_eq_create eq_create; struct lpfc_mbx_modify_eq_delay eq_delay; struct lpfc_mbx_cq_create cq_create; @@ -3961,12 +4252,15 @@ struct lpfc_mqe { struct lpfc_mbx_supp_pages supp_pages; struct lpfc_mbx_pc_sli4_params sli4_params; struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; + struct lpfc_mbx_reg_congestion_buf reg_congestion_buf; struct lpfc_mbx_set_link_diag_state link_diag_state; struct lpfc_mbx_set_link_diag_loopback link_diag_loopback; struct lpfc_mbx_run_link_diag_test link_diag_test; struct lpfc_mbx_get_func_cfg get_func_cfg; struct lpfc_mbx_get_prof_cfg get_prof_cfg; + struct lpfc_mbx_rd_object rd_object; struct lpfc_mbx_wr_object wr_object; + struct lpfc_mbx_del_object del_object; struct lpfc_mbx_get_port_name get_port_name; struct lpfc_mbx_set_feature set_feature; struct lpfc_mbx_memory_dump_type3 mem_dump_type3; @@ -4009,12 +4303,13 @@ struct lpfc_mcqe { #define lpfc_trailer_code_SHIFT 8 #define lpfc_trailer_code_MASK 0x000000FF #define lpfc_trailer_code_WORD trailer -#define LPFC_TRAILER_CODE_LINK 0x1 -#define LPFC_TRAILER_CODE_FCOE 0x2 -#define LPFC_TRAILER_CODE_DCBX 0x3 -#define LPFC_TRAILER_CODE_GRP5 0x5 -#define LPFC_TRAILER_CODE_FC 0x10 -#define LPFC_TRAILER_CODE_SLI 0x11 +#define 
LPFC_TRAILER_CODE_LINK 0x1 +#define LPFC_TRAILER_CODE_FCOE 0x2 +#define LPFC_TRAILER_CODE_DCBX 0x3 +#define LPFC_TRAILER_CODE_GRP5 0x5 +#define LPFC_TRAILER_CODE_FC 0x10 +#define LPFC_TRAILER_CODE_SLI 0x11 +#define LPFC_TRAILER_CODE_CMSTAT 0x13 }; struct lpfc_acqe_link { @@ -4249,11 +4544,25 @@ struct lpfc_acqe_misconfigured_event { #define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05 }; +struct lpfc_acqe_cgn_signal { + u32 word0; +#define lpfc_warn_acqe_SHIFT 0 +#define lpfc_warn_acqe_MASK 0x7FFFFFFF +#define lpfc_warn_acqe_WORD word0 +#define lpfc_imm_acqe_SHIFT 31 +#define lpfc_imm_acqe_MASK 0x1 +#define lpfc_imm_acqe_WORD word0 + u32 alarm_cnt; + u32 word2; + u32 trailer; +}; + struct lpfc_acqe_sli { - uint32_t event_data1; - uint32_t event_data2; - uint32_t reserved; - uint32_t trailer; + u32 event_data1; +#define LPFC_SLI_CONG_PARAM_CHANGE_EVENT 0x2 + u32 event_data2; + u32 reserved; + u32 trailer; #define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1 #define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2 #define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 @@ -4261,6 +4570,11 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 #define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA +#define LPFC_SLI_EVENT_TYPE_PORT_INT_EVT 0xD +#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE +#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF +#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10 +#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11 }; /* @@ -4421,6 +4735,9 @@ struct wqe_common { #define wqe_sup_SHIFT 6 #define wqe_sup_MASK 0x00000001 #define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 @@ -4659,9 +4976,73 @@ struct create_xri_wqe { uint32_t rsvd_12_15[4]; /* word 12-15 */ }; +#define INHIBIT_ABORT 1 #define T_REQUEST_TAG 3 #define T_XRI_TAG 1 +struct cmf_sync_wqe { // CMF_DBG - this may change + uint32_t rsrvd[3]; + uint32_t word3; +#define cmf_sync_interval_SHIFT 0 +#define cmf_sync_interval_MASK 0x00000ffff +#define cmf_sync_interval_WORD word3 +#define cmf_sync_afpin_SHIFT 16 +#define cmf_sync_afpin_MASK 0x000000001 +#define cmf_sync_afpin_WORD word3 +#define cmf_sync_asig_SHIFT 17 +#define cmf_sync_asig_MASK 0x000000001 +#define cmf_sync_asig_WORD word3 +#define cmf_sync_op_SHIFT 20 +#define cmf_sync_op_MASK 0x00000000f +#define cmf_sync_op_WORD word3 +#define cmf_sync_ver_SHIFT 24 +#define cmf_sync_ver_MASK 0x0000000ff +#define cmf_sync_ver_WORD word3 +#define LPFC_CMF_SYNC_VER 1 + uint32_t event_tag; + uint32_t word5; +#define cmf_sync_wsigmax_SHIFT 0 +#define cmf_sync_wsigmax_MASK 0x00000ffff +#define cmf_sync_wsigmax_WORD word5 +#define cmf_sync_wsigcnt_SHIFT 16 +#define cmf_sync_wsigcnt_MASK 0x00000ffff +#define cmf_sync_wsigcnt_WORD word5 + uint32_t word6; + uint32_t word7; +#define cmf_sync_cmnd_SHIFT 8 +#define cmf_sync_cmnd_MASK 0x0000000ff +#define cmf_sync_cmnd_WORD word7 + uint32_t word8; + uint32_t word9; +#define cmf_sync_reqtag_SHIFT 0 +#define cmf_sync_reqtag_MASK 0x00000ffff +#define cmf_sync_reqtag_WORD word9 +#define cmf_sync_wfpinmax_SHIFT 16 +#define cmf_sync_wfpinmax_MASK 0x0000000ff +#define cmf_sync_wfpinmax_WORD word9 +#define cmf_sync_wfpincnt_SHIFT 24 +#define cmf_sync_wfpincnt_MASK 0x0000000ff +#define cmf_sync_wfpincnt_WORD word9 + uint32_t word10; +#define cmf_sync_qosd_SHIFT 9 +#define cmf_sync_qosd_MASK 0x00000001 +#define cmf_sync_qosd_WORD word10 + uint32_t word11; +#define cmf_sync_cmd_type_SHIFT 0 
+#define cmf_sync_cmd_type_MASK 0x0000000f +#define cmf_sync_cmd_type_WORD word11 +#define cmf_sync_wqec_SHIFT 7 +#define cmf_sync_wqec_MASK 0x00000001 +#define cmf_sync_wqec_WORD word11 +#define cmf_sync_cqid_SHIFT 16 +#define cmf_sync_cqid_MASK 0x0000ffff +#define cmf_sync_cqid_WORD word11 + uint32_t read_bytes; + uint32_t word13; + uint32_t word14; + uint32_t word15; +}; + struct abort_cmd_wqe { uint32_t rsrvd[3]; uint32_t word3; @@ -4724,6 +5105,19 @@ struct fcp_icmnd64_wqe { uint32_t rsvd_12_15[4]; /* word 12-15 */ }; +#define CMD_SEND_FRAME 0xE1 + +struct send_frame_wqe { + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t frame_len; /* word 3 */ + uint32_t fc_hdr_wd0; /* word 4 */ + uint32_t fc_hdr_wd1; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fc_hdr_wd2; /* word 12 */ + uint32_t fc_hdr_wd3; /* word 13 */ + uint32_t fc_hdr_wd4; /* word 14 */ + uint32_t fc_hdr_wd5; /* word 15 */ +}; struct fcp_trsp64_wqe { struct ulp_bde64 bde; uint32_t response_len; @@ -4753,20 +5147,270 @@ struct fcp_treceive64_wqe { }; #define TXRDY_PAYLOAD_LEN 12 -#define CMD_SEND_FRAME 0xE1 -struct send_frame_wqe { - struct ulp_bde64 bde; /* words 0-2 */ - uint32_t frame_len; /* word 3 */ - uint32_t fc_hdr_wd0; /* word 4 */ - uint32_t fc_hdr_wd1; /* word 5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t fc_hdr_wd2; /* word 12 */ - uint32_t fc_hdr_wd3; /* word 13 */ - uint32_t fc_hdr_wd4; /* word 14 */ - uint32_t fc_hdr_wd5; /* word 15 */ +/* EDC Exchange Diagnostic Capabilities Structure layout. + * Link Faults (T11 standard, but not supported.) + * Congestion Detection (future, but supported now) + */ +struct cgn_det_desc { +#define CGN_DET_CAP_DESC_TAG 0x0001000f + u32 cgn_det_tag; + u32 cgn_desc_len; + u32 word2; /* transmit signal capability */ +#define xmt_sig_cap_SHIFT 0 +#define xmt_sig_cap_MASK 0x0000000f +#define xmt_sig_cap_WORD word2 +#define XMT_SIG_CAP_NO_SPT 0 +#define XMT_SIG_CAP_WARN_SPT 1 +#define XMT_SIG_CAP_BOTH_SPT 2 + u32 word3; /* transmit signal frequency */ +#define xmt_sig_freq_cyc_SHIFT 16 +#define xmt_sig_freq_cyc_MASK 0x000003ff +#define xmt_sig_freq_cyc_WORD word3 +#define XMT_SIG_FREQ_CYC_NO_SPT 0 +#define XMT_SIG_FREQ_CYC_MIN 1 +#define XMT_SIG_FREQ_CYC_MAX 999 +#define xmt_sig_freq_scale_SHIFT 0 +#define xmt_sig_freq_scale_MASK 0x0000000f +#define xmt_sig_freq_scale_WORD word3 +#define XMT_SIG_FREQ_SCALE_NO_SPT 0 +#define XMT_SIG_FREQ_SCALE_SEC 1 +#define XMT_SIG_FREQ_SCALE_MSEC 2 +#define XMT_SIG_FREQ_SCALE_USEC 3 +#define XMT_SIG_FREQ_SCALE_NSEC 4 + u32 word4; /* receive signal capability */ +#define rcv_sig_cap_SHIFT 0 +#define rcv_sig_cap_MASK 0x0000000f +#define rcv_sig_cap_WORD word4 +#define RCV_SIG_CAP_NO_SPT 0 +#define RCV_SIG_CAP_WARN_SPT 1 +#define RCV_SIG_CAP_BOTH_SPT 2 + u32 word5; /* receive signal frequency */ +#define rcv_sig_freq_cyc_SHIFT 16 +#define rcv_sig_freq_cyc_MASK 0x000003ff +#define rcv_sig_freq_cyc_WORD word5 +#define RCV_SIG_FREQ_CYC_NO_SPT 0 +#define RCV_SIG_FREQ_CYC_MIN 1 +#define RCV_SIG_FREQ_CYC_DFT 100 +#define RCV_SIG_FREQ_CYC_MAX 999 +#define rcv_sig_freq_scale_SHIFT 0 +#define rcv_sig_freq_scale_MASK 0x0000000f +#define rcv_sig_freq_scale_WORD word5 +#define RCV_SIG_FREQ_SCALE_NO_SPT 0 +#define RCV_SIG_FREQ_SCALE_SEC 1 +#define RCV_SIG_FREQ_SCALE_MSEC 2 +#define RCV_SIG_FREQ_SCALE_USEC 3 +#define RCV_SIG_FREQ_SCALE_NSEC 4 }; +/* This is the T11 definition targeting FC-FS-5 */ +struct link_flt_desc { +#define LINK_FLT_CAP_DESC_TAG 0x0001000d + u32 link_flt_cap_tag; + u32 desc_len; + u32 degrade_act_thld; + 
u32 degrade_deact_thld;
+	u32 fec_degrade_intvl;
+};
+
+/* Not a Link Fault or Congestion descriptor */
+struct unknown_desc {
+	u32 unk_tag;
+	u32 desc_len;
+};
+
+/* EDC supports two descriptors. When allocated, it is the
+ * size of this structure plus each supported descriptor.
+ */
+struct lpfc_els_edc_req {
+	u32 word0;
+#define edc_els_req_SHIFT	24
+#define edc_els_req_MASK	0x000000ff
+#define edc_els_req_WORD	word0
+	u32 desc_list_len;
+};
+
+/* Minimum structure defines for the EDC response.
+ * Balance is in buffer.
+ */
+struct lpfc_els_edc_rsp {
+	u32 word0;
+#define edc_rsp_SHIFT	24
+#define edc_rsp_MASK	0x0000000ff
+#define edc_rsp_WORD	word0
+	u32 desc_list_len;
+	u32 lnk_srv_req_word2;
+	u32 lnk_srv_req_word3;
+	u32 lnk_srv_req_word4;
+};
+
+enum lpfc_fpin_types {
+	LPFC_FPIN_LINK_INTEG,
+	LPFC_FPIN_DELIVERY,
+	LPFC_FPIN_PEER_CGN,
+	LPFC_FPIN_CGN,
+	LPFC_MAX_NUM_OF_FPIN_TYPES,
+};
+
+/* RDF - Register Diagnostic Functions Structure layout.
+ * The response structure is larger than the request, forcing
+ * different structure defines to keep the command correct
+ * on the wire.
+ */
+struct rdf_desc {
+	uint32_t rdf_desc_tag;
+#define ELS_FPIN_REG	0x00030001
+	uint32_t rdf_desc_len;
+	uint32_t count;
+	uint32_t desc[LPFC_MAX_NUM_OF_FPIN_TYPES];
+};
+
+struct lpfc_els_rdf_req {
+	uint32_t word0;
+#define rdf_els_req_SHIFT	24
+#define rdf_els_req_MASK	0x000000ff
+#define rdf_els_req_WORD	word0
+	uint32_t desc_list_len;
+	struct rdf_desc rdf_desc;
+};
+
+struct lpfc_els_rdf_rsp {
+	uint32_t word0;
+#define edc_rsp_SHIFT	24
+#define edc_rsp_MASK	0x0000000ff
+#define edc_rsp_WORD	word0
+	uint32_t desc_list_len;
+	uint32_t lnk_srv_req_word2;
+	uint32_t lnk_srv_req_word3;
+	uint32_t lnk_srv_req_word4;
+	struct rdf_desc rdf_desc;
+};
+
+/* FPIN Fabric Performance Impact Notification.
+ *
+ * FPIN layout:
+ *    fpin_els_notify - two words
+ *    FPIN event descriptor - N words describing the event data payload.
+ *      The descriptor notify_tag tells the driver what descriptor
+ *      is getting delivered.
+ *
+ * A note on the portlist payload. Only Link Integrity and Congestion
+ * Notifications have a portlist in the payload. The payload buffer is
+ * an MBUF with size 1KB. Payload takes maximally 12 words (Link Integrity)
+ * or 48 bytes. That leaves (1024 - 48) / 8 = 122 WWPNs in the portlist.
+ * The Congestion notification would have 123 WWPNs. It isn't clear
+ * the driver needs or wants to handle the portlist payload and the FW will
+ * only DMA the MBUF size provided. Given this, the implementation
+ * isn't concerned about missing WWPNs.
+ *
+ */
+struct fpin_els_notify {
+	uint32_t word0;
+#define fpin_els_req_SHIFT	24
+#define fpin_els_req_MASK	0x000000ff
+#define fpin_els_req_WORD	word0
+	uint32_t fpin_list_len;
+};
+
+/* The Fabric broadcasts an FPIN Link Integrity Notification to all peer N_Port
+ * IDs that have registered for FPIN ELS notifications and are within the same
+ * zone as the impacted N_Port ID.
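+ *
+ * A minimal consumption sketch (illustrative, not from the patch): a
+ * handler can pull the event type and modifier out of word6 of the
+ * descriptor below with the driver's _SHIFT/_MASK/_WORD accessors:
+ *
+ *	evt = bf_get(fpin_link_evt_type, lint);
+ *	mod = bf_get(fpin_link_evt_mod, lint);
+ *
+ * where lint is an assumed struct fpin_link_integ_not pointer.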
+ */ +struct fpin_link_integ_not { + uint32_t ntfy_tag; +#define FPIN_NOTIFY_LINK_INTEG 0x00020001 + uint32_t ntfy_desc_len; + uint32_t det_pname[2]; + uint32_t att_pname[2]; + uint32_t word6; +#define fpin_link_evt_type_SHIFT 16 +#define fpin_link_evt_type_MASK 0x0000ffff +#define fpin_link_evt_type_WORD word6 +#define fpin_link_evt_mod_SHIFT 0 +#define fpin_link_evt_mod_MASK 0x0000ffff +#define fpin_link_evt_mod_WORD word6 +#define FPIN_LINK_EVT_UNKNOWN 0x0 +#define FPIN_LINK_EVT_LINK_FAIL 0x1 +#define FPIN_LINK_EVT_LOSS_SYNC 0x2 +#define FPIN_LINK_EVT_LOSS_SIG 0x3 +#define FPIN_LINK_EVT_PRIM_SEQ 0x4 +#define FPIN_LINK_EVT_INV_XMT_WD 0x5 +#define FPIN_LINK_EVT_INV_CRC 0x6 +#define FPIN_LINK_EVT_DEV_SPEC 0xF + uint32_t evt_threshold; + uint32_t evt_cnt; + uint32_t port_list_cnt; +}; + +/* The Fabric transmits a Delivery notification to the N_Port ID that sourced + * the impacted frames(s). + */ +struct fpin_delivery_not { + uint32_t ntfy_tag; +#define FPIN_NOTIFY_DELIVERY 0x00020002 + uint32_t ntfy_desc_len; + uint32_t det_pname[2]; + uint32_t att_pname[2]; + uint32_t delivery_reason; +#define FPIN_DEL_EVT_UNKNOWN 0x0 +#define FPIN_DEL_EVT_TMO 0x1 +#define FPIN_DEL_EVT_NO_ROUTE 0x2 +#define FPIN_DEL_EVT_DEV_SPEC 0xf + uint32_t discard_hdr[6]; +}; + +/* The Fabric transmits a Peer Congestion notification to all peer N_Port IDs + * that have registered for FPIN ELS notifications and are within the same zone + * as the impacted N_Port ID. + * The Fabric transmits a Congestion notification to the N_Port ID that sourced + * the impacted frame(s). Congestion Event Types are shared with the Peer + * congestions event types. + * The congestion event types are shared between the Peer and Attached N_Ports. + */ +#define FPIN_CGN_EVT_CLEAR_NONE 0x0 +#define FPIN_CGN_EVT_LOST_CRED 0x1 +#define FPIN_CGN_EVT_CRED_STALL 0x2 +#define FPIN_CGN_EVT_OVERSUB 0x3 +#define FPIN_CGN_EVT_DEV_SPEC 0xF + +struct fpin_peer_cgn_not { + uint32_t ntfy_tag; +#define FPIN_NOTIFY_PEER_CGN 0x00020003 + uint32_t ntfy_desc_len; + uint32_t det_pname[2]; + uint32_t att_pname[2]; + uint32_t word6; +#define fpin_pcgn_evt_type_SHIFT 16 +#define fpin_pcgn_evt_type_MASK 0x0000ffff +#define fpin_pcgn_evt_type_WORD word6 +#define fpin_pcgn_evt_mod_SHIFT 0 +#define fpin_pcgn_evt_mod_MASK 0x0000ffff +#define fpin_pcgn_evt_mod_WORD word6 + uint32_t evt_period; + uint32_t port_list_cnt; +}; + +struct fpin_cgn_not { + uint32_t ntfy_tag; +#define FPIN_NOTIFY_CGN 0x00020004 + uint32_t ntfy_desc_len; + uint32_t word2; +#define fpin_cgn_evt_type_SHIFT 16 +#define fpin_cgn_evt_type_MASK 0x0000ffff +#define fpin_cgn_evt_type_WORD word2 +#define fpin_cgn_evt_mod_SHIFT 0 +#define fpin_cgn_evt_mod_MASK 0x0000ffff +#define fpin_cgn_evt_mod_WORD word2 + uint32_t evt_period; + uint32_t word4; +#define fpin_cgn_sev_not_SHIFT 24 +#define fpin_cgn_sev_not_MASK 0x000000ff +#define fpin_cgn_sev_not_WORD word4 +#define FPIN_SEV_STATUS_WARNING 0xf1 +#define FPIN_SEV_STATUS_ALARM 0xf7 +}; + + union lpfc_wqe { uint32_t words[16]; struct lpfc_wqe_generic generic; @@ -4774,6 +5418,7 @@ union lpfc_wqe { struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; struct create_xri_wqe create_xri; struct xmit_bcast64_wqe xmit_bcast64; struct xmit_seq64_wqe xmit_sequence; @@ -4781,10 +5426,11 @@ union lpfc_wqe { struct xmit_els_rsp64_wqe xmit_els_rsp; struct els_request64_wqe els_req; struct gen_req64_wqe gen_req; + struct send_frame_wqe send_frame; struct fcp_trsp64_wqe fcp_trsp; struct 
fcp_tsend64_wqe fcp_tsend; struct fcp_treceive64_wqe fcp_treceive; - struct send_frame_wqe send_frame; + }; union lpfc_wqe128 { @@ -4794,6 +5440,7 @@ union lpfc_wqe128 { struct fcp_iread64_wqe fcp_iread; struct fcp_iwrite64_wqe fcp_iwrite; struct abort_cmd_wqe abort_cmd; + struct cmf_sync_wqe cmf_sync; struct create_xri_wqe create_xri; struct xmit_bcast64_wqe xmit_bcast64; struct xmit_seq64_wqe xmit_sequence; @@ -4834,6 +5481,7 @@ struct lpfc_grp_hdr { #define FCP_COMMAND_TRSP 0x3 #define FCP_COMMAND_TSEND 0x7 #define OTHER_COMMAND 0x8 +#define CMF_SYNC_COMMAND 0xA #define ELS_COMMAND_NON_FIP 0xC #define ELS_COMMAND_FIP 0xD @@ -4855,6 +5503,7 @@ struct lpfc_grp_hdr { #define CMD_FCP_TRECEIVE64_WQE 0xA1 #define CMD_FCP_TRSP64_WQE 0xA3 #define CMD_GEN_REQUEST64_WQE 0xC2 +#define CMD_CMF_SYNC_WQE 0xE8 #define CMD_WQE_MASK 0xff @@ -4862,3 +5511,308 @@ struct lpfc_grp_hdr { #define LPFC_FW_DUMP 1 #define LPFC_FW_RESET 2 #define LPFC_DV_RESET 3 + +#if !defined(FC_LS_TLV_DTAG_INIT) +/* On an older kernel. Add what should have been in <uapi/scsi/fc/fc_els.h> */ + +/* + * Link Service TLV Descriptor Tag Values + */ +enum fc_ls_tlv_dtag { + ELS_DTAG_LS_REQ_INFO = 0x00000001, + /* Link Service Request Information Descriptor */ + ELS_DTAG_LNK_INTEGRITY = 0x00020001, + /* Link Integrity Notification Descriptor */ + ELS_DTAG_DELIVERY = 0x00020002, + /* Delivery Notification Descriptor */ + ELS_DTAG_PEER_CONGEST = 0x00020003, + /* Peer Congestion Notification Descriptor */ + ELS_DTAG_CONGESTION = 0x00020004, + /* Congestion Notification Descriptor */ + ELS_DTAG_FPIN_REGISTER = 0x00030001, + /* FPIN Registration Descriptor */ +}; + +/* + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. + */ +#define FC_LS_TLV_DTAG_INIT { \ + { ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \ + { ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \ + { ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \ + { ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \ + { ELS_DTAG_CONGESTION, "Congestion Notification" }, \ + { ELS_DTAG_FPIN_REGISTER, "FPIN Registration" }, \ +} + +/* + * Generic Link Service TLV Descriptor format + * + * This structure, as it defines no payload, will also be referred to + * as the "tlv header" - which contains the tag and len fields. + */ +struct fc_tlv_desc { + __be32 desc_tag; /* Notification Descriptor Tag */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __u8 desc_value[0]; /* Descriptor Value */ +}; + +/* Descriptor tag and len fields are considered the mandatory header + * for a descriptor + */ +#define FC_TLV_DESC_HDR_SZ sizeof(struct fc_tlv_desc) + +/* + * Macro, used when initializing payloads, to return the descriptor length. + * Length is size of descriptor minus the tag and len fields. + */ +#define FC_TLV_DESC_LENGTH_FROM_SZ(desc) \ + (sizeof(desc) - FC_TLV_DESC_HDR_SZ) + +/* Macro, used on received payloads, to return the descriptor length */ +#define FC_TLV_DESC_SZ_FROM_LENGTH(tlv) \ + (__be32_to_cpu((tlv)->desc_len) + FC_TLV_DESC_HDR_SZ) + +/* + * This helper is used to walk descriptors in a descriptor list. + * Given the address of the current descriptor, which minimally contains a + * tag and len field, calculate the address of the next descriptor based + * on the len field. 
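+ *
+ * A typical walk over "bytes_remain" bytes of descriptors (a sketch,
+ * assuming an FPIN payload laid out as struct lpfc_els_fpin below;
+ * variable names are illustrative):
+ *
+ *	struct fc_tlv_desc *tlv = &fpin->fpin_desc[0];
+ *
+ *	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ *	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+ *		dtag = __be32_to_cpu(tlv->desc_tag);
+ *		(handle the descriptor according to dtag)
+ *		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ *		tlv = fc_tlv_next_desc(tlv);
+ *	}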
+ */
+static inline void *fc_tlv_next_desc(void *desc)
+{
+	struct fc_tlv_desc *tlv = desc;
+
+	return (desc + FC_TLV_DESC_SZ_FROM_LENGTH(tlv));
+}
+
+enum fc_fpin_li_event_types {
+	FPIN_LI_UNKNOWN = 0x0,
+	FPIN_LI_LINK_FAILURE = 0x1,
+	FPIN_LI_LOSS_OF_SYNC = 0x2,
+	FPIN_LI_LOSS_OF_SIG = 0x3,
+	FPIN_LI_PRIM_SEQ_ERR = 0x4,
+	FPIN_LI_INVALID_TX_WD = 0x5,
+	FPIN_LI_INVALID_CRC = 0x6,
+	FPIN_LI_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_LI_EVT_TYPES_INIT { \
+	{ FPIN_LI_UNKNOWN, "Unknown" }, \
+	{ FPIN_LI_LINK_FAILURE, "Link Failure" }, \
+	{ FPIN_LI_LOSS_OF_SYNC, "Loss of Synchronization" }, \
+	{ FPIN_LI_LOSS_OF_SIG, "Loss of Signal" }, \
+	{ FPIN_LI_PRIM_SEQ_ERR, "Primitive Sequence Protocol Error" }, \
+	{ FPIN_LI_INVALID_TX_WD, "Invalid Transmission Word" }, \
+	{ FPIN_LI_INVALID_CRC, "Invalid CRC" }, \
+	{ FPIN_LI_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Link Integrity Notification Descriptor
+ */
+struct fc_fn_li_desc {
+	__be32 desc_tag;	/* Descriptor Tag (0x00020001) */
+	__be32 desc_len;	/* Length of Descriptor (in bytes).
+				 * Size of descriptor excluding
+				 * desc_tag and desc_len fields.
+				 */
+	__be64 detecting_wwpn;	/* Port Name that detected event */
+	__be64 attached_wwpn;	/* Port Name of device attached to
+				 * detecting Port Name
+				 */
+	__be16 event_type;	/* see enum fc_fpin_li_event_types */
+	__be16 event_modifier;	/* Implementation specific value
+				 * describing the event type
+				 */
+	__be32 event_threshold;	/* duration in ms of the link
+				 * integrity detection cycle
+				 */
+	__be32 event_count;	/* minimum number of event
+				 * occurrences during the event
+				 * threshold to cause the LI event
+				 */
+	__be32 pname_count;	/* number of portname_list elements */
+	__be64 pname_list[0];	/* list of N_Port_Names accessible
+				 * through the attached port
+				 */
+};
+
+#endif /* !defined(FC_LS_TLV_DTAG_INIT) */
+
+#if !defined(FC_FPIN_DELI_EVT_TYPES_INIT)
+
+enum fc_fpin_deli_event_types {
+	FPIN_DELI_UNKNOWN = 0x0,
+	FPIN_DELI_TIMEOUT = 0x1,
+	FPIN_DELI_UNABLE_TO_ROUTE = 0x2,
+	FPIN_DELI_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_DELI_EVT_TYPES_INIT { \
+	{ FPIN_DELI_UNKNOWN, "Unknown" }, \
+	{ FPIN_DELI_TIMEOUT, "Timeout" }, \
+	{ FPIN_DELI_UNABLE_TO_ROUTE, "Unable to Route" }, \
+	{ FPIN_DELI_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Delivery Descriptor
+ */
+struct fc_fn_deli_desc {
+	__be32 desc_tag;	/* Descriptor Tag (0x00020002) */
+	__be32 desc_len;	/* Length of Descriptor (in bytes).
+				 * Size of descriptor excluding
+				 * desc_tag and desc_len fields.
+				 */
+	__be64 detecting_wwpn;	/* Port Name that detected event */
+	__be64 attached_wwpn;	/* Port Name of device attached to
+				 * detecting Port Name
+				 */
+	__be32 deli_reason_code; /* See enum fc_fpin_deli_evt_types
+				  * for values.
+				  */
+};
+#endif /* !defined(FC_FPIN_DELI_EVT_TYPES_INIT) */
+
+#if !defined(FC_FPIN_CONGN_EVT_TYPES_INIT)
+enum fc_fpin_congn_event_types {
+	FPIN_CONGN_CLEAR = 0x0,
+	FPIN_CONGN_LOST_CREDIT = 0x1,
+	FPIN_CONGN_CREDIT_STALL = 0x2,
+	FPIN_CONGN_OVERSUBSCRIPTION = 0x3,
+	FPIN_CONGN_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_CONGN_EVT_TYPES_INIT { \
+	{ FPIN_CONGN_CLEAR, "None" }, \
+	{ FPIN_CONGN_LOST_CREDIT, "Lost Credit" }, \
+	{ FPIN_CONGN_CREDIT_STALL, "Credit Stall" }, \
+	{ FPIN_CONGN_OVERSUBSCRIPTION, "Oversubscription" }, \
+	{ FPIN_CONGN_DEVICE_SPEC, "Device Specific" }, \
+}
+
+/*
+ * Peer Congestion Notification Descriptor
+ */
+struct fc_fn_peer_congn_desc {
+	__be32 desc_tag;	/* Descriptor Tag (0x00020003) */
+	__be32 desc_len;	/* Length of Descriptor (in bytes).
+				 * Size of descriptor excluding
+				 * desc_tag and desc_len fields.
+				 */
+	__be64 detecting_wwpn;	/* Port Name that detected event */
+	__be64 attached_wwpn;	/* Port Name of device attached to
+				 * detecting Port Name
+				 */
+	__be16 event_type;	/* see enum fc_fpin_congn_event_types */
+	__be16 event_modifier;	/* Implementation specific value
+				 * describing the event type
+				 */
+	__be32 event_period;	/* duration in ms of the detected
+				 * congestion event
+				 */
+	__be32 pname_count;	/* number of portname_list elements */
+	__be64 pname_list[0];	/* list of N_Port_Names accessible
+				 * through the attached port
+				 */
+};
+
+/*
+ * Congestion Notification Descriptor
+ */
+struct fc_fn_congn_desc {
+	__be32 desc_tag;	/* Descriptor Tag (0x00020004) */
+	__be32 desc_len;	/* Length of Descriptor (in bytes).
+				 * Size of descriptor excluding
+				 * desc_tag and desc_len fields.
+				 */
+	__be16 event_type;	/* see enum fc_fpin_congn_event_types */
+	__be16 event_modifier;	/* Implementation specific value
+				 * describing the event type
+				 */
+	__be32 event_period;	/* duration in ms of the detected
+				 * congestion event
+				 */
+	__u8 severity;		/* Urgency of notification */
+	__u8 rsvd[3];
+};
+
+#endif /* !defined(FC_FPIN_CONGN_EVT_TYPES_INIT) */
+
+enum fc_fpin_congn_sev_types {
+	FPIN_CONGN_SEV_WARNING = 0xf1,
+	FPIN_CONGN_SEV_ERROR = 0xf7,
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_FPIN_CONGN_SEV_INIT { \
+	{ FPIN_CONGN_SEV_WARNING, "Warning" }, \
+	{ FPIN_CONGN_SEV_ERROR, "Alarm" }, \
+}
+
+/*
+ * ELS_FPIN - Fabric Performance Impact Notification
+ */
+struct lpfc_els_fpin {
+	__u8 fpin_cmd;		/* command (0x16) */
+	__u8 fpin_zero[3];	/* specified as zero - part of cmd */
+	__be32 desc_len;	/* Length of Descriptor List (in bytes).
+				 * Size of ELS excluding fpin_cmd,
+				 * fpin_zero and desc_len fields.
+				 */
+	struct fc_tlv_desc fpin_desc[0];	/* Descriptor list */
+};
+
+/* Used for logging FPIN messages */
+#define LPFC_FPIN_WWPN_LINE_SZ 128
+#define LPFC_FPIN_WWPN_LINE_CNT 6
+#define LPFC_FPIN_WWPN_NUM_LINE 6
+
+/*
+ * Macro that declares tables and a routine to perform enum type to
+ * ASCII string lookup.
+ *
+ * Defines a <key,value> table for an enum. Uses xxx_INIT defines for
+ * the enum to populate the table. Macro defines a routine (named
+ * by caller) that will search all elements of the table for the key
+ * and return the name string if found or "Unrecognized" if not found.
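+ *
+ * Hedged example of the expected instantiation and use (the routine
+ * name below is illustrative):
+ *
+ *	DECLARE_ENUM2STR_LOOKUP(lpfc_get_li_event_nm,
+ *				fc_fpin_li_event_types,
+ *				FC_FPIN_LI_EVT_TYPES_INIT);
+ *	...
+ *	pr_info("FPIN LI event: %s\n",
+ *		lpfc_get_li_event_nm(be16_to_cpu(li->event_type)));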
+ */ +#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \ +static struct { \ + enum enum_name value; \ + char *name; \ +} fc_##enum_name##_e2str_names[] = enum_init; \ +static const char *(routine)(enum enum_name table_key) \ +{ \ + int i; \ + char *name = "Unrecognized"; \ + \ + for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\ + if (fc_##enum_name##_e2str_names[i].value == table_key) {\ + name = fc_##enum_name##_e2str_names[i].name; \ + break; \ + } \ + } \ + return name; \ +} + diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index d48414e295a0..8d609bef82d6 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -24,62 +24,7 @@ #include <linux/pci.h> const struct pci_device_id lpfc_id_table[] = { - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, - PCI_ANY_ID, PCI_ANY_ID, }, +#ifndef BUILD_BRCMFCOE {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, @@ -92,35 +37,29 @@ const struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, - 
PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, - PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, PCI_ANY_ID, PCI_ANY_ID, }, +#ifndef BUILD_RHEL8 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, - PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, PCI_ANY_ID, PCI_ANY_ID, }, +#endif + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, + PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC, PCI_ANY_ID, PCI_ANY_ID, }, +#else + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, + PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, PCI_ANY_ID, PCI_ANY_ID, }, +#endif { 0 } }; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e8813d26e594..aab455a3629e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -38,8 +38,9 @@ #include <linux/percpu.h> #include <linux/msi.h> #include <linux/irq.h> -#include <linux/bitops.h> #include <linux/crash_dump.h> +#include <linux/cpu.h> +#include <linux/cpuhotplug.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -48,7 +49,11 @@ #include <scsi/scsi_tcq.h> #include <scsi/fc/fc_fs.h> +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -66,9 +71,25 @@ #include "lpfc_version.h" #include "lpfc_ids.h" +/* + * Later versions of the kernel removed support of the + * initialization function macros. 
In this case, define + * the macros as nops to allow the code to compile + */ + +#ifndef __devinit +#define __devinit +#define __devexit +#define __devexit_p(x) x +#endif + +static enum cpuhp_state lpfc_cpuhp_state; /* Used when mapping IRQ vectors in a driver centric manner */ static uint32_t lpfc_present_cpu; +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_add(struct lpfc_hba *phba); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -90,6 +111,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); +static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -150,7 +172,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0324 Config Port initialization " "error, mbxCmd x%x READ_NVPARM, " "mbxStatus x%x\n", @@ -174,7 +196,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) lpfc_read_rev(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0439 Adapter failed to init, mbxCmd x%x " "READ_REV, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); @@ -189,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) */ if (mb->un.varRdRev.rr == 0) { vp->rev.rBit = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0440 Adapter failed to init, READ_REV has " "missing revision information.\n"); mempool_free(pmb, phba->mbox_mem_pool); @@ -250,13 +272,15 @@ lpfc_config_port_prep(struct lpfc_hba *phba) */ if (mb->un.varDmp.word_cnt == 0) break; - if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) - mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; + + i = mb->un.varDmp.word_cnt * sizeof(uint32_t); + if (offset + i > DMP_VPD_SIZE) + i = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, - lpfc_vpd_data + offset, - mb->un.varDmp.word_cnt); - offset += mb->un.varDmp.word_cnt; - } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); + lpfc_vpd_data + offset, i); + offset += i; + } while (offset < DMP_VPD_SIZE); + lpfc_parse_vpd(phba, lpfc_vpd_data, offset); kfree(lpfc_vpd_data); @@ -332,10 +356,8 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /** * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, - * cfg_soft_wwnn, cfg_soft_wwpn * @vport: pointer to lpfc vport data structure. * - * * Return codes * None. 
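 *
 * Selection sketch (a summary of the logic below, not new behavior):
 * the service-params portName is adopted when fc_portname is still
 * empty, when the vendor version level carries the FA-PWWN key, or
 * when FAWWPN_SET is already latched; otherwise the current
 * fc_portname is kept and merely compared against the service params
 * to decide whether to raise FAWWPN_PARAM_CHG.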
**/ @@ -345,19 +367,11 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport) uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; - /* If the soft name exists then update it using the service params */ - if (vport->phba->cfg_soft_wwnn) - u64_to_wwn(vport->phba->cfg_soft_wwnn, - vport->fc_sparam.nodeName.u.wwn); - if (vport->phba->cfg_soft_wwpn) - u64_to_wwn(vport->phba->cfg_soft_wwpn, - vport->fc_sparam.portName.u.wwn); - /* - * If the name is empty or there exists a soft name + * If the name is empty * then copy the service params name, otherwise use the fc name */ - if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) + if (vport->fc_nodename.u.wwn[0] == 0) memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); else @@ -368,19 +382,19 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport) * If the port name has changed, then set the Param changes flag * to unreg the login */ - if (vport->fc_portname.u.wwn[0] != 0 && + if ((vport->fc_portname.u.wwn[0] != 0) && memcmp(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name))) vport->vport_flag |= FAWWPN_PARAM_CHG; - if (vport->fc_portname.u.wwn[0] == 0 || - vport->phba->cfg_soft_wwpn || - (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || - vport->vport_flag & FAWWPN_SET) { + if ((vport->fc_portname.u.wwn[0] == 0) || + (vvvl == 1 && (cpu_to_be32(*fawwpn_key)) == FAPWWN_KEY_VENDOR) || + (vport->vport_flag & FAWWPN_SET)) { memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); vport->vport_flag &= ~FAWWPN_SET; - if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) + if (vvvl == 1 && (cpu_to_be32(*fawwpn_key)) == + FAPWWN_KEY_VENDOR) vport->vport_flag |= FAWWPN_SET; } else @@ -439,7 +453,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0448 Adapter failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); @@ -493,7 +507,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_read_config(phba, pmb); pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0453 Adapter failed to init, mbxCmd x%x " "READ_CONFIG, mbxStatus x%x\n", mb->mbxCommand, mb->mbxStatus); @@ -506,21 +520,12 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - i = (mb->un.varRdConfig.max_xri + 1); - if (phba->cfg_hba_queue_depth > i) { + if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "3359 HBA queue depth changed from %d to %d\n", - phba->cfg_hba_queue_depth, i); - phba->cfg_hba_queue_depth = i; - } - - /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ - i = (mb->un.varRdConfig.max_xri >> 3); - if (phba->pport->cfg_lun_queue_depth > i) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "3360 LUN queue depth changed from %d to %d\n", - phba->pport->cfg_lun_queue_depth, i); - phba->pport->cfg_lun_queue_depth = i; + phba->cfg_hba_queue_depth, + mb->un.varRdConfig.max_xri); + phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; } phba->lmt = mb->un.varRdConfig.lmt; @@ -551,7 +556,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) } rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", pmb->u.mb.mbxCommand, @@ -595,31 +600,29 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - phba->hb_outstanding = 0; + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); if (phba->hba_flag & LINK_DISABLED) { - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "2598 Adapter Link is disabled.\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2598 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "2599 Adapter failed to issue DOWN_LINK" - " mbox command rc 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2599 Adapter failed to issue DOWN_LINK" + " mbox command rc 0x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { mempool_free(pmb, phba->mbox_mem_pool); - rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + rc = phba->lpfc_hba_init_link(phba); if (rc) return rc; } @@ -636,9 +639,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0456 Adapter failed to issue " "ASYNCEVT_ENABLE mbox status x%x\n", rc); @@ -658,7 +659,8 @@ lpfc_config_port_post(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0435 Adapter failed " "to get Option ROM version status x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); } @@ -669,7 +671,6 @@ lpfc_config_port_post(struct lpfc_hba *phba) /** * lpfc_hba_init_link - Initialize the FC link * @phba: pointer to lpfc hba data structure. - * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the INIT_LINK mailbox command call. * It is available to other drivers through the lpfc_hba data @@ -681,16 +682,15 @@ lpfc_config_port_post(struct lpfc_hba *phba) * Any other value - error **/ static int -lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) +lpfc_hba_init_link(struct lpfc_hba *phba) { - return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); + return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology); } /** * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology * @phba: pointer to lpfc hba data structure. * @fc_topology: desired fc topology. - * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the INIT_LINK mailbox command call. 
* It is available to other drivers through the lpfc_hba data @@ -702,8 +702,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) * Any other value - error **/ int -lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, - uint32_t flag) +lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology) { struct lpfc_vport *vport = phba->pport; LPFC_MBOXQ_t *pmb; @@ -736,22 +735,22 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) { /* Reset link speed to auto */ - lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, - "1302 Invalid speed for this board:%d " - "Reset link speed to auto.\n", - phba->cfg_link_speed); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "1302 Invalid speed for this board:%d " + "Reset link speed to auto.\n", + phba->cfg_link_speed); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; } lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; if (phba->sli_rev < LPFC_SLI_REV4) lpfc_set_loopback_flag(phba); - rc = lpfc_sli_issue_mbox(phba, pmb, flag); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0498 Adapter failed to init, mbxCmd x%x " - "INIT_LINK, mbxStatus x%x\n", - mb->mbxCommand, mb->mbxStatus); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0498 Adapter failed to init, mbxCmd x%x " + "INIT_LINK, mbxStatus x%x\n", + mb->mbxCommand, mb->mbxStatus); if (phba->sli_rev <= LPFC_SLI_REV3) { /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); @@ -761,13 +760,11 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, readl(phba->HAregaddr); /* flush */ } phba->link_state = LPFC_HBA_ERROR; - if (rc != MBX_BUSY || flag == MBX_POLL) + if (rc != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; - if (flag == MBX_POLL) - mempool_free(pmb, phba->mbox_mem_pool); return 0; } @@ -775,7 +772,6 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, /** * lpfc_hba_down_link - this routine downs the FC link * @phba: pointer to lpfc hba data structure. - * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT * * This routine will issue the DOWN_LINK mailbox command call. 
* It is available to other drivers through the lpfc_hba data @@ -786,7 +782,7 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, * Any other value - error **/ static int -lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) +lpfc_hba_down_link(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmb; int rc; @@ -797,24 +793,19 @@ lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) return -ENOMEM; } - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "0491 Adapter Link is disabled.\n"); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0491 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, pmb, flag); + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "2522 Adapter failed to issue DOWN_LINK" - " mbox command rc 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2522 Adapter failed to issue DOWN_LINK" + " mbox command rc 0x%x\n", rc); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - if (flag == MBX_POLL) - mempool_free(pmb, phba->mbox_mem_pool); - return 0; } @@ -1141,13 +1132,21 @@ lpfc_hba_down_post(struct lpfc_hba *phba) * be cleared by the worker thread after it has taken the event bitmap out. **/ static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_hb_timeout(unsigned long ptr) +#else lpfc_hb_timeout(struct timer_list *t) +#endif { struct lpfc_hba *phba; uint32_t tmo_posted; unsigned long iflag; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + phba = (struct lpfc_hba *)ptr; +#else phba = from_timer(phba, t, hb_tmofunc); +#endif /* Check for heart beat timeout conditions */ spin_lock_irqsave(&phba->pport->work_port_lock, iflag); @@ -1175,12 +1174,20 @@ lpfc_hb_timeout(struct timer_list *t) * be cleared by the worker thread after it has taken the event bitmap out. **/ static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_rrq_timeout(unsigned long ptr) +#else lpfc_rrq_timeout(struct timer_list *t) +#endif { struct lpfc_hba *phba; unsigned long iflag; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + phba = (struct lpfc_hba *)ptr; +#else phba = from_timer(phba, t, rrq_tmr); +#endif spin_lock_irqsave(&phba->pport->work_port_lock, iflag); if (!(phba->pport->load_flag & FC_UNLOADING)) phba->hba_flag |= HBA_RRQ_ACTIVE; @@ -1214,10 +1221,10 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) unsigned long drvr_flag; spin_lock_irqsave(&phba->hbalock, drvr_flag); - phba->hb_outstanding = 0; + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - /* Check and reset heart-beat timer is necessary */ + /* Check and reset heart-beat timer if necessary */ mempool_free(pmboxq, phba->mbox_mem_pool); if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && !(phba->link_state == LPFC_HBA_ERROR) && @@ -1228,6 +1235,78 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) return; } +/** + * lpfc_idle_stat_delay_work - idle_stat tracking + * + * This routine tracks per-cq idle_stat and determines polling decisions. + * + * Note: Will skip irq polling decisions if CGN mgt is enabled. 
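+ *
+ * Worked example with illustrative numbers: if, over one sampling
+ * period, diff_wall = 1000 and diff_idle = 900, then busy_time = 100,
+ * the busy percentage is 100 * 100 / 1000 = 10, and
+ * idle_percent = 100 - 10 = 90; since 90 is not below 15, the cq stays
+ * in LPFC_IRQ_POLL. Only a CPU that is more than 85% busy (idle_percent
+ * below 15) drops its cq back to LPFC_QUEUE_WORK.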
+ * + * Return codes: + * None + **/ +static void +lpfc_idle_stat_delay_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(to_delayed_work(work), + struct lpfc_hba, + idle_stat_delay_work); + struct lpfc_queue *cq; + struct lpfc_sli4_hdw_queue *hdwq; + struct lpfc_idle_stat *idle_stat; + u32 i, idle_percent; + u64 wall, wall_idle, diff_wall, diff_idle, busy_time; + + if (phba->pport->load_flag & FC_UNLOADING) + return; + + if (phba->link_state == LPFC_HBA_ERROR || + phba->pport->fc_flag & FC_OFFLINE_MODE || + phba->cmf_active_mode != LPFC_CFG_OFF) + goto requeue; + + for_each_present_cpu(i) { + hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; + cq = hdwq->io_cq; + + /* Skip if we've already handled this cq's primary CPU */ + if (cq->chann != i) + continue; + + idle_stat = &phba->sli4_hba.idle_stat[i]; + + /* get_cpu_idle_time returns values as running counters. Thus, + * to know the amount for this period, the prior counter values + * need to be subtracted from the current counter values. + * From there, the idle time stat can be calculated as a + * percentage of 100 - the sum of the other consumption times. + */ + wall_idle = get_cpu_idle_time(i, &wall, 1); + diff_idle = wall_idle - idle_stat->prev_idle; + diff_wall = wall - idle_stat->prev_wall; + + if (diff_wall <= diff_idle) + busy_time = 0; + else + busy_time = diff_wall - diff_idle; + + idle_percent = div64_u64(100 * busy_time, diff_wall); + idle_percent = 100 - idle_percent; + + if (idle_percent < 15) + cq->poll_mode = LPFC_QUEUE_WORK; + else + cq->poll_mode = LPFC_IRQ_POLL; + + idle_stat->prev_idle = wall_idle; + idle_stat->prev_wall = wall; + } + +requeue: + schedule_delayed_work(&phba->idle_stat_delay_work, + msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); +} + static void lpfc_hb_eq_delay_work(struct work_struct *work) { @@ -1235,10 +1314,9 @@ lpfc_hb_eq_delay_work(struct work_struct *work) struct lpfc_hba, eq_delay_work); struct lpfc_eq_intr_info *eqi, *eqi_new; struct lpfc_queue *eq, *eq_next; - unsigned char *eqcnt = NULL; + unsigned char *ena_delay = NULL; uint32_t usdelay; int i; - bool update = false; if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) return; @@ -1247,44 +1325,35 @@ lpfc_hb_eq_delay_work(struct work_struct *work) phba->pport->fc_flag & FC_OFFLINE_MODE) goto requeue; - eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), - GFP_KERNEL); - if (!eqcnt) + ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), + GFP_KERNEL); + if (!ena_delay) goto requeue; - if (phba->cfg_irq_chann > 1) { - /* Loop thru all IRQ vectors */ - for (i = 0; i < phba->cfg_irq_chann; i++) { - /* Get the EQ corresponding to the IRQ vector */ - eq = phba->sli4_hba.hba_eq_hdl[i].eq; - if (!eq) - continue; - if (eq->q_mode) { - update = true; - break; - } - if (eqcnt[eq->last_cpu] < 2) - eqcnt[eq->last_cpu]++; + for (i = 0; i < phba->cfg_irq_chann; i++) { + /* Get the EQ corresponding to the IRQ vector */ + eq = phba->sli4_hba.hba_eq_hdl[i].eq; + if (!eq) + continue; + if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { + eq->q_flag &= ~HBA_EQ_DELAY_CHK; + ena_delay[eq->last_cpu] = 1; } - } else - update = true; + } for_each_present_cpu(i) { eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); - if (!update && eqcnt[i] < 2) { - eqi->icnt = 0; - continue; + if (ena_delay[i]) { + usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; + if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) + usdelay = LPFC_MAX_AUTO_EQ_DELAY; + } else { + usdelay = 0; } - - usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * - 
LPFC_EQ_DELAY_STEP;
-		if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
-			usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
 		eqi->icnt = 0;
 		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
-			if (eq->last_cpu != i) {
+			if (unlikely(eq->last_cpu != i)) {
 				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
 						      eq->last_cpu);
 				list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1365,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
 		}
 	}
 
-	kfree(eqcnt);
+	kfree(ena_delay);
 
 requeue:
 	queue_delayed_work(phba->wq, &phba->eq_delay_work,
@@ -1330,6 +1399,60 @@ static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
 	}
 }
 
+/**
+ * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * If an HB mbox is not already in progress, this routine will allocate
+ * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
+ * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
+ **/
+int
+lpfc_issue_hb_mbox(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	int retval;
+
+	/* Is a Heartbeat mbox already in progress */
+	if (phba->hba_flag & HBA_HBEAT_INP)
+		return 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+
+	lpfc_heart_beat(phba, pmboxq);
+	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+	pmboxq->vport = phba->pport;
+	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -ENXIO;
+	}
+	phba->hba_flag |= HBA_HBEAT_INP;
+
+	return 0;
+}
+
+/**
+ * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
+ * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
+ * of the value of lpfc_enable_hba_heartbeat.
+ * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
+ * try to issue a MBX_HEARTBEAT mbox command.
+ **/
+void
+lpfc_issue_hb_tmo(struct lpfc_hba *phba)
+{
+	if (phba->cfg_enable_hba_heartbeat)
+		return;
+	phba->hba_flag |= HBA_HBEAT_TMO;
+}
+
 /**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
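 *
 * Flag lifecycle (a summary of the helpers added above, not new
 * behavior): lpfc_issue_hb_mbox() sets HBA_HBEAT_INP while a
 * MBX_HEARTBEAT is in flight and lpfc_hb_mbox_cmpl() clears it, while
 * lpfc_issue_hb_tmo() latches HBA_HBEAT_TMO only when
 * cfg_enable_hba_heartbeat is off, so the next timer pop still issues
 * one forced MBX_HEARTBEAT.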
@@ -1350,9 +1473,9 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *phba) { struct lpfc_vport **vports; - LPFC_MBOXQ_t *pmboxq; struct lpfc_dmabuf *buf_ptr; - int retval, i; + int retval = 0; + int i, tmo; struct lpfc_sli *psli = &phba->sli; LIST_HEAD(completions); @@ -1365,7 +1488,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_rcv_seq_check_edtov(vports[i]); - lpfc_fdmi_num_disc_check(vports[i]); + lpfc_fdmi_change_check(vports[i]); } lpfc_destroy_vport_work_array(phba, vports); @@ -1374,24 +1497,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) (phba->pport->fc_flag & FC_OFFLINE_MODE)) return; - spin_lock_irq(&phba->pport->work_port_lock); - - if (time_after(phba->last_completion_time + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), - jiffies)) { - spin_unlock_irq(&phba->pport->work_port_lock); - if (!phba->hb_outstanding) - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - else - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); - return; - } - spin_unlock_irq(&phba->pport->work_port_lock); - if (phba->elsbuf_cnt && (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { spin_lock_irq(&phba->hbalock); @@ -1411,37 +1516,43 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* If there is no heart beat outstanding, issue a heartbeat command */ if (phba->cfg_enable_hba_heartbeat) { - if (!phba->hb_outstanding) { + /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ + spin_lock_irq(&phba->pport->work_port_lock); + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { + spin_unlock_irq(&phba->pport->work_port_lock); + if (phba->hba_flag & HBA_HBEAT_INP) + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + else + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + goto out; + } + spin_unlock_irq(&phba->pport->work_port_lock); + + /* Check if a MBX_HEARTBEAT is already in progress */ + if (phba->hba_flag & HBA_HBEAT_INP) { + /* + * If heart beat timeout called with HBA_HBEAT_INP set + * we need to give the hb mailbox cmd a chance to + * complete or TMO. 
+ */ + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0459 Adapter heartbeat still out" + "standing:last compl time was %d ms.\n", + jiffies_to_msecs(jiffies + - phba->last_completion_time)); + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + } else { if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && (list_empty(&psli->mboxq))) { - pmboxq = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL); - if (!pmboxq) { - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * - LPFC_HB_MBOX_INTERVAL)); - return; - } - lpfc_heart_beat(phba, pmboxq); - pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; - pmboxq->vport = phba->pport; - retval = lpfc_sli_issue_mbox(phba, pmboxq, - MBX_NOWAIT); - - if (retval != MBX_BUSY && - retval != MBX_SUCCESS) { - mempool_free(pmboxq, - phba->mbox_mem_pool); - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * - LPFC_HB_MBOX_INTERVAL)); - return; + retval = lpfc_issue_hb_mbox(phba); + if (retval) { + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + goto out; } phba->skipped_hb = 0; - phba->hb_outstanding = 1; } else if (time_before_eq(phba->last_completion_time, phba->skipped_hb)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -1452,30 +1563,23 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } else phba->skipped_hb = jiffies; - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); - return; - } else { - /* - * If heart beat timeout called with hb_outstanding set - * we need to give the hb mailbox cmd a chance to - * complete or TMO. - */ - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0459 Adapter heartbeat still out" - "standing:last compl time was %d ms.\n", - jiffies_to_msecs(jiffies - - phba->last_completion_time)); - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + goto out; } } else { - mod_timer(&phba->hb_tmofunc, - jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + /* Check to see if we want to force a MBX_HEARTBEAT */ + if (phba->hba_flag & HBA_HBEAT_TMO) { + retval = lpfc_issue_hb_mbox(phba); + if (retval) + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); + else + tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); + goto out; + } + tmo = (1000 * LPFC_HB_MBOX_INTERVAL); } +out: + mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); } /** @@ -1553,11 +1657,11 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) return; } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0479 Deferred Adapter Hardware Error " - "Data: x%x x%x x%x\n", - phba->work_hs, - phba->work_status[0], phba->work_status[1]); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0479 Deferred Adapter Hardware Error " + "Data: x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; @@ -1708,7 +1812,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) temp_event_data.event_code = LPFC_CRIT_TEMP; temp_event_data.data = (uint32_t)temperature; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0406 Adapter maximum temperature exceeded " "(%ld), taking this port offline " "Data: x%x x%x x%x\n", @@ -1719,8 +1823,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), (char *) &temp_event_data, - SCSI_NL_VID_TYPE_PCI - | PCI_VENDOR_ID_EMULEX); + LPFC_NL_VENDOR_ID); spin_lock_irq(&phba->hbalock); phba->over_temp_state = HBA_OVER_TEMP; @@ -1732,7 +1835,7 @@ 
lpfc_handle_eratt_s3(struct lpfc_hba *phba) * failure is a value other than FFER6. Do not call the offline * twice. This is the adapter hardware error path. */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0457 Adapter Hardware Error " "Data: x%x x%x x%x\n", phba->work_hs, @@ -1742,7 +1845,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(event_data), (char *) &event_data, - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + LPFC_NL_VENDOR_ID); lpfc_offline_eratt(phba); } @@ -1766,6 +1869,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, { int rc; uint32_t intr_mode; + LPFC_MBOXQ_t *mboxq; if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { @@ -1780,9 +1884,27 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, /* need reset: attempt for port recovery */ if (en_rn_msg) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2887 Reset Needed: Attempting Port " "Recovery...\n"); + + /* If we are no wait, the HBA has been reset and is not + * functional, thus we should clear + * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. + */ + if (mbx_action == LPFC_MBX_NO_WAIT) { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } + spin_unlock_irq(&phba->hbalock); + } + lpfc_offline_prep(phba, mbx_action); lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); @@ -1790,14 +1912,14 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, lpfc_sli4_disable_intr(phba); rc = lpfc_sli_brdrestart(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6309 Failed to restart board\n"); return rc; } /* request and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3175 Failed to enable interrupt\n"); return -EIO; } @@ -1836,7 +1958,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) * we cannot communicate with the pci card anyway. 
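 *
 * (On the recovery loop further below: it polls the port semaphore
 * register once per second, for at most ue_to_sr / 1000 iterations,
 * which implies ue_to_sr is expressed in milliseconds.)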
*/ if (pci_channel_offline(phba->pcidev)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3166 pci channel is offline\n"); lpfc_sli4_offline_eratt(phba); return; @@ -1859,7 +1981,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) lpfc_sli4_offline_eratt(phba); return; } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "7623 Checking UE recoverable"); for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { @@ -1876,7 +1998,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) msleep(1000); } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4827 smphr_port_status x%x : Waited %dSec", smphr_port_status, i); @@ -1894,14 +2016,14 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) LPFC_MBX_NO_WAIT, en_rn_msg); if (rc == 0) return; - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "4215 Failed to recover UE"); break; } } } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "7624 Firmware not ready: Failing UE recovery," " waited %dSec", i); phba->link_state = LPFC_HBA_ERROR; @@ -1914,7 +2036,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) &portstat_reg.word0); /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3151 PCI bus read access failure: x%x\n", readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); lpfc_sli4_offline_eratt(phba); @@ -1923,10 +2045,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2889 Port Overtemperature event, " - "taking port offline Data: x%x x%x\n", - reg_err1, reg_err2); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2889 Port Overtemperature event, " + "taking port offline Data: x%x x%x\n", + reg_err1, reg_err2); phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; @@ -1948,17 +2070,17 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3143 Port Down: Firmware Update " "Detected\n"); en_rn_msg = false; } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3144 Port Down: Debug Dump\n"); else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3145 Port Down: Provisioning\n"); /* If resets are disabled then leave the HBA alone and return */ @@ -1977,7 +2099,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) break; } /* fall through for not able to recover */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3152 Unrecoverable error\n"); phba->link_state = LPFC_HBA_ERROR; break; @@ -2095,8 +2217,8 @@ lpfc_handle_latt_err_exit: lpfc_linkdown(phba); phba->link_state = LPFC_HBA_ERROR; - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "0300 LATT: Cannot issue READ_LA: 
Data:%d\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); return; } @@ -2458,7 +2580,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; - m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe10100", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TOMCAT: oneConnect = 1; @@ -2479,6 +2602,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe16000", "PCIe", "Obsolete, Unsupported Fibre Channel Adapter"}; break; +#ifndef BUILD_RHEL8 case PCI_DEVICE_ID_LANCER_FCOE: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; @@ -2488,6 +2612,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"OCe15100", "PCIe", "Obsolete, Unsupported FCoE"}; break; +#endif case PCI_DEVICE_ID_LANCER_G6_FC: m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; break; @@ -2788,6 +2913,8 @@ lpfc_cleanup(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_external_dif_support *dp, *next_dp; + unsigned long iflags; int i = 0; if (phba->link_state > LPFC_LINK_DOWN) @@ -2799,22 +2926,22 @@ lpfc_cleanup(struct lpfc_vport *vport) NLP_STE_UNUSED_NODE); if (!ndlp) continue; - spin_lock_irq(&phba->ndlp_lock); + spin_lock_irqsave(&phba->ndlp_lock, iflags); NLP_SET_FREE_REQ(ndlp); - spin_unlock_irq(&phba->ndlp_lock); + spin_unlock_irqrestore(&phba->ndlp_lock, iflags); /* Trigger the release of the ndlp memory */ lpfc_nlp_put(ndlp); continue; } - spin_lock_irq(&phba->ndlp_lock); + spin_lock_irqsave(&phba->ndlp_lock, iflags); if (NLP_CHK_FREE_REQ(ndlp)) { /* The ndlp should not be in memory free mode already */ - spin_unlock_irq(&phba->ndlp_lock); + spin_unlock_irqrestore(&phba->ndlp_lock, iflags); continue; } else /* Indicate request for freeing ndlp memory */ NLP_SET_FREE_REQ(ndlp); - spin_unlock_irq(&phba->ndlp_lock); + spin_unlock_irqrestore(&phba->ndlp_lock, iflags); if (vport->port_type != LPFC_PHYSICAL_PORT && ndlp->nlp_DID == Fabric_DID) { @@ -2845,12 +2972,13 @@ lpfc_cleanup(struct lpfc_vport *vport) */ while (!list_empty(&vport->fc_nodes)) { if (i++ > 3000) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0233 Nodelist not empty\n"); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, - LOG_NODE, + LOG_TRACE_EVENT, "0282 did:x%x ndlp:x%px " "usgmap:x%x refcnt:%d\n", ndlp->nlp_DID, (void *)ndlp, @@ -2864,6 +2992,15 @@ lpfc_cleanup(struct lpfc_vport *vport) msleep(10); } lpfc_cleanup_vports_rrqs(vport, NULL); + + /* Cleanup any discovered External DIF devices for this vport */ + list_for_each_entry_safe(dp, next_dp, &vport->external_dif_list, + listentry) { + spin_lock_irqsave(&vport->external_dif_lock, iflags); + list_del(&dp->listentry); + spin_unlock_irqrestore(&vport->external_dif_lock, iflags); + kfree(dp); + } } /** @@ -2924,6 +3061,138 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); } +/** + * lpfc_cmf_stop - Stop CMF processing + * @phba: pointer to lpfc hba data structure. + * + * This is called when the link goes down or if CMF mode is turned OFF. + * It is also called when going offline or unloaded just before the + * congestion info buffer is unregistered. 
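+ *
+ * For orientation (hedged summary; the budget itself is set up in
+ * lpfc_cmf_signal_init() below): with lpfc_sli_port_speed_get()
+ * reporting a line rate R, cmf_max_line_rate = R * 1024 * 1024 / 10
+ * bytes/sec (dividing by 10 rather than 8 presumably absorbs framing
+ * overhead), and with a timer interval of T ms the per-interval budget
+ * is cmf_link_byte_count = cmf_max_line_rate * T / 1000, which also
+ * seeds cmf_max_bytes_per_interval.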
+ **/ +void +lpfc_cmf_stop(struct lpfc_hba *phba) +{ + int cpu; + struct lpfc_cgn_stat *cgs; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf) + return; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6221 Stop CMF / Cancel Timer\n"); + + /* Cancel the CMF timer */ + hrtimer_cancel(&phba->cmf_timer); + + /* Zero CMF counters */ + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + atomic_set(&phba->cmf_bw_wait, 0); + + /* Resume any blocked IO - Queue unblock on workqueue */ + queue_work(phba->wq, &phba->unblock_request_work); + +#if defined(BUILD_NVME) + if (phba->nvme_io) + queue_work(phba->nvme_io, &phba->nvme_defer_work); +#endif +} + +static inline uint64_t +lpfc_get_max_line_rate(struct lpfc_hba *phba) +{ + uint64_t rate = lpfc_sli_port_speed_get(phba); + + return ((((unsigned long)rate) * 1024 * 1024) / 10); +} + +void +lpfc_cmf_signal_init(struct lpfc_hba *phba) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6223 Signal CMF init\n"); + + /* Use the new fc_linkspeed to recalculate */ + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); + phba->cmf_link_byte_count = (phba->cmf_max_line_rate * + phba->cmf_interval_rate) / 1000; + phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; + + /* This is a signal to firmware to sync up CMF BW with link speed */ + lpfc_issue_cmf_sync_wqe(phba, 0, 0); +} + +/** + * lpfc_cmf_start - Start CMF processing + * @phba: pointer to lpfc hba data structure. + * + * This is called when the link comes up or if CMF mode is turned OFF + * to Monitor or Managed. + **/ +void +lpfc_cmf_start(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* We only do something if CMF is enabled */ + if (!phba->sli4_hba.pc_sli4_params.cmf || + phba->cmf_active_mode == LPFC_CFG_OFF) + return; + + if (phba->pport) + shost = lpfc_shost_from_vport(phba->pport); + else + shost = NULL; + + /* Reinitialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + + atomic_set(&phba->cmf_busy, 0); + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + atomic64_set(&cgs->total_bytes, 0); + atomic64_set(&cgs->rcv_bytes, 0); + atomic_set(&cgs->rx_io_cnt, 0); + atomic64_set(&cgs->rx_latency, 0); + } + phba->cmf_latency.tv_sec = 0; + phba->cmf_latency.tv_nsec = 0; + + lpfc_cmf_signal_init(phba); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6222 Start CMF / Timer\n"); + + phba->cmf_timer_cnt = 0; + hrtimer_start(&phba->cmf_timer, + ktime_set(0, LPFC_CMF_INTERVAL * 1000000), + HRTIMER_MODE_REL); + /* Setup for latency check in IO cmpl routines */ + ktime_get_real_ts64(&phba->cmf_latency); + + atomic_set(&phba->cmf_bw_wait, 0); + atomic_set(&phba->cmf_stop_io, 0); +#if defined(BUILD_NVME) + if (phba->nvme_io) + queue_work(phba->nvme_io, &phba->nvme_defer_work); +#endif +} + /** * lpfc_stop_hba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. 
@@ -2937,15 +3206,17 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 	if (phba->pport)
 		lpfc_stop_vport_timers(phba->pport);
 	cancel_delayed_work_sync(&phba->eq_delay_work);
+	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
 	del_timer_sync(&phba->sli.mbox_tmo);
 	del_timer_sync(&phba->fabric_block_timer);
 	del_timer_sync(&phba->eratt_poll);
 	del_timer_sync(&phba->hb_tmofunc);
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		del_timer_sync(&phba->rrq_tmr);
 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	}
-	phba->hb_outstanding = 0;
+	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
 
 	switch (phba->pci_dev_grp) {
 	case LPFC_PCI_DEV_LP:
@@ -2953,11 +3224,11 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 		del_timer_sync(&phba->fcp_poll_timer);
 		break;
 	case LPFC_PCI_DEV_OC:
 		/* Stop any OneConnect device specific driver timers */
 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		break;
 	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"0297 Invalid device group (x%x)\n",
 				phba->pci_dev_grp);
 		break;
@@ -3004,10 +3275,10 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
 		/* Check active mailbox complete status every 2ms */
 		msleep(2);
 		if (time_after(jiffies, timeout)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2813 Mgmt IO is Blocked %x "
-					"- mbox cmd %x still active\n",
-					phba->sli.sli_flag, actcmd);
+			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+					"2813 Mgmt IO is Blocked %x "
+					"- mbox cmd %x still active\n",
+					phba->sli.sli_flag, actcmd);
 			break;
 		}
 	}
@@ -3053,11 +3324,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
 				continue;
 			}
 			ndlp->nlp_rpi = rpi;
-			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-					 "0009 rpi:%x DID:%x "
-					 "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
-					 ndlp->nlp_DID, ndlp->nlp_flag,
-					 ndlp->nlp_usg_map, ndlp);
+			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+					 LOG_NODE | LOG_DISCOVERY,
+					 "0009 Assign RPI x%x to ndlp x%px "
+					 "DID:x%06x flg:x%x map:x%x\n",
+					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag, ndlp->nlp_usg_map);
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -3347,13 +3619,17 @@ lpfc_online(struct lpfc_hba *phba)
 		/* Reestablish the local initiator port.
 		 * The offline process destroyed the previous lport.
 		 */
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
-				!phba->nvmet_support) {
-			error = lpfc_nvme_create_localport(phba->pport);
-			if (error)
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6132 NVME restore reg failed "
-					"on nvmei error x%x\n", error);
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			if (!phba->nvmet_support) {
+				error = lpfc_nvme_create_localport(phba->pport);
+				if (error) {
+					lpfc_printf_log(phba, KERN_ERR,
+							LOG_TRACE_EVENT,
+							"6132 NVME restore reg "
+							"failed on nvmei error "
+							"x%x\n", error);
+				}
+			}
 		}
 	} else {
 		lpfc_sli_queue_init(phba);
@@ -3387,6 +3663,8 @@ lpfc_online(struct lpfc_hba *phba)
 	if (phba->cfg_xri_rebalancing)
 		lpfc_create_multixri_pools(phba);
 
+	lpfc_cpuhp_add(phba);
+
 	lpfc_unblock_mgmt_io(phba);
 	return 0;
 }
@@ -3453,10 +3731,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						 &vports[i]->fc_nodes,
 						 nlp_listp) {
-				if (!NLP_CHK_NODE_ACT(ndlp))
-					continue;
-				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+				if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+				    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+					/* Driver must assume RPI is invalid for
+					 * any unused or inactive node.
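+					 * LPFC_RPI_ALLOC_ERROR serves as the
+					 * driver's "no RPI allocated" sentinel,
+					 * presumably so a stale RPI cannot be
+					 * freed a second time at node teardown.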
+ */ + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; continue; + } + if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); @@ -3472,16 +3755,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) * comes back online. */ if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(ndlp->vport, - KERN_INFO, LOG_NODE, - "0011 lpfc_offline: " - "ndlp:x%px did %x " - "usgmap:x%x rpi:%x\n", - ndlp, ndlp->nlp_DID, - ndlp->nlp_usg_map, - ndlp->nlp_rpi); - + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0011 Free RPI x%x on " + "ndlp:x%px did x%x " + "usgmap:x%x\n", + ndlp->nlp_rpi, ndlp, + ndlp->nlp_DID, + ndlp->nlp_usg_map); lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } lpfc_unreg_rpi(vports[i], ndlp); } @@ -3545,6 +3828,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + __lpfc_cpuhp_remove(phba); if (phba->cfg_xri_rebalancing) lpfc_destroy_multixri_pools(phba); @@ -3683,7 +3967,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); if (sglq_entry == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2562 Failure to allocate an " "ELS sgl entry:%d\n", i); rc = -ENOMEM; @@ -3694,7 +3979,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) &sglq_entry->phys); if (sglq_entry->virt == NULL) { kfree(sglq_entry); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2563 Failure to allocate an " "ELS mbuf:%d\n", i); rc = -ENOMEM; @@ -3749,7 +4035,8 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) &phba->sli4_hba.lpfc_els_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2400 Failed to allocate xri for " "ELS sgl\n"); rc = -ENOMEM; @@ -3804,7 +4091,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); if (sglq_entry == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "6303 Failure to allocate an " "NVMET sgl entry:%d\n", i); rc = -ENOMEM; @@ -3815,7 +4103,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) &sglq_entry->phys); if (sglq_entry->virt == NULL) { kfree(sglq_entry); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "6304 Failure to allocate an " "NVMET buf:%d\n", i); rc = -ENOMEM; @@ -3871,7 +4160,8 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "6307 Failed to allocate xri for " "NVMET sgl\n"); rc = -ENOMEM; @@ -4045,7 +4335,8 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) &io_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "6075 Failed to allocate xri for " "nvme buffer\n"); rc = -ENOMEM; @@ -4115,7 +4406,8 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && (((unsigned long)(lpfc_ncmd->data) & (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + 
lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "3369 Memory alignment err: " "addr=%lx\n", (unsigned long)lpfc_ncmd->data); @@ -4144,7 +4436,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6121 Failed to allocate IOTAG for" " XRI:0x%x\n", lxri); lpfc_sli4_free_xri(phba, lxri); @@ -4195,7 +4487,7 @@ lpfc_get_wwpn(struct lpfc_hba *phba) lpfc_read_nv(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6019 Mailbox failed , mbxCmd x%x " "READ_NV, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &mboxq->u.mqe), @@ -4210,7 +4502,9 @@ lpfc_get_wwpn(struct lpfc_hba *phba) if (phba->sli_rev == LPFC_SLI_REV4) return be64_to_cpu(wwn); else - return rol64(wwn, 32); + return (((wwn & 0xffffffff00000000) >> 32) | + ((wwn & 0x00000000ffffffff) << 32)); + } /** @@ -4234,6 +4528,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) { struct lpfc_vport *vport; struct Scsi_Host *shost = NULL; + struct scsi_host_template *template; int error = 0; int i; uint64_t wwn; @@ -4254,7 +4549,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { if (wwn == lpfc_no_hba_reset[i]) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "6020 Setting use_no_reset port=%llx\n", wwn); use_no_reset_hba = true; @@ -4262,22 +4558,50 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) } } - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - if (dev != &phba->pcidev->dev) { - shost = scsi_host_alloc(&lpfc_vport_template, - sizeof(struct lpfc_vport)); + /* Seed template for SCSI host registration */ + if (dev == &phba->pcidev->dev) { + template = &phba->port_template; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + /* Seed physical port template */ + memcpy(template, &lpfc_template, sizeof(*template)); + + if (use_no_reset_hba) { + /* template is for a no reset SCSI Host */ + template->max_sectors = 0xffff; + template->eh_host_reset_handler = NULL; + } + + /* Template for all vports this physical port creates */ + memcpy(&phba->vport_template, &lpfc_template, + sizeof(*template)); + phba->vport_template.max_sectors = 0xffff; + phba->vport_template.shost_attrs = lpfc_vport_attrs; + phba->vport_template.eh_bus_reset_handler = NULL; + phba->vport_template.eh_host_reset_handler = NULL; + phba->vport_template.vendor_id = 0; + + /* Initialize the host templates with updated value */ + if (phba->sli_rev == LPFC_SLI_REV4) { + template->sg_tablesize = phba->cfg_scsi_seg_cnt; + phba->vport_template.sg_tablesize = + phba->cfg_scsi_seg_cnt; + } else { + template->sg_tablesize = phba->cfg_sg_seg_cnt; + phba->vport_template.sg_tablesize = + phba->cfg_sg_seg_cnt; + } + } else { - if (!use_no_reset_hba) - shost = scsi_host_alloc(&lpfc_template, - sizeof(struct lpfc_vport)); - else - shost = scsi_host_alloc(&lpfc_template_no_hr, - sizeof(struct lpfc_vport)); + /* NVMET is for physical port only */ + memcpy(template, &lpfc_template_nvme, + sizeof(*template)); } - } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - shost = scsi_host_alloc(&lpfc_template_nvme, - sizeof(struct lpfc_vport)); + } else { + template = 
&phba->vport_template; } + + shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); if (!shost) goto out; @@ -4298,6 +4622,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { +#if (KERNEL_MAJOR < 5) +#ifndef BUILD_RHEL8 + if (!(shost_use_blk_mq(shost) && phba->cfg_enable_scsi_mq)) + phba->cfg_fcp_mq_threshold = LPFC_FCP_SQ_THRESHOLD; +#endif +#endif if (!phba->cfg_fcp_mq_threshold || phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; @@ -4312,11 +4642,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; else shost->sg_tablesize = phba->cfg_scsi_seg_cnt; - } else - /* SLI-3 has a limited number of hardware queues (3), - * thus there is only one for FCP processing. - */ + } else { + /* Always hardcode 1 nr_hw_queue for SLI3 */ shost->nr_hw_queues = 1; + } /* * Set initial can_queue value since 0 is no longer supported and @@ -4332,24 +4661,47 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->port_type = LPFC_PHYSICAL_PORT; } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9081 CreatePort TMPLATE type %x TBLsize %d " + "SEGcnt %d/%d\n", + vport->port_type, shost->sg_tablesize, + phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); + /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&vport->fc_disctmo, lpfc_disc_timeout, + (unsigned long)vport); + + setup_timer(&vport->els_tmofunc, lpfc_els_timeout, + (unsigned long)vport); + + setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, + (unsigned long)vport); + +#else timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); +#endif - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + /* Setup T10-DIF interface with SCSI Layer API */ + if (phba->cfg_enable_bg) lpfc_setup_bg(phba, shost); error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; + /* Initialize objects used to discover External DIF devices */ + INIT_LIST_HEAD(&vport->external_dif_list); + spin_lock_init(&vport->external_dif_lock); + spin_lock_irq(&phba->port_list_lock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->port_list_lock); @@ -4399,10 +4751,11 @@ destroy_port(struct lpfc_vport *vport) int lpfc_get_instance(void) { - int ret; - - ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); - return ret < 0 ? -1 : ret; + int instance = 0; + instance = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); + if (instance < 0) + return -1; + return instance; } /** @@ -4470,6 +4823,13 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) struct lpfc_hba *phba = vport->phba; fc_host_supported_speeds(shost) = 0; + /* + * Avoid reporting supported link speed for FCoE as it can't be + * controlled via FCoE. 
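The lpfc_get_wwpn() hunk above trades rol64(wwn, 32) for an open-coded mask-and-shift, presumably so the code also builds on kernels whose headers lack rol64(). For a 64-bit value the two forms are interchangeable; a minimal userspace sketch of the equivalence (the sample WWPN is made up):

#include <stdint.h>
#include <stdio.h>

/* Open-coded swap of the two 32-bit halves, as in the hunk above */
static uint64_t wwn_swap_halves(uint64_t wwn)
{
	return ((wwn & 0xffffffff00000000ULL) >> 32) |
	       ((wwn & 0x00000000ffffffffULL) << 32);
}

/* The rol64(wwn, 32) form it replaces */
static uint64_t rol64_by_32(uint64_t v)
{
	return (v << 32) | (v >> 32);
}

int main(void)
{
	uint64_t wwn = 0x2000000c29a5b7d1ULL;	/* hypothetical WWPN */

	/* both lines print 29a5b7d12000000c */
	printf("%016llx\n", (unsigned long long)wwn_swap_halves(wwn));
	printf("%016llx\n", (unsigned long long)rol64_by_32(wwn));
	return 0;
}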
+ */ + if (phba->hba_flag & HBA_FCOE_MODE) + return; + if (phba->lmt & LMT_128Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; if (phba->lmt & LMT_64Gb) @@ -4523,8 +4883,6 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; - fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; - /* This value is also unchanging */ memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); @@ -4626,9 +4984,17 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) * worker thread context. **/ static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) +#else lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_hba *phba = (struct lpfc_hba *)ptr; +#else struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); +#endif /* Don't send FCF rediscovery event if timer cancelled */ spin_lock_irq(&phba->hbalock); @@ -4665,7 +5031,7 @@ lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, case LPFC_ASYNC_LINK_FAULT_LR_LRR: break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0398 Unknown link fault code: x%x\n", bf_get(lpfc_acqe_link_fault, acqe_link)); break; @@ -4701,7 +5067,7 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, att_type = LPFC_ATT_LINK_UP; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0399 Invalid link attention type: x%x\n", bf_get(lpfc_acqe_link_status, acqe_link)); att_type = LPFC_ATT_RESERVED; @@ -4803,6 +5169,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, case LPFC_ASYNC_LINK_SPEED_40GBPS: port_speed = 40000; break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + port_speed = 100000; + break; default: port_speed = 0; } @@ -4873,19 +5242,19 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, phba->fcoe_eventtag = acqe_link->event_tag; pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0395 The mboxq allocation failed\n"); return; } mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0396 The lpfc_dmabuf allocation failed\n"); goto out_free_pmb; } mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp->virt) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0397 The mbuf allocation failed\n"); goto out_free_dmabuf; } @@ -5033,6 +5402,659 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) return port_speed; } +void +lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) +{ + struct rxtable_entry *entry; + int cnt = 0, head, tail, last, start; + + head = atomic_read(&phba->rxtable_idx_head); + tail = atomic_read(&phba->rxtable_idx_tail); + if (head == tail) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "4411 Rxtable is empty\n"); + return; + } + last = tail; + start = head; + + /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */ + while (start != last) { + if (start) + start--; + else + start = LPFC_MAX_RXMONITOR_ENTRY - 1; + entry = &phba->rxtable[start]; + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld " + "Lat %lld ASz 
%lld Info %02d BWUtil %d " + "Int %d slot %d\n", + cnt, entry->max_bytes_per_interval, + entry->total_bytes, entry->rcv_bytes, + entry->avg_io_latency, entry->avg_io_size, + entry->cmf_info, entry->timer_utilization, + entry->timer_interval, start); + cnt++; + if (cnt >= LPFC_MAX_RXMONITOR_DUMP) + return; + } +} + +/** + * lpfc_cgn_update_stat - Save data into congestion stats buffer + * @phba: pointer to lpfc hba data structure. + * @dtag: FPIN descriptor received + * + * Increment the FPIN received counter/time when it happens. + */ +void +lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) +{ + struct lpfc_cgn_info *cp; + struct tm broken; + struct timespec64 cur_time; + u32 cnt; + u16 value; + + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + + /* Update congestion statistics */ + switch (dtag) { + case ELS_DTAG_LNK_INTEGRITY: + cnt = le32_to_cpu(cp->link_integ_notification); + cnt++; + cp->link_integ_notification = cpu_to_le32(cnt); + + cp->cgn_stat_lnk_month = broken.tm_mon + 1; + cp->cgn_stat_lnk_day = broken.tm_mday; + cp->cgn_stat_lnk_year = broken.tm_year - 100; + cp->cgn_stat_lnk_hour = broken.tm_hour; + cp->cgn_stat_lnk_min = broken.tm_min; + cp->cgn_stat_lnk_sec = broken.tm_sec; + break; + case ELS_DTAG_DELIVERY: + cnt = le32_to_cpu(cp->delivery_notification); + cnt++; + cp->delivery_notification = cpu_to_le32(cnt); + + cp->cgn_stat_del_month = broken.tm_mon + 1; + cp->cgn_stat_del_day = broken.tm_mday; + cp->cgn_stat_del_year = broken.tm_year - 100; + cp->cgn_stat_del_hour = broken.tm_hour; + cp->cgn_stat_del_min = broken.tm_min; + cp->cgn_stat_del_sec = broken.tm_sec; + break; + case ELS_DTAG_PEER_CONGEST: + cnt = le32_to_cpu(cp->cgn_peer_notification); + cnt++; + cp->cgn_peer_notification = cpu_to_le32(cnt); + + cp->cgn_stat_peer_month = broken.tm_mon + 1; + cp->cgn_stat_peer_day = broken.tm_mday; + cp->cgn_stat_peer_year = broken.tm_year - 100; + cp->cgn_stat_peer_hour = broken.tm_hour; + cp->cgn_stat_peer_min = broken.tm_min; + cp->cgn_stat_peer_sec = broken.tm_sec; + break; + case ELS_DTAG_CONGESTION: + cnt = le32_to_cpu(cp->cgn_notification); + cnt++; + cp->cgn_notification = cpu_to_le32(cnt); + + cp->cgn_stat_cgn_month = broken.tm_mon + 1; + cp->cgn_stat_cgn_day = broken.tm_mday; + cp->cgn_stat_cgn_year = broken.tm_year - 100; + cp->cgn_stat_cgn_hour = broken.tm_hour; + cp->cgn_stat_cgn_min = broken.tm_min; + cp->cgn_stat_cgn_sec = broken.tm_sec; + } + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = cpu_to_le32(value); + } + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(value); +} + +/** + * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer + * @phba: pointer to lpfc hba data structure. + * + * Save the congestion event data every minute. + * On the hour collapse all the minute data into hour data. Every day + * collapse all the hour data into daily data. Separate driver + * and fabric congestion event counters that will be saved out + * to the registered congestion buffer every minute. 
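lpfc_cgn_dump_rxmonitor() above walks the rx monitor ring backward from the head index so the newest samples print first, stopping at the tail or after LPFC_MAX_RXMONITOR_DUMP entries. A standalone sketch of the same traversal, with illustrative (non-driver) ring sizes:

#include <stdio.h>

#define NENTRY 8	/* stands in for LPFC_MAX_RXMONITOR_ENTRY */
#define NDUMP  4	/* stands in for LPFC_MAX_RXMONITOR_DUMP */

static void dump_newest_first(const int *table, int head, int tail)
{
	int start = head, cnt = 0;

	while (start != tail) {
		/* step back one slot, wrapping below index 0 */
		start = start ? start - 1 : NENTRY - 1;
		printf("%02d: slot %d value %d\n", cnt, start, table[start]);
		if (++cnt >= NDUMP)
			return;
	}
}

int main(void)
{
	int table[NENTRY] = { 10, 11, 12, 13, 14, 15, 16, 17 };

	dump_newest_first(table, 5, 1);	/* prints slots 4, 3, 2, 1 */
	return 0;
}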
+ */ +void +lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + struct tm broken; + struct timespec64 cur_time; + uint32_t i, index; + uint16_t value, mvalue; + uint64_t bps; + uint32_t mbps; + uint32_t dvalue, wvalue, lvalue, avalue; + uint64_t latsum; + uint16_t *ptr; + uint32_t *lptr; + uint16_t *mptr; + + /* Make sure we have a congestion info buffer */ + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + if (time_before(jiffies, phba->cgn_evt_timestamp)) + return; + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); + phba->cgn_evt_minute++; + + /* We should get to this point in the routine on 1 minute intervals */ + + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + + if (phba->cgn_fpin_frequency && + phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { + value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; + cp->cgn_stat_npm = cpu_to_le32(value); + } + + /* Read and clear the latency counters for this minute */ + lvalue = atomic_read(&phba->cgn_latency_evt_cnt); + latsum = atomic64_read(&phba->cgn_latency_evt); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + /* We need to store MB/sec bandwidth in the congestion information. + * block_cnt is count of 512 byte blocks for the entire minute, + * bps will get bytes per sec before finally converting to MB/sec. + */ + bps = ((phba->rx_block_cnt / LPFC_SEC_MIN) * 512); + phba->rx_block_cnt = 0; + mvalue = bps / (1024 * 1024); /* convert to MB/sec */ + + /* Every minute */ + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_acr = phba->cgn_p.cgn_param_acr; + cp->cgn_info_rxburst = phba->cgn_p.cgn_param_rxburst; + cp->cgn_info_rxbw = phba->cgn_p.cgn_param_rxbw; + + /* Fill in default LUN qdepth */ + value = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(value); + + /* Record congestion buffer info - every minute + * cgn_driver_evt_cnt (Driver events) + * cgn_fabric_warn_cnt (Congestion Warnings) + * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) + * cgn_fabric_alarm_cnt (Congestion Alarms) + */ + index = ++cp->cgn_index_minute; + if (cp->cgn_index_minute == LPFC_MIN_HOUR) { + cp->cgn_index_minute = 0; + index = 0; + } + + /* Get the number of driver events in this sample and reset counter */ + dvalue = atomic_read(&phba->cgn_driver_evt_cnt); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + + /* Get the number of warning events - FPIN and Signal for this minute */ + wvalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + + /* Get the number of alarm events - FPIN and Signal for this minute */ + avalue = 0; + if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + + /* Collect the driver, warning, alarm and latency counts for this + * minute into the driver congestion buffer. 
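The bandwidth math above turns a one-minute count of 512-byte blocks into MB/s before the value lands in cgn_bw_min[]. The same arithmetic worked standalone, with LPFC_SEC_MIN assumed to be 60:

#include <stdint.h>
#include <stdio.h>

#define SEC_MIN 60	/* assumed value of LPFC_SEC_MIN */

static uint16_t blocks_per_min_to_mbs(uint64_t block_cnt)
{
	/* bytes/sec first, then MB/sec, as in lpfc_cgn_save_evt_cnt() */
	uint64_t bps = (block_cnt / SEC_MIN) * 512;

	return (uint16_t)(bps / (1024 * 1024));
}

int main(void)
{
	/* e.g. 25 GiB received during the minute */
	uint64_t blocks = (25ULL * 1024 * 1024 * 1024) / 512;

	printf("%u MB/s\n", blocks_per_min_to_mbs(blocks));	/* 426 */
	return 0;
}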
+ */ + ptr = &cp->cgn_drvr_min[index]; + value = (uint16_t)dvalue; + *ptr = cpu_to_le16(value); + + ptr = &cp->cgn_warn_min[index]; + value = (uint16_t)wvalue; + *ptr = cpu_to_le16(value); + + ptr = &cp->cgn_alarm_min[index]; + value = (uint16_t)avalue; + *ptr = cpu_to_le16(value); + + lptr = &cp->cgn_latency_min[index]; + if (lvalue) { + lvalue = (uint32_t)(latsum / lvalue); + *lptr = cpu_to_le32(lvalue); + } else { + *lptr = 0; + } + + /* Collect the bandwidth value into the driver's congestion buffer. */ + mptr = &cp->cgn_bw_min[index]; + *mptr = cpu_to_le16(mvalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", + index, dvalue, wvalue, *lptr, mvalue, avalue); + + /* Every hour */ + if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { + /* Record congestion buffer info - every hour + * Collapse all minutes into an hour + */ + index = ++cp->cgn_index_hour; + if (cp->cgn_index_hour == LPFC_HOUR_DAY) { + cp->cgn_index_hour = 0; + index = 0; + } + + dvalue = 0; + wvalue = 0; + lvalue = 0; + avalue = 0; + mvalue = 0; + mbps = 0; + for (i = 0; i < LPFC_MIN_HOUR; i++) { + dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); + wvalue += le16_to_cpu(cp->cgn_warn_min[i]); + lvalue += le32_to_cpu(cp->cgn_latency_min[i]); + mbps += le16_to_cpu(cp->cgn_bw_min[i]); + avalue += le16_to_cpu(cp->cgn_alarm_min[i]); + } + if (lvalue) /* Avg of latency averages */ + lvalue /= LPFC_MIN_HOUR; + if (mbps) /* Avg of Bandwidth averages */ + mvalue = mbps / LPFC_MIN_HOUR; + + lptr = &cp->cgn_drvr_hr[index]; + *lptr = cpu_to_le32(dvalue); + lptr = &cp->cgn_warn_hr[index]; + *lptr = cpu_to_le32(wvalue); + lptr = &cp->cgn_latency_hr[index]; + *lptr = cpu_to_le32(lvalue); + mptr = &cp->cgn_bw_hr[index]; + *mptr = cpu_to_le16(mvalue); + lptr = &cp->cgn_alarm_hr[index]; + *lptr = cpu_to_le32(avalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2419 Congestion Info - hour " + "(%d): %d %d %d %d %d\n", + index, dvalue, wvalue, lvalue, mvalue, avalue); + } + + /* Every day */ + if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { + /* Record congestion buffer info - every day + * Collapse all hours into a day. Rotate days + * after LPFC_MAX_CGN_DAYS. + */ + index = ++cp->cgn_index_day; + if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { + cp->cgn_index_day = 0; + index = 0; + } + + /* Anytime we overwrite daily index 0, after we wrap, + * we will be overwriting the oldest day, so we must + * update the congestion data start time for that day. + * That start time should have previously been saved after + * we wrote the last day's worth of data. 
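The hourly rollup above is a plain collapse: event counters are summed across the sixty minute slots, while the per-minute latency figures, which are already averages, are averaged again. A compact sketch of the same reduction (slot layout simplified):

#include <stdint.h>
#include <stdio.h>

#define MIN_HOUR 60	/* stands in for LPFC_MIN_HOUR */

struct hour_slot {
	uint32_t events;	/* counters: summed */
	uint32_t avg_latency;	/* averages: averaged */
};

static struct hour_slot collapse_minutes(const uint16_t evt[MIN_HOUR],
					 const uint32_t lat[MIN_HOUR])
{
	struct hour_slot h = { 0, 0 };
	uint32_t latsum = 0;
	int i;

	for (i = 0; i < MIN_HOUR; i++) {
		h.events += evt[i];
		latsum += lat[i];
	}
	if (latsum)	/* avg of latency averages */
		h.avg_latency = latsum / MIN_HOUR;
	return h;
}

int main(void)
{
	uint16_t evt[MIN_HOUR] = { [0] = 3, [30] = 2 };
	uint32_t lat[MIN_HOUR] = { [0] = 600, [30] = 1200 };
	struct hour_slot h = collapse_minutes(evt, lat);

	printf("events %u, avg latency %u\n", h.events, h.avg_latency);
	return 0;	/* events 5, avg latency 30 */
}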
+ */ + if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { + time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); + cp->cgn_info_month = broken.tm_mon + 1; + cp->cgn_info_day = broken.tm_mday; + cp->cgn_info_year = broken.tm_year - 100; + cp->cgn_info_hour = broken.tm_hour; + cp->cgn_info_minute = broken.tm_min; + cp->cgn_info_second = broken.tm_sec; + + lpfc_printf_log + (phba, KERN_INFO, LOG_CGN_MGMT, + "2646 CGNInfo idx0 Start Time: " + "%d/%d/%d %d:%d:%d\n", + cp->cgn_info_day, cp->cgn_info_month, + cp->cgn_info_year, cp->cgn_info_hour, + cp->cgn_info_minute, cp->cgn_info_second); + } + + dvalue = 0; + wvalue = 0; + lvalue = 0; + mvalue = 0; + mbps = 0; + avalue = 0; + for (i = 0; i < LPFC_HOUR_DAY; i++) { + dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); + wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); + lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); + mbps += le32_to_cpu(cp->cgn_bw_hr[i]); + avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); + } + if (lvalue) /* Avg of latency averages */ + lvalue /= LPFC_HOUR_DAY; + if (mbps) /* Avg of Bandwidth averages */ + mvalue = mbps / LPFC_HOUR_DAY; + + lptr = &cp->cgn_drvr_day[index]; + *lptr = cpu_to_le32(dvalue); + lptr = &cp->cgn_warn_day[index]; + *lptr = cpu_to_le32(wvalue); + lptr = &cp->cgn_latency_day[index]; + *lptr = cpu_to_le32(lvalue); + mptr = &cp->cgn_bw_day[index]; + *mptr = cpu_to_le16(mvalue); + lptr = &cp->cgn_alarm_day[index]; + *lptr = cpu_to_le32(avalue); + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2420 Congestion Info - daily (%d): " + "%d %d %d %d %d\n", + index, dvalue, wvalue, lvalue, mvalue, avalue); + + /* We just wrote LPFC_MAX_CGN_DAYS of data, + * so we are wrapped on any data after this. + * Save this as the start time for the next day. + */ + if (index == (LPFC_MAX_CGN_DAYS - 1)) { + phba->hba_flag |= HBA_CGN_DAY_WRAP; + ktime_get_real_ts64(&phba->cgn_daily_ts); + } + } + + /* Use the frequency found in the last rcv'ed FPIN */ + value = phba->cgn_fpin_frequency; + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) + cp->cgn_warn_freq = cpu_to_le16(value); + if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) + cp->cgn_alarm_freq = cpu_to_le16(value); + + /* Frequency (in ms) at which Signal Warning/Signal Congestion + * Notifications are received by the HBA + */ + value = phba->cgn_sig_freq; + + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + cp->cgn_warn_freq = cpu_to_le16(value); + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) + cp->cgn_alarm_freq = cpu_to_le16(value); + + lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(lvalue); +} + +/** + * lpfc_calc_cmf_latency - latency from start of rx rate timer interval + * @phba: The Hba for which this call is being executed. + * + * The routine calculates the latency from the beginning of the CMF timer + * interval to the current point in time. It is called from IO completion + * when we exceed our Bandwidth limitation for the time interval. + */ +uint32_t +lpfc_calc_cmf_latency(struct lpfc_hba *phba) +{ + struct timespec64 cmpl_time; + uint32_t msec = 0; + + ktime_get_real_ts64(&cmpl_time); + + /* This routine works on a ms granularity so sec and nsec are + * converted accordingly. 
+ */ + if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { + msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / + NSEC_PER_MSEC; + } else { + if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { + msec = (cmpl_time.tv_sec - + phba->cmf_latency.tv_sec) * MSEC_PER_SEC; + msec += ((cmpl_time.tv_nsec - + phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); + } else { + msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - + 1) * MSEC_PER_SEC; + msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + + cmpl_time.tv_nsec) / NSEC_PER_MSEC); + } + } + return msec; +} + +/** + * lpfc_cmf_timer - This is the timer function for one congestion + * rate interval. + */ +enum hrtimer_restart +lpfc_cmf_timer(struct hrtimer *timer) +{ + struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, + cmf_timer); + struct rxtable_entry *entry; + uint32_t io_cnt; + uint32_t head, tail; + uint32_t busy, max_read; + uint64_t total, rcv, lat, mbpi, extra; + int timer_interval = LPFC_CMF_INTERVAL; + uint32_t ms; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* Only restart the timer if congestion mgmt is on */ + if (phba->cmf_active_mode == LPFC_CFG_OFF || + !phba->cmf_latency.tv_sec) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6224 CMF timer exit: %d %lld\n", + phba->cmf_active_mode, + (uint64_t)phba->cmf_latency.tv_sec); + return HRTIMER_NORESTART; + } + + /* If pport is not ready yet, just exit and wait for + * the next timer cycle to hit. + */ + if (!phba->pport) + goto skip; + + /* Do not block SCSI IO while in the timer routine since + * total_bytes will be cleared + */ + atomic_set(&phba->cmf_stop_io, 1); + + /* First we need to calculate the actual ms between + * the last timer interrupt and this one. We ask for + * LPFC_CMF_INTERVAL, however the actual time may + * vary depending on system overhead. + */ + ms = lpfc_calc_cmf_latency(phba); + + /* Immediately after we calculate the time since the last + * timer interrupt, set the start time for the next + * interrupt + */ + ktime_get_real_ts64(&phba->cmf_latency); + + phba->cmf_link_byte_count = + (phba->cmf_max_line_rate * LPFC_CMF_INTERVAL) / 1000; + + /* Collect all the stats from the prior timer interval */ + total = 0; + io_cnt = 0; + lat = 0; + rcv = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_xchg(&cgs->total_bytes, 0); + io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); + lat += atomic64_xchg(&cgs->rx_latency, 0); + rcv += atomic64_xchg(&cgs->rcv_bytes, 0); + } + + /* Before we issue another CMF_SYNC_WQE, retrieve the BW + * returned from the last CMF_SYNC_WQE issued, from + * cmf_last_sync_bw. This will be the target BW for + * this next timer interval. + */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && + phba->link_state != LPFC_LINK_DOWN && + phba->hba_flag & HBA_SETUP) { + mbpi = phba->cmf_last_sync_bw; + phba->cmf_last_sync_bw = 0; + extra = 0; + + /* Calculate any extra bytes needed to account for the + * timer accuracy. If we are less than LPFC_CMF_INTERVAL + * add an extra 2% slop factor, equal to LPFC_CMF_INTERVAL + * add an extra 1%. 
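The millisecond delta in lpfc_calc_cmf_latency() above has to borrow from the seconds field whenever the completion nanoseconds are smaller than the start nanoseconds. The same arithmetic as a runnable userspace check (constants mirror the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000L
#define MSEC_PER_SEC  1000L
#define NSEC_PER_SEC  1000000000L

static uint32_t ts_delta_ms(struct timespec start, struct timespec now)
{
	uint32_t msec;

	if (now.tv_sec == start.tv_sec)
		return (now.tv_nsec - start.tv_nsec) / NSEC_PER_MSEC;

	if (now.tv_nsec >= start.tv_nsec) {
		msec = (now.tv_sec - start.tv_sec) * MSEC_PER_SEC;
		msec += (now.tv_nsec - start.tv_nsec) / NSEC_PER_MSEC;
	} else {
		/* borrow one second, then add the wrapped nanoseconds */
		msec = (now.tv_sec - start.tv_sec - 1) * MSEC_PER_SEC;
		msec += ((NSEC_PER_SEC - start.tv_nsec) + now.tv_nsec) /
			NSEC_PER_MSEC;
	}
	return msec;
}

int main(void)
{
	struct timespec a = { 10, 900 * NSEC_PER_MSEC };
	struct timespec b = { 11, 100 * NSEC_PER_MSEC };

	printf("%u ms\n", ts_delta_ms(a, b));	/* 200 ms */
	return 0;
}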
The goal is to equalize total with a + * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1 + */ + if (ms == LPFC_CMF_INTERVAL) + extra = div_u64(total, 100); + else if (ms < LPFC_CMF_INTERVAL) + extra = div_u64(total, 50); + lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); + } else { + /* For Monitor mode or link down we want mbpi + * to be the full link speed + */ + mbpi = phba->cmf_link_byte_count; + } + phba->cmf_timer_cnt++; + + if (io_cnt) { + /* Update congestion info buffer latency in us */ + atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); + atomic64_add(lat, &phba->cgn_latency_evt); + } + busy = atomic_xchg(&phba->cmf_busy, 0); + max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); + + /* Calculate MBPI for the next timer interval */ + if (mbpi) { + if (mbpi > phba->cmf_link_byte_count || + phba->cmf_active_mode == LPFC_CFG_MONITOR) + mbpi = phba->cmf_link_byte_count; + + /* Change max_bytes_per_interval to what the prior + * CMF_SYNC_WQE cmpl indicated. + */ + if (mbpi != phba->cmf_max_bytes_per_interval) + phba->cmf_max_bytes_per_interval = mbpi; + } + + /* Save rxmonitor information for debug */ + if (phba->rxtable) { + head = atomic_xchg(&phba->rxtable_idx_head, + LPFC_RXMONITOR_TABLE_IN_USE); + entry = &phba->rxtable[head]; + entry->total_bytes = total; + entry->rcv_bytes = rcv; + entry->cmf_busy = busy; + entry->cmf_info = phba->cmf_active_info; + if (io_cnt) { + entry->avg_io_latency = lat / io_cnt; + entry->avg_io_size = rcv / io_cnt; + } else { + entry->avg_io_latency = 0; + entry->avg_io_size = 0; + } + entry->max_read_cnt = max_read; + entry->io_cnt = io_cnt; + entry->max_bytes_per_interval = mbpi; + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + entry->timer_utilization = phba->cmf_last_ts; + else + entry->timer_utilization = ms; + entry->timer_interval = ms; + phba->cmf_last_ts = 0; + + /* Increment rxtable index */ + head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; + tail = atomic_read(&phba->rxtable_idx_tail); + if (head == tail) { + tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; + atomic_set(&phba->rxtable_idx_tail, tail); + } + atomic_set(&phba->rxtable_idx_head, head); + } + + if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { + /* If Monitor mode, check if we are oversubscribed + * against the full line rate. + */ + if (mbpi && total > mbpi) + atomic_inc(&phba->cgn_driver_evt_cnt); + } + phba->rx_block_cnt += (rcv / 512); /* save 512 byte block cnt */ + + /* Each minute save Fabric and Driver congestion information */ + lpfc_cgn_save_evt_cnt(phba); + + /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the + * minute, adjust our next timer interval, if needed, to ensure a + * 1 minute granularity when we get the next timer interrupt. + */ + if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), + phba->cgn_evt_timestamp)) { + timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - + jiffies); + if (timer_interval <= 0) + timer_interval = LPFC_CMF_INTERVAL; + + /* If we adjust timer_interval, max_bytes_per_interval + * needs to be adjusted as well. + */ + phba->cmf_link_byte_count = (phba->cmf_max_line_rate * + timer_interval) / 1000; + if (phba->cmf_active_mode == LPFC_CFG_MONITOR) + phba->cmf_max_bytes_per_interval = + phba->cmf_link_byte_count; + } + + /* Since total_bytes has already been zeroed, it's okay to unblock + * after max_bytes_per_interval is setup. 
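The rxtable index update above implements a ring producer that overwrites the oldest sample once the table fills: the head advances each interval and pushes the tail along when they collide. Stripped of the atomics, the policy reduces to:

#include <stdio.h>

#define NENTRY 8	/* stands in for LPFC_MAX_RXMONITOR_ENTRY */

/* Advance head; when the ring is full, advance tail too so the
 * oldest entry is the one overwritten on the next push.
 */
static void ring_push(int *head, int *tail)
{
	*head = (*head + 1) % NENTRY;
	if (*head == *tail)
		*tail = (*tail + 1) % NENTRY;
}

int main(void)
{
	int head = 0, tail = 0, i;

	for (i = 0; i < 10; i++)	/* push more entries than slots */
		ring_push(&head, &tail);
	printf("head %d tail %d\n", head, tail);	/* head 2 tail 3 */
	return 0;
}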
+ */ + if (atomic_xchg(&phba->cmf_bw_wait, 0)) + queue_work(phba->wq, &phba->unblock_request_work); + +#if defined(BUILD_NVME) + if (phba->nvme_io) + queue_work(phba->nvme_io, &phba->nvme_defer_work); +#endif + + /* SCSI IO is now unblocked */ + atomic_set(&phba->cmf_stop_io, 0); + +skip: + hrtimer_forward_now(timer, + ktime_set(0, timer_interval * NSEC_PER_MSEC)); + return HRTIMER_RESTART; +} + #define trunk_link_status(__idx)\ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ @@ -5086,17 +6108,21 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba, phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; } - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2910 Async FC Trunking Event - Speed:%d\n" + phba->fc_eventTag = acqe_fc->event_tag; + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "2910 Async FC Trunking Event %d - Speed:%d\n" "\tLogical speed:%d " "port0: %s port1: %s port2: %s port3: %s\n", - phba->sli4_hba.link_state.speed, + phba->fc_eventTag, phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.logical_speed, trunk_link_status(0), trunk_link_status(1), trunk_link_status(2), trunk_link_status(3)); + if (phba->cmf_active_mode != LPFC_CFG_OFF) + lpfc_cmf_signal_init(phba); + if (port_fault) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "3202 trunk error:0x%x (%s) seen on port0:%s " /* * SLI-4: We have only 0xA error codes @@ -5130,7 +6156,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) if (bf_get(lpfc_trailer_type, acqe_fc) != LPFC_FC_LA_EVENT_TYPE_FC_LINK) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2895 Non FC link Event detected.(%d)\n", bf_get(lpfc_trailer_type, acqe_fc)); return; @@ -5178,19 +6204,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) phba->sli4_hba.link_state.fault); pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2897 The mboxq allocation failed\n"); return; } mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2898 The lpfc_dmabuf allocation failed\n"); goto out_free_pmb; } mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp->virt) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2899 The mbuf allocation failed\n"); goto out_free_dmabuf; } @@ -5276,22 +6302,22 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) uint8_t operational = 0; struct temp_event temp_event_data; struct lpfc_acqe_misconfigured_event *misconfigured; + struct lpfc_acqe_cgn_signal *cgn_signal; struct Scsi_Host *shost; struct lpfc_vport **vports; - int rc, i; + int rc, i, cnt; evt_type = bf_get(lpfc_trailer_type, acqe_sli); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2901 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d\n", + "2901 Async SLI event - Type:%d, Event Data: x%08x " + "x%08x x%08x x%08x\n", evt_type, acqe_sli->event_data1, acqe_sli->event_data2, - evt_type); + acqe_sli->reserved, acqe_sli->trailer); port_name = phba->Port[0]; if (port_name == 0x00) port_name = '?'; /* get port name is empty */ - switch (evt_type) { case LPFC_SLI_EVENT_TYPE_OVER_TEMP: temp_event_data.event_type = 
FC_REG_TEMPERATURE_EVENT; @@ -5357,10 +6383,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) &misconfigured->theEvent); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3296 " - "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " - "event: Invalid link %d", + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3296 LPFC_SLI_EVENT_TYPE_" + "MISCONFIGURED event: " + "Invalid link %d", phba->sli4_hba.lnk_info.lnk_no); return; } @@ -5409,7 +6435,8 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) rc = lpfc_sli4_read_config(phba); if (rc) { phba->lmt = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "3194 Unable to retrieve supported " "speeds, rc = 0x%x\n", rc); } @@ -5433,11 +6460,62 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "Event Data1:x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; + case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: + /* Call FW to obtain active parms */ + lpfc_sli4_cgn_parm_chg_evt(phba); + break; + case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: + /* Misconfigured WWN. Reports that the SLI Port is configured + * to use FA-WWN, but the attached device doesn't support it. + * No driver action is required. + * Event Data1 - N.A, Event Data2 - N.A + */ + lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, + "2699 Misconfigured FA-WWN - Attached device does " + "not support FA-WWN\n"); + break; + case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: + /* EEPROM failure. No driver action is required */ + lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, + "2518 EEPROM failure - " + "Event Data1: x%08x Event Data2: x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2); + break; + case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + break; + cgn_signal = (struct lpfc_acqe_cgn_signal *) + &acqe_sli->event_data1; + phba->cgn_acqe_cnt++; + + cnt = bf_get(lpfc_warn_acqe, cgn_signal); + + /* no threshold for CMF, even 1 signal will trigger an event */ + + /* Alarm overrides warning, so check that first */ + if (cgn_signal->alarm_cnt) { + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + /* Keep track of alarm cnt for cgn_info */ + atomic_add(cgn_signal->alarm_cnt, + &phba->cgn_fabric_alarm_cnt); + /* Keep track of alarm cnt for CMF_SYNC_WQE */ + atomic_add(cgn_signal->alarm_cnt, + &phba->cgn_sync_alarm_cnt); + } + } else if (cnt) { + /* signal action needs to be taken */ + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + /* Keep track of warning cnt for cgn_info */ + atomic_add(cnt, &phba->cgn_fabric_warn_cnt); + /* Keep track of warning cnt for CMF_SYNC_WQE */ + atomic_add(cnt, &phba->cgn_sync_warn_cnt); + } + } + break; default: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3193 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d\n", - acqe_sli->event_data1, acqe_sli->event_data2, + "3193 Unrecognized SLI event, type: 0x%x\n", evt_type); break; } @@ -5546,8 +6624,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, case LPFC_FIP_EVENT_TYPE_NEW_FCF: case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | - LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2546 New FCF event, evt_tag:x%x, " "index:x%x\n", acqe_fip->event_tag, @@ -5600,23 +6677,24 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 
LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2547 Issue FCF scan read FCF mailbox " "command failed (x%x)\n", rc); break; case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2548 FCF Table full count 0x%x tag 0x%x\n", - bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), - acqe_fip->event_tag); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2548 FCF Table full count 0x%x tag 0x%x\n", + bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), + acqe_fip->event_tag); break; case LPFC_FIP_EVENT_TYPE_FCF_DEAD: phba->fcoe_cvl_eventtag = acqe_fip->event_tag; - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, - "2549 FCF (x%x) disconnected from network, " - "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2549 FCF (x%x) disconnected from network, " + "tag:x%x\n", acqe_fip->index, + acqe_fip->event_tag); /* * If we are in the middle of FCF failover process, clear * the corresponding FCF bit in the roundrobin bitmap. @@ -5653,8 +6731,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | - LOG_DISCOVERY, - "2772 Issue FCF rediscover mailbox " + LOG_TRACE_EVENT, + "2772 Issue FCF rediscover mailbox " "command failed, fail through to FCF " "dead event\n"); spin_lock_irq(&phba->hbalock); @@ -5677,7 +6755,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, break; case LPFC_FIP_EVENT_TYPE_CVL: phba->fcoe_cvl_eventtag = acqe_fip->event_tag; - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); @@ -5744,9 +6823,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | - LOG_DISCOVERY, + LOG_TRACE_EVENT, "2774 Issue FCF rediscover " - "mailbox command failed, " + "mailbox command failed, " "through to CVL event\n"); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; @@ -5765,9 +6844,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, } break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0288 Unknown FCoE event type 0x%x event tag " - "0x%x\n", event_type, acqe_fip->event_tag); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0288 Unknown FCoE event type 0x%x event tag " + "0x%x\n", event_type, acqe_fip->event_tag); break; } } @@ -5784,7 +6863,7 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, struct lpfc_acqe_dcbx *acqe_dcbx) { phba->fc_eventTag = acqe_dcbx->event_tag; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0290 The SLI4 DCBX asynchronous event is not " "handled yet\n"); } @@ -5815,6 +6894,326 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, phba->sli4_hba.link_state.logical_speed); } +/** + * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event + * @phba: pointer to lpfc hba data structure. + * + * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event + * is an asynchronous notification of a request to reset CM stats. + **/ +static void +lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) +{ + if (!phba->cgn_i) + return; + lpfc_init_congestion_stat(phba); +} + +/** + * lpfc_cgn_params_val - Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. 
+ * @p_cfg_param: pointer to FW provided congestion parameters. + * + * This routine validates the 8 congestion parameters passed + * by the FW to the driver via an ACQE event. The parameters + * are checked for in-range values. Any value out of range + * is set back to its default. + * + **/ +static void +lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) +{ + spin_lock_irq(&phba->hbalock); + if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, + LPFC_CFG_MONITOR)) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6225 CMF mode param out of range: %d\n", + p_cfg_param->cgn_param_mode); + p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; + } + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr, LPFC_CFG_CONSERVE, + LPFC_CFG_SEVERE)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6226 CMF ACR param out of range: %d\n", + p_cfg_param->cgn_param_acr); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_rxburst, + LPFC_CFG_BURST_MIN, LPFC_CFG_BURST_MAX)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6227 CMF rxburst param out of range: %d\n", + p_cfg_param->cgn_param_rxburst); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_rxbw, + LPFC_CFG_BW_MIN, LPFC_CFG_BW_MAX)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6228 CMF rxbw param out of range: %d\n", + p_cfg_param->cgn_param_rxbw); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr_max_rxburst, + LPFC_CFG_BURST_MIN, LPFC_CFG_BURST_MAX)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6229 CMF maxACRburst param out of range: %d\n", + p_cfg_param->cgn_param_acr_max_rxburst); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr_max_rxbw, + LPFC_CFG_BW_MIN, LPFC_CFG_BW_MAX)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6230 CMF maxACRbw param out of range: %d\n", + p_cfg_param->cgn_param_acr_max_rxbw); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr_scale, + LPFC_CFG_LOW_SCALE, LPFC_CFG_HIGH_SCALE)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6231 CMF ACRscale param out of range: %d\n", + p_cfg_param->cgn_param_acr_scale); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr_min_rxburst, 1, + p_cfg_param->cgn_param_acr_max_rxburst)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6232 CMF minACRburst param out of range: %d\n", + p_cfg_param->cgn_param_acr_min_rxburst); + + if (!lpfc_rangecheck(p_cfg_param->cgn_param_acr_min_rxbw, + LPFC_CFG_BW_MIN, + p_cfg_param->cgn_param_acr_max_rxbw)) + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6233 CMF minACRbw param out of range: %d\n", + p_cfg_param->cgn_param_acr_min_rxbw); + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_cgn_params_parse - Process a FW cong parm change event + * @phba: pointer to lpfc hba data structure. + * @p_cgn_param: pointer to a data buffer with the FW cong params. + * @len: the size of @p_cgn_param in bytes. + * + * This routine validates the congestion management buffer signature + * from the FW, validates the contents and makes corrections for + * valid, in-range values. If the signature magic is correct, then + * after parameter validation the contents are copied to the driver's + * @phba structure. If the magic is incorrect, an error message is + * logged. + **/ +static void +lpfc_cgn_params_parse(struct lpfc_hba *phba, + struct lpfc_cgn_param *p_cgn_param, uint32_t len) +{ + struct lpfc_cgn_info *cp; + uint32_t crc, oldmode; + + /* Make sure the FW has encoded the correct magic number to + * validate the congestion parameter in FW memory. 
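lpfc_cgn_params_val() above only forces the mode back to a default; the remaining parameters are logged but left as received. A minimal sketch of the validation idiom, assuming lpfc_rangecheck() is a simple inclusive bounds test (the numeric bounds below are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Assumed semantics of the driver's lpfc_rangecheck() helper */
static bool rangecheck(unsigned int val, unsigned int min, unsigned int max)
{
	return val >= min && val <= max;
}

int main(void)
{
	unsigned int mode = 7;	/* hypothetical out-of-range FW value */

	if (!rangecheck(mode, 0, 2)) {	/* 0..2 ~ OFF..MONITOR */
		printf("mode %u out of range, forcing off\n", mode);
		mode = 0;	/* fall back to the equivalent of LPFC_CFG_OFF */
	}
	return 0;
}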
+ */ + if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "4668 FW cgn parm buffer data: " + "magic 0x%x version %d mode %d " + "acr %d rx_burst_sz %d " + "rx_bw_rate_max %d acr_burst_rate_max %d " + "acr_bw_rate %d acr_burst_rate_min %d " + "acr_bw_min %d scale %d activeMode %d\n", + p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version, + p_cgn_param->cgn_param_mode, + p_cgn_param->cgn_param_acr, + p_cgn_param->cgn_param_rxburst, + p_cgn_param->cgn_param_rxbw, + p_cgn_param->cgn_param_acr_max_rxburst, + p_cgn_param->cgn_param_acr_max_rxbw, + p_cgn_param->cgn_param_acr_min_rxburst, + p_cgn_param->cgn_param_acr_min_rxbw, + p_cgn_param->cgn_param_acr_scale, + phba->cmf_active_mode); + + oldmode = phba->cmf_active_mode; + + /* Any parameters out of range are corrected to defaults + * by this routine. No need to fail. + */ + lpfc_cgn_params_val(phba, p_cgn_param); + + /* Parameters are verified, move them into driver storage */ + spin_lock_irq(&phba->hbalock); + memcpy(&phba->cgn_p, p_cgn_param, + sizeof(struct lpfc_cgn_param)); + + /* Update parameters in congestion info buffer now */ + if (phba->cgn_i) { + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_acr = phba->cgn_p.cgn_param_acr; + cp->cgn_info_rxburst = phba->cgn_p.cgn_param_rxburst; + cp->cgn_info_rxbw = phba->cgn_p.cgn_param_rxbw; + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, + LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + } + spin_unlock_irq(&phba->hbalock); + + phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; + + switch (oldmode) { + case LPFC_CFG_OFF: + if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { + /* Turning CMF on */ + lpfc_cmf_start(phba); + + if (phba->link_state >= LPFC_LINK_UP) { + phba->cgn_reg_fpin = + phba->cgn_init_reg_fpin; + phba->cgn_reg_signal = + phba->cgn_init_reg_signal; + lpfc_issue_els_edc(phba->pport, 0); + } + } + break; + case LPFC_CFG_MANAGED: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MONITOR: + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4661 Switch from MANAGED to " + "MONITOR mode\n"); + phba->cmf_max_bytes_per_interval = + phba->cmf_link_byte_count; + + /* Resume blocked IO - unblock on workqueue */ + queue_work(phba->wq, + &phba->unblock_request_work); + break; + } + break; + case LPFC_CFG_MONITOR: + switch (phba->cgn_p.cgn_param_mode) { + case LPFC_CFG_OFF: + /* Turning CMF off */ + lpfc_cmf_stop(phba); + if (phba->link_state >= LPFC_LINK_UP) + lpfc_issue_els_edc(phba->pport, 0); + break; + case LPFC_CFG_MANAGED: + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4662 Switch from MONITOR to " + "MANAGED mode\n"); + lpfc_cmf_signal_init(phba); + break; + } + break; + } + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4669 FW cgn parm buf wrong magic 0x%x " + "version %d\n", p_cgn_param->cgn_param_magic, + p_cgn_param->cgn_param_version); + } +} + +/** + * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a read_object mailbox command to + * get the congestion management parameters from the FW, + * parses them and updates the driver maintained values. 
+ * + * Returns + * 0 if the object was empty + * A negative error value if an error was encountered + * Count if bytes were read from object + **/ +int +lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) +{ + int ret = 0; + struct lpfc_cgn_param *p_cgn_param = NULL; + u32 *pdata = NULL; + u32 len = 0; + + /* Find out if the FW has a new set of congestion parameters. */ + len = sizeof(struct lpfc_cgn_param); + pdata = kzalloc(len, GFP_KERNEL); + ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, + pdata, len); + + /* 0 means no data. A negative means error. A positive means + * bytes were copied. + */ + if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4670 CGN RD OBJ returns no data\n"); + goto rd_obj_err; + } else if (ret < 0) { + /* Some error. Just exit and return it to the caller. */ + goto rd_obj_err; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "6234 READ CGN PARAMS Successful %d\n", len); + + /* Parse data pointer over len and update the phba congestion + * parameters with values passed back. The receive rate values + * may have been altered in FW, but take no action here. + */ + p_cgn_param = (struct lpfc_cgn_param *)pdata; + lpfc_cgn_params_parse(phba, p_cgn_param, len); + + rd_obj_err: + kfree(pdata); + return ret; +} + +/** + * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event + * @phba: pointer to lpfc hba data structure. + * + * The FW generated Async ACQE SLI event calls this routine when + * the event type is an SLI Internal Port Event and the Event Code + * indicates a change to the FW maintained congestion parameters. + * + * This routine executes a Read_Object mailbox call to obtain the + * current congestion parameters maintained in FW and corrects + * the driver's active congestion parameters. + * + * The acqe event is not passed because there is no further data + * required. + * + * Returns nonzero error if event processing encountered an error. + * Zero otherwise for success. + **/ +static int +lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) +{ + int ret = 0; + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4664 Cgn Evt when E2E off. Drop event\n"); + return -EACCES; + } + + /* If the event is claiming an empty object, it's ok. A write + * could have cleared it. Only error is a negative return + * status. + */ + ret = lpfc_sli4_cgn_params_read(phba); + if (ret < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4667 Error reading Cgn Params (%d)\n", + ret); + } else if (!ret) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "4673 CGN Event empty object.\n"); + } + return ret; +} + /** * lpfc_sli4_async_event_proc - Process all the pending asynchronous event * @phba: pointer to lpfc hba data structure. 
@@ -5825,18 +7224,21 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; + unsigned long iflags; /* First, declare the async event has been handled */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~ASYNC_EVENT; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Now, handle all the async events */ + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { - /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, + iflags); + /* Process the asynchronous event */ switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { case LPFC_TRAILER_CODE_LINK: @@ -5860,16 +7262,23 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) case LPFC_TRAILER_CODE_SLI: lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); break; + case LPFC_TRAILER_CODE_CMSTAT: + lpfc_sli4_async_cmstat_evt(phba); + break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "1804 Invalid asynchronous event code: " "x%x\n", bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)); break; } + /* Free the completion event processed to the free pool */ lpfc_sli4_cq_event_release(phba, cq_event); + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); } + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); } /** @@ -5897,7 +7306,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) "2777 Start post-quiescent FCF table scan\n"); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2747 Issue FCF scan read FCF mailbox " "command failed 0x%x\n", rc); } @@ -5968,7 +7377,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) "0480 Enabled MSI-X interrupt mode.\n"); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0482 Illegal interrupt mode.\n"); break; } @@ -5990,17 +7399,20 @@ static int lpfc_enable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; + int bars = 0; /* Obtain PCI device reference */ if (!phba->pcidev) goto out_error; else pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Enable PCI device */ if (pci_enable_device_mem(pdev)) goto out_error; /* Request PCI resource for the device */ - if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) goto out_disable_device; /* Set up device as PCI master and save state for EEH */ pci_set_master(pdev); @@ -6008,7 +7420,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba) pci_save_state(pdev); /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ - if (pci_is_pcie(pdev)) + if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) pdev->needs_freset = 1; return 0; @@ -6016,8 +7428,8 @@ out_disable_device: pci_disable_device(pdev); out_error: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1401 Failed to enable pci device\n"); + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, + "1401 Failed to enable pci device, bars:x%x\n", bars); return -ENODEV; } /** @@ -6032,15 +7444,20 @@ static void lpfc_disable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; + int bars; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Release PCI resource and disable PCI device */ - pci_release_mem_regions(pdev); + pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); + /* Null out PCI private reference to driver */ + pci_set_drvdata(pdev, NULL); return; } @@ -6062,10 +7479,14 @@ lpfc_reset_hba(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return; } - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + + /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { lpfc_offline_prep(phba, LPFC_MBX_WAIT); - else + } else { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + lpfc_sli_flush_io_rings(phba); + } lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); @@ -6117,7 +7538,7 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); if (nr_vfn > max_nr_vfn) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3057 Requested vfs (%d) greater than " "supported vfs (%d)", nr_vfn, max_nr_vfn); return -EINVAL; } @@ -6136,6 +7557,15 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) return rc; } +static void +lpfc_unblock_requests_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(work, struct lpfc_hba, + unblock_request_work); + + lpfc_unblock_requests(phba); +} + /** * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. * @phba: pointer to lpfc hba data structure. 
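The hunks below repeat the same LINUX_VERSION_CODE < 4.15 check around every timer, because timer_setup() replaced setup_timer() in v4.15. A compat shim would centralize the ifdef; a hypothetical helper (no such macro exists in this patch) might look like the sketch below. Note it only works because the callbacks themselves are defined with version-conditional signatures, as lpfc_sli4_fcf_redisc_wait_tmo is earlier in this patch.

#include <linux/version.h>
#include <linux/timer.h>

/* Hypothetical one-stop shim; each branch matches the calls used in
 * the hunks below.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#define lpfc_timer_init(_t, _fn, _obj) \
	setup_timer((_t), (_fn), (unsigned long)(_obj))
#else
#define lpfc_timer_init(_t, _fn, _obj) \
	timer_setup((_t), (_fn), 0)
#endif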
@@ -6156,6 +7586,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) * Driver resources common to all SLI revisions */ atomic_set(&phba->fast_event_count, 0); + atomic_set(&phba->dbg_log_idx, 0); + atomic_set(&phba->dbg_log_cnt, 0); + atomic_set(&phba->dbg_log_dmping, 0); spin_lock_init(&phba->hbalock); /* Initialize ndlp management spinlock */ @@ -6198,6 +7631,18 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) spin_lock_init(&phba->devicelock); INIT_LIST_HEAD(&phba->luns); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + /* MBOX heartbeat timer */ + setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba); + /* Fabric block timer */ + setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout, + (unsigned long)phba); + /* EA polling mode timer */ + setup_timer(&phba->eratt_poll, lpfc_poll_eratt, + (unsigned long)phba); + /* Heartbeat timer */ + setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba); +#else /* MBOX heartbeat timer */ timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); /* Fabric block timer */ @@ -6206,9 +7651,11 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); /* Heartbeat timer */ timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); - +#endif INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); - + INIT_DELAYED_WORK(&phba->idle_stat_delay_work, + lpfc_idle_stat_delay_work); + INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); return 0; } @@ -6233,7 +7680,12 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ /* FCP polling mode timer */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout, + (unsigned long)phba); +#else timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); +#endif /* Host attention work mask setup */ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); @@ -6255,9 +7707,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) } if (!phba->sli.sli3_ring) - phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, - sizeof(struct lpfc_sli_ring), - GFP_KERNEL); + phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING * + sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.sli3_ring) return -ENOMEM; @@ -6266,18 +7717,16 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) * used to create the sg_dma_buf_pool must be dynamically calculated. */ - /* Initialize the host templates the configured values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; - if (phba->sli_rev == LPFC_SLI_REV4) entry_sz = sizeof(struct sli4_sge); else entry_sz = sizeof(struct ulp_bde64); + atomic_set(&phba->rxtable_idx_head, 0); + atomic_set(&phba->rxtable_idx_tail, 0); + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ - if (phba->cfg_enable_bg) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { /* * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, * the FCP rsp, and a BDE for each. 
Since we have no control @@ -6311,7 +7760,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) } lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, - "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); @@ -6405,18 +7854,20 @@ static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { LPFC_MBOXQ_t *mboxq; - MAILBOX_t *mb; int rc, i, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; int longs; int extra; +#ifdef BUILD_NVME + MAILBOX_t *mb; uint64_t wwn; +#endif u32 if_type; u32 if_fam; phba->sli4_hba.num_present_cpu = lpfc_present_cpu; - phba->sli4_hba.num_possible_cpu = num_possible_cpus(); + phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ @@ -6436,15 +7887,32 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* The lpfc_wq workqueue for deferred irq use */ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); +#if defined(BUILD_NVME) + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + phba->nvme_io = alloc_workqueue("lpfc_nvme_io", + WQ_MEM_RECLAIM, 0); +#endif /* * Initialize timers used by driver */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba); + /* FCF rediscover timer */ + setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, + (unsigned long)phba); + +#else timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); /* FCF rediscover timer */ timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); +#endif + + /* CMF congestion timer */ + hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_timer.function = lpfc_cmf_timer; /* * Control structure for handling external multi-buffer mailbox @@ -6495,6 +7963,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* This abort list used by worker thread */ spin_lock_init(&phba->sli4_hba.sgl_list_lock); spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); + spin_lock_init(&phba->sli4_hba.asynce_list_lock); + spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); /* * Initialize driver internal slow-path work queues @@ -6506,8 +7976,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); /* Asynchronous event CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); - /* Fast-path XRI aborted CQ Event work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); /* Slow-path XRI aborted CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Receive queue CQ Event work queue list */ @@ -6519,6 +7987,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); + /* Initialize active auth configuration list */ + INIT_LIST_HEAD(&phba->lpfc_auth_active_cfg_list); + /* Initialize mboxq lists. If the early init routines fail * these lists need to be correctly initialized. 
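Several hunks in this patch convert kcalloc(n, size, ...) into kzalloc(n * size, ...) (the sli3_ring and fcf_rr_bmask allocations, for instance), presumably to build on very old kernels. Worth noting: kcalloc(), like userspace calloc(), guards the n * size multiplication against overflow, while the open multiplication does not. A userspace demonstration of the guard:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	size_t n = SIZE_MAX / 4;	/* pathological element count */

	/* calloc() detects that n * 8 overflows size_t and fails */
	void *p = calloc(n, 8);

	printf("calloc: %p\n", p);	/* expected: (nil) */
	free(p);
	return 0;
}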
 */
@@ -6578,6 +8049,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	}
 
 	/* Check for NVMET being configured */
+#if defined(BUILD_NVME)
 	phba->nvmet_support = 0;
 	if (lpfc_enable_nvmet_cnt) {
@@ -6585,7 +8057,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		lpfc_read_nv(phba, mboxq);
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 		if (rc != MBX_SUCCESS) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_TRACE_EVENT,
 					"6016 Mailbox failed , mbxCmd x%x "
 					"READ_NV, mbxStatus x%x\n",
 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
@@ -6604,7 +8077,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 			/* wwn is WWPN of HBA instance */
 			wwn = cpu_to_be64(wwn);
 			phba->sli4_hba.wwpn.u.name = wwn;
-
 			/* Check to see if it matches any module parameter */
 			for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
 				if (wwn == lpfc_enable_nvmet[i]) {
@@ -6614,21 +8086,31 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 					phba->nvmet_support = 1; /* a match */
-					lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					lpfc_printf_log(phba, KERN_ERR,
+							LOG_TRACE_EVENT,
 							"6017 NVME Target %016llx\n",
 							wwn);
 #else
-					lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					lpfc_printf_log(phba, KERN_ERR,
+							LOG_TRACE_EVENT,
 							"6021 Can't enable NVME Target."
 							" NVME_TARGET_FC infrastructure"
 							" is not in kernel\n");
 #endif
 					/* Not supported for NVMET */
 					phba->cfg_xri_rebalancing = 0;
+					if (phba->irq_chann_mode == NHT_MODE) {
+						phba->cfg_irq_chann =
+							phba->sli4_hba.num_present_cpu;
+						phba->cfg_hdw_queue =
+							phba->sli4_hba.num_present_cpu;
+						phba->irq_chann_mode = NORMAL_MODE;
+					}
 					break;
 				}
 			}
 		}
+#endif
 
 	lpfc_nvme_mod_param_dep(phba);
 
@@ -6671,20 +8153,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		   &phba->sli4_hba.sli_intf);
 	if (phba->sli4_hba.extents_in_use &&
 	    phba->sli4_hba.rpi_hdrs_in_use) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2999 Unsupported SLI4 Parameters "
-				"Extents and RPI headers enabled.\n");
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"2999 Unsupported SLI4 Parameters "
+				"Extents and RPI headers enabled.\n");
 		if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
 		    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
 			mempool_free(mboxq, phba->mbox_mem_pool);
-			rc = -EIO;
 			goto out_free_bsmbx;
 		}
 	}
 	if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
 	      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
 		mempool_free(mboxq, phba->mbox_mem_pool);
-		rc = -EIO;
 		goto out_free_bsmbx;
 	}
 }
@@ -6726,16 +8206,30 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
 
-		/*
-		 * If supporting DIF, reduce the seg count for scsi to
-		 * allow room for the DIF sges.
-		 */
-		if (phba->cfg_enable_bg &&
-		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
-			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
-		else
-			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
+		if (phba->cfg_enable_bg) {
+			/*
+			 * Advertise to SCSI Layer less s/g support than
+			 * we configure due to DIF setup overhead.
+			 */
+			if (phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+				phba->cfg_scsi_seg_cnt =
+					LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+			else
+				phba->cfg_scsi_seg_cnt =
+					phba->cfg_sg_seg_cnt;
+		} else {
+			/*
+			 * If just External DIF is enabled, there is
+			 * no protection data to deal with, so we have less
+			 * SGE overhead in the SGL.
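The protection-enabled branch above, and the External-DIF branch that follows, each reduce cfg_scsi_seg_cnt to a fixed ceiling. The pattern in both is a simple clamp, shown here only as a sketch of the logic (not a proposed change to the patch):

	phba->cfg_scsi_seg_cnt = min_t(u32, phba->cfg_sg_seg_cnt,
				       LPFC_MAX_BG_SLI4_SEG_CNT_DIF);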
+ */ + if (phba->cfg_sg_seg_cnt > LPFC_MAX_ED_SLI4_SEG_CNT_DIF) + phba->cfg_scsi_seg_cnt = + LPFC_MAX_ED_SLI4_SEG_CNT_DIF; + else + phba->cfg_scsi_seg_cnt = + phba->cfg_sg_seg_cnt; + } } else { /* * The scsi_buf for a regular I/O holds the FCP cmnd, @@ -6768,6 +8262,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->border_sge_num = phba->cfg_sg_dma_buf_size / sizeof(struct sli4_sge); +#if defined(BUILD_NVME) /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { @@ -6779,11 +8274,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } else phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; } - - /* Initialize the host templates with the updated values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; - lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; +#endif lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, "9087 sg_seg_cnt:%d dmabuf_size:%d " @@ -6838,23 +8329,23 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Allocate and initialize active sgl array */ rc = lpfc_init_active_sgl_array(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1430 Failed to initialize sgl list.\n"); goto out_destroy_cq_event_pool; } rc = lpfc_sli4_init_rpi_hdrs(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1432 Failed to initialize rpi headers.\n"); goto out_free_active_sgl; } /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; - phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), + phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (!phba->fcf.fcf_rr_bmask) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2759 Failed allocate memory for FCF round " "robin failover bmask\n"); rc = -ENOMEM; @@ -6865,7 +8356,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_hba_eq_hdl), GFP_KERNEL); if (!phba->sli4_hba.hba_eq_hdl) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2572 Failed allocate memory for " "fast-path per-EQ handle array\n"); rc = -ENOMEM; @@ -6876,7 +8367,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_vector_map_info), GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3327 Failed allocate memory for msi-x " "interrupt vector mapping\n"); rc = -ENOMEM; @@ -6885,11 +8376,39 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); if (!phba->sli4_hba.eq_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3321 Failed allocation for per_cpu stats\n"); rc = -ENOMEM; goto out_free_hba_cpu_map; } + + phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, + sizeof(*phba->sli4_hba.idle_stat), + GFP_KERNEL); + if (!phba->sli4_hba.idle_stat) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3390 Failed allocation for idle_stat\n"); + rc = -ENOMEM; + goto out_free_hba_eq_info; + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); + if 
(!phba->sli4_hba.c_stat) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"3332 Failed allocating per cpu hdwq stats\n");
+		rc = -ENOMEM;
+		goto out_free_hba_idle_stat;
+	}
+#endif
+
+	phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
+	if (!phba->cmf_stat) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"3331 Failed allocating per cpu cgn stats\n");
+		rc = -ENOMEM;
+		goto out_free_hba_hdwq_info;
+	}
 
 	/*
 	 * Enable sr-iov virtual functions if supported and configured
 	 * through the module parameter.
@@ -6909,6 +8428,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	return 0;
 
+out_free_hba_hdwq_info:
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	free_percpu(phba->sli4_hba.c_stat);
+out_free_hba_idle_stat:
+#endif
+	kfree(phba->sli4_hba.idle_stat);
+out_free_hba_eq_info:
+	free_percpu(phba->sli4_hba.eq_info);
 out_free_hba_cpu_map:
 	kfree(phba->sli4_hba.cpu_map);
 out_free_hba_eq_hdl:
@@ -6947,12 +8474,18 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
 	free_percpu(phba->sli4_hba.eq_info);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	free_percpu(phba->sli4_hba.c_stat);
+#endif
+	free_percpu(phba->cmf_stat);
+	kfree(phba->sli4_hba.idle_stat);
 
 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
 	kfree(phba->sli4_hba.cpu_map);
 	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;
+	cpumask_clear(&phba->sli4_hba.irq_aff_mask);
 
 	/* Free memory allocated for fast-path work queue handles */
 	kfree(phba->sli4_hba.hba_eq_hdl);
@@ -6988,7 +8521,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
 	}
-
 	return;
 }
 
@@ -7020,7 +8552,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 		phba->lpfc_stop_port = lpfc_stop_port_s4;
 		break;
 	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"1431 Invalid HBA PCI-device group: 0x%x\n",
 				dev_grp);
 		return -ENODEV;
@@ -7052,7 +8584,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
 		error = PTR_ERR(phba->worker_thread);
 		return error;
 	}
-
 	return 0;
 }
 
@@ -7073,6 +8604,13 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
 		phba->wq = NULL;
 	}
 
+#if defined(BUILD_NVME)
+	if (phba->nvme_io) {
+		flush_workqueue(phba->nvme_io);
+		destroy_workqueue(phba->nvme_io);
+		phba->nvme_io = NULL;
+	}
+#endif
 	/* Stop kernel worker thread */
 	if (phba->worker_thread)
 		kthread_stop(phba->worker_thread);
@@ -7126,7 +8664,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 		if (iocbq_entry == NULL) {
 			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count.
Unloading driver.\n", - __func__, i, LPFC_IOCB_LIST_CNT); + __func__, i, iocb_count); goto out_free_iocbq; } @@ -7315,7 +8853,7 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0391 Error during rpi post operation\n"); lpfc_sli4_remove_rpis(phba); rc = -ENODEV; @@ -7543,21 +9081,15 @@ lpfc_create_shost(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vport); phba->pport = vport; +#if defined(BUILD_NVME) if (phba->nvmet_support) { /* Only 1 vport (pport) will support NVME target */ - if (phba->txrdy_payload_pool == NULL) { - phba->txrdy_payload_pool = dma_pool_create( - "txrdy_pool", &phba->pcidev->dev, - TXRDY_PAYLOAD_LEN, 16, 0); - if (phba->txrdy_payload_pool) { - phba->targetport = NULL; - phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; - lpfc_printf_log(phba, KERN_INFO, - LOG_INIT | LOG_NVME_DISC, - "6076 NVME Target Found\n"); - } - } + phba->targetport = NULL; + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, + "6076 NVME Target Found\n"); } +#endif lpfc_debugfs_initialize(vport); /* Put reference to SCSI host to driver's device private data */ @@ -7635,7 +9167,7 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) if (phba->cfg_prot_mask && phba->cfg_prot_guard) { if ((old_mask != phba->cfg_prot_mask) || (old_guard != phba->cfg_prot_guard)) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1475 Registering BlockGuard with the " "SCSI layer: mask %d guard %d\n", phba->cfg_prot_mask, @@ -7644,7 +9176,7 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) scsi_host_set_prot(shost, phba->cfg_prot_mask); scsi_host_set_guard(shost, phba->cfg_prot_guard); } else - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1479 Not Registering BlockGuard with the SCSI " "layer, Bad protection parameters: %d %d\n", old_mask, old_guard); @@ -7739,7 +9271,7 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) if (!phba->slim_memmap_p) { dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLIM memory.\n"); - goto out; + return error; } /* Map HBA Control Registers to a kernel virtual address. */ @@ -7798,7 +9330,7 @@ out_iounmap: iounmap(phba->ctrl_regs_memmap_p); out_iounmap_slim: iounmap(phba->slim_memmap_p); -out: + return error; } @@ -7875,7 +9407,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) * other register reads as the data may not be valid. Just exit. */ if (port_error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1408 Port Failed POST - portsmphr=0x%x, " "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " "scr2=x%x, hscratch=x%x, pstatus=x%x\n", @@ -7924,7 +9456,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "1422 Unrecoverable Error " "Detected during POST " "uerr_lo_reg=0x%x, " @@ -7951,7 +9484,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) phba->work_status[1] = readl(phba->sli4_hba.u.if_type2. 
ERR2regaddr);
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"2888 Unrecoverable port error "
 				"following POST: port status reg "
 				"0x%x, port_smphr reg 0x%x, "
@@ -8054,7 +9587,7 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
-		dev_printk(KERN_ERR, &phba->pcidev->dev,
+		dev_err(&phba->pcidev->dev,
 			   "FATAL - unsupported SLI4 interface type - %d\n",
 			   if_type);
 		break;
@@ -8072,8 +9605,7 @@ lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 {
 	switch (if_type) {
 	case LPFC_SLI_INTF_IF_TYPE_0:
-		phba->sli4_hba.PSMPHRregaddr =
-			phba->sli4_hba.ctrl_regs_memmap_p +
+		phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 			LPFC_SLIPORT_IF0_SMPHR;
 		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 			LPFC_HST_ISR0;
@@ -8097,7 +9629,7 @@ lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 	case LPFC_SLI_INTF_IF_TYPE_2:
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
-		dev_err(&phba->pcidev->dev,
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
 			   "FATAL - unsupported SLI4 interface type - %d\n",
 			   if_type);
 		break;
@@ -8235,6 +9767,86 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
 }
 
+static const char * const lpfc_topo_to_str[] = {
+	"Loop then P2P",
+	"Loopback",
+	"P2P Only",
+	"Unsupported",
+	"Loop Only",
+	"Unsupported",
+	"P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rd_config: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK.
+ *
+ **/
+#define	LINK_FLAGS_DEF	0x0
+#define	LINK_FLAGS_P2P	0x1
+#define	LINK_FLAGS_LOOP	0x2
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+	u8 ptv, tf, pt;
+
+	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2027 Read Config Data: ptv:0x%x, tf:0x%x pt:0x%x",
+			ptv, tf, pt);
+	if (!ptv) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2019 FW does not support persistent topology "
+				"Using driver parameter defined value [%s]",
+				lpfc_topo_to_str[phba->cfg_topology]);
+		return;
+	}
+	/* FW supports persistent topology - override module parameter value */
+	phba->hba_flag |= HBA_PERSISTENT_TOPO;
+	switch (phba->pcidev->device) {
+	case PCI_DEVICE_ID_LANCER_G7_FC:
+	case PCI_DEVICE_ID_LANCER_G6_FC:
+		if (!tf) {
+			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+					? FLAGS_TOPOLOGY_MODE_LOOP
+					: FLAGS_TOPOLOGY_MODE_PT_PT);
+		} else {
+			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+		}
+		break;
+	default:	/* G5 */
+		if (tf) {
+			/* If topology failover set - pt is '0' or '1' */
+			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
+		} else {
+			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+					?
FLAGS_TOPOLOGY_MODE_PT_PT + : FLAGS_TOPOLOGY_MODE_LOOP); + } + break; + } + if (phba->hba_flag & HBA_PERSISTENT_TOPO) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2020 Using persistent topology value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2021 Invalid topology values from FW " + "Using driver parameter defined value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } +} + /** * lpfc_sli4_read_config - Get the config parameters. * @phba: pointer to lpfc hba data structure. @@ -8265,7 +9877,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2011 Unable to allocate memory for issuing " "SLI_CONFIG_SPECIAL mailbox command\n"); return -ENOMEM; @@ -8275,11 +9887,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2012 Mailbox failed , mbxCmd x%x " - "READ_CONFIG, mbxStatus x%x\n", - bf_get(lpfc_mqe_command, &pmb->u.mqe), - bf_get(lpfc_mqe_status, &pmb->u.mqe)); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2012 Mailbox failed , mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); rc = -EIO; } else { rd_config = &pmb->u.mqe.un.rd_config; @@ -8302,6 +9914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.bbscn_params.word0 = rd_config->word8; } + phba->sli4_hba.fawwpn_in_use = + bf_get(lpfc_mbx_rd_conf_fawwpn_inuse, rd_config); phba->sli4_hba.conf_trunk = bf_get(lpfc_mbx_rd_conf_trunk, rd_config); phba->sli4_hba.extents_in_use = @@ -8346,13 +9960,60 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; + + /* Next decide on FPIN or Signal E2E CGN support + * For congestion alarms and warnings valid combination are: + * 1. FPIN alarms / FPIN warnings + * 2. Signal alarms / Signal warnings + * 3. FPIN alarms / Signal warnings + * 4. Signal alarms / FPIN warnings + * + * Initialize the adapter frequency to 100 mSecs + */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; + + if (lpfc_use_cgn_signal) { + if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_WARN; + phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; + } + if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { + /* MUST support both alarm and warning + * because EDC does not support alarm alone. + */ + if (phba->cgn_reg_signal != + LPFC_CGN_SIGNAL_WARN) { + /* Must support both or none */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; + phba->cgn_reg_signal = + LPFC_CGN_SIGNAL_NO_SPT; + } else { + phba->cgn_reg_signal = + LPFC_CGN_SIGNAL_BOTH; + phba->cgn_reg_fpin = + LPFC_CGN_FPIN_NONE; + } + } + } + + /* Set the congestion initial signal and fpin values. */ + phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; + phba->cgn_init_reg_signal = phba->cgn_reg_signal; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", + phba->cgn_reg_signal, phba->cgn_reg_fpin); + + lpfc_map_topology(phba, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2003 cfg params Extents? 
%d " "XRI(B:%d M:%d), " "VPI(B:%d M:%d) " "VFI(B:%d M:%d) " "RPI(B:%d M:%d) " - "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", phba->sli4_hba.extents_in_use, phba->sli4_hba.max_cfg_param.xri_base, phba->sli4_hba.max_cfg_param.max_xri, @@ -8366,7 +10027,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.max_eq, phba->sli4_hba.max_cfg_param.max_cq, phba->sli4_hba.max_cfg_param.max_wq, - phba->sli4_hba.max_cfg_param.max_rq); + phba->sli4_hba.max_cfg_param.max_rq, + phba->lmt); /* * Calculate queue resources based on how @@ -8388,8 +10050,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) /* Check to see if there is enough for NVME */ if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2005 Reducing Queues: " + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "2005 Reducing Queues - " + "FW resource limitation: " "WQ %d CQ %d EQ %d: min %d: " "IRQ %d HDWQ %d\n", phba->sli4_hba.max_cfg_param.max_wq, @@ -8454,10 +10118,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) LPFC_USER_LINK_SPEED_AUTO; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0047 Unrecognized link " - "speed : %d\n", - forced_link_speed); + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0047 Unrecognized link speed" + " : %d\n", forced_link_speed); phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; } @@ -8491,7 +10155,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (rc2 || shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3026 Mailbox failed , mbxCmd x%x " "GET_FUNCTION_CONFIG, mbxStatus x%x\n", bf_get(lpfc_mqe_command, &pmb->u.mqe), @@ -8528,7 +10192,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) "vf_number:%d\n", phba->sli4_hba.iov.pf_number, phba->sli4_hba.iov.vf_number); else - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3028 GET_FUNCTION_CONFIG: failed to find " "Resource Descriptor:x%x\n", LPFC_RSRC_DESC_TYPE_FCFCOE); @@ -8565,7 +10229,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0492 Unable to allocate memory for " "issuing SLI_CONFIG_SPECIAL mailbox " "command\n"); @@ -8580,7 +10244,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0493 SLI_CONFIG_SPECIAL mailbox " "failed with status x%x\n", rc); @@ -8619,8 +10283,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) */ if (phba->nvmet_support) { - if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; } @@ -8660,8 +10324,9 @@ lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path IO CQ 
(%d)\n", idx); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0499 Failed allocate fast-path IO CQ (%d)\n", + idx); return 1; } qdesc->qe_valid = 1; @@ -8683,7 +10348,7 @@ lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0503 Failed allocate fast-path IO WQ (%d)\n", idx); return 1; @@ -8739,7 +10404,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), GFP_KERNEL); if (!phba->sli4_hba.hdwq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6427 Failed allocate memory for " "fast-path Hardware Queue array\n"); goto out_error; @@ -8771,7 +10436,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_cqset) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3121 Fail allocate memory for " "fast-path CQ set array\n"); goto out_error; @@ -8781,7 +10446,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_mrq_hdr) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3122 Fail allocate memory for " "fast-path RQ set hdr array\n"); goto out_error; @@ -8791,7 +10456,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.nvmet_mrq_data) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3124 Fail allocate memory for " "fast-path RQ set data array\n"); goto out_error; @@ -8819,7 +10484,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0497 Failed allocate EQ (%d)\n", cpup->hdwq); goto out_error; @@ -8862,7 +10527,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) if (lpfc_alloc_io_wq_cq(phba, idx)) goto out_error; } - if (phba->nvmet_support) { for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { cpu = lpfc_find_cpu_handle(phba, idx, @@ -8873,7 +10537,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3142 Failed allocate NVME " "CQ Set (%d)\n", idx); goto out_error; @@ -8895,7 +10559,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0500 Failed allocate slow-path mailbox CQ\n"); goto out_error; } @@ -8907,7 +10571,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0501 Failed allocate slow-path ELS CQ\n"); goto out_error; } @@ -8926,7 +10590,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.mq_esize, phba->sli4_hba.mq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0505 Failed allocate slow-path MQ\n"); goto out_error; } @@ -8942,7 +10606,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) 
phba->sli4_hba.wq_esize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0504 Failed allocate slow-path ELS WQ\n"); goto out_error; } @@ -8956,7 +10620,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6079 Failed allocate NVME LS CQ\n"); goto out_error; } @@ -8969,7 +10633,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.wq_esize, phba->sli4_hba.wq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6080 Failed allocate NVME LS WQ\n"); goto out_error; } @@ -8987,7 +10651,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.rq_esize, phba->sli4_hba.rq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0506 Failed allocate receive HRQ\n"); goto out_error; } @@ -8998,7 +10662,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.rq_esize, phba->sli4_hba.rq_ecount, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0507 Failed allocate receive DRQ\n"); goto out_error; } @@ -9016,7 +10680,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) LPFC_NVMET_RQE_DEF_COUNT, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3146 Failed allocate " "receive HRQ\n"); goto out_error; @@ -9029,7 +10693,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) GFP_KERNEL, cpu_to_node(cpu)); if (qdesc->rqbp == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6131 Failed allocate " "Header RQBP\n"); goto out_error; @@ -9045,7 +10709,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) LPFC_NVMET_RQE_DEF_COUNT, cpu); if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3156 Failed allocate " "receive DRQ\n"); goto out_error; @@ -9055,6 +10719,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } } +#if (IS_ENABLED(CONFIG_NVME_FC)) /* Clear NVME stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { @@ -9062,6 +10727,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); } } +#endif /* Clear SCSI stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { @@ -9116,6 +10782,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba) /* Free the CQ/WQ corresponding to the Hardware Queue */ lpfc_sli4_queue_free(hdwq[idx].io_cq); lpfc_sli4_queue_free(hdwq[idx].io_wq); + hdwq[idx].hba_eq = NULL; hdwq[idx].io_cq = NULL; hdwq[idx].io_wq = NULL; if (phba->cfg_xpsgl && !phba->nvmet_support) @@ -9160,6 +10827,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) } spin_unlock_irq(&phba->hbalock); + lpfc_sli4_cleanup_poll_list(phba); + /* Release HBA eqs */ if (phba->sli4_hba.hdwq) lpfc_sli4_release_hdwq(phba); @@ -9233,7 +10902,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, int rc; if (!eq || !cq || !wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6085 Fast-path %s (%d) not allocated\n", ((eq) ? ((cq) ? 
"WQ" : "CQ") : "EQ"), qidx); return -ENOMEM; @@ -9243,9 +10912,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, rc = lpfc_cq_create(phba, cq, eq, (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6086 Failed setup of CQ (%d), rc = 0x%x\n", - qidx, (uint32_t)rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6086 Failed setup of CQ (%d), rc = 0x%x\n", + qidx, (uint32_t)rc); return rc; } @@ -9261,7 +10930,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, /* create the wq */ rc = lpfc_wq_create(phba, wq, cq, qtype); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); /* no need to tear down cq - caller will do so */ @@ -9279,9 +10948,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, } else { rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0539 Failed setup of slow-path MQ: " - "rc = 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); /* no need to tear down cq - caller will do so */ return rc; } @@ -9354,7 +11023,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* Check for dual-ULP support */ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3249 Unable to allocate memory for " "QUERY_FW_CFG mailbox command\n"); return -ENOMEM; @@ -9372,7 +11041,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3250 QUERY_FW_CFG mailbox failed with status " "x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -9403,7 +11072,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* Set up HBA event queue */ if (!qp) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; @@ -9427,7 +11096,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, phba->cfg_fcp_imax); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0523 Failed setup of fast-path" " EQ (%d), rc = 0x%x\n", cpup->eq, (uint32_t)rc); @@ -9459,7 +11128,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) qidx, LPFC_IO); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0535 Failed to setup fastpath " "IO WQ/CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); @@ -9474,7 +11143,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* Set up slow-path MBOX CQ/MQ */ if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0528 %s not allocated\n", phba->sli4_hba.mbx_cq ? 
"Mailbox WQ" : "Mailbox CQ"); @@ -9487,14 +11156,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.mbx_wq, NULL, 0, LPFC_MBOX); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } if (phba->nvmet_support) { if (!phba->sli4_hba.nvmet_cqset) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3165 Fast-path NVME CQ Set " "array not allocated\n"); rc = -ENOMEM; @@ -9506,7 +11175,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) qp, LPFC_WCQ, LPFC_NVMET); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3164 Failed setup of NVME CQ " "Set, rc = 0x%x\n", (uint32_t)rc); @@ -9518,7 +11187,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) qp[0].hba_eq, LPFC_WCQ, LPFC_NVMET); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6089 Failed setup NVMET CQ: " "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; @@ -9535,7 +11204,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* Set up slow-path ELS WQ/CQ */ if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0530 ELS %s not allocated\n", phba->sli4_hba.els_cq ? "WQ" : "CQ"); rc = -ENOMEM; @@ -9545,10 +11214,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.els_cq, phba->sli4_hba.els_wq, NULL, 0, LPFC_ELS); + if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", - (uint32_t)rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4101 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -9559,7 +11229,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Set up NVME LS Complete Queue */ if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6091 LS %s not allocated\n", phba->sli4_hba.nvmels_cq ? 
"WQ" : "CQ"); rc = -ENOMEM; @@ -9570,9 +11240,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.nvmels_wq, NULL, 0, LPFC_NVME_LS); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0526 Failed setup of NVVME LS WQ/CQ: " - "rc = 0x%x\n", (uint32_t)rc); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4102 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } @@ -9590,7 +11260,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if ((!phba->sli4_hba.nvmet_cqset) || (!phba->sli4_hba.nvmet_mrq_hdr) || (!phba->sli4_hba.nvmet_mrq_data)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6130 MRQ CQ Queues not " "allocated\n"); rc = -ENOMEM; @@ -9603,7 +11273,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.nvmet_cqset, LPFC_NVMET); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6098 Failed setup of NVMET " "MRQ: rc = 0x%x\n", (uint32_t)rc); @@ -9617,7 +11287,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.nvmet_cqset[0], LPFC_NVMET); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6057 Failed setup of NVMET " "Receive Queue: rc = 0x%x\n", (uint32_t)rc); @@ -9636,7 +11306,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0540 Receive Queue not allocated\n"); rc = -ENOMEM; goto out_destroy; @@ -9645,7 +11315,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0541 Failed setup of Receive Queue: " "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; @@ -9670,10 +11340,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (phba->sli4_hba.cq_max) { kfree(phba->sli4_hba.cq_lookup); - phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), + phba->sli4_hba.cq_lookup = kzalloc((phba->sli4_hba.cq_max + 1) * sizeof(struct lpfc_queue *), GFP_KERNEL); if (!phba->sli4_hba.cq_lookup) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0549 Failed setup of CQ Lookup table: " "size 0x%x\n", phba->sli4_hba.cq_max); rc = -ENOMEM; @@ -9920,26 +11590,28 @@ lpfc_sli4_cq_event_release(struct lpfc_hba *phba, static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) { - LIST_HEAD(cqelist); - struct lpfc_cq_event *cqe; + LIST_HEAD(cq_event_list); + struct lpfc_cq_event *cq_event; unsigned long iflags; /* Retrieve all the pending WCQEs from pending WCQE lists */ - spin_lock_irqsave(&phba->hbalock, iflags); - /* Pending FCP XRI abort events */ - list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, - &cqelist); - /* Pending ELS XRI abort events */ - list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, - &cqelist); - /* Pending asynnc events */ - list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, - &cqelist); - spin_unlock_irqrestore(&phba->hbalock, iflags); - while (!list_empty(&cqelist)) { - list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); - lpfc_sli4_cq_event_release(phba, cqe); + /* Pending ELS XRI abort events */ + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + 
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + &cq_event_list); + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); + + /* Pending asynnc events */ + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); + list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, + &cq_event_list); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + + while (!list_empty(&cq_event_list)) { + list_remove_head(&cq_event_list, cq_event, + struct lpfc_cq_event, list); + lpfc_sli4_cq_event_release(phba, cq_event); } } @@ -9973,7 +11645,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0494 Unable to allocate memory for " "issuing SLI_FUNCTION_RESET mailbox " "command\n"); @@ -9993,7 +11665,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0495 SLI_FUNCTION_RESET mailbox " "failed with status x%x add_status x%x," " mbx status x%x\n", @@ -10009,7 +11681,7 @@ wait: * up to 30 seconds. If the port doesn't respond, treat * it as an error. */ - for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { + for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) { if (lpfc_readl(phba->sli4_hba.u.if_type2. STATUSregaddr, ®_data.word0)) { rc = -ENODEV; @@ -10025,7 +11697,7 @@ wait: phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl( phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2890 Port not ready, port status reg " "0x%x error 1=0x%x, error 2=0x%x\n", reg_data.word0, @@ -10067,7 +11739,7 @@ wait: out: /* Catch the not-ready port failure after a port reset. */ if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3317 HBA not functional: IP Reset Failed " "try: echo fw_reset > board_mode\n"); rc = -ENODEV; @@ -10117,7 +11789,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) /* There is no SLI3 failback for SLI4 devices. 
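The reworked lpfc_sli4_cq_event_release_all() above follows a standard drain idiom: each pending list is spliced onto a private list under its own lock, the lock is dropped, and the entries are then released with no lock held. A generic sketch of the pattern, with demo_lock, demo_pending and struct demo_evt as illustrative names:

LIST_HEAD(tmp);
struct demo_evt *evt;
unsigned long flags;

spin_lock_irqsave(&demo_lock, flags);
list_splice_init(&demo_pending, &tmp);	/* steal everything, leave source empty */
spin_unlock_irqrestore(&demo_lock, flags);

while (!list_empty(&tmp)) {		/* release without holding the lock */
	evt = list_first_entry(&tmp, struct demo_evt, list);
	list_del_init(&evt->list);
	kfree(evt);
}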
*/ if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_VALID) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2894 SLI_INTF reg contents invalid " "sli_intf reg 0x%x\n", phba->sli4_hba.sli_intf.word0); @@ -10182,7 +11854,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { - dev_err(&pdev->dev, + dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA " "control registers.\n"); error = -ENOMEM; @@ -10208,7 +11880,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_err(&pdev->dev, + dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA doorbell registers.\n"); error = -ENOMEM; goto out_iounmap_conf; @@ -10230,7 +11902,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_err(&pdev->dev, + dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA" " doorbell registers.\n"); error = -ENOMEM; @@ -10247,8 +11919,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) } } - if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && - pci_resource_start(pdev, PCI_64BIT_BAR4)) { +#ifndef BUILD_BRCMFCOE + if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && + (pci_resource_start(pdev, PCI_64BIT_BAR4))) { /* * Map SLI4 if type 6 HBA DPP Register base to a kernel * virtual address and setup the registers. @@ -10256,18 +11929,17 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.dpp_regs_memmap_p = - ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->sli4_hba.dpp_regs_memmap_p) { - dev_err(&pdev->dev, + ioremap_wc(phba->pci_bar2_map, bar2map_len); + if (!phba->sli4_hba.dpp_regs_memmap_p) + /* not a failure, we simply won't use dpp */ + dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 HBA dpp registers.\n"); - error = -ENOMEM; - goto out_iounmap_ctrl; - } phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; } +#endif /* Set up the EQ/CQ register handeling functions now */ - switch (if_type) { + switch(if_type) { case LPFC_SLI_INTF_IF_TYPE_0: case LPFC_SLI_INTF_IF_TYPE_2: phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; @@ -10320,6 +11992,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) case LPFC_SLI_INTF_IF_TYPE_6: iounmap(phba->sli4_hba.drbl_regs_memmap_p); iounmap(phba->sli4_hba.conf_regs_memmap_p); + if (phba->sli4_hba.dpp_regs_memmap_p) + iounmap(phba->sli4_hba.dpp_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_1: default: @@ -10345,23 +12019,34 @@ static int lpfc_sli_enable_msix(struct lpfc_hba *phba) { int rc; + int i; LPFC_MBOXQ_t *pmb; /* Set up MSI-X multi-message vectors */ - rc = pci_alloc_irq_vectors(phba->pcidev, - LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); - if (rc < 0) { + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + phba->msix_entries[i].entry = i; + + /* Configure MSI-X capability structure */ + rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, + LPFC_MSIX_VECTORS); + if (rc) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); - goto vec_fail_out; + goto msi_fail_out; } + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0477 MSI-X entry[%d]: vector=x%x " + "message=%d\n", i, + 
phba->msix_entries[i].vector, + phba->msix_entries[i].entry); /* * Assign MSI-X vectors to interrupt handlers */ /* vector-0 is associated to slow-path handler */ - rc = request_irq(pci_irq_vector(phba->pcidev, 0), + rc = request_irq(phba->msix_entries[0].vector, &lpfc_sli_sp_intr_handler, 0, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { @@ -10372,10 +12057,9 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) } /* vector-1 is associated to fast-path handler */ - rc = request_irq(pci_irq_vector(phba->pcidev, 1), + rc = request_irq(phba->msix_entries[1].vector, &lpfc_sli_fp_intr_handler, 0, LPFC_FP_DRIVER_HANDLER_NAME, phba); - if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0429 MSI-X fast-path request_irq failed " @@ -10390,7 +12074,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) if (!pmb) { rc = -ENOMEM; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0474 Unable to allocate memory for issuing " "MBOX_CONFIG_MSI command\n"); goto mem_fail_out; @@ -10417,17 +12101,16 @@ mbx_fail_out: mem_fail_out: /* free the irq already requested */ - free_irq(pci_irq_vector(phba->pcidev, 1), phba); + free_irq(phba->msix_entries[1].vector, phba); irq_fail_out: /* free the irq already requested */ - free_irq(pci_irq_vector(phba->pcidev, 0), phba); + free_irq(phba->msix_entries[0].vector, phba); msi_fail_out: /* Unconfigure MSI-X capability structure */ - pci_free_irq_vectors(phba->pcidev); + pci_disable_msix(phba->pcidev); -vec_fail_out: return rc; } @@ -10541,16 +12224,18 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) static void lpfc_sli_disable_intr(struct lpfc_hba *phba) { - int nr_irqs, i; + int i; - if (phba->intr_type == MSIX) - nr_irqs = LPFC_MSIX_VECTORS; - else - nr_irqs = 1; - - for (i = 0; i < nr_irqs; i++) - free_irq(pci_irq_vector(phba->pcidev, i), phba); - pci_free_irq_vectors(phba->pcidev); + if (phba->intr_type == MSIX) { + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + free_irq(phba->msix_entries[i].vector, phba); + pci_disable_msix(phba->pcidev); + } else if (phba->intr_type == MSI) { + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); + } else { + free_irq(phba->pcidev->irq, phba); + } /* Reset interrupt management states */ phba->intr_type = NONE; @@ -10581,7 +12266,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) */ if ((match == LPFC_FIND_BY_EQ) && (cpup->flag & LPFC_CPU_FIRST_IRQ) && - (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && (cpup->eq == id)) return cpu; @@ -10619,6 +12303,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu, } #endif +/* + * lpfc_assign_eq_map_info - Assigns eq for vector_map structure + * @phba: pointer to lpfc hba data structure. + * @eqidx: index for eq and irq vector + * @flag: flags to set for vector_map structure + * @cpu: cpu used to index vector_map structure + * + * The routine assigns eq info into vector_map structure + */ +static inline void +lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, + unsigned int cpu) +{ + struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; + struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); + + cpup->eq = eqidx; + cpup->flag |= flag; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", + cpu, eqhdl->irq, cpup->eq, cpup->flag); +} + +/** + * lpfc_cpu_map_array_init - Initialize cpu_map structure + * @phba: pointer to lpfc hba data structure. 
+ * + * The routine initializes the cpu_map array structure + */ +static void +lpfc_cpu_map_array_init(struct lpfc_hba *phba) +{ + struct lpfc_vector_map_info *cpup; + struct lpfc_eq_intr_info *eqi; + int cpu; + + for_each_possible_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; + cpup->core_id = LPFC_VECTOR_MAP_EMPTY; + cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; + cpup->eq = LPFC_VECTOR_MAP_EMPTY; + cpup->flag = 0; + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); + INIT_LIST_HEAD(&eqi->list); + eqi->icnt = 0; + } +} + +/** + * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure + * @phba: pointer to lpfc hba data structure. + * + * The routine initializes the hba_eq_hdl array structure + */ +static void +lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) +{ + struct lpfc_hba_eq_hdl *eqhdl; + int i; + + for (i = 0; i < phba->cfg_irq_chann; i++) { + eqhdl = lpfc_get_eq_hdl(i); + eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; + eqhdl->phba = phba; + } +} + /** * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. @@ -10637,21 +12390,12 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) int max_core_id, min_core_id; struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *new_cpup; - const struct cpumask *maskp; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif - - /* Init cpu_map array */ - for_each_possible_cpu(cpu) { - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; - cpup->core_id = LPFC_VECTOR_MAP_EMPTY; - cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; - cpup->eq = LPFC_VECTOR_MAP_EMPTY; - cpup->irq = LPFC_VECTOR_MAP_EMPTY; - cpup->flag = 0; - } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hdwq_stat *c_stat; +#endif max_phys_id = 0; min_phys_id = LPFC_VECTOR_MAP_EMPTY; @@ -10688,65 +12432,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) min_core_id = cpup->core_id; } - for_each_possible_cpu(i) { - struct lpfc_eq_intr_info *eqi = - per_cpu_ptr(phba->sli4_hba.eq_info, i); - - INIT_LIST_HEAD(&eqi->list); - eqi->icnt = 0; - } - - /* This loop sets up all CPUs that are affinitized with a - * irq vector assigned to the driver. All affinitized CPUs - * will get a link to that vectors IRQ and EQ. - * - * NULL affinity mask handling: - * If irq count is greater than one, log an error message. - * If the null mask is received for the first irq, find the - * first present cpu, and assign the eq index to ensure at - * least one EQ is assigned. - */ - for (idx = 0; idx < phba->cfg_irq_chann; idx++) { - /* Get a CPU mask for all CPUs affinitized to this vector */ - maskp = pci_irq_get_affinity(phba->pcidev, idx); - if (!maskp) { - if (phba->cfg_irq_chann > 1) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3329 No affinity mask found " - "for vector %d (%d)\n", - idx, phba->cfg_irq_chann); - if (!idx) { - cpu = cpumask_first(cpu_present_mask); - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->eq = idx; - cpup->irq = pci_irq_vector(phba->pcidev, idx); - cpup->flag |= LPFC_CPU_FIRST_IRQ; - } - break; - } - - i = 0; - /* Loop through all CPUs associated with vector idx */ - for_each_cpu_and(cpu, maskp, cpu_present_mask) { - /* Set the EQ index and IRQ for that vector */ - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->eq = idx; - cpup->irq = pci_irq_vector(phba->pcidev, idx); - - /* If this is the first CPU thats assigned to this - * vector, set LPFC_CPU_FIRST_IRQ. 
- */ - if (!i) - cpup->flag |= LPFC_CPU_FIRST_IRQ; - i++; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3336 Set Affinity: CPU %d " - "irq %d eq %d flag x%x\n", - cpu, cpup->irq, cpup->eq, cpup->flag); - } - } - /* After looking at each irq vector assigned to this pcidev, its * possible to see that not ALL CPUs have been accounted for. * Next we will set any unassigned (unaffinitized) cpu map @@ -10772,7 +12457,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && - (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) && + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && (new_cpup->phys_id == cpup->phys_id)) goto found_same; new_cpu = cpumask_next( @@ -10785,7 +12470,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) found_same: /* We found a matching phys_id, so copy the IRQ info */ cpup->eq = new_cpup->eq; - cpup->irq = new_cpup->irq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries @@ -10797,9 +12481,10 @@ found_same: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3337 Set Affinity: CPU %d " - "irq %d from id %d same " + "eq %d from peer cpu %d same " "phys_id (%d)\n", - cpu, cpup->irq, new_cpu, cpup->phys_id); + cpu, cpup->eq, new_cpu, + cpup->phys_id); } } @@ -10823,7 +12508,7 @@ found_same: for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && - (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY)) + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) goto found_any; new_cpu = cpumask_next( new_cpu, cpu_present_mask); @@ -10833,13 +12518,12 @@ found_same: /* We should never leave an entry unassigned */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3339 Set Affinity: CPU %d " - "irq %d UNASSIGNED\n", - cpup->hdwq, cpup->irq); + "eq %d UNASSIGNED\n", + cpup->hdwq, cpup->eq); continue; found_any: /* We found an available entry, copy the IRQ info */ cpup->eq = new_cpup->eq; - cpup->irq = new_cpup->irq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries @@ -10851,8 +12535,8 @@ found_any: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3338 Set Affinity: CPU %d " - "irq %d from id %d (%d/%d)\n", - cpu, cpup->irq, new_cpu, + "eq %d from peer cpu %d (%d/%d)\n", + cpu, cpup->eq, new_cpu, new_cpup->phys_id, new_cpup->core_id); } } @@ -10873,11 +12557,11 @@ found_any: idx++; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3333 Set Affinity: CPU %d (phys %d core %d): " - "hdwq %d eq %d irq %d flg x%x\n", + "hdwq %d eq %d flg x%x\n", cpu, cpup->phys_id, cpup->core_id, - cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); + cpup->hdwq, cpup->eq, cpup->flag); } - /* Finally we need to associate a hdwq with each cpu_map entry + /* Associate a hdwq with each cpu_map entry * This will be 1 to 1 - hdwq to cpu, unless there are less * hardware queues then CPUs. For that case we will just round-robin * the available hardware queues as they get assigned to CPUs. @@ -10951,9 +12635,33 @@ found_any: logit: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3335 Set Affinity: CPU %d (phys %d core %d): " - "hdwq %d eq %d irq %d flg x%x\n", + "hdwq %d eq %d flg x%x\n", cpu, cpup->phys_id, cpup->core_id, - cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); + cpup->hdwq, cpup->eq, cpup->flag); + } + + /* + * Initialize the cpu_map slots for not-present cpus in case + * a cpu is hot-added. 
Perform a simple hdwq round robin assignment.
+	 */
+	idx = 0;
+	for_each_possible_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
+		c_stat->hdwq_no = cpup->hdwq;
+#endif
+		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+			continue;
+
+		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		c_stat->hdwq_no = cpup->hdwq;
+#endif
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3340 Set Affinity: not present "
+				"CPU %d hdwq %d\n",
+				cpu, cpup->hdwq);
 	}
 
 	/* The cpu_map array will be used later during initialization
@@ -10962,12 +12670,289 @@ found_any:
 	return;
 }
 
+/**
+ * lpfc_cpuhp_get_eq - get the eqs that must be polled when @cpu goes offline
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list to fill with the eqs whose vectors are being shut down
+ */
+static int
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+		  struct list_head *eqlist)
+{
+	const struct cpumask *maskp;
+	struct lpfc_queue *eq;
+	struct cpumask *tmp;
+	u16 idx;
+
+	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+		maskp = pci_irq_get_affinity(phba->pcidev, idx);
+		if (!maskp)
+			continue;
+		/*
+		 * if irq is not affinitized to the cpu going offline
+		 * then we don't need to poll the eq attached
+		 * to it.
+		 */
+		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
+			continue;
+		/* get the cpus that are online and are affinitized
+		 * to this irq vector. If the count is more than 1
+		 * then cpuhp is not going to shut down this vector.
+		 * Since this cpu has not gone offline yet, we need >1.
+		 */
+		cpumask_and(tmp, maskp, cpu_online_mask);
+		if (cpumask_weight(tmp) > 1)
+			continue;
+
+		/* Now that we have an irq to shutdown, get the eq
+		 * mapped to this irq. Note: multiple hdwq's in
+		 * the software can share an eq, but eventually
+		 * only one eq will be mapped to this vector.
+		 */
+		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+		list_add(&eq->_poll_list, eqlist);
+	}
+	kfree(tmp);
+	return 0;
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+
+	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+					    &phba->cpuhp);
+	/*
+	 * unregistering the instance doesn't stop the polling
+	 * timer. Wait for the poll timer to retire.
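One style note on lpfc_cpuhp_get_eq() above: the temporary mask is allocated with kzalloc(cpumask_size()). An equivalent and slightly more idiomatic form is cpumask_var_t, which lives on the stack when CONFIG_CPUMASK_OFFSTACK is disabled and falls back to the heap otherwise; a sketch:

cpumask_var_t tmp;

if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
	return -ENOMEM;

/* ... the cpumask_and()/cpumask_weight() checks go here ... */

free_cpumask_var(tmp);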
+ */ + synchronize_rcu(); + del_timer_sync(&phba->cpuhp_poll_timer); +} + +static void lpfc_cpuhp_remove(struct lpfc_hba *phba) +{ + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return; + + __lpfc_cpuhp_remove(phba); +} + +static void lpfc_cpuhp_add(struct lpfc_hba *phba) +{ + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + rcu_read_lock(); + + if (!list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + rcu_read_unlock(); + + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, + &phba->cpuhp); +} + +static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) +{ + if (phba->pport->load_flag & FC_UNLOADING) { + *retval = -EAGAIN; + return true; + } + + if (phba->sli_rev != LPFC_SLI_REV4) { + *retval = 0; + return true; + } + + /* proceed with the hotplug */ + return false; +} + +/** + * lpfc_irq_set_aff - set IRQ affinity + * @eqhdl: EQ handle + * @cpu: cpu to set affinity to + * + **/ +static inline void +lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) +{ + cpumask_clear(&eqhdl->aff_mask); + cpumask_set_cpu(cpu, &eqhdl->aff_mask); + irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); + irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); +} + +/** + * lpfc_irq_clear_aff - clear IRQ affinity + * @eqhdl: EQ handle + * + **/ +static inline void +lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) +{ + cpumask_clear(&eqhdl->aff_mask); + irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); +} + +/** + * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event + * @phba: pointer to HBA context object. + * @cpu: cpu going offline/online + * @offline: true, cpu is going offline. false, cpu is coming online. + * + * If cpu is going offline, we make a best-effort attempt to find the next + * online cpu on the phba's original_mask and migrate all offlining IRQ + * affinities. + * + * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. + * + * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on + * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
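[Editor's aside — illustrative, not part of the patch.] A condensed usage sketch of the pin/unpin pair defined above; the wrapper and its online check are assumptions for illustration, while lpfc_irq_set_aff() and lpfc_irq_clear_aff() are the helpers from this patch:

/* Pin eqhdl's vector to a preferred cpu when one is usable,
 * otherwise hand the vector back to the irq balancer.
 */
static void lpfc_irq_pin_or_release(struct lpfc_hba_eq_hdl *eqhdl,
				    unsigned int cpu)
{
	if (cpu < nr_cpu_ids && cpu_online(cpu))
		lpfc_irq_set_aff(eqhdl, cpu);	/* exclusive, no balancing */
	else
		lpfc_irq_clear_aff(eqhdl);	/* balancer manages it again */
}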
+ * + **/ +static void +lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) +{ + struct lpfc_vector_map_info *cpup; + struct cpumask *aff_mask; + unsigned int cpu_select, cpu_next, idx; + const struct cpumask *orig_mask; + + if (phba->irq_chann_mode == NORMAL_MODE) + return; + + orig_mask = &phba->sli4_hba.irq_aff_mask; + + if (!cpumask_test_cpu(cpu, orig_mask)) + return; + + cpup = &phba->sli4_hba.cpu_map[cpu]; + + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + return; + + if (offline) { + /* Find next online CPU on original mask */ + cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); + cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); + + /* Found a valid CPU */ + if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { + /* Go through each eqhdl and ensure offlining + * cpu aff_mask is migrated + */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + aff_mask = lpfc_get_aff_mask(idx); + + /* Migrate affinity */ + if (cpumask_test_cpu(cpu, aff_mask)) + lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), + cpu_select); + } + } else { + /* Rely on irqbalance if no online CPUs left on NUMA */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) + lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); + } + } else { + /* Migrate affinity back to this CPU */ + lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); + } +} + +static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + LIST_HEAD(eqlist); + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:%px.", raw_smp_processor_id(), + phba); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, true); + + retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); + if (retval) + return retval; + + /* start polling on these eq's */ + list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { + list_del_init(&eq->_poll_list); + lpfc_sli4_start_polling(eq); + } + + return 0; +} + +static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + unsigned int n; + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:%px.", raw_smp_processor_id(), + phba); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, false); + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { + n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); + if (n == cpu) + lpfc_sli4_stop_polling(eq); + } + + return 0; +} + /** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-4 interface spec. + * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them + * to cpus on the system. + * + * When cfg_irq_numa is enabled, the adapter will only allocate vectors for + * the number of cpus on the same numa node as this adapter. The vectors are + * allocated without requesting OS affinity mapping. A vector will be + * allocated and assigned to each online and offline cpu. If the cpu is + * online, then affinity will be set to that cpu. If the cpu is offline, then + * affinity will be set to the nearest peer cpu within the numa node that is + * online. 
If there are no online cpus within the numa node, affinity is not + * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping + * is consistent with the way cpu online/offline is handled when cfg_irq_numa is + * configured. + * + * If numa mode is not enabled and there is more than 1 vector allocated, then + * the driver relies on the managed irq interface, where the OS assigns the + * vector-to-cpu affinity. The driver will then use that affinity mapping to + * set up its cpu mapping table. * * Return codes * 0 - successful @@ -10978,67 +12963,163 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int vectors, rc, index; char *name; + const struct cpumask *aff_mask = NULL; + unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; + struct lpfc_vector_map_info *cpup; + struct lpfc_hba_eq_hdl *eqhdl; + const struct cpumask *maskp; + unsigned int flags = PCI_IRQ_MSIX; - /* Set up MSI-X multi-message vectors */ + /* Configure MSI-X capability structure */ vectors = phba->cfg_irq_chann; - rc = pci_alloc_irq_vectors(phba->pcidev, - 1, - vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (phba->irq_chann_mode != NORMAL_MODE) + aff_mask = &phba->sli4_hba.irq_aff_mask; + + if (aff_mask) { + cpu_cnt = cpumask_weight(aff_mask); + vectors = min(phba->cfg_irq_chann, cpu_cnt); + + /* cpu: iterates over aff_mask including offline or online + * cpu_select: iterates over online aff_mask to set affinity + */ + cpu = cpumask_first(aff_mask); + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); + } else { + flags |= PCI_IRQ_AFFINITY; + } + + rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); - goto vec_fail_out; + goto msi_fail_out; } + vectors = rc; /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { - name = phba->sli4_hba.hba_eq_hdl[index].handler_name; + eqhdl = lpfc_get_eq_hdl(index); + name = eqhdl->handler_name; memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; + eqhdl->idx = index; rc = request_irq(pci_irq_vector(phba->pcidev, index), - &lpfc_sli4_hba_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); + &lpfc_sli4_hba_intr_handler, 0, + name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " "request_irq failed (%d)\n", index, rc); goto cfg_fail_out; } + + eqhdl->irq = pci_irq_vector(phba->pcidev, index); + + if (aff_mask) { + /* If found a neighboring online cpu, set affinity */ + if (cpu_select < nr_cpu_ids) + lpfc_irq_set_aff(eqhdl, cpu_select); + + /* Assign EQ to cpu_map */ + lpfc_assign_eq_map_info(phba, index, + LPFC_CPU_FIRST_IRQ, + cpu); + + /* Iterate to next offline or online cpu in aff_mask */ + cpu = cpumask_next(cpu, aff_mask); + + /* Find next online cpu in aff_mask to set affinity */ + cpu_select = lpfc_next_online_cpu(aff_mask, cpu); + } else if (vectors == 1) { + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, + cpu); + } else { + maskp = pci_irq_get_affinity(phba->pcidev, index); + + /* Loop through all CPUs associated with vector index */ + for_each_cpu_and(cpu, maskp, cpu_present_mask) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* If this is the first CPU that's assigned to + * this vector, set LPFC_CPU_FIRST_IRQ.
+ * + * With certain platforms it's possible that irq + * vectors are affinitized to all the cpus. + * This can result in each cpu_map.eq being set + * to the last vector, overwriting all the + * previous cpu_map.eq entries. Ensure that + * each vector receives a place in cpu_map. + * A later call to lpfc_cpu_affinity_check will + * ensure we are nicely balanced out. + */ + if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) + continue; + lpfc_assign_eq_map_info(phba, index, + LPFC_CPU_FIRST_IRQ, + cpu); + break; + } + } } if (vectors != phba->cfg_irq_chann) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3238 Reducing IO channels to match number of " "MSI-X vectors, requested %d got %d\n", phba->cfg_irq_chann, vectors); if (phba->cfg_irq_chann > vectors) phba->cfg_irq_chann = vectors; - if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors)) - phba->cfg_nvmet_mrq = vectors; } return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); + for (--index; index >= 0; index--) { + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + irq_set_affinity_hint(eqhdl->irq, NULL); + free_irq(eqhdl->irq, eqhdl); + } /* Unconfigure MSI-X capability structure */ - pci_free_irq_vectors(phba->pcidev); + pci_disable_msix(phba->pcidev); -vec_fail_out: +msi_fail_out: return rc; } +/** + * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release the MSI-X vectors and then disable the + * MSI-X interrupt mode to device with SLI-4 interface spec. + **/ +static void +lpfc_sli4_disable_msix(struct lpfc_hba *phba) +{ + int index; + struct lpfc_hba_eq_hdl *eqhdl; + + /* Free up MSI-X multi-message vectors */ + for (index = 0; index < phba->cfg_irq_chann; index++) { + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + irq_set_affinity_hint(eqhdl->irq, NULL); + free_irq(eqhdl->irq, eqhdl); + } + /* Disable MSI-X */ + pci_disable_msix(phba->pcidev); + + return; +} + /** * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. @@ -11057,6 +13138,8 @@ static int lpfc_sli4_enable_msi(struct lpfc_hba *phba) { int rc, index; + unsigned int cpu; + struct lpfc_hba_eq_hdl *eqhdl; rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); @@ -11078,14 +13161,38 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) return rc; } + eqhdl = lpfc_get_eq_hdl(0); + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); + for (index = 0; index < phba->cfg_irq_chann; index++) { - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; + eqhdl = lpfc_get_eq_hdl(index); + eqhdl->idx = index; } return 0; } +/** + * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable the MSI interrupt mode to device with + * SLI-4 interface spec. The driver calls free_irq() on the MSI vector on + * which it has done request_irq() before calling pci_disable_msi(). Failure + * to do so results in a BUG_ON(), and the device is left with MSI enabled, + * leaking its vector.
+ **/ +static void +lpfc_sli4_disable_msi(struct lpfc_hba *phba) +{ + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); + return; +} + /** * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. @@ -11138,15 +13245,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { struct lpfc_hba_eq_hdl *eqhdl; + unsigned int cpu; /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; + eqhdl = lpfc_get_eq_hdl(0); + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, + cpu); for (idx = 0; idx < phba->cfg_irq_chann; idx++) { - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl = lpfc_get_eq_hdl(idx); eqhdl->idx = idx; - eqhdl->phba = phba; } } } @@ -11166,22 +13279,12 @@ static void lpfc_sli4_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) { - int index; - - /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_irq_chann; index++) { - irq_set_affinity_hint( - pci_irq_vector(phba->pcidev, index), - NULL); - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); - } - } else { + if (phba->intr_type == MSIX) + lpfc_sli4_disable_msix(phba); + else if (phba->intr_type == MSI) + lpfc_sli4_disable_msi(phba); + else if (phba->intr_type == INTx) free_irq(phba->pcidev->irq, phba); - } - - pci_free_irq_vectors(phba->pcidev); /* Reset interrupt management states */ phba->intr_type = NONE; @@ -11272,17 +13375,17 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6424 NVMET XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!io_xri_cmpl) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6100 IO XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!els_xri_cmpl) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2878 ELS XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); @@ -11332,9 +13435,15 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); + hrtimer_cancel(&phba->cmf_timer); + if (phba->pport) phba->sli4_hba.intr_enable = 0; +#ifndef BUILD_BRCMFCOE + if (!list_empty(&phba->lpfc_auth_active_cfg_list)) + lpfc_auth_free_active_cfg_list(phba); +#endif /* * Gracefully wait out the potential current outstanding asynchronous * mailbox command. 
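[Editor's aside — illustrative, not part of the patch.] The MSI and INTx paths above bootstrap the eq mapping identically for their single vector; a condensed sketch of that shared pattern, assuming the lpfc_get_eq_hdl() and lpfc_assign_eq_map_info() accessors this patch introduces:

/* Single-vector modes record vector 0 and seed the cpu_map once. */
static void lpfc_single_vector_map(struct lpfc_hba *phba)
{
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(0);
	unsigned int cpu;

	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

	/* The first present cpu becomes the FIRST_IRQ anchor for eq 0 */
	cpu = cpumask_first(cpu_present_mask);
	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
}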
@@ -11367,6 +13476,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Wait for completion of device XRI exchange busy */ lpfc_sli4_xri_exchange_busy_wait(phba); + /* per-phba callback de-registration for hotplug event */ + lpfc_cpuhp_remove(phba); + /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); @@ -11470,13 +13582,243 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) return rc; } +static uint32_t +lpfc_cgn_crc32(uint32_t crc, u8 byte) +{ + uint32_t msb = 0; + uint32_t bit; + + for (bit = 0; bit < 8; bit++) { + msb = (crc >> 31) & 1; + crc <<= 1; + + if (msb ^ (byte & 1)) { + crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; + crc |= 1; + } + byte >>= 1; + } + return crc; +} + +static uint32_t +lpfc_cgn_reverse_bits(uint32_t wd) +{ + uint32_t result = 0; + uint32_t i; + + for (i = 0; i < 32; i++) { + result <<= 1; + result |= (1 & (wd >> i)); + } + return result; +} + +/* + * The routine corresponds with the algorithm the HBA firmware + * uses to validate the data integrity. + */ +uint32_t +lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) +{ + uint32_t i; + uint32_t result; + uint8_t *data = (uint8_t *)ptr; + + for (i = 0; i < byteLen; ++i) + crc = lpfc_cgn_crc32(crc, data[i]); + + result = ~lpfc_cgn_reverse_bits(crc); + return result; +} + +void +lpfc_init_congestion_buf(struct lpfc_hba *phba) +{ + struct lpfc_cgn_info *cp; + struct timespec64 cmpl_time; + struct tm broken; + uint16_t size; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6235 INIT Congestion Buffer %px\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + phba->cgn_evt_minute = 0; + phba->hba_flag &= ~HBA_CGN_DAY_WRAP; + + memset(cp, 0xff, LPFC_CGN_DATA_SIZE); + cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); + cp->cgn_info_version = LPFC_CGN_INFO_V3; + + /* cgn parameters */ + cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; + cp->cgn_info_acr = phba->cgn_p.cgn_param_acr; + cp->cgn_info_rxburst = phba->cgn_p.cgn_param_rxburst; + cp->cgn_info_rxbw = phba->cgn_p.cgn_param_rxbw; + + ktime_get_real_ts64(&cmpl_time); + time64_to_tm(cmpl_time.tv_sec, 0, &broken); + cp->cgn_info_month = broken.tm_mon + 1; + cp->cgn_info_day = broken.tm_mday; + cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ + cp->cgn_info_hour = broken.tm_hour; + cp->cgn_info_minute = broken.tm_min; + cp->cgn_info_second = broken.tm_sec; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "2643 CGNInfo Init: Start Time " + "%d/%d/%d %d:%d:%d\n", + cp->cgn_info_day, cp->cgn_info_month, + cp->cgn_info_year, cp->cgn_info_hour, + cp->cgn_info_minute, cp->cgn_info_second); + + /* Fill in default LUN qdepth */ + if (phba->pport) { + size = (uint16_t)(phba->pport->cfg_lun_queue_depth); + cp->cgn_lunq = cpu_to_le16(size); + } + + /* last used Index initialized to 0xff already */ + + cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ; + cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ; + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); + + phba->cgn_evt_timestamp = jiffies + + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); +} + +void +lpfc_init_congestion_stat(struct lpfc_hba *phba) +{ + 
struct lpfc_cgn_info *cp; + struct timespec64 cmpl_time; + struct tm broken; + uint32_t crc; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6236 INIT Congestion Stat %px\n", phba->cgn_i); + + if (!phba->cgn_i) + return; + cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; + memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE); + + ktime_get_real_ts64(&cmpl_time); + time64_to_tm(cmpl_time.tv_sec, 0, &broken); + cp->cgn_stat_month = broken.tm_mon + 1; + cp->cgn_stat_day = broken.tm_mday; + cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */ + cp->cgn_stat_hour = broken.tm_hour; + cp->cgn_stat_minute = broken.tm_min; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, + "2647 CGNstat Init: Start Time " + "%d/%d/%d %d:%d\n", + cp->cgn_stat_day, cp->cgn_stat_month, + cp->cgn_stat_year, cp->cgn_stat_hour, + cp->cgn_stat_minute); + + crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); + cp->cgn_info_crc = cpu_to_le32(crc); +} + +/** + * _lpfc_reg_congestion_buf - register congestion info buffer with HBA + * @phba: Pointer to hba context object. + * @reg: flag to determine register or unregister. + */ +int +_lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) +{ + struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + LPFC_MBOXQ_t *mboxq; + int length, rc; + + if (!phba->cgn_i) + return -ENXIO; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2641 REG_CONGESTION_BUF mbox allocation fail: " + "HBA state x%x reg %d\n", + phba->pport->port_state, reg); + return -ENOMEM; + } + + length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, + LPFC_SLI4_MBX_EMBED); + reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; + bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); + if (reg > 0) + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); + else + bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); + reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); + reg_congestion_buf->addr_lo = + putPaddrLow(phba->cgn_i->phys); + reg_congestion_buf->addr_hi = + putPaddrHigh(phba->cgn_i->phys); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2642 REG_CONGESTION_BUF mailbox " + "failed with status x%x add_status x%x," + " mbx status x%x reg %d\n", + shdr_status, shdr_add_status, rc, reg); + return -ENXIO; + } + return 0; +} + +int +lpfc_unreg_congestion_buf(struct lpfc_hba *phba) +{ + lpfc_cmf_stop(phba); + return _lpfc_reg_congestion_buf(phba, 0); +} + +int +lpfc_reg_congestion_buf(struct lpfc_hba *phba) +{ + return _lpfc_reg_congestion_buf(phba, 1); +} + + /** * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. * @phba: Pointer to HBA context object. * @mboxq: Pointer to the mailboxq memory for the mailbox command response. * * This function is called in the SLI4 code path to read the port's - * sli4 capabilities. + * sli4 capabilities. Called on probe from lpfc_sli4_driver_resource_setup.
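[Editor's aside — illustrative, not part of the patch.] Both congestion-buffer init paths above finish by re-seeding cgn_info_crc with lpfc_cgn_calc_crc32(). A hedged sketch of the matching consumer-side check, assuming (as the init code suggests) that LPFC_CGN_INFO_SZ covers the summed region without the crc word itself and that the field is stored little-endian:

/* Recompute and compare the congestion-buffer checksum. */
static bool lpfc_cgn_info_crc_ok(struct lpfc_cgn_info *cp)
{
	uint32_t crc;

	/* Same seed and traversal as lpfc_init_congestion_buf() */
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				  LPFC_CGN_CRC32_SEED);
	return cp->cgn_info_crc == cpu_to_le32(crc);
}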
* * This function may be called from any context that can block-wait * for the completion. The expectation is that this routine is called @@ -11538,6 +13880,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); + sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); @@ -11588,18 +13931,18 @@ fcponly: } } +#ifdef BUILD_NVME /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to - * accommodate 512K and 1M IOs in a single nvme buf and supply - * enough NVME LS iocb buffers for larger connectivity counts. + * accommodate 512K and 1M IOs in a single nvme buf. */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; - phba->cfg_iocb_cnt = 5; - } +#endif - /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ - if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) + /* Enable embedded Payload BDE if support is indicated */ + if (bf_get(cfg_pbde, mbx_sli4_parameters)) + phba->cfg_enable_pbde = 1; + else phba->cfg_enable_pbde = 0; /* @@ -11660,7 +14003,6 @@ fcponly: phba->mds_diags_support = 1; else phba->mds_diags_support = 0; - /* * Check if the SLI port supports NSLER */ @@ -11669,6 +14011,13 @@ fcponly: else phba->nsler = 0; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC)) + INIT_LIST_HEAD(&phba->nvme_pendq); + INIT_LIST_HEAD(&phba->nvme_abts_pendq); + spin_lock_init(&phba->nvme_pendq_lock); + INIT_WORK(&phba->nvme_defer_work, lpfc_nvme_issue_pend_io); +#endif + phba->nvme_pendq_cnt = 0; return 0; } @@ -11689,7 +14038,7 @@ fcponly: * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int +static int __devinit lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; @@ -11713,6 +14062,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) if (error) goto out_disable_pci_dev; + phba->log.logit = true; /* Turn on throttle logs for Adapter */ + sprintf(phba->log.announcement, "adapter"); + /* Set up SLI-3 specific device PCI memory space */ error = lpfc_sli_pci_mem_setup(phba); if (error) { @@ -11729,6 +14081,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_unset_pci_mem_s3; } + /* no SCSI MQ support for SLI 3 */ + phba->cfg_enable_scsi_mq = 0; + /* Initialize and populate the iocb list per host */ error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); @@ -11775,14 +14130,14 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0431 Failed to enable interrupt.\n"); error = -ENODEV; goto out_free_sysfs_attr; } /* SLI-3 HBA setup */ if (lpfc_sli_hba_setup(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1477 Failed to set up hba\n"); error = -ENODEV;
goto out_remove_device; @@ -11849,7 +14204,7 @@ out_free_phba: * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. **/ -static void +static void __devexit lpfc_pci_remove_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -11857,6 +14212,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) struct lpfc_vport **vports; struct lpfc_hba *phba = vport->phba; int i; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; @@ -11910,6 +14266,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Disable interrupt */ lpfc_sli_disable_intr(phba); + pci_set_drvdata(pdev, NULL); scsi_host_put(shost); /* @@ -11934,7 +14291,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) lpfc_hba_free(phba); - pci_release_mem_regions(pdev); + pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); } @@ -12040,7 +14397,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0430 PM resume Failed to enable interrupt\n"); return -EIO; } else @@ -12066,7 +14423,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2723 PCI channel I/O abort preparing for recovery\n"); /* @@ -12087,7 +14444,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) static void lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2710 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ @@ -12118,7 +14475,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) static void lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2711 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); @@ -12169,7 +14526,7 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) return PCI_ERS_RESULT_DISCONNECT; default: /* Unknown state, prepare and request slot reset */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0472 Unknown PCI error state: x%x\n", state); lpfc_sli_prep_dev_for_reset(phba); return PCI_ERS_RESULT_NEED_RESET; @@ -12227,7 +14584,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev) /* Configure and enable interrupt */ intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0427 Cannot re-enable interrupt after " "slot reset.\n"); return PCI_ERS_RESULT_DISCONNECT; @@ -12263,6 +14620,14 @@ lpfc_io_resume_s3(struct pci_dev *pdev) /* Bring device online, it will be no-op for non-fatal error resume */ lpfc_online(phba); + + /* Clean up Advanced Error Reporting (AER) if needed */ + if (phba->hba_flag & HBA_AER_ENABLED) +#if defined(BUILD_SLES15SP2) || defined(BUILD_RHEL83) + pci_aer_clear_nonfatal_status(pdev); +#else + pci_cleanup_aer_uncorrect_error_status(pdev); +#endif } /** @@ -12312,35 +14677,57 @@ 
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) } -static void +static int lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, const struct firmware *fw) { - if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || + int rc; + + /* Three cases: (1) FW was not supported on the detected adapter. + * (2) FW update has been locked out administratively. + * (3) Some other error during FW update. + * In each case, an unmaskable message is written to the console + * for admin diagnosis. + */ + if (offset == ADD_STATUS_FW_NOT_SUPPORTED || (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && magic_number != MAGIC_NUMER_G6) || (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && - magic_number != MAGIC_NUMER_G7)) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3030 This firmware version is not supported on " - "this HBA model. Device:%x Magic:%x Type:%x " - "ID:%x Size %d %zd\n", - phba->pcidev->device, magic_number, ftype, fid, - fsize, fw->size); - else - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3022 FW Download failed. Device:%x Magic:%x Type:%x " - "ID:%x Size %d %zd\n", - phba->pcidev->device, magic_number, ftype, fid, - fsize, fw->size); + magic_number != MAGIC_NUMER_G7)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3030 This firmware version is not supported on " + "this HBA model. Device:%x Magic:%x Type:%x " + "ID:%x Size %d %zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EINVAL; + } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3021 Firmware downloads have been prohibited " + "by a system configuration setting on " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EACCES; + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3022 FW Download failed. Add Status x%x " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + offset, phba->pcidev->device, magic_number, + ftype, fid, fsize, fw->size); + rc = -EIO; + } + return rc; +} - /** * lpfc_write_firmware - attempt to write a firmware image to the port * @fw: pointer to firmware image returned from request_firmware. - * @phba: pointer to lpfc hba data structure. + * @context: pointer to the lpfc hba data structure, passed through + * request_firmware as the completion context.
* **/ static void @@ -12370,7 +14757,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context) INIT_LIST_HEAD(&dma_buffer_list); lpfc_decode_firmware_rev(phba, fwrev, 1); if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3023 Updating Firmware, Current Version:%s " "New Version:%s\n", fwrev, image->revision); @@ -12409,14 +14796,18 @@ lpfc_write_firmware(const struct firmware *fw, void *context) rc = lpfc_wr_object(phba, &dma_buffer_list, (fw->size - offset), &offset); if (rc) { - lpfc_log_write_firmware_error(phba, offset, - magic_number, ftype, fid, fsize, fw); + rc = lpfc_log_write_firmware_error(phba, offset, + magic_number, + ftype, + fid, + fsize, + fw); goto release_out; } } rc = offset; } else - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3029 Skipped Firmware update, Current " "Version:%s New Version:%s\n", fwrev, image->revision); @@ -12430,9 +14821,12 @@ release_out: } release_firmware(fw); out: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3024 Firmware update done: %d.\n", rc); - return; + if (rc < 0) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3062 Firmware update error, status %d.\n", rc); + else + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3024 Firmware update success: size %d.\n", rc); } /** @@ -12456,6 +14850,11 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 13) + ret = request_firmware(&fw, file_name, &phba->pcidev->dev); + if (!ret) + lpfc_write_firmware(fw, (void *)phba); +#else if (fw_upgrade == INT_FW_UPGRADE) { ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, file_name, &phba->pcidev->dev, @@ -12468,6 +14867,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) } else { ret = -EINVAL; } +#endif return ret; } @@ -12490,7 +14890,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int +static int __devinit lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) { struct lpfc_hba *phba; @@ -12514,6 +14914,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) if (error) goto out_disable_pci_dev; + phba->log.logit = true; /* Turn on throttle logs for Adapter */ + sprintf(phba->log.announcement, "adapter"); + /* Set up SLI-4 specific device PCI memory space */ error = lpfc_sli4_pci_mem_setup(phba); if (error) { @@ -12551,10 +14954,16 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) phba->pport = NULL; lpfc_stop_port(phba); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); + + /* Init hba_eq_hdl array */ + lpfc_hba_eq_hdl_array_init(phba); + /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0426 Failed to enable interrupt.\n"); error = -ENODEV; goto out_unset_driver_resource; @@ -12589,7 +14998,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, "1421 Failed to set up hba\n"); error = -ENODEV; goto out_free_sysfs_attr; @@ -12614,7 +15023,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) */ error = lpfc_nvme_create_localport(vport); if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6004 NVME registration " "failed, error x%x\n", error); @@ -12632,6 +15041,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); + INIT_LIST_HEAD(&phba->poll_list); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + setup_timer(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, + (unsigned long)phba); +#else + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); +#endif + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); + return 0; out_free_sysfs_attr: @@ -12664,7 +15082,7 @@ out_free_phba: * removed from PCI bus, it performs all the necessary cleanup for the HBA * device to be removed from the PCI subsystem properly. **/ -static void +static void __devexit lpfc_pci_remove_one_s4(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -12677,6 +15095,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); + if (phba->cgn_i) + lpfc_unreg_congestion_buf(phba); /* Free the HBA sysfs attributes */ lpfc_free_sysfs_attr(vport); @@ -12803,8 +15223,8 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) * state. * * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver resumed the device + * Error otherwise **/ static int lpfc_pci_resume_one_s4(struct pci_dev *pdev) @@ -12844,7 +15264,7 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0294 PM resume Failed to enable interrupt\n"); return -EIO; } else @@ -12870,7 +15290,7 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) static void lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2828 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq @@ -12890,7 +15310,7 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2826 PCI channel disable preparing for reset\n"); /* Block any management I/Os to the device */ @@ -12922,7 +15342,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) static void lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2827 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ @@ -12972,7 +15392,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) return PCI_ERS_RESULT_DISCONNECT; default: /* Unknown state, prepare and request slot reset */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2825 Unknown PCI error state: x%x\n", state); 
lpfc_sli4_prep_dev_for_reset(phba); return PCI_ERS_RESULT_NEED_RESET; @@ -13030,7 +15450,7 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2824 Cannot re-enable interrupt after " "slot reset.\n"); return PCI_ERS_RESULT_DISCONNECT; @@ -13073,6 +15493,14 @@ lpfc_io_resume_s4(struct pci_dev *pdev) /* Bring the device back online */ lpfc_online(phba); } + + /* Clean up Advanced Error Reporting (AER) if needed */ + if (phba->hba_flag & HBA_AER_ENABLED) +#if defined(BUILD_SLES15SP2) || defined(BUILD_RHEL83) + pci_aer_clear_nonfatal_status(pdev); +#else + pci_cleanup_aer_uncorrect_error_status(pdev); +#endif } /** @@ -13093,7 +15521,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev) * 0 - driver can claim the device * negative value - driver can not claim the device **/ -static int +static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { int rc; @@ -13121,7 +15549,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) * remove routine, which will perform all the necessary cleanup for the * device to be removed from the PCI subsystem properly. **/ -static void +static void __devexit lpfc_pci_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); @@ -13135,7 +15563,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) lpfc_pci_remove_one_s4(pdev); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1424 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13172,7 +15600,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) rc = lpfc_pci_suspend_one_s4(pdev, msg); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1425 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13208,7 +15636,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) rc = lpfc_pci_resume_one_s4(pdev); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1426 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13246,7 +15674,7 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) rc = lpfc_io_error_detected_s4(pdev, state); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1427 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13283,7 +15711,7 @@ lpfc_io_slot_reset(struct pci_dev *pdev) rc = lpfc_io_slot_reset_s4(pdev); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1428 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13315,7 +15743,7 @@ lpfc_io_resume(struct pci_dev *pdev) lpfc_io_resume_s4(pdev); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1429 Invalid PCI device group: 0x%x\n", phba->pci_dev_grp); break; @@ -13323,6 +15751,36 @@ lpfc_io_resume(struct pci_dev *pdev) return; } +/** + * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace + * @inode: pointer to the inode representing the lpfcmgmt device + * @filep: pointer to the file representing the open lpfcmgmt device + * + * This routine puts a reference count on the lpfc module whenever 
the + * character device is opened + **/ +static int +lpfc_mgmt_open(struct inode *inode, struct file *filep) +{ + try_module_get(THIS_MODULE); + return 0; +} + +/** + * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace + * @inode: pointer to the inode representing the lpfcmgmt device + * @filep: pointer to the file representing the open lpfcmgmt device + * + * This routine removes a reference count from the lpfc module when the + * character device is closed + **/ +static int +lpfc_mgmt_release(struct inode *inode, struct file *filep) +{ + module_put(THIS_MODULE); + return 0; +} + /** * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter * @phba: pointer to lpfc hba data structure. @@ -13378,9 +15836,74 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba) } +/** + * lpfc_throttler - lpfc log throttling function + * @vport: pointer to the vport issuing the log. + * @tlog: throttle history state for this log source. + * + * This routine is to be invoked by lpfc_throttle_log and lpfc_throttle_vlog. + * It is controlled using two module parameters. + * lpfc_throttle_log_time: The number of seconds to monitor the log count + * lpfc_throttle_log_cnt: The maximum number of logs allowed within + * the throttle log time. + * + * This function acts as a gate to the logger. It monitors the lpfc log + * load. If the log load exceeds the threshold, it returns false, which is a + * hint to the caller not to send the log. + * + * Return codes + * true - Okay to log + * false - Do not log + */ +bool +lpfc_throttler(struct lpfc_vport *vport, struct throttle_history *tlog) +{ + /* All times in microseconds */ + uint64_t now = (jiffies * ONE_SEC) / HZ; + struct lpfc_hba *phba = vport->phba; + + if (tlog->logit) { + /* Currently Logging */ + if (now > (tlog->log_start + + (phba->cfg_throttle_log_time * ONE_SEC))) { + /* Throttle window expired; restart the count */ + tlog->log_messages_lost = 0; + tlog->log_messages = 1; + tlog->log_start = now; + } + else if (++tlog->log_messages > phba->cfg_throttle_log_cnt) { + /* Transition to throttling */ + lpfc_printf_log(phba, KERN_INFO, LOG_EVENT, + "0040 Log compression on %s starting."
+ "\n", tlog->announcement); + tlog->log_messages_lost++; + tlog->log_messages = 0; + tlog->log_start = now; + tlog->logit = false; + } + + } else if (now > (tlog->log_start + + (300 * ONE_SEC))) { + /* Transition to logging if quiesced for five minutes */ + lpfc_printf_log(phba, KERN_INFO, LOG_EVENT, + "0041 Log compression %s ending, messages " + "have been quiesced with %d compressed.\n", + tlog->announcement, tlog->log_messages_lost); + + + tlog->log_start = now; + tlog->log_messages_lost = 0; + tlog->log_messages = 0; + tlog->logit = true; + } else { + /* Currently throttling */ + tlog->log_messages_lost++; + } + return tlog->logit; +} + MODULE_DEVICE_TABLE(pci, lpfc_id_table); -static const struct pci_error_handlers lpfc_err_handler = { +static struct pci_error_handlers lpfc_err_handler = { .error_detected = lpfc_io_error_detected, .slot_reset = lpfc_io_slot_reset, .resume = lpfc_io_resume, @@ -13390,20 +15913,20 @@ static struct pci_driver lpfc_driver = { .name = LPFC_DRIVER_NAME, .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, - .remove = lpfc_pci_remove_one, - .shutdown = lpfc_pci_remove_one, + .remove = __devexit_p(lpfc_pci_remove_one), .suspend = lpfc_pci_suspend_one, .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; static const struct file_operations lpfc_mgmt_fop = { - .owner = THIS_MODULE, + .open = lpfc_mgmt_open, + .release = lpfc_mgmt_release, }; static struct miscdevice lpfc_mgmt_dev = { .minor = MISC_DYNAMIC_MINOR, - .name = "lpfcmgmt", + .name = LPFCMGMT_NAME, .fops = &lpfc_mgmt_fop, }; @@ -13444,19 +15967,112 @@ lpfc_init(void) fc_release_transport(lpfc_transport_template); return -ENOMEM; } +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC)) lpfc_nvme_cmd_template(); lpfc_nvmet_cmd_template(); +#endif /* Initialize in case vector mapping is needed */ lpfc_present_cpu = num_present_cpus(); + error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "lpfc/sli4:online", + lpfc_cpu_online, lpfc_cpu_offline); + if (error < 0) + goto cpuhp_failure; + lpfc_cpuhp_state = error; + error = pci_register_driver(&lpfc_driver); - if (error) { - fc_release_transport(lpfc_transport_template); - fc_release_transport(lpfc_vport_transport_template); - } + if (error) + goto unwind; return error; + +unwind: + cpuhp_remove_multi_state(lpfc_cpuhp_state); +cpuhp_failure: + fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); + + return error; +} + +void lpfc_dmp_dbg(struct lpfc_hba *phba) +{ + unsigned int start_idx; + unsigned int dbg_cnt; + unsigned int temp_idx; + int i; + int j = 0; + unsigned long rem_nsec; + + if (phba->cfg_log_verbose) + return; + + if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) + return; + + start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; + dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); + temp_idx = start_idx; + if (dbg_cnt >= DBG_LOG_SZ) { + dbg_cnt = DBG_LOG_SZ; + temp_idx -= 1; + } else { + if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { + temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; + } else { + if (start_idx < dbg_cnt) + start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); + else + start_idx -= dbg_cnt; + } + } + dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", + start_idx, temp_idx, dbg_cnt); + + for (i = 0; i < dbg_cnt; i++) { + if ((start_idx + i) < DBG_LOG_SZ) + temp_idx = (start_idx + i) % DBG_LOG_SZ; + else + temp_idx = j++; + rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); + dev_info(&phba->pcidev->dev, "%d: 
[%5lu.%06lu] %s", + temp_idx, + (unsigned long)phba->dbg_log[temp_idx].t_ns, + rem_nsec / 1000, + phba->dbg_log[temp_idx].log); + } + atomic_set(&phba->dbg_log_cnt, 0); + atomic_set(&phba->dbg_log_dmping, 0); +} + +void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...) +{ + unsigned int idx; + va_list args; + int dbg_dmping = atomic_read(&phba->dbg_log_dmping); + struct va_format vaf; + + + va_start(args, fmt); + if (unlikely(dbg_dmping)) { + vaf.fmt = fmt; + vaf.va = &args; + dev_info(&phba->pcidev->dev, "%pV", &vaf); + va_end(args); + return; + } + idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % + DBG_LOG_SZ; + + atomic_inc(&phba->dbg_log_cnt); + + vscnprintf(phba->dbg_log[idx].log, + sizeof(phba->dbg_log[idx].log), fmt, args); + va_end(args); + + phba->dbg_log[idx].t_ns = local_clock(); } /** @@ -13471,6 +16087,7 @@ lpfc_exit(void) { misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); + cpuhp_remove_multi_state(lpfc_cpuhp_state); fc_release_transport(lpfc_transport_template); fc_release_transport(lpfc_vport_transport_template); idr_destroy(&lpfc_hba_index); @@ -13478,7 +16095,8 @@ lpfc_exit(void) module_init(lpfc_init); module_exit(lpfc_exit); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(LPFC_MODULE_DESC); MODULE_AUTHOR("Broadcom"); MODULE_VERSION("0:" LPFC_DRIVER_VERSION); +MODULE_INFO(supported, "external"); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index ea10f03437f5..c15acf654dc6 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -44,13 +44,43 @@ #define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */ #define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */ #define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ -#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ +#define LOG_EDIF 0x01000000 /* External DIF events */ +#define LOG_AUTH 0x02000000 /* Authentication events */ +#define LOG_CGN_MGMT 0x04000000 /* Congestion Mgmt events */ +#define LOG_TRACE_EVENT 0x80000000 /* Dmp the DBG log on this err */ +#define LOG_ALL_MSG 0x7fffffff /* LOG all messages */ + +void lpfc_dmp_dbg(struct lpfc_hba *phba); +void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...); + +/* generate message by verbose log setting or severity */ +#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ +{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \ + dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } + +#define lpfc_log_msg(phba, level, mask, fmt, arg...) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? \ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '4')) \ + dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ + fmt, phba->brd_no, ##arg); \ + } \ +} while (0) #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) 
\ do { \ - { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ + { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \ + if ((mask) & LOG_TRACE_EVENT) \ + lpfc_dmp_dbg((vport)->phba); \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ - fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \ + } else if (!(vport)->cfg_log_verbose) \ + lpfc_dbg_print((vport)->phba, "%d:(%d):" fmt, \ + (vport)->phba->brd_no, (vport)->vpi, ##arg); \ + } \ } while (0) #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ @@ -58,8 +88,52 @@ do { \ { uint32_t log_verbose = (phba)->pport ? \ (phba)->pport->cfg_log_verbose : \ (phba)->cfg_log_verbose; \ - if (((mask) & log_verbose) || (level[1] <= '3')) \ + if (((mask) & log_verbose) || (level[1] <= '3')) { \ + if ((mask) & LOG_TRACE_EVENT) \ + lpfc_dmp_dbg(phba); \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ - fmt, phba->brd_no, ##arg); \ + fmt, phba->brd_no, ##arg); \ + } else if (!(phba)->cfg_log_verbose)\ + lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \ } \ } while (0) + +#define lpfc_throttle_vlog(vport, hist, log, fmt, arg...) \ +do { \ + { if (!lpfc_throttler(vport, (hist))) \ + break; \ + lpfc_printf_vlog(vport, KERN_INFO, log, \ + fmt, arg); \ + }\ +} while (0) + +#define lpfc_throttle_log(phba, hist, log, fmt, arg...) \ +do { \ + { if (!lpfc_throttler((phba->pport), (hist))) \ + break; \ + lpfc_printf_log(phba, KERN_INFO, log, \ + fmt, arg); \ + }\ +} while (0) + +#define lpfc_optioned_vlog(vport, hist, level, mask, fmt, arg...) \ +do { \ + { if (!lpfc_throttler(vport, (hist))) \ + { lpfc_printf_vlog(vport, level, mask, fmt, ##arg); \ + break; \ + }\ + dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \ + }\ +} while (0) + +#define lpfc_optioned_log(phba, hist, level, mask, fmt, arg...) \ +do { \ + { if (!lpfc_throttler((phba->pport), (hist))) \ + { lpfc_printf_log(phba, level, mask, fmt, ##arg); \ + break; \ + }\ + dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ + fmt, phba->brd_no, ##arg); \ + }\ +} while (0) diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 8abe933bad09..d2ce919ccfe5 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba, if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + !(phba->sli4_hba.pc_sli4_params.pls) && mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; @@ -755,7 +756,6 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, struct lpfc_dmabuf *mp; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varRegLogin.rpi = 0; if (phba->sli_rev == LPFC_SLI_REV4) mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi]; @@ -1296,10 +1296,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* If HBA supports SLI=3 ask for it */ if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { - if (phba->cfg_enable_bg) + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ - if (phba->cfg_enable_dss) - mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); @@ -1379,7 +1377,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) */ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) { - phba->host_gp = &phba->mbox->us.s2.host[0]; + phba->host_gp = (struct lpfc_hgp __iomem *) + &phba->mbox->us.s2.host[0]; phba->hbq_put = NULL; offset = (uint8_t *)&phba->mbox->us.s2.host - (uint8_t *)phba->slim2p.virt; @@ -2085,7 +2084,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ - if (phba->cfg_enable_bg) + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable NPIV only if configured to do so. 
*/ @@ -2299,7 +2298,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox) return 0; } -static void +void lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { MAILBOX_t *mb; @@ -2320,7 +2319,7 @@ mbx_failed: rdp_context->cmpl(phba, rdp_context, rc); } -static void +void lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf; @@ -2367,7 +2366,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) goto error; lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, - DMP_SFF_PAGE_A0_SIZE); + DMP_SFF_PAGE_A0_SIZE); memset(mbox, 0, sizeof(*mbox)); @@ -2380,13 +2379,13 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); bf_set(lpfc_mbx_memory_dump_type3_type, - &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); bf_set(lpfc_mbx_memory_dump_type3_link, - &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_memory_dump_type3_page_no, - &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); bf_set(lpfc_mbx_memory_dump_type3_length, - &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); @@ -2439,13 +2438,13 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox) mbox->ctx_buf = mp; bf_set(lpfc_mbx_memory_dump_type3_type, - &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); bf_set(lpfc_mbx_memory_dump_type3_link, - &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); bf_set(lpfc_mbx_memory_dump_type3_page_no, - &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0); + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0); bf_set(lpfc_mbx_memory_dump_type3_length, - &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index ae09bb863497..2e9a6a6bd0b4 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -30,9 +30,11 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> - +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -49,6 +51,8 @@ #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ +#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */ +#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */ int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { @@ -89,16 +93,14 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_mbuf_pool) goto fail; - pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, - sizeof(struct lpfc_dmabuf), - GFP_KERNEL); + pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) * + LPFC_MBUF_POOL_SIZE, GFP_KERNEL); if (!pool->elements) goto fail_free_lpfc_mbuf_pool; @@ -113,8 +115,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) pool->current_count++; } - phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, - sizeof(LPFC_MBOXQ_t)); + phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE, + sizeof(LPFC_MBOXQ_t)); if (!phba->mbox_mem_pool) goto fail_free_mbuf_pool; @@ -125,7 +127,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) if (phba->sli_rev == LPFC_SLI_REV4) { phba->rrq_pool = - mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE, sizeof(struct lpfc_node_rrq)); if (!phba->rrq_pool) goto fail_free_nlp_mem_pool; @@ -230,9 +232,6 @@ lpfc_mem_free(struct lpfc_hba *phba) dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; - dma_pool_destroy(phba->txrdy_payload_pool); - phba->txrdy_payload_pool = NULL; - dma_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; @@ -240,7 +239,8 @@ lpfc_mem_free(struct lpfc_hba *phba) phba->rrq_pool = NULL; /* Free NLP memory pool */ - mempool_destroy(phba->nlp_mem_pool); + if (phba->nlp_mem_pool) + mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { mempool_destroy(phba->active_rrq_pool); @@ -248,7 +248,8 @@ lpfc_mem_free(struct lpfc_hba *phba) } /* Free mbox memory pool */ - mempool_destroy(phba->mbox_mem_pool); + if (phba->mbox_mem_pool) + mempool_destroy(phba->mbox_mem_pool); phba->mbox_mem_pool = NULL; /* Free MBUF memory pool */ @@ -339,6 +340,19 @@ lpfc_mem_free_all(struct lpfc_hba *phba) dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); phba->lpfc_cmd_rsp_buf_pool = NULL; + /* Free Congestion Data buffer */ + if (phba->cgn_i) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + } + + /* Free RX table */ + kfree(phba->rxtable); + phba->rxtable = NULL; + /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; @@ -593,8 +607,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * - * Notes: Not interrupt-safe. 
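The pool hunks above replace the shared LPFC_MEM_POOL_SIZE with larger, dedicated reserves for mailbox and RRQ elements. The underlying mempool pattern, reduced to a standalone sketch (demo names, not driver code):

#include <linux/mempool.h>
#include <linux/slab.h>

struct demo_mbox { u32 mbx[32]; };	/* stand-in for LPFC_MBOXQ_t */

static mempool_t *demo_mbox_pool;

static int demo_pool_setup(void)
{
	/* 256 elements stay preallocated, so mailbox commands can
	 * still be carved out under memory pressure.
	 */
	demo_mbox_pool = mempool_create_kmalloc_pool(256,
					sizeof(struct demo_mbox));
	return demo_mbox_pool ? 0 : -ENOMEM;
}

static void demo_pool_teardown(void)
{
	/* mempool_destroy() is already NULL-safe in mainline, so the
	 * if-guards added in the hunk above are defensive only.
	 */
	mempool_destroy(demo_mbox_pool);
	demo_mbox_pool = NULL;
}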
Must be called with no locks held. - * * Returns: * pointer to HBQ on success * NULL on failure @@ -604,7 +616,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) { struct rqb_dmabuf *dma_buf; - dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); + dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL); if (!dma_buf) return NULL; @@ -727,7 +739,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); if (rc < 0) { - (rqbp->rqb_free_buffer)(phba, rqb_entry); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6409 Cannot post to HRQ %d: %x %x %x " "DRQ %x %x\n", @@ -737,6 +748,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) rqb_entry->hrq->entry_count, rqb_entry->drq->host_index, rqb_entry->drq->hba_index); + (rqbp->rqb_free_buffer)(phba, rqb_entry); } else { list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index 95d60ab5ebf9..09db13acaaf0 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2010 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 696171382558..444329b4df32 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -31,8 +31,11 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -154,7 +157,7 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); return 1; bad_service_param: - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0207 Device %x " "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " "invalid service parameters. Ignoring device.\n", @@ -252,6 +255,8 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_sli_issue_abort_iotag(phba, pring, iocb); spin_unlock_irq(&phba->hbalock); } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); INIT_LIST_HEAD(&abort_list); @@ -279,6 +284,109 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); } +/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up + * @phba: pointer to lpfc hba data structure. 
+ * @link_mbox: pointer to CONFIG_LINK mailbox object + * + * This routine is only called if we are SLI3, direct connect pt2pt + * mode and the remote NPort issues the PLOGI after link up. + */ +void +lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) +{ + LPFC_MBOXQ_t *login_mbox; + MAILBOX_t *mb = &link_mbox->u.mb; + struct lpfc_iocbq *save_iocb; + struct lpfc_nodelist *ndlp; + int rc; + + ndlp = link_mbox->ctx_ndlp; + login_mbox = link_mbox->context3; + save_iocb = login_mbox->context3; + link_mbox->context3 = NULL; + login_mbox->context3 = NULL; + + /* Check for CONFIG_LINK error */ + if (mb->mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4575 CONFIG_LINK fails pt2pt discovery: %x\n", + mb->mbxStatus); + mempool_free(login_mbox, phba->mbox_mem_pool); + mempool_free(link_mbox, phba->mbox_mem_pool); + kfree(save_iocb); + return; + } + + /* Now that CONFIG_LINK completed, and our SID is configured, + * we can proceed with sending the PLOGI ACC. + */ + rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, + save_iocb, ndlp, login_mbox); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4576 PLOGI ACC fails pt2pt discovery: %x\n", + rc); + mempool_free(login_mbox, phba->mbox_mem_pool); + } + + mempool_free(link_mbox, phba->mbox_mem_pool); + kfree(save_iocb); +} + +/** + * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function provides the unreg rpi mailbox completion handler for a tgt. + * The routine frees the memory resources associated with the completed + * mailbox command and transmits the ELS ACC. + * + * This routine is only called if we are SLI4, acting in target + * mode and the remote NPort issues the PLOGI after link up. + **/ +void +lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + LPFC_MBOXQ_t *mbox = pmb->context3; + struct lpfc_iocbq *piocb = NULL; + int rc; + + if (mbox) { + pmb->context3 = NULL; + piocb = mbox->context3; + mbox->context3 = NULL; + } + + /* + * Complete the unreg rpi mbx request, and update flags. + * This will also restart any deferred events.
+ */ + lpfc_nlp_get(ndlp); + lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); + + if (!piocb) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "4578 PLOGI ACC fail\n"); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "4579 PLOGI ACC fail %x\n", rc); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + } + kfree(piocb); +out: + lpfc_nlp_put(ndlp); +} + static int lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) @@ -291,17 +399,20 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, IOCB_t *icmd; struct serv_parm *sp; uint32_t ed_tov; - LPFC_MBOXQ_t *mbox; + LPFC_MBOXQ_t *link_mbox; + LPFC_MBOXQ_t *login_mbox; + struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; - int rc; + u16 rpi; + int rc, defer_acc; memset(&stat, 0, sizeof (struct ls_rjt)); pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); if (wwn_to_u64(sp->portName.u.wwn) == 0) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0140 PLOGI Reject: invalid nname\n"); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; @@ -310,7 +421,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; } if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0141 PLOGI Reject: invalid pname\n"); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; @@ -343,6 +454,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, else ndlp->nlp_fcp_info |= CLASS3; + defer_acc = 0; ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -354,7 +466,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: @@ -371,13 +482,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (!(ndlp->nlp_type & NLP_FABRIC) && !(phba->nvmet_support)) { + /* Clear ndlp info, since follow up PRLI may have + * updated ndlp information + */ + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); return 1; } if (nlp_portwwn != 0 && nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0143 PLOGI recv'd from DID: x%x " "WWPN changed: old %llx new %llx\n", ndlp->nlp_DID, @@ -394,8 +514,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; ndlp->nlp_flag &= ~NLP_FIRSTBURST; + login_mbox = NULL; + link_mbox = NULL; + save_iocb = NULL; + /* Check for Nport to 
NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { @@ -423,17 +548,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (phba->sli_rev == LPFC_SLI_REV4) lpfc_issue_reg_vfi(vport); else { - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (mbox == NULL) + defer_acc = 1; + link_mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!link_mbox) goto out; - lpfc_config_link(phba, mbox); - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); + lpfc_config_link(phba, link_mbox); + link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) goto out; - } + /* Save info from cmd IOCB used in rsp */ + memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); } lpfc_can_disctmo(vport); @@ -448,30 +578,57 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_flag |= NLP_SUPPRESS_RSP; } - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) + login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!login_mbox) goto out; /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->nvmet_support && !defer_acc) { + link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!link_mbox) + goto out; + + /* As unique identifiers such as iotag would be overwritten + * with those from the cmdiocb, allocate separate temporary + * storage for the copy. + */ + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) + goto out; + + /* Unreg RPI is required for SLI4. */ + rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; + + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + /* Save info from cmd IOCB used in rsp */ + memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); + + /* Delay sending ACC till unreg RPI completes. */ + defer_acc = 1; + } else if (phba->sli_rev == LPFC_SLI_REV4) lpfc_unreg_rpi(vport, ndlp); rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, - (uint8_t *) sp, mbox, ndlp->nlp_rpi); - if (rc) { - mempool_free(mbox, phba->mbox_mem_pool); + (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); + if (rc) goto out; - } /* ACC PLOGI rsp command needs to execute first, - * queue this mbox command to be processed later. + * queue this login_mbox command to be processed later. */ - mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; /* - * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox + * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox * command issued in lpfc_cmpl_els_acc(). 
*/ - mbox->vport = vport; + login_mbox->vport = vport; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); spin_unlock_irq(shost->host_lock); @@ -495,6 +652,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if ((vport->port_type == LPFC_NPIV_PORT && vport->cfg_restrict_login)) { + /* no deferred ACC */ + kfree(save_iocb); + /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. The only way to do this is @@ -506,16 +666,50 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, - ndlp, mbox); + ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); return 1; } - rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + if (defer_acc) { + /* So the order here should be: + * SLI3 pt2pt + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * SLI4 tgt + * Issue UNREG RPI mbx + * UNREG RPI cmpl + * Issue PLOGI ACC + * PLOGI ACC cmpl + * Issue REG_LOGIN mbox + */ + + /* Save the REG_LOGIN mbox and the rcv IOCB copy for later */ + link_mbox->context3 = login_mbox; + login_mbox->context3 = save_iocb; + + /* Start the ball rolling by issuing CONFIG_LINK here */ + rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; + return 1; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); return 1; out: + if (defer_acc) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "4577 discovery failure: %p %p %p\n", + save_iocb, link_mbox, login_mbox); + kfree(save_iocb); + if (link_mbox) + mempool_free(link_mbox, phba->mbox_mem_pool); + if (login_mbox) + mempool_free(login_mbox, phba->mbox_mem_pool); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); @@ -615,11 +809,17 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp, NULL); } out: - /* If we are authenticated, move to the proper state */ - if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) - lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); - else - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + /* If we are authenticated, move to the proper state. + * It is possible an ADISC arrived and the remote nport + * is already in MAPPED or UNMAPPED state. Catch this + * condition and don't set the nlp_state again because + * it causes an unnecessary transport unregister/register.
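The defer_acc ordering comment above is implemented by threading state through the mailbox context pointers, so each completion handler can launch the next step. A much-reduced sketch of that hand-off (all demo_* types and helpers are hypothetical stand-ins, not lpfc symbols):

struct demo_mbx {
	void (*mbox_cmpl)(struct demo_hba *hba, struct demo_mbx *mbx);
	void *context3;		/* next mailbox, or the saved IOCB */
};

static void demo_config_link_cmpl(struct demo_hba *hba,
				  struct demo_mbx *link_mbox)
{
	struct demo_mbx *login_mbox = link_mbox->context3;
	struct demo_iocb *save_iocb = login_mbox->context3;

	/* CONFIG_LINK is done, so the deferred PLOGI ACC can go out;
	 * its own completion then issues REG_LOGIN, preserving the
	 * ordering listed in the comment above.
	 */
	demo_send_plogi_acc(hba, save_iocb, login_mbox);
}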
+ */ + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { + if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_MAPPED_NODE); + } return 1; } @@ -792,6 +992,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_INITIATOR; + ndlp->nlp_flag &= ~NLP_EXTERNAL_DIF; } if (npr->targetFunc) { if (npr->prliType == PRLI_FCP_TYPE) @@ -853,9 +1054,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ - if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || + if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && - (ndlp->nlp_type & NLP_FCP_TARGET)))) { + (ndlp->nlp_type & NLP_FCP_TARGET))) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); @@ -903,8 +1104,8 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "2796 mailbox memory allocation failed \n"); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2796 mailbox memory allocation failed \n"); else { lpfc_unreg_login(phba, vport->vpi, rpi, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -942,7 +1143,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, rpi = pmb->u.mb.un.varWords[0]; lpfc_release_rpi(phba, vport, ndlp, rpi); } - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, @@ -960,11 +1161,11 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * to stop it. 
*/ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", - ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, - ndlp->nlp_flag); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0272 Illegal State Transition: node x%x " + "event x%x, state x%x Data: x%x x%x\n", + ndlp->nlp_DID, evt, ndlp->nlp_state, + ndlp->nlp_rpi, ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -1184,7 +1385,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if ((ndlp->nlp_DID != FDMI_DID) && (wwn_to_u64(sp->portName.u.wwn) == 0 || wwn_to_u64(sp->nodeName.u.wwn) == 0)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0142 PLOGI RSP: Invalid WWN.\n"); goto out; } @@ -1246,7 +1447,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, } else { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " "Data: x%x x%x x%x x%x\n", @@ -1271,7 +1473,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, @@ -1311,7 +1513,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, kfree(mp); mempool_free(mbox, phba->mbox_mem_pool); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, @@ -1319,7 +1521,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, } else { mempool_free(mbox, phba->mbox_mem_pool); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, @@ -1330,7 +1532,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, out: if (ndlp->nlp_DID == NameServer_DID) { lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0261 Cannot Register NameServer login\n"); } @@ -1549,7 +1751,13 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, } } - if (ndlp->nlp_type & NLP_FCP_TARGET) { + if (ndlp->nlp_type & NLP_FCP_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (ndlp->nlp_type & NLP_NVME_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } else { @@ -1752,8 +1960,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (mb->mbxStatus) { /* RegLogin failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0246 RegLogin failed Data: x%x x%x x%x x%x " + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0246 RegLogin failed Data: x%x x%x x%x x%x " "x%x\n", did, mb->mbxStatus, vport->port_state, mb->un.varRegLogin.vpi, @@ -1836,8 +2044,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } else { +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) if ((vport->fc_flag & FC_PT2PT) && 
phba->nvmet_support) phba->targetport->port_id = vport->fc_myDID; +#endif /* Only Fabric ports should transition. NVME target * must complete PRLI. @@ -1920,6 +2130,7 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) return ndlp->nlp_state; + lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } @@ -1972,7 +2183,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, IOCB_t *irsp; PRLI *npr; struct lpfc_nvme_prli *nvpr; + struct lpfc_scsi_io_req cmd_req; void *temp_ptr; + int ret = 0; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; @@ -2012,16 +2225,28 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "6028 FCP NPR PRLI Cmpl Init %d Target %d\n", npr->initiatorFunc, npr->targetFunc); - if (npr->initiatorFunc) + + if (npr->initiatorFunc) { ndlp->nlp_type |= NLP_FCP_INITIATOR; + ndlp->nlp_flag &= ~NLP_EXTERNAL_DIF; + } if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; - if (npr->writeXferRdyDis) + if (npr->writeXferRdyDis) { ndlp->nlp_flag |= NLP_FIRSTBURST; + cmd_req.cmd_id = MODE_SENSE; + cmd_req.dev_id = CTRL_ID; + cmd_req.cmd_cmpl = lpfc_mode_sense_cmpl; + ret = lpfc_issue_scsi_cmd(vport, ndlp, + &cmd_req); + lpfc_printf_vlog(vport, KERN_INFO, + LOG_FCP | LOG_FCP_ERROR, + "6334 FB issue returns %d\n", + ret); + } } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; - } else if (nvpr && (bf_get_be32(prli_acc_rsp_code, nvpr) == PRLI_REQ_EXECUTED) && @@ -2032,7 +2257,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (bf_get_be32(prli_init, nvpr)) ndlp->nlp_type |= NLP_NVME_INITIATOR; - if (phba->nsler && bf_get_be32(prli_nsler, nvpr)) + if (phba->nsler && bf_get_be32(prli_nsler, nvpr) && + bf_get_be32(prli_conf, nvpr)) + ndlp->nlp_nvme_info |= NLP_NVME_NSLER; else ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 8e0f03ef346b..70257b6eb077 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
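The BUILD_NVME/CONFIG_NVME_FC guards added throughout these hunks rely on IS_ENABLED(), which evaluates to 1 for both built-in (=y) and modular (=m) configs. A small illustration of the guard as the patch applies it (BUILD_NVME itself is a dist/out-of-tree switch layered on top of Kconfig):

#include <linux/kconfig.h>

#if defined(BUILD_NVME)
#if (IS_ENABLED(CONFIG_NVME_FC))
#include <linux/nvme-fc-driver.h>
#define BUILD_NVME_FC	/* NVMe initiator paths may be compiled in */
#endif
#endif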
* * www.broadcom.com * @@ -35,10 +35,14 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme.h> #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> +#define BUILD_NVME_FC +#endif +#endif #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -55,13 +59,24 @@ #include "lpfc_debugfs.h" /* NVME initiator-based functions */ - +#ifdef BUILD_NVME_FC static struct lpfc_io_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx, int expedite); static void lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); +static void +lpfc_nvme_complete_pend_io(struct lpfc_hba *, + struct nvmefc_fcp_req *); +static void +lpfc_nvme_insert_pend_io(struct lpfc_hba *, + struct nvmefc_fcp_req *, + struct nvme_fc_local_port *, + struct nvme_fc_remote_port *, + void *, + u32 to_head); + static struct nvme_fc_port_template lpfc_nvme_template; @@ -195,6 +210,46 @@ lpfc_nvme_cmd_template(void) /* Word 12, 13, 14, 15 - is zero */ } +/** + * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry. + * @pwqeq: Pointer to command iocb. + * @xritag: Tag that uniquely identifies the local exchange resource. + * @opt: Option bits - + * bit 0 = inhibit sending abts on the link + * + * This function is called with hbalock held. + **/ +void +lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt) +{ + union lpfc_wqe128 *wqe = &pwqeq->wqe; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(wqe, 0, sizeof(*wqe)); + + if (opt & INHIBIT_ABORT) + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); + /* Abort specified xri tag, with the mask deliberately zeroed */ + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); + + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + + /* Abort the IO associated with this outstanding exchange ID. */ + wqe->abort_cmd.wqe_com.abort_tag = xritag; + + /* iotag for the wqe completion. */ + bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag); + + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); +} + /** * lpfc_nvme_create_queue - * @lpfc_pnvme: Pointer to the driver's nvme instance data @@ -245,6 +300,10 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, qhandle->index = qidx; } + spin_lock(&lport->qhandle_list_lock); + list_add(&qhandle->list, &lport->qhandle_list); + spin_unlock(&lport->qhandle_list_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6073 Binding %s HdwQueue %d (cpu %d) to " "hdw_queue %d qhandle x%px\n", str, @@ -274,6 +333,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, { struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; + struct lpfc_nvme_qhandle *qhandle; + struct lpfc_nvme_qhandle *tmp; + bool found = false; if (!pnvme_lport->private) return; @@ -284,9 +346,29 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6001 ENTER.
lpfc_pnvme x%px, qidx x%x qhandle x%px\n", lport, qidx, handle); - kfree(handle); -} + /* Protect against double-free errors from misbehaved upper layers */ + if (handle) { + qhandle = (struct lpfc_nvme_qhandle *)handle; + /* Search if on active list */ + spin_lock(&lport->qhandle_list_lock); + list_for_each_entry(tmp, &lport->qhandle_list, list) { + if (tmp == qhandle) { + list_del_init(&tmp->list); + kfree(qhandle); + found = true; + break; + } + } + spin_unlock(&lport->qhandle_list_lock); + } + + if (!found) + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, + "6568 Invalid qhandle ptr to free " + "lpfc_pnvme x%p, qidx x%x, qhandle x%p\n", + lport, qidx, handle); +} static void lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) { @@ -336,19 +418,21 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) remoteport); spin_lock_irq(&vport->phba->hbalock); - /* The register rebind might have occurred before the delete * downcall. Guard against this race. */ if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) { ndlp->nrport = NULL; ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; - } - spin_unlock_irq(&vport->phba->hbalock); + spin_unlock_irq(&vport->phba->hbalock); - /* Remove original register reference. The host transport - * won't reference this rport/remoteport any further. - */ - lpfc_nlp_put(ndlp); + /* Remove original register reference. The host transport + * won't reference this rport/remoteport any further. + */ + lpfc_nlp_put(ndlp); + } else { + spin_unlock_irq(&vport->phba->hbalock); + } rport_err: return; @@ -402,7 +486,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if (pnvme_lsreq->done) pnvme_lsreq->done(pnvme_lsreq, status); else - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6046 nvme cmpl without done call back? " "Data %px DID %x Xri: %x status %x\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, @@ -533,7 +617,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); if (rc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6045 Issue GEN REQ WQE to NPORT x%x " "Data: x%x x%x\n", ndlp->nlp_DID, genwqe->iotag, @@ -569,7 +653,6 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nodelist *ndlp; struct ulp_bde64 *bpl; struct lpfc_dmabuf *bmp; - uint16_t ntype, nstate; /* there are two dma buf in the request, actually there is one and * the second one is just the start address + cmd size. @@ -595,7 +678,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, /* Need the ndlp. It is stored in the driver's rport. */ ndlp = rport->ndlp; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6051 Remoteport x%px, rport has invalid ndlp. " "Failing LS Req\n", pnvme_rport); return -ENODEV; } @@ -604,11 +687,11 @@ /* The remote node has to be a mapped nvme target or an * unmapped nvme initiator or it's an error.
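The delete_queue rework above will only kfree() a handle it can still find on the lport's qhandle_list, turning a double-free from a misbehaving upper layer into a logged no-op. The core of that guard, reduced to a sketch (demo types, not the driver's):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_qhandle { struct list_head list; };
struct demo_lport {
	spinlock_t qhandle_list_lock;
	struct list_head qhandle_list;
};

static bool demo_free_tracked(struct demo_lport *lport,
			      struct demo_qhandle *qh)
{
	struct demo_qhandle *tmp;
	bool found = false;

	spin_lock(&lport->qhandle_list_lock);
	list_for_each_entry(tmp, &lport->qhandle_list, list) {
		if (tmp == qh) {
			/* Only pointers still on the list are live;
			 * anything else is stale or already freed.
			 */
			list_del_init(&tmp->list);
			kfree(tmp);
			found = true;
			break;
		}
	}
	spin_unlock(&lport->qhandle_list_lock);
	return found;
}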
*/ - ntype = ndlp->nlp_type; - nstate = ndlp->nlp_state; - if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || - (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + if (((ndlp->nlp_type & NLP_NVME_TARGET) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) || + ((ndlp->nlp_type & NLP_NVME_INITIATOR) && + (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6088 DID x%06x not ready for " "IO. State x%x, Type x%x\n", pnvme_rport->port_id, @@ -618,7 +701,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6044 Could not find node for DID %x\n", pnvme_rport->port_id); return 2; @@ -626,7 +709,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, INIT_LIST_HEAD(&bmp->list); bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); if (!bmp->virt) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6042 Could not find node for DID %x\n", pnvme_rport->port_id); kfree(bmp); @@ -666,7 +749,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, ndlp, 2, 30, 0); if (ret != WQE_SUCCESS) { atomic_inc(&lport->xmt_ls_err); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6052 EXIT. issue ls wqe failed lport x%px, " "rport x%px lsreq x%px Status %x DID %x\n", pnvme_lport, pnvme_rport, pnvme_lsreq, @@ -717,7 +800,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); if (!ndlp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6049 Could not find node for DID %x\n", pnvme_rport->port_id); return; @@ -758,6 +841,10 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, lpfc_sli_issue_abort_iotag(phba, pring, wqe); spin_unlock_irq(&phba->hbalock); } + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + } /* Fix up the existing sgls for NVME IO. */ @@ -857,89 +944,6 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, sgl->sge_len = cpu_to_le32(nCmd->rsplen); } -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS -static void -lpfc_nvme_ktime(struct lpfc_hba *phba, - struct lpfc_io_buf *lpfc_ncmd) -{ - uint64_t seg1, seg2, seg3, seg4; - uint64_t segsum; - - if (!lpfc_ncmd->ts_last_cmd || - !lpfc_ncmd->ts_cmd_start || - !lpfc_ncmd->ts_cmd_wqput || - !lpfc_ncmd->ts_isr_cmpl || - !lpfc_ncmd->ts_data_nvme) - return; - - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start) - return; - if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) - return; - if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) - return; - if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput) - return; - if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl) - return; - /* - * Segment 1 - Time from Last FCP command cmpl is handed - * off to NVME Layer to start of next command. - * Segment 2 - Time from Driver receives a IO cmd start - * from NVME Layer to WQ put is done on IO cmd. - * Segment 3 - Time from Driver WQ put is done on IO cmd - * to MSI-X ISR for IO cmpl. - * Segment 4 - Time from MSI-X ISR for IO cmpl to when - * cmpl is handled off to the NVME Layer. 
- */ - seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; - if (seg1 > 5000000) /* 5 ms - for sequential IOs only */ - seg1 = 0; - - /* Calculate times relative to start of IO */ - seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); - segsum = seg2; - seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start; - if (segsum > seg3) - return; - seg3 -= segsum; - segsum += seg3; - - seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start; - if (segsum > seg4) - return; - seg4 -= segsum; - - phba->ktime_data_samples++; - phba->ktime_seg1_total += seg1; - if (seg1 < phba->ktime_seg1_min) - phba->ktime_seg1_min = seg1; - else if (seg1 > phba->ktime_seg1_max) - phba->ktime_seg1_max = seg1; - phba->ktime_seg2_total += seg2; - if (seg2 < phba->ktime_seg2_min) - phba->ktime_seg2_min = seg2; - else if (seg2 > phba->ktime_seg2_max) - phba->ktime_seg2_max = seg2; - phba->ktime_seg3_total += seg3; - if (seg3 < phba->ktime_seg3_min) - phba->ktime_seg3_min = seg3; - else if (seg3 > phba->ktime_seg3_max) - phba->ktime_seg3_max = seg3; - phba->ktime_seg4_total += seg4; - if (seg4 < phba->ktime_seg4_min) - phba->ktime_seg4_min = seg4; - else if (seg4 > phba->ktime_seg4_max) - phba->ktime_seg4_max = seg4; - - lpfc_ncmd->ts_last_cmd = 0; - lpfc_ncmd->ts_cmd_start = 0; - lpfc_ncmd->ts_cmd_wqput = 0; - lpfc_ncmd->ts_isr_cmpl = 0; - lpfc_ncmd->ts_data_nvme = 0; -} -#endif - /** * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO * @lpfc_pnvme: Pointer to the driver's nvme instance data @@ -970,11 +974,15 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, uint32_t code, status, idx; uint16_t cid, sqhd, data; uint32_t *ptr; + uint32_t lat; + bool call_done = false; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int cpu; +#endif /* Sanity check on return of outstanding command */ if (!lpfc_ncmd) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_NODE | LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6071 Null lpfc_ncmd pointer. No " "release, skip completion\n"); return; @@ -985,7 +993,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, if (!lpfc_ncmd->nvmeCmd) { spin_unlock(&lpfc_ncmd->buf_lock); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " "nvmeCmd x%px\n", lpfc_ncmd, lpfc_ncmd->nvmeCmd); @@ -1012,13 +1020,14 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, status, wcqe->parameter); + /* * Catch race where our node has transitioned, but the * transport is still transitioning. */ ndlp = lpfc_ncmd->ndlp; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6062 Ignoring NVME cmpl. 
No ndlp\n"); goto out_err; } @@ -1089,7 +1098,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, /* Sanity check */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) break; - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " "placed x%x\n", @@ -1138,38 +1147,44 @@ out_err: #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_ncmd->ts_cmd_start) { lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; - lpfc_ncmd->ts_data_nvme = ktime_get_ns(); - phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; - lpfc_nvme_ktime(phba, lpfc_ncmd); + lpfc_ncmd->ts_data_io = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; + lpfc_io_ktime(phba, lpfc_ncmd); } - if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) { - uint32_t cpu; - idx = lpfc_ncmd->cur_iocbq.hba_wqidx; + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { cpu = raw_smp_processor_id(); - if (cpu < LPFC_CHECK_CPU_CNT) { - if (lpfc_ncmd->cpu != cpu) - lpfc_printf_vlog(vport, - KERN_INFO, LOG_NVME_IOERR, - "6701 CPU Check cmpl: " - "cpu %d expect %d\n", - cpu, lpfc_ncmd->cpu); - phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; - } + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); + if (lpfc_ncmd->cpu != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + cpu, lpfc_ncmd->cpu); } #endif /* NVME targets need completion held off until the abort exchange * completes unless the NVME Rport is getting unregistered. */ - if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { freqpriv = nCmd->private; freqpriv->nvme_buf = NULL; lpfc_ncmd->nvmeCmd = NULL; - spin_unlock(&lpfc_ncmd->buf_lock); + call_done = true; + } + spin_unlock(&lpfc_ncmd->buf_lock); + + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + nCmd->io_dir == NVMEFC_FCP_READ && + nCmd->payload_length) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); + } + + if (call_done) nCmd->done(nCmd); - } else - spin_unlock(&lpfc_ncmd->buf_lock); /* Call release with XB=1 to queue the IO into the abort list. 
*/ lpfc_release_nvme_buf(phba, lpfc_ncmd); @@ -1200,6 +1215,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; + struct nvme_common_command *sqe; struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; @@ -1247,6 +1263,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ wqe->fcp_iread.rsrvd5 = 0; + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); cstat->input_requests++; } } else { @@ -1256,8 +1276,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, cstat->control_requests++; } - if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer @@ -1337,11 +1363,11 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, first_data_sgl = sgl; lpfc_ncmd->seg_cnt = nCmd->sg_cnt; if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6058 Too many sg segments from " "NVME Transport. Max %d, " "nvmeIO sg_cnt %d\n", - phba->cfg_nvme_seg_cnt + 1, + phba->cfg_nvme_seg_cnt, lpfc_ncmd->seg_cnt); lpfc_ncmd->seg_cnt = 0; return 1; @@ -1360,7 +1386,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, j = 2; for (i = 0; i < nseg; i++) { if (data_sg == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6059 dptr err %d, nseg %d\n", i, nseg); lpfc_ncmd->seg_cnt = 0; @@ -1461,7 +1487,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, * and sg_cnt must zero. */ if (nCmd->payload_length != 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6063 NVME DMA Prep Err: sg_cnt %d " "payload_length x%x\n", nCmd->sg_cnt, nCmd->payload_length); @@ -1507,9 +1533,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_qhandle *lpfc_queue_info; struct lpfc_nvme_fcpreq_priv *freqpriv; struct nvme_common_command *sqe; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t start = 0; -#endif + unsigned long iflag = 0; + struct nvme_fc_cmd_iu *nvme_cmd_iu; /* Validate pointers. LLDD fault handling with transport does * have timing races. 
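The NSLER hunk above peeks at the NVMe submission-queue entry embedded in the FC command IU to spot admin async-event commands. A self-contained sketch of that peek (demo function name; the IU layout is the real one from linux/nvme-fc.h):

#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/nvme-fc-driver.h>

static bool demo_is_async_event(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_cmd_iu *iu = req->cmdaddr;

	/* The SQE rides inside the command IU, so the LLDD can key
	 * WQE bits off the opcode without transport assistance.
	 */
	return iu->sqe.common.opcode == nvme_admin_async_event;
}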
@@ -1524,7 +1550,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, if (unlikely(!hw_queue_handle)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6117 Fail IO, NULL hw_queue_handle\n"); + "6117 Busy IO, NULL hw_queue_handle\n"); atomic_inc(&lport->xmt_fcp_err); ret = -EBUSY; goto out_fail; @@ -1532,11 +1558,6 @@ phba = vport->phba; - if (vport->load_flag & FC_UNLOADING) { - ret = -ENODEV; - goto out_fail; - } - if (unlikely(vport->load_flag & FC_UNLOADING)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); @@ -1580,9 +1601,9 @@ if ((ndlp->nlp_type & NLP_NVME_TARGET) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, - "6036 Fail IO, DID x%06x not ready for " + "6036 Busy IO %p, DID x%06x not ready for " "IO. State x%x, Type x%x Flg x%x\n", - pnvme_rport->port_id, + freqpriv, pnvme_rport->port_id, ndlp->nlp_state, ndlp->nlp_type, ndlp->upcall_flags); atomic_inc(&lport->xmt_fcp_bad_ndlp); @@ -1595,13 +1616,67 @@ * if the driver runs out of a resource. These should only be * issued on the admin queue, qidx 0 */ + nvme_cmd_iu = (struct nvme_fc_cmd_iu *)pnvme_fcreq->cmdaddr; if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { - sqe = &((struct nvme_fc_cmd_iu *) - pnvme_fcreq->cmdaddr)->sqe.common; + sqe = &nvme_cmd_iu->sqe.common; if (sqe->opcode == nvme_admin_keep_alive) expedite = 1; } + /* Driver is setting the nvme command category field. For qidx > 0, + * the cmd IU category is 1000 (base 2) or 0x8 which is CSS value of 0. + * Hardcode this until the nvme transport evolves to track the CSS per + * controller. + * For qidx == 0, the admin cmd IU category is 0001 (base 2) or 0x1. + * The category field is located in word1 bits <11:8> of byte 2. Word1 + * is defined as a 3-byte array and a 4th flags byte. + */ +#if !defined(NVME_CMD_FORMAT_ID) + if (lpfc_queue_info->qidx) + nvme_cmd_iu->rsvd4[2] = 0x8; + else + nvme_cmd_iu->rsvd4[2] = 1; +#endif + + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + pnvme_fcreq->io_dir == NVMEFC_FCP_READ && + pnvme_fcreq->payload_length) { + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) { + /* If this is an IO from the upper layer, and we have + * outstanding deferred IOs, don't even bother trying + * to send this. Just put it directly on the pendq. + */ + spin_lock_irqsave(&phba->nvme_pendq_lock, iflag); + if (!list_empty(&phba->nvme_pendq) && + (freqpriv->status != -EAGAIN)) { + lpfc_nvme_insert_pend_io(phba, pnvme_fcreq, + pnvme_lport, + pnvme_rport, + hw_queue_handle, 0); + spin_unlock_irqrestore(&phba->nvme_pendq_lock, + iflag); + ret = 0; + goto out_fail; + } + spin_unlock_irqrestore(&phba->nvme_pendq_lock, iflag); + } + ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); + if (ret) { + spin_lock_irqsave(&phba->nvme_pendq_lock, iflag); + lpfc_nvme_insert_pend_io(phba, pnvme_fcreq, pnvme_lport, + pnvme_rport, hw_queue_handle, + 1); + spin_unlock_irqrestore(&phba->nvme_pendq_lock, iflag); + ret = 0; + goto out_fail; + } + freqpriv->status = 0; + + /* Get start time for IO latency */ + start = ktime_get_ns(); + } + /* The node is shared with FCP IO, make sure the IO pending count does * not exceed the programmed depth.
*/ @@ -1609,14 +1684,14 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && !expedite) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6174 Fail IO, ndlp qdepth exceeded: " + "6174 Busy IO, ndlp qdepth exceeded: " "idx %d DID %x pend %d qdepth %d\n", lpfc_queue_info->index, ndlp->nlp_DID, atomic_read(&ndlp->cmd_pending), ndlp->cmd_qdepth); atomic_inc(&lport->xmt_fcp_qdepth); ret = -EBUSY; - goto out_fail; + goto out_fail1; } } @@ -1632,11 +1707,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, if (lpfc_ncmd == NULL) { atomic_inc(&lport->xmt_fcp_noxri); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6065 Fail IO, driver buffer pool is empty: " + "6065 Busy IO, waiting for available buffer: " "idx %d DID %x\n", lpfc_queue_info->index, ndlp->nlp_DID); ret = -EBUSY; - goto out_fail; + goto out_fail1; } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (start) { @@ -1646,6 +1721,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_ncmd->ts_cmd_start = 0; } #endif + lpfc_ncmd->rx_cmd_start = start; /* * Store the data needed by the driver to issue, abort, and complete @@ -1703,19 +1779,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, if (lpfc_ncmd->ts_cmd_start) lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); - if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { cpu = raw_smp_processor_id(); - if (cpu < LPFC_CHECK_CPU_CNT) { - lpfc_ncmd->cpu = cpu; - if (idx != cpu) - lpfc_printf_vlog(vport, - KERN_INFO, LOG_NVME_IOERR, - "6702 CPU Check cmd: " - "cpu %d wq %d\n", - lpfc_ncmd->cpu, - lpfc_queue_info->index); - phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++; - } + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); + lpfc_ncmd->cpu = cpu; + if (idx != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6702 CPU Check cmd: " + "cpu %d wq %d\n", + lpfc_ncmd->cpu, + lpfc_queue_info->index); } #endif return 0; @@ -1729,6 +1803,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, } else cstat->control_requests--; lpfc_release_nvme_buf(phba, lpfc_ncmd); + out_fail1: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, + pnvme_fcreq->payload_length, NULL); out_fail: return ret; } @@ -1744,6 +1821,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * Return value: * None **/ +#endif void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_wcqe_complete *abts_cmpl) @@ -1758,7 +1836,122 @@ lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), bf_get(lpfc_wcqe_c_status, abts_cmpl), bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); +#ifdef BUILD_NVME_FC lpfc_sli_release_iocbq(phba, cmdiocb); +#endif +} + +#ifdef BUILD_NVME_FC +/** + * Driver calls this routine to complete a pending nvme IO to the nvme + * transport. The IO is pending for one of two reasons: (1) The congestion + * management handling could not send it (2) The nvme abort handler is + * completing a pending IO. The caller must not hold the + * phba->nvme_pendq_lock as this routine upcalls the transport. + **/ +static void +lpfc_nvme_complete_pend_io(struct lpfc_hba *phba, + struct nvmefc_fcp_req *pnvme_fcreq) +{ + struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private; + + /* Complete the IO for the transport. 
*/ + pnvme_fcreq->transferred_length = 0; + pnvme_fcreq->rcv_rsplen = 0; + pnvme_fcreq->status = NVME_SC_INTERNAL; + + /* Complete the driver's piece. */ + freqpriv->nvme_buf = NULL; + freqpriv->status = 0; + pnvme_fcreq->done(pnvme_fcreq); +} + +/** + * Driver calls this routine to insert an nvme IO to the driver's + * nvme pending IO list. The IO is pending because the congestion + * management handling could not send it. The caller is expected + * to hold the phba->nvme_pendq_lock. + **/ +static void +lpfc_nvme_insert_pend_io(struct lpfc_hba *phba, + struct nvmefc_fcp_req *pnvme_fcreq, + struct nvme_fc_local_port *pnvme_lport, + struct nvme_fc_remote_port *pnvme_rport, + void *hw_queue_handle, u32 to_head) +{ + struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private; + + lockdep_assert_held(&phba->nvme_pendq_lock); + + freqpriv->pnvme_lport = pnvme_lport; + freqpriv->pnvme_rport = pnvme_rport; + freqpriv->hw_queue_handle = hw_queue_handle; + freqpriv->pnvme_fcreq = pnvme_fcreq; + freqpriv->status = -EBUSY; + + /* Put IO back on Head of the pending queue to + * preserve IO order and not introduce transport + * timing races. + */ + if (!to_head) + list_add_tail(&freqpriv->nvme_pend_list, &phba->nvme_pendq); + else + list_add(&freqpriv->nvme_pend_list, &phba->nvme_pendq); + phba->nvme_pendq_cnt++; +} + +void +lpfc_nvme_issue_pend_io(struct work_struct *work) +{ + struct lpfc_hba *phba = + container_of(work, struct lpfc_hba, nvme_defer_work); + struct lpfc_nvme_fcpreq_priv *freqpriv; + unsigned long iflag = 0; + int ret = 0; + + while (atomic_read(&phba->nvme_abts_cnt) != 0) { + spin_lock_irqsave(&phba->nvme_pendq_lock, iflag); + list_remove_head(&phba->nvme_abts_pendq, freqpriv, + struct lpfc_nvme_fcpreq_priv, + nvme_abts_pend_list); + spin_unlock_irqrestore(&phba->nvme_pendq_lock, iflag); + + lpfc_nvme_complete_pend_io(phba, freqpriv->pnvme_fcreq); + atomic_dec(&phba->nvme_abts_cnt); + } + + spin_lock_irqsave(&phba->nvme_pendq_lock, iflag); + while (!list_empty(&phba->nvme_pendq)) { + list_remove_head(&phba->nvme_pendq, freqpriv, + struct lpfc_nvme_fcpreq_priv, nvme_pend_list); + phba->nvme_pendq_cnt--; + + /* Set the status to distinguish an IO the transport issued + * versus a driver retry. + */ + freqpriv->status = -EAGAIN; + + spin_unlock_irqrestore(&phba->nvme_pendq_lock, iflag); + ret = lpfc_nvme_fcp_io_submit(freqpriv->pnvme_lport, + freqpriv->pnvme_rport, + freqpriv->hw_queue_handle, + freqpriv->pnvme_fcreq); + + /* If the driver returns a failure code, then the IO has to + * be completed, as there is no retry scheduled. Otherwise, the + * IO was either sent or rescheduled. + */ + if (ret) + lpfc_nvme_complete_pend_io(phba, freqpriv->pnvme_fcreq); + + spin_lock_irqsave(&phba->nvme_pendq_lock, iflag); + + /* If we hit our BW threshold, no sense in continuing */ + if (freqpriv->status == -EBUSY) + break; + freqpriv->status = 0; + } + spin_unlock_irqrestore(&phba->nvme_pendq_lock, iflag); } /** @@ -1791,7 +1984,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_iocbq *abts_buf; struct lpfc_iocbq *nvmereq_wqe; struct lpfc_nvme_fcpreq_priv *freqpriv; - union lpfc_wqe128 *abts_wqe; unsigned long flags; int ret_val; @@ -1828,11 +2020,26 @@ /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished.
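lpfc_nvme_issue_pend_io above is a standard deferred-work drain loop: pop an entry under the lock, drop the lock to resubmit, re-take it to continue, and stop early once the submit path reports the bandwidth cap is still in force. Its skeleton, with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_req { struct list_head list; };
struct demo_hba {
	struct work_struct work;
	spinlock_t pendq_lock;
	struct list_head pendq;
};

int demo_submit(struct demo_hba *hba, struct demo_req *req); /* hypothetical */

static void demo_drain_work(struct work_struct *work)
{
	struct demo_hba *hba = container_of(work, struct demo_hba, work);
	struct demo_req *req;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hba->pendq_lock, flags);
	while (!list_empty(&hba->pendq)) {
		req = list_first_entry(&hba->pendq, struct demo_req, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&hba->pendq_lock, flags);

		/* Submit with no locks held; demo_submit() is assumed
		 * to requeue the request itself when still throttled.
		 */
		ret = demo_submit(hba, req);

		spin_lock_irqsave(&hba->pendq_lock, flags);
		if (ret == -EBUSY)
			break;		/* throttled again: stop draining */
	}
	spin_unlock_irqrestore(&hba->pendq_lock, flags);
}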
*/ + spin_lock_irqsave(&phba->nvme_pendq_lock, flags); + if ((freqpriv->status == -EAGAIN) || + (freqpriv->status == -EBUSY)) { + /* Move the pending IO to the abts list*/ + list_del_init(&freqpriv->nvme_pend_list); + phba->nvme_pendq_cnt--; + + list_add_tail(&freqpriv->nvme_abts_pend_list, + &phba->nvme_abts_pendq); + atomic_inc(&phba->nvme_abts_cnt); + spin_unlock_irqrestore(&phba->nvme_pendq_lock, flags); + queue_work(phba->nvme_io, &phba->nvme_defer_work); + return; + } + spin_unlock_irqrestore(&phba->nvme_pendq_lock, flags); spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6139 Driver in reset cleanup - flushing " "NVME Req now. hba_flag x%x\n", phba->hba_flag); @@ -1842,13 +2049,13 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, lpfc_nbuf = freqpriv->nvme_buf; if (!lpfc_nbuf) { spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, - "6140 NVME IO req has no matching lpfc nvme " - "io buffer. Skipping abort req.\n"); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6140 NVME IO req %p has no matching lpfc nvme " + "io buffer. Skipping abort req.\n", freqpriv); return; } else if (!lpfc_nbuf->nvmeCmd) { spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6141 lpfc NVME IO req has no nvme_fcreq " "io buffer. Skipping abort req.\n"); return; @@ -1866,7 +2073,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, * has not seen it yet. */ if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6143 NVME req mismatch: " "lpfc_nbuf x%px nvmeCmd x%px, " "pnvme_fcreq x%px. Skipping Abort xri x%x\n", @@ -1877,7 +2084,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Don't abort IOs no longer on the pending queue. */ if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6142 NVME IO req x%px not queued - skipping " "abort req xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); @@ -1891,7 +2098,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Outstanding abort is in progress */ if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6144 Outstanding NVME I/O Abort Request " "still pending on nvme_fcreq x%px, " "lpfc_ncmd %px xri x%x\n", @@ -1902,7 +2109,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, abts_buf = __lpfc_sli_get_iocbq(phba); if (!abts_buf) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6136 No available abort wqes. Skipping " "Abts req for nvme_fcreq x%px xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); @@ -1912,37 +2119,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Ready - mark outstanding as aborted by driver. */ nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; - /* Complete prepping the abort wqe and issue to the FW. */ - abts_wqe = &abts_buf->wqe; - - /* WQEs are reused. 
Clear stale data and set key fields to - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. - */ - memset(abts_wqe, 0, sizeof(*abts_wqe)); - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); - - /* word 7 */ - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); - bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, - nvmereq_wqe->iocb.ulpClass); - - /* word 8 - tell the FW to abort the IO associated with this - * outstanding exchange ID. - */ - abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; - - /* word 9 - this is the iotag for the abts_wqe completion. */ - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, - abts_buf->iotag); - - /* word 10 */ - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); - - /* word 11 */ - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_buf->iocb_flag |= LPFC_IO_NVME; @@ -1952,8 +2129,12 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf); spin_unlock(&lpfc_nbuf->buf_lock); spin_unlock_irqrestore(&phba->hbalock, flags); + + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + if (ret_val) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6137 Failed abts issue_wqe with status x%x " "for nvme_fcreq x%px.\n", ret_val, pnvme_fcreq); @@ -1976,8 +2157,6 @@ out_unlock: /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { - .module = THIS_MODULE, - /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, @@ -2037,6 +2216,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, lpfc_ncmd->start_time = jiffies; lpfc_ncmd->flags = 0; + /* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. * The cmd is going to be embedded so we need a SKIP SGE. @@ -2054,7 +2234,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, atomic_inc(&ndlp->cmd_pending); lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } - } else { qp = &phba->sli4_hba.hdwq[idx]; qp->empty_io_bufs++; @@ -2086,7 +2265,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; qp = lpfc_ncmd->hdwq; - if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { + if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " "ox_id x%x on reqtag x%x\n", @@ -2101,6 +2280,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) } else lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); } +#endif /** * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. 
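Note: both the initiator abort path above and the nvmet abort path later in this patch now delegate abort-WQE setup to lpfc_nvme_prep_abort_wqe() instead of open-coding it. The helper's body is not part of the hunks shown here; the sketch below is a reconstruction from the removed open-coded setup, and the wqe_ct and INHIBIT_ABORT/ia handling are assumptions inferred from the nvmet call site, so treat it as illustrative rather than authoritative:

	void
	lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
	{
		union lpfc_wqe128 *wqe = &pwqeq->wqe;

		/* WQEs are reused. Clear stale data and set key fields to
		 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
		 */
		memset(wqe, 0, sizeof(*wqe));

		/* word 3: abort by XRI; setting ia suppresses the on-wire
		 * ABTS (assumed mapping for the INHIBIT_ABORT option).
		 */
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		if (opt & INHIBIT_ABORT)
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);

		/* word 7 */
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

		/* word 8 - abort the IO associated with this exchange ID */
		wqe->abort_cmd.wqe_com.abort_tag = xritag;

		/* word 9 - iotag for the abort WQE's own completion */
		bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

		/* word 10 */
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

		/* word 11 */
		bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
		bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	}

Consolidating the two copies keeps the initiator and target abort paths from drifting apart.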
@@ -2122,6 +2302,7 @@ int lpfc_nvme_create_localport(struct lpfc_vport *vport) { int ret = 0; +#ifdef BUILD_NVME_FC struct lpfc_hba *phba = vport->phba; struct nvme_fc_port_info nfcp_info; struct nvme_fc_local_port *localport; @@ -2141,20 +2322,14 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) */ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; - /* Advertise how many hw queues we support based on fcp_io_sched */ - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; - else - lpfc_nvme_template.max_hw_queues = - phba->sli4_hba.num_present_cpu; - - if (!IS_ENABLED(CONFIG_NVME_FC)) - return ret; + /* Advertise how many hw queues we support based on cfg_hdw_queue, + * which will not exceed cpu count. + */ + lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. */ - ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, &vport->phba->pcidev->dev, &localport); if (!ret) { @@ -2171,6 +2346,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) vport->localport = localport; lport->vport = vport; vport->nvmei_support = 1; + INIT_LIST_HEAD(&lport->qhandle_list); + spin_lock_init(&lport->qhandle_list_lock); atomic_set(&lport->xmt_fcp_noxri, 0); atomic_set(&lport->xmt_fcp_bad_ndlp, 0); @@ -2187,10 +2364,14 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) atomic_set(&lport->fc4NvmeLsRequests, 0); atomic_set(&lport->fc4NvmeLsCmpls, 0); } +#else + ret = -ENOMEM; +#endif return ret; } +#if defined(BUILD_NVME) #if (IS_ENABLED(CONFIG_NVME_FC)) /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. * @@ -2211,6 +2392,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, int ret, i, pending = 0; struct lpfc_sli_ring *pring; struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hdw_queue *qp; + int abts_scsi, abts_nvme; /* Host transport has to clean up and confirm requiring an indefinite * wait. Print a message if a 10 second wait expires and renew the @@ -2221,17 +2404,23 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { pending = 0; + abts_scsi = 0; + abts_nvme = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].io_wq->pring; + qp = &phba->sli4_hba.hdwq[i]; + pring = qp->io_wq->pring; if (!pring) continue; - if (pring->txcmplq_cnt) - pending += pring->txcmplq_cnt; + pending += pring->txcmplq_cnt; + abts_scsi += qp->abts_scsi_io_bufs; + abts_nvme += qp->abts_nvme_io_bufs; } - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6176 Lport x%px Localport x%px wait " - "timed out. Pending %d. Renewing.\n", - lport, vport->localport, pending); + "timed out. Pending %d [%d:%d]. " + "Renewing.\n", + lport, vport->localport, pending, + abts_scsi, abts_nvme); continue; } break; @@ -2241,6 +2430,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, lport, vport->localport); } #endif +#endif /** * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 
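The unreg wait in the hunk above deliberately re-arms a bounded wait_for_completion_timeout() so it can log the outstanding txcmplq depth and the per-queue SCSI/NVME abort counts each cycle, rather than blocking silently until the transport signals completion. A minimal standalone sketch of the idiom (the function name is hypothetical; the 10 second period comes from the surrounding comment):

	static void example_unreg_wait(struct completion *unreg_cmp)
	{
		unsigned long wait_tmo = msecs_to_jiffies(10000);	/* 10 sec */

		for (;;) {
			if (wait_for_completion_timeout(unreg_cmp, wait_tmo))
				break;	/* unregistration completed */

			/* Timed out: report progress, then renew the wait
			 * instead of giving up or blocking unboundedly.
			 */
			pr_warn("lport unreg still pending - renewing wait\n");
		}
	}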
@@ -2255,7 +2445,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, void lpfc_nvme_destroy_localport(struct lpfc_vport *vport) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; int ret; @@ -2305,7 +2495,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) void lpfc_nvme_update_localport(struct lpfc_vport *vport) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; @@ -2341,7 +2531,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) int lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC int ret = 0; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; @@ -2350,6 +2540,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct nvme_fc_remote_port *remote_port; struct nvme_fc_port_info rpinfo; struct lpfc_nodelist *prev_ndlp = NULL; + struct fc_rport *srport = ndlp->rport; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, "6006 Register NVME PORT. DID x%06x nlptype x%x\n", @@ -2379,6 +2570,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + if (srport) + rpinfo.dev_loss_tmo = srport->dev_loss_tmo; + else + rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; spin_lock_irq(&vport->phba->hbalock); oldrport = lpfc_ndlp_get_nrport(ndlp); @@ -2404,39 +2599,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_unlock_irq(&vport->phba->hbalock); rport = remote_port->private; if (oldrport) { - /* New remoteport record does not guarantee valid - * host private memory area. - */ - if (oldrport == remote_port->private) { - /* Same remoteport - ndlp should match. - * Just reuse. - */ - lpfc_printf_vlog(ndlp->vport, KERN_INFO, - LOG_NVME_DISC, - "6014 Rebind lport to current " - "remoteport x%px wwpn 0x%llx, " - "Data: x%x x%x x%px x%px x%x " - " x%06x\n", - remote_port, - remote_port->port_name, - remote_port->port_id, - remote_port->port_role, - oldrport->ndlp, - ndlp, - ndlp->nlp_type, - ndlp->nlp_DID); - - /* It's a complete rebind only if the driver - * is registering with the same ndlp. Otherwise - * the driver likely executed a node swap - * prior to this registration and the ndlp to - * remoteport binding needs to be redone. - */ - if (prev_ndlp == ndlp) - return 0; - - } - /* Sever the ndlp<->rport association * before dropping the ndlp ref from * register. 
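Two behavioral notes on the registration changes above: the NVME remote port now inherits its dev_loss_tmo from the SCSI-side fc_rport when one exists, falling back to the vport's cfg_devloss_tmo otherwise; and the old "same remoteport, same ndlp" rebind shortcut is removed, so registering against an existing remoteport always severs and re-establishes the ndlp<->rport association.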
@@ -2471,14 +2633,14 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "6022 Bind lport x%px to remoteport x%px " "rport x%px WWNN 0x%llx, " "Rport WWPN 0x%llx DID " - "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", + "x%06x Role x%x, ndlp x%px prev_ndlp x%px\n", lport, remote_port, rport, rpinfo.node_name, rpinfo.port_name, rpinfo.port_id, rpinfo.port_role, ndlp, prev_ndlp); } else { lpfc_printf_vlog(vport, KERN_ERR, - LOG_NVME_DISC | LOG_NODE, + LOG_TRACE_EVENT, "6031 RemotePort Registration failed " "err: %d, DID x%06x\n", ret, ndlp->nlp_DID); @@ -2490,8 +2652,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) #endif } -/** - * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport +/* lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport * * If the ndlp represents an NVME Target, that we are logged into, * ping the NVME FC Transport layer to initiate a device rescan @@ -2500,7 +2661,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) void lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC struct lpfc_nvme_rport *nrport; struct nvme_fc_remote_port *remoteport = NULL; @@ -2519,12 +2680,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!nrport || !remoteport) goto rescan_exit; - /* Only rescan if we are an NVME target in the MAPPED state */ - if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && - ndlp->nlp_state == NLP_STE_MAPPED_NODE) { + /* Rescan an NVME target in MAPPED state with DISCOVERY role set */ + if ((remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) && + (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { nvme_fc_rescan_remoteport(remoteport); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6172 NVME rescanned DID x%06x " "port_state x%x\n", ndlp->nlp_DID, remoteport->port_state); @@ -2552,7 +2713,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) void lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC int ret; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; @@ -2607,7 +2768,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ret = nvme_fc_unregister_remoteport(remoteport); if (ret != 0) { lpfc_nlp_put(ndlp); - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6167 NVME unregister failed %d " "port_state x%x\n", ret, remoteport->port_state); @@ -2617,7 +2778,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) input_err: #endif - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6168 State error: lport x%px, rport x%px FCID x%06x\n", vport->localport, ndlp->rport, ndlp->nlp_DID); } @@ -2637,16 +2798,16 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri, struct lpfc_io_buf *lpfc_ncmd) { +#ifdef BUILD_NVME_FC uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct nvmefc_fcp_req *nvme_cmd = NULL; struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; - if (ndlp) lpfc_sli4_abts_err_handler(phba, ndlp, axri); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6311 nvme_cmd %p xri x%x tag x%x abort complete and " + "6311 nvme_cmd x%px xri x%x tag x%x 
abort complete and " "xri released\n", lpfc_ncmd->nvmeCmd, xri, lpfc_ncmd->cur_iocbq.iotag); @@ -2661,6 +2822,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, lpfc_ncmd->nvmeCmd = NULL; } lpfc_release_nvme_buf(phba, lpfc_ncmd); +#endif } /** @@ -2702,7 +2864,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) * dump a message. Something is wrong. */ if ((wait_cnt % 1000) == 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6178 NVME IO not empty, " "cnt %d\n", wait_cnt); } @@ -2711,14 +2873,17 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) } void -lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn) +lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, + uint32_t stat, uint32_t param) { -#if (IS_ENABLED(CONFIG_NVME_FC)) +#ifdef BUILD_NVME_FC struct lpfc_io_buf *lpfc_ncmd; struct nvmefc_fcp_req *nCmd; - struct lpfc_nvme_fcpreq_priv *freqpriv; + struct lpfc_wcqe_complete wcqe; + struct lpfc_wcqe_complete *wcqep = &wcqe; - if (!pwqeIn->context1) { + lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; + if (!lpfc_ncmd) { lpfc_sli_release_iocbq(phba, pwqeIn); return; } @@ -2728,31 +2893,30 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn) lpfc_sli_release_iocbq(phba, pwqeIn); return; } - lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; spin_lock(&lpfc_ncmd->buf_lock); - if (!lpfc_ncmd->nvmeCmd) { + nCmd = lpfc_ncmd->nvmeCmd; + if (!nCmd) { spin_unlock(&lpfc_ncmd->buf_lock); lpfc_release_nvme_buf(phba, lpfc_ncmd); return; } + spin_unlock(&lpfc_ncmd->buf_lock); - nCmd = lpfc_ncmd->nvmeCmd; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6194 NVME Cancel xri %x\n", lpfc_ncmd->cur_iocbq.sli4_xritag); - nCmd->transferred_length = 0; - nCmd->rcv_rsplen = 0; - nCmd->status = NVME_SC_INTERNAL; - freqpriv = nCmd->private; - freqpriv->nvme_buf = NULL; - lpfc_ncmd->nvmeCmd = NULL; - - spin_unlock(&lpfc_ncmd->buf_lock); - nCmd->done(nCmd); + wcqep->word0 = 0; + bf_set(lpfc_wcqe_c_status, wcqep, stat); + wcqep->parameter = param; + wcqep->word3 = 0; /* xb is 0 */ /* Call release with XB=1 to queue the IO into the abort list. */ - lpfc_release_nvme_buf(phba, lpfc_ncmd); + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + bf_set(lpfc_wcqe_c_xb, wcqep, 1); + + (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep); #endif } + diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 593c48ff634e..589192951edd 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -21,7 +21,16 @@ * included with this package. 
* ********************************************************************/ -#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */ +#if !defined(BUILD_NVME) +#define FC_TYPE_NVME 0x28 +#endif + +#define LPFC_NVME_MIN_SEGS 16 +#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */ +#define LPFC_NVME_MAX_SEGS 510 +#define LPFC_NVMET_MIN_POSTBUF 16 +#define LPFC_NVMET_DEFAULT_POSTBUF 1024 +#define LPFC_NVMET_MAX_POSTBUF 4096 #define LPFC_NVME_ERSP_LEN 0x20 @@ -30,14 +39,12 @@ #define LPFC_NVME_FB_SHIFT 9 #define LPFC_NVME_MAX_FB (1 << 20) /* 1M */ -#define LPFC_MAX_NVME_INFO_TMP_LEN 100 -#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" - #define lpfc_ndlp_get_nrport(ndlp) \ ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \ ? NULL : ndlp->nrport) struct lpfc_nvme_qhandle { + struct list_head list; /* list entry to maintain qhandle list */ uint32_t index; /* WQ index to use */ uint32_t qidx; /* queue index passed to create */ uint32_t cpu_id; /* current cpu id at time of create */ @@ -62,6 +69,9 @@ struct lpfc_nvme_lport { atomic_t cmpl_fcp_err; atomic_t cmpl_ls_xb; atomic_t cmpl_ls_err; + /* Maintaining qhandle list */ + struct list_head qhandle_list; + spinlock_t qhandle_list_lock; }; struct lpfc_nvme_rport { @@ -73,4 +83,13 @@ struct lpfc_nvme_rport { struct lpfc_nvme_fcpreq_priv { struct lpfc_io_buf *nvme_buf; + + /* Used for pending IO */ + struct list_head nvme_pend_list; + struct list_head nvme_abts_pend_list; + struct nvme_fc_local_port *pnvme_lport; + struct nvme_fc_remote_port *pnvme_rport; + void *hw_queue_handle; + struct nvmefc_fcp_req *pnvme_fcreq; + uint32_t status; }; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 9884228800a5..bb6fe445d022 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channsel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -35,11 +35,14 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> - +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) #include <linux/nvme.h> #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> - +#define BUILD_NVMET_FC +#endif +#endif #include "lpfc_version.h" #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -56,6 +59,7 @@ #include "lpfc_vport.h" #include "lpfc_debugfs.h" +#ifdef BUILD_NVMET_FC static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, struct lpfc_nvmet_rcv_ctx *, dma_addr_t rspbuf, @@ -302,7 +306,11 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe) { struct lpfc_nvmet_tgtport *tgtp; +#if defined(BUILD_NVME_PLUS) + struct nvmefc_ls_rsp *rsp; +#else struct nvmefc_tgt_ls_req *rsp; +#endif struct lpfc_nvmet_rcv_ctx *ctxp; uint32_t status, result; @@ -311,7 +319,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ctxp = cmdwqe->context2; if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6410 NVMET LS cmpl state mismatch IO x%x: " "%d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); @@ -351,6 +359,7 @@ out: rsp->done(rsp); kfree(ctxp); } +#endif /** * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context @@ -368,7 +377,7 @@ out: void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) { -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) +#ifdef BUILD_NVMET_FC struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; @@ -378,15 +387,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) int cpu; unsigned long iflag; - if (ctxp->txrdy) { - dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, - ctxp->txrdy_phys); - ctxp->txrdy = NULL; - ctxp->txrdy_phys = 0; - } - if (ctxp->state == LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } @@ -430,7 +432,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; ctxp->wqeq = NULL; - ctxp->txrdy = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; @@ -466,7 +467,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6181 Unable to queue deferred work " "for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", @@ -500,6 +501,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) #endif } +#ifdef BUILD_NVMET_FC #ifdef CONFIG_SCSI_LPFC_DEBUG_FS static void lpfc_nvmet_ktime(struct lpfc_hba *phba, @@ -695,7 +697,7 @@ out: phba->ktime_status_samples++; } #endif - +#endif /** * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response * @phba: Pointer to HBA context object. @@ -706,6 +708,7 @@ out: * lock held. This function is the completion handler for NVME FCP commands * The function frees memory resources used for the NVME commands. 
**/ +#ifdef BUILD_NVMET_FC static void lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe) @@ -715,7 +718,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct lpfc_nvmet_rcv_ctx *ctxp; uint32_t status, result, op, start_clean, logerr; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint32_t id; + int id; #endif ctxp = cmdwqe->context2; @@ -822,23 +825,27 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, rsp->done(rsp); } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { id = raw_smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) { - if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6704 CPU Check cmdcmpl: " - "cpu %d expect %d\n", - id, ctxp->cpu); - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++; - } + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); + if (ctxp->cpu != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6704 CPU Check cmdcmpl: " + "cpu %d expect %d\n", + id, ctxp->cpu); } #endif } +#if defined(BUILD_NVME_PLUS) +static int +lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, + struct nvmefc_ls_rsp *rsp) +#else static int lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_ls_req *rsp) +#endif { struct lpfc_nvmet_rcv_ctx *ctxp = container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); @@ -851,9 +858,6 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, struct ulp_bde64 bpl; int rc; - if (phba->pport->load_flag & FC_UNLOADING) - return -ENODEV; - if (phba->pport->load_flag & FC_UNLOADING) return -ENODEV; @@ -862,7 +866,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) || (ctxp->entry_cnt != 1)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6412 NVMET LS rsp state mismatch " "oxid x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); @@ -874,7 +878,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, rsp->rsplen); if (nvmewqeq == NULL) { atomic_inc(&nvmep->xmt_ls_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6150 LS Drop IO x%x: Prep\n", ctxp->oxid); lpfc_in_buf_free(phba, &nvmebuf->dbuf); @@ -914,7 +918,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, } /* Give back resources */ atomic_inc(&nvmep->xmt_ls_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6151 LS Drop IO x%x: Issue %d\n", ctxp->oxid, rc); @@ -939,11 +943,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, struct lpfc_sli_ring *pring; unsigned long iflags; int rc; - - if (phba->pport->load_flag & FC_UNLOADING) { - rc = -ENODEV; - goto aerr; - } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int id; +#endif if (phba->pport->load_flag & FC_UNLOADING) { rc = -ENODEV; @@ -962,16 +964,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - int id = raw_smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) { - if (rsp->hwqid != id) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6705 CPU Check OP: " - "cpu %d expect %d\n", - id, rsp->hwqid); - phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++; - } + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + id = 
raw_smp_processor_id(); + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); + if (rsp->hwqid != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); ctxp->cpu = id; /* Setup cpu for cmpl check */ } #endif @@ -980,7 +980,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || (ctxp->state == LPFC_NVMET_STE_ABORT)) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6102 IO oxid x%x aborted\n", ctxp->oxid); rc = -ENXIO; @@ -990,7 +990,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); if (nvmewqeq == NULL) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6152 FCP Drop IO x%x: Prep\n", ctxp->oxid); rc = -ENXIO; @@ -1038,7 +1038,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, /* Give back resources */ atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6153 FCP Drop IO x%x: Issue: %d\n", ctxp->oxid, rc); @@ -1071,9 +1071,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct lpfc_queue *wq; unsigned long flags; - if (phba->pport->load_flag & FC_UNLOADING) - return; - if (phba->pport->load_flag & FC_UNLOADING) return; @@ -1141,7 +1138,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, ctxp->flag, ctxp->oxid); else if (ctxp->state != LPFC_NVMET_STE_DONE && ctxp->state != LPFC_NVMET_STE_ABORT) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); @@ -1188,7 +1185,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, return; } - tgtp = phba->targetport->private; + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (tgtp) atomic_inc(&tgtp->rcv_fcp_cmd_defer); @@ -1199,6 +1196,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } +#ifdef OOB_FC_TRANSPORT static void lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) { @@ -1210,10 +1208,11 @@ lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) phba = tgtp->phba; rc = lpfc_issue_els_rscn(phba->pport, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6420 NVMET subsystem change: Notification %s\n", (rc) ? 
"Failed" : "Sent"); } +#endif static struct nvmet_fc_target_template lpfc_tgttemplate = { .targetport_delete = lpfc_nvmet_targetport_delete, @@ -1222,7 +1221,9 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .fcp_abort = lpfc_nvmet_xmt_fcp_abort, .fcp_req_release = lpfc_nvmet_xmt_fcp_release, .defer_rcv = lpfc_nvmet_defer_rcv, +#ifdef OOB_FC_TRANSPORT .discovery_event = lpfc_nvmet_discovery_event, +#endif .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, @@ -1304,7 +1305,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6419 Failed allocate memory for " "nvmet context lists\n"); return -ENOMEM; @@ -1362,7 +1363,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6404 Ran out of memory for NVMET\n"); return -ENOMEM; } @@ -1371,7 +1372,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) GFP_KERNEL); if (!ctx_buf->context) { kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6405 Ran out of NVMET " "context memory\n"); return -ENOMEM; @@ -1383,7 +1384,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) if (!ctx_buf->iocbq) { kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6406 Ran out of NVMET iocb/WQEs\n"); return -ENOMEM; } @@ -1402,7 +1403,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } @@ -1444,14 +1445,16 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) } return 0; } +#endif int lpfc_nvmet_create_targetport(struct lpfc_hba *phba) { + int error = 0; +#ifdef BUILD_NVMET_FC struct lpfc_vport *vport = phba->pport; struct lpfc_nvmet_tgtport *tgtp; struct nvmet_fc_port_info pinfo; - int error; if (phba->targetport) return 0; @@ -1473,15 +1476,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, &phba->pcidev->dev, &phba->targetport); -#else - error = -ENOENT; -#endif if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6025 Cannot register NVME targetport x%x: " "portnm %llx nodenm %llx segs %d qs %d\n", error, @@ -1543,12 +1542,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) atomic_set(&tgtp->defer_fod, 0); atomic_set(&tgtp->defer_wqfull, 0); } +#else + error = -ENOMEM; +#endif return error; } int lpfc_nvmet_update_targetport(struct lpfc_hba *phba) { +#ifdef BUILD_NVMET_FC struct lpfc_vport *vport = phba->pport; if (!phba->targetport) @@ -1559,355 +1562,14 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba) phba->targetport, vport->fc_myDID); phba->targetport->port_id = vport->fc_myDID; - return 0; -} - -/** - * 
lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort - * @phba: pointer to lpfc hba data structure. - * @axri: pointer to the nvmet xri abort wcqe structure. - * - * This routine is invoked by the worker thread to process a SLI4 fast-path - * NVMET aborted xri. - **/ -void -lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) -{ -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); - uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; - struct lpfc_nvmet_tgtport *tgtp; - struct nvmefc_tgt_fcp_req *req = NULL; - struct lpfc_nodelist *ndlp; - unsigned long iflag = 0; - int rrq_empty = 0; - bool released = false; - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); - - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return; - - if (phba->targetport) { - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); - } - - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - list_for_each_entry_safe(ctxp, next_ctxp, - &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, - list) { - if (ctxp->ctxbuf->sglq->sli4_xritag != xri) - continue; - - spin_lock(&ctxp->ctxlock); - /* Check if we already received a free context call - * and we have completed processing an abort situation. - */ - if (ctxp->flag & LPFC_NVMET_CTX_RLS && - !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { - list_del_init(&ctxp->list); - released = true; - } - ctxp->flag &= ~LPFC_NVMET_XBUSY; - spin_unlock(&ctxp->ctxlock); - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - - rrq_empty = list_empty(&phba->active_rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); - ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (ndlp && NLP_CHK_NODE_ACT(ndlp) && - (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || - ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { - lpfc_set_rrq_active(phba, ndlp, - ctxp->ctxbuf->sglq->sli4_lxritag, - rxid, 1); - lpfc_sli4_abts_err_handler(phba, ndlp, axri); - } - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6318 XB aborted oxid x%x flg x%x (%x)\n", - ctxp->oxid, ctxp->flag, released); - if (released) - lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); - - if (rrq_empty) - lpfc_worker_wake_up(phba); - return; - } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - - ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); - if (ctxp) { - /* - * Abort already done by FW, so BA_ACC sent. - * However, the transport may be unaware. 
- */ - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " - "flag x%x oxid x%x rxid x%x\n", - xri, ctxp->state, ctxp->flag, ctxp->oxid, - rxid); - - spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= LPFC_NVMET_ABTS_RCV; - ctxp->state = LPFC_NVMET_STE_ABORT; - spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - - lpfc_nvmeio_data(phba, - "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", - xri, raw_smp_processor_id(), 0); - - req = &ctxp->ctx.fcp_req; - if (req) - nvmet_fc_rcv_fcp_abort(phba->targetport, req); - } -#endif -} - -int -lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, - struct fc_frame_header *fc_hdr) -{ -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_hba *phba = vport->phba; - struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; - struct nvmefc_tgt_fcp_req *rsp; - uint32_t sid; - uint16_t oxid, xri; - unsigned long iflag = 0; - - sid = sli4_sid_from_fc_hdr(fc_hdr); - oxid = be16_to_cpu(fc_hdr->fh_ox_id); - - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - list_for_each_entry_safe(ctxp, next_ctxp, - &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, - list) { - if (ctxp->oxid != oxid || ctxp->sid != sid) - continue; - - xri = ctxp->ctxbuf->sglq->sli4_xritag; - - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - - spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= LPFC_NVMET_ABTS_RCV; - spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - - lpfc_nvmeio_data(phba, - "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", - xri, raw_smp_processor_id(), 0); - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); - - rsp = &ctxp->ctx.fcp_req; - nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); - - /* Respond with BA_ACC accordingly */ - lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); - return 0; - } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - - /* check the wait list */ - if (phba->sli4_hba.nvmet_io_wait_cnt) { - struct rqb_dmabuf *nvmebuf; - struct fc_frame_header *fc_hdr_tmp; - u32 sid_tmp; - u16 oxid_tmp; - bool found = false; - - spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); - - /* match by oxid and s_id */ - list_for_each_entry(nvmebuf, - &phba->sli4_hba.lpfc_nvmet_io_wait_list, - hbuf.list) { - fc_hdr_tmp = (struct fc_frame_header *) - (nvmebuf->hbuf.virt); - oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id); - sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); - if (oxid_tmp != oxid || sid_tmp != sid) - continue; - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6321 NVMET Rcv ABTS oxid x%x from x%x " - "is waiting for a ctxp\n", - oxid, sid); - - list_del_init(&nvmebuf->hbuf.list); - phba->sli4_hba.nvmet_io_wait_cnt--; - found = true; - break; - } - spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, - iflag); - - /* free buffer since already posted a new DMA buffer to RQ */ - if (found) { - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); - /* Respond with BA_ACC accordingly */ - lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); - return 0; - } - } - - /* check active list */ - ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); - if (ctxp) { - xri = ctxp->ctxbuf->sglq->sli4_xritag; - - spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP); - spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - - lpfc_nvmeio_data(phba, - "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", - xri, 
raw_smp_processor_id(), 0); - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " - "flag x%x state x%x\n", - ctxp->oxid, xri, ctxp->flag, ctxp->state); - - if (ctxp->flag & LPFC_NVMET_TNOTIFY) { - /* Notify the transport */ - nvmet_fc_rcv_fcp_abort(phba->targetport, - &ctxp->ctx.fcp_req); - } else { - cancel_work_sync(&ctxp->ctxbuf->defer_work); - spin_lock_irqsave(&ctxp->ctxlock, iflag); - lpfc_nvmet_defer_release(phba, ctxp); - spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - } - lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, - ctxp->oxid); - - lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); - return 0; - } - - lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", - oxid, raw_smp_processor_id(), 1); - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid); - - /* Respond with BA_RJT accordingly */ - lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); #endif return 0; } -static void -lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, - struct lpfc_nvmet_rcv_ctx *ctxp) -{ - struct lpfc_sli_ring *pring; - struct lpfc_iocbq *nvmewqeq; - struct lpfc_iocbq *next_nvmewqeq; - unsigned long iflags; - struct lpfc_wcqe_complete wcqe; - struct lpfc_wcqe_complete *wcqep; - - pring = wq->pring; - wcqep = &wcqe; - - /* Fake an ABORT error code back to cmpl routine */ - memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete)); - bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT); - wcqep->parameter = IOERR_ABORT_REQUESTED; - - spin_lock_irqsave(&pring->ring_lock, iflags); - list_for_each_entry_safe(nvmewqeq, next_nvmewqeq, - &wq->wqfull_list, list) { - if (ctxp) { - /* Checking for a specific IO to flush */ - if (nvmewqeq->context2 == ctxp) { - list_del(&nvmewqeq->list); - spin_unlock_irqrestore(&pring->ring_lock, - iflags); - lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, - wcqep); - return; - } - continue; - } else { - /* Flush all IOs */ - list_del(&nvmewqeq->list); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep); - spin_lock_irqsave(&pring->ring_lock, iflags); - } - } - if (!ctxp) - wq->q_flag &= ~HBA_NVMET_WQFULL; - spin_unlock_irqrestore(&pring->ring_lock, iflags); -} - -void -lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, - struct lpfc_queue *wq) -{ -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_sli_ring *pring; - struct lpfc_iocbq *nvmewqeq; - struct lpfc_nvmet_rcv_ctx *ctxp; - unsigned long iflags; - int rc; - - /* - * Some WQE slots are available, so try to re-issue anything - * on the WQ wqfull_list. 
- */ - pring = wq->pring; - spin_lock_irqsave(&pring->ring_lock, iflags); - while (!list_empty(&wq->wqfull_list)) { - list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, - list); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2; - rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); - spin_lock_irqsave(&pring->ring_lock, iflags); - if (rc == -EBUSY) { - /* WQ was full again, so put it back on the list */ - list_add(&nvmewqeq->list, &wq->wqfull_list); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - return; - } - if (rc == WQE_SUCCESS) { -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (ctxp->ts_cmd_nvme) { - if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP) - ctxp->ts_status_wqput = ktime_get_ns(); - else - ctxp->ts_data_wqput = ktime_get_ns(); - } -#endif - } else { - WARN_ON(rc); - } - } - wq->q_flag &= ~HBA_NVMET_WQFULL; - spin_unlock_irqrestore(&pring->ring_lock, iflags); - -#endif -} - void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) { -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) +#ifdef BUILD_NVMET_FC struct lpfc_nvmet_tgtport *tgtp; struct lpfc_queue *wq; uint32_t qidx; @@ -1923,9 +1585,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6179 Unreg targetport x%px timeout " "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); @@ -1951,19 +1613,16 @@ static void lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct hbq_dmabuf *nvmebuf) { -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) +#ifdef BUILD_NVMET_FC struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; struct lpfc_nvmet_rcv_ctx *ctxp; uint32_t *payload; uint32_t size, oxid, sid, rc; - fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); - oxid = be16_to_cpu(fc_hdr->fh_ox_id); - - if (!phba->targetport) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6154 LS Drop IO x%x\n", oxid); + if (!nvmebuf || !phba->targetport) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6154 LS Drop IO\n"); oxid = 0; size = 0; sid = 0; @@ -1971,6 +1630,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, goto dropit; } + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; payload = (uint32_t *)(nvmebuf->dbuf.virt); size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); @@ -1979,14 +1641,15 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); if (ctxp == NULL) { atomic_inc(&tgtp->rcv_ls_req_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6155 LS Drop IO x%x: Alloc\n", oxid); dropit: lpfc_nvmeio_data(phba, "NVMET LS DROP: " "xri x%x sz %d from %06x\n", oxid, size, sid); - lpfc_in_buf_free(phba, &nvmebuf->dbuf); + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); return; } ctxp->phba = phba; @@ -2007,8 +1670,13 @@ dropit: * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. 
*/ atomic_inc(&tgtp->rcv_ls_req_in); +#if defined(BUILD_NVME_PLUS) + rc = nvmet_fc_rcv_ls_req(phba->targetport, NULL, &ctxp->ctx.ls_req, + payload, size); +#else rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, payload, size); +#endif lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " @@ -2025,139 +1693,24 @@ dropit: oxid, size, sid); atomic_inc(&tgtp->rcv_ls_req_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", ctxp->oxid, rc); /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ - lpfc_in_buf_free(phba, &nvmebuf->dbuf); + if (nvmebuf) + lpfc_in_buf_free(phba, &nvmebuf->dbuf); atomic_inc(&tgtp->xmt_ls_abort); lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); #endif } -static void -lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) -{ -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; - struct lpfc_hba *phba = ctxp->phba; - struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; - struct lpfc_nvmet_tgtport *tgtp; - uint32_t *payload, qno; - uint32_t rc; - unsigned long iflags; - - if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6159 process_rcv_fcp_req, nvmebuf is NULL, " - "oxid: x%x flg: x%x state: x%x\n", - ctxp->oxid, ctxp->flag, ctxp->state); - spin_lock_irqsave(&ctxp->ctxlock, iflags); - lpfc_nvmet_defer_release(phba, ctxp); - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, - ctxp->oxid); - return; - } - - if (ctxp->flag & LPFC_NVMET_ABTS_RCV) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6324 IO oxid x%x aborted\n", - ctxp->oxid); - return; - } - - payload = (uint32_t *)(nvmebuf->dbuf.virt); - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - ctxp->flag |= LPFC_NVMET_TNOTIFY; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (ctxp->ts_isr_cmd) - ctxp->ts_cmd_nvme = ktime_get_ns(); -#endif - /* - * The calling sequence should be: - * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. - * When we return from nvmet_fc_rcv_fcp_req, all relevant info - * the NVME command / FC header is stored. - * A buffer has already been reposted for this IO, so just free - * the nvmebuf. 
- */ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, - payload, ctxp->size); - /* Process FCP command */ - if (rc == 0) { - atomic_inc(&tgtp->rcv_fcp_cmd_out); - spin_lock_irqsave(&ctxp->ctxlock, iflags); - if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) || - (nvmebuf != ctxp->rqb_buffer)) { - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - return; - } - ctxp->rqb_buffer = NULL; - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ - return; - } - - /* Processing of FCP command is deferred */ - if (rc == -EOVERFLOW) { - lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " - "from %06x\n", - ctxp->oxid, ctxp->size, ctxp->sid); - atomic_inc(&tgtp->rcv_fcp_cmd_out); - atomic_inc(&tgtp->defer_fod); - spin_lock_irqsave(&ctxp->ctxlock, iflags); - if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) { - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - return; - } - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - /* - * Post a replacement DMA buffer to RQ and defer - * freeing rcv buffer till .defer_rcv callback - */ - qno = nvmebuf->idx; - lpfc_post_rq_buffer( - phba, phba->sli4_hba.nvmet_mrq_hdr[qno], - phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); - return; - } - ctxp->flag &= ~LPFC_NVMET_TNOTIFY; - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", - ctxp->oxid, rc, - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out), - atomic_read(&tgtp->xmt_fcp_release)); - lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", - ctxp->oxid, ctxp->size, ctxp->sid); - spin_lock_irqsave(&ctxp->ctxlock, iflags); - lpfc_nvmet_defer_release(phba, ctxp); - spin_unlock_irqrestore(&ctxp->ctxlock, iflags); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); -#endif -} - -static void -lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) -{ -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) - struct lpfc_nvmet_ctxbuf *ctx_buf = - container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); - - lpfc_nvmet_process_rcv_fcp_req(ctx_buf); -#endif -} - +#ifdef BUILD_NVMET_FC static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) { -#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; struct lpfc_nvmet_ctx_info *get_infop; int i; @@ -2186,8 +1739,9 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, /* Just take the entire context list, if there are any */ if (get_infop->nvmet_ctx_list_cnt) { - list_splice_init(&get_infop->nvmet_ctx_list, + list_splice(&get_infop->nvmet_ctx_list, ¤t_infop->nvmet_ctx_list); + INIT_LIST_HEAD(&get_infop->nvmet_ctx_list); current_infop->nvmet_ctx_list_cnt = get_infop->nvmet_ctx_list_cnt - 1; get_infop->nvmet_ctx_list_cnt = 0; @@ -2205,10 +1759,10 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, get_infop = get_infop->nvmet_ctx_next_cpu; } -#endif /* Nothing found, all contexts for the MRQ are in-flight */ return NULL; } +#endif /** * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer @@ -2232,6 +1786,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, uint64_t isr_timestamp, uint8_t cqflag) { +#ifdef BUILD_NVMET_FC struct lpfc_nvmet_rcv_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; @@ -2241,12 +1796,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, unsigned long iflag; int current_cpu; - if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) - return; - 
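+	/* NVMET FC support for this routine is resolved at compile time
+	 * via BUILD_NVMET_FC, so no runtime IS_ENABLED() check is needed
+	 * here.
+	 */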
ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6157 NVMET FCP Drop IO\n"); if (nvmebuf) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); @@ -2277,15 +1829,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, size = nvmebuf->bytes_recv; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { - if (current_cpu < LPFC_CHECK_CPU_CNT) { - if (idx != current_cpu) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, - "6703 CPU Check rcv: " - "cpu %d expect %d\n", - current_cpu, idx); - phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++; - } + if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) { + this_cpu_inc(phba->sli4_hba.c_stat->rcv_io); + if (idx != current_cpu) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6703 CPU Check rcv: " + "cpu %d expect %d\n", + current_cpu, idx); } #endif @@ -2311,6 +1861,10 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); atomic_inc(&tgtp->defer_ctx); + lpfc_throttle_log(phba, &phba->log, LOG_NVME_IOERR, + "6180 IO waiting, driver buffer pool is " + "empty: oxid x%x queue %d S_ID x%x\n", + oxid, qno, sli4_sid_from_fc_hdr(fc_hdr)); return; } @@ -2321,12 +1875,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); if (ctxp->state != LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6414 NVMET Context corrupt %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); } ctxp->wqeq = NULL; - ctxp->txrdy = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; @@ -2364,7 +1917,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6325 Unable to queue work for oxid x%x. 
" "FCP Drop IO [x%x x%x x%x]\n", ctxp->oxid, @@ -2377,6 +1930,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); } +#endif } /** @@ -2401,6 +1955,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, d_buf = piocb->context2; nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3015 LS Drop IO\n"); + return; + } if (phba->nvmet_support == 0) { lpfc_in_buf_free(phba, &nvmebuf->dbuf); return; @@ -2429,10 +1988,17 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint64_t isr_timestamp, uint8_t cqflag) { +#ifdef BUILD_NVMET_FC + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3167 NVMET FCP Drop IO\n"); + return; + } if (phba->nvmet_support == 0) { lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; } +#endif lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag); } @@ -2461,6 +2027,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, * Pointer to the newly allocated/prepared nvme wqe data structure * NULL - when nvme wqe data structure allocation/preparation failed **/ +#ifdef BUILD_NVMET_FC static struct lpfc_iocbq * lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, @@ -2471,7 +2038,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, union lpfc_wqe128 *wqe; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6104 NVMET prep LS wqe: link err: " "NPORT x%x oxid:x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2481,7 +2048,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = lpfc_sli_get_iocbq(phba); if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6105 NVMET prep LS wqe: No WQE: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2492,7 +2059,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6106 NVMET prep LS wqe: No ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2595,14 +2162,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, struct scatterlist *sgel; union lpfc_wqe128 *wqe; struct ulp_bde64 *bde; - uint32_t *txrdy; dma_addr_t physaddr; - int i, cnt; + int i, cnt, nsegs; int do_pbde; int xc = 1; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6107 NVMET prep FCP wqe: link err:" "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2613,7 +2179,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6108 NVMET prep FCP wqe: no ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2621,13 +2187,14 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, 
KERN_ERR, LOG_TRACE_EVENT, "6109 NVMET prep FCP wqe: seg cnt err: " "NPORT x%x oxid x%x ste %d cnt %d\n", ctxp->sid, ctxp->oxid, ctxp->state, phba->cfg_nvme_seg_cnt); return NULL; } + nsegs = rsp->sg_cnt; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; nvmewqe = ctxp->wqeq; @@ -2635,7 +2202,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6110 NVMET prep FCP wqe: No " "WQE: NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2653,7 +2220,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, (ctxp->state == LPFC_NVMET_STE_DATA)) { wqe = &nvmewqe->wqe; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6111 Wrong state NVMET FCP: %d cnt %d\n", ctxp->state, ctxp->entry_cnt); return NULL; @@ -2757,23 +2324,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, &lpfc_treceive_cmd_template.words[3], sizeof(uint32_t) * 9); - /* Words 0 - 2 : The first sg segment */ - txrdy = dma_pool_alloc(phba->txrdy_payload_pool, - GFP_KERNEL, &physaddr); - if (!txrdy) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6041 Bad txrdy buffer: oxid x%x\n", - ctxp->oxid); - return NULL; - } - ctxp->txrdy = txrdy; - ctxp->txrdy_phys = physaddr; - wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; - wqe->fcp_treceive.bde.addrLow = - cpu_to_le32(putPaddrLow(physaddr)); - wqe->fcp_treceive.bde.addrHigh = - cpu_to_le32(putPaddrHigh(physaddr)); + /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ + wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; + wqe->fcp_treceive.bde.tus.f.bdeSize = 0; + wqe->fcp_treceive.bde.addrLow = 0; + wqe->fcp_treceive.bde.addrHigh = 0; /* Word 4 */ wqe->fcp_treceive.relative_offset = ctxp->offset; @@ -2808,17 +2363,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; - /* Setup 1 TXRDY and 1 SKIP SGE */ - txrdy[0] = 0; - txrdy[1] = cpu_to_be32(rsp->transfer_length); - txrdy[2] = 0; - - sgl->addr_hi = putPaddrHigh(physaddr); - sgl->addr_lo = putPaddrLow(physaddr); + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; sgl->word2 = 0; - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); + sgl->sge_len = 0; sgl++; sgl->addr_hi = 0; sgl->addr_lo = 0; @@ -2883,7 +2434,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, wqe->fcp_trsp.rsvd_12_15[0] = 0; /* Use rspbuf, NOT sg list */ - rsp->sg_cnt = 0; + nsegs = 0; sgl->word2 = 0; atomic_inc(&tgtp->xmt_fcp_rsp); break; @@ -2900,7 +2451,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; nvmewqe->context1 = ndlp; - for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) { + for_each_sg(rsp->sg, sgel, nsegs, i) { physaddr = sg_dma_address(sgel); cnt = sg_dma_len(sgel); sgl->addr_hi = putPaddrHigh(physaddr); @@ -2915,7 +2466,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (i == 0) { bde = (struct ulp_bde64 *)&wqe->words[13]; if (do_pbde) { - /* Words 13-15 (PBDE) */ + /* Words 13-15 (PBDE)*/ bde->addrLow = sgl->addr_lo; bde->addrHigh = sgl->addr_hi; bde->tus.f.bdeSize = @@ -3027,7 +2578,6 @@ 
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result = wcqe->parameter; if (!ctxp) { - /* if context is clear, related io alrady complete */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, @@ -3042,7 +2592,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* Sanity check */ if (ctxp->state != LPFC_NVMET_STE_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6112 ABTS Wrong state:%d oxid x%x\n", ctxp->state, ctxp->oxid); } @@ -3114,7 +2664,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result, wcqe->word3); if (!ctxp) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6415 NVMET LS Abort No ctx: WCQE: " "%08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, @@ -3125,7 +2675,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6416 NVMET LS abort cmpl state mismatch: " "oxid x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); @@ -3158,7 +2708,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6134 Drop ABTS - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -3204,7 +2754,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); /* Word 10 */ - bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); + bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 0); bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_LENLOC_WORD12); @@ -3239,9 +2789,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; - union lpfc_wqe128 *abts_wqe; struct lpfc_nodelist *ndlp; unsigned long flags; + u8 opt; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3255,7 +2805,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6160 Drop ABORT - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -3271,7 +2821,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ @@ -3280,8 +2830,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 0; } abts_wqeq = ctxp->abort_wqeq; - abts_wqe = &abts_wqeq->wqe; ctxp->state = LPFC_NVMET_STE_ABORT; + opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? 
INHIBIT_ABORT : 0; spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ @@ -3298,7 +2848,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " "NVME Req now. hba_flag x%x oxid x%x\n", phba->hba_flag, ctxp->oxid); @@ -3313,7 +2863,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6164 Outstanding NVME I/O Abort Request " "still pending on oxid x%x\n", ctxp->oxid); @@ -3327,35 +2877,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* Ready - mark outstanding as aborted by driver. */ abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; - /* WQEs are reused. Clear stale data and set key fields to - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. - */ - memset(abts_wqe, 0, sizeof(*abts_wqe)); - - /* word 3 */ - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); - - /* word 7 */ - bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); - - /* word 8 - tell the FW to abort the IO associated with this - * outstanding exchange ID. - */ - abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; - - /* word 9 - this is the iotag for the abts_wqe completion. */ - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, - abts_wqeq->iotag); - - /* word 10 */ - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); - - /* word 11 */ - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; @@ -3379,7 +2901,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ctxp->flag &= ~LPFC_NVMET_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6166 Failed ABORT issue_wqe with status x%x " "for oxid x%x.\n", rc, ctxp->oxid); @@ -3404,7 +2926,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, } if (ctxp->state == LPFC_NVMET_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); rc = WQE_BUSY; @@ -3442,7 +2964,7 @@ aerr: spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6135 Failed to Issue ABTS for oxid x%x. 
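The hunk above replaces the open-coded abort WQE construction with a single call to lpfc_nvme_prep_abort_wqe(), so the NVMe initiator and target paths share one routine. A minimal sketch of what such a helper has to do, using plain struct fields as stand-ins for the driver's bf_set() bitfield macros and union lpfc_wqe128 (field names and the opcode value here are illustrative, not the real register layout):

#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the 128-byte abort WQE; the real driver
 * packs these values into union lpfc_wqe128 with bf_set(). */
struct abort_wqe_model {
        uint8_t  criteria;      /* word 3: abort by exchange tag */
        uint8_t  ia;            /* word 7: inhibit ABTS on the wire */
        uint8_t  command;       /* word 7: the abort opcode */
        uint32_t abort_tag;     /* word 8: XRI of the exchange to abort */
        uint16_t request_tag;   /* word 9: iotag that keys the completion */
        uint8_t  qosd;          /* word 10 */
        uint8_t  wqec;          /* word 11: ask for a completion entry */
};

static void prep_abort_wqe_model(struct abort_wqe_model *w, uint32_t xritag,
                                 uint16_t iotag, uint8_t opt)
{
        /* WQEs are recycled, so stale fields must be scrubbed first */
        memset(w, 0, sizeof(*w));
        w->criteria = 1;        /* by exchange id (T_XRI_TAG in the driver) */
        w->ia = opt;            /* nonzero: ABTS already seen from initiator */
        w->command = 0x88;      /* illustrative; stands for CMD_ABORT_XRI_CX */
        w->abort_tag = xritag;
        w->request_tag = iotag;
        w->qosd = 1;
        w->wqec = 1;            /* fire a WCQE so the driver sees the status */
}

The opt argument is the interesting part: when the target has already received an ABTS for this exchange (LPFC_NVMET_ABTS_RCV), the code above passes INHIBIT_ABORT so the adapter can clean up the exchange without emitting a second ABTS on the wire.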
Status x%x " "(%x)\n", ctxp->oxid, rc, released); @@ -3466,7 +2988,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, ctxp->state = LPFC_NVMET_STE_LS_ABORT; ctxp->entry_cnt++; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6418 NVMET LS abort state mismatch " "IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); @@ -3478,7 +3000,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, /* Issue ABTS for this WQE based on iotag */ ctxp->wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->wqeq) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6068 Abort failed: No wqeqs: " "xri: x%x\n", xri); /* No failure to an ABTS request. */ @@ -3509,7 +3031,476 @@ out: abts_wqeq->context3 = NULL; lpfc_sli_release_iocbq(phba, abts_wqeq); kfree(ctxp); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6056 Failed to Issue ABTS. Status x%x\n", rc); return 0; } +#endif + +/** + * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the nvmet xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * NVMET aborted xri. + **/ +void +lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ +#ifdef BUILD_NVMET_FC + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); + struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; + struct lpfc_nvmet_tgtport *tgtp; + struct nvmefc_tgt_fcp_req *req = NULL; + struct lpfc_nodelist *ndlp; + unsigned long iflag = 0; + int rrq_empty = 0; + bool released = false; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); + + if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + return; + + if (phba->targetport) { + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); + } + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_for_each_entry_safe(ctxp, next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + if (ctxp->ctxbuf->sglq->sli4_xritag != xri) + continue; + + spin_lock(&ctxp->ctxlock); + /* Check if we already received a free context call + * and we have completed processing an abort situation. 
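The list walk above relies on a small flag handshake rather than reference counting: LPFC_NVMET_CTX_RLS records that the transport has released the context, LPFC_NVMET_ABORT_OP that a driver-issued abort is still outstanding, and LPFC_NVMET_XBUSY that the adapter still owns the exchange. Whichever event clears the last obstacle reposts the context buffer. A compact model of that decision, with shortened flag names and locking omitted:

#include <stdbool.h>

#define CTX_RLS   0x1   /* transport has released the context */
#define CTX_ABORT 0x2   /* driver abort still in flight */
#define CTX_XBUSY 0x4   /* exchange still owned by the adapter */

struct ctx_model {
        unsigned int flag;
};

/* Called from the XRI-aborted completion: the hardware is finished
 * with the exchange, so drop XBUSY; the context may be reposted only
 * if the transport already released it and no abort is pending. */
static bool xri_aborted_can_repost(struct ctx_model *c)
{
        c->flag &= ~CTX_XBUSY;
        return (c->flag & CTX_RLS) && !(c->flag & CTX_ABORT);
}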
+ */ + if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && + !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { + list_del_init(&ctxp->list); + released = true; + } + ctxp->flag &= ~LPFC_NVMET_XBUSY; + spin_unlock(&ctxp->ctxlock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + + rrq_empty = list_empty(&phba->active_rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || + (ndlp->nlp_state == NLP_STE_MAPPED_NODE))) { + lpfc_set_rrq_active(phba, ndlp, + ctxp->ctxbuf->sglq->sli4_lxritag, + rxid, 1); + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6318 XB aborted oxid x%x flg x%x (%x)\n", + ctxp->oxid, ctxp->flag, released); + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + + if (rrq_empty) + lpfc_worker_wake_up(phba); + return; + } + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); + if (ctxp) { + /* + * Abort already done by FW, so BA_ACC sent. + * However, the transport may be unaware. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6323 NVMET Rcv ABTS xri x%x ctxp state x%x " + "flag x%x oxid x%x rxid x%x\n", + xri, ctxp->state, ctxp->flag, ctxp->oxid, + rxid); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVMET_ABTS_RCV; + ctxp->state = LPFC_NVMET_STE_ABORT; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + req = &ctxp->ctx.fcp_req; + if (req) + nvmet_fc_rcv_fcp_abort(phba->targetport, req); + } +#endif +} + +int +lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, + struct fc_frame_header *fc_hdr) +{ +#ifdef BUILD_NVMET_FC +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_hba *phba = vport->phba; + struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; + struct nvmefc_tgt_fcp_req *rsp; + uint32_t sid; + uint16_t oxid, xri; + unsigned long iflag = 0; + + sid = sli4_sid_from_fc_hdr(fc_hdr); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_for_each_entry_safe(ctxp, next_ctxp, + &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + list) { + if (ctxp->oxid != oxid || ctxp->sid != sid) + continue; + + xri = ctxp->ctxbuf->sglq->sli4_xritag; + + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVMET_ABTS_RCV; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6319 NVMET Rcv ABTS:acc xri x%x\n", + xri); + + rsp = &ctxp->ctx.fcp_req; + nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); + + /* Respond with BA_ACC accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* check the wait list */ + if (phba->sli4_hba.nvmet_io_wait_cnt) { + struct rqb_dmabuf *nvmebuf; + struct fc_frame_header *fc_hdr_tmp; + u32 sid_tmp; + u16 oxid_tmp; + bool found = false; + + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + + /* match by oxid and s_id */ + 
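An OX_ID is only unique per originating N_Port, so the lookups in lpfc_nvmet_rcv_unsol_abort() must compare both the source ID and the exchange ID before concluding that an ABTS refers to a given command. A self-contained sketch of the key extraction (the struct and helper names are invented; the driver uses struct fc_frame_header with sli4_sid_from_fc_hdr() and be16_to_cpu()):

#include <stdint.h>

/* Minimal model of the FC frame header fields used for ABTS matching */
struct fc_hdr_model {
        uint8_t fh_s_id[3];     /* 24-bit source N_Port id, big endian */
        uint8_t fh_ox_id[2];    /* originator exchange id, big endian */
};

static uint32_t sid_from_hdr(const struct fc_hdr_model *h)
{
        return (h->fh_s_id[0] << 16) | (h->fh_s_id[1] << 8) | h->fh_s_id[2];
}

static uint16_t oxid_from_hdr(const struct fc_hdr_model *h)
{
        return (uint16_t)((h->fh_ox_id[0] << 8) | h->fh_ox_id[1]);
}

/* A context matches the ABTS only if both keys agree, since OX_IDs
 * are unique per initiator, not fabric-wide. */
static int abts_matches(uint32_t ctx_sid, uint16_t ctx_oxid,
                        const struct fc_hdr_model *h)
{
        return ctx_sid == sid_from_hdr(h) && ctx_oxid == oxid_from_hdr(h);
}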
list_for_each_entry(nvmebuf, + &phba->sli4_hba.lpfc_nvmet_io_wait_list, + hbuf.list) { + fc_hdr_tmp = (struct fc_frame_header *) + (nvmebuf->hbuf.virt); + oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id); + sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp); + if (oxid_tmp != oxid || sid_tmp != sid) + continue; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6321 NVMET Rcv ABTS oxid x%x from x%x " + "is waiting for a ctxp\n", + oxid, sid); + + list_del_init(&nvmebuf->hbuf.list); + phba->sli4_hba.nvmet_io_wait_cnt--; + found = true; + break; + } + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + /* free buffer since already posted a new DMA buffer to RQ */ + if (found) { + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + /* Respond with BA_ACC accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + } + + /* check active list */ + ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid); + if (ctxp) { + xri = ctxp->ctxbuf->sglq->sli4_xritag; + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + + lpfc_nvmeio_data(phba, + "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", + xri, raw_smp_processor_id(), 0); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x " + "flag x%x state x%x\n", + ctxp->oxid, xri, ctxp->flag, ctxp->state); + + if (ctxp->flag & LPFC_NVMET_TNOTIFY) { + /* Notify the transport */ + nvmet_fc_rcv_fcp_abort(phba->targetport, + &ctxp->ctx.fcp_req); + } else { + cancel_work_sync(&ctxp->ctxbuf->defer_work); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + } + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); + return 0; + } + + lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n", + oxid, raw_smp_processor_id(), 1); + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid); + + /* Respond with BA_RJT accordingly */ + lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); +#endif +#endif + return 0; +} + +#ifdef BUILD_NVMET_FC +static void +lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, + struct lpfc_nvmet_rcv_ctx *ctxp) +{ + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_iocbq *next_nvmewqeq; + unsigned long iflags; + struct lpfc_wcqe_complete wcqe; + struct lpfc_wcqe_complete *wcqep; + + pring = wq->pring; + wcqep = &wcqe; + + /* Fake an ABORT error code back to cmpl routine */ + memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete)); + bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT); + wcqep->parameter = IOERR_ABORT_REQUESTED; + + spin_lock_irqsave(&pring->ring_lock, iflags); + list_for_each_entry_safe(nvmewqeq, next_nvmewqeq, + &wq->wqfull_list, list) { + if (ctxp) { + /* Checking for a specific IO to flush */ + if (nvmewqeq->context2 == ctxp) { + list_del(&nvmewqeq->list); + spin_unlock_irqrestore(&pring->ring_lock, + iflags); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, + wcqep); + return; + } + continue; + } else { + /* Flush all IOs */ + list_del(&nvmewqeq->list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep); + spin_lock_irqsave(&pring->ring_lock, iflags); + } + } + if (!ctxp) + wq->q_flag &= ~HBA_NVMET_WQFULL; + spin_unlock_irqrestore(&pring->ring_lock, iflags); +} +#endif + +void 
+lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, + struct lpfc_queue *wq) +{ +#ifdef BUILD_NVMET_FC +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *nvmewqeq; + struct lpfc_nvmet_rcv_ctx *ctxp; + unsigned long iflags; + int rc; + + /* + * Some WQE slots are available, so try to re-issue anything + * on the WQ wqfull_list. + */ + pring = wq->pring; + spin_lock_irqsave(&pring->ring_lock, iflags); + while (!list_empty(&wq->wqfull_list)) { + list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, + list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); + spin_lock_irqsave(&pring->ring_lock, iflags); + if (rc == -EBUSY) { + /* WQ was full again, so put it back on the list */ + list_add(&nvmewqeq->list, &wq->wqfull_list); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + return; + } + if (rc == WQE_SUCCESS) { +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_cmd_nvme) { + if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP) + ctxp->ts_status_wqput = ktime_get_ns(); + else + ctxp->ts_data_wqput = ktime_get_ns(); + } +#endif + } else { + WARN_ON(rc); + } + } + wq->q_flag &= ~HBA_NVMET_WQFULL; + spin_unlock_irqrestore(&pring->ring_lock, iflags); + +#endif +#endif +} + +#ifdef BUILD_NVMET_FC +static void +lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_hba *phba = ctxp->phba; + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t *payload, qno; + uint32_t rc; + unsigned long iflags; + + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6159 process_rcv_fcp_req, nvmebuf is NULL, " + "oxid: x%x flg: x%x state: x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return; + } + + if (ctxp->flag & LPFC_NVMET_ABTS_RCV) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "6324 IO oxid x%x aborted\n", + ctxp->oxid); + return; + } + + payload = (uint32_t *)(nvmebuf->dbuf.virt); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + ctxp->flag |= LPFC_NVMET_TNOTIFY; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (ctxp->ts_isr_cmd) + ctxp->ts_cmd_nvme = ktime_get_ns(); +#endif + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info + * the NVME command / FC header is stored. + * A buffer has already been reposted for this IO, so just free + * the nvmebuf. 
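lpfc_nvmet_wqfull_process() above is the drain half of a requeue-on-busy scheme: lpfc_nvmet_wqfull_flush() fails deferred WQEs back to the transport with a synthesized IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED completion, while the process routine re-issues entries in FIFO order and, on the first -EBUSY, puts the entry back at the head and waits for the next freed slot. A toy model of the drain loop (the array queue and try_issue() are stand-ins for the wqfull_list and lpfc_sli4_issue_wqe()):

#include <stdio.h>

#define QDEPTH 8

static int wq_slots = 4;        /* pretend adapter WQ capacity */

/* Stand-in for lpfc_sli4_issue_wqe(); -1 models -EBUSY (WQ full) */
static int try_issue(int io)
{
        if (wq_slots == 0)
                return -1;
        wq_slots--;
        printf("issued deferred io %d\n", io);
        return 0;
}

struct pending {
        int ios[QDEPTH];
        int head, tail;         /* FIFO of IOs that hit a full WQ */
};

static void drain_deferred(struct pending *q)
{
        while (q->head != q->tail) {
                /* Peek instead of pop: on failure the IO simply stays
                 * at the head, like list_add() back onto wqfull_list. */
                if (try_issue(q->ios[q->head]) < 0)
                        return;
                q->head = (q->head + 1) % QDEPTH;
        }
}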
+	 */
+	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+				  payload, ctxp->size);
+	/* Process FCP command */
+	if (rc == 0) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+		    (nvmebuf != ctxp->rqb_buffer)) {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+			return;
+		}
+		ctxp->rqb_buffer = NULL;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+		return;
+	}
+
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
+				 "from %06x\n",
+				 ctxp->oxid, ctxp->size, ctxp->sid);
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		atomic_inc(&tgtp->defer_fod);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+			return;
+		}
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		/*
+		 * Post a replacement DMA buffer to RQ and defer
+		 * freeing rcv buffer till .defer_rcv callback
+		 */
+		qno = nvmebuf->idx;
+		lpfc_post_rq_buffer(
+			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+		return;
+	}
+	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+			ctxp->oxid, rc,
+			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_out),
+			atomic_read(&tgtp->xmt_fcp_release));
+	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+			 ctxp->oxid, ctxp->size, ctxp->sid);
+	spin_lock_irqsave(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_defer_release(phba, ctxp);
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+#endif
+}
+
+static void
+lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_ctxbuf *ctx_buf =
+		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
+
+	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+#endif
+}
+#endif
+
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..e80f18394d2b 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -21,7 +21,9 @@
  * included with this package.                                     *
  *******************************************************************/

-#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
+#define LPFC_NVMET_MIN_SEGS		16
+#define LPFC_NVMET_DEFAULT_SEGS		64	/* 256K IOs */
+#define LPFC_NVMET_MAX_SEGS		510
 #define LPFC_NVMET_RQE_MIN_POST		128
 #define LPFC_NVMET_RQE_DEF_POST		512
 #define LPFC_NVMET_RQE_DEF_COUNT	2048
@@ -90,31 +92,22 @@ struct lpfc_nvmet_tgtport {
 	atomic_t defer_wqfull;
 };
 
-struct lpfc_nvmet_ctx_info {
-	struct list_head nvmet_ctx_list;
-	spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
-	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
-	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
-	uint16_t nvmet_ctx_list_cnt;
-	char pad[16];  /* pad to a cache-line */
-};
-
-/* This retrieves the context info associated with the specified cpu / mrq */
-#define lpfc_get_ctx_list(phba, cpu, mrq)  \
-	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
-
 struct lpfc_nvmet_rcv_ctx {
+#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_FC))
 	union {
+#if defined(BUILD_NVME_PLUS)
+		struct nvmefc_ls_rsp ls_req;
+#else
 		struct nvmefc_tgt_ls_req ls_req;
+#endif
 		struct nvmefc_tgt_fcp_req fcp_req;
 	} ctx;
+#endif
 	struct list_head list;
 	struct lpfc_hba *phba;
 	struct lpfc_iocbq *wqeq;
 	struct lpfc_iocbq *abort_wqeq;
-	dma_addr_t txrdy_phys;
 	spinlock_t ctxlock; /* protect flag access */
-	uint32_t *txrdy;
 	uint32_t sid;
 	uint32_t offset;
 	uint16_t oxid;
@@ -158,3 +151,17 @@ struct lpfc_nvmet_rcv_ctx {
 	uint64_t ts_status_nvme;
 #endif
 };
+
+struct lpfc_nvmet_ctx_info {
+	struct list_head nvmet_ctx_list;
+	spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+	uint16_t nvmet_ctx_list_cnt;
+	char pad[16];  /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq)  \
+	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
+
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 40706cb842fd..c33f97f8b965 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,8 +1,8 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.
* * www.broadcom.com * @@ -23,10 +23,8 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> -#include <linux/t10-pi.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> @@ -80,6 +78,18 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) return (struct lpfc_rport_data *)sdev->hostdata; } +static struct lpfc_external_dif_support * +lpfc_external_dif_match(struct lpfc_vport *vport, uint64_t lun, uint32_t sid); + +static int +lpfc_external_dif(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_nodelist *ndlp, uint8_t *cdb_ptr); +static void +lpfc_external_dif_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, + struct lpfc_iocbq *pIocbOut); +extern int +lpfc_issue_scsi_inquiry(struct lpfc_vport *, + struct lpfc_nodelist *, uint32_t); static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static void @@ -87,6 +97,33 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); +#ifdef BUILD_ASMIO +static inline unsigned +lpfc_cmd_blksize(struct scsi_cmnd *sc) +{ + return scsi_prot_interval(sc); +} + +#define LPFC_CHECK_PROTECT_GUARD 1 +#define LPFC_CHECK_PROTECT_REF 2 +static inline unsigned +lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) +{ + if (flag == LPFC_CHECK_PROTECT_GUARD) + return (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)); + if (flag == LPFC_CHECK_PROTECT_REF) + return (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)); + return 0; +} + +static inline unsigned +lpfc_cmd_guard_csum(struct scsi_cmnd *sc) +{ + return (scsi_prot_flagged(sc, SCSI_PROT_IP_CHECKSUM)); +} + +#else + static inline unsigned lpfc_cmd_blksize(struct scsi_cmnd *sc) { @@ -110,6 +147,7 @@ lpfc_cmd_guard_csum(struct scsi_cmnd *sc) return 1; return 0; } +#endif /** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. @@ -134,21 +172,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, /** * lpfc_update_stats - Update statistical data for the command completion - * @phba: Pointer to HBA object. + * @vport: The virtual port on which this call is executing. * @lpfc_cmd: lpfc scsi command object pointer. * * This function is called when there is a command completion and this * function updates the statistical data for the command completion. **/ static void -lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) { + struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; struct scsi_cmnd *cmd = lpfc_cmd->pCmd; unsigned long flags; - struct Scsi_Host *shost = cmd->device->host; - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); unsigned long latency; int i; @@ -188,6 +226,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) spin_unlock_irqrestore(shost->host_lock, flags); } + /** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. 
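The BUILD_ASMIO variants earlier in this file wrap the kernel's protection-information queries so the rest of the driver can ask two simple questions: must the HBA verify the guard tag and/or the reference tag, and is the guard an IP checksum rather than CRC-T10DIF? A userspace model of those three helpers (the flags and struct below are stand-ins for struct scsi_cmnd and the scsi_prot_flagged() API the BUILD_ASMIO branch targets):

#include <stdint.h>

enum {
        PROT_GUARD_CHECK = 1 << 0,      /* verify guard tag (CRC/csum) */
        PROT_REF_CHECK   = 1 << 1,      /* verify reference tag (LBA) */
        PROT_IP_CHECKSUM = 1 << 2,      /* guard is an IP checksum */
};

struct cmd_model {
        unsigned int prot_flags;
        unsigned int prot_interval;     /* bytes covered per guard, e.g. 512 */
};

#define CHECK_PROTECT_GUARD     1
#define CHECK_PROTECT_REF       2

static unsigned int cmd_protect(const struct cmd_model *sc, int flag)
{
        if (flag == CHECK_PROTECT_GUARD)
                return sc->prot_flags & PROT_GUARD_CHECK;
        if (flag == CHECK_PROTECT_REF)
                return sc->prot_flags & PROT_REF_CHECK;
        return 0;
}

/* Nonzero means the HBA must compute/verify an IP checksum guard;
 * zero means CRC-T10DIF (see the lpfc_bg_crc() fallback further on). */
static unsigned int cmd_guard_csum(const struct cmd_model *sc)
{
        return sc->prot_flags & PROT_IP_CHECKSUM;
}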
@@ -293,21 +332,40 @@ void lpfc_scsi_dev_block(struct lpfc_hba *phba) { struct lpfc_vport **vports; + struct lpfc_vport *vport; struct Scsi_Host *shost; struct scsi_device *sdev; struct fc_rport *rport; + struct lpfc_external_dif_support *dp; + unsigned long flags; int i; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - shost = lpfc_shost_from_vport(vports[i]); + vport = vports[i]; + shost = lpfc_shost_from_vport(vport); shost_for_each_device(sdev, shost) { rport = starget_to_rport(scsi_target(sdev)); fc_remote_port_delete(rport); } + + /* + * Cleanup all External DIF paths on this vport. They + * will need to be revalidated if the NPort comes back. + */ + spin_lock_irqsave(&vport->external_dif_lock, flags); + list_for_each_entry(dp, &vport->external_dif_list, + listentry) { + lpfc_edsm_set_state(vport, dp, + LPFC_EDSM_NEEDS_INQUIRY); + } + spin_unlock_irqrestore(&vport->external_dif_lock, + flags); + } lpfc_destroy_vport_work_array(phba, vports); + } /** @@ -366,7 +424,6 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) break; } - /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { @@ -474,6 +531,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + /* may be called before queues established if hba_setup fails */ + if (!phba->sli4_hba.hdwq) + return; + spin_lock_irqsave(&phba->hbalock, iflag); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; @@ -481,7 +542,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &qp->lpfc_abts_io_buf_list, list) { - if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) continue; if (psb->rdata && psb->rdata->pnode && @@ -528,13 +589,15 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, list_del_init(&psb->list); psb->flags &= ~LPFC_SBUF_XBUSY; psb->status = IOSTAT_SUCCESS; - if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) { +#if (IS_ENABLED(CONFIG_NVME_FC)) + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) { qp->abts_nvme_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_sli4_nvme_xri_aborted(phba, axri, psb); return; } +#endif qp->abts_scsi_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); @@ -641,7 +704,18 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct fcp_cmd_rsp_buf *tmp = NULL; cpu = raw_smp_processor_id(); - if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { + + /* Lookup Hardware Queue index based on fcp_io_sched module parameter + * and if SCSI mq is turned on. 
+ */ +#if (KERNEL_MAJOR > 4) || defined(BUILD_RHEL8) + if (cmnd && phba->cfg_enable_scsi_mq && + (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)) { +#else + if (cmnd && shost_use_blk_mq(cmnd->device->host) && + phba->cfg_enable_scsi_mq && + (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)) { +#endif tag = blk_mq_unique_tag(cmnd->request); idx = blk_mq_unique_tag_to_hwq(tag); } else { @@ -665,14 +739,17 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, lpfc_cmd->timeout = 0; lpfc_cmd->flags = 0; lpfc_cmd->start_time = jiffies; + lpfc_cmd->edifp = NULL; lpfc_cmd->waitq = NULL; lpfc_cmd->cpu = cpu; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS lpfc_cmd->prot_data_type = 0; #endif tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); - if (!tmp) + if (!tmp) { + lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); return NULL; + } lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; lpfc_cmd->fcp_rsp = tmp->fcp_rsp; @@ -719,7 +796,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, iocb->ulpLe = 1; iocb->ulpClass = CLASS3; - if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { atomic_inc(&ndlp->cmd_pending); lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; } @@ -756,6 +833,7 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { unsigned long iflag = 0; + psb->edifp = NULL; psb->seg_cnt = 0; psb->prot_seg_cnt = 0; @@ -782,6 +860,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; + psb->edifp = NULL; psb->seg_cnt = 0; psb->prot_seg_cnt = 0; @@ -811,10 +890,29 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) atomic_dec(&psb->ndlp->cmd_pending); - psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; + psb->flags &= ~(LPFC_SBUF_NORMAL_DIF | LPFC_SBUF_PASS_DIF | + LPFC_SBUF_BUMP_QDEPTH); phba->lpfc_release_scsi_buf(phba, psb); } +/** + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB + * @data: A pointer to the immediate command data portion of the IOCB. + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. + * + * The routine copies the entire FCP command from @fcp_cmnd to @data while + * byte swapping the data to big endian format for transmission on the wire. + **/ +static void +lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) +{ + int i, j; + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); + i += sizeof(uint32_t), j++) { + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); + } +} + /** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. @@ -865,11 +963,11 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9064 BLKGRD: %s: Too many sg segments from " - "dma_map_sg. Config %d, seg_cnt %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9064 BLKGRD: %s: Too many sg segments" + " from dma_map_sg. 
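lpfc_fcpcmd_to_iocb(), relocated above its first user by this patch, is a word-wise endian-swapping copy: the SLI-3 IOCB carries the 32-byte FCP_CMND IU inline, and the wire format is big endian. A standalone equivalent, where htonl() stands in for the kernel's cpu_to_be32() (both are no-ops on big-endian hosts):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>  /* htonl() */

/* Copy len bytes (a multiple of 4) from fcp_cmnd into the IOCB's
 * immediate-data area, swapping each 32-bit word to big endian. */
static void fcpcmd_to_iocb_model(uint8_t *data, const void *fcp_cmnd,
                                 size_t len)
{
        const uint32_t *src = fcp_cmnd;
        uint32_t *dst = (uint32_t *)data;
        size_t i;

        for (i = 0; i < len / sizeof(uint32_t); i++)
                dst[i] = htonl(src[i]);
}

Copying four bytes at a time works because struct fcp_cmnd is a multiple of four bytes long and the driver keeps it in suitably aligned DMA buffers.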
Config %d, seg_cnt" + " %d\n", __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); @@ -951,9 +1049,42 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0; } +/** + * lpfc_scsi_get_prot_op - Gets the SCSI defined protection data operation + * @sc: The SCSI Layer structure for the IO in question. + * + * This routine calls the SCSI Layer to get the protectio data operation + * associated with the specified IO. Then, if this is an IO effected by an + * External DIF device, the protection operation is adjusted accordingly. + * + * Returns the SCSI defined protection data operation + **/ +uint32_t +lpfc_scsi_get_prot_op(struct scsi_cmnd *sc) +{ + struct lpfc_io_buf *lpfc_cmd; + uint32_t op = scsi_get_prot_op(sc); + + lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; + if (lpfc_cmd->flags & LPFC_SBUF_NORMAL_DIF) { + if (sc->sc_data_direction == DMA_FROM_DEVICE) + op = SCSI_PROT_READ_STRIP; + else if (sc->sc_data_direction == DMA_TO_DEVICE) + op = SCSI_PROT_WRITE_INSERT; + } else if (lpfc_cmd->flags & LPFC_SBUF_PASS_DIF) { + if (sc->sc_data_direction == DMA_FROM_DEVICE) + op = SCSI_PROT_READ_PASS; + else if (sc->sc_data_direction == DMA_TO_DEVICE) + op = SCSI_PROT_WRITE_PASS; + } + return op; +} + + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* Return BG_ERR_INIT if error injection is detected by Initiator */ @@ -987,7 +1118,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scsi_dif_tuple *src = NULL; struct lpfc_nodelist *ndlp; struct lpfc_rport_data *rdata; - uint32_t op = scsi_get_prot_op(sc); + uint32_t op = lpfc_scsi_get_prot_op(sc); uint32_t blksize; uint32_t numblks; sector_t lba; @@ -1059,7 +1190,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, * inserted in middle of the IO. */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "9076 BLKGRD: Injecting reftag error: " "write lba x%lx + x%x oldrefTag x%x\n", (unsigned long)lba, blockoff, @@ -1109,7 +1241,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_TGT | BG_ERR_CHECK; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9078 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1130,7 +1262,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_INIT; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9077 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1157,7 +1289,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_INIT; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9079 BLKGRD: Injecting reftag error: " "read lba x%lx\n", (unsigned long)lba); break; @@ -1179,7 +1311,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, * inserted in middle of the IO. 
*/ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, "9080 BLKGRD: Injecting apptag error: " "write lba x%lx + x%x oldappTag x%x\n", (unsigned long)lba, blockoff, @@ -1228,7 +1361,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_TGT | BG_ERR_CHECK; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0813 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1249,7 +1382,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_INIT; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0812 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1276,7 +1409,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } rc = BG_ERR_INIT; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0814 BLKGRD: Injecting apptag error: " "read lba x%lx\n", (unsigned long)lba); break; @@ -1311,7 +1444,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, rc |= BG_ERR_TGT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0817 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1333,7 +1466,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0816 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsigned long)lba); break; @@ -1361,7 +1494,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0818 BLKGRD: Injecting guard error: " "read lba x%lx\n", (unsigned long)lba); } @@ -1390,7 +1523,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t ret = 0; if (lpfc_cmd_guard_csum(sc)) { - switch (scsi_get_prot_op(sc)) { + switch (lpfc_scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CSUM; @@ -1411,15 +1544,15 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, case SCSI_PROT_NORMAL: default: - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9063 BLKGRD: Bad op/guard:%d/IP combination\n", - scsi_get_prot_op(sc)); + lpfc_scsi_get_prot_op(sc)); ret = 1; break; } } else { - switch (scsi_get_prot_op(sc)) { + switch (lpfc_scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CRC_OUT_NODIF; @@ -1440,9 +1573,9 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, case SCSI_PROT_NORMAL: default: - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", - scsi_get_prot_op(sc)); + lpfc_scsi_get_prot_op(sc)); ret = 1; break; } @@ -1470,7 +1603,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t ret = 0; if (lpfc_cmd_guard_csum(sc)) { - switch (scsi_get_prot_op(sc)) { + switch (lpfc_scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: *rxop = BG_OP_IN_NODIF_OUT_CRC; @@ -1495,7 
+1628,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, } } else { - switch (scsi_get_prot_op(sc)) { + switch (lpfc_scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: *rxop = BG_OP_IN_CSUM_OUT_NODIF; @@ -1726,7 +1859,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgde = scsi_sglist(sc); if (!sgpe || !sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9020 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; @@ -1838,7 +1971,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, return num_bde + 1; if (!sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9065 BLKGRD:%s Invalid data segment\n", __func__); return 0; @@ -1901,8 +2034,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag += protgrp_blks; } else { /* if we're here, we have a bug */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9054 BLKGRD: bug in %s\n", __func__); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9054 BLKGRD: bug in %s\n", __func__); } } while (!alldone); @@ -2152,7 +2285,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgde = scsi_sglist(sc); if (!sgpe || !sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9082 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; @@ -2305,7 +2438,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, return num_sge + 1; if (!sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9086 BLKGRD:%s Invalid data segment\n", __func__); return 0; @@ -2410,8 +2543,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, reftag += protgrp_blks; } else { /* if we're here, we have a bug */ - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9085 BLKGRD: bug in %s\n", __func__); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9085 BLKGRD: bug in %s\n", __func__); } } while (!alldone); @@ -2436,7 +2569,7 @@ static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) { int ret = LPFC_PG_TYPE_INVALID; - unsigned char op = scsi_get_prot_op(sc); + unsigned char op = lpfc_scsi_get_prot_op(sc); switch (op) { case SCSI_PROT_READ_STRIP: @@ -2451,7 +2584,7 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) break; default: if (phba) - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9021 Unsupported protection op:%d\n", op); break; @@ -2481,12 +2614,12 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, /* Check if there is protection data on the wire */ if (sc->sc_data_direction == DMA_FROM_DEVICE) { /* Read check for protection data */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + if (lpfc_scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) return fcpdl; } else { /* Write check for protection data */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + if (lpfc_scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) return fcpdl; } @@ -2525,7 +2658,6 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, int prot_group_type = 0; int fcpdl; int ret = 1; - struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2615,7 +2747,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, scsi_dma_unmap(scsi_cmnd); lpfc_cmd->seg_cnt = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9022 Unexpected protection group %i\n", prot_group_type); return 2; @@ -2647,7 +2779,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, * length for DIF */ if (iocb_cmd->un.fcpi.fcpi_XRdy && - (fcpdl < vport->cfg_first_burst_size)) + (fcpdl < lpfc_cmd->ndlp->first_burst)) iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; return 0; @@ -2659,7 +2791,7 @@ err: scsi_prot_sg_count(scsi_cmnd), scsi_cmnd->sc_data_direction); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9023 Cannot setup S/G List for HBA" "IO segs %d/%d BPL %d SCSI %d: %d %d\n", lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, @@ -2682,7 +2814,29 @@ lpfc_bg_crc(uint8_t *data, int count) uint16_t crc = 0; uint16_t x; +#if defined(BUILD_CITRIX_XS) + uint16_t poly = 0x8BB7L; + unsigned int poly_length = 16; + unsigned int i, j, fb; + + for (i = 0; i < count; i += 2) { + + x = (data[i] << 8) | data[i+1]; + + /* serial shift register implementation */ + for (j = 0; j < poly_length; j++) { + fb = ((x & 0x8000L) == 0x8000L) ^ ((crc & + 0x8000L) == 0x8000L); + x <<= 1; + crc <<= 1; + if (fb) + crc ^= poly; + } + } + +#else crc = crc_t10dif(data, count); +#endif x = cpu_to_be16(crc); return x; } @@ -2727,7 +2881,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) guard_tag = 0; /* First check to see if there is protection data to examine */ - prot = scsi_get_prot_op(cmd); + prot = lpfc_scsi_get_prot_op(cmd); if ((prot == SCSI_PROT_READ_STRIP) || (prot == SCSI_PROT_WRITE_INSERT) || (prot == SCSI_PROT_NORMAL)) @@ -2766,8 +2920,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * First check to see if a protection data * check is valid */ - if ((src->ref_tag == T10_PI_REF_ESCAPE) || - (src->app_tag == T10_PI_APP_ESCAPE)) { + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { start_ref_tag++; goto skipit; } @@ -2988,7 +3142,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, cmd->sense_buffer[10] = 0x80; /* Validity bit */ /* bghm is a "on the wire" FC frame based count */ - switch (scsi_get_prot_op(cmd)) { + switch (lpfc_scsi_get_prot_op(cmd)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: bghm /= cmd->device->sector_size; @@ -3083,11 +3237,12 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) lpfc_cmd->seg_cnt = nseg; if (!phba->cfg_xpsgl && lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" - " %s: Too many sg segments from " - "dma_map_sg. Config %d, seg_cnt %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "9074 BLKGRD:" + " %s: Too many sg segments from " + "dma_map_sg. 
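The BUILD_CITRIX_XS branch of lpfc_bg_crc() above computes CRC-16/T10-DIF with a bit-serial shift register over the polynomial 0x8BB7, two input bytes per outer step, presumably to avoid a dependency on the kernel's crc_t10dif() in that build; the result is then byte-swapped for the wire. A byte-at-a-time equivalent with the commonly cited catalogue check value as a self-test (parameters: init 0, no reflection, no final xor):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint16_t crc_t10dif_model(const uint8_t *data, size_t len)
{
        uint16_t crc = 0;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (uint16_t)data[i] << 8;  /* MSB-first, unreflected */
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ?
                              (uint16_t)((crc << 1) ^ 0x8BB7) :
                              (uint16_t)(crc << 1);
        }
        return crc;
}

int main(void)
{
        /* Standard check input: CRC-16/T10-DIF("123456789") = 0xD0DB */
        const char *msg = "123456789";

        assert(crc_t10dif_model((const uint8_t *)msg, strlen(msg)) == 0xd0db);
        return 0;
}

The table-driven crc_t10dif() produces identical values; the serial form trades speed for needing no lookup table.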
Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); @@ -3263,7 +3418,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, int prot_group_type = 0; int fcpdl; int ret = 1; - struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd @@ -3364,14 +3518,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, scsi_dma_unmap(scsi_cmnd); lpfc_cmd->seg_cnt = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9083 Unexpected protection group %i\n", prot_group_type); return 2; } } - switch (scsi_get_prot_op(scsi_cmnd)) { + switch (lpfc_scsi_get_prot_op(scsi_cmnd)) { case SCSI_PROT_WRITE_STRIP: case SCSI_PROT_READ_STRIP: lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; @@ -3400,7 +3554,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, * length for DIF */ if (iocb_cmd->un.fcpi.fcpi_XRdy && - (fcpdl < vport->cfg_first_burst_size)) + (fcpdl < lpfc_cmd->ndlp->first_burst)) iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; /* @@ -3420,7 +3574,7 @@ err: scsi_prot_sg_count(scsi_cmnd), scsi_cmnd->sc_data_direction); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9084 Cannot setup S/G List for HBA" "IO segs %d/%d SGL %d SCSI %d: %d %d\n", lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, @@ -3589,6 +3743,235 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) psb->pCmd->sc_data_direction); } +uint32_t +lpfc_edsm_process_inq_data_chg(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; + struct lpfc_external_dif_support *dp = lpfc_cmd->edifp; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; + unsigned long flags; + int rc; + + rdata = lpfc_cmd->rdata; + pnode = rdata->pnode; + + spin_lock_irqsave(&vport->external_dif_lock, flags); + if (dp == NULL) { + dp = lpfc_external_dif_match(vport, cmnd->device->lun, + pnode->nlp_sid); + if (dp == NULL) { + spin_unlock_irqrestore(&vport->external_dif_lock, + flags); + return DID_OK; + } + } + rc = lpfc_issue_scsi_inquiry(vport, pnode, dp->lun); + if (rc == 0) + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_PATH_CHECK); + else + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_NEEDS_INQUIRY); + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + dp->err3f_cnt++; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0913 INQ Data Change: Data: " + "%x %x %x", + cmnd->cmnd[0], + be32_to_cpu(fcpcmd->fcpDl), + lpfc_cmd->cur_iocbq.sli4_xritag); + + /* Convert to retryable err */ + return DID_ERROR; +} + +uint32_t +lpfc_edsm_process_data_phase(struct lpfc_vport *vport, + struct lpfc_io_buf *lpfc_cmd) +{ + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; + struct lpfc_external_dif_support *dp = lpfc_cmd->edifp; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; + unsigned long flags; + int rc; + + rdata = lpfc_cmd->rdata; + pnode = rdata->pnode; + + spin_lock_irqsave(&vport->external_dif_lock, flags); + if (dp == NULL) { + dp = lpfc_external_dif_match(vport, cmnd->device->lun, + pnode->nlp_sid); + if (dp == NULL) { + spin_unlock_irqrestore(&vport->external_dif_lock, + flags); + return DID_OK; + } + } + /* + * Extra check in case we dropped the + * LPFC_ASC_TARGET_CHANGED error. 
+ */ + if (dp->state != LPFC_EDSM_PATH_CHECK && + dp->state != LPFC_EDSM_PATH_STANDBY) { + rc = lpfc_issue_scsi_inquiry(vport, pnode, dp->lun); + if (rc == 0) + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_PATH_CHECK); + else + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_NEEDS_INQUIRY); + } + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + dp->err4b_cnt++; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0914 Data Phase Err: Data: " + "%x %x %x", + cmnd->cmnd[0], + be32_to_cpu(fcpcmd->fcpDl), + lpfc_cmd->cur_iocbq.sli4_xritag); + + /* Convert to retryable err */ + return DID_ERROR; +} + +/** + * lpfc_unblock_requests: allow further commands from being queued. + * For single vport, just call scsi_unblock_requests on physical port. + * For multiple vports sent scsi_unblock_requests for all the vports. + */ +void +lpfc_unblock_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_unblock_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_unblock_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_block_requests: prevent further commands from being queued. + * For single vport, just call scsi_block_requests on physical port. + * For multiple vports sent scsi_block_requests for all the vports. + */ +void +lpfc_block_requests(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + struct Scsi_Host *shost; + int i; + + if (atomic_read(&phba->cmf_stop_io)) + return; + + if (phba->sli_rev == LPFC_SLI_REV4 && + !phba->sli4_hba.max_cfg_param.vpi_used) { + shost = lpfc_shost_from_vport(phba->pport); + scsi_block_requests(shost); + return; + } + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + scsi_block_requests(shost); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** + * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion + * @phba: The HBA for which this call is being executed. + * @time: The latency of the IO that completed (in ns) + * @size: The size of the IO that completed + * @shost: SCSI host the IO completed on (NULL for a NVME IO) + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, + * that means the IO was never issued to the HBA, so this routine is + * just being called to cleanup the counter from a previous + * lpfc_update_cmf_cmd call. + */ +int +lpfc_update_cmf_cmpl(struct lpfc_hba *phba, + uint64_t time, uint32_t size, struct Scsi_Host *shost) +{ + struct lpfc_cgn_stat *cgs; + + if (time != LPFC_CGN_NOT_SENT) { + /* lat is ns coming in, save latency in us */ + if (time < 1000) + time = 1; + else + time = (time + 500) / 1000; /* round it */ + + cgs = this_cpu_ptr(phba->cmf_stat); + atomic64_add(size, &cgs->rcv_bytes); + atomic64_add(time, &cgs->rx_latency); + atomic_inc(&cgs->rx_io_cnt); + } + return 0; +} + +/** + * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission + * @phba: The HBA for which this call is being executed. 
+ * @size: The size of the IO that will be issued + * + * The routine adjusts the various Burst and Bandwidth counters used in + * Congestion management and E2E. + */ +int +lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) +{ + uint64_t total; + struct lpfc_cgn_stat *cgs; + int cpu; + + /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && + phba->cmf_max_bytes_per_interval) { + total = 0; + for_each_present_cpu(cpu) { + cgs = per_cpu_ptr(phba->cmf_stat, cpu); + total += atomic64_read(&cgs->total_bytes); + } + if (total >= phba->cmf_max_bytes_per_interval) { + if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { + lpfc_block_requests(phba); + phba->cmf_last_ts = + lpfc_calc_cmf_latency(phba); + } + atomic_inc(&phba->cmf_busy); + return -EBUSY; + } + if (size > atomic_read(&phba->rx_max_read_cnt)) + atomic_set(&phba->rx_max_read_cnt, size); + } + + cgs = this_cpu_ptr(phba->cmf_stat); + atomic64_add(size, &cgs->total_bytes); + return 0; +} + /** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. @@ -3610,12 +3993,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; + struct lpfc_external_dif_support *dp; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *pnode; uint32_t *lp; uint32_t host_status = DID_OK; uint32_t rsplen = 0; uint32_t fcpDl; uint32_t logit = LOG_FCP | LOG_FCP_ERROR; - + uint8_t asc, ascq; /* * If this is a task management command, there is no @@ -3630,17 +4016,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); if (rsplen != 0 && rsplen != 4 && rsplen != 8) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "2719 Invalid response length: " - "tgt x%x lun x%llx cmnd x%x rsplen x%x\n", - cmnd->device->id, - cmnd->device->lun, cmnd->cmnd[0], - rsplen); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "2719 Invalid response length: " + "tgt x%x lun x%llx cmnd x%x rsplen " + "x%x\n", cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + rsplen); host_status = DID_ERROR; goto out; } if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2757 Protocol failure detected during " "processing of FCP I/O op: " "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", @@ -3773,7 +4159,80 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, scsi_set_resid(cmnd, scsi_bufflen(cmnd)); } - out: +out: + rdata = lpfc_cmd->rdata; + pnode = rdata->pnode; + if (pnode && (pnode->nlp_flag & NLP_EXTERNAL_DIF)) { + /* Is this an External DIF array? */ + dp = lpfc_cmd->edifp; + asc = cmnd->sense_buffer[12]; + ascq = cmnd->sense_buffer[13]; + /* Check for LOGICAL BLOCK GUARD CHECK / REF TAG failed */ + if (scsi_status == SAM_STAT_CHECK_CONDITION) { + if (dp && + (asc == LPFC_ASC_DIF_ERR) && + ((ascq == LPFC_ASCQ_GUARD_CHECK) || + (ascq == LPFC_ASCQ_REFTAG_CHECK))) { + host_status = DID_ERROR; + scsi_status = SAM_STAT_GOOD; + /* Convert to retryable err */ + } + + /* + * A Operation Condition Change error on an External DIF + * device indicates its in transition from one mode to + * another. 
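Two details of the CMF accounting above are easy to miss: lpfc_update_cmf_cmpl() stores latency in microseconds using round-half-up with a floor of 1 (999 ns -> 1 us, 1499 ns -> 1 us, 1500 ns -> 2 us), and lpfc_update_cmf_cmd() can sum the per-CPU byte counters without a lock because each CPU only ever adds to its own slot. A userspace model of both, with an array index standing in for the per-CPU lpfc_cgn_stat allocation:

#include <stdint.h>

#define NCPUS 4

static uint64_t total_bytes[NCPUS];     /* per-CPU, written only by owner */

/* ns -> us, rounded, never reported as 0 (matches the completion path) */
static uint64_t lat_ns_to_us(uint64_t ns)
{
        return ns < 1000 ? 1 : (ns + 500) / 1000;
}

/* Submit path: in managed mode, refuse once the interval budget is
 * spent; otherwise charge this CPU's counter and proceed. */
static int cmf_charge(int cpu, uint32_t size, uint64_t budget)
{
        uint64_t total = 0;
        int i;

        for (i = 0; i < NCPUS; i++)
                total += total_bytes[i];        /* approximate, like the
                                                 * unlocked per-CPU sum */
        if (budget && total >= budget)
                return -1;                      /* caller returns -EBUSY */
        total_bytes[cpu] += size;               /* atomic64_add in the driver */
        return 0;
}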
+ */ + if ((asc == LPFC_ASC_TARGET_CHANGED) && + (ascq == LPFC_ASCQ_INQUIRY_DATA)) { + host_status = + lpfc_edsm_process_inq_data_chg( + vport, lpfc_cmd); + if (host_status == DID_ERROR) + scsi_status = SAM_STAT_GOOD; + } + /* + * A Data Phase error on External DIF device indicates + * the IO was sent in the wrong mode. + */ + if ((asc == LPFC_ASC_DATA_PHASE_ERR) && + (ascq == LPFC_ASCQ_EDIF_MODE)) { + host_status = + lpfc_edsm_process_data_phase( + vport, lpfc_cmd); + if (host_status == DID_ERROR) + scsi_status = SAM_STAT_GOOD; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "3031 FCP command x%x force ERROR " + "asc/ascq:x%x/x%x " + "xri x%x lba x%llx stat %d:%d\n", + cmnd->cmnd[0], asc, ascq, + lpfc_cmd->cur_iocbq.sli4_xritag, + (unsigned long long)scsi_get_lba(cmnd), + host_status, scsi_status); + /* + * If the scsi_status has been changed to SAM_STAT_GOOD, + * we need to massage the cmnd and remove all trace + * of the check condition. + */ + if (scsi_status == SAM_STAT_GOOD) { + /* Cleanup FCP response */ + fcprsp->rspStatus2 &= + ~(SNS_LEN_VALID | RESID_UNDER); + fcprsp->rspSnsLen = 0; + + /* resid must be 0 */ + scsi_set_resid(cmnd, 0); + + /* Get rid of any unwanted sense data */ + memset(cmnd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + } + + } + } cmnd->result = host_status << 16 | scsi_status; lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); } @@ -3803,17 +4262,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct Scsi_Host *shost; int idx; uint32_t logit = LOG_FCP; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - int cpu; -#endif + uint32_t lat; /* Guard against abort handler being called at same time */ spin_lock(&lpfc_cmd->buf_lock); /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; - if (!cmd) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + if (!cmd || !phba) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2621 IO completion: Not an active IO\n"); spin_unlock(&lpfc_cmd->buf_lock); return; @@ -3824,11 +4281,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { - cpu = raw_smp_processor_id(); - if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq) - phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; - } + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) + this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); #endif shost = cmd->device->host; @@ -3872,7 +4326,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } #endif - if (lpfc_cmd->status) { + if (unlikely(lpfc_cmd->status)) { if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && (lpfc_cmd->result & IOERR_DRVR_MASK)) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; @@ -3955,7 +4409,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { - if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + if (lpfc_scsi_get_prot_op(cmd) != + SCSI_PROT_NORMAL) { /* * This is a response for a BG enabled * cmd. 
Parse BG error @@ -4005,7 +4460,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, scsi_get_resid(cmd)); } - lpfc_update_stats(phba, lpfc_cmd); + lpfc_update_stats(vport, lpfc_cmd); + if (vport->cfg_max_scsicmpl_time && time_after(jiffies, lpfc_cmd->start_time + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { @@ -4026,9 +4482,26 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (lpfc_cmd->ts_cmd_start) { + lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; + lpfc_cmd->ts_data_io = ktime_get_ns(); + phba->ktime_last_cmd = lpfc_cmd->ts_data_io; + lpfc_io_ktime(phba, lpfc_cmd); + } +#endif lpfc_cmd->pCmd = NULL; spin_unlock(&lpfc_cmd->buf_lock); + /* Check if IO qualified for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmd))) { + /* Used when calculating average latency */ + lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; + lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); + } + /* The sdev is not guaranteed to be valid post scsi_done upcall. */ cmd->scsi_done(cmd); @@ -4045,24 +4518,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_release_scsi_buf(phba, lpfc_cmd); } -/** - * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB - * @data: A pointer to the immediate command data portion of the IOCB. - * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. - * - * The routine copies the entire FCP command from @fcp_cmnd to @data while - * byte swapping the data to big endian format for transmission on the wire. - **/ -static void -lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) -{ - int i, j; - for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); - i += sizeof(uint32_t), j++) { - ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); - } -} - /** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. @@ -4072,7 +4527,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) * This routine initializes fcp_cmnd and iocb data structure from scsi command * to transfer for device with SLI3 interface spec. 
**/ -static void +static int lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { @@ -4083,13 +4538,20 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); struct lpfc_sli4_hdw_queue *hdwq = NULL; int datadir = scsi_cmnd->sc_data_direction; - int idx; + int rc, idx; uint8_t *ptr; bool sli4; + bool scan_io; + uint8_t tmo; uint32_t fcpdl; +#ifndef NO_APEX + uint8_t fcp_priority = 0; + uint8_t pri; + uint32_t lun_index; +#endif if (!pnode || !NLP_CHK_NODE_ACT(pnode)) - return; + return 0; lpfc_cmd->fcp_rsp->rspSnsLen = 0; /* clear task management bits */ @@ -4105,7 +4567,37 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); } + /* Check if we want to make this IO an External DIF device */ + if (vport->phba->cfg_external_dif && + !(pnode->nlp_flag & NLP_SKIP_EXT_DIF)) { + rc = lpfc_external_dif(vport, lpfc_cmd, pnode, + &fcp_cmnd->fcpCdb[0]); + if (rc) + return rc; + } + + scan_io = (scsi_cmnd->cmnd[0] == 0 || scsi_cmnd->cmnd[0] == 0x12 || + scsi_cmnd->cmnd[0] == 0xa0); fcp_cmnd->fcpCntl1 = SIMPLE_Q; +#ifndef NO_APEX + /* + * Get the fcp priority from the table and unpack it. Each byte + * contains the priority for two luns. Low nibble has the even lun. + * Upper nibble has the odd lun. + */ + if (vport->phba->cfg_enable_fcp_priority && + (scsi_cmnd->device->lun < MAX_FCP_PRI_LUN)) { + lun_index = scsi_cmnd->device->lun >> 1; + pri = pnode->fcp_priority[lun_index]; + if (scsi_cmnd->device->lun & 1) + fcp_priority = (pri & 0xf0) >> 4; + else + fcp_priority = pri & 0xf; + } else { + fcp_priority = 0; + } + fcp_cmnd->fcpCntl1 |= (fcp_priority & 0xf) << 3; +#endif sli4 = (phba->sli_rev == LPFC_SLI_REV4); piocbq->iocb.un.fcpi.fcpi_XRdy = 0; @@ -4123,14 +4615,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; - if (vport->cfg_first_burst_size && + if (pnode->first_burst && (pnode->nlp_flag & NLP_FIRSTBURST)) { + u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); - if (fcpdl < vport->cfg_first_burst_size) - piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; - else - piocbq->iocb.un.fcpi.fcpi_XRdy = - vport->cfg_first_burst_size; + xrdy_len = min(fcpdl, pnode->first_burst); + piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; } fcp_cmnd->fcpCntl3 = WRITE_DATA; if (hdwq) @@ -4150,9 +4640,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, if (hdwq) hdwq->scsi_cstat.control_requests++; } - if (phba->sli_rev == 3 && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) - lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer @@ -4168,9 +4655,28 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); piocbq->context1 = lpfc_cmd; - piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; - piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; + if (piocbq->iocb_cmpl == NULL) + piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; + if (scan_io) { + /* + * timeout the command at the firmware level. + * The driver will not get the XRI back if + * this is a tur from the SCAN. 
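+ * For example, a 10000 ms request timeout below yields + * tmo = (10000 / 1000) + 2 = 12 seconds, while a computed value of + * 0 or anything over 60 falls back to the 30 second default.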
+ */ + if (scsi_cmnd->request->timeout > 0) { + tmo = (uint8_t) + (scsi_cmnd->request->timeout / 1000) + 2; + if (tmo == 0 || tmo > 60) + tmo = 30; + } else { + tmo = 30; + } + piocbq->iocb.ulpTimeout = tmo; + } else { + piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; + } piocbq->vport = vport; + return 0; } /** @@ -4273,7 +4779,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1418 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; @@ -4320,7 +4826,7 @@ lpfc_tskmgmt_def_cmpl, * 0, successful */ int -lpfc_check_pci_resettable(const struct lpfc_hba *phba) +lpfc_check_pci_resettable(struct lpfc_hba *phba) { const struct pci_dev *pdev = phba->pcidev; struct pci_dev *ptr = NULL; @@ -4464,10 +4970,17 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba) * This routine restarts the fcp_poll timer, when FCP ring polling is enabled * and the FCP Ring interrupt is disabled. **/ - +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_poll_timeout(unsigned long ptr) +#else void lpfc_poll_timeout(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; +#else struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); +#endif if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, @@ -4478,6 +4991,672 @@ void lpfc_poll_timeout(struct timer_list *t) } } +/* + * External DIF State Machine (EDSM) routines + * + * STATE Descriptions: + * LPFC_EDSM_INIT + * Used when LUN path is initialized and External DIF is configured for + * this HBA. This logic assumes the SCSI Layer will be sending a standard + * INQUIRY, page 0 / evpd 0, after device initialization is complete + * LPFC_EDSM_NEEDS_INQUIRY + * On a Link Up, or RSCN, previously discovered paths may need to be checked + * for External DIF capability by issuing an INQUIRY P0 + * LPFC_EDSM_PATH_STANDBY + * Used when the LUN path points to an External DIF array; however, External + * DIF is NOT enabled at this time. + * LPFC_EDSM_PATH_READY + * Used when the LUN path points to an External DIF array and External DIF + * is enabled. + * LPFC_EDSM_PATH_CHECK + * Used when the External DIF state of the device has changed. + */ +void +lpfc_edsm_set_state(struct lpfc_vport *vport, + struct lpfc_external_dif_support *dp, + uint32_t state) +{ + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0918 EDSM: Device(%d/%lld) State Change %d to %d ", + dp->sid, dp->lun, dp->state, state); + + dp->state = state; +} + +/** + * lpfc_edsm_process_inq - Process a SCSI Layer Inquiry + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: lpfc scsi command object pointer. + * + * Piggyback on a SCSI Layer Inquiry to check for External DIF support + */ +static uint32_t +lpfc_edsm_process_inq(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + uint8_t *cdb_ptr) +{ + struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); + + /* We are only interested in EVPD=0 && page 0 */ + if ((cdb_ptr[1] & 0x1) || cdb_ptr[2]) + return 0; + /* Setup to intercept INQUIRY completion */ + piocbq->iocb_cmpl = lpfc_external_dif_cmpl; + return 1; +} + +/** + * lpfc_edsm_rw_path_rdy - Process a SCSI read/write command + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: lpfc scsi command object pointer.
+ * + * Process a SCSI read/write command on an External DIF path + */ +static void +lpfc_edsm_rw_path_rdy(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + uint8_t *cdb_ptr, uint32_t op) +{ + cdb_ptr[1] |= LPFC_FDIF_CDB_PROTECT; /* Set PROTECT = 001 */ + + switch (op) { + case SCSI_PROT_NORMAL: + lpfc_cmd->flags |= LPFC_SBUF_NORMAL_DIF; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + lpfc_cmd->flags |= LPFC_SBUF_PASS_DIF; + break; + } +} + +/** + * lpfc_edsm_validate_target - Check a remote NPort for External DIF support + * @vport: The virtual port for which this call is being executed. + * @ndlp: Pointer to lpfc_nodelist instance. + * + * Step through all paths to a remote NPort to see if External DIF needs + * to be validated. + */ +void +lpfc_edsm_validate_target(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct lpfc_external_dif_support *dp; + unsigned long flags; + int rc; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0920 External DIF: Validate Target %06x: sid %d", + ndlp->nlp_DID, ndlp->nlp_sid); + + spin_lock_irqsave(&vport->external_dif_lock, flags); + list_for_each_entry(dp, &vport->external_dif_list, listentry) { + if (memcmp(&ndlp->nlp_portname, (uint8_t *)&dp->portName, + sizeof(struct lpfc_name)) == 0) { + if (dp->state != LPFC_EDSM_PATH_READY) { + rc = lpfc_issue_scsi_inquiry(vport, ndlp, + dp->lun); + if (rc == 0) { + lpfc_edsm_set_state( + vport, dp, + LPFC_EDSM_PATH_CHECK); + } else { + lpfc_edsm_set_state( + vport, dp, + LPFC_EDSM_NEEDS_INQUIRY); + } + } + } + } + spin_unlock_irqrestore(&vport->external_dif_lock, flags); +} + +/** + * lpfc_issue_scsi_inquiry - Send a driver initiated SCSI INQUIRY + * @vport: The virtual port for which this call is being executed. + * @ndlp: Pointer to lpfc_nodelist instance. + * + * This function is called when the driver wants to issue a SCSI + * INQUIRY Page 0 evpd 0. This is typically called when the driver + * wants to verify if a device supports External DIF. + **/ +int +lpfc_issue_scsi_inquiry(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, uint32_t lunId) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *lpfc_cmd = NULL; + struct sli4_sge *sgl; + dma_addr_t pdma_phys_data; + IOCB_t *iocb_cmd; + uint32_t err; + + /* Check if there is a path to the target */ + if (!NLP_CHK_NODE_ACT(ndlp)) + return EINVAL; + if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) + return EINVAL; + + lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, NULL); + if (!lpfc_cmd) + return EIO; + + /* + * lpfc_cmd is partially set up for the IO. + * We will use 4 SGEs in the SGL, immediately followed by + * 256 bytes for the INQUIRY data. + * The lpfc_cmd data (virt) / dma_handle (phys) point to the SGL. + * The first SGE, fcp_cmnd is already setup + * The next SGE fcp_rsp is already setup. + * Next setup the SGE for the data.
+ */ + + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl++; + bf_set(lpfc_sli4_sge_last, sgl, 0); /* FCP_RSP is no longer last */ + sgl++; + pdma_phys_data = lpfc_cmd->dma_handle + (4 * sizeof(struct sli4_sge)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_data)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_data)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + bf_set(lpfc_sli4_sge_offset, sgl, 0); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(256); + sgl++; + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + sgl->sge_len = 0; + + /* + * The lpfc_cmd->data / lpfc_cmd->dma_handle should now point to + * the following construct: + * + * FCP_CMD SGE sizeof(sli4_sge) + * FCP_RSP SGE sizeof(sli4_sge) + * INQUIRY DATA SGE sizeof(sli4_sge) + * Zero'ed SGE sizeof(sli4_sge) + * INQUIRY Data 256 bytes + * Filler + * FCP_CMD Data sizeof(struct fcp_cmd) + * FCP_RSP_Data sizeof(struct fcp_rsp) + */ + + /* Setup the FCP_CMD */ + memset(lpfc_cmd->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + lpfc_cmd->fcp_cmnd->fcpCdb[0] = INQUIRY; + lpfc_cmd->fcp_cmnd->fcpCdb[4] = 0xff; + lpfc_cmd->fcp_cmnd->fcpCntl3 = READ_DATA; + int_to_scsilun(lunId, &lpfc_cmd->fcp_cmnd->fcp_lun); + lpfc_cmd->fcp_cmnd->fcpDl = cpu_to_be32(0xff); + + /* Zero the FCP_RSP */ + memset((void *)lpfc_cmd->fcp_rsp, 0, sizeof(struct fcp_rsp)); + + /* + * Setup the lpfc_cmd / IOCB + * set LPFC_IO_DRVR_INIT in iocb_flag field to indicate driver use of + * iocb from scsi buf list for a driver initiated SCSI IO. + */ + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DRVR_INIT; + lpfc_cmd->cur_iocbq.iocb_cmpl = lpfc_external_dif_cmpl; + lpfc_cmd->cur_iocbq.context1 = lpfc_cmd; + lpfc_cmd->cur_iocbq.context2 = (void *)(unsigned long)lunId; + lpfc_cmd->cur_iocbq.vport = vport; + lpfc_cmd->timeout = 30; + lpfc_cmd->pCmd = NULL; + lpfc_cmd->seg_cnt = 1; + lpfc_cmd->rdata = (void *)ndlp; /* Save ndlp in rdata */ + lpfc_cmd->ndlp = ndlp; + + iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; + iocb_cmd->ulpPU = PARM_READ_CHECK; + iocb_cmd->un.fcpi.fcpi_parm = 0xff; + iocb_cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) + iocb_cmd->ulpFCP2Rcvy = 1; + iocb_cmd->ulpClass = (ndlp->nlp_fcp_info & 0x0f); + iocb_cmd->ulpTimeout = lpfc_cmd->timeout; + + err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0919 External DIF: Issue INQUIRY (%d/%d) ret %d " + "flag x%x iotag x%x xri x%x", + ndlp->nlp_sid, lunId, err, + lpfc_cmd->cur_iocbq.iocb_flag, + lpfc_cmd->cur_iocbq.iotag, + lpfc_cmd->cur_iocbq.sli4_xritag); + + if (err) { + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_IO_DRVR_INIT; + lpfc_cmd->rdata = NULL; + lpfc_release_scsi_buf(phba, lpfc_cmd); + return EIO; + } + + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); + + return 0; +} + +/** + * lpfc_external_dif_match - Look up a specific External DIF device + * @vport: The virtual port for which this call is being executed. + * @lun: lun id used to specify the desired External DIF device + * @sid: SCSI id used to specify the desired External DIF device + * + * This routine scans the discovered External DIF devices for the vport + * for a match using the lun/sid criteria.
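+ * A typical lookup, mirroring the callers later in this file, is: + * + *     spin_lock_irqsave(&vport->external_dif_lock, flags); + *     dp = lpfc_external_dif_match(vport, sdev->lun, sdev->id); + *     if (dp) + *             ... use dp while still holding the lock ... + *     spin_unlock_irqrestore(&vport->external_dif_lock, flags); + *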
+ * MUST hold external_dif_lock BEFORE calling this routine + * + * Return code : + * NULL - device not found + * dp - struct lpfc_external_dif_support of matching device + **/ +static struct lpfc_external_dif_support * +lpfc_external_dif_match(struct lpfc_vport *vport, uint64_t lun, uint32_t sid) +{ + struct lpfc_external_dif_support *dp; + + list_for_each_entry(dp, &vport->external_dif_list, listentry) { + if ((dp->sid == sid) && (dp->lun == lun)) { + return dp; + } + } + return NULL; +} + +/** + * lpfc_retry_scsi_inquiry - Retry a driver initiated SCSI INQUIRY + * @vport: The virtual port for which this call is being executed. + * @ndlp: Pointer to lpfc_nodelist instance. + * + * This function is called when the driver wants to retry a SCSI + * INQUIRY Page 0 evpd 0. This is typically called when the driver + * wants to verify if a device supports External DIF and a previous + * driver initiated INQUIRY has failed. + **/ +int +lpfc_retry_scsi_inquiry(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t lunId, uint32_t status) +{ + struct lpfc_external_dif_support *dp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&vport->external_dif_lock, flags); + dp = lpfc_external_dif_match(vport, lunId, ndlp->nlp_sid); + if (dp) { + dp->inq_retry++; + if (dp->inq_retry < LPFC_EDSM_MAX_RETRY) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0921 External DIF: Inquiry retry %d " + "NPort %06x: (%d/%d) err x%x", + dp->inq_retry, ndlp->nlp_DID, + ndlp->nlp_sid, lunId, status); + rc = lpfc_issue_scsi_inquiry(vport, ndlp, dp->lun); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0922 External DIF: Inquiry retry " + "exceeded NPort %06x: (%d/%d) err x%x", + ndlp->nlp_DID, ndlp->nlp_sid, + lunId, status); + rc = ENXIO; + } + } + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + return rc; +} + +/** + * lpfc_external_dif_cmpl - IOCB completion routine for an External DIF IO + * @phba: The Hba for which this call is being executed. + * @pIocbIn: The command IOCBQ for the scsi cmnd. + * @pIocbOut: The response IOCBQ for the scsi cmnd. + * + * This routine processes the External DIF SCSI command completion before + * calling the normal SCSI completion routine (lpfc_scsi_cmd_iocb_cmpl). + * There are 2 types of External DIF completions, INQUIRY and READ/WRITE + * SCSI commands. + * + * We use INQUIRY to discover External DIF devices. An External DIF device does + * not advertise itself as T10-DIF capable using standard bits in the INQUIRY + * and READ_CAPACITY commands. Instead, it uses some vendor specific information + * in the standard INQUIRY command to turn on this feature. + * + * For READ/WRITE IOs we convert the IO back into a normal IO so it can be + * completed to the SCSI layer. The SCSI layer is unaware the IO was actually + * transmitted on the wire in T10 DIF Type 1 format.
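+ * + * Driver-initiated INQUIRYs are flagged LPFC_IO_DRVR_INIT and are + * released directly at the out: label below; SCSI-layer IOs instead + * fall through to lpfc_scsi_cmd_iocb_cmpl() at the end of this routine.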
+ **/ +static void +lpfc_external_dif_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, + struct lpfc_iocbq *pIocbOut) +{ + struct lpfc_io_buf *lpfc_cmd = + (struct lpfc_io_buf *)pIocbIn->context1; + struct lpfc_vport *vport = pIocbIn->vport; + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + struct lpfc_external_dif_support *dp; + uint32_t resp_info = fcprsp->rspStatus2; + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + uint32_t status = pIocbOut->iocb.ulpStatus; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_vendor_dif *vendor_dif_infop; + struct fcp_cmnd *fcpcmd; + struct scatterlist *sgde; + unsigned long flags; + uint8_t *data_inq, *buf = NULL, *ptr; + uint8_t *name; + uint32_t cnt, cmd, lun, resid, len; + + if (status && cmnd) { + if ((status != IOSTAT_FCP_RSP_ERROR) || + !(resp_info & RESID_UNDER)) + goto out; + } + fcpcmd = lpfc_cmd->fcp_cmnd; + cmd = fcpcmd->fcpCdb[0]; + + /* Only success and RESID_UNDER make it here */ + switch (cmd) { + case INQUIRY: + + if (cmnd) { + /* Inquiry issued by SCSI Layer */ + rdata = lpfc_cmd->rdata; + ndlp = rdata->pnode; + + sgde = scsi_sglist(cmnd); + buf = kmalloc(scsi_bufflen(cmnd), GFP_ATOMIC); + if (!buf) + break; + len = scsi_bufflen(cmnd); + ptr = buf; + while (sgde && len) { +#if (KERNEL_MAJOR < 3) || ((KERNEL_MAJOR == 3) && (KERNEL_MINOR < 4)) + data_inq = kmap_atomic(sg_page(sgde), KM_IRQ0) + + sgde->offset; +#else + data_inq = kmap_atomic(sg_page(sgde)) + + sgde->offset; +#endif + memcpy(ptr, data_inq, sgde->length); + len -= sgde->length; + ptr += sgde->length; +#if (KERNEL_MAJOR < 3) || ((KERNEL_MAJOR == 3) && (KERNEL_MINOR < 4)) + kunmap_atomic(data_inq - sgde->offset, KM_IRQ0); +#else + kunmap_atomic(data_inq - sgde->offset); +#endif + sgde = sg_next(sgde); + } + + data_inq = buf; + lun = cmnd->device->lun; + } else { + /* Inquiry issued by driver */ + ndlp = (void *)lpfc_cmd->rdata; + lpfc_cmd->rdata = NULL; + + data_inq = (uint8_t *)lpfc_cmd->dma_sgl + + (4 * sizeof(struct sli4_sge)); + lun = (uint32_t)(unsigned long)(pIocbIn->context2); + + if ((status != IOSTAT_FCP_RSP_ERROR) || + !(resp_info & RESID_UNDER)) { + /* If it is an unexpected error, retry it */ + lpfc_retry_scsi_inquiry(vport, ndlp, lun, + status); + break; + } + } + + /* Jump to T10 Vendor Identification field */ + data_inq += LPFC_INQ_VID_OFFSET; + if (memcmp(data_inq, LPFC_INQ_FDIF_VENDOR, + sizeof(LPFC_INQ_FDIF_VENDOR)) != 0) { + ndlp->nlp_flag &= ~NLP_EXTERNAL_DIF; + ndlp->nlp_flag |= NLP_SKIP_EXT_DIF; + break; + } + + /* Jump to Vendor specific DIF info */ + vendor_dif_infop = (struct lpfc_vendor_dif *)(data_inq + + (LPFC_INQ_VDIF_OFFSET - LPFC_INQ_VID_OFFSET)); + + name = (uint8_t *)&ndlp->nlp_portname; + + spin_lock_irqsave(&vport->external_dif_lock, flags); + dp = lpfc_external_dif_match(vport, lun, ndlp->nlp_sid); + if (dp) { + dp->inq_cnt++; + dp->inq_retry = 0; + ndlp->nlp_flag |= NLP_EXTERNAL_DIF; + + if (dp->state == LPFC_EDSM_PATH_READY) { + spin_unlock_irqrestore( + &vport->external_dif_lock, flags); + break; /* device already exists */ + } + } else { + + /* New External DIF device found */ + dp = kmalloc(sizeof(struct lpfc_external_dif_support), + GFP_ATOMIC); + if (!dp) { + spin_unlock_irqrestore( + &vport->external_dif_lock, flags); + break; + } + memset(dp, 0x0, + sizeof(struct lpfc_external_dif_support)); + dp->lun = lun; + dp->sid = ndlp->nlp_sid; + dp->dif_info = vendor_dif_infop->dif_info; + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_INIT); + memcpy(&dp->portName, name, sizeof(struct lpfc_name)); + dp->inq_cnt = 1; + dp->err3f_cnt = 0;
+ dp->err4b_cnt = 0; + ndlp->nlp_flag |= NLP_EXTERNAL_DIF; + + list_add_tail(&dp->listentry, + &vport->external_dif_list); + } + + /* Make sure the INQUIRY payload has our + * vendor specific info included. + */ + if (status == IOSTAT_FCP_RSP_ERROR) + resid = be32_to_cpu(fcprsp->rspResId); + else + resid = 0; + cnt = be32_to_cpu(fcpcmd->fcpDl) - resid; + if (cnt < LPFC_INQ_FDIF_SZ) { + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_IGNORE); + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0917 External DIF Vendor size error " + "Data: %02x %02x %02x\n", + be32_to_cpu(fcpcmd->fcpDl), + be32_to_cpu(fcprsp->rspResId), + vendor_dif_infop->length); + ndlp->nlp_flag &= ~NLP_EXTERNAL_DIF; + break; + } + + /* Check to see if External DIF protection is enabled and we + * are version 1. + */ + if ((vendor_dif_infop->length != LPFC_INQ_FDIF_SIZE) || + (vendor_dif_infop->version != LPFC_INQ_FDIF_VERSION)) { + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_IGNORE); + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_EDIF, + "0709 Possible External DIF INQUIRY " + "info error: " + "lun %d xri x%x fcpdl %d resid %d " + "Data: %02x %02x %02x\n", + lun, lpfc_cmd->cur_iocbq.sli4_xritag, + be32_to_cpu(fcpcmd->fcpDl), resid, + vendor_dif_infop->length, + vendor_dif_infop->version, + vendor_dif_infop->dif_info); + dp->dif_info = 0; + ndlp->nlp_flag &= ~NLP_EXTERNAL_DIF; + break; + } + + dp->dif_info = vendor_dif_infop->dif_info; + + /* Currently we only support DIF Type 1 * (GRD_CHK / REF_CHK) */ + if (vendor_dif_infop->dif_info != (LPFC_FDIF_PROTECT | + LPFC_FDIF_REFCHK | LPFC_FDIF_GRDCHK)) { + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_PATH_STANDBY); + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0701 External DIF INQUIRY protection " + "off: xri x%x fcpdl %d resid %d " + "Data: %02x %02x %02x\n", + lpfc_cmd->cur_iocbq.sli4_xritag, + be32_to_cpu(fcpcmd->fcpDl), + be32_to_cpu(fcprsp->rspResId), + vendor_dif_infop->length, + vendor_dif_infop->version, + vendor_dif_infop->dif_info); + break; + } + + lpfc_edsm_set_state(vport, dp, LPFC_EDSM_PATH_READY); + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + + lpfc_printf_vlog(vport, KERN_WARNING, LOG_EDIF, + "0712 Discovered External DIF device NPortId " + "x%x: scsi_id x%x: lun_id x%llx: WWPN " + "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + ndlp->nlp_DID, dp->sid, dp->lun, + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7)); + break; + default: + break; + } +out: + kfree(buf); + if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DRVR_INIT) { + if (ndlp == NULL) { + ndlp = (void *)lpfc_cmd->rdata; + lpfc_cmd->rdata = NULL; + } + pIocbIn->context2 = NULL; + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_IO_DRVR_INIT; + lpfc_release_scsi_buf(phba, lpfc_cmd); + return; + } + + lpfc_scsi_cmd_iocb_cmpl(phba, pIocbIn, pIocbOut); +} + +/** + * lpfc_external_dif - Check to see if we want to process this IO as an External DIF IO + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. + * + * This routine will selectively force normal IOs to be processed as a + * READ_STRIP / WRITE_INSERT T10-DIF IO. The upper SCSI Layer will be unaware + * that the IO is going to be transmitted on the wire with T10-DIF protection + * data.
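+ * For example, a READ_10 issued to a path in LPFC_EDSM_PATH_READY + * state gets LPFC_FDIF_CDB_PROTECT or'ed into CDB byte 1 + * (PROTECT = 001) by lpfc_edsm_rw_path_rdy() before it is issued.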
This routine also diverts INQUIRY command completions so they can be + * used to scan for External DIF devices. + **/ +static int +lpfc_external_dif(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_nodelist *ndlp, uint8_t *cdb_ptr) +{ + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + uint32_t op = scsi_get_prot_op(cmnd); + struct lpfc_external_dif_support *dp; + struct scsi_device *sdev; + unsigned long flags; + int rc; + + switch (op) { + case SCSI_PROT_NORMAL: + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + break; + default: + return 0; + } + + switch (cdb_ptr[0]) { + case INQUIRY: + lpfc_edsm_process_inq(vport, lpfc_cmd, cdb_ptr); + break; + case READ_10: + case READ_12: + case READ_16: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_SAME: + case WRITE_SAME_16: + case WRITE_VERIFY: + case VERIFY: + case VERIFY_12: + case VERIFY_16: + + /* Is this an External DIF device? */ + sdev = cmnd->device; + spin_lock_irqsave(&vport->external_dif_lock, flags); + dp = lpfc_external_dif_match(vport, sdev->lun, sdev->id); + if (dp == NULL) { + spin_unlock_irqrestore(&vport->external_dif_lock, + flags); + break; + } + switch (dp->state) { + case LPFC_EDSM_PATH_STANDBY: + /* IO is sent normally */ + break; + + case LPFC_EDSM_NEEDS_INQUIRY: + rc = lpfc_issue_scsi_inquiry(vport, ndlp, dp->lun); + if (rc == 0) + lpfc_edsm_set_state(vport, dp, + LPFC_EDSM_PATH_CHECK); + else + lpfc_edsm_set_state(vport, dp, + LPFC_EDSM_NEEDS_INQUIRY); + /* fall through */ + + case LPFC_EDSM_PATH_CHECK: + /* In process of checking path, need to retry the IO */ + spin_unlock_irqrestore(&vport->external_dif_lock, + flags); + lpfc_printf_vlog(vport, KERN_INFO, LOG_EDIF, + "0923 External DIF: In process of " + "checking path, retry IO\n"); + return 1; + + case LPFC_EDSM_PATH_READY: + /* modify CDB as needed */ + lpfc_edsm_rw_path_rdy(vport, lpfc_cmd, cdb_ptr, op); + lpfc_cmd->edifp = (void *)dp; + /* fall through */ + + default: + break; + } + spin_unlock_irqrestore(&vport->external_dif_lock, flags); + break; + default: + break; + } + return 0; +} + /** * lpfc_queuecommand - scsi_host_template queuecommand entry point * @cmnd: Pointer to scsi_cmnd data structure. @@ -4492,8 +5671,15 @@ void lpfc_poll_timeout(struct timer_list *t) * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. **/ static int +#ifdef DEF_SCSI_QCMD lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) +#else +lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) +#endif { +#ifndef DEF_SCSI_QCMD + struct Scsi_Host *shost = cmnd->device->host; +#endif struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; @@ -4501,27 +5687,43 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) struct lpfc_io_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err, idx; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - int cpu; -#endif + uint64_t start; + start = ktime_get_ns(); rdata = lpfc_rport_data_from_scsi_device(cmnd->device); /* sanity check on references */ if (unlikely(!rdata) || unlikely(!rport)) goto out_fail_command; +#ifndef DEF_SCSI_QCMD + spin_unlock_irq(shost->host_lock); +#endif + err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; goto out_fail_command; } + /* + * Do not let the mid-layer retry I/O too fast. If an I/O is retried + * without waiting a bit, then indicate that the device is busy.
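+ * For example, with LPFC_RETRY_PAUSE at 300 ms, a command on its + * second retry is bounced with SCSI_MLQUEUE_DEVICE_BUSY unless at + * least 2 * 300 = 600 ms have elapsed since it was first allocated.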
+ */ + if (cmnd->retries && + time_before(jiffies, (cmnd->jiffies_at_alloc + + msecs_to_jiffies(LPFC_RETRY_PAUSE * + cmnd->retries)))) { +#ifndef DEF_SCSI_QCMD + spin_lock_irq(shost->host_lock); +#endif + return SCSI_MLQUEUE_DEVICE_BUSY; + } ndlp = rdata->pnode; if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" " op:%02x str=%s without registering for" " BlockGuard - Rejecting command\n", @@ -4535,35 +5737,47 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) * transport is still transitioning. */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) - goto out_tgt_busy; + goto out_tgt_busy1; + + /* Check if IO qualifies for CMF */ + if (phba->cmf_active_mode != LPFC_CFG_OFF && + cmnd->sc_data_direction == DMA_FROM_DEVICE && + (scsi_sg_count(cmnd))) { + /* Latency start time saved in rx_cmd_start later in routine */ + err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); + if (err) + goto out_tgt_busy1; + } + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, - "3377 Target Queue Full, scsi Id:%d " - "Qdepth:%d Pending command:%d" - " WWNN:%02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x, " - " WWPN:%02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x", - ndlp->nlp_sid, ndlp->cmd_qdepth, - atomic_read(&ndlp->cmd_pending), - ndlp->nlp_nodename.u.wwn[0], - ndlp->nlp_nodename.u.wwn[1], - ndlp->nlp_nodename.u.wwn[2], - ndlp->nlp_nodename.u.wwn[3], - ndlp->nlp_nodename.u.wwn[4], - ndlp->nlp_nodename.u.wwn[5], - ndlp->nlp_nodename.u.wwn[6], - ndlp->nlp_nodename.u.wwn[7], - ndlp->nlp_portname.u.wwn[0], - ndlp->nlp_portname.u.wwn[1], - ndlp->nlp_portname.u.wwn[2], - ndlp->nlp_portname.u.wwn[3], - ndlp->nlp_portname.u.wwn[4], - ndlp->nlp_portname.u.wwn[5], - ndlp->nlp_portname.u.wwn[6], - ndlp->nlp_portname.u.wwn[7]); - goto out_tgt_busy; + lpfc_throttle_vlog(ndlp->vport, &ndlp->log, + LOG_FCP_ERROR, + "3377 Target Queue Full, scsi Id:%d" + " Qdepth:%d Pending command:%d" + " WWNN:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + " WWPN:%02x:%02x:%02x:%02x" + ":%02x:%02x:%02x:%02x", + ndlp->nlp_sid, ndlp->cmd_qdepth, + atomic_read(&ndlp->cmd_pending), + ndlp->nlp_nodename.u.wwn[0], + ndlp->nlp_nodename.u.wwn[1], + ndlp->nlp_nodename.u.wwn[2], + ndlp->nlp_nodename.u.wwn[3], + ndlp->nlp_nodename.u.wwn[4], + ndlp->nlp_nodename.u.wwn[5], + ndlp->nlp_nodename.u.wwn[6], + ndlp->nlp_nodename.u.wwn[7], + ndlp->nlp_portname.u.wwn[0], + ndlp->nlp_portname.u.wwn[1], + ndlp->nlp_portname.u.wwn[2], + ndlp->nlp_portname.u.wwn[3], + ndlp->nlp_portname.u.wwn[4], + ndlp->nlp_portname.u.wwn[5], + ndlp->nlp_portname.u.wwn[6], + ndlp->nlp_portname.u.wwn[7]); + goto out_tgt_busy2; } } @@ -4576,6 +5790,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "IO busied\n"); goto out_host_busy; } + lpfc_cmd->rx_cmd_start = start; /* * Store the midlayer's command structure for the completion phase @@ -4584,10 +5799,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_cmd->pCmd = cmnd; lpfc_cmd->rdata = rdata; lpfc_cmd->ndlp = ndlp; + lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; cmnd->host_scribble = (unsigned char *)lpfc_cmd; +#ifndef DEF_SCSI_QCMD + cmnd->scsi_done = done; +#endif - if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { - if (vport->phba->cfg_enable_bg) { + err = lpfc_scsi_prep_cmnd(vport, 
lpfc_cmd, ndlp); + if (err) + goto out_host_busy_release_buf; + + if (lpfc_scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " @@ -4600,7 +5823,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { - if (vport->phba->cfg_enable_bg) { + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " @@ -4613,34 +5836,21 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } - if (err == 2) { - cmnd->result = DID_ERROR << 16; - goto out_fail_command_release_buf; - } else if (err) { + if (unlikely(err)) { + if (err == 2) { + cmnd->result = ScsiResult(DID_ERROR, 0); + goto out_fail_command_release_buf; + } goto out_host_busy_free_buf; } - lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); - -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { - cpu = raw_smp_processor_id(); - if (cpu < LPFC_CHECK_CPU_CNT) { - struct lpfc_sli4_hdw_queue *hdwq = - &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no]; - hdwq->cpucheck_xmt_io[cpu]++; - } - } -#endif - err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, - &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); - if (err) { + if (cmnd->cmnd[0] == 0 || cmnd->cmnd[0] == 0x12 || + cmnd->cmnd[0] == 0xa0 || cmnd->cmnd[0] == 0x11) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "3376 FCP could not issue IOCB err %x" - "FCP cmd x%x <%d/%llu> " + "3322 FCP cmd x%x <%d/%lld> " "sid: x%x did: x%x oxid: x%x " - "Data: x%x x%x x%x x%x\n", - err, cmnd->cmnd[0], + "Data: x%x x%x x%x x%x %x\n", + cmnd->cmnd[0], cmnd->device ? cmnd->device->id : 0xffff, cmnd->device ? cmnd->device->lun : (u64) -1, vport->fc_myDID, ndlp->nlp_DID, @@ -4650,10 +5860,46 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_cmd->cur_iocbq.iocb.ulpIoTag, lpfc_cmd->cur_iocbq.iocb.ulpTimeout, (uint32_t) - (cmnd->request->timeout / 1000)); + (cmnd->request->timeout / 1000), + lpfc_cmd->cur_iocbq.iocb.ulpCommand); + } + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) + this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); +#endif + err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + lpfc_cmd->ts_cmd_start = start; + lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; + lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); + } else { + lpfc_cmd->ts_cmd_start = 0; + } +#endif + if (err) { + lpfc_throttle_vlog(vport, &vport->log, LOG_FCP, + "3376 FCP could not issue IOCB err %x" + "FCP cmd x%x <%d/%llu> " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x x%x\n", + err, cmnd->cmnd[0], + cmnd->device ? cmnd->device->id : 0xffff, + cmnd->device ? cmnd->device->lun : (u64) -1, + vport->fc_myDID, ndlp->nlp_DID, + phba->sli_rev == LPFC_SLI_REV4 ? 
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, + lpfc_cmd->cur_iocbq.iocb.ulpContext, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag, + lpfc_cmd->cur_iocbq.iocb.ulpTimeout, + (uint32_t) + (cmnd->request->timeout / 1000)); goto out_host_busy_free_buf; } + if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); @@ -4665,6 +5911,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (phba->cfg_xri_rebalancing) lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); +#ifndef DEF_SCSI_QCMD + spin_lock_irq(shost->host_lock); +#endif return 0; out_host_busy_free_buf: @@ -4682,21 +5931,320 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; } } + out_host_busy_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); +#ifndef DEF_SCSI_QCMD + spin_lock_irq(shost->host_lock); +#endif return SCSI_MLQUEUE_HOST_BUSY; - out_tgt_busy: + out_tgt_busy2: + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); + out_tgt_busy1: +#ifndef DEF_SCSI_QCMD + spin_lock_irq(shost->host_lock); +#endif return SCSI_MLQUEUE_TARGET_BUSY; out_fail_command_release_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); + lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), + shost); out_fail_command: +#ifdef DEF_SCSI_QCMD cmnd->scsi_done(cmnd); +#else + spin_lock_irq(shost->host_lock); + done(cmnd); +#endif return 0; } +/* lpfc_mode_sense_cmpl. + * + * This is the completion routine for a driver-issued mode sense + * used to determine if an FCP target supports first_burst. This + * routine could be used for other mode sense data values. + */ +void +lpfc_mode_sense_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *p_io_in, + struct lpfc_iocbq *p_io_out) +{ + struct lpfc_io_buf *lpfc_cmd = p_io_in->context1; + u32 resp_info = lpfc_cmd->fcp_rsp->rspStatus2; + struct sli4_sge *sgl; + struct lpfc_nodelist *ndlp = lpfc_cmd->ndlp; + u32 status = p_io_out->iocb.ulpStatus; + u32 result = p_io_out->iocb.un.ulpWord[4] & IOERR_PARAM_MASK; + u32 fb = 0; + int ret_val; + struct lpfc_first_burst_page *p_data; + + /* Make sure the driver's remoteport state is still valid. If the + * reference put results in an invalid remoteport, just exit. + */ + ret_val = lpfc_nlp_put(ndlp); + if (ret_val) { + ret_val = -ENOMEM; + goto out; + } + + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + ndlp->nlp_state != NLP_STE_MAPPED_NODE) { + ret_val = -ENXIO; + goto out; + } + + /* Only accept a successful IO or an IO with an underflow status + * because any other FCP RSP error indicates the IO had some other + * error. + */ + if (status && + !(status == IOSTAT_FCP_RSP_ERROR && resp_info & RESID_UNDER)) { + ret_val = -EIO; + goto out; + } + + /* No errors. clear the return value. */ + ret_val = 0; + + /* Skip over the SGEs and headers to get the response data region. */ + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl += 4; + p_data = (struct lpfc_first_burst_page *)(((u32 *)sgl) + 3); + + /* Validate the correct page code response and the number of bytes + * available before reading the first burst size. Initialize + * the first_burst in case the response doesn't have a value. 
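+ * For example, a DISCONNECT-RECONNECT page (page_code 0x2, page_len + * 0xe) reporting a first burst field of 0x40 yields + * min(0x40 * 512, LPFC_FCP_FB_MAX) = 32768 bytes.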
+ */ + ndlp->first_burst = 0; + if (p_data->page_code == 0x2 && p_data->page_len == 0xe) { + fb = be16_to_cpu(p_data->first_burst); + ndlp->first_burst = min((u32)(fb * 512), (u32)LPFC_FCP_FB_MAX); + } else { + lpfc_printf_vlog(phba->pport, KERN_INFO, + LOG_FCP | LOG_FCP_ERROR, + "6332 Clear first burst value " + "Code x%x Len x%x\n", p_data->page_code, + p_data->page_len); + } + + lpfc_printf_vlog(phba->pport, KERN_INFO, + LOG_FCP | LOG_FCP_ERROR, + "6336 FB Rsp: DID x%x Data: x%x x%x x%x x%x x%x x%x " + "x%x\n", ndlp->nlp_DID, fb, ndlp->first_burst, + status, result, resp_info, + lpfc_cmd->cur_iocbq.iocb_flag, ret_val); + + /* Pick up SLI4 exchange busy status from HBA */ + if (p_io_out->iocb_flag & LPFC_EXCHANGE_BUSY) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; + + out: + /* Push a driver message only if the IO completion is in error. */ + if (ret_val) + lpfc_printf_vlog(phba->pport, KERN_INFO, + LOG_FCP_UNDER | LOG_FCP_ERROR, + "6331 FB err %d <x%x/x%x> resp_info x%x\n", + ret_val, status, result, resp_info); + + p_io_in->context2 = NULL; + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_IO_DRVR_INIT; + lpfc_release_scsi_buf(phba, lpfc_cmd); +} + +/** + * lpfc_init_scsi_cmd - Initialize SCSI command-specific fields. + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The lpfc_io_buf containing this command. + * @cmd_req: request descriptor providing the device identifier (dev_id), + * the SCSI command used to format the driver io and CDB (cmd_id), + * and the completion routine + * @data_len: Number of bytes available for data. + * + * This routine initializes an lpfc_io_buf for the specified cmd_req->cmd_id + * to the specified cmd_req->dev_id for @data_len bytes. + * + * Returns: + * 0 - on success + * A negative error value in the form -Exxxx otherwise. + * + **/ +static int +lpfc_init_scsi_cmd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_scsi_io_req *cmd_req, u32 data_len) +{ + int ret = 0; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + + /* Set command-independent values. */ + lpfc_cmd->fcp_cmnd->fcpCdb[0] = cmd_req->cmd_id; + lpfc_cmd->fcp_cmnd->fcpCdb[4] = data_len; + lpfc_cmd->cur_iocbq.iocb_cmpl = cmd_req->cmd_cmpl; + lpfc_cmd->fcp_cmnd->fcpDl = cpu_to_be32(data_len); + int_to_scsilun(cmd_req->dev_id, &lpfc_cmd->fcp_cmnd->fcp_lun); + + /* SCSI Command specific initialization. */ + switch (cmd_req->cmd_id) { + case MODE_SENSE: + /* Send the DISCONNECT-RECONNECT page code */ + lpfc_cmd->fcp_cmnd->fcpCdb[2] = 0x2; + lpfc_cmd->fcp_cmnd->fcpCntl3 = READ_DATA; + lpfc_cmd->cur_iocbq.context2 = NULL; + iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; + iocb_cmd->ulpPU = PARM_READ_CHECK; + break; + default: + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6335 Unsupported CMD x%x\n", cmd_req->cmd_id); + ret = -EINVAL; + break; + } + return ret; +} + +/** + * lpfc_issue_scsi_cmd - Send a driver initiated SCSI cmd + * @vport: The virtual port for which this call is being executed. + * @ndlp: Pointer to lpfc_nodelist instance. + * @cmd_req: pointer to a job structure with the command, device ids, + * and the associated completion routine. + * + * The driver calls this function to dynamically determine if an FCP + * target supports a particular feature. The caller needs to specify + * which command is needed for which device. + * + * Returns: + * 0 - on success + * A negative error value in the form -Exxxx otherwise.
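+ * + * For instance, a caller probing first-burst support might build the + * request like this (sketch; lun_id stands in for the caller's LUN): + * + *     struct lpfc_scsi_io_req req = { + *             .dev_id   = lun_id, + *             .cmd_id   = MODE_SENSE, + *             .cmd_cmpl = lpfc_mode_sense_cmpl, + *     }; + *     rc = lpfc_issue_scsi_cmd(vport, ndlp, &req);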
+ * + **/ +int +lpfc_issue_scsi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_scsi_io_req *cmd_req) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_io_buf *lpfc_cmd = NULL; + struct sli4_sge *sgl; + dma_addr_t pdma_phys_data; + IOCB_t *iocb_cmd; + int err = 0; + + /* Requirements must be met. */ + if (phba->sli_rev != LPFC_SLI_REV4 || + phba->hba_flag == HBA_FCOE_MODE || + !phba->pport->cfg_first_burst_size) + return -EPERM; + + /* Because the driver potentially sends an FCP and NVME PRLI, the + * caller is required to ensure an FCP PRLI has been successfully + * completed. + */ + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return -EINVAL; + + lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, NULL); + if (!lpfc_cmd) + return -EIO; + + /* + * Every lpfc_io_buf is partially setup for the IO - an FCP Cmd SGE + * and an FCP RSP SGE. This routine configures two more - one for + * this scsi command and an empty SGE. A 256 byte data area follows + * for the command payload data. + */ + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl++; + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl++; + + /* SGE for the SCSI command */ + pdma_phys_data = lpfc_cmd->dma_handle + (4 * sizeof(struct sli4_sge)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_data)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_data)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + bf_set(lpfc_sli4_sge_offset, sgl, 0); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(LPFC_DATA_SIZE); + sgl++; + + /* Empty SGE. Clear to prevent stale data errors. */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; + sgl->word2 = 0; + sgl->sge_len = 0; + + /* Finish prepping the command, response and scsi command. */ + memset(lpfc_cmd->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + memset((void *)lpfc_cmd->fcp_rsp, 0, sizeof(struct fcp_rsp)); + err = lpfc_init_scsi_cmd(vport, lpfc_cmd, cmd_req, LPFC_DATA_SIZE); + if (err) { + err = -EPERM; + goto out_err; + } + + /* This is an FCP IO the driver tracks so don't let it stay + * outstanding forever - provide a generous 16 sec TMO. + */ + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + lpfc_cmd->cur_iocbq.context1 = lpfc_cmd; + lpfc_cmd->cur_iocbq.vport = vport; + lpfc_cmd->timeout = LPFC_DRVR_TIMEOUT; + lpfc_cmd->pCmd = NULL; + lpfc_cmd->seg_cnt = 1; + + /* Get an ndlp reference. Ignore rdata as this IO is driver owned. 
*/ + lpfc_cmd->ndlp = lpfc_nlp_get(ndlp); + if (!lpfc_cmd->ndlp) { + err = -EINVAL; + goto out_err; + } + + iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + iocb_cmd->un.fcpi.fcpi_parm = LPFC_DATA_SIZE; + iocb_cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) + iocb_cmd->ulpFCP2Rcvy = 1; + iocb_cmd->ulpClass = (ndlp->nlp_fcp_info & 0x0f); + iocb_cmd->ulpTimeout = lpfc_cmd->timeout; + + err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, &lpfc_cmd->cur_iocbq, + SLI_IOCB_RET_IOCB); + if (err) { + err = -EIO; + lpfc_nlp_put(ndlp); + goto out_err; + } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP | LOG_FCP_ERROR, + "6333 Issue cmd x%x to 0x%06x status %d " + "flag x%x iotag x%x xri x%x\n", + cmd_req->cmd_id, ndlp->nlp_DID, err, + lpfc_cmd->cur_iocbq.iocb_flag, + lpfc_cmd->cur_iocbq.iotag, + lpfc_cmd->cur_iocbq.sli4_xritag); + + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); + return 0; + + out_err: + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP | LOG_FCP_ERROR, + "6337 Could not issue cmd x%x err %d\n", + cmd_req->cmd_id, err); + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_IO_DRVR_INIT; + lpfc_cmd->rdata = NULL; + lpfc_release_scsi_buf(phba, lpfc_cmd); + return err; +} /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point @@ -4847,6 +6395,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb, 0); } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + if (ret_val == IOCB_ERROR) { /* Indicate the IO is not being aborted by the driver. */ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; @@ -4867,7 +6418,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); wait_for_cmpl: - /* Wait for abort to complete */ + /* + * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait + * for abort to complete. 
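+ * For example, with cfg_devloss_tmo at its default of 30 seconds the + * wait below can block for up to 2 * 30 = 60 seconds before the + * handler gives up and reports the timeout.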
+ */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); @@ -4876,7 +6430,7 @@ wait_for_cmpl: if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0748 abort handler timed out waiting " "for aborting I/O (xri:x%x) to complete: " "ret %#x, ID %d, LUN %llu\n", @@ -4958,8 +6512,8 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) rsp_info_code = fcprsp->rspInfo3; - lpfc_printf_vlog(vport, KERN_INFO, - LOG_FCP, + lpfc_optioned_vlog(vport, &vport->log, + KERN_INFO, LOG_FCP, "0706 fcp_rsp valid 0x%x," " rsp len=%d code 0x%x\n", rsp_info, @@ -4973,8 +6527,10 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) ((rsp_len == 8) || (rsp_len == 4))) { switch (rsp_info_code) { case RSP_NO_FAILURE: - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "0715 Task Mgmt No Failure\n"); + lpfc_optioned_vlog(vport, &vport->log, + KERN_INFO, LOG_FCP, + "0715 Task Mgmt No " + "Failure\n"); ret = SUCCESS; break; case RSP_TM_NOT_SUPPORTED: /* TM rejected */ @@ -5016,7 +6572,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) **/ static int lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, - unsigned int tgt_id, uint64_t lun_id, + unsigned tgt_id, uint64_t lun_id, uint8_t task_mgmt_cmd) { struct lpfc_hba *phba = vport->phba; @@ -5029,11 +6585,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, int status; rdata = lpfc_rport_data_from_scsi_device(cmnd->device); - if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) - return FAILED; pnode = rdata->pnode; + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + return FAILED; - lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); + lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL); if (lpfc_cmd == NULL) return FAILED; lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; @@ -5068,8 +6624,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, if ((status != IOCB_SUCCESS) || (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { if (status != IOCB_SUCCESS || - iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0727 TMF %s to TGT %d LUN %llu " "failed (%d, %d) iocb_flag x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), @@ -5085,7 +6641,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); else ret = FAILED; - } else if (status == IOCB_TIMEDOUT) { + } else if ((status == IOCB_TIMEDOUT) || + (status == IOCB_ABORTED)) { ret = TIMEOUT_ERROR; } else { ret = FAILED; @@ -5095,7 +6652,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, lpfc_sli_release_iocbq(phba, iocbqrsp); - if (ret != TIMEOUT_ERROR) + if (status != IOCB_TIMEDOUT) lpfc_release_scsi_buf(phba, lpfc_cmd); return ret; @@ -5184,7 +6741,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); } if (cnt) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0724 I/O flush failure for context %s : cnt x%x\n", ((context == LPFC_CTX_LUN) ? "LUN" : ((context == LPFC_CTX_TGT) ? 
"TGT" : @@ -5210,17 +6767,18 @@ static int lpfc_device_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; uint64_t lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status; + int status = 0; + u32 logit = LOG_FCP; rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata || !rdata->pnode) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0798 Device Reset rdata failure: rdata x%px\n", rdata); return FAILED; @@ -5232,7 +6790,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0721 Device Reset rport failure: rdata x%px\n", rdata); return FAILED; } @@ -5248,8 +6806,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_LUN_RESET); + if (status != SUCCESS) + logit = LOG_TRACE_EVENT; - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, logit, "0713 SCSI layer issued Device Reset (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); @@ -5287,11 +6847,15 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned tgt_id = cmnd->device->id; uint64_t lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status; + int status = 0; + u32 logit = LOG_FCP; + u32 dev_loss_tmo = vport->cfg_devloss_tmo; + unsigned long flags; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata || !rdata->pnode) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0799 Target Reset rdata failure: rdata x%px\n", rdata); return FAILED; @@ -5303,13 +6867,13 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, flags); pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, flags); } lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); @@ -5327,8 +6891,47 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_TARGET_RESET); + if (status != SUCCESS) { + logit = LOG_TRACE_EVENT; - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + /* Issue LOGO, if no LOGO is outstanding */ + spin_lock_irqsave(shost->host_lock, flags); + if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && + !pnode->logo_waitq) { + pnode->logo_waitq = &waitq; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + pnode->nlp_flag |= NLP_ISSUE_LOGO; + pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; + spin_unlock_irqrestore(shost->host_lock, flags); + lpfc_unreg_rpi(vport, pnode); + wait_event_timeout(waitq, + (!(pnode->upcall_flags & + NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(dev_loss_tmo * + 1000)); + + if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { + lpfc_printf_vlog(vport, KERN_ERR, logit, 
+ "0725 SCSI layer TGTRST failed" + " & LOGO TMO (%d, %llu) " + "return x%x\n", + tgt_id, lun_id, status); + spin_lock_irqsave(shost->host_lock, flags); + pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + } else { + spin_lock_irqsave(shost->host_lock, flags); + } + pnode->logo_waitq = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + status = SUCCESS; + } else { + + spin_unlock_irqrestore(shost->host_lock, flags); + status = FAILED; + } + } + + lpfc_printf_vlog(vport, KERN_ERR, logit, "0723 SCSI layer issued Target Reset (%d, %llu) " "return x%x\n", tgt_id, lun_id, status); @@ -5363,7 +6966,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_nodelist *ndlp = NULL; struct lpfc_scsi_event_header scsi_event; int match; - int ret = SUCCESS, status, i; + int ret = SUCCESS, status = SUCCESS, i; + u32 logit = LOG_FCP; scsi_event.event_type = FC_REG_SCSI_EVENT; scsi_event.subcategory = LPFC_EVENT_BUSRESET; @@ -5409,7 +7013,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) i, 0, FCP_TARGET_RESET); if (status != SUCCESS) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0700 Bus Reset on target %d failed\n", i); ret = FAILED; @@ -5425,8 +7029,10 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); if (status != SUCCESS) ret = FAILED; + if (ret == FAILED) + logit = LOG_TRACE_EVENT; - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, logit, "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); return ret; } @@ -5472,7 +7078,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) return ret; error: - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3323 Failed host reset\n"); lpfc_unblock_mgmt_io(phba); return FAILED; @@ -5583,7 +7189,7 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0708 Allocation request of %d " "command buffers did not succeed. " "Allocated %d buffers.\n", @@ -5600,6 +7206,7 @@ lpfc_slave_alloc(struct scsi_device *sdev) * * This routine configures following items * - Tag command queuing support for @sdev if supported. + * - Dev loss time out value of fc_rport. * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. * * Return codes: @@ -5610,9 +7217,17 @@ lpfc_slave_configure(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; + struct fc_rport *rport = starget_to_rport(sdev->sdev_target); + + /* + * Initialize the fc transport attributes for the target + * containing this scsi device. Also note that the driver's + * target pointer is stored in the starget_data for the + * driver's sysfs entry point functions. 
+ */ + rport->dev_loss_tmo = vport->cfg_devloss_tmo; scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); - if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); @@ -6014,44 +7629,21 @@ struct scsi_host_template lpfc_template_nvme = { .this_id = -1, .sg_tablesize = 1, .cmd_per_lun = 1, +#if (KERNEL_MAJOR < 5) + .use_clustering = ENABLE_CLUSTERING, +#endif .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .track_queue_depth = 0, }; -struct scsi_host_template lpfc_template_no_hr = { - .module = THIS_MODULE, - .name = LPFC_DRIVER_NAME, - .proc_name = LPFC_DRIVER_NAME, - .info = lpfc_info, - .queuecommand = lpfc_queuecommand, - .eh_timed_out = fc_eh_timed_out, - .eh_abort_handler = lpfc_abort_handler, - .eh_device_reset_handler = lpfc_device_reset_handler, - .eh_target_reset_handler = lpfc_target_reset_handler, - .eh_bus_reset_handler = lpfc_bus_reset_handler, - .slave_alloc = lpfc_slave_alloc, - .slave_configure = lpfc_slave_configure, - .slave_destroy = lpfc_slave_destroy, - .scan_finished = lpfc_scan_finished, - .this_id = -1, - .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, - .cmd_per_lun = LPFC_CMD_PER_LUN, - .shost_attrs = lpfc_hba_attrs, - .max_sectors = 0xFFFFFFFF, - .vendor_id = LPFC_NL_VENDOR_ID, - .change_queue_depth = scsi_change_queue_depth, - .track_queue_depth = 1, -}; - struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, - .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler = lpfc_device_reset_handler, .eh_target_reset_handler = lpfc_target_reset_handler, @@ -6064,32 +7656,12 @@ struct scsi_host_template lpfc_template = { .this_id = -1, .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, +#if (KERNEL_MAJOR < 5) + .use_clustering = ENABLE_CLUSTERING, +#endif .shost_attrs = lpfc_hba_attrs, - .max_sectors = 0xFFFF, + .max_sectors = 0xFFFFFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .change_queue_depth = scsi_change_queue_depth, .track_queue_depth = 1, }; - -struct scsi_host_template lpfc_vport_template = { - .module = THIS_MODULE, - .name = LPFC_DRIVER_NAME, - .proc_name = LPFC_DRIVER_NAME, - .info = lpfc_info, - .queuecommand = lpfc_queuecommand, - .eh_timed_out = fc_eh_timed_out, - .eh_abort_handler = lpfc_abort_handler, - .eh_device_reset_handler = lpfc_device_reset_handler, - .eh_target_reset_handler = lpfc_target_reset_handler, - .slave_alloc = lpfc_slave_alloc, - .slave_configure = lpfc_slave_configure, - .slave_destroy = lpfc_slave_destroy, - .scan_finished = lpfc_scan_finished, - .this_id = -1, - .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, - .cmd_per_lun = LPFC_CMD_PER_LUN, - .shost_attrs = lpfc_vport_attrs, - .max_sectors = 0xFFFF, - .change_queue_depth = scsi_change_queue_depth, - .track_queue_depth = 1, -}; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index f76667b7da7b..3bdbfcad8bf3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -2,7 +2,7 @@ * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -24,6 +24,9 @@ struct lpfc_hba; #define LPFC_FCP_CDB_LEN 16 +#define CTRL_ID 0 +#define LPFC_DATA_SIZE 255 +#define LPFC_FCP_FB_MAX 65536 #define list_remove_head(list, entry, type, member) \ do { \ @@ -130,6 +133,28 @@ struct lpfc_scsicmd_bkt { uint32_t cmd_count; }; +struct lpfc_scsi_io_req { + u32 dev_id; + u32 cmd_id; + void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *p_io_in, + struct lpfc_iocbq *p_io_out); +}; + +struct lpfc_first_burst_page { + u8 page_code; + u8 page_len; + u8 buf_full_r; + u8 buf_mt_r; + __be16 bus_inact_lmt; + __be16 disc_tmo; + __be16 conn_tmo; + __be16 max_burst; + u8 byte12; + u8 rsvd; + __be16 first_burst; +}; + +#define LPFC_RETRY_PAUSE 300 #define LPFC_SCSI_DMA_EXT_SIZE 264 #define LPFC_BPL_SIZE 1024 #define MDAC_DIRECT_CMD 0x22 @@ -138,12 +163,65 @@ struct lpfc_scsicmd_bkt { #define NO_MORE_OAS_LUN -1 #define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN +#ifndef WRITE_SAME_16 +#define WRITE_SAME_16 0x93 +#endif + +#ifndef VERIFY_12 +#define VERIFY_12 0xaf +#endif + +/* ASC/ASCQ codes used in driver */ +#define LPFC_ASC_DIF_ERR 0x10 +#define LPFC_ASCQ_GUARD_CHECK 0x1 +#define LPFC_ASCQ_REFTAG_CHECK 0x3 +#define LPFC_ASC_TARGET_CHANGED 0x3f +#define LPFC_ASCQ_INQUIRY_DATA 0x3 +#define LPFC_ASC_DATA_PHASE_ERR 0x4b +#define LPFC_ASCQ_EDIF_MODE 0x82 + +/* This macro is available from RHEL 7.1 onwards. */ +#ifndef FC_PORTSPEED_32GBIT +#define FC_PORTSPEED_32GBIT 0x40 +#endif + +#ifndef FC_PORTSPEED_64GBIT +#define FC_PORTSPEED_64GBIT 0x1000 +#endif + +#ifndef FC_PORTSPEED_25GBIT +#define FC_PORTSPEED_25GBIT 0x800 +#endif + +#ifndef FC_PORTSPEED_40GBIT +#define FC_PORTSPEED_40GBIT 0x100 +#endif + +#ifndef FC_PORTSPEED_100GBIT +#define FC_PORTSPEED_100GBIT 0x400 +#endif + #ifndef FC_PORTSPEED_128GBIT #define FC_PORTSPEED_128GBIT 0x2000 #endif #define TXRDY_PAYLOAD_LEN 12 +/* Safe command codes for scanning, excluding group code */ +#define LPFC_INQUIRY_CMD_CODE (INQUIRY & 0x1f) +#define LPFC_LOG_SELECT_CMD_CODE (LOG_SELECT & 0x1f) +#define LPFC_LOG_SENSE_CMD_CODE (LOG_SENSE & 0x1f) +#define LPFC_MODE_SELECT_CMD_CODE (MODE_SELECT & 0x1f) +#define LPFC_MODE_SENSE_CMD_CODE (MODE_SENSE & 0x1f) +#define LPFC_REPORT_LUNS_CMD_CODE (REPORT_LUNS & 0x1f) +#define LPFC_SEND_DIAGNOSTIC_CMD_CODE (SEND_DIAGNOSTIC & 0x1f) +#define LPFC_MAINTENANCE_IN_CMD_CODE (MAINTENANCE_IN & 0x1f) +#define LPFC_MAINTENANCE_OUT_CMD_CODE (MAINTENANCE_OUT & 0x1f) +#define LPFC_PERSISTENT_RESERVE_IN_CMD_CODE (PERSISTENT_RESERVE_IN & 0x1f) +#define LPFC_PERSISTENT_RESERVE_OUT_CMD_CODE (PERSISTENT_RESERVE_OUT & 0x1f) +#define LPFC_READ_BUFFER_CMD_CODE (READ_BUFFER & 0x1f) +#define LPFC_WRITE_BUFFER_CMD_CODE (WRITE_BUFFER & 0x1f) + /* For sysfs/debugfs tmp string max len */ #define LPFC_MAX_SCSI_INFO_TMP_LEN 79 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e2cec1f6e659..c0f9d4fe39e4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,8 +1,8 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * - * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. 
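The LPFC_*_CMD_CODE macros added above mask a SCSI opcode with 0x1f: bits 7-5 of an opcode are the group code (which fixes the CDB length class) and bits 4-0 are the command code, so stripping the group lets one value match the same command in any group. A standalone illustration in plain userspace C, with hypothetical macro names:

#include <stdio.h>

/* bits 7-5: group code (CDB length class); bits 4-0: command */
#define DEMO_CMD_CODE(op)   ((op) & 0x1f)
#define DEMO_GROUP_CODE(op) (((op) >> 5) & 0x7)

int main(void)
{
	unsigned char inquiry = 0x12;     /* group 0: 6-byte CDB  */
	unsigned char report_luns = 0xa0; /* group 5: 12-byte CDB */

	printf("INQUIRY:     group %u, cmd 0x%02x\n",
	       DEMO_GROUP_CODE(inquiry), DEMO_CMD_CODE(inquiry));
	printf("REPORT LUNS: group %u, cmd 0x%02x\n",
	       DEMO_GROUP_CODE(report_luns), DEMO_CMD_CODE(report_luns));
	return 0;
}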
* * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * @@ -26,7 +26,6 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/lockdep.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -35,11 +34,12 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include <linux/aer.h> -#ifdef CONFIG_X86 -#include <asm/set_memory.h> -#endif - +#include <linux/crash_dump.h> +#if defined(BUILD_NVME) +#if (IS_ENABLED(CONFIG_NVME_FC)) #include <linux/nvme-fc-driver.h> +#endif +#endif #include "lpfc_hw4.h" #include "lpfc_hw.h" @@ -87,6 +87,13 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); +static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); +static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct lpfc_cqe *cqe); + +extern int lpfc_fabric_cgn_frequency; +extern int lpfc_acqe_cgn_frequency; static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -138,12 +145,15 @@ static int lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) { union lpfc_wqe *temp_wqe; + struct lpfc_hba *phba = q->phba; struct lpfc_register doorbell; uint32_t host_index; uint32_t idx; +#ifndef BUILD_BRCMFCOE uint32_t i = 0; uint8_t *tmp; u32 if_type; +#endif /* sanity check on queue memory */ if (unlikely(!q)) @@ -153,6 +163,10 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) /* If the host has not yet processed the next entry then we are done */ idx = ((q->host_index + 1) % q->entry_count); if (idx == q->hba_index) { + lpfc_throttle_log(q->phba, &q->phba->pport->log, LOG_SLI, + "9998 WQ failure. WQ Full. 
" + "q_id x%x, host x%x, hba x%x\n", + q->queue_id, idx, q->hba_index); q->WQ_overflow++; return -EBUSY; } @@ -162,22 +176,22 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); else bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); - if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) + if (phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); - if (q->dpp_enable && q->phba->cfg_enable_dpp) { +#ifndef BUILD_BRCMFCOE + if (q->dpp_enable && phba->cfg_enable_dpp) { /* write to DPP aperture taking advatage of Combined Writes */ - tmp = (uint8_t *)temp_wqe; -#ifdef __raw_writeq + tmp = (uint8_t *)wqe; +#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN) for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) - __raw_writeq(*((uint64_t *)(tmp + i)), - q->dpp_regaddr + i); + writeq(*((uint64_t *)(tmp + i)), q->dpp_regaddr + i); #else for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) - __raw_writel(*((uint32_t *)(tmp + i)), - q->dpp_regaddr + i); + writel(*((uint32_t *)(tmp + i)), q->dpp_regaddr + i); #endif } +#endif /* ensure WQE bcopy and DPP flushed before doorbell write */ wmb(); @@ -189,7 +203,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) /* Ring Doorbell */ doorbell.word0 = 0; if (q->db_format == LPFC_DB_LIST_FORMAT) { - if (q->dpp_enable && q->phba->cfg_enable_dpp) { +#ifndef BUILD_BRCMFCOE + if (q->dpp_enable && phba->cfg_enable_dpp) { bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, @@ -202,11 +217,16 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) /* Leave bits <23:16> clear for if_type 6 dpp */ if_type = bf_get(lpfc_sli_intf_if_type, - &q->phba->sli4_hba.sli_intf); + &phba->sli4_hba.sli_intf); if (if_type != LPFC_SLI_INTF_IF_TYPE_6) bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); } +#else + bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); + bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); + bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); +#endif } else if (q->db_format == LPFC_DB_RING_FORMAT) { bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); @@ -226,25 +246,16 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) * This routine will update the HBA index of a queue to reflect consumption of * Work Queue Entries by the HBA. When the HBA indicates that it has consumed * an entry the host calls this function to update the queue's internal - * pointers. This routine returns the number of entries that were consumed by - * the HBA. + * pointers. 
**/ -static uint32_t +static void lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) { - uint32_t released = 0; - /* sanity check on queue memory */ if (unlikely(!q)) - return 0; + return; - if (q->hba_index == index) - return 0; - do { - q->hba_index = ((q->hba_index + 1) % q->entry_count); - released++; - } while (q->hba_index != index); - return released; + q->hba_index = index; } /** @@ -415,7 +426,7 @@ lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ - if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) + if (q->phba->intr_type == INTx && arm == LPFC_QUEUE_REARM) readl(q->phba->sli4_hba.EQDBregaddr); } @@ -442,13 +453,14 @@ lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, /* ring doorbell for number popped */ doorbell.word0 = 0; - if (arm) + if (arm) { bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); + } bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ - if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) + if (q->phba->intr_type == INTx && arm == LPFC_QUEUE_REARM) readl(q->phba->sli4_hba.EQDBregaddr); } @@ -467,25 +479,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, } static void -lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) +lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) { - struct lpfc_eqe *eqe; - uint32_t count = 0; + struct lpfc_eqe *eqe = NULL; + u32 eq_count = 0, cq_count = 0; + struct lpfc_cqe *cqe = NULL; + struct lpfc_queue *cq = NULL, *childq = NULL; + int cqid = 0; /* walk all the EQ entries and drop on the floor */ eqe = lpfc_sli4_eq_get(eq); while (eqe) { + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + cq = NULL; + + list_for_each_entry(childq, &eq->child_list, list) { + if (childq->queue_id == cqid) { + cq = childq; + break; + } + } + /* If CQ is valid, iterate through it and drop all the CQEs */ + if (cq) { + cqe = lpfc_sli4_cq_get(cq); + while (cqe) { + __lpfc_sli4_consume_cqe(phba, cq, cqe); + cq_count++; + cqe = lpfc_sli4_cq_get(cq); + } + /* Clear and re-arm the CQ */ + phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, + LPFC_QUEUE_REARM); + cq_count = 0; + } __lpfc_sli4_consume_eqe(phba, eq, eqe); - count++; + eq_count++; eqe = lpfc_sli4_eq_get(eq); } /* Clear and re-arm the EQ */ - phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); + phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); } static int -lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, + uint8_t rearm) { struct lpfc_eqe *eqe; int count = 0, consumed = 0; @@ -516,11 +555,11 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) if (count > eq->EQ_max_eqe) eq->EQ_max_eqe = count; - eq->queue_claimed = 0; + xchg(&eq->queue_claimed, 0); rearm_and_exit: - /* Always clear and re-arm the EQ */ - phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); + /* Always clear the EQ. 
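Note the claim release in lpfc_sli4_process_eq changing from a plain store to xchg(&eq->queue_claimed, 0): xchg() is a fully ordered atomic on Linux, so every EQE consumed before the release is visible to the next CPU that wins the claim. An illustrative sketch of the single-claimer pattern (kernel-context code, hypothetical demo_* names):

#include <linux/types.h>
#include <linux/atomic.h>

struct demo_eq {
	unsigned int queue_claimed;
};

/* Returns true if this context won the right to process the EQ. */
static bool demo_eq_try_claim(struct demo_eq *eq)
{
	return xchg(&eq->queue_claimed, 1) == 0;
}

/* Ordered release: all prior EQE processing is published before
 * the flag reads as 0 anywhere else.
 */
static void demo_eq_release(struct demo_eq *eq)
{
	xchg(&eq->queue_claimed, 0);
}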
*/ + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); return count; } @@ -776,8 +815,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; struct lpfc_iocbq * iocbq = NULL; - lockdep_assert_held(&phba->hbalock); - list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); if (iocbq) phba->iocb_cnt++; @@ -991,9 +1028,10 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * @ndlp: Targets nodelist pointer for this exchange. * @xritag the xri in the bitmap to test. * - * This function returns: - * 0 = rrq not active for this xri - * 1 = rrq is valid for this xri. + * This function relies on the atomic test_bit and does + * not require a lock. + * returns 0 = rrq not active for this xri + * 1 = rrq is valid for this xri. **/ int lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, @@ -1060,7 +1098,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, goto out; spin_unlock_irqrestore(&phba->hbalock, iflags); - rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); + rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC); if (!rrq) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" @@ -1101,11 +1139,10 @@ out: * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. * - * The driver calls this function with either the nvme ls ring lock - * or the fc els ring lock held depending on the iocb usage. This function - * gets a new driver sglq object from the sglq list. If the list is not empty - * then it is successful, it returns pointer to the newly allocated sglq - * object else it returns NULL. + * This function is called with the ELS ring lock held. This function + * gets a new driver sglq object from the sglq list. If the + * list is not empty then it is successful, it returns pointer to the newly + * allocated sglq object else it returns NULL. **/ static struct lpfc_sglq * __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) @@ -1115,15 +1152,8 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) struct lpfc_sglq *start_sglq = NULL; struct lpfc_io_buf *lpfc_cmd; struct lpfc_nodelist *ndlp; - struct lpfc_sli_ring *pring = NULL; int found = 0; - if (piocbq->iocb_flag & LPFC_IO_NVME_LS) - pring = phba->sli4_hba.nvmels_wq->pring; - else - pring = lpfc_phba_elsring(phba); - - lockdep_assert_held(&pring->ring_lock); if (piocbq->iocb_flag & LPFC_IO_FCP) { lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; @@ -1247,8 +1277,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) unsigned long iflag = 0; struct lpfc_sli_ring *pring; - lockdep_assert_held(&phba->hbalock); - if (iocbq->sli4_xritag == NO_XRI) sglq = NULL; else @@ -1300,7 +1328,7 @@ out: memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; - iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | + iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1321,8 +1349,6 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); - lockdep_assert_held(&phba->hbalock); - /* * Clean all volatile data fields, preserve iotag and node struct. 
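Several hunks in this region drop lockdep_assert_held() from helpers whose callers now take different locks (hbalock on SLI-3, ring_lock on SLI-4), which a single static assertion can no longer express. Where the lock is fixed, upstream tends to keep such annotations: with lockdep enabled they turn a violated locking contract into an immediate warning instead of silent corruption, and they compile away otherwise. A sketch of the annotation on a hypothetical helper:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

/* Locked variant: documents and, under lockdep, enforces that
 * demo_lock protects demo_count.
 */
static void __demo_put(void)
{
	lockdep_assert_held(&demo_lock);
	demo_count--;
}

static void demo_put(void)
{
	spin_lock(&demo_lock);
	__demo_put();
	spin_unlock(&demo_lock);
}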
*/ @@ -1344,8 +1370,6 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) static void __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { - lockdep_assert_held(&phba->hbalock); - phba->__lpfc_sli_release_iocbq(phba, iocbq); phba->iocb_cnt--; } @@ -1391,15 +1415,19 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); - if (!piocb->iocb_cmpl) { - if (piocb->iocb_flag & LPFC_IO_NVME) - lpfc_nvme_cancel_iocb(phba, piocb); - else + if (piocb->wqe_cmpl) { + if (piocb->iocb_flag & LPFC_IO_NVME) { + lpfc_nvme_cancel_iocb(phba, piocb, + ulpstatus, ulpWord4); + } else { lpfc_sli_release_iocbq(phba, piocb); - } else { + } + } else if (piocb->iocb_cmpl) { piocb->iocb.ulpStatus = ulpstatus; piocb->iocb.un.ulpWord[4] = ulpWord4; (piocb->iocb_cmpl) (phba, piocb, piocb); + } else { + lpfc_sli_release_iocbq(phba, piocb); } } return; @@ -1474,6 +1502,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case DSSCMD_IWRITE64_CX: case DSSCMD_IREAD64_CR: case DSSCMD_IREAD64_CX: + case CMD_SEND_FRAME: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: @@ -1548,7 +1577,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) lpfc_config_ring(phba, i, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0446 Adapter failed to init (%d), " "mbxCmd x%x CFG_RING, mbxStatus x%x, " "ring %d\n", @@ -1569,23 +1598,19 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to the driver iocb object. * - * The driver calls this function with the hbalock held for SLI3 ports or - * the ring lock held for SLI4 ports. The function adds the - * new iocb to txcmplq of the given ring. This function always returns - * 0. If this function is called for ELS ring, this function checks if - * there is a vport associated with the ELS command. This function also - * starts els_tmofunc timer if this is an ELS command. + * This function is called with hbalock(sli3) or the ringlock(sli4) + * held. The function adds the new iocb to txcmplq of the given ring. + * This function always returns 0. If this function is called for ELS + * ring, this function checks if there is a vport associated with the + * ELS command. This function also starts els_tmofunc timer if this is + * an ELS command. 
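The reworked lpfc_sli_cancel_iocbs above picks a teardown path per request from whichever completion hook it carries: WQE-style completions route NVMe commands to the NVMe cancel helper, legacy iocb_cmpl callbacks are invoked with a synthesized status, and requests with neither go straight back to the free pool. A condensed sketch of that three-way dispatch with hypothetical types (the real code also keys off LPFC_IO_NVME inside the first branch):

struct demo_req {
	void (*wqe_cmpl)(struct demo_req *rq, int status, int word4);
	void (*iocb_cmpl)(struct demo_req *rq, int status, int word4);
};

static void demo_release(struct demo_req *rq)
{
	(void)rq;	/* return the request to the free pool */
}

static void demo_cancel(struct demo_req *rq, int status, int word4)
{
	if (rq->wqe_cmpl)		/* SLI-4 / NVMe-style hook */
		rq->wqe_cmpl(rq, status, word4);
	else if (rq->iocb_cmpl)		/* SLI-3-style callback */
		rq->iocb_cmpl(rq, status, word4);
	else				/* no waiter: just free it */
		demo_release(rq);
}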
**/ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { - if (phba->sli_rev == LPFC_SLI_REV4) - lockdep_assert_held(&pring->ring_lock); - else - lockdep_assert_held(&phba->hbalock); - BUG_ON(!piocb); + BUG_ON(!piocb || !piocb->vport); list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; @@ -1593,13 +1618,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && - (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { - BUG_ON(!piocb->vport); - if (!(piocb->vport->load_flag & FC_UNLOADING)) - mod_timer(&piocb->vport->els_tmofunc, - jiffies + - msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); - } + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && + (!(piocb->vport->load_flag & FC_UNLOADING))) + mod_timer(&piocb->vport->els_tmofunc, + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); return 0; } @@ -1619,12 +1642,256 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_iocbq *cmd_iocb; - lockdep_assert_held(&phba->hbalock); - list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); return cmd_iocb; } +/** + * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to driver command iocb object. + * @cmf_cmpl: Pointer to driver response cqe object. + * + * This routine will inform the driver of any BW adjustments we need + * to make. These changes will be picked up during the next CMF + * timer interrupt. In addition, any BW changes will be logged + * with LOG_CGN_MGMT. + **/ +void +lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_wcqe_complete *cmf_cmpl) +{ + union lpfc_wqe128 *wqe; + uint32_t status, info; + uint64_t bw, bwdif, slop; + uint64_t pcent, bwpcent; + int asig, afpin, sigcnt, fpincnt; + int wsigmax, wfpinmax, cg, tdp; + char *s; + + /* First check for error */ + status = bf_get(lpfc_wcqe_c_status, cmf_cmpl); + if (status) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6211 CMF_SYNC_WQE Error " + "req_tag x%x status x%x hwstatus x%x " + "tdatap x%x parm x%x\n", + bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl), + bf_get(lpfc_wcqe_c_status, cmf_cmpl), + bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl), + cmf_cmpl->total_data_placed, + cmf_cmpl->parameter); + goto out; + } + + /* Gather congestion information on a successful cmpl */ + info = cmf_cmpl->parameter; + phba->cmf_active_info = info; + + /* See if firmware info count is valid or has changed */ + if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) + info = 0; + else + phba->cmf_info_per_interval = info; + + tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl); + cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl); + + /* Get BW requirement from firmware */ + bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; + if (!bw) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6212 CMF_SYNC_WQE x%x: NULL bw\n", + bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl)); + goto out; + } + + /* Gather information needed for logging if a BW change is required */ + wqe = &cmdiocb->wqe; + asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); + afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); + fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); + sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); + if (phba->cmf_max_bytes_per_interval != bw || + (asig || afpin || sigcnt || fpincnt)) { + /* Are we increasing or decreasing BW 
*/ + if (phba->cmf_max_bytes_per_interval < bw) { + bwdif = bw - phba->cmf_max_bytes_per_interval; + s = "Increase"; + } else { + bwdif = phba->cmf_max_bytes_per_interval - bw; + s = "Decrease"; + } + + /* What is the change percentage */ + slop = (phba->cmf_link_byte_count / 200); /* For rounding */ + pcent = ((bwdif * 100) + slop) / phba->cmf_link_byte_count; + bwpcent = ((bw * 100) + slop) / phba->cmf_link_byte_count; + if (asig) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6237 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (afpin) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6238 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Alarm: cg:%d " + "Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } else if (sigcnt) { + wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6239 BW Threshold %lld%% (%lld): " + "%lld%% %s: Signal Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, sigcnt, + wsigmax, cg, phba->cmf_active_info); + } else if (fpincnt) { + wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6240 BW Threshold %lld%% (%lld): " + "%lld%% %s: FPIN Warning: " + "Cnt %d Max %d: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, fpincnt, + wfpinmax, cg, phba->cmf_active_info); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6241 BW Threshold %lld%% (%lld): " + "CMF %lld%% %s: cg:%d Info:%u\n", + bwpcent, bw, pcent, s, cg, + phba->cmf_active_info); + } + } else if (info) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6246 Info Threshold %u\n", info); + } + + /* Save BW change to be picked up during next timer interrupt */ + phba->cmf_last_sync_bw = bw; +out: + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +/** + * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE + * @phba: Pointer to HBA context object. + * @ms: ms to set in WQE interval, 0 means use init op + * @total Total rcv bytes for this interval + * + * This routine is called every CMF timer interrupt. Its purpose is + * to issue a CMF_SYNC_WQE to the firmware to inform it of any events + * that may indicate we have congestion (FPINs or Signals). Upon + * completion, the firmware will indicate any BW restrictions the + * driver may need to take. + **/ +int +lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) +{ + union lpfc_wqe128 *wqe; + struct lpfc_iocbq *sync_buf; + unsigned long iflags; + u32 ret_val; + u32 atot, wtot, max; + + /* First address any alarm / warning activity */ + atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); + wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0); + + /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ + if (phba->cmf_active_mode != LPFC_CFG_MANAGED || + phba->link_state == LPFC_LINK_DOWN) + return 0; + + spin_lock_irqsave(&phba->hbalock, iflags); + sync_buf = __lpfc_sli_get_iocbq(phba); + if (!sync_buf) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6213 No available WQEs for CMF_SYNC_WQE\n"); + ret_val = ENOMEM; + goto out_unlock; + } + + wqe = &sync_buf->wqe; + + /* WQEs are reused. 
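The percentage math above adds a slop of cmf_link_byte_count/200 to the scaled numerator, i.e. 1/200 of one percent, before the integer divide; that absorbs the case where an earlier integer division left a value a hair under a whole percent, while leaving ordinary quotients untouched. A small runnable demonstration:

#include <stdio.h>
#include <stdint.h>

/* (part*100 + total/200) / total: nudges the quotient up by
 * 0.005%, soaking up truncation error from earlier divides.
 */
static uint64_t pct_with_slop(uint64_t part, uint64_t total)
{
	uint64_t slop = total / 200;

	return (part * 100 + slop) / total;
}

int main(void)
{
	uint64_t total = 1000000;
	uint64_t part = 159999;	/* a "16%" that integer math clipped */

	printf("plain: %llu%%\n",
	       (unsigned long long)(part * 100 / total));        /* 15 */
	printf("slop:  %llu%%\n",
	       (unsigned long long)pct_with_slop(part, total));  /* 16 */
	return 0;
}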
Clear stale data and set key fields to zero */ + memset(wqe, 0, sizeof(*wqe)); + + /* If this is the very first CMF_SYNC_WQE, issue an init operation */ + if (!ms) { + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6441 CMF Init %d - CMF_SYNC_WQE\n", + phba->fc_eventTag); + bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL); + goto initpath; + } + + bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */ + bf_set(cmf_sync_interval, &wqe->cmf_sync, ms); + + /* Check for alarms / warnings */ + if (atot) { + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + /* We hit an Signal alarm condition */ + bf_set(cmf_sync_asig, &wqe->cmf_sync, 1); + } else { + /* We hit a FPIN alarm condition */ + bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1); + } + } else if (wtot) { + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + /* We hit an Signal warning condition */ + max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency * + lpfc_acqe_cgn_frequency; + bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); + bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); + } else { + /* We hit a FPIN warning condition */ + bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1); + bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1); + } + } + + /* Update total read blocks during previous timer interval */ + wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE); + +initpath: + bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER); + wqe->cmf_sync.event_tag = phba->fc_eventTag; + bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE); + + /* Setup reqtag to match the wqe completion. */ + bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); + + bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); + + bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); + bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); + bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); + + sync_buf->vport = phba->pport; + sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl; + sync_buf->iocb_cmpl = NULL; + sync_buf->context1 = NULL; + sync_buf->context2 = NULL; + sync_buf->context3 = NULL; + sync_buf->sli4_xritag = NO_XRI; + + sync_buf->iocb_flag |= LPFC_IO_CMF; + ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); + if (ret_val) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6214 Cannot issue CMF_SYNC_WQE: x%x\n", + ret_val); +out_unlock: + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret_val; +} + /** * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring * @phba: Pointer to HBA context object. 
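lpfc_issue_cmf_sync_wqe drains the congestion counters with atomic_xchg(..., 0), so reading the accumulated alarm and warning counts and resetting them is one indivisible step: events recorded concurrently by the notification path cannot fall between a read and a separate clear. The snapshot then drives the alarm/warning bits set in the hunk above. The idiom, sketched with a hypothetical counter:

#include <linux/atomic.h>

static atomic_t demo_warn_cnt = ATOMIC_INIT(0);

/* Producer side (e.g. async event handler). */
static void demo_record_warning(void)
{
	atomic_inc(&demo_warn_cnt);
}

/* Consumer side (e.g. timer path): snapshot and zero atomically.
 * Returns the count observed; the counter is left at zero.
 */
static int demo_drain_warnings(void)
{
	return atomic_xchg(&demo_warn_cnt, 0);
}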
@@ -1645,8 +1912,6 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; - lockdep_assert_held(&phba->hbalock); - if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) pring->sli.sli3.next_cmdidx = 0; @@ -1657,7 +1922,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0315 Ring %d issue: portCmdGet %d " "is bigger than cmd ring %d\n", pring->ringno, @@ -1717,7 +1982,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) - LPFC_IOCBQ_LOOKUP_INCREMENT)) { new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; spin_unlock_irq(&phba->hbalock); - new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), + new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), GFP_KERNEL); if (new_arr) { spin_lock_irq(&phba->hbalock); @@ -1767,7 +2032,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) * @nextiocb: Pointer to driver iocb object which need to be * posted to firmware. * - * This function is called with hbalock held to post a new iocb to + * This function is called from the normal IO path and from the ISR. * the firmware. This function copies the new iocb to ring iocb slot and * updates the ring pointers. It adds the new iocb to txcmplq if there is * a completion call back for this iocb else the function will free the @@ -1777,7 +2042,6 @@ static void lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t *iocb, struct lpfc_iocbq *nextiocb) { - lockdep_assert_held(&phba->hbalock); /* * Set up an iotag */ @@ -1877,7 +2141,8 @@ lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * - * This function is called with hbalock held to post pending iocbs + * This function is called from the normal IO path and from + * within the isr code path. * in the txq to the firmware. This function is called when driver * detects space available in the ring. 
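This hunk and several below swap kcalloc(n, size, ...) for kzalloc(n * size, ...). Both zero the allocation, but kcalloc additionally checks the n * size multiply for overflow and returns NULL rather than handing back a short buffer, which is why upstream prefers it; the open-coded product is safe only while both factors stay small and driver-controlled, as these lookup-table sizes do. A one-function sketch of the distinction (hypothetical name):

#include <linux/slab.h>

/* kcalloc zeroes like kzalloc but also fails cleanly if
 * n * entry_size would wrap; kzalloc(n * entry_size, ...)
 * trusts the caller's arithmetic.
 */
static void *demo_alloc_table(size_t n, size_t entry_size)
{
	return kcalloc(n, entry_size, GFP_KERNEL);
}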
**/ @@ -1887,7 +2152,6 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) IOCB_t *iocb; struct lpfc_iocbq *nextiocb; - lockdep_assert_held(&phba->hbalock); /* * Check to see if: @@ -1930,8 +2194,6 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) { struct hbq_s *hbqp = &phba->hbqs[hbqno]; - lockdep_assert_held(&phba->hbalock); - if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && ++hbqp->next_hbqPutIdx >= hbqp->entry_count) hbqp->next_hbqPutIdx = 0; @@ -1943,8 +2205,7 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) hbqp->local_hbqGetIdx = getidx; if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { - lpfc_printf_log(phba, KERN_ERR, - LOG_SLI | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1802 HBQ %d: local_hbqGetIdx " "%u is > than hbqp->entry_count %u\n", hbqno, hbqp->local_hbqGetIdx, @@ -2013,7 +2274,6 @@ static int lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { - lockdep_assert_held(&phba->hbalock); return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); } @@ -2035,7 +2295,6 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; - lockdep_assert_held(&phba->hbalock); /* Get next HBQ entry slot to use */ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); if (hbqe) { @@ -2083,7 +2342,6 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, hrq = phba->sli4_hba.hdr_rq; drq = phba->sli4_hba.dat_rq; - lockdep_assert_held(&phba->hbalock); hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); @@ -2283,7 +2541,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) } } spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1803 Bad hbq tag. Data: x%x x%x\n", tag, phba->hbqs[tag >> 16].buffer_count); return NULL; @@ -2480,6 +2738,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) !pmb->u.mb.mbxStatus) { rpi = pmb->u.mb.un.varWords[0]; vpi = pmb->u.mb.un.varRegLogin.vpi; + if (phba->sli_rev == LPFC_SLI_REV4) + vpi -= phba->sli4_hba.max_cfg_param.vpi_base; lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->vport = vport; pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -2535,7 +2795,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Check security permission status on INIT_LINK mailbox command */ if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2860 SLI authentication is required " "for INIT_LINK but has not done yet\n"); @@ -2671,10 +2931,11 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) { /* Unknown mailbox command compl */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0323 Unknown Mailbox command " "x%x (x%x/x%x) Cmpl\n", - pmb->vport ? pmb->vport->vpi : 0, + pmb->vport ? pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2695,7 +2956,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " "(x%x/x%x) x%x x%x x%x\n", - pmb->vport ? pmb->vport->vpi : 0, + pmb->vport ? 
pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2703,7 +2965,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmb), pmbox->mbxStatus, pmbox->un.varWords[0], - pmb->vport->port_state); + pmb->vport ? pmb->vport->port_state : + LPFC_VPORT_UNKNOWN); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -2996,8 +3259,11 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * * This function looks up the iocb_lookup table to get the command iocb * corresponding to the given response iocb using the iotag of the - * response iocb. The driver calls this function with the hbalock held - * for SLI3 ports or the ring lock held for SLI4 ports. + * response iocb. This function is called with the hbalock held + * for sli3 devices or the ring_lock for sli4 devices. + * except when the LPFC_FCP_POLLING module parameter is set and + * lpfc_queuecommand + * call the lpfc_handle_fast_ring_event. * This function returns the command iocb object if it finds the command * iocb else returns NULL. **/ @@ -3008,15 +3274,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, { struct lpfc_iocbq *cmd_iocb = NULL; uint16_t iotag; - spinlock_t *temp_lock = NULL; - unsigned long iflag = 0; - if (phba->sli_rev == LPFC_SLI_REV4) - temp_lock = &pring->ring_lock; - else - temp_lock = &phba->hbalock; - - spin_lock_irqsave(temp_lock, iflag); iotag = prspiocb->iocb.ulpIoTag; if (iotag != 0 && iotag <= phba->sli.last_iotag) { @@ -3026,13 +3284,11 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; - spin_unlock_irqrestore(temp_lock, iflag); return cmd_iocb; } } - spin_unlock_irqrestore(temp_lock, iflag); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0317 iotag x%x is out of " "range: max iotag x%x wd0 x%x\n", iotag, phba->sli.last_iotag, @@ -3047,8 +3303,8 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, * @iotag: IOCB tag. * * This function looks up the iocb_lookup table to get the command iocb - * corresponding to the given iotag. The driver calls this function with - * the ring lock held because this function is an SLI4 port only helper. + * corresponding to the given iotag. This function is called with the + * hbalock held. * This function returns the command iocb object if it finds the command * iocb else returns NULL. 
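Both lookup helpers here resolve a completed command in O(1) by indexing iocbq_lookup with the hardware-returned iotag, bounds-checked against last_iotag, and then insist the entry is still marked outstanding (LPFC_IO_ON_TXCMPLQ) before unlinking it; with this patch the spinlock moves out of the helpers into the callers, as the surrounding hunks show. A stripped-down sketch of the lookup itself, with hypothetical types and no locking:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_cmd {
	bool on_cmplq;		/* set while the command is in flight */
};

struct demo_table {
	struct demo_cmd **lookup;	/* indexed by iotag; [0] unused */
	uint16_t last_iotag;		/* highest tag ever handed out  */
};

static struct demo_cmd *demo_lookup(struct demo_table *t, uint16_t iotag)
{
	struct demo_cmd *cmd;

	if (iotag == 0 || iotag > t->last_iotag)
		return NULL;		/* stale or corrupt tag */
	cmd = t->lookup[iotag];
	if (!cmd || !cmd->on_cmplq)
		return NULL;		/* already completed or aborted */
	cmd->on_cmplq = false;		/* claim the completion */
	return cmd;
}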
**/ @@ -3057,15 +3313,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint16_t iotag) { struct lpfc_iocbq *cmd_iocb = NULL; - spinlock_t *temp_lock = NULL; - unsigned long iflag = 0; - if (phba->sli_rev == LPFC_SLI_REV4) - temp_lock = &pring->ring_lock; - else - temp_lock = &phba->hbalock; - - spin_lock_irqsave(temp_lock, iflag); if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { @@ -3073,13 +3321,11 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; - spin_unlock_irqrestore(temp_lock, iflag); return cmd_iocb; } } - spin_unlock_irqrestore(temp_lock, iflag); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0372 iotag x%x lookup error: max iotag (x%x) " "iocb_flag x%x\n", iotag, phba->sli.last_iotag, @@ -3112,7 +3358,17 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int rc = 1; unsigned long iflag; + /* Based on the iotag field, get the cmd IOCB from the txcmplq */ + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock_irqsave(&pring->ring_lock, iflag); + else + spin_lock_irqsave(&phba->hbalock, iflag); cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock_irqrestore(&pring->ring_lock, iflag); + else + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (cmdiocbp) { if (cmdiocbp->iocb_cmpl) { /* @@ -3255,7 +3511,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * Ring <ringno> handler: portRspPut <portRspPut> is bigger than * rsp ring <portRspMax> */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0312 Ring %d handler: portRspPut %d " "is bigger than rsp ring %d\n", pring->ringno, le32_to_cpu(pgp->rspPutInx), @@ -3285,13 +3541,21 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * and wake up worker thread to process it. Otherwise, it will set up the * Error Attention polling timer for the next poll. 
**/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_poll_eratt(unsigned long ptr) +#else void lpfc_poll_eratt(struct timer_list *t) +#endif { struct lpfc_hba *phba; uint32_t eratt = 0; uint64_t sli_intr, cnt; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + phba = (struct lpfc_hba *)ptr; +#else phba = from_timer(phba, t, eratt_poll); +#endif /* Here we will also keep track of interrupts per sec of the hba */ sli_intr = phba->sli.slistat.sli_intr; @@ -3443,14 +3707,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, break; } - spin_unlock_irqrestore(&phba->hbalock, iflag); cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); - spin_lock_irqsave(&phba->hbalock, iflag); if (unlikely(!cmdiocbq)) break; - if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) - cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; if (cmdiocbq->iocb_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, @@ -3474,7 +3734,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, phba->brd_no, adaptermsg); } else { /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", type, irsp->ulpCommand, @@ -3640,12 +3900,9 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case LPFC_ABORT_IOCB: cmdiocbp = NULL; - if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { - spin_unlock_irqrestore(&phba->hbalock, iflag); + if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - } if (cmdiocbp) { /* Call the specified completion routine */ if (cmdiocbp->iocb_cmpl) { @@ -3672,7 +3929,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->brd_no, adaptermsg); } else { /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0335 Unknown IOCB " "command Data: x%x " "x%x x%x x%x\n", @@ -3752,7 +4009,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, * Ring <ringno> handler: portRspPut <portRspPut> is bigger than * rsp ring <portRspMax> */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0303 Ring %d handler: portRspPut %d " "is bigger than rsp ring %d\n", pring->ringno, portRspPut, portRspMax); @@ -3954,6 +4211,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) lpfc_sli_issue_abort_iotag(phba, pring, iocb); spin_unlock_irq(&phba->hbalock); } + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -4010,6 +4269,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) struct lpfc_iocbq *piocb, *next_iocb; spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_IOQ_FLUSH || + !phba->sli4_hba.hdwq) { + spin_unlock_irq(&phba->hbalock); + return; + } /* Indicate the I/O queues are flushed */ phba->hba_flag |= HBA_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); @@ -4119,7 +4383,7 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) /* Check to see if any errors occurred during init */ if ((status & HS_FFERM) || (i >= 20)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2751 Adapter failed to restart, " "status reg x%x, FW Data: A8 x%x AC x%x\n", status, @@ -4165,6 +4429,7 @@ 
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) } else phba->sli4_hba.intr_enable = 0; + phba->hba_flag &= ~HBA_SETUP; return retval; } @@ -4189,7 +4454,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) * @phba: Pointer to HBA context object. * * This function is called before resetting an HBA. This function is called - * with hbalock held and requests HBA to quiesce DMAs before a reset. + * from lpfc_work_list_done which releases the hbalock and then calls + * lpfc_reset_barrier to requests HBA to quiesce DMAs before a reset. **/ void lpfc_reset_barrier(struct lpfc_hba *phba) { @@ -4200,7 +4466,6 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) int i; uint8_t hdrtype; - lockdep_assert_held(&phba->hbalock); pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); if (hdrtype != 0x80 || @@ -4341,7 +4606,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2752 KILL_BOARD command failed retval %d\n", retval); spin_lock_irq(&phba->hbalock); @@ -4484,6 +4749,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->link_events = 0; phba->pport->fc_myDID = 0; phba->pport->fc_prevDID = 0; + phba->hba_flag &= ~HBA_SETUP; spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~(LPFC_PROCESS_LA); @@ -4582,7 +4848,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); - psli->stats_start = ktime_get_seconds(); + psli->stats_start = get_seconds(); /* Give the INITFF and Post time to settle. */ mdelay(100); @@ -4633,7 +4899,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); - psli->stats_start = ktime_get_seconds(); + psli->stats_start = get_seconds(); /* Reset HBA AER if it was enabled, note hba_flag was reset above */ if (hba_aer_enabled) @@ -4693,7 +4959,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) if (i++ >= 200) { /* Adapter failed to init, timeout, status reg <status> */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0436 Adapter failed to init, " "timeout, status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, @@ -4708,7 +4974,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0437 Adapter failed to init, " "chipset, status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, @@ -4739,7 +5005,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) if (status & HS_FFERM) { /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0438 Adapter failed to init, chipset, " "status reg x%x, " "FW Data: A8 x%x AC x%x\n", status, @@ -4884,8 +5150,17 @@ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; - phba->hbqs[LPFC_ELS_HBQ].entry_count = - lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; + /** + * Specific case when the MDS diagnostics is enabled and supported. + * The receive buffer count is truncated to manage the incoming + * traffic. 
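Two backport accommodations sit in this stretch: lpfc_poll_eratt is compiled two ways because the timer callback signature changed at the 4.15 cutoff the patch tests for (an opaque unsigned long cookie before, struct timer_list * plus from_timer() after), and stats_start reverts from ktime_get_seconds() to get_seconds(), which older kernels provide but upstream later removed during the y2038 cleanup. A minimal sketch of the timer shim, assuming the same version gate and hypothetical demo_* names:

#include <linux/timer.h>
#include <linux/version.h>

struct demo_hba {
	struct timer_list poll_timer;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
/* Old API: setup code passed (unsigned long)hba as the cookie. */
static void demo_poll(unsigned long ptr)
{
	struct demo_hba *hba = (struct demo_hba *)ptr;

	(void)hba;
}
#else
/* New API: recover the enclosing object from the timer member. */
static void demo_poll(struct timer_list *t)
{
	struct demo_hba *hba = from_timer(hba, t, poll_timer);

	(void)hba;
}
#endif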
+ **/ + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; + else + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ @@ -4953,7 +5228,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) LPFC_SLI3_CRP_ENABLED | LPFC_SLI3_DSS_ENABLED); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); @@ -4991,23 +5266,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) } else phba->max_vpi = 0; - phba->fips_level = 0; - phba->fips_spec_rev = 0; - if (pmb->u.mb.un.varCfgPort.gdss) { - phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; - phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; - phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2850 Security Crypto Active. FIPS x%d " - "(Spec Rev: x%d)", - phba->fips_level, phba->fips_spec_rev); - } - if (pmb->u.mb.un.varCfgPort.sec_err) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2856 Config Port Security Crypto " - "Error: x%x ", - pmb->u.mb.un.varCfgPort.sec_err); - } if (pmb->u.mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; if (pmb->u.mb.un.varCfgPort.gcrp) @@ -5016,11 +5274,17 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; phba->port_gp = phba->mbox->us.s3_pgp.port; + /* + * If the port cannot support the host's requested features + * then turn off the global config parameters to disable the + * feature in the driver. This is not a fatal error. + */ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { if (pmb->u.mb.un.varCfgPort.gbg == 0) { phba->cfg_enable_bg = 0; + phba->cfg_external_dif = 0; phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0443 Adapter did not grant " "BlockGuard\n"); } @@ -5059,7 +5323,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) switch (phba->cfg_sli_mode) { case 2: if (phba->cfg_enable_npiv) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1824 NPIV enabled: Override sli_mode " "parameter (%d) to auto (0).\n", phba->cfg_sli_mode); @@ -5071,7 +5335,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) case 3: break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1819 Unrecognized sli_mode parameter: %d.\n", phba->cfg_sli_mode); @@ -5082,7 +5346,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli_config_port(phba, mode); if (rc && phba->cfg_sli_mode == 3) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1820 Unable to select SLI-3. 
" "Not supported by adapter.\n"); if (rc && mode != 2) @@ -5117,7 +5381,6 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) } else { phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; - phba->sli3_options = 0; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -5137,17 +5400,16 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) */ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; - phba->vpi_bmask = kcalloc(longs, - sizeof(unsigned long), + phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (!phba->vpi_bmask) { rc = -ENOMEM; goto lpfc_sli_hba_setup_error; } - phba->vpi_ids = kcalloc(phba->max_vpi + 1, - sizeof(uint16_t), - GFP_KERNEL); + phba->vpi_ids = kzalloc( + (phba->max_vpi+1) * sizeof(uint16_t), + GFP_KERNEL); if (!phba->vpi_ids) { kfree(phba->vpi_bmask); rc = -ENOMEM; @@ -5176,7 +5438,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) lpfc_sli_hba_setup_error: phba->link_state = LPFC_HBA_ERROR; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0445 Firmware initialization failed\n"); return rc; } @@ -5373,7 +5635,7 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3084 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, reqlen); @@ -5404,16 +5666,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); phba->sli4_hba.lnk_info.lnk_no = bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); + phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); + phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", + "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " + "flash_id: x%02x, asic_rev: x%02x\n", phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no, - phba->BIOSVersion); + phba->BIOSVersion, phba->sli4_hba.flash_id, + phba->sli4_hba.asic_rev); out_free_mboxq: if (rc != MBX_TIMEOUT) { if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) @@ -5549,7 +5815,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); if (sli4_hba->nvmels_cq) sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, - LPFC_QUEUE_REARM); + LPFC_QUEUE_REARM); if (sli4_hba->hdwq) { /* Loop thru all Hardware Queues */ @@ -5633,7 +5899,7 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; if (bf_get(lpfc_mbox_hdr_status, &rsrc_info->header.cfg_shdr.response)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2930 Failed to get resource extents " "Status 0x%x Add'l Status 0x%x\n", bf_get(lpfc_mbox_hdr_status, @@ -5771,7 +6037,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, req_len, *emb); if (alloc_len < req_len) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2982 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); @@ 
-5827,7 +6093,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) return -EIO; if ((rsrc_cnt == 0) || (rsrc_size == 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3009 No available Resource Extents " "for resource type 0x%x: Count: 0x%x, " "Size 0x%x\n", type, rsrc_cnt, @@ -5876,14 +6142,14 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) length = sizeof(struct lpfc_rsrc_blks); switch (type) { case LPFC_RSC_TYPE_FCOE_RPI: - phba->sli4_hba.rpi_bmask = kcalloc(longs, + phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_bmask)) { rc = -ENOMEM; goto err_exit; } - phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, + phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_ids)) { @@ -5905,13 +6171,15 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_VPI: - phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), + phba->vpi_bmask = kzalloc(longs * + sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->vpi_bmask)) { rc = -ENOMEM; goto err_exit; } - phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), + phba->vpi_ids = kzalloc(rsrc_id_cnt * + sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->vpi_ids)) { kfree(phba->vpi_bmask); @@ -5925,7 +6193,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) ext_blk_list = &phba->lpfc_vpi_blk_list; break; case LPFC_RSC_TYPE_FCOE_XRI: - phba->sli4_hba.xri_bmask = kcalloc(longs, + phba->sli4_hba.xri_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_bmask)) { @@ -5933,7 +6201,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) goto err_exit; } phba->sli4_hba.max_cfg_param.xri_used = 0; - phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, + phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_ids)) { @@ -5948,14 +6216,14 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; break; case LPFC_RSC_TYPE_FCOE_VFI: - phba->sli4_hba.vfi_bmask = kcalloc(longs, + phba->sli4_hba.vfi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_bmask)) { rc = -ENOMEM; goto err_exit; } - phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, + phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_ids)) { @@ -6078,7 +6346,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; if (bf_get(lpfc_mbox_hdr_status, &dealloc_rsrc->header.cfg_shdr.response)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2919 Failed to release resource extents " "for type %d - Status 0x%x Add'l Status 0x%x. 
" "Resource memory not released.\n", @@ -6142,11 +6410,12 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) return rc; } -static void +void lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, uint32_t feature) { uint32_t len; + u32 sig_freq = 0; len = sizeof(struct lpfc_mbx_set_feature) - sizeof(struct lpfc_sli4_cfg_mhdr); @@ -6169,8 +6438,61 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; mbox->u.mqe.un.set_feature.param_len = 8; break; - } + case LPFC_SET_CGN_SIGNAL: + if (phba->cmf_active_mode == LPFC_CFG_OFF) + sig_freq = 0; + else + sig_freq = phba->cgn_sig_freq; + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_BOTH) { + bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + } + + if (phba->cgn_reg_signal == LPFC_CGN_SIGNAL_WARN) + bf_set(lpfc_mbx_set_feature_CGN_warn_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + if (phba->cmf_active_mode == LPFC_CFG_OFF || + phba->cgn_reg_signal == LPFC_CGN_SIGNAL_NO_SPT) + sig_freq = 0; + else + sig_freq = lpfc_acqe_cgn_frequency; + + bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, + &mbox->u.mqe.un.set_feature, sig_freq); + + mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; + mbox->u.mqe.un.set_feature.param_len = 12; + break; + case LPFC_SET_DUAL_DUMP: + bf_set(lpfc_mbx_set_feature_dd, + &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); + bf_set(lpfc_mbx_set_feature_ddquery, + &mbox->u.mqe.un.set_feature, 0); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; + mbox->u.mqe.un.set_feature.param_len = 4; + break; + case LPFC_SET_ENABLE_E2E: + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_E2E; + mbox->u.mqe.un.set_feature.param_len = 4; + bf_set(lpfc_mbx_set_feature_e2elunq, + &mbox->u.mqe.un.set_feature, + phba->pport->cfg_lun_queue_depth); + bf_set(lpfc_mbx_set_feature_e2e, + &mbox->u.mqe.un.set_feature, + phba->sli4_hba.pc_sli4_params.E2E_attr); + break; + case LPFC_SET_ENABLE_CMF: + bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; + mbox->u.mqe.un.set_feature.param_len = 4; + bf_set(lpfc_mbx_set_feature_cmf, + &mbox->u.mqe.un.set_feature, 1); + break; + } return; } @@ -6186,11 +6508,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba) { struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; - ras_fwlog->ras_active = false; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); /* Disable FW logging to host memory */ writel(LPFC_CTL_PDEV_CTL_DDL_RAS, phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); + + /* Wait 10 milli Sec Fimware need to stop using DMA buffer */ + msleep(10); } /** @@ -6226,7 +6553,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) ras_fwlog->lwpd.virt = NULL; } - ras_fwlog->ras_active = false; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); } /** @@ -6257,7 +6586,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, &ras_fwlog->lwpd.phys, GFP_KERNEL); if (!ras_fwlog->lwpd.virt) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6185 LWPD Memory Alloc Failed\n"); return -ENOMEM; @@ -6276,7 +6605,8 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, LPFC_RAS_MAX_ENTRY_SIZE, - &dmabuf->phys, GFP_KERNEL); + &dmabuf->phys, + 
GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); rc = -ENOMEM; @@ -6318,7 +6648,7 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6188 FW LOG mailbox " "completed with status x%x add_status x%x," " mbx status x%x\n", @@ -6328,7 +6658,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto disable_ras; } - ras_fwlog->ras_active = true; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = ACTIVE; + spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return; @@ -6360,6 +6692,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; int rc = 0; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); + fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize); fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); @@ -6380,7 +6716,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, /* Setup Mailbox command */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6190 RAS MBX Alloc Failed"); rc = -ENOMEM; goto mem_free; @@ -6419,13 +6755,16 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = REG_INPROGRESS; + spin_unlock_irq(&phba->hbalock); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6191 FW-Log Mailbox failed. " "status %d mbxStatus : x%x", rc, bf_get(lpfc_mqe_status, &mbox->u.mqe)); @@ -6561,7 +6900,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* RPIs. */ count = phba->sli4_hba.max_cfg_param.max_rpi; if (count <= 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3279 Invalid provisioning of " "rpi:%d\n", count); rc = -EINVAL; @@ -6569,14 +6908,15 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) } base = phba->sli4_hba.max_cfg_param.rpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; - phba->sli4_hba.rpi_bmask = kcalloc(longs, + phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_bmask)) { rc = -ENOMEM; goto err_exit; } - phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), + phba->sli4_hba.rpi_ids = kzalloc(count * + sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.rpi_ids)) { rc = -ENOMEM; @@ -6589,7 +6929,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* VPIs. 
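The allocation pattern repeated through these hunks sizes each resource-ID bitmask by rounding the ID count up to whole unsigned long words and then zero-filling the result. A minimal standalone sketch of the same arithmetic (the helper name is illustrative, not part of lpfc):

    #include <linux/slab.h>
    #include <linux/bitops.h>

    /* One bit per resource ID, rounded up to whole bitmap words. */
    static unsigned long *alloc_id_bmask(unsigned int count)
    {
            unsigned int longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;

            /*
             * kcalloc() checks the longs * sizeof() multiplication for
             * overflow; the open-coded kzalloc(longs * sizeof(...)) form
             * these hunks switch to does not, which is tolerable here only
             * because the counts are small, firmware-reported limits.
             * Both return zeroed memory, so every ID starts out free.
             */
            return kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
    }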
*/ count = phba->sli4_hba.max_cfg_param.max_vpi; if (count <= 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3280 Invalid provisioning of " "vpi:%d\n", count); rc = -EINVAL; @@ -6597,13 +6937,15 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) } base = phba->sli4_hba.max_cfg_param.vpi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; - phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), + phba->vpi_bmask = kzalloc(longs * + sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->vpi_bmask)) { rc = -ENOMEM; goto free_rpi_ids; } - phba->vpi_ids = kcalloc(count, sizeof(uint16_t), + phba->vpi_ids = kzalloc(count * + sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->vpi_ids)) { rc = -ENOMEM; @@ -6616,7 +6958,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* XRIs. */ count = phba->sli4_hba.max_cfg_param.max_xri; if (count <= 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3281 Invalid provisioning of " "xri:%d\n", count); rc = -EINVAL; @@ -6624,7 +6966,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) } base = phba->sli4_hba.max_cfg_param.xri_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; - phba->sli4_hba.xri_bmask = kcalloc(longs, + phba->sli4_hba.xri_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_bmask)) { @@ -6632,7 +6974,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) goto free_vpi_ids; } phba->sli4_hba.max_cfg_param.xri_used = 0; - phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), + phba->sli4_hba.xri_ids = kzalloc(count * + sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.xri_ids)) { rc = -ENOMEM; @@ -6645,7 +6988,7 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) /* VFIs. 
*/ count = phba->sli4_hba.max_cfg_param.max_vfi; if (count <= 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3282 Invalid provisioning of " "vfi:%d\n", count); rc = -EINVAL; @@ -6653,14 +6996,15 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) } base = phba->sli4_hba.max_cfg_param.vfi_base; longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; - phba->sli4_hba.vfi_bmask = kcalloc(longs, + phba->sli4_hba.vfi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_bmask)) { rc = -ENOMEM; goto free_xri_ids; } - phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), + phba->sli4_hba.vfi_ids = kzalloc(count * + sizeof(uint16_t), GFP_KERNEL); if (unlikely(!phba->sli4_hba.vfi_ids)) { rc = -ENOMEM; @@ -6823,7 +7167,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, req_len, emb); if (alloc_len < req_len) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2983 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); @@ -6866,7 +7210,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, } if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2984 Failed to read allocated resources " "for type %d - Status 0x%x Add'l Status 0x%x.\n", type, @@ -6881,7 +7225,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, } /** - * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block * @phba: pointer to lpfc hba data structure. * @pring: Pointer to driver SLI ring object. * @sgl_list: linked list of sgl buffers to post @@ -7021,7 +7365,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, spin_unlock(&phba->sli4_hba.sgl_list_lock); spin_unlock_irq(&phba->hbalock); } else { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3161 Failure to post sgl to port.\n"); return -EIO; } @@ -7076,7 +7420,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; mbox->u.mqe.un.set_host_data.param_len = LPFC_HOST_OS_DRIVER_VERSION_SIZE; - snprintf(mbox->u.mqe.un.set_host_data.data, + snprintf(mbox->u.mqe.un.set_host_data.un.data, LPFC_HOST_OS_DRIVER_VERSION_SIZE, "Linux %s v"LPFC_DRIVER_VERSION, (phba->hba_flag & HBA_FCOE_MODE) ?
"FCoE" : "FC"); @@ -7094,12 +7438,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct rqb_dmabuf *rqb_buffer; LIST_HEAD(rqb_buf_list); - spin_lock_irqsave(&phba->hbalock, flags); rqbp = hrq->rqbp; for (i = 0; i < count; i++) { + spin_lock_irqsave(&phba->hbalock, flags); /* IF RQ is already full, don't bother */ - if (rqbp->buffer_count + i >= rqbp->entry_count - 1) + if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { + spin_unlock_irqrestore(&phba->hbalock, flags); break; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + rqb_buffer = rqbp->rqb_alloc_buffer(phba); if (!rqb_buffer) break; @@ -7108,6 +7456,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, rqb_buffer->idx = idx; list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); } + + spin_lock_irqsave(&phba->hbalock, flags); while (!list_empty(&rqb_buf_list)) { list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, hbuf.list); @@ -7118,7 +7468,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); if (rc < 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6421 Cannot post to HRQ %d: %x %x %x " "DRQ %x %x\n", hrq->queue_id, @@ -7138,6 +7488,407 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, return 1; } +static void lpfc_sli4_dip(struct lpfc_hba *phba) +{ + uint32_t if_type; + + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); + if (if_type == LPFC_SLI_INTF_IF_TYPE_2 || + if_type == LPFC_SLI_INTF_IF_TYPE_6) { + struct lpfc_register reg_data; + + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + ®_data.word0)) + return; + + if (bf_get(lpfc_sliport_status_dip, ®_data)) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2904 Firmware Dump Image Present" + " on Adapter"); + } +} + +static void +lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + union lpfc_sli4_cfg_shdr *shdr; + u32 shdr_status, shdr_add_status; + u32 sig, acqe; + + /* Two outcomes. (1) Set featurs was successul and EDC negotiation + * is done. (2) Mailbox failed and send FPIN support only. 
+ */ + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2516 CGN SET_FEATURE mbox failed with " + "status x%x add_status x%x, mbx status x%x " + "Reset Congestion to FPINs only\n", + shdr_status, shdr_add_status, + pmb->u.mb.mbxStatus); + /* If there is a mbox error, move on to RDF */ + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + goto out; + } + + /* Zero out Congestion Signal ACQE counter */ + phba->cgn_acqe_cnt = 0; + + acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq, + &pmb->u.mqe.un.set_feature); + sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq, + &pmb->u.mqe.un.set_feature); + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4620 SET_FEATURES Success: Freq: %ds %dms " + " Reg: x%x x%x\n", acqe, sig, + phba->cgn_reg_signal, phba->cgn_reg_fpin); +out: + mempool_free(pmb, phba->mbox_mem_pool); + + /* Register for FPIN events from the fabric now that the + * EDC common_set_features has completed. + */ + lpfc_issue_els_rdf(vport, 0); +} + +int +lpfc_config_cgn_signal(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + u32 rc; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "4621 SET_FEATURES: FREQ sig x%x acqe x%x: " + "Reg: x%x x%x\n", + phba->cgn_sig_freq, lpfc_acqe_cgn_frequency, + phba->cgn_reg_signal, phba->cgn_reg_fpin); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; + return 0; + + out: + mempool_free(mboxq, phba->mbox_mem_pool); + + /* If there is a mbox error, move on to RDF */ + phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; + phba->cgn_reg_signal = LPFC_CGN_SIGNAL_NO_SPT; + lpfc_issue_els_rdf(phba->pport, 0); + return -EIO; +} + +/** + * lpfc_init_idle_stat_hb - Initialize idle_stat tracking + * + * This routine initializes the per-cq idle_stat to dynamically dictate + * polling decisions. + * + * Note: NVMET and CGN mgmt modes are not supported. + * + * Return codes: + * None + **/ +static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) +{ + int i; + struct lpfc_sli4_hdw_queue *hdwq; + struct lpfc_queue *cq; + struct lpfc_idle_stat *idle_stat; + u64 wall; + + for_each_present_cpu(i) { + hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; + cq = hdwq->io_cq; + + /* Skip if we've already handled this cq's primary CPU */ + if (cq->chann != i) + continue; + + idle_stat = &phba->sli4_hba.idle_stat[i]; + + idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1); + idle_stat->prev_wall = wall; + + if (phba->nvmet_support || + phba->cmf_active_mode != LPFC_CFG_OFF) + cq->poll_mode = LPFC_QUEUE_WORK; + else + cq->poll_mode = LPFC_IRQ_POLL; + } + + if (!phba->nvmet_support) + schedule_delayed_work(&phba->idle_stat_delay_work, + msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); +} + +/** + * lpfc_cmf_setup - Initialize CMF and E2E feature support + * @phba: Pointer to HBA context object. + * + * This is called from HBA setup during driver load or when the HBA + * comes online;
this does all the initialization to support CMF and E2E. + **/ +static int +lpfc_cmf_setup(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + struct lpfc_dmabuf *mp; + struct lpfc_pc_sli4_params *sli4_params; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + int length; + int rc, cmf, e2e_ver; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + mqe = &mboxq->u.mqe; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; + } + + /* Gather info on CMF and E2E support */ + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->E2E_attr = bf_get(cfg_e2em_attr_value, + mbx_sli4_parameters); + sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters); + + /* Are we forcing E2E off via module parameter? */ + if (!phba->cfg_enable_e2e) + sli4_params->E2E_attr = 0; + + /* Always try to enable E2E feature if we can */ + if (sli4_params->E2E_attr) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_E2E); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + e2e_ver = bf_get(lpfc_mbx_set_feature_e2e, + &mboxq->u.mqe.un.set_feature); + + if (rc == MBX_SUCCESS) { + if (e2e_ver) { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6215 E2E is enabled\n"); + sli4_params->E2E_attr = e2e_ver; + } else { + lpfc_printf_log(phba, + KERN_WARNING, LOG_CGN_MGMT, + "6338 E2E is disabled\n"); + sli4_params->E2E_attr = 0; + } + } else { + /* E2E_attr is already set from GET_SLI4_PARAMETERS */ + lpfc_printf_log(phba, KERN_INFO, + LOG_CGN_MGMT | LOG_INIT, + "6216 Enable E2E Mailbox x%x (x%x/x%x) " + "failed, rc:x%x e2e:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, sli4_params->E2E_attr); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6217 E2E is disabled\n"); + } + + /* Ensure FDMI is enabled for E2E if enable_e2e is set */ + if (sli4_params->E2E_attr) + phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; + + /* Always try to enable CMF feature if we can */ + if (sli4_params->cmf) { + lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + cmf = bf_get(lpfc_mbx_set_feature_cmf, + &mboxq->u.mqe.un.set_feature); + if (rc == MBX_SUCCESS && cmf) { + lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, + "6218 CMF is enabled: mode %d\n", + phba->cmf_active_mode); + } else { + lpfc_printf_log(phba, KERN_WARNING, + LOG_CGN_MGMT | LOG_INIT, + "6219 Enable CMF Mailbox x%x (x%x/x%x) " + "failed, rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get + (phba, mboxq), + lpfc_sli_config_mbox_opcode_get + (phba, mboxq), + rc, cmf); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + + /* Allocate Congestion Information Buffer */ + if (!phba->cgn_i) { + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (mp) + mp->virt = dma_alloc_coherent + (&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + &mp->phys, GFP_KERNEL); + if (!mp || !mp->virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + 
"2640 Failed to alloc memory " + "for Congestion Info\n"); + kfree(mp); + sli4_params->cmf = 0; + phba->cmf_active_mode = LPFC_CFG_OFF; + goto no_cmf; + } + phba->cgn_i = mp; + + /* initialize congestion buffer info */ + lpfc_init_congestion_buf(phba); + lpfc_init_congestion_stat(phba); + } + + rc = lpfc_sli4_cgn_params_read(phba); + if (rc < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6242 Error reading Cgn Params (%d)\n", + rc); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } else if (!rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, + "6243 CGN Event empty object.\n"); + /* Ensure CGN Mode is off */ + sli4_params->cmf = 0; + } + } else { +no_cmf: + lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, + "6220 CMF is disabled\n"); + } + + /* Only register congestion buffer with firmware if BOTH + * CMF and E2E are enabled. + */ + if (sli4_params->cmf && sli4_params->E2E_attr) { + rc = lpfc_reg_congestion_buf(phba); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, + sizeof(struct lpfc_cgn_info), + phba->cgn_i->virt, phba->cgn_i->phys); + kfree(phba->cgn_i); + phba->cgn_i = NULL; + /* Ensure CGN Mode is off */ + phba->cmf_active_mode = LPFC_CFG_OFF; + return 0; + } + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6470 Setup E2Eattr %d CMF %d mode %d\n", + sli4_params->E2E_attr, sli4_params->cmf, + phba->cmf_active_mode); + + mempool_free(mboxq, phba->mbox_mem_pool); + + /* Initialize atomic counters */ + atomic_set(&phba->cgn_fabric_warn_cnt, 0); + atomic_set(&phba->cgn_fabric_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_alarm_cnt, 0); + atomic_set(&phba->cgn_sync_warn_cnt, 0); + atomic_set(&phba->cgn_driver_evt_cnt, 0); + atomic_set(&phba->cgn_latency_evt_cnt, 0); + atomic64_set(&phba->cgn_latency_evt, 0); + + phba->cmf_interval_rate = LPFC_CMF_INTERVAL; + + /* Allocate RX Monitor Buffer */ + if (!phba->rxtable) { + phba->rxtable = kmalloc(LPFC_MAX_RXMONITOR_ENTRY * + sizeof(struct rxtable_entry), + GFP_KERNEL); + if (!phba->rxtable) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2644 Failed to alloc memory " + "for RX Monitor Buffer\n"); + return -ENOMEM; + } + } + atomic_set(&phba->rxtable_idx_head, 0); + atomic_set(&phba->rxtable_idx_tail, 0); + return 0; +} + +static int +lpfc_set_host_tm(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t len, rc; + struct timespec64 cur_time; + struct tm broken; + uint32_t month, day, year; + uint32_t hour, minute, second; + struct lpfc_mbx_set_host_date_time *tm; + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + len = sizeof(struct lpfc_mbx_set_host_data) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_HOST_DATA, len, + LPFC_SLI4_MBX_EMBED); + + mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; + mboxq->u.mqe.un.set_host_data.param_len = + sizeof(struct lpfc_mbx_set_host_date_time); + tm = &mboxq->u.mqe.un.set_host_data.un.tm; + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &broken); + month = broken.tm_mon + 1; + day = broken.tm_mday; + year = broken.tm_year - 100; + hour = broken.tm_hour; + minute = broken.tm_min; + second = broken.tm_sec; + bf_set(lpfc_mbx_set_host_month, tm, month); + bf_set(lpfc_mbx_set_host_day, tm, day); + bf_set(lpfc_mbx_set_host_year, tm, year); + bf_set(lpfc_mbx_set_host_hour, tm, hour); + bf_set(lpfc_mbx_set_host_min, tm, minute); + bf_set(lpfc_mbx_set_host_sec, tm, second); + + rc = 
lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + /** * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. @@ -7150,7 +7901,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc, i, cnt, len; + int rc, i, cnt, len, dd; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -7160,6 +7911,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; struct lpfc_rqb *rqbp; + u32 flg; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -7173,9 +7925,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); + /* Allow a little time after setting SLI_ACTIVE for any polled + * MBX commands to complete via BSG. + */ + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { + msleep(20); + spin_lock_irq(&phba->hbalock); + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + } } + lpfc_sli4_dip(phba); + /* * Allocate a single mailbox container for initializing the * port. @@ -7203,6 +7967,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { phba->hba_flag |= HBA_FCOE_MODE; phba->fcp_embed_io = 0; /* SLI4 FC support only */ +#ifndef BUILD_BRCMFCOE + phba->cfg_enable_auth = LPFC_AUTH_DISABLE; /* SLI4 FC only */ +#endif } else { phba->hba_flag &= ~HBA_FCOE_MODE; } @@ -7216,7 +7983,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->hba_flag &= ~HBA_IOQ_FLUSH; if (phba->sli_rev != LPFC_SLI_REV4) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0376 READ_REV Error. SLI Level %d " "FCoE enabled %d\n", phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); @@ -7225,6 +7992,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + rc = lpfc_set_host_tm(phba); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, + "6468 Set host date / time: Status x%x:\n", rc); + /* * Continue initialization with default values even if driver failed * to read FCoE param config regions, only read parameters if the @@ -7258,7 +8029,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) */ rc = lpfc_parse_vpd(phba, vpd, vpd_size); if (unlikely(!rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0377 Error %d parsing vpd. 
" "Using defaults.\n", rc); rc = 0; @@ -7304,15 +8075,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); - /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ - rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); - if (phba->pport->cfg_lun_queue_depth > rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "3362 LUN queue depth changed from %d to %d\n", - phba->pport->cfg_lun_queue_depth, rc); - phba->pport->cfg_lun_queue_depth = rc; - } - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_0) { lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); @@ -7375,6 +8137,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { phba->cfg_enable_bg = 0; + phba->cfg_external_dif = 0; phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; ftr_rsp++; } @@ -7390,8 +8153,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "x%x x%x x%x\n", mqe->un.req_ftrs.word2, mqe->un.req_ftrs.word3, phba->cfg_enable_bg, phba->cfg_enable_npiv, phba->max_vpi); - if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) - phba->cfg_enable_bg = 0; + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) phba->cfg_enable_npiv = 0; } @@ -7401,13 +8163,30 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); spin_unlock_irq(&phba->hbalock); + /* Always try to enable dual dump feature if we can */ + lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); + if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6448 Dual Dump is enabled\n"); + else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, + "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " + "rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get( + phba, mboxq), + lpfc_sli_config_mbox_opcode_get( + phba, mboxq), + rc, dd); /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. */ rc = lpfc_sli4_alloc_resource_identifiers(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2920 Failed to alloc Resource IDs " "rc = x%x\n", rc); goto out_free_mbox; @@ -7418,8 +8197,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "2134 Failed to set host os driver version %x", - rc); + "2134 Failed to set host os driver " + "version %x ", rc); } /* Read the port's service parameters. 
*/ @@ -7446,7 +8225,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) kfree(mp); mboxq->ctx_buf = NULL; if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0382 READ_SPARAM command failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, mqe)); @@ -7464,7 +8243,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3089 Failed to allocate queues\n"); rc = -ENODEV; goto out_free_mbox; @@ -7472,7 +8251,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Set up all the queues to the device */ rc = lpfc_sli4_queue_setup(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0381 Error %d during queue setup.\n ", rc); goto out_stop_timers; } @@ -7483,7 +8262,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* update host els xri-sgl sizes and mappings */ rc = lpfc_sli4_els_sgl_update(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1400 Failed to update xri-sgl size and " "mapping: %d\n", rc); goto out_destroy_queue; @@ -7493,7 +8272,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, phba->sli4_hba.els_xri_cnt); if (unlikely(rc < 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0582 Error %d during els sgl post " "operation\n", rc); rc = -ENODEV; @@ -7505,7 +8284,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* update host nvmet xri-sgl sizes and mappings */ rc = lpfc_sli4_nvmet_sgl_update(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6308 Failed to update nvmet-sgl size " "and mapping: %d\n", rc); goto out_destroy_queue; @@ -7517,7 +8296,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) &phba->sli4_hba.lpfc_nvmet_sgl_list, phba->sli4_hba.nvmet_xri_cnt); if (unlikely(rc < 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3117 Error %d during nvmet " "sgl post\n", rc); rc = -ENODEV; @@ -7525,14 +8304,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } phba->sli4_hba.nvmet_xri_cnt = rc; - cnt = phba->cfg_iocb_cnt * 1024; - /* We need 1 iocbq for every SGL, for IO processing */ - cnt += phba->sli4_hba.nvmet_xri_cnt; + /* We allocate an iocbq for every receive context SGL. + * The additional allocation is for abort and ls handling. 
+ */ + cnt = phba->sli4_hba.nvmet_xri_cnt + + phba->sli4_hba.max_cfg_param.max_xri; } else { /* update host common xri-sgl sizes and mappings */ rc = lpfc_sli4_io_sgl_update(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6082 Failed to update nvme-sgl size " "and mapping: %d\n", rc); goto out_destroy_queue; @@ -7541,7 +8322,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* register the allocated common sgl pool to the port */ rc = lpfc_sli4_repost_io_sgl_list(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6116 Error %d during nvme sgl post " "operation\n", rc); /* Some NVME buffers were moved to abort nvme list */ @@ -7549,17 +8330,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_destroy_queue; } - cnt = phba->cfg_iocb_cnt * 1024; + /* Each lpfc_io_buf job structure has an iocbq element. + * This cnt provides for abort, els, ct and ls requests. + */ + cnt = phba->sli4_hba.max_cfg_param.max_xri; } if (!phba->sli.iocbq_lookup) { /* Initialize and populate the iocb list per host */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2821 initialize iocb list %d total %d\n", - phba->cfg_iocb_cnt, cnt); + "2821 initialize iocb list with %d entries\n", + cnt); rc = lpfc_init_iocb_list(phba, cnt); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1413 Failed to init iocb list.\n"); goto out_destroy_queue; } @@ -7588,7 +8372,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Post the rpi header region to the device. */ rc = lpfc_sli4_post_all_rpi_hdrs(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0393 Error %d during rpi post operation\n", rc); rc = -ENODEV; @@ -7636,7 +8420,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Don't post more new bufs if repost already recovered * the nvme sgls. 
- */ + **/ if (phba->nvmet_support == 0) { if (phba->sli4_hba.io_xri_cnt == 0) { len = lpfc_new_io_buf( @@ -7672,7 +8456,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - phba->hb_outstanding = 0; + phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); phba->last_completion_time = jiffies; /* start eq_delay heartbeat */ @@ -7680,6 +8464,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) queue_delayed_work(phba->wq, &phba->eq_delay_work, msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); + /* start per phba idle_stat_delay heartbeat */ + lpfc_init_idle_stat_hb(phba); + /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); @@ -7727,14 +8514,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Indicate device interrupt mode */ phba->sli4_hba.intr_enable = 1; + /* Setup CMF after HBA is initialized */ + lpfc_cmf_setup(phba); + if (!(phba->hba_flag & HBA_FCOE_MODE) && (phba->hba_flag & LINK_DISABLED)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3103 Adapter Link is disabled.\n"); lpfc_down_link(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3104 Adapter failed to issue " "DOWN_LINK mbox cmd, rc:x%x\n", rc); goto out_io_buff_free; @@ -7742,12 +8532,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { /* don't perform init_link on SLI4 FC port loopback test */ if (!(phba->link_flag & LS_LOOPBACK_MODE)) { - rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + rc = phba->lpfc_hba_init_link(phba); if (rc) goto out_io_buff_free; } } mempool_free(mboxq, phba->mbox_mem_pool); + +#ifndef BUILD_BRCMFCOE + lpfc_auth_read_cfg_object(phba); +#endif + phba->hba_flag |= HBA_SETUP; return rc; out_io_buff_free: /* Free allocated IO Buffers */ @@ -7778,9 +8573,17 @@ out_free_mbox: * done by the worker thread function lpfc_mbox_timeout_handler. 
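The hunk that follows wraps lpfc_mbox_timeout so it builds both before and after the 4.15 timer rework, where timer callbacks changed from taking an unsigned long cookie to taking the struct timer_list itself. The general compatibility pattern, with illustrative names, looks like this:

    #include <linux/timer.h>
    #include <linux/version.h>

    struct foo {
            struct timer_list tmo;  /* embedded timer */
    };

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
    static void foo_timeout(unsigned long ptr)
    {
            /* Old API: the object pointer was registered as the cookie. */
            struct foo *f = (struct foo *)ptr;

            (void)f;        /* ... handle the timeout ... */
    }
    #else
    static void foo_timeout(struct timer_list *t)
    {
            /* New API: recover the container via from_timer (container_of). */
            struct foo *f = from_timer(f, t, tmo);

            (void)f;        /* ... handle the timeout ... */
    }
    #endif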
**/ void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +lpfc_mbox_timeout(unsigned long ptr) +#else lpfc_mbox_timeout(struct timer_list *t) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; +#else struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); +#endif unsigned long iflag; uint32_t tmo_posted; @@ -7868,7 +8671,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (sli4_hba->hdwq) { for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; - if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { + if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { fpeq = eq; break; } @@ -7894,7 +8697,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (mbox_pending) /* process and rearm the EQ */ - lpfc_sli4_process_eq(phba, fpeq); + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); else /* Always clear and re-arm the EQ */ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); @@ -7919,8 +8722,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; - /* If the mailbox completed, process the completion and return */ - if (lpfc_sli4_process_missed_mbox_completions(phba)) + /* If the mailbox completed, process the completion */ + lpfc_sli4_process_missed_mbox_completions(phba); + + if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) return; if (pmbox != NULL) @@ -7941,7 +8746,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) } /* Mbox cmd <mbxCommand> timeout */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", mb->mbxCommand, phba->pport->port_state, @@ -7961,9 +8766,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); - lpfc_sli_abort_fcp_rings(phba); - - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0345 Resetting board due to mailbox timeout\n"); /* Reset the HBA device */ @@ -8061,7 +8864,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0311 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, @@ -8073,7 +8876,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (lpfc_readl(phba->HCregaddr, &hc_copy) || !(hc_copy & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2528 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, @@ -8092,7 +8895,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2529 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? 
pmbox->vport->vpi : 0, @@ -8104,7 +8907,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2530 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, @@ -8157,7 +8960,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2531 Mailbox command x%x " "cannot issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, @@ -8240,7 +9043,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, word */ to_slim = phba->MBslimaddr + sizeof (uint32_t); lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], - MAILBOX_CMD_SIZE - sizeof (uint32_t)); + MAILBOX_CMD_SIZE - sizeof(uint32_t)); /* Next copy over first word, with mbxOwner set */ ldata = *((uint32_t *)mbx); @@ -8405,8 +9208,11 @@ static int lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; int rc = 0; unsigned long timeout = 0; + u32 sli_flag; + u8 cmd, subsys, opcode; /* Mark the asynchronous mailbox command posting as blocked */ spin_lock_irq(&phba->hbalock); @@ -8424,12 +9230,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) if (timeout) lpfc_sli4_process_missed_mbox_completions(phba); - /* Wait for the outstnading mailbox command to complete */ + /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { - /* Timeout, marked the outstanding cmd not complete */ + /* Timeout, mark the outstanding cmd not complete */ + + /* Sanity check sli.mbox_active has not completed or + * cancelled from another context during last 2ms sleep, + * so take hbalock to be sure before logging. + */ + spin_lock_irq(&phba->hbalock); + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + cmd = mboxq->u.mb.mbxCommand; + subsys = lpfc_sli_config_mbox_subsys_get(phba, + mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, + mboxq); + sli_flag = psli->sli_flag; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2352 Mailbox command x%x " + "(x%x/x%x) sli_flag x%x could " + "not complete\n", + cmd, subsys, opcode, + sli_flag); + } else { + spin_unlock_irq(&phba->hbalock); + } + rc = 1; break; } @@ -8548,7 +9379,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_lock_irqsave(&phba->hbalock, iflag); if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2532 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, @@ -8669,7 +9500,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, rc = lpfc_mbox_dev_check(phba); if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2544 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, @@ -8691,7 +9522,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2541 Mailbox command x%x " "(x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -8725,7 +9556,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2597 Sync Mailbox command " "x%x (x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -8746,7 +9577,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, /* Now, interrupt mode asynchronous mailbox command */ rc = lpfc_mbox_cmd_check(phba, mboxq); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2543 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, @@ -8814,7 +9645,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) } if (unlikely(phba->sli.mbox_active)) { spin_unlock_irqrestore(&phba->hbalock, iflags); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0384 There is pending active mailbox cmd\n"); return MBX_NOT_FINISHED; } @@ -8875,7 +9706,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Post the mailbox command to the port */ rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):2533 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, @@ -8951,7 +9782,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1420 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; @@ -8974,7 +9805,6 @@ void __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { - lockdep_assert_held(&phba->hbalock); /* Insert the caller's iocb in the txq tail for later processing. */ list_add_tail(&piocb->list, &pring->txq); } @@ -9002,7 +9832,6 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { struct lpfc_iocbq * nextiocb; - lockdep_assert_held(&phba->hbalock); nextiocb = lpfc_sli_ringtx_get(phba, pring); if (!nextiocb) { @@ -9043,13 +9872,10 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, IOCB_t *iocb; struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; - lockdep_assert_held(&phba->hbalock); - if (piocb->iocb_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { - lpfc_printf_log(phba, KERN_ERR, - LOG_SLI | LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1807 IOCB x%x failed.
No vport\n", piocb->iocb.ulpCommand); dump_stack(); @@ -9289,6 +10115,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, struct lpfc_nodelist *ndlp; uint32_t *pcmd; uint32_t if_type; + struct lpfc_io_buf *lpfc_cmd; fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ @@ -9303,6 +10130,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Some of the fields are in the right position already */ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); + /* The ct field has moved so reset */ wqe->generic.wqe_com.word7 = 0; wqe->generic.wqe_com.word10 = 0; @@ -9344,7 +10172,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, else ndlp = (struct lpfc_nodelist *)iocbq->context1; if (!iocbq->iocb.ulpLe) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2007 Only Limited Edition cmd Format" " supported 0x%x\n", iocbq->iocb.ulpCommand); @@ -9375,10 +10203,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { if (pcmd && (*pcmd == ELS_CMD_FLOGI || *pcmd == ELS_CMD_SCR || + *pcmd == ELS_CMD_RDF || + *pcmd == ELS_CMD_EDC || *pcmd == ELS_CMD_RSCN_XMT || *pcmd == ELS_CMD_FDISC || *pcmd == ELS_CMD_LOGO || - *pcmd == ELS_CMD_PLOGI)) { + *pcmd == ELS_CMD_PLOGI || + (*pcmd & ELS_CMD_MASK) == ELS_CMD_AUTH || + iocbq->iocb_flag & LPFC_IO_RAW)) { bf_set(els_req64_sp, &wqe->els_req, 1); bf_set(els_req64_sid, &wqe->els_req, iocbq->vport->fc_myDID); @@ -9446,6 +10278,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); break; case CMD_FCP_IWRITE64_CR: + lpfc_cmd = iocbq->context1; + + /* The ndlp indicates if first burst support is supported. 
*/ + if (iocbq->iocb_flag & LPFC_IO_LIBDFC) + ndlp = iocbq->context_un.ndlp; + else + ndlp = lpfc_cmd->ndlp; + command_type = FCP_COMMAND_DATA_OUT; /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ @@ -9453,8 +10293,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, xmit_len + sizeof(struct fcp_rsp)); bf_set(cmd_buff_len, &wqe->fcp_iwrite, 0); - /* word4 iocb=parameter wqe=total_xfer_length memcpy */ - /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ + + /* Set first-burst provided it was successfully negotiated */ + if (phba->sli_rev == LPFC_SLI_REV4 && + iocbq->iocb_flag & LPFC_IO_FCP && + !(phba->hba_flag & HBA_FCOE_MODE) && ndlp->first_burst) { + u32 init_len; + + total_len = be32_to_cpu(lpfc_cmd->fcp_cmnd->fcpDl); + init_len = min(total_len, ndlp->first_burst); + wqe->fcp_iwrite.initial_xfer_len = init_len; + wqe->fcp_iwrite.total_xfer_len = total_len; + } + bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpFCP2Rcvy); bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); @@ -9484,7 +10335,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); if (phba->fcp_embed_io) { - struct lpfc_io_buf *lpfc_cmd; struct sli4_sge *sgl; struct fcp_cmnd *fcp_cmnd; uint32_t *ptr; @@ -9522,12 +10372,21 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpFCP2Rcvy); bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); - /* Always open the exchange */ - bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); + + /* For a CMF Managed port, iod must be zero'ed */ + if (phba->cmf_active_mode == LPFC_CFG_MANAGED) + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_NONE); + else + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, + LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); + bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, + iocbq->iocb.ulpTimeout); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); @@ -9591,6 +10450,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, LPFC_WQE_LENLOC_NONE); bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, iocbq->iocb.ulpFCP2Rcvy); + bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, + iocbq->iocb.ulpTimeout); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); @@ -9651,7 +10512,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* word6 context tag copied in memcpy */ if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2015 Invalid CT %x command 0x%x\n", ct, iocbq->iocb.ulpCommand); return IOCB_ERROR; @@ -9830,7 +10691,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, case CMD_FCP_TRSP64_CX: /* Target mode rcv */ case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2014 Invalid command 0x%x\n", iocbq->iocb.ulpCommand); return IOCB_ERROR; @@ -9852,6 +10713,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_cmnd, 
&wqe->generic.wqe_com, cmnd); bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + return 0; } @@ -9865,7 +10727,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue * an iocb command to an HBA with SLI-4 interface spec. * - * This function is called with hbalock held. The function will return success + * This function is called with ringlock held. The function will return success * after it successfully submit the iocb to firmware or after adding to the * txq. **/ @@ -9992,7 +10854,7 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1419 Invalid HBA PCI-device group: 0x%x\n", dev_grp); return -ENODEV; @@ -10055,10 +10917,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sli_ring *pring; + struct lpfc_queue *eq; unsigned long iflags; int rc; if (phba->sli_rev == LPFC_SLI_REV4) { + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; + pring = lpfc_sli4_calc_ring(phba, piocb); if (unlikely(pring == NULL)) return IOCB_ERROR; @@ -10066,6 +10931,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); } else { /* For now, SLI2/3 will still use hbalock */ spin_lock_irqsave(&phba->hbalock, iflags); @@ -10121,6 +10988,32 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) return 0; } +static void +lpfc_sli_post_recovery_event(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + struct lpfc_work_evt *evtp = &ndlp->recovery_evt; + + spin_lock_irqsave(&phba->hbalock, iflags); + if (!list_empty(&evtp->evt_listp)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + + /* Incrementing the reference count until the queued work is done. */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + if (!evtp->evt_arg1) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; + } + evtp->evt = LPFC_EVT_RECOVER_PORT; + list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + lpfc_worker_wake_up(phba); +} + /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. * @phba: Pointer to HBA context object. * @iocbq: Pointer to iocb object. @@ -10211,7 +11104,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, ext_status = axri->parameter & IOERR_PARAM_MASK; if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) - lpfc_sli_abts_recover_port(vport, ndlp); + lpfc_sli_post_recovery_event(phba, ndlp); } /** @@ -10247,13 +11140,13 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; if (evt_code == ASYNC_TEMP_WARN) { temp_event_data.event_code = LPFC_THRESHOLD_TEMP; - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0347 Adapter is very hot, please take " "corrective action. 
temperature : %d Celsius\n", (uint32_t) icmd->ulpContext); } else { temp_event_data.event_code = LPFC_NORMAL_TEMP; - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0340 Adapter temperature is OK now. " "temperature : %d Celsius\n", (uint32_t) icmd->ulpContext); @@ -10270,7 +11163,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, break; default: iocb_w = (uint32_t *) icmd; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0346 Ring %d handler: unexpected ASYNC_STATUS" " evt_code 0x%x\n" "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" @@ -10446,7 +11339,7 @@ lpfc_sli_setup(struct lpfc_hba *phba) } if (totiocbsize > MAX_SLIM_IOCB_SIZE) { /* Too many cmd / rsp ring entries in SLI2 SLIM */ - printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " + printk(KERN_ERR "%d:0012 Too many cmd / rsp ring entries in " "SLI2 SLIM Data: x%x x%lx\n", phba->brd_no, totiocbsize, (unsigned long) MAX_SLIM_IOCB_SIZE); @@ -10680,14 +11573,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport) set_bit(LPFC_DATA_READY, &phba->data_flags); } prev_pring_flag = pring->flag; - spin_lock_irq(&pring->ring_lock); + spin_lock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); } - spin_unlock_irq(&pring->ring_lock); + spin_unlock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->vport != vport) @@ -10699,6 +11592,9 @@ lpfc_sli_host_down(struct lpfc_vport *vport) } spin_unlock_irqrestore(&phba->hbalock, flags); + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN); @@ -10944,7 +11840,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0402 Cannot find virtual addr for buffer tag on " "ring %d Data x%lx x%px x%px x%x\n", pring->ringno, (unsigned long) tag, @@ -10988,7 +11884,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0410 Cannot find virtual addr for mapped buf on " "ring %d Data x%llx x%px x%px x%x\n", pring->ringno, (unsigned long long)phys, @@ -11112,8 +12008,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, unsigned long iflags; struct lpfc_nodelist *ndlp; - lockdep_assert_held(&phba->hbalock); - /* * There are certain command types we don't want to abort. And we * don't want to abort commands that are already in the process of @@ -11219,8 +12113,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int retval = IOCB_ERROR; IOCB_t *icmd = NULL; - lockdep_assert_held(&phba->hbalock); - /* * There are certain command types we don't want to abort. 
And we * don't want to abort commands that are already in the process of @@ -11324,7 +12216,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd; int rc = 1; - if (iocbq->vport != vport) + if (!iocbq || iocbq->vport != vport) return rc; if (!(iocbq->iocb_flag & LPFC_IO_FCP) || @@ -11858,7 +12750,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = msecs_to_jiffies(timeout * 1000); + /* make host tmo longer */ + timeout_req = msecs_to_jiffies((timeout + 20) * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -11875,20 +12768,29 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->hbalock, iflags); if (iocb_completed) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0331 IOCB wake signaled\n"); + if ((prspiocbq) && + (prspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3324 IOCB wake signaled, " + "port timeout the I/O.\n"); + retval = IOCB_ABORTED; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0331 IOCB wake signaled, " + "port complete the I/O.\n"); + } /* Note: we are not indicating if the IOCB has a success * status or not - that's for the caller to check. * IOCB_SUCCESS means just that the command was sent and * completed. Not that it completed successfully. * */ } else if (timeleft == 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0338 IOCB wait timeout error - no " "wake response Data x%x\n", timeout); retval = IOCB_TIMEDOUT; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0330 IOCB wake NOT set, " "Data x%x x%lx\n", timeout, (timeleft / jiffies)); @@ -12150,7 +13052,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) } if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1423 HBA Unrecoverable error: " "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " "ue_mask_lo_reg=0x%x, " @@ -12181,7 +13083,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " @@ -12197,7 +13099,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) break; case LPFC_SLI_INTF_IF_TYPE_1: default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2886 HBA Error Attention on unsupported " "if type %d.", if_type); return 1; @@ -12261,7 +13163,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) ha_copy = lpfc_sli4_eratt_read(phba); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0299 Invalid SLI revision (%d)\n", phba->sli_rev); ha_copy = 0; @@ -12494,8 +13396,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) * Stray Mailbox Interrupt, mbxCommand <cmd> * mbxStatus <status> */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | - LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "(%d):0304 Stray Mailbox " "Interrupt mbxCommand x%x " "mbxStatus x%x\n", @@ -12555,7 +13456,7 @@ 
lpfc_sli_sp_intr_handler(int irq, void *dev_id) if (rc != MBX_BUSY) lpfc_printf_log(phba, KERN_ERR, - LOG_MBOX | LOG_SLI, + LOG_TRACE_EVENT, "0350 rc should have" "been MBX_BUSY\n"); if (rc != MBX_NOT_FINISHED) @@ -12570,7 +13471,21 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) spin_unlock_irqrestore( &phba->pport->work_port_lock, iflag); - lpfc_mbox_cmpl_put(phba, pmb); + + /* Do NOT queue MBX_HEARTBEAT to the worker + * thread for processing. + */ + if (pmbox->mbxCommand == MBX_HEARTBEAT) { + /* Process mbox now */ + phba->sli.mbox_active = NULL; + phba->sli.sli_flag &= + ~LPFC_SLI_MBOX_ACTIVE; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + } else { + /* Queue to worker thread to process */ + lpfc_mbox_cmpl_put(phba, pmb); + } } } else spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -12584,8 +13499,9 @@ send_current_mbox: MBX_NOWAIT); } while (rc == MBX_NOT_FINISHED); if (rc != MBX_SUCCESS) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | - LOG_SLI, "0349 rc should be " + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0349 rc should be " "MBX_SUCCESS\n"); } @@ -12819,23 +13735,30 @@ lpfc_sli_intr_handler(int irq, void *dev_id) void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; + unsigned long iflags; /* First, declare the els xri abort event has been handled */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Now, handle all the els xri abort events */ + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); /* Notify aborted XRI for ELS work queue */ lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + /* Free the event processed back to the free pool */ lpfc_sli4_cq_event_release(phba, cq_event); + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); } + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); } /** @@ -12979,11 +13902,13 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; + spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", @@ -12993,7 +13918,6 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, return NULL; } - spin_lock_irqsave(&pring->ring_lock, iflags); /* Put the iocb back on the txcmplq */ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -13012,7 +13936,7 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) /* Allocate a new internal CQ_EVENT entry */ cq_event = lpfc_sli4_cq_event_alloc(phba); if (!cq_event) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0602 Failed to 
alloc CQ_EVENT entry\n"); return NULL; } @@ -13046,9 +13970,13 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); if (!cq_event) return false; - spin_lock_irqsave(&phba->hbalock, iflags); + + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); + /* Set the async event flag */ + spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag |= ASYNC_EVENT; spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -13087,7 +14015,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) spin_lock_irqsave(&phba->hbalock, iflags); pmb = phba->sli.mbox_active; if (unlikely(!pmb)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1832 No pending MBOX command to handle\n"); spin_unlock_irqrestore(&phba->hbalock, iflags); goto out_no_mqe_complete; @@ -13136,8 +14064,9 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) pmb->vport = vport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_BUSY) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | - LOG_SLI, "0385 rc should " + lpfc_printf_log(phba, KERN_ERR, + LOG_TRACE_EVENT, + "0385 rc should " "have been MBX_BUSY\n"); if (rc != MBX_NOT_FINISHED) goto send_current_mbox; @@ -13147,7 +14076,26 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) phba->pport->work_port_events &= ~WORKER_MBOX_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); - /* There is mailbox completion work to do */ + /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. 
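+			 * Completing it inline here, in interrupt
+			 * context, is a deliberate exception: it keeps
+			 * the heartbeat completion from queuing behind
+			 * unrelated deferred work on the worker thread,
+			 * which could otherwise delay it arbitrarily.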
*/ + if (pmbox->mbxCommand == MBX_HEARTBEAT) { + spin_lock_irqsave(&phba->hbalock, iflags); + /* Release the mailbox command posting token */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Post the next mbox command, if there is one */ + lpfc_sli4_post_async_mbox(phba); + + /* Process cmpl now */ + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + return false; + } + + /* There is mailbox completion work to queue to the worker thread */ spin_lock_irqsave(&phba->hbalock, iflags); __lpfc_mbox_cmpl_put(phba, pmb); phba->work_ha |= HA_MBATT; @@ -13225,7 +14173,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_sli_ring *pring = cq->pring; int txq_cnt = 0; int txcmplq_cnt = 0; - int fcp_txcmplq_cnt = 0; /* Check for response status */ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { @@ -13245,11 +14192,10 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, txq_cnt++; if (!list_empty(&pring->txcmplq)) txcmplq_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " - "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", + "els_txcmplq_cnt=%d\n", txq_cnt, phba->iocb_cnt, - fcp_txcmplq_cnt, txcmplq_cnt); return false; } @@ -13323,21 +14269,24 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, break; case LPFC_NVME_LS: /* NVME LS uses ELS resources */ case LPFC_ELS: - cq_event = lpfc_cq_event_setup( - phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); - if (!cq_event) - return false; + cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); + if (!cq_event) { + workposted = false; + break; + } cq_event->hdwq = cq->hdwq; - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_els_xri_aborted_work_queue); /* Set the els xri abort event flag */ phba->hba_flag |= ELS_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, + iflags); workposted = true; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0603 Invalid CQ subtype %d: " "%08x %08x %08x %08x\n", cq->subtype, wcqe->word0, wcqe->parameter, @@ -13348,7 +14297,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, return workposted; } -#define FC_RCTL_MDS_DIAGS 0xF4 +#define FC_RCTL_MDS_DIAGS 0xF4 /** * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry @@ -13366,7 +14315,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) struct fc_frame_header *fc_hdr; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp; +#endif struct hbq_dmabuf *dma_buf; uint32_t status, rq_id; unsigned long iflags; @@ -13385,7 +14336,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) status = bf_get(lpfc_rcqe_status, rcqe); switch (status) { case FC_STATUS_RQ_BUF_LEN_EXCEEDED: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2537 Receive Frame Truncated!!\n"); /* fall through */ case FC_STATUS_RQ_SUCCESS: @@ -13401,13 +14352,18 @@ 
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) hrq->RQ_buf_posted--; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); + /* If a NVME LS event (type 0x28), treat it as Fast path */ fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { spin_unlock_irqrestore(&phba->hbalock, iflags); /* Handle MDS Loopback frames */ - lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(phba->pport, + dma_buf); + else + lpfc_in_buf_free(phba, &dma_buf->dbuf); break; } @@ -13419,10 +14375,11 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) case FC_STATUS_INSUFF_BUF_FRM_DISC: if (phba->nvmet_support) { tgtp = phba->targetport->private; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6402 RQE Error x%x, posted %d err_cnt " "%d: %x %x %x\n", status, hrq->RQ_buf_posted, @@ -13431,8 +14388,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ - +#endif + /* fall through */ case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ @@ -13494,7 +14451,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, (struct lpfc_rcqe *)&cqevt); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0388 Not a valid WCQE code: x%x\n", bf_get(lpfc_cqe_code, &cqevt)); break; @@ -13521,6 +14478,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, { struct lpfc_queue *cq = NULL, *childq; uint16_t cqid; + int ret = 0; /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); @@ -13533,7 +14491,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, } if (unlikely(!cq)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0365 Slow-path CQ identifier " "(%d) does not exist\n", cqid); return; @@ -13542,9 +14500,14 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Save EQ associated with this CQ */ cq->assoc_qp = speq; - if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0390 Cannot schedule soft IRQ " + if (is_kdump_kernel()) + ret = queue_work(phba->wq, &cq->spwork); + else + ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); + + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0390 Cannot schedule queue work " "for CQ eqcqid=%d, cqid=%d on CPU %d\n", cqid, cq->queue_id, raw_smp_processor_id()); } @@ -13555,6 +14518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, * @cq: Pointer to CQ to be processed * @handler: Routine to process each cqe * @delay: Pointer to usdelay to set in case of rescheduling of the handler + * @poll_mode: Polling mode we were called from * * This routine processes completion queue entries in a CQ. While a valid * queue element is found, the handler is called. 
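+ * Ownership of a CQ is serialized through cq->queue_claimed; this
+ * change releases it with xchg() (see below) so the store is a full
+ * memory barrier and the next claimant observes all CQE processing
+ * that preceded the release. A minimal sketch of the claim/release
+ * pairing -- the claim side sits outside this hunk and is paraphrased
+ * here for illustration only:
+ *
+ *   if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
+ *           goto rearm_and_exit;
+ *   ... consume CQEs, ringing the doorbell with LPFC_QUEUE_NOARM ...
+ *   xchg(&cq->queue_claimed, 0);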
During processing checks @@ -13572,7 +14536,8 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, static bool __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_cqe *), unsigned long *delay) + struct lpfc_cqe *), unsigned long *delay, + enum lpfc_poll_mode poll_mode) { struct lpfc_cqe *cqe; bool workposted = false; @@ -13600,6 +14565,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM); consumed = 0; + cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; } if (count == LPFC_NVMET_CQ_NOTIFY) @@ -13612,6 +14578,10 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, arm = false; } + /* Note: complete the irq_poll softirq before rearming CQ */ + if (poll_mode == LPFC_IRQ_POLL) + irq_poll_complete(&cq->iop); + /* Track the max number of CQEs processed in 1 EQ */ if (count > cq->CQ_max_cqe) cq->CQ_max_cqe = count; @@ -13624,7 +14594,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, "0369 No entry from completion queue " "qid=%d\n", cq->queue_id); - cq->queue_claimed = 0; + xchg(&cq->queue_claimed, 0); rearm_and_exit: phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, @@ -13660,20 +14630,20 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) case LPFC_MCQ: workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_mcqe, - &delay); + &delay, LPFC_QUEUE_WORK); break; case LPFC_WCQ: if (cq->subtype == LPFC_IO) workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay); + &delay, LPFC_QUEUE_WORK); else workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_cqe, - &delay); + &delay, LPFC_QUEUE_WORK); break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0370 Invalid completion queue type (%d)\n", cq->type); return; @@ -13682,8 +14652,8 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) if (delay) { if (!queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_spwork, delay)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0394 Cannot schedule soft IRQ " + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0394 Cannot schedule queue work " "for cqid=%d on CPU %d\n", cq->queue_id, cq->chann); } @@ -13752,9 +14722,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, IOERR_NO_RESOURCES)) phba->lpfc_rampdown_queue_depth(phba); - /* Log the error status */ + /* Log the cmpl status */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0373 FCP CQE error: status=x%x: " + "0373 FCP CQE cmpl: status=x%x: " "CQE: %08x %08x %08x %08x\n", bf_get(lpfc_wcqe_c_status, wcqe), wcqe->word0, wcqe->total_data_placed, @@ -13764,9 +14734,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Look up the FCP command IOCB and create pseudo response IOCB */ spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; - spin_unlock_irqrestore(&pring->ring_lock, iflags); cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0374 FCP complete with no corresponding " @@ -13774,7 +14744,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, bf_get(lpfc_wcqe_c_request_tag, wcqe)); return; } -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) 
cmdiocbq->isr_timestamp = cq->isr_timestamp; #endif if (cmdiocbq->iocb_cmpl == NULL) { @@ -13799,12 +14769,6 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Fake the irspiocb and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); - if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { - spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; - spin_unlock_irqrestore(&phba->hbalock, iflags); - } - /* Pass the cmd_iocb and the rsp state to the upper layer */ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); } @@ -13863,7 +14827,9 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_queue *drq; struct rqb_dmabuf *dma_buf; struct fc_frame_header *fc_hdr; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_tgtport *tgtp; +#endif uint32_t status, rq_id; unsigned long iflags; uint32_t fctl, idx; @@ -13892,7 +14858,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, status = bf_get(lpfc_rcqe_status, rcqe); switch (status) { case FC_STATUS_RQ_BUF_LEN_EXCEEDED: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6126 Receive Frame Truncated!!\n"); /* fall through */ case FC_STATUS_RQ_SUCCESS: @@ -13929,10 +14895,11 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, drop: lpfc_rq_buf_free(phba, &dma_buf->hbuf); break; +#if defined(BUILD_NVME) && (IS_ENABLED(CONFIG_NVME_TARGET_FC)) case FC_STATUS_INSUFF_BUF_FRM_DISC: if (phba->nvmet_support) { tgtp = phba->targetport->private; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6401 RQE Error x%x, posted %d err_cnt " "%d: %x %x %x\n", status, hrq->RQ_buf_posted, @@ -13941,8 +14908,8 @@ drop: atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ - +#endif + /* fall through */ case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ @@ -14006,7 +14973,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, } break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0144 Not a valid CQE code: x%x\n", bf_get(lpfc_wcqe_c_code, &wcqe)); break; @@ -14014,6 +14981,47 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, return workposted; } +/** + * lpfc_sli4_sched_cq_work - Schedules cq work + * @phba: Pointer to HBA context object. + * @cq: Pointer to CQ + * @cqid: CQ ID + * + * This routine checks the poll mode of the CQ corresponding to + * cq->chann, then either schedules a softirq or queue_work to complete + * cq work. + * + * queue_work path is taken if in NVMET mode, or if poll_mode is in + * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken. 
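+ *
+ * A minimal sketch of the irq_poll contract the softirq path relies
+ * on; the myq type and myq_reap_one() helper are illustrative only,
+ * not part of this driver:
+ *
+ *   static int myq_poll(struct irq_poll *iop, int budget)
+ *   {
+ *           struct myq *q = container_of(iop, struct myq, iop);
+ *           int done = 0;
+ *
+ *           while (done < budget && myq_reap_one(q))
+ *                   done++;
+ *           if (done < budget)
+ *                   irq_poll_complete(iop);
+ *           return done;
+ *   }
+ *
+ * with irq_poll_init(&q->iop, weight, myq_poll) called once at setup
+ * and irq_poll_sched(&q->iop) called from hard-irq context, as
+ * irq_poll_sched(&cq->iop) is used below. irq_poll_complete() must be
+ * called once the queue is drained so the softirq stops iterating;
+ * here that is done in __lpfc_sli4_process_cq().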
+ * + **/ +static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, + struct lpfc_queue *cq, uint16_t cqid) +{ + int ret = 0; + switch (cq->poll_mode) { + case LPFC_IRQ_POLL: + /* CGN mgmt is mutually exclusive from softirq processing */ + if (phba->cmf_active_mode == LPFC_CFG_OFF) { + irq_poll_sched(&cq->iop); + break; + } + /* fallthrough */ + case LPFC_QUEUE_WORK: + default: + if (is_kdump_kernel()) + ret = queue_work(phba->wq, &cq->irqwork); + else + ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); + if (!ret) + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0383 Cannot schedule queue work " + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", + cqid, cq->queue_id, + raw_smp_processor_id()); + } +} + /** * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry * @phba: Pointer to HBA context object. @@ -14035,7 +15043,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t cqid, id; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0366 Not a valid completion " "event: majorcode=x%x, minorcode=x%x\n", bf_get_le32(lpfc_eqe_major_code, eqe), @@ -14078,7 +15086,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, process_cq: if (unlikely(cqid != cq->queue_id)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0368 Miss-matched fast-path completion " "queue identifier: eqcqid=%d, fcpcqid=%d\n", cqid, cq->queue_id); @@ -14092,16 +15100,13 @@ work_cq: else cq->isr_timestamp = 0; #endif - if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0363 Cannot schedule soft IRQ " - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", - cqid, cq->queue_id, raw_smp_processor_id()); + lpfc_sli4_sched_cq_work(phba, cq, cqid); } /** * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry * @cq: Pointer to CQ to be processed + * @poll_mode: Enum lpfc_poll_state to determine poll mode * * This routine calls the cq processing routine with the handler for * fast path CQEs. @@ -14115,7 +15120,8 @@ work_cq: * the delay indicates when to reschedule it. 
**/ static void -__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq, + enum lpfc_poll_mode poll_mode) { struct lpfc_hba *phba = cq->phba; unsigned long delay; @@ -14123,15 +15129,15 @@ __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) /* process and rearm the CQ */ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay); + &delay, poll_mode); if (delay) { if (!queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_irqwork, delay)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0367 Cannot schedule soft IRQ " - "for cqid=%d on CPU %d\n", - cq->queue_id, cq->chann); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0367 Cannot schedule queue work " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); } /* wake up worker thread if there are works to be done */ @@ -14151,7 +15157,7 @@ lpfc_sli4_hba_process_cq(struct work_struct *work) { struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); - __lpfc_sli4_hba_process_cq(cq); + __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); } /** @@ -14166,7 +15172,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work) struct lpfc_queue *cq = container_of(to_delayed_work(work), struct lpfc_queue, sched_irqwork); - __lpfc_sli4_hba_process_cq(cq); + __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); } /** @@ -14205,7 +15211,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) int ecount = 0; int hba_eqidx; struct lpfc_eq_intr_info *eqi; - uint32_t icnt; /* Get the driver's phba structure from the dev_id */ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; @@ -14228,24 +15233,25 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, fpeq); + lpfc_sli4_eqcq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } - eqi = phba->sli4_hba.eq_info; - icnt = this_cpu_inc_return(eqi->icnt); + eqi = this_cpu_ptr(phba->sli4_hba.eq_info); + eqi->icnt++; + fpeq->last_cpu = raw_smp_processor_id(); - if (icnt > LPFC_EQD_ISR_TRIGGER && - phba->cfg_irq_chann == 1 && + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && phba->cfg_auto_imax && fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && phba->sli.sli_flag & LPFC_SLI_USE_EQDR) lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); /* process and rearm the EQ */ - ecount = lpfc_sli4_process_eq(phba, fpeq); + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); if (unlikely(ecount == 0)) { fpeq->EQ_no_entry++; @@ -14305,6 +15311,153 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) return (hba_handled == true) ? 
IRQ_HANDLED : IRQ_NONE; } /* lpfc_sli4_intr_handler */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +void lpfc_sli4_poll_hbtimer(unsigned long ptr) +#else +void lpfc_sli4_poll_hbtimer(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct lpfc_hba *phba = (struct lpfc_hba *)ptr; +#else + struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); +#endif + struct lpfc_queue *eq; + int i = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) + i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH); + if (!list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + rcu_read_unlock(); +} + +inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path) +{ + struct lpfc_hba *phba = eq->phba; + int i = 0; + + /* + * Unlocking an irq is one of the entry points to check + * for re-schedule, but we are good for the io submission + * path as the midlayer does a get_cpu to glue us in. Flush + * out the invalidate queue so we can see the updated + * value for the flag. + */ + smp_rmb(); + + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) + /* We will not likely get the completion for the caller + * during this iteration but I guess that's fine. + * Future io's coming on this eq should be able to + * pick it up. As for the case of single io's, they + * will be handled through a sched from the polling timer + * function, which is currently triggered every 1 msec. + */ + i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); + + return i; +} + +static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* kickstart slowpath processing if needed */ + if (list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + list_add_rcu(&eq->_poll_list, &phba->poll_list); + synchronize_rcu(); +} + +static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* Disable slowpath processing for this eq. Kick start the eq + * by RE-ARMING it ASAP + */ + list_del_rcu(&eq->_poll_list); + synchronize_rcu(); + + if (list_empty(&phba->poll_list)) + del_timer_sync(&phba->cpuhp_poll_timer); +} + +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) +{ + struct lpfc_queue *eq, *next; + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) + list_del(&eq->_poll_list); + + INIT_LIST_HEAD(&phba->poll_list); + synchronize_rcu(); +} + +static inline void +__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode) +{ + if (mode == eq->mode) + return; + /* + * Currently this function is only called during a hotplug + * event and the cpu on which this function is executing + * is going offline. By now the hotplug has instructed + * the scheduler to remove this cpu from the cpu active mask. + * So we don't need to worry about being put aside by the + * scheduler for a high priority process. Yes, the + * interrupts could come but they are known to retire ASAP. + */ + + /* Disable polling in the fastpath */ + WRITE_ONCE(eq->mode, mode); + /* flush out the store buffer */ + smp_wmb(); + + /* + * Add this eq to the polling list and start polling. For + * a grace period both interrupt handler and poller will + * try to process the eq _but_ that's fine. We have a + * synchronization mechanism in place (queue_claimed) to + * deal with it.
This is just a draining phase for the + * interrupt handler (not eq's) as we have guaranteed through + * the barrier that all the cpu's have seen the new LPFC_EQ_POLL + * state, which will effectively disable the REARMING of + * the EQ. The whole idea is eq's die off eventually as + * we are not rearming EQ's anymore. + */ + mode ? lpfc_sli4_add_to_poll_list(eq) : + lpfc_sli4_remove_from_poll_list(eq); +} + +void lpfc_sli4_start_polling(struct lpfc_queue *eq) +{ + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL); +} + +void lpfc_sli4_stop_polling(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT); + + /* Kick start for the pending io's in h/w. + * Once we switch back to interrupt processing on an eq + * the io path completion will only arm eq's when it + * receives a completion. But since the eq is in a + * disarmed state it doesn't receive a completion. This + * creates a deadlock scenario. + */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM); +} + /** * lpfc_sli4_queue_free - free a queue structure and associated memory * @queue: The queue structure to free. @@ -14379,6 +15532,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, return NULL; INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->_poll_list); INIT_LIST_HEAD(&queue->wq_list); INIT_LIST_HEAD(&queue->wqfull_list); INIT_LIST_HEAD(&queue->page_list); @@ -14519,7 +15673,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6428 Failed allocating mailbox cmd buffer." " EQ delay was not set.\n"); return; @@ -14561,7 +15715,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2512 MODIFY_EQ_DELAY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -14638,14 +15792,15 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) dmult); switch (eq->entry_count) { default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0360 Unsupported EQ count.
(%d)\n", eq->entry_count); if (eq->entry_count < 256) { status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + /* otherwise default to smallest count */ + /* fall through */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); @@ -14682,7 +15837,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2500 EQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -14701,6 +15856,15 @@ out: return status; } +static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget) +{ + struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop); + + __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL); + + return 1; +} + /** * lpfc_cq_create - Create a Completion Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. @@ -14776,9 +15940,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_CQ_CNT_WORD7); break; } - /* fall through */ + /* Fall Thru */ default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0361 Unsupported CQ count: " "entry cnt %d sz %d pg cnt %d\n", cq->entry_count, cq->entry_size, @@ -14787,7 +15951,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + /* otherwise default to smallest count */ + /* fall through */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); @@ -14814,7 +15979,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2501 CQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -14840,6 +16005,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, if (cq->queue_id > phba->sli4_hba.cq_max) phba->sli4_hba.cq_max = cq->queue_id; + + irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler); out: mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -14899,7 +16066,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, LPFC_SLI4_MBX_NEMBED); if (alloclen < length) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3098 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, length); @@ -14953,14 +16120,15 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, } /* fall through */ default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3118 Bad CQ count. 
(%d)\n", cq->entry_count); if (cq->entry_count < 256) { status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest */ + /* otherwise default to smallest (drop thru) */ + /* fall through */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); @@ -15071,7 +16239,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3119 CQ_CREATE_SET mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -15229,14 +16397,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, cq->queue_id); switch (mq->entry_count) { default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0362 Unsupported MQ count. (%d)\n", mq->entry_count); if (mq->entry_count < 16) { status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + /* otherwise default to smallest count */ + /* fall through */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, @@ -15285,7 +16454,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2502 MQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -15346,9 +16515,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, void __iomem *bar_memmap_p; uint32_t db_offset; uint16_t pci_barset; +#ifndef BUILD_BRCMFCOE uint8_t dpp_barset; uint32_t dpp_offset; - unsigned long pg_addr; +#endif uint8_t wq_create_version; /* sanity check on queue memory */ @@ -15382,12 +16552,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, else wq_create_version = LPFC_Q_CREATE_VERSION_0; - - if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) - wq_create_version = LPFC_Q_CREATE_VERSION_1; - else - wq_create_version = LPFC_Q_CREATE_VERSION_0; - switch (wq_create_version) { case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, @@ -15408,8 +16572,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_128); break; } +#ifndef BUILD_BRCMFCOE /* Request DPP by default */ bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); +#endif bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, (wq->page_size / SLI4_PAGE_SIZE)); @@ -15434,7 +16600,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2503 WQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -15461,7 +16627,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, &wq_create->u.response); if ((wq->db_format != LPFC_DB_LIST_FORMAT) && (wq->db_format != 
LPFC_DB_RING_FORMAT)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3265 WQ[%d] doorbell format " "not supported: x%x\n", wq->queue_id, wq->db_format); @@ -15473,7 +16639,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3263 WQ[%d] failed to memmap " "pci barset:x%x\n", wq->queue_id, pci_barset); @@ -15483,7 +16649,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, db_offset = wq_create->u.response.doorbell_offset; if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && (db_offset != LPFC_ULP1_WQ_DOORBELL)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3252 WQ[%d] doorbell offset " "not supported: x%x\n", wq->queue_id, db_offset); @@ -15497,6 +16663,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, pci_barset, db_offset, wq->db_format); } else wq->db_regaddr = phba->sli4_hba.WQDBregaddr; +#ifndef BUILD_BRCMFCOE } else { /* Check if DPP was honored by the firmware */ wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, @@ -15507,7 +16674,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3267 WQ[%d] failed to memmap " "pci barset:x%x\n", wq->queue_id, pci_barset); @@ -15522,40 +16689,30 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, &wq_create->u.response_1); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, dpp_barset); - if (!bar_memmap_p) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3268 WQ[%d] failed to memmap " - "pci barset:x%x\n", - wq->queue_id, dpp_barset); - status = -ENOMEM; - goto out; - } - dpp_offset = wq_create->u.response_1.dpp_offset; - wq->dpp_regaddr = bar_memmap_p + dpp_offset; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + if (bar_memmap_p) { + dpp_offset = wq_create->u.response_1.dpp_offset; + wq->dpp_regaddr = bar_memmap_p + dpp_offset; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3271 WQ[%d]: barset:x%x, offset:x%x, " "dpp_id:x%x dpp_barset:x%x " "dpp_offset:x%x\n", wq->queue_id, pci_barset, db_offset, wq->dpp_id, dpp_barset, dpp_offset); - - /* Enable combined writes for DPP aperture */ - pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; -#ifdef CONFIG_X86 - rc = set_memory_wc(pg_addr, 1); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3272 Cannot setup Combined " - "Write on WQ[%d] - disable DPP\n", - wq->queue_id); + } else { phba->cfg_enable_dpp = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "3268 WQ[%d] dpp memmap " + "unavailable pci barset:x%x\n", + wq->queue_id, dpp_barset); } -#else - phba->cfg_enable_dpp = 0; -#endif } else wq->db_regaddr = phba->sli4_hba.WQDBregaddr; + } +#else + } else { + wq->db_regaddr = phba->sli4_hba.WQDBregaddr; } +#endif wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (wq->pring == NULL) { status = -ENOMEM; @@ -15647,14 +16804,15 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } else { switch (hrq->entry_count) { default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2535 Unsupported RQ count. 
(%d)\n", hrq->entry_count); if (hrq->entry_count < 512) { status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + /* otherwise default to smallest count */ + /* fall through */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -15698,7 +16856,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2504 RQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -15716,7 +16874,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, &rq_create->u.response); if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && (hrq->db_format != LPFC_DB_RING_FORMAT)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3262 RQ [%d] doorbell format not " "supported: x%x\n", hrq->queue_id, hrq->db_format); @@ -15728,7 +16886,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, &rq_create->u.response); bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); if (!bar_memmap_p) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3269 RQ[%d] failed to memmap pci " "barset:x%x\n", hrq->queue_id, pci_barset); @@ -15739,7 +16897,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, db_offset = rq_create->u.response.doorbell_offset; if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && (db_offset != LPFC_ULP1_RQ_DOORBELL)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3270 RQ[%d] doorbell offset not " "supported: x%x\n", hrq->queue_id, db_offset); @@ -15784,14 +16942,15 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } else { switch (drq->entry_count) { default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2536 Unsupported RQ count. 
(%d)\n", drq->entry_count); if (drq->entry_count < 512) { status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + /* otherwise default to smallest count */ + /* fall through */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -15921,7 +17080,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_NEMBED); if (alloclen < length) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3099 Allocated DMA memory size (%d) is " "less than the requested DMA memory size " "(%d)\n", alloclen, length); @@ -16031,7 +17190,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3120 RQ_CREATE mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16101,7 +17260,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2505 EQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16156,7 +17315,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2506 CQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16210,7 +17369,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2507 MQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16263,7 +17422,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2508 WQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16320,7 +17479,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2509 RQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16336,7 +17495,7 
@@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2510 RQ_DESTROY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16384,7 +17543,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, union lpfc_sli4_cfg_shdr *shdr; if (xritag == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0364 Invalid param:\n"); return -EINVAL; } @@ -16425,7 +17584,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2511 POST_SGL mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16556,7 +17715,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, reqlen = post_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2559 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; @@ -16572,7 +17731,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0285 Allocated DMA memory size (%d) is " "less than the requested DMA memory " "size (%d)\n", alloclen, reqlen); @@ -16620,7 +17779,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, if (rc != MBX_TIMEOUT) lpfc_sli4_mbox_cmd_free(phba, mbox); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2513 POST_SGL_BLOCK mailbox command failed " "status x%x add_status x%x mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16668,7 +17827,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6119 Failed to allocate mbox cmd memory\n"); return -ENOMEM; } @@ -16679,7 +17838,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, reqlen, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6120 Allocated DMA memory size (%d) is " "less than the requested DMA memory " "size (%d)\n", alloclen, reqlen); @@ -16733,7 +17892,7 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, if (rc != MBX_TIMEOUT) lpfc_sli4_mbox_cmd_free(phba, mbox); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6125 POST_SGL_BLOCK mailbox command failed " "status x%x add_status x%x mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -16887,8 +18046,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; -#define FC_RCTL_MDS_DIAGS 
0xF4 - switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ case FC_RCTL_DD_SOL_DATA: /* solicited data */ @@ -16916,7 +18073,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) case FC_RCTL_F_BSY: /* fabric busy to data frame */ case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ case FC_RCTL_LCR: /* link credit reset */ - case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ case FC_RCTL_END: /* end */ break; case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ @@ -17317,7 +18473,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, /* Failure means BLS ABORT RSP did not get delivered to remote node*/ if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3154 BLS ABORT RSP failed, data: x%x/x%x\n", rsp_iocbq->iocb.ulpStatus, rsp_iocbq->iocb.un.ulpWord[4]); @@ -17479,7 +18635,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); if (rc == IOCB_ERROR) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2925 Failed to issue CT ABTS RSP x%x on " "xri x%x, Data x%x\n", icmd->un.xseq64.w5.hcsw.Rctl, oxid, @@ -17711,6 +18867,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) list_add_tail(&iocbq->list, &first_iocbq->list); } } + /* Free the sequence's header buffer */ + if (!first_iocbq) + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); + return first_iocbq; } @@ -17725,7 +18885,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; iocbq = lpfc_prep_seq(vport, seq_dmabuf); if (!iocbq) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2707 Ring %d handler: Failed to allocate " "iocb Rctl x%x Type x%x received\n", LPFC_ELS_RING, @@ -17736,7 +18896,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, phba->sli4_hba.els_wq->pring, iocbq, fc_hdr->fh_r_ctl, fc_hdr->fh_type)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2540 Ring %d handler: unexpected Rctl " "x%x Type x%x received\n", LPFC_ELS_RING, @@ -17880,7 +19040,10 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { vport = phba->pport; /* Handle MDS Loopback frames */ - lpfc_sli4_handle_mds_loopback(vport, dmabuf); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + else + lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } @@ -17898,17 +19061,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); - if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { - vport = phba->pport; - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2023 MDS Loopback %d bytes\n", - bf_get(lpfc_rcqe_length, - &dmabuf->cq_event.cqe.rcqe_cmpl)); - /* Handle MDS Loopback frames */ - lpfc_sli4_handle_mds_loopback(vport, dmabuf); - return; - } - /* d_id this frame is directed to */ did = sli4_did_from_fc_hdr(fc_hdr); @@ -18001,7 +19153,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2008 Error %d posting all rpi " "headers\n", rc); rc = -EIO; @@ -18047,7 +19199,7 @@ lpfc_sli4_post_rpi_hdr(struct 
lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) /* The port is notified of the header region via a mailbox command. */ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2001 Unable to allocate memory for issuing " "SLI_CONFIG_SPECIAL mailbox command\n"); return -ENOMEM; @@ -18077,7 +19229,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) if (rc != MBX_TIMEOUT) mempool_free(mboxq, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2514 POST_RPI_HDR mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); @@ -18132,8 +19284,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.rpi_used++; phba->sli4_hba.rpi_count++; } - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0001 rpi:%x max:%x lim:%x\n", + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0001 Allocated rpi:x%x max:x%x lim:x%x\n", (int) rpi, max_rpi, rpi_limit); /* @@ -18166,7 +19319,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2002 Error Could not grow rpi " "count\n"); } else { @@ -18200,7 +19353,8 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) phba->sli4_hba.rpi_count--; phba->sli4_hba.max_cfg_param.rpi_used--; } else { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "2016 rpi %x not inuse\n", rpi); } @@ -18267,7 +19421,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, mboxq->vport = ndlp->vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2010 Resume RPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); @@ -18302,7 +19456,7 @@ lpfc_sli4_init_vpi(struct lpfc_vport *vport) mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); if (rc != MBX_SUCCESS) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2022 INIT VPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); @@ -18338,7 +19492,7 @@ lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if ((shdr_status || shdr_add_status) && (shdr_status != STATUS_FCF_IN_USE)) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2558 ADD_FCF_RECORD mailbox failed with " "status x%x add_status x%x\n", shdr_status, shdr_add_status); @@ -18368,7 +19522,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2009 Failed to allocate mbox for ADD_FCF cmd\n"); return -ENOMEM; } @@ -18381,7 +19535,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) LPFC_MBOX_OPCODE_FCOE_ADD_FCF, req_len, LPFC_SLI4_MBX_NEMBED); if (alloc_len < req_len) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2523 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); @@ -18414,7 +19568,7 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2515 ADD_FCF_RECORD mailbox failed with " "status 0x%x\n", rc); lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -18487,7 +19641,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; @@ -18930,7 +20084,7 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2745 Failed to allocate mbox for " "requesting FCF rediscover.\n"); return -ENOMEM; @@ -18998,14 +20152,14 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) LPFC_MBOXQ_t *pmb = NULL; MAILBOX_t *mb; uint32_t offset = 0; - int rc; + int i, rc; if (!rgn23_data) return 0; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2600 failed to allocate mailbox memory\n"); return 0; } @@ -19028,14 +20182,14 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) */ if (mb->un.varDmp.word_cnt == 0) break; - if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) - mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; + i = mb->un.varDmp.word_cnt * sizeof(uint32_t); + if (offset + i > DMP_RGN23_SIZE) + i = DMP_RGN23_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, - rgn23_data + offset, - mb->un.varDmp.word_cnt); - offset += mb->un.varDmp.word_cnt; - } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); + rgn23_data + offset, i); + offset += i; + } while (offset < DMP_RGN23_SIZE); mempool_free(pmb, phba->mbox_mem_pool); return offset; @@ -19064,7 +20218,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3105 failed to allocate mailbox memory\n"); return 0; } @@ -19128,7 +20282,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba) /* Check the region signature first */ if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2619 Config region 23 has bad signature\n"); goto out; } @@ -19136,7 +20290,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba) /* Check the data structure version */ if (rgn23_data[offset] != LPFC_REGION23_VERSION) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2620 Config region 23 has bad version\n"); goto out; } @@ -19191,6 +20345,465 @@ out: return; } +/** + * lpfc_log_fw_write_cmpl - logs firmware write completion status + * @phba: pointer to lpfc hba data structure + * 
@shdr_status: wr_object rsp's status field + * @shdr_add_status: wr_object rsp's add_status field + * @shdr_add_status_2: wr_object rsp's add_status_2 field + * @shdr_change_status: wr_object rsp's change_status field + * @shdr_csf: wr_object rsp's csf bit + * + * This routine is intended to be called after a firmware write completes. + * It will log the next action items to be performed by the user to instantiate + * the newly downloaded firmware or the reason for incompatibility. + **/ +static void +lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, + u32 shdr_add_status, u32 shdr_add_status_2, + u32 shdr_change_status, u32 shdr_csf) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "4198 %s: flash_id x%02x, asic_rev x%02x, " + "status x%02x, add_status x%02x, add_status_2 x%02x, " + "change_status x%02x, csf %01x\n", __func__, + phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, + shdr_status, shdr_add_status, shdr_add_status_2, + shdr_change_status, shdr_csf); + + if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { + switch (shdr_add_status_2) { + case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); + break; + case LPFC_ADD_STATUS_2_INCORRECT_ASIC: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); + break; + default: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); + break; + } + } else if (!shdr_status && !shdr_add_status) { + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + + switch (shdr_change_status) { + case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_FW_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: Firmware" + " reset required to instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PORT_MIGRATION): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PCI_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); + break; + default: + break; + } + } +} + +#ifndef BUILD_BRCMFCOE +/** + * lpfc_del_object - delete an object maintained by the firmware + * @phba: HBA structure that indicates the port on which to issue the mailbox. + * @obj_str: Object name. + * + * This routine will create a delete_object mailbox command to send to the port. + * + * Return 0 if successful. + * Return negative value for error cases.
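+ * + * Editor's illustrative sketch of a caller (the object name below is + * hypothetical, not part of this patch): + * + *	rc = lpfc_del_object(phba, "/driver/auth_cfg.bin"); + * + * A negative return is -EINVAL/-ENOMEM on setup failure, or -ENXIO when the + * DELETE_OBJECT mailbox reports a bad status.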
+ **/ +int +lpfc_del_object(struct lpfc_hba *phba, char *obj_str) +{ + struct lpfc_mbx_del_object *del_object; + LPFC_MBOXQ_t *mbox; + int rc = 0, i = 0; + uint32_t shdr_status, shdr_add_status; + uint32_t mbox_tmo; + union lpfc_sli4_cfg_shdr *shdr; + + if (!obj_str) + return -EINVAL; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_DELETE_OBJECT, + sizeof(struct lpfc_mbx_del_object) - + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); + + del_object = (struct lpfc_mbx_del_object *)&mbox->u.mqe.un.del_object; + sprintf((uint8_t *)del_object->request.object_name, "%s", obj_str); + for (i = 0; i < LPFC_MBX_OBJECT_NAME_LEN_DW; i++) + del_object->request.object_name[i] = + cpu_to_le32(del_object->request.object_name[i]); + + if (!phba->sli4_hba.intr_enable) { + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + } else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *)&del_object->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_AUTH, + "3034 Delete Object mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_auth_issue_rd_object - read an object from the firmware + * @phba: HBA structure that indicates the port on which to issue the mailbox. + * @dmabuf_list: list of dmabufs for object to be read from the port. + * @size: the total byte value of the object to be read. + * @offset: the current offset to be used to start the transfer. + * @obj_str: pointer to the object name. + * + * This routine will create a rd_object mailbox command to send to the port. + * The mailbox command will be constructed using the dma buffers described in + * @dmabuf_list to create a list of BDEs. This routine will fill in as many + * BDEs as the non-embedded mailbox can support. The @offset variable will be + * used to indicate the starting offset of the transfer and will also return + * the offset after the read object mailbox has completed. The size reported + * in the response and the eof bit can be used to determine the end of the + * object. + * + * Return 0 if successful, and @offset will contain the new offset to use + * for the next read. + * Return negative value for error cases.
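+ * + * Editor's sketch (hypothetical names; buf_list holds pre-mapped lpfc_dmabuf + * entries and obj_size is the expected object length): + * + *	uint32_t off = 0; + *	int rc = lpfc_auth_issue_rd_object(phba, &buf_list, obj_size, &off, + *					   "/driver/auth_cfg.bin"); + * + * The mailbox completes asynchronously in lpfc_auth_cmpl_rd_object, which + * consumes the response and the dmabuf list.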
+ **/ +int +lpfc_auth_issue_rd_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, + uint32_t size, uint32_t *offset, char *obj_str) +{ + struct lpfc_dmabuf *dmabuf; + struct lpfc_mbx_auth_rd_object *rd_object; + LPFC_MBOXQ_t *mbox; + uint32_t alloclen, reqlen; + uint32_t xfersize = 0; + int rc = 0, i = 0, j; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + reqlen = sizeof(struct lpfc_mbx_auth_rd_object); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_READ_OBJECT, reqlen, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_AUTH, + "3035 Allocated DMA memory size (%d) is " + "less than requested (%d)\n", + alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + rd_object = (struct lpfc_mbx_auth_rd_object *)mbox->sge_array->addr[0]; + rd_object->u.request.read_offset = *offset; + sprintf((uint8_t *)rd_object->u.request.object_name, "%s", obj_str); + for (j = 0; j < LPFC_MBX_OBJECT_NAME_LEN_DW; j++) + rd_object->u.request.object_name[j] = + cpu_to_le32(rd_object->u.request.object_name[j]); + + list_for_each_entry(dmabuf, dmabuf_list, list) { + if (i >= LPFC_MBX_AUTH_RD_CONFIG_MAX_BDE || xfersize >= size) + break; + rd_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); + rd_object->u.request.bde[i].addrHigh = + putPaddrHigh(dmabuf->phys); + if (xfersize + SLI4_PAGE_SIZE >= size) { + rd_object->u.request.bde[i].tus.f.bdeSize = + (size - xfersize); + xfersize += (size - xfersize); + } else { + rd_object->u.request.bde[i].tus.f.bdeSize = + SLI4_PAGE_SIZE; + xfersize += SLI4_PAGE_SIZE; + } + i++; + } + rd_object->u.request.bde_count = i; + bf_set(lpfc_rd_object_read_length, &rd_object->u.request, size); + + mbox->mbox_cmpl = lpfc_auth_cmpl_rd_object; + mbox->vport = phba->pport; + mbox->ctx_buf = dmabuf_list; + mbox->ctx_ndlp = NULL; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENXIO; + goto fail; + } + return rc; + +fail: + if (mbox) + lpfc_sli4_mbox_cmd_free(phba, mbox); + return rc; +} + +/** + * lpfc_auth_rd_object - read an object from the firmware + * @phba: HBA structure that indicates the port on which to issue the mailbox. + * @dmabuf_list: list of dmabufs that receive the object data read from the + * port. + * @size: the total byte count of the object to be read. + * @offset: the current offset to be used to start the transfer. + * @obj_str: Object name. + * + * This routine will create a rd_object mailbox command to send to the port. + * The mailbox command will be constructed using the dma buffers described in + * @dmabuf_list to create a list of BDEs. This routine will fill in as many + * BDEs as the non-embedded mailbox can support. The @offset variable will be + * used to indicate the starting offset of the transfer and will also return + * the offset after the read object mailbox has completed. + * + * Return 0 if successful, and @offset will contain the new offset to use + * for the next read. + * Return negative value for error cases.
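+ * + * Editor's sketch of the synchronous chunked-read loop this enables + * (hypothetical names; the list and sizes come from the caller): + * + *	uint32_t off = 0; + *	int rc; + * + *	do { + *		rc = lpfc_auth_rd_object(phba, &buf_list, obj_size - off, + *					 &off, "/driver/auth_cfg.bin"); + *	} while (!rc && off < obj_size);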
+ **/ +int +lpfc_auth_rd_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, + uint32_t size, uint32_t *offset, char *obj_str) +{ + struct lpfc_mbx_auth_rd_object *rd_object; + LPFC_MBOXQ_t *mbox; + uint32_t shdr_status, shdr_add_status; + uint32_t mbox_tmo; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_dmabuf *dmabuf; + uint32_t alloclen, reqlen; + uint32_t xfersize = 0; + int rc = 0, i = 0, j; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + reqlen = sizeof(struct lpfc_mbx_auth_rd_object); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_READ_OBJECT, reqlen, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_AUTH, + "3036 Allocated DMA memory size (%d) is " + "less than requested (%d)\n", + alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + rd_object = (struct lpfc_mbx_auth_rd_object *)mbox->sge_array->addr[0]; + rd_object->u.request.read_offset = *offset; + sprintf((uint8_t *)rd_object->u.request.object_name, "%s", obj_str); + for (j = 0; j < LPFC_MBX_OBJECT_NAME_LEN_DW; j++) + rd_object->u.request.object_name[j] = + cpu_to_le32(rd_object->u.request.object_name[j]); + + list_for_each_entry(dmabuf, dmabuf_list, list) { + if (i >= LPFC_MBX_AUTH_RD_CONFIG_MAX_BDE || xfersize >= size) + break; + rd_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); + rd_object->u.request.bde[i].addrHigh = + putPaddrHigh(dmabuf->phys); + if (xfersize + SLI4_PAGE_SIZE >= size) { + rd_object->u.request.bde[i].tus.f.bdeSize = + (size - xfersize); + xfersize += (size - xfersize); + } else { + rd_object->u.request.bde[i].tus.f.bdeSize = + SLI4_PAGE_SIZE; + xfersize += SLI4_PAGE_SIZE; + } + i++; + } + rd_object->u.request.bde_count = i; + bf_set(lpfc_rd_object_read_length, &rd_object->u.request, xfersize); + if (!phba->sli4_hba.intr_enable) { + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + } else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = &rd_object->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + *offset += rd_object->u.response.actual_read_length; + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_AUTH, + "3037 Read Object mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + *offset = shdr_add_status; + } + return rc; +} + +/** + * lpfc_auth_wr_object - write an object to the firmware + * @phba: HBA structure that indicates the port on which to issue the mailbox. + * @dmabuf_list: list of dmabufs to write to the port. + * @size: the total byte value of the objects to write to the port. + * @offset: the current offset to be used to start the transfer. + * @obj_str: Object group/name. + * + * This routine will create a wr_object mailbox command to send to the port. + * The mailbox command will be constructed using the dma buffers described in + * @dmabuf_list to create a list of BDEs. This routine will fill in as many + * BDEs as the non-embedded mailbox can support. The @offset variable will be + * used to indicate the starting offset of the transfer and will also return + * the offset after the write object mailbox has completed. 
@size is used to + * determine the end of the object and whether the eof bit should be set. + * + * Return 0 if successful, and @offset will contain the new offset to use + * for the next write. + * Return negative value for error cases. + **/ +int +lpfc_auth_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, + uint32_t size, uint32_t *offset, char *obj_str) +{ + struct lpfc_mbx_auth_wr_object *wr_object; + LPFC_MBOXQ_t *mbox; + int rc = 0, i = 0, j; + uint32_t shdr_status, shdr_add_status, shdr_add_status_2; + uint32_t shdr_change_status = 0, shdr_csf = 0; + uint32_t mbox_tmo; + struct lpfc_dmabuf *dmabuf; + uint32_t written = 0; + uint32_t alloclen, reqlen; + bool check_change_status = false; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + reqlen = sizeof(struct lpfc_mbx_auth_wr_object); + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_WRITE_OBJECT, reqlen, + LPFC_SLI4_MBX_NEMBED); + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI | LOG_AUTH, + "3038 Allocated DMA memory size (%d) is " + "less than the requested DMA memory size " + "(%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + wr_object = (struct lpfc_mbx_auth_wr_object *)mbox->sge_array->addr[0]; + wr_object->u.request.write_offset = *offset; + sprintf((uint8_t *)wr_object->u.request.object_name, "%s", obj_str); + for (j = 0; j < LPFC_MBX_OBJECT_NAME_LEN_DW; j++) + wr_object->u.request.object_name[j] = + cpu_to_le32(wr_object->u.request.object_name[j]); + bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); + list_for_each_entry(dmabuf, dmabuf_list, list) { + if (i >= LPFC_MBX_AUTH_WR_CONFIG_MAX_BDE || written >= size) + break; + wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); + wr_object->u.request.bde[i].addrHigh = + putPaddrHigh(dmabuf->phys); + if (written + SLI4_PAGE_SIZE >= size) { + wr_object->u.request.bde[i].tus.f.bdeSize = + (size - written); + written += (size - written); + bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); + bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); + check_change_status = true; + } else { + wr_object->u.request.bde[i].tus.f.bdeSize = + SLI4_PAGE_SIZE; + written += SLI4_PAGE_SIZE; + } + i++; + } + wr_object->u.request.bde_count = i; + bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); + + if (!phba->sli4_hba.intr_enable) { + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + } else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. 
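+ * (Editor's note: it is decoded just below via bf_get() on the + * lpfc_mbox_hdr_status and lpfc_mbox_hdr_add_status response fields, the + * same pattern the other mailbox helpers in this file use.)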
*/ + shdr_status = bf_get(lpfc_mbox_hdr_status, + &wr_object->cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &wr_object->cfg_shdr.response); + shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, + &wr_object->cfg_shdr.response); + if (check_change_status) { + shdr_change_status = bf_get(lpfc_wr_object_change_status, + &wr_object->u.response); + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); + } + *offset += wr_object->u.response.actual_write_length; + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_AUTH, + "3039 Write Object mailbox failed with " + "status x%x add_status x%x, add_status_2 x%x, " + "mbx status x%x\n", + shdr_status, shdr_add_status, shdr_add_status_2, + rc); + rc = -ENXIO; + *offset = shdr_add_status; + } + + if (rc || check_change_status) + lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, + shdr_add_status_2, shdr_change_status, + shdr_csf); + return rc; +} +#endif + /** * lpfc_wr_object - write an object to the firmware * @phba: HBA structure that indicates port to create a queue on. @@ -19217,7 +20830,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; - uint32_t shdr_status, shdr_add_status, shdr_change_status; + uint32_t shdr_status, shdr_add_status, shdr_add_status_2; + uint32_t shdr_change_status = 0, shdr_csf = 0; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; @@ -19271,46 +20885,34 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, &wr_object->header.cfg_shdr.response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &wr_object->header.cfg_shdr.response); + shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, + &wr_object->header.cfg_shdr.response); if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); - switch (shdr_change_status) { - case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3198 Firmware write complete: System " - "reboot required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_FW_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3199 Firmware write complete: Firmware" - " reset required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PORT_MIGRATION): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3200 Firmware write complete: Port " - "Migration or PCI Reset required to " - "instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PCI_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3201 Firmware write complete: PCI " - "Reset required to instantiate\n"); - break; - default: - break; - } + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); } + if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3025 Write Object mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); + "status x%x add_status x%x, add_status_2 x%x, " + "mbx status x%x\n", + shdr_status, shdr_add_status, shdr_add_status_2, + rc); rc = -ENXIO; *offset = shdr_add_status; - } else + } else { *offset += wr_object->u.response.actual_write_length; + } + + if (rc 
|| check_change_status) + lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, + shdr_add_status_2, shdr_change_status, + shdr_csf); return rc; } @@ -19483,7 +21085,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) piocbq = lpfc_sli_ringtx_get(phba, pring); if (!piocbq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2823 txq empty and txq_cnt is %d\n ", txq_cnt); break; @@ -19512,7 +21114,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) if (fail_msg) { /* Failed means we can't issue and need to cancel */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2822 IOCB failed %s iotag 0x%x " "xri 0x%x\n", fail_msg, @@ -19698,11 +21300,14 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } /* NVME_FCREQ and NVME_ABTS requests */ - if (pwqe->iocb_flag & LPFC_IO_NVME) { + if (pwqe->iocb_flag & LPFC_IO_NVME || + pwqe->iocb_flag & LPFC_IO_CMF) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ wq = qp->io_wq; pring = wq->pring; @@ -19718,6 +21323,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } @@ -19746,6 +21353,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } return WQE_ERROR; @@ -20108,6 +21717,10 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; + /* First Burst initial and total transfer length must be zeroed. */ + lpfc_ncmd->cur_iocbq.wqe.words[4] = 0; + lpfc_ncmd->cur_iocbq.wqe.words[5] = 0; + if (phba->cfg_xpsgl && !phba->nvmet_support && !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); @@ -20176,8 +21789,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, list_add_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; - spin_unlock_irqrestore(&qp->io_buf_list_put_lock, - iflag); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); } } @@ -20394,6 +22006,227 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, return lpfc_cmd; } +/** + * lpfc_read_object - Retrieve object data from HBA + * @phba: The HBA for which this call is being executed. + * @rdobject: Pathname of object data we want to read. + * @datap: Pointer to where data will be copied to. + * @datasz: size of data area + * + * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. + * The data will be truncated if datasz is not large enough. + * Version 1 is not supported with Embedded mbox cmd, so we must use version 0. + * Returns the actual bytes read from the object. 
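+ * + * Editor's illustrative call (the object path is hypothetical): + * + *	uint32_t data[LPFC_BPL_SIZE / sizeof(uint32_t)]; + *	int n = lpfc_read_object(phba, "/driver/pci_cfg.xml", data, + *				 sizeof(data)); + * + * n is the byte count copied to @datap on success, -ENOENT if the object + * does not exist in the firmware, or -ENODEV/-ENOMEM/-ENXIO on failure.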
+ */ +int +lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap, + uint32_t datasz) +{ + struct lpfc_mbx_read_object *read_object; + LPFC_MBOXQ_t *mbox; + int rc, length, eof, j, byte_cnt = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_dmabuf *pcmd; + + /* sanity check on the data pointer */ + if (!datap) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_read_object) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_READ_OBJECT, + length, LPFC_SLI4_MBX_EMBED); + read_object = &mbox->u.mqe.un.read_object; + shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr; + + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); + bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz); + read_object->u.request.rd_object_offset = 0; + read_object->u.request.rd_object_cnt = 1; + + memset((void *)read_object->u.request.rd_object_name, 0, + LPFC_OBJ_NAME_SZ); + sprintf((uint8_t *)read_object->u.request.rd_object_name, "%s", + rdobject); + for (j = 0; j < strlen(rdobject); j++) + read_object->u.request.rd_object_name[j] = + cpu_to_le32(read_object->u.request.rd_object_name[j]); + + pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) { + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return -ENOMEM; + } + memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); + read_object->u.request.rd_object_hbuf[0].pa_lo = + putPaddrLow(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].pa_hi = + putPaddrHigh(pcmd->phys); + read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE; + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_buf = NULL; + mbox->ctx_ndlp = NULL; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if (shdr_status == STATUS_FAILED && + shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "4674 No port cfg file in FW.\n"); + byte_cnt = -ENOENT; + } else if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, + "2625 READ_OBJECT mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + byte_cnt = -ENXIO; + } else { + /* Success */ + length = read_object->u.response.rd_object_actual_rlen; + eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, + "2626 READ_OBJECT Success len %d:%d, EOF %d\n", + length, datasz, eof); + + /* Detect the port config file exists but is empty */ + if (!length && eof) { + byte_cnt = 0; + goto exit; + } + + byte_cnt = length; + lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt); + } + + exit: + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return byte_cnt; +} + +/** + * lpfc_write_object - Write object data to the HBA + * @phba: The HBA for which this call is being executed. + * @wrobject: Pathname of object data we want to write. + * @datap: Pointer to where data will be copied from. 
+ * @datasz: size of data area + * + * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. + * Version 1 is not supported with Embedded mbox cmd, so we must use version 0. + * Returns the actual bytes written to the object. + */ +int +lpfc_write_object(struct lpfc_hba *phba, char *wrobject, uint32_t *datap, + uint32_t datasz) +{ + struct lpfc_mbx_write_object *write_object; + LPFC_MBOXQ_t *mbox; + int rc, length, j, byte_cnt = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_dmabuf *pcmd; + + /* sanity check on the data pointer */ + if (!datap) + return -ENODEV; + + if (datasz > LPFC_BPL_SIZE) + return -ENOSPC; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_write_object) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_WRITE_OBJECT, + length, LPFC_SLI4_MBX_EMBED); + + write_object = &mbox->u.mqe.un.write_object; + shdr = (union lpfc_sli4_cfg_shdr *)&write_object->header.cfg_shdr; + + /* Complete initializing the write object. */ + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); + bf_set(lpfc_mbx_wr_object_rlen, &write_object->u.request, datasz); + write_object->u.request.wr_object_offset = 0; + write_object->u.request.wr_object_cnt = 1; + + /* Copy the object name, then convert it to little-endian words. */ + sprintf((uint8_t *)write_object->u.request.wr_object_name, "%s", + wrobject); + for (j = 0; j < strlen(wrobject); j++) + write_object->u.request.wr_object_name[j] = + cpu_to_le32(write_object->u.request.wr_object_name[j]); + + pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); + if (pcmd) + pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); + if (!pcmd || !pcmd->virt) { + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return -ENOMEM; + } + + /* Clear the DMA data buffer to prevent unintended writes. Then copy + * the caller's data to complete the write. + */ + memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); + write_object->u.request.wr_object_hbuf[0].pa_lo = + putPaddrLow(pcmd->phys); + write_object->u.request.wr_object_hbuf[0].pa_hi = + putPaddrHigh(pcmd->phys); + write_object->u.request.wr_object_hbuf[0].length = LPFC_BPL_SIZE; + lpfc_sli_pcimem_bcopy(datap, pcmd->virt, datasz); + + /* Caller's data is ready for FW. Terminate the data. 
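+ * Setting the eof bit below marks this single embedded buffer as the + * complete object (wr_object_cnt is 1), so the firmware expects no + * further WRITE_OBJECT chunks for it.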
*/ + bf_set(lpfc_mbx_wr_object_eof, &write_object->u.request, 1); + + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->ctx_buf = NULL; + mbox->ctx_ndlp = NULL; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status == STATUS_FAILED && + shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, + "4675 No port cfg file in FW.\n"); + byte_cnt = -ENOENT; + } else if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2627 WRITE_OBJECT mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + byte_cnt = -ENXIO; + } else { + /* Success */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, + "2628 WRITE_OBJECT Success len %d: reset %d\n", + write_object->u.response.wr_object_actual_rlen, + bf_get(lpfc_mbx_wr_object_reset, + &write_object->u.response)); + byte_cnt = write_object->u.response.wr_object_actual_rlen; + } + + lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); + kfree(pcmd); + mempool_free(mbox, phba->mbox_mem_pool); + return byte_cnt; +} + /** * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool * @phba: The HBA for which this call is being executed. diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7bcf922a8be2..eb9740449391 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -95,11 +95,15 @@ struct lpfc_iocbq { #define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ #define LPFC_IO_FOF 0x20000 /* FOF FCP IO */ #define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */ +#define LPFC_IO_DRVR_INIT 0x80000000 /* Driver initiated SCSI cmd */ #define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */ #define LPFC_PRLI_FCP_REQ 0x100000 /* This is an NVME PRLI. 
*/ #define LPFC_IO_NVME 0x200000 /* NVME FCP command */ #define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ #define LPFC_IO_NVMET 0x800000 /* NVMET command */ +#define LPFC_IO_RAW 0x1000000 /* Raw ELS command */ +#define LPFC_IO_CMF 0x2000000 /* CMF command */ uint32_t drvrTimeout; /* driver timeout in seconds */ struct lpfc_vport *vport;/* virtual port pointer */ @@ -130,6 +134,7 @@ struct lpfc_iocbq { #define IOCB_BUSY 1 #define IOCB_ERROR 2 #define IOCB_TIMEDOUT 3 +#define IOCB_ABORTED 4 #define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */ @@ -348,7 +353,7 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current length of the array */ uint16_t last_iotag; /* last allocated IOTAG */ - time64_t stats_start; /* in seconds */ + unsigned long stats_start; /* in seconds */ struct lpfc_lnk_stat lnk_stat_offsets; }; @@ -411,6 +416,10 @@ struct lpfc_io_buf { * protection data */ + void *edifp; /* External DIF information + * for IO + */ + /* * data and dma_handle are the kernel virtual and bus * address of the dma-able buffer containing the @@ -439,6 +448,7 @@ struct lpfc_io_buf { struct { struct nvmefc_fcp_req *nvmeCmd; uint16_t qidx; + }; }; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -446,6 +456,7 @@ struct lpfc_io_buf { uint64_t ts_last_cmd; uint64_t ts_cmd_wqput; uint64_t ts_isr_cmpl; - uint64_t ts_data_nvme; + uint64_t ts_data_io; #endif + uint64_t rx_cmd_start; }; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 0d4882a9e634..8709547880cd 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,6 +20,9 @@ * included with this package. * *******************************************************************/ +#include <linux/irq_poll.h> +#include <linux/cpufreq.h> + #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) #define CONFIG_SCSI_LPFC_DEBUG_FS #endif @@ -41,10 +44,16 @@ /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ #define LPFC_HBA_HDWQ_MIN 0 -#define LPFC_HBA_HDWQ_MAX 128 -#define LPFC_HBA_HDWQ_DEF 0 +#define LPFC_HBA_HDWQ_MAX 256 +#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN + +/* irq_chann range, values */ +#define LPFC_IRQ_CHANN_MIN 0 +#define LPFC_IRQ_CHANN_MAX 256 +#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN /* FCP MQ queue count limiting */ +#define LPFC_FCP_SQ_THRESHOLD 4 #define LPFC_FCP_MQ_THRESHOLD_MIN 0 #define LPFC_FCP_MQ_THRESHOLD_MAX 256 #define LPFC_FCP_MQ_THRESHOLD_DEF 8 @@ -130,9 +139,36 @@ struct lpfc_rqb { struct rqb_dmabuf *); }; +enum lpfc_poll_mode { + LPFC_QUEUE_WORK, + LPFC_IRQ_POLL +}; + +struct lpfc_idle_stat { + u64 prev_idle; + u64 prev_wall; +}; + struct lpfc_queue { struct list_head list; struct list_head wq_list; + + /* + * If interrupts are in effect on _all_ the eq's the footprint + * of polling code is zero (except mode). This memory is + * checked for every io to see if the io needs to be polled and + * at completion to check if the eq needs to be rearmed. 
+ * Keep in same cacheline as the queue ptr to avoid cpu fetch + * stalls. Using 1B memory will leave us with 7B hole. Fill + * it with other frequently used members. + */ + uint16_t last_cpu; /* most recent cpu */ + uint16_t hdwq; + uint8_t qe_valid; + uint8_t mode; /* interrupt or polling */ +#define LPFC_EQ_INTERRUPT 0 +#define LPFC_EQ_POLL 1 + struct list_head wqfull_list; enum lpfc_sli4_queue_type type; enum lpfc_sli4_queue_subtype subtype; @@ -199,11 +235,14 @@ struct lpfc_queue { uint8_t q_flag; #define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */ #define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */ +#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */ #define LPFC_NVMET_CQ_NOTIFY 4 void __iomem *db_regaddr; +#ifndef BUILD_BRCMFCOE uint16_t dpp_enable; uint16_t dpp_id; void __iomem *dpp_regaddr; +#endif /* For q stats */ uint32_t q_cnt_1; @@ -239,11 +278,13 @@ struct lpfc_queue { struct delayed_work sched_spwork; uint64_t isr_timestamp; - uint16_t hdwq; - uint16_t last_cpu; /* most recent cpu */ - uint8_t qe_valid; struct lpfc_queue *assoc_qp; + struct list_head _poll_list; void **q_pgs; /* array to index entries per page */ + +#define LPFC_IRQ_POLL_WEIGHT 256 + struct irq_poll iop; + enum lpfc_poll_mode poll_mode; }; struct lpfc_sli4_link { @@ -451,11 +492,17 @@ struct lpfc_hba; #define LPFC_SLI4_HANDLER_NAME_SZ 16 struct lpfc_hba_eq_hdl { uint32_t idx; + uint16_t irq; char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_hba *phba; struct lpfc_queue *eq; + struct cpumask aff_mask; }; +#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx]) +#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask) +#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq) + /*BB Credit recovery value*/ struct lpfc_bbscn_params { uint32_t word0; @@ -505,6 +552,8 @@ struct lpfc_pc_sli4_params { uint32_t hdr_pp_align; uint32_t sgl_pages_max; uint32_t sgl_pp_align; + uint16_t E2E_attr; + uint8_t cmf; uint8_t cqv; uint8_t mqv; uint8_t wqv; @@ -513,6 +562,7 @@ struct lpfc_pc_sli4_params { uint8_t cqav; uint8_t wqsize; uint8_t bv1s; + uint8_t pls; #define LPFC_WQ_SZ64_SUPPORT 1 #define LPFC_WQ_SZ128_SUPPORT 2 uint8_t wqpcnt; @@ -544,11 +594,10 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ LPFC_FOF_IO_CHAN_NUM) -/* Used for IRQ vector to CPU mapping */ +/* Used for tracking CPU mapping attributes */ struct lpfc_vector_map_info { uint16_t phys_id; uint16_t core_id; - uint16_t irq; uint16_t eq; uint16_t hdwq; uint16_t flag; @@ -670,13 +719,6 @@ struct lpfc_sli4_hdw_queue { struct lpfc_lock_stat lock_conflict; #endif -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS -#define LPFC_CHECK_CPU_CNT 128 - uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; -#endif - /* Per HDWQ pool resources */ struct list_head sgl_list; struct list_head cmd_rsp_buf_list; @@ -713,19 +755,37 @@ struct lpfc_sli4_hdw_queue { #define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock) #endif +/* This cpumask_last macro define is for internal repos only. 
+ * (RHEL 7.*, SLES15, SLES12SP4) OSes do not have cpumask_last + * routine defined in the kernel's cpumask.h + */ +#define cpumask_last(maskp) \ + find_last_bit(cpumask_bits(maskp), nr_cpumask_bits) + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +struct lpfc_hdwq_stat { + u32 hdwq_no; + u32 rcv_io; + u32 xmt_io; + u32 cmpl_io; +}; +#endif + struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for - * config space registers + * PCI BAR0, config space registers */ void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for - * control registers + * PCI BAR1, control registers */ void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for - * doorbell registers + * PCI BAR2, doorbell registers */ +#ifndef BUILD_BRCMFCOE void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for * dpp registers */ +#endif union { struct { /* IF Type 0, BAR 0 PCI cfg space reg mem map */ @@ -874,8 +934,9 @@ struct lpfc_sli4_hba { struct list_head sp_queue_event; struct list_head sp_cqe_event_pool; struct list_head sp_asynce_work_queue; - struct list_head sp_fcp_xri_aborted_work_queue; + spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */ struct list_head sp_els_xri_aborted_work_queue; + spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */ struct list_head sp_unsol_work_queue; struct lpfc_sli4_link link_state; struct lpfc_sli4_lnk_info lnk_info; @@ -891,8 +952,15 @@ struct lpfc_sli4_hba { struct lpfc_vector_map_info *cpu_map; uint16_t num_possible_cpu; uint16_t num_present_cpu; + struct cpumask irq_aff_mask; uint16_t curr_disp_cpu; struct lpfc_eq_intr_info __percpu *eq_info; + + uint16_t fawwpn_in_use; /* captured FA-WWPN support state */ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + struct lpfc_hdwq_stat __percpu *c_stat; +#endif + struct lpfc_idle_stat *idle_stat; uint32_t conf_trunk; #define lpfc_conf_trunk_port0_WORD conf_trunk #define lpfc_conf_trunk_port0_SHIFT 0 @@ -918,6 +986,8 @@ struct lpfc_sli4_hba { #define lpfc_conf_trunk_port3_nd_WORD conf_trunk #define lpfc_conf_trunk_port3_nd_SHIFT 7 #define lpfc_conf_trunk_port3_nd_MASK 0x1 + uint8_t flash_id; + uint8_t asic_rev; }; enum lpfc_sge_type { @@ -1023,6 +1093,7 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, struct lpfc_queue **drqp, struct lpfc_queue **cqp, uint32_t subtype); +void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); @@ -1052,8 +1123,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *); void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *); int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); -void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); -void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba); void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri, struct lpfc_io_buf *lpfc_ncmd); @@ -1094,16 +1164,17 @@ uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *); uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba); struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, - struct lpfc_io_buf *buf); + struct 
lpfc_io_buf *lpfc_buf); struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, - struct lpfc_io_buf *buf); -int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf); + struct lpfc_io_buf + *lpfc_buf); +int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf); int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, - struct lpfc_io_buf *buf); + struct lpfc_io_buf *lpfc_buf); void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, - struct lpfc_sli4_hdw_queue *hdwq); + struct lpfc_sli4_hdw_queue *lpfc_buf); void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, - struct lpfc_sli4_hdw_queue *hdwq); + struct lpfc_sli4_hdw_queue *lpfc_buf); static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx) { return q->q_pgs[idx / q->entry_cnt_per_pg] + diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index b8aae31ffda3..dc9ef3e52a2f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,8 +20,12 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.4.0.0" +#define LPFC_DRIVER_VERSION "12.8.542.34" + +#ifndef BUILD_BRCMFCOE + #define LPFC_DRIVER_NAME "lpfc" +#define LPFCMGMT_NAME "lpfcmgmt" /* Used for SLI 2/3 */ #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" @@ -32,6 +36,25 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \ +#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ "and/or its subsidiaries." + +#else + +#define LPFC_DRIVER_NAME "brcmfcoe" +#define LPFCMGMT_NAME "brcmfcoemgmt" + +/* Used for SLI 2/3 */ +#define LPFC_SP_DRIVER_HANDLER_NAME "brcmfcoe:sp" +#define LPFC_FP_DRIVER_HANDLER_NAME "brcmfcoe:fp" + +/* Used for SLI4 */ +#define LPFC_DRIVER_HANDLER_NAME "brcmfcoe:" + +#define LPFC_MODULE_DESC "Emulex FCoE SCSI driver " \ + LPFC_DRIVER_VERSION +#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \ + "The term \"Broadcom\" refers to Broadcom Limited " \ + "and/or its subsidiaries." +#endif diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index b76646357980..0df36272298e 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -145,7 +145,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (signal_pending(current)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1830 Signal aborted mbxCmd x%x\n", mb->mbxCommand); lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -154,7 +154,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) mempool_free(pmb, phba->mbox_mem_pool); return -EINTR; } else { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1818 VPort failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x, rc = x%x\n", mb->mbxCommand, mb->mbxStatus, rc); @@ -190,7 +190,7 @@ lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0))) return 1; - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1822 Invalid %s: %02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x\n", name_type, @@ -284,11 +284,11 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) } if (time_after(jiffies, wait_time_max)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, - "1835 Vport discovery quiesce failed:" - " state x%x fc_flags x%x wait msecs x%x\n", - vport->port_state, vport->fc_flag, - jiffies_to_msecs(jiffies - start_time)); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "1835 Vport discovery quiesce failed:" + " state x%x fc_flags x%x wait msecs x%x\n", + vport->port_state, vport->fc_flag, + jiffies_to_msecs(jiffies - start_time)); } int @@ -305,7 +305,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) int status; if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1808 Create VPORT failed: " "NPIV is not enabled: SLImode:%d\n", phba->sli_rev); @@ -315,7 +315,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* NPIV is not supported if HBA has NVME Target enabled */ if (phba->nvmet_support) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3189 Create VPORT failed: " "NPIV is not supported on NVME Target\n"); rc = VPORT_INVAL; @@ -324,7 +324,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vpi = lpfc_alloc_vpi(phba); if (vpi == 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1809 Create VPORT failed: " "Max VPORTs (%d) exceeded\n", phba->max_vpi); @@ -334,7 +334,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1810 Create VPORT failed: Cannot get " "instance number\n"); lpfc_free_vpi(phba, vpi); @@ -344,7 +344,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vport = lpfc_create_port(phba, instance, &fc_vport->dev); if (!vport) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1811 Create VPORT failed: vpi x%x\n", vpi); lpfc_free_vpi(phba, vpi); rc = VPORT_NORESOURCES; @@ -356,11 +356,11 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) if ((status = lpfc_vport_sparm(phba, vport))) { if (status == -EINTR) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + 
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1831 Create VPORT Interrupted.\n"); rc = VPORT_ERROR; } else { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1813 Create VPORT failed. " "Cannot get sparam\n"); rc = VPORT_NORESOURCES; @@ -378,7 +378,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1821 Create VPORT failed. " "Invalid WWN format\n"); lpfc_free_vpi(phba, vpi); @@ -388,7 +388,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) } if (!lpfc_unique_wwpn(phba, vport)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1823 Create VPORT failed. " "Duplicate WWN on HBA\n"); lpfc_free_vpi(phba, vpi); @@ -426,7 +426,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) (pport->fc_flag & FC_VFI_REGISTERED)) { rc = lpfc_sli4_init_vpi(vport); if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "1838 Failed to INIT_VPI on vpi %d " "status %d\n", vpi, rc); rc = VPORT_NORESOURCES; @@ -469,7 +469,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0262 No NPIV Fabric support\n"); } } else { @@ -479,7 +479,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, - "1825 Vport Created.\n"); + "1825 Vport Created.\n"); lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); error_out: return rc; @@ -534,7 +534,7 @@ disable_vport(struct fc_vport *fc_vport) } lpfc_vport_set_state(vport, FC_VPORT_DISABLED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1826 Vport Disabled.\n"); return VPORT_OK; } @@ -575,7 +575,7 @@ enable_vport(struct fc_vport *fc_vport) lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0264 No NPIV Fabric support\n"); } } else { @@ -583,7 +583,7 @@ enable_vport(struct fc_vport *fc_vport) } out: - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1827 Vport Enabled.\n"); return VPORT_OK; } @@ -609,7 +609,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1812 vport_delete failed: Cannot delete " "physical host\n"); return VPORT_ERROR; @@ -618,7 +618,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) /* If the vport is a static vport fail the deletion. */ if ((vport->vport_flag & STATIC_VPORT) && !(phba->pport->load_flag & FC_UNLOADING)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1837 vport_delete failed: Cannot delete " "static vport.\n"); return VPORT_ERROR; @@ -642,27 +642,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } + /* - * This is a bit of a mess. 
We want to ensure the shost doesn't get - torn down until we're done with the embedded lpfc_vport structure. - * - * Beyond holding a reference for this function, we also need a - * reference for outstanding I/O requests we schedule during delete - * processing. But once we scsi_remove_host() we can no longer obtain - * a reference through scsi_host_get(). - * - * So we take two references here. We release one reference at the - * bottom of the function -- after delinking the vport. And we - * release the other at the completion of the unreg_vpi that get's - * initiated after we've disposed of all other resources associated - * with the port. + * Take an early refcount for outstanding I/O requests we schedule during + * delete processing for unreg_vpi. Always keep this before + * scsi_remove_host() as we can no longer obtain a reference through + * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL. */ if (!scsi_host_get(shost)) return VPORT_INVAL; - if (!scsi_host_get(shost)) { - scsi_host_put(shost); - return VPORT_INVAL; - } + lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); @@ -809,15 +798,16 @@ skip_logo: if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); - } else + } else { scsi_host_put(shost); + } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; spin_lock_irq(&phba->port_list_lock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->port_list_lock); - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "1828 Vport Deleted.\n"); scsi_host_put(shost); return VPORT_OK; @@ -829,7 +819,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; - vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *), + vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; @@ -838,7 +828,8 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) if (port_iterator->load_flag & FC_UNLOADING) continue; if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { - lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(port_iterator, KERN_ERR, + LOG_TRACE_EVENT, "1801 Create vport work array FAILED: " "cannot do scsi_host_get\n"); continue; @@ -908,7 +899,8 @@ lpfc_alloc_bucket(struct lpfc_vport *vport) GFP_ATOMIC); + if (!ndlp->lat_data) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + lpfc_printf_vlog(vport, KERN_ERR, + LOG_TRACE_EVENT, "0287 lpfc_alloc_bucket failed to " "allocate statistical data buffer DID " "0x%x\n", ndlp->nlp_DID); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index f4b8528dd2e7..9de5bd97bc52 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0cbe6740e0c9..b9c1f722f1de 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5586,9 +5586,13 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { + if (j < instance->low_latency_index_start) + irq_set_affinity_hint( + pci_irq_vector(pdev, j), NULL); free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); + } /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; instance->msix_load_balance = false; @@ -5626,6 +5630,9 @@ megasas_destroy_irqs(struct megasas_instance *instance) { if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { + if (i < instance->low_latency_index_start) + irq_set_affinity_hint( + pci_irq_vector(instance->pdev, i), NULL); free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); } @@ -8031,7 +8038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, int error = 0, i; void *sense = NULL; dma_addr_t sense_handle; - unsigned long *sense_ptr; + void *sense_ptr; u32 opcode = 0; memset(kbuff_arr, 0, sizeof(kbuff_arr)); @@ -8153,6 +8160,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, } if (ioc->sense_len) { + /* make sure the pointer is part of the frame */ + if (ioc->sense_off > + (sizeof(union megasas_frame) - sizeof(__le64))) { + error = -EINVAL; + goto out; + } + sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, &sense_handle, GFP_KERNEL); if (!sense) { @@ -8160,12 +8174,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, goto out; } - sense_ptr = - (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); - if (instance->consistent_mask_64bit) - *sense_ptr = cpu_to_le64(sense_handle); - else - *sense_ptr = cpu_to_le32(sense_handle); + /* always store 64 bits regardless of addressing */ + sense_ptr = (void *)cmd->frame + ioc->sense_off; + put_unaligned_le64(sense_handle, sense_ptr); } /* diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 89daa9815d4e..17b4496d8780 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3738,7 +3738,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget) instance = irq_ctx->instance; if (irq_ctx->irq_line_enable) { - disable_irq(irq_ctx->os_irq); + disable_irq_nosync(irq_ctx->os_irq); irq_ctx->irq_line_enable = false; } @@ -3787,10 +3787,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp) if (instance->mask_interrupts) return IRQ_NONE; -#if defined(ENABLE_IRQ_POLL) if (irq_context->irq_poll_scheduled) return IRQ_HANDLED; -#endif if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance); @@ -4227,6 +4225,7 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) struct fusion_context *fusion; struct megasas_cmd *cmd_mfi; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; u16 smid; bool refire_cmd = 0; u8 result; @@ -4284,6 +4283,11 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) break; } + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) + cmd_fusion->io_request; + if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) + result = RETURN_CMD; + 
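
The sense-buffer change in the megaraid_sas_base.c hunk above replaces a width-dependent store through a mistyped pointer with one unconditional 64-bit little-endian store. A minimal standalone sketch of why put_unaligned_le64() fits (function name invented; the kernel helper itself is real):

#include <asm/unaligned.h>	/* put_unaligned_le64() */

/*
 * Sketch: store a 64-bit LE DMA handle at an arbitrary byte offset
 * inside a raw firmware frame, mirroring the fix above.
 */
static void frame_set_addr64(void *frame, u32 off, dma_addr_t handle)
{
	/*
	 * The offset comes from userspace/firmware, so the destination may
	 * be unaligned; put_unaligned_le64() stores byte-wise and converts
	 * to little-endian on big-endian CPUs. Writing all 64 bits is safe
	 * even under a 32-bit DMA mask - the high dword is simply zero.
	 */
	put_unaligned_le64((u64)handle, frame + off);
}
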
switch (result) { case REFIRE_CMD: megasas_fire_cmd_fusion(instance, req_desc); @@ -4481,7 +4485,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, if (!timeleft) { dev_err(&instance->pdev->dev, "task mgmt type 0x%x timed out\n", type); - cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE; mutex_unlock(&instance->reset_mutex); rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); mutex_lock(&instance->reset_mutex); diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 74fb50644678..4dd50db90677 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1045,6 +1045,8 @@ static void handle_error(struct mesh_state *ms) while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); + if (ms->dma_started) + halt_dma(ms); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; @@ -1357,7 +1359,8 @@ static void halt_dma(struct mesh_state *ms) ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } - scsi_dma_unmap(cmd); + if (cmd) + scsi_dma_unmap(cmd); ms->dma_started = 0; } @@ -1712,6 +1715,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd) spin_lock_irqsave(ms->host->host_lock, flags); + if (ms->dma_started) + halt_dma(ms); + /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig index 86564232ec16..de86f85040b2 100755 --- a/drivers/scsi/mpt3sas/Kconfig +++ b/drivers/scsi/mpt3sas/Kconfig @@ -47,6 +47,7 @@ config SCSI_MPT3SAS depends on PCI && SCSI select SCSI_SAS_ATTRS select RAID_ATTRS + select IRQ_POLL ---help--- This driver supports PCI-Express SAS 12Gb/s Host Adapters. diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile index 4748143b14c5..2ff040224ab5 100755 --- a/drivers/scsi/mpt3sas/Makefile +++ b/drivers/scsi/mpt3sas/Makefile @@ -12,4 +12,5 @@ mpt3sas-y += mpt3sas_base.o \ mpt3sas_transport.o \ mpt3sas_ctl.o \ mpt3sas_trigger_diag.o \ + mpt3sas_debugfs.o \ diff --git a/drivers/scsi/mpt3sas/csmi/csmisas.c b/drivers/scsi/mpt3sas/csmi/csmisas.c index bb3f8b9a7358..b2ee2c83786d 100755 --- a/drivers/scsi/mpt3sas/csmi/csmisas.c +++ b/drivers/scsi/mpt3sas/csmi/csmisas.c @@ -639,6 +639,7 @@ _csmisas_get_cntlr_status(struct MPT3SAS_ADAPTER *ioc, break; case MPI2_IOC_STATE_FAULT: + case MPI2_IOC_STATE_COREDUMP: karg->Status.uStatus = CSMI_SAS_CNTLR_STATUS_FAILED; karg->Status.uOfflineReason = 0; break; @@ -2194,7 +2195,7 @@ _csmisas_ssp_passthru(struct MPT3SAS_ADAPTER *ioc, printk(MPT3SAS_INFO_FMT "issue target reset: handle" "(0x%04x)\n", ioc->name, sas_device.handle); mpt3sas_scsih_issue_locked_tm(ioc, sas_device.handle, 0, 0, 0, - MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 0); + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, 30, 0); if (ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) { printk(MPT3SAS_INFO_FMT "target reset completed: handle" "(0x%04x)\n", ioc->name, sas_device.handle); diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 81eaed7c1cfb..77f72e7bdda1 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -1,1350 +1,1353 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2.h - * Title: MPI Message independent structures and definitions - * including System Interface Register Set and - * scatter/gather formats. 
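
Two of the hunks above belong together: megasas_irqpoll() switching to disable_irq_nosync(), and mpt3sas newly selecting IRQ_POLL in Kconfig. The synchronous disable_irq() waits for running handlers to finish, so calling it from interrupt context can wait on itself; the _nosync variant only masks the line. A hedged sketch of the mask-then-poll handshake (struct layout and the process_replies()/queue_is_busy() helpers are invented; irq_poll_sched()/irq_poll_complete() are the real kernel API):

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct my_irq_ctx {
	struct irq_poll iop;
	int os_irq;
};

static int process_replies(struct my_irq_ctx *ctx, int budget);
static bool queue_is_busy(struct my_irq_ctx *ctx);

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_irq_ctx *ctx = data;

	if (queue_is_busy(ctx)) {
		/* _nosync is mandatory here: a synchronous disable would
		 * wait for running handlers - i.e. for ourselves. */
		disable_irq_nosync(ctx->os_irq);
		irq_poll_sched(&ctx->iop);	/* continue in softirq */
		return IRQ_HANDLED;
	}
	return process_replies(ctx, INT_MAX) ? IRQ_HANDLED : IRQ_NONE;
}

static int my_irqpoll(struct irq_poll *iop, int budget)
{
	struct my_irq_ctx *ctx = container_of(iop, struct my_irq_ctx, iop);
	int done = process_replies(ctx, budget);

	if (done < budget) {
		irq_poll_complete(iop);		/* leave polled mode */
		enable_irq(ctx->os_irq);	/* re-arm the line */
	}
	return done;
}
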
- * Creation Date: June 21, 2006 - * - * mpi2.h Version: 02.00.55 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved ReplyPostHostIndex register to offset 0x6C of the - * MPI2_SYSTEM_INTERFACE_REGS and modified the define for - * MPI2_REPLY_POST_HOST_INDEX_OFFSET. - * Added union of request descriptors. - * Added union of reply descriptors. - * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_VERSION_02_00. - * Fixed the size of the FunctionDependent5 field in the - * MPI2_DEFAULT_REPLY structure. - * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. - * Removed the MPI-defined Fault Codes and extended the - * product specific codes up to 0xEFFF. - * Added a sixth key value for the WriteSequence register - * and changed the flush value to 0x0. - * Added message function codes for Diagnostic Buffer Post - * and Diagnsotic Release. - * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED - * Moved MPI2_VERSION_UNION from mpi2_ioc.h. - * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. - * Added #defines for marking a reply descriptor as unused. - * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved LUN field defines from mpi2_init.h. - * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. - * In all request and reply descriptors, replaced VF_ID - * field with MSIxIndex field. - * Removed DevHandle field from - * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those - * bytes reserved. - * Added RAID Accelerator functionality. - * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MSI-x index mask and shift for Reply Post Host - * Index register. - * Added function code for Host Based Discovery Action. - * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. - * Added defines for product-specific range of message - * function codes, 0xF0 to 0xFF. - * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. - * Added alternative defines for the SGE Direction bit. - * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. - * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. - * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. - * Incorporating additions for MPI v2.5. - * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. 
- * Added Hard Reset delay timings. - * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. - * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. - * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. - * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-14 02.00.36 Updated copyright information. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 03-16-15 02.00.37 Updated for MPI v2.6. - * Bumped MPI2_HEADER_VERSION_UNIT. - * Added Scratchpad registers and - * AtomicRequestDescriptorPost register to - * MPI2_SYSTEM_INTERFACE_REGS. - * Added MPI2_DIAG_SBR_RELOAD. - * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER. - * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT - * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. - * Added V7 HostDiagnostic register defines - * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT - * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT - * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines - * to be unique within first 32 characters. - * Removed AHCI support. - * Removed SOP support. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. - * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-22-18 02.00.51 Added SECURE_BOOT define. - * Bumped MPI2_HEADER_VERSION_UNIT - * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNITMPI2_HEADER_VERSION_UNIT. 
- * Added MPI2_IOCSTATUS_FAILURE - * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT - * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_H -#define MPI2_H - - -/***************************************************************************** -* -* MPI Version Definitions -* -*****************************************************************************/ - -#define MPI2_VERSION_MAJOR_MASK (0xFF00) -#define MPI2_VERSION_MAJOR_SHIFT (8) -#define MPI2_VERSION_MINOR_MASK (0x00FF) -#define MPI2_VERSION_MINOR_SHIFT (0) - -/* major version for all MPI v2.x */ -#define MPI2_VERSION_MAJOR (0x02) - -/* minor version for MPI v2.0 compatible products */ -#define MPI2_VERSION_MINOR (0x00) -#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI2_VERSION_MINOR) -#define MPI2_VERSION_02_00 (0x0200) - - -/* minor version for MPI v2.5 compatible products */ -#define MPI25_VERSION_MINOR (0x05) -#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI25_VERSION_MINOR) -#define MPI2_VERSION_02_05 (0x0205) - - -/* minor version for MPI v2.6 compatible products */ -#define MPI26_VERSION_MINOR (0x06) -#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ - MPI26_VERSION_MINOR) -#define MPI2_VERSION_02_06 (0x0206) - - -/* Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x37) -#define MPI2_HEADER_VERSION_DEV (0x00) -#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) -#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) -#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) -#define MPI2_HEADER_VERSION_DEV_SHIFT (0) -#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV) - - -/***************************************************************************** -* -* IOC State Definitions -* -*****************************************************************************/ - -#define MPI2_IOC_STATE_RESET (0x00000000) -#define MPI2_IOC_STATE_READY (0x10000000) -#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) -#define MPI2_IOC_STATE_FAULT (0x40000000) - -#define MPI2_IOC_STATE_MASK (0xF0000000) -#define MPI2_IOC_STATE_SHIFT (28) - -/* Fault state range for prodcut specific codes */ -#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) -#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) - - -/***************************************************************************** -* -* System Interface Register Definitions -* -*****************************************************************************/ - -typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS -{ - U32 Doorbell; /* 0x00 */ - U32 WriteSequence; /* 0x04 */ - U32 HostDiagnostic; /* 0x08 */ - U32 Reserved1; /* 0x0C */ - U32 DiagRWData; /* 0x10 */ - U32 DiagRWAddressLow; /* 0x14 */ - U32 DiagRWAddressHigh; /* 0x18 */ - U32 Reserved2[5]; /* 0x1C */ - U32 HostInterruptStatus; /* 0x30 */ - U32 HostInterruptMask; /* 0x34 */ - U32 DCRData; /* 0x38 */ - U32 DCRAddress; /* 0x3C */ - U32 Reserved3[2]; /* 0x40 */ - U32 ReplyFreeHostIndex; /* 0x48 */ - U32 Reserved4[8]; /* 0x4C */ - U32 ReplyPostHostIndex; /* 0x6C */ - U32 Reserved5; /* 0x70 */ - U32 HCBSize; /* 0x74 */ - U32 HCBAddressLow; /* 0x78 */ - U32 HCBAddressHigh; /* 0x7C */ - U32 Reserved6[12]; /* 0x80 */ - U32 Scratchpad[4]; /* 0xB0 */ - U32 RequestDescriptorPostLow; /* 0xC0 */ - U32 RequestDescriptorPostHigh; /* 0xC4 */ - U32 AtomicRequestDescriptorPost;/* 0xC8 */ /* MPI v2.6 and later; reserved in earlier versions */ 
- U32 Reserved7[13]; /* 0xCC */ -} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS, - Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t; - -/* - * Defines for working with the Doorbell register. - */ -#define MPI2_DOORBELL_OFFSET (0x00000000) - -/* IOC --> System values */ -#define MPI2_DOORBELL_USED (0x08000000) -#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) -#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) -#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) -#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) - -/* System --> IOC values */ -#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) -#define MPI2_DOORBELL_FUNCTION_SHIFT (24) -#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) -#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) - - -/* - * Defines for the WriteSequence register - */ -#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) -#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) -#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) -#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) -#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) -#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) -#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) -#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) -#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) - -/* - * Defines for the HostDiagnostic register - */ -#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) - -#define MPI26_DIAG_SECURE_BOOT (0x80000000) - -#define MPI2_DIAG_SBR_RELOAD (0x00002000) - -#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) -#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) -#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) - -/* Defines for V7A/V7R HostDiagnostic Register */ -#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000) -#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800) -#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000) -#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800) - -#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) -#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) -#define MPI2_DIAG_HCB_MODE (0x00000100) -#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) -#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) -#define MPI2_DIAG_RESET_HISTORY (0x00000020) -#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) -#define MPI2_DIAG_RESET_ADAPTER (0x00000004) -#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) - -/* - * Offsets for DiagRWData and address - */ -#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) -#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) -#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) - -/* - * Defines for the HostInterruptStatus register - */ -#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) -#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) -#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS -#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) -#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) -#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) -#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS - -/* - * Defines for the HostInterruptMask register - */ -#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) -#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) -#define MPI2_HIM_REPLY_INT_MASK (0x00000008) -#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK -#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) -#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK - -/* - * Offsets for DCRData and address - */ -#define MPI2_DCR_DATA_OFFSET (0x00000038) -#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) - -/* - * Offset for the Reply Free Queue - */ -#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) - -/* - * Defines for the Reply 
Descriptor Post Queue - */ -#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) -#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) -#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) -#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) -#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */ - - -/* - * Defines for the HCBSize and address - */ -#define MPI2_HCB_SIZE_OFFSET (0x00000074) -#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) -#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) - -#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) -#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) - -/* - * Offsets for the Scratchpad registers - */ -#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) -#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) -#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8) -#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) - -/* - * Offsets for the Request Descriptor Post Queue - */ -#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) -#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) -#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) - - -/* Hard Reset delay timings */ -#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) -#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) -#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) - -/***************************************************************************** -* -* Message Descriptors -* -*****************************************************************************/ - -/* Request Descriptors */ - -/* Default Request Descriptor */ -typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U16 LMID; /* 0x04 */ - U16 DescriptorTypeDependent; /* 0x06 */ -} MPI2_DEFAULT_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, - Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t; - -/* defines for the RequestFlags field */ -#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) -#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) /* use carefully; values below are pre-shifted left */ -#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) -#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) -#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) -#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) -#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) -#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) -#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10) - -#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) - - -/* High Priority Request Descriptor */ -typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U16 LMID; /* 0x04 */ - U16 Reserved1; /* 0x06 */ -} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, - Mpi2HighPriorityRequestDescriptor_t, - MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t; - - -/* SCSI IO Request Descriptor */ -typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U16 LMID; /* 0x04 */ - U16 DevHandle; /* 0x06 */ -} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, - Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t; - - -/* SCSI Target Request Descriptor */ -typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - 
U16 SMID; /* 0x02 */ - U16 LMID; /* 0x04 */ - U16 IoIndex; /* 0x06 */ -} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, - Mpi2SCSITargetRequestDescriptor_t, - MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; - - -/* RAID Accelerator Request Descriptor */ -typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U16 LMID; /* 0x04 */ - U16 Reserved; /* 0x06 */ -} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, - Mpi2RAIDAcceleratorRequestDescriptor_t, - MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t; - - -/* Fast Path SCSI IO Request Descriptor */ -typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR - MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, - Mpi25FastPathSCSIIORequestDescriptor_t, - MPI2_POINTER pMpi25FastPathSCSIIORequestDescriptor_t; - - -/* PCIe Encapsulated Request Descriptor */ -typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR - MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, - Mpi26PCIeEncapsulatedRequestDescriptor_t, - MPI2_POINTER pMpi26PCIeEncapsulatedRequestDescriptor_t; - - -/* union of Request Descriptors */ -typedef union _MPI2_REQUEST_DESCRIPTOR_UNION -{ - MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; - MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; - MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; - MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; - MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; - MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; - MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; - U64 Words; -} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION, - Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t; - - -/* Atomic Request Descriptors */ - -/* - * All Atomic Request Descriptors have the same format, so the following - * structure is used for all Atomic Request Descriptors: - * Atomic Default Request Descriptor - * Atomic High Priority Request Descriptor - * Atomic SCSI IO Request Descriptor - * Atomic SCSI Target Request Descriptor - * Atomic RAID Accelerator Request Descriptor - * Atomic Fast Path SCSI IO Request Descriptor - * Atomic PCIe Encapsulated Request Descriptor - */ - -/* Atomic Request Descriptor */ -typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR -{ - U8 RequestFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ -} MPI26_ATOMIC_REQUEST_DESCRIPTOR, - MPI2_POINTER PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, - Mpi26AtomicRequestDescriptor_t, MPI2_POINTER pMpi26AtomicRequestDescriptor_t; - -/* for the RequestFlags field, use the same defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR */ - - -/* Reply Descriptors */ - -/* Default Reply Descriptor */ -typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 DescriptorTypeDependent1; /* 0x02 */ - U32 DescriptorTypeDependent2; /* 0x04 */ -} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, - Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; - -/* defines for the ReplyFlags field */ -#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) -#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) -#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) -#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) -#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) -#define 
MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) -#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) -#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08) -#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) - -/* values for marking a reply descriptor as unused */ -#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) -#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) - -/* Address Reply Descriptor */ -typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U32 ReplyFrameAddress; /* 0x04 */ -} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, - Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t; - -#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) - - -/* SCSI IO Success Reply Descriptor */ -typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U16 TaskTag; /* 0x04 */ - U16 Reserved1; /* 0x06 */ -} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - Mpi2SCSIIOSuccessReplyDescriptor_t, - MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t; - - -/* TargetAssist Success Reply Descriptor */ -typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U8 SequenceNumber; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 IoIndex; /* 0x06 */ -} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, - Mpi2TargetAssistSuccessReplyDescriptor_t, - MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t; - - -/* Target Command Buffer Reply Descriptor */ -typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U8 VP_ID; /* 0x02 */ - U8 Flags; /* 0x03 */ - U16 InitiatorDevHandle; /* 0x04 */ - U16 IoIndex; /* 0x06 */ -} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, - Mpi2TargetCommandBufferReplyDescriptor_t, - MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t; - -/* defines for Flags field */ -#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) - - -/* RAID Accelerator Success Reply Descriptor */ -typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR -{ - U8 ReplyFlags; /* 0x00 */ - U8 MSIxIndex; /* 0x01 */ - U16 SMID; /* 0x02 */ - U32 Reserved; /* 0x04 */ -} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, - Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, - MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; - - -/* Fast Path SCSI IO Success Reply Descriptor */ -typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR - MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, - Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, - MPI2_POINTER pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; - - -/* PCIe Encapsulated Success Reply Descriptor */ -typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR - MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, - MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, - Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, - MPI2_POINTER pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; - - -/* union of Reply Descriptors */ -typedef union _MPI2_REPLY_DESCRIPTORS_UNION -{ - MPI2_DEFAULT_REPLY_DESCRIPTOR Default; - 
MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; - MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; - MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; - MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; - MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; - MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; - MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR PCIeEncapsulatedSuccess; - U64 Words; -} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, - Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; - - - -/***************************************************************************** -* -* Message Functions -* -*****************************************************************************/ - -#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ -#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */ -#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ -#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */ -#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */ -#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */ -#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */ -#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */ -#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */ -#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */ -#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */ -#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */ -#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */ -#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */ -#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */ -#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */ -#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */ -#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */ -#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */ -#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */ /* for MPI v2.5 and earlier */ -#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) /* IO Unit Control */ /* for MPI v2.6 and later */ -#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */ -#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */ -#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */ -#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ -#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ -#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */ -#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */ -#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */ -#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) /* Send Host Message */ -#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) /* NVMe Encapsulated (MPI v2.6) */ -#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */ -#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */ - - - -/* Doorbell functions */ -#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) -#define MPI2_FUNCTION_HANDSHAKE (0x42) - - -/***************************************************************************** -* -* IOC Status Values -* -*****************************************************************************/ - -/* mask for 
IOCStatus status value */ -#define MPI2_IOCSTATUS_MASK (0x7FFF) - -/**************************************************************************** -* Common IOCStatus values for all replies -****************************************************************************/ - -#define MPI2_IOCSTATUS_SUCCESS (0x0000) -#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) -#define MPI2_IOCSTATUS_BUSY (0x0002) -#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) -#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) -#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) -#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) -#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) -#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) -#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) -#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) /* MPI v2.6 and later */ -#define MPI2_IOCSTATUS_FAILURE (0x000F) - -/**************************************************************************** -* Config IOCStatus values -****************************************************************************/ - -#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) -#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) -#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) -#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) -#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) -#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) - -/**************************************************************************** -* SCSI IO Reply -****************************************************************************/ - -#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) -#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) -#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) -#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) -#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) -#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) -#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) -#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) -#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) -#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) -#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) -#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) - -/**************************************************************************** -* For use by SCSI Initiator and SCSI Target end-to-end data protection -****************************************************************************/ - -#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) -#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) -#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) - -/**************************************************************************** -* SCSI Target values -****************************************************************************/ - -#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) -#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) -#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) -#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) -#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) -#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) -#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) -#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) -#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) -#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) - -/**************************************************************************** -* Serial Attached SCSI values -****************************************************************************/ - -#define 
MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) -#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) - -/**************************************************************************** -* Diagnostic Buffer Post / Diagnostic Release values -****************************************************************************/ - -#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) - -/**************************************************************************** -* RAID Accelerator values -****************************************************************************/ - -#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) - -/**************************************************************************** -* IOCStatus flag to indicate that log info is available -****************************************************************************/ - -#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) - -/**************************************************************************** -* IOCLogInfo Types -****************************************************************************/ - -#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) -#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) -#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) -#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) -#define MPI2_IOCLOGINFO_TYPE_FC (0x2) -#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) -#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) -#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) - - -/***************************************************************************** -* -* Standard Message Structures -* -*****************************************************************************/ - -/**************************************************************************** -* Request Message Header for all request messages -****************************************************************************/ - -typedef struct _MPI2_REQUEST_HEADER -{ - U16 FunctionDependent1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 FunctionDependent2; /* 0x04 */ - U8 FunctionDependent3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ -} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER, - MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t; - - -/**************************************************************************** -* Default Reply -****************************************************************************/ - -typedef struct _MPI2_DEFAULT_REPLY -{ - U16 FunctionDependent1; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 FunctionDependent2; /* 0x04 */ - U8 FunctionDependent3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U16 FunctionDependent5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY, - MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t; - - -/* common version structure/union used in messages and configuration pages */ - -typedef struct _MPI2_VERSION_STRUCT -{ - U8 Dev; /* 0x00 */ - U8 Unit; /* 0x01 */ - U8 Minor; /* 0x02 */ - U8 Major; /* 0x03 */ -} MPI2_VERSION_STRUCT; - -typedef union _MPI2_VERSION_UNION -{ - MPI2_VERSION_STRUCT Struct; - U32 Word; -} MPI2_VERSION_UNION; - - -/* LUN field defines, common to many structures */ -#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) -#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) -#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) -#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING 
(0xFFFF0000) -#define MPI2_LUN_LEVEL_1_WORD (0xFF00) -#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) - - -/***************************************************************************** -* -* Fusion-MPT MPI Scatter Gather Elements -* -*****************************************************************************/ - -/**************************************************************************** -* MPI Simple Element structures -****************************************************************************/ - -typedef struct _MPI2_SGE_SIMPLE32 -{ - U32 FlagsLength; - U32 Address; -} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32, - Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t; - -typedef struct _MPI2_SGE_SIMPLE64 -{ - U32 FlagsLength; - U64 Address; -} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64, - Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t; - -typedef struct _MPI2_SGE_SIMPLE_UNION -{ - U32 FlagsLength; - union - { - U32 Address32; - U64 Address64; - } u; -} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION, - Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t; - - -/**************************************************************************** -* MPI Chain Element structures - for MPI v2.0 products only -****************************************************************************/ - -typedef struct _MPI2_SGE_CHAIN32 -{ - U16 Length; - U8 NextChainOffset; - U8 Flags; - U32 Address; -} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32, - Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t; - -typedef struct _MPI2_SGE_CHAIN64 -{ - U16 Length; - U8 NextChainOffset; - U8 Flags; - U64 Address; -} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64, - Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t; - -typedef struct _MPI2_SGE_CHAIN_UNION -{ - U16 Length; - U8 NextChainOffset; - U8 Flags; - union - { - U32 Address32; - U64 Address64; - } u; -} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION, - Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t; - - -/**************************************************************************** -* MPI Transaction Context Element structures - for MPI v2.0 products only -****************************************************************************/ - -typedef struct _MPI2_SGE_TRANSACTION32 -{ - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[1]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32, - Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t; - -typedef struct _MPI2_SGE_TRANSACTION64 -{ - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[2]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64, - Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t; - -typedef struct _MPI2_SGE_TRANSACTION96 -{ - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[3]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96, - Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t; - -typedef struct _MPI2_SGE_TRANSACTION128 -{ - U8 Reserved; - U8 ContextSize; - U8 DetailsLength; - U8 Flags; - U32 TransactionContext[4]; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128, - Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128; - -typedef struct _MPI2_SGE_TRANSACTION_UNION -{ - U8 Reserved; - U8 ContextSize; - U8 
DetailsLength; - U8 Flags; - union - { - U32 TransactionContext32[1]; - U32 TransactionContext64[2]; - U32 TransactionContext96[3]; - U32 TransactionContext128[4]; - } u; - U32 TransactionDetails[1]; -} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION, - Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t; - - -/**************************************************************************** -* MPI SGE union for IO SGL's - for MPI v2.0 products only -****************************************************************************/ - -typedef struct _MPI2_MPI_SGE_IO_UNION -{ - union - { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_CHAIN_UNION Chain; - } u; -} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION, - Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t; - - -/**************************************************************************** -* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only -****************************************************************************/ - -typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION -{ - union - { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_TRANSACTION_UNION Transaction; - } u; -} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION, - Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t; - - -/**************************************************************************** -* All MPI SGE types union -****************************************************************************/ - -typedef struct _MPI2_MPI_SGE_UNION -{ - union - { - MPI2_SGE_SIMPLE_UNION Simple; - MPI2_SGE_CHAIN_UNION Chain; - MPI2_SGE_TRANSACTION_UNION Transaction; - } u; -} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION, - Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t; - - -/**************************************************************************** -* MPI SGE field definition and masks -****************************************************************************/ - -/* Flags field bit definitions */ - -#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) -#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) -#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) -#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) -#define MPI2_SGE_FLAGS_DIRECTION (0x04) -#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) -#define MPI2_SGE_FLAGS_END_OF_LIST (0x01) - -#define MPI2_SGE_FLAGS_SHIFT (24) - -#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) -#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) - -/* Element Type */ - -#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) /* for MPI v2.0 products only */ -#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) -#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) /* for MPI v2.0 products only */ -#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) - -/* Address location */ - -#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) - -/* Direction */ - -#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) -#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) - -#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) -#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) - -/* Address Size */ - -#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) -#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) - -/* Context Size */ - -#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) -#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) -#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) -#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) - -#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) -#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) - 
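
The MPI SGE flag defines listed above (removed here and re-added verbatim further down) all land in the top byte of the FlagsLength dword. As a worked sketch built purely from those defines (function name invented): 0x80|0x40|0x10|0x04|0x02|0x01 = 0xD7, so the last 64-bit simple element of an outbound buffer carries 0xD7 in bits 31:24 and the byte count below it.

/* Sketch: fill one final 64-bit simple SGE for a host-to-IOC transfer. */
static void build_last_sge64(Mpi2SGESimple64_t *sge, dma_addr_t addr, u32 len)
{
	u32 flags = MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
		    MPI2_SGE_FLAGS_END_OF_LIST;

	/* flags << 24 gives 0xD7000000; length uses the low 24 bits */
	sge->FlagsLength = cpu_to_le32((flags << MPI2_SGE_FLAGS_SHIFT) |
				       (len & MPI2_SGE_LENGTH_MASK));
	sge->Address = cpu_to_le64(addr);
}
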
-/**************************************************************************** -* MPI SGE operation Macros -****************************************************************************/ - -/* SIMPLE FlagsLength manipulations... */ -#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) -#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT) -#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) -#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) - -#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l)) - -#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) -#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) -#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l) - -/* CAUTION - The following are READ-MODIFY-WRITE! */ -#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f) -#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l) - -#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT) - - -/***************************************************************************** -* -* Fusion-MPT IEEE Scatter Gather Elements -* -*****************************************************************************/ - -/**************************************************************************** -* IEEE Simple Element structures -****************************************************************************/ - -/* MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ -typedef struct _MPI2_IEEE_SGE_SIMPLE32 -{ - U32 Address; - U32 FlagsLength; -} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32, - Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t; - -typedef struct _MPI2_IEEE_SGE_SIMPLE64 -{ - U64 Address; - U32 Length; - U16 Reserved1; - U8 Reserved2; - U8 Flags; -} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64, - Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t; - -typedef union _MPI2_IEEE_SGE_SIMPLE_UNION -{ - MPI2_IEEE_SGE_SIMPLE32 Simple32; - MPI2_IEEE_SGE_SIMPLE64 Simple64; -} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION, - Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t; - - -/**************************************************************************** -* IEEE Chain Element structures -****************************************************************************/ - -/* MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ -typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; - -/* MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ -typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; - -typedef union _MPI2_IEEE_SGE_CHAIN_UNION -{ - MPI2_IEEE_SGE_CHAIN32 Chain32; - MPI2_IEEE_SGE_CHAIN64 Chain64; -} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION, - Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t; - -/* MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ -typedef struct _MPI25_IEEE_SGE_CHAIN64 -{ - U64 Address; - U32 Length; - U16 Reserved1; - U8 NextChainOffset; - U8 Flags; -} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64, - Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t; - - -/**************************************************************************** -* All IEEE SGE types union -****************************************************************************/ - -/* MPI2_IEEE_SGE_UNION is 
for MPI v2.0 products only */ -typedef struct _MPI2_IEEE_SGE_UNION -{ - union - { - MPI2_IEEE_SGE_SIMPLE_UNION Simple; - MPI2_IEEE_SGE_CHAIN_UNION Chain; - } u; -} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION, - Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t; - - -/**************************************************************************** -* IEEE SGE union for IO SGL's -****************************************************************************/ - -typedef union _MPI25_SGE_IO_UNION -{ - MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; - MPI25_IEEE_SGE_CHAIN64 IeeeChain; -} MPI25_SGE_IO_UNION, MPI2_POINTER PTR_MPI25_SGE_IO_UNION, - Mpi25SGEIOUnion_t, MPI2_POINTER pMpi25SGEIOUnion_t; - - -/**************************************************************************** -* IEEE SGE field definitions and masks -****************************************************************************/ - -/* Flags field bit definitions */ - -#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) -#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) - -#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24) - -#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) - -/* Element Type */ - -#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) -#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) - -/* Next Segment Format */ - -#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) -#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) -#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) -#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) - -/* Data Location Address Space */ - -#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) -#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5 and later, use in IEEE Simple or Chain element */ -#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) /* use in IEEE Simple Element only */ -#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) -#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5, use in IEEE Simple or Chain element */ -#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) /* use in MPI v2.0 IEEE Chain Element only */ -#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */ - -#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) /* for MPI v2.6 only */ - -/**************************************************************************** -* IEEE SGE operation Macros -****************************************************************************/ - -/* SIMPLE FlagsLength manipulations... */ -#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) -#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT) -#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) - -#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l)) - -#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) -#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) -#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l) - -/* CAUTION - The following are READ-MODIFY-WRITE! 
*/ -#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f) -#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l) - - - -/***************************************************************************** -* -* Fusion-MPT MPI/IEEE Scatter Gather Unions -* -*****************************************************************************/ - -typedef union _MPI2_SIMPLE_SGE_UNION -{ - MPI2_SGE_SIMPLE_UNION MpiSimple; - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; -} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION, - Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t; - - -typedef union _MPI2_SGE_IO_UNION -{ - MPI2_SGE_SIMPLE_UNION MpiSimple; - MPI2_SGE_CHAIN_UNION MpiChain; - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; - MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; -} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION, - Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t; - - -/**************************************************************************** -* -* Values for SGLFlags field, used in many request messages with an SGL -* -****************************************************************************/ - -/* values for MPI SGL Data Location Address Space subfield */ -#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) -#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) -#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) -#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ -#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.6 */ -#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ -/* values for SGL Type subfield */ -#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) -#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) -#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) /* MPI v2.0 products only */ -#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) - - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 + * + * mpi2.h Version: 02.00.55 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. 
+ * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnostic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field. + * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Updated for MPI v2.6. + * Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers and + * AtomicRequestDescriptorPost register to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added V7 HostDiagnostic register defines + * 12-15-15 02.00.41 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-01-16 02.00.42 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines + * to be unique within first 32 characters. + * Removed AHCI support. + * Removed SOP support. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. + * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-22-18 02.00.51 Added SECURE_BOOT define. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IOCSTATUS_FAILURE. + * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_H +#define MPI2_H + + +/***************************************************************************** +* +* MPI Version Definitions +* +*****************************************************************************/ + +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) + +/* major version for all MPI v2.x */ +#define MPI2_VERSION_MAJOR (0x02) + +/* minor version for MPI v2.0 compatible products */ +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_VERSION_02_00 (0x0200) + + +/* minor version for MPI v2.5 compatible products */ +#define MPI25_VERSION_MINOR (0x05) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) +#define MPI2_VERSION_02_05 (0x0205) + + +/* minor version for MPI v2.6 compatible products */ +#define MPI26_VERSION_MINOR (0x06) +#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI26_VERSION_MINOR) +#define MPI2_VERSION_02_06 (0x0206) + + +/* Unit and Dev versioning for this MPI header set */ +#define MPI2_HEADER_VERSION_UNIT (0x39) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV) + + +/***************************************************************************** +* +* IOC State Definitions +* +*****************************************************************************/ + +#define MPI2_IOC_STATE_RESET (0x00000000) +#define MPI2_IOC_STATE_READY (0x10000000) +#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) +#define MPI2_IOC_STATE_FAULT (0x40000000) +#define MPI2_IOC_STATE_COREDUMP (0x50000000) + +#define MPI2_IOC_STATE_MASK (0xF0000000) +#define MPI2_IOC_STATE_SHIFT (28) + +/* Fault state range for product specific codes */ +#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) +#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + + 
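For orientation, the masks and shifts above are meant to be used together: a version word packs the major number into bits 15:8 and the minor into bits 7:0, and the IOC state occupies the top nibble of the Doorbell register. The following is a minimal host-side sketch, not part of the patch itself; it assumes the U8/U16/U32 fixed-width typedefs this header set already relies on, and a raw Doorbell value obtained by the caller.

/* Illustrative sketch only -- not part of the patch. */

static int mpi_version_is_v26(U16 msg_version)
{
	/* MPI26_VERSION is composed the same way: (0x02 << 8) | 0x06 == 0x0206 */
	U8 major = (U8)((msg_version & MPI2_VERSION_MAJOR_MASK) >>
			MPI2_VERSION_MAJOR_SHIFT);
	U8 minor = (U8)((msg_version & MPI2_VERSION_MINOR_MASK) >>
			MPI2_VERSION_MINOR_SHIFT);

	return (major == MPI2_VERSION_MAJOR) && (minor == MPI26_VERSION_MINOR);
}

static U32 mpi_ioc_state(U32 doorbell)
{
	/* The state is the top nibble, so a ready IOC yields
	 * MPI2_IOC_STATE_READY here; in the FAULT state the low bits of the
	 * Doorbell carry a product specific fault code in the
	 * MPI2_FAULT_PRODUCT_SPECIFIC_MIN..MAX range.
	 */
	return doorbell & MPI2_IOC_STATE_MASK;
}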
+/***************************************************************************** +* +* System Interface Register Definitions +* +*****************************************************************************/ + +typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS +{ + U32 Doorbell; /* 0x00 */ + U32 WriteSequence; /* 0x04 */ + U32 HostDiagnostic; /* 0x08 */ + U32 Reserved1; /* 0x0C */ + U32 DiagRWData; /* 0x10 */ + U32 DiagRWAddressLow; /* 0x14 */ + U32 DiagRWAddressHigh; /* 0x18 */ + U32 Reserved2[5]; /* 0x1C */ + U32 HostInterruptStatus; /* 0x30 */ + U32 HostInterruptMask; /* 0x34 */ + U32 DCRData; /* 0x38 */ + U32 DCRAddress; /* 0x3C */ + U32 Reserved3[2]; /* 0x40 */ + U32 ReplyFreeHostIndex; /* 0x48 */ + U32 Reserved4[8]; /* 0x4C */ + U32 ReplyPostHostIndex; /* 0x6C */ + U32 Reserved5; /* 0x70 */ + U32 HCBSize; /* 0x74 */ + U32 HCBAddressLow; /* 0x78 */ + U32 HCBAddressHigh; /* 0x7C */ + U32 Reserved6[12]; /* 0x80 */ + U32 Scratchpad[4]; /* 0xB0 */ + U32 RequestDescriptorPostLow; /* 0xC0 */ + U32 RequestDescriptorPostHigh; /* 0xC4 */ + U32 AtomicRequestDescriptorPost;/* 0xC8 */ /* MPI v2.6 and later; reserved in earlier versions */ + U32 Reserved7[13]; /* 0xCC */ +} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t; + +/* + * Defines for working with the Doorbell register. + */ +#define MPI2_DOORBELL_OFFSET (0x00000000) + +/* IOC --> System values */ +#define MPI2_DOORBELL_USED (0x08000000) +#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) +#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) +#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) +#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) + +/* System --> IOC values */ +#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) +#define MPI2_DOORBELL_FUNCTION_SHIFT (24) +#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) +#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + + +/* + * Defines for the WriteSequence register + */ +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +/* + * Defines for the HostDiagnostic register + */ +#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) + +#define MPI26_DIAG_SECURE_BOOT (0x80000000) + +#define MPI2_DIAG_SBR_RELOAD (0x00002000) + +#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) + +/* Defines for V7A/V7R HostDiagnostic Register */ +#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800) + +#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) +#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) +#define MPI2_DIAG_HCB_MODE (0x00000100) +#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) +#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) +#define MPI2_DIAG_RESET_HISTORY (0x00000020) +#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) +#define MPI2_DIAG_RESET_ADAPTER (0x00000004) +#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) + +/* + * Offsets for DiagRWData and address + */ +#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) +#define 
MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) +#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) + +/* + * Defines for the HostInterruptStatus register + */ +#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) +#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) +#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) +#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS + +/* + * Defines for the HostInterruptMask register + */ +#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) +#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) +#define MPI2_HIM_REPLY_INT_MASK (0x00000008) +#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK +#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) +#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK + +/* + * Offsets for DCRData and address + */ +#define MPI2_DCR_DATA_OFFSET (0x00000038) +#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) + +/* + * Offset for the Reply Free Queue + */ +#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) + +/* + * Defines for the Reply Descriptor Post Queue + */ +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) +#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) +#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) +#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */ + + +/* + * Defines for the HCBSize and address + */ +#define MPI2_HCB_SIZE_OFFSET (0x00000074) +#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) +#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) + +#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) +#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) + +/* + * Offsets for the Scratchpad registers + */ +#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) +#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) +#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8) +#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) + +/* + * Offsets for the Request Descriptor Post Queue + */ +#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) +#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) +#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) + + +/* Hard Reset delay timings */ +#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) +#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) +#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) + +/***************************************************************************** +* +* Message Descriptors +* +*****************************************************************************/ + +/* Request Descriptors */ + +/* Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 DescriptorTypeDependent; /* 0x06 */ +} MPI2_DEFAULT_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t; + +/* defines for the RequestFlags field */ +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) /* use carefully; values below are pre-shifted left */ +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define 
MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) +#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) +#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10) + +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) + + +/* High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 Reserved1; /* 0x06 */ +} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t; + + +/* SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 DevHandle; /* 0x06 */ +} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t; + + +/* SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 IoIndex; /* 0x06 */ +} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; + + +/* RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 LMID; /* 0x04 */ + U16 Reserved; /* 0x06 */ +} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t; + + +/* Fast Path SCSI IO Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + MPI2_POINTER pMpi25FastPathSCSIIORequestDescriptor_t; + + +/* PCIe Encapsulated Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + Mpi26PCIeEncapsulatedRequestDescriptor_t, + MPI2_POINTER pMpi26PCIeEncapsulatedRequestDescriptor_t; + + +/* union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION +{ + MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; + U64 Words; +} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t; + + +/* Atomic Request Descriptors */ + +/* + * All Atomic Request Descriptors have the same format, so the following + * structure is used for all Atomic Request Descriptors: + * Atomic Default Request Descriptor + * Atomic High Priority Request Descriptor + * Atomic SCSI IO Request Descriptor + * Atomic SCSI Target Request Descriptor + * Atomic RAID Accelerator Request Descriptor + * Atomic Fast Path SCSI IO Request Descriptor + * Atomic PCIe Encapsulated Request Descriptor 
+ */ + +/* Atomic Request Descriptor */ +typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR +{ + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ +} MPI26_ATOMIC_REQUEST_DESCRIPTOR, + MPI2_POINTER PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, + Mpi26AtomicRequestDescriptor_t, MPI2_POINTER pMpi26AtomicRequestDescriptor_t; + +/* for the RequestFlags field, use the same defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR */ + + +/* Reply Descriptors */ + +/* Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 DescriptorTypeDependent1; /* 0x02 */ + U32 DescriptorTypeDependent2; /* 0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; + +/* defines for the ReplyFlags field */ +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) +#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) +#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +/* values for marking a reply descriptor as unused */ +#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) +#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) + +/* Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U32 ReplyFrameAddress; /* 0x04 */ +} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t; + +#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) + + +/* SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U16 TaskTag; /* 0x04 */ + U16 Reserved1; /* 0x06 */ +} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t; + + +/* TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U8 SequenceNumber; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 IoIndex; /* 0x06 */ +} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t; + + +/* Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U8 VP_ID; /* 0x02 */ + U8 Flags; /* 0x03 */ + U16 InitiatorDevHandle; /* 0x04 */ + U16 IoIndex; /* 0x06 */ +} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t; + +/* defines for Flags field */ +#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) + + +/* RAID Accelerator Success Reply Descriptor */ +typedef 
struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR +{ + U8 ReplyFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ + U32 Reserved; /* 0x04 */ +} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + + +/* Fast Path SCSI IO Success Reply Descriptor */ +typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + MPI2_POINTER pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + + +/* PCIe Encapsulated Success Reply Descriptor */ +typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + MPI2_POINTER PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, + MPI2_POINTER pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; + + +/* union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION +{ + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR PCIeEncapsulatedSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; + + + +/***************************************************************************** +* +* Message Functions +* +*****************************************************************************/ + +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */ +#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ +#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */ +#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */ +#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */ +#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */ +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */ +#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */ +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */ +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */ +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */ +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */ +#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */ +#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */ +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */ +#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */ +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */ +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */ +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */ /* for MPI v2.5 and earlier */ +#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) /* IO Unit Control */ /* for MPI v2.6 and later */ +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */ +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic 
Buffer Post */ +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */ +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */ +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */ +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */ +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */ +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */ +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) /* Send Host Message */ +#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) /* NVMe Encapsulated (MPI v2.6) */ +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */ +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */ + + + +/* Doorbell functions */ +#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) +#define MPI2_FUNCTION_HANDSHAKE (0x42) + + +/***************************************************************************** +* +* IOC Status Values +* +*****************************************************************************/ + +/* mask for IOCStatus status value */ +#define MPI2_IOCSTATUS_MASK (0x7FFF) + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + +#define MPI2_IOCSTATUS_SUCCESS (0x0000) +#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI2_IOCSTATUS_BUSY (0x0002) +#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) +#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) /* MPI v2.6 and later */ +#define MPI2_IOCSTATUS_FAILURE (0x000F) + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + +#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + +#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection 
+****************************************************************************/ + +#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + +#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + +#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + +#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) + +/**************************************************************************** +* RAID Accelerator values +****************************************************************************/ + +#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) + +/**************************************************************************** +* IOCStatus flag to indicate that log info is available +****************************************************************************/ + +#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +/**************************************************************************** +* IOCLogInfo Types +****************************************************************************/ + +#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) +#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) +#define MPI2_IOCLOGINFO_TYPE_FC (0x2) +#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) +#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) + + +/***************************************************************************** +* +* Standard Message Structures +* +*****************************************************************************/ + +/**************************************************************************** +* Request Message Header for all request messages +****************************************************************************/ + +typedef struct _MPI2_REQUEST_HEADER +{ + U16 FunctionDependent1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 FunctionDependent2; /* 0x04 */ + U8 FunctionDependent3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ +} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t; + + +/**************************************************************************** +* Default Reply 
+****************************************************************************/ + +typedef struct _MPI2_DEFAULT_REPLY +{ + U16 FunctionDependent1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 FunctionDependent2; /* 0x04 */ + U8 FunctionDependent3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U16 FunctionDependent5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t; + + +/* common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT +{ + U8 Dev; /* 0x00 */ + U8 Unit; /* 0x01 */ + U8 Minor; /* 0x02 */ + U8 Major; /* 0x03 */ +} MPI2_VERSION_STRUCT; + +typedef union _MPI2_VERSION_UNION +{ + MPI2_VERSION_STRUCT Struct; + U32 Word; +} MPI2_VERSION_UNION; + + +/* LUN field defines, common to many structures */ +#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_LEVEL_1_WORD (0xFF00) +#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + + +/***************************************************************************** +* +* Fusion-MPT MPI Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* MPI Simple Element structures +****************************************************************************/ + +typedef struct _MPI2_SGE_SIMPLE32 +{ + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t; + +typedef struct _MPI2_SGE_SIMPLE64 +{ + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION +{ + U32 FlagsLength; + union + { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t; + + +/**************************************************************************** +* MPI Chain Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_CHAIN32 +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t; + +typedef struct _MPI2_SGE_CHAIN64 +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION +{ + U16 Length; + U8 NextChainOffset; + U8 Flags; + union + { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION, + Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t; + + +/**************************************************************************** +* MPI Transaction Context Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANSACTION32 +{ + U8 Reserved; + U8 ContextSize; + U8 
DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t; + +typedef struct _MPI2_SGE_TRANSACTION64 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t; + +typedef struct _MPI2_SGE_TRANSACTION96 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t; + +typedef struct _MPI2_SGE_TRANSACTION128 +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION +{ + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union + { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t; + + +/**************************************************************************** +* MPI SGE union for IO SGL's - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_IO_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t; + + +/**************************************************************************** +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t; + + +/**************************************************************************** +* All MPI SGE types union +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_UNION +{ + union + { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t; + + +/**************************************************************************** +* MPI SGE field definition and masks +****************************************************************************/ + +/* Flags field bit definitions */ + +#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) +#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) +#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) +#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) +#define MPI2_SGE_FLAGS_DIRECTION (0x04) +#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) +#define 
MPI2_SGE_FLAGS_END_OF_LIST (0x01) + +#define MPI2_SGE_FLAGS_SHIFT (24) + +#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) +#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) + +/* Element Type */ + +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) /* for MPI v2.0 products only */ +#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) /* for MPI v2.0 products only */ +#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) + +/* Address location */ + +#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) + +/* Direction */ + +#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) +#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) + +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) + +/* Address Size */ + +#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +/* Context Size */ + +#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) +#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) +#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) +#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) + +#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) +#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) + +/**************************************************************************** +* MPI SGE operation Macros +****************************************************************************/ + +/* SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) + +#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l)) + +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l) + +/* CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f) +#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT) + + +/***************************************************************************** +* +* Fusion-MPT IEEE Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* IEEE Simple Element structures +****************************************************************************/ + +/* MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 +{ + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t; + +typedef struct _MPI2_IEEE_SGE_SIMPLE64 +{ + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION +{ + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t; + + +/**************************************************************************** +* IEEE Chain Element structures +****************************************************************************/ + +/* MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; + +/* MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; + +typedef union _MPI2_IEEE_SGE_CHAIN_UNION +{ + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t; + +/* MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ +typedef struct _MPI25_IEEE_SGE_CHAIN64 +{ + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t; + + +/**************************************************************************** +* All IEEE SGE types union +****************************************************************************/ + +/* MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION +{ + union + { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t; + + +/**************************************************************************** +* IEEE SGE union for IO SGL's +****************************************************************************/ + +typedef union _MPI25_SGE_IO_UNION +{ + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, MPI2_POINTER PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, MPI2_POINTER pMpi25SGEIOUnion_t; + + +/**************************************************************************** +* IEEE SGE field definitions and masks 
+****************************************************************************/ + +/* Flags field bit definitions */ + +#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) +#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24) + +#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) + +/* Element Type */ + +#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) + +/* Next Segment Format */ + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + +/* Data Location Address Space */ + +#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5 and later, use in IEEE Simple or Chain element */ +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) /* use in IEEE Simple Element only */ +#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5, use in IEEE Simple or Chain element */ +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) /* use in MPI v2.0 IEEE Chain Element only */ +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */ + +#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) /* for MPI v2.6 only */ + +/**************************************************************************** +* IEEE SGE operation Macros +****************************************************************************/ + +/* SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) + +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l) + +/* CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l) + + + +/***************************************************************************** +* +* Fusion-MPT MPI/IEEE Scatter Gather Unions +* +*****************************************************************************/ + +typedef union _MPI2_SIMPLE_SGE_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t; + + +typedef union _MPI2_SGE_IO_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t; + + +/**************************************************************************** +* +* Values for SGLFlags field, used in many request messages with an SGL +* +****************************************************************************/ + +/* values for MPI SGL Data Location Address Space subfield */ +#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ +#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.6 */ +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ +/* values for SGL Type subfield */ +#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) +#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) /* MPI v2.0 products only */ +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 71e905cb1c89..5ae3215464c6 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -1,3908 +1,3913 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_cnfg.h - * Title: MPI Configuration messages and pages - * Creation Date: November 10, 2006 - * - * mpi2_cnfg.h Version: 02.00.48 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. - * Added Manufacturing Page 11. - * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE - * define. - * 06-26-07 02.00.02 Adding generic structure for product-specific - * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. - * Rework of BIOS Page 2 configuration page. - * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the - * forms. - * Added configuration pages IOC Page 8 and Driver - * Persistent Mapping Page 0. 
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated - * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, - * RAID Physical Disk Pages 0 and 1, RAID Configuration - * Page 0). - * Added new value for AccessStatus field of SAS Device - * Page 0 (_SATA_NEEDS_INITIALIZATION). - * 10-31-07 02.00.04 Added missing SEPDevHandle field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for - * NVDATA. - * Modified IOC Page 7 to use masks and added field for - * SASBroadcastPrimitiveMasks. - * Added MPI2_CONFIG_PAGE_BIOS_4. - * Added MPI2_CONFIG_PAGE_LOG_0. - * 02-29-08 02.00.06 Modified various names to make them 32-character unique. - * Added SAS Device IDs. - * Updated Integrated RAID configuration pages including - * Manufacturing Page 4, IOC Page 6, and RAID Configuration - * Page 0. - * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. - * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. - * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. - * Added missing MaxNumRoutedSasAddresses field to - * MPI2_CONFIG_PAGE_EXPANDER_0. - * Added SAS Port Page 0. - * Modified structure layout for - * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. - * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use - * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. - * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF - * to 0x000000FF. - * Added two new values for the Physical Disk Coercion Size - * bits in the Flags field of Manufacturing Page 4. - * Added product-specific Manufacturing pages 16 to 31. - * Modified Flags bits for controlling write cache on SATA - * drives in IO Unit Page 1. - * Added new bit to AdditionalControlFlags of SAS IO Unit - * Page 1 to control Invalid Topology Correction. - * Added additional defines for RAID Volume Page 0 - * VolumeStatusFlags field. - * Modified meaning of RAID Volume Page 0 VolumeSettings - * define for auto-configure of hot-swap drives. - * Added SupportedPhysDisks field to RAID Volume Page 1 and - * added related defines. - * Added PhysDiskAttributes field (and related defines) to - * RAID Physical Disk Page 0. - * Added MPI2_SAS_PHYINFO_PHY_VACANT define. - * Added three new DiscoveryStatus bits for SAS IO Unit - * Page 0 and SAS Expander Page 0. - * Removed multiplexing information from SAS IO Unit pages. - * Added BootDeviceWaitTime field to SAS IO Unit Page 4. - * Removed Zone Address Resolved bit from PhyInfo and from - * Expander Page 0 Flags field. - * Added two new AccessStatus values to SAS Device Page 0 - * for indicating routing problems. Added 3 reserved words - * to this page. - * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. - * Inserted missing reserved field into structure for IOC - * Page 6. - * Added more pending task bits to RAID Volume Page 0 - * VolumeStatusFlags defines. - * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. - * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 - * and SAS Expander Page 0 to flag a downstream initiator - * when in simplified routing mode. - * Removed SATA Init Failure defines for DiscoveryStatus - * fields of SAS IO Unit Page 0 and SAS Expander Page 0. - * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. - * Added PortGroups, DmaGroup, and ControlGroup fields to - * SAS Device Page 0. - * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO - * Unit Page 6. 
- * Added expander reduced functionality data to SAS - * Expander Page 0. - * Added SAS PHY Page 2 and SAS PHY Page 3. - * 07-30-09 02.00.12 Added IO Unit Page 7. - * Added new device ids. - * Added SAS IO Unit Page 5. - * Added partial and slumber power management capable flags - * to SAS Device Page 0 Flags field. - * Added PhyInfo defines for power condition. - * Added Ethernet configuration pages. - * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. - * Added SAS PHY Page 4 structure and defines. - * 02-10-10 02.00.14 Modified the comments for the configuration page - * structures that contain an array of data. The host - * should use the "count" field in the page data (e.g. the - * NumPhys field) to determine the number of valid elements - * in the array. - * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. - * Added PowerManagementCapabilities to IO Unit Page 7. - * Added PortWidthModGroup field to - * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. - * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT - * define. - * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. - * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. - * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) - * defines. - * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to - * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for - * the Pinout field. - * Added BoardTemperature and BoardTemperatureUnits fields - * to MPI2_CONFIG_PAGE_IO_UNIT_7. - * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define - * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. - * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. - * Added IO Unit Page 8, IO Unit Page 9, - * and IO Unit Page 10. - * Added SASNotifyPrimitiveMasks field to - * MPI2_CONFIG_PAGE_IOC_7. - * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). - * 05-25-11 02.00.20 Cleaned up a few comments. - * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities - * for PCIe link as obsolete. - * Added SpinupFlags field containing a Disable Spin-up bit - * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO - * Unit Page 4. - * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. - * Added UEFIVersion field to BIOS Page 1 and defined new - * BiosOptions bits. - * Incorporating additions for MPI v2.5. - * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. - * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. - * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as - * obsolete for MPI v2.5 and later. - * Added some defines for 12G SAS speeds. - * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. - * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to - * match the specification. - * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for - * future use. - * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for - * MPI2_CONFIG_PAGE_MAN_7. - * Added EnclosureLevel and ConnectorName fields to - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added EnclosureLevel field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. 
- * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
- *                   MPI2_CONFIG_PAGE_BIOS_1.
- * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
- *                   more defines for the BiosOptions field.
- * 11-18-14 02.00.30 Updated copyright information.
- *                   Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
- *                   Added AdapterOrderAux fields to BIOS Page 3.
- * 03-16-15 02.00.31 Updated for MPI v2.6.
- *                   Added BoardPowerRequirement, PCISlotPowerAllocation, and
- *                   Flags field to IO Unit Page 7.
- *                   Added IO Unit Page 11.
- *                   Added new SAS Phy Event codes.
- *                   Added PCIe configuration pages.
- * 03-19-15 02.00.32 Fixed PCIe Link Config page structure names to be
- *                   unique in first 32 characters.
- * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
- *                   MPI2_CONFIG_PAGE_BIOS_1.
- * 08-25-15 02.00.34 Added PCIe Device Page 2 SGL format capability.
- * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
- * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
- *                   Added Link field to PCIe Link Pages.
- *                   Added EnclosureLevel and ConnectorName to PCIe
- *                   Device Page 0.
- *                   Added define for PCIE IoUnit page 1 max rate shift.
- *                   Added comment for reserved ExtPageTypes.
- *                   Added SAS 4 22.5 Gb/s speed support.
- *                   Added PCIe 4 16.0 GT/sec speed support.
- *                   Removed AHCI support.
- *                   Removed SOP support.
- *                   Added NegotiatedLinkRate and NegotiatedPortWidth to
- *                   PCIe device page 0.
- * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines.
- * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
- *                   Changed declaration of ConnectorName in PCIe DevicePage0
- *                   to match SAS DevicePage 0.
- *                   Added SATADeviceWaitTime to IO Unit Page 11.
- *                   Added MPI26_MFGPAGE_DEVID_SAS4008.
- *                   Added x16 PCIe width to IO Unit Page 7.
- *                   Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
- *                   phy data.
- *                   Added InitStatus to PCIe IO Unit Page 1 header.
- * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
- *                   Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
- *                   MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
- * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
- *                   Added ChassisSlot field to SAS Enclosure Page 0.
- *                   Added ChassisSlot Valid bit (bit 5) to the Flags field
- *                   in SAS Enclosure Page 0.
- * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
- *                   MPI26_MFGPAGE_DEVID_SAS3916 defines.
- *                   Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
- *                   Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
- *                   Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
- *                   MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
- *                   Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
- *                   MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
- * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
- *                   Added NOIOB field to PCIe Device Page 2.
- *                   Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
- *                   the Capabilities field of PCIe Device Page 2.
- * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816.
- *                   Added WriteCache defines to IO Unit Page 1.
- *                   Added MaxEnclosureLevel to BIOS Page 1.
- *                   Added OEMRD to SAS Enclosure Page 1.
- *                   Added DMDReportPCIe to PCIe IO Unit Page 1.
- *                   Added Flags field and flags for Retimers to
- *                   PCIe Switch Page 1.
- * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
- * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 - * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 - * Added DMDReport Delay Time defines to PCIeIOUnitPage1 - * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. - * 06-24-19 02.00.48 Add DEVID for PCI Switch and MPI endpoints. - * Add Man7 flag for Connector Lane. - * Modify NVMe WriteCache values. - * Add ConfigSource to PCIeIOUnit0. - * Add various PCI id info to PCIeDevice2. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_CNFG_H -#define MPI2_CNFG_H - -/***************************************************************************** -* Configuration Page Header and defines -*****************************************************************************/ - -/* Config Page Header */ -typedef struct _MPI2_CONFIG_PAGE_HEADER -{ - U8 PageVersion; /* 0x00 */ - U8 PageLength; /* 0x01 */ - U8 PageNumber; /* 0x02 */ - U8 PageType; /* 0x03 */ -} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER, - Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t; - -typedef union _MPI2_CONFIG_PAGE_HEADER_UNION -{ - MPI2_CONFIG_PAGE_HEADER Struct; - U8 Bytes[4]; - U16 Word16[2]; - U32 Word32; -} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION, - Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion; - -/* Extended Config Page Header */ -typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER -{ - U8 PageVersion; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 PageNumber; /* 0x02 */ - U8 PageType; /* 0x03 */ - U16 ExtPageLength; /* 0x04 */ - U8 ExtPageType; /* 0x06 */ - U8 Reserved2; /* 0x07 */ -} MPI2_CONFIG_EXTENDED_PAGE_HEADER, - MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, - Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t; - -typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION -{ - MPI2_CONFIG_PAGE_HEADER Struct; - MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; - U8 Bytes[8]; - U16 Word16[4]; - U32 Word32[2]; -} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, - Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion; - - -/* PageType field values */ -#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) -#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) -#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) -#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) - -#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) -#define MPI2_CONFIG_PAGETYPE_IOC (0x01) -#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) -#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) -#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) -#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) -#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F) -#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) - -#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) - - -/* ExtPageType field values */ -#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) -#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) -#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) -#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) -#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) -#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) -#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) -#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) -#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) -#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) -#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) -#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B) /* MPI v2.6 and later */ -#define 
MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C) /* MPI v2.6 and later */ -#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D) /* MPI v2.6 and later */ -#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E) /* MPI v2.6 and later */ -/* Product specific reserved values 0xE0 - 0xEF */ -/* Vendor specific reserved values 0xF0 - 0xFF */ - - -/***************************************************************************** -* PageAddress defines -*****************************************************************************/ - -/* RAID Volume PageAddress format */ -#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) -#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) -#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) - -#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF) - - -/* RAID Physical Disk PageAddress format */ -#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) -#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) -#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) -#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) - -#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) -#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) - - -/* SAS Expander PageAddress format */ -#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) -#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) -#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) -#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) - -#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) -#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) -#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) - - -/* SAS Device PageAddress format */ -#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) -#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) -#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) - -#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) - - -/* SAS PHY PageAddress format */ -#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) -#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) -#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) - -#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) -#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) - - -/* SAS Port PageAddress format */ -#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) -#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) -#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) - -#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) - - -/* SAS Enclosure PageAddress format */ -#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) -#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) -#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) - -#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) - -/* Enclosure PageAddress format */ -#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000) -#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) -#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000) - -#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) - -/* RAID Configuration PageAddress format */ -#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) -#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) -#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) -#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) - -#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) - - -/* Driver Persistent Mapping PageAddress format */ -#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) -#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000) - -#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) -#define 
MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) -#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) - - -/* Ethernet PageAddress format */ -#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) -#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) - -#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) - - -/* PCIe Switch PageAddress format */ -#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000) -#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000) -#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000) -#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000) - -#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF) -#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000) -#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16) - - -/* PCIe Device PageAddress format */ -#define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000) -#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) -#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000) - -#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) - -/* PCIe Link PageAddress format */ -#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000) -#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000) -#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000) - -#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF) - - - -/**************************************************************************** -* Configuration messages -****************************************************************************/ - -/* Configuration Request Message */ -typedef struct _MPI2_CONFIG_REQUEST -{ - U8 Action; /* 0x00 */ - U8 SGLFlags; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 ExtPageLength; /* 0x04 */ - U8 ExtPageType; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U8 Reserved2; /* 0x0C */ - U8 ProxyVF_ID; /* 0x0D */ - U16 Reserved4; /* 0x0E */ - U32 Reserved3; /* 0x10 */ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ - U32 PageAddress; /* 0x18 */ - MPI2_SGE_IO_UNION PageBufferSGE; /* 0x1C */ -} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST, - Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t; - -/* values for the Action field */ -#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) -#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) -#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) -#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) -#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) -#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) -#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) -#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) - -/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - - -/* Config Reply Message */ -typedef struct _MPI2_CONFIG_REPLY -{ - U8 Action; /* 0x00 */ - U8 SGLFlags; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 ExtPageLength; /* 0x04 */ - U8 ExtPageType; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U16 Reserved2; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ -} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY, - Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t; - - - -/***************************************************************************** -* -* C o n f i g u r a t i o n P a g e s -* -*****************************************************************************/ - 
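Editor's note: every PageAddress format removed above follows the same pattern: bits 31:28 select a FORM, and the low bits carry a form-specific selector (device handle, PHY number, config number, and so on). A minimal standalone sketch of composing and decoding a SAS Expander PageAddress with the HNDL_PHY_NUM form follows; the constants are copied from the defines above, while the helper name and the example handle/PHY values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the SAS Expander PageAddress defines above. */
#define PGAD_FORM_MASK         0xF0000000u
#define PGAD_FORM_HNDL_PHY_NUM 0x10000000u
#define PGAD_HANDLE_MASK       0x0000FFFFu
#define PGAD_PHYNUM_MASK       0x00FF0000u
#define PGAD_PHYNUM_SHIFT      16

/* Compose a PageAddress selecting one PHY of one expander by handle. */
static uint32_t expander_pgad(uint16_t handle, uint8_t phy)
{
	return PGAD_FORM_HNDL_PHY_NUM |
	       (((uint32_t)phy << PGAD_PHYNUM_SHIFT) & PGAD_PHYNUM_MASK) |
	       ((uint32_t)handle & PGAD_HANDLE_MASK);
}

int main(void)
{
	uint32_t pgad = expander_pgad(0x0009, 3);

	printf("PageAddress 0x%08X\n", (unsigned)pgad); /* 0x10030009 */
	printf("form        0x%08X\n", (unsigned)(pgad & PGAD_FORM_MASK));
	printf("handle      0x%04X\n", (unsigned)(pgad & PGAD_HANDLE_MASK));
	printf("phy         %u\n",
	       (unsigned)((pgad & PGAD_PHYNUM_MASK) >> PGAD_PHYNUM_SHIFT));
	return 0;
}

The same compose/mask/shift pattern applies to every other PageAddress form in this header; only the FORM value and the selector masks change.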
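The MPI2_CONFIG_REQUEST/MPI2_CONFIG_REPLY pair removed above is used in a two-step sequence: a PAGE_HEADER action first retrieves the page header (for extended pages the real length comes back in ExtPageLength, since Header.PageLength is not used), and a READ_CURRENT action then transfers the page itself, addressed via PageAddress. A hedged sketch of that flow, assuming this header and mpi2.h (for MPI2_FUNCTION_CONFIG) are included; issue_config() is a hypothetical transport helper, not part of this header.

/* Hypothetical helper: posts one request, programs PageBufferSGE for
 * buf/len, and fills in the matching reply. A real driver does this
 * itself against its message queues.
 */
extern int issue_config(MPI2_CONFIG_REQUEST *req, MPI2_CONFIG_REPLY *reply,
			void *buf, U32 len);

static int read_sas_device_page0(U16 dev_handle, void *buf, U32 buf_len)
{
	MPI2_CONFIG_REQUEST req;
	MPI2_CONFIG_REPLY reply;

	/* Step 1: fetch the page header to learn version and length. */
	memset(&req, 0, sizeof(req));
	req.Function = MPI2_FUNCTION_CONFIG;	/* defined in mpi2.h */
	req.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	req.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
	req.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
	req.Header.PageNumber = 0;
	if (issue_config(&req, &reply, NULL, 0) || reply.IOCStatus)
		return -1;	/* simplified: real code masks IOCStatus bits */

	/* Step 2: read the current page, addressing the device by handle. */
	req.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	req.Header = reply.Header;		 /* returned header goes back in */
	req.ExtPageLength = reply.ExtPageLength; /* length in 4-byte words */
	req.PageAddress = MPI2_SAS_DEVICE_PGAD_FORM_HANDLE |
			  ((U32)dev_handle & MPI2_SAS_DEVICE_PGAD_HANDLE_MASK);
	if (issue_config(&req, &reply, buf, buf_len) || reply.IOCStatus)
		return -1;
	return 0;
}

Which length field applies is determined by PageType: MPI2_CONFIG_PAGETYPE_EXTENDED pages carry their size in ExtPageLength, all other page types in Header.PageLength.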
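Several pages later in this header (Manufacturing Page 5, IO Unit Pages 3, 8, 9 and 10, BIOS Page 4, RAID Volume Page 0, and others) end in an array sized by a build-time define that the comments tell host code to leave at one; the true element count is only known at runtime, from the returned page length and a count field such as NumPhys or NumSensors. A sketch of the resulting allocation pattern for a standard (non-extended) page, in a host-utility context (needs <stdlib.h> and <string.h>) and reusing the hypothetical issue_config() helper from the previous sketch:

static Mpi2ManufacturingPage5_t *read_man_page5(void)
{
	MPI2_CONFIG_REQUEST req;
	MPI2_CONFIG_REPLY reply;
	Mpi2ManufacturingPage5_t *page;
	U32 bytes;

	memset(&req, 0, sizeof(req));
	req.Function = MPI2_FUNCTION_CONFIG;
	req.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	req.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	req.Header.PageNumber = 5;
	if (issue_config(&req, &reply, NULL, 0) || reply.IOCStatus)
		return NULL;

	/* PageLength counts 4-byte words and covers the whole page, so the
	 * buffer holds however many MPI2_MANUFACTURING5_ENTRY elements the
	 * firmware actually reports, not the 1-element compile-time array.
	 */
	bytes = (U32)reply.Header.PageLength * 4;
	page = malloc(bytes);
	if (!page)
		return NULL;

	req.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	req.Header = reply.Header;
	if (issue_config(&req, &reply, page, bytes) || reply.IOCStatus) {
		free(page);
		return NULL;
	}
	/* page->NumPhys now bounds the valid entries in page->Phy[]. */
	return page;
}

This is why redefining the *_ENTRIES/*_MAX defines is unnecessary: the one-element array is only a placeholder for indexing into a runtime-sized buffer.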
-/**************************************************************************** -* Manufacturing Config pages -****************************************************************************/ - -#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) - -/* MPI v2.0 SAS products */ -#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) -#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) -#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) -#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) -#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) -#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) -#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) - -#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) - -#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) -#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) -#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) -#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) -#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) -#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) -#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) -#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) -#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) - -/* MPI v2.5 SAS products */ -#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) -#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) -#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) -#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) -#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) -#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) - -/* MPI v2.6 SAS Products */ -#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9) -#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4) -#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5) -#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6) -#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7) -#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8) -#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0) -#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1) -#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) -#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) - -#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA) -#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB) -#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC) -#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) -#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) -#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) - -#define MPI26_MFGPAGE_DEVID_PEX88000 (0x00B2) - -#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0) -#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) -#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) - -#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003) -#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0) -#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1) -#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2) -#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3) - -#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003) -#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4) -#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5) -#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6) -#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7) - -#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP (0x02B0) -#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1 (0x02B1) - - -/* Manufacturing Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_MAN_0 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 ChipName[16]; /* 0x04 */ - U8 ChipRevision[8]; /* 0x14 */ - U8 BoardName[16]; /* 0x1C */ - U8 BoardAssembly[16]; /* 0x2C */ - U8 BoardTracerNumber[16]; /* 0x3C */ -} MPI2_CONFIG_PAGE_MAN_0, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0, - Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t; - -#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) - - -/* Manufacturing Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_MAN_1 -{ - 
MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 VPD[256]; /* 0x04 */ -} MPI2_CONFIG_PAGE_MAN_1, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1, - Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t; - -#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) - - -typedef struct _MPI2_CHIP_REVISION_ID -{ - U16 DeviceID; /* 0x00 */ - U8 PCIRevisionID; /* 0x02 */ - U8 Reserved; /* 0x03 */ -} MPI2_CHIP_REVISION_ID, MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID, - Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t; - - -/* Manufacturing Page 2 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. - */ -#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS -#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_MAN_2 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */ - U32 HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */ -} MPI2_CONFIG_PAGE_MAN_2, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2, - Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t; - -#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) - - -/* Manufacturing Page 3 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check Header.PageLength at runtime. - */ -#ifndef MPI2_MAN_PAGE_3_INFO_WORDS -#define MPI2_MAN_PAGE_3_INFO_WORDS (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_MAN_3 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */ - U32 Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */ -} MPI2_CONFIG_PAGE_MAN_3, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3, - Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t; - -#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) - - -/* Manufacturing Page 4 */ - -typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS -{ - U8 PowerSaveFlags; /* 0x00 */ - U8 InternalOperationsSleepTime; /* 0x01 */ - U8 InternalOperationsRunTime; /* 0x02 */ - U8 HostIdleTime; /* 0x03 */ -} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, - MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, - Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t; - -/* defines for the PowerSaveFlags field */ -#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) -#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) -#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) -#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) - -typedef struct _MPI2_CONFIG_PAGE_MAN_4 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Flags; /* 0x08 */ - U8 InquirySize; /* 0x0C */ - U8 Reserved2; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - U8 InquiryData[56]; /* 0x10 */ - U32 RAID0VolumeSettings; /* 0x48 */ - U32 RAID1EVolumeSettings; /* 0x4C */ - U32 RAID1VolumeSettings; /* 0x50 */ - U32 RAID10VolumeSettings; /* 0x54 */ - U32 Reserved4; /* 0x58 */ - U32 Reserved5; /* 0x5C */ - MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /* 0x60 */ - U8 MaxOCEDisks; /* 0x64 */ - U8 ResyncRate; /* 0x65 */ - U16 DataScrubDuration; /* 0x66 */ - U8 MaxHotSpares; /* 0x68 */ - U8 MaxPhysDisksPerVol; /* 0x69 */ - U8 MaxPhysDisks; /* 0x6A */ - U8 MaxVolumes; /* 0x6B */ -} MPI2_CONFIG_PAGE_MAN_4, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4, - Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t; - -#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) - -/* Manufacturing Page 4 Flags field */ -#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) -#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) - -#define 
MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) -#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) -#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) - -#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) -#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) -#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) -#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) -#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) - -#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) -#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) -#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) -#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) - -#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) -#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040) -#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020) -#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010) -#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008) -#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004) -#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002) -#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) - - -/* Manufacturing Page 5 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. - */ -#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES -#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) -#endif - -typedef struct _MPI2_MANUFACTURING5_ENTRY -{ - U64 WWID; /* 0x00 */ - U64 DeviceName; /* 0x08 */ -} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY, - Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t; - -typedef struct _MPI2_CONFIG_PAGE_MAN_5 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 NumPhys; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U32 Reserved3; /* 0x08 */ - U32 Reserved4; /* 0x0C */ - MPI2_MANUFACTURING5_ENTRY Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x08 */ -} MPI2_CONFIG_PAGE_MAN_5, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5, - Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t; - -#define MPI2_MANUFACTURING5_PAGEVERSION (0x03) - - -/* Manufacturing Page 6 */ - -typedef struct _MPI2_CONFIG_PAGE_MAN_6 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 ProductSpecificInfo;/* 0x04 */ -} MPI2_CONFIG_PAGE_MAN_6, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6, - Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t; - -#define MPI2_MANUFACTURING6_PAGEVERSION (0x00) - - -/* Manufacturing Page 7 */ - -typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO -{ - U32 Pinout; /* 0x00 */ - U8 Connector[16]; /* 0x04 */ - U8 Location; /* 0x14 */ - U8 ReceptacleID; /* 0x15 */ - U16 Slot; /* 0x16 */ - U16 Slotx2; /* 0x18 */ - U16 Slotx4; /* 0x1A */ -} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO, - Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t; - -/* defines for the Pinout field */ -#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) -#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) - -#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) -#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) -#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) -#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) -#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) -#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) -#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) -#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) -#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) -#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) -#define 
MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) -#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) -#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) -#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) -#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) -#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E) -#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F) -#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10) -#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11) -#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12) -#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13) - -/* defines for the Location field */ -#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) -#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) -#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) -#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08) -#define MPI2_MANPAGE7_LOCATION_AUTO (0x10) -#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) -#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) - -/* defines for the Slot field */ -#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. - */ -#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX -#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_MAN_7 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U32 Flags; /* 0x0C */ - U8 EnclosureName[16]; /* 0x10 */ - U8 NumPhys; /* 0x20 */ - U8 Reserved3; /* 0x21 */ - U16 Reserved4; /* 0x22 */ - MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */ -} MPI2_CONFIG_PAGE_MAN_7, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7, - Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t; - -#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) - -/* defines for the Flags field */ -#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) -#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) -#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) - -#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) - -/* - * Generic structure to use for product-specific manufacturing pages - * (currently Manufacturing Page 8 through Manufacturing Page 31). 
- */ - -typedef struct _MPI2_CONFIG_PAGE_MAN_PS -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 ProductSpecificInfo;/* 0x04 */ -} MPI2_CONFIG_PAGE_MAN_PS, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS, - Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t; - -#define MPI2_MANUFACTURING8_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING9_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING10_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING11_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING12_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING13_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING14_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING15_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING16_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING17_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING18_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING19_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING20_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING21_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING22_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING23_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING24_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING25_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING26_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING27_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING28_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING29_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING30_PAGEVERSION (0x00) -#define MPI2_MANUFACTURING31_PAGEVERSION (0x00) - - -/**************************************************************************** -* IO Unit Config Pages -****************************************************************************/ - -/* IO Unit Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U64 UniqueValue; /* 0x04 */ - MPI2_VERSION_UNION NvdataVersionDefault; /* 0x08 */ - MPI2_VERSION_UNION NvdataVersionPersistent; /* 0x0A */ -} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, - Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t; - -#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) - - -/* IO Unit Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Flags; /* 0x04 */ -} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, - Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t; - -#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) - -/* IO Unit Page 1 Flags defines */ -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) -#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) -#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) -#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) -#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) -#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) -#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) -#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) -#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) -#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) -#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) -#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) -#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) -#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) - - -/* IO Unit Page 3 */ - -/* - * Host code (drivers, BIOS, 
utilities, etc.) should leave this define set to - * one and check the value returned for GPIOCount at runtime. - */ -#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX -#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 GPIOCount; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U16 GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */ -} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, - Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t; - -#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) - -/* defines for IO Unit Page 3 GPIOVal field */ -#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) -#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) -#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) -#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) - - -/* IO Unit Page 5 */ - -/* - * Upper layer code (drivers, utilities, etc.) should leave this define set to - * one and check the value returned for NumDmaEngines at runtime. - */ -#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES -#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U64 RaidAcceleratorBufferBaseAddress; /* 0x04 */ - U64 RaidAcceleratorBufferSize; /* 0x0C */ - U64 RaidAcceleratorControlBaseAddress; /* 0x14 */ - U8 RAControlSize; /* 0x1C */ - U8 NumDmaEngines; /* 0x1D */ - U8 RAMinControlSize; /* 0x1E */ - U8 RAMaxControlSize; /* 0x1F */ - U32 Reserved1; /* 0x20 */ - U32 Reserved2; /* 0x24 */ - U32 Reserved3; /* 0x28 */ - U32 DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */ -} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, - Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t; - -#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) - -/* defines for IO Unit Page 5 DmaEngineCapabilities field */ -#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) -#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) - -#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) -#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) -#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) -#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) - - -/* IO Unit Page 6 */ - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U16 Flags; /* 0x04 */ - U8 RAHostControlSize; /* 0x06 */ - U8 Reserved0; /* 0x07 */ - U64 RaidAcceleratorHostControlBaseAddress; /* 0x08 */ - U32 Reserved1; /* 0x10 */ - U32 Reserved2; /* 0x14 */ - U32 Reserved3; /* 0x18 */ -} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, - Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t; - -#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) - -/* defines for IO Unit Page 6 Flags field */ -#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) - - -/* IO Unit Page 7 */ - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 CurrentPowerMode; /* 0x04 */ /* reserved in MPI 2.0 */ - U8 PreviousPowerMode; /* 0x05 */ /* reserved in MPI 2.0 */ - U8 PCIeWidth; /* 0x06 */ - U8 PCIeSpeed; /* 0x07 */ - U32 ProcessorState; /* 0x08 */ - U32 PowerManagementCapabilities; /* 0x0C */ - U16 IOCTemperature; /* 0x10 */ - U8 IOCTemperatureUnits; /* 0x12 */ - U8 IOCSpeed; /* 0x13 */ - U16 BoardTemperature; /* 0x14 */ - U8 BoardTemperatureUnits; /* 0x16 */ - U8 Reserved3; /* 0x17 */ - U32 BoardPowerRequirement; /* 0x18 */ /* reserved prior to MPI v2.6 
*/ - U32 PCISlotPowerAllocation; /* 0x1C */ /* reserved prior to MPI v2.6 */ - U8 Flags; /* 0x20 */ /* reserved prior to MPI v2.6 */ - U8 Reserved6; /* 0x21 */ - U16 Reserved7; /* 0x22 */ - U32 Reserved8; /* 0x24 */ -} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, - Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; - -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) - -/* defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ -#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) -#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) -#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) -#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) -#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) - -#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) -#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) -#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) -#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) -#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) -#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) - - -/* defines for IO Unit Page 7 PCIeWidth field */ -#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) -#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) -#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) -#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) -#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10) - -/* defines for IO Unit Page 7 PCIeSpeed field */ -#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) -#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) -#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) -#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) - -/* defines for IO Unit Page 7 ProcessorState field */ -#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) -#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) - -#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) -#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) -#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) - -/* defines for IO Unit Page 7 PowerManagementCapabilities field */ -#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) -#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) -#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) -#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) -#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) -#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) -#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) -#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) -#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) -#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) -#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) -#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) -#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) /* obsolete */ -#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) /* obsolete */ -#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) /* obsolete */ -#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) /* obsolete */ - -/* obsolete names for the PowerManagementCapabilities bits (above) */ -#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) -#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) -#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED 
(0x00000100) -#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */ -#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */ - - -/* defines for IO Unit Page 7 IOCTemperatureUnits field */ -#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) -#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) -#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) - -/* defines for IO Unit Page 7 IOCSpeed field */ -#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) -#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) -#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) -#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) - -/* defines for IO Unit Page 7 BoardTemperatureUnits field */ -#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) -#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) -#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) - -/* defines for IO Unit Page 7 Flags field */ -#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) - - -/* IO Unit Page 8 */ - -#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) - -typedef struct _MPI2_IOUNIT8_SENSOR -{ - U16 Flags; /* 0x00 */ - U16 Reserved1; /* 0x02 */ - U16 Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */ - U32 Reserved2; /* 0x0C */ - U32 Reserved3; /* 0x10 */ - U32 Reserved4; /* 0x14 */ -} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR, - Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t; - -/* defines for IO Unit Page 8 Sensor Flags field */ -#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) -#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) -#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) -#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumSensors at runtime. - */ -#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES -#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U8 NumSensors; /* 0x0C */ - U8 PollingInterval; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI2_IOUNIT8_SENSOR Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, - Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t; - -#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) - - -/* IO Unit Page 9 */ - -typedef struct _MPI2_IOUNIT9_SENSOR -{ - U16 CurrentTemperature; /* 0x00 */ - U16 Reserved1; /* 0x02 */ - U8 Flags; /* 0x04 */ - U8 Reserved2; /* 0x05 */ - U16 Reserved3; /* 0x06 */ - U32 Reserved4; /* 0x08 */ - U32 Reserved5; /* 0x0C */ -} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR, - Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t; - -/* defines for IO Unit Page 9 Sensor Flags field */ -#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumSensors at runtime. 
- */ -#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES -#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U8 NumSensors; /* 0x0C */ - U8 Reserved4; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI2_IOUNIT9_SENSOR Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, - Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t; - -#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) - - -/* IO Unit Page 10 */ - -typedef struct _MPI2_IOUNIT10_FUNCTION -{ - U8 CreditPercent; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ -} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION, - Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t; - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumFunctions at runtime. - */ -#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES -#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 NumFunctions; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U32 Reserved3; /* 0x08 */ - U32 Reserved4; /* 0x0C */ - MPI2_IOUNIT10_FUNCTION Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES]; /* 0x10 */ -} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, - Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t; - -#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) - - -/* IO Unit Page 11 (for MPI v2.6 and later) */ - -typedef struct _MPI26_IOUNIT11_SPINUP_GROUP -{ - U8 MaxTargetSpinup; /* 0x00 */ - U8 SpinupDelay; /* 0x01 */ - U8 SpinupFlags; /* 0x02 */ - U8 Reserved1; /* 0x03 */ -} MPI26_IOUNIT11_SPINUP_GROUP, MPI2_POINTER PTR_MPI26_IOUNIT11_SPINUP_GROUP, - Mpi26IOUnit11SpinupGroup_t, MPI2_POINTER pMpi26IOUnit11SpinupGroup_t; - -/* defines for IO Unit Page 11 SpinupFlags */ -#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) - - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * four and check the value returned for NumPhys at runtime. 
- */ -#ifndef MPI26_IOUNITPAGE11_PHY_MAX -#define MPI26_IOUNITPAGE11_PHY_MAX (4) -#endif - -typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */ - U32 Reserved2; /* 0x18 */ - U32 Reserved3; /* 0x1C */ - U32 Reserved4; /* 0x20 */ - U8 BootDeviceWaitTime; /* 0x24 */ - U8 SATADeviceWaitTime; /* 0x25 */ - U8 PCIeWaitTime; /* 0x26 */ - U8 Reserved6; /* 0x27 */ - U8 NumPhys; /* 0x28 */ - U8 PEInitialSpinupDelay; /* 0x29 */ - U8 PEReplyDelay; /* 0x2A */ - U8 Flags; /* 0x2B */ - U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/* 0x2C */ -} MPI26_CONFIG_PAGE_IO_UNIT_11, - MPI2_POINTER PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, - Mpi26IOUnitPage11_t, MPI2_POINTER pMpi26IOUnitPage11_t; - -#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) - -/* defines for Flags field */ -#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01) - -/* defines for PHY field */ -#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03) - - - -/**************************************************************************** -* IOC Config Pages -****************************************************************************/ - -/* IOC Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_IOC_0 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U16 VendorID; /* 0x0C */ - U16 DeviceID; /* 0x0E */ - U8 RevisionID; /* 0x10 */ - U8 Reserved3; /* 0x11 */ - U16 Reserved4; /* 0x12 */ - U32 ClassCode; /* 0x14 */ - U16 SubsystemVendorID; /* 0x18 */ - U16 SubsystemID; /* 0x1A */ -} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0, - Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t; - -#define MPI2_IOCPAGE0_PAGEVERSION (0x02) - - -/* IOC Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_IOC_1 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Flags; /* 0x04 */ - U32 CoalescingTimeout; /* 0x08 */ - U8 CoalescingDepth; /* 0x0C */ - U8 PCISlotNum; /* 0x0D */ - U8 PCIBusNum; /* 0x0E */ - U8 PCIDomainSegment; /* 0x0F */ - U32 Reserved1; /* 0x10 */ - U32 ProductSpecific; /* 0x14 */ -} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1, - Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t; - -#define MPI2_IOCPAGE1_PAGEVERSION (0x05) - -/* defines for IOC Page 1 Flags field */ -#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) - -#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) -#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) -#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) - -/* IOC Page 6 */ - -typedef struct _MPI2_CONFIG_PAGE_IOC_6 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 CapabilitiesFlags; /* 0x04 */ - U8 MaxDrivesRAID0; /* 0x08 */ - U8 MaxDrivesRAID1; /* 0x09 */ - U8 MaxDrivesRAID1E; /* 0x0A */ - U8 MaxDrivesRAID10; /* 0x0B */ - U8 MinDrivesRAID0; /* 0x0C */ - U8 MinDrivesRAID1; /* 0x0D */ - U8 MinDrivesRAID1E; /* 0x0E */ - U8 MinDrivesRAID10; /* 0x0F */ - U32 Reserved1; /* 0x10 */ - U8 MaxGlobalHotSpares; /* 0x14 */ - U8 MaxPhysDisks; /* 0x15 */ - U8 MaxVolumes; /* 0x16 */ - U8 MaxConfigs; /* 0x17 */ - U8 MaxOCEDisks; /* 0x18 */ - U8 Reserved2; /* 0x19 */ - U16 Reserved3; /* 0x1A */ - U32 SupportedStripeSizeMapRAID0; /* 0x1C */ - U32 SupportedStripeSizeMapRAID1E; /* 0x20 */ - U32 SupportedStripeSizeMapRAID10; /* 0x24 */ - U32 Reserved4; /* 0x28 */ - U32 Reserved5; /* 0x2C */ - U16 DefaultMetadataSize; /* 0x30 */ - U16 Reserved6; /* 0x32 */ - U16 MaxBadBlockTableEntries; /* 0x34 */ - U16 Reserved7; /* 0x36 */ - U32 IRNvsramVersion; /* 0x38 */ -} 
MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6, - Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t; - -#define MPI2_IOCPAGE6_PAGEVERSION (0x05) - -/* defines for IOC Page 6 CapabilitiesFlags */ -#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) -#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) -#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) -#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) -#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) -#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) - - -/* IOC Page 7 */ - -#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) - -typedef struct _MPI2_CONFIG_PAGE_IOC_7 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */ - U16 SASBroadcastPrimitiveMasks; /* 0x18 */ - U16 SASNotifyPrimitiveMasks; /* 0x1A */ - U32 Reserved3; /* 0x1C */ -} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7, - Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t; - -#define MPI2_IOCPAGE7_PAGEVERSION (0x02) - - -/* IOC Page 8 */ - -typedef struct _MPI2_CONFIG_PAGE_IOC_8 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 NumDevsPerEnclosure; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U16 MaxPersistentEntries; /* 0x08 */ - U16 MaxNumPhysicalMappedIDs; /* 0x0A */ - U16 Flags; /* 0x0C */ - U16 Reserved3; /* 0x0E */ - U16 IRVolumeMappingFlags; /* 0x10 */ - U16 Reserved4; /* 0x12 */ - U32 Reserved5; /* 0x14 */ -} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8, - Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t; - -#define MPI2_IOCPAGE8_PAGEVERSION (0x00) - -/* defines for IOC Page 8 Flags field */ -#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) -#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) - -#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) -#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) -#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) - -#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) -#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) - -/* defines for IOC Page 8 IRVolumeMappingFlags */ -#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) -#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) -#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) - - -/**************************************************************************** -* BIOS Config Pages -****************************************************************************/ - -/* BIOS Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_BIOS_1 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 BiosOptions; /* 0x04 */ - U32 IOCSettings; /* 0x08 */ - U8 SSUTimeout; /* 0x0C */ - U8 MaxEnclosureLevel; /* 0x0D */ - U16 Reserved2; /* 0x0E */ - U32 DeviceSettings; /* 0x10 */ - U16 NumberOfDevices; /* 0x14 */ - U16 UEFIVersion; /* 0x16 */ - U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */ - U16 IOTimeoutSequential; /* 0x1A */ - U16 IOTimeoutOther; /* 0x1C */ - U16 IOTimeoutBlockDevicesRM; /* 0x1E */ -} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, - Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; - -#define MPI2_BIOSPAGE1_PAGEVERSION (0x07) - -/* values for BIOS Page 1 BiosOptions field */ -#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) -#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) - -#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) -#define 
MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) -#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) -#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) -#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) -#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) - -#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) - -#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) -#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) -#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) -#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) -#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) - -#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) -#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) - -#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) -#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) -#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) -#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) - -#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) - -/* values for BIOS Page 1 IOCSettings field */ -#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) -#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) -#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) - -#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) -#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) -#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) -#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) - -#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) -#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) -#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) -#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) -#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) - -#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) - -/* values for BIOS Page 1 DeviceSettings field */ -#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) -#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) -#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) -#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) -#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) - -/* defines for BIOS Page 1 UEFIVersion field */ -#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) -#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) -#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) -#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) - - - -/* BIOS Page 2 */ - -typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER -{ - U32 Reserved1; /* 0x00 */ - U32 Reserved2; /* 0x04 */ - U32 Reserved3; /* 0x08 */ - U32 Reserved4; /* 0x0C */ - U32 Reserved5; /* 0x10 */ - U32 Reserved6; /* 0x14 */ -} MPI2_BOOT_DEVICE_ADAPTER_ORDER, - MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, - Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t; - -typedef struct _MPI2_BOOT_DEVICE_SAS_WWID -{ - U64 SASAddress; /* 0x00 */ - U8 LUN[8]; /* 0x08 */ - U32 Reserved1; /* 0x10 */ - U32 Reserved2; /* 0x14 */ -} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID, - Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t; - -typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT -{ - U64 EnclosureLogicalID; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ - U16 SlotNumber; /* 0x10 */ - U16 Reserved3; /* 0x12 */ - U32 Reserved4; /* 0x14 */ -} 
MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, - MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, - Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t; - -typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME -{ - U64 DeviceName; /* 0x00 */ - U8 LUN[8]; /* 0x08 */ - U32 Reserved1; /* 0x10 */ - U32 Reserved2; /* 0x14 */ -} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME, - Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t; - -typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE -{ - MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; - MPI2_BOOT_DEVICE_SAS_WWID SasWwid; - MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; - MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName; -} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE, - Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t; - -typedef struct _MPI2_CONFIG_PAGE_BIOS_2 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U32 Reserved3; /* 0x0C */ - U32 Reserved4; /* 0x10 */ - U32 Reserved5; /* 0x14 */ - U32 Reserved6; /* 0x18 */ - U8 ReqBootDeviceForm; /* 0x1C */ - U8 Reserved7; /* 0x1D */ - U16 Reserved8; /* 0x1E */ - MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /* 0x20 */ - U8 ReqAltBootDeviceForm; /* 0x38 */ - U8 Reserved9; /* 0x39 */ - U16 Reserved10; /* 0x3A */ - MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /* 0x3C */ - U8 CurrentBootDeviceForm; /* 0x58 */ - U8 Reserved11; /* 0x59 */ - U16 Reserved12; /* 0x5A */ - MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /* 0x58 */ -} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2, - Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t; - -#define MPI2_BIOSPAGE2_PAGEVERSION (0x04) - -/* values for BIOS Page 2 BootDeviceForm fields */ -#define MPI2_BIOSPAGE2_FORM_MASK (0x0F) -#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) -#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05) -#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) -#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07) - - -/* BIOS Page 3 */ - -#define MPI2_BIOSPAGE3_NUM_ADAPTER (4) - -typedef struct _MPI2_ADAPTER_INFO -{ - U8 PciBusNumber; /* 0x00 */ - U8 PciDeviceAndFunctionNumber; /* 0x01 */ - U16 AdapterFlags; /* 0x02 */ -} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO, - Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t; - -#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) -#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) - -typedef struct _MPI2_ADAPTER_ORDER_AUX -{ - U64 WWID; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ -} MPI2_ADAPTER_ORDER_AUX, MPI2_POINTER PTR_MPI2_ADAPTER_ORDER_AUX, - Mpi2AdapterOrderAux_t, MPI2_POINTER pMpi2AdapterOrderAux_t; - -typedef struct _MPI2_CONFIG_PAGE_BIOS_3 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U32 GlobalFlags; /* 0x04 */ - U32 BiosVersion; /* 0x08 */ - MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x0C */ - U32 Reserved1; /* 0x1C */ - MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x20 */ /* MPI v2.5 and newer */ -} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3, - Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t; - -#define MPI2_BIOSPAGE3_PAGEVERSION (0x01) - -/* values for BIOS Page 3 GlobalFlags */ -#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) -#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004) -#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010) - -#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0) -#define 
MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000) -#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020) -#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040) - - -/* BIOS Page 4 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. - */ -#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES -#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) -#endif - -typedef struct _MPI2_BIOS4_ENTRY -{ - U64 ReassignmentWWID; /* 0x00 */ - U64 ReassignmentDeviceName; /* 0x08 */ -} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY, - Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t; - -typedef struct _MPI2_CONFIG_PAGE_BIOS_4 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 NumPhys; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - MPI2_BIOS4_ENTRY Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */ -} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4, - Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t; - -#define MPI2_BIOSPAGE4_PAGEVERSION (0x01) - - -/**************************************************************************** -* RAID Volume Config Pages -****************************************************************************/ - -/* RAID Volume Page 0 */ - -typedef struct _MPI2_RAIDVOL0_PHYS_DISK -{ - U8 RAIDSetNum; /* 0x00 */ - U8 PhysDiskMap; /* 0x01 */ - U8 PhysDiskNum; /* 0x02 */ - U8 Reserved; /* 0x03 */ -} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK, - Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t; - -/* defines for the PhysDiskMap field */ -#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01) -#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02) - -typedef struct _MPI2_RAIDVOL0_SETTINGS -{ - U16 Settings; /* 0x00 */ - U8 HotSparePool; /* 0x01 */ - U8 Reserved; /* 0x02 */ -} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS, - Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t; - -/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ -#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01) -#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02) -#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04) -#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08) -#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10) -#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20) -#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40) -#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80) - -/* RAID Volume Page 0 VolumeSettings defines */ -#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008) -#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004) - -#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003) -#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000) -#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001) -#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhysDisks at runtime. 
- */ -#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX -#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U16 DevHandle; /* 0x04 */ - U8 VolumeState; /* 0x06 */ - U8 VolumeType; /* 0x07 */ - U32 VolumeStatusFlags; /* 0x08 */ - MPI2_RAIDVOL0_SETTINGS VolumeSettings; /* 0x0C */ - U64 MaxLBA; /* 0x10 */ - U32 StripeSize; /* 0x18 */ - U16 BlockSize; /* 0x1C */ - U16 Reserved1; /* 0x1E */ - U8 SupportedPhysDisks; /* 0x20 */ - U8 ResyncRate; /* 0x21 */ - U16 DataScrubDuration; /* 0x22 */ - U8 NumPhysDisks; /* 0x24 */ - U8 Reserved2; /* 0x25 */ - U8 Reserved3; /* 0x26 */ - U8 InactiveStatus; /* 0x27 */ - MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */ -} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, - Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t; - -#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) - -/* values for RAID VolumeState */ -#define MPI2_RAID_VOL_STATE_MISSING (0x00) -#define MPI2_RAID_VOL_STATE_FAILED (0x01) -#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) -#define MPI2_RAID_VOL_STATE_ONLINE (0x03) -#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) -#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) - -/* values for RAID VolumeType */ -#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) -#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) -#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) -#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) -#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) - -/* values for RAID Volume Page 0 VolumeStatusFlags field */ -#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) -#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) -#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) -#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) -#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) -#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) -#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) -#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) -#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) -#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) -#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) -#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) -#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) -#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) -#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) -#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) -#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) -#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) -#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) - -/* values for RAID Volume Page 0 SupportedPhysDisks field */ -#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) -#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) -#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) -#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) - -/* values for RAID Volume Page 0 InactiveStatus field */ -#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) -#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) -#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) -#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) -#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) -#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) -#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) - - -/* RAID Volume Page 
1 */ - -typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U16 DevHandle; /* 0x04 */ - U16 Reserved0; /* 0x06 */ - U8 GUID[24]; /* 0x08 */ - U8 Name[16]; /* 0x20 */ - U64 WWID; /* 0x30 */ - U32 Reserved1; /* 0x38 */ - U32 Reserved2; /* 0x3C */ -} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, - Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t; - -#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) - - -/**************************************************************************** -* RAID Physical Disk Config Pages -****************************************************************************/ - -/* RAID Physical Disk Page 0 */ - -typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS -{ - U16 Reserved1; /* 0x00 */ - U8 HotSparePool; /* 0x02 */ - U8 Reserved2; /* 0x03 */ -} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS, - Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t; - -/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ - -typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA -{ - U8 VendorID[8]; /* 0x00 */ - U8 ProductID[16]; /* 0x08 */ - U8 ProductRevLevel[4]; /* 0x18 */ - U8 SerialNum[32]; /* 0x1C */ -} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, - MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, - Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t; - -typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U16 DevHandle; /* 0x04 */ - U8 Reserved1; /* 0x06 */ - U8 PhysDiskNum; /* 0x07 */ - MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /* 0x08 */ - U32 Reserved2; /* 0x0C */ - MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /* 0x10 */ - U32 Reserved3; /* 0x4C */ - U8 PhysDiskState; /* 0x50 */ - U8 OfflineReason; /* 0x51 */ - U8 IncompatibleReason; /* 0x52 */ - U8 PhysDiskAttributes; /* 0x53 */ - U32 PhysDiskStatusFlags; /* 0x54 */ - U64 DeviceMaxLBA; /* 0x58 */ - U64 HostMaxLBA; /* 0x60 */ - U64 CoercedMaxLBA; /* 0x68 */ - U16 BlockSize; /* 0x70 */ - U16 Reserved5; /* 0x72 */ - U32 Reserved6; /* 0x74 */ -} MPI2_CONFIG_PAGE_RD_PDISK_0, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, - Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t; - -#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) - -/* PhysDiskState defines */ -#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) -#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) -#define MPI2_RAID_PD_STATE_OFFLINE (0x02) -#define MPI2_RAID_PD_STATE_ONLINE (0x03) -#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) -#define MPI2_RAID_PD_STATE_DEGRADED (0x05) -#define MPI2_RAID_PD_STATE_REBUILDING (0x06) -#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) - -/* OfflineReason defines */ -#define MPI2_PHYSDISK0_ONLINE (0x00) -#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) -#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) -#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) -#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) -#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) -#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) - -/* IncompatibleReason defines */ -#define MPI2_PHYSDISK0_COMPATIBLE (0x00) -#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) -#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) -#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) -#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) -#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) -#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) -#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) - -/* 
PhysDiskAttributes defines */ -#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) -#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) -#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) - -#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) -#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) -#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) - -/* PhysDiskStatusFlags defines */ -#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) -#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) -#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) -#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) -#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) -#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) -#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) -#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) - - -/* RAID Physical Disk Page 1 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhysDiskPaths at runtime. - */ -#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX -#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) -#endif - -typedef struct _MPI2_RAIDPHYSDISK1_PATH -{ - U16 DevHandle; /* 0x00 */ - U16 Reserved1; /* 0x02 */ - U64 WWID; /* 0x04 */ - U64 OwnerWWID; /* 0x0C */ - U8 OwnerIdentifier; /* 0x14 */ - U8 Reserved2; /* 0x15 */ - U16 Flags; /* 0x16 */ -} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH, - Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t; - -/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ -#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) -#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) -#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) - -typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 -{ - MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ - U8 NumPhysDiskPaths; /* 0x04 */ - U8 PhysDiskNum; /* 0x05 */ - U16 Reserved1; /* 0x06 */ - U32 Reserved2; /* 0x08 */ - MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */ -} MPI2_CONFIG_PAGE_RD_PDISK_1, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, - Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t; - -#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) - - -/**************************************************************************** -* values for fields used by several types of SAS Config Pages -****************************************************************************/ - -/* values for NegotiatedLinkRates fields */ -#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) -#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) -#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) -/* link rates used for Negotiated Physical and Logical Link Rate */ -#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) -#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) -#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) -#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) -#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) -#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) -#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) -#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) -#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) -#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) -#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) -#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C) - - -/* values for AttachedPhyInfo fields */ -#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) -#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS 
(0x00000020) -#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) - -#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) -#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) -#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) -#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) -#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) -#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) -#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) -#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) -#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) -#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) - - -/* values for PhyInfo fields */ -#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) - -#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) -#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) -#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) -#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) -#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) - -#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) -#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) -#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) -#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) -#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) -#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) - -#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) -#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) -#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) -#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) -#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) -#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) -#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) -#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) -#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) -#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) - -#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) -#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) -#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) -#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) - -#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) -#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) - -#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) -#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) -#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) -#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) - - -/* values for SAS ProgrammedLinkRate fields */ -#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) -#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) -#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) -#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) -#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) -#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) -#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0) -#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) -#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) -#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) -#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) -#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) -#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) -#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C) - - -/* values for SAS HwLinkRate fields */ -#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) -#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) -#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) -#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) -#define 
MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) -#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0) -#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) -#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) -#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) -#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) -#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) -#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C) - - - -/**************************************************************************** -* SAS IO Unit Config Pages -****************************************************************************/ - -/* SAS IO Unit Page 0 */ - -typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA -{ - U8 Port; /* 0x00 */ - U8 PortFlags; /* 0x01 */ - U8 PhyFlags; /* 0x02 */ - U8 NegotiatedLinkRate; /* 0x03 */ - U32 ControllerPhyDeviceInfo;/* 0x04 */ - U16 AttachedDevHandle; /* 0x08 */ - U16 ControllerDevHandle; /* 0x0A */ - U32 DiscoveryStatus; /* 0x0C */ - U32 Reserved; /* 0x10 */ -} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, - Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t; - -#define MPI26_SASIOUNIT0_PHY_PORT_NO_PORT (0xFF) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. - */ -#ifndef MPI2_SAS_IOUNIT0_PHY_MAX -#define MPI2_SAS_IOUNIT0_PHY_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 NumPhys; /* 0x0C */ - U8 Reserved2; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */ -} MPI2_CONFIG_PAGE_SASIOUNIT_0, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, - Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t; - -#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) - -/* values for SAS IO Unit Page 0 PortFlags */ -#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) -#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) - -/* values for SAS IO Unit Page 0 PhyFlags */ -#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) -#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) -#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) -#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) - -/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - -/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ - -/* values for SAS IO Unit Page 0 DiscoveryStatus */ -#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) -#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) -#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) -#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) -#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) -#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) -#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) -#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) -#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) -#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) -#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) -#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) -#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) -#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) -#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) -#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) -#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) -#define 
MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) -#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) -#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) - - -/* SAS IO Unit Page 1 */ - -typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA -{ - U8 Port; /* 0x00 */ - U8 PortFlags; /* 0x01 */ - U8 PhyFlags; /* 0x02 */ - U8 MaxMinLinkRate; /* 0x03 */ - U32 ControllerPhyDeviceInfo; /* 0x04 */ - U16 MaxTargetPortConnectTime; /* 0x08 */ - U16 Reserved1; /* 0x0A */ -} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, - Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t; - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. - */ -#ifndef MPI2_SAS_IOUNIT1_PHY_MAX -#define MPI2_SAS_IOUNIT1_PHY_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 ControlFlags; /* 0x08 */ - U16 SASNarrowMaxQueueDepth; /* 0x0A */ - U16 AdditionalControlFlags; /* 0x0C */ - U16 SASWideMaxQueueDepth; /* 0x0E */ - U8 NumPhys; /* 0x10 */ - U8 SATAMaxQDepth; /* 0x11 */ - U8 ReportDeviceMissingDelay; /* 0x12 */ - U8 IODeviceMissingDelay; /* 0x13 */ - MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */ -} MPI2_CONFIG_PAGE_SASIOUNIT_1, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, - Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t; - -#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) - -/* values for SAS IO Unit Page 1 ControlFlags */ -#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) -#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) -#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. */ -#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) - -#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) -#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) -#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) -#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) -#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) - -#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) -#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) -#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) -#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) -#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) -#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) -#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) -#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. 
*/ - -/* values for SAS IO Unit Page 1 AdditionalControlFlags */ -#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) -#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) -#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) -#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) -#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) -#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) -#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) -#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) -#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) - -/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ -#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) -#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) - -/* values for SAS IO Unit Page 1 PortFlags */ -#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) - -/* values for SAS IO Unit Page 1 PhyFlags */ -#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) -#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) -#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) -#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) - -/* values for SAS IO Unit Page 1 MaxMinLinkRate */ -#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) -#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) -#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) -#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) -#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) -#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0) -#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) -#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) -#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) -#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) -#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) -#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C) - -/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ - - -/* SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ - -typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP -{ - U8 MaxTargetSpinup; /* 0x00 */ - U8 SpinupDelay; /* 0x01 */ - U8 SpinupFlags; /* 0x02 */ - U8 Reserved1; /* 0x03 */ -} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, - Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t; - -/* defines for SAS IO Unit Page 4 SpinupFlags */ -#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) - - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. 
- */ -#ifndef MPI2_SAS_IOUNIT4_PHY_MAX -#define MPI2_SAS_IOUNIT4_PHY_MAX (4) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */ - U32 Reserved1; /* 0x18 */ - U32 Reserved2; /* 0x1C */ - U32 Reserved3; /* 0x20 */ - U8 BootDeviceWaitTime; /* 0x24 */ - U8 SATADeviceWaitTime; /* 0x25 */ - U16 Reserved5; /* 0x26 */ - U8 NumPhys; /* 0x28 */ - U8 PEInitialSpinupDelay; /* 0x29 */ - U8 PEReplyDelay; /* 0x2A */ - U8 Flags; /* 0x2B */ - U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */ -} MPI2_CONFIG_PAGE_SASIOUNIT_4, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, - Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t; - -#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) - -/* defines for Flags field */ -#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) - -/* defines for PHY field */ -#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) - - -/* SAS IO Unit Page 5 */ - -typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS -{ - U8 ControlFlags; /* 0x00 */ - U8 PortWidthModGroup; /* 0x01 */ - U16 InactivityTimerExponent; /* 0x02 */ - U8 SATAPartialTimeout; /* 0x04 */ - U8 Reserved2; /* 0x05 */ - U8 SATASlumberTimeout; /* 0x06 */ - U8 Reserved3; /* 0x07 */ - U8 SASPartialTimeout; /* 0x08 */ - U8 Reserved4; /* 0x09 */ - U8 SASSlumberTimeout; /* 0x0A */ - U8 Reserved5; /* 0x0B */ -} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, - MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, - Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t; - -/* defines for ControlFlags field */ -#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) -#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) -#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) -#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) - -/* defines for PortWidthModeGroup field */ -#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) - -/* defines for InactivityTimerExponent field */ -#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) -#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) -#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) -#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) -#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) -#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) -#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) -#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) - -#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) -#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) -#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) -#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) -#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) -#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) -#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) -#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. 
- */ -#ifndef MPI2_SAS_IOUNIT5_PHY_MAX -#define MPI2_SAS_IOUNIT5_PHY_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 NumPhys; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */ -} MPI2_CONFIG_PAGE_SASIOUNIT_5, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, - Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; - -#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) - - -/* SAS IO Unit Page 6 */ - -typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS -{ - U8 CurrentStatus; /* 0x00 */ - U8 CurrentModulation; /* 0x01 */ - U8 CurrentUtilization; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U32 Reserved2; /* 0x04 */ -} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, - MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, - Mpi2SasIOUnit6PortWidthModGroupStatus_t, - MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t; - -/* defines for CurrentStatus field */ -#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) -#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) -#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) -#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) -#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) -#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) -#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) -#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) - -/* defines for CurrentModulation field */ -#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) -#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) -#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) -#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumGroups at runtime. - */ -#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX -#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ - U8 NumGroups; /* 0x10 */ - U8 Reserved3; /* 0x11 */ - U16 Reserved4; /* 0x12 */ - MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS - PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */ -} MPI2_CONFIG_PAGE_SASIOUNIT_6, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, - Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t; - -#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) - - -/* SAS IO Unit Page 7 */ - -typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS -{ - U8 Flags; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U8 Threshold75Pct; /* 0x04 */ - U8 Threshold50Pct; /* 0x05 */ - U8 Threshold25Pct; /* 0x06 */ - U8 Reserved3; /* 0x07 */ -} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, - MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, - Mpi2SasIOUnit7PortWidthModGroupSettings_t, - MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t; - -/* defines for Flags field */ -#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) - - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumGroups at runtime. 
- */
-#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
-#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
-#endif
-
-typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U8 SamplingInterval; /* 0x08 */
- U8 WindowLength; /* 0x09 */
- U16 Reserved1; /* 0x0A */
- U32 Reserved2; /* 0x0C */
- U32 Reserved3; /* 0x10 */
- U8 NumGroups; /* 0x14 */
- U8 Reserved4; /* 0x15 */
- U16 Reserved5; /* 0x16 */
- MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
- PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
-} MPI2_CONFIG_PAGE_SASIOUNIT_7,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
- Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
-
-#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
-
-
-/* SAS IO Unit Page 8 */
-
-typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U32 Reserved1; /* 0x08 */
- U32 PowerManagementCapabilities; /* 0x0C */
- U8 TxRxSleepStatus; /* 0x10 */ /* reserved in MPI 2.0 */
- U8 Reserved2; /* 0x11 */
- U16 Reserved3; /* 0x12 */
-} MPI2_CONFIG_PAGE_SASIOUNIT_8,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
- Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
-
-#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
-
-/* defines for PowerManagementCapabilities field */
-#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
-#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
-#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
-#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
-
-/* defines for TxRxSleepStatus field */
-#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
-#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
-#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
-#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
-
-
-
-/* SAS IO Unit Page 16 */
-
-typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U64 TimeStamp; /* 0x08 */
- U32 Reserved1; /* 0x10 */
- U32 Reserved2; /* 0x14 */
- U32 FastPathPendedRequests; /* 0x18 */
- U32 FastPathUnPendedRequests; /* 0x1C */
- U32 FastPathHostRequestStarts; /* 0x20 */
- U32 FastPathFirmwareRequestStarts; /* 0x24 */
- U32 FastPathHostCompletions; /* 0x28 */
- U32 FastPathFirmwareCompletions; /* 0x2C */
- U32 NonFastPathRequestStarts; /* 0x30 */
- U32 NonFastPathHostCompletions; /* 0x34 */
-} MPI2_CONFIG_PAGE_SASIOUNIT16,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
- Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
-
-#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
-
-
-/****************************************************************************
-* SAS Expander Config Pages
-****************************************************************************/
-
-/* SAS Expander Page 0 */
-
-typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U8 PhysicalPort; /* 0x08 */
- U8 ReportGenLength; /* 0x09 */
- U16 EnclosureHandle; /* 0x0A */
- U64 SASAddress; /* 0x0C */
- U32 DiscoveryStatus; /* 0x14 */
- U16 DevHandle; /* 0x18 */
- U16 ParentDevHandle; /* 0x1A */
- U16 ExpanderChangeCount; /* 0x1C
*/ - U16 ExpanderRouteIndexes; /* 0x1E */ - U8 NumPhys; /* 0x20 */ - U8 SASLevel; /* 0x21 */ - U16 Flags; /* 0x22 */ - U16 STPBusInactivityTimeLimit; /* 0x24 */ - U16 STPMaxConnectTimeLimit; /* 0x26 */ - U16 STP_SMP_NexusLossTime; /* 0x28 */ - U16 MaxNumRoutedSasAddresses; /* 0x2A */ - U64 ActiveZoneManagerSASAddress;/* 0x2C */ - U16 ZoneLockInactivityLimit; /* 0x34 */ - U16 Reserved1; /* 0x36 */ - U8 TimeToReducedFunc; /* 0x38 */ - U8 InitialTimeToReducedFunc; /* 0x39 */ - U8 MaxReducedFuncTime; /* 0x3A */ - U8 Reserved2; /* 0x3B */ -} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0, - Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t; - -#define MPI2_SASEXPANDER0_PAGEVERSION (0x06) - -/* values for SAS Expander Page 0 DiscoveryStatus field */ -#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) -#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) -#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) -#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) -#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000) -#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) -#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) -#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000) -#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) -#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800) -#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400) -#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200) -#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100) -#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080) -#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040) -#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020) -#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010) -#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004) -#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) -#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) - -/* values for SAS Expander Page 0 Flags field */ -#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) -#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) -#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) -#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) -#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) -#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100) -#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) -#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) -#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) -#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) -#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) - - -/* SAS Expander Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 PhysicalPort; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U8 NumPhys; /* 0x0C */ - U8 Phy; /* 0x0D */ - U16 NumTableEntriesProgrammed; /* 0x0E */ - U8 ProgrammedLinkRate; /* 0x10 */ - U8 HwLinkRate; /* 0x11 */ - U16 AttachedDevHandle; /* 0x12 */ - U32 PhyInfo; /* 0x14 */ - U32 AttachedDeviceInfo; /* 0x18 */ - U16 ExpanderDevHandle; /* 0x1C */ - U8 ChangeCount; /* 0x1E */ - U8 NegotiatedLinkRate; /* 0x1F */ - U8 PhyIdentifier; /* 0x20 */ - U8 AttachedPhyIdentifier; /* 0x21 */ - U8 Reserved3; /* 0x22 */ - U8 DiscoveryInfo; /* 0x23 */ - U32 AttachedPhyInfo; /* 0x24 */ - U8 ZoneGroup; /* 0x28 */ - U8 
SelfConfigStatus; /* 0x29 */ - U16 Reserved4; /* 0x2A */ -} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1, - Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t; - -#define MPI2_SASEXPANDER1_PAGEVERSION (0x02) - -/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ - -/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ - -/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ - -/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */ - -/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - -/* values for SAS Expander Page 1 DiscoveryInfo field */ -#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) -#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) -#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) - -/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ - - -/**************************************************************************** -* SAS Device Config Pages -****************************************************************************/ - -/* SAS Device Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 Slot; /* 0x08 */ - U16 EnclosureHandle; /* 0x0A */ - U64 SASAddress; /* 0x0C */ - U16 ParentDevHandle; /* 0x14 */ - U8 PhyNum; /* 0x16 */ - U8 AccessStatus; /* 0x17 */ - U16 DevHandle; /* 0x18 */ - U8 AttachedPhyIdentifier; /* 0x1A */ - U8 ZoneGroup; /* 0x1B */ - U32 DeviceInfo; /* 0x1C */ - U16 Flags; /* 0x20 */ - U8 PhysicalPort; /* 0x22 */ - U8 MaxPortConnections; /* 0x23 */ - U64 DeviceName; /* 0x24 */ - U8 PortGroups; /* 0x2C */ - U8 DmaGroup; /* 0x2D */ - U8 ControlGroup; /* 0x2E */ - U8 EnclosureLevel; /* 0x2F */ - U8 ConnectorName[4]; /* 0x30 */ - U32 Reserved3; /* 0x34 */ -} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, - Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; - -#define MPI2_SASDEVICE0_PAGEVERSION (0x09) - -/* values for SAS Device Page 0 AccessStatus field */ -#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) -#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) -#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) -#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) -#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) -#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) -#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) -#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) -/* specific values for SATA Init failures */ -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) -#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) - -/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ - -/* values for SAS Device Page 0 Flags field */ -#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) -#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) -#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) -#define 
MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) -#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) -#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) -#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) -#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) -#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004) -#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) -#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) - -/* SAS Device Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U64 SASAddress; /* 0x0C */ - U32 Reserved2; /* 0x14 */ - U16 DevHandle; /* 0x18 */ - U16 Reserved3; /* 0x1A */ - U8 InitialRegDeviceFIS[20];/* 0x1C */ -} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, - Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t; - -#define MPI2_SASDEVICE1_PAGEVERSION (0x01) - - -/**************************************************************************** -* SAS PHY Config Pages -****************************************************************************/ - -/* SAS PHY Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 OwnerDevHandle; /* 0x08 */ - U16 Reserved1; /* 0x0A */ - U16 AttachedDevHandle; /* 0x0C */ - U8 AttachedPhyIdentifier; /* 0x0E */ - U8 Reserved2; /* 0x0F */ - U32 AttachedPhyInfo; /* 0x10 */ - U8 ProgrammedLinkRate; /* 0x14 */ - U8 HwLinkRate; /* 0x15 */ - U8 ChangeCount; /* 0x16 */ - U8 Flags; /* 0x17 */ - U32 PhyInfo; /* 0x18 */ - U8 NegotiatedLinkRate; /* 0x1C */ - U8 Reserved3; /* 0x1D */ - U16 Reserved4; /* 0x1E */ -} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, - Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t; - -#define MPI2_SASPHY0_PAGEVERSION (0x03) - -/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ - -/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ - -/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ - -/* values for SAS PHY Page 0 Flags field */ -#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) - -/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ - -/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - - -/* SAS PHY Page 1 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 InvalidDwordCount; /* 0x0C */ - U32 RunningDisparityErrorCount; /* 0x10 */ - U32 LossDwordSynchCount; /* 0x14 */ - U32 PhyResetProblemCount; /* 0x18 */ -} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, - Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t; - -#define MPI2_SASPHY1_PAGEVERSION (0x01) - - -/* SAS PHY Page 2 */ - -typedef struct _MPI2_SASPHY2_PHY_EVENT -{ - U8 PhyEventCode; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 PhyEventInfo; /* 0x04 */ -} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT, - Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t; - -/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ - - -/* - * Host code (drivers, BIOS, utilities, etc.) 
should leave this define set to - * one and check the value returned for NumPhyEvents at runtime. - */ -#ifndef MPI2_SASPHY2_PHY_EVENT_MAX -#define MPI2_SASPHY2_PHY_EVENT_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 NumPhyEvents; /* 0x0C */ - U8 Reserved2; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /* 0x10 */ -} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, - Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t; - -#define MPI2_SASPHY2_PAGEVERSION (0x00) - - -/* SAS PHY Page 3 */ - -typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG -{ - U8 PhyEventCode; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U8 CounterType; /* 0x04 */ - U8 ThresholdWindow; /* 0x05 */ - U8 TimeUnits; /* 0x06 */ - U8 Reserved3; /* 0x07 */ - U32 EventThreshold; /* 0x08 */ - U16 ThresholdFlags; /* 0x0C */ - U16 Reserved4; /* 0x0E */ -} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, - Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t; - -/* values for PhyEventCode field */ -#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) -#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) -#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) -#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) -#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) -#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) -#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) -#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) -#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) -#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) -#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) -#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) -#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) -#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) -#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) -#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) -#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) -#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) -#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) -#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) -#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) -#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) -#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) -#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) -#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) -#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) -#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) -#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) -#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) -#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) -#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) -#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) -#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) -#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) -#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) -#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) -#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) -/* Following codes are product specific and in MPI v2.6 and later */ -#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) -#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) -#define 
MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) -#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) -#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) -#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) -#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) -#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) -#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) -#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) - -/* values for the CounterType field */ -#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) -#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) -#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) - -/* values for the TimeUnits field */ -#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) -#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) -#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) -#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) - -/* values for the ThresholdFlags field */ -#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) -#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhyEvents at runtime. - */ -#ifndef MPI2_SASPHY3_PHY_EVENT_MAX -#define MPI2_SASPHY3_PHY_EVENT_MAX (1) -#endif - -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 NumPhyEvents; /* 0x0C */ - U8 Reserved2; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */ -} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, - Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t; - -#define MPI2_SASPHY3_PAGEVERSION (0x00) - - -/* SAS PHY Page 4 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 Reserved1; /* 0x08 */ - U8 Reserved2; /* 0x0A */ - U8 Flags; /* 0x0B */ - U8 InitialFrame[28]; /* 0x0C */ -} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, - Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t; - -#define MPI2_SASPHY4_PAGEVERSION (0x00) - -/* values for the Flags field */ -#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) -#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) - - - - -/**************************************************************************** -* SAS Port Config Pages -****************************************************************************/ - -/* SAS Port Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 PortNumber; /* 0x08 */ - U8 PhysicalPort; /* 0x09 */ - U8 PortWidth; /* 0x0A */ - U8 PhysicalPortWidth; /* 0x0B */ - U8 ZoneGroup; /* 0x0C */ - U8 Reserved1; /* 0x0D */ - U16 Reserved2; /* 0x0E */ - U64 SASAddress; /* 0x10 */ - U32 DeviceInfo; /* 0x18 */ - U32 Reserved3; /* 0x1C */ - U32 Reserved4; /* 0x20 */ -} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, - Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t; - -#define MPI2_SASPORT0_PAGEVERSION (0x00) - -/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ - - -/**************************************************************************** -* SAS Enclosure Config Pages -****************************************************************************/ - -/* SAS Enclosure Page 0, Enclosure Page 0 */ - -typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 
0x00 */ - U32 Reserved1; /* 0x08 */ - U64 EnclosureLogicalID; /* 0x0C */ - U16 Flags; /* 0x14 */ - U16 EnclosureHandle; /* 0x16 */ - U16 NumSlots; /* 0x18 */ - U16 StartSlot; /* 0x1A */ - U8 ChassisSlot; /* 0x1C */ - U8 EnclosureLevel; /* 0x1D */ - U16 SEPDevHandle; /* 0x1E */ - U8 OEMRD; /* 0x20 */ - U8 Reserved1a; /* 0x21 */ - U16 Reserved2; /* 0x22 */ - U32 Reserved3; /* 0x24 */ -} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, - Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t, - MPI26_CONFIG_PAGE_ENCLOSURE_0, - MPI2_POINTER PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, - Mpi26EnclosurePage0_t, MPI2_POINTER pMpi26EnclosurePage0_t; - -#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) - -/* values for SAS Enclosure Page 0 Flags field */ -#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080) -#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) -#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) -#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) -#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) - -#define MPI26_ENCLOSURE0_PAGEVERSION (0x04) - -/* Values for Enclosure Page 0 Flags field */ -#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080) -#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) -#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) -#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) -#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F) -#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) -#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) -#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) -#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) -#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) -#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) -#define MPI26_ENCLS0_FLAGS_MNG_PCIE_SW_SES_ENCL (0x0006) - -/**************************************************************************** -* Log Config Page -****************************************************************************/ - -/* Log Page 0 */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumLogEntries at runtime. 
- */
-#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
-#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
-#endif
-
-#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
-
-typedef struct _MPI2_LOG_0_ENTRY
-{
- U64 TimeStamp; /* 0x00 */
- U32 Reserved1; /* 0x08 */
- U16 LogSequence; /* 0x0C */
- U16 LogEntryQualifier; /* 0x0E */
- U8 VP_ID; /* 0x10 */
- U8 VF_ID; /* 0x11 */
- U16 Reserved2; /* 0x12 */
- U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */
-} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY,
- Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t;
-
-/* values for Log Page 0 LogEntry LogEntryQualifier field */
-#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
-#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
-#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
-#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
-#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
-
-typedef struct _MPI2_CONFIG_PAGE_LOG_0
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U32 Reserved1; /* 0x08 */
- U32 Reserved2; /* 0x0C */
- U16 NumLogEntries; /* 0x10 */
- U16 Reserved3; /* 0x12 */
- MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */
-} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0,
- Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t;
-
-#define MPI2_LOG_0_PAGEVERSION (0x02)
-
-
-/****************************************************************************
-* RAID Config Page
-****************************************************************************/
-
-/* RAID Page 0 */
-
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check the value returned for NumElements at runtime.
- */
-#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
-#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
-#endif
-
-typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT
-{
- U16 ElementFlags; /* 0x00 */
- U16 VolDevHandle; /* 0x02 */
- U8 HotSparePool; /* 0x04 */
- U8 PhysDiskNum; /* 0x05 */
- U16 PhysDiskDevHandle; /* 0x06 */
-} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
- MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
- Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t;
-
-/* values for the ElementFlags field */
-#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
-#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
-#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
-#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
-#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
-
-
-typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U8 NumHotSpares; /* 0x08 */
- U8 NumPhysDisks; /* 0x09 */
- U8 NumVolumes; /* 0x0A */
- U8 ConfigNum; /* 0x0B */
- U32 Flags; /* 0x0C */
- U8 ConfigGUID[24]; /* 0x10 */
- U32 Reserved1; /* 0x28 */
- U8 NumElements; /* 0x2C */
- U8 Reserved2; /* 0x2D */
- U16 Reserved3; /* 0x2E */
- MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */
-} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
- Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t;
-
-#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
-
-/* values for RAID Configuration Page 0 Flags field */
-#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
-
-
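The one-element ConfigElement[] array above is only a compile-time placeholder; the IOC reports the real element count in NumElements, so host code sizes its buffer and iterates at runtime. A minimal sketch of that pattern (assuming mpi2.h and mpi2_cnfg.h from this tree are on the include path, that the caller has already read the raw page into a sufficiently large buffer, and ignoring the little-endian conversions real driver code would apply):

#include <stdio.h>

#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Walk RAID Configuration Page 0 after it has been read from the IOC.
 * ConfigElement[] is declared with MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
 * entries, so the element count must come from NumElements, and `buf`
 * must have been allocated large enough to hold all of them. */
static void walk_raid_config0(const void *buf)
{
	const MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 *page = buf;
	unsigned int i;

	for (i = 0; i < page->NumElements; i++) {
		const MPI2_RAIDCONFIG0_CONFIG_ELEMENT *e = &page->ConfigElement[i];
		unsigned int type = e->ElementFlags &
				    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		printf("element %u: type %u volume 0x%04x physdisk 0x%04x\n",
		       i, type, e->VolDevHandle, e->PhysDiskDevHandle);
	}
}

The same convention applies to every page in this file whose array-size macro carries the "leave this define set to one" comment: NumPhys, NumGroups, NumPhyEvents, NumLogEntries and NumElements all come back from the IOC at runtime.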
-/****************************************************************************
-* Driver Persistent Mapping Config Pages
-****************************************************************************/
-
-/* Driver Persistent Mapping Page 0 */
-
-typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY
-{
- U64 PhysicalIdentifier; /* 0x00 */
- U16 MappingInformation; /* 0x08 */
- U16 DeviceIndex; /* 0x0A */
- U32 PhysicalBitsMapping; /* 0x0C */
- U32 Reserved1; /* 0x10 */
-} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
- Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t;
-
-typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */
-} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
- MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
- Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t;
-
-#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
-
-/* values for Driver Persistent Mapping Page 0 MappingInformation field */
-#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
-#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
-#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
-
-
-/****************************************************************************
-* Ethernet Config Pages
-****************************************************************************/
-
-/* Ethernet Page 0 */
-
-/* IP address (union of IPv4 and IPv6) */
-typedef union _MPI2_ETHERNET_IP_ADDR
-{
- U32 IPv4Addr;
- U32 IPv6Addr[4];
-} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR,
- Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t;
-
-#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
-
-typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0
-{
- MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
- U8 NumInterfaces; /* 0x08 */
- U8 Reserved0; /* 0x09 */
- U16 Reserved1; /* 0x0A */
- U32 Status; /* 0x0C */
- U8 MediaState; /* 0x10 */
- U8 Reserved2; /* 0x11 */
- U16 Reserved3; /* 0x12 */
- U8 MacAddress[6]; /* 0x14 */
- U8 Reserved4; /* 0x1A */
- U8 Reserved5; /* 0x1B */
- MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */
- MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */
- MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */
- MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */
- MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */
- MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */
- U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
-} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
- Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t;
-
-#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
-
-/* values for Ethernet Page 0 Status field */
-#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
-#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
-#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
-#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
-#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
-#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
-#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
-#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
-#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
-#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
-#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
-#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
-
-/* values for Ethernet Page 0 MediaState field */
-#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
-#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
-#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
-
-#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
-#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
-#define MPI2_ETHPG0_MS_10MBIT (0x01)
-#define MPI2_ETHPG0_MS_100MBIT (0x02)
-#define MPI2_ETHPG0_MS_1GBIT (0x03)
-
-
-/* Ethernet Page 1 */
-
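Ethernet Page 0 above and the Ethernet Page 1 structure below encode MediaState the same way: duplex in bit 7, link speed in the low three bits. A small decoding sketch (a hypothetical helper, assuming only that mpi2.h and mpi2_cnfg.h are on the include path; `ms` is the MediaState byte from either page):

#include <stdio.h>

#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Pretty-print a MediaState byte from Ethernet Page 0 or Page 1. */
static void print_media_state(U8 ms)
{
	static const char * const speed_name[] = {
		"not connected", "10 Mbit", "100 Mbit", "1 Gbit"
	};
	unsigned int speed = ms & MPI2_ETHPG0_MS_CONNECT_SPEED_MASK;

	printf("%s duplex, %s\n",
	       (ms & MPI2_ETHPG0_MS_DUPLEX_MASK) == MPI2_ETHPG0_MS_FULL_DUPLEX
	       ? "full" : "half",
	       speed <= MPI2_ETHPG0_MS_1GBIT ? speed_name[speed] : "reserved");
}

Page 1 repeats the same encoding under the MPI2_ETHPG1_MS_* names, with "auto" (0x00) in place of "not connected" for the requested data rate.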
-typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved0; /* 0x08 */ - U32 Flags; /* 0x0C */ - U8 MediaState; /* 0x10 */ - U8 Reserved1; /* 0x11 */ - U16 Reserved2; /* 0x12 */ - U8 MacAddress[6]; /* 0x14 */ - U8 Reserved3; /* 0x1A */ - U8 Reserved4; /* 0x1B */ - MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */ - MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */ - MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */ - MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */ - MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */ - U32 Reserved5; /* 0x6C */ - U32 Reserved6; /* 0x70 */ - U32 Reserved7; /* 0x74 */ - U32 Reserved8; /* 0x78 */ - U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ -} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1, - Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t; - -#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) - -/* values for Ethernet Page 1 Flags field */ -#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) -#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) -#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) -#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) -#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) -#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) -#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) -#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) -#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) - -/* values for Ethernet Page 1 MediaState field */ -#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) -#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) -#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) - -#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) -#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) -#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) -#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) -#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) - - -/**************************************************************************** -* Extended Manufacturing Config Pages -****************************************************************************/ - -/* - * Generic structure to use for product-specific extended manufacturing pages - * (currently Extended Manufacturing Page 40 through Extended Manufacturing - * Page 60). 
- */ - -typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 ProductSpecificInfo; /* 0x08 */ -} MPI2_CONFIG_PAGE_EXT_MAN_PS, - MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, - Mpi2ExtManufacturingPagePS_t, MPI2_POINTER pMpi2ExtManufacturingPagePS_t; - -/* PageVersion should be provided by product-specific code */ - - -/**************************************************************************** -* values for fields used by several types of PCIe Config Pages -****************************************************************************/ - -/* values for NegotiatedLinkRates fields */ -#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) -/* link rates used for Negotiated Physical Link Rate */ -#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) -#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) -#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02) -#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03) -#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04) -#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05) - - -/**************************************************************************** -* PCIe IO Unit Config Pages (MPI v2.6 and later) -****************************************************************************/ - -/* PCIe IO Unit Page 0 */ - -typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA -{ - U8 Link; /* 0x00 */ - U8 LinkFlags; /* 0x01 */ - U8 PhyFlags; /* 0x02 */ - U8 NegotiatedLinkRate; /* 0x03 */ - U32 ControllerPhyDeviceInfo;/* 0x04 */ - U16 AttachedDevHandle; /* 0x08 */ - U16 ControllerDevHandle; /* 0x0A */ - U32 EnumerationStatus; /* 0x0C */ - U32 Reserved1; /* 0x10 */ -} MPI26_PCIE_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, - Mpi26PCIeIOUnit0PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit0PhyData_t; - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. 
- */ -#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX -#define MPI26_PCIE_IOUNIT0_PHY_MAX (1) -#endif - -typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 NumPhys; /* 0x0C */ - U8 InitStatus; /* 0x0D */ - U16 Reserved3; /* 0x0E */ - MPI26_PCIE_IO_UNIT0_PHY_DATA PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /* 0x10 */ -} MPI26_CONFIG_PAGE_PIOUNIT_0, - MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, - Mpi26PCIeIOUnitPage0_t, MPI2_POINTER pMpi26PCIeIOUnitPage0_t; - -#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00) - -/* values for PCIe IO Unit Page 0 LinkFlags */ -#define MPI26_PCIEIOUNIT0_LF_ENUMERATION_IN_PROGRESS (0x08) -#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_MASK (0x01) -#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_PAGE1 (0x00) -#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_BACKPLANE (0x01) - -/* values for PCIe IO Unit Page 0 PhyFlags */ -#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) - -/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - -/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ - -/* values for PCIe IO Unit Page 0 EnumerationStatus */ -#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) -#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000) - - -/* PCIe IO Unit Page 1 */ - -typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA -{ - U8 Link; /* 0x00 */ - U8 LinkFlags; /* 0x01 */ - U8 PhyFlags; /* 0x02 */ - U8 MaxMinLinkRate; /* 0x03 */ - U32 ControllerPhyDeviceInfo; /* 0x04 */ - U32 Reserved1; /* 0x08 */ -} MPI26_PCIE_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, - Mpi26PCIeIOUnit1PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit1PhyData_t; - -/* values for LinkFlags */ -#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00) -#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01) -#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumPhys at runtime. 
- */ -#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX -#define MPI26_PCIE_IOUNIT1_PHY_MAX (1) -#endif - -typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 ControlFlags; /* 0x08 */ - U16 Reserved; /* 0x0A */ - U16 AdditionalControlFlags; /* 0x0C */ - U16 NVMeMaxQueueDepth; /* 0x0E */ - U8 NumPhys; /* 0x10 */ - U8 DMDReportPCIe; /* 0x11 */ - U16 Reserved2; /* 0x12 */ - MPI26_PCIE_IO_UNIT1_PHY_DATA PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/* 0x14 */ -} MPI26_CONFIG_PAGE_PIOUNIT_1, - MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, - Mpi26PCIeIOUnitPage1_t, MPI2_POINTER pMpi26PCIeIOUnitPage1_t; - -#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00) - -/* values for PCIe IO Unit Page 1 ControlFlags */ -#define MPI26_PCIEIOUNIT1_CF_BP_CONFIG_DISABLE (0x0080) -#define MPI26_PCIEIOUNIT1_CF_BP_AUTO_CLOCK_ENABLE (0x0060) -#define MPI26_PCIEIOUNIT1_CF_BP_CLK_MASK (0x0030) -#define MPI26_PCIEIOUNIT1_CF_BP_CLK_ALL_DIS (0x0000) -#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRIS_EN (0x0010) -#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRNS_EN (0x0020) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_MASK (0x000F) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_DISABLE (0x0000) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_2_5_MAX (0x0002) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_5_0_MAX (0x0003) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_8_0_MAX (0x0004) -#define MPI26_PCIEIOUNIT1_CF_BP_RATE_16_0_MAX (0x0005) - - -/* values for PCIe IO Unit Page 1 PhyFlags */ -#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) -#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01) - -/* values for PCIe IO Unit Page 1 MaxMinLinkRate */ -#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0) -#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4) -#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20) -#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30) -#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) -#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) - -/* values for PCIe IO Unit Page 1 DMDReportPCIe */ -#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) -#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00) -#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80) -#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F) - - -/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ - - -/**************************************************************************** -* PCIe Switch Config Pages (MPI v2.6 and later) -****************************************************************************/ - -/* PCIe Switch Page 0 */ - -typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 PhysicalPort; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 DevHandle; /* 0x0C */ - U16 ParentDevHandle; /* 0x0E */ - U8 NumPorts; /* 0x10 */ - U8 PCIeLevel; /* 0x11 */ - U16 Reserved3; /* 0x12 */ - U32 Reserved4; /* 0x14 */ - U32 Reserved5; /* 0x18 */ - U32 Reserved6; /* 0x1C */ -} MPI26_CONFIG_PAGE_PSWITCH_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_0, - Mpi26PCIeSwitchPage0_t, MPI2_POINTER pMpi26PCIeSwitchPage0_t; - -#define MPI26_PCIESWITCH0_PAGEVERSION (0x00) - - -/* PCIe Switch Page 1 */ - -typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 PhysicalPort; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U8 NumPorts; /* 0x0C */ - U8 PortNum; /* 0x0D */ - U16 AttachedDevHandle; /* 0x0E */ - U16 SwitchDevHandle; /* 0x10 */ - U8 NegotiatedPortWidth; /* 0x12 */ - U8 NegotiatedLinkRate; /* 0x13 */ - U16 Flags; /* 0x14 */ - U16 
Reserved4; /* 0x16 */ - U32 Reserved5; /* 0x18 */ -} MPI26_CONFIG_PAGE_PSWITCH_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_1, - Mpi26PCIeSwitchPage1_t, MPI2_POINTER pMpi26PCIeSwitchPage1_t; - -#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) - -/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - -/* defines for the Flags field */ -#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002) -#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001) - - - -/**************************************************************************** -* PCIe Device Config Pages (MPI v2.6 and later) -****************************************************************************/ - -/* PCIe Device Page 0 */ - -typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 Slot; /* 0x08 */ - U16 EnclosureHandle; /* 0x0A */ - U64 WWID; /* 0x0C */ - U16 ParentDevHandle; /* 0x14 */ - U8 PortNum; /* 0x16 */ - U8 AccessStatus; /* 0x17 */ - U16 DevHandle; /* 0x18 */ - U8 PhysicalPort; /* 0x1A */ - U8 Reserved1; /* 0x1B */ - U32 DeviceInfo; /* 0x1C */ - U32 Flags; /* 0x20 */ - U8 SupportedLinkRates; /* 0x24 */ - U8 MaxPortWidth; /* 0x25 */ - U8 NegotiatedPortWidth; /* 0x26 */ - U8 NegotiatedLinkRate; /* 0x27 */ - U8 EnclosureLevel; /* 0x28 */ - U8 Reserved2; /* 0x29 */ - U16 Reserved3; /* 0x2A */ - U8 ConnectorName[4]; /* 0x2C */ - U32 Reserved4; /* 0x30 */ - U32 Reserved5; /* 0x34 */ -} MPI26_CONFIG_PAGE_PCIEDEV_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, - Mpi26PCIeDevicePage0_t, MPI2_POINTER pMpi26PCIeDevicePage0_t; - -#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01) - -/* values for PCIe Device Page 0 AccessStatus field */ -#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00) -#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04) -#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02) -#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07) -#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08) -#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09) -#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A) -#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10) - -#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30) -#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31) -#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32) -#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33) -#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34) -#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35) -#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36) -#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37) -#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38) - -#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F) - -/* see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo field */ - -/* values for PCIe Device Page 0 Flags field */ -#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000) -#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000) -#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000) -#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000) -#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000) -#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400) -#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200) -#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100) -#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080) -#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040) -#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED 
(0x00000020) -#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010) -#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002) -#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001) - -/* values for PCIe Device Page 0 SupportedLinkRates field */ -#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08) -#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04) -#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02) -#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01) - -/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ - - -/* PCIe Device Page 2 */ - -typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U16 DevHandle; /* 0x08 */ - U8 ControllerResetTO; /* 0x0A */ - U8 Reserved1; /* 0x0B */ - U32 MaximumDataTransferSize;/* 0x0C */ - U32 Capabilities; /* 0x10 */ - U16 NOIOB; /* 0x14 */ - U16 ShutdownLatency; /* 0x16 */ - U16 VendorID; /* 0x18 */ - U16 DeviceID; /* 0x1A */ - U16 SubsystemVendorID; /* 0x1C */ - U16 SubsystemID; /* 0x1E */ - U8 RevisionID; /* 0x20 */ - U8 Reserved21[3]; /* 0x21 */ -} MPI26_CONFIG_PAGE_PCIEDEV_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, - Mpi26PCIeDevicePage2_t, MPI2_POINTER pMpi26PCIeDevicePage2_t; - -#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01) - -/* defines for PCIe Device Page 2 Capabilities field */ -#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) -#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) -#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) -#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) - -/* Defines for the NOIOB field */ -#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000) - - -/**************************************************************************** -* PCIe Link Config Pages (MPI v2.6 and later) -****************************************************************************/ - -/* PCIe Link Page 1 */ - -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 Link; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 CorrectableErrorCount; /* 0x0C */ - U16 NonFatalErrorCount; /* 0x10 */ - U16 Reserved3; /* 0x12 */ - U16 FatalErrorCount; /* 0x14 */ - U16 Reserved4; /* 0x16 */ -} MPI26_CONFIG_PAGE_PCIELINK_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_1, - Mpi26PcieLinkPage1_t, MPI2_POINTER pMpi26PcieLinkPage1_t; - -#define MPI26_PCIELINK1_PAGEVERSION (0x00) - -/* PCIe Link Page 2 */ - -typedef struct _MPI26_PCIELINK2_LINK_EVENT -{ - U8 LinkEventCode; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 LinkEventInfo; /* 0x04 */ -} MPI26_PCIELINK2_LINK_EVENT, MPI2_POINTER PTR_MPI26_PCIELINK2_LINK_EVENT, - Mpi26PcieLink2LinkEvent_t, MPI2_POINTER pMpi26PcieLink2LinkEvent_t; - -/* use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ - - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumLinkEvents at runtime. 
- */ -#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX -#define MPI26_PCIELINK2_LINK_EVENT_MAX (1) -#endif - -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 Link; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U8 NumLinkEvents; /* 0x0C */ - U8 Reserved3; /* 0x0D */ - U16 Reserved4; /* 0x0E */ - MPI26_PCIELINK2_LINK_EVENT LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /* 0x10 */ -} MPI26_CONFIG_PAGE_PCIELINK_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_2, - Mpi26PcieLinkPage2_t, MPI2_POINTER pMpi26PcieLinkPage2_t; - -#define MPI26_PCIELINK2_PAGEVERSION (0x00) - - -/* PCIe Link Page 3 */ - -typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG -{ - U8 LinkEventCode; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U8 CounterType; /* 0x04 */ - U8 ThresholdWindow; /* 0x05 */ - U8 TimeUnits; /* 0x06 */ - U8 Reserved3; /* 0x07 */ - U32 EventThreshold; /* 0x08 */ - U16 ThresholdFlags; /* 0x0C */ - U16 Reserved4; /* 0x0E */ -} MPI26_PCIELINK3_LINK_EVENT_CONFIG, MPI2_POINTER PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, - Mpi26PcieLink3LinkEventConfig_t, MPI2_POINTER pMpi26PcieLink3LinkEventConfig_t; - -/* values for LinkEventCode field */ -#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00) -#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01) -#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02) -#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03) -#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04) -#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05) -#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06) -#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07) -#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08) -#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E) -#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F) -#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10) -#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11) -#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12) - -/* values for the CounterType field */ -#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00) -#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01) -#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02) - -/* values for the TimeUnits field */ -#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00) -#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01) -#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02) -#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03) - -/* values for the ThresholdFlags field */ -#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001) - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check the value returned for NumLinkEvents at runtime. 
- */ -#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX -#define MPI26_PCIELINK3_LINK_EVENT_MAX (1) -#endif - -typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 -{ - MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ - U8 Link; /* 0x08 */ - U8 Reserved1; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U8 NumLinkEvents; /* 0x0C */ - U8 Reserved3; /* 0x0D */ - U16 Reserved4; /* 0x0E */ - MPI26_PCIELINK3_LINK_EVENT_CONFIG LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /* 0x10 */ -} MPI26_CONFIG_PAGE_PCIELINK_3, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_3, - Mpi26PcieLinkPage3_t, MPI2_POINTER pMpi26PcieLinkPage3_t; - -#define MPI26_PCIELINK3_PAGEVERSION (0x00) - - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 + * + * mpi2_cnfg.h Version: 02.00.48 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). + * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. 
+ * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. 
+ *                     Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ *                     and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11  02.00.18  Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ *                     Added IO Unit Page 8, IO Unit Page 9,
+ *                     and IO Unit Page 10.
+ *                     Added SASNotifyPrimitiveMasks field to
+ *                     MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11  02.00.19  Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11  02.00.20  Cleaned up a few comments.
+ * 08-24-11  02.00.21  Marked the IO Unit Page 7 PowerManagementCapabilities
+ *                     for PCIe link as obsolete.
+ *                     Added SpinupFlags field containing a Disable Spin-up
+ *                     bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ *                     SAS IO Unit Page 4.
+ * 11-18-11  02.00.22  Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ *                     Added UEFIVersion field to BIOS Page 1 and defined
+ *                     new BiosOptions bits.
+ *                     Incorporating additions for MPI v2.5.
+ * 11-27-12  02.00.23  Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ *                     Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12  02.00.24  Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ *                     obsolete for MPI v2.5 and later.
+ *                     Added some defines for 12G SAS speeds.
+ * 04-09-13  02.00.25  Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ *                     Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ *                     match the specification.
+ * 08-19-13  02.00.26  Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7
+ *                     for future use.
+ * 12-05-13  02.00.27  Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ *                     MPI2_CONFIG_PAGE_MAN_7.
+ *                     Added EnclosureLevel and ConnectorName fields to
+ *                     MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *                     Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ *                     MPI2_CONFIG_PAGE_SAS_DEV_0.
+ *                     Added EnclosureLevel field to
+ *                     MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ *                     Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ *                     MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14  02.00.28  Added more defines for the BiosOptions field of
+ *                     MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14  02.00.29  Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1,
+ *                     and more defines for the BiosOptions field.
+ * 11-18-14  02.00.30  Updated copyright information.
+ *                     Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
+ *                     Added AdapterOrderAux fields to BIOS Page 3.
+ * 03-16-15  02.00.31  Updated for MPI v2.6.
+ *                     Added BoardPowerRequirement, PCISlotPowerAllocation,
+ *                     and Flags field to IO Unit Page 7.
+ *                     Added IO Unit Page 11.
+ *                     Added new SAS Phy Event codes.
+ *                     Added PCIe configuration pages.
+ * 03-19-15  02.00.32  Fixed PCIe Link Config page structure names to be
+ *                     unique in first 32 characters.
+ * 05-25-15  02.00.33  Added more defines for the BiosOptions field of
+ *                     MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15  02.00.34  Added PCIe Device Page 2 SGL format capability.
+ * 12-18-15  02.00.35  Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16  02.00.36  Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ *                     Added Link field to PCIe Link Pages.
+ *                     Added EnclosureLevel and ConnectorName to PCIe
+ *                     Device Page 0.
+ *                     Added define for PCIE IoUnit page 1 max rate shift.
+ *                     Added comment for reserved ExtPageTypes.
+ *                     Added SAS 4 22.5 Gb/s speed support.
+ *                     Added PCIe 4 16.0 GT/sec speed support.
+ *                     Removed AHCI support.
+ *                     Removed SOP support.
+ *                     Added NegotiatedLinkRate and NegotiatedPortWidth to
+ *                     PCIe device page 0.
+ * 04-10-16  02.00.37  Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines.
+ * 07-01-16  02.00.38  Added Manufacturing page 7 Connector types.
+ *                     Changed declaration of ConnectorName in PCIe
+ *                     DevicePage0 to match SAS DevicePage 0.
+ *                     Added SATADeviceWaitTime to IO Unit Page 11.
+ *                     Added MPI26_MFGPAGE_DEVID_SAS4008.
+ *                     Added x16 PCIe width to IO Unit Page 7.
+ *                     Added LINKFLAGS to control SRIS in PCIe IO Unit
+ *                     page 1 phy data.
+ *                     Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16  02.00.39  Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related
+ *                     defines.
+ *                     Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ *                     MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17  02.00.40  Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ *                     Added ChassisSlot field to SAS Enclosure Page 0.
+ *                     Added ChassisSlot Valid bit (bit 5) to the Flags
+ *                     field in SAS Enclosure Page 0.
+ * 06-13-17  02.00.41  Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ *                     MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ *                     Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ *                     Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ *                     Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ *                     MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ *                     Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ *                     MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17  02.00.42  Added ControllerResetTO field to PCIe Device Page 2.
+ *                     Added NOIOB field to PCIe Device Page 2.
+ *                     Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ *                     the Capabilities field of PCIe Device Page 2.
+ * 07-22-18  02.00.43  Added defines for SAS3916 and SAS3816.
+ *                     Added WriteCache defines to IO Unit Page 1.
+ *                     Added MaxEnclosureLevel to BIOS Page 1.
+ *                     Added OEMRD to SAS Enclosure Page 1.
+ *                     Added DMDReportPCIe to PCIe IO Unit Page 1.
+ *                     Added Flags field and flags for Retimers to
+ *                     PCIe Switch Page 1.
+ * 08-02-18  02.00.44  Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18  02.00.45  Added ProductSpecific field at end of IOC Page 1.
+ * 08-28-18  02.00.46  Added NVMe Write Cache flag to IOUnitPage1.
+ *                     Added DMDReport Delay Time defines to
+ *                     PCIeIOUnitPage1.
+ * 12-17-18  02.00.47  Swapped locations of Slotx2 and Slotx4 in ManPage 7.
+ * 06-24-19  02.00.48  Added DEVID for PCI Switch and MPI endpoints.
+ *                     Added Man7 flag for Connector Lane.
+ *                     Modified NVMe WriteCache values.
+ *                     Added ConfigSource to PCIeIOUnit0.
+ *                     Added various PCI id info to PCIeDevice2.
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID + * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT + * Correct def of MPI26_PCIEIOUNIT1_CF_BP_AUTO_CLOCK_ENABLE + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_CNFG_H +#define MPI2_CNFG_H + +/***************************************************************************** +* Configuration Page Header and defines +*****************************************************************************/ + +/* Config Page Header */ +typedef struct _MPI2_CONFIG_PAGE_HEADER +{ + U8 PageVersion; /* 0x00 */ + U8 PageLength; /* 0x01 */ + U8 PageNumber; /* 0x02 */ + U8 PageType; /* 0x03 */ +} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER, + Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t; + +typedef union _MPI2_CONFIG_PAGE_HEADER_UNION +{ + MPI2_CONFIG_PAGE_HEADER Struct; + U8 Bytes[4]; + U16 Word16[2]; + U32 Word32; +} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION, + Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion; + +/* Extended Config Page Header */ +typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER +{ + U8 PageVersion; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 PageNumber; /* 0x02 */ + U8 PageType; /* 0x03 */ + U16 ExtPageLength; /* 0x04 */ + U8 ExtPageType; /* 0x06 */ + U8 Reserved2; /* 0x07 */ +} MPI2_CONFIG_EXTENDED_PAGE_HEADER, + MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, + Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t; + +typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION +{ + MPI2_CONFIG_PAGE_HEADER Struct; + MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; + U8 Bytes[8]; + U16 Word16[4]; + U32 Word32[2]; +} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion; + + +/* PageType field values */ +#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) + +#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI2_CONFIG_PAGETYPE_IOC (0x01) +#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) +#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) + +#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) + + +/* ExtPageType field values */ +#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) +#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B) /* MPI v2.6 and later */ +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C) /* MPI v2.6 and later */ +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D) /* MPI v2.6 and later */ +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E) /* MPI v2.6 and later */ +/* Product specific reserved values 0xE0 - 0xEF */ +/* Vendor specific reserved values 0xF0 - 0xFF 
*/ + + +/***************************************************************************** +* PageAddress defines +*****************************************************************************/ + +/* RAID Volume PageAddress format */ +#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF) + + +/* RAID Physical Disk PageAddress format */ +#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) +#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) +#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) + +#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) + + +/* SAS Expander PageAddress format */ +#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) + + +/* SAS Device PageAddress format */ +#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + + +/* SAS PHY PageAddress format */ +#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) + +#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) +#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) + + +/* SAS Port PageAddress format */ +#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) + +#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) + + +/* SAS Enclosure PageAddress format */ +#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/* Enclosure PageAddress format */ +#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/* RAID Configuration PageAddress format */ +#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) +#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) +#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) + +#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) + + +/* Driver Persistent Mapping PageAddress format */ +#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) +#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000) + +#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) +#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) +#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) + + +/* Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + +/* PCIe Switch PageAddress format */ +#define 
MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000) +#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16) + + +/* PCIe Device PageAddress format */ +#define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + +/* PCIe Link PageAddress format */ +#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000) +#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000) + +#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF) + + + +/**************************************************************************** +* Configuration messages +****************************************************************************/ + +/* Configuration Request Message */ +typedef struct _MPI2_CONFIG_REQUEST +{ + U8 Action; /* 0x00 */ + U8 SGLFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 ExtPageLength; /* 0x04 */ + U8 ExtPageType; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U8 Reserved2; /* 0x0C */ + U8 ProxyVF_ID; /* 0x0D */ + U16 Reserved4; /* 0x0E */ + U32 Reserved3; /* 0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ + U32 PageAddress; /* 0x18 */ + MPI2_SGE_IO_UNION PageBufferSGE; /* 0x1C */ +} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST, + Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t; + +/* values for the Action field */ +#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) +#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) +#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) +#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) + +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/* Config Reply Message */ +typedef struct _MPI2_CONFIG_REPLY +{ + U8 Action; /* 0x00 */ + U8 SGLFlags; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 ExtPageLength; /* 0x04 */ + U8 ExtPageType; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U16 Reserved2; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x14 */ +} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY, + Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t; + + + +/***************************************************************************** +* +* C o n f i g u r a t i o n P a g e s +* +*****************************************************************************/ + +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ + +#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) + +/* MPI v2.0 SAS products */ +#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) +#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) +#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) 
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) +#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) +#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) +#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + +#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) + +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) +#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) + +/* MPI v2.5 SAS products */ +#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) +#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) +#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) +#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) +#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) +#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) + +/* MPI v2.6 SAS Products */ +#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9) +#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4) +#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5) +#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6) +#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7) +#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8) +#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0) +#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1) +#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) +#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) + +#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA) +#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB) +#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC) +#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) +#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) +#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) + +#define MPI26_MFGPAGE_DEVID_PEX88000 (0x00B2) + +#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0) +#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) +#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2) +#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6) +#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7) + +#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP (0x02B0) +#define MPI26_MFGPAGE_DEVID_SWCH_MPI_EP_1 (0x02B1) + + +/* Manufacturing Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 ChipName[16]; /* 0x04 */ + U8 ChipRevision[8]; /* 0x14 */ + U8 BoardName[16]; /* 0x1C */ + U8 BoardAssembly[16]; /* 0x2C */ + U8 BoardTracerNumber[16]; /* 0x3C */ +} MPI2_CONFIG_PAGE_MAN_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0, + Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t; + +#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) + + +/* Manufacturing Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 VPD[256]; /* 0x04 */ +} MPI2_CONFIG_PAGE_MAN_1, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1, + Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t; + +#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) + + +typedef struct _MPI2_CHIP_REVISION_ID +{ + U16 DeviceID; /* 0x00 */ + U8 PCIRevisionID; /* 0x02 */ + U8 Reserved; /* 0x03 */ +} MPI2_CHIP_REVISION_ID, 
MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID, + Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t; + + +/* Manufacturing Page 2 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_2 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */ + U32 HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */ +} MPI2_CONFIG_PAGE_MAN_2, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2, + Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t; + +#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) + + +/* Manufacturing Page 3 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_3_INFO_WORDS +#define MPI2_MAN_PAGE_3_INFO_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_3 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /* 0x04 */ + U32 Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */ +} MPI2_CONFIG_PAGE_MAN_3, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t; + +#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) + + +/* Manufacturing Page 4 */ + +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS +{ + U8 PowerSaveFlags; /* 0x00 */ + U8 InternalOperationsSleepTime; /* 0x01 */ + U8 InternalOperationsRunTime; /* 0x02 */ + U8 HostIdleTime; /* 0x03 */ +} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t; + +/* defines for the PowerSaveFlags field */ +#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) +#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) +#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) +#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) + +typedef struct _MPI2_CONFIG_PAGE_MAN_4 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Flags; /* 0x08 */ + U8 InquirySize; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + U8 InquiryData[56]; /* 0x10 */ + U32 RAID0VolumeSettings; /* 0x48 */ + U32 RAID1EVolumeSettings; /* 0x4C */ + U32 RAID1VolumeSettings; /* 0x50 */ + U32 RAID10VolumeSettings; /* 0x54 */ + U32 Reserved4; /* 0x58 */ + U32 Reserved5; /* 0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /* 0x60 */ + U8 MaxOCEDisks; /* 0x64 */ + U8 ResyncRate; /* 0x65 */ + U16 DataScrubDuration; /* 0x66 */ + U8 MaxHotSpares; /* 0x68 */ + U8 MaxPhysDisksPerVol; /* 0x69 */ + U8 MaxPhysDisks; /* 0x6A */ + U8 MaxVolumes; /* 0x6B */ +} MPI2_CONFIG_PAGE_MAN_4, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t; + +#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) + +/* Manufacturing Page 4 Flags field */ +#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) +#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) + +#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) +#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) +#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) + +#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) +#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) +#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) +#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) +#define 
MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) + +#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) +#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) +#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) +#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) + +#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) +#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040) +#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020) +#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010) +#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008) +#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004) +#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002) +#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) + + +/* Manufacturing Page 5 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES +#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_MANUFACTURING5_ENTRY +{ + U64 WWID; /* 0x00 */ + U64 DeviceName; /* 0x08 */ +} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY, + Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_MAN_5 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + MPI2_MANUFACTURING5_ENTRY Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x08 */ +} MPI2_CONFIG_PAGE_MAN_5, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5, + Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t; + +#define MPI2_MANUFACTURING5_PAGEVERSION (0x03) + + +/* Manufacturing Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo;/* 0x04 */ +} MPI2_CONFIG_PAGE_MAN_6, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6, + Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t; + +#define MPI2_MANUFACTURING6_PAGEVERSION (0x00) + + +/* Manufacturing Page 7 */ + +typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO +{ + U32 Pinout; /* 0x00 */ + U8 Connector[16]; /* 0x04 */ + U8 Location; /* 0x14 */ + U8 ReceptacleID; /* 0x15 */ + U16 Slot; /* 0x16 */ + U16 Slotx2; /* 0x18 */ + U16 Slotx4; /* 0x1A */ +} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO, + Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t; + +/* defines for the Pinout field */ +#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) +#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) + +#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) +#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) +#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) +#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) +#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) +#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) +#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) +#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) +#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) +#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) +#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F) +#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10) +#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11) 
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12) +#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13) + +/* defines for the Location field */ +#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) +#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) +#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) +#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08) +#define MPI2_MANPAGE7_LOCATION_AUTO (0x10) +#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) +#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) + +/* defines for the Slot field */ +#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX +#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U32 Flags; /* 0x0C */ + U8 EnclosureName[16]; /* 0x10 */ + U8 NumPhys; /* 0x20 */ + U8 Reserved3; /* 0x21 */ + U16 Reserved4; /* 0x22 */ + MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */ +} MPI2_CONFIG_PAGE_MAN_7, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7, + Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t; + +#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) + +/* defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) +#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) + +#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) +#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010) + +/* + * Generic structure to use for product-specific manufacturing pages + * (currently Manufacturing Page 8 through Manufacturing Page 31). 
+ */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_PS +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo;/* 0x04 */ +} MPI2_CONFIG_PAGE_MAN_PS, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS, + Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t; + +#define MPI2_MANUFACTURING8_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING9_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING10_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING11_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING12_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING13_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING14_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING15_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING16_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING17_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING18_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING19_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING20_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING21_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING22_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING23_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING24_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING25_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING26_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING27_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING28_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING29_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING30_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING31_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ + +/* IO Unit Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U64 UniqueValue; /* 0x04 */ + MPI2_VERSION_UNION NvdataVersionDefault; /* 0x08 */ + MPI2_VERSION_UNION NvdataVersionPersistent; /* 0x0A */ +} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, + Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t; + +#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) + + +/* IO Unit Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Flags; /* 0x04 */ +} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, + Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t; + +#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) + +/* IO Unit Page 1 Flags defines */ +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) +#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) +#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) +#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) +#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) +#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) +#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) +#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) +#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) +#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) +#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) +#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) +#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) + + +/* IO Unit 
Page 3 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for GPIOCount at runtime. + */ +#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 GPIOCount; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U16 GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */ +} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, + Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t; + +#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) + +/* defines for IO Unit Page 3 GPIOVal field */ +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) + + +/* IO Unit Page 5 */ + +/* + * Upper layer code (drivers, utilities, etc.) should leave this define set to + * one and check the value returned for NumDmaEngines at runtime. + */ +#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES +#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U64 RaidAcceleratorBufferBaseAddress; /* 0x04 */ + U64 RaidAcceleratorBufferSize; /* 0x0C */ + U64 RaidAcceleratorControlBaseAddress; /* 0x14 */ + U8 RAControlSize; /* 0x1C */ + U8 NumDmaEngines; /* 0x1D */ + U8 RAMinControlSize; /* 0x1E */ + U8 RAMaxControlSize; /* 0x1F */ + U32 Reserved1; /* 0x20 */ + U32 Reserved2; /* 0x24 */ + U32 Reserved3; /* 0x28 */ + U32 DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t; + +#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) + +/* defines for IO Unit Page 5 DmaEngineCapabilities field */ +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) +#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) + +#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) +#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) +#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) +#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) + + +/* IO Unit Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 Flags; /* 0x04 */ + U8 RAHostControlSize; /* 0x06 */ + U8 Reserved0; /* 0x07 */ + U64 RaidAcceleratorHostControlBaseAddress; /* 0x08 */ + U32 Reserved1; /* 0x10 */ + U32 Reserved2; /* 0x14 */ + U32 Reserved3; /* 0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t; + +#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) + +/* defines for IO Unit Page 6 Flags field */ +#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) + + +/* IO Unit Page 7 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 CurrentPowerMode; /* 0x04 */ /* reserved in MPI 2.0 */ + U8 PreviousPowerMode; /* 0x05 */ /* reserved in MPI 2.0 */ + U8 PCIeWidth; /* 0x06 */ + U8 PCIeSpeed; /* 0x07 */ + U32 ProcessorState; /* 0x08 */ + U32 PowerManagementCapabilities; /* 0x0C */ + U16 IOCTemperature; /* 0x10 */ + U8 IOCTemperatureUnits; /* 0x12 */ + U8 IOCSpeed; /* 0x13 */ + U16 BoardTemperature; /* 0x14 */ + U8 BoardTemperatureUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 
BoardPowerRequirement; /* 0x18 */ /* reserved prior to MPI v2.6 */ + U32 PCISlotPowerAllocation; /* 0x1C */ /* reserved prior to MPI v2.6 */ + U8 Flags; /* 0x20 */ /* reserved prior to MPI v2.6 */ + U8 Reserved6; /* 0x21 */ + U16 Reserved7; /* 0x22 */ + U32 Reserved8; /* 0x24 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) + +/* defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) +#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) +#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) +#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) + +#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) +#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) +#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) +#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) +#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) + + +/* defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10) + +/* defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) + +/* defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/* defines for IO Unit Page 7 PowerManagementCapabilities field */ +#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) +#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) +#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) +#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) /* obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) /* obsolete */ +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) /* obsolete */ +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) /* obsolete */ + +/* obsolete names for the PowerManagementCapabilities bits (above) */ +#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED 
(0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */ + + +/* defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/* defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + +/* defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + +/* defines for IO Unit Page 7 Flags field */ +#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) + + +/* IO Unit Page 8 */ + +#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) + +typedef struct _MPI2_IOUNIT8_SENSOR +{ + U16 Flags; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U16 Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */ + U32 Reserved2; /* 0x0C */ + U32 Reserved3; /* 0x10 */ + U32 Reserved4; /* 0x14 */ +} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t; + +/* defines for IO Unit Page 8 Sensor Flags field */ +#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U8 NumSensors; /* 0x0C */ + U8 PollingInterval; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_IOUNIT8_SENSOR Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t; + +#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) + + +/* IO Unit Page 9 */ + +typedef struct _MPI2_IOUNIT9_SENSOR +{ + U16 CurrentTemperature; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U8 Flags; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U16 Reserved3; /* 0x06 */ + U32 Reserved4; /* 0x08 */ + U32 Reserved5; /* 0x0C */ +} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t; + +/* defines for IO Unit Page 9 Sensor Flags field */ +#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumSensors at runtime. 
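+ *
+ * For example (an illustrative sketch, not part of the MPI specification),
+ * the full size of a page reporting 'num_sensors' sensors can be derived
+ * from the one-element default declared below:
+ */
+#if 0 /* example only - never compiled */
+static U32 example_iounit9_page_size(U8 num_sensors)
+{
+ U32 size = sizeof(MPI2_CONFIG_PAGE_IO_UNIT_9);
+
+ if (num_sensors > MPI2_IOUNITPAGE9_SENSOR_ENTRIES)
+  size += (num_sensors - MPI2_IOUNITPAGE9_SENSOR_ENTRIES) *
+   sizeof(MPI2_IOUNIT9_SENSOR);
+ return size;
+}
+#endif
+/*
+ * The build-time default follows.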
+ */ +#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U8 NumSensors; /* 0x0C */ + U8 Reserved4; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_IOUNIT9_SENSOR Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t; + +#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) + + +/* IO Unit Page 10 */ + +typedef struct _MPI2_IOUNIT10_FUNCTION +{ + U8 CreditPercent; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ +} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumFunctions at runtime. + */ +#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES +#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumFunctions; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + MPI2_IOUNIT10_FUNCTION Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES]; /* 0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t; + +#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) + + +/* IO Unit Page 11 (for MPI v2.6 and later) */ + +typedef struct _MPI26_IOUNIT11_SPINUP_GROUP +{ + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI26_IOUNIT11_SPINUP_GROUP, MPI2_POINTER PTR_MPI26_IOUNIT11_SPINUP_GROUP, + Mpi26IOUnit11SpinupGroup_t, MPI2_POINTER pMpi26IOUnit11SpinupGroup_t; + +/* defines for IO Unit Page 11 SpinupFlags */ +#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * four and check the value returned for NumPhys at runtime. 
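+ *
+ * For example (an illustrative sketch, not part of the MPI specification),
+ * the spinup parameters for a given phy are found by masking its PHY[]
+ * byte and indexing the group table; the caller must ensure phy < NumPhys:
+ */
+#if 0 /* example only - never compiled */
+static MPI26_IOUNIT11_SPINUP_GROUP *
+example_iounit11_spinup_params(MPI26_CONFIG_PAGE_IO_UNIT_11 *page, U8 phy)
+{
+ U8 group = page->PHY[phy] & MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK;
+
+ return &page->SpinupGroupParameters[group];
+}
+#endif
+/*
+ * The build-time default follows.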
+ */ +#ifndef MPI26_IOUNITPAGE11_PHY_MAX +#define MPI26_IOUNITPAGE11_PHY_MAX (4) +#endif + +typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */ + U32 Reserved2; /* 0x18 */ + U32 Reserved3; /* 0x1C */ + U32 Reserved4; /* 0x20 */ + U8 BootDeviceWaitTime; /* 0x24 */ + U8 SATADeviceWaitTime; /* 0x25 */ + U8 PCIeWaitTime; /* 0x26 */ + U8 Reserved6; /* 0x27 */ + U8 NumPhys; /* 0x28 */ + U8 PEInitialSpinupDelay; /* 0x29 */ + U8 PEReplyDelay; /* 0x2A */ + U8 Flags; /* 0x2B */ + U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/* 0x2C */ +} MPI26_CONFIG_PAGE_IO_UNIT_11, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, + Mpi26IOUnitPage11_t, MPI2_POINTER pMpi26IOUnitPage11_t; + +#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) + +/* defines for Flags field */ +#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01) + +/* defines for PHY field */ +#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03) + + + +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ + +/* IOC Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U16 VendorID; /* 0x0C */ + U16 DeviceID; /* 0x0E */ + U8 RevisionID; /* 0x10 */ + U8 Reserved3; /* 0x11 */ + U16 Reserved4; /* 0x12 */ + U32 ClassCode; /* 0x14 */ + U16 SubsystemVendorID; /* 0x18 */ + U16 SubsystemID; /* 0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t; + +#define MPI2_IOCPAGE0_PAGEVERSION (0x02) + + +/* IOC Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Flags; /* 0x04 */ + U32 CoalescingTimeout; /* 0x08 */ + U8 CoalescingDepth; /* 0x0C */ + U8 PCISlotNum; /* 0x0D */ + U8 PCIBusNum; /* 0x0E */ + U8 PCIDomainSegment; /* 0x0F */ + U32 Reserved1; /* 0x10 */ + U32 ProductSpecific; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t; + +#define MPI2_IOCPAGE1_PAGEVERSION (0x05) + +/* defines for IOC Page 1 Flags field */ +#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) + +#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) + +/* IOC Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_6 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 CapabilitiesFlags; /* 0x04 */ + U8 MaxDrivesRAID0; /* 0x08 */ + U8 MaxDrivesRAID1; /* 0x09 */ + U8 MaxDrivesRAID1E; /* 0x0A */ + U8 MaxDrivesRAID10; /* 0x0B */ + U8 MinDrivesRAID0; /* 0x0C */ + U8 MinDrivesRAID1; /* 0x0D */ + U8 MinDrivesRAID1E; /* 0x0E */ + U8 MinDrivesRAID10; /* 0x0F */ + U32 Reserved1; /* 0x10 */ + U8 MaxGlobalHotSpares; /* 0x14 */ + U8 MaxPhysDisks; /* 0x15 */ + U8 MaxVolumes; /* 0x16 */ + U8 MaxConfigs; /* 0x17 */ + U8 MaxOCEDisks; /* 0x18 */ + U8 Reserved2; /* 0x19 */ + U16 Reserved3; /* 0x1A */ + U32 SupportedStripeSizeMapRAID0; /* 0x1C */ + U32 SupportedStripeSizeMapRAID1E; /* 0x20 */ + U32 SupportedStripeSizeMapRAID10; /* 0x24 */ + U32 Reserved4; /* 0x28 */ + U32 Reserved5; /* 0x2C */ + U16 DefaultMetadataSize; /* 0x30 */ + U16 Reserved6; /* 0x32 */ + U16 MaxBadBlockTableEntries; /* 0x34 */ + U16 Reserved7; /* 0x36 */ + U32 IRNvsramVersion; /* 0x38 */ +} 
MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t; + +#define MPI2_IOCPAGE6_PAGEVERSION (0x05) + +/* defines for IOC Page 6 CapabilitiesFlags */ +#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) +#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) + + +/* IOC Page 7 */ + +#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) + +typedef struct _MPI2_CONFIG_PAGE_IOC_7 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */ + U16 SASBroadcastPrimitiveMasks; /* 0x18 */ + U16 SASNotifyPrimitiveMasks; /* 0x1A */ + U32 Reserved3; /* 0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t; + +#define MPI2_IOCPAGE7_PAGEVERSION (0x02) + + +/* IOC Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_8 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumDevsPerEnclosure; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U16 MaxPersistentEntries; /* 0x08 */ + U16 MaxNumPhysicalMappedIDs; /* 0x0A */ + U16 Flags; /* 0x0C */ + U16 Reserved3; /* 0x0E */ + U16 IRVolumeMappingFlags; /* 0x10 */ + U16 Reserved4; /* 0x12 */ + U32 Reserved5; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t; + +#define MPI2_IOCPAGE8_PAGEVERSION (0x00) + +/* defines for IOC Page 8 Flags field */ +#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) +#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) + +#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) +#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) + +#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) +#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) + +/* defines for IOC Page 8 IRVolumeMappingFlags */ +#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) + + +/**************************************************************************** +* BIOS Config Pages +****************************************************************************/ + +/* BIOS Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U32 BiosOptions; /* 0x04 */ + U32 IOCSettings; /* 0x08 */ + U8 SSUTimeout; /* 0x0C */ + U8 MaxEnclosureLevel; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 DeviceSettings; /* 0x10 */ + U16 NumberOfDevices; /* 0x14 */ + U16 UEFIVersion; /* 0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */ + U16 IOTimeoutSequential; /* 0x1A */ + U16 IOTimeoutOther; /* 0x1C */ + U16 IOTimeoutBlockDevicesRM; /* 0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; + +#define MPI2_BIOSPAGE1_PAGEVERSION (0x07) + +/* values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) +#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) + +#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) +#define 
MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) + +#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) +#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) + +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) + +/* values for BIOS Page 1 IOCSettings field */ +#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) +#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) +#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) +#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) +#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) +#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) +#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) + +#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) + +/* values for BIOS Page 1 DeviceSettings field */ +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) + +/* defines for BIOS Page 1 UEFIVersion field */ +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) + + + +/* BIOS Page 2 */ + +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER +{ + U32 Reserved1; /* 0x00 */ + U32 Reserved2; /* 0x04 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ +} MPI2_BOOT_DEVICE_ADAPTER_ORDER, + MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t; + +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID +{ + U64 SASAddress; /* 0x00 */ + U8 LUN[8]; /* 0x08 */ + U32 Reserved1; /* 0x10 */ + U32 Reserved2; /* 0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t; + +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT +{ + U64 EnclosureLogicalID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U16 SlotNumber; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + U32 Reserved4; /* 0x14 */ +} 
MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME
+{
+ U64 DeviceName; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE
+{
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+ U32 Reserved5; /* 0x14 */
+ U32 Reserved6; /* 0x18 */
+ U8 ReqBootDeviceForm; /* 0x1C */
+ U8 Reserved7; /* 0x1D */
+ U16 Reserved8; /* 0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /* 0x20 */
+ U8 ReqAltBootDeviceForm; /* 0x38 */
+ U8 Reserved9; /* 0x39 */
+ U16 Reserved10; /* 0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /* 0x3C */
+ U8 CurrentBootDeviceForm; /* 0x58 */
+ U8 Reserved11; /* 0x59 */
+ U16 Reserved12; /* 0x5A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /* 0x5C */
+} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/* values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/* BIOS Page 3 */
+
+#define MPI2_BIOSPAGE3_NUM_ADAPTER (4)
+
+typedef struct _MPI2_ADAPTER_INFO
+{
+ U8 PciBusNumber; /* 0x00 */
+ U8 PciDeviceAndFunctionNumber; /* 0x01 */
+ U16 AdapterFlags; /* 0x02 */
+} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_ADAPTER_ORDER_AUX
+{
+ U64 WWID; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_ADAPTER_ORDER_AUX, MPI2_POINTER PTR_MPI2_ADAPTER_ORDER_AUX,
+ Mpi2AdapterOrderAux_t, MPI2_POINTER pMpi2AdapterOrderAux_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 GlobalFlags; /* 0x04 */
+ U32 BiosVersion; /* 0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x0C */
+ U32 Reserved1; /* 0x1C */
+ MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; /* 0x20 */ /* MPI v2.5 and newer */
+} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x01)
+
+/* values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/* BIOS Page 4 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY
+{
+ U64 ReassignmentWWID; /* 0x00 */
+ U64 ReassignmentDeviceName; /* 0x08 */
+} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY,
+ Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ MPI2_BIOS4_ENTRY Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/* RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK
+{
+ U8 RAIDSetNum; /* 0x00 */
+ U8 PhysDiskMap; /* 0x01 */
+ U8 PhysDiskNum; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t;
+
+/* defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS
+{
+ U16 Settings; /* 0x00 */
+ U8 HotSparePool; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t;
+
+/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/* RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhysDisks at runtime.
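+ *
+ * For example (an illustrative sketch, not part of the MPI specification),
+ * host code that has allocated the page large enough for the reported
+ * NumPhysDisks walks the trailing PhysDisk array like this:
+ */
+#if 0 /* example only - never compiled */
+static U8 example_count_primary_disks(MPI2_CONFIG_PAGE_RAID_VOL_0 *page)
+{
+ U8 i, primary = 0;
+
+ for (i = 0; i < page->NumPhysDisks; i++)
+  if (page->PhysDisk[i].PhysDiskMap & MPI2_RAIDVOL0_PHYSDISK_PRIMARY)
+   primary++;
+ return primary;
+}
+#endif
+/*
+ * The build-time default follows.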
+ */ +#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x04 */ + U8 VolumeState; /* 0x06 */ + U8 VolumeType; /* 0x07 */ + U32 VolumeStatusFlags; /* 0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /* 0x0C */ + U64 MaxLBA; /* 0x10 */ + U32 StripeSize; /* 0x18 */ + U16 BlockSize; /* 0x1C */ + U16 Reserved1; /* 0x1E */ + U8 SupportedPhysDisks; /* 0x20 */ + U8 ResyncRate; /* 0x21 */ + U16 DataScrubDuration; /* 0x22 */ + U8 NumPhysDisks; /* 0x24 */ + U8 Reserved2; /* 0x25 */ + U8 Reserved3; /* 0x26 */ + U8 InactiveStatus; /* 0x27 */ + MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t; + +#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) + +/* values for RAID VolumeState */ +#define MPI2_RAID_VOL_STATE_MISSING (0x00) +#define MPI2_RAID_VOL_STATE_FAILED (0x01) +#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) +#define MPI2_RAID_VOL_STATE_ONLINE (0x03) +#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) +#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) + +/* values for RAID VolumeType */ +#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) +#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) +#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) +#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) +#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) + +/* values for RAID Volume Page 0 VolumeStatusFlags field */ +#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) +#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) +#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) +#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) +#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) +#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) +#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) +#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) + +/* values for RAID Volume Page 0 SupportedPhysDisks field */ +#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) +#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) +#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) +#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) + +/* values for RAID Volume Page 0 InactiveStatus field */ +#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) +#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) +#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) +#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) +#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) + + +/* RAID Volume Page 
1 */ + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x04 */ + U16 Reserved0; /* 0x06 */ + U8 GUID[24]; /* 0x08 */ + U8 Name[16]; /* 0x20 */ + U64 WWID; /* 0x30 */ + U32 Reserved1; /* 0x38 */ + U32 Reserved2; /* 0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t; + +#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +/* RAID Physical Disk Page 0 */ + +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS +{ + U16 Reserved1; /* 0x00 */ + U8 HotSparePool; /* 0x02 */ + U8 Reserved2; /* 0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t; + +/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ + +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA +{ + U8 VendorID[8]; /* 0x00 */ + U8 ProductID[16]; /* 0x08 */ + U8 ProductRevLevel[4]; /* 0x18 */ + U8 SerialNum[32]; /* 0x1C */ +} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t; + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 PhysDiskNum; /* 0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /* 0x10 */ + U32 Reserved3; /* 0x4C */ + U8 PhysDiskState; /* 0x50 */ + U8 OfflineReason; /* 0x51 */ + U8 IncompatibleReason; /* 0x52 */ + U8 PhysDiskAttributes; /* 0x53 */ + U32 PhysDiskStatusFlags; /* 0x54 */ + U64 DeviceMaxLBA; /* 0x58 */ + U64 HostMaxLBA; /* 0x60 */ + U64 CoercedMaxLBA; /* 0x68 */ + U16 BlockSize; /* 0x70 */ + U16 Reserved5; /* 0x72 */ + U32 Reserved6; /* 0x74 */ +} MPI2_CONFIG_PAGE_RD_PDISK_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t; + +#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) + +/* PhysDiskState defines */ +#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define MPI2_RAID_PD_STATE_OFFLINE (0x02) +#define MPI2_RAID_PD_STATE_ONLINE (0x03) +#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) +#define MPI2_RAID_PD_STATE_DEGRADED (0x05) +#define MPI2_RAID_PD_STATE_REBUILDING (0x06) +#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) + +/* OfflineReason defines */ +#define MPI2_PHYSDISK0_ONLINE (0x00) +#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) +#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) +#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) +#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) +#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) +#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) + +/* IncompatibleReason defines */ +#define MPI2_PHYSDISK0_COMPATIBLE (0x00) +#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) +#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) +#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) +#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) +#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) + +/* 
PhysDiskAttributes defines */ +#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) +#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) +#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) + +#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) +#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) +#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) + +/* PhysDiskStatusFlags defines */ +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) +#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) +#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) +#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) +#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) +#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) + + +/* RAID Physical Disk Page 1 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhysDiskPaths at runtime. + */ +#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX +#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) +#endif + +typedef struct _MPI2_RAIDPHYSDISK1_PATH +{ + U16 DevHandle; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U64 WWID; /* 0x04 */ + U64 OwnerWWID; /* 0x0C */ + U8 OwnerIdentifier; /* 0x14 */ + U8 Reserved2; /* 0x15 */ + U16 Flags; /* 0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t; + +/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) +#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) +#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 +{ + MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhysDiskPaths; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 Reserved1; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */ +} MPI2_CONFIG_PAGE_RD_PDISK_1, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t; + +#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) + + +/**************************************************************************** +* values for fields used by several types of SAS Config Pages +****************************************************************************/ + +/* values for NegotiatedLinkRates fields */ +#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) +#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) +#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/* link rates used for Negotiated Physical and Logical Link Rate */ +#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) +#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C) + + +/* values for AttachedPhyInfo fields */ +#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS 
(0x00000020) +#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) + +#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) +#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) + + +/* values for PhyInfo fields */ +#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + +#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) +#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) +#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) +#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) + +#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) +#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) +#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) + +#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) +#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) +#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) + +#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) +#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) +#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) +#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) + + +/* values for SAS ProgrammedLinkRate fields */ +#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C) + + +/* values for SAS HwLinkRate fields */ +#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define 
MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C) + + + +/**************************************************************************** +* SAS IO Unit Config Pages +****************************************************************************/ + +/* SAS IO Unit Page 0 */ + +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA +{ + U8 Port; /* 0x00 */ + U8 PortFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 NegotiatedLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo;/* 0x04 */ + U16 AttachedDevHandle; /* 0x08 */ + U16 ControllerDevHandle; /* 0x0A */ + U32 DiscoveryStatus; /* 0x0C */ + U32 Reserved; /* 0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t; + +#define MPI26_SASIOUNIT0_PHY_PORT_NO_PORT (0xFF) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT0_PHY_MAX +#define MPI2_SAS_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhys; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t; + +#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) + +/* values for SAS IO Unit Page 0 PortFlags */ +#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) + +/* values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/* values for SAS IO Unit Page 0 DiscoveryStatus */ +#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) +#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) +#define 
MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) + + +/* SAS IO Unit Page 1 */ + +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA +{ + U8 Port; /* 0x00 */ + U8 PortFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 MaxMinLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo; /* 0x04 */ + U16 MaxTargetPortConnectTime; /* 0x08 */ + U16 Reserved1; /* 0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT1_PHY_MAX +#define MPI2_SAS_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 ControlFlags; /* 0x08 */ + U16 SASNarrowMaxQueueDepth; /* 0x0A */ + U16 AdditionalControlFlags; /* 0x0C */ + U16 SASWideMaxQueueDepth; /* 0x0E */ + U8 NumPhys; /* 0x10 */ + U8 SATAMaxQDepth; /* 0x11 */ + U8 ReportDeviceMissingDelay; /* 0x12 */ + U8 IODeviceMissingDelay; /* 0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_1, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t; + +#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) + +/* values for SAS IO Unit Page 1 ControlFlags */ +#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. */ +#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) + +#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) +#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) + +#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. 
*/
+
+/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
+
+/* values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C)
+
+/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/* SAS IO Unit Page 4 (for MPI v2.5 and earlier) */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
+{
+ U8 MaxTargetSpinup; /* 0x00 */
+ U8 SpinupDelay; /* 0x01 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * four and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */
+ U32 Reserved1; /* 0x18 */
+ U32 Reserved2; /* 0x1C */
+ U32 Reserved3; /* 0x20 */
+ U8 BootDeviceWaitTime; /* 0x24 */
+ U8 SATADeviceWaitTime; /* 0x25 */
+ U16 Reserved5; /* 0x26 */
+ U8 NumPhys; /* 0x28 */
+ U8 PEInitialSpinupDelay; /* 0x29 */
+ U8 PEReplyDelay; /* 0x2A */
+ U8 Flags; /* 0x2B */
+ U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/* defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/* SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+{
+ U8 ControlFlags; /* 0x00 */
+ U8 PortWidthModGroup; /* 0x01 */
+ U16 InactivityTimerExponent; /* 0x02 */
+ U8 SATAPartialTimeout; /* 0x04 */
+ U8 Reserved2; /* 0x05 */
+ U8 SATASlumberTimeout; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+ U8 SASPartialTimeout; /* 0x08 */
+ U8 Reserved4; /* 0x09 */
+ U8 SASSlumberTimeout; /* 0x0A */
+ U8 Reserved5; /* 0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t;
+
+/* defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/* defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/* defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
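+ *
+ * For example (an illustrative sketch, not part of the MPI specification),
+ * the SAS slumber timer exponent of a phy's power management settings is
+ * decoded with the ITE mask/shift pair above; the result corresponds to
+ * the MPI2_SASIOUNIT5_ITE_ time-scale values:
+ */
+#if 0 /* example only - never compiled */
+static U16 example_sas_slumber_exponent(MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS *s)
+{
+ return (U16)((s->InactivityTimerExponent &
+  MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER) >>
+  MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER);
+}
+#endif
+/*
+ * The build-time default follows.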
+ */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumPhys; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) + + +/* SAS IO Unit Page 6 */ + +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS +{ + U8 CurrentStatus; /* 0x00 */ + U8 CurrentModulation; /* 0x01 */ + U8 CurrentUtilization; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ +} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t; + +/* defines for CurrentStatus field */ +#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) +#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) +#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) +#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) +#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) +#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) + +/* defines for CurrentModulation field */ +#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) +#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) +#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) +#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumGroups at runtime. + */ +#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX +#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U8 NumGroups; /* 0x10 */ + U8 Reserved3; /* 0x11 */ + U16 Reserved4; /* 0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_6, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t; + +#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) + + +/* SAS IO Unit Page 7 */ + +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS +{ + U8 Flags; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 Threshold75Pct; /* 0x04 */ + U8 Threshold50Pct; /* 0x05 */ + U8 Threshold25Pct; /* 0x06 */ + U8 Reserved3; /* 0x07 */ +} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t; + +/* defines for Flags field */ +#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumGroups at runtime. 
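+ *
+ * For example (an illustrative sketch, not part of the MPI specification),
+ * host code checks the per-group enable flag before acting on a group's
+ * thresholds; the caller must ensure group < NumGroups:
+ */
+#if 0 /* example only - never compiled */
+static int
+example_group_modulation_enabled(MPI2_CONFIG_PAGE_SASIOUNIT_7 *page, U8 group)
+{
+ return (page->PortWidthModulationGroupSettings[group].Flags &
+  MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION) != 0;
+}
+#endif
+/*
+ * The build-time default follows.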
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX          (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7
+{
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;             /* 0x00 */
+    U8                                  SamplingInterval;   /* 0x08 */
+    U8                                  WindowLength;       /* 0x09 */
+    U16                                 Reserved1;          /* 0x0A */
+    U32                                 Reserved2;          /* 0x0C */
+    U32                                 Reserved3;          /* 0x10 */
+    U8                                  NumGroups;          /* 0x14 */
+    U8                                  Reserved4;          /* 0x15 */
+    U16                                 Reserved5;          /* 0x16 */
+    MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+        PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+  Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION     (0x00)
+
+
+/* SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8
+{
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                       /* 0x00 */
+    U32                                 Reserved1;                    /* 0x08 */
+    U32                                 PowerManagementCapabilities;  /* 0x0C */
+    U8                                  TxRxSleepStatus;              /* 0x10 */ /* reserved in MPI 2.0 */
+    U8                                  Reserved2;                    /* 0x11 */
+    U16                                 Reserved3;                    /* 0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+  Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION     (0x00)
+
+/* defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD          (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE        (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE        (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE       (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE       (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD        (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE      (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE      (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE     (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE     (0x00000001)
+
+/* defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED          (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED           (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE               (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN             (0x03)
+
+
+
+/* SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16
+{
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                         /* 0x00 */
+    U64                                 TimeStamp;                      /* 0x08 */
+    U32                                 Reserved1;                      /* 0x10 */
+    U32                                 Reserved2;                      /* 0x14 */
+    U32                                 FastPathPendedRequests;         /* 0x18 */
+    U32                                 FastPathUnPendedRequests;       /* 0x1C */
+    U32                                 FastPathHostRequestStarts;      /* 0x20 */
+    U32                                 FastPathFirmwareRequestStarts;  /* 0x24 */
+    U32                                 FastPathHostCompletions;        /* 0x28 */
+    U32                                 FastPathFirmwareCompletions;    /* 0x2C */
+    U32                                 NonFastPathRequestStarts;       /* 0x30 */
+    U32                                 NonFastPathHostCompletions;     /* 0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+  Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION    (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/* SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
+{
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER    Header;                 /* 0x00 */
+    U8                                  PhysicalPort;           /* 0x08 */
+    U8                                  ReportGenLength;        /* 0x09 */
+    U16                                 EnclosureHandle;        /* 0x0A */
+    U64                                 SASAddress;             /* 0x0C */
+    U32                                 DiscoveryStatus;        /* 0x14 */
+    U16                                 DevHandle;              /* 0x18 */
+    U16                                 ParentDevHandle;        /* 0x1A */
+    U16                                 ExpanderChangeCount;    /* 0x1C
*/ + U16 ExpanderRouteIndexes; /* 0x1E */ + U8 NumPhys; /* 0x20 */ + U8 SASLevel; /* 0x21 */ + U16 Flags; /* 0x22 */ + U16 STPBusInactivityTimeLimit; /* 0x24 */ + U16 STPMaxConnectTimeLimit; /* 0x26 */ + U16 STP_SMP_NexusLossTime; /* 0x28 */ + U16 MaxNumRoutedSasAddresses; /* 0x2A */ + U64 ActiveZoneManagerSASAddress;/* 0x2C */ + U16 ZoneLockInactivityLimit; /* 0x34 */ + U16 Reserved1; /* 0x36 */ + U8 TimeToReducedFunc; /* 0x38 */ + U8 InitialTimeToReducedFunc; /* 0x39 */ + U8 MaxReducedFuncTime; /* 0x3A */ + U8 Reserved2; /* 0x3B */ +} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0, + Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t; + +#define MPI2_SASEXPANDER0_PAGEVERSION (0x06) + +/* values for SAS Expander Page 0 DiscoveryStatus field */ +#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400) +#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) + +/* values for SAS Expander Page 0 Flags field */ +#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) +#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) +#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) +#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100) +#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) +#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) +#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) +#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) +#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) + + +/* SAS Expander Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumPhys; /* 0x0C */ + U8 Phy; /* 0x0D */ + U16 NumTableEntriesProgrammed; /* 0x0E */ + U8 ProgrammedLinkRate; /* 0x10 */ + U8 HwLinkRate; /* 0x11 */ + U16 AttachedDevHandle; /* 0x12 */ + U32 PhyInfo; /* 0x14 */ + U32 AttachedDeviceInfo; /* 0x18 */ + U16 ExpanderDevHandle; /* 0x1C */ + U8 ChangeCount; /* 0x1E */ + U8 NegotiatedLinkRate; /* 0x1F */ + U8 PhyIdentifier; /* 0x20 */ + U8 AttachedPhyIdentifier; /* 0x21 */ + U8 Reserved3; /* 0x22 */ + U8 DiscoveryInfo; /* 0x23 */ + U32 AttachedPhyInfo; /* 0x24 */ + U8 ZoneGroup; /* 0x28 */ + U8 
SelfConfigStatus; /* 0x29 */ + U16 Reserved4; /* 0x2A */ +} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1, + Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t; + +#define MPI2_SASEXPANDER1_PAGEVERSION (0x02) + +/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */ + +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* values for SAS Expander Page 1 DiscoveryInfo field */ +#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) +#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) +#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) + +/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + + +/**************************************************************************** +* SAS Device Config Pages +****************************************************************************/ + +/* SAS Device Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U64 SASAddress; /* 0x0C */ + U16 ParentDevHandle; /* 0x14 */ + U8 PhyNum; /* 0x16 */ + U8 AccessStatus; /* 0x17 */ + U16 DevHandle; /* 0x18 */ + U8 AttachedPhyIdentifier; /* 0x1A */ + U8 ZoneGroup; /* 0x1B */ + U32 DeviceInfo; /* 0x1C */ + U16 Flags; /* 0x20 */ + U8 PhysicalPort; /* 0x22 */ + U8 MaxPortConnections; /* 0x23 */ + U64 DeviceName; /* 0x24 */ + U8 PortGroups; /* 0x2C */ + U8 DmaGroup; /* 0x2D */ + U8 ControlGroup; /* 0x2E */ + U8 EnclosureLevel; /* 0x2F */ + U8 ConnectorName[4]; /* 0x30 */ + U32 Reserved3; /* 0x34 */ +} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, + Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; + +#define MPI2_SASDEVICE0_PAGEVERSION (0x09) + +/* values for SAS Device Page 0 AccessStatus field */ +#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +/* specific values for SATA Init failures */ +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) + +/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ + +/* values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) +#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define 
MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004) +#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + +/* SAS Device Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U64 SASAddress; /* 0x0C */ + U32 Reserved2; /* 0x14 */ + U16 DevHandle; /* 0x18 */ + U16 Reserved3; /* 0x1A */ + U8 InitialRegDeviceFIS[20];/* 0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t; + +#define MPI2_SASDEVICE1_PAGEVERSION (0x01) + + +/**************************************************************************** +* SAS PHY Config Pages +****************************************************************************/ + +/* SAS PHY Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 OwnerDevHandle; /* 0x08 */ + U16 Reserved1; /* 0x0A */ + U16 AttachedDevHandle; /* 0x0C */ + U8 AttachedPhyIdentifier; /* 0x0E */ + U8 Reserved2; /* 0x0F */ + U32 AttachedPhyInfo; /* 0x10 */ + U8 ProgrammedLinkRate; /* 0x14 */ + U8 HwLinkRate; /* 0x15 */ + U8 ChangeCount; /* 0x16 */ + U8 Flags; /* 0x17 */ + U32 PhyInfo; /* 0x18 */ + U8 NegotiatedLinkRate; /* 0x1C */ + U8 Reserved3; /* 0x1D */ + U16 Reserved4; /* 0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t; + +#define MPI2_SASPHY0_PAGEVERSION (0x03) + +/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + +/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/* values for SAS PHY Page 0 Flags field */ +#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) + +/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/* SAS PHY Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 InvalidDwordCount; /* 0x0C */ + U32 RunningDisparityErrorCount; /* 0x10 */ + U32 LossDwordSynchCount; /* 0x14 */ + U32 PhyResetProblemCount; /* 0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t; + +#define MPI2_SASPHY1_PAGEVERSION (0x01) + + +/* SAS PHY Page 2 */ + +typedef struct _MPI2_SASPHY2_PHY_EVENT +{ + U8 PhyEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 PhyEventInfo; /* 0x04 */ +} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t; + +/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ + + +/* + * Host code (drivers, BIOS, utilities, etc.) 
should leave this define set to + * one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY2_PHY_EVENT_MAX +#define MPI2_SASPHY2_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhyEvents; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t; + +#define MPI2_SASPHY2_PAGEVERSION (0x00) + + +/* SAS PHY Page 3 */ + +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG +{ + U8 PhyEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 CounterType; /* 0x04 */ + U8 ThresholdWindow; /* 0x05 */ + U8 TimeUnits; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U32 EventThreshold; /* 0x08 */ + U16 ThresholdFlags; /* 0x0C */ + U16 Reserved4; /* 0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t; + +/* values for PhyEventCode field */ +#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) +#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) +/* Following codes are product specific and in MPI v2.6 and later */ +#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) +#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) +#define 
MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) +#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) +#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) +#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) +#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) +#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) +#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) + +/* values for the CounterType field */ +#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/* values for the TimeUnits field */ +#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) + +/* values for the ThresholdFlags field */ +#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY3_PHY_EVENT_MAX +#define MPI2_SASPHY3_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhyEvents; /* 0x0C */ + U8 Reserved2; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t; + +#define MPI2_SASPHY3_PAGEVERSION (0x00) + + +/* SAS PHY Page 4 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Reserved1; /* 0x08 */ + U8 Reserved2; /* 0x0A */ + U8 Flags; /* 0x0B */ + U8 InitialFrame[28]; /* 0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t; + +#define MPI2_SASPHY4_PAGEVERSION (0x00) + +/* values for the Flags field */ +#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) + + + + +/**************************************************************************** +* SAS Port Config Pages +****************************************************************************/ + +/* SAS Port Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PortNumber; /* 0x08 */ + U8 PhysicalPort; /* 0x09 */ + U8 PortWidth; /* 0x0A */ + U8 PhysicalPortWidth; /* 0x0B */ + U8 ZoneGroup; /* 0x0C */ + U8 Reserved1; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U64 SASAddress; /* 0x10 */ + U32 DeviceInfo; /* 0x18 */ + U32 Reserved3; /* 0x1C */ + U32 Reserved4; /* 0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t; + +#define MPI2_SASPORT0_PAGEVERSION (0x00) + +/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ + + +/**************************************************************************** +* SAS Enclosure Config Pages +****************************************************************************/ + +/* SAS Enclosure Page 0, Enclosure Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 
0x00 */ + U32 Reserved1; /* 0x08 */ + U64 EnclosureLogicalID; /* 0x0C */ + U16 Flags; /* 0x14 */ + U16 EnclosureHandle; /* 0x16 */ + U16 NumSlots; /* 0x18 */ + U16 StartSlot; /* 0x1A */ + U8 ChassisSlot; /* 0x1C */ + U8 EnclosureLevel; /* 0x1D */ + U16 SEPDevHandle; /* 0x1E */ + U8 OEMRD; /* 0x20 */ + U8 Reserved1a; /* 0x21 */ + U16 Reserved2; /* 0x22 */ + U32 Reserved3; /* 0x24 */ +} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t, + MPI26_CONFIG_PAGE_ENCLOSURE_0, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, + Mpi26EnclosurePage0_t, MPI2_POINTER pMpi26EnclosurePage0_t; + +#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) + +/* values for SAS Enclosure Page 0 Flags field */ +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + +#define MPI26_ENCLOSURE0_PAGEVERSION (0x04) + +/* Values for Enclosure Page 0 Flags field */ +#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) +#define MPI26_ENCLS0_FLAGS_MNG_PCIE_SW_SES_ENCL (0x0006) + +/**************************************************************************** +* Log Config Page +****************************************************************************/ + +/* Log Page 0 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLogEntries at runtime. 
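+ *
+ * e.g. (illustrative only; process_entry() is a hypothetical helper,
+ * not part of this API):
+ *
+ *   for (i = 0; i < pg0->NumLogEntries; i++)
+ *       if (pg0->LogEntry[i].LogEntryQualifier !=
+ *           MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED)
+ *           process_entry(&pg0->LogEntry[i]);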
+ */ +#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES +#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) +#endif + +#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_LOG_0_ENTRY +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U16 LogSequence; /* 0x0C */ + U16 LogEntryQualifier; /* 0x0E */ + U8 VP_ID; /* 0x10 */ + U8 VF_ID; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */ +} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t; + +/* values for Log Page 0 LogEntry LogEntryQualifier field */ +#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) +#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) +#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) +#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) +#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) + +typedef struct _MPI2_CONFIG_PAGE_LOG_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ + U16 NumLogEntries; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t; + +#define MPI2_LOG_0_PAGEVERSION (0x02) + + +/**************************************************************************** +* RAID Config Page +****************************************************************************/ + +/* RAID Page 0 */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumElements at runtime. + */ +#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS +#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) +#endif + +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT +{ + U16 ElementFlags; /* 0x00 */ + U16 VolDevHandle; /* 0x02 */ + U8 HotSparePool; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 PhysDiskDevHandle; /* 0x06 */ +} MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t; + +/* values for the ElementFlags field */ +#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + + +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumHotSpares; /* 0x08 */ + U8 NumPhysDisks; /* 0x09 */ + U8 NumVolumes; /* 0x0A */ + U8 ConfigNum; /* 0x0B */ + U32 Flags; /* 0x0C */ + U8 ConfigGUID[24]; /* 0x10 */ + U32 Reserved1; /* 0x28 */ + U8 NumElements; /* 0x2C */ + U8 Reserved2; /* 0x2D */ + U16 Reserved3; /* 0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */ +} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t; + +#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) + +/* values for RAID Configuration Page 0 Flags field */ +#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) + + +/**************************************************************************** +* Driver Persistent Mapping Config Pages +****************************************************************************/ + +/* Driver Persistent Mapping Page 0 */ 
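+
+/*
+ * Illustrative (non-normative) decode of the MappingInformation field
+ * using the mask and shift defines that follow:
+ *
+ *   slot = (entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_SLOT_MASK)
+ *          >> MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ *   missing = entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
+ */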
+ +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY +{ + U64 PhysicalIdentifier; /* 0x00 */ + U16 MappingInformation; /* 0x08 */ + U16 DeviceIndex; /* 0x0A */ + U32 PhysicalBitsMapping; /* 0x0C */ + U32 Reserved1; /* 0x10 */ +} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */ +} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t; + +#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) + +/* values for Driver Persistent Mapping Page 0 MappingInformation field */ +#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) +#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) +#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) + + +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/* Ethernet Page 0 */ + +/* IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR +{ + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 NumInterfaces; /* 0x08 */ + U8 Reserved0; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Status; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved2; /* 0x11 */ + U16 Reserved3; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved4; /* 0x1A */ + U8 Reserved5; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */ + U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/* values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/* values for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/* Ethernet Page 1 */ + 
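+/*
+ * Illustrative only: a host bringing the interface up with DHCP over
+ * IPv4 might program the Flags field defined below as
+ *
+ *   pg1->Flags = MPI2_ETHPG1_FLAG_ENABLE_ETH_IF |
+ *                MPI2_ETHPG1_FLAG_ENABLE_IPV4 |
+ *                MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT;
+ */
+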
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved0; /* 0x08 */ + U32 Flags; /* 0x0C */ + U8 MediaState; /* 0x10 */ + U8 Reserved1; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 MacAddress[6]; /* 0x14 */ + U8 Reserved3; /* 0x1A */ + U8 Reserved4; /* 0x1B */ + MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */ + MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */ + MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */ + MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */ + MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */ + U32 Reserved5; /* 0x6C */ + U32 Reserved6; /* 0x70 */ + U32 Reserved7; /* 0x74 */ + U32 Reserved8; /* 0x78 */ + U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/* values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/* values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) +#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + * Generic structure to use for product-specific extended manufacturing pages + * (currently Extended Manufacturing Page 40 through Extended Manufacturing + * Page 60). 
+ */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 ProductSpecificInfo; /* 0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, MPI2_POINTER pMpi2ExtManufacturingPagePS_t; + +/* PageVersion should be provided by product-specific code */ + + +/**************************************************************************** +* values for fields used by several types of PCIe Config Pages +****************************************************************************/ + +/* values for NegotiatedLinkRates fields */ +#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/* link rates used for Negotiated Physical Link Rate */ +#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) +#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02) +#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03) +#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04) +#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05) + + +/**************************************************************************** +* PCIe IO Unit Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/* PCIe IO Unit Page 0 */ + +typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA +{ + U8 Link; /* 0x00 */ + U8 LinkFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 NegotiatedLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo;/* 0x04 */ + U16 AttachedDevHandle; /* 0x08 */ + U16 ControllerDevHandle; /* 0x0A */ + U32 EnumerationStatus; /* 0x0C */ + U32 Reserved1; /* 0x10 */ +} MPI26_PCIE_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, + Mpi26PCIeIOUnit0PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit0PhyData_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
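+ *
+ * Once NumPhys is known, each PhyData entry can be decoded; for
+ * example (illustrative only), the negotiated physical link rate is
+ *
+ *   rate = pg0->PhyData[i].NegotiatedLinkRate &
+ *          MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL;
+ *
+ * which then compares against the MPI26_PCIE_NEG_LINK_RATE_ values
+ * above (e.g. MPI26_PCIE_NEG_LINK_RATE_8_0 for a link trained at
+ * 8.0 GT/s).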
+ */ +#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX +#define MPI26_PCIE_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 NumPhys; /* 0x0C */ + U8 InitStatus; /* 0x0D */ + U16 Reserved3; /* 0x0E */ + MPI26_PCIE_IO_UNIT0_PHY_DATA PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /* 0x10 */ +} MPI26_CONFIG_PAGE_PIOUNIT_0, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, + Mpi26PCIeIOUnitPage0_t, MPI2_POINTER pMpi26PCIeIOUnitPage0_t; + +#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00) + +/* values for PCIe IO Unit Page 0 LinkFlags */ +#define MPI26_PCIEIOUNIT0_LF_ENUMERATION_IN_PROGRESS (0x08) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_MASK (0x01) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_PAGE1 (0x00) +#define MPI26_PCIEIOUNIT0_LF_CFG_SRC_BACKPLANE (0x01) + +/* values for PCIe IO Unit Page 0 PhyFlags */ +#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/* values for PCIe IO Unit Page 0 EnumerationStatus */ +#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) +#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000) + + +/* PCIe IO Unit Page 1 */ + +typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA +{ + U8 Link; /* 0x00 */ + U8 LinkFlags; /* 0x01 */ + U8 PhyFlags; /* 0x02 */ + U8 MaxMinLinkRate; /* 0x03 */ + U32 ControllerPhyDeviceInfo; /* 0x04 */ + U32 Reserved1; /* 0x08 */ +} MPI26_PCIE_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, + Mpi26PCIeIOUnit1PhyData_t, MPI2_POINTER pMpi26PCIeIOUnit1PhyData_t; + +/* values for LinkFlags */ +#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumPhys at runtime. 
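+ *
+ * As a non-normative aside, the DMDReportPCIe field of this page packs
+ * a delay count with a time-unit bit, so a host might recover the
+ * delay in seconds as:
+ *
+ *   secs = pg1->DMDReportPCIe & MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK;
+ *   if ((pg1->DMDReportPCIe & MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK) ==
+ *       MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC)
+ *       secs *= 16;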
+ */ +#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX +#define MPI26_PCIE_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 ControlFlags; /* 0x08 */ + U16 Reserved; /* 0x0A */ + U16 AdditionalControlFlags; /* 0x0C */ + U16 NVMeMaxQueueDepth; /* 0x0E */ + U8 NumPhys; /* 0x10 */ + U8 DMDReportPCIe; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + MPI26_PCIE_IO_UNIT1_PHY_DATA PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/* 0x14 */ +} MPI26_CONFIG_PAGE_PIOUNIT_1, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, + Mpi26PCIeIOUnitPage1_t, MPI2_POINTER pMpi26PCIeIOUnitPage1_t; + +#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00) + +/* values for PCIe IO Unit Page 1 ControlFlags */ +#define MPI26_PCIEIOUNIT1_CF_BP_CONFIG_DISABLE (0x0080) +#define MPI26_PCIEIOUNIT1_CF_BP_AUTO_CLOCK_ENABLE (0x0040) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_MASK (0x0030) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_ALL_DIS (0x0000) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRIS_EN (0x0010) +#define MPI26_PCIEIOUNIT1_CF_BP_CLK_SRNS_EN (0x0020) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_MASK (0x000F) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_DISABLE (0x0000) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_2_5_MAX (0x0002) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_5_0_MAX (0x0003) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_8_0_MAX (0x0004) +#define MPI26_PCIEIOUNIT1_CF_BP_RATE_16_0_MAX (0x0005) + + +/* values for PCIe IO Unit Page 1 PhyFlags */ +#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01) + +/* values for PCIe IO Unit Page 1 MaxMinLinkRate */ +#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0) +#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4) +#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20) +#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30) +#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) +#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) + +/* values for PCIe IO Unit Page 1 DMDReportPCIe */ +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F) + + +/* see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo values */ + + +/**************************************************************************** +* PCIe Switch Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/* PCIe Switch Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 DevHandle; /* 0x0C */ + U16 ParentDevHandle; /* 0x0E */ + U8 NumPorts; /* 0x10 */ + U8 PCIeLevel; /* 0x11 */ + U16 Reserved3; /* 0x12 */ + U32 Reserved4; /* 0x14 */ + U32 Reserved5; /* 0x18 */ + U32 Reserved6; /* 0x1C */ +} MPI26_CONFIG_PAGE_PSWITCH_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_0, + Mpi26PCIeSwitchPage0_t, MPI2_POINTER pMpi26PCIeSwitchPage0_t; + +#define MPI26_PCIESWITCH0_PAGEVERSION (0x00) + + +/* PCIe Switch Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 PhysicalPort; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumPorts; /* 0x0C */ + U8 PortNum; /* 0x0D */ + U16 AttachedDevHandle; /* 0x0E */ + U16 SwitchDevHandle; /* 0x10 */ + U8 NegotiatedPortWidth; /* 0x12 */ + U8 NegotiatedLinkRate; /* 0x13 */ + U16 Flags; /* 0x14 */ + U16 
Reserved4; /* 0x16 */ + U32 Reserved5; /* 0x18 */ +} MPI26_CONFIG_PAGE_PSWITCH_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PSWITCH_1, + Mpi26PCIeSwitchPage1_t, MPI2_POINTER pMpi26PCIeSwitchPage1_t; + +#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) + +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* defines for the Flags field */ +#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002) +#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001) + + + +/**************************************************************************** +* PCIe Device Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/* PCIe Device Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U64 WWID; /* 0x0C */ + U16 ParentDevHandle; /* 0x14 */ + U8 PortNum; /* 0x16 */ + U8 AccessStatus; /* 0x17 */ + U16 DevHandle; /* 0x18 */ + U8 PhysicalPort; /* 0x1A */ + U8 Reserved1; /* 0x1B */ + U32 DeviceInfo; /* 0x1C */ + U32 Flags; /* 0x20 */ + U8 SupportedLinkRates; /* 0x24 */ + U8 MaxPortWidth; /* 0x25 */ + U8 NegotiatedPortWidth; /* 0x26 */ + U8 NegotiatedLinkRate; /* 0x27 */ + U8 EnclosureLevel; /* 0x28 */ + U8 Reserved2; /* 0x29 */ + U16 Reserved3; /* 0x2A */ + U8 ConnectorName[4]; /* 0x2C */ + U32 Reserved4; /* 0x30 */ + U32 Reserved5; /* 0x34 */ +} MPI26_CONFIG_PAGE_PCIEDEV_0, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, + Mpi26PCIeDevicePage0_t, MPI2_POINTER pMpi26PCIeDevicePage0_t; + +#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01) + +/* values for PCIe Device Page 0 AccessStatus field */ +#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00) +#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04) +#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02) +#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07) +#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08) +#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09) +#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A) +#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10) + +#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30) +#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34) +#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35) +#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37) +#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38) + +#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F) + +/* see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo field */ + +/* values for PCIe Device Page 0 Flags field */ +#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000) +#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000) +#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000) +#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000) +#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000) +#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400) +#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200) +#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100) +#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080) +#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040) +#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED 
(0x00000020) +#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010) +#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002) +#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001) + +/* values for PCIe Device Page 0 SupportedLinkRates field */ +#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08) +#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04) +#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02) +#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01) + +/* use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/* PCIe Device Page 2 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 DevHandle; /* 0x08 */ + U8 ControllerResetTO; /* 0x0A */ + U8 Reserved1; /* 0x0B */ + U32 MaximumDataTransferSize;/* 0x0C */ + U32 Capabilities; /* 0x10 */ + U16 NOIOB; /* 0x14 */ + U16 ShutdownLatency; /* 0x16 */ + U16 VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ +} MPI26_CONFIG_PAGE_PCIEDEV_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, + Mpi26PCIeDevicePage2_t, MPI2_POINTER pMpi26PCIeDevicePage2_t; + +#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01) + +/* defines for PCIe Device Page 2 Capabilities field */ +#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) +#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) +#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) +#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) + +/* Defines for the NOIOB field */ +#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000) + + +/**************************************************************************** +* PCIe Link Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/* PCIe Link Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 CorrectableErrorCount; /* 0x0C */ + U16 NonFatalErrorCount; /* 0x10 */ + U16 Reserved3; /* 0x12 */ + U16 FatalErrorCount; /* 0x14 */ + U16 Reserved4; /* 0x16 */ +} MPI26_CONFIG_PAGE_PCIELINK_1, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_1, + Mpi26PcieLinkPage1_t, MPI2_POINTER pMpi26PcieLinkPage1_t; + +#define MPI26_PCIELINK1_PAGEVERSION (0x00) + +/* PCIe Link Page 2 */ + +typedef struct _MPI26_PCIELINK2_LINK_EVENT +{ + U8 LinkEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 LinkEventInfo; /* 0x04 */ +} MPI26_PCIELINK2_LINK_EVENT, MPI2_POINTER PTR_MPI26_PCIELINK2_LINK_EVENT, + Mpi26PcieLink2LinkEvent_t, MPI2_POINTER pMpi26PcieLink2LinkEvent_t; + +/* use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLinkEvents at runtime. 
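+ *
+ * e.g. (illustrative only; this assumes LinkEventInfo carries the
+ * counter value for counter-type events):
+ *
+ *   for (i = 0; i < pg2->NumLinkEvents; i++)
+ *       if (pg2->LinkEvent[i].LinkEventCode ==
+ *           MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED)
+ *           total += pg2->LinkEvent[i].LinkEventInfo;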
+ */ +#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX +#define MPI26_PCIELINK2_LINK_EVENT_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumLinkEvents; /* 0x0C */ + U8 Reserved3; /* 0x0D */ + U16 Reserved4; /* 0x0E */ + MPI26_PCIELINK2_LINK_EVENT LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /* 0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_2, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_2, + Mpi26PcieLinkPage2_t, MPI2_POINTER pMpi26PcieLinkPage2_t; + +#define MPI26_PCIELINK2_PAGEVERSION (0x00) + + +/* PCIe Link Page 3 */ + +typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG +{ + U8 LinkEventCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U8 CounterType; /* 0x04 */ + U8 ThresholdWindow; /* 0x05 */ + U8 TimeUnits; /* 0x06 */ + U8 Reserved3; /* 0x07 */ + U32 EventThreshold; /* 0x08 */ + U16 ThresholdFlags; /* 0x0C */ + U16 Reserved4; /* 0x0E */ +} MPI26_PCIELINK3_LINK_EVENT_CONFIG, MPI2_POINTER PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, + Mpi26PcieLink3LinkEventConfig_t, MPI2_POINTER pMpi26PcieLink3LinkEventConfig_t; + +/* values for LinkEventCode field */ +#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00) +#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01) +#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02) +#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03) +#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04) +#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05) +#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06) +#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07) +#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08) +#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F) +#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10) +#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11) +#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12) + +/* values for the CounterType field */ +#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01) +#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/* values for the TimeUnits field */ +#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00) +#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01) +#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02) +#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03) + +/* values for the ThresholdFlags field */ +#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check the value returned for NumLinkEvents at runtime. 
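+ *
+ * A host arming one event slot might fill it as follows (illustrative
+ * sketch only):
+ *
+ *   cfg = &pg3->LinkEventConfig[0];
+ *   cfg->LinkEventCode  = MPI26_PCIELINK3_EVTCODE_POISONED_TLP;
+ *   cfg->CounterType    = MPI26_PCIELINK3_COUNTER_TYPE_SATURATING;
+ *   cfg->TimeUnits      = MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND;
+ *   cfg->EventThreshold = 10;
+ *   cfg->ThresholdFlags = MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY;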
+ */ +#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX +#define MPI26_PCIELINK3_LINK_EVENT_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U8 Link; /* 0x08 */ + U8 Reserved1; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U8 NumLinkEvents; /* 0x0C */ + U8 Reserved3; /* 0x0D */ + U16 Reserved4; /* 0x0E */ + MPI26_PCIELINK3_LINK_EVENT_CONFIG LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /* 0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_3, MPI2_POINTER PTR_MPI26_CONFIG_PAGE_PCIELINK_3, + Mpi26PcieLinkPage3_t, MPI2_POINTER pMpi26PcieLinkPage3_t; + +#define MPI26_PCIELINK3_PAGEVERSION (0x00) + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h b/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h index 8d4af26144fb..48d823965f9c 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_hbd.h @@ -1,122 +1,122 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_hbd.h - * Title: MPI Host Based Discovery messages and structures - * Creation Date: October 21, 2009 - * - * mpi2_hbd.h Version: 02.00.04 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 10-28-09 02.00.00 Initial version. - * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from - * HBD Action request, replaced by AdditionalInfo field. - * 11-18-11 02.00.02 Incorporating additions for MPI v2.5. - * 11-18-14 02.00.03 Updated copyright information. - * 02-17-16 02.00.04 Added SAS 4 22.5 gbs speed support. 
- * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_HBD_H -#define MPI2_HBD_H - -/**************************************************************************** -* Host Based Discovery Action messages -****************************************************************************/ - -/* Host Based Discovery Action Request Message */ -typedef struct _MPI2_HBD_ACTION_REQUEST -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U32 Reserved4; /* 0x0C */ - U64 SASAddress; /* 0x10 */ - U32 Reserved5; /* 0x18 */ - U32 HbdDeviceInfo; /* 0x1C */ - U16 ParentDevHandle; /* 0x20 */ - U16 MaxQDepth; /* 0x22 */ - U8 FirstPhyIdentifier; /* 0x24 */ - U8 Port; /* 0x25 */ - U8 MaxConnections; /* 0x26 */ - U8 MaxRate; /* 0x27 */ - U32 AdditionalInfo; /* 0x28 */ - U16 InitialAWT; /* 0x2C */ - U16 Reserved7; /* 0x2E */ - U32 Reserved8; /* 0x30 */ -} MPI2_HBD_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_HBD_ACTION_REQUEST, - Mpi2HbdActionRequest_t, MPI2_POINTER pMpi2HbdActionRequest_t; - -/* values for the Operation field */ -#define MPI2_HBD_OP_ADD_DEVICE (0x01) -#define MPI2_HBD_OP_REMOVE_DEVICE (0x02) -#define MPI2_HBD_OP_UPDATE_DEVICE (0x03) - -/* values for the HbdDeviceInfo field */ -#define MPI2_HBD_DEVICE_INFO_VIRTUAL_DEVICE (0x00004000) -#define MPI2_HBD_DEVICE_INFO_ATAPI_DEVICE (0x00002000) -#define MPI2_HBD_DEVICE_INFO_DIRECT_ATTACH (0x00000800) -#define MPI2_HBD_DEVICE_INFO_SSP_TARGET (0x00000400) -#define MPI2_HBD_DEVICE_INFO_STP_TARGET (0x00000200) -#define MPI2_HBD_DEVICE_INFO_SMP_TARGET (0x00000100) -#define MPI2_HBD_DEVICE_INFO_SATA_DEVICE (0x00000080) -#define MPI2_HBD_DEVICE_INFO_SSP_INITIATOR (0x00000040) -#define MPI2_HBD_DEVICE_INFO_STP_INITIATOR (0x00000020) -#define MPI2_HBD_DEVICE_INFO_SMP_INITIATOR (0x00000010) -#define MPI2_HBD_DEVICE_INFO_SATA_HOST (0x00000008) - -#define MPI2_HBD_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) -#define MPI2_HBD_DEVICE_INFO_NO_DEVICE (0x00000000) -#define MPI2_HBD_DEVICE_INFO_END_DEVICE (0x00000001) -#define MPI2_HBD_DEVICE_INFO_EDGE_EXPANDER (0x00000002) -#define MPI2_HBD_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) - -/* values for the MaxRate field */ -#define MPI2_HBD_MAX_RATE_MASK (0x0F) -#define MPI2_HBD_MAX_RATE_1_5 (0x08) -#define MPI2_HBD_MAX_RATE_3_0 (0x09) -#define MPI2_HBD_MAX_RATE_6_0 (0x0A) -#define MPI25_HBD_MAX_RATE_12_0 (0x0B) -#define MPI26_HBD_MAX_RATE_22_5 (0x0C) - - -/* Host Based Discovery Action Reply Message */ -typedef struct _MPI2_HBD_ACTION_REPLY -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_HBD_ACTION_REPLY, MPI2_POINTER PTR_MPI2_HBD_ACTION_REPLY, - Mpi2HbdActionReply_t, MPI2_POINTER pMpi2HbdActionReply_t; - - -#endif - - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_hbd.h + * Title: MPI Host Based Discovery messages and structures + * Creation Date: October 21, 2009 + * + * mpi2_hbd.h Version: 02.00.04 + * + * NOTE: Names (typedefs, defines, etc.) 
beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 10-28-09 02.00.00 Initial version. + * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from + * HBD Action request, replaced by AdditionalInfo field. + * 11-18-11 02.00.02 Incorporating additions for MPI v2.5. + * 11-18-14 02.00.03 Updated copyright information. + * 02-17-16 02.00.04 Added SAS 4 22.5 gbs speed support. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_HBD_H +#define MPI2_HBD_H + +/**************************************************************************** +* Host Based Discovery Action messages +****************************************************************************/ + +/* Host Based Discovery Action Request Message */ +typedef struct _MPI2_HBD_ACTION_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 Reserved4; /* 0x0C */ + U64 SASAddress; /* 0x10 */ + U32 Reserved5; /* 0x18 */ + U32 HbdDeviceInfo; /* 0x1C */ + U16 ParentDevHandle; /* 0x20 */ + U16 MaxQDepth; /* 0x22 */ + U8 FirstPhyIdentifier; /* 0x24 */ + U8 Port; /* 0x25 */ + U8 MaxConnections; /* 0x26 */ + U8 MaxRate; /* 0x27 */ + U32 AdditionalInfo; /* 0x28 */ + U16 InitialAWT; /* 0x2C */ + U16 Reserved7; /* 0x2E */ + U32 Reserved8; /* 0x30 */ +} MPI2_HBD_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_HBD_ACTION_REQUEST, + Mpi2HbdActionRequest_t, MPI2_POINTER pMpi2HbdActionRequest_t; + +/* values for the Operation field */ +#define MPI2_HBD_OP_ADD_DEVICE (0x01) +#define MPI2_HBD_OP_REMOVE_DEVICE (0x02) +#define MPI2_HBD_OP_UPDATE_DEVICE (0x03) + +/* values for the HbdDeviceInfo field */ +#define MPI2_HBD_DEVICE_INFO_VIRTUAL_DEVICE (0x00004000) +#define MPI2_HBD_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_HBD_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_HBD_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_HBD_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_HBD_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_HBD_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_HBD_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_HBD_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_HBD_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_HBD_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_HBD_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_HBD_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_HBD_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_HBD_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_HBD_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/* values for the MaxRate field */ +#define MPI2_HBD_MAX_RATE_MASK (0x0F) +#define MPI2_HBD_MAX_RATE_1_5 (0x08) +#define MPI2_HBD_MAX_RATE_3_0 (0x09) +#define MPI2_HBD_MAX_RATE_6_0 (0x0A) +#define MPI25_HBD_MAX_RATE_12_0 (0x0B) +#define MPI26_HBD_MAX_RATE_22_5 (0x0C) + + +/* Host Based Discovery Action Reply Message */ +typedef struct _MPI2_HBD_ACTION_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ 
+ U16 DevHandle; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_HBD_ACTION_REPLY, MPI2_POINTER PTR_MPI2_HBD_ACTION_REPLY, + Mpi2HbdActionReply_t, MPI2_POINTER pMpi2HbdActionReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_history.txt b/drivers/scsi/mpt3sas/mpi/mpi2_history.txt index 3609b5afa91c..38d339c357b6 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_history.txt +++ b/drivers/scsi/mpt3sas/mpi/mpi2_history.txt @@ -1,796 +1,796 @@ - ============================== - Fusion-MPT MPI 2.0 / 2.5 Header File Change History - ============================== - - Copyright 2000-2015 Avago Technologies. All rights reserved. - - --------------------------------------- - Header Set Release Version: 02.00.50 - Header Set Release Date: 09-29-17 - --------------------------------------- - - Filename Current version Prior version - ---------- --------------- ------------- - mpi2.h 02.00.50 02.00.49 - mpi2_cnfg.h 02.00.42 02.00.41 - mpi2_init.h 02.00.21 02.00.21 - mpi2_ioc.h 02.00.34 02.00.33 - mpi2_raid.h 02.00.11 02.00.11 - mpi2_sas.h 02.00.10 02.00.10 - mpi2_targ.h 02.00.09 02.00.09 - mpi2_tool.h 02.00.14 02.00.14 - mpi2_type.h 02.00.01 02.00.01 - mpi2_ra.h 02.00.01 02.00.01 - mpi2_hbd.h 02.00.04 02.00.04 - mpi2_pci.h 02.00.02 02.00.02 - mpi2_history.txt 02.00.46 02.00.45 - - - * Date Version Description - * -------- -------- ------------------------------------------------------ - -mpi2.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved ReplyPostHostIndex register to offset 0x6C of the - * MPI2_SYSTEM_INTERFACE_REGS and modified the define for - * MPI2_REPLY_POST_HOST_INDEX_OFFSET. - * Added union of request descriptors. - * Added union of reply descriptors. - * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_VERSION_02_00. - * Fixed the size of the FunctionDependent5 field in the - * MPI2_DEFAULT_REPLY structure. - * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. - * Removed the MPI-defined Fault Codes and extended the - * product specific codes up to 0xEFFF. - * Added a sixth key value for the WriteSequence register - * and changed the flush value to 0x0. - * Added message function codes for Diagnostic Buffer Post - * and Diagnsotic Release. - * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED - * Moved MPI2_VERSION_UNION from mpi2_ioc.h. - * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. - * Added #defines for marking a reply descriptor as unused. - * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved LUN field defines from mpi2_init.h. - * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. - * In all request and reply descriptors, replaced VF_ID - * field with MSIxIndex field. - * Removed DevHandle field from - * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those - * bytes reserved. - * Added RAID Accelerator functionality. - * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. 
- * Added MSI-x index mask and shift for Reply Post Host - * Index register. - * Added function code for Host Based Discovery Action. - * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. - * Added defines for product-specific range of message - * function codes, 0xF0 to 0xFF. - * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. - * Added alternative defines for the SGE Direction bit. - * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. - * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. - * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. - * Incorporating additions for MPI v2.5. - * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. - * Added Hard Reset delay timings. - * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. - * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. - * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. - * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. - * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. - * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-18-14 02.00.36 Updated copyright information. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 03-16-15 02.00.37 Updated for MPI v2.6. - * Bumped MPI2_HEADER_VERSION_UNIT. - * Added Scratchpad registers and - * AtomicRequestDescriptorPost register to - * MPI2_SYSTEM_INTERFACE_REGS. - * Added MPI2_DIAG_SBR_RELOAD. - * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER. - * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. - * Added V7 HostDiagnostic register defines - * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT - * 01-04-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT - * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines - * to be unique within first 32 characters. - * Removed AHCI support. - * Removed SOP support. - * Bumped MPI2_HEADER_VERSION_UNIT. - * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. - * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. - * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. - * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. - * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. - * -------------------------------------------------------------------------- - -mpi2_cnfg.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. - * Added Manufacturing Page 11. - * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE - * define. - * 06-26-07 02.00.02 Adding generic structure for product-specific - * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. - * Rework of BIOS Page 2 configuration page. 
- * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the - * forms. - * Added configuration pages IOC Page 8 and Driver - * Persistent Mapping Page 0. - * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated - * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, - * RAID Physical Disk Pages 0 and 1, RAID Configuration - * Page 0). - * Added new value for AccessStatus field of SAS Device - * Page 0 (_SATA_NEEDS_INITIALIZATION). - * 10-31-07 02.00.04 Added missing SEPDevHandle field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for - * NVDATA. - * Modified IOC Page 7 to use masks and added field for - * SASBroadcastPrimitiveMasks. - * Added MPI2_CONFIG_PAGE_BIOS_4. - * Added MPI2_CONFIG_PAGE_LOG_0. - * 02-29-08 02.00.06 Modified various names to make them 32-character unique. - * Added SAS Device IDs. - * Updated Integrated RAID configuration pages including - * Manufacturing Page 4, IOC Page 6, and RAID Configuration - * Page 0. - * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. - * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. - * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. - * Added missing MaxNumRoutedSasAddresses field to - * MPI2_CONFIG_PAGE_EXPANDER_0. - * Added SAS Port Page 0. - * Modified structure layout for - * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. - * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use - * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. - * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF - * to 0x000000FF. - * Added two new values for the Physical Disk Coercion Size - * bits in the Flags field of Manufacturing Page 4. - * Added product-specific Manufacturing pages 16 to 31. - * Modified Flags bits for controlling write cache on SATA - * drives in IO Unit Page 1. - * Added new bit to AdditionalControlFlags of SAS IO Unit - * Page 1 to control Invalid Topology Correction. - * Added SupportedPhysDisks field to RAID Volume Page 1 and - * added related defines. - * Added additional defines for RAID Volume Page 0 - * VolumeStatusFlags field. - * Modified meaning of RAID Volume Page 0 VolumeSettings - * define for auto-configure of hot-swap drives. - * Added PhysDiskAttributes field (and related defines) to - * RAID Physical Disk Page 0. - * Added MPI2_SAS_PHYINFO_PHY_VACANT define. - * Added three new DiscoveryStatus bits for SAS IO Unit - * Page 0 and SAS Expander Page 0. - * Removed multiplexing information from SAS IO Unit pages. - * Added BootDeviceWaitTime field to SAS IO Unit Page 4. - * Removed Zone Address Resolved bit from PhyInfo and from - * Expander Page 0 Flags field. - * Added two new AccessStatus values to SAS Device Page 0 - * for indicating routing problems. Added 3 reserved words - * to this page. - * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. - * Inserted missing reserved field into structure for IOC - * Page 6. - * Added more pending task bits to RAID Volume Page 0 - * VolumeStatusFlags defines. - * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. - * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 - * and SAS Expander Page 0 to flag a downstream initiator - * when in simplified routing mode. - * Removed SATA Init Failure defines for DiscoveryStatus - * fields of SAS IO Unit Page 0 and SAS Expander Page 0. - * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. - * Added PortGroups, DmaGroup, and ControlGroup fields to - * SAS Device Page 0. 
- * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO - * Unit Page 6. - * Added expander reduced functionality data to SAS - * Expander Page 0. - * Added SAS PHY Page 2 and SAS PHY Page 3. - * 07-30-09 02.00.12 Added IO Unit Page 7. - * Added new device ids. - * Added SAS IO Unit Page 5. - * Added partial and slumber power management capable flags - * to SAS Device Page 0 Flags field. - * Added PhyInfo defines for power condition. - * Added Ethernet configuration pages. - * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. - * Added SAS PHY Page 4 structure and defines. - * 02-10-10 02.00.14 Modified the comments for the configuration page - * structures that contain an array of data. The host - * should use the "count" field in the page data (e.g. the - * NumPhys field) to determine the number of valid elements - * in the array. - * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. - * Added PowerManagementCapabilities to IO Unit Page 7. - * Added PortWidthModGroup field to - * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. - * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. - * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT - * define. - * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. - * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. - * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) - * defines. - * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to - * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for - * the Pinout field. - * Added BoardTemperature and BoardTemperatureUnits fields - * to MPI2_CONFIG_PAGE_IO_UNIT_7. - * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define - * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. - * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. - * Added IO Unit Page 8, IO Unit Page 9, - * and IO Unit Page 10. - * Added SASNotifyPrimitiveMasks field to - * MPI2_CONFIG_PAGE_IOC_7. - * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). - * 05-25-11 02.00.20 Cleaned up a few comments. - * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities - * for PCIe link as obsolete. - * Added SpinupFlags field containing a Disable Spin-up bit - * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO - * Unit Page 4. - * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. - * Added UEFIVersion field to BIOS Page 1 and defined new - * BiosOptions bits. - * Incorporating additions for MPI v2.5. - * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. - * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. - * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as - * obsolete for MPI v2.5 and later. - * Added some defines for 12G SAS speeds. - * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. - * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to - * match the specification. - * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for - * future use. - * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for - * MPI2_CONFIG_PAGE_MAN_7. - * Added EnclosureLevel and ConnectorName fields to - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_DEV_0. - * Added EnclosureLevel field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. 
- * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 01-08-14 02.00.28 Added more defines for the BiosOptions field of - * MPI2_CONFIG_PAGE_BIOS_1. - * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and - * more defines for the BiosOptions field.. - * 11-18-14 02.00.30 Updated copyright information. - * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. - * Added AdapterOrderAux fields to BIOS Page 3. - * 03-16-15 02.00.31 Updated for MPI v2.6. - * Added BoardPowerRequirement, PCISlotPowerAllocation, and - * Flags field to IO Unit Page 7. - * Added IO Unit Page 11. - * Added new SAS Phy Event codes - * Added PCIe configuration pages. - * 03-19-15 02.00.32 Fixed PCIe Link Config page structure names to be - * unique in first 32 characters. - * 05-25-15 02.00.33 Added more defines for the BiosOptions field of - * MPI2_CONFIG_PAGE_BIOS_1. - * 08-25-15 02.00.34 Added PCIe Device Page 2 SGL format capability. - * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. - * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines. - * Added Link field to PCIe Link Pages - * Added EnclosureLevel and ConnectorName to PCIe - * Device Page 0. - * Added define for PCIE IoUnit page 1 max rate shift. - * Added comment for reserved ExtPageTypes. - * Added SAS 4 22.5 gbs speed support. - * Added PCIe 4 16.0 GT/sec speec support. - * Removed AHCI support. - * Removed SOP support. - * Added NegotiatedLinkRate and NegotiatedPortWidth to - * PCIe device page 0. - * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines - * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types. - * Changed declaration of ConnectorName in PCIe DevicePage0 - * to match SAS DevicePage 0. - * Added SATADeviceWaitTime to IO Unit Page 11. - * Added MPI26_MFGPAGE_DEVID_SAS4008 - * Added x16 PCIe width to IO Unit Page 7 - * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1 - * phy data. - * Added InitStatus to PCIe IO Unit Page 1 header. - * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines. - * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and - * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats. - * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN. - * Added ChassisSlot field to SAS Enclosure Page 0. - * Added ChassisSlot Valid bit (bit 5) to the Flags field - * in SAS Enclosure Page 0. - * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and - * MPI26_MFGPAGE_DEVID_SAS3916 defines. - * Removed MPI26_MFGPAGE_DEVID_SAS4008 define. - * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define. - * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to - * MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN. - * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to - * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK. - * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2. - * Added NOIOB field to PCIe Device Page 2. - * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to - * the Capabilities field of PCIe Device Page 2. - * -------------------------------------------------------------------------- - -mpi2_init.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. - * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. - * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. - * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. - * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. 
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO - * Control field Task Attribute flags. - * Moved LUN field defines to mpi2.h becasue they are - * common to many structures. - * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to - * Query Asynchronous Event. - * Defined two new bits in the SlotStatus field of the SCSI - * Enclosure Processor Request and Reply. - * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for - * both SCSI IO Error Reply and SCSI Task Management Reply. - * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. - * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. - * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. - * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. - * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. - * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. - * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command - * Priority to match SAM-4. - * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. - * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. - * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, - * replacing the Reserved4 field. - * 11-18-14 02.00.16 Updated copyright information. - * 03-16-15 02.00.17 Updated for MPI v2.6. - * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. - * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and - * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. - * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. - * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. - * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. - * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to - * be unique within first 32 characters. - * -------------------------------------------------------------------------- - -mpi2_ioc.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to - * MaxTargets. - * Added TotalImageSize field to FWDownload Request. - * Added reserved words to FWUpload Request. - * 06-26-07 02.00.02 Added IR Configuration Change List Event. - * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit - * request and replaced it with - * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. - * Replaced the MinReplyQueueDepth field of the IOCFacts - * reply with MaxReplyDescriptorPostQueueDepth. - * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum - * depth for the Reply Descriptor Post Queue. - * Added SASAddress field to Initiator Device Table - * Overflow Event data. - * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING - * for SAS Initiator Device Status Change Event data. - * Modified Reason Code defines for SAS Topology Change - * List Event data, including adding a bit for PHY Vacant - * status, and adding a mask for the Reason Code. - * Added define for - * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. - * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. - * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of - * the IOCFacts Reply. - * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Moved MPI2_VERSION_UNION to mpi2.h. - * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks - * instead of enables, and added SASBroadcastPrimitiveMasks - * field. - * Added Log Entry Added Event and related structure. 
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. - * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. - * Added MaxVolumes and MaxPersistentEntries fields to - * IOCFacts reply. - * Added ProtocalFlags and IOCCapabilities fields to - * MPI2_FW_IMAGE_HEADER. - * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. - * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to - * a U16 (from a U32). - * Removed extra 's' from EventMasks name. - * 06-27-08 02.00.08 Fixed an offset in a comment. - * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. - * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and - * renamed MinReplyFrameSize to ReplyFrameSize. - * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. - * Added two new RAIDOperation values for Integrated RAID - * Operations Status Event data. - * Added four new IR Configuration Change List Event data - * ReasonCode values. - * Added two new ReasonCode defines for SAS Device Status - * Change Event data. - * Added three new DiscoveryStatus bits for the SAS - * Discovery event data. - * Added Multiplexing Status Change bit to the PhyStatus - * field of the SAS Topology Change List event data. - * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. - * BootFlags are now product-specific. - * Added defines for the indivdual signature bytes - * for MPI2_INIT_IMAGE_FOOTER. - * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. - * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR - * define. - * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE - * define. - * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. - * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. - * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. - * Added two new reason codes for SAS Device Status Change - * Event. - * Added new event: SAS PHY Counter. - * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. - * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Added new product id family for 2208. - * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. - * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. - * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. - * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. - * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. - * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. - * Added Host Based Discovery Phy Event data. - * Added defines for ProductID Product field - * (MPI2_FW_HEADER_PID_). - * Modified values for SAS ProductID Family - * (MPI2_FW_HEADER_PID_FAMILY_). - * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. - * Added PowerManagementControl Request structures and - * defines. - * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. - * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. - * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. - * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added - * SASNotifyPrimitiveMasks field to - * MPI2_EVENT_NOTIFICATION_REQUEST. - * Added Temperature Threshold Event. - * Added Host Message Event. - * Added Send Host Message request and reply. - * 05-25-11 02.00.18 For Extended Image Header, added - * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and - * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. - * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. - * 08-24-11 02.00.19 Added PhysicalPort field to - * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. 
- * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. - * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. - * 03-29-12 02.00.21 Added a product specific range to event values. - * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. - * Added ElapsedSeconds field to - * MPI2_EVENT_DATA_IR_OPERATION_STATUS. - * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE - * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. - * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. - * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. - * Added Encrypted Hash Extended Image. - * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. - * 11-18-14 02.00.25 Updated copyright information. - * 03-16-15 02.00.26 Updated for MPI v2.6. - * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and - * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. - * Added MPI2_EVENT_PCIE_LINK_COUNTER and - * MPI26_EVENT_DATA_PCIE_LINK_COUNTER. - * Added MPI26_CTRL_OP_SHUTDOWN. - * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG - * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and - * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. - * 08-25-15 02.00.27 Added IC ARCH Class based signature defines. - * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event. - * Added ConigurationFlags field to IOCInit message to - * support NVMe SGL format control. - * Added PCIe SRIOV support. - * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support. - * Added PCIe 4 16.0 GT/sec speec support. - * Removed AHCI support. - * Removed SOP support. - * 07-01-16 02.00.29 Added Archclass for 4008 product. - * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED. - * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload - * Request Message. - * Added new defines for the ImageType field of FWUpload - * Request Message. - * Added new values for the RegionType field in the Layout - * Data sections of the FLASH Layout Extended Image Data. - * Added new defines for the ReasonCode field of - * Active Cable Exception Event. - * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and - * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE. - * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and - * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR. - * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. - * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related - * defines for the ReasonCode field. - * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD. - * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED - * to the ReasonCode field in PCIe Device Status Change - * Event Data. - * -------------------------------------------------------------------------- - -mpi2_raid.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Modifications to RAID Action request and reply, - * including the Actions and ActionData. - * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. - * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that - * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT - * can be sized by the build environment. - * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of - * VolumeCreationFlags and marked the old one as obsolete. - * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. - * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with - * related structures and defines. - * Added product-specific range to RAID Action values. - * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. 
- * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. - * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. - * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. - * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. - * -------------------------------------------------------------------------- - -mpi2_sas.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit - * Control Request. - * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control - * Request. - * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST - * to MPI2_SGE_IO_UNION since it supports chained SGLs. - * 05-12-10 02.00.04 Modified some comments. - * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. - * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA - * Passthrough Request message. - * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete - * for anything newer than MPI v2.0. - * 11-18-14 02.00.09 Updated copyright information. - * 03-16-15 02.00.10 Updated for MPI v2.6. - * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. - * -------------------------------------------------------------------------- - -mpi2_targ.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to - * BufferPostFlags field of CommandBufferPostBase Request. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 10-02-08 02.00.03 Removed NextCmdBufferOffset from - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * Target Status Send Request only takes a single SGE for - * response data. - * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. - * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. - * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT - * request message structure. - * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and - * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. - * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * 11-18-14 02.00.08 Updated copyright information. - * 03-16-15 02.00.09 Updated for MPI v2.6. - * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. - * -------------------------------------------------------------------------- - -mpi2_tool.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release - * structures and defines. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. - * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request - * and reply messages. - * Added MPI2_DIAG_BUF_TYPE_EXTENDED. - * Incremented MPI2_DIAG_BUF_TYPE_COUNT. - * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. - * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer - * Post Request. - * 05-25-11 02.00.07 Added Flags field and related defines to - * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. - * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request - * message. 
- * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that - * it uses MPI Chain SGE as well as MPI Simple SGE. - * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. - * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. - * 11-18-14 02.00.13 Updated copyright information. - * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean - * Tool Request Message. - * -------------------------------------------------------------------------- - -mpi2_type.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 11-18-14 02.00.01 Updated copyright information. - * -------------------------------------------------------------------------- - -mpi2_ra.h - * 05-06-09 02.00.00 Initial version. - * 11-18-14 02.00.01 Updated copyright information. - * -------------------------------------------------------------------------- - -mpi2_hbd.h - * 10-28-09 02.00.00 Initial version. - * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from - * HBD Action request, replaced by AdditionalInfo field. - * 11-18-11 02.00.02 Incorporating additions for MPI v2.5. - * 11-18-14 02.00.03 Updated copyright information. - * 02-17-16 02.00.04 Added SAS 4 22.5 gbs speed support. - * -------------------------------------------------------------------------- - -mpi2_pci.h - * 03-16-15 02.00.00 Initial version. - * 02-17-16 02.00.01 Removed AHCI support. - * Removed SOP support. - * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to - * NVME Encapsulated Request. - * -------------------------------------------------------------------------- - -mpi2_history.txt Parts list history - -Filename 02.00.50 02.00.49 02.00.48 ----------- -------- -------- -------- -mpi2.h 02.00.50 02.00.49 02.00.48 -mpi2_cnfg.h 02.00.42 02.00.41 02.00.40 -mpi2_init.h 02.00.21 02.00.21 02.00.21 -mpi2_ioc.h 02.00.34 02.00.33 02.00.32 -mpi2_raid.h 02.00.11 02.00.11 02.00.11 -mpi2_sas.h 02.00.10 02.00.10 02.00.10 -mpi2_targ.h 02.00.09 02.00.09 02.00.09 -mpi2_tool.h 02.00.14 02.00.14 02.00.14 -mpi2_type.h 02.00.01 02.00.01 02.00.01 -mpi2_ra.h 02.00.01 02.00.01 02.00.01 -mpi2_hbd.h 02.00.04 02.00.04 02.00.04 -mpi2_pci.h 02.00.02 02.00.02 02.00.02 - -Filename 02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42 -mpi2_cnfg.h 02.00.39 02.00.39 02.00.38 02.00.37 02.00.36 02.00.35 -mpi2_init.h 02.00.21 02.00.21 02.00.21 02.00.21 02.00.21 02.00.20 -mpi2_ioc.h 02.00.31 02.00.30 02.00.29 02.00.28 02.00.28 02.00.27 -mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 -mpi2_sas.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 -mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 -mpi2_tool.h 02.00.14 02.00.14 02.00.13 02.00.13 02.00.13 02.00.13 -mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_ra.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_hbd.h 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04 02.00.03 -mpi2_pci.h 02.00.02 02.00.02 02.00.02 02.00.01 02.00.01 02.00.00 - -Filename 02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36 -mpi2_cnfg.h 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 -mpi2_init.h 02.00.19 02.00.18 02.00.17 02.00.17 02.00.17 02.00.16 -mpi2_ioc.h 02.00.27 02.00.27 02.00.26 02.00.26 02.00.26 02.00.25 
-mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 -mpi2_sas.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09 -mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.08 -mpi2_tool.h 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13 -mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_ra.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_hbd.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.03 02.00.03 -mpi2_pci.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - -Filename 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 -mpi2_cnfg.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.25 -mpi2_init.h 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15 -mpi2_ioc.h 02.00.24 02.00.24 02.00.24 02.00.23 02.00.22 02.00.22 -mpi2_raid.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09 -mpi2_sas.h 02.00.08 02.00.08 02.00.08 02.00.08 02.00.07 02.00.07 -mpi2_targ.h 02.00.07 02.00.06 02.00.06 02.00.06 02.00.06 02.00.06 -mpi2_tool.h 02.00.12 02.00.12 02.00.11 02.00.11 02.00.10 02.00.10 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 - -Filename 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24 -mpi2_cnfg.h 02.00.24 02.00.23 02.00.22 02.00.22 02.00.22 02.00.22 -mpi2_init.h 02.00.14 02.00.14 02.00.14 02.00.14 02.00.13 02.00.13 -mpi2_ioc.h 02.00.22 02.00.22 02.00.22 02.00.21 02.00.21 02.00.20 -mpi2_raid.h 02.00.09 02.00.09 02.00.09 02.00.08 02.00.08 02.00.08 -mpi2_sas.h 02.00.07 02.00.07 02.00.07 02.00.07 02.00.06 02.00.06 -mpi2_targ.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 -mpi2_tool.h 02.00.10 02.00.10 02.00.10 02.00.09 02.00.08 02.00.08 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 - -Filename 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 -mpi2_cnfg.h 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 02.00.17 -mpi2_init.h 02.00.12 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 -mpi2_ioc.h 02.00.20 02.00.19 02.00.18 02.00.17 02.00.17 02.00.16 -mpi2_raid.h 02.00.07 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 -mpi2_sas.h 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 02.00.05 -mpi2_targ.h 02.00.05 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04 -mpi2_tool.h 02.00.08 02.00.07 02.00.07 02.00.06 02.00.06 02.00.06 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 - -Filename 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 -mpi2_cnfg.h 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11 -mpi2_init.h 02.00.10 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 -mpi2_ioc.h 02.00.15 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11 -mpi2_raid.h 02.00.05 02.00.05 02.00.04 02.00.04 02.00.04 
02.00.03 -mpi2_sas.h 02.00.05 02.00.04 02.00.03 02.00.03 02.00.02 02.00.02 -mpi2_targ.h 02.00.04 02.00.04 02.00.04 02.00.03 02.00.03 02.00.03 -mpi2_tool.h 02.00.06 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.01 02.00.00 02.00.00 02.00.00 - -Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 -mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06 -mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03 -mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06 -mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02 -mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - -Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 -mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - + ============================== + Fusion-MPT MPI 2.0 / 2.5 Header File Change History + ============================== + + Copyright 2000-2015 Avago Technologies. All rights reserved. + + --------------------------------------- + Header Set Release Version: 02.00.50 + Header Set Release Date: 09-29-17 + --------------------------------------- + + Filename Current version Prior version + ---------- --------------- ------------- + mpi2.h 02.00.50 02.00.49 + mpi2_cnfg.h 02.00.42 02.00.41 + mpi2_init.h 02.00.21 02.00.21 + mpi2_ioc.h 02.00.34 02.00.33 + mpi2_raid.h 02.00.11 02.00.11 + mpi2_sas.h 02.00.10 02.00.10 + mpi2_targ.h 02.00.09 02.00.09 + mpi2_tool.h 02.00.14 02.00.14 + mpi2_type.h 02.00.01 02.00.01 + mpi2_ra.h 02.00.01 02.00.01 + mpi2_hbd.h 02.00.04 02.00.04 + mpi2_pci.h 02.00.02 02.00.02 + mpi2_history.txt 02.00.46 02.00.45 + + + * Date Version Description + * -------- -------- ------------------------------------------------------ + +mpi2.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-14 02.00.36 Updated copyright information.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-16-15 02.00.37 Updated for MPI v2.6.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Scratchpad registers and
+ * AtomicRequestDescriptorPost register to
+ * MPI2_SYSTEM_INTERFACE_REGS.
+ * Added MPI2_DIAG_SBR_RELOAD.
+ * Added MPI2_IOCSTATUS_INSUFFICIENT_POWER.
+ * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added V7 HostDiagnostic register defines
+ * 12-15-15 02.00.41 Bumped MPI2_HEADER_VERSION_UNIT
+ * 01-04-16 02.00.42 Bumped MPI2_HEADER_VERSION_UNIT
+ * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines
+ * to be unique within first 32 characters.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+
+mpi2_cnfg.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. 
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 01-08-14 02.00.28 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
+ * more defines for the BiosOptions field.
+ * 11-18-14 02.00.30 Updated copyright information.
+ * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
+ * Added AdapterOrderAux fields to BIOS Page 3.
+ * 03-16-15 02.00.31 Updated for MPI v2.6.
+ * Added BoardPowerRequirement, PCISlotPowerAllocation, and
+ * Flags field to IO Unit Page 7.
+ * Added IO Unit Page 11.
+ * Added new SAS Phy Event codes
+ * Added PCIe configuration pages.
+ * 03-19-15 02.00.32 Fixed PCIe Link Config page structure names to be
+ * unique in first 32 characters.
+ * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15 02.00.34 Added PCIe Device Page 2 SGL format capability.
+ * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 gbs speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008
+ * Added x16 PCIe width to IO Unit Page 7
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
+ * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ * MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ * Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
+ * Added NOIOB field to PCIe Device Page 2.
+ * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ * the Capabilities field of PCIe Device Page 2.
+ * --------------------------------------------------------------------------
+
+mpi2_init.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
+ * 11-18-14 02.00.16 Updated copyright information.
+ * 03-16-15 02.00.17 Updated for MPI v2.6.
+ * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
+ * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
+ * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
+ * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
+ * 12-18-15 02.00.19 Added EEDPObservedValue to SCSI IO Reply message.
+ * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * be unique within first 32 characters.
+ * --------------------------------------------------------------------------
+
+mpi2_ioc.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. + * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING + * for SAS Initiator Device Status Change Event data. + * Modified Reason Code defines for SAS Topology Change + * List Event data, including adding a bit for PHY Vacant + * status, and adding a mask for the Reason Code. + * Added define for + * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. + * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. + * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of + * the IOCFacts Reply. + * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Moved MPI2_VERSION_UNION to mpi2.h. + * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks + * instead of enables, and added SASBroadcastPrimitiveMasks + * field. + * Added Log Entry Added Event and related structure. + * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. + * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. + * Added MaxVolumes and MaxPersistentEntries fields to + * IOCFacts reply. + * Added ProtocolFlags and IOCCapabilities fields to + * MPI2_FW_IMAGE_HEADER. + * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. + * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to + * a U16 (from a U32). + * Removed extra 's' from EventMasks name. + * 06-27-08 02.00.08 Fixed an offset in a comment. + * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. + * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and + * renamed MinReplyFrameSize to ReplyFrameSize. + * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. + * Added two new RAIDOperation values for Integrated RAID + * Operations Status Event data. + * Added four new IR Configuration Change List Event data + * ReasonCode values. + * Added two new ReasonCode defines for SAS Device Status + * Change Event data. + * Added three new DiscoveryStatus bits for the SAS + * Discovery event data. + * Added Multiplexing Status Change bit to the PhyStatus + * field of the SAS Topology Change List event data. + * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. + * BootFlags are now product-specific. + * Added defines for the individual signature bytes + * for MPI2_INIT_IMAGE_FOOTER. + * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. + * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR + * define. + * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE + * define. + * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. + * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. + * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. + * Added two new reason codes for SAS Device Status Change + * Event. + * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. + * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. + * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. + * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. + * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. + * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. + * Added Host Based Discovery Phy Event data. + * Added defines for ProductID Product field + * (MPI2_FW_HEADER_PID_). + * Modified values for SAS ProductID Family + * (MPI2_FW_HEADER_PID_FAMILY_). + * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. + * Added PowerManagementControl Request structures and + * defines. + * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. + * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. + * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added + * SASNotifyPrimitiveMasks field to + * MPI2_EVENT_NOTIFICATION_REQUEST. + * Added Temperature Threshold Event. + * Added Host Message Event. + * Added Send Host Message request and reply. + * 05-25-11 02.00.18 For Extended Image Header, added + * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and + * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. + * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. + * 08-24-11 02.00.19 Added PhysicalPort field to + * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. + * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. + * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. + * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. + * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. + * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. + * 11-18-14 02.00.25 Updated copyright information. + * 03-16-15 02.00.26 Updated for MPI v2.6. + * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and + * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. + * Added MPI2_EVENT_PCIE_LINK_COUNTER and + * MPI26_EVENT_DATA_PCIE_LINK_COUNTER. + * Added MPI26_CTRL_OP_SHUTDOWN. + * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG. + * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and + * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. + * 08-25-15 02.00.27 Added IC ARCH Class based signature defines. + * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event. + * Added ConfigurationFlags field to IOCInit message to + * support NVMe SGL format control. + * Added PCIe SRIOV support. + * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support. + * Added PCIe 4 16.0 GT/sec speed support. + * Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.29 Added Archclass for 4008 product. + * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED. + * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload + * Request Message. + * Added new defines for the ImageType field of FWUpload + * Request Message. + * Added new values for the RegionType field in the Layout + * Data sections of the FLASH Layout Extended Image Data. + * Added new defines for the ReasonCode field of + * Active Cable Exception Event. + * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and + * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE. + * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and + * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
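The RDPQ array handshake noted in the 02.00.23 entry above pairs an IOCFacts capability bit with an IOCInit message flag. A minimal hedged sketch, where `facts` points at the raw Mpi2IOCFactsReply_t and `ioc_init` is the Mpi2IOCInitRequest_t being built (both assumptions of the sketch):

    /* Request Reply Descriptor Post Queue array mode only when the IOC
     * advertised support for it in IOCFacts.
     */
    if (le32_to_cpu(facts->IOCCapabilities) &
        MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)
            ioc_init.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;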
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. + * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related + * defines for the ReasonCode field. + * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD. + * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED + * to the ReasonCode field in PCIe Device Status Change + * Event Data. + * -------------------------------------------------------------------------- + +mpi2_raid.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * -------------------------------------------------------------------------- + +mpi2_sas.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- + +mpi2_targ.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to + * BufferPostFlags field of CommandBufferPostBase Request. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 10-02-08 02.00.03 Removed NextCmdBufferOffset from + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * Target Status Send Request only takes a single SGE for + * response data. + * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. + * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. + * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT + * request message structure. + * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and + * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. 
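The abort types recorded in the mpi2_targ.h 02.00.06 entry just above are selected through the AbortType field of the target mode abort request. A hedged sketch follows; the Mpi2TargetModeAbort_t typedef and MPI2_FUNCTION_TARGET_MODE_ABORT function code are assumed from mpi2_targ.h/mpi2.h, and posting the request is left out:

    /* Abort every outstanding target-mode command on this IOC. */
    Mpi2TargetModeAbort_t abort_req;

    memset(&abort_req, 0, sizeof(abort_req));
    abort_req.Function = MPI2_FUNCTION_TARGET_MODE_ABORT;
    abort_req.AbortType = MPI2_TARGET_MODE_ABORT_ALL_COMMANDS;
    /* ... post abort_req on the IOC request queue ... */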
+ * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * 11-18-14 02.00.08 Updated copyright information. + * 03-16-15 02.00.09 Updated for MPI v2.6. + * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. + * -------------------------------------------------------------------------- + +mpi2_tool.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Added MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * -------------------------------------------------------------------------- + +mpi2_type.h + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 11-18-14 02.00.01 Updated copyright information. + * -------------------------------------------------------------------------- + +mpi2_ra.h + * 05-06-09 02.00.00 Initial version. + * 11-18-14 02.00.01 Updated copyright information. + * -------------------------------------------------------------------------- + +mpi2_hbd.h + * 10-28-09 02.00.00 Initial version. + * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from + * HBD Action request, replaced by AdditionalInfo field. + * 11-18-11 02.00.02 Incorporating additions for MPI v2.5. + * 11-18-14 02.00.03 Updated copyright information. + * 02-17-16 02.00.04 Added SAS 4 22.5 Gb/s speed support. + * -------------------------------------------------------------------------- + +mpi2_pci.h + * 03-16-15 02.00.00 Initial version. + * 02-17-16 02.00.01 Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to + * NVME Encapsulated Request.
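The mpi2_image.h hunk later in this patch defines a FW image header followed by a chain of extended image headers linked through NextImageHeaderOffset. A hedged sketch of walking that chain over an image already loaded into host memory (the `u8 *image` buffer is an assumption of the sketch):

    /* NextImageHeaderOffset is a little-endian byte offset from the start
     * of the image; zero terminates the chain.
     */
    Mpi2FWImageHeader_t *fw_hdr = (Mpi2FWImageHeader_t *)image;
    u32 offset = le32_to_cpu(fw_hdr->NextImageHeaderOffset);

    while (offset) {
            Mpi2ExtImageHeader_t *ext = (Mpi2ExtImageHeader_t *)(image + offset);
            /* ext->ImageType is one of the MPI2_EXT_IMAGE_TYPE_* values */
            offset = le32_to_cpu(ext->NextImageHeaderOffset);
    }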
+ * -------------------------------------------------------------------------- + +mpi2_history.txt Parts list history + +Filename 02.00.50 02.00.49 02.00.48 +---------- -------- -------- -------- +mpi2.h 02.00.50 02.00.49 02.00.48 +mpi2_cnfg.h 02.00.42 02.00.41 02.00.40 +mpi2_init.h 02.00.21 02.00.21 02.00.21 +mpi2_ioc.h 02.00.34 02.00.33 02.00.32 +mpi2_raid.h 02.00.11 02.00.11 02.00.11 +mpi2_sas.h 02.00.10 02.00.10 02.00.10 +mpi2_targ.h 02.00.09 02.00.09 02.00.09 +mpi2_tool.h 02.00.14 02.00.14 02.00.14 +mpi2_type.h 02.00.01 02.00.01 02.00.01 +mpi2_ra.h 02.00.01 02.00.01 02.00.01 +mpi2_hbd.h 02.00.04 02.00.04 02.00.04 +mpi2_pci.h 02.00.02 02.00.02 02.00.02 + +Filename 02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.47 02.00.46 02.00.45 02.00.44 02.00.43 02.00.42 +mpi2_cnfg.h 02.00.39 02.00.39 02.00.38 02.00.37 02.00.36 02.00.35 +mpi2_init.h 02.00.21 02.00.21 02.00.21 02.00.21 02.00.21 02.00.20 +mpi2_ioc.h 02.00.31 02.00.30 02.00.29 02.00.28 02.00.28 02.00.27 +mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 +mpi2_sas.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 +mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 +mpi2_tool.h 02.00.14 02.00.14 02.00.13 02.00.13 02.00.13 02.00.13 +mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 +mpi2_ra.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 +mpi2_hbd.h 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04 02.00.03 +mpi2_pci.h 02.00.02 02.00.02 02.00.02 02.00.01 02.00.01 02.00.00 + +Filename 02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.41 02.00.40 02.00.39 02.00.38 02.00.37 02.00.36 +mpi2_cnfg.h 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 +mpi2_init.h 02.00.19 02.00.18 02.00.17 02.00.17 02.00.17 02.00.16 +mpi2_ioc.h 02.00.27 02.00.27 02.00.26 02.00.26 02.00.26 02.00.25 +mpi2_raid.h 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 +mpi2_sas.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09 +mpi2_targ.h 02.00.09 02.00.09 02.00.09 02.00.09 02.00.09 02.00.08 +mpi2_tool.h 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13 02.00.13 +mpi2_type.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 +mpi2_ra.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 +mpi2_hbd.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.03 02.00.03 +mpi2_pci.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 + +Filename 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.35 02.00.34 02.00.33 02.00.32 02.00.31 02.00.30 +mpi2_cnfg.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.25 +mpi2_init.h 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15 02.00.15 +mpi2_ioc.h 02.00.24 02.00.24 02.00.24 02.00.23 02.00.22 02.00.22 +mpi2_raid.h 02.00.10 02.00.10 02.00.10 02.00.10 02.00.10 02.00.09 +mpi2_sas.h 02.00.08 02.00.08 02.00.08 02.00.08 02.00.07 02.00.07 +mpi2_targ.h 02.00.07 02.00.06 02.00.06 02.00.06 02.00.06 02.00.06 +mpi2_tool.h 02.00.12 02.00.12 02.00.11 02.00.11 02.00.10 02.00.10 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 + +Filename 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 
02.00.24 +mpi2_cnfg.h 02.00.24 02.00.23 02.00.22 02.00.22 02.00.22 02.00.22 +mpi2_init.h 02.00.14 02.00.14 02.00.14 02.00.14 02.00.13 02.00.13 +mpi2_ioc.h 02.00.22 02.00.22 02.00.22 02.00.21 02.00.21 02.00.20 +mpi2_raid.h 02.00.09 02.00.09 02.00.09 02.00.08 02.00.08 02.00.08 +mpi2_sas.h 02.00.07 02.00.07 02.00.07 02.00.07 02.00.06 02.00.06 +mpi2_targ.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 +mpi2_tool.h 02.00.10 02.00.10 02.00.10 02.00.09 02.00.08 02.00.08 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 + +Filename 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 +mpi2_cnfg.h 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 02.00.17 +mpi2_init.h 02.00.12 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11 +mpi2_ioc.h 02.00.20 02.00.19 02.00.18 02.00.17 02.00.17 02.00.16 +mpi2_raid.h 02.00.07 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 +mpi2_sas.h 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 02.00.05 +mpi2_targ.h 02.00.05 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04 +mpi2_tool.h 02.00.08 02.00.07 02.00.07 02.00.06 02.00.06 02.00.06 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_hbd.h 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01 + +Filename 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 +mpi2_cnfg.h 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11 +mpi2_init.h 02.00.10 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 +mpi2_ioc.h 02.00.15 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11 +mpi2_raid.h 02.00.05 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03 +mpi2_sas.h 02.00.05 02.00.04 02.00.03 02.00.03 02.00.02 02.00.02 +mpi2_targ.h 02.00.04 02.00.04 02.00.04 02.00.03 02.00.03 02.00.03 +mpi2_tool.h 02.00.06 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_hbd.h 02.00.01 02.00.00 02.00.00 02.00.00 + +Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 +mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06 +mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03 +mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06 +mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02 +mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 +mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02 +mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 + +Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 +---------- -------- -------- -------- -------- -------- -------- +mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 +mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 +mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 +mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 
02.00.00 +mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 +mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 +mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 +mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h index 4631c92693d6..99f921c52cd9 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h @@ -1,516 +1,520 @@ -/* - * Copyright 2016-2020 Broadcom Limited. All rights reserved. - * - * Name: mpi2_image.h - * Description: Contains definitions for firmware and other component images. - * Creation Date: 04/02/2018 - * Version: 02.06.05 - * - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 08-01-18 02.06.00 Initial version for MPI 2.6.5. - * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 - * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE - * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES - * 12-17-18 02.06.04 Addd MPI2_EXT_IMAGE_TYPE_PBLP - * Shorten some defines to be compatible with DOS - * 06-24-19 02.06.05 Whitespace adjustments to help with identifier checking tool. - */ -#ifndef MPI2_IMAGE_H -#define MPI2_IMAGE_H - - -/* FW Image Header */ -typedef struct _MPI2_FW_IMAGE_HEADER -{ - U32 Signature; /* 0x00 */ - U32 Signature0; /* 0x04 */ - U32 Signature1; /* 0x08 */ - U32 Signature2; /* 0x0C */ - MPI2_VERSION_UNION MPIVersion; /* 0x10 */ - MPI2_VERSION_UNION FWVersion; /* 0x14 */ - MPI2_VERSION_UNION NVDATAVersion; /* 0x18 */ - MPI2_VERSION_UNION PackageVersion; /* 0x1C */ - U16 VendorID; /* 0x20 */ - U16 ProductID; /* 0x22 */ - U16 ProtocolFlags; /* 0x24 */ - U16 Reserved26; /* 0x26 */ - U32 IOCCapabilities; /* 0x28 */ - U32 ImageSize; /* 0x2C */ - U32 NextImageHeaderOffset; /* 0x30 */ - U32 Checksum; /* 0x34 */ - U32 Reserved38; /* 0x38 */ - U32 Reserved3C; /* 0x3C */ - U32 Reserved40; /* 0x40 */ - U32 Reserved44; /* 0x44 */ - U32 Reserved48; /* 0x48 */ - U32 Reserved4C; /* 0x4C */ - U32 Reserved50; /* 0x50 */ - U32 Reserved54; /* 0x54 */ - U32 Reserved58; /* 0x58 */ - U32 Reserved5C; /* 0x5C */ - U32 BootFlags; /* 0x60 */ /* reserved in MPI v2.5 and earlier */ - U32 FirmwareVersionNameWhat; /* 0x64 */ - U8 FirmwareVersionName[32]; /* 0x68 */ - U32 VendorNameWhat; /* 0x88 */ - U8 VendorName[32]; /* 0x8C */ - U32 PackageNameWhat; /* 0x88 */ - U8 PackageName[32]; /* 0x8C */ - U32 ReservedD0; /* 0xD0 */ - U32 ReservedD4; /* 0xD4 */ - U32 ReservedD8; /* 0xD8 */ - U32 ReservedDC; /* 0xDC */ - U32 ReservedE0; /* 0xE0 */ - U32 ReservedE4; /* 0xE4 */ - U32 ReservedE8; /* 0xE8 */ - U32 ReservedEC; /* 0xEC */ - U32 ReservedF0; /* 0xF0 */ - U32 ReservedF4; /* 0xF4 */ - U32 ReservedF8; /* 0xF8 */ - U32 ReservedFC; /* 0xFC */ -} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER, - Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t; - -/* Signature field */ -#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) -#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) -#define MPI2_FW_HEADER_SIGNATURE (0xEA000000) -#define MPI26_FW_HEADER_SIGNATURE (0xEB000000) - -/* Signature0 field */ -#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) -#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) -#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) /* Last byte is defined by architecture */ -#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) -#define 
MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) -#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) -#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02) -#define MPI26_FW_HEADER_SIGNATURE0 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_0) // legacy (0x5AEAA55A) -#define MPI26_FW_HEADER_SIGNATURE0_3516 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_1) -#define MPI26_FW_HEADER_SIGNATURE0_4008 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_3) - -/* Signature1 field */ -#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) -#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) -#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5) - -/* Signature2 field */ -#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) -#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) -#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA) - - -/* defines for using the ProductID field */ -#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) -#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) - -#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) -#define MPI2_FW_HEADER_PID_PROD_A (0x0000) -#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) -#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) - - -#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) -/* SAS ProductID Family bits */ -#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) -#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) -#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) -#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028) -#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031) - -/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ - -/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */ - -#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) -#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) - -#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60) -#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001) -#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002) -#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004) /* This image has a auto-discovery version of SPI */ - -#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) - -#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) - -#define MPI2_FW_HEADER_SIZE (0x100) - - -/**************************************************************************** - * Component Image Format and related defines * - ****************************************************************************/ - -/* Maximum number of Hash Exclusion entries in a Component Image Header */ -#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4) - -/* Hash Exclusion Format */ -typedef struct _MPI26_HASH_EXCLUSION_FORMAT -{ - U32 Offset; /* 0x00 */ - U32 Size; /* 0x04 */ -} MPI26_HASH_EXCLUSION_FORMAT, MPI2_POINTER PTR_MPI26_HASH_EXCLUSION_FORMAT, -Mpi26HashSxclusionFormat_t, MPI2_POINTER pMpi26HashExclusionFormat_t; - -/* FW Image Header */ -typedef struct _MPI26_COMPONENT_IMAGE_HEADER -{ - U32 Signature0; /* 0x00 */ - U32 LoadAddress; /* 0x04 */ - U32 DataSize; /* 0x08 */ - U32 StartAddress; /* 0x0C */ - U32 Signature1; /* 0x10 */ - U32 FlashOffset; /* 0x14 */ - U32 FlashSize; /* 0x18 */ - U32 VersionStringOffset; /* 0x1C */ - U32 BuildDateStringOffset; /* 0x20 */ - U32 BuildTimeStringOffset; /* 0x24 */ - U32 EnvironmentVariableOffset; /* 0x28 */ - U32 ApplicationSpecific; /* 0x2C */ - U32 Signature2; /* 0x30 */ - U32 HeaderSize; /* 0x34 */ - U32 Crc; /* 0x38 */ - U8 NotFlashImage; /* 0x3C */ - U8 Compressed; /* 0x3D */ - U16 Reserved3E; /* 0x3E */ - U32 SecondaryFlashOffset; /* 0x40 */ - U32 Reserved44; /* 0x44 */ - U32 Reserved48; /* 0x48 
*/ - MPI2_VERSION_UNION RMCInterfaceVersion; /* 0x4C */ - MPI2_VERSION_UNION Reserved50; /* 0x50 */ - MPI2_VERSION_UNION FWVersion; /* 0x54 */ - MPI2_VERSION_UNION NvdataVersion; /* 0x58 */ - MPI26_HASH_EXCLUSION_FORMAT HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL]; /* 0x5C */ - U32 NextImageHeaderOffset; /* 0x7C */ - U32 Reserved80[32]; /* 0x80 -- 0xFC */ -} MPI26_COMPONENT_IMAGE_HEADER, MPI2_POINTER PTR_MPI26_COMPONENT_IMAGE_HEADER, - Mpi26ComponentImageHeader_t, MPI2_POINTER pMpi26ComponentImageHeader_t; - - -/**** Definitions for Signature0 field ****/ -#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) - -/**** Definitions for Signature1 field ****/ -#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) -#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) -#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) -#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) -#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) -#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) -#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) -#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) -#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) -#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) -#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) - -/**** Definitions for Signature2 field ****/ -#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) - -/**** Offsets for Image Header Fields ****/ -#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00) -#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04) -#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08) -#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C) -#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10) -#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14) -#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18) -#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C) -#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20) -#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24) -#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28) -#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C) -#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30) -#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34) -#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38) -#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C) -#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D) -#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40) -#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C) -#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54) -#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C) -#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C) - - -#define MPI26_IMAGE_HEADER_SIZE (0x100) - - -/* Extended Image Header */ -typedef struct _MPI2_EXT_IMAGE_HEADER - -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 Checksum; /* 0x04 */ - U32 ImageSize; /* 0x08 */ - U32 NextImageHeaderOffset; /* 0x0C */ - U32 PackageVersion; /* 0x10 */ - U32 Reserved3; /* 0x14 */ - U32 Reserved4; /* 0x18 */ - U32 Reserved5; /* 0x1C */ - U8 IdentifyString[32]; /* 0x20 */ -} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER, - Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t; - -/* useful offsets */ -#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) -#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) -#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) -#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10) - -#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) - -/* defines for the ImageType field */ 
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) -#define MPI2_EXT_IMAGE_TYPE_FW (0x01) -#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) -#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) -#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) -#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) -#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) -#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) -#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) /* MPI v2.5 and newer */ -#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) -#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) -#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) -#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) - -#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */ - - -/* FLASH Layout Extended Image Data */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check RegionsPerLayout at runtime. - */ -#ifndef MPI2_FLASH_NUMBER_OF_REGIONS -#define MPI2_FLASH_NUMBER_OF_REGIONS (1) -#endif - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check NumberOfLayouts at runtime. - */ -#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS -#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) -#endif - -typedef struct _MPI2_FLASH_REGION -{ - U8 RegionType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 RegionOffset; /* 0x04 */ - U32 RegionSize; /* 0x08 */ - U32 Reserved3; /* 0x0C */ -} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION, - Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t; - -typedef struct _MPI2_FLASH_LAYOUT -{ - U32 FlashSize; /* 0x00 */ - U32 Reserved1; /* 0x04 */ - U32 Reserved2; /* 0x08 */ - U32 Reserved3; /* 0x0C */ - MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */ -} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT, - Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t; - -typedef struct _MPI2_FLASH_LAYOUT_DATA -{ - U8 ImageRevision; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 SizeOfRegion; /* 0x02 */ - U8 Reserved2; /* 0x03 */ - U16 NumberOfLayouts; /* 0x04 */ - U16 RegionsPerLayout; /* 0x06 */ - U16 MinimumSectorAlignment; /* 0x08 */ - U16 Reserved3; /* 0x0A */ - U32 Reserved4; /* 0x0C */ - MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */ -} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA, - Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t; - -/* defines for the RegionType field */ -#define MPI2_FLASH_REGION_UNUSED (0x00) -#define MPI2_FLASH_REGION_FIRMWARE (0x01) -#define MPI2_FLASH_REGION_BIOS (0x02) -#define MPI2_FLASH_REGION_NVDATA (0x03) -#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) -#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) -#define MPI2_FLASH_REGION_CONFIG_1 (0x07) -#define MPI2_FLASH_REGION_CONFIG_2 (0x08) -#define MPI2_FLASH_REGION_MEGARAID (0x09) -#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) -#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) /* older name */ -#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) -#define MPI2_FLASH_REGION_SBR (0x0E) -#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F) -#define MPI2_FLASH_REGION_HIIM (0x10) -#define MPI2_FLASH_REGION_HIIA (0x11) -#define MPI2_FLASH_REGION_CTLR (0x12) -#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13) -#define MPI2_FLASH_REGION_MR_NVDATA (0x14) -#define MPI2_FLASH_REGION_CPLD (0x15) -#define MPI2_FLASH_REGION_PSOC (0x16) - -/* ImageRevision */ -#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) - - -/* Supported Devices Extended Image Data */ - -/* - * Host code (drivers, 
BIOS, utilities, etc.) should leave this define set to - * one and check NumberOfDevices at runtime. - */ -#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES -#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) -#endif - -typedef struct _MPI2_SUPPORTED_DEVICE -{ - U16 DeviceID; /* 0x00 */ - U16 VendorID; /* 0x02 */ - U16 DeviceIDMask; /* 0x04 */ - U16 Reserved1; /* 0x06 */ - U8 LowPCIRev; /* 0x08 */ - U8 HighPCIRev; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ -} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE, - Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t; - -typedef struct _MPI2_SUPPORTED_DEVICES_DATA -{ - U8 ImageRevision; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 NumberOfDevices; /* 0x02 */ - U8 Reserved2; /* 0x03 */ - U32 Reserved3; /* 0x04 */ - MPI2_SUPPORTED_DEVICE SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */ -} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA, - Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t; - -/* ImageRevision */ -#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) - - -/* Init Extended Image Data */ - -typedef struct _MPI2_INIT_IMAGE_FOOTER - -{ - U32 BootFlags; /* 0x00 */ - U32 ImageSize; /* 0x04 */ - U32 Signature0; /* 0x08 */ - U32 Signature1; /* 0x0C */ - U32 Signature2; /* 0x10 */ - U32 ResetVector; /* 0x14 */ -} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER, - Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t; - -/* defines for the BootFlags field */ -#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) - -/* defines for the ImageSize field */ -#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) - -/* defines for the Signature0 field */ -#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) -#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) - -/* defines for the Signature1 field */ -#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) -#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) - -/* defines for the Signature2 field */ -#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) -#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) - -/* Signature fields as individual bytes */ -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) - -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) - -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) -#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) - -/* defines for the ResetVector field */ -#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) - - -/* Encrypted Hash Extended Image Data */ - -typedef struct _MPI25_ENCRYPTED_HASH_ENTRY -{ - U8 HashImageType; /* 0x00 */ - U8 HashAlgorithm; /* 0x01 */ - U8 EncryptionAlgorithm; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U32 Reserved2; /* 0x04 */ - U32 EncryptedHash[1]; /* 0x08 */ /* variable length */ -} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY, - Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; - -/* values for HashImageType */ -#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) -#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) -#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) - -#define MPI26_HASH_IMAGE_TYPE_UNUSED (0x00) -#define 
MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01) -#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02) -#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03) - -/* values for HashAlgorithm */ -#define MPI25_HASH_ALGORITHM_UNUSED (0x00) -#define MPI25_HASH_ALGORITHM_SHA256 (0x01) - -#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0) -#define MPI26_HASH_ALGORITHM_VER_NONE (0x00) -#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20) -#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40) -#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60) -#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) -#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01) -#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02) - - -/* values for EncryptionAlgorithm */ -#define MPI25_ENCRYPTION_ALG_UNUSED (0x00) -#define MPI25_ENCRYPTION_ALG_RSA256 (0x01) - -#define MPI26_ENCRYPTION_ALG_UNUSED (0x00) -#define MPI26_ENCRYPTION_ALG_RSA256 (0x01) -#define MPI26_ENCRYPTION_ALG_RSA512 (0x02) -#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03) -#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04) -#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05) - -typedef struct _MPI25_ENCRYPTED_HASH_DATA -{ - U8 ImageVersion; /* 0x00 */ - U8 NumHash; /* 0x01 */ - U16 Reserved1; /* 0x02 */ - U32 Reserved2; /* 0x04 */ - MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ /* variable number of entries */ -} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA, - Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t; - - -#endif /* MPI2_IMAGE_H */ - +/* + * Copyright 2016-2020 Broadcom Limited. All rights reserved. + * + * Name: mpi2_image.h + * Description: Contains definitions for firmware and other component images. + * Creation Date: 04/02/2018 + * Version: 02.06.05 + * + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 08-01-18 02.06.00 Initial version for MPI 2.6.5. + * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 + * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE + * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP + * Shorten some defines to be compatible with DOS + * 06-24-19 02.06.05 Whitespace adjustments to help with identifier checking tool. 
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP + * Added MPI2_FLASH_REGION_COREDUMP + */ +#ifndef MPI2_IMAGE_H +#define MPI2_IMAGE_H + + +/* FW Image Header */ +typedef struct _MPI2_FW_IMAGE_HEADER +{ + U32 Signature; /* 0x00 */ + U32 Signature0; /* 0x04 */ + U32 Signature1; /* 0x08 */ + U32 Signature2; /* 0x0C */ + MPI2_VERSION_UNION MPIVersion; /* 0x10 */ + MPI2_VERSION_UNION FWVersion; /* 0x14 */ + MPI2_VERSION_UNION NVDATAVersion; /* 0x18 */ + MPI2_VERSION_UNION PackageVersion; /* 0x1C */ + U16 VendorID; /* 0x20 */ + U16 ProductID; /* 0x22 */ + U16 ProtocolFlags; /* 0x24 */ + U16 Reserved26; /* 0x26 */ + U32 IOCCapabilities; /* 0x28 */ + U32 ImageSize; /* 0x2C */ + U32 NextImageHeaderOffset; /* 0x30 */ + U32 Checksum; /* 0x34 */ + U32 Reserved38; /* 0x38 */ + U32 Reserved3C; /* 0x3C */ + U32 Reserved40; /* 0x40 */ + U32 Reserved44; /* 0x44 */ + U32 Reserved48; /* 0x48 */ + U32 Reserved4C; /* 0x4C */ + U32 Reserved50; /* 0x50 */ + U32 Reserved54; /* 0x54 */ + U32 Reserved58; /* 0x58 */ + U32 Reserved5C; /* 0x5C */ + U32 BootFlags; /* 0x60 */ /* reserved in MPI v2.5 and earlier */ + U32 FirmwareVersionNameWhat; /* 0x64 */ + U8 FirmwareVersionName[32]; /* 0x68 */ + U32 VendorNameWhat; /* 0x88 */ + U8 VendorName[32]; /* 0x8C */ + U32 PackageNameWhat; /* 0x88 */ + U8 PackageName[32]; /* 0x8C */ + U32 ReservedD0; /* 0xD0 */ + U32 ReservedD4; /* 0xD4 */ + U32 ReservedD8; /* 0xD8 */ + U32 ReservedDC; /* 0xDC */ + U32 ReservedE0; /* 0xE0 */ + U32 ReservedE4; /* 0xE4 */ + U32 ReservedE8; /* 0xE8 */ + U32 ReservedEC; /* 0xEC */ + U32 ReservedF0; /* 0xF0 */ + U32 ReservedF4; /* 0xF4 */ + U32 ReservedF8; /* 0xF8 */ + U32 ReservedFC; /* 0xFC */ +} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER, + Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t; + +/* Signature field */ +#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) +#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) +#define MPI2_FW_HEADER_SIGNATURE (0xEA000000) +#define MPI26_FW_HEADER_SIGNATURE (0xEB000000) + +/* Signature0 field */ +#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) +#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) +#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) /* Last byte is defined by architecture */ +#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02) +#define MPI26_FW_HEADER_SIGNATURE0 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_0) // legacy (0x5AEAA55A) +#define MPI26_FW_HEADER_SIGNATURE0_3516 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_1) +#define MPI26_FW_HEADER_SIGNATURE0_4008 (MPI26_FW_HEADER_SIGNATURE0_BASE + MPI26_FW_HEADER_SIGNATURE0_ARC_3) + +/* Signature1 field */ +#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) +#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) +#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5) + +/* Signature2 field */ +#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) +#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) +#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA) + + +/* defines for using the ProductID field */ +#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) + +#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI2_FW_HEADER_PID_PROD_A (0x0000) +#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) + + +#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) +/* SAS ProductID Family 
bits */ +#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) +#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) +#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) +#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028) +#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031) + +/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ + +/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */ + +#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) +#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) + +#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60) +#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001) +#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002) +#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004) /* This image has a auto-discovery version of SPI */ + +#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) + +#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) + +#define MPI2_FW_HEADER_SIZE (0x100) + + +/**************************************************************************** + * Component Image Format and related defines * + ****************************************************************************/ + +/* Maximum number of Hash Exclusion entries in a Component Image Header */ +#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4) + +/* Hash Exclusion Format */ +typedef struct _MPI26_HASH_EXCLUSION_FORMAT +{ + U32 Offset; /* 0x00 */ + U32 Size; /* 0x04 */ +} MPI26_HASH_EXCLUSION_FORMAT, MPI2_POINTER PTR_MPI26_HASH_EXCLUSION_FORMAT, +Mpi26HashSxclusionFormat_t, MPI2_POINTER pMpi26HashExclusionFormat_t; + +/* FW Image Header */ +typedef struct _MPI26_COMPONENT_IMAGE_HEADER +{ + U32 Signature0; /* 0x00 */ + U32 LoadAddress; /* 0x04 */ + U32 DataSize; /* 0x08 */ + U32 StartAddress; /* 0x0C */ + U32 Signature1; /* 0x10 */ + U32 FlashOffset; /* 0x14 */ + U32 FlashSize; /* 0x18 */ + U32 VersionStringOffset; /* 0x1C */ + U32 BuildDateStringOffset; /* 0x20 */ + U32 BuildTimeStringOffset; /* 0x24 */ + U32 EnvironmentVariableOffset; /* 0x28 */ + U32 ApplicationSpecific; /* 0x2C */ + U32 Signature2; /* 0x30 */ + U32 HeaderSize; /* 0x34 */ + U32 Crc; /* 0x38 */ + U8 NotFlashImage; /* 0x3C */ + U8 Compressed; /* 0x3D */ + U16 Reserved3E; /* 0x3E */ + U32 SecondaryFlashOffset; /* 0x40 */ + U32 Reserved44; /* 0x44 */ + U32 Reserved48; /* 0x48 */ + MPI2_VERSION_UNION RMCInterfaceVersion; /* 0x4C */ + MPI2_VERSION_UNION Reserved50; /* 0x50 */ + MPI2_VERSION_UNION FWVersion; /* 0x54 */ + MPI2_VERSION_UNION NvdataVersion; /* 0x58 */ + MPI26_HASH_EXCLUSION_FORMAT HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL]; /* 0x5C */ + U32 NextImageHeaderOffset; /* 0x7C */ + U32 Reserved80[32]; /* 0x80 -- 0xFC */ +} MPI26_COMPONENT_IMAGE_HEADER, MPI2_POINTER PTR_MPI26_COMPONENT_IMAGE_HEADER, + Mpi26ComponentImageHeader_t, MPI2_POINTER pMpi26ComponentImageHeader_t; + + +/**** Definitions for Signature0 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) + +/**** Definitions for Signature1 field ****/ +#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) +#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) +#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) +#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) +#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) +#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) +#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) +#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) +#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) +#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) +#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +#define 
MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544) /* little-endian "DUMP" */ + +/**** Definitions for Signature2 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) + +/**** Offsets for Image Header Fields ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00) +#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04) +#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08) +#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C) +#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10) +#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14) +#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18) +#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C) +#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20) +#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24) +#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28) +#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C) +#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30) +#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34) +#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38) +#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C) +#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D) +#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40) +#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C) +#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54) +#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C) +#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C) + + +#define MPI26_IMAGE_HEADER_SIZE (0x100) + + +/* Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER + +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Checksum; /* 0x04 */ + U32 ImageSize; /* 0x08 */ + U32 NextImageHeaderOffset; /* 0x0C */ + U32 PackageVersion; /* 0x10 */ + U32 Reserved3; /* 0x14 */ + U32 Reserved4; /* 0x18 */ + U32 Reserved5; /* 0x1C */ + U8 IdentifyString[32]; /* 0x20 */ +} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t; + +/* useful offsets */ +#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) +#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10) + +#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) + +/* defines for the ImageType field */ +#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) /* MPI v2.5 and newer */ +#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) +#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) +#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) + +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */ + + +/* FLASH Layout Extended Image Data */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check RegionsPerLayout at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_REGIONS +#define MPI2_FLASH_NUMBER_OF_REGIONS (1) +#endif + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumberOfLayouts at runtime. 
+ */ +#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS +#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) +#endif + +typedef struct _MPI2_FLASH_REGION +{ + U8 RegionType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 RegionOffset; /* 0x04 */ + U32 RegionSize; /* 0x08 */ + U32 Reserved3; /* 0x0C */ +} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION, + Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t; + +typedef struct _MPI2_FLASH_LAYOUT +{ + U32 FlashSize; /* 0x00 */ + U32 Reserved1; /* 0x04 */ + U32 Reserved2; /* 0x08 */ + U32 Reserved3; /* 0x0C */ + MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */ +} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT, + Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t; + +typedef struct _MPI2_FLASH_LAYOUT_DATA +{ + U8 ImageRevision; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 SizeOfRegion; /* 0x02 */ + U8 Reserved2; /* 0x03 */ + U16 NumberOfLayouts; /* 0x04 */ + U16 RegionsPerLayout; /* 0x06 */ + U16 MinimumSectorAlignment; /* 0x08 */ + U16 Reserved3; /* 0x0A */ + U32 Reserved4; /* 0x0C */ + MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */ +} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA, + Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t; + +/* defines for the RegionType field */ +#define MPI2_FLASH_REGION_UNUSED (0x00) +#define MPI2_FLASH_REGION_FIRMWARE (0x01) +#define MPI2_FLASH_REGION_BIOS (0x02) +#define MPI2_FLASH_REGION_NVDATA (0x03) +#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) +#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) +#define MPI2_FLASH_REGION_CONFIG_1 (0x07) +#define MPI2_FLASH_REGION_CONFIG_2 (0x08) +#define MPI2_FLASH_REGION_MEGARAID (0x09) +#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) +#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) /* older name */ +#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) +#define MPI2_FLASH_REGION_SBR (0x0E) +#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F) +#define MPI2_FLASH_REGION_HIIM (0x10) +#define MPI2_FLASH_REGION_HIIA (0x11) +#define MPI2_FLASH_REGION_CTLR (0x12) +#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13) +#define MPI2_FLASH_REGION_MR_NVDATA (0x14) +#define MPI2_FLASH_REGION_CPLD (0x15) +#define MPI2_FLASH_REGION_PSOC (0x16) +#define MPI2_FLASH_REGION_COREDUMP (0x17) + +/* ImageRevision */ +#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) + + +/* Supported Devices Extended Image Data */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumberOfDevices at runtime. 
+ */ +#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES +#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) +#endif + +typedef struct _MPI2_SUPPORTED_DEVICE +{ + U16 DeviceID; /* 0x00 */ + U16 VendorID; /* 0x02 */ + U16 DeviceIDMask; /* 0x04 */ + U16 Reserved1; /* 0x06 */ + U8 LowPCIRev; /* 0x08 */ + U8 HighPCIRev; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ +} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE, + Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t; + +typedef struct _MPI2_SUPPORTED_DEVICES_DATA +{ + U8 ImageRevision; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 NumberOfDevices; /* 0x02 */ + U8 Reserved2; /* 0x03 */ + U32 Reserved3; /* 0x04 */ + MPI2_SUPPORTED_DEVICE SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */ +} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA, + Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t; + +/* ImageRevision */ +#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) + + +/* Init Extended Image Data */ + +typedef struct _MPI2_INIT_IMAGE_FOOTER + +{ + U32 BootFlags; /* 0x00 */ + U32 ImageSize; /* 0x04 */ + U32 Signature0; /* 0x08 */ + U32 Signature1; /* 0x0C */ + U32 Signature2; /* 0x10 */ + U32 ResetVector; /* 0x14 */ +} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER, + Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t; + +/* defines for the BootFlags field */ +#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) + +/* defines for the ImageSize field */ +#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) + +/* defines for the Signature0 field */ +#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) +#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) + +/* defines for the Signature1 field */ +#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) +#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) + +/* defines for the Signature2 field */ +#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) +#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) + +/* Signature fields as individual bytes */ +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) + +/* defines for the ResetVector field */ +#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + + +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY +{ + U8 HashImageType; /* 0x00 */ + U8 HashAlgorithm; /* 0x01 */ + U8 EncryptionAlgorithm; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 EncryptedHash[1]; /* 0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY, + Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) + +#define MPI26_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02) +#define MPI26_HASH_IMAGE_TYPE_KEY_HASH 
+
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY
+{
+ U8 HashImageType; /* 0x00 */
+ U8 HashAlgorithm; /* 0x01 */
+ U8 EncryptionAlgorithm; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
+
+#define MPI26_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02)
+#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0)
+#define MPI26_HASH_ALGORITHM_VER_NONE (0x00)
+#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20)
+#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40)
+#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60)
+#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
+#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
+#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
+
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+#define MPI26_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI26_ENCRYPTION_ALG_RSA256 (0x01)
+#define MPI26_ENCRYPTION_ALG_RSA512 (0x02)
+#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03)
+#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04)
+#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA
+{
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ /* variable number of entries */
+} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
+ Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
+
+
+#endif /* MPI2_IMAGE_H */
+
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 63a0f4968f0c..6f6e7e7cfc23 100755
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,603 +1,603 @@
-/*
- * Copyright 2000-2020 Broadcom Inc. All rights reserved.
- *
- *
- * Name: mpi2_init.h
- * Title: MPI SCSI initiator mode messages and structures
- * Creation Date: June 23, 2006
- *
- * mpi2_init.h Version: 02.00.21
- *
- * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
- * prefix are for use only on MPI v2.5 products, and must not be used
- * with MPI v2.0 products. Unless otherwise noted, names beginning with
- * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
- *
- * Version History
- * ---------------
- *
- * Date Version Description
- * -------- -------- ------------------------------------------------------
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
- * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
- * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
- * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
- * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
- * Control field Task Attribute flags.
- * Moved LUN field defines to mpi2.h becasue they are
- * common to many structures.
- * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
- * Query Asynchronous Event.
- * Defined two new bits in the SlotStatus field of the SCSI
- * Enclosure Processor Request and Reply.
- * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
- * both SCSI IO Error Reply and SCSI Task Management Reply.
- * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
- * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
- * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
- * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
- * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
- * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. - * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command - * Priority to match SAM-4. - * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. - * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. - * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, - * replacing the Reserved4 field. - * 11-18-14 02.00.16 Updated copyright information. - * 03-16-15 02.00.17 Updated for MPI v2.6. - * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. - * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and - * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. - * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. - * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. - * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. - * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to - * be unique within first 32 characters. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_INIT_H -#define MPI2_INIT_H - -/***************************************************************************** -* -* SCSI Initiator Messages -* -*****************************************************************************/ - -/**************************************************************************** -* SCSI IO messages and associated structures -****************************************************************************/ - -typedef struct _MPI2_SCSI_IO_CDB_EEDP32 -{ - U8 CDB[20]; /* 0x00 */ - __be32 PrimaryReferenceTag; /* 0x14 */ - U16 PrimaryApplicationTag; /* 0x18 */ - U16 PrimaryApplicationTagMask; /* 0x1A */ - U32 TransferLength; /* 0x1C */ -} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, - Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; - -/* MPI v2.0 CDB field */ -typedef union _MPI2_SCSI_IO_CDB_UNION -{ - U8 CDB32[32]; - MPI2_SCSI_IO_CDB_EEDP32 EEDP32; - MPI2_SGE_SIMPLE_UNION SGE; -} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION, - Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t; - -/* MPI v2.0 SCSI IO Request Message */ -typedef struct _MPI2_SCSI_IO_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U32 SenseBufferLowAddress; /* 0x0C */ - U16 SGLFlags; /* 0x10 */ - U8 SenseBufferLength; /* 0x12 */ - U8 Reserved4; /* 0x13 */ - U8 SGLOffset0; /* 0x14 */ - U8 SGLOffset1; /* 0x15 */ - U8 SGLOffset2; /* 0x16 */ - U8 SGLOffset3; /* 0x17 */ - U32 SkipCount; /* 0x18 */ - U32 DataLength; /* 0x1C */ - U32 BidirectionalDataLength; /* 0x20 */ - U16 IoFlags; /* 0x24 */ - U16 EEDPFlags; /* 0x26 */ - U32 EEDPBlockSize; /* 0x28 */ - U32 SecondaryReferenceTag; /* 0x2C */ - U16 SecondaryApplicationTag; /* 0x30 */ - U16 ApplicationTagTranslationMask; /* 0x32 */ - U8 LUN[8]; /* 0x34 */ - U32 Control; /* 0x3C */ - MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ - -#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ - MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; -#endif - - MPI2_SGE_IO_UNION SGL; /* 0x60 */ - -} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, - Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; - -/* SCSI IO MsgFlags bits */ - -/* MsgFlags for SenseBufferAddressSpace */ -#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) -#define 
MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) -#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) -#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) /* for MPI v2.5 and earlier only */ -#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) /* for MPI v2.5 and earlier only */ -#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) /* for MPI v2.6 only */ - -/* SCSI IO SGLFlags bits */ - -/* base values for Data Location Address Space */ -#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) -#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) -#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) -#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) -#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) - -/* base values for Type */ -#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) -#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) -#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) -#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) - -/* shift values for each sub-field */ -#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) -#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) -#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) -#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) - -/* number of SGLOffset fields */ -#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) - -/* SCSI IO IoFlags bits */ - -/* Large CDB Address Space */ -#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) -#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) -#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) -#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) -#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) - -#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) -#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) -#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) -#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) -#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) - -/* SCSI IO EEDPFlags bits */ - -#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) -#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) -#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) -#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) - -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) - -#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) - -#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) -#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) -#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) -#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) -#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) -#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) - -/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ - -/* SCSI IO Control bits */ -#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) -#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) - -#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) -#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) -#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) -#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) -#define MPI2_SCSIIO_CONTROL_READ (0x02000000) -#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) - -#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) -#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) -/* alternate name for the previous field; called Command Priority in SAM-4 */ -#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) -#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) - -#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) -#define 
MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) -#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) -#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200) -#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) - -#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) -#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) -#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) -#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) - - -/* MPI v2.5 CDB field */ -typedef union _MPI25_SCSI_IO_CDB_UNION -{ - U8 CDB32[32]; - MPI2_SCSI_IO_CDB_EEDP32 EEDP32; - MPI2_IEEE_SGE_SIMPLE64 SGE; -} MPI25_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI25_SCSI_IO_CDB_UNION, - Mpi25ScsiIoCdb_t, MPI2_POINTER pMpi25ScsiIoCdb_t; - -/* MPI v2.5/2.6 SCSI IO Request Message */ -typedef struct _MPI25_SCSI_IO_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U32 SenseBufferLowAddress; /* 0x0C */ - U8 DMAFlags; /* 0x10 */ - U8 Reserved5; /* 0x11 */ - U8 SenseBufferLength; /* 0x12 */ - U8 Reserved4; /* 0x13 */ - U8 SGLOffset0; /* 0x14 */ - U8 SGLOffset1; /* 0x15 */ - U8 SGLOffset2; /* 0x16 */ - U8 SGLOffset3; /* 0x17 */ - U32 SkipCount; /* 0x18 */ - U32 DataLength; /* 0x1C */ - U32 BidirectionalDataLength; /* 0x20 */ - U16 IoFlags; /* 0x24 */ - U16 EEDPFlags; /* 0x26 */ - U16 EEDPBlockSize; /* 0x28 */ - U16 Reserved6; /* 0x2A */ - U32 SecondaryReferenceTag; /* 0x2C */ - U16 SecondaryApplicationTag; /* 0x30 */ - U16 ApplicationTagTranslationMask; /* 0x32 */ - U8 LUN[8]; /* 0x34 */ - U32 Control; /* 0x3C */ - MPI25_SCSI_IO_CDB_UNION CDB; /* 0x40 */ - -#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ - MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; -#endif - - MPI25_SGE_IO_UNION SGL; /* 0x60 */ - -} MPI25_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI25_SCSI_IO_REQUEST, - Mpi25SCSIIORequest_t, MPI2_POINTER pMpi25SCSIIORequest_t; - -/* use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ - -/* Defines for the DMAFlags field - * Each setting affects 4 SGLS, from SGL0 to SGL3. 
- * D = Data - * C = Cache DIF - * I = Interleaved - * H = Host DIF - */ -#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) -#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) - -/* number of SGLOffset fields */ -#define MPI25_SCSIIO_NUM_SGLOFFSETS (4) - -/* defines for the IoFlags field */ -#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) -#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) -#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) - -#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */ -#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) -#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) -#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) /* MPI v2.6 and later; IOC use only */ -#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) - -/* MPI v2.5 defines for the EEDPFlags bits */ -/* use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ -#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) -#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) -#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) -#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) -#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) - -#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) -#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) -#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) - -/* use MPI2_LUN_ defines from mpi2.h for the LUN field */ - -/* use MPI2_SCSIIO_CONTROL_ defines for the Control field */ - - -/* NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so - * MPI2_SCSI_IO_REPLY is used for both. 
- */ - -/* SCSI IO Error Reply Message */ -typedef struct _MPI2_SCSI_IO_REPLY -{ - U16 DevHandle; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U8 SCSIStatus; /* 0x0C */ - U8 SCSIState; /* 0x0D */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 TransferCount; /* 0x14 */ - U32 SenseCount; /* 0x18 */ - U32 ResponseInfo; /* 0x1C */ - U16 TaskTag; /* 0x20 */ - U16 SCSIStatusQualifier; /* 0x22 */ - U32 BidirectionalTransferCount; /* 0x24 */ - U32 EEDPErrorOffset; /* 0x28 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U16 EEDPObservedAppTag; /* 0x2C */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U16 EEDPObservedGuard; /* 0x2E */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ - U32 EEDPObservedRefTag; /* 0x30 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */ -} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY, - Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t; - -/* SCSI IO Reply MsgFlags bits */ -#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01) -#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02) -#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04) - - -/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ - -#define MPI2_SCSI_STATUS_GOOD (0x00) -#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) -#define MPI2_SCSI_STATUS_CONDITION_MET (0x04) -#define MPI2_SCSI_STATUS_BUSY (0x08) -#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) -#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) -#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) -#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /* obsolete */ -#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) -#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) -#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) - -/* SCSI IO Reply SCSIState flags */ - -#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) -#define MPI2_SCSI_STATE_TERMINATED (0x08) -#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04) -#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) -#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) - -/* masks and shifts for the ResponseInfo field */ - -#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) -#define MPI2_SCSI_RI_SHIFT_REASONCODE (0) - -#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) - - -/**************************************************************************** -* SCSI Task Management messages -****************************************************************************/ - -/* SCSI Task Management Request Message */ -typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 Reserved1; /* 0x04 */ - U8 TaskType; /* 0x05 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U8 LUN[8]; /* 0x0C */ - U32 Reserved4[7]; /* 0x14 */ - U16 TaskMID; /* 0x30 */ - U16 Reserved5; /* 0x32 */ -} MPI2_SCSI_TASK_MANAGE_REQUEST, - MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, - Mpi2SCSITaskManagementRequest_t, - MPI2_POINTER pMpi2SCSITaskManagementRequest_t; - -/* TaskType values */ - -#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) -#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) -#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) -#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) -#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) -#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK 
(0x07) -#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) -#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) -#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) - -/* obsolete TaskType name */ -#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) - -/* MsgFlags bits */ -#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) -#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00) -#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) -#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) -#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) -#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) -#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18) - - -/* SCSI Task Management Reply Message */ -typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY -{ - U16 DevHandle; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 ResponseCode; /* 0x04 */ - U8 TaskType; /* 0x05 */ - U8 Reserved1; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 Reserved3; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 TerminationCount; /* 0x14 */ - U32 ResponseInfo; /* 0x18 */ -} MPI2_SCSI_TASK_MANAGE_REPLY, - MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY, - Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t; - -/* ResponseCode values */ - -#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) -#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) -#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) -#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) -#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) -#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) -#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) -#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) - -/* masks and shifts for the ResponseInfo field */ - -#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) -#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) -#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) -#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) -#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) -#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) -#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) -#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) - - -/**************************************************************************** -* SCSI Enclosure Processor messages -****************************************************************************/ - -/* SCSI Enclosure Processor Request Message */ -typedef struct _MPI2_SEP_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 Action; /* 0x04 */ - U8 Flags; /* 0x05 */ - U8 Reserved1; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 SlotStatus; /* 0x0C */ - U32 Reserved3; /* 0x10 */ - U32 Reserved4; /* 0x14 */ - U32 Reserved5; /* 0x18 */ - U16 Slot; /* 0x1C */ - U16 EnclosureHandle; /* 0x1E */ -} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST, - Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t; - -/* Action defines */ -#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00) -#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01) - -/* Flags defines */ -#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) -#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) - -/* SlotStatus defines */ -#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */ -#define 
MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) -#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) -#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) -#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) -#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) -#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) -#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) -#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) -#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) -#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) -#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) - - -/* SCSI Enclosure Processor Reply Message */ -typedef struct _MPI2_SEP_REPLY -{ - U16 DevHandle; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 Action; /* 0x04 */ - U8 Flags; /* 0x05 */ - U8 Reserved1; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 Reserved3; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 SlotStatus; /* 0x14 */ - U32 Reserved4; /* 0x18 */ - U16 Slot; /* 0x1C */ - U16 EnclosureHandle; /* 0x1E */ -} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY, - Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t; - -/* SlotStatus defines */ -#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */ -#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) -#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) -#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) -#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) -#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) -#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) -#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) -#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) -#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) -#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) -#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) - - -#endif - - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_init.h + * Title: MPI SCSI initiator mode messages and structures + * Creation Date: June 23, 2006 + * + * mpi2_init.h Version: 02.00.21 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. + * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. + * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. + * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. + * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. + * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO + * Control field Task Attribute flags. + * Moved LUN field defines to mpi2.h becasue they are + * common to many structures. + * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to + * Query Asynchronous Event. 
+ * Defined two new bits in the SlotStatus field of the SCSI + * Enclosure Processor Request and Reply. + * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for + * both SCSI IO Error Reply and SCSI Task Management Reply. + * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. + * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. + * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. + * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. + * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command + * Priority to match SAM-4. + * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field. + * 11-18-14 02.00.16 Updated copyright information. + * 03-16-15 02.00.17 Updated for MPI v2.6. + * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. + * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and + * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. + * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. + * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. + * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. + * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to + * be unique within first 32 characters. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_INIT_H +#define MPI2_INIT_H + +/***************************************************************************** +* +* SCSI Initiator Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SCSI IO messages and associated structures +****************************************************************************/ + +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 +{ + U8 CDB[20]; /* 0x00 */ + __be32 PrimaryReferenceTag; /* 0x14 */ + U16 PrimaryApplicationTag; /* 0x18 */ + U16 PrimaryApplicationTagMask; /* 0x1A */ + U32 TransferLength; /* 0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; + +/* MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION +{ + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t; + +/* MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 SenseBufferLowAddress; /* 0x0C */ + U16 SGLFlags; /* 0x10 */ + U8 SenseBufferLength; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U32 EEDPBlockSize; /* 0x28 */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 
ApplicationTagTranslationMask; /* 0x32 */ + U8 LUN[8]; /* 0x34 */ + U32 Control; /* 0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ + +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI2_SGE_IO_UNION SGL; /* 0x60 */ + +} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; + +/* SCSI IO MsgFlags bits */ + +/* MsgFlags for SenseBufferAddressSpace */ +#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) +#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) +#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) /* for MPI v2.5 and earlier only */ +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) /* for MPI v2.5 and earlier only */ +#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) /* for MPI v2.6 only */ + +/* SCSI IO SGLFlags bits */ + +/* base values for Data Location Address Space */ +#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) + +/* base values for Type */ +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) + +/* shift values for each sub-field */ +#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) + +/* number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + +/* SCSI IO IoFlags bits */ + +/* Large CDB Address Space */ +#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) +#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) +#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) + +#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) +#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) +#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/* SCSI IO EEDPFlags bits */ + +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + +/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ + +/* SCSI IO Control bits */ +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) +#define 
MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) + +#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) +/* alternate name for the previous field; called Command Priority in SAM-4 */ +#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) + +#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) + +#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) +#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) +#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) +#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) + + +/* MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION +{ + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, MPI2_POINTER pMpi25ScsiIoCdb_t; + +/* MPI v2.5/2.6 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U32 SenseBufferLowAddress; /* 0x0C */ + U8 DMAFlags; /* 0x10 */ + U8 Reserved5; /* 0x11 */ + U8 SenseBufferLength; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U16 EEDPBlockSize; /* 0x28 */ + U16 Reserved6; /* 0x2A */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U8 LUN[8]; /* 0x34 */ + U32 Control; /* 0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /* 0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI25_SGE_IO_UNION SGL; /* 0x60 */ + +} MPI25_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, MPI2_POINTER pMpi25SCSIIORequest_t; + +/* use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ + +/* Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. 
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/* number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/* defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) /* MPI v2.6 and later; IOC use only */
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/* MPI v2.5 defines for the EEDPFlags bits */
+/* use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/* use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/* use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+
+/* NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
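/*
 * Illustrative sketch (editorial, not part of the patch): a typical MPI v2.5
 * read request combines a fast-path IoFlags value with a Control word built
 * from the data-direction and task-attribute defines earlier in this file.
 * Byte-swapping to little endian is omitted for brevity;
 * example_fill_io25() is a hypothetical helper, not an mpt3sas driver function.
 */
static void example_fill_io25(MPI25_SCSI_IO_REQUEST *req, U16 cdb_len)
{
	/* The CDB length lives in the low bits of IoFlags. */
	req->IoFlags = MPI25_SCSIIO_IOFLAGS_FAST_PATH |
		       (cdb_len & MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK);
	/* Simple-queue read. */
	req->Control = MPI2_SCSIIO_CONTROL_READ | MPI2_SCSIIO_CONTROL_SIMPLEQ;
}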
+
+/* SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 SCSIStatus; /* 0x0C */
+ U8 SCSIState; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TransferCount; /* 0x14 */
+ U32 SenseCount; /* 0x18 */
+ U32 ResponseInfo; /* 0x1C */
+ U16 TaskTag; /* 0x20 */
+ U16 SCSIStatusQualifier; /* 0x22 */
+ U32 BidirectionalTransferCount; /* 0x24 */
+ U32 EEDPErrorOffset; /* 0x28 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U16 EEDPObservedAppTag; /* 0x2C */ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U16 EEDPObservedGuard; /* 0x2E */ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U32 EEDPObservedRefTag; /* 0x30 */ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t;
+
+/* SCSI IO Reply MsgFlags bits */
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04)
+
+
+/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /* obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/* SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
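/*
 * Illustrative sketch (editorial, not part of the patch): decoding a SCSI IO
 * error reply usually means checking SCSIState before trusting SCSIStatus or
 * the sense data. Byte-swapping is omitted; example_reply_has_sense() is a
 * hypothetical helper, not an mpt3sas driver function.
 */
static int example_reply_has_sense(const MPI2_SCSI_IO_REPLY *reply)
{
	/* Sense data is only meaningful when autosense completed... */
	if (!(reply->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID))
		return 0;
	/* ...and it normally accompanies a CHECK CONDITION status. */
	return reply->SCSIStatus == MPI2_SCSI_STATUS_CHECK_CONDITION &&
	       reply->SenseCount != 0;
}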
+
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/* SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved1; /* 0x04 */
+ U8 TaskType; /* 0x05 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 LUN[8]; /* 0x0C */
+ U32 Reserved4[7]; /* 0x14 */
+ U16 TaskMID; /* 0x30 */
+ U16 Reserved5; /* 0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ MPI2_POINTER pMpi2SCSITaskManagementRequest_t;
+
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/* obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/* MsgFlags bits */
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18)
+
+
+/* SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 ResponseCode; /* 0x04 */
+ U8 TaskType; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TerminationCount; /* 0x14 */
+ U32 ResponseInfo; /* 0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/* SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 SlotStatus; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+ U32 Reserved5; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t;
+
+/* Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/* Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/* SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
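/*
 * Illustrative sketch (editorial, not part of the patch): lighting a slot's
 * fault indicator is a WRITE_STATUS SEP request addressed by enclosure and
 * slot. Byte-swapping and message submission are omitted;
 * example_sep_fault_led() is a hypothetical helper, not an mpt3sas driver
 * function.
 */
static void example_sep_fault_led(MPI2_SEP_REQUEST *req, U16 encl, U16 slot)
{
	req->Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	req->Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req->EnclosureHandle = encl;
	req->Slot = slot;
	/* Assert predicted-fault; the IOC forwards this to the enclosure. */
	req->SlotStatus = MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
}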
+
+
+/* SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Action; /* 0x04 */
+ U8 Flags; /* 0x05 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 SlotStatus; /* 0x14 */
+ U32 Reserved4; /* 0x18 */
+ U16 Slot; /* 0x1C */
+ U16 EnclosureHandle; /* 0x1E */
+} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t;
+
+/* SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) /* MPI v2.6 and newer */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+
+#endif
+
+
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index ad2b1292320b..84d8cd27c7d4 100755
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,1872 +1,1880 @@
-/*
- * Copyright 2000-2020 Broadcom Inc. All rights reserved.
- *
- *
- * Name: mpi2_ioc.h
- * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
- * Creation Date: October 11, 2006
- *
- * mpi2_ioc.h Version: 02.00.37
- *
- * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
- * prefix are for use only on MPI v2.5 products, and must not be used
- * with MPI v2.0 products. Unless otherwise noted, names beginning with
- * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
- *
- * Version History
- * ---------------
- *
- * Date Version Description
- * -------- -------- ------------------------------------------------------
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts - * reply with MaxReplyDescriptorPostQueueDepth. - * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum - * depth for the Reply Descriptor Post Queue. - * Added SASAddress field to Initiator Device Table - * Overflow Event data. - * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING - * for SAS Initiator Device Status Change Event data. - * Modified Reason Code defines for SAS Topology Change - * List Event data, including adding a bit for PHY Vacant - * status, and adding a mask for the Reason Code. - * Added define for - * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. - * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. - * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of - * the IOCFacts Reply. - * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Moved MPI2_VERSION_UNION to mpi2.h. - * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks - * instead of enables, and added SASBroadcastPrimitiveMasks - * field. - * Added Log Entry Added Event and related structure. - * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. - * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. - * Added MaxVolumes and MaxPersistentEntries fields to - * IOCFacts reply. - * Added ProtocalFlags and IOCCapabilities fields to - * MPI2_FW_IMAGE_HEADER. - * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. - * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to - * a U16 (from a U32). - * Removed extra 's' from EventMasks name. - * 06-27-08 02.00.08 Fixed an offset in a comment. - * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. - * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and - * renamed MinReplyFrameSize to ReplyFrameSize. - * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. - * Added two new RAIDOperation values for Integrated RAID - * Operations Status Event data. - * Added four new IR Configuration Change List Event data - * ReasonCode values. - * Added two new ReasonCode defines for SAS Device Status - * Change Event data. - * Added three new DiscoveryStatus bits for the SAS - * Discovery event data. - * Added Multiplexing Status Change bit to the PhyStatus - * field of the SAS Topology Change List event data. - * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. - * BootFlags are now product-specific. - * Added defines for the indivdual signature bytes - * for MPI2_INIT_IMAGE_FOOTER. - * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. - * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR - * define. - * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE - * define. - * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. - * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. - * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. - * Added two new reason codes for SAS Device Status Change - * Event. - * Added new event: SAS PHY Counter. - * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. - * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Added new product id family for 2208. - * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. - * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. - * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. - * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. - * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. - * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. 
- * Added Host Based Discovery Phy Event data. - * Added defines for ProductID Product field - * (MPI2_FW_HEADER_PID_). - * Modified values for SAS ProductID Family - * (MPI2_FW_HEADER_PID_FAMILY_). - * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. - * Added PowerManagementControl Request structures and - * defines. - * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. - * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. - * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. - * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added - * SASNotifyPrimitiveMasks field to - * MPI2_EVENT_NOTIFICATION_REQUEST. - * Added Temperature Threshold Event. - * Added Host Message Event. - * Added Send Host Message request and reply. - * 05-25-11 02.00.18 For Extended Image Header, added - * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and - * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. - * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. - * 08-24-11 02.00.19 Added PhysicalPort field to - * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. - * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. - * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. - * 03-29-12 02.00.21 Added a product specific range to event values. - * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. - * Added ElapsedSeconds field to - * MPI2_EVENT_DATA_IR_OPERATION_STATUS. - * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE - * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. - * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. - * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. - * Added Encrypted Hash Extended Image. - * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. - * 11-18-14 02.00.25 Updated copyright information. - * 03-16-15 02.00.26 Updated for MPI v2.6. - * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and - * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. - * Added MPI2_EVENT_PCIE_LINK_COUNTER and - * MPI26_EVENT_DATA_PCIE_LINK_COUNTER. - * Added MPI26_CTRL_OP_SHUTDOWN. - * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG - * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and - * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. - * 08-25-15 02.00.27 Added IC ARCH Class based signature defines. - * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event. - * Added ConigurationFlags field to IOCInit message to - * support NVMe SGL format control. - * Added PCIe SRIOV support. - * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support. - * Added PCIe 4 16.0 GT/sec speec support. - * Removed AHCI support. - * Removed SOP support. - * 07-01-16 02.00.29 Added Archclass for 4008 product. - * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED - * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload - * Request Message. - * Added new defines for the ImageType field of FWUpload - * Request Message. - * Added new values for the RegionType field in the Layout - * Data sections of the FLASH Layout Extended Image Data. - * Added new defines for the ReasonCode field of - * Active Cable Exception Event. - * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and - * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE. - * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and - * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR. - * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. - * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related - * defines for the ReasonCode field. - * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD. 
- * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED - * to the ReasonCode field in PCIe Device Status Change - * Event Data. - * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC. - * Moved FW image definitions ionto new mpi2_image,h - * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) - * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_IOC_H -#define MPI2_IOC_H - -/***************************************************************************** -* -* IOC Messages -* -*****************************************************************************/ - -/**************************************************************************** -* IOCInit message -****************************************************************************/ - -/* IOCInit Request message */ -typedef struct _MPI2_IOC_INIT_REQUEST -{ - U8 WhoInit; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 MsgVersion; /* 0x0C */ - U16 HeaderVersion; /* 0x0E */ - U32 Reserved5; /* 0x10 */ - U16 ConfigurationFlags; /* 0x14 */ - U8 HostPageSize; /* 0x16 */ - U8 HostMSIxVectors; /* 0x17 */ - U16 Reserved8; /* 0x18 */ - U16 SystemRequestFrameSize; /* 0x1A */ - U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ - U16 ReplyFreeQueueDepth; /* 0x1E */ - U32 SenseBufferAddressHigh; /* 0x20 */ - U32 SystemReplyAddressHigh; /* 0x24 */ - U64 SystemRequestFrameBaseAddress; /* 0x28 */ - U64 ReplyDescriptorPostQueueAddress;/* 0x30 */ - U64 ReplyFreeQueueAddress; /* 0x38 */ - U64 TimeStamp; /* 0x40 */ -} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST, - Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t; - -/* WhoInit values */ -#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) -#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) -#define MPI2_WHOINIT_ROM_BIOS (0x02) -#define MPI2_WHOINIT_PCI_PEER (0x03) -#define MPI2_WHOINIT_HOST_DRIVER (0x04) -#define MPI2_WHOINIT_MANUFACTURER (0x05) - -/* MsgFlags */ -#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) - -/* MsgVersion */ -#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) -#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) -#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) -#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) - -/* HeaderVersion */ -#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) -#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) -#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) -#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) - -/* ConfigurationFlags */ -#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) - -/* minimum depth for a Reply Descriptor Post Queue */ -#define MPI2_RDPQ_DEPTH_MIN (16) - -/* Reply Descriptor Post Queue Array Entry */ -typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY -{ - U64 RDPQBaseAddress; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ -} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, - MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, - Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry; - -/* IOCInit Reply message */ -typedef struct _MPI2_IOC_INIT_REPLY -{ - U8 WhoInit; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 
*/ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY, - Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t; - - -/**************************************************************************** -* IOCFacts message -****************************************************************************/ - -/* IOCFacts Request message */ -typedef struct _MPI2_IOC_FACTS_REQUEST -{ - U16 Reserved1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ -} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST, - Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t; - - -/* IOCFacts Reply message */ -typedef struct _MPI2_IOC_FACTS_REPLY -{ - U16 MsgVersion; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 HeaderVersion; /* 0x04 */ - U8 IOCNumber; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U16 IOCExceptions; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U8 MaxChainDepth; /* 0x14 */ - U8 WhoInit; /* 0x15 */ - U8 NumberOfPorts; /* 0x16 */ - U8 MaxMSIxVectors; /* 0x17 */ - U16 RequestCredit; /* 0x18 */ - U16 ProductID; /* 0x1A */ - U32 IOCCapabilities; /* 0x1C */ - MPI2_VERSION_UNION FWVersion; /* 0x20 */ - U16 IOCRequestFrameSize; /* 0x24 */ - U16 IOCMaxChainSegmentSize; /* 0x26 */ /* MPI 2.5 only; Reserved in MPI 2.0 */ - U16 MaxInitiators; /* 0x28 */ - U16 MaxTargets; /* 0x2A */ - U16 MaxSasExpanders; /* 0x2C */ - U16 MaxEnclosures; /* 0x2E */ - U16 ProtocolFlags; /* 0x30 */ - U16 HighPriorityCredit; /* 0x32 */ - U16 MaxReplyDescriptorPostQueueDepth; /* 0x34 */ - U8 ReplyFrameSize; /* 0x36 */ - U8 MaxVolumes; /* 0x37 */ - U16 MaxDevHandle; /* 0x38 */ - U16 MaxPersistentEntries; /* 0x3A */ - U16 MinDevHandle; /* 0x3C */ - U8 CurrentHostPageSize; /* 0x3E */ - U8 Reserved4; /* 0x3F */ - U8 SGEModifierMask; /* 0x40 */ - U8 SGEModifierValue; /* 0x41 */ - U8 SGEModifierShift; /* 0x42 */ - U8 Reserved5; /* 0x43 */ -} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, - Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; - -/* MsgVersion */ -#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) -#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) -#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) -#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) - -/* HeaderVersion */ -#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) -#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) -#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) -#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) - -/* IOCExceptions */ -#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400) -#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) -#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) - -#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) -#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) -#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) -#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) -#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) - -#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) -#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) -#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) -#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) -#define 
MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) - -/* defines for WhoInit field are after the IOCInit Request */ - -/* ProductID field uses MPI2_FW_HEADER_PID_ */ - -/* IOCCapabilities */ -#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) -#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) -#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) -#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) -#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) -#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) -#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) -#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) -#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) -#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) -#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) -#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) -#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) -#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) -#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) -#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) -#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) - -/* ProtocolFlags */ -#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) /* MPI v2.6 and later */ -#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) -#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) - - -/**************************************************************************** -* PortFacts message -****************************************************************************/ - -/* PortFacts Request message */ -typedef struct _MPI2_PORT_FACTS_REQUEST -{ - U16 Reserved1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 PortNumber; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ -} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST, - Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t; - -/* PortFacts Reply message */ -typedef struct _MPI2_PORT_FACTS_REPLY -{ - U16 Reserved1; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 PortNumber; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U8 Reserved5; /* 0x14 */ - U8 PortType; /* 0x15 */ - U16 Reserved6; /* 0x16 */ - U16 MaxPostedCmdBuffers; /* 0x18 */ - U16 Reserved7; /* 0x1A */ -} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY, - Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t; - -/* PortType values */ -#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) -#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) -#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) -#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) -#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) -#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) /* MPI v2.6 and later */ - - -/**************************************************************************** -* PortEnable message -****************************************************************************/ - -/* PortEnable Request message */ -typedef struct _MPI2_PORT_ENABLE_REQUEST -{ - U16 Reserved1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 Reserved2; /* 0x04 */ - U8 PortFlags; /* 0x05 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 
*/ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ -} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST, - Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t; - - -/* PortEnable Reply message */ -typedef struct _MPI2_PORT_ENABLE_REPLY -{ - U16 Reserved1; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U8 Reserved2; /* 0x04 */ - U8 PortFlags; /* 0x05 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY, - Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t; - - -/**************************************************************************** -* EventNotification message -****************************************************************************/ - -/* EventNotification Request message */ -#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) - -typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST -{ - U16 Reserved1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Reserved5; /* 0x0C */ - U32 Reserved6; /* 0x10 */ - U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ - U16 SASBroadcastPrimitiveMasks; /* 0x24 */ - U16 SASNotifyPrimitiveMasks; /* 0x26 */ - U32 Reserved8; /* 0x28 */ -} MPI2_EVENT_NOTIFICATION_REQUEST, - MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST, - Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t; - - -/* EventNotification Reply message */ -typedef struct _MPI2_EVENT_NOTIFICATION_REPLY -{ - U16 EventDataLength; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 AckRequired; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 Reserved3; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U16 Event; /* 0x14 */ - U16 Reserved4; /* 0x16 */ - U32 EventContext; /* 0x18 */ - U32 EventData[1]; /* 0x1C */ -} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY, - Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t; - -/* AckRequired */ -#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) -#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) - -/* Event */ -#define MPI2_EVENT_LOG_DATA (0x0001) -#define MPI2_EVENT_STATE_CHANGE (0x0002) -#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) -#define MPI2_EVENT_EVENT_CHANGE (0x000A) -#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */ -#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) -#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) -#define MPI2_EVENT_SAS_DISCOVERY (0x0016) -#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) -#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) -#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) -#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) -#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) -#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) /* MPI v2.6 and later */ -#define MPI2_EVENT_IR_VOLUME (0x001E) -#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) -#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) -#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) -#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) 
-#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) -#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) -#define MPI2_EVENT_SAS_QUIESCE (0x0025) -#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) -#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) -#define MPI2_EVENT_HOST_MESSAGE (0x0028) -#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) -#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) /* MPI v2.6 and later */ -#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) /* MPI v2.6 and later */ -#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) /* MPI v2.6 and later */ -#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) /* MPI v2.6 and later */ -#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) /* MPI v2.6 and later */ -#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) /* MPI v2.5 and later */ -#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) -#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) - - -/* Log Entry Added Event data */ - -/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ -#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) - -typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED -{ - U64 TimeStamp; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U16 LogSequence; /* 0x0C */ - U16 LogEntryQualifier; /* 0x0E */ - U8 VP_ID; /* 0x10 */ - U8 VF_ID; /* 0x11 */ - U16 Reserved2; /* 0x12 */ - U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */ -} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, - MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, - Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; - - -/* GPIO Interrupt Event data */ - -typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT -{ - U8 GPIONum; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ -} MPI2_EVENT_DATA_GPIO_INTERRUPT, - MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, - Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; - - -/* Temperature Threshold Event data */ - -typedef struct _MPI2_EVENT_DATA_TEMPERATURE -{ - U16 Status; /* 0x00 */ - U8 SensorNum; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U16 CurrentTemperature; /* 0x04 */ - U16 Reserved2; /* 0x06 */ - U32 Reserved3; /* 0x08 */ - U32 Reserved4; /* 0x0C */ -} MPI2_EVENT_DATA_TEMPERATURE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE, - Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t; - -/* Temperature Threshold Event data Status bits */ -#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) -#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) -#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) -#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) - - -/* Host Message Event data */ - -typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE -{ - U8 SourceVF_ID; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 Reserved3; /* 0x04 */ - U32 HostData[1]; /* 0x08 */ -} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE, - Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t; - - -/* Power Performance Change Event data */ - -typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE -{ - U8 CurrentPowerMode; /* 0x00 */ - U8 PreviousPowerMode; /* 0x01 */ - U16 Reserved1; /* 0x02 */ -} MPI2_EVENT_DATA_POWER_PERF_CHANGE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, - Mpi2EventDataPowerPerfChange_t, MPI2_POINTER pMpi2EventDataPowerPerfChange_t; - -/* defines for CurrentPowerMode and PreviousPowerMode fields */ -#define MPI2_EVENT_PM_INIT_MASK (0xC0) -#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) -#define MPI2_EVENT_PM_INIT_HOST (0x40) -#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) -#define 
MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) - -#define MPI2_EVENT_PM_MODE_MASK (0x07) -#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) -#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) -#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) -#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) -#define MPI2_EVENT_PM_MODE_STANDBY (0x06) - - -/* Active Cable Exception Event data */ - -typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT -{ - U32 ActiveCablePowerRequirement; /* 0x00 */ - U8 ReasonCode; /* 0x04 */ - U8 ReceptacleID; /* 0x05 */ - U16 Reserved1; /* 0x06 */ -} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - MPI2_POINTER PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - Mpi25EventDataActiveCableExcept_t, - MPI2_POINTER pMpi25EventDataActiveCableExcept_t, - MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - MPI2_POINTER PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, - Mpi26EventDataActiveCableExcept_t, - MPI2_POINTER pMpi26EventDataActiveCableExcept_t; - -/* MPI2.5 defines for the ReasonCode field */ -#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) -#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01) -#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02) - -/* MPI2.6 defines for the ReasonCode field */ -#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) -#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) -#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) - -/* Hard Reset Received Event data */ - -typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED -{ - U8 Reserved1; /* 0x00 */ - U8 Port; /* 0x01 */ - U16 Reserved2; /* 0x02 */ -} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, - MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, - Mpi2EventDataHardResetReceived_t, - MPI2_POINTER pMpi2EventDataHardResetReceived_t; - - -/* Task Set Full Event data */ -/* this event is obsolete */ - -typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL -{ - U16 DevHandle; /* 0x00 */ - U16 CurrentDepth; /* 0x02 */ -} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL, - Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t; - - -/* SAS Device Status Change Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE -{ - U16 TaskTag; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 PhysicalPort; /* 0x03 */ - U8 ASC; /* 0x04 */ - U8 ASCQ; /* 0x05 */ - U16 DevHandle; /* 0x06 */ - U32 Reserved2; /* 0x08 */ - U64 SASAddress; /* 0x0C */ - U8 LUN[8]; /* 0x14 */ -} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, - Mpi2EventDataSasDeviceStatusChange_t, - MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t; - -/* SAS Device Status Change Event data ReasonCode values */ -#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) -#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) -#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) -#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) -#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) -#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) -#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) -#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) -#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) -#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) -#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) -#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) -#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) - - -/* Integrated RAID Operation Status Event data */ - -typedef struct 
_MPI2_EVENT_DATA_IR_OPERATION_STATUS -{ - U16 VolDevHandle; /* 0x00 */ - U16 Reserved1; /* 0x02 */ - U8 RAIDOperation; /* 0x04 */ - U8 PercentComplete; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U32 ElapsedSeconds; /* 0x08 */ -} MPI2_EVENT_DATA_IR_OPERATION_STATUS, - MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, - Mpi2EventDataIrOperationStatus_t, - MPI2_POINTER pMpi2EventDataIrOperationStatus_t; - -/* Integrated RAID Operation Status Event data RAIDOperation values */ -#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) -#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) -#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) -#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) -#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) - - -/* Integrated RAID Volume Event data */ - -typedef struct _MPI2_EVENT_DATA_IR_VOLUME -{ - U16 VolDevHandle; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U32 NewValue; /* 0x04 */ - U32 PreviousValue; /* 0x08 */ -} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME, - Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t; - -/* Integrated RAID Volume Event data ReasonCode values */ -#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) -#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) -#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) - - -/* Integrated RAID Physical Disk Event data */ - -typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK -{ - U16 Reserved1; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 PhysDiskNum; /* 0x03 */ - U16 PhysDiskDevHandle; /* 0x04 */ - U16 Reserved2; /* 0x06 */ - U16 Slot; /* 0x08 */ - U16 EnclosureHandle; /* 0x0A */ - U32 NewValue; /* 0x0C */ - U32 PreviousValue; /* 0x10 */ -} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, - MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, - Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t; - -/* Integrated RAID Physical Disk Event data ReasonCode values */ -#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) -#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) -#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) - - -/* Integrated RAID Configuration Change List Event data */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check NumElements at runtime. 
- */ -#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT -#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) -#endif - -typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT -{ - U16 ElementFlags; /* 0x00 */ - U16 VolDevHandle; /* 0x02 */ - U8 ReasonCode; /* 0x04 */ - U8 PhysDiskNum; /* 0x05 */ - U16 PhysDiskDevHandle; /* 0x06 */ -} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, - Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t; - -/* IR Configuration Change List Event data ElementFlags values */ -#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) -#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) -#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) -#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) - -/* IR Configuration Change List Event data ReasonCode values */ -#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) -#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) -#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) -#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) -#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) -#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) -#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) -#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) -#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) - -typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST -{ - U8 NumElements; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 Reserved2; /* 0x02 */ - U8 ConfigNum; /* 0x03 */ - U32 Flags; /* 0x04 */ - MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT]; /* 0x08 */ -} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, - MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, - Mpi2EventDataIrConfigChangeList_t, - MPI2_POINTER pMpi2EventDataIrConfigChangeList_t; - -/* IR Configuration Change List Event data Flags values */ -#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) - - -/* SAS Discovery Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY -{ - U8 Flags; /* 0x00 */ - U8 ReasonCode; /* 0x01 */ - U8 PhysicalPort; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U32 DiscoveryStatus; /* 0x04 */ -} MPI2_EVENT_DATA_SAS_DISCOVERY, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, - Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t; - -/* SAS Discovery Event data Flags values */ -#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) -#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) - -/* SAS Discovery Event data ReasonCode values */ -#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) -#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) - -/* SAS Discovery Event data DiscoveryStatus values */ -#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) -#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) -#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) -#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) -#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) -#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) -#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) -#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) -#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) -#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) -#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) -#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) -#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) -#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) 
-#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) -#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) -#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) -#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) -#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) -#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) - - -/* SAS Broadcast Primitive Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE -{ - U8 PhyNum; /* 0x00 */ - U8 Port; /* 0x01 */ - U8 PortWidth; /* 0x02 */ - U8 Primitive; /* 0x03 */ -} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, - Mpi2EventDataSasBroadcastPrimitive_t, - MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t; - -/* defines for the Primitive field */ -#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) -#define MPI2_EVENT_PRIMITIVE_SES (0x02) -#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) -#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) -#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) -#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) -#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) -#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) - - -/* SAS Notify Primitive Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE -{ - U8 PhyNum; /* 0x00 */ - U8 Port; /* 0x01 */ - U8 Reserved1; /* 0x02 */ - U8 Primitive; /* 0x03 */ -} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, - Mpi2EventDataSasNotifyPrimitive_t, - MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t; - -/* defines for the Primitive field */ -#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) -#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) -#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) -#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) - - -/* SAS Initiator Device Status Change Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE -{ - U8 ReasonCode; /* 0x00 */ - U8 PhysicalPort; /* 0x01 */ - U16 DevHandle; /* 0x02 */ - U64 SASAddress; /* 0x04 */ -} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, - Mpi2EventDataSasInitDevStatusChange_t, - MPI2_POINTER pMpi2EventDataSasInitDevStatusChange_t; - -/* SAS Initiator Device Status Change event ReasonCode values */ -#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) -#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) - - -/* SAS Initiator Device Table Overflow Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW -{ - U16 MaxInit; /* 0x00 */ - U16 CurrentInit; /* 0x02 */ - U64 SASAddress; /* 0x04 */ -} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, - Mpi2EventDataSasInitTableOverflow_t, - MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t; - - -/* SAS Topology Change List Event data */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check NumEntries at runtime. 
- */ -#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT -#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) -#endif - -typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY -{ - U16 AttachedDevHandle; /* 0x00 */ - U8 LinkRate; /* 0x02 */ - U8 PhyStatus; /* 0x03 */ -} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, - Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t; - -typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST -{ - U16 EnclosureHandle; /* 0x00 */ - U16 ExpanderDevHandle; /* 0x02 */ - U8 NumPhys; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U8 NumEntries; /* 0x08 */ - U8 StartPhyNum; /* 0x09 */ - U8 ExpStatus; /* 0x0A */ - U8 PhysicalPort; /* 0x0B */ - MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C*/ -} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, - Mpi2EventDataSasTopologyChangeList_t, - MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; - -/* values for the ExpStatus field */ -#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) -#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) -#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) -#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) -#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) - -/* defines for the LinkRate field */ -#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) -#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) -#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) -#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) - -#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) -#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) -#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) -#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) -#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) -#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) -#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) -#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) -#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) -#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) -#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) -#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C) - -/* values for the PhyStatus field */ -#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) -#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) -/* values for the PhyStatus ReasonCode sub-field */ -#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) -#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) -#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) -#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) -#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) -#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) - - -/* SAS Enclosure Device Status Change Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE -{ - U16 EnclosureHandle; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 PhysicalPort; /* 0x03 */ - U64 EnclosureLogicalID; /* 0x04 */ - U16 NumSlots; /* 0x0C */ - U16 StartSlot; /* 0x0E */ - U32 PhyBits; /* 0x10 */ -} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, - Mpi2EventDataSasEnclDevStatusChange_t, - MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t, - MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, - MPI2_POINTER PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, - Mpi26EventDataEnclDevStatusChange_t, - MPI2_POINTER pMpi26EventDataEnclDevStatusChange_t; - -/* SAS Enclosure Device Status Change event ReasonCode values */ -#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) -#define 
MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) - -/* Enclosure Device Status Change event ReasonCode values */ -#define MPI26_EVENT_ENCL_RC_ADDED (0x01) -#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02) - -/* SAS PHY Counter Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER -{ - U64 TimeStamp; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 PhyEventCode; /* 0x0C */ - U8 PhyNum; /* 0x0D */ - U16 Reserved2; /* 0x0E */ - U32 PhyEventInfo; /* 0x10 */ - U8 CounterType; /* 0x14 */ - U8 ThresholdWindow; /* 0x15 */ - U8 TimeUnits; /* 0x16 */ - U8 Reserved3; /* 0x17 */ - U32 EventThreshold; /* 0x18 */ - U16 ThresholdFlags; /* 0x1C */ - U16 Reserved4; /* 0x1E */ -} MPI2_EVENT_DATA_SAS_PHY_COUNTER, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, - Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t; - -/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the PhyEventCode field */ - -/* use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ - -/* use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits field */ - -/* use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ - - -/* SAS Quiesce Event data */ - -typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE -{ - U8 ReasonCode; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 Reserved3; /* 0x04 */ -} MPI2_EVENT_DATA_SAS_QUIESCE, - MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE, - Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t; - -/* SAS Quiesce Event data ReasonCode values */ -#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) -#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) - - -typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR -{ - U16 DevHandle; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 PhysicalPort; /* 0x03 */ - U32 Reserved1[2]; /* 0x04 */ - U64 SASAddress; /* 0x0C */ - U32 Reserved2[2]; /* 0x14 */ -} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, - MPI2_POINTER PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, - Mpi25EventDataSasDeviceDiscoveryError_t, - MPI2_POINTER pMpi25EventDataSasDeviceDiscoveryError_t; - -/* SAS Device Discovery Error Event data ReasonCode values */ -#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) -#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) - - -/* Host Based Discovery Phy Event data */ - -typedef struct _MPI2_EVENT_HBD_PHY_SAS -{ - U8 Flags; /* 0x00 */ - U8 NegotiatedLinkRate; /* 0x01 */ - U8 PhyNum; /* 0x02 */ - U8 PhysicalPort; /* 0x03 */ - U32 Reserved1; /* 0x04 */ - U8 InitialFrame[28]; /* 0x08 */ -} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS, - Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t; - -/* values for the Flags field */ -#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) -#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) - -/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for the NegotiatedLinkRate field */ - -typedef union _MPI2_EVENT_HBD_DESCRIPTOR -{ - MPI2_EVENT_HBD_PHY_SAS Sas; -} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR, - Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t; - -typedef struct _MPI2_EVENT_DATA_HBD_PHY -{ - U8 DescriptorType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 Reserved3; /* 0x04 */ - MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */ -} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY, - Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t; - -/* values for the DescriptorType 
field */ -#define MPI2_EVENT_HBD_DT_SAS (0x01) - - -/* PCIe Device Status Change Event data (MPI v2.6 and later) */ - -typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE -{ - U16 TaskTag; /* 0x00 */ - U8 ReasonCode; /* 0x02 */ - U8 PhysicalPort; /* 0x03 */ - U8 ASC; /* 0x04 */ - U8 ASCQ; /* 0x05 */ - U16 DevHandle; /* 0x06 */ - U32 Reserved2; /* 0x08 */ - U64 WWID; /* 0x0C */ - U8 LUN[8]; /* 0x14 */ -} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, - MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, - Mpi26EventDataPCIeDeviceStatusChange_t, - MPI2_POINTER pMpi26EventDataPCIeDeviceStatusChange_t; - -/* PCIe Device Status Change Event data ReasonCode values */ -#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05) -#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07) -#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) -#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) -#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) -#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) -#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) -#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) -#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) -#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) -#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10) -#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11) - - -/* PCIe Enumeration Event data (MPI v2.6 and later) */ - -typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION -{ - U8 Flags; /* 0x00 */ - U8 ReasonCode; /* 0x01 */ - U8 PhysicalPort; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U32 EnumerationStatus; /* 0x04 */ -} MPI26_EVENT_DATA_PCIE_ENUMERATION, - MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, - Mpi26EventDataPCIeEnumeration_t, - MPI2_POINTER pMpi26EventDataPCIeEnumeration_t; - -/* PCIe Enumeration Event data Flags values */ -#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02) -#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01) - -/* PCIe Enumeration Event data ReasonCode values */ -#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01) -#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) - -/* PCIe Enumeration Event data EnumerationStatus values */ -#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) -#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) -#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) - - -/* PCIe Topology Change List Event data (MPI v2.6 and later) */ - -/* - * Host code (drivers, BIOS, utilities, etc.) should leave this define set to - * one and check NumEntries at runtime. 
- */ -#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT -#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1) -#endif - -typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY -{ - U16 AttachedDevHandle; /* 0x00 */ - U8 PortStatus; /* 0x02 */ - U8 Reserved1; /* 0x03 */ - U8 CurrentPortInfo; /* 0x04 */ - U8 Reserved2; /* 0x05 */ - U8 PreviousPortInfo; /* 0x06 */ - U8 Reserved3; /* 0x07 */ -} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, - MPI2_POINTER PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, - Mpi26EventPCIeTopoPortEntry_t, - MPI2_POINTER pMpi26EventPCIeTopoPortEntry_t; - -/* PCIe Topology Change List Event data PortStatus values */ -#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01) -#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) -#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) -#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) -#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) - -/* PCIe Topology Change List Event data defines for CurrentPortInfo and PreviousPortInfo */ -#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0) -#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) -#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10) -#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20) -#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30) -#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40) -#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50) - -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) -#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) - -typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST -{ - U16 EnclosureHandle; /* 0x00 */ - U16 SwitchDevHandle; /* 0x02 */ - U8 NumPorts; /* 0x04 */ - U8 Reserved1; /* 0x05 */ - U16 Reserved2; /* 0x06 */ - U8 NumEntries; /* 0x08 */ - U8 StartPortNum; /* 0x09 */ - U8 SwitchStatus; /* 0x0A */ - U8 PhysicalPort; /* 0x0B */ - MPI26_EVENT_PCIE_TOPO_PORT_ENTRY PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /* 0x0C */ -} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, - MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, - Mpi26EventDataPCIeTopologyChangeList_t, - MPI2_POINTER pMpi26EventDataPCIeTopologyChangeList_t; - -/* PCIe Topology Change List Event data SwitchStatus values */ -#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) -#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01) -#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) -#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) -#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) - -/* PCIe Link Counter Event data (MPI v2.6 and later) */ - -typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER -{ - U64 TimeStamp; /* 0x00 */ - U32 Reserved1; /* 0x08 */ - U8 LinkEventCode; /* 0x0C */ - U8 LinkNum; /* 0x0D */ - U16 Reserved2; /* 0x0E */ - U32 LinkEventInfo; /* 0x10 */ - U8 CounterType; /* 0x14 */ - U8 ThresholdWindow; /* 0x15 */ - U8 TimeUnits; /* 0x16 */ - U8 Reserved3; /* 0x17 */ - U32 EventThreshold; /* 0x18 */ - U16 ThresholdFlags; /* 0x1C */ - U16 Reserved4; /* 0x1E */ -} MPI26_EVENT_DATA_PCIE_LINK_COUNTER, - MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, - Mpi26EventDataPcieLinkCounter_t, MPI2_POINTER pMpi26EventDataPcieLinkCounter_t; - - -/* use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode field */ - -/* use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ - -/* use MPI26_PCIELINK3_TIME_UNITS_ 
values from mpi2_cnfg.h for the TimeUnits field */ - -/* use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ - -/**************************************************************************** -* EventAck message -****************************************************************************/ - -/* EventAck Request message */ -typedef struct _MPI2_EVENT_ACK_REQUEST -{ - U16 Reserved1; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Event; /* 0x0C */ - U16 Reserved5; /* 0x0E */ - U32 EventContext; /* 0x10 */ -} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST, - Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t; - - -/* EventAck Reply message */ -typedef struct _MPI2_EVENT_ACK_REPLY -{ - U16 Reserved1; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY, - Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t; - - -/**************************************************************************** -* SendHostMessage message -****************************************************************************/ - -/* SendHostMessage Request message */ -typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST -{ - U16 HostDataLength; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U8 Reserved4; /* 0x0C */ - U8 DestVF_ID; /* 0x0D */ - U16 Reserved5; /* 0x0E */ - U32 Reserved6; /* 0x10 */ - U32 Reserved7; /* 0x14 */ - U32 Reserved8; /* 0x18 */ - U32 Reserved9; /* 0x1C */ - U32 Reserved10; /* 0x20 */ - U32 HostData[1]; /* 0x24 */ -} MPI2_SEND_HOST_MESSAGE_REQUEST, - MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, - Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t; - - -/* SendHostMessage Reply message */ -typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY -{ - U16 HostDataLength; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY, - Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t; - - -/**************************************************************************** -* FWDownload message -****************************************************************************/ - -/* MPI v2.0 FWDownload Request message */ -typedef struct _MPI2_FW_DOWNLOAD_REQUEST -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 TotalImageSize; /* 0x0C */ - U32 Reserved5; /* 0x10 */ - MPI2_MPI_SGE_UNION SGL; /* 0x14 */ -} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER 
PTR_MPI2_FW_DOWNLOAD_REQUEST, - Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest; - -#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) - -#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) -#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) -#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) -#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) -#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) -#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) -#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) -#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) -#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) /* MPI v2.5 and newer */ -#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D) -#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E) -#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F) -#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10) -#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11) -#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) -#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) -#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) -#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) /* MPI v2.6 and newer */ -#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) /* MPI v2.6 and newer */ -#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) -#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF) /* MPI v2.6 and newer */ - - -/* MPI v2.0 FWDownload TransactionContext Element */ -typedef struct _MPI2_FW_DOWNLOAD_TCSGE -{ - U8 Reserved1; /* 0x00 */ - U8 ContextSize; /* 0x01 */ - U8 DetailsLength; /* 0x02 */ - U8 Flags; /* 0x03 */ - U32 Reserved2; /* 0x04 */ - U32 ImageOffset; /* 0x08 */ - U32 ImageSize; /* 0x0C */ -} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE, - Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t; - - -/* MPI v2.5 FWDownload Request message */ -typedef struct _MPI25_FW_DOWNLOAD_REQUEST -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 TotalImageSize; /* 0x0C */ - U32 Reserved5; /* 0x10 */ - U32 Reserved6; /* 0x14 */ - U32 ImageOffset; /* 0x18 */ - U32 ImageSize; /* 0x1C */ - MPI25_SGE_IO_UNION SGL; /* 0x20 */ -} MPI25_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_DOWNLOAD_REQUEST, - Mpi25FWDownloadRequest, MPI2_POINTER pMpi25FWDownloadRequest; - - -/* FWDownload Reply message */ -typedef struct _MPI2_FW_DOWNLOAD_REPLY -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY, - Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t; - - -/**************************************************************************** -* FWUpload message -****************************************************************************/ - -/* MPI v2.0 FWUpload Request message */ -typedef struct _MPI2_FW_UPLOAD_REQUEST -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Reserved5; /* 0x0C */ - U32 Reserved6; /* 0x10 */ - MPI2_MPI_SGE_UNION SGL; /* 0x14 */ -} 
MPI2_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST, - Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t; - -#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) -#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) -#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) -#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) -#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) -#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) -#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) -#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) -#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) -#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) -#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D) -#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E) -#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F) -#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10) -#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11) -#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12) -#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13) -#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14) - -/* MPI v2.0 FWUpload TransactionContext Element */ -typedef struct _MPI2_FW_UPLOAD_TCSGE -{ - U8 Reserved1; /* 0x00 */ - U8 ContextSize; /* 0x01 */ - U8 DetailsLength; /* 0x02 */ - U8 Flags; /* 0x03 */ - U32 Reserved2; /* 0x04 */ - U32 ImageOffset; /* 0x08 */ - U32 ImageSize; /* 0x0C */ -} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE, - Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t; - - -/* MPI v2.5 FWUpload Request message */ -typedef struct _MPI25_FW_UPLOAD_REQUEST -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Reserved5; /* 0x0C */ - U32 Reserved6; /* 0x10 */ - U32 Reserved7; /* 0x14 */ - U32 ImageOffset; /* 0x18 */ - U32 ImageSize; /* 0x1C */ - MPI25_SGE_IO_UNION SGL; /* 0x20 */ -} MPI25_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_UPLOAD_REQUEST, - Mpi25FWUploadRequest_t, MPI2_POINTER pMpi25FWUploadRequest_t; - - -/* FWUpload Reply message */ -typedef struct _MPI2_FW_UPLOAD_REPLY -{ - U8 ImageType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 ActualImageSize; /* 0x14 */ -} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY, - Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t; - - -/**************************************************************************** -* PowerManagementControl message -****************************************************************************/ - -/* PowerManagementControl Request message */ -typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST -{ - U8 Feature; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 Parameter1; /* 0x0C */ - U8 Parameter2; /* 0x0D */ - U8 Parameter3; /* 0x0E */ - U8 Parameter4; /* 0x0F */ - U32 Reserved5; /* 0x10 */ - U32 Reserved6; /* 0x14 */ -} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, - Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t; - -/* defines for the Feature field */ -#define 
MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) -#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) -#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */ -#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) -#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) /* reserved in MPI 2.0 */ -#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) -#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) - -/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ -/* Parameter1 contains a PHY number */ -/* Parameter2 indicates power condition action using these defines */ -#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) -#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) -#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) -/* Parameter3 and Parameter4 are reserved */ - -/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION Feature */ -/* Parameter1 contains SAS port width modulation group number */ -/* Parameter2 indicates IOC action using these defines */ -#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) -#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) -#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) -/* Parameter3 indicates desired modulation level using these defines */ -#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) -#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) -#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) -#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) -/* Parameter4 is reserved */ - -/* this next set (_PCIE_LINK) is obsolete */ -/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ -/* Parameter1 indicates desired PCIe link speed using these defines */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */ -#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */ -/* Parameter2 indicates desired PCIe link width using these defines */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */ -#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */ -/* Parameter3 and Parameter4 are reserved */ - -/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ -/* Parameter1 indicates desired IOC hardware clock speed using these defines */ -#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) -#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) -#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) -#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) -/* Parameter2, Parameter3, and Parameter4 are reserved */ - -/* parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature */ -/* Parameter1 indicates host action regarding global power management mode */ -#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) -#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) -#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) -/* Parameter2 indicates the requested global power management mode */ -#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) -#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) -#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) -/* Parameter3 and Parameter4 are reserved */ - - -/* PowerManagementControl Reply message */ -typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY -{ - U8 Feature; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 
Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY, - Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t; - - -/**************************************************************************** -* IO Unit Control messages (MPI v2.6 and later only.) -****************************************************************************/ - -/* IO Unit Control Request Message */ -typedef struct _MPI26_IOUNIT_CONTROL_REQUEST -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U8 PhyNum; /* 0x0E */ - U8 PrimFlags; /* 0x0F */ - U32 Primitive; /* 0x10 */ - U8 LookupMethod; /* 0x14 */ - U8 Reserved5; /* 0x15 */ - U16 SlotNumber; /* 0x16 */ - U64 LookupAddress; /* 0x18 */ - U32 IOCParameterValue; /* 0x20 */ - U32 Reserved7; /* 0x24 */ - U32 Reserved8; /* 0x28 */ -} MPI26_IOUNIT_CONTROL_REQUEST, - MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REQUEST, - Mpi26IoUnitControlRequest_t, MPI2_POINTER pMpi26IoUnitControlRequest_t; - -/* values for the Operation field */ -#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) -#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) -#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) -#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) -#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) -#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) -#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) -#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) -#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E) -#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F) -#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10) -#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11) -#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12) -#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13) -#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14) -#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15) -#define MPI26_CTRL_OP_SHUTDOWN (0x16) -#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) -#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) -#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) -#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A) -#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B) -#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) - -/* values for the PrimFlags field */ -#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08) -#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02) -#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01) - -/* values for the LookupMethod field */ -#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) -#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) -#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) - - -/* IO Unit Control Reply Message */ -typedef struct _MPI26_IOUNIT_CONTROL_REPLY -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI26_IOUNIT_CONTROL_REPLY, MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REPLY, - Mpi26IoUnitControlReply_t, MPI2_POINTER pMpi26IoUnitControlReply_t; - - 
-#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_ioc.h + * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages + * Creation Date: October 11, 2006 + * + * mpi2_ioc.h Version: 02.00.38 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to + * MaxTargets. + * Added TotalImageSize field to FWDownload Request. + * Added reserved words to FWUpload Request. + * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. + * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING + * for SAS Initiator Device Status Change Event data. + * Modified Reason Code defines for SAS Topology Change + * List Event data, including adding a bit for PHY Vacant + * status, and adding a mask for the Reason Code. + * Added define for + * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. + * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. + * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of + * the IOCFacts Reply. + * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Moved MPI2_VERSION_UNION to mpi2.h. + * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks + * instead of enables, and added SASBroadcastPrimitiveMasks + * field. + * Added Log Entry Added Event and related structure. + * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. + * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. + * Added MaxVolumes and MaxPersistentEntries fields to + * IOCFacts reply. + * Added ProtocolFlags and IOCCapabilities fields to + * MPI2_FW_IMAGE_HEADER. + * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. + * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to + * a U16 (from a U32). + * Removed extra 's' from EventMasks name. + * 06-27-08 02.00.08 Fixed an offset in a comment. + * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. + * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and + * renamed MinReplyFrameSize to ReplyFrameSize. + * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. + * Added two new RAIDOperation values for Integrated RAID + * Operations Status Event data. + * Added four new IR Configuration Change List Event data + * ReasonCode values. + * Added two new ReasonCode defines for SAS Device Status + * Change Event data. + * Added three new DiscoveryStatus bits for the SAS + * Discovery event data. + * Added Multiplexing Status Change bit to the PhyStatus + * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. + * BootFlags are now product-specific. + * Added defines for the individual signature bytes + * for MPI2_INIT_IMAGE_FOOTER. + * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. + * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR + * define. + * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE + * define. + * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. + * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. + * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. + * Added two new reason codes for SAS Device Status Change + * Event. + * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. + * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. + * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. + * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. + * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. + * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. + * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. + * Added Host Based Discovery Phy Event data. + * Added defines for ProductID Product field + * (MPI2_FW_HEADER_PID_). + * Modified values for SAS ProductID Family + * (MPI2_FW_HEADER_PID_FAMILY_). + * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. + * Added PowerManagementControl Request structures and + * defines. + * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. + * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. + * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added + * SASNotifyPrimitiveMasks field to + * MPI2_EVENT_NOTIFICATION_REQUEST. + * Added Temperature Threshold Event. + * Added Host Message Event. + * Added Send Host Message request and reply. + * 05-25-11 02.00.18 For Extended Image Header, added + * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and + * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. + * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. + * 08-24-11 02.00.19 Added PhysicalPort field to + * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. + * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. + * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. + * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. + * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. + * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. + * 11-18-14 02.00.25 Updated copyright information. + * 03-16-15 02.00.26 Updated for MPI v2.6. + * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and + * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. + * Added MPI2_EVENT_PCIE_LINK_COUNTER and + * MPI26_EVENT_DATA_PCIE_LINK_COUNTER. + * Added MPI26_CTRL_OP_SHUTDOWN. + * Added MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG. + * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and + * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. + * 08-25-15 02.00.27 Added IC ARCH Class based signature defines. + * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to + * support NVMe SGL format control. + * Added PCIe SRIOV support. + * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support. + * Added PCIe 4 16.0 GT/sec speed support. + * Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.29 Added Archclass for 4008 product. + * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED. + * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload + * Request Message. + * Added new defines for the ImageType field of FWUpload + * Request Message. + * Added new values for the RegionType field in the Layout + * Data sections of the FLASH Layout Extended Image Data. + * Added new defines for the ReasonCode field of + * Active Cable Exception Event. + * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and + * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE. + * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and + * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR. + * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. + * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related + * defines for the ReasonCode field. + * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD. + * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED + * to the ReasonCode field in PCIe Device Status Change + * Event Data. + * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC. + * Moved FW image definitions into new mpi2_image.h + * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) + * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE + * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED + * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP + * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_IOC_H +#define MPI2_IOC_H + +/***************************************************************************** +* +* IOC Messages +* +*****************************************************************************/ + +/**************************************************************************** +* IOCInit message +****************************************************************************/ + +/* IOCInit Request message */ +typedef struct _MPI2_IOC_INIT_REQUEST +{ + U8 WhoInit; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 MsgVersion; /* 0x0C */ + U16 HeaderVersion; /* 0x0E */ + U32 Reserved5; /* 0x10 */ + U16 ConfigurationFlags; /* 0x14 */ + U8 HostPageSize; /* 0x16 */ + U8 HostMSIxVectors; /* 0x17 */ + U16 Reserved8; /* 0x18 */ + U16 SystemRequestFrameSize; /* 0x1A */ + U16 ReplyDescriptorPostQueueDepth; /* 0x1C */ + U16 ReplyFreeQueueDepth; /* 0x1E */ + U32 SenseBufferAddressHigh; /* 0x20 */ + U32 SystemReplyAddressHigh; /* 0x24 */ + U64 SystemRequestFrameBaseAddress; /* 0x28 */ + U64 ReplyDescriptorPostQueueAddress;/* 0x30 */ + U64 ReplyFreeQueueAddress; /* 0x38 */ + U64 TimeStamp; /* 0x40 */ +} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t; + +/* WhoInit values */ +#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI2_WHOINIT_ROM_BIOS (0x02) +#define MPI2_WHOINIT_PCI_PEER (0x03) +#define
MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_WHOINIT_MANUFACTURER (0x05) + +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + +/* MsgVersion */ +#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) + +/* HeaderVersion */ +#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) + +/* ConfigurationFlags */ +#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) +#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002) + +/* minimum depth for a Reply Descriptor Post Queue */ +#define MPI2_RDPQ_DEPTH_MIN (16) + +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY +{ + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, + MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, + Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry; + +/* IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY +{ + U8 WhoInit; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t; + + +/**************************************************************************** +* IOCFacts message +****************************************************************************/ + +/* IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ +} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t; + + +/* IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY +{ + U16 MsgVersion; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 HeaderVersion; /* 0x04 */ + U8 IOCNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U16 IOCExceptions; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 MaxChainDepth; /* 0x14 */ + U8 WhoInit; /* 0x15 */ + U8 NumberOfPorts; /* 0x16 */ + U8 MaxMSIxVectors; /* 0x17 */ + U16 RequestCredit; /* 0x18 */ + U16 ProductID; /* 0x1A */ + U32 IOCCapabilities; /* 0x1C */ + MPI2_VERSION_UNION FWVersion; /* 0x20 */ + U16 IOCRequestFrameSize; /* 0x24 */ + U16 IOCMaxChainSegmentSize; /* 0x26 */ /* MPI 2.5 only; Reserved in MPI 2.0 */ + U16 MaxInitiators; /* 0x28 */ + U16 MaxTargets; /* 0x2A */ + U16 MaxSasExpanders; /* 0x2C */ + U16 MaxEnclosures; /* 0x2E */ + U16 ProtocolFlags; /* 0x30 */ + U16 HighPriorityCredit; /* 0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /* 0x34 */ + U8 ReplyFrameSize; /* 0x36 */ + U8 MaxVolumes; /* 0x37 */ + U16 MaxDevHandle; /* 0x38 */ + U16 MaxPersistentEntries; /* 0x3A */ + U16 MinDevHandle; /* 0x3C */ + U8 CurrentHostPageSize; /* 0x3E */ 
+ U8 Reserved4; /* 0x3F */ + U8 SGEModifierMask; /* 0x40 */ + U8 SGEModifierValue; /* 0x41 */ + U8 SGEModifierShift; /* 0x42 */ + U8 Reserved5; /* 0x43 */ +} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t; + +/* MsgVersion */ +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) + +/* HeaderVersion */ +#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) + +/* IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400) +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) +#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) + +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) + +#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) +#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) +#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) +#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) +#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) + +/* defines for WhoInit field are after the IOCInit Request */ + +/* ProductID field uses MPI2_FW_HEADER_PID_ */ + +/* IOCCapabilities */ +#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) +#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) +#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) +#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) +#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) +#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) +#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) +#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) +#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) + +/* ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) /* MPI v2.6 and later */ +#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + + +/**************************************************************************** +* PortFacts message +****************************************************************************/ + +/* PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 PortNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ +} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST, + 
Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t; + +/* PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 PortNumber; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 Reserved5; /* 0x14 */ + U8 PortType; /* 0x15 */ + U16 Reserved6; /* 0x16 */ + U16 MaxPostedCmdBuffers; /* 0x18 */ + U16 Reserved7; /* 0x1A */ +} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t; + +/* PortType values */ +#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) +#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) +#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) +#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) +#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) +#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) /* MPI v2.6 and later */ + + +/**************************************************************************** +* PortEnable message +****************************************************************************/ + +/* PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved2; /* 0x04 */ + U8 PortFlags; /* 0x05 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ +} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t; + + +/* PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U8 Reserved2; /* 0x04 */ + U8 PortFlags; /* 0x05 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t; + + +/**************************************************************************** +* EventNotification message +****************************************************************************/ + +/* EventNotification Request message */ +#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) + +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ + U16 SASBroadcastPrimitiveMasks; /* 0x24 */ + U16 SASNotifyPrimitiveMasks; /* 0x26 */ + U32 Reserved8; /* 0x28 */ +} MPI2_EVENT_NOTIFICATION_REQUEST, + MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t; + + +/* EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY +{ + U16 EventDataLength; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 AckRequired; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 
*/ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 Event; /* 0x14 */ + U16 Reserved4; /* 0x16 */ + U32 EventContext; /* 0x18 */ + U32 EventData[1]; /* 0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t; + +/* AckRequired */ +#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) +#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) + +/* Event */ +#define MPI2_EVENT_LOG_DATA (0x0001) +#define MPI2_EVENT_STATE_CHANGE (0x0002) +#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) +#define MPI2_EVENT_EVENT_CHANGE (0x000A) +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */ +#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) +#define MPI2_EVENT_SAS_DISCOVERY (0x0016) +#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) /* MPI v2.6 and later */ +#define MPI2_EVENT_IR_VOLUME (0x001E) +#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) +#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) +#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) +#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) +#define MPI2_EVENT_SAS_QUIESCE (0x0025) +#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) +#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) +#define MPI2_EVENT_HOST_MESSAGE (0x0028) +#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) /* MPI v2.6 and later */ +#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) /* MPI v2.6 and later */ +#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) /* MPI v2.6 and later */ +#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) /* MPI v2.5 and later */ +#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) +#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) + + +/* Log Entry Added Event data */ + +/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U16 LogSequence; /* 0x0C */ + U16 LogEntryQualifier; /* 0x0E */ + U8 VP_ID; /* 0x10 */ + U8 VF_ID; /* 0x11 */ + U16 Reserved2; /* 0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */ +} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t; + + +/* GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT +{ + U8 GPIONum; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t; + + +/* Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE +{ + U16 Status; /* 0x00 */ + U8 SensorNum; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U16 
CurrentTemperature; /* 0x04 */ + U16 Reserved2; /* 0x06 */ + U32 Reserved3; /* 0x08 */ + U32 Reserved4; /* 0x0C */ +} MPI2_EVENT_DATA_TEMPERATURE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t; + +/* Temperature Threshold Event data Status bits */ +#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) +#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) +#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) +#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) + + +/* Host Message Event data */ + +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE +{ + U8 SourceVF_ID; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ + U32 HostData[1]; /* 0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t; + + +/* Power Performance Change Event data */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE +{ + U8 CurrentPowerMode; /* 0x00 */ + U8 PreviousPowerMode; /* 0x01 */ + U16 Reserved1; /* 0x02 */ +} MPI2_EVENT_DATA_POWER_PERF_CHANGE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, MPI2_POINTER pMpi2EventDataPowerPerfChange_t; + +/* defines for CurrentPowerMode and PreviousPowerMode fields */ +#define MPI2_EVENT_PM_INIT_MASK (0xC0) +#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_INIT_HOST (0x40) +#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) +#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) + +#define MPI2_EVENT_PM_MODE_MASK (0x07) +#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) +#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) +#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) +#define MPI2_EVENT_PM_MODE_STANDBY (0x06) + + +/* Active Cable Exception Event data */ + +typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT +{ + U32 ActiveCablePowerRequirement; /* 0x00 */ + U8 ReasonCode; /* 0x04 */ + U8 ReceptacleID; /* 0x05 */ + U16 Reserved1; /* 0x06 */ +} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + MPI2_POINTER PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi25EventDataActiveCableExcept_t, + MPI2_POINTER pMpi25EventDataActiveCableExcept_t, + MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + MPI2_POINTER PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi26EventDataActiveCableExcept_t, + MPI2_POINTER pMpi26EventDataActiveCableExcept_t; + +/* MPI2.5 defines for the ReasonCode field */ +#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/* MPI2.6 defines for the ReasonCode field */ +#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/* Hard Reset Received Event data */ + +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED +{ + U8 Reserved1; /* 0x00 */ + U8 Port; /* 0x01 */ + U16 Reserved2; /* 0x02 */ +} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + MPI2_POINTER pMpi2EventDataHardResetReceived_t; + + +/* Task Set Full Event data */ +/* this event is obsolete */ + +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL +{ + U16 DevHandle; /* 0x00 */ + U16 CurrentDepth; /* 0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t; + + +/* SAS 
Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE +{ + U16 TaskTag; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U8 ASC; /* 0x04 */ + U8 ASCQ; /* 0x05 */ + U16 DevHandle; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + U64 SASAddress; /* 0x0C */ + U8 LUN[8]; /* 0x14 */ +} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t; + +/* SAS Device Status Change Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + + +/* Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS +{ + U16 VolDevHandle; /* 0x00 */ + U16 Reserved1; /* 0x02 */ + U8 RAIDOperation; /* 0x04 */ + U8 PercentComplete; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U32 ElapsedSeconds; /* 0x08 */ +} MPI2_EVENT_DATA_IR_OPERATION_STATUS, + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + MPI2_POINTER pMpi2EventDataIrOperationStatus_t; + +/* Integrated RAID Operation Status Event data RAIDOperation values */ +#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) +#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + + +/* Integrated RAID Volume Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_VOLUME +{ + U16 VolDevHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 NewValue; /* 0x04 */ + U32 PreviousValue; /* 0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t; + +/* Integrated RAID Volume Event data ReasonCode values */ +#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) + + +/* Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK +{ + U16 Reserved1; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysDiskNum; /* 0x03 */ + U16 PhysDiskDevHandle; /* 0x04 */ + U16 Reserved2; /* 0x06 */ + U16 Slot; /* 0x08 */ + U16 EnclosureHandle; /* 0x0A */ + U32 NewValue; /* 0x0C */ + U32 PreviousValue; /* 0x10 */ +} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t; + +/* Integrated RAID Physical Disk Event data ReasonCode values */ +#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) +#define 
MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + + +/* Integrated RAID Configuration Change List Event data */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumElements at runtime. + */ +#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT +#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT +{ + U16 ElementFlags; /* 0x00 */ + U16 VolDevHandle; /* 0x02 */ + U8 ReasonCode; /* 0x04 */ + U8 PhysDiskNum; /* 0x05 */ + U16 PhysDiskDevHandle; /* 0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t; + +/* IR Configuration Change List Event data ElementFlags values */ +#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) + +/* IR Configuration Change List Event data ReasonCode values */ +#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST +{ + U8 NumElements; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 Reserved2; /* 0x02 */ + U8 ConfigNum; /* 0x03 */ + U32 Flags; /* 0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT]; /* 0x08 */ +} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + MPI2_POINTER pMpi2EventDataIrConfigChangeList_t; + +/* IR Configuration Change List Event data Flags values */ +#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) + + +/* SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY +{ + U8 Flags; /* 0x00 */ + U8 ReasonCode; /* 0x01 */ + U8 PhysicalPort; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 DiscoveryStatus; /* 0x04 */ +} MPI2_EVENT_DATA_SAS_DISCOVERY, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t; + +/* SAS Discovery Event data Flags values */ +#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) +#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) + +/* SAS Discovery Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) + +/* SAS Discovery Event data DiscoveryStatus values */ +#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define 
MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) +#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) + + +/* SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE +{ + U8 PhyNum; /* 0x00 */ + U8 Port; /* 0x01 */ + U8 PortWidth; /* 0x02 */ + U8 Primitive; /* 0x03 */ +} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t; + +/* defines for the Primitive field */ +#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) +#define MPI2_EVENT_PRIMITIVE_SES (0x02) +#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) +#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) +#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) +#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) + + +/* SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE +{ + U8 PhyNum; /* 0x00 */ + U8 Port; /* 0x01 */ + U8 Reserved1; /* 0x02 */ + U8 Primitive; /* 0x03 */ +} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t; + +/* defines for the Primitive field */ +#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) +#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) +#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) +#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) + + +/* SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE +{ + U8 ReasonCode; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U16 DevHandle; /* 0x02 */ + U64 SASAddress; /* 0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + MPI2_POINTER pMpi2EventDataSasInitDevStatusChange_t; + +/* SAS Initiator Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) + + +/* SAS Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW +{ + U16 MaxInit; /* 0x00 */ + U16 CurrentInit; /* 0x02 */ + U64 SASAddress; /* 0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t; + + +/* SAS Topology Change List Event data */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumEntries at runtime. 
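+ *
+ * A minimal host-side sketch of walking the variable-length PHY list
+ * (illustrative only, not part of this API: "event_data" is a
+ * hypothetical driver-local pointer, and le16_to_cpu() assumes a
+ * little-endian wire format as handled by Linux hosts):
+ *
+ *   Mpi2EventDataSasTopologyChangeList_t *event_data = ...;
+ *   U8 i;
+ *   for (i = 0; i < event_data->NumEntries; i++) {
+ *       U16 attached = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ *       U8 reason = event_data->PHY[i].PhyStatus &
+ *                       MPI2_EVENT_SAS_TOPO_RC_MASK;
+ *       ... handle the change for phy (StartPhyNum + i) ...
+ *   }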
+ */ +#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT +#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY +{ + U16 AttachedDevHandle; /* 0x00 */ + U8 LinkRate; /* 0x02 */ + U8 PhyStatus; /* 0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t; + +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST +{ + U16 EnclosureHandle; /* 0x00 */ + U16 ExpanderDevHandle; /* 0x02 */ + U8 NumPhys; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U8 NumEntries; /* 0x08 */ + U8 StartPhyNum; /* 0x09 */ + U8 ExpStatus; /* 0x0A */ + U8 PhysicalPort; /* 0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C*/ +} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t; + +/* values for the ExpStatus field */ +#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) + +/* defines for the LinkRate field */ +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) + +#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) +#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) +#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C) + +/* values for the PhyStatus field */ +#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) +/* values for the PhyStatus ReasonCode sub-field */ +#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + + +/* SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE +{ + U16 EnclosureHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U64 EnclosureLogicalID; /* 0x04 */ + U16 NumSlots; /* 0x0C */ + U16 StartSlot; /* 0x0E */ + U32 PhyBits; /* 0x10 */ +} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t, + MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + MPI2_POINTER PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + Mpi26EventDataEnclDevStatusChange_t, + MPI2_POINTER pMpi26EventDataEnclDevStatusChange_t; + +/* SAS Enclosure Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define 
MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +/* Enclosure Device Status Change event ReasonCode values */ +#define MPI26_EVENT_ENCL_RC_ADDED (0x01) +#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02) + +/* SAS PHY Counter Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 PhyEventCode; /* 0x0C */ + U8 PhyNum; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 PhyEventInfo; /* 0x10 */ + U8 CounterType; /* 0x14 */ + U8 ThresholdWindow; /* 0x15 */ + U8 TimeUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 EventThreshold; /* 0x18 */ + U16 ThresholdFlags; /* 0x1C */ + U16 Reserved4; /* 0x1E */ +} MPI2_EVENT_DATA_SAS_PHY_COUNTER, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t; + +/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the PhyEventCode field */ + +/* use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ + +/* use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits field */ + +/* use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ + + +/* SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE +{ + U8 ReasonCode; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ +} MPI2_EVENT_DATA_SAS_QUIESCE, + MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t; + +/* SAS Quiesce Event data ReasonCode values */ +#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) + + +typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR +{ + U16 DevHandle; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U32 Reserved1[2]; /* 0x04 */ + U64 SASAddress; /* 0x0C */ + U32 Reserved2[2]; /* 0x14 */ +} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + MPI2_POINTER PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + Mpi25EventDataSasDeviceDiscoveryError_t, + MPI2_POINTER pMpi25EventDataSasDeviceDiscoveryError_t; + +/* SAS Device Discovery Error Event data ReasonCode values */ +#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + + +/* Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS +{ + U8 Flags; /* 0x00 */ + U8 NegotiatedLinkRate; /* 0x01 */ + U8 PhyNum; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U32 Reserved1; /* 0x04 */ + U8 InitialFrame[28]; /* 0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t; + +/* values for the Flags field */ +#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) +#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) + +/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for the NegotiatedLinkRate field */ + +typedef union _MPI2_EVENT_HBD_DESCRIPTOR +{ + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t; + +typedef struct _MPI2_EVENT_DATA_HBD_PHY +{ + U8 DescriptorType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 Reserved3; /* 0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t; + +/* values for the DescriptorType 
field */ +#define MPI2_EVENT_HBD_DT_SAS (0x01) + + +/* PCIe Device Status Change Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE +{ + U16 TaskTag; /* 0x00 */ + U8 ReasonCode; /* 0x02 */ + U8 PhysicalPort; /* 0x03 */ + U8 ASC; /* 0x04 */ + U8 ASCQ; /* 0x05 */ + U16 DevHandle; /* 0x06 */ + U32 Reserved2; /* 0x08 */ + U64 WWID; /* 0x0C */ + U8 LUN[8]; /* 0x14 */ +} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + Mpi26EventDataPCIeDeviceStatusChange_t, + MPI2_POINTER pMpi26EventDataPCIeDeviceStatusChange_t; + +/* PCIe Device Status Change Event data ReasonCode values */ +#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05) +#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10) +#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11) + + +/* PCIe Enumeration Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION +{ + U8 Flags; /* 0x00 */ + U8 ReasonCode; /* 0x01 */ + U8 PhysicalPort; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 EnumerationStatus; /* 0x04 */ +} MPI26_EVENT_DATA_PCIE_ENUMERATION, + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, + Mpi26EventDataPCIeEnumeration_t, + MPI2_POINTER pMpi26EventDataPCIeEnumeration_t; + +/* PCIe Enumeration Event data Flags values */ +#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02) +#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01) + +/* PCIe Enumeration Event data ReasonCode values */ +#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01) +#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) + +/* PCIe Enumeration Event data EnumerationStatus values */ +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) +#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) + + +/* PCIe Topology Change List Event data (MPI v2.6 and later) */ + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check NumEntries at runtime. 
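+ *
+ * A minimal host-side sketch of walking the variable-length port list
+ * (illustrative only; "event_data" is a hypothetical driver-local
+ * pointer to the event data defined below):
+ *
+ *   Mpi26EventDataPCIeTopologyChangeList_t *event_data = ...;
+ *   U8 i;
+ *   for (i = 0; i < event_data->NumEntries; i++) {
+ *       U8 status = event_data->PortEntry[i].PortStatus;
+ *       U8 lanes = event_data->PortEntry[i].CurrentPortInfo &
+ *                      MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK;
+ *       ... handle port (StartPortNum + i) ...
+ *   }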
+ */ +#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT +#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1) +#endif + +typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY +{ + U16 AttachedDevHandle; /* 0x00 */ + U8 PortStatus; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U8 CurrentPortInfo; /* 0x04 */ + U8 Reserved2; /* 0x05 */ + U8 PreviousPortInfo; /* 0x06 */ + U8 Reserved3; /* 0x07 */ +} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + MPI2_POINTER PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + Mpi26EventPCIeTopoPortEntry_t, + MPI2_POINTER pMpi26EventPCIeTopoPortEntry_t; + +/* PCIe Topology Change List Event data PortStatus values */ +#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) +#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) +#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) + +/* PCIe Topology Change List Event data defines for CurrentPortInfo and PreviousPortInfo */ +#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0) +#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10) +#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20) +#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30) +#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40) +#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50) + +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) + +typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST +{ + U16 EnclosureHandle; /* 0x00 */ + U16 SwitchDevHandle; /* 0x02 */ + U8 NumPorts; /* 0x04 */ + U8 Reserved1; /* 0x05 */ + U16 Reserved2; /* 0x06 */ + U8 NumEntries; /* 0x08 */ + U8 StartPortNum; /* 0x09 */ + U8 SwitchStatus; /* 0x0A */ + U8 PhysicalPort; /* 0x0B */ + MPI26_EVENT_PCIE_TOPO_PORT_ENTRY PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /* 0x0C */ +} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + Mpi26EventDataPCIeTopologyChangeList_t, + MPI2_POINTER pMpi26EventDataPCIeTopologyChangeList_t; + +/* PCIe Topology Change List Event data SwitchStatus values */ +#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) +#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) +#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) + +/* PCIe Link Counter Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER +{ + U64 TimeStamp; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U8 LinkEventCode; /* 0x0C */ + U8 LinkNum; /* 0x0D */ + U16 Reserved2; /* 0x0E */ + U32 LinkEventInfo; /* 0x10 */ + U8 CounterType; /* 0x14 */ + U8 ThresholdWindow; /* 0x15 */ + U8 TimeUnits; /* 0x16 */ + U8 Reserved3; /* 0x17 */ + U32 EventThreshold; /* 0x18 */ + U16 ThresholdFlags; /* 0x1C */ + U16 Reserved4; /* 0x1E */ +} MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + MPI2_POINTER PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + Mpi26EventDataPcieLinkCounter_t, MPI2_POINTER pMpi26EventDataPcieLinkCounter_t; + + +/* use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode field */ + +/* use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */ + +/* use MPI26_PCIELINK3_TIME_UNITS_ 
values from mpi2_cnfg.h for the TimeUnits field */ + +/* use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */ + +/**************************************************************************** +* EventAck message +****************************************************************************/ + +/* EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST +{ + U16 Reserved1; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Event; /* 0x0C */ + U16 Reserved5; /* 0x0E */ + U32 EventContext; /* 0x10 */ +} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t; + + +/* EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY +{ + U16 Reserved1; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t; + + +/**************************************************************************** +* SendHostMessage message +****************************************************************************/ + +/* SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST +{ + U16 HostDataLength; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U8 Reserved4; /* 0x0C */ + U8 DestVF_ID; /* 0x0D */ + U16 Reserved5; /* 0x0E */ + U32 Reserved6; /* 0x10 */ + U32 Reserved7; /* 0x14 */ + U32 Reserved8; /* 0x18 */ + U32 Reserved9; /* 0x1C */ + U32 Reserved10; /* 0x20 */ + U32 HostData[1]; /* 0x24 */ +} MPI2_SEND_HOST_MESSAGE_REQUEST, + MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t; + + +/* SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY +{ + U16 HostDataLength; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t; + + +/**************************************************************************** +* FWDownload message +****************************************************************************/ + +/* MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 TotalImageSize; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + MPI2_MPI_SGE_UNION SGL; /* 0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER 
PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest; + +#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) + +#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) +#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) /* MPI v2.5 and newer */ +#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11) +#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) +#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) +#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) +#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) /* MPI v2.6 and newer */ +#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) /* MPI v2.6 and newer */ +#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) + + +/* MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE +{ + U8 Reserved1; /* 0x00 */ + U8 ContextSize; /* 0x01 */ + U8 DetailsLength; /* 0x02 */ + U8 Flags; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 ImageOffset; /* 0x08 */ + U32 ImageSize; /* 0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t; + + +/* MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 TotalImageSize; /* 0x0C */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ + U32 ImageOffset; /* 0x18 */ + U32 ImageSize; /* 0x1C */ + MPI25_SGE_IO_UNION SGL; /* 0x20 */ +} MPI25_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_DOWNLOAD_REQUEST, + Mpi25FWDownloadRequest, MPI2_POINTER pMpi25FWDownloadRequest; + + +/* FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t; + + +/**************************************************************************** +* FWUpload message +****************************************************************************/ + +/* MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + MPI2_MPI_SGE_UNION SGL; /* 0x14 */ +} MPI2_FW_UPLOAD_REQUEST, 
MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t; + +#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) +#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) +#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) +#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) +#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D) +#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E) +#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F) +#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10) +#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11) +#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12) +#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13) +#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14) +/* skipping 0x15, 0x16. They are defined in DOWNLOAD, but not needed here */ +#define MPI2_FW_UPLOAD_ITYPE_COREDUMP (0x17) + +/* MPI v2.0 FWUpload TransactionContext Element */ +typedef struct _MPI2_FW_UPLOAD_TCSGE +{ + U8 Reserved1; /* 0x00 */ + U8 ContextSize; /* 0x01 */ + U8 DetailsLength; /* 0x02 */ + U8 Flags; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 ImageOffset; /* 0x08 */ + U32 ImageSize; /* 0x0C */ +} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE, + Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t; + + +/* MPI v2.5 FWUpload Request message */ +typedef struct _MPI25_FW_UPLOAD_REQUEST +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U32 Reserved7; /* 0x14 */ + U32 ImageOffset; /* 0x18 */ + U32 ImageSize; /* 0x1C */ + MPI25_SGE_IO_UNION SGL; /* 0x20 */ +} MPI25_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_UPLOAD_REQUEST, + Mpi25FWUploadRequest_t, MPI2_POINTER pMpi25FWUploadRequest_t; + + +/* FWUpload Reply message */ +typedef struct _MPI2_FW_UPLOAD_REPLY +{ + U8 ImageType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ActualImageSize; /* 0x14 */ +} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY, + Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t; + + +/**************************************************************************** +* PowerManagementControl message +****************************************************************************/ + +/* PowerManagementControl Request message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST +{ + U8 Feature; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Parameter1; /* 0x0C */ + U8 Parameter2; /* 0x0D */ + U8 Parameter3; /* 0x0E */ + U8 Parameter4; /* 0x0F */ + U32 Reserved5; /* 0x10 */ + U32 Reserved6; /* 0x14 */ +} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, + 
Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t; + +/* defines for the Feature field */ +#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) +#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) +#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */ +#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) /* reserved in MPI 2.0 */ +#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) + +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ +/* Parameter1 contains a PHY number */ +/* Parameter2 indicates power condition action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) +#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) +#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) +/* Parameter3 and Parameter4 are reserved */ + +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION Feature */ +/* Parameter1 contains SAS port width modulation group number */ +/* Parameter2 indicates IOC action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) +#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) +#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) +/* Parameter3 indicates desired modulation level using these defines */ +#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) +#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) +#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) +#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) +/* Parameter4 is reserved */ + +/* this next set (_PCIE_LINK) is obsolete */ +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/* Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */ +/* Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */ +/* Parameter3 and Parameter4 are reserved */ + +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/* Parameter1 indicates desired IOC hardware clock speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) +#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) +#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) +/* Parameter2, Parameter3, and Parameter4 are reserved */ + +/* parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature */ +/* Parameter1 indicates host action regarding global power management mode */ +#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) +#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) +#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) +/* Parameter2 indicates the requested global power management mode */ +#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) +#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) +#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) +/* Parameter3 and Parameter4 are reserved */ + + +/* PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY +{ + U8 Feature; /* 
0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t; + + +/**************************************************************************** +* IO Unit Control messages (MPI v2.6 and later only.) +****************************************************************************/ + +/* IO Unit Control Request Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ +} MPI26_IOUNIT_CONTROL_REQUEST, + MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REQUEST, + Mpi26IoUnitControlRequest_t, MPI2_POINTER pMpi26IoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) +#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) +#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) +#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) +#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) +#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E) +#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F) +#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10) +#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11) +#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12) +#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13) +#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14) +#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15) +#define MPI26_CTRL_OP_SHUTDOWN (0x16) +#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) +#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) +#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) +#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A) +#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B) +#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/* values for the PrimFlags field */ +#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08) +#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02) +#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01) + +/* values for the LookupMethod field */ +#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) +#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) +#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + + +/* IO Unit Control Reply Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} 
MPI26_IOUNIT_CONTROL_REPLY, MPI2_POINTER PTR_MPI26_IOUNIT_CONTROL_REPLY, + Mpi26IoUnitControlReply_t, MPI2_POINTER pMpi26IoUnitControlReply_t; + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h index 612dbcc940a9..4704fb0950e7 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h @@ -1,118 +1,118 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_pci.h - * Title: MPI PCIe Attached Devices structures and definitions. - * Creation Date: October 9, 2012 - * - * mpi2_pci.h Version: 02.00.04 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 03-16-15 02.00.00 Initial version. - * 02-17-16 02.00.01 Removed AHCI support. - * Removed SOP support. - * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to - * NVME Encapsulated Request. - * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req - * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI - * Shortten some defines to be compatible with DOS - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_PCI_H -#define MPI2_PCI_H - - -/* - * Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event - * data and PCIe Configuration pages. - */ -#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010) - -#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F) -#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000) -#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001) -#define MPI26_PCIE_DEVINFO_NVME (0x00000003) -#define MPI26_PCIE_DEVINFO_SCSI (0x00000004) - - -/**************************************************************************** -* NVMe Encapsulated message -****************************************************************************/ - -/* NVME Encapsulated Request Message */ -typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 EncapsulatedCommandLength; /* 0x04 */ - U8 Reserved1; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - U64 ErrorResponseBaseAddress; /* 0x10 */ - U16 ErrorResponseAllocationLength; /* 0x18 */ - U16 Flags; /* 0x1A */ - U32 DataLength; /* 0x1C */ - U8 NVMe_Command[4]; /* 0x20 */ /* variable length */ - -} MPI26_NVME_ENCAPSULATED_REQUEST, MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_REQUEST, - Mpi26NVMeEncapsulatedRequest_t, MPI2_POINTER pMpi26NVMeEncapsulatedRequest_t; - -/* defines for the Flags field */ -#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020) -/* Submission Queue Type*/ -#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010) -#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) -#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) -/* Error Response Address Space */ -#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C) -#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000) -#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008) -/* Data Direction*/ -#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) -#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) 
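/*
 * A minimal, self-contained sketch of how the Flags word of
 * MPI26_NVME_ENCAPSULATED_REQUEST can be composed from the defines in this
 * group (the data-direction defines continue below): an admin-queue command
 * with the error response buffer in system memory and a device-to-host
 * (read) transfer. The uint16_t stand-in and the main() harness are
 * illustrative assumptions only; a real caller uses the U16 type and the
 * defines from mpi2.h / mpi2_pci.h directly.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t U16; /* stand-in for the basic type normally supplied by mpi2.h */

#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN   (0x0010)
#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000)
#define MPI26_NVME_FLAGS_READ                (0x0002)

int main(void)
{
    /* Admin submission queue, error response in system memory, read data */
    U16 flags = MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN
              | MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM
              | MPI26_NVME_FLAGS_READ;
    printf("NVMe encapsulated Flags = 0x%04x\n", flags); /* prints 0x0012 */
    return 0;
}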
-#define MPI26_NVME_FLAGS_WRITE (0x0001) -#define MPI26_NVME_FLAGS_READ (0x0002) -#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003) - - -/* NVMe Encapuslated Reply Message */ -typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY -{ - U16 DevHandle; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 EncapsulatedCommandLength; /* 0x04 */ - U8 Reserved1; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 Reserved3; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U16 ErrorResponseCount; /* 0x14 */ - U16 Reserved4; /* 0x16 */ -} MPI26_NVME_ENCAPSULATED_ERROR_REPLY, - MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY, - Mpi26NVMeEncapsulatedErrorReply_t, - MPI2_POINTER pMpi26NVMeEncapsulatedErrorReply_t; - - -#endif - - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_pci.h + * Title: MPI PCIe Attached Devices structures and definitions. + * Creation Date: October 9, 2012 + * + * mpi2_pci.h Version: 02.00.04 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 03-16-15 02.00.00 Initial version. + * 02-17-16 02.00.01 Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to + * NVME Encapsulated Request. + * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req + * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI + * Shortten some defines to be compatible with DOS + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_PCI_H +#define MPI2_PCI_H + + +/* + * Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event + * data and PCIe Configuration pages. 
+ */ +#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010) + +#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F) +#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000) +#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001) +#define MPI26_PCIE_DEVINFO_NVME (0x00000003) +#define MPI26_PCIE_DEVINFO_SCSI (0x00000004) + + +/**************************************************************************** +* NVMe Encapsulated message +****************************************************************************/ + +/* NVME Encapsulated Request Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 EncapsulatedCommandLength; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U64 ErrorResponseBaseAddress; /* 0x10 */ + U16 ErrorResponseAllocationLength; /* 0x18 */ + U16 Flags; /* 0x1A */ + U32 DataLength; /* 0x1C */ + U8 NVMe_Command[4]; /* 0x20 */ /* variable length */ + +} MPI26_NVME_ENCAPSULATED_REQUEST, MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_REQUEST, + Mpi26NVMeEncapsulatedRequest_t, MPI2_POINTER pMpi26NVMeEncapsulatedRequest_t; + +/* defines for the Flags field */ +#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020) +/* Submission Queue Type*/ +#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010) +#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) +#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) +/* Error Response Address Space */ +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008) +/* Data Direction*/ +#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) +#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) +#define MPI26_NVME_FLAGS_WRITE (0x0001) +#define MPI26_NVME_FLAGS_READ (0x0002) +#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003) + + +/* NVMe Encapuslated Reply Message */ +typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY +{ + U16 DevHandle; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 EncapsulatedCommandLength; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 ErrorResponseCount; /* 0x14 */ + U16 Reserved4; /* 0x16 */ +} MPI26_NVME_ENCAPSULATED_ERROR_REPLY, + MPI2_POINTER PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY, + Mpi26NVMeEncapsulatedErrorReply_t, + MPI2_POINTER pMpi26NVMeEncapsulatedErrorReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ra.h b/drivers/scsi/mpt3sas/mpi/mpi2_ra.h index b7f10e96ba54..87b1afa36da6 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ra.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ra.h @@ -1,86 +1,86 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_ra.h - * Title: MPI RAID Accelerator messages and structures - * Creation Date: April 13, 2009 - * - * mpi2_ra.h Version: 02.00.01 - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 05-06-09 02.00.00 Initial version. - * 11-18-14 02.00.01 Updated copyright information. 
- * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_RA_H -#define MPI2_RA_H - -/* generic structure for RAID Accelerator Control Block */ -typedef struct _MPI2_RAID_ACCELERATOR_CONTROL_BLOCK -{ - U32 Reserved[8]; /* 0x00 */ - U32 RaidAcceleratorCDB[1]; /* 0x20 */ -} MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, - MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, - Mpi2RAIDAcceleratorControlBlock_t, - MPI2_POINTER pMpi2RAIDAcceleratorControlBlock_t; - - -/****************************************************************************** -* -* RAID Accelerator Messages -* -*******************************************************************************/ - -/* RAID Accelerator Request Message */ -typedef struct _MPI2_RAID_ACCELERATOR_REQUEST -{ - U16 Reserved0; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U64 RaidAcceleratorControlBlockAddress; /* 0x0C */ - U8 DmaEngineNumber; /* 0x14 */ - U8 Reserved4; /* 0x15 */ - U16 Reserved5; /* 0x16 */ - U32 Reserved6; /* 0x18 */ - U32 Reserved7; /* 0x1C */ - U32 Reserved8; /* 0x20 */ -} MPI2_RAID_ACCELERATOR_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REQUEST, - Mpi2RAIDAcceleratorRequest_t, MPI2_POINTER pMpi2RAIDAcceleratorRequest_t; - - -/* RAID Accelerator Error Reply Message */ -typedef struct _MPI2_RAID_ACCELERATOR_REPLY -{ - U16 Reserved0; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 ProductSpecificData[3]; /* 0x14 */ -} MPI2_RAID_ACCELERATOR_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REPLY, - Mpi2RAIDAcceleratorReply_t, MPI2_POINTER pMpi2RAIDAcceleratorReply_t; - - -#endif - - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_ra.h + * Title: MPI RAID Accelerator messages and structures + * Creation Date: April 13, 2009 + * + * mpi2_ra.h Version: 02.00.01 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 05-06-09 02.00.00 Initial version. + * 11-18-14 02.00.01 Updated copyright information. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RA_H +#define MPI2_RA_H + +/* generic structure for RAID Accelerator Control Block */ +typedef struct _MPI2_RAID_ACCELERATOR_CONTROL_BLOCK +{ + U32 Reserved[8]; /* 0x00 */ + U32 RaidAcceleratorCDB[1]; /* 0x20 */ +} MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, + MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_CONTROL_BLOCK, + Mpi2RAIDAcceleratorControlBlock_t, + MPI2_POINTER pMpi2RAIDAcceleratorControlBlock_t; + + +/****************************************************************************** +* +* RAID Accelerator Messages +* +*******************************************************************************/ + +/* RAID Accelerator Request Message */ +typedef struct _MPI2_RAID_ACCELERATOR_REQUEST +{ + U16 Reserved0; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U64 RaidAcceleratorControlBlockAddress; /* 0x0C */ + U8 DmaEngineNumber; /* 0x14 */ + U8 Reserved4; /* 0x15 */ + U16 Reserved5; /* 0x16 */ + U32 Reserved6; /* 0x18 */ + U32 Reserved7; /* 0x1C */ + U32 Reserved8; /* 0x20 */ +} MPI2_RAID_ACCELERATOR_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REQUEST, + Mpi2RAIDAcceleratorRequest_t, MPI2_POINTER pMpi2RAIDAcceleratorRequest_t; + + +/* RAID Accelerator Error Reply Message */ +typedef struct _MPI2_RAID_ACCELERATOR_REPLY +{ + U16 Reserved0; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ProductSpecificData[3]; /* 0x14 */ +} MPI2_RAID_ACCELERATOR_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REPLY, + Mpi2RAIDAcceleratorReply_t, MPI2_POINTER pMpi2RAIDAcceleratorReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h index af56ac4065ff..da36ffbc9dd3 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -1,374 +1,374 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_raid.h - * Title: MPI Integrated RAID messages and structures - * Creation Date: April 26, 2007 - * - * mpi2_raid.h Version: 02.00.11 - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Modifications to RAID Action request and reply, - * including the Actions and ActionData. - * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. - * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that - * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT - * can be sized by the build environment. - * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of - * VolumeCreationFlags and marked the old one as obsolete. - * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. - * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with - * related structures and defines. - * Added product-specific range to RAID Action values. - * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. 
- * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. - * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. - * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. - * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. - * 11-18-14 02.00.11 Updated copyright information. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_RAID_H -#define MPI2_RAID_H - -/***************************************************************************** -* -* Integrated RAID Messages -* -*****************************************************************************/ - -/**************************************************************************** -* RAID Action messages -****************************************************************************/ - -/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ -#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) - -/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ -#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) -#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) - -/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ - -/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ -#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) - -/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ -typedef struct _MPI2_RAID_ACTION_RATE_DATA -{ - U8 RateToChange; /* 0x00 */ - U8 RateOrMode; /* 0x01 */ - U16 DataScrubDuration; /* 0x02 */ -} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA, - Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t; - -#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) -#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) -#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) - -/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ -typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION -{ - U8 RAIDFunction; /* 0x00 */ - U8 Flags; /* 0x01 */ - U16 Reserved1; /* 0x02 */ -} MPI2_RAID_ACTION_START_RAID_FUNCTION, - MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, - Mpi2RaidActionStartRaidFunction_t, - MPI2_POINTER pMpi2RaidActionStartRaidFunction_t; - -/* defines for the RAIDFunction field */ -#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) -#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) -#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) - -/* defines for the Flags field */ -#define MPI2_RAID_ACTION_START_NEW (0x00) -#define MPI2_RAID_ACTION_START_RESUME (0x01) - -/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ -typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION -{ - U8 RAIDFunction; /* 0x00 */ - U8 Flags; /* 0x01 */ - U16 Reserved1; /* 0x02 */ -} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, - MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, - Mpi2RaidActionStopRaidFunction_t, - MPI2_POINTER pMpi2RaidActionStopRaidFunction_t; - -/* defines for the RAIDFunction field */ -#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) -#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) -#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) - -/* defines for the Flags field */ -#define MPI2_RAID_ACTION_STOP_ABORT (0x00) -#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) - -/* ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ -typedef struct _MPI2_RAID_ACTION_HOT_SPARE -{ - U8 HotSparePool; /* 0x00 */ - 
U8 Reserved1; /* 0x01 */ - U16 DevHandle; /* 0x02 */ -} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE, - Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t; - -/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ -typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE -{ - U8 Flags; /* 0x00 */ - U8 DeviceFirmwareUpdateModeTimeout; /* 0x01 */ - U16 Reserved1; /* 0x02 */ -} MPI2_RAID_ACTION_FW_UPDATE_MODE, - MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, - Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t; - -/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ -#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) -#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) - -typedef union _MPI2_RAID_ACTION_DATA -{ - U32 Word; - MPI2_RAID_ACTION_RATE_DATA Rates; - MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; - MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; - MPI2_RAID_ACTION_HOT_SPARE HotSpare; - MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; -} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA, - Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t; - - -/* RAID Action Request Message */ -typedef struct _MPI2_RAID_ACTION_REQUEST -{ - U8 Action; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 VolDevHandle; /* 0x04 */ - U8 PhysDiskNum; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - MPI2_RAID_ACTION_DATA ActionDataWord; /* 0x10 */ - MPI2_SGE_SIMPLE_UNION ActionDataSGE; /* 0x14 */ -} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST, - Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t; - -/* RAID Action request Action values */ - -#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) -#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) -#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) -#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) -#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) -#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) -#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) -#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) -#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) -#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) -#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) -#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) -#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) -#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) -#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) -#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) -#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) -#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) -#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) -#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) -#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) -#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) -#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) - - -/* RAID Volume Creation Structure */ - -/* - * The following define can be customized for the targeted product. 
- */ -#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS -#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) -#endif - -typedef struct _MPI2_RAID_VOLUME_PHYSDISK -{ - U8 RAIDSetNum; /* 0x00 */ - U8 PhysDiskMap; /* 0x01 */ - U16 PhysDiskDevHandle; /* 0x02 */ -} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK, - Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t; - -/* defines for the PhysDiskMap field */ -#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) -#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) - -typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT -{ - U8 NumPhysDisks; /* 0x00 */ - U8 VolumeType; /* 0x01 */ - U16 Reserved1; /* 0x02 */ - U32 VolumeCreationFlags; /* 0x04 */ - U32 VolumeSettings; /* 0x08 */ - U8 Reserved2; /* 0x0C */ - U8 ResyncRate; /* 0x0D */ - U16 DataScrubDuration; /* 0x0E */ - U64 VolumeMaxLBA; /* 0x10 */ - U32 StripeSize; /* 0x18 */ - U8 Name[16]; /* 0x1C */ - MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */ -} MPI2_RAID_VOLUME_CREATION_STRUCT, - MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, - Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t; - -/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ - -/* defines for the VolumeCreationFlags field */ -#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) -#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) /* MPI 2.0 only */ -#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) -#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) -/* The following is an obsolete define. - * It must be shifted left 24 bits in order to set the proper bit. - */ -#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) - - -/* RAID Online Capacity Expansion Structure */ - -typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION -{ - U32 Flags; /* 0x00 */ - U16 DevHandle0; /* 0x04 */ - U16 Reserved1; /* 0x06 */ - U16 DevHandle1; /* 0x08 */ - U16 Reserved2; /* 0x0A */ -} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, - MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, - Mpi2RaidOnlineCapacityExpansion_t, - MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t; - - -/* RAID Compatibility Input Structure */ - -typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT -{ - U16 SourceDevHandle; /* 0x00 */ - U16 CandidateDevHandle; /* 0x02 */ - U32 Flags; /* 0x04 */ - U32 Reserved1; /* 0x08 */ - U32 Reserved2; /* 0x0C */ -} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, - MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, - Mpi2RaidCompatibilityInputStruct_t, - MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t; - -/* defines for RAID Compatibility Structure Flags field */ -#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) -#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) - - -/* RAID Volume Indicator Structure */ - -typedef struct _MPI2_RAID_VOL_INDICATOR -{ - U64 TotalBlocks; /* 0x00 */ - U64 BlocksRemaining; /* 0x08 */ - U32 Flags; /* 0x10 */ - U32 ElapsedSeconds; /* 0x14 */ -} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR, - Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t; - -/* defines for RAID Volume Indicator Flags field */ -#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) - -#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) -#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) -#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) -#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) -#define MPI2_RAID_VOL_FLAGS_OP_RESYNC 
(0x00000003) -#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) - - -/* RAID Compatibility Result Structure */ - -typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT -{ - U8 State; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 Reserved2; /* 0x02 */ - U32 GenericAttributes; /* 0x04 */ - U32 OEMSpecificAttributes; /* 0x08 */ - U32 Reserved3; /* 0x0C */ - U32 Reserved4; /* 0x10 */ -} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, - MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, - Mpi2RaidCompatibilityResultStruct_t, - MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t; - -/* defines for RAID Compatibility Result Structure State field */ -#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) -#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) - -/* defines for RAID Compatibility Result Structure GenericAttributes field */ -#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) - -#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) -#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) -#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) - -#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) -#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) -#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) - - -/* RAID Action Reply ActionData union */ -typedef union _MPI2_RAID_ACTION_REPLY_DATA -{ - U32 Word[6]; - MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; - U16 VolDevHandle; - U8 VolumeState; - U8 PhysDiskNum; - MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; -} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA, - Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t; - -/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ - - -/* RAID Action Reply Message */ -typedef struct _MPI2_RAID_ACTION_REPLY -{ - U8 Action; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 VolDevHandle; /* 0x04 */ - U8 PhysDiskNum; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U16 Reserved3; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - MPI2_RAID_ACTION_REPLY_DATA ActionData; /* 0x14 */ -} MPI2_RAID_ACTION_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY, - Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t; - - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 + * + * mpi2_raid.h Version: 02.00.11 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. 
+ * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * 11-18-14 02.00.11 Updated copyright information. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RAID_H +#define MPI2_RAID_H + +/***************************************************************************** +* +* Integrated RAID Messages +* +*****************************************************************************/ + +/**************************************************************************** +* RAID Action messages +****************************************************************************/ + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) +#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) + +/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) + +/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA +{ + U8 RateToChange; /* 0x00 */ + U8 RateOrMode; /* 0x01 */ + U16 DataScrubDuration; /* 0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t; + +#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) +#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) +#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) + +/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION +{ + U8 RAIDFunction; /* 0x00 */ + U8 Flags; /* 0x01 */ + U16 Reserved1; /* 0x02 */ +} MPI2_RAID_ACTION_START_RAID_FUNCTION, + MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + MPI2_POINTER pMpi2RaidActionStartRaidFunction_t; + +/* defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) + +/* defines for the Flags field */ +#define MPI2_RAID_ACTION_START_NEW (0x00) +#define MPI2_RAID_ACTION_START_RESUME (0x01) + +/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION +{ + U8 RAIDFunction; /* 0x00 */ + U8 Flags; /* 0x01 */ + U16 Reserved1; /* 0x02 */ +} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + MPI2_POINTER pMpi2RaidActionStopRaidFunction_t; + +/* defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) + +/* defines for the Flags field */ +#define MPI2_RAID_ACTION_STOP_ABORT (0x00) +#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) + +/* ActionDataWord for 
MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE +{ + U8 HotSparePool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 DevHandle; /* 0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t; + +/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE +{ + U8 Flags; /* 0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /* 0x01 */ + U16 Reserved1; /* 0x02 */ +} MPI2_RAID_ACTION_FW_UPDATE_MODE, + MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t; + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) +#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) + +typedef union _MPI2_RAID_ACTION_DATA +{ + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t; + + +/* RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST +{ + U8 Action; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 VolDevHandle; /* 0x04 */ + U8 PhysDiskNum; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /* 0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /* 0x14 */ +} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t; + +/* RAID Action request Action values */ + +#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) +#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) +#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) +#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) +#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) +#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) +#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) +#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) +#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) +#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) +#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) +#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) +#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) +#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) +#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) +#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) + + +/* RAID Volume Creation Structure */ + +/* + * The following define can be customized for the targeted product. 
+ */ +#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS +#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) +#endif + +typedef struct _MPI2_RAID_VOLUME_PHYSDISK +{ + U8 RAIDSetNum; /* 0x00 */ + U8 PhysDiskMap; /* 0x01 */ + U16 PhysDiskDevHandle; /* 0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t; + +/* defines for the PhysDiskMap field */ +#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT +{ + U8 NumPhysDisks; /* 0x00 */ + U8 VolumeType; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 VolumeCreationFlags; /* 0x04 */ + U32 VolumeSettings; /* 0x08 */ + U8 Reserved2; /* 0x0C */ + U8 ResyncRate; /* 0x0D */ + U16 DataScrubDuration; /* 0x0E */ + U64 VolumeMaxLBA; /* 0x10 */ + U32 StripeSize; /* 0x18 */ + U8 Name[16]; /* 0x1C */ + MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */ +} MPI2_RAID_VOLUME_CREATION_STRUCT, + MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, + Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t; + +/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ + +/* defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) /* MPI 2.0 only */ +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/* The following is an obsolete define. + * It must be shifted left 24 bits in order to set the proper bit. + */ +#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) + + +/* RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION +{ + U32 Flags; /* 0x00 */ + U16 DevHandle0; /* 0x04 */ + U16 Reserved1; /* 0x06 */ + U16 DevHandle1; /* 0x08 */ + U16 Reserved2; /* 0x0A */ +} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t; + + +/* RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT +{ + U16 SourceDevHandle; /* 0x00 */ + U16 CandidateDevHandle; /* 0x02 */ + U32 Flags; /* 0x04 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t; + +/* defines for RAID Compatibility Structure Flags field */ +#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) +#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) + + +/* RAID Volume Indicator Structure */ + +typedef struct _MPI2_RAID_VOL_INDICATOR +{ + U64 TotalBlocks; /* 0x00 */ + U64 BlocksRemaining; /* 0x08 */ + U32 Flags; /* 0x10 */ + U32 ElapsedSeconds; /* 0x14 */ +} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t; + +/* defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) + +#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) +#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) +#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) +#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) +#define MPI2_RAID_VOL_FLAGS_OP_RESYNC 
(0x00000003) +#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) + + +/* RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT +{ + U8 State; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 Reserved2; /* 0x02 */ + U32 GenericAttributes; /* 0x04 */ + U32 OEMSpecificAttributes; /* 0x08 */ + U32 Reserved3; /* 0x0C */ + U32 Reserved4; /* 0x10 */ +} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t; + +/* defines for RAID Compatibility Result Structure State field */ +#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) +#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) + +/* defines for RAID Compatibility Result Structure GenericAttributes field */ +#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) + +#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) +#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) +#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) + +#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) +#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) +#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) + + +/* RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA +{ + U32 Word[6]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t; + +/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + + +/* RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY +{ + U8 Action; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 VolDevHandle; /* 0x04 */ + U8 PhysDiskNum; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U16 Reserved3; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /* 0x14 */ +} MPI2_RAID_ACTION_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t; + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h index 3b515efef2c8..cde1e9de916f 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -1,319 +1,319 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_sas.h - * Title: MPI Serial Attached SCSI structures and definitions - * Creation Date: February 9, 2007 - * - * mpi2_sas.h Version: 02.00.10 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit - * Control Request. 
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control - * Request. - * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST - * to MPI2_SGE_IO_UNION since it supports chained SGLs. - * 05-12-10 02.00.04 Modified some comments. - * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. - * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA - * Passthrough Request message. - * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete - * for anything newer than MPI v2.0. - * 11-18-14 02.00.09 Updated copyright information. - * 03-16-15 02.00.10 Updated for MPI v2.6. - * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_SAS_H -#define MPI2_SAS_H - -/* - * Values for SASStatus. - */ -#define MPI2_SASSTATUS_SUCCESS (0x00) -#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) -#define MPI2_SASSTATUS_INVALID_FRAME (0x02) -#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) -#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) -#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) -#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) -#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) -#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) -#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) -#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) -#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) -#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) -#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) -#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) -#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) -#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) -#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) -#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) -#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) -#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) - - -/* - * Values for the SAS DeviceInfo field used in SAS Device Status Change Event - * data and SAS Configuration pages. 
- */ -#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) -#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) -#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) -#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) -#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) -#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) -#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) -#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) -#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) -#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) -#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) -#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) - -#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) -#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) -#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) -#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) -#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) - - -/***************************************************************************** -* -* SAS Messages -* -*****************************************************************************/ - -/**************************************************************************** -* SMP Passthrough messages -****************************************************************************/ - -/* SMP Passthrough Request Message */ -typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST -{ - U8 PassthroughFlags; /* 0x00 */ - U8 PhysicalPort; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 RequestDataLength; /* 0x04 */ - U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U32 Reserved2; /* 0x0C */ - U64 SASAddress; /* 0x10 */ - U32 Reserved3; /* 0x18 */ - U32 Reserved4; /* 0x1C */ - MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ /* MPI v2.5: IEEE Simple 64 elements only */ -} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST, - Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t; - -/* values for PassthroughFlags field */ -#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) - -/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - - -/* SMP Passthrough Reply Message */ -typedef struct _MPI2_SMP_PASSTHROUGH_REPLY -{ - U8 PassthroughFlags; /* 0x00 */ - U8 PhysicalPort; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 ResponseDataLength; /* 0x04 */ - U8 SGLFlags; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U8 Reserved2; /* 0x0C */ - U8 SASStatus; /* 0x0D */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 Reserved3; /* 0x14 */ - U8 ResponseData[4]; /* 0x18 */ -} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY, - Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t; - -/* values for PassthroughFlags field */ -#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) - -/* values for SASStatus field are at the top of this file */ - - -/**************************************************************************** -* SATA Passthrough messages -****************************************************************************/ - -typedef union _MPI2_SATA_PT_SGE_UNION -{ - MPI2_SGE_SIMPLE_UNION MpiSimple; /* MPI v2.0 only */ - MPI2_SGE_CHAIN_UNION MpiChain; /* MPI v2.0 only */ - MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; - MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /* MPI v2.0 only */ - 
MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /* MPI v2.5 only */ -} MPI2_SATA_PT_SGE_UNION, MPI2_POINTER PTR_MPI2_SATA_PT_SGE_UNION, - Mpi2SataPTSGEUnion_t, MPI2_POINTER pMpi2SataPTSGEUnion_t; - - -/* SATA Passthrough Request Message */ -typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST -{ - U16 DevHandle; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 PassthroughFlags; /* 0x04 */ - U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U32 Reserved2; /* 0x0C */ - U32 Reserved3; /* 0x10 */ - U32 Reserved4; /* 0x14 */ - U32 DataLength; /* 0x18 */ - U8 CommandFIS[20]; /* 0x1C */ - MPI2_SATA_PT_SGE_UNION SGL; /* 0x30 */ /* MPI v2.5: IEEE 64 elements only */ -} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, - Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; - -/* values for PassthroughFlags field */ -#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) -#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) /* MPI v2.6 and newer */ -#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) -#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) -#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) -#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) -#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) - -/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - - -/* SATA Passthrough Reply Message */ -typedef struct _MPI2_SATA_PASSTHROUGH_REPLY -{ - U16 DevHandle; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 PassthroughFlags; /* 0x04 */ - U8 SGLFlags; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved1; /* 0x0A */ - U8 Reserved2; /* 0x0C */ - U8 SASStatus; /* 0x0D */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U8 StatusFIS[20]; /* 0x14 */ - U32 StatusControlRegisters; /* 0x28 */ - U32 TransferCount; /* 0x2C */ -} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY, - Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t; - -/* values for SASStatus field are at the top of this file */ - - -/**************************************************************************** -* SAS IO Unit Control messages -* (MPI v2.5 and earlier only. -* Replaced by IO Unit Control messages in MPI v2.6 and later.) 
-****************************************************************************/ - -/* SAS IO Unit Control Request Message */ -typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U8 PhyNum; /* 0x0E */ - U8 PrimFlags; /* 0x0F */ - U32 Primitive; /* 0x10 */ - U8 LookupMethod; /* 0x14 */ - U8 Reserved5; /* 0x15 */ - U16 SlotNumber; /* 0x16 */ - U64 LookupAddress; /* 0x18 */ - U32 IOCParameterValue; /* 0x20 */ - U32 Reserved7; /* 0x24 */ - U32 Reserved8; /* 0x28 */ -} MPI2_SAS_IOUNIT_CONTROL_REQUEST, - MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, - Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t; - -/* values for the Operation field */ -#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) -#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) -#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) -#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) -#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) -#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) -#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ -#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) -#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) -#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) -#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) -#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) -#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) -#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) -#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) -#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) -#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) - -/* values for the PrimFlags field */ -#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) -#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) -#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) - -/* values for the LookupMethod field */ -#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) -#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) -#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) - - -/* SAS IO Unit Control Reply Message */ -typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY -{ - U8 Operation; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 DevHandle; /* 0x04 */ - U8 IOCParameter; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_SAS_IOUNIT_CONTROL_REPLY, - MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, - Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t; - - -#endif - - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 + * + * mpi2_sas.h Version: 02.00.10 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. 
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_SAS_H +#define MPI2_SAS_H + +/* + * Values for SASStatus. + */ +#define MPI2_SASSTATUS_SUCCESS (0x00) +#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI2_SASSTATUS_INVALID_FRAME (0x02) +#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + + +/* + * Values for the SAS DeviceInfo field used in SAS Device Status Change Event + * data and SAS Configuration pages. 
+ */ +#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + + +/***************************************************************************** +* +* SAS Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SMP Passthrough messages +****************************************************************************/ + +/* SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST +{ + U8 PassthroughFlags; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 RequestDataLength; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Reserved2; /* 0x0C */ + U64 SASAddress; /* 0x10 */ + U32 Reserved3; /* 0x18 */ + U32 Reserved4; /* 0x1C */ + MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ /* MPI v2.5: IEEE Simple 64 elements only */ +} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t; + +/* values for PassthroughFlags field */ +#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/* SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY +{ + U8 PassthroughFlags; /* 0x00 */ + U8 PhysicalPort; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 ResponseDataLength; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U8 Reserved2; /* 0x0C */ + U8 SASStatus; /* 0x0D */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 Reserved3; /* 0x14 */ + U8 ResponseData[4]; /* 0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t; + +/* values for PassthroughFlags field */ +#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + +/* values for SASStatus field are at the top of this file */ + + +/**************************************************************************** +* SATA Passthrough messages +****************************************************************************/ + +typedef union _MPI2_SATA_PT_SGE_UNION +{ + MPI2_SGE_SIMPLE_UNION MpiSimple; /* MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /* MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /* MPI v2.0 only */ + 
MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /* MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, MPI2_POINTER PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, MPI2_POINTER pMpi2SataPTSGEUnion_t; + + +/* SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST +{ + U16 DevHandle; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 PassthroughFlags; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U32 Reserved2; /* 0x0C */ + U32 Reserved3; /* 0x10 */ + U32 Reserved4; /* 0x14 */ + U32 DataLength; /* 0x18 */ + U8 CommandFIS[20]; /* 0x1C */ + MPI2_SATA_PT_SGE_UNION SGL; /* 0x30 */ /* MPI v2.5: IEEE 64 elements only */ +} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; + +/* values for PassthroughFlags field */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) /* MPI v2.6 and newer */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/* SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY +{ + U16 DevHandle; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 PassthroughFlags; /* 0x04 */ + U8 SGLFlags; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved1; /* 0x0A */ + U8 Reserved2; /* 0x0C */ + U8 SASStatus; /* 0x0D */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 StatusFIS[20]; /* 0x14 */ + U32 StatusControlRegisters; /* 0x28 */ + U32 TransferCount; /* 0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t; + +/* values for SASStatus field are at the top of this file */ + + +/**************************************************************************** +* SAS IO Unit Control messages +* (MPI v2.5 and earlier only. +* Replaced by IO Unit Control messages in MPI v2.6 and later.) 
+****************************************************************************/ + +/* SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ +} MPI2_SAS_IOUNIT_CONTROL_REQUEST, + MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ +#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) +#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) +#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) +#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) +#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) +#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) +#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/* values for the PrimFlags field */ +#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) +#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) +#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) + +/* values for the LookupMethod field */ +#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) +#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) +#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + + +/* SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY +{ + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_SAS_IOUNIT_CONTROL_REPLY, + MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t; + + +#endif + + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_targ.h b/drivers/scsi/mpt3sas/mpi/mpi2_targ.h index 2e88a535c9a9..1b6aa870a31c 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_targ.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_targ.h @@ -1,575 +1,575 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_targ.h - * Title: MPI Target mode messages and structures - * Creation Date: September 8, 2006 - * - * mpi2_targ.h Version: 02.00.09 - * - * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 - * prefix are for use only on MPI v2.5 products, and must not be used - * with MPI v2.0 products. Unless otherwise noted, names beginning with - * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. 
- * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to - * BufferPostFlags field of CommandBufferPostBase Request. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 10-02-08 02.00.03 Removed NextCmdBufferOffset from - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * Target Status Send Request only takes a single SGE for - * response data. - * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. - * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. - * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT - * request message structure. - * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and - * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. - * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * 11-18-14 02.00.08 Updated copyright information. - * 03-16-15 02.00.09 Updated for MPI v2.6. - * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_TARG_H -#define MPI2_TARG_H - - -/****************************************************************************** -* -* SCSI Target Messages -* -*******************************************************************************/ - -/**************************************************************************** -* Target Command Buffer Post Base Request -****************************************************************************/ - -typedef struct _MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST -{ - U8 BufferPostFlags; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 TotalCmdBuffers; /* 0x04 */ - U8 Reserved; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - U16 CmdBufferLength; /* 0x10 */ - U8 MinMSIxIndex; /* 0x12 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ - U8 MaxMSIxIndex; /* 0x13 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ - U32 BaseAddressLow; /* 0x14 */ - U32 BaseAddressHigh; /* 0x18 */ -} MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, - MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, - Mpi2TargetCmdBufferPostBaseRequest_t, - MPI2_POINTER pMpi2TargetCmdBufferPostBaseRequest_t; - -/* values for the BufferPostflags field */ -#define MPI2_CMD_BUF_POST_BASE_ADDRESS_SPACE_MASK (0x0C) -#define MPI2_CMD_BUF_POST_BASE_SYSTEM_ADDRESS_SPACE (0x00) -#define MPI2_CMD_BUF_POST_BASE_IOCDDR_ADDRESS_SPACE (0x04) -#define MPI2_CMD_BUF_POST_BASE_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ -#define MPI26_CMD_BUF_POST_BASE_IOCCTL_ADDRESS_SPACE (0x08) /* for MPI v2.6 only */ -#define MPI2_CMD_BUF_POST_BASE_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ - -#define MPI2_CMD_BUF_POST_BASE_FLAGS_AUTO_POST_ALL (0x01) - - -/**************************************************************************** -* Target Command Buffer Post List Request -****************************************************************************/ - -typedef struct _MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST -{ - U16 Reserved; /* 0x00 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 CmdBufferCount; /* 0x04 */ - U8 Reserved1; /* 0x06 */ - U8 
MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved2; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - U16 IoIndex[2]; /* 0x10 */ -} MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, - MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, - Mpi2TargetCmdBufferPostListRequest_t, - MPI2_POINTER pMpi2TargetCmdBufferPostListRequest_t; - -/**************************************************************************** -* Target Command Buffer Post Base List Reply -****************************************************************************/ - -typedef struct _MPI2_TARGET_BUF_POST_BASE_LIST_REPLY -{ - U8 Flags; /* 0x00 */ - U8 Reserved; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U16 IoIndex; /* 0x14 */ - U16 Reserved5; /* 0x16 */ - U32 Reserved6; /* 0x18 */ -} MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, - MPI2_POINTER PTR_MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, - Mpi2TargetCmdBufferPostBaseListReply_t, - MPI2_POINTER pMpi2TargetCmdBufferPostBaseListReply_t; - -/* Flags defines */ -#define MPI2_CMD_BUF_POST_REPLY_IOINDEX_VALID (0x01) - - -/**************************************************************************** -* Command Buffer Formats (with 16 byte CDB) -****************************************************************************/ - -typedef struct _MPI2_TARGET_SSP_CMD_BUFFER -{ - U8 FrameType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 InitiatorConnectionTag; /* 0x02 */ - U32 HashedSourceSASAddress; /* 0x04 */ - U16 Reserved2; /* 0x08 */ - U16 Flags; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - U16 Tag; /* 0x10 */ - U16 TargetPortTransferTag; /* 0x12 */ - U32 DataOffset; /* 0x14 */ - /* COMMAND information unit starts here */ - U8 LogicalUnitNumber[8]; /* 0x18 */ - U8 Reserved4; /* 0x20 */ - U8 TaskAttribute; /* lower 3 bits */ /* 0x21 */ - U8 Reserved5; /* 0x22 */ - U8 AdditionalCDBLength; /* upper 5 bits */ /* 0x23 */ - U8 CDB[16]; /* 0x24 */ - /* Additional CDB bytes extend past the CDB field */ -} MPI2_TARGET_SSP_CMD_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_CMD_BUFFER, - Mpi2TargetSspCmdBuffer, MPI2_POINTER pMp2iTargetSspCmdBuffer; - -typedef struct _MPI2_TARGET_SSP_TASK_BUFFER -{ - U8 FrameType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U16 InitiatorConnectionTag; /* 0x02 */ - U32 HashedSourceSASAddress; /* 0x04 */ - U16 Reserved2; /* 0x08 */ - U16 Flags; /* 0x0A */ - U32 Reserved3; /* 0x0C */ - U16 Tag; /* 0x10 */ - U16 TargetPortTransferTag; /* 0x12 */ - U32 DataOffset; /* 0x14 */ - /* TASK information unit starts here */ - U8 LogicalUnitNumber[8]; /* 0x18 */ - U16 Reserved4; /* 0x20 */ - U8 TaskManagementFunction; /* 0x22 */ - U8 Reserved5; /* 0x23 */ - U16 ManagedTaskTag; /* 0x24 */ - U16 Reserved6; /* 0x26 */ - U32 Reserved7; /* 0x28 */ - U32 Reserved8; /* 0x2C */ - U32 Reserved9; /* 0x30 */ -} MPI2_TARGET_SSP_TASK_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_TASK_BUFFER, - Mpi2TargetSspTaskBuffer, MPI2_POINTER pMpi2TargetSspTaskBuffer; - -/* mask and shift for HashedSourceSASAddress field */ -#define MPI2_TARGET_HASHED_SAS_ADDRESS_MASK (0xFFFFFF00) -#define MPI2_TARGET_HASHED_SAS_ADDRESS_SHIFT (8) - - -/**************************************************************************** -* MPI v2.0 Target Assist Request -****************************************************************************/ - -typedef struct 
_MPI2_TARGET_ASSIST_REQUEST -{ - U8 Reserved1; /* 0x00 */ - U8 TargetAssistFlags; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 QueueTag; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 IoIndex; /* 0x0C */ - U16 InitiatorConnectionTag; /* 0x0E */ - U16 SGLFlags; /* 0x10 */ - U8 SequenceNumber; /* 0x12 */ - U8 Reserved4; /* 0x13 */ - U8 SGLOffset0; /* 0x14 */ - U8 SGLOffset1; /* 0x15 */ - U8 SGLOffset2; /* 0x16 */ - U8 SGLOffset3; /* 0x17 */ - U32 SkipCount; /* 0x18 */ - U32 DataLength; /* 0x1C */ - U32 BidirectionalDataLength; /* 0x20 */ - U16 IoFlags; /* 0x24 */ - U16 EEDPFlags; /* 0x26 */ - U32 EEDPBlockSize; /* 0x28 */ - U32 SecondaryReferenceTag; /* 0x2C */ - U16 SecondaryApplicationTag; /* 0x30 */ - U16 ApplicationTagTranslationMask; /* 0x32 */ - U32 PrimaryReferenceTag; /* 0x34 */ - U16 PrimaryApplicationTag; /* 0x38 */ - U16 PrimaryApplicationTagMask; /* 0x3A */ - U32 RelativeOffset; /* 0x3C */ - U32 Reserved5; /* 0x40 */ - U32 Reserved6; /* 0x44 */ - U32 Reserved7; /* 0x48 */ - U32 Reserved8; /* 0x4C */ - MPI2_SGE_IO_UNION SGL[1]; /* 0x50 */ -} MPI2_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI2_TARGET_ASSIST_REQUEST, - Mpi2TargetAssistRequest_t, MPI2_POINTER pMpi2TargetAssistRequest_t; - -/* Target Assist TargetAssistFlags bits */ - -#define MPI2_TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80) -#define MPI2_TARGET_ASSIST_FLAGS_TLR (0x10) -#define MPI2_TARGET_ASSIST_FLAGS_RETRANSMIT (0x04) -#define MPI2_TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02) -#define MPI2_TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01) - -/* Target Assist SGLFlags bits */ - -/* base values for Data Location Address Space */ -#define MPI2_TARGET_ASSIST_SGLFLAGS_ADDR_MASK (0x0C) -#define MPI2_TARGET_ASSIST_SGLFLAGS_SYSTEM_ADDR (0x00) -#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCDDR_ADDR (0x04) -#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCPLB_ADDR (0x08) -#define MPI2_TARGET_ASSIST_SGLFLAGS_PLBNTA_ADDR (0x0C) - -/* base values for Type */ -#define MPI2_TARGET_ASSIST_SGLFLAGS_TYPE_MASK (0x03) -#define MPI2_TARGET_ASSIST_SGLFLAGS_MPI_TYPE (0x00) -#define MPI2_TARGET_ASSIST_SGLFLAGS_32IEEE_TYPE (0x01) -#define MPI2_TARGET_ASSIST_SGLFLAGS_64IEEE_TYPE (0x02) - -/* shift values for each sub-field */ -#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL3_SHIFT (12) -#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL2_SHIFT (8) -#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL1_SHIFT (4) -#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL0_SHIFT (0) - -/* Target Assist IoFlags bits */ - -#define MPI2_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) -#define MPI2_TARGET_ASSIST_IOFLAGS_MULTICAST (0x0400) -#define MPI2_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) - -/* Target Assist EEDPFlags bits */ - -#define MPI2_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) -#define MPI2_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) -#define MPI2_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) -#define MPI2_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) - -#define MPI2_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) -#define MPI2_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) -#define MPI2_TA_EEDPFLAGS_CHECK_GUARD (0x0100) - -#define MPI2_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) - -#define MPI2_TA_EEDPFLAGS_MASK_OP (0x0007) -#define MPI2_TA_EEDPFLAGS_NOOP_OP (0x0000) -#define MPI2_TA_EEDPFLAGS_CHECK_OP (0x0001) -#define MPI2_TA_EEDPFLAGS_STRIP_OP (0x0002) -#define MPI2_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) -#define MPI2_TA_EEDPFLAGS_INSERT_OP (0x0004) -#define MPI2_TA_EEDPFLAGS_REPLACE_OP (0x0006) -#define MPI2_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) - - 
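
The EEDPFlags encoding just defined packs a single operation code into the low three bits (MPI2_TA_EEDPFLAGS_MASK_OP) and treats the CHECK_* and INC_* bits as independent modifiers OR'd on top of it. As a minimal illustrative sketch only (not part of this header or of this patch), a hypothetical driver helper composing the field for a v2.0 Target Assist request might look like the following, assuming the U16 type from the surrounding MPI2 headers; build_ta_eedp_flags and its parameter are inventions for illustration:

/* Hypothetical example, not from the header: compose an EEDPFlags value.
 * Bits 2:0 select exactly one operation; the check/increment bits are
 * independent and may be combined with whichever operation is chosen.
 */
static U16 build_ta_eedp_flags(int verify_pi)
{
	U16 flags = MPI2_TA_EEDPFLAGS_INSERT_OP;	/* generate PI on the fly */

	if (verify_pi)
		flags = MPI2_TA_EEDPFLAGS_CHECK_OP |
			MPI2_TA_EEDPFLAGS_CHECK_REFTAG |
			MPI2_TA_EEDPFLAGS_CHECK_APPTAG |
			MPI2_TA_EEDPFLAGS_CHECK_GUARD |
			MPI2_TA_EEDPFLAGS_INC_PRI_REFTAG; /* advance reftag per block */

	return flags;
}

The MPI v2.5 request that follows reuses the same operation values under the MPI25_TA_EEDPFLAGS_ names and layers the escape-mode and host-guard-method sub-fields on top.
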
-/**************************************************************************** -* MPI v2.5 Target Assist Request -****************************************************************************/ - -typedef struct _MPI25_TARGET_ASSIST_REQUEST -{ - U8 Reserved1; /* 0x00 */ - U8 TargetAssistFlags; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 QueueTag; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 IoIndex; /* 0x0C */ - U16 InitiatorConnectionTag; /* 0x0E */ - U8 DMAFlags; /* 0x10 */ - U8 Reserved9; /* 0x11 */ - U8 SequenceNumber; /* 0x12 */ - U8 Reserved4; /* 0x13 */ - U8 SGLOffset0; /* 0x14 */ - U8 SGLOffset1; /* 0x15 */ - U8 SGLOffset2; /* 0x16 */ - U8 SGLOffset3; /* 0x17 */ - U32 SkipCount; /* 0x18 */ - U32 DataLength; /* 0x1C */ - U32 BidirectionalDataLength; /* 0x20 */ - U16 IoFlags; /* 0x24 */ - U16 EEDPFlags; /* 0x26 */ - U16 EEDPBlockSize; /* 0x28 */ - U16 Reserved10; /* 0x2A */ - U32 SecondaryReferenceTag; /* 0x2C */ - U16 SecondaryApplicationTag; /* 0x30 */ - U16 ApplicationTagTranslationMask; /* 0x32 */ - U32 PrimaryReferenceTag; /* 0x34 */ - U16 PrimaryApplicationTag; /* 0x38 */ - U16 PrimaryApplicationTagMask; /* 0x3A */ - U32 RelativeOffset; /* 0x3C */ - U32 Reserved5; /* 0x40 */ - U32 Reserved6; /* 0x44 */ - U32 Reserved7; /* 0x48 */ - U32 Reserved8; /* 0x4C */ - MPI25_SGE_IO_UNION SGL; /* 0x50 */ -} MPI25_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI25_TARGET_ASSIST_REQUEST, - Mpi25TargetAssistRequest_t, MPI2_POINTER pMpi25TargetAssistRequest_t; - -/* use MPI2_TARGET_ASSIST_FLAGS_ defines for the Flags field */ - -/* Defines for the DMAFlags field - * Each setting affects 4 SGLS, from SGL0 to SGL3. - * D = Data - * C = Cache DIF - * I = Interleaved - * H = Host DIF - */ -#define MPI25_TA_DMAFLAGS_OP_MASK (0x0F) -#define MPI25_TA_DMAFLAGS_OP_D_D_D_D (0x00) -#define MPI25_TA_DMAFLAGS_OP_D_D_D_C (0x01) -#define MPI25_TA_DMAFLAGS_OP_D_D_D_I (0x02) -#define MPI25_TA_DMAFLAGS_OP_D_D_C_C (0x03) -#define MPI25_TA_DMAFLAGS_OP_D_D_C_I (0x04) -#define MPI25_TA_DMAFLAGS_OP_D_D_I_I (0x05) -#define MPI25_TA_DMAFLAGS_OP_D_C_C_C (0x06) -#define MPI25_TA_DMAFLAGS_OP_D_C_C_I (0x07) -#define MPI25_TA_DMAFLAGS_OP_D_C_I_I (0x08) -#define MPI25_TA_DMAFLAGS_OP_D_I_I_I (0x09) -#define MPI25_TA_DMAFLAGS_OP_D_H_D_D (0x0A) -#define MPI25_TA_DMAFLAGS_OP_D_H_D_C (0x0B) -#define MPI25_TA_DMAFLAGS_OP_D_H_D_I (0x0C) -#define MPI25_TA_DMAFLAGS_OP_D_H_C_C (0x0D) -#define MPI25_TA_DMAFLAGS_OP_D_H_C_I (0x0E) -#define MPI25_TA_DMAFLAGS_OP_D_H_I_I (0x0F) - -/* defines for the IoFlags field */ -#define MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */ -#define MPI25_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) -#define MPI25_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) - -/* defines for the EEDPFlags field */ -#define MPI25_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) -#define MPI25_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) -#define MPI25_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) -#define MPI25_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) - -#define MPI25_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) -#define MPI25_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) -#define MPI25_TA_EEDPFLAGS_CHECK_GUARD (0x0100) - -#define MPI25_TA_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) -#define MPI25_TA_EEDPFLAGS_COMPATIBLE_MODE (0x0000) -#define MPI25_TA_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) -#define MPI25_TA_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) -#define MPI25_TA_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) - -#define 
MPI25_TA_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) -#define MPI25_TA_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) -#define MPI25_TA_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) - -#define MPI25_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) - -#define MPI25_TA_EEDPFLAGS_MASK_OP (0x0007) -#define MPI25_TA_EEDPFLAGS_NOOP_OP (0x0000) -#define MPI25_TA_EEDPFLAGS_CHECK_OP (0x0001) -#define MPI25_TA_EEDPFLAGS_STRIP_OP (0x0002) -#define MPI25_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) -#define MPI25_TA_EEDPFLAGS_INSERT_OP (0x0004) -#define MPI25_TA_EEDPFLAGS_REPLACE_OP (0x0006) -#define MPI25_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) - - -/**************************************************************************** -* Target Status Send Request -****************************************************************************/ - -typedef struct _MPI2_TARGET_STATUS_SEND_REQUEST -{ - U8 Reserved1; /* 0x00 */ - U8 StatusFlags; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 QueueTag; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 IoIndex; /* 0x0C */ - U16 InitiatorConnectionTag; /* 0x0E */ - U16 SGLFlags; /* 0x10 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ - U16 Reserved4; /* 0x12 */ - U8 SGLOffset0; /* 0x14 */ - U8 Reserved5; /* 0x15 */ - U16 Reserved6; /* 0x16 */ - U32 Reserved7; /* 0x18 */ - U32 Reserved8; /* 0x1C */ - MPI2_SIMPLE_SGE_UNION StatusDataSGE; /* 0x20 */ /* MPI v2.5: This must be an IEEE Simple Element 64. */ -} MPI2_TARGET_STATUS_SEND_REQUEST, - MPI2_POINTER PTR_MPI2_TARGET_STATUS_SEND_REQUEST, - Mpi2TargetStatusSendRequest_t, MPI2_POINTER pMpi2TargetStatusSendRequest_t; - -/* Target Status Send StatusFlags bits */ - -#define MPI2_TSS_FLAGS_REPOST_CMD_BUFFER (0x80) -#define MPI2_TSS_FLAGS_RETRANSMIT (0x04) -#define MPI2_TSS_FLAGS_AUTO_GOOD_STATUS (0x01) - -/* Target Status Send SGLFlags bits - MPI v2.0 only */ -/* Data Location Address Space */ -#define MPI2_TSS_SGLFLAGS_ADDR_MASK (0x0C) -#define MPI2_TSS_SGLFLAGS_SYSTEM_ADDR (0x00) -#define MPI2_TSS_SGLFLAGS_IOCDDR_ADDR (0x04) -#define MPI2_TSS_SGLFLAGS_IOCPLB_ADDR (0x08) -#define MPI2_TSS_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) -/* Type */ -#define MPI2_TSS_SGLFLAGS_TYPE_MASK (0x03) -#define MPI2_TSS_SGLFLAGS_MPI_TYPE (0x00) -#define MPI2_TSS_SGLFLAGS_IEEE32_TYPE (0x01) -#define MPI2_TSS_SGLFLAGS_IEEE64_TYPE (0x02) - - - -/* - * NOTE: The SSP status IU is big-endian. When used on a little-endian system, - * this structure properly orders the bytes. 
- */ -typedef struct _MPI2_TARGET_SSP_RSP_IU -{ - U32 Reserved0[6]; /* reserved for SSP header */ /* 0x00 */ - - /* start of RESPONSE information unit */ - U32 Reserved1; /* 0x18 */ - U32 Reserved2; /* 0x1C */ - U16 Reserved3; /* 0x20 */ - U8 DataPres; /* lower 2 bits */ /* 0x22 */ - U8 Status; /* 0x23 */ - U32 Reserved4; /* 0x24 */ - U32 SenseDataLength; /* 0x28 */ - U32 ResponseDataLength; /* 0x2C */ - - /* start of Response or Sense Data (size may vary dynamically) */ - U8 ResponseSenseData[4]; /* 0x30 */ -} MPI2_TARGET_SSP_RSP_IU, MPI2_POINTER PTR_MPI2_TARGET_SSP_RSP_IU, - Mpi2TargetSspRspIu_t, MPI2_POINTER pMpi2TargetSspRspIu_t; - - -/**************************************************************************** -* Target Standard Reply - used with Target Assist or Target Status Send -****************************************************************************/ - -typedef struct _MPI2_TARGET_STANDARD_REPLY -{ - U16 Reserved; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U16 IoIndex; /* 0x14 */ - U16 Reserved5; /* 0x16 */ - U32 TransferCount; /* 0x18 */ - U32 BidirectionalTransferCount; /* 0x1C */ -} MPI2_TARGET_STANDARD_REPLY, MPI2_POINTER PTR_MPI2_TARGET_STANDARD_REPLY, - Mpi2TargetErrorReply_t, MPI2_POINTER pMpi2TargetErrorReply_t; - - -/**************************************************************************** -* Target Mode Abort Request -****************************************************************************/ - -typedef struct _MPI2_TARGET_MODE_ABORT_REQUEST -{ - U8 AbortType; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 IoIndexToAbort; /* 0x0C */ - U16 InitiatorDevHandle; /* 0x0E */ - U32 MidToAbort; /* 0x10 */ -} MPI2_TARGET_MODE_ABORT, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT, - Mpi2TargetModeAbort_t, MPI2_POINTER pMpi2TargetModeAbort_t; - -/* Target Mode Abort AbortType values */ - -#define MPI2_TARGET_MODE_ABORT_ALL_CMD_BUFFERS (0x00) -#define MPI2_TARGET_MODE_ABORT_ALL_IO (0x01) -#define MPI2_TARGET_MODE_ABORT_EXACT_IO (0x02) -#define MPI2_TARGET_MODE_ABORT_EXACT_IO_REQUEST (0x03) -#define MPI2_TARGET_MODE_ABORT_IO_REQUEST_AND_IO (0x04) -#define MPI2_TARGET_MODE_ABORT_DEVHANDLE (0x05) -#define MPI2_TARGET_MODE_ABORT_ALL_COMMANDS (0x06) - - -/**************************************************************************** -* Target Mode Abort Reply -****************************************************************************/ - -typedef struct _MPI2_TARGET_MODE_ABORT_REPLY -{ - U16 Reserved; /* 0x00 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved1; /* 0x04 */ - U8 Reserved2; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved3; /* 0x0A */ - U16 Reserved4; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 AbortCount; /* 0x14 */ -} MPI2_TARGET_MODE_ABORT_REPLY, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT_REPLY, - Mpi2TargetModeAbortReply_t, MPI2_POINTER pMpi2TargetModeAbortReply_t; - - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. 
+ * + * + * Name: mpi2_targ.h + * Title: MPI Target mode messages and structures + * Creation Date: September 8, 2006 + * + * mpi2_targ.h Version: 02.00.09 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to + * BufferPostFlags field of CommandBufferPostBase Request. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 10-02-08 02.00.03 Removed NextCmdBufferOffset from + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * Target Status Send Request only takes a single SGE for + * response data. + * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure. + * 11-18-11 02.00.05 Incorporating additions for MPI v2.5. + * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT + * request message structure. + * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and + * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS. + * 06-13-14 02.00.07 Added MinMSIxIndex and MaxMSIxIndex fields to + * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. + * 11-18-14 02.00.08 Updated copyright information. + * 03-16-15 02.00.09 Updated for MPI v2.6. + * Added MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TARG_H +#define MPI2_TARG_H + + +/****************************************************************************** +* +* SCSI Target Messages +* +*******************************************************************************/ + +/**************************************************************************** +* Target Command Buffer Post Base Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST +{ + U8 BufferPostFlags; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 TotalCmdBuffers; /* 0x04 */ + U8 Reserved; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 CmdBufferLength; /* 0x10 */ + U8 MinMSIxIndex; /* 0x12 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ + U8 MaxMSIxIndex; /* 0x13 */ /* MPI 2.5 and newer only; Reserved in MPI 2.0 */ + U32 BaseAddressLow; /* 0x14 */ + U32 BaseAddressHigh; /* 0x18 */ +} MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST, + Mpi2TargetCmdBufferPostBaseRequest_t, + MPI2_POINTER pMpi2TargetCmdBufferPostBaseRequest_t; + +/* values for the BufferPostflags field */ +#define MPI2_CMD_BUF_POST_BASE_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_CMD_BUF_POST_BASE_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_CMD_BUF_POST_BASE_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_CMD_BUF_POST_BASE_IOCPLB_ADDRESS_SPACE (0x08) /* only for MPI v2.5 and earlier */ +#define MPI26_CMD_BUF_POST_BASE_IOCCTL_ADDRESS_SPACE (0x08) /* for MPI v2.6 only */ +#define MPI2_CMD_BUF_POST_BASE_IOCPLBNTA_ADDRESS_SPACE (0x0C) /* only for MPI v2.5 and earlier */ + +#define 
MPI2_CMD_BUF_POST_BASE_FLAGS_AUTO_POST_ALL (0x01) + + +/**************************************************************************** +* Target Command Buffer Post List Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST +{ + U16 Reserved; /* 0x00 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 CmdBufferCount; /* 0x04 */ + U8 Reserved1; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved2; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 IoIndex[2]; /* 0x10 */ +} MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST, + Mpi2TargetCmdBufferPostListRequest_t, + MPI2_POINTER pMpi2TargetCmdBufferPostListRequest_t; + +/**************************************************************************** +* Target Command Buffer Post Base List Reply +****************************************************************************/ + +typedef struct _MPI2_TARGET_BUF_POST_BASE_LIST_REPLY +{ + U8 Flags; /* 0x00 */ + U8 Reserved; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 IoIndex; /* 0x14 */ + U16 Reserved5; /* 0x16 */ + U32 Reserved6; /* 0x18 */ +} MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, + MPI2_POINTER PTR_MPI2_TARGET_BUF_POST_BASE_LIST_REPLY, + Mpi2TargetCmdBufferPostBaseListReply_t, + MPI2_POINTER pMpi2TargetCmdBufferPostBaseListReply_t; + +/* Flags defines */ +#define MPI2_CMD_BUF_POST_REPLY_IOINDEX_VALID (0x01) + + +/**************************************************************************** +* Command Buffer Formats (with 16 byte CDB) +****************************************************************************/ + +typedef struct _MPI2_TARGET_SSP_CMD_BUFFER +{ + U8 FrameType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 InitiatorConnectionTag; /* 0x02 */ + U32 HashedSourceSASAddress; /* 0x04 */ + U16 Reserved2; /* 0x08 */ + U16 Flags; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 Tag; /* 0x10 */ + U16 TargetPortTransferTag; /* 0x12 */ + U32 DataOffset; /* 0x14 */ + /* COMMAND information unit starts here */ + U8 LogicalUnitNumber[8]; /* 0x18 */ + U8 Reserved4; /* 0x20 */ + U8 TaskAttribute; /* lower 3 bits */ /* 0x21 */ + U8 Reserved5; /* 0x22 */ + U8 AdditionalCDBLength; /* upper 5 bits */ /* 0x23 */ + U8 CDB[16]; /* 0x24 */ + /* Additional CDB bytes extend past the CDB field */ +} MPI2_TARGET_SSP_CMD_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_CMD_BUFFER, + Mpi2TargetSspCmdBuffer, MPI2_POINTER pMp2iTargetSspCmdBuffer; + +typedef struct _MPI2_TARGET_SSP_TASK_BUFFER +{ + U8 FrameType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U16 InitiatorConnectionTag; /* 0x02 */ + U32 HashedSourceSASAddress; /* 0x04 */ + U16 Reserved2; /* 0x08 */ + U16 Flags; /* 0x0A */ + U32 Reserved3; /* 0x0C */ + U16 Tag; /* 0x10 */ + U16 TargetPortTransferTag; /* 0x12 */ + U32 DataOffset; /* 0x14 */ + /* TASK information unit starts here */ + U8 LogicalUnitNumber[8]; /* 0x18 */ + U16 Reserved4; /* 0x20 */ + U8 TaskManagementFunction; /* 0x22 */ + U8 Reserved5; /* 0x23 */ + U16 ManagedTaskTag; /* 0x24 */ + U16 Reserved6; /* 0x26 */ + U32 Reserved7; /* 0x28 */ + U32 Reserved8; /* 0x2C */ + U32 Reserved9; /* 0x30 */ +} MPI2_TARGET_SSP_TASK_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_TASK_BUFFER, + 
Mpi2TargetSspTaskBuffer, MPI2_POINTER pMpi2TargetSspTaskBuffer; + +/* mask and shift for HashedSourceSASAddress field */ +#define MPI2_TARGET_HASHED_SAS_ADDRESS_MASK (0xFFFFFF00) +#define MPI2_TARGET_HASHED_SAS_ADDRESS_SHIFT (8) + + +/**************************************************************************** +* MPI v2.0 Target Assist Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_ASSIST_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 TargetAssistFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U16 SGLFlags; /* 0x10 */ + U8 SequenceNumber; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U32 EEDPBlockSize; /* 0x28 */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U32 PrimaryReferenceTag; /* 0x34 */ + U16 PrimaryApplicationTag; /* 0x38 */ + U16 PrimaryApplicationTagMask; /* 0x3A */ + U32 RelativeOffset; /* 0x3C */ + U32 Reserved5; /* 0x40 */ + U32 Reserved6; /* 0x44 */ + U32 Reserved7; /* 0x48 */ + U32 Reserved8; /* 0x4C */ + MPI2_SGE_IO_UNION SGL[1]; /* 0x50 */ +} MPI2_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI2_TARGET_ASSIST_REQUEST, + Mpi2TargetAssistRequest_t, MPI2_POINTER pMpi2TargetAssistRequest_t; + +/* Target Assist TargetAssistFlags bits */ + +#define MPI2_TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80) +#define MPI2_TARGET_ASSIST_FLAGS_TLR (0x10) +#define MPI2_TARGET_ASSIST_FLAGS_RETRANSMIT (0x04) +#define MPI2_TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02) +#define MPI2_TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01) + +/* Target Assist SGLFlags bits */ + +/* base values for Data Location Address Space */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_TARGET_ASSIST_SGLFLAGS_PLBNTA_ADDR (0x0C) + +/* base values for Type */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_TARGET_ASSIST_SGLFLAGS_MPI_TYPE (0x00) +#define MPI2_TARGET_ASSIST_SGLFLAGS_32IEEE_TYPE (0x01) +#define MPI2_TARGET_ASSIST_SGLFLAGS_64IEEE_TYPE (0x02) + +/* shift values for each sub-field */ +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL0_SHIFT (0) + +/* Target Assist IoFlags bits */ + +#define MPI2_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_TARGET_ASSIST_IOFLAGS_MULTICAST (0x0400) +#define MPI2_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) + +/* Target Assist EEDPFlags bits */ + +#define MPI2_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_TA_EEDPFLAGS_CHECK_GUARD 
(0x0100) + +#define MPI2_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_TA_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_TA_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_TA_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_TA_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_TA_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_TA_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + + +/**************************************************************************** +* MPI v2.5 Target Assist Request +****************************************************************************/ + +typedef struct _MPI25_TARGET_ASSIST_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 TargetAssistFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U8 DMAFlags; /* 0x10 */ + U8 Reserved9; /* 0x11 */ + U8 SequenceNumber; /* 0x12 */ + U8 Reserved4; /* 0x13 */ + U8 SGLOffset0; /* 0x14 */ + U8 SGLOffset1; /* 0x15 */ + U8 SGLOffset2; /* 0x16 */ + U8 SGLOffset3; /* 0x17 */ + U32 SkipCount; /* 0x18 */ + U32 DataLength; /* 0x1C */ + U32 BidirectionalDataLength; /* 0x20 */ + U16 IoFlags; /* 0x24 */ + U16 EEDPFlags; /* 0x26 */ + U16 EEDPBlockSize; /* 0x28 */ + U16 Reserved10; /* 0x2A */ + U32 SecondaryReferenceTag; /* 0x2C */ + U16 SecondaryApplicationTag; /* 0x30 */ + U16 ApplicationTagTranslationMask; /* 0x32 */ + U32 PrimaryReferenceTag; /* 0x34 */ + U16 PrimaryApplicationTag; /* 0x38 */ + U16 PrimaryApplicationTagMask; /* 0x3A */ + U32 RelativeOffset; /* 0x3C */ + U32 Reserved5; /* 0x40 */ + U32 Reserved6; /* 0x44 */ + U32 Reserved7; /* 0x48 */ + U32 Reserved8; /* 0x4C */ + MPI25_SGE_IO_UNION SGL; /* 0x50 */ +} MPI25_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI25_TARGET_ASSIST_REQUEST, + Mpi25TargetAssistRequest_t, MPI2_POINTER pMpi25TargetAssistRequest_t; + +/* use MPI2_TARGET_ASSIST_FLAGS_ defines for the Flags field */ + +/* Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. 
+ * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_TA_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_TA_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_TA_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_TA_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_TA_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_TA_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_TA_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_TA_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_TA_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_TA_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_TA_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_TA_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_TA_DMAFLAGS_OP_D_H_I_I (0x0F) + +/* defines for the IoFlags field */ +#define MPI26_TARGET_ASSIST_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) /* MPI v2.6 and later */ +#define MPI25_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI25_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200) + +/* defines for the EEDPFlags field */ +#define MPI25_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI25_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI25_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI25_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI25_TA_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI25_TA_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI25_TA_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI25_TA_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_TA_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_TA_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_TA_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_TA_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_TA_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_TA_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_TA_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +#define MPI25_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI25_TA_EEDPFLAGS_MASK_OP (0x0007) +#define MPI25_TA_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI25_TA_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI25_TA_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI25_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI25_TA_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI25_TA_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI25_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + + +/**************************************************************************** +* Target Status Send Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_STATUS_SEND_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 StatusFlags; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 QueueTag; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 IoIndex; /* 0x0C */ + U16 InitiatorConnectionTag; /* 0x0E */ + U16 SGLFlags; /* 0x10 */ /* MPI v2.0 only. Reserved on MPI v2.5. */ + U16 Reserved4; /* 0x12 */ + U8 SGLOffset0; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 Reserved6; /* 0x16 */ + U32 Reserved7; /* 0x18 */ + U32 Reserved8; /* 0x1C */ + MPI2_SIMPLE_SGE_UNION StatusDataSGE; /* 0x20 */ /* MPI v2.5: This must be an IEEE Simple Element 64. 
*/ +} MPI2_TARGET_STATUS_SEND_REQUEST, + MPI2_POINTER PTR_MPI2_TARGET_STATUS_SEND_REQUEST, + Mpi2TargetStatusSendRequest_t, MPI2_POINTER pMpi2TargetStatusSendRequest_t; + +/* Target Status Send StatusFlags bits */ + +#define MPI2_TSS_FLAGS_REPOST_CMD_BUFFER (0x80) +#define MPI2_TSS_FLAGS_RETRANSMIT (0x04) +#define MPI2_TSS_FLAGS_AUTO_GOOD_STATUS (0x01) + +/* Target Status Send SGLFlags bits - MPI v2.0 only */ +/* Data Location Address Space */ +#define MPI2_TSS_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_TSS_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_TSS_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_TSS_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_TSS_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) +/* Type */ +#define MPI2_TSS_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_TSS_SGLFLAGS_MPI_TYPE (0x00) +#define MPI2_TSS_SGLFLAGS_IEEE32_TYPE (0x01) +#define MPI2_TSS_SGLFLAGS_IEEE64_TYPE (0x02) + + + +/* + * NOTE: The SSP status IU is big-endian. When used on a little-endian system, + * this structure properly orders the bytes. + */ +typedef struct _MPI2_TARGET_SSP_RSP_IU +{ + U32 Reserved0[6]; /* reserved for SSP header */ /* 0x00 */ + + /* start of RESPONSE information unit */ + U32 Reserved1; /* 0x18 */ + U32 Reserved2; /* 0x1C */ + U16 Reserved3; /* 0x20 */ + U8 DataPres; /* lower 2 bits */ /* 0x22 */ + U8 Status; /* 0x23 */ + U32 Reserved4; /* 0x24 */ + U32 SenseDataLength; /* 0x28 */ + U32 ResponseDataLength; /* 0x2C */ + + /* start of Response or Sense Data (size may vary dynamically) */ + U8 ResponseSenseData[4]; /* 0x30 */ +} MPI2_TARGET_SSP_RSP_IU, MPI2_POINTER PTR_MPI2_TARGET_SSP_RSP_IU, + Mpi2TargetSspRspIu_t, MPI2_POINTER pMpi2TargetSspRspIu_t; + + +/**************************************************************************** +* Target Standard Reply - used with Target Assist or Target Status Send +****************************************************************************/ + +typedef struct _MPI2_TARGET_STANDARD_REPLY +{ + U16 Reserved; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 IoIndex; /* 0x14 */ + U16 Reserved5; /* 0x16 */ + U32 TransferCount; /* 0x18 */ + U32 BidirectionalTransferCount; /* 0x1C */ +} MPI2_TARGET_STANDARD_REPLY, MPI2_POINTER PTR_MPI2_TARGET_STANDARD_REPLY, + Mpi2TargetErrorReply_t, MPI2_POINTER pMpi2TargetErrorReply_t; + + +/**************************************************************************** +* Target Mode Abort Request +****************************************************************************/ + +typedef struct _MPI2_TARGET_MODE_ABORT_REQUEST +{ + U8 AbortType; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 IoIndexToAbort; /* 0x0C */ + U16 InitiatorDevHandle; /* 0x0E */ + U32 MidToAbort; /* 0x10 */ +} MPI2_TARGET_MODE_ABORT, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT, + Mpi2TargetModeAbort_t, MPI2_POINTER pMpi2TargetModeAbort_t; + +/* Target Mode Abort AbortType values */ + +#define MPI2_TARGET_MODE_ABORT_ALL_CMD_BUFFERS (0x00) +#define MPI2_TARGET_MODE_ABORT_ALL_IO (0x01) +#define MPI2_TARGET_MODE_ABORT_EXACT_IO (0x02) +#define MPI2_TARGET_MODE_ABORT_EXACT_IO_REQUEST (0x03) +#define 
MPI2_TARGET_MODE_ABORT_IO_REQUEST_AND_IO (0x04) +#define MPI2_TARGET_MODE_ABORT_DEVHANDLE (0x05) +#define MPI2_TARGET_MODE_ABORT_ALL_COMMANDS (0x06) + + +/**************************************************************************** +* Target Mode Abort Reply +****************************************************************************/ + +typedef struct _MPI2_TARGET_MODE_ABORT_REPLY +{ + U16 Reserved; /* 0x00 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved1; /* 0x04 */ + U8 Reserved2; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 AbortCount; /* 0x14 */ +} MPI2_TARGET_MODE_ABORT_REPLY, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT_REPLY, + Mpi2TargetModeAbortReply_t, MPI2_POINTER pMpi2TargetModeAbortReply_t; + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 1ac0c59f78da..fdbcdd2d8ad0 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -1,592 +1,592 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_tool.h - * Title: MPI diagnostic tool structures and definitions - * Creation Date: March 26, 2007 - * - * mpi2_tool.h Version: 02.00.16 - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release - * structures and defines. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. - * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request - * and reply messages. - * Added MPI2_DIAG_BUF_TYPE_EXTENDED. - * Incremented MPI2_DIAG_BUF_TYPE_COUNT. - * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. - * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer - * Post Request. - * 05-25-11 02.00.07 Added Flags field and related defines to - * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. - * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. - * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request - * message. - * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that - * it uses MPI Chain SGE as well as MPI Simple SGE. - * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. - * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. - * 11-18-14 02.00.13 Updated copyright information. - * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean - * Tool Request Message. - * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. - * Added option for DeviceInfo field in ISTWI tool. 
- * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_TOOL_H -#define MPI2_TOOL_H - -/***************************************************************************** -* -* Toolbox Messages -* -*****************************************************************************/ - -/* defines for the Tools */ -#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) -#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) -#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) -#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) -#define MPI2_TOOLBOX_BEACON_TOOL (0x05) -#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) -#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) -#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08) - - -/**************************************************************************** -* Toolbox reply -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_REPLY -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY, - Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t; - - -/**************************************************************************** -* Toolbox Clean Tool request -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Flags; /* 0x0C */ - } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST, - Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t; - -/* values for the Flags field */ -#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) -#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) -#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) -#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) -#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) -#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) -#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) -#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) -#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000) -#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000) -#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000) -#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000) -#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000) -#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000) -#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000) -#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0) -#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010) -#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008) -#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) -#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) -#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) - - -/**************************************************************************** -* Toolbox Memory Move request -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 
Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - MPI2_SGE_SIMPLE_UNION SGL; /* 0x0C */ -} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, - Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t; - - -/**************************************************************************** -* Toolbox Diagnostic Data Upload request -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 SGLFlags; /* 0x0C */ - U8 Reserved5; /* 0x0D */ - U16 Reserved6; /* 0x0E */ - U32 Flags; /* 0x10 */ - U32 DataLength; /* 0x14 */ - MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */ -} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, - MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, - Mpi2ToolboxDiagDataUploadRequest_t, - MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t; - -/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - - -typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER -{ - U32 DiagDataLength; /* 00h */ - U8 FormatCode; /* 04h */ - U8 Reserved1; /* 05h */ - U16 Reserved2; /* 06h */ -} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, - Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t; - - -/**************************************************************************** -* Toolbox ISTWI Read Write Tool -****************************************************************************/ - -/* Toolbox ISTWI Read Write Tool request message */ -typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Reserved5; /* 0x0C */ - U32 Reserved6; /* 0x10 */ - U8 DevIndex; /* 0x14 */ - U8 Action; /* 0x15 */ - U8 SGLFlags; /* 0x16 */ - U8 Flags; /* 0x17 */ - U16 TxDataLength; /* 0x18 */ - U16 RxDataLength; /* 0x1A */ - U32 DeviceInfo[3]; /* 0x1C */ - U32 Reserved11; /* 0x28 */ - U32 Reserved12; /* 0x2C */ - MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */ -} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, - MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, - Mpi2ToolboxIstwiReadWriteRequest_t, - MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t; - -/* values for the Action field */ -#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) -#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) -#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) -#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) -#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) -#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) - -/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - -/* values for the Flags field */ -#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) -#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) - -/* MPI26 TOOLBOX Request MsgFlags defines */ -#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) -#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) /* Request uses Man Page 43 device index 
addressing */ -#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) /* Request uses Man Page 43 device info struct addressing */ - - -/* Toolbox ISTWI Read Write Tool reply message */ -typedef struct _MPI2_TOOLBOX_ISTWI_REPLY -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U8 DevIndex; /* 0x14 */ - U8 Action; /* 0x15 */ - U8 IstwiStatus; /* 0x16 */ - U8 Reserved6; /* 0x17 */ - U16 TxDataCount; /* 0x18 */ - U16 RxDataCount; /* 0x1A */ -} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY, - Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t; - - -/**************************************************************************** -* Toolbox Beacon Tool request -****************************************************************************/ - -typedef struct _MPI2_TOOLBOX_BEACON_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 Reserved5; /* 0x0C */ - U8 PhysicalPort; /* 0x0D */ - U8 Reserved6; /* 0x0E */ - U8 Flags; /* 0x0F */ -} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST, - Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t; - -/* values for the Flags field */ -#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) -#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) - - -/**************************************************************************** -* Toolbox Diagnostic CLI Tool -****************************************************************************/ - -#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) - -/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */ -typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 SGLFlags; /* 0x0C */ - U8 Reserved5; /* 0x0D */ - U16 Reserved6; /* 0x0E */ - U32 DataLength; /* 0x10 */ - U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ - MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */ -} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - Mpi2ToolboxDiagnosticCliRequest_t, - MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; - -/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ - - -/* MPI v2.5 Toolbox Diagnostic CLI Tool request message */ -typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U32 Reserved5; /* 0x0C */ - U32 DataLength; /* 0x10 */ - U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ - MPI25_SGE_IO_UNION SGL; /* 0x70 */ -} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - MPI2_POINTER PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, - Mpi25ToolboxDiagnosticCliRequest_t, - 
MPI2_POINTER pMpi25ToolboxDiagnosticCliRequest_t; - - -/* Toolbox Diagnostic CLI Tool reply message */ -typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 ReturnedDataLength; /* 0x14 */ -} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, - MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, - Mpi2ToolboxDiagnosticCliReply_t, - MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t; - - -/**************************************************************************** -* Toolbox Console Text Display Tool -****************************************************************************/ - -/* Toolbox Console Text Display Tool request message */ -typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 Console; /* 0x0C */ - U8 Flags; /* 0x0D */ - U16 Reserved6; /* 0x0E */ - U8 TextToDisplay[4]; /* 0x10 */ /* actual length determined at runtime based on frame size */ -} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, - MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, - Mpi2ToolboxTextDisplayRequest_t, - MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t; - -/* defines for the Console field */ -#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) -#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) -#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) -#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) - -#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) - -/* defines for the Flags field */ -#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) - - -/**************************************************************************** -* Toolbox Backend Lane Margining Tool -****************************************************************************/ - -/* Toolbox Backend Lane Margining Tool request message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U8 Command; /* 0x0C */ - U8 SwitchPort; /* 0x0D */ - U16 DevHandle; /* 0x0E */ - U8 RegisterOffset; /* 0x10 */ - U8 Reserved5; /* 0x11 */ - U16 DataLength; /* 0x12 */ - MPI25_SGE_IO_UNION SGL; /* 0x14 */ -} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, - Mpi26ToolboxLaneMarginingRequest_t, MPI2_POINTER pMpi2ToolboxLaneMarginingRequest_t; - -/* defines for the Command field */ -#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01) -#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02) -#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03) -#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04) - - -/* Toolbox Backend Lane Margining Tool reply message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY -{ - U8 Tool; /* 0x00 */ - U8 Reserved1; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 
0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U16 ReturnedDataLength; /* 0x14 */ - U16 Reserved6; /* 0x16 */ -} MPI26_TOOLBOX_LANE_MARGINING_REPLY, - MPI2_POINTER PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, - Mpi26ToolboxLaneMarginingReply_t, - MPI2_POINTER pMpi26ToolboxLaneMarginingReply_t; - - -/***************************************************************************** -* -* Diagnostic Buffer Messages -* -*****************************************************************************/ - - -/**************************************************************************** -* Diagnostic Buffer Post request -****************************************************************************/ - -typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST -{ - U8 ExtendedType; /* 0x00 */ - U8 BufferType; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U64 BufferAddress; /* 0x0C */ - U32 BufferLength; /* 0x14 */ - U32 Reserved5; /* 0x18 */ - U32 Reserved6; /* 0x1C */ - U32 Flags; /* 0x20 */ - U32 ProductSpecific[23]; /* 0x24 */ -} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, - Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; - -/* values for the ExtendedType field */ -#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) - -/* values for the BufferType field */ -#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) -#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) -#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) -/* count of the number of buffer types */ -#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) - -/* values for the Flags field */ -#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) /* for MPI v2.0 products only */ -#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) - - -/**************************************************************************** -* Diagnostic Buffer Post reply -****************************************************************************/ - -typedef struct _MPI2_DIAG_BUFFER_POST_REPLY -{ - U8 ExtendedType; /* 0x00 */ - U8 BufferType; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ - U32 TransferLength; /* 0x14 */ -} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY, - Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t; - - -/**************************************************************************** -* Diagnostic Release request -****************************************************************************/ - -typedef struct _MPI2_DIAG_RELEASE_REQUEST -{ - U8 Reserved1; /* 0x00 */ - U8 BufferType; /* 0x01 */ - U8 ChainOffset; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ -} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST, - Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t; - - -/**************************************************************************** -* Diagnostic Buffer Post reply 
-****************************************************************************/ - -typedef struct _MPI2_DIAG_RELEASE_REPLY -{ - U8 Reserved1; /* 0x00 */ - U8 BufferType; /* 0x01 */ - U8 MsgLength; /* 0x02 */ - U8 Function; /* 0x03 */ - U16 Reserved2; /* 0x04 */ - U8 Reserved3; /* 0x06 */ - U8 MsgFlags; /* 0x07 */ - U8 VP_ID; /* 0x08 */ - U8 VF_ID; /* 0x09 */ - U16 Reserved4; /* 0x0A */ - U16 Reserved5; /* 0x0C */ - U16 IOCStatus; /* 0x0E */ - U32 IOCLogInfo; /* 0x10 */ -} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY, - Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t; - - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 + * + * mpi2_tool.h Version: 02.00.16 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. + * Added option for DeviceInfo field in ISTWI tool. 
+ * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TOOL_H +#define MPI2_TOOL_H + +/***************************************************************************** +* +* Toolbox Messages +* +*****************************************************************************/ + +/* defines for the Tools */ +#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI2_TOOLBOX_BEACON_TOOL (0x05) +#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) +#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08) + + +/**************************************************************************** +* Toolbox reply +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t; + + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Flags; /* 0x0C */ + } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t; + +/* values for the Flags field */ +#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) +#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) +#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) +#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) +#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) +#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000) +#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000) +#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000) +#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000) +#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000) +#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000) +#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000) +#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0) +#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010) +#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008) +#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) + + +/**************************************************************************** +* Toolbox Memory Move request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 
Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t; + + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 SGLFlags; /* 0x0C */ + U8 Reserved5; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U32 Flags; /* 0x10 */ + U32 DataLength; /* 0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */ +} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t; + +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER +{ + U32 DiagDataLength; /* 00h */ + U8 FormatCode; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t; + + +/**************************************************************************** +* Toolbox ISTWI Read Write Tool +****************************************************************************/ + +/* Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 Reserved6; /* 0x10 */ + U8 DevIndex; /* 0x14 */ + U8 Action; /* 0x15 */ + U8 SGLFlags; /* 0x16 */ + U8 Flags; /* 0x17 */ + U16 TxDataLength; /* 0x18 */ + U16 RxDataLength; /* 0x1A */ + U32 DeviceInfo[3]; /* 0x1C */ + U32 Reserved11; /* 0x28 */ + U32 Reserved12; /* 0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */ +} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t; + +/* values for the Action field */ +#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) +#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) +#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) +#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) +#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) +#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) + +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/* values for the Flags field */ +#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) +#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) + +/* MPI26 TOOLBOX Request MsgFlags defines */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) /* Request uses Man Page 43 device index 
addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) /* Request uses Man Page 43 device info struct addressing */ + + +/* Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U8 DevIndex; /* 0x14 */ + U8 Action; /* 0x15 */ + U8 IstwiStatus; /* 0x16 */ + U8 Reserved6; /* 0x17 */ + U16 TxDataCount; /* 0x18 */ + U16 RxDataCount; /* 0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t; + + +/**************************************************************************** +* Toolbox Beacon Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Reserved5; /* 0x0C */ + U8 PhysicalPort; /* 0x0D */ + U8 Reserved6; /* 0x0E */ + U8 Flags; /* 0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t; + +/* values for the Flags field */ +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + + +/**************************************************************************** +* Toolbox Diagnostic CLI Tool +****************************************************************************/ + +#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) + +/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 SGLFlags; /* 0x0C */ + U8 Reserved5; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U32 DataLength; /* 0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ + MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; + +/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/* MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U32 Reserved5; /* 0x0C */ + U32 DataLength; /* 0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ +} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + MPI2_POINTER PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + 
MPI2_POINTER pMpi25ToolboxDiagnosticCliRequest_t; + + +/* Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 ReturnedDataLength; /* 0x14 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, + MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t; + + +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ /* actual length determined at runtime based on frame size */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, + MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, + Mpi2ToolboxTextDisplayRequest_t, + MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + +/**************************************************************************** +* Toolbox Backend Lane Margining Tool +****************************************************************************/ + +/* Toolbox Backend Lane Margining Tool request message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Command; /* 0x0C */ + U8 SwitchPort; /* 0x0D */ + U16 DevHandle; /* 0x0E */ + U8 RegisterOffset; /* 0x10 */ + U8 Reserved5; /* 0x11 */ + U16 DataLength; /* 0x12 */ + MPI25_SGE_IO_UNION SGL; /* 0x14 */ +} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, + Mpi26ToolboxLaneMarginingRequest_t, MPI2_POINTER pMpi2ToolboxLaneMarginingRequest_t; + +/* defines for the Command field */ +#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01) +#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02) +#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03) +#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04) + + +/* Toolbox Backend Lane Margining Tool reply message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY +{ + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 
0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U16 ReturnedDataLength; /* 0x14 */ + U16 Reserved6; /* 0x16 */ +} MPI26_TOOLBOX_LANE_MARGINING_REPLY, + MPI2_POINTER PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, + Mpi26ToolboxLaneMarginingReply_t, + MPI2_POINTER pMpi26ToolboxLaneMarginingReply_t; + + +/***************************************************************************** +* +* Diagnostic Buffer Messages +* +*****************************************************************************/ + + +/**************************************************************************** +* Diagnostic Buffer Post request +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST +{ + U8 ExtendedType; /* 0x00 */ + U8 BufferType; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U64 BufferAddress; /* 0x0C */ + U32 BufferLength; /* 0x14 */ + U32 Reserved5; /* 0x18 */ + U32 Reserved6; /* 0x1C */ + U32 Flags; /* 0x20 */ + U32 ProductSpecific[23]; /* 0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t; + +/* values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + +/* values for the BufferType field */ +#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) +/* count of the number of buffer types */ +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) + +/* values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) /* for MPI v2.0 products only */ +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REPLY +{ + U8 ExtendedType; /* 0x00 */ + U8 BufferType; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ + U32 TransferLength; /* 0x14 */ +} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY, + Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t; + + +/**************************************************************************** +* Diagnostic Release request +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REQUEST +{ + U8 Reserved1; /* 0x00 */ + U8 BufferType; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ +} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST, + Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t; + + +/**************************************************************************** +* Diagnostic Release reply 
+****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REPLY +{ + U8 Reserved1; /* 0x00 */ + U8 BufferType; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U16 Reserved5; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY, + Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t; + + +#endif + diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h index e5f465721f4b..080a1005bbd0 100755 --- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h @@ -1,119 +1,119 @@ -/* - * Copyright 2000-2020 Broadcom Inc. All rights reserved. - * - * - * Name: mpi2_type.h - * Title: MPI basic type definitions - * Creation Date: August 16, 2006 - * - * mpi2_type.h Version: 02.00.01 - * - * Version History - * --------------- - * - * Date Version Description - * -------- -------- ------------------------------------------------------ - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 11-18-14 02.00.01 Updated copyright information. - * -------------------------------------------------------------------------- - */ - -#ifndef MPI2_TYPE_H -#define MPI2_TYPE_H - - -/******************************************************************************* - * Define MPI2_POINTER if it hasn't already been defined. By default - * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as - * a far pointer by defining MPI2_POINTER as "far *" before this header file is - * included. 
- */ -#ifndef MPI2_POINTER -#define MPI2_POINTER * -#endif - -#ifdef MPT3SAS_BASE_H_INCLUDED - -/***************************************************************************** - * - * Basic Types - * - *****************************************************************************/ - -typedef u8 U8; -typedef __le16 U16; -typedef __le32 U32; -typedef __le64 U64 __attribute__((aligned(4))); - -/***************************************************************************** - * - * Pointer Types - * - *****************************************************************************/ - -typedef U8 *PU8; -typedef U16 *PU16; -typedef U32 *PU32; -typedef U64 *PU64; - -#endif - -/* the basic types may have already been included by mpi_type.h */ -#if !defined(MPI_TYPE_H) && !defined(MPT3SAS_BASE_H_INCLUDED) -/***************************************************************************** -* -* Basic Types -* -*****************************************************************************/ - -typedef signed char S8; -typedef unsigned char U8; -typedef signed short S16; -typedef unsigned short U16; - - -#if defined(unix) || defined(__arm) || defined(ALPHA) || defined(__PPC__) || defined(__ppc) - - typedef signed int S32; - typedef unsigned int U32; - -#else - - typedef signed long S32; - typedef unsigned long U32; - -#endif - - -typedef struct _S64 -{ - U32 Low; - S32 High; -} S64; - -typedef struct _U64 -{ - U32 Low; - U32 High; -} U64; - - -/***************************************************************************** -* -* Pointer Types -* -*****************************************************************************/ - -typedef S8 *PS8; -typedef U8 *PU8; -typedef S16 *PS16; -typedef U16 *PU16; -typedef S32 *PS32; -typedef U32 *PU32; -typedef S64 *PS64; -typedef U64 *PU64; - -#endif - -#endif - +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_type.h + * Title: MPI basic type definitions + * Creation Date: August 16, 2006 + * + * mpi2_type.h Version: 02.00.01 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 11-18-14 02.00.01 Updated copyright information. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TYPE_H +#define MPI2_TYPE_H + + +/******************************************************************************* + * Define MPI2_POINTER if it hasn't already been defined. By default + * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as + * a far pointer by defining MPI2_POINTER as "far *" before this header file is + * included. 
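+ * For example, a legacy 16-bit build could place
+ *     #define MPI2_POINTER far *
+ * ahead of this include; with the default definition below, a
+ * declaration such as "MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY"
+ * simply expands to an ordinary near pointer.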
+ */ +#ifndef MPI2_POINTER +#define MPI2_POINTER * +#endif + +#ifdef MPT3SAS_BASE_H_INCLUDED + +/***************************************************************************** + * + * Basic Types + * + *****************************************************************************/ + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __attribute__((aligned(4))); + +/***************************************************************************** + * + * Pointer Types + * + *****************************************************************************/ + +typedef U8 *PU8; +typedef U16 *PU16; +typedef U32 *PU32; +typedef U64 *PU64; + +#endif + +/* the basic types may have already been included by mpi_type.h */ +#if !defined(MPI_TYPE_H) && !defined(MPT3SAS_BASE_H_INCLUDED) +/***************************************************************************** +* +* Basic Types +* +*****************************************************************************/ + +typedef signed char S8; +typedef unsigned char U8; +typedef signed short S16; +typedef unsigned short U16; + + +#if defined(unix) || defined(__arm) || defined(ALPHA) || defined(__PPC__) || defined(__ppc) + + typedef signed int S32; + typedef unsigned int U32; + +#else + + typedef signed long S32; + typedef unsigned long U32; + +#endif + + +typedef struct _S64 +{ + U32 Low; + S32 High; +} S64; + +typedef struct _U64 +{ + U32 Low; + U32 High; +} U64; + + +/***************************************************************************** +* +* Pointer Types +* +*****************************************************************************/ + +typedef S8 *PS8; +typedef U8 *PU8; +typedef S16 *PS16; +typedef U16 *PU16; +typedef S32 *PS32; +typedef U32 *PU32; +typedef S64 *PS64; +typedef U64 *PU64; + +#endif + +#endif + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 700944d33a6d..7aed8a6120e4 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -153,6 +153,13 @@ enum mpt3sas_perf_mode { static void _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc); +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, + u32 ioc_state, int timeout); + +void +mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc); + /** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. * @@ -676,6 +683,85 @@ mpt3sas_base_pci_device_is_available(struct MPT3SAS_ADAPTER *ioc) return 1; } +/** + * _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp. + * + * @ioc: Per Adapter Object + * + * Return nothing. 
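+ *
+ * The sync is done by sending an MPI2_FUNCTION_IO_UNIT_CONTROL request
+ * with Operation MPI26_CTRL_OP_SET_IOC_PARAMETER and IOCParameter
+ * MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP. The host time, in
+ * milliseconds, is carried as a 64-bit value split across two 32-bit
+ * fields: the low 32 bits in Reserved7 and the high 32 bits in
+ * IOCParameterValue.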
+ */ +static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + ktime_t current_time; +#else + struct timeval current_time; +#endif + u64 TimeStamp = 0; + u8 issue_reset = 0; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + pr_err("%s: scsih_cmd in use %s\n", ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err("%s: failed obtaining a smid %s\n", ioc->name, + __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER; + mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)) + current_time = ktime_get_real(); + TimeStamp = ktime_to_ms(current_time); +#else + do_gettimeofday(&current_time); + TimeStamp = (u64) (current_time.tv_sec * 1000) + + (current_time.tv_usec / 1000); +#endif + mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n", + ioc->name, TimeStamp)); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ); + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset); + goto issue_host_reset; + } + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT + "Io Unit Control sync timestamp (complete): " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } +issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + out: + mutex_unlock(&ioc->scsih_cmds.mutex); + return; +} + /** * _base_fault_reset_work - workq handling ioc fault conditions * @work: input argument, used to derive ioc @@ -701,7 +787,8 @@ _base_fault_reset_work(void *arg) spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery || ioc->pci_error_recovery || ioc->remove_host) + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery || ioc->remove_host) goto rearm_timer; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); @@ -751,6 +838,48 @@ _base_fault_reset_work(void *arg) return; /* don't rearm timer */ } + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec)? 
+ ioc->manu_pg11.CoreDumpTOSec: + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + timeout /= (FAULT_POLLING_INTERVAL/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_base_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + /* do not accept any IOs and disable the interrupts */ + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + mpt3sas_base_mask_interrupts(ioc); + _base_clear_outstanding_mpt_commands(ioc); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + } + + drsprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: CoreDump loop %d.", ioc->name, + __func__, ioc->ioc_coredump_loop)); + + /* Wait until CoreDump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + } + + if (ioc->ioc_coredump_loop) { + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP) + printk(MPT3SAS_ERR_FMT "%s: CoreDump completed. LoopCount: %d", ioc->name, + __func__, ioc->ioc_coredump_loop); + else + printk(MPT3SAS_ERR_FMT "%s: CoreDump Timed out. LoopCount: %d", ioc->name, + __func__, ioc->ioc_coredump_loop); + + ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; + } + ioc->non_operational_loop = 0; if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { @@ -758,8 +887,11 @@ _base_fault_reset_work(void *arg) printk(MPT3SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name, __func__, (rc == 0) ? "success" : "failed"); doorbell = mpt3sas_base_get_iocstate(ioc, 0); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc, doorbell & + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) + mpt3sas_base_coredump_info(ioc, doorbell & MPI2_DOORBELL_DATA_MASK); if (rc && (doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) @@ -772,6 +904,14 @@ _base_fault_reset_work(void *arg) #endif } + ioc->ioc_coredump_loop = 0; + + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + _base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); rearm_timer: if (ioc->fault_reset_work_q) @@ -871,7 +1011,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) if (ioc->fault_reset_work_q) return; - + + ioc->timestamp_update_count = 0; /* initialize fault polling */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); @@ -1051,6 +1192,47 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) ioc->name, fault_code); } +/** + * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) +{ + printk(MPT3SAS_ERR_FMT "coredump_state(0x%04x)!\n", + ioc->name, fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - Wait until coredump + * completes or times out + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. 
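+ *
+ * The wait interval is Manufacturing Page 11 CoreDumpTOSec when that
+ * field is non-zero, otherwise MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS.
+ * Completion is detected by the IOC leaving the CoreDump state and
+ * entering the FAULT state.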
+ */ +int +mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec)?ioc->manu_pg11.CoreDumpTOSec: + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + printk(MPT3SAS_ERR_FMT "%s: CoreDump timed out. " + " (ioc_state=0x%x)\n", ioc->name, caller, ioc_state); + else + printk(MPT3SAS_INFO_FMT "%s: CoreDump completed. " + " (ioc_state=0x%x)\n", ioc->name, caller, ioc_state); + + return ioc_state; +} + /** * mpt3sas_halt_firmware - halt's mpt controller firmware * @ioc: per adapter object @@ -1073,8 +1255,12 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc, u8 set_fault) dump_stack(); doorbell = ioc->base_readl(&ioc->chip->Doorbell); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc , doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc , doorbell); + } + else if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) + mpt3sas_base_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); else { writel(0xC0FFEE00, &ioc->chip->Doorbell); if (!set_fault) @@ -1160,6 +1346,20 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return; + /* + * Older Firmware version doesn't support driver trigger pages. + * So, skip displaying 'config invalid type' type + * of error message. + */ + if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { + Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr; + + if ((rqst->ExtPageType == + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) && + !(ioc->logging_level & MPT_DEBUG_CONFIG)) { + return; + } + } switch (ioc_status) { @@ -1759,15 +1959,15 @@ mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) } /** - * _base_unmask_interrupts - enable interrupts + * mpt3sas_base_unmask_interrupts - enable interrupts * @ioc: per adapter object * * Enabling only Reply Interrupts * * Return nothing. */ -static void -_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) { u32 him_register; @@ -1915,7 +2115,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) * So that FW can find enough entries to post the Reply * Descriptors in the reply descriptor post queue. */ - if (!base_mod64(completed_cmds, ioc->thresh_hold)) { + if (completed_cmds >= ioc->thresh_hold) { if (ioc->combined_reply_queue) { writel(reply_q->reply_post_host_index | ((msix_index & 7) << @@ -2016,7 +2216,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget) { reply_q = container_of(irqpoll, struct adapter_reply_queue, irqpoll); if (reply_q->irq_line_enable) { - disable_irq(reply_q->os_irq); + disable_irq_nosync(reply_q->os_irq); reply_q->irq_line_enable = false; } @@ -2076,7 +2276,6 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; } - /** ** This routine was added because mpt3sas_base_flush_reply_queues() ** skips over reply queues that are currently busy (i.e. 
being handled @@ -2090,6 +2289,8 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) ** ** mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts ** @ioc: per adapter object + ** @poll: poll the reply descriptor pools in case the interrupt for a + ** timed-out SCSI command got delayed ** Context: non ISR conext ** ** Called when a Task Management request has completed. @@ -2097,7 +2298,7 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) ** Return nothing. **/ void -mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) { struct adapter_reply_queue *reply_q; @@ -2106,6 +2307,7 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) */ if (!_base_is_controller_msix_enabled(ioc)) return; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) @@ -2113,7 +2315,11 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) + synchronize_irq(reply_q->vector); +#else + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); +#endif #if defined(MPT3SAS_ENABLE_IRQ_POLL) if (reply_q->irq_poll_scheduled) { /* Calling irq_poll_disable will wait for any pending @@ -2121,18 +2327,18 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) */ irq_poll_disable(&reply_q->irqpoll); irq_poll_enable(&reply_q->irqpoll); - reply_q->irq_poll_scheduled = false; - reply_q->irq_line_enable = true; - enable_irq(reply_q->os_irq); - continue; + /* check how the scheduled poll has ended, + * clean up only if necessary + */ + if (reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } } #endif - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) - synchronize_irq(reply_q->vector); -#else - synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); -#endif + if (poll) + _base_process_reply_queue(reply_q); } } @@ -3596,13 +3802,14 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) char *desc = "64"; u64 consistant_dma_mask = DMA_BIT_MASK(64); - if(ioc->is_mcpu_endpoint) + if (ioc->is_mcpu_endpoint || ioc->use_32bit_dma) goto try_32bit; /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { consistant_dma_mask = DMA_BIT_MASK(63); desc = "63"; + ioc->dma_mask = 63; } if (sizeof(dma_addr_t) > 4) { @@ -3613,7 +3820,6 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) goto try_32bit; required_mask = dma_get_required_mask(&pdev->dev); - if (required_mask > DMA_32BIT_MASK && !pci_set_consistent_dma_mask(pdev, consistant_dma_mask)) { ioc->base_add_sg_single = &_base_add_sg_single_64; @@ -3629,6 +3835,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) ioc->base_add_sg_single = &_base_add_sg_single_32; ioc->sge_size = sizeof(Mpi2SGESimple32_t); desc = "32"; + ioc->dma_mask = 32; } else return -ENODEV; @@ -4059,6 +4266,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) */ if (!ioc->combined_reply_queue && ioc->hba_mpi_version_belonged != MPI2_VERSION) { + printk(MPT3SAS_INFO_FMT + "combined reply queue is off, so enabling msix load balance\n", + ioc->name); ioc->msix_load_balance = true; } @@ -4073,9 +4283,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) #if
(LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) r = _base_alloc_irq_vectors(ioc); if (r < 0) { - dfailprintk(ioc, printk(MPT3SAS_INFO_FMT + printk(MPT3SAS_WARN_FMT "pci_alloc_irq_vectors failed (r=%d) !!!\n", - ioc->name, r)); + ioc->name, r); goto try_ioapic; } @@ -4092,9 +4302,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); if (!entries) { - dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "kcalloc " + printk(MPT3SAS_WARN_FMT "kcalloc " "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__, - __LINE__, __func__)); + __LINE__, __func__); goto try_ioapic; } for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) @@ -4457,7 +4667,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) doorbell = ioc->base_readl(&ioc->chip->Doorbell); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc , doorbell); + mpt3sas_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_base_coredump_info(ioc, doorbell); + return -EFAULT; + } } else if (int_status == 0xFFFFFFFF) @@ -4617,10 +4832,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, if (ioc->logging_level & MPT_DEBUG_INIT) { mfp = (__le32 *)reply; - printk(KERN_INFO "\toffset:data\n"); + printk(MPT3SAS_INFO_FMT "\toffset:data\n", ioc->name); for (i = 0; i < reply_bytes/4; i++) - printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, - le32_to_cpu(mfp[i])); + printk(MPT3SAS_INFO_FMT "\t[0x%02x]:%08x\n", ioc->name, + i*4, le32_to_cpu(mfp[i])); } return 0; } @@ -4653,6 +4868,23 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) return current_state; } +/** + * _base_dump_reg_set - print a hexdump of the system register set + * @ioc: per adapter object + * + * Returns nothing.
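+ * + * The dump is one u32 per line in "offset: value" form; e.g. the first + * lines look like (register values illustrative only): + * + *	00000000: 22000000 + *	00000004: 00000000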
+ */ +static inline void +_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int i, sz = 256; + u32 __iomem *reg = (u32 __iomem *)ioc->chip; + + printk(MPT3SAS_FMT "System Register set:\n", ioc->name); + for (i = 0; i < (sz / sizeof(u32)); i++) + printk("%08x: %08x\n", (i * 4), readl(&reg[i])); +} + /** * _base_diag_reset - the "big hammer" start of day reset * @ioc: per adapter object @@ -4695,8 +4927,13 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) /* wait 100 msec */ msleep(100); - if (count++ > 20) + if (count++ > 20) { + printk(MPT3SAS_ERR_FMT + "Giving up writing magic sequence after 20 retries\n", + ioc->name); + _base_dump_reg_set(ioc); goto out; + } host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); drsprintk(ioc, printk(MPT3SAS_INFO_FMT "wrote magic " @@ -4737,8 +4974,13 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); - if (host_diagnostic == 0xFFFFFFFF) + if (host_diagnostic == 0xFFFFFFFF) { + printk(MPT3SAS_ERR_FMT + "Invalid host diagnostic register value\n", + ioc->name); + _base_dump_reg_set(ioc); goto out; + } if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; @@ -4777,6 +5019,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) if (ioc_state) { printk(MPT3SAS_ERR_FMT "%s: failed going to ready state " " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); + _base_dump_reg_set(ioc); goto out; } @@ -4834,10 +5077,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; } + else if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + printk(MPT3SAS_ERR_FMT "%s: Skipping the diag reset here. " + "(ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); + return -EFAULT; + } ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); @@ -4876,10 +5124,16 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) ioc->name, __func__, ioc_state)); if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = _base_diag_reset(ioc); } + else if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + mpt3sas_base_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + rc = _base_diag_reset(ioc); + } return rc; } @@ -5162,8 +5416,9 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) sizeof(u64 *), GFP_KERNEL); #endif if (!ioc->replyPostRegisterIndex) { - dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation " - "for reply Post Register Index failed!!!\n", ioc->name)); + printk(MPT3SAS_ERR_FMT + "allocation for reply Post Register Index failed!!!\n", + ioc->name); r = -ENOMEM; goto out_fail; } @@ -5337,6 +5592,27 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, return ioc->cpu_msix_table[raw_smp_processor_id()]; } +/** + * _base_sdev_nr_inflight_request - get number of inflight requests + * of a request queue. + * @ioc: per adapter object + * @scmd: scsi_cmnd object + * + * returns the number of inflight requests of a request queue.
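+ * + * Minimal usage sketch (mirrors _base_get_high_iops_msix_index() below; + * not additional code from this patch): + * + *	if (_base_sdev_nr_inflight_request(ioc, scmd) > + *	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) + *		... route the IO to a high-iops reply queue ...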
+ */ +inline unsigned long +_base_sdev_nr_inflight_request(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_BITMAP_BLK_MQ) { + struct blk_mq_hw_ctx *hctx = + scmd->device->request_queue->queue_hw_ctx[0]; + return atomic_read(&hctx->nr_active); + } + else + return atomic_read(&scmd->device->device_busy); +} + /** * _base_get_high_iops_msix_index - get the msix index of high iops queues * @ioc: per adapter object @@ -5356,8 +5632,8 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, * IOs on the target device is >=8. */ #if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7 && RHEL_MINOR >= 2)) || \ - LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) - if (atomic_read(&scmd->device->device_busy) > + (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))) + if (_base_sdev_nr_inflight_request(ioc, scmd) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) #else if (scmd->device->device_busy > @@ -5560,7 +5836,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_loc spin_lock_irqsave(writeq_lock, flags); writel((u32)(data_out), addr); writel((u32)(data_out >> 32), (addr + 4)); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (((LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) && (!defined(RHEL_MAJOR))) || \ + (defined(RHEL_MAJOR) && ((RHEL_MAJOR == 8 && RHEL_MINOR < 2) || (RHEL_MAJOR < 8)))) mmiowb(); #endif spin_unlock_irqrestore(writeq_lock, flags); @@ -6264,7 +6541,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length, &fwpkg_data_dma); if (!fwpkg_data) { - printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + printk(MPT3SAS_ERR_FMT + "Memory allocation for fwpkg data got failed at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -ENOMEM; } @@ -6583,6 +6861,7 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) "performance mode: balanced\n", ioc->name); return; } + /* Fall through */ case MPT_PERF_MODE_LATENCY: /* * Enable interrupt coalescing on all reply queues @@ -6609,6 +6888,312 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) } } +/** + * _base_get_mpi_diag_triggers - get mpi diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return nothing. 
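+ * + * Reads driver trigger page 4 and copies up to NUM_VALID_ENTRIES + * (IOCStatus, LogInfo) pairs into ioc->diag_trigger_mpi. The same + * read-validate-copy pattern is used below for page 3 (SCSI sense + * triggers) and page 2 (event triggers).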
+ */ +static void +_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage4_t trigger_pg4; + struct SL_WH_MPI_TRIGGER_T *status_tg; + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, + &trigger_pg4); + if (r) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return; + } + + if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) { + count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_mpi.ValidEntries = count; + + status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0]; + mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0]; + + for (i = 0; i < count; i++) { + status_tg->IOCStatus = le16_to_cpu( + mpi_status_tg->IOCStatus); + status_tg->IocLogInfo = le32_to_cpu( + mpi_status_tg->LogInfo); + + status_tg++; + mpi_status_tg++; + } + } +} + +/** + * _base_get_scsi_diag_triggers - get scsi diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return nothing. + */ +static void +_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage3_t trigger_pg3; + struct SL_WH_SCSI_TRIGGER_T *scsi_tg; + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, + &trigger_pg3); + if (r) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return; + } + + if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) { + count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_scsi.ValidEntries = count; + + scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0]; + mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0]; + for (i = 0; i < count; i++) { + scsi_tg->ASCQ = mpi_scsi_tg->ASCQ; + scsi_tg->ASC = mpi_scsi_tg->ASC; + scsi_tg->SenseKey = mpi_scsi_tg->SenseKey; + + scsi_tg++; + mpi_scsi_tg++; + } + } +} + +/** + * _base_get_event_diag_triggers - get event diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return nothing. 
+ */ +static void +_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage2_t trigger_pg2; + struct SL_WH_EVENT_TRIGGER_T *event_tg; + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg; + Mpi2ConfigReply_t mpi_reply; + int r = 0, i = 0; + u16 count = 0; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, + &trigger_pg2); + if (r) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return; + } + + if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) { + count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger); + count = min_t(u16, NUM_VALID_ENTRIES, count); + ioc->diag_trigger_event.ValidEntries = count; + + event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0]; + mpi_event_tg = &trigger_pg2.MPIEventTriggers[0]; + for (i = 0; i < count; i++) { + event_tg->EventValue = le16_to_cpu( + mpi_event_tg->MPIEventCode); + event_tg->LogEntryQualifier = le16_to_cpu( + mpi_event_tg->MPIEventCodeSpecific); + event_tg++; + mpi_event_tg++; + } + } +} + +/** + * _base_get_master_diag_triggers - get master diag trigger values from + * persistent pages + * @ioc : per adapter object + * + * Return nothing. + */ +static void +_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage1_t trigger_pg1; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, + &trigger_pg1); + if (r) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dinitprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return; + } + + if (le16_to_cpu(trigger_pg1.NumMasterTrigger)) + ioc->diag_trigger_master.MasterData |= + le32_to_cpu( + trigger_pg1.MasterTriggers[0].MasterTriggerFlags); +} + +/** + * _base_check_for_trigger_pages_support - checks whether HBA FW supports + * driver trigger pages or not + * @ioc : per adapter object + * + * Returns trigger flags mask if HBA FW supports driver trigger pages, + * otherwise returns EFAULT. + */ +static int +_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26DriverTriggerPage0_t trigger_pg0; + int r = 0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + + r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, + &trigger_pg0); + if (r) + return -EFAULT; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return -EFAULT; + + return le16_to_cpu(trigger_pg0.TriggerFlags); +} + +/** + * _base_get_diag_triggers - Retrieve diag trigger values from + * persistent pages. + * @ioc : per adapter object + * + * Return nothing. + */ +static void +_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) +{ + short trigger_flags; + + /* + * Default setting of master trigger. + */ + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + + trigger_flags = _base_check_for_trigger_pages_support(ioc); + if (trigger_flags < 0) + return; + + ioc->supports_trigger_pages = 1; + + /* + * Retrieve master diag trigger values from driver trigger pg1 + * if master trigger bit enabled in TriggerFlags. 
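+ * + * (Note: the default MasterData set above - FW fault plus adapter + * reset - is retained even when trigger pg0 is unsupported and this + * function returns early.)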
+ */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) + _base_get_master_diag_triggers(ioc); + + /* + * Retrieve event diag trigger values from driver trigger pg2 + * if event trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) + _base_get_event_diag_triggers(ioc); + + /* + * Retrieve scsi diag trigger values from driver trigger pg3 + * if scsi trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) + _base_get_scsi_diag_triggers(ioc); + + /* + * Retrieve mpi error diag trigger values from driver trigger pg4 + * if loginfo trigger bit enabled in TriggerFlags. + */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) + _base_get_mpi_diag_triggers(ioc); +} + +/** + * _base_update_diag_trigger_pages - Update the driver trigger pages after + * online FW update, in case the updated FW supports driver + * trigger pages. + * @ioc : per adapter object + * + * Return nothing. + */ +static void +_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) +{ + + if (ioc->diag_trigger_master.MasterData) + mpt3sas_config_update_driver_trigger_pg1(ioc, + &ioc->diag_trigger_master, 1); + + if (ioc->diag_trigger_event.ValidEntries) + mpt3sas_config_update_driver_trigger_pg2(ioc, + &ioc->diag_trigger_event, 1); + + if (ioc->diag_trigger_scsi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg3(ioc, + &ioc->diag_trigger_scsi, 1); + + if (ioc->diag_trigger_mpi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg4(ioc, + &ioc->diag_trigger_mpi, 1); +} + /** * _base_static_config_pages - static start of day config pages * @ioc: per adapter object @@ -6620,6 +7205,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) { Mpi2ConfigReply_t mpi_reply; u32 iounit_pg1_flags; + int tg_flags; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) && \ LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) u32 cap; @@ -6659,6 +7245,27 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) else ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO; } + + ioc->time_sync_interval = + ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK; + if (ioc->time_sync_interval) { + if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK) + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_HOUR; + else + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_MIN; + dinitprintk(ioc, printk(MPT3SAS_FMT + "Driver-FW TimeSync interval is %d seconds. " + "ManuPg11 TimeSync Unit is in %s's\n", ioc->name, + ioc->time_sync_interval, ((ioc->manu_pg11.TimeSyncInterval & + MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"))); + } + else { + if (ioc->is_gen35_ioc) + pr_warn("%s: TimeSync Interval in Manuf page-11 is not enabled.\n" + "Periodic Time-Sync will be disabled\n", ioc->name); + } mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); @@ -6709,6 +7316,30 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) #endif if (ioc->is_aero_ioc) _base_update_ioc_page1_inlinewith_perf_mode(ioc); + + if (ioc->is_gen35_ioc) { + if (ioc->is_driver_loading) + _base_get_diag_triggers(ioc); + else { + /* + * In case of online HBA FW update operation, + * check whether updated FW supports the driver trigger + * pages or not.
+ * - If previous FW has not supported driver trigger + * pages and newer FW supports them then update these + * pages with current diag trigger values. + * - If previous FW has supported driver trigger pages + * and new FW doesn't support them then disable + * support_trigger_pages flag. + */ + tg_flags = _base_check_for_trigger_pages_support(ioc); + if (!ioc->supports_trigger_pages && tg_flags != -EFAULT) + _base_update_diag_trigger_pages(ioc); + else if (ioc->supports_trigger_pages && + tg_flags == -EFAULT) + ioc->supports_trigger_pages = 0; + } + } } @@ -6745,14 +7376,15 @@ static void _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) { int i, j; + int dma_alloc_count = 0; struct chain_tracker *ct; - u16 set_of_rdpqs = (ioc->hba_mpi_version_belonged == MPI26_VERSION) ? 16 : 8; + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, __func__)); if (ioc->request) { - pci_free_consistent(ioc->pdev, ioc->request_dma_sz, + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, ioc->request, ioc->request_dma); dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "request_pool(0x%p)" ": free\n", ioc->name, ioc->request)); @@ -6760,102 +7392,70 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } if (ioc->sense) { - pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); - if (ioc->sense_dma_pool) - pci_pool_destroy(ioc->sense_dma_pool); + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "sense_pool(0x%p)" ": free\n", ioc->name, ioc->sense)); ioc->sense = NULL; } if (ioc->reply) { - pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); - if (ioc->reply_dma_pool) - pci_pool_destroy(ioc->reply_dma_pool); + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_pool(0x%p)" ": free\n", ioc->name, ioc->reply)); ioc->reply = NULL; } if (ioc->reply_free) { - pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, ioc->reply_free_dma); - if (ioc->reply_free_dma_pool) - pci_pool_destroy(ioc->reply_free_dma_pool); + dma_pool_destroy(ioc->reply_free_dma_pool); dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free_pool" "(0x%p): free\n", ioc->name, ioc->reply_free)); ioc->reply_free = NULL; } if (ioc->reply_post) { - if (ioc->rdpq_array_enable) { - for (i=0; i<ioc->reply_queue_count; i++) { - if ((i%set_of_rdpqs == 0) && ((i+(set_of_rdpqs - 1)) < ioc->reply_queue_count)) - { - if (ioc->reply_post[i].reply_post_free) { - pci_pool_free( - ioc->reply_post[i].dma_pool, - ioc->reply_post[i].reply_post_free, - ioc->reply_post[i].reply_post_free_dma); - dexitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply_post_free_pool(0x%p): free\n", - ioc->name, - ioc->reply_post[i].reply_post_free)); - ioc->reply_post[i].reply_post_free = NULL; - } + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + for (i = 0; i < count; i++) { + if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 + && dma_alloc_count) { + if (ioc->reply_post[i].reply_post_free) { + dma_pool_free( + ioc->reply_post_free_dma_pool, + ioc->reply_post[i].reply_post_free, + ioc->reply_post[i].reply_post_free_dma); + printk(MPT3SAS_ERR_FMT + "reply_post_free_pool(0x%p): free\n", + ioc->name, + ioc->reply_post[i].reply_post_free); + ioc->reply_post[i].reply_post_free = + NULL; } - else if(i%set_of_rdpqs == 0) { - if 
(ioc->reply_post[i].reply_post_free) { - pci_pool_free( - ioc->reply_post[i].dma_pool, - ioc->reply_post[i].reply_post_free, - ioc->reply_post[i].reply_post_free_dma); - dexitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply_post_free_pool(0x%p): free\n", - ioc->name, - ioc->reply_post[i].reply_post_free)); - ioc->reply_post[i].reply_post_free = NULL; - } - } - } - - if (ioc->reply_post_free_array) { - pci_pool_free(ioc->reply_post_free_array_dma_pool, - ioc->reply_post_free_array, ioc->reply_post_free_array_dma); - ioc->reply_post_free_array = NULL; - } - if (ioc->reply_post_free_array_dma_pool) - pci_pool_destroy(ioc->reply_post_free_array_dma_pool); - } else { - if (ioc->reply_post[0].reply_post_free) { - pci_pool_free(ioc->reply_post_free_dma_pool, - ioc->reply_post[0].reply_post_free, - ioc->reply_post[0].reply_post_free_dma); - dexitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply_post_free_pool(0x%p): free\n", ioc->name, - ioc->reply_post[0].reply_post_free)); - ioc->reply_post[0].reply_post_free = NULL; + --dma_alloc_count; } } - if (ioc->reply_post_free_dma_pool) - pci_pool_destroy(ioc->reply_post_free_dma_pool); - if (ioc->reply_post_free_dma_pool_align) - pci_pool_destroy(ioc->reply_post_free_dma_pool_align); - if (ioc->reply_post_free_dma_pool_mod_rdpq_set) - pci_pool_destroy(ioc->reply_post_free_dma_pool_mod_rdpq_set); - if (ioc->reply_post_free_dma_pool_mod_rdpq_set_align) - pci_pool_destroy(ioc->reply_post_free_dma_pool_mod_rdpq_set_align); + dma_pool_destroy(ioc->reply_post_free_dma_pool); + if (ioc->reply_post_free_array && + ioc->rdpq_array_enable) { + dma_pool_free(ioc->reply_post_free_array_dma_pool, + ioc->reply_post_free_array, + ioc->reply_post_free_array_dma); + ioc->reply_post_free_array = NULL; + } + dma_pool_destroy(ioc->reply_post_free_array_dma_pool); kfree(ioc->reply_post); } if(ioc->pcie_sgl_dma_pool) { for (i = 0; i < ioc->scsiio_depth; i++) { - pci_pool_free(ioc->pcie_sgl_dma_pool, + dma_pool_free(ioc->pcie_sgl_dma_pool, ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl_dma); } - if (ioc->pcie_sgl_dma_pool) - pci_pool_destroy(ioc->pcie_sgl_dma_pool); + dma_pool_destroy(ioc->pcie_sgl_dma_pool); } if (ioc->pcie_sg_lookup) @@ -6865,7 +7465,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) dexitprintk(ioc, printk(MPT3SAS_INFO_FMT "config_page(0x%p): free\n", ioc->name, ioc->config_page)); - pci_free_consistent(ioc->pdev, ioc->config_page_sz, + dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, ioc->config_page, ioc->config_page_dma); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)) @@ -6882,21 +7482,20 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) j < ioc->chains_needed_per_io; j++) { ct = &ioc->chain_lookup[i].chains_per_smid[j]; if (ct && ct->chain_buffer) - pci_pool_free(ioc->chain_dma_pool, + dma_pool_free(ioc->chain_dma_pool, ct->chain_buffer, ct->chain_buffer_dma); } kfree(ioc->chain_lookup[i].chains_per_smid); } - if (ioc->chain_dma_pool) - pci_pool_destroy(ioc->chain_dma_pool); + dma_pool_destroy(ioc->chain_dma_pool); kfree(ioc->chain_lookup); ioc->chain_lookup = NULL; } } /** - * is_MSB_are_same - checks whether all reply queues in a set are + * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are * having same upper 32bits in their base memory address. 
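 * * Worked example (illustrative): a pool starting at 0xfffff000 with * pool_sz 0x2000 ends at 0x100001000; upper_32_bits() is 0x0 for the * start but 0x1 for the end, so the pool straddles a 4GB boundary and * the function returns 0.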
* @reply_pool_start_address: Base address of a reply queue set * @pool_sz: Size of single Reply Descriptor Post Queues pool size @@ -6906,20 +7505,419 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) */ static int -is_MSB_are_same(long reply_pool_start_address, u32 pool_sz) +mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) { long reply_pool_end_address; - unsigned long bit_divisor_16 = 0x10000; reply_pool_end_address = reply_pool_start_address + pool_sz; - if (((reply_pool_start_address / bit_divisor_16) / (bit_divisor_16)) == - ((reply_pool_end_address / bit_divisor_16) / bit_divisor_16)) + if (upper_32_bits(reply_pool_start_address) == + upper_32_bits(reply_pool_end_address)) return 1; else return 0; } + +/** + * _base_reduce_hba_queue_depth - Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + */ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_reply_post_free_array - Allocating DMA'able memory + * for reply post free array. + * @ioc: Adapter object + * @reply_post_free_array_sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc, + int reply_post_free_array_sz) +{ + ioc->reply_post_free_array_dma_pool = + dma_pool_create("reply_post_free_array pool", + &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); + if (!ioc->reply_post_free_array_dma_pool) { + dinitprintk(ioc, + pr_err("reply_post_free_array pool: dma_pool_create failed\n")); + return -ENOMEM; + } + ioc->reply_post_free_array = + dma_pool_alloc(ioc->reply_post_free_array_dma_pool, + GFP_KERNEL, &ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) { + dinitprintk(ioc, pr_err( + "reply_post_free_array pool: dma_pool_alloc failed\n")); + return -EAGAIN; + } + if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array, + reply_post_free_array_sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Post Free Array! Array (0x%p) " + "Array dma = (0x%llx)\n", + ioc->reply_post_free_array, + (unsigned long long) ioc->reply_post_free_array_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + + return 0; +} + +/** + * base_alloc_rdpq_dma_pool - Allocating DMA'able memory + * for reply queues. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) +{ + int i = 0; + u32 dma_alloc_count = 0; + int reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct), GFP_KERNEL); + if (!ioc->reply_post) { + printk(MPT3SAS_ERR_FMT + "reply_post_free pool: kcalloc failed\n", ioc->name); + return -ENOMEM; + } + /* + * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and + * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should + * be within 4GB boundary and also reply queues in a set must have same + * upper 32-bits in their memory address. So here the driver is + * allocating the DMA'able memory for reply queues accordingly. + * Driver uses limitation of + * VENTURA_SERIES to manage INVADER_SERIES as well.
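+ * E.g. (illustrative) with 24 reply queues and a chunk size of 16 + * (RDPQ_MAX_INDEX_IN_ONE_CHUNK), dma_alloc_count below is + * DIV_ROUND_UP(24, 16) = 2: queues 0-15 share the first pool + * allocation and queues 16-23 are carved from the second, each queue + * offset by reply_post_free_sz.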
+ */ + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", + &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err("reply_post_free pool: dma_pool_create failed\n"); + return -ENOMEM; + } + for (i = 0; i < count; i++) { + if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err("reply_post_free pool: " + "dma_pool_alloc failed\n"); + return -EAGAIN; + } + /* Reply desc pool requires to be in same 4 gb region. + * Below function will check this. + * In case of failure, ioc->use_32bit_dma is set and + * -EAGAIN is returned so that the caller can retry + * the allocation with a 32-bit DMA mask. + */ + if (!mpt3sas_check_same_4gb_region( + (long)ioc->reply_post[i].reply_post_free, sz)) { + dinitprintk(ioc, + printk(MPT3SAS_ERR_FMT "bad Replypost free pool(0x%p) " + "reply_post_free_dma = (0x%llx)\n", ioc->name, + ioc->reply_post[i].reply_post_free, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + dma_alloc_count--; + } else { + ioc->reply_post[i].reply_post_free = + (Mpi2ReplyDescriptorsUnion_t *) + ((long)ioc->reply_post[i-1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = + (dma_addr_t) + (ioc->reply_post[i-1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * @ct: Chain tracker + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, int sz, + struct chain_tracker *ct) +{ + int i = 0, j = 0; + + ioc->pcie_sgl_dma_pool = dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + printk(MPT3SAS_ERR_FMT "PCIe SGL pool: dma_pool_create failed\n", + ioc->name); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + printk(MPT3SAS_ERR_FMT + "PCIe SGL pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + printk(MPT3SAS_ERR_FMT "PCIE SGLs are not in same 4G !! "
+ " pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->name, ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "PCIe sgl pool depth(%d), " + "element_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "Number of chains can " + "fit in a PRP page(%d)\n", ioc->name, ioc->chains_per_prp_buffer)); + return 0; +} + +/** + * _base_allocate_chain_dma_pool - Allocating DMA'able memory + * for chain dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * @ct: Chain tracker + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz, + struct chain_tracker *ctr) +{ + int i = 0, j = 0; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) { + printk(MPT3SAS_ERR_FMT "chain_dma_pool: dma_pool_create " + "failed\n", ioc->name); + return -ENOMEM; + } + + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region( + (long)ctr->chain_buffer, ioc->chain_segment_sz)) { + printk(MPT3SAS_ERR_FMT + "Chain buffers are not in same 4G !!!" + "Chain buff (0x%p) dma = (0x%llx)\n", + ioc->name, ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + } + } + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "chain_lookup depth" + "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz))/1024)); + return 0; +} + +/** + * _base_allocate_sense_dma_pool - Allocating DMA'able memory + * for sense dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) +{ + + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) { + printk(MPT3SAS_ERR_FMT "sense pool: dma_pool_create failed\n", + ioc->name); + return -ENOMEM; + } + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) { + printk(MPT3SAS_ERR_FMT "sense pool: dma_pool_alloc failed\n", + ioc->name); + return -EAGAIN; + } + /* sense buffer requires to be in same 4 gb region. + * Below function will check the same. + * In case of failure, new pci pool will be created with + * updated alignment. + * Older allocation and pool will be destroyed. + * Alignment will be used such a way that next allocation if success, + * will always meet same 4gb region requirement. + * Actual requirement is not alignment, but we need start and end of + * DMA address must have same upper 32 bit address. 
+ */ + if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) { + dinitprintk(ioc, + pr_err("Bad Sense Pool! sense (0x%p) " + "sense_dma = (0x%llx)\n", + ioc->sense, + (unsigned long long) ioc->sense_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + printk(MPT3SAS_INFO_FMT + "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", + ioc->name, ioc->sense, (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory + * for reply free dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) +{ + /* reply free queue, 16 byte align */ + ioc->reply_free_dma_pool = dma_pool_create( + "reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + printk(MPT3SAS_ERR_FMT "reply_free pool: dma_pool_create " + "failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) { + printk(MPT3SAS_ERR_FMT "reply_free pool: dma_pool_alloc " + "failed\n", ioc->name); + return -EAGAIN; + } + if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Free Pool! Reply Free (0x%p) " + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long) ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free pool(0x%p): " + "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free_dma" + "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +/** + * _base_allocate_reply_pool - Allocating DMA'able memory + * for reply pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, int sz) +{ + /* reply pool, 4 byte align */ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) { + printk(MPT3SAS_ERR_FMT "reply pool: dma_pool_create failed\n", + ioc->name); + return -ENOMEM; + } + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + printk(MPT3SAS_ERR_FMT "reply pool: dma_pool_alloc failed\n", + ioc->name); + return -EAGAIN; + } + if (!mpt3sas_check_same_4gb_region((long)ioc->reply, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Pool!
Reply (0x%p)" + "Reply dma = (0x%llx)\n", + ioc->reply, + (unsigned long long) ioc->reply_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + printk(MPT3SAS_INFO_FMT + "reply pool(0x%p) - dma(0x%llx): depth(%d)" + "frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->reply, (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); + return 0; +} + /** * _base_allocate_memory_pools - allocate start of day memory pools * @ioc: per adapter object @@ -6932,16 +7930,17 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) struct mpt3sas_facts *facts; u16 max_sge_elements; u16 chains_needed_per_io; - u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz, mod_sz=0, rc=0; - u32 retry_sz, reduce_sz; + u32 sz, total_sz, reply_post_free_sz, rc=0; + u32 retry_sz; + u32 rdpq_sz = 0, sense_sz = 0, reply_post_free_array_sz = 0; + u32 sgl_sz = 0; u16 max_request_credit, nvme_blocks_needed; unsigned short sg_tablesize; u16 sge_size; - u16 set_of_rdpqs = (ioc->hba_mpi_version_belonged == MPI26_VERSION) ? 16 : 8; #if defined(TARGET_MODE) int num_cmd_buffers; #endif - int i, j; + int i = 0; struct chain_tracker *ct; dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -7112,11 +8111,11 @@ retry: ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; } - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "scatter gather: " + printk(MPT3SAS_INFO_FMT "scatter gather: " "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, - ioc->chains_needed_per_io)); + ioc->chains_needed_per_io); ioc->scsiio_depth = ioc->hba_queue_depth - ioc->hi_priority_depth - ioc->internal_depth; @@ -7146,9 +8145,10 @@ retry: sz += (ioc->internal_depth * ioc->request_sz); ioc->request_dma_sz = sz; - ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); + ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, + &ioc->request_dma, GFP_KERNEL); if (!ioc->request) { - printk(MPT3SAS_ERR_FMT "request pool: pci_alloc_consistent " + printk(MPT3SAS_ERR_FMT "request pool: dma_alloc_consistent " "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " "total(%d kB)\n", ioc->name, ioc->hba_queue_depth, ioc->chains_needed_per_io, ioc->request_sz, sz/1024); @@ -7170,7 +8170,7 @@ retry: memset(ioc->request, 0, sz); if (retry_sz) - printk(MPT3SAS_ERR_FMT "request pool: pci_alloc_consistent " + printk(MPT3SAS_ERR_FMT "request pool: dma_alloc_consistent " "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " "total(%d kb)\n", ioc->name, ioc->hba_queue_depth, ioc->chains_needed_per_io, ioc->request_sz, sz/1024); @@ -7187,13 +8187,12 @@ retry: ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * ioc->request_sz); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "request pool(0x%p): " + printk(MPT3SAS_INFO_FMT "request pool(0x%p) - dma(0x%llx): " "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, - ioc->request, ioc->hba_queue_depth, ioc->request_sz, - (ioc->hba_queue_depth * ioc->request_sz)/1024)); + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz)/1024); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "request pool: dma(0x%llx)\n", - ioc->name, (unsigned long long) ioc->request_dma)); total_sz += sz; #if (LINUX_VERSION_CODE < 
KERNEL_VERSION(3,16,0)) @@ -7207,8 +8206,8 @@ retry: (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { max_request_credit -= 64; if (ioc->request) { - pci_free_consistent(ioc->pdev, ioc->request_dma_sz, - ioc->request, ioc->request_dma); + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); ioc->request = NULL; } goto retry; @@ -7319,440 +8318,93 @@ retry: rc = -ENOMEM; goto out; } - - sz = nvme_blocks_needed * ioc->page_size; - ioc->pcie_sgl_dma_pool = pci_pool_create("PCIe SGL pool", ioc->pdev, sz, - ioc->page_size, 0); - if (!ioc->pcie_sgl_dma_pool) { - printk(MPT3SAS_ERR_FMT "PCIe SGL pool: pci_pool_create failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - - ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; - ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); - - for (i = 0; i < ioc->scsiio_depth; i++) { - ioc->pcie_sg_lookup[i].pcie_sgl = pci_pool_alloc( - ioc->pcie_sgl_dma_pool, GFP_KERNEL, - &ioc->pcie_sg_lookup[i].pcie_sgl_dma); - if (!ioc->pcie_sg_lookup[i].pcie_sgl) { - printk(MPT3SAS_ERR_FMT "PCIe SGL pool: pci_pool_alloc " - "failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - for (j = 0; j < ioc->chains_per_prp_buffer; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = - ioc->pcie_sg_lookup[i].pcie_sgl + - (j * ioc->chain_segment_sz); - ct->chain_buffer_dma = - ioc->pcie_sg_lookup[i].pcie_sgl_dma + - (j * ioc->chain_segment_sz); - } - } - - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "PCIe sgl pool depth(%d), " - "element_size(%d), pool_size(%d kB)\n", ioc->name, - ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "Number of chains can " - "fit in a PRP page(%d)\n", ioc->name, - ioc->chains_per_prp_buffer)); - total_sz += sz * ioc->scsiio_depth; + sgl_sz = nvme_blocks_needed * ioc->page_size; + if ((rc = _base_allocate_pcie_sgl_pool(ioc, sgl_sz, ct)) == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sgl_sz * ioc->scsiio_depth; } - ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, - ioc->chain_segment_sz, 16, 0); - if (!ioc->chain_dma_pool) { - printk(MPT3SAS_ERR_FMT "chain_dma_pool: pci_pool_create " - "failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - for (i = 0; i < ioc->scsiio_depth; i++) { - for (j = ioc->chains_per_prp_buffer; j < ioc->chains_needed_per_io; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = pci_pool_alloc(ioc->chain_dma_pool, - GFP_KERNEL, &ct->chain_buffer_dma); - if (!ct->chain_buffer) { - if ((max_request_credit - 64) > - (ioc->internal_depth + - INTERNAL_SCSIIO_CMDS_COUNT)) { - max_request_credit -= 64; - _base_release_memory_pools(ioc); - goto retry_allocation; - } else { - printk(MPT3SAS_ERR_FMT "chain_lookup: " - " pci_pool_alloc failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } + rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz, ct); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) { + if (ioc->use_32bit_dma && ioc->dma_mask > 32) + goto try_32bit_dma; + else { + if ((max_request_credit - 64) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + max_request_credit -= 64; + _base_release_memory_pools(ioc); + goto retry_allocation; + } else { + printk(MPT3SAS_ERR_FMT "chain_lookup: " + " dma_pool_alloc failed\n", ioc->name); + return -ENOMEM; } - total_sz += ioc->chain_segment_sz; } } - - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "chain_lookup depth" - 
"(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, - ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * - (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * - ioc->chain_segment_sz))/1024)); + total_sz += ioc->chain_segment_sz * + ((ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->scsiio_depth); /* sense buffers, 4 byte align */ - sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; - ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, - 0); - if (!ioc->sense_dma_pool) { - printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_create failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, - &ioc->sense_dma); - if (!ioc->sense) { - printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - /* sense buffer requires to be in same 4 gb region. - * Below function will check the same. - * In case of failure, new pci pool will be created with updated alignment. - * Older allocation and pool will be destroyed. - * Alignment will be used such a way that next allocation if success, will - * always meet same 4gb region requirement. - * Actual requirement is not alignment, but we need start and end of - * DMA address must have same upper 32 bit address. - */ - if (!is_MSB_are_same((long)ioc->sense, sz)) { - //Release Sense pool & Allocations - pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); - pci_pool_destroy(ioc->sense_dma_pool); - ioc->sense = NULL; - - ioc->sense_dma_pool = - pci_pool_create("sense pool", ioc->pdev, sz, - roundup_pow_of_two(sz), 0); - if (!ioc->sense_dma_pool) { - printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_create failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, - &ioc->sense_dma); - if (!ioc->sense) { - printk(MPT3SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - } - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "sense pool(0x%p): depth(%d), element_size(%d), pool_size" - "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, - SCSI_SENSE_BUFFERSIZE, sz/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "sense_dma(0x%llx)\n", - ioc->name, (unsigned long long)ioc->sense_dma)); - total_sz += sz; - + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE ; + if ((rc = _base_allocate_sense_dma_pool(ioc, sense_sz)) == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; /* reply pool, 4 byte align */ sz = ioc->reply_free_queue_depth * ioc->reply_sz; - ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, - 0); - if (!ioc->reply_dma_pool) { - printk(MPT3SAS_ERR_FMT "reply pool: pci_pool_create failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, - &ioc->reply_dma); - if (!ioc->reply) { - printk(MPT3SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n", - ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_dma_min_address = (u32)(ioc->reply_dma); - ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply pool(0x%p): depth" - "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply, - ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_dma(0x%llx)\n", - ioc->name, (unsigned long long)ioc->reply_dma)); + if ((rc = _base_allocate_reply_pool(ioc, sz)) == -ENOMEM) + 
return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz; /* reply free queue, 16 byte align */ sz = ioc->reply_free_queue_depth * 4; - ioc->reply_free_dma_pool = pci_pool_create("reply_free pool", - ioc->pdev, sz, 16, 0); - if (!ioc->reply_free_dma_pool) { - printk(MPT3SAS_ERR_FMT "reply_free pool: pci_pool_create " - "failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, - &ioc->reply_free_dma); - if (!ioc->reply_free) { - printk(MPT3SAS_ERR_FMT "reply_free pool: pci_pool_alloc " - "failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - memset(ioc->reply_free, 0, sz); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free pool(0x%p): " - "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, - ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply_free_dma" - "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma)); + if ((rc = _base_allocate_reply_free_dma_pool(ioc, sz)) == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz; - - if (ioc->rdpq_array_enable) { -/* - * According to MPI25 Spec each set of 8 reply queues(0-7, 8-15, ..) whereas for - * MPI26 Spec each set of 16 reply queues(0-15, 16-31, ..) should be within 4GB - * boundary and also reply queues in a set must have same upper 32-bits in their - * memory address. so here driver is allocating the DMA'able memory for reply - * queues based on MPI Spec specification. - */ - ioc->reply_post = kcalloc(ioc->reply_queue_count, - sizeof(struct reply_post_struct), GFP_KERNEL); - - /* reply post queue, 16 byte align */ - reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - sz = reply_post_free_sz * set_of_rdpqs; - ioc->reply_post_free_dma_pool = - pci_pool_create("reply_post_free pool", ioc->pdev, sz, - 16, 0); - ioc->reply_post_free_dma_pool_align = - pci_pool_create("reply_post_free pool alligned", ioc->pdev, sz, - roundup_pow_of_two(sz), 0); - if ((!ioc->reply_post_free_dma_pool) || - (!ioc->reply_post_free_dma_pool_align)) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_create failed\n", ioc->name); - rc = -ENOMEM; - goto out; + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; + if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; + if ((rc = base_alloc_rdpq_dma_pool(ioc, rdpq_sz)) == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + else { + if (ioc->rdpq_array_enable && rc == 0) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + if ((rc = _base_allocate_reply_post_free_array(ioc, + reply_post_free_array_sz)) == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; } - if (ioc->reply_queue_count % set_of_rdpqs) - { - mod_sz = reply_post_free_sz * (ioc->reply_queue_count % set_of_rdpqs); - ioc->reply_post_free_dma_pool_mod_rdpq_set = - pci_pool_create("reply_post_free_mod_rdpq_set pool", ioc->pdev, mod_sz, - 16, 0); - ioc->reply_post_free_dma_pool_mod_rdpq_set_align = - pci_pool_create("reply_post_free_mod_rdpq_set pool", ioc->pdev, mod_sz, - roundup_pow_of_two(mod_sz), 0); - if (!ioc->reply_post_free_dma_pool_mod_rdpq_set || - 
!ioc->reply_post_free_dma_pool_mod_rdpq_set_align) { - printk(MPT3SAS_ERR_FMT "reply_post_free_mod_rdpq_set pool: " - "pci_pool_create failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - } - for (i = 0; i < ioc->reply_queue_count; i++) { - if ((i % set_of_rdpqs == 0) && ((i + (set_of_rdpqs - 1)) < ioc->reply_queue_count)) { - ioc->reply_post[i].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool, - GFP_KERNEL, - &ioc->reply_post[i].reply_post_free_dma); - if (!ioc->reply_post[i].reply_post_free) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_alloc failed\n", ioc->name); - reduce_sz = 64; - if((ioc->hba_queue_depth - reduce_sz) > - (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { - _base_release_memory_pools(ioc); - ioc->hba_queue_depth -= reduce_sz; - goto retry_allocation; - } - else { - rc = -ENOMEM; - goto out; - } - } - - if (!is_MSB_are_same((long)ioc->reply_post[i].reply_post_free, sz)) - { - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "bad reply post free pool (0x%p), reply_post_free_dma = (0x%llx)\n", ioc->name, - ioc->reply_post[i].reply_post_free, - (unsigned long long)ioc->reply_post[i].reply_post_free_dma)); - - pci_pool_free(ioc->reply_post_free_dma_pool, - ioc->reply_post[i].reply_post_free, - ioc->reply_post[i].reply_post_free_dma); - - ioc->reply_post[i].reply_post_free = NULL; - ioc->reply_post[i].reply_post_free_dma = 0; - - //Retry Here with modified alignment - ioc->reply_post[i].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool_align, - GFP_KERNEL, - &ioc->reply_post[i].reply_post_free_dma); - if (!ioc->reply_post[i].reply_post_free) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_alloc failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_post[i].dma_pool = ioc->reply_post_free_dma_pool_align; - } else - ioc->reply_post[i].dma_pool = ioc->reply_post_free_dma_pool; - - memset(ioc->reply_post[i].reply_post_free, 0, sz); - total_sz += sz; - } - else if (i % set_of_rdpqs == 0) { - ioc->reply_post[i].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool_mod_rdpq_set, - GFP_KERNEL, - &ioc->reply_post[i].reply_post_free_dma); - if (!ioc->reply_post[i].reply_post_free) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_alloc failed\n", ioc->name); - reduce_sz = 64; - if((ioc->hba_queue_depth - reduce_sz) > - (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { - _base_release_memory_pools(ioc); - ioc->hba_queue_depth -= reduce_sz; - goto retry_allocation; - } - else { - rc = -ENOMEM; - goto out; - } - } - if (!is_MSB_are_same((long)ioc->reply_post[i].reply_post_free, mod_sz)) - { - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "bad reply post free pool (0x%p), reply_post_free_dma = (0x%llx)\n", ioc->name, - ioc->reply_post[i].reply_post_free, - (unsigned long long)ioc->reply_post[i].reply_post_free_dma)); - - pci_pool_free(ioc->reply_post_free_dma_pool_mod_rdpq_set, - ioc->reply_post[i].reply_post_free, - ioc->reply_post[i].reply_post_free_dma); - - ioc->reply_post[i].reply_post_free = NULL; - ioc->reply_post[i].reply_post_free_dma = 0; - - //Try Allocating from modified allignment pool - ioc->reply_post[i].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool_mod_rdpq_set_align, - GFP_KERNEL, - &ioc->reply_post[i].reply_post_free_dma); - if (!ioc->reply_post[i].reply_post_free) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_alloc failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_post[i].dma_pool = - 
ioc->reply_post_free_dma_pool_mod_rdpq_set_align; - } else - ioc->reply_post[i].dma_pool = - ioc->reply_post_free_dma_pool_mod_rdpq_set; - - memset(ioc->reply_post[i].reply_post_free, 0, mod_sz); - total_sz += mod_sz; - } - else { - ioc->reply_post[i].reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) - ((long)ioc->reply_post[i-1].reply_post_free + reply_post_free_sz); - ioc->reply_post[i].reply_post_free_dma = (dma_addr_t) - (ioc->reply_post[i-1].reply_post_free_dma + reply_post_free_sz); - } - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply post free pool (0x%p): depth(%d), " - "element_size(%d), pool_size(%d kB)\n", ioc->name, - ioc->reply_post[i].reply_post_free, - ioc->reply_post_queue_depth, set_of_rdpqs, reply_post_free_sz/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply_post_free_dma = (0x%llx)\n", ioc->name, - (unsigned long long)ioc->reply_post[i].reply_post_free_dma)); - } - - reply_post_free_array_sz = ioc->reply_queue_count * - sizeof(Mpi2IOCInitRDPQArrayEntry); - ioc->reply_post_free_array_dma_pool = - pci_pool_create("reply_post_free_array pool", - ioc->pdev, reply_post_free_array_sz, 16, 0); - if (!ioc->reply_post_free_array_dma_pool) { - printk(MPT3SAS_ERR_FMT "reply_post_free_array pool: " - "pci_pool_create failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_post_free_array = - pci_pool_alloc(ioc->reply_post_free_array_dma_pool, - GFP_KERNEL, &ioc->reply_post_free_array_dma); - if (!ioc->reply_post_free_array) { - printk(MPT3SAS_ERR_FMT "reply_post_free_array pool: " - "pci_pool_alloc failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - } else { - ioc->reply_post = kzalloc(sizeof(struct reply_post_struct), - GFP_KERNEL); - /* reply post queue, 16 byte align */ - reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - if (_base_is_controller_msix_enabled(ioc)) - sz = reply_post_free_sz * ioc->reply_queue_count; - else - sz = reply_post_free_sz; - ioc->reply_post_free_dma_pool = - pci_pool_create("reply_post_free pool", - ioc->pdev, sz, 16, 0); - if (!ioc->reply_post_free_dma_pool) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_create failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - ioc->reply_post[0].reply_post_free = - pci_pool_alloc(ioc->reply_post_free_dma_pool, - GFP_KERNEL, &ioc->reply_post[0].reply_post_free_dma); - if (!ioc->reply_post[0].reply_post_free) { - printk(MPT3SAS_ERR_FMT "reply_post_free pool: " - "pci_pool_alloc failed\n", ioc->name); - rc = -ENOMEM; - goto out; - } - memset(ioc->reply_post[0].reply_post_free, 0, sz); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "reply post free pool" - "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->name, ioc->reply_post[0].reply_post_free, - ioc->reply_post_queue_depth, set_of_rdpqs, sz/1024)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT - "reply_post_free_dma = (0x%llx)\n", ioc->name, - (unsigned long long)ioc->reply_post[0].reply_post_free_dma)); - total_sz += sz; } - + total_sz += rdpq_sz; ioc->config_page_sz = 512; - ioc->config_page = pci_alloc_consistent(ioc->pdev, - ioc->config_page_sz, &ioc->config_page_dma); + ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, + ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL); if (!ioc->config_page) { - printk(MPT3SAS_ERR_FMT "config page: pci_pool_alloc " + printk(MPT3SAS_ERR_FMT "config page: dma_pool_alloc " "failed\n", ioc->name); rc = -ENOMEM; goto out; } - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "config page(0x%p): size" - "(%d)\n", ioc->name, 
ioc->config_page, ioc->config_page_sz)); - dinitprintk(ioc, printk(MPT3SAS_INFO_FMT "config_page_dma" - "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma)); + printk(MPT3SAS_INFO_FMT "config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->name, ioc->config_page, + (unsigned long long)ioc->config_page_dma, ioc->config_page_sz); total_sz += ioc->config_page_sz; printk(MPT3SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n", @@ -7760,8 +8412,21 @@ retry: printk(MPT3SAS_INFO_FMT "Current Controller Queue Depth(%d), " "Max Controller Queue Depth(%d)\n", ioc->name, ioc->shost->can_queue, facts->RequestCredit); - printk(MPT3SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n", - ioc->name, ioc->shost->sg_tablesize); + + return 0; + +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) !=0) + return -ENOMEM; + goto retry_allocation; out: return rc; @@ -7781,8 +8446,7 @@ _base_flush_ios_and_panic(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) mpt3sas_base_stop_watchdog(ioc); mpt3sas_base_stop_hba_unplug_watchdog(ioc); mpt3sas_scsih_flush_running_cmds(ioc); - mpt3sas_base_fault_info(ioc, fault_code); - panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", __func__); + mpt3sas_print_fault_code(ioc, fault_code); } /** @@ -7804,8 +8468,10 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) (sc != MPI2_IOC_STATE_MASK)) { if ((sc == MPI2_IOC_STATE_FAULT) && ((s & MPI2_DOORBELL_DATA_MASK) == - IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) + IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) { _base_flush_ios_and_panic(ioc, s & MPI2_DOORBELL_DATA_MASK); + panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", __func__); + } } return cooked ? 
sc : s; } @@ -7826,6 +8492,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) { u32 ioc_state; int r = 0; + unsigned long flags; if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { printk(MPT3SAS_ERR_FMT "%s: unknown reset_type\n", @@ -7841,10 +8508,21 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, &ioc->chip->Doorbell); - if ((_base_wait_for_doorbell_ack(ioc, 15))) { + if ((_base_wait_for_doorbell_ack(ioc, 15))) + r = -EFAULT; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP + && (ioc->is_driver_loading == 1 || ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_base_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); r = -EFAULT; goto out; } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (r != 0) // doorbell did not ACK + goto out; ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); if (ioc_state) { @@ -7959,10 +8637,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, ioc->ioc_link_reset_in_progress) ioc->ioc_link_reset_in_progress = 0; if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SasIoUnitControlRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -8041,10 +8718,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SepRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SepRequest_t)/4, issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -8171,6 +8847,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); } + /* CoreDump. Set the flag to enable CoreDump in the FW */ + mpi_request.ConfigurationFlags |= MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE; + /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
*/ @@ -8188,15 +8867,15 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) int i; mfp = (__le32 *)&mpi_request; - printk(KERN_INFO "\toffset:data\n"); + printk(MPT3SAS_INFO_FMT "\toffset:data\n", ioc->name); for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) - printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, - le32_to_cpu(mfp[i])); + printk(MPT3SAS_INFO_FMT "\t[0x%02x]:%08x\n", + ioc->name, i*4, le32_to_cpu(mfp[i])); } r = _base_handshake_req_reply_wait(ioc, sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, - sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); if (r != 0) { printk(MPT3SAS_ERR_FMT "%s: handshake failed (r=%d)\n", @@ -8210,6 +8889,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) printk(MPT3SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); r = -EIO; } + + /* Reset TimeSync Counter */ + ioc->timestamp_update_count = 0; return r; } @@ -8587,16 +9269,25 @@ mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) return 0; if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "unexpected doorbell " - "active!\n", ioc->name)); + printk(MPT3SAS_INFO_FMT "unexpected doorbell active!\n", + ioc->name); goto issue_diag_reset; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; } + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { + mpt3sas_base_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + } + + goto issue_diag_reset; + } if (type == FORCE_BIG_HAMMER) goto issue_diag_reset; @@ -8773,7 +9464,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) skip_init_reply_post_host_index: mpt3sas_base_start_hba_unplug_watchdog(ioc); - _base_unmask_interrupts(ioc); + mpt3sas_base_unmask_interrupts(ioc); if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { r = _base_display_fwpkg_version(ioc); if (r) @@ -8868,8 +9559,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); ioc->reply_queue_count = 1; if (!ioc->cpu_msix_table) { - dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation for " - "cpu_msix_table failed!!!\n", ioc->name)); + printk(MPT3SAS_ERR_FMT + "allocation for cpu_msix_table failed!!!\n", ioc->name); r = -ENOMEM; goto out_free_resources; } @@ -8883,14 +9574,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) sizeof(u64 *), GFP_KERNEL); #endif if (!ioc->reply_post_host_index) { - dfailprintk(ioc, printk(MPT3SAS_INFO_FMT "allocation " - "for reply_post_host_index failed!!!\n", - ioc->name)); + printk(MPT3SAS_ERR_FMT + "allocation for reply_post_host_index failed!!!\n", + ioc->name); r = -ENOMEM; goto out_free_resources; } } ioc->rdpq_array_enable_assigned = 0; + ioc->use_32bit_dma = 0; + ioc->dma_mask = 64; if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; @@ -8999,6 +9692,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, sizeof(struct mpt3sas_port_facts), GFP_KERNEL); if (!ioc->pfacts) { + printk(MPT3SAS_ERR_FMT + "allocation for port facts failed!!!\n", ioc->name); r = -ENOMEM; goto out_free_resources; } @@ -9157,6 +9852,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) sizeof(struct mpt3sas_facts)); ioc->non_operational_loop = 0; +
ioc->ioc_coredump_loop = 0; ioc->got_task_abort_from_ioctl = 0; ioc->got_task_abort_from_sysfs = 0; return 0; @@ -9519,8 +10215,12 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, enum reset_type typ (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_RELEASED))) { is_trigger = 1; - if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { is_fault = 1; + ioc->htb_rel.trigger_info_dwords[1] = + (ioc_state & MPI2_DOORBELL_DATA_MASK); + } } _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); mpt3sas_wait_for_commands_to_complete(ioc); @@ -9561,9 +10261,8 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, enum reset_type typ _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); out: - dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: %s\n", - ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); - + printk(MPT3SAS_INFO_FMT "%s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_status = r; ioc->shost_recovery = 0; @@ -9609,4 +10308,3 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, enum reset_type typ #if defined(TARGET_MODE) EXPORT_SYMBOL(mpt3sas_base_hard_reset_handler); #endif - diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index d040e467171d..932bc919f823 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -73,13 +73,14 @@ #include "mpt3sas_compatibility.h" #include "mpt3sas_debug.h" #include "mpt3sas_trigger_diag.h" +#include "mpt3sas_trigger_pages.h" /* mpt3sas driver versioning info */ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Broadcom Inc. <MPT-FusionLinux.pdl@broadcom.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 & SAS 3.5 Device Driver" -#define MPT3SAS_DRIVER_VERSION "32.00.00.00" -#define MPT3SAS_MAJOR_VERSION 32 +#define MPT3SAS_DRIVER_VERSION "37.00.00.00" +#define MPT3SAS_MAJOR_VERSION 37 #define MPT3SAS_MINOR_VERSION 00 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 0 @@ -93,6 +94,17 @@ #define MPT2SAS_BUILD_VERSION 3 #define MPT2SAS_RELEASE_VERSION 0 +/* CoreDump: Default timeout */ +#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /* 15 seconds */ +#define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */ +#define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */ +#define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */ +#define MPT3SAS_TIMESYNC_MASK (0x7F) /* 0 - 6 bits */ +#define SECONDS_PER_MIN (60) +#define SECONDS_PER_HOUR (3600) +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) +#define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81) + /* * Set MPT3SAS_SG_DEPTH value based on user input. */ @@ -478,6 +490,7 @@ struct read_cap_parameter { #define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8 #define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16 #define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128 +#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16 /* Get status code type(bits 27-25) with status code(bits 24-17) * value from status field. 
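The RDPQ rework above replaces the old per-queue pool juggling with fixed chunks of RDPQ_MAX_INDEX_IN_ONE_CHUNK (16) reply descriptor post queues: base_alloc_rdpq_dma_pool() sizes each dma_pool allocation to a whole chunk so that, per the MPI 2.6 requirement cited in the removed comment, every queue in a set shares the same upper 32 address bits and never straddles a 4 GB boundary. A minimal sketch of that boundary check, assuming only upper_32_bits() from linux/kernel.h; the helper name below is illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/types.h>

/*
 * True if a chunk-sized allocation crosses a 4 GB boundary, i.e. the
 * first and last bytes of the chunk disagree in their upper 32 address
 * bits; a dma_pool sized to one whole chunk makes this cheap to verify.
 */
static inline bool rdpq_chunk_crosses_4gb(dma_addr_t start, size_t chunk_sz)
{
	return upper_32_bits(start) != upper_32_bits(start + chunk_sz - 1);
}

When that constraint (or any pool allocation) fails, the new try_32bit_dma path releases the pools and retries with a 32-bit coherent DMA mask, where the rule holds trivially, before falling back to reducing the HBA queue depth.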
@@ -659,7 +672,10 @@ struct Mpi2ManufacturingPage11_t { u8 HostTraceBufferFlags; /* 4Fh */ u16 HostTraceBufferMaxSizeKB; /* 50h */ u16 HostTraceBufferMinSizeKB; /* 52h */ - __le32 Reserved10[2]; /* 54h - 5Bh */ + u8 CoreDumpTOSec; /* 54h */ + u8 TimeSyncInterval; /* 55h */ + u16 Reserved9; /* 56h */ + __le32 Reserved10; /* 58h */ }; /** @@ -940,6 +956,7 @@ struct _pcie_device { u8 *serial_number; u8 reset_timeout; u8 access_status; + u16 shutdown_latency; struct kref refcount; }; @@ -1076,6 +1093,7 @@ struct _sas_port { * @handle: device handle for this phy * @attached_handle: device handle for attached device * @phy_belongs_to_port: port has been created for this phy + * @hba_vphy: flag to identify HBA vSES device phy * @port: hba port entry */ struct _sas_phy { @@ -1087,6 +1105,7 @@ struct _sas_phy { u16 handle; u16 attached_handle; u8 phy_belongs_to_port; + u8 hba_vphy; struct hba_port *port; }; @@ -1104,6 +1123,7 @@ struct _sas_phy { * @port: hba port entry * @phy: a list of phys that make up this sas_host/expander * @sas_port_list: list of ports attached to this sas_host/expander + * @rphy: sas_rphy object of this expander */ struct _sas_node { struct list_head list; @@ -1118,6 +1138,7 @@ struct _sas_node { struct hba_port *port; struct _sas_phy *phy; struct list_head sas_port_list; + struct sas_rphy *rphy; }; @@ -1353,25 +1374,87 @@ struct mpt3sas_port_facts { struct reply_post_struct { Mpi2ReplyDescriptorsUnion_t *reply_post_free; dma_addr_t reply_post_free_dma; - struct dma_pool *dma_pool; - }; +/** + * struct virtual_phy - vSES phy structure + * @sas_address: SAS Address of vSES device + * @phy_mask: vSES device's phy number + * @flags: flags used to manage this structure + */ +struct virtual_phy { + struct list_head list; + u64 sas_address; + u32 phy_mask; + u8 flags; +}; + +#define MPT_VPHY_FLAG_DIRTY_PHY 0x01 + /** * struct hba_port - Saves each HBA's Wide/Narrow port info * @port_id: port number * @sas_address: sas address of this wide/narrow port's attached device * @phy_mask: HBA PHY's belonging to this port * @flags: hba port flags + * @vphys_mask: mask of vSES devices Phy number + * @vphys_list: list containing vSES device structures */ struct hba_port { struct list_head list; - u8 port_id; u64 sas_address; u32 phy_mask; + u8 port_id; u8 flags; + u32 vphys_mask; + struct list_head vphys_list; }; +/** + * struct htb_rel_query - diagnostic buffer release reason + * @unique_id: unique id associated with this buffer.
+ * @buffer_rel_condition: Release condition ioctl/sysfs/reset + * @reserved: reserved + * @trigger_type: Master/Event/scsi/MPI + * @trigger_info_dwords: Data corresponding to trigger type + */ +struct htb_rel_query { + u16 buffer_rel_condition; + u16 reserved; + u32 trigger_type; + u32 trigger_info_dwords[2]; +}; + +/* Buffer_rel_condition bit fields */ + +/* Bit 0 - Diag Buffer not Released */ +#define MPT3_DIAG_BUFFER_NOT_RELEASED (0x00) +/* Bit 0 - Diag Buffer Released */ +#define MPT3_DIAG_BUFFER_RELEASED (0x01) + +/* + * Bit 1 - Diag Buffer Released by IOCTL, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_IOCTL (0x02 | MPT3_DIAG_BUFFER_RELEASED) + +/* + * Bit 2 - Diag Buffer Released by Trigger, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_TRIGGER (0x04 | MPT3_DIAG_BUFFER_RELEASED) + +/* + * Bit 3 - Diag Buffer Released by SysFs, + * This bit is valid only if Bit 0 is one + */ +#define MPT3_DIAG_BUFFER_REL_SYSFS (0x08 | MPT3_DIAG_BUFFER_RELEASED) + +/* DIAG RESET Master trigger flags */ +#define MPT_DIAG_RESET_ISSUED_BY_DRIVER 0x00000000 +#define MPT_DIAG_RESET_ISSUED_BY_USER 0x00000001 + /* hba port flags */ #define HBA_PORT_FLAG_DIRTY_PORT 0x01 #define HBA_PORT_FLAG_NEW_PORT 0x02 @@ -1405,6 +1488,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @firmware_event_thread: "" * @fw_event_lock: * @fw_event_list: list of fw events + * @current_event: current processing firmware event + * @fw_events_cleanup: set to one while cleaning up the fw events * @aen_event_read_flag: event log was read * @broadcast_aen_busy: broadcast aen waiting to be serviced * @shost_recovery: host reset in progress @@ -1428,10 +1513,13 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @total_io_cnt: Gives total IO count, used to load balance the interrupts * @high_iops_outstanding: used to load balance the interrupts within high iops reply queues * @high_iops_queues: high iops reply queues count + * @drv_internal_flags: Bit map internal to driver * @drv_support_bitmap: driver's supported feature bit map * @msix_load_balance: Enables load balancing of interrupts across the multiple MSIXs * @thresh_hold: Max number of reply descriptors processed before updating Host Index * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands + * @timestamp_update_count: Counter to fire timeSync command + * @time_sync_interval: Time sync interval read from man page 11 * @scsi_io_cb_idx: shost generated commands * @tm_cb_idx: task management commands * @scsih_cb_idx: scsih internal commands @@ -1617,6 +1705,8 @@ struct MPT3SAS_ADAPTER { struct workqueue_struct *firmware_event_thread; spinlock_t fw_event_lock; struct list_head fw_event_list; + struct fw_event_work *current_event; + u8 fw_events_cleanup; /* misc flags */ int aen_event_read_flag; @@ -1654,13 +1744,20 @@ struct MPT3SAS_ADAPTER { u32 ioc_reset_count; MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; u32 non_operational_loop; + u8 ioc_coredump_loop; + u32 timestamp_update_count; + u32 time_sync_interval; u8 multipath_on_hba; atomic64_t total_io_cnt; atomic64_t high_iops_outstanding; bool msix_load_balance; u16 thresh_hold; u8 high_iops_queues; + u32 drv_internal_flags; u32 drv_support_bitmap; + u32 dma_mask; + bool enable_sdev_max_qd; + bool use_32bit_dma; /* internal commands, callback index */ u8 scsi_io_cb_idx; @@ -1713,6 +1810,7 @@ struct MPT3SAS_ADAPTER { u8 disable_eedp_support; u8 tm_custom_handling; u8
nvme_abort_timeout; + u16 max_shutdown_latency; /* static config pages */ struct mpt3sas_facts facts; @@ -1846,9 +1944,6 @@ struct MPT3SAS_ADAPTER { u16 reply_post_queue_depth; struct reply_post_struct *reply_post; struct dma_pool *reply_post_free_dma_pool; - struct dma_pool *reply_post_free_dma_pool_align; - struct dma_pool *reply_post_free_dma_pool_mod_rdpq_set; - struct dma_pool *reply_post_free_dma_pool_mod_rdpq_set_align; struct dma_pool *reply_post_free_array_dma_pool; Mpi2IOCInitRDPQArrayEntry *reply_post_free_array; dma_addr_t reply_post_free_array_dma; @@ -1884,6 +1979,8 @@ struct MPT3SAS_ADAPTER { u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; u32 ring_buffer_offset; u32 ring_buffer_sz; + struct htb_rel_query htb_rel; + u8 reset_from_user; #if defined(TARGET_MODE) char stm_name[MPT_NAME_LENGTH]; u8 stm_io_cb_idx; /* normal io */ @@ -1922,6 +2019,7 @@ struct MPT3SAS_ADAPTER { struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; + u8 supports_trigger_pages; void *device_remove_in_progress; u16 device_remove_in_progress_sz; u8 *tm_tr_retry; @@ -1929,10 +2027,22 @@ struct MPT3SAS_ADAPTER { u8 temp_sensors_count; u8 is_gen35_ioc; u8 is_aero_ioc; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; + struct dentry *ioc_dump; +#endif struct list_head port_table_list; }; -#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 +struct mpt3sas_debugfs_buffer { + void *buf; + u32 len; +}; + +#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 +#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002 + +#define MPT_DRV_INTERNAL_BITMAP_BLK_MQ 0x00000001 typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); @@ -1983,7 +2093,7 @@ __le64 mpt3sas_base_get_sense_buffer_dma_64(struct MPT3SAS_ADAPTER *ioc, u16 smid); void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid); dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid); -void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll); /* hi-priority queue */ u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); @@ -2007,6 +2117,13 @@ u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); int _base_check_and_get_msix_vectors(struct pci_dev *pdev); void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); +#define mpt3sas_print_fault_code(ioc, fault_code) \ + do { \ + printk(MPT3SAS_ERR_FMT "fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_fault_info(ioc, fault_code); \ + } while (0) + +void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code); +int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller); int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, Mpi2SasIoUnitControlReply_t *mpi_reply, Mpi2SasIoUnitControlRequest_t *mpi_request); @@ -2038,7 +2155,9 @@ void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, u8 status, void *mpi_request, int sz); - +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ + do { \ + printk(MPT3SAS_ERR_FMT "In func: %s\n", ioc->name, __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, status, mpi_request, sz); \ + } while (0) int mpt3sas_wait_for_ioc_to_operational(struct MPT3SAS_ADAPTER *ioc, int wait_count);
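mpt3sas_print_fault_code() and mpt3sas_check_cmd_timeout() above expand to two statements, which is why they are wrapped in do { ... } while (0): an unwrapped multi-statement macro silently detaches its second statement from any brace-less if. A minimal illustration of the hazard, using a hypothetical LOG_AND_FLAG() macro rather than the driver's:

#include <linux/printk.h>

#define LOG_AND_FLAG_BAD(flag) \
	pr_err("fault\n"); (flag) = 1		/* second statement escapes the if */

#define LOG_AND_FLAG_OK(flag) \
	do { pr_err("fault\n"); (flag) = 1; } while (0)

static void hazard_demo(int fault, int *flag)
{
	if (fault)
		LOG_AND_FLAG_BAD(*flag);	/* sets *flag even when !fault */

	if (fault)
		LOG_AND_FLAG_OK(*flag);		/* both statements guarded, as intended */
}

The do/while (0) form also consumes the caller's trailing semicolon, so the macro slots into if/else chains exactly like a function call.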
@@ -2046,6 +2165,7 @@ void mpt3sas_base_start_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_stop_hba_unplug_watchdog(struct MPT3SAS_ADAPTER *ioc); int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc); /* scsih shared API */ extern char driver_name[MPT_NAME_LENGTH]; @@ -2070,7 +2190,9 @@ void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid); struct hba_port * -mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port); +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port, u8 skip_dirty_flag); +struct virtual_phy * +mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, struct hba_port *port, u32 phy); struct _sas_node *mpt3sas_scsih_expander_find_by_handle( struct MPT3SAS_ADAPTER *ioc, u16 handle); @@ -2098,6 +2220,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc); u32 base_mod64(u64 dividend, u32 divisor); +void +mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth); /** * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device @@ -2214,6 +2338,33 @@ int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); +int +mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page); +int +mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set); /* ctl shared API */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) @@ -2311,6 +2462,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, void _scsih_hide_unhide_sas_devices(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_init_debugfs(void); +void mpt3sas_exit_debugfs(void); + #ifdef MPT2SAS_WD_DDIOCOUNT #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) ssize_t diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 57752c27d24e..aea470415cab 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ 
-103,9 +103,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, Mpi2ConfigRequest_t *mpi_request; char *desc = NULL; - if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) - return; - mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { case MPI2_CONFIG_PAGETYPE_IO_UNIT: @@ -273,7 +270,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_reply->MsgLength*4); } ioc->config_cmds.status &= ~MPT3_CMD_PENDING; - _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); ioc->config_cmds.smid = USHORT_MAX; complete(&ioc->config_cmds.done); return 1; @@ -309,6 +307,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t u8 retry_count, issue_host_reset = 0; struct config_request mem; u32 ioc_status = UINT_MAX; + u8 issue_reset; mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { @@ -381,19 +380,24 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t r = 0; memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t)); + memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t)); ioc->config_cmds.status = MPT3_CMD_PENDING; config_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->config_cmds.smid = smid; memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); - _config_display_some_debug(ioc, smid, "config_request", NULL); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_request", NULL); init_completion(&ioc->config_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { - mpt3sas_base_check_cmd_timeout(ioc, + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, smid, + "config_request no reply", NULL); + mpt3sas_check_cmd_timeout(ioc, ioc->config_cmds.status, mpi_request, - sizeof(Mpi2ConfigRequest_t)/4); + sizeof(Mpi2ConfigRequest_t)/4, issue_reset); retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); @@ -413,8 +417,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t /* Reply Frame Sanity Checks to workaround FW issues */ if ((mpi_request->Header.PageType & 0xF) != (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, smid, + "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" " mpi_reply mismatch: Requested PageType(0x%02x)" " Reply PageType(0x%02x)\n", @@ -426,8 +433,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, smid, + "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" " mpi_reply mismatch: Requested ExtPageType(0x%02x)" " Reply ExtPageType(0x%02x)\n", @@ -451,8 +461,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (p) { if 
((mpi_request->Header.PageType & 0xF) != (p[3] & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, smid, + "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" @@ -467,8 +480,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && (mpi_request->ExtPageType != p[6])) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, smid, + "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic(MPT3SAS_WARN_FMT "%s: Firmware BUG:" @@ -1753,6 +1769,766 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t return r; } +/** + * mpt3sas_config_get_driver_trigger_pg0 - obtain driver trigger page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg0 - write driver trigger page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +static int +_config_set_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg0 - update driver trigger page 0 + * @ioc: per adapter object + * @trigger_flag: trigger type bit map + * @set: set or clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_update_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + u16 trigger_flag, bool set) +{ + Mpi26DriverTriggerPage0_t tg_pg0; + Mpi2ConfigReply_t mpi_reply; + int rc; + u16 flags, ioc_status; + + rc = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg0, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return -EFAULT; + } + + if (set) + flags = le16_to_cpu(tg_pg0.TriggerFlags) | trigger_flag; + else + flags = le16_to_cpu(tg_pg0.TriggerFlags) & ~trigger_flag; + + tg_pg0.TriggerFlags = cpu_to_le16(flags); + + rc = _config_set_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to update trigger pg0, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + return -EFAULT; + } + + return 0; +} + +/** + * mpt3sas_config_get_driver_trigger_pg1 - obtain driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg1 - write driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_set_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg1 - update driver trigger page 1 + * @ioc: per adapter object + * @master_tg: Master trigger bit map + * @set: set or clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +int +mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set) +{ + Mpi26DriverTriggerPage1_t tg_pg1; + Mpi2ConfigReply_t mpi_reply; + int rc; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + tg_pg1.NumMasterTrigger = cpu_to_le16(1); + tg_pg1.MasterTriggers[0].MasterTriggerFlags = cpu_to_le32( + master_tg->MasterData); + } else { + tg_pg1.NumMasterTrigger = 0; + tg_pg1.MasterTriggers[0].MasterTriggerFlags = 0; + } + + rc = _config_set_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to update trigger pg1, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg2 - obtain driver trigger page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg2 - write driver trigger page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +static int +_config_set_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg2 - update driver trigger page 2 + * @ioc: per adapter object + * @event_tg: list of Event Triggers + * @set: set or clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set) +{ + Mpi26DriverTriggerPage2_t tg_pg2; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + count = event_tg->ValidEntries; + tg_pg2.NumMPIEventTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg2.MPIEventTriggers[i].MPIEventCode = + cpu_to_le16( + event_tg->EventTriggerEntry[i].EventValue); + tg_pg2.MPIEventTriggers[i].MPIEventCodeSpecific = + cpu_to_le16( + event_tg->EventTriggerEntry[i].LogEntryQualifier); + } + } else { + tg_pg2.NumMPIEventTrigger = 0; + memset(&tg_pg2.MPIEventTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to update trigger pg2, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg3 - obtain driver trigger page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +int +mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg3 - write driver trigger page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_set_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg3 - update driver trigger page 3 + * @ioc: per adapter object + * @scsi_tg: scsi trigger list + * @set: set or clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +int +mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set) +{ + Mpi26DriverTriggerPage3_t tg_pg3; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + count = scsi_tg->ValidEntries; + tg_pg3.NumSCSISenseTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg3.SCSISenseTriggers[i].ASCQ = + scsi_tg->SCSITriggerEntry[i].ASCQ; + tg_pg3.SCSISenseTriggers[i].ASC = + scsi_tg->SCSITriggerEntry[i].ASC; + tg_pg3.SCSISenseTriggers[i].SenseKey = + scsi_tg->SCSITriggerEntry[i].SenseKey; + } + } else { + tg_pg3.NumSCSISenseTrigger = 0; + memset(&tg_pg3.SCSISenseTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to update trigger pg3, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, !set); + + return rc; +} + +/** + * mpt3sas_config_get_driver_trigger_pg4 - obtain driver trigger page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg4 - write driver trigger page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure.
+ */ +static int +_config_set_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_update_driver_trigger_pg4 - update driver trigger page 4 + * @ioc: per adapter object + * @mpi_tg: mpi trigger list + * @set: set or clear trigger values + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set) +{ + Mpi26DriverTriggerPage4_t tg_pg4; + Mpi2ConfigReply_t mpi_reply; + int rc, i, count; + u16 ioc_status; + + rc = mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, set); + if (rc) + return rc; + + rc = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + if (set) { + count = mpi_tg->ValidEntries; + tg_pg4.NumIOCStatusLogInfoTrigger = cpu_to_le16(count); + for (i = 0; i < count; i++) { + tg_pg4.IOCStatusLoginfoTriggers[i].IOCStatus = + cpu_to_le16(mpi_tg->MPITriggerEntry[i].IOCStatus); + tg_pg4.IOCStatusLoginfoTriggers[i].LogInfo = + cpu_to_le32(mpi_tg->MPITriggerEntry[i].IocLogInfo); + } + } else { + tg_pg4.NumIOCStatusLogInfoTrigger = 0; + memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0, + NUM_VALID_ENTRIES * sizeof( + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY)); + } + + rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4); + if (rc) + goto out; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + pr_err(MPT3SAS_FMT + "%s: Failed to update trigger pg4, ioc_status(0x%04x)\n", + ioc->name, __func__, ioc_status)); + rc = -EFAULT; + goto out; + } + + return 0; + +out: + mpt3sas_config_update_driver_trigger_pg0(ioc, + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, !set); + + return rc; +} + /** * mpt3sas_config_get_volume_handle - returns volume handle for give hidden * raid components diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 0475b2571b66..a74c63551ef7 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@
-218,6 +218,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, case MPI2_FUNCTION_SMP_PASSTHROUGH: desc = "smp_passthrough"; break; + case MPI2_FUNCTION_TOOLBOX: + desc = "toolbox"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + desc = "nvme_encapsulated"; + break; } if (!desc) @@ -535,6 +541,8 @@ mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) /* add a log message to indicate the release */ printk(MPT3SAS_INFO_FMT "%s: Releasing the trace buffer " "due to adapter reset.", ioc->name, __func__); + ioc->htb_rel.buffer_rel_condition = + MPT3_DIAG_BUFFER_REL_TRIGGER; mpt3sas_send_diag_release(ioc, i, &issue_reset); } break; @@ -960,7 +968,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, u8 *data; /* ioc determines which port to use */ - smp_request->PhysicalPort = 0xFF; + if (!ioc->multipath_on_hba) + smp_request->PhysicalPort = 0xFF; if (smp_request->PassthroughFlags & MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) data = (u8 *)&smp_request->SGL; @@ -1063,6 +1072,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } /* drop to default case for posting the request */ } + /* fall through */ default: ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); @@ -1088,10 +1098,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->ignore_loginfos = 0; } if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - karg.data_sge_offset); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); goto issue_host_reset; } @@ -1174,13 +1183,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, - 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, pcie_device->reset_timeout, MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); else mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, - 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); } else mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); @@ -1398,11 +1407,12 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); - + + ioc->reset_from_user = 1; scsi_block_requests(ioc->shost); retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); scsi_unblock_requests(ioc->shost); - printk(MPT3SAS_INFO_FMT "host reset: %s\n", + printk(MPT3SAS_INFO_FMT "ioctl: host reset: %s\n", ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); return 0; } @@ -1785,6 +1795,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, request_data = ioc->diag_buffer[buffer_type]; request_data_sz = diag_register->requested_buffer_size; ioc->unique_id[buffer_type] = diag_register->unique_id; + /* Reset ioc variables used for additional query commands */ + ioc->reset_from_user = 0; + memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query)); ioc->diag_buffer_status[buffer_type] &= MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; memcpy(ioc->product_specific[buffer_type], diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); @@ -1841,10 +1854,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_diag_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_diag_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } @@ -1912,8 +1924,6 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) printk(MPT3SAS_INFO_FMT "registering trace buffer support\n", ioc->name); - ioc->diag_trigger_master.MasterData = - (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; diag_register.unique_id = (ioc->hba_mpi_version_belonged == MPI2_VERSION)? @@ -2230,6 +2240,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u16 ioc_status; u32 ioc_state; int rc; + u8 reset_needed = 0; dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -2281,9 +2292,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { - *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_diag_cmds.status, mpi_request, - sizeof(Mpi2DiagReleaseRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_diag_cmds.status, mpi_request, + sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); + *issue_reset = reset_needed; ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_RELEASED; rc = -EFAULT; @@ -2399,8 +2411,9 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) return 0; } + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_IOCTL; rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); - + if (issue_reset) mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); @@ -2555,10 +2568,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_diag_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_diag_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_diag_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } @@ -2598,6 +2610,62 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) } +/** + * _ctl_addnl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * The application will send only unique_id. Driver will + * inspect unique_id first, if valid, fill the details related to cause + * for diag buffer release. 
+ */ +static long +_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_addnl_diag_query karg; + u32 buffer_type = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "%s\n", ioc->name, + __func__)); + if (karg.unique_id == 0) { + pr_err("%s: %s: unique_id is(0x%08x) \n", + ioc->name, __func__, karg.unique_id); + return -EPERM; + } + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + pr_err("%s: %s: buffer with unique_id(0x%08x) not found\n", + ioc->name, __func__, karg.unique_id); + return -EPERM; + } + memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_info("%s: %s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + goto out; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + pr_err("%s: %s: buffer_type(0x%02x) is not released\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + memcpy(&karg.buffer_rel_condition, &ioc->htb_rel, + sizeof(struct htb_rel_query)); +out: + if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) { + pr_err("%s: %s: unable to write mpt3_addnl_diag_query data @ %p\n", + ioc->name, __func__, arg); + return -EFAULT; + } + return 0; +} #ifdef CONFIG_COMPAT /** @@ -2661,7 +2729,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, struct MPT3SAS_ADAPTER *ioc; struct mpt3_ioctl_header ioctl_header; enum block_state state; - long ret = -EINVAL; + long ret = -ENOIOCTLCMD; /* get IOCTL header */ if (copy_from_user(&ioctl_header, (char __user *)arg, @@ -2777,6 +2845,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) ret = _ctl_diag_read_buffer(ioc, arg); break; + case MPT3ADDNLDIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query)) + ret = _ctl_addnl_diag_query(ioc, arg); + break; default: dctlprintk(ioc, printk(MPT3SAS_INFO_FMT "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); @@ -3591,7 +3663,9 @@ mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) tm_count++; doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & - MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP ) goto fault_in_progress; ioc->put_smid_hi_priority(ioc, hpr_smid, 0); } @@ -3643,7 +3717,9 @@ mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) tm_count++; doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & - MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); goto fault_in_progress; @@ -3699,7 +3775,9 @@ mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) tm_count++; doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & - MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); goto fault_in_progress; @@ -3750,7 +3828,9 @@ 
mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) tm_count++; doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & - MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { spin_unlock_irqrestore(&ioc->raid_device_lock, flags); goto fault_in_progress; @@ -3821,7 +3901,9 @@ mpt3sas_ctl_tm_sysfs(struct MPT3SAS_ADAPTER *ioc, u8 task_type) tm_count++; doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & - MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (doorbell & + MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { scsi_device_put(sdev); goto fault_in_progress; } @@ -3878,8 +3960,10 @@ _ctl_task_management_store(struct class_device *cdev, const char *buf, switch (opcode) { case 1: + ioc->reset_from_user = 1; scsi_block_requests(ioc->shost); - printk(MPT3SAS_INFO_FMT "diag reset: %s\n", ioc->name, + printk(MPT3SAS_INFO_FMT + "sysfs: diag reset issued: %s\n", ioc->name, ((!mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER)) ? "SUCCESS" : "FAILED")); scsi_unblock_requests(ioc->shost); @@ -3887,8 +3971,10 @@ _ctl_task_management_store(struct class_device *cdev, const char *buf, break; case 2: + ioc->reset_from_user = 1; scsi_block_requests(ioc->shost); - printk(MPT3SAS_INFO_FMT "message unit reset: %s\n", ioc->name, + printk(MPT3SAS_INFO_FMT + "sysfs: message unit reset issued: %s\n", ioc->name, ((!mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET)) ? "SUCCESS" : "FAILED")); scsi_unblock_requests(ioc->shost); @@ -3896,7 +3982,8 @@ _ctl_task_management_store(struct class_device *cdev, const char *buf, break; case 3: - printk(MPT3SAS_INFO_FMT "TASKTYPE_ABORT_TASK:\n", ioc->name); + printk(MPT3SAS_INFO_FMT + "sysfs: TASKTYPE_ABORT_TASK :\n", ioc->name); ioc->got_task_abort_from_sysfs = 1; mpt3sas_ctl_tm_sysfs(ioc, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK); @@ -3904,20 +3991,22 @@ _ctl_task_management_store(struct class_device *cdev, const char *buf, break; case 4: - printk(MPT3SAS_INFO_FMT "TASKTYPE_TARGET_RESET:\n", ioc->name); + printk(MPT3SAS_INFO_FMT + "sysfs: TASKTYPE_TARGET_RESET:\n", ioc->name); mpt3sas_ctl_tm_sysfs(ioc, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); break; case 5: - printk(MPT3SAS_INFO_FMT "TASKTYPE_LOGICAL_UNIT_RESET:\n", - ioc->name); + printk(MPT3SAS_INFO_FMT + "sysfs: TASKTYPE_LOGICAL_UNIT_RESET:\n", ioc->name); mpt3sas_ctl_tm_sysfs(ioc, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); break; case 6: - printk(MPT3SAS_INFO_FMT "TASKTYPE_ABRT_TASK_SET\n", ioc->name); + printk(MPT3SAS_INFO_FMT "sysfs: TASKTYPE_ABRT_TASK_SET\n", + ioc->name); mpt3sas_ctl_tm_sysfs(ioc, MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET); break; @@ -4306,6 +4395,7 @@ _ctl_host_trace_buffer_enable_store(struct class_device *cdev, const char *buf, goto out; printk(MPT3SAS_INFO_FMT "releasing host trace buffer\n", ioc->name); + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS; mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } @@ -4372,17 +4462,38 @@ _ctl_diag_trigger_master_store(struct class_device *cdev, const char *buf, { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct SL_WH_MASTER_TRIGGER_T *master_tg; unsigned long flags; ssize_t rc; + bool set = 1; + + rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); + + if (ioc->supports_trigger_pages) { + master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T), + GFP_KERNEL); 
+ if (!master_tg) + return -ENOMEM; + + memcpy(master_tg, buf, rc); + if (!master_tg->MasterData) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg, + set)) { + kfree(master_tg); + return -EFAULT; + } + + } spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); memset(&ioc->diag_trigger_master, 0, sizeof(struct SL_WH_MASTER_TRIGGER_T)); memcpy(&ioc->diag_trigger_master, buf, rc); ioc->diag_trigger_master.MasterData |= (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) @@ -4440,17 +4551,37 @@ _ctl_diag_trigger_event_store(struct class_device *cdev, const char *buf, { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct SL_WH_EVENT_TRIGGERS_T *event_tg; unsigned long flags; ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T), + GFP_KERNEL); + if (!event_tg) + return -ENOMEM; + + memcpy(event_tg, buf, sz); + if (!event_tg->ValidEntries) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg, + set)) { + kfree(event_tg); + return -EFAULT; + } + } spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); + memset(&ioc->diag_trigger_event, 0, sizeof(struct SL_WH_EVENT_TRIGGERS_T)); memcpy(&ioc->diag_trigger_event, buf, sz); if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) @@ -4508,16 +4639,36 @@ _ctl_diag_trigger_scsi_store(struct class_device *cdev, const char *buf, { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg; unsigned long flags; ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T), + GFP_KERNEL); + if (!scsi_tg) + return -ENOMEM; + + memcpy(scsi_tg, buf, sz); + if (!scsi_tg->ValidEntries) + set = 0; + if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg, + set)) { + kfree(scsi_tg); + return -EFAULT; + } + } spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - sz = min(sizeof(ioc->diag_trigger_scsi), count); + memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi)); memcpy(&ioc->diag_trigger_scsi, buf, sz); if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) @@ -4576,17 +4727,36 @@ _ctl_diag_trigger_mpi_store(struct class_device *cdev, const char *buf, { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct SL_WH_MPI_TRIGGERS_T *mpi_tg; unsigned long flags; ssize_t sz; + bool set = 1; + + sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); + if (ioc->supports_trigger_pages) { + mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T), + GFP_KERNEL); + if (!mpi_tg) + return -ENOMEM; + + memcpy(mpi_tg, buf, sz); + if (!mpi_tg->ValidEntries) + set = 0; + if 
(mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg, + set)) { + kfree(mpi_tg); + return -EFAULT; + } + } spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); memset(&ioc->diag_trigger_mpi, 0, sizeof(struct SL_WH_EVENT_TRIGGERS_T)); memcpy(&ioc->diag_trigger_mpi, buf, sz); if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) @@ -4628,6 +4798,123 @@ static CLASS_DEVICE_ATTR(drv_support_bitmap, S_IRUGO, _ctl_drv_support_bitmap_show, NULL); #endif +/** + * _ctl_enable_sdev_max_qd_show - display whether enable_sdev_max_qd is + * enabled/disabled + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static ssize_t +_ctl_enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +#else +static ssize_t +_ctl_enable_sdev_max_qd_show(struct class_device *cdev, char *buf) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +/** + * _ctl_enable_sdev_max_qd_store - Enable/disable enable_sdev_max_qd + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + * If this attribute is disabled then targets will have default + * queue depth. 
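+ *
+ * A short user-space sketch of driving this attribute (illustrative
+ * only; it assumes the adapter is exposed as scsi_host "host0"):
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	static int set_sdev_max_qd(int enable)
+ *	{
+ *		int fd = open(
+ *		    "/sys/class/scsi_host/host0/enable_sdev_max_qd",
+ *		    O_WRONLY);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		if (write(fd, enable ? "1" : "0", 1) != 1) {
+ *			close(fd);
+ *			return -1;
+ *		}
+ *		close(fd);
+ *		return 0;
+ *	}
+ *
+ * Writing 1 raises every device queue depth to shost->can_queue, while
+ * writing 0 restores the per-transport defaults chosen below.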
+ */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static ssize_t +_ctl_enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +#else +static ssize_t +_ctl_enable_sdev_max_qd_store(struct class_device *cdev, const char *buf, + size_t count) +#endif +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + struct _raid_device *raid_device; + int qdepth; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + continue; + + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { + raid_device = mpt3sas_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + else + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + case MPI2_RAID_VOL_TYPE_RAID1: + case MPI2_RAID_VOL_TYPE_RAID10: + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + } + } else if (sas_target_priv_data->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + else + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) { + mpt3sas_scsih_change_queue_depth(sdev, shost->can_queue); + } + break; + default: + return -EINVAL; + } + + return strlen(buf); +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) +static DEVICE_ATTR(enable_sdev_max_qd, S_IRUGO | S_IWUSR, + _ctl_enable_sdev_max_qd_show, _ctl_enable_sdev_max_qd_store); +#else +static CLASS_DEVICE_ATTR(enable_sdev_max_qd, S_IRUGO | S_IWUSR, + _ctl_enable_sdev_max_qd_show, _ctl_enable_sdev_max_qd_store); +#endif + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)) struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_version_fw, @@ -4664,6 +4951,7 @@ struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_ddio_err_count, #endif &dev_attr_drv_support_bitmap, + &dev_attr_enable_sdev_max_qd, NULL, }; #else @@ -4702,6 +4990,7 @@ struct class_device_attribute *mpt3sas_host_attrs[] = { &class_device_attr_ddio_err_count, #endif &class_device_attr_drv_support_bitmap, + &class_device_attr_enable_sdev_max_qd, NULL, }; #endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 603738dc7af8..417bb984da32 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -104,6 +104,8 @@ struct mpt3_diag_query) #define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ struct mpt3_diag_read_buffer) +#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \ + struct mpt3_addnl_diag_query) /* Trace Buffer default UniqueId */ #define MPT2DIAGBUFFUNIQUEID (0x07075900) @@ -440,6 +442,26 @@ struct mpt3_diag_read_buffer { uint32_t diagnostic_data[1]; }; +/** + * struct mpt3_addnl_diag_query - diagnostic buffer release reason + * @hdr - generic header + * @unique_id - unique id associated with this buffer. 
+ * @buffer_rel_condition - Release condition ioctl/sysfs/reset
+ * @reserved1
+ * @trigger_type - Master/Event/scsi/MPI
+ * @trigger_info_dwords - Data corresponding to trigger type
+ * @reserved2
+ */
+struct mpt3_addnl_diag_query {
+	struct mpt3_ioctl_header hdr;
+	uint32_t unique_id;
+	uint16_t buffer_rel_condition;
+	uint16_t reserved1;
+	uint32_t trigger_type;
+	uint32_t trigger_info_dwords[2];
+	uint32_t reserved2[2];
+};
+
 /* Chunk size to use when doing a FW Download */
 #define FW_DL_CHUNK_SIZE 0x4000
 #endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
new file mode 100755
index 000000000000..ba6c4e253897
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
@@ -0,0 +1,199 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ * (mailto:MPT-FusionLinux.pdl@broadcom.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
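+ *
+ * The code below exposes per-adapter state through debugfs. A minimal
+ * user-space sketch of dumping the ioc_dump attribute (illustrative
+ * only; it assumes debugfs is mounted at /sys/kernel/debug and the
+ * adapter is scsi_host0):
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[4096];
+ *		size_t n;
+ *		FILE *f = fopen(
+ *		    "/sys/kernel/debug/mpt3sas/scsi_host0/ioc_dump", "rb");
+ *
+ *		if (!f)
+ *			return 1;
+ *		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
+ *			fwrite(buf, 1, n, stdout);	/* raw adapter image */
+ *		fclose(f);
+ *		return 0;
+ *	}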
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "mpt3sas_base.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+struct dentry *mpt3sas_debugfs_root = NULL;
+
+/*
+ * _debugfs_iocdump_read : copy ioc dump from debugfs buffer
+ */
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+	loff_t *ppos)
+
+{
+	struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+	if (!debug || !debug->buf)
+		return 0;
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+	struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+	struct mpt3sas_debugfs_buffer *debug;
+
+	debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->buf = (void *)ioc;
+	debug->len = sizeof(struct MPT3SAS_ADAPTER);
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * _debugfs_iocdump_release : release the ioc_dump debugfs attribute
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+	struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	file->private_data = NULL;
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+	.owner = THIS_MODULE,
+	.open = _debugfs_iocdump_open,
+	.read = _debugfs_iocdump_read,
+	.release = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+	mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+	if (!mpt3sas_debugfs_root)
+		pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+	if (mpt3sas_debugfs_root)
+		debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
+ * ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+	char name[64];
+
+	snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+	if (!ioc->debugfs_root) {
+		ioc->debugfs_root =
+		    debugfs_create_dir(name, mpt3sas_debugfs_root);
+		if (!ioc->debugfs_root) {
+			dev_err(&ioc->pdev->dev,
+			    "Cannot create per adapter debugfs directory\n");
+			return;
+		}
+	}
+
+	snprintf(name, sizeof(name), "ioc_dump");
+	ioc->ioc_dump = debugfs_create_file(name, S_IRUGO,
+	    ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+	if (!ioc->ioc_dump) {
+		dev_err(&ioc->pdev->dev,
+		    "Cannot create ioc_dump debugfs file\n");
+		debugfs_remove(ioc->debugfs_root);
+		return;
+	}
+
+	snprintf(name, sizeof(name), "host_recovery");
+	debugfs_create_u8(name, S_IRUGO, ioc->debugfs_root, &ioc->shost_recovery);
+
+}
+
+/*
+ * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
+ * ioc: MPT3SAS_ADAPTER object
+ */
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+	if (ioc->debugfs_root)
+		debugfs_remove_recursive(ioc->debugfs_root);
+}
+
+#else
+void mpt3sas_init_debugfs(void)
+{
+}
+void mpt3sas_exit_debugfs(void)
+{
+}
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+}
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+}
+#endif /*CONFIG_DEBUG_FS*/
+
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 00b3096571a6..d38617f7f6f7 100755
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,6 +53,11 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#if !((defined(RHEL_MAJOR) && (RHEL_MAJOR == 8) && (RHEL_MINOR > 2)) || \
+	(LINUX_VERSION_CODE > KERNEL_VERSION(5,4,0)) || \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)))
+#include <linux/pci-aspm.h>
+#endif
 #include <linux/interrupt.h>
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
 #include <linux/nvme.h>
@@ -153,8 +158,10 @@
 static u32 logging_level;
 MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
	"(default=0)");
-static u32 sdev_queue_depth = MPT3SAS_SAS_QUEUE_DEPTH;
-MODULE_PARM_DESC(sdev_queue_depth, " globally setting SAS device queue depth ");
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+	"Enable sdev max qd as can_queue, def=disabled(0)");
 
 static ushort max_sectors = 0xFFFF;
 module_param(max_sectors, ushort, 0444);
@@ -408,7 +415,7 @@ static struct fw_event_work *alloc_fw_event_work(int len)
 	return fw_event;
 }
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)))
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)))
 static inline unsigned int mpt3sas_scsi_prot_interval(struct scsi_cmnd *scmd)
 {
 	return scmd->device->sector_size;
@@ -610,7 +617,8 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
  * if available return port address otherwise return NULL.
  */
 struct hba_port *
-mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port_id)
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port_id,
+	u8 skip_dirty_flag)
 {
 	struct hba_port *port, *port_next;
 
@@ -619,10 +627,22 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port_id)
 		port_id = MULTIPATH_DISABLED_PORT_ID;
 
 	list_for_each_entry_safe (port, port_next,
-	    &ioc->port_table_list, list) {
-		if (port->port_id == port_id &&
-		    !(port->flags & HBA_PORT_FLAG_DIRTY_PORT))
+	    &ioc->port_table_list, list) {
+		if (port->port_id != port_id)
+			continue;
+		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
+			continue;
+		return port;
+	}
+
+	if (skip_dirty_flag) {
+		port = port_next = NULL;
+		list_for_each_entry_safe (port, port_next,
+		    &ioc->port_table_list, list) {
+			if (port->port_id != port_id)
+				continue;
 			return port;
+		}
 	}
 
 	if (unlikely(!ioc->multipath_on_hba)) {
@@ -644,6 +664,29 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port_id)
 	return NULL;
 }
 
+/**
+ * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
+ * @ioc: per adapter object
+ * @port: hba_port object
+ * @phy: phy number
+ *
+ * Return virtual_phy object corresponding to phy number.
+ */
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, struct hba_port *port, u32 phy)
+{
+	struct virtual_phy *vphy, *vphy_next;
+
+	if (!port->vphys_mask)
+		return NULL;
+
+	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
+		if (vphy->phy_mask & (1 << phy))
+			return vphy;
+	}
+	return NULL;
+}
+
 /**
  * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address or WWID if PCIe device
@@ -1408,6 +1451,32 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	return pcie_device;
 }
 
+/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency by checking available devices.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct _pcie_device *pcie_device;
+	unsigned long flags;
+	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+		if (pcie_device->shutdown_latency) {
+			if (shutdown_latency < pcie_device->shutdown_latency)
+				shutdown_latency =
+				    pcie_device->shutdown_latency;
+		}
+	}
+	ioc->max_shutdown_latency = shutdown_latency;
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
 /**
  * _scsih_pcie_device_remove - remove pcie_device from list.
  * @ioc: per adapter object
@@ -1422,6 +1491,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 {
 	unsigned long flags;
 	int was_on_pcie_device_list = 0;
+	u8 update_latency = 0;
 
 	if (!pcie_device)
 		return;
@@ -1444,11 +1514,19 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 		list_del_init(&pcie_device->list);
 		was_on_pcie_device_list = 1;
 	}
+
+	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+		update_latency = 1;
+
 	if (was_on_pcie_device_list) {
 		kfree(pcie_device->serial_number);
 		pcie_device_put(pcie_device);
 	}
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+	if (update_latency)
+		_scsih_set_nvme_max_shutdown_latency(ioc);
+
 }
 
 /**
@@ -1464,6 +1542,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	struct _pcie_device *pcie_device;
 	unsigned long flags;
 	int was_on_pcie_device_list = 0;
+	u8 update_latency = 0;
 
 	if (ioc->shost_recovery)
 		return;
@@ -1476,12 +1555,21 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 			was_on_pcie_device_list = 1;
 			pcie_device_put(pcie_device);
 		}
+		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+			update_latency = 1;
 	}
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 	if (was_on_pcie_device_list) {
 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
 		pcie_device_put(pcie_device);
 	}
+
+	/* If max shutdown latency is from this device, then update
+	 * ioc->max_shutdown_latency by iterating over the list to
+	 * find max value after removing the device from list.
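+	 *
+	 * For example, with three NVMe devices whose shutdown_latency
+	 * values are 6, 10 and 8, ioc->max_shutdown_latency is 10; if
+	 * the device with value 10 is removed, the rescan performed by
+	 * _scsih_set_nvme_max_shutdown_latency() drops it to 8, or to
+	 * IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT when no remaining device
+	 * reports a latency.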
+ */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } /** @@ -1498,6 +1586,7 @@ _scsih_pcie_device_remove_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) struct _pcie_device *pcie_device; unsigned long flags; int was_on_pcie_device_list = 0; + u8 update_latency = 0; if (ioc->shost_recovery) return; @@ -1510,12 +1599,16 @@ _scsih_pcie_device_remove_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) was_on_pcie_device_list = 1; pcie_device_put(pcie_device); } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { _scsih_pcie_device_remove_from_sml(ioc, pcie_device); pcie_device_put(pcie_device); } + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } /** @@ -2000,6 +2093,27 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) return scmd; } + +static void +_scsih_display_sdev_qd(struct scsi_device *sdev) +{ + if (sdev->inquiry_len <= 7) + return; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, ((sdev->inquiry[7] & 2) >> 1), sdev->simple_tags, + sdev->ordered_tags, sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1); +#else + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); +#endif +} + + #if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) static void _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) @@ -2020,7 +2134,13 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = shost->can_queue; - /* limit max device queue for SATA to 32 */ + /* + * limit max device queue for SATA to 32 if enable_sdev_max_qd + * is disabled. + */ + if (ioc->enable_sdev_max_qd) + goto not_sata; + sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data) goto not_sata; @@ -2032,6 +2152,7 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) max_depth = MPT3SAS_SATA_QUEUE_DEPTH; @@ -2041,27 +2162,32 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) spin_unlock_irqrestore(&ioc->sas_device_lock, flags); not_sata: - - if (!sdev->tagged_supported || sdev->type == TYPE_ENCLOSURE) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) + /* + * if sdev INQUIRY data shows that CmdQue is zero then set sdev QD as one. + */ + if (!sdev->tagged_supported || + !((sdev->scsi_level >= SCSI_2) && (sdev->inquiry_len > 7) && + (sdev->inquiry[7] & 2))) max_depth = 1; +#else + if (!sdev->tagged_supported) + max_depth = 1; +#endif if (qdepth > max_depth) qdepth = max_depth; #if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + _scsih_display_sdev_qd(sdev); #elif (!defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) scsi_adjust_queue_depth(sdev, ((qdepth == 1) ? 0 : MSG_SIMPLE_TAG), qdepth); - if (sdev->inquiry_len > 7) - sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " - "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", - sdev->queue_depth, - ((sdev->type == TYPE_ENCLOSURE) ? 
0 : sdev->tagged_supported), - sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level, - (sdev->inquiry[7] & 2) >> 1); - + _scsih_display_sdev_qd(sdev); return sdev->queue_depth; #else - return scsi_change_queue_depth(sdev, qdepth); + scsi_change_queue_depth(sdev, qdepth); + _scsih_display_sdev_qd(sdev); + return sdev->queue_depth; #endif } @@ -2085,18 +2211,33 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) else return -EOPNOTSUPP; - if (sdev->inquiry_len > 7) - sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " - "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", - sdev->queue_depth, - ((sdev->type == TYPE_ENCLOSURE) ? 0 : sdev->tagged_supported), - sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level, - (sdev->inquiry[7] & 2) >> 1); - return sdev->queue_depth; } #endif +/** + * mpt3sas_scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns nothing. + */ +void +mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_private(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + +#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); +#else + _scsih_change_queue_depth(sdev, qdepth); +#endif +} + #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)) /** * _scsih_change_queue_type - changing device queue tag type @@ -2121,46 +2262,6 @@ _scsih_change_queue_type(struct scsi_device *sdev, int tag_type) } #endif -/** - * _scsih_set_sdev_queue_depth - global setting of device queue depth - * - * Note: only for SAS devices - */ -static int -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) -_scsih_set_sdev_queue_depth(const char *val, const struct kernel_param *kp) -#else -_scsih_set_sdev_queue_depth(const char *val, struct kernel_param *kp) -#endif -{ - int ret = param_set_int(val, kp); - struct MPT3SAS_ADAPTER *ioc; - struct sas_rphy *rphy; - struct scsi_device *sdev; - - if (ret) - return ret; - - list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { - shost_for_each_device(sdev, ioc->shost) { - rphy = target_to_rphy(sdev->sdev_target); - if (rphy->identify.target_port_protocols & - SAS_PROTOCOL_SSP) -#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) - _scsih_change_queue_depth(sdev, - sdev_queue_depth, SCSI_QDEPTH_DEFAULT); -#else - _scsih_change_queue_depth(sdev, - sdev_queue_depth); -#endif - } - } - - return 0; -} -module_param_call(sdev_queue_depth, _scsih_set_sdev_queue_depth, param_get_int, - &sdev_queue_depth, 0600); - /** * _scsih_target_alloc - target add routine * @starget: scsi target struct @@ -3003,11 +3104,7 @@ _scsih_slave_configure(struct scsi_device *sdev) } #endif -#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); -#else - _scsih_change_queue_depth(sdev, qdepth); -#endif + mpt3sas_scsih_change_queue_depth(sdev, qdepth); /* raid transport support */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) @@ -3077,11 +3174,7 @@ _scsih_slave_configure(struct scsi_device *sdev) if (serial_number) sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", serial_number); -#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))) - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); -#else - 
_scsih_change_queue_depth(sdev, qdepth);
-#endif
+	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 
 	/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
 	 * merged and can eliminate holes created during merging
@@ -3124,8 +3217,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
 		sas_device->volume_wwid = volume_wwid;
 		sas_device->serial_number = serial_number;
 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
-			qdepth = (int) sdev_queue_depth > 0 ? sdev_queue_depth :
-			    MPT3SAS_SAS_QUEUE_DEPTH;
+			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
 			ssp_target = 1;
 			if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) {
 				sdev_printk(KERN_WARNING, sdev,
@@ -3172,11 +3264,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
 		sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n",
		    serial_number);
-#if (defined(RAMP_UP_SUPPORT) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)))
-	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
-#else
-	_scsih_change_queue_depth(sdev, qdepth);
-#endif
+	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 
 	if (ssp_target) {
 		sas_read_port_mode_page(sdev);
@@ -3417,6 +3505,95 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	}
 }
 
+/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc - per adapter object
+ * @channel - the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Check whether the TM has aborted the timed out SCSI command; if
+ * the TM has aborted the IO then return SUCCESS, else return FAILED.
+ */
+int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+	uint id, uint lun, u8 type, u16 smid_task)
+{
+
+	if (smid_task <= ioc->shost->can_queue) {
+		switch (type) {
+		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+
+			if (!(_scsih_scsi_lookup_find_by_target(ioc, id, channel)))
+				return SUCCESS;
+			break;
+		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)))
+				return SUCCESS;
+			break;
+		default:
+			return SUCCESS;
+		}
+	} else if (smid_task == ioc->scsih_cmds.smid) {
+		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+			return SUCCESS;
+	} else if (smid_task == ioc->ctl_cmds.smid) {
+		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+			return SUCCESS;
+	}
+
+	return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc - per adapter object
+ * @handle: device handle
+ * @channel - the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues it is possible that the interrupt for an aborted IO has
+ * not been received yet. So before returning a failure status, poll
+ * the reply descriptor pools for the reply of the timed out SCSI
+ * command. Return FAILED if the reply for the timed out IO is still
+ * not received, otherwise return SUCCESS.
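+ *
+ * For example: a SCSI command times out, a TARGET_RESET TM is issued
+ * and its reply arrives before the reply of the aborted IO. The
+ * lookup in scsih_tm_cmd_map_status() still finds the command, so
+ * rather than failing immediately the routine below masks interrupts,
+ * polls every reply descriptor queue once, unmasks interrupts and
+ * repeats the lookup; FAILED is returned only if the aborted IO's
+ * reply is still outstanding after the poll.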
+ */ +int +scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task) +{ + int rc; + + rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); + if (rc == SUCCESS) + return rc; + + printk(MPT3SAS_INFO_FMT + "Poll ReplyDescriptor queues for completion of" + " smid(%d), task_type(0x%02x), handle(0x%04x)\n", + ioc->name, smid_task, type, handle); + + /* + * Due to interrupt latency issues, driver may receive interrupt for + * TM first and then for aborted SCSI IO command. So, poll all the + * ReplyDescriptor pools before returning the FAILED status to SML. + */ + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_sync_reply_irqs(ioc, 1); + mpt3sas_base_unmask_interrupts(ioc); + + return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); +} + /** * mpt3sas_scsih_issue_tm - main routine for sending tm requests * @ioc: per adapter struct @@ -3449,6 +3626,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, struct scsiio_tracker *scsi_lookup = NULL; int rc; u16 msix_task = 0; + u8 issue_reset = 0; lockdep_assert_held(&ioc->tm_cmds.mutex); if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { @@ -3466,14 +3644,20 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, ioc_state = mpt3sas_base_get_iocstate(ioc, 0); if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, printk(MPT3SAS_INFO_FMT "unexpected doorbell " - "active!\n", ioc->name)); + printk(MPT3SAS_INFO_FMT "unexpected doorbell active!\n", + ioc->name); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); return (!rc) ? SUCCESS : FAILED; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + else if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + mpt3sas_base_coredump_info(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); return (!rc) ? SUCCESS : FAILED; @@ -3500,7 +3684,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = type; mpi_request->MsgFlags = tr_method; - mpi_request->TaskMID = cpu_to_le16(smid_task); + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->TaskMID = cpu_to_le16(smid_task); int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); mpt3sas_scsih_set_tm_flag(ioc, handle); init_completion(&ioc->tm_cmds.done); @@ -3512,16 +3698,17 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, ioc->put_smid_hi_priority(ioc, smid, msix_task); wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { - if (mpt3sas_base_check_cmd_timeout(ioc, + mpt3sas_check_cmd_timeout(ioc, ioc->tm_cmds.status, mpi_request, - sizeof(Mpi2SCSITaskManagementRequest_t)/4)) { + sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); + if (issue_reset) { rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); rc = (!rc) ? 
SUCCESS : FAILED; goto out; } } - mpt3sas_base_sync_reply_irqs(ioc); + mpt3sas_base_sync_reply_irqs(ioc, 0); if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); @@ -3556,25 +3743,18 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, request = mpt3sas_base_get_msg_frame(ioc, smid_task); if (le16_to_cpu(request->DevHandle) != handle) break; + + printk(MPT3SAS_INFO_FMT + "Task abort tm failed: handle(0x%04x), timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n", + ioc->name, handle, timeout, tr_method, smid_task, msix_task); rc = FAILED; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: - if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) { - rc = SUCCESS; - break; - } - if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) - rc = FAILED; - else - rc = SUCCESS; - break; case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: - if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) - rc = FAILED; - else - rc = SUCCESS; + rc = scsih_tm_post_processing(ioc, handle, channel, id, lun, + type, smid_task); break; case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: rc = SUCCESS; @@ -3720,12 +3900,14 @@ _scsih_abort(struct scsi_cmnd *scmd) u8 timeout = 30; struct _pcie_device *pcie_device = NULL; - sdev_printk(KERN_INFO, scmd->device, "attempting task abort! " - "scmd(%p)\n", scmd); + sdev_printk(KERN_INFO, scmd->device, + "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scmd->request->timeout / HZ) * 1000); _scsih_tm_display_info(ioc, scmd); if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { - sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", ((ioc->remove_host)?("shost is getting removed!"): ("pci device been removed!")), scmd); if (st && st->smid) @@ -3738,7 +3920,7 @@ _scsih_abort(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3747,6 +3929,9 @@ _scsih_abort(struct scsi_cmnd *scmd) /* check for completed command */ if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, + "No reference found at driver, assuming scmd(0x%p) might have completed\n", + scmd); scmd->result = DID_RESET << 16; r = SUCCESS; goto out; @@ -3774,7 +3959,7 @@ _scsih_abort(struct scsi_cmnd *scmd) 0); out: - sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (pcie_device) pcie_device_put(pcie_device); @@ -3808,11 +3993,11 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) } sdev_printk(KERN_INFO, scmd->device, "attempting device reset! 
" - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { - sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", ((ioc->remove_host)?("shost is getting removed!"): ("pci device been removed!")), scmd); scmd->result = DID_NO_CONNECT << 16; @@ -3823,7 +4008,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3861,7 +4046,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) scmd->device->id, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, tr_timeout, tr_method); out: - sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -3893,11 +4078,11 @@ _scsih_target_reset(struct scsi_cmnd *scmd) struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; starget_printk(KERN_INFO, starget, "attempting target reset! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); if (mpt3sas_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { - sdev_printk(KERN_INFO, scmd->device, "%s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", ((ioc->remove_host)?("shost is getting removed!"): ("pci device been removed!")), scmd); scmd->result = DID_NO_CONNECT << 16; @@ -3908,7 +4093,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { starget_printk(KERN_INFO, starget, "target been deleted! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3947,7 +4132,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) tr_timeout, tr_method); out: - starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -3981,12 +4166,12 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) u8 tr_method = 0; starget_printk(KERN_INFO, starget, "attempting target reset! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); if (mpt3sas_base_pci_device_is_unplugged(ioc)) { sdev_printk(KERN_INFO, scmd->device, - "pci device been removed! scmd(%p)\n", scmd); + "pci device been removed! scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3996,7 +4181,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { starget_printk(KERN_INFO, starget, "target been deleted! " - "scmd(%p)\n", scmd); + "scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -4037,7 +4222,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) tr_timeout, tr_method); out: - starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); if (sas_device) sas_device_put(sas_device); @@ -4059,7 +4244,7 @@ _scsih_host_reset(struct scsi_cmnd *scmd) struct MPT3SAS_ADAPTER *ioc = shost_private(scmd->device->host); int r, retval; - printk(MPT3SAS_INFO_FMT "attempting host reset! scmd(%p)\n", + printk(MPT3SAS_INFO_FMT "attempting host reset! scmd(0x%p)\n", ioc->name, scmd); scsi_print_command(scmd); @@ -4074,7 +4259,7 @@ _scsih_host_reset(struct scsi_cmnd *scmd) r = (retval < 0) ? FAILED : SUCCESS; out: - printk(MPT3SAS_INFO_FMT "host reset: %s scmd(%p)\n", + printk(MPT3SAS_INFO_FMT "host reset: %s scmd(0x%p)\n", ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; @@ -4282,11 +4467,14 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) { struct fw_event_work *fw_event; - if (list_empty(&ioc->fw_event_list) || + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || !ioc->firmware_event_thread || in_interrupt()) return; - while ((fw_event = dequeue_next_fw_event(ioc))) { + ioc->fw_events_cleanup = 1; + + while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)) if (fw_event->delayed_work_active) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) @@ -4306,6 +4494,8 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) #endif fw_event_work_put(fw_event); } + + ioc->fw_events_cleanup = 0; } /** @@ -5940,8 +6130,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { doorbell = mpt3sas_base_get_iocstate(ioc, 0); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc, doorbell & + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) + mpt3sas_base_coredump_info(ioc, doorbell & MPI2_DOORBELL_DATA_MASK); } } @@ -6227,9 +6420,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 12) && (CONFIG_SUSE_PATCHLEVEL >= 5)))) cpu_to_be32(t10_pi_ref_tag(scmd->request)); -#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)) - cpu_to_be32(scsi_prot_ref_tag(scmd)); -#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0))) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0))) cpu_to_be32(mpt3sas_scsi_prot_ref_tag(scmd)); #else cpu_to_be32(scsi_get_lba(scmd)); @@ -6310,10 +6501,43 @@ int _scsih_build_nvme_unmap(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, dma_addr_t nvme_dsm_ranges_dma_handle = 0; list_len = get_unaligned_be16(&scmd->cmnd[7]); - if (!list_len) - return -EINVAL; + if (!list_len) { + pr_warn(MPT3SAS_FMT + "%s: CDB received with zero parameter length\n", + ioc->name, __func__); + scsi_print_command(scmd); + scmd->result = DID_OK << 16; + scmd->scsi_done(scmd); + return 1; + } - plist = kmalloc(list_len, GFP_KERNEL); + if (list_len < 24) { + pr_warn(MPT3SAS_FMT + "%s: CDB received with invalid param_len: %d\n", + ioc->name, __func__, list_len); + scsi_print_command(scmd); + scmd->result = (DRIVER_SENSE << 24) | + SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x1A, 0); + scmd->scsi_done(scmd); + return 1; + } + + if (list_len != scsi_bufflen(scmd)) { + pr_warn(MPT3SAS_FMT + "%s: CDB received with param_len: %d 
bufflen: %d\n", + ioc->name, __func__, list_len, scsi_bufflen(scmd)); + scsi_print_command(scmd); + scmd->result = (DRIVER_SENSE << 24) | + SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, + 0x1A, 0); + scmd->scsi_done(scmd); + return 1; + } + + plist = kzalloc(list_len, GFP_KERNEL); if (!plist) return -ENOMEM; @@ -6324,16 +6548,14 @@ int _scsih_build_nvme_unmap(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * if number of descripts is zero. */ ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; - if (!ndesc) { - res = 1; - goto out; - } else if (ndesc > 256) { + if (ndesc == 0 || ndesc > 256) { res = -EINVAL; goto out; } data_length = ndesc * sizeof(*nvme_dsm_ranges); - if (nvme_mdts && (data_length > nvme_mdts)) { + if ((nvme_mdts && (data_length > nvme_mdts)) || + (data_length > (list_len - 8))) { res = -EINVAL; goto out; } @@ -6531,9 +6753,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) pcie_device = sas_target_priv_data->pcie_dev; rc = _scsih_build_nvme_unmap(ioc, scmd, handle, sas_device_priv_data->lun, pcie_device->nvme_mdts); - if (rc == 1) { /* return command to SML with success status */ - scmd->result = DID_OK << 16; - scmd->scsi_done(scmd); + if (rc == 1) { /* return command back to SML */ rc = 0; goto out; } else if (!rc) { /* Issued NVMe Encapsulated Request Message */ @@ -6769,16 +6989,19 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, desc_ioc_state = "eedp guard error"; break; } + /* fall through */ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: if (!ioc->disable_eedp_support) { desc_ioc_state = "eedp ref tag error"; break; } + /* fall through */ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: if (!ioc->disable_eedp_support) { desc_ioc_state = "eedp app tag error"; break; } + /* fall through */ case MPI2_IOCSTATUS_INSUFFICIENT_POWER: desc_ioc_state = "insufficient power"; break; @@ -7536,6 +7759,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) #else scsi_set_resid(scmd, 0); #endif + /* fall through */ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; @@ -7550,12 +7774,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + /* fall through */ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: if (!ioc->disable_eedp_support) { _scsih_eedp_error_handling(scmd, ioc_status); break; } - + /* fall through */ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INVALID_SGL: @@ -7617,6 +7842,205 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) return 0; } +/** + * _scsih_update_vphys_after_reset - update the Port's vphys_list after reset + * @ioc: per adapter object + * + * Returns nothing. + */ +static void +_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found=0, port_id; + Mpi2SasPhyPage0_t phy_pg0; + struct hba_port *port, *port_next, *mport; + struct virtual_phy *vphy, *vphy_next; + struct _sas_device *sas_device; + + /* + * Mark all the vphys objects as dirty. 
+ */ + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY; + } + } + + /* + * Read SASIOUnitPage0 to get each HBA Phy's data. + */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + + /* + * Loop over each HBA Phy. + */ + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + + /* + * Check whether the Phy's Negotiated Link Rate is >= 1.5G or not. + */ + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + + /* + * Check whether the Phy is connected to a SEP device or not. + * If it is a SEP device, then read the Phy's SASPHYPage0 data to + * determine whether the Phy is a virtual Phy or not. If it is a + * virtual phy, then it is confirmed that the attached remote + * device is an HBA's vSES device. + */ + if (!(le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP)) + continue; + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + if (!(le32_to_cpu(phy_pg0.PhyInfo) & MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + + /* + * Get the vSES device's SAS Address. + */ + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. + AttachedDevHandle); + if (_scsih_get_sas_address(ioc, attached_handle, &attached_sas_addr) + != 0) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + + found = 0; + port = port_next = NULL; + + /* + * Loop over each virtual_phy object from each port's vphys_list. + */ + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + + /* + * Continue with next virtual_phy object + * if the object is not marked as dirty. + */ + if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY)) + continue; + + /* + * Continue with next virtual_phy object + * if the object's SAS Address is not equal + * to current Phy's vSES device SAS Address. + */ + if (vphy->sas_address != attached_sas_addr) + continue; + + /* + * Enable current Phy number bit in object's + * phy_mask field. + */ + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + + /* + * Get hba_port object from hba_port table + * corresponding to current phy's Port ID. + * If there is no hba_port object corresponding + * to the Phy's Port ID, then create a new hba_port + * object & add it to the hba_port table.
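_scsih_update_vphys_after_reset() (together with _scsih_del_dirty_vphy() further down) is a classic mark-and-sweep refresh: every vphy is first flagged MPT_VPHY_FLAG_DIRTY_PHY, each phy rediscovered from SASIOUnitPage0 clears the flag on its matching entry (possibly migrating it to a new hba_port), and whatever is still dirty after the scan is reaped. A compact sketch of the pattern; struct entry and refresh() are illustrative, not driver types:

    /* Mark-and-sweep refresh, as used for the vphys objects above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define ENTRY_DIRTY (1u << 0)

    struct entry {
        uint64_t key;      /* e.g. a SAS address */
        unsigned flags;
        bool in_use;
    };

    static void refresh(struct entry *tbl, int n,
                        const uint64_t *found, int nfound)
    {
        int i, j;

        for (i = 0; i < n; i++)            /* 1. mark everything dirty  */
            if (tbl[i].in_use)
                tbl[i].flags |= ENTRY_DIRTY;

        for (j = 0; j < nfound; j++)       /* 2. unmark what reappears  */
            for (i = 0; i < n; i++)
                if (tbl[i].in_use && tbl[i].key == found[j])
                    tbl[i].flags &= ~ENTRY_DIRTY;

        for (i = 0; i < n; i++)            /* 3. sweep the stragglers   */
            if (tbl[i].in_use && (tbl[i].flags & ENTRY_DIRTY))
                tbl[i].in_use = false;     /* list_del()/kfree() in the driver */
    }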
+ */ + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = mpt3sas_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!mport) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + break; + } + mport->port_id = port_id; + printk(MPT3SAS_INFO_FMT "%s: hba_port entry: %p," + " port: %d is added to hba_port list\n", + ioc->name, __func__, mport, mport->port_id); + list_add_tail(&mport->list, &ioc->port_table_list); + } + + /* + * If the mport & port pointers are not pointing to the + * same hba_port object, it means that the vSES + * device's Port ID got changed after reset; + * hence move the current virtual_phy object from + * port's vphys_list to mport's vphys_list. + */ + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD(&mport->vphys_list); + + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, &mport->vphys_list); + + sas_device = mpt3sas_get_sdev_by_addr(ioc, + attached_sas_addr, port); + if (sas_device) + sas_device->port = mport; + } + + /* + * Earlier, while updating the hba_port table, + * it was determined that there is no other + * direct attached device with mport's Port ID; + * hence mport was marked as dirty. Only the vSES + * device has this Port ID, so unmark the mport + * as dirty. + */ + if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) { + mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; + } + + /* + * Unmark current virtual_phy object as dirty. + */ + vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + /** * _scsih_get_port_table_after_reset - Construct temporary port table * @ioc: per adapter object @@ -7689,10 +8113,13 @@ out: return port_count; } -#define MATCHED_WITH_ADDR_AND_PHYMASK 1 -#define MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT 2 -#define MATCHED_WITH_ADDR_AND_SUBPHYMASK 3 -#define MATCHED_WITH_ADDR 4 +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; /** * _scsih_look_and_get_matched_port_entry - Get matched port entry @@ -7710,11 +8137,8 @@ _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, struct hba_port *port_entry, struct hba_port **matched_port_entry, int *count) { - struct hba_port *port_table_entry = NULL; - struct hba_port *port_sasaddr_phymask = NULL; - struct hba_port *port_sasaddr_subphymask_port = NULL; - struct hba_port *port_sasaddr_subphymask = NULL; - struct hba_port *port_sasaddr = NULL; + struct hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; int lcount=0; *matched_port_entry = NULL; @@ -7724,51 +8148,45 @@ _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, if((port_table_entry->sas_address == port_entry->sas_address) && (port_table_entry->phy_mask == port_entry->phy_mask)) { - port_sasaddr_phymask = port_table_entry; + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; break; } if((port_table_entry->sas_address == port_entry->sas_address) && (port_table_entry->phy_mask & port_entry->phy_mask) && (port_table_entry->port_id == port_entry->port_id)) { - port_sasaddr_subphymask_port = port_table_entry; + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; continue; } if((port_table_entry->sas_address
== port_entry->sas_address) && (port_table_entry->phy_mask & port_entry->phy_mask)) { - port_sasaddr_subphymask = port_table_entry; + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; continue; } if(port_table_entry->sas_address == port_entry->sas_address) { - port_sasaddr = port_table_entry; + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; lcount++; } } - if (port_sasaddr_phymask) { - *matched_port_entry = port_sasaddr_phymask; - return MATCHED_WITH_ADDR_AND_PHYMASK; - } - - if (port_sasaddr_subphymask_port) { - *matched_port_entry = port_sasaddr_subphymask_port; - return MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; - } - - if (port_sasaddr_subphymask) { - *matched_port_entry = port_sasaddr_subphymask; - return MATCHED_WITH_ADDR_AND_SUBPHYMASK; - } - - if (port_sasaddr) { - *matched_port_entry = port_sasaddr; + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) *count = lcount; - return MATCHED_WITH_ADDR; - } - - return 0; + return matched_code; } /** @@ -7843,6 +8261,38 @@ _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, } } +/** + * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. + * @ioc: per adapter object + * + * Returns nothing. + */ +static void +_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + struct virtual_phy *vphy, *vphy_next; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { + drsprintk(ioc, printk(MPT3SAS_INFO_FMT + "Deleting vphy %p entry from" + " port id: %d\t, Phy_mask 0x%08x\n", + ioc->name, vphy, port->port_id, + vphy->phy_mask)); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + if (!port->vphys_mask && !port->sas_address) + port->flags |= HBA_PORT_FLAG_DIRTY_PORT; + } +} + /** * _scsih_del_dirty_port_entries - delete dirty port entries from port list * after host reset @@ -7859,10 +8309,9 @@ _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) port->flags & HBA_PORT_FLAG_NEW_PORT) continue; drsprintk(ioc, printk(MPT3SAS_INFO_FMT - "Deleting port table entry having" - " Port: %d\t Phy_mask 0x%08x\n", ioc->name, - port->port_id, - port->phy_mask)); + "Deleting port table entry %p having" + " Port id: %d\t, Phy_mask 0x%08x\n", ioc->name, + port, port->port_id, port->phy_mask)); list_del(&port->list); kfree(port); } @@ -7876,7 +8325,7 @@ static void _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) { u8 port_count=0; - struct hba_port port_table[ioc->sas_hba.num_phys]; + struct hba_port *port_table; struct hba_port *port_table_entry; struct hba_port *port_entry = NULL; int i, j, ret, count=0, lcount=0; @@ -7886,7 +8335,12 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) "updating ports for sas_host(0x%016llx)\n", ioc->name, (unsigned long long)ioc->sas_hba.sas_address)); - port_count = _scsih_get_port_table_after_reset(ioc, &port_table[0]); + port_table = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct hba_port), GFP_KERNEL); + if (!port_table) + return; + + port_count = _scsih_get_port_table_after_reset(ioc, port_table); if (!port_count) return; @@ -7927,7 +8381,7 
@@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: case MATCHED_WITH_ADDR_AND_SUBPHYMASK: _scsih_add_or_del_phys_from_existing_port(ioc, - port_entry, &port_table[0], j, port_count); + port_entry, port_table, j, port_count); break; case MATCHED_WITH_ADDR: sas_addr = port_table[j].sas_address; @@ -7940,7 +8394,7 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) port_entry = NULL; else _scsih_add_or_del_phys_from_existing_port(ioc, - port_entry, &port_table[0], j, port_count); + port_entry, port_table, j, port_count); } if (!port_entry) @@ -7956,6 +8410,50 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) } +/** + * _scsih_alloc_vphy - allocate virtual_phy object + * @ioc: per adapter object + * @port_id: Port ID number + * @phy_num: HBA Phy number + * + * Returns allocated virtual_phy object. + */ +static struct virtual_phy * +_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) +{ + struct virtual_phy *vphy; + struct hba_port *port; + + port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!port) + return NULL; + + vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); + if (!vphy) { + vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); + if (!vphy) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + + /* + * Enable bit corresponding to HBA phy number on its + * parent hba_port object's vphys_mask field. + */ + port->vphys_mask |= (1 << phy_num); + vphy->phy_mask |= (1 << phy_num); + + INIT_LIST_HEAD(&port->vphys_list); + list_add_tail(&vphy->list, &port->vphys_list); + + printk(MPT3SAS_INFO_FMT "vphy entry: %p," + " port id: %d, phy:%d is added to port's vphys_list\n", + ioc->name, vphy, port->port_id, phy_num); + } + return vphy; +} + /** * _scsih_sas_host_refresh - refreshing sas host object contents * @ioc: per adapter object @@ -7978,6 +8476,7 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) u16 attached_handle; u8 link_rate, port_id; struct hba_port *port; + Mpi2SasPhyPage0_t phy_pg0; dtmprintk(ioc, printk(MPT3SAS_INFO_FMT "updating handles for sas_host(0x%016llx)\n", @@ -8004,7 +8503,7 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> PhyData[0].ControllerDevHandle); port_id = sas_iounit_pg0->PhyData[i].Port; - if (!(mpt3sas_get_port_by_id(ioc, port_id))) { + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); if (!port) { @@ -8020,12 +8519,39 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) port->flags = HBA_PORT_FLAG_NEW_PORT; list_add_tail(&port->list, &ioc->port_table_list); } + + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP && + (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + if (!(le32_to_cpu(phy_pg0.PhyInfo) & MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + + /* + * Allocate a virtual_phy object for vSES device, if + * this vSES device is hot added. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle); if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; - ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); + ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id, 0); mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } @@ -8152,7 +8678,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) PhyData[0].ControllerDevHandle); port_id = sas_iounit_pg0->PhyData[i].Port; - if (!(mpt3sas_get_port_by_id(ioc, port_id))) { + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); if (!port) { printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", @@ -8165,9 +8691,24 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) ioc->name, port, port->port_id); list_add_tail(&port->list, &ioc->port_table_list); } + + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if ((le32_to_cpu(phy_pg0.PhyInfo) & MPI2_SAS_PHYINFO_VIRTUAL_PHY) && + (phy_pg0.NegotiatedLinkRate >> 4) >= MPI2_SAS_NEG_LINK_RATE_1_5) { + + /* + * Allocate a virtual_phy object for vSES device. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; ioc->sas_hba.phy[i].phy_id = i; - ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); + ioc->sas_hba.phy[i].port = mpt3sas_get_port_by_id(ioc, port_id, 0); mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], phy_pg0, ioc->sas_hba.parent_dev); } @@ -8260,7 +8801,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address_parent, mpt3sas_get_port_by_id(ioc, port_id)); + sas_address_parent, mpt3sas_get_port_by_id(ioc, port_id, 0)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_expander) { rc = _scsih_expander_add(ioc, parent_handle); @@ -8272,7 +8813,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_address = le64_to_cpu(expander_pg0.SASAddress); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, - sas_address, mpt3sas_get_port_by_id(ioc, port_id)); + sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) @@ -8290,7 +8831,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_expander->num_phys = expander_pg0.NumPhys; sas_expander->sas_address_parent = sas_address_parent; sas_expander->sas_address = sas_address; - sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id); + sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); if (!sas_expander->port) { printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -8324,6 +8865,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) goto out_fail; } sas_expander->parent_dev = &mpt3sas_port->rphy->dev; + sas_expander->rphy = mpt3sas_port->rphy; for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, @@ -8335,7 +8877,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) } sas_expander->phy[i].handle = handle; sas_expander->phy[i].phy_id = i; - sas_expander->phy[i].port = mpt3sas_get_port_by_id(ioc, port_id); + sas_expander->phy[i].port = mpt3sas_get_port_by_id(ioc, port_id, 0); if 
((mpt3sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], expander_pg1, @@ -8565,10 +9107,9 @@ _scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer timeleft = wait_for_completion_timeout(&ioc->scsih_cmds.done, transfer_packet->timeout*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->scsih_cmds.status, mpi_request, - sizeof(Mpi2SCSIIORequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2SCSIIORequest_t)/4, issue_reset); goto issue_target_reset; } if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { @@ -8598,7 +9139,7 @@ _scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer tm_return_code = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0xFFFFFFFF, 0xFFFFFFFF, 0, - MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, tr_timeout, tr_method); if (tm_return_code == SUCCESS) { @@ -9431,7 +9972,7 @@ _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, { struct _scsi_io_transfer *transfer_packet; enum device_responsive_state rc; - u8 *idd_data; + u16 *idd_data; int return_code; u32 data_length; @@ -9471,7 +10012,7 @@ _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, rc = _scsih_determine_disposition(ioc, transfer_packet); if (rc == DEVICE_READY) { // Check if nominal media rotation rate is set to 1 i.e. SSD device - if(idd_data[434] == 1) + if(le16_to_cpu(idd_data[217]) == 1) *is_ssd_device = 1; } break; @@ -9725,7 +10266,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(sas_device_pg0.SASAddress); - port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort); + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); if (!port) goto out_unlock; @@ -9889,7 +10430,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, port_id = sas_device_pg0.PhysicalPort; sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address, - mpt3sas_get_port_by_id(ioc, port_id)); + mpt3sas_get_port_by_id(ioc, port_id, 0)); if (sas_device) { clear_bit(handle, ioc->pend_os_device_add); @@ -9971,7 +10512,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); sas_device->device_info = device_info; sas_device->sas_address = sas_address; - sas_device->port = mpt3sas_get_port_by_id(ioc, port_id); + sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); if (!sas_device->port) { printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -10202,7 +10743,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, } parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); - port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort); + port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0); /* handle expander add */ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) @@ -10302,6 +10843,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, event_data->PHY[i].PhyStatus &= 0xF0; event_data->PHY[i].PhyStatus |= MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; + + /* fall through */ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: @@ -10440,7 +10983,7 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(event_data->SASAddress); sas_device = __mpt3sas_get_sdev_by_addr(ioc, 
sas_address, - mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort)); + mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0)); if (!sas_device || !sas_device->starget) goto out; @@ -10943,6 +11486,11 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count) le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { pcie_device->nvme_mdts = le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); + pcie_device->shutdown_latency = + le16_to_cpu(pcie_device_pg2.ShutdownLatency); + if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) + ioc->max_shutdown_latency = + pcie_device->shutdown_latency; if (pcie_device_pg2.ControllerResetTO) pcie_device->reset_timeout = pcie_device_pg2.ControllerResetTO; @@ -10951,7 +11499,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count) } else pcie_device->reset_timeout = 30; - #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { if (!ioc->disable_eedp_support) { @@ -11142,6 +11689,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, event_data->PortEntry[i].PortStatus &= 0xF0; event_data->PortEntry[i].PortStatus |= MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; + /* fall through */ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: if (ioc->shost_recovery) break; @@ -11712,10 +12260,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->scsih_cmds.status, mpi_request, - sizeof(Mpi2RaidActionRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); rc = -EFAULT; goto out; } @@ -12041,7 +12588,7 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, - mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort)); + mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0)); if(ioc->hba_mpi_version_belonged != MPI2_VERSION) _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); _scsih_add_device(ioc, handle, 0, 1); @@ -12347,7 +12894,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, - mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort)); + mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0)); _scsih_add_device(ioc, handle, 0, 1); @@ -12479,7 +13026,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0) unsigned long flags; struct hba_port *port; - port = mpt3sas_get_port_by_id(ioc, sas_device_pg0->PhysicalPort); + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0->PhysicalPort, 0); if (sas_device_pg0->EnclosureHandle) { enclosure_dev = @@ -12939,7 +13486,7 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, unsigned long flags; int i; u8 port_id = expander_pg0->PhysicalPort; - struct hba_port *port = mpt3sas_get_port_by_id(ioc, port_id); + struct hba_port *port = mpt3sas_get_port_by_id(ioc, port_id, 0); struct _enclosure_node *enclosure_dev = NULL; u16 handle = le16_to_cpu(expander_pg0->DevHandle); u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); @@ -13215,7 +13762,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) 
expander_device = mpt3sas_scsih_expander_find_by_sas_address( ioc, le64_to_cpu(expander_pg0.SASAddress), - mpt3sas_get_port_by_id(ioc, port_id)); + mpt3sas_get_port_by_id(ioc, port_id, 0)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (expander_device) _scsih_refresh_expander_links(ioc, expander_device, @@ -13286,7 +13833,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, - mpt3sas_get_port_by_id(ioc, port_id)); + mpt3sas_get_port_by_id(ioc, port_id, 0)); set_bit(handle, ioc->pd_handles); retry_count = 0; /* This will retry adding the end device. @@ -13387,7 +13934,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) port_id = sas_device_pg0.PhysicalPort; sas_device = mpt3sas_get_sdev_by_addr(ioc, le64_to_cpu(sas_device_pg0.SASAddress), - mpt3sas_get_port_by_id(ioc, port_id)); + mpt3sas_get_port_by_id(ioc, port_id, 0)); if (sas_device) { sas_device_put(sas_device); continue; @@ -13401,7 +13948,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, - mpt3sas_get_port_by_id(ioc, port_id)); + mpt3sas_get_port_by_id(ioc, port_id, 0)); retry_count = 0; /* This will retry adding the end device. * _scsih_add_device() will decide on retries and @@ -13526,8 +14073,10 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) "MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { - if (ioc->multipath_on_hba) + if (ioc->multipath_on_hba) { _scsih_sas_port_refresh(ioc); + _scsih_update_vphys_after_reset(ioc); + } _scsih_prep_device_scan(ioc); _scsih_create_enclosure_list_after_reset(ioc); _scsih_search_responding_sas_devices(ioc); @@ -13551,11 +14100,13 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) static void _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { + ioc->current_event = fw_event; _scsih_fw_event_del_from_list(ioc, fw_event); /* the queue is being flushed so ignore this event */ if (ioc->remove_host || ioc->pci_error_recovery) { fw_event_work_put(fw_event); + ioc->current_event = NULL; return; } @@ -13567,17 +14118,19 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) { /* - * If we're unloading, bail. Otherwise, this can become - * an infinite loop. + * If we're unloading or cancelling the work, bail. + * Otherwise, this can become an infinite loop. 
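The fw_event changes above close a shutdown race: the worker now publishes the event it is processing in ioc->current_event, and _scsih_fw_event_cleanup_queue() sets ioc->fw_events_cleanup so that a worker parked in the recovery-wait loop bails out instead of sleeping forever while the queue is being flushed. A minimal sketch of that handshake, assuming C11 atomics in place of the driver's own locking; process_one() and struct ctx are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ctx {
        atomic_bool cleanup;  /* set by the flush path before draining   */
        void *current;        /* published so the flusher can see it too */
    };

    /* Worker side: returns false if the flush path asked us to bail. */
    static bool process_one(struct ctx *c, void *work, bool (*busy)(void))
    {
        c->current = work;                    /* like ioc->current_event */
        while (busy()) {                      /* e.g. host in recovery   */
            if (atomic_load(&c->cleanup)) {   /* flush in progress: bail */
                c->current = NULL;
                return false;
            }
            /* the driver sleeps one second here (ssleep(1)) */
        }
        /* ... handle the event ... */
        c->current = NULL;
        return true;
    }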
*/ - if (ioc->remove_host) + if (ioc->remove_host || ioc->fw_events_cleanup) goto out; ssleep(1); } _scsih_remove_unresponding_devices(ioc); + _scsih_del_dirty_vphy(ioc); _scsih_del_dirty_port_entries(ioc); _scsih_scan_for_devices_after_reset(ioc); + _scsih_set_nvme_max_shutdown_latency(ioc); if(ioc->hba_mpi_version_belonged == MPI2_VERSION) _scsih_hide_unhide_sas_devices(ioc); break; @@ -13600,6 +14153,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: if (_scsih_sas_topology_change_event(ioc, fw_event)) { _scsih_fw_event_requeue(ioc, fw_event, 1000); + ioc->current_event = NULL; return; } break; @@ -13642,12 +14196,14 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: if (_scsih_pcie_topology_change_event(ioc, fw_event)) { _scsih_fw_event_requeue(ioc, fw_event, 1000); + ioc->current_event = NULL; return; } break; } out: fw_event_work_put(fw_event); + ioc->current_event = NULL; } /** @@ -13778,6 +14334,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, mpi_reply->EventData); break; } + /* fall through */ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: _scsih_sas_device_status_change_event(ioc, (Mpi2EventDataSasDeviceStatusChange_t *) @@ -13966,9 +14523,12 @@ _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); - /* Wait for six seconds */ + /* Wait for max_shutdown_latency seconds */ + printk(MPT3SAS_INFO_FMT + "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", + ioc->name, ioc->max_shutdown_latency); wait_for_completion_timeout(&ioc->scsih_cmds.done, - IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT*HZ); + ioc->max_shutdown_latency*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { printk(MPT3SAS_ERR_FMT "%s: timeout\n", @@ -14118,6 +14678,7 @@ scsih_remove(struct pci_dev *pdev) struct workqueue_struct *wq; unsigned long flags; struct hba_port *port, *port_next; + struct virtual_phy *vphy, *vphy_next; Mpi2ConfigReply_t mpi_reply; if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) { @@ -14147,7 +14708,8 @@ scsih_remove(struct pci_dev *pdev) mpt3sas_base_stop_smart_polling(ioc); spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); - mpt3sas_scsih_flush_running_cmds(ioc); + if (mpt3sas_base_pci_device_is_unplugged(ioc)) + mpt3sas_scsih_flush_running_cmds(ioc); _scsih_fw_event_cleanup_queue(ioc); spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); @@ -14169,6 +14731,7 @@ scsih_remove(struct pci_dev *pdev) /* release all the volumes */ _scsih_ir_shutdown(ioc); + mpt3sas_destroy_debugfs(ioc); sas_remove_host(shost); scsi_remove_host(shost); @@ -14210,8 +14773,14 @@ scsih_remove(struct pci_dev *pdev) } list_for_each_entry_safe(port, - port_next, - &ioc->port_table_list, list) { + port_next, &ioc->port_table_list, list) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + list_del(&vphy->list); + kfree(vphy); + } + } list_del(&port->list); kfree(port); } @@ -14934,15 +15503,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) { case MPI26_MFGPAGE_DEVID_SAS3324_2: case MPI26_MFGPAGE_DEVID_SAS3324_3: case MPI26_MFGPAGE_DEVID_SAS3324_4: - case MPI26_MFGPAGE_DEVID_SAS3508: case MPI26_MFGPAGE_DEVID_SAS3508_1: case MPI26_MFGPAGE_DEVID_SAS3408: - case MPI26_MFGPAGE_DEVID_SAS3516: case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: case MPI26_MFGPAGE_DEVID_SAS3716: case 
MPI26_MFGPAGE_DEVID_SAS3616: - case MPI26_MFGPAGE_DEVID_SAS3708: case MPI26_MFGPAGE_DEVID_PEX88000: case MPI26_MFGPAGE_DEVID_INVALID0_3816: case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: @@ -15063,29 +15629,36 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); switch (pdev->device) { - case MPI26_MFGPAGE_DEVID_SAS3508: case MPI26_MFGPAGE_DEVID_SAS3508_1: case MPI26_MFGPAGE_DEVID_SAS3408: - case MPI26_MFGPAGE_DEVID_SAS3516: case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: case MPI26_MFGPAGE_DEVID_SAS3716: case MPI26_MFGPAGE_DEVID_SAS3616: - case MPI26_MFGPAGE_DEVID_SAS3708: case MPI26_MFGPAGE_DEVID_PEX88000: ioc->is_gen35_ioc = 1; break; case MPI26_MFGPAGE_DEVID_INVALID0_3816: case MPI26_MFGPAGE_DEVID_INVALID0_3916: + dev_printk(KERN_ERR, &pdev->dev, + "HBA with DeviceId 0x%04x, subsystem VendorId 0x%04x," + " subsystem DeviceId 0x%04x is Invalid", + pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); + return 1; case MPI26_MFGPAGE_DEVID_INVALID1_3816: case MPI26_MFGPAGE_DEVID_INVALID1_3916: dev_printk(KERN_ERR, &pdev->dev, - "HBA is in Non Secure mode"); + "HBA with DeviceId 0x%04x, subsystem VendorId 0x%04x," + " subsystem DeviceId 0x%04x is Tampered", + pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); return 1; case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: - dev_printk(KERN_INFO, &pdev->dev, - "HBA is in Configurable Secure mode"); + dev_printk(KERN_ERR, &pdev->dev, + "HBA with DeviceId 0x%04x, subsystem VendorId 0x%04x," + " subsystem DeviceId 0x%04x is in Configurable Secured Mode", + pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); + /* fall through */ case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; @@ -15110,10 +15683,21 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } } - if(multipath_on_hba == -1 || multipath_on_hba == 0) - ioc->multipath_on_hba = 0; - else - ioc->multipath_on_hba = 1; + switch (ioc->is_gen35_ioc) { + case 0: + if(multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + break; + case 1: + if(multipath_on_hba == -1 || multipath_on_hba > 0) + ioc->multipath_on_hba = 1; + else + ioc->multipath_on_hba = 0; + default: + break; + } break; default: @@ -15145,10 +15729,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->logging_level = logging_level; ioc->schedule_dead_ioc_flush_running_cmds = &mpt3sas_scsih_flush_running_cmds; + ioc->enable_sdev_max_qd = enable_sdev_max_qd; + + /* Host waits for six seconds */ + ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; /* * Enable MEMORY MOVE support flag. */ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; + /* Enable ADDITIONAL QUERY support flag. 
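The multipath_on_hba handling above turns the module parameter into a tri-state: -1 asks for the driver default, which is now on for Gen3.5 IOCs and off for everything older, while an explicit 0 or positive value always wins. Roughly, as in this illustrative helper (resolve_multipath() is not driver code, and it ignores negative values other than -1):

    /* Tri-state parameter: -1 = per-generation default, 0 = off, >0 = on. */
    static int resolve_multipath(int param, int is_gen35_ioc)
    {
        if (param == -1)                  /* auto: on for Gen3.5 only */
            return is_gen35_ioc ? 1 : 0;
        return param > 0 ? 1 : 0;         /* explicit user choice wins */
    }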
*/ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; /* misc semaphores and spin locks */ mutex_init(&ioc->reset_in_progress_mutex); @@ -15201,6 +15791,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } #endif +#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR >= 8)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))) + ioc->drv_internal_flags |= MPT_DRV_INTERNAL_BITMAP_BLK_MQ; +#elif ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 7) && (RHEL_MINOR >= 2)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))) + if(shost->use_blk_mq) + ioc->drv_internal_flags |= MPT_DRV_INTERNAL_BITMAP_BLK_MQ; +#endif + + #ifdef RHEL_MAJOR #if (RHEL_MAJOR == 6 && RHEL_MINOR >= 2) if(!host_lock_mode) @@ -15359,6 +15959,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) && (sata_smart_polling)) mpt3sas_base_start_smart_polling(ioc); #endif + + mpt3sas_setup_debugfs(ioc); return 0; out_add_shost_fail: @@ -15447,6 +16049,8 @@ scsih_resume(struct pci_dev *pdev) if (r) return r; + printk(MPT3SAS_INFO_FMT "issuing hard reset as part of OS resume\n", + ioc->name); mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); scsi_unblock_requests(shost); mpt3sas_base_start_watchdog(ioc); @@ -15543,6 +16147,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev) ioc->pci_error_recovery = 0; } + printk(MPT3SAS_INFO_FMT + "issuing hard reset as part of PCI slot reset\n", ioc->name); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); printk(MPT3SAS_WARN_FMT "hard reset: %s\n", ioc->name, @@ -15575,7 +16181,12 @@ scsih_pci_resume(struct pci_dev *pdev) printk(MPT3SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) || \ + (defined(RHEL_MAJOR) && (RHEL_MAJOR == 8) && (RHEL_MINOR >= 3)) \ + || (defined(CONFIG_SUSE_KERNEL) && ((CONFIG_SUSE_VERSION == 15) \ + && (CONFIG_SUSE_PATCHLEVEL >= 2)))) + pci_aer_clear_nonfatal_status(pdev); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) pci_cleanup_aer_uncorrect_error_status(pdev); #endif mpt3sas_base_start_watchdog(ioc); @@ -15735,26 +16346,20 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ - { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, - PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, - PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, PCI_ANY_ID, PCI_ANY_ID }, - /* Marlin, Mercator & Marut ~ 3716, 3616 and 3708*/ + /* Marlin, Mercator ~ 3716, 3616 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3716, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3708, - PCI_ANY_ID, PCI_ANY_ID }, /* Atlas PCIe Switch Management Port*/ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_PEX88000, @@ -15869,6 +16474,8 @@ scsih_init(void) mpt3sas_base_stm_initialize_callback_handler(); #endif + mpt3sas_init_debugfs(); + return 0; } @@ -15921,6 +16528,7 @@ scsih_exit(void) raid_class_release(mpt2sas_raid_template); #endif sas_release_transport(mpt3sas_transport_template); + 
mpt3sas_exit_debugfs(); } /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index ede8194ef366..598486b36d69 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -132,6 +132,26 @@ _transport_sas_node_find_by_sas_phy(struct MPT3SAS_ADAPTER *ioc, } #endif +/** + * _transport_get_port_id_by_sas_phy - get zone's port id that Phy belongs to + * @phy: sas_phy object + * + * Returns Port number. + */ +static inline u8 +_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; +#if defined(SAS_PHY_HOSTDATA) + struct hba_port *port = phy->hostdata; + if (port) + port_id = port->port_id; + else + BUG(); +#endif + return port_id; +} + static int _transport_find_parent_node(struct MPT3SAS_ADAPTER *ioc, struct sas_phy *phy) @@ -159,6 +179,49 @@ _transport_find_parent_node(struct MPT3SAS_ADAPTER *ioc, return 0; } +/** + * _transport_get_port_id_by_rphy - Get Port number from rphy object + * @ioc: per adapter object + * @rphy: sas_rphy object + * + * Returns Port number. + */ +u8 +_transport_get_port_id_by_rphy(struct MPT3SAS_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct _sas_node *sas_expander; + struct _sas_device *sas_device; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->rphy == rphy) { + port_id = sas_expander->port->port_id; + break; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + port_id = sas_device->port->port_id; + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return port_id; +} + /** * _transport_convert_phy_link_rate - * @link_rate: link rate returned from mpt firmware @@ -377,7 +440,7 @@ struct rep_manu_reply { */ static int _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, - u64 sas_address, struct sas_expander_device *edev) + u64 sas_address, struct sas_expander_device *edev, u8 port_id) { Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; @@ -448,7 +511,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; - mpi_request->PhysicalPort = 0xFF; + mpi_request->PhysicalPort = port_id; mpi_request->SASAddress = cpu_to_le64(sas_address); mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); psge = &mpi_request->SGL; @@ -740,6 +803,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, #if defined(MPT_WIDE_PORT_API) struct sas_port *port; #endif + struct virtual_phy *vphy = NULL; if (!hba_port) { printk(MPT3SAS_ERR_FMT "failure at %s:%d/%s()!\n", @@ -793,10 +857,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, continue; list_add_tail(&sas_node->phy[i].port_siblings, &mpt3sas_port->phy_list); - if (sas_node->handle <= ioc->sas_hba.num_phys) { - hba_port->phy_mask |= (1 << i); - } mpt3sas_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if
(!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + + vphy = mpt3sas_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + printk(MPT3SAS_ERR_FMT + "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + } } if (!mpt3sas_port->num_phys) { @@ -847,8 +922,14 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { rphy = sas_end_device_alloc(port); sas_device->rphy=rphy; - if (sas_node->handle <= ioc->sas_hba.num_phys) - hba_port->sas_address = sas_device->sas_address; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = + sas_device->sas_address; + else + vphy->sas_address = + sas_device->sas_address; + } } else { rphy = sas_expander_alloc(port, mpt3sas_port->remote_identify.device_type); @@ -876,11 +957,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, sas_device->pend_sas_rphy_add = 0; sas_device_put(sas_device); } - if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &rphy->dev, "add: handle(0x%04x), " - "sas_addr(0x%016llx)\n", handle, - (unsigned long long) - mpt3sas_port->remote_identify.sas_address); + + dev_printk(KERN_INFO, &rphy->dev, + "%s: added: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->rphy = rphy; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); @@ -894,7 +976,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) _transport_expander_report_manufacture(ioc, mpt3sas_port->remote_identify.sas_address, - rphy_to_expander_device(rphy)); + rphy_to_expander_device(rphy), hba_port->port_id); #endif return mpt3sas_port; @@ -932,6 +1014,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, struct _sas_phy *mpt3sas_phy, *next_phy; #endif struct hba_port *hba_port, *hba_port_next=NULL; + struct virtual_phy *vphy, *vphy_next=NULL; if (!port) return; @@ -962,17 +1045,52 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, if ((sas_node->handle <= ioc->sas_hba.num_phys) && (ioc->multipath_on_hba)) { + + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + printk(MPT3SAS_INFO_FMT + "remove vphy entry: %p of port:%p,from %d" + " port's vphys list\n", ioc->name, + vphy, port, port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + + if (!port->vphys_mask && !port->sas_address) { + printk(MPT3SAS_INFO_FMT + "remove hba_port entry: %p port: %d from" + " hba_port list\n", ioc->name, + port, port->port_id); + list_del(&port->list); + kfree(port); + } + } + list_for_each_entry_safe(hba_port, hba_port_next, - &ioc->port_table_list, list) { + &ioc->port_table_list, list) { if (hba_port != port) continue; if (hba_port->sas_address != sas_address) continue; - printk(MPT3SAS_INFO_FMT "remove hba_port entry: %p" - " port: %d from hba_port list\n", ioc->name, - hba_port, hba_port->port_id); - list_del(&hba_port->list); - kfree(hba_port); + if (!port->vphys_mask) { + printk(MPT3SAS_INFO_FMT + "remove hba_port entry: %p port: %d from" + " hba_port list\n", ioc->name, + hba_port, hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else { + 
printk(MPT3SAS_INFO_FMT + "clearing sas_address from hba_port" + " entry: %p port: %d from hba_port list\n", + ioc->name, hba_port, hba_port->port_id); + port->sas_address = 0; + } + break; } } @@ -1000,6 +1118,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, } if(!ioc->remove_host) sas_port_delete(mpt3sas_port->port); + printk(MPT3SAS_INFO_FMT "%s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); #else if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) dev_printk(KERN_INFO, &mpt3sas_port->rphy->dev, @@ -1007,6 +1127,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, (unsigned long long)sas_address); if(!ioc->remove_host) sas_rphy_delete(mpt3sas_port->rphy); + printk(MPT3SAS_INFO_FMT "%s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); #endif kfree(mpt3sas_port); } @@ -1354,7 +1476,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; - mpi_request->PhysicalPort = 0xFF; + mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy); mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); @@ -1646,7 +1768,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; - mpi_request->PhysicalPort = 0xFF; + mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy); mpi_request->VF_ID = 0; /* TODO */ mpi_request->VP_ID = 0; mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); @@ -2141,7 +2263,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; - mpi_request->PhysicalPort = 0xFF; + mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy); mpi_request->SASAddress = (rphy) ? cpu_to_le64(rphy->identify.sas_address) : cpu_to_le64(ioc->sas_hba.sas_address); @@ -2369,7 +2491,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; - mpi_request->PhysicalPort = 0xFF; + mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy); mpi_request->SASAddress = (rphy) ? cpu_to_le64(rphy->identify.sas_address) : cpu_to_le64(ioc->sas_hba.sas_address); diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index 5eab03e8ac99..663bb8df4c85 100755 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -1,436 +1,468 @@ -/* - * This module provides common API to set Diagnostic trigger for MPT - * (Message Passing Technology) based controllers - * - * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c - * Copyright (C) 2013-2018 LSI Corporation - * Copyright (C) 2013-2018 Avago Technologies - * Copyright (C) 2013-2018 Broadcom Inc. - * (mailto:MPT-FusionLinux.pdl@broadcom.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
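The port-removal hunks above encode one ownership rule: an hba_port may be freed only when no directly attached device (sas_address) and no virtual phy (vphys_mask) still references it. Removing a vSES device clears its vphy bit, removing a direct end device clears sas_address, and the structure is kfree()d only once both are zero. A condensed sketch of that rule; struct port and port_drop_ref() are illustrative, not driver types:

    #include <stdbool.h>
    #include <stdint.h>

    struct port {
        uint64_t sas_address;  /* directly attached device, 0 if none */
        uint32_t vphys_mask;   /* one bit per virtual phy still alive */
    };

    /* Drop one reference; returns true when the port can be freed. */
    static bool port_drop_ref(struct port *p, uint32_t vphy_bit)
    {
        if (vphy_bit)
            p->vphys_mask &= ~vphy_bit;  /* a vSES device went away    */
        else
            p->sas_address = 0;          /* direct end device removed  */
        return p->vphys_mask == 0 && p->sas_address == 0;
    }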
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * NO WARRANTY - * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT - * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is - * solely responsible for determining the appropriateness of using and - * distributing the Program and assumes all risks associated with its - * exercise of rights under this Agreement, including but not limited to - * the risks and costs of program errors, damage to or loss of data, - * programs or equipment, and unavailability or interruption of operations. - - * DISCLAIMER OF LIABILITY - * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED - * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES - - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, - * USA. - */ - -#include <linux/version.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/compat.h> -#include <linux/poll.h> - -#include <linux/io.h> -#include <asm/uaccess.h> - -#include "mpt3sas_base.h" - -/** - * _mpt3sas_raise_sigio - notifiy app - * @ioc: per adapter object - * @event_data: - */ -static void -_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, - struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) -{ - Mpi2EventNotificationReply_t *mpi_reply; - u16 sz, event_data_sz; - unsigned long flags; - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", - ioc->name, __func__)); - - sz = offsetof(Mpi2EventNotificationReply_t, EventData) + - sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4; - mpi_reply = kzalloc(sz, GFP_KERNEL); - if (!mpi_reply) - goto out; - mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED); - event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4; - mpi_reply->EventDataLength = cpu_to_le16(event_data_sz); - memcpy(&mpi_reply->EventData, event_data, - sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: add to driver " - "event log\n", ioc->name, __func__)); - mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); - kfree(mpi_reply); - out: - - /* clearing the diag_trigger_active flag */ - spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: clearing " - "diag_trigger_active flag\n", ioc->name, __func__)); - ioc->diag_trigger_active = 0; - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} - -/** - * 
mpt3sas_process_trigger_data - process the event data for the trigger - * @ioc: per adapter object - * @event_data: - */ -void -mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, - struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) -{ - u8 issue_reset = 0; - u32 *trig_data = (u32*)&event_data->u.master; - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", - ioc->name, __func__)); - - /* release the diag buffer trace */ - if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: release " - "trace diag buffer\n", ioc->name, __func__)); - - /* add a log message so that user knows which event caused the release */ - printk(MPT3SAS_INFO_FMT "%s: Releasing the trace buffer." - "Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", ioc->name, - __func__, event_data->trigger_type, trig_data[0], trig_data[1]); - - mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, - &issue_reset); - } - - _mpt3sas_raise_sigio(ioc, event_data); - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} - -/** - * mpt3sas_trigger_master - Master trigger handler - * @ioc: per adapter object - * @trigger_bitmask: - * - */ -void -mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) -{ - struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; - unsigned long flags; - u8 found_match = 0; - - spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - - if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || - trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) - goto by_pass_checks; - - /* check to see if trace buffers are currently registered */ - if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - /* check to see if trace buffers are currently released */ - if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_RELEASED) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - by_pass_checks: - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " - "trigger_bitmask = 0x%08x\n", ioc->name, __func__, - trigger_bitmask)); - - /* don't send trigger if an trigger is currently active */ - if (ioc->diag_trigger_active) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - goto out; - } - - /* check for the trigger condition */ - if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { - found_match = 1; - ioc->diag_trigger_active = 1; - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " - "diag_trigger_active flag\n", ioc->name, __func__)); - } - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - - if (!found_match) - goto out; - - memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - event_data.trigger_type = MPT3SAS_TRIGGER_MASTER; - event_data.u.master.MasterData = trigger_bitmask; - - if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || - trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) - _mpt3sas_raise_sigio(ioc, &event_data); - else - mpt3sas_send_trigger_data_event(ioc, &event_data); - - out: - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} - -/** - * mpt3sas_trigger_event - Event trigger handler - * @ioc: per adapter object - * @event: - * @log_entry_qualifier: - * - */ -void -mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, - u16 log_entry_qualifier) -{ - struct SL_WH_TRIGGERS_EVENT_DATA_T 
event_data; - struct SL_WH_EVENT_TRIGGER_T *event_trigger; - int i; - unsigned long flags; - u8 found_match; - - spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - - /* check to see if trace buffers are currently registered */ - if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - /* check to see if trace buffers are currently released */ - if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_RELEASED) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " - "event = 0x%04x, log_entry_qualifier = 0x%04x\n", ioc->name, - __func__, event, log_entry_qualifier)); - - /* don't send trigger if an trigger is currently active */ - if (ioc->diag_trigger_active) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - goto out; - } - - /* check for the trigger condition */ - event_trigger = ioc->diag_trigger_event.EventTriggerEntry; - for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries - && !found_match; i++, event_trigger++) { - if (event_trigger->EventValue != event) - continue; - if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { - if (event_trigger->LogEntryQualifier == - log_entry_qualifier) - found_match = 1; - continue; - } - found_match = 1; - ioc->diag_trigger_active = 1; - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " - "diag_trigger_active flag\n", ioc->name, __func__)); - } - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - - if (!found_match) - goto out; - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " - "diag_trigger_active flag\n", ioc->name, __func__)); - memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; - event_data.u.event.EventValue = event; - event_data.u.event.LogEntryQualifier = log_entry_qualifier; - mpt3sas_send_trigger_data_event(ioc, &event_data); - out: - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} - -/** - * mpt3sas_trigger_scsi - SCSI trigger handler - * @ioc: per adapter object - * @sense_key: - * @asc: - * @ascq: - * - */ -void -mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, - u8 ascq) -{ - struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; - struct SL_WH_SCSI_TRIGGER_T *scsi_trigger; - int i; - unsigned long flags; - u8 found_match; - - spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - - /* check to see if trace buffers are currently registered */ - if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - /* check to see if trace buffers are currently released */ - if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_RELEASED) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " - "sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", ioc->name, - __func__, sense_key, asc, ascq)); - - /* don't send trigger if an trigger is currently active */ - if (ioc->diag_trigger_active) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - goto out; - } - - /* check for the trigger condition */ - scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry; - for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries 
- && !found_match; i++, scsi_trigger++) { - if (scsi_trigger->SenseKey != sense_key) - continue; - if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc)) - continue; - if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq)) - continue; - found_match = 1; - ioc->diag_trigger_active = 1; - } - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - - if (!found_match) - goto out; - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " - "diag_trigger_active flag\n", ioc->name, __func__)); - memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; - event_data.u.scsi.SenseKey = sense_key; - event_data.u.scsi.ASC = asc; - event_data.u.scsi.ASCQ = ascq; - mpt3sas_send_trigger_data_event(ioc, &event_data); - out: - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} - -/** - * mpt3sas_trigger_mpi - MPI trigger handler - * @ioc: per adapter object - * @ioc_status: - * @loginfo: - * - */ -void -mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) -{ - struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; - struct SL_WH_MPI_TRIGGER_T *mpi_trigger; - int i; - unsigned long flags; - u8 found_match; - - spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - - /* check to see if trace buffers are currently registered */ - if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - /* check to see if trace buffers are currently released */ - if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & - MPT3_DIAG_BUFFER_IS_RELEASED) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - return; - } - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " - "ioc_status = 0x%04x, loginfo = 0x%08x\n", ioc->name, __func__, - ioc_status, loginfo)); - - /* don't send trigger if an trigger is currently active */ - if (ioc->diag_trigger_active) { - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - goto out; - } - - /* check for the trigger condition */ - mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry; - for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries - && !found_match; i++, mpi_trigger++) { - if (mpi_trigger->IOCStatus != ioc_status) - continue; - if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF || - mpi_trigger->IocLogInfo == loginfo)) - continue; - found_match = 1; - ioc->diag_trigger_active = 1; - } - spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); - - if (!found_match) - goto out; - - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " - "diag_trigger_active flag\n", ioc->name, __func__)); - memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); - event_data.trigger_type = MPT3SAS_TRIGGER_MPI; - event_data.u.mpi.IOCStatus = ioc_status; - event_data.u.mpi.IocLogInfo = loginfo; - mpt3sas_send_trigger_data_event(ioc, &event_data); - out: - dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, - __func__)); -} +/* + * This module provides common API to set Diagnostic trigger for MPT + * (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. 
+ * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
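For reference: every trigger handler in this new file runs the same guard sequence, under ioc->diag_trigger_lock, before it arms a trigger. A minimal sketch of that shared logic (the helper itself is invented for illustration; the fields and flags are the ones this file uses):

static bool trigger_should_fire_sketch(struct MPT3SAS_ADAPTER *ioc)
{
        /* Caller holds ioc->diag_trigger_lock. */

        /* No trace buffer registered: nothing to release, so never fire. */
        if (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
            MPT3_DIAG_BUFFER_IS_REGISTERED))
                return false;

        /* Buffer already released: an earlier trigger has fired. */
        if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
            MPT3_DIAG_BUFFER_IS_RELEASED)
                return false;

        /* Only one trigger may be in flight at a time. */
        if (ioc->diag_trigger_active)
                return false;

        return true;
}

The one exception is mpt3sas_trigger_master(), which bypasses the first two checks for firmware-fault and adapter-reset conditions so those events are never lost.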
+ */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/compat.h> +#include <linux/poll.h> + +#include <linux/io.h> +#include <asm/uaccess.h> + +#include "mpt3sas_base.h" + +/** + * _mpt3sas_raise_sigio - notify app + * @ioc: per adapter object + * @event_data: event data for the trigger + */ +static void +_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + Mpi2EventNotificationReply_t *mpi_reply; + u16 sz, event_data_sz; + unsigned long flags; + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", + ioc->name, __func__)); + + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4; + mpi_reply = kzalloc(sz, GFP_KERNEL); + if (!mpi_reply) + goto out; + mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED); + event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4; + mpi_reply->EventDataLength = cpu_to_le16(event_data_sz); + memcpy(&mpi_reply->EventData, event_data, + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: add to driver " + "event log\n", ioc->name, __func__)); + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + kfree(mpi_reply); + out: + + /* clearing the diag_trigger_active flag */ + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: clearing " + "diag_trigger_active flag\n", ioc->name, __func__)); + ioc->diag_trigger_active = 0; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_process_trigger_data - process the event data for the trigger + * @ioc: per adapter object + * @event_data: event data for the trigger + */ +void +mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + u8 issue_reset = 0; + u32 *trig_data = (u32*)&event_data->u.master; + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter\n", + ioc->name, __func__)); + + /* release the diag buffer trace */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: release " + "trace diag buffer\n", ioc->name, __func__)); + + /* add a log message so that user knows which event caused the release */ + printk(MPT3SAS_INFO_FMT "%s: Releasing the trace buffer. "
+ "Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", ioc->name, + __func__, event_data->trigger_type, trig_data[0], trig_data[1]); + + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_TRIGGER; + if (event_data) { + ioc->htb_rel.trigger_type = event_data->trigger_type; + switch (event_data->trigger_type) { + case MPT3SAS_TRIGGER_SCSI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.scsi, + sizeof(struct SL_WH_SCSI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MPI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.mpi, + sizeof(struct SL_WH_MPI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MASTER: + ioc->htb_rel.trigger_info_dwords[0] = + event_data->u.master.MasterData; + break; + case MPT3SAS_TRIGGER_EVENT: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.event, + sizeof(struct SL_WH_EVENT_TRIGGER_T)); + break; + default: + pr_err("%s: %d - Is not a valid Trigger type\n", + ioc->name, event_data->trigger_type); + break; + } + } + _mpt3sas_raise_sigio(ioc, event_data); + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_master - Master trigger handler + * @ioc: per adapter object + * @trigger_bitmask: + * + */ +void +mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + unsigned long flags; + u8 found_match = 0; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) + goto by_pass_checks; + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + by_pass_checks: + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "trigger_bitmask = 0x%08x\n", ioc->name, __func__, + trigger_bitmask)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MASTER; + event_data.u.master.MasterData = trigger_bitmask; + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) { + ioc->htb_rel.trigger_type = MPT3SAS_TRIGGER_MASTER; + ioc->htb_rel.trigger_info_dwords[0] = trigger_bitmask; + if (ioc->reset_from_user) + ioc->htb_rel.trigger_info_dwords[1] = + MPT_DIAG_RESET_ISSUED_BY_USER; + _mpt3sas_raise_sigio(ioc, &event_data); + } else + mpt3sas_send_trigger_data_event(ioc, &event_data); + + out: + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** 
+ * mpt3sas_trigger_event - Event trigger handler + * @ioc: per adapter object + * @event: MPI firmware event code + * @log_entry_qualifier: log entry qualifier (MPI2_EVENT_LOG_ENTRY_ADDED only) + * + */ +void +mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_EVENT_TRIGGER_T *event_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "event = 0x%04x, log_entry_qualifier = 0x%04x\n", ioc->name, + __func__, event, log_entry_qualifier)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + event_trigger = ioc->diag_trigger_event.EventTriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries + && !found_match; i++, event_trigger++) { + if (event_trigger->EventValue != event) + continue; + if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { + if (event_trigger->LogEntryQualifier == + log_entry_qualifier) + found_match = 1; + continue; + } + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; + event_data.u.event.EventValue = event; + event_data.u.event.LogEntryQualifier = log_entry_qualifier; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_scsi - SCSI trigger handler + * @ioc: per adapter object + * @sense_key: SCSI sense key + * @asc: additional sense code + * @ascq: additional sense code qualifier + * + */ +void +mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, + u8 ascq) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_SCSI_TRIGGER_T *scsi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", ioc->name, + __func__, sense_key, asc, ascq)); + + /* don't send trigger if a trigger is currently active */ + if
(ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries + && !found_match; i++, scsi_trigger++) { + if (scsi_trigger->SenseKey != sense_key) + continue; + if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc)) + continue; + if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; + event_data.u.scsi.SenseKey = sense_key; + event_data.u.scsi.ASC = asc; + event_data.u.scsi.ASCQ = ascq; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_mpi - MPI trigger handler + * @ioc: per adapter object + * @ioc_status: MPI IOCStatus + * @loginfo: MPI IocLogInfo + * + */ +void +mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_MPI_TRIGGER_T *mpi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: enter - " + "ioc_status = 0x%04x, loginfo = 0x%08x\n", ioc->name, __func__, + ioc_status, loginfo)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries + && !found_match; i++, mpi_trigger++) { + if (mpi_trigger->IOCStatus != ioc_status) + continue; + if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF || + mpi_trigger->IocLogInfo == loginfo)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: setting " + "diag_trigger_active flag\n", ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MPI; + event_data.u.mpi.IOCStatus = ioc_status; + event_data.u.mpi.IocLogInfo = loginfo; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, printk(MPT3SAS_INFO_FMT "%s: exit\n", ioc->name, + __func__)); +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h index be8e748f46f0..1549b9f3099f 100755 ---
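The three table-driven handlers above share one matching scheme: an entry fires on exact equality, with 0xFF acting as a wildcard for ASC/ASCQ and 0xFFFFFFFF for IocLogInfo, while event entries additionally compare LogEntryQualifier when the event is MPI2_EVENT_LOG_ENTRY_ADDED. A compact restatement of the SCSI and MPI predicates (illustrative helpers, assuming the SL_WH_* layouts from mpt3sas_trigger_diag.h):

static bool scsi_trigger_matches(const struct SL_WH_SCSI_TRIGGER_T *t,
        u8 sense_key, u8 asc, u8 ascq)
{
        return t->SenseKey == sense_key &&           /* sense key: exact */
               (t->ASC == 0xFF || t->ASC == asc) &&  /* 0xFF = any ASC */
               (t->ASCQ == 0xFF || t->ASCQ == ascq); /* 0xFF = any ASCQ */
}

static bool mpi_trigger_matches(const struct SL_WH_MPI_TRIGGER_T *t,
        u16 ioc_status, u32 loginfo)
{
        return t->IOCStatus == ioc_status &&         /* status: exact */
               (t->IocLogInfo == 0xFFFFFFFF ||       /* wildcard loginfo */
                t->IocLogInfo == loginfo);
}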
a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -1,195 +1,195 @@ -/* - * This is the Fusion MPT base driver providing common API layer interface - * to set Diagnostic triggers for MPT (Message Passing Technology) based - * controllers - * - * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2013-2018 LSI Corporation - * Copyright (C) 2013-2018 Avago Technologies - * Copyright (C) 2013-2018 Broadcom Inc. - * (mailto:MPT-FusionLinux.pdl@broadcom.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * NO WARRANTY - * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT - * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is - * solely responsible for determining the appropriateness of using and - * distributing the Program and assumes all risks associated with its - * exercise of rights under this Agreement, including but not limited to - * the risks and costs of program errors, damage to or loss of data, - * programs or equipment, and unavailability or interruption of operations. - - * DISCLAIMER OF LIABILITY - * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED - * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES - - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, - * USA. - */ - /* Diagnostic Trigger Configuration Data Structures */ - -#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED -#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED - -/* limitation on number of entries */ -#define NUM_VALID_ENTRIES (20) - -/* trigger types */ -#define MPT3SAS_TRIGGER_MASTER (1) -#define MPT3SAS_TRIGGER_EVENT (2) -#define MPT3SAS_TRIGGER_SCSI (3) -#define MPT3SAS_TRIGGER_MPI (4) - -/* trigger names */ -#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master" -#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event" -#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi" -#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi" - -/* master trigger bitmask */ -#define MASTER_TRIGGER_FW_FAULT (0x00000001) -#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002) -#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004) -#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008) - -/* fake firmware event for tigger */ -#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E) - -/** - * MasterTrigger is a single U32 passed to/from sysfs. - * - * Bit Flags (enables) include: - * 1. FW Faults - * 2. 
Adapter Reset issued by driver - * 3. TMs - * 4. Device Remove Event sent by FW - */ - -struct SL_WH_MASTER_TRIGGER_T { - uint32_t MasterData; -}; - -/** - * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element - * @EventValue: Event Code to trigger on - * @LogEntryQualifier: Type of FW event that logged (Log Entry Added Event only) - * - * Defines an event that should induce a DIAG_TRIGGER driver event if observed. - */ -struct SL_WH_EVENT_TRIGGER_T { - uint16_t EventValue; - uint16_t LogEntryQualifier; -}; - -/** - * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a - * list of Event Triggers to be monitored for. - * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this - * structure. - * @EventTriggerEntry: List of Event trigger elements. - * - * This binary structure is transferred via sysfs to get/set Event Triggers - * in the Linux Driver. - */ - -struct SL_WH_EVENT_TRIGGERS_T { - uint32_t ValidEntries; - struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES]; -}; - -/** - * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element - * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for - * wildcard. - * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard - * @SenseKey: SCSI Sense Key - * - * Defines a sense key (single or many variants) that should induce a - * DIAG_TRIGGER driver event if observed. - */ -struct SL_WH_SCSI_TRIGGER_T { - U8 ASCQ; - U8 ASC; - U8 SenseKey; - U8 Reserved; -}; - -/** - * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a - * list of SCSI sense codes that should trigger a DIAG_SERVICE event when - * observed. - * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this - * structure. - * @SCSITriggerEntry: List of SCSI Sense Code trigger elements. - * - * This binary structure is transferred via sysfs to get/set SCSI Sense Code - * Triggers in the Linux Driver. - */ -struct SL_WH_SCSI_TRIGGERS_T { - uint32_t ValidEntries; - struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES]; -}; - -/** - * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element - * @IOCStatus: MPI IOCStatus - * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard - * - * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER - * driver event if observed. - */ -struct SL_WH_MPI_TRIGGER_T { - uint16_t IOCStatus; - uint16_t Reserved; - uint32_t IocLogInfo; -}; - -/** - * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a - * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE - * event when observed. - * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this - * structure. - * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements. - * - * This binary structure is transferred via sysfs to get/set MPI Error Triggers - * in the Linux Driver. 
- */ -struct SL_WH_MPI_TRIGGERS_T { - uint32_t ValidEntries; - struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES]; -}; - -/** - * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger - * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX) - * @u: trigger condition that caused trigger to be sent - */ -struct SL_WH_TRIGGERS_EVENT_DATA_T { - uint32_t trigger_type; - union { - struct SL_WH_MASTER_TRIGGER_T master; - struct SL_WH_EVENT_TRIGGER_T event; - struct SL_WH_SCSI_TRIGGER_T scsi; - struct SL_WH_MPI_TRIGGER_T mpi; - } u; -}; -#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */ +/* + * This is the Fusion MPT base driver providing common API layer interface + * to set Diagnostic triggers for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
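The structures re-added below form a binary sysfs protocol: user space fills in an SL_WH_*_TRIGGERS_T blob and writes it to the matching diag_trigger_* attribute. A hedged user-space sketch (the host0 path is an assumption based on the *_FILE_NAME defines; the local struct mirrors SL_WH_SCSI_TRIGGERS_T with NUM_VALID_ENTRIES = 20):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct scsi_trigger  { uint8_t ASCQ, ASC, SenseKey, Reserved; };
struct scsi_triggers {
        uint32_t ValidEntries;
        struct scsi_trigger entry[20];  /* NUM_VALID_ENTRIES */
};

int main(void)
{
        struct scsi_triggers t;
        FILE *f;

        memset(&t, 0, sizeof(t));
        t.ValidEntries = 1;
        t.entry[0].SenseKey = 0x03;     /* MEDIUM ERROR */
        t.entry[0].ASC = 0xFF;          /* wildcard: any ASC */
        t.entry[0].ASCQ = 0xFF;         /* wildcard: any ASCQ */

        f = fopen("/sys/class/scsi_host/host0/diag_trigger_scsi", "wb");
        if (!f)
                return 1;
        fwrite(&t, sizeof(t), 1, f);    /* one-shot binary write */
        return fclose(f) ? 1 : 0;
}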
+ */ + /* Diagnostic Trigger Configuration Data Structures */ + +#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED +#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED + +/* limitation on number of entries */ +#define NUM_VALID_ENTRIES (20) + +/* trigger types */ +#define MPT3SAS_TRIGGER_MASTER (1) +#define MPT3SAS_TRIGGER_EVENT (2) +#define MPT3SAS_TRIGGER_SCSI (3) +#define MPT3SAS_TRIGGER_MPI (4) + +/* trigger names */ +#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master" +#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event" +#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi" +#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi" + +/* master trigger bitmask */ +#define MASTER_TRIGGER_FW_FAULT (0x00000001) +#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002) +#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004) +#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008) + +/* fake firmware event for trigger */ +#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E) + +/** + * MasterTrigger is a single U32 passed to/from sysfs. + * + * Bit Flags (enables) include: + * 1. FW Faults + * 2. Adapter Reset issued by driver + * 3. TMs + * 4. Device Remove Event sent by FW + */ + +struct SL_WH_MASTER_TRIGGER_T { + uint32_t MasterData; +}; + +/** + * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element + * @EventValue: Event Code to trigger on + * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added Event only) + * + * Defines an event that should induce a DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_EVENT_TRIGGER_T { + uint16_t EventValue; + uint16_t LogEntryQualifier; +}; + +/** + * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of Event Triggers to be monitored for. + * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this + * structure. + * @EventTriggerEntry: List of Event trigger elements. + * + * This binary structure is transferred via sysfs to get/set Event Triggers + * in the Linux Driver. + */ + +struct SL_WH_EVENT_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element + * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for + * wildcard. + * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard + * @SenseKey: SCSI Sense Key + * + * Defines a sense key (single or many variants) that should induce a + * DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_SCSI_TRIGGER_T { + U8 ASCQ; + U8 ASC; + U8 SenseKey; + U8 Reserved; +}; + +/** + * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of SCSI sense codes that should trigger a DIAG_SERVICE event when + * observed. + * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this + * structure. + * @SCSITriggerEntry: List of SCSI Sense Code trigger elements. + * + * This binary structure is transferred via sysfs to get/set SCSI Sense Code + * Triggers in the Linux Driver. + */ +struct SL_WH_SCSI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element + * @IOCStatus: MPI IOCStatus + * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard + * + * Defines an MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER + * driver event if observed.
+ */ +struct SL_WH_MPI_TRIGGER_T { + uint16_t IOCStatus; + uint16_t Reserved; + uint32_t IocLogInfo; +}; + +/** + * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE + * event when observed. + * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this + * structure. + * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements. + * + * This binary structure is transferred via sysfs to get/set MPI Error Triggers + * in the Linux Driver. + */ +struct SL_WH_MPI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger + * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX) + * @u: trigger condition that caused trigger to be sent + */ +struct SL_WH_TRIGGERS_EVENT_DATA_T { + uint32_t trigger_type; + union { + struct SL_WH_MASTER_TRIGGER_T master; + struct SL_WH_EVENT_TRIGGER_T event; + struct SL_WH_SCSI_TRIGGER_T scsi; + struct SL_WH_MPI_TRIGGER_T mpi; + } u; +}; +#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h new file mode 100755 index 000000000000..f5aacc8efb95 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h @@ -0,0 +1,157 @@ + +/* + * This is the Fusion MPT base driver providing common API layer interface + * to store diag trigger values into persistent driver triggers pages + * for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2013-2020 LSI Corporation + * Copyright (C) 2013-2020 Avago Technologies + * Copyright (C) 2013-2020 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
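The new header introduced here persists the same triggers in firmware config pages: page 0 carries validity flags, and pages 1 through 4 store the master, MPI-event, SCSI-sense and IOCStatus/LogInfo entry tables defined just below. A tiny sketch of gating on page 0 (hypothetical helper; the flag and typedef names come from this file, and the le16_to_cpu conversion is an assumption about how the driver reads the page):

static bool master_triggers_persisted_sketch(
        const Mpi26DriverTriggerPage0_t *pg0)
{
        /* Page 1 contents are meaningful only if page 0 marks them valid. */
        return le16_to_cpu(pg0->TriggerFlags) &
               MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID;
}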
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "mpi/mpi2_cnfg.h" + +#ifndef MPI2_TRIGGER_PAGES_H +#define MPI2_TRIGGER_PAGES_H + +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0) + +#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 TriggerFlags; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + U32 Reserved0xC[61]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, + Mpi26DriverTriggerPage0_t, MPI2_POINTER pMpi26DriverTriggerPage0_t; + +/* Trigger Flags */ +#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001) +#define MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID (0x0002) +#define MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID (0x0004) +#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008) + + +#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY +{ + U32 MasterTriggerFlags; +} MPI26_DRIVER_MASTER_TIGGER_ENTRY, MPI2_POINTER PTR_MPI26_DRIVER_MASTER_TIGGER_ENTRY; + +#define MPI26_MAX_MASTER_TRIGGERS (1) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMasterTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, + Mpi26DriverTriggerPage1_t, MPI2_POINTER pMpi26DriverTriggerPage1_t; + +/* Master Trigger Flags */ +#define MPI26_DRIVER_TRIGGER1_FLAG_FW_FAULT (0x00000001) +#define MPI26_DRIVER_TRIGGER1_FLAG_DIAG_RESET (0x00000002) +#define MPI26_DRIVER_TRIGGER1_FLAG_TASK_MGMT (0x00000004) +#define MPI26_DRIVER_TRIGGER1_FLAG_DEVICE_REMOVAL (0x00000008) + + +#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY +{ + U16 MPIEventCode; /* 0x00 */ + U16 MPIEventCodeSpecific; /* 0x02 */ +} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY, MPI2_POINTER PTR_MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY; + +#define MPI26_MAX_MPI_EVENT_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMPIEventTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, + Mpi26DriverTriggerPage2_t, MPI2_POINTER pMpi26DriverTriggerPage2_t; + + +#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY +{ + U8 ASCQ; /* 0x00 */ + U8 ASC; /* 0x01 */ + U8 SenseKey; /* 0x02 */ 
+ U8 Reserved; /* 0x03 */ +} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY, MPI2_POINTER PTR_MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY; + +#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumSCSISenseTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, + Mpi26DriverTriggerPage3_t, MPI2_POINTER pMpi26DriverTriggerPage3_t; + +#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY +{ + U16 IOCStatus; /* 0x00 */ + U16 Reserved; /* 0x02 */ + U32 LogInfo; /* 0x04 */ +} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY, MPI2_POINTER PTR_MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY; + +#define MPI26_MAX_LOGINFO_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 +{ + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumIOCStatusLogInfoTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, + MPI2_POINTER PTR_MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, + Mpi26DriverTriggerPage4_t, MPI2_POINTER pMpi26DriverTriggerPage4_t; + +#endif diff --git a/drivers/scsi/mpt3sas/scripts/collect_ioc_dump_on_reset.sh b/drivers/scsi/mpt3sas/scripts/collect_ioc_dump_on_reset.sh new file mode 100755 index 000000000000..6641bf4e34f4 --- /dev/null +++ b/drivers/scsi/mpt3sas/scripts/collect_ioc_dump_on_reset.sh @@ -0,0 +1,42 @@ +# DISCLAIMER OF LIABILITY +# THIS IS SAMPLE SCRIPT. +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +#!/bin/bash + +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + +clear +CWD=`pwd`; +scsi_host="/sys/class/scsi_host" +cd ${scsi_host} +subfolders=`ls -1` +for i in ${subfolders}; do + cd ${i}; + if [ `cat proc_name` == "mpt3sas" ]; then + reset_count=`cat ioc_reset_count` + { + while true; do + if [[ `cat /sys/kernel/debug/mpt3sas/scsi_${i}/host_recovery` == 1 && `cat ioc_reset_count` == $reset_count ]]; then + date_str=$(date +'%y-%m-%d-%H-%M-%S') + file_name=${i}-iocdump-${date_str}; + echo "==== host reset on ${i} is observed at ${date_str}, hence copied the ioc dump to file ${file_name} ====" + cat /sys/kernel/debug/mpt3sas/scsi_${i}/ioc_dump > ${CWD}/$file_name; + reset_count=`expr $reset_count + 1`; + fi; + sleep 1; + done; + } & + fi; + cd ${scsi_host} +done; + +while true; do sleep 300; done + +echo -e \\n diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 8906aceda4c4..0354898d7cac 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) if (IS_ERR(mhba->dm_thread)) { dev_err(&mhba->pdev->dev, "failed to create device scan thread\n"); + ret = PTR_ERR(mhba->dm_thread); mutex_unlock(&mhba->sas_discovery_mutex); goto fail_create_thread; } diff --git 
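The mvumi hunk above is a standard error-propagation fix: kthread helpers report failure through an ERR_PTR-encoded pointer rather than NULL, so the probe path must recover the errno with PTR_ERR() before unwinding, or it returns a stale status that can masquerade as success. A generic restatement of the idiom (illustrative function and thread names, not the actual mvumi code):

static int start_scan_thread_sketch(struct mvumi_hba *mhba)
{
        mhba->dm_thread = kthread_create(mvumi_rescan_bus, mhba,
                        "mvumi_scanthread");
        if (IS_ERR(mhba->dm_thread))
                return PTR_ERR(mhba->dm_thread); /* e.g. -ENOMEM */
        wake_up_process(mhba->dm_thread);
        return 0;
}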
a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index eb0dd566330a..cfc3f8b4174a 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2274,12 +2274,12 @@ static void myrs_cleanup(struct myrs_hba *cs) if (cs->mmio_base) { cs->disable_intr(cs); iounmap(cs->mmio_base); + cs->mmio_base = NULL; } if (cs->irq) free_irq(cs->irq, cs); if (cs->io_addr) release_region(cs->io_addr, 0x80); - iounmap(cs->mmio_base); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); scsi_host_put(cs->host); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3374f553c617..8882ba33ca87 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1040,7 +1040,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm8001_init_sas_add(pm8001_ha); /* phy setting support for motherboard controller */ - if (pm8001_configure_phy_settings(pm8001_ha)) + rc = pm8001_configure_phy_settings(pm8001_ha); + if (rc) goto err_out_shost; pm8001_post_sas_ha_init(shost, chip); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 7e48154e11c3..36f5bab09f73 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -816,7 +816,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) - return res; + goto ex_err; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; @@ -1202,8 +1202,8 @@ int pm8001_abort_task(struct sas_task *task) pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); phy_id = pm8001_dev->attached_phy; - rc = pm8001_find_tag(task, &tag); - if (rc == 0) { + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { pm8001_printk("no tag for task:%p\n", task); return TMF_RESP_FUNC_FAILED; } @@ -1241,26 +1241,50 @@ int pm8001_abort_task(struct sas_task *task) /* 2. Send Phy Control Hard Reset */ reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; phy->reset_success = false; phy->enable_completion = &completion; phy->reset_completion = &completion_reset; ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); - if (ret) + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; goto out; + } + + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. + */ PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for local phy ctl\n")); - wait_for_completion(&completion); - if (!phy->reset_success) - goto out; - - /* 3. Wait for Port Reset complete / Port reset TMO */ - PM8001_MSG_DBG(pm8001_ha, + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. 
Wait for Port Reset complete or + * Port reset TMO + */ + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for Port reset\n")); - wait_for_completion(&completion_reset); - if (phy->port_reset_status) { - pm8001_dev_gone_notify(dev); - goto out; + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + goto out; + } } /* diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index f3f399fe10c8..0da4e16fb23a 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -355,6 +355,7 @@ struct qedf_ctx { #define QEDF_GRCDUMP_CAPTURE 4 #define QEDF_IN_RECOVERY 5 #define QEDF_DBG_STOP_IO 6 +#define QEDF_PROBING 8 unsigned long flags; /* Miscellaneous state flags */ int fipvlan_retries; u8 num_queues; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 59ca98f12afd..9c0955c334e3 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -668,7 +668,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) rdata = fcport->rdata; if (!rdata || !kref_get_unless_zero(&rdata->kref)) { QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); - rc = 1; + rc = SUCCESS; goto out; } @@ -3153,7 +3153,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) { int rc = -EINVAL; struct fc_lport *lport; - struct qedf_ctx *qedf; + struct qedf_ctx *qedf = NULL; struct Scsi_Host *host; bool is_vf = false; struct qed_ll2_params params; @@ -3183,6 +3183,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) /* Initialize qedf_ctx */ qedf = lport_priv(lport); + set_bit(QEDF_PROBING, &qedf->flags); qedf->lport = lport; qedf->ctlr.lp = lport; qedf->pdev = pdev; @@ -3206,9 +3207,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } else { /* Init pointers during recovery */ qedf = pci_get_drvdata(pdev); + set_bit(QEDF_PROBING, &qedf->flags); lport = qedf->lport; } + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); + host = lport->host; /* Allocate mempool for qedf_io_work structs */ @@ -3513,6 +3517,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) else fc_fabric_login(lport); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); + + clear_bit(QEDF_PROBING, &qedf->flags); + /* All good */ return 0; @@ -3538,6 +3546,11 @@ err2: err1: scsi_host_put(lport->host); err0: + if (qedf) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); + + clear_bit(QEDF_PROBING, &qedf->flags); + } return rc; } @@ -3687,11 +3700,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data) { struct qedf_ctx *qedf = dev; struct qed_mfw_tlv_fcoe *fcoe = data; - struct fc_lport *lport = qedf->lport; - struct Scsi_Host *host = lport->host; - struct fc_host_attrs *fc_host = shost_to_fc_host(host); + struct fc_lport *lport; + struct Scsi_Host *host; + struct fc_host_attrs *fc_host; struct fc_host_statistics *hst; + if (!qedf) { + QEDF_ERR(NULL, "qedf is null.\n"); + return; + } + + if (test_bit(QEDF_PROBING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); + return; + } + + lport = qedf->lport; + host = lport->host; + fc_host = shost_to_fc_host(host); + /* Force a refresh of the fc_host stats including offload stats */ hst = qedf_fc_get_host_stats(host); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 946cebc4c932..90aa64604ad7 100644 --- 
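The qedf changes above close a race between a slow (re)probe and the management-firmware TLV callback: the probe publishes a QEDF_PROBING flag for its whole duration, and the callback bails out while it is set instead of dereferencing a half-initialized qedf_ctx. A minimal sketch of the pattern (hypothetical callback body; the flag, field and bit helpers are the ones the hunk uses):

static void get_tlv_data_sketch(struct qedf_ctx *qedf)
{
        if (test_bit(QEDF_PROBING, &qedf->flags))
                return;         /* probe still running: lport/host not ready */

        /* ...safe to touch qedf->lport and its Scsi_Host here... */
}

/* In __qedf_probe():
 *      set_bit(QEDF_PROBING, &qedf->flags);
 *      ...allocate lport, Scsi_Host, stats...
 *      clear_bit(QEDF_PROBING, &qedf->flags);
 */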
a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); + spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); @@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi, cmd->task_id, qedi_conn->iscsi_conn_id, &cmd->io_cmd); } + spin_unlock(&qedi_conn->list_lock); cmd->state = RESPONSE_RECEIVED; qedi_clear_task_idx(qedi, cmd->task_id); @@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, "Freeing tid=0x%x for cid=0x%x\n", cmd->task_id, qedi_conn->iscsi_conn_id); + spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); @@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, cmd->task_id, qedi_conn->iscsi_conn_id, &cmd->io_cmd); } + spin_unlock(&qedi_conn->list_lock); cmd->state = RESPONSE_RECEIVED; qedi_clear_task_idx(qedi, cmd->task_id); @@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi, tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; + spin_lock(&qedi_conn->list_lock); if (likely(qedi_cmd->io_cmd_in_list)) { qedi_cmd->io_cmd_in_list = false; list_del_init(&qedi_cmd->io_cmd); qedi_conn->active_cmd_count--; } + spin_unlock(&qedi_conn->list_lock); if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || @@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi, ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + spin_lock(&qedi_conn->list_lock); if (likely(cmd->io_cmd_in_list)) { cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } + spin_unlock(&qedi_conn->list_lock); memset(task_ctx, '\0', sizeof(*task_ctx)); @@ -817,8 +825,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_clear_task_idx(qedi_conn->qedi, rtid); spin_lock(&qedi_conn->list_lock); - list_del_init(&dbg_cmd->io_cmd); - qedi_conn->active_cmd_count--; + if (likely(dbg_cmd->io_cmd_in_list)) { + dbg_cmd->io_cmd_in_list = false; + list_del_init(&dbg_cmd->io_cmd); + qedi_conn->active_cmd_count--; + } spin_unlock(&qedi_conn->list_lock); qedi_cmd->state = CLEANUP_RECV; wake_up_interruptible(&qedi_conn->wait_queue); @@ -1236,6 +1247,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, qedi_conn->cmd_cleanup_req++; qedi_iscsi_cleanup_task(ctask, true); + cmd->io_cmd_in_list = false; list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; QEDI_WARN(&qedi->dbg_ctx, @@ -1447,8 +1459,11 @@ ldel_exit: spin_unlock_bh(&qedi_conn->tmf_work_lock); spin_lock(&qedi_conn->list_lock); - list_del_init(&cmd->io_cmd); - qedi_conn->active_cmd_count--; + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } spin_unlock(&qedi_conn->list_lock); clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 8829880a54c3..755f66b1ff9c 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -972,11 +972,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn) { struct qedi_cmd *cmd, *cmd_tmp; + spin_lock(&qedi_conn->list_lock); list_for_each_entry_safe(cmd, 
cmd_tmp, &qedi_conn->active_cmd_list, io_cmd) { list_del_init(&cmd->io_cmd); qedi_conn->active_cmd_count--; } + spin_unlock(&qedi_conn->list_lock); } static void qedi_ep_disconnect(struct iscsi_endpoint *ep) @@ -997,7 +999,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - flush_work(&qedi_ep->offload_work); + if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) + flush_work(&qedi_ep->offload_work); if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; @@ -1061,6 +1064,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) break; } + if (!abrt_conn) + wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; + qedi_ep->state = EP_STATE_DISCONN_START; ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); if (ret) { @@ -1214,6 +1220,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) } iscsi_cid = (u32)path_data->handle; + if (iscsi_cid >= qedi->max_active_conns) { + ret = -EINVAL; + goto set_path_exit; + } qedi_ep = qedi->ep_tbl[iscsi_cid]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index acb930b8c6a6..4498add3d4d6 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1605,6 +1605,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) if (!qedi->global_queues[i]) { QEDI_ERR(&qedi->dbg_ctx, "Unable to allocation global queue %d.\n", i); + status = -ENOMEM; goto mem_alloc_failure; } @@ -2175,7 +2176,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: - rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, chap_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: @@ -2183,7 +2184,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, mchap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: - rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, mchap_secret); break; case ISCSI_BOOT_TGT_FLAGS: @@ -2630,7 +2631,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) QEDI_ERR(&qedi->dbg_ctx, "Unable to start offload thread!\n"); rc = -ENODEV; - goto free_cid_que; + goto free_tmf_thread; } /* F/w needs 1st task context memory entry for performance */ @@ -2650,6 +2651,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) return 0; +free_tmf_thread: + destroy_workqueue(qedi->tmf_thread); free_cid_que: qedi_release_cid_que(qedi); free_uio: diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1fbc5c6c6c14..580d30cd5c35 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1775,9 +1775,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, return -EINVAL; } - ql_log(ql_log_info, vha, 0x70d6, - "port speed:%d\n", ha->link_data_rate); - return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); } @@ -2694,6 +2691,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); if (IS_FWI2_CAPABLE(ha)) { + int rval; + stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { @@ -2703,7 +2702,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) } /* reset firmware statistics */ - qla24xx_get_isp_stats(base_vha, stats, 
stats_dma, BIT_0); + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x70de, + "Resetting ISP statistics failed: rval = %d\n", + rval); dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma); @@ -2926,11 +2929,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) msleep(1000); - qla_nvme_delete(vha); qla24xx_disable_vp(vha); qla2x00_wait_for_sess_deletion(vha); + qla_nvme_delete(vha); vha->flags.delete_progress = 1; qlt_remove_target(ha, vha); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index cbaf178fc979..ce55121910e8 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -17,10 +17,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; + sp->free(sp); + bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); - sp->free(sp); } void qla2x00_bsg_sp_free(srb_t *sp) diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 7bbff91f8883..88a56e8480f7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -18,7 +18,7 @@ * | Device Discovery | 0x2134 | 0x210e-0x2116 | * | | | 0x211a | * | | | 0x211c-0x2128 | - * | | | 0x212a-0x2130 | + * | | | 0x212a-0x2134 | * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c57b95a20688..7c22f8eea3ea 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2281,7 +2281,7 @@ typedef struct { uint8_t fabric_port_name[WWN_SIZE]; uint16_t fp_speed; uint8_t fc4_type; - uint8_t fc4f_nvme; /* nvme fc4 feature bits */ + uint8_t fc4_features; } sw_info_t; /* FCP-4 types */ @@ -2452,7 +2452,7 @@ typedef struct fc_port { u32 supported_classes; uint8_t fc4_type; - uint8_t fc4f_nvme; + uint8_t fc4_features; uint8_t scan_state; unsigned long last_queue_full; @@ -2466,6 +2466,7 @@ typedef struct fc_port { struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; enum discovery_state disc_state; + atomic_t shadow_disc_state; enum discovery_state next_disc_state; enum login_state fw_login_state; unsigned long dm_login_expire; @@ -2483,6 +2484,9 @@ typedef struct fc_port { u16 n2n_chip_reset; } fc_port_t; +#define FC4_PRIORITY_NVME 0 +#define FC4_PRIORITY_FCP 1 + #define QLA_FCPORT_SCAN 1 #define QLA_FCPORT_FOUND 2 @@ -2507,6 +2511,19 @@ struct event_arg { extern const char *const port_state_str[5]; +static const char * const port_dstate_str[] = { + "DELETED", + "GNN_ID", + "GNL", + "LOGIN_PEND", + "LOGIN_FAILED", + "GPDB", + "UPD_FCPORT", + "LOGIN_COMPLETE", + "ADISC", + "DELETE_PEND" +}; + /* * FC port flags. 
*/ @@ -4298,6 +4315,8 @@ struct qla_hw_data { atomic_t nvme_active_aen_cnt; uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ + uint8_t fc4_type_priority; + atomic_t zio_threshold; uint16_t last_zio_threshold; @@ -4823,6 +4842,23 @@ struct sff_8247_a0 { ha->current_topology == ISP_CFG_N || \ !ha->current_topology) +#define NVME_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_NVME) \ + +#define FCP_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_FCP) \ + +#define NVME_ONLY_TARGET(fcport) \ + (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \ + +#define NVME_FCP_TARGET(fcport) \ + (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \ + +#define NVME_TARGET(ha, fcport) \ + ((NVME_FCP_TARGET(fcport) && \ + (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \ + NVME_ONLY_TARGET(fcport)) \ + #define PRLI_PHASE(_cls) \ ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index dc2366a29665..9dc09c117416 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -2105,4 +2105,6 @@ struct qla_fcp_prio_cfg { #define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4) #define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4) +#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196 + #endif diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index d11416dcee4e..7aa233771ec8 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -80,6 +80,7 @@ extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state); extern fc_port_t * @@ -255,6 +256,7 @@ extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); @@ -917,4 +919,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode); /* nvme.c */ void qla_nvme_unregister_remote_port(struct fc_port *fcport); +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 84bb4a048016..d9b5ea77fde9 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) WWN_SIZE); fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? 
- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER; + FS_FC4TYPE_FCP : FC4_TYPE_OTHER; if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) @@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; - uint8_t fcp_scsi_features = 0; + uint8_t fcp_scsi_features = 0, nvme_features = 0; struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { @@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; fcp_scsi_features &= 0x0f; - if (fcp_scsi_features) - list[i].fc4_type = FC4_TYPE_FCP_SCSI; - else - list[i].fc4_type = FC4_TYPE_OTHER; + if (fcp_scsi_features) { + list[i].fc4_type = FS_FC4TYPE_FCP; + list[i].fc4_features = fcp_scsi_features; + } - list[i].fc4f_nvme = + nvme_features = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; - list[i].fc4f_nvme &= 0xf; + nvme_features &= 0xf; + + if (nvme_features) { + list[i].fc4_type |= FS_FC4TYPE_NVME; + list[i].fc4_features = nvme_features; + } } /* Last device exit. */ @@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res) fc_port_t *fcport = sp->fcport; struct ct_sns_rsp *ct_rsp; struct event_arg ea; + uint8_t fc4_scsi_feat; + uint8_t fc4_nvme_feat; ql_dbg(ql_dbg_disc, vha, 0x2133, "Async done-%s res %x ID %x. %8phC\n", @@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res) fcport->flags &= ~FCF_ASYNC_SENT; ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; + fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + /* * FC-GS-7, 5.2.3.12 FC-4 Features - format * The format of the FC-4 Features object, as defined by the FC-4, * Shall be an array of 4-bit values, one for each type code value */ if (!res) { - if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) { + if (fc4_scsi_feat & 0xf) { /* w1 b00:03 */ - fcport->fc4_type = - ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; - fcport->fc4_type &= 0xf; - } + fcport->fc4_type = FS_FC4TYPE_FCP; + fcport->fc4_features = fc4_scsi_feat & 0xf; + } - if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) { + if (fc4_nvme_feat & 0xf) { /* w5 [00:03]/28h */ - fcport->fc4f_nvme = - ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; - fcport->fc4f_nvme &= 0xf; + fcport->fc4_type |= FS_FC4TYPE_NVME; + fcport->fc4_features = fc4_nvme_feat & 0xf; } } @@ -3638,7 +3646,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if (fcport->d_id.b24 != rp->id.b24 || - fcport->scan_needed) { + (fcport->scan_needed && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_NVME_INITIATOR)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; @@ -3671,10 +3681,22 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } if (fcport->scan_state != QLA_FCPORT_FOUND) { + bool do_delete = false; + + if (fcport->scan_needed && + fcport->disc_state == DSC_LOGIN_PEND) { + /* Cable got disconnected after we sent + * a login. Do delete to prevent timeout. 
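The GFF_ID parsing above is where the single-purpose fc4f_nvme byte turns into two bits in fc4_type, so one fcport can be FCP, NVMe, or both; the NVME_TARGET() macro added in qla_def.h earlier in this diff then decides which FC-4 a dual port is driven with, based on ha->fc4_type_priority. A self-contained sketch of that decision (the FS_FC4TYPE_* names come from the diff, but their bit values here are assumed; the two priority constants are the ones defined above):

    #include <stdio.h>

    #define FS_FC4TYPE_FCP   (1 << 0)   /* bit values assumed */
    #define FS_FC4TYPE_NVME  (1 << 1)
    #define FC4_PRIORITY_NVME  0        /* values from the diff */
    #define FC4_PRIORITY_FCP   1

    /* Mirrors NVME_TARGET(): an NVMe-only port is always driven as
     * NVMe; a dual FCP/NVMe port only when the adapter-wide priority
     * says so. */
    static int is_nvme_target(int fc4_type, int fc4_type_priority)
    {
        int nvme = fc4_type & FS_FC4TYPE_NVME;
        int fcp = fc4_type & FS_FC4TYPE_FCP;

        if (nvme && !fcp)
            return 1;
        if (nvme && fcp)
            return fc4_type_priority == FC4_PRIORITY_NVME;
        return 0;
    }

    int main(void)
    {
        int dual = FS_FC4TYPE_FCP | FS_FC4TYPE_NVME;

        printf("dual port, NVMe priority: %d\n",
               is_nvme_target(dual, FC4_PRIORITY_NVME));  /* 1 */
        printf("dual port, FCP priority:  %d\n",
               is_nvme_target(dual, FC4_PRIORITY_FCP));   /* 0 */
        return 0;
    }

This is also why the PRLI-failure path further down can simply clear one of the two type bits and retry: the next login attempt then classifies the port under the remaining FC-4 type.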
+ */ + fcport->logout_on_delete = 1; + do_delete = true; + } + fcport->scan_needed = 0; - if ((qla_dual_mode_enabled(vha) || - qla_ini_mode_enabled(vha)) && - atomic_read(&fcport->state) == FCS_ONLINE) { + if (((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) || + do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) fcport->logout_on_delete = 0; @@ -4268,7 +4290,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; - fcport->disc_state = DSC_GNN_ID; + qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ac4c47fc5f4c..643b8ae36cbe 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -50,16 +50,9 @@ qla2x00_sp_timeout(struct timer_list *t) { srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; - struct req_que *req; - unsigned long flags; - struct qla_hw_data *ha = sp->vha->hw; - WARN_ON_ONCE(irqs_disabled()); - spin_lock_irqsave(&ha->hardware_lock, flags); - req = sp->qpair->req; - req->outstanding_cmds[sp->handle] = NULL; + WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; - spin_unlock_irqrestore(&ha->hardware_lock, flags); iocb->timeout(sp); } @@ -71,6 +64,16 @@ void qla2x00_sp_free(srb_t *sp) qla2x00_rel_sp(sp); } +void qla2xxx_rel_done_warning(srb_t *sp, int res) +{ + WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); +} + +void qla2xxx_rel_free_warning(srb_t *sp) +{ + WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp); +} + /* Asynchronous Login/Logout Routines -------------------------------------- */ unsigned long @@ -143,7 +146,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res) sp->free(sp); } -static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) +int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; struct srb_iocb *abt_iocb; @@ -243,6 +246,7 @@ qla2x00_async_iocb_timeout(void *data) case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_CTRL_VP: + default: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); @@ -259,10 +263,6 @@ qla2x00_async_iocb_timeout(void *data) sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; - default: - WARN_ON_ONCE(true); - sp->done(sp, QLA_FUNCTION_TIMEOUT); - break; } } @@ -327,10 +327,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; - fcport->disc_state = DSC_LOGIN_PEND; sp->type = SRB_LOGIN_CMD; sp->name = "login"; sp->gen1 = fcport->rscn_gen; @@ -346,7 +346,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, else lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; ql_dbg(ql_dbg_disc, vha, 0x2072, @@ -534,7 +534,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; - fcport->disc_state = DSC_LOGIN_PEND; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } @@ -757,14 +757,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t 
*vha, fcport->fc4_type &= ~FS_FC4TYPE_NVME; } - ql_dbg(ql_dbg_disc, vha, 0x20e2, - "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", __func__, fcport->port_name, e->current_login_state, fcport->fw_login_state, - fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + fcport->fc4_type, id.b24, fcport->d_id.b24, + loop_id, fcport->loop_id); switch (fcport->disc_state) { case DSC_DELETE_PEND: @@ -846,7 +844,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, * with GNL. Push disc_state back to DELETED * so GNL can go out again */ - fcport->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, + DSC_DELETED); break; case DSC_LS_PRLI_COMP: if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) @@ -922,7 +921,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla24xx_fcport_handle_login(vha, fcport); break; case ISP_CFG_N: - fcport->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); if (time_after_eq(jiffies, fcport->dm_login_expire)) { if (fcport->n2n_link_reset_cnt < 2) { fcport->n2n_link_reset_cnt++; @@ -1002,7 +1001,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, + ql_dbg(ql_dbg_disc, vha, 0x20e8, "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, @@ -1061,6 +1060,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); vha->gnl.sent = 0; + if (!list_empty(&vha->gnl.fcports)) { + /* retrigger gnl */ + list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, + gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) + break; + } + } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp->free(sp); @@ -1082,7 +1091,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags |= FCF_ASYNC_SENT; - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; @@ -1257,13 +1266,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) sp->done = qla2x00_async_prli_sp_done; lio->u.logio.flags = 0; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; ql_dbg(ql_dbg_disc, vha, 0x211b, "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, - fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc"); + fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? 
"nvme" : "fc"); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -1312,12 +1321,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) return rval; } - fcport->disc_state = DSC_GPDB; - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); + fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gpdb"; @@ -1396,7 +1405,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20d6, "%s %d %8phC session revalidate success\n", __func__, __LINE__, ea->fcport->port_name); - ea->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -1414,14 +1423,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) fcport->flags &= ~FCF_ASYNC_SENT; ql_dbg(ql_dbg_disc, vha, 0x20d2, - "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name, - fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme, - ea->rc); + "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__, + fcport->port_name, fcport->disc_state, pd->current_login_state, + fcport->fc4_type, ea->rc); if (fcport->disc_state == DSC_DELETE_PEND) return; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) ls = pd->current_login_state >> 4; else ls = pd->current_login_state & 0xf; @@ -1450,7 +1459,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) /* Set discovery state back to GNL to Relogin attempt */ if (qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } return; @@ -1610,7 +1619,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post %s PRLI\n", __func__, __LINE__, fcport->port_name, - fcport->fc4f_nvme ? "NVME" : "FC"); + NVME_TARGET(vha->hw, fcport) ? 
"NVME" : + "FC"); qla24xx_post_prli_work(vha, fcport); } break; @@ -1737,6 +1747,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, qla24xx_fcport_handle_login(vha, fcport); } +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, + struct event_arg *ea) +{ + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post PRLI\n", + __func__, __LINE__, ea->fcport->port_name); + qla24xx_post_prli_work(vha, ea->fcport); +} + /* * RSCN(s) came in for this fcport, but the RSCN(s) was not able * to be consumed by the fcport @@ -1765,9 +1784,23 @@ qla2x00_tmf_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; + int rc, h; + unsigned long flags; - tmf->u.tmf.comp_status = CS_TIMEOUT; - complete(&tmf->u.tmf.comp); + rc = qla24xx_async_abort_cmd(sp, false); + if (rc) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + tmf->u.tmf.comp_status = CS_TIMEOUT; + tmf->u.tmf.data = QLA_FUNCTION_FAILED; + complete(&tmf->u.tmf.comp); + } } static void qla2x00_tmf_sp_done(srb_t *sp, int res) @@ -1896,38 +1929,26 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) break; } - if (ea->fcport->fc4f_nvme) { + /* + * Retry PRLI with other FC-4 type if failure occurred on dual + * FCP/NVMe port + */ + if (NVME_FCP_TARGET(ea->fcport)) { ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC post fc4 prli\n", - __func__, __LINE__, ea->fcport->port_name); - ea->fcport->fc4f_nvme = 0; - qla24xx_post_prli_work(vha, ea->fcport); - return; + "%s %d %8phC post %s prli\n", + __func__, __LINE__, ea->fcport->port_name, + (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ? + "NVMe" : "FCP"); + if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) + ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; + else + ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; } - /* at this point both PRLI NVME & PRLI FCP failed */ - if (N2N_TOPO(vha->hw)) { - if (ea->fcport->n2n_link_reset_cnt < 3) { - ea->fcport->n2n_link_reset_cnt++; - /* - * remote port is not sending Plogi. Reset - * link to kick start his state machine - */ - set_bit(N2N_LINK_RESET, &vha->dpc_flags); - } else { - ql_log(ql_log_warn, vha, 0x2119, - "%s %d %8phC Unable to reconnect\n", - __func__, __LINE__, ea->fcport->port_name); - } - } else { - /* - * switch connect. login failed. Take connection - * down and allow relogin to retrigger - */ - ea->fcport->flags &= ~FCF_ASYNC_SENT; - ea->fcport->keep_nport_handle = 0; - qlt_schedule_sess_for_deletion(ea->fcport); - } + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; + qlt_schedule_sess_for_deletion(ea->fcport); break; } } @@ -1988,14 +2009,14 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ - if (ea->fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, ea->fcport)) { ql_dbg(ql_dbg_disc, vha, 0x2117, "%s %d %8phC post prli\n", __func__, __LINE__, ea->fcport->port_name); qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", + "%s %d %8phC LoopID 0x%x in use with %06x. 
post gpdb\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->loop_id, ea->fcport->d_id.b24); @@ -2015,7 +2036,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) __func__, __LINE__, ea->fcport->port_name, ea->data[1]); ea->fcport->flags &= ~FCF_ASYNC_SENT; - ea->fcport->disc_state = DSC_LOGIN_FAILED; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else @@ -2066,6 +2087,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) set_bit(lid, vha->hw->loop_id_map); ea->fcport->loop_id = lid; ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; @@ -5394,7 +5416,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", __func__, fcport->port_name); - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); fcport->deleted = 0; @@ -5412,9 +5434,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_iidma_fcport(vha, fcport); - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { qla_nvme_register_remote(vha, fcport); - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); qla2x00_set_fcport_state(fcport, FCS_ONLINE); return; } @@ -5459,7 +5481,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); } void qla_register_fcport_fn(struct work_struct *work) @@ -5740,11 +5762,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) new_fcport->fc4_type = swl[swl_idx].fc4_type; new_fcport->nvme_flag = 0; - new_fcport->fc4f_nvme = 0; if (vha->flags.nvme_enabled && - swl[swl_idx].fc4f_nvme) { - new_fcport->fc4f_nvme = - swl[swl_idx].fc4f_nvme; + swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) { ql_log(ql_log_info, vha, 0x2131, "FOUND: NVME port %8phC as FC Type 28h\n", new_fcport->port_name); @@ -5800,7 +5819,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) /* Bypass ports whose FCP-4 type is not FCP_SCSI */ if (ql2xgffidenable && - (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && + (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) && new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; @@ -5869,9 +5888,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) break; } - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; fcport->login_succ = 0; } @@ -8544,6 +8563,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= BIT_8; + /* Determine NVMe/FCP priority for target ports */ + ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); + ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", + ha->fc4_type_priority & BIT_0 ? 
"FCP" : "NVMe"); + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 0c3d907af769..477b0b8a5f4b 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) INIT_LIST_HEAD(&ctx->dsd_list); } +static inline void +qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) +{ + int old_val; + uint8_t shiftbits, mask; + + /* This will have to change when the max no. of states > 16 */ + shiftbits = 4; + mask = (1 << shiftbits) - 1; + + fcport->disc_state = state; + while (1) { + old_val = atomic_read(&fcport->shadow_disc_state); + if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state, + old_val, (old_val << shiftbits) | state)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2134, + "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n", + fcport->port_name, port_dstate_str[old_val & mask], + port_dstate_str[state], fcport->d_id.b24); + return; + } + } +} + static inline int qla2x00_hba_err_chk_enabled(srb_t *sp) { @@ -183,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, return sp; } +void qla2xxx_rel_done_warning(srb_t *sp, int res); +void qla2xxx_rel_free_warning(srb_t *sp); + static inline void qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp) { sp->qpair = NULL; + sp->done = qla2xxx_rel_done_warning; + sp->free = qla2xxx_rel_free_warning; mempool_free(sp, qpair->srb_mempool); QLA_QPAIR_MARK_NOT_BUSY(qpair); } @@ -307,3 +336,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair) WRT_REG_DWORD(req->req_q_in, req->ring_index); } + +static inline int +qla2xxx_get_fc4_priority(struct scsi_qla_host *vha) +{ + uint32_t data; + + data = + ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET]; + + + return ((data >> 6) & BIT_0); +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index bdf1994251b9..936103604d02 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data) fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; + unsigned long flags = 0; + int res, h; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - complete(&lio->u.els_logo.comp); + /* Abort the exchange */ + res = qla24xx_async_abort_cmd(sp, false); + if (res) { + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command failed.\n"); + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + complete(&lio->u.els_logo.comp); + } else { + ql_dbg(ql_dbg_io, vha, 0x3071, + "mbx abort_command success.\n"); + } } static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) @@ -2708,23 +2727,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data) srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; - struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; - int res; + int res, h; ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); /* 
Abort the exchange */ - spin_lock_irqsave(&ha->hardware_lock, flags); - res = ha->isp_ops->abort_command(sp); + res = qla24xx_async_abort_cmd(sp, false); ql_dbg(ql_dbg_io, vha, 0x3070, "mbx abort_command %s\n", (res == QLA_SUCCESS) ? "successful" : "failed"); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - sp->done(sp, QLA_FUNCTION_TIMEOUT); + if (res) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } } void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) @@ -2749,6 +2774,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct qla_work_evt *e; + struct fc_port *conflict_fcport; + port_id_t cid; /* conflict Nport id */ + u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u16 lid; ql_dbg(ql_dbg_disc, vha, 0x3072, "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", @@ -2760,14 +2789,102 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); else { - if (res) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } else { + switch (fw_status[0]) { + case CS_DATA_UNDERRUN: + case CS_COMPLETE: memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; - ea.data[0] = MBS_COMMAND_COMPLETE; - ea.sp = sp; - qla24xx_handle_plogi_done_event(vha, &ea); + ea.rc = res; + qla_handle_els_plogi_done(vha, &ea); + break; + + case CS_IOCB_ERROR: + switch (fw_status[1]) { + case LSC_SCODE_PORTID_USED: + lid = fw_status[2] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(fcport->port_name), + fcport->d_id, lid, &conflict_fcport); + if (conflict_fcport) { + /* + * Another fcport shares the same + * loop_id & nport id; conflict + * fcport needs to finish cleanup + * before this fcport can proceed + * to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x sched del\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + qla2x00_clear_loop_id(fcport); + set_bit(lid, vha->hw->loop_id_map); + fcport->loop_id = lid; + fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(fcport); + } + break; + + case LSC_SCODE_NPORT_USED: + cid.b.domain = (fw_status[2] >> 16) & 0xff; + cid.b.area = (fw_status[2] >> 8) & 0xff; + cid.b.al_pa = fw_status[2] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, cid.b24); + set_bit(fcport->loop_id, + vha->hw->loop_id_map); + fcport->loop_id = FC_NO_LOOP_ID; + qla24xx_post_gnl_work(vha, fcport); + break; + + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd046, + "Exchange starvation. 
Resetting RISC\n"); + vha->hw->exch_starvation = 0; + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* fall through */ + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, + DSC_LOGIN_FAILED); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + break; + + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; } e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); @@ -2801,11 +2918,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, return -ENOMEM; } + fcport->flags |= FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); elsio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_io, vha, 0x3073, "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); - fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 1ef8907314e5..098388a12feb 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -335,14 +335,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; - /* - * Check if it's UNLOADING, cause we cannot poll in - * this case, or else a NULL pointer dereference - * is triggered. - */ - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) - return QLA_FUNCTION_TIMEOUT; - /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); @@ -1932,7 +1924,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) pd24 = (struct port_database_24xx *) pd; /* Check for logged in state. 
*/ - if (fcport->fc4f_nvme) { + if (NVME_TARGET(ha, fcport)) { current_login_state = pd24->current_login_state >> 4; last_login_state = pd24->last_login_state >> 4; } else { @@ -3117,7 +3109,7 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); - if (vha->flags.qpairs_available && sp->qpair) + if (sp->qpair) req = sp->qpair->req; else return QLA_FUNCTION_FAILED; @@ -3899,8 +3891,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->scan_state = QLA_FCPORT_FOUND; fcport->n2n_flag = 1; fcport->keep_nport_handle = 1; + fcport->fc4_type = FS_FC4TYPE_FCP; if (vha->flags.nvme_enabled) - fcport->fc4f_nvme = 1; + fcport->fc4_type |= FS_FC4TYPE_NVME; switch (fcport->disc_state) { case DSC_DELETED: @@ -6358,7 +6351,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, uint64_t zero = 0; u8 current_login_state, last_login_state; - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { current_login_state = pd->current_login_state >> 4; last_login_state = pd->last_login_state >> 4; } else { @@ -6393,8 +6386,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; - if (fcport->fc4f_nvme) { - fcport->port_type = 0; + if (NVME_TARGET(vha->hw, fcport)) { + fcport->port_type = FCT_NVME; if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) fcport->port_type |= FCT_NVME_INITIATOR; if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index bfcd02fdf2b8..11656e864fca 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -535,6 +535,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; + if (!priv) { + /* nvme association has been torn down */ + return rval; + } + fcport = qla_rport->fcport; if (!qpair || !fcport || (qpair && !qpair->fw_started) || @@ -610,7 +615,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { - .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, @@ -678,7 +682,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha) struct nvme_fc_port_template *tmpl; struct qla_hw_data *ha; struct nvme_fc_port_info pinfo; - int ret = EINVAL; + int ret = -EINVAL; if (!IS_ENABLED(CONFIG_NVME_FC)) return ret; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 06037e3c7854..052ce7881407 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -983,8 +983,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); - if (rval == QLA_INTERFACE_ERROR) - goto qc24_free_sp_fail_command; goto qc24_host_busy_free_sp; } @@ -996,11 +994,6 @@ qc24_host_busy_free_sp: qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; -qc24_free_sp_fail_command: - sp->free(sp); - CMD_SP(cmd) = NULL; - qla2xxx_rel_qpair_sp(sp->qpair, sp); - qc24_fail_command: cmd->scsi_done(cmd); @@ -1993,6 +1986,11 @@ skip_pio: /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not 
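Both qla2x00_get_port_database() and __qla24xx_parse_gpdb() above read the same packed byte two ways: the firmware's port database keeps the NVMe login state in the high nibble and the FCP login state in the low nibble, which is why NVME_TARGET() ports shift by four. A sketch of the split (the packed example value is made up):

    #include <stdio.h>

    /* NVMe state in the high nibble, FCP state in the low nibble,
     * per the hunks around this point. */
    static unsigned int login_state(unsigned int packed, int nvme_target)
    {
        return nvme_target ? packed >> 4 : packed & 0xf;
    }

    int main(void)
    {
        unsigned int packed = 0x64;  /* NVMe state 6, FCP state 4 */

        printf("nvme %u, fcp %u\n",
               login_state(packed, 1), login_state(packed, 0));
        return 0;
    }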
*/ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -2804,10 +2802,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* This may fail but that's ok */ pci_enable_pcie_error_reporting(pdev); - /* Turn off T10-DIF when FC-NVMe is enabled */ - if (ql2xnvmeenable) - ql2xenabledif = 0; - ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); if (!ha) { ql_log_pci(ql_log_fatal, pdev, 0x0009, @@ -3700,6 +3694,13 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->flags.fw_started) @@ -3718,15 +3719,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_sess_deletion(base_vha); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. - */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - - set_bit(UNLOADING, &base_vha->dpc_flags); - qla_nvme_delete(base_vha); dma_free_coherent(&ha->pdev->dev, @@ -4859,6 +4851,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) struct qla_work_evt *e; uint8_t bail; + if (test_bit(UNLOADING, &vha->dpc_flags)) + return NULL; + QLA_VHA_MARK_BUSY(vha, bail); if (bail) return NULL; @@ -5012,7 +5007,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport) fcport->jiffies_at_registration = jiffies; fcport->sec_since_registration = 0; fcport->next_disc_state = DSC_DELETED; - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); spin_unlock_irqrestore(&fcport->vha->work_lock, flags); queue_work(system_unbound_wq, &fcport->reg_work); @@ -5053,19 +5048,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) fcport->d_id = e->u.new_sess.id; fcport->flags |= FCF_FABRIC_DEVICE; fcport->fw_login_state = DSC_LS_PLOGI_PEND; - if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP) - fcport->fc4_type = FC4_TYPE_FCP_SCSI; - - if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) { - fcport->fc4_type = FC4_TYPE_OTHER; - fcport->fc4f_nvme = FC4_TYPE_NVME; - } memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); - if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) + fcport->fc4_type = e->u.new_sess.fc4_type; + if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { + fcport->fc4_type = FS_FC4TYPE_FCP; fcport->n2n_flag = 1; + if (vha->flags.nvme_enabled) + fcport->fc4_type |= FS_FC4TYPE_NVME; + } } else { ql_dbg(ql_dbg_disc, vha, 0xffff, @@ -5169,7 +5162,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) fcport->flags &= ~FCF_FABRIC_DEVICE; fcport->keep_nport_handle = 1; if (vha->flags.nvme_enabled) { - fcport->fc4f_nvme = 1; + fcport->fc4_type = + (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); fcport->n2n_flag = 1; } fcport->fw_login_state = 0; @@ -6053,13 +6047,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. 
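The qla2x00_remove_one() change above collapses a separate test_bit()/set_bit() pair into one test_and_set_bit(UNLOADING, ...), and the same idiom is applied to qla2x00_disable_board_on_pci_error() just below: whichever teardown path sets the bit first owns the unload, and every later or concurrent caller returns early instead of racing it. A userspace sketch of the idiom with C11 atomics (the names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define UNLOADING_BIT 0x1

    static atomic_uint dpc_flags;

    /* Like test_and_set_bit(): returns 1 only for the caller that
     * actually flipped the bit from 0 to 1. */
    static int start_unload(void)
    {
        return !(atomic_fetch_or(&dpc_flags, UNLOADING_BIT) & UNLOADING_BIT);
    }

    int main(void)
    {
        printf("first caller owns teardown:  %d\n", start_unload()); /* 1 */
        printf("second caller returns early: %d\n", start_unload()); /* 0 */
        return 0;
    }

Testing and setting in one atomic step closes the window in the old code where two paths could both see the bit clear and both proceed with the unload.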
- */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -6070,9 +6057,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) return; } - qla2x00_wait_for_sess_deletion(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; - set_bit(UNLOADING, &base_vha->dpc_flags); + qla2x00_wait_for_sess_deletion(base_vha); qla2x00_delete_all_vps(ha, base_vha); @@ -6296,6 +6288,7 @@ qla2x00_do_dpc(void *data) if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { + base_vha->flags.online = 1; ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index cb8a892e2d39..509539ec58e9 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; - sp->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_COMPLETE); sp->fcport->deleted = 0; sp->fcport->logout_on_delete = 1; } @@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work) struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + ql_dbg(ql_dbg_disc, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, @@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work) while (!READ_ONCE(sess->logout_completed)) { if (!traced) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + ql_dbg(ql_dbg_disc, vha, 0xf086, "%s: waiting for sess %p logout\n", __func__, sess); traced = true; @@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work) (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); } + spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; @@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work) tgt->sess_count--; } - sess->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(sess, DSC_DELETED); sess->fw_login_state = DSC_LS_PORT_UNAVAIL; sess->deleted = QLA_SESS_DELETED; @@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work) spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->free_pending = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, + ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count); @@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess) return; } sess->free_pending = 1; + /* + * Use FCF_ASYNC_SENT flag to block other cmds used in sess + * management from being sent. 
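The qlt_unreg_sess() comment above describes a gate: FCF_ASYNC_SENT is raised under vha->work_lock when unregistration begins (the assignment follows right after the comment) and qlt_free_session_done() clears it under the same lock once the session is torn down, so no new session-management command can be issued in the window between. A minimal sketch of that gating pattern, with a pthread mutex standing in for the spinlock and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    #define ASYNC_SENT 0x1

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int sess_flags;

    /* Submission path: refuse once teardown has raised the flag. */
    static int try_send_sess_cmd(void)
    {
        int ok;

        pthread_mutex_lock(&work_lock);
        ok = !(sess_flags & ASYNC_SENT);
        if (ok)
            sess_flags |= ASYNC_SENT;  /* this command owns the slot */
        pthread_mutex_unlock(&work_lock);
        return ok;
    }

    /* Teardown path: block all further submissions. */
    static void begin_unreg(void)
    {
        pthread_mutex_lock(&work_lock);
        sess_flags |= ASYNC_SENT;
        pthread_mutex_unlock(&work_lock);
    }

    int main(void)
    {
        begin_unreg();
        printf("send after unreg started: %d\n", try_send_sess_cmd()); /* 0 */
        return 0;
    }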
+ */ + sess->flags |= FCF_ASYNC_SENT; spin_unlock_irqrestore(&sess->vha->work_lock, flags); if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - sess->disc_state = DSC_DELETE_PEND; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; @@ -1221,14 +1231,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) case DSC_DELETE_PEND: return; case DSC_DELETED: - if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) - wake_up_all(&tgt->waitQ); - if (sess->vha->fcport_count == 0) - wake_up_all(&sess->vha->fcport_waitQ); - if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && - !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) + !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { + if (tgt && tgt->tgt_stop && tgt->sess_count == 0) + wake_up_all(&tgt->waitQ); + + if (sess->vha->fcport_count == 0) + wake_up_all(&sess->vha->fcport_waitQ); return; + } break; case DSC_UPD_FCPORT: /* @@ -1258,11 +1269,11 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) spin_unlock_irqrestore(&sess->vha->work_lock, flags); sess->prli_pend_timer = 0; - sess->disc_state = DSC_DELETE_PEND; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + ql_dbg(ql_dbg_disc, sess->vha, 0xe001, "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); @@ -3206,8 +3217,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - res = 0; - goto free; + return 0; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, @@ -3218,8 +3228,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); - if (unlikely(res != 0)) - goto free; + if (unlikely(res != 0)) { + return res; + } spin_lock_irqsave(qpair->qp_lock_ptr, flags); @@ -3239,8 +3250,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - res = 0; - goto free; + return 0; } /* Does F/W have an IOCBs for this request */ @@ -3343,8 +3353,6 @@ out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); -free: - vha->hw->tgt.tgt_ops->free_cmd(cmd); return res; } EXPORT_SYMBOL(qlt_xmit_response); @@ -4580,7 +4588,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport_id collision */ if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, + ql_dbg(ql_dbg_disc, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4596,7 +4604,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ - ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, + ql_dbg(ql_dbg_disc, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4611,7 +4619,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport handle collision */ if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, + 
ql_dbg(ql_dbg_disc, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -5668,7 +5676,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, /* found existing exchange */ qpair->retry_term_cnt++; if (qpair->retry_term_cnt >= 5) { - rc = EIO; + rc = -EIO; qpair->retry_term_cnt = 0; ql_log(ql_log_warn, vha, 0xffff, "Unable to send ABTS Respond. Dumping firmware.\n"); @@ -6054,7 +6062,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, if (!IS_SW_RESV_ADDR(fcport->d_id)) vha->fcport_count++; fcport->login_gen++; - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); fcport->login_succ = 1; newfcport = 1; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d006f0a97b8c..2236751a3a56 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -116,7 +116,6 @@ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) #endif -#endif #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ @@ -244,6 +243,7 @@ struct ctio_to_2xxx { #ifndef CTIO_RET_TYPE #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ +#endif struct fcp_hdr { uint8_t r_ctl; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 294d77c02cdf..f169292448bd 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -910,7 +910,8 @@ qla27xx_template_checksum(void *p, ulong size) static inline int qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) { - return qla27xx_template_checksum(tmp, tmp->template_size) == 0; + return qla27xx_template_checksum(tmp, + le32_to_cpu(tmp->template_size)) == 0; } static inline int @@ -926,7 +927,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha, ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { - len = tmp->template_size; + len = le32_to_cpu(tmp->template_size); tmp = memcpy(buf, tmp, len); ql27xx_edit_template(vha, tmp); qla27xx_walk_template(vha, tmp, buf, &len); @@ -942,7 +943,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p) ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { - len = tmp->template_size; + len = le32_to_cpu(tmp->template_size); qla27xx_walk_template(vha, tmp, NULL, &len); } @@ -954,7 +955,7 @@ qla27xx_fwdt_template_size(void *p) { struct qla27xx_fwdt_template *tmp = p; - return tmp->template_size; + return le32_to_cpu(tmp->template_size); } int diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index d2a0014e8b21..fb8ab3bc86c2 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -13,7 +13,7 @@ struct __packed qla27xx_fwdt_template { __le32 template_type; __le32 entry_offset; - uint32_t template_size; + __le32 template_size; uint32_t count; /* borrow field for running/residual count */ __le32 entry_count; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index abe7f79bb789..df8644da2c32 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -623,7 +623,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; if (cmd->aborted) { /* Cmd can loop during 
Q-full. tcm_qla2xxx_aborted_task @@ -636,7 +635,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } @@ -664,7 +662,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; int xmit_type = QLA_TGT_XMIT_STATUS; if (cmd->aborted) { @@ -678,7 +675,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } cmd->bufflen = se_cmd->data_length; @@ -926,6 +922,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; @@ -1088,6 +1085,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 5504ab11decc..df43cf6405a8 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -1220,7 +1220,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); exit_host_stats: if (ql_iscsi_stats) - dma_free_coherent(&ha->pdev->dev, host_stats_size, + dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats, iscsi_stats_dma); ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 32965ec76965..44181a2cbf18 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5296,6 +5296,12 @@ static int __init scsi_debug_init(void) pr_err("submit_queues must be 1 or more\n"); return -EINVAL; } + + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); + return -EINVAL; + } + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), GFP_KERNEL); if (sdebug_q_arr == NULL) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index df14597752ec..fb5a7832353c 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -239,6 +239,7 @@ static struct { {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 42f0550d6b11..6f41e4b5a2b8 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, {"LENOVO", "DE_Series", "rdac", }, + {"FUJITSU", "ETERNUS_AHB", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 91c007d26c1e..b5867e1566f4 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -551,7 +551,7 @@ static void 
scsi_uninit_cmd(struct scsi_cmnd *cmd) } } -static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) +static void scsi_free_sgtables(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, @@ -563,11 +563,20 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { - scsi_mq_free_sgtables(cmd); + scsi_free_sgtables(cmd); scsi_uninit_cmd(cmd); scsi_del_cmd_from_list(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) + kblockd_schedule_work(&sdev->requeue_work); + else + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -612,11 +621,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -1063,7 +1068,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) return BLK_STS_OK; out_free_sgtables: - scsi_mq_free_sgtables(cmd); + scsi_free_sgtables(cmd); return ret; } EXPORT_SYMBOL(scsi_init_io); @@ -1214,6 +1219,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + blk_status_t ret; if (!blk_rq_bytes(req)) cmd->sc_data_direction = DMA_NONE; @@ -1223,9 +1229,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, cmd->sc_data_direction = DMA_FROM_DEVICE; if (blk_rq_is_scsi(req)) - return scsi_setup_scsi_cmnd(sdev, req); + ret = scsi_setup_scsi_cmnd(sdev, req); else - return scsi_setup_fs_cmnd(sdev, req); + ret = scsi_setup_fs_cmnd(sdev, req); + + if (ret != BLK_STS_OK) + scsi_free_sgtables(cmd); + + return ret; } static blk_status_t @@ -1723,6 +1734,7 @@ out_put_budget: */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; @@ -2918,6 +2930,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev) } EXPORT_SYMBOL(sdev_enable_disk_events); +static unsigned char designator_prio(const unsigned char *d) +{ + if (d[1] & 0x30) + /* not associated with LUN */ + return 0; + + if (d[3] == 0) + /* invalid length */ + return 0; + + /* + * Order of preference for lun descriptor: + * - SCSI name string + * - NAA IEEE Registered Extended + * - EUI-64 based 16-byte + * - EUI-64 based 12-byte + * - NAA IEEE Registered + * - NAA IEEE Extended + * - EUI-64 based 8-byte + * - SCSI name string (truncated) + * - T10 Vendor ID + * as longer descriptors reduce the likelihood + * of identification clashes.
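The preference list in the comment above is what the switch that follows in designator_prio() encodes numerically; higher return values win when scsi_vpd_lun_id() walks the page 0x83 designators. As a cross-check, the same mapping as a self-contained program fed two synthetic designators (the descriptor bytes are made up; the priorities are copied from the diff):

    #include <stdio.h>

    /* Standalone copy of the designator_prio() mapping. */
    static int designator_prio(const unsigned char *d)
    {
        if (d[1] & 0x30)        /* not associated with the LUN */
            return 0;
        if (d[3] == 0)          /* invalid length */
            return 0;

        switch (d[1] & 0xf) {
        case 8:                 /* SCSI name string */
            return 9;
        case 3:                 /* NAA */
            switch (d[4] >> 4) {
            case 6: return 8;   /* registered extended */
            case 5: return 5;   /* registered */
            case 4: return 4;   /* extended */
            case 3: return 1;   /* locally assigned */
            }
            return 0;
        case 2:                 /* EUI-64 */
            if (d[3] == 16) return 7;
            if (d[3] == 12) return 6;
            if (d[3] == 8)  return 3;
            return 0;
        case 1:                 /* T10 vendor ID */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* synthetic descriptors: d[1] = association|type, d[3] = length */
        unsigned char t10[]  = { 0x02, 0x01, 0x00, 0x08, 'A' };
        unsigned char naa6[] = { 0x01, 0x03, 0x00, 0x10, 0x60 };

        printf("t10 prio %d, naa prio %d -> NAA descriptor wins\n",
               designator_prio(t10), designator_prio(naa6));
        return 0;
    }

Tracking a single numeric priority is what lets the rewritten scsi_vpd_lun_id() below drop the ad-hoc cur_id_type comparisons: a designator is only copied when its priority beats the best one seen so far.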
+ */ + + switch (d[1] & 0xf) { + case 8: + /* SCSI name string, variable-length UTF-8 */ + return 9; + case 3: + switch (d[4] >> 4) { + case 6: + /* NAA registered extended */ + return 8; + case 5: + /* NAA registered */ + return 5; + case 4: + /* NAA extended */ + return 4; + case 3: + /* NAA locally assigned */ + return 1; + default: + break; + } + break; + case 2: + switch (d[3]) { + case 16: + /* EUI64-based, 16 byte */ + return 7; + case 12: + /* EUI64-based, 12 byte */ + return 6; + case 8: + /* EUI64-based, 8 byte */ + return 3; + default: + break; + } + break; + case 1: + /* T10 vendor ID */ + return 1; + default: + break; + } + + return 0; +} + /** * scsi_vpd_lun_id - return a unique device identification * @sdev: SCSI device @@ -2934,7 +3018,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events); */ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) { - u8 cur_id_type = 0xff; + u8 cur_id_prio = 0; u8 cur_id_size = 0; const unsigned char *d, *cur_id_str; const struct scsi_vpd *vpd_pg83; @@ -2947,20 +3031,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) return -ENXIO; } - /* - * Look for the correct descriptor. - * Order of preference for lun descriptor: - * - SCSI name string - * - NAA IEEE Registered Extended - * - EUI-64 based 16-byte - * - EUI-64 based 12-byte - * - NAA IEEE Registered - * - NAA IEEE Extended - * - T10 Vendor ID - * as longer descriptors reduce the likelyhood - * of identification clashes. - */ - /* The id string must be at least 20 bytes + terminating NULL byte */ if (id_len < 21) { rcu_read_unlock(); @@ -2970,8 +3040,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) memset(id, 0, id_len); d = vpd_pg83->data + 4; while (d < vpd_pg83->data + vpd_pg83->len) { - /* Skip designators not referring to the LUN */ - if ((d[1] & 0x30) != 0x00) + u8 prio = designator_prio(d); + + if (prio == 0 || cur_id_prio > prio) goto next_desig; switch (d[1] & 0xf) { @@ -2979,28 +3050,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) /* T10 Vendor ID */ if (cur_id_size > d[3]) break; - /* Prefer anything */ - if (cur_id_type > 0x01 && cur_id_type != 0xff) - break; + cur_id_prio = prio; cur_id_size = d[3]; if (cur_id_size + 4 > id_len) cur_id_size = id_len - 4; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; id_size = snprintf(id, id_len, "t10.%*pE", cur_id_size, cur_id_str); break; case 0x2: /* EUI-64 */ - if (cur_id_size > d[3]) - break; - /* Prefer NAA IEEE Registered Extended */ - if (cur_id_type == 0x3 && - cur_id_size == d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3018,17 +3080,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x3: /* NAA */ - if (cur_id_size > d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3041,26 +3100,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x8: /* SCSI name string */ - if (cur_id_size + 4 > d[3]) + if (cur_id_size > d[3]) break; /* Prefer others for truncated descriptor */ - if (cur_id_size && d[3] > id_len) - break; + if (d[3] > id_len) { + prio = 2; + if (cur_id_prio > prio) + break; + } + cur_id_prio = prio; cur_id_size = 
id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; if (cur_id_size >= id_len) cur_id_size = id_len - 1; memcpy(id, cur_id_str, cur_id_size); - /* Decrease priority for truncated descriptor */ - if (cur_id_size != id_size) - cur_id_size = 6; break; default: break; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 3717eea37ecb..5f0ad8b32e3a 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev, dev_dbg(dev, "scsi resume: %d\n", err); if (err == 0) { + bool was_runtime_suspended; + + was_runtime_suspended = pm_runtime_suspended(dev); + pm_runtime_disable(dev); err = pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev, */ if (!err && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); - - blk_set_runtime_active(sdev->request_queue); + if (was_runtime_suspended) + blk_post_runtime_resume(sdev->request_queue, 0); + else + blk_set_runtime_active(sdev->request_queue); } } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 058079f915f1..79232cef1af1 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1715,15 +1715,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) */ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) { - struct async_scan_data *data; + struct async_scan_data *data = NULL; unsigned long flags; if (strncmp(scsi_scan_type, "sync", 4) == 0) return NULL; + mutex_lock(&shost->scan_mutex); if (shost->async_scan) { shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__); - return NULL; + goto err; } data = kmalloc(sizeof(*data), GFP_KERNEL); @@ -1734,7 +1735,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) goto err; init_completion(&data->prev_finished); - mutex_lock(&shost->scan_mutex); spin_lock_irqsave(shost->host_lock, flags); shost->async_scan = 1; spin_unlock_irqrestore(shost->host_lock, flags); @@ -1749,6 +1749,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) return data; err: + mutex_unlock(&shost->scan_mutex); kfree(data); return NULL; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 271afea654e2..a26df7d6d5d1 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -124,7 +124,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); - return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return sysfs_emit(buf, "%llu\n", + (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); @@ -134,7 +138,7 @@ show_transport_##name(struct device *dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ - return sprintf(buf, format"\n", priv->iscsi_transport->name); \ + return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); @@ -175,7 +179,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sprintf(buf, "%llu\n", (unsigned long long) ep->id); + return sysfs_emit(buf, 
"%llu\n", (unsigned long long) ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -2012,7 +2016,7 @@ static void __iscsi_unbind_session(struct work_struct *work) if (session->target_id == ISCSI_MAX_TARGET) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - return; + goto unbind_session_exit; } target_id = session->target_id; @@ -2024,6 +2028,8 @@ static void __iscsi_unbind_session(struct work_struct *work) ida_simple_remove(&iscsi_sess_ida, target_id); scsi_remove_target(&session->dev); + +unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); } @@ -2763,6 +2769,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) struct iscsi_cls_session *session; int err = 0, value = 0; + if (ev->u.set_param.len > PAGE_SIZE) + return -EINVAL; + session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) @@ -2910,6 +2919,9 @@ iscsi_set_host_param(struct iscsi_transport *transport, if (!transport->set_host_param) return -ENOSYS; + if (ev->u.set_host_param.len > PAGE_SIZE) + return -EINVAL; + shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", @@ -3172,7 +3184,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; @@ -3497,6 +3509,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; + u32 pdu_len; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3504,6 +3517,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; + if (!netlink_capable(skb, CAP_SYS_ADMIN)) + return -EPERM; + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else @@ -3611,6 +3627,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SEND_PDU: + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); + + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); if (conn) ev->r.retcode = transport->send_pdu(conn, @@ -4017,7 +4041,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); + return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); @@ -4026,7 +4050,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->creator); + return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); @@ -4035,7 +4059,7 @@ show_priv_session_target_id(struct device *dev, struct 
device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->target_id); + return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); @@ -4048,8 +4072,8 @@ show_priv_session_##field(struct device *dev, \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if (session->field == -1) \ - return sprintf(buf, "off\n"); \ - return sprintf(buf, format"\n", session->field); \ + return sysfs_emit(buf, "off\n"); \ + return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f8661062ef95..c37dd15d16d2 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, sshdr = &sshdr_tmp; for(i = 0; i < DV_RETRIES; i++) { + /* + * The purpose of the RQF_PM flag below is to bypass the + * SDEV_QUIESCE state. + */ result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, sshdr, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER, - 0, NULL); + RQF_PM, NULL); if (driver_byte(result) != DRIVER_SENSE || sshdr->sense_key != UNIT_ATTENTION) break; @@ -339,7 +343,7 @@ store_spi_transport_##field(struct device *dev, \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ - if (i->f->set_##field) \ + if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ @@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev) */ lock_system_sleep(); + if (scsi_autopm_get_device(sdev)) + goto unlock_system_sleep; + if (unlikely(spi_dv_in_progress(starget))) - goto unlock; + goto put_autopm; if (unlikely(scsi_device_get(sdev))) - goto unlock; + goto put_autopm; spi_dv_in_progress(starget) = 1; buffer = kzalloc(len, GFP_KERNEL); if (unlikely(!buffer)) - goto out_put; + goto put_sdev; /* We need to verify that the actual device will quiesce; the * later target quiesce is just a nice to have */ if (unlikely(scsi_device_quiesce(sdev))) - goto out_free; + goto free_buffer; scsi_target_quiesce(starget); @@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev) spi_initial_dv(starget) = 1; - out_free: +free_buffer: kfree(buffer); - out_put: + +put_sdev: spi_dv_in_progress(starget) = 0; scsi_device_put(sdev); -unlock: +put_autopm: + scsi_autopm_put_device(sdev); + +unlock_system_sleep: unlock_system_sleep(); } EXPORT_SYMBOL(spi_dv_device); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index d4d1104fac99..9fee851c23a5 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport) res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; - scsi_target_block(&shost->shost_gendev); + if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) + /* + * sdev state must be SDEV_TRANSPORT_OFFLINE, transition + * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() + * later is ok though, scsi_internal_device_unblock_nowait() + * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. + */ + scsi_target_block(&shost->shost_gendev); res = rport->state != SRP_RPORT_LOST ? 
i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6a2f8bacface..363a6fcd611b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -568,7 +568,6 @@ static struct scsi_driver sd_template = { .name = "sd", .owner = THIS_MODULE, .probe = sd_probe, - .probe_type = PROBE_PREFER_ASYNCHRONOUS, .remove = sd_remove, .shutdown = sd_shutdown, .pm = &sd_pm_ops, @@ -934,8 +933,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) } } - if (sdp->no_write_same) + if (sdp->no_write_same) { + rq->rq_flags |= RQF_QUIET; return BLK_STS_TARGET; + } if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) return sd_setup_write_same16_cmnd(cmd, false); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 1eab779f812b..ae0947c36534 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -116,6 +116,9 @@ struct scsi_disk { unsigned urswrz : 1; unsigned security : 1; unsigned ignore_medium_access_errors : 1; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cce757506383..9c6bf13daaee 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; - if (__copy_from_user(cmnd, buf, cmd_size)) + if (__copy_from_user(cmnd, buf, cmd_size)) { + sg_remove_request(sfp, srp); return -EFAULT; + } /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there @@ -803,8 +805,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); - if (hp->dxfer_len >= SZ_256M) + if (hp->dxfer_len >= SZ_256M) { + sg_remove_request(sfp, srp); return -EINVAL; + } k = sg_start_req(srp, cmnd); if (k) { diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile index 28985e508b5c..ba62e72dfc61 100644 --- a/drivers/scsi/smartpqi/Makefile +++ b/drivers/scsi/smartpqi/Makefile @@ -1,3 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I. obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o -smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o +smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o smartpqi_kernel_compat.o + diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 79d2af36f655..602792b8ad9c 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1,19 +1,27 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * * Questions/Comments/Bugfixes to storagedev@microchip.com * */ -#include <linux/io-64-nonatomic-lo-hi.h> - #if !defined(_SMARTPQI_H) #define _SMARTPQI_H +#define TORTUGA 0 + #include <scsi/scsi_host.h> #include <linux/bsg-lib.h> @@ -59,7 +67,7 @@ struct pqi_device_registers { /* * controller registers * - * These are defined by the Microsemi implementation. + * These are defined by the Microchip implementation. * * Some registers (those named sis_*) are only used when in * legacy SIS mode before we transition the controller into @@ -79,7 +87,8 @@ struct pqi_ctrl_registers { __le32 sis_ctrl_to_host_doorbell_clear; /* A0h */ u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))]; __le32 sis_driver_scratch; /* B0h */ - u8 reserved5[0xbc - (0xb0 + sizeof(__le32))]; + __le32 sis_product_identifier; /* B4h */ + u8 reserved5[0xbc - (0xb4 + sizeof(__le32))]; __le32 sis_firmware_status; /* BCh */ u8 reserved6[0x1000 - (0xbc + sizeof(__le32))]; __le32 sis_mailbox[8]; /* 1000h */ @@ -128,10 +137,13 @@ struct pqi_iu_header { __le16 iu_length; /* in bytes - does not include the length */ /* of this header */ __le16 response_queue_id; /* specifies the OQ where the */ - /* response IU is to be delivered */ - u8 work_area[2]; /* reserved for driver use */ + /* response IU is to be delivered */ + u16 driver_flags; /* reserved for driver use */ }; +/* manifest constants for pqi_iu_header.driver_flags */ +#define PQI_DRIVER_NONBLOCKABLE_REQUEST 0x1 + /* * According to the PQI spec, the IU header is only the first 4 bytes of our * pqi_iu_header structure. 
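A quick way to see why the sis_product_identifier carve-out above is safe: the new register occupies B4h and shrinks reserved5 by exactly one __le32, so sis_firmware_status stays at BCh. Below is a user-space sketch that pins those offsets at compile time; the field names mirror the driver, but the assertions themselves are illustrative and not part of the patch.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct sis_regs_tail {
	uint8_t  pad[0xb0];              /* everything below B0h */
	uint32_t sis_driver_scratch;     /* B0h */
	uint32_t sis_product_identifier; /* B4h, carved out of reserved5 */
	uint8_t  reserved5[0xbc - 0xb8]; /* what is left of the old gap */
	uint32_t sis_firmware_status;    /* BCh, must not move */
};

static_assert(offsetof(struct sis_regs_tail, sis_product_identifier) == 0xb4,
	      "sis_product_identifier must sit at B4h");
static_assert(offsetof(struct sis_regs_tail, sis_firmware_status) == 0xbc,
	      "shrinking reserved5 must not shift BCh");

int main(void) { return 0; }

The same discipline applies to the pqi_iu_header change: replacing the two-byte work_area with a u16 driver_flags keeps the header size unchanged, which is what lets PQI_DRIVER_NONBLOCKABLE_REQUEST be introduced without disturbing the wire format.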
@@ -256,6 +268,7 @@ struct pqi_device_capability { }; #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4 +#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS 3 struct pqi_raid_path_request { struct pqi_iu_header header; @@ -276,9 +289,10 @@ struct pqi_raid_path_request { u8 reserved4 : 2; u8 additional_cdb_bytes_usage : 3; u8 reserved5 : 3; - u8 cdb[32]; - struct pqi_sg_descriptor - sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; + u8 cdb[16]; + u8 reserved6[12]; + __le32 timeout; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; }; struct pqi_aio_path_request { @@ -305,8 +319,73 @@ struct pqi_aio_path_request { u8 cdb_length; u8 lun_number[8]; u8 reserved4[4]; - struct pqi_sg_descriptor - sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; +}; + +#define PQI_RAID1_NVME_XFER_LIMIT (32 * 1024) /* 32 KiB */ + +struct pqi_aio_r1_path_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 volume_id; /* ID of the RAID volume */ + __le32 it_nexus_1; /* IT nexus of the 1st drive in the RAID volume */ + __le32 it_nexus_2; /* IT nexus of the 2nd drive in the RAID volume */ + __le32 it_nexus_3; /* IT nexus of the 3rd drive in the RAID volume */ + __le32 data_length; /* total bytes to read/write */ + u8 data_direction : 2; + u8 partial : 1; + u8 memory_type : 1; + u8 fence : 1; + u8 encryption_enable : 1; + u8 reserved : 2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved2 : 1; + __le16 data_encryption_key_index; + u8 cdb[16]; + __le16 error_index; + u8 num_sg_descriptors; + u8 cdb_length; + u8 num_drives; /* number of drives in the RAID volume (2 or 3) */ + u8 reserved3[3]; + __le32 encrypt_tweak_lower; + __le32 encrypt_tweak_upper; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; +}; + +#define PQI_DEFAULT_MAX_WRITE_RAID_5_6 (8 * 1024U) +#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA (~0U) +#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME (32 * 1024U) + +struct pqi_aio_r56_path_request { + struct pqi_iu_header header; + __le16 request_id; + __le16 volume_id; /* ID of the RAID volume */ + __le32 data_it_nexus; /* IT nexus for the data drive */ + __le32 p_parity_it_nexus; /* IT nexus for the P parity drive */ + __le32 q_parity_it_nexus; /* IT nexus for the Q parity drive */ + __le32 data_length; /* total bytes to read/write */ + u8 data_direction : 2; + u8 partial : 1; + u8 mem_type : 1; /* 0 = PCIe, 1 = DDR */ + u8 fence : 1; + u8 encryption_enable : 1; + u8 reserved : 2; + u8 task_attribute : 3; + u8 command_priority : 4; + u8 reserved1 : 1; + __le16 data_encryption_key_index; + u8 cdb[16]; + __le16 error_index; + u8 num_sg_descriptors; + u8 cdb_length; + u8 xor_multiplier; + u8 reserved2[3]; + __le32 encrypt_tweak_lower; + __le32 encrypt_tweak_upper; + __le64 row; /* row = logical LBA/blocks per row */ + u8 reserved3[8]; + struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS]; }; struct pqi_io_response { @@ -351,13 +430,13 @@ struct pqi_event_config { #define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0 #define PQI_EVENT_OFA_QUIESCE 0x1 -#define PQI_EVENT_OFA_CANCELLED 0x2 +#define PQI_EVENT_OFA_CANCELED 0x2 struct pqi_event_response { struct pqi_iu_header header; u8 event_type; u8 reserved2 : 7; - u8 request_acknowlege : 1; + u8 request_acknowledge : 1; __le16 event_id; __le32 additional_event_id; union { @@ -385,7 +464,8 @@ struct pqi_task_management_request { struct pqi_iu_header header; __le16 request_id; __le16 nexus_id; - u8 
reserved[4]; + u8 reserved[2]; + __le16 timeout; u8 lun_number[8]; __le16 protocol_specific; __le16 outbound_queue_id_to_manage; @@ -439,18 +519,14 @@ struct pqi_vendor_general_response { #define PQI_OFA_SIGNATURE "OFA_QRM" #define PQI_OFA_MAX_SG_DESCRIPTORS 64 -#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \ - (offsetof(struct pqi_ofa_memory, sg_descriptor) + \ - (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor))) - struct pqi_ofa_memory { __le64 signature; /* "OFA_QRM" */ - __le16 version; /* version of this struct(1 = 1st version) */ + __le16 version; /* version of this struct (1 = 1st version) */ u8 reserved[62]; __le32 bytes_allocated; /* total allocated memory in bytes */ __le16 num_memory_descriptors; u8 reserved1[2]; - struct pqi_sg_descriptor sg_descriptor[1]; + struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS]; }; struct pqi_aio_error_info { @@ -480,6 +556,9 @@ struct pqi_raid_error_info { #define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13 #define PQI_REQUEST_IU_RAID_PATH_IO 0x14 #define PQI_REQUEST_IU_AIO_PATH_IO 0x15 +#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO 0x18 +#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO 0x19 +#define PQI_REQUEST_IU_AIO_PATH_RAID1_IO 0x1A #define PQI_REQUEST_IU_GENERAL_ADMIN 0x60 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73 @@ -582,6 +661,7 @@ struct pqi_raid_error_info { /* these values are defined by the PQI spec */ #define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255 #define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535 + #define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64 #define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16 #define PQI_ADMIN_INDEX_ALIGNMENT 64 @@ -651,7 +731,7 @@ struct pqi_admin_queues_aligned { struct pqi_admin_queues { void *iq_element_array; void *oq_element_array; - pqi_index_t *iq_ci; + pqi_index_t __iomem *iq_ci; pqi_index_t __iomem *oq_pi; dma_addr_t iq_element_array_bus_addr; dma_addr_t oq_element_array_bus_addr; @@ -676,8 +756,8 @@ struct pqi_queue_group { dma_addr_t oq_element_array_bus_addr; __le32 __iomem *iq_pi[2]; pqi_index_t iq_pi_copy[2]; - pqi_index_t __iomem *iq_ci[2]; - pqi_index_t __iomem *oq_pi; + pqi_index_t __iomem *iq_ci[2]; + pqi_index_t __iomem *oq_pi; dma_addr_t iq_ci_bus_addr[2]; dma_addr_t oq_pi_bus_addr; __le32 __iomem *oq_ci; @@ -690,7 +770,7 @@ struct pqi_event_queue { u16 oq_id; u16 int_msg_num; void *oq_element_array; - pqi_index_t __iomem *oq_pi; + pqi_index_t __iomem *oq_pi; dma_addr_t oq_element_array_bus_addr; dma_addr_t oq_pi_bus_addr; __le32 __iomem *oq_ci; @@ -698,7 +778,11 @@ struct pqi_event_queue { }; #define PQI_DEFAULT_QUEUE_GROUP 0 +#if TORTUGA +#define PQI_MAX_QUEUE_GROUPS 1 +#else #define PQI_MAX_QUEUE_GROUPS PQI_MAX_MSIX_VECTORS +#endif struct pqi_encryption_info { u16 data_encryption_key_index; @@ -756,11 +840,29 @@ struct pqi_config_table_firmware_features { u8 features_supported[]; /* u8 features_requested_by_host[]; */ /* u8 features_enabled[]; */ +/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. 
*/ +/* __le16 firmware_max_known_feature; */ +/* __le16 host_max_known_feature; */ }; -#define PQI_FIRMWARE_FEATURE_OFA 0 -#define PQI_FIRMWARE_FEATURE_SMP 1 -#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 +#define PQI_FIRMWARE_FEATURE_OFA 0 +#define PQI_FIRMWARE_FEATURE_SMP 1 +#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2 +#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3 +#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4 +#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5 +#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6 +#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7 +#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8 +#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9 +#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10 +#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 +#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12 +#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13 +#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14 +#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15 +#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16 +#define PQI_FIRMWARE_FEATURE_MAXIMUM 16 struct pqi_config_table_debug { struct pqi_config_table_section_header header; @@ -810,10 +912,17 @@ union pqi_reset_register { #define PQI_RESET_POLL_INTERVAL_MSECS 100 +#if TORTUGA +#define PQI_MAX_OUTSTANDING_REQUESTS 32 +#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP PQI_MAX_OUTSTANDING_REQUESTS +#define PQI_MAX_TRANSFER_SIZE (512 * 1024U) +#define PQI_MAX_TRANSFER_SIZE_KDUMP PQI_MAX_TRANSFER_SIZE +#else #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 -#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U) +#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) #define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) +#endif #define RAID_MAP_MAX_ENTRIES 1024 @@ -826,10 +935,17 @@ union pqi_reset_register { struct report_lun_header { __be32 list_length; - u8 extended_response; + u8 flags; u8 reserved[3]; }; +/* for flags field of struct report_lun_header */ +#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0) +#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5) +#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6) + +#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1) + struct report_log_lun_extended_entry { u8 lunid[8]; u8 volume_id[16]; @@ -851,7 +967,7 @@ struct report_phys_lun_extended_entry { }; /* for device_flags field of struct report_phys_lun_extended_entry */ -#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8 +#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8 struct report_phys_lun_extended { struct report_lun_header header; @@ -864,7 +980,7 @@ struct raid_map_disk_data { u8 reserved[2]; }; -/* constants for flags field of RAID map */ +/* for flags field of RAID map */ #define RAID_MAP_ENCRYPTION_ENABLED 0x1 struct raid_map { @@ -894,8 +1010,65 @@ struct raid_map { #pragma pack() +struct pqi_scsi_dev_raid_map_data { + bool is_write; + u8 raid_level; + u32 map_index; + u64 first_block; + u64 last_block; + u32 data_length; + u32 block_cnt; + u32 blocks_per_row; + u64 first_row; + u64 last_row; + u32 first_row_offset; + u32 last_row_offset; + u32 first_column; + u32 last_column; + u64 r5or6_first_row; + u64 r5or6_last_row; + u32 r5or6_first_row_offset; + u32 r5or6_last_row_offset; + u32 r5or6_first_column; + u32 r5or6_last_column; + u16 data_disks_per_row; + u32 total_disks_per_row; + u16 layout_map_count; + u32 stripesize; + u16 strip_size; + u32 first_group; + u32 last_group; + u32 map_row; + u32 aio_handle; + u64 disk_block; + u32 disk_block_cnt; + u8 cdb[16]; + u8 
cdb_length; + + /* RAID 1 specific */ +#define NUM_RAID1_MAP_ENTRIES 3 + u32 num_it_nexus_entries; + u32 it_nexus[NUM_RAID1_MAP_ENTRIES]; + + /* RAID 5 / RAID 6 specific */ + u32 p_parity_it_nexus; /* aio_handle */ + u32 q_parity_it_nexus; /* aio_handle */ + u8 xor_mult; + u64 row; + u64 stripe_lba; + u32 p_index; + u32 q_index; +}; + #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" +#define NUM_STREAMS_PER_LUN 8 + +struct pqi_stream_data { + u64 next_lba; + u32 last_accessed; +}; + struct pqi_scsi_dev { int devtype; /* as reported by INQUIRY commmand */ u8 device_type; /* as reported by */ @@ -907,7 +1080,6 @@ struct pqi_scsi_dev { u8 scsi3addr[8]; __be64 wwid; u8 volume_id[16]; - u8 unique_id[16]; u8 is_physical_device : 1; u8 is_external_raid_device : 1; u8 is_expander_smp_device : 1; @@ -916,8 +1088,8 @@ struct pqi_scsi_dev { u8 new_device : 1; u8 keep_device : 1; u8 volume_offline : 1; + u8 rescan : 1; bool aio_enabled; /* only valid for physical disks */ - bool in_reset; bool in_remove; bool device_offline; u8 vendor[8]; /* bytes 8-15 of inquiry data */ @@ -936,11 +1108,12 @@ struct pqi_scsi_dev { u8 phy_connected_dev_type; u8 box[8]; u16 phys_connector[8]; + u8 phy_id; bool raid_bypass_configured; /* RAID bypass configured */ bool raid_bypass_enabled; /* RAID bypass enabled */ - int offload_to_mirror; /* Send next RAID bypass request */ - /* to mirror drive. */ + u32 next_bypass_group; struct raid_map *raid_map; /* RAID bypass map */ + u32 max_transfer_encrypted; struct pqi_sas_port *sas_port; struct scsi_device *sdev; @@ -950,17 +1123,16 @@ struct pqi_scsi_dev { struct list_head add_list_entry; struct list_head delete_list_entry; + struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; atomic_t scsi_cmds_outstanding; + atomic_t raid_bypass_cnt; + u8 page_83_identifier[16]; }; /* VPD inquiry pages */ -#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */ -#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ #define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ #define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */ #define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */ -#define SCSI_VPD_HEADER_SZ 4 -#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */ #define VPD_PAGE (1 << 8) @@ -1060,10 +1232,8 @@ struct pqi_io_request { struct pqi_event { bool pending; u8 event_type; - __le16 event_id; - __le32 additional_event_id; - __le32 ofa_bytes_requested; - __le16 ofa_cancel_reason; + u16 event_id; + u32 additional_event_id; }; #define PQI_RESERVED_IO_SLOTS_LUN_RESET 1 @@ -1073,13 +1243,20 @@ struct pqi_event { (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \ PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS) +#define PQI_CTRL_PRODUCT_ID_GEN1 0 +#define PQI_CTRL_PRODUCT_ID_GEN2 7 +#define PQI_CTRL_PRODUCT_REVISION_A 0 +#define PQI_CTRL_PRODUCT_REVISION_B 1 + struct pqi_ctrl_info { unsigned int ctrl_id; struct pci_dev *pci_dev; - char firmware_version[11]; + char firmware_version[32]; char serial_number[17]; char model[17]; char vendor[9]; + u8 product_id; + u8 product_revision; void __iomem *iomem_base; struct pqi_ctrl_registers __iomem *registers; struct pqi_device_registers __iomem *pqi_registers; @@ -1109,6 +1286,7 @@ struct pqi_ctrl_info { u16 max_inbound_iu_length_per_firmware; u16 max_inbound_iu_length; unsigned int max_sg_per_iu; + unsigned int max_sg_per_r56_iu; void *admin_queue_memory_base; u32 admin_queue_memory_length; dma_addr_t admin_queue_memory_base_dma_handle; @@ -1122,21 +1300,36 @@ struct pqi_ctrl_info { int 
max_msix_vectors; int num_msix_vectors_enabled; int num_msix_vectors_initialized; + u32 msix_vectors[PQI_MAX_MSIX_VECTORS]; + void *intr_data[PQI_MAX_MSIX_VECTORS]; int event_irq; struct Scsi_Host *scsi_host; struct mutex scan_mutex; struct mutex lun_reset_mutex; - struct mutex ofa_mutex; /* serialize ofa */ bool controller_online; bool block_requests; - bool in_shutdown; - bool in_ofa; + bool scan_blocked; u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; u8 pqi_reset_quiesce_supported : 1; u8 soft_reset_handshake_supported : 1; + u8 raid_iu_timeout_supported : 1; + u8 tmf_iu_timeout_supported : 1; + u8 unique_wwid_in_report_phys_lun_supported : 1; + u8 enable_r1_writes : 1; + u8 enable_r5_writes : 1; + u8 enable_r6_writes : 1; + u8 lv_drive_type_mix_valid : 1; + u8 enable_stream_detection : 1; + + u8 ciss_report_log_flags; + u32 max_transfer_encrypted_sas_sata; + u32 max_transfer_encrypted_nvme; + u32 max_write_raid_5_6; + u32 max_write_raid_1_10_2drive; + u32 max_write_raid_1_10_3drive; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; @@ -1166,13 +1359,16 @@ struct pqi_ctrl_info { atomic_t num_blocked_threads; wait_queue_head_t block_requests_wait; - struct list_head raid_bypass_retry_list; - spinlock_t raid_bypass_retry_list_lock; - struct work_struct raid_bypass_retry_work; + struct mutex ofa_mutex; + struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; + dma_addr_t pqi_ofa_mem_dma_handle; + void **pqi_ofa_chunk_virt_addr; + struct work_struct ofa_memory_alloc_work; + struct work_struct ofa_quiesce_work; + u32 ofa_bytes_requested; + u16 ofa_cancel_reason; - struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; - dma_addr_t pqi_ofa_mem_dma_handle; - void **pqi_ofa_chunk_virt_addr; + atomic_t total_scmds_outstanding; }; enum pqi_ctrl_mode { @@ -1191,15 +1387,12 @@ enum pqi_ctrl_mode { #define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ #define CISS_GET_RAID_MAP 0xc8 -/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */ -#define CISS_REPORT_LOG_EXTENDED 0x1 -#define CISS_REPORT_PHYS_EXTENDED 0x2 - /* BMIC commands */ #define BMIC_IDENTIFY_CONTROLLER 0x11 #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 #define BMIC_READ 0x26 #define BMIC_WRITE 0x27 +#define BMIC_SENSE_FEATURE 0x61 #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 #define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 #define BMIC_CSMI_PASSTHRU 0x68 @@ -1208,7 +1401,7 @@ enum pqi_ctrl_mode { #define BMIC_SET_DIAG_OPTIONS 0xf4 #define BMIC_SENSE_DIAG_OPTIONS 0xf5 -#define CSMI_CC_SAS_SMP_PASSTHRU 0X17 +#define CSMI_CC_SAS_SMP_PASSTHRU 0x17 #define SA_FLUSH_CACHE 0x1 @@ -1219,6 +1412,19 @@ enum pqi_ctrl_mode { (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \ CISS_GET_LEVEL_2_TARGET((lunid))) +#define LV_GET_DRIVE_TYPE_MIX(lunid) ((lunid)[6]) + +#define LV_DRIVE_TYPE_MIX_UNKNOWN 0 +#define LV_DRIVE_TYPE_MIX_NO_RESTRICTION 1 +#define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY 2 +#define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY 3 +#define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY 4 +#define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY 5 +#define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY 6 +#define LV_DRIVE_TYPE_MIX_SAS_ONLY 7 +#define LV_DRIVE_TYPE_MIX_SATA_ONLY 8 +#define LV_DRIVE_TYPE_MIX_NVME_ONLY 9 + #define NO_TIMEOUT ((unsigned long) -1) #pragma pack(1) @@ -1226,7 +1432,7 @@ enum pqi_ctrl_mode { struct bmic_identify_controller { u8 configured_logical_drive_count; __le32 configuration_signature; - u8 firmware_version[4]; + u8 firmware_version_short[4]; u8 reserved[145]; __le16 extended_logical_unit_count; u8 reserved1[34]; 
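LV_GET_DRIVE_TYPE_MIX above simply reads byte 6 of the 8-byte LUN ID, and the LV_DRIVE_TYPE_MIX_* codes enumerate what kind of drives back a logical volume; the lv_drive_type_mix_valid bit added to pqi_ctrl_info suggests the driver records whether this byte is trustworthy before consulting it. A minimal user-space decoder for those codes (table order follows the defines; the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

static const char *const mix_names[] = {
	"unknown", "no restriction", "SAS HDD only", "SATA HDD only",
	"SAS or SATA SSD only", "SAS SSD only", "SATA SSD only",
	"SAS only", "SATA only", "NVMe only",
};

/* Decode LV_GET_DRIVE_TYPE_MIX(lunid), i.e. lunid[6]. */
static const char *drive_type_mix(const uint8_t *lunid)
{
	uint8_t mix = lunid[6];

	return mix < sizeof(mix_names) / sizeof(mix_names[0]) ?
		mix_names[mix] : "invalid";
}

int main(void)
{
	uint8_t lunid[8] = { 0, 0, 0, 0, 0, 0, 9, 0 }; /* 9 = NVMe only */

	printf("%s\n", drive_type_mix(lunid));
	return 0;
}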
@@ -1234,20 +1440,29 @@ struct bmic_identify_controller { u8 reserved2[8]; u8 vendor_id[8]; u8 product_id[16]; - u8 reserved3[68]; + u8 reserved3[62]; + __le32 extra_controller_flags; + u8 reserved4[2]; u8 controller_mode; - u8 reserved4[32]; + u8 spare_part_number[32]; + u8 firmware_version_long[32]; }; +/* constants for extra_controller_flags field of bmic_identify_controller */ +#define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED 0x20000000 + struct bmic_sense_subsystem_info { u8 reserved[44]; u8 ctrl_serial_number[16]; }; -#define SA_EXPANDER_SMP_DEVICE 0x05 -#define SA_CONTROLLER_DEVICE 0x07 -/*SCSI Invalid Device Type for SAS devices*/ -#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff +/* constants for device_type field */ +#define SA_DEVICE_TYPE_SATA 0x1 +#define SA_DEVICE_TYPE_SAS 0x2 +#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5 +#define SA_DEVICE_TYPE_SES 0x6 +#define SA_DEVICE_TYPE_CONTROLLER 0x7 +#define SA_DEVICE_TYPE_NVME 0x9 struct bmic_identify_physical_device { u8 scsi_bus; /* SCSI Bus number on controller */ @@ -1273,7 +1488,7 @@ struct bmic_identify_physical_device { __le32 rpm; /* drive rotational speed in RPM */ u8 device_type; /* type of drive */ u8 sata_version; /* only valid when device_type = */ - /* BMIC_DEVICE_TYPE_SATA */ + /* SA_DEVICE_TYPE_SATA */ __le64 big_total_block_count; __le64 ris_starting_lba; __le32 ris_size; @@ -1329,6 +1544,34 @@ struct bmic_identify_physical_device { u8 padding_to_multiple_of_512[9]; }; +#define BMIC_SENSE_FEATURE_IO_PAGE 0x8 +#define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE 0x2 + +struct bmic_sense_feature_buffer_header { + u8 page_code; + u8 subpage_code; + __le16 buffer_length; +}; + +struct bmic_sense_feature_page_header { + u8 page_code; + u8 subpage_code; + __le16 page_length; +}; + +struct bmic_sense_feature_io_page_aio_subpage { + struct bmic_sense_feature_page_header header; + u8 firmware_read_support; + u8 driver_read_support; + u8 firmware_write_support; + u8 driver_write_support; + __le16 max_transfer_encrypted_sas_sata; + __le16 max_transfer_encrypted_nvme; + __le16 max_write_raid_5_6; + __le16 max_write_raid_1_10_2drive; + __le16 max_write_raid_1_10_3drive; +}; + struct bmic_smp_request { u8 frame_type; u8 function; @@ -1403,29 +1646,9 @@ static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) return *((struct pqi_ctrl_info **)hostdata); } -static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) -{ - return !ctrl_info->controller_online; -} - -static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) -{ - atomic_inc(&ctrl_info->num_busy_threads); -} - -static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) -{ - atomic_dec(&ctrl_info->num_busy_threads); -} - -static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) -{ - return ctrl_info->block_requests; -} - void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy); - +int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd); int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info); void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info); int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, @@ -1440,4 +1663,4 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, extern struct sas_function_template pqi_sas_transport_functions; -#endif /* _SMARTPQI_H */ +#endif /* _SMARTPQI_H */ diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index ea5409bebf57..93a088acdb9d 100644 --- 
a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1,10 +1,18 @@ -// SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * * Questions/Comments/Bugfixes to storagedev@microchip.com * */ @@ -19,42 +27,51 @@ #include <linux/bcd.h> #include <linux/reboot.h> #include <linux/cciss_ioctl.h> -#include <linux/blk-mq-pci.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_transport_sas.h> +#include <scsi/scsi_dbg.h> #include <asm/unaligned.h> #include "smartpqi.h" #include "smartpqi_sis.h" +#include "smartpqi_kernel_compat.h" #if !defined(BUILD_TIMESTAMP) #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "1.2.8-026" -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 +#define DRIVER_VERSION "2.1.8-040" +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 1 #define DRIVER_RELEASE 8 -#define DRIVER_REVISION 26 +#define DRIVER_REVISION 32 -#define DRIVER_NAME "Microsemi PQI Driver (v" \ +#define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" #define DRIVER_NAME_SHORT "smartpqi" #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) +#define PQI_1MB_SECTORS 2048 /* sectors */ -MODULE_AUTHOR("Microsemi"); -MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " - DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); +#define PQI_POST_RESET_DELAY_SECS 5 +#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 + +MODULE_AUTHOR("Microchip"); +#if TORTUGA +MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " + DRIVER_VERSION " (d-39ee840/s-eca423e)" " (d147/s325)"); +#else +MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " + DRIVER_VERSION " (d-39ee840/s-eca423e)"); +#endif +MODULE_SUPPORTED_DEVICE("Microchip Smart Family Controllers"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); static void pqi_ctrl_offline_worker(struct work_struct *work); -static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, @@ -62,20 +79,27 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request); static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, struct pqi_iu_header *request, unsigned int flags, - struct pqi_raid_error_info *error_info, unsigned long timeout_msecs); + struct pqi_raid_error_info *error_info); static int 
pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, unsigned int cdb_length, struct pqi_queue_group *queue_group, struct pqi_encryption_info *encryption_info, bool raid_bypass); +static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd); +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd); static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); -static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info); -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, - u32 bytes_requested); +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); +static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device, unsigned long timeout_secs); + struct pqi_scsi_dev *device, unsigned long timeout_msecs); /* for flags argument to pqi_submit_raid_request_synchronous() */ #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 @@ -122,40 +146,43 @@ static unsigned int pqi_supported_event_types[] = { static int pqi_disable_device_id_wildcards; module_param_named(disable_device_id_wildcards, - pqi_disable_device_id_wildcards, int, 0644); + pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(disable_device_id_wildcards, "Disable device ID wildcards."); static int pqi_disable_heartbeat; module_param_named(disable_heartbeat, - pqi_disable_heartbeat, int, 0644); + pqi_disable_heartbeat, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(disable_heartbeat, "Disable heartbeat."); static int pqi_disable_ctrl_shutdown; module_param_named(disable_ctrl_shutdown, - pqi_disable_ctrl_shutdown, int, 0644); + pqi_disable_ctrl_shutdown, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(disable_ctrl_shutdown, "Disable controller shutdown when controller locked up."); static char *pqi_lockup_action_param; module_param_named(lockup_action, - pqi_lockup_action_param, charp, 0644); + pqi_lockup_action_param, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" "\t\tSupported: none, reboot, panic\n" "\t\tDefault: none"); static int pqi_expose_ld_first; module_param_named(expose_ld_first, - pqi_expose_ld_first, int, 0644); -MODULE_PARM_DESC(expose_ld_first, - "Expose logical drives before physical drives."); + pqi_expose_ld_first, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); static int pqi_hide_vsep; module_param_named(hide_vsep, - pqi_hide_vsep, int, 0644); -MODULE_PARM_DESC(hide_vsep, - "Hide the virtual SEP for direct attached drives."); + pqi_hide_vsep, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); + +static int pqi_limit_xfer_to_1MB; +module_param_named(limit_xfer_size_to_1MB, + pqi_limit_xfer_to_1MB, int, S_IRUGO | S_IWUSR); 
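The permission changes in the module_param_named() calls above are purely cosmetic: S_IRUGO | S_IWUSR is the macro spelling of the octal 0644 these parameters used before (world-readable, owner-writable). A one-line user-space check of the equivalence, with S_IRUGO expanded since that name exists only in the kernel:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO == S_IRUSR | S_IRGRP | S_IROTH == 0444 */
	assert(((S_IRUSR | S_IRGRP | S_IROTH) | S_IWUSR) == 0644);
	return 0;
}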
+MODULE_PARM_DESC(limit_xfer_size_to_1MB, "Limit max transfer size to 1MB."); static char *raid_levels[] = { "RAID-0", @@ -163,8 +190,8 @@ static char *raid_levels[] = { "RAID-1(1+0)", "RAID-5", "RAID-5+1", - "RAID-ADG", - "RAID-1(ADM)", + "RAID-6", + "RAID-1(Triple)", }; static char *pqi_raid_level_to_string(u8 raid_level) @@ -181,21 +208,10 @@ static char *pqi_raid_level_to_string(u8 raid_level) #define SA_RAID_5 3 /* also used for RAID 50 */ #define SA_RAID_51 4 #define SA_RAID_6 5 /* also used for RAID 60 */ -#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ -#define SA_RAID_MAX SA_RAID_ADM +#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ +#define SA_RAID_MAX SA_RAID_TRIPLE #define SA_RAID_UNKNOWN 0xff -static inline void pqi_scsi_done(struct scsi_cmnd *scmd) -{ - pqi_prep_for_scsi_done(scmd); - scmd->scsi_done(scmd); -} - -static inline void pqi_disable_write_same(struct scsi_device *sdev) -{ - sdev->no_write_same = 1; -} - static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) { return memcmp(scsi3addr1, scsi3addr2, 8) == 0; @@ -211,6 +227,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) return scsi3addr[2] != 0; } +static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) +{ + return !ctrl_info->controller_online; +} + static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) { if (ctrl_info->controller_online) @@ -223,8 +244,7 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr) return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); } -static inline enum pqi_ctrl_mode pqi_get_ctrl_mode( - struct pqi_ctrl_info *ctrl_info) +static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) { return sis_read_driver_scratch(ctrl_info); } @@ -235,54 +255,131 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, sis_write_driver_scratch(ctrl_info, mode); } +static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->scan_blocked = true; + mutex_lock(&ctrl_info->scan_mutex); +} + +static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->scan_blocked = false; + mutex_unlock(&ctrl_info->scan_mutex); +} + +static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->scan_blocked; +} + +static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) +{ + mutex_lock(&ctrl_info->lun_reset_mutex); +} + +static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) +{ + mutex_unlock(&ctrl_info->lun_reset_mutex); +} + +static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) +{ + struct Scsi_Host *shost; + unsigned int num_loops; + int msecs_sleep; + + shost = ctrl_info->scsi_host; + + scsi_block_requests(shost); + + num_loops = 0; + msecs_sleep = 20; + while (pqi_scsi_host_busy(shost)) { + num_loops++; + if (num_loops == 10) { + dev_warn(&ctrl_info->pci_dev->dev, + "shost %d waited %d milliseconds to become unbusy\n", + shost->host_no, num_loops * msecs_sleep); + msecs_sleep = 500; + } + msleep(msecs_sleep); + if (num_loops % 20 == 0) + dev_warn(&ctrl_info->pci_dev->dev, + "shost %d waited %d more seconds to become unbusy\n", + shost->host_no, msecs_sleep * 20 / 1000); + } +} + +static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) +{ + scsi_unblock_requests(ctrl_info->scsi_host); +} + +static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_inc(&ctrl_info->num_busy_threads); +} +
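pqi_ctrl_busy()/pqi_ctrl_unbusy() above, together with the num_blocked_threads counter used just below, implement a counting quiesce: every thread touching the controller bumps the busy count, a thread that parks itself on the block_requests waitqueue also bumps the blocked count, and the controller counts as quiet once busy <= blocked. A stand-alone sketch of that invariant, assuming C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int num_busy_threads;
static atomic_int num_blocked_threads;

static void ctrl_busy(void)      { atomic_fetch_add(&num_busy_threads, 1); }
static void ctrl_unbusy(void)    { atomic_fetch_sub(&num_busy_threads, 1); }

/* A busy thread that parks itself must announce it, or quiesce hangs. */
static void ctrl_blocked(void)   { atomic_fetch_add(&num_blocked_threads, 1); }
static void ctrl_unblocked(void) { atomic_fetch_sub(&num_blocked_threads, 1); }

/* Quiesced: every remaining "busy" thread is one that is parked. */
static bool ctrl_quiesced(void)
{
	return atomic_load(&num_busy_threads) <=
		atomic_load(&num_blocked_threads);
}

int main(void)
{
	ctrl_busy();
	ctrl_blocked();	/* this thread is busy but waiting, not working */
	return ctrl_quiesced() ? 0 : 1;	/* exits 0: nothing left running */
}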
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) +{ + atomic_dec(&ctrl_info->num_busy_threads); +} + +static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_requests; +} + static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) { ctrl_info->block_requests = true; - scsi_block_requests(ctrl_info->scsi_host); } static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) { ctrl_info->block_requests = false; wake_up_all(&ctrl_info->block_requests_wait); - pqi_retry_raid_bypass_requests(ctrl_info); - scsi_unblock_requests(ctrl_info->scsi_host); } -static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, - unsigned long timeout_msecs) +static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) { - unsigned long remaining_msecs; - if (!pqi_ctrl_blocked(ctrl_info)) - return timeout_msecs; + return; atomic_inc(&ctrl_info->num_blocked_threads); - - if (timeout_msecs == NO_TIMEOUT) { - wait_event(ctrl_info->block_requests_wait, - !pqi_ctrl_blocked(ctrl_info)); - remaining_msecs = timeout_msecs; - } else { - unsigned long remaining_jiffies; - - remaining_jiffies = - wait_event_timeout(ctrl_info->block_requests_wait, - !pqi_ctrl_blocked(ctrl_info), - msecs_to_jiffies(timeout_msecs)); - remaining_msecs = jiffies_to_msecs(remaining_jiffies); - } - + wait_event(ctrl_info->block_requests_wait, + !pqi_ctrl_blocked(ctrl_info)); atomic_dec(&ctrl_info->num_blocked_threads); - - return remaining_msecs; } +#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 + static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) { + unsigned long start_jiffies; + unsigned long warning_timeout; + bool displayed_warning; + + displayed_warning = false; + start_jiffies = jiffies; + warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies; + while (atomic_read(&ctrl_info->num_busy_threads) > - atomic_read(&ctrl_info->num_blocked_threads)) - usleep_range(1000, 2000); + atomic_read(&ctrl_info->num_blocked_threads)) { + if (time_after(jiffies, warning_timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "waiting %u seconds for driver activity to quiesce\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000); + displayed_warning = true; + warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies; + } + msleep(1); + } + + if (displayed_warning) + dev_warn(&ctrl_info->pci_dev->dev, + "driver activity quiesced after waiting for %u seconds\n", + jiffies_to_msecs(jiffies - start_jiffies) / 1000); } static inline bool pqi_device_offline(struct pqi_scsi_dev *device) @@ -290,34 +387,25 @@ static inline bool pqi_device_offline(struct pqi_scsi_dev *device) return device->device_offline; } -static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) -{ - device->in_reset = true; -} - -static inline void pqi_device_reset_done(struct pqi_scsi_dev *device) -{ - device->in_reset = false; -} - -static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) -{ - return device->in_reset; -} - static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) { - ctrl_info->in_ofa = true; + mutex_lock(&ctrl_info->ofa_mutex); } static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) { - ctrl_info->in_ofa = false; + mutex_unlock(&ctrl_info->ofa_mutex); } -static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info) +static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) { - return ctrl_info->in_ofa; +
mutex_lock(&ctrl_info->ofa_mutex); + mutex_unlock(&ctrl_info->ofa_mutex); +} + +static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) +{ + return mutex_is_locked(&ctrl_info->ofa_mutex); } static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) @@ -325,19 +413,32 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) device->in_remove = true; } -static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) { - return device->in_remove && !ctrl_info->in_shutdown; + return device->in_remove; } -static inline void pqi_schedule_rescan_worker_with_delay( - struct pqi_ctrl_info *ctrl_info, unsigned long delay) +static inline int pqi_event_type_to_event_index(unsigned int event_type) +{ + int index; + + for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) + if (event_type == pqi_supported_event_types[index]) + return index; + + return -1; +} + +static inline bool pqi_is_supported_event(unsigned int event_type) +{ + return pqi_event_type_to_event_index(event_type) != -1; +} + +static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, + unsigned long delay) { if (pqi_ctrl_offline(ctrl_info)) return; - if (pqi_ctrl_in_ofa(ctrl_info)) - return; schedule_delayed_work(&ctrl_info->rescan_work, delay); } @@ -349,8 +450,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ) -static inline void pqi_schedule_rescan_worker_delayed( - struct pqi_ctrl_info *ctrl_info) +static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) { pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); } @@ -360,6 +460,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) cancel_delayed_work_sync(&ctrl_info->rescan_work); } +static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_work_sync(&ctrl_info->event_work); +} + static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) { if (!ctrl_info->heartbeat_counter) @@ -370,22 +475,15 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) { - if (!ctrl_info->soft_reset_status) - return 0; - return readb(ctrl_info->soft_reset_status); } -static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, - u8 clear) +static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) { u8 status; - if (!ctrl_info->soft_reset_status) - return; - status = pqi_read_soft_reset_status(ctrl_info); - status &= ~clear; + status &= ~PQI_SOFT_RESET_ABORT; writeb(status, ctrl_info->soft_reset_status); } @@ -462,9 +560,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, request->data_direction = SOP_READ_FLAG; cdb[0] = cmd; if (cmd == CISS_REPORT_PHYS) - cdb[1] = CISS_REPORT_PHYS_EXTENDED; + cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER; else - cdb[1] = CISS_REPORT_LOG_EXTENDED; + cdb[1] = ctrl_info->ciss_report_log_flags; put_unaligned_be32(cdb_length, &cdb[6]); break; case CISS_GET_RAID_MAP: @@ -474,6 +572,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, put_unaligned_be32(cdb_length, &cdb[6]); break; case SA_FLUSH_CACHE: + request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; request->data_direction = 
SOP_WRITE_FLAG; cdb[0] = BMIC_WRITE; cdb[6] = BMIC_FLUSH_CACHE; @@ -485,6 +584,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, case BMIC_IDENTIFY_CONTROLLER: case BMIC_IDENTIFY_PHYSICAL_DEVICE: case BMIC_SENSE_SUBSYSTEM_INFORMATION: + case BMIC_SENSE_FEATURE: request->data_direction = SOP_READ_FLAG; cdb[0] = BMIC_READ; cdb[6] = cmd; @@ -507,8 +607,8 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, put_unaligned_be16(cdb_length, &cdb[7]); break; default: - dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", - cmd); + dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd); + BUG(); break; } @@ -567,144 +667,68 @@ static void pqi_free_io_request(struct pqi_io_request *io_request) } static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, - u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, - struct pqi_raid_error_info *error_info, - unsigned long timeout_msecs) + u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, + struct pqi_raid_error_info *error_info) { int rc; - enum dma_data_direction dir; struct pqi_raid_path_request request; + enum dma_data_direction dir; - rc = pqi_build_raid_path_request(ctrl_info, &request, - cmd, scsi3addr, buffer, - buffer_length, vpd_page, &dir); + rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, + buffer, buffer_length, vpd_page, &dir); if (rc) return rc; - rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, error_info, timeout_msecs); + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, + error_info); pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + return rc; } -/* Helper functions for pqi_send_scsi_raid_request */ +/* helper functions for pqi_send_scsi_raid_request */ static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, - u8 cmd, void *buffer, size_t buffer_length) + u8 cmd, void *buffer, size_t buffer_length) { return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, - buffer, buffer_length, 0, NULL, NO_TIMEOUT); + buffer, buffer_length, 0, NULL); } static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, - u8 cmd, void *buffer, size_t buffer_length, - struct pqi_raid_error_info *error_info) + u8 cmd, void *buffer, size_t buffer_length, + struct pqi_raid_error_info *error_info) { return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, - buffer, buffer_length, 0, error_info, NO_TIMEOUT); + buffer, buffer_length, 0, error_info); } - static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, - struct bmic_identify_controller *buffer) + struct bmic_identify_controller *buffer) { return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, - buffer, sizeof(*buffer)); + buffer, sizeof(*buffer)); } static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, - struct bmic_sense_subsystem_info *sense_info) + struct bmic_sense_subsystem_info *sense_info) { return pqi_send_ctrl_raid_request(ctrl_info, - BMIC_SENSE_SUBSYSTEM_INFORMATION, - sense_info, sizeof(*sense_info)); + BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, + sizeof(*sense_info)); } static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) { return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, - buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT); -} - -static bool pqi_vpd_page_supported(struct pqi_ctrl_info
*ctrl_info, - u8 *scsi3addr, u16 vpd_page) -{ - int rc; - int i; - int pages; - unsigned char *buf, bufsize; - - buf = kzalloc(256, GFP_KERNEL); - if (!buf) - return false; - - /* Get the size of the page list first */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, SCSI_VPD_HEADER_SZ); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - if ((pages + SCSI_VPD_HEADER_SZ) <= 255) - bufsize = pages + SCSI_VPD_HEADER_SZ; - else - bufsize = 255; - - /* Get the whole VPD page list */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, bufsize); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - for (i = 1; i <= pages; i++) - if (buf[3 + i] == vpd_page) - goto exit_supported; - -exit_unsupported: - kfree(buf); - return false; - -exit_supported: - kfree(buf); - return true; -} - -static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info, - u8 *scsi3addr, u8 *device_id, int buflen) -{ - int rc; - unsigned char *buf; - - if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID)) - return 1; /* function not supported */ - - buf = kzalloc(64, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_DEVICE_ID, - buf, 64); - if (rc == 0) { - if (buflen > 16) - buflen = 16; - memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen); - } - - kfree(buf); - - return rc; + buffer, buffer_length, vpd_page, NULL); } static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, - struct bmic_identify_physical_device *buffer, - size_t buffer_length) + struct bmic_identify_physical_device *buffer, size_t buffer_length) { int rc; enum dma_data_direction dir; @@ -721,10 +745,107 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, request.cdb[2] = (u8)bmic_device_index; request.cdb[9] = (u8)(bmic_device_index >> 8); - rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, NULL, NO_TIMEOUT); + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + + return rc; +} + +static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) +{ + u32 bytes; + + bytes = get_unaligned_le16(limit); + if (bytes == 0) + bytes = ~0; + else + bytes *= 1024; + + return bytes; +} + +#pragma pack(1) + +struct bmic_sense_feature_buffer { + struct bmic_sense_feature_buffer_header header; + struct bmic_sense_feature_io_page_aio_subpage aio_subpage; +}; + +#pragma pack() + +#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ + offsetofend(struct bmic_sense_feature_buffer, \ + aio_subpage.max_write_raid_1_10_3drive) + +#define MINIMUM_AIO_SUBPAGE_LENGTH \ + (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ + max_write_raid_1_10_3drive) - \ + FIELD_SIZEOF(struct bmic_sense_feature_io_page_aio_subpage, header)) + +static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + enum dma_data_direction dir; + struct pqi_raid_path_request request; + struct bmic_sense_feature_buffer *buffer; + + buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, + buffer, sizeof(*buffer), 0, &dir); + if (rc) + goto error; + + request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; + request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; + + rc = 
pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); + + pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + + if (rc) + goto error; + + if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || + buffer->header.subpage_code != + BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || + get_unaligned_le16(&buffer->header.buffer_length) < + MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || + buffer->aio_subpage.header.page_code != + BMIC_SENSE_FEATURE_IO_PAGE || + buffer->aio_subpage.header.subpage_code != + BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || + get_unaligned_le16(&buffer->aio_subpage.header.page_length) < + MINIMUM_AIO_SUBPAGE_LENGTH) { + goto error; + } + + ctrl_info->max_transfer_encrypted_sas_sata = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_transfer_encrypted_sas_sata); + + ctrl_info->max_transfer_encrypted_nvme = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_transfer_encrypted_nvme); + + ctrl_info->max_write_raid_5_6 = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_5_6); + + ctrl_info->max_write_raid_1_10_2drive = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_1_10_2drive); + + ctrl_info->max_write_raid_1_10_3drive = + pqi_aio_limit_to_bytes( + &buffer->aio_subpage.max_write_raid_1_10_3drive); + +error: + kfree(buffer); + return rc; } @@ -734,13 +855,6 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, int rc; struct bmic_flush_cache *flush_cache; - /* - * Don't bother trying to flush the cache if the controller is - * locked up. - */ - if (pqi_ctrl_offline(ctrl_info)) - return -ENXIO; - flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); if (!flush_cache) return -ENOMEM; @@ -763,7 +877,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, buffer, buffer_length, error_info); } -#define PQI_FETCH_PTRAID_DATA (1UL<<31) +#define PQI_FETCH_PTRAID_DATA (1 << 31) static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) { @@ -775,14 +889,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) return -ENOMEM; rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, - diag, sizeof(*diag)); + diag, sizeof(*diag)); if (rc) goto out; diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); - rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, - diag, sizeof(*diag)); + rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, + sizeof(*diag)); + out: kfree(diag); @@ -793,7 +908,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, void *buffer, size_t buffer_length) { return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, - buffer, buffer_length); + buffer, buffer_length); } #pragma pack(1) @@ -864,7 +979,7 @@ static int pqi_write_current_time_to_host_wellness( int rc; struct bmic_host_wellness_time *buffer; size_t buffer_length; - time64_t local_time; + unsigned long local_time; unsigned int year; struct tm tm; @@ -918,9 +1033,6 @@ static void pqi_update_time_worker(struct work_struct *work) ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, update_time_work); - if (pqi_ctrl_offline(ctrl_info)) - return; - rc = pqi_write_current_time_to_host_wellness(ctrl_info); if (rc) dev_warn(&ctrl_info->pci_dev->dev, @@ -930,27 +1042,23 @@ static void pqi_update_time_worker(struct work_struct *work) PQI_UPDATE_TIME_WORK_INTERVAL); } -static inline void pqi_schedule_update_time_worker( - struct pqi_ctrl_info *ctrl_info) +static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info 
*ctrl_info) { schedule_delayed_work(&ctrl_info->update_time_work, 0); } -static inline void pqi_cancel_update_time_worker( - struct pqi_ctrl_info *ctrl_info) +static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) { cancel_delayed_work_sync(&ctrl_info->update_time_work); } -static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, - void *buffer, size_t buffer_length) +static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, + size_t buffer_length) { - return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, - buffer_length); + return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); } -static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, - void **buffer) +static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) { int rc; size_t lun_list_length; @@ -965,8 +1073,7 @@ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, goto out; } - rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, - sizeof(*report_lun_header)); + rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); if (rc) goto out; @@ -990,8 +1097,8 @@ again: if (rc) goto out; - new_lun_list_length = get_unaligned_be32( - &((struct report_lun_header *)lun_data)->list_length); + new_lun_list_length = + get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); if (new_lun_list_length > lun_list_length) { lun_list_length = new_lun_list_length; @@ -1012,15 +1119,12 @@ out: return rc; } -static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, - void **buffer) +static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) { - return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, - buffer); + return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer); } -static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, - void **buffer) +static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) { return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); } @@ -1177,9 +1281,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, err_msg = "invalid RAID-1 map"; goto bad_raid_map; } - } else if (device->raid_level == SA_RAID_ADM) { + } else if (device->raid_level == SA_RAID_TRIPLE) { if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { - err_msg = "invalid RAID-1(ADM) map"; + err_msg = "invalid RAID-1(Triple) map"; goto bad_raid_map; } } else if ((device->raid_level == SA_RAID_5 || @@ -1218,9 +1322,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, return -ENOMEM; rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, - device->scsi3addr, raid_map, sizeof(*raid_map), - 0, NULL, NO_TIMEOUT); - + device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); if (rc) goto error; @@ -1235,15 +1337,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, return -ENOMEM; rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, - device->scsi3addr, raid_map, raid_map_size, - 0, NULL, NO_TIMEOUT); + device->scsi3addr, raid_map, raid_map_size, 0, NULL); if (rc) goto error; if (get_unaligned_le32(&raid_map->structure_size) - != raid_map_size) { + != raid_map_size) { dev_warn(&ctrl_info->pci_dev->dev, - "Requested %d bytes, received %d bytes", + "requested %u bytes, received %u bytes\n", raid_map_size, get_unaligned_le32(&raid_map->structure_size)); 
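+ /* assumed fix: rc is still 0 at this point, so a size mismatch would otherwise be reported to the caller as success after the map has been freed */ + rc = -EINVAL;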
goto error; @@ -1264,6 +1365,39 @@ error: return rc; } +static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + if (!ctrl_info->lv_drive_type_mix_valid) { + device->max_transfer_encrypted = ~0; + return; + } + + switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { + case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: + case LV_DRIVE_TYPE_MIX_SAS_ONLY: + case LV_DRIVE_TYPE_MIX_SATA_ONLY: + device->max_transfer_encrypted = + ctrl_info->max_transfer_encrypted_sas_sata; + break; + case LV_DRIVE_TYPE_MIX_NVME_ONLY: + device->max_transfer_encrypted = + ctrl_info->max_transfer_encrypted_nvme; + break; + case LV_DRIVE_TYPE_MIX_UNKNOWN: + case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: + default: + device->max_transfer_encrypted = + min(ctrl_info->max_transfer_encrypted_sas_sata, + ctrl_info->max_transfer_encrypted_nvme); + break; + } +} + static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { @@ -1280,17 +1414,21 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, if (rc) goto out; -#define RAID_BYPASS_STATUS 4 -#define RAID_BYPASS_CONFIGURED 0x1 -#define RAID_BYPASS_ENABLED 0x2 +#define RAID_BYPASS_STATUS 4 +#define RAID_BYPASS_CONFIGURED 0x1 +#define RAID_BYPASS_ENABLED 0x2 bypass_status = buffer[RAID_BYPASS_STATUS]; device->raid_bypass_configured = (bypass_status & RAID_BYPASS_CONFIGURED) != 0; if (device->raid_bypass_configured && (bypass_status & RAID_BYPASS_ENABLED) && - pqi_get_raid_map(ctrl_info, device) == 0) + pqi_get_raid_map(ctrl_info, device) == 0) { device->raid_bypass_enabled = true; + if (get_unaligned_le16(&device->raid_map->flags) & + RAID_MAP_ENCRYPTION_ENABLED) + pqi_set_max_transfer_encrypted(ctrl_info, device); + } out: kfree(buffer); @@ -1338,68 +1476,9 @@ no_buffer: device->volume_offline = volume_offline; } -#define PQI_INQUIRY_PAGE0_RETRIES 3 +#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 -static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) -{ - int rc; - u8 *buffer; - unsigned int retries; - - if (device->is_expander_smp_device) - return 0; - - buffer = kmalloc(64, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - /* Send an inquiry to the device to see what it is. 
*/ - for (retries = 0;;) { - rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, - buffer, 64); - if (rc == 0) - break; - if (pqi_is_logical_device(device) || - rc != PQI_CMD_STATUS_ABORTED || - ++retries > PQI_INQUIRY_PAGE0_RETRIES) - goto out; - } - - scsi_sanitize_inquiry_string(&buffer[8], 8); - scsi_sanitize_inquiry_string(&buffer[16], 16); - - device->devtype = buffer[0] & 0x1f; - memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); - memcpy(device->model, &buffer[16], sizeof(device->model)); - - if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { - if (device->is_external_raid_device) { - device->raid_level = SA_RAID_UNKNOWN; - device->volume_status = CISS_LV_OK; - device->volume_offline = false; - } else { - pqi_get_raid_level(ctrl_info, device); - pqi_get_raid_bypass_status(ctrl_info, device); - pqi_get_volume_status(ctrl_info, device); - } - } - - if (pqi_get_device_id(ctrl_info, device->scsi3addr, - device->unique_id, sizeof(device->unique_id)) < 0) - dev_warn(&ctrl_info->pci_dev->dev, - "Can't get device id for scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, - device->bus, device->target, - device->lun); - -out: - kfree(buffer); - - return rc; -} - -static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, +static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct bmic_identify_physical_device *id_phys) { @@ -1411,14 +1490,20 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, id_phys, sizeof(*id_phys)); if (rc) { device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; - return; + return rc; } + + scsi_sanitize_inquiry_string(&id_phys->model[0], 8); + scsi_sanitize_inquiry_string(&id_phys->model[8], 16); + + memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); + memcpy(device->model, &id_phys->model[8], sizeof(device->model)); + device->box_index = id_phys->box_index; device->phys_box_on_bus = id_phys->phys_box_on_bus; device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; device->queue_depth = get_unaligned_le16(&id_phys->current_queue_depth_limit); - device->device_type = id_phys->device_type; device->active_path_index = id_phys->active_path_number; device->path_map = id_phys->redundant_path_present_map; memcpy(&device->box, @@ -1428,6 +1513,75 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, &id_phys->alternate_paths_phys_connector, sizeof(device->phys_connector)); device->bay = id_phys->phys_bay_in_box; + + memcpy(&device->page_83_identifier, &id_phys->page_83_identifier, + sizeof(device->page_83_identifier)); + + if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && + id_phys->phy_count) + device->phy_id = + id_phys->phy_to_phy_map[device->active_path_index]; + else + device->phy_id = 0xFF; + + return 0; +} + +static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device) +{ + int rc; + u8 *buffer; + + buffer = kmalloc(64, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + /* Send an inquiry to the device to see what it is. 
*/ + rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); + if (rc) + goto out; + + scsi_sanitize_inquiry_string(&buffer[8], 8); + scsi_sanitize_inquiry_string(&buffer[16], 16); + + device->devtype = buffer[0] & 0x1f; + memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); + memcpy(device->model, &buffer[16], sizeof(device->model)); + + if (device->devtype == TYPE_DISK) { + if (device->is_external_raid_device) { + device->raid_level = SA_RAID_UNKNOWN; + device->volume_status = CISS_LV_OK; + device->volume_offline = false; + } else { + pqi_get_raid_level(ctrl_info, device); + pqi_get_raid_bypass_status(ctrl_info, device); + pqi_get_volume_status(ctrl_info, device); + } + } + +out: + kfree(buffer); + + return rc; +} + +static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + if (device->is_expander_smp_device) + return 0; + + if (pqi_is_logical_device(device)) + rc = pqi_get_logical_device_info(ctrl_info, device); + else + rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); + + return rc; } static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, @@ -1557,20 +1711,17 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, return rc; } -#define PQI_PENDING_IO_TIMEOUT_SECS 20 +#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) -static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { int rc; - pqi_device_remove_start(device); - rc = pqi_device_wait_for_pending_io(ctrl_info, device, - PQI_PENDING_IO_TIMEOUT_SECS); + PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); if (rc) dev_err(&ctrl_info->pci_dev->dev, - "scsi %d:%d:%d:%d removing device with %d outstanding commands\n", + "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, atomic_read(&device->scsi_cmds_outstanding)); @@ -1588,17 +1739,14 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, { struct pqi_scsi_dev *device; - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) - if (device->bus == bus && device->target == target && - device->lun == lun) + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) + if (device->bus == bus && device->target == target && device->lun == lun) return device; return NULL; } -static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, - struct pqi_scsi_dev *dev2) +static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) { if (dev1->is_physical_device != dev2->is_physical_device) return false; @@ -1606,8 +1754,7 @@ static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, if (dev1->is_physical_device) return dev1->wwid == dev2->wwid; - return memcmp(dev1->volume_id, dev2->volume_id, - sizeof(dev1->volume_id)) == 0; + return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; } enum pqi_find_result { @@ -1617,15 +1764,12 @@ enum pqi_find_result { }; static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device_to_find, - struct pqi_scsi_dev **matching_device) + struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) { struct pqi_scsi_dev *device; - list_for_each_entry(device, &ctrl_info->scsi_device_list, 
- scsi_device_list_entry) { - if (pqi_scsi3addr_equal(device_to_find->scsi3addr, - device->scsi3addr)) { + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { *matching_device = device; if (pqi_device_equal(device_to_find, device)) { if (device_to_find->volume_offline) @@ -1655,32 +1799,32 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, ssize_t count; char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; - count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, + count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); if (device->target_lun_valid) - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, "%d:%d", device->target, device->lun); else - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, "-:-"); if (pqi_is_logical_device(device)) - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %08x%08x", *((u32 *)&device->scsi3addr), *((u32 *)&device->scsi3addr[4])); else - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %016llx", device->sas_address); - count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %s %.8s %.16s ", pqi_device_type(device), device->vendor, @@ -1688,19 +1832,19 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, if (pqi_is_logical_device(device)) { if (device->devtype == TYPE_DISK) - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, "SSDSmartPathCap%c En%c %-12s", device->raid_bypass_configured ? '+' : '-', device->raid_bypass_enabled ? '+' : '-', pqi_raid_level_to_string(device->raid_level)); } else { - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, "AIO%c", device->aio_enabled ? '+' : '-'); if (device->devtype == TYPE_DISK || device->devtype == TYPE_ZBC) - count += snprintf(buffer + count, + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " qd=%-6d", device->queue_depth); } @@ -1713,7 +1857,6 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) { - existing_device->devtype = new_device->devtype; existing_device->device_type = new_device->device_type; existing_device->bus = new_device->bus; if (new_device->target_lun_valid) { @@ -1722,6 +1865,11 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, existing_device->target_lun_valid = true; } + if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION || + existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) && + new_device->volume_status == CISS_LV_OK) + existing_device->rescan = true; + /* By definition, the scsi3addr and wwid fields are already the same. 
*/ existing_device->is_physical_device = new_device->is_physical_device; @@ -1740,17 +1888,17 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, existing_device->aio_handle = new_device->aio_handle; existing_device->volume_status = new_device->volume_status; existing_device->active_path_index = new_device->active_path_index; + existing_device->phy_id = new_device->phy_id; existing_device->path_map = new_device->path_map; existing_device->bay = new_device->bay; existing_device->box_index = new_device->box_index; existing_device->phys_box_on_bus = new_device->phys_box_on_bus; - existing_device->phy_connected_dev_type = - new_device->phy_connected_dev_type; + existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); - existing_device->offload_to_mirror = 0; + existing_device->next_bypass_group = 0; kfree(existing_device->raid_map); existing_device->raid_map = new_device->raid_map; existing_device->raid_bypass_configured = @@ -1820,15 +1968,14 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); /* Assume that all devices in the existing list have gone away. */ - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) device->device_gone = true; for (i = 0; i < num_new_devices; i++) { device = new_device_list[i]; find_result = pqi_scsi_find_entry(ctrl_info, device, - &matching_device); + &matching_device); switch (find_result) { case DEVICE_SAME: @@ -1854,6 +2001,9 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, */ device->new_device = true; break; + default: + BUG(); + break; } } @@ -1882,12 +2032,16 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - if (pqi_ctrl_in_ofa(ctrl_info)) - pqi_ctrl_ofa_done(ctrl_info); + if (pqi_ofa_in_progress(ctrl_info)) { + list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) + if (pqi_is_device_added(device)) + pqi_device_remove_start(device); + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + } /* Remove all devices that have gone away. */ - list_for_each_entry_safe(device, next, &delete_list, - delete_list_entry) { + list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { if (device->volume_offline) { pqi_dev_info(ctrl_info, "offline", device); pqi_show_volume_status(ctrl_info, device); @@ -1904,22 +2058,24 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, * Notify the SCSI ML if the queue depth of any existing device has * changed. 
*/ - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) { - if (device->sdev && device->queue_depth != - device->advertised_queue_depth) { + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + if (device->sdev && device->queue_depth != device->advertised_queue_depth) { device->advertised_queue_depth = device->queue_depth; - scsi_change_queue_depth(device->sdev, - device->advertised_queue_depth); + scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); + } + if (device->sdev && device->rescan) { + scsi_rescan_device(&device->sdev->sdev_gendev); + device->rescan = false; + } } /* Expose any new devices. */ list_for_each_entry_safe(device, next, &add_list, add_list_entry) { if (!pqi_is_device_added(device)) { - pqi_dev_info(ctrl_info, "added", device); rc = pqi_add_device(ctrl_info, device); - if (rc) { + if (rc == 0) { + pqi_dev_info(ctrl_info, "added", device); + } else { dev_warn(&ctrl_info->pci_dev->dev, "scsi %d:%d:%d:%d addition failed, device not added\n", ctrl_info->scsi_host->host_no, @@ -1931,36 +2087,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, } } -static bool pqi_is_supported_device(struct pqi_scsi_dev *device) +static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) { - bool is_supported; + /* + * Only support the HBA controller itself as a RAID + * controller. If it's a RAID controller other than + * the HBA itself (an external RAID controller, for + * example), we don't support it. + */ + if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && + !pqi_is_hba_lunid(device->scsi3addr)) + return false; - if (device->is_expander_smp_device) - return true; - - is_supported = false; - - switch (device->devtype) { - case TYPE_DISK: - case TYPE_ZBC: - case TYPE_TAPE: - case TYPE_MEDIUM_CHANGER: - case TYPE_ENCLOSURE: - is_supported = true; - break; - case TYPE_RAID: - /* - * Only support the HBA controller itself as a RAID - * controller. If it's a RAID controller other than - * the HBA itself (an external RAID controller, for - * example), we don't support it. 
- */ - if (pqi_is_hba_lunid(device->scsi3addr)) - is_supported = true; - break; - } - - return is_supported; + return true; } static inline bool pqi_skip_device(u8 *scsi3addr) @@ -1979,16 +2118,10 @@ static inline void pqi_mask_device(u8 *scsi3addr) static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) { - if (!device->is_physical_device) - return false; - - if (device->is_expander_smp_device) - return true; - - switch (device->devtype) { - case TYPE_DISK: - case TYPE_ZBC: - case TYPE_ENCLOSURE: + switch (device->device_type) { + case SA_DEVICE_TYPE_SAS: + case SA_DEVICE_TYPE_EXPANDER_SMP: + case SA_DEVICE_TYPE_SES: return true; } @@ -1997,8 +2130,17 @@ static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { - return !device->is_physical_device || - !pqi_skip_device(device->scsi3addr); + return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); +} + +static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry) +{ + if (ctrl_info->unique_wwid_in_report_phys_lun_supported || + pqi_is_device_with_sas_address(device)) + device->wwid = phys_lun_ext_entry->wwid; + else + device->wwid = *(u64 *)device->page_83_identifier; } static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) @@ -2057,28 +2199,27 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) rc = -ENOMEM; goto out; } - if (pqi_hide_vsep) { - int i; + if (pqi_hide_vsep) { for (i = num_physicals - 1; i >= 0; i--) { phys_lun_ext_entry = &physdev_list->lun_entries[i]; - if (CISS_GET_DRIVE_NUMBER( - phys_lun_ext_entry->lunid) == - PQI_VSEP_CISS_BTL) { - pqi_mask_device( - phys_lun_ext_entry->lunid); + if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) { + pqi_mask_device(phys_lun_ext_entry->lunid); break; } } } } + if (num_logicals && + (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) + ctrl_info->lv_drive_type_mix_valid = true; + num_new_devices = num_physicals + num_logicals; - new_device_list = kmalloc_array(num_new_devices, - sizeof(*new_device_list), - GFP_KERNEL); + new_device_list = kmalloc(sizeof(*new_device_list) * + num_new_devices, GFP_KERNEL); if (!new_device_list) { dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); rc = -ENOMEM; @@ -2131,16 +2272,19 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); device->is_physical_device = is_physical_device; if (is_physical_device) { - if (phys_lun_ext_entry->device_type == - SA_EXPANDER_SMP_DEVICE) + device->device_type = phys_lun_ext_entry->device_type; + if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) device->is_expander_smp_device = true; } else { device->is_external_raid_device = pqi_is_external_raid_addr(scsi3addr); } + if (!pqi_is_supported_device(device)) + continue; + /* Gather information about the device. 
*/ - rc = pqi_get_device_info(ctrl_info, device); + rc = pqi_get_device_info(ctrl_info, device, id_phys); if (rc == -ENOMEM) { dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); @@ -2150,8 +2294,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) if (device->is_physical_device) dev_warn(&ctrl_info->pci_dev->dev, "obtaining device info failed, skipping physical device %016llx\n", - get_unaligned_be64( - &phys_lun_ext_entry->wwid)); + get_unaligned_be64(&phys_lun_ext_entry->wwid)); else dev_warn(&ctrl_info->pci_dev->dev, "obtaining device info failed, skipping logical device %08x%08x\n", @@ -2161,24 +2304,17 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) continue; } - if (!pqi_is_supported_device(device)) - continue; - pqi_assign_bus_target_lun(device); if (device->is_physical_device) { - device->wwid = phys_lun_ext_entry->wwid; + pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry); if ((phys_lun_ext_entry->device_flags & - REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && + CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && phys_lun_ext_entry->aio_handle) { - device->aio_enabled = true; + device->aio_enabled = true; device->aio_handle = phys_lun_ext_entry->aio_handle; } - - pqi_get_physical_disk_info(ctrl_info, - device, id_phys); - } else { memcpy(device->volume_id, log_lun_ext_entry->volume_id, sizeof(device->volume_id)); @@ -2213,54 +2349,112 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) { unsigned long flags; struct pqi_scsi_dev *device; + struct pqi_scsi_dev *next; - while (1) { - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); - - device = list_first_entry_or_null(&ctrl_info->scsi_device_list, - struct pqi_scsi_dev, scsi_device_list_entry); - if (device) - list_del(&device->scsi_device_list_entry); - - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); - - if (!device) - break; + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { if (pqi_is_device_added(device)) pqi_remove_device(ctrl_info, device); + list_del(&device->scsi_device_list_entry); pqi_free_device(device); } + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); +} + +#if TORTUGA + +static int pqi_add_controller(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned long flags; + struct pqi_scsi_dev *device; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to allocate memory for controller device\n"); + return -ENOMEM; + } + + device->devtype = TYPE_RAID; + pqi_assign_bus_target_lun(device); + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_add_tail(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + rc = pqi_add_device(ctrl_info, device); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d addition failed, device not added\n", + ctrl_info->scsi_host->host_no, + device->bus, device->target, + device->lun); + goto error; + } + + return 0; + +error: + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_del(&device->scsi_device_list_entry); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + kfree(device); + + return rc; } static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) { - int rc = 0; + int rc; + + if (list_empty(&ctrl_info->scsi_device_list)) + rc = 
pqi_add_controller(ctrl_info); + else + rc = 0; + + return rc; +} + +#else + +static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + int mutex_acquired; if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; - if (!mutex_trylock(&ctrl_info->scan_mutex)) { + mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); + + if (!mutex_acquired) { + if (pqi_ctrl_scan_blocked(ctrl_info)) + return -EBUSY; pqi_schedule_rescan_worker_delayed(ctrl_info); - rc = -EINPROGRESS; - } else { - rc = pqi_update_scsi_devices(ctrl_info); - if (rc) - pqi_schedule_rescan_worker_delayed(ctrl_info); - mutex_unlock(&ctrl_info->scan_mutex); + return -EINPROGRESS; } + rc = pqi_update_scsi_devices(ctrl_info); + if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) + pqi_schedule_rescan_worker_delayed(ctrl_info); + + mutex_unlock(&ctrl_info->scan_mutex); + return rc; } +#endif + static void pqi_scan_start(struct Scsi_Host *shost) { struct pqi_ctrl_info *ctrl_info; ctrl_info = shost_to_hba(shost); - if (pqi_ctrl_in_ofa(ctrl_info)) - return; pqi_scan_scsi_devices(ctrl_info); } @@ -2277,27 +2471,8 @@ static int pqi_scan_finished(struct Scsi_Host *shost, return !mutex_is_locked(&ctrl_info->scan_mutex); } -static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) -{ - mutex_lock(&ctrl_info->scan_mutex); - mutex_unlock(&ctrl_info->scan_mutex); -} - -static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) -{ - mutex_lock(&ctrl_info->lun_reset_mutex); - mutex_unlock(&ctrl_info->lun_reset_mutex); -} - -static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) -{ - mutex_lock(&ctrl_info->ofa_mutex); - mutex_unlock(&ctrl_info->ofa_mutex); -} - -static inline void pqi_set_encryption_info( - struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, - u64 first_block) +static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, + struct raid_map *raid_map, u64 first_block) { u32 volume_blk_size; @@ -2320,333 +2495,337 @@ static inline void pqi_set_encryption_info( * Attempt to perform RAID bypass mapping for a logical volume I/O. 
*/ +static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct pqi_scsi_dev_raid_map_data *rmd) +{ + bool is_supported = true; + + switch (rmd->raid_level) { + case SA_RAID_0: + break; + case SA_RAID_1: + if (rmd->is_write && (!ctrl_info->enable_r1_writes || + rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) + is_supported = false; + break; + case SA_RAID_TRIPLE: + if (rmd->is_write && (!ctrl_info->enable_r1_writes || + rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) + is_supported = false; + break; + case SA_RAID_5: + if (rmd->is_write && (!ctrl_info->enable_r5_writes || + rmd->data_length > ctrl_info->max_write_raid_5_6)) + is_supported = false; + break; + case SA_RAID_6: + if (rmd->is_write && (!ctrl_info->enable_r6_writes || + rmd->data_length > ctrl_info->max_write_raid_5_6)) + is_supported = false; + break; + default: + is_supported = false; + break; + } + + return is_supported; +} + #define PQI_RAID_BYPASS_INELIGIBLE 1 -static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, - struct pqi_queue_group *queue_group) +static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, + struct pqi_scsi_dev_raid_map_data *rmd) { - struct raid_map *raid_map; - bool is_write = false; - u32 map_index; - u64 first_block; - u64 last_block; - u32 block_cnt; - u32 blocks_per_row; - u64 first_row; - u64 last_row; - u32 first_row_offset; - u32 last_row_offset; - u32 first_column; - u32 last_column; - u64 r0_first_row; - u64 r0_last_row; - u32 r5or6_blocks_per_row; - u64 r5or6_first_row; - u64 r5or6_last_row; - u32 r5or6_first_row_offset; - u32 r5or6_last_row_offset; - u32 r5or6_first_column; - u32 r5or6_last_column; - u16 data_disks_per_row; - u32 total_disks_per_row; - u16 layout_map_count; - u32 stripesize; - u16 strip_size; - u32 first_group; - u32 last_group; - u32 current_group; - u32 map_row; - u32 aio_handle; - u64 disk_block; - u32 disk_block_cnt; - u8 cdb[16]; - u8 cdb_length; - int offload_to_mirror; - struct pqi_encryption_info *encryption_info_ptr; - struct pqi_encryption_info encryption_info; -#if BITS_PER_LONG == 32 - u64 tmpdiv; -#endif - /* Check for valid opcode, get LBA and block count. 
*/ switch (scmd->cmnd[0]) { case WRITE_6: - is_write = true; + rmd->is_write = true; /* fall through */ case READ_6: - first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | + rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); - block_cnt = (u32)scmd->cmnd[4]; - if (block_cnt == 0) - block_cnt = 256; + rmd->block_cnt = (u32)scmd->cmnd[4]; + if (rmd->block_cnt == 0) + rmd->block_cnt = 256; break; case WRITE_10: - is_write = true; + rmd->is_write = true; /* fall through */ case READ_10: - first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); - block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); + rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); + rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); break; case WRITE_12: - is_write = true; + rmd->is_write = true; /* fall through */ case READ_12: - first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); - block_cnt = get_unaligned_be32(&scmd->cmnd[6]); + rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); + rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); break; case WRITE_16: - is_write = true; + rmd->is_write = true; /* fall through */ case READ_16: - first_block = get_unaligned_be64(&scmd->cmnd[2]); - block_cnt = get_unaligned_be32(&scmd->cmnd[10]); + rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); + rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); break; default: /* Process via normal I/O path. */ return PQI_RAID_BYPASS_INELIGIBLE; } - /* Check for write to non-RAID-0. */ - if (is_write && device->raid_level != SA_RAID_0) - return PQI_RAID_BYPASS_INELIGIBLE; + put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); - if (unlikely(block_cnt == 0)) - return PQI_RAID_BYPASS_INELIGIBLE; + return 0; +} - last_block = first_block + block_cnt - 1; - raid_map = device->raid_map; +static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) +{ + rmd->last_block = rmd->first_block + rmd->block_cnt - 1; /* Check for invalid block or wraparound. */ - if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || - last_block < first_block) + if (rmd->last_block >= + get_unaligned_le64(&raid_map->volume_blk_cnt) || + rmd->last_block < rmd->first_block) return PQI_RAID_BYPASS_INELIGIBLE; - data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); - strip_size = get_unaligned_le16(&raid_map->strip_size); - layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); + rmd->data_disks_per_row = + get_unaligned_le16(&raid_map->data_disks_per_row); + rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); + rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); /* Calculate stripe information for the request. 
*/ - blocks_per_row = data_disks_per_row * strip_size; -#if BITS_PER_LONG == 32 - tmpdiv = first_block; - do_div(tmpdiv, blocks_per_row); - first_row = tmpdiv; - tmpdiv = last_block; - do_div(tmpdiv, blocks_per_row); - last_row = tmpdiv; - first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); - last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); - tmpdiv = first_row_offset; - do_div(tmpdiv, strip_size); - first_column = tmpdiv; - tmpdiv = last_row_offset; - do_div(tmpdiv, strip_size); - last_column = tmpdiv; -#else - first_row = first_block / blocks_per_row; - last_row = last_block / blocks_per_row; - first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); - last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); - first_column = first_row_offset / strip_size; - last_column = last_row_offset / strip_size; -#endif + rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; + rmd->first_row = rmd->first_block / rmd->blocks_per_row; + rmd->last_row = rmd->last_block / rmd->blocks_per_row; + rmd->first_row_offset = (u32)(rmd->first_block - + (rmd->first_row * rmd->blocks_per_row)); + rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * + rmd->blocks_per_row)); + rmd->first_column = rmd->first_row_offset / rmd->strip_size; + rmd->last_column = rmd->last_row_offset / rmd->strip_size; /* If this isn't a single row/column then give to the controller. */ - if (first_row != last_row || first_column != last_column) + if (rmd->first_row != rmd->last_row || + rmd->first_column != rmd->last_column) return PQI_RAID_BYPASS_INELIGIBLE; /* Proceeding with driver mapping. */ - total_disks_per_row = data_disks_per_row + + rmd->total_disks_per_row = rmd->data_disks_per_row + get_unaligned_le16(&raid_map->metadata_disks_per_row); - map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % + rmd->map_row = ((u32)(rmd->first_row >> + raid_map->parity_rotation_shift)) % get_unaligned_le16(&raid_map->row_cnt); - map_index = (map_row * total_disks_per_row) + first_column; + rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + + rmd->first_column; - /* RAID 1 */ - if (device->raid_level == SA_RAID_1) { - if (device->offload_to_mirror) - map_index += data_disks_per_row; - device->offload_to_mirror = !device->offload_to_mirror; - } else if (device->raid_level == SA_RAID_ADM) { - /* RAID ADM */ - /* - * Handles N-way mirrors (R1-ADM) and R10 with # of drives - * divisible by 3. - */ - offload_to_mirror = device->offload_to_mirror; - if (offload_to_mirror == 0) { - /* use physical disk in the first mirrored group. */ - map_index %= data_disks_per_row; - } else { - do { - /* - * Determine mirror group that map_index - * indicates. - */ - current_group = map_index / data_disks_per_row; + return 0; +} - if (offload_to_mirror != current_group) { - if (current_group < - layout_map_count - 1) { - /* - * Select raid index from - * next group. - */ - map_index += data_disks_per_row; - current_group++; - } else { - /* - * Select raid index from first - * group. - */ - map_index %= data_disks_per_row; - current_group = 0; - } - } - } while (offload_to_mirror != current_group); +static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, + struct raid_map *raid_map) +{ + /* RAID 50/60 */ + /* Verify first and last block are in same RAID group. 
*/ + rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; + rmd->first_group = (rmd->first_block % + rmd->stripesize) / rmd->blocks_per_row; + rmd->last_group = (rmd->last_block % + rmd->stripesize) / rmd->blocks_per_row; + if (rmd->first_group != rmd->last_group) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Verify request is in a single row of RAID 5/6. */ + rmd->first_row = rmd->r5or6_first_row = + rmd->first_block / rmd->stripesize; + rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; + if (rmd->r5or6_first_row != rmd->r5or6_last_row) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Verify request is in a single column. */ + rmd->first_row_offset = rmd->r5or6_first_row_offset = + (u32)((rmd->first_block % rmd->stripesize) % + rmd->blocks_per_row); + + rmd->r5or6_last_row_offset = + (u32)((rmd->last_block % rmd->stripesize) % + rmd->blocks_per_row); + + rmd->first_column = + rmd->r5or6_first_row_offset / rmd->strip_size; + rmd->r5or6_first_column = rmd->first_column; + rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; + if (rmd->r5or6_first_column != rmd->r5or6_last_column) + return PQI_RAID_BYPASS_INELIGIBLE; + + /* Request is eligible. */ + rmd->map_row = + ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % + get_unaligned_le16(&raid_map->row_cnt); + + rmd->map_index = (rmd->first_group * + (get_unaligned_le16(&raid_map->row_cnt) * + rmd->total_disks_per_row)) + + (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; + + if (rmd->is_write) { + u32 index; + + index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); + index *= rmd->total_disks_per_row; + index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); + + rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; + if (rmd->raid_level == SA_RAID_6) { + rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; + rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; } - - /* Set mirror group to use next time. */ - offload_to_mirror = - (offload_to_mirror >= layout_map_count - 1) ? - 0 : offload_to_mirror + 1; - WARN_ON(offload_to_mirror >= layout_map_count); - device->offload_to_mirror = offload_to_mirror; - /* - * Avoid direct use of device->offload_to_mirror within this - * function since multiple threads might simultaneously - * increment it beyond the range of device->layout_map_count -1. 
- */ - } else if ((device->raid_level == SA_RAID_5 || - device->raid_level == SA_RAID_6) && layout_map_count > 1) { - /* RAID 50/60 */ - /* Verify first and last block are in same RAID group */ - r5or6_blocks_per_row = strip_size * data_disks_per_row; - stripesize = r5or6_blocks_per_row * layout_map_count; -#if BITS_PER_LONG == 32 - tmpdiv = first_block; - first_group = do_div(tmpdiv, stripesize); - tmpdiv = first_group; - do_div(tmpdiv, r5or6_blocks_per_row); - first_group = tmpdiv; - tmpdiv = last_block; - last_group = do_div(tmpdiv, stripesize); - tmpdiv = last_group; - do_div(tmpdiv, r5or6_blocks_per_row); - last_group = tmpdiv; -#else - first_group = (first_block % stripesize) / r5or6_blocks_per_row; - last_group = (last_block % stripesize) / r5or6_blocks_per_row; -#endif - if (first_group != last_group) + if (rmd->blocks_per_row == 0) return PQI_RAID_BYPASS_INELIGIBLE; - - /* Verify request is in a single row of RAID 5/6 */ -#if BITS_PER_LONG == 32 - tmpdiv = first_block; - do_div(tmpdiv, stripesize); - first_row = r5or6_first_row = r0_first_row = tmpdiv; - tmpdiv = last_block; - do_div(tmpdiv, stripesize); - r5or6_last_row = r0_last_row = tmpdiv; -#else - first_row = r5or6_first_row = r0_first_row = - first_block / stripesize; - r5or6_last_row = r0_last_row = last_block / stripesize; -#endif - if (r5or6_first_row != r5or6_last_row) - return PQI_RAID_BYPASS_INELIGIBLE; - - /* Verify request is in a single column */ -#if BITS_PER_LONG == 32 - tmpdiv = first_block; - first_row_offset = do_div(tmpdiv, stripesize); - tmpdiv = first_row_offset; - first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); - r5or6_first_row_offset = first_row_offset; - tmpdiv = last_block; - r5or6_last_row_offset = do_div(tmpdiv, stripesize); - tmpdiv = r5or6_last_row_offset; - r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); - tmpdiv = r5or6_first_row_offset; - do_div(tmpdiv, strip_size); - first_column = r5or6_first_column = tmpdiv; - tmpdiv = r5or6_last_row_offset; - do_div(tmpdiv, strip_size); - r5or6_last_column = tmpdiv; -#else - first_row_offset = r5or6_first_row_offset = - (u32)((first_block % stripesize) % - r5or6_blocks_per_row); - - r5or6_last_row_offset = - (u32)((last_block % stripesize) % - r5or6_blocks_per_row); - - first_column = r5or6_first_row_offset / strip_size; - r5or6_first_column = first_column; - r5or6_last_column = r5or6_last_row_offset / strip_size; -#endif - if (r5or6_first_column != r5or6_last_column) - return PQI_RAID_BYPASS_INELIGIBLE; - - /* Request is eligible */ - map_row = - ((u32)(first_row >> raid_map->parity_rotation_shift)) % - get_unaligned_le16(&raid_map->row_cnt); - - map_index = (first_group * - (get_unaligned_le16(&raid_map->row_cnt) * - total_disks_per_row)) + - (map_row * total_disks_per_row) + first_column; + rmd->row = rmd->first_block / rmd->blocks_per_row; } - aio_handle = raid_map->disk_data[map_index].aio_handle; - disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + - first_row * strip_size + - (first_row_offset - first_column * strip_size); - disk_block_cnt = block_cnt; + return 0; +} + +static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) +{ + /* Build the new CDB for the physical disk I/O. */ + if (rmd->disk_block > 0xffffffff) { + rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; + rmd->cdb[1] = 0; + put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); + put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); + rmd->cdb[14] = 0; + rmd->cdb[15] = 0; + rmd->cdb_length = 16; + } else { + rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; + rmd->cdb[1] = 0; + put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); + rmd->cdb[6] = 0; + put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); + rmd->cdb[9] = 0; + rmd->cdb_length = 10; + } +} + +static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + u32 index; + u32 group; + + group = rmd->map_index / rmd->data_disks_per_row; + + index = rmd->map_index - (group * rmd->data_disks_per_row); + rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; + index += rmd->data_disks_per_row; + rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; + if (rmd->layout_map_count > 2) { + index += rmd->data_disks_per_row; + rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; + } + + rmd->num_it_nexus_entries = rmd->layout_map_count; +} + +static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, + struct pqi_queue_group *queue_group) +{ + int rc; + struct raid_map *raid_map; + u32 group; + u32 next_bypass_group; + struct pqi_encryption_info *encryption_info_ptr; + struct pqi_encryption_info encryption_info; + struct pqi_scsi_dev_raid_map_data rmd = { 0 }; + + rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + + rmd.raid_level = device->raid_level; + + if (!pqi_aio_raid_level_supported(ctrl_info, device, &rmd)) + return PQI_RAID_BYPASS_INELIGIBLE; + + if (unlikely(rmd.block_cnt == 0)) + return PQI_RAID_BYPASS_INELIGIBLE; + + raid_map = device->raid_map; + + rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + + if (device->raid_level == SA_RAID_1 || + device->raid_level == SA_RAID_TRIPLE) { + if (rmd.is_write) { + pqi_calc_aio_r1_nexus(raid_map, &rmd); + } else { + group = device->next_bypass_group; + next_bypass_group = group + 1; + if (next_bypass_group >= rmd.layout_map_count) + next_bypass_group = 0; + device->next_bypass_group = next_bypass_group; + rmd.map_index += group * rmd.data_disks_per_row; + } + } else if ((device->raid_level == SA_RAID_5 || + device->raid_level == SA_RAID_6) && + (rmd.layout_map_count > 1 || rmd.is_write)) { + rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); + if (rc) + return PQI_RAID_BYPASS_INELIGIBLE; + } + + if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) + return PQI_RAID_BYPASS_INELIGIBLE; + + rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; + rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + + rmd.first_row * rmd.strip_size + + (rmd.first_row_offset - rmd.first_column * rmd.strip_size); + rmd.disk_block_cnt = rmd.block_cnt; /* Handle differing logical/physical block sizes. */ if (raid_map->phys_blk_shift) { - disk_block <<= raid_map->phys_blk_shift; - disk_block_cnt <<= raid_map->phys_blk_shift; + rmd.disk_block <<= raid_map->phys_blk_shift; + rmd.disk_block_cnt <<= raid_map->phys_blk_shift; } - if (unlikely(disk_block_cnt > 0xffff)) + if (unlikely(rmd.disk_block_cnt > 0xffff)) return PQI_RAID_BYPASS_INELIGIBLE; - /* Build the new CDB for the physical disk I/O. */ - if (disk_block > 0xffffffff) { - cdb[0] = is_write ? 
WRITE_16 : READ_16; - cdb[1] = 0; - put_unaligned_be64(disk_block, &cdb[2]); - put_unaligned_be32(disk_block_cnt, &cdb[10]); - cdb[14] = 0; - cdb[15] = 0; - cdb_length = 16; - } else { - cdb[0] = is_write ? WRITE_10 : READ_10; - cdb[1] = 0; - put_unaligned_be32((u32)disk_block, &cdb[2]); - cdb[6] = 0; - put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); - cdb[9] = 0; - cdb_length = 10; - } + pqi_set_aio_cdb(&rmd); - if (get_unaligned_le16(&raid_map->flags) & - RAID_MAP_ENCRYPTION_ENABLED) { - pqi_set_encryption_info(&encryption_info, raid_map, - first_block); + if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { + if (rmd.data_length > device->max_transfer_encrypted) + return PQI_RAID_BYPASS_INELIGIBLE; + pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); encryption_info_ptr = &encryption_info; } else { encryption_info_ptr = NULL; } - return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, - cdb, cdb_length, queue_group, encryption_info_ptr, true); + if (rmd.is_write) { + switch (device->raid_level) { + case SA_RAID_1: + case SA_RAID_TRIPLE: + return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, + encryption_info_ptr, device, &rmd); + case SA_RAID_5: + case SA_RAID_6: + return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, + encryption_info_ptr, device, &rmd); + } + } + + return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, + rmd.cdb, rmd.cdb_length, queue_group, + encryption_info_ptr, true); } #define PQI_STATUS_IDLE 0x0 @@ -2809,25 +2988,10 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request) scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && sshdr.sense_key == HARDWARE_ERROR && - sshdr.asc == 0x3e) { - struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); - struct pqi_scsi_dev *device = scmd->device->hostdata; - - switch (sshdr.ascq) { - case 0x1: /* LOGICAL UNIT FAILURE */ - if (printk_ratelimit()) - scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); - pqi_take_device_offline(scmd->device, "RAID"); - host_byte = DID_NO_CONNECT; - break; - - default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ - if (printk_ratelimit()) - scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", - sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); - break; - } + sshdr.asc == 0x3e && + sshdr.ascq == 0x1) { + pqi_take_device_offline(scmd->device, "RAID"); + host_byte = DID_NO_CONNECT; } if (sense_data_length > SCSI_SENSE_BUFFERSIZE) @@ -2943,7 +3107,7 @@ static void pqi_process_io_error(unsigned int iu_type, } } -static int pqi_interpret_task_management_response( +static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, struct pqi_task_management_response *response) { int rc; @@ -2961,13 +3125,21 @@ static int pqi_interpret_task_management_response( break; } + if (rc) + dev_err(&ctrl_info->pci_dev->dev, + "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); + return rc; } -static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, - struct pqi_queue_group *queue_group) +static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) { - unsigned int num_responses; + pqi_take_ctrl_offline(ctrl_info); +} + +static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group 
*queue_group) +{ + int num_responses; pqi_index_t oq_pi; pqi_index_t oq_ci; struct pqi_io_request *io_request; @@ -2979,6 +3151,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, while (1) { oq_pi = readl(queue_group->oq_pi); + if (oq_pi >= ctrl_info->num_elements_per_oq) { + pqi_invalid_response(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, + "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", + oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); + return -1; + } if (oq_pi == oq_ci) break; @@ -2987,10 +3166,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); request_id = get_unaligned_le16(&response->request_id); - WARN_ON(request_id >= ctrl_info->max_io_slots); + if (request_id >= ctrl_info->max_io_slots) { + pqi_invalid_response(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, + "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", + request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); + return -1; + } io_request = &ctrl_info->io_request_pool[request_id]; - WARN_ON(atomic_read(&io_request->refcount) == 0); + if (atomic_read(&io_request->refcount) == 0) { + pqi_invalid_response(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, + "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", + request_id, oq_pi, oq_ci); + return -1; + } switch (response->header.iu_type) { case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: @@ -3003,13 +3194,11 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, case PQI_RESPONSE_IU_VENDOR_GENERAL: io_request->status = get_unaligned_le16( - &((struct pqi_vendor_general_response *) - response)->status); + &((struct pqi_vendor_general_response *)response)->status); break; case PQI_RESPONSE_IU_TASK_MANAGEMENT: - io_request->status = - pqi_interpret_task_management_response( - (void *)response); + io_request->status = pqi_interpret_task_management_response(ctrl_info, + (void *)response); break; case PQI_RESPONSE_IU_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); @@ -3020,24 +3209,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, io_request->error_info = ctrl_info->error_buffer + (get_unaligned_le16(&response->error_index) * PQI_ERROR_BUFFER_ELEMENT_LENGTH); - pqi_process_io_error(response->header.iu_type, - io_request); + pqi_process_io_error(response->header.iu_type, io_request); break; default: + pqi_invalid_response(ctrl_info); dev_err(&ctrl_info->pci_dev->dev, - "unexpected IU type: 0x%x\n", - response->header.iu_type); - break; + "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", + response->header.iu_type, oq_pi, oq_ci); + return -1; } - io_request->io_complete_callback(io_request, - io_request->context); + io_request->io_complete_callback(io_request, io_request->context); /* * Note that the I/O request structure CANNOT BE TOUCHED after * returning from the I/O completion callback! 
*/ - oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; } @@ -3119,8 +3306,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); request.event_type = event->event_type; - request.event_id = event->event_id; - request.additional_event_id = event->additional_event_id; + put_unaligned_le16(event->event_id, &request.event_id); + put_unaligned_le32(event->additional_event_id, &request.additional_event_id); pqi_send_event_ack(ctrl_info, &request, sizeof(request)); } @@ -3131,8 +3318,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( struct pqi_ctrl_info *ctrl_info) { - unsigned long timeout; u8 status; + unsigned long timeout; timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; @@ -3144,121 +3331,169 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( if (status & PQI_SOFT_RESET_ABORT) return RESET_ABORT; + if (!sis_is_firmware_running(ctrl_info)) + return RESET_NORESPONSE; + if (time_after(jiffies, timeout)) { - dev_err(&ctrl_info->pci_dev->dev, + dev_warn(&ctrl_info->pci_dev->dev, "timed out waiting for soft reset status\n"); return RESET_TIMEDOUT; } - if (!sis_is_firmware_running(ctrl_info)) - return RESET_NORESPONSE; - ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); } } -static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, - enum pqi_soft_reset_status reset_status) +static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) { int rc; + unsigned int delay_secs; + enum pqi_soft_reset_status reset_status; + + if (ctrl_info->soft_reset_handshake_supported) + reset_status = pqi_poll_for_soft_reset_status(ctrl_info); + else + reset_status = RESET_INITIATE_FIRMWARE; + + delay_secs = PQI_POST_RESET_DELAY_SECS; switch (reset_status) { - case RESET_INITIATE_DRIVER: - /* fall through */ - case RESET_TIMEDOUT: - dev_info(&ctrl_info->pci_dev->dev, - "resetting controller %u\n", ctrl_info->ctrl_id); - sis_soft_reset(ctrl_info); - /* fall through */ - case RESET_INITIATE_FIRMWARE: - rc = pqi_ofa_ctrl_restart(ctrl_info); - pqi_ofa_free_host_buffer(ctrl_info); - dev_info(&ctrl_info->pci_dev->dev, - "Online Firmware Activation for controller %u: %s\n", - ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED"); - break; - case RESET_ABORT: - pqi_ofa_ctrl_unquiesce(ctrl_info); - dev_info(&ctrl_info->pci_dev->dev, - "Online Firmware Activation for controller %u: %s\n", - ctrl_info->ctrl_id, "ABORTED"); - break; - case RESET_NORESPONSE: - pqi_ofa_free_host_buffer(ctrl_info); - pqi_take_ctrl_offline(ctrl_info); - break; + case RESET_TIMEDOUT: + delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; + /* fall through */ + case RESET_INITIATE_DRIVER: + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation: resetting controller\n"); + sis_soft_reset(ctrl_info); + /* fall through */ + case RESET_INITIATE_FIRMWARE: + ctrl_info->pqi_mode_enabled = false; + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); + rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation: %s\n", + rc == 0 ? 
"SUCCESS" : "FAILED"); + break; + case RESET_ABORT: + dev_info(&ctrl_info->pci_dev->dev, + "Online Firmware Activation ABORTED\n"); + if (ctrl_info->soft_reset_handshake_supported) + pqi_clear_soft_reset_status(ctrl_info); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + pqi_ofa_ctrl_unquiesce(ctrl_info); + break; + case RESET_NORESPONSE: + default: + dev_err(&ctrl_info->pci_dev->dev, + "unexpected Online Firmware Activation reset status: 0x%x\n", + reset_status); + pqi_ofa_free_host_buffer(ctrl_info); + pqi_ctrl_ofa_done(ctrl_info); + pqi_ofa_ctrl_unquiesce(ctrl_info); + pqi_take_ctrl_offline(ctrl_info); + break; } } -static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, +static void pqi_ofa_memory_alloc_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); + + pqi_ctrl_ofa_start(ctrl_info); + pqi_ofa_setup_host_buffer(ctrl_info); + pqi_ofa_host_memory_update(ctrl_info); +} + +static void pqi_ofa_quiesce_worker(struct work_struct *work) +{ + struct pqi_ctrl_info *ctrl_info; + struct pqi_event *event; + + ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); + + event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; + + pqi_ofa_ctrl_quiesce(ctrl_info); + pqi_acknowledge_event(ctrl_info, event); + pqi_process_soft_reset(ctrl_info); +} + +static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, struct pqi_event *event) { - u16 event_id; - enum pqi_soft_reset_status status; + bool ack_event; - event_id = get_unaligned_le16(&event->event_id); + ack_event = true; - mutex_lock(&ctrl_info->ofa_mutex); - - if (event_id == PQI_EVENT_OFA_QUIESCE) { + switch (event->event_id) { + case PQI_EVENT_OFA_MEMORY_ALLOCATION: dev_info(&ctrl_info->pci_dev->dev, - "Received Online Firmware Activation quiesce event for controller %u\n", - ctrl_info->ctrl_id); - pqi_ofa_ctrl_quiesce(ctrl_info); - pqi_acknowledge_event(ctrl_info, event); - if (ctrl_info->soft_reset_handshake_supported) { - status = pqi_poll_for_soft_reset_status(ctrl_info); - pqi_process_soft_reset(ctrl_info, status); - } else { - pqi_process_soft_reset(ctrl_info, - RESET_INITIATE_FIRMWARE); - } - - } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { - pqi_acknowledge_event(ctrl_info, event); - pqi_ofa_setup_host_buffer(ctrl_info, - le32_to_cpu(event->ofa_bytes_requested)); - pqi_ofa_host_memory_update(ctrl_info); - } else if (event_id == PQI_EVENT_OFA_CANCELLED) { + "received Online Firmware Activation memory allocation request\n"); + schedule_work(&ctrl_info->ofa_memory_alloc_work); + break; + case PQI_EVENT_OFA_QUIESCE: + dev_info(&ctrl_info->pci_dev->dev, + "received Online Firmware Activation quiesce request\n"); + schedule_work(&ctrl_info->ofa_quiesce_work); + ack_event = false; + break; + case PQI_EVENT_OFA_CANCELED: + dev_info(&ctrl_info->pci_dev->dev, + "received Online Firmware Activation cancel request: reason: %u\n", + ctrl_info->ofa_cancel_reason); pqi_ofa_free_host_buffer(ctrl_info); - pqi_acknowledge_event(ctrl_info, event); - dev_info(&ctrl_info->pci_dev->dev, - "Online Firmware Activation(%u) cancel reason : %u\n", - ctrl_info->ctrl_id, event->ofa_cancel_reason); + pqi_ctrl_ofa_done(ctrl_info); + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "received unknown Online Firmware Activation request: event ID: %u\n", + event->event_id); + break; } - mutex_unlock(&ctrl_info->ofa_mutex); + return ack_event; } static void 
pqi_event_worker(struct work_struct *work) { unsigned int i; + bool rescan_needed; struct pqi_ctrl_info *ctrl_info; struct pqi_event *event; + bool ack_event; ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); pqi_ctrl_busy(ctrl_info); - pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); + pqi_wait_if_ctrl_blocked(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) goto out; - pqi_schedule_rescan_worker_delayed(ctrl_info); - + rescan_needed = false; event = ctrl_info->events; for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { if (event->pending) { event->pending = false; if (event->event_type == PQI_EVENT_TYPE_OFA) { - pqi_ctrl_unbusy(ctrl_info); - pqi_ofa_process_event(ctrl_info, event); - return; + ack_event = pqi_ofa_process_event(ctrl_info, event); + } else { + ack_event = true; + rescan_needed = true; } - pqi_acknowledge_event(ctrl_info, event); + if (ack_event) + pqi_acknowledge_event(ctrl_info, event); } event++; } + if (rescan_needed) + pqi_schedule_rescan_worker_delayed(ctrl_info); + out: pqi_ctrl_unbusy(ctrl_info); } @@ -3269,8 +3504,9 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t) { int num_interrupts; u32 heartbeat_count; - struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, - heartbeat_timer); + struct pqi_ctrl_info *ctrl_info; + + ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) @@ -3287,9 +3523,8 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t) pqi_take_ctrl_offline(ctrl_info); return; } } else { ctrl_info->previous_num_interrupts = num_interrupts; } ctrl_info->previous_heartbeat_count = heartbeat_count; mod_timer(&ctrl_info->heartbeat_timer, @@ -3316,43 +3551,24 @@ static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) del_timer_sync(&ctrl_info->heartbeat_timer); } -static inline int pqi_event_type_to_event_index(unsigned int event_type) +static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, + struct pqi_event *event, struct pqi_event_response *response) { - int index; - - for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) - if (event_type == pqi_supported_event_types[index]) - return index; - - return -1; -} - -static inline bool pqi_is_supported_event(unsigned int event_type) -{ - return pqi_event_type_to_event_index(event_type) != -1; -} - -static void pqi_ofa_capture_event_payload(struct pqi_event *event, - struct pqi_event_response *response) -{ - u16 event_id; - - event_id = get_unaligned_le16(&event->event_id); - - if (event->event_type == PQI_EVENT_TYPE_OFA) { - if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { - event->ofa_bytes_requested = - response->data.ofa_memory_allocation.bytes_requested; - } else if (event_id == PQI_EVENT_OFA_CANCELLED) { - event->ofa_cancel_reason = - response->data.ofa_cancelled.reason; - } + switch (event->event_id) { + case PQI_EVENT_OFA_MEMORY_ALLOCATION: + ctrl_info->ofa_bytes_requested = + get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); + break; + case PQI_EVENT_OFA_CANCELED: + ctrl_info->ofa_cancel_reason = + get_unaligned_le16(&response->data.ofa_cancelled.reason); + break; } } -static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) +static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) { - unsigned int num_events; + int num_events; pqi_index_t oq_pi; pqi_index_t oq_ci; struct pqi_event_queue *event_queue; @@ -3366,26 +3582,31 @@ static unsigned int pqi_process_event_intr(struct 
pqi_ctrl_info *ctrl_info) while (1) { oq_pi = readl(event_queue->oq_pi); + if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { + pqi_invalid_response(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, + "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", + oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); + return -1; + } + if (oq_pi == oq_ci) break; num_events++; - response = event_queue->oq_element_array + - (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); + response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); - event_index = - pqi_event_type_to_event_index(response->event_type); + event_index = pqi_event_type_to_event_index(response->event_type); - if (event_index >= 0) { - if (response->request_acknowlege) { - event = &ctrl_info->events[event_index]; - event->pending = true; - event->event_type = response->event_type; - event->event_id = response->event_id; - event->additional_event_id = - response->additional_event_id; - pqi_ofa_capture_event_payload(event, response); - } + if (event_index >= 0 && response->request_acknowledge) { + event = &ctrl_info->events[event_index]; + event->pending = true; + event->event_type = response->event_type; + event->event_id = get_unaligned_le16(&response->event_id); + event->additional_event_id = + get_unaligned_le32(&response->additional_event_id); + if (event->event_type == PQI_EVENT_TYPE_OFA) + pqi_ofa_capture_event_payload(ctrl_info, event, response); } oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; @@ -3402,8 +3623,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) #define PQI_LEGACY_INTX_MASK 0x1 -static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, - bool enable_intx) +static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) { u32 intx_mask; struct pqi_device_registers __iomem *pqi_registers; @@ -3480,8 +3700,7 @@ static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) valid_irq = true; break; case IRQ_MODE_INTX: - intx_status = - readl(&ctrl_info->pqi_registers->legacy_intx_status); + intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); if (intx_status & PQI_LEGACY_INTX_PENDING) valid_irq = true; else @@ -3500,7 +3719,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) { struct pqi_ctrl_info *ctrl_info; struct pqi_queue_group *queue_group; - unsigned int num_responses_handled; + int num_io_responses_handled; + int num_events_handled; queue_group = data; ctrl_info = queue_group->ctrl_info; @@ -3508,35 +3728,44 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) if (!pqi_is_valid_irq(ctrl_info)) return IRQ_NONE; - num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); + num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); + if (num_io_responses_handled < 0) + goto out; - if (irq == ctrl_info->event_irq) - num_responses_handled += pqi_process_event_intr(ctrl_info); + if (irq == ctrl_info->event_irq) { + num_events_handled = pqi_process_event_intr(ctrl_info); + if (num_events_handled < 0) + goto out; + } else { + num_events_handled = 0; + } - if (num_responses_handled) + if (num_io_responses_handled + num_events_handled > 0) atomic_inc(&ctrl_info->num_interrupts); pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); +out: return IRQ_HANDLED; } static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) { struct pci_dev *pci_dev = ctrl_info->pci_dev; int i; int rc; ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); if (rc) { dev_err(&pci_dev->dev, "irq %u init failed with error %d\n", pci_irq_vector(pci_dev, i), rc); return rc; } ctrl_info->num_msix_vectors_initialized++; @@ -3550,19 +3779,27 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) int i; for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) free_irq(pci_irq_vector(ctrl_info->pci_dev, i), &ctrl_info->queue_groups[i]); ctrl_info->num_msix_vectors_initialized = 0; } static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) { int num_vectors_enabled; num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (num_vectors_enabled < 0) { dev_err(&ctrl_info->pci_dev->dev, "MSI-X init failed with error %d\n", @@ -3571,18 +3808,50 @@ static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) } ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; ctrl_info->irq_mode = IRQ_MODE_MSIX; return 0; } static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) { if (ctrl_info->num_msix_vectors_enabled) { pci_free_irq_vectors(ctrl_info->pci_dev); ctrl_info->num_msix_vectors_enabled = 0; } } static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) { unsigned int i; @@ -3638,9 +3907,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) alloc_length += PQI_EXTRA_SGL_MEMORY; ctrl_info->queue_memory_base = - dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, - &ctrl_info->queue_memory_base_dma_handle, - GFP_KERNEL); + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); if (!ctrl_info->queue_memory_base) return -ENOMEM; @@ -3777,9 +4046,10 @@ 
static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; ctrl_info->admin_queue_memory_base = - dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, - &ctrl_info->admin_queue_memory_base_dma_handle, - GFP_KERNEL); + dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, + &ctrl_info->admin_queue_memory_base_dma_handle, GFP_KERNEL); if (!ctrl_info->admin_queue_memory_base) return -ENOMEM; @@ -3841,9 +4111,10 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) &pqi_registers->admin_oq_pi_addr); reg = PQI_ADMIN_IQ_NUM_ELEMENTS | - (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | + (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | (admin_queues->int_msg_num << 16); writel(reg, &pqi_registers->admin_iq_num_elements); + writel(PQI_CREATE_ADMIN_QUEUE_PAIR, &pqi_registers->function_and_status_code); @@ -3923,7 +4194,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, } if (!sis_is_firmware_running(ctrl_info)) return -ENXIO; usleep_range(1000, 2000); } memcpy(response, admin_queues->oq_element_array + @@ -4048,8 +4319,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, complete(waiting); } -static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info - *error_info) +static int pqi_process_raid_io_error_synchronous( + struct pqi_raid_error_info *error_info) { int rc = -EIO; @@ -4071,53 +4342,32 @@ static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info return rc; } +static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) +{ + return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; +} + static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, struct pqi_iu_header *request, unsigned int flags, - struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) + struct pqi_raid_error_info *error_info) { int rc = 0; struct pqi_io_request *io_request; - unsigned long start_jiffies; - unsigned long msecs_blocked; size_t iu_length; DECLARE_COMPLETION_ONSTACK(wait); - /* - * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value - * are mutually exclusive. 
- */ - if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { if (down_interruptible(&ctrl_info->sync_request_sem)) return -ERESTARTSYS; } else { - if (timeout_msecs == NO_TIMEOUT) { - down(&ctrl_info->sync_request_sem); - } else { - start_jiffies = jiffies; - if (down_timeout(&ctrl_info->sync_request_sem, - msecs_to_jiffies(timeout_msecs))) - return -ETIMEDOUT; - msecs_blocked = - jiffies_to_msecs(jiffies - start_jiffies); - if (msecs_blocked >= timeout_msecs) { - rc = -ETIMEDOUT; - goto out; - } - timeout_msecs -= msecs_blocked; - } + down(&ctrl_info->sync_request_sem); } pqi_ctrl_busy(ctrl_info); - timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); - if (timeout_msecs == 0) { - pqi_ctrl_unbusy(ctrl_info); - rc = -ETIMEDOUT; - goto out; - } + if (pqi_is_blockable_request(request)) + pqi_wait_if_ctrl_blocked(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) { - pqi_ctrl_unbusy(ctrl_info); rc = -ENXIO; goto out; } @@ -4138,37 +4388,24 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, io_request->io_complete_callback = pqi_raid_synchronous_complete; io_request->context = &wait; - pqi_start_io(ctrl_info, - &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, + pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, io_request); - pqi_ctrl_unbusy(ctrl_info); - - if (timeout_msecs == NO_TIMEOUT) { - pqi_wait_for_completion_io(ctrl_info, &wait); - } else { - if (!wait_for_completion_io_timeout(&wait, - msecs_to_jiffies(timeout_msecs))) { - dev_warn(&ctrl_info->pci_dev->dev, - "command timed out\n"); - rc = -ETIMEDOUT; - } - } + pqi_wait_for_completion_io(ctrl_info, &wait); if (error_info) { if (io_request->error_info) - memcpy(error_info, io_request->error_info, - sizeof(*error_info)); + memcpy(error_info, io_request->error_info, sizeof(*error_info)); else memset(error_info, 0, sizeof(*error_info)); } else if (rc == 0 && io_request->error_info) { - rc = pqi_process_raid_io_error_synchronous( - io_request->error_info); + rc = pqi_process_raid_io_error_synchronous(io_request->error_info); } pqi_free_io_request(io_request); out: + pqi_ctrl_unbusy(ctrl_info); up(&ctrl_info->sync_request_sem); return rc; @@ -4205,8 +4442,7 @@ static int pqi_submit_admin_request_synchronous( rc = pqi_poll_for_admin_response(ctrl_info, response); if (rc == 0) - rc = pqi_validate_admin_response(response, - request->function_code); + rc = pqi_validate_admin_response(response, request->function_code); return rc; } @@ -4240,8 +4476,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) if (rc) goto out; - rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, - &response); + rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); pqi_pci_unmap(ctrl_info->pci_dev, &request.data.report_device_capability.sg_descriptor, 1, @@ -4577,7 +4812,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, goto out; rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, NULL, NO_TIMEOUT); + 0, NULL); pqi_pci_unmap(ctrl_info->pci_dev, request.data.report_event_configuration.sg_descriptors, 1, @@ -4590,7 +4825,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, event_descriptor = &event_config->descriptors[i]; if (enable_events && pqi_is_supported_event(event_descriptor->event_type)) - put_unaligned_le16(ctrl_info->event_queue.oq_id, + put_unaligned_le16(ctrl_info->event_queue.oq_id, &event_descriptor->oq_id); else put_unaligned_le16(0, &event_descriptor->oq_id); @@ 
-4613,7 +4848,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, goto out; rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, - NULL, NO_TIMEOUT); + NULL); pqi_pci_unmap(ctrl_info->pci_dev, request.data.report_event_configuration.sg_descriptors, 1, @@ -4665,10 +4900,9 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) { - ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, - ctrl_info->error_buffer_length, - &ctrl_info->error_buffer_dma_handle, - GFP_KERNEL); + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->error_buffer_length, + &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); if (!ctrl_info->error_buffer) return -ENOMEM; @@ -4685,9 +4919,8 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) struct device *dev; struct pqi_io_request *io_request; - ctrl_info->io_request_pool = - kcalloc(ctrl_info->max_io_slots, - sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); + ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, + sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); if (!ctrl_info->io_request_pool) { dev_err(&ctrl_info->pci_dev->dev, @@ -4700,8 +4933,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) io_request = ctrl_info->io_request_pool; for (i = 0; i < ctrl_info->max_io_slots; i++) { - io_request->iu = - kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); + io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); if (!io_request->iu) { dev_err(&ctrl_info->pci_dev->dev, @@ -4721,8 +4953,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) io_request->index = i; io_request->sg_chain_buffer = sg_chain_buffer; - io_request->sg_chain_buffer_dma_handle = - sg_chain_buffer_dma_handle; + io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; io_request++; } @@ -4829,10 +5060,16 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / sizeof(struct pqi_sg_descriptor)) + PQI_MAX_EMBEDDED_SG_DESCRIPTORS; + + ctrl_info->max_sg_per_r56_iu = + ((ctrl_info->max_inbound_iu_length - + PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / + sizeof(struct pqi_sg_descriptor)) + + PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; } -static inline void pqi_set_sg_descriptor( - struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) +static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, + struct scatterlist *sg) { u64 address = (u64)sg_dma_address(sg); unsigned int length = sg_dma_len(sg); @@ -4842,16 +5079,52 @@ static inline void pqi_set_sg_descriptor( put_unaligned_le32(0, &sg_descriptor->flags); } +static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, + struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, + int max_sg_per_iu, bool *chained) +{ + int i; + unsigned int num_sg_in_iu; + + *chained = false; + i = 0; + num_sg_in_iu = 0; + max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. 
*/ + + while (1) { + pqi_set_sg_descriptor(sg_descriptor, sg); + if (!*chained) + num_sg_in_iu++; + i++; + if (i == sg_count) + break; + sg_descriptor++; + if (i == max_sg_per_iu) { + put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, + &sg_descriptor->address); + put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), + &sg_descriptor->length); + put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); + *chained = true; + num_sg_in_iu++; + sg_descriptor = io_request->sg_chain_buffer; + } + sg = sg_next(sg); + } + + put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); + + return num_sg_in_iu; +} + static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, struct pqi_io_request *io_request) { - int i; u16 iu_length; int sg_count; bool chained; unsigned int num_sg_in_iu; - unsigned int max_sg_per_iu; struct scatterlist *sg; struct pqi_sg_descriptor *sg_descriptor; @@ -4867,36 +5140,10 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, sg = scsi_sglist(scmd); sg_descriptor = request->sg_descriptors; - max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; - chained = false; - num_sg_in_iu = 0; - i = 0; - while (1) { - pqi_set_sg_descriptor(sg_descriptor, sg); - if (!chained) - num_sg_in_iu++; - i++; - if (i == sg_count) - break; - sg_descriptor++; - if (i == max_sg_per_iu) { - put_unaligned_le64( - (u64)io_request->sg_chain_buffer_dma_handle, - &sg_descriptor->address); - put_unaligned_le32((sg_count - num_sg_in_iu) - * sizeof(*sg_descriptor), - &sg_descriptor->length); - put_unaligned_le32(CISS_SG_CHAIN, - &sg_descriptor->flags); - chained = true; - num_sg_in_iu++; - sg_descriptor = io_request->sg_chain_buffer; - } - sg = sg_next(sg); - } + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); - put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); request->partial = chained; iu_length += num_sg_in_iu * sizeof(*sg_descriptor); @@ -4906,16 +5153,90 @@ out: return 0; } -static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, - struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, +static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + return sg_count; + + iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + num_sg_in_iu = 0; + + if (sg_count == 0) + goto out; + + sg = scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + +out: + put_unaligned_le16(iu_length, &request->header.iu_length); + request->num_sg_descriptors = num_sg_in_iu; + + return 0; +} + +static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, + struct pqi_io_request *io_request) +{ + u16 iu_length; + int sg_count; + bool chained; + unsigned int num_sg_in_iu; + struct scatterlist *sg; + struct pqi_sg_descriptor *sg_descriptor; + + sg_count = scsi_dma_map(scmd); + if (sg_count < 0) + 
return sg_count; + + iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - + PQI_REQUEST_HEADER_LENGTH; + num_sg_in_iu = 0; + + if (sg_count == 0) + goto out; + + sg = scsi_sglist(scmd); + sg_descriptor = request->sg_descriptors; + + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_r56_iu, &chained); + + request->partial = chained; + iu_length += num_sg_in_iu * sizeof(*sg_descriptor); + +out: + put_unaligned_le16(iu_length, &request->header.iu_length); + request->num_sg_descriptors = num_sg_in_iu; + + return 0; +} + +static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, + struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, struct pqi_io_request *io_request) { - int i; u16 iu_length; int sg_count; bool chained; unsigned int num_sg_in_iu; - unsigned int max_sg_per_iu; struct scatterlist *sg; struct pqi_sg_descriptor *sg_descriptor; @@ -4932,35 +5253,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, sg = scsi_sglist(scmd); sg_descriptor = request->sg_descriptors; - max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; - chained = false; - i = 0; - while (1) { - pqi_set_sg_descriptor(sg_descriptor, sg); - if (!chained) - num_sg_in_iu++; - i++; - if (i == sg_count) - break; - sg_descriptor++; - if (i == max_sg_per_iu) { - put_unaligned_le64( - (u64)io_request->sg_chain_buffer_dma_handle, - &sg_descriptor->address); - put_unaligned_le32((sg_count - num_sg_in_iu) - * sizeof(*sg_descriptor), - &sg_descriptor->length); - put_unaligned_le32(CISS_SG_CHAIN, - &sg_descriptor->flags); - chained = true; - num_sg_in_iu++; - sg_descriptor = io_request->sg_chain_buffer; - } - sg = sg_next(sg); - } + num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, + ctrl_info->max_sg_per_iu, &chained); - put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); request->partial = chained; iu_length += num_sg_in_iu * sizeof(*sg_descriptor); @@ -4995,16 +5291,14 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( io_request->scmd = scmd; request = io_request->iu; - memset(request, 0, - offsetof(struct pqi_raid_path_request, sg_descriptors)); + memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; put_unaligned_le16(io_request->index, &request->request_id); request->error_index = request->request_id; - memcpy(request->lun_number, device->scsi3addr, - sizeof(request->lun_number)); + memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); memcpy(request->cdb, scmd->cmnd, cdb_length); @@ -5014,33 +5308,27 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( case 10: case 12: case 16: - /* No bytes in the Additional CDB bytes field */ - request->additional_cdb_bytes_usage = - SOP_ADDITIONAL_CDB_BYTES_0; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; break; case 20: - /* 4 bytes in the Additional cdb field */ - request->additional_cdb_bytes_usage = - SOP_ADDITIONAL_CDB_BYTES_4; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; break; case 24: - /* 8 bytes in the Additional cdb field */ - request->additional_cdb_bytes_usage = - SOP_ADDITIONAL_CDB_BYTES_8; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; break; case 28: - /* 12 bytes in the Additional cdb field */ - 
request->additional_cdb_bytes_usage = - SOP_ADDITIONAL_CDB_BYTES_12; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; break; case 32: default: - /* 16 bytes in the Additional cdb field */ - request->additional_cdb_bytes_usage = - SOP_ADDITIONAL_CDB_BYTES_16; + request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; break; } switch (scmd->sc_data_direction) { case DMA_TO_DEVICE: request->data_direction = SOP_READ_FLAG; break; @@ -5058,6 +5346,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( dev_err(&ctrl_info->pci_dev->dev, "unknown data direction: %d\n", scmd->sc_data_direction); + BUG(); break; } @@ -5084,12 +5373,6 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, device, scmd, queue_group); } -static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) -{ - if (!pqi_ctrl_blocked(ctrl_info)) - schedule_work(&ctrl_info->raid_bypass_retry_work); -} - static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) { struct scsi_cmnd *scmd; @@ -5106,7 +5389,7 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) return false; device = scmd->device->hostdata; - if (pqi_device_offline(device)) + if (pqi_device_offline(device) || pqi_device_in_remove(device)) return false; ctrl_info = shost_to_hba(scmd->device->host); @@ -5116,132 +5399,6 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) return true; } -static inline void pqi_add_to_raid_bypass_retry_list( - struct pqi_ctrl_info *ctrl_info, - struct pqi_io_request *io_request, bool at_head) -{ - unsigned long flags; - 
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); - io_request = list_first_entry_or_null( - &ctrl_info->raid_bypass_retry_list, - struct pqi_io_request, request_list_entry); - if (io_request) - list_del(&io_request->request_list_entry); - spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); - - return io_request; -} - -static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) -{ - int rc; - struct pqi_io_request *io_request; - - pqi_ctrl_busy(ctrl_info); - - while (1) { - if (pqi_ctrl_blocked(ctrl_info)) - break; - io_request = pqi_next_queued_raid_bypass_request(ctrl_info); - if (!io_request) - break; - rc = pqi_retry_raid_bypass(io_request); - if (rc) { - pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, - true); - pqi_schedule_bypass_retry(ctrl_info); - break; - } - } - - pqi_ctrl_unbusy(ctrl_info); -} - -static void pqi_raid_bypass_retry_worker(struct work_struct *work) -{ - struct pqi_ctrl_info *ctrl_info; - - ctrl_info = container_of(work, struct pqi_ctrl_info, - raid_bypass_retry_work); - pqi_retry_raid_bypass_requests(ctrl_info); -} - -static void pqi_clear_all_queued_raid_bypass_retries( - struct pqi_ctrl_info *ctrl_info) -{ - unsigned long flags; - - spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); - INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); - spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); -} - static void pqi_aio_io_complete(struct pqi_io_request *io_request, void *context) { @@ -5249,12 +5406,11 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request, scmd = io_request->scmd; scsi_dma_unmap(scmd); - if (io_request->status == -EAGAIN) + if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { set_host_byte(scmd, DID_IMM_RETRY); - else if (pqi_raid_bypass_retry_needed(io_request)) { - pqi_queue_raid_bypass_retry(io_request); - return; + scmd->SCp.this_residual++; } + pqi_free_io_request(io_request); pqi_scsi_done(scmd); } @@ -5282,8 +5438,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, io_request->raid_bypass = raid_bypass; request = io_request->iu; - memset(request, 0, - offsetof(struct pqi_raid_path_request, sg_descriptors)); + memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; put_unaligned_le32(aio_handle, &request->nexus_id); @@ -5313,6 +5468,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, dev_err(&ctrl_info->pci_dev->dev, "unknown data direction: %d\n", scmd->sc_data_direction); + BUG(); break; } @@ -5337,16 +5493,171 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, return 0; } static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) { u16 hw_queue; hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); if (hw_queue > ctrl_info->max_hw_queue_index) hw_queue = 0; return hw_queue; } +static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + int rc; + struct pqi_io_request *io_request; + struct pqi_aio_r1_path_request *r1_request; + + io_request = pqi_alloc_io_request(ctrl_info); + io_request->io_complete_callback = pqi_aio_io_complete; + io_request->scmd = scmd; + io_request->raid_bypass = true; + + r1_request = io_request->iu; + 
memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); + + r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; + put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); + r1_request->num_drives = rmd->num_it_nexus_entries; + put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); + put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); + if (rmd->num_it_nexus_entries == 3) + put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); + + put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); + r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + put_unaligned_le16(io_request->index, &r1_request->request_id); + r1_request->error_index = r1_request->request_id; + if (rmd->cdb_length > sizeof(r1_request->cdb)) + rmd->cdb_length = sizeof(r1_request->cdb); + r1_request->cdb_length = rmd->cdb_length; + memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); + + switch (scmd->sc_data_direction) { + case DMA_TO_DEVICE: + r1_request->data_direction = SOP_READ_FLAG; + break; + case DMA_FROM_DEVICE: + r1_request->data_direction = SOP_WRITE_FLAG; + break; + case DMA_NONE: + r1_request->data_direction = SOP_NO_DIRECTION_FLAG; + break; + case DMA_BIDIRECTIONAL: + r1_request->data_direction = SOP_BIDIRECTIONAL; + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "unknown data direction: %d\n", + scmd->sc_data_direction); + BUG(); + break; + } + + if (encryption_info) { + r1_request->encryption_enable = true; + put_unaligned_le16(encryption_info->data_encryption_key_index, + &r1_request->data_encryption_key_index); + put_unaligned_le32(encryption_info->encrypt_tweak_lower, + &r1_request->encrypt_tweak_lower); + put_unaligned_le32(encryption_info->encrypt_tweak_upper, + &r1_request->encrypt_tweak_upper); + } + + rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); + + return 0; +} + +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, + struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, + struct pqi_scsi_dev_raid_map_data *rmd) +{ + int rc; + struct pqi_io_request *io_request; + struct pqi_aio_r56_path_request *r56_request; + + io_request = pqi_alloc_io_request(ctrl_info); + io_request->io_complete_callback = pqi_aio_io_complete; + io_request->scmd = scmd; + io_request->raid_bypass = true; + + r56_request = io_request->iu; + memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); + + if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) + r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; + else + r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; + + put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); + put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); + put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); + if (rmd->raid_level == SA_RAID_6) { + put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); + r56_request->xor_multiplier = rmd->xor_mult; + } + put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); + r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + put_unaligned_le64(rmd->row, &r56_request->row); + + put_unaligned_le16(io_request->index, 
&r56_request->request_id); + r56_request->error_index = r56_request->request_id; + + if (rmd->cdb_length > sizeof(r56_request->cdb)) + rmd->cdb_length = sizeof(r56_request->cdb); + r56_request->cdb_length = rmd->cdb_length; + memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); + + switch (scmd->sc_data_direction) { + case DMA_TO_DEVICE: + r56_request->data_direction = SOP_READ_FLAG; + break; + case DMA_FROM_DEVICE: + r56_request->data_direction = SOP_WRITE_FLAG; + break; + case DMA_NONE: + r56_request->data_direction = SOP_NO_DIRECTION_FLAG; + break; + case DMA_BIDIRECTIONAL: + r56_request->data_direction = SOP_BIDIRECTIONAL; + break; + default: + dev_err(&ctrl_info->pci_dev->dev, + "unknown data direction: %d\n", + scmd->sc_data_direction); + BUG(); + break; + } + + if (encryption_info) { + r56_request->encryption_enable = true; + put_unaligned_le16(encryption_info->data_encryption_key_index, + &r56_request->data_encryption_key_index); + put_unaligned_le32(encryption_info->encrypt_tweak_lower, + &r56_request->encrypt_tweak_lower); + put_unaligned_le32(encryption_info->encrypt_tweak_upper, + &r56_request->encrypt_tweak_upper); + } + + rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); + if (rc) { + pqi_free_io_request(io_request); + return SCSI_MLQUEUE_HOST_BUSY; + } + + pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); + + return 0; +} + +static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) +{ + if (blk_rq_is_passthrough(scmd->request)) + return false; + + return scmd->SCp.this_residual == 0; } /* @@ -5357,6 +5668,8 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) { struct pqi_scsi_dev *device; + struct pqi_ctrl_info *ctrl_info; + struct Scsi_Host *shost; if (!scmd->device) { set_host_byte(scmd, DID_NO_CONNECT); @@ -5369,11 +5682,89 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) return; } + shost = scmd->device->host; + ctrl_info = shost_to_hba(shost); + atomic_dec(&device->scsi_cmds_outstanding); + atomic_dec(&ctrl_info->total_scmds_outstanding); } -static int pqi_scsi_queue_command(struct Scsi_Host *shost, +static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) +{ + u32 oldest_jiffies; + u8 lru_index; + int i; + int rc; + struct pqi_scsi_dev *device; + struct pqi_stream_data *pqi_stream_data; + struct pqi_scsi_dev_raid_map_data rmd; + + if (!ctrl_info->enable_stream_detection) + return false; + + rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); + if (rc) + return false; + + /* Check writes only. */ + if (!rmd.is_write) + return false; + + device = scmd->device->hostdata; + + /* Check for RAID 5/6 streams. */ + if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) + return false; + + /* + * If controller does not support AIO RAID{5,6} writes, need to send + * requests down non-AIO path. + */ + if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || + (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) + return true; + + lru_index = 0; + oldest_jiffies = INT_MAX; + for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { + pqi_stream_data = &device->stream_data[i]; + /* + * Check for adjacent request or request is within + * the previous request. 
+ */ + if ((pqi_stream_data->next_lba && + rmd.first_block >= pqi_stream_data->next_lba) && + rmd.first_block <= pqi_stream_data->next_lba + + rmd.block_cnt) { + pqi_stream_data->next_lba = rmd.first_block + + rmd.block_cnt; + pqi_stream_data->last_accessed = jiffies; + return true; + } + + /* unused entry */ + if (pqi_stream_data->last_accessed == 0) { + lru_index = i; + break; + } + + /* Find entry with oldest last accessed time. */ + if (pqi_stream_data->last_accessed <= oldest_jiffies) { + oldest_jiffies = pqi_stream_data->last_accessed; + lru_index = i; + } + } + + /* Set LRU entry. */ + pqi_stream_data = &device->stream_data[lru_index]; + pqi_stream_data->last_accessed = jiffies; + pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; + + return false; +} + +static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { int rc; struct pqi_ctrl_info *ctrl_info; @@ -5392,17 +5783,19 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, } atomic_inc(&device->scsi_cmds_outstanding); + if (atomic_inc_return(&ctrl_info->total_scmds_outstanding) > + ctrl_info->scsi_ml_can_queue) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } - if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info, - device)) { + if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { set_host_byte(scmd, DID_NO_CONNECT); pqi_scsi_done(scmd); return 0; } - pqi_ctrl_busy(ctrl_info); - if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || - pqi_ctrl_in_ofa(ctrl_info)) { + if (pqi_ctrl_blocked(ctrl_info)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -5419,28 +5812,28 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, if (pqi_is_logical_device(device)) { raid_bypassed = false; if (device->raid_bypass_enabled && - !blk_rq_is_passthrough(scmd->request)) { - rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, - scmd, queue_group); - if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) + pqi_is_bypass_eligible_request(scmd) && + !pqi_is_parity_write_stream(ctrl_info, scmd)) { + rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); + if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; + atomic_inc(&device->raid_bypass_cnt); + } } if (!raid_bypassed) - rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, - queue_group); + rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); } else { if (device->aio_enabled) - rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, - queue_group); + rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); else - rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, - queue_group); + rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); } out: - pqi_ctrl_unbusy(ctrl_info); - if (rc) + if (rc) { atomic_dec(&device->scsi_cmds_outstanding); + atomic_dec(&ctrl_info->total_scmds_outstanding); + } return rc; } @@ -5465,7 +5858,7 @@ static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; usleep_range(1000, 2000); } } @@ -5498,7 +5891,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; usleep_range(1000, 2000); } } } @@ -5528,6 +5921,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, list_for_each_entry_safe(io_request, next, &queue_group->request_list[path], request_list_entry) { + 
scmd = io_request->scmd; if (!scmd) continue; @@ -5547,104 +5941,39 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, } } -static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) -{ - unsigned int i; - unsigned int path; - struct pqi_queue_group *queue_group; - unsigned long flags; - struct pqi_io_request *io_request; - struct pqi_io_request *next; - struct scsi_cmnd *scmd; - - for (i = 0; i < ctrl_info->num_queue_groups; i++) { - queue_group = &ctrl_info->queue_groups[i]; - - for (path = 0; path < 2; path++) { - spin_lock_irqsave(&queue_group->submit_lock[path], - flags); - - list_for_each_entry_safe(io_request, next, - &queue_group->request_list[path], - request_list_entry) { - - scmd = io_request->scmd; - if (!scmd) - continue; - - list_del(&io_request->request_list_entry); - set_host_byte(scmd, DID_RESET); - pqi_scsi_done(scmd); - } - - spin_unlock_irqrestore( - &queue_group->submit_lock[path], flags); - } - } -} +#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device, unsigned long timeout_secs) + struct pqi_scsi_dev *device, unsigned long timeout_msecs) { - unsigned long timeout; + int cmds_outstanding; + unsigned long start_jiffies; + unsigned long warning_timeout; + unsigned long msecs_waiting; - timeout = (timeout_secs * PQI_HZ) + jiffies; + start_jiffies = jiffies; + warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies; - while (atomic_read(&device->scsi_cmds_outstanding)) { + while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) { pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; - if (timeout_secs != NO_TIMEOUT) { - if (time_after(jiffies, timeout)) { - dev_err(&ctrl_info->pci_dev->dev, - "timed out waiting for pending IO\n"); - return -ETIMEDOUT; - } + msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); + if (msecs_waiting > timeout_msecs) { + dev_err(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, + device->lun, msecs_waiting / 1000, cmds_outstanding); + return -ETIMEDOUT; } - usleep_range(1000, 2000); - } - - return 0; -} - -static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, - unsigned long timeout_secs) -{ - bool io_pending; - unsigned long flags; - unsigned long timeout; - struct pqi_scsi_dev *device; - - timeout = (timeout_secs * PQI_HZ) + jiffies; - while (1) { - io_pending = false; - - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); - list_for_each_entry(device, &ctrl_info->scsi_device_list, - scsi_device_list_entry) { - if (atomic_read(&device->scsi_cmds_outstanding)) { - io_pending = true; - break; - } + if (time_after(jiffies, warning_timeout)) { + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, + device->lun, msecs_waiting / 1000, cmds_outstanding); + warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies; } - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); - - if (!io_pending) - break; - - pqi_check_ctrl_health(ctrl_info); - if (pqi_ctrl_offline(ctrl_info)) - return -ENXIO; - - if (timeout_secs != NO_TIMEOUT) { - if (time_after(jiffies, timeout)) { - dev_err(&ctrl_info->pci_dev->dev, - "timed out waiting 
for pending IO\n"); - return -ETIMEDOUT; - } - } - usleep_range(1000, 2000); + msleep(1); } return 0; @@ -5658,16 +5987,19 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request, complete(waiting); } -#define PQI_LUN_RESET_TIMEOUT_SECS 10 +#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct completion *wait) { int rc; + unsigned int wait_secs; + + wait_secs = 0; while (1) { if (wait_for_completion_io_timeout(wait, - PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) { + PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { rc = 0; break; } @@ -5677,13 +6009,21 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, rc = -ENXIO; break; } + + wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; + + dev_warn(&ctrl_info->pci_dev->dev, + "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n", + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, + wait_secs); } return rc; } -static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 + +static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { int rc; struct pqi_io_request *io_request; @@ -5704,9 +6044,10 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; + if (ctrl_info->tmf_iu_timeout_supported) + put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); - pqi_start_io(ctrl_info, - &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, + pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, io_request); rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); @@ -5718,31 +6059,33 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, return rc; } -/* Performs a reset at the LUN level. */ +#define PQI_LUN_RESET_RETRIES 3 +#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) +#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) +#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) -#define PQI_LUN_RESET_RETRIES 3 -#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 -#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120 - -static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { - int rc; + int reset_rc; + int wait_rc; unsigned int retries; - unsigned long timeout_secs; + unsigned long timeout_msecs; for (retries = 0;;) { - rc = pqi_lun_reset(ctrl_info, device); - if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES) + reset_rc = pqi_lun_reset(ctrl_info, device); + if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) break; msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); } - timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT; + timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : + PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; - rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); + wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs); + if (wait_rc && reset_rc == 0) + reset_rc = wait_rc; - return rc == 0 ? SUCCESS : FAILED; + return reset_rc == 0 ? 
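Two patterns recur in the reworked reset path above: pqi_lun_reset_with_retries() retries the LUN reset itself with a sleep between attempts, and pqi_device_wait_for_pending_io() now bounds its drain wait in milliseconds while re-arming a periodic warning, so a long wait is visible rather than silent. A condensed kernel-style sketch of both, with the controller health checks elided and all names chosen for illustration:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

#define RESET_RETRIES			3
#define RESET_RETRY_INTERVAL_MSECS	(10 * 1000)
#define WARN_INTERVAL_SECS		10

static int wait_for_drain(atomic_t *outstanding, unsigned long timeout_msecs)
{
	unsigned long start = jiffies;
	unsigned long warn_at = start + WARN_INTERVAL_SECS * HZ;
	int pending;

	while ((pending = atomic_read(outstanding)) > 0) {
		unsigned long waited = jiffies_to_msecs(jiffies - start);

		if (waited > timeout_msecs)
			return -ETIMEDOUT;
		if (time_after(jiffies, warn_at)) {
			pr_warn("waiting %lu s for %d outstanding command(s)\n",
				waited / 1000, pending);
			warn_at = jiffies + WARN_INTERVAL_SECS * HZ;	/* re-arm */
		}
		msleep(1);
	}
	return 0;
}

static int reset_with_retries(int (*do_reset)(void *), void *ctx)
{
	unsigned int retries;
	int rc;

	for (retries = 0;;) {
		rc = do_reset(ctx);
		if (rc == 0 || ++retries > RESET_RETRIES)
			break;
		msleep(RESET_RETRY_INTERVAL_MSECS);
	}
	return rc;
}

Note also that the hunk selects the shorter PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS when the reset itself failed, so a dead LUN cannot pin the error handler for the full ten-minute drain window.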
SUCCESS : FAILED; } static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, @@ -5750,23 +6093,15 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, { int rc; - mutex_lock(&ctrl_info->lun_reset_mutex); - pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); pqi_fail_io_queued_for_device(ctrl_info, device); rc = pqi_wait_until_inbound_queues_empty(ctrl_info); - pqi_device_reset_start(device); - pqi_ctrl_unblock_requests(ctrl_info); - if (rc) rc = FAILED; else - rc = _pqi_device_reset(ctrl_info, device); - - pqi_device_reset_done(device); - - mutex_unlock(&ctrl_info->lun_reset_mutex); + rc = pqi_lun_reset_with_retries(ctrl_info, device); + pqi_ctrl_unblock_requests(ctrl_info); return rc; } @@ -5782,29 +6117,25 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) ctrl_info = shost_to_hba(shost); device = scmd->device->hostdata; + mutex_lock(&ctrl_info->lun_reset_mutex); + dev_err(&ctrl_info->pci_dev->dev, - "resetting scsi %d:%d:%d:%d\n", - shost->host_no, device->bus, device->target, device->lun); + "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n", shost->host_no, + device->bus, device->target, device->lun, scmd->cmnd[0]); pqi_check_ctrl_health(ctrl_info); - if (pqi_ctrl_offline(ctrl_info)) { - dev_err(&ctrl_info->pci_dev->dev, - "controller %u offlined - cannot send device reset\n", - ctrl_info->ctrl_id); + if (pqi_ctrl_offline(ctrl_info)) rc = FAILED; - goto out; - } + else + rc = pqi_device_reset(ctrl_info, device); - pqi_wait_until_ofa_finished(ctrl_info); - - rc = pqi_device_reset(ctrl_info, device); - -out: dev_err(&ctrl_info->pci_dev->dev, "reset of scsi %d:%d:%d:%d: %s\n", shost->host_no, device->bus, device->target, device->lun, rc == SUCCESS ? "SUCCESS" : "FAILED"); + mutex_unlock(&ctrl_info->lun_reset_mutex); + return rc; } @@ -5842,10 +6173,16 @@ static int pqi_slave_alloc(struct scsi_device *sdev) scsi_change_queue_depth(sdev, device->advertised_queue_depth); } - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device)) { pqi_disable_write_same(sdev); - else + if (pqi_limit_xfer_to_1MB) + blk_queue_max_hw_sectors(sdev->request_queue, + PQI_1MB_SECTORS); + } else { sdev->allow_restart = 1; + if (device->device_type == SA_DEVICE_TYPE_NVME) + pqi_disable_write_same(sdev); + } } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -5853,16 +6190,27 @@ static int pqi_slave_alloc(struct scsi_device *sdev) return 0; } -static int pqi_map_queues(struct Scsi_Host *shost) +static int pqi_slave_configure(struct scsi_device *sdev) { - struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + struct pqi_scsi_dev *device; - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ctrl_info->pci_dev, 0); + device = sdev->hostdata; + device->devtype = sdev->type; + + return 0; } -static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, - void __user *arg) +static void pqi_slave_destroy(struct scsi_device *sdev) +{ + struct pqi_scsi_dev *device; + + device = sdev->hostdata; + + if (device) + pqi_device_remove_start(device); +} + +static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) { struct pci_dev *pci_dev; u32 subsystem_vendor; @@ -5879,8 +6227,7 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, pciinfo.dev_fn = pci_dev->devfn; subsystem_vendor = pci_dev->subsystem_vendor; subsystem_device = pci_dev->subsystem_device; - pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | - subsystem_vendor; + pciinfo.board_id = ((subsystem_device << 
16) & 0xffff0000) | subsystem_vendor; if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) return -EFAULT; @@ -5986,6 +6333,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; + if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) + return -EBUSY; if (!arg) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) @@ -6066,8 +6415,11 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) put_unaligned_le16(iu_length, &request.header.iu_length); + if (ctrl_info->raid_iu_timeout_supported) + put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); + PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); if (iocommand.buf_size > 0) pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, @@ -6111,17 +6463,13 @@ out: return rc; } -static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, - void __user *arg) +static int pqi_ioctl(struct scsi_device *sdev, IOCTL_INT cmd, void __user *arg) { int rc; struct pqi_ctrl_info *ctrl_info; ctrl_info = shost_to_hba(sdev->host); - if (pqi_ctrl_in_ofa(ctrl_info)) - return -EBUSY; - switch (cmd) { case CCISS_DEREGDISK: case CCISS_REGNEWDISK: @@ -6154,20 +6502,13 @@ static ssize_t pqi_firmware_version_show(struct device *dev, shost = class_to_shost(dev); ctrl_info = shost_to_hba(shost); - return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); } static ssize_t pqi_driver_version_show(struct device *dev, struct device_attribute *attr, char *buffer) { - struct Scsi_Host *shost; - struct pqi_ctrl_info *ctrl_info; - - shost = class_to_shost(dev); - ctrl_info = shost_to_hba(shost); - - return snprintf(buffer, PAGE_SIZE, - "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); + return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); } static ssize_t pqi_serial_number_show(struct device *dev, @@ -6179,7 +6520,7 @@ static ssize_t pqi_serial_number_show(struct device *dev, shost = class_to_shost(dev); ctrl_info = shost_to_hba(shost); - return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); } static ssize_t pqi_model_show(struct device *dev, @@ -6191,7 +6532,7 @@ static ssize_t pqi_model_show(struct device *dev, shost = class_to_shost(dev); ctrl_info = shost_to_hba(shost); - return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); } static ssize_t pqi_vendor_show(struct device *dev, @@ -6203,7 +6544,7 @@ static ssize_t pqi_vendor_show(struct device *dev, shost = class_to_shost(dev); ctrl_info = shost_to_hba(shost); - return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); + return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); } static ssize_t pqi_host_rescan_store(struct device *dev, @@ -6224,14 +6565,14 @@ static ssize_t pqi_lockup_action_show(struct device *dev, for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { if (pqi_lockup_actions[i].action == pqi_lockup_action) - count += snprintf(buffer + count, PAGE_SIZE - count, + count += scnprintf(buffer + count, PAGE_SIZE - count, "[%s] ", pqi_lockup_actions[i].name); else - count += snprintf(buffer + count, PAGE_SIZE - count, + count += scnprintf(buffer + count, PAGE_SIZE - count, "%s ", 
pqi_lockup_actions[i].name); } - count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); + count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); return count; } @@ -6256,14 +6597,94 @@ static ssize_t pqi_lockup_action_store(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); -static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); -static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); -static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); -static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); -static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); -static DEVICE_ATTR(lockup_action, 0644, - pqi_lockup_action_show, pqi_lockup_action_store); +static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, 10, "%hhx\n", + ctrl_info->enable_stream_detection); +} + +static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + u8 set_stream_detection = 0; + + if (sscanf(buffer, "%hhx", &set_stream_detection) != 1) + return -EINVAL; + + ctrl_info->enable_stream_detection = set_stream_detection; + + return count; +} + +static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, 10, "%hhx\n", ctrl_info->enable_r5_writes); +} + +static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + u8 set_r5_writes = 0; + + if (sscanf(buffer, "%hhx", &set_r5_writes) != 1) + return -EINVAL; + + ctrl_info->enable_r5_writes = set_r5_writes; + + return count; +} + +static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, 10, "%hhx\n", ctrl_info->enable_r6_writes); +} + +static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + u8 set_r6_writes = 0; + + if (sscanf(buffer, "%hhx", &set_r6_writes) != 1) + return -EINVAL; + + ctrl_info->enable_r6_writes = set_r6_writes; + + return count; +} + +static DEVICE_ATTR(driver_version, S_IRUGO, pqi_driver_version_show, NULL); +static DEVICE_ATTR(firmware_version, S_IRUGO, pqi_firmware_version_show, NULL); +static DEVICE_ATTR(model, S_IRUGO, pqi_model_show, NULL); +static DEVICE_ATTR(serial_number, S_IRUGO, pqi_serial_number_show, NULL); +static DEVICE_ATTR(vendor, S_IRUGO, pqi_vendor_show, NULL); +static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store); +static DEVICE_ATTR(lockup_action, S_IWUSR | S_IRUGO, pqi_lockup_action_show, + pqi_lockup_action_store); +static DEVICE_ATTR(enable_stream_detection, S_IWUSR | S_IRUGO, + 
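Each of the three new host attributes above accepts a single byte parsed with sscanf(buffer, "%hhx", ...) and rejects anything else with -EINVAL. A minimal sketch of that store() shape; kstrtou8() is the more common kernel idiom for the same job and is shown as an alternative in the comment:

#include <linux/kernel.h>
#include <linux/types.h>

static ssize_t enable_flag_store_sketch(const char *buffer, size_t count, u8 *flag)
{
	u8 val = 0;

	/* Pattern used by the hunk above: one hex byte via sscanf(). */
	if (sscanf(buffer, "%hhx", &val) != 1)
		return -EINVAL;

	/* Equivalent, and stricter about trailing junk:
	 *	if (kstrtou8(buffer, 16, &val))
	 *		return -EINVAL;
	 */

	*flag = val;
	return count;	/* store() returns the bytes consumed */
}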
pqi_host_enable_stream_detection_show, + pqi_host_enable_stream_detection_store); +static DEVICE_ATTR(enable_r5_writes, S_IWUSR | S_IRUGO, + pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); +static DEVICE_ATTR(enable_r6_writes, S_IWUSR | S_IRUGO, + pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); static struct device_attribute *pqi_shost_attrs[] = { &dev_attr_driver_version, @@ -6273,6 +6694,9 @@ static struct device_attribute *pqi_shost_attrs[] = { &dev_attr_vendor, &dev_attr_rescan, &dev_attr_lockup_action, + &dev_attr_enable_stream_detection, + &dev_attr_enable_r5_writes, + &dev_attr_enable_r6_writes, NULL }; @@ -6283,7 +6707,7 @@ static ssize_t pqi_unique_id_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - unsigned char uid[16]; + u8 unique_id[16]; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -6292,20 +6716,26 @@ static ssize_t pqi_unique_id_show(struct device *dev, device = sdev->hostdata; if (!device) { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -ENODEV; } - memcpy(uid, device->unique_id, sizeof(uid)); + + if (device->is_physical_device) { + memset(unique_id, 0, 8); + memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); + } else { + memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return snprintf(buffer, PAGE_SIZE, - "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", - uid[0], uid[1], uid[2], uid[3], - uid[4], uid[5], uid[6], uid[7], - uid[8], uid[9], uid[10], uid[11], - uid[12], uid[13], uid[14], uid[15]); + return scnprintf(buffer, PAGE_SIZE, + "%02X%02X%02X%02X%02X%02X%02X%02X" + "%02X%02X%02X%02X%02X%02X%02X%02X\n", + unique_id[0], unique_id[1], unique_id[2], unique_id[3], + unique_id[4], unique_id[5], unique_id[6], unique_id[7], + unique_id[8], unique_id[9], unique_id[10], unique_id[11], + unique_id[12], unique_id[13], unique_id[14], unique_id[15]); } static ssize_t pqi_lunid_show(struct device *dev, @@ -6324,18 +6754,19 @@ static ssize_t pqi_lunid_show(struct device *dev, device = sdev->hostdata; if (!device) { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -ENODEV; } + memcpy(lunid, device->scsi3addr, sizeof(lunid)); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); + return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); } -#define MAX_PATHS 8 +#define MAX_PATHS 8 + static ssize_t pqi_path_info_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -6347,9 +6778,9 @@ static ssize_t pqi_path_info_show(struct device *dev, int output_len = 0; u8 box; u8 bay; - u8 path_map_index = 0; + u8 path_map_index; char *active; - unsigned char phys_connector[2]; + u8 phys_connector[2]; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -6358,14 +6789,13 @@ static ssize_t pqi_path_info_show(struct device *dev, device = sdev->hostdata; if (!device) { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -ENODEV; } bay = device->bay; for (i = 0; i < MAX_PATHS; i++) { - path_map_index = 1<<i; + path_map_index = 1 << i; if (i == device->active_path_index) active = "Active"; else 
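pqi_unique_id_show() above now builds the 16-byte identifier from two sources: physical devices zero the first eight bytes and copy the WWID into the rest, logical devices copy the volume ID. The sixteen chained %02X conversions could equally use the kernel's %phN printk extension, which the neighbouring lunid attribute already relies on; a sketch of that variant (buffer selection as in the hunk, formatting swapped):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t unique_id_format_sketch(char *buffer, const u8 *wwid8,
				       const u8 *volume_id16, bool is_physical)
{
	u8 unique_id[16];

	if (is_physical) {
		memset(unique_id, 0, 8);
		memcpy(unique_id + 8, wwid8, 8);
	} else {
		memcpy(unique_id, volume_id16, 16);
	}

	/*
	 * %16phN emits 16 bytes as bare hex with no separators; note it
	 * prints lowercase, whereas the hunk's %02X prints uppercase.
	 */
	return scnprintf(buffer, PAGE_SIZE, "%16phN\n", unique_id);
}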
if (device->path_map & path_map_index) @@ -6416,10 +6846,10 @@ end_buffer: } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return output_len; } - static ssize_t pqi_sas_address_show(struct device *dev, struct device_attribute *attr, char *buffer) { @@ -6435,16 +6865,16 @@ static ssize_t pqi_sas_address_show(struct device *dev, spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); device = sdev->hostdata; - if (pqi_is_logical_device(device)) { - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, - flags); + if (!device || !pqi_is_device_with_sas_address(device)) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -ENODEV; } + sas_address = device->sas_address; spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); + return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); } static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, @@ -6461,6 +6891,11 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + buffer[0] = device->raid_bypass_enabled ? '1' : '0'; buffer[1] = '\n'; buffer[2] = '\0'; @@ -6485,6 +6920,10 @@ static ssize_t pqi_raid_level_show(struct device *dev, spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } if (pqi_is_logical_device(device)) raid_level = pqi_raid_level_to_string(device->raid_level); @@ -6493,16 +6932,43 @@ static ssize_t pqi_raid_level_show(struct device *dev, spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); - return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); + return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); } -static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); -static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); -static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); -static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); -static DEVICE_ATTR(ssd_smart_path_enabled, 0444, - pqi_ssd_smart_path_enabled_show, NULL); -static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); +static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct pqi_ctrl_info *ctrl_info; + struct scsi_device *sdev; + struct pqi_scsi_dev *device; + unsigned long flags; + int raid_bypass_cnt; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + + device = sdev->hostdata; + if (!device) { + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return -ENODEV; + } + + raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); + + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); +} + +static DEVICE_ATTR(lunid, S_IRUGO, pqi_lunid_show, NULL); +static DEVICE_ATTR(unique_id, S_IRUGO, pqi_unique_id_show, NULL); +static DEVICE_ATTR(path_info, S_IRUGO, pqi_path_info_show, NULL); +static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL); +static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO, pqi_ssd_smart_path_enabled_show, NULL); +static DEVICE_ATTR(raid_level, S_IRUGO, 
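The new raid_bypass_cnt attribute above, and the hardened sas_address, ssd_smart_path_enabled, and raid_level handlers, all follow one pattern: take the device-list spinlock, bail out with -ENODEV if sdev->hostdata has already been torn down, copy what is needed, and format only after dropping the lock. A condensed sketch of that pattern with illustrative names:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct dev_state {
	atomic_t bypass_cnt;
};

static ssize_t counter_show_sketch(spinlock_t *list_lock,
				   struct dev_state *device, char *buffer)
{
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(list_lock, flags);

	if (!device) {	/* raced with device removal */
		spin_unlock_irqrestore(list_lock, flags);
		return -ENODEV;
	}
	cnt = atomic_read(&device->bypass_cnt);	/* copy under the lock */

	spin_unlock_irqrestore(list_lock, flags);

	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", cnt);	/* format unlocked */
}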
pqi_raid_level_show, NULL); +static DEVICE_ATTR(raid_bypass_cnt, S_IRUGO, pqi_raid_bypass_cnt_show, NULL); static struct device_attribute *pqi_sdev_attrs[] = { &dev_attr_lunid, @@ -6511,6 +6977,7 @@ static struct device_attribute *pqi_sdev_attrs[] = { &dev_attr_sas_address, &dev_attr_ssd_smart_path_enabled, &dev_attr_raid_level, + &dev_attr_raid_bypass_cnt, NULL }; @@ -6518,14 +6985,15 @@ static struct scsi_host_template pqi_driver_template = { .module = THIS_MODULE, .name = DRIVER_NAME_SHORT, .proc_name = DRIVER_NAME_SHORT, - .queuecommand = pqi_scsi_queue_command, + .queuecommand = PQI_SCSI_QUEUE_COMMAND, .scan_start = pqi_scan_start, .scan_finished = pqi_scan_finished, .this_id = -1, .eh_device_reset_handler = pqi_eh_device_reset_handler, .ioctl = pqi_ioctl, .slave_alloc = pqi_slave_alloc, - .map_queues = pqi_map_queues, + .slave_configure = pqi_slave_configure, + .slave_destroy = pqi_slave_destroy, .sdev_attrs = pqi_sdev_attrs, .shost_attrs = pqi_shost_attrs, }; @@ -6535,11 +7003,11 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) int rc; struct Scsi_Host *shost; + pqi_compat_init_scsi_host_template(&pqi_driver_template); + shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); if (!shost) { - dev_err(&ctrl_info->pci_dev->dev, - "scsi_host_alloc failed for controller %u\n", - ctrl_info->ctrl_id); + dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; } @@ -6555,24 +7023,20 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) shost->cmd_per_lun = shost->can_queue; shost->sg_tablesize = ctrl_info->sg_tablesize; shost->transportt = pqi_sas_transport_template; - shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); + shost->irq = ctrl_info->msix_vectors[0]; shost->unique_id = shost->irq; - shost->nr_hw_queues = ctrl_info->num_queue_groups; shost->hostdata[0] = (unsigned long)ctrl_info; + pqi_compat_init_scsi_host(shost, ctrl_info); rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); if (rc) { - dev_err(&ctrl_info->pci_dev->dev, - "scsi_add_host failed for controller %u\n", - ctrl_info->ctrl_id); + dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); goto free_host; } rc = pqi_add_sas_host(shost, ctrl_info); if (rc) { - dev_err(&ctrl_info->pci_dev->dev, - "add SAS host failed for controller %u\n", - ctrl_info->ctrl_id); + dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); goto remove_host; } @@ -6642,8 +7106,7 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info) rc = sis_pqi_reset_quiesce(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "PQI reset failed during quiesce with error %d\n", - rc); + "PQI reset failed during quiesce with error %d\n", rc); return rc; } } @@ -6698,13 +7161,23 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) if (rc) goto out; - memcpy(ctrl_info->firmware_version, identify->firmware_version, - sizeof(identify->firmware_version)); - ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; - snprintf(ctrl_info->firmware_version + - strlen(ctrl_info->firmware_version), - sizeof(ctrl_info->firmware_version), - "-%u", get_unaligned_le16(&identify->firmware_build_number)); + if (get_unaligned_le32(&identify->extra_controller_flags) & + BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { + memcpy(ctrl_info->firmware_version, + identify->firmware_version_long, + sizeof(identify->firmware_version_long)); + } else { + memcpy(ctrl_info->firmware_version, + identify->firmware_version_short, + sizeof(identify->firmware_version_short)); + 
ctrl_info->firmware_version + [sizeof(identify->firmware_version_short)] = '\0'; + snprintf(ctrl_info->firmware_version + + strlen(ctrl_info->firmware_version), + sizeof(ctrl_info->firmware_version), + "-%u", + get_unaligned_le16(&identify->firmware_build_number)); + } memcpy(ctrl_info->model, identify->product_id, sizeof(identify->product_id)); @@ -6792,7 +7265,7 @@ static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, &request.data.config_table_update.last_section); return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, NULL, NO_TIMEOUT); + 0, NULL); } static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, @@ -6801,6 +7274,7 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, { void *features_requested; void __iomem *features_requested_iomem_addr; + void __iomem *host_max_known_feature_iomem_addr; features_requested = firmware_features->features_supported + le16_to_cpu(firmware_features->num_elements); @@ -6811,6 +7285,16 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, memcpy_toio(features_requested_iomem_addr, features_requested, le16_to_cpu(firmware_features->num_elements)); + if (pqi_is_firmware_feature_supported(firmware_features, + PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { + host_max_known_feature_iomem_addr = + features_requested_iomem_addr + + (le16_to_cpu(firmware_features->num_elements) * 2) + + sizeof(__le16); + writew(PQI_FIRMWARE_FEATURE_MAXIMUM, + host_max_known_feature_iomem_addr); + } + return pqi_config_table_update(ctrl_info, PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); @@ -6828,11 +7312,8 @@ struct pqi_firmware_feature { static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, struct pqi_firmware_feature *firmware_feature) { - if (!firmware_feature->supported) { - dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", - firmware_feature->feature_name); + if (!firmware_feature->supported) return; - } if (firmware_feature->enabled) { dev_info(&ctrl_info->pci_dev->dev, @@ -6844,6 +7325,39 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, firmware_feature->feature_name); } +static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + switch (firmware_feature->feature_bit) { + case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: + ctrl_info->enable_r1_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: + ctrl_info->enable_r5_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: + ctrl_info->enable_r6_writes = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: + ctrl_info->soft_reset_handshake_supported = + firmware_feature->enabled && + ctrl_info->soft_reset_status; + break; + case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: + ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: + ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN: + ctrl_info->unique_wwid_in_report_phys_lun_supported = + firmware_feature->enabled; + break; + } + + pqi_firmware_feature_status(ctrl_info, firmware_feature); +} + static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, struct pqi_firmware_feature *firmware_feature) { @@ -6864,11 +7378,76 @@ static 
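pqi_get_ctrl_product_details() above appends "-<build number>" to the short firmware version with snprintf(dst + strlen(dst), sizeof(buffer), ...); note that the size argument is the whole buffer rather than the space remaining after the offset, so the bound is only correct while the version string stays short. A defensive sketch of the append, with an illustrative buffer:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void append_build_number_sketch(char *version, size_t version_size,
				       u16 build_number)
{
	size_t len = strlen(version);

	/* Bound by the space actually left, not by the full buffer size. */
	scnprintf(version + len, version_size - len, "-%u", build_number);
}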
struct pqi_firmware_feature pqi_firmware_features[] = { .feature_bit = PQI_FIRMWARE_FEATURE_SMP, .feature_status = pqi_firmware_feature_status, }, + { + .feature_name = "Maximum Known Feature", + .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 5 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 6 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 5 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 6 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, { .feature_name = "New Soft Reset Handshake", .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "TMF IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, .feature_status = pqi_firmware_feature_status, }, + { + .feature_name = "Unique WWID in Report Physical LUN", + .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN, + .feature_status = pqi_ctrl_update_feature_flags, + }, }; static void pqi_process_firmware_features( @@ -6921,18 +7500,13 @@ static void pqi_process_firmware_features( return; } - ctrl_info->soft_reset_handshake_supported = false; for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { if (!pqi_firmware_features[i].supported) continue; if (pqi_is_firmware_feature_enabled(firmware_features, firmware_features_iomem_addr, pqi_firmware_features[i].feature_bit)) { - pqi_firmware_features[i].enabled = true; - if (pqi_firmware_features[i].feature_bit == - PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE) - ctrl_info->soft_reset_handshake_supported = - true; + pqi_firmware_features[i].enabled = true; } pqi_firmware_feature_update(ctrl_info, &pqi_firmware_features[i]); @@ -6958,14 +7532,34 @@ static void pqi_process_firmware_features_section( mutex_unlock(&pqi_firmware_features_mutex); } +/* + * Reset all controller settings that can be initialized during the processing + * of the PQI Configuration Table. 
+ */ + +static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->heartbeat_counter = NULL; + ctrl_info->soft_reset_status = NULL; + ctrl_info->soft_reset_handshake_supported = false; + ctrl_info->enable_r1_writes = false; + ctrl_info->enable_r5_writes = false; + ctrl_info->enable_r6_writes = false; + ctrl_info->raid_iu_timeout_supported = false; + ctrl_info->tmf_iu_timeout_supported = false; + ctrl_info->unique_wwid_in_report_phys_lun_supported = false; +} + static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) { u32 table_length; u32 section_offset; + bool firmware_feature_section_present; void __iomem *table_iomem_addr; struct pqi_config_table *config_table; struct pqi_config_table_section_header *section; struct pqi_config_table_section_info section_info; + struct pqi_config_table_section_info feature_section_info; table_length = ctrl_info->config_table_length; if (table_length == 0) @@ -6982,25 +7576,24 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) * Copy the config table contents from I/O memory space into the * temporary buffer. */ - table_iomem_addr = ctrl_info->iomem_base + - ctrl_info->config_table_offset; + table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; memcpy_fromio(config_table, table_iomem_addr, table_length); + firmware_feature_section_present = false; section_info.ctrl_info = ctrl_info; - section_offset = - get_unaligned_le32(&config_table->first_section_offset); + section_offset = get_unaligned_le32(&config_table->first_section_offset); while (section_offset) { section = (void *)config_table + section_offset; section_info.section = section; section_info.section_offset = section_offset; - section_info.section_iomem_addr = - table_iomem_addr + section_offset; + section_info.section_iomem_addr = table_iomem_addr + section_offset; switch (get_unaligned_le16(&section->section_id)) { case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: - pqi_process_firmware_features_section(&section_info); + firmware_feature_section_present = true; + feature_section_info = section_info; break; case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: if (pqi_disable_heartbeat) @@ -7010,8 +7603,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) ctrl_info->heartbeat_counter = table_iomem_addr + section_offset + - offsetof( - struct pqi_config_table_heartbeat, + offsetof(struct pqi_config_table_heartbeat, heartbeat_counter); break; case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: @@ -7019,14 +7611,21 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) ctrl_info->soft_reset_status = table_iomem_addr + section_offset + offsetof(struct pqi_config_table_soft_reset, - soft_reset_status); + soft_reset_status); break; } - section_offset = - get_unaligned_le16(&section->next_section_offset); + section_offset = get_unaligned_le16(&section->next_section_offset); } + /* + * We process the firmware feature section after all other sections + * have been processed so that the feature bit callbacks can take + * into account the settings configured by other sections. 
+ */ + if (firmware_feature_section_present) + pqi_process_firmware_features_section(&feature_section_info); + kfree(config_table); return 0; @@ -7077,10 +7676,16 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; + u32 product_id; - rc = pqi_force_sis_mode(ctrl_info); - if (rc) - return rc; + if (reset_devices) { + sis_soft_reset(ctrl_info); + msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ); + } else { + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + } /* * Wait until the controller is ready to start accepting SIS @@ -7108,15 +7713,32 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return rc; } + product_id = sis_get_product_id(ctrl_info); + ctrl_info->product_id = (u8)product_id; + ctrl_info->product_revision = (u8)(product_id >> 8); + +#if 1 + /* THIS CODE IS TEMPORARY AND MUST BE REMOVED FROM A PRODUCTION DRIVER! */ + if (ctrl_info->product_id == PQI_CTRL_PRODUCT_ID_GEN2 && + ctrl_info->product_revision == PQI_CTRL_PRODUCT_REVISION_A) { + dev_err(&ctrl_info->pci_dev->dev, + "this driver is incompatible with Excalibur revision A\n"); + return -ENOTSUPP; + } +#endif + + if (ctrl_info->product_id != PQI_CTRL_PRODUCT_ID_GEN1) + ctrl_info->enable_stream_detection = true; + if (reset_devices) { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) - ctrl_info->max_outstanding_requests = + ctrl_info->max_outstanding_requests = PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; } else { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS) - ctrl_info->max_outstanding_requests = + ctrl_info->max_outstanding_requests = PQI_MAX_OUTSTANDING_REQUESTS; } @@ -7207,6 +7829,8 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; + pqi_irq_set_affinity_hint(ctrl_info); + rc = pqi_create_queues(ctrl_info); if (rc) return rc; @@ -7221,6 +7845,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) pqi_start_heartbeat_timer(ctrl_info); + if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { + rc = pqi_get_advanced_raid_bypass_config(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining advanced RAID bypass configuration\n"); + return rc; + } + ctrl_info->ciss_report_log_flags |= + CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; + } + rc = pqi_enable_events(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -7370,12 +8005,25 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) ctrl_info->controller_online = true; pqi_ctrl_unblock_requests(ctrl_info); + pqi_ctrl_reset_config(ctrl_info); + rc = pqi_process_config_table(ctrl_info); if (rc) return rc; pqi_start_heartbeat_timer(ctrl_info); + if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { + rc = pqi_get_advanced_raid_bypass_config(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining advanced RAID bypass configuration\n"); + return rc; + } + ctrl_info->ciss_report_log_flags |= + CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; + } + rc = pqi_enable_events(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -7386,7 +8034,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) rc = pqi_get_ctrl_product_details(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error obtaining product detail\n"); + "error obtaining product details\n"); return rc; } @@ -7404,15 +8052,15 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } - pqi_schedule_update_time_worker(ctrl_info); 
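As the new comment above explains, the config-table walk no longer processes the firmware-feature section in place: it copies that section_info aside (the loop variable is reused on every iteration, so a pointer would go stale) and replays it after the heartbeat and soft-reset sections have populated their pointers. The shape of that deferral, with hypothetical section IDs and a generic callback:

#include <stdbool.h>

struct section_info {
	int id;
	void *data;
};

enum { SECTION_FEATURES = 1, SECTION_OTHER = 2 };	/* hypothetical IDs */

static void process_sections_sketch(const struct section_info *sections, int n,
				    void (*process)(const struct section_info *))
{
	struct section_info feature_section;	/* a copy, not a pointer */
	bool have_features = false;
	int i;

	for (i = 0; i < n; i++) {
		if (sections[i].id == SECTION_FEATURES) {
			feature_section = sections[i];	/* defer */
			have_features = true;
			continue;
		}
		process(&sections[i]);
	}

	/* Feature callbacks now see everything the other sections set up. */
	if (have_features)
		process(&feature_section);
}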
+ if (pqi_ofa_in_progress(ctrl_info)) + pqi_ctrl_unblock_scan(ctrl_info); pqi_scan_scsi_devices(ctrl_info); return 0; } -static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, - u16 timeout) +static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) { return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); @@ -7435,7 +8083,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) else mask = DMA_BIT_MASK(32); - rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); + rc = pqi_dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); goto disable_device; @@ -7514,6 +8162,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_WORK(&ctrl_info->event_work, pqi_event_worker); atomic_set(&ctrl_info->num_interrupts, 0); + atomic_set(&ctrl_info->total_scmds_outstanding, 0); INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); @@ -7521,19 +8170,26 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); + INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); + INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); + sema_init(&ctrl_info->sync_request_sem, PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); init_waitqueue_head(&ctrl_info->block_requests_wait); - INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); - spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); - INIT_WORK(&ctrl_info->raid_bypass_retry_work, - pqi_raid_bypass_retry_worker); - ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; ctrl_info->irq_mode = IRQ_MODE_NONE; ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; + ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; + ctrl_info->max_transfer_encrypted_sas_sata = + PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; + ctrl_info->max_transfer_encrypted_nvme = + PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; + ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; + ctrl_info->max_write_raid_1_10_2drive = ~0; + ctrl_info->max_write_raid_1_10_3drive = ~0; + return ctrl_info; } @@ -7544,6 +8200,7 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) { + pqi_irq_unset_affinity_hint(ctrl_info); pqi_free_irqs(ctrl_info); pqi_disable_msix_interrupts(ctrl_info); } @@ -7586,81 +8243,57 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) { - pqi_cancel_update_time_worker(ctrl_info); - pqi_cancel_rescan_worker(ctrl_info); - pqi_wait_until_lun_reset_finished(ctrl_info); - pqi_wait_until_scan_finished(ctrl_info); - pqi_ctrl_ofa_start(ctrl_info); + pqi_ctrl_block_scan(ctrl_info); + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); - pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); - pqi_fail_io_queued_for_all_devices(ctrl_info); - pqi_wait_until_inbound_queues_empty(ctrl_info); pqi_stop_heartbeat_timer(ctrl_info); - ctrl_info->pqi_mode_enabled = false; - pqi_save_ctrl_mode(ctrl_info, SIS_MODE); } static void 
pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) { - pqi_ofa_free_host_buffer(ctrl_info); - ctrl_info->pqi_mode_enabled = true; - pqi_save_ctrl_mode(ctrl_info, PQI_MODE); - ctrl_info->controller_online = true; - pqi_ctrl_unblock_requests(ctrl_info); pqi_start_heartbeat_timer(ctrl_info); - pqi_schedule_update_time_worker(ctrl_info); - pqi_clear_soft_reset_status(ctrl_info, - PQI_SOFT_RESET_ABORT); - pqi_scan_scsi_devices(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); } -static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, - u32 total_size, u32 chunk_size) +static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) { - u32 sg_count; - u32 size; int i; - struct pqi_sg_descriptor *mem_descriptor = NULL; + u32 sg_count; struct device *dev; struct pqi_ofa_memory *ofap; - - dev = &ctrl_info->pci_dev->dev; - - sg_count = (total_size + chunk_size - 1); - sg_count /= chunk_size; + struct pqi_sg_descriptor *mem_descriptor; + dma_addr_t dma_handle; ofap = ctrl_info->pqi_ofa_mem_virt_addr; - if (sg_count*chunk_size < total_size) + sg_count = DIV_ROUND_UP(total_size, chunk_size); + if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) goto out; - ctrl_info->pqi_ofa_chunk_virt_addr = - kcalloc(sg_count, sizeof(void *), GFP_KERNEL); + ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); if (!ctrl_info->pqi_ofa_chunk_virt_addr) goto out; - for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { - dma_addr_t dma_handle; + dev = &ctrl_info->pci_dev->dev; + for (i = 0; i < sg_count; i++) { ctrl_info->pqi_ofa_chunk_virt_addr[i] = - dma_alloc_coherent(dev, chunk_size, &dma_handle, - GFP_KERNEL); - + dma_zalloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) - break; - + goto out_free_chunks; mem_descriptor = &ofap->sg_descriptor[i]; - put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); - put_unaligned_le32 (chunk_size, &mem_descriptor->length); + put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); + put_unaligned_le32(chunk_size, &mem_descriptor->length); } - if (!size || size < total_size) - goto out_free_chunks; - put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); - put_unaligned_le32(size, &ofap->bytes_allocated); + put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); return 0; @@ -7668,80 +8301,87 @@ out_free_chunks: while (--i >= 0) { mem_descriptor = &ofap->sg_descriptor[i]; dma_free_coherent(dev, chunk_size, - ctrl_info->pqi_ofa_chunk_virt_addr[i], - get_unaligned_le64(&mem_descriptor->address)); + ctrl_info->pqi_ofa_chunk_virt_addr[i], + get_unaligned_le64(&mem_descriptor->address)); } kfree(ctrl_info->pqi_ofa_chunk_virt_addr); out: - put_unaligned_le32 (0, &ofap->bytes_allocated); return -ENOMEM; } static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) { u32 total_size; + u32 chunk_size; u32 min_chunk_size; - u32 chunk_sz; - total_size = le32_to_cpu( - ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); - min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS; + if (ctrl_info->ofa_bytes_requested == 0) + return 0; - for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2) - if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) + total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); + 
min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); + min_chunk_size = PAGE_ALIGN(min_chunk_size); + + for (chunk_size = total_size; chunk_size >= min_chunk_size;) { + if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) return 0; + chunk_size /= 2; + chunk_size = PAGE_ALIGN(chunk_size); + } return -ENOMEM; } -static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, - u32 bytes_requested) +static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) { - struct pqi_ofa_memory *pqi_ofa_memory; struct device *dev; + struct pqi_ofa_memory *ofap; dev = &ctrl_info->pci_dev->dev; - pqi_ofa_memory = dma_alloc_coherent(dev, - PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, - &ctrl_info->pqi_ofa_mem_dma_handle, - GFP_KERNEL); - if (!pqi_ofa_memory) + ofap = dma_zalloc_coherent(dev, sizeof(*ofap), + &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); + if (!ofap) return; - put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version); - memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE, - sizeof(pqi_ofa_memory->signature)); - pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested); - - ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; + ctrl_info->pqi_ofa_mem_virt_addr = ofap; if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { - dev_err(dev, "Failed to allocate host buffer of size = %u", - bytes_requested); + dev_err(dev, + "failed to allocate host buffer for Online Firmware Activation\n"); + dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); + ctrl_info->pqi_ofa_mem_virt_addr = NULL; + return; } + + put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); + memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); } static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) { - int i; - struct pqi_sg_descriptor *mem_descriptor; + unsigned int i; + struct device *dev; struct pqi_ofa_memory *ofap; + struct pqi_sg_descriptor *mem_descriptor; + unsigned int num_memory_descriptors; ofap = ctrl_info->pqi_ofa_mem_virt_addr; - if (!ofap) return; - if (!ofap->bytes_allocated) + dev = &ctrl_info->pci_dev->dev; + + if (get_unaligned_le32(&ofap->bytes_allocated) == 0) goto out; mem_descriptor = ofap->sg_descriptor; + num_memory_descriptors = + get_unaligned_le16(&ofap->num_memory_descriptors); - for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors); - i++) { - dma_free_coherent(&ctrl_info->pci_dev->dev, + for (i = 0; i < num_memory_descriptors; i++) { + dma_free_coherent(dev, get_unaligned_le32(&mem_descriptor[i].length), ctrl_info->pqi_ofa_chunk_virt_addr[i], get_unaligned_le64(&mem_descriptor[i].address)); @@ -7749,49 +8389,46 @@ static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) kfree(ctrl_info->pqi_ofa_chunk_virt_addr); out: - dma_free_coherent(&ctrl_info->pci_dev->dev, - PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap, - ctrl_info->pqi_ofa_mem_dma_handle); + dma_free_coherent(dev, sizeof(*ofap), ofap, + ctrl_info->pqi_ofa_mem_dma_handle); ctrl_info->pqi_ofa_mem_virt_addr = NULL; } static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) { + u32 buffer_length; struct pqi_vendor_general_request request; - size_t size; struct pqi_ofa_memory *ofap; memset(&request, 0, sizeof(request)); - ofap = ctrl_info->pqi_ofa_mem_virt_addr; - request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, &request.function_code); + ofap = 
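The OFA host-buffer allocator above asks for the whole region as one chunk first and halves (then page-aligns) the chunk size on failure, down to the smallest size that still fits in the descriptor table. One subtlety worth calling out: PAGE_ALIGN() rounds up, so once chunk_size reaches PAGE_SIZE, halving and re-aligning yields PAGE_SIZE again; the sketch below therefore breaks explicitly when the size stops shrinking rather than relying on the division. Names and the alloc_chunks() callback are illustrative:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

/* alloc_chunks() stands in for the descriptor-table allocation and is
 * assumed to return 0 on success and -ENOMEM on failure. */
static int alloc_host_buffer_sketch(void *ctx, u32 total_size, u32 max_descriptors,
				    int (*alloc_chunks)(void *, u32, u32))
{
	u32 chunk_size;
	u32 min_chunk_size;

	if (total_size == 0)
		return 0;

	total_size = PAGE_ALIGN(total_size);
	min_chunk_size = PAGE_ALIGN(DIV_ROUND_UP(total_size, max_descriptors));

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		u32 next;

		if (alloc_chunks(ctx, total_size, chunk_size) == 0)
			return 0;	/* fewer, larger chunks preferred */

		next = PAGE_ALIGN(chunk_size / 2);
		if (next == chunk_size)	/* PAGE_ALIGN rounds back up at PAGE_SIZE */
			break;
		chunk_size = next;
	}

	return -ENOMEM;
}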
ctrl_info->pqi_ofa_mem_virt_addr; + if (ofap) { - size = offsetof(struct pqi_ofa_memory, sg_descriptor) + + buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + get_unaligned_le16(&ofap->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, &request.data.ofa_memory_allocation.buffer_address); - put_unaligned_le32(size, + put_unaligned_le32(buffer_length, &request.data.ofa_memory_allocation.buffer_length); - } return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, NULL, NO_TIMEOUT); + 0, NULL); } -#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 - -static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) { - msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); + ssleep(delay_secs); + return pqi_ctrl_init_resume(ctrl_info); } @@ -7849,7 +8486,6 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) pqi_cancel_update_time_worker(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); pqi_fail_all_outstanding_requests(ctrl_info); - pqi_clear_all_queued_raid_bypass_retries(ctrl_info); pqi_ctrl_unblock_requests(ctrl_info); } @@ -7884,7 +8520,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev, if (id->driver_data) ctrl_description = (char *)id->driver_data; else - ctrl_description = "Microsemi Smart Family Controller"; + ctrl_description = "Microchip Smart Family Controller"; dev_info(&pci_dev->dev, "%s found\n", ctrl_description); } @@ -7951,33 +8587,96 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) if (!ctrl_info) return; - ctrl_info->in_shutdown = true; - pqi_remove_ctrl(ctrl_info); } + +static void pqi_dump_request(struct pqi_ctrl_info *ctrl_info, + struct pqi_io_request *io_request) +{ + struct scsi_cmnd *scmd; + + scmd = io_request->scmd; + if (scmd) { + struct Scsi_Host *shost; + struct pqi_scsi_dev *device; + + if (scmd->device == NULL || scmd->device->host == NULL || + scmd->device->hostdata == NULL) + return; + + shost = scmd->device->host; + device = scmd->device->hostdata; + + dev_warn(&ctrl_info->pci_dev->dev, + "%d:%d:%d:%d scsicmnd=[0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x] scmd=%p outstanding cmds = %d\n", + shost->host_no, device->bus, device->target, device->lun, + scmd->cmnd[0], scmd->cmnd[1], scmd->cmnd[2], scmd->cmnd[3], + scmd->cmnd[4], scmd->cmnd[5], scmd->cmnd[6], scmd->cmnd[7], + scmd->cmnd[8], scmd->cmnd[9], scmd->cmnd[10], scmd->cmnd[11], + scmd->cmnd[12], scmd->cmnd[13], scmd->cmnd[14], scmd->cmnd[15], + scmd, atomic_read(&device->scsi_cmds_outstanding)); + } else { + struct pqi_iu_header request_h; + size_t iu_length; + + memcpy(&request_h, io_request->iu, PQI_REQUEST_HEADER_LENGTH); + iu_length = get_unaligned_le16(&request_h.iu_length) + + PQI_REQUEST_HEADER_LENGTH; + + dev_warn(&ctrl_info->pci_dev->dev, + "sync cmd IU type = 0x%02x len = %u\n", + request_h.iu_type, request_h.iu_length); + } +} + + +static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_io_request *io_request; + bool pending = false; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; + pqi_dump_request(ctrl_info, io_request); + pending = true; + } + BUG_ON(pending); +} + static void pqi_shutdown(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info 
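pqi_dump_request() above spells out sixteen %02x conversions for the CDB; the printk %*ph extension formats small buffers in one go and keeps the format string readable. A sketch of the equivalent warning (lowercase, space-separated hex, which differs only cosmetically from the hunk's output):

#include <linux/device.h>
#include <scsi/scsi_cmnd.h>

static void dump_cdb_sketch(struct device *dev, struct scsi_cmnd *scmd)
{
	/* %16ph expands to 16 space-separated hex bytes of scmd->cmnd. */
	dev_warn(dev, "scsicmnd=[%16ph] scmd=%p\n", scmd->cmnd, scmd);
}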
*ctrl_info; ctrl_info = pci_get_drvdata(pci_dev); - if (!ctrl_info) - goto error; + if (!ctrl_info) { + dev_err(&pci_dev->dev, + "cache could not be flushed\n"); + return; + } + + pqi_wait_until_ofa_finished(ctrl_info); + + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); /* * Write all data in the controller's battery-backed cache to * storage. */ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); - pqi_free_interrupts(ctrl_info); - pqi_reset(ctrl_info); - if (rc == 0) - return; + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache\n"); -error: - dev_warn(&pci_dev->dev, - "unable to flush controller cache\n"); + pqi_crash_if_pending_command(ctrl_info); + pqi_reset(ctrl_info); } static void pqi_process_lockup_action_param(void) @@ -8004,25 +8703,26 @@ static void pqi_process_module_params(void) pqi_process_lockup_action_param(); } -static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) +#if defined(CONFIG_PM) + +static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct pqi_ctrl_info *ctrl_info; ctrl_info = pci_get_drvdata(pci_dev); - pqi_disable_events(ctrl_info); - pqi_cancel_update_time_worker(ctrl_info); - pqi_cancel_rescan_worker(ctrl_info); - pqi_wait_until_scan_finished(ctrl_info); - pqi_wait_until_lun_reset_finished(ctrl_info); pqi_wait_until_ofa_finished(ctrl_info); - pqi_flush_cache(ctrl_info, SUSPEND); + + pqi_ctrl_block_scan(ctrl_info); + pqi_scsi_block_requests(ctrl_info); + pqi_ctrl_block_device_reset(ctrl_info); pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); - pqi_wait_until_inbound_queues_empty(ctrl_info); - pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); + pqi_flush_cache(ctrl_info, SUSPEND); pqi_stop_heartbeat_timer(ctrl_info); + pqi_crash_if_pending_command(ctrl_info); + if (state.event == PM_EVENT_FREEZE) return 0; @@ -8035,7 +8735,7 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat return 0; } -static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) +static int pqi_resume(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info *ctrl_info; @@ -8046,151 +8746,183 @@ static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) ctrl_info->max_hw_queue_index = 0; pqi_free_interrupts(ctrl_info); pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); - rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, - IRQF_SHARED, DRIVER_NAME_SHORT, - &ctrl_info->queue_groups[0]); + rc = request_irq(pci_dev->irq, pqi_irq_handler, IRQF_SHARED, + DRIVER_NAME_SHORT, ctrl_info->intr_data[0]); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "irq %u init failed with error %d\n", pci_dev->irq, rc); return rc; } - pqi_start_heartbeat_timer(ctrl_info); + pqi_ctrl_unblock_device_reset(ctrl_info); pqi_ctrl_unblock_requests(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); return 0; } pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); + pqi_ctrl_unblock_device_reset(ctrl_info); + pqi_ctrl_unblock_requests(ctrl_info); + pqi_scsi_unblock_requests(ctrl_info); + pqi_ctrl_unblock_scan(ctrl_info); + return pqi_ctrl_init_resume(ctrl_info); } +#endif /* CONFIG_PM */ + /* Define the PCI IDs for the controllers that we support. 
*/ static const struct pci_device_id pqi_pci_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x105b, 0x1211) + PCI_VENDOR_ID_FOXCONN, 0x1211) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x105b, 0x1321) + PCI_VENDOR_ID_FOXCONN, 0x1321) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x152d, 0x8a22) + PCI_VENDOR_ID_QUANTA, 0x8a22) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x152d, 0x8a23) + PCI_VENDOR_ID_QUANTA, 0x8a23) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x152d, 0x8a24) + PCI_VENDOR_ID_QUANTA, 0x8a24) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x152d, 0x8a36) + PCI_VENDOR_ID_QUANTA, 0x8a36) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x152d, 0x8a37) + PCI_VENDOR_ID_QUANTA, 0x8a37) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x1104) + PCI_VENDOR_ID_H3C, 0x8460) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x1105) + PCI_VENDOR_ID_H3C, 0x1104) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x1106) + PCI_VENDOR_ID_H3C, 0x1105) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x1107) + PCI_VENDOR_ID_H3C, 0x1106) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x8460) + PCI_VENDOR_ID_H3C, 0x1107) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0x8461) + PCI_VENDOR_ID_H3C, 0x1108) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0xc460) + PCI_VENDOR_ID_H3C, 0x8460) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0xc461) + PCI_VENDOR_ID_H3C, 0x8461) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0xf460) + PCI_VENDOR_ID_H3C, 0xc460) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x193d, 0xf461) + PCI_VENDOR_ID_H3C, 0xc461) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x0045) + PCI_VENDOR_ID_H3C, 0xf460) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x0046) + PCI_VENDOR_ID_H3C, 0xf461) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x0047) + PCI_VENDOR_ID_INSPUR, 0x0045) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x0048) + PCI_VENDOR_ID_INSPUR, 0x0046) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x004a) + PCI_VENDOR_ID_INSPUR, 0x0047) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x004b) + PCI_VENDOR_ID_INSPUR, 0x0048) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x004c) + PCI_VENDOR_ID_INSPUR, 0x004a) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1bd4, 0x004f) + PCI_VENDOR_ID_INSPUR, 0x004b) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd227) + PCI_VENDOR_ID_INSPUR, 0x004c) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd228) + PCI_VENDOR_ID_INSPUR, 0x004f) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd229) + PCI_VENDOR_ID_INSPUR, 0x0051) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd22a) + PCI_VENDOR_ID_INSPUR, 0x0052) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd22b) + PCI_VENDOR_ID_INSPUR, 0x0053) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x19e5, 0xd22c) + PCI_VENDOR_ID_INSPUR, 0x0054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd227) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd228) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd229) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd22a) + }, + { 
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd22b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HUAWEI, 0xd22c) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, @@ -8240,6 +8972,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x080a) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) @@ -8324,6 +9060,122 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1400) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1402) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1410) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1411) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1412) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1420) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1430) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1440) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1441) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1450) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1452) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1462) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1470) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1471) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1472) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1480) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1490) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1491) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14a1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14b0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14b1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14c1) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14d0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14e0) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x14f0) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) @@ -8388,6 +9240,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_HP, 0x1002) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) @@ -8398,19 +9254,35 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1d8d, 0x0800) + PCI_VENDOR_ID_HPE, 0x0294) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1d8d, 0x0908) + PCI_VENDOR_ID_HPE, 0x02db) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1d8d, 0x0806) + PCI_VENDOR_ID_HPE, 0x02dc) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, - 0x1d8d, 0x0916) + PCI_VENDOR_ID_HPE, 0x032e) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_FIBERHOME, 0x0800) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_FIBERHOME, 0x0908) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_FIBERHOME, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_FIBERHOME, 0x0916) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, @@ -8443,8 +9315,7 @@ static int __init pqi_init(void) pr_info(DRIVER_NAME "\n"); - pqi_sas_transport_template = - sas_attach_transport(&pqi_sas_transport_functions); + pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); if (!pqi_sas_transport_template) return -ENODEV; @@ -8478,6 +9349,8 @@ static void __attribute__((unused)) verify_structures(void) sis_ctrl_to_host_doorbell_clear) != 0xa0); BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_driver_scratch) != 0xb0); + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, + sis_product_identifier) != 0xb4); BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_firmware_status) != 0xbc); BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, @@ -8492,7 +9365,7 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_iu_header, response_queue_id) != 0x4); BUILD_BUG_ON(offsetof(struct pqi_iu_header, - work_area) != 0x6); + driver_flags) != 0x6); BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, @@ -8590,7 +9463,7 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_length) != 2); BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, - header.work_area) != 6); + header.driver_flags) != 6); BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, request_id) != 8); BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, @@ -8646,7 +9519,7 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_length) != 2); BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, - header.work_area) != 6); + header.driver_flags) != 6); BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, request_id) != 8); BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, @@ -8670,7 +9543,7 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.response_queue_id) != 4); BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, - header.work_area) != 6); + header.driver_flags) != 6); BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, request_id) != 8); BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, @@ -8685,6 +9558,8 @@ static void __attribute__((unused)) verify_structures(void) error_index) != 27); BUILD_BUG_ON(offsetof(struct 
pqi_raid_path_request, cdb) != 32); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + timeout) != 60); BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, sg_descriptors) != 64); BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != @@ -8697,7 +9572,7 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.response_queue_id) != 4); BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, - header.work_area) != 6); + header.driver_flags) != 6); BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, request_id) != 8); BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, @@ -8839,6 +9714,8 @@ static void __attribute__((unused)) verify_structures(void) request_id) != 8); BUILD_BUG_ON(offsetof(struct pqi_task_management_request, nexus_id) != 10); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + timeout) != 14); BUILD_BUG_ON(offsetof(struct pqi_task_management_request, lun_number) != 16); BUILD_BUG_ON(offsetof(struct pqi_task_management_request, @@ -8870,13 +9747,23 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configuration_signature) != 1); BUILD_BUG_ON(offsetof(struct bmic_identify_controller, - firmware_version) != 5); + firmware_version_short) != 5); BUILD_BUG_ON(offsetof(struct bmic_identify_controller, extended_logical_unit_count) != 154); BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_build_number) != 190); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + vendor_id) != 200); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + product_id) != 208); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + extra_controller_flags) != 286); BUILD_BUG_ON(offsetof(struct bmic_identify_controller, controller_mode) != 292); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + spare_part_number) != 293); + BUILD_BUG_ON(offsetof(struct bmic_identify_controller, + firmware_version_long) != 325); BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, phys_bay_in_box) != 115); @@ -8894,6 +9781,45 @@ static void __attribute__((unused)) verify_structures(void) current_queue_depth_limit) != 1796); BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + page_code) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + subpage_code) != 1); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, + buffer_length) != 2); + + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + page_code) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + subpage_code) != 1); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, + page_length) != 2); + + BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) + != 18); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + header) != 0); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + firmware_read_support) != 4); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + driver_read_support) != 5); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + firmware_write_support) != 6); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + driver_write_support) != 7); + BUILD_BUG_ON(offsetof(struct 
bmic_sense_feature_io_page_aio_subpage, + max_transfer_encrypted_sas_sata) != 8); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_transfer_encrypted_nvme) != 10); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_5_6) != 12); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_1_10_2drive) != 14); + BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, + max_write_raid_1_10_3drive) != 16); + BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % diff --git a/drivers/scsi/smartpqi/smartpqi_kernel_compat.c b/drivers/scsi/smartpqi/smartpqi_kernel_compat.c new file mode 100644 index 000000000000..1308c04fe349 --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi_kernel_compat.c @@ -0,0 +1,311 @@ +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#include <linux/pci.h> +#include <linux/bsg-lib.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include "smartpqi.h" +#include "smartpqi_kernel_compat.h" + +#if !KFEATURE_HAS_2011_03_QUEUECOMMAND + +int pqi_scsi_queue_command_compat(struct scsi_cmnd *scmd, + void (*done)(struct scsi_cmnd *)) +{ + scmd->SCp.ptr = (char *)done; + + return pqi_scsi_queue_command(scmd->device->host, scmd); +} + +#endif /* !KFEATURE_HAS_2011_03_QUEUECOMMAND */ + +#if !KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE + +int pci_enable_msix_range(struct pci_dev *pci_dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(pci_dev, entries, nvec); + if (rc < 0) + return rc; + if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} + +#endif /* !KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE */ + +#if !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH + +int scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) +{ + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), queue_depth); + + return queue_depth; +} + +static int pqi_change_queue_depth(struct scsi_device *sdev, int qdepth, + int reason) +{ + if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { + struct pqi_scsi_dev *device = sdev->hostdata; + + if (!device) + return -ENODEV; + + if (qdepth < 1) + qdepth = 1; + else if (qdepth > device->queue_depth) + qdepth = device->queue_depth; + + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + + } else if (reason == SCSI_QDEPTH_QFULL) + scsi_track_queue_full(sdev, qdepth); + else + return -ENOTSUPP; + + return sdev->queue_depth; +} + +static int pqi_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, 
tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else { + tag_type = 0; + } + + return tag_type; +} + +#endif /* !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH */ + +void pqi_compat_init_scsi_host_template(struct scsi_host_template *hostt) +{ +#if !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH + hostt->change_queue_depth = pqi_change_queue_depth; + hostt->change_queue_type = pqi_change_queue_type; +#endif /* !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH */ +#if KFEATURE_HAS_LOCKLESS_DISPATCH_IO + hostt->lockless = 1; +#endif +#if KFEATURE_HAS_USE_CLUSTERING + hostt->use_clustering = ENABLE_CLUSTERING; +#endif +} + +void pqi_compat_init_scsi_host(struct Scsi_Host *shost, + struct pqi_ctrl_info *ctrl_info) +{ +#if KFEATURE_HAS_MQ_SUPPORT + shost->nr_hw_queues = ctrl_info->num_queue_groups; +#endif /* KFEATURE_HAS_MQ_SUPPORT */ +} + +#if !KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING + +void scsi_sanitize_inquiry_string(unsigned char *s, int len) +{ + bool terminated = false; + + for (; len > 0; (--len, ++s)) { + if (*s == 0) + terminated = true; + if (terminated || *s < 0x20 || *s > 0x7e) + *s = ' '; + } +} + +#endif /* !KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING */ + +#if !KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT + +#if defined(RHEL6U3) +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). 
+ */ + if (ret) + *val = 0; + return ret; +} + +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} + +#endif /* RHEL6U3 */ + +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_word(dev, pos, val); + } + + return ret; +} + +#endif + +#if !KFEATURE_HAS_BSG_JOB_SMP_HANDLER + +static int pqi_bsg_map_buffer(struct bsg_buffer *buf, struct request *req) +{ + size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); + + if (!req->nr_phys_segments) { + WARN_ON(!req->nr_phys_segments); + return -EINVAL; + } + + buf->sg_list = kzalloc(sz, GFP_KERNEL); + if (!buf->sg_list) + return -ENOMEM; + sg_init_table(buf->sg_list, req->nr_phys_segments); + buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); + buf->payload_len = blk_rq_bytes(req); + return 0; +} + +static int pqi_bsg_prepare_job(struct bsg_job *job, struct request *rq) +{ + struct request *rsp = rq->next_rq; + int ret; +#if KFEATURE_HAS_SCSI_REQUEST + struct scsi_request *req = scsi_req(rq); +#else + struct request *req = rq; +#endif + + job->request = req->cmd; + job->request_len = req->cmd_len; + job->reply = req->sense; + + if (rq->bio) { + ret = pqi_bsg_map_buffer(&job->request_payload, rq); + if (ret) + goto failjob_rls_job; + } + + if (rsp && rsp->bio) { + ret = pqi_bsg_map_buffer(&job->reply_payload, rsp); + if (ret) + goto failjob_rls_rqst_payload; + } + + return 0; + +failjob_rls_rqst_payload: + kfree(job->request_payload.sg_list); +failjob_rls_job: + return -ENOMEM; +} + +struct bsg_return_data { + int result; + unsigned int reply_payload_rcv_len; +}; +static struct bsg_return_data bsg_ret; + +void pqi_bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len) +{ + bsg_ret.result = result; + bsg_ret.reply_payload_rcv_len = reply_payload_rcv_len; + complete(job->dd_data); +} + +int pqi_sas_smp_handler_compat(struct Scsi_Host *shost, struct sas_rphy *rphy, + struct request *rq) +{ + struct bsg_job *job; + struct completion bsg_job; +#if KFEATURE_HAS_SCSI_REQUEST + struct scsi_request *req = scsi_req(rq); + struct scsi_request *resp = scsi_req(rq->next_rq); +#else + struct request *req = rq; + struct request *resp = req->next_rq; +#endif + + init_completion(&bsg_job); + job = kzalloc(sizeof(struct bsg_job), GFP_KERNEL); + if (!job) + return -ENOMEM; + job->dd_data = &bsg_job; + + pqi_bsg_prepare_job(job, rq); + pqi_sas_smp_handler(job, shost, rphy); + + wait_for_completion(&bsg_job); + + req->sense_len = job->reply_len; + memcpy(req->sense, job->reply, job->reply_len); + + resp->resid_len -= min(bsg_ret.reply_payload_rcv_len, resp->resid_len); + req->resid_len = 0; + + kfree(job); + return bsg_ret.result; +} + +#endif /* !KFEATURE_HAS_BSG_JOB_SMP_HANDLER */ diff --git a/drivers/scsi/smartpqi/smartpqi_kernel_compat.h b/drivers/scsi/smartpqi/smartpqi_kernel_compat.h new file mode 100644 index 000000000000..3d2fca982aad --- /dev/null +++ b/drivers/scsi/smartpqi/smartpqi_kernel_compat.h @@ -0,0 +1,618 @@ +/* + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2016-2018 Microsemi Corporation + * Copyright (c) 2016 PMC-Sierra, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * Questions/Comments/Bugfixes to storagedev@microchip.com + * + */ + +#if !defined(_SMARTPQI_KERNEL_COMPAT_H) +#define _SMARTPQI_KERNEL_COMPAT_H + +/* #define RHEL6 */ +/* #define RHEL7 */ +/* default is kernel.org */ + +/* ----- RHEL6 variants --------- */ +#if \ + defined(RHEL6U0) || \ + defined(RHEL6U1) || \ + defined(RHEL6U2) || \ + defined(RHEL6U3) || \ + defined(RHEL6U4) || \ + defined(RHEL6U5) || \ + defined(RHEL6U6) || \ + defined(RHEL6U7) || \ + defined(RHEL6U8) || \ + defined(RHEL6U9) || \ + defined(RHEL6U10) +#define RHEL6 +#endif + +/* ----- RHEL7 variants --------- */ +#if \ + defined(RHEL7U0) || \ + defined(RHEL7U1) || \ + defined(RHEL7U2) || \ + defined(RHEL7U3) || \ + defined(RHEL7U4) || \ + defined(RHEL7U4ARM) || \ + defined(RHEL7U5) || \ + defined(RHEL7U5ARM) || \ + defined(RHEL7U6) || \ + defined(RHEL7U7) || \ + defined(RHEL7U8) || \ + defined(RHEL7U9) +#define RHEL7 +#endif + +/* ----- RHEL8 variants --------- */ +#if \ + defined(RHEL8U0) || \ + defined(RHEL8U1) || \ + defined(RHEL8U2) || \ + defined(RHEL8U3) || \ + defined(RHEL8U4) || \ + defined(RHEL8U5) || \ + defined(RHEL8U6) || \ + defined(RHEL8U7) +#define RHEL8 +#endif + +/* ----- SLES11 variants --------- */ +#if \ + defined(SLES11SP0) || \ + defined(SLES11SP1) || \ + defined(SLES11SP2) || \ + defined(SLES11SP3) || \ + defined(SLES11SP4) +#define SLES11 +#endif + +/* ----- SLES12 variants --------- */ +#if \ + defined(SLES12SP0) || \ + defined(SLES12SP1) || \ + defined(SLES12SP2) || \ + defined(SLES12SP3) || \ + defined(SLES12SP4) || \ + defined(SLES12SP5) +#define SLES12 +#endif + +/* ----- SLES15 variants --------- */ +#if \ + defined(SLES15SP0) || \ + defined(SLES15SP1) || \ + defined(SLES15SP2) || \ + defined(SLES15SP3) || \ + defined(SLES15SP4) +#define SLES15 +#endif + +#include <scsi/scsi_tcq.h> +#include <linux/bsg-lib.h> +#include <linux/ktime.h> +#include <linux/dma-mapping.h> + +#if defined(MSG_SIMPLE_TAG) +#define KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH 0 +#if !defined(RHEL7U3) +#define KFEATURE_HAS_MQ_SUPPORT 0 +#endif +#endif + +#if defined(RHEL8) || defined(CENTOS7ALTARM) +#define KFEATURE_HAS_MQ_SUPPORT 0 +#endif + +#if defined(XEN7) +#define KCLASS4A +#endif + +#if !defined(PCI_EXP_DEVCTL2_COMP_TIMEOUT) +#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f +#if TORTUGA +#define KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT 1 +#else +#define KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT 0 +#endif +#endif + +#if defined(RHEL6) +#define KFEATURE_HAS_WAIT_FOR_COMPLETION_IO 0 +#define KFEATURE_HAS_2011_03_QUEUECOMMAND 0 +#define KFEATURE_HAS_NO_WRITE_SAME 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#if defined(RHEL6U3) || defined(RHEL6U4) || defined(RHEL6U5) +#if defined(RHEL6U3) +#define KFEATURE_HAS_DMA_ZALLOC_COHERENT 0 +#endif +#define KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 0 +#endif +#if !defined(RHEL6U0) && !defined(RHEL6U1) +#define KFEATURE_HAS_LOCKLESS_DISPATCH_IO 1 +#endif +#if defined(RHEL6U5) +#define KFEATURE_HAS_DMA_MASK_AND_COHERENT 0 +#endif +#elif defined(RHEL7) +#if defined(RHEL7U0) +#define 
KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#endif +#if defined(RHEL7U1) +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#endif +#if defined(RHEL7U4ARM) || defined(RHEL7U5ARM) +#endif +#elif defined(SLES11) +#define KFEATURE_HAS_WAIT_FOR_COMPLETION_IO 0 +#define KFEATURE_HAS_NO_WRITE_SAME 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#if defined(SLES11SP0) || defined(SLES11SP1) +#define KFEATURE_HAS_2011_03_QUEUECOMMAND 0 +#endif +#if defined(SLES11SP3) +#define KFEATURE_HAS_DMA_ZALLOC_COHERENT 0 +#define KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 0 +#endif +#elif defined(SLES12) +#if defined(SLES12SP2) || defined(SLES12SP3) +#define KFEATURE_HAS_KTIME_SECONDS 1 +#endif +#if defined(SLES12SP0) +#define KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#endif +#if defined(SLES12SP1) +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#endif +#elif defined(SLES15) +#define KFEATURE_HAS_SCSI_REQUEST 1 +#define KFEATURE_HAS_KTIME_SECONDS 1 +#elif defined(UBUNTU1404) || TORTUGA || defined(KCLASS3C) +#define KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#elif defined(OL7U2) || defined(KCLASS3B) +#define KFEATURE_HAS_DMA_MASK_AND_COHERENT 0 +#define KFEATURE_HAS_WAIT_FOR_COMPLETION_IO 0 +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 0 +#endif +#if defined(KCLASS4A) +#define KFEATURE_HAS_KTIME_SECONDS 1 +#endif +#if defined(KCLASS4B) || defined(KCLASS4C) || defined(SLES12SP4) || \ + defined(SLES12SP5) || defined(RHEL8) || defined(KCLASS5A) || \ + defined(KCLASS5B) || defined(KCLASS5C) || defined(KCLASS5D) || \ + defined(SLES15SP2) || defined(SLES15SP3) || defined (CENTOS7ALTARM) +#define KFEATURE_HAS_KTIME_SECONDS 1 +#define KFEATURE_HAS_SCSI_REQUEST 1 +#define KFEATURE_HAS_KTIME64 1 +#endif +#if defined(KCLASS4C) || defined(RHEL8) || defined(SLES15SP1) || \ + defined(SLES15SP2) || defined(SLES15SP3) || defined(KCLASS5A) || \ + defined(KCLASS5B) || defined(KCLASS5C) || defined(KCLASS5D) || \ + defined(SLES12SP5) || defined (CENTOS7ALTARM) +#define KFEATURE_HAS_BSG_JOB_SMP_HANDLER 1 +#endif +#if defined(RHEL8U3) +#define KFEATURE_HAS_HOST_BUSY_FUNCTION 1 +#endif + +#if defined(KCLASS3D) +#define KFEATURE_HAS_KTIME_SECONDS 1 +#endif +#if defined(KCLASS5A) || defined(KCLASS5B) || defined(KCLASS5C) || defined(KCLASS5D) || \ + defined(KCLASS4D) || defined(SLES15SP2) || defined(SLES15SP3) +#define dma_zalloc_coherent dma_alloc_coherent +#define shost_use_blk_mq(x) 1 +#define KFEATURE_HAS_USE_CLUSTERING 0 +#endif + +#if defined(KCLASS5B) || defined(KCLASS5C) || defined(KCLASS5D) || \ + defined(KCLASS4D) || defined(SLES15SP2) || defined(SLES15SP3) +#define IOCTL_INT unsigned int +#else +#define IOCTL_INT int +#endif + +#if defined(KCLASS5C) || defined(KCLASS5D) +#define KFEATURE_HAS_HOST_BUSY_FUNCTION 1 +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define ioremap_nocache ioremap +#endif + + +#if !defined(from_timer) +#define KFEATURE_HAS_OLD_TIMER 1 +#endif + +/* default values */ +#define KFEATURE_HAS_KTIME_SECONDS 1 +#define KFEATURE_HAS_BSG_JOB_SMP_HANDLER 1 +#define KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING 1 +#define KFEATURE_HAS_KTIME64 1 +#define KFEATURE_HAS_DMA_ZALLOC_COHERENT 0 +#define KFEATURE_HAS_USE_CLUSTERING 0 +#define shost_use_blk_mq(x) 1 + +#if !defined(KFEATURE_HAS_WAIT_FOR_COMPLETION_IO) +#define KFEATURE_HAS_WAIT_FOR_COMPLETION_IO 1 +#endif +#if !defined(KFEATURE_HAS_2011_03_QUEUECOMMAND) +#define KFEATURE_HAS_2011_03_QUEUECOMMAND 1 +#endif +#if !defined(KFEATURE_HAS_DMA_ZALLOC_COHERENT) +#define 
KFEATURE_HAS_DMA_ZALLOC_COHERENT 1 +#endif +#if !defined(KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE) +#define KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE 1 +#endif +#if !defined(KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH) +#define KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH 1 +#endif +#if !defined(KFEATURE_HAS_MQ_SUPPORT) +#define KFEATURE_HAS_MQ_SUPPORT 1 +#endif +#if !defined(KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING) +#define KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING 1 +#endif +#if !defined(KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT) +#define KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT 1 +#endif +#if !defined(KFEATURE_HAS_NO_WRITE_SAME) +#define KFEATURE_HAS_NO_WRITE_SAME 1 +#endif +#if !defined(KFEATURE_HAS_BSG_JOB_SMP_HANDLER) +#define KFEATURE_HAS_BSG_JOB_SMP_HANDLER 0 +#endif +#if !defined(KFEATURE_HAS_HOST_BUSY_FUNCTION) +#define KFEATURE_HAS_HOST_BUSY_FUNCTION 0 +#endif +#if !defined(KFEATURE_HAS_SCSI_REQUEST) +#define KFEATURE_HAS_SCSI_REQUEST 0 +#endif +#if !defined(KFEATURE_HAS_LOCKLESS_DISPATCH_IO) +#define KFEATURE_HAS_LOCKLESS_DISPATCH_IO 0 +#endif +#if !defined(KFEATURE_HAS_USE_CLUSTERING) +#define KFEATURE_HAS_USE_CLUSTERING 1 +#define IOCTL_INT int +#else +/* for tk4 */ +#ifdef IOCTL_INT +#undef IOCTL_INT +#define IOCTL_INT unsigned int +#endif +#endif +#if !defined(KFEATURE_HAS_OLD_TIMER) +#define KFEATURE_HAS_OLD_TIMER 0 +#endif +#if !defined(KFEATURE_HAS_KTIME_SECONDS) +#define KFEATURE_HAS_KTIME_SECONDS 0 +#endif +#if !defined(KFEATURE_HAS_KTIME64) +#define KFEATURE_HAS_KTIME64 0 +#endif +#if !defined(KFEATURE_HAS_DMA_MASK_AND_COHERENT) +#define KFEATURE_HAS_DMA_MASK_AND_COHERENT 1 +#endif +#if !defined(KFEATURE_HAS_ATOMIC_HOST_BUSY) +#define KFEATURE_HAS_ATOMIC_HOST_BUSY 1 +#endif + +#if !defined(list_next_entry) +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif + +#if !defined(list_first_entry_or_null) +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#if !defined(TYPE_ZBC) +#define TYPE_ZBC 0x14 +#endif + +#if !defined(readq) +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + u32 lower32; + u32 upper32; + + lower32 = readl(addr); + upper32 = readl(addr + 4); + + return ((u64)upper32 << 32) | lower32; +} +#endif + +#if !defined(writeq) +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) +{ + u32 lower32; + u32 upper32; + + lower32 = lower_32_bits(value); + upper32 = upper_32_bits(value); + + writel(lower32, addr); + writel(upper32, addr + 4); +} +#endif + +static inline void pqi_disable_write_same(struct scsi_device *sdev) +{ +#if KFEATURE_HAS_NO_WRITE_SAME + sdev->no_write_same = 1; +#endif +} + +#if !defined(PCI_DEVICE_SUB) +#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = (subvend), .subdevice = (subdev) +#endif + +#if !defined(PCI_VENDOR_ID_HPE) +#define PCI_VENDOR_ID_HPE 0x1590 +#endif + +#if !defined(PCI_VENDOR_ID_ADVANTECH) +#define PCI_VENDOR_ID_ADVANTECH 0x13fe +#endif + +#if !defined(PCI_VENDOR_ID_FIBERHOME) +#define PCI_VENDOR_ID_FIBERHOME 0x1d8d +#endif + +#if !defined(PCI_VENDOR_ID_GIGABYTE) +#define PCI_VENDOR_ID_GIGABYTE 0x1458 +#endif + +#if !defined(PCI_VENDOR_ID_FOXCONN) +#define PCI_VENDOR_ID_FOXCONN 0x105b +#endif + +#if !defined(PCI_VENDOR_ID_HUAWEI) +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#endif + +#if !defined(PCI_VENDOR_ID_H3C) +#define PCI_VENDOR_ID_H3C 0x193d +#endif + +#if !defined(PCI_VENDOR_ID_QUANTA) +#define PCI_VENDOR_ID_QUANTA 0x152d +#endif + +#if !defined(PCI_VENDOR_ID_INSPUR) +#define PCI_VENDOR_ID_INSPUR 0x1bd4 +#endif + +#if !defined(offsetofend) +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) +#endif + +void pqi_compat_init_scsi_host_template(struct scsi_host_template *template); +void pqi_compat_init_scsi_host(struct Scsi_Host *shost, + struct pqi_ctrl_info *ctrl_info); + +#if !KFEATURE_HAS_WAIT_FOR_COMPLETION_IO + +static inline unsigned long wait_for_completion_io_timeout(struct completion *x, + unsigned long timeout) +{ + return wait_for_completion_timeout(x, timeout); +} + +static inline unsigned long wait_for_completion_io(struct completion *x) +{ + wait_for_completion(x); + return 0; +} + +#endif /* !KFEATURE_HAS_WAIT_FOR_COMPLETION_IO */ + +#if KFEATURE_HAS_2011_03_QUEUECOMMAND + +#define PQI_SCSI_QUEUE_COMMAND pqi_scsi_queue_command + +static inline void pqi_scsi_done(struct scsi_cmnd *scmd) +{ + pqi_prep_for_scsi_done(scmd); + if (scmd && scmd->scsi_done) + scmd->scsi_done(scmd); +} + +#else + +int pqi_scsi_queue_command_compat(struct scsi_cmnd *scmd, + void (*done)(struct scsi_cmnd *)); + +#define PQI_SCSI_QUEUE_COMMAND pqi_scsi_queue_command_compat + +static inline void pqi_scsi_done(struct scsi_cmnd *scmd) +{ + void (*scsi_done)(struct scsi_cmnd *); + + pqi_prep_for_scsi_done(scmd); + if (scmd) { + scsi_done = (void(*)(struct scsi_cmnd *))scmd->SCp.ptr; + scsi_done(scmd); + } +} + +#endif /* KFEATURE_HAS_2011_03_QUEUECOMMAND */ + +#if !KFEATURE_HAS_DMA_ZALLOC_COHERENT + +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} + +#endif /* !KFEATURE_HAS_DMA_ZALLOC_COHERENT */ + +#if !KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE + +int pci_enable_msix_range(struct pci_dev *pci_dev, struct msix_entry *entries, + int 
minvec, int maxvec); + +#endif /* !KFEATURE_HAS_PCI_ENABLE_MSIX_RANGE */ + +#if !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH + +int scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); + +#endif /* !KFEATURE_HAS_SCSI_CHANGE_QUEUE_DEPTH */ + +#if !KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING + +void scsi_sanitize_inquiry_string(unsigned char *s, int len); + +#endif /* !KFEATURE_HAS_SCSI_SANITIZE_INQUIRY_STRING */ + +#if !KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT + +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); + +#endif /* !KFEATURE_HAS_PCIE_CAPABILITY_SUPPORT */ + +static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, + struct scsi_cmnd *scmd) +{ + u16 hw_queue; + +#if KFEATURE_HAS_MQ_SUPPORT + if (shost_use_blk_mq(scmd->device->host)) + hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); + else + hw_queue = smp_processor_id(); +#else + hw_queue = smp_processor_id(); +#endif + if (hw_queue > ctrl_info->max_hw_queue_index) + hw_queue = 0; + + return hw_queue; +} + +#ifdef KFEATURE_NEEDS_BLK_RQ_IS_PASSTHROUGH + +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return rq->cmd_type != REQ_TYPE_FS; +} + +#endif /* KFEATURE_NEEDS_BLK_RQ_IS_PASSTHROUGH */ + +#if !KFEATURE_HAS_BSG_JOB_SMP_HANDLER + +int pqi_sas_smp_handler_compat(struct Scsi_Host *shost, struct sas_rphy *rphy, + struct request *req); + +void pqi_bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len); + +#define PQI_SAS_SMP_HANDLER pqi_sas_smp_handler_compat + +#else + +#define PQI_SAS_SMP_HANDLER pqi_sas_smp_handler + +static inline void pqi_bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len) +{ + bsg_job_done(job, result, reply_payload_rcv_len); +} + +#endif /* !KFEATURE_HAS_BSG_JOB_SMP_HANDLER */ + +#if KFEATURE_HAS_OLD_TIMER +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#if !defined(TIMER_DATA_TYPE) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) +#endif + +static inline void timer_setup (struct timer_list *timer, + void (*func) (struct timer_list *), unsigned long data) +{ + init_timer(timer); + timer->function = (TIMER_FUNC_TYPE) func; + timer->data = (unsigned long) timer; +} +#endif /* KFEATURE_HAS_OLD_TIMER */ + +#if !KFEATURE_HAS_KTIME64 +#define time64_to_tm time_to_tm +#endif + +#if !KFEATURE_HAS_KTIME_SECONDS +static inline unsigned long ktime_get_real_seconds(void) +{ + ktime_t tv; + struct timeval time; + + tv = ktime_get_real(); + time = ktime_to_timeval(tv); + + return time.tv_sec; +} +#endif + +#if !KFEATURE_HAS_DMA_MASK_AND_COHERENT + +static inline int pqi_dma_set_mask_and_coherent(struct device *device, u64 mask) +{ + return dma_set_mask(device, mask); +} + +#else + +static inline int pqi_dma_set_mask_and_coherent(struct device *device, u64 mask) +{ + return dma_set_mask_and_coherent(device, mask); +} + +#endif /* !KFEATURE_HAS_DMA_MASK_AND_COHERENT */ + +static inline bool pqi_scsi_host_busy(struct Scsi_Host *shost) +{ +#if KFEATURE_HAS_HOST_BUSY_FUNCTION + return scsi_host_busy(shost); +#else +#if KFEATURE_HAS_ATOMIC_HOST_BUSY + return atomic_read(&shost->host_busy) > 0; +#else + return shost->host_busy > 0; +#endif +#endif +} + +#endif /* _SMARTPQI_KERNEL_COMPAT_H */ diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 
6776dfc1d317..1d8b09b9ccb1 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -1,10 +1,18 @@ -// SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * * Questions/Comments/Bugfixes to storagedev@microchip.com * */ @@ -16,6 +24,7 @@ #include <scsi/scsi_transport_sas.h> #include <asm/unaligned.h> #include "smartpqi.h" +#include "smartpqi_kernel_compat.h" static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) { @@ -45,9 +54,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy) struct sas_phy *phy = pqi_sas_phy->phy; sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (pqi_sas_phy->added_to_port) list_del(&pqi_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(pqi_sas_phy); } @@ -65,8 +74,8 @@ static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy) memset(identify, 0, sizeof(*identify)); identify->sas_address = pqi_sas_port->sas_address; identify->device_type = SAS_END_DEVICE; - identify->initiator_port_protocols = SAS_PROTOCOL_STP; - identify->target_port_protocols = SAS_PROTOCOL_STP; + identify->initiator_port_protocols = SAS_PROTOCOL_ALL; + identify->target_port_protocols = SAS_PROTOCOL_ALL; phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; @@ -92,14 +101,25 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, identify = &rphy->identify; identify->sas_address = pqi_sas_port->sas_address; + identify->phy_identifier = pqi_sas_port->device->phy_id; - if (pqi_sas_port->device && - pqi_sas_port->device->is_expander_smp_device) { - identify->initiator_port_protocols = SAS_PROTOCOL_SMP; - identify->target_port_protocols = SAS_PROTOCOL_SMP; - } else { - identify->initiator_port_protocols = SAS_PROTOCOL_STP; - identify->target_port_protocols = SAS_PROTOCOL_STP; + identify->initiator_port_protocols = SAS_PROTOCOL_ALL; + identify->target_port_protocols = SAS_PROTOCOL_STP; + + if (pqi_sas_port->device) { + switch (pqi_sas_port->device->device_type) { + case SA_DEVICE_TYPE_SAS: + case SA_DEVICE_TYPE_SES: + case SA_DEVICE_TYPE_NVME: + identify->target_port_protocols = SAS_PROTOCOL_SSP; + break; + case SA_DEVICE_TYPE_EXPANDER_SMP: + identify->target_port_protocols = SAS_PROTOCOL_SMP; + break; + case SA_DEVICE_TYPE_SATA: + default: + break; + } } return sas_rphy_add(rphy); @@ -107,8 +127,7 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port) { - if (pqi_sas_port->device && - pqi_sas_port->device->is_expander_smp_device) + if (pqi_sas_port->device && 
pqi_sas_port->device->is_expander_smp_device) return sas_expander_alloc(pqi_sas_port->port, SAS_FANOUT_EXPANDER_DEVICE); @@ -161,7 +180,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port) list_for_each_entry_safe(pqi_sas_phy, next, &pqi_sas_port->phy_list_head, phy_list_entry) - pqi_free_sas_phy(pqi_sas_phy); + pqi_free_sas_phy(pqi_sas_phy); sas_port_delete(pqi_sas_port->port); list_del(&pqi_sas_port->port_list_entry); @@ -191,7 +210,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node) list_for_each_entry_safe(pqi_sas_port, next, &pqi_sas_node->port_list_head, port_list_entry) - pqi_free_sas_port(pqi_sas_port); + pqi_free_sas_port(pqi_sas_port); kfree(pqi_sas_node); } @@ -312,7 +331,6 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy) static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { - int rc; unsigned long flags; struct Scsi_Host *shost; @@ -361,7 +379,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy, } } - if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) { + if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) { rc = -EINVAL; goto out; } @@ -382,12 +400,10 @@ out: spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return rc; - } static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy) { - int rc; unsigned long flags; struct pqi_ctrl_info *ctrl_info; @@ -482,7 +498,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy, req_size -= SMP_CRC_FIELD_LENGTH; put_unaligned_le32(req_size, ¶meters->request_length); - put_unaligned_le32(resp_size, ¶meters->response_length); sg_copy_to_buffer(job->request_payload.sg_list, @@ -502,7 +517,7 @@ static unsigned int pqi_build_sas_smp_handler_reply( job->reply_len = le16_to_cpu(error_info->sense_data_length); memcpy(job->reply, error_info->data, - le16_to_cpu(error_info->sense_data_length)); + le16_to_cpu(error_info->sense_data_length)); return job->reply_payload.payload_len - get_unaligned_le32(&error_info->data_in_transferred); @@ -512,12 +527,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { int rc; - struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + struct pqi_ctrl_info *ctrl_info; struct bmic_csmi_smp_passthru_buffer *smp_buf; struct pqi_raid_error_info error_info; unsigned int reslen = 0; - pqi_ctrl_busy(ctrl_info); + ctrl_info = shost_to_hba(shost); if (job->reply_payload.payload_len == 0) { rc = -ENOMEM; @@ -539,16 +554,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, goto out; } - if (pqi_ctrl_offline(ctrl_info)) { - rc = -ENXIO; - goto out; - } - - if (pqi_ctrl_blocked(ctrl_info)) { - rc = -EBUSY; - goto out; - } - smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job); if (!smp_buf) { rc = -ENOMEM; @@ -561,10 +566,11 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, goto out; reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info); + out: - bsg_job_done(job, rc, reslen); - pqi_ctrl_unbusy(ctrl_info); + pqi_bsg_job_done(job, rc, reslen); } + struct sas_function_template pqi_sas_transport_functions = { .get_linkerrors = pqi_sas_get_linkerrors, .get_enclosure_identifier = pqi_sas_get_enclosure_identifier, @@ -574,5 +580,6 @@ struct sas_function_template pqi_sas_transport_functions = { .phy_setup = pqi_sas_phy_setup, .phy_release = pqi_sas_phy_release, .set_phy_speed = pqi_sas_phy_speed, - .smp_handler = pqi_sas_smp_handler, + .smp_handler = PQI_SAS_SMP_HANDLER, }; + 
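[Editor's note] The `.smp_handler = PQI_SAS_SMP_HANDLER` assignment that closes the hunk above is the visible end of the compat pattern used throughout this series: a KFEATURE_* macro in smartpqi_kernel_compat.h selects, at compile time, either the kernel's modern bsg_job-based interface or a locally defined shim with the older request-based signature. Below is a minimal stand-alone sketch of that dispatch pattern; the feature macro, the handler signatures, and the ops structure are simplified stand-ins for illustration, not the driver's actual definitions.

/*
 * Minimal sketch of the compile-time dispatch used by
 * smartpqi_kernel_compat.h: a feature macro picks either the native
 * handler or a compat wrapper with the legacy signature. All names
 * and types here are illustrative only.
 */
#include <stdio.h>

#define HAS_NEW_SMP_HANDLER 0   /* stand-in for KFEATURE_HAS_BSG_JOB_SMP_HANDLER */

struct host;                    /* opaque stand-ins for Scsi_Host / sas_rphy / request */
struct rphy;
struct request;

static int native_smp_handler(struct host *h, struct rphy *r, struct request *rq)
{
	/* modern path: would hand the bsg_job straight to the core handler */
	puts("native handler");
	return 0;
}

static int compat_smp_handler(struct host *h, struct rphy *r, struct request *rq)
{
	/* legacy path: would build a bsg_job by hand, run the core handler
	 * synchronously, then copy the reply back into the request, as the
	 * pqi_sas_smp_handler_compat() shim in this patch does */
	puts("compat wrapper");
	return 0;
}

#if HAS_NEW_SMP_HANDLER
#define SMP_HANDLER native_smp_handler
#else
#define SMP_HANDLER compat_smp_handler
#endif

struct transport_ops {
	int (*smp_handler)(struct host *, struct rphy *, struct request *);
};

/* mirrors ".smp_handler = PQI_SAS_SMP_HANDLER" in the hunk above */
static struct transport_ops ops = {
	.smp_handler = SMP_HANDLER,
};

int main(void)
{
	return ops.smp_handler(NULL, NULL, NULL);
}

Because the selection happens in the preprocessor, the ops table itself stays identical across kernel generations; only the symbol the macro resolves to changes, which is why the transport-template hunk above is a one-line edit.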
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index f0d6e88ba2c1..be702bfc71f2 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -1,10 +1,18 @@ -// SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * * Questions/Comments/Bugfixes to storagedev@microchip.com * */ @@ -53,7 +61,11 @@ #define SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 +#if TORTUGA +#define SIS_CTRL_READY_TIMEOUT_SECS 150 +#else #define SIS_CTRL_READY_TIMEOUT_SECS 180 +#endif #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 @@ -71,7 +83,7 @@ struct sis_base_struct { /* error response data */ __le32 error_buffer_element_length; /* length of each PQI error */ /* response buffer element */ - /* in bytes */ + /* in bytes */ __le32 error_buffer_num_elements; /* total number of PQI error */ /* response buffers available */ }; @@ -146,7 +158,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info) bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info) { return readl(&ctrl_info->registers->sis_firmware_status) & - SIS_CTRL_KERNEL_UP; + SIS_CTRL_KERNEL_UP; +} + +u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info) +{ + return readl(&ctrl_info->registers->sis_product_identifier); } /* used for passing command parameters/results when issuing SIS commands */ @@ -361,7 +378,7 @@ static int sis_wait_for_doorbell_bit_to_clear( rc = -ETIMEDOUT; break; } - usleep_range(1000, 2000); + msleep(1); } return rc; @@ -436,3 +453,4 @@ static void __attribute__((unused)) verify_structures(void) error_buffer_num_elements) != 0x14); BUILD_BUG_ON(sizeof(struct sis_base_struct) != 0x18); } + diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 86b0e484d921..e53032ab46df 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -1,10 +1,18 @@ -/* SPDX-License-Identifier: GPL-2.0 */ /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * * Questions/Comments/Bugfixes to storagedev@microchip.com * */ @@ -27,5 +35,6 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); +u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info); #endif /* _SMARTPQI_SIS_H */ diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index a85d52b5dc32..0521575c6773 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -59,6 +59,7 @@ static int snirm710_probe(struct platform_device *dev) struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; struct resource *res; + int rc; res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) @@ -84,7 +85,9 @@ static int snirm710_probe(struct platform_device *dev) goto out_kfree; host->this_id = 7; host->base = base; - host->irq = platform_get_irq(dev, 0); + host->irq = rc = platform_get_irq(dev, 0); + if (rc < 0) + goto out_put_host; if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { printk(KERN_ERR "snirm710: request_irq failed!\n"); goto out_put_host; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4664fdf75c0f..70a28f6fb1d0 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -750,7 +750,7 @@ static int sr_probe(struct device *dev) cd->cdi.disk = disk; if (register_cdrom(&cd->cdi)) - goto fail_put; + goto fail_minor; /* * Initialize block layer runtime PM stuffs before the @@ -768,6 +768,10 @@ static int sr_probe(struct device *dev) return 0; +fail_minor: + spin_lock(&sr_index_lock); + clear_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); fail_put: put_disk(disk); fail_free: diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index e3b0ce25162b..b9db2ec6d036 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -66,9 +66,6 @@ void sr_vendor_init(Scsi_CD *cd) { -#ifndef CONFIG_BLK_DEV_SR_VENDOR - cd->vendor = VENDOR_SCSI3; -#else const char *vendor = cd->device->vendor; const char *model = cd->device->model; @@ -100,7 +97,6 @@ void sr_vendor_init(Scsi_CD *cd) cd->vendor = VENDOR_TOSHIBA; } -#endif } @@ -114,10 +110,8 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) struct ccs_modesel_head *modesel; int rc, density = 0; -#ifdef CONFIG_BLK_DEV_SR_VENDOR if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 
0x81 : 0x83; -#endif buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); if (!buffer) @@ -205,7 +199,6 @@ int sr_cd_check(struct cdrom_device_info *cdi) } break; -#ifdef CONFIG_BLK_DEV_SR_VENDOR case VENDOR_NEC:{ unsigned long min, sec, frame; cgc.cmd[0] = 0xde; @@ -298,7 +291,6 @@ int sr_cd_check(struct cdrom_device_info *cdi) sector = buffer[11] + (buffer[10] << 8) + (buffer[9] << 16) + (buffer[8] << 24); break; -#endif /* CONFIG_BLK_DEV_SR_VENDOR */ default: /* should not happen */ diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e3266a64a477..2121e44c342f 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1267,8 +1267,8 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); - scsi_tape_put(STp); DEBC_printk(STp, "Device already in use.\n"); + scsi_tape_put(STp); return (-EBUSY); } diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 440a73eae647..f8b1e5748440 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!esp->command_block) goto fail_unmap_regs_dma; - host->irq = platform_get_irq(dev, 0); + host->irq = err = platform_get_irq(dev, 0); + if (err < 0) + goto fail_unmap_command_block; err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "SUN3X ESP", esp); if (err < 0) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 411ef60b2c14..4f066e3b19af 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1492,9 +1492,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) */ } mask <<= offset; - - pm_runtime_get_sync(host->hba->dev); - ufshcd_hold(host->hba, false); ufshcd_rmwl(host->hba, TEST_BUS_SEL, (u32)host->testbus.select_major << 19, REG_UFS_CFG1); @@ -1507,8 +1504,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) * committed before returning. 
*/ mb(); - ufshcd_release(host->hba); - pm_runtime_put_sync(host->hba->dev); return 0; } @@ -1546,11 +1541,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) /* sleep a bit intermittently as we are dumping too much data */ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_testbus_read(hba); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_print_unipro_testbus(hba); - usleep_range(1000, 1100); + udelay(1000); } /** diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index d2197a31abe5..bad366e49159 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job) desc_op = bsg_request->upiu_req.qr.opcode; ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, &desc_len, desc_op); - if (ret) + if (ret) { + pm_runtime_put_sync(hba->dev); goto out; + } /* fall through */ case UPIU_TRANSACTION_NOP_OUT: diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index fe6cad9b2a0d..03985919150b 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -12,6 +12,7 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" +#define UFS_VENDOR_MICRON 0x12C #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 3b19de3ae9a3..e4ba2445ff56 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -96,6 +96,30 @@ static int ufshcd_pci_resume(struct device *dev) { return ufshcd_system_resume(dev_get_drvdata(dev)); } + +/** + * ufshcd_pci_poweroff - suspend-to-disk poweroff function + * @dev: pointer to PCI device handle + * + * Returns 0 if successful + * Returns non-zero otherwise + */ +static int ufshcd_pci_poweroff(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int spm_lvl = hba->spm_lvl; + int ret; + + /* + * For poweroff we need to set the UFS device to PowerDown mode. + * Force spm_lvl to ensure that. 
+ */ + hba->spm_lvl = 5; + ret = ufshcd_system_suspend(hba); + hba->spm_lvl = spm_lvl; + return ret; +} + #endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_PM @@ -190,8 +214,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } static const struct dev_pm_ops ufshcd_pci_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, - ufshcd_pci_resume) +#ifdef CONFIG_PM_SLEEP + .suspend = ufshcd_pci_suspend, + .resume = ufshcd_pci_resume, + .freeze = ufshcd_pci_suspend, + .thaw = ufshcd_pci_resume, + .poweroff = ufshcd_pci_poweroff, + .restore = ufshcd_pci_resume, +#endif SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, ufshcd_pci_runtime_resume, ufshcd_pci_runtime_idle) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d9ea0ae4f374..25112c2fe2db 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -217,6 +217,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, @@ -237,7 +239,7 @@ static struct ufs_dev_fix ufs_fixups[] = { END_FIX }; -static void ufshcd_tmc_handler(struct ufs_hba *hba); +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); @@ -332,27 +334,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1; if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; } - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } @@ -494,8 +496,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) static void ufshcd_print_host_state(struct ufs_hba *hba) { dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", - hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -642,40 +644,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; } -/** - * ufshcd_get_tm_free_slot - get a free slot for task management request - * @hba: per adapter instance - * @free_slot: pointer to variable with available slot value - * - * Get a 
free tag and lock it until ufshcd_put_tm_slot() is called. - * Returns 0 if free slot is not available, else return 1 with tag value - * in @free_slot. - */ -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) -{ - int tag; - bool ret = false; - - if (!free_slot) - goto out; - - do { - tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); - if (tag >= hba->nutmrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); - - *free_slot = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) -{ - clear_bit_unlock(slot, &hba->tm_slots_in_use); -} - /** * ufshcd_utrl_clear - Clear a bit in UTRLCLR register * @hba: per adapter instance @@ -1255,8 +1223,15 @@ static int ufshcd_devfreq_target(struct device *dev, } spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + pm_runtime_get_noresume(hba->dev); + if (!pm_runtime_active(hba->dev)) { + pm_runtime_put_noidle(hba->dev); + ret = -EAGAIN; + goto out; + } start = ktime_get(); ret = ufshcd_devfreq_scale(hba, scale_up); + pm_runtime_put(hba->dev); trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), (scale_up ? "up" : "down"), @@ -1270,6 +1245,24 @@ out: return ret; } +static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved) +{ + int *busy = priv; + + WARN_ON_ONCE(reserved); + (*busy)++; + return false; +} + +/* Whether or not any tag is in use by a request that is in progress. */ +static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) +{ + struct request_queue *q = hba->cmd_queue; + int busy = 0; + + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); + return busy; +} static int ufshcd_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) @@ -1514,6 +1507,7 @@ unblock_reqs: int ufshcd_hold(struct ufs_hba *hba, bool async) { int rc = 0; + bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba)) @@ -1539,8 +1533,15 @@ start: */ if (ufshcd_can_hibern8_during_gating(hba) && ufshcd_is_link_hibern8(hba)) { + if (async) { + rc = -EAGAIN; + hba->clk_gating.active_reqs--; + break; + } spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->clk_gating.ungate_work); + flush_result = flush_work(&hba->clk_gating.ungate_work); + if (hba->clk_gating.is_suspended && !flush_result) + goto out; spin_lock_irqsave(hba->host->host_lock, flags); goto start; } @@ -1559,12 +1560,12 @@ start: */ /* fallthrough */ case CLKS_OFF: - ufshcd_scsi_block_requests(hba); hba->clk_gating.state = REQ_CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - queue_work(hba->clk_gating.clk_gating_workq, - &hba->clk_gating.ungate_work); + if (queue_work(hba->clk_gating.clk_gating_workq, + &hba->clk_gating.ungate_work)) + ufshcd_scsi_block_requests(hba); /* * fall through to check if we should wait for this * work to be done or not. 
@@ -1616,7 +1617,7 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.active_reqs || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done) goto rel_lock; @@ -1670,7 +1671,7 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done || ufshcd_eh_in_progress(hba)) return; @@ -1878,12 +1879,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); } /** @@ -2440,22 +2441,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba->req_abort_count = 0; - /* acquire the tag to make sure device cmds don't use it */ - if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { - /* - * Dev manage command in progress, requeue the command. - * Requeuing the command helps in cases where the request *may* - * find different tag instead of waiting for dev manage command - * completion. - */ - err = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - err = ufshcd_hold(hba, true); if (err) { err = SCSI_MLQUEUE_HOST_BUSY; - clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } WARN_ON(hba->clk_gating.state != CLKS_ON); @@ -2475,8 +2463,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { + ufshcd_release(hba); lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } /* Make sure descriptors are ready before ringing the doorbell */ @@ -2623,44 +2611,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, return err; } -/** - * ufshcd_get_dev_cmd_tag - Get device management command tag - * @hba: per-adapter instance - * @tag_out: pointer to variable with available slot value - * - * Get a free slot and lock it until device management command - * completes. - * - * Returns false if free slot is unavailable for locking, else - * return true with tag value in @tag. 
- */ -static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) -{ - int tag; - bool ret = false; - unsigned long tmp; - - if (!tag_out) - goto out; - - do { - tmp = ~hba->lrb_in_use; - tag = find_last_bit(&tmp, hba->nutrs); - if (tag >= hba->nutrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); - - *tag_out = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) -{ - clear_bit_unlock(tag, &hba->lrb_in_use); -} - /** * ufshcd_exec_dev_cmd - API for sending device management requests * @hba: UFS hba @@ -2673,6 +2623,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err; int tag; @@ -2686,7 +2638,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by SCSI request timeout. */ - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -2711,8 +2669,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, err ? "query_complete_err" : "query_complete"); out_put_tag: - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -3575,7 +3533,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba) ret = ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) dev_err(hba->dev, - "dme-reset: error code %d\n", ret); + "dme-enable: error code %d\n", ret); return ret; } @@ -4797,19 +4755,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * ufshcd_uic_cmd_compl - handle completion of uic command * @hba: per adapter instance * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { hba->active_uic_cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); hba->active_uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); complete(&hba->active_uic_cmd->done); + retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { complete(hba->uic_async_done); + retval = IRQ_HANDLED; + } + return retval; } /** @@ -4835,7 +4803,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, cmd->result = result; /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - clear_bit_unlock(index, &hba->lrb_in_use); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); @@ -4857,16 +4824,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, hba->outstanding_reqs ^= completed_reqs; ufshcd_clk_scaling_update_busy(hba); - - /* we might have free'd some tags above */ - wake_up(&hba->dev_cmd.tag_wq); } /** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter 
instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { unsigned long completed_reqs; u32 tr_doorbell; @@ -4885,7 +4853,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - __ufshcd_transfer_req_compl(hba, completed_reqs); + if (completed_reqs) { + __ufshcd_transfer_req_compl(hba, completed_reqs); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } } /** @@ -5095,7 +5068,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); - hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5407,67 +5379,84 @@ out: /** * ufshcd_update_uic_error - check and set fatal UIC error flags. * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_update_uic_error(struct ufs_hba *hba) +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) { u32 reg; + irqreturn_t retval = IRQ_NONE; /* PHY layer lane error */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); /* Ignore LINERESET indication, as this is not an error */ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && - (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { /* * To know whether this error is fatal or not, DB timeout * must be checked but this error is handled separately. */ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg); + retval |= IRQ_HANDLED; } /* PA_INIT_ERROR is fatal and needs UIC reset */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); - if (reg) + if ((reg & UIC_DATA_LINK_LAYER_ERROR) && + (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg); - if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) - hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; - else if (hba->dev_quirks & - UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { - if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) - hba->uic_error |= - UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; - else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) - hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } + retval |= IRQ_HANDLED; } /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); - if (reg) { + if ((reg & UIC_NETWORK_LAYER_ERROR) && + (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg); hba->uic_error |= UFSHCD_UIC_NL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); - if (reg) { + if ((reg & UIC_TRANSPORT_LAYER_ERROR) && + (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg); hba->uic_error |= UFSHCD_UIC_TL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, 
REG_UIC_ERROR_CODE_DME); - if (reg) { + if ((reg & UIC_DME_ERROR) && + (reg & UIC_DME_ERROR_CODE_MASK)) { ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg); hba->uic_error |= UFSHCD_UIC_DME_ERROR; + retval |= IRQ_HANDLED; } dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", __func__, hba->uic_error); + return retval; } static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, u32 intr_mask) { - if (!ufshcd_is_auto_hibern8_supported(hba)) + if (!ufshcd_is_auto_hibern8_supported(hba) || + !ufshcd_is_auto_hibern8_enabled(hba)) return false; if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) @@ -5484,10 +5473,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, /** * ufshcd_check_errors - Check for errors that need s/w attention * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_check_errors(struct ufs_hba *hba) +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) { bool queue_eh_work = false; + irqreturn_t retval = IRQ_NONE; if (hba->errors & INT_FATAL_ERRORS) { ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors); @@ -5496,7 +5490,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba) if (hba->errors & UIC_ERROR) { hba->uic_error = 0; - ufshcd_update_uic_error(hba); + retval = ufshcd_update_uic_error(hba); if (hba->uic_error) queue_eh_work = true; } @@ -5544,6 +5538,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } schedule_work(&hba->eh_work); } + retval |= IRQ_HANDLED; } /* * if (!queue_eh_work) - @@ -5551,44 +5546,81 @@ static void ufshcd_check_errors(struct ufs_hba *hba) * itself without s/w intervention or errors that will be * handled by the SCSI core layer. */ + return retval; +} + +struct ctm_info { + struct ufs_hba *hba; + unsigned long pending; + unsigned int ncpl; +}; + +static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved) +{ + struct ctm_info *const ci = priv; + struct completion *c; + + WARN_ON_ONCE(reserved); + if (test_bit(req->tag, &ci->pending)) + return true; + ci->ncpl++; + c = req->end_io_data; + if (c) + complete(c); + return true; } /** * ufshcd_tmc_handler - handle task management function completion * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_tmc_handler(struct ufs_hba *hba) +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) { - u32 tm_doorbell; + struct request_queue *q = hba->tmf_queue; + struct ctm_info ci = { + .hba = hba, + .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL), + }; - tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - wake_up(&hba->tm_wq); + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci); + return ci.ncpl ? 
IRQ_HANDLED : IRQ_NONE; } /** * ufshcd_sl_intr - Interrupt service routine * @hba: per adapter instance * @intr_status: contains interrupts generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + hba->errors = UFSHCD_ERROR_MASK & intr_status; if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); if (hba->errors) - ufshcd_check_errors(hba); + retval |= ufshcd_check_errors(hba); if (intr_status & UFSHCD_UIC_MASK) - ufshcd_uic_cmd_compl(hba, intr_status); + retval |= ufshcd_uic_cmd_compl(hba, intr_status); if (intr_status & UTP_TASK_REQ_COMPL) - ufshcd_tmc_handler(hba); + retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - ufshcd_transfer_req_compl(hba); + retval |= ufshcd_transfer_req_compl(hba); + + return retval; } /** @@ -5596,12 +5628,13 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * @irq: irq number * @__hba: pointer to adapter instance * - * Returns IRQ_HANDLED - If interrupt is valid - * IRQ_NONE - If invalid interrupt + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status; + u32 intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; @@ -5615,18 +5648,22 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) * read, make sure we handle them by checking the interrupt status * again in a loop until we process all of the reqs before returning. */ - do { + while (intr_status && retries--) { enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - if (enabled_intr_status) { - ufshcd_sl_intr(hba, enabled_intr_status); - retval = IRQ_HANDLED; - } + if (enabled_intr_status) + retval |= ufshcd_sl_intr(hba, enabled_intr_status); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - } while (intr_status && --retries); + } + + if (retval == IRQ_NONE) { + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", + __func__, intr_status); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + } spin_unlock(hba->host->host_lock); return retval; @@ -5656,33 +5693,36 @@ out: static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, struct utp_task_req_desc *treq, u8 tm_function) { + struct request_queue *q = hba->tmf_queue; struct Scsi_Host *host = hba->host; + DECLARE_COMPLETION_ONSTACK(wait); + struct request *req; unsigned long flags; - int free_slot, task_tag, err; + int task_tag, err; /* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by %TM_CMD_TIMEOUT. + * blk_get_request() is used here only to get a free tag. 
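
The task-management path here allocates a block-layer request purely to reserve a unique tag, and parks a completion in req->end_io_data for the interrupt handler (ufshcd_compl_tm() above) to signal. A condensed sketch of that lifecycle; the doorbell step and the 100 ms timeout are placeholders chosen only for illustration:

#include <linux/blk-mq.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/jiffies.h>

static int issue_with_borrowed_tag(struct request_queue *q)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	int ret = 0;

	/* The request is never issued; it only pins a free tag. */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->end_io_data = &wait;

	/* ... program the hardware with req->tag and ring the doorbell ... */

	if (!wait_for_completion_io_timeout(&wait, msecs_to_jiffies(100)))
		ret = -ETIMEDOUT;

	/* Clear the pointer first so a late IRQ cannot touch the stack. */
	req->end_io_data = NULL;
	blk_put_request(req);
	return ret;
}
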
*/ - wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED); + req->end_io_data = &wait; ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); - task_tag = hba->nutrs + free_slot; + blk_mq_start_request(req); + task_tag = req->tag; treq->req_header.dword_0 |= cpu_to_be32(task_tag); - memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); /* send command to the controller */ - __set_bit(free_slot, &hba->outstanding_tasks); + __set_bit(task_tag, &hba->outstanding_tasks); /* Make sure descriptors are ready before ringing the task doorbell */ wmb(); - ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); @@ -5691,33 +5731,35 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); /* wait until the task management command is completed */ - err = wait_event_timeout(hba->tm_wq, - test_bit(free_slot, &hba->tm_condition), + err = wait_for_completion_io_timeout(&wait, msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { + /* + * Make sure that ufshcd_compl_tm() does not trigger a + * use-after-free. + */ + req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); - if (ufshcd_clear_tm_cmd(hba, free_slot)) - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n", - __func__, free_slot); + if (ufshcd_clear_tm_cmd(hba, task_tag)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, task_tag); err = -ETIMEDOUT; } else { err = 0; - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); } spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); + __clear_bit(task_tag, &hba->outstanding_tasks); spin_unlock_irqrestore(hba->host->host_lock, flags); - clear_bit(free_slot, &hba->tm_condition); - ufshcd_put_tm_slot(hba, free_slot); - wake_up(&hba->tm_tag_wq); - ufshcd_release(hba); + blk_put_request(req); + return err; } @@ -5791,6 +5833,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, int cmd_type, enum query_opcode desc_op) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err = 0; int tag; @@ -5800,7 +5844,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -5874,8 +5924,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, } } - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -5962,19 +6012,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct Scsi_Host *host; struct 
ufs_hba *hba; - unsigned int tag; u32 pos; int err; - u8 resp = 0xF; - struct ufshcd_lrb *lrbp; + u8 resp = 0xF, lun; unsigned long flags; host = cmd->device->host; hba = shost_priv(host); - tag = cmd->request->tag; - lrbp = &hba->lrb[tag]; - err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); + lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { if (!err) err = resp; @@ -5983,7 +6030,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) /* clear the commands that were pending for corresponding LUN */ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lrbp->lun) { + if (hba->lrb[pos].lun == lun) { err = ufshcd_clear_cmd(hba, pos); if (err) break; @@ -6129,7 +6176,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* command completed already */ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", __func__, tag); - goto out; + goto cleanup; } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", @@ -6163,6 +6210,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } +cleanup: scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); @@ -6170,9 +6218,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); - clear_bit_unlock(tag, &hba->lrb_in_use); - wake_up(&hba->dev_cmd.tag_wq); - out: if (!err) { err = SUCCESS; @@ -8148,11 +8193,7 @@ int ufshcd_shutdown(struct ufs_hba *hba) if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; - if (pm_runtime_suspended(hba->dev)) { - ret = ufshcd_runtime_resume(hba); - if (ret) - goto out; - } + pm_runtime_get_sync(hba->dev); ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); out: @@ -8172,6 +8213,9 @@ void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); + blk_cleanup_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + blk_cleanup_queue(hba->cmd_queue); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -8250,6 +8294,18 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); +/* This function exists because blk_mq_alloc_tag_set() requires this. 
*/ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -8319,10 +8375,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; - /* Initailize wait queue for task management */ - init_waitqueue_head(&hba->tm_wq); - init_waitqueue_head(&hba->tm_tag_wq); - /* Initialize work queues */ INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); @@ -8335,9 +8387,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_rwsem(&hba->clk_scaling_lock); - /* Initialize device management tag acquire wait queue */ - init_waitqueue_head(&hba->dev_cmd.tag_wq); - ufshcd_init_clk_gating(hba); ufshcd_init_clk_scaling(hba); @@ -8371,6 +8420,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto exit_gating; } + hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); + if (IS_ERR(hba->cmd_queue)) { + err = PTR_ERR(hba->cmd_queue); + goto out_remove_scsi_host; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto free_cmd_queue; + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + /* Reset the attached device */ ufshcd_vops_device_reset(hba); @@ -8380,7 +8450,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); - goto out_remove_scsi_host; + goto free_tmf_queue; } /* @@ -8417,6 +8487,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) return 0; +free_tmf_queue: + blk_cleanup_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +free_cmd_queue: + blk_cleanup_queue(hba->cmd_queue); out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 5260e594e0b9..92ef6e6a3e51 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -55,6 +55,7 @@ #include <linux/clk.h> #include <linux/completion.h> #include <linux/regulator/consumer.h> +#include <linux/bitfield.h> #include "unipro.h" #include <asm/irq.h> @@ -212,13 +213,11 @@ struct ufs_query { * @type: device management command type - Query, NOP OUT * @lock: lock to allow one command at a time * @complete: internal commands completion - * @tag_wq: wait queue until free command slot is available */ struct ufs_dev_cmd { enum dev_cmd_type type; struct mutex lock; struct completion *complete; - wait_queue_head_t tag_wq; struct ufs_query query; }; @@ -483,7 +482,7 @@ struct ufs_stats { * @host: Scsi_Host instance of the driver * @dev: device handle * @lrb: local reference block - * @lrb_in_use: lrb in use + * @cmd_queue: Used to allocate command tags from hba->host->tag_set. 
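
The tmf_queue created in ufshcd_init() above is a tag allocator only: its queue_rq callback must exist to satisfy blk_mq_alloc_tag_set(), but it is never expected to run. A self-contained sketch of that setup under the same assumption; all names here are illustrative:

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/string.h>

/* Requests from this set are never queued, so queue_rq is a stub. */
static blk_status_t tag_only_queue_rq(struct blk_mq_hw_ctx *hctx,
				      const struct blk_mq_queue_data *qd)
{
	WARN_ON_ONCE(true);
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops tag_only_ops = {
	.queue_rq = tag_only_queue_rq,
};

static struct request_queue *alloc_tag_only_queue(struct blk_mq_tag_set *set,
						  unsigned int depth)
{
	struct request_queue *q;
	int err;

	memset(set, 0, sizeof(*set));
	set->nr_hw_queues = 1;
	set->queue_depth = depth;
	set->ops = &tag_only_ops;
	set->flags = BLK_MQ_F_NO_SCHED;

	err = blk_mq_alloc_tag_set(set);
	if (err < 0)
		return ERR_PTR(err);

	q = blk_mq_init_queue(set);
	if (IS_ERR(q))
		blk_mq_free_tag_set(set);
	return q;
}
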
* @outstanding_tasks: Bits representing outstanding task requests * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities @@ -495,11 +494,9 @@ struct ufs_stats { * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command - * @tm_wq: wait queue for task management - * @tm_tag_wq: wait queue for free task management slots - * @tm_slots_in_use: bit map of task management request slots in use + * @tmf_tag_set: TMF tag set. + * @tmf_queue: Used to allocate TMF tags. * @pwr_done: completion for power mode change - * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits @@ -542,6 +539,7 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; + struct request_queue *cmd_queue; /* * This field is to keep a reference to "scsi_device" corresponding to * "UFS device" W-LU. @@ -562,7 +560,6 @@ struct ufs_hba { u32 ahit; struct ufshcd_lrb *lrb; - unsigned long lrb_in_use; unsigned long outstanding_tasks; unsigned long outstanding_reqs; @@ -644,10 +641,8 @@ struct ufs_hba { /* Device deviations from standard UFS device spec. */ unsigned int dev_quirks; - wait_queue_head_t tm_wq; - wait_queue_head_t tm_tag_wq; - unsigned long tm_condition; - unsigned long tm_slots_in_use; + struct blk_mq_tag_set tmf_tag_set; + struct request_queue *tmf_queue; struct uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; @@ -771,6 +766,11 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT); } +static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) +{ + return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? 
true : false; +} + #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index dbb75cd28dc8..c2961d37cc1c 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -195,7 +195,7 @@ enum { /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ #define UIC_DATA_LINK_LAYER_ERROR 0x80000000 -#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF #define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2 #define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4 #define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8 diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index 526e3215d8fe..130c798921b5 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -283,6 +283,7 @@ EXPORT_SYMBOL_GPL(slim_register_controller); /* slim_remove_device: Remove the effect of slim_add_device() */ static void slim_remove_device(struct slim_device *sbdev) { + of_node_put(sbdev->dev.of_node); device_unregister(&sbdev->dev); } @@ -301,8 +302,6 @@ int slim_unregister_controller(struct slim_controller *ctrl) { /* Remove all clients */ device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device); - /* Enter Clock Pause */ - slim_ctrl_clk_pause(ctrl, false, 0); ida_simple_remove(&ctrl_ida, ctrl->id); return 0; @@ -326,8 +325,8 @@ void slim_report_absent(struct slim_device *sbdev) mutex_lock(&ctrl->lock); sbdev->is_laddr_valid = false; mutex_unlock(&ctrl->lock); - - ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); + if (!ctrl->get_laddr) + ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN); } EXPORT_SYMBOL_GPL(slim_report_absent); diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 29fbab55c3b3..b60541c3f72d 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1201,6 +1201,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + if (!ctrl->qmi.handle) + return 0; + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP) ret = qcom_slim_ngd_power_up(ctrl); if (ret) { @@ -1273,9 +1276,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, { struct qcom_slim_ngd_qmi *qmi = container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); + struct qcom_slim_ngd_ctrl *ctrl = + container_of(qmi, struct qcom_slim_ngd_ctrl, qmi); qmi->svc_info.sq_node = 0; qmi->svc_info.sq_port = 0; + + qcom_slim_ngd_enable(ctrl, false); } static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { @@ -1354,7 +1361,6 @@ static int of_qcom_slim_ngd_register(struct device *parent, ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; ngd->pdev->dev.of_node = node; ctrl->ngd = ngd; - platform_set_drvdata(ngd->pdev, ctrl); platform_device_add(ngd->pdev); ngd->base = ctrl->base + ngd->id * data->offset + @@ -1369,12 +1375,13 @@ static int of_qcom_slim_ngd_register(struct device *parent, static int qcom_slim_ngd_probe(struct platform_device *pdev) { - struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent); int ret; ctrl->ctrl.dev = dev; + platform_set_drvdata(pdev, ctrl); pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); pm_runtime_set_suspended(dev); @@ -1493,6 +1500,9 @@ static int 
__maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + if (!ctrl->qmi.handle) + return 0; + ret = qcom_slim_qmi_power_request(ctrl, false); if (ret && ret != -EBUSY) dev_info(ctrl->dev, "slim resource not idle:%d\n", ret); diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c index c655f5f92b12..d0329ad170d1 100644 --- a/drivers/soc/amlogic/meson-canvas.c +++ b/drivers/soc/amlogic/meson-canvas.c @@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev) * current state, this driver probe cannot return -EPROBE_DEFER */ canvas = dev_get_drvdata(&canvas_pdev->dev); - if (!canvas) + if (!canvas) { + put_device(&canvas_pdev->dev); return ERR_PTR(-EINVAL); + } return canvas; } diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index f3d8d53ab84d..538d7aab8db5 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -11,6 +11,7 @@ */ #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/interrupt.h> #include <linux/fs.h> #include <linux/kfifo.h> @@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel { struct aspeed_lpc_snoop { struct regmap *regmap; int irq; + struct clk *clk; struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS]; }; @@ -93,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer, return -EINTR; } ret = kfifo_to_user(&chan->fifo, buffer, count, &copied); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static __poll_t snoop_file_poll(struct file *file, @@ -282,22 +286,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev) return -ENODEV; } + lpc_snoop->clk = devm_clk_get(dev, NULL); + if (IS_ERR(lpc_snoop->clk)) { + rc = PTR_ERR(lpc_snoop->clk); + if (rc != -EPROBE_DEFER) + dev_err(dev, "couldn't get clock\n"); + return rc; + } + rc = clk_prepare_enable(lpc_snoop->clk); + if (rc) { + dev_err(dev, "couldn't enable clock\n"); + return rc; + } + rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev); if (rc) - return rc; + goto err; rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port); if (rc) - return rc; + goto err; /* Configuration of 2nd snoop channel port is optional */ if (of_property_read_u32_index(dev->of_node, "snoop-ports", 1, &port) == 0) { rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port); - if (rc) + if (rc) { aspeed_lpc_disable_snoop(lpc_snoop, 0); + goto err; + } } + return 0; + +err: + clk_disable_unprepare(lpc_snoop->clk); + return rc; } @@ -309,6 +333,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev) aspeed_lpc_disable_snoop(lpc_snoop, 0); aspeed_lpc_disable_snoop(lpc_snoop, 1); + clk_disable_unprepare(lpc_snoop->clk); + return 0; } diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index 096a83cf0caf..4b4174597150 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -264,8 +264,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs) return soc_dev; } +static const struct of_device_id at91_soc_allowed_list[] __initconst = { + { .compatible = "atmel,at91rm9200", }, + { .compatible = "atmel,at91sam9", }, + { .compatible = "atmel,sama5", }, + { .compatible = "atmel,samv7", }, + { } +}; + static int __init atmel_soc_device_init(void) { + struct device_node *np = of_find_node_by_path("/"); + + if (!of_match_node(at91_soc_allowed_list, np)) + return 0; + at91_soc_init(socs); return 0; diff --git 
a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index 70014ecce2a7..7f397b4ad878 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -95,7 +95,6 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) { int error; struct fsl_mc_device_irq *irq; - cpumask_t mask; irq = dpio_dev->irqs[0]; error = devm_request_irq(&dpio_dev->dev, @@ -112,9 +111,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu) } /* set the affinity hint */ - cpumask_clear(&mask); - cpumask_set_cpu(cpu, &mask); - if (irq_set_affinity_hint(irq->msi_desc->irq, &mask)) + if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu))) dev_err(&dpio_dev->dev, "irq_set_affinity failed irq %d cpu %d\n", irq->msi_desc->irq, cpu); @@ -233,10 +230,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_allocate_irqs; } - err = register_dpio_irq_handlers(dpio_dev, desc.cpu); - if (err) - goto err_register_dpio_irq; - priv->io = dpaa2_io_create(&desc, dev); if (!priv->io) { dev_err(dev, "dpaa2_io_create failed\n"); @@ -244,6 +237,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_dpaa2_io_create; } + err = register_dpio_irq_handlers(dpio_dev, desc.cpu); + if (err) + goto err_register_dpio_irq; + dev_info(dev, "probed\n"); dev_dbg(dev, " receives_notifications = %d\n", desc.receives_notifications); diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index f4fb527d8301..c5dd026fe889 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c @@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid) } done: put_affine_portal(); - return 0; + return err; } struct gen_pool *bm_bpalloc; diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index bf68d86d80ee..95f9e4805245 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -186,7 +186,7 @@ struct qm_eqcr_entry { __be32 tag; struct qm_fd fd; u8 __reserved3[32]; -} __packed; +} __packed __aligned(8); #define QM_EQCR_VERB_VBIT 0x80 #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 98b9d9a902ae..90a8b2c0676f 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) { struct imx_pm_domain *pd = to_imx_pm_domain(genpd); - int i, ret, sw, sw2iso; - u32 val; + int i, ret; + u32 val, req; if (pd->supply) { ret = regulator_enable(pd->supply); @@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, 0x1, 0x1); - /* Read ISO and ISO2SW power up delays */ - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); - sw = val & 0x3f; - sw2iso = (val >> 8) & 0x3f; - /* Request GPC to power up domain */ - val = BIT(pd->cntr_pdn_bit + 1); - regmap_update_bits(pd->regmap, GPC_CNTR, val, val); + req = BIT(pd->cntr_pdn_bit + 1); + regmap_update_bits(pd->regmap, GPC_CNTR, req, req); - /* Wait ISO + ISO2SW IPG clock cycles */ - udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); + /* Wait for the PGC to handle the request */ + ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), + 1, 50); + if (ret) + pr_err("powerup request on domain %s timed out\n", genpd->name); + + /* Wait for 
reset to propagate through peripherals */ + usleep_range(5, 10); /* Disable reset clocks for all devices in the domain */ for (i = 0; i < pd->num_clks; i++) @@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = { .rd_table = &access_table, .wr_table = &access_table, .max_register = 0x2ac, + .fast_io = true, }; static struct generic_pm_domain *imx_gpc_onecell_domains[] = { diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 73a852b2f417..34eec26b0c1f 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -258,7 +258,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, spin_unlock_irqrestore(&client->lock, flags); } - mbox_send_message(client->chan, pkt); + err = mbox_send_message(client->chan, pkt); + if (err < 0) + return err; /* We can send next packet immediately, so just call txdone. */ mbox_client_txdone(client->chan, 0); diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 503222d0d0da..75f25f08245f 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -446,6 +446,7 @@ static void mtk_register_power_domains(struct platform_device *pdev, for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; struct generic_pm_domain *genpd = &scpd->genpd; + bool on; /* * Initially turn on all domains to make the domains usable @@ -453,9 +454,9 @@ static void mtk_register_power_domains(struct platform_device *pdev, * software. The unused domains will be switched off during * late_init time. */ - genpd->power_on(genpd); + on = !WARN_ON(genpd->power_on(genpd) < 0); - pm_genpd_init(genpd, NULL, false); + pm_genpd_init(genpd, NULL, !on); } /* diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 24cd193dec55..eba7f76f9d61 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, break; } + if (phdr->p_filesz > phdr->p_memsz) { + dev_err(dev, + "refusing to load segment %d with p_filesz > p_memsz\n", + i); + ret = -EINVAL; + break; + } + ptr = mem_region + offset; if (phdr->p_filesz && phdr->p_offset < fw->size) { @@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, break; } + if (seg_fw->size != phdr->p_filesz) { + dev_err(dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + release_firmware(seg_fw); + ret = -EINVAL; + break; + } + release_firmware(seg_fw); } diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 7d622ea1274e..2e66098e8297 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -282,10 +282,23 @@ static void geni_se_select_fifo_mode(struct geni_se *se) static void geni_se_select_dma_mode(struct geni_se *se) { + u32 proto = geni_se_read_proto(se); u32 val; geni_se_irq_clear(se); + val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); + if (proto != GENI_SE_UART) { + val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); + val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); + } + writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN); + + val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN); + if (proto != GENI_SE_UART) + val &= ~S_CMD_DONE_EN; + writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN); + val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN); val |= GENI_DMA_MODE_EN; writel_relaxed(val, se->base + 
SE_GENI_DMA_MODE_EN); @@ -644,7 +657,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L); writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H); writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR); - writel_relaxed(len, se->base + SE_DMA_TX_LEN); + writel(len, se->base + SE_DMA_TX_LEN); return 0; } EXPORT_SYMBOL(geni_se_tx_dma_prep); @@ -681,7 +694,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H); /* RX does not have EOT buffer type bit. So just reset RX_ATTR */ writel_relaxed(0, se->base + SE_DMA_RX_ATTR); - writel_relaxed(len, se->base + SE_DMA_RX_LEN); + writel(len, se->base + SE_DMA_RX_LEN); return 0; } EXPORT_SYMBOL(geni_se_rx_dma_prep); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index e278fc11fe5c..8924fcd9f5f5 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -148,7 +148,7 @@ int rpmh_rsc_invalidate(struct rsc_drv *drv) static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, const struct tcs_request *msg) { - int type, ret; + int type; struct tcs_group *tcs; switch (msg->state) { @@ -169,19 +169,10 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, * If we are making an active request on a RSC that does not have a * dedicated TCS for active state use, then re-purpose a wake TCS to * send active votes. - * NOTE: The driver must be aware that this RSC does not have a - * dedicated AMC, and therefore would invalidate the sleep and wake - * TCSes before making an active state request. */ tcs = get_tcs_of_type(drv, type); - if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) { + if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) tcs = get_tcs_of_type(drv, WAKE_TCS); - if (tcs->num_tcs) { - ret = rpmh_rsc_invalidate(drv); - if (ret) - return ERR_PTR(ret); - } - } return tcs; } @@ -201,6 +192,42 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, return NULL; } +static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger) +{ + u32 enable; + + /* + * HW req: Clear the DRV_CONTROL and enable TCS again + * While clearing ensure that the AMC mode trigger is cleared + * and then the mode enable is cleared. 
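
The geni_se_tx_dma_prep()/geni_se_rx_dma_prep() hunks above switch the final length write from writel_relaxed() to writel() because that write is what starts the DMA: it must not be reordered ahead of the descriptor setup. A generic sketch of the convention, with made-up register offsets:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void kick_dma_example(void __iomem *base, dma_addr_t addr, u32 len)
{
	/* Descriptor fields may be posted without barriers... */
	writel_relaxed(lower_32_bits(addr), base + 0x00);
	writel_relaxed(upper_32_bits(addr), base + 0x04);
	/*
	 * ...but the write that triggers the transfer uses writel(),
	 * whose implied barrier orders all prior writes before it.
	 */
	writel(len, base + 0x08);
}
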
+ */ + enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); + enable &= ~TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable &= ~TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + + if (trigger) { + /* Enable the AMC mode on the TCS and then trigger the TCS */ + enable = TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable |= TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + } +} + +static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable) +{ + u32 data; + + data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0); + if (enable) + data |= BIT(tcs_id); + else + data &= ~BIT(tcs_id); + write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data); +} + /** * tcs_tx_done: TX Done interrupt handler */ @@ -237,6 +264,14 @@ static irqreturn_t tcs_tx_done(int irq, void *p) } trace_rpmh_tx_done(drv, i, req, err); + + /* + * If wake tcs was re-purposed for sending active + * votes, clear AMC trigger & enable modes and + * disable interrupt for this TCS + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + __tcs_set_trigger(drv, i, false); skip: /* Reclaim the TCS */ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0); @@ -244,6 +279,13 @@ skip: write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i)); spin_lock(&drv->lock); clear_bit(i, drv->tcs_in_use); + /* + * Disable interrupt for WAKE TCS to avoid being + * spammed with interrupts coming when the solver + * sends its wake votes. + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + enable_tcs_irq(drv, i, false); spin_unlock(&drv->lock); if (req) rpmh_tx_done(req, err); @@ -285,28 +327,6 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable); } -static void __tcs_trigger(struct rsc_drv *drv, int tcs_id) -{ - u32 enable; - - /* - * HW req: Clear the DRV_CONTROL and enable TCS again - * While clearing ensure that the AMC mode trigger is cleared - * and then the mode enable is cleared. - */ - enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); - enable &= ~TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable &= ~TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - - /* Enable the AMC mode on the TCS and then trigger the TCS */ - enable = TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable |= TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); -} - static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, const struct tcs_request *msg) { @@ -377,10 +397,20 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg) tcs->req[tcs_id - tcs->offset] = msg; set_bit(tcs_id, drv->tcs_in_use); + if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) { + /* + * Clear previously programmed WAKE commands in selected + * repurposed TCS to avoid triggering them. 
tcs->slots will be + * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate() + */ + write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); + write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); + enable_tcs_irq(drv, tcs_id, true); + } spin_unlock(&drv->lock); __tcs_buffer_write(drv, tcs_id, 0, msg); - __tcs_trigger(drv, tcs_id); + __tcs_set_trigger(drv, tcs_id, true); done_write: spin_unlock_irqrestore(&tcs->lock, flags); @@ -685,6 +715,7 @@ static struct platform_driver rpmh_driver = { .driver = { .name = "rpmh", .of_match_table = rpmh_drv_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 035091fd44b8..13e1b450f296 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -119,6 +119,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, { struct cache_req *req; unsigned long flags; + u32 old_sleep_val, old_wake_val; spin_lock_irqsave(&ctrlr->cache_lock, flags); req = __find_req(ctrlr, cmd->addr); @@ -133,26 +134,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, req->addr = cmd->addr; req->sleep_val = req->wake_val = UINT_MAX; - INIT_LIST_HEAD(&req->list); list_add_tail(&req->list, &ctrlr->cache); existing: + old_sleep_val = req->sleep_val; + old_wake_val = req->wake_val; + switch (state) { case RPMH_ACTIVE_ONLY_STATE: - if (req->sleep_val != UINT_MAX) - req->wake_val = cmd->data; - break; case RPMH_WAKE_ONLY_STATE: req->wake_val = cmd->data; break; case RPMH_SLEEP_STATE: req->sleep_val = cmd->data; break; - default: - break; } - ctrlr->dirty = true; + ctrlr->dirty |= (req->sleep_val != old_sleep_val || + req->wake_val != old_wake_val) && + req->sleep_val != UINT_MAX && + req->wake_val != UINT_MAX; + unlock: spin_unlock_irqrestore(&ctrlr->cache_lock, flags); @@ -287,6 +289,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) spin_lock_irqsave(&ctrlr->cache_lock, flags); list_add_tail(&req->list, &ctrlr->batch_cache); + ctrlr->dirty = true; spin_unlock_irqrestore(&ctrlr->cache_lock, flags); } @@ -314,18 +317,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr) return ret; } -static void invalidate_batch(struct rpmh_ctrlr *ctrlr) -{ - struct batch_cache_req *req, *tmp; - unsigned long flags; - - spin_lock_irqsave(&ctrlr->cache_lock, flags); - list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) - kfree(req); - INIT_LIST_HEAD(&ctrlr->batch_cache); - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); -} - /** * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the * batch to finish. @@ -465,6 +456,13 @@ int rpmh_flush(const struct device *dev) return 0; } + /* Invalidate the TCSes first to avoid stale data */ + do { + ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); + } while (ret == -EAGAIN); + if (ret) + return ret; + /* First flush the cached batch requests */ ret = flush_batch(ctrlr); if (ret) @@ -496,25 +494,25 @@ int rpmh_flush(const struct device *dev) EXPORT_SYMBOL(rpmh_flush); /** - * rpmh_invalidate: Invalidate all sleep and active sets - * sets. + * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache * * @dev: The device making the request * - * Invalidate the sleep and active values in the TCS blocks. + * Invalidate the sleep and wake values in batch_cache. 
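
The cache_rpm_request() change above stops marking the controller dirty unconditionally: it now compares against the previously cached votes and treats UINT_MAX as "no vote recorded yet". The rule, restated as a standalone predicate; the struct and names are illustrative stand-ins for struct cache_req:

#include <linux/kernel.h>
#include <linux/types.h>

struct vote_pair {
	u32 sleep_val;	/* UINT_MAX when no sleep vote is cached */
	u32 wake_val;	/* UINT_MAX when no wake vote is cached */
};

static bool makes_ctrlr_dirty(const struct vote_pair *req,
			      u32 old_sleep, u32 old_wake)
{
	return (req->sleep_val != old_sleep || req->wake_val != old_wake) &&
	       req->sleep_val != UINT_MAX && req->wake_val != UINT_MAX;
}
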
*/ int rpmh_invalidate(const struct device *dev) { struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); - int ret; + struct batch_cache_req *req, *tmp; + unsigned long flags; - invalidate_batch(ctrlr); + spin_lock_irqsave(&ctrlr->cache_lock, flags); + list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) + kfree(req); + INIT_LIST_HEAD(&ctrlr->batch_cache); ctrlr->dirty = true; + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); - do { - ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); - } while (ret == -EAGAIN); - - return ret; + return 0; } EXPORT_SYMBOL(rpmh_invalidate); diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index c7300d54e444..42e0b8f647ae 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -318,15 +318,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p, static int smp2p_update_bits(void *data, u32 mask, u32 value) { struct smp2p_entry *entry = data; + unsigned long flags; u32 orig; u32 val; - spin_lock(&entry->lock); + spin_lock_irqsave(&entry->lock, flags); val = orig = readl(entry->value); val &= ~mask; val |= value; writel(val, entry->value); - spin_unlock(&entry->lock); + spin_unlock_irqrestore(&entry->lock, flags); if (val != orig) qcom_smp2p_kick(entry->smp2p); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index a39ea5061dc5..176696f8f38d 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -428,6 +428,8 @@ static int qcom_socinfo_probe(struct platform_device *pdev) qs->attr.family = "Snapdragon"; qs->attr.machine = socinfo_machine(&pdev->dev, le32_to_cpu(info->id)); + qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", + le32_to_cpu(info->id)); qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", SOCINFO_MAJOR(le32_to_cpu(info->ver)), SOCINFO_MINOR(le32_to_cpu(info->ver))); diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c index 54b616ad4a62..beb1c7211c3d 100644 --- a/drivers/soc/renesas/rmobile-sysc.c +++ b/drivers/soc/renesas/rmobile-sysc.c @@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void) pmd = of_get_child_by_name(np, "pm-domains"); if (!pmd) { + iounmap(base); pr_warn("%pOF lacks pm-domains node\n", np); continue; } diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index c8ef05d6b8c7..25df4406ce52 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -130,6 +130,7 @@ config SOC_TEGRA_FLOWCTRL config SOC_TEGRA_PMC bool + select GENERIC_PINCONF config SOC_TEGRA_POWERGATE_BPMP def_bool y diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c index 70d3f6e1aa33..8050742237b7 100644 --- a/drivers/soc/tegra/fuse/speedo-tegra210.c +++ b/drivers/soc/tegra/fuse/speedo-tegra210.c @@ -94,7 +94,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num) unsigned int i; for (i = 0; i < num; i++) - if (value < speedos[num]) + if (value < speedos[i]) return i; return -EINVAL; diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index 6285cd8efb21..981a9014c9c4 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -759,8 +759,9 @@ static int knav_dma_probe(struct platform_device *pdev) pm_runtime_enable(kdev->dev); ret = pm_runtime_get_sync(kdev->dev); if (ret < 0) { + pm_runtime_put_noidle(kdev->dev); dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); - return ret; + goto err_pm_disable; } /* Initialise all packet dmas */ @@ -774,7 +775,8 @@ static int 
knav_dma_probe(struct platform_device *pdev) if (list_empty(&kdev->list)) { dev_err(dev, "no valid dma instance\n"); - return -ENODEV; + ret = -ENODEV; + goto err_put_sync; } debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, @@ -782,6 +784,13 @@ static int knav_dma_probe(struct platform_device *pdev) device_ready = true; return ret; + +err_put_sync: + pm_runtime_put_sync(kdev->dev); +err_pm_disable: + pm_runtime_disable(kdev->dev); + + return ret; } static int knav_dma_remove(struct platform_device *pdev) diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 1ccc9064e1eb..b8210479ec99 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1791,6 +1791,7 @@ static int knav_queue_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); dev_err(dev, "Failed to enable QMSS\n"); return ret; } @@ -1858,9 +1859,10 @@ static int knav_queue_probe(struct platform_device *pdev) if (ret) goto err; - regions = of_get_child_by_name(node, "descriptor-regions"); + regions = of_get_child_by_name(node, "descriptor-regions"); if (!regions) { dev_err(dev, "descriptor-regions not specified\n"); + ret = -ENODEV; goto err; } ret = knav_queue_setup_regions(kdev, regions); diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 01e76b58dd78..3fa162c1fde7 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -19,7 +19,7 @@ config XILINX_VCU config ZYNQMP_POWER bool "Enable Xilinx Zynq MPSoC Power Management driver" - depends on PM && ARCH_ZYNQMP + depends on PM && ZYNQMP_FIRMWARE default y help Say yes to enable power management support for ZyqnMP SoC. @@ -31,7 +31,7 @@ config ZYNQMP_POWER config ZYNQMP_PM_DOMAINS bool "Enable Zynq MPSoC generic PM domains" default y - depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE + depends on PM && ZYNQMP_FIRMWARE select PM_GENERIC_DOMAINS help Say yes to enable device power management through PM domains diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index fc53dbe57f85..d90f00d09257 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -113,6 +113,8 @@ static int sdw_delete_slave(struct device *dev, void *data) struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_bus *bus = slave->bus; + pm_runtime_disable(dev); + sdw_slave_debugfs_exit(slave); mutex_lock(&bus->bus_lock); @@ -523,7 +525,7 @@ static int sdw_program_device_num(struct sdw_bus *bus) struct sdw_slave *slave, *_s; struct sdw_slave_id id; struct sdw_msg msg; - bool found = false; + bool found; int count = 0, ret; u64 addr; @@ -555,6 +557,7 @@ static int sdw_program_device_num(struct sdw_bus *bus) sdw_extract_slave_id(bus, addr, &id); + found = false; /* Now compare with entries */ list_for_each_entry_safe(slave, _s, &bus->slaves, node) { if (sdw_compare_devid(slave, id) == 0) { diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 502ed4ec8f07..f7d0f63921dc 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -231,6 +231,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value) return -EAGAIN; } +/* + * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL + * need to be confirmed with a write to MCP_CONFIG_UPDATE + */ +static int cdns_update_config(struct sdw_cdns *cdns) +{ + int ret; + + ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, + 
CDNS_MCP_CONFIG_UPDATE_BIT); + if (ret < 0) + dev_err(cdns->dev, "Config update timedout\n"); + + return ret; +} + /* * debugfs */ @@ -352,10 +368,10 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns, if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) { no_ack = 1; dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n"); - if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) { - nack = 1; - dev_err_ratelimited(cdns->dev, "Msg NACK received\n"); - } + } + if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) { + nack = 1; + dev_err_ratelimited(cdns->dev, "Msg NACK received\n"); } } @@ -752,7 +768,38 @@ EXPORT_SYMBOL(sdw_cdns_thread); /* * init routines */ -static int _cdns_enable_interrupt(struct sdw_cdns *cdns) + +/** + * sdw_cdns_exit_reset() - Program reset parameters and start bus operations + * @cdns: Cadence instance + */ +int sdw_cdns_exit_reset(struct sdw_cdns *cdns) +{ + /* program maximum length reset to be safe */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, + CDNS_MCP_CONTROL_RST_DELAY, + CDNS_MCP_CONTROL_RST_DELAY); + + /* use hardware generated reset */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, + CDNS_MCP_CONTROL_HW_RST, + CDNS_MCP_CONTROL_HW_RST); + + /* enable bus operations with clock and data */ + cdns_updatel(cdns, CDNS_MCP_CONFIG, + CDNS_MCP_CONFIG_OP, + CDNS_MCP_CONFIG_OP_NORMAL); + + /* commit changes */ + return cdns_update_config(cdns); +} +EXPORT_SYMBOL(sdw_cdns_exit_reset); + +/** + * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config + * @cdns: Cadence instance + */ +int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) { u32 mask; @@ -784,24 +831,8 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns) cdns_writel(cdns, CDNS_MCP_INTMASK, mask); - return 0; -} - -/** - * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config - * @cdns: Cadence instance - */ -int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) -{ - int ret; - - _cdns_enable_interrupt(cdns); - ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, - CDNS_MCP_CONFIG_UPDATE_BIT); - if (ret < 0) - dev_err(cdns->dev, "Config update timedout\n"); - - return ret; + /* commit changes */ + return cdns_update_config(cdns); } EXPORT_SYMBOL(sdw_cdns_enable_interrupt); @@ -975,6 +1006,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL); cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL); + /* flush command FIFOs */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST, + CDNS_MCP_CONTROL_CMD_RST); + /* Set cmd accept mode */ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, CDNS_MCP_CONTROL_CMD_ACCEPT); @@ -997,13 +1032,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) /* Set cmd mode for Tx and Rx cmds */ val &= ~CDNS_MCP_CONFIG_CMD; - /* Set operation to normal */ - val &= ~CDNS_MCP_CONFIG_OP; - val |= CDNS_MCP_CONFIG_OP_NORMAL; - cdns_writel(cdns, CDNS_MCP_CONFIG, val); - return 0; + /* commit changes */ + return cdns_update_config(cdns); } EXPORT_SYMBOL(sdw_cdns_init); diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index 0b72b7094735..1a67728c5000 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -161,6 +161,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id); int sdw_cdns_init(struct sdw_cdns *cdns); int sdw_cdns_pdi_init(struct sdw_cdns *cdns, struct sdw_cdns_stream_config config); +int sdw_cdns_exit_reset(struct sdw_cdns *cdns); int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns); #ifdef CONFIG_DEBUG_FS diff --git 
a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index d1839707128a..a2da04946f0b 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -842,8 +842,9 @@ static int intel_create_dai(struct sdw_cdns *cdns, /* TODO: Read supported rates/formats from hardware */ for (i = off; i < (off + num); i++) { - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", - cdns->instance, i); + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, + "SDW%d Pin%d", + cdns->instance, i); if (!dais[i].name) return -ENOMEM; @@ -1049,8 +1050,6 @@ static int intel_probe(struct platform_device *pdev) if (ret) goto err_init; - ret = sdw_cdns_enable_interrupt(&sdw->cdns); - /* Read the PDI config and initialize cadence PDI */ intel_pdi_init(sdw, &config); ret = sdw_cdns_pdi_init(&sdw->cdns, config); @@ -1068,6 +1067,18 @@ static int intel_probe(struct platform_device *pdev) goto err_init; } + ret = sdw_cdns_enable_interrupt(&sdw->cdns); + if (ret < 0) { + dev_err(sdw->cdns.dev, "cannot enable interrupts\n"); + goto err_init; + } + + ret = sdw_cdns_exit_reset(&sdw->cdns); + if (ret < 0) { + dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n"); + goto err_init; + } + /* Register DAIs */ ret = intel_register_dai(sdw); if (ret) { diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 6473fa602f82..611f4f5bc36a 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -57,6 +57,8 @@ static int sdw_slave_add(struct sdw_bus *bus, list_del(&slave->node); mutex_unlock(&bus->bus_lock); put_device(&slave->dev); + + return ret; } sdw_slave_debugfs_init(slave); diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index e69f94a8c3a8..23accfedbf4d 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -702,6 +702,7 @@ error: kfree(wbuf); error_1: kfree(wr_msg); + bus->defer_msg.msg = NULL; return ret; } @@ -825,9 +826,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream) error: list_for_each_entry(m_rt, &stream->master_list, stream_node) { bus = m_rt->bus; - - kfree(bus->defer_msg.msg->buf); - kfree(bus->defer_msg.msg); + if (bus->defer_msg.msg) { + kfree(bus->defer_msg.msg->buf); + kfree(bus->defer_msg.msg); + } } msg_unlock: @@ -1355,8 +1357,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave, } ret = sdw_config_stream(&slave->dev, stream, stream_config, true); - if (ret) + if (ret) { + /* + * sdw_release_master_stream will release s_rt in slave_rt_list in + * stream_error case, but s_rt is only added to slave_rt_list + * when sdw_config_stream is successful, so free s_rt explicitly + * when sdw_config_stream is failed. + */ + kfree(s_rt); goto stream_error; + } list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 6f7fdcbb9151..5bf754208777 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -944,4 +944,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 13def7f78b9e..5fd929e023e1 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -284,10 +284,14 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, if (dummy_cycles) ifr |= QSPI_IFR_NBDUM(dummy_cycles); - /* Set data enable */ - if (op->data.nbytes) + /* Set data enable and data transfer type. 
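+ * [Editor's illustrative note, not part of the original patch:] a
+ * spi-mem op that has both an address phase and a data phase is a
+ * memory-style access, hence QSPI_IFR_TFRTYP_MEM below; an op with
+ * data but no address (e.g. a status-register read such as RDSR)
+ * keeps the default register-style transfer type, so only
+ * QSPI_IFR_DATAEN is set for it.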
*/ + if (op->data.nbytes) { ifr |= QSPI_IFR_DATAEN; + if (op->addr.nbytes) + ifr |= QSPI_IFR_TFRTYP_MEM; + } + /* * If the QSPI controller is set in regular SPI mode, set it in * Serial Memory Mode (SMM). @@ -312,7 +316,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, writel_relaxed(icr, aq->regs + QSPI_WICR); writel_relaxed(ifr, aq->regs + QSPI_IFR); } else { - if (op->data.dir == SPI_MEM_DATA_OUT) + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; /* Set QSPI Instruction Frame registers */ @@ -454,7 +458,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) struct resource *res; int irq, err = 0; - ctrl = spi_alloc_master(&pdev->dev, sizeof(*aq)); + ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq)); if (!ctrl) return -ENOMEM; @@ -476,8 +480,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) aq->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(aq->regs)) { dev_err(&pdev->dev, "missing registers\n"); - err = PTR_ERR(aq->regs); - goto exit; + return PTR_ERR(aq->regs); } /* Map the AHB memory */ @@ -485,8 +488,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) aq->mem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(aq->mem)) { dev_err(&pdev->dev, "missing AHB memory\n"); - err = PTR_ERR(aq->mem); - goto exit; + return PTR_ERR(aq->mem); } aq->mmap_size = resource_size(res); @@ -498,22 +500,21 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (IS_ERR(aq->pclk)) { dev_err(&pdev->dev, "missing peripheral clock\n"); - err = PTR_ERR(aq->pclk); - goto exit; + return PTR_ERR(aq->pclk); } /* Enable the peripheral clock */ err = clk_prepare_enable(aq->pclk); if (err) { dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); - goto exit; + return err; } aq->caps = of_device_get_match_data(&pdev->dev); if (!aq->caps) { dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); err = -EINVAL; - goto exit; + goto disable_pclk; } if (aq->caps->has_qspick) { @@ -557,8 +558,6 @@ disable_qspick: clk_disable_unprepare(aq->qspick); disable_pclk: clk_disable_unprepare(aq->pclk); -exit: - spi_controller_put(ctrl); return err; } diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index eb9a243e9526..98ace748cd98 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev) master->use_gpio_descriptors = true; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->setup = spi_bitbang_setup; - master->cleanup = spi_bitbang_cleanup; + master->flags = SPI_MASTER_GPIO_SS; if (pdata) { master->bus_num = pdata->bus_num; master->num_chipselect = pdata->num_chipselect; diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index abbc1582f457..d9711ea5b01d 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1569,7 +1569,7 @@ static int atmel_spi_probe(struct platform_device *pdev) if (ret == 0) { as->use_dma = true; } else if (ret == -EPROBE_DEFER) { - return ret; + goto out_unmap_regs; } } else if (as->caps.has_pdc_support) { as->use_pdc = true; diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 7a3531856491..8a4be34bccfd 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -670,7 +670,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) if (buf) buf[tp.byte] = read_rxram_slot_u8(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %02x\n", - buf ? buf[tp.byte] : 0xff); + buf ? 
buf[tp.byte] : 0x0); } else { u16 *buf = tp.trans->rx_buf; @@ -678,7 +678,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) buf[tp.byte / 2] = read_rxram_slot_u16(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %04x\n", - buf ? buf[tp.byte] : 0xffff); + buf ? buf[tp.byte / 2] : 0x0); } update_qspi_trans_byte_count(qspi, &tp, @@ -733,13 +733,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) while (!tstatus && slot < MSPI_NUM_CDRAM) { if (tp.trans->bits_per_word <= 8) { const u8 *buf = tp.trans->tx_buf; - u8 val = buf ? buf[tp.byte] : 0xff; + u8 val = buf ? buf[tp.byte] : 0x00; write_txram_slot_u8(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); } else { const u16 *buf = tp.trans->tx_buf; - u16 val = buf ? buf[tp.byte / 2] : 0xffff; + u16 val = buf ? buf[tp.byte / 2] : 0x0000; write_txram_slot_u16(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); @@ -1213,13 +1213,18 @@ int bcm_qspi_probe(struct platform_device *pdev, if (!of_match_node(bcm_qspi_of_match, dev->of_node)) return -ENODEV; - master = spi_alloc_master(dev, sizeof(struct bcm_qspi)); + master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi)); if (!master) { dev_err(dev, "error allocating spi_master\n"); return -ENOMEM; } qspi = spi_master_get_devdata(master); + + qspi->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); + qspi->pdev = pdev; qspi->trans_pos.trans = NULL; qspi->trans_pos.byte = 0; @@ -1247,21 +1252,17 @@ int bcm_qspi_probe(struct platform_device *pdev, if (res) { qspi->base[MSPI] = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->base[MSPI])) { - ret = PTR_ERR(qspi->base[MSPI]); - goto qspi_resource_err; - } + if (IS_ERR(qspi->base[MSPI])) + return PTR_ERR(qspi->base[MSPI]); } else { - goto qspi_resource_err; + return 0; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); if (res) { qspi->base[BSPI] = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->base[BSPI])) { - ret = PTR_ERR(qspi->base[BSPI]); - goto qspi_resource_err; - } + if (IS_ERR(qspi->base[BSPI])) + return PTR_ERR(qspi->base[BSPI]); qspi->bspi_mode = true; } else { qspi->bspi_mode = false; @@ -1272,18 +1273,14 @@ int bcm_qspi_probe(struct platform_device *pdev, res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg"); if (res) { qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->base[CHIP_SELECT])) { - ret = PTR_ERR(qspi->base[CHIP_SELECT]); - goto qspi_resource_err; - } + if (IS_ERR(qspi->base[CHIP_SELECT])) + return PTR_ERR(qspi->base[CHIP_SELECT]); } qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id), GFP_KERNEL); - if (!qspi->dev_ids) { - ret = -ENOMEM; - goto qspi_resource_err; - } + if (!qspi->dev_ids) + return -ENOMEM; for (val = 0; val < num_irqs; val++) { irq = -1; @@ -1332,13 +1329,6 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->soc_intc = NULL; } - qspi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(qspi->clk)) { - dev_warn(dev, "unable to get clock\n"); - ret = PTR_ERR(qspi->clk); - goto qspi_probe_err; - } - ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "failed to prepare clock\n"); @@ -1359,7 +1349,7 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->xfer_mode.addrlen = -1; qspi->xfer_mode.hp = -1; - ret = devm_spi_register_master(&pdev->dev, master); + ret = spi_register_master(master); if (ret < 0) { dev_err(dev, "can't register master\n"); goto qspi_reg_err; @@ -1372,8 +1362,6 @@ qspi_reg_err: 
clk_disable_unprepare(qspi->clk); qspi_probe_err: kfree(qspi->dev_ids); -qspi_resource_err: - spi_master_put(master); return ret; } /* probe function to be called by SoC specific platform driver probe */ @@ -1383,10 +1371,10 @@ int bcm_qspi_remove(struct platform_device *pdev) { struct bcm_qspi *qspi = platform_get_drvdata(pdev); + spi_unregister_master(qspi->master); bcm_qspi_hw_uninit(qspi); clk_disable_unprepare(qspi->clk); kfree(qspi->dev_ids); - spi_unregister_master(qspi->master); return 0; } diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index b4070c0de3df..56ee84e85bee 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1179,7 +1179,6 @@ static int bcm2835_spi_setup(struct spi_device *spi) struct spi_controller *ctlr = spi->controller; struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct gpio_chip *chip; - enum gpio_lookup_flags lflags; u32 cs; /* @@ -1245,21 +1244,9 @@ static int bcm2835_spi_setup(struct spi_device *spi) if (!chip) return 0; - /* - * Retrieve the corresponding GPIO line used for CS. - * The inversion semantics will be handled by the GPIO core - * code, so we pass GPIOS_OUT_LOW for "unasserted" and - * the correct flag for inversion semantics. The SPI_CS_HIGH - * on spi->mode cannot be checked for polarity in this case - * as the flag use_gpio_descriptors enforces SPI_CS_HIGH. - */ - if (of_property_read_bool(spi->dev.of_node, "spi-cs-high")) - lflags = GPIO_ACTIVE_HIGH; - else - lflags = GPIO_ACTIVE_LOW; spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select, DRV_NAME, - lflags, + GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_OUT_LOW); if (IS_ERR(spi->cs_gpiod)) return PTR_ERR(spi->cs_gpiod); @@ -1277,7 +1264,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) struct bcm2835_spi *bs; int err; - ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs), + ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs), dma_get_cache_alignment())); if (!ctlr) return -ENOMEM; @@ -1297,23 +1284,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bs = spi_controller_get_devdata(ctlr); bs->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(bs->regs)) { - err = PTR_ERR(bs->regs); - goto out_controller_put; - } + if (IS_ERR(bs->regs)) + return PTR_ERR(bs->regs); bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); - goto out_controller_put; + return err; } bs->irq = platform_get_irq(pdev, 0); - if (bs->irq <= 0) { - err = bs->irq ? bs->irq : -ENODEV; - goto out_controller_put; - } + if (bs->irq <= 0) + return bs->irq ? 
bs->irq : -ENODEV; clk_prepare_enable(bs->clk); @@ -1327,24 +1310,23 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctlr); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } - err = devm_spi_register_controller(&pdev->dev, ctlr); + err = spi_register_controller(ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); return 0; -out_clk_disable: +out_dma_release: + bcm2835_dma_release(ctlr, bs); clk_disable_unprepare(bs->clk); -out_controller_put: - spi_controller_put(ctlr); return err; } @@ -1355,6 +1337,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) bcm2835_debugfs_remove(bs); + spi_unregister_controller(ctlr); + /* Clear FIFOs, and disable the HW block */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index a2162ff56a12..8211107bfbe8 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -494,7 +494,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) unsigned long clk_hz; int err; - master = spi_alloc_master(&pdev->dev, sizeof(*bs)); + master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); if (!master) return -ENOMEM; @@ -524,29 +524,25 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) /* the main area */ bs->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(bs->regs)) { - err = PTR_ERR(bs->regs); - goto out_master_put; - } + if (IS_ERR(bs->regs)) + return PTR_ERR(bs->regs); bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); - goto out_master_put; + return err; } bs->irq = platform_get_irq(pdev, 0); - if (bs->irq <= 0) { - err = bs->irq ? bs->irq : -ENODEV; - goto out_master_put; - } + if (bs->irq <= 0) + return bs->irq ? 
bs->irq : -ENODEV; /* this also enables the HW block */ err = clk_prepare_enable(bs->clk); if (err) { dev_err(&pdev->dev, "could not prepare clock: %d\n", err); - goto out_master_put; + return err; } /* just checking if the clock returns a sane value */ @@ -569,7 +565,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - err = devm_spi_register_master(&pdev->dev, master); + err = spi_register_master(master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); goto out_clk_disable; @@ -581,8 +577,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) out_clk_disable: clk_disable_unprepare(bs->clk); -out_master_put: - spi_master_put(master); return err; } @@ -593,6 +587,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) bcm2835aux_debugfs_remove(bs); + spi_unregister_master(master); + bcm2835aux_spi_reset_hw(bs); /* disable the HW block by releasing the clock */ diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 36f7eb8ab2df..509476a2d79b 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -483,8 +483,10 @@ static int bcm63xx_hsspi_resume(struct device *dev) if (bs->pll_clk) { ret = clk_prepare_enable(bs->pll_clk); - if (ret) + if (ret) { + clk_disable_unprepare(bs->clk); return ret; + } } spi_master_resume(master); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 82a0ee09cbe1..1d0c335b0bf8 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -115,6 +115,7 @@ struct cdns_spi { void __iomem *regs; struct clk *ref_clk; struct clk *pclk; + unsigned int clk_rate; u32 speed_hz; const u8 *txbuf; u8 *rxbuf; @@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, u32 ctrl_reg, baud_rate_val; unsigned long frequency; - frequency = clk_get_rate(xspi->ref_clk); + frequency = xspi->clk_rate; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); @@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev) master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; + xspi->clk_rate = clk_get_rate(xspi->ref_clk); /* Set to default valid value */ - master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; + master->max_speed_hz = xspi->clk_rate / 4; xspi->speed_hz = master->max_speed_hz; master->bits_per_word_mask = SPI_BPW_MASK(8); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index f71c497393a6..5260183a58a8 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -1040,13 +1040,13 @@ static int davinci_spi_remove(struct platform_device *pdev) spi_bitbang_stop(&dspi->bitbang); clk_disable_unprepare(dspi->clk); - spi_master_put(master); if (dspi->dma_rx) { dma_release_channel(dspi->dma_rx); dma_release_channel(dspi->dma_tx); } + spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c index 75b33d7d14b0..9a4d942fafcf 100644 --- a/drivers/spi/spi-dln2.c +++ b/drivers/spi/spi-dln2.c @@ -780,7 +780,7 @@ exit_free_master: static int dln2_spi_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct spi_master *master = platform_get_drvdata(pdev); struct dln2_spi *dln2 = spi_master_get_devdata(master); pm_runtime_disable(&pdev->dev); diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 2663bb12d9ce..b07710c76fc9 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -147,6 +147,7 @@ static 
struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, if (!xfer->tx_buf) return NULL; + memset(&txconf, 0, sizeof(txconf)); txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = 16; @@ -193,6 +194,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, if (!xfer->rx_buf) return NULL; + memset(&rxconf, 0, sizeof(rxconf)); rxconf.direction = DMA_DEV_TO_MEM; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = 16; @@ -218,19 +220,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) { - u16 dma_ctrl = 0; + u16 imr = 0, dma_ctrl = 0; dw_writel(dws, DW_SPI_DMARDLR, 0xf); dw_writel(dws, DW_SPI_DMATDLR, 0x10); - if (xfer->tx_buf) + if (xfer->tx_buf) { dma_ctrl |= SPI_DMA_TDMAE; - if (xfer->rx_buf) + imr |= SPI_INT_TXOI; + } + if (xfer->rx_buf) { dma_ctrl |= SPI_DMA_RDMAE; + imr |= SPI_INT_RXUI | SPI_INT_RXOI; + } dw_writel(dws, DW_SPI_DMACR, dma_ctrl); /* Set the interrupt mask */ - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); + spi_umask_intr(dws, imr); dws->transfer_handler = dma_transfer; @@ -260,7 +266,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) dma_async_issue_pending(dws->txchan); } - return 0; + return 1; } static void mid_spi_dma_stop(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 11cac7e10663..c2f96941ad04 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -128,12 +128,20 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) { struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); + bool cs_high = !!(spi->mode & SPI_CS_HIGH); /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) chip->cs_control(!enable); - if (!enable) + /* + * DW SPI controller demands any native CS being set in order to + * proceed with data transfer. So in order to activate the SPI + * communications we must set a corresponding bit in the Slave + * Enable register no matter whether the SPI core is configured to + * support active-high or active-low CS level. 
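+ * [Editor's illustrative note, not in the original patch:] the SPI
+ * core passes "enable" as the logic level it wants on the CS line, so
+ * with cs_high = !!(spi->mode & SPI_CS_HIGH) the test below reduces
+ * to "CS is being asserted":
+ *
+ *	cs_high	enable	meaning			SER bit
+ *	0	0	active-low CS asserted	set
+ *	1	1	active-high CS asserted	set
+ *	0	1	active-low CS released	cleared (if cs_override)
+ *	1	0	active-high CS released	cleared (if cs_override)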
+ */ + if (cs_high == enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -297,6 +305,9 @@ static int dw_spi_transfer_one(struct spi_controller *master, dws->len = transfer->len; spin_unlock_irqrestore(&dws->buf_lock, flags); + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); + spi_enable_chip(dws, 0); /* Handle per transfer options for bpw and speed */ @@ -370,11 +381,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_enable_chip(dws, 1); - if (dws->dma_mapped) { - ret = dws->dma_ops->dma_transfer(dws, transfer); - if (ret < 0) - return ret; - } + if (dws->dma_mapped) + return dws->dma_ops->dma_transfer(dws, transfer); if (chip->poll_mode) return poll_transfer(dws); @@ -518,10 +526,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_inited = 0; } else { master->can_dma = dws->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; } } - ret = devm_spi_register_controller(dev, master); + ret = spi_register_controller(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); goto err_dma_exit; @@ -545,6 +554,8 @@ void dw_spi_remove_host(struct dw_spi *dws) { dw_spi_debugfs_remove(dws); + spi_unregister_controller(dws->master); + if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index d47bd26577b3..c7560d7d1627 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. +// Copyright 2020 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -33,6 +34,9 @@ #define SPI_MCR_CLR_TXF BIT(11) #define SPI_MCR_CLR_RXF BIT(10) #define SPI_MCR_XSPI BIT(3) +#define SPI_MCR_DIS_TXF BIT(13) +#define SPI_MCR_DIS_RXF BIT(12) +#define SPI_MCR_HALT BIT(0) #define SPI_TCR 0x08 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) @@ -192,8 +196,7 @@ struct fsl_dspi { u8 bytes_per_word; const struct fsl_dspi_devtype_data *devtype_data; - wait_queue_head_t waitq; - u32 waitflags; + struct completion xfer_done; struct fsl_dspi_dma *dma; }; @@ -703,10 +706,8 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))) return IRQ_NONE; - if (dspi_rxtx(dspi) == 0) { - dspi->waitflags = 1; - wake_up_interruptible(&dspi->waitq); - } + if (dspi_rxtx(dspi) == 0) + complete(&dspi->xfer_done); return IRQ_HANDLED; } @@ -800,13 +801,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, status = dspi_poll(dspi); } while (status == -EINPROGRESS); } else if (trans_mode != DSPI_DMA_MODE) { - status = wait_event_interruptible(dspi->waitq, - dspi->waitflags); - dspi->waitflags = 0; + wait_for_completion(&dspi->xfer_done); + reinit_completion(&dspi->xfer_done); } - if (status) - dev_err(&dspi->pdev->dev, - "Waiting for transfer to complete failed!\n"); if (transfer->delay_usecs) udelay(transfer->delay_usecs); @@ -908,6 +905,8 @@ static int dspi_suspend(struct device *dev) struct spi_controller *ctlr = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + if (dspi->irq) + disable_irq(dspi->irq); spi_controller_suspend(ctlr); clk_disable_unprepare(dspi->clk); @@ -928,6 +927,8 @@ static int dspi_resume(struct device *dev) if (ret) return ret; spi_controller_resume(ctlr); + if (dspi->irq) + enable_irq(dspi->irq); return 0; } @@ -1115,21 +1116,21 @@ static 
int dspi_probe(struct platform_device *pdev) goto poll_mode; } - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, - IRQF_SHARED, pdev->name, dspi); + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, + IRQF_SHARED, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_clk_put; } - init_waitqueue_head(&dspi->waitq); + init_completion(&dspi->xfer_done); poll_mode: if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { ret = dspi_request_dma(dspi, res->start); if (ret < 0) { dev_err(&pdev->dev, "can't get dma channels\n"); - goto out_clk_put; + goto out_free_irq; } } @@ -1141,11 +1142,14 @@ poll_mode: ret = spi_register_controller(ctlr); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); - goto out_clk_put; + goto out_free_irq; } return ret; +out_free_irq: + if (dspi->irq) + free_irq(dspi->irq, dspi); out_clk_put: clk_disable_unprepare(dspi->clk); out_ctlr_put: @@ -1160,13 +1164,29 @@ static int dspi_remove(struct platform_device *pdev) struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); /* Disconnect from the SPI framework */ - dspi_release_dma(dspi); - clk_disable_unprepare(dspi->clk); spi_unregister_controller(dspi->ctlr); + /* Disable RX and TX */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); + + /* Stop Running */ + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); + + dspi_release_dma(dspi); + if (dspi->irq) + free_irq(dspi->irq, dspi); + clk_disable_unprepare(dspi->clk); + return 0; } +static void dspi_shutdown(struct platform_device *pdev) +{ + dspi_remove(pdev); +} + static struct platform_driver fsl_dspi_driver = { .driver.name = DRIVER_NAME, .driver.of_match_table = fsl_dspi_dt_ids, @@ -1174,6 +1194,7 @@ static struct platform_driver fsl_dspi_driver = { .driver.pm = &dspi_pm, .probe = dspi_probe, .remove = dspi_remove, + .shutdown = dspi_shutdown, }; module_platform_driver(fsl_dspi_driver); diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index f20326714b9d..215bf6624e7c 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -555,13 +555,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct fsl_espi *espi = context_data; - u32 events; + u32 events, mask; spin_lock(&espi->lock); /* Get interrupt events(tx/rx) */ events = fsl_espi_read_reg(espi, ESPI_SPIE); - if (!events) { + mask = fsl_espi_read_reg(espi, ESPI_SPIM); + if (!(events & mask)) { spin_unlock(&espi->lock); return IRQ_NONE; } diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 92e460d4f3d1..58b2da91be1c 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -207,7 +207,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller) spi_controller_get_devdata(controller); int ret; - ret = pm_runtime_get_sync(fsl_lpspi->dev); + ret = pm_runtime_resume_and_get(fsl_lpspi->dev); if (ret < 0) { dev_err(fsl_lpspi->dev, "failed to enable clock\n"); return ret; @@ -973,9 +973,6 @@ static int fsl_lpspi_remove(struct platform_device *pdev) spi_controller_get_devdata(controller); pm_runtime_disable(fsl_lpspi->dev); - - spi_master_put(controller); - return 0; } diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index be7c6ba73072..18a93a2854d8 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -717,10 +717,11 @@ static int 
of_fsl_spi_probe(struct platform_device *ofdev) type = fsl_spi_get_type(&ofdev->dev); if (type == TYPE_FSL) { struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); + bool spisel_boot = false; #if IS_ENABLED(CONFIG_FSL_SOC) struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); - bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot"); + spisel_boot = of_property_read_bool(np, "fsl,spisel_boot"); if (spisel_boot) { pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); if (!pinfo->immr_spi_cs) { @@ -737,10 +738,14 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) * supported on the GRLIB variant. */ ret = gpiod_count(dev, "cs"); - if (ret <= 0) + if (ret < 0) + ret = 0; + if (ret == 0 && !spisel_boot) { pdata->max_chipselect = 1; - else + } else { + pdata->max_chipselect = ret + spisel_boot; pdata->cs_control = fsl_spi_cs_control; + } } ret = of_address_to_resource(np, 0, &mem); diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index f9c5bbb74714..e7dc1fad4a87 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -350,11 +350,6 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev, return 0; } -static void spi_gpio_put(void *data) -{ - spi_master_put(data); -} - static int spi_gpio_probe(struct platform_device *pdev) { int status; @@ -366,16 +361,10 @@ static int spi_gpio_probe(struct platform_device *pdev) of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev); - master = spi_alloc_master(dev, sizeof(*spi_gpio)); + master = devm_spi_alloc_master(dev, sizeof(*spi_gpio)); if (!master) return -ENOMEM; - status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); - if (status) { - spi_master_put(master); - return status; - } - if (of_id) status = spi_gpio_probe_dt(pdev, master); else @@ -435,7 +424,7 @@ static int spi_gpio_probe(struct platform_device *pdev) if (status) return status; - return devm_spi_register_master(&pdev->dev, spi_master_get(master)); + return devm_spi_register_master(&pdev->dev, master); } MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index f4a8f470aecc..e9ef80983b79 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -771,8 +771,10 @@ static int img_spfi_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret) + if (ret) { + pm_runtime_put_noidle(dev); return ret; + } spfi_reset(spfi); pm_runtime_put(dev); diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c index 9dfe8b04e688..f9bc1705c0d4 100644 --- a/drivers/spi/spi-lantiq-ssc.c +++ b/drivers/spi/spi-lantiq-ssc.c @@ -184,6 +184,7 @@ struct lantiq_ssc_spi { unsigned int tx_fifo_size; unsigned int rx_fifo_size; unsigned int base_cs; + unsigned int fdx_tx_level; }; static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg) @@ -481,6 +482,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) u32 data; unsigned int tx_free = tx_fifo_free(spi); + spi->fdx_tx_level = 0; while (spi->tx_todo && tx_free) { switch (spi->bits_per_word) { case 2 ... 8: @@ -509,6 +511,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) lantiq_ssc_writel(spi, data, LTQ_SPI_TB); tx_free--; + spi->fdx_tx_level++; } } @@ -520,6 +523,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi) u32 data; unsigned int rx_fill = rx_fifo_level(spi); + /* + * Wait until all expected data to be shifted in. + * Otherwise, rx overrun may occur. 
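+ * [Editor's sketch, not in the original patch:] fdx_tx_level counts
+ * the words queued by tx_fifo_write(), and in full duplex the slave
+ * shifts one word out for every word shifted in, so exactly that many
+ * words must land in the RX FIFO. A bounded variant of the busy-wait
+ * below (assuming a stuck FIFO should fail rather than hang) could be:
+ *
+ *	unsigned int retry = 100000;
+ *
+ *	while (rx_fifo_level(spi) != spi->fdx_tx_level && --retry)
+ *		cpu_relax();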
+ */ + while (rx_fill != spi->fdx_tx_level) + rx_fill = rx_fifo_level(spi); + while (rx_fill) { data = lantiq_ssc_readl(spi, LTQ_SPI_RB); @@ -907,7 +917,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); - spi->wq = alloc_ordered_workqueue(dev_name(dev), 0); + spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM); if (!spi->wq) { err = -ENOMEM; goto err_clk_put; diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 6f18d4952767..51633b2b6437 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = { { .description = "tx/rx-transfer - crossing PAGE_SIZE", .fill_option = FILL_COUNT_8, - .iterate_len = { ITERATE_MAX_LEN }, + .iterate_len = { ITERATE_LEN }, .iterate_tx_align = ITERATE_ALIGN, .iterate_rx_align = ITERATE_ALIGN, .transfer_count = 1, diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 9f0fa9f3116d..33115bcfc787 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) return 0; case 2: - if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) || - (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD)))) + if ((tx && + (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && + (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; case 4: - if ((tx && (mode & SPI_TX_QUAD)) || - (!tx && (mode & SPI_RX_QUAD))) + if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; @@ -235,6 +237,7 @@ static int spi_mem_access_start(struct spi_mem *mem) ret = pm_runtime_get_sync(ctlr->dev.parent); if (ret < 0) { + pm_runtime_put_noidle(ctlr->dev.parent); dev_err(&ctlr->dev, "Failed to power device: %d\n", ret); return ret; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 6888a4dcff6d..8acf24f7c5d4 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -36,7 +36,6 @@ #define SPI_CFG0_SCK_LOW_OFFSET 8 #define SPI_CFG0_CS_HOLD_OFFSET 16 #define SPI_CFG0_CS_SETUP_OFFSET 24 -#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 @@ -48,6 +47,8 @@ #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 +#define SPI_CFG2_SCK_HIGH_OFFSET 0 +#define SPI_CFG2_SCK_LOW_OFFSET 16 #define SPI_CMD_ACT BIT(0) #define SPI_CMD_RESUME BIT(1) @@ -279,7 +280,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) static void mtk_spi_prepare_transfer(struct spi_master *master, struct spi_transfer *xfer) { - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; + u32 spi_clk_hz, div, sck_time, cs_time, reg_val; struct mtk_spi *mdata = spi_master_get_devdata(master); spi_clk_hz = clk_get_rate(mdata->spi_clk); @@ -292,18 +293,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master, cs_time = sck_time * 2; if (mdata->dev_comp->enhance_timing) { + reg_val = (((sck_time - 1) & 0xffff) + << SPI_CFG2_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xffff) - << SPI_CFG0_SCK_HIGH_OFFSET); - reg_val |= (((sck_time - 1) & 0xffff) - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); + << SPI_CFG2_SCK_LOW_OFFSET); writel(reg_val, mdata->base + SPI_CFG2_REG); - reg_val |= (((cs_time - 1) & 0xffff) + reg_val = (((cs_time - 1) & 0xffff) << 
SPI_ADJUST_CFG0_CS_HOLD_OFFSET); reg_val |= (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); writel(reg_val, mdata->base + SPI_CFG0_REG); } else { - reg_val |= (((sck_time - 1) & 0xff) + reg_val = (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c index 2c3b7a2a1ec7..b4b9b7309b5e 100644 --- a/drivers/spi/spi-mt7621.c +++ b/drivers/spi/spi-mt7621.c @@ -350,9 +350,10 @@ static int mt7621_spi_probe(struct platform_device *pdev) if (status) return status; - master = spi_alloc_master(&pdev->dev, sizeof(*rs)); + master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs)); if (!master) { dev_info(&pdev->dev, "master allocation failed\n"); + clk_disable_unprepare(clk); return -ENOMEM; } @@ -377,10 +378,15 @@ static int mt7621_spi_probe(struct platform_device *pdev) ret = device_reset(&pdev->dev); if (ret) { dev_err(&pdev->dev, "SPI reset failed!\n"); + clk_disable_unprepare(clk); return ret; } - return devm_spi_register_controller(&pdev->dev, master); + ret = spi_register_controller(master); + if (ret) + clk_disable_unprepare(clk); + + return ret; } static int mt7621_spi_remove(struct platform_device *pdev) @@ -391,6 +397,7 @@ static int mt7621_spi_remove(struct platform_device *pdev) master = dev_get_drvdata(&pdev->dev); rs = spi_controller_get_devdata(master); + spi_unregister_controller(master); clk_disable_unprepare(rs->clk); return 0; diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index f48563c09b97..eba706d5671e 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -528,7 +528,7 @@ static int mxic_spi_probe(struct platform_device *pdev) struct mxic_spi *mxic; int ret; - master = spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi)); + master = devm_spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi)); if (!master) return -ENOMEM; @@ -573,15 +573,9 @@ static int mxic_spi_probe(struct platform_device *pdev) ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); - goto err_put_master; + pm_runtime_disable(&pdev->dev); } - return 0; - -err_put_master: - spi_master_put(master); - pm_runtime_disable(&pdev->dev); - return ret; } diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 996c1c8a9c71..34856c9ad931 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -608,6 +608,7 @@ static int mxs_spi_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(ssp->dev); if (ret < 0) { + pm_runtime_put_noidle(ssp->dev); dev_err(ssp->dev, "runtime_get_sync failed\n"); goto out_pm_runtime_disable; } diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index cb52fd8008d0..1e770d673905 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -677,10 +677,9 @@ static int npcm_fiu_probe(struct platform_device *pdev) struct npcm_fiu_spi *fiu; void __iomem *regbase; struct resource *res; - int ret; - int id; + int id, ret; - ctrl = spi_alloc_master(dev, sizeof(*fiu)); + ctrl = devm_spi_alloc_master(dev, sizeof(*fiu)); if (!ctrl) return -ENOMEM; @@ -738,9 +737,9 @@ static int npcm_fiu_probe(struct platform_device *pdev) ret = devm_spi_register_master(dev, ctrl); if (ret) - return ret; + clk_disable_unprepare(fiu->clk); - return 0; + return ret; } static int npcm_fiu_remove(struct platform_device *pdev) diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 
28ae5229f889..efd9e908e224 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -948,6 +948,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) struct resource *res; struct nxp_fspi *f; int ret; + u32 reg; ctlr = spi_alloc_master(&pdev->dev, sizeof(*f)); if (!ctlr) @@ -974,6 +975,12 @@ static int nxp_fspi_probe(struct platform_device *pdev) goto err_put_ctrl; } + /* Clear potential interrupts */ + reg = fspi_readl(f, f->iobase + FSPI_INTR); + if (reg) + fspi_writel(f, reg, f->iobase + FSPI_INTR); + + /* find the resources - controller memory mapped space */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); f->ahb_addr = devm_ioremap_resource(dev, res); diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index b955ca8796d2..b8e201c09484 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -426,7 +426,7 @@ err: static int omap1_spi100k_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct spi_master *master = platform_get_drvdata(pdev); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); pm_runtime_disable(&pdev->dev); @@ -440,7 +440,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int omap1_spi100k_runtime_suspend(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); clk_disable_unprepare(spi100k->ick); @@ -451,7 +451,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev) static int omap1_spi100k_runtime_resume(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); int ret; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 4433cb4de564..7646b4b56bed 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -24,7 +24,6 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/gcd.h> -#include <linux/iopoll.h> #include <linux/spi/spi.h> #include <linux/gpio.h> @@ -348,9 +347,19 @@ disable_fifo: static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) { - u32 val; + unsigned long timeout; - return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC); + timeout = jiffies + msecs_to_jiffies(1000); + while (!(readl_relaxed(reg) & bit)) { + if (time_after(jiffies, timeout)) { + if (!(readl_relaxed(reg) & bit)) + return -ETIMEDOUT; + else + return 0; + } + cpu_relax(); + } + return 0; } static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi, diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index 69f517ec59c6..8272bde5d706 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -825,6 +825,7 @@ static int pic32_spi_probe(struct platform_device *pdev) return 0; err_bailout: + pic32_spi_dma_unprep(pic32s); clk_disable_unprepare(pic32s->clk); err_master: spi_master_put(master); diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index f236e3034cf8..aafac128bb5f 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -21,7 +21,8 @@ enum { PORT_BSW1, PORT_BSW2, PORT_CE4100, - PORT_LPT, + PORT_LPT0, + PORT_LPT1, }; struct pxa_spi_info { @@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 }; 
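/*
 * [Editor's note, an assumption for illustration:] each LPSS SSP port is
 * wired to a fixed pair of DesignWare DMA request lines, identified by
 * the .dst_id/.src_id values in these dw_dma_slave descriptors and
 * matched to a channel in lpss_dma_filter(); splitting PORT_LPT into two
 * ports below therefore needs a distinct descriptor pair per port.
 */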
static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 }; static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 }; -static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 }; -static struct dw_dma_slave lpt_rx_param = { .src_id = 1 }; +static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 }; +static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 }; +static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 }; +static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 }; static bool lpss_dma_filter(struct dma_chan *chan, void *param) { @@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = { .num_chipselect = 1, .max_clk_rate = 50000000, }, - [PORT_LPT] = { + [PORT_LPT0] = { .type = LPSS_LPT_SSP, .port_id = 0, .setup = lpss_spi_setup, - .tx_param = &lpt_tx_param, - .rx_param = &lpt_rx_param, + .tx_param = &lpt0_tx_param, + .rx_param = &lpt0_rx_param, + }, + [PORT_LPT1] = { + .type = LPSS_LPT_SSP, + .port_id = 1, + .setup = lpss_spi_setup, + .tx_param = &lpt1_tx_param, + .rx_param = &lpt1_rx_param, }, }; @@ -285,8 +295,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = { { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 }, { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, - { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT }, - { }, + { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 }, + { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 }, + { } }; MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 723145673206..f5a10a94f156 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -148,6 +148,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 48, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, { /* LPSS_CNL_SSP */ .offset = 0x200, @@ -1674,9 +1675,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } if (platform_info->is_slave) - controller = spi_alloc_slave(dev, sizeof(struct driver_data)); + controller = devm_spi_alloc_slave(dev, sizeof(*drv_data)); else - controller = spi_alloc_master(dev, sizeof(struct driver_data)); + controller = devm_spi_alloc_master(dev, sizeof(*drv_data)); if (!controller) { dev_err(&pdev->dev, "cannot alloc spi_controller\n"); @@ -1880,7 +1881,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_controller(&pdev->dev, controller); + status = spi_register_controller(controller); if (status != 0) { dev_err(&pdev->dev, "problem registering spi controller\n"); goto out_error_pm_runtime_enabled; @@ -1889,7 +1890,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) return status; out_error_pm_runtime_enabled: - pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); out_error_clock_enabled: @@ -1900,7 +1900,6 @@ out_error_dma_irq_alloc: free_irq(ssp->irq, drv_data); out_error_controller_alloc: - spi_controller_put(controller); pxa_ssp_free(ssp); return status; } @@ -1916,6 +1915,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); + spi_unregister_controller(drv_data->controller); + /* Disable the SSP at the peripheral and SOC level */ pxa2xx_spi_write(drv_data, SSCR0, 0); clk_disable_unprepare(ssp->clk); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index fa8079fbea77..d1dfb52008b4 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct 
platform_device *pdev) struct spi_qup *controller = spi_master_get_devdata(master); int ret; - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c index 4c9620e0d18c..1ad3f11a22b2 100644 --- a/drivers/spi/spi-rb4xx.c +++ b/drivers/spi/spi-rb4xx.c @@ -142,7 +142,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev) if (IS_ERR(spi_base)) return PTR_ERR(spi_base); - master = spi_alloc_master(&pdev->dev, sizeof(*rbspi)); + master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi)); if (!master) return -ENOMEM; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 2cc6d9951b52..008b64f4e031 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -286,7 +286,7 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs) static void rockchip_spi_pio_reader(struct rockchip_spi *rs) { u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR); - u32 rx_left = rs->rx_left - words; + u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0; /* the hardware doesn't allow us to change fifo threshold * level while spi is enabled, so instead make sure to leave diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S index e95d6282109e..68ea12bead22 100644 --- a/drivers/spi/spi-s3c24xx-fiq.S +++ b/drivers/spi/spi-s3c24xx-fiq.S @@ -33,7 +33,6 @@ @ and an offset to the irq acknowledgment word ENTRY(s3c24xx_spi_fiq_rx) -s3c24xx_spi_fix_rx: .word fiq_rx_end - fiq_rx_start .word fiq_rx_irq_ack - fiq_rx_start fiq_rx_start: @@ -47,7 +46,7 @@ fiq_rx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do @@ set IRQ controller so that next op will trigger IRQ mov fiq_rtmp, #0 @@ -59,7 +58,6 @@ fiq_rx_irq_ack: fiq_rx_end: ENTRY(s3c24xx_spi_fiq_txrx) -s3c24xx_spi_fiq_txrx: .word fiq_txrx_end - fiq_txrx_start .word fiq_txrx_irq_ack - fiq_txrx_start fiq_txrx_start: @@ -74,7 +72,7 @@ fiq_txrx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] @@ -86,7 +84,6 @@ fiq_txrx_irq_ack: fiq_txrx_end: ENTRY(s3c24xx_spi_fiq_tx) -s3c24xx_spi_fix_tx: .word fiq_tx_end - fiq_tx_start .word fiq_tx_irq_ack - fiq_tx_start fiq_tx_start: @@ -99,7 +96,7 @@ fiq_tx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 7b7151ec14c8..1d948fee1a03 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -122,6 +122,7 @@ struct s3c64xx_spi_dma_data { struct dma_chan *ch; + dma_cookie_t cookie; enum dma_transfer_direction direction; }; @@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data) spin_unlock_irqrestore(&sdd->lock, flags); } -static void prepare_dma(struct s3c64xx_spi_dma_data *dma, +static int prepare_dma(struct s3c64xx_spi_dma_data *dma, struct sg_table *sgt) { struct s3c64xx_spi_driver_data *sdd; struct dma_slave_config config; struct dma_async_tx_descriptor *desc; + int ret; 
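/*
 * [Editor's sketch, not part of the patch:] the canonical dmaengine
 * submission sequence whose failure points the hunk below starts
 * checking; all calls are standard dmaengine API:
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				       dir, DMA_PREP_INTERRUPT);
 *	if (!desc)			// prep can fail
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))	// so can submission
 *		return -EIO;
 *	dma_async_issue_pending(chan);	// nothing runs until this
 */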
memset(&config, 0, sizeof(config)); @@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, dma->direction, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist", + dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx"); + return -ENOMEM; + } desc->callback = s3c64xx_spi_dmacb; desc->callback_param = dma; - dmaengine_submit(desc); + dma->cookie = dmaengine_submit(desc); + ret = dma_submit_error(dma->cookie); + if (ret) { + dev_err(&sdd->pdev->dev, "DMA submission failed"); + return -EIO; + } + dma_async_issue_pending(dma->ch); + return 0; } static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) @@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master, return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; } -static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, +static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, struct spi_transfer *xfer, int dma_mode) { void __iomem *regs = sdd->regs; u32 modecfg, chcfg; + int ret = 0; modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); @@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - prepare_dma(&sdd->tx_dma, &xfer->tx_sg); + ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg); } else { switch (sdd->cur_bpw) { case 32: @@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); - prepare_dma(&sdd->rx_dma, &xfer->rx_sg); + ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg); } } + if (ret) + return ret; + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); writel(chcfg, regs + S3C64XX_SPI_CH_CFG); + + return 0; } static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, @@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd, return 0; } -static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) +static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) { void __iomem *regs = sdd->regs; + int ret; u32 val; /* Disable Clock */ @@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) if (sdd->port_conf->clk_from_cmu) { /* The src_clk clock is divided internally by 2 */ - clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + if (ret) + return ret; } else { /* Configure Clock */ val = readl(regs + S3C64XX_SPI_CLK_CFG); @@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) val |= S3C64XX_SPI_ENCLK_ENABLE; writel(val, regs + S3C64XX_SPI_CLK_CFG); } + + return 0; } #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) @@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, sdd->cur_bpw = bpw; sdd->cur_speed = speed; sdd->cur_mode = spi->mode; - s3c64xx_spi_config(sdd); + status = s3c64xx_spi_config(sdd); + if (status) + return status; } if (!is_polling(sdd) && (xfer->len > fifo_len) && @@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, sdd->state &= ~RXBUSY; sdd->state &= ~TXBUSY; - s3c64xx_enable_datapath(sdd, xfer, use_dma); - /* Start the signals */ s3c64xx_spi_set_cs(spi, true); + status = 
s3c64xx_enable_datapath(sdd, xfer, use_dma); + spin_unlock_irqrestore(&sdd->lock, flags); + if (status) { + dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status); + break; + } + if (use_dma) status = s3c64xx_wait_for_dma(sdd, xfer); else diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 11acddc83304..ced365efc5f0 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -239,13 +239,12 @@ static int sc18is602_probe(struct i2c_client *client, struct sc18is602_platform_data *pdata = dev_get_platdata(dev); struct sc18is602 *hw; struct spi_master *master; - int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) return -EINVAL; - master = spi_alloc_master(dev, sizeof(struct sc18is602)); + master = devm_spi_alloc_master(dev, sizeof(struct sc18is602)); if (!master) return -ENOMEM; @@ -299,15 +298,7 @@ static int sc18is602_probe(struct i2c_client *client, master->min_speed_hz = hw->freq / 128; master->max_speed_hz = hw->freq / 4; - error = devm_spi_register_master(dev, master); - if (error) - goto error_reg; - - return 0; - -error_reg: - spi_master_put(master); - return error; + return devm_spi_register_master(dev, master); } static const struct i2c_device_id sc18is602_id[] = { diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index 20bdae5fdf3b..15123a8f41e1 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c @@ -440,7 +440,7 @@ static int spi_sh_probe(struct platform_device *pdev) if (irq < 0) return irq; - master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); + master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); if (master == NULL) { dev_err(&pdev->dev, "spi_alloc_master error.\n"); return -ENOMEM; @@ -458,16 +458,14 @@ static int spi_sh_probe(struct platform_device *pdev) break; default: dev_err(&pdev->dev, "No support width\n"); - ret = -ENODEV; - goto error1; + return -ENODEV; } ss->irq = irq; ss->master = master; ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (ss->addr == NULL) { dev_err(&pdev->dev, "ioremap error.\n"); - ret = -ENOMEM; - goto error1; + return -ENOMEM; } INIT_LIST_HEAD(&ss->queue); spin_lock_init(&ss->lock); @@ -477,7 +475,7 @@ static int spi_sh_probe(struct platform_device *pdev) ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); if (ret < 0) { dev_err(&pdev->dev, "request_irq error\n"); - goto error1; + return ret; } master->num_chipselect = 2; @@ -496,9 +494,6 @@ static int spi_sh_probe(struct platform_device *pdev) error3: free_irq(irq, ss); - error1: - spi_master_put(master); - return ret; } diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 9613cfe3c0a2..09f983524d51 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -384,9 +384,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY); /* Load the watchdog timeout value, 50ms is always enough. 
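 * [Editor's note, an assumption:] the 32-bit load value is split across
 * a high/low register pair; writing the high half first and the low
 * half last presumably lets the hardware latch a consistent value on
 * the low-half write, which is why the patch reorders the two writes
 * below.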
*/ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW, WDG_LOAD_VAL & WDG_LOAD_MASK); - sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); /* Start the watchdog to reset system */ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index fa597e27be17..c2bdf19ccdd2 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -563,11 +563,11 @@ static int sprd_spi_dma_request(struct sprd_spi *ss) ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER) return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); dev_err(ss->dev, "request TX DMA channel failed!\n"); - dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); } @@ -1008,6 +1008,7 @@ static int sprd_spi_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(ss->dev); if (ret < 0) { + pm_runtime_put_noidle(ss->dev); dev_err(ss->dev, "failed to resume SPI controller\n"); return ret; } diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 77d26d64541a..6c44dda9ee8c 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -375,13 +375,14 @@ static int spi_st_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "Failed to register master\n"); - goto clk_disable; + goto rpm_disable; } return 0; -clk_disable: +rpm_disable: pm_runtime_disable(&pdev->dev); +clk_disable: clk_disable_unprepare(spi_st->clk); put_master: spi_master_put(master); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..3af6a5a3a4b2 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -14,6 +14,7 @@ #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/spi/spi.h> @@ -442,7 +443,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, { u32 div, mbrdiv; - div = DIV_ROUND_UP(spi->clk_rate, speed_hz); + /* Ensure spi->clk_rate is even */ + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -468,26 +470,36 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, /** * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure + * @xfer_len: length of the message to be transferred */ -static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) { - u32 fthlv, half_fifo; + u32 fthlv, half_fifo, packet; /* data packet should not exceed 1/2 of fifo space */ half_fifo = (spi->fifo_size / 2); - if (spi->cur_bpw <= 8) - fthlv = half_fifo; - else if (spi->cur_bpw <= 16) - fthlv = half_fifo / 2; + /* data_packet should not exceed transfer length */ + if (half_fifo > xfer_len) + packet = xfer_len; else - fthlv = half_fifo / 4; + packet = half_fifo; + + if (spi->cur_bpw <= 8) + fthlv = packet; + else if (spi->cur_bpw <= 16) + fthlv = packet / 2; + else + fthlv = packet / 4; /* align packet size with data registers access */ if (spi->cur_bpw > 8) - fthlv -= (fthlv % 2); /* multiple of 2 */ + fthlv += 
(fthlv % 2) ? 1 : 0; else - fthlv -= (fthlv % 4); /* multiple of 4 */ + fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0; + + if (!fthlv) + fthlv = 1; return fthlv; } @@ -912,14 +924,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) mask |= STM32H7_SPI_SR_RXP; if (!(sr & mask)) { - dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", - sr, ier); + dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", + sr, ier); spin_unlock_irqrestore(&spi->lock, flags); return IRQ_NONE; } if (sr & STM32H7_SPI_SR_SUSP) { - dev_warn(spi->dev, "Communication suspended\n"); + static DEFINE_RATELIMIT_STATE(rs, + DEFAULT_RATELIMIT_INTERVAL * 10, + 1); + if (__ratelimit(&rs)) + dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); /* @@ -936,15 +952,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } if (sr & STM32H7_SPI_SR_OVR) { - dev_warn(spi->dev, "Overrun: received value discarded\n"); - if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32h7_spi_read_rxfifo(spi, false); - /* - * If overrun is detected while using DMA, it means that - * something went wrong, so stop the current transfer - */ - if (spi->cur_usedma) - end = true; + dev_err(spi->dev, "Overrun: RX data lost\n"); + end = true; } if (sr & STM32H7_SPI_SR_EOT) { @@ -961,13 +970,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); + writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { - spi_finalize_current_transfer(master); stm32h7_spi_disable(spi); + spi_finalize_current_transfer(master); } return IRQ_HANDLED; @@ -1395,7 +1404,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi) cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen); fthlv = spi->cur_fthlv - 1; cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; @@ -1578,39 +1587,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, unsigned long flags; unsigned int comm_type; int nb_words, ret = 0; + int mbr; spin_lock_irqsave(&spi->lock, flags); - if (spi->cur_bpw != transfer->bits_per_word) { - spi->cur_bpw = transfer->bits_per_word; - spi->cfg->set_bpw(spi); + spi->cur_xferlen = transfer->len; + + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); + + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; } - if (spi->cur_speed != transfer->speed_hz) { - int mbr; - - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, - spi->cfg->baud_rate_div_min, - spi->cfg->baud_rate_div_max); - if (mbr < 0) { - ret = mbr; - goto out; - } - - transfer->speed_hz = spi->cur_speed; - stm32_spi_set_mbr(spi, mbr); - } + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); comm_type = stm32_spi_communication_type(spi_dev, transfer); - if (spi->cur_comm != comm_type) { - ret = spi->cfg->set_mode(spi, comm_type); + ret = spi->cfg->set_mode(spi, comm_type); + if (ret < 0) + goto out; - if (ret < 0) - goto out; - - spi->cur_comm = comm_type; - } + 
spi->cur_comm = comm_type; if (spi->cfg->set_data_idleness) spi->cfg->set_data_idleness(spi, transfer->len); @@ -1628,8 +1631,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, goto out; } - spi->cur_xferlen = transfer->len; - dev_dbg(spi->dev, "transfer communication mode set to %d\n", spi->cur_comm); dev_dbg(spi->dev, @@ -1660,6 +1661,10 @@ static int stm32_spi_transfer_one(struct spi_master *master, struct stm32_spi *spi = spi_master_get_devdata(master); int ret; + /* Don't do anything on 0-byte transfers */ + if (transfer->len == 0) + return 0; + spi->tx_buf = transfer->tx_buf; spi->rx_buf = transfer->rx_buf; spi->tx_len = spi->tx_buf ? transfer->len : 0; @@ -1921,7 +1926,7 @@ static int stm32_spi_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - ret = devm_spi_register_master(&pdev->dev, master); + ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi master registration failed: %d\n", ret); @@ -1975,6 +1980,7 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master); + spi_unregister_master(master); spi->cfg->disable(spi); if (master->dma_tx) @@ -1986,6 +1992,8 @@ static int stm32_spi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); + pinctrl_pm_select_sleep_state(&pdev->dev); + return 0; } @@ -1997,13 +2005,18 @@ static int stm32_spi_runtime_suspend(struct device *dev) clk_disable_unprepare(spi->clk); - return 0; + return pinctrl_pm_select_sleep_state(dev); } static int stm32_spi_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct stm32_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; return clk_prepare_enable(spi->clk); } @@ -2033,10 +2046,24 @@ static int stm32_spi_resume(struct device *dev) return ret; ret = spi_master_resume(master); - if (ret) + if (ret) { clk_disable_unprepare(spi->clk); + return ret; + } - return ret; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + dev_err(dev, "Unable to power device: %d\n", ret); + return ret; + } + + spi->cfg->config(spi); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; } #endif diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index ec7967be9e2f..956df79035d5 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_master_get_devdata(master); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0; @@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master, * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. 
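The stm32h7 FIFO-threshold rework above (stm32h7_spi_prepare_fthlv) caps one data packet at both half the FIFO and the transfer length, then rounds the threshold up instead of down, so short transfers still get a non-zero trigger level. A standalone sketch of just that computation, assuming only the rounding logic matters (the real register programming is omitted):

#include <linux/kernel.h>       /* min(), roundup() */

/*
 * Sketch of the reworked threshold: one "packet" is at most half the
 * FIFO and at most the transfer length; the threshold counts
 * data-register accesses and is rounded up, never down to zero.
 */
static u32 fifo_threshold(u32 fifo_size, u32 xfer_len, u32 bits_per_word)
{
        u32 packet = min(fifo_size / 2, xfer_len);      /* in bytes */
        u32 fthlv;

        if (bits_per_word <= 8)
                fthlv = packet;
        else if (bits_per_word <= 16)
                fthlv = packet / 2;
        else
                fthlv = packet / 4;

        /* Align with the data-register access width, rounding up. */
        if (bits_per_word > 8)
                fthlv = roundup(fthlv, 2);
        else
                fthlv = roundup(fthlv, 4);

        return fthlv ? fthlv : 1;       /* hardware needs at least 1 */
}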
*/ - div = mclk_rate / (2 * tfr->speed_hz); - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { - if (div > 0) - div--; - - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; } else { - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); reg = SUN6I_CLK_CTL_CDR1(div); } diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index ae17c99cce03..785e7c445123 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable) val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT); val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT; + + if (!enable) + val |= SYNQUACER_HSSPI_DMSTOP_STOP; + writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART); } @@ -658,7 +662,8 @@ static int synquacer_spi_probe(struct platform_device *pdev) if (!master->max_speed_hz) { dev_err(&pdev->dev, "missing clock source\n"); - return -EINVAL; + ret = -EINVAL; + goto disable_clk; } master->min_speed_hz = master->max_speed_hz / 254; @@ -671,7 +676,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) rx_irq = platform_get_irq(pdev, 0); if (rx_irq <= 0) { ret = rx_irq; - goto put_spi; + goto disable_clk; } snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx", dev_name(&pdev->dev)); @@ -679,13 +684,13 @@ static int synquacer_spi_probe(struct platform_device *pdev) 0, sspi->rx_irq_name, sspi); if (ret) { dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret); - goto put_spi; + goto disable_clk; } tx_irq = platform_get_irq(pdev, 1); if (tx_irq <= 0) { ret = tx_irq; - goto put_spi; + goto disable_clk; } snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx", dev_name(&pdev->dev)); @@ -693,7 +698,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) 0, sspi->tx_irq_name, sspi); if (ret) { dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret); - goto put_spi; + goto disable_clk; } master->dev.of_node = np; @@ -711,7 +716,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) ret = synquacer_spi_enable(master); if (ret) - goto fail_enable; + goto disable_clk; pm_runtime_set_active(sspi->dev); pm_runtime_enable(sspi->dev); @@ -724,7 +729,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) disable_pm: pm_runtime_disable(sspi->dev); -fail_enable: +disable_clk: clk_disable_unprepare(sspi->clk); put_spi: spi_master_put(master); diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 39374c2edcf3..594905bf89aa 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -954,6 +954,7 @@ static int tegra_spi_setup(struct spi_device *spi) ret = pm_runtime_get_sync(tspi->dev); if (ret < 0) { + pm_runtime_put_noidle(tspi->dev); dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); if (cdata) tegra_spi_cleanup(spi); @@ -1472,6 +1473,7 @@ static int tegra_spi_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { + pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index a841a7250d14..ecb620169753 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -551,6 +551,7 
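Several hunks around this point (spi-sprd remove above, the tegra drivers here, ti-qspi below) apply one recurring fix: pm_runtime_get_sync() raises the device usage count even when it returns an error, so a failing call must be balanced with pm_runtime_put_noidle() or the device can never runtime-suspend again. The pattern, as a generic sketch with a hypothetical foo_pm_get() helper:

#include <linux/pm_runtime.h>

/*
 * Sketch: resume a device for I/O; on failure, drop the usage count
 * that pm_runtime_get_sync() took anyway.
 */
static int foo_pm_get(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }
        return 0;
}

Later kernels provide pm_runtime_resume_and_get(), which wraps exactly this sequence.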
@@ static int tegra_sflash_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { + pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 374a2a32edcd..2a1905c43a0b 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -756,6 +756,7 @@ static int tegra_slink_setup(struct spi_device *spi) ret = pm_runtime_get_sync(tspi->dev); if (ret < 0) { + pm_runtime_put_noidle(tspi->dev); dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); return ret; } @@ -1192,6 +1193,7 @@ static int tegra_slink_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { + pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 66dcb6128539..6b6ef8944283 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -176,6 +176,7 @@ static int ti_qspi_setup(struct spi_device *spi) ret = pm_runtime_get_sync(qspi->dev); if (ret < 0) { + pm_runtime_put_noidle(qspi->dev); dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); return ret; } @@ -655,6 +656,17 @@ static int ti_qspi_runtime_resume(struct device *dev) return 0; } +static void ti_qspi_dma_cleanup(struct ti_qspi *qspi) +{ + if (qspi->rx_bb_addr) + dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE, + qspi->rx_bb_addr, + qspi->rx_bb_dma_addr); + + if (qspi->rx_chan) + dma_release_channel(qspi->rx_chan); +} + static const struct of_device_id ti_qspi_match[] = { {.compatible = "ti,dra7xxx-qspi" }, {.compatible = "ti,am4372-qspi" }, @@ -808,6 +820,8 @@ no_dma: if (!ret) return 0; + ti_qspi_dma_cleanup(qspi); + pm_runtime_disable(&pdev->dev); free_master: spi_master_put(master); @@ -826,12 +840,7 @@ static int ti_qspi_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - if (qspi->rx_bb_addr) - dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE, - qspi->rx_bb_addr, - qspi->rx_bb_dma_addr); - if (qspi->rx_chan) - dma_release_channel(qspi->rx_chan); + ti_qspi_dma_cleanup(qspi); return 0; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c186d3a944cd..7592d4de20c9 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -405,9 +405,11 @@ static int spi_drv_probe(struct device *dev) if (ret) return ret; - ret = sdrv->probe(spi); - if (ret) - dev_pm_domain_detach(dev, true); + if (sdrv->probe) { + ret = sdrv->probe(spi); + if (ret) + dev_pm_domain_detach(dev, true); + } return ret; } @@ -415,9 +417,10 @@ static int spi_drv_probe(struct device *dev) static int spi_drv_remove(struct device *dev) { const struct spi_driver *sdrv = to_spi_driver(dev->driver); - int ret; + int ret = 0; - ret = sdrv->remove(to_spi_device(dev)); + if (sdrv->remove) + ret = sdrv->remove(to_spi_device(dev)); dev_pm_domain_detach(dev, true); return ret; @@ -442,10 +445,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) { sdrv->driver.owner = owner; sdrv->driver.bus = &spi_bus_type; - if (sdrv->probe) - sdrv->driver.probe = spi_drv_probe; - if (sdrv->remove) - sdrv->driver.remove = spi_drv_remove; + sdrv->driver.probe = spi_drv_probe; + sdrv->driver.remove = spi_drv_remove; if (sdrv->shutdown) sdrv->driver.shutdown = spi_drv_shutdown; return driver_register(&sdrv->driver); @@ -475,6 +476,12 @@ static LIST_HEAD(spi_controller_list); */ static DEFINE_MUTEX(board_lock); +/* + * Prevents addition of devices with 
the same chip select and + * addition of devices below an unregistering controller. + */ +static DEFINE_MUTEX(spi_add_lock); + /** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected @@ -553,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -581,6 +587,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + /* Descriptors take precedence */ if (ctlr->cs_gpiods) spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; @@ -1229,8 +1242,6 @@ out: if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1513,6 +1524,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_message callback the SPI bus has the opportunity to + * split a transfer into smaller chunks. + * Release the split transfers here since spi_map_msg() is done on the + * split transfers. + */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { @@ -1950,6 +1968,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) } lookup->max_speed_hz = sb->connection_speed; + lookup->bits_per_word = sb->data_bit_length; if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) lookup->mode |= SPI_CPHA; @@ -2239,6 +2258,50 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, } EXPORT_SYMBOL_GPL(__spi_alloc_controller); +static void devm_spi_release_controller(struct device *dev, void *ctlr) +{ + spi_controller_put(*(struct spi_controller **)ctlr); +} + +/** + * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() + * @dev: physical device of SPI controller + * @size: how much zeroed driver-private data to allocate + * @slave: whether to allocate an SPI master (false) or SPI slave (true) + * Context: can sleep + * + * Allocate an SPI controller and automatically release a reference on it + * when @dev is unbound from its driver. Drivers are thus relieved from + * having to call spi_controller_put(). + * + * The arguments to this function are identical to __spi_alloc_controller(). + * + * Return: the SPI controller structure on success, else NULL. 
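The kernel-doc above belongs to the new __devm_spi_alloc_controller(), whose body follows; it has the canonical devres wrapper shape: allocate a devres node that holds a pointer, stash the object in it, and let the release callback drop the reference when the device unbinds. The same shape for a hypothetical refcounted foo object (foo_alloc()/foo_put() are assumptions; everything else is the real devres API):

#include <linux/device.h>
#include <linux/slab.h>

struct foo;                             /* hypothetical object */
struct foo *foo_alloc(struct device *dev);
void foo_put(struct foo *f);

static void devm_foo_release(struct device *dev, void *res)
{
        /* res is the devres node, i.e. the stored pointer's address */
        foo_put(*(struct foo **)res);
}

struct foo *devm_foo_alloc(struct device *dev)
{
        struct foo **ptr, *f;

        ptr = devres_alloc(devm_foo_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        f = foo_alloc(dev);
        if (f) {
                *ptr = f;
                devres_add(dev, ptr);   /* release runs at unbind */
        } else {
                devres_free(ptr);
        }
        return f;
}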
+ */ +struct spi_controller *__devm_spi_alloc_controller(struct device *dev, + unsigned int size, + bool slave) +{ + struct spi_controller **ptr, *ctlr; + + ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return NULL; + + ctlr = __spi_alloc_controller(dev, size, slave); + if (ctlr) { + ctlr->devm_allocated = true; + *ptr = ctlr; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return ctlr; +} +EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); + #ifdef CONFIG_OF static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) { @@ -2581,6 +2644,12 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(&spi_add_lock); + + device_for_each_child(&ctlr->dev, NULL, __unregister); + /* First make sure that this controller was ever added */ mutex_lock(&board_lock); found = idr_find(&spi_master_idr, id); @@ -2593,13 +2662,22 @@ void spi_unregister_controller(struct spi_controller *ctlr) list_del(&ctlr->list); mutex_unlock(&board_lock); - device_for_each_child(&ctlr->dev, NULL, __unregister); - device_unregister(&ctlr->dev); + device_del(&ctlr->dev); + + /* Release the last reference on the controller if its driver + * has not yet been converted to devm_spi_alloc_master/slave(). + */ + if (!ctlr->devm_allocated) + put_device(&ctlr->dev); + /* free bus id */ mutex_lock(&board_lock); if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(&spi_add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index ab2c3848f5bf..be503a0e6ef7 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -223,6 +223,11 @@ static int spidev_message(struct spidev_data *spidev, for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) { + /* Ensure that the following allocations from rx_buf/tx_buf also meet + * DMA alignment requirements. 
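The spidev change continuing here pads every transfer's slice of the shared bounce buffer to an ARCH_KMALLOC_MINALIGN boundary: slices of a single kmalloc'd buffer are only safe to DMA-map if no two of them can share a cache line on a non-coherent architecture. A self-contained sketch of the offset arithmetic (pack_offsets() is a hypothetical helper; ALIGN() and ARCH_KMALLOC_MINALIGN are the real macros):

#include <linux/errno.h>
#include <linux/kernel.h>       /* ALIGN() */
#include <linux/slab.h>         /* ARCH_KMALLOC_MINALIGN */

/*
 * Sketch: lay out N user lengths inside one DMA-safe bounce buffer of
 * bufsiz bytes, padding each slice so the next one starts on a
 * kmalloc-alignment boundary.
 */
static int pack_offsets(const unsigned int *len, unsigned int n,
                        unsigned int bufsiz, unsigned int *off)
{
        unsigned int i, total = 0;

        for (i = 0; i < n; i++) {
                off[i] = total;
                total += ALIGN(len[i], ARCH_KMALLOC_MINALIGN);
                if (total > bufsiz)
                        return -EMSGSIZE;       /* same error spidev uses */
        }
        return 0;
}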
+ */ + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN); + k_tmp->len = u_tmp->len; total += k_tmp->len; @@ -238,17 +243,17 @@ static int spidev_message(struct spidev_data *spidev, if (u_tmp->rx_buf) { /* this transfer needs space in RX bounce buffer */ - rx_total += k_tmp->len; + rx_total += len_aligned; if (rx_total > bufsiz) { status = -EMSGSIZE; goto done; } k_tmp->rx_buf = rx_buf; - rx_buf += k_tmp->len; + rx_buf += len_aligned; } if (u_tmp->tx_buf) { /* this transfer needs space in TX bounce buffer */ - tx_total += k_tmp->len; + tx_total += len_aligned; if (tx_total > bufsiz) { status = -EMSGSIZE; goto done; @@ -258,7 +263,7 @@ static int spidev_message(struct spidev_data *spidev, (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; - tx_buf += k_tmp->len; + tx_buf += len_aligned; } k_tmp->cs_change = !!u_tmp->cs_change; @@ -290,16 +295,16 @@ static int spidev_message(struct spidev_data *spidev, goto done; /* copy any rx data out of bounce buffer */ - rx_buf = spidev->rx_buffer; - for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { if (u_tmp->rx_buf) { if (copy_to_user((u8 __user *) - (uintptr_t) u_tmp->rx_buf, rx_buf, + (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf, u_tmp->len)) { status = -EFAULT; goto done; } - rx_buf += u_tmp->len; } } status = total; @@ -605,15 +610,20 @@ err_find_dev: static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; + int dofree; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; + spin_lock_irq(&spidev->spi_lock); + /* ... after we unbound from the underlying device? */ + dofree = (spidev->spi == NULL); + spin_unlock_irq(&spidev->spi_lock); + /* last close? */ spidev->users--; if (!spidev->users) { - int dofree; kfree(spidev->tx_buffer); spidev->tx_buffer = NULL; @@ -621,19 +631,14 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spin_lock_irq(&spidev->spi_lock); - if (spidev->spi) - spidev->speed_hz = spidev->spi->max_speed_hz; - - /* ... after we unbound from the underlying device? */ - dofree = (spidev->spi == NULL); - spin_unlock_irq(&spidev->spi_lock); - if (dofree) kfree(spidev); + else + spidev->speed_hz = spidev->spi->max_speed_hz; } #ifdef CONFIG_SPI_SLAVE - spi_slave_abort(spidev->spi); + if (!dofree) + spi_slave_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -783,13 +788,13 @@ static int spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); + /* prevent new opens */ + mutex_lock(&device_list_lock); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spin_unlock_irq(&spidev->spi_lock); - /* prevent new opens */ - mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index de844b412110..bbbd311eda03 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved. 
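The addi_apci_1032, addi_apci_1500 and addi_apci_1564 hunks just below all cure the same undefined behaviour: shifting a 32-bit value by 32 or more, as in (1U << shift) - 1 or data[4] << shift, is undefined in C, so the channel shift coming from userspace has to be range-checked before any mask is built. The guarded computation those hunks share, as a sketch:

#include <linux/types.h>

/*
 * Build the "channels below shift" mask and the shifted hi/lo channel
 * masks without ever shifting a 32-bit value by 32 or more (UB in C).
 */
static void build_masks(unsigned int shift, u32 hi, u32 lo,
                        u32 *oldmask, u32 *himask, u32 *lomask)
{
        if (shift < 32) {
                *oldmask = (1U << shift) - 1;
                *himask = hi << shift;
                *lomask = lo << shift;
        } else {
                *oldmask = 0xffffffffu;
                *himask = 0;
                *lomask = 0;
        }
}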
*/ #include <linux/bitmap.h> #include <linux/delay.h> @@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id) static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid) { unsigned int irq; - u32 status; - int id; + u32 status, id; u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF; u8 per = pmic_arb->apid_data[apid].ppid & 0xFF; diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index c6695354b123..19b0cc5ea33f 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex); static struct kmem_cache *ashmem_area_cachep __read_mostly; static struct kmem_cache *ashmem_range_cachep __read_mostly; +/* + * A separate lockdep class for the backing shmem inodes to resolve the lockdep + * warning about the race between kswapd taking fs_reclaim before inode_lock + * and write syscall taking inode_lock and then fs_reclaim. + * Note that such race is impossible because ashmem does not support write + * syscalls operating on the backing shmem. + */ +static struct lock_class_key backing_shmem_inode_class; + static inline unsigned long range_size(struct ashmem_range *range) { return range->pgend - range->pgstart + 1; @@ -396,6 +405,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) if (!asma->file) { char *name = ASHMEM_NAME_DEF; struct file *vmfile; + struct inode *inode; if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') name = asma->name; @@ -407,6 +417,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) goto out; } vmfile->f_mode |= FMODE_LSEEK; + inode = file_inode(vmfile); + lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class); asma->file = vmfile; /* * override mmap operation of the vmfile so that it can't be diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 473b465724f1..0755b11348ed 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) { - void *addr = vm_map_ram(pages, num, -1, pgprot); + void *addr = vmap(pages, num, VM_MAP, pgprot); if (!addr) return -ENOMEM; memset(addr, 0, PAGE_SIZE * num); - vm_unmap_ram(addr, num); + vunmap(addr); return 0; } diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 08d1bbbebf2d..e84b4fb493d6 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file) } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); - if (!cfp) + if (!cfp) { + comedi_dev_put(dev); return -ENOMEM; + } cfp->dev = dev; diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 560649be9d13..2176d3289eff 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 
1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; @@ -252,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) struct apci1032_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; + unsigned short val; /* check interrupt is from this device */ if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & @@ -267,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; - comedi_buf_write_samples(s, &s->state, 1); + val = s->state; + comedi_buf_write_samples(s, &val, 1); comedi_handle_events(dev, s); /* enable the interrupt */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 45ad4ba92f94..8c3eff7cf465 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) struct comedi_device *dev = d; struct apci1500_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; - unsigned int status = 0; + unsigned short status = 0; unsigned int val; val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); @@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) * * Mask Meaning * ---------- ------------------------------------------ - * 0x00000001 Event 1 has occurred - * 0x00000010 Event 2 has occurred - * 0x00000100 Counter/timer 1 has run down (not implemented) - * 0x00001000 Counter/timer 2 has run down (not implemented) - * 0x00010000 Counter 3 has run down (not implemented) - * 0x00100000 Watchdog has run down (not implemented) - * 0x01000000 Voltage error - * 0x10000000 Short-circuit error + * 0b00000001 Event 1 has occurred + * 0b00000010 Event 2 has occurred + * 0b00000100 Counter/timer 1 has run down (not implemented) + * 0b00001000 Counter/timer 2 has run down (not implemented) + * 0b00010000 Counter 3 has run down (not implemented) + * 0b00100000 Watchdog has run down (not implemented) + * 0b01000000 Voltage error + * 0b10000000 Short-circuit error */ comedi_buf_write_samples(s, &status, 1); comedi_handle_events(dev, s); @@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; - unsigned int pm = devpriv->pm[trig] & old_mask; - unsigned int pt = 
devpriv->pt[trig] & old_mask; - unsigned int pp = devpriv->pp[trig] & old_mask; + unsigned int hi_mask; + unsigned int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; + unsigned int pm; + unsigned int pt; + unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -466,11 +467,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } + pm = devpriv->pm[trig] & old_mask; + pt = devpriv->pt[trig] & old_mask; + pp = devpriv->pp[trig] & old_mask; + switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 10501fe6bb25..1268ba34be5f 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index ddc0dc93d08b..eca5fa8a9eb8 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev, static int pci1710_ai_read_sample(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int cur_chan, - unsigned int *val) + unsigned short *val) { const struct boardtype *board = dev->board_ptr; struct pci1710_private *devpriv = dev->private; - unsigned int sample; + unsigned short sample; unsigned int chan; sample = inw(dev->iobase + PCI171X_AD_DATA_REG); @@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev, pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); for (i = 0; i < insn->n; i++) { - unsigned int val; + unsigned short val; /* start conversion */ outw(0, dev->iobase + 
PCI171X_SOFTTRG_REG); @@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, { struct comedi_cmd *cmd = &s->async->cmd; unsigned int status; - unsigned int val; + unsigned short val; int ret; status = inw(dev->iobase + PCI171X_STATUS_REG); @@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, } for (i = 0; i < devpriv->max_samples; i++) { - unsigned int val; + unsigned short val; int ret; ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val); diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c index 02ae00c95313..10f097a7d847 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas.c +++ b/drivers/staging/comedi/drivers/cb_pcidas.c @@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, devpriv->amcc + AMCC_OP_REG_INTCSR); ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas", dev); if (ret) { dev_dbg(dev->class_dev, "unable to allocate irq %d\n", pcidev->irq); @@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, if (dev->irq && board->has_ao_fifo) { dev->write_subdev = s; s->subdev_flags |= SDF_CMD_WRITE; + s->len_chanlist = s->n_chan; s->do_cmdtest = cb_pcidas_ao_cmdtest; s->do_cmd = cb_pcidas_ao_cmd; s->cancel = cb_pcidas_ao_cancel; diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c index e1774e09a320..9fe8b65cd9e3 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev, init_stc_registers(dev); retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, - dev->board_name, dev); + "cb_pcidas64", dev); if (retval) { dev_dbg(dev->class_dev, "unable to allocate irq %u\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c index f99211ec46de..0034005bdf8f 100644 --- a/drivers/staging/comedi/drivers/das6402.c +++ b/drivers/staging/comedi/drivers/das6402.c @@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) if (status & DAS6402_STATUS_FFULL) { async->events |= COMEDI_CB_OVERFLOW; } else if (status & DAS6402_STATUS_FFNE) { - unsigned int val; + unsigned short val; val = das6402_ai_read_sample(dev, s); comedi_buf_write_samples(s, &val, 1); diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c index 8cf09ef3012f..4bd8fd5218c8 100644 --- a/drivers/staging/comedi/drivers/das800.c +++ b/drivers/staging/comedi/drivers/das800.c @@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) struct comedi_cmd *cmd; unsigned long irq_flags; unsigned int status; - unsigned int val; + unsigned short val; bool fifo_empty; bool fifo_overflow; int i; diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c index 75693cdde313..c180d18ce517 100644 --- a/drivers/staging/comedi/drivers/dmm32at.c +++ b/drivers/staging/comedi/drivers/dmm32at.c @@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) { struct comedi_device *dev = d; unsigned char intstat; - unsigned int val; + unsigned short val; int i; if (!dev->attached) { diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c index 83026ba63d1c..78a7c1b3448a 100644 --- a/drivers/staging/comedi/drivers/dt2815.c +++ 
b/drivers/staging/comedi/drivers/dt2815.c @@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, int ret; for (i = 0; i < insn->n; i++) { + /* FIXME: lo bit 0 chooses voltage output or current output */ lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; hi = (data[i] & 0xff0) >> 4; @@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, if (ret) return ret; + outb(hi, dev->iobase + DT2815_DATA); + devpriv->ao_readback[chan] = data[i]; } return i; diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c index ee53571a8969..ead8000b5929 100644 --- a/drivers/staging/comedi/drivers/me4000.c +++ b/drivers/staging/comedi/drivers/me4000.c @@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) struct comedi_subdevice *s = dev->read_subdev; int i; int c = 0; - unsigned int lval; + unsigned short lval; if (!dev->attached) return IRQ_NONE; diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c index ea430237efa7..9da8dd748078 100644 --- a/drivers/staging/comedi/drivers/mf6x4.c +++ b/drivers/staging/comedi/drivers/mf6x4.c @@ -112,8 +112,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev, struct mf6x4_private *devpriv = dev->private; unsigned int status; + /* EOLC goes low at end of conversion. */ status = ioread32(devpriv->gpioc_reg); - if (status & MF6X4_GPIOC_EOLC) + if ((status & MF6X4_GPIOC_EOLC) == 0) return 0; return -EBUSY; } diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 4d1eccb5041d..4518c2680b7c 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c index a5937206bf1c..e9abae418062 100644 --- a/drivers/staging/comedi/drivers/pcl711.c +++ b/drivers/staging/comedi/drivers/pcl711.c @@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; - unsigned int data; + unsigned short data; if (!dev->attached) { dev_err(dev->class_dev, "spurious interrupt\n"); diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index 0af5315d4357..fc8afffc1815 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, static bool pcl818_ai_write_sample(struct comedi_device *dev, struct comedi_subdevice *s, - unsigned int chan, unsigned int val) + unsigned int chan, unsigned short val) { struct pcl818_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 65dc6c51037e..7956abcbae22 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; + if (!usb_endpoint_maxp(devpriv->ep_rx) || 
!usb_endpoint_maxp(devpriv->ep_tx)) + return -EINVAL; + return 0; } diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index 147481bf680c..a6c893ddbf28 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -2594,7 +2594,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep, if (req->unaligned) { if (!ep->virt_buf) - ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE, + ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE, &ep->phys_buf, GFP_ATOMIC | GFP_DMA); if (ep->epnum > 0) { @@ -3153,7 +3153,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev) for (i = 0; i < NUM_ENDPOINTS; i++) { ep = &udc->ep[i]; if (ep->virt_buf) - dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf, + dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf, ep->phys_buf); } diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c index 58c7d66060f7..dd12777b9a78 100644 --- a/drivers/staging/exfat/exfat_super.c +++ b/drivers/staging/exfat/exfat_super.c @@ -59,7 +59,7 @@ static void exfat_write_super(struct super_block *sb); /* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */ static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp) { - ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day, + ts->tv_sec = mktime64(tp->Year + 1980, tp->Month, tp->Day, tp->Hour, tp->Minute, tp->Second); ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC; diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c index 2ecffa42e561..fbe693816f07 100644 --- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c +++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c @@ -297,7 +297,7 @@ static int controller_probe(struct platform_device *pdev) regulator = devm_regulator_register(dev, &can_power_desc, &config); if (IS_ERR(regulator)) { err = PTR_ERR(regulator); - goto out_reset; + goto out_ida; } /* make controller info visible to userspace */ cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL); diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index aec0f19597a9..35833753c735 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -1223,7 +1223,7 @@ static int get_serial_info(struct tty_struct *tty, ss->flags = port->port.flags; ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN; ss->baud_base = 400000000; - ss->close_delay = port->port.close_delay; + ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10; mutex_unlock(&port->port.mutex); return 0; } @@ -1232,20 +1232,24 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct fwtty_port *port = tty->driver_data; + unsigned int cdelay; if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 || ss->baud_base != 400000000) return -EPERM; + cdelay = msecs_to_jiffies(ss->close_delay * 10); + mutex_lock(&port->port.mutex); if (!capable(CAP_SYS_ADMIN)) { - if (((ss->flags & ~ASYNC_USR_MASK) != + if (cdelay != port->port.close_delay || + ((ss->flags & ~ASYNC_USR_MASK) != (port->port.flags & ~ASYNC_USR_MASK))) { mutex_unlock(&port->port.mutex); return -EPERM; } } - port->port.close_delay = ss->close_delay * HZ / 100; + port->port.close_delay = cdelay; mutex_unlock(&port->port.mutex); return 0; @@ -2189,6 +2193,7 @@ static int fwserial_create(struct fw_unit *unit) err = fw_core_add_address_handler(&port->rx_handler, &fw_high_memory_region); if (err) { + 
tty_port_destroy(&port->port); kfree(port); goto free_ports; } @@ -2271,6 +2276,7 @@ unregister_ttys: free_ports: for (--i; i >= 0; --i) { + fw_core_remove_address_handler(&serial->ports[i]->rx_handler); tty_port_destroy(&serial->ports[i]->port); kfree(serial->ports[i]); } diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c index 46199c8ca441..f12f81c8dd2f 100644 --- a/drivers/staging/gasket/apex_driver.c +++ b/drivers/staging/gasket/apex_driver.c @@ -570,13 +570,6 @@ static const struct pci_device_id apex_pci_ids[] = { { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 } }; -static void apex_pci_fixup_class(struct pci_dev *pdev) -{ - pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; -} -DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID, - PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); - static int apex_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index 13179f063a61..6f9c0d18d9ce 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma, gasket_get_bar_index(gasket_dev, (vma->vm_pgoff << PAGE_SHIFT) + driver_desc->legacy_mmap_address_offset); + + if (bar_index < 0) + return DO_MAP_REGION_INVALID; + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; while (mapped_bytes < map_length) { /* diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c index 2d6195f7300e..864342acfd86 100644 --- a/drivers/staging/gasket/gasket_interrupt.c +++ b/drivers/staging/gasket/gasket_interrupt.c @@ -487,14 +487,16 @@ int gasket_interrupt_system_status(struct gasket_dev *gasket_dev) int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data, int interrupt, int event_fd) { - struct eventfd_ctx *ctx = eventfd_ctx_fdget(event_fd); - - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + struct eventfd_ctx *ctx; if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts) return -EINVAL; + ctx = eventfd_ctx_fdget(event_fd); + + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + interrupt_data->eventfd_ctxs[interrupt] = ctx; return 0; } @@ -505,6 +507,9 @@ int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data, if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts) return -EINVAL; - interrupt_data->eventfd_ctxs[interrupt] = NULL; + if (interrupt_data->eventfd_ctxs[interrupt]) { + eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]); + interrupt_data->eventfd_ctxs[interrupt] = NULL; + } return 0; } diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c index a2d67c28f530..af26bc9f184a 100644 --- a/drivers/staging/gasket/gasket_sysfs.c +++ b/drivers/staging/gasket/gasket_sysfs.c @@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device, } mutex_lock(&mapping->mutex); - for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER); - i++) { + for (i = 0; attrs[i].attr.attr.name != NULL; i++) { if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) { dev_err(device, "Maximum number of sysfs nodes reached for device\n"); @@ -340,6 +339,7 @@ void gasket_sysfs_put_attr(struct device *device, dev_err(device, "Unable to put unknown attribute: %s\n", attr->attr.attr.name); + put_mapping(mapping); } EXPORT_SYMBOL(gasket_sysfs_put_attr); @@ -373,6 
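The gasket_interrupt hunk above fixes an ordering problem: the interrupt index is now validated before eventfd_ctx_fdget() takes a reference, so an invalid index no longer leaks a context, and the clear path now puts the stored context. A combined sketch of both rules around the real eventfd API; set_irq_eventfd() is a hypothetical helper, and dropping the old context on replacement goes slightly beyond what the set path in the hunk does:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/eventfd.h>

static int set_irq_eventfd(struct eventfd_ctx **slots, int nr_slots,
                           int idx, int fd)
{
        struct eventfd_ctx *ctx;

        if (idx < 0 || idx >= nr_slots)
                return -EINVAL;         /* validate first: nothing to undo */

        ctx = eventfd_ctx_fdget(fd);    /* takes a reference */
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (slots[idx])
                eventfd_ctx_put(slots[idx]);    /* don't leak the old one */
        slots[idx] = ctx;
        return 0;
}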
+373,7 @@ ssize_t gasket_sysfs_register_store(struct device *device, gasket_dev = mapping->gasket_dev; if (!gasket_dev) { dev_err(device, "Device driver may have been removed\n"); + put_mapping(mapping); return 0; } diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h index 1d0eed66a7f4..ab5aa351d555 100644 --- a/drivers/staging/gasket/gasket_sysfs.h +++ b/drivers/staging/gasket/gasket_sysfs.h @@ -30,10 +30,6 @@ */ #define GASKET_SYSFS_MAX_NODES 196 -/* End markers for sysfs struct arrays. */ -#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END -#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN) - /* * Terminator struct for a gasket_sysfs_attr array. Must be at the end of * all gasket_sysfs_attribute arrays. diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c index dc4da66c3695..54bdb64f52e8 100644 --- a/drivers/staging/gdm724x/gdm_usb.c +++ b/drivers/staging/gdm724x/gdm_usb.c @@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev, static int request_mac_address(struct lte_udev *udev) { - u8 buf[16] = {0,}; - struct hci_packet *hci = (struct hci_packet *)buf; + struct hci_packet *hci; struct usb_device *usbdev = udev->usbdev; int actual; int ret = -1; + hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL); + if (!hci) + return -ENOMEM; + hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION); hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1); hci->data[0] = MAC_ADDRESS; - ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5, + ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5, &actual, 1000); udev->request_mac_addr = 1; + kfree(hci); return ret; } diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c index 08746c85dea6..3259bf02ba25 100644 --- a/drivers/staging/greybus/audio_codec.c +++ b/drivers/staging/greybus/audio_codec.c @@ -489,6 +489,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream, if (ret) { dev_err_ratelimited(dai->dev, "%d: Error during set_config\n", ret); + gb_pm_runtime_put_noidle(bundle); mutex_unlock(&codec->lock); return ret; } @@ -565,6 +566,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream, break; } if (ret) { + gb_pm_runtime_put_noidle(bundle); mutex_unlock(&codec->lock); dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n", ret); diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index 4ac30accf226..cc329b990e16 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0] & mask; connect = !!val; + ret = gb_pm_runtime_get_sync(bundle); + if (ret) + return ret; + + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); + if (ret) + goto exit; + /* update ucontrol */ if (gbvalue.value.integer_value[0] != val) { for (wi = 0; wi < wlist->num_widgets; wi++) { @@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, gbvalue.value.integer_value[0] = cpu_to_le32(ucontrol->value.integer.value[0]); - ret = gb_pm_runtime_get_sync(bundle); - if (ret) - return ret; - ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id, GB_AUDIO_INVALID_INDEX, &gbvalue); - - gb_pm_runtime_put_autosuspend(bundle); - - if (ret) { - dev_err_ratelimited(codec->dev, - "%d:Error in %s for %s\n", 
ret, - __func__, kcontrol->id.name); - return ret; - } } - return 0; +exit: + gb_pm_runtime_put_autosuspend(bundle); + if (ret) + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, + __func__, kcontrol->id.name); + return ret; } #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index d6ba25f21d80..d2672b65c3f4 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -1026,7 +1026,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id) light->channels_count = conf.channel_count; light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); - + if (!light->name) + return -ENOMEM; light->channels = kcalloc(light->channels_count, sizeof(struct gb_channel), GFP_KERNEL); if (!light->channels) diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c index 68c5718be827..c4b16bb5c1a4 100644 --- a/drivers/staging/greybus/sdio.c +++ b/drivers/staging/greybus/sdio.c @@ -411,6 +411,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) struct gb_sdio_command_request request = {0}; struct gb_sdio_command_response response; struct mmc_data *data = host->mrq->data; + unsigned int timeout_ms; u8 cmd_flags; u8 cmd_type; int i; @@ -469,9 +470,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) request.data_blksz = cpu_to_le16(data->blksz); } - ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, - &request, sizeof(request), &response, - sizeof(response)); + timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : + GB_OPERATION_TIMEOUT_DEFAULT; + + ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, + &request, sizeof(request), &response, + sizeof(response), timeout_ms); if (ret < 0) goto out; diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index 55c51143bb09..fc1bd68889c9 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, } if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) - newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; else - newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = 0; if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); @@ -625,10 +625,12 @@ static int get_serial_info(struct tty_struct *tty, ss->line = gb_tty->minor; ss->xmit_fifo_size = 16; ss->baud_base = 9600; - ss->close_delay = gb_tty->port.close_delay / 10; + ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10; ss->closing_wait = gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? - ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10; + ASYNC_CLOSING_WAIT_NONE : + jiffies_to_msecs(gb_tty->port.closing_wait) / 10; + return 0; } @@ -640,17 +642,16 @@ static int set_serial_info(struct tty_struct *tty, unsigned int close_delay; int retval = 0; - close_delay = ss->close_delay * 10; + close_delay = msecs_to_jiffies(ss->close_delay * 10); closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
- ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10; + ASYNC_CLOSING_WAIT_NONE : + msecs_to_jiffies(ss->closing_wait * 10); mutex_lock(&gb_tty->port.mutex); if (!capable(CAP_SYS_ADMIN)) { if ((close_delay != gb_tty->port.close_delay) || (closing_wait != gb_tty->port.closing_wait)) retval = -EPERM; - else - retval = -EOPNOTSUPP; } else { gb_tty->port.close_delay = close_delay; gb_tty->port.closing_wait = closing_wait; diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index 4b25a3a314ed..ed404355ea4c 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { - struct spi_transfer xfer = { - .len = 2, - .rx_buf = st->rx, - .tx_buf = st->tx, + struct spi_transfer xfers[] = { + { + .len = 1, + .rx_buf = &st->rx[0], + .tx_buf = &st->tx[0], + .cs_change = 1, + }, { + .len = 1, + .rx_buf = &st->rx[1], + .tx_buf = &st->tx[1], + }, }; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; - ret = spi_sync_transfer(st->sdev, &xfer, 1); + ret = spi_sync_transfer(st->sdev, xfers, 2); if (ret < 0) return ret; diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 871441658f0e..9c67852b19e1 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, { int err = 0; struct kp2000_device *pcard; - int rv; unsigned long reg_bar_phys_addr; unsigned long reg_bar_phys_len; unsigned long dma_bar_phys_addr; @@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, if (err < 0) goto err_release_dma; - rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, - pcard->name, pcard); - if (rv) { + err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED, + pcard->name, pcard); + if (err) { dev_err(&pcard->pdev->dev, - "%s: failed to request_irq: %d\n", __func__, rv); + "%s: failed to request_irq: %d\n", __func__, err); goto err_disable_msi; } diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index 3cffc8be6656..e61bd8e1d246 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev, { struct ks_wlan_private *priv = netdev_priv(dev); struct iw_scan_req *req = NULL; + int len; if (priv->sleep_mode == SLP_SLEEP) return -EPERM; @@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev, if (wrqu->data.length == sizeof(struct iw_scan_req) && wrqu->data.flags & IW_SCAN_THIS_ESSID) { req = (struct iw_scan_req *)extra; - priv->scan_ssid_len = req->essid_len; - memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); + len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + priv->scan_ssid_len = len; + memcpy(priv->scan_ssid, req->essid, len); } else { priv->scan_ssid_len = 0; } diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c index 6f0cd0784786..c5a262a12e40 100644 --- a/drivers/staging/media/allegro-dvt/allegro-core.c +++ b/drivers/staging/media/allegro-dvt/allegro-core.c @@ -393,7 +393,10 @@ struct mcu_msg_create_channel { u32 freq_ird; u32 freq_lt; u32 gdr_mode; - u32 gop_length; + u16 gop_length; 
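The fwserial and greybus uart conversions above share one point: struct serial_struct expresses close_delay and closing_wait in hundredths of a second, while tty_port stores jiffies, so the driver must convert explicitly instead of assuming HZ == 100. The two directions, as a sketch with hypothetical helper names:

#include <linux/jiffies.h>

/* serial_struct delays are in hundredths of a second (centiseconds). */
static unsigned int close_delay_to_cs(unsigned long port_jiffies)
{
        return jiffies_to_msecs(port_jiffies) / 10;
}

static unsigned long close_delay_from_cs(unsigned int cs)
{
        return msecs_to_jiffies(cs * 10);
}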
+ u8 num_b; + u8 freq_golden_ref; + u32 unknown39; u32 subframe_latency; diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c index ecd34a7db190..8b76f1f13b06 100644 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c @@ -67,12 +67,17 @@ hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu, unsigned char *chroma_qtable) { u32 reg, i; + __be32 *luma_qtable_p; + __be32 *chroma_qtable_p; + + luma_qtable_p = (__be32 *)luma_qtable; + chroma_qtable_p = (__be32 *)chroma_qtable; for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) { - reg = get_unaligned_be32(&luma_qtable[i]); + reg = get_unaligned_be32(&luma_qtable_p[i]); vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i)); - reg = get_unaligned_be32(&chroma_qtable[i]); + reg = get_unaligned_be32(&chroma_qtable_p[i]); vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i)); } } diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c index 06162f569b5e..4f9272e5b8d9 100644 --- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c +++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c @@ -98,12 +98,17 @@ rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu, unsigned char *chroma_qtable) { u32 reg, i; + __be32 *luma_qtable_p; + __be32 *chroma_qtable_p; + + luma_qtable_p = (__be32 *)luma_qtable; + chroma_qtable_p = (__be32 *)chroma_qtable; for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) { - reg = get_unaligned_be32(&luma_qtable[i]); + reg = get_unaligned_be32(&luma_qtable_p[i]); vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i)); - reg = get_unaligned_be32(&chroma_qtable[i]); + reg = get_unaligned_be32(&chroma_qtable_p[i]); vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i)); } } diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index 46576e32581f..fabbfceaa107 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -553,7 +553,7 @@ static int capture_validate_fmt(struct capture_priv *priv) priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height || priv->vdev.cc->cs != cc->cs || priv->vdev.compose.width != compose.width || - priv->vdev.compose.height != compose.height) ? -EINVAL : 0; + priv->vdev.compose.height != compose.height) ? 
-EPIPE : 0; } static int capture_start_streaming(struct vb2_queue *vq, unsigned int count) @@ -785,7 +785,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) /* setup default format */ fmt_src.pad = priv->src_sd_pad; fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); if (ret) { v4l2_err(sd, "failed to get src_sd format\n"); goto unreg; diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c index 2b635ebf62d6..a15d970adb98 100644 --- a/drivers/staging/media/imx/imx-media-csc-scaler.c +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c @@ -866,11 +866,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev) struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev); struct video_device *vfd = priv->vdev.vfd; - mutex_lock(&priv->mutex); - video_unregister_device(vfd); - - mutex_unlock(&priv->mutex); } struct imx_media_video_dev * diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c index 2c3c2adca683..e16408af92d9 100644 --- a/drivers/staging/media/imx/imx-media-dev.c +++ b/drivers/staging/media/imx/imx-media-dev.c @@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier) imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd); if (IS_ERR(imxmd->m2m_vdev)) { ret = PTR_ERR(imxmd->m2m_vdev); + imxmd->m2m_vdev = NULL; goto unlock; } @@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev) v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n"); + if (imxmd->m2m_vdev) { + imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev); + imxmd->m2m_vdev = NULL; + } + v4l2_async_notifier_unregister(&imxmd->notifier); imx_media_unregister_ipu_internal_subdevs(imxmd); v4l2_async_notifier_cleanup(&imxmd->notifier); - imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev); media_device_unregister(&imxmd->md); v4l2_device_unregister(&imxmd->v4l2_dev); media_device_cleanup(&imxmd->md); diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c index bfd6b5fbf484..d24897d06947 100644 --- a/drivers/staging/media/imx/imx7-media-csi.c +++ b/drivers/staging/media/imx/imx7-media-csi.c @@ -1009,6 +1009,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, sdformat->format.width = in_fmt->width; sdformat->format.height = in_fmt->height; sdformat->format.code = in_fmt->code; + sdformat->format.field = in_fmt->field; *cc = in_cc; sdformat->format.colorspace = in_fmt->colorspace; @@ -1023,6 +1024,9 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi, false); sdformat->format.code = (*cc)->codes[0]; } + + if (sdformat->format.field != V4L2_FIELD_INTERLACED) + sdformat->format.field = V4L2_FIELD_NONE; break; default: return -EINVAL; diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c index e50b1f88e25b..021bbd420390 100644 --- a/drivers/staging/media/imx/imx7-mipi-csis.c +++ b/drivers/staging/media/imx/imx7-mipi-csis.c @@ -579,7 +579,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable) state->flags |= ST_STREAMING; } else { v4l2_subdev_call(state->src_sd, video, s_stream, 0); - ret = v4l2_subdev_call(state->src_sd, core, s_power, 1); + ret = v4l2_subdev_call(state->src_sd, core, s_power, 0); mipi_csis_stop_stream(state); state->flags &= ~ST_STREAMING; if (state->debug) @@ -657,28 +657,6 @@ static int 
mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd, return 0; } -static struct csis_pix_format const * -mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf) -{ - struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); - struct csis_pix_format const *csis_fmt; - - csis_fmt = find_csis_format(mf->code); - if (!csis_fmt) - csis_fmt = &mipi_csis_formats[0]; - - v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH, - csis_fmt->pix_width_alignment, - &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1, - 0); - - state->format_mbus.code = csis_fmt->code; - state->format_mbus.width = mf->width; - state->format_mbus.height = mf->height; - - return csis_fmt; -} - static struct v4l2_mbus_framefmt * mipi_csis_get_format(struct csi_state *state, struct v4l2_subdev_pad_config *cfg, @@ -691,40 +669,6 @@ mipi_csis_get_format(struct csi_state *state, return &state->format_mbus; } -static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *sdformat) -{ - struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); - struct csis_pix_format const *csis_fmt; - struct v4l2_mbus_framefmt *fmt; - - if (sdformat->pad >= CSIS_PADS_NUM) - return -EINVAL; - - fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); - - mutex_lock(&state->lock); - if (sdformat->pad == CSIS_PAD_SOURCE) { - sdformat->format = *fmt; - goto unlock; - } - - csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format); - - sdformat->format = *fmt; - - if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) - state->csis_fmt = csis_fmt; - else - cfg->try_fmt = sdformat->format; - -unlock: - mutex_unlock(&state->lock); - - return 0; -} - static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *sdformat) @@ -733,11 +677,59 @@ static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *fmt; mutex_lock(&state->lock); + fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + sdformat->format = *fmt; + mutex_unlock(&state->lock); + + return 0; +} + +static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); + struct csis_pix_format const *csis_fmt; + struct v4l2_mbus_framefmt *fmt; + + /* + * The CSIS can't transcode in any way, the source format can't be + * modified. + */ + if (sdformat->pad == CSIS_PAD_SOURCE) + return mipi_csis_get_fmt(mipi_sd, cfg, sdformat); + + if (sdformat->pad != CSIS_PAD_SINK) + return -EINVAL; fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + mutex_lock(&state->lock); + + /* Validate the media bus code and clamp the size. */ + csis_fmt = find_csis_format(sdformat->format.code); + if (!csis_fmt) + csis_fmt = &mipi_csis_formats[0]; + + fmt->code = csis_fmt->code; + fmt->width = sdformat->format.width; + fmt->height = sdformat->format.height; + + v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH, + csis_fmt->pix_width_alignment, + &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 1, 0); + sdformat->format = *fmt; + /* Propagate the format from sink to source. */ + fmt = mipi_csis_get_format(state, cfg, sdformat->which, + CSIS_PAD_SOURCE); + *fmt = sdformat->format; + + /* Store the CSIS format descriptor for active formats. 
*/ + if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) + state->csis_fmt = csis_fmt; + mutex_unlock(&state->lock); return 0; diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h index 0b1cb9f9cbd1..1bfa8c86132a 100644 --- a/drivers/staging/media/ipu3/include/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h @@ -450,7 +450,7 @@ struct ipu3_uapi_awb_fr_config_s { __u32 bayer_sign; __u8 bayer_nf; __u8 reserved2[7]; -} __attribute__((aligned(32))) __packed; +} __packed; /** * struct ipu3_uapi_4a_config - 4A config @@ -466,7 +466,8 @@ struct ipu3_uapi_4a_config { struct ipu3_uapi_ae_grid_config ae_grd_config; __u8 padding[20]; struct ipu3_uapi_af_config_s af_config; - struct ipu3_uapi_awb_fr_config_s awb_fr_config; + struct ipu3_uapi_awb_fr_config_s awb_fr_config + __attribute__((aligned(32))); } __packed; /** @@ -2472,7 +2473,7 @@ struct ipu3_uapi_acc_param { struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32))); struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32))); struct ipu3_uapi_anr_config anr; - struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32))); + struct ipu3_uapi_awb_fr_config_s awb_fr; struct ipu3_uapi_ae_config ae; struct ipu3_uapi_af_config_s af; struct ipu3_uapi_awb_config awb; diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c index 4533dacad4be..ef3b5d07137a 100644 --- a/drivers/staging/media/ipu3/ipu3-css-params.c +++ b/drivers/staging/media/ipu3/ipu3-css-params.c @@ -161,7 +161,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width, memset(&cfg->scaler_coeffs_chroma, 0, sizeof(cfg->scaler_coeffs_chroma)); - memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma)); + memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma)); do { phase_step_correction++; diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c index 3d969b0522ab..abcf1f3e5f63 100644 --- a/drivers/staging/media/ipu3/ipu3-mmu.c +++ b/drivers/staging/media/ipu3/ipu3-mmu.c @@ -174,8 +174,10 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) spin_lock_irqsave(&mmu->lock, flags); l2pt = mmu->l2pts[l1pt_idx]; - if (l2pt) - goto done; + if (l2pt) { + spin_unlock_irqrestore(&mmu->lock, flags); + return l2pt; + } spin_unlock_irqrestore(&mmu->lock, flags); @@ -190,8 +192,9 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) l2pt = mmu->l2pts[l1pt_idx]; if (l2pt) { + spin_unlock_irqrestore(&mmu->lock, flags); imgu_mmu_free_page_table(new_l2pt); - goto done; + return l2pt; } l2pt = new_l2pt; @@ -200,7 +203,6 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt)); mmu->l1pt[l1pt_idx] = pteval; -done: spin_unlock_irqrestore(&mmu->lock, flags); return l2pt; } diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index 3c7ad1eed434..908ae74aa970 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -367,8 +367,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb) vb2_set_plane_payload(vb, 0, need_bytes); + mutex_lock(&imgu->streaming_lock); if (imgu->streaming) imgu_queue_buffers(imgu, false, node->pipe); + mutex_unlock(&imgu->streaming_lock); dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__, node->pipe, node->id); @@ -468,10 +470,13 @@ static int 
imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) dev_dbg(dev, "%s node name %s pipe %u id %u", __func__, node->name, node->pipe, node->id); + mutex_lock(&imgu->streaming_lock); if (imgu->streaming) { r = -EBUSY; + mutex_unlock(&imgu->streaming_lock); goto fail_return_bufs; } + mutex_unlock(&imgu->streaming_lock); if (!node->enabled) { dev_err(dev, "IMGU node is not enabled"); @@ -498,9 +503,11 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) /* Start streaming of the whole pipeline now */ dev_dbg(dev, "IMGU streaming is ready to start"); + mutex_lock(&imgu->streaming_lock); r = imgu_s_stream(imgu, true); if (!r) imgu->streaming = true; + mutex_unlock(&imgu->streaming_lock); return 0; @@ -532,6 +539,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) dev_err(&imgu->pci_dev->dev, "failed to stop subdev streaming\n"); + mutex_lock(&imgu->streaming_lock); /* Was this the first node with streaming disabled? */ if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) { /* Yes, really stop streaming now */ @@ -542,6 +550,8 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) } imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); + mutex_unlock(&imgu->streaming_lock); + media_pipeline_stop(&node->vdev.entity); } @@ -675,6 +685,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id); + css_q = imgu_node_to_queue(node); for (i = 0; i < IPU3_CSS_QUEUES; i++) { unsigned int inode = imgu_map_node(imgu, i); @@ -682,6 +693,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS) continue; + /* CSS expects some format on OUT queue */ + if (i != IPU3_CSS_QUEUE_OUT && + !imgu_pipe->nodes[inode].enabled) { + fmts[i] = NULL; + continue; + } + + if (i == css_q) { + fmts[i] = &f->fmt.pix_mp; + continue; + } + if (try) { fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp, sizeof(struct v4l2_pix_format_mplane), @@ -694,10 +717,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp; } - /* CSS expects some format on OUT queue */ - if (i != IPU3_CSS_QUEUE_OUT && - !imgu_pipe->nodes[inode].enabled) - fmts[i] = NULL; } if (!try) { @@ -714,16 +733,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height; } - /* - * imgu doesn't set the node to the value given by user - * before we return success from this function, so set it here. - */ - css_q = imgu_node_to_queue(node); if (!fmts[css_q]) { ret = -EINVAL; goto out; } - *fmts[css_q] = f->fmt.pix_mp; if (try) ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe); @@ -734,15 +747,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node, if (ret < 0) goto out; - if (try) - f->fmt.pix_mp = *fmts[css_q]; - else - f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt; + /* + * imgu doesn't set the node to the value given by user + * before we return success from this function, so set it here. 
+ */ + if (!try) + imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp; out: if (try) { for (i = 0; i < IPU3_CSS_QUEUES; i++) - kfree(fmts[i]); + if (i != css_q) + kfree(fmts[i]); } return ret; diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c index 06a61f31ca50..08eb6791918b 100644 --- a/drivers/staging/media/ipu3/ipu3.c +++ b/drivers/staging/media/ipu3/ipu3.c @@ -261,6 +261,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe ivb = list_first_entry(&imgu_pipe->nodes[node].buffers, struct imgu_vb2_buffer, list); + list_del(&ivb->list); vb = &ivb->vbb.vb2_buf; r = imgu_css_set_parameters(&imgu->css, pipe, vb2_plane_vaddr(vb, 0)); @@ -274,7 +275,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe vb2_buffer_done(vb, VB2_BUF_STATE_DONE); dev_dbg(&imgu->pci_dev->dev, "queue user parameters %d to css.", vb->index); - list_del(&ivb->list); } else if (imgu_pipe->queue_enabled[node]) { struct imgu_css_buffer *buf = imgu_queue_getbuf(imgu, node, pipe); @@ -663,6 +663,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev, return r; mutex_init(&imgu->lock); + mutex_init(&imgu->streaming_lock); atomic_set(&imgu->qbuf_barrier, 0); init_waitqueue_head(&imgu->buf_drain_wq); @@ -726,6 +727,7 @@ out_mmu_exit: out_css_powerdown: imgu_css_set_powerdown(&pci_dev->dev, imgu->base); out_mutex_destroy: + mutex_destroy(&imgu->streaming_lock); mutex_destroy(&imgu->lock); return r; @@ -743,6 +745,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev) imgu_css_set_powerdown(&pci_dev->dev, imgu->base); imgu_dmamap_exit(imgu); imgu_mmu_exit(imgu->mmu); + mutex_destroy(&imgu->streaming_lock); mutex_destroy(&imgu->lock); } diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h index 73b123b2b8a2..8cd6a0077d99 100644 --- a/drivers/staging/media/ipu3/ipu3.h +++ b/drivers/staging/media/ipu3/ipu3.h @@ -146,6 +146,10 @@ struct imgu_device { * vid_buf.list and css->queue */ struct mutex lock; + + /* Lock to protect writes to streaming flag in this struct */ + struct mutex streaming_lock; + /* Forbid streaming and buffer queuing during system suspend. */ atomic_t qbuf_barrier; /* Indicate if system suspend take place while imgu is streaming. 
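The new streaming_lock above encodes one rule: every check or update of imgu->streaming happens under the mutex, so buffer queueing, start_streaming and stop_streaming can no longer race on a half-updated flag. A reduced sketch of that rule, with a hypothetical device struct standing in for struct imgu_device:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_dev {
	struct mutex streaming_lock;	/* protects .streaming, as in imgu */
	bool streaming;
};

static int demo_start_streaming(struct demo_dev *d)
{
	int ret = 0;

	mutex_lock(&d->streaming_lock);
	if (d->streaming)
		ret = -EBUSY;	/* same answer imgu_vb2_start_streaming() gives */
	else
		d->streaming = true;
	mutex_unlock(&d->streaming_lock);

	return ret;
}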
*/ diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index 1a966cb2f3a6..eed495f7665d 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -1240,8 +1240,10 @@ static int iss_probe(struct platform_device *pdev) if (ret < 0) goto error; - if (!omap4iss_get(iss)) + if (!omap4iss_get(iss)) { + ret = -EINVAL; goto error; + } ret = iss_reset(iss); if (ret < 0) diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 3439f6ad6338..e80e82a276e9 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -159,6 +159,7 @@ static int cedrus_request_validate(struct media_request *req) struct v4l2_ctrl *ctrl_test; unsigned int count; unsigned int i; + int ret = 0; list_for_each_entry(obj, &req->objects, list) { struct vb2_buffer *vb; @@ -203,12 +204,16 @@ static int cedrus_request_validate(struct media_request *req) if (!ctrl_test) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); - return -ENOENT; + ret = -ENOENT; + break; } } v4l2_ctrl_request_hdl_put(hdl); + if (ret) + return ret; + return vb2_request_validate(req); } diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c index 56ca4c9ad01c..47940f02457b 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c @@ -65,6 +65,8 @@ void cedrus_device_run(void *priv) v4l2_m2m_buf_copy_metadata(run.src, run.dst, true); + cedrus_dst_format_set(dev, &ctx->dst_fmt); + dev->dec_ops[ctx->current_codec]->setup(ctx, &run); /* Complete request(s) controls if needed. */ diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c index eeee3efd247b..966f9f3ed9d3 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c @@ -286,7 +286,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cedrus_ctx *ctx = cedrus_file2ctx(file); - struct cedrus_dev *dev = ctx->dev; struct vb2_queue *vq; int ret; @@ -300,8 +299,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, ctx->dst_fmt = f->fmt.pix; - cedrus_dst_format_set(dev, &ctx->dst_fmt); - return 0; } diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c index 79817061fcfa..4225ee9fcf7b 100644 --- a/drivers/staging/most/sound/sound.c +++ b/drivers/staging/most/sound/sound.c @@ -98,6 +98,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes) { unsigned int i = 0; + if (bytes < 2) + return; while (i < bytes - 2) { dest[i] = source[i + 2]; dest[i + 1] = source[i + 1]; diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile index 66da1bf10c32..23256d1286f3 100644 --- a/drivers/staging/mt7621-dma/Makefile +++ b/drivers/staging/mt7621-dma/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o +obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o ccflags-y += -I$(srctree)/drivers/dma diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c similarity index 99% rename from drivers/staging/mt7621-dma/mtk-hsdma.c rename to drivers/staging/mt7621-dma/hsdma-mt7621.c index d964642d95a3..803b66d8ee6b 100644 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c +++ 
b/drivers/staging/mt7621-dma/hsdma-mt7621.c @@ -714,7 +714,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) ret = dma_async_device_register(dd); if (ret) { dev_err(&pdev->dev, "failed to register dma device\n"); - return ret; + goto err_uninit_hsdma; } ret = of_dma_controller_register(pdev->dev.of_node, @@ -730,6 +730,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev) err_unregister: dma_async_device_unregister(dd); +err_uninit_hsdma: + mtk_hsdma_uninit(hsdma); return ret; } @@ -749,7 +751,7 @@ static struct platform_driver mtk_hsdma_driver = { .probe = mtk_hsdma_probe, .remove = mtk_hsdma_remove, .driver = { - .name = "hsdma-mt7621", + .name = KBUILD_MODNAME, .of_match_table = mtk_hsdma_of_match, }, }; diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index ffac0c4b3f5c..022c71e69a23 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -147,12 +147,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev) phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { - int rc; - - rc = of_phy_register_fixed_link(priv->of_node); - if (rc) - return rc; - phy_node = of_node_get(priv->of_node); } if (!phy_node) diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 0e65955c746b..9a242d68a26b 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -69,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) else port = work->word1.cn38xx.ipprt; - if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) { + if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) /* * Ignore length errors on min size packets. Some * equipment incorrectly pads packets to 64+4FCS * instead of 60+4FCS. Note these packets still get * counted as frame errors. */ - } else if (work->word2.snoip.err_code == 5 || - work->word2.snoip.err_code == 7) { + return 0; + + if (work->word2.snoip.err_code == 5 || + work->word2.snoip.err_code == 7) { /* * We received a packet with either an alignment error * or a FCS error. 
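The mtk_hsdma_probe() hunk above restores the canonical probe unwind shape: each failure point jumps to a label that releases exactly what has been acquired so far, in reverse order, so a dma_async_device_register() failure no longer leaks the initialised hardware. A compressed sketch of that shape with placeholder helpers (the demo_* names are not the driver's API):

static int demo_hw_init(void)		{ return 0; }
static void demo_hw_uninit(void)	{ }
static int demo_dma_register(void)	{ return 0; }
static void demo_dma_unregister(void)	{ }
static int demo_of_register(void)	{ return 0; }

static int demo_probe(void)
{
	int ret;

	ret = demo_hw_init();
	if (ret)
		return ret;

	ret = demo_dma_register();
	if (ret)
		goto err_uninit_hw;	/* the unwind step the fix adds */

	ret = demo_of_register();
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	demo_dma_unregister();
err_uninit_hw:
	demo_hw_uninit();
	return ret;
}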
This may be signalling that we are @@ -108,7 +110,10 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) /* Port received 0xd5 preamble */ work->packet_ptr.s.addr += i + 1; work->word1.len -= i + 5; - } else if ((*ptr & 0xf) == 0xd) { + return 0; + } + + if ((*ptr & 0xf) == 0xd) { /* Port received 0xd preamble */ work->packet_ptr.s.addr += i; work->word1.len -= i + 4; @@ -118,21 +123,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) ((*(ptr + 1) & 0xf) << 4); ptr++; } - } else { - printk_ratelimited("Port %d unknown preamble, packet dropped\n", - port); - cvm_oct_free_work(work); - return 1; + return 0; } + + printk_ratelimited("Port %d unknown preamble, packet dropped\n", + port); + cvm_oct_free_work(work); + return 1; } - } else { - printk_ratelimited("Port %d receive error code %d, packet dropped\n", - port, work->word2.snoip.err_code); - cvm_oct_free_work(work); - return 1; } - return 0; + printk_ratelimited("Port %d receive error code %d, packet dropped\n", + port, work->word2.snoip.err_code); + cvm_oct_free_work(work); + return 1; } static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb) diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 83469061a542..fe6e1ae73460 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -352,10 +352,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) skb_dst_set(skb, NULL); skb_ext_reset(skb); nf_reset_ct(skb); + skb_reset_redirect(skb); #ifdef CONFIG_NET_SCHED skb->tc_index = 0; - skb_reset_tc(skb); #endif /* CONFIG_NET_SCHED */ #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index cf8e9a23ebf9..77b7c1fd9d78 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -13,6 +13,7 @@ #include <linux/phy.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> @@ -894,6 +895,14 @@ static int cvm_oct_probe(struct platform_device *pdev) break; } + if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) { + if (of_phy_register_fixed_link(priv->of_node)) { + netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n", + interface, priv->port); + dev->netdev_ops = NULL; + } + } + if (!dev->netdev_ops) { free_netdev(dev); } else if (register_netdev(dev) < 0) { diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig index 54e8029e6b1a..0017376234e2 100644 --- a/drivers/staging/ralink-gdma/Kconfig +++ b/drivers/staging/ralink-gdma/Kconfig @@ -2,6 +2,7 @@ config DMA_RALINK tristate "RALINK DMA support" depends on RALINK && !SOC_RT288X + depends on DMADEVICES select DMA_ENGINE select DMA_VIRTUAL_CHANNELS diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 51a5b71f8c25..1c0300ce6336 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -784,6 +784,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* SSID */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_)); if (p && ie_len > 0) { + ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid)); memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(pbss_network->ssid.ssid, (p + 2), ie_len); pbss_network->ssid.ssid_length = 
ie_len; @@ -802,6 +803,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* get supported rates */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_)); if (p) { + ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); memcpy(supportRate, p + 2, ie_len); supportRateNum = ie_len; } @@ -809,6 +811,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* get ext_supported rates */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_); if (p) { + ie_len = min_t(int, ie_len, + NDIS_802_11_LENGTH_RATES_EX - supportRateNum); memcpy(supportRate + supportRateNum, p + 2, ie_len); supportRateNum += ie_len; } @@ -922,6 +926,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) pht_cap->mcs.rx_mask[0] = 0xff; pht_cap->mcs.rx_mask[1] = 0x0; + ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len); } diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index 1ec3b237212e..7cee7b4d5270 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -1729,9 +1729,11 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_ if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) authmode = _WPA_IE_ID_; - if ((ndisauthmode == Ndis802_11AuthModeWPA2) || + else if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) authmode = _WPA2_IE_ID_; + else + authmode = 0x0; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index d4278361e002..a036ef104198 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); - if (sub_skb) { - skb_reserve(sub_skb, 12); - skb_put_data(sub_skb, pdata, nSubframe_Length); - } else { - sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC); - if (sub_skb) { - sub_skb->data = pdata; - sub_skb->len = nSubframe_Length; - skb_set_tail_pointer(sub_skb, nSubframe_Length); - } else { - DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes); - break; - } + if (!sub_skb) { + DBG_88E("dev_alloc_skb() Fail!!! 
, nr_subframes=%d\n", nr_subframes); + break; } + skb_reserve(sub_skb, 12); + skb_put_data(sub_skb, pdata, nSubframe_Length); + subframes[nr_subframes++] = sub_skb; if (nr_subframes >= MAX_SUBFRAME_COUNT) { diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index 630e7d933b10..7b83f0920f3c 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1160,9 +1160,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } sec_len = *(pos++); len -= 1; - if (sec_len > 0 && sec_len <= len) { + if (sec_len > 0 && + sec_len <= len && + sec_len <= 32) { ssid[ssid_index].ssid_length = sec_len; - memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length); + memcpy(ssid[ssid_index].ssid, pos, sec_len); ssid_index++; } pos += sec_len; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index f7f09c0d273f..5b103e829ee7 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 16bcee13f64b..407effde5e71 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, req->essid, - req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 328f410daa03..2eeb9a43734e 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1105,7 +1105,7 @@ struct rtllib_network { bool bWithAironetIE; bool bCkipSupported; bool bCcxRmEnable; - u16 CcxRmState[2]; + u8 CcxRmState[2]; bool bMBssidValid; u8 MBssidMask; u8 MBssid[ETH_ALEN]; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 0bae0a0a4cbe..83c30e2d82f5 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1968,7 +1968,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee, info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { - memcpy(network->CcxRmState, &info_element[4], 2); + memcpy(network->CcxRmState, &info_element->data[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 5c33bcb0db2e..00e34c392a38 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -585,7 +585,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, prxbIndicateArray 
= kmalloc_array(REORDER_WIN_SIZE, sizeof(struct ieee80211_rxb *), - GFP_KERNEL); + GFP_ATOMIC); if (!prxbIndicateArray) return; diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 511136dce3a4..66cd43f963c9 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -2401,7 +2401,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev) ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1)); if (ret < 0) return ret; - priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8; + priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; } else priv->EEPROMTxPowerLevelCCK = 0x10; RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK); @@ -3248,7 +3248,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, u32 *TotalRxDataNum) { u16 SlotIndex; - u8 i; + u16 i; *TotalRxBcnNum = 0; *TotalRxDataNum = 0; diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index 5822bb7984b9..8f10672ade37 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -333,8 +333,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - ieee->current_network.ssid_len = req->essid_len; - memcpy(ieee->current_network.ssid, req->essid, req->essid_len); + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); + + ieee->current_network.ssid_len = len; + memcpy(ieee->current_network.ssid, req->essid, len); } } diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c index 40145c0338e4..42c0a3c947f1 100644 --- a/drivers/staging/rtl8712/hal_init.c +++ b/drivers/staging/rtl8712/hal_init.c @@ -33,7 +33,6 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context) { struct _adapter *adapter = context; - complete(&adapter->rtl8712_fw_ready); if (!firmware) { struct usb_device *udev = adapter->dvobjpriv.pusbdev; struct usb_interface *usb_intf = adapter->pusb_intf; @@ -41,11 +40,13 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context) dev_err(&udev->dev, "r8712u: Firmware request failed\n"); usb_put_dev(udev); usb_set_intfdata(usb_intf, NULL); + complete(&adapter->rtl8712_fw_ready); return; } adapter->fw = firmware; /* firmware available - start netdev */ register_netdev(adapter->pnetdev); + complete(&adapter->rtl8712_fw_ready); } static const char firmware_file[] = "rtlwifi/rtl8712u.bin"; diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 26b618008fcf..d989229de88b 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -197,8 +197,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, psurveyPara->ss_ssidlen = 0; memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); if ((pssid != NULL) && (pssid->SsidLength)) { - memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); - psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); + int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); + + memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); + psurveyPara->ss_ssidlen = cpu_to_le32(len); } set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); r8712_enqueue_cmd(pcmdpriv, ph2c); diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 944336e0d2e2..cff918d8bcb5 100644 --- 
a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -928,7 +928,7 @@ static int r871x_wx_set_priv(struct net_device *dev, struct iw_point *dwrq = (struct iw_point *)awrq; len = dwrq->length; - ext = memdup_user(dwrq->pointer, len); + ext = strndup_user(dwrq->pointer, len); if (IS_ERR(ext)) return PTR_ERR(ext); diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index a87562f632a7..2fcd65260f4c 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -595,13 +595,17 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) if (pnetdev) { struct _adapter *padapter = netdev_priv(pnetdev); - usb_set_intfdata(pusb_intf, NULL); - release_firmware(padapter->fw); /* never exit with a firmware callback pending */ wait_for_completion(&padapter->rtl8712_fw_ready); + pnetdev = usb_get_intfdata(pusb_intf); + usb_set_intfdata(pusb_intf, NULL); + if (!pnetdev) + goto firmware_load_fail; + release_firmware(padapter->fw); if (drvpriv.drv_registered) padapter->surprise_removed = true; - unregister_netdev(pnetdev); /* will call netdev_close() */ + if (pnetdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(pnetdev); /* will call netdev_close() */ flush_scheduled_work(); udelay(1); /* Stop driver mlme relation timer */ @@ -614,6 +618,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) */ usb_put_dev(udev); } +firmware_load_fail: /* If we didn't unplug usb dongle and remove/insert module, driver * fails on sitesurvey for the first time when device is up. * Reset usb port for sitesurvey fail issue. diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h index be731f1a2209..91b65731fcaa 100644 --- a/drivers/staging/rtl8712/wifi.h +++ b/drivers/staging/rtl8712/wifi.h @@ -440,7 +440,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe) /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 @@ -532,13 +532,6 @@ struct ieee80211_ht_addt_info { #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 -/* block-ack parameters */ -#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 -#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 -#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 -#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 - /* * A-PMDU buffer sizes * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index ea3ea2a6b314..f6678ba6d4bc 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1845,12 +1845,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (!pIE) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - 
if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index 3784a27641a6..df2513234cf7 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -21,6 +21,7 @@ static const struct sdio_device_id sdio_ids[] = { SDIO_DEVICE(0x024c, 0x0525), }, { SDIO_DEVICE(0x024c, 0x0623), }, { SDIO_DEVICE(0x024c, 0x0626), }, + { SDIO_DEVICE(0x024c, 0x0627), }, { SDIO_DEVICE(0x024c, 0xb723), }, { /* end: all zeroes */ }, }; diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c index 578b9f734231..65592bf84f38 100644 --- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c +++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c @@ -34,7 +34,7 @@ NL80211_RRF_PASSIVE_SCAN) static const struct ieee80211_regdomain rtw_regdom_rd = { - .n_reg_rules = 3, + .n_reg_rules = 2, .alpha2 = "99", .reg_rules = { RTW_2GHZ_CH01_11, diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 59568d18ce23..5b72aa81d94c 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -898,6 +898,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) fix->visual = FB_VISUAL_PSEUDOCOLOR; break; case 16: + case 24: case 32: fix->visual = FB_VISUAL_TRUECOLOR; break; diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c index dccb4ea29d37..45ac48d03180 100644 --- a/drivers/staging/speakup/speakup_dectlk.c +++ b/drivers/staging/speakup/speakup_dectlk.c @@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth); static int in_escape; static int is_flushing; -static spinlock_t flush_lock; +static DEFINE_SPINLOCK(flush_lock); static DECLARE_WAIT_QUEUE_HEAD(flush); static struct var_t vars[] = { diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index 5a9eff08cb96..472804c3f44d 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -47,9 +47,12 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) { struct spk_ldisc_data *ldisc_data; + if (tty != speakup_tty) + /* Somebody tried to use this line discipline outside speakup */ + return -ENODEV; + if (!tty->ops->write) return -EOPNOTSUPP; - speakup_tty = tty; ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL); if (!ldisc_data) @@ -57,7 +60,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) init_completion(&ldisc_data->completion); ldisc_data->buf_free = true; - speakup_tty->disc_data = ldisc_data; + tty->disc_data = ldisc_data; return 0; } @@ -179,9 +182,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) tty_unlock(tty); + mutex_lock(&speakup_tty_mutex); + speakup_tty = tty; ret = tty_set_ldisc(tty, N_SPEAKUP); if (ret) - pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + speakup_tty = NULL; + mutex_unlock(&speakup_tty_mutex); + + if (!ret) + /* Success */ + return 0; + + pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + + tty_lock(tty); + if (tty->ops->close) + tty->ops->close(tty, NULL); + tty_unlock(tty); + + tty_kclose(tty); return ret; } diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c index 
4c2cae99776b..3703409715da 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c @@ -224,7 +224,7 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip) { int err; - strcpy(chip->card->mixername, "Broadcom Mixer"); + strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername)); err = create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl), snd_bcm2835_ctl); if (err < 0) return err; @@ -261,7 +261,7 @@ static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = { int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip) { - strcpy(chip->card->mixername, "Broadcom Mixer"); + strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername)); return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl), snd_bcm2835_headphones_ctl); } @@ -295,7 +295,7 @@ static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = { int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip) { - strcpy(chip->card->mixername, "Broadcom Mixer"); + strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername)); return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi), snd_bcm2835_hdmi); } diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c index 826016c3431a..8708f97b46f3 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c @@ -351,7 +351,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name, pcm->private_data = chip; pcm->nonatomic = true; - strcpy(pcm->name, name); + strscpy(pcm->name, name, sizeof(pcm->name)); if (!spdif) { chip->dest = route; chip->volume = 0; diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c index cf5f80f5ca6b..c250fbef2fa3 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c @@ -185,9 +185,9 @@ static int snd_add_child_device(struct device *dev, goto error; } - strcpy(card->driver, audio_driver->driver.name); - strcpy(card->shortname, audio_driver->shortname); - strcpy(card->longname, audio_driver->longname); + strscpy(card->driver, audio_driver->driver.name, sizeof(card->driver)); + strscpy(card->shortname, audio_driver->shortname, sizeof(card->shortname)); + strscpy(card->longname, audio_driver->longname, sizeof(card->longname)); err = audio_driver->newpcm(chip, audio_driver->shortname, audio_driver->route, diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index af6bf0736b52..eb76cc2cbfd8 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -3257,6 +3257,7 @@ failed_platform_init: static int vchiq_remove(struct platform_device *pdev) { + platform_device_unregister(bcm2835_audio); platform_device_unregister(bcm2835_camera); vchiq_debugfs_deinit(); device_destroy(vchiq_class, vchiq_devid); diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c index af215860be4c..ac563e23868e 100644 --- a/drivers/staging/vt6656/int.c +++ b/drivers/staging/vt6656/int.c @@ -145,7 +145,8 @@ void vnt_int_process_data(struct vnt_private *priv) priv->wake_up_count = priv->hw->conf.listen_interval; - --priv->wake_up_count; + if (priv->wake_up_count) + --priv->wake_up_count; 
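On the strscpy() conversions in bcm2835-audio above: unlike strcpy(), strscpy() is bounded by the destination size, always NUL-terminates, and returns -E2BIG when the source had to be truncated, so the fixed-size ALSA name fields can no longer be overrun. A sketch of the call pattern (demo_card is a stand-in for the real struct snd_card):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

struct demo_card {
	char mixername[80];	/* fixed-size, as in struct snd_card */
};

static void demo_set_mixer_name(struct demo_card *card, const char *name)
{
	/* Bounded copy: writes at most sizeof(mixername) bytes, NUL included. */
	if (strscpy(card->mixername, name, sizeof(card->mixername)) == -E2BIG)
		pr_warn("mixer name \"%s\" truncated\n", name);
}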
/* Turn on wake up to listen next beacon */ if (priv->wake_up_count == 1) diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index dcd933a6b66e..40c58ac4e209 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, case VNT_KEY_PAIRWISE: key_mode |= mode; key_inx = 4; - /* Don't save entry for pairwise key for station mode */ - if (priv->op_mode == NL80211_IFTYPE_STATION) - clear_bit(entry, &priv->key_entry_inuse); break; default: return -EINVAL; @@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_vif *vif, struct ieee80211_key_conf *key) { - struct ieee80211_bss_conf *conf = &vif->bss_conf; struct vnt_private *priv = hw->priv; u8 *mac_addr = NULL; u8 key_dec_mode = 0; @@ -151,16 +147,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE, key_dec_mode, true); - } else { - vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, + else + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - vnt_set_keymode(hw, (u8 *)conf->bssid, key, - VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - } - return 0; } diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 69a48383611f..48db31238d56 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -633,8 +633,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) priv->op_mode = vif->type; - vnt_set_bss_mode(priv); - /* LED blink on TX */ vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER); @@ -721,7 +719,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->basic_rates = conf->basic_rates; vnt_update_top_rates(priv); - vnt_set_bss_mode(priv); dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates); } @@ -750,11 +747,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->short_slot_time = false; vnt_set_short_slot_time(priv); - vnt_update_ifs(priv); vnt_set_vga_gain_offset(priv, priv->bb_vga[0]); vnt_update_pre_ed_threshold(priv, false); } + if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT)) + vnt_set_bss_mode(priv); + if (changed & BSS_CHANGED_TXPOWER) vnt_rf_setpower(priv, priv->current_rate, conf->chandef.chan->hw_value); @@ -778,12 +778,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, - conf->sync_tsf, priv->current_tsf); - vnt_mac_set_beacon_interval(priv, conf->beacon_int); vnt_reset_next_tbtt(priv, conf->beacon_int); + + vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, + conf->sync_tsf, priv->current_tsf); + + vnt_update_next_tbtt(priv, + conf->sync_tsf, conf->beacon_int); } else { vnt_clear_current_tsf(priv); @@ -818,15 +821,11 @@ static void vnt_configure(struct ieee80211_hw *hw, { struct vnt_private *priv = hw->priv; u8 rx_mode = 0; - int rc; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; - rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, - MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); - - if (!rc) - rx_mode = RCR_MULTICAST | 
RCR_BROADCAST; + vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, + MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode); @@ -867,8 +866,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; break; case DISABLE_KEY: - if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) + if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) { clear_bit(key->hw_key_idx, &priv->key_entry_inuse); + + vnt_mac_disable_keyentry(priv, key->hw_key_idx); + } + default: break; } diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c index 77d0732f451b..221e3d93db14 100644 --- a/drivers/staging/wilc1000/wilc_hif.c +++ b/drivers/staging/wilc1000/wilc_hif.c @@ -12,6 +12,8 @@ #define WILC_FALSE_FRMWR_CHANNEL 100 #define WILC_MAX_RATES_SUPPORTED 12 +#define WILC_SCAN_WID_LIST_SIZE 6 + struct wilc_rcvd_mac_info { u8 status; }; @@ -233,7 +235,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, void *user_arg, struct cfg80211_scan_request *request) { int result = 0; - struct wid wid_list[5]; + struct wid wid_list[WILC_SCAN_WID_LIST_SIZE]; u32 index = 0; u32 i, scan_timeout; u8 *buffer; diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/wilc_mon.c index d6f14f69ad64..017e8e91334f 100644 --- a/drivers/staging/wilc1000/wilc_mon.c +++ b/drivers/staging/wilc1000/wilc_mon.c @@ -236,11 +236,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, if (register_netdevice(wl->monitor_dev)) { netdev_err(real_dev, "register_netdevice failed\n"); + free_netdev(wl->monitor_dev); return NULL; } priv = netdev_priv(wl->monitor_dev); - if (!priv) - return NULL; priv->real_ndev = real_dev; diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 771d8cb68dc1..02f551536e18 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -578,7 +578,6 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) entries = ((reg >> 3) & 0x3f); break; } - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); } while (--timeout); if (timeout <= 0) { ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0); diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index e29c14e0ed49..ed4ff78dd02a 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -526,13 +526,8 @@ static void hfa384x_usb_defer(struct work_struct *data) */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(*hw)); hw->usb = usb; - /* set up the endpoints */ - hw->endp_in = usb_rcvbulkpipe(usb, 1); - hw->endp_out = usb_sndbulkpipe(usb, 2); - /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index d8d86761b790..9eee72aff723 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,16 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); + if (result) + goto failed; + dev = interface_to_usbdev(interface); wlandev = create_wlan(); if 
(!wlandev) { @@ -82,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, } /* Initialize the hw data */ + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); hfa384x_create(hw, dev); hw->wlandev = wlandev; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index fcdc4211e3c2..45a1bfa2f735 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb) if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)) length += sizeof(struct cpl_tx_data_iso); -#define MAX_IMM_TX_PKT_LEN 256 - return length <= MAX_IMM_TX_PKT_LEN; + return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN; } /* diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d19e051f2bc2..3403667a9592 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp); void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { spin_lock_bh(&conn->cmd_lock); - if (!list_empty(&cmd->i_conn_node) && - !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) + if (!list_empty(&cmd->i_conn_node)) list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); @@ -1386,14 +1385,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } @@ -4069,12 +4081,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_splice_init(&conn->conn_cmd_list, &tmp_list); - list_for_each_entry(cmd, &tmp_list, i_conn_node) { + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { struct se_cmd *se_cmd = &cmd->se_cmd; if (se_cmd->se_tfo != NULL) { spin_lock_irq(&se_cmd->t_state_lock); - se_cmd->transport_state |= CMD_T_FABRIC_STOP; + if (se_cmd->transport_state & CMD_T_ABORTED) { + /* + * LIO's abort path owns the cleanup for this, + * so put it back on the list and let + * aborted_task handle it. 
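The iscsit_do_crypto_hash_sg() change above handles a digest that starts part-way into the first scatterlist entry: rather than carrying page_off through the whole walk, it hashes a temporary one-entry scatterlist covering just the tail of the first segment, then consumes the remaining entries whole. The partial-entry step in isolation (simplified; the ahash plumbing is elided to a comment):

#include <linux/kernel.h>	/* min_t() */
#include <linux/scatterlist.h>

static void demo_hash_partial_first_sg(struct scatterlist *sg, u32 page_off,
				       u32 *data_length)
{
	struct scatterlist first_sg;
	u32 len = min_t(u32, *data_length, sg->length - page_off);

	sg_init_table(&first_sg, 1);
	/* Same backing page, shifted by the bytes already consumed. */
	sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);

	/* ...ahash_request_set_crypt(hash, &first_sg, NULL, len); update... */

	*data_length -= len;
}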
+ */ + list_move_tail(&cmd->i_conn_node, + &conn->conn_cmd_list); + } else { + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + } spin_unlock_irq(&se_cmd->t_state_lock); } } @@ -4303,30 +4325,37 @@ int iscsit_close_connection( if (!atomic_read(&sess->session_reinstatement) && atomic_read(&sess->session_fall_back_to_erl0)) { spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); iscsit_close_session(sess); return 0; } else if (atomic_read(&sess->session_logout)) { pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); sess->session_state = TARG_SESS_STATE_FREE; - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } else { pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); sess->session_state = TARG_SESS_STATE_FAILED; - if (!atomic_read(&sess->session_continuation)) { - spin_unlock_bh(&sess->conn_lock); + if (!atomic_read(&sess->session_continuation)) iscsit_start_time2retain_handler(sess); - } else - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } @@ -4432,9 +4461,9 @@ static void iscsit_logout_post_handler_closesession( complete(&conn->conn_logout_comp); iscsit_dec_conn_usage_count(conn); + atomic_set(&sess->session_close, 1); iscsit_stop_session(sess, sleep, sleep); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); } static void iscsit_logout_post_handler_samecid( @@ -4569,49 +4598,6 @@ void iscsit_fail_session(struct iscsi_session *sess) sess->session_state = TARG_SESS_STATE_FAILED; } -int iscsit_free_session(struct iscsi_session *sess) -{ - u16 conn_count = atomic_read(&sess->nconn); - struct iscsi_conn *conn, *conn_tmp = NULL; - int is_last; - - spin_lock_bh(&sess->conn_lock); - atomic_set(&sess->sleep_on_sess_wait_comp, 1); - - list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, - conn_list) { - if (conn_count == 0) - break; - - if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { - is_last = 1; - } else { - iscsit_inc_conn_usage_count(conn_tmp); - is_last = 0; - } - iscsit_inc_conn_usage_count(conn); - - spin_unlock_bh(&sess->conn_lock); - iscsit_cause_connection_reinstatement(conn, 1); - spin_lock_bh(&sess->conn_lock); - - iscsit_dec_conn_usage_count(conn); - if (is_last == 0) - iscsit_dec_conn_usage_count(conn_tmp); - - conn_count--; - } - - if (atomic_read(&sess->nconn)) { - spin_unlock_bh(&sess->conn_lock); - wait_for_completion(&sess->session_wait_comp); - } else - spin_unlock_bh(&sess->conn_lock); - - iscsit_close_session(sess); - return 0; -} - void iscsit_stop_session( struct iscsi_session *sess, int session_sleep, @@ -4622,8 +4608,6 @@ void iscsit_stop_session( int is_last; spin_lock_bh(&sess->conn_lock); - if (session_sleep) - atomic_set(&sess->sleep_on_sess_wait_comp, 1); if (connection_sleep) { list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, @@ -4681,12 +4665,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) spin_lock(&sess->conn_lock); if (atomic_read(&sess->session_fall_back_to_erl0) || 
atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); continue; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); list_move_tail(&se_sess->sess_list, &free_list); @@ -4696,7 +4683,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; - iscsit_free_session(sess); + list_del_init(&se_sess->sess_list); + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); session_count++; } diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index c95f56a3ce31..7409ce2a6607 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *); extern int iscsit_close_connection(struct iscsi_conn *); extern int iscsit_close_session(struct iscsi_session *); extern void iscsit_fail_session(struct iscsi_session *); -extern int iscsit_free_session(struct iscsi_session *); extern void iscsit_stop_session(struct iscsi_session *, int, int); extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 42b369fc415e..0fa1d57b26fa 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1476,20 +1476,23 @@ static void lio_tpg_close_session(struct se_session *se_sess) spin_lock(&sess->conn_lock); if (atomic_read(&sess->session_fall_back_to_erl0) || atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); spin_unlock_bh(&se_tpg->session_lock); return; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); iscsit_stop_session(sess, 1, 1); - iscsit_close_session(sess); + iscsit_dec_session_usage_count(sess); } static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index f53330813207..0cc5ea195273 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -156,6 +156,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) spin_lock(&sess_p->conn_lock); if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess_p->conn_lock); continue; @@ -166,6 +167,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) (sess_p->sess_ops->SessionType == sessiontype))) { atomic_set(&sess_p->session_reinstatement, 1); atomic_set(&sess_p->session_fall_back_to_erl0, 1); + atomic_set(&sess_p->session_close, 1); spin_unlock(&sess_p->conn_lock); iscsit_inc_session_usage_count(sess_p); iscsit_stop_time2retain_timer(sess_p); @@ -190,7 +192,6 @@ int 
iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) if (sess->session_state == TARG_SESS_STATE_FAILED) { spin_unlock_bh(&sess->conn_lock); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } spin_unlock_bh(&sess->conn_lock); @@ -198,7 +199,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) iscsit_stop_session(sess, 1, 1); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } @@ -486,6 +486,7 @@ static int iscsi_login_non_zero_tsih_s2( sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) continue; if (!memcmp(sess_p->isid, pdu->isid, 6) && @@ -1171,7 +1172,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1189,7 +1190,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. @@ -1429,8 +1429,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d..fc95e6150253 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 685d771b51d4..e32d93b92742 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 3305b47fdf53..16d5a4e117a2 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd) return 0; } -static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) +static int 
tcm_loop_queue_data_or_status(const char *func, + struct se_cmd *se_cmd, u8 scsi_status) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", - __func__, sc, sc->cmnd[0]); - - sc->result = SAM_STAT_GOOD; - set_host_byte(sc, DID_OK); - if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || - (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) - scsi_set_resid(sc, se_cmd->residual_count); - sc->scsi_done(sc); - return 0; -} - -static int tcm_loop_queue_status(struct se_cmd *se_cmd) -{ - struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, - struct tcm_loop_cmd, tl_se_cmd); - struct scsi_cmnd *sc = tl_cmd->sc; - - pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", - __func__, sc, sc->cmnd[0]); + func, sc, sc->cmnd[0]); if (se_cmd->sense_buffer && ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || @@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) sc->result = SAM_STAT_CHECK_CONDITION; set_driver_byte(sc, DRIVER_SENSE); } else - sc->result = se_cmd->scsi_status; + sc->result = scsi_status; set_host_byte(sc, DID_OK); if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || @@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) return 0; } +static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD); +} + +static int tcm_loop_queue_status(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, + se_cmd, se_cmd->scsi_status); +} + static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 6b4b354c88aa..b5c970faf585 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -63,7 +63,7 @@ static int fc_get_pr_transport_id( * encoded TransportID. 
*/ ptr = &se_nacl->initiatorname[0]; - for (i = 0; i < 24; ) { + for (i = 0; i < 23; ) { if (!strncmp(&ptr[i], ":", 1)) { i++; continue; } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 51ffd5c002de..1c181d31f4c8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) target_to_linux_sector(dev, cmd->t_task_lba), target_to_linux_sector(dev, sbc_get_write_same_sectors(cmd)), - GFP_KERNEL, false); + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); if (ret) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 853344415963..e7b3c6e5d574 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -138,6 +138,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); +void transport_uninit_session(struct se_session *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5e931690e697..51e690ab4d29 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3731,6 +3731,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) spin_unlock(&dev->t10_pr.registration_lock); put_unaligned_be32(add_len, &buf[4]); + target_set_cmd_data_length(cmd, 8 + add_len); transport_kunmap_data_sg(cmd); @@ -3749,7 +3750,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) struct t10_pr_registration *pr_reg; unsigned char *buf; u64 pr_res_key; - u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ + u32 add_len = 0; if (cmd->data_length < 8) { pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" @@ -3767,8 +3768,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) pr_reg = dev->dev_pr_res_holder; if (pr_reg) { /* - * Set the hardcoded Additional Length + * Set the Additional Length to 16 when a reservation is held */ + add_len = 16; put_unaligned_be32(add_len, &buf[4]); if (cmd->data_length < 22) @@ -3804,6 +3806,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) (pr_reg->pr_res_type & 0x0f); } + target_set_cmd_data_length(cmd, 8 + add_len); + err: spin_unlock(&dev->dev_reservation_lock); transport_kunmap_data_sg(cmd); @@ -3822,7 +3826,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; - u16 add_len = 8; /* Hardcoded to 8. */ + u16 len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" @@ -3834,7 +3838,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - put_unaligned_be16(add_len, &buf[0]); + put_unaligned_be16(len, &buf[0]); buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit.
*/ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ @@ -3863,6 +3867,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + target_set_cmd_data_length(cmd, len); + transport_kunmap_data_sg(cmd); return 0; @@ -4023,6 +4029,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Set ADDITIONAL_LENGTH */ put_unaligned_be32(add_len, &buf[4]); + target_set_cmd_data_length(cmd, 8 + add_len); transport_kunmap_data_sg(cmd); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index c9d92b3e777d..55fe93296deb 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, unsigned char *buf; buf = transport_kmap_data_sg(cmd); - if (!buf) + if (!buf) { ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */ + } if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -939,6 +940,14 @@ new_bio: return 0; fail: + if (bio) + bio_put(bio); + while (req->bio) { + bio = req->bio; + req->bio = bio->bi_next; + bio_put(bio); + } + req->biotail = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d542e26ca56a..a16835c0bb1d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -236,6 +236,11 @@ int transport_init_session(struct se_session *se_sess) } EXPORT_SYMBOL(transport_init_session); +void transport_uninit_session(struct se_session *se_sess) +{ + percpu_ref_exit(&se_sess->cmd_count); +} + /** * transport_alloc_session - allocate a session object and initialize it * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 
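The new transport_uninit_session() above factors the percpu_ref teardown out of transport_free_session(): se_sess->cmd_count must see exactly one percpu_ref_exit() per successful init, whether the session was kmalloc'd or lives in a static object such as xcopy_pt_sess later in this series. A minimal sketch of that init/exit pairing; struct my_session and the my_* names are hypothetical, while the percpu_ref and completion calls are the stock kernel APIs:

    #include <linux/completion.h>
    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>

    /* hypothetical stand-in for the session's command counter */
    struct my_session {
            struct percpu_ref cmd_count;
            struct completion stop_done;
    };

    /* runs once the last counted reference has been dropped */
    static void my_sess_release(struct percpu_ref *ref)
    {
            struct my_session *s = container_of(ref, struct my_session,
                                                cmd_count);

            complete_all(&s->stop_done);
    }

    static int my_session_init(struct my_session *s)
    {
            init_completion(&s->stop_done);
            /* starts with one live reference; fails only on allocation */
            return percpu_ref_init(&s->cmd_count, my_sess_release, 0,
                                   GFP_KERNEL);
    }

    /* exactly one exit per successful init, static or heap-allocated */
    static void my_session_uninit(struct my_session *s)
    {
            percpu_ref_exit(&s->cmd_count);
    }

Splitting the exit into its own helper is what later lets target_xcopy_release_pt() tear down the statically allocated xcopy_pt_sess without going through transport_free_session().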
@@ -579,7 +584,7 @@ void transport_free_session(struct se_session *se_sess) sbitmap_queue_free(&se_sess->sess_tag_pool); kvfree(se_sess->sess_cmd_map); } - percpu_ref_exit(&se_sess->cmd_count); + transport_uninit_session(se_sess); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); @@ -868,11 +873,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } EXPORT_SYMBOL(target_complete_cmd); -void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +void target_set_cmd_data_length(struct se_cmd *cmd, int length) { - if ((scsi_status == SAM_STAT_GOOD || - cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && - length < cmd->data_length) { + if (length < cmd->data_length) { if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { cmd->residual_count += cmd->data_length - length; } else { @@ -882,6 +885,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len cmd->data_length = length; } +} +EXPORT_SYMBOL(target_set_cmd_data_length); + +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD || + cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { + target_set_cmd_data_length(cmd, length); + } target_complete_cmd(cmd, scsi_status); } @@ -3336,6 +3348,7 @@ static void target_tmr_work(struct work_struct *work) cmd->se_tfo->queue_tm_rsp(cmd); + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 35be1be87d2a..d6634baebb47 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) size = round_up(size+offset, PAGE_SIZE); while (size) { - flush_dcache_page(virt_to_page(start)); + flush_dcache_page(vmalloc_to_page(start)); start += PAGE_SIZE; size -= PAGE_SIZE; } @@ -669,15 +669,17 @@ static void scatter_data_area(struct tcmu_dev *udev, void *from, *to = NULL; size_t copy_bytes, to_offset, offset; struct scatterlist *sg; - struct page *page; + struct page *page = NULL; for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; from = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } block_remaining = DATA_BLOCK_SIZE; dbi = tcmu_cmd_get_dbi(tcmu_cmd); @@ -722,7 +724,6 @@ static void scatter_data_area(struct tcmu_dev *udev, memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes); - tcmu_flush_dcache_range(to, copy_bytes); } sg_remaining -= copy_bytes; @@ -731,8 +732,10 @@ static void scatter_data_area(struct tcmu_dev *udev, kunmap_atomic(from - sg->offset); } - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -778,13 +781,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = tcmu_cmd_get_dbi(cmd); page = tcmu_get_block_page(udev, dbi); from = kmap_atomic(page); + flush_dcache_page(page); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); if (read_len < copy_bytes) copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; - tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, copy_bytes); @@ -882,41 +885,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, return command_size; } 
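The tcmu hunk that follows decouples command-id allocation from timer setup: idr_alloc() moves out of tcmu_setup_cmd_timer() and into queue_cmd_ring(), so an id from the commands IDR is only consumed once a command actually reaches the ring, and commands parked on the qfull queue are identified by pointer alone. A minimal sketch of that allocation pattern, assuming a hypothetical my_alloc_cmd_id() wrapper around the real idr_alloc() API:

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/kernel.h>
    #include <linux/printk.h>

    /*
     * Hand out a small integer id for a command entering the ring
     * (hypothetical helper). Ids start at 1 so that 0 can keep
     * meaning "no id assigned yet".
     */
    static int my_alloc_cmd_id(struct idr *commands, void *cmd)
    {
            int id = idr_alloc(commands, cmd, 1, USHRT_MAX, GFP_NOWAIT);

            if (id < 0)
                    pr_err("could not allocate cmd id: %d\n", id);
            return id;
    }

GFP_NOWAIT mirrors the original call site, so the allocation fails fast instead of blocking while the command ring is being filled.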
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, - struct timer_list *timer) +static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, + struct timer_list *timer) { - struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; - int cmd_id; - - if (tcmu_cmd->cmd_id) - goto setup_timer; - - cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); - if (cmd_id < 0) { - pr_err("tcmu: Could not allocate cmd id.\n"); - return cmd_id; - } - tcmu_cmd->cmd_id = cmd_id; - - pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, - udev->name, tmo / MSEC_PER_SEC); - -setup_timer: if (!tmo) - return 0; + return; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); if (!timer_pending(timer)) mod_timer(timer, tcmu_cmd->deadline); - return 0; + pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, + tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); } static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; unsigned int tmo; - int ret; /* * For backwards compat if qfull_time_out is not set use @@ -931,13 +917,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) else tmo = TCMU_TIME_OUT; - ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); - if (ret) - return ret; + tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); - pr_debug("adding cmd %u on dev %s to ring space wait queue\n", - tcmu_cmd->cmd_id, udev->name); + pr_debug("adding cmd %p on dev %s to ring space wait queue\n", + tcmu_cmd, udev->name); return 0; } @@ -959,7 +943,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) struct tcmu_mailbox *mb; struct tcmu_cmd_entry *entry; struct iovec *iov; - int iov_cnt, ret; + int iov_cnt, cmd_id; uint32_t cmd_head; uint64_t cdb_off; bool copy_to_data_area; @@ -1026,7 +1010,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1060,14 +1044,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) } entry->req.iov_bidi_cnt = iov_cnt; - ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, - &udev->cmd_timer); - if (ret) { - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); + cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); + if (cmd_id < 0) { + pr_err("tcmu: Could not allocate cmd id.\n"); + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); *scsi_err = TCM_OUT_OF_RESOURCES; return -1; } + tcmu_cmd->cmd_id = cmd_id; + + pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, + tcmu_cmd, udev->name); + + tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); + entry->hdr.cmd_id = tcmu_cmd->cmd_id; /* @@ -1084,7 +1075,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, 
sizeof(*mb)); @@ -1232,7 +1223,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? + ring_left : sizeof(*entry)); if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { UPDATE_HEAD(udev->cmdr_last_cleaned, @@ -1279,50 +1277,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) return handled; } -static int tcmu_check_expired_cmd(int id, void *p, void *data) +static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) { - struct tcmu_cmd *cmd = p; - struct tcmu_dev *udev = cmd->tcmu_dev; - u8 scsi_status; struct se_cmd *se_cmd; - bool is_running; - - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) - return 0; if (!time_after(jiffies, cmd->deadline)) - return 0; + return; - is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; + cmd->se_cmd = NULL; - if (is_running) { - /* - * If cmd_time_out is disabled but qfull is set deadline - * will only reflect the qfull timeout. Ignore it. - */ - if (!udev->cmd_time_out) - return 0; + pr_debug("Timing out inflight cmd %u on dev %s.\n", + cmd->cmd_id, cmd->tcmu_dev->name); - set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); - /* - * target_complete_cmd will translate this to LUN COMM FAILURE - */ - scsi_status = SAM_STAT_CHECK_CONDITION; - list_del_init(&cmd->queue_entry); - cmd->se_cmd = NULL; - } else { - list_del_init(&cmd->queue_entry); - idr_remove(&udev->commands, id); - tcmu_free_cmd(cmd); - scsi_status = SAM_STAT_TASK_SET_FULL; - } + target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); +} - pr_debug("Timing out cmd %u on dev %s that is %s.\n", - id, udev->name, is_running ? 
"inflight" : "queued"); +static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) +{ + struct se_cmd *se_cmd; - target_complete_cmd(se_cmd, scsi_status); - return 0; + if (!time_after(jiffies, cmd->deadline)) + return; + + pr_debug("Timing out queued cmd %p on dev %s.\n", + cmd, cmd->tcmu_dev->name); + + list_del_init(&cmd->queue_entry); + se_cmd = cmd->se_cmd; + tcmu_free_cmd(cmd); + + target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); } static void tcmu_device_timedout(struct tcmu_dev *udev) @@ -1407,16 +1394,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } -static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) +static void run_qfull_queue(struct tcmu_dev *udev, bool fail) { struct tcmu_cmd *tcmu_cmd, *tmp_cmd; LIST_HEAD(cmds); - bool drained = true; sense_reason_t scsi_ret; int ret; if (list_empty(&udev->qfull_queue)) - return true; + return; pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); @@ -1425,11 +1411,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { list_del_init(&tcmu_cmd->queue_entry); - pr_debug("removing cmd %u on dev %s from queue\n", - tcmu_cmd->cmd_id, udev->name); + pr_debug("removing cmd %p on dev %s from queue\n", + tcmu_cmd, udev->name); if (fail) { - idr_remove(&udev->commands, tcmu_cmd->cmd_id); /* * We were not able to even start the command, so * fail with busy to allow a retry in case runner @@ -1444,10 +1429,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); if (ret < 0) { - pr_debug("cmd %u on dev %s failed with %u\n", - tcmu_cmd->cmd_id, udev->name, scsi_ret); - - idr_remove(&udev->commands, tcmu_cmd->cmd_id); + pr_debug("cmd %p on dev %s failed with %u\n", + tcmu_cmd, udev->name, scsi_ret); /* * Ignore scsi_ret for now. target_complete_cmd * drops it. 
@@ -1462,13 +1445,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) * the queue */ list_splice_tail(&cmds, &udev->qfull_queue); - drained = false; break; } } tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); - return drained; } static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) @@ -1652,6 +1633,8 @@ static void tcmu_dev_kref_release(struct kref *kref) if (tcmu_check_and_free_pending_cmd(cmd) != 0) all_expired = false; } + if (!list_empty(&udev->qfull_queue)) + all_expired = false; idr_destroy(&udev->commands); WARN_ON(!all_expired); @@ -2037,9 +2020,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { - if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) - continue; - pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", cmd->cmd_id, udev->name, test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); @@ -2073,9 +2053,12 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mb->cmd_tail = 0; mb->cmd_head = 0; tcmu_flush_dcache_range(mb, sizeof(*mb)); + clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); del_timer(&udev->cmd_timer); + run_qfull_queue(udev, false); + mutex_unlock(&udev->cmdr_lock); } @@ -2697,6 +2680,7 @@ static void find_free_blocks(void) static void check_timedout_devices(void) { struct tcmu_dev *udev, *tmp_dev; + struct tcmu_cmd *cmd, *tmp_cmd; LIST_HEAD(devs); spin_lock_bh(&timed_out_udevs_lock); @@ -2707,9 +2691,24 @@ static void check_timedout_devices(void) spin_unlock_bh(&timed_out_udevs_lock); mutex_lock(&udev->cmdr_lock); - idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); - tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + /* + * If cmd_time_out is disabled but qfull is set deadline + * will only reflect the qfull timeout. Ignore it. 
+ */ + if (udev->cmd_time_out) { + list_for_each_entry_safe(cmd, tmp_cmd, + &udev->inflight_queue, + queue_entry) { + tcmu_check_expired_ring_cmd(cmd); + } + tcmu_set_next_deadline(&udev->inflight_queue, + &udev->cmd_timer); + } + list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, + queue_entry) { + tcmu_check_expired_queue_cmd(cmd); + } tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); mutex_unlock(&udev->cmdr_lock); diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index b9b1e92c6f8d..596ad3edec9c 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -struct xcopy_dev_search_info { - const unsigned char *dev_wwn; - struct se_device *found_dev; -}; - +/** + * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers + * + * @se_dev: device being considered for match + * @dev_wwn: XCOPY requested NAA dev_wwn + * @return: 1 on match, 0 on no-match + */ static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, - void *data) + const unsigned char *dev_wwn) { - struct xcopy_dev_search_info *info = data; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; int rc; - if (!se_dev->dev_attrib.emulate_3pc) + if (!se_dev->dev_attrib.emulate_3pc) { + pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); return 0; + } memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); - rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); - if (rc != 0) + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); + if (rc != 0) { + pr_debug("XCOPY: skip non-matching: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); return 0; - - info->found_dev = se_dev; + } pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); - rc = target_depend_item(&se_dev->dev_group.cg_item); - if (rc != 0) { - pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n", - rc, se_dev); - return rc; - } - - pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n", - se_dev, &se_dev->dev_group); return 1; } -static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, - struct se_device **found_dev) +static int target_xcopy_locate_se_dev_e4(struct se_session *sess, + const unsigned char *dev_wwn, + struct se_device **_found_dev, + struct percpu_ref **_found_lun_ref) { - struct xcopy_dev_search_info info; - int ret; + struct se_dev_entry *deve; + struct se_node_acl *nacl; + struct se_lun *this_lun = NULL; + struct se_device *found_dev = NULL; - memset(&info, 0, sizeof(info)); - info.dev_wwn = dev_wwn; + /* cmd with NULL sess indicates no associated $FABRIC_MOD */ + if (!sess) + goto err_out; - ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info); - if (ret == 1) { - *found_dev = info.found_dev; - return 0; - } else { - pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); - return -EINVAL; + pr_debug("XCOPY 0xe4: searching for: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); + + nacl = sess->se_node_acl; + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + struct se_device *this_dev; + int rc; + + this_lun = rcu_dereference(deve->se_lun); + this_dev = rcu_dereference_raw(this_lun->lun_se_dev); + + rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); + if (rc) { + if (percpu_ref_tryget_live(&this_lun->lun_ref)) 
+ found_dev = this_dev; + break; + } } + rcu_read_unlock(); + if (found_dev == NULL) + goto err_out; + + pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", + found_dev, &found_dev->dev_group); + *_found_dev = found_dev; + *_found_lun_ref = &this_lun->lun_ref; + return 0; +err_out: + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, @@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, switch (xop->op_origin) { case XCOL_SOURCE_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn, - &xop->dst_dev); + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->dst_tid_wwn, + &xop->dst_dev, + &xop->remote_lun_ref); break; case XCOL_DEST_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn, - &xop->src_dev); + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->src_tid_wwn, + &xop->src_dev, + &xop->remote_lun_ref); break; default: pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " @@ -396,18 +423,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) { - struct se_device *remote_dev; - if (xop->op_origin == XCOL_SOURCE_RECV_OP) - remote_dev = xop->dst_dev; + pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); else - remote_dev = xop->src_dev; + pr_debug("putting src lun_ref for %p\n", xop->src_dev); - pr_debug("Calling configfs_undepend_item for" - " remote_dev: %p remote_dev->dev_group: %p\n", - remote_dev, &remote_dev->dev_group.cg_item); - - target_undepend_item(&remote_dev->dev_group.cg_item); + percpu_ref_put(xop->remote_lun_ref); } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) @@ -479,7 +500,7 @@ int target_xcopy_setup_pt(void) memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); ret = transport_init_session(&xcopy_pt_sess); if (ret < 0) - return ret; + goto destroy_wq; xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; @@ -488,12 +509,19 @@ int target_xcopy_setup_pt(void) xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; return 0; + +destroy_wq: + destroy_workqueue(xcopy_wq); + xcopy_wq = NULL; + return ret; } void target_xcopy_release_pt(void) { - if (xcopy_wq) + if (xcopy_wq) { destroy_workqueue(xcopy_wq); + transport_uninit_session(&xcopy_pt_sess); + } } /* diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 26ba4c3c9cff..974bc1e19ff2 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -29,6 +29,7 @@ struct xcopy_op { struct se_device *dst_dev; unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct percpu_ref *remote_lun_ref; sector_t src_lba; sector_t dst_lba; diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index cf2367ba08d6..ea79482ebda4 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -7,6 +7,7 @@ #include <linux/err.h> #include <linux/errno.h> #include <linux/mm.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/tee_drv.h> #include <linux/types.h> @@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) */ optee_cq_wait_for_completion(&optee->call_queue, &w); } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { - might_sleep(); + if (need_resched()) + cond_resched(); param.a0 = res.a0; param.a1 = res.a1; param.a2 = res.a2; @@ 
-530,7 +532,8 @@ void optee_free_pages_list(void *list, size_t num_entries) static bool is_normal_memory(pgprot_t p) { #if defined(CONFIG_ARM) - return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC; + return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) || + ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK)); #elif defined(CONFIG_ARM64) return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL); #else diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index b830e0a87fba..ba6cfba589a6 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -78,16 +78,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params, return rc; p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; p->u.memref.shm = shm; - - /* Check that the memref is covered by the shm object */ - if (p->u.memref.size) { - size_t o = p->u.memref.shm_offs + - p->u.memref.size - 1; - - rc = tee_shm_get_pa(shm, o, NULL); - if (rc) - return rc; - } break; case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6b9865c786ba..9d24bc05df0d 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -210,11 +210,11 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, int i; struct freq_table *freq_table = cpufreq_cdev->freq_table; - for (i = 1; i <= cpufreq_cdev->max_level; i++) - if (power > freq_table[i].power) + for (i = 0; i < cpufreq_cdev->max_level; i++) + if (power >= freq_table[i].power) break; - return freq_table[i - 1].frequency; + return freq_table[i].frequency; } /** @@ -320,6 +320,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; + int ret; /* Request state should be less than max_level */ if (WARN_ON(state > cpufreq_cdev->max_level)) @@ -329,10 +330,12 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, if (cpufreq_cdev->cpufreq_state == state) return 0; - cpufreq_cdev->cpufreq_state = state; + ret = freq_qos_update_request(&cpufreq_cdev->qos_req, + cpufreq_cdev->freq_table[state].frequency); + if (ret > 0) + cpufreq_cdev->cpufreq_state = state; - return freq_qos_update_request(&cpufreq_cdev->qos_req, - cpufreq_cdev->freq_table[state].frequency); + return ret; } /** diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index bb6754a5342c..85511c1160b7 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -656,7 +656,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { struct device_node *np; - int ret; + int ret = 0; data->policy = cpufreq_cpu_get(0); if (!data->policy) { @@ -671,11 +671,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) if (IS_ERR(data->cdev)) { ret = PTR_ERR(data->cdev); cpufreq_cpu_put(data->policy); - return ret; } } - return 0; + of_node_put(np); + + return ret; } static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index a7bbd8584ae2..e7b6f6f256a9 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle, 
THERMAL_TRIP_CHANGED); break; default: - dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); + dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); break; } } diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index acf4854cbb8b..2783973b101c 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -211,6 +211,9 @@ enum { /* The total number of temperature sensors in the MT8183 */ #define MT8183_NUM_SENSORS 6 +/* The number of banks in the MT8183 */ +#define MT8183_NUM_ZONES 1 + /* The number of sensing points per bank */ #define MT8183_NUM_SENSORS_PER_ZONE 6 @@ -498,7 +501,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = { static const struct mtk_thermal_data mt8183_thermal_data = { .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL, - .num_banks = MT8183_NUM_SENSORS_PER_ZONE, + .num_banks = MT8183_NUM_ZONES, .num_sensors = MT8183_NUM_SENSORS, .vts_index = mt8183_vts_index, .cali_val = MT8183_CALIBRATION, @@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) u32 raw; for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + - conf->msr[conf->bank_data[bank->id].sensors[i]]); + raw = readl(mt->thermal_base + conf->msr[i]); temp = raw_to_mcelsius(mt, conf->bank_data[bank->id].sensors[i], @@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, for (i = 0; i < conf->bank_data[num].num_sensors; i++) writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], - mt->thermal_base + - conf->adcpnp[conf->bank_data[num].sensors[i]]); + mt->thermal_base + conf->adcpnp[i]); writel((1 << conf->bank_data[num].num_sensors) - 1, controller_base + TEMP_MONCTL0); diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index bf7bae42c141..6dc879fea9c8 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. */ #include <linux/bitops.h> @@ -191,7 +191,7 @@ static int qpnp_tm_get_temp(void *data, int *temp) chip->temp = mili_celsius; } - *temp = chip->temp < 0 ? 
0 : chip->temp; + *temp = chip->temp; return 0; } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 755d2b5bd2c2..1ab2ffff4e7c 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -169,7 +169,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) { struct rcar_gen3_thermal_tsc *tsc = devdata; int mcelsius, val; - u32 reg; + int reg; /* Read register and convert to milli Celsius */ reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index d0873de718da..43f0cd2bd0ae 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -526,8 +526,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); - if (IS_ERR(common->base)) - return PTR_ERR(common->base); + if (IS_ERR(common->base)) { + ret = PTR_ERR(common->base); + goto error_unregister; + } idle = 0; /* polling delay is not needed */ } diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index aa99edb4dff7..4dce4a8f71ed 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -770,6 +770,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, { struct cooling_dev_stats *stats = cdev->stats; + if (!stats) + return; + spin_lock(&stats->lock); if (stats->state == new_state) diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index 63b02bfb2adf..fdb8a495ab69 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -37,20 +37,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature".
*/ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index a453ff8eb313..9a3955c3853b 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -53,9 +53,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". 
+ */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index d3e959d01606..2ce4b19f312a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!data || IS_ERR(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) @@ -196,7 +196,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data && data->ti_thermal) { + if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { if (data->our_zone) thermal_zone_device_unregister(data->ti_thermal); } @@ -262,7 +262,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data) { + if (!IS_ERR_OR_NULL(data)) { cpufreq_cooling_unregister(data->cool_dev); if (data->policy) cpufreq_cpu_put(data->policy); diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 245588f691e7..2f932b61b69a 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -1919,7 +1919,9 @@ static int complete_rpm(struct device *dev, void *data) static void remove_unplugged_switch(struct tb_switch *sw) { - pm_runtime_get_sync(sw->dev.parent); + struct device *parent = get_device(sw->dev.parent); + + pm_runtime_get_sync(parent); /* * Signal this and switches below for rpm_complete because @@ -1930,8 +1932,10 @@ static void remove_unplugged_switch(struct tb_switch *sw) bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); tb_switch_remove(sw); - pm_runtime_mark_last_busy(sw->dev.parent); - pm_runtime_put_autosuspend(sw->dev.parent); + pm_runtime_mark_last_busy(parent); + pm_runtime_put_autosuspend(parent); + + put_device(parent); } static void icm_free_unplugged_children(struct tb_switch *sw) diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 641b21b54460..73a698eec743 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -410,12 +410,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend) ring->vector = ret; - ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); - if (ring->irq < 0) - return ring->irq; + ret = pci_irq_vector(ring->nhi->pdev, ring->vector); + if (ret < 0) + goto err_ida_remove; + + ring->irq = ret; irqflags = no_suspend ? 
IRQF_NO_SUSPEND : 0; - return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); + ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); + if (ret) + goto err_ida_remove; + + return 0; + +err_ida_remove: + ida_simple_remove(&nhi->msix_ida, ring->vector); + + return ret; } static void ring_release_msix(struct tb_ring *ring) diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 4e17a7c7bf0a..9e9bf8771345 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -830,6 +830,7 @@ static void enumerate_services(struct tb_xdomain *xd) id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); if (id < 0) { + kfree(svc->key); kfree(svc); break; } diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 8330fd809a05..34d6ef75c95c 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1032,6 +1032,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) if (!serial_isroot()) { if ((ss->baud_base != state->baud_base) || (ss->close_delay != port->close_delay) || + (ss->closing_wait != port->closing_wait) || (ss->xmit_fifo_size != state->xmit_fifo_size) || ((ss->flags & ~ASYNC_USR_MASK) != (port->flags & ~ASYNC_USR_MASK))) { diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 769e0a5d1dfc..3c6dd06ec5fb 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -136,6 +136,21 @@ static int find_console_handle(void) return 1; } +static unsigned int local_ev_byte_channel_send(unsigned int handle, + unsigned int *count, + const char *p) +{ + char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; + unsigned int c = *count; + + if (c < sizeof(buffer)) { + memcpy(buffer, p, c); + memset(&buffer[c], 0, sizeof(buffer) - c); + p = buffer; + } + return ev_byte_channel_send(handle, count, p); +} + /*************************** EARLY CONSOLE DRIVER ***************************/ #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC @@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data) do { count = 1; - ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, &count, &data); } while (ret == EV_EAGAIN); } @@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, while (count) { len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); do { - ret = ev_byte_channel_send(handle, &len, s); + ret = local_ev_byte_channel_send(handle, &len, s); } while (ret == EV_EAGAIN); count -= len; s += len; @@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), EV_BYTE_CHANNEL_MAX_BYTES); - ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ if (!ret || (ret == EV_EAGAIN)) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 27284a2dcd2b..cdcc64ea2554 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) vtermnos[index] = vtermno; cons_ops[index] = ops; - /* reserve all indices up to and including this index */ - if (last_hvc < index) - last_hvc = index; - /* check if we need to re-register the kernel console */ hvc_check_console(index); @@ -375,15 +371,14 @@ static int hvc_open(struct tty_struct 
*tty, struct file * filp) * tty fields and return the kref reference. */ if (rc) { - tty_port_tty_set(&hp->port, NULL); - tty->driver_data = NULL; - tty_port_put(&hp->port); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); - } else + } else { /* We are ready... raise DTR/RTS */ if (C_BAUD(tty)) if (hp->ops->dtr_rts) hp->ops->dtr_rts(hp, 1); + tty_port_set_initialized(&hp->port, true); + } /* Force wakeup of the polling thread */ hvc_kick(); @@ -393,22 +388,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) static void hvc_close(struct tty_struct *tty, struct file * filp) { - struct hvc_struct *hp; + struct hvc_struct *hp = tty->driver_data; unsigned long flags; if (tty_hung_up_p(filp)) return; - /* - * No driver_data means that this close was issued after a failed - * hvc_open by the tty layer's release_dev() function and we can just - * exit cleanly because the kref reference wasn't made. - */ - if (!tty->driver_data) - return; - - hp = tty->driver_data; - spin_lock_irqsave(&hp->port.lock, flags); if (--hp->port.count == 0) { @@ -416,6 +401,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) /* We are done with the tty pointer now. */ tty_port_tty_set(&hp->port, NULL); + if (!tty_port_initialized(&hp->port)) + return; + if (C_HUPCL(tty)) if (hp->ops->dtr_rts) hp->ops->dtr_rts(hp, 0); @@ -432,6 +420,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) * waking periodically to check chars_in_buffer(). */ tty_wait_until_sent(tty, HVC_CLOSE_WAIT); + tty_port_set_initialized(&hp->port, false); } else { if (hp->port.count < 0) printk(KERN_ERR "hvc_close %X: oops, count is %d\n", @@ -960,13 +949,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, cons_ops[i] == hp->ops) break; - /* no matching slot, just use a counter */ - if (i >= MAX_NR_HVC_CONSOLES) - i = ++last_hvc; + if (i >= MAX_NR_HVC_CONSOLES) { + + /* find 'empty' slot for console */ + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { + } + + /* no matching slot, just use a counter */ + if (i == MAX_NR_HVC_CONSOLES) + i = ++last_hvc + MAX_NR_HVC_CONSOLES; + } hp->index = i; - cons_ops[i] = ops; - vtermnos[i] = vtermno; + if (i < MAX_NR_HVC_CONSOLES) { + cons_ops[i] = ops; + vtermnos[i] = vtermno; + } list_add_tail(&(hp->next), &hvc_structs); mutex_unlock(&hvc_structs_mutex); diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index ee0604cd9c6b..0c498b20d8cb 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -1218,13 +1218,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) tty_wait_until_sent(tty, HVCS_CLOSE_WAIT); - /* - * This line is important because it tells hvcs_open that this - * device needs to be re-configured the next time hvcs_open is - * called. - */ - tty->driver_data = NULL; - free_irq(irq, hvcsd); return; } else if (hvcsd->port.count < 0) { @@ -1239,6 +1232,13 @@ static void hvcs_cleanup(struct tty_struct * tty) { struct hvcs_struct *hvcsd = tty->driver_data; + /* + * This line is important because it tells hvcs_open that this + * device needs to be re-configured the next time hvcs_open is + * called. 
+ */ + tty->driver_data = NULL; + tty_port_put(&hvcsd->port); } diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c index cf20616340a1..fe569f6294a2 100644 --- a/drivers/tty/ipwireless/network.c +++ b/drivers/tty/ipwireless/network.c @@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, skb->len, notify_packet_sent, network); - if (ret == -1) { + if (ret < 0) { skb_pull(skb, 2); return 0; } @@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, notify_packet_sent, network); kfree(buf); - if (ret == -1) + if (ret < 0) return 0; } kfree_skb(skb); diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c index fad3401e604d..23584769fc29 100644 --- a/drivers/tty/ipwireless/tty.c +++ b/drivers/tty/ipwireless/tty.c @@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty, ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS, buf, count, ipw_write_packet_sent_callback, tty); - if (ret == -1) { + if (ret < 0) { mutex_unlock(&tty->ipw_tty_mutex); return 0; } diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 3a1a5e0ee93f..1254b39074ed 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty, ss->line = info->port.tty->index, ss->flags = info->port.flags, ss->baud_base = 921600, - ss->close_delay = info->port.close_delay; + ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10; mutex_unlock(&info->port.mutex); return 0; } @@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct moxa_port *info = tty->driver_data; + unsigned int close_delay; if (tty->index == MAX_PORTS) return -EINVAL; @@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty, ss->baud_base != 921600) return -EPERM; + close_delay = msecs_to_jiffies(ss->close_delay * 10); + mutex_lock(&info->port.mutex); if (!capable(CAP_SYS_ADMIN)) { - if (((ss->flags & ~ASYNC_USR_MASK) != + if (close_delay != info->port.close_delay || + ss->type != info->type || + ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) { mutex_unlock(&info->port.mutex); return -EPERM; } + } else { + info->port.close_delay = close_delay; + + MoxaSetFifo(info, ss->type == PORT_16550A); + + info->type = ss->type; } - info->port.close_delay = ss->close_delay * HZ / 100; - - MoxaSetFifo(info, ss->type == PORT_16550A); - - info->type = ss->type; mutex_unlock(&info->port.mutex); return 0; } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 36a3eb4ad4c5..38eb49ba361f 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -665,11 +665,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, * FIXME: lock against link layer control transmissions */ -static void gsm_data_kick(struct gsm_mux *gsm) +static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int len; - int skip_sof = 0; list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { if (gsm->constipated && msg->addr) @@ -691,18 +690,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) print_hex_dump_bytes("gsm_data_kick: ", DUMP_PREFIX_OFFSET, gsm->txframe, len); - - if (gsm->output(gsm, gsm->txframe + skip_sof, - len - skip_sof) < 0) + if (gsm->output(gsm, gsm->txframe, len) < 0) break; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; - /* For a burst of frames skip the extra SOF 
within the - burst */ - skip_sof = 1; list_del(&msg->list); kfree(msg); + + if (dlci) { + tty_port_tty_wakeup(&dlci->port); + } else { + int i = 0; + + for (i = 0; i < NUM_DLCI; i++) + if (gsm->dlci[i]) + tty_port_tty_wakeup(&gsm->dlci[i]->port); + } } } @@ -754,7 +758,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) /* Add to the actual output queue */ list_add_tail(&msg->list, &gsm->tx_list); gsm->tx_bytes += msg->len; - gsm_data_kick(gsm); + gsm_data_kick(gsm, dlci); } /** @@ -1215,7 +1219,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_FCOFF: @@ -2373,8 +2377,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) /* Don't register device 0 - this is the control channel and not a usable tty interface */ base = mux_num_to_base(gsm); /* Base for this MUX */ - for (i = 1; i < NUM_DLCI; i++) - tty_register_device(gsm_tty_driver, base + i, NULL); + for (i = 1; i < NUM_DLCI; i++) { + struct device *dev; + + dev = tty_register_device(gsm_tty_driver, + base + i, NULL); + if (IS_ERR(dev)) { + for (i--; i >= 1; i--) + tty_unregister_device(gsm_tty_driver, + base + i); + return PTR_ERR(dev); + } + } } return ret; } @@ -2525,7 +2539,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); if (gsm->tx_bytes < TX_THRESH_LO) { gsm_dlci_data_sweep(gsm); } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 00099a8439d2..c6a1d8c4e689 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) spin_lock_irqsave(&to->port->lock, flags); /* Stuff the data into the input queue of the other end */ c = tty_insert_flip_string(to->port, buf, c); + spin_unlock_irqrestore(&to->port->lock, flags); /* And shovel */ if (c) tty_flip_buffer_push(to->port); - spin_unlock_irqrestore(&to->port->lock, flags); } return c; } diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 5ba6816ebf81..bbaad2887ce7 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) tty_port_init(&info->port); info->port.ops = &rocket_port_ops; info->flags &= ~ROCKET_MODE_MASK; - switch (pc104[board][line]) { - case 422: - info->flags |= ROCKET_MODE_RS422; - break; - case 485: - info->flags |= ROCKET_MODE_RS485; - break; - case 232: - default: + if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1)) + switch (pc104[board][line]) { + case 422: + info->flags |= ROCKET_MODE_RS422; + break; + case 485: + info->flags |= ROCKET_MODE_RS485; + break; + case 232: + default: + info->flags |= ROCKET_MODE_RS232; + break; + } + else info->flags |= ROCKET_MODE_RS232; - break; - } info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) { diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 28bdbd7b4ab2..2675771a03a0 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -524,6 +524,7 @@ static void __init 
serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) @@ -1026,7 +1026,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up) gpios = mctrl_gpio_init(&uart->port, 0); if (IS_ERR(gpios)) { ret = PTR_ERR(gpios); - goto out_unlock; + goto err; } else { uart->gpios = gpios; } @@ -1075,8 +1075,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); - if (ret == 0) - ret = uart->port.line; + if (ret) + goto err; + + ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", @@ -1098,10 +1100,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up) } } -out_unlock: mutex_unlock(&serial_mutex); return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index e1268646ee56..93367dea4d8a 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -307,7 +307,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); @@ -715,6 +725,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -785,9 +813,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x), + EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4), diff --git 
a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 4d067f515f74..e9eb454c2472 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -305,7 +305,20 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, } #endif - serial8250_do_set_termios(port, termios, old); + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. + */ + baud = tty_termios_baud_rate(termios); + + serial8250_do_set_termios(port, termios, NULL); + + tty_termios_encode_baud_rate(termios, baud, baud); /* * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) @@ -338,6 +351,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 836e736ae188..efe793a2fc65 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -170,11 +170,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up, struct omap8250_priv *priv) { u8 timeout = 255; - u8 old_mdr1; - - old_mdr1 = serial_in(up, UART_OMAP_MDR1); - if (old_mdr1 == priv->mdr1) - return; serial_out(up, UART_OMAP_MDR1, priv->mdr1); udelay(2); @@ -790,7 +785,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); count = dma->rx_size - state.residue; - + if (count < dma->rx_size) + dmaengine_terminate_async(dma->rxchan); + if (!count) + goto unlock; ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += ret; @@ -852,7 +850,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) spin_unlock_irqrestore(&priv->rx_dma_lock, flags); __dma_rx_do_complete(p); - dmaengine_terminate_all(dma->rxchan); } static int omap_8250_rx_dma(struct uart_8250_port *p) @@ -1234,6 +1231,7 @@ static int omap8250_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_dma_lock); device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); /* @@ -1247,7 +1245,6 @@ static int omap8250_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, -1); pm_runtime_irq_safe(&pdev->dev); - pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 8a01d034f9d1..8814ff38aa67 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1871,12 +1871,6 @@ pci_moxa_setup(struct serial_private *priv, #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 #define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 -#define PCI_VENDOR_ID_PERICOM 0x12D8 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 - 
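The 8250_pci changes around this point all funnel into one mechanism: the removed Pericom defines above were driver-local copies (presumably the IDs now come from a shared header, though that header is not part of this diff), and the Realtek RealManage entries added below extend the driver's PCI match table. As a self-contained sketch of how such a pci_device_id table is declared, with made-up IDs except for the Realtek vendor and the 0x816a function taken from this diff; the hunk itself resumes right after the sketch:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Illustrative IDs only; not a real serial card. */
    #define DEMO_VENDOR_ID   0x1234
    #define DEMO_DEVICE_UART 0x0001

    static const struct pci_device_id demo_serial_ids[] = {
        { PCI_DEVICE(DEMO_VENDOR_ID, DEMO_DEVICE_UART) },
        { PCI_VDEVICE(REALTEK, 0x816a) },  /* PCI_VENDOR_ID_REALTEK comes from pci_ids.h */
        { }                                /* all-zero sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, demo_serial_ids);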
#define PCI_VENDOR_ID_ACCESIO 0x494f #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 @@ -5574,6 +5568,17 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_4 }, + /* + * Realtek RealManage + */ + { PCI_VENDOR_ID_REALTEK, 0x816a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_REALTEK, 0x816b, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + /* Fintek PCI serial cards */ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 2c65c775bf5a..5b673077639b 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1816,6 +1816,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; unsigned long flags; struct uart_8250_port *up = up_to_u8250p(port); + bool skip_rx = false; if (iir & UART_IIR_NO_INT) return 0; @@ -1824,7 +1825,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI)) { + /* + * If port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing, RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. + */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } @@ -2198,6 +2212,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2207,8 +2225,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible. */ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2220,9 +2236,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise @@ -2539,6 +2556,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { + unsigned int tolerance = port->uartclk / 100; + /* * Ask the core to calculate the divisor for us. 
* Allow 1% tolerance at the upper limit so uart clks marginally @@ -2547,7 +2566,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, */ return uart_get_baud_rate(port, termios, old, port->uartclk / 16 / UART_DIV_MAX, - port->uartclk); + (port->uartclk + tolerance) / 16); } void diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 67a9eb3f94ce..a9751a83d5db 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -10,6 +10,7 @@ menu "Serial drivers" config SERIAL_EARLYCON bool + depends on SERIAL_CORE help Support for early consoles with the earlycon parameter. This enables the console before standard serial driver is probed. The console is diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index b0b689546395..16720c97a4dd 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -313,8 +313,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap, */ static int pl011_fifo_to_tty(struct uart_amba_port *uap) { - u16 status; unsigned int ch, flag, fifotaken; + int sysrq; + u16 status; for (fifotaken = 0; fifotaken != 256; fifotaken++) { status = pl011_read(uap, REG_FR); @@ -349,10 +350,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap) flag = TTY_FRAME; } - if (uart_handle_sysrq_char(&uap->port, ch & 255)) - continue; + spin_unlock(&uap->port.lock); + sysrq = uart_handle_sysrq_char(&uap->port, ch & 255); + spin_lock(&uap->port.lock); - uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + if (!sysrq) + uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); } return fifotaken; @@ -2252,9 +2255,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2287,7 +2289,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 38400; @@ -2355,8 +2357,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; @@ -2585,6 +2587,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, uap->port.fifosize = uap->fifosize; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = index; + spin_lock_init(&uap->port.lock); amba_ports[index] = uap; @@ -2593,7 +2596,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2604,6 +2607,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + 
amba_ports[i] = NULL; return ret; } } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index d2fc050a3445..6ad985e8dc65 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -238,6 +238,7 @@ static DEFINE_IDA(fsl_lpuart_ida); enum lpuart_type { VF610_LPUART, LS1021A_LPUART, + LS1028A_LPUART, IMX7ULP_LPUART, IMX8QXP_LPUART, }; @@ -282,11 +283,16 @@ static const struct lpuart_soc_data vf_data = { .iotype = UPIO_MEM, }; -static const struct lpuart_soc_data ls_data = { +static const struct lpuart_soc_data ls1021a_data = { .devtype = LS1021A_LPUART, .iotype = UPIO_MEM32BE, }; +static const struct lpuart_soc_data ls1028a_data = { + .devtype = LS1028A_LPUART, + .iotype = UPIO_MEM32, +}; + static struct lpuart_soc_data imx7ulp_data = { .devtype = IMX7ULP_LPUART, .iotype = UPIO_MEM32, @@ -301,7 +307,8 @@ static struct lpuart_soc_data imx8qxp_data = { static const struct of_device_id lpuart_dt_ids[] = { { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, - { .compatible = "fsl,ls1021a-lpuart", .data = &ls_data, }, + { .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, }, + { .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, }, { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, { /* sentinel */ } @@ -311,6 +318,12 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids); /* Forward declare this for the dma callbacks*/ static void lpuart_dma_tx_complete(void *arg); +static inline bool is_layerscape_lpuart(struct lpuart_port *sport) +{ + return (sport->devtype == LS1021A_LPUART || + sport->devtype == LS1028A_LPUART); +} + static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport) { return sport->devtype == IMX8QXP_LPUART; @@ -635,26 +648,24 @@ static int lpuart32_poll_init(struct uart_port *port) spin_lock_irqsave(&sport->port.lock, flags); /* Disable Rx & Tx */ - lpuart32_write(&sport->port, UARTCTRL, 0); + lpuart32_write(&sport->port, 0, UARTCTRL); temp = lpuart32_read(&sport->port, UARTFIFO); /* Enable Rx and Tx FIFO */ - lpuart32_write(&sport->port, UARTFIFO, - temp | UARTFIFO_RXFE | UARTFIFO_TXFE); + lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); /* flush Tx and Rx FIFO */ - lpuart32_write(&sport->port, UARTFIFO, - UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH); + lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); /* explicitly clear RDRF */ if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) { lpuart32_read(&sport->port, UARTDATA); - lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF); + lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO); } /* Enable Rx and Tx */ - lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE); + lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); spin_unlock_irqrestore(&sport->port.lock, flags); return 0; @@ -663,12 +674,12 @@ static int lpuart32_poll_init(struct uart_port *port) static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c) { lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE); - lpuart32_write(port, UARTDATA, c); + lpuart32_write(port, c, UARTDATA); } static int lpuart32_poll_get_char(struct uart_port *port) { - if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF)) + if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF)) return NO_POLL_CHAR; return lpuart32_read(port, UARTDATA); @@ -1555,6 +1566,17 @@ static int lpuart32_startup(struct uart_port *port) sport->rxfifo_size = UARTFIFO_DEPTH((temp >> 
UARTFIFO_RXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK); + /* + * The LS1021A and LS1028A have a fixed FIFO depth of 16 words. + * Although they support the RX/TXSIZE fields, their encoding is + * different. Eg the reference manual states 0b101 is 16 words. + */ + if (is_layerscape_lpuart(sport)) { + sport->rxfifo_size = 16; + sport->txfifo_size = 16; + sport->port.fifosize = sport->txfifo_size; + } + spin_lock_irqsave(&sport->port.lock, flags); lpuart32_setup_watermark_enable(sport); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 22d8705cd5cd..e5ed4ab2b08d 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -877,8 +877,14 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id) struct imx_port *sport = dev_id; unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4; irqreturn_t ret = IRQ_NONE; + unsigned long flags = 0; - spin_lock(&sport->port.lock); + /* + * IRQs might not be disabled upon entering this interrupt handler, + * e.g. when interrupt handlers are forced to be threaded. To support + * this scenario as well, disable IRQs when acquiring the spinlock. + */ + spin_lock_irqsave(&sport->port.lock, flags); usr1 = imx_uart_readl(sport, USR1); usr2 = imx_uart_readl(sport, USR2); @@ -946,7 +952,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id) ret = IRQ_HANDLED; } - spin_unlock(&sport->port.lock); + spin_unlock_irqrestore(&sport->port.lock, flags); return ret; } @@ -1936,16 +1942,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count) unsigned int ucr1; unsigned long flags = 0; int locked = 1; - int retval; - - retval = clk_enable(sport->clk_per); - if (retval) - return; - retval = clk_enable(sport->clk_ipg); - if (retval) { - clk_disable(sport->clk_per); - return; - } if (sport->port.sysrq) locked = 0; @@ -1981,9 +1977,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count) if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); - - clk_disable(sport->clk_ipg); - clk_disable(sport->clk_per); } /* @@ -2084,15 +2077,14 @@ imx_uart_console_setup(struct console *co, char *options) retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); - clk_disable(sport->clk_ipg); if (retval) { - clk_unprepare(sport->clk_ipg); + clk_disable_unprepare(sport->clk_ipg); goto error_console; } - retval = clk_prepare(sport->clk_per); + retval = clk_prepare_enable(sport->clk_per); if (retval) - clk_unprepare(sport->clk_ipg); + clk_disable_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index c7d51b51898f..f5608ad68ae1 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -20,6 +20,7 @@ #include <linux/vt_kern.h> #include <linux/input.h> #include <linux/module.h> +#include <linux/platform_device.h> #define MAX_CONFIG_LEN 40 @@ -27,6 +28,7 @@ static struct kgdb_io kgdboc_io_ops; /* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
*/ static int configured = -1; +static DEFINE_MUTEX(config_mutex); static char config[MAX_CONFIG_LEN]; static struct kparam_string kps = { @@ -38,6 +40,8 @@ static int kgdboc_use_kms; /* 1 if we use kernel mode switching */ static struct tty_driver *kgdb_tty_driver; static int kgdb_tty_line; +static struct platform_device *kgdboc_pdev; + #ifdef CONFIG_KDB_KEYBOARD static int kgdboc_reset_connect(struct input_handler *handler, struct input_dev *dev, @@ -133,11 +137,13 @@ static void kgdboc_unregister_kbd(void) static void cleanup_kgdboc(void) { + if (configured != 1) + return; + if (kgdb_unregister_nmi_console()) return; kgdboc_unregister_kbd(); - if (configured == 1) - kgdb_unregister_io_module(&kgdboc_io_ops); + kgdb_unregister_io_module(&kgdboc_io_ops); } static int configure_kgdboc(void) @@ -200,20 +206,79 @@ nmi_con_failed: kgdb_unregister_io_module(&kgdboc_io_ops); noconfig: kgdboc_unregister_kbd(); - config[0] = 0; configured = 0; - cleanup_kgdboc(); return err; } +static int kgdboc_probe(struct platform_device *pdev) +{ + int ret = 0; + + mutex_lock(&config_mutex); + if (configured != 1) { + ret = configure_kgdboc(); + + /* Convert "no device" to "defer" so we'll keep trying */ + if (ret == -ENODEV) + ret = -EPROBE_DEFER; + } + mutex_unlock(&config_mutex); + + return ret; +} + +static struct platform_driver kgdboc_platform_driver = { + .probe = kgdboc_probe, + .driver = { + .name = "kgdboc", + .suppress_bind_attrs = true, + }, +}; + static int __init init_kgdboc(void) { - /* Already configured? */ - if (configured == 1) + int ret; + + /* + * kgdboc is a little bit of an odd "platform_driver". It can be + * up and running long before the platform_driver object is + * created and thus doesn't actually store anything in it. There's + * only one instance of kgdb so anything is stored as global state. + * The platform_driver is only created so that we can leverage the + * kernel's mechanisms (like -EPROBE_DEFER) to call us when our + * underlying tty is ready. Here we init our platform driver and + * then create the single kgdboc instance. + */ + ret = platform_driver_register(&kgdboc_platform_driver); + if (ret) + return ret; + + kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE); + if (!kgdboc_pdev) { + ret = -ENOMEM; + goto err_did_register; + } + + ret = platform_device_add(kgdboc_pdev); + if (!ret) return 0; - return configure_kgdboc(); + platform_device_put(kgdboc_pdev); + +err_did_register: + platform_driver_unregister(&kgdboc_platform_driver); + return ret; +} + +static void exit_kgdboc(void) +{ + mutex_lock(&config_mutex); + cleanup_kgdboc(); + mutex_unlock(&config_mutex); + + platform_device_unregister(kgdboc_pdev); + platform_driver_unregister(&kgdboc_platform_driver); } static int kgdboc_get_char(void) @@ -236,24 +301,20 @@ static int param_set_kgdboc_var(const char *kmessage, const struct kernel_param *kp) { size_t len = strlen(kmessage); + int ret = 0; if (len >= MAX_CONFIG_LEN) { pr_err("config string too long\n"); return -ENOSPC; } - /* Only copy in the string if the init function has not run yet */ - if (configured < 0) { - strcpy(config, kmessage); - return 0; - } - if (kgdb_connected) { pr_err("Cannot reconfigure while KGDB is connected.\n"); - return -EBUSY; } + mutex_lock(&config_mutex); + strcpy(config, kmessage); /* Chop out \n char as a result of echo */ if (len && config[len - 1] == '\n') @@ -262,8 +323,30 @@ static int param_set_kgdboc_var(const char *kmessage, if (configured == 1) cleanup_kgdboc(); - /* Go and configure with the new params. 
*/ - return configure_kgdboc(); + /* + * Configure with the new params as long as init already ran. + * Note that we can get called before init if someone loads us + * with "modprobe kgdboc kgdboc=..." or if they happen to use + * the odd syntax of "kgdboc.kgdboc=..." on the kernel command + * line. + */ + if (configured >= 0) + ret = configure_kgdboc(); + + /* + * If we couldn't configure then clear out the config. Note that + * specifying an invalid config on the kernel command line vs. + * through sysfs have slightly different behaviors. If we fail + * to configure what was specified on the kernel command line + * we'll leave it in the 'config' and return -EPROBE_DEFER from + * our probe. When specified through sysfs userspace is + * responsible for loading the tty driver before setting up. + */ + if (ret) + config[0] = '\0'; + + mutex_unlock(&config_mutex); + + return ret; } static int dbg_restore_graphics; @@ -326,15 +409,8 @@ __setup("kgdboc=", kgdboc_option_setup); /* This is only available if kgdboc is a built in for early debugging */ static int __init kgdboc_early_init(char *opt) { - /* save the first character of the config string because the - * init routine can destroy it. - */ - char save_ch; - kgdboc_option_setup(opt); - save_ch = config[0]; - init_kgdboc(); - config[0] = save_ch; + configure_kgdboc(); return 0; } @@ -342,7 +418,7 @@ early_param("ekgdboc", kgdboc_early_init); #endif /* CONFIG_KGDB_SERIAL_CONSOLE */ module_init(init_kgdboc); -module_exit(cleanup_kgdboc); +module_exit(exit_kgdboc); module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644); MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]"); MODULE_DESCRIPTION("KGDB Console TTY Driver"); diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 4e9a590712cb..e006f40ffdc0 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port) (val & STAT_TX_RDY(port)), 1, 10000); } +static void wait_for_xmite(struct uart_port *port) +{ + u32 val; + + readl_poll_timeout_atomic(port->membase + UART_STAT, val, + (val & STAT_TX_EMP), 1, 10000); +} + static void mvebu_uart_console_putchar(struct uart_port *port, int ch) { wait_for_xmitr(port); @@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s, uart_console_write(port, s, count, mvebu_uart_console_putchar); - wait_for_xmitr(port); + wait_for_xmite(port); if (ier) writel(ier, port->membase + UART_CTRL(port)); diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index e34525970682..5d483e996514 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1701,21 +1701,21 @@ static int mxs_auart_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto out_disable_clks; + goto out_iounmap; } s->port.irq = irq; ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); if (ret) - goto out_disable_clks; + goto out_iounmap; platform_set_drvdata(pdev, s); ret = mxs_auart_init_gpios(s, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); - goto out_disable_clks; + goto out_iounmap; } /* @@ -1723,7 +1723,7 @@ */ ret = mxs_auart_request_gpio_irq(s); if (ret) - goto out_disable_clks; + goto out_iounmap; auart_port[s->port.line] = s; @@ -1749,6 +1749,9 @@ out_free_qpio_irq: mxs_auart_free_gpio_irq(s);
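The mxs-auart hunk in progress here (it resumes directly below) retargets the probe error gotos at a new out_iounmap label and mirrors the unmap in remove(), so the ioremapped registers are released on every failure path. A generic sketch of that goto-unwind idiom, using a hypothetical platform driver rather than the mxs code itself:

    #include <linux/io.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    /* Hypothetical per-device state, for illustration only. */
    struct demo_port {
        void __iomem *membase;
    };

    static int demo_probe(struct platform_device *pdev)
    {
        struct demo_port *s;
        struct resource *res;
        int irq, ret;

        s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
        if (!s)
            return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
            return -ENXIO;

        /* Plain ioremap() is not device-managed: every later failure must unmap. */
        s->membase = ioremap(res->start, resource_size(res));
        if (!s->membase)
            return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
            ret = irq;
            goto out_iounmap;
        }

        platform_set_drvdata(pdev, s);
        return 0;

    out_iounmap:
        iounmap(s->membase);
        return ret;
    }

The point of the ordering is that the labels unwind strictly in reverse order of acquisition, which is why the new out_iounmap label sits above out_disable_clks in the patched driver.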
auart_port[pdev->id] = NULL; +out_iounmap: + iounmap(s->port.membase); + out_disable_clks: if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); @@ -1764,6 +1767,7 @@ static int mxs_auart_remove(struct platform_device *pdev) uart_remove_one_port(&auart_driver, &s->port); auart_port[pdev->id] = NULL; mxs_auart_free_gpio_irq(s); + iounmap(s->port.membase); if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index d2d8b3494685..c55c8507713c 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev) return PTR_ERR(owl_port->clk); } + ret = clk_prepare_enable(owl_port->clk); + if (ret) { + dev_err(&pdev->dev, "could not enable clk\n"); + return ret; + } + owl_port->port.dev = &pdev->dev; owl_port->port.line = pdev->id; owl_port->port.type = PORT_OWL; @@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev) uart_remove_one_port(&owl_uart_driver, &owl_port->port); owl_uart_ports[pdev->id] = NULL; + clk_disable_unprepare(owl_port->clk); return 0; } diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index f98a79172ad2..9b148f78323f 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -970,7 +970,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, sampling_rate = UART_OVERSAMPLING; /* Sampling rate is halved for IP versions >= 2.5 */ ver = geni_se_get_qup_hw_version(&port->se); - if (GENI_SE_VERSION_MAJOR(ver) >= 2 && GENI_SE_VERSION_MINOR(ver) >= 5) + if (ver >= QUP_SE_VERSION_2_5) sampling_rate /= 2; clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div); @@ -1063,7 +1063,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static int __init qcom_geni_console_setup(struct console *co, char *options) +static int qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 83fd51607741..c7683beb3412 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, struct s3c24xx_uart_info *info = ourport->info; struct clk *clk; unsigned long rate; - unsigned int cnt, baud, quot, clk_sel, best_quot = 0; + unsigned int cnt, baud, quot, best_quot = 0; char clkname[MAX_CLK_NAME_LENGTH]; int calc_deviation, deviation = (1 << 30) - 1; - clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : - ourport->info->def_clk_sel; for (cnt = 0; cnt < info->num_clks; cnt++) { - if (!(clk_sel & (1 << cnt))) + /* Keep selected clock if provided */ + if (ourport->cfg->clk_sel && + !(ourport->cfg->clk_sel & (1 << cnt))) continue; sprintf(clkname, "clk_uart_baud%d", cnt); @@ -1791,9 +1791,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret + 1; } - ret = platform_get_irq(platdev, 1); - if (ret > 0) - ourport->tx_irq = ret; + if (!s3c24xx_serial_has_interrupt_mask(port)) { + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + } /* * DMA is currently supported only on DT platforms, if DMA properties * are specified. 
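Before the serial-tegra diff below, one detail of the qcom_geni_serial hunk above deserves a note: the old check GENI_SE_VERSION_MAJOR(ver) >= 2 && GENI_SE_VERSION_MINOR(ver) >= 5 misclassifies newer IP whose minor number is small (for example v3.0, where minor 0 fails the >= 5 test), while the replacement compares the packed value once against QUP_SE_VERSION_2_5. A standalone demonstration of the failure mode; the packing macros are assumptions, since the real GENI encoding is not shown in this diff:

    #include <stdio.h>

    /* Assumed packed-version layout, for the demo only. */
    #define VER(major, minor)  (((major) << 16) | (minor))
    #define VER_MAJOR(v)       ((v) >> 16)
    #define VER_MINOR(v)       ((v) & 0xffff)

    int main(void)
    {
        unsigned int hw = VER(3, 0);    /* hypothetical newer IP, v3.0 */

        /* Component-wise test: wrongly rejects v3.0 (minor 0 < 5). */
        int old_match = VER_MAJOR(hw) >= 2 && VER_MINOR(hw) >= 5;

        /* Packed comparison: one test against the v2.5 threshold. */
        int new_match = hw >= VER(2, 5);

        printf("old=%d new=%d\n", old_match, new_match);   /* prints old=0 new=1 */
        return 0;
    }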
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 2f599515c133..51c3f579ccd0 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -651,11 +651,14 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) - tty_insert_flip_char(tty, ch, flag); + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; + + if (tty) + tty_insert_flip_char(tty, ch, flag); } while (1); } diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7c2782785736..e2ab6524119a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1304,7 +1304,7 @@ static int uart_set_rs485_config(struct uart_port *port, unsigned long flags; if (!port->rs485_config) - return -ENOIOCTLCMD; + return -ENOTTY; if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user))) return -EFAULT; @@ -1328,7 +1328,7 @@ static int uart_get_iso7816_config(struct uart_port *port, struct serial_iso7816 aux; if (!port->iso7816_config) - return -ENOIOCTLCMD; + return -ENOTTY; spin_lock_irqsave(&port->lock, flags); aux = port->iso7816; @@ -1348,7 +1348,7 @@ static int uart_set_iso7816_config(struct uart_port *port, unsigned long flags; if (!port->iso7816_config) - return -ENOIOCTLCMD; + return -ENOTTY; if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user))) return -EFAULT; @@ -1465,6 +1465,10 @@ static void uart_set_ldisc(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; struct uart_port *uport; + struct tty_port *port = &state->port; + + if (!tty_port_initialized(port)) + return; mutex_lock(&state->port.mutex); uport = uart_port_check(state); diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index d22ccb32aa9b..8507f18900d0 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -1283,6 +1283,9 @@ static int __init serial_txx9_init(void) #ifdef ENABLE_SERIAL_TXX9_PCI ret = pci_register_driver(&serial_txx9_pci_driver); + if (ret) { + platform_driver_unregister(&serial_txx9_plat_driver); + } #endif if (ret == 0) goto out; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 22e5d4e13714..7d1529b11ae9 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port) tty_insert_flip_char(tport, c, TTY_NORMAL); } else { for (i = 0; i < count; i++) { - char c = serial_port_in(port, SCxRDR); + char c; - status = serial_port_in(port, SCxSR); + if (port->type == PORT_SCIF || + port->type == PORT_HSCIF) { + status = serial_port_in(port, SCxSR); + c = serial_port_in(port, SCxRDR); + } else { + c = serial_port_in(port, SCxRDR); + status = serial_port_in(port, SCxSR); + } if (uart_handle_sysrq_char(port, c)) { count--; i--; continue; diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index d5f81b98e4d7..6a2dc823ea82 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -618,10 +618,10 @@ static void sifive_serial_shutdown(struct uart_port *port) * * On the V0 SoC, the UART IP block is derived from the CPU clock source * after a synchronous divide-by-two divider, so any CPU clock rate change - * requires the UART baud rate to be updated. 
This presumably could corrupt any - * serial word currently being transmitted or received. It would probably - * be better to stop receives and transmits, then complete the baud rate - * change, then re-enable them. + * requires the UART baud rate to be updated. This presumably corrupts any + * serial word currently being transmitted or received. In order to avoid + * corrupting the output data stream, we drain the transmit queue before + * allowing the clock's rate to be changed. */ static int sifive_serial_clk_notifier(struct notifier_block *nb, unsigned long event, void *data) @@ -629,6 +629,26 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb, struct clk_notifier_data *cnd = data; struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb); + if (event == PRE_RATE_CHANGE) { + /* + * The TX watermark is always set to 1 by this driver, which + * means that the TX busy bit will lower when there are 0 bytes + * left in the TX queue -- in other words, when the TX FIFO is + * empty. + */ + __ssp_wait_for_xmitr(ssp); + /* + * On the cycle the TX FIFO goes empty there is still a full + * UART frame left to be transmitted in the shift register. + * The UART provides no way for software to directly determine + * when that last frame has been transmitted, so we just sleep + * here instead. As we're not tracking the number of stop bits + * they're just worst cased here. The rest of the serial + * framing parameters aren't configurable by software. + */ + udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate)); + } + if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) { ssp->clkin_rate = cnd->new_rate; __ssp_update_div(ssp); @@ -840,6 +860,7 @@ console_initcall(sifive_console_init); static void __ssp_add_console_port(struct sifive_serial_port *ssp) { + spin_lock_init(&ssp->port.lock); sifive_serial_console_ports[ssp->port.line] = ssp; } @@ -952,6 +973,7 @@ static int sifive_serial_probe(struct platform_device *pdev) /* Set up clock divider */ ssp->clkin_rate = clk_get_rate(ssp->clk); ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE; + ssp->port.uartclk = ssp->baud_rate * 16; __ssp_update_div(ssp); platform_set_drvdata(pdev, ssp); diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 2f72514d63ed..23b7bdae173c 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -502,7 +502,10 @@ static unsigned int stm32_tx_empty(struct uart_port *port) struct stm32_port *stm32_port = to_stm32_port(port); struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE; + if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) + return TIOCSER_TEMT; + + return 0; } static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl) @@ -688,8 +691,9 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios, unsigned int baud, bits; u32 usartdiv, mantissa, fraction, oversampling; tcflag_t cflag = termios->c_cflag; - u32 cr1, cr2, cr3; + u32 cr1, cr2, cr3, isr; unsigned long flags; + int ret; if (!stm32_port->hw_flow_control) cflag &= ~CRTSCTS; @@ -698,6 +702,15 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios, spin_lock_irqsave(&port->lock, flags); + ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TC), + 10, 100000); + + /* Send the TC error message only when ISR_TC is not set. 
*/ + if (ret) + dev_err(port->dev, "Transmission is not complete\n"); + /* Stop serial port and reset value */ writel_relaxed(0, port->membase + ofs->cr1); @@ -937,7 +950,7 @@ static int stm32_init_port(struct stm32_port *stm32port, stm32_init_rs485(port, pdev); if (stm32port->info->cfg.has_wakeup) { - stm32port->wakeirq = platform_get_irq(pdev, 1); + stm32port->wakeirq = platform_get_irq_optional(pdev, 1); if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO) return stm32port->wakeirq ? : -ENODEV; } diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h index a175c1094dc8..a0b816e0e89b 100644 --- a/drivers/tty/serial/stm32-usart.h +++ b/drivers/tty/serial/stm32-usart.h @@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = { /* Dummy bits */ #define USART_SR_DUMMY_RX BIT(16) -/* USART_ICR (F7) */ -#define USART_CR_TC BIT(6) - /* USART_DR */ #define USART_DR_MASK GENMASK(8, 0) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 4e55bc327a54..9359c80fbb9f 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -30,13 +30,15 @@ #define CDNS_UART_TTY_NAME "ttyPS" #define CDNS_UART_NAME "xuartps" +#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ +#define CDNS_UART_MINOR 0 /* works best with devtmpfs */ +#define CDNS_UART_NR_PORTS 16 #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ #define CDNS_UART_REGISTER_SPACE 0x1000 #define TX_TIMEOUT 500000 /* Rx Trigger level */ static int rx_trigger_level = 56; -static int uartps_major; module_param(rx_trigger_level, uint, 0444); MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); @@ -182,7 +184,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255"); * @pclk: APB clock * @cdns_uart_driver: Pointer to UART driver * @baud: Current baud rate - * @id: Port ID * @clk_rate_change_nb: Notifier block for clock changes * @quirks: Flags for RXBS support. */ @@ -192,7 +193,6 @@ struct cdns_uart { struct clk *pclk; struct uart_driver *cdns_uart_driver; unsigned int baud; - int id; struct notifier_block clk_rate_change_nb; u32 quirks; bool cts_override; @@ -1119,6 +1119,8 @@ static const struct uart_ops cdns_uart_ops = { #endif }; +static struct uart_driver cdns_uart_uart_driver; + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /** * cdns_uart_console_putchar - write the character to the FIFO buffer @@ -1246,6 +1248,7 @@ static int cdns_uart_console_setup(struct console *co, char *options) int bits = 8; int parity = 'n'; int flow = 'n'; + unsigned long time_out; if (!port->membase) { pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", @@ -1256,8 +1259,25 @@ static int cdns_uart_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); + /* Wait for tx_empty before setting up the console */ + time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); + + while (time_before(jiffies, time_out) && + cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + return uart_set_options(port, co, baud, parity, bits, flow); } + +static struct console cdns_uart_console = { + .name = CDNS_UART_TTY_NAME, + .write = cdns_uart_console_write, + .device = uart_console_device, + .setup = cdns_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPS ) */ + .data = &cdns_uart_uart_driver, +}; #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */ #ifdef CONFIG_PM_SLEEP @@ -1389,89 +1409,8 @@ static const struct of_device_id cdns_uart_of_match[] = { }; MODULE_DEVICE_TABLE(of, cdns_uart_of_match); -/* - * Maximum number of instances without alias IDs but if there is alias - * which target "< MAX_UART_INSTANCES" range this ID can't be used. - */ -#define MAX_UART_INSTANCES 32 - -/* Stores static aliases list */ -static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES); -static int alias_bitmap_initialized; - -/* Stores actual bitmap of allocated IDs with alias IDs together */ -static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES); -/* Protect bitmap operations to have unique IDs */ -static DEFINE_MUTEX(bitmap_lock); - -static int cdns_get_id(struct platform_device *pdev) -{ - int id, ret; - - mutex_lock(&bitmap_lock); - - /* Alias list is stable that's why get alias bitmap only once */ - if (!alias_bitmap_initialized) { - ret = of_alias_get_alias_list(cdns_uart_of_match, "serial", - alias_bitmap, MAX_UART_INSTANCES); - if (ret && ret != -EOVERFLOW) { - mutex_unlock(&bitmap_lock); - return ret; - } - - alias_bitmap_initialized++; - } - - /* Make sure that alias ID is not taken by instance without alias */ - bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES); - - dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n", - MAX_UART_INSTANCES, bitmap); - - /* Look for a serialN alias */ - id = of_alias_get_id(pdev->dev.of_node, "serial"); - if (id < 0) { - dev_warn(&pdev->dev, - "No serial alias passed. Using the first free id\n"); - - /* - * Start with id 0 and check if there is no serial0 alias - * which points to device which is compatible with this driver. - * If alias exists then try next free position. 
- */ - id = 0; - - for (;;) { - dev_info(&pdev->dev, "Checking id %d\n", id); - id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id); - - /* No free empty instance */ - if (id == MAX_UART_INSTANCES) { - dev_err(&pdev->dev, "No free ID\n"); - mutex_unlock(&bitmap_lock); - return -EINVAL; - } - - dev_dbg(&pdev->dev, "The empty id is %d\n", id); - /* Check if ID is empty */ - if (!test_and_set_bit(id, bitmap)) { - /* Break the loop if bit is taken */ - dev_dbg(&pdev->dev, - "Selected ID %d allocation passed\n", - id); - break; - } - dev_dbg(&pdev->dev, - "Selected ID %d allocation failed\n", id); - /* if taking bit fails then try next one */ - id++; - } - } - - mutex_unlock(&bitmap_lock); - - return id; -} +/* Temporary variable for storing number of instances */ +static int instances; /** * cdns_uart_probe - Platform driver probe @@ -1481,16 +1420,11 @@ static int cdns_get_id(struct platform_device *pdev) */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, irq; + int rc, id, irq; struct uart_port *port; struct resource *res; struct cdns_uart *cdns_uart_data; const struct of_device_id *match; - struct uart_driver *cdns_uart_uart_driver; - char *driver_name; -#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - struct console *cdns_uart_console; -#endif cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), GFP_KERNEL); @@ -1500,64 +1434,35 @@ static int cdns_uart_probe(struct platform_device *pdev) if (!port) return -ENOMEM; - cdns_uart_uart_driver = devm_kzalloc(&pdev->dev, - sizeof(*cdns_uart_uart_driver), - GFP_KERNEL); - if (!cdns_uart_uart_driver) - return -ENOMEM; + /* Look for a serialN alias */ + id = of_alias_get_id(pdev->dev.of_node, "serial"); + if (id < 0) + id = 0; - cdns_uart_data->id = cdns_get_id(pdev); - if (cdns_uart_data->id < 0) - return cdns_uart_data->id; - - /* There is a need to use unique driver name */ - driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d", - CDNS_UART_NAME, cdns_uart_data->id); - if (!driver_name) { - rc = -ENOMEM; - goto err_out_id; + if (id >= CDNS_UART_NR_PORTS) { + dev_err(&pdev->dev, "Cannot get uart_port structure\n"); + return -ENODEV; } - cdns_uart_uart_driver->owner = THIS_MODULE; - cdns_uart_uart_driver->driver_name = driver_name; - cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME; - cdns_uart_uart_driver->major = uartps_major; - cdns_uart_uart_driver->minor = cdns_uart_data->id; - cdns_uart_uart_driver->nr = 1; - + if (!cdns_uart_uart_driver.state) { + cdns_uart_uart_driver.owner = THIS_MODULE; + cdns_uart_uart_driver.driver_name = CDNS_UART_NAME; + cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME; + cdns_uart_uart_driver.major = CDNS_UART_MAJOR; + cdns_uart_uart_driver.minor = CDNS_UART_MINOR; + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE - cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console), - GFP_KERNEL); - if (!cdns_uart_console) { - rc = -ENOMEM; - goto err_out_id; - } - - strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME, - sizeof(cdns_uart_console->name)); - cdns_uart_console->index = cdns_uart_data->id; - cdns_uart_console->write = cdns_uart_console_write; - cdns_uart_console->device = uart_console_device; - cdns_uart_console->setup = cdns_uart_console_setup; - cdns_uart_console->flags = CON_PRINTBUFFER; - cdns_uart_console->data = cdns_uart_uart_driver; - cdns_uart_uart_driver->cons = cdns_uart_console; + cdns_uart_uart_driver.cons = &cdns_uart_console; #endif - rc = uart_register_driver(cdns_uart_uart_driver); - if (rc 
< 0) { - dev_err(&pdev->dev, "Failed to register driver\n"); - goto err_out_id; + rc = uart_register_driver(&cdns_uart_uart_driver); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to register driver\n"); + return rc; + } } - cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; - - /* - * Setting up proper name_base needs to be done after uart - * registration because tty_driver structure is not filled. - * name_base is 0 by default. - */ - cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id; + cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver; match = of_match_node(cdns_uart_of_match, pdev->dev.of_node); if (match && match->data) { @@ -1634,6 +1539,7 @@ static int cdns_uart_probe(struct platform_device *pdev) port->flags = UPF_BOOT_AUTOCONF; port->ops = &cdns_uart_ops; port->fifosize = CDNS_UART_FIFO_SIZE; + port->line = id; /* * Register the port. @@ -1661,11 +1567,13 @@ static int cdns_uart_probe(struct platform_device *pdev) * If register_console() don't assign value, then console_port pointer * is cleanup. */ - if (!console_port) + if (!console_port) { + cdns_uart_console.index = id; console_port = port; + } #endif - rc = uart_add_one_port(cdns_uart_uart_driver, port); + rc = uart_add_one_port(&cdns_uart_uart_driver, port); if (rc) { dev_err(&pdev->dev, "uart_add_one_port() failed; err=%i\n", rc); @@ -1675,13 +1583,17 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver->cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { console_port = NULL; + cdns_uart_console.index = -1; + } #endif - uartps_major = cdns_uart_uart_driver->tty_driver->major; cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, "cts-override"); + + instances++; + return 0; err_out_pm_disable: @@ -1697,12 +1609,8 @@ err_out_clk_disable: err_out_clk_dis_pclk: clk_disable_unprepare(cdns_uart_data->pclk); err_out_unregister_driver: - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); -err_out_id: - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); + if (!instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } @@ -1725,10 +1633,6 @@ static int cdns_uart_remove(struct platform_device *pdev) #endif rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port); port->mapbase = 0; - mutex_lock(&bitmap_lock); - if (cdns_uart_data->id < MAX_UART_INSTANCES) - clear_bit(cdns_uart_data->id, bitmap); - mutex_unlock(&bitmap_lock); clk_disable_unprepare(cdns_uart_data->uartclk); clk_disable_unprepare(cdns_uart_data->pclk); pm_runtime_disable(&pdev->dev); @@ -1741,13 +1645,8 @@ static int cdns_uart_remove(struct platform_device *pdev) console_port = NULL; #endif - /* If this is last instance major number should be initialized */ - mutex_lock(&bitmap_lock); - if (bitmap_empty(bitmap, MAX_UART_INSTANCES)) - uartps_major = 0; - mutex_unlock(&bitmap_lock); - - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); + if (!--instances) + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 36c1c59cc72a..cee7514c3aaf 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2407,14 +2407,14 @@ out: * @p: pointer to result * * Obtain the modem status bits from the tty driver 
if the feature - * is supported. Return -EINVAL if it is not available. + * is supported. Return -ENOTTY if it is not available. * * Locking: none (up to the driver) */ static int tty_tiocmget(struct tty_struct *tty, int __user *p) { - int retval = -EINVAL; + int retval = -ENOTTY; if (tty->ops->tiocmget) { retval = tty->ops->tiocmget(tty); @@ -2432,7 +2432,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p) * @p: pointer to desired bits * * Set the modem status bits from the tty driver if the feature - * is supported. Return -EINVAL if it is not available. + * is supported. Return -ENOTTY if it is not available. * * Locking: none (up to the driver) */ @@ -2444,7 +2444,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd, unsigned int set, clear, val; if (tty->ops->tiocmset == NULL) - return -EINVAL; + return -ENOTTY; retval = get_user(val, p); if (retval) @@ -2894,10 +2894,14 @@ void __do_SAK(struct tty_struct *tty) struct task_struct *g, *p; struct pid *session; int i; + unsigned long flags; if (!tty) return; - session = tty->session; + + spin_lock_irqsave(&tty->ctrl_lock, flags); + session = get_pid(tty->session); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty_ldisc_flush(tty); @@ -2929,6 +2933,7 @@ void __do_SAK(struct tty_struct *tty) task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + put_pid(session); #endif } diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index f8ed50a16848..813be2c05262 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty) put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty->session = get_pid(task_session(current)); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); @@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit) spin_lock_irq(¤t->sighand->siglock); put_pid(current->signal->tty_old_pgrp); current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); + spin_unlock_irq(¤t->sighand->siglock); + if (tty) { unsigned long flags; + + tty_lock(tty); spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty_unlock(tty); tty_kref_put(tty); } - spin_unlock_irq(¤t->sighand->siglock); /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); @@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return -ENOTTY; if (retval) return retval; - if (!current->signal->tty || - (current->signal->tty != real_tty) || - (real_tty->session != task_session(current))) - return -ENOTTY; + if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; + + spin_lock_irq(&real_tty->ctrl_lock); + if (!current->signal->tty || + (current->signal->tty != real_tty) || + (real_tty->session != task_session(current))) { + retval = -ENOTTY; + goto out_unlock_ctrl; + } rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; @@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; - spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = 
get_pid(pgrp); - spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); +out_unlock_ctrl: + spin_unlock_irq(&real_tty->ctrl_lock); return retval; } @@ -511,20 +519,30 @@ out_unlock: * * Obtain the session id of the tty. If there is no session * return an error. - * - * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { + unsigned long flags; + pid_t sid; + /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. */ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; + + spin_lock_irqsave(&real_tty->ctrl_lock, flags); if (!real_tty->session) - return -ENOTTY; - return put_user(pid_vnr(real_tty->session), p); + goto err; + sid = pid_vnr(real_tty->session); + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + + return put_user(sid, p); + +err: + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + return -ENOTTY; } /* diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index d2a1e1228c82..9ffd42e333b8 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -605,6 +605,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->index = vcc_table_add(port); if (port->index == -1) { pr_err("VCC: no more TTY indices left for allocation\n"); + rv = -ENOMEM; goto free_ldc; } diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index b28aa0d289f8..251c02af1fc3 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -495,7 +495,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos) p2[unicode & 0x3f] = fontpos; - p->sum += (fontpos << 20) + unicode; + p->sum += (fontpos << 20U) + unicode; return 0; } diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 15d33fa0c925..b6e78fdbfdff 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -127,7 +127,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. 
*/ static bool dead_key_next; -static int npadch = -1; /* -1 or number assembled on pad */ + +/* Handles a number being assembled on the number pad */ +static bool npadch_active; +static unsigned int npadch_value; + static unsigned int diacr; static char rep; /* flag telling character repeat */ @@ -738,8 +742,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) return; if ((unsigned)value < ARRAY_SIZE(func_table)) { + unsigned long flags; + + spin_lock_irqsave(&func_buf_lock, flags); if (func_table[value]) puts_queue(vc, func_table[value]); + spin_unlock_irqrestore(&func_buf_lock, flags); + } else pr_err("k_fn called with value=%d\n", value); } @@ -845,12 +854,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) shift_state &= ~(1 << value); /* kludge */ - if (up_flag && shift_state != old_state && npadch != -1) { + if (up_flag && shift_state != old_state && npadch_active) { if (kbd->kbdmode == VC_UNICODE) - to_utf8(vc, npadch); + to_utf8(vc, npadch_value); else - put_queue(vc, npadch & 0xff); - npadch = -1; + put_queue(vc, npadch_value & 0xff); + npadch_active = false; } } @@ -868,7 +877,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) { - int base; + unsigned int base; if (up_flag) return; @@ -882,10 +891,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) base = 16; } - if (npadch == -1) - npadch = value; - else - npadch = npadch * base + value; + if (!npadch_active) { + npadch_value = 0; + npadch_active = true; + } + + npadch_value = npadch_value * base + value; } static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) @@ -1984,13 +1995,11 @@ out: #undef s #undef v -/* FIXME: This one needs untangling and locking */ +/* FIXME: This one needs untangling */ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) { struct kbsentry *kbs; - char *p; u_char *q; - u_char __user *up; int sz, fnw_sz; int delta; char *first_free, *fj, *fnw; @@ -2016,23 +2025,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) i = kbs->kb_func; switch (cmd) { - case KDGKBSENT: - sz = sizeof(kbs->kb_string) - 1; /* sz should have been - a struct member */ - up = user_kdgkb->kb_string; - p = func_table[i]; - if(p) - for ( ; *p && sz; p++, sz--) - if (put_user(*p, up++)) { - ret = -EFAULT; - goto reterr; - } - if (put_user('\0', up)) { - ret = -EFAULT; - goto reterr; - } - kfree(kbs); - return ((p && *p) ? -EOVERFLOW : 0); + case KDGKBSENT: { + /* size should have been a struct member */ + ssize_t len = sizeof(user_kdgkb->kb_string); + + spin_lock_irqsave(&func_buf_lock, flags); + len = strlcpy(kbs->kb_string, func_table[i] ? : "", len); + spin_unlock_irqrestore(&func_buf_lock, flags); + + ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string, + len + 1) ? 
-EFAULT : 0; + + goto reterr; + } case KDSKBSENT: if (!perm) { ret = -EPERM; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index fa9433e6cdc7..564e1fa2e4c9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -81,6 +81,7 @@ #include <linux/errno.h> #include <linux/kd.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/major.h> #include <linux/mm.h> #include <linux/console.h> @@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = kmalloc(memsize, GFP_KERNEL); + p = vmalloc(memsize); if (!p) return NULL; @@ -364,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) return uniscr; } +static void vc_uniscr_free(struct uni_screen *uniscr) +{ + vfree(uniscr); +} + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - kfree(vc->vc_uni_screen); + vc_uniscr_free(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -1086,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1119,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1137,7 +1157,7 @@ err_free: visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1152,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1184,7 +1196,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1195,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? 
cols : vc->vc_cols); @@ -1206,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > (4 << 20)) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -1229,7 +1241,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, err = resize_screen(vc, new_cols, new_rows, user); if (err) { kfree(newscreen); - kfree(new_uniscr); + vc_uniscr_free(new_uniscr); return err; } @@ -1282,10 +1294,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; @@ -1364,6 +1377,7 @@ struct vc_data *vc_deallocate(unsigned int currcons) atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m); vcs_remove_sysfs(currcons); visual_deinit(vc); + con_free_unimap(vc); put_pid(vc->vt_pid); vc_uniscr_set(vc, NULL); kfree(vc->vc_screenbuf); @@ -3390,6 +3404,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); @@ -4606,27 +4621,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) return rc; } -static int con_font_copy(struct vc_data *vc, struct console_font_op *op) -{ - int con = op->height; - int rc; - - - console_lock(); - if (vc->vc_mode != KD_TEXT) - rc = -EINVAL; - else if (!vc->vc_sw->con_font_copy) - rc = -ENOSYS; - else if (con < 0 || !vc_cons_allocated(con)) - rc = -ENOTTY; - else if (con == vc->vc_num) /* nothing to do */ - rc = 0; - else - rc = vc->vc_sw->con_font_copy(vc, con); - console_unlock(); - return rc; -} - int con_font_op(struct vc_data *vc, struct console_font_op *op) { switch (op->op) { @@ -4637,7 +4631,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op) case KD_FONT_OP_SET_DEFAULT: return con_font_default(vc, op); case KD_FONT_OP_COPY: - return con_font_copy(vc, op); + /* was buggy and never really used */ + return -EINVAL; } return -ENOSYS; } diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index daf61c28ba76..4f51be17427c 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -244,7 +244,7 @@ int vt_waitactive(int n) static inline int -do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) +do_fontx_ioctl(struct vc_data *vc, int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) { struct consolefontdesc cfdarg; int i; @@ -262,15 +262,16 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; - return con_font_op(vc_cons[fg_console].d, op); - case GIO_FONTX: { + return con_font_op(vc, op); + + case GIO_FONTX: op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = 
cfdarg.chardata; - i = con_font_op(vc_cons[fg_console].d, op); + i = con_font_op(vc, op); if (i) return i; cfdarg.charheight = op->height; @@ -278,7 +279,6 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) return -EFAULT; return 0; - } } return -EINVAL; } @@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty, console_lock(); vcp = vc_cons[i].d; if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; - vc_resize(vcp, v.v_cols, v.v_rows); + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } } console_unlock(); } @@ -914,7 +924,7 @@ int vt_ioctl(struct tty_struct *tty, op.height = 0; op.charcount = 256; op.data = up; - ret = con_font_op(vc_cons[fg_console].d, &op); + ret = con_font_op(vc, &op); break; } @@ -925,7 +935,7 @@ int vt_ioctl(struct tty_struct *tty, op.height = 32; op.charcount = 256; op.data = up; - ret = con_font_op(vc_cons[fg_console].d, &op); + ret = con_font_op(vc, &op); break; } @@ -942,7 +952,7 @@ int vt_ioctl(struct tty_struct *tty, case PIO_FONTX: case GIO_FONTX: - ret = do_fontx_ioctl(cmd, up, perm, &op); + ret = do_fontx_ioctl(vc, cmd, up, perm, &op); break; case PIO_FONTRESET: @@ -959,11 +969,11 @@ int vt_ioctl(struct tty_struct *tty, { op.op = KD_FONT_OP_SET_DEFAULT; op.data = NULL; - ret = con_font_op(vc_cons[fg_console].d, &op); + ret = con_font_op(vc, &op); if (ret) break; console_lock(); - con_set_default_unimap(vc_cons[fg_console].d); + con_set_default_unimap(vc); console_unlock(); break; } @@ -1090,8 +1100,9 @@ struct compat_consolefontdesc { }; static inline int -compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, - int perm, struct console_font_op *op) +compat_fontx_ioctl(struct vc_data *vc, int cmd, + struct compat_consolefontdesc __user *user_cfd, + int perm, struct console_font_op *op) { struct compat_consolefontdesc cfdarg; int i; @@ -1109,7 +1120,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); - return con_font_op(vc_cons[fg_console].d, op); + return con_font_op(vc, op); + case GIO_FONTX: op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; @@ -1117,7 +1129,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); - i = con_font_op(vc_cons[fg_console].d, op); + i = con_font_op(vc, op); if (i) return i; cfdarg.charheight = op->height; @@ -1207,7 +1219,7 @@ long vt_compat_ioctl(struct tty_struct *tty, */ case PIO_FONTX: case GIO_FONTX: - return compat_fontx_ioctl(cmd, up, perm, &op); + return compat_fontx_ioctl(vc, cmd, up, perm, &op); case KDFONTOP: return compat_kdfontop_ioctl(up, perm, &op, vc); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a57698985f9c..d91e051d1367 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev) return retval; } -static void uio_free_minor(struct uio_device *idev) +static void uio_free_minor(unsigned long minor) { mutex_lock(&minor_lock); - idr_remove(&uio_idr, 
idev->minor); + idr_remove(&uio_idr, minor); mutex_unlock(&minor_lock); } @@ -990,7 +990,7 @@ err_request_irq: err_uio_dev_add_attributes: device_del(&idev->dev); err_device_create: - uio_free_minor(idev); + uio_free_minor(idev->minor); put_device(&idev->dev); return ret; } @@ -1004,13 +1004,13 @@ EXPORT_SYMBOL_GPL(__uio_register_device); void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; + unsigned long minor; if (!info || !info->uio_dev) return; idev = info->uio_dev; - - uio_free_minor(idev); + minor = idev->minor; mutex_lock(&idev->info_lock); uio_dev_del_attributes(idev); @@ -1026,6 +1026,8 @@ void uio_unregister_device(struct uio_info *info) device_unregister(&idev->dev); + uio_free_minor(minor); + return; } EXPORT_SYMBOL_GPL(uio_unregister_device); diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c index dde5cbb27178..1b33581f0a20 100644 --- a/drivers/uio/uio_pci_generic.c +++ b/drivers/uio/uio_pci_generic.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/interrupt.h> #include <linux/uio_driver.h> #define DRIVER_VERSION "0.01.0" @@ -97,6 +98,11 @@ static int probe(struct pci_dev *pdev, gdev->info.release = release; gdev->pdev = pdev; if (pdev->irq) { +#ifdef CONFIG_X86 + gdev->info.irq = pdev->irq == IRQ_NOTCONNECTED ? 0 : pdev->irq; +#else + gdev->info.irq = pdev->irq; +#endif gdev->info.irq = pdev->irq; gdev->info.irq_flags = IRQF_SHARED; gdev->info.handler = irqhandler; diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index 1303b165055b..538adf9c47bb 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -152,9 +152,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) priv->pdev = pdev; if (!uioinfo->irq) { - ret = platform_get_irq(pdev, 0); + ret = platform_get_irq_optional(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 633c52de3bb3..9865750bc31e 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_release_urb(c67x00, urb); usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); spin_unlock(&c67x00->lock); - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); spin_lock(&c67x00->lock); } diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index e71240b386b4..666cebd9c5f2 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -37,18 +37,18 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, struct cdns3_usb_regs __iomem *regs = priv_dev->regs; struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; - priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[0].length = TRB_LEN(length); + priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length)); if (zlp) { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL); - priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[1].length = TRB_LEN(0); - priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE 
| TRB_TYPE(TRB_NORMAL)); + priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0)); + priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); } else { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); priv_ep->trb_pool[1].control = 0; } @@ -264,11 +264,11 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, case USB_RECIP_INTERFACE: return cdns3_ep0_delegate_req(priv_dev, ctrl); case USB_RECIP_ENDPOINT: - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; /* check if endpoint is stalled or stall is pending */ - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); @@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, if (!set || (tmode & 0xff) != 0) return -EINVAL; - switch (tmode >> 8) { + tmode >>= 8; + switch (tmode) { case TEST_J: case TEST_K: case TEST_SE0_NAK: @@ -387,10 +388,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, if (!(ctrl->wIndex & ~USB_DIR_IN)) return 0; - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (set) __cdns3_gadget_ep_set_halt(priv_ep); @@ -451,7 +452,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (priv_dev->gadget.state < USB_STATE_ADDRESS) return -EINVAL; - if (ctrl_req->wLength != 6) { + if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); return -EINVAL; @@ -475,7 +476,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, if (ctrl_req->wIndex || ctrl_req->wLength) return -EINVAL; - priv_dev->isoch_delay = ctrl_req->wValue; + priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); return 0; } @@ -711,15 +712,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, int ret = 0; u8 zlp = 0; + spin_lock_irqsave(&priv_dev->lock, flags); trace_cdns3_ep0_queue(priv_dev, request); /* cancel the request if controller receive new SETUP packet. */ - if (cdns3_check_new_setup(priv_dev)) + if (cdns3_check_new_setup(priv_dev)) { + spin_unlock_irqrestore(&priv_dev->lock, flags); return -ECONNRESET; + } /* send STATUS stage. 
Should be called only for SET_CONFIGURATION */ if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) { - spin_lock_irqsave(&priv_dev->lock, flags); cdns3_select_ep(priv_dev, 0x00); erdy_sent = !priv_dev->hw_configured_flag; @@ -744,7 +747,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, return 0; } - spin_lock_irqsave(&priv_dev->lock, flags); if (!list_empty(&priv_ep->pending_req_list)) { dev_err(priv_dev->dev, "can't handle multiple requests for ep0\n"); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index f624cc87cbab..c0c39cf30387 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -189,10 +189,10 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) GFP_DMA32 | GFP_ATOMIC); if (!priv_ep->trb_pool) return -ENOMEM; - } else { - memset(priv_ep->trb_pool, 0, ring_size); } + memset(priv_ep->trb_pool, 0, ring_size); + if (!priv_ep->num) return 0; @@ -2105,7 +2105,7 @@ found: link_trb = priv_req->trb; /* Update ring only if removed request is on pending_req_list list */ - if (req_on_hw_ring) { + if (req_on_hw_ring && link_trb) { link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + ((priv_req->end_trb + 1) * TRB_SIZE)); link_trb->control = (link_trb->control & TRB_CYCLE) | @@ -2545,12 +2545,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); pm_runtime_put_autosuspend(cdns->dev); usb_del_gadget_udc(&priv_dev->gadget); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); cdns3_free_all_eps(priv_dev); diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h index bc4024041ef2..ec5c05454531 100644 --- a/drivers/usb/cdns3/gadget.h +++ b/drivers/usb/cdns3/gadget.h @@ -1057,7 +1057,7 @@ struct cdns3_trb { #define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17) /* transfer_len bitmasks - bits 31:24 */ -#define TRB_BURST_LEN(p) (((p) << 24) & GENMASK(31, 24)) +#define TRB_BURST_LEN(p) ((unsigned int)((p) << 24) & GENMASK(31, 24)) #define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24) /* Data buffer pointer bitmasks*/ diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index e92348c9b4d7..f8482456116e 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -150,7 +150,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, __dynamic_array(char, str, CDNS3_MSG_MAX) ), TP_fast_assign( - __entry->ep_dir = priv_dev->ep0_data_dir; + __entry->ep_dir = priv_dev->selected_ep; __entry->ep_sts = ep_sts; ), TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str), @@ -333,9 +333,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, TP_fast_assign( __assign_str(name, priv_ep->name); __entry->trb = trb; - __entry->buffer = trb->buffer; - __entry->length = trb->length; - __entry->control = trb->control; + __entry->buffer = le32_to_cpu(trb->buffer); + __entry->length = le32_to_cpu(trb->length); + __entry->control = le32_to_cpu(trb->control); __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); ), TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)", diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index df8812c30640..85561b3194a1 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -57,7 +57,8 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = { static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = { .flags = CI_HDRC_SUPPORTS_RUNTIME_PM | - CI_HDRC_TURN_VBUS_EARLY_ON, + 
CI_HDRC_TURN_VBUS_EARLY_ON | + CI_HDRC_DISABLE_DEVICE_STREAMING, }; static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = { @@ -138,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) misc_pdev = of_find_device_by_node(args.np); of_node_put(args.np); - if (!misc_pdev || !platform_get_drvdata(misc_pdev)) + if (!misc_pdev) return ERR_PTR(-EPROBE_DEFER); + if (!platform_get_drvdata(misc_pdev)) { + put_device(&misc_pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } data->dev = &misc_pdev->dev; /* diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index af648ba6544d..46105457e1ca 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0); - if (!IS_ERR(ci->platdata->vbus_extcon.edev)) { + if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) { hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, HS_PHY_SESS_VLD_CTRL_EN, HS_PHY_SESS_VLD_CTRL_EN); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 98ee575ee500..b7da2a273c45 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1261,6 +1261,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) enable_irq(ci->irq); } +/* + * Handle the wakeup interrupt triggered by extcon connector + * We need to call ci_irq again for extcon since the first + * interrupt (wakeup int) only let the controller be out of + * low power mode, but not handle any interrupts. + */ +static void ci_extcon_wakeup_int(struct ci_hdrc *ci) +{ + struct ci_hdrc_cable *cable_id, *cable_vbus; + u32 otgsc = hw_read_otgsc(ci, ~0); + + cable_id = &ci->platdata->id_extcon; + cable_vbus = &ci->platdata->vbus_extcon; + + if (!IS_ERR(cable_id->edev) && ci->is_otg && + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) + ci_irq(ci->irq, ci); + + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) + ci_irq(ci->irq, ci); +} + static int ci_controller_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1293,6 +1316,7 @@ static int ci_controller_resume(struct device *dev) enable_irq(ci->irq); if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_wakeup_by_srp(ci); + ci_extcon_wakeup_int(ci); } return 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 84d6f7df09a4..1e9aab6118f5 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) -static void acm_kill_urbs(struct acm *acm) +static void acm_poison_urbs(struct acm *acm) { int i; - usb_kill_urb(acm->ctrlurb); + usb_poison_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); + usb_poison_urb(acm->wb[i].urb); for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); + usb_poison_urb(acm->read_urbs[i]); } +static void acm_unpoison_urbs(struct acm *acm) +{ + int i; + + for (i = 0; i < acm->rx_buflimit; i++) + usb_unpoison_urb(acm->read_urbs[i]); + for (i = 0; i < ACM_NW; i++) + usb_unpoison_urb(acm->wb[i].urb); + usb_unpoison_urb(acm->ctrlurb); +} + + /* * Write buffer management. * All of these assume proper locks taken by the caller. 
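The cdc-acm hunks above replace the old acm_kill_urbs() with an acm_poison_urbs()/acm_unpoison_urbs() pair; as the comment added to acm_disconnect() explains, acm_softint() can resubmit URBs from its error handling, so merely killing them leaves a window. A minimal sketch of the pattern against the generic URB API; the driver struct my_dev and its fixed URB array are illustrative assumptions, not part of the patch:

    #include <linux/kernel.h>
    #include <linux/usb.h>

    /* Hypothetical driver state; four URBs stand in for acm's ctrl,
     * write and read URBs. */
    struct my_dev {
            struct urb *urbs[4];
    };

    /*
     * usb_kill_urb() only cancels what is currently in flight; a
     * completion handler may resubmit immediately afterwards.
     * usb_poison_urb() additionally marks the URB so that any later
     * usb_submit_urb() fails with -EPERM until usb_unpoison_urb()
     * is called.
     */
    static void my_dev_poison_urbs(struct my_dev *dev)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(dev->urbs); i++)
                    usb_poison_urb(dev->urbs[i]);
    }

    static void my_dev_unpoison_urbs(struct my_dev *dev)
    {
            int i;

            /* Reverse order mirrors the poison pass, as in the patch. */
            for (i = ARRAY_SIZE(dev->urbs) - 1; i >= 0; i--)
                    usb_unpoison_urb(dev->urbs[i]);
    }

This is also why the same series stops logging -EPERM in acm_start_wb(): once the URBs are poisoned, a failed submission is the intended outcome rather than an error worth reporting.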
@@ -225,9 +237,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb) rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { - dev_err(&acm->data->dev, - "%s - usb_submit_urb(write bulk) failed: %d\n", - __func__, rc); + if (rc != -EPERM) + dev_err(&acm->data->dev, + "%s - usb_submit_urb(write bulk) failed: %d\n", + __func__, rc); acm_write_done(acm, wb); } return rc; @@ -312,8 +325,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf) acm->iocount.dsr++; if (difference & ACM_CTRL_DCD) acm->iocount.dcd++; - if (newctrl & ACM_CTRL_BRK) + if (newctrl & ACM_CTRL_BRK) { acm->iocount.brk++; + tty_insert_flip_char(&acm->port, 0, TTY_BREAK); + } if (newctrl & ACM_CTRL_RI) acm->iocount.rng++; if (newctrl & ACM_CTRL_FRAMING) @@ -378,21 +393,19 @@ static void acm_ctrl_irq(struct urb *urb) if (current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { - if (acm->nb_size) { - kfree(acm->notification_buffer); - acm->nb_size = 0; - } + u8 *new_buffer; alloc_size = roundup_pow_of_two(expected_size); - /* - * kmalloc ensures a valid notification_buffer after a - * use of kfree in case the previous allocation was too - * small. Final freeing is done on disconnect. - */ - acm->notification_buffer = - kmalloc(alloc_size, GFP_ATOMIC); - if (!acm->notification_buffer) + /* Final freeing is done on disconnect. */ + new_buffer = krealloc(acm->notification_buffer, + alloc_size, GFP_ATOMIC); + if (!new_buffer) { + acm->nb_index = 0; goto exit; + } + + acm->notification_buffer = new_buffer; acm->nb_size = alloc_size; + dr = (struct usb_cdc_notification *)acm->notification_buffer; } copy_size = min(current_size, @@ -412,9 +425,12 @@ static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval && retval != -EPERM) + if (retval && retval != -EPERM && retval != -ENODEV) dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval); + else + dev_vdbg(&acm->control->dev, + "control resubmission terminated %d\n", retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -430,6 +446,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); + } else { + dev_vdbg(&acm->data->dev, "intended failure %d\n", res); } set_bit(index, &acm->read_urbs_free); return res; @@ -471,15 +489,11 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; bool stopped = false; bool stalled = false; + bool cooldown = false; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); - if (!acm->dev) { - dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); - return; - } - switch (status) { case 0: usb_mark_last_busy(acm->dev); @@ -497,6 +511,15 @@ static void acm_read_bulk_callback(struct urb *urb) __func__, status); stopped = true; break; + case -EOVERFLOW: + case -EPROTO: + dev_dbg(&acm->data->dev, + "%s - cooling babbling device\n", __func__); + usb_mark_last_busy(acm->dev); + set_bit(rb->index, &acm->urbs_in_error_delay); + set_bit(ACM_ERROR_DELAY, &acm->flags); + cooldown = true; + break; default: dev_dbg(&acm->data->dev, "%s - nonzero urb status received: %d\n", @@ -518,9 +541,11 @@ static void acm_read_bulk_callback(struct urb *urb) */ smp_mb__after_atomic(); - if (stopped || stalled) { + if (stopped || stalled || cooldown) { if (stalled) - schedule_work(&acm->work); + 
schedule_delayed_work(&acm->dwork, 0); + else if (cooldown) + schedule_delayed_work(&acm->dwork, HZ / 2); return; } @@ -548,23 +573,29 @@ static void acm_write_bulk(struct urb *urb) acm_write_done(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); set_bit(EVENT_TTY_WAKEUP, &acm->flags); - schedule_work(&acm->work); + schedule_delayed_work(&acm->dwork, 0); } static void acm_softint(struct work_struct *work) { int i; - struct acm *acm = container_of(work, struct acm, work); + struct acm *acm = container_of(work, struct acm, dwork.work); if (test_bit(EVENT_RX_STALL, &acm->flags)) { - if (!(usb_autopm_get_interface(acm->data))) { + smp_mb(); /* against acm_suspend() */ + if (!acm->susp_count) { for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_clear_halt(acm->dev, acm->in); acm_submit_read_urbs(acm, GFP_KERNEL); - usb_autopm_put_interface(acm->data); + clear_bit(EVENT_RX_STALL, &acm->flags); } - clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { + for (i = 0; i < acm->rx_buflimit; i++) + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) + acm_submit_read_urb(acm, i, GFP_KERNEL); } if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) @@ -627,7 +658,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise) res = acm_set_control(acm, val); if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) - dev_err(&acm->control->dev, "failed to set dtr/rts\n"); + /* This is broken in too many devices to spam the logs */ + dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); } static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) @@ -709,6 +741,7 @@ static void acm_port_shutdown(struct tty_port *port) * Need to grab write_lock to prevent race with resume, but no need to * hold it due to the tty-port initialised flag. */ + acm_poison_urbs(acm); spin_lock_irq(&acm->write_lock); spin_unlock_irq(&acm->write_lock); @@ -725,7 +758,8 @@ static void acm_port_shutdown(struct tty_port *port) usb_autopm_put_interface_async(acm->control); } - acm_kill_urbs(acm); + acm_unpoison_urbs(acm); + } static void acm_tty_cleanup(struct tty_struct *tty) @@ -894,8 +928,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct acm *acm = tty->driver_data; - ss->xmit_fifo_size = acm->writesize; - ss->baud_base = le32_to_cpu(acm->line.dwDTERate); + ss->line = acm->minor; ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : @@ -907,7 +940,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct acm *acm = tty->driver_data; unsigned int closing_wait, close_delay; - unsigned int old_closing_wait, old_close_delay; int retval = 0; close_delay = msecs_to_jiffies(ss->close_delay * 10); @@ -915,20 +947,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss) ASYNC_CLOSING_WAIT_NONE : msecs_to_jiffies(ss->closing_wait * 10); - /* we must redo the rounding here, so that the values match */ - old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; - old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
- ASYNC_CLOSING_WAIT_NONE : - jiffies_to_msecs(acm->port.closing_wait) / 10; - mutex_lock(&acm->port.mutex); if (!capable(CAP_SYS_ADMIN)) { - if ((ss->close_delay != old_close_delay) || - (ss->closing_wait != old_closing_wait)) + if ((close_delay != acm->port.close_delay) || + (closing_wait != acm->port.closing_wait)) retval = -EPERM; - else - retval = -EOPNOTSUPP; } else { acm->port.close_delay = close_delay; acm->port.closing_wait = closing_wait; @@ -1223,9 +1247,21 @@ static int acm_probe(struct usb_interface *intf, } } } else { + int class = -1; + data_intf_num = union_header->bSlaveInterface0; control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); + + if (control_interface) + class = control_interface->cur_altsetting->desc.bInterfaceClass; + + if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) { + dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n"); + combined_interfaces = 1; + control_interface = data_interface = intf; + goto look_for_collapsed_interface; + } } if (!control_interface || !data_interface) { @@ -1332,7 +1368,7 @@ made_compressed_probe: acm->ctrlsize = ctrlsize; acm->readsize = readsize; acm->rx_buflimit = num_rx_buf; - INIT_WORK(&acm->work, acm_softint); + INIT_DELAYED_WORK(&acm->dwork, acm_softint); init_waitqueue_head(&acm->wioctl); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); @@ -1483,12 +1519,16 @@ skip_countries: return 0; alloc_fail6: + if (!acm->combined_interfaces) { + /* Clear driver data so that disconnect() returns early. */ + usb_set_intfdata(data_interface, NULL); + usb_driver_release_interface(&acm_driver, data_interface); + } if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); device_remove_file(&acm->control->dev, &dev_attr_iCountryCodeRelDate); - kfree(acm->country_codes); } device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); alloc_fail5: @@ -1520,8 +1560,14 @@ static void acm_disconnect(struct usb_interface *intf) if (!acm) return; - mutex_lock(&acm->mutex); acm->disconnected = true; + /* + * there is a circular dependency. acm_softint() can resubmit + * the URBs in error handling so we need to block any + * submission right away + */ + acm_poison_urbs(acm); + mutex_lock(&acm->mutex); if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); @@ -1540,8 +1586,7 @@ static void acm_disconnect(struct usb_interface *intf) tty_kref_put(tty); } - acm_kill_urbs(acm); - cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1582,8 +1627,9 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) if (cnt) return 0; - acm_kill_urbs(acm); - cancel_work_sync(&acm->work); + acm_poison_urbs(acm); + cancel_delayed_work_sync(&acm->dwork); + acm->urbs_in_error_delay = 0; return 0; } @@ -1599,6 +1645,8 @@ static int acm_resume(struct usb_interface *intf) if (--acm->susp_count) goto out; + acm_unpoison_urbs(acm); + if (tty_port_initialized(&acm->port)) { rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC); @@ -1663,6 +1711,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. 
(formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ @@ -1670,6 +1720,15 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */ + .driver_info = DISABLE_ECHO, /* Don't echo banner */ + }, + { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */ + .driver_info = DISABLE_ECHO, /* Don't echo banner */ + }, + { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */ + .driver_info = DISABLE_ECHO, /* Don't echo banner */ + }, { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, @@ -1862,6 +1921,10 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */ .driver_info = IGNORE_DEVICE, }, + + { USB_DEVICE(0x04d8, 0xf58b), + .driver_info = IGNORE_DEVICE, + }, #endif /*Samsung phone in firmware update mode */ @@ -1874,6 +1937,17 @@ static const struct usb_device_id acm_ids[] = { .driver_info = IGNORE_DEVICE, }, + /* Exclude ETAS ES58x */ + { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ .driver_info = SEND_ZERO_PACKET, }, @@ -1881,6 +1955,11 @@ static const struct usb_device_id acm_ids[] = { .driver_info = SEND_ZERO_PACKET, }, + /* Exclude Goodix Fingerprint Reader */ + { USB_DEVICE(0x27c6, 0x5395), + .driver_info = IGNORE_DEVICE, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index ca1c026382c2..b95ff769072e 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -109,8 +109,10 @@ struct acm { # define EVENT_TTY_WAKEUP 0 # define EVENT_RX_STALL 1 # define ACM_THROTTLED 2 +# define ACM_ERROR_DELAY 3 + unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ struct usb_cdc_line_coding line; /* bits, stop, parity */ - struct work_struct work; /* work queue entry for line discipline waking up */ + struct delayed_work dwork; /* work queue entry for various purposes */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ unsigned int ctrlout; /* output control lines (DTR, RTS) */ struct async_icount iocount; /* counters for control line changes */ diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 70afb2ca1eab..fc1a219ad0a7 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_MAX 16 +/* we cannot wait forever at flush() */ +#define WDM_FLUSH_TIMEOUT (30 * HZ) + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ #define WDM_DEFAULT_BUFSIZE 256 @@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb) kfree(desc->outbuf); desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); - wake_up(&desc->wait); + wake_up_all(&desc->wait); } static 
void wdm_in_callback(struct urb *urb) @@ -393,6 +396,9 @@ static ssize_t wdm_write if (test_bit(WDM_RESETTING, &desc->flags)) r = -EIO; + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + r = -ENODEV; + if (r < 0) { rv = r; goto out_free_mem_pm; @@ -424,6 +430,7 @@ static ssize_t wdm_write if (rv < 0) { desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); + wake_up_all(&desc->wait); /* for wdm_wait_for_response() */ dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); rv = usb_translate_errors(rv); goto out_free_mem_pm; @@ -458,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc) if (!desc->resp_count || !--desc->resp_count) goto out; + if (test_bit(WDM_DISCONNECTING, &desc->flags)) { + rv = -ENODEV; + goto out; + } + if (test_bit(WDM_RESETTING, &desc->flags)) { + rv = -EIO; + goto out; + } + set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irq(&desc->iuspin); rv = usb_submit_urb(desc->response, GFP_KERNEL); spin_lock_irq(&desc->iuspin); if (rv) { - dev_err(&desc->intf->dev, - "usb_submit_urb failed with result %d\n", rv); + if (!test_bit(WDM_DISCONNECTING, &desc->flags)) + dev_err(&desc->intf->dev, + "usb_submit_urb failed with result %d\n", rv); /* make sure the next notification trigger a submit */ clear_bit(WDM_RESPONDING, &desc->flags); @@ -583,28 +600,58 @@ err: return rv; } -static int wdm_flush(struct file *file, fl_owner_t id) +static int wdm_wait_for_response(struct file *file, long timeout) { struct wdm_device *desc = file->private_data; + long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */ - wait_event(desc->wait, - /* - * needs both flags. We cannot do with one - * because resetting it would cause a race - * with write() yet we need to signal - * a disconnect - */ - !test_bit(WDM_IN_USE, &desc->flags) || - test_bit(WDM_DISCONNECTING, &desc->flags)); + /* + * Needs both flags. We cannot do with one because resetting it would + * cause a race with write() yet we need to signal a disconnect. + */ + rv = wait_event_interruptible_timeout(desc->wait, + !test_bit(WDM_IN_USE, &desc->flags) || + test_bit(WDM_DISCONNECTING, &desc->flags), + timeout); - /* cannot dereference desc->intf if WDM_DISCONNECTING */ + /* + * To report the correct error. This is best effort. + * We are inevitably racing with the hardware. + */ if (test_bit(WDM_DISCONNECTING, &desc->flags)) return -ENODEV; - if (desc->werr < 0) - dev_err(&desc->intf->dev, "Error in flush path: %d\n", - desc->werr); + if (!rv) + return -EIO; + if (rv < 0) + return -EINTR; - return usb_translate_errors(desc->werr); + spin_lock_irq(&desc->iuspin); + rv = desc->werr; + desc->werr = 0; + spin_unlock_irq(&desc->iuspin); + + return usb_translate_errors(rv); + +} + +/* + * You need to send a signal when you react to malicious or defective hardware. + * Also, don't abort when fsync() returned -EINVAL, for older kernels which do + * not implement wdm_flush() will return -EINVAL. + */ +static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT); +} + +/* + * Same with wdm_fsync(), except it uses finite timeout in order to react to + * malicious or defective hardware which ceased communication after close() was + * implicitly called due to process termination. 
+ */ +static int wdm_flush(struct file *file, fl_owner_t id) +{ + return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT); } static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait) @@ -729,6 +776,7 @@ static const struct file_operations wdm_fops = { .owner = THIS_MODULE, .read = wdm_read, .write = wdm_write, + .fsync = wdm_fsync, .open = wdm_open, .flush = wdm_flush, .release = wdm_release, @@ -988,9 +1036,9 @@ static void wdm_disconnect(struct usb_interface *intf) wake_up_all(&desc->wait); mutex_lock(&desc->rlock); mutex_lock(&desc->wlock); - kill_urbs(desc); cancel_work_sync(&desc->rxwork); cancel_work_sync(&desc->service_outs_intr); + kill_urbs(desc); mutex_unlock(&desc->wlock); mutex_unlock(&desc->rlock); diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 0d8e3f3804a3..f27b4aecff3d 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i #define usblp_reset(usblp)\ usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) -#define usblp_hp_channel_change_request(usblp, channel, buffer) \ - usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1) +static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) +{ + u8 *buf; + int ret; + + buf = kzalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, + USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, + channel, buf, 1); + if (ret == 0) + *new_channel = buf[0]; + + kfree(buf); + + return ret; +} /* * See the description for usblp_select_alts() below for the usage @@ -468,7 +485,8 @@ static int usblp_release(struct inode *inode, struct file *file) usb_autopm_put_interface(usblp->intf); if (!usblp->present) /* finish cleanup from disconnect */ - usblp_cleanup(usblp); + usblp_cleanup(usblp); /* any URBs must be dead */ + mutex_unlock(&usblp_mutex); return 0; } @@ -476,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file) /* No kernel lock - fine */ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) { - __poll_t ret; + struct usblp *usblp = file->private_data; + __poll_t ret = 0; unsigned long flags; - struct usblp *usblp = file->private_data; /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */ poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); + + mutex_lock(&usblp->mut); + if (!usblp->present) + ret |= EPOLLHUP; + mutex_unlock(&usblp->mut); + spin_lock_irqsave(&usblp->lock, flags); - ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | - ((usblp->no_paper || usblp->wcomplete) ? 
EPOLLOUT | EPOLLWRNORM : 0); + if (usblp->bidir && usblp->rcomplete) + ret |= EPOLLIN | EPOLLRDNORM; + if (usblp->no_paper || usblp->wcomplete) + ret |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&usblp->lock, flags); return ret; } @@ -826,6 +852,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo if (rv < 0) return rv; + if (!usblp->present) { + count = -ENODEV; + goto done; + } + if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); @@ -1306,14 +1337,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; - alts = usblp->protocol[protocol].alt_setting; - if (alts < 0) - return -EINVAL; - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); - if (r < 0) { - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", - alts, usblp->ifnum); - return r; + /* Don't unnecessarily set the interface if there's a single alt. */ + if (usblp->intf->num_altsetting > 1) { + alts = usblp->protocol[protocol].alt_setting; + if (alts < 0) + return -EINVAL; + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); + if (r < 0) { + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", + alts, usblp->ifnum); + return r; + } } usblp->bidir = (usblp->protocol[protocol].epread != NULL); @@ -1375,9 +1409,11 @@ static void usblp_disconnect(struct usb_interface *intf) usblp_unlink_urbs(usblp); mutex_unlock(&usblp->mut); + usb_poison_anchored_urbs(&usblp->urbs); if (!usblp->used) usblp_cleanup(usblp); + mutex_unlock(&usblp_mutex); } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ca40d135430..35e89460b9ca 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) { struct usb_memory *usbm = NULL; struct usb_dev_state *ps = file->private_data; + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; @@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (remap_pfn_range(vma, vma->vm_start, - virt_to_phys(usbm->mem) >> PAGE_SHIFT, - size, vma->vm_page_prot) < 0) { - dec_usb_memory_use_count(usbm, &usbm->vma_use_count); - return -EAGAIN; + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { + if (remap_pfn_range(vma, vma->vm_start, + virt_to_phys(usbm->mem) >> PAGE_SHIFT, + size, vma->vm_page_prot) < 0) { + dec_usb_memory_use_count(usbm, &usbm->vma_use_count); + return -EAGAIN; + } + } else { + if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, + size)) { + dec_usb_memory_use_count(usbm, &usbm->vma_use_count); + return -EAGAIN; + } } vma->vm_flags |= VM_IO; @@ -473,11 +482,11 @@ static void snoop_urb(struct usb_device *udev, if (userurb) { /* Async */ if (when == SUBMIT) - dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " + dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else - dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " + dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); @@ -1983,7 +1992,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) if (as) { int retval; - snoop(&ps->dev->dev, "reap %pK\n", as->userurb); + snoop(&ps->dev->dev, 
"reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; @@ -2000,7 +2009,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) as = async_getcompleted(ps); if (as) { - snoop(&ps->dev->dev, "reap %pK\n", as->userurb); + snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl(as, (void __user * __user *)arg); free_async(as); } else { @@ -2130,7 +2139,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) if (as) { int retval; - snoop(&ps->dev->dev, "reap %pK\n", as->userurb); + snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; @@ -2147,7 +2156,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar as = async_getcompleted(ps); if (as) { - snoop(&ps->dev->dev, "reap %pK\n", as->userurb); + snoop(&ps->dev->dev, "reap %px\n", as->userurb); retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } else { @@ -2612,7 +2621,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, #endif case USBDEVFS_DISCARDURB: - snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p); + snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p); ret = proc_unlinkurb(ps, p); break; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 243577656177..cd61860cada5 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -38,6 +38,7 @@ #define USB_VENDOR_GENESYS_LOGIC 0x05e3 #define USB_VENDOR_SMSC 0x0424 +#define USB_PRODUCT_USB5534B 0x5534 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 @@ -1222,6 +1223,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif + /* Don't set the change_bits when the device + * was powered off. + */ + if (test_bit(port1, hub->power_bits)) + set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -2722,13 +2728,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry, { int old_scheme_first_port = port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; - int quick_enumeration = (udev->speed == USB_SPEED_HIGH); if (udev->speed >= USB_SPEED_SUPER) return false; - return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first - || quick_enumeration); + return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first); } /* Is a USB 3.0 port in the Inactive or Compliance Mode state? @@ -3087,6 +3091,15 @@ static int check_port_resume_type(struct usb_device *udev, if (portchange & USB_PORT_STAT_C_ENABLE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); + + /* + * Whatever made this reset-resume necessary may have + * turned on the port1 bit in hub->change_bits. But after + * a successful reset-resume we want the bit to be clear; + * if it was on it would indicate that something happened + * following the reset-resume. 
+ */ + clear_bit(port1, hub->change_bits); } return status; @@ -3524,7 +3537,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) u16 portchange, portstatus; if (!test_and_set_bit(port1, hub->child_usage_bits)) { - status = pm_runtime_get_sync(&port_dev->dev); + status = pm_runtime_resume_and_get(&port_dev->dev); if (status < 0) { dev_dbg(&udev->dev, "can't resume usb port, status %d\n", status); @@ -5494,8 +5507,11 @@ out_hdev_lock: } static const struct usb_device_id hub_id_table[] = { - { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_SMSC, + .idProduct = USB_PRODUCT_USB5534B, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_VENDOR diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 5adf489428aa..041c68ea329f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io) int i, retval; spin_lock_irqsave(&io->lock, flags); - if (io->status) { + if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; + io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { @@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } + + spin_lock_irqsave(&io->lock, flags); + io->count--; + if (!io->count) + complete(&io->complete); + spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); @@ -1136,11 +1143,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { @@ -1197,6 +1204,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, } } +/* + * usb_disable_device_endpoints -- Disable all endpoints for a device + * @dev: the device whose endpoints are being disabled + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. + */ +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) +{ + struct usb_hcd *hcd = bus_to_hcd(dev->bus); + int i; + + if (hcd->driver->check_bandwidth) { + /* First pass: Cancel URBs, leave endpoint pointers intact. 
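usb_sg_cancel() above now pins the request with a temporary count increment because the unlink calls must run without the lock held; only when the count drops back to zero is the completion signalled. A compact pthread model of that keep-alive scheme (types, helpers and the -1 status are stand-ins for the USB core's):

#include <pthread.h>

struct sg_req {
    pthread_mutex_t lock;
    pthread_cond_t done;
    int status;     /* nonzero once failed or cancelled */
    int count;      /* outstanding users of the request */
};

static void unlink_all(struct sg_req *io)
{
    (void)io;       /* stands in for usb_unlink_urb() on each entry */
}

void sg_cancel(struct sg_req *io)
{
    pthread_mutex_lock(&io->lock);
    if (io->status || io->count == 0) {     /* already done or idle */
        pthread_mutex_unlock(&io->lock);
        return;
    }
    io->status = -1;    /* cancelled; the driver uses -ECONNRESET */
    io->count++;        /* keep the request alive until we're done */
    pthread_mutex_unlock(&io->lock);

    unlink_all(io);     /* must not run under the lock */

    pthread_mutex_lock(&io->lock);
    if (--io->count == 0)               /* last user gone: */
        pthread_cond_signal(&io->done); /* wake the waiter */
    pthread_mutex_unlock(&io->lock);
}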
*/ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, false); + usb_disable_endpoint(dev, i + USB_DIR_IN, false); + } + /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); + } + /* Second pass: remove endpoint pointers */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } +} + /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled @@ -1210,7 +1245,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - struct usb_hcd *hcd = bus_to_hcd(dev->bus); /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) @@ -1256,22 +1290,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); - if (hcd->driver->check_bandwidth) { - /* First pass: Cancel URBs, leave endpoint pointers intact. */ - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, false); - usb_disable_endpoint(dev, i + USB_DIR_IN, false); - } - /* Remove endpoints from the host controller internal state */ - mutex_lock(hcd->bandwidth_mutex); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - /* Second pass: remove endpoint pointers */ - } - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + + usb_disable_device_endpoints(dev, skip_ep0); } /** @@ -1514,6 +1534,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface); * The caller must own the device lock. * * Return: Zero on success, else a negative error code. + * + * If this routine fails the device will probably be in an unusable state + * with endpoints disabled, and interfaces only partially enabled. 
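The usb_disable_device_endpoints() helper being introduced here makes the two-pass teardown explicit: first cancel I/O while the endpoint pointers are still valid so in-flight completions can find their state, then drop the pointers. Schematically, with invented structures:

#include <stddef.h>

struct ep {
    int active;
};

struct dev {
    struct ep *ep_in[16];
    struct ep *ep_out[16];
};

static void disable_ep(struct ep **slot, int drop_ptr)
{
    struct ep *ep = *slot;

    if (!ep)
        return;
    ep->active = 0;     /* pass 1: cancel outstanding transfers */
    if (drop_ptr)
        *slot = NULL;   /* pass 2: forget the endpoint */
}

static void disable_all(struct dev *d, int skip_ep0)
{
    int i;

    /* First pass keeps the pointers so in-flight completions can
     * still find their endpoint state. */
    for (i = skip_ep0; i < 16; i++) {
        disable_ep(&d->ep_out[i], 0);
        disable_ep(&d->ep_in[i], 0);
    }
    /* (the real helper drops HC bandwidth state at this point) */
    for (i = skip_ep0; i < 16; i++) {
        disable_ep(&d->ep_out[i], 1);   /* second pass: drop pointers */
        disable_ep(&d->ep_in[i], 1);
    }
}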
*/ int usb_reset_configuration(struct usb_device *dev) { @@ -1529,10 +1552,7 @@ int usb_reset_configuration(struct usb_device *dev) * calls during probe() are fine */ - for (i = 1; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; @@ -1545,34 +1565,10 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } - /* Make sure we have enough bandwidth for each alternate setting 0 */ - for (i = 0; i < config->desc.bNumInterfaces; i++) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - retval = usb_hcd_alloc_bandwidth(dev, NULL, - intf->cur_altsetting, alt); - if (retval < 0) - break; - } - /* If not, reinstate the old alternate settings */ + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { -reset_old_alts: - for (i--; i >= 0; i--) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - usb_hcd_alloc_bandwidth(dev, NULL, - alt, intf->cur_altsetting); - } usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; @@ -1581,8 +1577,12 @@ reset_old_alts: USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) - goto reset_old_alts; + if (retval < 0) { + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; + } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index da30b5664ff3..f6a6c54cba35 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -25,17 +25,23 @@ static unsigned int quirk_count; static char quirks_param[128]; -static int quirks_param_set(const char *val, const struct kernel_param *kp) +static int quirks_param_set(const char *value, const struct kernel_param *kp) { - char *p, *field; + char *val, *p, *field; u16 vid, pid; u32 flags; size_t i; int err; + val = kstrdup(value, GFP_KERNEL); + if (!val) + return -ENOMEM; + err = param_set_copystring(val, kp); - if (err) + if (err) { + kfree(val); return err; + } mutex_lock(&quirk_mutex); @@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) if (!quirk_list) { quirk_count = 0; mutex_unlock(&quirk_mutex); + kfree(val); return -ENOMEM; } - for (i = 0, p = (char *)val; p && *p;) { + for (i = 0, p = val; p && *p;) { /* Each entry consists of VID:PID:flags */ field = strsep(&p, ":"); if (!field) @@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) unlock: mutex_unlock(&quirk_mutex); + kfree(val); return 0; } @@ -218,11 +226,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), 
.driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -333,12 +342,19 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Agfa SNAPSCAN 1212U */ + { USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */ { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME }, /* Guillemot Webcam Hercules Dualpix Exchange*/ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Guillemot Hercules DJ Console audio card (BZ 208357) */ + { USB_DEVICE(0x06f8, 0xb000), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Midiman M-Audio Keystation 88es */ { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -361,13 +377,23 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Sound Devices MixPre-D */ + { USB_DEVICE(0x0926, 0x0208), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Kingston DataTraveler 3.0 */ + { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, + /* ELMO L-12F document camera */ + { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG }, + /* Broadcom BCM92035DGROM BT dongle */ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -380,14 +406,22 @@ static const struct usb_device_id usb_quirk_list[] = { /* Realtek hub in Dell WD19 (Type-C) */ { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME }, /* Generic RTL8153 based ethernet adapters */ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* SONiX USB DEVICE Touchpad */ + { USB_DEVICE(0x0c45, 0x7056), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* novation SoundControl XL */ + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, @@ -401,6 +435,13 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */ + { USB_DEVICE(0x17ef, 0xa012), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, + + /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */ + { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM }, + /* BUILDWIN Photo Frame */ { USB_DEVICE(0x1908, 0x1315), .driver_info = USB_QUIRK_HONOR_BNUMINTERFACES }, @@ -430,6 +471,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 RGB RAPDIFIRE */ + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = 
USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -452,15 +497,18 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* Fibocom L850-GL LTE Modem */ + { USB_DEVICE(0x2cb7, 0x0007), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, - /* novation SoundControl XL */ - { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, - { } /* terminating entry must be last */ }; @@ -495,7 +543,10 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. */ static const struct usb_device_id usb_endpoint_blacklist[] = { + { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 }, + { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 }, { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 }, { } }; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index f19694e69f5c..2f594c88d905 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; + int retval; + retval = usb_lock_device_interruptible(udev); + if (retval < 0) + return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). @@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } + usb_unlock_device(udev); return count - nleft; } diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index da923ec17612..31ca5abb4c12 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb) EXPORT_SYMBOL_GPL(usb_block_urb); /** - * usb_kill_anchored_urbs - cancel transfer requests en masse + * usb_kill_anchored_urbs - kill all URBs associated with an anchor * @anchor: anchor the requests are bound to * - * this allows all outstanding URBs to be killed starting - * from the back of the queue + * This kills all outstanding URBs starting from the back of the queue, + * with guarantee that no completer callbacks will take place from the + * anchor after this function returns. * * This routine should not be called by a driver after its disconnect * method has returned. 
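The hunk below reshapes the kill loop: since the anchor lock is dropped around each usb_kill_urb(), new URBs can be anchored in the meantime, so the whole loop repeats until the anchor is seen empty while the lock is held. Roughly, in pthread terms (all names hypothetical; the kernel's emptiness test, usb_anchor_check_wakeup(), also accounts for URBs whose completions are still running):

#include <pthread.h>
#include <stdbool.h>

struct anchor {
    pthread_mutex_t lock;
    int anchored;       /* URBs on the anchor's list */
    int completing;     /* completions still running elsewhere */
};

/* Stands in for usb_kill_urb(): may sleep, so never called locked. */
static void kill_one(struct anchor *a)
{
    pthread_mutex_lock(&a->lock);
    if (a->anchored > 0)
        a->anchored--;  /* the kill unanchors the URB */
    pthread_mutex_unlock(&a->lock);
}

void kill_anchored(struct anchor *a)
{
    bool surely_empty;

    do {
        pthread_mutex_lock(&a->lock);
        while (a->anchored > 0) {
            pthread_mutex_unlock(&a->lock);
            kill_one(a);
            pthread_mutex_lock(&a->lock);
        }
        /* A completion running while the lock was dropped may
         * still re-anchor an URB, so re-check both counts under
         * the lock before declaring victory. */
        surely_empty = a->anchored == 0 && a->completing == 0;
        pthread_mutex_unlock(&a->lock);
    } while (!surely_empty);
}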
@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb); void usb_kill_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; + int surely_empty; - spin_lock_irq(&anchor->lock); - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - /* we must make sure the URB isn't freed before we kill it*/ - usb_get_urb(victim); - spin_unlock_irq(&anchor->lock); - /* this will unanchor the URB */ - usb_kill_urb(victim); - usb_put_urb(victim); + do { spin_lock_irq(&anchor->lock); - } - spin_unlock_irq(&anchor->lock); + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + /* make sure the URB isn't freed before we kill it */ + usb_get_urb(victim); + spin_unlock_irq(&anchor->lock); + /* this will unanchor the URB */ + usb_kill_urb(victim); + usb_put_urb(victim); + spin_lock_irq(&anchor->lock); + } + surely_empty = usb_anchor_check_wakeup(anchor); + + spin_unlock_irq(&anchor->lock); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); @@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); void usb_poison_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; + int surely_empty; - spin_lock_irq(&anchor->lock); - anchor->poisoned = 1; - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - /* we must make sure the URB isn't freed before we kill it*/ - usb_get_urb(victim); - spin_unlock_irq(&anchor->lock); - /* this will unanchor the URB */ - usb_poison_urb(victim); - usb_put_urb(victim); + do { spin_lock_irq(&anchor->lock); - } - spin_unlock_irq(&anchor->lock); + anchor->poisoned = 1; + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + /* make sure the URB isn't freed before we kill it */ + usb_get_urb(victim); + spin_unlock_irq(&anchor->lock); + /* this will unanchor the URB */ + usb_poison_urb(victim); + usb_put_urb(victim); + spin_lock_irq(&anchor->lock); + } + surely_empty = usb_anchor_check_wakeup(anchor); + + spin_unlock_irq(&anchor->lock); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); @@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; unsigned long flags; + int surely_empty; - spin_lock_irqsave(&anchor->lock, flags); - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - __usb_unanchor_urb(victim, anchor); - } - spin_unlock_irqrestore(&anchor->lock, flags); + do { + spin_lock_irqsave(&anchor->lock, flags); + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + __usb_unanchor_urb(victim, anchor); + } + surely_empty = usb_anchor_check_wakeup(anchor); + + spin_unlock_irqrestore(&anchor->lock, flags); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 6af6add3d4c0..9da27ec22d58 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -312,6 +312,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg) static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) { int ret; + u32 hprt0; /* Clear interrupt */ dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS); @@ -332,6 +333,13 @@ static void 
dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) * established */ dwc2_hsotg_disconnect(hsotg); + } else { + /* Turn on the port power bit. */ + hprt0 = dwc2_read_hprt0(hsotg); + hprt0 |= HPRT0_PWR; + dwc2_writel(hsotg, hprt0, HPRT0); + /* Connect hcd after port power is set. */ + dwc2_hcd_connect(hsotg); } } @@ -421,10 +429,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) if (ret && (ret != -ENOTSUPP)) dev_err(hsotg->dev, "exit power_down failed\n"); + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; call_gadget(hsotg, resume); + } else { + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; } - /* Change to L0 state */ - hsotg->lx_state = DWC2_L0; } else { if (hsotg->params.power_down) return; @@ -654,6 +665,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg) return 0; } +/** + * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect. + * Exits hibernation without restoring registers. + * + * @hsotg: Programming view of DWC_otg controller + * @gpwrdn: GPWRDN register + */ +static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg, + u32 gpwrdn) +{ + u32 gpwrdn_tmp; + + /* Switch-on voltage to the core */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + udelay(5); + + /* Reset core */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + udelay(5); + + /* Disable Power Down Clamp */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + udelay(5); + + /* Deassert reset core */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp |= GPWRDN_PWRDNRSTN; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + udelay(5); + + /* Disable PMU interrupt */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp &= ~GPWRDN_PMUINTSEL; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + + /* De-assert Wakeup Logic */ + gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); + gpwrdn_tmp &= ~GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + + hsotg->hibernated = 0; + hsotg->bus_suspended = 0; + + if (gpwrdn & GPWRDN_IDSTS) { + hsotg->op_state = OTG_STATE_B_PERIPHERAL; + dwc2_core_init(hsotg, false); + dwc2_enable_global_interrupts(hsotg); + dwc2_hsotg_core_init_disconnected(hsotg, false); + dwc2_hsotg_core_connect(hsotg); + } else { + hsotg->op_state = OTG_STATE_A_HOST; + + /* Initialize the Core for Host mode */ + dwc2_core_init(hsotg, false); + dwc2_enable_global_interrupts(hsotg); + dwc2_hcd_start(hsotg); + } +} + /* * GPWRDN interrupt handler. 
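In the session-request path above, port power is driven before the HCD is told about the connection; announcing a connect on an unpowered port would leave enumeration dead on arrival. The ordering, reduced to a sketch (the register stand-in and bit position follow the dwc2 layout but are illustrative here):

static unsigned int hprt0_reg;      /* stand-in for the HPRT0 register */

#define HPRT0_PWR (1u << 12)        /* port-power bit per the dwc2 layout */

static void hcd_connect(void)
{
    /* notify the host stack of the new connection */
}

static void handle_session_request(void)
{
    unsigned int hprt0 = hprt0_reg; /* read-modify-write */

    hprt0 |= HPRT0_PWR;             /* 1. drive port power */
    hprt0_reg = hprt0;
    hcd_connect();                  /* 2. only then report the connect */
}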
* @@ -675,64 +751,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg) if ((gpwrdn & GPWRDN_DISCONN_DET) && (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) { - u32 gpwrdn_tmp; - dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__); - - /* Switch-on voltage to the core */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - udelay(10); - - /* Reset core */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - udelay(10); - - /* Disable Power Down Clamp */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - udelay(10); - - /* Deassert reset core */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp |= GPWRDN_PWRDNRSTN; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - udelay(10); - - /* Disable PMU interrupt */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp &= ~GPWRDN_PMUINTSEL; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - - /* De-assert Wakeup Logic */ - gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN); - gpwrdn_tmp &= ~GPWRDN_PMUACTV; - dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); - - hsotg->hibernated = 0; - - if (gpwrdn & GPWRDN_IDSTS) { - hsotg->op_state = OTG_STATE_B_PERIPHERAL; - dwc2_core_init(hsotg, false); - dwc2_enable_global_interrupts(hsotg); - dwc2_hsotg_core_init_disconnected(hsotg, false); - dwc2_hsotg_core_connect(hsotg); - } else { - hsotg->op_state = OTG_STATE_A_HOST; - - /* Initialize the Core for Host mode */ - dwc2_core_init(hsotg, false); - dwc2_enable_global_interrupts(hsotg); - dwc2_hcd_start(hsotg); - } - } - - if ((gpwrdn & GPWRDN_LNSTSCHG) && - (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) { + /* + * Call disconnect detect function to exit from + * hibernation + */ + dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn); + } else if ((gpwrdn & GPWRDN_LNSTSCHG) && + (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) { dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__); if (hsotg->hw_params.hibernation && hsotg->hibernated) { @@ -743,24 +769,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg) dwc2_exit_hibernation(hsotg, 1, 0, 1); } } - } - if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) { + } else if ((gpwrdn & GPWRDN_RST_DET) && + (gpwrdn & GPWRDN_RST_DET_MSK)) { dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__); if (!linestate && (gpwrdn & GPWRDN_BSESSVLD)) dwc2_exit_hibernation(hsotg, 0, 1, 0); - } - if ((gpwrdn & GPWRDN_STS_CHGINT) && - (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) { + } else if ((gpwrdn & GPWRDN_STS_CHGINT) && + (gpwrdn & GPWRDN_STS_CHGINT_MSK)) { dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__); - if (hsotg->hw_params.hibernation && - hsotg->hibernated) { - if (gpwrdn & GPWRDN_IDSTS) { - dwc2_exit_hibernation(hsotg, 0, 0, 0); - call_gadget(hsotg, resume); - } else { - dwc2_exit_hibernation(hsotg, 1, 0, 1); - } - } + /* + * As GPWRDN_STS_CHGINT exit from hibernation flow is + * the same as in GPWRDN_DISCONN_DET flow. Call + * disconnect detect helper function to exit from + * hibernation. 
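dwc_handle_gpwrdn_disc_det(), factored out above, replaces two open-coded copies of the same register dance: each step is a read-modify-write of GPWRDN followed by a short settle delay. Condensed further into a helper of its own (bit values are placeholders, and unlike this sketch the driver omits the delay on the final two steps):

static unsigned int gpwrdn_reg;     /* stand-in for the GPWRDN register */

#define PWRDNSWTCH  (1u << 0)       /* illustrative bit positions */
#define PWRDNRSTN   (1u << 1)
#define PWRDNCLMP   (1u << 2)
#define PMUINTSEL   (1u << 3)
#define PMUACTV     (1u << 4)

static void delay_us(int us)
{
    (void)us;   /* settle time; udelay(5) in the driver */
}

static void gpwrdn_update(unsigned int clear, unsigned int set)
{
    unsigned int v = gpwrdn_reg;

    v &= ~clear;
    v |= set;
    gpwrdn_reg = v;
    delay_us(5);
}

static void exit_hibernation_power_seq(void)
{
    gpwrdn_update(PWRDNSWTCH, 0);   /* switch core supply on */
    gpwrdn_update(PWRDNRSTN, 0);    /* assert core reset */
    gpwrdn_update(PWRDNCLMP, 0);    /* release power-down clamp */
    gpwrdn_update(0, PWRDNRSTN);    /* deassert core reset */
    gpwrdn_update(PMUINTSEL, 0);    /* disable the PMU interrupt */
    gpwrdn_update(PMUACTV, 0);      /* de-assert wakeup logic */
}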
+ */ + dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn); } } diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 7fd0900a9cb0..e3f1f20c4922 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -712,8 +712,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) */ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; int is_isoc = hs_ep->isochronous; unsigned int maxsize; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; if (is_isoc) maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : @@ -722,6 +725,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) else maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) + maxsize = mps * MAX_DMA_DESC_NUM_GENERIC; + return maxsize; } @@ -737,11 +745,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) * Isochronous - descriptor rx/tx bytes bitfield limit, * Control In/Bulk/Interrupt - multiple of mps. This will allow to not * have concatenations from various descriptors within one packet. + * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds + * to a single descriptor. * * Selects corresponding mask for RX/TX bytes as well. */ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; u32 mps = hs_ep->ep.maxpacket; int dir_in = hs_ep->dir_in; u32 desc_size = 0; @@ -765,6 +776,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) desc_size -= desc_size % mps; } + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) { + desc_size = mps; + *mask = DEV_DMA_NBYTES_MASK; + } + return desc_size; } @@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, length += (mps - (length % mps)); } - /* - * If more data to send, adjust DMA for EP0 out data stage. - * ureq->dma stays unchanged, hence increment it by already - * passed passed data count before starting new transaction. - */ - if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && - continuing) + if (continuing) offset = ureq->actual; /* Fill DDMA chain entries */ @@ -1531,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, u32 windex) { - struct dwc2_hsotg_ep *ep; int dir = (windex & USB_DIR_IN) ? 
1 : 0; int idx = windex & 0x7F; @@ -1541,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, if (idx > hsotg->num_of_eps) return NULL; - ep = index_to_ep(hsotg, idx, dir); - - if (idx && ep->dir_in != dir) - return NULL; - - return ep; + return index_to_ep(hsotg, idx, dir); } /** @@ -2319,22 +2325,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, */ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; struct dwc2_hsotg *hsotg = hs_ep->parent; unsigned int bytes_rem = 0; + unsigned int bytes_rem_correction = 0; struct dwc2_dma_desc *desc = hs_ep->desc_list; int i; u32 status; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; if (!desc) return -EINVAL; + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) + bytes_rem_correction = 4 - (mps % 4); + for (i = 0; i < hs_ep->desc_count; ++i) { status = desc->status; bytes_rem += status & DEV_DMA_NBYTES_MASK; + bytes_rem -= bytes_rem_correction; if (status & DEV_DMA_STS_MASK) dev_err(hsotg->dev, "descriptor %d closed with %x\n", i, status & DEV_DMA_STS_MASK); + + if (status & DEV_DMA_L) + break; + desc++; } @@ -4886,12 +4906,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 81afe553aa66..f29fbadb0548 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, if (num_packets > max_hc_pkt_count) { num_packets = max_hc_pkt_count; chan->xfer_len = num_packets * chan->max_packet; + } else if (chan->ep_is_in) { + /* + * Always program an integral # of max packets + * for IN transfers. + * Note: This assumes that the input buffer is + * aligned and sized accordingly. + */ + chan->xfer_len = num_packets * chan->max_packet; } } else { /* Need 1 packet for transfer length of 0 */ num_packets = 1; } - if (chan->ep_is_in) - /* - * Always program an integral # of max packets for IN - * transfers - */ - chan->xfer_len = num_packets * chan->max_packet; - if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* @@ -4321,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) goto unlock; - if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) + if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL || + hsotg->flags.b.port_connect_status == 0) goto skip_power_saving; /* @@ -5397,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) dwc2_writel(hsotg, hprt0, HPRT0); /* Wait for the HPRT0.PrtSusp register field to be set */ - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000)) dev_warn(hsotg->dev, "Suspend wasn't generated\n"); /* @@ -5578,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, return ret; } - dwc2_hcd_rem_wakeup(hsotg); + if (rem_wakeup) { + dwc2_hcd_rem_wakeup(hsotg); + /* + * Change "port_connect_status_change" flag to re-enumerate, + * because after exit from hibernation port connection status + * is not detected. 
+ */ + hsotg->flags.b.port_connect_status_change = 1; + } hsotg->hibernated = 0; hsotg->bus_suspended = 0; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index a052d39b4375..d5f4ec1b73b1 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, &short_read); if (urb->actual_length + xfer_length > urb->length) { - dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); + dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__); xfer_length = urb->length - urb->actual_length; } @@ -1977,6 +1977,18 @@ error: qtd->error_count++; dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_XACT_ERR); + /* + * We can get here after a completed transaction + * (urb->actual_length >= urb->length) which was not reported + * as completed. If that is the case, and we do not abort + * the transfer, a transfer of size 0 will be enqueued + * subsequently. If urb->actual_length is not DMA-aligned, + * the buffer will then point to an unaligned address, and + * the resulting behavior is undefined. Bail out in that + * situation. + */ + if (qtd->urb->actual_length >= qtd->urb->length) + qtd->error_count = 3; dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); } diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 31e090ac9f1e..6d3812678b8c 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -846,7 +846,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) int dwc2_init_params(struct dwc2_hsotg *hsotg) { const struct of_device_id *match; - void (*set_params)(void *data); + void (*set_params)(struct dwc2_hsotg *data); dwc2_set_default_params(hsotg); dwc2_get_device_properties(hsotg); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 3c6ce09a6db5..34bb6124f1e2 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -337,7 +337,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) { struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); - disable_irq(hsotg->irq); + dwc2_disable_global_interrupts(hsotg); + synchronize_irq(hsotg->irq); } /** @@ -507,10 +508,23 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + hsotg->gadget.udc = NULL; + dwc2_hsotg_remove(hsotg); + goto error; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error: - dwc2_lowlevel_hw_disable(hsotg); + if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) + dwc2_lowlevel_hw_disable(hsotg); return retval; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index cede7a8e3605..90ec65d31059 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -117,6 +117,7 @@ static void __dwc3_set_mode(struct work_struct *work) struct dwc3 *dwc = work_to_dwc(work); unsigned long flags; int ret; + u32 reg; if (dwc->dr_mode != USB_DR_MODE_OTG) return; @@ -168,6 +169,11 @@ static void __dwc3_set_mode(struct work_struct *work) otg_set_vbus(dwc->usb2_phy->otg, true); phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); phy_set_mode(dwc->usb3_generic_phy, 
PHY_MODE_USB_HOST); + if (dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } } break; case DWC3_GCTL_PRTCAP_DEVICE: @@ -992,6 +998,9 @@ static int dwc3_core_init(struct dwc3 *dwc) if (dwc->dis_tx_ipgap_linecheck_quirk) reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + if (dwc->parkmode_disable_ss_quirk) + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } @@ -1305,6 +1314,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) "snps,dis-del-phy-power-chg-quirk"); dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, "snps,dis-tx-ipgap-linecheck-quirk"); + dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, + "snps,parkmode-disable-ss-quirk"); dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, "snps,tx_de_emphasis_quirk"); @@ -1318,6 +1329,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->dis_metastability_quirk = device_property_read_bool(dev, "snps,dis_metastability_quirk"); + dwc->dis_split_quirk = device_property_read_bool(dev, + "snps,dis-split-quirk"); + dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; @@ -1521,6 +1535,17 @@ static int dwc3_probe(struct platform_device *pdev) err5: dwc3_event_buffers_cleanup(dwc); + + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + dwc3_ulpi_exit(dwc); err4: @@ -1556,9 +1581,9 @@ static int dwc3_remove(struct platform_device *pdev) dwc3_core_exit(dwc); dwc3_ulpi_exit(dwc); - pm_runtime_put_sync(&pdev->dev); - pm_runtime_allow(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); dwc3_free_scratch_buffers(dwc); @@ -1693,7 +1718,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (PMSG_IS_AUTO(msg)) break; - ret = dwc3_core_init(dwc); + ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; @@ -1830,10 +1855,26 @@ static int dwc3_resume(struct device *dev) return 0; } + +static void dwc3_complete(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + u32 reg; + + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && + dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } +} +#else +#define dwc3_complete NULL #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dwc3_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) + .complete = dwc3_complete, SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, dwc3_runtime_idle) }; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 77c4a9abe365..da296f888f45 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -136,6 +136,7 @@ #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10)) #define DWC3_GHWPARAMS8 0xc600 +#define DWC3_GUCTL3 0xc60c #define DWC3_GFLADJ 0xc630 /* Device Registers */ @@ -249,6 +250,7 @@ #define DWC3_GUCTL_HSTINAUTORETRY BIT(14) /* Global User Control 1 Register */ +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) @@ -281,6 +283,7 @@ /* Global USB2 PHY Vendor 
Control Register */ #define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25) +#define DWC3_GUSB2PHYACC_DONE BIT(24) #define DWC3_GUSB2PHYACC_BUSY BIT(23) #define DWC3_GUSB2PHYACC_WRITE BIT(22) #define DWC3_GUSB2PHYACC_ADDR(n) (n << 16) @@ -309,6 +312,10 @@ #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) +/* Global RX Fifo Size Register */ +#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) + /* Global Event Size Registers */ #define DWC3_GEVNTSIZ_INTMASK BIT(31) #define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) @@ -370,6 +377,9 @@ /* Global User Control Register 2 */ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) +/* Global User Control Register 3 */ +#define DWC3_GUCTL3_SPLITDISABLE BIT(14) + /* Device Configuration Register */ #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) @@ -691,6 +701,7 @@ struct dwc3_ep { #define DWC3_EP_END_TRANSFER_PENDING BIT(4) #define DWC3_EP_PENDING_REQUEST BIT(5) #define DWC3_EP_DELAY_START BIT(6) +#define DWC3_EP_PENDING_CLEAR_STALL BIT(11) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN BIT(31) @@ -1024,6 +1035,8 @@ struct dwc3_scratchpad_array { * change quirk. * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate * check during HS transmit. + * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed + * instances in park mode. * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk * @tx_de_emphasis: Tx de-emphasis value * 0 - -6dB de-emphasis @@ -1031,6 +1044,7 @@ struct dwc3_scratchpad_array { * 2 - No de-emphasis * 3 - Reserved * @dis_metastability_quirk: set to disable metastability quirk. + * @dis_split_quirk: set to disable split boundary. * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. 
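The new quirks follow the usual dwc3 plumbing: a device-property boolean lands in a one-bit struct field, and core init (or the ->complete() resume hook, for the split quirk) translates it into a register bit. Schematically, with the register access stubbed out; the bit positions match the defines above:

struct dwc3_cfg {
    unsigned parkmode_disable_ss:1; /* "snps,parkmode-disable-ss-quirk" */
    unsigned dis_split:1;           /* "snps,dis-split-quirk" */
};

#define GUCTL1_PARKMODE_DISABLE_SS  (1u << 17)
#define GUCTL3_SPLITDISABLE         (1u << 14)

static unsigned int guctl1, guctl3;     /* stand-ins for the registers */

static void apply_quirks(const struct dwc3_cfg *c)
{
    if (c->parkmode_disable_ss)
        guctl1 |= GUCTL1_PARKMODE_DISABLE_SS;

    /* Re-applied from ->complete() too, since a suspend cycle can
     * reset GUCTL3 behind the driver's back. */
    if (c->dis_split)
        guctl3 |= GUCTL3_SPLITDISABLE;
}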
*/ @@ -1215,12 +1229,15 @@ struct dwc3 { unsigned dis_u2_freeclk_exists_quirk:1; unsigned dis_del_phy_power_chg_quirk:1; unsigned dis_tx_ipgap_linecheck_quirk:1; + unsigned parkmode_disable_ss_quirk:1; unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; unsigned dis_metastability_quirk:1; + unsigned dis_split_quirk:1; + u16 imod_interval; }; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index bdac3e7d7b18..d055e00f8180 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -183,6 +183,7 @@ static const struct of_device_id of_dwc3_simple_match[] = { { .compatible = "amlogic,meson-axg-dwc3" }, { .compatible = "amlogic,meson-gxl-dwc3" }, { .compatible = "allwinner,sun50i-h6-dwc3" }, + { .compatible = "hisilicon,hi3670-dwc3" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7051611229c9..58b8801ce881 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -38,6 +38,9 @@ #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee +#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee +#define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -114,6 +117,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = { static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; @@ -144,7 +148,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) if (pdev->vendor == PCI_VENDOR_ID_INTEL) { if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || - pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) { + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M || + pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) { guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid); dwc->has_dsm_for_pm = true; } @@ -205,8 +210,10 @@ static void dwc3_pci_resume_work(struct work_struct *work) int ret; ret = pm_runtime_get_sync(&dwc3->dev); - if (ret) + if (ret) { + pm_runtime_put_sync_autosuspend(&dwc3->dev); return; + } pm_runtime_mark_last_busy(&dwc3->dev); pm_runtime_put_sync_autosuspend(&dwc3->dev); @@ -355,6 +362,15 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), (kernel_ulong_t) &dwc3_pci_amd_properties, }, { } /* Terminating Entry */ diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 261af9e38ddd..7874b97e3322 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -251,8 +251,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) for (i = qcom->num_clocks - 1; i >= 0; i--) clk_disable_unprepare(qcom->clks[i]); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_enable_interrupts(qcom); + qcom->is_suspended = true; - dwc3_qcom_enable_interrupts(qcom); return 0; } @@ -265,7 +267,8 @@ static int 
dwc3_qcom_resume(struct dwc3_qcom *qcom) if (!qcom->is_suspended) return 0; - dwc3_qcom_disable_interrupts(qcom); + if (device_may_wakeup(qcom->dev)) + dwc3_qcom_disable_interrupts(qcom); for (i = 0; i < qcom->num_clocks; i++) { ret = clk_prepare_enable(qcom->clks[i]); @@ -528,16 +531,19 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) ret = of_platform_populate(np, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to register dwc3 core - %d\n", ret); - return ret; + goto node_put; } qcom->dwc3 = of_find_device_by_node(dwc3_np); if (!qcom->dwc3) { + ret = -ENODEV; dev_err(dev, "failed to get dwc3 platform device\n"); - return -ENODEV; } - return 0; +node_put: + of_node_put(dwc3_np); + + return ret; } static const struct dwc3_acpi_pdata sdm845_acpi_pdata = { diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 6dee4dabc0a4..4de8e5f091cb 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -524,6 +524,11 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc, ret = __dwc3_gadget_ep_set_halt(dep, set, true); if (ret) return -EINVAL; + + /* ClearFeature(Halt) may need delayed status */ + if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + return USB_GADGET_DELAYED_STATUS; + break; default: return -EINVAL; @@ -942,12 +947,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc, static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req) { + unsigned int trb_length = 0; int ret; req->direction = !!dep->number; if (req->request.length == 0) { - dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, + if (!req->direction) + trb_length = dep->endpoint.maxpacket; + + dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length, DWC3_TRBCTL_CONTROL_DATA, false); ret = dwc3_ep0_start_trans(dep); } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) @@ -994,9 +1003,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; + if (!req->direction) + trb_length = dep->endpoint.maxpacket; + /* Now prepare one extra TRB to align transfer size */ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, - 0, DWC3_TRBCTL_CONTROL_DATA, + trb_length, DWC3_TRBCTL_CONTROL_DATA, false); ret = dwc3_ep0_start_trans(dep); } else { @@ -1042,6 +1054,18 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc, __dwc3_ep0_do_control_status(dwc, dep); } +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc) +{ + unsigned int direction = !dwc->ep0_expect_in; + + dwc->delayed_status = false; + + if (dwc->ep0state != EP0_STATUS_PHASE) + return; + + __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); +} + static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 09d6b11246c9..af8efebfaf11 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 1000; + u32 timeout = 5000; u32 saved_config = 0; u32 reg; @@ -304,13 +304,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, } if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { - int needs_wakeup; + int link_state; - needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || - dwc->link_state == DWC3_LINK_STATE_U2 || - dwc->link_state == DWC3_LINK_STATE_U3); - - if 
(unlikely(needs_wakeup)) { + link_state = dwc3_gadget_get_link_state(dwc); + if (link_state == DWC3_LINK_STATE_U1 || + link_state == DWC3_LINK_STATE_U2 || + link_state == DWC3_LINK_STATE_U3) { ret = __dwc3_gadget_wakeup(dwc); dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", ret); @@ -593,8 +592,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); if (desc->bInterval) { - params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); - dep->interval = 1 << (desc->bInterval - 1); + u8 bInterval_m1; + + /* + * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it + * must be set to 0 when the controller operates in full-speed. + */ + bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); + if (dwc->gadget.speed == USB_SPEED_FULL) + bInterval_m1 = 0; + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && + dwc->gadget.speed == USB_SPEED_FULL) + dep->interval = desc->bInterval; + else + dep->interval = 1 << (desc->bInterval - 1); + + params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); } return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params); @@ -1017,26 +1031,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned chain, unsigned node) + struct dwc3_request *req, unsigned int trb_length, + unsigned chain, unsigned node) { struct dwc3_trb *trb; - unsigned int length; dma_addr_t dma; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; - if (req->request.num_sgs > 0) { - length = sg_dma_len(req->start_sg); + if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); - } else { - length = req->request.length; + else dma = req->request.dma; - } trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1048,7 +1060,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, stream_id, short_not_ok, no_interrupt); } @@ -1058,16 +1070,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct scatterlist *sg = req->start_sg; struct scatterlist *s; int i; - + unsigned int length = req->request.length; + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + unsigned int rem = length % maxp; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + /* + * If we resume preparing the request, then get the remaining length of + * the request and resume where we left off. + */ + for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + length -= sg_dma_len(s); + for_each_sg(sg, s, remaining, i) { - unsigned int length = req->request.length; - unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); - unsigned int rem = length % maxp; + unsigned int trb_length; unsigned chain = true; + trb_length = min_t(unsigned int, length, sg_dma_len(s)); + + length -= trb_length; + /* * IOMMU driver is coalescing the list of sgs which shares a * page boundary into one and giving it to USB driver.
With @@ -1075,7 +1098,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * sgs passed. So mark the chain bit to false if it is the last * mapped sg. */ - if (i == remaining - 1) + if ((i == remaining - 1) || !length) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1085,7 +1108,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, i); + dwc3_prepare_one_trb(dep, req, trb_length, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1095,8 +1118,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + !rem && !chain) { + struct dwc3 *dwc = dep->dwc; + struct dwc3_trb *trb; + + req->needs_extra_trb = true; + + /* Prepare normal TRB */ + dwc3_prepare_one_trb(dep, req, trb_length, true, i); + + /* Prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, + !req->direction, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, chain, i); + dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } /* @@ -1111,6 +1163,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->num_queued_sgs++; + /* + * The number of pending SG entries may not correspond to the + * number of mapped SG entries. If all the data are queued, then + * don't include unused SG entries.
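The reworked dwc3_prepare_one_trb_sg() can be re-entered for a partially queued request: it first subtracts the bytes of the already-queued entries from the request length, then hands out min(remaining, entry size) per TRB and stops chaining once the remaining length reaches zero, even if coalesced entries are left over. In outline, with plain arrays standing in for the scatterlist API:

struct sg_entry {
    unsigned int len;
};

static void queue_trb(const struct sg_entry *s, unsigned int len, int chain)
{
    (void)s; (void)len; (void)chain;    /* dwc3_prepare_one_trb() stand-in */
}

static void prepare_sg(struct sg_entry *sg, unsigned int num_mapped,
                       unsigned int num_queued, unsigned int req_len)
{
    unsigned int remaining = req_len;
    unsigned int i;

    /* Resume where we left off: skip bytes already queued. */
    for (i = 0; i < num_queued; i++)
        remaining -= sg[i].len;

    for (i = num_queued; i < num_mapped && remaining; i++) {
        unsigned int trb_len = remaining < sg[i].len ?
                               remaining : sg[i].len;

        remaining -= trb_len;
        /* Clear the chain bit on the TRB carrying the last byte,
         * even if coalescing left later entries unused. */
        queue_trb(&sg[i], trb_len, remaining != 0);
    }
}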
+ */ + if (length == 0) { + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + break; + } + if (!dwc3_calc_trbs_left(dep)) break; } @@ -1130,7 +1192,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1140,6 +1202,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && (IS_ALIGNED(req->request.length, maxp))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; @@ -1147,17 +1210,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); - /* Now prepare one extra TRB to handle ZLP */ + /* Prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 1, req->request.stream_id, + !req->direction, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment for OUT */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, false, 0); + dwc3_prepare_one_trb(dep, req, length, false, 0); } } @@ -1217,6 +1290,8 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) } } +static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep); + static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; @@ -1256,14 +1331,20 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params); if (ret < 0) { - /* - * FIXME we need to iterate over the list of requests - * here and stop, unmap, free and del each of the linked - * requests instead of what we do now. - */ - if (req->trb) - memset(req->trb, 0, sizeof(struct dwc3_trb)); - dwc3_gadget_del_and_unmap_request(dep, req, ret); + struct dwc3_request *tmp; + + if (ret == -EAGAIN) + return ret; + + dwc3_stop_active_transfer(dep, true, true); + + list_for_each_entry_safe(req, tmp, &dep->started_list, list) + dwc3_gadget_move_cancelled_request(req); + + /* If ep isn't started, then there's no end transfer pending */ + if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + dwc3_gadget_ep_cleanup_cancelled_requests(dep); + return ret; } @@ -1454,8 +1535,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) list_add_tail(&req->list, &dep->pending_list); req->status = DWC3_REQUEST_STATUS_QUEUED; - /* Start the transfer only after the END_TRANSFER is completed */ - if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { + /* + * Start the transfer only after the END_TRANSFER is completed + * and endpoint STALL is cleared.
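Queuing is now gated on the STALL and WEDGE flags as well: a request arriving while a CLEAR_STALL is outstanding is parked with DWC3_EP_DELAY_START and kicked once the halt actually clears. The state machine, boiled down (flag names shortened, kick_transfer() stubbed):

enum {
    EP_STALL            = 1 << 0,
    EP_WEDGE            = 1 << 1,
    EP_END_XFER_PENDING = 1 << 2,
    EP_DELAY_START      = 1 << 3,
};

static int kick_transfer(unsigned int *flags)
{
    (void)flags;    /* __dwc3_gadget_kick_transfer() stand-in */
    return 0;
}

static int queue_request(unsigned int *flags)
{
    if (*flags & (EP_END_XFER_PENDING | EP_WEDGE | EP_STALL)) {
        *flags |= EP_DELAY_START;   /* park it; start later */
        return 0;
    }
    return kick_transfer(flags);
}

static void clear_halt_done(unsigned int *flags)
{
    *flags &= ~(EP_STALL | EP_WEDGE);
    if (*flags & EP_DELAY_START)
        kick_transfer(flags);       /* run the parked request */
    *flags &= ~EP_DELAY_START;
}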
+ */ + if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || + (dep->flags & DWC3_EP_WEDGE) || + (dep->flags & DWC3_EP_STALL)) { dep->flags |= DWC3_EP_DELAY_START; return 0; } @@ -1505,6 +1591,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r { int i; + /* If req->trb is not set, then the request has not started */ + if (!req->trb) + return; + /* * If request was already started, this means we had to * stop the transfer. With that we also need to ignore @@ -1595,6 +1685,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) { struct dwc3_gadget_ep_cmd_params params; struct dwc3 *dwc = dep->dwc; + struct dwc3_request *req; + struct dwc3_request *tmp; int ret; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { @@ -1631,13 +1723,42 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) else dep->flags |= DWC3_EP_STALL; } else { + /* + * Don't issue CLEAR_STALL command to control endpoints. The + * controller automatically clears the STALL when it receives + * the SETUP token. + */ + if (dep->number <= 1) { + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + return 0; + } + + dwc3_stop_active_transfer(dep, true, true); + + list_for_each_entry_safe(req, tmp, &dep->started_list, list) + dwc3_gadget_move_cancelled_request(req); + + if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { + dep->flags |= DWC3_EP_PENDING_CLEAR_STALL; + return 0; + } + + dwc3_gadget_ep_cleanup_cancelled_requests(dep); ret = dwc3_send_clear_stall_ep_cmd(dep); - if (ret) + if (ret) { dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name); - else - dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + return ret; + } + + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + + if ((dep->flags & DWC3_EP_DELAY_START) && + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) + __dwc3_gadget_kick_transfer(dep); + + dep->flags &= ~DWC3_EP_DELAY_START; } return ret; @@ -1725,7 +1846,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) u32 reg; u8 link_state; - u8 speed; /* * According to the Databook Remote wakeup request should @@ -1735,16 +1855,15 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) */ reg = dwc3_readl(dwc->regs, DWC3_DSTS); - speed = reg & DWC3_DSTS_CONNECTSPD; - if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) - return 0; - link_state = DWC3_DSTS_USBLNKST(reg); switch (link_state) { + case DWC3_LINK_STATE_RESET: case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ + case DWC3_LINK_STATE_U2: /* in HS, means Sleep (L1) */ + case DWC3_LINK_STATE_U1: + case DWC3_LINK_STATE_RESUME: break; default: return -EINVAL; @@ -2224,7 +2343,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; int mdwidth; - int kbytes; int size; mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); @@ -2240,17 +2358,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) /* FIFO Depth is in MDWDITH bytes. Multiply */ size *= mdwidth; - kbytes = size / 1024; - if (kbytes == 0) - kbytes = 1; - /* - * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for - * internal overhead. We don't really know how these are used, - * but documentation say it exists. + * To meet performance requirement, a minimum TxFIFO size of 3x + * MaxPacketSize is recommended for endpoints that support burst and a + * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't + * support burst. 
Use those numbers to calculate the max packet
+ * limit as below.
 */
- size -= mdwidth * (kbytes + 1);
- size /= kbytes;
+ if (dwc->maximum_speed >= USB_SPEED_SUPER)
+ size /= 3;
+ else
+ size /= 2;
 usb_ep_set_maxpacket_limit(&dep->endpoint, size);
@@ -2268,8 +2386,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
 {
 struct dwc3 *dwc = dep->dwc;
+ int mdwidth;
+ int size;
- usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+
+ /* MDWIDTH is represented in bits, convert to bytes */
+ mdwidth /= 8;
+
+ /* All OUT endpoints share a single RxFIFO space */
+ size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
+ if (dwc3_is_usb31(dwc))
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+
+ /* FIFO depth is in MDWIDTH bytes */
+ size *= mdwidth;
+
+ /*
+ * To meet performance requirement, a minimum recommended RxFIFO size
+ * is defined as follows:
+ * RxFIFO size >= (3 x MaxPacketSize) +
+ * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
+ *
+ * Then calculate the max packet limit as below.
+ */
+ size -= (3 * 8) + 16;
+ if (size < 0)
+ size = 0;
+ else
+ size /= 3;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
 dep->endpoint.max_streams = 15;
 dep->endpoint.ops = &dwc3_gadget_ep_ops;
 list_add_tail(&dep->endpoint.ep_list,
@@ -2454,9 +2603,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
 for_each_sg(sg, s, pending, i) {
 trb = &dep->trb_pool[dep->trb_dequeue];
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
- break;
-
 req->sg = sg_next(s);
 req->num_pending_sgs--;
@@ -2481,14 +2627,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
- /*
- * For OUT direction, host may send less than the setup
- * length. Return true for all OUT requests.
- */
- if (!req->direction)
- return true;
-
- return req->request.actual == req->request.length;
+ return req->num_pending_sgs == 0;
 }
 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2504,18 +2643,24 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
 status);
- if (req->needs_extra_trb) {
- ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
- status);
- req->needs_extra_trb = false;
- }
-
 req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req) ||
- req->num_pending_sgs) {
- __dwc3_gadget_kick_transfer(dep);
+ if (!dwc3_gadget_ep_request_completed(req))
 goto out;
+
+ if (req->needs_extra_trb) {
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+ status);
+
+ /* Reclaim MPS padding TRB for ZLP */
+ if (!req->direction && req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (IS_ALIGNED(req->request.length, maxp)))
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
+
+ req->needs_extra_trb = false;
 }
 dwc3_gadget_giveback(dep, req, status);
@@ -2540,6 +2685,24 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
 }
 }
+static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+
+ if (!list_empty(&dep->pending_list))
+ return true;
+
+ /*
+ * We only need to check the first entry of the started list.
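Stripped of register access, the TxFIFO and RxFIFO sizing above is plain arithmetic. A sketch under stated assumptions: both helper names are hypothetical, fifo_depth stands for the GTXFIFOSIZ/GRXFIFOSIZ depth field, and mdwidth_bits for the bus width decoded from HWPARAMS0.

static int in_ep_maxpacket_limit(int fifo_depth, int mdwidth_bits, bool burst)
{
	int size = fifo_depth * (mdwidth_bits / 8);	/* FIFO depth in bytes */

	/* 3x MaxPacketSize with burst (SuperSpeed and up), 2x without */
	return burst ? size / 3 : size / 2;
}

static int out_ep_maxpacket_limit(int fifo_depth, int mdwidth_bits)
{
	int size = fifo_depth * (mdwidth_bits / 8);

	/* reserve 3 x 8-byte setup packets plus 16 bytes of clock-crossing margin */
	size -= (3 * 8) + 16;

	return size < 0 ? 0 : size / 3;
}

For example, a hypothetical 388-word RxFIFO on a 64-bit bus gives (388 * 8 - 40) / 3 = 1021 bytes, just under the 1024-byte SuperSpeed bulk maximum.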
+ * We can assume the completed requests are removed from the started list.
+ */
+ req = next_request(&dep->started_list);
+ if (!req)
+ return false;
+
+ return !dwc3_gadget_ep_request_completed(req);
+}
+
 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
 const struct dwc3_event_depevt *event)
 {
@@ -2567,10 +2730,10 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
 dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop) {
+ if (stop)
 dwc3_stop_active_transfer(dep, true, true);
- dep->flags = DWC3_EP_ENABLED;
- }
+ else if (dwc3_gadget_ep_should_continue(dep))
+ __dwc3_gadget_kick_transfer(dep);
 /*
 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
@@ -2642,6 +2805,26 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
+ if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
+ if (dwc3_send_clear_stall_ep_cmd(dep)) {
+ struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
+
+ dev_err(dwc->dev, "failed to clear STALL on %s\n",
+ dep->name);
+ if (dwc->delayed_status)
+ __dwc3_gadget_ep0_set_halt(ep0, 1);
+ return;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);
+ }
+
 if ((dep->flags & DWC3_EP_DELAY_START) &&
 !usb_endpoint_xfer_isoc(dep->endpoint.desc))
 __dwc3_gadget_kick_transfer(dep);
@@ -2804,6 +2987,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 dwc->connected = true;
+ /*
+ * Ideally, dwc3_reset_gadget() would trigger the function
+ * drivers to stop any active transfers through ep disable.
+ * However, for functions which defer ep disable, such as mass
+ * storage, we will need to rely on the call to stop active
+ * transfers here, and avoid allowing request queuing.
+ */ + dwc->connected = false; + /* * WORKAROUND: DWC3 revisions <1.88a have an issue which * would cause a missing Disconnect Event if there's a diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 5faf4d1249e0..f207e59c7d03 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -111,6 +111,7 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags); int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc); /** * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c index f62b5f3c2d67..ffe3440abb74 100644 --- a/drivers/usb/dwc3/ulpi.c +++ b/drivers/usb/dwc3/ulpi.c @@ -7,6 +7,8 @@ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> */ +#include <linux/delay.h> +#include <linux/time64.h> #include <linux/ulpi/regs.h> #include "core.h" @@ -17,14 +19,24 @@ DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \ DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a)) -static int dwc3_ulpi_busyloop(struct dwc3 *dwc) +#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) + +static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read) { - unsigned count = 1000; + unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY; + unsigned int count = 1000; u32 reg; + if (addr >= ULPI_EXT_VENDOR_SPECIFIC) + ns += DWC3_ULPI_BASE_DELAY; + + if (read) + ns += DWC3_ULPI_BASE_DELAY; + while (count--) { + ndelay(ns); reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0)); - if (!(reg & DWC3_GUSB2PHYACC_BUSY)) + if (reg & DWC3_GUSB2PHYACC_DONE) return 0; cpu_relax(); } @@ -47,7 +59,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr) reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - ret = dwc3_ulpi_busyloop(dwc); + ret = dwc3_ulpi_busyloop(dwc, addr, true); if (ret) return ret; @@ -71,7 +83,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val) reg |= DWC3_GUSB2PHYACC_WRITE | val; dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - return dwc3_ulpi_busyloop(dwc); + return dwc3_ulpi_busyloop(dwc, addr, false); } static const struct ulpi_ops dwc3_ulpi_ops = { diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index cac991173ac0..5a462a1d1896 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -728,19 +728,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb) case COMP_USB_TRANSACTION_ERROR: case COMP_STALL_ERROR: default: - if (ep_id == XDBC_EPID_OUT) + if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) xdbc.flags |= XDBC_FLAGS_OUT_STALL; - if (ep_id == XDBC_EPID_IN) + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) xdbc.flags |= XDBC_FLAGS_IN_STALL; xdbc_trace("endpoint %d stalled\n", ep_id); break; } - if (ep_id == XDBC_EPID_IN) { + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) { xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS; xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); - } else if (ep_id == XDBC_EPID_OUT) { + } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) { xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS; } else { xdbc_trace("invalid endpoint id %d\n", ep_id); diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 673686eeddd7..6e2b7266a695 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -120,8 +120,22 
@@ struct xdbc_ring {
 u32 cycle_state;
 };
-#define XDBC_EPID_OUT 2
-#define XDBC_EPID_IN 3
+/*
+ * These are the "Endpoint ID" (also known as "Context Index") values for the
+ * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
+ * structure.
+ * According to the "eXtensible Host Controller Interface for Universal Serial
+ * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
+ * Rings", these should be 0 and 1, and those are the values AMD machines give
+ * you; but Intel machines seem to use the formula from section "4.5.1 Device
+ * Context Index", which is supposed to be used for the Device Context only.
+ * Luckily the values from Intel don't overlap with those from AMD, so we can
+ * just test for both.
+ */
+#define XDBC_EPID_OUT 0
+#define XDBC_EPID_IN 1
+#define XDBC_EPID_OUT_INTEL 2
+#define XDBC_EPID_IN_INTEL 3
 struct xdbc_state {
 u16 vendor;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 02ff850278b1..09ba3af1234e 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
 depends on NET
 select USB_U_ETHER
 select USB_F_NCM
+ select CRC32
 help
 NCM is an advanced protocol for Ethernet encapsulation, allows
 grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
 depends on NET
 select USB_U_ETHER
 select USB_F_EEM
+ select CRC32
 help
 CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
 and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d7871636fced..24dad1d78d1e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f,
 }
 /**
- * next_ep_desc() - advance to the next EP descriptor
+ * next_desc() - advance to the next desc_type descriptor
 * @t: currect pointer within descriptor array
+ * @desc_type: descriptor type
 *
- * Return: next EP descriptor or NULL
+ * Return: next desc_type descriptor or NULL
 *
- * Iterate over @t until either EP descriptor found or
+ * Iterate over @t until either desc_type descriptor found or
 * NULL (that indicates end of list) encountered
 */
 static struct usb_descriptor_header**
-next_ep_desc(struct usb_descriptor_header **t)
+next_desc(struct usb_descriptor_header **t, u8 desc_type)
 {
 for (; *t; t++) {
- if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+ if ((*t)->bDescriptorType == desc_type)
 return t;
 }
 return NULL;
 }
 /*
- * for_each_ep_desc()- iterate over endpoint descriptors in the
- * descriptors list
- * @start: pointer within descriptor array.
- * @ep_desc: endpoint descriptor to use as the loop cursor
+ * for_each_desc() - iterate over desc_type descriptors in the
+ * descriptors list
+ * @start: pointer within descriptor array.
+ * @iter_desc: desc_type descriptor to use as the loop cursor
+ * @desc_type: wanted descriptor type
 */
-#define for_each_ep_desc(start, ep_desc) \
- for (ep_desc = next_ep_desc(start); \
- ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+#define for_each_desc(start, iter_desc, desc_type) \
+ for (iter_desc = next_desc(start, desc_type); \
+ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type))
 /**
- * config_ep_by_speed() - configures the given endpoint
+ * config_ep_by_speed_and_alt() - configures the given endpoint
 * according to gadget speed.
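Because the AMD and Intel encodings don't overlap, the checks added to xhci-dbc.c above reduce to accepting either value per direction. An illustrative predicate, not a function in the patch:

static bool xdbc_epid_is_in(u32 ep_id)
{
	/* xHCI 7.6.3.2 value (AMD) or Device Context Index style value (Intel) */
	return ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL;
}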
* @g: pointer to the gadget * @f: usb function * @_ep: the endpoint to configure + * @alt: alternate setting number * * Return: error code, 0 on success * @@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t) * Note: the supplied function should hold all the descriptors * for supported speeds */ -int config_ep_by_speed(struct usb_gadget *g, - struct usb_function *f, - struct usb_ep *_ep) +int config_ep_by_speed_and_alt(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep, + u8 alt) { struct usb_endpoint_descriptor *chosen_desc = NULL; + struct usb_interface_descriptor *int_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; struct usb_ss_ep_comp_descriptor *comp_desc = NULL; @@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g, default: speed_desc = f->fs_descriptors; } + + /* find correct alternate setting descriptor */ + for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { + int_desc = (struct usb_interface_descriptor *)*d_spd; + + if (int_desc->bAlternateSetting == alt) { + speed_desc = d_spd; + goto intf_found; + } + } + return -EIO; + +intf_found: /* find descriptors */ - for_each_ep_desc(speed_desc, d_spd) { + for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; if (chosen_desc->bEndpointAddress == _ep->address) goto ep_found; @@ -237,6 +255,32 @@ ep_found: } return 0; } +EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); + +/** + * config_ep_by_speed() - configures the given endpoint + * according to gadget speed. + * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure + * + * Return: error code, 0 on success + * + * This function chooses the right descriptors for a given + * endpoint according to gadget speed and saves it in the + * endpoint desc field. If the endpoint already has a descriptor + * assigned to it - overwrites it with currently corresponding + * descriptor. The endpoint maxpacket field is updated according + * to the chosen descriptor. 
+ * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +int config_ep_by_speed(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep) +{ + return config_ep_by_speed_and_alt(g, f, _ep, 0); +} EXPORT_SYMBOL_GPL(config_ep_by_speed); /** @@ -348,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function) spin_lock_irqsave(&cdev->lock, flags); - if (cdev->deactivations == 0) + if (cdev->deactivations == 0) { + spin_unlock_irqrestore(&cdev->lock, flags); status = usb_gadget_deactivate(cdev->gadget); + spin_lock_irqsave(&cdev->lock, flags); + } if (status == 0) cdev->deactivations++; @@ -380,8 +427,11 @@ int usb_function_activate(struct usb_function *function) status = -EINVAL; else { cdev->deactivations--; - if (cdev->deactivations == 0) + if (cdev->deactivations == 0) { + spin_unlock_irqrestore(&cdev->lock, flags); status = usb_gadget_activate(cdev->gadget); + spin_lock_irqsave(&cdev->lock, flags); + } } spin_unlock_irqrestore(&cdev->lock, flags); @@ -861,6 +911,11 @@ static int set_config(struct usb_composite_dev *cdev, else power = min(power, 900U); done: + if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) + usb_gadget_set_selfpowered(gadget); + else + usb_gadget_clear_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; @@ -1036,7 +1091,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) while (*sp) { s = *sp; language = cpu_to_le16(s->language); - for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { + for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) { if (*tmp == language) goto repeat; } @@ -1111,7 +1166,7 @@ static int get_string(struct usb_composite_dev *cdev, collect_langs(sp, s->wData); } - for (len = 0; len <= 126 && s->wData[len]; len++) + for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++) continue; if (!len) return -EINVAL; @@ -2279,6 +2334,7 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, 2); } @@ -2307,6 +2363,9 @@ void composite_resume(struct usb_gadget *gadget) else maxpower = min(maxpower, 900U); + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + usb_gadget_clear_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, maxpower); } diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c index 2d115353424c..8bb25773b61e 100644 --- a/drivers/usb/gadget/config.c +++ b/drivers/usb/gadget/config.c @@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors); void usb_free_all_descriptors(struct usb_function *f) { usb_free_descriptors(f->fs_descriptors); + f->fs_descriptors = NULL; usb_free_descriptors(f->hs_descriptors); + f->hs_descriptors = NULL; usb_free_descriptors(f->ss_descriptors); + f->ss_descriptors = NULL; usb_free_descriptors(f->ssp_descriptors); + f->ssp_descriptors = NULL; } EXPORT_SYMBOL_GPL(usb_free_all_descriptors); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index ab9ac48a751a..3d4710cc34bc 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -109,21 +109,27 @@ struct gadget_config_name { struct list_head list; }; +#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1) + static int usb_string_copy(const char *s, char **s_copy) { int ret; char *str; char *copy = *s_copy; ret = strlen(s); - if (ret > 126) + if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; - str = kstrdup(s, GFP_KERNEL); - if (!str) - return -ENOMEM; 
+ if (copy) { + str = copy; + } else { + str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL); + if (!str) + return -ENOMEM; + } + strcpy(str, s); if (str[ret - 1] == '\n') str[ret - 1] = '\0'; - kfree(copy); *s_copy = str; return 0; } @@ -233,9 +239,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) { - char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; + struct gadget_info *gi = to_gadget_info(item); + char *udc_name; + int ret; - return sprintf(page, "%s\n", udc_name ?: ""); + mutex_lock(&gi->lock); + udc_name = gi->composite.gadget_driver.udc_name; + ret = sprintf(page, "%s\n", udc_name ?: ""); + mutex_unlock(&gi->lock); + + return ret; } static int unregister_gadget(struct gadget_info *gi) @@ -260,6 +273,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, char *name; int ret; + if (strlen(page) < len) + return -EOVERFLOW; + name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; @@ -1214,9 +1230,9 @@ static void purge_configs_funcs(struct gadget_info *gi) cfg = container_of(c, struct config_usb_cfg, c); - list_for_each_entry_safe(f, tmp, &c->functions, list) { + list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) { - list_move_tail(&f->list, &cfg->func_list); + list_move(&f->list, &cfg->func_list); if (f->unbind) { dev_dbg(&gi->cdev.gadget->dev, "unbind function '%s'/%p\n", @@ -1502,7 +1518,7 @@ static const struct usb_gadget_driver configfs_driver_template = { .suspend = configfs_composite_suspend, .resume = configfs_composite_resume, - .max_speed = USB_SPEED_SUPER, + .max_speed = USB_SPEED_SUPER_PLUS, .driver = { .owner = THIS_MODULE, .name = "configfs-gadget", @@ -1542,7 +1558,7 @@ static struct config_group *gadgets_make( gi->composite.unbind = configfs_do_nothing; gi->composite.suspend = NULL; gi->composite.resume = NULL; - gi->composite.max_speed = USB_SPEED_SUPER; + gi->composite.max_speed = USB_SPEED_SUPER_PLUS; spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 9fc98de83624..add0f7ead55c 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -684,7 +684,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, - acm_ss_function, NULL); + acm_ss_function, acm_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a9a711e04614..aa56a080235c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1120,6 +1120,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); if (unlikely(ret)) { + io_data->req = NULL; usb_ep_free_request(ep->ep, req); goto error_lock; } @@ -1327,10 +1328,11 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, case FUNCTIONFS_ENDPOINT_DESC: { int desc_idx; - struct usb_endpoint_descriptor *desc; + struct usb_endpoint_descriptor desc1, *desc; switch (epfile->ffs->gadget->speed) { case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: desc_idx = 2; break; case USB_SPEED_HIGH: @@ -1339,10 +1341,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, default: desc_idx = 0; } + desc = epfile->ep->descs[desc_idx]; + 
memcpy(&desc1, desc, desc->bLength); spin_unlock_irq(&epfile->ffs->eps_lock); - ret = copy_to_user((void __user *)value, desc, desc->bLength); + ret = copy_to_user((void __user *)value, &desc1, desc1.bLength); if (ret) ret = -EFAULT; return ret; @@ -1827,6 +1831,10 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; + + ffs->ms_os_descs_ext_prop_count = 0; + ffs->ms_os_descs_ext_prop_name_len = 0; + ffs->ms_os_descs_ext_prop_data_len = 0; } @@ -2650,6 +2658,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, do { /* lang_count > 0 so we can use do-while */ unsigned needed = needed_count; + u32 str_per_lang = str_count; if (unlikely(len < 3)) goto error_free; @@ -2685,7 +2694,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, data += length + 1; len -= length + 1; - } while (--str_count); + } while (--str_per_lang); s->id = 0; /* terminator */ s->s = NULL; @@ -3186,7 +3195,8 @@ static int _ffs_func_bind(struct usb_configuration *c, } if (likely(super)) { - func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs); + func->function.ss_descriptors = func->function.ssp_descriptors = + vla_ptr(vlabuf, d, ss_descs); ss_len = ffs_do_descs(ffs->ss_descs_count, vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len, d_raw_descs__sz - fs_len - hs_len, @@ -3596,6 +3606,7 @@ static void ffs_func_unbind(struct usb_configuration *c, func->function.fs_descriptors = NULL; func->function.hs_descriptors = NULL; func->function.ss_descriptors = NULL; + func->function.ssp_descriptors = NULL; func->interfaces_nums = NULL; ffs_event_add(ffs, FUNCTIONFS_UNBIND); diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 46af0aa07e2e..0e083a53da53 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1048,6 +1048,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) f->ss_descriptors = usb_copy_descriptors(midi_function); if (!f->ss_descriptors) goto fail_f_midi; + + if (gadget_is_superspeed_plus(c->cdev->gadget)) { + f->ssp_descriptors = usb_copy_descriptors(midi_function); + if (!f->ssp_descriptors) + goto fail_f_midi; + } } kfree(midi_function); @@ -1315,7 +1321,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) midi->id = kstrdup(opts->id, GFP_KERNEL); if (opts->id && !midi->id) { status = -ENOMEM; - goto setup_fail; + goto midi_free; } midi->in_ports = opts->in_ports; midi->out_ports = opts->out_ports; @@ -1327,7 +1333,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL); if (status) - goto setup_fail; + goto midi_free; spin_lock_init(&midi->transmit_lock); @@ -1343,9 +1349,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) return &midi->func; +midi_free: + if (midi) + kfree(midi->id); + kfree(midi); setup_fail: mutex_unlock(&opts->lock); - kfree(midi); + return ERR_PTR(status); } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 1d900081b1f0..92a7c3a83945 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { - if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) - return 
13 * 1024 * 8 * 1000 * 8; + if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) + return 4250000000U; + else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + return 3750000000U; else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else @@ -1181,9 +1183,11 @@ static int ncm_unwrap_ntb(struct gether *port, int ndp_index; unsigned dg_len, dg_len2; unsigned ndp_len; + unsigned block_len; struct sk_buff *skb2; int ret = -EINVAL; - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; int dgram_counter; @@ -1205,8 +1209,9 @@ static int ncm_unwrap_ntb(struct gether *port, } tmp++; /* skip wSequence */ + block_len = get_ncm(&tmp, opts->block_length); /* (d)wBlockLength */ - if (get_ncm(&tmp, opts->block_length) > max_size) { + if (block_len > ntb_max) { INFO(port->func.config->cdev, "OUT size exceeded\n"); goto err; } @@ -1215,15 +1220,23 @@ static int ncm_unwrap_ntb(struct gether *port, /* Run through all the NDP's in the NTB */ do { - /* NCM 3.2 */ - if (((ndp_index % 4) != 0) && - (ndp_index < opts->nth_size)) { + /* + * NCM 3.2 + * dwNdpIndex + */ + if (((ndp_index % 4) != 0) || + (ndp_index < opts->nth_size) || + (ndp_index > (block_len - + opts->ndp_size))) { INFO(port->func.config->cdev, "Bad index: %#X\n", ndp_index); goto err; } - /* walk through NDP */ + /* + * walk through NDP + * dwSignature + */ tmp = (void *)(skb->data + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); @@ -1234,14 +1247,15 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len = get_unaligned_le16(tmp++); /* * NCM 3.3.1 + * wLength * entry is 2 items * item size is 16/32 bits, opts->dgram_item_len * 2 bytes * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry * Each entry is a dgram index and a dgram length. */ if ((ndp_len < opts->ndp_size - + 2 * 2 * (opts->dgram_item_len * 2)) - || (ndp_len % opts->ndplen_align != 0)) { + + 2 * 2 * (opts->dgram_item_len * 2)) || + (ndp_len % opts->ndplen_align != 0)) { INFO(port->func.config->cdev, "Bad NDP length: %#X\n", ndp_len); goto err; @@ -1258,8 +1272,21 @@ static int ncm_unwrap_ntb(struct gether *port, do { index = index2; + /* wDatagramIndex[0] */ + if ((index < opts->nth_size) || + (index > block_len - opts->dpe_size)) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index); + goto err; + } + dg_len = dg_len2; - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + /* + * wDatagramLength[0] + * ethernet hdr + crc or larger than max frame size + */ + if ((dg_len < 14 + crc_len) || + (dg_len > frame_max)) { INFO(port->func.config->cdev, "Bad dgram length: %#X\n", dg_len); goto err; @@ -1283,6 +1310,13 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + /* wDatagramIndex[1] */ + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + /* * Copy the data into a new skb. 
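The bounds checks added to ncm_unwrap_ntb() above follow one pattern: every offset parsed out of the NTB must land inside the NTB's own (d)wBlockLength. Condensed into a hypothetical helper (the name is invented; the fields are those used in f_ncm.c):

static bool ncm_ndp_index_valid(unsigned int ndp_index, unsigned int block_len,
				const struct ndp_parser_opts *opts)
{
	/* dwNdpIndex: 4-byte aligned, past the NTH, and the NDP must fit */
	return (ndp_index % 4) == 0 &&
	       ndp_index >= opts->nth_size &&
	       ndp_index <= block_len - opts->ndp_size;
}

Datagram entries get the same treatment: each wDatagramIndex must lie within [nth_size, block_len - dpe_size], and each wDatagramLength within [14 + crc_len, wMaxSegmentSize].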
* This ensures the truesize is correct @@ -1299,7 +1333,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; - if (index2 == 0 || dg_len2 == 0) break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); @@ -1503,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) fs_ncm_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, - ncm_ss_function, NULL); + ncm_ss_function, ncm_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 9c7ed2539ff7..0f47cd398d60 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/ctype.h> #include <linux/cdev.h> +#include <linux/kref.h> #include <asm/byteorder.h> #include <linux/io.h> @@ -64,7 +65,7 @@ struct printer_dev { struct usb_gadget *gadget; s8 interface; struct usb_ep *in_ep, *out_ep; - + struct kref kref; struct list_head rx_reqs; /* List of free RX structs */ struct list_head rx_reqs_active; /* List of Active RX xfers */ struct list_head rx_buffers; /* List of completed xfers */ @@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, /*-------------------------------------------------------------------------*/ +static void printer_dev_free(struct kref *kref) +{ + struct printer_dev *dev = container_of(kref, struct printer_dev, kref); + + kfree(dev); +} + static struct usb_request * printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) { @@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd) spin_unlock_irqrestore(&dev->lock, flags); + kref_get(&dev->kref); DBG(dev, "printer_open returned %x\n", ret); return ret; } @@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd) dev->printer_status &= ~PRINTER_SELECTED; spin_unlock_irqrestore(&dev->lock, flags); + kref_put(&dev->kref, printer_dev_free); DBG(dev, "printer_close\n"); return 0; @@ -1116,6 +1126,7 @@ fail_tx_reqs: printer_req_free(dev->in_ep, req); } + usb_free_all_descriptors(f); return ret; } @@ -1350,7 +1361,8 @@ static void gprinter_free(struct usb_function *f) struct f_printer_opts *opts; opts = container_of(f->fi, struct f_printer_opts, func_inst); - kfree(dev); + + kref_put(&dev->kref, printer_dev_free); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1419,6 +1431,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) return ERR_PTR(-ENOMEM); } + kref_init(&dev->kref); ++opts->refcnt; dev->minor = opts->minor; dev->pnp_string = opts->pnp_string; diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index 0d8e4a364ca6..cc1ff5b7b60c 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -87,8 +87,10 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static unsigned int bitrate(struct usb_gadget *g) { + if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) + return 4250000000U; if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) - return 13 * 1024 * 8 * 1000 * 8; + return 3750000000U; else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else diff --git a/drivers/usb/gadget/function/f_tcm.c 
b/drivers/usb/gadget/function/f_tcm.c index 7f01f78b1d23..f6d203fec495 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) goto err_sts; return 0; + err_sts: - usb_ep_free_request(fu->ep_status, stream->req_status); - stream->req_status = NULL; -err_out: usb_ep_free_request(fu->ep_out, stream->req_out); stream->req_out = NULL; +err_out: + usb_ep_free_request(fu->ep_in, stream->req_in); + stream->req_in = NULL; out: return -ENOMEM; } diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 00d346965f7a..e65f474ad7b3 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -19,6 +19,9 @@ #include "u_audio.h" #include "u_uac1.h" +/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */ +#define UAC1_CHANNEL_MASK 0x0FFF + struct f_uac1 { struct g_audio g_audio; u8 ac_intf, as_in_intf, as_out_intf; @@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f) return container_of(f, struct f_uac1, g_audio.func); } +static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio) +{ + return container_of(audio->func.fi, struct f_uac1_opts, func_inst); +} + /* * DESCRIPTORS ... most are static, but strings and full * configuration descriptors are built on demand. @@ -499,16 +507,48 @@ static void f_audio_disable(struct usb_function *f) uac1->as_out_alt = 0; uac1->as_in_alt = 0; + u_audio_stop_playback(&uac1->g_audio); u_audio_stop_capture(&uac1->g_audio); } /*-------------------------------------------------------------------------*/ +static int f_audio_validate_opts(struct g_audio *audio, struct device *dev) +{ + struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio); + + if (!opts->p_chmask && !opts->c_chmask) { + dev_err(dev, "Error: no playback and capture channels\n"); + return -EINVAL; + } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) { + dev_err(dev, "Error: unsupported playback channels mask\n"); + return -EINVAL; + } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) { + dev_err(dev, "Error: unsupported capture channels mask\n"); + return -EINVAL; + } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) { + dev_err(dev, "Error: incorrect playback sample size\n"); + return -EINVAL; + } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) { + dev_err(dev, "Error: incorrect capture sample size\n"); + return -EINVAL; + } else if (!opts->p_srate) { + dev_err(dev, "Error: incorrect playback sampling rate\n"); + return -EINVAL; + } else if (!opts->c_srate) { + dev_err(dev, "Error: incorrect capture sampling rate\n"); + return -EINVAL; + } + + return 0; +} + /* audio function driver setup/binding */ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct usb_gadget *gadget = cdev->gadget; + struct device *dev = &gadget->dev; struct f_uac1 *uac1 = func_to_uac1(f); struct g_audio *audio = func_to_g_audio(f); struct f_uac1_opts *audio_opts; @@ -518,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) int rate; int status; + status = f_audio_validate_opts(audio, dev); + if (status) + return status; + audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst); us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1)); diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c index 
6677ae932de0..06ee6e901808 100644 --- a/drivers/usb/gadget/function/f_uac1_legacy.c +++ b/drivers/usb/gadget/function/f_uac1_legacy.c @@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) /* Copy buffer is full, add it to the play_queue */ if (audio_buf_size - copy_buf->actual < req->actual) { + spin_lock_irq(&audio->lock); list_add_tail(©_buf->list, &audio->play_queue); + spin_unlock_irq(&audio->lock); schedule_work(&audio->playback_work); copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(copy_buf)) diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index db2d4980cb35..dd960cea642f 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -14,6 +14,9 @@ #include "u_audio.h" #include "u_uac2.h" +/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */ +#define UAC2_CHANNEL_MASK 0x07FFFFFF + /* * The driver implements a simple UAC_2 topology. * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture @@ -215,10 +218,7 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = { .bDescriptorSubtype = UAC_MS_HEADER, .bcdADC = cpu_to_le16(0x200), .bCategory = UAC2_FUNCTION_IO_BOX, - .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc - + sizeof out_clk_src_desc + sizeof usb_out_it_desc - + sizeof io_in_it_desc + sizeof usb_in_ot_desc - + sizeof io_out_ot_desc), + /* .wTotalLength = DYNAMIC */ .bmControls = 0, }; @@ -274,7 +274,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1023), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 1, }; @@ -283,7 +283,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1024), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 4, }; @@ -351,7 +351,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1023), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 1, }; @@ -360,7 +360,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - .wMaxPacketSize = cpu_to_le16(1024), + /* .wMaxPacketSize = DYNAMIC */ .bInterval = 4, }; @@ -447,12 +447,28 @@ struct cntrl_range_lay3 { __le32 dRES; } __packed; -static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, +static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, struct usb_endpoint_descriptor *ep_desc, - unsigned int factor, bool is_playback) + enum usb_device_speed speed, bool is_playback) { int chmask, srate, ssize; - u16 max_packet_size; + u16 max_size_bw, max_size_ep; + unsigned int factor; + + switch (speed) { + case USB_SPEED_FULL: + max_size_ep = 1023; + factor = 1000; + break; + + case USB_SPEED_HIGH: + max_size_ep = 1024; + factor = 8000; + break; + + default: + return -EINVAL; + } if (is_playback) { chmask = uac2_opts->p_chmask; @@ -464,10 +480,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, ssize = uac2_opts->c_ssize; } - max_packet_size = num_channels(chmask) * ssize * - DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); - ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size, - 
le16_to_cpu(ep_desc->wMaxPacketSize))); + max_size_bw = num_channels(chmask) * ssize * + ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1); + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, + max_size_ep)); + + return 0; } /* Use macro to overcome line length limitation */ @@ -501,7 +519,7 @@ static void setup_descriptor(struct f_uac2_opts *opts) as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID; iad_desc.bInterfaceCount = 1; - ac_hdr_desc.wTotalLength = 0; + ac_hdr_desc.wTotalLength = cpu_to_le16(sizeof(ac_hdr_desc)); if (EPIN_EN(opts)) { u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength); @@ -589,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts) hs_audio_desc[i] = NULL; } +static int afunc_validate_opts(struct g_audio *agdev, struct device *dev) +{ + struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev); + + if (!opts->p_chmask && !opts->c_chmask) { + dev_err(dev, "Error: no playback and capture channels\n"); + return -EINVAL; + } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) { + dev_err(dev, "Error: unsupported playback channels mask\n"); + return -EINVAL; + } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) { + dev_err(dev, "Error: unsupported capture channels mask\n"); + return -EINVAL; + } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) { + dev_err(dev, "Error: incorrect playback sample size\n"); + return -EINVAL; + } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) { + dev_err(dev, "Error: incorrect capture sample size\n"); + return -EINVAL; + } else if (!opts->p_srate) { + dev_err(dev, "Error: incorrect playback sampling rate\n"); + return -EINVAL; + } else if (!opts->c_srate) { + dev_err(dev, "Error: incorrect capture sampling rate\n"); + return -EINVAL; + } + + return 0; +} + static int afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) { @@ -597,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) struct usb_composite_dev *cdev = cfg->cdev; struct usb_gadget *gadget = cdev->gadget; struct device *dev = &gadget->dev; - struct f_uac2_opts *uac2_opts; + struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev); struct usb_string *us; int ret; - uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst); + ret = afunc_validate_opts(agdev, dev); + if (ret) + return ret; us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn)); if (IS_ERR(us)) @@ -673,10 +723,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) } /* Calculate wMaxPacketSize according to audio bandwidth */ - set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true); - set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); - set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true); - set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false); + ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL, + true); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL, + false); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH, + true); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + + ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH, + false); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } if (EPOUT_EN(uac2_opts)) { agdev->out_ep = 
usb_ep_autoconfig(gadget, &fs_epout_desc); diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index fb0a892687c0..c03b67aab1a8 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) uvc_hs_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); - uvc_hs_streaming_ep.bInterval = opts->streaming_interval; + + /* A high-bandwidth endpoint must specify a bInterval value of 1 */ + if (max_packet_mult > 1) + uvc_hs_streaming_ep.bInterval = 1; + else + uvc_hs_streaming_ep.bInterval = opts->streaming_interval; uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); uvc_ss_streaming_ep.bInterval = opts->streaming_interval; @@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void) pd->bmControls[0] = 1; pd->bmControls[1] = 0; pd->iProcessing = 0; + pd->bmVideoStandards = 0; od = &opts->uvc_output_terminal; od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE; diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 56906d15fb55..223029fa8445 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) struct snd_uac_chip *uac = prm->uac; /* i/f shutting down */ - if (!prm->ep_enabled || req->status == -ESHUTDOWN) + if (!prm->ep_enabled) { + usb_ep_free_request(ep, req); + return; + } + + if (req->status == -ESHUTDOWN) return; /* @@ -351,8 +356,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep) for (i = 0; i < params->req_number; i++) { if (prm->ureq[i].req) { - usb_ep_dequeue(ep, prm->ureq[i].req); - usb_ep_free_request(ep, prm->ureq[i].req); + if (usb_ep_dequeue(ep, prm->ureq[i].req)) + usb_ep_free_request(ep, prm->ureq[i].req); + /* + * If usb_ep_dequeue() cannot successfully dequeue the + * request, the request will be freed by the completion + * callback. + */ + prm->ureq[i].req = NULL; } } diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index fbe96ef1ac7a..99b840daf3d9 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -45,9 +45,10 @@ #define UETH__VERSION "29-May-2008" /* Experiments show that both Linux and Windows hosts allow up to 16k - * frame sizes. Set the max size to 15k+52 to prevent allocating 32k + * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k * blocks and still have efficient handling. 
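Spelling out the sizing in the comment above, as illustrative arithmetic only: 15k + 52 is 15 * 1024 + 52 = 15412 bytes of MTU, and a full frame adds the 14-byte Ethernet header for 15426 bytes, still inside a 16 KiB block:

#define GETHER_MAX_MTU_SIZE	15412	/* 15 * 1024 + 52 */

/* MTU plus the 14-byte Ethernet header must fit one 16 KiB allocation */
_Static_assert(GETHER_MAX_MTU_SIZE + 14 <= 16 * 1024,
	       "max frame must fit a 16 KiB block");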
*/ -#define GETHER_MAX_ETH_FRAME_LEN 15412 +#define GETHER_MAX_MTU_SIZE 15412 +#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN) struct eth_dev { /* lock is held while accessing port_usb @@ -93,7 +94,7 @@ struct eth_dev { static inline int qlen(struct usb_gadget *gadget, unsigned qmult) { if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || - gadget->speed == USB_SPEED_SUPER)) + gadget->speed >= USB_SPEED_SUPER)) return qmult * DEFAULT_QLEN; else return DEFAULT_QLEN; @@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, /* MTU range: 14 - 15412 */ net->min_mtu = ETH_HLEN; - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + net->max_mtu = GETHER_MAX_MTU_SIZE; dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); @@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname) /* MTU range: 14 - 15412 */ net->min_mtu = ETH_HLEN; - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN; + net->max_mtu = GETHER_MAX_MTU_SIZE; return net; } diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index d8b92485b727..5b1d4771d44f 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -169,12 +169,11 @@ out: \ size_t len) \ { \ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ - int ret; \ + int ret = -EINVAL; \ u8 val; \ \ mutex_lock(&opts->lock); \ - ret = sscanf(page, "%02hhx", &val); \ - if (ret > 0) { \ + if (sscanf(page, "%02hhx", &val) > 0) { \ opts->_n_ = val; \ ret = len; \ } \ diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index af16672d5118..6680dcfe660e 100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -203,8 +203,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail_string_ids; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index dd81fd538cb8..a748ed0842e8 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(cdev->gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(cdev->gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index 8d7a556ece30..563363aba48f 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index 30313b233680..99c7fc0d1d59 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + 
status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b47938dff1a2..cabcbb47f0ac 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->buf = dev->rbuf; req->context = NULL; - value = -EOPNOTSUPP; switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: @@ -1784,7 +1783,7 @@ static ssize_t dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) { struct dev_data *dev = fd->private_data; - ssize_t value = len, length = len; + ssize_t value, length = len; unsigned total; u32 tag; char *kbuf; @@ -2041,6 +2040,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc) return 0; Enomem: + kfree(CHIP); + CHIP = NULL; + return -ENOMEM; } diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index c61e71ba7045..0f1b45e3abd1 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index a9f8eb8e1c76..2c9eab2b863d 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = { .bmControls[0] = 1, .bmControls[1] = 0, .iProcessing = 0, + .bmVideoStandards = 0, }; static const struct uvc_output_terminal_descriptor uvc_output_terminal = { diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index eaa13fd3dc7f..e313c3b8dcb1 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -14,6 +14,7 @@ #define __U_F_H__ #include <linux/usb/gadget.h> +#include <linux/overflow.h> /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 @@ -21,21 +22,36 @@ #define vla_item(groupname, type, name, n) \ size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = (n) * sizeof(type); \ - groupname##__next = offset + size; \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t size = array_size(n, sizeof(type)); \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, size, \ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ offset; \ }) #define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = (n) * sizeof(type); \ - size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = groupname##_##name##__sz; \ - groupname##__next = offset + size; \ - offset; \ + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ + size_t groupname##_##name##__offset = ({ \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t 
align_mask = __alignof__(type) - 1; \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, groupname##_##name##__sz,\ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ + offset; \ }) #define vla_ptr(ptr, groupname, name) \ diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index 57b6f66331cf..362284057d30 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -154,6 +154,11 @@ static int udc_pci_probe( pci_set_master(pdev); pci_try_set_mwi(pdev); + dev->phys_addr = resource; + dev->irq = pdev->irq; + dev->pdev = pdev; + dev->dev = &pdev->dev; + /* init dma pools */ if (use_dma) { retval = init_dma_pools(dev); @@ -161,11 +166,6 @@ static int udc_pci_probe( goto err_dma; } - dev->phys_addr = resource; - dev->irq = pdev->irq; - dev->pdev = pdev; - dev->dev = &pdev->dev; - /* general probing */ if (udc_probe(dev)) { retval = -ENODEV; diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index 90b134d5dca9..c1bfbfd9491d 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, int status) { bool internal = req->internal; + struct ast_vhub *vhub = ep->vhub; EPVDBG(ep, "completing request @%p, status %d\n", req, status); @@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, if (req->req.dma) { if (!WARN_ON(!ep->dev)) - usb_gadget_unmap_request(&ep->dev->gadget, + usb_gadget_unmap_request_by_dev(&vhub->pdev->dev, &req->req, ep->epn.is_in); req->req.dma = 0; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index 7475c74aa5c5..2cd406e8dd99 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req, if (ep->epn.desc_mode || ((((unsigned long)u_req->buf & 7) == 0) && (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) { - rc = usb_gadget_map_request(&ep->dev->gadget, u_req, + rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req, ep->epn.is_in); if (rc) { dev_warn(&vhub->pdev->dev, @@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep, u32 state, reg, loops; /* Stop DMA activity */ - writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + if (ep->epn.desc_mode) + writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + else + writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); /* Wait for it to complete */ for (loops = 0; loops < 1000; loops++) { diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 1d0d8952a74b..bebe814f55e6 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -870,7 +870,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) u32 status; DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", - ep->ep.name, req); + ep->ep.name, _req); spin_lock_irqsave(&udc->lock, flags); @@ -1950,10 +1950,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid) usba_start(udc); } else { udc->suspended = false; - usba_stop(udc); - if (udc->driver->disconnect) udc->driver->disconnect(&udc->gadget); + + usba_stop(udc); } udc->vbus_prev = vbus; } diff --git 
a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig index 3e88c7670b2e..fb01ff47b64c 100644 --- a/drivers/usb/gadget/udc/bdc/Kconfig +++ b/drivers/usb/gadget/udc/bdc/Kconfig @@ -17,7 +17,7 @@ if USB_BDC_UDC comment "Platform Support" config USB_BDC_PCI tristate "BDC support for PCIe based platforms" - depends on USB_PCI + depends on USB_PCI && BROKEN default USB_BDC_UDC help Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index cc4a16e253ac..3d33499db50b 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -282,6 +282,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) * in that case reinit is passed as 1 */ if (reinit) { + int i; /* Enable interrupts */ temp = bdc_readl(bdc->regs, BDC_BDCSC); temp |= BDC_GIE; @@ -291,6 +292,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) /* Initialize SRR to 0 */ memset(bdc->srr.sr_bds, 0, NUM_SR_ENTRIES * sizeof(struct bdc_bd)); + /* clear ep flags to avoid post disconnect stops/deconfigs */ + for (i = 1; i < bdc->num_eps; ++i) + bdc->bdc_ep_array[i]->flags = 0; } else { /* One time initiaization only */ /* Enable status report function pointers */ @@ -601,9 +605,14 @@ static int bdc_remove(struct platform_device *pdev) static int bdc_suspend(struct device *dev) { struct bdc *bdc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(bdc->clk); - return 0; + /* Halt the controller */ + ret = bdc_stop(bdc); + if (!ret) + clk_disable_unprepare(bdc->clk); + + return ret; } static int bdc_resume(struct device *dev) diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index a4d9b5e1e50e..9ddc0b4e92c9 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req, { struct bdc *bdc = ep->bdc; - if (req == NULL || &req->queue == NULL || &req->usb_req == NULL) + if (req == NULL) return; dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status); @@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep) } bdc_dbg_bd_list(bdc, ep); /* only for ep0: config ep is called for ep0 from connect event */ - ep->flags |= BDC_EP_ENABLED; if (ep->ep_num == 1) return ret; @@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) __func__, ep->name, start_bdi, end_bdi); dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", ep, (void *)ep->usb_ep.desc); - /* Stop the ep to see where the HW is ? */ - ret = bdc_stop_ep(bdc, ep->ep_num); - /* if there is an issue with stopping ep, then no need to go further */ - if (ret) + /* if still connected, stop the ep to see where the HW is ? 
*/ + if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { + ret = bdc_stop_ep(bdc, ep->ep_num); + /* if there is an issue, then no need to go further */ + if (ret) + return 0; + } else return 0; /* @@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep) __func__, ep->name, ep->flags); if (!(ep->flags & BDC_EP_ENABLED)) { - dev_warn(bdc->dev, "%s is already disabled\n", ep->name); + if (bdc->gadget.speed != USB_SPEED_UNKNOWN) + dev_warn(bdc->dev, "%s is already disabled\n", + ep->name); return 0; } spin_lock_irqsave(&bdc->lock, flags); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 51fa614b4079..e41f67cd3d46 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1297,6 +1297,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); usb_gadget_disconnect(udc->gadget); + if (udc->gadget->irq) + synchronize_irq(udc->gadget->irq); udc->driver->unbind(udc->gadget); usb_gadget_udc_stop(udc); @@ -1475,10 +1477,13 @@ static ssize_t soft_connect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + ssize_t ret; + mutex_lock(&udc_lock); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out; } if (sysfs_streq(buf, "connect")) { @@ -1489,10 +1494,14 @@ static ssize_t soft_connect_store(struct device *dev, usb_gadget_udc_stop(udc); } else { dev_err(dev, "unsupported command '%s'\n", buf); - return -EINVAL; + ret = -EINVAL; + goto out; } - return n; + ret = n; +out: + mutex_unlock(&udc_lock); + return ret; } static DEVICE_ATTR_WO(soft_connect); diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 4c9d1e49d5ed..58261ec1300a 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -900,6 +900,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value) spin_lock_irqsave(&dum->lock, flags); dum->pullup = (value != 0); set_link_state(dum_hcd); + if (value == 0) { + /* + * Emulate synchronize_irq(): wait for callbacks to finish. + * This seems to be the best place to emulate the call to + * synchronize_irq() that's in usb_gadget_remove_driver(). + * Doing it in dummy_udc_stop() would be too late since it + * is called after the unbind callback and unbind shouldn't + * be invoked until all the other callbacks are finished. 
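The soft_connect_store() change above is the classic single-unlock shape for a sysfs store: take the lock once, funnel every exit through one label, and the early returns that used to leak udc_lock disappear. A compact userspace rendering of the same control flow with a pthread mutex (function and variable names are illustrative, not the driver's):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static pthread_mutex_t udc_lock = PTHREAD_MUTEX_INITIALIZER;
static int have_driver = 1;

static ssize_t soft_connect_store(const char *buf, size_t n)
{
        ssize_t ret;

        pthread_mutex_lock(&udc_lock);
        if (!have_driver) {
                ret = -EOPNOTSUPP;      /* error path still unlocks */
                goto out;
        }
        if (strcmp(buf, "connect") && strcmp(buf, "disconnect")) {
                ret = -EINVAL;
                goto out;
        }
        /* ... start or stop the controller here ... */
        ret = (ssize_t)n;
out:
        pthread_mutex_unlock(&udc_lock);        /* single unlock point */
        return ret;
}

int main(void)
{
        printf("%zd\n", soft_connect_store("connect", 7));
        printf("%zd\n", soft_connect_store("bogus", 5));
        return 0;
}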
+ */ + while (dum->callback_usage > 0) { + spin_unlock_irqrestore(&dum->lock, flags); + usleep_range(1000, 2000); + spin_lock_irqsave(&dum->lock, flags); + } + } spin_unlock_irqrestore(&dum->lock, flags); usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd)); @@ -1001,14 +1016,6 @@ static int dummy_udc_stop(struct usb_gadget *g) spin_lock_irq(&dum->lock); dum->ints_enabled = 0; stop_activity(dum); - - /* emulate synchronize_irq(): wait for callbacks to finish */ - while (dum->callback_usage > 0) { - spin_unlock_irq(&dum->lock); - usleep_range(1000, 2000); - spin_lock_irq(&dum->lock); - } - dum->driver = NULL; spin_unlock_irq(&dum->lock); @@ -2733,7 +2740,7 @@ static int __init init(void) { int retval = -ENOMEM; int i; - struct dummy *dum[MAX_NUM_UDC]; + struct dummy *dum[MAX_NUM_UDC] = {}; if (usb_disabled()) return -ENODEV; diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 21f3e6c4e4d6..c313d07ec16f 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep, } else { buffer = req->req.buf + req->req.actual; length = ioread32(ep->fotg210->reg + - FOTG210_FIBCR(ep->epnum - 1)); - length &= FIBCR_BCFX; + FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX; + if (length > req->req.length - req->req.actual) + length = req->req.length - req->req.actual; } } else { buffer = req->req.buf + req->req.actual; if (req->req.length - req->req.actual > ep->ep.maxpacket) length = ep->ep.maxpacket; else - length = req->req.length; + length = req->req.length - req->req.actual; } d = dma_map_single(dev, buffer, length, @@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep, } if (ep->dir_in) { /* if IN */ fotg210_start_dma(ep, req); - if ((req->req.length == req->req.actual) || - (req->req.actual < ep->ep.maxpacket)) + if (req->req.length == req->req.actual) fotg210_done(ep, req, 0); } else { /* OUT */ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0); @@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210) if (req->req.length) fotg210_start_dma(ep, req); - if ((req->req.length - req->req.actual) < ep->ep.maxpacket) + if (req->req.actual == req->req.length) fotg210_done(ep, req, 0); } else { fotg210_set_cxdone(fotg210); @@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep) { struct fotg210_request *req = list_entry(ep->queue.next, struct fotg210_request, queue); + int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1); fotg210_start_dma(ep, req); - /* finish out transfer */ + /* Complete the request when it's full or a short packet arrived. + * Like other drivers, short_not_ok isn't handled. 
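The fotg210 hunks above all fix the same arithmetic: each DMA chunk must be bounded by the bytes still outstanding (req.length - req.actual), not by the total request length, and an OUT chunk must additionally respect the FIFO byte count. The old code could hand the full length to a partially completed request and overrun the buffer tail. A standalone sketch of the corrected length math (parameter names are illustrative):

#include <stdio.h>

static unsigned chunk_len(unsigned length, unsigned actual,
                          unsigned maxpacket, unsigned fifo_count)
{
        unsigned remaining = length - actual;

        if (fifo_count)         /* OUT: FIFO byte count, clamped */
                return fifo_count < remaining ? fifo_count : remaining;
        return remaining > maxpacket ? maxpacket : remaining;   /* IN */
}

int main(void)
{
        /* 100-byte request, 96 done, 64-byte packets, FIFO reports 64 */
        printf("%u\n", chunk_len(100, 96, 64, 64));     /* -> 4, not 64 */
        printf("%u\n", chunk_len(100, 0, 64, 0));       /* IN -> 64 */
        return 0;
}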
+ */ + if (req->req.length == req->req.actual || - req->req.actual < ep->ep.maxpacket) + (disgr1 & DISGR1_SPK_INT(ep->epnum - 1))) fotg210_done(ep, req, 0); } @@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210) value &= ~DMCR_GLINT_EN; iowrite32(value, fotg210->reg + FOTG210_DMCR); + /* enable only grp2 irqs we handle */ + iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT + | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT + | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT), + fotg210->reg + FOTG210_DMISGR2); + /* disable all fifo interrupt */ iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1); diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index c3721225b61e..b706ad3034bc 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ b/drivers/usb/gadget/udc/goku_udc.c @@ -1757,6 +1757,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + pci_set_drvdata(pdev, dev); spin_lock_init(&dev->lock); dev->pdev = pdev; dev->gadget.ops = &goku_ops; @@ -1790,7 +1791,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) } dev->regs = (struct goku_udc_regs __iomem *) base; - pci_set_drvdata(pdev, dev); INFO(dev, "%s\n", driver_desc); INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 116d386472ef..da73a06c20a3 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) if (num == 0) { _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); + if (!_req) + return -ENOMEM; + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); - if (!_req || !buf) { - /* possible _req freed by gr_probe via gr_remove */ + if (!buf) { + gr_free_request(&ep->ep, _req); return -ENOMEM; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index bf6c81e2f8cc..6d2f1f98f13d 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1614,17 +1614,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; u16 maxpacket; u32 tmp; unsigned long flags; /* Verify EP data */ if ((!_ep) || (!ep) || (!desc) || - (desc->bDescriptorType != USB_DT_ENDPOINT)) { - dev_dbg(udc->dev, "bad ep or descriptor\n"); + (desc->bDescriptorType != USB_DT_ENDPOINT)) return -EINVAL; - } + + udc = ep->udc; maxpacket = usb_endpoint_maxp(desc); if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); @@ -1872,7 +1872,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; unsigned long flags; if ((!ep) || (ep->hwep_num <= 1)) @@ -1882,6 +1882,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) if (ep->is_in) return -EAGAIN; + udc = ep->udc; spin_lock_irqsave(&udc->lock, flags); if (value == 1) { diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index a8288df6aadf..ea59b56e5402 100644 --- 
a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev) err_add_udc: m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); - + m66592->ep0_req = NULL; clean_up3: if (m66592->pdata->on_chip) { clk_disable(m66592->clk); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index cafde053788b..80a1b52c656e 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 247de0faaeb7..5980540a8fff 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev) err_req: release_mem_region(base, len); err: + kfree(dev); + return ret; } diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 51efee21915f..7c616d7641c6 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; done: - if (dev) + if (dev) { net2280_remove(pdev); + kfree(dev); + } return retval; } diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 3344fb8c4181..da8aeecc59b2 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -600,18 +600,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev) static inline void pch_udc_vbus_session(struct pch_udc_dev *dev, int is_active) { + unsigned long iflags; + + spin_lock_irqsave(&dev->lock, iflags); if (is_active) { pch_udc_reconnect(dev); dev->vbus_session = 1; } else { if (dev->driver && dev->driver->disconnect) { - spin_lock(&dev->lock); + spin_unlock_irqrestore(&dev->lock, iflags); dev->driver->disconnect(&dev->gadget); - spin_unlock(&dev->lock); + spin_lock_irqsave(&dev->lock, iflags); } pch_udc_set_disconnect(dev); dev->vbus_session = 0; } + spin_unlock_irqrestore(&dev->lock, iflags); } /** @@ -1168,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value) static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on) { struct pch_udc_dev *dev; + unsigned long iflags; if (!gadget) return -EINVAL; + dev = container_of(gadget, struct pch_udc_dev, gadget); + + spin_lock_irqsave(&dev->lock, iflags); if (is_on) { pch_udc_reconnect(dev); } else { if (dev->driver && dev->driver->disconnect) { - spin_lock(&dev->lock); + spin_unlock_irqrestore(&dev->lock, iflags); dev->driver->disconnect(&dev->gadget); - spin_unlock(&dev->lock); + spin_lock_irqsave(&dev->lock, iflags); } pch_udc_set_disconnect(dev); } + spin_unlock_irqrestore(&dev->lock, iflags); return 0; } @@ -1773,7 +1782,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep, } /* prevent from using desc. 
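The pch_udc_vbus_session() rework beginning above (and continued below) enforces one locking rule: the device lock is released around the gadget driver's disconnect callback, because the callback may re-enter the UDC and take the same lock. A compact pthread model of the drop-call-retake shape (all names illustrative); with a default non-recursive mutex, removing the drop would deadlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void disconnect_cb(void)
{
        /* may legally re-enter the "driver" and take dev_lock itself */
        pthread_mutex_lock(&dev_lock);
        puts("callback ran with lock available");
        pthread_mutex_unlock(&dev_lock);
}

static void vbus_session(int is_active)
{
        pthread_mutex_lock(&dev_lock);
        if (!is_active) {
                pthread_mutex_unlock(&dev_lock);  /* drop before callback */
                disconnect_cb();
                pthread_mutex_lock(&dev_lock);    /* retake for state */
        }
        /* update device state under the lock */
        pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
        vbus_session(0);
        return 0;
}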
- set HOST BUSY */ dma_desc->status |= PCH_UDC_BS_HST_BSY; - dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID); + dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID); req->td_data = dma_desc; req->td_data_last = dma_desc; req->chain_len = 1; @@ -2316,6 +2325,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num) pch_udc_set_dma(dev, DMA_DIR_RX); } +static int pch_udc_gadget_setup(struct pch_udc_dev *dev) + __must_hold(&dev->lock) +{ + int rc; + + /* In some cases we can get an interrupt before driver gets setup */ + if (!dev->driver) + return -ESHUTDOWN; + + spin_unlock(&dev->lock); + rc = dev->driver->setup(&dev->gadget, &dev->setup_data); + spin_lock(&dev->lock); + return rc; +} + /** * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts * @dev: Reference to the device structure @@ -2387,15 +2411,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev) dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep; else /* OUT */ dev->gadget.ep0 = &ep->ep; - spin_lock(&dev->lock); /* If Mass storage Reset */ if ((dev->setup_data.bRequestType == 0x21) && (dev->setup_data.bRequest == 0xFF)) dev->prot_stall = 0; /* call gadget with setup data received */ - setup_supported = dev->driver->setup(&dev->gadget, - &dev->setup_data); - spin_unlock(&dev->lock); + setup_supported = pch_udc_gadget_setup(dev); if (dev->setup_data.bRequestType & USB_DIR_IN) { ep->td_data->status = (ep->td_data->status & @@ -2643,9 +2664,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) dev->ep[i].halted = 0; } dev->stall = 0; - spin_unlock(&dev->lock); - dev->driver->setup(&dev->gadget, &dev->setup_data); - spin_lock(&dev->lock); + pch_udc_gadget_setup(dev); } /** @@ -2680,9 +2699,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) dev->stall = 0; /* call gadget zero with setup data received */ - spin_unlock(&dev->lock); - dev->driver->setup(&dev->gadget, &dev->setup_data); - spin_lock(&dev->lock); + pch_udc_gadget_setup(dev); } /** @@ -2956,7 +2973,7 @@ static int init_dma_pools(struct pch_udc_dev *dev) dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf, UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE); - return 0; + return dma_mapping_error(&dev->pdev->dev, dev->dma_addr); } static int pch_udc_start(struct usb_gadget *g, diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index 11e25a3f4f1f..a766476fd742 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -1852,6 +1852,8 @@ static int r8a66597_probe(struct platform_device *pdev) return PTR_ERR(reg); ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) + return -EINVAL; irq = ires->start; irq_trigger = ires->flags & IRQF_TRIGGER_MASK; diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index f82208fbc249..5dcc0692b95c 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -251,10 +251,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, static void s3c2410_udc_nuke(struct s3c2410_udc *udc, struct s3c2410_ep *ep, int status) { - /* Sanity check */ - if (&ep->queue == NULL) - return; - while (!list_empty(&ep->queue)) { struct s3c2410_request *req; req = list_entry(ep->queue.next, struct s3c2410_request, diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c index 32f1d3e90c26..99805d60a7ab 100644 --- a/drivers/usb/gadget/udc/snps_udc_plat.c +++ b/drivers/usb/gadget/udc/snps_udc_plat.c @@ 
-114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); udc->virt_addr = devm_ioremap_resource(dev, res); - if (IS_ERR(udc->regs)) - return PTR_ERR(udc->regs); + if (IS_ERR(udc->virt_addr)) + return PTR_ERR(udc->virt_addr); /* udc csr registers base */ udc->csr = udc->virt_addr + UDC_CSR_ADDR; diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 7c24d1ce1088..33f77e59aa6f 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -55,9 +55,9 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf) return -EINVAL; /* string descriptors have length, tag, then UTF16-LE text */ - len = min ((size_t) 126, strlen (s->s)); + len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s)); len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, - (wchar_t *) &buf[2], 126); + (wchar_t *) &buf[2], USB_MAX_STRING_LEN); if (len < 0) return -EINVAL; buf [0] = (len + 1) * 2; diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 01debfd03d4a..84d59a611511 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index cf2b7ae93b7e..5c6ce1ef3f4b 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -22,6 +22,7 @@ #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/hcd.h> +#include <linux/usb/otg.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> @@ -573,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 hcc_params; + int rc; hcd->uses_new_polling = 1; @@ -628,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd) down_write(&ehci_cf_port_reset_rwsem); ehci->rh_state = EHCI_RH_RUNNING; ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + + /* Wait until the HC becomes operational */ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ msleep(5); + rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); + up_write(&ehci_cf_port_reset_rwsem); + + if (rc) { + ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); + return rc; + } + ehci->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ce0eaf7d7c12..9f9ab5ccea88 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -14,7 +14,6 @@ */ /*-------------------------------------------------------------------------*/ -#include <linux/usb/otg.h> #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) @@ -346,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) unlink_empty_async_suspended(ehci); + /* Some Synopsys controllers mistakenly leave IAA turned on */ + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + /* Any IAA cycle that started before the suspend is now invalid */ end_iaa_cycle(ehci); ehci_handle_start_intr_unlinks(ehci); diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 66ec1fdf9fe7..b6f196f5e252
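The ehci_run() change above stops trusting a fixed 5 ms sleep: after setting FLAG_CF it polls the status register until STS_HALT deasserts, with a 100 ms budget (ehci_handshake()). A standalone model of that bounded-poll handshake against a simulated register (the countdown merely fakes a controller that starts after a few reads):

#include <stdio.h>

#define STS_HALT 0x1000

static unsigned int fake_status = STS_HALT;

static unsigned int read_status(void)
{
        /* pretend the controller leaves the halted state after a while */
        static int countdown = 3;
        if (countdown && --countdown == 0)
                fake_status &= ~STS_HALT;
        return fake_status;
}

static int handshake(unsigned int mask, unsigned int done, int usec_budget)
{
        while (usec_budget-- > 0) {
                if ((read_status() & mask) == done)
                        return 0;
                /* real code: udelay(1) between polls */
        }
        return -1;      /* -ETIMEDOUT in the driver */
}

int main(void)
{
        printf("handshake: %d\n", handshake(STS_HALT, 0, 100 * 1000));
        return 0;
}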
100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -156,12 +156,10 @@ static int mv_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(r); hcd->regs = ehci_mv->op_regs; - hcd->irq = platform_get_irq(pdev, 0); - if (!hcd->irq) { - dev_err(&pdev->dev, "Cannot get irq."); - retval = -ENODEV; + retval = platform_get_irq(pdev, 0); + if (retval < 0) goto err_disable_clk; - } + hcd->irq = retval; ehci = hcd_to_ehci(hcd); ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index c9f91e6c72b6..7f65c86047dd 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -50,6 +50,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); if (!hcd) diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index fc125b3d06e7..03122dc332ed 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -220,6 +220,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) err_pm_runtime: pm_runtime_put_sync(dev); + pm_runtime_disable(dev); err_phy: for (i = 0; i < omap->nports; i++) { diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index b0882c13a1d1..66713c253765 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 769749ca5961..e4fc3f66d43b 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -29,6 +29,8 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> +#include <linux/sys_soc.h> +#include <linux/timer.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/usb/ehci_pdriver.h> @@ -44,6 +46,9 @@ struct ehci_platform_priv { struct clk *clks[EHCI_MAX_CLKS]; struct reset_control *rsts; bool reset_on_resume; + bool quirk_poll; + struct timer_list poll_timer; + struct delayed_work poll_work; }; static const char hcd_name[] = "ehci-platform"; @@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = { .power_off = ehci_platform_power_off, }; +/** + * quirk_poll_check_port_status - Poll port_status if the device sticks + * @ehci: the ehci hcd pointer + * + * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck + * after a full/low speed USB device is disconnected. To detect such a + * situation, this quirk polls + * the EHCI PORTSC register.
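The predicate documented above, reduced to a pure function: a port is considered stuck when it is owned by EHCI and powered, reports no connection, yet still drives a non-SE0 line state. The PORT_* masks below follow the EHCI PORTSC layout but are redefined locally so the sketch compiles on its own:

#include <stdbool.h>
#include <stdio.h>

#define PORT_OWNER      (1u << 13)
#define PORT_POWER      (1u << 12)
#define PORT_LS_MASK    (3u << 10)
#define PORT_CONNECT    (1u << 0)

static bool port_looks_stuck(unsigned int portsc)
{
        return !(portsc & PORT_OWNER) &&
               (portsc & PORT_POWER) &&
               !(portsc & PORT_CONNECT) &&
               (portsc & PORT_LS_MASK);
}

int main(void)
{
        printf("%d\n", port_looks_stuck(PORT_POWER | (1u << 10)));   /* 1 */
        printf("%d\n", port_looks_stuck(PORT_POWER | PORT_CONNECT)); /* 0 */
        return 0;
}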
+ * + * Return: true if the controller's port_status indicated getting stuck + */ +static bool quirk_poll_check_port_status(struct ehci_hcd *ehci) +{ + u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); + + if (!(port_status & PORT_OWNER) && + (port_status & PORT_POWER) && + !(port_status & PORT_CONNECT) && + (port_status & PORT_LS_MASK)) + return true; + + return false; +} + +/** + * quirk_poll_rebind_companion - rebind companion device to recover + * @ehci: the ehci hcd pointer + * + * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck + * after a full/low speed USB device is disconnected. To recover from + * such a situation, the controllers require changing the OHCI + * functional state. + */ +static void quirk_poll_rebind_companion(struct ehci_hcd *ehci) +{ + struct device *companion_dev; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + + companion_dev = usb_of_get_companion_dev(hcd->self.controller); + if (!companion_dev) + return; + + device_release_driver(companion_dev); + if (device_attach(companion_dev) < 0) + ehci_err(ehci, "%s: failed\n", __func__); + + put_device(companion_dev); +} + +static void quirk_poll_work(struct work_struct *work) +{ + struct ehci_platform_priv *priv = + container_of(to_delayed_work(work), struct ehci_platform_priv, + poll_work); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + /* check the status twice to reduce misdetection rate */ + if (!quirk_poll_check_port_status(ehci)) + return; + udelay(10); + if (!quirk_poll_check_port_status(ehci)) + return; + + ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__); + quirk_poll_rebind_companion(ehci); +} + +static void quirk_poll_timer(struct timer_list *t) +{ + struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + if (quirk_poll_check_port_status(ehci)) { + /* + * Schedule the work to test the port further. Note that the + * status update may be delayed while a device reconnects, so + * use a delayed work with a 5 ms delay + * to avoid misdetection.
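The two reads separated by udelay(10) in quirk_poll_work() above are a debounce: a port that is merely mid-transition will usually change between samples, while a genuinely wedged one reads identically twice. The same idea in plain C, with nanosleep() standing in for udelay() and a stubbed sampler:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool sample_stuck(void);   /* e.g. port_looks_stuck(read_portsc()) */

static bool confirmed_stuck(void)
{
        struct timespec gap = { .tv_sec = 0, .tv_nsec = 10 * 1000 };

        if (!sample_stuck())
                return false;
        nanosleep(&gap, NULL);          /* let a transient settle */
        return sample_stuck();          /* only trust two matching reads */
}

static bool sample_stuck(void)
{
        return true;                    /* stub: always reports stuck */
}

int main(void)
{
        printf("stuck: %d\n", confirmed_stuck());
        return 0;
}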
+ */ + schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5)); + } + + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_init(struct ehci_platform_priv *priv) +{ + INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work); + timer_setup(&priv->poll_timer, quirk_poll_timer, 0); + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_end(struct ehci_platform_priv *priv) +{ + del_timer_sync(&priv->poll_timer); + cancel_delayed_work(&priv->poll_work); +} + +static const struct soc_device_attribute quirk_poll_match[] = { + { .family = "R-Car Gen3" }, + { /* sentinel*/ } +}; + static int ehci_platform_probe(struct platform_device *dev) { struct usb_hcd *hcd; @@ -176,6 +286,9 @@ static int ehci_platform_probe(struct platform_device *dev) "has-transaction-translator")) hcd->has_tt = 1; + if (soc_device_match(quirk_poll_match)) + priv->quirk_poll = true; + for (clk = 0; clk < EHCI_MAX_CLKS; clk++) { priv->clks[clk] = of_clk_get(dev->dev.of_node, clk); if (IS_ERR(priv->clks[clk])) { @@ -247,6 +360,9 @@ static int ehci_platform_probe(struct platform_device *dev) device_enable_async_suspend(hcd->self.controller); platform_set_drvdata(dev, hcd); + if (priv->quirk_poll) + quirk_poll_init(priv); + return err; err_power: @@ -273,6 +389,9 @@ static int ehci_platform_remove(struct platform_device *dev) struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); int clk; + if (priv->quirk_poll) + quirk_poll_end(priv); + usb_remove_hcd(hcd); if (pdata->power_off) @@ -297,9 +416,13 @@ static int ehci_platform_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct usb_ehci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = to_platform_device(dev); + struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); bool do_wakeup = device_may_wakeup(dev); int ret; + if (priv->quirk_poll) + quirk_poll_end(priv); + ret = ehci_suspend(hcd, do_wakeup); if (ret) return ret; @@ -331,6 +454,10 @@ static int ehci_platform_resume(struct device *dev) } ehci_resume(hcd, priv->reset_on_resume); + + if (priv->quirk_poll) + quirk_poll_init(priv); + return 0; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index ae8f60f6e6a5..44a7e58a26e3 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register( pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask; - if (!pdev->dev.dma_mask) + if (!pdev->dev.dma_mask) { pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask; - else - dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + } else { + retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) + goto error; + } retval = platform_device_add_data(pdev, pdata, sizeof(*pdata)); if (retval) diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 8819f502b6a6..903abdf30b5a 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1847,7 +1847,7 @@ max3421_probe(struct spi_device *spi) struct max3421_hcd *max3421_hcd; struct usb_hcd *hcd = NULL; struct max3421_hcd_platform_data *pdata = NULL; - int retval = -ENOMEM; + int retval; if (spi_setup(spi) < 0) { dev_err(&spi->dev, "Unable to setup SPI bus"); @@ -1889,6 +1889,7 @@ max3421_probe(struct spi_device *spi) goto error; } + retval = -ENOMEM; hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev, dev_name(&spi->dev)); if (!hcd) { diff --git a/drivers/usb/host/ohci-exynos.c 
b/drivers/usb/host/ohci-exynos.c index d5ce98e205c7..d8b6c9f5695c 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 4de91653a2c7..833f51282acc 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -102,7 +102,7 @@ static void io_watchdog_func(struct timer_list *t); /* Some boards misreport power switching/overcurrent */ -static bool distrust_firmware = true; +static bool distrust_firmware; module_param (distrust_firmware, bool, 0); MODULE_PARM_DESC (distrust_firmware, "true to distrust firmware power/overcurrent setup"); @@ -673,20 +673,24 @@ retry: /* handle root hub init quirks ... */ val = roothub_a (ohci); - val &= ~(RH_A_PSM | RH_A_OCPM); + /* Configure for per-port over-current protection by default */ + val &= ~RH_A_NOCP; + val |= RH_A_OCPM; if (ohci->flags & OHCI_QUIRK_SUPERIO) { - /* NSC 87560 and maybe others */ + /* NSC 87560 and maybe others. + * Ganged power switching, no over-current protection. + */ val |= RH_A_NOCP; - val &= ~(RH_A_POTPGT | RH_A_NPS); - ohci_writel (ohci, val, &ohci->regs->roothub.a); + val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM); } else if ((ohci->flags & OHCI_QUIRK_AMD756) || (ohci->flags & OHCI_QUIRK_HUB_POWER)) { /* hub power always on; required for AMD-756 and some - * Mac platforms. ganged overcurrent reporting, if any. + * Mac platforms. */ val |= RH_A_NPS; - ohci_writel (ohci, val, &ohci->regs->roothub.a); } + ohci_writel(ohci, val, &ohci->regs->roothub.a); + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status); ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM, &ohci->regs->roothub.b); diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index c158cda9e4b9..b91d50da6127 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -157,9 +157,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev) * the call to usb_hcd_setup_local_mem() below does just that. 
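The ohci-hcd root-hub rework above now computes the whole roothub.a policy first and writes the register once: per-port over-current protection by default, the SuperIO quirk forcing ganged switching with no over-current protection, and the AMD-756/hub-power quirk forcing power always on. The decision table as a pure function (the RH_A_* values mirror the OHCI HcRhDescriptorA bits; the quirk flag values are illustrative):

#include <stdio.h>

#define RH_A_PSM        (1u << 8)       /* power switching mode */
#define RH_A_NPS        (1u << 9)       /* no power switching */
#define RH_A_OCPM       (1u << 11)      /* over-current per port */
#define RH_A_NOCP       (1u << 12)      /* no over-current protection */
#define RH_A_POTPGT     (0xffu << 24)   /* power-on to power-good time */

#define QUIRK_SUPERIO   0x1
#define QUIRK_HUB_POWER 0x2

static unsigned int roothub_a_policy(unsigned int val, unsigned int quirks)
{
        /* default: per-port over-current protection */
        val &= ~RH_A_NOCP;
        val |= RH_A_OCPM;

        if (quirks & QUIRK_SUPERIO) {
                /* ganged switching, no over-current protection */
                val |= RH_A_NOCP;
                val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
        } else if (quirks & QUIRK_HUB_POWER) {
                val |= RH_A_NPS;        /* hub power always on */
        }
        return val;
}

int main(void)
{
        printf("%#x\n", roothub_a_policy(0, 0));             /* 0x800 */
        printf("%#x\n", roothub_a_policy(0, QUIRK_SUPERIO)); /* 0x1000 */
        return 0;
}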
*/ - if (usb_hcd_setup_local_mem(hcd, mem->start, - mem->start - mem->parent->start, - resource_size(mem)) < 0) + retval = usb_hcd_setup_local_mem(hcd, mem->start, + mem->start - mem->parent->start, + resource_size(mem)); + if (retval < 0) goto err5; retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval) @@ -190,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index e67242e437ed..65985247fc00 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -4149,8 +4149,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev, oxu->is_otg = otg; ret = usb_add_hcd(hcd, irq, IRQF_SHARED); - if (ret < 0) + if (ret < 0) { + usb_put_hcd(hcd); return ERR_PTR(ret); + } device_wakeup_enable(hcd->self.controller); return hcd; diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 76c3f29562d2..448d7b11dec4 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) static int xhci_endpoint_context_show(struct seq_file *s, void *unused) { - int dci; + int ep_index; dma_addr_t dma; struct xhci_hcd *xhci; struct xhci_ep_ctx *ep_ctx; @@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); - for (dci = 1; dci < 32; dci++) { - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + for (ep_index = 0; ep_index < 31; ep_index++) { + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 3c4abb5a1c3f..73aba464b66a 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c @@ -241,7 +241,7 @@ static int xhci_histb_probe(struct platform_device *pdev) /* Initialize dma_mask and coherent_dma_mask to 32-bits */ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) - return ret; + goto disable_pm; hcd = usb_create_hcd(driver, dev, dev_name(dev)); if (!hcd) { diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index af92b2576fe9..1a274f8a5bf1 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -738,15 +738,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, { u32 pls = status_reg & PORT_PLS_MASK; - /* resume state is a xHCI internal state. - * Do not report it to usb core, instead, pretend to be U3, - * thus usb core knows it's not ready for transfer - */ - if (pls == XDEV_RESUME) { - *status |= USB_SS_PORT_LS_U3; - return; - } - /* When the CAS bit is set then warm reset * should be performed on port */ @@ -768,6 +759,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, */ pls |= USB_PORT_STAT_CONNECTION; } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. 
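The xhci-hub reordering above changes only the priority of the translation: the CAS warm-reset check now runs before the internal Resume state is hidden from the USB core as U3, and the Resume and compliance-mode special cases apply only in the disconnected branch. A deliberately reduced model of that ordering (enum values and return strings are illustrative; the driver reads the PORTSC PLS field):

#include <stdio.h>

enum pls { XDEV_U0, XDEV_U3, XDEV_RESUME, XDEV_COMP_MODE };

static const char *report_link_state(enum pls pls, int cas)
{
        if (cas)
                return "needs warm reset";      /* checked before RESUME */
        if (pls == XDEV_COMP_MODE)
                return "compliance: fake connection";
        if (pls == XDEV_RESUME)
                return "U3";    /* internal state, hidden from usb core */
        return pls == XDEV_U0 ? "U0" : "U3";
}

int main(void)
{
        printf("%s\n", report_link_state(XDEV_RESUME, 0));
        printf("%s\n", report_link_state(XDEV_RESUME, 1));
        return 0;
}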
+ */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + /* * If CAS bit isn't set but the Port is already at * Compliance Mode, fake a connection so the USB core @@ -1306,7 +1307,47 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, wIndex, link_state); goto error; } + + /* + * set link to U0, steps depend on current link state. + * U3: set link to U0 and wait for u3exit completion. + * U1/U2: no PLC complete event, only set link to U0. + * Resume/Recovery: device initiated U0, only wait for + * completion + */ + if (link_state == USB_SS_PORT_LS_U0) { + u32 pls = temp & PORT_PLS_MASK; + bool wait_u0 = false; + + /* already in U0 */ + if (pls == XDEV_U0) + break; + if (pls == XDEV_U3 || + pls == XDEV_RESUME || + pls == XDEV_RECOVERY) { + wait_u0 = true; + reinit_completion(&bus_state->u3exit_done[wIndex]); + } + if (pls <= XDEV_U3) /* U1, U2, U3 */ + xhci_set_link_state(xhci, ports[wIndex], + USB_SS_PORT_LS_U0); + if (!wait_u0) { + if (pls > XDEV_U3) + goto error; + break; + } + spin_unlock_irqrestore(&xhci->lock, flags); + if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], + msecs_to_jiffies(100))) + xhci_dbg(xhci, "missing U0 port change event for port %d\n", + wIndex); + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); + break; + } + if (link_state == USB_SS_PORT_LS_U3) { + int retries = 16; slot_id = xhci_find_slot_id_by_port(hcd, xhci, wIndex + 1); if (slot_id) { @@ -1317,17 +1358,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_stop_device(xhci, slot_id, 1); spin_lock_irqsave(&xhci->lock, flags); } - } - - xhci_set_link_state(xhci, ports[wIndex], link_state); - - spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); /* wait device to enter */ - spin_lock_irqsave(&xhci->lock, flags); - - temp = readl(ports[wIndex]->addr); - if (link_state == USB_SS_PORT_LS_U3) + xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3); + spin_unlock_irqrestore(&xhci->lock, flags); + while (retries--) { + usleep_range(4000, 8000); + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) == XDEV_U3) + break; + } + spin_lock_irqsave(&xhci->lock, flags); + temp = readl(ports[wIndex]->addr); bus_state->suspended_ports |= 1 << wIndex; + } break; case USB_PORT_FEAT_POWER: /* @@ -1528,6 +1570,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) } if ((temp & PORT_RC)) reset_change = true; + if (temp & PORT_OC) + status = 1; } if (!status && !reset_change) { xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); @@ -1593,6 +1637,13 @@ retry: port_index); goto retry; } + /* bail out if a port detected an over-current condition */ + if (t1 & PORT_OC) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); + return -EBUSY; + } /* suspend ports in U0, or bail out for new connect changes */ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { if ((t1 & PORT_CSC) && wake_enabled) { @@ -1654,6 +1705,10 @@ retry: hcd->state = HC_STATE_SUSPENDED; bus_state->next_statechange = jiffies + msecs_to_jiffies(10); spin_unlock_irqrestore(&xhci->lock, flags); + + if (bus_state->bus_suspended) + usleep_range(5000, 10000); + return 0; } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 884c601bfa15..7f9f302a73cd 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2134,6 +2134,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned
int num_ports, if (major_revision == 0x03) { rhub = &xhci->usb3_rhub; + /* + * Some hosts incorrectly use sub-minor version for minor + * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01 + * for bcdUSB 0x310). Since there is no USB release with sub + * minor version 0x301 to 0x309, we can assume that they are + * incorrect and fix it here. + */ + if (minor_revision > 0x00 && minor_revision < 0x10) + minor_revision <<= 4; } else if (major_revision <= 0x02) { rhub = &xhci->usb2_rhub; } else { @@ -2552,6 +2561,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->usb3_rhub.bus_state.resume_done[i] = 0; /* Only the USB 2.0 completions will ever be used. */ init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); + init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); } if (scratchpad_alloc(xhci, flags)) diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fea555570ad4..8950d1f10a7f 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, sch_ep->sch_tt = tt; sch_ep->ep = ep; + INIT_LIST_HEAD(&sch_ep->endpoint); + INIT_LIST_HEAD(&sch_ep->tt_endpoint); return sch_ep; } @@ -373,6 +375,32 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } + sch_ep->allocated = used; +} + +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) +{ + struct mu3h_sch_tt *tt = sch_ep->sch_tt; + u32 num_esit, tmp; + int base; + int i, j; + + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + for (i = 0; i < num_esit; i++) { + base = offset + i * sch_ep->esit; + + /* + * Compared with hs bus, no matter what ep type, + * the hub will always delay one uframe to send data + */ + for (j = 0; j < sch_ep->cs_count; j++) { + tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe; + if (tmp > FS_PAYLOAD_MAX) + return -ERANGE; + } + } + + return 0; } static int check_sch_tt(struct usb_device *udev, @@ -399,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev, return -ERANGE; for (i = 0; i < sch_ep->cs_count; i++) - if (test_bit(offset + i, tt->split_bit_map)) + if (test_bit(offset + i, tt->ss_bit_map)) return -ERANGE; } else { @@ -429,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev, cs_count = 7; /* HW limit */ for (i = 0; i < cs_count + 2; i++) { - if (test_bit(offset + i, tt->split_bit_map)) + if (test_bit(offset + i, tt->ss_bit_map)) return -ERANGE; } @@ -445,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev, sch_ep->num_budget_microframes = sch_ep->esit; } - return 0; + return check_fs_bus_bw(sch_ep, offset); } static void update_sch_tt(struct usb_device *udev, - struct mu3h_sch_ep_info *sch_ep) + struct mu3h_sch_ep_info *sch_ep, bool used) { struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 base, num_esit; + int bw_updated; + int bits; int i, j; num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + bits = (sch_ep->ep_type == ISOC_OUT_EP) ? 
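The minor-revision fixup in xhci_add_in_port() above is a single shift: hosts that report bcdUSB 0x310 with minor 0x01 instead of 0x10 get the value moved into the correct nibble, which is safe because no USB release uses minor values 0x01..0x09. As a one-function sketch:

#include <stdio.h>

static unsigned int fix_minor_revision(unsigned int minor)
{
        if (minor > 0x00 && minor < 0x10)
                minor <<= 4;    /* 0x01 -> 0x10, 0x02 -> 0x20 */
        return minor;
}

int main(void)
{
        printf("%#x %#x %#x\n", fix_minor_revision(0x01),
               fix_minor_revision(0x02), fix_minor_revision(0x10));
        return 0;
}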
sch_ep->cs_count : 1; + + if (used) + bw_updated = sch_ep->bw_cost_per_microframe; + else + bw_updated = -sch_ep->bw_cost_per_microframe; + for (i = 0; i < num_esit; i++) { base = sch_ep->offset + i * sch_ep->esit; - for (j = 0; j < sch_ep->num_budget_microframes; j++) - set_bit(base + j, tt->split_bit_map); + + for (j = 0; j < bits; j++) { + if (used) + set_bit(base + j, tt->ss_bit_map); + else + clear_bit(base + j, tt->ss_bit_map); + } + + for (j = 0; j < sch_ep->cs_count; j++) + tt->fs_bus_bw[base + j] += bw_updated; } - list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list); + if (used) + list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list); + else + list_del(&sch_ep->tt_endpoint); } static int check_sch_bw(struct usb_device *udev, @@ -532,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev, if (!tt_offset_ok) return -ERANGE; - update_sch_tt(udev, sch_ep); + update_sch_tt(udev, sch_ep, 1); } /* update bus bandwidth info */ @@ -541,6 +589,23 @@ static int check_sch_bw(struct usb_device *udev, return 0; } +static void destroy_sch_ep(struct usb_device *udev, + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) +{ + /* only release bandwidth that check_sch_bw() has allocated */ + if (sch_ep->allocated) { + update_bus_bw(sch_bw, sch_ep, 0); + if (sch_ep->sch_tt) + update_sch_tt(udev, sch_ep, 0); + } + + if (sch_ep->sch_tt) + drop_tt(udev); + + list_del(&sch_ep->endpoint); + kfree(sch_ep); +} + static bool need_bw_sch(struct usb_host_endpoint *ep, enum usb_device_speed speed, int has_tt) { @@ -557,6 +622,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } @@ -579,6 +648,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) mtk->sch_array = sch_array; + INIT_LIST_HEAD(&mtk->bw_ep_chk_list); + return 0; } EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); @@ -597,19 +668,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_virt_device *virt_dev; - struct mu3h_sch_bw_info *sch_bw; struct mu3h_sch_ep_info *sch_ep; - struct mu3h_sch_bw_info *sch_array; unsigned int ep_index; - int bw_index; - int ret = 0; xhci = hcd_to_xhci(hcd); virt_dev = xhci->devs[udev->slot_id]; ep_index = xhci_get_endpoint_index(&ep->desc); slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); - sch_array = mtk->sch_array; xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, @@ -623,40 +689,18 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, */ if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) - ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1)); + ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1)); return 0; } - bw_index = get_bw_index(xhci, udev, ep); - sch_bw = &sch_array[bw_index]; - sch_ep = create_sch_ep(udev, ep, ep_ctx); if (IS_ERR_OR_NULL(sch_ep)) return -ENOMEM; setup_sch_info(udev, ep_ctx, sch_ep); - ret = check_sch_bw(udev, sch_bw, sch_ep); - if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); - if (is_fs_or_ls(udev->speed)) - drop_tt(udev); - - kfree(sch_ep); - return -ENOSPC; - } - - list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); - - ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) - | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); -
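check_fs_bus_bw() above, whose tallies update_sch_tt() maintains, keeps a per-microframe byte count for each TT and refuses any schedule that would push a slot past the full-speed payload budget. A reduced standalone version of that accounting (FS_PAYLOAD_MAX matches the driver's 188-byte constant; the other values are illustrative):

#include <stdio.h>

#define MAX_ESIT        64
#define FS_PAYLOAD_MAX  188

static int fs_bus_bw[MAX_ESIT];

static int check_fs_bw(int esit, int offset, int cs_count, int bw_cost)
{
        int i, j, base;

        for (i = 0; i < MAX_ESIT / esit; i++) {
                base = offset + i * esit;
                for (j = 0; j < cs_count; j++)
                        if (fs_bus_bw[base + j] + bw_cost > FS_PAYLOAD_MAX)
                                return -1;      /* -ERANGE in the driver */
        }
        return 0;
}

int main(void)
{
        fs_bus_bw[2] = 100;
        printf("%d\n", check_fs_bw(8, 2, 1, 64));       /* fits: 164 */
        printf("%d\n", check_fs_bw(8, 2, 1, 120));      /* rejected: 220 */
        return 0;
}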
ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) - | EP_BREPEAT(sch_ep->repeat)); - - xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", - sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, - sch_ep->offset, sch_ep->repeat); + list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list); return 0; } @@ -671,7 +715,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_virt_device *virt_dev; struct mu3h_sch_bw_info *sch_array; struct mu3h_sch_bw_info *sch_bw; - struct mu3h_sch_ep_info *sch_ep; + struct mu3h_sch_ep_info *sch_ep, *tmp; int bw_index; xhci = hcd_to_xhci(hcd); @@ -690,17 +734,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, bw_index = get_bw_index(xhci, udev, ep); sch_bw = &sch_array[bw_index]; - list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { + list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { if (sch_ep->ep == ep) { - update_bus_bw(sch_bw, sch_ep, 0); - list_del(&sch_ep->endpoint); - if (is_fs_or_ls(udev->speed)) { - list_del(&sch_ep->tt_endpoint); - drop_tt(udev); - } - kfree(sch_ep); + destroy_sch_ep(udev, sch_bw, sch_ep); break; } } } EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); + +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index, ret; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + + ret = check_sch_bw(udev, sch_bw, sch_ep); + if (ret) { + xhci_err(xhci, "Not enough bandwidth!\n"); + return -ENOSPC; + } + } + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + struct xhci_ep_ctx *ep_ctx; + struct usb_host_endpoint *ep = sch_ep->ep; + unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); + + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &mtk->sch_array[bw_index]; + + list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts) + | EP_BCSCOUNT(sch_ep->cs_count) + | EP_BBM(sch_ep->burst_mode)); + ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset) + | EP_BREPEAT(sch_ep->repeat)); + + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", + sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, + sch_ep->offset, sch_ep->repeat); + } + + return xhci_check_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); + +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + destroy_sch_ep(udev, sch_bw, sch_ep); + } + + xhci_reset_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 
b18a6baef204..5c0eb35cd007 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) static int xhci_mtk_setup(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { .reset = xhci_mtk_setup, + .check_bandwidth = xhci_mtk_check_bandwidth, + .reset_bandwidth = xhci_mtk_reset_bandwidth, }; static struct hc_driver __read_mostly xhci_mtk_hc_driver; @@ -395,6 +397,15 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; if (mtk->lpm_support) xhci->quirks |= XHCI_LPM_SUPPORT; + if (mtk->u2_lpm_disable) + xhci->quirks |= XHCI_HW_LPM_DISABLE; + + /* + * MTK xHCI 0.96: PSA is 1 by default even if the host doesn't support + * streams, and it's 3 when it does. + */ + if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4) + xhci->quirks |= XHCI_BROKEN_STREAMS; } /* called during probe() after chip reset completes */ @@ -460,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev) return ret; mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable"); + mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable"); /* optional property, ignore the error if it does not exist */ of_property_read_u32(node, "mediatek,u3p-dis-msk", &mtk->u3p_dis_msk); @@ -551,7 +563,8 @@ static int xhci_mtk_probe(struct platform_device *pdev) if (ret) goto put_usb3_hcd; - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) xhci->shared_hcd->can_do_streams = 1; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); @@ -592,6 +605,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; device_init_wakeup(&dev->dev, false); @@ -602,8 +618,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 5ac458b7d2e0..985e7a19f6f6 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -20,13 +20,15 @@ #define XHCI_MTK_MAX_ESIT 64 /** - * @split_bit_map: used to avoid split microframes overlay + * @ss_bit_map: used to avoid start-split microframes overlap + * @fs_bus_bw: array to keep track of bandwidth already used for FS * @ep_list: Endpoints using this TT * @usb_tt: usb TT related * @tt_port: TT port number */ struct mu3h_sch_tt { - DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT); + DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT); + u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; struct usb_tt *usb_tt; int tt_port; @@ -59,6 +61,7 @@ struct mu3h_sch_bw_info { * @ep_type: endpoint type * @maxpkt: max packet size of endpoint * @ep: address of usb_host_endpoint struct + * @allocated: the bandwidth is already allocated from bus_bw * @offset: which uframe of the interval that transfer should be * scheduled first time within the interval * @repeat: the time gap between two uframes that transfers are @@ -86,6 +89,7 @@ struct mu3h_sch_ep_info { u32 ep_type; u32 maxpkt; void *ep; + bool allocated; /* * mtk xHCI scheduling information put into reserved DWs + *
in ep context @@ -131,6 +135,7 @@ struct xhci_hcd_mtk { struct device *dev; struct usb_hcd *hcd; struct mu3h_sch_bw_info *sch_array; + struct list_head bw_ep_chk_list; struct mu3c_ippc_regs __iomem *ippc_regs; bool has_ippc; int num_u2_ports; @@ -147,6 +152,7 @@ struct xhci_hcd_mtk { struct phy **phys; int num_phys; bool lpm_support; + bool u2_lpm_disable; /* usb remote wakeup */ bool uwk_en; struct regmap *uwk; @@ -166,6 +172,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); #else static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, @@ -179,6 +187,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, { } +static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ + return 0; +} + +static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ +} #endif #endif /* _XHCI_MTK_H_ */ diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 60651a50770f..f27d5c2c42f3 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -8,6 +8,7 @@ #include <linux/mbus.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/phy/phy.h> #include <linux/usb.h> #include <linux/usb/hcd.h> @@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.controller; + struct phy *phy; + int ret; + + /* Old bindings miss the PHY handle */ + phy = of_phy_get(dev->of_node, "usb3-phy"); + if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(phy)) + goto phy_out; + + ret = phy_init(phy); + if (ret) + goto phy_put; + + ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); + if (ret) + goto phy_exit; + + ret = phy_power_on(phy); + if (ret == -EOPNOTSUPP) { + /* Skip initialization of XHCI PHY when it is unsupported by firmware */ + dev_warn(dev, "PHY unsupported by firmware\n"); + xhci->quirks |= XHCI_SKIP_PHY_INIT; + } + if (ret) + goto phy_exit; + + phy_power_off(phy); +phy_exit: + phy_exit(phy); +phy_put: + phy_put(phy); +phy_out: + + return 0; +} + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index ca0a3a5721dd..74b4d21a498a 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,6 +12,7 @@ struct usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) @@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + return 0; +} + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { return 0; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1fddc41fa1f3..71ef473df585 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -21,6
+21,8 @@ #define SSIC_PORT_CFG2_OFFSET 0x30 #define PROG_DONE (1 << 30) #define SSIC_PORT_UNUSED (1 << 31) +#define SPARSE_DISABLE_BIT 17 +#define SPARSE_CNTL_ENABLE 0xC12C /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 @@ -43,6 +45,7 @@ #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI 0x15c1 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9 @@ -55,7 +58,11 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 +#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 +#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 +#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 static const char hcd_name[] = "xhci_hcd"; @@ -146,6 +153,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) (pdev->device == 0x15e0 || pdev->device == 0x15e1)) xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND; + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) + xhci->quirks |= XHCI_DISABLE_SPARSE; + if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -212,6 +222,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI || @@ -245,11 +256,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1042) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1142) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + } + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI)) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) @@ -263,6 +281,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x9026) xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 || + pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); @@ -460,6 +483,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd) readl(reg); } +static void xhci_sparse_control_quirk(struct usb_hcd *hcd) +{ + u32 reg; + + reg = readl(hcd->regs + SPARSE_CNTL_ENABLE); + reg &= ~BIT(SPARSE_DISABLE_BIT); + writel(reg, hcd->regs + SPARSE_CNTL_ENABLE); +} + 
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -479,6 +511,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) xhci_ssic_port_unused_quirk(hcd, true); + if (xhci->quirks & XHCI_DISABLE_SPARSE) + xhci_sparse_control_quirk(hcd); + ret = xhci_suspend(xhci, do_wakeup); if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) xhci_ssic_port_unused_quirk(hcd, false); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 315b4552693c..84cfa8544285 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) priv->plat_start(hcd); } +static int xhci_priv_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->plat_setup) + return 0; + + return priv->plat_setup(hcd); +} + static int xhci_priv_init_quirk(struct usb_hcd *hcd) { struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); @@ -101,6 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { + .plat_setup = xhci_mvebu_a3700_plat_setup, .init_quirk = xhci_mvebu_a3700_init_quirk, }; @@ -163,6 +174,8 @@ static int xhci_plat_probe(struct platform_device *pdev) struct usb_hcd *hcd; int ret; int irq; + struct xhci_plat_priv *priv = NULL; + if (usb_disabled()) return -ENODEV; @@ -257,8 +270,7 @@ static int xhci_plat_probe(struct platform_device *pdev) priv_match = of_device_get_match_data(&pdev->dev); if (priv_match) { - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); - + priv = hcd_to_xhci_priv(hcd); /* Just copy data for now */ if (priv_match) *priv = *priv_match; @@ -307,6 +319,16 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); xhci->shared_hcd->tpl_support = hcd->tpl_support; + + if (priv) { + ret = xhci_priv_plat_setup(hcd); + if (ret) + goto disable_usb_phy; + } + + if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) + hcd->skip_phy_initialization = 1; + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto disable_usb_phy; @@ -363,6 +385,7 @@ static int xhci_plat_remove(struct platform_device *dev) struct clk *reg_clk = xhci->reg_clk; struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_get_sync(&dev->dev); xhci->xhc_state |= XHCI_STATE_REMOVING; usb_remove_hcd(shared_hcd); @@ -376,8 +399,9 @@ static int xhci_plat_remove(struct platform_device *dev) clk_disable_unprepare(reg_clk); usb_put_hcd(hcd); - pm_runtime_set_suspended(&dev->dev); pm_runtime_disable(&dev->dev); + pm_runtime_put_noidle(&dev->dev); + pm_runtime_set_suspended(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 5681723fc9cd..b7749151bdfb 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -13,6 +13,7 @@ struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; + int (*plat_setup)(struct usb_hcd *); void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*resume_quirk)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f7a190fb2353..f6b5010deb73 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -541,6 +541,23 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, stream_id); return; } + /* + * 
A cancelled TD can complete with a stall if HW cached the trb. + * In this case the driver can't find cur_td, but if the ring is empty we + * can move the dequeue pointer to the current enqueue position. + */ + if (!cur_td) { + if (list_empty(&ep_ring->td_list)) { + state->new_deq_seg = ep_ring->enq_seg; + state->new_deq_ptr = ep_ring->enqueue; + state->new_cycle_state = ep_ring->cycle_state; + goto done; + } else { + xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n"); + return; + } + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Finding endpoint context"); @@ -586,6 +603,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = new_seg; state->new_deq_ptr = new_deq; +done: /* Don't update the ring cycle state for the producer (us). */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Cycle state = 0x%x", state->new_cycle_state); @@ -677,11 +695,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); /* for in transfers we need to copy the data from bounce to sg */ - len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, - seg->bounce_len, seg->bounce_offs); - if (len != seg->bounce_len) - xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", - len, seg->bounce_len); + if (urb->num_sgs) { + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + } else { + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, + seg->bounce_len); + } seg->bounce_len = 0; seg->bounce_offs = 0; } @@ -1673,6 +1696,7 @@ static void handle_port_status(struct xhci_hcd *xhci, (portsc & PORT_PLS_MASK) == XDEV_U1 || (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); + complete(&bus_state->u3exit_done[hcd_portnum]); /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. If it's a device @@ -1847,8 +1871,8 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, if (reset_type == EP_HARD_RESET) { ep->ep_state |= EP_HARD_CLEAR_TOGGLE; - xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td); - xhci_clear_hub_tt_buffer(xhci, td, ep); + xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, + td); } xhci_ring_cmd_db(xhci); } @@ -1969,11 +1993,18 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { - /* Issue a reset endpoint command to clear the host side * halt, followed by a set dequeue command to move the * dequeue pointer past the TD. * The class driver clears the device side halt later. + /* + * xhci internal endpoint state will go to a "halt" state for + * any stall, including default control pipe protocol stall. + * To clear the host side halt we need to issue a reset endpoint + * command, followed by a set dequeue command to move past the + * TD. + * Class drivers clear the device side halt from a functional + * stall later. Hub TT buffer should only be cleared for FS/LS + * devices behind HS hubs for functional stalls.
*/ + if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) + xhci_clear_hub_tt_buffer(xhci, td, ep); xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, td, EP_HARD_RESET); } else { @@ -2268,7 +2299,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, remaining = 0; break; case COMP_USB_TRANSACTION_ERROR: - if ((ep_ring->err_count++ > MAX_SOFT_RETRY) || + if (xhci->quirks & XHCI_NO_SOFT_RETRY || + (ep_ring->err_count++ > MAX_SOFT_RETRY) || le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) break; *status = 0; @@ -2526,6 +2558,15 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", slot_id, ep_index); } + if (trb_comp_code == COMP_STALL_ERROR || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + xhci_cleanup_halted_endpoint(xhci, slot_id, + ep_index, + ep_ring->stream_id, + NULL, + EP_HARD_RESET); + } goto cleanup; } @@ -2883,6 +2924,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[0] = cpu_to_le32(field1); trb->field[1] = cpu_to_le32(field2); trb->field[2] = cpu_to_le32(field3); + /* make sure TRB is fully written before giving it to the controller */ + wmb(); trb->field[3] = cpu_to_le32(field4); trace_xhci_queue_trb(ring, trb); @@ -3226,12 +3269,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, /* create a max max_pkt sized bounce buffer pointed to by last trb */ if (usb_urb_dir_out(urb)) { - len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, - seg->bounce_buf, new_buff_len, enqd_len); - if (len != new_buff_len) - xhci_warn(xhci, - "WARN Wrong bounce buffer write length: %zu != %d\n", - len, new_buff_len); + if (urb->num_sgs) { + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, + seg->bounce_buf, new_buff_len, enqd_len); + if (len != new_buff_len) + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", + len, new_buff_len); + } else { + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); + } + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { @@ -3386,8 +3433,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* New sg entry */ --num_sgs; sent_len -= block_len; - if (num_sgs != 0) { - sg = sg_next(sg); + sg = sg_next(sg); + if (num_sgs != 0 && sg) { block_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); addr += sent_len; diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index b8e24ccba9f3..6087b1fa530f 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -562,6 +562,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, enable); if (err < 0) break; + + /* + * wait 500us for LFPS detector to be disabled before + * sending ACK + */ + if (!enable) + usleep_range(500, 1000); } if (err < 0) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9b3b1b16eafb..de05ac9d3ae1 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) struct device *dev = xhci_to_hcd(xhci)->self.sysdev; int err, i; u64 val; + u32 intrs; /* * Some Renesas controllers get into a weird state if they are @@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) if (upper_32_bits(val)) xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); - for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) { + intrs = min_t(u32, 
HCS_MAX_INTRS(xhci->hcs_params1), + ARRAY_SIZE(xhci->run_regs->ir_set)); + + for (i = 0; i < intrs; i++) { struct xhci_intr_reg __iomem *ir; ir = &xhci->run_regs->ir_set[i]; @@ -883,44 +887,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +/* + * Disable port wake bits if do_wakeup is not set. + * + * Also clear a possible internal port wake state left hanging for ports that + * detected termination but never successfully enumerated (trained to U0). + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done + * at enumeration clears this wake; force one here as well for unconnected ports. + */ + +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, + struct xhci_hub *rhub, + bool do_wakeup) { - struct xhci_port **ports; - int port_index; unsigned long flags; u32 t1, t2, portsc; + int i; spin_lock_irqsave(&xhci->lock, flags); - /* disable usb3 ports Wake bits */ - port_index = xhci->usb3_rhub.num_ports; - ports = xhci->usb3_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; - if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb3_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); - } - } + for (i = 0; i < rhub->num_ports; i++) { + portsc = readl(rhub->ports[i]->addr); + t1 = xhci_port_state_to_neutral(portsc); + t2 = t1; + + /* clear wake bits if do_wakeup is not set */ + if (!do_wakeup) + t2 &= ~PORT_WAKE_BITS; + + /* Don't touch csc bit if connected or connect change is set */ + if (!(portsc & (PORT_CSC | PORT_CONNECT))) + t2 |= PORT_CSC; - /* disable usb2 ports Wake bits */ - port_index = xhci->usb2_rhub.num_ports; - ports = xhci->usb2_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - portsc = t1; - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; if (t1 != t2) { - writel(t2, ports[port_index]->addr); - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n", - xhci->usb2_rhub.hcd->self.busnum, - port_index + 1, portsc, t2); + writel(t2, rhub->ports[i]->addr); + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", + rhub->hcd->self.busnum, i + 1, portsc, t2); } } spin_unlock_irqrestore(&xhci->lock, flags); @@ -982,11 +984,14 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) xhci->shared_hcd->state != HC_STATE_SUSPENDED) return -EINVAL; - xhci_dbc_suspend(xhci); - /* Clear root port wake on bits if wakeup not allowed. */ - if (!do_wakeup) - xhci_disable_port_wake_on_bits(xhci); + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); + + if (!HCD_HW_ACCESSIBLE(hcd)) + return 0; + + xhci_dbc_suspend(xhci); /* Don't poll the roothubs on bus suspend.
*/ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); @@ -1085,6 +1090,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; + bool pending_portevent = false; if (!hcd->state) return 0; @@ -1157,8 +1163,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - xhci_reset(xhci); + retval = xhci_reset(xhci); spin_unlock_irq(&xhci->lock); + if (retval) + return retval; xhci_cleanup_msix(xhci); xhci_dbg(xhci, "// Disabling event ring interrupts\n"); @@ -1221,13 +1229,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - /* Resume root hubs only when have pending events. */ - if (xhci_pending_portevent(xhci)) { + /* + * Resume roothubs only if there are pending events. + * USB 3 devices resend U3 LFPS wake after a 100ms delay if + * the first wake signalling failed, give it that chance. + */ + pending_portevent = xhci_pending_portevent(xhci); + if (!pending_portevent) { + msleep(120); + pending_portevent = xhci_pending_portevent(xhci); + } + + if (pending_portevent) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } } - /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject @@ -1428,6 +1445,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -1912,8 +1930,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); trace_xhci_add_endpoint(ep_ctx); - xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); - xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, udev->slot_id, @@ -2857,7 +2873,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, * else should be touching the xhci->devs[slot_id] structure, so we * don't need to take the xhci->lock for manipulating that. 
*/ -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { int i; int ret = 0; @@ -2946,6 +2962,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; virt_dev->eps[i].new_ring = NULL; + xhci_debugfs_create_endpoint(xhci, virt_dev, i); } command_cleanup: kfree(command->completion); @@ -2954,7 +2971,7 @@ command_cleanup: return ret; } -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; @@ -3029,19 +3046,19 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, added_ctxs, added_ctxs); } -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td) +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td) { struct xhci_dequeue_state deq_state; - struct usb_device *udev = td->urb->dev; xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Cleaning up stalled endpoint ring"); /* We need to move the HW's dequeue pointer past this TD, * or it will attempt to resend it on the next doorbell ring. */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, stream_id, td, &deq_state); + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, + &deq_state); if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) return; @@ -3052,7 +3069,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, "Queueing new dequeue state"); - xhci_queue_new_dequeue_state(xhci, udev->slot_id, + xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, &deq_state); } else { /* Better hope no one uses the input context between now and the @@ -3063,7 +3080,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Setting up input context for " "configure endpoint command"); - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, + xhci_setup_input_ctx_for_quirk(xhci, slot_id, ep_index, &deq_state); } } @@ -3214,6 +3231,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, /* config ep command clears toggle if add and drop ep flags are set */ ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + goto cleanup; + } + xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ctrl_ctx, ep_flag, ep_flag); xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); @@ -3233,10 +3258,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, @@ -4388,6 +4414,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, int hird, exit_latency; int ret; + if (xhci->quirks & 
XHCI_HW_LPM_DISABLE) + return -EPERM; + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4410,7 +4439,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? "enable" : "disable", port_num + 1); - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { + if (enable) { /* Host supports BESL timeout instead of HIRD */ if (udev->usb2_hw_lpm_besl_capable) { /* if device doesn't have a preferred BESL value use a @@ -4469,6 +4498,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(ports[port_num]->addr, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } @@ -4630,19 +4662,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - /* Prevent U1 if service interval is shorter than U1 exit latency */ - if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { - dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); - return USB3_LPM_DISABLED; - } - } - if (xhci->quirks & XHCI_INTEL_HOST) timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); else timeout_ns = udev->u1_params.sel; + /* Prevent U1 if service interval is shorter than U1 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + /* The U1 timeout is encoded in 1us intervals. * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. 
*/ @@ -4694,19 +4726,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - /* Prevent U2 if service interval is shorter than U2 exit latency */ - if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { - dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); - return USB3_LPM_DISABLED; - } - } - if (xhci->quirks & XHCI_INTEL_HOST) timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); else timeout_ns = udev->u2_params.sel; + /* Prevent U2 if service interval is shorter than U2 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + /* The U2 timeout is encoded in 256us intervals */ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); /* If the necessary timeout value is bigger than what we can set in the @@ -5368,6 +5400,10 @@ void xhci_init_driver(struct hc_driver *drv, drv->reset = over->reset; if (over->start) drv->start = over->start; + if (over->check_bandwidth) + drv->check_bandwidth = over->check_bandwidth; + if (over->reset_bandwidth) + drv->reset_bandwidth = over->reset_bandwidth; } } EXPORT_SYMBOL_GPL(xhci_init_driver); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 98b98a0cd2a8..8798ed031786 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -716,7 +716,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 @@ -1694,6 +1694,7 @@ struct xhci_bus_state { /* Which ports are waiting on RExit to U0 transition. 
*/ unsigned long rexit_ports; struct completion rexit_done[USB_MAXCHILDREN]; + struct completion u3exit_done[USB_MAXCHILDREN]; }; @@ -1872,6 +1873,9 @@ struct xhci_hcd { #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) +#define XHCI_SKIP_PHY_INIT BIT_ULL(37) +#define XHCI_DISABLE_SPARSE BIT_ULL(38) +#define XHCI_NO_SOFT_RETRY BIT_ULL(40) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1909,6 +1913,8 @@ struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); }; #define XHCI_CFC_DELAY 10 @@ -2061,6 +2067,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); @@ -2115,8 +2123,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td); +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, unsigned int stream_id, + struct xhci_td *td); void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index d8d157c4c271..96495fcd952a 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb) if (status != 0) { if ((status != -ENOENT) && + (status != -ESHUTDOWN) && (status != -ECONNRESET)) { dev_dbg(&dev->udev->dev, "%s :nonzero status received: %d\n", __func__, diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index dce20301e367..103c69c692ba 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -2,8 +2,9 @@ /* * Native support for the I/O-Warrior USB devices * - * Copyright (c) 2003-2005 Code Mercenaries GmbH - * written by Christian Lucht <lucht@codemercs.com> + * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH + * written by Christian Lucht <lucht@codemercs.com> and + * Christoph Jung <jung@codemercs.com> * * based on @@ -802,14 +803,28 @@ static int iowarrior_probe(struct usb_interface *interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); - if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) - /* IOWarrior56 has wMaxPacketSize different from report size */ - dev->report_size = 7; + + /* + * Some devices need 
the report size to be different than the + * endpoint size. + */ + if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { + switch (dev->product_id) { + case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + dev->report_size = 7; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + dev->report_size = 4; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW100: + dev->report_size = 13; + break; + } + } /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 407fe7570f3b..f8686139d6f3 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf, USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); - return ret; + return ret < 0 ? ret : -EINVAL; } /* submit urb to poll interrupt endpoint */ diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig index 9b632ab24f03..df7404c526c8 100644 --- a/drivers/usb/misc/sisusbvga/Kconfig +++ b/drivers/usb/misc/sisusbvga/Kconfig @@ -16,7 +16,7 @@ config USB_SISUSBVGA config USB_SISUSBVGA_CON bool "Text console and mode switching support" if USB_SISUSBVGA - depends on VT + depends on VT && BROKEN select FONT_8x16 ---help--- Say Y here if you want a VGA text console via the USB dongle or diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 2ab9600d0898..0734e6dd9386 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, u8 swap8, fromkern = kernbuffer ? 
1 : 0; u16 swap16; u32 swap32, flag = (length >> 28) & 1; - char buf[4]; + u8 buf[4]; /* if neither kernbuffer nor userbuffer are given, assume * data in obuf @@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, /* High level: Gfx (indexed) register access */ #ifdef CONFIG_USB_SISUSBVGA_CON -int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data) +int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) { return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } -int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data) +int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) { return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } #endif -int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data) { int ret; @@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 *data) { int ret; @@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, +int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor) { int ret; @@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, } static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, - int port, u8 idx, u8 data, u8 mask) + u32 port, u8 idx, u8 data, u8 mask) { int ret; u8 tmp; @@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, return ret; } -int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor) { return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); } -int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand) { return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); } @@ -2785,8 +2785,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig) static int sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y, unsigned long arg) { - int retval, port, length; - u32 address; + int retval, length; + u32 port, address; /* All our commands require the device * to be initialized.
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h index 1782c759c4ad..ace09985dae4 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_init.h +++ b/drivers/usb/misc/sisusbvga/sisusb_init.h @@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = { int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); -extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); -extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data); -extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); +extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); +extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data); -extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 * data); -extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor); -extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor); -extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand); void sisusb_delete(struct kref *kref); diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 98ada1a3425c..bae88893ee8e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index be0505b8b5d4..08b72bb22b7e 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -492,11 +492,14 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); + /* make sure URB is idle after timeout or (spurious) CMD_ACK */ + usb_kill_urb(dev->cntl_urb); + mutex_unlock(&dev->io_mutex); if (retval < 0) { diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 9dd02160cca9..e3780d4d6514 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -131,8 +131,12 @@ static void mtu3_device_disable(struct mtu3 *mtu) mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + if (mtu->is_u3_ip) + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_DUAL_MODE); + } mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); } diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c index c96e5dab0a48..25b9635b60bb 100644 --- 
a/drivers/usb/mtu3/mtu3_debugfs.c +++ b/drivers/usb/mtu3/mtu3_debugfs.c @@ -127,7 +127,7 @@ static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base, struct debugfs_regset32 *regset; struct mtu3_regset *mregs; - mregs = devm_kzalloc(mtu->dev, sizeof(*regset), GFP_KERNEL); + mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL); if (!mregs) return; diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c index f93732e53fd8..08db02f3172d 100644 --- a/drivers/usb/mtu3/mtu3_gadget.c +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -587,6 +587,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g) spin_unlock_irqrestore(&mtu->lock, flags); + synchronize_irq(mtu->irq); return 0; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index bf083c1f997f..70ef603f7bb9 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1866,10 +1866,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_DISCONNECT_99: - musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); - schedule_delayed_work(&musb->irq_work, - msecs_to_jiffies(1000)); - break; + if (musb->quirk_retries && !musb->flush_irq_work) { + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + musb->quirk_retries--; + break; + } + fallthrough; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, @@ -1928,7 +1932,7 @@ static void musb_irq_work(struct work_struct *data) struct musb *musb = container_of(data, struct musb, irq_work.work); int error; - error = pm_runtime_get_sync(musb->controller); + error = pm_runtime_resume_and_get(musb->controller); if (error < 0) { dev_err(musb->controller, "Could not enable: %i\n", error); @@ -2102,32 +2106,35 @@ int musb_queue_resume_work(struct musb *musb, { struct musb_pending_work *w; unsigned long flags; + bool is_suspended; int error; if (WARN_ON(!callback)) return -EINVAL; - if (pm_runtime_active(musb->controller)) - return callback(musb, data); - - w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); - if (!w) - return -ENOMEM; - - w->callback = callback; - w->data = data; spin_lock_irqsave(&musb->list_lock, flags); - if (musb->is_runtime_suspended) { + is_suspended = musb->is_runtime_suspended; + + if (is_suspended) { + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) { + error = -ENOMEM; + goto out_unlock; + } + + w->callback = callback; + w->data = data; + list_add_tail(&w->node, &musb->pending_list); error = 0; - } else { - dev_err(musb->controller, "could not add resume work %p\n", - callback); - devm_kfree(musb->controller, w); - error = -EINPROGRESS; } + +out_unlock: spin_unlock_irqrestore(&musb->list_lock, flags); + if (!is_suspended) + error = callback(musb, data); + return error; } EXPORT_SYMBOL_GPL(musb_queue_resume_work); @@ -2721,6 +2728,13 @@ static int musb_resume(struct device *dev) musb_enable_interrupts(musb); musb_platform_enable(musb); + /* session might be disabled in suspend */ + if (musb->port_mode == MUSB_HOST && + !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) { + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + } + spin_lock_irqsave(&musb->lock, flags); error = musb_run_resume_work(musb); if (error) diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index f42858e2b54c..0c6204add616 100644 --- 
a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -168,6 +168,11 @@ static ssize_t musb_test_mode_write(struct file *file, u8 test; char buf[24]; + memset(buf, 0x00, sizeof(buf)); + + if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); if (test) { @@ -176,11 +181,6 @@ static ssize_t musb_test_mode_write(struct file *file, goto ret; } - memset(buf, 0x00, sizeof(buf)); - - if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - if (strstarts(buf, "force host full-speed")) test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS; diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index bfebf1f2e991..9a7e655d5280 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", twl->irq1, status); - return status; + goto err_put_regulator; } status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq, @@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) if (status < 0) { dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", twl->irq2, status); - free_irq(twl->irq1, twl); - return status; + goto err_free_irq1; } twl->asleep = 0; @@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); return 0; + +err_free_irq1: + free_irq(twl->irq1, twl); +err_put_regulator: + regulator_put(twl->usb3v3); + + return status; } static int twl6030_usb_remove(struct platform_device *pdev) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 86637cd066cf..cfc16943979d 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) } usbhs_pipe_clear_without_sequence(pipe, 0, 0); + usbhs_pipe_running(pipe, 0); __usbhsf_pkt_del(pkt); } @@ -803,7 +804,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) return info->dma_map_ctrl(chan->device->dev, pkt, map); } -static void usbhsf_dma_complete(void *arg); +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result); static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; @@ -813,6 +815,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; + dma_cookie_t cookie; fifo = usbhs_pipe_to_fifo(pipe); if (!fifo) @@ -827,11 +830,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) if (!desc) return; - desc->callback = usbhsf_dma_complete; - desc->callback_param = pipe; + desc->callback_result = usbhsf_dma_complete; + desc->callback_param = pkt; - pkt->cookie = dmaengine_submit(desc); - if (pkt->cookie < 0) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); return; } @@ -1152,12 +1155,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt, struct dma_chan *chan, int dtln) { struct usbhs_pipe *pipe = pkt->pipe; - struct dma_tx_state state; size_t received_size; int maxp = usbhs_pipe_get_maxpacket(pipe); - dmaengine_tx_status(chan, pkt->cookie, &state); - received_size = 
pkt->length - state.residue; + received_size = pkt->length - pkt->dma_result->residue; if (dtln) { received_size -= USBHS_USB_DMAC_XFER_SIZE; @@ -1363,13 +1364,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv, return 0; } -static void usbhsf_dma_complete(void *arg) +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result) { - struct usbhs_pipe *pipe = arg; + struct usbhs_pkt *pkt = arg; + struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct device *dev = usbhs_priv_to_dev(priv); int ret; + pkt->dma_result = result; ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE); if (ret < 0) dev_err(dev, "dma_complete run_error %d : %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index c3d3cc35cee0..4a7dc23ce3d3 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -50,7 +50,7 @@ struct usbhs_pkt { struct usbhs_pkt *pkt); struct work_struct work; dma_addr_t dma; - dma_cookie_t cookie; + const struct dmaengine_result *dma_result; void *buf; int length; int trans; diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 9e5afdde1adb..40576e7176d8 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, void usbhs_pipe_free(struct usbhs_pipe *pipe) { + usbhsp_pipe_select(pipe); + usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); usbhsp_put_pipe(pipe); } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 955ab97b9b22..a82ba9cc0c72 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -80,9 +80,12 @@ #define CH341_LCR_CS5 0x00 static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x4348, 0x5523) }, - { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, + { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x9986, 0x7523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); @@ -93,6 +96,7 @@ struct ch341_private { u8 mcr; u8 msr; u8 lcr; + unsigned long quirks; }; static void ch341_set_termios(struct tty_struct *tty, @@ -245,6 +249,53 @@ out: kfree(buffer); return r; } +static int ch341_detect_quirks(struct usb_serial_port *port) +{ + struct ch341_private *priv = usb_get_serial_port_data(port); + struct usb_device *udev = port->serial->dev; + const unsigned int size = 2; + unsigned long quirks = 0; + char *buffer; + int r; + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + /* + * A subset of CH34x devices does not support all features. The + * prescaler is limited and there is no support for sending a RS232 + * break condition. A read failure when trying to set up the latter is + * used to detect these devices. 
+ */ + r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), CH341_REQ_READ_REG, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, + CH341_REG_BREAK, 0, buffer, size, DEFAULT_TIMEOUT); + if (r == -EPIPE) { + dev_dbg(&port->dev, "break control not supported\n"); + r = 0; + goto out; + } + + if (r != size) { + if (r >= 0) + r = -EIO; + dev_err(&port->dev, "failed to read break control: %d\n", r); + goto out; + } + + r = 0; +out: + kfree(buffer); + + if (quirks) { + dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks); + priv->quirks |= quirks; + } + + return r; +} + static int ch341_port_probe(struct usb_serial_port *port) { struct ch341_private *priv; @@ -267,6 +318,11 @@ static int ch341_port_probe(struct usb_serial_port *port) goto error; usb_set_serial_port_data(port, priv); + + r = ch341_detect_quirks(port); + if (r < 0) + goto error; + return 0; error: kfree(priv); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f5143eedbc48..caf27a0d51f0 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ @@ -145,6 +146,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ @@ -201,6 +203,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ @@ -272,6 +277,8 @@ static struct usb_serial_driver cp210x_device = { .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tx_empty = cp210x_tx_empty, + .throttle = usb_serial_generic_throttle, + .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_attach, @@ -915,6 +922,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, u32 baud; u16 bits; u32 ctl_hs; + u32 flow_repl; cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud); @@ -1015,6 +1023,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, ctl_hs = 
le32_to_cpu(flow_ctl.ulControlHandshake); if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) { dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__); + /* + * When the port is closed, the CP210x hardware disables + * auto-RTS and RTS is deasserted but it leaves auto-CTS when + * in hardware flow control mode. When re-opening the port, if + * auto-CTS is enabled on the cp210x, then auto-RTS must be + * re-enabled in the driver. + */ + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); + flow_repl &= ~CP210X_SERIAL_RTS_MASK; + flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); + cp210x_write_reg_block(port, + CP210X_SET_FLOW, + &flow_ctl, + sizeof(flow_ctl)); + cflag |= CRTSCTS; } else { dev_dbg(dev, "%s - flow control = NONE\n", __func__); diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index ebd76ab07b72..36dd688b5795 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb) struct device *dev = &port->dev; int status = urb->status; unsigned long flags; + bool resubmitted = false; - set_bit(0, &port->write_urbs_free); if (status) { dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); + set_bit(0, &port->write_urbs_free); return; } @@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) goto exit; } + resubmitted = true; + dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent); dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled); @@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) exit: spin_unlock_irqrestore(&priv->lock, flags); + if (!resubmitted) + set_bit(0, &port->write_urbs_free); usb_serial_port_softint(port); } diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 216edd5826ca..ecda82198798 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ @@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 35e223751c0e..16b7410ad057 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -25,6 +25,9 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ +#define VENDOR_ID_SAI 0x17dd + /* FRWD Dongle - a GPS sports watch */ #define VENDOR_ID_FRWD 0x6737 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 578ebdd86520..e0ef758eade5 100644 --- 
a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -19,7 +19,6 @@ #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> -#include <linux/workqueue.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/wait.h> @@ -198,14 +197,12 @@ struct digi_port { int dp_throttle_restart; wait_queue_head_t dp_flush_wait; wait_queue_head_t dp_close_wait; /* wait queue for close */ - struct work_struct dp_wakeup_work; struct usb_serial_port *dp_port; }; /* Local Function Declarations */ -static void digi_wakeup_write_lock(struct work_struct *work); static int digi_write_oob_command(struct usb_serial_port *port, unsigned char *buf, int count, int interruptible); static int digi_write_inb_command(struct usb_serial_port *port, @@ -356,26 +353,6 @@ __releases(lock) return timeout; } - -/* - * Digi Wakeup Write - * - * Wake up port, line discipline, and tty processes sleeping - * on writes. - */ - -static void digi_wakeup_write_lock(struct work_struct *work) -{ - struct digi_port *priv = - container_of(work, struct digi_port, dp_wakeup_work); - struct usb_serial_port *port = priv->dp_port; - unsigned long flags; - - spin_lock_irqsave(&priv->dp_port_lock, flags); - tty_port_tty_wakeup(&port->port); - spin_unlock_irqrestore(&priv->dp_port_lock, flags); -} - /* * Digi Write OOB Command * @@ -986,6 +963,7 @@ static void digi_write_bulk_callback(struct urb *urb) unsigned long flags; int ret = 0; int status = urb->status; + bool wakeup; /* port and serial sanity check */ if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) { @@ -1012,6 +990,7 @@ static void digi_write_bulk_callback(struct urb *urb) } /* try to send any buffered data on this port */ + wakeup = true; spin_lock_irqsave(&priv->dp_port_lock, flags); priv->dp_write_urb_in_use = 0; if (priv->dp_out_buf_len > 0) { @@ -1027,19 +1006,18 @@ static void digi_write_bulk_callback(struct urb *urb) if (ret == 0) { priv->dp_write_urb_in_use = 1; priv->dp_out_buf_len = 0; + wakeup = false; } } - /* wake up processes sleeping on writes immediately */ - tty_port_tty_wakeup(&port->port); - /* also queue up a wakeup at scheduler time, in case we */ - /* lost the race in write_chan(). 
*/ - schedule_work(&priv->dp_wakeup_work); - spin_unlock_irqrestore(&priv->dp_port_lock, flags); + if (ret && ret != -EPERM) dev_err_console(port, "%s: usb_submit_urb failed, ret=%d, port=%d\n", __func__, ret, priv->dp_port_num); + + if (wakeup) + tty_port_tty_wakeup(&port->port); } static int digi_write_room(struct tty_struct *tty) @@ -1239,7 +1217,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num) init_waitqueue_head(&priv->dp_transmit_idle_wait); init_waitqueue_head(&priv->dp_flush_wait); init_waitqueue_head(&priv->dp_close_wait); - INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); priv->dp_port = port; init_waitqueue_head(&port->write_wait); @@ -1508,13 +1485,14 @@ static int digi_read_oob_callback(struct urb *urb) rts = C_CRTSCTS(tty); if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) { + bool wakeup = false; + spin_lock_irqsave(&priv->dp_port_lock, flags); /* convert from digi flags to termiox flags */ if (val & DIGI_READ_INPUT_SIGNALS_CTS) { priv->dp_modem_signals |= TIOCM_CTS; - /* port must be open to use tty struct */ if (rts) - tty_port_tty_wakeup(&port->port); + wakeup = true; } else { priv->dp_modem_signals &= ~TIOCM_CTS; /* port must be open to use tty struct */ @@ -1533,6 +1511,9 @@ static int digi_read_oob_callback(struct urb *urb) priv->dp_modem_signals &= ~TIOCM_CD; spin_unlock_irqrestore(&priv->dp_port_lock, flags); + + if (wakeup) + tty_port_tty_wakeup(&port->port); } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) { spin_lock_irqsave(&priv->dp_port_lock, flags); priv->dp_transmit_idle = 1; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9ad44a96dfe3..c00e4177651a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, @@ -1036,6 +1037,11 @@ static const struct usb_device_id id_table_combined[] = { /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, + /* FreeCalypso USB adapters */ + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; @@ -1380,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) index_value = get_ftdi_divisor(tty, port); value = (u16)index_value; index = (u16)(index_value >> 16); - if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) || - (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) { + if (priv->chip_type == FT2232C || priv->chip_type == FT2232H || + priv->chip_type == FT4232H || priv->chip_type == FT232H || + priv->chip_type == FTX) { /* Probably the BM type needs the MSB of the encoded fractional * divider also moved like for the chips above. Any infos? 
*/ index = (u16)((index << 8) | priv->interface); @@ -2480,12 +2487,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct usb_serial_port *port, - struct ftdi_private *priv, char *packet, int len) + struct ftdi_private *priv, unsigned char *buf, int len) { + unsigned char status; int i; - char status; char flag; - char *ch; if (len < 2) { dev_dbg(&port->dev, "malformed packet\n"); @@ -2495,7 +2501,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. */ - status = packet[0] & FTDI_STATUS_B0_MASK; + status = buf[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; @@ -2521,13 +2527,12 @@ static int ftdi_process_packet(struct usb_serial_port *port, } /* save if the transmitter is empty or not */ - if (packet[1] & FTDI_RS_TEMT) + if (buf[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; - len -= 2; - if (!len) + if (len == 2) return 0; /* status only */ /* @@ -2535,40 +2540,41 @@ static int ftdi_process_packet(struct usb_serial_port *port, * data payload to avoid over-reporting. */ flag = TTY_NORMAL; - if (packet[1] & FTDI_RS_ERR_MASK) { + if (buf[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence * over framing errors */ - if (packet[1] & FTDI_RS_BI) { + if (buf[1] & FTDI_RS_BI) { flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); - } else if (packet[1] & FTDI_RS_PE) { + } else if (buf[1] & FTDI_RS_PE) { flag = TTY_PARITY; port->icount.parity++; - } else if (packet[1] & FTDI_RS_FE) { + } else if (buf[1] & FTDI_RS_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Overrun is special, not associated with a char */ - if (packet[1] & FTDI_RS_OE) { + if (buf[1] & FTDI_RS_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } - port->icount.rx += len; - ch = packet + 2; + port->icount.rx += len - 2; if (port->port.console && port->sysrq) { - for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(port, *ch)) - tty_insert_flip_char(&port->port, *ch, flag); + for (i = 2; i < len; i++) { + if (usb_serial_handle_sysrq_char(port, buf[i])) + continue; + tty_insert_flip_char(&port->port, buf[i], flag); } } else { - tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); + tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, + len - 2); } - return len; + return len - 2; } static void ftdi_process_read_urb(struct urb *urb) diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e8373528264c..3d47c6d72256 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,13 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* + * Custom USB adapters made by Falconia Partners LLC + * for FreeCalypso project, ID codes allocated to Falconia by FTDI. 
+ */ +#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150 +#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151 + /* Sienna Serial Interface by Secyourit GmbH */ #define FTDI_SIENNA_PID 0x8348 @@ -160,6 +167,7 @@ #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 633550ec3025..f29c3a936a08 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (bulk_data || - getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || (data_length >= sizeof(u32) && + getLayerId(data) == GARMIN_LAYERID_APPL)) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 4cca0b836f43..3b4d1ff9033d 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial) response = -ENODEV; } - usb_free_urb(edge_serial->interrupt_read_urb); - kfree(edge_serial->interrupt_in_buffer); - - usb_free_urb(edge_serial->read_urb); - kfree(edge_serial->bulk_in_buffer); - - kfree(edge_serial); - - return response; + goto error; } /* start interrupt read for this edgeport this interrupt will * continue as long as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); - if (response) + if (response) { dev_err(ddev, "%s - Error %d submitting control urb\n", __func__, response); + + goto error; + } } return response; + +error: + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + + kfree(edge_serial); + + return response; } diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index d5bff69b1769..7e4ec0b8d01f 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; - *buf_ptr++ = IUU_SET_LED; + if (xmas) { - get_random_bytes(buf_ptr, 6); - *(buf_ptr+7) = 1; + buf_ptr[0] = IUU_SET_LED; + get_random_bytes(buf_ptr + 1, 6); + buf_ptr[7] = 1; } else { iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255); } @@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; + if (xmas) { iuu_rxcmd(urb); return; - } else { - *buf_ptr++ = IUU_SET_LED; - iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); } + + iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); + usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), @@ -534,23 +536,29 @@ static int iuu_uart_flush(struct usb_serial_port *port) struct device *dev = &port->dev; int i; int status; - u8 rxcmd = 
IUU_UART_RX; + u8 *rxcmd; struct iuu_private *priv = usb_get_serial_port_data(port); if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0) return -EIO; + rxcmd = kmalloc(1, GFP_KERNEL); + if (!rxcmd) + return -ENOMEM; + + rxcmd[0] = IUU_UART_RX; + for (i = 0; i < 2; i++) { - status = bulk_immediate(port, &rxcmd, 1); + status = bulk_immediate(port, rxcmd, 1); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_write error\n", __func__); - return status; + goto out_free; } status = read_immediate(port, &priv->len, 1); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - return status; + goto out_free; } if (priv->len > 0) { @@ -558,12 +566,16 @@ static int iuu_uart_flush(struct usb_serial_port *port) status = read_immediate(port, priv->buf, priv->len); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - return status; + goto out_free; } } } dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__); iuu_led(port, 0, 0xF000, 0, 0xFF); + +out_free: + kfree(rxcmd); + return status; } @@ -697,14 +709,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, struct iuu_private *priv = usb_get_serial_port_data(port); unsigned long flags; - if (count > 256) - return -ENOMEM; - spin_lock_irqsave(&priv->lock, flags); + count = min(count, 256 - priv->writelen); + if (count == 0) + goto out; + /* fill the buffer */ memcpy(priv->writebuf + priv->writelen, buf, count); priv->writelen += count; +out: spin_unlock_irqrestore(&priv->lock, flags); return count; diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index bf988f77d400..5dd228da5bdd 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -40,11 +40,12 @@ #define DRIVER_AUTHOR "Brian Warner <warner@lothar.com>" #define DRIVER_DESC "USB Keyspan PDA Converter driver" +#define KEYSPAN_TX_THRESHOLD 16 + struct keyspan_pda_private { int tx_room; int tx_throttled; - struct work_struct wakeup_work; - struct work_struct unthrottle_work; + struct work_struct unthrottle_work; struct usb_serial *serial; struct usb_serial_port *port; }; @@ -97,15 +98,6 @@ static const struct usb_device_id id_table_fake_xircom[] = { }; #endif -static void keyspan_pda_wakeup_write(struct work_struct *work) -{ - struct keyspan_pda_private *priv = - container_of(work, struct keyspan_pda_private, wakeup_work); - struct usb_serial_port *port = priv->port; - - tty_port_tty_wakeup(&port->port); -} - static void keyspan_pda_request_unthrottle(struct work_struct *work) { struct keyspan_pda_private *priv = @@ -120,7 +112,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work) 7, /* request_unthrottle */ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, - 16, /* value: threshold */ + KEYSPAN_TX_THRESHOLD, 0, /* index */ NULL, 0, @@ -139,6 +131,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) int retval; int status = urb->status; struct keyspan_pda_private *priv; + unsigned long flags; + priv = usb_get_serial_port_data(port); switch (status) { @@ -172,18 +166,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) break; case 1: /* status interrupt */ - if (len < 3) { + if (len < 2) { dev_warn(&port->dev, "short interrupt message received\n"); break; } - dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]); + dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]); switch (data[1]) { case 1: /* modemline change */ break; case 2: /* tx unthrottle interrupt */ + spin_lock_irqsave(&port->lock, 
flags); priv->tx_throttled = 0; + priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD); + spin_unlock_irqrestore(&port->lock, flags); /* queue up a wakeup at scheduler time */ - schedule_work(&priv->wakeup_work); + usb_serial_port_softint(port); break; default: break; @@ -443,6 +440,7 @@ static int keyspan_pda_write(struct tty_struct *tty, int request_unthrottle = 0; int rc = 0; struct keyspan_pda_private *priv; + unsigned long flags; priv = usb_get_serial_port_data(port); /* guess how much room is left in the device's ring buffer, and if we @@ -462,13 +460,13 @@ static int keyspan_pda_write(struct tty_struct *tty, the TX urb is in-flight (wait until it completes) the device is full (wait until it says there is room) */ - spin_lock_bh(&port->lock); + spin_lock_irqsave(&port->lock, flags); if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) { - spin_unlock_bh(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); return 0; } clear_bit(0, &port->write_urbs_free); - spin_unlock_bh(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); /* At this point the URB is in our control, nobody else can submit it again (the only sudden transition was the one from EINPROGRESS to @@ -514,7 +512,8 @@ static int keyspan_pda_write(struct tty_struct *tty, goto exit; } } - if (count > priv->tx_room) { + + if (count >= priv->tx_room) { /* we're about to completely fill the Tx buffer, so we'll be throttled afterwards. */ count = priv->tx_room; @@ -547,7 +546,7 @@ static int keyspan_pda_write(struct tty_struct *tty, rc = count; exit: - if (rc < 0) + if (rc <= 0) set_bit(0, &port->write_urbs_free); return rc; } @@ -556,27 +555,28 @@ exit: static void keyspan_pda_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; - struct keyspan_pda_private *priv; set_bit(0, &port->write_urbs_free); - priv = usb_get_serial_port_data(port); /* queue up a wakeup at scheduler time */ - schedule_work(&priv->wakeup_work); + usb_serial_port_softint(port); } static int keyspan_pda_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; - struct keyspan_pda_private *priv; - priv = usb_get_serial_port_data(port); - /* used by n_tty.c for processing of tabs and such. Giving it our - conservative guess is probably good enough, but needs testing by - running a console through the device. 
*/ - return priv->tx_room; -} + struct keyspan_pda_private *priv = usb_get_serial_port_data(port); + unsigned long flags; + int room = 0; + spin_lock_irqsave(&port->lock, flags); + if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled) + room = priv->tx_room; + spin_unlock_irqrestore(&port->lock, flags); + + return room; +} static int keyspan_pda_chars_in_buffer(struct tty_struct *tty) { @@ -656,8 +656,12 @@ error: } static void keyspan_pda_close(struct usb_serial_port *port) { + struct keyspan_pda_private *priv = usb_get_serial_port_data(port); + usb_kill_urb(port->write_urb); usb_kill_urb(port->interrupt_in_urb); + + cancel_work_sync(&priv->unthrottle_work); } @@ -715,7 +719,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port) if (!priv) return -ENOMEM; - INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); priv->serial = port->serial; priv->port = port; diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 5ee48b0650c4..5f6b82ebccc5 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -276,12 +276,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) priv->cfg.unknown2 = cfg->unknown2; spin_unlock_irqrestore(&priv->lock, flags); + kfree(cfg); + /* READ_ON and urb submission */ rc = usb_serial_generic_open(tty, port); - if (rc) { - retval = rc; - goto err_free_cfg; - } + if (rc) + return rc; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), @@ -324,8 +324,6 @@ err_disable_read: KLSI_TIMEOUT); err_generic_close: usb_serial_generic_close(port); -err_free_cfg: - kfree(cfg); return retval; } diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 2ec4eeacebc7..aefc1b58d956 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -638,6 +638,8 @@ static void parport_mos7715_restore_state(struct parport *pp, spin_unlock(&release_lock); return; } + mos_parport->shadowDCR = s->u.pc.ctr; + mos_parport->shadowECR = s->u.pc.ecr; write_parport_reg_nonblock(mos_parport, MOS7720_DCR, mos_parport->shadowDCR); write_parport_reg_nonblock(mos_parport, MOS7720_ECR, @@ -1248,8 +1250,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index ab4bf8d6d7df..2b8a0d4b66fc 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1330,8 +1330,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8bfffca3e4ae..5c167bc089a0 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -245,10 +245,12 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define 
QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_EC200T 0x6026 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -417,11 +419,14 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8 0x0053 #define CINTERION_PRODUCT_AHXX 0x0055 #define CINTERION_PRODUCT_PLXX 0x0060 +#define CINTERION_PRODUCT_EXS82 0x006c #define CINTERION_PRODUCT_PH8_2RMNET 0x0082 #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 #define CINTERION_PRODUCT_CLS8 0x00b0 +#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 +#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -527,6 +532,7 @@ static void option_instat_callback(struct urb *urb); /* Cellient products */ #define CELLIENT_VENDOR_ID 0x2692 #define CELLIENT_PRODUCT_MEN200 0x9005 +#define CELLIENT_PRODUCT_MPL200 0x9025 /* Hyundai Petatel Inc. products */ #define PETATEL_VENDOR_ID 0x1ff4 @@ -559,6 +565,9 @@ static void option_instat_callback(struct urb *urb); /* Device flags */ +/* Highest interface number which can be used with NCTRL() and RSVD() */ +#define FLAG_IFNUM_MAX 7 + /* Interface does not support modem-control requests */ #define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8) @@ -1093,10 +1102,15 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -1105,10 +1119,13 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */ + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { 
USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1157,6 +1174,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */ + .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), @@ -1175,6 +1196,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */ + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1187,6 +1212,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), @@ -1201,6 +1228,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), @@ -1538,7 +1569,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ + .driver_info = RSVD(3) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, @@ -1812,6 +1844,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS 
mode */ .driver_info = RSVD(7) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), @@ -1876,12 +1910,17 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), .driver_info = RSVD(0) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), @@ -1969,6 +2008,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), + .driver_info = RSVD(1) | RSVD(4) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ @@ -2018,12 +2059,17 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, - { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -2068,6 +2114,14 @@ static struct usb_serial_driver * const serial_drivers[] = { module_usb_serial_driver(serial_drivers, option_ids); +static bool 
iface_is_reserved(unsigned long device_flags, u8 ifnum) +{ + if (ifnum > FLAG_IFNUM_MAX) + return false; + + return device_flags & RSVD(ifnum); +} + static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { @@ -2084,7 +2138,7 @@ static int option_probe(struct usb_serial *serial, * the same class/subclass/protocol as the serial interfaces. Look at * the Windows driver .INF files for reserved interface numbers. */ - if (device_flags & RSVD(iface_desc->bInterfaceNumber)) + if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber)) return -ENODEV; /* @@ -2100,6 +2154,14 @@ static int option_probe(struct usb_serial *serial, return 0; } +static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum) +{ + if (ifnum > FLAG_IFNUM_MAX) + return false; + + return device_flags & NCTRL(ifnum); +} + static int option_attach(struct usb_serial *serial) { struct usb_interface_descriptor *iface_desc; @@ -2115,7 +2177,7 @@ static int option_attach(struct usb_serial *serial) iface_desc = &serial->interface->cur_altsetting->desc; - if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) + if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber)) data->use_send_setup = 1; if (device_flags & ZLP) diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index e6d9b79d3521..e290b250f45c 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -94,6 +94,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index c98db6b650a5..a897680473a7 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -121,6 +121,7 @@ /* Hewlett-Packard POS Pole Displays */ #define HP_VENDOR_ID 0x03f0 +#define HP_LD381GC_PRODUCT_ID 0x0183 #define HP_LM920_PRODUCT_ID 0x026b #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 613f91add03d..0f60363c1bbc 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ @@ -173,6 +174,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ diff --git 
a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index ef23acc9b9ce..b1449d4914cc 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1420,14 +1420,19 @@ static int ti_set_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; - struct ti_port *tport = usb_get_serial_port_data(port); + struct tty_port *tport = &port->port; unsigned cwait; cwait = ss->closing_wait; if (cwait != ASYNC_CLOSING_WAIT_NONE) cwait = msecs_to_jiffies(10 * ss->closing_wait); - tport->tp_port->port.closing_wait = cwait; + if (!capable(CAP_SYS_ADMIN)) { + if (cwait != tport->closing_wait) + return -EPERM; + } + + tport->closing_wait = cwait; return 0; } diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 13be21aad2f4..b2285d5a869d 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty, ss->line = port->minor; ss->port = port->port_number; ss->baud_base = tty_get_baud_rate(port->port.tty); - ss->close_delay = port->port.close_delay / 10; + ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10; ss->closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : - port->port.closing_wait / 10; + jiffies_to_msecs(port->port.closing_wait) / 10; return 0; } EXPORT_SYMBOL(usb_wwan_get_serial_info); @@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty, unsigned int closing_wait, close_delay; int retval = 0; - close_delay = ss->close_delay * 10; + close_delay = msecs_to_jiffies(ss->close_delay * 10); closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ? - ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10; + ASYNC_CLOSING_WAIT_NONE : + msecs_to_jiffies(ss->closing_wait * 10); mutex_lock(&port->port.mutex); @@ -270,6 +271,10 @@ static void usb_wwan_indat_callback(struct urb *urb) if (status) { dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", __func__, status, endpoint); + + /* don't resubmit on fatal errors */ + if (status == -ESHUTDOWN || status == -ENOENT) + return; } else { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index 46635fa4a340..5c1f846a795e 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile @@ -8,7 +8,7 @@ ccflags-y := -I $(srctree)/drivers/scsi -ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_STORAGE +#ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_STORAGE obj-$(CONFIG_USB_UAS) += uas.o obj-$(CONFIG_USB_STORAGE) += usb-storage.o diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 96cb0409dd89..737b765d0f6e 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -651,6 +651,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) need_auto_sense = 1; } + /* Some devices (Kindle) require another command after SYNC CACHE */ + if ((us->fflags & US_FL_SENSE_AFTER_SYNC) && + srb->cmnd[0] == SYNCHRONIZE_CACHE) { + usb_stor_dbg(us, "-- sense after SYNC CACHE\n"); + need_auto_sense = 1; + } + /* * If we have a failure, we're going to do a REQUEST_SENSE * automatically. 
Note that we differentiate between a command diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index bb2198496f42..678903d1ce4d 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo); static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status); +/* + * This driver needs its own workqueue, as we need to control memory allocation. + * + * In the course of error handling and power management uas_wait_for_pending_cmnds() + * needs to flush pending work items. In these contexts we cannot allocate memory + * by doing block IO as we would deadlock. For the same reason we cannot wait + * for anything allocating memory not heeding these constraints. + * + * So we have to control all work items that can be on the workqueue we flush. + * Hence we cannot share a queue and need our own. + */ +static struct workqueue_struct *workqueue; + static void uas_do_work(struct work_struct *work) { struct uas_dev_info *devinfo = @@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work) if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } out: spin_unlock_irqrestore(&devinfo->lock, flags); @@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo) lockdep_assert_held(&devinfo->lock); cmdinfo->state |= IS_IN_WORK_LIST; - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } static void uas_zap_pending(struct uas_dev_info *devinfo, int result) @@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, struct uas_cmd_info *ci = (void *)&cmnd->SCp; struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; + if (status == -ENODEV) /* too late */ + return; + scmd_printk(KERN_INFO, cmnd, "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", prefix, status, cmdinfo->uas_tag, @@ -646,8 +662,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, if (devinfo->resetting) { cmnd->result = DID_ERROR << 16; cmnd->scsi_done(cmnd); - spin_unlock_irqrestore(&devinfo->lock, flags); - return 0; + goto zombie; } /* Find a free uas-tag */ @@ -683,6 +698,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); err = uas_submit_urbs(cmnd, devinfo); + /* + * in case of fatal errors the SCSI layer is peculiar + * a command that has finished is a success for the purpose + * of queueing, no matter how fatal the error + */ + if (err == -ENODEV) { + cmnd->result = DID_ERROR << 16; + cmnd->scsi_done(cmnd); + goto zombie; + } if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { @@ -693,6 +718,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, } devinfo->cmnd[idx] = cmnd; +zombie: spin_unlock_irqrestore(&devinfo->lock, flags); return 0; } @@ -841,6 +867,9 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_NO_READ_CAPACITY_16) sdev->no_read_capacity_16 = 1; + /* Some disks cannot handle WRITE_SAME */ + if (devinfo->flags & US_FL_NO_SAME) + sdev->no_write_same = 1; /* * Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. 
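The workqueue comment added to uas.c above is the heart of this change: flushing a shared queue from error handling or runtime PM can deadlock on memory allocation, so the driver allocates its own WQ_MEM_RECLAIM queue and only ever flushes work it queued itself. A minimal stand-alone sketch of that pattern follows (module and symbol names here are hypothetical, not part of the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Private queue: WQ_MEM_RECLAIM guarantees a rescuer thread, so
 * flushing it can make forward progress even under memory pressure. */
static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Only work we queued ourselves can be on demo_wq, so this
	 * flush cannot end up waiting on someone else's allocating
	 * work item. */
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");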
@@ -1227,7 +1256,31 @@ static struct usb_driver uas_driver = { .id_table = uas_usb_ids, }; -module_usb_driver(uas_driver); +static int __init uas_init(void) +{ + int rv; + + workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0); + if (!workqueue) + return -ENOMEM; + + rv = usb_register(&uas_driver); + if (rv) { + destroy_workqueue(workqueue); + return rv; + } + + return 0; +} + +static void __exit uas_exit(void) +{ + usb_deregister(&uas_driver); + destroy_workqueue(workqueue); +} + +module_init(uas_init); +module_exit(uas_exit); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(USB_STORAGE); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1880f3e13f57..861153d294b6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2211,6 +2211,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_READ_DISC_INFO ), +/* + * Reported by Matthias Schwarzott <zzam@gentoo.org> + * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that + * the host may be finished with it, and automatically ejects its + * emulated media unless it receives another command within one second. + */ +UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999, + "Amazon", + "Kindle", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SENSE_AFTER_SYNC ), + /* * Reported by Oliver Neukum <oneukum@suse.com> * This device morphes spontaneously into another device if the access @@ -2323,6 +2335,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000, USB_SC_DEVICE,USB_PR_DEVICE,NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Cyril Roelandt <tipecaml@gmail.com> */ +UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), + /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, "iRiver", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1b23741036ee..cb7b15ecb7ab 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,23 @@ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> */ +/* Reported-by: Till Dörges <doerges@pre-sense.de> */ +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + "Sony", + "PSZ-HA*", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + +/* + * Initially Reported-by: Julian Groß <julian.g@posteo.de> + * Further reports by David C. Partridge <david.partridge@perdrix.co.uk> + */ +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
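The unusual_devs.h and unusual_uas.h entries above are rows in a quirk table: a device is matched on VID/PID plus an inclusive bcdDevice range, and the listed US_FL_* bits are then OR-ed into its flags. A stand-alone illustration of that lookup (the flag names and bit values here are invented for the example, not the kernel's):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FL_SENSE_AFTER_SYNC (UINT32_C(1) << 0) /* illustrative bits */
#define FL_NO_SAME          (UINT32_C(1) << 1)

struct quirk {
	uint16_t vid, pid;
	uint16_t bcd_min, bcd_max; /* inclusive bcdDevice range */
	uint32_t flags;
};

static const struct quirk quirks[] = {
	{ 0x1949, 0x0004, 0x0000, 0x9999, FL_SENSE_AFTER_SYNC },
	{ 0x059f, 0x105f, 0x0000, 0x9999, FL_NO_SAME },
};

static uint32_t quirk_flags(uint16_t vid, uint16_t pid, uint16_t bcd)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->vid == vid && q->pid == pid &&
		    bcd >= q->bcd_min && bcd <= q->bcd_max)
			return q->flags;
	}
	return 0;
}

int main(void)
{
	printf("Kindle flags: %#" PRIx32 "\n",
	       quirk_flags(0x1949, 0x0004, 0x0100));
	return 0;
}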
@@ -73,6 +90,20 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */ +UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + +/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */ +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 9a79cd9762f3..2349dfa3b176 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) case 'j': f |= US_FL_NO_REPORT_LUNS; break; + case 'k': + f |= US_FL_NO_SAME; + break; case 'l': f |= US_FL_NOT_LOCKABLE; break; diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig index 187690fd1a5b..60d375e9c3c7 100644 --- a/drivers/usb/typec/altmodes/Kconfig +++ b/drivers/usb/typec/altmodes/Kconfig @@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE to enable support for VirtualLink devices with NVIDIA GPUs. To compile this driver as a module, choose M here: the - module will be called typec_displayport. + module will be called typec_nvidia. endmenu diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 74cb3c2ecb34..c950171556d8 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -192,7 +192,10 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm); const struct typec_altmode * typec_altmode_get_partner(struct typec_altmode *adev) { - return adev ? &to_altmode(adev)->partner->adev : NULL; + if (!adev || !to_altmode(adev)->partner) + return NULL; + + return &to_altmode(adev)->partner->adev; } EXPORT_SYMBOL_GPL(typec_altmode_get_partner); diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 753645bb2527..59e304a341f8 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -20,6 +20,15 @@ #define PD_RETRY_COUNT 3 +#define tcpc_presenting_cc1_rd(reg) \ + (!(TCPC_ROLE_CTRL_DRP & (reg)) && \ + (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \ + (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT))) +#define tcpc_presenting_cc2_rd(reg) \ + (!(TCPC_ROLE_CTRL_DRP & (reg)) && \ + (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \ + (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT))) + struct tcpci { struct device *dev; @@ -168,19 +177,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1, enum typec_cc_status *cc2) { struct tcpci *tcpci = tcpc_to_tcpci(tcpc); - unsigned int reg; + unsigned int reg, role_control; int ret; + ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control); + if (ret < 0) + return ret; + ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, ®); if (ret < 0) return ret; *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) & TCPC_CC_STATUS_CC1_MASK, - reg & TCPC_CC_STATUS_TERM); + reg & TCPC_CC_STATUS_TERM || + tcpc_presenting_cc1_rd(role_control)); *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) & TCPC_CC_STATUS_CC2_MASK, - reg & TCPC_CC_STATUS_TERM); + reg & TCPC_CC_STATUS_TERM || + tcpc_presenting_cc2_rd(role_control)); return 0; } diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 
017389021b96..b56a0880a044 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -179,26 +179,6 @@ out: return tcpci_irq(chip->tcpci); } -static int rt1711h_init_alert(struct rt1711h_chip *chip, - struct i2c_client *client) -{ - int ret; - - /* Disable chip interrupts before requesting irq */ - ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); - if (ret < 0) - return ret; - - ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, - rt1711h_irq, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - dev_name(chip->dev), chip); - if (ret < 0) - return ret; - enable_irq_wake(client->irq); - return 0; -} - static int rt1711h_sw_reset(struct rt1711h_chip *chip) { int ret; @@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = rt1711h_init_alert(chip, client); + /* Disable chip interrupts before requesting irq */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); if (ret < 0) return ret; @@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client, if (IS_ERR_OR_NULL(chip->tcpci)) return PTR_ERR(chip->tcpci); + ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, + rt1711h_irq, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + dev_name(chip->dev), chip); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); + return 0; } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 5f61d9977a15..b7852a54efb3 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -181,12 +181,27 @@ struct pd_mode_data { struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX]; }; +/* + * @min_volt: Actual min voltage at the local port + * @req_min_volt: Requested min voltage to the port partner + * @max_volt: Actual max voltage at the local port + * @req_max_volt: Requested max voltage to the port partner + * @max_curr: Actual max current at the local port + * @req_max_curr: Requested max current of the port partner + * @req_out_volt: Requested output voltage to the port partner + * @req_op_curr: Requested operating current to the port partner + * @supported: Partner has at least one APDO, hence supports PPS + * @active: PPS mode is active + */ struct pd_pps_data { u32 min_volt; + u32 req_min_volt; u32 max_volt; + u32 req_max_volt; u32 max_curr; - u32 out_volt; - u32 op_curr; + u32 req_max_curr; + u32 req_out_volt; + u32 req_op_curr; bool supported; bool active; }; @@ -285,7 +300,10 @@ struct tcpm_port { unsigned int operating_snk_mw; bool update_sink_caps; - /* Requested current / voltage */ + /* Requested current / voltage to the port partner */ + u32 req_current_limit; + u32 req_supply_voltage; + /* Actual current / voltage limit of the local port */ u32 current_limit; u32 supply_voltage; @@ -739,6 +757,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) port->supply_voltage = mv; port->current_limit = max_ma; + power_supply_changed(port->psy); if (port->tcpc->set_current_limit) ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); @@ -1723,8 +1742,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, case SNK_TRANSITION_SINK: if (port->vbus_present) { tcpm_set_current_limit(port, - port->current_limit, - port->supply_voltage); + port->req_current_limit, + port->req_supply_voltage); port->explicit_contract = true; tcpm_set_state(port, SNK_READY, 0); } else { @@ -1763,8 +1782,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, break; case SNK_NEGOTIATE_PPS_CAPABILITIES: /* Revert data back from any requested PPS 
updates */ - port->pps_data.out_volt = port->supply_voltage; - port->pps_data.op_curr = port->current_limit; + port->pps_data.req_out_volt = port->supply_voltage; + port->pps_data.req_op_curr = port->current_limit; port->pps_status = (type == PD_CTRL_WAIT ? -EAGAIN : -EOPNOTSUPP); tcpm_set_state(port, SNK_READY, 0); @@ -1796,8 +1815,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, break; case SNK_NEGOTIATE_PPS_CAPABILITIES: port->pps_data.active = true; - port->supply_voltage = port->pps_data.out_volt; - port->current_limit = port->pps_data.op_curr; + port->pps_data.min_volt = port->pps_data.req_min_volt; + port->pps_data.max_volt = port->pps_data.req_max_volt; + port->pps_data.max_curr = port->pps_data.req_max_curr; + port->req_supply_voltage = port->pps_data.req_out_volt; + port->req_current_limit = port->pps_data.req_op_curr; + power_supply_changed(port->psy); tcpm_set_state(port, SNK_TRANSITION_SINK, 0); break; case SOFT_RESET_SEND: @@ -2138,6 +2161,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = false; port->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(port->psy); /* * Select the source PDO providing the most power which has a @@ -2162,6 +2186,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, port->pps_data.supported = true; port->usb_type = POWER_SUPPLY_USB_TYPE_PD_PPS; + power_supply_changed(port->psy); } continue; default: @@ -2309,16 +2334,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) src = port->source_caps[src_pdo]; snk = port->snk_pdo[snk_pdo]; - port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src), - pdo_pps_apdo_min_voltage(snk)); - port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src), - pdo_pps_apdo_max_voltage(snk)); - port->pps_data.max_curr = min_pps_apdo_current(src, snk); - port->pps_data.out_volt = min(port->pps_data.max_volt, - max(port->pps_data.min_volt, - port->pps_data.out_volt)); - port->pps_data.op_curr = min(port->pps_data.max_curr, - port->pps_data.op_curr); + port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src), + pdo_pps_apdo_min_voltage(snk)); + port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src), + pdo_pps_apdo_max_voltage(snk)); + port->pps_data.req_max_curr = min_pps_apdo_current(src, snk); + port->pps_data.req_out_volt = min(port->pps_data.max_volt, + max(port->pps_data.min_volt, + port->pps_data.req_out_volt)); + port->pps_data.req_op_curr = min(port->pps_data.max_curr, + port->pps_data.req_op_curr); } return src_pdo; @@ -2398,8 +2423,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) flags & RDO_CAP_MISMATCH ? 
" [mismatch]" : ""); } - port->current_limit = ma; - port->supply_voltage = mv; + port->req_current_limit = ma; + port->req_supply_voltage = mv; return 0; } @@ -2445,10 +2470,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo) tcpm_log(port, "Invalid APDO selected!"); return -EINVAL; } - max_mv = port->pps_data.max_volt; - max_ma = port->pps_data.max_curr; - out_mv = port->pps_data.out_volt; - op_ma = port->pps_data.op_curr; + max_mv = port->pps_data.req_max_volt; + max_ma = port->pps_data.req_max_curr; + out_mv = port->pps_data.req_out_volt; + op_ma = port->pps_data.req_op_curr; break; default: tcpm_log(port, "Invalid PDO selected!"); @@ -2495,8 +2520,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo) tcpm_log(port, "Requesting APDO %d: %u mV, %u mA", src_pdo_index, out_mv, op_ma); - port->pps_data.op_curr = op_ma; - port->pps_data.out_volt = out_mv; + port->pps_data.req_op_curr = op_ma; + port->pps_data.req_out_volt = out_mv; return 0; } @@ -2554,6 +2579,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge) return ret; } port->vbus_charge = charge; + power_supply_changed(port->psy); return 0; } @@ -2717,18 +2743,16 @@ static void tcpm_reset_port(struct tcpm_port *port) port->try_src_count = 0; port->try_snk_count = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; - - power_supply_changed(port->psy); } static void tcpm_detach(struct tcpm_port *port) { - if (!port->attached) - return; - if (tcpm_port_is_disconnected(port)) port->hard_reset_count = 0; + if (!port->attached) + return; + tcpm_reset_port(port); } @@ -3482,7 +3506,7 @@ static void run_state_machine(struct tcpm_port *port) */ tcpm_set_pwr_role(port, TYPEC_SOURCE); tcpm_pd_send_control(port, PD_CTRL_PS_RDY); - tcpm_set_state(port, SRC_STARTUP, 0); + tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); break; case VCONN_SWAP_ACCEPT: @@ -3768,6 +3792,14 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, */ break; + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore CC changes here. + */ + break; + default: if (tcpm_port_is_disconnected(port)) tcpm_set_state(port, unattached_state(port), 0); @@ -3829,6 +3861,15 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port) case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + case PORT_RESET_WAIT_OFF: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. + */ + break; + default: break; } @@ -3882,10 +3923,19 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) case PORT_RESET_WAIT_OFF: tcpm_set_state(port, tcpm_default_state(port), 0); break; + case SRC_TRY_WAIT: case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + + case PORT_RESET: + /* + * State set back to default mode once the timer completes. + * Ignore vbus changes here. 
+ */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) @@ -4140,7 +4190,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role) return ret; } -static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr) +static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr) { unsigned int target_mw; int ret; @@ -4158,22 +4208,22 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr) goto port_unlock; } - if (op_curr > port->pps_data.max_curr) { + if (req_op_curr > port->pps_data.max_curr) { ret = -EINVAL; goto port_unlock; } - target_mw = (op_curr * port->pps_data.out_volt) / 1000; + target_mw = (req_op_curr * port->supply_voltage) / 1000; if (target_mw < port->operating_snk_mw) { ret = -EINVAL; goto port_unlock; } /* Round down operating current to align with PPS valid steps */ - op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP); + req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP); reinit_completion(&port->pps_complete); - port->pps_data.op_curr = op_curr; + port->pps_data.req_op_curr = req_op_curr; port->pps_status = 0; port->pps_pending = true; tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); @@ -4195,7 +4245,7 @@ swap_unlock: return ret; } -static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt) +static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt) { unsigned int target_mw; int ret; @@ -4213,23 +4263,23 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt) goto port_unlock; } - if (out_volt < port->pps_data.min_volt || - out_volt > port->pps_data.max_volt) { + if (req_out_volt < port->pps_data.min_volt || + req_out_volt > port->pps_data.max_volt) { ret = -EINVAL; goto port_unlock; } - target_mw = (port->pps_data.op_curr * out_volt) / 1000; + target_mw = (port->current_limit * req_out_volt) / 1000; if (target_mw < port->operating_snk_mw) { ret = -EINVAL; goto port_unlock; } /* Round down output voltage to align with PPS valid steps */ - out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP); + req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP); reinit_completion(&port->pps_complete); - port->pps_data.out_volt = out_volt; + port->pps_data.req_out_volt = req_out_volt; port->pps_status = 0; port->pps_pending = true; tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); @@ -4278,8 +4328,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate) /* Trigger PPS request or move back to standard PDO contract */ if (activate) { - port->pps_data.out_volt = port->supply_voltage; - port->pps_data.op_curr = port->current_limit; + port->pps_data.req_out_volt = port->supply_voltage; + port->pps_data.req_op_curr = port->current_limit; tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); } else { tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); @@ -4639,7 +4689,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy, ret = -EINVAL; break; } - + power_supply_changed(port->psy); return ret; } @@ -4790,6 +4840,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) err = devm_tcpm_psy_register(port); if (err) goto out_role_sw_put; + power_supply_changed(port->psy); port->typec_port = typec_register_port(port->dev, &port->typec_caps); if (IS_ERR(port->typec_port)) { diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index ba288b964dc8..4d56408ac623 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -246,14 +246,18 @@ void 
ucsi_altmode_update_active(struct ucsi_connector *con) con->partner_altmode[i] == altmode); } -static u8 ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid) +static int ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid) { u8 mode = 1; int i; - for (i = 0; alt[i]; i++) + for (i = 0; alt[i]; i++) { + if (i > MODE_DISCOVERY_MAX) + return -ERANGE; + if (alt[i]->svid == svid) mode++; + } return mode; } @@ -288,8 +292,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con, goto err; } - desc->mode = ucsi_altmode_next_mode(con->port_altmode, - desc->svid); + ret = ucsi_altmode_next_mode(con->port_altmode, desc->svid); + if (ret < 0) + return ret; + + desc->mode = ret; switch (desc->svid) { case USB_TYPEC_DP_SID: @@ -315,8 +322,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con, goto err; } - desc->mode = ucsi_altmode_next_mode(con->partner_altmode, - desc->svid); + ret = ucsi_altmode_next_mode(con->partner_altmode, desc->svid); + if (ret < 0) + return ret; + + desc->mode = ret; alt = typec_partner_register_altmode(con->partner, desc); if (IS_ERR(alt)) { diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index a18112a83fae..dda8bd39c918 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) static int ucsi_acpi_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct ucsi_acpi *ua; struct resource *res; acpi_status status; int ret; + if (adev->dep_unmet) + return -EPROBE_DEFER; + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); if (!ua) return -ENOMEM; diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 2305d425e6c9..d8d3892e5a69 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a int sockfd = 0; struct socket *socket; int rv; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); @@ -61,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a dev_info(dev, "stub up\n"); + mutex_lock(&sdev->ud.sysfs_lock); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { @@ -69,23 +72,49 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a } socket = sockfd_lookup(sockfd, &err); - if (!socket) + if (!socket) { + dev_err(dev, "failed to lookup sock"); goto err; + } + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + goto sock_err; + } + + /* unlock and create threads and get tasks */ + spin_unlock_irq(&sdev->ud.lock); + tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + goto unlock_mutex; + } + tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + goto unlock_mutex; + } + + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + + /* lock and update sdev->ud state */ + spin_lock_irq(&sdev->ud.lock); sdev->ud.tcp_socket = socket; sdev->ud.sockfd = sockfd; - - spin_unlock_irq(&sdev->ud.lock); - - sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, - "stub_rx"); - sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, - "stub_tx"); - - 
spin_lock_irq(&sdev->ud.lock); + sdev->ud.tcp_rx = tcp_rx; + sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); + wake_up_process(sdev->ud.tcp_rx); + wake_up_process(sdev->ud.tcp_tx); + + mutex_unlock(&sdev->ud.sysfs_lock); + } else { dev_info(dev, "stub down\n"); @@ -96,12 +125,17 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a spin_unlock_irq(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); + mutex_unlock(&sdev->ud.sysfs_lock); } return count; +sock_err: + sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); +unlock_mutex: + mutex_unlock(&sdev->ud.sysfs_lock); return -EINVAL; } static DEVICE_ATTR_WO(usbip_sockfd); @@ -242,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.side = USBIP_STUB; sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); + mutex_init(&sdev->ud.sysfs_lock); sdev->ud.tcp_socket = NULL; sdev->ud.sockfd = -1; diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 8be857a4fa13..a7e6ce96f62c 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -263,6 +263,9 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + /* mutex for synchronizing sysfs store paths */ + struct mutex sysfs_lock; + int sockfd; struct socket *tcp_socket; diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5d88917c9631..086ca76dd053 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work) while ((ud = get_event()) != NULL) { usbip_dbg_eh("pending event %lx\n", ud->event); + mutex_lock(&ud->sysfs_lock); /* * NOTE: shutdown must come first. * Shutdown the device. 
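Editorial note: the usbip hunks above close a race between the sysfs attach/detach stores and the event workqueue. Every store path and the event handler now take the new ud->sysfs_lock, so a shutdown event can no longer run against a half-completed attach. A minimal sketch of that locking shape follows; the demo_* names are hypothetical stand-ins for the usbip specifics, and initialization is elided.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for struct usbip_device; init code elided. */
struct demo_ud {
	struct mutex sysfs_lock;	/* serializes sysfs stores vs. events */
	struct work_struct eh_work;
	int status;
};

static ssize_t demo_sockfd_store(struct demo_ud *ud, int sockfd)
{
	ssize_t ret = 0;

	mutex_lock(&ud->sysfs_lock);
	/* validate the fd, create rx/tx threads, publish the new status */
	mutex_unlock(&ud->sysfs_lock);
	return ret;
}

static void demo_event_handler(struct work_struct *work)
{
	struct demo_ud *ud = container_of(work, struct demo_ud, eh_work);

	mutex_lock(&ud->sysfs_lock);
	/* shutdown/reset/unusable handling runs with stores excluded */
	mutex_unlock(&ud->sysfs_lock);
}

The spinlock ud->lock still protects the status word itself; the mutex only serializes the long, sleepable store and event paths around it.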
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work) ud->eh_ops.unusable(ud); unset_event(ud, USBIP_EH_UNUSABLE); } + mutex_unlock(&ud->sysfs_lock); wake_up(&ud->eh_waitq); } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 65850e9c7190..98636fbf7188 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n", wValue); + if (wValue >= 32) + goto error; vhci_hcd->port_status[rhport] &= ~(1 << wValue); break; } @@ -593,6 +595,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, pr_err("invalid port number %d\n", wIndex); goto error; } + if (wValue >= 32) + goto error; if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { @@ -1092,6 +1096,7 @@ static void vhci_device_init(struct vhci_device *vdev) vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; spin_lock_init(&vdev->ud.lock); + mutex_init(&vdev->ud.sysfs_lock); INIT_LIST_HEAD(&vdev->priv_rx); INIT_LIST_HEAD(&vdev->priv_tx); diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index be37aec250c2..ebc7be1d9820 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_dbg_vhci_sysfs("enter\n"); + mutex_lock(&vdev->ud.sysfs_lock); + /* lock */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); + mutex_unlock(&vdev->ud.sysfs_lock); return -EINVAL; } @@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); + mutex_unlock(&vdev->ud.sysfs_lock); + return 0; } @@ -312,6 +317,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, struct vhci *vhci; int err; unsigned long flags; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; /* * @rhport: port number of vhci_hcd @@ -347,14 +354,43 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, else vdev = &vhci->vhci_hcd_hs->vdev[rhport]; + mutex_lock(&vdev->ud.sysfs_lock); + /* Extract socket from fd. 
*/ socket = sockfd_lookup(sockfd, &err); - if (!socket) - return -EINVAL; + if (!socket) { + dev_err(dev, "failed to lookup sock"); + err = -EINVAL; + goto unlock_mutex; + } + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + sockfd_put(socket); + err = -EINVAL; + goto unlock_mutex; + } - /* now need lock until setting vdev status as used */ + /* create threads before locking */ + tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + err = -EINVAL; + goto unlock_mutex; + } + tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + err = -EINVAL; + goto unlock_mutex; + } - /* begin a lock */ + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + + /* now begin locking until the vdev status is set */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -364,13 +400,16 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, spin_unlock_irqrestore(&vhci->lock, flags); sockfd_put(socket); + kthread_stop_put(tcp_rx); + kthread_stop_put(tcp_tx); dev_err(dev, "port %d already used\n", rhport); /* * Will be retried from userspace * if there's another free port. */ - return -EBUSY; + err = -EBUSY; + goto unlock_mutex; } dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", @@ -382,18 +421,28 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, vdev->speed = speed; vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; + vdev->ud.tcp_rx = tcp_rx; + vdev->ud.tcp_tx = tcp_tx; vdev->ud.status = VDEV_ST_NOTASSIGNED; spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); /* end the lock */ - vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); - vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); + wake_up_process(vdev->ud.tcp_rx); + wake_up_process(vdev->ud.tcp_tx); rh_port_connect(vdev, speed); + dev_info(dev, "Device attached\n"); + + mutex_unlock(&vdev->ud.sysfs_lock); + return count; + +unlock_mutex: + mutex_unlock(&vdev->ud.sysfs_lock); + return err; } static DEVICE_ATTR_WO(attach); diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index c8eeabdd9b56..2bc428f2e261 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc) init_waitqueue_head(&udc->tx_waitq); spin_lock_init(&ud->lock); + mutex_init(&ud->sysfs_lock); ud->status = SDEV_ST_AVAILABLE; ud->side = USBIP_VUDC; diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 100f680c572a..d1cf6b51bf85 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -90,8 +90,9 @@ unlock: } static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor)); -static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr, - const char *in, size_t count) +static ssize_t usbip_sockfd_store(struct device *dev, + struct device_attribute *attr, + const char *in, size_t count) { struct vudc *udc = (struct vudc *) dev_get_drvdata(dev); int rv; @@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a struct socket *socket; unsigned long flags; int ret; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; rv = kstrtoint(in, 0, &sockfd); if (rv != 0) @@ -109,6 +112,7 @@ static ssize_t
usbip_sockfd_store(struct device *dev, struct device_attribute *a dev_err(dev, "no device"); return -ENODEV; } + mutex_lock(&udc->ud.sysfs_lock); spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ if (!udc->driver || !udc->pullup) { @@ -138,24 +142,58 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a goto unlock_ud; } - udc->ud.tcp_socket = socket; + if (socket->type != SOCK_STREAM) { + dev_err(dev, "Expecting SOCK_STREAM - found %d", + socket->type); + ret = -EINVAL; + goto sock_err; + } + /* unlock and create threads and get tasks */ spin_unlock_irq(&udc->ud.lock); spin_unlock_irqrestore(&udc->lock, flags); - udc->ud.tcp_rx = kthread_get_run(&v_rx_loop, - &udc->ud, "vudc_rx"); - udc->ud.tcp_tx = kthread_get_run(&v_tx_loop, - &udc->ud, "vudc_tx"); + tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + mutex_unlock(&udc->ud.sysfs_lock); + return -EINVAL; + } + tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + mutex_unlock(&udc->ud.sysfs_lock); + return -EINVAL; + } + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + + /* lock and update udc->ud state */ spin_lock_irqsave(&udc->lock, flags); spin_lock_irq(&udc->ud.lock); + + udc->ud.tcp_socket = socket; + udc->ud.tcp_rx = tcp_rx; + udc->ud.tcp_tx = tcp_tx; udc->ud.status = SDEV_ST_USED; + spin_unlock_irq(&udc->ud.lock); ktime_get_ts64(&udc->start_time); v_start_timer(udc); udc->connected = 1; + + spin_unlock_irqrestore(&udc->lock, flags); + + wake_up_process(udc->ud.tcp_rx); + wake_up_process(udc->ud.tcp_tx); + + mutex_unlock(&udc->ud.sysfs_lock); + return count; + } else { if (!udc->connected) { dev_err(dev, "Device not connected"); @@ -174,13 +212,17 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a } spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return count; +sock_err: + sockfd_put(socket); unlock_ud: spin_unlock_irq(&udc->ud.lock); unlock: spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock); return ret; } diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index fd17db9b432f..503ed2f3fbb5 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -21,8 +21,8 @@ config VFIO_VIRQFD menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" - depends on IOMMU_API - select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64) + select IOMMU_API + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) help VFIO provides a framework for secure userspace device drivers. See Documentation/driver-api/vfio.rst for more details. 
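Editorial note: stub_dev.c, vhci_sysfs.c and vudc_sysfs.c above all replace kthread_get_run() with the same three-step sequence: create both threads while no spinlock is held, take task_struct references, publish the pointers and the USED status under the lock, and only then wake the threads. A condensed sketch of the pattern, with illustrative demo_* names that are not from the patch:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/spinlock.h>

struct demo_dev {				/* hypothetical; init elided */
	spinlock_t lock;
	struct task_struct *tcp_rx, *tcp_tx;
	int status;
};

extern int demo_rx_loop(void *data);		/* thread bodies elided */
extern int demo_tx_loop(void *data);

static int demo_start_threads(struct demo_dev *dev)
{
	struct task_struct *rx, *tx;

	/* may sleep: no spinlock is held while creating threads */
	rx = kthread_create(demo_rx_loop, dev, "demo_rx");
	if (IS_ERR(rx))
		return PTR_ERR(rx);
	tx = kthread_create(demo_tx_loop, dev, "demo_tx");
	if (IS_ERR(tx)) {
		kthread_stop(rx);
		return PTR_ERR(tx);
	}

	/* hold references so the task_structs survive thread exit */
	get_task_struct(rx);
	get_task_struct(tx);

	/* publish state atomically, then let the threads run */
	spin_lock_irq(&dev->lock);
	dev->tcp_rx = rx;
	dev->tcp_tx = tx;
	dev->status = 1;			/* e.g. SDEV_ST_USED */
	spin_unlock_irq(&dev->lock);

	wake_up_process(rx);
	wake_up_process(tx);
	return 0;
}

Because kthread_create() leaves the new thread sleeping, neither loop can observe the device before its state is fully published, and a failed second kthread_create() can stop the first thread before any device state has changed.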
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 7570c7602ab4..ca9476883f15 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -105,12 +105,13 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, return ERR_PTR(-ENOMEM); type->kobj.kset = parent->mdev_types_kset; + type->parent = parent; ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL, "%s-%s", dev_driver_string(parent->dev), group->name); if (ret) { - kfree(type); + kobject_put(&type->kobj); return ERR_PTR(ret); } @@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, } type->group = group; - type->parent = parent; return type; attrs_failed: diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index ac3c1dd3edef..4abddbebd4b2 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -42,6 +42,6 @@ config VFIO_PCI_IGD config VFIO_PCI_NVLINK2 def_bool y - depends on VFIO_PCI && PPC_POWERNV + depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU help VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 02206162eaa9..a603f363835c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -27,6 +27,7 @@ #include <linux/vfio.h> #include <linux/vgaarb.h> #include <linux/nospec.h> +#include <linux/sched/mm.h> #include "vfio_pci_private.h" @@ -113,8 +114,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) int bar; struct vfio_pci_dummy_resource *dummy_res; - INIT_LIST_HEAD(&vdev->dummy_resources_list); - for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { res = vdev->pdev->resource + bar; @@ -177,6 +176,7 @@ no_mmap: static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND @@ -332,7 +332,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) pdev->vendor == PCI_VENDOR_ID_INTEL && IS_ENABLED(CONFIG_VFIO_PCI_IGD)) { ret = vfio_pci_igd_init(vdev); - if (ret) { + if (ret && ret != -ENODEV) { pci_warn(pdev, "Failed to setup Intel IGD regions\n"); goto disable_exit; } @@ -472,6 +472,19 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); + if (vdev->err_trigger) { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); + if (vdev->req_trigger) { + eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } + mutex_unlock(&vdev->igate); } mutex_unlock(&vdev->reflck->lock); @@ -688,6 +701,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -761,7 +780,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; - u16 orig_cmd; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -781,10 +800,7 @@ static long vfio_pci_ioctl(void *device_data, * Is it really there? Enable memory decode for * implicit access in pci_map_rom(). 
*/ - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(pdev, PCI_COMMAND, - orig_cmd | PCI_COMMAND_MEMORY); - + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); if (io) { info.flags = VFIO_REGION_INFO_FLAG_READ; @@ -792,8 +808,8 @@ static long vfio_pci_ioctl(void *device_data, } else { info.size = 0; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -936,8 +952,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? - pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -1017,8 +1041,9 @@ reset_info_exit: int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -1070,9 +1095,9 @@ reset_info_exit: * user interface and store the group and iommu ID. This * ensures the group is held across the reset. */ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -1085,8 +1110,9 @@ reset_info_exit: break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1105,13 +1131,63 @@ reset_info_exit: ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = pci_reset_bus(vdev->pdev); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. 
+ */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1192,6 +1268,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. + * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. 
+ */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + mutex_unlock(&vdev->vma_lock); + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + +up_out: + up_read(&vdev->memory_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1202,6 +1474,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; if (vma->vm_end < vma->vm_start) return -EINVAL; if ((vma->vm_flags & VM_SHARED) == 0) @@ -1210,7 +1484,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) int regnum = index - VFIO_PCI_NUM_REGIONS; struct vfio_pci_region *region = vdev->region + regnum; - if (region && region->ops && region->ops->mmap && + if (region->ops && region->ops->mmap && (region->flags & VFIO_REGION_INFO_FLAG_MMAP)) return region->ops->mmap(vdev, region, vma); return -EINVAL; @@ -1250,8 +1524,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1326,7 +1606,11 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&vdev->igate); spin_lock_init(&vdev->irqlock); mutex_init(&vdev->ioeventfds_lock); + INIT_LIST_HEAD(&vdev->dummy_resources_list); INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -1516,12 +1800,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck) kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock); } -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1552,6 +1830,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * If a bus or slot reset is available for the provided device and: * - All of the devices affected by that bus or slot reset are unused diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index f0891bd8444c..bf32997c557f 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -395,6 +395,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY); +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. 
* (backdoor = some device specific technique that we didn't catch) @@ -504,8 +518,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, count = vfio_default_config_read(vdev, pos, count, perm, offset, val); - /* Mask in virtual memory enable for SR-IOV devices */ - if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { + /* Mask in virtual memory enable */ + if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) { u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); u32 tmp_val = le32_to_cpu(*val); @@ -554,13 +568,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -568,17 +587,22 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, * shows it disabled (phys_mem/io, then the device has * undergone some kind of backdoor reset and needs to be * restored before we allow it to enable the bars. - * SR-IOV devices will trigger this, but we catch them later + * SR-IOV devices will trigger this - for mem enable let's + * catch this now and for io enable it will be caught later */ - if ((new_mem && virt_mem && !phys_mem) || + if ((new_mem && virt_mem && !phys_mem && + !pdev->no_command_memory) || (new_io && virt_io && !phys_io) || vfio_need_bar_restore(vdev)) vfio_bar_restore(vdev); } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -589,6 +613,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -826,8 +852,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -905,8 +934,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; @@ -1460,7 +1492,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) if (ret) return ret; - if (cap <= PCI_CAP_ID_MAX) { + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. 
+ */ + if (cap && cap <= PCI_CAP_ID_MAX) { len = pci_cap_length[cap]; if (len == 0xFF) { /* Variable length */ len = vfio_cap_len(vdev, cap, pos); @@ -1698,6 +1735,17 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ } + if (pdev->no_command_memory) { + /* + * VFs and devices that set pdev->no_command_memory do not + * implement the memory enable bit of the COMMAND register + * therefore we'll not have it set in our initial copy of + * config space after pci_enable_device(). For consistency + * with PFs, set the virtual enable bit here. + */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); + } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) vconfig[PCI_INTERRUPT_PIN] = 0; @@ -1726,8 +1774,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) vdev->vconfig = NULL; kfree(vdev->pci_config_map); vdev->pci_config_map = NULL; - kfree(vdev->msi_perm); - vdev->msi_perm = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } } /* diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 2056f3f85f59..869dce5f134d 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : @@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, if (vdev->ctx[vector].trigger) { irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. 
*/ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -341,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, vdev->ctx[vector].producer.token = trigger; vdev->ctx[vector].producer.irq = irq; ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); - if (unlikely(ret)) + if (unlikely(ret)) { dev_info(&pdev->dev, "irq bypass producer (token %p) registration fails: %d\n", vdev->ctx[vector].producer.token, ret); + vdev->ctx[vector].producer.token = NULL; + } vdev->ctx[vector].trigger = trigger; return 0; @@ -376,6 +389,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -384,7 +398,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index 3f5f8198a6bb..08f17839c2fe 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev) return -EINVAL; if (of_property_read_u32(npu_node, "memory-region", &mem_phandle)) - return -EINVAL; + return -ENODEV; mem_node = of_find_node_by_phandle(mem_phandle); if (!mem_node) @@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev) int ret; struct vfio_pci_npu2_data *data; struct device_node *nvlink_dn; - u32 nvlink_index = 0; + u32 nvlink_index = 0, mem_phandle = 0; struct pci_dev *npdev = vdev->pdev; struct device_node *npu_node = pci_device_to_OF_node(npdev); struct pci_controller *hose = pci_bus_to_host(npdev->bus); @@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev) if (!pnv_pci_get_gpu_dev(vdev->pdev)) return -ENODEV; + if (of_property_read_u32(npu_node, "memory-region", &mem_phandle)) + return -ENODEV; + /* * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links * so we can allocate one register per link, using nvlink index as diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index ee6ee91718a4..987b4d311fde 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -84,6 +84,11 @@ struct vfio_pci_reflck { struct mutex lock; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -122,6 +127,9 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -164,6 +172,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, extern int vfio_pci_set_power_state(struct vfio_pci_device 
*vdev, pci_power_t state); +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 0120d8324a40..83f81d24df78 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much faster. */ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else { int ret = vfio_pci_setup_barmap(vdev, bar); - if (ret) - return ret; + if (ret) { + done = ret; + goto out; + } io = vdev->barmap[bar]; } @@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index ae1a5eb98620..1e2769010089 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i) { struct platform_device *pdev = (struct platform_device *) vdev->opaque; - return platform_get_irq(pdev, i); + return platform_get_irq_optional(pdev, i); } static int vfio_platform_probe(struct platform_device *pdev) diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e8f2bdbe0542..152e5188183c 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data) ret = pm_runtime_get_sync(vdev->device); if (ret < 0) - goto err_pm; + goto err_rst; ret = vfio_platform_call_reset(vdev, &extra_dbg); if (ret && vdev->reset_required) { @@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data) err_rst: pm_runtime_put(vdev->device); -err_pm: vfio_platform_irq_cleanup(vdev); err_irq: vfio_platform_regions_cleanup(vdev); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index d864277ea16f..6b1e8cba1798 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -24,6 +24,7 @@ #include <linux/compat.h> #include <linux/device.h> #include <linux/fs.h> +#include <linux/highmem.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/mm.h> @@ -335,6 +336,42 
@@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + pte_t *ptep; + spinlock_t *ptl; + int ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) + return ret; + } + + if (write_fault && !pte_write(*ptep)) + ret = -EFAULT; + else + *pfn = pte_pfn(*ptep); + + pte_unmap_unlock(ptep, ptl); + return ret; +} + static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, int prot, unsigned long *pfn) { @@ -377,12 +414,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vaddr = untagged_addr(vaddr); +retry: vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(&mm->mmap_sem); @@ -593,7 +634,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, continue; } - remote_vaddr = dma->vaddr + iova - dma->iova; + remote_vaddr = dma->vaddr + (iova - dma->iova); ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], do_accounting); if (ret) @@ -601,7 +642,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); if (ret) { - vfio_unpin_page_external(dma, iova, do_accounting); + if (put_pfn(phys_pfn[i], dma->prot) && do_accounting) + vfio_lock_acct(dma, -1, true); goto pin_unwind; } } @@ -835,6 +877,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) { + WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)); vfio_unmap_unpin(iommu, dma, true); vfio_unlink_dma(iommu, dma); put_task_struct(dma->task); @@ -1187,13 +1230,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1211,6 +1257,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1240,7 +1291,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1249,14 +1300,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; } /* @@ -1882,23 +1986,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) } } -static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu) -{ - struct rb_node *n; - - n = rb_first(&iommu->dma_list); - for (; n; n = rb_next(n)) { - struct vfio_dma *dma; - - dma = rb_entry(n, struct vfio_dma, node); - - if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list))) - break; - } - /* mdev vendor driver must unregister notifier */ - WARN_ON(iommu->notifier.head); -} - /* * Called when a domain is removed in detach. It is possible that * the removed domain decided the iova aperture window. 
Modify the @@ -1996,10 +2083,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, kfree(group); if (list_empty(&iommu->external_domain->group_list)) { - vfio_sanity_check_pfn_list(iommu); - - if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) { + WARN_ON(iommu->notifier.head); vfio_iommu_unmap_unpin_all(iommu); + } kfree(iommu->external_domain); iommu->external_domain = NULL; @@ -2032,10 +2119,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, */ if (list_empty(&domain->group_list)) { if (list_is_singular(&iommu->domain_list)) { - if (!iommu->external_domain) + if (!iommu->external_domain) { + WARN_ON(iommu->notifier.head); vfio_iommu_unmap_unpin_all(iommu); - else + } else { vfio_iommu_unmap_unpin_reaccount(iommu); + } } iommu_domain_free(domain->domain); list_del(&domain->next); @@ -2109,7 +2198,6 @@ static void vfio_iommu_type1_release(void *iommu_data) if (iommu->external_domain) { vfio_release_domain(iommu->external_domain, true); - vfio_sanity_check_pfn_list(iommu); kfree(iommu->external_domain); } @@ -2211,6 +2299,24 @@ out_unlock: return ret; } +static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu, + struct vfio_info_cap *caps) +{ + struct vfio_iommu_type1_info_dma_avail cap_dma_avail; + int ret; + + mutex_lock(&iommu->lock); + cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL; + cap_dma_avail.header.version = 1; + + cap_dma_avail.avail = iommu->dma_avail; + + ret = vfio_info_add_capability(caps, &cap_dma_avail.header, + sizeof(cap_dma_avail)); + mutex_unlock(&iommu->lock); + return ret; +} + static long vfio_iommu_type1_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -2257,6 +2363,10 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, info.iova_pgsizes = vfio_pgsize_bitmap(iommu); ret = vfio_iommu_iova_build_caps(iommu, &caps); + + if (!ret) + ret = vfio_iommu_dma_avail_build_caps(iommu, &caps); + if (ret) return ret; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index b53b6528d6ce..d6f9f491e69f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -744,6 +744,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, xdp->data_hard_start = buf; xdp->data = buf + pad; xdp->data_end = xdp->data + len; + xdp_set_data_meta_invalid(xdp); hdr->buflen = buflen; --net->refcnt_bias; @@ -860,6 +861,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) size_t len, total_len = 0; int err; struct vhost_net_ubuf_ref *uninitialized_var(ubufs); + struct ubuf_info *ubuf; bool zcopy_used; int sent_pkts = 0; @@ -892,9 +894,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { - struct ubuf_info *ubuf; ubuf = nvq->ubuf_info + nvq->upend_idx; - vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->callback = vhost_zerocopy_callback; @@ -924,7 +924,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { - vhost_net_ubuf_put(ubufs); + if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) + vhost_net_ubuf_put(ubufs); nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) % UIO_MAXIOV; } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index a9caf1bc3c3e..98c484149ac7 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -320,7 +320,7 @@ static u32 
vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) return 1; } -static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) +static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) { struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, struct vhost_scsi_cmd, tvc_se_cmd); @@ -340,6 +340,16 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) target_free_tag(se_sess, se_cmd); } +static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) +{ + struct vhost_scsi_cmd *cmd = container_of(se_cmd, + struct vhost_scsi_cmd, tvc_se_cmd); + struct vhost_scsi *vs = cmd->tvc_vhost; + + llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); + vhost_work_queue(&vs->dev, &vs->vs_completion_work); +} + static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) { return 0; @@ -362,28 +372,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) return 0; } -static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd) -{ - struct vhost_scsi *vs = cmd->tvc_vhost; - - llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); - - vhost_work_queue(&vs->dev, &vs->vs_completion_work); -} - static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) { - struct vhost_scsi_cmd *cmd = container_of(se_cmd, - struct vhost_scsi_cmd, tvc_se_cmd); - vhost_scsi_complete_cmd(cmd); + transport_generic_free_cmd(se_cmd, 0); return 0; } static int vhost_scsi_queue_status(struct se_cmd *se_cmd) { - struct vhost_scsi_cmd *cmd = container_of(se_cmd, - struct vhost_scsi_cmd, tvc_se_cmd); - vhost_scsi_complete_cmd(cmd); + transport_generic_free_cmd(se_cmd, 0); return 0; } @@ -429,15 +426,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs, return evt; } -static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd) -{ - struct se_cmd *se_cmd = &cmd->tvc_se_cmd; - - /* TODO locking against target/backend threads? 
*/ - transport_generic_free_cmd(se_cmd, 0); - -} - static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) { return target_put_sess_cmd(se_cmd); @@ -556,7 +544,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) } else pr_err("Faulted on virtio_scsi_cmd_resp\n"); - vhost_scsi_free_cmd(cmd); + vhost_scsi_release_cmd_res(se_cmd); } vq = -1; @@ -1088,7 +1076,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) &prot_iter, exp_data_len, &data_iter))) { vq_err(vq, "Failed to map iov to sgl\n"); - vhost_scsi_release_cmd(&cmd->tvc_se_cmd); + vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); goto err; } } @@ -1215,7 +1203,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - switch (v_req.type) { + switch (vhost32_to_cpu(vq, v_req.type)) { case VIRTIO_SCSI_T_TMF: vc.req = &v_req.tmf; vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); @@ -2290,6 +2278,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { static const struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .fabric_name = "vhost", + .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS, .tpg_get_wwn = vhost_scsi_get_fabric_wwn, .tpg_get_tag = vhost_scsi_get_tpgt, .tpg_check_demo_mode = vhost_scsi_check_true, diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 36ca2cf419bf..a279ecacbf60 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -320,8 +320,8 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->kick = NULL; vq->call_ctx = NULL; vq->log_ctx = NULL; - vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); + vhost_reset_is_le(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; @@ -1299,6 +1299,11 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, struct vring_used __user *used) { + /* If an IOTLB device is present, the vring addresses are + * GIOVAs. Access validation occurs at prefetch time. */ + if (vq->iotlb) + return true; + return access_ok(desc, vhost_get_desc_size(vq, num)) && access_ok(avail, vhost_get_avail_size(vq, num)) && access_ok(used, vhost_get_used_size(vq, num)); @@ -1394,10 +1399,6 @@ bool vhost_vq_access_ok(struct vhost_virtqueue *vq) if (!vq_log_access_ok(vq, vq->log_base)) return false; - /* Access validation occurs at prefetch time with IOTLB */ - if (vq->iotlb) - return true; - return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); @@ -1544,8 +1545,7 @@ static long vhost_vring_set_addr(struct vhost_dev *d, /* Also validate log access for used ring if enabled. */ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && !log_access_ok(vq->log_base, a.log_guest_addr, - sizeof *vq->used + - vq->num * sizeof *vq->used->ring)) + vhost_get_used_size(vq, vq->num))) return -EINVAL; } diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index a0a2d74967ef..026a37ee4177 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -274,13 +274,14 @@ __vringh_iov(struct vringh *vrh, u16 i, desc_max = vrh->vring.num; up_next = -1; + /* You must want something! */ + if (WARN_ON(!riov && !wiov)) + return -EINVAL; + if (riov) riov->i = riov->used = 0; - else if (wiov) + if (wiov) wiov->i = wiov->used = 0; - else - /* You must want something! 
*/ - BUG(); for (;;) { void *addr; diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 88a5aa6624b4..f21f5bfbb78d 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); - added = true; - - /* Deliver to monitoring devices all correctly transmitted - * packets. + /* Deliver to monitoring devices all packets that we + * will transmit. */ virtio_transport_deliver_tap_pkt(pkt); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); + added = true; + pkt->off += payload_len; total_len += payload_len; @@ -384,6 +384,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) return val < vq->num; } +static struct virtio_transport vhost_transport = { + .transport = { + .get_local_cid = vhost_transport_get_local_cid, + + .init = virtio_transport_do_socket_init, + .destruct = virtio_transport_destruct, + .release = virtio_transport_release, + .connect = virtio_transport_connect, + .shutdown = virtio_transport_shutdown, + .cancel_pkt = vhost_transport_cancel_pkt, + + .dgram_enqueue = virtio_transport_dgram_enqueue, + .dgram_dequeue = virtio_transport_dgram_dequeue, + .dgram_bind = virtio_transport_dgram_bind, + .dgram_allow = virtio_transport_dgram_allow, + + .stream_enqueue = virtio_transport_stream_enqueue, + .stream_dequeue = virtio_transport_stream_dequeue, + .stream_has_data = virtio_transport_stream_has_data, + .stream_has_space = virtio_transport_stream_has_space, + .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, + .stream_is_active = virtio_transport_stream_is_active, + .stream_allow = virtio_transport_stream_allow, + + .notify_poll_in = virtio_transport_notify_poll_in, + .notify_poll_out = virtio_transport_notify_poll_out, + .notify_recv_init = virtio_transport_notify_recv_init, + .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, + .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, + .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, + .notify_send_init = virtio_transport_notify_send_init, + .notify_send_pre_block = virtio_transport_notify_send_pre_block, + .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, + .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, + + .set_buffer_size = virtio_transport_set_buffer_size, + .set_min_buffer_size = virtio_transport_set_min_buffer_size, + .set_max_buffer_size = virtio_transport_set_max_buffer_size, + .get_buffer_size = virtio_transport_get_buffer_size, + .get_min_buffer_size = virtio_transport_get_min_buffer_size, + .get_max_buffer_size = virtio_transport_get_max_buffer_size, + }, + + .send_pkt = vhost_transport_send_pkt, +}; + static void vhost_vsock_handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, @@ -440,7 +486,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && le64_to_cpu(pkt->hdr.dst_cid) == vhost_transport_get_local_cid()) - virtio_transport_recv_pkt(pkt); + virtio_transport_recv_pkt(&vhost_transport, pkt); else virtio_transport_free_pkt(pkt); @@ -500,6 +546,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) mutex_unlock(&vq->mutex); } + /* Some packets may have been queued before the device was started, + * let's kick the send worker to send them. 
+ */ + vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); + mutex_unlock(&vsock->dev.mutex); return 0; @@ -788,52 +839,6 @@ static struct miscdevice vhost_vsock_misc = { .fops = &vhost_vsock_fops, }; -static struct virtio_transport vhost_transport = { - .transport = { - .get_local_cid = vhost_transport_get_local_cid, - - .init = virtio_transport_do_socket_init, - .destruct = virtio_transport_destruct, - .release = virtio_transport_release, - .connect = virtio_transport_connect, - .shutdown = virtio_transport_shutdown, - .cancel_pkt = vhost_transport_cancel_pkt, - - .dgram_enqueue = virtio_transport_dgram_enqueue, - .dgram_dequeue = virtio_transport_dgram_dequeue, - .dgram_bind = virtio_transport_dgram_bind, - .dgram_allow = virtio_transport_dgram_allow, - - .stream_enqueue = virtio_transport_stream_enqueue, - .stream_dequeue = virtio_transport_stream_dequeue, - .stream_has_data = virtio_transport_stream_has_data, - .stream_has_space = virtio_transport_stream_has_space, - .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, - .stream_is_active = virtio_transport_stream_is_active, - .stream_allow = virtio_transport_stream_allow, - - .notify_poll_in = virtio_transport_notify_poll_in, - .notify_poll_out = virtio_transport_notify_poll_out, - .notify_recv_init = virtio_transport_notify_recv_init, - .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, - .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, - .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, - .notify_send_init = virtio_transport_notify_send_init, - .notify_send_pre_block = virtio_transport_notify_send_pre_block, - .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, - .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, - - .set_buffer_size = virtio_transport_set_buffer_size, - .set_min_buffer_size = virtio_transport_set_min_buffer_size, - .set_max_buffer_size = virtio_transport_set_max_buffer_size, - .get_buffer_size = virtio_transport_get_buffer_size, - .get_min_buffer_size = virtio_transport_get_min_buffer_size, - .get_max_buffer_size = virtio_transport_get_max_buffer_size, - }, - - .send_pkt = vhost_transport_send_pkt, -}; - static int __init vhost_vsock_init(void) { int ret; diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index f68920131a4a..e94932c69f54 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -456,7 +456,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = regulator_enable(lp->enable); if (ret < 0) { dev_err(lp->dev, "failed to enable vddio: %d\n", ret); - return ret; + goto disable_supply; } /* @@ -471,24 +471,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = lp855x_configure(lp); if (ret) { dev_err(lp->dev, "device config err: %d", ret); - return ret; + goto disable_vddio; } ret = lp855x_backlight_register(lp); if (ret) { dev_err(lp->dev, "failed to register backlight. err: %d\n", ret); - return ret; + goto disable_vddio; } ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); if (ret) { dev_err(lp->dev, "failed to register sysfs. 
err: %d\n", ret); - return ret; + goto disable_vddio; } backlight_update_status(lp->bl); + return 0; + +disable_vddio: + if (lp->enable) + regulator_disable(lp->enable); +disable_supply: + if (lp->supply) + regulator_disable(lp->supply); + + return ret; } static int lp855x_remove(struct i2c_client *cl) @@ -497,6 +507,8 @@ static int lp855x_remove(struct i2c_client *cl) lp->bl->props.brightness = 0; backlight_update_status(lp->bl); + if (lp->enable) + regulator_disable(lp->enable); if (lp->supply) regulator_disable(lp->supply); sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 2355f00f5773..1f6301375fd3 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -196,6 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( num_entry); if (ret < 0) { dev_err(dev, "led-sources node is invalid.\n"); + of_node_put(np); return ERR_PTR(-EINVAL); } diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index c10e17fb9a9a..3b432a18b5ab 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -22,52 +22,6 @@ config VGA_CONSOLE Say Y. -config VGACON_SOFT_SCROLLBACK - bool "Enable Scrollback Buffer in System RAM" - depends on VGA_CONSOLE - default n - help - The scrollback buffer of the standard VGA console is located in - the VGA RAM. The size of this RAM is fixed and is quite small. - If you require a larger scrollback buffer, this can be placed in - System RAM which is dynamically allocated during initialization. - Placing the scrollback buffer in System RAM will slightly slow - down the console. - - If you want this feature, say 'Y' here and enter the amount of - RAM to allocate for this buffer. If unsure, say 'N'. - -config VGACON_SOFT_SCROLLBACK_SIZE - int "Scrollback Buffer Size (in KB)" - depends on VGACON_SOFT_SCROLLBACK - range 1 1024 - default "64" - help - Enter the amount of System RAM to allocate for scrollback - buffers of VGA consoles. Each 64KB will give you approximately - 16 80x25 screenfuls of scrollback buffer. - -config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT - bool "Persistent Scrollback History for each console by default" - depends on VGACON_SOFT_SCROLLBACK - default n - help - Say Y here if the scrollback history should persist by default when - switching between consoles. Otherwise, the scrollback history will be - flushed each time the console is switched. This feature can also be - enabled using the boot command line parameter - 'vgacon.scrollback_persistent=1'. - - This feature might break your tool of choice to flush the scrollback - buffer, e.g. clear(1) will work fine but Debian's clear_console(1) - will be broken, which might cause security issues. - You can use the escape sequence \e[3J instead if this feature is - activated. - - Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each - created tty device. - So if you use a RAM-constrained system, say N here. 
- config MDA_CONSOLE depends on !M68K && !PARISC && ISA tristate "MDA text console (dual-headed)" diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 00dddf6e08b0..f45de374f165 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -32,17 +32,14 @@ #include <linux/linux_logo.h> #include <linux/font.h> -#define FONT_DATA ((unsigned char *)font_vga_8x16.data) +#define NEWPORT_LEN 0x10000 -/* borrowed from fbcon.c */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FONT_EXTRA_WORDS 3 +#define FONT_DATA ((unsigned char *)font_vga_8x16.data) static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; +static unsigned long newport_addr; static int logo_active; static int topscan; @@ -520,6 +517,7 @@ static int newport_set_font(int unit, struct console_font *op) FNTSIZE(new_data) = size; FNTCHARCNT(new_data) = op->charcount; REFCOUNT(new_data) = 0; /* usage counter */ + FNTSUM(new_data) = 0; p = new_data; for (i = 0; i < op->charcount; i++) { @@ -702,7 +700,6 @@ const struct consw newport_con = { static int newport_probe(struct gio_device *dev, const struct gio_device_id *id) { - unsigned long newport_addr; int err; if (!dev->resource.start) @@ -712,7 +709,7 @@ static int newport_probe(struct gio_device *dev, return -EBUSY; /* we only support one Newport as console */ newport_addr = dev->resource.start + 0xF0000; - if (!request_mem_region(newport_addr, 0x10000, "Newport")) + if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport")) return -ENODEV; npregs = (struct newport_regs *)/* ioremap cannot fail */ @@ -720,6 +717,11 @@ static int newport_probe(struct gio_device *dev, console_lock(); err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); + + if (err) { + iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); + } return err; } @@ -727,6 +729,7 @@ static void newport_remove(struct gio_device *dev) { give_up_console(&newport_con); iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); } static struct gio_device_id newport_ids[] = { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index bfaa9ec4bc1f..55507df335bd 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -165,210 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK -/* software scrollback */ -struct vgacon_scrollback_info { - void *data; - int tail; - int size; - int rows; - int cnt; - int cur; - int save; - int restore; -}; - -static struct vgacon_scrollback_info *vgacon_scrollback_cur; -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; -static bool scrollback_persistent = \ - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); - -static void vgacon_scrollback_reset(int vc_num, size_t reset_size) -{ - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; - - if (scrollback->data && reset_size > 0) - memset(scrollback->data, 0, reset_size); - - scrollback->cnt = 0; - scrollback->tail = 0; - scrollback->cur = 0; -} - -static void vgacon_scrollback_init(int vc_num) -{ - int 
pitch = vga_video_num_columns * 2; - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - int rows = size / pitch; - void *data; - - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, - GFP_NOWAIT); - - vgacon_scrollbacks[vc_num].data = data; - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - - vgacon_scrollback_cur->rows = rows - 1; - vgacon_scrollback_cur->size = rows * pitch; - - vgacon_scrollback_reset(vc_num, size); -} - -static void vgacon_scrollback_switch(int vc_num) -{ - if (!scrollback_persistent) - vc_num = 0; - - if (!vgacon_scrollbacks[vc_num].data) { - vgacon_scrollback_init(vc_num); - } else { - if (scrollback_persistent) { - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - } else { - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(vc_num, size); - } - } -} - -static void vgacon_scrollback_startup(void) -{ - vgacon_scrollback_cur = &vgacon_scrollbacks[0]; - vgacon_scrollback_init(0); -} - -static void vgacon_scrollback_update(struct vc_data *c, int t, int count) -{ - void *p; - - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || - c->vc_num != fg_console) - return; - - p = (void *) (c->vc_origin + t * c->vc_size_row); - - while (count--) { - scr_memcpyw(vgacon_scrollback_cur->data + - vgacon_scrollback_cur->tail, - p, c->vc_size_row); - - vgacon_scrollback_cur->cnt++; - p += c->vc_size_row; - vgacon_scrollback_cur->tail += c->vc_size_row; - - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; - - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_restore_screen(struct vc_data *c) -{ - c->vc_origin = c->vc_visible_origin; - vgacon_scrollback_cur->save = 0; - - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, - c->vc_screenbuf_size > vga_vram_size ? - vga_vram_size : c->vc_screenbuf_size); - vgacon_scrollback_cur->restore = 1; - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_scrolldelta(struct vc_data *c, int lines) -{ - int start, end, count, soff; - - if (!lines) { - vgacon_restore_screen(c); - return; - } - - if (!vgacon_scrollback_cur->data) - return; - - if (!vgacon_scrollback_cur->save) { - vgacon_cursor(c, CM_ERASE); - vgacon_save_screen(c); - c->vc_origin = (unsigned long)c->vc_screenbuf; - vgacon_scrollback_cur->save = 1; - } - - vgacon_scrollback_cur->restore = 0; - start = vgacon_scrollback_cur->cur + lines; - end = start + abs(lines); - - if (start < 0) - start = 0; - - if (start > vgacon_scrollback_cur->cnt) - start = vgacon_scrollback_cur->cnt; - - if (end < 0) - end = 0; - - if (end > vgacon_scrollback_cur->cnt) - end = vgacon_scrollback_cur->cnt; - - vgacon_scrollback_cur->cur = start; - count = end - start; - soff = vgacon_scrollback_cur->tail - - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); - soff -= count * c->vc_size_row; - - if (soff < 0) - soff += vgacon_scrollback_cur->size; - - count = vgacon_scrollback_cur->cnt - start; - - if (count > c->vc_rows) - count = c->vc_rows; - - if (count) { - int copysize; - - int diff = c->vc_rows - count; - void *d = (void *) c->vc_visible_origin; - void *s = (void *) c->vc_screenbuf; - - count *= c->vc_size_row; - /* how much memory to end of buffer left? 
*/ - copysize = min(count, vgacon_scrollback_cur->size - soff); - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); - d += copysize; - count -= copysize; - - if (count) { - scr_memcpyw(d, vgacon_scrollback_cur->data, count); - d += count; - } - - if (diff) - scr_memcpyw(d, s, diff * c->vc_size_row); - } else - vgacon_cursor(c, CM_MOVE); -} - -static void vgacon_flush_scrollback(struct vc_data *c) -{ - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(c->vc_num, size); -} -#else -#define vgacon_scrollback_startup(...) do { } while (0) -#define vgacon_scrollback_init(...) do { } while (0) -#define vgacon_scrollback_update(...) do { } while (0) -#define vgacon_scrollback_switch(...) do { } while (0) - static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) @@ -382,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) vga_set_mem_top(c); } -static void vgacon_flush_scrollback(struct vc_data *c) -{ -} -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ - static const char *vgacon_startup(void) { const char *display_desc = NULL; @@ -569,10 +360,7 @@ static const char *vgacon_startup(void) vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - if (!vga_init_done) { - vgacon_scrollback_startup(); - vga_init_done = true; - } + vga_init_done = true; return display_desc; } @@ -863,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } - vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } @@ -1380,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { - vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), @@ -1444,7 +1230,6 @@ const struct consw vga_con = { .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, - .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 1e70e838530e..a7e5f12687b7 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1269,6 +1269,7 @@ config FB_ATY select FB_CFB_IMAGEBLIT select FB_BACKLIGHT if FB_ATY_BACKLIGHT select FB_MACMODES if PPC + select FB_ATY_CT if SPARC64 && PCI help This driver supports graphics boards with the ATI Mach64 chips. Say Y if you have such a graphics board. @@ -1279,7 +1280,6 @@ config FB_ATY config FB_ATY_CT bool "Mach64 CT/VT/GT/LT (incl. 
3D RAGE) support" depends on PCI && FB_ATY - default y if SPARC64 && PCI help Say Y here to support use of ATI's 64-bit Rage boards (or other boards based on the Mach64 CT, VT, GT, and LT chipsets) as a diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 5ff8e0320d95..cf2bfff2efbf 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -987,8 +987,8 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) } INIT_LIST_HEAD(&pdata->pwr_gpios); - ret = -ENOMEM; for (i = 0; i < gpiod_count(dev, "atmel,power-control"); i++) { + ret = -ENOMEM; gpiod = devm_gpiod_get_index(dev, "atmel,power-control", i, GPIOD_ASIS); if (IS_ERR(gpiod)) diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 4ca07866f2f6..5dda824d0da3 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -2323,7 +2323,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, ret = radeon_kick_out_firmware_fb(pdev); if (ret) - return ret; + goto err_release_fb; /* request the mem regions */ ret = pci_request_region(pdev, 0, "radeonfb framebuffer"); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..436365efae73 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; @@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, } static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index e5ae33c1a8e8..e8a17fb715ac 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) if (!len) return 0; - cmap->red = kmalloc(size, flags); + cmap->red = kzalloc(size, flags); if (!cmap->red) goto fail; - cmap->green = kmalloc(size, flags); + cmap->green = kzalloc(size, flags); if (!cmap->green) goto fail; - cmap->blue = kmalloc(size, flags); + cmap->blue = kzalloc(size, flags); if (!cmap->blue) goto fail; if (transp) { - cmap->transp = kmalloc(size, flags); + cmap->transp = kzalloc(size, flags); if (!cmap->transp) goto fail; } else { diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 22070cfea1d0..0a9202176ffc 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ 
b/drivers/video/fbdev/core/fbcon.c @@ -122,12 +122,6 @@ static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. */ static int logo_shown = FBCON_LOGO_CANSHOW; -/* Software scrollback */ -static int fbcon_softback_size = 32768; -static unsigned long softback_buf, softback_curr; -static unsigned long softback_in; -static unsigned long softback_top, softback_end; -static int softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; @@ -167,8 +161,6 @@ static int margin_color; static const struct consw fb_con; -#define CM_SOFTBACK (8) - #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_set_origin(struct vc_data *); @@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, return color; } -static void fbcon_update_softback(struct vc_data *vc) -{ - int l = fbcon_softback_size / vc->vc_size_row; - - if (l > 5) - softback_end = softback_buf + l * vc->vc_size_row; - else - /* Smaller scrollback makes no sense, and 0 would screw - the operation totally */ - softback_top = 0; -} - static void fb_flashcursor(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, queue); @@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work) c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? CM_ERASE : CM_DRAW; - ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } @@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt) } if (!strncmp(options, "scrollback:", 11)) { - options += 11; - if (*options) { - fbcon_softback_size = simple_strtoul(options, &options, 0); - if (*options == 'k' || *options == 'K') { - fbcon_softback_size *= 1024; - } - } + pr_warn("Ignoring scrollback size option\n"); continue; } @@ -1016,31 +990,6 @@ static const char *fbcon_startup(void) set_blitting_type(vc, info); - if (info->fix.type != FB_TYPE_TEXT) { - if (fbcon_softback_size) { - if (!softback_buf) { - softback_buf = - (unsigned long) - kvmalloc(fbcon_softback_size, - GFP_KERNEL); - if (!softback_buf) { - fbcon_softback_size = 0; - softback_top = 0; - } - } - } else { - if (softback_buf) { - kvfree((void *) softback_buf); - softback_buf = 0; - softback_top = 0; - } - } - if (softback_buf) - softback_in = softback_top = softback_curr = - softback_buf; - softback_lines = 0; - } - /* Setup default font */ if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) @@ -1214,9 +1163,6 @@ static void fbcon_init(struct vc_data *vc, int init) if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); - if (vc == svc && softback_buf) - fbcon_update_softback(vc); - if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); @@ -1379,7 +1325,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - int y; int c = scr_readw((u16 *) vc->vc_pos); ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); @@ -1393,16 +1338,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode) fbcon_add_cursor_timer(info); ops->cursor_flash = (mode == CM_ERASE) ? 
0 : 1; - if (mode & CM_SOFTBACK) { - mode &= ~CM_SOFTBACK; - y = softback_lines; - } else { - if (softback_lines) - fbcon_set_origin(vc); - y = 0; - } - ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), + if (!ops->cursor) + return; + + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } @@ -1473,8 +1413,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, if (con_is_visible(vc)) { update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -1612,99 +1550,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) scrollback_current = 0; } -static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p, - long delta) -{ - int count = vc->vc_rows; - unsigned short *d, *s; - unsigned long n; - int line = 0; - - d = (u16 *) softback_curr; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - n = softback_curr + delta * vc->vc_size_row; - softback_lines -= delta; - if (delta < 0) { - if (softback_curr < softback_top && n < softback_buf) { - n += softback_end - softback_buf; - if (n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else if (softback_curr >= softback_top - && n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else { - if (softback_curr > softback_in && n >= softback_end) { - n += softback_buf - softback_end; - if (n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } else if (softback_curr <= softback_in && n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } - if (n == softback_curr) - return; - softback_curr = n; - s = (u16 *) softback_curr; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - while (count--) { - unsigned short *start; - unsigned short *le; - unsigned short c; - int x = 0; - unsigned short attr = 1; - - start = s; - le = advance_row(s, 1); - do { - c = scr_readw(s); - if (attr != (c & 0xff00)) { - attr = c & 0xff00; - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start; - start = s; - } - } - if (c == scr_readw(d)) { - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start + 1; - start = s + 1; - } else { - x++; - start++; - } - } - s++; - d++; - } while (s < le); - if (s > start) - fbcon_putcs(vc, start, s - start, line, x); - line++; - if (d == (u16 *) softback_end) - d = (u16 *) softback_buf; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - if (s == (u16 *) softback_end) - s = (u16 *) softback_buf; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - } -} - static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy) { @@ -1844,31 +1689,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, } } -static inline void fbcon_softback_note(struct vc_data *vc, int t, - int count) -{ - unsigned short *p; - - if (vc->vc_num != fg_console) - return; - p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); - - while (count) { - scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); - count--; - p = advance_row(p, 1); - softback_in += vc->vc_size_row; - if (softback_in == softback_end) - softback_in = softback_buf; - if (softback_in == softback_top) { - softback_top += vc->vc_size_row; - if (softback_top == softback_end) - softback_top = softback_buf; - } - } - softback_curr = softback_in; -} - static bool fbcon_scroll(struct vc_data *vc, 
unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { @@ -1891,8 +1711,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (softback_top) - fbcon_softback_note(vc, t, count); if (logo_shown >= 0) goto redraw_up; switch (p->scrollmode) { @@ -2185,6 +2003,9 @@ static void updatescrollmode(struct fbcon_display *p, } } +#define PITCH(w) (((w) + 7) >> 3) +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ + static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { @@ -2194,6 +2015,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + if (p->userfont && FNTSIZE(vc->vc_font.data)) { + int size; + int pitch = PITCH(vc->vc_font.width); + + /* + * If user font, ensure that a possible change to user font + * height or width will not allow a font data out-of-bounds access. + * NOTE: must use original charcount in calculation as font + * charcount can change and cannot be used to determine the + * font data allocated size. + */ + if (pitch <= 0) + return -EINVAL; + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); + if (size > FNTSIZE(vc->vc_font.data)) + return -EINVAL; + } + virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, @@ -2242,14 +2081,6 @@ static int fbcon_switch(struct vc_data *vc) info = registered_fb[con2fb_map[vc->vc_num]]; ops = info->fbcon_par; - if (softback_top) { - if (softback_lines) - fbcon_set_origin(vc); - softback_top = softback_curr = softback_in = softback_buf; - softback_lines = 0; - fbcon_update_softback(vc); - } - if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; @@ -2464,6 +2295,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) if (font->width <= 8) { j = vc->vc_font.height; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 32 - j); @@ -2472,6 +2306,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else if (font->width <= 16) { j = vc->vc_font.height * 2; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 64 - j); @@ -2479,6 +2316,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) fontdata += j; } } else if (font->width <= 24) { + if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { for (j = 0; j < vc->vc_font.height; j++) { *data++ = fontdata[0]; @@ -2491,6 +2331,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else { j = vc->vc_font.height * 4; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 128 - j); @@ -2572,9 +2415,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int cnt; char *old_data = NULL; - if (con_is_visible(vc) && softback_lines) - fbcon_set_origin(vc); - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); if (p->userfont) old_data = 
vc->vc_font.data; @@ -2600,8 +2440,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, cols /= w; rows /= h; vc_resize(vc, cols, rows); - if (con_is_visible(vc) && softback_buf) - fbcon_update_softback(vc); } else if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); @@ -2645,7 +2483,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, int size; int i, csum; u8 *new_data, *data = font->data; - int pitch = (font->width+7) >> 3; + int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? * If not this check should be changed to charcount < 256 */ @@ -2661,7 +2499,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = h * pitch * charcount; + size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); @@ -2760,19 +2598,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) { - unsigned long p; - int line; - - if (vc->vc_num != fg_console || !softback_lines) - return (u16 *) (vc->vc_origin + offset); - line = offset / vc->vc_size_row; - if (line >= softback_lines) - return (u16 *) (vc->vc_origin + offset - - softback_lines * vc->vc_size_row); - p = softback_curr + offset; - if (p >= softback_end) - p += softback_buf - softback_end; - return (u16 *) p; + return (u16 *) (vc->vc_origin + offset); } static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, @@ -2786,22 +2612,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, x = offset % vc->vc_cols; y = offset / vc->vc_cols; - if (vc->vc_num == fg_console) - y += softback_lines; ret = pos + (vc->vc_cols - x) * 2; - } else if (vc->vc_num == fg_console && softback_lines) { - unsigned long offset = pos - softback_curr; - - if (pos < softback_curr) - offset += softback_end - softback_buf; - offset /= 2; - x = offset % vc->vc_cols; - y = offset / vc->vc_cols; - ret = pos + (vc->vc_cols - x) * 2; - if (ret == softback_end) - ret = softback_buf; - if (ret == softback_in) - ret = vc->vc_origin; } else { /* Should not happen */ x = y = 0; @@ -2829,106 +2640,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); scr_writew(a, p++); - if (p == (u16 *) softback_end) - p = (u16 *) softback_buf; - if (p == (u16 *) softback_in) - p = (u16 *) vc->vc_origin; } } -static void fbcon_scrolldelta(struct vc_data *vc, int lines) -{ - struct fb_info *info = registered_fb[con2fb_map[fg_console]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *disp = &fb_display[fg_console]; - int offset, limit, scrollback_old; - - if (softback_top) { - if (vc->vc_num != fg_console) - return; - if (vc->vc_mode != KD_TEXT || !lines) - return; - if (logo_shown >= 0) { - struct vc_data *conp2 = vc_cons[logo_shown].d; - - if (conp2->vc_top == logo_lines - && conp2->vc_bottom == conp2->vc_rows) - conp2->vc_top = 0; - if (logo_shown == vc->vc_num) { - unsigned long p, q; - int i; - - p = softback_in; - q = vc->vc_origin + - logo_lines * vc->vc_size_row; - for (i = 0; i < logo_lines; i++) { - if (p == softback_top) - break; - if (p == softback_buf) - p = softback_end; - p -= vc->vc_size_row; - q -= vc->vc_size_row; - scr_memcpyw((u16 *) q, (u16 *) p, - vc->vc_size_row); - } - softback_in = softback_curr = p; - 
update_region(vc, vc->vc_origin, - logo_lines * vc->vc_cols); - } - logo_shown = FBCON_LOGO_CANSHOW; - } - fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); - fbcon_redraw_softback(vc, disp, lines); - fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); - return; - } - - if (!scrollback_phys_max) - return; - - scrollback_old = scrollback_current; - scrollback_current -= lines; - if (scrollback_current < 0) - scrollback_current = 0; - else if (scrollback_current > scrollback_max) - scrollback_current = scrollback_max; - if (scrollback_current == scrollback_old) - return; - - if (fbcon_is_inactive(vc, info)) - return; - - fbcon_cursor(vc, CM_ERASE); - - offset = disp->yscroll - scrollback_current; - limit = disp->vrows; - switch (disp->scrollmode) { - case SCROLL_WRAP_MOVE: - info->var.vmode |= FB_VMODE_YWRAP; - break; - case SCROLL_PAN_MOVE: - case SCROLL_PAN_REDRAW: - limit -= vc->vc_rows; - info->var.vmode &= ~FB_VMODE_YWRAP; - break; - } - if (offset < 0) - offset += limit; - else if (offset >= limit) - offset -= limit; - - ops->var.xoffset = 0; - ops->var.yoffset = offset * vc->vc_font.height; - ops->update_start(info); - - if (!scrollback_current) - fbcon_cursor(vc, CM_DRAW); -} - static int fbcon_set_origin(struct vc_data *vc) { - if (softback_lines) - fbcon_scrolldelta(vc, softback_lines); return 0; } @@ -2992,8 +2708,6 @@ static void fbcon_modechanged(struct fb_info *info) fbcon_set_palette(vc, color_table); update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -3404,7 +3118,6 @@ static const struct consw fb_con = { .con_font_default = fbcon_set_def_font, .con_font_copy = fbcon_copy_font, .con_set_palette = fbcon_set_palette, - .con_scrolldelta = fbcon_scrolldelta, .con_set_origin = fbcon_set_origin, .con_invert_region = fbcon_invert_region, .con_screen_pos = fbcon_screen_pos, @@ -3639,9 +3352,6 @@ static void fbcon_exit(void) } #endif - kvfree((void *)softback_buf); - softback_buf = 0UL; - for_each_registered_fb(i) { int pending = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 20dea853765f..9315b360c898 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -62,7 +62,7 @@ struct fbcon_ops { void (*clear_margins)(struct vc_data *vc, struct fb_info *info, int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg); + int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ @@ -152,13 +152,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) -/* Font */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FNTSUM(fd) (((int *)(fd))[-4]) -#define FONT_EXTRA_WORDS 4 - /* * Scroll Method */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..71ad6967a70e 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = 
rw; @@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; region.height = info->var.yres_virtual; @@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38..31fe5dd651d4 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; @@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c index c0d445294aa7..ac72d4f85f7d 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -14,6 +14,7 @@ #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/console.h> +#include <linux/font.h> #include <asm/types.h> #include "fbcon.h" #include "fbcon_rotate.h" diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..b2dd1370e39b 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; @@ 
-249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index e6a1c805064f..bf76dadbed87 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -662,20 +662,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate) fb_logo.depth = 1; - if (fb_logo.depth > 4 && depth > 4) { - switch (info->fix.visual) { - case FB_VISUAL_TRUECOLOR: - fb_logo.needs_truepalette = 1; - break; - case FB_VISUAL_DIRECTCOLOR: - fb_logo.needs_directpalette = 1; - fb_logo.needs_cmapreset = 1; - break; - case FB_VISUAL_PSEUDOCOLOR: - fb_logo.needs_cmapreset = 1; - break; - } - } + if (fb_logo.depth > 4 && depth > 4) { + switch (info->fix.visual) { + case FB_VISUAL_TRUECOLOR: + fb_logo.needs_truepalette = 1; + break; + case FB_VISUAL_DIRECTCOLOR: + fb_logo.needs_directpalette = 1; + fb_logo.needs_cmapreset = 1; + break; + case FB_VISUAL_PSEUDOCOLOR: + fb_logo.needs_cmapreset = 1; + break; + } + } height = fb_logo.logo->height; if (fb_center_logo) @@ -952,7 +952,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var, int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) { - int flags = info->flags; int ret = 0; u32 activate; struct fb_var_screeninfo old_var; @@ -1002,6 +1001,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) return 0; } + /* bitfill_aligned() assumes that it's at least 8x8 */ + if (var->xres < 8 || var->yres < 8) + return -EINVAL; + ret = info->fbops->fb_check_var(var, info); if (ret) @@ -1047,9 +1050,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.data = &mode; fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); - if (flags & FBINFO_MISC_USEREVENT) - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL); - return 0; } EXPORT_SYMBOL(fb_set_var); @@ -1060,19 +1060,19 @@ fb_blank(struct fb_info *info, int blank) struct fb_event event; int ret = -EINVAL; - if (blank > FB_BLANK_POWERDOWN) - blank = FB_BLANK_POWERDOWN; + if (blank > FB_BLANK_POWERDOWN) + blank = FB_BLANK_POWERDOWN; event.info = info; event.data = &blank; if (info->fbops->fb_blank) - ret = info->fbops->fb_blank(blank, info); + ret = info->fbops->fb_blank(blank, info); if (!ret) fb_notifier_call_chain(FB_EVENT_BLANK, &event); - return ret; + return ret; } EXPORT_SYMBOL(fb_blank); @@ -1100,9 +1100,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - info->flags |= FBINFO_MISC_USEREVENT; ret = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!ret) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); console_unlock(); if (!ret && copy_to_user(argp, &var, sizeof(var))) @@ -1110,7 +1110,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, break; case FBIOGET_FSCREENINFO: lock_fb_info(info); - 
fix = info->fix; + memcpy(&fix, &info->fix, sizeof(fix)); if (info->flags & FBINFO_HIDE_SMEM_START) fix.smem_start = 0; unlock_fb_info(info); diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index d54c88f88991..65dae05fff8e 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var) var->activate |= FB_ACTIVATE_FORCE; console_lock(); - fb_info->flags |= FBINFO_MISC_USEREVENT; err = fb_set_var(fb_info, var); - fb_info->flags &= ~FBINFO_MISC_USEREVENT; + if (!err) + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); console_unlock(); if (err) return err; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957f..adff8d6ffe6f 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -13,6 +13,7 @@ #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/console.h> +#include <linux/font.h> #include <asm/types.h> #include "fbcon.h" @@ -80,7 +81,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, } static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 51d97ec4f58f..e0cbf5b3d217 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (efi_enabled(EFI_BOOT) && + if (efi_enabled(EFI_MEMMAP) && !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 2dcb7c58b31e..1c6ae98710a0 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -703,7 +703,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) goto err1; } - fb_virt = ioremap(par->mem->start, screen_fb_size); + /* + * Map the VRAM cacheable for performance. + */ + fb_virt = ioremap_wc(par->mem->start, screen_fb_size); if (!fb_virt) goto err2; diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index b770946a0920..76464000933d 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info) #else printk(KERN_ERR "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); + kfree(info->monspecs.modedb); return -1; #endif default: diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 376ee5bc3ddc..34e8171856e9 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -520,8 +520,11 @@ int dispc_runtime_get(void) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dispc.pdev->dev); + return r; + } + return 0; } EXPORT_SYMBOL(dispc_runtime_get); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index d620376216e1..6f9c25fec994 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -1137,8 +1137,11 @@ static int dsi_runtime_get(struct platform_device *dsidev) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(&dsi->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dsi->pdev->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct platform_device *dsidev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 7252d22dd117..a6b1c1598040 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -768,8 +768,11 @@ int dss_runtime_get(void) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dss.pdev->dev); + return r; + } + return 0; } void dss_runtime_put(void) @@ -833,7 +836,7 @@ static const struct dss_features omap34xx_dss_feats = { }; static const struct dss_features omap3630_dss_feats = { - .fck_div_max = 32, + .fck_div_max = 31, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", .dpi_select_source = &dss_dpi_select_source_omap2_omap3, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 7060ae56c062..4804aab34298 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -39,9 +39,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index ac49531e4732..a06b6f1355bd 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -43,9 +43,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index f81e2a46366d..3717dac3dcc8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -391,8 +391,11 @@ static int venc_runtime_get(void) DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&venc.pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(void) diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 5ed2db39d823..ce90483c5020 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -29,6 +29,7 @@ #include <linux/freezer.h> #include <linux/uaccess.h> #include <linux/fb.h> +#include <linux/fbcon.h> #include <linux/init.h> #include <asm/cell-regs.h> @@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd, var = info->var; fb_videomode_to_var(&var, vmode); console_lock(); - info->flags |= FBINFO_MISC_USEREVENT; /* Force, in case only special bits changed */ var.activate |= FB_ACTIVATE_FORCE; par->new_mode_id = val; retval = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!retval) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); console_unlock(); } break; diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 0a3b2b7c7891..c916e9161443 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -1016,6 +1016,8 @@ static int __init pvr2fb_setup(char *options) if (!options || !*options) return 0; + cable_arg[0] = output_arg[0] = 0; + while ((this_opt = strsep(&options, ","))) { if (!*this_opt) continue; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index f70c9f79622e..27635926cea3 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2425,8 +2425,8 @@ static int pxafb_remove(struct platform_device *dev) free_pages_exact(fbi->video_mem, fbi->video_mem_size); - dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, - fbi->dma_buff_phys); + dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, + fbi->dma_buff_phys); return 0; } diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index 512789f5f884..d5d22d9c0f56 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -2158,6 +2158,8 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev, info->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + else + kfree(info->pixmap.addr); } #endif return err; diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c index dfe3eb769638..fde27feae5d0 100644 --- a/drivers/video/fbdev/sis/init.c +++ b/drivers/video/fbdev/sis/init.c @@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, i = 0; + if (SiS_Pr->ChipType == SIS_730) + queuedata = &FQBQData730[0]; + else + queuedata = &FQBQData[0]; + if(ModeNo > 0x13) { /* Get VCLK */ @@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, /* Get half colordepth */ colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)]; - if(SiS_Pr->ChipType == SIS_730) { - queuedata = &FQBQData730[0]; - } else { - queuedata = &FQBQData[0]; - } - do { templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth; diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 207d0add684b..246681414577 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb, static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb->screen_base) { + if (sfb->chip_id == 0x720) + sfb->fb->screen_base -= 0x00200000; 
iounmap(sfb->fb->screen_base); sfb->fb->screen_base = NULL; } diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index fe373b63ddd6..ecbfbbf1c1a7 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1017,6 +1017,7 @@ static void dlfb_ops_destroy(struct fb_info *info) } vfree(dlfb->backing_buffer); kfree(dlfb->edid); + dlfb_free_urb_list(dlfb); usb_put_dev(dlfb->udev); kfree(dlfb); diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 2c6a576ed84c..3c4d20618de4 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info) } static void vga16fb_clock_chip(struct vga16fb_par *par, - unsigned int pixclock, + unsigned int *pixclock, const struct fb_info *info, int mul, int div) { @@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, { 0 /* bad */, 0x00, 0x00}}; int err; - pixclock = (pixclock * mul) / div; + *pixclock = (*pixclock * mul) / div; best = vgaclocks; - err = pixclock - best->pixclock; + err = *pixclock - best->pixclock; if (err < 0) err = -err; for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) { int tmp; - tmp = pixclock - ptr->pixclock; + tmp = *pixclock - ptr->pixclock; if (tmp < 0) tmp = -tmp; if (tmp < err) { err = tmp; @@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, } par->misc |= best->misc; par->clkdiv = best->seq_clock_mode; - pixclock = (best->pixclock * div) / mul; + *pixclock = (best->pixclock * div) / mul; } #define FAIL(X) return -EINVAL @@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var, if (mode & MODE_8BPP) /* pixel clock == vga clock / 2 */ - vga16fb_clock_chip(par, var->pixclock, info, 1, 2); + vga16fb_clock_chip(par, &var->pixclock, info, 1, 2); else /* pixel clock == vga clock */ - vga16fb_clock_chip(par, var->pixclock, info, 1, 1); + vga16fb_clock_chip(par, &var->pixclock, info, 1, 1); var->red.offset = var->green.offset = var->blue.offset = var->transp.offset = 0; @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i char oldop = setop(0); char oldsr = setsr(0); char oldmask = selectmask(); - const char *cdat = image->data; + const unsigned char *cdat = image->data; u32 dx = image->dx; char __iomem *where; int y; diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c index be8d9702cbb2..4b84fd4483e1 100644 --- a/drivers/video/fbdev/vt8500lcdfb.c +++ b/drivers/video/fbdev/vt8500lcdfb.c @@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info) info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) for (i = 0; i < 256; i++) vt8500lcd_setcolreg(i, 0, 0, 0, 0, info); + fallthrough; case FB_BLANK_UNBLANK: if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index 3be07807edcd..e30f9427b335 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) memsize=par->mach->mem->size; memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); vfree(par->saved_extmem); + par->saved_extmem = NULL; } if (par->saved_intmem) { memsize=MEM_INT_SIZE; @@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) else memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, 
memsize); vfree(par->saved_intmem); + par->saved_intmem = NULL; } } diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c index 93d5bebf9572..fb292f9cf29d 100644 --- a/drivers/virt/fsl_hypervisor.c +++ b/drivers/virt/fsl_hypervisor.c @@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) unsigned int i; long ret = 0; - int num_pinned; /* return value from get_user_pages() */ + int num_pinned = 0; /* return value from get_user_pages_fast() */ phys_addr_t remote_paddr; /* The next address in the remote buffer */ uint32_t count; /* The number of bytes left to copy */ @@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) return -EINVAL; /* - * The array of pages returned by get_user_pages() covers only + * The array of pages returned by get_user_pages_fast() covers only * page-aligned memory. Since the user buffer is probably not * page-aligned, we need to handle the discrepancy. * @@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) /* * 'pages' is an array of struct page pointers that's initialized by - * get_user_pages(). + * get_user_pages_fast(). */ pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { @@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) if (!sg_list_unaligned) { pr_debug("fsl-hv: could not allocate S/G list\n"); ret = -ENOMEM; - goto exit; + goto free_pages; } sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); @@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) num_pages, param.source != -1 ? FOLL_WRITE : 0, pages); if (num_pinned != num_pages) { - /* get_user_pages() failed */ pr_debug("fsl-hv: could not lock source buffer\n"); ret = (num_pinned < 0) ? 
num_pinned : -EFAULT; goto exit; @@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) virt_to_phys(sg_list), num_pages); exit: - if (pages) { - for (i = 0; i < num_pages; i++) - if (pages[i]) - put_page(pages[i]); + if (pages && (num_pinned > 0)) { + for (i = 0; i < num_pinned; i++) + put_page(pages[i]); } kfree(sg_list_unaligned); +free_pages: kfree(pages); if (!ret) diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 2307b0329aec..95bfdb8ac8a2 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1443,7 +1443,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, or_mask = caps->u.in.or_mask; not_mask = caps->u.in.not_mask; - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) return -EINVAL; ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, @@ -1519,7 +1519,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) return vbg_ioctl_vmmrequest(gdev, session, data); if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) @@ -1557,6 +1558,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) case VBG_IOCTL_HGCM_CALL(0): return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); case VBG_IOCTL_LOG(0): + case VBG_IOCTL_LOG_ALT(0): return vbg_ioctl_log(data); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 4188c12b839f..77c3a9c8255d 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -15,6 +15,21 @@ #include <linux/vboxguest.h> #include "vmmdev.h" +/* + * The mainline kernel version (this version) of the vboxguest module + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead + * of _IO('V', ...) as the out of tree VirtualBox upstream version does. + * + * These _ALT definitions keep compatibility with the wrong defines the + * mainline kernel version used for a while. + * Note the VirtualBox userspace bits have always been built against + * VirtualBox upstream's headers, so this is likely not necessary. But + * we must never break our ABI so we keep these around to be 100% sure. + */ +#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) + struct vbg_session; /** VBox guest memory balloon. */ diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e8c0f1c1056..32c2c52f7e84 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, * the need for a bounce-buffer and another copy later on.
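The _ALT defines above exist purely for ABI stability: _IO() encodes zero direction bits, while the old _IOC(_IOC_READ | _IOC_WRITE, ...) form sets both, so the two request numbers differ numerically and the dispatcher has to accept each of them. A minimal sketch of the resulting check (the non-_ALT define is assumed to live in the uapi header as _IO('V', 3)):

#include <linux/ioctl.h>

#define VBG_IOCTL_VMMDEV_REQUEST_BIG     _IO('V', 3)  /* assumed uapi define */
#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)

/* Same command number, different direction bits: both must match. */
static bool vbg_is_vmmdev_big_req(unsigned int req)
{
        return req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
               req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
}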
*/ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 43c391626a00..bf2945c25ca8 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -466,7 +466,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call) * Cancellation fun. */ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, - u32 timeout_ms, bool *leak_it) + u32 timeout_ms, bool interruptible, bool *leak_it) { int rc, cancel_rc, ret; long timeout; @@ -493,10 +493,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, else timeout = msecs_to_jiffies(timeout_ms); - timeout = wait_event_interruptible_timeout( - gdev->hgcm_wq, - hgcm_req_done(gdev, &call->header), - timeout); + if (interruptible) { + timeout = wait_event_interruptible_timeout(gdev->hgcm_wq, + hgcm_req_done(gdev, &call->header), + timeout); + } else { + timeout = wait_event_timeout(gdev->hgcm_wq, + hgcm_req_done(gdev, &call->header), + timeout); + } /* timeout > 0 means hgcm_req_done has returned true, so success */ if (timeout > 0) @@ -629,7 +634,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, hgcm_call_init_call(call, client_id, function, parms, parm_count, bounce_bufs); - ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it); + ret = vbg_hgcm_do_call(gdev, call, timeout_ms, + requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it); if (ret == 0) { *vbox_status = call->header.result; ret = hgcm_call_copy_back_result(call, parms, parm_count, diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 6337b8d75d96..21f408120e3f 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h @@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); * not. */ #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) +/* The mask of valid capabilities, for sanity checking. */ +#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ struct vmmdev_hypervisorinfo { diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7aaf150f89ba..1e444826a66e 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -529,10 +529,14 @@ static int init_vqs(struct virtio_balloon *vb) static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, - &vb->config_read_bitmap)) + &vb->config_read_bitmap)) { virtio_cread(vb->vdev, struct virtio_balloon_config, free_page_report_cmd_id, &vb->cmd_id_received_cache); + /* Legacy balloon config space is LE, unlike all other devices. 
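Pre-1.0 ("legacy") virtio devices normally use guest-native endianness, but the balloon's config space is an exception and stays little-endian, which is what the conversion just below corrects. The same idea as a standalone helper, a sketch with an illustrative name:

static u32 balloon_config_read32(struct virtio_device *vdev,
                                 unsigned int offset)
{
        u32 val;

        virtio_cread_bytes(vdev, offset, &val, sizeof(val));
        /* Legacy balloon config space is LE even on BE guests. */
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                val = le32_to_cpu((__force __le32)val);
        return val;
}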
*/ + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + } return vb->cmd_id_received_cache; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 58b96baa8d48..97e8a195e18f 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1608,7 +1608,6 @@ static struct virtqueue *vring_create_virtqueue_packed( vq->num_added = 0; vq->packed_ring = true; vq->use_dma_api = vring_use_dma_api(vdev); - list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; @@ -1669,6 +1668,7 @@ static struct virtqueue *vring_create_virtqueue_packed( cpu_to_le16(vq->packed.event_flags_shadow); } + list_add_tail(&vq->vq.list, &vdev->vqs); return &vq->vq; err_desc_extra: @@ -1676,9 +1676,9 @@ err_desc_extra: err_desc_state: kfree(vq); err_vq: - vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr); + vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr); err_device: - vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr); + vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr); err_driver: vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr); err_ring: @@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + virtio_mb(vq->weak_barriers); return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : virtqueue_poll_split(_vq, last_used_idx); @@ -2082,7 +2085,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, vq->last_used_idx = 0; vq->num_added = 0; vq->use_dma_api = vring_use_dma_api(vdev); - list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; @@ -2124,6 +2126,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, memset(vq->split.desc_state, 0, vring.num * sizeof(struct vring_desc_state_split)); + list_add_tail(&vq->vq.list, &vdev->vqs); return &vq->vq; } EXPORT_SYMBOL_GPL(__vring_new_virtqueue); diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c index 1ca880e01476..090cbbf9e1e2 100644 --- a/drivers/w1/masters/mxc_w1.c +++ b/drivers/w1/masters/mxc_w1.c @@ -7,7 +7,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> -#include <linux/jiffies.h> +#include <linux/ktime.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> @@ -40,12 +40,12 @@ struct mxc_w1_device { static u8 mxc_w1_ds2_reset_bus(void *data) { struct mxc_w1_device *dev = data; - unsigned long timeout; + ktime_t timeout; writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL); /* Wait for reset sequence 511+512us, use 1500us for sure */ - timeout = jiffies + usecs_to_jiffies(1500); + timeout = ktime_add_us(ktime_get(), 1500); udelay(511 + 512); @@ -55,7 +55,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data) /* PST bit is valid after the RPP bit is self-cleared */ if (!(ctrl & MXC_W1_CONTROL_RPP)) return !(ctrl & MXC_W1_CONTROL_PST); - } while (time_is_after_jiffies(timeout)); + } while (ktime_before(ktime_get(), timeout)); return 1; } @@ -68,12 +68,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data) static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) { struct mxc_w1_device *dev = data; - unsigned long timeout; + ktime_t timeout; writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL); /* Wait for read/write 
bit (60us, Max 120us), use 200us for sure */ - timeout = jiffies + usecs_to_jiffies(200); + timeout = ktime_add_us(ktime_get(), 200); udelay(60); @@ -83,7 +83,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) /* RDST bit is valid after the WR1/RD bit is self-cleared */ if (!(ctrl & MXC_W1_CONTROL_WR(bit))) return !!(ctrl & MXC_W1_CONTROL_RDST); - } while (time_is_after_jiffies(timeout)); + } while (ktime_before(ktime_get(), timeout)); return 0; } diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 4164045866b3..6bac5c18cf6d 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" - " TXCOMPLETE/RXCOMPLETE, %x", *status); + " TXCOMPLETE/RXCOMPLETE, %x\n", *status); ret = -ETIMEDOUT; goto out; } @@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); } out: @@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); - dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); + dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE @@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { - dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", + dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", tmp_status); ret = -ETIMEDOUT; goto out; @@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index e2745f686196..1aa42e879e63 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -374,6 +374,7 @@ config ARM_SBSA_WATCHDOG config ARMADA_37XX_WATCHDOG tristate "Armada 37xx watchdog" depends on ARCH_MVEBU || COMPILE_TEST + depends on HAS_IOMEM select MFD_SYSCON select WATCHDOG_CORE help @@ -617,7 +618,7 @@ config SUNXI_WATCHDOG config COH901327_WATCHDOG bool "ST-Ericsson COH 901 327 watchdog" - depends on ARCH_U300 || (ARM && COMPILE_TEST) + depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST) default y if MACH_U300 select WATCHDOG_CORE help @@ -774,6 +775,7 @@ config MOXART_WDT config SIRFSOC_WATCHDOG tristate "SiRFSOC watchdog" + depends on HAS_IOMEM depends on ARCH_SIRF || COMPILE_TEST select WATCHDOG_CORE default y diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index e92f38fcb7a4..1b9bcfed39e9 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -55,11 +55,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, unsigned int regval) { struct da9062 *chip = wdt->hw; - int ret; - - ret = da9062_reset_watchdog_timer(wdt); - 
if (ret) - return ret; regmap_update_bits(chip->regmap, DA9062AA_CONTROL_D, diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e46104c2fd94..893cef70c159 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -689,9 +689,9 @@ static int __init watchdog_init(int sioaddr) * into the module have been registered yet. */ watchdog.sioaddr = sioaddr; - watchdog.ident.options = WDIOC_SETTIMEOUT - | WDIOF_MAGICCLOSE - | WDIOF_KEEPALIVEPING; + watchdog.ident.options = WDIOF_MAGICCLOSE + | WDIOF_KEEPALIVEPING + | WDIOF_CARDRESET; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", @@ -705,6 +705,13 @@ static int __init watchdog_init(int sioaddr) wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + /* + * We don't want WDTMOUT_STS to stick around till regular reboot. + * Write 1 to the bit to clear it to zero. + */ + superio_outb(sioaddr, F71808FG_REG_WDT_CONF, + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); + superio_exit(sioaddr); err = watchdog_set_timeout(timeout); diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c index 8ed89f032ebf..e0e62149a6f4 100644 --- a/drivers/watchdog/imx_sc_wdt.c +++ b/drivers/watchdog/imx_sc_wdt.c @@ -177,6 +177,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) wdog->timeout = DEFAULT_TIMEOUT; watchdog_init_timeout(wdog, 0, dev); + + ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout); + if (ret) + return ret; + watchdog_stop_on_reboot(wdog); watchdog_stop_on_unregister(wdog); diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c index 5391bf3e6b11..c5967d8b4256 100644 --- a/drivers/watchdog/mei_wdt.c +++ b/drivers/watchdog/mei_wdt.c @@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt) watchdog_set_drvdata(&wdt->wdd, wdt); watchdog_stop_on_reboot(&wdt->wdd); + watchdog_stop_on_unregister(&wdt->wdd); ret = watchdog_register_device(&wdt->wdd); if (ret) diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index eb47fe5ed280..094f096aee0f 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -22,7 +22,6 @@ enum wdt_reg { }; #define QCOM_WDT_ENABLE BIT(0) -#define QCOM_WDT_ENABLE_IRQ BIT(1) static const u32 reg_offset_data_apcs_tmr[] = { [WDT_RST] = 0x38, @@ -58,16 +57,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd) return container_of(wdd, struct qcom_wdt, wdd); } -static inline int qcom_get_enable(struct watchdog_device *wdd) -{ - int enable = QCOM_WDT_ENABLE; - - if (wdd->pretimeout) - enable |= QCOM_WDT_ENABLE_IRQ; - - return enable; -} - static irqreturn_t qcom_wdt_isr(int irq, void *arg) { struct watchdog_device *wdd = arg; @@ -86,7 +75,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd) writel(1, wdt_addr(wdt, WDT_RST)); writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME)); writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME)); - writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN)); + writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN)); return 0; } @@ -143,7 +132,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action, */ wmb(); - msleep(150); + mdelay(150); return 0; } diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 2e608ae6cbc7..e0efbc583198 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -230,6 +230,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) 
rdc321x_wdt_device.sb_pdev = pdata->sb_pdev; rdc321x_wdt_device.base_reg = r->start; + rdc321x_wdt_device.queue = 0; + rdc321x_wdt_device.default_ticks = ticks; err = misc_register(&rdc321x_wdt_misc); if (err < 0) { @@ -244,14 +246,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) rdc321x_wdt_device.base_reg, RDC_WDT_RST); init_completion(&rdc321x_wdt_device.stop); - rdc321x_wdt_device.queue = 0; clear_bit(0, &rdc321x_wdt_device.inuse); timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); - rdc321x_wdt_device.default_ticks = ticks; - dev_info(&pdev->dev, "watchdog init success\n"); return 0; diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h index 87eaf357ae01..adf015aa4126 100644 --- a/drivers/watchdog/sp5100_tco.h +++ b/drivers/watchdog/sp5100_tco.h @@ -70,7 +70,7 @@ #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7) -#define EFCH_PM_DECODEEN3 0x00 +#define EFCH_PM_DECODEEN3 0x03 #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0) #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2)) diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 53e04926a7b2..190d26e2e75f 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd) { struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); + writel_relaxed(UNLOCK, wdt->base + WDTLOCK); writel_relaxed(0, wdt->base + WDTCONTROL); writel_relaxed(0, wdt->base + WDTLOAD); writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); + /* Flush posted writes. */ + readl_relaxed(wdt->base + WDTLOCK); + return 0; } diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c index 65cb55f3916f..b9b1daa9e2a4 100644 --- a/drivers/watchdog/sprd_wdt.c +++ b/drivers/watchdog/sprd_wdt.c @@ -108,18 +108,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout, u32 tmr_step = timeout * SPRD_WDT_CNT_STEP; u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP; - sprd_wdt_unlock(wdt->base); - writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & - SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH); - writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK), - wdt->base + SPRD_WDT_LOAD_LOW); - writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & - SPRD_WDT_LOW_VALUE_MASK, - wdt->base + SPRD_WDT_IRQ_LOAD_HIGH); - writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK, - wdt->base + SPRD_WDT_IRQ_LOAD_LOW); - sprd_wdt_lock(wdt->base); - /* * Waiting the load value operation done, * it needs two or three RTC clock cycles. 
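The hunk below is an ordering fix: the driver now waits for the previous load operation to drain before writing the new counter values instead of afterwards, so a quick second timeout update cannot race the still-busy hardware. The wait itself is a bounded poll; a sketch with placeholder register and bit names (not the driver's real ones):

#include <linux/bits.h>
#include <linux/io.h>

#define WDT_STS_REG       0x10        /* placeholder status register */
#define WDT_LD_BUSY       BIT(4)      /* placeholder "load busy" bit */
#define WDT_LOAD_TIMEOUT  1000

static int wdt_wait_load_idle(void __iomem *base)
{
        u32 cnt = 0;

        while ((readl_relaxed(base + WDT_STS_REG) & WDT_LD_BUSY) &&
               cnt < WDT_LOAD_TIMEOUT) {
                cnt++;
                cpu_relax();
        }

        return cnt < WDT_LOAD_TIMEOUT ? 0 : -EBUSY;
}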
@@ -134,6 +122,19 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout, if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT) return -EBUSY; + + sprd_wdt_unlock(wdt->base); + writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & + SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH); + writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK), + wdt->base + SPRD_WDT_LOAD_LOW); + writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) & + SPRD_WDT_LOW_VALUE_MASK, + wdt->base + SPRD_WDT_IRQ_LOAD_HIGH); + writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK, + wdt->base + SPRD_WDT_IRQ_LOAD_LOW); + sprd_wdt_lock(wdt->base); + return 0; } @@ -345,15 +346,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev) if (ret) return ret; - if (watchdog_active(&wdt->wdd)) { + if (watchdog_active(&wdt->wdd)) ret = sprd_wdt_start(&wdt->wdd); - if (ret) { - sprd_wdt_disable(wdt); - return ret; - } - } - return 0; + return ret; } static const struct dev_pm_ops sprd_wdt_pm_ops = { diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 861daf4f37b2..faa46a666f4c 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -255,15 +255,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd) } if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { - wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; + if (!wdd->ops->stop) + pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id); + else { + wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; - ret = register_reboot_notifier(&wdd->reboot_nb); - if (ret) { - pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", - wdd->id, ret); - watchdog_dev_unregister(wdd); - ida_simple_remove(&watchdog_ida, id); - return ret; + ret = register_reboot_notifier(&wdd->reboot_nb); + if (ret) { + pr_err("watchdog%d: Cannot register reboot notifier (%d)\n", + wdd->id, ret); + watchdog_dev_unregister(wdd); + ida_simple_remove(&watchdog_ida, id); + return ret; + } } } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index ce04edc69e5f..8494846ccdc5 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd) if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; + wd_data->last_hw_keepalive = started_at; watchdog_update_worker(wdd); } @@ -970,8 +971,19 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) wd_data->wdd = wdd; wdd->wd_data = wd_data; - if (IS_ERR_OR_NULL(watchdog_kworker)) + if (IS_ERR_OR_NULL(watchdog_kworker)) { + kfree(wd_data); return -ENODEV; + } + + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); @@ -988,20 +1000,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) pr_err("%s: a legacy watchdog module is probably present.\n", wdd->info->identity); old_wd_data = NULL; - kfree(wd_data); + put_device(&wd_data->dev); return err; } } - device_initialize(&wd_data->dev); - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); - wd_data->dev.class = 
&watchdog_class; - wd_data->dev.parent = wdd->parent; - wd_data->dev.groups = wdd->groups; - wd_data->dev.release = watchdog_core_data_release; - dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); - /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index bed90d612e48..ebb05517b6aa 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages) if (xen_hotplug_unpopulated) { st = reserve_additional_memory(); if (st != BP_ECANCELED) { + int rc; + mutex_unlock(&balloon_mutex); - wait_event(balloon_wq, + rc = wait_event_interruptible(balloon_wq, !list_empty(&ballooned_pages)); mutex_lock(&balloon_mutex); - return 0; + return rc ? -ENOMEM : 0; } } @@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) out_undo: mutex_unlock(&balloon_mutex); free_xenballooned_pages(pgno, pages); + /* + * NB: free_xenballooned_pages will only subtract pgno pages, but since + * target_unpopulated is incremented with nr_pages at the start we need + * to remove the remaining ones also, or accounting will be screwed. + */ + balloon_stats.target_unpopulated -= nr_pages - pgno; return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index 8edef51c92e5..77cc80bcb479 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void) return EVTCHN_2L_NR_CHANNELS; } +static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) +{ + clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); +} + static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) { clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); @@ -71,12 +76,6 @@ static bool evtchn_2l_is_pending(unsigned port) return sync_test_bit(port, BM(&s->evtchn_pending[0])); } -static bool evtchn_2l_test_and_set_mask(unsigned port) -{ - struct shared_info *s = HYPERVISOR_shared_info; - return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); -} - static void evtchn_2l_mask(unsigned port) { struct shared_info *s = HYPERVISOR_shared_info; @@ -91,6 +90,8 @@ static void evtchn_2l_unmask(unsigned port) BUG_ON(!irqs_disabled()); + smp_wmb(); /* All writes before unmask must be visible. */ + if (unlikely((cpu != cpu_from_evtchn(port)))) do_hypercall = 1; else { @@ -159,7 +160,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu, * a bitset of words which contain pending event bits. The second * level is a bitset of pending events themselves. */ -static void evtchn_2l_handle_events(unsigned cpu) +static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl) { int irq; xen_ulong_t pending_words; @@ -240,10 +241,7 @@ static void evtchn_2l_handle_events(unsigned cpu) /* Process port. 
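For orientation, the 2-level ABI finds pending ports through two bitmap levels: a selector word names the pending words, and each word names the pending bits. Stripped of the fairness bookkeeping (the real loop resumes where the previous scan stopped), the walk that feeds handle_irq_for_port() looks roughly like this sketch, reusing the file's own helpers:

static void scan_pending_sketch(unsigned int cpu, struct shared_info *s,
                                struct vcpu_info *vcpu_info,
                                struct evtchn_loop_ctrl *ctrl)
{
        xen_ulong_t words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

        while (words) {
                unsigned int word_idx = __ffs(words);
                xen_ulong_t bits = active_evtchns(cpu, s, word_idx);

                while (bits) {
                        unsigned int bit_idx = __ffs(bits);

                        handle_irq_for_port(word_idx * BITS_PER_EVTCHN_WORD +
                                            bit_idx, ctrl);
                        bits &= bits - 1;       /* clear lowest set bit */
                }
                words &= words - 1;
        }
}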
*/ port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; - irq = get_evtchn_to_irq(port); - - if (irq != -1) - generic_handle_irq(irq); + handle_irq_for_port(port, ctrl); bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD; @@ -355,18 +353,27 @@ static void evtchn_2l_resume(void) EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); } +static int evtchn_2l_percpu_deinit(unsigned int cpu) +{ + memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); + + return 0; +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, + .remove = evtchn_2l_remove, .bind_to_cpu = evtchn_2l_bind_to_cpu, .clear_pending = evtchn_2l_clear_pending, .set_pending = evtchn_2l_set_pending, .is_pending = evtchn_2l_is_pending, - .test_and_set_mask = evtchn_2l_test_and_set_mask, .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, .resume = evtchn_2l_resume, + .percpu_deinit = evtchn_2l_percpu_deinit, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 6c8843968a52..9cc77f039779 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -33,6 +33,10 @@ #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/cpuhotplug.h> +#include <linux/atomic.h> +#include <linux/ktime.h> #ifdef CONFIG_X86 #include <asm/desc.h> @@ -62,6 +66,15 @@ #include "events_internal.h" +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "xen." + +static uint __read_mostly event_loop_timeout = 2; +module_param(event_loop_timeout, uint, 0644); + +static uint __read_mostly event_eoi_delay = 10; +module_param(event_eoi_delay, uint, 0644); + const struct evtchn_ops *evtchn_ops; /* @@ -70,6 +83,25 @@ const struct evtchn_ops *evtchn_ops; */ static DEFINE_MUTEX(irq_mapping_update_lock); +/* + * Lock protecting event handling loop against removing event channels. + * Adding event channels is no issue as the associated IRQ becomes active + * only after everything is set up (before request_[threaded_]irq() the handler + * can't be entered for an event, as the event channel will be unmasked only + * then). + */ +static DEFINE_RWLOCK(evtchn_rwlock); + +/* + * Lock hierarchy: + * + * irq_mapping_update_lock + * evtchn_rwlock + * IRQ-desc lock + * percpu eoi_list_lock + * irq_info->lock + */ + static LIST_HEAD(xen_irq_list_head); /* IRQ <-> VIRQ mapping. */ @@ -91,18 +123,23 @@ static bool (*pirq_needs_eoi)(unsigned irq); /* Xen will never allocate port zero for any purpose.
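The rwlock discipline in the lock-hierarchy comment above is the heart of the use-after-free fix: every delivery path holds evtchn_rwlock for reading, so an irq_info looked up under the lock cannot be freed mid-handling, and teardown must take the write side first. A condensed sketch of the writer side, matching what xen_free_irq() does further down:

static void teardown_irq_info(unsigned int irq, struct irq_info *info)
{
        unsigned long flags;

        write_lock_irqsave(&evtchn_rwlock, flags);
        list_del(&info->list);
        set_info_for_irq(irq, NULL);
        write_unlock_irqrestore(&evtchn_rwlock, flags);

        kfree(info);    /* no reader under read_lock() can still see it */
}

The upcall loop brackets its whole scan with read_lock()/read_unlock(), so the kfree() above cannot run while any CPU is still dereferencing the info.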
*/ #define VALID_EVTCHN(chn) ((chn) != 0) +static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; + static struct irq_chip xen_dynamic_chip; +static struct irq_chip xen_lateeoi_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); +static DEFINE_PER_CPU(unsigned int, irq_epoch); + static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) - evtchn_to_irq[row][col] = -1; + WRITE_ONCE(evtchn_to_irq[row][col], -1); } static void clear_evtchn_to_irq_all(void) @@ -139,7 +176,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) clear_evtchn_to_irq_row(row); } - evtchn_to_irq[row][col] = irq; + WRITE_ONCE(evtchn_to_irq[row][col], irq); return 0; } @@ -149,13 +186,24 @@ int get_evtchn_to_irq(unsigned evtchn) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; - return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; + return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); } /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_handler_data(irq); + if (irq < nr_legacy_irqs()) + return legacy_info_ptrs[irq]; + else + return irq_get_chip_data(irq); +} + +static void set_info_for_irq(unsigned int irq, struct irq_info *info) +{ + if (irq < nr_legacy_irqs()) + legacy_info_ptrs[irq] = info; + else + irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. */ @@ -173,6 +221,8 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; + info->mask_reason = EVT_MASK_REASON_EXPLICIT; + raw_spin_lock_init(&info->lock); ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -239,6 +289,7 @@ static int xen_irq_info_pirq_setup(unsigned irq, static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); + xen_evtchn_port_remove(info->evtchn, info->cpu); info->evtchn = 0; } @@ -247,10 +298,14 @@ static void xen_irq_info_cleanup(struct irq_info *info) */ unsigned int evtchn_from_irq(unsigned irq) { - if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)) + const struct irq_info *info = NULL; + + if (likely(irq < nr_irqs)) + info = info_for_irq(irq); + if (!info) return 0; - return info_for_irq(irq)->evtchn; + return info->evtchn; } unsigned irq_from_evtchn(unsigned int evtchn) @@ -315,6 +370,34 @@ unsigned int cpu_from_evtchn(unsigned int evtchn) return ret; } +static void do_mask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&info->lock, flags); + + if (!info->mask_reason) + mask_evtchn(info->evtchn); + + info->mask_reason |= reason; + + raw_spin_unlock_irqrestore(&info->lock, flags); +} + +static void do_unmask(struct irq_info *info, u8 reason) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&info->lock, flags); + + info->mask_reason &= ~reason; + + if (!info->mask_reason) + unmask_evtchn(info->evtchn); + + raw_spin_unlock_irqrestore(&info->lock, flags); +} + #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { @@ -361,9 +444,157 @@ void notify_remote_via_irq(int irq) } EXPORT_SYMBOL_GPL(notify_remote_via_irq); +struct lateeoi_work { + struct delayed_work delayed; + spinlock_t eoi_list_lock; + struct list_head eoi_list; +}; + +static DEFINE_PER_CPU(struct lateeoi_work, lateeoi); + +static void lateeoi_list_del(struct irq_info *info) +{ + struct lateeoi_work *eoi = &per_cpu(lateeoi, 
info->eoi_cpu); + unsigned long flags; + + spin_lock_irqsave(&eoi->eoi_list_lock, flags); + list_del_init(&info->eoi_list); + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); +} + +static void lateeoi_list_add(struct irq_info *info) +{ + struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); + struct irq_info *elem; + u64 now = get_jiffies_64(); + unsigned long delay; + unsigned long flags; + + if (now < info->eoi_time) + delay = info->eoi_time - now; + else + delay = 1; + + spin_lock_irqsave(&eoi->eoi_list_lock, flags); + + if (list_empty(&eoi->eoi_list)) { + list_add(&info->eoi_list, &eoi->eoi_list); + mod_delayed_work_on(info->eoi_cpu, system_wq, + &eoi->delayed, delay); + } else { + list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) { + if (elem->eoi_time <= info->eoi_time) + break; + } + list_add(&info->eoi_list, &elem->eoi_list); + } + + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); +} + +static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) +{ + evtchn_port_t evtchn; + unsigned int cpu; + unsigned int delay = 0; + + evtchn = info->evtchn; + if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list)) + return; + + if (spurious) { + if ((1 << info->spurious_cnt) < (HZ << 2)) + info->spurious_cnt++; + if (info->spurious_cnt > 1) { + delay = 1 << (info->spurious_cnt - 2); + if (delay > HZ) + delay = HZ; + if (!info->eoi_time) + info->eoi_cpu = smp_processor_id(); + info->eoi_time = get_jiffies_64() + delay; + } + } else { + info->spurious_cnt = 0; + } + + cpu = info->eoi_cpu; + if (info->eoi_time && + (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) { + lateeoi_list_add(info); + return; + } + + info->eoi_time = 0; + do_unmask(info, EVT_MASK_REASON_EOI_PENDING); +} + +static void xen_irq_lateeoi_worker(struct work_struct *work) +{ + struct lateeoi_work *eoi; + struct irq_info *info; + u64 now = get_jiffies_64(); + unsigned long flags; + + eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); + + read_lock_irqsave(&evtchn_rwlock, flags); + + while (true) { + spin_lock(&eoi->eoi_list_lock); + + info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, + eoi_list); + + if (info == NULL || now < info->eoi_time) { + spin_unlock(&eoi->eoi_list_lock); + break; + } + + list_del_init(&info->eoi_list); + + spin_unlock(&eoi->eoi_list_lock); + + info->eoi_time = 0; + + xen_irq_lateeoi_locked(info, false); + } + + if (info) + mod_delayed_work_on(info->eoi_cpu, system_wq, + &eoi->delayed, info->eoi_time - now); + + read_unlock_irqrestore(&evtchn_rwlock, flags); +} + +static void xen_cpu_init_eoi(unsigned int cpu) +{ + struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu); + + INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker); + spin_lock_init(&eoi->eoi_list_lock); + INIT_LIST_HEAD(&eoi->eoi_list); +} + +void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) +{ + struct irq_info *info; + unsigned long flags; + + read_lock_irqsave(&evtchn_rwlock, flags); + + info = info_for_irq(irq); + + if (info) + xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); + + read_unlock_irqrestore(&evtchn_rwlock, flags); +} +EXPORT_SYMBOL_GPL(xen_irq_lateeoi); + static void xen_irq_init(unsigned irq) { struct irq_info *info; + #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. 
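xen_irq_lateeoi_locked() above throttles spurious wakeups with an exponential backoff: the first spurious event is merely counted, then each further one delays the unmask by 1, 2, 4, ... jiffies, capped at one second. The delay computation in isolation, as a direct mirror of that logic:

static unsigned int spurious_unmask_delay(short spurious_cnt)
{
        unsigned int delay;

        if (spurious_cnt < 2)
                return 0;               /* first hit: only count it */

        delay = 1U << (spurious_cnt - 2);       /* 1, 2, 4, ... jiffies */
        return delay > HZ ? HZ : delay;
}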
*/ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); @@ -376,8 +607,9 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_handler_data(irq, info); + set_info_for_irq(irq, info); + INIT_LIST_HEAD(&info->eoi_list); list_add_tail(&info->list, &xen_irq_list_head); } @@ -425,17 +657,25 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = info_for_irq(irq); + unsigned long flags; if (WARN_ON(!info)) return; + write_lock_irqsave(&evtchn_rwlock, flags); + + if (!list_empty(&info->eoi_list)) + lateeoi_list_del(info); + list_del(&info->list); - irq_set_handler_data(irq, NULL); + set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); + write_unlock_irqrestore(&evtchn_rwlock, flags); + kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ @@ -454,6 +694,12 @@ static void xen_evtchn_close(unsigned int port) BUG(); } +static void event_handler_exit(struct irq_info *info) +{ + smp_store_release(&info->is_active, 0); + clear_evtchn(info->evtchn); +} + static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -472,7 +718,8 @@ static void pirq_query_unmask(int irq) static void eoi_pirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + int evtchn = info ? info->evtchn : 0; struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; @@ -481,16 +728,15 @@ static void eoi_pirq(struct irq_data *data) if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { - int masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); - clear_evtchn(evtchn); + event_handler_exit(info); irq_move_masked_irq(data); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); } else - clear_evtchn(evtchn); + event_handler_exit(info); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); @@ -541,7 +787,8 @@ static unsigned int __startup_pirq(unsigned int irq) goto err; out: - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); + eoi_pirq(irq_get_irq_data(irq)); return 0; @@ -568,7 +815,7 @@ static void shutdown_pirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } @@ -602,7 +849,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { int evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; @@ -827,7 +1074,7 @@ int xen_pirq_from_irq(unsigned irq) } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); -int bind_evtchn_to_irq(unsigned int evtchn) +static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip) { int irq; int ret; @@ -844,7 +1091,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) if (irq < 0) goto out; - irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, + irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "event"); ret = xen_irq_info_evtchn_setup(irq, evtchn); @@ -865,8 +1112,19 @@ out: return irq; } + +int bind_evtchn_to_irq(evtchn_port_t evtchn) +{ + return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip); +} EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); +int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) +{ + 
return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip); +} +EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); + static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; @@ -908,8 +1166,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) return irq; } -int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, - unsigned int remote_port) +static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain, + evtchn_port_t remote_port, + struct irq_chip *chip) { struct evtchn_bind_interdomain bind_interdomain; int err; @@ -920,10 +1179,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); - return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); + return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port, + chip); +} + +int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, + evtchn_port_t remote_port) +{ + return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, + &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); +int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port) +{ + return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, + &xen_lateeoi_chip); +} +EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); + static int find_virq(unsigned int virq, unsigned int cpu) { struct evtchn_status status; @@ -1019,14 +1294,15 @@ static void unbind_from_irq(unsigned int irq) mutex_unlock(&irq_mapping_update_lock); } -int bind_evtchn_to_irqhandler(unsigned int evtchn, - irq_handler_t handler, - unsigned long irqflags, - const char *devname, void *dev_id) +static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, void *dev_id, + struct irq_chip *chip) { int irq, retval; - irq = bind_evtchn_to_irq(evtchn); + irq = bind_evtchn_to_irq_chip(evtchn, chip); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); @@ -1037,31 +1313,76 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, return irq; } + +int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, void *dev_id) +{ + return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, + devname, dev_id, + &xen_dynamic_chip); +} EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); +int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, void *dev_id) +{ + return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, + devname, dev_id, + &xen_lateeoi_chip); +} +EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi); + +static int bind_interdomain_evtchn_to_irqhandler_chip( + unsigned int remote_domain, evtchn_port_t remote_port, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id, struct irq_chip *chip) +{ + int irq, retval; + + irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, + chip); + if (irq < 0) + return irq; + + retval = request_irq(irq, handler, irqflags, devname, dev_id); + if (retval != 0) { + unbind_from_irq(irq); + return retval; + } + + return irq; +} + int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, - unsigned int remote_port, + evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) 
{ - int irq, retval; - - irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); - if (irq < 0) - return irq; - - retval = request_irq(irq, handler, irqflags, devname, dev_id); - if (retval != 0) { - unbind_from_irq(irq); - return retval; - } - - return irq; + return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, + remote_port, handler, irqflags, devname, + dev_id, &xen_dynamic_chip); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); +int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, + void *dev_id) +{ + return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, + remote_port, handler, irqflags, devname, + dev_id, &xen_lateeoi_chip); +} +EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi); + int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) @@ -1106,7 +1427,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; @@ -1140,7 +1461,7 @@ int evtchn_make_refcounted(unsigned int evtchn) if (irq == -1) return -ENOENT; - info = irq_get_handler_data(irq); + info = info_for_irq(irq); if (!info) return -ENOENT; @@ -1168,13 +1489,13 @@ int evtchn_get(unsigned int evtchn) if (irq == -1) goto done; - info = irq_get_handler_data(irq); + info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; - if (info->refcnt <= 0) + if (info->refcnt <= 0 || info->refcnt == SHRT_MAX) goto done; info->refcnt++; @@ -1213,6 +1534,56 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) notify_remote_via_irq(irq); } +struct evtchn_loop_ctrl { + ktime_t timeout; + unsigned count; + bool defer_eoi; +}; + +void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) +{ + int irq; + struct irq_info *info; + + irq = get_evtchn_to_irq(port); + if (irq == -1) + return; + + /* + * Check for timeout every 256 events. + * We are setting the timeout value only after the first 256 + * events in order not to hurt the common case of few loop + * iterations. The 256 is basically an arbitrary value. + * + * In case we are hitting the timeout we need to defer all further + * EOIs in order to ensure that we leave the event handling loop + * sooner rather than later.
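Since this runs once per delivered event, the "every 256" test is a mask, not a modulo: !(++ctrl->count & 0xff) fires exactly once per 256 calls. The check just below, lifted into a helper for readability (a sketch, not the patch's actual structure):

static bool loop_hit_timeout(struct evtchn_loop_ctrl *ctrl, uint timeout_j)
{
        if (++ctrl->count & 0xff)       /* cheap skip for 255 of 256 events */
                return ctrl->defer_eoi;

        if (!ctrl->timeout)
                ctrl->timeout = ktime_add_ms(ktime_get(),
                                             jiffies_to_msecs(timeout_j));
        else if (ktime_after(ktime_get(), ctrl->timeout))
                ctrl->defer_eoi = true;

        return ctrl->defer_eoi;
}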
+ */ + if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) { + ktime_t kt = ktime_get(); + + if (!ctrl->timeout) { + kt = ktime_add_ms(kt, + jiffies_to_msecs(event_loop_timeout)); + ctrl->timeout = kt; + } else if (kt > ctrl->timeout) { + ctrl->defer_eoi = true; + } + } + + info = info_for_irq(irq); + if (xchg_acquire(&info->is_active, 1)) + return; + + if (ctrl->defer_eoi) { + info->eoi_cpu = smp_processor_id(); + info->irq_epoch = __this_cpu_read(irq_epoch); + info->eoi_time = get_jiffies_64() + event_eoi_delay; + } + + generic_handle_irq(irq); +} + static DEFINE_PER_CPU(unsigned, xed_nesting_count); static void __xen_evtchn_do_upcall(void) @@ -1220,6 +1591,9 @@ static void __xen_evtchn_do_upcall(void) struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); int cpu = get_cpu(); unsigned count; + struct evtchn_loop_ctrl ctrl = { 0 }; + + read_lock(&evtchn_rwlock); do { vcpu_info->evtchn_upcall_pending = 0; @@ -1227,7 +1601,7 @@ static void __xen_evtchn_do_upcall(void) if (__this_cpu_inc_return(xed_nesting_count) - 1) goto out; - xen_evtchn_handle_events(cpu); + xen_evtchn_handle_events(cpu, &ctrl); BUG_ON(!irqs_disabled()); @@ -1236,6 +1610,14 @@ static void __xen_evtchn_do_upcall(void) } while (count != 1 || vcpu_info->evtchn_upcall_pending); out: + read_unlock(&evtchn_rwlock); + + /* + * Increment irq_epoch only now to defer EOIs only for + * xen_irq_lateeoi() invocations occurring from inside the loop + * above. + */ + __this_cpu_inc(irq_epoch); put_cpu(); } @@ -1294,10 +1676,10 @@ void rebind_evtchn_irq(int evtchn, int irq) } /* Rebind an evtchn so that it gets delivered to a specific cpu */ -static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) +static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; - int masked; + evtchn_port_t evtchn = info ? info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1313,7 +1695,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); /* * If this fails, it usually just indicates that we're dealing with a @@ -1323,8 +1705,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 0; } @@ -1333,7 +1714,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); - int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); + int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); @@ -1352,39 +1733,41 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); static void enable_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_EXPLICIT); } static void disable_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? 
info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - mask_evtchn(evtchn); + do_mask(info, EVT_MASK_REASON_EXPLICIT); } static void ack_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(data->irq); + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { - int masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); - clear_evtchn(evtchn); + event_handler_exit(info); irq_move_masked_irq(data); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); } else - clear_evtchn(evtchn); + event_handler_exit(info); } static void mask_ack_dynirq(struct irq_data *data) @@ -1393,18 +1776,39 @@ static void mask_ack_dynirq(struct irq_data *data) ack_dynirq(data); } +static void lateeoi_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EOI_PENDING); + ack_dynirq(data); + } +} + +static void lateeoi_mask_ack_dynirq(struct irq_data *data) +{ + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EXPLICIT); + ack_dynirq(data); + } +} + static int retrigger_dynirq(struct irq_data *data) { - unsigned int evtchn = evtchn_from_irq(data->irq); - int masked; + struct irq_info *info = info_for_irq(data->irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (!VALID_EVTCHN(evtchn)) return 0; - masked = test_and_set_mask(evtchn); + do_mask(info, EVT_MASK_REASON_TEMPORARY); set_evtchn(evtchn); - if (!masked) - unmask_evtchn(evtchn); + do_unmask(info, EVT_MASK_REASON_TEMPORARY); return 1; } @@ -1499,10 +1903,11 @@ static void restore_cpu_ipis(unsigned int cpu) /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { - int evtchn = evtchn_from_irq(irq); + struct irq_info *info = info_for_irq(irq); + evtchn_port_t evtchn = info ? info->evtchn : 0; if (VALID_EVTCHN(evtchn)) - clear_evtchn(evtchn); + event_handler_exit(info); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) @@ -1602,6 +2007,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .irq_retrigger = retrigger_dynirq, }; +static struct irq_chip xen_lateeoi_chip __read_mostly = { + /* The chip name needs to contain "xen-dyn" for irqbalance to work. 
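Drivers using the chip defined below get an ack that leaves the event masked for the duration of the handler; they must re-enable it explicitly once the work is done. A minimal consumer sketch (process_one_request() is a hypothetical stand-in):

static irqreturn_t backend_evtchn_handler(int irq, void *dev_id)
{
        /* the lateeoi ack has left the event channel masked */
        bool useful = process_one_request(dev_id);      /* hypothetical */

        /* unmask, flagging no-op wakeups so the core can throttle them */
        xen_irq_lateeoi(irq, useful ? 0 : XEN_EOI_FLAG_SPURIOUS);
        return IRQ_HANDLED;
}

Binding goes through the lateeoi variants, e.g. bind_evtchn_to_irqhandler_lateeoi(port, backend_evtchn_handler, 0, "backend", dev), exactly the conversion applied to drivers/xen/evtchn.c further down.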
*/ + .name = "xen-dyn-lateeoi", + + .irq_disable = disable_dynirq, + .irq_mask = disable_dynirq, + .irq_unmask = enable_dynirq, + + .irq_ack = lateeoi_ack_dynirq, + .irq_mask_ack = lateeoi_mask_ack_dynirq, + + .irq_set_affinity = set_affinity_irq, + .irq_retrigger = retrigger_dynirq, +}; + static struct irq_chip xen_pirq_chip __read_mostly = { .name = "xen-pirq", @@ -1632,16 +2052,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = { .irq_ack = ack_dynirq, }; -int xen_set_callback_via(uint64_t via) -{ - struct xen_hvm_param a; - a.domid = DOMID_SELF; - a.index = HVM_PARAM_CALLBACK_IRQ; - a.value = via; - return HYPERVISOR_hvm_op(HVMOP_set_param, &a); -} -EXPORT_SYMBOL_GPL(xen_set_callback_via); - #ifdef CONFIG_XEN_PVHVM /* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any @@ -1668,12 +2078,31 @@ void xen_callback_vector(void) void xen_callback_vector(void) {} #endif -#undef MODULE_PARAM_PREFIX -#define MODULE_PARAM_PREFIX "xen." - static bool fifo_events = true; module_param(fifo_events, bool, 0); +static int xen_evtchn_cpu_prepare(unsigned int cpu) +{ + int ret = 0; + + xen_cpu_init_eoi(cpu); + + if (evtchn_ops->percpu_init) + ret = evtchn_ops->percpu_init(cpu); + + return ret; +} + +static int xen_evtchn_cpu_dead(unsigned int cpu) +{ + int ret = 0; + + if (evtchn_ops->percpu_deinit) + ret = evtchn_ops->percpu_deinit(cpu); + + return ret; +} + void __init xen_init_IRQ(void) { int ret = -EINVAL; @@ -1684,6 +2113,12 @@ void __init xen_init_IRQ(void) if (ret < 0) xen_evtchn_2l_init(); + xen_cpu_init_eoi(smp_processor_id()); + + cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, + "xen/evtchn:prepare", + xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); + evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), sizeof(*evtchn_to_irq), GFP_KERNEL); BUG_ON(!evtchn_to_irq); diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 76b318e88382..360a7f8cdf75 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsigned port) return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } -static bool evtchn_fifo_test_and_set_mask(unsigned port) -{ - event_word_t *word = event_word_from_port(port); - return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); -} - static void evtchn_fifo_mask(unsigned port) { event_word_t *word = event_word_from_port(port); @@ -227,19 +221,25 @@ static bool evtchn_fifo_is_masked(unsigned port) return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); } /* - * Clear MASKED, spinning if BUSY is set. + * Clear MASKED if not PENDING, spinning if BUSY is set. + * Return true if mask was cleared. 
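clear_masked_cond() below is a classic lock-free read-modify-write: construct the expected old word with BUSY assumed clear, compute the new word with MASKED cleared, and retry the cmpxchg until it sticks, which inherently spins for as long as the hypervisor holds BUSY. The generalized shape, as a sketch:

static bool clear_bit_unless(volatile event_word_t *word,
                             event_word_t clear, event_word_t unless,
                             event_word_t busy)
{
        event_word_t new, old, w = *word;

        do {
                if (w & unless)
                        return false;   /* condition set: leave bit alone */
                old = w & ~busy;        /* cmpxchg fails while busy is set */
                new = old & ~clear;
                w = sync_cmpxchg(word, old, new);
        } while (w != old);

        return true;
}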
*/ -static void clear_masked(volatile event_word_t *word) +static bool clear_masked_cond(volatile event_word_t *word) { event_word_t new, old, w; w = *word; do { + if (w & (1 << EVTCHN_FIFO_PENDING)) + return false; + old = w & ~(1 << EVTCHN_FIFO_BUSY); new = old & ~(1 << EVTCHN_FIFO_MASKED); w = sync_cmpxchg(word, old, new); } while (w != old); + + return true; } static void evtchn_fifo_unmask(unsigned port) @@ -248,8 +248,7 @@ static void evtchn_fifo_unmask(unsigned port) BUG_ON(!irqs_disabled()); - clear_masked(word); - if (evtchn_fifo_is_pending(port)) { + if (!clear_masked_cond(word)) { struct evtchn_unmask unmask = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); } @@ -270,19 +269,9 @@ static uint32_t clear_linked(volatile event_word_t *word) return w & EVTCHN_FIFO_LINK_MASK; } -static void handle_irq_for_port(unsigned port) -{ - int irq; - - irq = get_evtchn_to_irq(port); - if (irq != -1) - generic_handle_irq(irq); -} - -static void consume_one_event(unsigned cpu, +static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl, struct evtchn_fifo_control_block *control_block, - unsigned priority, unsigned long *ready, - bool drop) + unsigned priority, unsigned long *ready) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -315,16 +304,17 @@ static void consume_one_event(unsigned cpu, clear_bit(priority, ready); if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { - if (unlikely(drop)) + if (unlikely(!ctrl)) pr_warn("Dropping pending event for port %u\n", port); else - handle_irq_for_port(port); + handle_irq_for_port(port, ctrl); } q->head[priority] = head; } -static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) +static void __evtchn_fifo_handle_events(unsigned cpu, + struct evtchn_loop_ctrl *ctrl) { struct evtchn_fifo_control_block *control_block; unsigned long ready; @@ -336,14 +326,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) while (ready) { q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); - consume_one_event(cpu, control_block, q, &ready, drop); + consume_one_event(cpu, ctrl, control_block, q, &ready); ready |= xchg(&control_block->ready, 0); } } -static void evtchn_fifo_handle_events(unsigned cpu) +static void evtchn_fifo_handle_events(unsigned cpu, + struct evtchn_loop_ctrl *ctrl) { - __evtchn_fifo_handle_events(cpu, false); + __evtchn_fifo_handle_events(cpu, ctrl); } static void evtchn_fifo_resume(void) @@ -380,21 +371,6 @@ static void evtchn_fifo_resume(void) event_array_pages = 0; } -static const struct evtchn_ops evtchn_ops_fifo = { - .max_channels = evtchn_fifo_max_channels, - .nr_channels = evtchn_fifo_nr_channels, - .setup = evtchn_fifo_setup, - .bind_to_cpu = evtchn_fifo_bind_to_cpu, - .clear_pending = evtchn_fifo_clear_pending, - .set_pending = evtchn_fifo_set_pending, - .is_pending = evtchn_fifo_is_pending, - .test_and_set_mask = evtchn_fifo_test_and_set_mask, - .mask = evtchn_fifo_mask, - .unmask = evtchn_fifo_unmask, - .handle_events = evtchn_fifo_handle_events, - .resume = evtchn_fifo_resume, -}; - static int evtchn_fifo_alloc_control_block(unsigned cpu) { void *control_block = NULL; @@ -417,19 +393,35 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu) return ret; } -static int xen_evtchn_cpu_prepare(unsigned int cpu) +static int evtchn_fifo_percpu_init(unsigned int cpu) { if (!per_cpu(cpu_control_block, cpu)) return evtchn_fifo_alloc_control_block(cpu); return 0; } -static int xen_evtchn_cpu_dead(unsigned int cpu) +static int 
evtchn_fifo_percpu_deinit(unsigned int cpu) { - __evtchn_fifo_handle_events(cpu, true); + __evtchn_fifo_handle_events(cpu, NULL); return 0; } +static const struct evtchn_ops evtchn_ops_fifo = { + .max_channels = evtchn_fifo_max_channels, + .nr_channels = evtchn_fifo_nr_channels, + .setup = evtchn_fifo_setup, + .bind_to_cpu = evtchn_fifo_bind_to_cpu, + .clear_pending = evtchn_fifo_clear_pending, + .set_pending = evtchn_fifo_set_pending, + .is_pending = evtchn_fifo_is_pending, + .mask = evtchn_fifo_mask, + .unmask = evtchn_fifo_unmask, + .handle_events = evtchn_fifo_handle_events, + .resume = evtchn_fifo_resume, + .percpu_init = evtchn_fifo_percpu_init, + .percpu_deinit = evtchn_fifo_percpu_deinit, +}; + int __init xen_evtchn_fifo_init(void) { int cpu = smp_processor_id(); @@ -443,9 +435,5 @@ int __init xen_evtchn_fifo_init(void) evtchn_ops = &evtchn_ops_fifo; - cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, - "xen/evtchn:prepare", - xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); - return ret; } diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 82938cff6c7a..eb012fbb62e7 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -30,11 +30,22 @@ enum xen_irq_type { */ struct irq_info { struct list_head list; - int refcnt; - enum xen_irq_type type; /* type */ + struct list_head eoi_list; + short refcnt; + short spurious_cnt; + short type; /* type */ + u8 mask_reason; /* Why is event channel masked */ +#define EVT_MASK_REASON_EXPLICIT 0x01 +#define EVT_MASK_REASON_TEMPORARY 0x02 +#define EVT_MASK_REASON_EOI_PENDING 0x04 + u8 is_active; /* Is event just being handled? */ unsigned irq; unsigned int evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ + unsigned short eoi_cpu; /* EOI must happen on this cpu */ + unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ + u64 eoi_time; /* Time in jiffies when to EOI. 
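The clear_masked_cond() helper added in events_fifo.c above is a compare-and-swap retry loop: it refuses to clear MASKED while PENDING is set, and because BUSY is stripped from the expected value the cmpxchg keeps failing (spinning) while BUSY is up. A compilable userspace rendering using GCC's __atomic builtins; the bit positions are made up for the demo:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F_PENDING (1u << 0)   /* illustrative bit layout, not the ABI's */
    #define F_BUSY    (1u << 1)
    #define F_MASKED  (1u << 2)

    static bool clear_masked_cond(uint32_t *word)
    {
        uint32_t w = __atomic_load_n(word, __ATOMIC_RELAXED);

        for (;;) {
            uint32_t old, new;

            if (w & F_PENDING)
                return false;        /* caller falls back to the hypercall */
            old = w & ~F_BUSY;       /* expected value: BUSY must be clear */
            new = old & ~F_MASKED;
            if (__atomic_compare_exchange_n(word, &old, new, false,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                return true;
            w = old;                 /* CAS failed: old holds the fresh value */
        }
    }

    int main(void)
    {
        uint32_t w = F_MASKED;

        printf("cleared=%d word=%#x\n", clear_masked_cond(&w), w);
        return 0;
    }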
*/ + raw_spinlock_t lock; union { unsigned short virq; @@ -53,28 +64,34 @@ struct irq_info { #define PIRQ_SHAREABLE (1 << 1) #define PIRQ_MSI_GROUP (1 << 2) +struct evtchn_loop_ctrl; + struct evtchn_ops { unsigned (*max_channels)(void); unsigned (*nr_channels)(void); int (*setup)(struct irq_info *info); + void (*remove)(evtchn_port_t port, unsigned int cpu); void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); void (*clear_pending)(unsigned port); void (*set_pending)(unsigned port); bool (*is_pending)(unsigned port); - bool (*test_and_set_mask)(unsigned port); void (*mask)(unsigned port); void (*unmask)(unsigned port); - void (*handle_events)(unsigned cpu); + void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl); void (*resume)(void); + + int (*percpu_init)(unsigned int cpu); + int (*percpu_deinit)(unsigned int cpu); }; extern const struct evtchn_ops *evtchn_ops; extern int **evtchn_to_irq; int get_evtchn_to_irq(unsigned int evtchn); +void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl); struct irq_info *info_for_irq(unsigned irq); unsigned cpu_from_irq(unsigned irq); @@ -96,6 +113,13 @@ static inline int xen_evtchn_port_setup(struct irq_info *info) return 0; } +static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, + unsigned int cpu) +{ + if (evtchn_ops->remove) + evtchn_ops->remove(evtchn, cpu); +} + static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info, unsigned cpu) { @@ -117,11 +141,6 @@ static inline bool test_evtchn(unsigned port) return evtchn_ops->is_pending(port); } -static inline bool test_and_set_mask(unsigned port) -{ - return evtchn_ops->test_and_set_mask(port); -} - static inline void mask_evtchn(unsigned port) { return evtchn_ops->mask(port); @@ -132,9 +151,10 @@ static inline void unmask_evtchn(unsigned port) return evtchn_ops->unmask(port); } -static inline void xen_evtchn_handle_events(unsigned cpu) +static inline void xen_evtchn_handle_events(unsigned cpu, + struct evtchn_loop_ctrl *ctrl) { - return evtchn_ops->handle_events(cpu); + return evtchn_ops->handle_events(cpu, ctrl); } static inline void xen_evtchn_resume(void) diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 052b55a14ebc..a43930191e20 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -166,7 +166,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) "Interrupt for port %d, but apparently not enabled; per-user %p\n", evtchn->port, u); - disable_irq_nosync(irq); evtchn->enabled = false; spin_lock(&u->ring_prod_lock); @@ -292,7 +291,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, evtchn = find_evtchn(u, port); if (evtchn && !evtchn->enabled) { evtchn->enabled = true; - enable_irq(irq_from_evtchn(port)); + xen_irq_lateeoi(irq_from_evtchn(port), 0); } } @@ -392,8 +391,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) if (rc < 0) goto err; - rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, - u->name, evtchn); + rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0, + u->name, evtchn); if (rc < 0) goto err; diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index 2c4f324f8626..da799929087d 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, goto fail_detach; } + /* Check that we have zero offset. 
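The new evtchn_ops members above (.remove, .percpu_init, .percpu_deinit) are optional hooks: the static inline wrappers test the pointer before calling, so a backend only implements the callbacks it needs. The pattern in isolation, with hypothetical names:

    #include <stdio.h>

    struct backend_ops {
        int (*percpu_init)(unsigned int cpu);     /* optional */
        int (*percpu_deinit)(unsigned int cpu);   /* optional */
    };

    static const struct backend_ops *ops;

    /* wrapper guards against a NULL hook, like xen_evtchn_port_remove() */
    static int backend_cpu_prepare(unsigned int cpu)
    {
        return (ops && ops->percpu_init) ? ops->percpu_init(cpu) : 0;
    }

    static int fifo_percpu_init(unsigned int cpu)
    {
        printf("allocating per-cpu control block for cpu %u\n", cpu);
        return 0;
    }

    static const struct backend_ops fifo_ops = {
        .percpu_init = fifo_percpu_init,
        /* .percpu_deinit intentionally left NULL */
    };

    int main(void)
    {
        ops = &fifo_ops;
        return backend_cpu_prepare(0);
    }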
*/ + if (sgt->sgl->offset) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n", + sgt->sgl->offset); + goto fail_unmap; + } + /* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { ret = ERR_PTR(-EINVAL); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 81401f386c9c..e953ea34b3e4 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -319,44 +319,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) * to the kernel linear addresses of the struct pages. * These ptes are completely different from the user ptes dealt * with find_grant_ptes. + * Note that GNTMAP_device_map isn't needed here: The + * dev_bus_addr output field gets consumed only from ->map_ops, + * and by not requesting it when mapping we also avoid needing + * to mirror dev_bus_addr into ->unmap_ops (and holding an extra + * reference to the page in the hypervisor). */ + unsigned int flags = (map->flags & ~GNTMAP_device_map) | + GNTMAP_host_map; + for (i = 0; i < map->count; i++) { unsigned long address = (unsigned long) pfn_to_kaddr(page_to_pfn(map->pages[i])); BUG_ON(PageHighMem(map->pages[i])); - gnttab_set_map_op(&map->kmap_ops[i], address, - map->flags | GNTMAP_host_map, + gnttab_set_map_op(&map->kmap_ops[i], address, flags, map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->kunmap_ops[i], address, - map->flags | GNTMAP_host_map, -1); + flags, -1); } } pr_debug("map %d+%d\n", map->index, map->count); err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, map->pages, map->count); - if (err) - return err; for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status) { + if (map->map_ops[i].status == GNTST_okay) + map->unmap_ops[i].handle = map->map_ops[i].handle; + else if (!err) err = -EINVAL; - continue; - } - map->unmap_ops[i].handle = map->map_ops[i].handle; - if (use_ptemod) - map->kunmap_ops[i].handle = map->kmap_ops[i].handle; -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC - else if (map->dma_vaddr) { - unsigned long bfn; + if (map->flags & GNTMAP_device_map) + map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; - bfn = pfn_to_bfn(page_to_pfn(map->pages[i])); - map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn); + if (use_ptemod) { + if (map->kmap_ops[i].status == GNTST_okay) + map->kunmap_ops[i].handle = map->kmap_ops[i].handle; + else if (!err) + err = -EINVAL; } -#endif } return err; } @@ -831,17 +834,18 @@ struct gntdev_copy_batch { s16 __user *status[GNTDEV_COPY_BATCH]; unsigned int nr_ops; unsigned int nr_pages; + bool writeable; }; static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, - bool writeable, unsigned long *gfn) + unsigned long *gfn) { unsigned long addr = (unsigned long)virt; struct page *page; unsigned long xen_pfn; int ret; - ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page); + ret = get_user_pages_fast(addr, 1, batch->writeable ? 
FOLL_WRITE : 0, &page); if (ret < 0) return ret; @@ -857,9 +861,13 @@ static void gntdev_put_pages(struct gntdev_copy_batch *batch) { unsigned int i; - for (i = 0; i < batch->nr_pages; i++) + for (i = 0; i < batch->nr_pages; i++) { + if (batch->writeable && !PageDirty(batch->pages[i])) + set_page_dirty_lock(batch->pages[i]); put_page(batch->pages[i]); + } batch->nr_pages = 0; + batch->writeable = false; } static int gntdev_copy(struct gntdev_copy_batch *batch) @@ -948,8 +956,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, virt = seg->source.virt + copied; off = (unsigned long)virt & ~XEN_PAGE_MASK; len = min(len, (size_t)XEN_PAGE_SIZE - off); + batch->writeable = false; - ret = gntdev_get_page(batch, virt, false, &gfn); + ret = gntdev_get_page(batch, virt, &gfn); if (ret < 0) return ret; @@ -967,8 +976,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, virt = seg->dest.virt + copied; off = (unsigned long)virt & ~XEN_PAGE_MASK; len = min(len, (size_t)XEN_PAGE_SIZE - off); + batch->writeable = true; - ret = gntdev_get_page(batch, virt, true, &gfn); + ret = gntdev_get_page(batch, virt, &gfn); if (ret < 0) return ret; diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 5e30602fdbad..c45646450135 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -149,7 +149,6 @@ static int platform_pci_probe(struct pci_dev *pdev, ret = gnttab_init(); if (ret) goto grant_out; - xenbus_probe(NULL); return 0; grant_out: gnttab_free_auto_xlat_frames(); diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 456a164364a2..98a9d6892d98 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); asmlinkage __visible void xen_maybe_preempt_hcall(void) { if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) - && need_resched())) { + && need_resched() && !preempt_count())) { /* * Clear flag as we may be rescheduled on a different * cpu. diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index c6070e70dd73..9c9422e9fac4 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -724,14 +724,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata) return 0; } -static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) +static long privcmd_ioctl_mmap_resource(struct file *file, + struct privcmd_mmap_resource __user *udata) { struct privcmd_data *data = file->private_data; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct privcmd_mmap_resource kdata; xen_pfn_t *pfns = NULL; - struct xen_mem_acquire_resource xdata; + struct xen_mem_acquire_resource xdata = { }; int rc; if (copy_from_user(&kdata, udata, sizeof(kdata))) @@ -741,6 +742,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) if (data->domid != DOMID_INVALID && data->domid != kdata.dom) return -EPERM; + /* Both fields must be set or unset */ + if (!!kdata.addr != !!kdata.num) + return -EINVAL; + + xdata.domid = kdata.dom; + xdata.type = kdata.type; + xdata.id = kdata.id; + + if (!kdata.addr && !kdata.num) { + /* Query the size of the resource. 
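The reworked status loop in gntdev_map_grant_pages() above no longer returns on the first gnttab_map_refs() error: it still walks the whole batch, latching a handle for every GNTST_okay entry (those must be unmapped later) while remembering only the first failure. A standalone sketch of that accumulate-and-continue shape:

    #include <errno.h>
    #include <stdio.h>

    #define GNTST_okay 0

    struct map_op { int status; int handle; };

    static int collect_handles(const struct map_op *ops, int *handles, int count)
    {
        int i, err = 0;

        for (i = 0; i < count; i++) {
            if (ops[i].status == GNTST_okay)
                handles[i] = ops[i].handle;   /* needed for a later unmap */
            else if (!err)
                err = -EINVAL;                /* remember the first failure only */
        }
        return err;
    }

    int main(void)
    {
        const struct map_op ops[3] = { { 0, 10 }, { 1, -1 }, { 0, 12 } };
        int handles[3] = { -1, -1, -1 };

        printf("err=%d handles=%d,%d,%d\n",
               collect_handles(ops, handles, 3),
               handles[0], handles[1], handles[2]);
        return 0;
    }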
*/ + rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata); + if (rc) + return rc; + return __put_user(xdata.nr_frames, &udata->num); + } + down_write(&mm->mmap_sem); vma = find_vma(mm, kdata.addr); @@ -775,10 +792,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) } else vma->vm_private_data = PRIV_VMA_LOCKED; - memset(&xdata, 0, sizeof(xdata)); - xdata.domid = kdata.dom; - xdata.type = kdata.type; - xdata.id = kdata.id; xdata.frame = kdata.idx; xdata.nr_frames = kdata.num; set_xen_guest_handle(xdata.frame_list, pfns); diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index c57c71b7d53d..9439de2ca0e4 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -66,6 +66,7 @@ struct sock_mapping { atomic_t write; atomic_t io; atomic_t release; + atomic_t eoi; void (*saved_data_ready)(struct sock *sk); struct pvcalls_ioworker ioworker; }; @@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev, struct pvcalls_fedata *fedata, struct sock_mapping *map); -static void pvcalls_conn_back_read(void *opaque) +static bool pvcalls_conn_back_read(void *opaque) { struct sock_mapping *map = (struct sock_mapping *)opaque; struct msghdr msg; @@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque) virt_mb(); if (error) - return; + return false; size = pvcalls_queued(prod, cons, array_size); if (size >= array_size) - return; + return false; spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) { atomic_set(&map->read, 0); spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); - return; + return true; } spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); wanted = array_size - size; @@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque) ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT); WARN_ON(ret > wanted); if (ret == -EAGAIN) /* shouldn't happen */ - return; + return true; if (!ret) ret = -ENOTCONN; spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); @@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque) virt_wmb(); notify_remote_via_irq(map->irq); - return; + return true; } -static void pvcalls_conn_back_write(struct sock_mapping *map) +static bool pvcalls_conn_back_write(struct sock_mapping *map) { struct pvcalls_data_intf *intf = map->ring; struct pvcalls_data *data = &map->data; @@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) array_size = XEN_FLEX_RING_SIZE(map->ring_order); size = pvcalls_queued(prod, cons, array_size); if (size == 0) - return; + return false; memset(&msg, 0, sizeof(msg)); msg.msg_flags |= MSG_DONTWAIT; @@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) atomic_set(&map->write, 0); ret = inet_sendmsg(map->sock, &msg, size); - if (ret == -EAGAIN || (ret >= 0 && ret < size)) { + if (ret == -EAGAIN) { atomic_inc(&map->write); atomic_inc(&map->io); + return true; } - if (ret == -EAGAIN) - return; /* write the data, then update the indexes */ virt_wmb(); @@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) } /* update the indexes, then notify the other end */ virt_wmb(); - if (prod != cons + ret) + if (prod != cons + ret) { atomic_inc(&map->write); + atomic_inc(&map->io); + } notify_remote_via_irq(map->irq); + + return true; } static void pvcalls_back_ioworker(struct work_struct *work) @@ -227,6 +231,7 @@ static void 
pvcalls_back_ioworker(struct work_struct *work) struct pvcalls_ioworker, register_work); struct sock_mapping *map = container_of(ioworker, struct sock_mapping, ioworker); + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; while (atomic_read(&map->io) > 0) { if (atomic_read(&map->release) > 0) { @@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work) return; } - if (atomic_read(&map->read) > 0) - pvcalls_conn_back_read(map); - if (atomic_read(&map->write) > 0) - pvcalls_conn_back_write(map); + if (atomic_read(&map->read) > 0 && + pvcalls_conn_back_read(map)) + eoi_flags = 0; + if (atomic_read(&map->write) > 0 && + pvcalls_conn_back_write(map)) + eoi_flags = 0; + + if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) { + atomic_set(&map->eoi, 0); + xen_irq_lateeoi(map->irq, eoi_flags); + eoi_flags = XEN_EOI_FLAG_SPURIOUS; + } atomic_dec(&map->io); } @@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket( goto out; map->bytes = page; - ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id, - evtchn, - pvcalls_back_conn_event, - 0, - "pvcalls-backend", - map); + ret = bind_interdomain_evtchn_to_irqhandler_lateeoi( + fedata->dev->otherend_id, evtchn, + pvcalls_back_conn_event, 0, "pvcalls-backend", map); if (ret < 0) goto out; map->irq = ret; @@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id) { struct xenbus_device *dev = dev_id; struct pvcalls_fedata *fedata = NULL; + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; - if (dev == NULL) - return IRQ_HANDLED; + if (dev) { + fedata = dev_get_drvdata(&dev->dev); + if (fedata) { + pvcalls_back_work(fedata); + eoi_flags = 0; + } + } - fedata = dev_get_drvdata(&dev->dev); - if (fedata == NULL) - return IRQ_HANDLED; + xen_irq_lateeoi(irq, eoi_flags); - pvcalls_back_work(fedata); return IRQ_HANDLED; } @@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map) struct pvcalls_ioworker *iow; if (map == NULL || map->sock == NULL || map->sock->sk == NULL || - map->sock->sk->sk_user_data != map) + map->sock->sk->sk_user_data != map) { + xen_irq_lateeoi(irq, 0); return IRQ_HANDLED; + } iow = &map->ioworker; atomic_inc(&map->write); + atomic_inc(&map->eoi); atomic_inc(&map->io); queue_work(iow->wq, &iow->register_work); @@ -931,7 +947,7 @@ static int backend_connect(struct xenbus_device *dev) goto error; } - err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn); + err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn); if (err < 0) goto error; fedata->irq = err; @@ -1087,7 +1103,8 @@ static void set_backend_state(struct xenbus_device *dev, case XenbusStateInitialised: switch (state) { case XenbusStateConnected: - backend_connect(dev); + if (backend_connect(dev)) + return; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index bd3a10dfac15..06346422f743 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, int order = get_order(size); phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); + struct page *page; if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; @@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, /* Convert the size to actually allocated. 
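pvcalls_back_ioworker() above uses the idiom that recurs across these backends: assume the wakeup was spurious (XEN_EOI_FLAG_SPURIOUS) and downgrade to a normal EOI only when a sub-handler reports progress, letting the event-channel core throttle storming frontends. The skeleton, with trivial stand-in handlers:

    #include <stdio.h>

    #define XEN_EOI_FLAG_SPURIOUS 0x00000001   /* value as in xen/events.h */

    /* stand-ins: one handler finds work, the other does not */
    static int handle_read(void)  { return 1; }
    static int handle_write(void) { return 0; }

    static unsigned int run_handler(void)
    {
        unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

        if (handle_read())
            eoi_flags = 0;    /* real work done: not spurious */
        if (handle_write())
            eoi_flags = 0;

        return eoi_flags;     /* handed to xen_irq_lateeoi() in the kernel */
    }

    int main(void)
    {
        printf("eoi_flags=%u\n", run_handler());
        return 0;
    }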
*/ size = 1UL << (order + XEN_PAGE_SHIFT); + if (is_vmalloc_addr(vaddr)) + page = vmalloc_to_page(vaddr); + else + page = virt_to_page(vaddr); + if (!WARN_ON((dev_addr + size - 1 > dma_mask) || range_straddles_page_boundary(phys, size)) && - TestClearPageXenRemapped(virt_to_page(vaddr)) + TestClearPageXenRemapped(page)) xen_destroy_contiguous_region(phys, order); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 097410a7cdb7..adf3aae2939f 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, wmb(); notify_remote_via_irq(pdev->evtchn_irq); + /* Enable IRQ to signal "request done". */ + xen_pcibk_lateeoi(pdev, 0); + ret = wait_event_timeout(xen_pcibk_aer_wait_queue, !(test_bit(_XEN_PCIB_active, (unsigned long *) &sh_info->flags)), 300*HZ); + /* Enable IRQ for pcifront request if not already active. */ + if (!test_bit(_PDEVF_op_active, &pdev->flags)) + xen_pcibk_lateeoi(pdev, 0); + if (!ret) { if (test_bit(_XEN_PCIB_active, (unsigned long *)&sh_info->flags)) { @@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, } clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags); - if (test_bit(_XEN_PCIF_active, - (unsigned long *)&sh_info->flags)) { - dev_dbg(&psdev->dev->dev, - "schedule pci_conf service in " DRV_NAME "\n"); - xen_pcibk_test_and_schedule_op(psdev->pdev); - } - res = (pci_ers_result_t)aer_op->err; return res; } diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index 263c059bff90..235cdfe13494 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -14,6 +14,7 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/atomic.h> +#include <xen/events.h> #include <xen/interface/io/pciif.h> #define DRV_NAME "xen-pciback" @@ -27,6 +28,8 @@ struct pci_dev_entry { #define PDEVF_op_active (1<<(_PDEVF_op_active)) #define _PCIB_op_pending (1) #define PCIB_op_pending (1<<(_PCIB_op_pending)) +#define _EOI_pending (2) +#define EOI_pending (1<<(_EOI_pending)) struct xen_pcibk_device { void *pci_dev_data; @@ -182,12 +185,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev) irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id); void xen_pcibk_do_op(struct work_struct *data); +static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev, + unsigned int eoi_flag) +{ + if (test_and_clear_bit(_EOI_pending, &pdev->flags)) + xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag); +} + int xen_pcibk_xenbus_register(void); void xen_pcibk_xenbus_unregister(void); extern int verbose_request; - -void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev); #endif /* Handles shared IRQs that can go to device domain and control domain. */
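xen_pcibk_lateeoi() above leans on test_and_clear_bit() so that of the several paths that may want to signal the EOI (AER processing, the op work loop, the scheduling helper) exactly one does. A userspace model of the once-only behaviour; note the kernel primitive is atomic while this sketch is not:

    #include <stdio.h>

    #define EOI_PENDING (1u << 2)

    /* non-atomic stand-in for test_and_clear_bit() */
    static int test_and_clear(unsigned int *flags, unsigned int bit)
    {
        int was_set = !!(*flags & bit);

        *flags &= ~bit;
        return was_set;
    }

    static void pcibk_lateeoi(unsigned int *flags)
    {
        if (test_and_clear(flags, EOI_PENDING))
            printf("EOI sent\n");           /* xen_irq_lateeoi() here */
        else
            printf("EOI already sent\n");   /* racing caller: no-op */
    }

    int main(void)
    {
        unsigned int flags = EOI_PENDING;

        pcibk_lateeoi(&flags);   /* first caller wins */
        pcibk_lateeoi(&flags);   /* second caller does nothing */
        return 0;
    }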
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 787966f44589..c4ed2c634ca7 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -297,26 +297,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, return 0; } #endif + +static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev) +{ + return test_bit(_XEN_PCIF_active, + (unsigned long *)&pdev->sh_info->flags) && + !test_and_set_bit(_PDEVF_op_active, &pdev->flags); +} + /* * Now the same evtchn is used for both pcifront conf_read_write request * as well as pcie aer front end ack. We use a new work_queue to schedule * xen_pcibk conf_read_write service for avoiding conflict with aer_core * do_recovery job which also uses the system default work_queue */ -void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) +static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) { + bool eoi = true; + /* Check that frontend is requesting an operation and that we are not * already processing a request */ - if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags) && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) { + if (xen_pcibk_test_op_pending(pdev)) { schedule_work(&pdev->op_work); + eoi = false; } /*_XEN_PCIB_active should have been cleared by pcifront. And also make sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/ if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) && test_bit(_PCIB_op_pending, &pdev->flags)) { wake_up(&xen_pcibk_aer_wait_queue); + eoi = false; } + + /* EOI if there was nothing to do. */ + if (eoi) + xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS); } /* Performing the configuration space reads/writes must not be done in atomic @@ -324,10 +339,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) * use of semaphores). This function is intended to be called from a work * queue in process context taking a struct xen_pcibk_device as a parameter */ -void xen_pcibk_do_op(struct work_struct *data) +static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev) { - struct xen_pcibk_device *pdev = - container_of(data, struct xen_pcibk_device, op_work); struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data = NULL; struct xen_pci_op *op = &pdev->op; @@ -400,16 +413,31 @@ void xen_pcibk_do_op(struct work_struct *data) smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ clear_bit(_PDEVF_op_active, &pdev->flags); smp_mb__after_atomic(); /* /before/ final check for work */ +} - /* Check to see if the driver domain tried to start another request in - * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. - */ - xen_pcibk_test_and_schedule_op(pdev); +void xen_pcibk_do_op(struct work_struct *data) +{ + struct xen_pcibk_device *pdev = + container_of(data, struct xen_pcibk_device, op_work); + + do { + xen_pcibk_do_one_op(pdev); + } while (xen_pcibk_test_op_pending(pdev)); + + xen_pcibk_lateeoi(pdev, 0); } irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id) { struct xen_pcibk_device *pdev = dev_id; + bool eoi; + + /* IRQs might come in before pdev->evtchn_irq is written.
*/ + if (unlikely(pdev->evtchn_irq != irq)) + pdev->evtchn_irq = irq; + + eoi = test_and_set_bit(_EOI_pending, &pdev->flags); + WARN(eoi, "IRQ while EOI pending\n"); xen_pcibk_test_and_schedule_op(pdev); diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 833b2d2c4318..7fb65836230a 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -123,7 +123,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref, pdev->sh_info = vaddr; - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event, 0, DRV_NAME, pdev); if (err < 0) { @@ -688,7 +688,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev, /* watch the backend node for backend configuration information */ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch, - xen_pcibk_be_watch); + NULL, xen_pcibk_be_watch); if (err) goto out; diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index ba0942e481bc..32aba2e8c075 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -91,7 +91,6 @@ struct vscsibk_info { unsigned int irq; struct vscsiif_back_ring ring; - int ring_error; spinlock_t ring_lock; atomic_t nr_unreplied_reqs; @@ -423,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, return 0; err = gnttab_map_refs(map, NULL, pg, cnt); - BUG_ON(err); for (i = 0; i < cnt; i++) { if (unlikely(map[i].status != GNTST_okay)) { pr_err("invalid buffer -- could not remap it\n"); map[i].handle = SCSIBACK_INVALID_HANDLE; - err = -ENOMEM; + if (!err) + err = -ENOMEM; } else { get_page(pg[i]); } @@ -722,7 +721,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info, return pending_req; } -static int scsiback_do_cmd_fn(struct vscsibk_info *info) +static int scsiback_do_cmd_fn(struct vscsibk_info *info, + unsigned int *eoi_flags) { struct vscsiif_back_ring *ring = &info->ring; struct vscsiif_request ring_req; @@ -739,11 +739,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) rc = ring->rsp_prod_pvt; pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n", info->domid, rp, rc, rp - rc); - info->ring_error = 1; - return 0; + return -EINVAL; } while ((rc != rp)) { + *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; + if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) break; @@ -802,13 +803,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) static irqreturn_t scsiback_irq_fn(int irq, void *dev_id) { struct vscsibk_info *info = dev_id; + int rc; + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; - if (info->ring_error) - return IRQ_HANDLED; - - while (scsiback_do_cmd_fn(info)) + while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0) cond_resched(); + /* In case of a ring error we keep the event channel masked. 
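The new xen_pcibk_do_op() above drains every request the frontend managed to queue before issuing a single late EOI for the burst; together with the _EOI_pending bit, the channel stays masked exactly while work is outstanding. Its control flow, reduced to a runnable toy:

    #include <stdio.h>

    static int queued = 2;   /* pretend the frontend queued two requests */

    static int test_op_pending(void) { return queued > 0; }

    static void do_one_op(void) { printf("processing op, %d left\n", --queued); }

    static void do_op(void)
    {
        do {
            do_one_op();
        } while (test_op_pending());

        printf("late EOI for the whole burst\n");
    }

    int main(void)
    {
        do_op();
        return 0;
    }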
*/ + if (!rc) + xen_irq_lateeoi(irq, eoi_flags); + return IRQ_HANDLED; } @@ -829,7 +833,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref, sring = (struct vscsiif_sring *)area; BACK_RING_INIT(&info->ring, sring, PAGE_SIZE); - err = bind_interdomain_evtchn_to_irq(info->domid, evtchn); + err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn); if (err < 0) goto unmap_page; @@ -1252,7 +1256,6 @@ static int scsiback_probe(struct xenbus_device *dev, info->domid = dev->otherend_id; spin_lock_init(&info->ring_lock); - info->ring_error = 0; atomic_set(&info->nr_unreplied_reqs, 0); init_waitqueue_head(&info->waiting_to_free); info->dev = dev; diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index d75a2385b37c..88516a8a9f93 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -44,6 +44,8 @@ struct xen_bus_type { int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); + bool (*otherend_will_handle)(struct xenbus_watch *watch, + const char *path, const char *token); void (*otherend_changed)(struct xenbus_watch *watch, const char *path, const char *token); struct bus_type bus; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e17ca8156171..81eddb8529ff 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -114,18 +114,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate); */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, + bool (*will_handle)(struct xenbus_watch *, + const char *, const char *), void (*callback)(struct xenbus_watch *, const char *, const char *)) { int err; watch->node = path; + watch->will_handle = will_handle; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; + watch->will_handle = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } @@ -152,6 +156,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path); */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, + bool (*will_handle)(struct xenbus_watch *, + const char *, const char *), void (*callback)(struct xenbus_watch *, const char *, const char *), const char *pathfmt, ...) @@ -168,7 +174,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } - err = xenbus_watch_path(dev, path, watch, callback); + err = xenbus_watch_path(dev, path, watch, will_handle, callback); if (err) kfree(path); @@ -363,8 +369,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); @@ -448,7 +460,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { - return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + int err; + + err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + /* Some hypervisors are buggy and can return 1. 
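The xenbus_grant_ring() change above (and the earlier swiotlb one) fix the same class of bug: vmalloc()ed addresses are not part of the linear map, so virt_to_page()/virt_to_gfn() silently produce garbage for them, and the lookup must branch on is_vmalloc_addr(). Only the dispatch is sketched here; the helpers are stubs, not the real MM walkers:

    #include <stdbool.h>
    #include <stdio.h>

    /* stubs standing in for the kernel helpers; only the dispatch matters */
    static bool is_vmalloc_addr(const void *p) { (void)p; return false; }
    static unsigned long vmalloc_to_pfn(const void *p) { (void)p; return 0; }
    static unsigned long virt_to_pfn(const void *p)
    {
        return (unsigned long)p >> 12;
    }

    static unsigned long addr_to_gfn(const void *vaddr)
    {
        return is_vmalloc_addr(vaddr) ? vmalloc_to_pfn(vaddr)
                                      : virt_to_pfn(vaddr);
    }

    int main(void)
    {
        static char ring[4096];

        printf("gfn=%lu\n", addr_to_gfn(ring));
        return 0;
    }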
*/ + if (err > 0) + err = GNTST_general_error; + + return err; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index eb5151fc8efa..e5fda0256feb 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex); static int xenbus_irq; static struct task_struct *xenbus_task; -static DECLARE_WORK(probe_work, xenbus_probe); - - static irqreturn_t wake_waiting(int irq, void *unused) { - if (unlikely(xenstored_ready == 0)) { - xenstored_ready = 1; - schedule_work(&probe_work); - } - wake_up(&xb_waitq); return IRQ_HANDLED; } diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 5b471889d723..652894d61967 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev) container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, + bus->otherend_will_handle, bus->otherend_changed, "%s/%s", dev->otherend, "state"); } @@ -682,29 +683,107 @@ void unregister_xenstore_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); -void xenbus_probe(struct work_struct *unused) +static void xenbus_probe(void) { xenstored_ready = 1; + /* + * In the HVM case, xenbus_init() deferred its call to + * xs_init() in case callbacks were not operational yet. + * So do it now. + */ + if (xen_store_domain_type == XS_HVM) + xs_init(); + /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } -EXPORT_SYMBOL_GPL(xenbus_probe); -static int __init xenbus_probe_initcall(void) +/* + * Returns true when XenStore init must be deferred in order to + * allow the PCI platform device to be initialised, before we + * can actually have event channel interrupts working. + */ +static bool xs_hvm_defer_init_for_callback(void) { - if (!xen_domain()) - return -ENODEV; +#ifdef CONFIG_XEN_PVHVM + return xen_store_domain_type == XS_HVM && + !xen_have_vector_callback; +#else + return false; +#endif +} - if (xen_initial_domain() || xen_hvm_domain()) - return 0; +static int xenbus_probe_thread(void *unused) +{ + DEFINE_WAIT(w); - xenbus_probe(NULL); + /* + * We actually just want to wait for *any* trigger of xb_waitq, + * and run xenbus_probe() the moment it occurs. + */ + prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&xb_waitq, &w); + + DPRINTK("probing"); + xenbus_probe(); return 0; } +static int __init xenbus_probe_initcall(void) +{ + /* + * Probe XenBus here in the XS_PV case, and also XS_HVM unless we + * need to wait for the platform PCI device to come up. + */ + if (xen_store_domain_type == XS_PV || + (xen_store_domain_type == XS_HVM && + !xs_hvm_defer_init_for_callback())) + xenbus_probe(); + + /* + * For XS_LOCAL, spawn a thread which will wait for xenstored + * or a xenstore-stubdom to be started, then probe. It will be + * triggered when communication starts happening, by waiting + * on xb_waitq. 
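The xenbus_probe.c rework above ends up with three bring-up paths: PV domains (and HVM domains whose vector callback already works) probe straight from the initcall; HVM domains without a callback defer until xen_set_callback_via(); XS_LOCAL waits on xb_waitq in a kthread until xenstored starts talking. Restated as a small decision table in C:

    #include <stdbool.h>
    #include <stdio.h>

    enum xs_type { XS_LOCAL, XS_PV, XS_HVM };

    static bool defer_for_callback(enum xs_type t, bool have_vector_callback)
    {
        return t == XS_HVM && !have_vector_callback;
    }

    static const char *probe_path(enum xs_type t, bool have_vector_callback)
    {
        if (t == XS_PV || (t == XS_HVM &&
                           !defer_for_callback(t, have_vector_callback)))
            return "initcall";
        if (t == XS_LOCAL)
            return "kthread waiting on xb_waitq";
        return "xen_set_callback_via()";
    }

    int main(void)
    {
        printf("PV:        %s\n", probe_path(XS_PV, true));
        printf("HVM/no-cb: %s\n", probe_path(XS_HVM, false));
        printf("LOCAL:     %s\n", probe_path(XS_LOCAL, false));
        return 0;
    }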
+ */ + if (xen_store_domain_type == XS_LOCAL) { + struct task_struct *probe_task; + + probe_task = kthread_run(xenbus_probe_thread, NULL, + "xenbus_probe"); + if (IS_ERR(probe_task)) + return PTR_ERR(probe_task); + } + return 0; +} device_initcall(xenbus_probe_initcall); +int xen_set_callback_via(uint64_t via) +{ + struct xen_hvm_param a; + int ret; + + a.domid = DOMID_SELF; + a.index = HVM_PARAM_CALLBACK_IRQ; + a.value = via; + + ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a); + if (ret) + return ret; + + /* + * If xenbus_probe_initcall() deferred the xenbus_probe() + * due to the callback not functioning yet, we can do it now. + */ + if (!xenstored_ready && xs_hvm_defer_init_for_callback()) + xenbus_probe(); + + return ret; +} +EXPORT_SYMBOL_GPL(xen_set_callback_via); + /* Set up event channel for xenstored which is run as a local process * (this is normally used only in dom0) */ @@ -817,11 +896,17 @@ static int __init xenbus_init(void) break; } - /* Initialize the interface to xenstore. */ - err = xs_init(); - if (err) { - pr_warn("Error initializing xenstore comms: %i\n", err); - goto out_error; + /* + * HVM domains may not have a functional callback yet. In that + * case let xs_init() be called from xenbus_probe(), which will + * get invoked at an appropriate time. + */ + if (xen_store_domain_type != XS_HVM) { + err = xs_init(); + if (err) { + pr_warn("Error initializing xenstore comms: %i\n", err); + goto out_error; + } } if ((xen_store_domain_type != XS_LOCAL) && diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index b0bed4faf44c..4bb603051d5b 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, return err; } +static bool frontend_will_handle(struct xenbus_watch *watch, + const char *path, const char *token) +{ + return watch->nr_pending == 0; +} + static void frontend_changed(struct xenbus_watch *watch, const char *path, const char *token) { @@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = { .levels = 3, /* backend/type/<frontend>/<id> */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, + .otherend_will_handle = frontend_will_handle, .otherend_changed = frontend_changed, .bus = { .name = "xen-backend", diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3a06eb699f33..12e02eb01f59 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event) spin_lock(&watches_lock); event->handle = find_watch(event->token); - if (event->handle != NULL) { + if (event->handle != NULL && + (!event->handle->will_handle || + event->handle->will_handle(event->handle, + event->path, event->token))) { spin_lock(&watch_events_lock); list_add_tail(&event->list, &watch_events); + event->handle->nr_pending++; wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else @@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch) sprintf(token, "%lX", (long)watch); + watch->nr_pending = 0; + down_read(&xs_watch_rwsem); spin_lock(&watches_lock); @@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch) /* Cancel pending watch events. 
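The will_handle hook and nr_pending counter above work together: the backend's frontend_will_handle() admits a new watch event only while none is pending (collapsing a flood of state changes into one delivery), and nr_pending lets unregister_xenbus_watch() skip scanning the event list when nothing is queued. The queueing decision in miniature; in the kernel, dequeueing in xenwatch_thread() decrements the counter again:

    #include <stdio.h>

    struct watch {
        int nr_pending;   /* events queued but not yet delivered */
        int (*will_handle)(struct watch *w);
    };

    /* backend policy: queue a new event only if none is pending */
    static int frontend_will_handle(struct watch *w)
    {
        return w->nr_pending == 0;
    }

    static void deliver(struct watch *w)
    {
        if (!w->will_handle || w->will_handle(w)) {
            w->nr_pending++;
            printf("event queued (nr_pending=%d)\n", w->nr_pending);
        } else {
            printf("event dropped, one already queued\n");
        }
    }

    int main(void)
    {
        struct watch w = { 0, frontend_will_handle };

        deliver(&w);   /* queued */
        deliver(&w);   /* dropped: one already pending */
        return 0;
    }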
*/ spin_lock(&watch_events_lock); - list_for_each_entry_safe(event, tmp, &watch_events, list) { - if (event->handle != watch) - continue; - list_del(&event->list); - kfree(event); + if (watch->nr_pending) { + list_for_each_entry_safe(event, tmp, &watch_events, list) { + if (event->handle != watch) + continue; + list_del(&event->list); + kfree(event); + } + watch->nr_pending = 0; } spin_unlock(&watch_events_lock); @@ -865,7 +874,6 @@ void xs_suspend_cancel(void) static int xenwatch_thread(void *unused) { - struct list_head *ent; struct xs_watch_event *event; xenwatch_pid = current->pid; @@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused) mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); - ent = watch_events.next; - if (ent != &watch_events) - list_del(ent); + event = list_first_entry_or_null(&watch_events, + struct xs_watch_event, list); + if (event) { + list_del(&event->list); + event->handle->nr_pending--; + } spin_unlock(&watch_events_lock); - if (ent != &watch_events) { - event = list_entry(ent, struct xs_watch_event, list); + if (event) { event->handle->callback(event->handle, event->path, event->token); kfree(event); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 15a99f9c7253..39def020a074 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) } #ifdef CONFIG_9P_FSCACHE - if (v9ses->fscache) { + if (v9ses->fscache) v9fs_cache_session_put_cookie(v9ses); - kfree(v9ses->cachetag); - } + kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index fe7f0bd2048e..ee9cabac1204 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -609,9 +609,9 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma) struct writeback_control wbc = { .nr_to_write = LONG_MAX, .sync_mode = WB_SYNC_ALL, - .range_start = vma->vm_pgoff * PAGE_SIZE, + .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE, /* absolute end, byte at end included */ - .range_end = vma->vm_pgoff * PAGE_SIZE + + .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start - 1), }; diff --git a/fs/Kconfig b/fs/Kconfig index 2501e6f1f965..35452399975d 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -152,6 +152,7 @@ menu "Pseudo filesystems" source "fs/proc/Kconfig" source "fs/kernfs/Kconfig" source "fs/sysfs/Kconfig" +source "fs/cgroupfs/Kconfig" config TMPFS bool "Tmpfs virtual memory file system support (former shm fs)" diff --git a/fs/Makefile b/fs/Makefile index 14231b4cf383..f878cb2ef18a 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -24,6 +24,7 @@ endif obj-$(CONFIG_PROC_FS) += proc_namespace.o obj-y += notify/ +obj-y += cgroupfs/ obj-$(CONFIG_EPOLL) += eventpoll.o obj-y += anon_inodes.o obj-$(CONFIG_SIGNALFD) += signalfd.o diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index f708c45d5f66..29f11e10a7c7 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. + * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. 
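The v9fs_mmap_vm_close() change above is a classic 32-bit overflow fix: vm_pgoff * PAGE_SIZE was multiplied in unsigned long and only then widened to loff_t, so on 32-bit kernels any offset at or past 4 GiB wrapped; casting the left operand first forces the 64-bit multiply. Demonstrated with unsigned int playing the role of a 32-bit unsigned long:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pgoff = 0x00200000;   /* page offset for the 8 GiB mark */
        unsigned int page_size = 4096;

        /* multiply first (wraps at 32 bits), widen afterwards */
        unsigned long long wrong = (unsigned long long)(pgoff * page_size);
        /* widen first, multiply in 64 bits */
        unsigned long long right = (unsigned long long)pgoff * page_size;

        printf("wrong=%llu right=%llu\n", wrong, right);
        return 0;
    }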
+ */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. */ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..ba084b0b214b 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -794,6 +812,12 @@ done: if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 41c5749f4db7..5400a876d73f 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry, return -EIO; bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino); - if (!bh_new) + if (!bh_new) { + affs_brelse(bh_old); return -EIO; + } /* Remove old header from its parent directory. 
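affs_mode_to_prot() above juggles two conventions: the classic owner RWED bits are active-low (a set bit denies access) while the multi-user group/other extension bits are active-high, and the delete-allow bit is derived from whether any bit of that class is in use (mode & 0070, mode & 0007). A cut-down, compilable restatement; the FIBF_* values here are placeholders, the real constants live in amigaffs.h:

    #include <stdio.h>

    /* placeholder values; the real constants are defined in amigaffs.h */
    #define FIBF_NOEXECUTE  (1u << 0)
    #define FIBF_NOREAD     (1u << 1)
    #define FIBF_NOWRITE    (1u << 2)
    #define FIBF_GRP_READ   (1u << 8)
    #define FIBF_GRP_DELETE (1u << 11)

    static unsigned int mode_to_prot(unsigned int mode)
    {
        unsigned int prot = 0;

        /* owner bits: inverted, a set bit *denies* the operation */
        if (!(mode & 0100)) prot |= FIBF_NOEXECUTE;
        if (!(mode & 0400)) prot |= FIBF_NOREAD;
        if (!(mode & 0200)) prot |= FIBF_NOWRITE;

        /* group bits: not inverted; delete-allow tracks any group bit */
        if (mode & 0040) prot |= FIBF_GRP_READ;
        if (mode & 0070) prot |= FIBF_GRP_DELETE;

        return prot;
    }

    int main(void)
    {
        printf("prot(0640)=%#x\n", mode_to_prot(0640));
        return 0;
    }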
*/ affs_lock_dir(old_dir); diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 78ba5f932287..296b489861a9 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, return ERR_PTR(-ENOMEM); } + cell->name = kmalloc(namelen + 1, GFP_KERNEL); + if (!cell->name) { + kfree(cell); + return ERR_PTR(-ENOMEM); + } + cell->net = net; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); + cell->name[i] = 0; atomic_set(&cell->usage, 2); INIT_WORK(&cell->manager, afs_manage_cell); @@ -203,6 +210,7 @@ parse_failed: if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); error: + kfree(cell->name); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); @@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu) afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); key_put(cell->anonymous_key); + kfree(cell->name); kfree(cell); _leave(" [destroyed]"); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index b378cd780ed5..fc5eb0f89304 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -169,7 +169,7 @@ static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server) spin_lock(&server->probe_lock); - if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { + if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { server->cm_epoch = call->epoch; server->probe.cm_epoch = call->epoch; goto out; diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 5c794f4b051a..e7494cd49ce7 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -69,7 +69,6 @@ const struct inode_operations afs_dir_inode_operations = { .permission = afs_permission, .getattr = afs_getattr, .setattr = afs_setattr, - .listxattr = afs_listxattr, }; const struct address_space_operations afs_dir_aops = { @@ -658,7 +657,8 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, cookie->ctx.actor = afs_lookup_filldir; cookie->name = dentry->d_name; - cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */ + cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want + * and slot 1 for the directory */ read_seqlock_excl(&dvnode->cb_lock); dcbi = rcu_dereference_protected(dvnode->cb_interest, @@ -709,7 +709,11 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, if (!cookie->inodes) goto out_s; - for (i = 1; i < cookie->nr_fids; i++) { + cookie->fids[1] = dvnode->fid; + cookie->statuses[1].cb_break = afs_calc_vnode_cb_break(dvnode); + cookie->inodes[1] = igrab(&dvnode->vfs_inode); + + for (i = 2; i < cookie->nr_fids; i++) { scb = &cookie->statuses[i]; /* Find any inodes that already exist and get their @@ -1032,7 +1036,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) struct dentry *parent; struct inode *inode; struct key *key; - afs_dataversion_t dir_version; + afs_dataversion_t dir_version, invalid_before; long de_version; int ret; @@ -1084,8 +1088,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (de_version == (long)dir_version) goto out_valid_noupdate; - dir_version = dir->invalid_before; - if (de_version - (long)dir_version >= 0) + invalid_before = dir->invalid_before; + if (de_version - (long)invalid_before >= 0) goto out_valid; _debug("dir modified"); @@ -1275,6 +1279,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct afs_fs_cursor fc; struct afs_vnode *dvnode = AFS_FS_I(dir); struct key *key; + 
afs_dataversion_t data_version; int ret; mode |= S_IFDIR; @@ -1295,7 +1300,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; + data_version = dvnode->status.data_version + 1; while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); @@ -1316,10 +1321,14 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) goto error_key; } - if (ret == 0 && - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, - afs_edit_dir_for_create); + if (ret == 0) { + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) + afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, + afs_edit_dir_for_create); + up_write(&dvnode->validate_lock); + } key_put(key); kfree(scb); @@ -1360,6 +1369,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) struct afs_fs_cursor fc; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; struct key *key; + afs_dataversion_t data_version; int ret; _enter("{%llx:%llu},{%pd}", @@ -1391,7 +1401,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; + data_version = dvnode->status.data_version + 1; while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); @@ -1404,9 +1414,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) ret = afs_end_vnode_operation(&fc); if (ret == 0) { afs_dir_remove_subdir(dentry); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) afs_edit_dir_remove(dvnode, &dentry->d_name, afs_edit_dir_for_rmdir); + up_write(&dvnode->validate_lock); } } @@ -1544,10 +1557,15 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) ret = afs_end_vnode_operation(&fc); if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) ret = afs_dir_remove_link(dvnode, dentry, key); - if (ret == 0 && - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_edit_dir_remove(dvnode, &dentry->d_name, - afs_edit_dir_for_unlink); + + if (ret == 0) { + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) + afs_edit_dir_remove(dvnode, &dentry->d_name, + afs_edit_dir_for_unlink); + up_write(&dvnode->validate_lock); + } } if (need_rehash && ret < 0 && ret != -ENOENT) @@ -1573,6 +1591,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct afs_status_cb *scb; struct afs_vnode *dvnode = AFS_FS_I(dir); struct key *key; + afs_dataversion_t data_version; int ret; mode |= S_IFREG; @@ -1597,7 +1616,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; + data_version = dvnode->status.data_version + 1; while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); @@ -1618,9 +1637,12 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, goto error_key; } 
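Every directory-mutating op in fs/afs now follows the pattern visible in afs_mkdir()/afs_rmdir() above: capture the expected post-op data version (current + 1) before issuing the RPC, then apply the local directory edit under validate_lock only if the server ended up exactly there; any other version means a third party also changed the directory, and the local copy must be refetched instead. The guard in isolation:

    #include <stdio.h>

    typedef unsigned long long dataversion_t;

    struct dir {
        int valid;                  /* AFS_VNODE_DIR_VALID in the kernel */
        dataversion_t data_version; /* as reported back by the server */
    };

    /* expected == pre-op version + 1, captured before issuing the RPC */
    static void maybe_edit_dir(struct dir *d, dataversion_t expected)
    {
        /* the kernel takes down_write(&dvnode->validate_lock) here */
        if (d->valid && d->data_version == expected)
            printf("patching local dir image\n");
        else
            printf("skipping edit, dir will be refetched\n");
    }

    int main(void)
    {
        struct dir d = { 1, 6 };

        maybe_edit_dir(&d, 6);   /* server advanced by exactly our change */
        d.data_version = 8;      /* someone else changed the dir too */
        maybe_edit_dir(&d, 7);
        return 0;
    }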
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, afs_edit_dir_for_create); + up_write(&dvnode->validate_lock); kfree(scb); key_put(key); @@ -1648,6 +1670,7 @@ static int afs_link(struct dentry *from, struct inode *dir, struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_vnode *vnode = AFS_FS_I(d_inode(from)); struct key *key; + afs_dataversion_t data_version; int ret; _enter("{%llx:%llu},{%llx:%llu},{%pd}", @@ -1672,7 +1695,7 @@ static int afs_link(struct dentry *from, struct inode *dir, ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; + data_version = dvnode->status.data_version + 1; if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) { afs_end_vnode_operation(&fc); @@ -1702,9 +1725,12 @@ static int afs_link(struct dentry *from, struct inode *dir, goto error_key; } - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid, afs_edit_dir_for_link); + up_write(&dvnode->validate_lock); key_put(key); kfree(scb); @@ -1732,6 +1758,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, struct afs_status_cb *scb; struct afs_vnode *dvnode = AFS_FS_I(dir); struct key *key; + afs_dataversion_t data_version; int ret; _enter("{%llx:%llu},{%pd},%s", @@ -1759,7 +1786,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t data_version = dvnode->status.data_version + 1; + data_version = dvnode->status.data_version + 1; while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); @@ -1780,9 +1807,12 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, goto error_key; } - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == data_version) afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid, afs_edit_dir_for_symlink); + up_write(&dvnode->validate_lock); key_put(key); kfree(scb); @@ -1812,6 +1842,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *tmp = NULL, *rehash = NULL; struct inode *new_inode; struct key *key; + afs_dataversion_t orig_data_version; + afs_dataversion_t new_data_version; bool new_negative = d_is_negative(new_dentry); int ret; @@ -1890,10 +1922,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { - afs_dataversion_t orig_data_version; - afs_dataversion_t new_data_version; - struct afs_status_cb *new_scb = &scb[1]; - orig_data_version = orig_dvnode->status.data_version + 1; if (orig_dvnode != new_dvnode) { @@ -1904,7 +1932,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, new_data_version = new_dvnode->status.data_version + 1; } else { new_data_version = orig_data_version; - new_scb = &scb[0]; } while (afs_select_fileserver(&fc)) { @@ -1912,7 +1939,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, fc.cb_break_2 = 
afs_calc_vnode_cb_break(new_dvnode); afs_fs_rename(&fc, old_dentry->d_name.name, new_dvnode, new_dentry->d_name.name, - &scb[0], new_scb); + &scb[0], &scb[1]); } afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break, @@ -1930,18 +1957,25 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, if (ret == 0) { if (rehash) d_rehash(rehash); - if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags)) - afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, - afs_edit_dir_for_rename_0); + down_write(&orig_dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && + orig_dvnode->status.data_version == orig_data_version) + afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, + afs_edit_dir_for_rename_0); + if (orig_dvnode != new_dvnode) { + up_write(&orig_dvnode->validate_lock); - if (!new_negative && - test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags)) - afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, - afs_edit_dir_for_rename_1); + down_write(&new_dvnode->validate_lock); + } + if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) && + orig_dvnode->status.data_version == new_data_version) { + if (!new_negative) + afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, + afs_edit_dir_for_rename_1); - if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags)) afs_edit_dir_add(new_dvnode, &new_dentry->d_name, &vnode->fid, afs_edit_dir_for_rename_2); + } new_inode = d_inode(new_dentry); if (new_inode) { @@ -1957,14 +1991,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, * Note that if we ever implement RENAME_EXCHANGE, we'll have * to update both dentries with opposing dir versions. */ - if (new_dvnode != orig_dvnode) { - afs_update_dentry_version(&fc, old_dentry, &scb[1]); - afs_update_dentry_version(&fc, new_dentry, &scb[1]); - } else { - afs_update_dentry_version(&fc, old_dentry, &scb[0]); - afs_update_dentry_version(&fc, new_dentry, &scb[0]); - } + afs_update_dentry_version(&fc, old_dentry, &scb[1]); + afs_update_dentry_version(&fc, new_dentry, &scb[1]); d_move(old_dentry, new_dentry); + up_write(&new_dvnode->validate_lock); goto error_tmp; } diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index 361088a5edb9..d94e2b7cddff 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -21,6 +21,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode { struct afs_fs_cursor fc; struct afs_status_cb *scb; + afs_dataversion_t dir_data_version; int ret = -ERESTARTSYS; _enter("%pd,%pd", old, new); @@ -31,7 +32,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode trace_afs_silly_rename(vnode, false); if (afs_begin_vnode_operation(&fc, dvnode, key, true)) { - afs_dataversion_t dir_data_version = dvnode->status.data_version + 1; + dir_data_version = dvnode->status.data_version + 1; while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); @@ -54,12 +55,15 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode dvnode->silly_key = key_get(key); } - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dir_data_version) { afs_edit_dir_remove(dvnode, &old->d_name, afs_edit_dir_for_silly_0); - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) afs_edit_dir_add(dvnode, &new->d_name, &vnode->fid, afs_edit_dir_for_silly_1); + } + up_write(&dvnode->validate_lock); } kfree(scb); @@ -181,10 +185,14 @@ static 
int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } } - if (ret == 0 && - test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_edit_dir_remove(dvnode, &dentry->d_name, - afs_edit_dir_for_unlink); + if (ret == 0) { + down_write(&dvnode->validate_lock); + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + dvnode->status.data_version == dir_data_version) + afs_edit_dir_remove(dvnode, &dentry->d_name, + afs_edit_dir_for_unlink); + up_write(&dvnode->validate_lock); + } } kfree(scb); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 7503899c0a1b..f07e53ab808e 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -289,15 +289,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); + if (root) { + inode_lock(root->d_inode); - /* Remove all the pins for dirs created for manually added cells */ - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if (subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } diff --git a/fs/afs/file.c b/fs/afs/file.c index dd3c55c9101c..5d0b472e984b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -42,7 +42,6 @@ const struct inode_operations afs_file_inode_operations = { .getattr = afs_getattr, .setattr = afs_setattr, .permission = afs_permission, - .listxattr = afs_listxattr, }; const struct address_space_operations afs_fs_aops = { diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index e1b9ed679045..51ee3dd79700 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -32,9 +32,8 @@ void afs_fileserver_probe_result(struct afs_call *call) struct afs_server *server = call->server; unsigned int server_index = call->server_index; unsigned int index = call->addr_ix; - unsigned int rtt = UINT_MAX; + unsigned int rtt_us; bool have_result = false; - u64 _rtt; int ret = call->error; _enter("%pU,%u", &server->uuid, index); @@ -93,15 +92,9 @@ responded: } } - /* Get the RTT and scale it to fit into a 32-bit value that represents - * over a minute of time so that we can access it with one instruction - * on a 32-bit system. - */ - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); - _rtt /= 64; - rtt = (_rtt > UINT_MAX) ? 
UINT_MAX : _rtt; - if (rtt < server->probe.rtt) { - server->probe.rtt = rtt; + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { + server->probe.rtt = rtt_us; alist->preferred = index; have_result = true; } @@ -113,8 +106,7 @@ out: spin_unlock(&server->probe_lock); _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", - server_index, index, &alist->addrs[index].transport, - (unsigned int)rtt, ret); + server_index, index, &alist->addrs[index].transport, rtt_us, ret); have_result |= afs_fs_probe_done(server); if (have_result) { diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 6f84231f11a5..5c2729fc07e5 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -56,9 +56,9 @@ static void xdr_dump_bad(const __be32 *bp) /* * decode an AFSFetchStatus block */ -static int xdr_decode_AFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_AFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; @@ -78,7 +78,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, */ status->abort_code = abort_code; scb->have_error = true; - return 0; + goto advance; } pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); @@ -87,7 +87,8 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, if (abort_code != 0 && inline_error) { status->abort_code = abort_code; - return 0; + scb->have_error = true; + goto advance; } type = ntohl(xdr->type); @@ -123,13 +124,14 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, data_version |= (u64)ntohl(xdr->data_version_hi) << 32; status->data_version = data_version; scb->have_status = true; - +advance: *_bp = (const void *)*_bp + sizeof(*xdr); - return 0; + return; bad: xdr_dump_bad(*_bp); - return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + goto advance; } static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) @@ -249,9 +251,7 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -380,8 +380,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) ASSERTCMP(req->offset, <=, PAGE_SIZE); if (req->offset == PAGE_SIZE) { req->offset = 0; - if (req->page_done) - req->page_done(req); req->index++; if (req->remain > 0) goto begin_page; @@ -416,9 +414,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -435,11 +431,13 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) if (req->offset < PAGE_SIZE) zero_user_segment(req->pages[req->index], req->offset, PAGE_SIZE); - if (req->page_done) - req->page_done(req); req->offset = 0; } + if (req->page_done) + for (req->index = 0; req->index < req->nr_pages; req->index++) + req->page_done(req); + _leave(" = 0 [done]"); return 0; } @@ 
-574,12 +572,8 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -688,9 +682,7 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -781,12 +773,8 @@ static int afs_deliver_fs_link(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -875,12 +863,8 @@ static int afs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -983,16 +967,12 @@ static int afs_deliver_fs_rename(struct afs_call *call) if (ret < 0) return ret; - /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - if (call->out_dir_scb != call->out_scb) { - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - } + /* If the two dirs are the same, we have two copies of the same status + * report, so we just decode it twice. 
+ */ + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1100,9 +1080,7 @@ static int afs_deliver_fs_store_data(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1280,9 +1258,7 @@ static int afs_deliver_fs_store_status(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1951,9 +1927,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -2059,10 +2033,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) bp = call->buffer; scb = &call->out_scb[call->count]; - ret = xdr_decode_AFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; - + xdr_decode_AFSFetchStatus(&bp, call, scb); call->count++; if (call->count < call->count2) goto more_counts; @@ -2240,9 +2211,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); call->unmarshall++; @@ -2323,9 +2292,7 @@ static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 46d2d7cb461d..4f58b28a1edd 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -27,7 +27,6 @@ static const struct inode_operations afs_symlink_inode_operations = { .get_link = page_get_link, - .listxattr = afs_listxattr, }; static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode) @@ -171,6 +170,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, struct timespec64 t; umode_t mode; bool data_changed = false; + bool change_size = false; BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); @@ -226,6 +226,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, } else { set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); } + change_size = true; } else if (vnode->status.type == AFS_FTYPE_DIR) { /* Expected directory change is handled elsewhere so * that we can locally edit the directory and save on a @@ -233,11 +234,19 @@ static void afs_apply_status(struct afs_fs_cursor *fc, */ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) data_changed = false; + change_size = true; } if (data_changed) { inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); - afs_set_i_size(vnode, status->size); + + /* Only update the size 
if the data version jumped. If the + * file is being modified locally, then we might have our own + * idea of what the size should be that's not the same as + * what's on the server. + */ + if (change_size) + afs_set_i_size(vnode, status->size); } } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index d5efb1debebf..c3ad582f9fd0 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -161,6 +161,7 @@ struct afs_call { bool upgrade; /* T to request service upgrade */ bool have_reply_time; /* T if have got reply_time */ bool intr; /* T if interruptible */ + bool unmarshalling_error; /* T if an unmarshalling error occurred */ u16 service_id; /* Actual service ID (after upgrade) */ unsigned int debug_id; /* Trace ID */ u32 operation_ID; /* operation ID for an incoming call */ @@ -396,7 +397,7 @@ struct afs_cell { struct afs_vlserver_list __rcu *vl_servers; u8 name_len; /* Length of name */ - char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */ + char *name; /* Cell name, case-flattened and NUL-padded */ }; /* @@ -1329,7 +1330,7 @@ extern struct afs_volume *afs_create_volume(struct afs_fs_context *); extern void afs_activate_volume(struct afs_volume *); extern void afs_deactivate_volume(struct afs_volume *); extern void afs_put_volume(struct afs_cell *, struct afs_volume *); -extern int afs_check_volume_status(struct afs_volume *, struct key *); +extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *); /* * write.c @@ -1353,7 +1354,6 @@ extern int afs_launder_page(struct page *); * xattr.c */ extern const struct xattr_handler *afs_xattr_handlers[]; -extern ssize_t afs_listxattr(struct dentry *, char *, size_t); /* * yfsclient.c diff --git a/fs/afs/main.c b/fs/afs/main.c index c9c45d7078bd..5cd26af2464c 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -186,7 +186,7 @@ static int __init afs_init(void) goto error_cache; #endif - ret = register_pernet_subsys(&afs_net_ops); + ret = register_pernet_device(&afs_net_ops); if (ret < 0) goto error_net; @@ -206,7 +206,7 @@ static int __init afs_init(void) error_proc: afs_fs_exit(); error_fs: - unregister_pernet_subsys(&afs_net_ops); + unregister_pernet_device(&afs_net_ops); error_net: #ifdef CONFIG_AFS_FSCACHE fscache_unregister_netfs(&afs_cache_netfs); @@ -237,7 +237,7 @@ static void __exit afs_exit(void) proc_remove(afs_proc_symlink); afs_fs_exit(); - unregister_pernet_subsys(&afs_net_ops); + unregister_pernet_device(&afs_net_ops); #ifdef CONFIG_AFS_FSCACHE fscache_unregister_netfs(&afs_cache_netfs); #endif diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 52b19e9c1535..5334f1bd2bca 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -83,6 +83,7 @@ int afs_abort_to_error(u32 abort_code) case UAENOLCK: return -ENOLCK; case UAENOTEMPTY: return -ENOTEMPTY; case UAELOOP: return -ELOOP; + case UAEOVERFLOW: return -EOVERFLOW; case UAENOMEDIUM: return -ENOMEDIUM; case UAEDQUOT: return -EDQUOT; diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 79bc5f1338ed..f105f061e89b 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -32,7 +32,6 @@ const struct inode_operations afs_mntpt_inode_operations = { .lookup = afs_mntpt_lookup, .readlink = page_readlink, .getattr = afs_getattr, - .listxattr = afs_listxattr, }; const struct inode_operations afs_autocell_inode_operations = { diff --git a/fs/afs/proc.c b/fs/afs/proc.c index fba2ec3a3a9c..106b27011f6d 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -562,6 +562,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames) if (sysnames->subs[i] != afs_init_sysname && 
sysnames->subs[i] != sysnames->blank) kfree(sysnames->subs[i]); + kfree(sysnames); } } diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 172ba569cd60..2a3305e42b14 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -192,7 +192,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) write_unlock(&vnode->volume->servers_lock); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; @@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; @@ -341,7 +341,7 @@ start: /* See if we need to do an update of the volume record. Note that the * volume may have moved or even have been deleted. */ - error = afs_check_volume_status(vnode->volume, fc->key); + error = afs_check_volume_status(vnode->volume, fc); if (error < 0) goto failed_set_error; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index ef1d09f8920b..6adab30a8399 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -414,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) afs_wake_up_async_call : afs_wake_up_call_waiter), call->upgrade, - call->intr, + (call->intr ? RXRPC_PREINTERRUPTIBLE : + RXRPC_UNINTERRUPTIBLE), call->debug_id); if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); @@ -539,6 +540,8 @@ static void afs_deliver_to_call(struct afs_call *call) ret = call->type->deliver(call); state = READ_ONCE(call->state); + if (ret == 0 && call->unmarshalling_error) + ret = -EBADMSG; switch (ret) { case 0: afs_queue_call_work(call); @@ -962,5 +965,7 @@ noinline int afs_protocol_error(struct afs_call *call, int error, enum afs_eproto_cause cause) { trace_afs_protocol_error(call, error, cause); + if (call) + call->unmarshalling_error = true; return error; } diff --git a/fs/afs/server.c b/fs/afs/server.c index ca8115ba1724..d3a9288f7556 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -595,12 +595,9 @@ retry: } ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING, - TASK_INTERRUPTIBLE); + (fc->flags & AFS_FS_CURSOR_INTR) ? 
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret == -ERESTARTSYS) { - if (!(fc->flags & AFS_FS_CURSOR_INTR) && server->addresses) { - _leave(" = t [intr]"); - return true; - } fc->error = ret; _leave(" = f [intr]"); return false; diff --git a/fs/afs/super.c b/fs/afs/super.c index 7f8a9b3137bf..eb04dcc54328 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -236,6 +236,9 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param) _enter(",%s", name); + if (fc->source) + return invalf(fc, "kAFS: Multiple sources not supported"); + if (!name) { printk(KERN_ERR "kAFS: no volume name specified\n"); return -EINVAL; diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index 858498cc1b05..081b7e5b13f5 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -31,10 +31,9 @@ void afs_vlserver_probe_result(struct afs_call *call) struct afs_addr_list *alist = call->alist; struct afs_vlserver *server = call->vlserver; unsigned int server_index = call->server_index; + unsigned int rtt_us = 0; unsigned int index = call->addr_ix; - unsigned int rtt = UINT_MAX; bool have_result = false; - u64 _rtt; int ret = call->error; _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); @@ -93,15 +92,9 @@ responded: } } - /* Get the RTT and scale it to fit into a 32-bit value that represents - * over a minute of time so that we can access it with one instruction - * on a 32-bit system. - */ - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); - _rtt /= 64; - rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; - if (rtt < server->probe.rtt) { - server->probe.rtt = rtt; + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { + server->probe.rtt = rtt_us; alist->preferred = index; have_result = true; } @@ -113,8 +106,7 @@ out: spin_unlock(&server->probe_lock); _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", - server_index, index, &alist->addrs[index].transport, - (unsigned int)rtt, ret); + server_index, index, &alist->addrs[index].transport, rtt_us, ret); have_result |= afs_vl_probe_done(server); if (have_result) { diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 92ca5e27573b..4310336b9bb8 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -281,7 +281,7 @@ error: /* * Make sure the volume record is up to date. */ -int afs_check_volume_status(struct afs_volume *volume, struct key *key) +int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc) { time64_t now = ktime_get_real_seconds(); int ret, retries = 0; @@ -299,7 +299,7 @@ retry: } if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { - ret = afs_update_volume_status(volume, key); + ret = afs_update_volume_status(volume, fc->key); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); @@ -312,7 +312,9 @@ retry: return 0; } - ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, TASK_INTERRUPTIBLE); + ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, + (fc->flags & AFS_FS_CURSOR_INTR) ? 
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; diff --git a/fs/afs/write.c b/fs/afs/write.c index cb76566763db..96b042af6248 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -194,11 +194,11 @@ int afs_write_end(struct file *file, struct address_space *mapping, i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) { - spin_lock(&vnode->wb_lock); + write_seqlock(&vnode->cb_lock); i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) i_size_write(&vnode->vfs_inode, maybe_i_size); - spin_unlock(&vnode->wb_lock); + write_sequnlock(&vnode->cb_lock); } if (!PageUptodate(page)) { @@ -811,6 +811,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) vmf->page->index, priv); SetPagePrivate(vmf->page); set_page_private(vmf->page, priv); + file_update_time(file); sb_end_pagefault(inode->i_sb); return VM_FAULT_LOCKED; diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c index 5552d034090a..13292d4ca860 100644 --- a/fs/afs/xattr.c +++ b/fs/afs/xattr.c @@ -11,29 +11,6 @@ #include <linux/xattr.h> #include "internal.h" -static const char afs_xattr_list[] = - "afs.acl\0" - "afs.cell\0" - "afs.fid\0" - "afs.volume\0" - "afs.yfs.acl\0" - "afs.yfs.acl_inherited\0" - "afs.yfs.acl_num_cleaned\0" - "afs.yfs.vol_acl"; - -/* - * Retrieve a list of the supported xattrs. - */ -ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size) -{ - if (size == 0) - return sizeof(afs_xattr_list); - if (size < sizeof(afs_xattr_list)) - return -ERANGE; - memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list)); - return sizeof(afs_xattr_list); -} - /* * Get a file's ACL. */ diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 3ee7abf4b2d0..3b19b009452a 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -165,23 +165,23 @@ static void xdr_dump_bad(const __be32 *bp) int i; pr_notice("YFS XDR: Bad status record\n"); - for (i = 0; i < 5 * 4 * 4; i += 16) { + for (i = 0; i < 6 * 4 * 4; i += 16) { memcpy(x, bp, 16); bp += 4; pr_notice("%03x: %08x %08x %08x %08x\n", i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3])); } - memcpy(x, bp, 4); - pr_notice("0x50: %08x\n", ntohl(x[0])); + memcpy(x, bp, 8); + pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1])); } /* * Decode a YFSFetchStatus block */ -static int xdr_decode_YFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_YFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; @@ -192,7 +192,7 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, if (status->abort_code == VNOVNODE) status->nlink = 0; scb->have_error = true; - return 0; + goto advance; } type = ntohl(xdr->type); @@ -220,13 +220,14 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, status->size = xdr_to_u64(xdr->size); status->data_version = xdr_to_u64(xdr->data_version); scb->have_status = true; - +advance: *_bp += xdr_size(xdr); - return 0; + return; bad: xdr_dump_bad(*_bp); - return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + goto advance; } /* @@ -344,9 +345,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + 
xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -368,9 +367,7 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -493,8 +490,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) ASSERTCMP(req->offset, <=, PAGE_SIZE); if (req->offset == PAGE_SIZE) { req->offset = 0; - if (req->page_done) - req->page_done(req); req->index++; if (req->remain > 0) goto begin_page; @@ -532,9 +527,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -552,11 +545,13 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) if (req->offset < PAGE_SIZE) zero_user_segment(req->pages[req->index], req->offset, PAGE_SIZE); - if (req->page_done) - req->page_done(req); req->offset = 0; } + if (req->page_done) + for (req->index = 0; req->index < req->nr_pages; req->index++) + req->page_done(req); + _leave(" = 0 [done]"); return 0; } @@ -641,12 +636,8 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -799,14 +790,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSFid(&bp, &fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); /* Was deleted if vnode->status.abort_code == VNOVNODE. 
*/ xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -886,10 +872,7 @@ static int yfs_deliver_fs_remove(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); return 0; } @@ -971,12 +954,8 @@ static int yfs_deliver_fs_link(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); return 0; @@ -1058,12 +1037,8 @@ static int yfs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1151,15 +1126,11 @@ static int yfs_deliver_fs_rename(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - if (call->out_dir_scb != call->out_scb) { - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - } - + /* If the two dirs are the same, we have two copies of the same status + * report, so we just decode it twice. 
+ */ + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); return 0; @@ -1844,9 +1815,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) bp = call->buffer; scb = &call->out_scb[call->count]; - ret = xdr_decode_YFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, scb); call->count++; if (call->count < call->count2) @@ -2066,9 +2035,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) bp = call->buffer; yacl->inherit_flag = ntohl(*bp++); yacl->num_cleaned = ntohl(*bp++); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); call->unmarshall++; @@ -2195,6 +2162,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl memcpy(bp, acl->data, acl->size); if (acl->size != size) memset((void *)bp + acl->size, 0, size - acl->size); + bp += size / sizeof(__be32); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vnode->fid); diff --git a/fs/aio.c b/fs/aio.c index 4115d5ad6b90..47bb7b5685ba 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -176,6 +176,7 @@ struct fsync_iocb { struct file *file; struct work_struct work; bool datasync; + struct cred *creds; }; struct poll_iocb { @@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, static void aio_fsync_work(struct work_struct *work) { struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); + const struct cred *old_cred = override_creds(iocb->fsync.creds); iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); + revert_creds(old_cred); + put_cred(iocb->fsync.creds); iocb_put(iocb); } @@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, if (unlikely(!req->file->f_op->fsync)) return -EINVAL; + req->creds = prepare_creds(); + if (!req->creds) + return -ENOMEM; + req->datasync = datasync; INIT_WORK(&req->work, aio_fsync_work); schedule_work(&req->work); diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index f8ce1368218b..1a8f3c8ab32c 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -351,7 +351,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1; if (info->si_lasti == BFS_MAX_LASTI) - printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); + printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); else if (info->si_lasti > BFS_MAX_LASTI) { printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id); goto out1; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index c5642bcb6b46..7ce3cfd965d2 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1731,7 +1731,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, (!regset->active || regset->active(t->task, regset) > 0)) { int ret; size_t size = regset_size(t->task, regset); - void *data = kmalloc(size, GFP_KERNEL); + void *data = kzalloc(size, GFP_KERNEL); if (unlikely(!data)) return 0; ret = regset->get(t->task, regset, diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 831a2b25ba79..196f9f64d075 100644 --- a/fs/binfmt_flat.c +++ 
b/fs/binfmt_flat.c @@ -571,7 +571,7 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } - len = data_len + extra; + len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); @@ -585,7 +585,9 @@ static int load_flat_file(struct linux_binprm *bprm, vm_munmap(textpos, text_len); goto err; } - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", data_len + bss_len + stack_len, datapos); @@ -615,7 +617,7 @@ static int load_flat_file(struct linux_binprm *bprm, memp_size = len; } else { - len = text_len + data_len + extra; + len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); @@ -630,7 +632,9 @@ static int load_flat_file(struct linux_binprm *bprm, } realdatastart = textpos + ntohl(hdr->data_start); - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(u32), + FLAT_DATA_ALIGN); reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); @@ -647,9 +651,8 @@ static int load_flat_file(struct linux_binprm *bprm, (text_len + full_data - sizeof(struct flat_hdr)), 0); - if (datapos != realdatastart) - memmove((void *)datapos, (void *)realdatastart, - full_data); + memmove((void *) datapos, (void *) realdatastart, + full_data); #else /* * This is used on MMU systems mainly for testing. @@ -705,7 +708,8 @@ static int load_flat_file(struct linux_binprm *bprm, if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); - vm_munmap(textpos, text_len + data_len + extra); + vm_munmap(textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(u32)); goto err; } } diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index cdb45829354d..056a68292e15 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -696,12 +696,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, struct super_block *sb = file_inode(file)->i_sb; struct dentry *root = sb->s_root, *dentry; int err = 0; + struct file *f = NULL; e = create_entry(buffer, count); if (IS_ERR(e)) return PTR_ERR(e); + if (e->flags & MISC_FMT_OPEN_FILE) { + f = open_exec(e->interpreter); + if (IS_ERR(f)) { + pr_notice("register: failed to install interpreter file %s\n", + e->interpreter); + kfree(e); + return PTR_ERR(f); + } + e->interp_file = f; + } + inode_lock(d_inode(root)); dentry = lookup_one_len(e->name, root, strlen(e->name)); err = PTR_ERR(dentry); @@ -725,21 +737,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, goto out2; } - if (e->flags & MISC_FMT_OPEN_FILE) { - struct file *f; - - f = open_exec(e->interpreter); - if (IS_ERR(f)) { - err = PTR_ERR(f); - pr_notice("register: failed to install interpreter file %s\n", e->interpreter); - simple_release_fs(&bm_mnt, &entry_count); - iput(inode); - inode = NULL; - goto out2; - } - e->interp_file = f; - } - e->dentry = dget(dentry); inode->i_private = e; inode->i_fop = &bm_entry_operations; @@ -756,6 +753,8 @@ out: inode_unlock(d_inode(root)); if (err) { + if (f) + filp_close(f, NULL); kfree(e); return err; } diff --git a/fs/block_dev.c b/fs/block_dev.c index d612468ee66b..7094995026d3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ 
-34,6 +34,7 @@ #include <linux/task_io_accounting_ops.h> #include <linux/falloc.h> #include <linux/uaccess.h> +#include <linux/suspend.h> #include "internal.h" struct bdev_inode { @@ -245,6 +246,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, bio.bi_opf = dio_bio_write_op(iocb); task_io_account_write(ret); } + if (iocb->ki_flags & IOCB_NOWAIT) + bio.bi_opf |= REQ_NOWAIT; if (iocb->ki_flags & IOCB_HIPRI) bio_set_polled(&bio, iocb); @@ -398,6 +401,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) bio->bi_opf = dio_bio_write_op(iocb); task_io_account_write(bio->bi_iter.bi_size); } + if (iocb->ki_flags & IOCB_NOWAIT) + bio->bi_opf |= REQ_NOWAIT; dio->size += bio->bi_iter.bi_size; pos += bio->bi_iter.bi_size; @@ -1243,6 +1248,33 @@ void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, } EXPORT_SYMBOL(bd_abort_claiming); +/* + * Drop all buffers & page cache for given bdev range. This function bails + * with error if bdev has other exclusive owner (such as filesystem). + */ +int truncate_bdev_range(struct block_device *bdev, fmode_t mode, + loff_t lstart, loff_t lend) +{ + struct block_device *claimed_bdev = NULL; + int err; + + /* + * If we don't hold exclusive handle for the device, upgrade to it + * while we discard the buffer cache to avoid discarding buffers + * under live filesystem. + */ + if (!(mode & FMODE_EXCL)) { + claimed_bdev = bdev->bd_contains; + err = bd_prepare_to_claim(bdev, claimed_bdev, + truncate_bdev_range); + if (err) + return err; + } + truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); + return 0; +} +EXPORT_SYMBOL(truncate_bdev_range); + #ifdef CONFIG_SYSFS struct bd_holder_disk { struct list_head list; @@ -1545,10 +1577,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) */ if (!for_part) { ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); + if (ret != 0) return ret; - } } restart: @@ -1617,8 +1647,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; BUG_ON(for_part); ret = __blkdev_get(whole, mode, 1); - if (ret) + if (ret) { + bdput(whole); goto out_clear; + } bdev->bd_contains = whole; bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || @@ -1668,7 +1700,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) disk_unblock_events(disk); put_disk_and_module(disk); out: - bdput(bdev); return ret; } @@ -1735,6 +1766,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) bdput(whole); } + if (res) + bdput(bdev); + return res; } EXPORT_SYMBOL(blkdev_get); @@ -1854,6 +1888,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) struct gendisk *disk = bdev->bd_disk; struct block_device *victim = NULL; + /* + * Sync early if it looks like we're the last one. If someone else + * opens the block device between now and the decrement of bd_openers + * then we did a sync that we didn't need to, but that's not the end + * of the world and we want to avoid long (could be several minute) + * syncs while holding the mutex. 
+ */ + if (bdev->bd_openers == 1) + sync_blockdev(bdev); + mutex_lock_nested(&bdev->bd_mutex, for_part); if (for_part) bdev->bd_part_count--; @@ -1975,7 +2019,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) if (bdev_read_only(I_BDEV(bd_inode))) return -EPERM; - if (IS_SWAPFILE(bd_inode)) + /* uswsusp needs write permission to the swap */ + if (IS_SWAPFILE(bd_inode) && !hibernation_available()) return -ETXTBSY; if (!iov_iter_count(from)) @@ -2055,7 +2100,6 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, loff_t len) { struct block_device *bdev = I_BDEV(bdev_file_inode(file)); - struct address_space *mapping; loff_t end = start + len - 1; loff_t isize; int error; @@ -2083,8 +2127,9 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, return -EINVAL; /* Invalidate the page cache, including dirty pages. */ - mapping = bdev->bd_inode->i_mapping; - truncate_inode_pages_range(mapping, start, end); + error = truncate_bdev_range(bdev, file->f_mode, start, end); + if (error) + return error; switch (mode) { case FALLOC_FL_ZERO_RANGE: @@ -2111,7 +2156,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, * the caller will be given -EBUSY. The third argument is * inclusive, so the rounding here is safe. */ - return invalidate_inode_pages2_range(mapping, + return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, start >> PAGE_SHIFT, end >> PAGE_SHIFT); } diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 3f3110975f88..11be02459b87 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -402,3 +402,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work) { set_bit(WORK_HIGH_PRIO_BIT, &work->flags); } + +void btrfs_flush_workqueue(struct btrfs_workqueue *wq) +{ + if (wq->high) + flush_workqueue(wq->high->normal_wq); + + flush_workqueue(wq->normal->normal_wq); +} diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index c5bf2b117c05..714ab6855423 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -44,5 +44,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work); struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); +void btrfs_flush_workqueue(struct btrfs_workqueue *wq); #endif diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index e5d85311d5d5..7f644a58db51 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -347,33 +347,10 @@ static int add_prelim_ref(const struct btrfs_fs_info *fs_info, return -ENOMEM; ref->root_id = root_id; - if (key) { + if (key) ref->key_for_search = *key; - /* - * We can often find data backrefs with an offset that is too - * large (>= LLONG_MAX, maximum allowed file offset) due to - * underflows when subtracting a file's offset with the data - * offset of its corresponding extent data item. This can - * happen for example in the clone ioctl. - * So if we detect such case we set the search key's offset to - * zero to make sure we will find the matching file extent item - * at add_all_parents(), otherwise we will miss it because the - * offset taken form the backref is much larger then the offset - * of the file extent item. This can make us scan a very large - * number of file extent items, but at least it will not make - * us miss any. 
- * This is an ugly workaround for a behaviour that should have - * never existed, but it does and a fix for the clone ioctl - * would touch a lot of places, cause backwards incompatibility - * and would not fix the problem for extents cloned with older - * kernels. - */ - if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY && - ref->key_for_search.offset >= LLONG_MAX) - ref->key_for_search.offset = 0; - } else { + else memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); - } ref->inode_list = NULL; ref->level = level; @@ -409,10 +386,36 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info, wanted_disk_byte, count, sc, gfp_mask); } +static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr) +{ + struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; + struct rb_node *parent = NULL; + struct prelim_ref *ref = NULL; + struct prelim_ref target = {0}; + int result; + + target.parent = bytenr; + + while (*p) { + parent = *p; + ref = rb_entry(parent, struct prelim_ref, rbnode); + result = prelim_ref_compare(ref, &target); + + if (result < 0) + p = &(*p)->rb_left; + else if (result > 0) + p = &(*p)->rb_right; + else + return 1; + } + return 0; +} + static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, - struct ulist *parents, struct prelim_ref *ref, + struct ulist *parents, + struct preftrees *preftrees, struct prelim_ref *ref, int level, u64 time_seq, const u64 *extent_item_pos, - u64 total_refs, bool ignore_offset) + bool ignore_offset) { int ret = 0; int slot; @@ -424,6 +427,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, u64 disk_byte; u64 wanted_disk_byte = ref->wanted_disk_byte; u64 count = 0; + u64 data_offset; if (level != 0) { eb = path->nodes[level]; @@ -434,18 +438,26 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, } /* - * We normally enter this function with the path already pointing to - * the first item to check. But sometimes, we may enter it with - * slot==nritems. In that case, go to the next leaf before we continue. + * 1. We normally enter this function with the path already pointing to + * the first item to check. But sometimes, we may enter it with + * slot == nritems. + * 2. We are searching for normal backref but bytenr of this leaf + * matches shared data backref + * 3. The leaf owner is not equal to the root we are searching + * + * For these cases, go to the next leaf before we continue. 
*/ - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { + eb = path->nodes[0]; + if (path->slots[0] >= btrfs_header_nritems(eb) || + is_shared_data_backref(preftrees, eb->start) || + ref->root_id != btrfs_header_owner(eb)) { if (time_seq == SEQ_LAST) ret = btrfs_next_leaf(root, path); else ret = btrfs_next_old_leaf(root, path, time_seq); } - while (!ret && count < total_refs) { + while (!ret && count < ref->count) { eb = path->nodes[0]; slot = path->slots[0]; @@ -455,13 +467,31 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, key.type != BTRFS_EXTENT_DATA_KEY) break; + /* + * We are searching for normal backref but bytenr of this leaf + * matches shared data backref, OR + * the leaf owner is not equal to the root we are searching for + */ + if (slot == 0 && + (is_shared_data_backref(preftrees, eb->start) || + ref->root_id != btrfs_header_owner(eb))) { + if (time_seq == SEQ_LAST) + ret = btrfs_next_leaf(root, path); + else + ret = btrfs_next_old_leaf(root, path, time_seq); + continue; + } fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); + data_offset = btrfs_file_extent_offset(eb, fi); if (disk_byte == wanted_disk_byte) { eie = NULL; old = NULL; - count++; + if (ref->key_for_search.offset == key.offset - data_offset) + count++; + else + goto next; if (extent_item_pos) { ret = check_extent_in_eb(&key, eb, fi, *extent_item_pos, @@ -502,9 +532,9 @@ next: */ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, + struct preftrees *preftrees, struct prelim_ref *ref, struct ulist *parents, - const u64 *extent_item_pos, u64 total_refs, - bool ignore_offset) + const u64 *extent_item_pos, bool ignore_offset) { struct btrfs_root *root; struct btrfs_key root_key; @@ -513,6 +543,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, int root_level; int level = ref->level; int index; + struct btrfs_key search_key = ref->key_for_search; root_key.objectid = ref->root_id; root_key.type = BTRFS_ROOT_ITEM_KEY; @@ -545,13 +576,33 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, goto out; } + /* + * We can often find data backrefs with an offset that is too large + * (>= LLONG_MAX, maximum allowed file offset) due to underflows when + * subtracting the data offset of the corresponding extent data + * item from a file's offset. This can happen for example in the + * clone ioctl. + * + * So if we detect such a case we set the search key's offset to zero to + * make sure we will find the matching file extent item at + * add_all_parents(), otherwise we will miss it because the offset + * taken from the backref is much larger than the offset of the file + * extent item. This can make us scan a very large number of file + * extent items, but at least it will not make us miss any. + * + * This is an ugly workaround for a behaviour that should have never + * existed, but it does and a fix for the clone ioctl would touch a lot + * of places, cause backwards incompatibility and would not fix the + * problem for extents cloned with older kernels. 
+ */ + if (search_key.type == BTRFS_EXTENT_DATA_KEY && + search_key.offset >= LLONG_MAX) + search_key.offset = 0; path->lowest_level = level; if (time_seq == SEQ_LAST) - ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path, - 0, 0); + ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); else - ret = btrfs_search_old_slot(root, &ref->key_for_search, path, - time_seq); + ret = btrfs_search_old_slot(root, &search_key, path, time_seq); /* root node has been locked, we can release @subvol_srcu safely here */ srcu_read_unlock(&fs_info->subvol_srcu, index); @@ -574,8 +625,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, eb = path->nodes[level]; } - ret = add_all_parents(root, path, parents, ref, level, time_seq, - extent_item_pos, total_refs, ignore_offset); + ret = add_all_parents(root, path, parents, preftrees, ref, level, + time_seq, extent_item_pos, ignore_offset); out: path->lowest_level = 0; btrfs_release_path(path); @@ -609,7 +660,7 @@ unode_aux_to_inode_list(struct ulist_node *node) static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct preftrees *preftrees, - const u64 *extent_item_pos, u64 total_refs, + const u64 *extent_item_pos, struct share_check *sc, bool ignore_offset) { int err; @@ -653,9 +704,9 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, ret = BACKREF_FOUND_SHARED; goto out; } - err = resolve_indirect_ref(fs_info, path, time_seq, ref, - parents, extent_item_pos, - total_refs, ignore_offset); + err = resolve_indirect_ref(fs_info, path, time_seq, preftrees, + ref, parents, extent_item_pos, + ignore_offset); /* * we can only tolerate ENOENT,otherwise,we should catch error * and return directly. @@ -758,8 +809,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info, */ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head, u64 seq, - struct preftrees *preftrees, u64 *total_refs, - struct share_check *sc) + struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; struct btrfs_delayed_extent_op *extent_op = head->extent_op; @@ -793,7 +843,6 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, default: BUG(); } - *total_refs += count; switch (node->type) { case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ @@ -876,7 +925,7 @@ out: static int add_inline_refs(const struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, int *info_level, struct preftrees *preftrees, - u64 *total_refs, struct share_check *sc) + struct share_check *sc) { int ret = 0; int slot; @@ -900,7 +949,6 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(leaf, ei); - *total_refs += btrfs_extent_refs(leaf, ei); btrfs_item_key_to_cpu(leaf, &found_key, slot); ptr = (unsigned long)(ei + 1); @@ -1125,8 +1173,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, struct prelim_ref *ref; struct rb_node *node; struct extent_inode_elem *eie = NULL; - /* total of both direct AND indirect refs! 
*/ - u64 total_refs = 0; struct preftrees preftrees = { .direct = PREFTREE_INIT, .indirect = PREFTREE_INIT, @@ -1195,7 +1241,7 @@ again: } spin_unlock(&delayed_refs->lock); ret = add_delayed_refs(fs_info, head, time_seq, - &preftrees, &total_refs, sc); + &preftrees, sc); mutex_unlock(&head->mutex); if (ret) goto out; @@ -1216,8 +1262,7 @@ again: (key.type == BTRFS_EXTENT_ITEM_KEY || key.type == BTRFS_METADATA_ITEM_KEY)) { ret = add_inline_refs(fs_info, path, bytenr, - &info_level, &preftrees, - &total_refs, sc); + &info_level, &preftrees, sc); if (ret) goto out; ret = add_keyed_refs(fs_info, path, bytenr, info_level, @@ -1236,7 +1281,7 @@ again: WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root)); ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees, - extent_item_pos, total_refs, sc, ignore_offset); + extent_item_pos, sc, ignore_offset); if (ret) goto out; @@ -1422,6 +1467,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); + *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 7dcfa7d7632a..a352c1704042 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -640,7 +640,15 @@ static noinline void caching_thread(struct btrfs_work *work) mutex_lock(&caching_ctl->mutex); down_read(&fs_info->commit_root_sem); - if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) + /* + * If we are in the transaction that populated the free space tree we + * can't actually cache from the free space tree as our commit root and + * real root are the same, so we could change the contents of the blocks + * while caching. Instead do the slow caching in this case, and after + * the transaction has committed we will be safe. 
+ */ + if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && + !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) ret = load_free_space_tree(caching_ctl); else ret = load_extent_tree_free(caching_ctl); @@ -986,6 +994,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); + /* Once for the block groups rbtree */ + btrfs_put_block_group(block_group); + if (fs_info->first_logical_byte == block_group->key.objectid) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -1096,9 +1107,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (ret) goto out; - btrfs_put_block_group(block_group); - btrfs_put_block_group(block_group); - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) ret = -EIO; @@ -1119,7 +1127,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, /* once for the tree */ free_extent_map(em); } + out: + /* Once for the lookup reference */ + btrfs_put_block_group(block_group); if (remove_rsv) btrfs_delayed_refs_rsv_release(fs_info, 1); btrfs_free_path(path); @@ -1163,7 +1174,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( free_extent_map(em); return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, - num_items, 1); + num_items); } /* @@ -1183,7 +1194,6 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; - u64 sinfo_used; u64 min_allocable_bytes; int ret = -ENOSPC; @@ -1210,20 +1220,38 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) num_bytes = cache->key.offset - cache->reserved - cache->pinned - cache->bytes_super - btrfs_block_group_used(&cache->item); - sinfo_used = btrfs_space_info_used(sinfo, true); /* - * sinfo_used + num_bytes should always <= sinfo->total_bytes. - * - * Here we make sure if we mark this bg RO, we still have enough - * free space as buffer (if min_allocable_bytes is not 0). + * Data never overcommits, even in mixed mode, so do just the straight + * check of left over space in how much we have allocated. */ - if (sinfo_used + num_bytes + min_allocable_bytes <= - sinfo->total_bytes) { + if (force) { + ret = 0; + } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { + u64 sinfo_used = btrfs_space_info_used(sinfo, true); + + /* + * Here we make sure if we mark this bg RO, we still have enough + * free space as buffer. + */ + if (sinfo_used + num_bytes <= sinfo->total_bytes) + ret = 0; + } else { + /* + * We overcommit metadata, so we need to do the + * btrfs_can_overcommit check here, and we need to pass in + * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of + * leeway to allow us to mark this block group as read only. 
+ */ + if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, + BTRFS_RESERVE_NO_FLUSH)) + ret = 0; + } + + if (!ret) { sinfo->bytes_readonly += num_bytes; cache->ro++; list_add_tail(&cache->ro_list, &sinfo->ro_bgs); - ret = 0; } out: spin_unlock(&cache->lock); @@ -1232,9 +1260,6 @@ out: btrfs_info(cache->fs_info, "unable to make block group %llu ro", cache->key.objectid); - btrfs_info(cache->fs_info, - "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", - sinfo_used, num_bytes, min_allocable_bytes); btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); } return ret; @@ -1829,6 +1854,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) } } + rcu_read_lock(); list_for_each_entry_rcu(space_info, &info->space_info, list) { if (!(btrfs_get_alloc_profile(info, space_info->flags) & (BTRFS_BLOCK_GROUP_RAID10 | @@ -1849,6 +1875,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) list) inc_block_group_ro(cache, 1); } + rcu_read_unlock(); btrfs_init_global_block_rsv(info); ret = check_chunk_block_group_mappings(info); @@ -2021,8 +2048,17 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) return flags; } -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) - +/* + * Mark one block group RO; can be called several times for the same block + * group. + * + * @cache: the destination block group + * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to + * ensure we still have some free space after marking this + * block group RO. + */ +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, + bool do_chunk_alloc) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_trans_handle *trans; @@ -2052,25 +2088,29 @@ again: goto again; } - /* - * if we are changing raid levels, try to allocate a corresponding - * block group with the new raid level. - */ - alloc_flags = update_block_group_flags(fs_info, cache->flags); - if (alloc_flags != cache->flags) { - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + if (do_chunk_alloc) { /* - * ENOSPC is allowed here, we may have enough space - * already allocated at the new raid level to - * carry on + * If we are changing raid levels, try to allocate a + * corresponding block group with the new raid level. 
*/ - if (ret == -ENOSPC) - ret = 0; - if (ret < 0) - goto out; + alloc_flags = update_block_group_flags(fs_info, cache->flags); + if (alloc_flags != cache->flags) { + ret = btrfs_chunk_alloc(trans, alloc_flags, + CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } } - ret = inc_block_group_ro(cache, 0); + ret = inc_block_group_ro(cache, !do_chunk_alloc); + if (!do_chunk_alloc) + goto unlock_out; if (!ret) goto out; alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); @@ -2085,6 +2125,7 @@ out: check_system_chunk(trans, alloc_flags); mutex_unlock(&fs_info->chunk_mutex); } +unlock_out: mutex_unlock(&fs_info->ro_block_group_mutex); btrfs_end_transaction(trans); @@ -2163,7 +2204,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, return 0; } - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; again: inode = lookup_free_space_inode(block_group, path); @@ -2364,8 +2405,10 @@ again: if (!path) { path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; + if (!path) { + ret = -ENOMEM; + goto out; + } } /* @@ -2460,16 +2503,14 @@ again: btrfs_put_block_group(cache); if (drop_reserve) btrfs_delayed_refs_rsv_release(fs_info, 1); - - if (ret) - break; - /* * Avoid blocking other tasks for too long. It might even save * us from writing caches for block groups that are going to be * removed. */ mutex_unlock(&trans->transaction->cache_write_mutex); + if (ret) + goto out; mutex_lock(&trans->transaction->cache_write_mutex); } mutex_unlock(&trans->transaction->cache_write_mutex); @@ -2478,7 +2519,8 @@ again: * Go through delayed refs for all the stuff we've just kicked off * and then loop back (just once) */ - ret = btrfs_run_delayed_refs(trans, 0); + if (!ret) + ret = btrfs_run_delayed_refs(trans, 0); if (!ret && loops == 0) { loops++; spin_lock(&cur_trans->dirty_bgs_lock); @@ -2492,7 +2534,12 @@ again: goto again; } spin_unlock(&cur_trans->dirty_bgs_lock); - } else if (ret < 0) { + } +out: + if (ret < 0) { + spin_lock(&cur_trans->dirty_bgs_lock); + list_splice_init(&dirty, &cur_trans->dirty_bgs); + spin_unlock(&cur_trans->dirty_bgs_lock); btrfs_cleanup_dirty_bgs(cur_trans, fs_info); } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index c391800388dd..0758e6d52acb 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -205,7 +205,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, + bool do_chunk_alloc); void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index d07bd41a7c1e..343400d49bd1 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -5,6 +5,7 @@ #include "block-rsv.h" #include "space-info.h" #include "transaction.h" +#include "block-group.h" static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, @@ -313,6 +314,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info 
*fs_info) else block_rsv->full = 0; + if (block_rsv->size >= sinfo->total_bytes) + sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&block_rsv->lock); spin_unlock(&sinfo->lock); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a989105d39c8..ab69e3563b12 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -260,9 +260,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, ret = btrfs_inc_ref(trans, root, cow, 1); else ret = btrfs_inc_ref(trans, root, cow, 0); - - if (ret) + if (ret) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); return ret; + } btrfs_mark_buffer_dirty(cow); *cow_ret = cow; @@ -1103,6 +1106,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); if (ret) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); btrfs_abort_transaction(trans, ret); return ret; } @@ -1110,6 +1115,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { ret = btrfs_reloc_cow_block(trans, root, buf, cow); if (ret) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); btrfs_abort_transaction(trans, ret); return ret; } @@ -1142,6 +1149,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if (last_ref) { ret = tree_mod_log_free_eb(buf); if (ret) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); btrfs_abort_transaction(trans, ret); return ret; } @@ -1339,6 +1348,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1395,8 +1406,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq) "failed to read tree block %llu from get_old_root", logical); } else { + struct tree_mod_elem *tm2; + + btrfs_tree_read_lock(old); eb = btrfs_clone_extent_buffer(old); + /* + * After the lookup for the most recent tree mod operation + * above and before we locked and cloned the extent buffer + * 'old', a new tree mod log operation may have been added. + * So lookup for a more recent one to make sure the number + * of mod log operations we replay is consistent with the + * number of items we have in the cloned extent buffer, + * otherwise we can hit a BUG_ON when rewinding the extent + * buffer. 
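The race note above follows a familiar shape: snapshot the state, then re-read a version counter and discard the snapshot if a writer slipped in. A hedged, self-contained C11 sketch of that shape; mod_seq, state and take_snapshot() are invented for illustration and are not btrfs APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t mod_seq;   /* bumped by every writer */
static _Atomic int state;          /* the data being snapshotted */

struct snap {
	uint64_t seq;
	int value;
};

/*
 * Copy the state, then re-read the sequence; on a mismatch the copy may
 * disagree with the modification log and must be discarded, much like
 * get_old_root() re-running tree_mod_log_search() after cloning 'old'
 * and bailing out when a newer operation appeared.
 */
static bool take_snapshot(struct snap *out)
{
	out->seq = atomic_load(&mod_seq);
	out->value = atomic_load(&state);
	return atomic_load(&mod_seq) == out->seq;
}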
+ */ + tm2 = tree_mod_log_search(fs_info, logical, time_seq); + btrfs_tree_read_unlock(old); free_extent_buffer(old); + ASSERT(tm2); + ASSERT(tm2 == tm || tm2->seq > tm->seq); + if (!tm2 || tm2->seq < tm->seq) { + free_extent_buffer(eb); + return NULL; + } + tm = tm2; } } else if (old_root) { eb_root_owner = btrfs_header_owner(eb_root); @@ -1412,7 +1445,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1420,6 +1452,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(fs_info, eb, time_seq, tm); else diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 169075550a5a..7960359dbc70 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -524,6 +524,9 @@ enum { * so we don't need to offload checksums to workqueues. */ BTRFS_FS_CSUM_IMPL_FAST, + + /* Indicate that we can't trust the free space tree for caching yet */ + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, }; struct btrfs_fs_info { @@ -940,6 +943,8 @@ enum { BTRFS_ROOT_DEAD_RELOC_TREE, /* Mark dead root stored on device whose cleanup needs to be resumed */ BTRFS_ROOT_DEAD_TREE, + /* The root has a log tree. Used only for subvolume roots. */ + BTRFS_ROOT_HAS_LOG_TREE, }; /* @@ -988,8 +993,10 @@ struct btrfs_root { wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; struct list_head log_ctxs[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_writers; atomic_t log_commit[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_batch; int log_transid; /* No matter the commit succeeds or not*/ @@ -2411,7 +2418,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, - u64 objectid, u64 offset, u64 bytenr); + u64 objectid, u64 offset, u64 bytenr, bool strict); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, @@ -2465,6 +2472,7 @@ enum btrfs_reserve_flush_enum { BTRFS_RESERVE_FLUSH_LIMIT, BTRFS_RESERVE_FLUSH_EVICT, BTRFS_RESERVE_FLUSH_ALL, + BTRFS_RESERVE_FLUSH_ALL_STEAL, }; enum btrfs_flush_state { @@ -2816,7 +2824,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, u64 start, u64 len); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes); + u64 *ram_bytes, bool strict); void __btrfs_del_delalloc_inode(struct btrfs_root *root, struct btrfs_inode *inode); @@ -2951,6 +2959,8 @@ int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); +int btrfs_check_can_nocow(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, @@ -2960,6 +2970,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long 
new_flags); int btrfs_sync_fs(struct super_block *sb, int wait); +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid); static inline __printf(2, 3) __cold void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) @@ -3161,7 +3173,7 @@ do { \ /* Report first abort since mount */ \ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ &((trans)->fs_info->fs_state))) { \ - if ((errno) != -EIO) { \ + if ((errno) != -EIO && (errno) != -EROFS) { \ WARN(1, KERN_DEBUG \ "BTRFS: Transaction aborted (error %d)\n", \ (errno)); \ @@ -3395,6 +3407,8 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root, int btrfs_reada_wait(void *handle); void btrfs_reada_detach(void *handle); int btree_readahead_hook(struct extent_buffer *eb, int err); +void btrfs_reada_remove_dev(struct btrfs_device *dev); +void btrfs_reada_undo_remove_dev(struct btrfs_device *dev); static inline int is_fstree(u64 rootid) { diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c7a53e79c66d..7dad8794ee38 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -6,6 +6,7 @@ #include <linux/slab.h> #include <linux/iversion.h> +#include <linux/sched/mm.h> #include "misc.h" #include "delayed-inode.h" #include "disk-io.h" @@ -626,8 +627,7 @@ static int btrfs_delayed_inode_reserve_metadata( */ if (!src_rsv || (!trans->bytes_reserved && src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { - ret = btrfs_qgroup_reserve_meta_prealloc(root, - fs_info->nodesize, true); + ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); if (ret < 0) return ret; ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, @@ -649,7 +649,7 @@ static int btrfs_delayed_inode_reserve_metadata( btrfs_ino(inode), num_bytes, 1); } else { - btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize); + btrfs_qgroup_free_meta_prealloc(root, num_bytes); } return ret; } @@ -804,11 +804,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, struct btrfs_delayed_item *delayed_item) { struct extent_buffer *leaf; + unsigned int nofs_flag; char *ptr; int ret; + nofs_flag = memalloc_nofs_save(); ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, delayed_item->data_len); + memalloc_nofs_restore(nofs_flag); if (ret < 0 && ret != -EEXIST) return ret; @@ -936,6 +939,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_node *node) { struct btrfs_delayed_item *curr, *prev; + unsigned int nofs_flag; int ret = 0; do_again: @@ -944,7 +948,9 @@ do_again: if (!curr) goto delete_fail; + nofs_flag = memalloc_nofs_save(); ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + memalloc_nofs_restore(nofs_flag); if (ret < 0) goto delete_fail; else if (ret > 0) { @@ -1011,6 +1017,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_key key; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; + unsigned int nofs_flag; int mod; int ret; @@ -1023,7 +1030,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, else mod = 1; + nofs_flag = memalloc_nofs_save(); ret = btrfs_lookup_inode(trans, root, path, &key, mod); + memalloc_nofs_restore(nofs_flag); if (ret > 0) { btrfs_release_path(path); return -ENOENT; @@ -1074,7 +1083,10 @@ search: key.type = BTRFS_INODE_EXTREF_KEY; key.offset = -1; + + nofs_flag = memalloc_nofs_save(); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + memalloc_nofs_restore(nofs_flag); if (ret < 0) goto 
err_out; ASSERT(ret); @@ -1138,7 +1150,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr) int ret = 0; bool count = (nr > 0); - if (trans->aborted) + if (TRANS_ABORTED(trans)) return -EIO; path = btrfs_alloc_path(); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 48890826b5e6..1cb7f5d79765 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -55,6 +55,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info) ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); if (ret) { no_valid_dev_replace_entry_found: + /* + * We don't have a replace item or it's corrupted. If there is + * a replace target, fail the mount. + */ + if (btrfs_find_device(fs_info->fs_devices, + BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) { + btrfs_err(fs_info, + "found replace target device without a valid replace item"); + ret = -EUCLEAN; + goto out; + } ret = 0; dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED; @@ -107,8 +118,19 @@ no_valid_dev_replace_entry_found: case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: - dev_replace->srcdev = NULL; - dev_replace->tgtdev = NULL; + /* + * We don't have an active replace item but if there is a + * replace target, fail the mount. + */ + if (btrfs_find_device(fs_info->fs_devices, + BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) { + btrfs_err(fs_info, + "replace devid present without an active replace item"); + ret = -EUCLEAN; + } else { + dev_replace->srcdev = NULL; + dev_replace->tgtdev = NULL; + } break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: @@ -190,7 +212,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, int ret = 0; *device_out = NULL; - if (fs_info->fs_devices->seeding) { + if (srcdev->fs_devices->seeding) { btrfs_err(fs_info, "the filesystem is a seed filesystem!"); return -EINVAL; } @@ -562,6 +584,37 @@ static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info) wake_up(&fs_info->dev_replace.replace_wait); } +/* + * When finishing the device replace, before swapping the source device with the + * target device we must update the chunk allocation state in the target device, + * as it is empty because replace works by directly copying the chunks and not + * through the normal chunk allocation path. 
+ */ +static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev, + struct btrfs_device *tgtdev) +{ + struct extent_state *cached_state = NULL; + u64 start = 0; + u64 found_start; + u64 found_end; + int ret = 0; + + lockdep_assert_held(&srcdev->fs_info->chunk_mutex); + + while (!find_first_extent_bit(&srcdev->alloc_state, start, + &found_start, &found_end, + CHUNK_ALLOCATED, &cached_state)) { + ret = set_extent_bits(&tgtdev->alloc_state, found_start, + found_end, CHUNK_ALLOCATED); + if (ret) + break; + start = found_end + 1; + } + + free_extent_state(cached_state); + return ret; +} + static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, int scrub_ret) { @@ -600,6 +653,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, } btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); + if (!scrub_ret) + btrfs_reada_remove_dev(src_device); + /* * We have to use this loop approach because at this point src_device * has to be available for transaction commit to complete, yet new @@ -608,6 +664,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, while (1) { trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { + btrfs_reada_undo_remove_dev(src_device); mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); return PTR_ERR(trans); } @@ -636,8 +693,14 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, dev_replace->time_stopped = ktime_get_real_seconds(); dev_replace->item_needs_writeback = 1; - /* replace old device with new one in mapping tree */ + /* + * Update allocation state in the new device and replace the old device + * with the new one in the mapping tree. + */ if (!scrub_ret) { + scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device); + if (scrub_ret) + goto error; btrfs_dev_replace_update_device_in_mapping_tree(fs_info, src_device, tgt_device); @@ -648,9 +711,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, btrfs_dev_name(src_device), src_device->devid, rcu_str_deref(tgt_device->name), scrub_ret); +error: up_write(&dev_replace->rwsem); mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex); + btrfs_reada_undo_remove_dev(src_device); btrfs_rm_dev_replace_blocked(fs_info); if (tgt_device) btrfs_destroy_dev_replace_tgtdev(tgt_device); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5cdd1b51285b..cd65ef7c7c3f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -649,16 +649,15 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, goto err; if (memcmp_extent_buffer(eb, result, 0, csum_size)) { - u32 val; - u32 found = 0; - - memcpy(&found, result, csum_size); + u8 val[BTRFS_CSUM_SIZE] = { 0 }; read_extent_buffer(eb, &val, 0, csum_size); btrfs_warn_rl(fs_info, - "%s checksum verify failed on %llu wanted %x found %x level %d", + "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d", fs_info->sb->s_id, eb->start, - val, found, btrfs_header_level(eb)); + CSUM_FMT_VALUE(csum_size, val), + CSUM_FMT_VALUE(csum_size, result), + btrfs_header_level(eb)); ret = -EUCLEAN; goto err; } @@ -1475,9 +1474,16 @@ int btrfs_init_fs_root(struct btrfs_root *root) spin_lock_init(&root->ino_cache_lock); init_waitqueue_head(&root->ino_cache_wait); - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto fail; + /* + * Don't assign anonymous block device to roots that are not exposed to + * userspace, the id pool is limited to 1M + */ + if (is_fstree(root->root_key.objectid) && + 
btrfs_root_refs(&root->root_item) > 0) { + ret = get_anon_bdev(&root->anon_dev); + if (ret) + goto fail; + } mutex_lock(&root->objectid_mutex); ret = btrfs_find_highest_objectid(root, @@ -3057,6 +3063,18 @@ retry_root_backup: fs_info->generation = generation; fs_info->last_trans_committed = generation; + /* + * If we have a uuid root and we're not being told to rescan we need to + * check the generation here so we can set the + * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the + * transaction during a balance or the log replay without updating the + * uuid generation, and then if we crash we would rescan the uuid tree, + * even though it was perfectly fine. + */ + if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && + fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) + set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); + ret = btrfs_verify_dev_extents(fs_info); if (ret) { btrfs_err(fs_info, @@ -3287,8 +3305,6 @@ retry_root_backup: close_ctree(fs_info); return ret; } - } else { - set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); } set_bit(BTRFS_FS_OPEN, &fs_info->flags); @@ -4007,6 +4023,19 @@ void close_ctree(struct btrfs_fs_info *fs_info) */ btrfs_delete_unused_bgs(fs_info); + /* + * There might be existing delayed inode workers still running + * and holding an empty delayed inode item. We must wait for + * them to complete first because they can create a transaction. + * This happens when someone calls btrfs_balance_delayed_items() + * and then a transaction commit runs the same delayed nodes + * before any delayed worker has done something with the nodes. + * We must wait for any worker here and not at transaction + * commit time since that could cause a deadlock. + * This is a very rare case. 
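A userspace analogue of the shutdown ordering described above, with a pthread standing in for the delayed-inode workqueue (illustrative only): flush the workers first, then perform the final commit.

#include <pthread.h>
#include <stdio.h>

static void *delayed_worker(void *arg)
{
	(void)arg;
	puts("worker: may still open a transaction");
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, delayed_worker, NULL) != 0)
		return 1;
	/*
	 * Flush (join) the workers before the final commit; waiting for
	 * them from inside the commit itself is what could deadlock.
	 */
	pthread_join(t, NULL);
	puts("final commit of the superblock");
	return 0;
}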
+ */ + btrfs_flush_workqueue(fs_info->delayed_workers); + ret = btrfs_commit_super(fs_info); if (ret) btrfs_err(fs_info, "commit super ret %d", ret); @@ -4447,6 +4476,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) cache->io_ctl.inode = NULL; iput(inode); } + ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ddf28ecf17f9..93cceeba484c 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, return type; } -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, - int check_generation) +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } -static struct dentry *btrfs_get_parent(struct dentry *child) +struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = d_inode(child); struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index 57488ecd7d4e..f32f4113c976 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -18,4 +18,9 @@ struct btrfs_fid { u64 parent_root_objectid; } __attribute__ ((packed)); +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation); +struct dentry *btrfs_get_parent(struct dentry *child); + #endif diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 47ecf7216b3e..6317394f02b8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -32,6 +32,7 @@ #include "block-rsv.h" #include "delalloc-space.h" #include "block-group.h" +#include "rcu-string.h" #undef SCRAMBLE_DELAYED_REFS @@ -402,12 +403,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. */ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { @@ -416,12 +416,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. 
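The alignment tests corrected in this hunk use the usual power-of-two mask trick. A tiny standalone check; IS_ALIGNED_POW2 mirrors, but is not, the kernel's IS_ALIGNED macro.

#include <assert.h>
#include <stdint.h>

/* For a power-of-two a, x is aligned iff its low bits are all zero. */
#define IS_ALIGNED_POW2(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	const uint64_t sectorsize = 4096;

	assert(IS_ALIGNED_POW2(8192, sectorsize));
	assert(!IS_ALIGNED_POW2(8200, sectorsize));
	return 0;
}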
*/ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { @@ -431,8 +430,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, } btrfs_print_leaf((struct extent_buffer *)eb); - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", - eb->start, type); + btrfs_err(eb->fs_info, + "eb %llu iref 0x%lx invalid extent inline ref type %d", + eb->start, (unsigned long)iref, type); WARN_ON(1); return BTRFS_REF_TYPE_INVALID; @@ -1306,8 +1306,10 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, u64 *actual_bytes) { - int ret; + int ret = 0; u64 discarded_bytes = 0; + u64 end = bytenr + num_bytes; + u64 cur = bytenr; struct btrfs_bio *bbio = NULL; @@ -1316,15 +1318,23 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, * associated to its stripes that don't go away while we are discarding. */ btrfs_bio_counter_inc_blocked(fs_info); - /* Tell the block device(s) that the sectors can be discarded */ - ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes, - &bbio, 0); - /* Error condition is -ENOMEM */ - if (!ret) { - struct btrfs_bio_stripe *stripe = bbio->stripes; + while (cur < end) { + struct btrfs_bio_stripe *stripe; int i; + num_bytes = end - cur; + /* Tell the block device(s) that the sectors can be discarded */ + ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur, + &num_bytes, &bbio, 0); + /* + * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or + * -EOPNOTSUPP. For any such error, @num_bytes is not updated, + * thus we can't continue anyway. + */ + if (ret < 0) + goto out; + stripe = bbio->stripes; for (i = 0; i < bbio->num_stripes; i++, stripe++) { u64 bytes; struct request_queue *req_q; @@ -1341,10 +1351,19 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, stripe->physical, stripe->length, &bytes); - if (!ret) + if (!ret) { discarded_bytes += bytes; - else if (ret != -EOPNOTSUPP) - break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */ + } else if (ret != -EOPNOTSUPP) { + /* + * Logic errors or -ENOMEM, or -EIO, but + * unlikely to happen. + * + * And since there are two loops, explicitly + * go to out to avoid confusion. 
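The reworked discard path above is a classic cursor loop: request the remainder of the range, let the mapping layer trim the length to a single chunk, then advance by what was actually covered. A self-contained sketch in which process_one_chunk() is a hypothetical stand-in for btrfs_map_block():

#include <stdint.h>

/* Trims *len so the range never crosses a (toy) 64K chunk boundary. */
static int process_one_chunk(uint64_t cur, uint64_t *len)
{
	const uint64_t chunk_end = (cur | 0xffff) + 1;

	if (*len > chunk_end - cur)
		*len = chunk_end - cur;
	return 0;
}

static int discard_range(uint64_t start, uint64_t end)
{
	uint64_t cur = start;

	while (cur < end) {
		uint64_t len = end - cur;       /* ask for the rest */
		int ret = process_one_chunk(cur, &len);

		if (ret < 0)                    /* len may be stale: bail */
			return ret;
		cur += len;                     /* advance by what was done */
	}
	return 0;
}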
+ */ + btrfs_put_bbio(bbio); + goto out; + } /* * Just in case we get back EOPNOTSUPP for some reason, @@ -1354,7 +1373,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, ret = 0; } btrfs_put_bbio(bbio); + cur += num_bytes; } +out: btrfs_bio_counter_dec(fs_info); if (actual_bytes) @@ -1561,7 +1582,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, int err = 0; int metadata = !extent_op->is_data; - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) @@ -1681,7 +1702,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, { int ret = 0; - if (trans->aborted) { + if (TRANS_ABORTED(trans)) { if (insert_reserved) btrfs_pin_extent(trans->fs_info, node->bytenr, node->num_bytes, 1); @@ -2169,7 +2190,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, int run_all = count == (unsigned long)-1; /* We'll clean this up in btrfs_cleanup_transaction */ - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) @@ -2320,7 +2341,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root, static noinline int check_committed_ref(struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, u64 bytenr) + u64 objectid, u64 offset, u64 bytenr, + bool strict) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; @@ -2362,9 +2384,13 @@ static noinline int check_committed_ref(struct btrfs_root *root, btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) goto out; - /* If extent created before last snapshot => it's definitely shared */ - if (btrfs_extent_generation(leaf, ei) <= - btrfs_root_last_snapshot(&root->root_item)) + /* + * If extent created before last snapshot => it's shared unless the + * snapshot has been deleted. Use the heuristic if strict is false. 
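The gate added here is small enough to state on its own. A hedged sketch of just the heuristic; assume_shared() is an invented name, not a btrfs function.

#include <stdbool.h>
#include <stdint.h>

/*
 * An extent created at or before the last snapshot is assumed shared --
 * unless the caller asked for a strict answer, because a deleted
 * snapshot makes the shortcut unreliable and a real backref check is
 * needed instead.
 */
static bool assume_shared(uint64_t extent_gen, uint64_t last_snapshot,
			  bool strict)
{
	return !strict && extent_gen <= last_snapshot;
}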
+ */ + if (!strict && + (btrfs_extent_generation(leaf, ei) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; iref = (struct btrfs_extent_inline_ref *)(ei + 1); @@ -2389,7 +2415,7 @@ out: } int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, - u64 bytenr) + u64 bytenr, bool strict) { struct btrfs_path *path; int ret; @@ -2400,7 +2426,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, do { ret = check_committed_ref(root, path, objectid, - offset, bytenr); + offset, bytenr, strict); if (ret && ret != -ENOENT) goto out; @@ -2813,10 +2839,10 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, len = cache->key.objectid + cache->key.offset - start; len = min(len, end + 1 - start); - if (start < cache->last_byte_to_unpin) { - len = min(len, cache->last_byte_to_unpin - start); - if (return_free_space) - btrfs_add_free_space(cache, start, len); + if (start < cache->last_byte_to_unpin && return_free_space) { + u64 add_len = min(len, cache->last_byte_to_unpin - start); + + btrfs_add_free_space(cache, start, add_len); } start += len; @@ -2892,7 +2918,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) else unpin = &fs_info->freed_extents[0]; - while (!trans->aborted) { + while (!TRANS_ABORTED(trans)) { struct extent_state *cached_state = NULL; mutex_lock(&fs_info->unused_bg_unpin_mutex); @@ -2924,7 +2950,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) u64 trimmed = 0; ret = -EROFS; - if (!trans->aborted) + if (!TRANS_ABORTED(trans)) ret = btrfs_discard_extent(fs_info, block_group->key.objectid, block_group->key.offset, @@ -3775,11 +3801,12 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, * |- Push harder to find free extents * |- If not found, re-iterate all block groups */ -static noinline int find_free_extent(struct btrfs_fs_info *fs_info, +static noinline int find_free_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes, u64 empty_size, u64 hint_byte, struct btrfs_key *ins, u64 flags, int delalloc) { + struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; int cache_block_group_error = 0; struct btrfs_free_cluster *last_ptr = NULL; @@ -3808,7 +3835,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, ins->objectid = 0; ins->offset = 0; - trace_find_free_extent(fs_info, num_bytes, empty_size, flags); + trace_find_free_extent(root, num_bytes, empty_size, flags); space_info = btrfs_find_space_info(fs_info, flags); if (!space_info) { @@ -4116,7 +4143,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, flags = get_alloc_profile_by_root(root, is_data); again: WARN_ON(num_bytes < fs_info->sectorsize); - ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size, + ret = find_free_extent(root, ram_bytes, num_bytes, empty_size, hint_byte, ins, flags, delalloc); if (!ret && !is_data) { btrfs_dec_block_group_reservations(fs_info, ins->objectid); @@ -4441,7 +4468,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, return ERR_PTR(-EUCLEAN); } - btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); + btrfs_set_buffer_lockdep_class(owner, buf, level); btrfs_tree_lock(buf); btrfs_clean_tree_block(buf); clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); @@ -5221,7 +5248,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, goto out; } - trans = btrfs_start_transaction(tree_root, 0); + /* + * Use join to avoid potential EINTR from transaction start. 
See + * wait_reserve_ticket and the whole reservation callchain. + */ + if (for_reloc) + trans = btrfs_join_transaction(tree_root); + else + trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; @@ -5356,7 +5390,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, goto out_free; } - trans = btrfs_start_transaction(tree_root, 0); + /* + * Use join to avoid potential EINTR from transaction + * start. See wait_reserve_ticket and the whole + * reservation callchain. + */ + if (for_reloc) + trans = btrfs_join_transaction(tree_root); + else + trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; @@ -5417,8 +5459,6 @@ out: */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); - if (err && err != -EAGAIN) - btrfs_handle_fs_error(fs_info, err, NULL); return err; } @@ -5587,6 +5627,19 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); + /* Check if there are any CHUNK_* bits left */ + if (start > device->total_bytes) { + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + btrfs_warn_in_rcu(fs_info, +"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu", + start, end - start + 1, + rcu_str_deref(device->name), + device->total_bytes); + mutex_unlock(&fs_info->chunk_mutex); + ret = 0; + break; + } + /* Ensure we skip the reserved area in the first 1M */ start = max_t(u64, start, SZ_1M); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 284540cdbbd9..95205bde240f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -647,9 +647,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc) static void extent_io_tree_panic(struct extent_io_tree *tree, int err) { - struct inode *inode = tree->private_data; - - btrfs_panic(btrfs_sb(inode->i_sb), err, + btrfs_panic(tree->fs_info, err, "locking error: extent tree was modified by another thread while locked"); } @@ -1923,7 +1921,8 @@ static int __process_pages_contig(struct address_space *mapping, if (!PageDirty(pages[i]) || pages[i]->mapping != mapping) { unlock_page(pages[i]); - put_page(pages[i]); + for (; i < ret; i++) + put_page(pages[i]); err = -EAGAIN; goto out; } @@ -3928,6 +3927,7 @@ int btree_write_cache_pages(struct address_space *mapping, .extent_locked = 0, .sync_io = wbc->sync_mode == WB_SYNC_ALL, }; + struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; int ret = 0; int done = 0; int nr_to_write_done = 0; @@ -4041,7 +4041,39 @@ retry: end_write_bio(&epd, ret); return ret; } - ret = flush_write_bio(&epd); + /* + * If something went wrong, don't allow any metadata write bio to be + * submitted. + * + * This would prevent use-after-free if we had dirty pages not + * cleaned up, which can still happen by fuzzed images. + * + * - Bad extent tree + * Allowing existing tree block to be allocated for other trees. + * + * - Log tree operations + * Exiting tree blocks get allocated to log tree, bumps its + * generation, then get cleaned in tree re-balance. + * Such tree block will not be written back, since it's clean, + * thus no WRITTEN flag set. + * And after log writes back, this tree block is not traced by + * any dirty extent_io_tree. + * + * - Offending tree block gets re-dirtied from its original owner + * Since it has bumped generation, no WRITTEN flag, it can be + * reused without COWing. This tree block will not be traced + * by btrfs_transaction::dirty_pages. 
+ * + * Now such dirty tree block will not be cleaned by any dirty + * extent io tree. Thus we don't want to submit such wild eb + * if the fs already has error. + */ + if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { + ret = flush_write_bio(&epd); + } else { + ret = -EROFS; + end_write_bio(&epd, ret); + } return ret; } @@ -4433,20 +4465,32 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) free_extent_map(em); break; } - if (!test_range_bit(tree, em->start, - extent_map_end(em) - 1, - EXTENT_LOCKED, 0, NULL)) { + if (test_range_bit(tree, em->start, + extent_map_end(em) - 1, + EXTENT_LOCKED, 0, NULL)) + goto next; + /* + * If it's not in the list of modified extents, used + * by a fast fsync, we can remove it. If it's being + * logged we can safely remove it since fsync took an + * extra reference on the em. + */ + if (list_empty(&em->list) || + test_bit(EXTENT_FLAG_LOGGING, &em->flags)) { set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &btrfs_inode->runtime_flags); remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); } +next: start = extent_map_end(em); write_unlock(&map->lock); /* once for us */ free_extent_map(em); + + cond_resched(); /* Allow large-extent preemption. */ } } return try_release_extent_state(tree, page, mask); @@ -4992,25 +5036,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, static void check_buffer_tree_ref(struct extent_buffer *eb) { int refs; - /* the ref bit is tricky. We have to make sure it is set - * if we have the buffer dirty. Otherwise the - * code to free a buffer can end up dropping a dirty - * page + /* + * The TREE_REF bit is first set when the extent_buffer is added + * to the radix tree. It is also reset, if unset, when a new reference + * is created by find_extent_buffer. * - * Once the ref bit is set, it won't go away while the - * buffer is dirty or in writeback, and it also won't - * go away while we have the reference count on the - * eb bumped. + * It is only cleared in two cases: freeing the last non-tree + * reference to the extent_buffer when its STALE bit is set or + * calling releasepage when the tree reference is the only reference. * - * We can't just set the ref bit without bumping the - * ref on the eb because free_extent_buffer might - * see the ref bit and try to clear it. If this happens - * free_extent_buffer might end up dropping our original - * ref by mistake and freeing the page before we are able - * to add one more ref. + * In both cases, care is taken to ensure that the extent_buffer's + * pages are not under io. However, releasepage can be concurrently + * called with creating new references, which is prone to race + * conditions between the calls to check_buffer_tree_ref in those + * codepaths and clearing TREE_REF in try_release_extent_buffer. * - * So bump the ref count first, then set the bit. If someone - * beat us to it, drop the ref we added. + * The actual lifetime of the extent_buffer in the radix tree is + * adequately protected by the refcount, but the TREE_REF bit and + * its corresponding reference are not. To protect against this + * class of races, we call check_buffer_tree_ref from the codepaths + * which trigger io after they set eb->io_pages. Note that once io is + * initiated, TREE_REF can no longer be cleared, so that is the + * moment at which any such race is best fixed. 
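The ordering this comment ultimately relies on, pin with a reference before the racy flag matters, can be shown with C11 atomics. A hypothetical sketch; struct buf is a stand-in for extent_buffer, not the real structure.

#include <stdatomic.h>
#include <stdbool.h>

struct buf {
	_Atomic int refs;          /* stand-in for eb->refs */
	atomic_bool tree_ref;      /* stand-in for the TREE_REF bit */
};

/*
 * Bump the reference count before (re)setting the flag: a concurrent
 * releaser that still observes the flag can then never free the buffer
 * out from under us.
 */
static void pin_buffer(struct buf *b)
{
	atomic_fetch_add(&b->refs, 1);
	atomic_store(&b->tree_ref, true);
}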
*/ refs = atomic_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) @@ -5460,6 +5507,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; atomic_set(&eb->io_pages, num_reads); + /* + * It is possible for releasepage to clear the TREE_REF bit before we + * set io_pages. See check_buffer_tree_ref for a more detailed comment. + */ + check_buffer_tree_ref(eb); for (i = 0; i < num_pages; i++) { page = eb->pages[i]; @@ -5553,9 +5605,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5576,7 +5628,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (probe_user_write(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index cf3424d58fec..fcf1807cc8dd 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -35,6 +35,8 @@ */ #define CHUNK_ALLOCATED EXTENT_DIRTY #define CHUNK_TRIMMED EXTENT_DEFRAG +#define CHUNK_STATE_MASK (CHUNK_ALLOCATED | \ + CHUNK_TRIMMED) /* * flags for bio submission. The high bits indicate the compression @@ -457,9 +459,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, const void *src); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f62a179f85bb..2b8f29c07668 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -798,10 +798,12 @@ again: nritems = btrfs_header_nritems(path->nodes[0]); if (!nritems || (path->slots[0] >= nritems - 1)) { ret = btrfs_next_leaf(root, path); - if (ret == 1) + if (ret < 0) { + goto out; + } else if (ret > 0) { found_next = 1; - if (ret != 0) goto insert; + } slot = path->slots[0]; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 5739b8fc7fff..f8e5c47b95e4 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1546,8 +1546,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, return ret; } -static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, - size_t *write_bytes) +int btrfs_check_can_nocow(struct btrfs_inode *inode, loff_t pos, + size_t *write_bytes) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_root *root = inode->root; @@ -1568,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, num_bytes = lockend - lockstart + 1; ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, - NULL, NULL, NULL); + NULL, NULL, NULL, false); if (ret <= 0) { ret = 0; 
btrfs_end_write_no_snapshotting(root); @@ -1647,7 +1647,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, if (ret < 0) { if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) && - check_can_nocow(BTRFS_I(inode), pos, + btrfs_check_can_nocow(BTRFS_I(inode), pos, &write_bytes) > 0) { /* * For nodata cow case, no need to reserve @@ -1919,13 +1919,28 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, pos = iocb->ki_pos; count = iov_iter_count(from); if (iocb->ki_flags & IOCB_NOWAIT) { + size_t nocow_bytes = count; + /* * We will allocate space in case nodatacow is not set, * so bail */ if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) || - check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) { + btrfs_check_can_nocow(BTRFS_I(inode), pos, + &nocow_bytes) <= 0) { + inode_unlock(inode); + return -EAGAIN; + } + + /* check_can_nocow() locks the snapshot lock on success */ + btrfs_end_write_no_snapshotting(root); + /* + * There are holes in the range or parts of the range that must + * be COWed (shared extents, RO block groups, etc), so just bail + * out. + */ + if (nocow_bytes < count) { inode_unlock(inode); return -EAGAIN; } @@ -2073,6 +2088,16 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) btrfs_init_log_ctx(&ctx, inode); + /* + * Set the range to full if the NO_HOLES feature is not enabled. + * This is to avoid missing file extent items representing holes after + * replaying the log. + */ + if (!btrfs_fs_incompat(fs_info, NO_HOLES)) { + start = 0; + end = LLONG_MAX; + } + /* * We write the dirty pages in the range and wait until they complete * out of the ->i_mutex. If so, we can flush the dirty pages by @@ -2127,6 +2152,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ ret = start_ordered_ops(inode, start, end); if (ret) { + up_write(&BTRFS_I(inode)->dio_sem); inode_unlock(inode); goto out; } @@ -3119,14 +3145,17 @@ reserve_space: if (ret < 0) goto out; space_reserved = true; - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - alloc_start, bytes_to_reserve); - if (ret) - goto out; ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); if (ret) goto out; + ret = btrfs_qgroup_reserve_data(inode, &data_reserved, + alloc_start, bytes_to_reserve); + if (ret) { + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, + lockend, &cached_state); + goto out; + } ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, i_blocksize(inode), diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d86ada9c3c54..23f59d463e24 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -744,8 +744,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, while (num_entries) { e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); - if (!e) + if (!e) { + ret = -ENOMEM; goto free_cache; + } ret = io_ctl_read_entry(&io_ctl, e, &type); if (ret) { @@ -754,6 +756,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } if (!e->bytes) { + ret = -1; kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; } @@ -774,6 +777,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, e->bitmap = kmem_cache_zalloc( btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!e->bitmap) { + ret = -ENOMEM; kmem_cache_free( btrfs_free_space_cachep, e); goto free_cache; @@ -1166,7 +1170,6 @@ static int 
__btrfs_wait_cache_io(struct btrfs_root *root, ret = update_cache_item(trans, root, inode, path, offset, io_ctl->entries, io_ctl->bitmaps); out: - io_ctl_free(io_ctl); if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; @@ -1329,6 +1332,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, * them out later */ io_ctl_drop_pages(io_ctl); + io_ctl_free(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); @@ -2166,7 +2170,7 @@ out: static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *left_info; + struct btrfs_free_space *left_info = NULL; struct btrfs_free_space *right_info; bool merged = false; u64 offset = info->offset; @@ -2181,7 +2185,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); - else + else if (!right_info) left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 48a03f5240f5..dfabbbfc94cc 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1149,6 +1149,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) return PTR_ERR(trans); set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); + set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); free_space_root = btrfs_create_tree(trans, BTRFS_FREE_SPACE_TREE_OBJECTID); if (IS_ERR(free_space_root)) { @@ -1170,11 +1171,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); + ret = btrfs_commit_transaction(trans); - return btrfs_commit_transaction(trans); + /* + * Now that we've committed the transaction any reading of our commit + * root will be safe, so we can cache from the free space tree now. + */ + clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + return ret; abort: clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); + clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 94b0df3fb3c8..b36a4d144bec 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -49,6 +49,7 @@ #include "qgroup.h" #include "delalloc-space.h" #include "block-group.h" +#include "space-info.h" struct btrfs_iget_args { struct btrfs_key *location; @@ -640,12 +641,18 @@ cont: page_error_op | PAGE_END_WRITEBACK); - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); + /* + * Ensure we only free the compressed pages if we have + * them allocated, as we can still reach here with + * inode_need_compress() == false. 
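The __load_free_space_cache() hunks above all share one shape: set an explicit error code before jumping to the cleanup label, so a bail-out can never return stale success. A standalone sketch of the idiom; load_entries() and its inputs are invented for illustration.

#include <errno.h>
#include <stdlib.h>

static int load_entries(const int *sizes, int n)
{
	int ret = 0;
	int i;

	for (i = 0; i < n; i++) {
		int *e = malloc(sizeof(*e));

		if (!e) {
			/*
			 * Set an explicit code before the jump, or we
			 * would fall through reporting success.
			 */
			ret = -ENOMEM;
			goto free_cache;
		}
		*e = sizes[i];
		if (*e == 0) {          /* corrupt entry */
			free(e);
			ret = -1;       /* likewise: explicit failure */
			goto free_cache;
		}
		free(e);                /* toy: a real cache keeps it */
	}
	return 0;
free_cache:
	/* a real implementation tears down the partial cache here */
	return ret;
}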
+ */ + if (pages) { + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); + } - return 0; } } @@ -974,6 +981,7 @@ static noinline int cow_file_range(struct inode *inode, u64 num_bytes; unsigned long ram_size; u64 cur_alloc_size = 0; + u64 min_alloc_size; u64 blocksize = fs_info->sectorsize; struct btrfs_key ins; struct extent_map *em; @@ -1024,10 +1032,26 @@ static noinline int cow_file_range(struct inode *inode, btrfs_drop_extent_cache(BTRFS_I(inode), start, start + num_bytes - 1, 0); + /* + * Relocation relies on the relocated extents to have exactly the same + * size as the original extents. Normally writeback for relocation data + * extents follows a NOCOW path because relocation preallocates the + * extents. However, due to an operation such as scrub turning a block + * group to RO mode, it may fall back to COW mode, so we must make sure + * an extent allocated during COW has exactly the requested size and can + * not be split into smaller extents, otherwise relocation breaks and + * fails during the stage where it updates the bytenr of file extent + * items. + */ + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) + min_alloc_size = num_bytes; + else + min_alloc_size = fs_info->sectorsize; + while (num_bytes > 0) { cur_alloc_size = num_bytes; ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, - fs_info->sectorsize, 0, alloc_hint, + min_alloc_size, 0, alloc_hint, &ins, 1, 1); if (ret < 0) goto out_unlock; @@ -1132,7 +1156,7 @@ out_unlock: */ if (extent_reserved) { extent_clear_unlock_delalloc(inode, start, - start + cur_alloc_size, + start + cur_alloc_size - 1, locked_page, clear_bits, page_ops); @@ -1322,6 +1346,73 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, return 1; } +static int fallback_to_cow(struct inode *inode, struct page *locked_page, + const u64 start, const u64 end, + int *page_started, unsigned long *nr_written) +{ + const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); + const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid == + BTRFS_DATA_RELOC_TREE_OBJECTID); + const u64 range_bytes = end + 1 - start; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + u64 range_start = start; + u64 count; + + /* + * If EXTENT_NORESERVE is set it means that when the buffered write was + * made we did not have enough available data space and therefore we did not + * reserve data space for it, since we thought we could do NOCOW for the + * respective file range (either there is prealloc extent or the inode + * has the NOCOW bit set). + * + * However, when we need to fall back to COW mode (because for example the + * block group for the corresponding extent was turned to RO mode by a + * scrub or relocation) we need to do the following: + * + * 1) We increment the bytes_may_use counter of the data space info. + * If COW succeeds, it allocates a new data extent and after doing + * that it decrements the space info's bytes_may_use counter and + * increments its bytes_reserved counter by the same amount (we do + * this at btrfs_add_reserved_bytes()). So we need to increment the + * bytes_may_use counter to compensate (when space is reserved at + * buffered write time, the bytes_may_use counter is incremented); + * + * 2) We clear the EXTENT_NORESERVE bit from the range.
We do this so + that if the COW path fails for any reason, it decrements (through + extent_clear_unlock_delalloc()) the bytes_may_use counter of the + data space info, which we incremented in the step above. + * + * If we need to fall back to COW and the inode corresponds to a free + * space cache inode or an inode of the data relocation tree, we must + * also increment bytes_may_use of the data space_info for the same + * reason. Space caches and relocated data extents always get a prealloc + * extent for them, however scrub or balance may have set the block + * group that contains that extent to RO mode and therefore force COW + * when starting writeback. + */ + count = count_range_bits(io_tree, &range_start, end, range_bytes, + EXTENT_NORESERVE, 0); + if (count > 0 || is_space_ino || is_reloc_ino) { + u64 bytes = count; + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_space_info *sinfo = fs_info->data_sinfo; + + if (is_space_ino || is_reloc_ino) + bytes = range_bytes; + + spin_lock(&sinfo->lock); + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); + spin_unlock(&sinfo->lock); + + if (count > 0) + clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, + 0, 0, NULL); + } + + return cow_file_range(inode, locked_page, start, end, page_started, + nr_written, 1); +} + /* * Called when nocow writeback runs. This checks for snapshots or COW copies * of the extents that exist in the file, and COWs the file as required. */ @@ -1487,7 +1578,7 @@ next_slot: goto out_check; ret = btrfs_cross_ref_exist(root, ino, found_key.offset - - extent_offset, disk_bytenr); + extent_offset, disk_bytenr, false); if (ret) { /* * ret could be -EIO if the above fails to read @@ -1569,15 +1660,11 @@ out_check: * NOCOW, following one which needs to be COW'ed */ if (cow_start != (u64)-1) { - ret = cow_file_range(inode, locked_page, - cow_start, found_key.offset - 1, - page_started, nr_written, 1); - if (ret) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); + ret = fallback_to_cow(inode, locked_page, cow_start, + found_key.offset - 1, + page_started, nr_written); + if (ret) goto error; - } cow_start = (u64)-1; } @@ -1593,9 +1680,6 @@ out_check: ram_bytes, BTRFS_COMPRESS_NONE, BTRFS_ORDERED_PREALLOC); if (IS_ERR(em)) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); ret = PTR_ERR(em); goto error; } @@ -1660,8 +1744,8 @@ out_check: if (cow_start != (u64)-1) { cur_offset = end; - ret = cow_file_range(inode, locked_page, cow_start, end, - page_started, nr_written, 1); + ret = fallback_to_cow(inode, locked_page, cow_start, end, + page_started, nr_written); if (ret) goto error; } @@ -4250,7 +4334,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) * 1 for the inode ref * 1 for the inode */ - return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); + return btrfs_start_transaction_fallback_global_rsv(root, 5); } static int btrfs_unlink(struct inode *dir, struct dentry *dentry) @@ -4603,6 +4687,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) } } + free_anon_bdev(dest->anon_dev); + dest->anon_dev = 0; out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; @@ -5047,11 +5133,13 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, struct extent_state *cached_state = NULL; struct extent_changeset *data_reserved = NULL; char *kaddr; + bool only_release_metadata = false; u32 blocksize = fs_info->sectorsize; pgoff_t index = from >> PAGE_SHIFT; unsigned offset = from &
(blocksize - 1); struct page *page; gfp_t mask = btrfs_alloc_write_mask(mapping); + size_t write_bytes = blocksize; int ret = 0; u64 block_start; u64 block_end; @@ -5063,11 +5151,27 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, block_start = round_down(from, blocksize); block_end = block_start + blocksize - 1; - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, - block_start, blocksize); - if (ret) - goto out; + ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, + blocksize); + if (ret < 0) { + if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | + BTRFS_INODE_PREALLOC)) && + btrfs_check_can_nocow(BTRFS_I(inode), block_start, + &write_bytes) > 0) { + /* For nocow case, no need to reserve data space */ + only_release_metadata = true; + } else { + goto out; + } + } + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), blocksize); + if (ret < 0) { + if (!only_release_metadata) + btrfs_free_reserved_data_space(inode, data_reserved, + block_start, blocksize); + goto out; + } again: page = find_or_create_page(mapping, index, mask); if (!page) { @@ -5136,14 +5240,26 @@ again: set_page_dirty(page); unlock_extent_cached(io_tree, block_start, block_end, &cached_state); + if (only_release_metadata) + set_extent_bit(&BTRFS_I(inode)->io_tree, block_start, + block_end, EXTENT_NORESERVE, NULL, NULL, + GFP_NOFS); + out_unlock: - if (ret) - btrfs_delalloc_release_space(inode, data_reserved, block_start, - blocksize, true); + if (ret) { + if (only_release_metadata) + btrfs_delalloc_release_metadata(BTRFS_I(inode), + blocksize, true); + else + btrfs_delalloc_release_space(inode, data_reserved, + block_start, blocksize, true); + } btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); unlock_page(page); put_page(page); out: + if (only_release_metadata) + btrfs_end_write_no_snapshotting(BTRFS_I(inode)->root); extent_changeset_free(data_reserved); return ret; } @@ -7108,7 +7224,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, extent_type == BTRFS_FILE_EXTENT_PREALLOC) { /* Only regular file could have regular/prealloc extent */ if (!S_ISREG(inode->vfs_inode.i_mode)) { - ret = -EUCLEAN; + err = -EUCLEAN; btrfs_crit(fs_info, "regular/prealloc extent found for non-regular inode %llu", btrfs_ino(inode)); @@ -7443,7 +7559,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, */ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes) + u64 *ram_bytes, bool strict) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_path *path; @@ -7521,8 +7637,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, * Do the same check as in btrfs_cross_ref_exist but without the * unnecessary search. 
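The btrfs_truncate_block() changes above reserve in two stages, data first (skipped for NOCOW ranges) and then metadata, and on failure unwind only what was actually taken. A hedged sketch of that shape with toy reserve/release helpers standing in for the btrfs calls:

#include <errno.h>
#include <stdbool.h>

/* Toy reservation helpers; stubs that always succeed. */
static int  reserve_data(void)  { return 0; }
static void release_data(void)  { }
static int  reserve_meta(void)  { return 0; }

/*
 * Two-stage reservation with precise rollback: data space is optional
 * when the range can be written NOCOW, and a metadata failure must
 * release only what was actually reserved.
 */
static int reserve_block(bool can_nocow)
{
	bool only_metadata = false;

	if (reserve_data() < 0) {
		if (!can_nocow)
			return -ENOSPC;
		only_metadata = true;   /* NOCOW: no data reservation needed */
	}
	if (reserve_meta() < 0) {
		if (!only_metadata)
			release_data(); /* undo exactly what we took */
		return -ENOSPC;
	}
	return 0;
}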
*/ - if (btrfs_file_extent_generation(leaf, fi) <= - btrfs_root_last_snapshot(&root->root_item)) + if (!strict && + (btrfs_file_extent_generation(leaf, fi) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; backref_offset = btrfs_file_extent_offset(leaf, fi); @@ -7558,7 +7675,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, */ ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), - key.offset - backref_offset, disk_bytenr); + key.offset - backref_offset, disk_bytenr, + strict); if (ret) { ret = 0; goto out; @@ -7779,7 +7897,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, block_start = em->block_start + (start - em->start); if (can_nocow_extent(inode, start, &len, &orig_start, - &orig_block_len, &ram_bytes) == 1 && + &orig_block_len, &ram_bytes, false) == 1 && btrfs_inc_nocow_writers(fs_info, block_start)) { struct extent_map *em2; @@ -8498,14 +8616,64 @@ err: return ret; } -static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) +/* + * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked + * or ordered extents whether or not we submit any bios. + */ +static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, + struct inode *inode, + loff_t file_offset) { - struct inode *inode = dip->inode; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); + struct btrfs_dio_private *dip; struct bio *bio; - struct bio *orig_bio = dip->orig_bio; - u64 start_sector = orig_bio->bi_iter.bi_sector; - u64 file_offset = dip->logical_offset; + + dip = kzalloc(sizeof(*dip), GFP_NOFS); + if (!dip) + return NULL; + + bio = btrfs_bio_clone(dio_bio); + bio->bi_private = dip; + btrfs_io_bio(bio)->logical = file_offset; + + dip->private = dio_bio->bi_private; + dip->inode = inode; + dip->logical_offset = file_offset; + dip->bytes = dio_bio->bi_iter.bi_size; + dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; + dip->orig_bio = bio; + dip->dio_bio = dio_bio; + atomic_set(&dip->pending_bios, 1); + + if (write) { + struct btrfs_dio_data *dio_data = current->journal_info; + + /* + * Setting range start and end to the same value means that + * no cleanup will happen in btrfs_direct_IO + */ + dio_data->unsubmitted_oe_range_end = dip->logical_offset + + dip->bytes; + dio_data->unsubmitted_oe_range_start = + dio_data->unsubmitted_oe_range_end; + + bio->bi_end_io = btrfs_endio_direct_write; + } else { + bio->bi_end_io = btrfs_endio_direct_read; + dip->subio_endio = btrfs_subio_endio_read; + } + return dip; +} + +static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + loff_t file_offset) +{ + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_dio_private *dip; + struct bio *bio; + struct bio *orig_bio; + u64 start_sector; int async_submit = 0; u64 submit_len; int clone_offset = 0; @@ -8514,11 +8682,24 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) blk_status_t status; struct btrfs_io_geometry geom; + dip = btrfs_create_dio_private(dio_bio, inode, file_offset); + if (!dip) { + if (!write) { + unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, + file_offset + dio_bio->bi_iter.bi_size - 1); + } + dio_bio->bi_status = BLK_STS_RESOURCE; + dio_end_io(dio_bio); + return; + } + + orig_bio = dip->orig_bio; + start_sector = orig_bio->bi_iter.bi_sector; submit_len = orig_bio->bi_iter.bi_size; ret = btrfs_get_io_geometry(fs_info, 
btrfs_op(orig_bio), start_sector << 9, submit_len, &geom); if (ret) - return -EIO; + goto out_err; if (geom.len >= submit_len) { bio = orig_bio; @@ -8534,7 +8715,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) /* bio split */ ASSERT(geom.len <= INT_MAX); - atomic_inc(&dip->pending_bios); do { clone_len = min_t(int, submit_len, geom.len); @@ -8582,9 +8762,10 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) submit: status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); if (!status) - return 0; + return; - bio_put(bio); + if (bio != orig_bio) + bio_put(bio); out_err: dip->errors = 1; /* @@ -8595,107 +8776,6 @@ out_err: */ if (atomic_dec_and_test(&dip->pending_bios)) bio_io_error(dip->orig_bio); - - /* bio_end_io() will handle error, so we needn't return it */ - return 0; -} - -static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, - loff_t file_offset) -{ - struct btrfs_dio_private *dip = NULL; - struct bio *bio = NULL; - struct btrfs_io_bio *io_bio; - bool write = (bio_op(dio_bio) == REQ_OP_WRITE); - int ret = 0; - - bio = btrfs_bio_clone(dio_bio); - - dip = kzalloc(sizeof(*dip), GFP_NOFS); - if (!dip) { - ret = -ENOMEM; - goto free_ordered; - } - - dip->private = dio_bio->bi_private; - dip->inode = inode; - dip->logical_offset = file_offset; - dip->bytes = dio_bio->bi_iter.bi_size; - dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; - bio->bi_private = dip; - dip->orig_bio = bio; - dip->dio_bio = dio_bio; - atomic_set(&dip->pending_bios, 0); - io_bio = btrfs_io_bio(bio); - io_bio->logical = file_offset; - - if (write) { - bio->bi_end_io = btrfs_endio_direct_write; - } else { - bio->bi_end_io = btrfs_endio_direct_read; - dip->subio_endio = btrfs_subio_endio_read; - } - - /* - * Reset the range for unsubmitted ordered extents (to a 0 length range) - * even if we fail to submit a bio, because in such case we do the - * corresponding error handling below and it must not be done a second - * time by btrfs_direct_IO(). - */ - if (write) { - struct btrfs_dio_data *dio_data = current->journal_info; - - dio_data->unsubmitted_oe_range_end = dip->logical_offset + - dip->bytes; - dio_data->unsubmitted_oe_range_start = - dio_data->unsubmitted_oe_range_end; - } - - ret = btrfs_submit_direct_hook(dip); - if (!ret) - return; - - btrfs_io_bio_free_csum(io_bio); - -free_ordered: - /* - * If we arrived here it means either we failed to submit the dip - * or we either failed to clone the dio_bio or failed to allocate the - * dip. If we cloned the dio_bio and allocated the dip, we can just - * call bio_endio against our io_bio so that we get proper resource - * cleanup if we fail to submit the dip, otherwise, we must do the - * same as btrfs_endio_direct_[write|read] because we can't call these - * callbacks - they require an allocated dip and a clone of dio_bio. - */ - if (bio && dip) { - bio_io_error(bio); - /* - * The end io callbacks free our dip, do the final put on bio - * and all the cleanup and final put for dio_bio (through - * dio_end_io()). - */ - dip = NULL; - bio = NULL; - } else { - if (write) - __endio_write_update_ordered(inode, - file_offset, - dio_bio->bi_iter.bi_size, - false); - else - unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, - file_offset + dio_bio->bi_iter.bi_size - 1); - - dio_bio->bi_status = BLK_STS_IOERR; - /* - * Releases and cleans up our dio_bio, no need to bio_put() - * nor bio_endio()/bio_io_error() against dio_bio. 
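/*
 * Illustrative aside, not part of the patch: btrfs_create_dio_private()
 * above now initializes pending_bios to 1 instead of 0 plus a separate
 * atomic_inc before splitting, so the counter cannot hit zero while clones
 * are still being added. A standalone C11 sketch of that bias-reference
 * pattern; the names are illustrative, not kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;

static void complete_one(void)
{
	/* Whoever drops the count to zero runs the end_io work. */
	if (atomic_fetch_sub(&pending, 1) == 1)
		printf("all bios done, run end_io\n");
}

int main(void)
{
	atomic_init(&pending, 1);		/* the original bio's reference */
	for (int i = 0; i < 3; i++)
		atomic_fetch_add(&pending, 1);	/* one per additional clone */
	for (int i = 0; i < 4; i++)
		complete_one();			/* all four bios complete */
	return 0;
}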
- */ - dio_end_io(dio_bio); - } - if (bio) - bio_put(bio); - kfree(dip); } static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, @@ -8772,9 +8852,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.overwrite = 1; inode_unlock(inode); relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; } ret = btrfs_delalloc_reserve_space(inode, &data_reserved, offset, count); @@ -8997,20 +9074,17 @@ again: /* * Qgroup reserved space handler * Page here will be either - * 1) Already written to disk - * In this case, its reserved space is released from data rsv map - * and will be freed by delayed_ref handler finally. - * So even we call qgroup_free_data(), it won't decrease reserved - * space. - * 2) Not written to disk - * This means the reserved space should be freed here. However, - * if a truncate invalidates the page (by clearing PageDirty) - * and the page is accounted for while allocating extent - * in btrfs_check_data_free_space() we let delayed_ref to - * free the entire extent. + * 1) Already written to disk or ordered extent already submitted + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. + * Qgroup will be handled by its qgroup_record then. + * btrfs_qgroup_free_data() call will do nothing here. + * + * 2) Not written to disk yet + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED + * bit of its io_tree, and free the qgroup reserved data space. + * Since the IO will never happen for this page. */ - if (PageDirty(page)) - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); + btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | @@ -9485,7 +9559,7 @@ void btrfs_destroy_inode(struct inode *inode) btrfs_put_ordered_extent(ordered); } } - btrfs_qgroup_check_reserved_leak(inode); + btrfs_qgroup_check_reserved_leak(BTRFS_I(inode)); inode_tree_del(inode); btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); } @@ -9554,7 +9628,7 @@ int __init btrfs_init_cachep(void) btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", PAGE_SIZE, PAGE_SIZE, - SLAB_RED_ZONE, NULL); + SLAB_MEM_SPREAD, NULL); if (!btrfs_free_space_bitmap_cachep) goto fail; @@ -10947,7 +11021,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, free_extent_map(em); em = NULL; - ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL); + ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true); if (ret < 0) { goto out; } else if (ret) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d88b8d8897cc..e9d3eb7f0e2b 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -167,8 +167,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg) return 0; } -/* Check if @flags are a supported and valid set of FS_*_FL flags */ -static int check_fsflags(unsigned int flags) +/* + * Check if @flags are a supported and valid set of FS_*_FL flags and that + * the old and new flags are not conflicting + */ +static int check_fsflags(unsigned int old_flags, unsigned int flags) { if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ @@ -177,9 +180,19 @@ static int check_fsflags(unsigned int flags) FS_NOCOW_FL)) return -EOPNOTSUPP; + /* COMPR and NOCOMP on new/old are valid */ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; + if ((flags & FS_COMPR_FL) && (flags & 
FS_NOCOW_FL)) + return -EINVAL; + + /* NOCOW and compression options are mutually exclusive */ + if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + return 0; } @@ -193,7 +206,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) unsigned int fsflags, old_fsflags; int ret; const char *comp = NULL; - u32 binode_flags = binode->flags; + u32 binode_flags; if (!inode_owner_or_capable(inode)) return -EPERM; @@ -204,22 +217,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (copy_from_user(&fsflags, arg, sizeof(fsflags))) return -EFAULT; - ret = check_fsflags(fsflags); - if (ret) - return ret; - ret = mnt_want_write_file(file); if (ret) return ret; inode_lock(inode); - fsflags = btrfs_mask_fsflags_for_type(inode, fsflags); old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); if (ret) goto out_unlock; + ret = check_fsflags(old_fsflags, fsflags); + if (ret) + goto out_unlock; + + binode_flags = binode->flags; if (fsflags & FS_SYNC_FL) binode_flags |= BTRFS_INODE_SYNC; else @@ -653,8 +667,6 @@ static noinline int create_subvol(struct inode *dir, btrfs_set_root_otransid(root_item, trans->transid); btrfs_tree_unlock(leaf); - free_extent_buffer(leaf); - leaf = NULL; btrfs_set_root_dirid(root_item, new_dirid); @@ -663,8 +675,22 @@ static noinline int create_subvol(struct inode *dir, key.type = BTRFS_ROOT_ITEM_KEY; ret = btrfs_insert_root(trans, fs_info->tree_root, &key, root_item); - if (ret) + if (ret) { + /* + * Since we don't abort the transaction in this case, free the + * tree block so that we don't leak space and leave the + * filesystem in an inconsistent state (an extent item in the + * extent tree without backreferences). Also no need to have + * the tree block locked since it is not in any tree at this + * point, so no other task can find it and use it. + */ + btrfs_free_tree_block(trans, root, leaf, 0, 1); + free_extent_buffer(leaf); goto fail; + } + + free_extent_buffer(leaf); + leaf = NULL; key.offset = (u64)-1; new_root = btrfs_read_fs_root_no_name(fs_info, &key); @@ -1241,6 +1267,8 @@ static int cluster_pages_for_defrag(struct inode *inode, u64 page_start; u64 page_end; u64 page_cnt; + u64 start = (u64)start_index << PAGE_SHIFT; + u64 search_start; int ret; int i; int i_done; @@ -1257,8 +1285,7 @@ static int cluster_pages_for_defrag(struct inode *inode, page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); ret = btrfs_delalloc_reserve_space(inode, &data_reserved, - start_index << PAGE_SHIFT, - page_cnt << PAGE_SHIFT); + start, page_cnt << PAGE_SHIFT); if (ret) return ret; i_done = 0; @@ -1338,6 +1365,40 @@ again: lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); + + /* + * When defragmenting we skip ranges that have holes or inline extents, + * (check should_defrag_range()), to avoid unnecessary IO and wasting + * space. At btrfs_defrag_file(), we check if a range should be defragged + * before locking the inode and then, if it should, we trigger a sync + * page cache readahead - we lock the inode only after that to avoid + * blocking for too long other tasks that possibly want to operate on + * other file ranges. 
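/*
 * A userspace probe of the check_fsflags() change above, assuming a btrfs
 * file at ./testfile (hypothetical path) whose flags we may modify.
 * Requesting FS_COMPR_FL and FS_NOCOW_FL together should now fail with
 * EINVAL, since the two are mutually exclusive. Error handling is minimal.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned int flags;
	int fd = open("./testfile", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("getflags");
		return 1;
	}
	flags |= FS_COMPR_FL | FS_NOCOW_FL;	/* mutually exclusive on btrfs */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("setflags");		/* expect EINVAL with this patch */
	close(fd);
	return 0;
}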
But before we were able to get the inode lock, + * some other task may have punched a hole in the range, or we may have + * now an inline extent, in which case we should not defrag. So check + * for that here, where we have the inode and the range locked, and bail + * out if that happened. + */ + search_start = page_start; + while (search_start < page_end) { + struct extent_map *em; + + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start, + page_end - search_start, 0); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out_unlock_range; + } + if (em->block_start >= EXTENT_MAP_LAST_BYTE) { + free_extent_map(em); + /* Ok, 0 means we did not defrag anything */ + ret = 0; + goto out_unlock_range; + } + search_start = extent_map_end(em); + free_extent_map(em); + } + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, &cached_state); @@ -1347,8 +1408,7 @@ again: btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, data_reserved, - start_index << PAGE_SHIFT, - (page_cnt - i_done) << PAGE_SHIFT, true); + start, (page_cnt - i_done) << PAGE_SHIFT, true); } @@ -1369,14 +1429,17 @@ again: btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); extent_changeset_free(data_reserved); return i_done; + +out_unlock_range: + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state); out: for (i = 0; i < i_done; i++) { unlock_page(pages[i]); put_page(pages[i]); } btrfs_delalloc_release_space(inode, data_reserved, - start_index << PAGE_SHIFT, - page_cnt << PAGE_SHIFT, true); + start, page_cnt << PAGE_SHIFT, true); btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); extent_changeset_free(data_reserved); return ret; @@ -1856,7 +1919,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { - if (vol_args->size > PAGE_SIZE) { + u64 nums; + + if (vol_args->size < sizeof(*inherit) || + vol_args->size > PAGE_SIZE) { ret = -EINVAL; goto free_args; } @@ -1865,6 +1931,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, ret = PTR_ERR(inherit); goto free_args; } + + if (inherit->num_qgroups > PAGE_SIZE || + inherit->num_ref_copies > PAGE_SIZE || + inherit->num_excl_copies > PAGE_SIZE) { + ret = -EINVAL; + goto free_inherit; + } + + nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + + 2 * inherit->num_excl_copies; + if (vol_args->size != struct_size(inherit, qgroups, nums)) { + ret = -EINVAL; + goto free_inherit; + } } ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, @@ -2091,9 +2171,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. 
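/*
 * Illustrative aside on the copy_to_sk() change above: a nofault copy that
 * fails no longer returns -EFAULT; it returns success so the outer search
 * loop can fault the user pages in and retry. A standalone simulation;
 * copy_nofault() and fault_in() are hypothetical stand-ins for
 * probe_user_write() and fault_in_pages_writeable().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool page_resident;	/* pretend state of the user page */

static int copy_nofault(char *dst, const char *src, size_t len)
{
	if (!page_resident)
		return -1;	/* would have faulted; copy nothing */
	memcpy(dst, src, len);
	return 0;
}

static void fault_in(void)
{
	page_resident = true;	/* make the page writable without faulting */
}

int main(void)
{
	char ubuf[16];
	int retries = 0;

	while (copy_nofault(ubuf, "hdr", 4) != 0) {
		fault_in();	/* done at the top of each search pass */
		retries++;
	}
	printf("copied \"%s\", retries=%d\n", ubuf, retries);
	return 0;
}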
Otherwise we'll fault and then copy the buffer in + * properly this next time through + */ + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2101,10 +2186,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * * sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2192,6 +2281,11 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf + sk_offset, + *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) @@ -3724,6 +3818,8 @@ process_slot: ret = -EINTR; goto out; } + + cond_resched(); } ret = 0; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 9cb50577d982..f4edadf1067f 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * offset is supposed to be a tree block which * must be aligned to nodesize. */ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * must be aligned to nodesize. 
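/*
 * The print_extent_item() hunks above hinge on the kernel's IS_ALIGNED(),
 * which assumes a power-of-two alignment. A minimal userspace equivalent
 * showing an offset that is sector-aligned but not node-aligned (example
 * values: 4K sectors, 16K nodes).
 */
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t sectorsize = 4096, nodesize = 16384;
	uint64_t offset = 1048576 + 4096;

	printf("sector-aligned: %d, node-aligned: %d\n",
	       (int)IS_ALIGNED(offset, sectorsize),
	       (int)IS_ALIGNED(offset, nodesize));	/* 1, 0 */
	return 0;
}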
*/ if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; default: pr_cont("(extent %llu has INVALID ref type %d)\n", diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 286c8c11c8d3..cd8e81c02f63 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -488,13 +488,13 @@ next2: break; } out: + btrfs_free_path(path); fs_info->qgroup_flags |= flags; if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && ret >= 0) ret = qgroup_rescan_init(fs_info, rescan_progress, 0); - btrfs_free_path(path); if (ret < 0) { ulist_free(fs_info->qgroup_ulist); @@ -1030,6 +1030,7 @@ out_add_root: ret = qgroup_rescan_init(fs_info, 0, 1); if (!ret) { qgroup_rescan_zero_tracking(fs_info); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); } @@ -2635,6 +2636,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, struct btrfs_root *quota_root; struct btrfs_qgroup *srcgroup; struct btrfs_qgroup *dstgroup; + bool need_rescan = false; u32 level_size = 0; u64 nums; @@ -2778,6 +2780,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, goto unlock; } ++i_qgroups; + + /* + * If we're doing a snapshot, and adding the snapshot to a new + * qgroup, the numbers are guaranteed to be incorrect. + */ + if (srcid) + need_rescan = true; } for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { @@ -2797,6 +2806,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->rfer = src->rfer - level_size; dst->rfer_cmpr = src->rfer_cmpr - level_size; + + /* Manually tweaking numbers certainly needs a rescan */ + need_rescan = true; } for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { struct btrfs_qgroup *src; @@ -2815,6 +2827,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->excl = src->excl + level_size; dst->excl_cmpr = src->excl_cmpr + level_size; + need_rescan = true; } unlock: @@ -2822,6 +2835,8 @@ unlock: out: if (!committing) mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (need_rescan) + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; return ret; } @@ -3129,6 +3144,12 @@ out: return ret; } +static bool rescan_should_stop(struct btrfs_fs_info *fs_info) +{ + return btrfs_fs_closing(fs_info) || + test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); +} + static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) { struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, @@ -3137,6 +3158,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) struct btrfs_trans_handle *trans = NULL; int err = -ENOMEM; int ret = 0; + bool stopped = false; path = btrfs_alloc_path(); if (!path) @@ -3149,7 +3171,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) path->skip_locking = 1; err = 0; - while (!err && !btrfs_fs_closing(fs_info)) { + while (!err && !(stopped = rescan_should_stop(fs_info))) { trans = btrfs_start_transaction(fs_info->fs_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); @@ -3192,7 +3214,7 @@ out: } mutex_lock(&fs_info->qgroup_rescan_lock); - if (!btrfs_fs_closing(fs_info)) + if (!stopped) fs_info->qgroup_flags &= 
~BTRFS_QGROUP_STATUS_FLAG_RESCAN; if (trans) { ret = update_qgroup_status_item(trans); @@ -3211,7 +3233,7 @@ out: btrfs_end_transaction(trans); - if (btrfs_fs_closing(fs_info)) { + if (stopped) { btrfs_info(fs_info, "qgroup scan paused"); } else if (err >= 0) { btrfs_info(fs_info, "qgroup scan completed%s", @@ -3276,7 +3298,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, sizeof(fs_info->qgroup_rescan_progress)); fs_info->qgroup_rescan_progress.objectid = progress_objectid; init_completion(&fs_info->qgroup_rescan_completion); - fs_info->qgroup_rescan_running = true; spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); @@ -3341,8 +3362,11 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) qgroup_rescan_zero_tracking(fs_info); + mutex_lock(&fs_info->qgroup_rescan_lock); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + mutex_unlock(&fs_info->qgroup_rescan_lock); return 0; } @@ -3378,9 +3402,13 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) { - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { + mutex_lock(&fs_info->qgroup_rescan_lock); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + mutex_unlock(&fs_info->qgroup_rescan_lock); + } } /* @@ -3748,7 +3776,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) * Check qgroup reserved space leaking, normally at destroy inode * time */ -void btrfs_qgroup_check_reserved_leak(struct inode *inode) +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) { struct extent_changeset changeset; struct ulist_node *unode; @@ -3756,19 +3784,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) int ret; extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { ULIST_ITER_INIT(&iter); while ((unode = ulist_next(&changeset.range_changed, &iter))) { - btrfs_warn(BTRFS_I(inode)->root->fs_info, - "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", - inode->i_ino, unode->val, unode->aux); + btrfs_warn(inode->root->fs_info, + "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", + btrfs_ino(inode), unode->val, unode->aux); } - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->root_key.objectid, + btrfs_qgroup_free_refroot(inode->root->fs_info, + inode->root->root_key.objectid, changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); } diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 17e8ac992c50..b0420c4f5d0e 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -399,7 +399,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root); */ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes); -void btrfs_qgroup_check_reserved_leak(struct inode *inode); +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode); /* btrfs_qgroup_swapped_blocks related functions */ void btrfs_qgroup_init_swapped_blocks( diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 8f47a85944eb..7ac679ed2b6c 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -1198,22 
+1198,19 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) int nr_data = rbio->nr_data; int stripe; int pagenr; - int p_stripe = -1; - int q_stripe = -1; + bool has_qstripe; struct bio_list bio_list; struct bio *bio; int ret; bio_list_init(&bio_list); - if (rbio->real_stripes - rbio->nr_data == 1) { - p_stripe = rbio->real_stripes - 1; - } else if (rbio->real_stripes - rbio->nr_data == 2) { - p_stripe = rbio->real_stripes - 2; - q_stripe = rbio->real_stripes - 1; - } else { + if (rbio->real_stripes - rbio->nr_data == 1) + has_qstripe = false; + else if (rbio->real_stripes - rbio->nr_data == 2) + has_qstripe = true; + else BUG(); - } /* at this point we either have a full stripe, * or we've read the full stripe from the drive. @@ -1257,7 +1254,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) SetPageUptodate(p); pointers[stripe++] = kmap(p); - if (q_stripe != -1) { + if (has_qstripe) { /* * raid6, add the qstripe and call the @@ -2355,8 +2352,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, int nr_data = rbio->nr_data; int stripe; int pagenr; - int p_stripe = -1; - int q_stripe = -1; + bool has_qstripe; struct page *p_page = NULL; struct page *q_page = NULL; struct bio_list bio_list; @@ -2366,14 +2362,12 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, bio_list_init(&bio_list); - if (rbio->real_stripes - rbio->nr_data == 1) { - p_stripe = rbio->real_stripes - 1; - } else if (rbio->real_stripes - rbio->nr_data == 2) { - p_stripe = rbio->real_stripes - 2; - q_stripe = rbio->real_stripes - 1; - } else { + if (rbio->real_stripes - rbio->nr_data == 1) + has_qstripe = false; + else if (rbio->real_stripes - rbio->nr_data == 2) + has_qstripe = true; + else BUG(); - } if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { is_replace = 1; @@ -2395,17 +2389,22 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, goto cleanup; SetPageUptodate(p_page); - if (q_stripe != -1) { + if (has_qstripe) { + /* RAID6, allocate and map temp space for the Q stripe */ q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!q_page) { __free_page(p_page); goto cleanup; } SetPageUptodate(q_page); + pointers[rbio->real_stripes - 1] = kmap(q_page); } atomic_set(&rbio->error, 0); + /* Map the parity stripe just once */ + pointers[nr_data] = kmap(p_page); + for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { struct page *p; void *parity; @@ -2415,17 +2414,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, pointers[stripe] = kmap(p); } - /* then add the parity stripe */ - pointers[stripe++] = kmap(p_page); - - if (q_stripe != -1) { - - /* - * raid6, add the qstripe and call the - * library function to fill in our p/q - */ - pointers[stripe++] = kmap(q_page); - + if (has_qstripe) { + /* RAID6, call the library function to fill in our P/Q */ raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, pointers); } else { @@ -2446,12 +2436,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, for (stripe = 0; stripe < nr_data; stripe++) kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); - kunmap(p_page); } + kunmap(p_page); __free_page(p_page); - if (q_page) + if (q_page) { + kunmap(q_page); __free_page(q_page); + } writeback: /* diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 1feaeadc8cf5..2656dc8de99c 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -421,6 +421,9 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, if (!dev->bdev) continue; 
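/*
 * Illustrative aside on the finish_rmw()/finish_parity_scrub() refactor
 * above: with has_qstripe false (RAID5), the parity page P is just the XOR
 * of the data stripes; RAID6 additionally generates Q through
 * raid6_call.gen_syndrome(), which is not reproduced here. A standalone
 * XOR-parity sketch with a toy stripe length.
 */
#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN 8

static void gen_p(uint8_t *p, uint8_t *const data[], int nr_data)
{
	for (int i = 0; i < STRIPE_LEN; i++) {
		uint8_t x = 0;

		for (int d = 0; d < nr_data; d++)
			x ^= data[d][i];
		p[i] = x;	/* P = D0 ^ D1 ^ ... */
	}
}

int main(void)
{
	uint8_t d0[STRIPE_LEN] = "AAAAAAA", d1[STRIPE_LEN] = "bbbbbbb";
	uint8_t *const data[] = { d0, d1 };
	uint8_t p[STRIPE_LEN];

	gen_p(p, data, 2);
	/* XOR parity is its own inverse: recover d1 from d0 and P. */
	printf("%d\n", (d0[3] ^ p[3]) == d1[3]);	/* prints 1 */
	return 0;
}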
+ if (test_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state)) + continue; + if (dev_replace_is_ongoing && dev == fs_info->dev_replace.tgtdev) { /* @@ -445,6 +448,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, } have_zone = 1; } + if (!have_zone) + radix_tree_delete(&fs_info->reada_tree, index); spin_unlock(&fs_info->reada_lock); up_read(&fs_info->dev_replace.rwsem); @@ -1012,3 +1017,45 @@ void btrfs_reada_detach(void *handle) kref_put(&rc->refcnt, reada_control_release); } + +/* + * Before removing a device (device replace or device remove ioctls), call this + * function to wait for all existing readahead requests on the device and to + * make sure no one queues more readahead requests for the device. + * + * Must be called while holding neither the device list mutex nor the device + * replace semaphore, otherwise it will deadlock. + */ +void btrfs_reada_remove_dev(struct btrfs_device *dev) +{ + struct btrfs_fs_info *fs_info = dev->fs_info; + + /* Serialize with readahead extent creation at reada_find_extent(). */ + spin_lock(&fs_info->reada_lock); + set_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state); + spin_unlock(&fs_info->reada_lock); + + /* + * There might be readahead requests added to the radix trees which + * were not yet added to the readahead work queue. We need to start + * them and wait for their completion, otherwise we can end up with + * use-after-free problems when dropping the last reference on the + * readahead extents and their zones, as they need to access the + * device structure. + */ + reada_start_machine(fs_info); + btrfs_flush_workqueue(fs_info->readahead_workers); +} + +/* + * If, when removing a device (device replace or device remove ioctls), an error + * happens after calling btrfs_reada_remove_dev(), call this to undo what that + * function did. This is safe to call even if btrfs_reada_remove_dev() was not + * called before.
+ */ +void btrfs_reada_undo_remove_dev(struct btrfs_device *dev) +{ + spin_lock(&dev->fs_info->reada_lock); + clear_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state); + spin_unlock(&dev->fs_info->reada_lock); +} diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 454a1015d026..bbd63535965c 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info, exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); + } else { + kfree(re); } kfree(be); return exist; @@ -849,6 +851,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, "dropping a ref for a root that doesn't have a ref on the block"); dump_block_entry(fs_info, be); dump_ref_action(fs_info, ra); + kfree(ref); kfree(ra); goto out_unlock; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index bc1d7f144ace..ba68b0b41dff 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -561,8 +561,8 @@ static int should_ignore_root(struct btrfs_root *root) if (!reloc_root) return 0; - if (btrfs_root_last_snapshot(&reloc_root->root_item) == - root->fs_info->running_transaction->transid - 1) + if (btrfs_header_generation(reloc_root->commit_root) == + root->fs_info->running_transaction->transid) return 0; /* * if there is reloc tree and it was created in previous @@ -1186,7 +1186,7 @@ out: free_backref_node(cache, lower); } - free_backref_node(cache, node); + remove_backref_node(cache, node); return ERR_PTR(err); } ASSERT(!node || !node->detached); @@ -1298,7 +1298,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) if (!node) return -ENOMEM; - node->bytenr = root->node->start; + node->bytenr = root->commit_root->start; node->data = root; spin_lock(&rc->reloc_root_tree.lock); @@ -1329,15 +1329,14 @@ static void __del_reloc_root(struct btrfs_root *root) if (rc && root->node) { spin_lock(&rc->reloc_root_tree.lock); rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + RB_CLEAR_NODE(&node->rb_node); } spin_unlock(&rc->reloc_root_tree.lock); - if (!node) - return; - BUG_ON((struct btrfs_root *)node->data != root); + ASSERT(!node || (struct btrfs_root *)node->data == root); } spin_lock(&fs_info->trans_lock); @@ -1350,7 +1349,7 @@ static void __del_reloc_root(struct btrfs_root *root) * helper to update the 'address of tree root -> reloc tree' * mapping */ -static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) +static int __update_reloc_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct rb_node *rb_node; @@ -1359,7 +1358,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) spin_lock(&rc->reloc_root_tree.lock); rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); @@ -1371,7 +1370,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) BUG_ON((struct btrfs_root *)node->data != root); spin_lock(&rc->reloc_root_tree.lock); - node->bytenr = new_bytenr; + node->bytenr = root->node->start; rb_node = tree_insert(&rc->reloc_root_tree.rb_root, node->bytenr, &node->rb_node); spin_unlock(&rc->reloc_root_tree.lock); @@ -1467,6 +1466,9 @@ int 
btrfs_init_reloc_root(struct btrfs_trans_handle *trans, int clear_rsv = 0; int ret; + if (!rc) + return 0; + /* * The subvolume has reloc tree but the swap is finished, no need to * create/update the dead reloc tree @@ -1474,13 +1476,25 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, if (reloc_root_is_dead(root)) return 0; + /* + * This is subtle but important. We do not do + * record_root_in_transaction for reloc roots, instead we record their + * corresponding fs root, and then here we update the last trans for the + * reloc root. This means that we have to do this for the entire life + * of the reloc root, regardless of which stage of the relocation we are + * in. + */ if (root->reloc_root) { reloc_root = root->reloc_root; reloc_root->last_trans = trans->transid; return 0; } - if (!rc || !rc->create_reloc_tree || + /* + * We are merging reloc roots, we do not need new reloc trees. Also + * reloc trees never need their own reloc tree. + */ + if (!rc->create_reloc_tree || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) return 0; @@ -1529,6 +1543,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, } if (reloc_root->commit_root != reloc_root->node) { + __update_reloc_root(reloc_root); btrfs_set_root_node(root_item, reloc_root->node); free_extent_buffer(reloc_root->commit_root); reloc_root->commit_root = btrfs_root_node(reloc_root); @@ -1821,8 +1836,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc, int ret; int slot; - BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); + ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); + ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); last_snapshot = btrfs_root_last_snapshot(&src->root_item); again: @@ -1856,7 +1871,7 @@ again: struct btrfs_key first_key; level = btrfs_header_level(parent); - BUG_ON(level < lowest_level); + ASSERT(level >= lowest_level); ret = btrfs_bin_search(parent, &key, level, &slot); if (ret < 0) @@ -2270,6 +2285,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, struct btrfs_root_item *root_item; struct btrfs_path *path; struct extent_buffer *leaf; + int reserve_level; int level; int max_level; int replaced = 0; @@ -2310,12 +2326,21 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, btrfs_unlock_up_safe(path, 0); } - min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; + /* + * In merge_reloc_root(), we modify the upper level pointer to swap the + * tree blocks between reloc tree and subvolume tree. Thus for tree + * block COW, we COW at most from level 1 to root level for each tree. + * + * Thus the needed metadata size is at most root_level * nodesize, + * and * 2 since we have two trees to COW. + */ + reserve_level = max_t(int, 1, btrfs_root_level(root_item)); + min_reserved = fs_info->nodesize * reserve_level * 2; memset(&next_key, 0, sizeof(next_key)); while (1) { ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, - BTRFS_RESERVE_FLUSH_ALL); + BTRFS_RESERVE_FLUSH_LIMIT); if (ret) { err = ret; goto out; @@ -2326,6 +2351,18 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, trans = NULL; goto out; } + + /* + * At this point we no longer have a reloc_control, so we can't + * depend on btrfs_init_reloc_root to update our last_trans. + * + * But that's ok, we started the trans handle on our + * corresponding fs_root, which means it's been added to the + * dirty list. 
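/*
 * The merge_reloc_root() hunk above replaces the fixed reservation of
 * nodesize * (BTRFS_MAX_LEVEL - 1) * 2 with one scaled to the actual root
 * level. A small arithmetic sketch with example numbers (16K nodes,
 * BTRFS_MAX_LEVEL == 8).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_reserved(uint64_t nodesize, int root_level)
{
	int reserve_level = root_level > 1 ? root_level : 1;

	/* COW at most root_level nodes per tree, for two trees. */
	return nodesize * reserve_level * 2;
}

int main(void)
{
	uint64_t nodesize = 16384;

	printf("old: %llu KiB\n",
	       (unsigned long long)(nodesize * (8 - 1) * 2) / 1024); /* 224 */
	printf("new (level 3 root): %llu KiB\n",
	       (unsigned long long)min_reserved(nodesize, 3) / 1024); /* 96 */
	return 0;
}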
At commit time we'll still call + * btrfs_update_reloc_root() and update our root item + * appropriately. + */ + reloc_root->last_trans = trans->transid; trans->block_rsv = rc->block_rsv; replaced = 0; @@ -2523,12 +2560,10 @@ again: reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); + root = read_fs_root(fs_info, reloc_root->root_key.offset); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - root = read_fs_root(fs_info, - reloc_root->root_key.offset); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); - ret = merge_reloc_root(rc, root); if (ret) { if (list_empty(&reloc_root->root_list)) @@ -2537,6 +2572,13 @@ again: goto out; } } else { + if (!IS_ERR(root)) { + if (root->reloc_root == reloc_root) + root->reloc_root = NULL; + clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, + &root->state); + } + list_del_init(&reloc_root->root_list); /* Don't forget to queue this reloc root for cleanup */ list_add_tail(&reloc_root->reloc_dirty_list, @@ -2562,7 +2604,21 @@ out: free_reloc_roots(&reloc_roots); } - BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); + /* + * We used to have + * + * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); + * + * here, but it's wrong. If we fail to start the transaction in + * prepare_to_merge() we will have only 0 ref reloc roots, none of which + * have actually been removed from the reloc_root_tree rb tree. This is + * fine because we're bailing here, and we hold a reference on the root + * for the list that holds it, so these roots will be cleaned up when we + * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root + * will be cleaned up on unmount. + * + * The remaining nodes will be cleaned up by free_reloc_control. + */ } static void free_block_list(struct rb_root *blocks) @@ -3162,9 +3218,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, ret = relocate_tree_block(trans, rc, node, &block->key, path); if (ret < 0) { - if (ret != -EAGAIN || &block->rb_node == rb_first(blocks)) - err = ret; - goto out; + err = ret; + break; } } out: @@ -4140,12 +4195,6 @@ restart: if (!RB_EMPTY_ROOT(&blocks)) { ret = relocate_tree_blocks(trans, rc, &blocks); if (ret < 0) { - /* - * if we fail to relocate tree blocks, force to update - * backref cache when committing transaction. 
- */ - rc->backref_cache.last_trans = trans->transid - 1; - if (ret != -EAGAIN) { err = ret; break; @@ -4215,10 +4264,10 @@ restart: goto out_free; } btrfs_commit_transaction(trans); +out_free: ret = clean_dirty_subvols(rc); if (ret < 0 && !err) err = ret; -out_free: btrfs_free_block_rsv(fs_info, rc->block_rsv); btrfs_free_path(path); return err; @@ -4320,6 +4369,18 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) return rc; } +static void free_reloc_control(struct reloc_control *rc) +{ + struct mapping_node *node, *tmp; + + free_reloc_roots(&rc->reloc_roots); + rbtree_postorder_for_each_entry_safe(node, tmp, + &rc->reloc_root_tree.rb_root, rb_node) + kfree(node); + + kfree(rc); +} + /* * Print the block group being relocated */ @@ -4367,7 +4428,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) rc->extent_root = extent_root; rc->block_group = bg; - ret = btrfs_inc_block_group_ro(rc->block_group); + ret = btrfs_inc_block_group_ro(rc->block_group, true); if (ret) { err = ret; goto out; @@ -4452,7 +4513,7 @@ out: btrfs_dec_block_group_ro(rc->block_group); iput(rc->data_inode); btrfs_put_block_group(rc->block_group); - kfree(rc); + free_reloc_control(rc); return err; } @@ -4575,9 +4636,8 @@ int btrfs_recover_relocation(struct btrfs_root *root) trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { - unset_reloc_control(rc); err = PTR_ERR(trans); - goto out_free; + goto out_unset; } rc->merge_reloc_tree = 1; @@ -4597,7 +4657,8 @@ int btrfs_recover_relocation(struct btrfs_root *root) if (IS_ERR(fs_root)) { err = PTR_ERR(fs_root); list_add_tail(&reloc_root->root_list, &reloc_roots); - goto out_free; + btrfs_end_transaction(trans); + goto out_unset; } err = __add_reloc_root(reloc_root); @@ -4607,7 +4668,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) err = btrfs_commit_transaction(trans); if (err) - goto out_free; + goto out_unset; merge_reloc_roots(rc); @@ -4616,15 +4677,16 @@ int btrfs_recover_relocation(struct btrfs_root *root) trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { err = PTR_ERR(trans); - goto out_free; + goto out_clean; } err = btrfs_commit_transaction(trans); - +out_clean: ret = clean_dirty_subvols(rc); if (ret < 0 && !err) err = ret; -out_free: - kfree(rc); +out_unset: + unset_reloc_control(rc); + free_reloc_control(rc); out: if (!list_empty(&reloc_roots)) free_reloc_roots(&reloc_roots); @@ -4711,11 +4773,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, BUG_ON(rc->stage == UPDATE_DATA_PTRS && root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { - if (buf == root->node) - __update_reloc_root(root, cow->start); - } - level = btrfs_header_level(buf); if (btrfs_header_generation(buf) <= btrfs_root_last_snapshot(&root->root_item)) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a7b043fd7a57..e5db948daa12 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3560,7 +3560,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); - ret = btrfs_inc_block_group_ro(cache); + + /* + * Don't do chunk preallocation for scrub. + * + * This is especially important for SYSTEM bgs, or we can hit + * -EFBIG from btrfs_finish_chunk_alloc() like: + * 1. The only SYSTEM bg is marked RO. + * Since SYSTEM bg is small, that's pretty common. + * 2. New SYSTEM bg will be allocated + * Due to regular version will allocate new chunk. + * 3. 
New SYSTEM bg is empty and will get cleaned up + * Before cleanup really happens, it's marked RO again. + * 4. Empty SYSTEM bg gets scrubbed + * We go back to 2. + * + * This can easily boost the number of SYSTEM chunks if the cleaner + * thread can't be triggered fast enough, and use up all the space + * in btrfs_super_block::sys_chunk_array + */ + ret = btrfs_inc_block_group_ro(cache, false); if (!ret && sctx->is_dev_replace) { /* * If we are doing a device replace wait for any tasks @@ -3717,7 +3736,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) - return -EIO; + return -EROFS; /* Seed devices of a new filesystem have their own generation. */ if (scrub_dev->fs_devices != fs_info->fs_devices) @@ -3742,50 +3761,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, return 0; } +static void scrub_workers_put(struct btrfs_fs_info *fs_info) +{ + if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, + &fs_info->scrub_lock)) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; + + scrub_workers = fs_info->scrub_workers; + scrub_wr_comp = fs_info->scrub_wr_completion_workers; + scrub_parity = fs_info->scrub_parity_workers; + + fs_info->scrub_workers = NULL; + fs_info->scrub_wr_completion_workers = NULL; + fs_info->scrub_parity_workers = NULL; + mutex_unlock(&fs_info->scrub_lock); + + btrfs_destroy_workqueue(scrub_workers); + btrfs_destroy_workqueue(scrub_wr_comp); + btrfs_destroy_workqueue(scrub_parity); + } +} + /* * Get a reference count on fs_info->scrub_workers. Start the workers if necessary. */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; + int ret = -ENOMEM; - lockdep_assert_held(&fs_info->scrub_lock); + if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) + return 0; + scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, + is_dev_replace ?
1 : max_active, 4); - if (!fs_info->scrub_workers) - goto fail_scrub_workers; - - ASSERT(fs_info->scrub_wr_completion_workers == NULL); - fs_info->scrub_wr_completion_workers = - btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, - max_active, 2); - if (!fs_info->scrub_wr_completion_workers) - goto fail_scrub_wr_completion_workers; - - ASSERT(fs_info->scrub_parity_workers == NULL); - fs_info->scrub_parity_workers = - btrfs_alloc_workqueue(fs_info, "scrubparity", flags, - max_active, 2); - if (!fs_info->scrub_parity_workers) - goto fail_scrub_parity_workers; - + ASSERT(fs_info->scrub_workers == NULL && + fs_info->scrub_wr_completion_workers == NULL && + fs_info->scrub_parity_workers == NULL); + fs_info->scrub_workers = scrub_workers; + fs_info->scrub_wr_completion_workers = scrub_wr_comp; + fs_info->scrub_parity_workers = scrub_parity; refcount_set(&fs_info->scrub_workers_refcnt, 1); - } else { - refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + return 0; } - return 0; + /* Other thread raced in and created the workers for us */ + refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + ret = 0; + btrfs_destroy_workqueue(scrub_parity); fail_scrub_parity_workers: - btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); + btrfs_destroy_workqueue(scrub_wr_comp); fail_scrub_wr_completion_workers: - btrfs_destroy_workqueue(fs_info->scrub_workers); + btrfs_destroy_workqueue(scrub_workers); fail_scrub_workers: - return -ENOMEM; + return ret; } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, @@ -3796,9 +3849,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, int ret; struct btrfs_device *dev; unsigned int nofs_flag; - struct btrfs_workqueue *scrub_workers = NULL; - struct btrfs_workqueue *scrub_wr_comp = NULL; - struct btrfs_workqueue *scrub_parity = NULL; if (btrfs_fs_closing(fs_info)) return -EAGAIN; @@ -3845,13 +3895,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, if (IS_ERR(sctx)) return PTR_ERR(sctx); + ret = scrub_workers_get(fs_info, is_dev_replace); + if (ret) + goto out_free_ctx; + mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; - goto out_free_ctx; + goto out; } if (!is_dev_replace && !readonly && @@ -3860,7 +3914,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; - goto out_free_ctx; + goto out; } mutex_lock(&fs_info->scrub_lock); @@ -3869,7 +3923,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; - goto out_free_ctx; + goto out; } down_read(&fs_info->dev_replace.rwsem); @@ -3880,17 +3934,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; - goto out_free_ctx; + goto out; } up_read(&fs_info->dev_replace.rwsem); - ret = scrub_workers_get(fs_info, is_dev_replace); - if (ret) { - mutex_unlock(&fs_info->scrub_lock); - mutex_unlock(&fs_info->fs_devices->device_list_mutex); - goto out_free_ctx; - } - sctx->readonly = 
readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); @@ -3943,24 +3990,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; - if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) { - scrub_workers = fs_info->scrub_workers; - scrub_wr_comp = fs_info->scrub_wr_completion_workers; - scrub_parity = fs_info->scrub_parity_workers; - - fs_info->scrub_workers = NULL; - fs_info->scrub_wr_completion_workers = NULL; - fs_info->scrub_parity_workers = NULL; - } mutex_unlock(&fs_info->scrub_lock); - btrfs_destroy_workqueue(scrub_workers); - btrfs_destroy_workqueue(scrub_wr_comp); - btrfs_destroy_workqueue(scrub_parity); + scrub_workers_put(fs_info); scrub_put_ctx(sctx); return ret; - +out: + scrub_workers_put(fs_info); out_free_ctx: scrub_free_ctx(sctx); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 3eb0fec2488a..ecdefa7262d2 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -23,6 +23,7 @@ #include "btrfs_inode.h" #include "transaction.h" #include "compression.h" +#include "xattr.h" /* * Maximum number of references an extent can have in order for us to attempt to @@ -237,6 +238,7 @@ struct waiting_dir_move { * after this directory is moved, we can try to rmdir the ino rmdir_ino. */ u64 rmdir_ino; + u64 rmdir_gen; bool orphanized; }; @@ -322,7 +324,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); static struct waiting_dir_move * get_waiting_dir_move(struct send_ctx *sctx, u64 ino); -static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino); +static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen); static int need_send_hole(struct send_ctx *sctx) { @@ -1256,12 +1258,21 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) */ if (found->root == bctx->sctx->send_root) { /* - * TODO for the moment we don't accept clones from the inode - * that is currently send. We may change this when - * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same - * file. + * If the source inode was not yet processed we can't issue a + * clone operation, as the source extent does not exist yet at + * the destination of the stream. */ - if (ino >= bctx->cur_objectid) + if (ino > bctx->cur_objectid) + return 0; + /* + * We clone from the inode currently being sent as long as the + * source extent is already processed, otherwise we could try + * to clone from an extent that does not exist yet at the + * destination of the stream. 
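/*
 * Illustrative condensation of the __iterate_backrefs() checks above into
 * a standalone predicate: cloning from the send root is allowed from
 * already-processed inodes, or from the inode currently being sent when
 * the source range lies entirely below the destination write cursor. The
 * function name is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool may_clone(uint64_t found_ino, uint64_t cur_ino, uint64_t offset,
		      uint64_t extent_len, uint64_t next_write_offset)
{
	if (found_ino > cur_ino)
		return false;	/* source inode not processed yet */
	if (found_ino == cur_ino && offset + extent_len > next_write_offset)
		return false;	/* range not yet written at the destination */
	return true;
}

int main(void)
{
	/* Same inode, extent fully below the write cursor: OK. */
	printf("%d\n", may_clone(258, 258, 0, 4096, 8192));	/* 1 */
	/* Same inode, extent beyond the cursor: must not clone. */
	printf("%d\n", may_clone(258, 258, 8192, 4096, 8192));	/* 0 */
	return 0;
}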
+ */ + if (ino == bctx->cur_objectid && + offset + bctx->extent_len > + bctx->sctx->cur_inode_next_write_offset) return 0; } @@ -2296,7 +2307,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, fs_path_reset(name); - if (is_waiting_for_rm(sctx, ino)) { + if (is_waiting_for_rm(sctx, ino, gen)) { ret = gen_unique_name(sctx, ino, gen, name); if (ret < 0) goto out; @@ -2855,8 +2866,8 @@ out: return ret; } -static struct orphan_dir_info * -add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) +static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx, + u64 dir_ino, u64 dir_gen) { struct rb_node **p = &sctx->orphan_dirs.rb_node; struct rb_node *parent = NULL; @@ -2865,20 +2876,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) while (*p) { parent = *p; entry = rb_entry(parent, struct orphan_dir_info, node); - if (dir_ino < entry->ino) { + if (dir_ino < entry->ino) p = &(*p)->rb_left; - } else if (dir_ino > entry->ino) { + else if (dir_ino > entry->ino) p = &(*p)->rb_right; - } else { + else if (dir_gen < entry->gen) + p = &(*p)->rb_left; + else if (dir_gen > entry->gen) + p = &(*p)->rb_right; + else return entry; - } } odi = kmalloc(sizeof(*odi), GFP_KERNEL); if (!odi) return ERR_PTR(-ENOMEM); odi->ino = dir_ino; - odi->gen = 0; + odi->gen = dir_gen; odi->last_dir_index_offset = 0; rb_link_node(&odi->node, parent, p); @@ -2886,8 +2900,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) return odi; } -static struct orphan_dir_info * -get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) +static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx, + u64 dir_ino, u64 gen) { struct rb_node *n = sctx->orphan_dirs.rb_node; struct orphan_dir_info *entry; @@ -2898,15 +2912,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) n = n->rb_left; else if (dir_ino > entry->ino) n = n->rb_right; + else if (gen < entry->gen) + n = n->rb_left; + else if (gen > entry->gen) + n = n->rb_right; else return entry; } return NULL; } -static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) +static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen) { - struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); + struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen); return odi != NULL; } @@ -2951,7 +2969,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, key.type = BTRFS_DIR_INDEX_KEY; key.offset = 0; - odi = get_orphan_dir_info(sctx, dir); + odi = get_orphan_dir_info(sctx, dir, dir_gen); if (odi) key.offset = odi->last_dir_index_offset; @@ -2982,7 +3000,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, dm = get_waiting_dir_move(sctx, loc.objectid); if (dm) { - odi = add_orphan_dir_info(sctx, dir); + odi = add_orphan_dir_info(sctx, dir, dir_gen); if (IS_ERR(odi)) { ret = PTR_ERR(odi); goto out; @@ -2990,12 +3008,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, odi->gen = dir_gen; odi->last_dir_index_offset = found_key.offset; dm->rmdir_ino = dir; + dm->rmdir_gen = dir_gen; ret = 0; goto out; } if (loc.objectid > send_progress) { - odi = add_orphan_dir_info(sctx, dir); + odi = add_orphan_dir_info(sctx, dir, dir_gen); if (IS_ERR(odi)) { ret = PTR_ERR(odi); goto out; @@ -3035,6 +3054,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) return -ENOMEM; dm->ino = ino; dm->rmdir_ino = 0; + dm->rmdir_gen = 0; dm->orphanized = orphanized; while (*p) { @@ -3180,7 +3200,7 @@ static int path_loop(struct 
send_ctx *sctx, struct fs_path *name, while (ino != BTRFS_FIRST_FREE_OBJECTID) { fs_path_reset(name); - if (is_waiting_for_rm(sctx, ino)) + if (is_waiting_for_rm(sctx, ino, gen)) break; if (is_waiting_for_move(sctx, ino)) { if (*ancestor_ino == 0) @@ -3220,6 +3240,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) u64 parent_ino, parent_gen; struct waiting_dir_move *dm = NULL; u64 rmdir_ino = 0; + u64 rmdir_gen; u64 ancestor; bool is_orphan; int ret; @@ -3234,6 +3255,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) dm = get_waiting_dir_move(sctx, pm->ino); ASSERT(dm); rmdir_ino = dm->rmdir_ino; + rmdir_gen = dm->rmdir_gen; is_orphan = dm->orphanized; free_waiting_dir_move(sctx, dm); @@ -3270,6 +3292,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) dm = get_waiting_dir_move(sctx, pm->ino); ASSERT(dm); dm->rmdir_ino = rmdir_ino; + dm->rmdir_gen = rmdir_gen; } goto out; } @@ -3288,7 +3311,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) struct orphan_dir_info *odi; u64 gen; - odi = get_orphan_dir_info(sctx, rmdir_ino); + odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen); if (!odi) { /* already deleted */ goto finish; @@ -3802,6 +3825,72 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) return 0; } +/* + * When processing the new references for an inode we may orphanize an existing + * directory inode because its old name conflicts with one of the new references + * of the current inode. Later, when processing another new reference of our + * inode, we might need to orphanize another inode, but the path we have in the + * reference reflects the pre-orphanization name of the directory we previously + * orphanized. For example: + * + * parent snapshot looks like: + * + * . (ino 256) + * |----- f1 (ino 257) + * |----- f2 (ino 258) + * |----- d1/ (ino 259) + * |----- d2/ (ino 260) + * + * send snapshot looks like: + * + * . (ino 256) + * |----- d1 (ino 258) + * |----- f2/ (ino 259) + * |----- f2_link/ (ino 260) + * | |----- f1 (ino 257) + * | + * |----- d2 (ino 258) + * + * When processing inode 257 we compute the name for inode 259 as "d1", and we + * cache it in the name cache. Later when we start processing inode 258, when + * collecting all its new references we set a full path of "d1/d2" for its new + * reference with name "d2". When we start processing the new references we + * start by processing the new reference with name "d1", and this results in + * orphanizing inode 259, since its old reference causes a conflict. Then we + * move on to the next new reference, with name "d2", and we find out we must + * orphanize inode 260, as its old reference conflicts with ours - but for the + * orphanization we use a source path corresponding to the path we stored in the + * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the + * receiver fail since the path component "d1/" no longer exists, it was renamed + * to "o259-6-0/" when processing the previous new reference. So in this case we + * must recompute the path in the new reference and use it for the new + * orphanization operation.
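The orphan_dir_info tree is now keyed by the (ino, gen) pair instead of the inode number alone; the rmdir_gen plumbing above carries the generation to every lookup. As a standalone sketch, the ordering the two rbtree walks now implement is the usual compound-key comparison (struct name invented):

    #include <stdint.h>

    struct odi_key {
        uint64_t ino;
        uint64_t gen;
    };

    /* Compare inode numbers first; the generation only breaks ties. */
    static int odi_key_cmp(const struct odi_key *a, const struct odi_key *b)
    {
        if (a->ino != b->ino)
            return a->ino < b->ino ? -1 : 1;
        if (a->gen != b->gen)
            return a->gen < b->gen ? -1 : 1;
        return 0;
    }

Keying on the generation as well is what keeps a stale orphan entry from matching a directory that was deleted and recreated with a reused inode number.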
+ */ +static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) +{ + char *name; + int ret; + + name = kmemdup(ref->name, ref->name_len, GFP_KERNEL); + if (!name) + return -ENOMEM; + + fs_path_reset(ref->full_path); + ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path); + if (ret < 0) + goto out; + + ret = fs_path_add(ref->full_path, name, ref->name_len); + if (ret < 0) + goto out; + + /* Update the reference's base name pointer. */ + set_ref_path(ref, ref->full_path); +out: + kfree(name); + return ret; +} + /* * This does all the move/link/unlink/rmdir magic. */ @@ -3870,52 +3959,56 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) goto out; } + /* + * Before doing any rename and link operations, do a first pass on the + * new references to orphanize any unprocessed inodes that may have a + * reference that conflicts with one of the new references of the current + * inode. This needs to happen first because a new reference may conflict + * with the old reference of a parent directory, so we must make sure + * that the paths used for link and rename commands don't use an + * orphanized name when an ancestor was not yet orphanized. + * + * Example: + * + * Parent snapshot: + * + * . (ino 256) + * |----- testdir/ (ino 259) + * | |----- a (ino 257) + * | + * |----- b (ino 258) + * + * Send snapshot: + * + * . (ino 256) + * |----- testdir_2/ (ino 259) + * | |----- a (ino 260) + * | + * |----- testdir (ino 257) + * |----- b (ino 257) + * |----- b2 (ino 258) + * + * Processing the new reference for inode 257 with name "b" may happen + * before processing the new reference with name "testdir". If so, we + * must make sure that by the time we send a link command to create the + * hard link "b", inode 259 was already orphanized, since the generated + * path in "valid_path" already contains the orphanized name for 259. + * We are processing inode 257, so only later, when processing 259, do we + * perform the rename operation to change its temporary (orphanized) name to + * "testdir_2". + */ list_for_each_entry(cur, &sctx->new_refs, list) { - /* - * We may have refs where the parent directory does not exist - * yet. This happens if the parent directories inum is higher - * than the current inum. To handle this case, we create the - * parent directory out of order. But we need to check if this - * did already happen before due to other refs in the same dir. - */ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); if (ret < 0) goto out; - if (ret == inode_state_will_create) { - ret = 0; - /* - * First check if any of the current inodes refs did - * already create the dir. - */ - list_for_each_entry(cur2, &sctx->new_refs, list) { - if (cur == cur2) - break; - if (cur2->dir == cur->dir) { - ret = 1; - break; - } - } - - /* - * If that did not happen, check if a previous inode - * did already create the dir. - */ - if (!ret) - ret = did_create_dir(sctx, cur->dir); - if (ret < 0) - goto out; - if (!ret) { - ret = send_create_inode(sctx, cur->dir); - if (ret < 0) - goto out; - } - } + if (ret == inode_state_will_create) + continue; /* - * Check if this new ref would overwrite the first ref of - * another unprocessed inode. If yes, orphanize the - * overwritten inode. If we find an overwritten ref that is - * not the first ref, simply unlink it. + * Check if this new ref would overwrite the first ref of another + * unprocessed inode. If yes, orphanize the overwritten inode. + * If we find an overwritten ref that is not the first ref, + * simply unlink it.
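The orphanize-or-unlink rule that the comment above describes can be condensed into a small decision table; a hypothetical standalone model (enum and helper invented for illustration, not kernel code):

    #include <stdbool.h>

    enum overwrite_action {
        OW_NOTHING,    /* no conflict with an unprocessed inode */
        OW_ORPHANIZE,  /* conflicting name is the inode's first ref:
                          move it to a temporary oINO-GEN-SEQ name */
        OW_UNLINK,     /* conflicting name is a secondary hard link:
                          it can simply be removed */
    };

    /*
     * 'conflict' says the new ref collides with an existing name,
     * 'processed' whether the owner of that name was already processed,
     * 'first_ref' whether the colliding name is that inode's first ref.
     */
    static enum overwrite_action classify_conflict(bool conflict,
                                                   bool processed,
                                                   bool first_ref)
    {
        if (!conflict || processed)
            return OW_NOTHING;
        return first_ref ? OW_ORPHANIZE : OW_UNLINK;
    }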
*/ ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, cur->name, cur->name_len, @@ -3932,6 +4025,12 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) struct name_cache_entry *nce; struct waiting_dir_move *wdm; + if (orphanized_dir) { + ret = refresh_ref_path(sctx, cur); + if (ret < 0) + goto out; + } + ret = orphanize_inode(sctx, ow_inode, ow_gen, cur->full_path); if (ret < 0) @@ -3994,6 +4093,49 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) } } + } + + list_for_each_entry(cur, &sctx->new_refs, list) { + /* + * We may have refs where the parent directory does not exist + * yet. This happens if the parent directory's inum is higher + * than the current inum. To handle this case, we create the + * parent directory out of order. But we need to check if this + * did already happen before due to other refs in the same dir. + */ + ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); + if (ret < 0) + goto out; + if (ret == inode_state_will_create) { + ret = 0; + /* + * First check if any of the current inode's refs did + * already create the dir. + */ + list_for_each_entry(cur2, &sctx->new_refs, list) { + if (cur == cur2) + break; + if (cur2->dir == cur->dir) { + ret = 1; + break; + } + } + + /* + * If that did not happen, check if a previous inode + * did already create the dir. + */ + if (!ret) + ret = did_create_dir(sctx, cur->dir); + if (ret < 0) + goto out; + if (!ret) { + ret = send_create_inode(sctx, cur->dir); + if (ret < 0) + goto out; + } + } + if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { ret = wait_for_dest_dir_move(sctx, cur, is_orphan); if (ret < 0) @@ -4536,6 +4678,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, struct fs_path *p; struct posix_acl_xattr_header dummy_acl; + /* Capabilities are emitted by finish_inode_if_needed */ + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) + return 0; + p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -5098,6 +5244,64 @@ static int send_extent_data(struct send_ctx *sctx, return 0; } +/* + * Search for a capability xattr related to sctx->cur_ino. If the capability is + * found, call the send_set_xattr function to emit it. + * + * Return 0 if there isn't a capability, or when the capability was emitted + * successfully, or < 0 if an error occurred.
+ */ +static int send_capabilities(struct send_ctx *sctx) +{ + struct fs_path *fspath = NULL; + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct extent_buffer *leaf; + unsigned long data_ptr; + char *buf = NULL; + int buf_len; + int ret = 0; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); + if (!di) { + /* There is no xattr for this inode */ + goto out; + } else if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; + } + + leaf = path->nodes[0]; + buf_len = btrfs_dir_data_len(leaf, di); + + fspath = fs_path_alloc(); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!fspath || !buf) { + ret = -ENOMEM; + goto out; + } + + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); + if (ret < 0) + goto out; + + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); + read_extent_buffer(leaf, buf, data_ptr, buf_len); + + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, + strlen(XATTR_NAME_CAPS), buf, buf_len); +out: + kfree(buf); + fs_path_free(fspath); + btrfs_free_path(path); + return ret; +} + static int clone_range(struct send_ctx *sctx, struct clone_root *clone_root, const u64 disk_byte, @@ -5315,6 +5519,21 @@ static int clone_range(struct send_ctx *sctx, break; offset += clone_len; clone_root->offset += clone_len; + + /* + * If we are cloning from the file we are currently processing, + * and using the send root as the clone root, we must stop once + * the current clone offset reaches the current eof of the file + * at the receiver, otherwise we would issue an invalid clone + * operation (source range going beyond eof) and cause the + * receiver to fail. So if we reach the current eof, bail out + * and fallback to a regular write. + */ + if (clone_root->root == sctx->send_root && + clone_root->ino == sctx->cur_ino && + clone_root->offset >= sctx->cur_inode_next_write_offset) + break; + data_offset += clone_len; next: path->slots[0]++; @@ -6001,6 +6220,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) goto out; } + ret = send_capabilities(sctx); + if (ret < 0) + goto out; + /* * If other directory inodes depended on our current directory * inode's move/rename, now do their move/rename operations. 
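The early break added to clone_range() further down is the companion to the __iterate_backrefs() change earlier in this file: when cloning from the file currently being sent, the source offset must never reach the receiver's current EOF. The bail-out condition in isolation (invented names, plain C):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the clone loop must stop and fall back to regular writes. */
    static bool must_stop_cloning(bool clone_root_is_send_root,
                                  uint64_t clone_ino, uint64_t clone_offset,
                                  uint64_t cur_ino, uint64_t receiver_eof)
    {
        return clone_root_is_send_root &&
               clone_ino == cur_ino &&
               clone_offset >= receiver_eof;
    }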
@@ -7157,7 +7380,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1); - sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL); + sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL); if (!sctx->clone_roots) { ret = -ENOMEM; goto out; diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index e8a4b0ebe97f..90500b6c41fc 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -160,10 +160,9 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global) return (global->size << 1); } -static int can_overcommit(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 bytes, - enum btrfs_reserve_flush_enum flush, - bool system_chunk) +int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, u64 bytes, + enum btrfs_reserve_flush_enum flush) { u64 profile; u64 avail; @@ -174,7 +173,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) return 0; - if (system_chunk) + if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) profile = btrfs_system_alloc_profile(fs_info); else profile = btrfs_metadata_alloc_profile(fs_info); @@ -228,8 +227,8 @@ again: /* Check and see if our ticket can be satisified now. */ if ((used + ticket->bytes <= space_info->total_bytes) || - can_overcommit(fs_info, space_info, ticket->bytes, flush, - false)) { + btrfs_can_overcommit(fs_info, space_info, ticket->bytes, + flush)) { btrfs_space_info_update_bytes_may_use(fs_info, space_info, ticket->bytes); @@ -304,8 +303,8 @@ again: cache->key.objectid, cache->key.offset, btrfs_block_group_used(&cache->item), cache->pinned, cache->reserved, cache->ro ? "[readonly]" : ""); - btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); + btrfs_dump_free_space(cache, bytes); } if (++index < BTRFS_NR_RAID_TYPES) goto again; @@ -462,6 +461,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, struct reserve_ticket *ticket = NULL; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; + struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; struct btrfs_trans_handle *trans; u64 bytes_needed; u64 reclaim_bytes = 0; @@ -524,6 +524,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, spin_lock(&delayed_refs_rsv->lock); reclaim_bytes += delayed_refs_rsv->reserved; spin_unlock(&delayed_refs_rsv->lock); + + spin_lock(&trans_rsv->lock); + reclaim_bytes += trans_rsv->reserved; + spin_unlock(&trans_rsv->lock); + if (reclaim_bytes >= bytes_needed) goto commit; bytes_needed -= reclaim_bytes; @@ -628,8 +633,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, static inline u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - bool system_chunk) + struct btrfs_space_info *space_info) { struct reserve_ticket *ticket; u64 used; @@ -644,14 +648,14 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, return to_reclaim; to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); - if (can_overcommit(fs_info, space_info, to_reclaim, - BTRFS_RESERVE_FLUSH_ALL, system_chunk)) + if (btrfs_can_overcommit(fs_info, space_info, to_reclaim, + BTRFS_RESERVE_FLUSH_ALL)) return 0; used = btrfs_space_info_used(space_info, true); - if (can_overcommit(fs_info, space_info, SZ_1M, - BTRFS_RESERVE_FLUSH_ALL, system_chunk)) + if 
(btrfs_can_overcommit(fs_info, space_info, SZ_1M, + BTRFS_RESERVE_FLUSH_ALL)) expected = div_factor_fine(space_info->total_bytes, 95); else expected = div_factor_fine(space_info->total_bytes, 90); @@ -667,7 +671,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, - u64 used, bool system_chunk) + u64 used) { u64 thresh = div_factor_fine(space_info->total_bytes, 98); @@ -675,14 +679,41 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh) return 0; - if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info, - system_chunk)) + if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info)) return 0; return (used >= thresh && !btrfs_fs_closing(fs_info) && !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } +static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket) +{ + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; + u64 min_bytes; + + if (global_rsv->space_info != space_info) + return false; + + spin_lock(&global_rsv->lock); + min_bytes = div_factor(global_rsv->size, 5); + if (global_rsv->reserved < min_bytes + ticket->bytes) { + spin_unlock(&global_rsv->lock); + return false; + } + global_rsv->reserved -= ticket->bytes; + ticket->bytes = 0; + list_del_init(&ticket->list); + wake_up(&ticket->wait); + space_info->tickets_id++; + if (global_rsv->reserved < global_rsv->size) + global_rsv->full = 0; + spin_unlock(&global_rsv->lock); + + return true; +} + /* * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets * @fs_info - fs_info for this fs @@ -715,6 +746,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); + if (ticket->steal && + steal_from_global_rsv(fs_info, space_info, ticket)) + return true; + /* * may_commit_transaction will avoid committing the transaction * if it doesn't feel like the space reclaimed by the commit @@ -767,8 +802,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info, - false); + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (!to_reclaim) { space_info->flush = 0; spin_unlock(&space_info->lock); @@ -787,8 +821,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) return; } to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, - space_info, - false); + space_info); if (last_tickets_id == space_info->tickets_id) { flush_state++; } else { @@ -860,8 +893,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, int flush_state; spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info, - false); + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (!to_reclaim) { spin_unlock(&space_info->lock); return; @@ -934,6 +966,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, switch (flush) { case BTRFS_RESERVE_FLUSH_ALL: + case BTRFS_RESERVE_FLUSH_ALL_STEAL: wait_reserve_ticket(fs_info, space_info, ticket); break; case BTRFS_RESERVE_FLUSH_LIMIT: @@ -992,8 +1025,7 @@ static int handle_reserve_ticket(struct 
btrfs_fs_info *fs_info, static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, - enum btrfs_reserve_flush_enum flush, - bool system_chunk) + enum btrfs_reserve_flush_enum flush) { struct reserve_ticket ticket; u64 used; @@ -1015,8 +1047,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, */ if (!pending_tickets && ((used + orig_bytes <= space_info->total_bytes) || - can_overcommit(fs_info, space_info, orig_bytes, flush, - system_chunk))) { + btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { btrfs_space_info_update_bytes_may_use(fs_info, space_info, orig_bytes); ret = 0; @@ -1033,7 +1064,9 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, ticket.bytes = orig_bytes; ticket.error = 0; init_waitqueue_head(&ticket.wait); - if (flush == BTRFS_RESERVE_FLUSH_ALL) { + ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); + if (flush == BTRFS_RESERVE_FLUSH_ALL || + flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { list_add_tail(&ticket.list, &space_info->tickets); if (!space_info->flush) { space_info->flush = 1; @@ -1056,8 +1089,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * the async reclaim as we will panic. */ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && - need_do_async_reclaim(fs_info, space_info, - used, system_chunk) && + need_do_async_reclaim(fs_info, space_info, used) && !work_busy(&fs_info->async_reclaim_work)) { trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "preempt"); @@ -1094,10 +1126,9 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; int ret; - bool system_chunk = (root == fs_info->chunk_root); ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info, - orig_bytes, flush, system_chunk); + orig_bytes, flush); if (ret == -ENOSPC && unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { if (block_rsv != global_rsv && diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 8867e84aa33d..b9cffc62cafa 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -72,6 +72,7 @@ struct btrfs_space_info { struct reserve_ticket { u64 bytes; int error; + bool steal; struct list_head list; wait_queue_head_t wait; }; @@ -128,6 +129,9 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root, enum btrfs_reserve_flush_enum flush); void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); +int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, u64 bytes, + enum btrfs_reserve_flush_enum flush); static inline void btrfs_space_info_free_bytes_may_use( struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index aea24202cd35..1a69bdb96fb2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -241,7 +241,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = trans->fs_info; - trans->aborted = errno; + WRITE_ONCE(trans->aborted, errno); /* Nothing used. The other threads that have joined this * transaction may be able to continue. 
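The switch to WRITE_ONCE() above pairs with the READ_ONCE() reads (wrapped later in this series by the TRANS_ABORTED() macro): ->aborted is stored by one task and polled by others with no shared lock, so both sides need single, untorn accesses. A userspace approximation of the pattern, assuming GNU C (the real macros come from the kernel's <linux/compiler.h>):

    #include <stdint.h>

    /* A volatile access forces exactly one, untorn load or store. */
    #define WRITE_ONCE_MODEL(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE_MODEL(x)       (*(volatile __typeof__(x) *)&(x))

    struct trans_model {
        short aborted;  /* set by one task, polled by others, no lock */
    };

    static void abort_trans(struct trans_model *t, short errno_code)
    {
        WRITE_ONCE_MODEL(t->aborted, errno_code);
    }

    static int trans_is_aborted(struct trans_model *t)
    {
        return READ_ONCE_MODEL(t->aborted) != 0;
    }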
*/ if (!trans->dirty && list_empty(&trans->new_bgs)) { @@ -435,6 +435,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, char *compress_type; bool compress_force = false; enum btrfs_compression_type saved_compress_type; + int saved_compress_level; bool saved_compress_force; int no_compress = 0; @@ -517,6 +518,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->compress_type : BTRFS_COMPRESS_NONE; saved_compress_force = btrfs_test_opt(info, FORCE_COMPRESS); + saved_compress_level = info->compress_level; if (token == Opt_compress || token == Opt_compress_force || strncmp(args[0].from, "zlib", 4) == 0) { @@ -542,6 +544,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } else if (strncmp(args[0].from, "lzo", 3) == 0) { compress_type = "lzo"; info->compress_type = BTRFS_COMPRESS_LZO; + info->compress_level = 0; btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATASUM); @@ -561,6 +564,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, no_compress = 0; } else if (strncmp(args[0].from, "no", 2) == 0) { compress_type = "no"; + info->compress_level = 0; + info->compress_type = 0; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); compress_force = false; @@ -581,11 +586,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, */ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); } - if ((btrfs_test_opt(info, COMPRESS) && - (info->compress_type != saved_compress_type || - compress_force != saved_compress_force)) || - (!btrfs_test_opt(info, COMPRESS) && - no_compress == 1)) { + if (no_compress == 1) { + btrfs_info(info, "use no compression"); + } else if ((info->compress_type != saved_compress_type) || + (compress_force != saved_compress_force) || + (info->compress_level != saved_compress_level)) { btrfs_info(info, "%s %s compression, level %d", (compress_force) ? 
"force" : "use", compress_type, info->compress_level); @@ -1005,8 +1010,8 @@ out: return error; } -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, - u64 subvol_objectid) +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root; @@ -1287,6 +1292,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); const char *compress_type; + const char *subvol_name; if (btrfs_test_opt(info, DEGRADED)) seq_puts(seq, ",degraded"); @@ -1371,8 +1377,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",ref_verify"); seq_printf(seq, ",subvolid=%llu", BTRFS_I(d_inode(dentry))->root->root_key.objectid); - seq_puts(seq, ",subvol="); - seq_dentry(seq, dentry, " \t\n\\"); + subvol_name = btrfs_get_subvol_name_from_objectid(info, + BTRFS_I(d_inode(dentry))->root->root_key.objectid); + if (!IS_ERR(subvol_name)) { + seq_puts(seq, ",subvol="); + seq_escape(seq, subvol_name, " \t\n\\"); + kfree(subvol_name); + } return 0; } @@ -1417,8 +1428,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, goto out; } } - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), - subvol_objectid); + subvol_name = btrfs_get_subvol_name_from_objectid( + btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; @@ -1781,6 +1792,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) btrfs_scrub_cancel(fs_info); btrfs_pause_balance(fs_info); + /* + * Pause the qgroup rescan worker if it is running. We don't want + * it to be still running after we are in RO mode, as after that, + * by the time we unmount, it might have left a transaction open, + * so we would leak the transaction and/or crash. + */ + btrfs_qgroup_wait_for_completion(fs_info, false); + ret = btrfs_commit_super(fs_info); if (ret) goto restore; @@ -1848,6 +1867,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) set_bit(BTRFS_FS_OPEN, &fs_info->flags); } out: + /* + * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS, + * since the absence of the flag means it can be toggled off by remount. + */ + *flags |= SB_I_VERSION; + wake_up_process(fs_info->transaction_kthread); btrfs_remount_cleanup(fs_info, old_opts); return 0; @@ -2254,9 +2279,7 @@ static int btrfs_unfreeze(struct super_block *sb) static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); - struct btrfs_fs_devices *cur_devices; struct btrfs_device *dev, *first_dev = NULL; - struct list_head *head; /* * Lightweight locking of the devices. We should not need @@ -2266,18 +2289,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) * least until the rcu_read_unlock. 
*/ rcu_read_lock(); - cur_devices = fs_info->fs_devices; - while (cur_devices) { - head = &cur_devices->devices; - list_for_each_entry_rcu(dev, head, dev_list) { - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) - continue; - if (!dev->name) - continue; - if (!first_dev || dev->devid < first_dev->devid) - first_dev = dev; - } - cur_devices = cur_devices->seed; + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) + continue; + if (!dev->name) + continue; + if (!first_dev || dev->devid < first_dev->devid) + first_dev = dev; } if (first_dev) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index f6d3c80f2e28..5c299e1f2297 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -975,7 +975,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, { int error = 0; struct btrfs_device *dev; + unsigned int nofs_flag; + nofs_flag = memalloc_nofs_save(); list_for_each_entry(dev, &fs_devices->devices, dev_list) { struct hd_struct *disk; struct kobject *disk_kobj; @@ -994,6 +996,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, if (error) break; } + memalloc_nofs_restore(nofs_flag); return error; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 98b6903e3938..aca6c467d776 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -21,6 +21,7 @@ #include "dev-replace.h" #include "qgroup.h" #include "block-group.h" +#include "space-info.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -173,7 +174,7 @@ loop: cur_trans = fs_info->running_transaction; if (cur_trans) { - if (cur_trans->aborted) { + if (TRANS_ABORTED(cur_trans)) { spin_unlock(&fs_info->trans_lock); return cur_trans->aborted; } @@ -389,7 +390,7 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans) { return (trans->state >= TRANS_STATE_BLOCKED && trans->state < TRANS_STATE_UNBLOCKED && - !trans->aborted); + !TRANS_ABORTED(trans)); } /* wait for commit against the current transaction to become unblocked @@ -408,7 +409,7 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info) wait_event(fs_info->transaction_wait, cur_trans->state >= TRANS_STATE_UNBLOCKED || - cur_trans->aborted); + TRANS_ABORTED(cur_trans)); btrfs_put_transaction(cur_trans); } else { spin_unlock(&fs_info->trans_lock); @@ -451,6 +452,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, u64 num_bytes = 0; u64 qgroup_reserved = 0; bool reloc_reserved = false; + bool do_chunk_alloc = false; int ret; /* Send isn't supposed to start transactions. */ @@ -491,7 +493,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, * refill that amount for whatever is missing in the reserve. 
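The hunk that follows implements that top-up, and the added flush check restricts it to a plain full flush: only then is the request doubled so half can be migrated to the delayed refs reserve. A toy model of the sizing rule (simplified types, names invented):

    #include <stdint.h>

    enum flush_mode { FLUSH_LIMIT, FLUSH_ALL, FLUSH_ALL_STEAL };

    /* Returns the total bytes to reserve; *delayed_refs_bytes is the share
     * migrated to the delayed refs reserve afterwards. */
    static uint64_t trans_reserve_bytes(uint64_t per_item_bytes,
                                        unsigned int num_items,
                                        enum flush_mode flush,
                                        int delayed_refs_rsv_full,
                                        uint64_t *delayed_refs_bytes)
    {
        uint64_t num_bytes = per_item_bytes * num_items;

        *delayed_refs_bytes = 0;
        if (flush == FLUSH_ALL && !delayed_refs_rsv_full) {
            *delayed_refs_bytes = num_bytes;  /* half goes to delayed refs */
            num_bytes <<= 1;
        }
        return num_bytes;
    }

Note that the new BTRFS_RESERVE_FLUSH_ALL_STEAL mode deliberately skips the top-up; that is exactly what the added condition encodes.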
*/ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - if (delayed_refs_rsv->full == 0) { + if (flush == BTRFS_RESERVE_FLUSH_ALL && + delayed_refs_rsv->full == 0) { delayed_refs_bytes = num_bytes; num_bytes <<= 1; } @@ -512,6 +515,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, delayed_refs_bytes); num_bytes -= delayed_refs_bytes; } + + if (rsv->space_info->force_alloc) + do_chunk_alloc = true; } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && !delayed_refs_rsv->full) { /* @@ -590,10 +596,32 @@ again: } got_it: - btrfs_record_root_in_trans(h, root); - if (!current->journal_info) current->journal_info = h; + + /* + * If the space_info is marked ALLOC_FORCE then we'll get upgraded to + * ALLOC_FORCE the first run through, and then we won't allocate for + * anybody else who races in later. We don't care about the return + * value here. + */ + if (do_chunk_alloc && num_bytes) { + u64 flags = h->block_rsv->space_info->flags; + + btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), + CHUNK_ALLOC_NO_FORCE); + } + + /* + * btrfs_record_root_in_trans() needs to alloc new extents, and may + * call btrfs_join_transaction() while we're also starting a + * transaction. + * + * Thus it needs to be called after current->journal_info is initialized, + * or we can deadlock. + */ + btrfs_record_root_in_trans(h, root); + return h; join_fail: @@ -618,43 +646,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor) + unsigned int num_items) { - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_trans_handle *trans; - u64 num_bytes; - int ret; - - /* - * We have two callers: unlink and block group removal. The - * former should succeed even if we will temporarily exceed - * quota and the latter operates on the extent root so - * qgroup enforcement is ignored anyway.
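The deleted two-step fallback around this point is replaced by the BTRFS_RESERVE_FLUSH_ALL_STEAL mode: the ticket is flagged ->steal, and before maybe_fail_all_tickets() fails it, steal_from_global_rsv() may satisfy it from the global reserve as long as that reserve is not drained below half of its size (div_factor(size, 5) works out to 50%). A toy model of that last-resort check (struct and names invented):

    #include <stdbool.h>
    #include <stdint.h>

    struct rsv_model {
        uint64_t size;
        uint64_t reserved;
    };

    /* Satisfy ticket_bytes from the global reserve, keeping at least half. */
    static bool try_steal(struct rsv_model *global, uint64_t ticket_bytes)
    {
        uint64_t min_bytes = global->size / 2;

        if (global->reserved < min_bytes + ticket_bytes)
            return false;  /* would drain the reserve too far */
        global->reserved -= ticket_bytes;
        return true;
    }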
- */ - trans = start_transaction(root, num_items, TRANS_START, - BTRFS_RESERVE_FLUSH_ALL, false); - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) - return trans; - - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return trans; - - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, - num_bytes, min_factor); - if (ret) { - btrfs_end_transaction(trans); - return ERR_PTR(ret); - } - - trans->block_rsv = &fs_info->trans_block_rsv; - trans->bytes_reserved = num_bytes; - trace_btrfs_space_reservation(fs_info, "transaction", - trans->transid, num_bytes, 1); - - return trans; + return start_transaction(root, num_items, TRANS_START, + BTRFS_RESERVE_FLUSH_ALL_STEAL, false); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) @@ -875,10 +870,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, if (throttle) btrfs_run_delayed_iputs(info); - if (trans->aborted || + if (TRANS_ABORTED(trans) || test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) { wake_up_process(info->transaction_kthread); - err = -EIO; + if (TRANS_ABORTED(trans)) + err = trans->aborted; + else + err = -EROFS; } kmem_cache_free(btrfs_trans_handle_cachep, trans); @@ -1214,7 +1212,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) struct btrfs_root *gang[8]; int i; int ret; - int err = 0; spin_lock(&fs_info->fs_roots_radix_lock); while (1) { @@ -1226,6 +1223,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) break; for (i = 0; i < ret; i++) { struct btrfs_root *root = gang[i]; + int ret2; + radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); @@ -1247,17 +1246,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) root->node); } - err = btrfs_update_root(trans, fs_info->tree_root, + ret2 = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); + if (ret2) + return ret2; spin_lock(&fs_info->fs_roots_radix_lock); - if (err) - break; btrfs_qgroup_free_meta_all_pertrans(root); } } spin_unlock(&fs_info->fs_roots_radix_lock); - return err; + return 0; } /* @@ -1732,7 +1731,8 @@ static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info, struct btrfs_transaction *trans) { wait_event(fs_info->transaction_blocked_wait, - trans->state >= TRANS_STATE_COMMIT_START || trans->aborted); + trans->state >= TRANS_STATE_COMMIT_START || + TRANS_ABORTED(trans)); } /* @@ -1744,7 +1744,8 @@ static void wait_current_trans_commit_start_and_unblock( struct btrfs_transaction *trans) { wait_event(fs_info->transaction_wait, - trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted); + trans->state >= TRANS_STATE_UNBLOCKED || + TRANS_ABORTED(trans)); } /* @@ -1962,7 +1963,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) trans->dirty = true; /* Stop the commit early if ->aborted is set */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; btrfs_end_transaction(trans); return ret; @@ -2036,7 +2037,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wait_for_commit(cur_trans); - if (unlikely(cur_trans->aborted)) + if (TRANS_ABORTED(cur_trans)) ret = cur_trans->aborted; btrfs_put_transaction(cur_trans); @@ -2055,7 +2056,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) spin_unlock(&fs_info->trans_lock); wait_for_commit(prev_trans); - ret = 
prev_trans->aborted; + ret = READ_ONCE(prev_trans->aborted); btrfs_put_transaction(prev_trans); if (ret) @@ -2109,8 +2110,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); - /* ->aborted might be set after the previous check, so check it */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; goto scrub_continue; } @@ -2228,7 +2228,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * The tasks which save the space cache and inode cache may also * update ->aborted, check it. */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; mutex_unlock(&fs_info->tree_log_mutex); mutex_unlock(&fs_info->reloc_mutex); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2c5a6f6e5bb0..7291a2a93075 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -116,6 +116,10 @@ struct btrfs_trans_handle { struct btrfs_block_rsv *orig_rsv; refcount_t use_count; unsigned int type; + /* + * Error code of transaction abort, set outside of locks and must use + * the READ_ONCE/WRITE_ONCE access + */ short aborted; bool adding_csums; bool allocating_chunk; @@ -127,6 +131,14 @@ struct btrfs_trans_handle { struct list_head new_bgs; }; +/* + * The abort status can be changed between calls and is not protected by locks. + * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's + * set to a non-zero value it does not change, so the macro should be in checks + * but is not necessary for further reads of the value. + */ +#define TRANS_ABORTED(trans) (unlikely(READ_ONCE((trans)->aborted))) + struct btrfs_pending_snapshot { struct dentry *dentry; struct inode *dir; @@ -181,8 +193,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items); struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor); + unsigned int num_items); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 0e44db066641..7d06842a3d74 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -571,24 +571,43 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, { struct btrfs_fs_info *fs_info = leaf->fs_info; u64 length; + u64 chunk_end; u64 stripe_len; u16 num_stripes; u16 sub_stripes; u64 type; u64 features; bool mixed = false; + int raid_index; + int nparity; + int ncopies; length = btrfs_chunk_length(leaf, chunk); stripe_len = btrfs_chunk_stripe_len(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); type = btrfs_chunk_type(leaf, chunk); + raid_index = btrfs_bg_flags_to_raid_index(type); + ncopies = btrfs_raid_array[raid_index].ncopies; + nparity = btrfs_raid_array[raid_index].nparity; if (!num_stripes) { chunk_err(leaf, chunk, logical, "invalid chunk num_stripes, have %u", num_stripes); return -EUCLEAN; } + if (num_stripes < ncopies) { + chunk_err(leaf, chunk, logical, + "invalid chunk num_stripes < ncopies, have %u < %d", + num_stripes, ncopies); + return -EUCLEAN; + } + if (nparity && num_stripes == nparity) { + 
chunk_err(leaf, chunk, logical, + "invalid chunk num_stripes == nparity, have %u == %d", + num_stripes, nparity); + return -EUCLEAN; + } if (!IS_ALIGNED(logical, fs_info->sectorsize)) { chunk_err(leaf, chunk, logical, "invalid chunk logical, have %llu should aligned to %u", @@ -607,6 +626,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, "invalid chunk length, have %llu", length); return -EUCLEAN; } + if (unlikely(check_add_overflow(logical, length, &chunk_end))) { + chunk_err(leaf, chunk, logical, +"invalid chunk logical start and length, have logical start %llu length %llu", + logical, length); + return -EUCLEAN; + } if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { chunk_err(leaf, chunk, logical, "invalid chunk stripe length: %llu", @@ -674,6 +699,44 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, return 0; } +/* + * Enhanced version of chunk item checker. + * + * The common btrfs_check_chunk_valid() doesn't check item size since it needs + * to work on super block sys_chunk_array which doesn't have full item ptr. + */ +static int check_leaf_chunk_item(struct extent_buffer *leaf, + struct btrfs_chunk *chunk, + struct btrfs_key *key, int slot) +{ + int num_stripes; + + if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect [%zu, %u)", + btrfs_item_size_nr(leaf, slot), + sizeof(struct btrfs_chunk), + BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); + return -EUCLEAN; + } + + num_stripes = btrfs_chunk_num_stripes(leaf, chunk); + /* Let btrfs_check_chunk_valid() handle this error type */ + if (num_stripes == 0) + goto out; + + if (btrfs_chunk_item_size(num_stripes) != + btrfs_item_size_nr(leaf, slot)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect %lu", + btrfs_item_size_nr(leaf, slot), + btrfs_chunk_item_size(num_stripes)); + return -EUCLEAN; + } +out: + return btrfs_check_chunk_valid(leaf, chunk, key->offset); +} + __printf(3, 4) __cold static void dev_item_err(const struct extent_buffer *eb, int slot, @@ -772,7 +835,7 @@ static int check_inode_item(struct extent_buffer *leaf, /* Here we use super block generation + 1 to handle log tree */ if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) { inode_item_err(fs_info, leaf, slot, - "invalid inode generation: has %llu expect (0, %llu]", + "invalid inode transid: has %llu expect [0, %llu]", btrfs_inode_generation(leaf, iitem), super_gen + 1); return -EUCLEAN; @@ -831,7 +894,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { struct btrfs_fs_info *fs_info = leaf->fs_info; - struct btrfs_root_item ri; + struct btrfs_root_item ri = { 0 }; const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | BTRFS_ROOT_SUBVOL_DEAD; @@ -851,14 +914,22 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, return -EUCLEAN; } - if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { + if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) && + btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) { generic_err(leaf, slot, - "invalid root item size, have %u expect %zu", - btrfs_item_size_nr(leaf, slot), sizeof(ri)); + "invalid root item size, have %u expect %zu or %u", + btrfs_item_size_nr(leaf, slot), sizeof(ri), + btrfs_legacy_root_item_size()); + return -EUCLEAN; } + /* + * For legacy root item, the members starting at generation_v2 will be + * all filled with 0. 
+ * And since we allow generation_v2 as 0, it will still pass the check. + */ read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), - sizeof(ri)); + btrfs_item_size_nr(leaf, slot)); /* Generation related */ if (btrfs_root_generation(&ri) > @@ -1205,6 +1276,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf, "invalid item size, have %u expect aligned to %zu for key type %u", btrfs_item_size_nr(leaf, slot), sizeof(*dref), key->type); + return -EUCLEAN; } if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { generic_err(leaf, slot, @@ -1233,6 +1305,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf, extent_err(leaf, slot, "invalid extent data backref offset, have %llu expect aligned to %u", offset, leaf->fs_info->sectorsize); + return -EUCLEAN; } } return 0; @@ -1265,7 +1338,7 @@ static int check_leaf_item(struct extent_buffer *leaf, break; case BTRFS_CHUNK_ITEM_KEY: chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); - ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); + ret = check_leaf_chunk_item(leaf, chunk, key, slot); break; case BTRFS_DEV_ITEM_KEY: ret = check_dev_item(leaf, key, slot); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6f2178618c22..de53e5166997 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -167,6 +167,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, if (ret) goto out; + set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); root->log_start_pid = current->pid; } @@ -193,6 +194,9 @@ static int join_running_log_trans(struct btrfs_root *root) { int ret = -ENOENT; + if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) + return ret; + mutex_lock(&root->log_mutex); if (root->log_root) { ret = 0; @@ -3136,29 +3140,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, btrfs_init_log_ctx(&root_log_ctx, NULL); mutex_lock(&log_root_tree->log_mutex); - atomic_inc(&log_root_tree->log_batch); - atomic_inc(&log_root_tree->log_writers); index2 = log_root_tree->log_transid % 2; list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); root_log_ctx.log_transid = log_root_tree->log_transid; - mutex_unlock(&log_root_tree->log_mutex); - - mutex_lock(&log_root_tree->log_mutex); - /* * Now we are safe to update the log_root_tree because we're under the * log_mutex, and we're a current writer so we're holding the commit * open until we drop the log_mutex.
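Among the tree-checker additions earlier in this section, the chunk-end test relies on check_add_overflow(), which on current compilers expands to the overflow-checking builtin. The guard in isolation (standalone C, invented wrapper name):

    #include <stdbool.h>
    #include <stdint.h>

    /* Reject a chunk whose logical start + length wraps around u64. */
    static bool chunk_range_valid(uint64_t logical, uint64_t length,
                                  uint64_t *chunk_end)
    {
        return !__builtin_add_overflow(logical, length, chunk_end);
    }

If the addition wraps, the item is rejected with -EUCLEAN before the bogus end offset can reach later lookups.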
*/ ret = update_log_root(trans, log, &new_root_item); - - if (atomic_dec_and_test(&log_root_tree->log_writers)) { - /* atomic_dec_and_test implies a barrier */ - cond_wake_up_nomb(&log_root_tree->log_writer_wait); - } - if (ret) { if (!list_empty(&root_log_ctx.list)) list_del_init(&root_log_ctx.list); @@ -3204,8 +3196,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, root_log_ctx.log_transid - 1); } - wait_for_writer(log_root_tree); - /* * now that we've moved on to the tree of log tree roots, * check the full commit flag again @@ -3327,6 +3317,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) if (root->log_root) { free_log_tree(trans, root->log_root); root->log_root = NULL; + clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); } return 0; } @@ -3482,11 +3473,13 @@ fail: btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); - if (ret == -ENOSPC) { + if (err == -ENOSPC) { btrfs_set_log_full_commit(trans); - ret = 0; - } else if (ret < 0) - btrfs_abort_transaction(trans, ret); + err = 0; + } else if (err < 0 && err != -ENOENT) { + /* ENOENT can be returned if the entry hasn't been fsynced yet */ + btrfs_abort_transaction(trans, err); + } btrfs_end_log_trans(root); @@ -3646,6 +3639,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * search and this search we'll not find the key again and can just * bail. */ +search: ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); if (ret != 0) goto done; @@ -3665,6 +3659,13 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, if (min_key.objectid != ino || min_key.type != key_type) goto done; + + if (need_resched()) { + btrfs_release_path(path); + cond_resched(); + goto search; + } + ret = overwrite_item(trans, log, dst_path, src, i, &min_key); if (ret) { @@ -4049,11 +4050,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, fs_info->csum_root, ds + cs, ds + cs + cl - 1, &ordered_sums, 0); - if (ret) { - btrfs_release_path(dst_path); - kfree(ins_data); - return ret; - } + if (ret) + break; } } } @@ -4066,7 +4064,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do this after the loop above to avoid changing the * log tree while trying to change the log tree. */ - ret = 0; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, @@ -4242,6 +4239,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, const u64 ino = btrfs_ino(inode); struct btrfs_path *dst_path = NULL; bool dropped_extents = false; + u64 truncate_offset = i_size; + struct extent_buffer *leaf; + int slot; int ins_nr = 0; int start_slot; int ret; @@ -4256,9 +4256,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, if (ret < 0) goto out; + /* + * We must check if there is a prealloc extent that starts before the + * i_size and crosses the i_size boundary. This is to ensure later we + * truncate down to the end of that extent and not to the i_size, as + * otherwise we end up losing part of the prealloc extent after a log + * replay and with an implicit hole if there is another prealloc extent + * that starts at an offset beyond i_size. 
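A concrete instance of the case described above: with i_size = 8192 and a prealloc extent covering [4096, 16384), the log must truncate at 16384 rather than at i_size, otherwise replay loses the tail of the extent. The offset choice as a standalone helper (simplified; the real code walks the leaf with btrfs_previous_item() to find the extent):

    #include <stdint.h>

    static uint64_t pick_truncate_offset(uint64_t i_size,
                                         uint64_t extent_start,
                                         uint64_t extent_num_bytes)
    {
        uint64_t extent_end = extent_start + extent_num_bytes;

        /* Only a prealloc extent that starts below i_size and crosses it
         * moves the truncation point. */
        if (extent_start < i_size && extent_end > i_size)
            return extent_end;
        return i_size;
    }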
+ */ + ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); + if (ret < 0) + goto out; + + if (ret == 0) { + struct btrfs_file_extent_item *ei; + + leaf = path->nodes[0]; + slot = path->slots[0]; + ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + + if (btrfs_file_extent_type(leaf, ei) == + BTRFS_FILE_EXTENT_PREALLOC) { + u64 extent_end; + + btrfs_item_key_to_cpu(leaf, &key, slot); + extent_end = key.offset + + btrfs_file_extent_num_bytes(leaf, ei); + + if (extent_end > i_size) + truncate_offset = extent_end; + } + } else { + ret = 0; + } + while (true) { - struct extent_buffer *leaf = path->nodes[0]; - int slot = path->slots[0]; + leaf = path->nodes[0]; + slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { if (ins_nr > 0) { @@ -4296,7 +4330,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, ret = btrfs_truncate_inode_items(trans, root->log_root, &inode->vfs_inode, - i_size, + truncate_offset, BTRFS_EXTENT_DATA_KEY); } while (ret == -EAGAIN); if (ret) @@ -4970,6 +5004,138 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, return ret; } +static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_key *min_key, + const struct btrfs_key *max_key, + struct btrfs_path *path, + struct btrfs_path *dst_path, + const u64 logged_isize, + const bool recursive_logging, + const int inode_only, + struct btrfs_log_ctx *ctx, + bool *need_log_inode_item) +{ + struct btrfs_root *root = inode->root; + int ins_start_slot = 0; + int ins_nr = 0; + int ret; + + while (1) { + ret = btrfs_search_forward(root, min_key, path, trans->transid); + if (ret < 0) + return ret; + if (ret > 0) { + ret = 0; + break; + } +again: + /* Note, ins_nr might be > 0 here, cleanup outside the loop */ + if (min_key->objectid != max_key->objectid) + break; + if (min_key->type > max_key->type) + break; + + if (min_key->type == BTRFS_INODE_ITEM_KEY) + *need_log_inode_item = false; + + if ((min_key->type == BTRFS_INODE_REF_KEY || + min_key->type == BTRFS_INODE_EXTREF_KEY) && + inode->generation == trans->transid && + !recursive_logging) { + u64 other_ino = 0; + u64 other_parent = 0; + + ret = btrfs_check_ref_name_override(path->nodes[0], + path->slots[0], min_key, inode, + &other_ino, &other_parent); + if (ret < 0) { + return ret; + } else if (ret > 0 && ctx && + other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { + if (ins_nr > 0) { + ins_nr++; + } else { + ins_nr = 1; + ins_start_slot = path->slots[0]; + } + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, ins_nr, + inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + + ret = log_conflicting_inodes(trans, root, path, + ctx, other_ino, other_parent); + if (ret) + return ret; + btrfs_release_path(path); + goto next_key; + } + } + + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ + if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + if (ins_nr == 0) + goto next_slot; + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, + ins_nr, inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + goto next_slot; + } + + if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { + ins_nr++; + goto next_slot; + } else if (!ins_nr) { + ins_start_slot = path->slots[0]; + ins_nr = 1; + goto next_slot; + } + + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, + ins_nr, inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 1; + ins_start_slot = path->slots[0]; +next_slot: + 
path->slots[0]++; + if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { + btrfs_item_key_to_cpu(path->nodes[0], min_key, + path->slots[0]); + goto again; + } + if (ins_nr) { + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, ins_nr, inode_only, + logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + } + btrfs_release_path(path); +next_key: + if (min_key->offset < (u64)-1) { + min_key->offset++; + } else if (min_key->type < max_key->type) { + min_key->type++; + min_key->offset = 0; + } else { + break; + } + } + if (ins_nr) + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, + ins_nr, inode_only, logged_isize); + + return ret; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. @@ -4991,17 +5157,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, const loff_t end, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct btrfs_path *dst_path; struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; int err = 0; - int ret; - int nritems; - int ins_start_slot = 0; - int ins_nr; + int ret = 0; bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &inode->extent_tree; @@ -5037,15 +5199,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.offset = (u64)-1; /* - * Only run delayed items if we are a dir or a new file. - * Otherwise commit the delayed inode only, which is needed in - * order for the log replay code to mark inodes for link count - * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). + * Only run delayed items if we are a directory. We want to make sure + * all directory indexes hit the fs/subvolume tree so we can find them + * and figure out which index ranges have to be logged. + * + * Otherwise commit the delayed inode only if the full sync flag is set, + * as we want to make sure an up to date version is in the subvolume + * tree so copy_inode_items_to_log() / copy_items() can find it and copy + * it to the log tree. For a non full sync, we always log the inode item + * based on the in-memory struct btrfs_inode which is always up to date. 
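copy_inode_items_to_log() preserves the batching idiom of the open-coded loop it replaces (removed further down): runs of contiguous leaf slots are accumulated in (ins_start_slot, ins_nr) and flushed with one copy_items() call when the run breaks. The idiom in isolation (userspace model; copy_fn stands in for copy_items()):

    #include <stddef.h>

    typedef void (*copy_fn)(int start_slot, int nr);

    static void batch_slots(const int *slots, size_t n, copy_fn copy)
    {
        int ins_start_slot = 0;
        int ins_nr = 0;

        for (size_t i = 0; i < n; i++) {
            if (ins_nr && ins_start_slot + ins_nr == slots[i]) {
                ins_nr++;                     /* contiguous: extend the run */
                continue;
            }
            if (ins_nr)
                copy(ins_start_slot, ins_nr); /* flush the previous run */
            ins_start_slot = slots[i];
            ins_nr = 1;
        }
        if (ins_nr)
            copy(ins_start_slot, ins_nr);     /* flush the tail */
    }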
*/ - if (S_ISDIR(inode->vfs_inode.i_mode) || - inode->generation > fs_info->last_trans_committed) + if (S_ISDIR(inode->vfs_inode.i_mode)) ret = btrfs_commit_inode_delayed_items(trans, inode); - else + else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) ret = btrfs_commit_inode_delayed_inode(inode); if (ret) { @@ -5132,139 +5298,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, goto out_unlock; } - while (1) { - ins_nr = 0; - ret = btrfs_search_forward(root, &min_key, - path, trans->transid); - if (ret < 0) { - err = ret; - goto out_unlock; - } - if (ret != 0) - break; -again: - /* note, ins_nr might be > 0 here, cleanup outside the loop */ - if (min_key.objectid != ino) - break; - if (min_key.type > max_key.type) - break; - - if (min_key.type == BTRFS_INODE_ITEM_KEY) - need_log_inode_item = false; - - if ((min_key.type == BTRFS_INODE_REF_KEY || - min_key.type == BTRFS_INODE_EXTREF_KEY) && - inode->generation == trans->transid && - !recursive_logging) { - u64 other_ino = 0; - u64 other_parent = 0; - - ret = btrfs_check_ref_name_override(path->nodes[0], - path->slots[0], &min_key, inode, - &other_ino, &other_parent); - if (ret < 0) { - err = ret; - goto out_unlock; - } else if (ret > 0 && ctx && - other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { - if (ins_nr > 0) { - ins_nr++; - } else { - ins_nr = 1; - ins_start_slot = path->slots[0]; - } - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - - err = log_conflicting_inodes(trans, root, path, - ctx, other_ino, other_parent); - if (err) - goto out_unlock; - btrfs_release_path(path); - goto next_key; - } - } - - /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ - if (min_key.type == BTRFS_XATTR_ITEM_KEY) { - if (ins_nr == 0) - goto next_slot; - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - goto next_slot; - } - - if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { - ins_nr++; - goto next_slot; - } else if (!ins_nr) { - ins_start_slot = path->slots[0]; - ins_nr = 1; - goto next_slot; - } - - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 1; - ins_start_slot = path->slots[0]; -next_slot: - - nritems = btrfs_header_nritems(path->nodes[0]); - path->slots[0]++; - if (path->slots[0] < nritems) { - btrfs_item_key_to_cpu(path->nodes[0], &min_key, - path->slots[0]); - goto again; - } - if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - } - btrfs_release_path(path); -next_key: - if (min_key.offset < (u64)-1) { - min_key.offset++; - } else if (min_key.type < max_key.type) { - min_key.type++; - min_key.offset = 0; - } else { - break; - } - } - if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - } + err = copy_inode_items_to_log(trans, inode, &min_key, &max_key, + path, dst_path, logged_isize, + recursive_logging, inode_only, ctx, + &need_log_inode_item); + if (err) + goto out_unlock; btrfs_release_path(path); btrfs_release_path(dst_path); diff --git a/fs/btrfs/volumes.c 
b/fs/btrfs/volumes.c index 3e64f49c394b..db3b17bca11f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4,6 +4,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/buffer_head.h> @@ -219,7 +220,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * * global::fs_devs - add, remove, updates to the global list * - * does not protect: manipulation of the fs_devices::devices list! + * does not protect: manipulation of the fs_devices::devices list in general + * but in mount context it could be used to exclude list modifications by eg. + * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * @@ -232,6 +235,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * + * Is not required at mount and close times, because our device list is + * protected by the uuid_mutex at that point. + * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from @@ -778,6 +784,11 @@ static int btrfs_free_stale_devices(const char *path, return ret; } +/* + * This is only used on mount, and we are protected from competing things + * messing with our fs_devices by the uuid_mutex, thus we do not need the + * fs_devices->device_list_mutex here. + */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) @@ -1111,17 +1122,25 @@ static noinline struct btrfs_device *device_list_add(const char *path, if (device->bdev != path_bdev) { bdput(path_bdev); mutex_unlock(&fs_devices->device_list_mutex); - btrfs_warn_in_rcu(device->fs_info, - "duplicate device fsid:devid for %pU:%llu old:%s new:%s", - disk_super->fsid, devid, - rcu_str_deref(device->name), path); + /* + * device->fs_info may not be reliable here, so + * pass in a NULL instead. This avoids a + * possible use-after-free when the fs_info and + * fs_info->sb are already torn down. + */ + btrfs_warn_in_rcu(NULL, + "duplicate device %s devid %llu generation %llu scanned by %s (%d)", + path, devid, found_transid, + current->comm, + task_pid_nr(current)); return ERR_PTR(-EEXIST); } bdput(path_bdev); btrfs_info_in_rcu(device->fs_info, - "device fsid %pU devid %llu moved old:%s new:%s", - disk_super->fsid, devid, - rcu_str_deref(device->name), path); + "devid %llu device path %s changed to %s scanned by %s (%d)", + devid, rcu_str_deref(device->name), + path, current->comm, + task_pid_nr(current)); } name = rcu_string_strdup(path, GFP_NOFS); @@ -1223,6 +1242,8 @@ again: &device->dev_state)) { if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state) && + !test_bit(BTRFS_DEV_STATE_MISSING, + &device->dev_state) && (!latest_dev || device->generation > latest_dev->generation)) { latest_dev = device; @@ -1230,22 +1251,13 @@ again: continue; } - if (device->devid == BTRFS_DEV_REPLACE_DEVID) { - /* - * In the first step, keep the device which has - * the correct fsid and the devid that is used - * for the dev_replace procedure. - * In the second step, the dev_replace state is - * read from the device tree and it is known - * whether the procedure is really active or - * not, which means whether this device is - * used or whether it should be removed. 
- */ - if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, - &device->dev_state)) { - continue; - } - } + /* + * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID + * in btrfs_init_dev_replace(), so just continue. + */ + if (device->devid == BTRFS_DEV_REPLACE_DEVID) + continue; + if (device->bdev) { blkdev_put(device->bdev, device->mode); device->bdev = NULL; @@ -1254,9 +1266,6 @@ again: if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { list_del_init(&device->dev_alloc_list); clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); - if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, - &device->dev_state)) - fs_devices->rw_devices--; } list_del_init(&device->dev_list); fs_devices->num_devices--; @@ -1416,8 +1425,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int ret; lockdep_assert_held(&uuid_mutex); + /* + * The device_list_mutex cannot be taken here in case opening the + * underlying device takes further locks like bd_mutex. + * + * We also don't need the lock here as this is called during mount and + * exclusion is provided by the uuid_mutex. + */ - mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; @@ -1425,7 +1440,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } - mutex_unlock(&fs_devices->device_list_mutex); return ret; } @@ -2188,6 +2202,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, mutex_unlock(&uuid_mutex); ret = btrfs_shrink_device(device, 0); + if (!ret) + btrfs_reada_remove_dev(device); mutex_lock(&uuid_mutex); if (ret) goto error_undo; @@ -2274,6 +2290,7 @@ out: return ret; error_undo: + btrfs_reada_undo_remove_dev(device); if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { mutex_lock(&fs_info->chunk_mutex); list_add(&device->dev_alloc_list, @@ -2705,9 +2722,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path btrfs_set_super_num_devices(fs_info->super_copy, orig_super_num_devices + 1); - /* add sysfs device entry */ - btrfs_sysfs_add_device_link(fs_devices, device); - /* * we've got more storage, clear any full flags on the space * infos @@ -2715,6 +2729,10 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path btrfs_clear_space_info_full(fs_info); mutex_unlock(&fs_info->chunk_mutex); + + /* Add sysfs device entry */ + btrfs_sysfs_add_device_link(fs_devices, device); + mutex_unlock(&fs_devices->device_list_mutex); if (seeding_dev) { @@ -2769,8 +2787,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path ret = btrfs_commit_transaction(trans); } - /* Update ctime/mtime for libblkid */ + /* + * Now that we have written a new super block to this device, check all + * the other fs_devices lists to see if device_path alienates any other + * scanned device. + * We can ignore the return value as it typically returns -EINVAL and + * only succeeds if the device was an alien. + */ + btrfs_forget_devices(device_path); + + /* Update ctime/mtime for blkid or udev */ update_dev_time(device_path); + return ret; error_sysfs:
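The btrfs_forget_devices() call introduced above also has a userspace-visible counterpart: "btrfs device scan --forget" reaches the same code through the BTRFS_IOC_FORGET_DEV ioctl on the btrfs control device. The following is an editor's sketch of that interface, not part of the patch; the ioctl and control-device path are standard uapi, but verify the details against linux/btrfs.h for your kernel.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

/* Ask the kernel to drop one scanned-but-unmounted btrfs device. */
static int forget_scanned_device(const char *path)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open("/dev/btrfs-control", O_RDWR);
	if (fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	/* an empty args.name asks the kernel to forget all stale devices */
	strncpy(args.name, path, BTRFS_PATH_NAME_MAX);
	ret = ioctl(fd, BTRFS_IOC_FORGET_DEV, &args);
	close(fd);
	return ret;
}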
@@ -3271,7 +3299,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info, if (!path) return -ENOMEM; - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_transaction_fallback_global_rsv(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); @@ -4234,7 +4262,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, mutex_lock(&fs_info->balance_mutex); if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) btrfs_info(fs_info, "balance: paused"); - else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req)) + /* + * Balance can be canceled by: + * + * - Regular cancel request + * Then ret == -ECANCELED and balance_cancel_req > 0 + * + * - Fatal signal to "btrfs" process + * Either the signal is caught by wait_reserve_ticket() and the + * callers get -EINTR, or it is caught by + * btrfs_should_cancel_balance() and they get -ECANCELED. + * Either way, in this case balance_cancel_req = 0, and + * ret == -EINTR or ret == -ECANCELED. + * + * So here we only check the return value to catch a canceled balance. + */ + else if (ret == -ECANCELED || ret == -EINTR) btrfs_info(fs_info, "balance: canceled"); else btrfs_info(fs_info, "balance: ended with status: %d", ret); @@ -4366,6 +4409,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info) btrfs_warn(fs_info, "balance: cannot set exclusive op status, resume manually"); + btrfs_release_path(path); + mutex_lock(&fs_info->balance_mutex); BUG_ON(fs_info->balance_ctl); spin_lock(&fs_info->balance_lock); @@ -4526,6 +4571,7 @@ static int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, @@ -4550,6 +4596,7 @@ update_tree: } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; @@ -4557,7 +4604,6 @@ skip: break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { @@ -4864,6 +4910,10 @@ again: } mutex_lock(&fs_info->chunk_mutex); + /* Clear all state bits beyond the shrunk device size */ + clear_extent_bits(&device->alloc_state, new_size, (u64)-1, + CHUNK_STATE_MASK); + btrfs_device_set_disk_total_bytes(device, new_size); if (list_empty(&device->post_commit_list)) list_add_tail(&device->post_commit_list, @@ -5632,12 +5682,13 @@ void btrfs_put_bbio(struct btrfs_bio *bbio) * replace.
*/ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, - u64 logical, u64 length, + u64 logical, u64 *length_ret, struct btrfs_bio **bbio_ret) { struct extent_map *em; struct map_lookup *map; struct btrfs_bio *bbio; + u64 length = *length_ret; u64 offset; u64 stripe_nr; u64 stripe_nr_end; @@ -5670,7 +5721,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info, } offset = logical - em->start; - length = min_t(u64, em->len - offset, length); + length = min_t(u64, em->start + em->len - logical, length); + *length_ret = length; stripe_len = map->stripe_len; /* @@ -6085,7 +6137,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, if (op == BTRFS_MAP_DISCARD) return __btrfs_map_block_for_discard(fs_info, logical, - *length, bbio_ret); + length, bbio_ret); ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom); if (ret < 0) @@ -6665,8 +6717,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; + unsigned int nofs_flag; + /* + * We call this under the chunk_mutex, so we want to use NOFS for this + * allocation; however, we don't want to change btrfs_alloc_device() to + * always do NOFS because we use it in a lot of other GFP_KERNEL safe + * places. + */ + nofs_flag = memalloc_nofs_save(); device = btrfs_alloc_device(NULL, &devid, dev_uuid); + memalloc_nofs_restore(nofs_flag); if (IS_ERR(device)) return device;
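The memalloc_nofs_save()/memalloc_nofs_restore() pair used above is the standard scoped-NOFS idiom: instead of threading GFP_NOFS through every helper, the caller marks a region in which GFP_KERNEL allocations are implicitly degraded so that reclaim cannot recurse back into the filesystem. A minimal sketch of the idiom (editor's illustration, not part of the patch):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: called with a filesystem lock held, so reclaim
 * must not re-enter the filesystem.  Every GFP_KERNEL allocation in the
 * save/restore window behaves as if it were GFP_NOFS.
 */
static void *alloc_with_fs_lock_held(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);	/* implicitly degraded to GFP_NOFS */
	memalloc_nofs_restore(nofs_flag);
	return p;
}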
@@ -7255,7 +7316,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) * otherwise we don't need it. */ mutex_lock(&uuid_mutex); - mutex_lock(&fs_info->chunk_mutex); + + /* + * It is possible for mount and umount to race in such a way that + * we execute this code path, but open_fs_devices failed to clear + * total_rw_bytes. We certainly want it cleared before reading the + * device items, so clear it here. + */ + fs_info->fs_devices->total_rw_bytes = 0; /* * Read all device items, and then all the chunk items. All @@ -7292,7 +7360,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); + mutex_lock(&fs_info->chunk_mutex); ret = read_one_chunk(&found_key, leaf, chunk); + mutex_unlock(&fs_info->chunk_mutex); if (ret) goto error; } @@ -7322,7 +7392,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } ret = 0; error: - mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&uuid_mutex); btrfs_free_path(path); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 5acf5c507ec2..aa6a6d7b2978 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -56,6 +56,7 @@ struct btrfs_io_geometry { #define BTRFS_DEV_STATE_MISSING (2) #define BTRFS_DEV_STATE_REPLACE_TGT (3) #define BTRFS_DEV_STATE_FLUSH_SENT (4) +#define BTRFS_DEV_STATE_NO_READA (5) struct btrfs_device { struct list_head dev_list; /* device_list_mutex */ diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 95d9aebff2c4..48858510739b 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -227,11 +227,33 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name, { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; + const bool start_trans = (current->journal_info == NULL); int ret; - trans = btrfs_start_transaction(root, 2); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (start_trans) { + /* + * 1 unit for inserting/updating/deleting the xattr + * 1 unit for the inode item update + */ + trans = btrfs_start_transaction(root, 2); + if (IS_ERR(trans)) + return PTR_ERR(trans); + } else { + /* + * This can happen when smack is enabled and a directory is being + * created. It happens through d_instantiate_new(), which calls + * smack_d_instantiate(), which in turn calls __vfs_setxattr() to + * set the transmute xattr (XATTR_NAME_SMACKTRANSMUTE) on the + * inode. We have already reserved space for the xattr and inode + * update at btrfs_mkdir(), so just use the transaction handle. + * We don't join or start a transaction, as that will reset the + * block_rsv of the handle and trigger a warning for the start + * case. + */ + ASSERT(strncmp(name, XATTR_SECURITY_PREFIX, + XATTR_SECURITY_PREFIX_LEN) == 0); + trans = current->journal_info; + } ret = btrfs_setxattr(trans, inode, name, value, size, flags); if (ret) @@ -242,7 +264,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name, ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); out: - btrfs_end_transaction(trans); + if (start_trans) + btrfs_end_transaction(trans); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index 91ceca52d14f..0d7bd7712076 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1337,6 +1337,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) } EXPORT_SYMBOL(__breadahead); +void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, + gfp_t gfp) +{ + struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); + if (likely(bh)) { + ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); + brelse(bh); + } +} +EXPORT_SYMBOL(__breadahead_gfp); + /** * __bread_gfp() - reads a specified block and returns the bh * @bdev: the block_device to read from @@ -2728,16 +2739,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { - /* - * The page may have dirty, unmapped buffers.
For example, - * they may have been added in ext3_writepage(). Make them - * freeable here, so the page does not leak. - */ -#if 0 - /* Not really sure about this - do we need this ? */ - if (page->mapping->a_ops->invalidatepage) - page->mapping->a_ops->invalidatepage(page, offset); -#endif unlock_page(page); return 0; /* don't care */ } @@ -2932,12 +2933,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, /* Is the page fully outside i_size? (truncate in progress) */ offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { - /* - * The page may have dirty, unmapped buffers. For example, - * they may have been added in ext3_writepage(). Make them - * freeable here, so the page does not leak. - */ - do_invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; /* don't care */ } @@ -3159,6 +3154,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 44a3ce1e4ce4..bd5fe8d00d00 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -60,9 +60,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, object = container_of(op->op.object, struct cachefiles_object, fscache); spin_lock(&object->work_lock); list_add_tail(&monitor->op_link, &op->to_do); + fscache_enqueue_retrieval(op); spin_unlock(&object->work_lock); - fscache_enqueue_retrieval(op); fscache_put_retrieval(op); return 0; } @@ -121,7 +121,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, _debug("reissue read"); ret = bmapping->a_ops->readpage(NULL, backpage); if (ret < 0) - goto unlock_discard; + goto discard; } /* but the page may have been read before the monitor was installed, so @@ -138,6 +138,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, unlock_discard: unlock_page(backpage); +discard: spin_lock_irq(&object->work_lock); list_del(&monitor->op_link); spin_unlock_irq(&object->work_lock); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 7ab616601141..a02e845eb0fb 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1427,7 +1427,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; struct page *pinned_page = NULL; - loff_t off = vmf->pgoff << PAGE_SHIFT; + loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; int want, got, err; sigset_t oldset; vm_fault_t ret = VM_FAULT_SIGBUS; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f5a38910a82b..22833fa5bb58 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1052,12 +1052,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) { struct ceph_mds_session *session = cap->session; struct ceph_inode_info *ci = cap->ci; - struct ceph_mds_client *mdsc = - ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; + struct ceph_mds_client *mdsc; int removed = 0; + /* 'ci' being NULL means the remove have already occurred */ + if (!ci) { + dout("%s: cap inode is NULL\n", __func__); + return; + } + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + mdsc = 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f5a38910a82b..22833fa5bb58 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1052,12 +1052,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) { struct ceph_mds_session *session = cap->session; struct ceph_inode_info *ci = cap->ci; - struct ceph_mds_client *mdsc = - ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; + struct ceph_mds_client *mdsc; int removed = 0; + /* 'ci' being NULL means the removal has already occurred */ + if (!ci) { + dout("%s: cap inode is NULL\n", __func__); + return; + } + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; + /* remove from inode's cap rbtree, and clear auth cap */ rb_erase(&cap->ci_node, &ci->i_caps); if (ci->i_auth_cap == cap) @@ -1976,8 +1983,12 @@ retry_locked: } /* want more caps from mds? */ - if (want & ~(cap->mds_wanted | cap->issued)) - goto ack; + if (want & ~cap->mds_wanted) { + if (want & ~(cap->mds_wanted | cap->issued)) + goto ack; + if (!__cap_is_valid(cap)) + goto ack; + } /* things we might delay */ if ((cap->issued & ~retain) == 0) @@ -2009,12 +2020,24 @@ ack: if (mutex_trylock(&session->s_mutex) == 0) { dout("inverting session/ino locks on %p\n", session); + session = ceph_get_mds_session(session); spin_unlock(&ci->i_ceph_lock); if (took_snap_rwsem) { up_read(&mdsc->snap_rwsem); took_snap_rwsem = 0; } - mutex_lock(&session->s_mutex); + if (session) { + mutex_lock(&session->s_mutex); + ceph_put_mds_session(session); + } else { + /* + * Because we take the reference while + * holding the i_ceph_lock, it should + * never be NULL. Throw a warning if it + * ever is. + */ + WARN_ON_ONCE(true); + } goto retry; } } @@ -3689,6 +3712,7 @@ retry: WARN_ON(1); tsession = NULL; target = -1; + mutex_lock(&session->s_mutex); } goto retry; @@ -3933,7 +3957,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, __ceph_queue_cap_release(session, cap); spin_unlock(&session->s_cap_lock); } - goto done; + goto flush_cap_releases; } /* these will work even if we don't have a cap yet */ diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 2e4764fd1872..3367a8194f24 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -920,6 +920,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, req->r_num_caps = 2; req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + if (as_ctx.pagelist) { + req->r_pagelist = as_ctx.pagelist; + as_ctx.pagelist = NULL; + } err = ceph_mdsc_do_request(mdsc, dir, req); if (!err && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); diff --git a/fs/ceph/export.c b/fs/ceph/export.c index b6bfa94332c3..e088843a7734 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -172,9 +172,16 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino) static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) { struct inode *inode = __lookup_inode(sb, ino); + int err; + if (IS_ERR(inode)) return ERR_CAST(inode); - if (inode->i_nlink == 0) { + /* We need LINK caps to reliably check i_nlink */ + err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false); + if (err) + return ERR_PTR(err); + /* -ESTALE if inode has been unlinked and no file is open */ + if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) { iput(inode); return ERR_PTR(-ESTALE); } @@ -315,6 +322,11 @@ static struct dentry *__get_parent(struct super_block *sb, req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err) { + ceph_mdsc_put_request(req); + return ERR_PTR(err); + } + inode = req->r_target_inode; if (inode) ihold(inode); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ce54a1b12819..a10711a6337a 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1260,6 +1260,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct page *pinned_page = NULL; + bool direct_lock = iocb->ki_flags & IOCB_DIRECT; ssize_t ret; int want, got = 0; int retry_op = 0, read = 0; @@ -1268,7 +1269,7 @@ again: dout("aio_read %p %llx.%llx %llu~%u
trying to get caps on %p\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_start_io_direct(inode); else ceph_start_io_read(inode); @@ -1325,7 +1326,7 @@ again: } ceph_put_cap_refs(ci, got); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_end_io_direct(inode); else ceph_end_io_read(inode); @@ -2197,6 +2198,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index c07407586ce8..660a878e20ef 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -754,8 +754,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page, info_caps = le32_to_cpu(info->cap.caps); /* prealloc new cap struct */ - if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) + if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) { new_cap = ceph_get_cap(mdsc, caps_reservation); + if (!new_cap) + return -ENOMEM; + } /* * prealloc xattr data, if it looks like we'll need it. only diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 5c84545bb8e8..f03ff5c34255 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3078,8 +3078,7 @@ static void handle_session(struct ceph_mds_session *session, void *end = p + msg->front.iov_len; struct ceph_mds_session_head *h; u32 op; - u64 seq; - unsigned long features = 0; + u64 seq, features = 0; int wake = 0; bool blacklisted = false; @@ -3098,9 +3097,10 @@ static void handle_session(struct ceph_mds_session *session, goto bad; /* version >= 3, feature bits */ ceph_decode_32_safe(&p, end, len, bad); - ceph_decode_need(&p, end, len, bad); - memcpy(&features, p, min_t(size_t, len, sizeof(features))); - p += len; + if (len) { + ceph_decode_64_safe(&p, end, features, bad); + p += len - sizeof(features); + } } mutex_lock(&mdsc->mutex); @@ -4074,6 +4074,9 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); + if (mdsc->stopping) + return; + mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + @@ -4149,7 +4152,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) return -ENOMEM; } - fsc->mdsc = mdsc; init_completion(&mdsc->safe_umount_waiters); init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); @@ -4201,6 +4203,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) strscpy(mdsc->nodename, utsname()->nodename, sizeof(mdsc->nodename)); + + fsc->mdsc = mdsc; return 0; } @@ -4438,7 +4442,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + /* + * Make sure the delayed work has stopped before releasing the + * resources: cancel_delayed_work_sync() only guarantees that a + * running instance of the work finishes executing, but the + * delayed work can re-arm itself again after that. + */ + flush_delayed_work(&mdsc->delayed_work); + if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions);
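The mdsc->stopping check plus the switch from cancel_delayed_work_sync() to flush_delayed_work() above is the usual recipe for tearing down a delayed work that re-arms itself. A generic sketch of the pattern (editor's illustration with hypothetical names, not part of the patch):

#include <linux/workqueue.h>

static struct delayed_work poll_work;	/* INIT_DELAYED_WORK(&poll_work, poll_fn) */
static bool poll_stopping;

static void poll_fn(struct work_struct *work)
{
	if (poll_stopping)
		return;				/* don't re-arm during teardown */
	/* ... periodic work ... */
	schedule_delayed_work(&poll_work, HZ);	/* re-arms itself */
}

static void poll_teardown(void)
{
	poll_stopping = true;
	/*
	 * cancel_delayed_work_sync() only waits for one execution, and a
	 * running instance may already have queued the next one.  With the
	 * stopping flag set, flushing lets an in-flight instance finish
	 * without re-arming.
	 */
	flush_delayed_work(&poll_work);
}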
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index de56dee60540..19507e2fdb57 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, } if (IS_ERR(in)) { - pr_warn("Can't lookup inode %llx (err: %ld)\n", - realm->ino, PTR_ERR(in)); + dout("Can't lookup inode %llx (err: %ld)\n", + realm->ino, PTR_ERR(in)); qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ } else { qri->timeout = 0; diff --git a/fs/cgroupfs/Kconfig b/fs/cgroupfs/Kconfig new file mode 100644 index 000000000000..f93a4643faef --- /dev/null +++ b/fs/cgroupfs/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CGROUPFS + bool "cgroupfs file system support" + default n + help + The cgroupfs filesystem is used to present independent resource views + inside a container, including cpuinfo, meminfo, stat and so on. diff --git a/fs/cgroupfs/Makefile b/fs/cgroupfs/Makefile new file mode 100644 index 000000000000..9aa265ced398 --- /dev/null +++ b/fs/cgroupfs/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for the cgroupfs pseudo filesystem +# +obj-$(CONFIG_CGROUPFS) += mount.o proc.o sys.o diff --git a/fs/cgroupfs/cgroupfs.h b/fs/cgroupfs/cgroupfs.h new file mode 100644 index 000000000000..90cac3408a7d --- /dev/null +++ b/fs/cgroupfs/cgroupfs.h @@ -0,0 +1,50 @@ +#ifndef __LINUX_CGROUPFS_H +#define __LINUX_CGROUPFS_H +enum cgroupfs_file_type { + CGROUPFS_TYPE_MEMINFO, + CGROUPFS_TYPE_CPUINFO, + CGROUPFS_TYPE_STAT, + CGROUPFS_TYPE_UPTIME, + CGROUPFS_TYPE_LOADAVG, + CGROUPFS_TYPE_DKSTATS, + CGROUPFS_TYPE_VMSTAT, + CGROUPFS_TYPE_CPUDIR, + CGROUPFS_TYPE_NORMAL_DIR, + CGROUPFS_TYPE_LAST, +}; + +typedef struct cgroupfs_entry { + struct rb_root subdir; + struct rb_node subdir_node; + const char *name; + struct cgroupfs_entry *parent; + struct super_block *sb; + refcount_t refcnt; + u64 inum; + int cgroupfs_type; + int cpu; + umode_t mode; + int namelen; + const struct inode_operations *e_iops; + const struct file_operations *e_fops; + const struct dentry_operations *e_dops; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); +} cgroupfs_entry_t; + +cgroupfs_entry_t *cgroupfs_alloc_private(const char *name, + cgroupfs_entry_t *parent, int cgroupfs_type); +void cgroupfs_set_proc_fops(cgroupfs_entry_t *en); +void cgroupfs_set_sys_dops(cgroupfs_entry_t *en); +struct inode *cgroupfs_get_inode(cgroupfs_entry_t *en); + +static inline void cgroupfs_get_entry(cgroupfs_entry_t *en) +{ + refcount_inc(&en->refcnt); +} + +void cgroupfs_put_entry(cgroupfs_entry_t *en); +int cgroupfs_cpu_dir_filter(int cpu, int *max_cpu, int *counted_cpu, int once); +#endif
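For context, once CONFIG_CGROUPFS is enabled and the filesystem below is registered, it is mounted by type name like any other pseudo filesystem; a container runtime would then bind the generated files over /proc and /sys inside the container. A hypothetical userspace usage sketch (the mount point and bind step are assumptions, not part of the patch):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "cgroupfs" is the type registered by cgroup_fs_type in mount.c;
	 * the target directory must already exist. */
	if (mount("cgroupfs", "/run/cgroupfs", "cgroupfs", 0, NULL) < 0) {
		perror("mount cgroupfs");
		return 1;
	}
	/* a runtime could now bind /run/cgroupfs/proc/meminfo over the
	 * container's /proc/meminfo, and so on */
	return 0;
}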
diff --git a/fs/cgroupfs/mount.c b/fs/cgroupfs/mount.c new file mode 100644 index 000000000000..d3f092ea0c7a --- /dev/null +++ b/fs/cgroupfs/mount.c @@ -0,0 +1,425 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/cgroup.h> +#include <linux/rbtree.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> +#include <linux/err.h> +#include <linux/kernfs.h> +#include "cgroupfs.h" + +#define CGROUPFS_MAGIC 3718152116619 + +static DEFINE_RWLOCK(cgroupfs_subdir_lock); + +cgroupfs_entry_t *cgroupfs_root; + +static int cgroupfs_entry_num; + +static void cgroupfs_free_inode(struct inode *inode) +{ + if (S_ISLNK(inode->i_mode)) + kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static inline void cgroupfs_free_entry(cgroupfs_entry_t *en) +{ + if (!en) + return; + if (en->name) + kfree(en->name); + kfree(en); +} + +void cgroupfs_put_entry(cgroupfs_entry_t *en) +{ + /* + * Entries are owned by the subdir tree and freed in + * cgroupfs_umount_remove_tree(); dropping the last reference + * here intentionally does not free the entry. + */ + if (refcount_dec_and_test(&en->refcnt)) { + return; + } +} + +static cgroupfs_entry_t *cfs_subdir_first(cgroupfs_entry_t *dir) +{ + return rb_entry_safe(rb_first(&dir->subdir), cgroupfs_entry_t, + subdir_node); +} + +static cgroupfs_entry_t *cfs_subdir_next(cgroupfs_entry_t *dir) +{ + return rb_entry_safe(rb_next(&dir->subdir_node), cgroupfs_entry_t, + subdir_node); +} + +static int cfs_name_match(const char *name, cgroupfs_entry_t *en, unsigned int len) +{ + if (len < en->namelen) + return -1; + if (len > en->namelen) + return 1; + + return memcmp(name, en->name, len); +} + +static cgroupfs_entry_t *cpu_subdir_find(cgroupfs_entry_t *dir, + const char *name, + unsigned int len) +{ + struct rb_node *node = dir->subdir.rb_node; + + while (node) { + cgroupfs_entry_t *en = rb_entry(node, cgroupfs_entry_t, subdir_node); + int result = cfs_name_match(name, en, len); + + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return en; + } + return NULL; +} + +static bool cpu_subdir_insert(cgroupfs_entry_t *dir, + cgroupfs_entry_t *en) +{ + struct rb_root *root = &dir->subdir; + struct rb_node **new = &root->rb_node, *parent = NULL; + + write_lock(&cgroupfs_subdir_lock); + while (*new) { + cgroupfs_entry_t *this = rb_entry(*new, cgroupfs_entry_t, subdir_node); + int result = cfs_name_match(en->name, this, en->namelen); + + parent = *new; + if (result < 0) + new = &(*new)->rb_left; + else if (result > 0) + new = &(*new)->rb_right; + else { + write_unlock(&cgroupfs_subdir_lock); + return false; + } + } + + rb_link_node(&en->subdir_node, parent, new); + rb_insert_color(&en->subdir_node, root); + write_unlock(&cgroupfs_subdir_lock); + cgroupfs_get_entry(dir); + cgroupfs_entry_num++; + return true; +} + +void cgroupfs_umount_remove_tree(cgroupfs_entry_t *root) +{ + cgroupfs_entry_t *en, *next; + write_lock(&cgroupfs_subdir_lock); + en = root; + while (1) { + write_unlock(&cgroupfs_subdir_lock); + cond_resched(); + write_lock(&cgroupfs_subdir_lock); + next = cfs_subdir_first(en); + if (next) { + if (S_ISREG(next->mode)) { + rb_erase(&next->subdir_node, &en->subdir); + cgroupfs_free_entry(next); + continue; + } + else { + en = next; + continue; + } + } + next = en->parent; + rb_erase(&en->subdir_node, &next->subdir); + cgroupfs_free_entry(en); + + if (en == root) + break; + en = next; + } + write_unlock(&cgroupfs_subdir_lock); +} + +cgroupfs_entry_t *cgroupfs_alloc_private(const char *name, cgroupfs_entry_t *parent, int cgroupfs_type) +{ + cgroupfs_entry_t *p = kzalloc(sizeof(cgroupfs_entry_t), GFP_KERNEL); + if (!p) + return NULL; + p->namelen = strlen(name); + p->name = kmalloc(p->namelen + 1, GFP_KERNEL); + if (!p->name) { + kfree(p); + return NULL; + } + memcpy((void *)(p->name), (void *)name, p->namelen + 1); + p->parent = parent; + p->cgroupfs_type = cgroupfs_type; + return p; +} + +struct dentry *cgroupfs_iop_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +{ + int cpu; + cgroupfs_entry_t *sub, *parent = dir->i_private; + struct inode *inode = NULL; + int counted_cpu = -1, max_cpu = INT_MAX; + read_lock(&cgroupfs_subdir_lock); + sub = cpu_subdir_find(parent, dentry->d_name.name, dentry->d_name.len); + if (!sub) {
read_unlock(&cgroupfs_subdir_lock); + return ERR_PTR(-ENOENT); + } + cgroupfs_get_entry(sub); + read_unlock(&cgroupfs_subdir_lock); + if (parent->cgroupfs_type == CGROUPFS_TYPE_CPUDIR && sub->cgroupfs_type == CGROUPFS_TYPE_CPUDIR) { + cpu = sub->cpu; + if (cgroupfs_cpu_dir_filter(cpu, &max_cpu, &counted_cpu, 1)) + goto out; + } + if (sub->cgroupfs_type == CGROUPFS_TYPE_CPUDIR) { + d_set_d_op(dentry, sub->e_dops); + } + inode = cgroupfs_get_inode(sub); +out: + cgroupfs_put_entry(sub); + if (!inode) + return ERR_PTR(-ENOENT); + return d_splice_alias(inode, dentry); +} + +const struct inode_operations cgroupfs_inode_operations = { + .lookup = cgroupfs_iop_lookup, +}; + +int cgroupfs_readdir(struct file *file, struct dir_context *ctx) +{ + cgroupfs_entry_t *next; + int i, cpu, skip; + int counted_cpu = -1, max_cpu = INT_MAX, filter_cpu = 0; + cgroupfs_entry_t *en; + en = file_inode(file)->i_private; + if (en->cgroupfs_type == CGROUPFS_TYPE_CPUDIR) { + filter_cpu = 1; + } + + if (!dir_emit_dots(file, ctx)) + return 0; + + i = ctx->pos - 2; + read_lock(&cgroupfs_subdir_lock); + en = cfs_subdir_first(en); + for (;;) { + if (!en) { + read_unlock(&cgroupfs_subdir_lock); + return 0; + } + if (!i) + break; + en = cfs_subdir_next(en); + i--; + } + + do { + skip = 0; + cgroupfs_get_entry(en); + read_unlock(&cgroupfs_subdir_lock); + if (filter_cpu && en->cgroupfs_type == CGROUPFS_TYPE_CPUDIR) { + cpu = en->cpu; + if (cgroupfs_cpu_dir_filter(cpu, &max_cpu, &counted_cpu, 0)) { + skip = 1; + } + } + if (!skip && !dir_emit(ctx, en->name, en->namelen, + en->inum, en->mode >> 12)) { + cgroupfs_put_entry(en); + return 0; + } + ctx->pos++; + read_lock(&cgroupfs_subdir_lock); + next = cfs_subdir_next(en); + cgroupfs_put_entry(en); + en = next; + } while (en); + + read_unlock(&cgroupfs_subdir_lock); + return 1; +} + +static const struct file_operations cgroupfs_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate_shared = cgroupfs_readdir, +}; + +struct inode *cgroupfs_get_inode(cgroupfs_entry_t *en) +{ + struct inode *inode; + inode = new_inode(en->sb); + if (!inode) + return NULL; + inode->i_mode = en->mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + inode->i_op = en->e_iops; + inode->i_fop = en->e_fops; + inode->i_private = en; + return inode; +} + +static inline cgroupfs_entry_t *cgroupfs_new_entry(struct super_block *sb, + const char *name, cgroupfs_entry_t *parent, + int proc_type, umode_t mode) +{ + cgroupfs_entry_t *p; + p = cgroupfs_alloc_private(name, parent, proc_type); + if (!p) + return NULL; + p->sb = sb; + p->e_iops = &cgroupfs_inode_operations; + p->inum = get_next_ino(); + p->mode = mode; + refcount_set(&p->refcnt, 1); + if (S_ISDIR(mode)) { + p->subdir = RB_ROOT; + p->e_fops = &cgroupfs_dir_operations; + if (proc_type == CGROUPFS_TYPE_CPUDIR) + cgroupfs_set_sys_dops(p); + } + if (S_ISREG(mode) && proc_type <= CGROUPFS_TYPE_VMSTAT) + cgroupfs_set_proc_fops(p); + if (parent) + cpu_subdir_insert(parent, p); + return p; +} + +// to be continued +static int cgroupfs_new_cpu_dir(struct super_block *sb, cgroupfs_entry_t *parent, umode_t mode) +{ + int cpu; + char cpustr[20]; + cgroupfs_entry_t *dir; + for_each_online_cpu(cpu) { + sprintf(cpustr, "cpu%d", cpu); + dir = cgroupfs_new_entry(sb, cpustr, parent, CGROUPFS_TYPE_CPUDIR, mode); + if (!dir) + return -1; + dir->cpu = cpu; + } + return 0; +} + + +struct super_operations cgroupfs_super_operations = { + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, + .free_inode = 
cgroupfs_free_inode, +}; + +static int cgroupfs_fill_root(struct super_block *s, unsigned long magic, cgroupfs_entry_t *en) +{ + struct inode *inode; + struct dentry *root; + + s->s_blocksize = PAGE_SIZE; + s->s_blocksize_bits = PAGE_SHIFT; + s->s_magic = magic; + s->s_op = &cgroupfs_super_operations; + s->s_time_gran = 1; + + inode = new_inode(s); + if (!inode) + return -ENOMEM; + inode->i_ino = en->inum; + inode->i_mode = en->mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + inode->i_op = en->e_iops; + inode->i_fop = en->e_fops; + inode->i_private = en; + set_nlink(inode, 2); + root = d_make_root(inode); + if (!root) + return -ENOMEM; + s->s_root = root; + return 0; +} + +static int cgroupfs_fill_super (struct super_block *sb, void *data, int silent) +{ + int err = -ENOMEM; + cgroupfs_entry_t *proc, *sys, *entry, *cpu, *root_entry; + umode_t f_mode = S_IFREG | 0644, d_mode = S_IFDIR | 0755; + root_entry = cgroupfs_new_entry(sb, "/", NULL, CGROUPFS_TYPE_NORMAL_DIR, d_mode); + if (!root_entry) + return err; + root_entry->parent = root_entry; + cgroupfs_root = root_entry; + if (cgroupfs_fill_root(sb, CGROUPFS_MAGIC, root_entry)) + return err; + + proc = cgroupfs_new_entry(sb, "proc", root_entry, CGROUPFS_TYPE_NORMAL_DIR, d_mode); + if (!proc) + return err; + sys = cgroupfs_new_entry(sb, "sys", root_entry, CGROUPFS_TYPE_NORMAL_DIR, d_mode); + if (!sys) + return err; + cgroupfs_new_entry(sb, "meminfo", proc, CGROUPFS_TYPE_MEMINFO, f_mode); + cgroupfs_new_entry(sb, "cpuinfo", proc, CGROUPFS_TYPE_CPUINFO, f_mode); + cgroupfs_new_entry(sb, "stat", proc, CGROUPFS_TYPE_STAT, f_mode); + cgroupfs_new_entry(sb, "uptime", proc, CGROUPFS_TYPE_UPTIME, f_mode); + cgroupfs_new_entry(sb, "loadavg", proc, CGROUPFS_TYPE_LOADAVG, f_mode); + cgroupfs_new_entry(sb, "diskstats", proc, CGROUPFS_TYPE_DKSTATS, f_mode); + cgroupfs_new_entry(sb, "vmstat", proc, CGROUPFS_TYPE_VMSTAT, f_mode); + + entry = cgroupfs_new_entry(sb, "devices", sys, CGROUPFS_TYPE_NORMAL_DIR, d_mode); + if (!entry) + return err; + entry = cgroupfs_new_entry(sb, "system", entry, CGROUPFS_TYPE_NORMAL_DIR, d_mode); + if (!entry) + return err; + cpu = cgroupfs_new_entry(sb, "cpu", entry, CGROUPFS_TYPE_CPUDIR, d_mode); + if (!cpu) + return err; + cgroupfs_new_cpu_dir(sb, cpu, d_mode); + return 0; +} + +static struct dentry *cgroupfs_get_super(struct file_system_type *fst, + int flags, const char *devname, void *data) +{ + return mount_single(fst, flags, data, cgroupfs_fill_super); +} + +static void cgroupfs_kill_sb(struct super_block *sb) +{ + kill_anon_super(sb); + cgroupfs_umount_remove_tree(cgroupfs_root); +} + +static struct file_system_type cgroup_fs_type = { + .owner = THIS_MODULE, + .name = "cgroupfs", + .mount = cgroupfs_get_super, + .kill_sb = cgroupfs_kill_sb, +}; + +int __init cgroupfs_init(void) +{ + printk("register cgroupfs\n"); + return register_filesystem(&cgroup_fs_type); +} + +static void __exit cgroupfs_exit(void) +{ + unregister_filesystem(&cgroup_fs_type); +} + +module_init(cgroupfs_init); +module_exit(cgroupfs_exit); diff --git a/fs/cgroupfs/proc.c b/fs/cgroupfs/proc.c new file mode 100644 index 000000000000..9c2ab3cc12ac --- /dev/null +++ b/fs/cgroupfs/proc.c @@ -0,0 +1,67 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/cgroup.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> +#include "cgroupfs.h" + +extern int 
cpuset_cgroupfs_cpuinfo_show(struct seq_file *m, void *v); +extern int cpuset_cgroupfs_stat_show(struct seq_file *m, void *v); +extern int mem_cgroupfs_meminfo_show(struct seq_file *m, void *v); +extern int cpuacct_cgroupfs_uptime_show(struct seq_file *m, void *v); +extern int cpuset_cgroupfs_loadavg_show(struct seq_file *m, void *v); +extern int blkcg_cgroupfs_dkstats_show(struct seq_file *m, void *v); +extern int mem_cgroupfs_vmstat_show(struct seq_file *m, void *v); + +static int cgroup_fs_show(struct seq_file *m, void *v) +{ + cgroupfs_entry_t *private = m->private; + switch (private->cgroupfs_type) { + case CGROUPFS_TYPE_MEMINFO: + return mem_cgroupfs_meminfo_show(m, v); + case CGROUPFS_TYPE_CPUINFO: + return cpuset_cgroupfs_cpuinfo_show(m, v); + case CGROUPFS_TYPE_STAT: + return cpuset_cgroupfs_stat_show(m, v); + case CGROUPFS_TYPE_UPTIME: + return cpuacct_cgroupfs_uptime_show(m, v); + case CGROUPFS_TYPE_LOADAVG: + return cpuset_cgroupfs_loadavg_show(m, v); + case CGROUPFS_TYPE_DKSTATS: + return blkcg_cgroupfs_dkstats_show(m, v); + case CGROUPFS_TYPE_VMSTAT: + return mem_cgroupfs_vmstat_show(m, v); + default: + break; + } + return 0; +} + +static int cgroupfs_seq_open(struct inode *inode, struct file *file) +{ + void *private = inode->i_private; + return single_open(file, cgroup_fs_show, private); +} + +static int cgroupfs_seq_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations cgroupfs_file_ops = { + .open = cgroupfs_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = cgroupfs_seq_release, +}; + +void cgroupfs_set_proc_fops(cgroupfs_entry_t *en) +{ + en->e_fops = &cgroupfs_file_ops; +} diff --git a/fs/cgroupfs/sys.c b/fs/cgroupfs/sys.c new file mode 100644 index 000000000000..ecf1c398d002 --- /dev/null +++ b/fs/cgroupfs/sys.c @@ -0,0 +1,35 @@ +#include <linux/fs.h> +#include <linux/fs_context.h> +#include "cgroupfs.h" + +extern int cpu_get_max_cpus(struct task_struct *p); +extern int cpuset_cgroups_cpu_allowed(struct task_struct *task, int cpu, int once); + +static int cgroupfs_dop_revalidate(struct dentry *dentry, unsigned int flags) +{ + return 0; +} + +int cgroupfs_cpu_dir_filter(int cpu, int *max_cpu, int *counted_cpu, int once) +{ + if (!once && *counted_cpu == -1) { + *max_cpu = cpu_get_max_cpus(current); + *counted_cpu = 0; + } + if (!once && *counted_cpu >= *max_cpu) + return 1; + if (!cpuset_cgroups_cpu_allowed(current, cpu, once)) + return 1; + *counted_cpu += 1; + return 0; +} + +const struct dentry_operations cgroupfs_dir_dops = { + .d_revalidate = cgroupfs_dop_revalidate, + .d_delete = always_delete_dentry, +}; + +void cgroupfs_set_sys_dops(cgroupfs_entry_t *en) +{ + en->e_dops = &cgroupfs_dir_dops; +}
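The proc.c above leans on the single_open() seq_file idiom: open binds a show callback and private data, while seq_read()/seq_lseek()/single_release() handle the rest. A self-contained sketch of the same idiom with hypothetical names (editor's illustration, not part of the patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* m->private is the pointer handed to single_open() below */
	seq_puts(m, "hello from a single_open() file\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};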
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 689162e2e175..3150c19cdc2f 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c @@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, return 0; } else if ((cls != ASN1_CTX) || (con != ASN1_CON) || (tag != ASN1_EOC)) { - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", - cls, con, tag, end, *end); + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", + cls, con, tag, end); return 0; } @@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, return 0; } else if ((cls != ASN1_UNI) || (con != ASN1_CON) || (tag != ASN1_SEQ)) { - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", - cls, con, tag, end, *end); + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n", + cls, con, tag, end); return 0; } @@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, return 0; } else if ((cls != ASN1_CTX) || (con != ASN1_CON) || (tag != ASN1_EOC)) { - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", - cls, con, tag, end, *end); + cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", + cls, con, tag, end); return 0; } @@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, return 0; } else if ((cls != ASN1_UNI) || (con != ASN1_CON) || (tag != ASN1_SEQ)) { - cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", - cls, con, tag, end, *end); + cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n", + cls, con, tag, sequence_end); return 0; } diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 498777d859eb..9bd03a231032 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -488,7 +488,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, else if (map_chars == SFM_MAP_UNI_RSVD) { bool end_of_string; - if (i == srclen - 1) + /* + * Remap spaces and periods found at the end of every + * component of the path. The special cases of '.' and + * '..' do not need to be dealt with explicitly because + * they are addressed in namei.c:link_path_walk(). + */ + if ((i == srclen - 1) || (source[i+1] == '\\')) end_of_string = true; else end_of_string = false; diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1619af216677..1f55072aa302 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -1198,6 +1198,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); } else if (mode_from_special_sid) { rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true); + kfree(pntsd); } else { /* get approximated mode from ACL */ rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 115f063497ff..41b3c5fc958c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -278,7 +278,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) rc = server->ops->queryfs(xid, tcon, buf); free_xid(xid); - return 0; + return rc; } static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f9cbdfc1591b..b16c994414ab 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -268,8 +268,9 @@ struct smb_version_operations { int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); - void (*downgrade_oplock)(struct TCP_Server_Info *, - struct cifsInodeInfo *, bool); + void (*downgrade_oplock)(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -1261,6 +1262,8 @@ struct cifsFileInfo { unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool oplock_break_cancelled:1; + unsigned int oplock_epoch; /* epoch from the lease break */ + __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex;
/* prevents reopen race after dead ses*/ @@ -1408,7 +1411,7 @@ struct cifsInodeInfo { unsigned int epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ -#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ +#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index c8494fa5e19d..4b8632eda2bd 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -2135,8 +2135,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata) } } + kref_put(&wdata2->refcount, cifs_writedata_release); if (rc) { - kref_put(&wdata2->refcount, cifs_writedata_release); if (is_retryable_error(rc)) continue; i += nr_pages; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index bcda48c03882..ab9eeb5ff8e5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -371,8 +371,10 @@ static int reconn_set_ipaddr(struct TCP_Server_Info *server) return rc; } + spin_lock(&cifs_tcp_ses_lock); rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, strlen(ipaddr)); + spin_unlock(&cifs_tcp_ses_lock); kfree(ipaddr); return !rc ? -1 : 0; @@ -612,26 +614,26 @@ cifs_reconnect(struct TCP_Server_Info *server) try_to_freeze(); mutex_lock(&server->srv_mutex); +#ifdef CONFIG_CIFS_DFS_UPCALL /* * Set up next DFS target server (if any) for reconnect. If DFS * feature is disabled, then we will retry last server we * connected to before. */ + reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); +#endif + rc = reconn_set_ipaddr(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } + if (cifs_rdma_enabled(server)) rc = smbd_reconnect(server); else rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); -#ifdef CONFIG_CIFS_DFS_UPCALL - reconn_inval_dfs_target(server, cifs_sb, &tgt_list, - &tgt_it); -#endif - rc = reconn_set_ipaddr(server); - if (rc) { - cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", - __func__, rc); - } mutex_unlock(&server->srv_mutex); msleep(3000); } else { @@ -973,6 +975,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); + cancel_delayed_work_sync(&server->echo); + spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); @@ -3360,6 +3364,10 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &ses->tcon_list) { tcon = list_entry(tmp, struct cifs_tcon, tcon_list); +#ifdef CONFIG_CIFS_DFS_UPCALL + if (tcon->dfs_path) + continue; +#endif if (!match_tcon(tcon, volume_info)) continue; ++tcon->tc_count; @@ -5275,9 +5283,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) vol_info->nocase = master_tcon->nocase; vol_info->nohandlecache = master_tcon->nohandlecache; vol_info->local_lease = master_tcon->local_lease; + vol_info->no_lease = master_tcon->no_lease; + vol_info->resilient = master_tcon->use_resilient; + vol_info->persistent = master_tcon->use_persistent; + vol_info->handle_timeout = master_tcon->handle_timeout; vol_info->no_linux_ext = !master_tcon->unix_ext; + vol_info->linux_ext = master_tcon->posix_extensions; 
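/*
 * Editor's note, not part of the patch: cifs_construct_tcon() builds a
 * private tcon for each user of a multiuser mount, so every option that
 * affects wire behaviour (leases, resilient and persistent handles, the
 * handle timeout, sealing, POSIX extensions) has to be mirrored from the
 * master tcon here; a field that is skipped silently reverts to its
 * default on the per-user connection, which is the class of bug the
 * added assignments above fix.
 */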
vol_info->sectype = master_tcon->ses->sectype; vol_info->sign = master_tcon->ses->sign; + vol_info->seal = master_tcon->seal; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { @@ -5303,10 +5317,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) goto out; } - /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ - if (tcon->posix_extensions) - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; - if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, vol_info); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 5a35850ccb1a..9ae9a514676c 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -738,6 +738,7 @@ static int cifs_d_revalidate(struct dentry *direntry, unsigned int flags) { struct inode *inode; + int rc; if (flags & LOOKUP_RCU) return -ECHILD; @@ -747,8 +748,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) CIFS_I(inode)->time = 0; /* force reval */ - if (cifs_revalidate_dentry(direntry)) - return 0; + rc = cifs_revalidate_dentry(direntry); + if (rc) { + cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); + switch (rc) { + case -ENOENT: + case -ESTALE: + /* + * Those errors mean the dentry is invalid + * (file was deleted or recreated) + */ + return 0; + default: + /* + * Otherwise some unexpected error happened + * report it as-is to VFS layer + */ + return rc; + } + } else { /* * If the inode wasn't known to be a dfs entry when diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 35c55cf38a35..1aac8d38f887 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -164,6 +164,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, goto posix_open_ret; } } else { + cifs_revalidate_mapping(*pinode); cifs_fattr_to_inode(*pinode, &fattr); } @@ -3778,7 +3779,7 @@ again: if (rc == -ENODATA) rc = 0; - ctx->rc = (rc == 0) ? ctx->total_len : rc; + ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc; mutex_unlock(&ctx->aio_mutex); @@ -3997,7 +3998,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) * than it negotiated since it will refuse the read * then. */ - if ((tcon->ses) && !(tcon->ses->capabilities & + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)) { current_read_size = min_t(uint, current_read_size, CIFSMaxBufSize); @@ -4269,7 +4270,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; __SetPageLocked(page); - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); + if (rc) { __ClearPageLocked(page); break; } @@ -4285,6 +4287,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; + int err = 0; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); @@ -4329,7 +4332,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. 
*/ - while (!list_empty(page_list)) { + while (!list_empty(page_list) && !err) { unsigned int i, nr_pages, bytes, rsize; loff_t offset; struct page *page, *tpage; @@ -4362,9 +4365,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, return 0; } - rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, + nr_pages = 0; + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, &nr_pages, &offset, &bytes); - if (rc) { + if (!nr_pages) { add_credits_and_wake_if(server, credits, 0); break; } @@ -4675,12 +4679,13 @@ void cifs_oplock_break(struct work_struct *work) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; + bool purge_cache = false; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); - server->ops->downgrade_oplock(server, cinode, - test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, + cfile->oplock_epoch, &purge_cache); if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { @@ -4695,18 +4700,21 @@ void cifs_oplock_break(struct work_struct *work) else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); - if (!CIFS_CACHE_READ(cinode)) { + if (!CIFS_CACHE_READ(cinode) || purge_cache) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_zap_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); + if (CIFS_CACHE_WRITE(cinode)) + goto oplock_break_ack; } rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc); +oplock_break_ack: /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7c5e983fe385..fd9e289f3e72 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -898,6 +898,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n", __func__, rc); @@ -906,6 +908,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n", __func__, rc); @@ -2264,6 +2268,15 @@ set_size_out: if (rc == 0) { cifsInode->server_eof = attrs->ia_size; cifs_setsize(inode, attrs->ia_size); + + /* + * The man page of truncate says if the size changed, + * then the st_ctime and st_mtime fields for the file + * are updated. + */ + attrs->ia_ctime = attrs->ia_mtime = current_time(inode); + attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; + cifs_truncate_page(inode->i_mapping, inode->i_size); } @@ -2454,25 +2467,26 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) /* * Attempt to flush data before changing attributes. We need to do - * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the - * ownership or mode then we may also need to do this. Here, we take - * the safe way out and just do the flush on all setattr requests. If - * the flush returns error, store it to report later and continue. + * this for ATTR_SIZE and ATTR_MTIME. 
If the flush of the data + * returns error, store it to report later and continue. * * BB: This should be smarter. Why bother flushing pages that * will be truncated anyway? Also, should we error out here if - * the flush returns error? + * the flush returns error? Do we need to check for ATTR_MTIME_SET flag? */ - rc = filemap_write_and_wait(inode->i_mapping); - if (is_interrupt_error(rc)) { - rc = -ERESTARTSYS; - goto cifs_setattr_exit; + if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) { + rc = filemap_write_and_wait(inode->i_mapping); + if (is_interrupt_error(rc)) { + rc = -ERESTARTSYS; + goto cifs_setattr_exit; + } + mapping_set_error(inode->i_mapping, rc); } - mapping_set_error(inode->i_mapping, rc); rc = 0; - if (attrs->ia_valid & ATTR_MTIME) { + if ((attrs->ia_valid & ATTR_MTIME) && + !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile); if (!rc) { tcon = tlink_tcon(wfile->tlink); @@ -2600,13 +2614,18 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); + int rc, retries = 0; - if (pTcon->unix_ext) - return cifs_setattr_unix(direntry, attrs); - - return cifs_setattr_nounix(direntry, attrs); + do { + if (pTcon->unix_ext) + rc = cifs_setattr_unix(direntry, attrs); + else + rc = cifs_setattr_nounix(direntry, attrs); + retries++; + } while (is_retryable_error(rc) && retries < 2); /* BB: add cifs_setattr_legacy for really old servers */ + return rc; } #if 0 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 5ad83bdb9bea..40ca394fd5de 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -488,21 +488,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. 
- */ - if (pSMB->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - - cifs_queue_oplock_break(netfile); + netfile->oplock_epoch = 0; + netfile->oplock_level = pSMB->OplockLevel; netfile->oplock_break_cancelled = false; + cifs_queue_oplock_break(netfile); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 195766221a7a..e523c05a4487 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -369,12 +369,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) static void cifs_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - cifs_set_oplock_level(cinode, OPLOCK_READ); - else - cifs_set_oplock_level(cinode, 0); + cifs_set_oplock_level(cinode, oplock); } static bool diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 14265b4bbcc0..7177720e822e 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work) kfree(lw); } -static bool -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, - struct smb2_lease_break_work *lw) +static void +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, + __le32 new_lease_state) +{ + struct smb2_lease_break_work *lw; + + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); + if (!lw) { + cifs_put_tlink(tlink); + return; + } + + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); + lw->tlink = tlink; + lw->lease_state = new_lease_state; + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); + queue_work(cifsiod_wq, &lw->lease_break); +} + +static bool +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { - bool found; __u8 lease_state; struct list_head *tmp; struct cifsFileInfo *cfile; - struct cifs_pending_open *open; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); @@ -534,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, cifs_dbg(FYI, "found in the open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); if (ack_req) cfile->oplock_break_cancelled = false; @@ -543,40 +559,38 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set or clear flags depending on the lease state being READ. - * HANDLE caching flag should be added when the client starts - * to defer closing remote file handles with HANDLE leases. 
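For reference, the lease-state bits discussed here follow MS-SMB2. A small decoder sketch, treating the flags as plain host-order values rather than the driver's _HE variants:

#include <stdio.h>

#define SMB2_LEASE_READ_CACHING   0x01
#define SMB2_LEASE_HANDLE_CACHING 0x02
#define SMB2_LEASE_WRITE_CACHING  0x04

static void print_lease(unsigned int state)
{
	printf("lease %#x:%s%s%s\n", state,
	       state & SMB2_LEASE_READ_CACHING   ? " read"   : "",
	       state & SMB2_LEASE_HANDLE_CACHING ? " handle" : "",
	       state & SMB2_LEASE_WRITE_CACHING  ? " write"  : "");
}

int main(void)
{
	print_lease(SMB2_LEASE_READ_CACHING | SMB2_LEASE_HANDLE_CACHING);
	print_lease(0);	/* lease fully broken: no caching rights remain */
	return 0;
}
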
- */ - if (lease_state & SMB2_LEASE_READ_CACHING_HE) - set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); + cfile->oplock_level = lease_state; cifs_queue_oplock_break(cfile); - kfree(lw); return true; } - found = false; + return false; +} + +static struct cifs_pending_open * +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, + struct smb2_lease_break *rsp) +{ + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); + int ack_req = le32_to_cpu(rsp->Flags & + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); + struct cifs_pending_open *open; + struct cifs_pending_open *found = NULL; + list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { - found = true; - memcpy(lw->lease_key, open->lease_key, - SMB2_LEASE_KEY_SIZE); - lw->tlink = cifs_get_tlink(open->tlink); - queue_work(cifsiod_wq, &lw->lease_break); + found = open; } cifs_dbg(FYI, "found in the pending open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); open->oplock = lease_state; } @@ -592,14 +606,7 @@ smb2_is_valid_lease_break(char *buffer) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct smb2_lease_break_work *lw; - - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); - if (!lw) - return false; - - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); - lw->lease_state = rsp->NewLeaseState; + struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); @@ -617,11 +624,27 @@ smb2_is_valid_lease_break(char *buffer) spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); - if (smb2_tcon_has_lease(tcon, rsp, lw)) { + if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + open = smb2_tcon_find_pending_open_lease(tcon, + rsp); + if (open) { + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; + struct tcon_link *tlink; + + tlink = cifs_get_tlink(open->tlink); + memcpy(lease_key, open->lease_key, + SMB2_LEASE_KEY_SIZE); + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); + smb2_queue_pending_open_break(tlink, + lease_key, + rsp->NewLeaseState); + return true; + } spin_unlock(&tcon->open_file_lock); if (tcon->crfid.is_valid && @@ -639,7 +662,6 @@ smb2_is_valid_lease_break(char *buffer) } } spin_unlock(&cifs_tcp_ses_lock); - kfree(lw); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); return false; } @@ -701,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. 
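The hunk that follows applies the same restructuring to SMB2 oplock breaks: record the server's raw level and an epoch on the open file under the spinlock, then defer the heavy lifting to a workqueue. A simplified stand-alone model (the struct layout is illustrative, field names mirror the driver):

#include <stdio.h>

struct open_file {
	unsigned int oplock_level;
	unsigned int oplock_epoch;
	int break_cancelled;
};

static void record_break(struct open_file *f, unsigned int level,
			 unsigned int epoch)
{
	f->oplock_level = level;
	f->oplock_epoch = epoch;
	f->break_cancelled = 0;
	/* the real code queues cifs_oplock_break() on a workqueue here */
}

int main(void)
{
	struct open_file f = { 0 };

	record_break(&f, 0x01, 3);
	printf("level=%#x epoch=%u\n", f.oplock_level, f.oplock_epoch);
	return 0;
}
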
- */ - if (rsp->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = 0; + cfile->oplock_level = rsp->OplockLevel; + spin_unlock(&cfile->file_info_lock); cifs_queue_oplock_break(cfile); @@ -725,8 +738,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) } } spin_unlock(&cifs_tcp_ses_lock); - cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); - return false; + cifs_dbg(FYI, "No file id matched, oplock break ignored\n"); + return true; } void diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 76eacffb24d8..997a106f28d3 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -259,7 +259,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) } static struct mid_q_entry * -smb2_find_mid(struct TCP_Server_Info *server, char *buf) +__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) { struct mid_q_entry *mid; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf; @@ -276,6 +276,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) (mid->mid_state == MID_REQUEST_SUBMITTED) && (mid->command == shdr->Command)) { kref_get(&mid->refcount); + if (dequeue) { + list_del_init(&mid->qhead); + mid->mid_flags |= MID_DELETED; + } spin_unlock(&GlobalMid_Lock); return mid; } @@ -284,6 +288,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) return NULL; } +static struct mid_q_entry * +smb2_find_mid(struct TCP_Server_Info *server, char *buf) +{ + return __smb2_find_mid(server, buf, false); +} + +static struct mid_q_entry * +smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf) +{ + return __smb2_find_mid(server, buf, true); +} + static void smb2_dump_detail(void *buf, struct TCP_Server_Info *server) { @@ -462,7 +478,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, goto out; } - if (bytes_left || p->Next) + /* Azure rounds the buffer size up 8, to a 16 byte boundary */ + if ((bytes_left > 8) || p->Next) cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); @@ -664,6 +681,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; + if (!server->ops->new_lease_key) + return -EIO; + + server->ops->new_lease_key(pfid); + memset(rqst, 0, sizeof(rqst)); resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; memset(rsp_iov, 0, sizeof(rsp_iov)); @@ -731,6 +753,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) /* close extra handle outside of crit sec */ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } + rc = 0; goto oshr_free; } @@ -1173,7 +1196,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rqst[1].rq_iov = si_iov; rqst[1].rq_nvec = 1; - len = sizeof(ea) + ea_name_len + ea_value_len + 1; + len = sizeof(*ea) + ea_name_len + ea_value_len + 1; ea = kzalloc(len, GFP_KERNEL); if (ea == NULL) { rc = -ENOMEM; @@ -1559,35 +1582,32 @@ smb2_ioctl_query_info(const unsigned int xid, if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && - le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) { - rc = -EFAULT; - goto iqinf_exit; - } - if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) { - rc = -EFAULT; - 
goto iqinf_exit; - } + le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length + > rsp_iov[1].iov_len) + goto e_fault; + + if (copy_to_user(&pqi->input_buffer_length, + &qi.input_buffer_length, + sizeof(qi.input_buffer_length))) + goto e_fault; + if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), - qi.input_buffer_length)) { - rc = -EFAULT; - goto iqinf_exit; - } + qi.input_buffer_length)) + goto e_fault; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); - if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) { - rc = -EFAULT; - goto iqinf_exit; - } - if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) { - rc = -EFAULT; - goto iqinf_exit; - } + if (copy_to_user(&pqi->input_buffer_length, + &qi.input_buffer_length, + sizeof(qi.input_buffer_length))) + goto e_fault; + + if (copy_to_user(pqi + 1, qi_rsp->Buffer, + qi.input_buffer_length)) + goto e_fault; } iqinf_exit: @@ -1603,6 +1623,10 @@ smb2_ioctl_query_info(const unsigned int xid, free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); return rc; + +e_fault: + rc = -EFAULT; + goto iqinf_exit; } static ssize_t @@ -2964,6 +2988,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) @@ -3030,6 +3059,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, return rc; } + /* + * We implement the punch hole through ioctl, so we need remove the page + * caches first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); + cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); @@ -3314,22 +3349,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, static void smb2_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, - 0, NULL); - else - server->ops->set_oplock_level(cinode, 0, 0, NULL); + server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void -smb21_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) +smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); + +static void +smb3_downgrade_oplock(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - server->ops->set_oplock_level(cinode, - set_level2 ? 
SMB2_LEASE_READ_CACHING_HE : - 0, 0, NULL); + unsigned int old_state = cinode->oplock; + unsigned int old_epoch = cinode->epoch; + unsigned int new_state; + + if (epoch > old_epoch) { + smb21_set_oplock_level(cinode, oplock, 0, NULL); + cinode->epoch = epoch; + } + + new_state = cinode->oplock; + *purge_cache = false; + + if ((old_state & CIFS_CACHE_READ_FLG) != 0 && + (new_state & CIFS_CACHE_READ_FLG) == 0) + *purge_cache = true; + else if (old_state == new_state && (epoch - old_epoch > 1)) + *purge_cache = true; } static void @@ -3642,7 +3693,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) } spin_unlock(&cifs_tcp_ses_lock); - return 1; + return -EAGAIN; } /* * Encrypt or decrypt @rqst message. @rqst[0] has the following format: @@ -3673,7 +3724,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, if (rc) { cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__, enc ? "en" : "de"); - return 0; + return rc; } rc = smb3_crypto_aead_allocate(server); @@ -3852,7 +3903,8 @@ smb3_is_transform_hdr(void *buf) static int decrypt_raw_data(struct TCP_Server_Info *server, char *buf, unsigned int buf_data_size, struct page **pages, - unsigned int npages, unsigned int page_data_size) + unsigned int npages, unsigned int page_data_size, + bool is_offloaded) { struct kvec iov[2]; struct smb_rqst rqst = {NULL}; @@ -3878,7 +3930,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf, memmove(buf, iov[1].iov_base, buf_data_size); - server->total_read = buf_data_size + page_data_size; + if (!is_offloaded) + server->total_read = buf_data_size + page_data_size; return rc; } @@ -3943,7 +3996,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size, static int handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, char *buf, unsigned int buf_len, struct page **pages, - unsigned int npages, unsigned int page_data_size) + unsigned int npages, unsigned int page_data_size, + bool is_offloaded) { unsigned int data_offset; unsigned int data_len; @@ -3965,7 +4019,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, if (server->ops->is_session_expired && server->ops->is_session_expired(buf)) { - cifs_reconnect(server); + if (!is_offloaded) + cifs_reconnect(server); wake_up(&server->response_q); return -1; } @@ -3990,7 +4045,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, cifs_dbg(FYI, "%s: server returned error %d\n", __func__, rdata->result); /* normal error on read response */ - dequeue_mid(mid, false); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_RECEIVED; + else + dequeue_mid(mid, false); return 0; } @@ -4014,7 +4072,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", __func__, data_offset); rdata->result = -EIO; - dequeue_mid(mid, rdata->result); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_MALFORMED; + else + dequeue_mid(mid, rdata->result); return 0; } @@ -4030,21 +4091,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n", __func__, data_offset); rdata->result = -EIO; - dequeue_mid(mid, rdata->result); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_MALFORMED; + else + dequeue_mid(mid, rdata->result); return 0; } if (data_len > page_data_size - pad_len) { /* data_len is corrupt -- discard frame */ rdata->result = -EIO; - dequeue_mid(mid, 
rdata->result); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_MALFORMED; + else + dequeue_mid(mid, rdata->result); return 0; } rdata->result = init_read_bvec(pages, npages, page_data_size, cur_off, &bvec); if (rdata->result != 0) { - dequeue_mid(mid, rdata->result); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_MALFORMED; + else + dequeue_mid(mid, rdata->result); return 0; } @@ -4059,7 +4129,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, /* read response payload cannot be in both buf and pages */ WARN_ONCE(1, "buf can not contain only a part of read data"); rdata->result = -EIO; - dequeue_mid(mid, rdata->result); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_MALFORMED; + else + dequeue_mid(mid, rdata->result); return 0; } @@ -4070,7 +4143,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, if (length < 0) return length; - dequeue_mid(mid, false); + if (is_offloaded) + mid->mid_state = MID_RESPONSE_RECEIVED; + else + dequeue_mid(mid, false); return length; } @@ -4092,22 +4168,41 @@ static void smb2_decrypt_offload(struct work_struct *work) struct mid_q_entry *mid; rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size, - dw->ppages, dw->npages, dw->len); + dw->ppages, dw->npages, dw->len, true); if (rc) { cifs_dbg(VFS, "error decrypting rc=%d\n", rc); goto free_pages; } dw->server->lstrp = jiffies; - mid = smb2_find_mid(dw->server, dw->buf); + mid = smb2_find_dequeue_mid(dw->server, dw->buf); if (mid == NULL) cifs_dbg(FYI, "mid not found\n"); else { mid->decrypted = true; rc = handle_read_data(dw->server, mid, dw->buf, dw->server->vals->read_rsp_size, - dw->ppages, dw->npages, dw->len); - mid->callback(mid); + dw->ppages, dw->npages, dw->len, + true); + if (rc >= 0) { +#ifdef CONFIG_CIFS_STATS2 + mid->when_received = jiffies; +#endif + mid->callback(mid); + } else { + spin_lock(&GlobalMid_Lock); + if (dw->server->tcpStatus == CifsNeedReconnect) { + mid->mid_state = MID_RETRY_NEEDED; + spin_unlock(&GlobalMid_Lock); + mid->callback(mid); + } else { + mid->mid_state = MID_REQUEST_SUBMITTED; + mid->mid_flags &= ~(MID_DELETED); + list_add_tail(&mid->qhead, + &dw->server->pending_mid_q); + spin_unlock(&GlobalMid_Lock); + } + } cifs_mid_q_entry_release(mid); } @@ -4198,7 +4293,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid, non_offloaded_decrypt: rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size, - pages, npages, len); + pages, npages, len, false); if (rc) goto free_pages; @@ -4210,7 +4305,7 @@ non_offloaded_decrypt: (*mid)->decrypted = true; rc = handle_read_data(server, *mid, buf, server->vals->read_rsp_size, - pages, npages, len); + pages, npages, len, false); } free_pages: @@ -4254,7 +4349,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server, server->total_read += length; buf_size = pdu_length - sizeof(struct smb2_transform_hdr); - length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0); + length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false); if (length) return length; @@ -4355,7 +4450,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid) char *buf = server->large_buf ? 
server->bigbuf : server->smallbuf; return handle_read_data(server, mid, buf, server->pdu_size, - NULL, 0, 0); + NULL, 0, 0, false); } static int @@ -4589,7 +4684,7 @@ struct smb_version_operations smb21_operations = { .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -4689,7 +4784,7 @@ struct smb_version_operations smb30_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, @@ -4797,7 +4892,7 @@ struct smb_version_operations smb311_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c8f304cae3f3..81d9c4ea0e8f 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -490,8 +490,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; pneg_ctxt->DataLength = cpu_to_le16(38); pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); - pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); - get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); + pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE); + get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE); pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; } @@ -617,6 +617,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) if (len < MIN_PREAUTH_CTXT_DATA_LEN) { printk_once(KERN_WARNING "server sent bad preauth context\n"); return; + } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) { + pr_warn_once("server sent invalid SaltLength\n"); + return; } if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n"); @@ -1323,6 +1326,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); + if (rc == -ENOKEY) + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } @@ -2747,7 +2752,9 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, * response size smaller. 
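The next hunk charges the ioctl request credits proportional to the larger of its input and output buffers. A minimal model of that arithmetic, using the kernel's DIV_ROUND_UP definition and 64KiB credit units; the sample sizes are examples:

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* one credit per 64KiB unit of the larger direction of the transfer */
static unsigned short credit_charge(unsigned int indatalen,
				    unsigned int max_response_size)
{
	unsigned int len = indatalen > max_response_size ?
			   indatalen : max_response_size;
	return DIV_ROUND_UP(len, SMB2_MAX_BUFFER_SIZE);
}

int main(void)
{
	printf("64KiB response: charge=%u\n", credit_charge(0, 65536));
	printf("1MiB response:  charge=%u\n", credit_charge(24, 1048576));
	return 0;
}
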
*/ req->MaxOutputResponse = cpu_to_le32(max_response_size); - + req->sync_hdr.CreditCharge = + cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), + SMB2_MAX_BUFFER_SIZE)); if (is_fsctl) req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); else @@ -2941,8 +2948,8 @@ SMB2_close_free(struct smb_rqst *rqst) } int -SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon, - u64 persistent_fid, u64 volatile_fid, int flags) +SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid) { struct smb_rqst rqst; struct smb2_close_rsp *rsp = NULL; @@ -2951,6 +2958,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon, struct kvec rsp_iov; int resp_buftype = CIFS_NO_BUFFER; int rc = 0; + int flags = 0; cifs_dbg(FYI, "Close\n"); @@ -2989,27 +2997,17 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon, close_exit: SMB2_close_free(&rqst); free_rsp_buf(resp_buftype, rsp); - return rc; -} - -int -SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, - u64 persistent_fid, u64 volatile_fid) -{ - int rc; - int tmp_rc; - - rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0); /* retry close in a worker thread if this one is interrupted */ - if (rc == -EINTR) { + if (is_interrupt_error(rc)) { + int tmp_rc; + tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid); if (tmp_rc) cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n", persistent_fid, tmp_rc); } - return rc; } @@ -3745,8 +3743,7 @@ smb2_async_readv(struct cifs_readdata *rdata) if (rdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); - shdr->CreditRequest = - cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); + shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8); rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (rc) @@ -4040,8 +4037,7 @@ smb2_async_writev(struct cifs_writedata *wdata, if (wdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); - shdr->CreditRequest = - cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); + shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8); rc = adjust_credits(server, &wdata->credits, wdata->bytes); if (rc) diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 0abfde6d0b05..739556e385be 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -227,7 +227,7 @@ struct smb2_negotiate_req { __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */ __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */ __le16 Reserved2; - __le16 Dialects[1]; /* One dialect (vers=) at a time for now */ + __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ } __packed; /* Dialects */ @@ -271,12 +271,20 @@ struct smb2_neg_context { /* Followed by array of data */ } __packed; -#define SMB311_SALT_SIZE 32 +#define SMB311_LINUX_CLIENT_SALT_SIZE 32 /* Hash Algorithm Types */ #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001) #define SMB2_PREAUTH_HASH_SIZE 64 -#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6) +/* + * SaltLength that the server send can be zero, so the only three required + * fields (all __le16) end up six bytes total, so the minimum context data len + * in the response is six bytes which accounts for + * + * HashAlgorithmCount, SaltLength, and 1 HashAlgorithm. 
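That six-byte minimum, plus the server-declared salt, is what the decode_preauth_context() change earlier in this patch enforces. A user-space sketch of the two length checks:

#include <stdio.h>
#include <stdint.h>

#define MIN_PREAUTH_CTXT_DATA_LEN 6

static int preauth_ctxt_ok(uint16_t data_len, uint16_t salt_len)
{
	if (data_len < MIN_PREAUTH_CTXT_DATA_LEN)
		return 0;	/* truncated: three __le16 fields missing */
	if (data_len < MIN_PREAUTH_CTXT_DATA_LEN + salt_len)
		return 0;	/* bogus SaltLength, would read past buffer */
	return 1;
}

int main(void)
{
	printf("len=38 salt=32 -> %d\n", preauth_ctxt_ok(38, 32));	/* ok */
	printf("len=38 salt=64 -> %d\n", preauth_ctxt_ok(38, 64));	/* bad */
	return 0;
}
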
+ */ +#define MIN_PREAUTH_CTXT_DATA_LEN 6 + struct smb2_preauth_neg_context { __le16 ContextType; /* 1 */ __le16 DataLength; @@ -284,7 +292,7 @@ struct smb2_preauth_neg_context { __le16 HashAlgorithmCount; /* 1 */ __le16 SaltLength; __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */ - __u8 Salt[SMB311_SALT_SIZE]; + __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE]; } __packed; /* Encryption Algorithms Ciphers */ @@ -1386,7 +1394,7 @@ struct smb2_oplock_break { struct smb2_lease_break { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* Must be 44 */ - __le16 Reserved; + __le16 Epoch; __le32 Flags; __u8 LeaseKey[16]; __le32 CurrentLeaseState; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 2a12a2fa38a2..57f7075a3587 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -156,8 +156,6 @@ extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id); -extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon, - u64 persistent_fid, u64 volatile_fid, int flags); extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, u64 persistent_file_id, u64 volatile_file_id); extern void SMB2_close_free(struct smb_rqst *rqst); diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e67a43fd037c..61e7df4d9cb1 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -339,9 +339,9 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, if (ssocket == NULL) return -EAGAIN; - if (signal_pending(current)) { - cifs_dbg(FYI, "signal is pending before sending any data\n"); - return -EINTR; + if (fatal_signal_pending(current)) { + cifs_dbg(FYI, "signal pending before send request\n"); + return -ERESTARTSYS; } /* cork the socket */ @@ -431,7 +431,7 @@ unmask: if (signal_pending(current) && (total_len != send_length)) { cifs_dbg(FYI, "signal is pending after attempt to send\n"); - rc = -EINTR; + rc = -ERESTARTSYS; } /* uncork it */ @@ -466,7 +466,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst, int flags) { struct kvec iov; - struct smb2_transform_hdr tr_hdr; + struct smb2_transform_hdr *tr_hdr; struct smb_rqst cur_rqst[MAX_COMPOUND]; int rc; @@ -476,28 +476,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, if (num_rqst > MAX_COMPOUND - 1) return -ENOMEM; - memset(&cur_rqst[0], 0, sizeof(cur_rqst)); - memset(&iov, 0, sizeof(iov)); - memset(&tr_hdr, 0, sizeof(tr_hdr)); - - iov.iov_base = &tr_hdr; - iov.iov_len = sizeof(tr_hdr); - cur_rqst[0].rq_iov = &iov; - cur_rqst[0].rq_nvec = 1; - if (!server->ops->init_transform_rq) { cifs_server_dbg(VFS, "Encryption requested but transform " "callback is missing\n"); return -EIO; } + tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS); + if (!tr_hdr) + return -ENOMEM; + + memset(&cur_rqst[0], 0, sizeof(cur_rqst)); + memset(&iov, 0, sizeof(iov)); + memset(tr_hdr, 0, sizeof(*tr_hdr)); + + iov.iov_base = tr_hdr; + iov.iov_len = sizeof(*tr_hdr); + cur_rqst[0].rq_iov = &iov; + cur_rqst[0].rq_nvec = 1; + rc = server->ops->init_transform_rq(server, num_rqst + 1, &cur_rqst[0], rqst); if (rc) - return rc; + goto out; rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]); smb3_free_compound_rqst(num_rqst, &cur_rqst[1]); +out: + kfree(tr_hdr); return rc; } @@ -522,7 +528,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, const int timeout, const int flags, unsigned int *instance) { - int rc; + 
long rc; int *credits; int optype; long int t; @@ -653,10 +659,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num, spin_lock(&server->req_lock); if (*credits < num) { /* - * Return immediately if not too many requests in flight since - * we will likely be stuck on waiting for credits. + * If the server is tight on resources or just gives us less + * credits for other reasons (e.g. requests are coming out of + * order and the server delays granting more credits until it + * processes a missing mid) and we exhausted most available + * credits there may be situations when we try to send + * a compound request but we don't have enough credits. At this + * point the client needs to decide if it should wait for + * additional credits or fail the request. If at least one + * request is in flight there is a high probability that the + * server will return enough credits to satisfy this compound + * request. + * + * Return immediately if no requests in flight since we will be + * stuck on waiting for credits. */ - if (server->in_flight < num - *credits) { + if (server->in_flight == 0) { spin_unlock(&server->req_lock); return -ENOTSUPP; } @@ -1116,9 +1134,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, /* * Compounding is never used during session establish. */ - if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) { + mutex_lock(&server->srv_mutex); smb311_update_preauth_hash(ses, rqst[0].rq_iov, rqst[0].rq_nvec); + mutex_unlock(&server->srv_mutex); + } for (i = 0; i < num_rqst; i++) { rc = wait_for_response(server, midQ[i]); @@ -1127,7 +1148,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, } if (rc != 0) { for (; i < num_rqst; i++) { - cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n", + cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); spin_lock(&GlobalMid_Lock); @@ -1186,7 +1207,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, .iov_base = resp_iov[0].iov_base, .iov_len = resp_iov[0].iov_len }; + mutex_lock(&server->srv_mutex); smb311_update_preauth_hash(ses, &iov, 1); + mutex_unlock(&server->srv_mutex); } out: diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index b7f9ffa1d5f1..7f49b283c680 100644 --- a/fs/compat_binfmt_elf.c +++ b/fs/compat_binfmt_elf.c @@ -113,6 +113,11 @@ #define arch_setup_additional_pages compat_arch_setup_additional_pages #endif +#ifdef compat_elf_read_implies_exec +#undef elf_read_implies_exec +#define elf_read_implies_exec compat_elf_read_implies_exec +#endif + /* * Rename a few of the symbols that binfmt_elf.c will define. 
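compat_binfmt_elf.c works by redefining symbols before textually including binfmt_elf.c, which is why the new compat_elf_read_implies_exec hook is wired up with a conditional #undef/#define. A stand-alone illustration of the same preprocessor override pattern, with made-up names:

#include <stdio.h>

static int native_check(void) { return 0; }
static int compat_check(void) { return 1; }

#define arch_check native_check

#ifdef USE_COMPAT	/* compile with -DUSE_COMPAT to swap the hook */
#undef arch_check
#define arch_check compat_check
#endif

int main(void)
{
	/* expands to native_check() or compat_check() at compile time */
	printf("arch_check() = %d\n", arch_check());
	return 0;
}
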
* These are all local so the names don't really matter, but it diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index cf7b7e1d5bd7..cb733652ecca 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1519,6 +1519,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) spin_lock(&configfs_dirent_lock); configfs_detach_rollback(dentry); spin_unlock(&configfs_dirent_lock); + config_item_put(parent_item); return -EINTR; } frag->frag_dead = true; diff --git a/fs/configfs/file.c b/fs/configfs/file.c index fb65b706cc0d..84b4d58fc65f 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type attr = to_attr(dentry); if (!attr) - goto out_put_item; + goto out_free_buffer; if (type & CONFIGFS_ITEM_BIN_ATTR) { buffer->bin_attr = to_bin_attr(dentry); @@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type /* Grab the module reference for this attribute if we have one */ error = -ENODEV; if (!try_module_get(buffer->owner)) - goto out_put_item; + goto out_free_buffer; error = -EACCES; if (!buffer->item->ci_type) @@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type out_put_module: module_put(buffer->owner); -out_put_item: - config_item_put(buffer->item); out_free_buffer: up_read(&frag->frag_sem); kfree(buffer); diff --git a/fs/coredump.c b/fs/coredump.c index b1ea7dfbd149..f34767eedf38 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -211,6 +211,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, return -ENOMEM; (*argv)[(*argc)++] = 0; ++pat_ptr; + if (!(*pat_ptr)) + return -ENOMEM; } /* Repeat as long as we have more pattern to process and more output @@ -222,7 +224,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, */ if (ispipe) { if (isspace(*pat_ptr)) { - was_space = true; + if (cn->used != 0) + was_space = true; pat_ptr++; continue; } else if (was_space) { @@ -786,6 +789,14 @@ void do_coredump(const kernel_siginfo_t *siginfo) if (displaced) put_files_struct(displaced); if (!dump_interrupted()) { + /* + * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would + * have this set to NULL. + */ + if (!cprm.file) { + pr_info("Core dump to |%s disabled\n", cn.corename); + goto close_fail; + } file_start_write(cprm.file); core_dumped = binfmt->core_dump(&cprm); file_end_write(cprm.file); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index e84efc01512e..ec7387266190 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -23,6 +23,9 @@ #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 +/* Keep this in sync with include/uapi/linux/fscrypt.h */ +#define FSCRYPT_MODE_MAX FSCRYPT_MODE_ADIANTUM + struct fscrypt_context_v1 { u8 version; /* FSCRYPT_CONTEXT_V1 */ u8 contents_encryption_mode; @@ -387,7 +390,7 @@ struct fscrypt_master_key { spinlock_t mk_decrypted_inodes_lock; /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */ - struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1]; + struct crypto_skcipher *mk_mode_keys[FSCRYPT_MODE_MAX + 1]; } __randomize_layout; diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index bb3b7fcfdd48..a5a40a76b8ed 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -58,8 +58,8 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, if (err) return err; - /* ... 
in case we looked up ciphertext name before key was added */ - if (dentry->d_flags & DCACHE_ENCRYPTED_NAME) + /* ... in case we looked up no-key name before key was added */ + if (fscrypt_is_nokey_name(dentry)) return -ENOKEY; if (!fscrypt_has_permitted_context(dir, inode)) @@ -83,9 +83,9 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, if (err) return err; - /* ... in case we looked up ciphertext name(s) before key was added */ - if ((old_dentry->d_flags | new_dentry->d_flags) & - DCACHE_ENCRYPTED_NAME) + /* ... in case we looked up no-key name(s) before key was added */ + if (fscrypt_is_nokey_name(old_dentry) || + fscrypt_is_nokey_name(new_dentry)) return -ENOKEY; if (old_dir != new_dir) { diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 75898340eb46..3e86f75b532a 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -55,6 +55,8 @@ static struct fscrypt_mode * select_encryption_mode(const union fscrypt_policy *policy, const struct inode *inode) { + BUILD_BUG_ON(ARRAY_SIZE(available_modes) != FSCRYPT_MODE_MAX + 1); + if (S_ISREG(inode->i_mode)) return &available_modes[fscrypt_policy_contents_mode(policy)]; diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 4072ba644595..8e1b10861c10 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -55,7 +55,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, return false; } - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", policy->flags); @@ -76,7 +77,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, return false; } - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", policy->flags); diff --git a/fs/d_path.c b/fs/d_path.c index 0f1fc1743302..a69e2cd36e6e 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -102,6 +102,8 @@ restart: if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { struct mount *parent = READ_ONCE(mnt->mnt_parent); + struct mnt_namespace *mnt_ns; + /* Escaped? */ if (dentry != vfsmnt->mnt_root) { bptr = *buffer; @@ -116,7 +118,9 @@ restart: vfsmnt = &mnt->mnt; continue; } - if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns)) + mnt_ns = READ_ONCE(mnt->mnt_ns); + /* open-coded is_mounted() to use local mnt_ns */ + if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns)) error = 1; // absolute root else error = 2; // detached or not attached yet diff --git a/fs/dax.c b/fs/dax.c index cc56313c6b3b..f06ffdd2fea7 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -477,10 +477,11 @@ static void *grab_mapping_entry(struct xa_state *xas, struct address_space *mapping, unsigned int order) { unsigned long index = xas->xa_index; - bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */ + bool pmd_downgrade; /* splitting PMD entry into PTE entries? 
*/ void *entry; retry: + pmd_downgrade = false; xas_lock_irq(xas); entry = get_unlocked_entry(xas, order); @@ -488,7 +489,7 @@ retry: if (dax_is_conflict(entry)) goto fallback; if (!xa_is_value(entry)) { - xas_set_err(xas, EIO); + xas_set_err(xas, -EIO); goto out_unlock; } @@ -794,12 +795,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, address = pgoff_address(index, vma); /* - * Note because we provide range to follow_pte_pmd it will - * call mmu_notifier_invalidate_range_start() on our behalf - * before taking any lock. + * follow_invalidate_pte() will use the range to call + * mmu_notifier_invalidate_range_start() on our behalf before + * taking any lock. */ - if (follow_pte_pmd(vma->vm_mm, address, &range, - &ptep, &pmdp, &ptl)) + if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep, + &pmdp, &ptl)) continue; /* diff --git a/fs/dcache.c b/fs/dcache.c index e88cf0554e65..b2a7f1765f0b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -903,17 +903,19 @@ struct dentry *dget_parent(struct dentry *dentry) { int gotref; struct dentry *ret; + unsigned seq; /* * Do optimistic parent lookup without any * locking. */ rcu_read_lock(); + seq = raw_seqcount_begin(&dentry->d_seq); ret = READ_ONCE(dentry->d_parent); gotref = lockref_get_not_zero(&ret->d_lockref); rcu_read_unlock(); if (likely(gotref)) { - if (likely(ret == READ_ONCE(dentry->d_parent))) + if (!read_seqcount_retry(&dentry->d_seq, seq)) return ret; dput(ret); } diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 8fd45eb89424..943637298f65 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -175,8 +175,13 @@ static int open_proxy_open(struct inode *inode, struct file *filp) if (r) goto out; - real_fops = fops_get(real_fops); - if (!real_fops) { + if (!fops_get(real_fops)) { +#ifdef CONFIG_MODULES + if (real_fops->owner && + real_fops->owner->state == MODULE_STATE_GOING) + goto out; +#endif + /* Huh? Module did not clean up after itself at exit? */ WARN(1, "debugfs file owner did not clean up at exit: %pd", dentry); @@ -305,8 +310,13 @@ static int full_proxy_open(struct inode *inode, struct file *filp) if (r) goto out; - real_fops = fops_get(real_fops); - if (!real_fops) { + if (!fops_get(real_fops)) { +#ifdef CONFIG_MODULES + if (real_fops->owner && + real_fops->owner->state == MODULE_STATE_GOING) + goto out; +#endif + /* Huh? Module did not cleanup after itself at exit? 
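The distinction the debugfs change draws: fops_get() failing because the owning module is mid-unload is expected and should fail quietly, whereas a live module whose files outlived it deserves the WARN. A generic try-get sketch of that rule (types and names are illustrative, not the kernel's module API):

#include <stdio.h>

enum mod_state { MOD_LIVE, MOD_GOING };

struct module { enum mod_state state; int refs; };

static int try_module_get_model(struct module *m)
{
	if (!m || m->state == MOD_GOING)
		return 0;	/* unload in progress: refuse new references */
	m->refs++;
	return 1;
}

int main(void)
{
	struct module live = { MOD_LIVE, 0 }, dying = { MOD_GOING, 0 };

	printf("live module:  get=%d\n", try_module_get_model(&live));
	printf("going module: get=%d\n", try_module_get_model(&dying));
	return 0;
}
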
*/ WARN(1, "debugfs file owner did not clean up at exit: %pd", dentry); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 7b975dbb2bb4..1c74a7cbf5b1 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -293,7 +293,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent) { struct dentry *dentry; - if (IS_ERR(parent)) + if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent)) return NULL; if (!parent) @@ -315,6 +315,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) struct dentry *dentry; int error; + if (!debugfs_initialized()) + return ERR_PTR(-ENOENT); + pr_debug("creating file '%s'\n", name); if (IS_ERR(parent)) diff --git a/fs/direct-io.c b/fs/direct-io.c index 9329ced91f1d..434cffcc0391 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -848,6 +848,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, struct buffer_head *map_bh) { int ret = 0; + int boundary = sdio->boundary; /* dio_send_cur_page may clear it */ if (dio->op == REQ_OP_WRITE) { /* @@ -886,10 +887,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; out: /* - * If sdio->boundary then we want to schedule the IO now to + * If boundary then we want to schedule the IO now to * avoid metadata seeks. */ - if (sdio->boundary) { + if (boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); if (sdio->bio) dio_bio_submit(dio, sdio); diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 3b21082e1b55..3b1012a3c439 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -216,6 +216,7 @@ struct dlm_space { struct list_head members; struct mutex members_lock; int members_count; + struct dlm_nodes *nds; }; struct dlm_comms { @@ -424,6 +425,7 @@ static struct config_group *make_space(struct config_group *g, const char *name) INIT_LIST_HEAD(&sp->members); mutex_init(&sp->members_lock); sp->members_count = 0; + sp->nds = nds; return &sp->group; fail: @@ -445,6 +447,7 @@ static void drop_space(struct config_group *g, struct config_item *i) static void release_space(struct config_item *i) { struct dlm_space *sp = config_item_to_space(i); + kfree(sp->nds); kfree(sp); } diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 416d9de35679..4311d01b02a8 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -97,7 +97,6 @@ do { \ __LINE__, __FILE__, #x, jiffies); \ {do} \ printk("\n"); \ - BUG(); \ panic("DLM: Record message above and reboot.\n"); \ } \ } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index afb8340918b8..c689359ca532 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -632,6 +632,9 @@ static int new_lockspace(const char *name, const char *cluster, wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + /* let kobject handle freeing of ls if there's an error */ + do_unreg = 1; + ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); @@ -639,9 +642,6 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) 
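The dlm configfs leak fixed above is the classic release-callback ownership pattern: the space object records the nodes sub-allocation so release_space() can free both. The same pattern in miniature, as a compilable sketch:

#include <stdlib.h>

struct dlm_nodes { int count; };
struct dlm_space { struct dlm_nodes *nds; };

static struct dlm_space *make_space(void)
{
	struct dlm_space *sp = calloc(1, sizeof(*sp));
	struct dlm_nodes *nds = calloc(1, sizeof(*nds));

	if (!sp || !nds) {
		free(sp);
		free(nds);
		return NULL;
	}
	sp->nds = nds;		/* record ownership for release_space() */
	return sp;
}

static void release_space(struct dlm_space *sp)
{
	if (!sp)
		return;
	free(sp->nds);		/* previously leaked on rmdir */
	free(sp);
}

int main(void)
{
	release_space(make_space());
	return 0;
}
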
Once it's done that, it tells us who the diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index b8a7ce379ffe..75fbdfb8aaef 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } + if (!dev_name) { + rc = -EINVAL; + err = "Device name cannot be null"; + goto out; + } + rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index 96c0c86f3fff..0297ad95eb5c 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -7,6 +7,7 @@ #include <linux/efi.h> #include <linux/fs.h> #include <linux/ctype.h> +#include <linux/kmemleak.h> #include <linux/slab.h> #include <linux/uuid.h> @@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, var->var.VariableName[i] = '\0'; inode->i_private = var; + kmemleak_ignore(var); err = efivar_entry_add(var, &efivarfs_list); if (err) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index fa4f6447ddad..9760a52800b4 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -141,6 +141,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */ + strreplace(name, '/', '!'); + inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0, is_removable); if (!inode) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index fc3a8d8064f8..b22a08ac53a2 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -323,27 +323,12 @@ static int erofs_raw_access_readpages(struct file *filp, return 0; } -static int erofs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - struct erofs_map_blocks map = { - .m_la = iblock << 9, - }; - int err; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (err) - return err; - - if (map.m_flags & EROFS_MAP_MAPPED) - bh->b_blocknr = erofs_blknr(map.m_pa); - - return err; -} - static sector_t erofs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; + struct erofs_map_blocks map = { + .m_la = blknr_to_addr(block), + }; if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) { erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; @@ -352,7 +337,10 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block) return 0; } - return generic_block_bmap(mapping, block, erofs_get_block); + if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW)) + return erofs_blknr(map.m_pa); + + return 0; } /* for uncompressed (aligned) files and raw access for other files */ diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index b1ee5654750d..52ab96154467 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -74,6 +74,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode) #define EROFS_I_VERSION_BIT 0 #define EROFS_I_DATALAYOUT_BIT 1 +#define EROFS_I_ALL \ + ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1) + /* 32-byte reduced form of an ondisk inode */ struct erofs_inode_compact { __le16 i_format; /* inode format hints */ diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 3350ab65d892..0dbeaf68e1d6 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -8,31 +8,87 @@ #include <trace/events/erofs.h> -/* no locking */ -static int erofs_read_inode(struct inode *inode, void *data) +/* + * if inode is 
successfully read, return its inode page (or sometimes + * the inode payload page if it's an extended inode) in order to fill + * inline data if possible. + */ +static struct page *erofs_read_inode(struct inode *inode, + unsigned int *ofs) { + struct super_block *sb = inode->i_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); - struct erofs_inode_compact *dic = data; - struct erofs_inode_extended *die; + const erofs_off_t inode_loc = iloc(sbi, vi->nid); - const unsigned int ifmt = le16_to_cpu(dic->i_format); - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - erofs_blk_t nblks = 0; + erofs_blk_t blkaddr, nblks = 0; + struct page *page; + struct erofs_inode_compact *dic; + struct erofs_inode_extended *die, *copied = NULL; + unsigned int ifmt; + int err; + + blkaddr = erofs_blknr(inode_loc); + *ofs = erofs_blkoff(inode_loc); + + erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, *ofs, blkaddr); + + page = erofs_get_meta_page(sb, blkaddr); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return page; + } + + dic = page_address(page) + *ofs; + ifmt = le16_to_cpu(dic->i_format); + + if (ifmt & ~EROFS_I_ALL) { + erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + ifmt, vi->nid); + err = -EOPNOTSUPP; + goto err_out; + } vi->datalayout = erofs_inode_datalayout(ifmt); - if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } switch (erofs_inode_version(ifmt)) { case EROFS_INODE_LAYOUT_EXTENDED: - die = data; - vi->inode_isize = sizeof(struct erofs_inode_extended); + /* check if the inode acrosses page boundary */ + if (*ofs + vi->inode_isize <= PAGE_SIZE) { + *ofs += vi->inode_isize; + die = (struct erofs_inode_extended *)dic; + } else { + const unsigned int gotten = PAGE_SIZE - *ofs; + + copied = kmalloc(vi->inode_isize, GFP_NOFS); + if (!copied) { + err = -ENOMEM; + goto err_out; + } + memcpy(copied, dic, gotten); + unlock_page(page); + put_page(page); + + page = erofs_get_meta_page(sb, blkaddr + 1); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld", + vi->nid, PTR_ERR(page)); + kfree(copied); + return page; + } + *ofs = vi->inode_isize - gotten; + memcpy((u8 *)copied + gotten, page_address(page), *ofs); + die = copied; + } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); @@ -58,20 +114,21 @@ static int erofs_read_inode(struct inode *inode, void *data) i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - /* ns timestamp */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - le64_to_cpu(die->i_ctime); - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - le32_to_cpu(die->i_ctime_nsec); + /* extended inode has its own timestamp */ + inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime); + inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec); inode->i_size = le64_to_cpu(die->i_size); /* total blocks for compressed files */ if (erofs_inode_is_data_compressed(vi->datalayout)) nblks = le32_to_cpu(die->i_u.compressed_blocks); + + kfree(copied); break; case EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); + *ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = 
le16_to_cpu(dic->i_mode); @@ -97,11 +154,9 @@ static int erofs_read_inode(struct inode *inode, void *data) i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time to derive all file time */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - sbi->build_time; - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - sbi->build_time_nsec; + /* use build time for compact inodes */ + inode->i_ctime.tv_sec = sbi->build_time; + inode->i_ctime.tv_nsec = sbi->build_time_nsec; inode->i_size = le32_to_cpu(dic->i_size); if (erofs_inode_is_data_compressed(vi->datalayout)) @@ -111,22 +166,32 @@ static int erofs_read_inode(struct inode *inode, void *data) erofs_err(inode->i_sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec; + inode->i_atime.tv_sec = inode->i_ctime.tv_sec; + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec; + inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec; + if (!nblks) /* measure inode.i_blocks as generic filesystems */ inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; else inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; + return page; bogusimode: erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + err = -EFSCORRUPTED; +err_out: DBG_BUGON(1); - return -EFSCORRUPTED; + kfree(copied); + unlock_page(page); + put_page(page); + return ERR_PTR(err); } static int erofs_fill_symlink(struct inode *inode, void *data, @@ -146,7 +211,7 @@ static int erofs_fill_symlink(struct inode *inode, void *data, if (!lnk) return -ENOMEM; - m_pofs += vi->inode_isize + vi->xattr_isize; + m_pofs += vi->xattr_isize; /* inline symlink data shouldn't cross page boundary as well */ if (m_pofs + inode->i_size > PAGE_SIZE) { kfree(lnk); @@ -167,37 +232,17 @@ static int erofs_fill_symlink(struct inode *inode, void *data, static int erofs_fill_inode(struct inode *inode, int isdir) { - struct super_block *sb = inode->i_sb; struct erofs_inode *vi = EROFS_I(inode); struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; unsigned int ofs; - erofs_off_t inode_loc; + int err = 0; trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(EROFS_SB(sb), vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - - page = erofs_get_meta_page(sb, blkaddr); - - if (IS_ERR(page)) { - erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); + /* read inode base data from disk */ + page = erofs_read_inode(inode, &ofs); + if (IS_ERR(page)) return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = erofs_read_inode(inode, data + ofs); - if (err) - goto out_unlock; /* setup the new inode */ switch (inode->i_mode & S_IFMT) { @@ -210,7 +255,7 @@ static int erofs_fill_inode(struct inode *inode, int isdir) inode->i_fop = &erofs_dir_fops; break; case S_IFLNK: - err = erofs_fill_symlink(inode, data, ofs); + err = erofs_fill_symlink(inode, page_address(page), ofs); if (err) goto out_unlock; inode_nohighmem(inode); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 0e369494f2f2..22e059b4f745 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -124,8 +124,8 @@ static int erofs_read_superblock(struct super_block *sb) blkszbits = dsb->blkszbits; /* 9(512 bytes) + 
LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ if (blkszbits != LOG_BLOCK_SIZE) { - erofs_err(sb, "blksize %u isn't supported on this platform", - 1 << blkszbits); + erofs_err(sb, "blkszbits %u isn't supported on this platform", + blkszbits); goto out; } diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index d92b3e753a6f..3e28fd082df0 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -294,7 +294,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink, spin_unlock(&erofs_sb_list_lock); sbi->shrinker_run_no = run_no; - freed += erofs_shrink_workstation(sbi, nr, false); + freed += erofs_shrink_workstation(sbi, nr - freed, false); spin_lock(&erofs_sb_list_lock); /* Get the next list element before we move this one */ diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index b766c3ee5fa8..f78496776e76 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode) int ret = 0; /* the most case is that xattrs of this inode are initialized. */ - if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) + if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) { + /* + * paired with smp_mb() at the end of the function to ensure + * fields will only be observed after the bit is set. + */ + smp_mb(); return 0; + } if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE)) return -ERESTARTSYS; @@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode) } xattr_iter_end(&it, atomic_map); + /* paired with smp_mb() at the beginning of the function. */ + smp_mb(); set_bit(EROFS_I_EA_INITED_BIT, &vi->flags); out_unlock: @@ -473,8 +481,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, return -EOPNOTSUPP; break; case EROFS_XATTR_INDEX_TRUSTED: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; break; case EROFS_XATTR_INDEX_SECURITY: break; diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index faf950189bd7..568d5a493876 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -148,22 +148,22 @@ static inline void z_erofs_onlinepage_init(struct page *page) static inline void z_erofs_onlinepage_fixup(struct page *page, uintptr_t index, bool down) { - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); + union z_erofs_onlinepage_converter u = { .v = &page_private(page) }; + int orig, orig_index, val; - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { +repeat: + orig = atomic_read(u.o); + orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; + if (orig_index) { if (!index) return; - DBG_BUGON(id != index); + DBG_BUGON(orig_index != index); } - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); - if (cmpxchg(p, o, v) != o) + val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | + ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); + if (atomic_cmpxchg(u.o, orig, val) != orig) goto repeat; } diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 6a26c293ae2d..fff574100721 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -36,8 +36,14 @@ static int fill_inode_lazy(struct inode *inode) void *kaddr; struct z_erofs_map_header *h; - if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) + if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) { + /* + * paired with smp_mb() at the end of the function to ensure + * fields will only be observed after the bit is set. 
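The barrier pairing erofs adds here maps directly onto C11 release/acquire semantics: the writer publishes the fields before setting the INITED bit, and a reader that observes the bit is guaranteed to observe the fields. A compilable sketch, with names standing in for the vi->z_* members and flag bit:

#include <stdatomic.h>
#include <stdio.h>

static int fields;		/* stand-in for the lazily filled members */
static atomic_int inited;	/* stand-in for EROFS_I_Z_INITED_BIT */

static void init_once(void)
{
	fields = 42;		/* publish the fields ... */
	atomic_store_explicit(&inited, 1, memory_order_release);
}

static int read_fields(void)
{
	if (atomic_load_explicit(&inited, memory_order_acquire))
		return fields;	/* guaranteed to see 42 */
	return -1;		/* caller must take the slow init path */
}

int main(void)
{
	init_once();
	printf("%d\n", read_fields());
	return 0;
}
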
+ */ + smp_mb(); return 0; + } if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE)) return -ERESTARTSYS; @@ -83,6 +89,8 @@ static int fill_inode_lazy(struct inode *inode) vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + ((h->h_clusterbits >> 5) & 7); + /* paired with smp_mb() at the beginning of the function */ + smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); unmap_done: kunmap_atomic(kaddr); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 84a6b61b9829..2e5779721ac6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -218,8 +218,7 @@ struct eventpoll { struct file *file; /* used to optimize loop detection check */ - int visited; - struct list_head visited_list_link; + u64 gen; #ifdef CONFIG_NET_RX_BUSY_POLL /* used to track busy poll napi_id */ @@ -269,6 +268,8 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +static u64 loop_check_gen = 0; + /* Used to check for epoll file descriptor inclusion loops */ static struct nested_calls poll_loop_ncalls; @@ -278,9 +279,6 @@ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; -/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ -static LIST_HEAD(visited_list); - /* * List of files with newly added links, where we may need to limit the number * of emanating paths. Protected by the epmutex. @@ -1176,6 +1174,10 @@ static inline bool chain_epi_lockless(struct epitem *epi) { struct eventpoll *ep = epi->ep; + /* Fast preliminary check */ + if (epi->next != EP_UNACTIVE_PTR) + return false; + /* Check that the same epi has not been just chained from another CPU */ if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) return false; @@ -1242,16 +1244,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v * chained in ep->ovflist and requeued later on. */ if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { - if (epi->next == EP_UNACTIVE_PTR && - chain_epi_lockless(epi)) + if (chain_epi_lockless(epi)) + ep_pm_stay_awake_rcu(epi); + } else if (!ep_is_linked(epi)) { + /* In the usual case, add event to ready list. */ + if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) ep_pm_stay_awake_rcu(epi); - goto out_unlock; - } - - /* If this file is already in the ready list we exit soon */ - if (!ep_is_linked(epi) && - list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) { - ep_pm_stay_awake_rcu(epi); } /* @@ -1455,7 +1453,7 @@ static int reverse_path_check(void) static int ep_create_wakeup_source(struct epitem *epi) { - const char *name; + struct name_snapshot n; struct wakeup_source *ws; if (!epi->ep->ws) { @@ -1464,8 +1462,9 @@ static int ep_create_wakeup_source(struct epitem *epi) return -ENOMEM; } - name = epi->ffd.file->f_path.dentry->d_name.name; - ws = wakeup_source_register(NULL, name); + take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); + ws = wakeup_source_register(NULL, n.name.name); + release_dentry_name_snapshot(&n); if (!ws) return -ENOMEM; @@ -1527,6 +1526,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, RCU_INIT_POINTER(epi->ws, NULL); } + /* Add the current item to the list of active epoll hook for this file */ + spin_lock(&tfile->f_lock); + list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); + spin_unlock(&tfile->f_lock); + + /* + * Add the current item to the RB tree. All RB tree operations are + * protected by "mtx", and ep_insert() is called with "mtx" held. 
+ */ + ep_rbtree_insert(ep, epi); + + /* now check if we've created too many backpaths */ + error = -EINVAL; + if (full_check && reverse_path_check()) + goto error_remove_epi; + /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); @@ -1549,22 +1564,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, if (epi->nwait < 0) goto error_unregister; - /* Add the current item to the list of active epoll hook for this file */ - spin_lock(&tfile->f_lock); - list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); - spin_unlock(&tfile->f_lock); - - /* - * Add the current item to the RB tree. All RB tree operations are - * protected by "mtx", and ep_insert() is called with "mtx" held. - */ - ep_rbtree_insert(ep, epi); - - /* now check if we've created too many backpaths */ - error = -EINVAL; - if (full_check && reverse_path_check()) - goto error_remove_epi; - /* We have to drop the new item inside our item list to keep track of it */ write_lock_irq(&ep->lock); @@ -1593,6 +1592,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, return 0; +error_unregister: + ep_unregister_pollwait(ep, epi); error_remove_epi: spin_lock(&tfile->f_lock); list_del_rcu(&epi->fllink); @@ -1600,9 +1601,6 @@ error_remove_epi: rb_erase_cached(&epi->rbn, &ep->rbr); -error_unregister: - ep_unregister_pollwait(ep, epi); - /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist @@ -1827,7 +1825,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, { int res = 0, eavail, timed_out = 0; u64 slack = 0; - bool waiter = false; wait_queue_entry_t wait; ktime_t expires, *to = NULL; @@ -1872,21 +1869,23 @@ fetch_events: */ ep_reset_busy_poll_napi_id(ep); - /* - * We don't have any available event to return to the caller. We need - * to sleep here, and we will be woken by ep_poll_callback() when events - * become available. - */ - if (!waiter) { - waiter = true; - init_waitqueue_entry(&wait, current); - + do { + /* + * Internally init_wait() uses autoremove_wake_function(), + * thus the wait entry is removed from the wait queue on each + * wakeup. Why is that important? In case of several waiters, + * each new wakeup will hit the next waiter, giving it the + * chance to harvest new events; otherwise a wakeup can be + * lost. This is also good performance-wise, because on the + * normal wakeup path there is no need to call + * __remove_wait_queue() explicitly, so ep->lock is not taken; + * taking it would stall event delivery. + */ + init_wait(&wait); write_lock_irq(&ep->lock); __add_wait_queue_exclusive(&ep->wq, &wait); write_unlock_irq(&ep->lock); - } - for (;;) { /* * We don't want to sleep if the ep_poll_callback() sends us * a wakeup in between. That's why we set the task state @@ -1916,10 +1915,20 @@ fetch_events: timed_out = 1; break; } - } + + /* We were woken up, thus go and try to harvest some events */ + eavail = 1; + + } while (0); __set_current_state(TASK_RUNNING); + if (!list_empty_careful(&wait.entry)) { + write_lock_irq(&ep->lock); + __remove_wait_queue(&ep->wq, &wait); + write_unlock_irq(&ep->lock); + } + send_events: /* * Try to transfer events to user space.
In case we get 0 events and @@ -1930,12 +1939,6 @@ send_events: !(res = ep_send_events(ep, events, maxevents)) && !timed_out) goto fetch_events; - if (waiter) { - write_lock_irq(&ep->lock); - __remove_wait_queue(&ep->wq, &wait); - write_unlock_irq(&ep->lock); - } - return res; } @@ -1964,13 +1967,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) struct epitem *epi; mutex_lock_nested(&ep->mtx, call_nests + 1); - ep->visited = 1; - list_add(&ep->visited_list_link, &visited_list); + ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); if (unlikely(is_file_epoll(epi->ffd.file))) { ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->visited) + if (ep_tovisit->gen == loop_check_gen) continue; error = ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, epi->ffd.file, @@ -1986,9 +1988,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -2009,18 +2013,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) */ static int ep_loop_check(struct eventpoll *ep, struct file *file) { - int ret; - struct eventpoll *ep_cur, *ep_next; - - ret = ep_call_nested(&poll_loop_ncalls, + return ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, file, ep, current); - /* clear visited list */ - list_for_each_entry_safe(ep_cur, ep_next, &visited_list, - visited_list_link) { - ep_cur->visited = 0; - list_del(&ep_cur->visited_list_link); - } - return ret; } static void clear_tfile_check_list(void) @@ -2032,6 +2026,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2181,19 +2176,20 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_lock_nested(&ep->mtx, 0); if (op == EPOLL_CTL_ADD) { if (!list_empty(&f.file->f_ep_links) || + ep->gen == loop_check_gen || is_file_epoll(tf.file)) { full_check = 1; mutex_unlock(&ep->mtx); mutex_lock(&epmutex); if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } mutex_lock_nested(&ep->mtx, 0); if (is_file_epoll(tf.file)) { tep = tf.file->private_data; @@ -2217,8 +2213,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, error = ep_insert(ep, &epds, tf.file, fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2241,8 +2235,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); + loop_check_gen++; mutex_unlock(&epmutex); + } fdput(tf); error_fput: diff --git a/fs/exec.c b/fs/exec.c index 0caab9269cff..ceddda2d7c47 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1037,16 +1037,26 @@ ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len) } 
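The eventpoll hunks above replace the per-ep visited flag and the global visited_list cleanup with a single generation counter, loop_check_gen: a file counts as visited only if its stored gen matches the current generation, so bumping the counter invalidates every mark at once and no unwinding walk is needed. A minimal, self-contained C sketch of that pattern follows; the struct layout and helper name are illustrative assumptions, not kernel APIs, and the counter is bumped at the start of each walk here where the kernel bumps it when the whole check finishes (the effect is the same: stale marks never match).

#include <stdbool.h>
#include <stdint.h>

struct node {
        uint64_t gen;           /* generation this node was last visited in */
        struct node *next;      /* hypothetical link followed by the walk */
};

static uint64_t loop_check_gen;

/* Returns true if following ->next from start revisits a node. */
static bool walk_detects_cycle(struct node *start)
{
        struct node *n;

        loop_check_gen++;       /* O(1) invalidation of all earlier marks */
        for (n = start; n; n = n->next) {
                if (n->gen == loop_check_gen)
                        return true;    /* seen earlier in this same walk */
                n->gen = loop_check_gen;        /* mark for this walk only */
        }
        return false;
}

The trade-off matches the patch: clearing the marks costs one counter increment instead of a list traversal, at the price of a 64-bit field per node, and a mark left behind by an earlier, even aborted, walk can never leak into a later one.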
EXPORT_SYMBOL(read_code); +/* + * Maps the mm_struct mm into the current task struct. + * On success, this function returns with exec_update_lock + * held for writing. + */ static int exec_mmap(struct mm_struct *mm) { struct task_struct *tsk; struct mm_struct *old_mm, *active_mm; + int ret; /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; exec_mm_release(tsk, old_mm); + ret = down_write_killable(&tsk->signal->exec_update_lock); + if (ret) + return ret; + if (old_mm) { sync_mm_rss(old_mm); /* @@ -1058,15 +1068,30 @@ static int exec_mmap(struct mm_struct *mm) down_read(&old_mm->mmap_sem); if (unlikely(old_mm->core_state)) { up_read(&old_mm->mmap_sem); + up_write(&tsk->signal->exec_update_lock); return -EINTR; } } + task_lock(tsk); - active_mm = tsk->active_mm; membarrier_exec_mmap(mm); - tsk->mm = mm; + + local_irq_disable(); + active_mm = tsk->active_mm; tsk->active_mm = mm; + tsk->mm = mm; + /* + * This prevents preemption while active_mm is being loaded and + * it and mm are being updated, which could cause problems for + * lazy tlb mm refcounting when these are updated by context + * switches. Not all architectures can handle irqs off over + * activate_mm yet. + */ + if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) + local_irq_enable(); activate_mm(active_mm, mm); + if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) + local_irq_enable(); tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); task_unlock(tsk); @@ -1304,6 +1329,8 @@ int flush_old_exec(struct linux_binprm * bprm) */ set_mm_exe_file(bprm->mm, bprm->file); + would_dump(bprm, bprm->file); + /* * Release all of the old mmap stuff */ @@ -1313,11 +1340,12 @@ int flush_old_exec(struct linux_binprm * bprm) goto out; /* - * After clearing bprm->mm (to mark that current is using the - * prepared mm now), we have nothing left of the original + * After setting bprm->called_exec_mmap (to mark that current is + * using the prepared mm now), we have nothing left of the original * process. If anything from here on returns an error, the check * in search_binary_handler() will SEGV current. */ + bprm->called_exec_mmap = 1; bprm->mm = NULL; set_fs(USER_DS); @@ -1413,7 +1441,7 @@ void setup_new_exec(struct linux_binprm * bprm) /* An exec changes our domain. We are no longer part of the thread group */ - current->self_exec_id++; + WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); flush_signal_handlers(current, 0); } EXPORT_SYMBOL(setup_new_exec); @@ -1451,6 +1479,8 @@ static void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { + if (bprm->called_exec_mmap) + up_write(¤t->signal->exec_update_lock); mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1500,6 +1530,7 @@ void install_exec_creds(struct linux_binprm *bprm) * credentials; any time after this it may be unlocked. 
*/ security_bprm_committed_creds(bprm); + up_write(¤t->signal->exec_update_lock); mutex_unlock(¤t->signal->cred_guard_mutex); } EXPORT_SYMBOL(install_exec_creds); @@ -1691,7 +1722,7 @@ int search_binary_handler(struct linux_binprm *bprm) read_lock(&binfmt_lock); put_binfmt(fmt); - if (retval < 0 && !bprm->mm) { + if (retval < 0 && bprm->called_exec_mmap) { /* we got to flush_old_exec() and failed after it */ read_unlock(&binfmt_lock); force_sigsegv(SIGSEGV); @@ -1854,8 +1885,6 @@ static int __do_execve_file(int fd, struct filename *filename, if (retval < 0) goto out; - would_dump(bprm, bprm->file); - retval = exec_binprm(bprm); if (retval < 0) goto out; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 39c4772e96c9..d73103cdda21 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index fda7d3f5b4be..432c3febea6d 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) if (dir) le16_add_cpu(&desc->bg_used_dirs_count, -1); spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); if (dir) percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); mark_buffer_dirty(bh); @@ -528,7 +529,7 @@ got: goto fail; } - percpu_counter_add(&sbi->s_freeinodes_counter, -1); + percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 0456bc990b5e..62acbe27d8bf 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -56,6 +56,7 @@ #include <linux/buffer_head.h> #include <linux/init.h> +#include <linux/printk.h> #include <linux/slab.h> #include <linux/mbcache.h> #include <linux/quotaops.h> @@ -84,8 +85,8 @@ printk("\n"); \ } while (0) #else -# define ea_idebug(f...) -# define ea_bdebug(f...) +# define ea_idebug(inode, f...) no_printk(f) +# define ea_bdebug(bh, f...) 
no_printk(f) #endif static int ext2_xattr_set2(struct inode *, struct buffer_head *, @@ -864,8 +865,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) true); if (error) { if (error == -EBUSY) { - ea_bdebug(bh, "already in cache (%d cache entries)", - atomic_read(&ext2_xattr_cache->c_entry_count)); + ea_bdebug(bh, "already in cache"); error = 0; } } else diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 5aba67a504cf..031ff3f19018 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -612,27 +612,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi, /** * ext4_should_retry_alloc() - check if a block allocation should be retried - * @sb: super block - * @retries: number of attemps has been made + * @sb: superblock + * @retries: number of retry attempts made so far * - * ext4_should_retry_alloc() is called when ENOSPC is returned, and if - * it is profitable to retry the operation, this function will wait - * for the current or committing transaction to complete, and then - * return TRUE. We will only retry once. + * ext4_should_retry_alloc() is called when ENOSPC is returned while + * attempting to allocate blocks. If there's an indication that a pending + * journal transaction might free some space and allow another attempt to + * succeed, this function will wait for the current or committing transaction + * to complete and then return TRUE. */ int ext4_should_retry_alloc(struct super_block *sb, int *retries) { - if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || - (*retries)++ > 1 || - !EXT4_SB(sb)->s_journal) + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (!sbi->s_journal) return 0; + if (++(*retries) > 3) { + percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit); + return 0; + } + + /* + * if there's no indication that blocks are about to be freed it's + * possible we just missed a transaction commit that did so + */ smp_mb(); - if (EXT4_SB(sb)->s_mb_free_pending == 0) - return 0; + if (sbi->s_mb_free_pending == 0) + return ext4_has_free_clusters(sbi, 1, 0); + /* + * it's possible we've just missed a transaction commit here, + * so ignore the returned status + */ jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); - jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); + (void) jbd2_journal_force_commit_nested(sbi->s_journal); return 1; } diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index ff8e1205127e..97c56d061e61 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, unsigned int count) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. 
*/ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_node = &new_entry->node; - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); @@ -260,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 2743c6f8a457..0589e914663f 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -677,6 +677,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); const struct inode *inode = READ_ONCE(parent->d_inode); + char strbuf[DNAME_INLINE_LEN]; if (!inode || !IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) { @@ -685,6 +686,21 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, return memcmp(str, name->name, len); } + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. + */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + qstr.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); + } + return ext4_ci_compare(inode, name, &qstr, false); } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 69a7535b7982..bc14573d829d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1421,6 +1421,7 @@ struct ext4_sb_info { struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; struct blockgroup_lock *s_blockgroup_lock; struct proc_dir_entry *s_proc; struct kobject s_kobj; @@ -1601,7 +1602,6 @@ enum { EXT4_STATE_NO_EXPAND, /* No space for expansion */ EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ - EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ EXT4_STATE_NEWENTRY, /* File just added to dir */ EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read nolocking */ @@ -2500,7 +2500,8 @@ void ext4_insert_dentry(struct inode *inode, struct ext4_filename *fname); static inline void ext4_update_dx_flag(struct inode *inode) { - if (!ext4_has_feature_dir_index(inode->i_sb)) { + if (!ext4_has_feature_dir_index(inode->i_sb) && + ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { /* ext4_iget() should have caught this... 
*/ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb)); ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); @@ -2587,8 +2588,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); -int ext4_dio_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create); int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create); int ext4_walk_page_buffers(handle_t *handle, @@ -3422,6 +3421,8 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end) } extern const struct iomap_ops ext4_iomap_ops; +extern const struct iomap_ops ext4_iomap_overwrite_ops; +extern const struct iomap_ops ext4_iomap_report_ops; static inline int ext4_buffer_uptodate(struct buffer_head *bh) { diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 98bd0e9ee7df..ca78fd709845 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -170,10 +170,13 @@ struct partial_cluster { (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_LAST_INDEX(__hdr__) \ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) -#define EXT_MAX_EXTENT(__hdr__) \ - (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) +#define EXT_MAX_EXTENT(__hdr__) \ + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ + : 0) #define EXT_MAX_INDEX(__hdr__) \ - (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 6208b36a2256..631d3030fb3e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -498,6 +498,30 @@ int ext4_ext_check_inode(struct inode *inode) return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); } +static void ext4_cache_extents(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); + ext4_lblk_t prev = 0; + int i; + + for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { + unsigned int status = EXTENT_STATUS_WRITTEN; + ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); + int len = ext4_ext_get_actual_len(ex); + + if (prev && (prev != lblk)) + ext4_es_cache_extent(inode, prev, lblk - prev, ~0, + EXTENT_STATUS_HOLE); + + if (ext4_ext_is_unwritten(ex)) + status = EXTENT_STATUS_UNWRITTEN; + ext4_es_cache_extent(inode, lblk, len, + ext4_ext_pblock(ex), status); + prev = lblk + len; + } +} + static struct buffer_head * __read_extent_tree_block(const char *function, unsigned int line, struct inode *inode, ext4_fsblk_t pblk, int depth, @@ -532,26 +556,7 @@ __read_extent_tree_block(const char *function, unsigned int line, */ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { struct ext4_extent_header *eh = ext_block_hdr(bh); - struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); - ext4_lblk_t prev = 0; - int i; - - for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { - unsigned int status = EXTENT_STATUS_WRITTEN; - ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); - int len = ext4_ext_get_actual_len(ex); - - if (prev && (prev != lblk)) - ext4_es_cache_extent(inode, prev, - lblk - prev, ~0, - EXTENT_STATUS_HOLE); - - if (ext4_ext_is_unwritten(ex)) - status = 
EXTENT_STATUS_UNWRITTEN; - ext4_es_cache_extent(inode, lblk, len, - ext4_ext_pblock(ex), status); - prev = lblk + len; - } + ext4_cache_extents(inode, eh); } return bh; errout: @@ -899,6 +904,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, path[0].p_bh = NULL; i = depth; + if (!(flags & EXT4_EX_NOCACHE) && depth == 0) + ext4_cache_extents(inode, eh); /* walk through the tree */ while (i) { ext_debug("depth %d: num %d, max %d\n", @@ -1753,16 +1760,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, */ if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) return 0; - /* - * The check for IO to unwritten extent is somewhat racy as we - * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after - * dropping i_data_sem. But reserved blocks should save us in that - * case. - */ + if (ext4_ext_is_unwritten(ex1) && - (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) || - atomic_read(&EXT4_I(inode)->i_unwritten) || - (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN))) + ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) return 0; #ifdef AGGRESSIVE_TEST if (ext1_ee_len >= 4) @@ -3003,7 +3003,7 @@ again: * in use to avoid freeing it when removing blocks. */ if (sbi->s_cluster_ratio > 1) { - pblk = ext4_ext_pblock(ex) + end - ee_block + 2; + pblk = ext4_ext_pblock(ex) + end - ee_block + 1; partial.pclu = EXT4_B2C(sbi, pblk); partial.state = nofree; } @@ -3549,8 +3549,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, (unsigned long long)map->m_lblk, map_len); sbi = EXT4_SB(inode->i_sb); - eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> - inode->i_sb->s_blocksize_bits; + eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) + >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map_len) eof_block = map->m_lblk + map_len; @@ -3805,8 +3805,8 @@ static int ext4_split_convert_extents(handle_t *handle, __func__, inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); - eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> - inode->i_sb->s_blocksize_bits; + eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) + >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; /* diff --git a/fs/ext4/file.c b/fs/ext4/file.c index f7f8ca6689ef..4fc31eb369c7 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -29,10 +29,57 @@ #include <linux/pagevec.h> #include <linux/uio.h> #include <linux/mman.h> +#include <linux/backing-dev.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" +#include "truncate.h" + +static bool ext4_dio_supported(struct inode *inode) +{ + if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode)) + return false; + if (fsverity_active(inode)) + return false; + if (ext4_should_journal_data(inode)) + return false; + if (ext4_has_inline_data(inode)) + return false; + return true; +} + +static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + ssize_t ret; + struct inode *inode = file_inode(iocb->ki_filp); + + if (iocb->ki_flags & IOCB_NOWAIT) { + if (!inode_trylock_shared(inode)) + return -EAGAIN; + } else { + inode_lock_shared(inode); + } + + if (!ext4_dio_supported(inode)) { + inode_unlock_shared(inode); + /* + * Fallback to buffered I/O if the operation being performed on + * the inode is not supported by direct I/O. 
The IOCB_DIRECT + * flag needs to be cleared here in order to ensure that the + * direct I/O path within generic_file_read_iter() is not + * taken. + */ + iocb->ki_flags &= ~IOCB_DIRECT; + return generic_file_read_iter(iocb, to); + } + + ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, is_sync_kiocb(iocb)); + inode_unlock_shared(inode); + + file_accessed(iocb->ki_filp); + return ret; +} #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) @@ -65,16 +112,20 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { - if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb)))) + struct inode *inode = file_inode(iocb->ki_filp); + + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; if (!iov_iter_count(to)) return 0; /* skip atime */ #ifdef CONFIG_FS_DAX - if (IS_DAX(file_inode(iocb->ki_filp))) + if (IS_DAX(inode)) return ext4_dax_read_iter(iocb, to); #endif + if (iocb->ki_flags & IOCB_DIRECT) + return ext4_dio_read_iter(iocb, to); return generic_file_read_iter(iocb, to); } @@ -104,13 +155,6 @@ static int ext4_release_file(struct inode *inode, struct file *filp) return 0; } -static void ext4_unwritten_wait(struct inode *inode) -{ - wait_queue_head_t *wq = ext4_ioend_wq(inode); - - wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0)); -} - /* * This tests whether the IO in question is block-aligned or not. * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they @@ -120,19 +164,25 @@ static void ext4_unwritten_wait(struct inode *inode) * threads are at work on the same unwritten block, they must be synchronized * or one thread will zero the other's data, causing corruption. */ -static int -ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos) +static bool +ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos) { struct super_block *sb = inode->i_sb; - int blockmask = sb->s_blocksize - 1; - - if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize)) - return 0; + unsigned long blockmask = sb->s_blocksize - 1; if ((pos | iov_iter_alignment(from)) & blockmask) - return 1; + return true; - return 0; + return false; +} + +static bool +ext4_extending_io(struct inode *inode, loff_t offset, size_t len) +{ + if (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize) + return true; + return false; } /* Is IO overwriting allocated and initialized blocks? */ @@ -158,18 +208,19 @@ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len) return err == blklen && (map.m_flags & EXT4_MAP_MAPPED); } -static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) +static ssize_t ext4_generic_write_checks(struct kiocb *iocb, + struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; + if (unlikely(IS_IMMUTABLE(inode))) + return -EPERM; + ret = generic_write_checks(iocb, from); if (ret <= 0) return ret; - if (unlikely(IS_IMMUTABLE(inode))) - return -EPERM; - /* * If we have encountered a bitmap-format file, the size limit * is smaller than s_maxbytes, which is for extent-mapped files. 
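ext4_unaligned_io() above relies on a standard trick: with a power-of-two block size, an IO is block-unaligned exactly when the file offset or the buffer alignment (iov_iter_alignment() folds in the user buffer's address and segment lengths) has low bits set, so OR-ing the two values and masking with (blocksize - 1) tests both in one step. A small userspace sketch of the same test; the helper name and the use of a plain length in place of iov_iter_alignment() are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

static bool io_is_unaligned(unsigned long pos, unsigned long len,
                            unsigned long blocksize)
{
        /* blocksize must be a power of two for the mask trick to work */
        unsigned long blockmask = blocksize - 1;

        return ((pos | len) & blockmask) != 0;
}

int main(void)
{
        /* 4096-byte blocks: offset 8192, length 4096 is aligned... */
        printf("%d\n", io_is_unaligned(8192, 4096, 4096));     /* prints 0 */
        /* ...while offset 512 straddles a block boundary */
        printf("%d\n", io_is_unaligned(512, 4096, 4096));      /* prints 1 */
        return 0;
}

This test is why the write path later in this patch forces the exclusive inode lock for unaligned IO: two writers zeroing partial blocks of the same block concurrently would each destroy the other's data.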
@@ -184,12 +235,347 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) return iov_iter_count(from); } +static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) +{ + ssize_t ret, count; + + count = ext4_generic_write_checks(iocb, from); + if (count <= 0) + return count; + ret = file_modified(iocb->ki_filp); + if (ret) + return ret; + + return count; +} + +static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, + struct iov_iter *from) +{ + ssize_t ret; + struct inode *inode = file_inode(iocb->ki_filp); + + if (iocb->ki_flags & IOCB_NOWAIT) + return -EOPNOTSUPP; + + inode_lock(inode); + ret = ext4_write_checks(iocb, from); + if (ret <= 0) + goto out; + + current->backing_dev_info = inode_to_bdi(inode); + ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos); + current->backing_dev_info = NULL; + +out: + inode_unlock(inode); + if (likely(ret > 0)) { + iocb->ki_pos += ret; + ret = generic_write_sync(iocb, ret); + } + + return ret; +} + +static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, + ssize_t written, size_t count) +{ + handle_t *handle; + bool truncate = false; + u8 blkbits = inode->i_blkbits; + ext4_lblk_t written_blk, end_blk; + + /* + * Note that EXT4_I(inode)->i_disksize can get extended up to + * inode->i_size while the I/O was running due to writeback of delalloc + * blocks. But the code in ext4_iomap_alloc() is careful to use + * zeroed/unwritten extents if this is possible; thus we won't leave + * uninitialized blocks in a file even if we didn't succeed in writing + * as much as we intended. + */ + WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize); + if (offset + count <= EXT4_I(inode)->i_disksize) { + /* + * We need to ensure that the inode is removed from the orphan + * list if it has been added prematurely, due to writeback of + * delalloc blocks. + */ + if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) { + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); + + if (IS_ERR(handle)) { + ext4_orphan_del(NULL, inode); + return PTR_ERR(handle); + } + + ext4_orphan_del(handle, inode); + ext4_journal_stop(handle); + } + + return written; + } + + if (written < 0) + goto truncate; + + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); + if (IS_ERR(handle)) { + written = PTR_ERR(handle); + goto truncate; + } + + if (ext4_update_inode_size(inode, offset + written)) + ext4_mark_inode_dirty(handle, inode); + + /* + * We may need to truncate allocated but not written blocks beyond EOF. + */ + written_blk = ALIGN(offset + written, 1 << blkbits); + end_blk = ALIGN(offset + count, 1 << blkbits); + if (written_blk < end_blk && ext4_can_truncate(inode)) + truncate = true; + + /* + * Remove the inode from the orphan list if it has been extended and + * everything went OK. + */ + if (!truncate && inode->i_nlink) + ext4_orphan_del(handle, inode); + ext4_journal_stop(handle); + + if (truncate) { +truncate: + ext4_truncate_failed_write(inode); + /* + * If the truncate operation failed early, then the inode may + * still be on the orphan list. In that case, we need to try to + * remove the inode from the in-memory linked list.
+ */ + if (inode->i_nlink) + ext4_orphan_del(NULL, inode); + } + + return written; +} + +static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size, + int error, unsigned int flags) +{ + loff_t offset = iocb->ki_pos; + struct inode *inode = file_inode(iocb->ki_filp); + + if (error) + return error; + + if (size && flags & IOMAP_DIO_UNWRITTEN) + return ext4_convert_unwritten_extents(NULL, inode, + offset, size); + + return 0; +} + +static const struct iomap_dio_ops ext4_dio_write_ops = { + .end_io = ext4_dio_write_end_io, +}; + +/* + * The intention here is to start with the shared lock acquired and then see + * if any condition requires an exclusive inode lock. If so, we restart the + * whole operation by releasing the shared lock and acquiring the exclusive + * lock. + * + * - For unaligned IO we never take the shared lock, as it may cause data + * corruption when two unaligned IOs try to modify the same block, e.g. while + * zeroing. + * + * - For extending writes we don't take the shared lock either, since they + * require updating inode i_disksize and/or orphan handling under the + * exclusive lock. + * + * - Shared locking is therefore mostly used for overwrites; otherwise we + * switch to the exclusive i_rwsem lock. + */ +static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from, + bool *ilock_shared, bool *extend) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file); + loff_t offset; + size_t count; + ssize_t ret; + +restart: + ret = ext4_generic_write_checks(iocb, from); + if (ret <= 0) + goto out; + + offset = iocb->ki_pos; + count = ret; + if (ext4_extending_io(inode, offset, count)) + *extend = true; + /* + * Determine whether the IO operation will overwrite allocated + * and initialized blocks. + * We need exclusive i_rwsem for changing security info + * in file_modified(). + */ + if (*ilock_shared && (!IS_NOSEC(inode) || *extend || + !ext4_overwrite_io(inode, offset, count))) { + inode_unlock_shared(inode); + *ilock_shared = false; + inode_lock(inode); + goto restart; + } + + ret = file_modified(file); + if (ret < 0) + goto out; + + return count; +out: + if (*ilock_shared) + inode_unlock_shared(inode); + else + inode_unlock(inode); + return ret; +} + +static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + ssize_t ret; + handle_t *handle; + struct inode *inode = file_inode(iocb->ki_filp); + loff_t offset = iocb->ki_pos; + const struct iomap_ops *iomap_ops = &ext4_iomap_ops; + size_t count = iov_iter_count(from); + bool extend = false, unaligned_io = false; + bool ilock_shared = true; + + /* + * We initially start with the shared inode lock unless it is + * unaligned IO, which needs the exclusive lock anyway. + */ + if (ext4_unaligned_io(inode, from, offset)) { + unaligned_io = true; + ilock_shared = false; + } + /* + * Quick check here without any i_rwsem lock to see if it is extending + * IO. A more reliable check is done in ext4_dio_write_checks() with + * proper locking in place. + */ + if (offset + count > i_size_read(inode)) + ilock_shared = false; + + if (iocb->ki_flags & IOCB_NOWAIT) { + if (ilock_shared) { + if (!inode_trylock_shared(inode)) + return -EAGAIN; + } else { + if (!inode_trylock(inode)) + return -EAGAIN; + } + } else { + if (ilock_shared) + inode_lock_shared(inode); + else + inode_lock(inode); + } + + /* Fall back to buffered I/O if the inode does not support direct I/O.
*/ + if (!ext4_dio_supported(inode)) { + if (ilock_shared) + inode_unlock_shared(inode); + else + inode_unlock(inode); + return ext4_buffered_write_iter(iocb, from); + } + ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend); + if (ret <= 0) + return ret; + + offset = iocb->ki_pos; + count = ret; + /* + * Unaligned direct IOs must be serialized with respect to each other, + * as zeroing of partial blocks of two competing unaligned IOs can + * result in data corruption. + * + * So we make sure we don't allow any unaligned IO in flight. + * For IOs where we need not wait (like unaligned non-AIO DIO), + * the inode_dio_wait() below may turn out to be a no-op, since + * we start with the exclusive lock. + */ + if (unaligned_io) + inode_dio_wait(inode); + + if (extend) { + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } + + ret = ext4_orphan_add(handle, inode); + if (ret) { + ext4_journal_stop(handle); + goto out; + } + + ext4_journal_stop(handle); + } + if (ilock_shared) + iomap_ops = &ext4_iomap_overwrite_ops; + ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops, + is_sync_kiocb(iocb) || unaligned_io || extend); + + if (extend) + ret = ext4_handle_inode_extension(inode, offset, ret, count); + +out: + if (ilock_shared) + inode_unlock_shared(inode); + else + inode_unlock(inode); + + if (ret >= 0 && iov_iter_count(from)) { + ssize_t err; + loff_t endbyte; + + offset = iocb->ki_pos; + err = ext4_buffered_write_iter(iocb, from); + if (err < 0) + return err; + + /* + * We need to ensure that the pages within the page cache for + * the range covered by this I/O are written to disk and + * invalidated. This is done in an attempt to preserve the + * expected direct I/O semantics in the case we fall back to + * buffered I/O to complete the rest of the I/O request.
+ */ + ret += err; + endbyte = offset + ret - 1; + err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping, + offset, endbyte); + if (!err) + invalidate_mapping_pages(iocb->ki_filp->f_mapping, + offset >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); + } + + return ret; +} + #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; + size_t count; + loff_t offset; + handle_t *handle; + bool extend = false; + struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) @@ -200,14 +586,28 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = ext4_write_checks(iocb, from); if (ret <= 0) goto out; - ret = file_remove_privs(iocb->ki_filp); - if (ret) - goto out; - ret = file_update_time(iocb->ki_filp); - if (ret) - goto out; + offset = iocb->ki_pos; + count = iov_iter_count(from); + if (offset + count > EXT4_I(inode)->i_disksize) { + handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } + ret = ext4_orphan_add(handle, inode); + if (ret) { + ext4_journal_stop(handle); + goto out; + } + + extend = true; + ext4_journal_stop(handle); + } ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); + + if (extend) + ret = ext4_handle_inode_extension(inode, offset, ret, count); out: inode_unlock(inode); if (ret > 0) @@ -220,10 +620,6 @@ static ssize_t ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); - int o_direct = iocb->ki_flags & IOCB_DIRECT; - int unaligned_aio = 0; - int overwrite = 0; - ssize_t ret; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; @@ -232,59 +628,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (IS_DAX(inode)) return ext4_dax_write_iter(iocb, from); #endif + if (iocb->ki_flags & IOCB_DIRECT) + return ext4_dio_write_iter(iocb, from); - if (!inode_trylock(inode)) { - if (iocb->ki_flags & IOCB_NOWAIT) - return -EAGAIN; - inode_lock(inode); - } - - ret = ext4_write_checks(iocb, from); - if (ret <= 0) - goto out; - - /* - * Unaligned direct AIO must be serialized among each other as zeroing - * of partial blocks of two competing unaligned AIOs can result in data - * corruption. - */ - if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && - !is_sync_kiocb(iocb) && - ext4_unaligned_aio(inode, from, iocb->ki_pos)) { - unaligned_aio = 1; - ext4_unwritten_wait(inode); - } - - iocb->private = &overwrite; - /* Check whether we do a DIO overwrite or not */ - if (o_direct && !unaligned_aio) { - if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) { - if (ext4_should_dioread_nolock(inode)) - overwrite = 1; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; - } - } - - ret = __generic_file_write_iter(iocb, from); - /* - * Unaligned direct AIO must be the only IO in flight. Otherwise - * overlapping aligned IO after unaligned might result in data - * corruption. 
- */ - if (ret == -EIOCBQUEUED && unaligned_aio) - ext4_unwritten_wait(inode); - inode_unlock(inode); - - if (ret > 0) - ret = generic_write_sync(iocb, ret); - - return ret; - -out: - inode_unlock(inode); - return ret; + return ext4_buffered_write_iter(iocb, from); } #ifdef CONFIG_FS_DAX @@ -432,7 +779,7 @@ static int ext4_sample_last_mounted(struct super_block *sb, err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto out_journal; - strlcpy(sbi->s_es->s_last_mounted, cp, + strncpy(sbi->s_es->s_last_mounted, cp, sizeof(sbi->s_es->s_last_mounted)); ext4_handle_dirty_super(handle, sb); out_journal: @@ -496,12 +843,14 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) maxbytes, i_size_read(inode)); case SEEK_HOLE: inode_lock_shared(inode); - offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops); + offset = iomap_seek_hole(inode, offset, + &ext4_iomap_report_ops); inode_unlock_shared(inode); break; case SEEK_DATA: inode_lock_shared(inode); - offset = iomap_seek_data(inode, offset, &ext4_iomap_ops); + offset = iomap_seek_data(inode, offset, + &ext4_iomap_report_ops); inode_unlock_shared(inode); break; } diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index dbccf46f1770..37347ba868b7 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb, /* Are we just counting mappings? */ if (info->gfi_head->fmh_count == 0) { + if (info->gfi_head->fmh_entries == UINT_MAX) + return EXT4_QUERY_RANGE_ABORT; + if (rec_fsblk > info->gfi_next_fsblk) info->gfi_head->fmh_entries++; diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 5508baa11bb6..181f6a1d3cb0 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -44,30 +44,28 @@ */ static int ext4_sync_parent(struct inode *inode) { - struct dentry *dentry = NULL; - struct inode *next; + struct dentry *dentry, *next; int ret = 0; if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) return 0; - inode = igrab(inode); + dentry = d_find_any_alias(inode); + if (!dentry) + return 0; while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); - dentry = d_find_any_alias(inode); - if (!dentry) - break; - next = igrab(d_inode(dentry->d_parent)); + + next = dget_parent(dentry); dput(dentry); - if (!next) - break; - iput(inode); - inode = next; + dentry = next; + inode = dentry->d_inode; + /* * The directory inode may have gone through rmdir by now. But * the inode itself and its blocks are still allocated (we hold - * a reference to the inode so it didn't go through - * ext4_evict_inode()) and so we are safe to flush metadata - * blocks and the inode. + * a reference to the inode via its dentry), so it didn't go + * through ext4_evict_inode(), and so we are safe to flush + * metadata blocks and the inode.
*/ ret = sync_mapping_buffers(inode->i_mapping); if (ret) @@ -76,10 +74,47 @@ static int ext4_sync_parent(struct inode *inode) if (ret) break; } - iput(inode); + dput(dentry); return ret; } +static int ext4_fsync_nojournal(struct inode *inode, bool datasync, + bool *needs_barrier) +{ + int ret, err; + + ret = sync_mapping_buffers(inode->i_mapping); + if (!(inode->i_state & I_DIRTY_ALL)) + return ret; + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) + return ret; + + err = sync_inode_metadata(inode, 1); + if (!ret) + ret = err; + + if (!ret) + ret = ext4_sync_parent(inode); + if (test_opt(inode->i_sb, BARRIER)) + *needs_barrier = true; + + return ret; +} + +static int ext4_fsync_journal(struct inode *inode, bool datasync, + bool *needs_barrier) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; + tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; + + if (journal->j_flags & JBD2_BARRIER && + !jbd2_trans_will_send_data_barrier(journal, commit_tid)) + *needs_barrier = true; + + return jbd2_complete_transaction(journal, commit_tid); +} + /* * akpm: A new design for ext4_sync_file(). * @@ -94,14 +129,12 @@ static int ext4_sync_parent(struct inode *inode) int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { - struct inode *inode = file->f_mapping->host; - struct ext4_inode_info *ei = EXT4_I(inode); - journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; int ret = 0, err; - tid_t commit_tid; bool needs_barrier = false; + struct inode *inode = file->f_mapping->host; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) + if (unlikely(ext4_forced_shutdown(sbi))) return -EIO; J_ASSERT(ext4_journal_current_handle() == NULL); @@ -111,20 +144,11 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (sb_rdonly(inode->i_sb)) { /* Make sure that we read updated s_mount_flags value */ smp_rmb(); - if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED) + if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ret = -EROFS; goto out; } - if (!journal) { - ret = __generic_file_fsync(file, start, end, datasync); - if (!ret) - ret = ext4_sync_parent(inode); - if (test_opt(inode->i_sb, BARRIER)) - goto issue_flush; - goto out; - } - ret = file_write_and_wait_range(file, start, end); if (ret) return ret; @@ -142,18 +166,14 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) * (they were dirtied by commit). But that's OK - the blocks are * safe in-journal, which is all fsync() needs to ensure. */ - if (ext4_should_journal_data(inode)) { + if (!sbi->s_journal) + ret = ext4_fsync_nojournal(inode, datasync, &needs_barrier); + else if (ext4_should_journal_data(inode)) ret = ext4_force_commit(inode->i_sb); - goto out; - } + else + ret = ext4_fsync_journal(inode, datasync, &needs_barrier); - commit_tid = datasync ? 
ei->i_datasync_tid : ei->i_sync_tid; - if (journal->j_flags & JBD2_BARRIER && - !jbd2_trans_will_send_data_barrier(journal, commit_tid)) - needs_barrier = true; - ret = jbd2_complete_transaction(journal, commit_tid); if (needs_barrier) { - issue_flush: err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); if (!ret) ret = err; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 07a548dccb15..3aed57a8316f 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -660,7 +660,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent, * block has been written back to disk. (Yes, these values are * somewhat arbitrary...) */ -#define RECENTCY_MIN 5 +#define RECENTCY_MIN 60 #define RECENTCY_DIRTY 300 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino) @@ -1356,6 +1356,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, handle_t *handle; ext4_fsblk_t blk; int num, ret = 0, used_blks = 0; + unsigned long used_inos = 0; /* This should not happen, but just to be sure check this */ if (sb_rdonly(sb)) { @@ -1386,22 +1387,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, * used inodes so we need to skip blocks with used inodes in * inode table. */ - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) - used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) - - ext4_itable_unused_count(sb, gdp)), - sbi->s_inodes_per_block); + if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { + used_inos = EXT4_INODES_PER_GROUP(sb) - + ext4_itable_unused_count(sb, gdp); + used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block); - if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || - ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - - ext4_itable_unused_count(sb, gdp)) < - EXT4_FIRST_INO(sb)))) { - ext4_error(sb, "Something is wrong with group %u: " - "used itable blocks: %d; " - "itable unused count: %u", - group, used_blks, - ext4_itable_unused_count(sb, gdp)); - ret = 1; - goto err_out; + /* Bogus inode unused count? */ + if (used_blks < 0 || used_blks > sbi->s_itb_per_group) { + ext4_error(sb, "Something is wrong with group %u: " + "used itable blocks: %d; " + "itable unused count: %u", + group, used_blks, + ext4_itable_unused_count(sb, gdp)); + ret = 1; + goto err_out; + } + + used_inos += group * EXT4_INODES_PER_GROUP(sb); + /* + * Are there some uninitialized inodes in the inode table + * before the first normal inode? 
+ */ + if ((used_blks != sbi->s_itb_per_group) && + (used_inos < EXT4_FIRST_INO(sb))) { + ext4_error(sb, "Something is wrong with group %u: " + "itable unused count: %u; " + "itables initialized count: %ld", + group, ext4_itable_unused_count(sb, gdp), + used_inos); + ret = 1; + goto err_out; + } } blk = ext4_inode_table(sb, gdp) + used_blks; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 2fec62d764fa..519378a15bc6 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1918,6 +1918,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline) ext4_write_lock_xattr(inode, &no_expand); if (!ext4_has_inline_data(inode)) { + ext4_write_unlock_xattr(inode, &no_expand); *has_inline = 0; ext4_journal_stop(handle); return 0; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 55ac694005dc..73d15fbaf17f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -203,6 +203,7 @@ void ext4_evict_inode(struct inode *inode) */ int extra_credits = 6; struct ext4_xattr_inode_array *ea_inode_array = NULL; + bool freeze_protected = false; trace_ext4_evict_inode(inode); @@ -250,9 +251,14 @@ void ext4_evict_inode(struct inode *inode) /* * Protect us against freezing - iput() caller didn't have to have any - * protection against it + * protection against it. When we are in a running transaction though, + * we are already protected against freezing and we cannot grab further + * protection due to lock ordering constraints. */ - sb_start_intwrite(inode->i_sb); + if (!ext4_journal_current_handle()) { + sb_start_intwrite(inode->i_sb); + freeze_protected = true; + } if (!IS_NOQUOTA(inode)) extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb); @@ -271,7 +277,8 @@ void ext4_evict_inode(struct inode *inode) * cleaned up. */ ext4_orphan_del(NULL, inode); - sb_end_intwrite(inode->i_sb); + if (freeze_protected) + sb_end_intwrite(inode->i_sb); goto no_delete; } @@ -312,7 +319,8 @@ void ext4_evict_inode(struct inode *inode) stop_handle: ext4_journal_stop(handle); ext4_orphan_del(NULL, inode); - sb_end_intwrite(inode->i_sb); + if (freeze_protected) + sb_end_intwrite(inode->i_sb); ext4_xattr_inode_array_free(ea_inode_array); goto no_delete; } @@ -341,7 +349,8 @@ stop_handle: else ext4_free_inode(handle, inode); ext4_journal_stop(handle); - sb_end_intwrite(inode->i_sb); + if (freeze_protected) + sb_end_intwrite(inode->i_sb); ext4_xattr_inode_array_free(ea_inode_array); return; no_delete: @@ -835,136 +844,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, /* Maximum number of blocks we map for direct IO at once. */ #define DIO_MAX_BLOCKS 4096 -/* - * Get blocks function for the cases that need to start a transaction - - * generally difference cases of direct IO and DAX IO. It also handles retries - * in case of ENOSPC. 
- */ -static int ext4_get_block_trans(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int flags) -{ - int dio_credits; - handle_t *handle; - int retries = 0; - int ret; - - /* Trim mapping request to maximum we can map at once for DIO */ - if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS) - bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits; - dio_credits = ext4_chunk_trans_blocks(inode, - bh_result->b_size >> inode->i_blkbits); -retry: - handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); - if (IS_ERR(handle)) - return PTR_ERR(handle); - - ret = _ext4_get_block(inode, iblock, bh_result, flags); - ext4_journal_stop(handle); - - if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) - goto retry; - return ret; -} - -/* Get block function for DIO reads and writes to inodes without extents */ -int ext4_dio_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - /* We don't expect handle for direct IO */ - WARN_ON_ONCE(ext4_journal_current_handle()); - - if (!create) - return _ext4_get_block(inode, iblock, bh, 0); - return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE); -} - -/* - * Get block function for AIO DIO writes when we create unwritten extent if - * blocks are not allocated yet. The extent will be converted to written - * after IO is complete. - */ -static int ext4_dio_get_block_unwritten_async(struct inode *inode, - sector_t iblock, struct buffer_head *bh_result, int create) -{ - int ret; - - /* We don't expect handle for direct IO */ - WARN_ON_ONCE(ext4_journal_current_handle()); - - ret = ext4_get_block_trans(inode, iblock, bh_result, - EXT4_GET_BLOCKS_IO_CREATE_EXT); - - /* - * When doing DIO using unwritten extents, we need io_end to convert - * unwritten extents to written on IO completion. We allocate io_end - * once we spot unwritten extent and store it in b_private. Generic - * DIO code keeps b_private set and furthermore passes the value to - * our completion callback in 'private' argument. - */ - if (!ret && buffer_unwritten(bh_result)) { - if (!bh_result->b_private) { - ext4_io_end_t *io_end; - - io_end = ext4_init_io_end(inode, GFP_KERNEL); - if (!io_end) - return -ENOMEM; - bh_result->b_private = io_end; - ext4_set_io_unwritten_flag(inode, io_end); - } - set_buffer_defer_completion(bh_result); - } - - return ret; -} - -/* - * Get block function for non-AIO DIO writes when we create unwritten extent if - * blocks are not allocated yet. The extent will be converted to written - * after IO is complete by ext4_direct_IO_write(). - */ -static int ext4_dio_get_block_unwritten_sync(struct inode *inode, - sector_t iblock, struct buffer_head *bh_result, int create) -{ - int ret; - - /* We don't expect handle for direct IO */ - WARN_ON_ONCE(ext4_journal_current_handle()); - - ret = ext4_get_block_trans(inode, iblock, bh_result, - EXT4_GET_BLOCKS_IO_CREATE_EXT); - - /* - * Mark inode as having pending DIO writes to unwritten extents. - * ext4_direct_IO_write() checks this flag and converts extents to - * written. 
- */ - if (!ret && buffer_unwritten(bh_result)) - ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); - - return ret; -} - -static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - int ret; - - ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n", - inode->i_ino, create); - /* We don't expect handle for direct IO */ - WARN_ON_ONCE(ext4_journal_current_handle()); - - ret = _ext4_get_block(inode, iblock, bh_result, 0); - /* - * Blocks should have been preallocated! ext4_file_write_iter() checks - * that. - */ - WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result)); - - return ret; -} - - /* * `handle' can be NULL if create is zero */ @@ -2067,13 +1946,13 @@ static int __ext4_journalled_writepage(struct page *page, if (!ret) ret = err; - if (!ext4_has_inline_data(inode)) - ext4_walk_page_buffers(NULL, page_bufs, 0, len, - NULL, bput_one); ext4_set_inode_state(inode, EXT4_STATE_JDATA); out: unlock_page(page); out_no_pagelock: + if (!inline_data && page_bufs) + ext4_walk_page_buffers(NULL, page_bufs, 0, len, + NULL, bput_one); brelse(inode_bh); return ret; } @@ -2131,7 +2010,7 @@ static int ext4_writepage(struct page *page, bool keep_towrite = false; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { - ext4_invalidatepage(page, 0, PAGE_SIZE); + inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return -EIO; } @@ -3415,208 +3294,173 @@ static bool ext4_inode_datasync_dirty(struct inode *inode) return inode->i_state & I_DIRTY_DATASYNC; } -static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, - unsigned flags, struct iomap *iomap) +static void ext4_set_iomap(struct inode *inode, struct iomap *iomap, + struct ext4_map_blocks *map, loff_t offset, + loff_t length) { - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - unsigned int blkbits = inode->i_blkbits; - unsigned long first_block, last_block; - struct ext4_map_blocks map; - bool delalloc = false; - int ret; - - if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) - return -EINVAL; - first_block = offset >> blkbits; - last_block = min_t(loff_t, (offset + length - 1) >> blkbits, - EXT4_MAX_LOGICAL_BLOCK); - - if (flags & IOMAP_REPORT) { - if (ext4_has_inline_data(inode)) { - ret = ext4_inline_data_iomap(inode, iomap); - if (ret != -EAGAIN) { - if (ret == 0 && offset >= iomap->length) - ret = -ENOENT; - return ret; - } - } - } else { - if (WARN_ON_ONCE(ext4_has_inline_data(inode))) - return -ERANGE; - } - - map.m_lblk = first_block; - map.m_len = last_block - first_block + 1; - - if (flags & IOMAP_REPORT) { - ret = ext4_map_blocks(NULL, inode, &map, 0); - if (ret < 0) - return ret; - - if (ret == 0) { - ext4_lblk_t end = map.m_lblk + map.m_len - 1; - struct extent_status es; - - ext4_es_find_extent_range(inode, &ext4_es_is_delayed, - map.m_lblk, end, &es); - - if (!es.es_len || es.es_lblk > end) { - /* entire range is a hole */ - } else if (es.es_lblk > map.m_lblk) { - /* range starts with a hole */ - map.m_len = es.es_lblk - map.m_lblk; - } else { - ext4_lblk_t offs = 0; - - if (es.es_lblk < map.m_lblk) - offs = map.m_lblk - es.es_lblk; - map.m_lblk = es.es_lblk + offs; - map.m_len = es.es_len - offs; - delalloc = true; - } - } - } else if (flags & IOMAP_WRITE) { - int dio_credits; - handle_t *handle; - int retries = 0; - - /* Trim mapping request to maximum we can map at once for DIO */ - if (map.m_len > DIO_MAX_BLOCKS) - map.m_len = DIO_MAX_BLOCKS; - dio_credits = 
ext4_chunk_trans_blocks(inode, map.m_len);
-retry:
-		/*
-		 * Either we allocate blocks and then we don't get unwritten
-		 * extent so we have reserved enough credits, or the blocks
-		 * are already allocated and unwritten and in that case
-		 * extent conversion fits in the credits as well.
-		 */
-		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
-					    dio_credits);
-		if (IS_ERR(handle))
-			return PTR_ERR(handle);
-
-		ret = ext4_map_blocks(handle, inode, &map,
-				      EXT4_GET_BLOCKS_CREATE_ZERO);
-		if (ret < 0) {
-			ext4_journal_stop(handle);
-			if (ret == -ENOSPC &&
-			    ext4_should_retry_alloc(inode->i_sb, &retries))
-				goto retry;
-			return ret;
-		}
-
-		/*
-		 * If we added blocks beyond i_size, we need to make sure they
-		 * will get truncated if we crash before updating i_size in
-		 * ext4_iomap_end(). For faults we don't need to do that (and
-		 * even cannot because for orphan list operations inode_lock is
-		 * required) - if we happen to instantiate block beyond i_size,
-		 * it is because we race with truncate which has already added
-		 * the inode to the orphan list.
-		 */
-		if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
-		    (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
-			int err;
-
-			err = ext4_orphan_add(handle, inode);
-			if (err < 0) {
-				ext4_journal_stop(handle);
-				return err;
-			}
-		}
-		ext4_journal_stop(handle);
-	} else {
-		ret = ext4_map_blocks(NULL, inode, &map, 0);
-		if (ret < 0)
-			return ret;
-	}
+	u8 blkbits = inode->i_blkbits;
 
 	/*
 	 * Writes that span EOF might trigger an I/O size update on completion,
-	 * so consider them to be dirty for the purposes of O_DSYNC, even if
-	 * there is no other metadata changes being made or are pending here.
+	 * so consider them to be dirty for the purpose of O_DSYNC, even if
+	 * there are no other metadata changes being made or pending.
 	 */
 	iomap->flags = 0;
 	if (ext4_inode_datasync_dirty(inode) ||
 	    offset + length > i_size_read(inode))
 		iomap->flags |= IOMAP_F_DIRTY;
-	iomap->bdev = inode->i_sb->s_bdev;
-	iomap->dax_dev = sbi->s_daxdev;
-	iomap->offset = (u64)first_block << blkbits;
-	iomap->length = (u64)map.m_len << blkbits;
 
-	if (ret == 0) {
-		iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
-		iomap->addr = IOMAP_NULL_ADDR;
-	} else {
-		if (map.m_flags & EXT4_MAP_MAPPED) {
-			iomap->type = IOMAP_MAPPED;
-		} else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
-			iomap->type = IOMAP_UNWRITTEN;
-		} else {
-			WARN_ON_ONCE(1);
-			return -EIO;
-		}
-		iomap->addr = (u64)map.m_pblk << blkbits;
-	}
-
-	if (map.m_flags & EXT4_MAP_NEW)
+	if (map->m_flags & EXT4_MAP_NEW)
 		iomap->flags |= IOMAP_F_NEW;
 
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+	iomap->offset = (u64) map->m_lblk << blkbits;
+	iomap->length = (u64) map->m_len << blkbits;
+
+	/*
+	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
+	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
+	 * set. In order for any allocated unwritten extents to be converted
+	 * into written extents correctly within the ->end_io() handler, we
+	 * need to ensure that the iomap->type is set appropriately. That is
+	 * why we need to check for the EXT4_MAP_UNWRITTEN bit first.
+	 */
+	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
+		iomap->type = IOMAP_UNWRITTEN;
+		iomap->addr = (u64) map->m_pblk << blkbits;
+	} else if (map->m_flags & EXT4_MAP_MAPPED) {
+		iomap->type = IOMAP_MAPPED;
+		iomap->addr = (u64) map->m_pblk << blkbits;
+	} else {
+		iomap->type = IOMAP_HOLE;
+		iomap->addr = IOMAP_NULL_ADDR;
+	}
+}
+
+static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
+			    unsigned int flags)
+{
+	handle_t *handle;
+	u8 blkbits = inode->i_blkbits;
+	int ret, dio_credits, m_flags = 0, retries = 0;
+
+	/*
+	 * Trim the mapping request to the maximum value that we can map at
+	 * once for direct I/O.
+	 */
+	if (map->m_len > DIO_MAX_BLOCKS)
+		map->m_len = DIO_MAX_BLOCKS;
+	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
+
+retry:
+	/*
+	 * Either we allocate blocks and then don't get an unwritten extent,
+	 * in which case we have reserved enough credits; or the blocks are
+	 * already allocated and unwritten, in which case the extent
+	 * conversion fits into the credits as well.
+	 */
+	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	/*
+	 * DAX and direct I/O are the only two operations that are currently
+	 * supported with IOMAP_WRITE.
+	 */
+	WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
+	if (IS_DAX(inode))
+		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
+	/*
+	 * We use i_size instead of i_disksize here because delalloc writeback
+	 * can complete at any point during the I/O and subsequently push the
+	 * i_disksize out to i_size. This could be beyond where direct I/O is
+	 * happening and thus expose allocated blocks to direct I/O reads.
+	 */
+	else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
+		m_flags = EXT4_GET_BLOCKS_CREATE;
+	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
+
+	ret = ext4_map_blocks(handle, inode, map, m_flags);
+
+	/*
+	 * We cannot fill holes in indirect tree-based inodes as that could
+	 * expose stale data in the case of a crash. Use the magic error code
+	 * to fall back to buffered I/O.
+	 */
+	if (!m_flags && !ret)
+		ret = -ENOTBLK;
+
+	ext4_journal_stop(handle);
+	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+
+	return ret;
+}
+
+static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+			    unsigned flags, struct iomap *iomap)
+{
+	int ret;
+	struct ext4_map_blocks map;
+	u8 blkbits = inode->i_blkbits;
+
+	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+		return -ERANGE;
+
+	/*
+	 * Calculate the first and last logical blocks respectively.
+	 */
+	map.m_lblk = offset >> blkbits;
+	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+
+	if (flags & IOMAP_WRITE)
+		ret = ext4_iomap_alloc(inode, &map, flags);
+	else
+		ret = ext4_map_blocks(NULL, inode, &map, 0);
+
+	if (ret < 0)
+		return ret;
+
+	ext4_set_iomap(inode, iomap, &map, offset, length);
+
 	return 0;
 }
 
+static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
+		loff_t length, unsigned flags, struct iomap *iomap)
+{
+	int ret;
+
+	/*
+	 * Even for writes we don't need to allocate blocks, so just pretend
+	 * we are reading to save the overhead of starting a transaction.
+	 */
+	flags &= ~IOMAP_WRITE;
+	ret = ext4_iomap_begin(inode, offset, length, flags, iomap);
+	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
+	return ret;
+}
+
 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 			  ssize_t written, unsigned flags, struct iomap *iomap)
 {
-	int ret = 0;
-	handle_t *handle;
-	int blkbits = inode->i_blkbits;
-	bool truncate = false;
-
-	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
-		return 0;
-
-	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto orphan_del;
-	}
-	if (ext4_update_inode_size(inode, offset + written))
-		ext4_mark_inode_dirty(handle, inode);
 	/*
-	 * We may need to truncate allocated but not written blocks beyond EOF.
+	 * Check to see whether an error occurred while writing out the data to
+	 * the allocated blocks. If so, return the magic error code so that we
+	 * fall back to buffered I/O and attempt to complete the remainder of
+	 * the I/O. Any blocks that may have been allocated in preparation for
+	 * the direct I/O will be reused during buffered I/O.
 	 */
-	if (iomap->offset + iomap->length >
-	    ALIGN(inode->i_size, 1 << blkbits)) {
-		ext4_lblk_t written_blk, end_blk;
-
-		written_blk = (offset + written) >> blkbits;
-		end_blk = (offset + length) >> blkbits;
-		if (written_blk < end_blk && ext4_can_truncate(inode))
-			truncate = true;
-	}
-	/*
-	 * Remove inode from orphan list if we were extending a inode and
-	 * everything went fine.
-	 */
-	if (!truncate && inode->i_nlink &&
-	    !list_empty(&EXT4_I(inode)->i_orphan))
-		ext4_orphan_del(handle, inode);
-	ext4_journal_stop(handle);
-	if (truncate) {
-		ext4_truncate_failed_write(inode);
-orphan_del:
-		/*
-		 * If truncate failed early the inode might still be on the
-		 * orphan list; we need to make sure the inode is removed from
-		 * the orphan list in that case.
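For readers tracking the removal: the deleted branch relied on the orphan list for crash safety while a write extended the file. A hedged, condensed sketch of that idea, using the real ext4 orphan helpers rather than the exact removed code:

	handle_t *handle;
	int ret;

	/* Assumed, simplified flow; error handling elided for brevity. */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (!IS_ERR(handle)) {
		ret = ext4_orphan_add(handle, inode);	/* crash => recovery truncates */
		if (!ret) {
			/* ... allocate and write blocks beyond i_size ... */
			if (inode->i_nlink)
				ext4_orphan_del(handle, inode);	/* success path */
		}
		ext4_journal_stop(handle);
	}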
- */ - if (inode->i_nlink) - ext4_orphan_del(NULL, inode); - } - return ret; + if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0) + return -ENOTBLK; + return 0; } const struct iomap_ops ext4_iomap_ops = { @@ -3624,276 +3468,78 @@ const struct iomap_ops ext4_iomap_ops = { .iomap_end = ext4_iomap_end, }; -static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - ssize_t size, void *private) +const struct iomap_ops ext4_iomap_overwrite_ops = { + .iomap_begin = ext4_iomap_overwrite_begin, + .iomap_end = ext4_iomap_end, +}; + +static bool ext4_iomap_is_delalloc(struct inode *inode, + struct ext4_map_blocks *map) { - ext4_io_end_t *io_end = private; + struct extent_status es; + ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1; - /* if not async direct IO just return */ - if (!io_end) - return 0; + ext4_es_find_extent_range(inode, &ext4_es_is_delayed, + map->m_lblk, end, &es); - ext_debug("ext4_end_io_dio(): io_end 0x%p " - "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", - io_end, io_end->inode->i_ino, iocb, offset, size); + if (!es.es_len || es.es_lblk > end) + return false; + + if (es.es_lblk > map->m_lblk) { + map->m_len = es.es_lblk - map->m_lblk; + return false; + } + + offset = map->m_lblk - es.es_lblk; + map->m_len = es.es_len - offset; + + return true; +} + +static int ext4_iomap_begin_report(struct inode *inode, loff_t offset, + loff_t length, unsigned int flags, + struct iomap *iomap) +{ + int ret; + bool delalloc = false; + struct ext4_map_blocks map; + u8 blkbits = inode->i_blkbits; + + if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) + return -EINVAL; + + if (ext4_has_inline_data(inode)) { + ret = ext4_inline_data_iomap(inode, iomap); + if (ret != -EAGAIN) { + if (ret == 0 && offset >= iomap->length) + ret = -ENOENT; + return ret; + } + } /* - * Error during AIO DIO. We cannot convert unwritten extents as the - * data was not written. Just clear the unwritten flag and drop io_end. + * Calculate the first and last logical block respectively. */ - if (size <= 0) { - ext4_clear_io_unwritten_flag(io_end); - size = 0; - } - io_end->offset = offset; - io_end->size = size; - ext4_put_io_end(io_end); + map.m_lblk = offset >> blkbits; + map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, + EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; + + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + return ret; + if (ret == 0) + delalloc = ext4_iomap_is_delalloc(inode, &map); + + ext4_set_iomap(inode, iomap, &map, offset, length); + if (delalloc && iomap->type == IOMAP_HOLE) + iomap->type = IOMAP_DELALLOC; return 0; } -/* - * Handling of direct IO writes. - * - * For ext4 extent files, ext4 will do direct-io write even to holes, - * preallocated extents, and those write extend the file, no need to - * fall back to buffered IO. - * - * For holes, we fallocate those blocks, mark them as unwritten - * If those blocks were preallocated, we mark sure they are split, but - * still keep the range to write as unwritten. - * - * The unwritten extents will be converted to written when DIO is completed. - * For async direct IO, since the IO may still pending when return, we - * set up an end_io call back function, which will do the conversion - * when async direct IO completed. - * - * If the O_DIRECT write will extend the file then add this inode to the - * orphan list. So recovery will truncate it back to the original size - * if the machine crashes during the write. 
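The new ext4_iomap_end() above no longer cleans up after a failed write; it returns the magic -ENOTBLK so the caller retries through the page cache. A sketch of how a caller consumes that value; the iomap_dio_rw() argument list and the dio_ops table are assumptions modeled on the 5.5-era write path:

	ssize_t ret;

	/* Try direct I/O through the iomap machinery first (dio_ops assumed). */
	ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &dio_ops,
			   is_sync_kiocb(iocb));
	if (ret == -ENOTBLK)
		/* The mapping path declined direct I/O: redo the write buffered. */
		ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);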
- * - */ -static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - struct ext4_inode_info *ei = EXT4_I(inode); - ssize_t ret; - loff_t offset = iocb->ki_pos; - size_t count = iov_iter_count(iter); - int overwrite = 0; - get_block_t *get_block_func = NULL; - int dio_flags = 0; - loff_t final_size = offset + count; - int orphan = 0; - handle_t *handle; - - if (final_size > inode->i_size || final_size > ei->i_disksize) { - /* Credits for sb + inode write */ - handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } - ret = ext4_orphan_add(handle, inode); - if (ret) { - ext4_journal_stop(handle); - goto out; - } - orphan = 1; - ext4_update_i_disksize(inode, inode->i_size); - ext4_journal_stop(handle); - } - - BUG_ON(iocb->private == NULL); - - /* - * Make all waiters for direct IO properly wait also for extent - * conversion. This also disallows race between truncate() and - * overwrite DIO as i_dio_count needs to be incremented under i_mutex. - */ - inode_dio_begin(inode); - - /* If we do a overwrite dio, i_mutex locking can be released */ - overwrite = *((int *)iocb->private); - - if (overwrite) - inode_unlock(inode); - - /* - * For extent mapped files we could direct write to holes and fallocate. - * - * Allocated blocks to fill the hole are marked as unwritten to prevent - * parallel buffered read to expose the stale data before DIO complete - * the data IO. - * - * As to previously fallocated extents, ext4 get_block will just simply - * mark the buffer mapped but still keep the extents unwritten. - * - * For non AIO case, we will convert those unwritten extents to written - * after return back from blockdev_direct_IO. That way we save us from - * allocating io_end structure and also the overhead of offloading - * the extent convertion to a workqueue. - * - * For async DIO, the conversion needs to be deferred when the - * IO is completed. The ext4 end_io callback function will be - * called to take care of the conversion work. Here for async - * case, we allocate an io_end structure to hook to the iocb. 
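The deferred conversion this removed comment describes moves into an iomap ->end_io hook after this patch. A sketch of that completion handler, modeled on ext4_dio_write_end_io(); treat the exact hook signature as an assumption:

	static int sketch_dio_write_end_io(struct kiocb *iocb, ssize_t size,
					   int error, unsigned int flags)
	{
		struct inode *inode = file_inode(iocb->ki_filp);

		if (error)
			return error;
		/* Data is on disk; now make any unwritten extents visible. */
		if (size && (flags & IOMAP_DIO_UNWRITTEN))
			return ext4_convert_unwritten_extents(NULL, inode,
							      iocb->ki_pos, size);
		return 0;
	}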
- */ - iocb->private = NULL; - if (overwrite) - get_block_func = ext4_dio_get_block_overwrite; - else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || - round_down(offset, i_blocksize(inode)) >= inode->i_size) { - get_block_func = ext4_dio_get_block; - dio_flags = DIO_LOCKING | DIO_SKIP_HOLES; - } else if (is_sync_kiocb(iocb)) { - get_block_func = ext4_dio_get_block_unwritten_sync; - dio_flags = DIO_LOCKING; - } else { - get_block_func = ext4_dio_get_block_unwritten_async; - dio_flags = DIO_LOCKING; - } - ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - get_block_func, ext4_end_io_dio, NULL, - dio_flags); - - if (ret > 0 && !overwrite && ext4_test_inode_state(inode, - EXT4_STATE_DIO_UNWRITTEN)) { - int err; - /* - * for non AIO case, since the IO is already - * completed, we could do the conversion right here - */ - err = ext4_convert_unwritten_extents(NULL, inode, - offset, ret); - if (err < 0) - ret = err; - ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); - } - - inode_dio_end(inode); - /* take i_mutex locking again if we do a ovewrite dio */ - if (overwrite) - inode_lock(inode); - - if (ret < 0 && final_size > inode->i_size) - ext4_truncate_failed_write(inode); - - /* Handle extending of i_size after direct IO write */ - if (orphan) { - int err; - - /* Credits for sb + inode write */ - handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); - if (IS_ERR(handle)) { - /* - * We wrote the data but cannot extend - * i_size. Bail out. In async io case, we do - * not return error here because we have - * already submmitted the corresponding - * bio. Returning error here makes the caller - * think that this IO is done and failed - * resulting in race with bio's completion - * handler. - */ - if (!ret) - ret = PTR_ERR(handle); - if (inode->i_nlink) - ext4_orphan_del(NULL, inode); - - goto out; - } - if (inode->i_nlink) - ext4_orphan_del(handle, inode); - if (ret > 0) { - loff_t end = offset + ret; - if (end > inode->i_size || end > ei->i_disksize) { - ext4_update_i_disksize(inode, end); - if (end > inode->i_size) - i_size_write(inode, end); - /* - * We're going to return a positive `ret' - * here due to non-zero-length I/O, so there's - * no way of reporting error returns from - * ext4_mark_inode_dirty() to userspace. So - * ignore it. - */ - ext4_mark_inode_dirty(handle, inode); - } - } - err = ext4_journal_stop(handle); - if (ret == 0) - ret = err; - } -out: - return ret; -} - -static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) -{ - int unlocked = 0; - struct inode *inode = iocb->ki_filp->f_mapping->host; - ssize_t ret; - - if (ext4_should_dioread_nolock(inode)) { - /* - * Nolock dioread optimization may be dynamically disabled - * via ext4_inode_block_unlocked_dio(). Check inode's state - * while holding extra i_dio_count ref. - */ - inode_dio_begin(inode); - smp_mb(); - if (unlikely(ext4_test_inode_state(inode, - EXT4_STATE_DIOREAD_LOCK))) - inode_dio_end(inode); - else - unlocked = 1; - } - - ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, ext4_dio_get_block, - NULL, NULL, - unlocked ? 
0 : DIO_LOCKING); - if (unlocked) - inode_dio_end(inode); - - return ret; -} - -static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - size_t count = iov_iter_count(iter); - loff_t offset = iocb->ki_pos; - ssize_t ret; - -#ifdef CONFIG_FS_ENCRYPTION - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) - return 0; -#endif - if (fsverity_active(inode)) - return 0; - - /* - * If we are doing data journalling we don't support O_DIRECT - */ - if (ext4_should_journal_data(inode)) - return 0; - - /* Let buffer I/O handle the inline data case. */ - if (ext4_has_inline_data(inode)) - return 0; - - trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); - if (iov_iter_rw(iter) == READ) - ret = ext4_direct_IO_read(iocb, iter); - else - ret = ext4_direct_IO_write(iocb, iter); - trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); - return ret; -} +const struct iomap_ops ext4_iomap_report_ops = { + .iomap_begin = ext4_iomap_begin_report, +}; /* * Pages can be marked dirty completely asynchronously from ext4's journalling @@ -3932,7 +3578,7 @@ static const struct address_space_operations ext4_aops = { .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, - .direct_IO = ext4_direct_IO, + .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, @@ -3949,7 +3595,7 @@ static const struct address_space_operations ext4_journalled_aops = { .bmap = ext4_bmap, .invalidatepage = ext4_journalled_invalidatepage, .releasepage = ext4_releasepage, - .direct_IO = ext4_direct_IO, + .direct_IO = noop_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; @@ -3965,7 +3611,7 @@ static const struct address_space_operations ext4_da_aops = { .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, - .direct_IO = ext4_direct_IO, + .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, @@ -4683,7 +4329,7 @@ make_io: if (end > table) end = table; while (b <= end) - sb_breadahead(sb, b++); + sb_breadahead_unmovable(sb, b++); } /* @@ -5143,7 +4789,7 @@ static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); - u64 i_blocks = inode->i_blocks; + u64 i_blocks = READ_ONCE(inode->i_blocks); struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { @@ -5198,7 +4844,7 @@ static int other_inode_match(struct inode * inode, unsigned long ino, (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); @@ -5256,7 +4902,7 @@ static int ext4_do_update_inode(handle_t *handle, struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; struct super_block *sb = inode->i_sb; - int err = 0, rc, block; + int err = 0, block; int need_datasync = 0, set_large_file = 0; uid_t i_uid; gid_t i_gid; @@ -5269,6 +4915,12 @@ static int ext4_do_update_inode(handle_t *handle, if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); + err = ext4_inode_blocks_set(handle, 
raw_inode, ei); + if (err) { + spin_unlock(&ei->i_raw_lock); + goto out_brelse; + } + raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); i_gid = i_gid_read(inode); @@ -5302,18 +4954,13 @@ static int ext4_do_update_inode(handle_t *handle, EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); - err = ext4_inode_blocks_set(handle, raw_inode, ei); - if (err) { - spin_unlock(&ei->i_raw_lock); - goto out_brelse; - } raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); - if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { + if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } @@ -5367,9 +5014,9 @@ static int ext4_do_update_inode(handle_t *handle, bh->b_data); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - rc = ext4_handle_dirty_metadata(handle, NULL, bh); - if (!err) - err = rc; + err = ext4_handle_dirty_metadata(handle, NULL, bh); + if (err) + goto out_brelse; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index be4fd8f3d58c..4b59cd9deba4 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1166,7 +1166,10 @@ resizefs_out: err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto pwsalt_err_journal; + lock_buffer(sbi->s_sbh); generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); + ext4_superblock_csum_set(sb); + unlock_buffer(sbi->s_sbh); err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); pwsalt_err_journal: diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c92cdf24238c..550c5f96d64f 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, BUG_ON(buddy == NULL); k = mb_find_next_zero_bit(buddy, max, 0); - BUG_ON(k >= max); - + if (k >= max) { + ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, + "%d free clusters of order %d. 
But found 0", + grp->bb_counters[i], i); + ext4_mark_group_bitmap_corrupted(ac->ac_sb, + e4b->bd_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + break; + } ac->ac_found++; ac->ac_b_ex.fe_len = 1 << i; @@ -1936,7 +1943,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, int free; free = e4b->bd_info->bb_free; - BUG_ON(free <= 0); + if (WARN_ON(free <= 0)) + return; i = e4b->bd_info->bb_first_free; @@ -1959,7 +1967,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, } mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); - BUG_ON(ex.fe_len <= 0); + if (WARN_ON(ex.fe_len <= 0)) + break; if (free < ex.fe_len) { ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " @@ -3917,7 +3926,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, */ static noinline_for_stack int ext4_mb_discard_group_preallocations(struct super_block *sb, - ext4_group_t group, int needed) + ext4_group_t group, int *busy) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct buffer_head *bitmap_bh = NULL; @@ -3925,7 +3934,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, struct list_head list; struct ext4_buddy e4b; int err; - int busy = 0; int free = 0; mb_debug(1, "discard preallocation for group %u\n", group); @@ -3949,18 +3957,15 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, return 0; } - if (needed == 0) - needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; - INIT_LIST_HEAD(&list); -repeat: + ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { spin_unlock(&pa->pa_lock); - busy = 1; + *busy = 1; continue; } if (pa->pa_deleted) { @@ -3980,14 +3985,6 @@ repeat: list_add(&pa->u.pa_tmp_list, &list); } - /* if we still need more blocks and some PAs were used, try again */ - if (free < needed && busy) { - busy = 0; - ext4_unlock_group(sb, group); - cond_resched(); - goto repeat; - } - /* found anything to free? 
*/ if (list_empty(&list)) { BUG_ON(free != 0); @@ -4521,13 +4518,24 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; - int freed = 0; + int freed = 0, busy = 0; + int retry = 0; trace_ext4_mb_discard_preallocations(sb, needed); + + if (needed == 0) + needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; + repeat: for (i = 0; i < ngroups && needed > 0; i++) { - ret = ext4_mb_discard_group_preallocations(sb, i, needed); + ret = ext4_mb_discard_group_preallocations(sb, i, &busy); freed += ret; needed -= ret; + cond_resched(); + } + + if (needed > 0 && busy && ++retry < 3) { + busy = 0; + goto repeat; } return freed; @@ -4741,6 +4749,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, cluster), "Block already on to-be-freed list"); + kmem_cache_free(ext4_free_data_cachep, new_entry); return 0; } } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a564d0289a70..4c37abe76851 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1392,8 +1392,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, + buf_size, offset)) return -1; *res_dir = de; return 1; @@ -1852,7 +1852,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); - /* Split the existing block in the middle, size-wise */ + /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { @@ -1862,8 +1862,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, size += map[i].size; move++; } - /* map index at which we will split */ - split = count - move; + /* + * map index at which we will split + * + * If the sum of active entries didn't exceed half the block size, just + * split it in half by count; each resulting block will have at least + * half the space free. 
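+	 *
+	 * Illustrative example (numbers are hypothetical): with count = 80
+	 * entries in a 4k block, if the tail-to-head walk crosses half the
+	 * block after move = 30 entries, then split = count - move = 50 and
+	 * each half keeps at least half its space free. If all live entries
+	 * together fit in half the block, count - move would reach 0, so
+	 * split = count/2 = 40 is used to balance the entry counts instead.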
+	 */
+	if (i > 0)
+		split = count - move;
+	else
+		split = count/2;
+
 	hash2 = map[split].hash;
 	continued = hash2 == map[split - 1].hash;
 	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
@@ -2182,6 +2192,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
 	if (!dentry->d_name.len)
 		return -EINVAL;
 
+	if (fscrypt_is_nokey_name(dentry))
+		return -ENOKEY;
+
 #ifdef CONFIG_UNICODE
 	if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) &&
 	    sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name))
@@ -2392,11 +2405,10 @@ again:
 							    (frame - 1)->bh);
 			if (err)
 				goto journal_error;
-			if (restart) {
-				err = ext4_handle_dirty_dx_node(handle, dir,
-							   frame->bh);
+			err = ext4_handle_dirty_dx_node(handle, dir,
+							frame->bh);
+			if (err)
 				goto journal_error;
-			}
 		} else {
 			struct dx_root *dxroot;
 			memcpy((char *) entries2, (char *) entries,
@@ -2462,7 +2474,7 @@ int ext4_generic_delete_entry(handle_t *handle,
 	de = (struct ext4_dir_entry_2 *)entry_buf;
 	while (i < buf_size - csum_size) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 bh->b_data, bh->b_size, i))
+					 entry_buf, buf_size, i))
 			return -EFSCORRUPTED;
 		if (de == de_del) {
 			if (pde)
@@ -3531,12 +3543,35 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
 			return retval;
 		}
 	}
-	brelse(ent->bh);
-	ent->bh = NULL;
 
 	return 0;
 }
 
+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+			  unsigned ino, unsigned file_type)
+{
+	struct ext4_renament old = *ent;
+	int retval = 0;
+
+	/*
+	 * old->de could have moved from under us during make indexed dir,
+	 * so old->de may no longer be valid and we need to find it again
+	 * before resetting the old inode info.
+	 */
+	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+	if (IS_ERR(old.bh))
+		retval = PTR_ERR(old.bh);
+	if (!old.bh)
+		retval = -ENOENT;
+	if (retval) {
+		ext4_std_error(old.dir->i_sb, retval);
+		return;
+	}
+
+	ext4_setent(handle, &old, ino, file_type);
+	brelse(old.bh);
+}
+
 static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
 				  const struct qstr *d_name)
 {
@@ -3696,14 +3731,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 */
 	retval = -ENOENT;
 	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
-		goto end_rename;
+		goto release_bh;
 
 	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
 				 &new.de, &new.inlined);
 	if (IS_ERR(new.bh)) {
 		retval = PTR_ERR(new.bh);
 		new.bh = NULL;
-		goto end_rename;
+		goto release_bh;
 	}
 	if (new.bh) {
 		if (!new.inode) {
@@ -3720,18 +3755,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 		handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
 		if (IS_ERR(handle)) {
 			retval = PTR_ERR(handle);
-			handle = NULL;
-			goto end_rename;
+			goto release_bh;
 		}
 	} else {
 		whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
 		if (IS_ERR(whiteout)) {
 			retval = PTR_ERR(whiteout);
-			whiteout = NULL;
-			goto end_rename;
+			goto release_bh;
 		}
 	}
 
+	old_file_type = old.de->file_type;
 	if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
 		ext4_handle_sync(handle);
 
@@ -3759,7 +3793,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 	force_reread = (new.dir->i_ino == old.dir->i_ino &&
 			ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
 
-	old_file_type = old.de->file_type;
 	if (whiteout) {
 		/*
 		 * Do this before adding a new entry, so the old entry is sure
@@ -3831,17 +3864,23 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 	retval = 0;
 
 end_rename:
+	if (whiteout) {
+		if (retval) {
+			ext4_resetent(handle, &old,
old.inode->i_ino, old_file_type); + drop_nlink(whiteout); + ext4_orphan_add(handle, whiteout); + } + unlock_new_inode(whiteout); + ext4_journal_stop(handle); + iput(whiteout); + } else { + ext4_journal_stop(handle); + } +release_bh: brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); - if (whiteout) { - if (retval) - drop_nlink(whiteout); - unlock_new_inode(whiteout); - iput(whiteout); - } - if (handle) - ext4_journal_stop(handle); return retval; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 080e25f6ef56..ad1d4c8faf44 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -861,8 +861,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, BUFFER_TRACE(dind, "get_write_access"); err = ext4_journal_get_write_access(handle, dind); - if (unlikely(err)) + if (unlikely(err)) { ext4_std_error(sb, err); + goto errout; + } /* ext4_reserve_inode_write() gets a reference on the iloc */ err = ext4_reserve_inode_write(handle, inode, &iloc); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 539d63503e99..1fbaeea9ee85 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -67,10 +67,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -390,7 +390,8 @@ static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); - ext4_commit_super(sb, 1); + if (!bdev_read_only(sb->s_bdev)) + ext4_commit_super(sb, 1); } /* @@ -455,19 +456,17 @@ static bool system_going_down(void) static void ext4_handle_error(struct super_block *sb) { + journal_t *journal = EXT4_SB(sb)->s_journal; + if (test_opt(sb, WARN_ON_ERROR)) WARN_ON_ONCE(1); - if (sb_rdonly(sb)) + if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT)) return; - if (!test_opt(sb, ERRORS_CONT)) { - journal_t *journal = EXT4_SB(sb)->s_journal; - - EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; - if (journal) - jbd2_journal_abort(journal, -EIO); - } + EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; + if (journal) + jbd2_journal_abort(journal, -EIO); /* * We force ERRORS_RO behavior when system is rebooting. 
Otherwise we * could panic during 'reboot -f' as the underlying device got already @@ -1019,6 +1018,7 @@ static void ext4_put_super(struct super_block *sb) percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); percpu_free_rwsem(&sbi->s_writepages_rwsem); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) @@ -1757,8 +1757,8 @@ static const struct mount_opts { {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), MOPT_CLEAR | MOPT_Q}, - {Opt_usrjquota, 0, MOPT_Q}, - {Opt_grpjquota, 0, MOPT_Q}, + {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING}, + {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING}, {Opt_offusrjquota, 0, MOPT_Q}, {Opt_offgrpjquota, 0, MOPT_Q}, {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, @@ -2035,6 +2035,16 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, #endif } else if (token == Opt_dax) { #ifdef CONFIG_FS_DAX + if (is_remount && test_opt(sb, DAX)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dax"); + return -1; + } + if (is_remount && !(sbi->s_mount_opt & EXT4_MOUNT_DAX)) { + ext4_msg(sb, KERN_ERR, "can't change " + "dax mount option while remounting"); + return -1; + } ext4_msg(sb, KERN_WARNING, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); sbi->s_mount_opt |= m->mount_opt; @@ -2264,8 +2274,6 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PUTS("data_err=abort"); if (DUMMY_ENCRYPTION_ENABLED(sbi)) SEQ_OPTS_PUTS("test_dummy_encryption"); - if (test_opt(sb, BARRIER)) - SEQ_OPTS_PUTS("barrier"); ext4_show_quota_options(seq, sb); return 0; @@ -2297,6 +2305,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); err = -EROFS; + goto done; } if (read_only) goto done; @@ -2668,9 +2677,6 @@ static void ext4_orphan_cleanup(struct super_block *sb, sb->s_flags &= ~SB_RDONLY; } #ifdef CONFIG_QUOTA - /* Needed for iput() to work correctly and not trash data */ - sb->s_flags |= SB_ACTIVE; - /* * Turn on quotas which were not enabled for read-only mounts if * filesystem has quota feature, so that they are updated correctly. 
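The hunk context cuts off mid-comment here. For orientation, the code that follows that comment in ext4_orphan_cleanup() plausibly looks like the sketch below; s_flags and quota_update are locals of the real function, and the exact shape is assumed rather than quoted from this patch:

	/* Hedged sketch of the code following that comment (not part of the diff). */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "Cannot turn on quotas: error %d", ret);
	}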
@@ -3565,7 +3571,8 @@ int ext4_calculate_overhead(struct super_block *sb) */ if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); - else if (ext4_has_feature_journal(sb) && !sbi->s_journal) { + else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { + /* j_inum for internal journal is non-zero */ j_inode = ext4_get_journal_inode(sb, j_inum); if (j_inode) { j_blocks = j_inode->i_size >> sb->s_blocksize_bits; @@ -3772,7 +3779,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; - if ((def_mount_opts & EXT4_MOUNT_BARRIER)) + if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* @@ -4113,7 +4120,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", - sbi->s_blocks_per_group); + sbi->s_inodes_per_group); goto failed_mount; } sbi->s_itb_per_group = sbi->s_inodes_per_group / @@ -4242,9 +4249,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) EXT4_BLOCKS_PER_GROUP(sb) - 1); do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { - ext4_msg(sb, KERN_WARNING, "groups count too large: %u " + ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " "(block count %llu, first data block %u, " - "blocks per group %lu)", sbi->s_groups_count, + "blocks per group %lu)", blocks_count, ext4_blocks_count(es), le32_to_cpu(es->s_first_data_block), EXT4_BLOCKS_PER_GROUP(sb)); @@ -4287,7 +4294,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Pre-read the descriptors into the buffer cache */ for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); - sb_breadahead(sb, block); + sb_breadahead_unmovable(sb, block); } for (i = 0; i < db_count; i++) { @@ -4415,8 +4422,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ - if (IS_EXT3_SB(sb) && test_opt(sb, DATA_FLAGS) == 0) - set_opt(sb, WRITEBACK_DATA); switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal @@ -4556,11 +4561,13 @@ no_journal: ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -4590,6 +4597,9 @@ no_journal: if (!err) err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, GFP_KERNEL); + if (!err) + err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0, + GFP_KERNEL); if (!err) err = percpu_init_rwsem(&sbi->s_writepages_rwsem); @@ -4628,7 +4638,9 @@ no_journal: EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4671,10 +4683,9 @@ cantfind_ext4: ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif + kobject_put(&sbi->s_kobj); failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4691,6 +4702,7 @@ failed_mount6: percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); percpu_free_rwsem(&sbi->s_writepages_rwsem); failed_mount5: ext4_ext_release(sb); @@ -4813,7 +4825,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4843,7 +4856,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -4934,8 +4948,10 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -4945,7 +4961,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device 
read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a @@ -4960,27 +5000,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return -EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -5000,12 +5027,16 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5016,6 +5047,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) @@ -5024,15 +5059,10 @@ static int ext4_commit_super(struct super_block *sb, int sync) struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; - if (!sbh || block_device_ejected(sb)) - return error; - - /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; + if (!sbh) + return -EINVAL; + if (block_device_ejected(sb)) + return -ENODEV; /* * If the file system is mounted read-only, don't update the @@ -5101,26 +5131,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5128,14 +5164,17 @@ out: * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
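Only the head and tail of the rewritten ext4_clear_journal_err() appear in the hunk below. Its retained core, condensed as a sketch (journal, es, errstr and nbuf are locals of the real function; details assumed):

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(NULL, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		/* Persist the error in the superblock, then clear the journal's copy. */
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);
		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}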
*/ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5160,6 +5199,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5302,7 +5342,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5345,6 +5385,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5370,12 +5418,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; goto restore_opts; } - if (test_opt(sb, DAX)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and dax"); - err = -EINVAL; - goto restore_opts; - } } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " @@ -5391,12 +5433,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { - ext4_msg(sb, KERN_WARNING, "warning: refusing change of " - "dax flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT4_MOUNT_DAX; - } - if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); @@ -5410,9 +5446,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5442,8 +5475,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... + */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5491,8 +5529,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) * been changed by e2fsck since we originally mounted * the partition.) 
*/ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); @@ -5522,7 +5563,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. + */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) @@ -5543,8 +5594,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); + + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; @@ -5558,6 +5617,8 @@ restore_opts: sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { @@ -5800,6 +5861,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; + + /* Quota already enabled for this file? */ + if (IS_NOQUOTA(d_inode(path->dentry))) + return -EBUSY; + /* Journaling quota? */ if (EXT4_SB(sb)->s_qf_names[type]) { /* Quotafile not in fs root? 
*/ diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 9c4ce4d681e9..afa5f5531ff9 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -23,6 +23,7 @@ typedef enum { attr_session_write_kbytes, attr_lifetime_write_kbytes, attr_reserved_clusters, + attr_sra_exceeded_retry_limit, attr_inode_readahead, attr_trigger_test_error, attr_first_error_time, @@ -176,6 +177,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444); EXT4_ATTR_FUNC(session_write_kbytes, 0444); EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444); EXT4_ATTR_FUNC(reserved_clusters, 0644); +EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444); EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead, ext4_sb_info, s_inode_readahead_blks); @@ -208,6 +210,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(session_write_kbytes), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(reserved_clusters), + ATTR_LIST(sra_exceeded_retry_limit), ATTR_LIST(inode_readahead_blks), ATTR_LIST(inode_goal), ATTR_LIST(mb_stats), @@ -310,6 +313,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj, return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long) atomic64_read(&sbi->s_resv_clusters)); + case attr_sra_exceeded_retry_limit: + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long) + percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit)); case attr_inode_readahead: case attr_pointer_ui: if (!ptr) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 894a61010ae9..20e40cac819e 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1476,6 +1476,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, if (!ce) return NULL; + WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) && + !(current->flags & PF_MEMALLOC_NOFS)); + ea_data = ext4_kvmalloc(value_len, GFP_NOFS); if (!ea_data) { mb_cache_entry_put(ea_inode_cache, ce); @@ -2342,6 +2345,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, error = -ENOSPC; goto cleanup; } + WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS)); } error = ext4_reserve_inode_write(handle, inode, &is.iloc); @@ -2415,7 +2419,7 @@ retry_inode: * external inode if possible. */ if (ext4_has_feature_ea_inode(inode->i_sb) && - !i.in_inode) { + i.value_len && !i.in_inode) { i.in_inode = 1; goto retry_inode; } diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index a0eef95b9e0e..a57219c51c01 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -108,7 +108,7 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) return __get_meta_page(sbi, index, true); } -struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index) +struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) { struct page *page; int count = 0; @@ -243,6 +243,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, blkno * NAT_ENTRY_PER_BLOCK); break; case META_SIT: + if (unlikely(blkno >= TOTAL_SEGS(sbi))) + goto out; /* get sit block addr */ fio.new_blkaddr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK); @@ -892,8 +894,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) int i; int err; - sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), - GFP_KERNEL); + sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), + GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* @@ -1044,8 +1046,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) get_pages(sbi, is_dir ? 
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); retry: - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi))) { + trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, + get_pages(sbi, is_dir ? + F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); return -EIO; + } spin_lock(&sbi->inode_lock[type]); @@ -1250,20 +1256,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi) f2fs_unlock_all(sbi); } -void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) +void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); - if (!get_pages(sbi, F2FS_WB_CP_DATA)) + if (!get_pages(sbi, type)) break; if (unlikely(f2fs_cp_error(sbi))) break; - io_schedule_timeout(5*HZ); + io_schedule_timeout(HZ/50); } finish_wait(&sbi->cp_wait, &wait); } @@ -1301,10 +1307,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) else __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); - if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || - is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) __set_ckpt_flags(ckpt, CP_FSCK_FLAG); + if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) + __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); + else + __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); + if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) __set_ckpt_flags(ckpt, CP_DISABLED_FLAG); else @@ -1384,8 +1394,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* Flush all the NAT/SIT pages */ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); - f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && - !f2fs_cp_error(sbi)); /* * modify checkpoint @@ -1493,11 +1501,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* Here, we have one bio having CP pack except cp pack 2 page */ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); - f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && - !f2fs_cp_error(sbi)); + /* Wait for all dirty meta pages to be submitted for IO */ + f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); /* wait for previous submitted meta pages writeback */ - f2fs_wait_on_all_pages_writeback(sbi); + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); /* flush all device cache */ err = f2fs_flush_device_cache(sbi); @@ -1506,7 +1514,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* barrier and flush checkpoint cp pack 2 page if it can */ commit_checkpoint(sbi, ckpt, start_blk); - f2fs_wait_on_all_pages_writeback(sbi); + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); /* * invalidate intermediate page cache borrowed from meta inode @@ -1588,7 +1596,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) goto out; } - if (NM_I(sbi)->dirty_nat_cnt == 0 && + if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && SIT_I(sbi)->dirty_sentries == 0 && prefree_segments(sbi) == 0) { f2fs_flush_sit_entries(sbi, cpc); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index ec9a1f9ce2dd..64ee2a064e33 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -318,7 +318,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi, if (test_opt(sbi, LFS) && current->plug) blk_finish_plug(current->plug); - if (F2FS_IO_ALIGNED(sbi)) + if (!F2FS_IO_ALIGNED(sbi)) goto submit_io; start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; @@ -2753,6 +2753,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if 
(iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 9b0bedd82581..d8d64447bc94 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -107,8 +107,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->node_pages = NODE_MAPPING(sbi)->nrpages; if (sbi->meta_inode) si->meta_pages = META_MAPPING(sbi)->nrpages; - si->nats = NM_I(sbi)->nat_cnt; - si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; + si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT]; + si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT]; si->sits = MAIN_SEGS(sbi); si->dirty_sits = SIT_I(sbi)->dirty_sentries; si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; @@ -254,9 +254,10 @@ get_cache: si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + NM_I(sbi)->nid_cnt[PREALLOC_NID]) * sizeof(struct free_nid); - si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); - si->cache_mem += NM_I(sbi)->dirty_nat_cnt * - sizeof(struct nat_entry_set); + si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry); + si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] * + sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 84280ad3786c..78d041f9775a 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -107,36 +107,28 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, /* * Test whether a case-insensitive directory entry matches the filename * being searched for. - * - * Returns: 0 if the directory entry matches, more than 0 if it - * doesn't match or less than zero on error. */ -int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - const struct qstr *entry, bool quick) +static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, + const struct qstr *entry, bool quick) { - const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb); + const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); const struct unicode_map *um = sbi->s_encoding; - int ret; + int res; if (quick) - ret = utf8_strncasecmp_folded(um, name, entry); + res = utf8_strncasecmp_folded(um, name, entry); else - ret = utf8_strncasecmp(um, name, entry); - - if (ret < 0) { - /* Handle invalid character sequence as either an error - * or as an opaque byte sequence. + res = utf8_strncasecmp(um, name, entry); + if (res < 0) { + /* + * In strict mode, ignore invalid names. In non-strict mode, + * fall back to treating them as opaque byte sequences. 
*/ - if (f2fs_has_strict_mode(sbi)) - return -EINVAL; - - if (name->len != entry->len) - return 1; - - return !!memcmp(name->name, entry->name, name->len); + if (f2fs_has_strict_mode(sbi) || name->len != entry->len) + return false; + return !memcmp(name->name, entry->name, name->len); } - - return ret; + return res == 0; } static void f2fs_fname_setup_ci_filename(struct inode *dir, @@ -188,10 +180,10 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, if (cf_str->name) { struct qstr cf = {.name = cf_str->name, .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, &entry, true); + return f2fs_match_ci_name(parent, &cf, &entry, true); } - return !f2fs_ci_compare(parent, fname->usr_fname, &entry, - false); + return f2fs_match_ci_name(parent, fname->usr_fname, &entry, + false); } #endif if (fscrypt_match_name(fname, d->filename[bit_pos], @@ -311,16 +303,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, unsigned int max_depth; unsigned int level; + *res_page = NULL; + if (f2fs_has_inline_dentry(dir)) { - *res_page = NULL; de = f2fs_find_in_inline_dir(dir, fname, res_page); goto out; } - if (npages == 0) { - *res_page = NULL; + if (npages == 0) goto out; - } max_depth = F2FS_I(dir)->i_current_depth; if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) { @@ -331,7 +322,6 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, } for (level = 0; level < max_depth; level++) { - *res_page = NULL; de = find_in_level(dir, level, fname, res_page); if (de || IS_ERR(*res_page)) break; @@ -1067,17 +1057,41 @@ const struct file_operations f2fs_dir_operations = { static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { - struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); - const struct inode *inode = READ_ONCE(parent->d_inode); + const struct inode *dir = READ_ONCE(parent->d_inode); + const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct qstr entry = QSTR_INIT(str, len); + char strbuf[DNAME_INLINE_LEN]; + int res; - if (!inode || !IS_CASEFOLDED(inode)) { - if (len != name->len) - return -1; - return memcmp(str, name->name, len); + if (!dir || !IS_CASEFOLDED(dir)) + goto fallback; + + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. 
+ */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + entry.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); } - return f2fs_ci_compare(inode, name, &qstr, false); + res = utf8_strncasecmp(sbi->s_encoding, name, &entry); + if (res >= 0) + return res; + + if (f2fs_has_strict_mode(sbi)) + return -EINVAL; +fallback: + if (len != name->len) + return 1; + return !!memcmp(str, name->name, len); } static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9046432b87c2..4ca3c2a0a0f5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -100,6 +100,7 @@ extern const char *f2fs_fault_name[FAULT_MAX]; #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 #define F2FS_MOUNT_RESERVE_ROOT 0x01000000 #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000 +#define F2FS_MOUNT_NORECOVERY 0x04000000 #define F2FS_OPTION(sbi) ((sbi)->mount_opt) #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) @@ -137,6 +138,7 @@ struct f2fs_mount_info { int alloc_mode; /* segment allocation policy */ int fsync_mode; /* fsync policy */ bool test_dummy_encryption; /* test dummy encryption */ + block_t unusable_cap_perc; /* percentage for cap */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -795,6 +797,13 @@ enum nid_state { MAX_NID_STATE, }; +enum nat_state { + TOTAL_NAT, + DIRTY_NAT, + RECLAIMABLE_NAT, + MAX_NAT_STATE, +}; + struct f2fs_nm_info { block_t nat_blkaddr; /* base disk address of NAT */ nid_t max_nid; /* maximum possible node ids */ @@ -810,8 +819,7 @@ struct f2fs_nm_info { struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ - unsigned int nat_cnt; /* the # of cached nat entries */ - unsigned int dirty_nat_cnt; /* total num of nat entries in set */ + unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ unsigned int nat_blocks; /* # of nat blocks */ /* free node ids management */ @@ -2788,18 +2796,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { - void *ret; - if (time_to_inject(sbi, FAULT_KMALLOC)) { f2fs_show_injection_info(FAULT_KMALLOC); return NULL; } - ret = kmalloc(size, flags); - if (ret) - return ret; - - return kvmalloc(size, flags); + return kmalloc(size, flags); } static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, @@ -2958,11 +2960,6 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); -extern int f2fs_ci_compare(const struct inode *parent, - const struct qstr *name, - const struct qstr *entry, - bool quick); - /* * dir.c */ @@ -3007,6 +3004,8 @@ bool f2fs_empty_dir(struct inode *dir); static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) { + if (fscrypt_is_nokey_name(dentry)) + return -ENOKEY; return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, inode, inode->i_ino, inode->i_mode); } @@ -3070,7 +3069,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_recover_inline_xattr(struct inode 
*inode, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, @@ -3158,7 +3157,7 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); -struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index); +struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); @@ -3185,7 +3184,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); void f2fs_update_dirty_page(struct inode *inode, struct page *page); void f2fs_remove_dirty_inode(struct inode *inode); int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); -void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi); +void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); int __init f2fs_create_checkpoint_caches(void); @@ -3496,7 +3495,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page); int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_write_inline_data(struct inode *inode, struct page *page); -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, struct fscrypt_name *fname, struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index c3a9da79ac99..6e58b2e62b18 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -686,6 +686,10 @@ int f2fs_truncate(struct inode *inode) return -EIO; } + err = dquot_initialize(inode); + if (err) + return err; + /* we should check inline_data size */ if (!f2fs_may_inline_data(inode)) { err = f2fs_convert_inline_inode(inode); @@ -761,7 +765,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + if (!in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(inode, mode); } @@ -2056,8 +2061,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); - if (ret) + if (ret) { + if (ret == -EROFS) { + ret = 0; + f2fs_stop_checkpoint(sbi, false); + set_sbi_flag(sbi, SBI_IS_SHUTDOWN); + trace_f2fs_shutdown(sbi, in, ret); + } return ret; + } } switch (in) { diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5877bd729689..a78aa5480454 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1012,8 +1012,14 @@ next_step: block_t start_bidx; nid_t nid = le32_to_cpu(entry->nid); - /* stop BG_GC if there is not enough free sections. 
*/ - if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) + /* + * stop BG_GC if there is not enough free sections. + * Or, stop GC if the segment becomes fully valid caused by + * race condition along with SSR block allocation. + */ + if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || + get_valid_blocks(sbi, segno, false) == + sbi->blocks_per_seg) return submitted; if (check_valid_map(sbi, segno, off) == 0) @@ -1532,11 +1538,17 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) goto out; } + mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, -secs); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); + set_sbi_flag(sbi, SBI_IS_DIRTY); + mutex_unlock(&sbi->cp_mutex); + err = f2fs_sync_fs(sbi->sb, 1); if (err) { + mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, secs); + mutex_unlock(&sbi->cp_mutex); update_sb_metadata(sbi, secs); f2fs_commit_super(sbi, false); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 896db0416f0e..cbd17e4ff920 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -189,6 +189,10 @@ int f2fs_convert_inline_inode(struct inode *inode) if (!f2fs_has_inline_data(inode)) return 0; + err = dquot_initialize(inode); + if (err) + return err; + page = f2fs_grab_cache_page(inode->i_mapping, 0, false); if (!page) return -ENOMEM; @@ -252,7 +256,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; @@ -274,7 +278,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) ri && (ri->i_inline & F2FS_INLINE_DATA)) { process_inline: ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_wait_on_page_writeback(ipage, NODE, true, true); @@ -287,21 +292,25 @@ process_inline: set_page_dirty(ipage); f2fs_put_page(ipage, 1); - return true; + return 1; } if (f2fs_has_inline_data(inode)) { ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_truncate_inline_inode(inode, ipage, 0); clear_inode_flag(inode, FI_INLINE_DATA); f2fs_put_page(ipage, 1); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { - if (f2fs_truncate_blocks(inode, 0, false)) - return false; + int ret; + + ret = f2fs_truncate_blocks(inode, 0, false); + if (ret) + return ret; goto process_inline; } - return false; + return 0; } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 5d9584281935..3a97ac56821b 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -797,7 +797,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry, if (whiteout) { f2fs_i_links_write(inode, false); + + spin_lock(&inode->i_lock); inode->i_state |= I_LINKABLE; + spin_unlock(&inode->i_lock); + *whiteout = inode; } else { d_tmpfile(dentry, inode); @@ -996,7 +1000,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, err = f2fs_add_link(old_dentry, whiteout); if (err) goto put_out_dir; + + spin_lock(&whiteout->i_lock); whiteout->i_state &= ~I_LINKABLE; + spin_unlock(&whiteout->i_lock); + iput(whiteout); } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 8b66bc4c004b..48bb5d3c709d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct 
f2fs_sb_info *sbi, int type) sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { - mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> - PAGE_SHIFT; + mem_size = (nm_i->nat_cnt[TOTAL_NAT] * + sizeof(struct nat_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); if (excess_cached_nats(sbi)) res = false; @@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page) static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) { - return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid)); + return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid)); } static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) @@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, list_add_tail(&ne->list, &nm_i->nat_entries); spin_unlock(&nm_i->nat_list_lock); - nm_i->nat_cnt++; + nm_i->nat_cnt[TOTAL_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; return ne; } @@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) { radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); - nm_i->nat_cnt--; + nm_i->nat_cnt[TOTAL_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; __free_nat_entry(e); } @@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, if (get_nat_flag(ne, IS_DIRTY)) goto refresh_list; - nm_i->dirty_nat_cnt++; + nm_i->nat_cnt[DIRTY_NAT]++; + nm_i->nat_cnt[RECLAIMABLE_NAT]--; set_nat_flag(ne, IS_DIRTY, true); refresh_list: spin_lock(&nm_i->nat_list_lock); @@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, set_nat_flag(ne, IS_DIRTY, false); set->entry_cnt--; - nm_i->dirty_nat_cnt--; + nm_i->nat_cnt[DIRTY_NAT]--; + nm_i->nat_cnt[RECLAIMABLE_NAT]++; } static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, @@ -1562,15 +1566,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, if (atomic && !test_opt(sbi, NOBARRIER)) fio.op_flags |= REQ_PREFLUSH | REQ_FUA; - set_page_writeback(page); - ClearPageError(page); - + /* should add to global list before clearing PAGECACHE status */ if (f2fs_in_warm_node_list(sbi, page)) { seq = f2fs_add_fsync_node_entry(sbi, page); if (seq_id) *seq_id = seq; } + set_page_writeback(page); + ClearPageError(page); + fio.old_blkaddr = ni.blk_addr; f2fs_do_write_node_page(nid, &fio); set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); @@ -2314,6 +2319,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (unlikely(nid >= nm_i->max_nid)) nid = 0; + if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) + nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; + /* Enough entries */ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return 0; @@ -2349,7 +2357,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (ret) { up_read(&nm_i->nat_tree_lock); - f2fs_bug_on(sbi, !mount); f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); return ret; } @@ -2511,7 +2518,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) return nr - nr_shrink; } -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; @@ -2519,7 +2526,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) struct f2fs_inode *ri; ipage 
= f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); ri = F2FS_INODE(page); if (ri->i_inline & F2FS_INLINE_XATTR) { @@ -2538,6 +2546,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) update_inode: f2fs_update_inode(inode, ipage); f2fs_put_page(ipage, 1); + return 0; } int f2fs_recover_xattr_data(struct inode *inode, struct page *page) @@ -2709,6 +2718,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) struct f2fs_nat_entry raw_ne; nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); + if (f2fs_check_nid_range(sbi, nid)) + continue; + raw_ne = nat_in_journal(journal, i); ne = __lookup_nat_cache(nm_i, nid); @@ -2876,14 +2888,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) LIST_HEAD(sets); int err = 0; - /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */ + /* + * during unmount, let's flush nat_bits before checking + * nat_cnt[DIRTY_NAT]. + */ if (enabled_nat_bits(sbi, cpc)) { down_write(&nm_i->nat_tree_lock); remove_nats_in_journal(sbi); up_write(&nm_i->nat_tree_lock); } - if (!nm_i->dirty_nat_cnt) + if (!nm_i->nat_cnt[DIRTY_NAT]) return 0; down_write(&nm_i->nat_tree_lock); @@ -2894,7 +2909,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * into nat entry set. */ if (enabled_nat_bits(sbi, cpc) || - !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) + !__has_cursum_space(journal, + nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, @@ -2932,7 +2948,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) return 0; nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); - nm_i->nat_bits = f2fs_kzalloc(sbi, + nm_i->nat_bits = f2fs_kvzalloc(sbi, nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; @@ -3018,7 +3034,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi) F2FS_RESERVED_NODE_NUM; nm_i->nid_cnt[FREE_NID] = 0; nm_i->nid_cnt[PREALLOC_NID] = 0; - nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; @@ -3065,9 +3080,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) int i; nm_i->free_nid_bitmap = - f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), - nm_i->nat_blocks), - GFP_KERNEL); + f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), + nm_i->nat_blocks), + GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; @@ -3155,7 +3170,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) __del_from_nat_cache(nm_i, natvec[idx]); } } - f2fs_bug_on(sbi, nm_i->nat_cnt); + f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); /* destroy nat set cache */ nid = 0; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index e05af5df5648..4a2e7eaf2b02 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -123,13 +123,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne, static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid * + return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid * NM_I(sbi)->dirty_nats_ratio / 100; } static inline bool excess_cached_nats(struct f2fs_sb_info *sbi) { - return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD; + return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD; } static inline bool excess_dirty_nodes(struct 
f2fs_sb_info *sbi) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 783773e4560d..5f230e981c48 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -514,7 +514,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* step 1: recover xattr */ if (IS_INODE(page)) { - f2fs_recover_inline_xattr(inode, page); + err = f2fs_recover_inline_xattr(inode, page); + if (err) + goto out; } else if (f2fs_has_xattr_block(ofs_of_node(page))) { err = f2fs_recover_xattr_data(inode, page); if (!err) @@ -523,8 +525,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 2: recover inline data */ - if (f2fs_recover_inline_data(inode, page)) + err = f2fs_recover_inline_data(inode, page); + if (err) { + if (err == 1) + err = 0; goto out; + } /* step 3: recover data indices */ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 7d8578401267..5ba677f85533 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2310,7 +2310,9 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) */ struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) { - return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno)); + if (unlikely(f2fs_cp_error(sbi))) + return ERR_PTR(-EIO); + return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); } void f2fs_update_meta_page(struct f2fs_sb_info *sbi, @@ -2582,7 +2584,11 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type) __next_free_blkoff(sbi, curseg, 0); sum_page = f2fs_get_sum_page(sbi, new_segno); - f2fs_bug_on(sbi, IS_ERR(sum_page)); + if (IS_ERR(sum_page)) { + /* GC won't be able to use stale summary pages by cp_error */ + memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE); + return; + } sum_node = (struct f2fs_summary_block *)page_address(sum_page); memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); f2fs_put_page(sum_page, 1); @@ -3713,7 +3719,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, unsigned int segno) { - return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno)); + return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); } static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 325781a1ae4d..2034b9a07d63 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -88,11 +88,11 @@ #define BLKS_PER_SEC(sbi) \ ((sbi)->segs_per_sec * (sbi)->blocks_per_seg) #define GET_SEC_FROM_SEG(sbi, segno) \ - ((segno) / (sbi)->segs_per_sec) + (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec) #define GET_SEG_FROM_SEC(sbi, secno) \ ((secno) * (sbi)->segs_per_sec) #define GET_ZONE_FROM_SEC(sbi, secno) \ - ((secno) / (sbi)->secs_per_zone) + (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone) #define GET_ZONE_FROM_SEG(sbi, segno) \ GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno)) diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index a467aca29cfe..3ceebaaee384 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -18,9 +18,7 @@ static unsigned int shrinker_run_no; static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) { - long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; - - return count > 0 ? 
count : 0; + return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT]; } static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index ea8dbf1458c9..a9a083232bcf 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -277,6 +277,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) +{ + if (!F2FS_OPTION(sbi).unusable_cap_perc) + return; + + if (F2FS_OPTION(sbi).unusable_cap_perc == 100) + F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; + else + F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * + F2FS_OPTION(sbi).unusable_cap_perc; + + f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", + F2FS_OPTION(sbi).unusable_cap, + F2FS_OPTION(sbi).unusable_cap_perc); +} + static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -439,7 +455,7 @@ static int parse_options(struct super_block *sb, char *options) break; case Opt_norecovery: /* this option mounts f2fs with ro */ - set_opt(sbi, DISABLE_ROLL_FORWARD); + set_opt(sbi, NORECOVERY); if (!f2fs_readonly(sb)) return -EINVAL; break; @@ -790,12 +806,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; if (arg < 0 || arg > 100) return -EINVAL; - if (arg == 100) - F2FS_OPTION(sbi).unusable_cap = - sbi->user_block_count; - else - F2FS_OPTION(sbi).unusable_cap = - (sbi->user_block_count / 100) * arg; + F2FS_OPTION(sbi).unusable_cap_perc = arg; set_opt(sbi, DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable_cap: @@ -1064,6 +1075,9 @@ static void f2fs_put_super(struct super_block *sb) int i; bool dropped; + /* unregister procfs/sysfs entries in advance to avoid race case */ + f2fs_unregister_sysfs(sbi); + f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ @@ -1105,7 +1119,7 @@ static void f2fs_put_super(struct super_block *sb) /* our cp_error case, we can wait for any writeback page */ f2fs_flush_merged_writes(sbi); - f2fs_wait_on_all_pages_writeback(sbi); + f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); f2fs_bug_on(sbi, sbi->fsync_node_num); @@ -1127,8 +1141,6 @@ static void f2fs_put_super(struct super_block *sb) kvfree(sbi->ckpt); - f2fs_unregister_sysfs(sbi); - sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); @@ -1219,7 +1231,8 @@ static int f2fs_statfs_project(struct super_block *sb, limit >>= sb->s_blocksize_bits; if (limit && buf->f_blocks > limit) { - curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; + curblock = (dquot->dq_dqb.dqb_curspace + + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; buf->f_blocks = limit; buf->f_bfree = buf->f_bavail = (buf->f_blocks > curblock) ? @@ -1348,6 +1361,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) } if (test_opt(sbi, DISABLE_ROLL_FORWARD)) seq_puts(seq, ",disable_roll_forward"); + if (test_opt(sbi, NORECOVERY)) + seq_puts(seq, ",norecovery"); if (test_opt(sbi, DISCARD)) seq_puts(seq, ",discard"); else @@ -1733,6 +1748,7 @@ skip: (test_opt(sbi, POSIX_ACL) ? 
SB_POSIXACL : 0); limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); return 0; restore_gc: @@ -1824,6 +1840,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, int offset = off & (sb->s_blocksize - 1); size_t towrite = len; struct page *page; + void *fsdata = NULL; char *kaddr; int err = 0; int tocopy; @@ -1833,7 +1850,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, towrite); retry: err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, - &page, NULL); + &page, &fsdata); if (unlikely(err)) { if (err == -ENOMEM) { congestion_wait(BLK_RW_ASYNC, HZ/50); @@ -1849,7 +1866,7 @@ retry: flush_dcache_page(page); a_ops->write_end(NULL, mapping, off, tocopy, tocopy, - page, NULL); + page, fsdata); offset = 0; towrite -= tocopy; off += tocopy; @@ -2506,7 +2523,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, block_t total_sections, blocks_per_seg; struct f2fs_super_block *raw_super = (struct f2fs_super_block *) (bh->b_data + F2FS_SUPER_OFFSET); - unsigned int blocksize; size_t crc_offset = 0; __u32 crc = 0; @@ -2540,10 +2556,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, } /* Currently, support only 4KB block size */ - blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); - if (blocksize != F2FS_BLKSIZE) { - f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", - blocksize); + if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) { + f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", + le32_to_cpu(raw_super->log_blocksize), + F2FS_BLKSIZE_BITS); return -EFSCORRUPTED; } @@ -2885,7 +2901,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) FDEV(devi).nr_blkz++; - FDEV(devi).blkz_seq = f2fs_kzalloc(sbi, + FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, BITS_TO_LONGS(FDEV(devi).nr_blkz) * sizeof(unsigned long), GFP_KERNEL); @@ -3394,6 +3410,7 @@ try_onemore: sbi->reserved_blocks = 0; sbi->current_reserved_blocks = 0; limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); @@ -3488,7 +3505,8 @@ try_onemore: goto reset_checkpoint; /* recover fsynced data */ - if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { + if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && + !test_opt(sbi, NORECOVERY)) { /* * mount should be failed, when device has readonly mode, and * previous checkpoint was not done by clean system shutdown. 
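The checkpoint=disable:%N change in the super.c hunk above stops converting the percentage into an absolute block count at option-parse time; instead it records unusable_cap_perc and recomputes the cap via adjust_unusable_cap_perc() at mount and remount, so the cap tracks user_block_count when it changes (for example after a resize). A minimal standalone sketch of that conversion follows; the helper name and the sample numbers are hypothetical, and block_t is assumed 32-bit as in f2fs:

#include <stdio.h>

typedef unsigned int block_t;

/*
 * Hypothetical standalone helper mirroring what adjust_unusable_cap_perc()
 * does in the patch: derive the absolute unusable-block cap from the stored
 * percentage. Dividing before multiplying avoids overflowing a 32-bit
 * block_t on large filesystems; perc == 100 is special-cased so the cap is
 * exact rather than rounded down by the division.
 */
static block_t unusable_cap_from_perc(block_t user_block_count,
				      unsigned int perc)
{
	if (!perc)
		return 0;		/* option not set: no cap to apply */
	if (perc == 100)
		return user_block_count;
	return (user_block_count / 100) * perc;
}

int main(void)
{
	/* e.g. a 4 GiB volume of 4 KiB blocks mounted checkpoint=disable:%30 */
	printf("cap = %u blocks\n", unusable_cap_from_perc(1048576, 30));
	return 0;
}

Keeping only the percentage in the mount options and re-deriving the cap on every (re)mount is what lets the resize path leave the stored option untouched while the effective limit still follows the new block count.
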
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 170934430d7d..029e693e201c 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -788,4 +788,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi) } kobject_del(&sbi->s_kobj); kobject_put(&sbi->s_kobj); + wait_for_completion(&sbi->s_kobj_unregister); } diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 181900af2576..296b3189448a 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -539,8 +539,9 @@ out: ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); + nid_t xnid = F2FS_I(inode)->i_xattr_nid; struct f2fs_xattr_entry *entry; - void *base_addr; + void *base_addr, *last_base_addr; int error = 0; size_t rest = buffer_size; @@ -550,6 +551,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) if (error) return error; + last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode); + list_for_each_xattr(entry, base_addr) { const struct xattr_handler *handler = f2fs_xattr_handler(entry->e_name_index); @@ -557,6 +560,15 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) size_t prefix_len; size_t size; + if ((void *)(entry) + sizeof(__u32) > last_base_addr || + (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) { + f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr", + inode->i_ino); + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + error = -EFSCORRUPTED; + goto cleanup; + } + if (!handler || (handler->list && !handler->list(dentry))) continue; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index d40cbad16659..3d5ad11aacc5 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, goto out; } + if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { + if (!silent) + fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); + goto out; + } + error = 0; out: diff --git a/fs/fcntl.c b/fs/fcntl.c index 3d40771e8e7c..3dc90e5293e6 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -779,9 +779,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band) { struct task_struct *p; enum pid_type type; + unsigned long flags; struct pid *pid; - read_lock(&fown->lock); + read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; @@ -802,7 +803,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band) read_unlock(&tasklist_lock); } out_unlock_fown: - read_unlock(&fown->lock); + read_unlock_irqrestore(&fown->lock, flags); } static void send_sigurg_to_task(struct task_struct *p, @@ -817,9 +818,10 @@ int send_sigurg(struct fown_struct *fown) struct task_struct *p; enum pid_type type; struct pid *pid; + unsigned long flags; int ret = 0; - read_lock(&fown->lock); + read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; @@ -842,7 +844,7 @@ int send_sigurg(struct fown_struct *fown) read_unlock(&tasklist_lock); } out_unlock_fown: - read_unlock(&fown->lock); + read_unlock_irqrestore(&fown->lock, flags); return ret; } diff --git a/fs/file.c b/fs/file.c index 3da91a112bab..e5d328335f88 100644 --- a/fs/file.c +++ b/fs/file.c @@ -70,7 +70,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { - unsigned int cpy, set; + size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); diff --git a/fs/filesystems.c b/fs/filesystems.c index 9135646e41ac..5e1a19013373 100644 --- a/fs/filesystems.c +++ 
b/fs/filesystems.c @@ -271,7 +271,9 @@ struct file_system_type *get_fs_type(const char *name) fs = __get_fs_type(name, len); if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); - WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); + if (!fs) + pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", + len, name); } if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 76ac9c7d32ec..a2cf2db0d3de 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -42,7 +42,6 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode, struct bdi_writeback *wb) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1123,7 +1124,9 @@ void inode_io_list_del(struct inode *inode) struct bdi_writeback *wb; wb = inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); inode_io_list_del_locked(inode, wb); + spin_unlock(&inode->i_lock); spin_unlock(&wb->list_lock); } @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) { + assert_spin_locked(&inode->i_lock); + if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; +} + +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +{ + spin_lock(&inode->i_lock); + redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); } /* @@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1237,21 +1247,15 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; - if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + spin_lock(&inode->i_lock); + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1289,18 +1293,22 @@ out: * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1394,7 +1402,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); return; } @@ -1414,7 +1422,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -1422,10 +1430,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * such as delayed allocation during submission or metadata * updates after data IO completion. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. */ inode_io_list_del_locked(inode, wb); @@ -1464,26 +1473,26 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) ret = err; } + /* + * If the inode has dirty timestamps and we need to write them, call + * mark_inode_dirty_sync() to notify the filesystem about it and to + * change I_DIRTY_TIME into I_DIRTY_SYNC. 
+ */ + if ((inode->i_state & I_DIRTY_TIME) && + (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync || + time_after(jiffies, inode->dirtied_time_when + + dirtytime_expire_interval * HZ))) { + trace_writeback_lazytime(inode); + mark_inode_dirty_sync(inode); + } + /* * Some filesystems may redirty the inode during the writeback * due to delalloc, clear dirty metadata flags right before * write_inode() */ spin_lock(&inode->i_lock); - dirty = inode->i_state & I_DIRTY; - if (inode->i_state & I_DIRTY_TIME) { - if ((dirty & I_DIRTY_INODE) || - wbc->sync_mode == WB_SYNC_ALL || - unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || - unlikely(time_after(jiffies, - (inode->dirtied_time_when + - dirtytime_expire_interval * HZ)))) { - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; - trace_writeback_lazytime(inode); - } - } else - inode->i_state &= ~I_DIRTY_TIME_EXPIRED; inode->i_state &= ~dirty; /* @@ -1504,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_unlock(&inode->i_lock); - if (dirty & I_DIRTY_TIME) - mark_inode_dirty_sync(inode); /* Don't write the inode if only I_DIRTY_PAGES was set */ if (dirty & ~I_DIRTY_PAGES) { int err = write_inode(inode, wbc); @@ -1669,8 +1676,8 @@ static long writeback_sb_inodes(struct super_block *sb, */ spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { + redirty_tail_locked(inode, wb); spin_unlock(&inode->i_lock); - redirty_tail(inode, wb); continue; } if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { @@ -1811,7 +1818,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1831,7 +1838,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1839,14 +1846,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1880,14 +1884,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. */ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else @@ -2289,11 +2293,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. 
+ * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. */ - if (inode->i_state & I_SYNC) + if (inode->i_state & I_SYNC_QUEUED) goto out_unlock_inode; /* diff --git a/fs/fs_context.c b/fs/fs_context.c index 138b5b4d621d..a2367c7aef5b 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -585,7 +585,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) param->key); } - if (len > PAGE_SIZE - 2 - size) + if (size + len + 2 > PAGE_SIZE) return invalf(fc, "VFS: Legacy: Cumulative options too large"); if (strchr(param->key, ',') || (param->type == fs_value_is_string && diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c index 5a48cee6d7d3..f529075a2ce8 100644 --- a/fs/fuse/acl.c +++ b/fs/fuse/acl.c @@ -19,6 +19,9 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type) void *value = NULL; struct posix_acl *acl; + if (fuse_is_bad(inode)) + return ERR_PTR(-EIO); + if (!fc->posix_acl || fc->no_getxattr) return NULL; @@ -53,6 +56,9 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type) const char *name; int ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fc->posix_acl || fc->no_setxattr) return -EOPNOTSUPP; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 4f2e4f38feb8..adc8e6b5ebab 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -764,14 +764,14 @@ static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || - page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | - 1 << PG_reclaim))) { + 1 << PG_reclaim | + 1 << PG_waiters))) { pr_warn("trying to steal weird page\n"); pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); return 1; @@ -786,15 +786,16 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) struct page *newpage; struct pipe_buffer *buf = cs->pipebufs; + get_page(oldpage); err = unlock_request(cs->req); if (err) - return err; + goto out_put_old; fuse_copy_finish(cs); err = pipe_buf_confirm(cs->pipe, buf); if (err) - return err; + goto out_put_old; BUG_ON(!cs->nr_segs); cs->currbuf = buf; @@ -834,7 +835,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); if (err) { unlock_page(newpage); - return err; + goto out_put_old; } get_page(newpage); @@ -853,14 +854,19 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (err) { unlock_page(newpage); put_page(newpage); - return err; + goto out_put_old; } unlock_page(oldpage); + /* Drop ref for ap->pages[] array */ put_page(oldpage); cs->len = 0; - return 0; + err = 0; +out_put_old: + /* Drop ref obtained in this function */ + put_page(oldpage); + return err; out_fallback_unlock: unlock_page(newpage); @@ -869,10 +875,10 @@ out_fallback: cs->offset = buf->offset; err = lock_request(cs->req); - if (err) - return err; + if (!err) + err = 1; - return 1; + goto out_put_old; } static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, @@ -884,14 +890,16 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, if (cs->nr_segs == cs->pipe->buffers) return -EIO; + get_page(page); err = unlock_request(cs->req); - if 
(err) + if (err) { + put_page(page); return err; + } fuse_copy_finish(cs); buf = cs->pipebufs; - get_page(page); buf->page = page; buf->offset = offset; buf->len = count; @@ -2247,6 +2255,30 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd, } } } + if (cmd == FUSE_DEV_IOC_RECOVERY) { + struct fuse_dev *fud = fuse_get_dev(file); + struct fuse_iqueue *fiq = &fud->fc->iq; + struct fuse_pqueue *fpq = &fud->pq; + struct fuse_req *req, *next; + LIST_HEAD(recovery); + unsigned int i; + + spin_lock(&fpq->lock); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], + &recovery); + spin_unlock(&fpq->lock); + + list_for_each_entry_safe(req, next, &recovery, list) { + clear_bit(FR_SENT, &req->flags); + set_bit(FR_PENDING, &req->flags); + } + + spin_lock(&fiq->lock); + list_splice(&recovery, &fiq->pending); + spin_unlock(&fiq->lock); + err = 0; + } return err; } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index ee190119f45c..60378f3baaae 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -201,7 +201,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) int ret; inode = d_inode_rcu(entry); - if (inode && is_bad_inode(inode)) + if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || (flags & LOOKUP_REVAL)) { @@ -386,6 +386,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, bool outarg_valid = true; bool locked; + if (fuse_is_bad(dir)) + return ERR_PTR(-EIO); + locked = fuse_lock_inode(dir); err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, &outarg, &inode); @@ -529,6 +532,9 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry, struct fuse_conn *fc = get_fuse_conn(dir); struct dentry *res = NULL; + if (fuse_is_bad(dir)) + return -EIO; + if (d_in_lookup(entry)) { res = fuse_lookup(dir, entry, 0); if (IS_ERR(res)) @@ -577,6 +583,9 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args, int err; struct fuse_forget_link *forget; + if (fuse_is_bad(dir)) + return -EIO; + forget = fuse_alloc_forget(); if (!forget) return -ENOMEM; @@ -704,6 +713,9 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) struct fuse_conn *fc = get_fuse_conn(dir); FUSE_ARGS(args); + if (fuse_is_bad(dir)) + return -EIO; + args.opcode = FUSE_UNLINK; args.nodeid = get_node_id(dir); args.in_numargs = 1; @@ -740,6 +752,9 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) struct fuse_conn *fc = get_fuse_conn(dir); FUSE_ARGS(args); + if (fuse_is_bad(dir)) + return -EIO; + args.opcode = FUSE_RMDIR; args.nodeid = get_node_id(dir); args.in_numargs = 1; @@ -818,6 +833,9 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent, struct fuse_conn *fc = get_fuse_conn(olddir); int err; + if (fuse_is_bad(olddir)) + return -EIO; + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) return -EINVAL; @@ -953,7 +971,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, if (!err) { if (fuse_invalid_attr(&outarg.attr) || (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { - make_bad_inode(inode); + fuse_make_bad(inode); err = -EIO; } else { fuse_change_attributes(inode, &outarg.attr, @@ -1155,6 +1173,9 @@ static int fuse_permission(struct inode *inode, int mask) bool refreshed = false; int err = 0; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fc)) return -EACCES; @@ -1250,7 +1271,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode 
*inode, int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out_err; if (fc->cache_symlinks) @@ -1298,7 +1319,7 @@ static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, struct fuse_conn *fc = get_fuse_conn(inode); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; if (fc->no_fsyncdir) @@ -1575,7 +1596,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, if (fuse_invalid_attr(&outarg.attr) || (inode->i_mode ^ outarg.attr.mode) & S_IFMT) { - make_bad_inode(inode); + fuse_make_bad(inode); err = -EIO; goto error; } @@ -1631,6 +1652,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL; int ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(get_fuse_conn(inode))) return -EACCES; @@ -1689,6 +1713,9 @@ static int fuse_getattr(const struct path *path, struct kstat *stat, struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fc)) return -EACCES; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 3dd37a998ea9..0883e5b24c90 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -18,6 +18,7 @@ #include <linux/swap.h> #include <linux/falloc.h> #include <linux/uio.h> +#include <linux/fs.h> static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, struct fuse_page_desc **desc) @@ -221,6 +222,9 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) fc->atomic_o_trunc && fc->writeback_cache; + if (fuse_is_bad(inode)) + return -EIO; + err = generic_file_open(inode, file); if (err) return err; @@ -442,7 +446,7 @@ static int fuse_flush(struct file *file, fl_owner_t id) FUSE_ARGS(args); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; if (fc->no_flush) @@ -505,7 +509,7 @@ static int fuse_fsync(struct file *file, loff_t start, loff_t end, struct fuse_conn *fc = get_fuse_conn(inode); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; inode_lock(inode); @@ -712,6 +716,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc, spin_unlock(&io->lock); ia->ap.args.end = fuse_aio_complete_req; + ia->ap.args.may_block = io->should_dirty; err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL); if (err) fuse_aio_complete_req(fc, &ia->ap.args, err); @@ -828,7 +833,7 @@ static int fuse_readpage(struct file *file, struct page *page) int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out; err = fuse_do_readpage(file, page); @@ -971,7 +976,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out; data.file = file; @@ -1103,6 +1108,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; unsigned int offset, i; + bool short_write; int err; for (i = 0; i < ap->num_pages; i++) @@ -1115,32 +1121,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, if (!err && ia->write.out.size > count) err = -EIO; + short_write = ia->write.out.size < count; offset = ap->descs[0].offset; count = ia->write.out.size; for (i = 0; i < ap->num_pages; i++) { struct page *page = ap->pages[i]; - if (!err && !offset && count >= PAGE_SIZE) - SetPageUptodate(page); - - if (count > PAGE_SIZE - offset) - count -= PAGE_SIZE - offset; - 
else - count = 0; - offset = 0; - - unlock_page(page); + if (err) { + ClearPageUptodate(page); + } else { + if (count >= PAGE_SIZE - offset) + count -= PAGE_SIZE - offset; + else { + if (short_write) + ClearPageUptodate(page); + count = 0; + } + offset = 0; + } + if (ia->write.page_locked && (i == ap->num_pages - 1)) + unlock_page(page); put_page(page); } return err; } -static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap, +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, struct address_space *mapping, struct iov_iter *ii, loff_t pos, unsigned int max_pages) { + struct fuse_args_pages *ap = &ia->ap; struct fuse_conn *fc = get_fuse_conn(mapping->host); unsigned offset = pos & (PAGE_SIZE - 1); size_t count = 0; @@ -1193,6 +1205,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap, if (offset == PAGE_SIZE) offset = 0; + /* If we copied full page, mark it uptodate */ + if (tmp == PAGE_SIZE) + SetPageUptodate(page); + + if (PageUptodate(page)) { + unlock_page(page); + } else { + ia->write.page_locked = true; + break; + } if (!fc->big_writes) break; } while (iov_iter_count(ii) && count < fc->max_write && @@ -1236,7 +1258,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, break; } - count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages); + count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages); if (count <= 0) { err = count; } else { @@ -1567,7 +1589,7 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; - if (is_bad_inode(file_inode(file))) + if (fuse_is_bad(file_inode(file))) return -EIO; if (!(ff->open_flags & FOPEN_DIRECT_IO)) @@ -1581,7 +1603,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; - if (is_bad_inode(file_inode(file))) + if (fuse_is_bad(file_inode(file))) return -EIO; if (!(ff->open_flags & FOPEN_DIRECT_IO)) @@ -2131,7 +2153,7 @@ static int fuse_writepages(struct address_space *mapping, int err; err = -EIO; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) goto out; data.inode = inode; @@ -2147,10 +2169,8 @@ static int fuse_writepages(struct address_space *mapping, err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { - /* Ignore errors if we can write at least one page */ WARN_ON(!data.wpa->ia.ap.num_pages); fuse_writepages_send(&data); - err = 0; } if (data.ff) fuse_file_put(data.ff, false, false); @@ -2759,7 +2779,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; - iov->iov_len = _IOC_SIZE(cmd); + + switch (cmd) { + case FS_IOC_GETFLAGS: + case FS_IOC_SETFLAGS: + iov->iov_len = sizeof(int); + break; + default: + iov->iov_len = _IOC_SIZE(cmd); + break; + } if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; @@ -2902,7 +2931,7 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd, if (!fuse_allow_current_process(fc)) return -EACCES; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; return fuse_do_ioctl(file, cmd, arg, flags); @@ -3065,11 +3094,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret = 0; struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; - bool async_dio = ff->fc->async_dio; loff_t pos = 0; struct inode *inode; loff_t i_size; - size_t count = iov_iter_count(iter); + size_t count = 
iov_iter_count(iter), shortened = 0; loff_t offset = iocb->ki_pos; struct fuse_io_priv *io; @@ -3077,17 +3105,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) inode = file->f_mapping->host; i_size = i_size_read(inode); - if ((iov_iter_rw(iter) == READ) && (offset > i_size)) + if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) return 0; - /* optimization for short read */ - if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { - if (offset >= i_size) - return 0; - iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); - count = iov_iter_count(iter); - } - io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); if (!io) return -ENOMEM; @@ -3103,15 +3123,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) * By default, we want to optimize all I/Os with async request * submission to the client filesystem if supported. */ - io->async = async_dio; + io->async = ff->fc->async_dio; io->iocb = iocb; io->blocking = is_sync_kiocb(iocb); + /* optimization for short read */ + if (io->async && !io->write && offset + count > i_size) { + iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); + shortened = count - iov_iter_count(iter); + count -= shortened; + } + /* * We cannot asynchronously extend the size of a file. * In such case the aio will behave exactly like sync io. */ - if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE) + if ((offset + count > i_size) && io->write) io->blocking = true; if (io->async && io->blocking) { @@ -3129,6 +3156,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } else { ret = __fuse_direct_read(io, iter, &pos); } + iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); if (io->async) { bool blocking = io->blocking; @@ -3279,13 +3307,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) return -EXDEV; - if (fc->writeback_cache) { - inode_lock(inode_in); - err = fuse_writeback_range(inode_in, pos_in, pos_in + len); - inode_unlock(inode_in); - if (err) - return err; - } + inode_lock(inode_in); + err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); + inode_unlock(inode_in); + if (err) + return err; inode_lock(inode_out); @@ -3293,11 +3319,27 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; - if (fc->writeback_cache) { - err = fuse_writeback_range(inode_out, pos_out, pos_out + len); - if (err) - goto out; - } + /* + * Write out dirty pages in the destination file before sending the COPY + * request to userspace. After the request is completed, truncate off + * pages (including partial ones) from the cache that have been copied, + * since these contain stale data at that point. + * + * This should be mostly correct, but if the COPY writes to partial + * pages (at the start or end) and the parts not covered by the COPY are + * written through a memory map after calling fuse_writeback_range(), + * then these partial page modifications will be lost on truncation. + * + * It is unlikely that someone would rely on such mixed style + * modifications. Yet this does give fewer guarantees than if the + * copying was performed with write(2). + * + * To fix this, an i_mmap_sem style lock could be used to prevent new + * faults while the copy is ongoing.
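+ *
+ * An illustration of the window (a hypothetical interleaving with a
+ * concurrent mmap writer):
+ *   1. fuse_writeback_range() flushes inode_out's dirty pages
+ *   2. another task stores into a partial page of the copy range
+ *      through a memory mapping
+ *   3. the COPY completes and truncate_inode_pages_range() drops
+ *      that page, discarding the store made in step 2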
+ */ + err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); + if (err) + goto out; if (is_unstable) set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); @@ -3318,6 +3360,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; + truncate_inode_pages_range(inode_out->i_mapping, + ALIGN_DOWN(pos_out, PAGE_SIZE), + ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); + if (fc->writeback_cache) { fuse_write_update_size(inode_out, pos_out + outarg.size); file_update_time(file_out); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ca344bf71404..d87892648545 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -158,6 +158,8 @@ enum { FUSE_I_INIT_RDPLUS, /** An operation changing file size is in progress */ FUSE_I_SIZE_UNSTABLE, + /* Bad inode */ + FUSE_I_BAD, }; struct fuse_conn; @@ -249,6 +251,7 @@ struct fuse_args { bool out_argvar:1; bool page_zeroing:1; bool page_replace:1; + bool may_block:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error); @@ -786,6 +789,17 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc) return atomic64_read(&fc->attr_version); } +static inline void fuse_make_bad(struct inode *inode) +{ + remove_inode_hash(inode); + set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); +} + +static inline bool fuse_is_bad(struct inode *inode) +{ + return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state)); +} + /** Device operations */ extern const struct file_operations fuse_dev_operations; @@ -831,6 +845,7 @@ struct fuse_io_args { struct { struct fuse_write_in in; struct fuse_write_out out; + bool page_locked; } write; }; struct fuse_args_pages ap; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 16aec32f7f3d..aa1d5cf1bc3a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -115,16 +115,18 @@ static void fuse_evict_inode(struct inode *inode) fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup); fi->forget = NULL; } - if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) { + if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) { WARN_ON(!list_empty(&fi->write_files)); WARN_ON(!list_empty(&fi->queued_writes)); } } -static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) +static int fuse_reconfigure(struct fs_context *fc) { + struct super_block *sb = fc->root->d_sb; + sync_filesystem(sb); - if (*flags & SB_MANDLOCK) + if (fc->sb_flags & SB_MANDLOCK) return -EINVAL; return 0; @@ -304,7 +306,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { /* Inode has changed type, any I/O on the old should fail */ - make_bad_inode(inode); + fuse_make_bad(inode); iput(inode); goto retry; } @@ -321,6 +323,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { + struct fuse_conn *fc = get_fuse_conn_super(sb); + struct fuse_inode *fi; struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; @@ -329,6 +333,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, if (!inode) return -ENOENT; + fi = get_fuse_inode(inode); + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + spin_unlock(&fi->lock); + fuse_invalidate_attr(inode); forget_all_cached_acls(inode); if (offset >= 0) { @@ -473,6 +482,13 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param) struct 
fuse_fs_context *ctx = fc->fs_private; int opt; + /* + * Ignore options coming from mount(MS_REMOUNT) for backward + * compatibility. + */ + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) + return 0; + opt = fs_parse(fc, &fuse_fs_parameters, param, &result); if (opt < 0) return opt; @@ -815,7 +831,6 @@ static const struct super_operations fuse_super_operations = { .evict_inode = fuse_evict_inode, .write_inode = fuse_write_inode, .drop_inode = generic_delete_inode, - .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, @@ -1289,6 +1304,7 @@ static int fuse_get_tree(struct fs_context *fc) static const struct fs_context_operations fuse_context_ops = { .free = fuse_free_fc, .parse_param = fuse_parse_param, + .reconfigure = fuse_reconfigure, .get_tree = fuse_get_tree, }; diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 6a40f75a0d25..70f685b61e3a 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -207,7 +207,7 @@ retry: dput(dentry); goto retry; } - if (is_bad_inode(inode)) { + if (fuse_is_bad(inode)) { dput(dentry); return -EIO; } @@ -568,7 +568,7 @@ int fuse_readdir(struct file *file, struct dir_context *ctx) struct inode *inode = file_inode(file); int err; - if (is_bad_inode(inode)) + if (fuse_is_bad(inode)) return -EIO; mutex_lock(&ff->readdir.lock); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index a5c86048b96e..fadf6fb90fe2 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -55,6 +55,12 @@ struct virtio_fs_forget { struct list_head list; }; +struct virtio_fs_req_work { + struct fuse_req *req; + struct virtio_fs_vq *fsvq; + struct work_struct done_work; +}; + static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, struct fuse_req *req, bool in_flight); @@ -443,19 +449,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req) } /* Work function for request completion */ +static void virtio_fs_request_complete(struct fuse_req *req, + struct virtio_fs_vq *fsvq) +{ + struct fuse_pqueue *fpq = &fsvq->fud->pq; + struct fuse_conn *fc = fsvq->fud->fc; + struct fuse_args *args; + struct fuse_args_pages *ap; + unsigned int len, i, thislen; + struct page *page; + + /* + * TODO verify that server properly follows FUSE protocol + * (oh.uniq, oh.len) + */ + args = req->args; + copy_args_from_argbuf(args, req); + + if (args->out_pages && args->page_zeroing) { + len = args->out_args[args->out_numargs - 1].size; + ap = container_of(args, typeof(*ap), args); + for (i = 0; i < ap->num_pages; i++) { + thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + page = ap->pages[i]; + zero_user_segment(page, len, thislen); + len = 0; + } else { + len -= thislen; + } + } + } + + spin_lock(&fpq->lock); + clear_bit(FR_SENT, &req->flags); + spin_unlock(&fpq->lock); + + fuse_request_end(fc, req); + spin_lock(&fsvq->lock); + dec_in_flight_req(fsvq); + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_complete_req_work(struct work_struct *work) +{ + struct virtio_fs_req_work *w = + container_of(work, typeof(*w), done_work); + + virtio_fs_request_complete(w->req, w->fsvq); + kfree(w); +} + static void virtio_fs_requests_done_work(struct work_struct *work) { struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, done_work); struct fuse_pqueue *fpq = &fsvq->fud->pq; - struct fuse_conn *fc = fsvq->fud->fc; struct virtqueue *vq = fsvq->vq; struct fuse_req *req; - struct fuse_args_pages *ap; struct fuse_req *next; - struct fuse_args *args; - 
unsigned int len, i, thislen; - struct page *page; + unsigned int len; LIST_HEAD(reqs); /* Collect completed requests off the virtqueue */ @@ -473,38 +527,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work) /* End requests */ list_for_each_entry_safe(req, next, &reqs, list) { - /* - * TODO verify that server properly follows FUSE protocol - * (oh.uniq, oh.len) - */ - args = req->args; - copy_args_from_argbuf(args, req); - - if (args->out_pages && args->page_zeroing) { - len = args->out_args[args->out_numargs - 1].size; - ap = container_of(args, typeof(*ap), args); - for (i = 0; i < ap->num_pages; i++) { - thislen = ap->descs[i].length; - if (len < thislen) { - WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); - len = 0; - } else { - len -= thislen; - } - } - } - - spin_lock(&fpq->lock); - clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); - spin_unlock(&fpq->lock); - fuse_request_end(fc, req); - spin_lock(&fsvq->lock); - dec_in_flight_req(fsvq); - spin_unlock(&fsvq->lock); + /* blocking async request completes in a worker context */ + if (req->args->may_block) { + struct virtio_fs_req_work *w; + + w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL); + INIT_WORK(&w->done_work, virtio_fs_complete_req_work); + w->fsvq = fsvq; + w->req = req; + schedule_work(&w->done_work); + } else { + virtio_fs_request_complete(req, fsvq); + } } } @@ -631,6 +667,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) out_vqs: vdev->config->reset(vdev); virtio_fs_cleanup_vqs(vdev, fs); + kfree(fs->vqs); out: vdev->priv = NULL; diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 20d052e08b3b..28fed5295770 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -113,6 +113,9 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size) struct fuse_getxattr_out outarg; ssize_t ret; + if (fuse_is_bad(inode)) + return -EIO; + if (!fuse_allow_current_process(fc)) return -EACCES; @@ -178,6 +181,9 @@ static int fuse_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *value, size_t size) { + if (fuse_is_bad(inode)) + return -EIO; + return fuse_getxattr(inode, name, value, size); } @@ -186,6 +192,9 @@ static int fuse_xattr_set(const struct xattr_handler *handler, const char *name, const void *value, size_t size, int flags) { + if (fuse_is_bad(inode)) + return -EIO; + if (!value) return fuse_removexattr(inode, name); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index f63df54a08c6..aaec3c5b0202 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -528,10 +528,12 @@ lower_metapath: /* Advance in metadata tree. 
*/ (mp->mp_list[hgt])++; - if (mp->mp_list[hgt] >= sdp->sd_inptrs) { - if (!hgt) + if (hgt) { + if (mp->mp_list[hgt] >= sdp->sd_inptrs) + goto lower_metapath; + } else { + if (mp->mp_list[hgt] >= sdp->sd_diptrs) break; - goto lower_metapath; } fill_up_metapath: @@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, ret = -ENOENT; goto unlock; } else { - /* report a hole */ iomap->offset = pos; iomap->length = length; - goto do_alloc; + goto hole_found; } } iomap->length = size; @@ -933,8 +934,6 @@ unlock: return ret; do_alloc: - iomap->addr = IOMAP_NULL_ADDR; - iomap->type = IOMAP_HOLE; if (flags & IOMAP_REPORT) { if (pos >= size) ret = -ENOENT; @@ -956,6 +955,9 @@ do_alloc: if (pos < size && height == ip->i_height) ret = gfs2_hole_size(inode, lblock, len, mp, iomap); } +hole_found: + iomap->addr = IOMAP_NULL_ADDR; + iomap->type = IOMAP_HOLE; goto out; } @@ -1226,6 +1228,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, gfs2_inplace_release(ip); + if (ip->i_qadata && ip->i_qadata->qa_qd_num) + gfs2_quota_unlock(ip); + if (length != written && (iomap->flags & IOMAP_F_NEW)) { /* Deallocate blocks that were just allocated. */ loff_t blockmask = i_blocksize(inode) - 1; @@ -1238,9 +1243,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, } } - if (ip->i_qadata && ip->i_qadata->qa_qd_num) - gfs2_quota_unlock(ip); - if (unlikely(!written)) goto out_unlock; @@ -1348,9 +1350,15 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi return ret; } +/* + * NOTE: Never call gfs2_block_zero_range with an open transaction because it + * uses iomap write to perform its actions, which begin their own transactions + * (iomap_begin, page_prepare, etc.) 
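+ *
+ * current->journal_info holds the running transaction, so the
+ * BUG_ON below is a cheap way of catching callers that still have
+ * a transaction open when iomap is about to start its own.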
+ */ static int gfs2_block_zero_range(struct inode *inode, loff_t from, unsigned int length) { + BUG_ON(current->journal_info); return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); } @@ -1411,6 +1419,16 @@ static int trunc_start(struct inode *inode, u64 newsize) u64 oldsize = inode->i_size; int error; + if (!gfs2_is_stuffed(ip)) { + unsigned int blocksize = i_blocksize(inode); + unsigned int offs = newsize & (blocksize - 1); + if (offs) { + error = gfs2_block_zero_range(inode, newsize, + blocksize - offs); + if (error) + return error; + } + } if (journaled) error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES); else @@ -1424,19 +1442,10 @@ static int trunc_start(struct inode *inode, u64 newsize) gfs2_trans_add_meta(ip->i_gl, dibh); - if (gfs2_is_stuffed(ip)) { + if (gfs2_is_stuffed(ip)) gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); - } else { - unsigned int blocksize = i_blocksize(inode); - unsigned int offs = newsize & (blocksize - 1); - if (offs) { - error = gfs2_block_zero_range(inode, newsize, - blocksize - offs); - if (error) - goto out; - } + else ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; - } i_size_write(inode, newsize); ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); @@ -2440,24 +2449,13 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) struct inode *inode = file_inode(file); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); + unsigned int blocksize = i_blocksize(inode); + loff_t start, end; int error; - if (gfs2_is_jdata(ip)) - error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, - GFS2_JTRUNC_REVOKES); - else - error = gfs2_trans_begin(sdp, RES_DINODE, 0); - if (error) - return error; + if (!gfs2_is_stuffed(ip)) { + unsigned int start_off, end_len; - if (gfs2_is_stuffed(ip)) { - error = stuffed_zero_range(inode, offset, length); - if (error) - goto out; - } else { - unsigned int start_off, end_len, blocksize; - - blocksize = i_blocksize(inode); start_off = offset & (blocksize - 1); end_len = (offset + length) & (blocksize - 1); if (start_off) { @@ -2478,6 +2476,26 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) } } + start = round_down(offset, blocksize); + end = round_up(offset + length, blocksize) - 1; + error = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (error) + return error; + + if (gfs2_is_jdata(ip)) + error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, + GFS2_JTRUNC_REVOKES); + else + error = gfs2_trans_begin(sdp, RES_DINODE, 0); + if (error) + return error; + + if (gfs2_is_stuffed(ip)) { + error = stuffed_zero_range(inode, offset, length); + if (error) + goto out; + } + if (gfs2_is_jdata(ip)) { BUG_ON(!current->journal_info); gfs2_journaled_truncate_range(inode, offset, length); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 4a10b4e7092a..2b304293a295 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -759,7 +759,8 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to) if (ret) goto out_uninit; - ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL); + ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, + is_sync_kiocb(iocb)); gfs2_glock_dq(&gh); out_uninit: @@ -794,7 +795,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) if (offset + len > i_size_read(&ip->i_inode)) goto out; - ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL); + ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, + is_sync_kiocb(iocb)); out: 
gfs2_glock_dq(&gh); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 0290a22ebccf..9e1685a30bf8 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -873,7 +873,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, out_free: kfree(gl->gl_lksb.sb_lvbptr); kmem_cache_free(cachep, gl); - atomic_dec(&sdp->sd_glock_disposal); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); out: return ret; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index ff213690e364..d5b9274662db 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -87,10 +87,36 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) memset(&tr, 0, sizeof(tr)); INIT_LIST_HEAD(&tr.tr_buf); INIT_LIST_HEAD(&tr.tr_databuf); + INIT_LIST_HEAD(&tr.tr_ail1_list); + INIT_LIST_HEAD(&tr.tr_ail2_list); tr.tr_revokes = atomic_read(&gl->gl_ail_count); - if (!tr.tr_revokes) + if (!tr.tr_revokes) { + bool have_revokes; + bool log_in_flight; + + /* + * We have nothing on the ail, but there could be revokes on + * the sdp revoke queue, in which case, we still want to flush + * the log and wait for it to finish. + * + * If the sdp revoke list is empty too, we might still have an + * io outstanding for writing revokes, so we should wait for + * it before returning. + * + * If none of these conditions are true, our revokes are all + * flushed and we can return. + */ + gfs2_log_lock(sdp); + have_revokes = !list_empty(&sdp->sd_log_revokes); + log_in_flight = atomic_read(&sdp->sd_log_in_flight); + gfs2_log_unlock(sdp); + if (have_revokes) + goto flush; + if (log_in_flight) + log_flush_wait(sdp); return; + } /* A shortened, inline version of gfs2_trans_begin() * tr->alloced is not set since the transaction structure is @@ -105,6 +131,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) __gfs2_ail_flush(gl, 0, tr.tr_revokes); gfs2_trans_end(sdp); +flush: gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_AIL_EMPTY_GL); } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 5f89c515f5bb..33a6b074209d 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -694,6 +694,7 @@ struct gfs2_sbd { struct super_block *sd_vfs; struct gfs2_pcpu_lkstats __percpu *sd_lkstats; struct kobject sd_kobj; + struct completion sd_kobj_unregister; unsigned long sd_flags; /* SDF_... */ struct gfs2_sb_host sd_sb; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 8466166f22e3..988bb7b17ed8 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -712,7 +712,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_trans_begin(sdp, blocks, 0); if (error) - goto fail_gunlock2; + goto fail_free_inode; if (blocks > 1) { ip->i_eattr = ip->i_no_addr + 1; @@ -723,7 +723,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_gunlock2; + goto fail_free_inode; BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags)); @@ -732,7 +732,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock2; glock_set_object(ip->i_iopen_gh.gh_gl, ip); - gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode); @@ -765,6 +764,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, mark_inode_dirty(inode); d_instantiate(dentry, inode); + /* After instantiate, errors should result in evict which will destroy + * both inode and iopen glocks properly. 
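+ * (Past this point, failure paths simply return the error and let
+ * eviction tear the glocks down instead of jumping to the
+ * fail_gunlock labels.)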
*/ if (file) { file->f_mode |= FMODE_CREATED; error = finish_open(file, dentry, gfs2_open_common); @@ -772,15 +773,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); return error; fail_gunlock3: glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); - gfs2_glock_put(io_gl); fail_gunlock2: - if (io_gl) - clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { glock_clear_object(ip->i_gl, ip); diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 7c7197343ee2..72dec177b349 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -280,7 +280,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; - int lvb_needs_unlock = 0; int error; if (gl->gl_lksb.sb_lkid == 0) { @@ -293,13 +292,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); - /* don't want to skip dlm_unlock writing the lvb when lock is ex */ - - if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) - lvb_needs_unlock = 1; + /* don't want to skip dlm_unlock writing the lvb when lock has one */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && - !lvb_needs_unlock) { + !gl->gl_lksb.sb_lvbptr) { gfs2_glock_free(gl); return; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 2aed73666a65..00a90fc72597 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -513,7 +513,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) } -static void log_flush_wait(struct gfs2_sbd *sdp) +void log_flush_wait(struct gfs2_sbd *sdp) { DEFINE_WAIT(wait); @@ -598,13 +598,13 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) struct buffer_head *bh = bd->bd_bh; struct gfs2_glock *gl = bd->bd_gl; + sdp->sd_log_num_revoke++; + if (atomic_inc_return(&gl->gl_revokes) == 1) + gfs2_glock_hold(gl); bh->b_private = NULL; bd->bd_blkno = bh->b_blocknr; gfs2_remove_from_ail(bd); /* drops ref on bh */ bd->bd_bh = NULL; - sdp->sd_log_num_revoke++; - if (atomic_inc_return(&gl->gl_revokes) == 1) - gfs2_glock_hold(gl); set_bit(GLF_LFLUSH, &gl->gl_flags); list_add(&bd->bd_list, &sdp->sd_log_revokes); } @@ -810,8 +810,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) tr = sdp->sd_log_tr; if (tr) { sdp->sd_log_tr = NULL; - INIT_LIST_HEAD(&tr->tr_ail1_list); - INIT_LIST_HEAD(&tr->tr_ail2_list); tr->tr_first = sdp->sd_log_flush_head; if (unlikely (state == SFS_FROZEN)) gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); @@ -881,8 +879,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) * @new: New transaction to be merged */ -static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) +static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new) { + struct gfs2_trans *old = sdp->sd_log_tr; + WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); old->tr_num_buf_new += new->tr_num_buf_new; @@ -893,6 +893,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); list_splice_tail_init(&new->tr_buf, &old->tr_buf); + + spin_lock(&sdp->sd_ail_lock); + list_splice_tail_init(&new->tr_ail1_list, 
&old->tr_ail1_list); + list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list); + spin_unlock(&sdp->sd_ail_lock); } static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) @@ -904,7 +909,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) gfs2_log_lock(sdp); if (sdp->sd_log_tr) { - gfs2_merge_trans(sdp->sd_log_tr, tr); + gfs2_merge_trans(sdp, tr); } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); sdp->sd_log_tr = tr; diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index c762da494546..52b9bf27e918 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -73,6 +73,7 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 type); extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc); +extern void log_flush_wait(struct gfs2_sbd *sdp); extern void gfs2_log_shutdown(struct gfs2_sbd *sdp); extern int gfs2_logd(void *data); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 7ca84be20cf6..d2ed4dc4434c 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, struct super_block *sb = sdp->sd_vfs; struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); - bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9); + bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; bio_set_dev(bio, sb->s_bdev); bio->bi_end_io = end_io; bio->bi_private = sdp; @@ -504,12 +504,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, unsigned int bsize = sdp->sd_sb.sb_bsize, off; unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; unsigned int shift = PAGE_SHIFT - bsize_shift; - unsigned int readahead_blocks = BIO_MAX_PAGES << shift; + unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; struct gfs2_journal_extent *je; int sz, ret = 0; struct bio *bio = NULL; struct page *page = NULL; - bool bio_chained = false, done = false; + bool done = false; errseq_t since; memset(head, 0, sizeof(*head)); @@ -532,30 +532,30 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, off = 0; } - if (!bio || (bio_chained && !off)) { - /* start new bio */ - } else { - sz = bio_add_page(bio, page, bsize, off); - if (sz == bsize) - goto block_added; + if (bio && (off || block < blocks_submitted + max_blocks)) { + sector_t sector = dblock << sdp->sd_fsb2bb_shift; + + if (bio_end_sector(bio) == sector) { + sz = bio_add_page(bio, page, bsize, off); + if (sz == bsize) + goto block_added; + } if (off) { unsigned int blocks = (PAGE_SIZE - off) >> bsize_shift; bio = gfs2_chain_bio(bio, blocks); - bio_chained = true; goto add_block_to_new_bio; } } if (bio) { - blocks_submitted = block + 1; + blocks_submitted = block; submit_bio(bio); } bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); bio->bi_opf = REQ_OP_READ; - bio_chained = false; add_block_to_new_bio: sz = bio_add_page(bio, page, bsize, off); BUG_ON(sz != bsize); @@ -563,7 +563,7 @@ block_added: off += bsize; if (off == PAGE_SIZE) page = NULL; - if (blocks_submitted < blocks_read + readahead_blocks) { + if (blocks_submitted <= blocks_read + max_blocks) { /* Keep at least one bio in flight */ continue; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 18daf494abab..29b27d769860 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -169,15 +169,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, 
int silent) return -EINVAL; } - /* If format numbers match exactly, we're done. */ + if (sb->sb_fs_format != GFS2_FORMAT_FS || + sb->sb_multihost_format != GFS2_FORMAT_MULTI) { + fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); + return -EINVAL; + } - if (sb->sb_fs_format == GFS2_FORMAT_FS && - sb->sb_multihost_format == GFS2_FORMAT_MULTI) - return 0; + if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE || + (sb->sb_bsize & (sb->sb_bsize - 1))) { + pr_warn("Invalid superblock size\n"); + return -EINVAL; + } - fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); - - return -EINVAL; + return 0; } static void end_bio_io_page(struct bio *bio) @@ -911,7 +915,7 @@ fail: } static const match_table_t nolock_tokens = { - { Opt_jid, "jid=%d\n", }, + { Opt_jid, "jid=%d", }, { Opt_err, NULL }, }; @@ -1094,26 +1098,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) } error = init_names(sdp, silent); - if (error) { - /* In this case, we haven't initialized sysfs, so we have to - manually free the sdp. */ - free_sbd(sdp); - sb->s_fs_info = NULL; - return error; - } + if (error) + goto fail_free; snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name); error = gfs2_sys_fs_add(sdp); - /* - * If we hit an error here, gfs2_sys_fs_add will have called function - * kobject_put which causes the sysfs usage count to go to zero, which - * causes sysfs to call function gfs2_sbd_release, which frees sdp. - * Subsequent error paths here will call gfs2_sys_fs_del, which also - * kobject_put to free sdp. - */ if (error) - return error; + goto fail_free; gfs2_create_debugfs_file(sdp); @@ -1168,7 +1160,17 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) goto fail_per_node; } - if (!sb_rdonly(sb)) { + if (sb_rdonly(sb)) { + struct gfs2_holder freeze_gh; + + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, + GL_EXACT, &freeze_gh); + if (error) { + fs_err(sdp, "can't make FS RO: %d\n", error); + goto fail_per_node; + } + gfs2_glock_dq_uninit(&freeze_gh); + } else { error = gfs2_make_fs_rw(sdp); if (error) { fs_err(sdp, "can't make FS RW: %d\n", error); @@ -1200,9 +1202,9 @@ fail_lm: gfs2_lm_unmount(sdp); fail_debug: gfs2_delete_debugfs_file(sdp); - /* gfs2_sys_fs_del must be the last thing we do, since it causes - * sysfs to call function gfs2_sbd_release, which frees sdp. 
*/ gfs2_sys_fs_del(sdp); +fail_free: + free_sbd(sdp); sb->s_fs_info = NULL; return error; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 7c016a082aa6..cbee745169b8 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1040,8 +1040,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) u32 x; int error = 0; - if (capable(CAP_SYS_RESOURCE) || - sdp->sd_args.ar_quota != GFS2_QUOTA_ON) + if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) return 0; error = gfs2_quota_hold(ip, uid, gid); diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 765627d9a91e..fe68a91dc16f 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -44,7 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, int ret; ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ - if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + if (capable(CAP_SYS_RESOURCE) || + sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); if (ret) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 2466bb44a23c..c056ed5c6df3 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -736,9 +736,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) } gfs2_free_clones(rgd); + return_all_reservations(rgd); kfree(rgd->rd_bits); rgd->rd_bits = NULL; - return_all_reservations(rgd); kmem_cache_free(gfs2_rgrpd_cachep, rgd); } } @@ -1008,6 +1008,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip) if (error < 0) return error; + if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) { + fs_err(sdp, "no resource groups found in the file system.\n"); + return -ENOENT; + } set_rgrp_preferences(sdp); sdp->sd_rindex_uptodate = 1; @@ -1410,6 +1414,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) + return -EROFS; + if (!blk_queue_discard(q)) return -EOPNOTSUPP; @@ -2571,13 +2578,13 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) rbm.rgd = rgd; error = gfs2_rbm_from_block(&rbm, no_addr); - if (WARN_ON_ONCE(error)) - goto fail; - - if (gfs2_testbit(&rbm, false) != type) - error = -ESTALE; + if (!WARN_ON_ONCE(error)) { + if (gfs2_testbit(&rbm, false) != type) + error = -ESTALE; + } gfs2_glock_dq_uninit(&rgd_gh); + fail: return error; } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 5fa1eec4fb4f..9c593fd50c6a 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -689,12 +689,14 @@ restart: gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ gfs2_gl_hash_clear(sdp); + truncate_inode_pages_final(&sdp->sd_aspace); gfs2_delete_debugfs_file(sdp); /* Unmount the locking protocol */ gfs2_lm_unmount(sdp); /* At this point, we're through participating in the lockspace */ gfs2_sys_fs_del(sdp); + free_sbd(sdp); } /** @@ -755,11 +757,13 @@ void gfs2_freeze_func(struct work_struct *work) static int gfs2_freeze(struct super_block *sb) { struct gfs2_sbd *sdp = sb->s_fs_info; - int error = 0; + int error; mutex_lock(&sdp->sd_freeze_mutex); - if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) + if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) { + error = -EBUSY; goto out; + } if (test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) { error = -EINVAL; @@ -796,10 +800,10 @@ static int gfs2_unfreeze(struct super_block *sb) struct gfs2_sbd *sdp = sb->s_fs_info; mutex_lock(&sdp->sd_freeze_mutex); - if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || + if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || 
!gfs2_holder_initialized(&sdp->sd_freeze_gh)) { mutex_unlock(&sdp->sd_freeze_mutex); - return 0; + return -EINVAL; } gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index dd15b8e4af2c..1c6e52dc878e 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -302,7 +302,7 @@ static void gfs2_sbd_release(struct kobject *kobj) { struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); - free_sbd(sdp); + complete(&sdp->sd_kobj_unregister); } static struct kobj_type gfs2_ktype = { @@ -652,6 +652,7 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp) sprintf(ro, "RDONLY=%d", sb_rdonly(sb)); sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0); + init_completion(&sdp->sd_kobj_unregister); sdp->sd_kobj.kset = gfs2_kset; error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL, "%s", sdp->sd_table_name); @@ -682,6 +683,7 @@ fail_tune: fail_reg: fs_err(sdp, "error %d adding sysfs files\n", error); kobject_put(&sdp->sd_kobj); + wait_for_completion(&sdp->sd_kobj_unregister); sb->s_fs_info = NULL; return error; } @@ -692,6 +694,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp) sysfs_remove_group(&sdp->sd_kobj, &tune_group); sysfs_remove_group(&sdp->sd_kobj, &lock_module_group); kobject_put(&sdp->sd_kobj); + wait_for_completion(&sdp->sd_kobj_unregister); } static int gfs2_uevent(struct kset *kset, struct kobject *kobj, diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 9d4227330de4..2a12d30ae0de 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -53,6 +53,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, sizeof(u64)); INIT_LIST_HEAD(&tr->tr_databuf); INIT_LIST_HEAD(&tr->tr_buf); + INIT_LIST_HEAD(&tr->tr_ail1_list); + INIT_LIST_HEAD(&tr->tr_ail2_list); sb_start_intwrite(sdp->sd_vfs); diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c index e6d554476db4..eeebe80c6be4 100644 --- a/fs/hfsplus/attributes.c +++ b/fs/hfsplus/attributes.c @@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, return -ENOENT; } + /* Avoid btree corruption */ + hfs_bnode_read(fd->bnode, fd->search_key, + fd->keyoffset, fd->keylength); + err = hfs_brec_remove(fd); if (err) return err; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 5a7eb0c79839..58a972667bf8 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -139,10 +139,10 @@ static char *inode_name(struct inode *ino) static char *follow_link(char *link) { - int len, n; char *name, *resolved, *end; + int n; - name = __getname(); + name = kmalloc(PATH_MAX, GFP_KERNEL); if (!name) { n = -ENOMEM; goto out_free; @@ -164,21 +164,18 @@ static char *follow_link(char *link) return name; *(end + 1) = '\0'; - len = strlen(link) + strlen(name) + 1; - resolved = kmalloc(len, GFP_KERNEL); + resolved = kasprintf(GFP_KERNEL, "%s%s", link, name); if (resolved == NULL) { n = -ENOMEM; goto out_free; } - sprintf(resolved, "%s%s", link, name); - __putname(name); - kfree(link); + kfree(name); return resolved; out_free: - __putname(name); + kfree(name); return ERR_PTR(n); } @@ -918,18 +915,16 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) sb->s_d_op = &simple_dentry_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; - /* NULL is printed as <NULL> by sprintf: avoid that. */ + /* NULL is printed as '(null)' by printf(): avoid that. 
*/ if (req_root == NULL) req_root = ""; err = -ENOMEM; sb->s_fs_info = host_root_path = - kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL); + kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root); if (host_root_path == NULL) goto out; - sprintf(host_root_path, "%s/%s", root_ino, req_root); - root_inode = new_inode(sb); if (!root_inode) goto out; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 40306c1eab07..a2e9354b9d53 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -440,7 +440,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, u32 hash; index = page->index; - hash = hugetlb_fault_mutex_hash(h, mapping, index, 0); + hash = hugetlb_fault_mutex_hash(h, mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* @@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, addr = index * hpage_size; /* mutex taken here, fault path and hole punch */ - hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); + hash = hugetlb_fault_mutex_hash(h, mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ @@ -675,9 +675,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_unlock(&hugetlb_fault_mutex_table[hash]); + set_page_huge_active(page); /* * unlock_page because locked by add_to_page_cache() - * page_put due to reference from alloc_huge_page() + * put_page() due to reference from alloc_huge_page() */ unlock_page(page); put_page(page); @@ -1284,6 +1285,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; + + /* + * Due to the special and limited functionality of hugetlbfs, it does + * not work well as a stacking filesystem. 
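+ *
+ * Setting s_stack_depth to the maximum means that any filesystem
+ * trying to stack on top of hugetlbfs (overlayfs, eCryptfs, ...)
+ * will fail its own stack-depth check.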
+ */ + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; diff --git a/fs/io_uring.c b/fs/io_uring.c index e37b84146453..478df7e10767 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -267,6 +267,9 @@ struct io_ring_ctx { #if defined(CONFIG_UNIX) struct socket *ring_sock; #endif + + struct list_head task_list; + spinlock_t task_lock; }; struct sqe_submit { @@ -276,6 +279,7 @@ struct sqe_submit { bool has_user; bool needs_lock; bool needs_fixed_file; + u8 opcode; }; /* @@ -331,13 +335,18 @@ struct io_kiocb { #define REQ_F_ISREG 2048 /* regular file */ #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ +#define REQ_F_CANCEL 16384 /* cancel request */ + unsigned long fsize; u64 user_data; u32 result; u32 sequence; + struct files_struct *files; struct fs_struct *fs; struct work_struct work; + struct task_struct *work_task; + struct list_head task_list; }; #define IO_PLUG_THRESHOLD 2 @@ -408,6 +417,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) } ctx->flags = p->flags; + init_waitqueue_head(&ctx->sqo_wait); init_waitqueue_head(&ctx->cq_wait); init_completion(&ctx->ctx_done); init_completion(&ctx->sqo_thread_started); @@ -423,6 +433,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->cancel_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); + INIT_LIST_HEAD(&ctx->task_list); + spin_lock_init(&ctx->task_lock); return ctx; } @@ -490,10 +502,11 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) static inline void io_queue_async_work(struct io_ring_ctx *ctx, struct io_kiocb *req) { + unsigned long flags; int rw = 0; if (req->submit.sqe) { - switch (req->submit.sqe->opcode) { + switch (req->submit.opcode) { case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: rw = !(req->rw.ki_flags & IOCB_DIRECT); @@ -501,6 +514,15 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx, } } + if (req->work.func == io_sq_wq_submit_work) { + req->files = current->files; + + spin_lock_irqsave(&ctx->task_lock, flags); + list_add(&req->task_list, &ctx->task_list); + req->work_task = NULL; + spin_unlock_irqrestore(&ctx->task_lock, flags); + } + queue_work(ctx->sqo_wq[rw], &req->work); } @@ -648,6 +670,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, state->cur_req++; } + INIT_LIST_HEAD(&req->task_list); req->file = NULL; req->ctx = ctx; req->flags = 0; @@ -1085,6 +1108,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, if (S_ISREG(file_inode(req->file)->i_mode)) req->flags |= REQ_F_ISREG; + if (force_nonblock) + req->fsize = rlimit(RLIMIT_FSIZE); + /* * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so * we know to async punt it even if it was opened O_NONBLOCK @@ -1232,23 +1258,15 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, } static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, - const struct sqe_submit *s, struct iovec **iovec, + struct io_kiocb *req, struct iovec **iovec, struct iov_iter *iter) { - const struct io_uring_sqe *sqe = s->sqe; + const struct io_uring_sqe *sqe = req->submit.sqe; void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); size_t sqe_len = READ_ONCE(sqe->len); u8 opcode; - /* - * We're reading ->opcode for the second time, but the first read - * doesn't care whether it's _FIXED or not, so it doesn't matter - * whether ->opcode changes 
concurrently. The first read does care - * about whether it is a READ or a WRITE, so we don't trust this read - * for that purpose and instead let the caller pass in the read/write - * flag. - */ - opcode = READ_ONCE(sqe->opcode); + opcode = req->submit.opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); @@ -1256,7 +1274,7 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, return ret; } - if (!s->has_user) + if (!req->submit.has_user) return -EFAULT; #ifdef CONFIG_COMPAT @@ -1403,7 +1421,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_READ))) return -EBADF; - ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); + ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter); if (ret < 0) return ret; @@ -1418,8 +1436,10 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, if (file->f_op->read_iter) ret2 = call_read_iter(file, kiocb, &iter); - else + else if (req->file->f_op->read) ret2 = loop_rw_iter(READ, file, kiocb, &iter); + else + ret2 = -EINVAL; /* * In case of a short read, punt to async. This can happen @@ -1468,7 +1488,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_WRITE))) return -EBADF; - ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); + ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter); if (ret < 0) return ret; @@ -1504,10 +1524,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, } kiocb->ki_flags |= IOCB_WRITE; + if (!force_nonblock) + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; + if (file->f_op->write_iter) ret2 = call_write_iter(file, kiocb, &iter); - else + else if (req->file->f_op->write) ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); + else + ret2 = -EINVAL; + + if (!force_nonblock) + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + if (!force_nonblock || ret2 != -EAGAIN) { io_rw_done(kiocb, ret2); } else { @@ -2080,15 +2109,14 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct sqe_submit *s, bool force_nonblock) { - int ret, opcode; + int ret; req->user_data = READ_ONCE(s->sqe->user_data); if (unlikely(s->index >= ctx->sq_entries)) return -EINVAL; - opcode = READ_ONCE(s->sqe->opcode); - switch (opcode) { + switch (req->submit.opcode) { case IORING_OP_NOP: ret = io_nop(req, req->user_data); break; @@ -2152,10 +2180,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, return 0; } -static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, - const struct io_uring_sqe *sqe) +static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) { - switch (sqe->opcode) { + switch (req->submit.opcode) { case IORING_OP_READV: case IORING_OP_READ_FIXED: return &ctx->pending_async[READ]; @@ -2167,12 +2195,10 @@ static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, } } -static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) +static inline bool io_req_needs_user(struct io_kiocb *req) { - u8 opcode = READ_ONCE(sqe->opcode); - - return !(opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED); + return !(req->submit.opcode == IORING_OP_READ_FIXED || + req->submit.opcode == IORING_OP_WRITE_FIXED); } static void io_sq_wq_submit_work(struct work_struct 
*work) @@ -2188,7 +2214,9 @@ static void io_sq_wq_submit_work(struct work_struct *work) int ret; old_cred = override_creds(ctx->creds); - async_list = io_async_list_from_sqe(ctx, req->submit.sqe); + async_list = io_async_list_from_req(ctx, req); + + allow_kernel_signal(SIGINT); restart: do { struct sqe_submit *s = &req->submit; @@ -2198,7 +2226,8 @@ restart: /* Ensure we clear previously set non-block flag */ req->rw.ki_flags &= ~IOCB_NOWAIT; - if (req->fs != current->fs && current->fs != old_fs_struct) { + if ((req->fs && req->fs != current->fs) || + (!req->fs && current->fs != old_fs_struct)) { task_lock(current); if (req->fs) current->fs = req->fs; @@ -2208,9 +2237,10 @@ restart: } ret = 0; - if (io_sqe_needs_user(sqe) && !cur_mm) { + if (io_req_needs_user(req) && !cur_mm) { if (!mmget_not_zero(ctx->sqo_mm)) { ret = -EFAULT; + goto end_req; } else { cur_mm = ctx->sqo_mm; use_mm(cur_mm); @@ -2220,6 +2250,18 @@ restart: } if (!ret) { + req->work_task = current; + + /* + * Pairs with the smp_store_mb() (B) in + * io_cancel_async_work(). + */ + smp_mb(); /* A */ + if (req->flags & REQ_F_CANCEL) { + ret = -ECANCELED; + goto end_req; + } + s->has_user = cur_mm != NULL; s->needs_lock = true; do { @@ -2235,6 +2277,10 @@ restart: cond_resched(); } while (1); } +end_req: + spin_lock_irq(&ctx->task_lock); + list_del_init(&req->task_list); + spin_unlock_irq(&ctx->task_lock); /* drop submission reference */ io_put_req(req); @@ -2299,13 +2345,14 @@ restart: } out: + disallow_signal(SIGINT); if (cur_mm) { set_fs(old_fs); unuse_mm(cur_mm); mmput(cur_mm); } revert_creds(old_cred); - if (old_fs_struct) { + if (old_fs_struct != current->fs) { task_lock(current); current->fs = old_fs_struct; task_unlock(current); @@ -2339,15 +2386,24 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req) list_del_init(&req->list); ret = false; } + + if (ret) { + struct io_ring_ctx *ctx = req->ctx; + + req->files = current->files; + + spin_lock_irq(&ctx->task_lock); + list_add(&req->task_list, &ctx->task_list); + req->work_task = NULL; + spin_unlock_irq(&ctx->task_lock); + } spin_unlock(&list->lock); return ret; } -static bool io_op_needs_file(const struct io_uring_sqe *sqe) +static bool io_op_needs_file(struct io_kiocb *req) { - int op = READ_ONCE(sqe->opcode); - - switch (op) { + switch (req->submit.opcode) { case IORING_OP_NOP: case IORING_OP_POLL_REMOVE: case IORING_OP_TIMEOUT: @@ -2375,7 +2431,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, */ req->sequence = s->sequence; - if (!io_op_needs_file(s->sqe)) + if (!io_op_needs_file(req)) return 0; if (flags & IOSQE_FIXED_FILE) { @@ -2416,7 +2472,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, s->sqe = sqe_copy; memcpy(&req->submit, s, sizeof(*s)); - list = io_async_list_from_sqe(ctx, s->sqe); + list = io_async_list_from_req(ctx, req); if (!io_add_to_prev_work(list, req)) { if (list) atomic_inc(&list->cnt); @@ -2526,6 +2582,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, goto err; } + memcpy(&req->submit, s, sizeof(*s)); ret = io_req_set_file(ctx, s, state, req); if (unlikely(ret)) { err_req: @@ -2538,7 +2595,7 @@ err: req->user_data = s->sqe->user_data; #if defined(CONFIG_NET) - switch (READ_ONCE(s->sqe->opcode)) { + switch (req->submit.opcode) { case IORING_OP_SENDMSG: case IORING_OP_RECVMSG: spin_lock(¤t->fs->lock); @@ -2653,6 +2710,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) if (head < ctx->sq_entries) { s->index = 
head; s->sqe = &ctx->sq_sqes[head]; + s->opcode = READ_ONCE(s->sqe->opcode); s->sequence = ctx->cached_sq_head; ctx->cached_sq_head++; return true; } @@ -3092,13 +3150,6 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) struct sk_buff *skb; int i; - if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { - unsigned long inflight = ctx->user->unix_inflight + nr; - - if (inflight > task_rlimit(current, RLIMIT_NOFILE)) - return -EMFILE; - } - fpl = kzalloc(sizeof(*fpl), GFP_KERNEL); if (!fpl) return -ENOMEM; @@ -3233,7 +3284,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, { int ret; - init_waitqueue_head(&ctx->sqo_wait); mmgrab(current->mm); ctx->sqo_mm = current->mm; @@ -3364,6 +3414,9 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, return SIZE_MAX; #endif + if (sq_offset) + *sq_offset = off; + sq_array_size = array_size(sizeof(u32), sq_entries); if (sq_array_size == SIZE_MAX) return SIZE_MAX; @@ -3371,9 +3424,6 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, if (check_add_overflow(off, sq_array_size, &off)) return SIZE_MAX; - if (sq_offset) - *sq_offset = off; - return off; } @@ -3494,8 +3544,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, ret = 0; if (!pages || nr_pages > got_pages) { - kfree(vmas); - kfree(pages); + kvfree(vmas); + kvfree(pages); pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); vmas = kvmalloc_array(nr_pages, @@ -3671,12 +3721,41 @@ static int io_uring_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &ctx->cq_fasync); } +static void io_cancel_async_work(struct io_ring_ctx *ctx, + struct files_struct *files) +{ + struct io_kiocb *req; + + if (list_empty(&ctx->task_list)) + return; + + spin_lock_irq(&ctx->task_lock); + + list_for_each_entry(req, &ctx->task_list, task_list) { + if (files && req->files != files) + continue; + + /* + * The below executes an smp_mb(), which pairs with the + * smp_mb() (A) in io_sq_wq_submit_work() such that either + * we store the REQ_F_CANCEL flag to req->flags or we see + * req->work_task set in io_sq_wq_submit_work().
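+ * This is the classic store/load pairing: each side performs its
+ * own store first (work_task there, REQ_F_CANCEL here) and then
+ * reads the location the other side stores to, so at least one
+ * side is guaranteed to observe the other's store.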
+ */ + smp_store_mb(req->flags, req->flags | REQ_F_CANCEL); /* B */ + + if (req->work_task) + send_sig(SIGINT, req->work_task, 1); + } + spin_unlock_irq(&ctx->task_lock); +} + static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { mutex_lock(&ctx->uring_lock); percpu_ref_kill(&ctx->refs); mutex_unlock(&ctx->uring_lock); + io_cancel_async_work(ctx, NULL); io_kill_timeouts(ctx); io_poll_remove_all(ctx); io_iopoll_reap_events(ctx); @@ -3684,6 +3763,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_ring_ctx_free(ctx); } +static int io_uring_flush(struct file *file, void *data) +{ + struct io_ring_ctx *ctx = file->private_data; + + if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) + io_cancel_async_work(ctx, data); + + return 0; +} + static int io_uring_release(struct inode *inode, struct file *file) { struct io_ring_ctx *ctx = file->private_data; @@ -3788,6 +3877,7 @@ out_fput: static const struct file_operations io_uring_fops = { .release = io_uring_release, + .flush = io_uring_flush, .mmap = io_uring_mmap, .poll = io_uring_poll, .fasync = io_uring_fasync, @@ -3799,6 +3889,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, struct io_rings *rings; size_t size, sq_array_offset; + /* make sure these are sane, as we already accounted them */ + ctx->sq_entries = p->sq_entries; + ctx->cq_entries = p->cq_entries; + size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); if (size == SIZE_MAX) return -EOVERFLOW; @@ -3815,8 +3909,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->cq_ring_entries = p->cq_entries; ctx->sq_mask = rings->sq_ring_mask; ctx->cq_mask = rings->cq_ring_mask; - ctx->sq_entries = rings->sq_ring_entries; - ctx->cq_entries = rings->cq_ring_entries; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index e25901ae3ff4..80867a1a94f2 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -559,6 +559,7 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, if (PageUptodate(page)) return 0; + ClearPageError(page); do { iomap_adjust_read_range(inode, iop, &block_start, @@ -1040,20 +1041,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) lock_page(page); size = i_size_read(inode); - if ((page->mapping != inode->i_mapping) || - (page_offset(page) > size)) { + offset = page_offset(page); + if (page->mapping != inode->i_mapping || offset > size) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; } /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_SHIFT) > size) + if (offset > size - PAGE_SIZE) length = offset_in_page(size); else length = PAGE_SIZE; - offset = page_offset(page); while (length > 0) { ret = iomap_apply(inode, offset, length, IOMAP_WRITE | IOMAP_FAULT, ops, page, diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 7b5f76efef02..45602f5a46b3 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -377,6 +377,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, return iomap_dio_bio_actor(inode, pos, length, dio, iomap); case IOMAP_INLINE: return iomap_dio_inline_actor(inode, pos, length, dio, iomap); + case IOMAP_DELALLOC: + /* + * DIO is not serialised against mmap() access at all, and so + * if the page_mkwrite occurs between the writeback and the + * iomap_apply() call in the DIO path, then it will see the + * DELALLOC block that 
the page-mkwrite allocated. + */ + pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n", + dio->iocb->ki_filp, current->comm); + return -EIO; default: WARN_ON_ONCE(1); return -EIO; @@ -394,7 +404,8 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, */ ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, const struct iomap_dio_ops *dops) + const struct iomap_ops *ops, const struct iomap_dio_ops *dops, + bool wait_for_completion) { struct address_space *mapping = iocb->ki_filp->f_mapping; struct inode *inode = file_inode(iocb->ki_filp); @@ -402,7 +413,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, loff_t pos = iocb->ki_pos, start = pos; loff_t end = iocb->ki_pos + count - 1, ret = 0; unsigned int flags = IOMAP_DIRECT; - bool wait_for_completion = is_sync_kiocb(iocb); struct blk_plug plug; struct iomap_dio *dio; @@ -411,6 +421,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (!count) return 0; + if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion)) + return -EIO; + dio = kmalloc(sizeof(*dio), GFP_KERNEL); if (!dio) return -ENOMEM; diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c index 152a230f668d..bd0cc3dcc980 100644 --- a/fs/iomap/swapfile.c +++ b/fs/iomap/swapfile.c @@ -169,6 +169,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis, return ret; } + /* + * If this swapfile doesn't contain even a single page-aligned + * contiguous range of blocks, reject this useless swapfile to + * prevent confusion later on. + */ + if (isi.nr_pages == 0) { + pr_warn("swapon: Cannot find a single usable page in file.\n"); + return -EINVAL; + } + *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; sis->max = isi.nr_pages; sis->pages = isi.nr_pages - 1; diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index f0fe641893a5..b9e6a7ec78be 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, inode->i_ino); + brelse(bh); return -EIO; } diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index cac468f04820..558e7c51ce0d 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, dir->i_ino); + brelse(bh); return 0; } diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 62cf497f18eb..5ef99b9ec8be 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -106,6 +106,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh) * for a checkpoint to free up some space in the log. */ void __jbd2_log_wait_for_space(journal_t *journal) +__acquires(&journal->j_state_lock) +__releases(&journal->j_state_lock) { int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 754ec3c47d6f..88146008b3e3 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -995,9 +995,10 @@ restart_loop: * journalled data) we need to unmap buffer and clear * more bits. We also need to be careful about the check * because the data page mapping can get cleared under - * out hands, which alse need not to clear more bits - * because the page and buffers will be freed and can - * never be reused once we are done with them. + * our hands. 
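
The iomap_dio_rw() change earlier in this hunk makes the completion mode an explicit caller decision instead of deriving it from is_sync_kiocb(), and the new WARN_ON rejects the one combination that can never work: a synchronous kiocb with asynchronous completion requested. As a rough sketch of what a filesystem caller now looks like (the ops structure name here is illustrative, not from this patch):

	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
	{
		/*
		 * Passing is_sync_kiocb(iocb) keeps the old default
		 * behaviour; a caller may pass true to force synchronous
		 * completion, but must never pass false for a synchronous
		 * kiocb.
		 */
		return iomap_dio_rw(iocb, to, &example_iomap_ops, NULL,
				    is_sync_kiocb(iocb));
	}
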
Note that if mapping == NULL, we don't + * need to make buffer unmapped because the page is + * already detached from the mapping and buffers cannot + * get reused. */ mapping = READ_ONCE(bh->b_page->mapping); if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c1ce2805c563..b7c5819bfc41 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -96,7 +96,6 @@ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); EXPORT_SYMBOL(jbd2_inode_cache); -static void __journal_abort_soft (journal_t *journal, int errno); static int jbd2_journal_create_slab(size_t slab_size); #ifdef CONFIG_JBD2_DEBUG @@ -805,7 +804,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, "at offset %lu on %s\n", __func__, blocknr, journal->j_devname); err = -EIO; - __journal_abort_soft(journal, err); + jbd2_journal_abort(journal, err); } } else { *retp = blocknr; /* +journal->j_blk_offset */ @@ -1349,8 +1348,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) @@ -2070,64 +2071,6 @@ int jbd2_journal_wipe(journal_t *journal, int write) return err; } -/* - * Journal abort has very specific semantics, which we describe - * for journal abort. - * - * Two internal functions, which provide abort to the jbd layer - * itself are here. - */ - -/* - * Quick version for internal journal use (doesn't lock the journal). - * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, - * and don't attempt to make any other journal updates. - */ -void __jbd2_journal_abort_hard(journal_t *journal) -{ - transaction_t *transaction; - - if (journal->j_flags & JBD2_ABORT) - return; - - printk(KERN_ERR "Aborting journal on device %s.\n", - journal->j_devname); - - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_ABORT; - transaction = journal->j_running_transaction; - if (transaction) - __jbd2_log_start_commit(journal, transaction->t_tid); - write_unlock(&journal->j_state_lock); -} - -/* Soft abort: record the abort error status in the journal superblock, - * but don't do any other IO. */ -static void __journal_abort_soft (journal_t *journal, int errno) -{ - int old_errno; - - write_lock(&journal->j_state_lock); - old_errno = journal->j_errno; - if (!journal->j_errno || errno == -ESHUTDOWN) - journal->j_errno = errno; - - if (journal->j_flags & JBD2_ABORT) { - write_unlock(&journal->j_state_lock); - if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) - jbd2_journal_update_sb_errno(journal); - return; - } - write_unlock(&journal->j_state_lock); - - __jbd2_journal_abort_hard(journal); - - jbd2_journal_update_sb_errno(journal); - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_REC_ERR; - write_unlock(&journal->j_state_lock); -} - /** * void jbd2_journal_abort () - Shutdown the journal immediately. * @journal: the journal to shutdown. @@ -2171,7 +2114,47 @@ static void __journal_abort_soft (journal_t *journal, int errno) void jbd2_journal_abort(journal_t *journal, int errno) { - __journal_abort_soft(journal, errno); + transaction_t *transaction; + + /* + * ESHUTDOWN always takes precedence because a file system check + * caused by any other journal abort error is not required after + * a shutdown triggered. 
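
Put differently: the first errno recorded at abort time sticks, and only a later -ESHUTDOWN may overwrite it. A condensed sketch of the precedence rule the rewritten jbd2_journal_abort() below implements (not patch code):

	static int abort_errno_precedence(int recorded, int incoming)
	{
		if (!recorded)			/* first error wins ...        */
			return incoming;
		if (incoming == -ESHUTDOWN)	/* ... unless shutdown follows */
			return incoming;
		return recorded;		/* later ordinary errors lose  */
	}
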
+ */ + write_lock(&journal->j_state_lock); + if (journal->j_flags & JBD2_ABORT) { + int old_errno = journal->j_errno; + + write_unlock(&journal->j_state_lock); + if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) { + journal->j_errno = errno; + jbd2_journal_update_sb_errno(journal); + } + return; + } + + /* + * Mark the abort as occurred and start current running transaction + * to release all journaled buffer. + */ + pr_err("Aborting journal on device %s.\n", journal->j_devname); + + journal->j_flags |= JBD2_ABORT; + journal->j_errno = errno; + transaction = journal->j_running_transaction; + if (transaction) + __jbd2_log_start_commit(journal, transaction->t_tid); + write_unlock(&journal->j_state_lock); + + /* + * Record errno to the journal super block, so that fsck and jbd2 + * layer could realise that a filesystem check is needed. + */ + jbd2_journal_update_sb_errno(journal); + + write_lock(&journal->j_state_lock); + journal->j_flags |= JBD2_REC_ERR; + write_unlock(&journal->j_state_lock); } /** diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index de992a70ddfe..be05fb96757c 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -171,8 +171,10 @@ static void wait_transaction_switching(journal_t *journal) DEFINE_WAIT(wait); if (WARN_ON(!journal->j_running_transaction || - journal->j_running_transaction->t_state != T_SWITCH)) + journal->j_running_transaction->t_state != T_SWITCH)) { + read_unlock(&journal->j_state_lock); return; + } prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); read_unlock(&journal->j_state_lock); @@ -1983,6 +1985,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); @@ -2074,6 +2079,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2098,11 +2104,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer which has been failed to + * write out, the jbd2 checkpoint procedure will not detect + * this failure and may lead to filesystem inconsistency + * after cleanup journal tail. 
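
The underlying problem here — an asynchronous writeback error that nobody is left to observe — is the same one the NFS changes later in this series address with the errseq machinery, where the error can be reported to the right caller instead of forcing an abort. For contrast, a minimal sketch of that sample-and-check pattern (filemap_sample_wb_err() and filemap_check_wb_err() are the real filemap API, used verbatim further below; the surrounding function is illustrative):

	static int example_flush(struct file *file)
	{
		/* remember which errors we have already seen ... */
		errseq_t since = filemap_sample_wb_err(file->f_mapping);

		/* ... wait for writeback to complete here ... */

		/* ... and report only errors raised after the sample */
		return filemap_check_wb_err(file->f_mapping, since);
	}
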
+ */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error while async write back metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } @@ -2530,6 +2551,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * be holding jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c index 406d9cc84ba8..79e771ab624f 100644 --- a/fs/jffs2/compr_rtime.c +++ b/fs/jffs2/compr_rtime.c @@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in, int outpos = 0; int pos=0; + if (*dstlen <= 3) + return -1; + memset(positions,0,sizeof(positions)); while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f20cff1194bb..776493713153 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) int ret; uint32_t now = JFFS2_NOW(); + mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { - if (fd->ino) + if (fd->ino) { + mutex_unlock(&f->sem); return -ENOTEMPTY; + } } + mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index 778275f48a87..5a7091746f68 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h @@ -38,6 +38,7 @@ struct jffs2_mount_opts { * users. This is implemented simply by means of not allowing the * latter users to write to the file system if the amount if the * available space is less then 'rp_size'. 
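
The new set_rp_size flag exists because 0 is a perfectly legal value for rp_size, so the value alone cannot record whether the user passed the option at all; both show_options and remount need that distinction. A sketch of the general pattern (names illustrative):

	struct example_opts {
		bool		set_rp_size;	/* rp_size= was given at mount */
		unsigned int	rp_size;	/* the value, possibly 0       */
	};

	static void example_show_options(struct seq_file *s,
					 const struct example_opts *o)
	{
		/* testing o->rp_size != 0 here would hide an explicit rp_size=0 */
		if (o->set_rp_size)
			seq_printf(s, ",rp_size=%u", o->rp_size / 1024);
	}
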
*/ + bool set_rp_size; unsigned int rp_size; }; diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index bccfc40b3a74..d19483fa1fe8 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r jffs2_free_full_dirent(fd); return -EIO; } + +#ifdef CONFIG_JFFS2_SUMMARY + /* + * we use CONFIG_JFFS2_SUMMARY because without it, we + * have checked it while mounting + */ + crc = crc32(0, fd->name, rd->nsize); + if (unlikely(crc != je32_to_cpu(rd->name_crc))) { + JFFS2_NOTICE("name CRC failed on dirent node at" + "%#08x: read %#08x,calculated %#08x\n", + ref_offset(ref), je32_to_cpu(rd->node_crc), crc); + jffs2_mark_node_obsolete(c, ref); + jffs2_free_full_dirent(fd); + return 0; + } +#endif } fd->nhash = full_name_hash(NULL, fd->name, rd->nsize); diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 5f7e284e0df3..0b1a7f68b712 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -1078,7 +1078,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo memcpy(&fd->name, rd->name, checkedlen); fd->name[checkedlen] = 0; - crc = crc32(0, fd->name, rd->nsize); + crc = crc32(0, fd->name, checkedlen); if (crc != je32_to_cpu(rd->name_crc)) { pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(rd->name_crc), crc); diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index be7c8a6a5748..4fe64519870f 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c @@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", je16_to_cpu(temp->u.nodetype)); jffs2_sum_disable_collecting(c->summary); + /* The above call removes the list, nothing more to do */ + goto bail_rwcompat; } else { BUG(); /* unknown node in summary information */ } @@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock c->summary->sum_num--; } + bail_rwcompat: jffs2_sum_reset_collected(c->summary); diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 0e6406c4f362..6839a61e8ff1 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root) if (opts->override_compr) seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); - if (opts->rp_size) + if (opts->set_rp_size) seq_printf(s, ",rp_size=%u", opts->rp_size / 1024); return 0; @@ -208,11 +208,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param) case Opt_rp_size: if (result.uint_32 > UINT_MAX / 1024) return invalf(fc, "jffs2: rp_size unrepresentable"); - opt = result.uint_32 * 1024; - if (opt > c->mtd->size) - return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB", - c->mtd->size / 1024); - c->mount_opts.rp_size = opt; + c->mount_opts.rp_size = result.uint_32 * 1024; + c->mount_opts.set_rp_size = true; break; default: return -EINVAL; @@ -221,11 +218,30 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param) return 0; } +static inline void jffs2_update_mount_opts(struct fs_context *fc) +{ + struct jffs2_sb_info *new_c = fc->s_fs_info; + struct jffs2_sb_info *c = JFFS2_SB_INFO(fc->root->d_sb); + + mutex_lock(&c->alloc_sem); + if (new_c->mount_opts.override_compr) { + c->mount_opts.override_compr = new_c->mount_opts.override_compr; + c->mount_opts.compr = new_c->mount_opts.compr; + } + if 
(new_c->mount_opts.set_rp_size) { + c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size; + c->mount_opts.rp_size = new_c->mount_opts.rp_size; + } + mutex_unlock(&c->alloc_sem); +} + static int jffs2_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; sync_filesystem(sb); + jffs2_update_mount_opts(fc); + return jffs2_do_remount_fs(sb, fc); } @@ -255,6 +271,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc) c->mtd = sb->s_mtd; c->os_priv = sb; + if (c->mount_opts.rp_size > c->mtd->size) + return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB", + c->mtd->size / 1024); + /* Initialize JFFS2 superblock locks, the further initialization will * be done later */ mutex_init(&c->alloc_sem); diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index caade185e568..6fe82ce8663e 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen) } else if (rc == -ENOSPC) { /* search for next smaller log2 block */ l2nb = BLKSTOL2(nblocks) - 1; - nblocks = 1 << l2nb; + nblocks = 1LL << l2nb; } else { /* Trim any already allocated blocks */ jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n"); diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index 29891fad3f09..aa03a904d5ab 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h @@ -183,7 +183,7 @@ typedef union dmtree { #define dmt_leafidx t1.leafidx #define dmt_height t1.height #define dmt_budmin t1.budmin -#define dmt_stree t1.stree +#define dmt_stree t2.stree /* * on-disk aggregate disk allocation map descriptor. diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h index 1e899298f7f0..b5d702df7111 100644 --- a/fs/jfs/jfs_filsys.h +++ b/fs/jfs/jfs_filsys.h @@ -268,5 +268,6 @@ * fsck() must be run to repair */ #define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */ +#define FM_STATE_MAX 0x0000000f /* max value of s_state */ #endif /* _H_JFS_FILSYS */ diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c index eb8b9e233d73..616de103dccc 100644 --- a/fs/jfs/jfs_mount.c +++ b/fs/jfs/jfs_mount.c @@ -36,6 +36,7 @@ #include <linux/fs.h> #include <linux/buffer_head.h> +#include <linux/log2.h> #include "jfs_incore.h" #include "jfs_filsys.h" @@ -365,6 +366,15 @@ static int chkSuper(struct super_block *sb) sbi->bsize = bsize; sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize); + /* check some fields for possible corruption */ + if (sbi->l2bsize != ilog2((u32)bsize) || + j_sb->pad != 0 || + le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) { + rc = -EINVAL; + jfs_err("jfs_mount: Mount Failure: superblock is corrupt!"); + goto out; + } + /* * For now, ignore s_pbsize, l2bfactor. All I/O going through buffer * cache. 
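
The chkSuper() additions above are plain cross-checks between redundant on-disk superblock fields, done before any of them is trusted. The same idea in isolation (field names as in the patch, the wrapper function is illustrative):

	#include <linux/log2.h>

	static bool jfs_sb_fields_sane(u32 bsize, u16 l2bsize, u16 pad, u32 state)
	{
		if (l2bsize != ilog2(bsize))	/* log2 field vs. block size */
			return false;
		if (pad != 0)			/* reserved bytes must be 0  */
			return false;
		return state <= FM_STATE_MAX;	/* s_state within FM_* range */
	}
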
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index e8c792b49616..c35bbaa19486 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -912,7 +912,7 @@ repeat: } fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - &name, 0); + NULL, 0); iput(inode); } diff --git a/fs/libfs.c b/fs/libfs.c index 5fd9cc0e2ac9..247b58a68240 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -887,7 +887,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct simple_attr *attr; - u64 val; + unsigned long long val; size_t size; ssize_t ret; @@ -905,7 +905,9 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, goto out; attr->set_buf[size] = '\0'; - val = simple_strtoll(attr->set_buf, NULL, 0); + ret = kstrtoull(attr->set_buf, 0, &val); + if (ret) + goto out; ret = attr->set(attr->data, val); if (ret == 0) ret = len; /* on success, claim we got the whole input */ diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 7d46fafdbbe5..584c03e11844 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -439,12 +439,7 @@ nlm_bind_host(struct nlm_host *host) * RPC rebind is required */ if ((clnt = host->h_rpcclnt) != NULL) { - if (time_after_eq(jiffies, host->h_nextrebind)) { - rpc_force_rebind(clnt); - host->h_nextrebind = jiffies + NLM_HOST_REBIND; - dprintk("lockd: next rebind in %lu jiffies\n", - host->h_nextrebind - jiffies); - } + nlm_rebind_host(host); } else { unsigned long increment = nlmsvc_timeout; struct rpc_timeout timeparms = { @@ -493,13 +488,20 @@ nlm_bind_host(struct nlm_host *host) return clnt; } -/* - * Force a portmap lookup of the remote lockd port +/** + * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port + * @host: NLM host handle for peer + * + * This is not needed when using a connection-oriented protocol, such as TCP. + * The existing autobind mechanism is sufficient to force a rebind when + * required, e.g. on connection state transitions. */ void nlm_rebind_host(struct nlm_host *host) { - dprintk("lockd: rebind host %s\n", host->h_name); + if (host->h_proto != IPPROTO_UDP) + return; + if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(host->h_rpcclnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 7cb5fd38eb14..7b09a9158e40 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -150,6 +150,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } +static bool minix_check_superblock(struct super_block *sb) +{ + struct minix_sb_info *sbi = minix_sb(sb); + + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + return false; + + /* + * s_max_size must not exceed the block mapping limitation. This check + * is only needed for V1 filesystems, since V2/V3 support an extra level + * of indirect blocks which places the limit well above U32_MAX. 
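
The bound in the check that follows is just the MINIX V1 block map spelled out: 7 direct block pointers, one indirect block holding 512 pointers, and one double-indirect block holding 512*512, each addressing a BLOCK_SIZE (1 KiB) block:

	/*
	 *   7 + 512 + 512*512      =     262,663 blocks
	 *   262,663 * 1024 bytes   = 268,966,912 bytes  (~256.5 MiB)
	 *
	 * comfortably below U32_MAX, which is why only V1 needs the
	 * explicit bound.
	 */
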
+ */ + if (sbi->s_version == MINIX_V1 && + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) + return false; + + return true; +} + static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; @@ -185,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; - sbi->s_max_size = ms->s_max_size; + s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; @@ -216,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; - sbi->s_max_size = m3s->s_max_size; + s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; @@ -228,11 +247,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; + if (!minix_check_superblock(s)) + goto out_illegal_sb; + /* * Allocate the buffer map to keep the superblock small. */ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) @@ -468,6 +488,13 @@ static struct inode *V1_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); @@ -501,6 +528,13 @@ static struct inode *V2_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 043c3fdbc8e7..446148792f41 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode, int n = 0; int i; int parent = minix_new_block(inode); + int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode, break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + minix_free_block(inode, nr); + err = -ENOMEM; + break; + } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode, bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); - return -ENOSPC; + return err; } static inline int splice_branch(struct inode *inode, diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 046cc96ee7ad..1fed906042aa 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= 
(minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, inode->i_sb->s_bdev); - } else if (block < 7) { + return 0; + } + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) + return 0; + + if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f7fc7ecccccc..9d00f31a2d9d 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= - minix_sb(sb)->s_max_size) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, sb->s_bdev); - } else if (block < DIRCOUNT) { + return 0; + } + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) + return 0; + + if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; diff --git a/fs/minix/minix.h b/fs/minix/minix.h index df081e8afcc3..168d45d3de73 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -32,7 +32,6 @@ struct minix_sb_info { unsigned long s_zmap_blocks; unsigned long s_firstdatazone; unsigned long s_log_zone_size; - unsigned long s_max_size; int s_dirsize; int s_namelen; struct buffer_head ** s_imap; diff --git a/fs/namespace.c b/fs/namespace.c index 2adfe7b166a3..76ea92994d26 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n) /* * vfsmount lock must be held for write */ -unsigned int mnt_get_count(struct mount *mnt) +int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP - unsigned int count = 0; + int count = 0; int cpu; for_each_possible_cpu(cpu) { @@ -1123,6 +1123,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); static void mntput_no_expire(struct mount *mnt) { LIST_HEAD(list); + int count; rcu_read_lock(); if (likely(READ_ONCE(mnt->mnt_ns))) { @@ -1146,7 +1147,9 @@ static void mntput_no_expire(struct mount *mnt) */ smp_mb(); mnt_add_count(mnt, -1); - if (mnt_get_count(mnt)) { + count = mnt_get_count(mnt); + if (count != 0) { + WARN_ON(count < 0); rcu_read_unlock(); unlock_mount_hash(); return; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index e7dd07f47825..e84c187d942e 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -127,7 +127,7 @@ config PNFS_BLOCK config PNFS_FLEXFILE_LAYOUT tristate depends on NFS_V4_1 && NFS_V3 - default m + default NFS_V4 config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN string "NFSv4.1 Implementation ID Domain" diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index f39924ba050b..fc775b0b5194 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -130,6 +130,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { list_for_each_entry(lo, &server->layouts, plh_layouts) { + if (!pnfs_layout_is_valid(lo)) + continue; if (stateid != NULL && !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) continue; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 05ed7be8a634..e7c0790308fe 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en 
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); do { + if (entry->label) + entry->label->len = NFS4_MAXLABELLEN; + status = xdr_decode(desc, entry, &stream); if (status != 0) { if (status == -EAGAIN) @@ -1070,6 +1073,15 @@ out_force: goto out; } +static void nfs_mark_dir_for_revalidate(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + + spin_lock(&inode->i_lock); + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; + spin_unlock(&inode->i_lock); +} + /* * We judge how long we want to trust negative * dentries by looking at the parent inode mtime. @@ -1104,19 +1116,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, __func__, dentry); return 1; case 0: - nfs_mark_for_revalidate(dir); - if (inode && S_ISDIR(inode->i_mode)) { - /* Purge readdir caches. */ - nfs_zap_caches(inode); - /* - * We can't d_drop the root of a disconnected tree: - * its d_hash is on the s_anon list and d_drop() would hide - * it from shrink_dcache_for_unmount(), leading to busy - * inodes on unmount and further oopses. - */ - if (IS_ROOT(dentry)) - return 1; - } + /* + * We can't d_drop the root of a disconnected tree: + * its d_hash is on the s_anon list and d_drop() would hide + * it from shrink_dcache_for_unmount(), leading to busy + * inodes on unmount and further oopses. + */ + if (inode && IS_ROOT(dentry)) + return 1; dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n", __func__, dentry); return 0; @@ -1185,6 +1192,13 @@ out: nfs_free_fattr(fattr); nfs_free_fhandle(fhandle); nfs4_label_free(label); + + /* + * If the lookup failed despite the dentry change attribute being + * a match, then we should revalidate the directory cache. + */ + if (!ret && nfs_verify_change_attribute(dir, dentry->d_time)) + nfs_mark_dir_for_revalidate(dir); return nfs_lookup_revalidate_done(dir, dentry, inode, ret); } @@ -1227,7 +1241,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, error = nfs_lookup_verify_inode(inode, flags); if (error) { if (error == -ESTALE) - nfs_zap_caches(dir); + nfs_mark_dir_for_revalidate(dir); goto out_bad; } nfs_advise_use_readdirplus(dir); @@ -1722,7 +1736,6 @@ out: dput(parent); return d; out_error: - nfs_mark_for_revalidate(dir); d = ERR_PTR(error); goto out; } diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 29f00da8a0b7..6b0bf4ebd812 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -571,6 +571,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { result = PTR_ERR(l_ctx); + nfs_direct_req_release(dreq); goto out_release; } dreq->l_ctx = l_ctx; @@ -989,6 +990,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { result = PTR_ERR(l_ctx); + nfs_direct_req_release(dreq); goto out_release; } dreq->l_ctx = l_ctx; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 95dc90570786..387a2cfa7e17 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -140,6 +140,7 @@ static int nfs_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id) return 0; /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } ssize_t @@ -580,12 +583,14 @@ static const struct 
vm_operations_struct nfs_file_vm_ops = { .page_mkwrite = nfs_vm_page_mkwrite, }; -static int nfs_need_check_write(struct file *filp, struct inode *inode) +static int nfs_need_check_write(struct file *filp, struct inode *inode, + int error) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(filp); - if (nfs_ctx_key_to_expire(ctx, inode)) + if (nfs_error_is_fatal_on_server(error) || + nfs_ctx_key_to_expire(ctx, inode)) return 1; return 0; } @@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); unsigned long written = 0; ssize_t result; + errseq_t since; + int error; result = nfs_key_timeout_notify(file, inode); if (result) @@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos > i_size_read(inode)) nfs_revalidate_mapping(inode, file->f_mapping); + since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); if (result > 0) { @@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; /* Return error values */ - if (nfs_need_check_write(file, inode)) { + error = filemap_check_wb_err(file->f_mapping, since); + if (nfs_need_check_write(file, inode, error)) { int err = nfs_wb_all(inode); if (err < 0) result = err; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 5657b7f2611f..1741d902b0d8 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -984,9 +984,8 @@ retry: goto out_mds; /* Use a direct mapping of ds_idx to pgio mirror_idx */ - if (WARN_ON_ONCE(pgio->pg_mirror_count != - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) - goto out_mds; + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; for (i = 0; i < pgio->pg_mirror_count; i++) { mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); @@ -1008,7 +1007,10 @@ retry: (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) pgio->pg_maxretrans = io_maxretrans; return; - +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; out_mds: trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_RW, @@ -1018,6 +1020,7 @@ out_mds: pgio->pg_lseg = NULL; pgio->pg_maxretrans = 0; nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; } static unsigned int diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c index 15f271401dcc..573b1da9342c 100644 --- a/fs/nfs/fscache-index.c +++ b/fs/nfs/fscache-index.c @@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data, return FSCACHE_CHECKAUX_OBSOLETE; memset(&auxdata, 0, sizeof(auxdata)); - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); + auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec; + auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec; + auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec; + auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec; if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index a6dcc2151e77..7d6721ec31d4 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -188,7 +188,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int /* create a cache index for looking up filehandles */ nfss->fscache = 
fscache_acquire_cookie(nfss->nfs_client->fscache, &nfs_fscache_super_index_def, - key, sizeof(*key) + ulen, + &key->key, + sizeof(key->key) + ulen, NULL, 0, nfss, 0, true); dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n", @@ -226,6 +227,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) } } +static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata, + struct nfs_inode *nfsi) +{ + memset(auxdata, 0, sizeof(*auxdata)); + auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec; + auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec; + auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec; + auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec; + + if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) + auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); +} + /* * Initialise the per-inode cache cookie pointer for an NFS inode. */ @@ -239,12 +253,7 @@ void nfs_fscache_init_inode(struct inode *inode) if (!(nfss->fscache && S_ISREG(inode->i_mode))) return; - memset(&auxdata, 0, sizeof(auxdata)); - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); - - if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4) - auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode); + nfs_fscache_update_auxdata(&auxdata, nfsi); nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache, &nfs_fscache_inode_object_def, @@ -264,9 +273,7 @@ void nfs_fscache_clear_inode(struct inode *inode) dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie); - memset(&auxdata, 0, sizeof(auxdata)); - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); + nfs_fscache_update_auxdata(&auxdata, nfsi); fscache_relinquish_cookie(cookie, &auxdata, false); nfsi->fscache = NULL; } @@ -306,9 +313,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp) if (!fscache_cookie_valid(cookie)) return; - memset(&auxdata, 0, sizeof(auxdata)); - auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime); - auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime); + nfs_fscache_update_auxdata(&auxdata, nfsi); if (inode_is_open_for_write(inode)) { dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index ad041cfbf9ec..6754c8607230 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -62,9 +62,11 @@ struct nfs_fscache_key { * cache object. 
*/ struct nfs_fscache_inode_auxdata { - struct timespec mtime; - struct timespec ctime; - u64 change_attr; + s64 mtime_sec; + s64 mtime_nsec; + s64 ctime_sec; + s64 ctime_nsec; + u64 change_attr; }; /* diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 2a03bfeec10a..53604cc090ca 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -826,6 +826,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat, do_update |= cache_validity & NFS_INO_INVALID_ATIME; if (request_mask & (STATX_CTIME|STATX_MTIME)) do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE; + if (request_mask & STATX_BLOCKS) + do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; if (do_update) { /* Update the attribute cache */ if (!(server->flags & NFS_MOUNT_NOAC)) @@ -959,16 +961,16 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct file *filp) { struct nfs_open_context *ctx; - const struct cred *cred = get_current_cred(); ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - put_cred(cred); + if (!ctx) return ERR_PTR(-ENOMEM); - } nfs_sb_active(dentry->d_sb); ctx->dentry = dget(dentry); - ctx->cred = cred; + if (filp) + ctx->cred = get_cred(filp->f_cred); + else + ctx->cred = get_current_cred(); ctx->ll_cred = NULL; ctx->state = NULL; ctx->mode = f_mode; @@ -1750,7 +1752,8 @@ out_noforce: status = nfs_post_op_update_inode_locked(inode, fattr, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME - | NFS_INO_INVALID_MTIME); + | NFS_INO_INVALID_MTIME + | NFS_INO_INVALID_BLOCKS); return status; } @@ -1857,7 +1860,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ATIME | NFS_INO_REVAL_FORCED - | NFS_INO_REVAL_PAGECACHE); + | NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_BLOCKS); /* Do atomic weak cache consistency updates */ nfs_wcc_update_inode(inode, fattr); @@ -2019,8 +2023,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; - else + else { + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_BLOCKS + | NFS_INO_REVAL_FORCED); cache_revalidated = false; + } /* Update attrtimeo value if we're out of the unstable period */ if (attr_changed) { @@ -2143,7 +2151,7 @@ static int nfsiod_start(void) { struct workqueue_struct *wq; dprintk("RPC: creating workqueue nfsiod\n"); - wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); + wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (wq == NULL) return -ENOMEM; nfsiod_workqueue = wq; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 447a3c17fa8e..9e717796e57b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -569,12 +569,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt, static inline struct inode *nfs_igrab_and_active(struct inode *inode) { - inode = igrab(inode); - if (inode != NULL && !nfs_sb_active(inode->i_sb)) { - iput(inode); - inode = NULL; + struct super_block *sb = inode->i_sb; + + if (sb && nfs_sb_active(sb)) { + if (igrab(inode)) + return inode; + nfs_sb_deactive(sb); } - return inode; + return NULL; } static inline void nfs_iput_and_deactive(struct inode *inode) diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index cb7c10e9721e..a2593b787cc7 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -32,6 +32,7 @@ #define MNT_fhs_status_sz (1) #define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE) #define 
MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE)) +#define MNT_fhandlev3_sz XDR_QUADLEN(NFS3_FHSIZE) #define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS) /* @@ -39,7 +40,7 @@ */ #define MNT_enc_dirpath_sz encode_dirpath_sz #define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz) -#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \ +#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandlev3_sz + \ MNT_authflav3_sz) /* diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 9287eb666322..2db17fdf516b 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -31,9 +31,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; /* * nfs_path - reconstruct the path given an arbitrary dentry * @base - used to return pointer to the end of devname part of path - * @dentry - pointer to dentry + * @dentry_in - pointer to dentry * @buffer - result buffer - * @buflen - length of buffer + * @buflen_in - length of buffer * @flags - options (see below) * * Helper function for constructing the server pathname @@ -48,15 +48,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; * the original device (export) name * (if unset, the original name is returned verbatim) */ -char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, - unsigned flags) +char *nfs_path(char **p, struct dentry *dentry_in, char *buffer, + ssize_t buflen_in, unsigned flags) { char *end; int namelen; unsigned seq; const char *base; + struct dentry *dentry; + ssize_t buflen; rename_retry: + buflen = buflen_in; + dentry = dentry_in; end = buffer+buflen; *--end = '\0'; buflen--; diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index c5c3fc6e6c60..26c94b32d6f4 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -253,37 +253,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) { - struct posix_acl *alloc = NULL, *dfacl = NULL; + struct posix_acl *orig = acl, *dfacl = NULL, *alloc; int status; if (S_ISDIR(inode->i_mode)) { switch(type) { case ACL_TYPE_ACCESS: - alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT); + alloc = get_acl(inode, ACL_TYPE_DEFAULT); if (IS_ERR(alloc)) goto fail; + dfacl = alloc; break; case ACL_TYPE_DEFAULT: - dfacl = acl; - alloc = acl = get_acl(inode, ACL_TYPE_ACCESS); + alloc = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(alloc)) goto fail; + dfacl = acl; + acl = alloc; break; } } if (acl == NULL) { - alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); + alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); if (IS_ERR(alloc)) goto fail; + acl = alloc; } status = __nfs3_proc_setacls(inode, acl, dfacl); - posix_acl_release(alloc); +out: + if (acl != orig) + posix_acl_release(acl); + if (dfacl != orig) + posix_acl_release(dfacl); return status; fail: - return PTR_ERR(alloc); + status = PTR_ERR(alloc); + goto out; } const struct xattr_handler *nfs3_xattr_handlers[] = { diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 1f60ab2535ee..23d75cddbb2e 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -35,6 +35,7 @@ */ #define NFS3_fhandle_sz (1+16) #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */ +#define NFS3_post_op_fh_sz (1+NFS3_fh_sz) #define NFS3_sattr_sz (15) #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) @@ -72,7 +73,7 @@ #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1) #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1) #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4) -#define NFS3_createres_sz 
(1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) +#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz)) #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1) diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index bb322d9de313..c4a98cbda6dd 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -570,6 +570,14 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0; } +static inline bool nfs4_stateid_is_next(const nfs4_stateid *s1, const nfs4_stateid *s2) +{ + u32 seq1 = be32_to_cpu(s1->seqid); + u32 seq2 = be32_to_cpu(s2->seqid); + + return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU); +} + static inline void nfs4_stateid_seqid_inc(nfs4_stateid *s1) { u32 seqid = be32_to_cpu(s1->seqid); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 54f1c1f626fc..6b31cb5f9c9d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -109,6 +109,7 @@ static int nfs4_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id) return filemap_fdatawrite(file->f_mapping); /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } #ifdef CONFIG_NFS_V4_2 @@ -135,7 +138,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in, /* Only offload copy if superblock is the same */ if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) return -EXDEV; - if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY)) + if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) || + !nfs_server_capable(file_inode(file_in), NFS_CAP_COPY)) return -EOPNOTSUPP; if (file_inode(file_in) == file_inode(file_out)) return -EOPNOTSUPP; @@ -210,6 +214,9 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off, if (remap_flags & ~REMAP_FILE_ADVISORY) return -EINVAL; + if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode)) + return -ETXTBSY; + /* check alignment w.r.t. 
clone_blksize */ ret = -EINVAL; if (bs) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6b29703d2fe1..304ab4cdaa8c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -774,6 +774,14 @@ static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, slot->seq_nr_last_acked = seqnr; } +static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, + struct nfs4_slot *slot) +{ + struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); + if (!IS_ERR(task)) + rpc_put_task_async(task); +} + static int nfs41_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -790,6 +798,7 @@ static int nfs41_sequence_process(struct rpc_task *task, goto out; session = slot->table->session; + clp = session->clp; trace_nfs4_sequence_done(session, res); @@ -804,7 +813,6 @@ static int nfs41_sequence_process(struct rpc_task *task, nfs4_slot_sequence_acked(slot, slot->seq_nr); /* Update the slot's sequence and clientid lease timer */ slot->seq_done = 1; - clp = session->clp; do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, @@ -852,10 +860,18 @@ static int nfs41_sequence_process(struct rpc_task *task, /* * Were one or more calls using this slot interrupted? * If the server never received the request, then our - * transmitted slot sequence number may be too high. + * transmitted slot sequence number may be too high. However, + * if the server did receive the request then it might + * accidentally give us a reply with a mismatched operation. + * We can sort this out by sending a lone sequence operation + * to the server on the same slot. */ if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { slot->seq_nr--; + if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { + nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); + res->sr_slot = NULL; + } goto retry_nowait; } /* @@ -1499,19 +1515,6 @@ static void nfs_state_log_update_open_stateid(struct nfs4_state *state) wake_up_all(&state->waitq); } -static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state, - const nfs4_stateid *stateid) -{ - u32 state_seqid = be32_to_cpu(state->open_stateid.seqid); - u32 stateid_seqid = be32_to_cpu(stateid->seqid); - - if (stateid_seqid == state_seqid + 1U || - (stateid_seqid == 1U && state_seqid == 0xffffffffU)) - nfs_state_log_update_open_stateid(state); - else - set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); -} - static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) { struct nfs_client *clp = state->owner->so_server->nfs_client; @@ -1537,21 +1540,19 @@ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) * i.e. The stateid seqids have to be initialised to 1, and * are then incremented on every state transition. 
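
Concretely, the nfs4_stateid_is_next() helper introduced in nfs4_fs.h above accepts exactly one successor for any seqid, with 0 skipped on wraparound (a seqid of 0 is special in NFSv4 and never used as an ordinary sequence value). Its core test, restated with sample values:

	static inline bool seqid_is_next(u32 seq1, u32 seq2)
	{
		return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU);
	}

	/*
	 * seqid_is_next(1, 2)           -> true
	 * seqid_is_next(5, 7)           -> false  (a transition was missed)
	 * seqid_is_next(0xffffffffU, 1) -> true   (wrap skips the reserved 0)
	 * seqid_is_next(0xffffffffU, 0) -> false
	 */
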
*/ -static bool nfs_need_update_open_stateid(struct nfs4_state *state, +static bool nfs_stateid_is_sequential(struct nfs4_state *state, const nfs4_stateid *stateid) { - if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 || - !nfs4_stateid_match_other(stateid, &state->open_stateid)) { + if (test_bit(NFS_OPEN_STATE, &state->flags)) { + /* The common case - we're updating to a new sequence number */ + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && + nfs4_stateid_is_next(&state->open_stateid, stateid)) { + return true; + } + } else { + /* This is the first OPEN in this generation */ if (stateid->seqid == cpu_to_be32(1)) - nfs_state_log_update_open_stateid(state); - else - set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); - return true; - } - - if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) { - nfs_state_log_out_of_order_open_stateid(state, stateid); - return true; + return true; } return false; } @@ -1625,16 +1626,16 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, int status = 0; for (;;) { - if (!nfs_need_update_open_stateid(state, stateid)) - return; - if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags)) + if (nfs_stateid_is_sequential(state, stateid)) break; + if (status) break; /* Rely on seqids for serialisation with NFSv4.0 */ if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) break; + set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); /* * Ensure we process the state changes in the same order @@ -1645,6 +1646,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, spin_unlock(&state->owner->so_lock); rcu_read_unlock(); trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); + if (!signal_pending(current)) { if (schedule_timeout(5*HZ) == 0) status = -EAGAIN; @@ -3241,8 +3243,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ @@ -3379,7 +3383,8 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, __be32 seqid_open; u32 dst_seqid; bool ret; - int seq; + int seq, status = -EAGAIN; + DEFINE_WAIT(wait); for (;;) { ret = false; @@ -3391,15 +3396,41 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, continue; break; } + + write_seqlock(&state->seqlock); seqid_open = state->open_stateid.seqid; - if (read_seqretry(&state->seqlock, seq)) - continue; dst_seqid = be32_to_cpu(dst->seqid); - if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0) - dst->seqid = cpu_to_be32(dst_seqid + 1); - else + + /* Did another OPEN bump the state's seqid? 
try again: */ + if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { dst->seqid = seqid_open; + write_sequnlock(&state->seqlock); + ret = true; + break; + } + + /* server says we're behind but we haven't seen the update yet */ + set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); + prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); + write_sequnlock(&state->seqlock); + trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); + + if (signal_pending(current)) + status = -EINTR; + else + if (schedule_timeout(5*HZ) != 0) + status = 0; + + finish_wait(&state->waitq, &wait); + + if (!status) + continue; + if (status == -EINTR) + break; + + /* we slept the whole 5 seconds, we must have lost a seqid */ + dst->seqid = cpu_to_be32(dst_seqid + 1); ret = true; break; } @@ -3457,10 +3488,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data) trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); /* Handle Layoutreturn errors */ - if (pnfs_roc_done(task, calldata->inode, - &calldata->arg.lr_args, - &calldata->res.lr_res, - &calldata->res.lr_ret) == -EAGAIN) + if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, + &calldata->res.lr_ret) == -EAGAIN) goto out_restart; /* hmm. we are done with the inode, and in the process of freeing @@ -4868,12 +4897,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred, u64 cookie, struct page **pages, unsigned int count, bool plus) { struct inode *dir = d_inode(dentry); + struct nfs_server *server = NFS_SERVER(dir); struct nfs4_readdir_arg args = { .fh = NFS_FH(dir), .pages = pages, .pgbase = 0, .count = count, - .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, .plus = plus, }; struct nfs4_readdir_res res; @@ -4888,9 +4917,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred, dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, dentry, (unsigned long long)cookie); + if (!(server->caps & NFS_CAP_SECURITY_LABEL)) + args.bitmask = server->attr_bitmask_nl; + else + args.bitmask = server->attr_bitmask; + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); res.pgbase = args.pgbase; - status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); + status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, + &res.seq_res, 0); if (status >= 0) { memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; @@ -5719,6 +5754,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); int ret, i; + /* You can't remove system.nfs4_acl: */ + if (buflen == 0) + return -EINVAL; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; if (npages > ARRAY_SIZE(pages)) @@ -5795,9 +5833,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - if (buflen < label.len) - return -ERANGE; - return 0; + return label.len; } static int nfs4_get_security_label(struct inode *inode, void *buf, @@ -6203,10 +6239,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); /* Handle Layoutreturn errors */ - if (pnfs_roc_done(task, data->inode, - &data->args.lr_args, - &data->res.lr_res, - &data->res.lr_ret) == -EAGAIN) + if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, + &data->res.lr_ret) == -EAGAIN) goto 
out_restart; switch (task->tk_status) { @@ -6255,10 +6289,10 @@ static void nfs4_delegreturn_release(void *calldata) struct nfs4_delegreturndata *data = calldata; struct inode *inode = data->inode; + if (data->lr.roc) + pnfs_roc_release(&data->lr.arg, &data->lr.res, + data->res.lr_ret); if (inode) { - if (data->lr.roc) - pnfs_roc_release(&data->lr.arg, &data->lr.res, - data->res.lr_ret); nfs_post_op_update_inode_force_wcc(inode, &data->fattr); nfs_iput_and_deactive(inode); } @@ -6333,16 +6367,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, nfs_fattr_init(data->res.fattr); data->timestamp = jiffies; data->rpc_status = 0; - data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred); data->inode = nfs_igrab_and_active(inode); - if (data->inode) { + if (data->inode || issync) { + data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, + cred); if (data->lr.roc) { data->args.lr_args = &data->lr.arg; data->res.lr_res = &data->lr.res; } - } else if (data->lr.roc) { - pnfs_roc_release(&data->lr.arg, &data->lr.res, 0); - data->lr.roc = false; } task_setup_data.callback_data = data; @@ -6924,9 +6956,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f data->arg.new_lock_owner, ret); } else data->cancelled = true; + trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); rpc_put_task(task); dprintk("%s: done, ret = %d!\n", __func__, ret); - trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); return ret; } @@ -7218,7 +7250,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } @@ -7825,9 +7862,11 @@ int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or * DS flags set. 
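
Restating the rules from the comment above as one predicate — unknown bits are rejected against a minor-version-specific mask, USE_PNFS_MDS and USE_NON_PNFS are mutually exclusive, and at least one role bit is required (a sketch, not the patch code):

	static bool exchgid_flags_valid(u32 flags, u32 allowed_mask)
	{
		if (flags & ~allowed_mask)		/* unknown bits       */
			return false;
		if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
		    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
			return false;			/* mutually exclusive */
		return flags & (EXCHGID4_FLAG_USE_NON_PNFS |
				EXCHGID4_FLAG_USE_PNFS_MDS |
				EXCHGID4_FLAG_USE_PNFS_DS);	/* >= one role */
	}
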
*/ -static int nfs4_check_cl_exchange_flags(u32 flags) +static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) { - if (flags & ~EXCHGID4_FLAG_MASK_R) + if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) + goto out_inval; + else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) goto out_inval; if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && (flags & EXCHGID4_FLAG_USE_NON_PNFS)) @@ -7852,6 +7891,7 @@ static void nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) { struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; + struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; struct nfs_client *clp = args->client; switch (task->tk_status) { @@ -7860,10 +7900,16 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); } + if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && + res->dir != NFS4_CDFS4_BOTH) { + rpc_task_close_connection(task); + if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) + rpc_restart_call(task); + } } static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { - .rpc_call_done = &nfs4_bind_one_conn_to_session_done, + .rpc_call_done = nfs4_bind_one_conn_to_session_done, }; /* @@ -7882,6 +7928,7 @@ int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, struct nfs41_bind_conn_to_session_args args = { .client = clp, .dir = NFS4_CDFC4_FORE_OR_BOTH, + .retries = 0, }; struct nfs41_bind_conn_to_session_res res; struct rpc_message msg = { @@ -8232,7 +8279,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre if (status != 0) goto out; - status = nfs4_check_cl_exchange_flags(resp->flags); + status = nfs4_check_cl_exchange_flags(resp->flags, + clp->cl_mvops->minor_version); if (status != 0) goto out; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b53bcf40e2a7..ea680f619438 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -733,9 +733,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) state = new; state->owner = owner; atomic_inc(&owner->so_count); - list_add_rcu(&state->inode_states, &nfsi->open_states); ihold(inode); state->inode = inode; + list_add_rcu(&state->inode_states, &nfsi->open_states); spin_unlock(&inode->i_lock); /* Note: The reclaim code dictates that we add stateless * and read-only stateids to the end of the list */ diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 04c57066a11a..b90642b022eb 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -96,7 +96,7 @@ static void nfs4_evict_inode(struct inode *inode) nfs_inode_return_delegation_noreclaim(inode); /* Note that above delegreturn would trigger pnfs return-on-close */ pnfs_return_layout(inode); - pnfs_destroy_layout(NFS_I(inode)); + pnfs_destroy_layout_final(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); } diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 9398c0b6e0a3..2295a934a154 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1291,6 +1291,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait); DECLARE_EVENT_CLASS(nfs4_getattr_event, TP_PROTO( diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7c0ff1a3b591..9a022a4fb964 100644 --- a/fs/nfs/nfs4xdr.c +++ 
b/fs/nfs/nfs4xdr.c @@ -3012,15 +3012,19 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; + uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); + + replen = hdr.replen + op_decode_hdr_maxsz; + encode_getdeviceinfo(xdr, args, &hdr); - /* set up reply kvec. Subtract notification bitmap max size (2) - * so that notification bitmap is put in xdr_buf tail */ + /* set up reply kvec. device_addr4 opaque data is read into the + * pages */ rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase, - args->pdev->pglen, hdr.replen - 2); + args->pdev->pglen, replen + 2 + 1); encode_nops(&hdr); } @@ -4169,7 +4173,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, return -EIO; if (len < NFS4_MAXLABELLEN) { if (label) { - memcpy(label->label, p, len); + if (label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); + } label->len = len; label->pi = pi; label->lfs = lfs; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 361cc10d6f95..c8081d2b4166 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -1147,7 +1147,12 @@ TRACE_EVENT(nfs_xdr_status, __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) + __field(int, version) __field(unsigned long, error) + __string(program, + xdr->rqst->rq_task->tk_client->cl_program->name) + __string(procedure, + xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) ), TP_fast_assign( @@ -1157,13 +1162,19 @@ TRACE_EVENT(nfs_xdr_status, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqstp->rq_xid); + __entry->version = task->tk_client->cl_vers; __entry->error = error; + __assign_str(program, + task->tk_client->cl_program->name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name) ), TP_printk( - "task:%u@%d xid=0x%08x error=%ld (%s)", + "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)", __entry->task_id, __entry->client_id, __entry->xid, - -__entry->error, nfs_show_status(__entry->error) + __get_str(program), __entry->version, + __get_str(procedure), -__entry->error, + nfs_show_status(__entry->error) ) ); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 20b3717cd7ca..f4407dd426bf 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -132,9 +132,44 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) } EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); +/* + * nfs_page_set_headlock - set the request PG_HEADLOCK + * @req: request that is to be locked + * + * this lock must be held when modifying req->wb_head + * + * return 0 on success, < 0 on error + */ +int +nfs_page_set_headlock(struct nfs_page *req) +{ + if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) + return 0; + + set_bit(PG_CONTENDED1, &req->wb_flags); + smp_mb__after_atomic(); + return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, + TASK_UNINTERRUPTIBLE); +} + +/* + * nfs_page_clear_headlock - clear the request PG_HEADLOCK + * @req: request that is to be locked + */ +void +nfs_page_clear_headlock(struct nfs_page *req) +{ + smp_mb__before_atomic(); + clear_bit(PG_HEADLOCK, &req->wb_flags); + smp_mb__after_atomic(); + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) + return; + wake_up_bit(&req->wb_flags, PG_HEADLOCK); +} + /* * nfs_page_group_lock - lock the head of the page group - * @req - request in group that is to be locked + * @req: request in group that is to be 
locked * * this lock must be held when traversing or modifying the page * group list @@ -144,36 +179,24 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); int nfs_page_group_lock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; + int ret; - WARN_ON_ONCE(head != head->wb_head); - - if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) - return 0; - - set_bit(PG_CONTENDED1, &head->wb_flags); - smp_mb__after_atomic(); - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, - TASK_UNINTERRUPTIBLE); + ret = nfs_page_set_headlock(req); + if (ret || req->wb_head == req) + return ret; + return nfs_page_set_headlock(req->wb_head); } /* * nfs_page_group_unlock - unlock the head of the page group - * @req - request in group that is to be unlocked + * @req: request in group that is to be unlocked */ void nfs_page_group_unlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - - smp_mb__before_atomic(); - clear_bit(PG_HEADLOCK, &head->wb_flags); - smp_mb__after_atomic(); - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) - return; - wake_up_bit(&head->wb_flags, PG_HEADLOCK); + if (req != req->wb_head) + nfs_page_clear_headlock(req->wb_head); + nfs_page_clear_headlock(req); } /* @@ -886,15 +909,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, pgio->pg_mirror_count = mirror_count; } -/* - * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) - */ -void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) -{ - pgio->pg_mirror_count = 1; - pgio->pg_mirror_idx = 0; -} - static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) { pgio->pg_mirror_count = 1; @@ -1177,38 +1191,38 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, if (desc->pg_error < 0) goto out_failed; - for (midx = 0; midx < desc->pg_mirror_count; midx++) { - if (midx) { - nfs_page_group_lock(req); + /* Create the mirror instances first, and fire them off */ + for (midx = 1; midx < desc->pg_mirror_count; midx++) { + nfs_page_group_lock(req); - /* find the last request */ - for (lastreq = req->wb_head; - lastreq->wb_this_page != req->wb_head; - lastreq = lastreq->wb_this_page) - ; + /* find the last request */ + for (lastreq = req->wb_head; + lastreq->wb_this_page != req->wb_head; + lastreq = lastreq->wb_this_page) + ; - dupreq = nfs_create_subreq(req, lastreq, - pgbase, offset, bytes); + dupreq = nfs_create_subreq(req, lastreq, + pgbase, offset, bytes); - nfs_page_group_unlock(req); - if (IS_ERR(dupreq)) { - desc->pg_error = PTR_ERR(dupreq); - goto out_failed; - } - } else - dupreq = req; + nfs_page_group_unlock(req); + if (IS_ERR(dupreq)) { + desc->pg_error = PTR_ERR(dupreq); + goto out_failed; + } - if (nfs_pgio_has_mirroring(desc)) - desc->pg_mirror_idx = midx; + desc->pg_mirror_idx = midx; if (!nfs_pageio_add_request_mirror(desc, dupreq)) goto out_cleanup_subreq; } + desc->pg_mirror_idx = 0; + if (!nfs_pageio_add_request_mirror(desc, req)) + goto out_failed; + return 1; out_cleanup_subreq: - if (req != dupreq) - nfs_pageio_cleanup_request(desc, dupreq); + nfs_pageio_cleanup_request(desc, dupreq); out_failed: nfs_pageio_error_cleanup(desc); return 0; @@ -1320,6 +1334,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) } } +/* + * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) + */ +void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +{ + nfs_pageio_complete(pgio); +} + int __init nfs_init_nfspagecache(void) 
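/*
 * nfs_page_set_headlock()/nfs_page_clear_headlock() above are the stock
 * bit-lock pattern: try the lock bit with test_and_set_bit(), and only on
 * failure set a separate "contended" bit so the unlock side can skip the
 * wake_up_bit() in the common uncontended case. A condensed sketch with
 * illustrative flag names (the real code uses PG_HEADLOCK and
 * PG_CONTENDED1):
 */
#define MY_LOCK		0
#define MY_CONTENDED	1

static int my_bit_lock(unsigned long *flags)
{
	if (!test_and_set_bit(MY_LOCK, flags))
		return 0;			/* fast path, no waiters */
	set_bit(MY_CONTENDED, flags);		/* ask the unlocker to wake us */
	smp_mb__after_atomic();
	return wait_on_bit_lock(flags, MY_LOCK, TASK_UNINTERRUPTIBLE);
}

static void my_bit_unlock(unsigned long *flags)
{
	smp_mb__before_atomic();		/* order the critical section first */
	clear_bit(MY_LOCK, flags);
	smp_mb__after_atomic();
	if (test_bit(MY_CONTENDED, flags))
		wake_up_bit(flags, MY_LOCK);	/* only pay for this when contended */
}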
{ nfs_page_cachep = kmem_cache_create("nfs_page", diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 443639cbb0cf..7e8c18218e68 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -294,6 +294,7 @@ void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) { struct inode *inode; + unsigned long i_state; if (!lo) return; @@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) if (!list_empty(&lo->plh_segs)) WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); pnfs_detach_layout_hdr(lo); + i_state = inode->i_state; spin_unlock(&inode->i_lock); pnfs_free_layout_hdr(lo); + /* Notify pnfs_destroy_layout_final() that we're done */ + if (i_state & (I_FREEING | I_CLEAR)) + wake_up_var(lo); } } @@ -723,8 +728,7 @@ pnfs_free_lseg_list(struct list_head *free_me) } } -void -pnfs_destroy_layout(struct nfs_inode *nfsi) +static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi) { struct pnfs_layout_hdr *lo; LIST_HEAD(tmp_list); @@ -742,9 +746,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi) pnfs_put_layout_hdr(lo); } else spin_unlock(&nfsi->vfs_inode.i_lock); + return lo; +} + +void pnfs_destroy_layout(struct nfs_inode *nfsi) +{ + __pnfs_destroy_layout(nfsi); } EXPORT_SYMBOL_GPL(pnfs_destroy_layout); +static bool pnfs_layout_removed(struct nfs_inode *nfsi, + struct pnfs_layout_hdr *lo) +{ + bool ret; + + spin_lock(&nfsi->vfs_inode.i_lock); + ret = nfsi->layout != lo; + spin_unlock(&nfsi->vfs_inode.i_lock); + return ret; +} + +void pnfs_destroy_layout_final(struct nfs_inode *nfsi) +{ + struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi); + + if (lo) + wait_var_event(lo, pnfs_layout_removed(nfsi, lo)); +} + static bool pnfs_layout_add_bulk_destroy_list(struct inode *inode, struct list_head *layout_list) @@ -1198,31 +1227,27 @@ out: return status; } +static bool +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, + enum pnfs_iomode iomode, + u32 seq) +{ + struct pnfs_layout_range recall_range = { + .length = NFS4_MAX_UINT64, + .iomode = iomode, + }; + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &recall_range, seq) != -EBUSY; +} + /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo) { - struct pnfs_layout_segment *s; - enum pnfs_iomode iomode; - u32 seq; - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return false; - - seq = lo->plh_return_seq; - iomode = lo->plh_return_iomode; - - /* Defer layoutreturn until all recalled lsegs are done */ - list_for_each_entry(s, &lo->plh_segs, pls_list) { - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) - continue; - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode) - continue; - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - } - - return true; + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode, + lo->plh_return_seq); } static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) @@ -1286,7 +1311,7 @@ _pnfs_return_layout(struct inode *ino) } valid_layout = pnfs_layout_is_valid(lo); pnfs_clear_layoutcommit(ino, &tmp_list); - pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0); + pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0); if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { struct pnfs_layout_range range = { @@ -1450,10 +1475,8 @@ out_noroc: return false; } -int pnfs_roc_done(struct rpc_task *task, struct inode *inode, - struct nfs4_layoutreturn_args **argpp, - struct nfs4_layoutreturn_res **respp, - int *ret) +int pnfs_roc_done(struct rpc_task *task, struct 
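/*
 * pnfs_destroy_layout_final() above pairs wait_var_event() with the
 * wake_up_var() added to pnfs_put_layout_hdr(): eviction sleeps until the
 * final reference has observably detached the layout header from the
 * inode. wake_up_var()/wait_var_event() key on the pointer value only, so
 * waking after the object is freed is safe. A reduced sketch of the
 * handshake with made-up names (the real wait condition is
 * pnfs_layout_removed()):
 */
/* releasing side: detach and free the object, then wake any waiter */
static void obj_release(struct owner *o, struct obj *x)
{
	spin_lock(&o->lock);
	o->current_obj = NULL;		/* detach under the lock */
	spin_unlock(&o->lock);
	obj_free(x);
	wake_up_var(x);			/* uses only the pointer value */
}

/* waiting side: sleep until the owner no longer points at @x */
static bool obj_detached(struct owner *o, struct obj *x)
{
	bool ret;

	spin_lock(&o->lock);
	ret = o->current_obj != x;
	spin_unlock(&o->lock);
	return ret;
}

static void obj_wait_released(struct owner *o, struct obj *x)
{
	wait_var_event(x, obj_detached(o, x));
}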
nfs4_layoutreturn_args **argpp, + struct nfs4_layoutreturn_res **respp, int *ret) { struct nfs4_layoutreturn_args *arg = *argpp; int retval = -EAGAIN; @@ -1486,7 +1509,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode, return 0; case -NFS4ERR_OLD_STATEID: if (!nfs4_layout_refresh_old_stateid(&arg->stateid, - &arg->range, inode)) + &arg->range, arg->inode)) break; *ret = -NFS4ERR_NOMATCHING_LAYOUT; return -EAGAIN; @@ -1501,12 +1524,18 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, int ret) { struct pnfs_layout_hdr *lo = args->layout; + struct inode *inode = args->inode; const nfs4_stateid *arg_stateid = NULL; const nfs4_stateid *res_stateid = NULL; struct nfs4_xdr_opaque_data *ld_private = args->ld_private; switch (ret) { case -NFS4ERR_NOMATCHING_LAYOUT: + spin_lock(&inode->i_lock); + if (pnfs_layout_is_valid(lo) && + nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid)) + pnfs_set_plh_return_info(lo, args->range.iomode, 0); + spin_unlock(&inode->i_lock); break; case 0: if (res->lrs_present) @@ -1957,6 +1986,27 @@ lookup_again: goto lookup_again; } + /* + * Because we free lsegs when sending LAYOUTRETURN, we need to wait + * for LAYOUTRETURN. + */ + if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { + spin_unlock(&ino->i_lock); + dprintk("%s wait for layoutreturn\n", __func__); + lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo)); + if (!IS_ERR(lseg)) { + pnfs_put_layout_hdr(lo); + dprintk("%s retrying\n", __func__); + trace_pnfs_update_layout(ino, pos, count, iomode, lo, + lseg, + PNFS_UPDATE_LAYOUT_RETRY); + goto lookup_again; + } + trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, + PNFS_UPDATE_LAYOUT_RETURN); + goto out_put_layout_hdr; + } + lseg = pnfs_find_lseg(lo, &arg, strict_iomode); if (lseg) { trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, @@ -2010,28 +2060,6 @@ lookup_again: nfs4_stateid_copy(&stateid, &lo->plh_stateid); } - /* - * Because we free lsegs before sending LAYOUTRETURN, we need to wait - * for LAYOUTRETURN even if first is true. - */ - if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { - spin_unlock(&ino->i_lock); - dprintk("%s wait for layoutreturn\n", __func__); - lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo)); - if (!IS_ERR(lseg)) { - if (first) - pnfs_clear_first_layoutget(lo); - pnfs_put_layout_hdr(lo); - dprintk("%s retrying\n", __func__); - trace_pnfs_update_layout(ino, pos, count, iomode, lo, - lseg, PNFS_UPDATE_LAYOUT_RETRY); - goto lookup_again; - } - trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, - PNFS_UPDATE_LAYOUT_RETURN); - goto out_put_layout_hdr; - } - if (pnfs_layoutgets_blocked(lo)) { trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, PNFS_UPDATE_LAYOUT_BLOCKED); @@ -2187,6 +2215,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data, &rng, GFP_KERNEL); if (!lgp) { pnfs_clear_first_layoutget(lo); + nfs_layoutget_end(lo); pnfs_put_layout_hdr(lo); return; } @@ -2340,7 +2369,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) * We got an entirely new state ID. 
Mark all segments for the * inode invalid, and retry the layoutget */ - pnfs_mark_layout_stateid_invalid(lo, &free_me); + struct pnfs_layout_range range = { + .iomode = IOMODE_ANY, + .length = NFS4_MAX_UINT64, + }; + pnfs_set_plh_return_info(lo, IOMODE_ANY, 0); + pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &range, 0); goto out_forget; } @@ -2359,19 +2394,10 @@ out_forget: spin_unlock(&ino->i_lock); lseg->pls_layout = lo; NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); + pnfs_free_lseg_list(&free_me); return ERR_PTR(-EAGAIN); } -static int -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!mark_lseg_invalid(lseg, tmp_list)) - return 0; - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); - return 1; -} - /** * pnfs_mark_matching_lsegs_return - Free or return matching layout segments * @lo: pointer to layout header @@ -2401,6 +2427,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, assert_spin_locked(&lo->plh_inode->i_lock); + if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) + tmp_list = &lo->plh_return_segs; + list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) if (pnfs_match_lseg_recall(lseg, return_range, seq)) { dprintk("%s: marking lseg %p iomode %d " @@ -2408,7 +2437,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); - if (mark_lseg_invalid_or_return(lseg, tmp_list)) + if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + tmp_list = &lo->plh_return_segs; + if (mark_lseg_invalid(lseg, tmp_list)) continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f8a38065c7e4..3d55edd6b25a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -255,6 +255,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp); void pnfs_layoutget_free(struct nfs4_layoutget *lgp); void pnfs_free_lseg_list(struct list_head *tmp_list); void pnfs_destroy_layout(struct nfs_inode *); +void pnfs_destroy_layout_final(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); int pnfs_destroy_layouts_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid, @@ -282,10 +283,8 @@ bool pnfs_roc(struct inode *ino, struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, const struct cred *cred); -int pnfs_roc_done(struct rpc_task *task, struct inode *inode, - struct nfs4_layoutreturn_args **argpp, - struct nfs4_layoutreturn_res **respp, - int *ret); +int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp, + struct nfs4_layoutreturn_res **respp, int *ret); void pnfs_roc_release(struct nfs4_layoutreturn_args *args, struct nfs4_layoutreturn_res *res, int ret); @@ -651,6 +650,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi) { } +static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi) +{ +} + static inline struct pnfs_layout_segment * pnfs_get_lseg(struct pnfs_layout_segment *lseg) { @@ -706,7 +709,7 @@ pnfs_roc(struct inode *ino, } static inline int -pnfs_roc_done(struct rpc_task *task, struct inode *inode, +pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp, struct nfs4_layoutreturn_res **respp, int *ret) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 913eb37c249b..613c3ef23e07 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -425,22 +425,29 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, destroy_list = 
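/*
 * The pnfs.h hunk above also shows the standard compile-out convention:
 * pnfs_destroy_layout_final() gets a real declaration when pNFS is built
 * in, and an empty static inline otherwise, so callers such as
 * nfs4_evict_inode() need no #ifdefs. The generic shape (feature and type
 * names here are illustrative):
 */
#ifdef CONFIG_MY_FEATURE
void my_feature_teardown(struct my_ctx *ctx);	/* real implementation */
#else
static inline void my_feature_teardown(struct my_ctx *ctx)
{
	/* feature compiled out: nothing to do */
}
#endif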
(subreq->wb_this_page == old_head) ? NULL : subreq->wb_this_page; + /* Note: lock subreq in order to change subreq->wb_head */ + nfs_page_set_headlock(subreq); WARN_ON_ONCE(old_head != subreq->wb_head); /* make sure old group is not used */ subreq->wb_this_page = subreq; + subreq->wb_head = subreq; clear_bit(PG_REMOVE, &subreq->wb_flags); /* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { + nfs_page_clear_headlock(subreq); nfs_free_request(subreq); + } else + nfs_page_clear_headlock(subreq); continue; } + nfs_page_clear_headlock(subreq); - subreq->wb_head = subreq; + nfs_release_request(old_head); if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { nfs_release_request(subreq); diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index b73d9dd37f73..26f2a50eceac 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c @@ -69,10 +69,14 @@ __state_in_grace(struct net *net, bool open) if (!open) return !list_empty(grace_list); + spin_lock(&grace_lock); list_for_each_entry(lm, grace_list, list) { - if (lm->block_opens) + if (lm->block_opens) { + spin_unlock(&grace_lock); return true; + } } + spin_unlock(&grace_lock); return false; } diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index f2f81561ebb6..4d6e71335bce 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -73,6 +73,7 @@ config NFSD_V4 select NFSD_V3 select FS_POSIX_ACL select SUNRPC_GSS + select CRYPTO select CRYPTO_MD5 select CRYPTO_SHA256 select GRACE_PERIOD diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 10ec5ecdf117..65c331f75e9c 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -78,6 +78,8 @@ enum { /* Checksum this amount of the request */ #define RC_CSUMLEN (256U) +int nfsd_drc_slab_create(void); +void nfsd_drc_slab_free(void); int nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); int nfsd_cache_lookup(struct svc_rqst *); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 3007b8945d38..662937472e9b 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -133,9 +133,13 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf) struct nfsd_file_mark, nfm_mark)); mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex); - fsnotify_put_mark(mark); - if (likely(nfm)) + if (nfm) { + fsnotify_put_mark(mark); break; + } + /* Avoid soft lockup race with nfsd_file_mark_put() */ + fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group); + fsnotify_put_mark(mark); } else mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex); @@ -747,6 +751,8 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags, continue; if (!nfsd_match_cred(nf->nf_cred, current_cred())) continue; + if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) + continue; if (nfsd_file_get(nf) != NULL) return nf; } diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 9a4ef815fb8c..ed53e206a299 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -139,7 +139,6 @@ struct nfsd_net { * Duplicate reply cache */ struct nfsd_drc_bucket *drc_hashtbl; - struct kmem_cache *drc_slab; /* max number of entries allowed in the cache */ unsigned int max_drc_entries; diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 86e5658651f1..8f077e66e613 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -857,9 +857,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, if (isdotent(name, 
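/*
 * The grace-period fix above adds the missing grace_lock around the list
 * walk in __state_in_grace(); note that the early "return true" must drop
 * the lock on its way out. The locked-walk-with-early-exit shape, reduced
 * to its essentials (the list head and lock are passed in purely for
 * illustration):
 */
static bool any_manager_blocks_opens(struct list_head *grace_list,
				     spinlock_t *grace_lock)
{
	struct lock_manager *lm;

	spin_lock(grace_lock);
	list_for_each_entry(lm, grace_list, list) {
		if (lm->block_opens) {
			spin_unlock(grace_lock);	/* unlock before returning */
			return true;
		}
	}
	spin_unlock(grace_lock);
	return false;
}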
namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); - /* filesystem root - cannot return filehandle for ".." */ + /* + * Don't return filehandle for ".." if we're at + * the filesystem or export root: + */ if (dchild == dparent) goto out; + if (dparent == exp->ex_path.dentry) + goto out; } else dchild = dget(dparent); } else diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 524111420b48..3c50d18fe8a9 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1121,6 +1121,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) switch (task->tk_status) { case -EIO: case -ETIMEDOUT: + case -EACCES: nfsd4_mark_cb_down(clp, task->tk_status); } break; @@ -1230,6 +1231,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) err = setup_callback_client(clp, &conn, ses); if (err) { nfsd4_mark_cb_down(clp, err); + if (c) + svc_xprt_put(c->cn_xprt); return; } } @@ -1241,6 +1244,7 @@ nfsd4_run_cb_work(struct work_struct *work) container_of(work, struct nfsd4_callback, cb_work); struct nfs4_client *clp = cb->cb_clp; struct rpc_clnt *clnt; + int flags; if (cb->cb_need_restart) { cb->cb_need_restart = false; @@ -1269,7 +1273,8 @@ nfsd4_run_cb_work(struct work_struct *work) } cb->cb_msg.rpc_cred = clp->cl_cb_cred; - rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, + flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN; + rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags, cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1c82d7dd54df..8cb2f744dde6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -266,6 +266,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, if (!nbl) { nbl= kmalloc(sizeof(*nbl), GFP_KERNEL); if (nbl) { + INIT_LIST_HEAD(&nbl->nbl_list); + INIT_LIST_HEAD(&nbl->nbl_lru); fh_copy_shallow(&nbl->nbl_fh, fh); locks_init_lock(&nbl->nbl_lock); nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, @@ -493,6 +495,8 @@ find_any_file(struct nfs4_file *f) { struct nfsd_file *ret; + if (!f) + return NULL; spin_lock(&f->fi_lock); ret = __nfs4_get_fd(f, O_RDWR); if (!ret) { @@ -504,6 +508,17 @@ find_any_file(struct nfs4_file *f) return ret; } +static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +{ + struct nfsd_file *ret = NULL; + + spin_lock(&f->fi_lock); + if (f->fi_deleg_file) + ret = nfsd_file_get(f->fi_deleg_file); + spin_unlock(&f->fi_lock); + return ret; +} + static atomic_long_t num_delegations; unsigned long max_delegations; @@ -1260,6 +1275,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) nfs4_free_stateowner(sop); } +static bool +nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) +{ + return list_empty(&stp->st_perfile); +} + static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; @@ -1330,9 +1351,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); + if (!unhash_ol_stateid(stp)) + return false; list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); - return unhash_ol_stateid(stp); + return true; } static void release_lock_stateid(struct nfs4_ol_stateid *stp) @@ -1397,13 +1420,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { - bool unhashed; - lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); - unhashed 
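/*
 * find_deleg_file() above is the take-a-reference-under-the-lock pattern:
 * fi_deleg_file can be torn down concurrently, so its refcount must be
 * bumped while fi_lock is held, and the caller then owns the reference,
 * dropping it with nfsd_file_put() when done. The generic shape, with an
 * illustrative container type:
 */
static struct nfsd_file *get_current_file(struct container *c)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&c->lock);
	if (c->file)
		nf = nfsd_file_get(c->file);	/* ref taken under the lock */
	spin_unlock(&c->lock);
	return nf;	/* caller must nfsd_file_put() a non-NULL result */
}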
= unhash_ol_stateid(stp); + if (!unhash_ol_stateid(stp)) + return false; release_open_stateid_locks(stp, reaplist); - return unhashed; + return true; } static void release_open_stateid(struct nfs4_ol_stateid *stp) @@ -2376,6 +2398,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid); @@ -2409,6 +2433,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid); @@ -2437,7 +2463,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = nf->fi_deleg_file; + file = find_deleg_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid); @@ -2449,6 +2477,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) nfs4_show_superblock(s, file); seq_printf(s, " }\n"); + nfsd_file_put(file); return 0; } @@ -6247,21 +6276,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, } static struct nfs4_ol_stateid * -find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) +find_lock_stateid(const struct nfs4_lockowner *lo, + const struct nfs4_ol_stateid *ost) { struct nfs4_ol_stateid *lst; - struct nfs4_client *clp = lo->lo_owner.so_client; - lockdep_assert_held(&clp->cl_lock); + lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); - list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { - if (lst->st_stid.sc_type != NFS4_LOCK_STID) - continue; - if (lst->st_stid.sc_file == fp) { - refcount_inc(&lst->st_stid.sc_count); - return lst; + /* If ost is not hashed, ost->st_locks will not be valid */ + if (!nfs4_ol_stateid_unhashed(ost)) + list_for_each_entry(lst, &ost->st_locks, st_locks) { + if (lst->st_stateowner == &lo->lo_owner) { + refcount_inc(&lst->st_stid.sc_count); + return lst; + } } - } return NULL; } @@ -6277,11 +6306,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&clp->cl_lock); - spin_lock(&fp->fi_lock); - retstp = find_lock_stateid(lo, fp); + if (nfs4_ol_stateid_unhashed(open_stp)) + goto out_close; + retstp = find_lock_stateid(lo, open_stp); if (retstp) - goto out_unlock; - + goto out_found; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); @@ -6290,22 +6319,26 @@ retry: stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; + spin_lock(&fp->fi_lock); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); -out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&clp->cl_lock); - if (retstp) { - if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { - nfs4_put_stid(&retstp->st_stid); - goto retry; - } - /* To keep mutex tracking happy */ - mutex_unlock(&stp->st_mutex); - stp = retstp; - } return stp; +out_found: + spin_unlock(&clp->cl_lock); + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } + /* To keep mutex tracking happy */ + mutex_unlock(&stp->st_mutex); + return retstp; +out_close: + spin_unlock(&clp->cl_lock); + 
mutex_unlock(&stp->st_mutex); + return NULL; } static struct nfs4_ol_stateid * @@ -6320,7 +6353,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, *new = false; spin_lock(&clp->cl_lock); - lst = find_lock_stateid(lo, fi); + lst = find_lock_stateid(lo, ost); spin_unlock(&clp->cl_lock); if (lst != NULL) { if (nfsd4_lock_ol_stateid(lst) == nfs_ok) @@ -7703,9 +7736,14 @@ nfs4_state_start_net(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; - ret = nfs4_state_create_net(net); + ret = get_nfsdfs(net); if (ret) return ret; + ret = nfs4_state_create_net(net); + if (ret) { + mntput(nn->nfsd_mnt); + return ret; + } locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) @@ -7774,6 +7812,7 @@ nfs4_state_shutdown_net(struct net *net) nfsd4_client_tracking_exit(net); nfs4_state_destroy_net(net); + mntput(nn->nfsd_mnt); } void diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 533d0fc3c96b..d6f244559e75 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3530,17 +3530,17 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, u32 zzz = 0; int pad; + /* + * svcrdma requires every READ payload to start somewhere + * in xdr->pages. + */ + if (xdr->iov == xdr->buf->head) { + xdr->iov = NULL; + xdr->end = xdr->p; + } + len = maxcount; v = 0; - - thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p)); - p = xdr_reserve_space(xdr, (thislen+3)&~3); - WARN_ON_ONCE(!p); - resp->rqstp->rq_vec[v].iov_base = p; - resp->rqstp->rq_vec[v].iov_len = thislen; - v++; - len -= thislen; - while (len) { thislen = min_t(long, len, PAGE_SIZE); p = xdr_reserve_space(xdr, (thislen+3)&~3); @@ -3559,6 +3559,8 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, read->rd_length = maxcount; if (nfserr) return nfserr; + if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount)) + return nfserr_io; xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3)); tmp = htonl(eof); diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 96352ab7bd81..4a258065188e 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -36,6 +36,8 @@ struct nfsd_drc_bucket { spinlock_t cache_lock; }; +static struct kmem_cache *drc_slab; + static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc); @@ -95,7 +97,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, { struct svc_cacherep *rp; - rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL); + rp = kmem_cache_alloc(drc_slab, GFP_KERNEL); if (rp) { rp->c_state = RC_UNUSED; rp->c_type = RC_NOCACHE; @@ -129,7 +131,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, atomic_dec(&nn->num_drc_entries); nn->drc_mem_usage -= sizeof(*rp); } - kmem_cache_free(nn->drc_slab, rp); + kmem_cache_free(drc_slab, rp); } static void @@ -141,6 +143,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, spin_unlock(&b->cache_lock); } +int nfsd_drc_slab_create(void) +{ + drc_slab = kmem_cache_create("nfsd_drc", + sizeof(struct svc_cacherep), 0, 0, NULL); + return drc_slab ? 
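/*
 * nfsd_drc_slab_create()/nfsd_drc_slab_free() above move the DRC slab
 * from per-namespace to module scope: one kmem_cache created at module
 * load and destroyed at unload, instead of one per struct nfsd_net. A
 * minimal sketch of that lifecycle wiring (module, slab, and object names
 * are illustrative):
 */
static struct kmem_cache *example_slab;

static int __init example_init(void)
{
	example_slab = kmem_cache_create("example_slab",
					 sizeof(struct example_obj),
					 0, 0, NULL);
	return example_slab ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_slab);	/* NULL-safe */
}

module_init(example_init);
module_exit(example_exit);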
0: -ENOMEM; +} + +void nfsd_drc_slab_free(void) +{ + kmem_cache_destroy(drc_slab); +} + int nfsd_reply_cache_init(struct nfsd_net *nn) { unsigned int hashsize; @@ -159,18 +173,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) if (status) goto out_nomem; - nn->drc_slab = kmem_cache_create("nfsd_drc", - sizeof(struct svc_cacherep), 0, 0, NULL); - if (!nn->drc_slab) - goto out_shrinker; - nn->drc_hashtbl = kcalloc(hashsize, sizeof(*nn->drc_hashtbl), GFP_KERNEL); if (!nn->drc_hashtbl) { nn->drc_hashtbl = vzalloc(array_size(hashsize, sizeof(*nn->drc_hashtbl))); if (!nn->drc_hashtbl) - goto out_slab; + goto out_shrinker; } for (i = 0; i < hashsize; i++) { @@ -180,8 +189,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) nn->drc_hashsize = hashsize; return 0; -out_slab: - kmem_cache_destroy(nn->drc_slab); out_shrinker: unregister_shrinker(&nn->nfsd_reply_cache_shrinker); out_nomem: @@ -209,8 +216,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn) nn->drc_hashtbl = NULL; nn->drc_hashsize = 0; - kmem_cache_destroy(nn->drc_slab); - nn->drc_slab = NULL; } /* @@ -464,8 +469,7 @@ found_entry: rtn = RC_REPLY; break; default: - printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); - nfsd_reply_cache_free_locked(b, rp, nn); + WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); } goto out; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 11b42c523f04..7f39d6091dfa 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1333,7 +1333,9 @@ void nfsd_client_rmdir(struct dentry *dentry) dget(dentry); ret = simple_rmdir(dir, dentry); WARN_ON_ONCE(ret); + fsnotify_rmdir(dir, dentry); d_delete(dentry); + dput(dentry); inode_unlock(dir); } @@ -1423,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = { }; MODULE_ALIAS_FS("nfsd"); +int get_nfsdfs(struct net *net) +{ + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + struct vfsmount *mnt; + + mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); + if (IS_ERR(mnt)) + return PTR_ERR(mnt); + nn->nfsd_mnt = mnt; + return 0; +} + #ifdef CONFIG_PROC_FS static int create_proc_exports_entry(void) { @@ -1451,7 +1465,6 @@ unsigned int nfsd_net_id; static __net_init int nfsd_init_net(struct net *net) { int retval; - struct vfsmount *mnt; struct nfsd_net *nn = net_generic(net, nfsd_net_id); retval = nfsd_export_init(net); @@ -1478,16 +1491,8 @@ static __net_init int nfsd_init_net(struct net *net) init_waitqueue_head(&nn->ntf_wq); seqlock_init(&nn->boot_lock); - mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); - if (IS_ERR(mnt)) { - retval = PTR_ERR(mnt); - goto out_mount_err; - } - nn->nfsd_mnt = mnt; return 0; -out_mount_err: - nfsd_reply_cache_shutdown(nn); out_drc_error: nfsd_idmap_shutdown(net); out_idmap_error: @@ -1500,7 +1505,6 @@ static __net_exit void nfsd_exit_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - mntput(nn->nfsd_mnt); nfsd_reply_cache_shutdown(nn); nfsd_idmap_shutdown(net); nfsd_export_shutdown(net); @@ -1519,12 +1523,9 @@ static int __init init_nfsd(void) int retval; printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); - retval = register_pernet_subsys(&nfsd_net_ops); - if (retval < 0) - return retval; retval = register_cld_notifier(); if (retval) - goto out_unregister_pernet; + return retval; retval = nfsd4_init_slabs(); if (retval) goto out_unregister_notifier; @@ -1533,19 +1534,29 @@ static int __init init_nfsd(void) goto out_free_slabs; nfsd_fault_inject_init(); /* nfsd fault injection controls */ nfsd_stat_init(); /* 
Statistics */ + retval = nfsd_drc_slab_create(); + if (retval) + goto out_free_stat; nfsd_lockd_init(); /* lockd->nfsd callbacks */ retval = create_proc_exports_entry(); if (retval) goto out_free_lockd; retval = register_filesystem(&nfsd_fs_type); if (retval) + goto out_free_exports; + retval = register_pernet_subsys(&nfsd_net_ops); + if (retval < 0) goto out_free_all; return 0; out_free_all: + unregister_pernet_subsys(&nfsd_net_ops); +out_free_exports: remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); out_free_lockd: nfsd_lockd_shutdown(); + nfsd_drc_slab_free(); +out_free_stat: nfsd_stat_shutdown(); nfsd_fault_inject_cleanup(); nfsd4_exit_pnfs(); @@ -1553,13 +1564,13 @@ out_free_slabs: nfsd4_free_slabs(); out_unregister_notifier: unregister_cld_notifier(); -out_unregister_pernet: - unregister_pernet_subsys(&nfsd_net_ops); return retval; } static void __exit exit_nfsd(void) { + unregister_pernet_subsys(&nfsd_net_ops); + nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); nfsd_stat_shutdown(); @@ -1569,7 +1580,6 @@ static void __exit exit_nfsd(void) nfsd_fault_inject_cleanup(); unregister_filesystem(&nfsd_fs_type); unregister_cld_notifier(); - unregister_pernet_subsys(&nfsd_net_ops); } MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index af2947551e9c..4ff0c5318a02 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -87,6 +87,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *); void nfsd_destroy(struct net *net); +int get_nfsdfs(struct net *); + struct nfsdfs_client { struct kref cl_ref; void (*cl_release)(struct kref *kref); @@ -97,6 +99,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn, struct nfsdfs_client *ncl, u32 id, const struct tree_descr *); void nfsd_client_rmdir(struct dentry *dentry); + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) #ifdef CONFIG_NFSD_V2_ACL extern const struct svc_version nfsd_acl_version2; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index c83ddac22f38..754c763374dd 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -118,6 +118,13 @@ done: return nfsd_return_attrs(nfserr, resp); } +/* Obsolete, replaced by MNTPROC_MNT. */ +static __be32 +nfsd_proc_root(struct svc_rqst *rqstp) +{ + return nfs_ok; +} + /* * Look up a path name component * Note: the dentry in the resp->fh may be negative if the file @@ -203,6 +210,13 @@ nfsd_proc_read(struct svc_rqst *rqstp) return fh_getattr(&resp->fh, &resp->stat); } +/* Reserved */ +static __be32 +nfsd_proc_writecache(struct svc_rqst *rqstp) +{ + return nfs_ok; +} + /* * Write data to a file * N.B. 
After this call resp->fh needs an fh_put @@ -617,6 +631,7 @@ static const struct svc_procedure nfsd_procedures2[18] = { .pc_xdrressize = ST+AT, }, [NFSPROC_ROOT] = { + .pc_func = nfsd_proc_root, .pc_decode = nfssvc_decode_void, .pc_encode = nfssvc_encode_void, .pc_argsize = sizeof(struct nfsd_void), @@ -654,6 +669,7 @@ static const struct svc_procedure nfsd_procedures2[18] = { .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4, }, [NFSPROC_WRITECACHE] = { + .pc_func = nfsd_proc_writecache, .pc_decode = nfssvc_decode_void, .pc_encode = nfssvc_encode_void, .pc_argsize = sizeof(struct nfsd_void), diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index e8bee8ff30c5..155a4e43b24e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -516,8 +516,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net) return; nfsd_shutdown_net(net); - printk(KERN_WARNING "nfsd: last server has exited, flushing export " - "cache\n"); + pr_info("nfsd: last server has exited, flushing export cache\n"); nfsd_export_flush(net); } diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index ffc78a0e28b2..b073bdc2e6e8 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -228,7 +228,7 @@ TRACE_EVENT(nfsd_file_acquire, TP_ARGS(rqstp, hash, inode, may_flags, nf, status), TP_STRUCT__entry( - __field(__be32, xid) + __field(u32, xid) __field(unsigned int, hash) __field(void *, inode) __field(unsigned int, may_flags) @@ -236,11 +236,11 @@ TRACE_EVENT(nfsd_file_acquire, __field(unsigned long, nf_flags) __field(unsigned char, nf_may) __field(struct file *, nf_file) - __field(__be32, status) + __field(u32, status) ), TP_fast_assign( - __entry->xid = rqstp->rq_xid; + __entry->xid = be32_to_cpu(rqstp->rq_xid); __entry->hash = hash; __entry->inode = inode; __entry->may_flags = may_flags; @@ -248,15 +248,15 @@ TRACE_EVENT(nfsd_file_acquire, __entry->nf_flags = nf ? nf->nf_flags : 0; __entry->nf_may = nf ? nf->nf_may : 0; __entry->nf_file = nf ? 
nf->nf_file : NULL; - __entry->status = status; + __entry->status = be32_to_cpu(status); ), TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u", - be32_to_cpu(__entry->xid), __entry->hash, __entry->inode, + __entry->xid, __entry->hash, __entry->inode, show_nf_may(__entry->may_flags), __entry->nf_ref, show_nf_flags(__entry->nf_flags), show_nf_may(__entry->nf_may), __entry->nf_file, - be32_to_cpu(__entry->status)) + __entry->status) ); DECLARE_EVENT_CLASS(nfsd_file_search_class, diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 005d1802ab40..b6f4b552c9af 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1184,6 +1184,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, iap->ia_mode = 0; iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + err = 0; host_err = 0; switch (type) { @@ -1416,6 +1419,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + host_err = vfs_create(dirp, dchild, iap->ia_mode, true); if (host_err < 0) { fh_drop_write(fhp); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 445eef41bfaf..91b58c897f92 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) if (!nilfs->ns_writer) return -ENOMEM; + inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); + err = nilfs_segctor_start_thread(nilfs->ns_writer); if (err) { kfree(nilfs->ns_writer); diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 5778d1347b35..d24548ed31b9 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn, old = FANOTIFY_E(old_fsn); new = FANOTIFY_E(new_fsn); - if (old_fsn->inode != new_fsn->inode || old->pid != new->pid || + if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid || old->fh_type != new->fh_type || old->fh_len != new->fh_len) return false; @@ -171,6 +171,17 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, if (!fsnotify_iter_should_report_type(iter_info, type)) continue; mark = iter_info->marks[type]; + + /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */ + marks_ignored_mask |= mark->ignored_mask; + + /* + * If the event is on dir and this mark doesn't care about + * events on dir, don't send it! + */ + if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR)) + continue; + /* * If the event is for a child and this mark doesn't care about * events on a child, don't send it! 
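/*
 * With the fanotify change above, every mark contributes its ignore mask,
 * while only marks interested in directory events contribute to the
 * positive mask for an FS_ISDIR event; the following hunk then reports
 * the event only if event_mask & marks_mask & ~marks_ignored_mask is
 * non-zero. A small standalone model of that decision (the type and the
 * EV_ISDIR constant are illustrative, and the ON_CHILD check is elided):
 */
struct simple_mark {
	u32 mask;
	u32 ignored_mask;
};

#define EV_ISDIR	0x40000000

static u32 compute_test_mask(u32 event_mask,
			     const struct simple_mark *marks, int n)
{
	u32 marks_mask = 0, marks_ignored_mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* ignore masks apply regardless of ISDIR/ON_CHILD */
		marks_ignored_mask |= marks[i].ignored_mask;
		/* a dir event only matches marks that want dir events */
		if ((event_mask & EV_ISDIR) && !(marks[i].mask & EV_ISDIR))
			continue;
		marks_mask |= marks[i].mask;
	}
	return event_mask & marks_mask & ~marks_ignored_mask;
}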
@@ -181,7 +192,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, continue; marks_mask |= mark->mask; - marks_ignored_mask |= mark->ignored_mask; } test_mask = event_mask & marks_mask & ~marks_ignored_mask; @@ -203,10 +213,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, user_mask &= ~FAN_ONDIR; } - if (event_mask & FS_ISDIR && - !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) - return 0; - return test_mask & user_mask; } @@ -314,7 +320,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, if (!event) goto out; init: __maybe_unused - fsnotify_init_event(&event->fse, inode); + /* + * Use the victim inode instead of the watching inode as the id for + * event queue, so event reported on parent is merged with event + * reported on child when both directory and child watches exist. + */ + fsnotify_init_event(&event->fse, (unsigned long)id); event->mask = mask; if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) event->pid = get_pid(task_pid(current)); diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index d510223d302c..589dee962993 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn, if (old->mask & FS_IN_IGNORED) return false; if ((old->mask == new->mask) && - (old_fsn->inode == new_fsn->inode) && + (old_fsn->objectid == new_fsn->objectid) && (old->name_len == new->name_len) && (!old->name_len || !strcmp(old->name, new->name))) return true; @@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group, mask &= ~IN_ISDIR; fsn_event = &event->fse; - fsnotify_init_event(fsn_event, inode); + fsnotify_init_event(fsn_event, (unsigned long)inode); event->mask = mask; event->wd = i_mark->wd; event->sync_cookie = cookie; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 107537a543fd..81ffc8629fc4 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) return ERR_PTR(-ENOMEM); } group->overflow_event = &oevent->fse; - fsnotify_init_event(group->overflow_event, NULL); + fsnotify_init_event(group->overflow_event, 0); oevent->mask = FS_Q_OVERFLOW; oevent->wd = -1; oevent->sync_cookie = 0; diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index d4359a1df3d5..672feb96e250 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -628,6 +628,12 @@ static int ntfs_read_locked_inode(struct inode *vi) } a = ctx->attr; /* Get the standard information attribute value. */ + if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset) + + le32_to_cpu(a->data.resident.value_length) > + (u8 *)ctx->mrec + vol->mft_record_size) { + ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode."); + goto unm_err_out; + } si = (STANDARD_INFORMATION*)((u8*)a + le16_to_cpu(a->data.resident.value_offset)); @@ -1809,6 +1815,12 @@ int ntfs_read_inode_mount(struct inode *vi) brelse(bh); } + if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) { + ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.", + le32_to_cpu(m->bytes_allocated), vol->mft_record_size); + goto err_out; + } + /* Apply the mst fixups. */ if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) { /* FIXME: Try to use the $MFTMirr now. 
*/ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index f9baefc76cf9..0a6fe7d5aba7 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *idata = &di->id2.i_data; + /* No need to punch hole beyond i_size. */ + if (start >= i_size_read(inode)) + return 0; + if (end > i_size_read(inode)) end = i_size_read(inode); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 9cd0a6815933..7f66e3342475 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2304,7 +2304,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode, struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; loff_t end = offset + bytes; - int ret = 0, credits = 0, locked = 0; + int ret = 0, credits = 0; ocfs2_init_dealloc_ctxt(&dealloc); @@ -2315,13 +2315,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode, !dwc->dw_orphaned) goto out; - /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we - * are in that context. */ - if (dwc->dw_writer_pid != task_pid_nr(current)) { - inode_lock(inode); - locked = 1; - } - ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); @@ -2402,8 +2395,6 @@ out: if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); - if (locked) - inode_unlock(inode); ocfs2_dio_free_write_ctx(inode, dwc); return ret; diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index a368350d4c27..c843bc318382 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2052,7 +2052,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g o2hb_nego_timeout_handler, reg, NULL, ®->hr_handler_list); if (ret) - goto free; + goto remove_item; ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key, sizeof(struct o2hb_nego_msg), @@ -2067,6 +2067,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g unregister_handler: o2net_unregister_handler_list(®->hr_handler_list); +remove_item: + spin_lock(&o2hb_live_lock); + list_del(®->hr_all_item); + if (o2hb_global_heartbeat_active()) + clear_bit(reg->hr_region_num, o2hb_region_bitmap); + spin_unlock(&o2hb_live_lock); free: kfree(reg); return ERR_PTR(ret); diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 8e4f1ace467c..1de77f1a600b 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -275,7 +275,6 @@ static ssize_t dlmfs_file_write(struct file *filp, loff_t *ppos) { int bytes_left; - ssize_t writelen; char *lvb_buf; struct inode *inode = file_inode(filp); @@ -285,32 +284,30 @@ static ssize_t dlmfs_file_write(struct file *filp, if (*ppos >= i_size_read(inode)) return -ENOSPC; + /* don't write past the lvb */ + if (count > i_size_read(inode) - *ppos) + count = i_size_read(inode) - *ppos; + if (!count) return 0; if (!access_ok(buf, count)) return -EFAULT; - /* don't write past the lvb */ - if ((count + *ppos) > i_size_read(inode)) - writelen = i_size_read(inode) - *ppos; - else - writelen = count - *ppos; - - lvb_buf = kmalloc(writelen, GFP_NOFS); + lvb_buf = kmalloc(count, GFP_NOFS); if (!lvb_buf) return -ENOMEM; - bytes_left = copy_from_user(lvb_buf, buf, writelen); - writelen -= bytes_left; - if (writelen) - user_dlm_write_lvb(inode, lvb_buf, writelen); + bytes_left = copy_from_user(lvb_buf, buf, count); + count -= bytes_left; + if (count) + user_dlm_write_lvb(inode, lvb_buf, count); 
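/*
 * The dlmfs_file_write() rewrite above follows the usual bounded-write
 * shape: clamp the request to the object size before allocating, then
 * subtract whatever copy_from_user() could not transfer, so only bytes
 * actually copied are committed and reported. A reduced sketch
 * (commit_bytes() stands in for user_dlm_write_lvb(), and the kernel
 * buffer is passed in for brevity):
 */
static ssize_t bounded_write(struct inode *inode, const char __user *buf,
			     size_t count, loff_t *ppos, char *kbuf)
{
	size_t not_copied;

	if (*ppos >= i_size_read(inode))
		return -ENOSPC;
	/* never write past the end of the object */
	if (count > i_size_read(inode) - *ppos)
		count = i_size_read(inode) - *ppos;
	if (!count)
		return 0;

	not_copied = copy_from_user(kbuf, buf, count);
	count -= not_copied;		/* commit only what arrived */
	if (count)
		commit_bytes(inode, kbuf, count);
	*ppos += count;
	return count;
}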
kfree(lvb_buf); - *ppos = *ppos + writelen; - mlog(0, "wrote %zd bytes\n", writelen); - return writelen; + *ppos = *ppos + count; + mlog(0, "wrote %zu bytes\n", count); + return count; } static void dlmfs_init_once(void *foo) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8a2e284ccfcd..50a863fc1779 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res, &ocfs2_nfs_sync_lops, osb); } +static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb) +{ + ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + init_rwsem(&osb->nfs_sync_rwlock); +} + void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb) { struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres; @@ -2855,14 +2861,25 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) if (ocfs2_is_hard_readonly(osb)) return -EROFS; + if (ex) + down_write(&osb->nfs_sync_rwlock); + else + down_read(&osb->nfs_sync_rwlock); + if (ocfs2_mount_local(osb)) return 0; status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE, 0, 0); - if (status < 0) + if (status < 0) { mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); + } + return status; } @@ -2873,6 +2890,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex) if (!ocfs2_mount_local(osb)) ocfs2_cluster_unlock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); } int ocfs2_trim_fs_lock(struct ocfs2_super *osb, @@ -3340,7 +3361,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) local: ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); - ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + ocfs2_nfs_sync_lock_init(osb); ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb); osb->cconn = conn; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 6cd5e4924e4d..ab2b0d74ad03 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1244,22 +1244,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) goto bail_unlock; } } + down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); - goto bail_unlock; + goto bail_unlock_alloc; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { + down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); - goto bail_unlock; + goto bail_unlock_alloc; } } @@ -1272,6 +1274,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) bail_commit: ocfs2_commit_trans(osb, handle); +bail_unlock_alloc: + up_write(&OCFS2_I(inode)->ip_alloc_sem); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9150cfa4df7d..0a8cd8e59a92 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -326,8 +326,8 @@ struct ocfs2_super spinlock_t osb_lock; u32 s_next_generation; unsigned long osb_flags; - s16 s_inode_steal_slot; - s16 s_meta_steal_slot; + u16 s_inode_steal_slot; + u16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; atomic_t s_num_meta_stolen; @@ -394,6 +394,7 @@ struct 
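/*
 * The dlmglue changes above layer a local rw_semaphore under the
 * cluster-wide NFS sync lock: take the local lock first, then the cluster
 * lock, and back the local lock out if the cluster lock fails, so
 * ocfs2_nfs_sync_lock()/ocfs2_nfs_sync_unlock() always stay balanced.
 * Skeleton of the two-level scheme (cluster_lock()/cluster_unlock() are
 * stand-ins for the ocfs2 cluster calls, and the local-mount shortcut is
 * omitted):
 */
static int two_level_lock(struct rw_semaphore *local, int ex)
{
	int status;

	if (ex)
		down_write(local);
	else
		down_read(local);

	status = cluster_lock(ex);
	if (status < 0) {
		/* the failure path must undo the local lock */
		if (ex)
			up_write(local);
		else
			up_read(local);
	}
	return status;
}

static void two_level_unlock(struct rw_semaphore *local, int ex)
{
	cluster_unlock(ex);		/* release in reverse order */
	if (ex)
		up_write(local);
	else
		up_read(local);
}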
ocfs2_super struct ocfs2_lock_res osb_super_lockres; struct ocfs2_lock_res osb_rename_lockres; struct ocfs2_lock_res osb_nfs_sync_lockres; + struct rw_semaphore nfs_sync_rwlock; struct ocfs2_lock_res osb_trim_fs_lockres; struct mutex obs_trim_fs_mutex; struct ocfs2_dlm_debug *osb_dlm_debug; diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 0db4a7ec58a2..dcef83c8796d 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -290,7 +290,7 @@ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ -#define OCFS2_INVALID_SLOT -1 +#define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 @@ -326,8 +326,8 @@ struct ocfs2_system_inode_info { enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, -#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 69c21a3843af..5e0eaea47405 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) { spin_lock(&osb->osb_lock); if (type == INODE_ALLOC_SYSTEM_INODE) - osb->s_inode_steal_slot = slot; + osb->s_inode_steal_slot = (u16)slot; else if (type == EXTENT_ALLOC_SYSTEM_INODE) - osb->s_meta_steal_slot = slot; + osb->s_meta_steal_slot = (u16)slot; spin_unlock(&osb->osb_lock); } @@ -2827,9 +2827,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) goto bail; } - inode_alloc_inode = - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, - suballoc_slot); + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + else + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + INODE_ALLOC_SYSTEM_INODE, suballoc_slot); if (!inode_alloc_inode) { /* the error code could be inaccurate, but we are not able to * get the correct one. */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c81e86c62380..60b4b6df1ed3 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -78,7 +78,7 @@ struct mount_options unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; - signed short slot; + unsigned short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; @@ -1334,7 +1334,7 @@ static int ocfs2_parse_options(struct super_block *sb, goto bail; } if (option) - mopt->slot = (s16)option; + mopt->slot = (u16)option; break; case Opt_commit: if (match_int(&args[0], &option)) { @@ -1692,6 +1692,7 @@ static void ocfs2_inode_init_once(void *data) oi->ip_blkno = 0ULL; oi->ip_clusters = 0; + oi->ip_next_orphan = NULL; ocfs2_resv_init_once(&oi->ip_la_data_resv); diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index a5612abc0936..bcd4fd5ad175 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -311,23 +311,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { int ret; - struct orangefs_read_options *ro; - orangefs_stats.reads++; - /* - * Remember how they set "count" in read(2) or pread(2) or whatever - - * users can use count as a knob to control orangefs io size and later - * we can try to help them fill as many pages as possible in readpage. 
- */ - if (!iocb->ki_filp->private_data) { - iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL); - if (!iocb->ki_filp->private_data) - return(ENOMEM); - ro = iocb->ki_filp->private_data; - ro->blksiz = iter->count; - } - down_read(&file_inode(iocb->ki_filp)->i_rwsem); ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp)); if (ret) @@ -615,12 +600,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl) return rc; } -static int orangefs_file_open(struct inode * inode, struct file *file) -{ - file->private_data = NULL; - return generic_file_open(inode, file); -} - static int orangefs_flush(struct file *file, fl_owner_t id) { /* @@ -634,9 +613,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id) struct inode *inode = file->f_mapping->host; int r; - kfree(file->private_data); - file->private_data = NULL; - if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); inode->i_state &= ~I_DIRTY_TIME; @@ -659,7 +635,7 @@ const struct file_operations orangefs_file_operations = { .lock = orangefs_lock, .unlocked_ioctl = orangefs_ioctl, .mmap = orangefs_file_mmap, - .open = orangefs_file_open, + .open = generic_file_open, .flush = orangefs_flush, .release = orangefs_file_release, .fsync = orangefs_fsync, diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index efb12197da18..636892ffec0b 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -259,46 +259,19 @@ static int orangefs_readpage(struct file *file, struct page *page) pgoff_t index; /* which page */ struct page *next_page; char *kaddr; - struct orangefs_read_options *ro = file->private_data; loff_t read_size; - loff_t roundedup; int buffer_index = -1; /* orangefs shared memory slot */ int slot_index; /* index into slot */ int remaining; /* - * If they set some miniscule size for "count" in read(2) - * (for example) then let's try to read a page, or the whole file - * if it is smaller than a page. Once "count" goes over a page - * then lets round up to the highest page size multiple that is - * less than or equal to "count" and do that much orangefs IO and - * try to fill as many pages as we can from it. - * - * "count" should be represented in ro->blksiz. - * - * inode->i_size = file size. + * Get up to this many bytes from Orangefs at a time and try + * to fill them into the page cache at once. Tests with dd made + * this seem like a reasonable static number, if there was + * interest perhaps this number could be made settable through + * sysfs... */ - if (ro) { - if (ro->blksiz < PAGE_SIZE) { - if (inode->i_size < PAGE_SIZE) - read_size = inode->i_size; - else - read_size = PAGE_SIZE; - } else { - roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
- ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) : - ro->blksiz; - if (roundedup > inode->i_size) - read_size = inode->i_size; - else - read_size = roundedup; - - } - } else { - read_size = PAGE_SIZE; - } - if (!read_size) - read_size = PAGE_SIZE; + read_size = 524288; if (PageDirty(page)) orangefs_launder_page(page); diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 34a6c99fa29b..3003007681a0 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -239,10 +239,6 @@ struct orangefs_write_range { kgid_t gid; }; -struct orangefs_read_options { - ssize_t blksiz; -}; - extern struct orangefs_stats orangefs_stats; /* diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index ed6e2d6cf7a1..001f798b1391 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -47,7 +47,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) { ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; - int uninitialized_var(error); + int error = 0; size_t slen; if (!(old->d_inode->i_opflags & IOP_XATTR) || @@ -83,6 +83,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) if (ovl_is_private_xattr(name)) continue; + + error = security_inode_copy_up_xattr(name); + if (error < 0 && error != -EOPNOTSUPP) + break; + if (error == 1) { + error = 0; + continue; /* Discard */ + } retry: size = vfs_getxattr(old, name, value, value_size); if (size == -ERANGE) @@ -106,13 +114,6 @@ retry: goto retry; } - error = security_inode_copy_up_xattr(name); - if (error < 0 && error != -EOPNOTSUPP) - break; - if (error == 1) { - error = 0; - continue; /* Discard */ - } error = vfs_setxattr(new, name, value, size, 0); if (error) { if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name)) @@ -863,7 +864,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, int ovl_copy_up_flags(struct dentry *dentry, int flags) { int err = 0; - const struct cred *old_cred = ovl_override_creds(dentry->d_sb); + const struct cred *old_cred; bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED); /* @@ -874,6 +875,7 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags) if (WARN_ON(disconnected && d_is_dir(dentry))) return -EIO; + old_cred = ovl_override_creds(dentry->d_sb); while (!err) { struct dentry *next; struct dentry *parent = NULL; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 29abdb1d3b5c..6509ec3cb373 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -940,8 +940,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) buflen -= thislen; memcpy(&buf[buflen], name, thislen); - tmp = dget_dlock(d->d_parent); spin_unlock(&d->d_lock); + tmp = dget_parent(d); dput(d); d = tmp; diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 73c9775215b3..11dd8177770d 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -482,7 +482,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, if (IS_ERR_OR_NULL(this)) return this; - if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { + if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); this = ERR_PTR(-EIO); } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 15e4fa288475..7a08a576f7b2 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -21,13 +21,16 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode) return 'm'; } +/* No atime modification nor notify on underlying */ +#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY) + static struct file
*ovl_open_realfile(const struct file *file, struct inode *realinode) { struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; - int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; + int flags = file->f_flags | OVL_OPEN_FLAGS; old_cred = ovl_override_creds(inode->i_sb); realfile = open_with_fake_path(&file->f_path, flags, realinode, @@ -48,8 +51,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags) struct inode *inode = file_inode(file); int err; - /* No atime modificaton on underlying */ - flags |= O_NOATIME | FMODE_NONOTIFY; + flags |= OVL_OPEN_FLAGS; /* If some flag changed that cannot be changed then something's amiss */ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) @@ -102,7 +104,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real, } /* Did the flags change since open? */ - if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME)) + if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS)) return ovl_change_flags(real->file, file->f_flags); return 0; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index b045cf1826fc..56b55397a7a0 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -337,7 +337,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name, goto out; if (!value && !upperdentry) { + old_cred = ovl_override_creds(dentry->d_sb); err = vfs_getxattr(realdentry, name, NULL, 0); + revert_creds(old_cred); if (err < 0) goto out_drop_write; } @@ -881,7 +883,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL; bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, oip->index); - int fsid = bylower ? oip->lowerpath->layer->fsid : 0; + int fsid = bylower ? lowerpath->layer->fsid : 0; bool is_dir, metacopy = false; unsigned long ino = 0; int err = oip->newinode ? 
-EEXIST : -ENOMEM; @@ -931,6 +933,8 @@ struct inode *ovl_get_inode(struct super_block *sb, err = -ENOMEM; goto out_err; } + ino = realinode->i_ino; + fsid = lowerpath->layer->fsid; } ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid); ovl_inode_init(inode, upperdentry, lowerdentry, oip->lowerdata); diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 56e68d8c5d9f..cfa87dfc335f 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -196,9 +196,8 @@ static struct dentry *ovl_lookup_positive_unlocked(const char *name, bool drop_negative) { struct dentry *ret = lookup_one_len_unlocked(name, base, len); - unsigned int flags = smp_load_acquire(&ret->d_flags); - if (!IS_ERR(ret) && ((flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE)) { + if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { if (drop_negative && ret->d_lockref.count == 1) { spin_lock(&ret->d_lock); /* Recheck condition under lock */ diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 0e51e4b3e6f8..9e278dc207c6 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -79,7 +79,7 @@ static void ovl_dentry_release(struct dentry *dentry) static struct dentry *ovl_d_real(struct dentry *dentry, const struct inode *inode) { - struct dentry *real; + struct dentry *real = NULL, *lower; /* It's an overlay file */ if (inode && d_inode(dentry) == inode) @@ -98,9 +98,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry, if (real && !inode && ovl_has_upperdata(d_inode(dentry))) return real; - real = ovl_dentry_lowerdata(dentry); - if (!real) + lower = ovl_dentry_lowerdata(dentry); + if (!lower) goto bug; + real = lower; /* Handle recursion */ real = d_real(real, inode); @@ -108,8 +109,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry, if (!inode || inode == d_inode(real)) return real; bug: - WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry, - inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0); + WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n", + __func__, dentry, inode ? inode->i_sb->s_id : "NULL", + inode ? inode->i_ino : 0, real, + real && d_inode(real) ? d_inode(real)->i_ino : 0); return dentry; } @@ -249,7 +252,6 @@ static void ovl_put_super(struct super_block *sb) ovl_free_fs(ofs); } -/* Sync real dirty inodes in upper filesystem (if it exists) */ static int ovl_sync_fs(struct super_block *sb, int wait) { struct ovl_fs *ofs = sb->s_fs_info; @@ -259,21 +261,12 @@ static int ovl_sync_fs(struct super_block *sb, int wait) if (!ofs->upper_mnt) return 0; - /* - * If this is a sync(2) call or an emergency sync, all the super blocks - * will be iterated, including upper_sb, so no need to do anything. - * - * If this is a syncfs(2) call, then we do need to call - * sync_filesystem() on upper_sb, but enough if we do it when being - * called with wait == 1. - */ - if (!wait) + upper_sb = ofs->upper_mnt->mnt_sb; + if (!upper_sb->s_op->sync_fs) return 0; - upper_sb = ofs->upper_mnt->mnt_sb; - down_read(&upper_sb->s_umount); - ret = sync_filesystem(upper_sb); + ret = upper_sb->s_op->sync_fs(upper_sb, wait); up_read(&upper_sb->s_umount); return ret; @@ -1270,6 +1263,18 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) if (!ofs->config.nfs_export && !ofs->upper_mnt) return true; + /* + * We allow using single lower with null uuid for index and nfs_export + * for example to support those features with single lower squashfs. 
+ * To avoid regressions in setups of overlay with re-formatted lower + * squashfs, do not allow decoding origin with lower null uuid unless + * user opted-in to one of the new features that require following the + * lower inode of non-dir upper. + */ + if (!ofs->config.index && !ofs->config.metacopy && !ofs->config.xino && + uuid_is_null(uuid)) + return false; + for (i = 0; i < ofs->numlowerfs; i++) { /* * We use uuid to associate an overlay lower file handle with a @@ -1356,14 +1361,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, if (err < 0) goto out; + /* + * Check if lower root conflicts with this overlay's layers before + * checking if it is in-use as upperdir/workdir of "another" + * mount, because we do not bother to check in ovl_is_inuse() if + * the upperdir/workdir is in fact in-use by our + * upperdir/workdir. + */ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); if (err) goto out; if (ovl_is_inuse(stack[i].dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); - if (err) + if (err) { + iput(trap); goto out; + } } mnt = clone_private_mount(&stack[i]); @@ -1513,7 +1527,8 @@ out_err: * - upper/work dir of any overlayfs instance */ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, - struct dentry *dentry, const char *name) + struct dentry *dentry, const char *name, + bool is_lower) { struct dentry *next = dentry, *parent; int err = 0; @@ -1525,7 +1540,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, /* Walk back ancestors to root (inclusive) looking for traps */ while (!err && parent != next) { - if (ovl_lookup_trap_inode(sb, parent)) { + if (is_lower && ovl_lookup_trap_inode(sb, parent)) { err = -ELOOP; pr_err("overlayfs: overlapping %s path\n", name); } else if (ovl_is_inuse(parent)) { @@ -1551,7 +1566,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb, if (ofs->upper_mnt) { err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root, - "upperdir"); + "upperdir", false); if (err) return err; @@ -1562,7 +1577,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb, * workbasedir. In that case, we already have their traps in * inode cache and we will catch that case on lookup.
*/ - err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir"); + err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir", + false); if (err) return err; } @@ -1570,7 +1586,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb, for (i = 0; i < ofs->numlower; i++) { err = ovl_check_layer(sb, ofs, ofs->lower_layers[i].mnt->mnt_root, - "lowerdir"); + "lowerdir", true); if (err) return err; } diff --git a/fs/pnode.c b/fs/pnode.c index 49f6d7ff2139..1106137c747a 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -261,14 +261,13 @@ static int propagate_one(struct mount *m) child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); + read_seqlock_excl(&mount_lock); mnt_set_mountpoint(m, mp, child); + if (m->mnt_master != dest_master) + SET_MNT_MARK(m->mnt_master); + read_sequnlock_excl(&mount_lock); last_dest = m; last_source = child; - if (m->mnt_master != dest_master) { - read_seqlock_excl(&mount_lock); - SET_MNT_MARK(m->mnt_master); - read_sequnlock_excl(&mount_lock); - } hlist_add_head(&child->mnt_hash, list); return count_mounts(m->mnt_ns, child); } diff --git a/fs/pnode.h b/fs/pnode.h index 49a058c73e4c..988f1aa9b02a 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -12,7 +12,7 @@ #define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED) #define IS_MNT_SLAVE(m) ((m)->mnt_master) -#define IS_MNT_NEW(m) (!(m)->mnt_ns) +#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns)) #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED) #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE) #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) @@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int); void propagate_mount_unlock(struct mount *); void mnt_release_group_id(struct mount *); int get_dominating_id(struct mount *mnt, const struct path *root); -unsigned int mnt_get_count(struct mount *mnt); +int mnt_get_count(struct mount *mnt); void mnt_set_mountpoint(struct mount *, struct mountpoint *, struct mount *); void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, diff --git a/fs/proc/base.c b/fs/proc/base.c index ced7d0684ec0..519f2d89ec9f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -403,11 +403,11 @@ print0: static int lock_trace(struct task_struct *task) { - int err = mutex_lock_killable(&task->signal->cred_guard_mutex); + int err = down_read_killable(&task->signal->exec_update_lock); if (err) return err; if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { - mutex_unlock(&task->signal->cred_guard_mutex); + up_read(&task->signal->exec_update_lock); return -EPERM; } return 0; @@ -415,7 +415,7 @@ static int lock_trace(struct task_struct *task) static void unlock_trace(struct task_struct *task) { - mutex_unlock(&task->signal->cred_guard_mutex); + up_read(&task->signal->exec_update_lock); } #ifdef CONFIG_STACKTRACE @@ -1036,7 +1036,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) { - static DEFINE_MUTEX(oom_adj_mutex); struct mm_struct *mm = NULL; struct task_struct *task; int err = 0; @@ -1076,7 +1075,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) struct task_struct *p = find_lock_task_mm(task); if (p) { - if (atomic_read(&p->mm->mm_users) > 1) { + if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) { mm = p->mm; mmgrab(mm); } @@ -2770,7 +2769,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh unsigned long 
flags; int result; - result = mutex_lock_killable(&task->signal->cred_guard_mutex); + result = down_read_killable(&task->signal->exec_update_lock); if (result) return result; @@ -2806,7 +2805,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh result = 0; out_unlock: - mutex_unlock(&task->signal->cred_guard_mutex); + up_read(&task->signal->exec_update_lock); return result; } diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 64e9ee1b129e..8c3dbe13e647 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -138,8 +138,12 @@ static int proc_getattr(const struct path *path, struct kstat *stat, { struct inode *inode = d_inode(path->dentry); struct proc_dir_entry *de = PDE(inode); - if (de && de->nlink) - set_nlink(inode, de->nlink); + if (de) { + nlink_t nlink = READ_ONCE(de->nlink); + if (nlink > 0) { + set_nlink(inode, nlink); + } + } generic_fillattr(inode, stat); return 0; @@ -338,6 +342,16 @@ static const struct file_operations proc_dir_operations = { .iterate_shared = proc_readdir, }; +static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + return 0; +} + +const struct dentry_operations proc_net_dentry_ops = { + .d_revalidate = proc_net_d_revalidate, + .d_delete = always_delete_dentry, +}; + /* * proc directories can do almost nothing.. */ @@ -362,6 +376,7 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, write_unlock(&proc_subdir_lock); goto out_free_inum; } + dir->nlink++; write_unlock(&proc_subdir_lock); return dp; @@ -459,8 +474,8 @@ struct proc_dir_entry *proc_symlink(const char *name, } EXPORT_SYMBOL(proc_symlink); -struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, - struct proc_dir_entry *parent, void *data) +struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, + struct proc_dir_entry *parent, void *data, bool force_lookup) { struct proc_dir_entry *ent; @@ -472,13 +487,20 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, ent->data = data; ent->proc_fops = &proc_dir_operations; ent->proc_iops = &proc_dir_inode_operations; - parent->nlink++; + if (force_lookup) { + pde_force_lookup(ent); + } ent = proc_register(parent, ent); - if (!ent) - parent->nlink--; } return ent; } +EXPORT_SYMBOL_GPL(_proc_mkdir); + +struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, void *data) +{ + return _proc_mkdir(name, mode, parent, data, false); +} EXPORT_SYMBOL_GPL(proc_mkdir_data); struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, @@ -505,10 +527,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name) ent->data = NULL; ent->proc_fops = NULL; ent->proc_iops = NULL; - parent->nlink++; ent = proc_register(parent, ent); - if (!ent) - parent->nlink--; } return ent; } @@ -666,8 +685,12 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) len = strlen(fn); de = pde_subdir_find(parent, fn, len); - if (de) + if (de) { rb_erase(&de->subdir_node, &parent->subdir); + if (S_ISDIR(de->mode)) { + parent->nlink--; + } + } write_unlock(&proc_subdir_lock); if (!de) { WARN(1, "name '%s'\n", name); @@ -676,9 +699,6 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) proc_entry_rundown(de); - if (S_ISDIR(de->mode)) - parent->nlink--; - de->nlink = 0; WARN(pde_subdir_first(de), "%s: removing non-empty directory '%s/%s', leaking at least '%s'\n", __func__, de->parent->name, de->name, pde_subdir_first(de)->name); @@ -714,13 +734,12 @@ int 
remove_proc_subtree(const char *name, struct proc_dir_entry *parent) de = next; continue; } - write_unlock(&proc_subdir_lock); - - proc_entry_rundown(de); next = de->parent; if (S_ISDIR(de->mode)) next->nlink--; - de->nlink = 0; + write_unlock(&proc_subdir_lock); + + proc_entry_rundown(de); if (de == root) break; pde_put(de); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index dbe43a50caf2..3f0c89001fcf 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -448,7 +448,7 @@ const struct inode_operations proc_link_inode_operations = { struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { - struct inode *inode = new_inode_pseudo(sb); + struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = de->low_ino; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 92db21bdebad..1e9a087cf6f5 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -301,3 +301,10 @@ extern unsigned long task_statm(struct mm_struct *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void task_mem(struct seq_file *, struct mm_struct *); + +extern const struct dentry_operations proc_net_dentry_ops; +static inline void pde_force_lookup(struct proc_dir_entry *pde) +{ + /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ + pde->proc_dops = &proc_net_dentry_ops; +} diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 76ae278df1c4..88a3e7989beb 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -39,22 +39,6 @@ static struct net *get_proc_net(const struct inode *inode) return maybe_get_net(PDE_NET(PDE(inode))); } -static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) -{ - return 0; -} - -static const struct dentry_operations proc_net_dentry_ops = { - .d_revalidate = proc_net_d_revalidate, - .d_delete = always_delete_dentry, -}; - -static void pde_force_lookup(struct proc_dir_entry *pde) -{ - /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ - pde->proc_dops = &proc_net_dentry_ops; -} - static int seq_open_net(struct inode *inode, struct file *file) { unsigned int state_size = PDE(inode)->state_size; @@ -98,6 +82,25 @@ static const struct file_operations proc_net_seq_fops = { .release = seq_release_net, }; +int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) +{ +#ifdef CONFIG_NET_NS + struct seq_net_private *p = priv_data; + + p->net = get_net(current->nsproxy->net_ns); +#endif + return 0; +} + +void bpf_iter_fini_seq_net(void *priv_data) +{ +#ifdef CONFIG_NET_NS + struct seq_net_private *p = priv_data; + + put_net(p->net); +#endif +} + struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, unsigned int state_size, void *data) diff --git a/fs/proc/self.c b/fs/proc/self.c index 57c0a1047250..582336862d25 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry, pid_t tgid = task_tgid_nr_ns(current, ns); char *name; + /* + * Not currently supported. Once we can inherit all of struct pid, + * we can allow this. 
+ */ + if (current->flags & PF_KTHREAD) + return ERR_PTR(-EOPNOTSUPP); + if (!tgid) return ERR_PTR(-ENOENT); /* max length of unsigned int in decimal + NULL term */ @@ -43,7 +50,7 @@ int proc_setup_self(struct super_block *s) inode_lock(root_inode); self = d_alloc_name(s->s_root, "self"); if (self) { - struct inode *inode = new_inode_pseudo(s); + struct inode *inode = new_inode(s); if (inode) { inode->i_ino = self_inum; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9442631fd4af..f51dadd1ce43 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1567,11 +1567,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, src = *ppos; svpfn = src / PM_ENTRY_BYTES; - start_vaddr = svpfn << PAGE_SHIFT; end_vaddr = mm->task_size; /* watch out for wraparound */ - if (svpfn > mm->task_size >> PAGE_SHIFT) + start_vaddr = end_vaddr; + if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) + start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); + + /* Ensure the address is inside the task */ + if (start_vaddr > mm->task_size) start_vaddr = end_vaddr; /* diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index f61ae53533f5..fac9e50b33a6 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -43,7 +43,7 @@ int proc_setup_thread_self(struct super_block *s) inode_lock(root_inode); thread_self = d_alloc_name(s->s_root, "thread-self"); if (thread_self) { - struct inode *inode = new_inode_pseudo(s); + struct inode *inode = new_inode(s); if (inode) { inode->i_ino = thread_self_inum; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 7b13988796e1..080ca9d5eccb 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst, if (start < offset + dump->size) { tsz = min(offset + (u64)dump->size - start, (u64)size); buf = dump->buf + start - offset; - if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) { + if (remap_vmalloc_range_partial(vma, dst, buf, 0, + tsz)) { ret = -EFAULT; goto out_unlock; } @@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; if (remap_vmalloc_range_partial(vma, vma->vm_start + len, - kaddr, tsz)) + kaddr, 0, tsz)) goto fail; size -= tsz; diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index 7fbe8f058220..d99b5d39aa90 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -87,11 +87,11 @@ static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos) struct pstore_private *ps = s->private; struct pstore_ftrace_seq_data *data = v; + (*pos)++; data->off += REC_SIZE; if (data->off + REC_SIZE > ps->total_size) return NULL; - (*pos)++; return data; } @@ -101,6 +101,9 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v) struct pstore_ftrace_seq_data *data = v; struct pstore_ftrace_record *rec; + if (!data) + return 0; + rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off); seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %ps <- %pS\n", diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 3d7024662d29..705b79bb9b24 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -275,6 +275,9 @@ static int pstore_compress(const void *in, void *out, { int ret; + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS)) + return -EINVAL; + ret 
= crypto_comp_compress(tfm, in, inlen, out, &outlen); if (ret) { pr_err("crypto_comp_compress failed, ret = %d!\n", ret); @@ -661,7 +664,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *unzipped, *workspace; - if (!record->compressed) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. */ @@ -823,9 +826,9 @@ static int __init pstore_init(void) ret = pstore_init_fs(); if (ret) - return ret; + free_buf_for_compression(); - return 0; + return ret; } late_initcall(pstore_init); diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index a6f856f341dc..c5562c871c8b 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) memset(buf, 0, info->dqi_usable_bs); return sb->s_op->quota_read(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); } static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) @@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) ssize_t ret; ret = sb->s_op->quota_write(sb, info->dqi_type, buf, - info->dqi_usable_bs, blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); if (ret != info->dqi_usable_bs) { quota_error(sb, "dquota write failed"); if (ret >= 0) @@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk); goto out_buf; } - dquot->dq_off = (blk << info->dqi_blocksize_bits) + + dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; kfree(buf); @@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, ret = -EIO; goto out_buf; } else { - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct + ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 53429c29c784..56aedf4ba886 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -159,7 +159,31 @@ static int v2_read_file_info(struct super_block *sb, int type) qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk); qinfo->dqi_ops = &v2r1_qtree_ops; } + ret = -EUCLEAN; + /* Some sanity checks of the read headers... 
*/ + if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits > + i_size_read(sb_dqopt(sb)->files[type])) { + quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).", + (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits, + i_size_read(sb_dqopt(sb)->files[type])); + goto out_free; + } + if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { + quota_error(sb, "Free block number too big (%u >= %u).", + qinfo->dqi_free_blk, qinfo->dqi_blocks); + goto out_free; + } + if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { + quota_error(sb, "Block with free entry too big (%u >= %u).", + qinfo->dqi_free_entry, qinfo->dqi_blocks); + goto out_free; + } ret = 0; +out_free: + if (ret) { + kfree(info->dqi_priv); + info->dqi_priv = NULL; + } out: up_read(&dqopt->dqio_sem); return ret; @@ -284,6 +308,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); + d->dqb_pad = 0; if (qtree_entry_unused(info, dp)) d->dqb_itime = cpu_to_le64(1); } diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 414695454956..355523f4a4bf 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, if (!pages) goto out_free; - nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages); + nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages); if (nr != lpages) goto out_free_pages; /* leave if some pages were missing */ diff --git a/fs/readdir.c b/fs/readdir.c index de2eceffdee8..07a3b5baa404 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen, if (buf->result) return -EINVAL; + buf->result = verify_dirent_name(name, namlen); + if (buf->result < 0) + return buf->result; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->result = -EOVERFLOW; @@ -417,6 +420,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name, if (buf->result) return -EINVAL; + buf->result = verify_dirent_name(name, namlen); + if (buf->result < 0) + return buf->result; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->result = -EOVERFLOW; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6419e6dacc39..ac35ddf0dd60 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1553,11 +1553,7 @@ void reiserfs_read_locked_inode(struct inode *inode, * set version 1, version 2 could be used too, because stat data * key is the same in both versions */ - key.version = KEY_FORMAT_3_5; - key.on_disk_key.k_dir_id = dirino; - key.on_disk_key.k_objectid = inode->i_ino; - key.on_disk_key.k_offset = 0; - key.on_disk_key.k_type = 0; + _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3); /* look for the object's stat data */ retval = search_item(inode->i_sb, &key, &path_to_sd); @@ -2165,7 +2161,8 @@ out_end_trans: out_inserted_sd: clear_nlink(inode); th->t_trans_id = 0; /* so the caller can't use this handle later */ - unlock_new_inode(inode); /* OK to do even if we hadn't locked it */ + if (inode->i_state & I_NEW) + unlock_new_inode(inode); iput(inode); return err; } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index bb4973aefbb1..9e64e23014e8 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) "(second 
one): %h", ih); return 0; } + if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) { + reiserfs_warning(NULL, "reiserfs-5093", + "item entry count seems wrong %h", + ih); + return 0; + } prev_location = ih_location(ih); } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index a6bce5b1fb1d..1b9c7a387dc7 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s, "turned on."); return 0; } + if (qf_names[qtype] != + REISERFS_SB(s)->s_qf_names[qtype]) + kfree(qf_names[qtype]); + qf_names[qtype] = NULL; if (*arg) { /* Some filename specified? */ if (REISERFS_SB(s)->s_qf_names[qtype] && strcmp(REISERFS_SB(s)->s_qf_names[qtype], @@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s, else *mount_options |= 1 << REISERFS_GRPQUOTA; } else { - if (qf_names[qtype] != - REISERFS_SB(s)->s_qf_names[qtype]) - kfree(qf_names[qtype]); - qf_names[qtype] = NULL; if (qtype == USRQUOTA) *mount_options &= ~(1 << REISERFS_USRQUOTA); else diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 28b241cd6987..fe63a7c3e0da 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -674,6 +674,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, if (get_inode_sd_version(inode) == STAT_DATA_V1) return -EOPNOTSUPP; + /* + * priv_root needn't be initialized during mount so allow initial + * lookups to succeed. + */ + if (!REISERFS_SB(inode->i_sb)->priv_root) + return 0; + dentry = xattr_lookup(inode, name, XATTR_REPLACE); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index c764352447ba..81bec2c80b25 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec); static inline int reiserfs_xattrs_initialized(struct super_block *sb) { - return REISERFS_SB(sb)->priv_root != NULL; + return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root; } #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index 6b2b4362089e..b57b3ffcbc32 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) diff --git a/fs/select.c b/fs/select.c index 53a0c149f528..e51796063cb6 100644 --- a/fs/select.c +++ b/fs/select.c @@ -1037,10 +1037,9 @@ static long do_restart_poll(struct restart_block *restart_block) ret = do_sys_poll(ufds, nfds, to); - if (ret == -ERESTARTNOHAND) { - restart_block->fn = do_restart_poll; - ret = -ERESTART_RESTARTBLOCK; - } + if (ret == -ERESTARTNOHAND) + ret = set_restart_fn(restart_block, do_restart_poll); + return ret; } @@ -1062,7 +1061,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, struct restart_block *restart_block; restart_block = ¤t->restart_block; - restart_block->fn = do_restart_poll; restart_block->poll.ufds = ufds; restart_block->poll.nfds = nfds; @@ -1073,7 +1071,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, } else restart_block->poll.has_timeout = 0; - ret = -ERESTART_RESTARTBLOCK; + ret = set_restart_fn(restart_block, do_restart_poll); } return ret; } diff --git a/fs/seq_file.c 
b/fs/seq_file.c index 1600034a929b..c19ecc1f2d50 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -29,6 +29,9 @@ static void seq_set_overflow(struct seq_file *m) static void *seq_buf_alloc(unsigned long size) { + if (unlikely(size > MAX_RW_COUNT)) + return NULL; + return kvmalloc(size, GFP_KERNEL_ACCOUNT); } diff --git a/fs/signalfd.c b/fs/signalfd.c index 44b6845b071c..5b78719be445 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, flags); } @@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, 0); } diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c index ae2c87bb0fbe..723763746238 100644 --- a/fs/squashfs/export.c +++ b/fs/squashfs/export.c @@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) struct squashfs_sb_info *msblk = sb->s_fs_info; int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); - u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); + u64 start; __le64 ino; int err; TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); + if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) + return -EINVAL; + + start = le64_to_cpu(msblk->inode_lookup_table[blk]); + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); if (err < 0) return err; @@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, u64 lookup_table_start, u64 next_table, unsigned int inodes) { unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); + unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); + int n; __le64 *table; + u64 start, end; TRACE("In read_inode_lookup_table, length %d\n", length); @@ -121,20 +129,41 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, if (inodes == 0) return ERR_PTR(-EINVAL); - /* length bytes should not extend into the next table - this check - * also traps instances where lookup_table_start is incorrectly larger - * than the next table start + /* + * The computed size of the lookup table (length bytes) should exactly + * match the table start and end points */ - if (lookup_table_start + length > next_table) + if (length != (next_table - lookup_table_start)) return ERR_PTR(-EINVAL); table = squashfs_read_table(sb, lookup_table_start, length); + if (IS_ERR(table)) + return table; /* - * table[0] points to the first inode lookup table metadata block, - * this should be less than lookup_table_start + * table[0], table[1], ... table[indexes - 1] store the locations + * of the compressed inode lookup blocks. Each entry should be + * less than the next (i.e. table[0] < table[1]), and the difference + * between them should be SQUASHFS_METADATA_SIZE or less.
+ * table[indexes - 1] should be less than lookup_table_start, and + * again the difference should be SQUASHFS_METADATA_SIZE or less */ - if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { + for (n = 0; n < (indexes - 1); n++) { + start = le64_to_cpu(table[n]); + end = le64_to_cpu(table[n + 1]); + + if (start >= end + || (end - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } + } + + start = le64_to_cpu(table[indexes - 1]); + if (start >= lookup_table_start || + (lookup_table_start - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { kfree(table); return ERR_PTR(-EINVAL); } diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c index 6be5afe7287d..ea5387679723 100644 --- a/fs/squashfs/id.c +++ b/fs/squashfs/id.c @@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, struct squashfs_sb_info *msblk = sb->s_fs_info; int block = SQUASHFS_ID_BLOCK(index); int offset = SQUASHFS_ID_BLOCK_OFFSET(index); - u64 start_block = le64_to_cpu(msblk->id_table[block]); + u64 start_block; __le32 disk_id; int err; + if (index >= msblk->ids) + return -EINVAL; + + start_block = le64_to_cpu(msblk->id_table[block]); + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, sizeof(disk_id)); if (err < 0) @@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, u64 id_table_start, u64 next_table, unsigned short no_ids) { unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); + unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); + int n; __le64 *table; + u64 start, end; TRACE("In read_id_index_table, length %d\n", length); @@ -67,20 +75,38 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, return ERR_PTR(-EINVAL); /* - * length bytes should not extend into the next table - this check - * also traps instances where id_table_start is incorrectly larger - * than the next table start + * The computed size of the index table (length bytes) should exactly + * match the table start and end points */ - if (id_table_start + length > next_table) + if (length != (next_table - id_table_start)) return ERR_PTR(-EINVAL); table = squashfs_read_table(sb, id_table_start, length); + if (IS_ERR(table)) + return table; /* - * table[0] points to the first id lookup table metadata block, this - * should be less than id_table_start + * table[0], table[1], ... table[indexes - 1] store the locations + * of the compressed id blocks. Each entry should be less than + * the next (i.e. table[0] < table[1]), and the difference between them + * should be SQUASHFS_METADATA_SIZE or less. 
table[indexes - 1] + * should be less than id_table_start, and again the difference + * should be SQUASHFS_METADATA_SIZE or less */ - if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { + for (n = 0; n < (indexes - 1); n++) { + start = le64_to_cpu(table[n]); + end = le64_to_cpu(table[n + 1]); + + if (start >= end || (end - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } + } + + start = le64_to_cpu(table[indexes - 1]); + if (start >= id_table_start || (id_table_start - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { kfree(table); return ERR_PTR(-EINVAL); } diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 7187bd1a30ea..236664d69141 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -17,6 +17,7 @@ /* size of metadata (inode and directory) blocks */ #define SQUASHFS_METADATA_SIZE 8192 +#define SQUASHFS_BLOCK_OFFSET 2 /* default size of block device I/O */ #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 34c21ffb6df3..166e98806265 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -64,5 +64,6 @@ struct squashfs_sb_info { unsigned int inodes; unsigned int fragments; int xattr_ids; + unsigned int ids; }; #endif diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 0cc4ceec0562..2110323b610b 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) msblk->directory_table = le64_to_cpu(sblk->directory_table_start); msblk->inodes = le32_to_cpu(sblk->inodes); msblk->fragments = le32_to_cpu(sblk->fragments); + msblk->ids = le16_to_cpu(sblk->no_ids); flags = le16_to_cpu(sblk->flags); TRACE("Found valid superblock on %pg\n", sb->s_bdev); @@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) TRACE("Block size %d\n", msblk->block_size); TRACE("Number of inodes %d\n", msblk->inodes); TRACE("Number of fragments %d\n", msblk->fragments); - TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); + TRACE("Number of ids %d\n", msblk->ids); TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); TRACE("sblk->fragment_table_start %llx\n", @@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) allocate_id_index_table: /* Allocate and read id index table */ msblk->id_table = squashfs_read_id_index_table(sb, - le64_to_cpu(sblk->id_table_start), next_table, - le16_to_cpu(sblk->no_ids)); + le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); if (IS_ERR(msblk->id_table)) { errorf(fc, "unable to read id index table"); err = PTR_ERR(msblk->id_table); diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h index 184129afd456..d8a270d3ac4c 100644 --- a/fs/squashfs/xattr.h +++ b/fs/squashfs/xattr.h @@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, u64 *xattr_table_start, int *xattr_ids) { + struct squashfs_xattr_id_table *id_table; + + id_table = squashfs_read_table(sb, start, sizeof(*id_table)); + if (IS_ERR(id_table)) + return (__le64 *) id_table; + + *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); + kfree(id_table); + ERROR("Xattrs in filesystem, these will be ignored\n"); - 
*xattr_table_start = start; return ERR_PTR(-ENOTSUPP); } diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c index d99e08464554..087cab8c78f4 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c @@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, struct squashfs_sb_info *msblk = sb->s_fs_info; int block = SQUASHFS_XATTR_BLOCK(index); int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); - u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]); + u64 start_block; struct squashfs_xattr_id id; int err; + if (index >= msblk->xattr_ids) + return -EINVAL; + + start_block = le64_to_cpu(msblk->xattr_id_table[block]); + err = squashfs_read_metadata(sb, &id, &start_block, &offset, sizeof(id)); if (err < 0) @@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, /* * Read uncompressed xattr id lookup table indexes from disk into memory */ -__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, +__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, u64 *xattr_table_start, int *xattr_ids) { - unsigned int len; + struct squashfs_sb_info *msblk = sb->s_fs_info; + unsigned int len, indexes; struct squashfs_xattr_id_table *id_table; + __le64 *table; + u64 start, end; + int n; - id_table = squashfs_read_table(sb, start, sizeof(*id_table)); + id_table = squashfs_read_table(sb, table_start, sizeof(*id_table)); if (IS_ERR(id_table)) return (__le64 *) id_table; @@ -70,13 +79,54 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, if (*xattr_ids == 0) return ERR_PTR(-EINVAL); - /* xattr_table should be less than start */ - if (*xattr_table_start >= start) + len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); + indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids); + + /* + * The computed size of the index table (len bytes) should exactly + * match the table start and end points + */ + start = table_start + sizeof(*id_table); + end = msblk->bytes_used; + + if (len != (end - start)) return ERR_PTR(-EINVAL); - len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); + table = squashfs_read_table(sb, start, len); + if (IS_ERR(table)) + return table; - TRACE("In read_xattr_index_table, length %d\n", len); + /* table[0], table[1], ... table[indexes - 1] store the locations + * of the compressed xattr id blocks. Each entry should be less than + * the next (i.e. table[0] < table[1]), and the difference between them + * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] + * should be less than table_start, and again the difference + * should be SQUASHFS_METADATA_SIZE or less. + * + * Finally xattr_table_start should be less than table[0].
+ */ + for (n = 0; n < (indexes - 1); n++) { + start = le64_to_cpu(table[n]); + end = le64_to_cpu(table[n + 1]); - return squashfs_read_table(sb, start + sizeof(*id_table), len); + if (start >= end || (end - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } + } + + start = le64_to_cpu(table[indexes - 1]); + if (start >= table_start || (table_start - start) > + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } + + if (*xattr_table_start >= le64_to_cpu(table[0])) { + kfree(table); + return ERR_PTR(-EINVAL); + } + + return table; } diff --git a/fs/super.c b/fs/super.c index cd352530eca9..877532baf513 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1302,8 +1302,8 @@ int get_tree_bdev(struct fs_context *fc, mutex_lock(&bdev->bd_fsfreeze_mutex); if (bdev->bd_fsfreeze_count > 0) { mutex_unlock(&bdev->bd_fsfreeze_mutex); - blkdev_put(bdev, mode); warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev); + blkdev_put(bdev, mode); return -EBUSY; } @@ -1647,36 +1647,11 @@ EXPORT_SYMBOL(__sb_end_write); */ int __sb_start_write(struct super_block *sb, int level, bool wait) { - bool force_trylock = false; - int ret = 1; + if (!wait) + return percpu_down_read_trylock(sb->s_writers.rw_sem + level-1); -#ifdef CONFIG_LOCKDEP - /* - * We want lockdep to tell us about possible deadlocks with freezing - * but it's it bit tricky to properly instrument it. Getting a freeze - * protection works as getting a read lock but there are subtle - * problems. XFS for example gets freeze protection on internal level - * twice in some cases, which is OK only because we already hold a - * freeze protection also on higher level. Due to these cases we have - * to use wait == F (trylock mode) which must not fail. - */ - if (wait) { - int i; - - for (i = 0; i < level - 1; i++) - if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) { - force_trylock = true; - break; - } - } -#endif - if (wait && !force_trylock) - percpu_down_read(sb->s_writers.rw_sem + level-1); - else - ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1); - - WARN_ON(force_trylock && !ret); - return ret; + percpu_down_read(sb->s_writers.rw_sem + level-1); + return 1; } EXPORT_SYMBOL(__sb_start_write); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 130fc6fbcc03..4f7cf975b27c 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/seq_file.h> +#include <linux/mm.h> #include "sysfs.h" @@ -558,3 +559,57 @@ void sysfs_remove_bin_file(struct kobject *kobj, kernfs_remove_by_name(kobj->sd, attr->attr.name); } EXPORT_SYMBOL_GPL(sysfs_remove_bin_file); + +/** + * sysfs_emit - scnprintf equivalent, aware of PAGE_SIZE buffer. + * @buf: start of PAGE_SIZE buffer. + * @fmt: format + * @...: optional arguments to @format + * + * + * Returns number of characters written to @buf. + */ +int sysfs_emit(char *buf, const char *fmt, ...) +{ + va_list args; + int len; + + if (WARN(!buf || offset_in_page(buf), + "invalid sysfs_emit: buf:%p\n", buf)) + return 0; + + va_start(args, fmt); + len = vscnprintf(buf, PAGE_SIZE, fmt, args); + va_end(args); + + return len; +} +EXPORT_SYMBOL_GPL(sysfs_emit); + +/** + * sysfs_emit_at - scnprintf equivalent, aware of PAGE_SIZE buffer. + * @buf: start of PAGE_SIZE buffer. 
+ * @at: offset in @buf to start write in bytes + * @at must be >= 0 && < PAGE_SIZE + * @fmt: format + * @...: optional arguments to @fmt + * + * + * Returns number of characters written starting at &@buf[@at]. + */ +int sysfs_emit_at(char *buf, int at, const char *fmt, ...) +{ + va_list args; + int len; + + if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE, + "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at)) + return 0; + + va_start(args, fmt); + len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args); + va_end(args); + + return len; +} +EXPORT_SYMBOL_GPL(sysfs_emit_at); diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c index 8cdbd53d780c..8be17a773196 100644 --- a/fs/ubifs/auth.c +++ b/fs/ubifs/auth.c @@ -79,13 +79,9 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, struct shash_desc *inhash) { struct ubifs_auth_node *auth = node; - u8 *hash; + u8 hash[UBIFS_HASH_ARR_SZ]; int err; - hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); - if (!hash) - return -ENOMEM; - { SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); @@ -94,21 +90,16 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, err = crypto_shash_final(hash_desc, hash); if (err) - goto out; + return err; } err = ubifs_hash_calc_hmac(c, hash, auth->hmac); if (err) - goto out; + return err; auth->ch.node_type = UBIFS_AUTH_NODE; ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0); - - err = 0; -out: - kfree(hash); - - return err; + return 0; } static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c, @@ -351,7 +342,7 @@ int ubifs_init_authentication(struct ubifs_info *c) ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)", hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ); err = -EINVAL; - goto out_free_hash; + goto out_free_hmac; } err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen); @@ -361,8 +352,10 @@ int ubifs_init_authentication(struct ubifs_info *c) c->authenticated = true; c->log_hash = ubifs_hash_get_desc(c); - if (IS_ERR(c->log_hash)) + if (IS_ERR(c->log_hash)) { + err = PTR_ERR(c->log_hash); goto out_free_hmac; + } err = 0; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index e4b52783819d..992b74f9c941 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1123,6 +1123,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) err = PTR_ERR(dent); if (err == -ENOENT) break; + kfree(pdent); return err; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 6c0e19f7a21f..a5e5e9b9d4e3 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -278,6 +278,15 @@ done: return d_splice_alias(inode, dentry); } +static int ubifs_prepare_create(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *nm) +{ + if (fscrypt_is_nokey_name(dentry)) + return -ENOKEY; + + return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm); +} + static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { @@ -301,7 +310,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, if (err) return err; - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); + err = ubifs_prepare_create(dir, dentry, &nm); if (err) goto out_budg; @@ -961,7 +970,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) return err; - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); + err = ubifs_prepare_create(dir, dentry, &nm); if (err) goto out_budg; @@ -1046,7 +1055,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry, return err; } - err = 
fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); + err = ubifs_prepare_create(dir, dentry, &nm); if (err) { kfree(dev); goto out_budg; @@ -1130,7 +1139,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, if (err) return err; - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); + err = ubifs_prepare_create(dir, dentry, &nm); if (err) goto out_budg; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index a771273fba7e..8dada89bbe4d 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1375,7 +1375,6 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_budget_req req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(ui->data_len, 8) }; - int iflags = I_DIRTY_TIME; int err, release; if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) @@ -1393,11 +1392,8 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, if (flags & S_MTIME) inode->i_mtime = *time; - if (!(inode->i_sb->s_flags & SB_LAZYTIME)) - iflags |= I_DIRTY_SYNC; - release = ui->dirty; - __mark_inode_dirty(inode, iflags); + __mark_inode_dirty(inode, I_DIRTY_SYNC); mutex_unlock(&ui->ui_mutex); if (release) ubifs_release_budget(c, &req); diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 8ceb51478800..eae9cf5a57b0 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -225,7 +225,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { - int err = -EINVAL, type, node_len; + int err = -EINVAL, type, node_len, dump_node = 1; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf; @@ -278,10 +278,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, out_len: if (!quiet) ubifs_err(c, "bad node length %d", node_len); + if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) + dump_node = 0; out: if (!quiet) { ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); - ubifs_dump_node(c, buf); + if (dump_node) { + ubifs_dump_node(c, buf); + } else { + int safe_len = min3(node_len, c->leb_size - offs, + (int)UBIFS_MAX_DATA_NODE_SZ); + pr_err("\tprevent out-of-bounds memory access\n"); + pr_err("\ttruncated data node length %d\n", safe_len); + pr_err("\tcorrupted data node:\n"); + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, + buf, safe_len, 0); + } dump_stack(); } return err; @@ -307,7 +319,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) { uint32_t crc; - ubifs_assert(c, pad >= 0 && !(pad & 7)); + ubifs_assert(c, pad >= 0); if (pad >= UBIFS_PAD_NODE_SZ) { struct ubifs_ch *ch = buf; @@ -752,6 +764,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) * write-buffer. */ memcpy(wbuf->buf + wbuf->used, buf, len); + if (aligned_len > len) { + ubifs_assert(c, aligned_len - len < 8); + ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); + } if (aligned_len == wbuf->avail) { dbg_io("flush jhead %s wbuf to LEB %d:%d", @@ -844,13 +860,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) } spin_lock(&wbuf->lock); - if (aligned_len) + if (aligned_len) { /* * And now we have what's left and what does not take whole * max. write unit, so write it to the write-buffer and we are * done. 
*/ memcpy(wbuf->buf, buf + written, len); + if (aligned_len > len) { + ubifs_assert(c, aligned_len - len < 8); + ubifs_pad(c, wbuf->buf + len, aligned_len - len); + } + } if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 826dad0243dc..f78c3e3ef931 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct fscrypt_name *nm, const struct inode *inode, int deletion, int xent) { - int err, dlen, ilen, len, lnum, ino_offs, dent_offs; + int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0; int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); int last_reference = !!(deletion && inode->i_nlink == 0); struct ubifs_inode *ui = ubifs_inode(inode); @@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, goto out_finish; } ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); @@ -702,7 +703,7 @@ out_release: kfree(dent); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, inode->i_ino); finish_reservation(c); return err; @@ -893,6 +894,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) if (err == -ENOENT) break; + kfree(pxent); goto out_release; } @@ -905,6 +907,8 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) ubifs_err(c, "dead directory entry '%s', error %d", xent->name, err); ubifs_ro_mode(c, err); + kfree(pxent); + kfree(xent); goto out_release; } ubifs_assert(c, ubifs_inode(xino)->xattr); @@ -934,8 +938,6 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) inode->i_ino); release_head(c, BASEHD); - ubifs_add_auth_dirt(c, lnum); - if (last_reference) { err = ubifs_tnc_remove_ino(c, inode->i_ino); if (err) @@ -945,6 +947,8 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) } else { union ubifs_key key; + ubifs_add_auth_dirt(c, lnum); + ino_key_init(c, &key, inode->i_ino); err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); } @@ -1217,7 +1221,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, void *p; union ubifs_key key; struct ubifs_dent_node *dent, *dent2; - int err, dlen1, dlen2, ilen, lnum, offs, len; + int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0; int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); @@ -1333,6 +1337,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, goto out_finish; } new_ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); @@ -1414,7 +1419,7 @@ out_release: release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, new_inode->i_ino); out_finish: finish_reservation(c); diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index edf43ddd7dce..b0117878b3a0 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -157,7 +157,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) int err = 0; ino_t xattr_inum; union ubifs_key key; - struct ubifs_dent_node *xent; + struct ubifs_dent_node *xent, *pxent = NULL; struct fscrypt_name nm = {0}; struct ubifs_orphan *xattr_orphan; struct ubifs_orphan *orphan; @@ -173,6 +173,7 @@ int 
ubifs_add_orphan(struct ubifs_info *c, ino_t inum) err = PTR_ERR(xent); if (err == -ENOENT) break; + kfree(pxent); return err; } @@ -181,11 +182,17 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) xattr_inum = le64_to_cpu(xent->inum); xattr_orphan = orphan_add(c, xattr_inum, orphan); - if (IS_ERR(xattr_orphan)) + if (IS_ERR(xattr_orphan)) { + kfree(pxent); + kfree(xent); return PTR_ERR(xattr_orphan); + } + kfree(pxent); + pxent = xent; key_read(c, &xent->key, &key); } + kfree(pxent); return 0; } @@ -688,14 +695,14 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, ino_key_init(c, &key1, inum); err = ubifs_tnc_lookup(c, &key1, ino); - if (err) + if (err && err != -ENOENT) goto out_free; /* * Check whether an inode can really get deleted. * linkat() with O_TMPFILE allows rebirth of an inode. */ - if (ino->nlink == 0) { + if (err == 0 && ino->nlink == 0) { dbg_rcvry("deleting orphaned inode %lu", (unsigned long)inum); diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index b28ac4dfb407..723a4b66a9b9 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino) */ list_for_each_entry_reverse(r, &c->replay_list, list) { ubifs_assert(c, r->sqnum >= rino->sqnum); - if (key_inum(c, &r->key) == key_inum(c, &rino->key)) + if (key_inum(c, &r->key) == key_inum(c, &rino->key) && + key_type(c, &r->key) == UBIFS_INO_KEY) return r->deletion == 0; } @@ -601,18 +602,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, struct ubifs_scan_node *snod; int n_nodes = 0; int err; - u8 *hash, *hmac; + u8 hash[UBIFS_HASH_ARR_SZ]; + u8 hmac[UBIFS_HMAC_ARR_SZ]; if (!ubifs_authenticated(c)) return sleb->nodes_cnt; - hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); - hmac = kmalloc(c->hmac_desc_len, GFP_NOFS); - if (!hash || !hmac) { - err = -ENOMEM; - goto out; - } - list_for_each_entry(snod, &sleb->nodes, list) { n_nodes++; @@ -662,9 +657,6 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, err = 0; } out: - kfree(hash); - kfree(hmac); - return err ? err : n_nodes - n_not_auth; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 7fc2f3f07c16..701f15ba6135 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -820,8 +820,10 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].wbuf.jhead = i; c->jheads[i].grouped = 1; c->jheads[i].log_hash = ubifs_hash_get_desc(c); - if (IS_ERR(c->jheads[i].log_hash)) + if (IS_ERR(c->jheads[i].log_hash)) { + err = PTR_ERR(c->jheads[i].log_hash); goto out; + } } /* @@ -1092,14 +1094,20 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, break; } case Opt_auth_key: - c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL); - if (!c->auth_key_name) - return -ENOMEM; + if (!is_remount) { + c->auth_key_name = kstrdup(args[0].from, + GFP_KERNEL); + if (!c->auth_key_name) + return -ENOMEM; + } break; case Opt_auth_hash_name: - c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL); - if (!c->auth_hash_name) - return -ENOMEM; + if (!is_remount) { + c->auth_hash_name = kstrdup(args[0].from, + GFP_KERNEL); + if (!c->auth_hash_name) + return -ENOMEM; + } break; case Opt_ignore: break; @@ -1123,6 +1131,18 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, return 0; } +/* + * ubifs_release_options - release mount parameters which have been dumped. 
+ * @c: UBIFS file-system description object + */ +static void ubifs_release_options(struct ubifs_info *c) +{ + kfree(c->auth_key_name); + c->auth_key_name = NULL; + kfree(c->auth_hash_name); + c->auth_hash_name = NULL; +} + /** * destroy_journal - destroy journal data structures. * @c: UBIFS file-system description object @@ -1295,7 +1315,7 @@ static int mount_ubifs(struct ubifs_info *c) err = ubifs_read_superblock(c); if (err) - goto out_free; + goto out_auth; c->probing = 0; @@ -1307,18 +1327,18 @@ static int mount_ubifs(struct ubifs_info *c) ubifs_err(c, "'compressor \"%s\" is not compiled in", ubifs_compr_name(c, c->default_compr)); err = -ENOTSUPP; - goto out_free; + goto out_auth; } err = init_constants_sb(c); if (err) - goto out_free; + goto out_auth; sz = ALIGN(c->max_idx_node_sz, c->min_io_size) * 2; c->cbuf = kmalloc(sz, GFP_NOFS); if (!c->cbuf) { err = -ENOMEM; - goto out_free; + goto out_auth; } err = alloc_wbufs(c); @@ -1593,6 +1613,8 @@ out_wbufs: free_wbufs(c); out_cbuf: kfree(c->cbuf); +out_auth: + ubifs_exit_authentication(c); out_free: kfree(c->write_reserve_buf); kfree(c->bu.buf); @@ -1632,8 +1654,7 @@ static void ubifs_umount(struct ubifs_info *c) ubifs_lpt_free(c, 0); ubifs_exit_authentication(c); - kfree(c->auth_key_name); - kfree(c->auth_hash_name); + ubifs_release_options(c); kfree(c->cbuf); kfree(c->rcvrd_mst_node); kfree(c->mst_node); @@ -2201,6 +2222,7 @@ out_umount: out_unlock: mutex_unlock(&c->umount_mutex); out_close: + ubifs_release_options(c); ubi_close_volume(c->ubi); out: return err; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index e8e7b0e9532e..33742ee3945b 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2885,6 +2885,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) err = PTR_ERR(xent); if (err == -ENOENT) break; + kfree(pxent); return err; } @@ -2898,6 +2899,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) fname_len(&nm) = le16_to_cpu(xent->nlen); err = ubifs_tnc_remove_nm(c, &key1, &nm); if (err) { + kfree(pxent); kfree(xent); return err; } @@ -2906,6 +2908,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) highest_ino_key(c, &key2, xattr_inum); err = ubifs_tnc_remove_range(c, &key1, &key2); if (err) { + kfree(pxent); kfree(xent); return err; } diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 9aefbb60074f..a0b9b349efe6 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -522,6 +522,7 @@ int ubifs_purge_xattrs(struct inode *host) xent->name, err); ubifs_ro_mode(c, err); kfree(pxent); + kfree(xent); return err; } @@ -531,6 +532,7 @@ int ubifs_purge_xattrs(struct inode *host) err = remove_xattr(c, host, xino, &nm); if (err) { kfree(pxent); + kfree(xent); iput(xino); ubifs_err(c, "cannot remove xattr, error %d", err); return err; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ea80036d7897..507f8f910327 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode) struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; - if (!inode->i_nlink && !is_bad_inode(inode)) { - want_delete = 1; - udf_setsize(inode, 0); - udf_update_inode(inode, IS_SYNC(inode)); + if (!is_bad_inode(inode)) { + if (!inode->i_nlink) { + want_delete = 1; + udf_setsize(inode, 0); + udf_update_inode(inode, IS_SYNC(inode)); + } + if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && + inode->i_size != iinfo->i_lenExtents) { + udf_warn(inode->i_sb, + "Inode %lu (mode %o) has inode size %llu different from extent length %llu. 
Filesystem need not be standards compliant.\n", + inode->i_ino, inode->i_mode, + (unsigned long long)inode->i_size, + (unsigned long long)iinfo->i_lenExtents); + } } truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); - if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && - inode->i_size != iinfo->i_lenExtents) { - udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", - inode->i_ino, inode->i_mode, - (unsigned long long)inode->i_size, - (unsigned long long)iinfo->i_lenExtents); - } kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; udf_clear_extent_cache(inode); @@ -544,11 +547,14 @@ static int udf_do_extend_file(struct inode *inode, udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); + /* - * We've rewritten the last extent but there may be empty - * indirect extent after it - enter it. + * We've rewritten the last extent. If we are going to add + * more extents, we may need to enter possible following + * empty indirect extent. */ - udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); + if (new_block_bytes || prealloc_len) + udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); } /* Managed to do everything necessary? */ diff --git a/fs/udf/super.c b/fs/udf/super.c index 4baa1ca91e9b..8bb001c7927f 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb) struct buffer_head *bh = NULL; int nsr = 0; struct udf_sb_info *sbi; + loff_t session_offset; sbi = UDF_SB(sb); if (sb->s_blocksize < sizeof(struct volStructDesc)) @@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb) else sectorsize = sb->s_blocksize; - sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits); + session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits; + sector += session_offset; udf_debug("Starting at sector %u (%lu byte sectors)\n", (unsigned int)(sector >> sb->s_blocksize_bits), @@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb) if (nsr > 0) return 1; - else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) == - VSD_FIRST_SECTOR_OFFSET) + else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET) return -1; else return 0; @@ -1352,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb, (int)spm->numSparingTables); return -EIO; } + if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) { + udf_err(sb, "error loading logical volume descriptor: " + "Too big sparing table size (%u)\n", + le32_to_cpu(spm->sizeSparingTable)); + return -EIO; + } for (i = 0; i < spm->numSparingTables; i++) { loc = le32_to_cpu(spm->locSparingTable[i]); @@ -1697,7 +1704,8 @@ static noinline int udf_process_sequence( "Pointers (max %u supported)\n", UDF_MAX_TD_NESTING); brelse(bh); - return -EIO; + ret = -EIO; + goto out; } vdp = (struct volDescPtr *)bh->b_data; @@ -1717,7 +1725,8 @@ static noinline int udf_process_sequence( curr = get_volume_descriptor_record(ident, bh, &data); if (IS_ERR(curr)) { brelse(bh); - return PTR_ERR(curr); + ret = PTR_ERR(curr); + goto out; } /* Descriptor we don't care about? 
*/ if (!curr) @@ -1739,28 +1748,31 @@ */ if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) { udf_err(sb, "Primary Volume Descriptor not found!\n"); - return -EAGAIN; + ret = -EAGAIN; + goto out; } ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block); if (ret < 0) - return ret; + goto out; if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) { ret = udf_load_logicalvol(sb, data.vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset); if (ret < 0) - return ret; + goto out; } /* Now handle prevailing Partition Descriptors */ for (i = 0; i < data.num_part_descs; i++) { ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); if (ret < 0) - return ret; + goto out; } - - return 0; + ret = 0; +out: + kfree(data.part_descs_loc); + return ret; } /* diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 1da0be667409..e3b69fb280e8 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) + if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); diff --git a/fs/xattr.c b/fs/xattr.c index 90dd78f0eb27..f2854570d411 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: remove an extended attribute while holding the + * inode lock + * + * @dentry - object to perform removexattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
+ */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 533b04aaf6f6..436f686a9891 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2209,6 +2209,7 @@ xfs_defer_agfl_block( new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); new->xefi_blockcount = 1; new->xefi_oinfo = *oinfo; + new->xefi_skip_discard = false; trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); @@ -2598,6 +2599,13 @@ xfs_agf_verify( be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return __this_address; + if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks) + return __this_address; + + if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || + be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length)) + return __this_address; + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || @@ -2609,6 +2617,10 @@ xfs_agf_verify( be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) return __this_address; + if (xfs_sb_version_hasrmapbt(&mp->m_sb) && + be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) + return __this_address; + /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't @@ -2622,6 +2634,11 @@ xfs_agf_verify( be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) return __this_address; + if (xfs_sb_version_hasreflink(&mp->m_sb) && + be32_to_cpu(agf->agf_refcount_blocks) > + be32_to_cpu(agf->agf_length)) + return __this_address; + if (xfs_sb_version_hasreflink(&mp->m_sb) && (be32_to_cpu(agf->agf_refcount_level) < 1 || be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index f0089e862216..de33efc9b4f9 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -453,13 +453,15 @@ xfs_attr_copy_value( * special case for dev/uuid inodes, they have fixed size data forks. 
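 * As a worked illustration (numbers invented for this comment, not
 * mandated by the on-disk format): with a literal area of 336 bytes and
 * bytes == 100, the rounded-down computation below yields
 * offset = (336 - 100) >> 3 = 29 eight-byte units, and the rest of the
 * function then checks that candidate against minforkoff/maxforkoff.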
*/ int -xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) +xfs_attr_shortform_bytesfit( + struct xfs_inode *dp, + int bytes) { - int offset; - int minforkoff; /* lower limit on valid forkoff locations */ - int maxforkoff; /* upper limit on valid forkoff locations */ - int dsize; - xfs_mount_t *mp = dp->i_mount; + struct xfs_mount *mp = dp->i_mount; + int64_t dsize; + int minforkoff; + int maxforkoff; + int offset; /* rounded down */ offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3; @@ -525,7 +527,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) * A data fork btree root must have space for at least * MINDBTPTRS key/ptr pairs if the data fork is small or empty. */ - minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); + minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); minforkoff = roundup(minforkoff, 8) >> 3; /* attr fork btree root can have at least this many key/ptr pairs */ @@ -583,8 +585,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -924,7 +926,7 @@ xfs_attr_shortform_verify( char *endp; struct xfs_ifork *ifp; int i; - int size; + int64_t size; ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL); ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK); @@ -946,8 +948,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. + * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. 
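 * (A sketch of the layout the "- 1" above relies on: struct
 * xfs_attr_sf_entry ends in a one-byte nameval[] flex-style array, so
 * sizeof(*sfep) already counts one byte of the variable name/value
 * region; the fixed part of an entry is therefore sizeof(*sfep) - 1
 * bytes, which is what must fit before endp.)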
*/ @@ -1447,7 +1451,9 @@ xfs_attr3_leaf_add_work( for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { if (ichdr->freemap[i].base == tmp) { ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); - ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); + ichdr->freemap[i].size -= + min_t(uint16_t, ichdr->freemap[i].size, + sizeof(xfs_attr_leaf_entry_t)); } } ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 3f76da11197c..c114d24be619 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -4985,20 +4985,25 @@ xfs_bmap_del_extent_real( flags = XFS_ILOG_CORE; if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { - xfs_fsblock_t bno; xfs_filblks_t len; xfs_extlen_t mod; - bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize, - &mod); - ASSERT(mod == 0); len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, &mod); ASSERT(mod == 0); - error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); - if (error) - goto done; + if (!(bflags & XFS_BMAPI_REMAP)) { + xfs_fsblock_t bno; + + bno = div_u64_rem(del->br_startblock, + mp->m_sb.sb_rextsize, &mod); + ASSERT(mod == 0); + + error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); + if (error) + goto done; + } + do_fx = 0; nblks = len * mp->m_sb.sb_rextsize; qfield = XFS_TRANS_DQ_RTBCOUNT; @@ -5376,16 +5381,17 @@ __xfs_bunmapi( } div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); if (mod) { + xfs_extlen_t off = mp->m_sb.sb_rextsize - mod; + /* * Realtime extent is lined up at the end but not * at the front. We'll get rid of full extents if * we can. */ - mod = mp->m_sb.sb_rextsize - mod; - if (del.br_blockcount > mod) { - del.br_blockcount -= mod; - del.br_startoff += mod; - del.br_startblock += mod; + if (del.br_blockcount > off) { + del.br_blockcount -= off; + del.br_startoff += off; + del.br_startblock += off; } else if (del.br_startoff == start && (del.br_state == XFS_EXT_UNWRITTEN || tp->t_blk_res == 0)) { @@ -5403,6 +5409,7 @@ __xfs_bunmapi( continue; } else if (del.br_state == XFS_EXT_UNWRITTEN) { struct xfs_bmbt_irec prev; + xfs_fileoff_t unwrite_start; /* * This one is already unwritten. 
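 * It must have a written left neighbour, and the hunk below rewinds
 * that neighbour's conversion point. As a sketch with invented
 * offsets: for start = 100, del.br_startoff = 104, mod = 3 and
 * prev.br_startoff = 96, unwrite_start = max3(100, 104 - 3, 96) = 101,
 * so the conversion back to unwritten never reaches past the caller's
 * range or the neighbour's own start.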
@@ -5416,12 +5423,13 @@ __xfs_bunmapi( ASSERT(!isnullstartblock(prev.br_startblock)); ASSERT(del.br_startblock == prev.br_startblock + prev.br_blockcount); - if (prev.br_startoff < start) { - mod = start - prev.br_startoff; - prev.br_blockcount -= mod; - prev.br_startblock += mod; - prev.br_startoff = start; - } + unwrite_start = max3(start, + del.br_startoff - mod, + prev.br_startoff); + mod = unwrite_start - prev.br_startoff; + prev.br_startoff = unwrite_start; + prev.br_startblock += mod; + prev.br_blockcount -= mod; prev.br_state = XFS_EXT_UNWRITTEN; error = xfs_bmap_add_extent_unwritten_real(tp, ip, whichfork, &icur, &cur, @@ -6179,7 +6187,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index e2798c6f3a5f..093716a074fb 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -52,9 +52,9 @@ struct xfs_extent_free_item { xfs_fsblock_t xefi_startblock;/* starting fs block number */ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ + bool xefi_skip_discard; struct list_head xefi_list; struct xfs_owner_info xefi_oinfo; /* extent owner */ - bool xefi_skip_discard; }; #define XFS_BMAP_MAX_NMAP 4 diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index 01b1722333a9..f54244779492 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -124,8 +124,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t ino, xfs_extlen_t tot); -extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp, - xfs_ino_t inum); extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t inum, xfs_extlen_t tot); diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index 705c4f562758..99d5b2ed67f2 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -210,6 +210,7 @@ __xfs_dir3_free_read( if (fa) { xfs_verifier_error(*bpp, -EFSCORRUPTED, fa); xfs_trans_brelse(tp, *bpp); + *bpp = NULL; return -EFSCORRUPTED; } diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 0e112e1d3e6b..f980c3f3d2f6 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -628,7 +628,7 @@ xfs_dir2_sf_verify( int i; int i8count; int offset; - int size; + int64_t size; int error; uint8_t filetype; @@ -947,7 +947,7 @@ xfs_dir2_sf_removename( /* * Check whether the sf dir replace operation need more blocks. */ -bool +static bool xfs_dir2_sf_replace_needblock( struct xfs_inode *dp, xfs_ino_t inum) diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 588d44613094..443cf33f6666 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -679,7 +679,7 @@ xfs_ialloc_ag_alloc( args.minalignslop = igeo->cluster_align - 1; /* Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; @@ -727,7 +727,7 @@ xfs_ialloc_ag_alloc( /* * Allow space for the inode btree to split. 
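 * (The bump below from "inobt_maxlevels - 1" to "inobt_maxlevels" is a
 * worst-case correction: a full split can consume roughly one new
 * block per level of the tree, so, as a sketch, a 3-level inobt may
 * need 3 free blocks and a reservation of maxlevels - 1 would be one
 * block short.)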
*/ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; } diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c index 7bc87408f1a0..52451809c478 100644 --- a/fs/xfs/libxfs/xfs_iext_tree.c +++ b/fs/xfs/libxfs/xfs_iext_tree.c @@ -596,7 +596,7 @@ xfs_iext_realloc_root( struct xfs_ifork *ifp, struct xfs_iext_cursor *cur) { - size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec); + int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec); void *new; /* account for the prev/next pointers */ diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index c643beeb5a24..8fdd0424070e 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -129,7 +129,7 @@ xfs_init_local_fork( struct xfs_inode *ip, int whichfork, const void *data, - int size) + int64_t size) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); int mem_size = size, real_size = 0; @@ -467,11 +467,11 @@ xfs_iroot_realloc( void xfs_idata_realloc( struct xfs_inode *ip, - int byte_diff, + int64_t byte_diff, int whichfork) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); - int new_size = (int)ifp->if_bytes + byte_diff; + int64_t new_size = ifp->if_bytes + byte_diff; ASSERT(new_size >= 0); ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork)); @@ -552,7 +552,7 @@ xfs_iextents_copy( struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_iext_cursor icur; struct xfs_bmbt_irec rec; - int copied = 0; + int64_t copied = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)); ASSERT(ifp->if_bytes > 0); diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 00c62ce170d0..7b845c052fb4 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -13,16 +13,16 @@ struct xfs_dinode; * File incore extent information, present for each of data & attr forks. 
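 * (A plausible reading of the member reshuffle below: with if_bytes
 * widened to int64_t, grouping the pointer/int members first and the
 * short/char members at the tail avoids interior padding holes; the
 * exact savings depend on the architecture's alignment rules.)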
*/ struct xfs_ifork { - int if_bytes; /* bytes in if_u1 */ - unsigned int if_seq; /* fork mod counter */ + int64_t if_bytes; /* bytes in if_u1 */ struct xfs_btree_block *if_broot; /* file's incore btree root */ - short if_broot_bytes; /* bytes allocated for root */ - unsigned char if_flags; /* per-fork flags */ + unsigned int if_seq; /* fork mod counter */ int if_height; /* height of the extent tree */ union { void *if_root; /* extent tree root */ char *if_data; /* inline file data */ } if_u1; + short if_broot_bytes; /* bytes allocated for root */ + unsigned char if_flags; /* per-fork flags */ }; /* @@ -93,12 +93,14 @@ int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, struct xfs_inode_log_item *, int); void xfs_idestroy_fork(struct xfs_inode *, int); -void xfs_idata_realloc(struct xfs_inode *, int, int); +void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff, + int whichfork); void xfs_iroot_realloc(struct xfs_inode *, int, int); int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int); int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *, int); -void xfs_init_local_fork(struct xfs_inode *, int, const void *, int); +void xfs_init_local_fork(struct xfs_inode *ip, int whichfork, + const void *data, int64_t size); xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp); void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur, diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index 38e9414878b3..9d3c67b654ca 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -1379,7 +1379,7 @@ xfs_rmap_convert_shared( * record for our insertion point. This will also give us the record for * start block contiguity tests. */ - error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags, + error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext, &PREV, &i); if (error) goto done; diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index 8ea1efc97b41..42085e70c01a 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range( struct xfs_mount *mp = tp->t_mountp; xfs_rtblock_t rtstart; xfs_rtblock_t rtend; - xfs_rtblock_t rem; int is_free; int error = 0; @@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range( if (low_rec->ar_startext >= mp->m_sb.sb_rextents || low_rec->ar_startext == high_rec->ar_startext) return 0; - if (high_rec->ar_startext > mp->m_sb.sb_rextents) - high_rec->ar_startext = mp->m_sb.sb_rextents; + high_rec->ar_startext = min(high_rec->ar_startext, + mp->m_sb.sb_rextents - 1); /* Iterate the bitmap, looking for discrepancies. */ rtstart = low_rec->ar_startext; - rem = high_rec->ar_startext - rtstart; - while (rem) { + while (rtstart <= high_rec->ar_startext) { /* Is the first block free? */ error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend, &is_free); @@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range( /* How long does the extent go for? 
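 * A sketch of the revised bounds with invented numbers: for
 * sb_rextents = 100, a high key of ar_startext = 500 is clamped to 99
 * above, and because the loop now tests
 * rtstart <= high_rec->ar_startext, extent 99 itself is still examined
 * instead of being treated as an exclusive end point.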
*/ error = xfs_rtfind_forw(mp, tp, rtstart, - high_rec->ar_startext - 1, &rtend); + high_rec->ar_startext, &rtend); if (error) break; @@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range( break; } - rem -= rtend - rtstart + 1; rtstart = rtend + 1; } diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index a9ad90926b87..0ba7368b9a5f 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -36,6 +36,7 @@ xfs_trans_ijoin( ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Get a log_item_desc to point at the new item. @@ -91,6 +92,7 @@ xfs_trans_log_inode( ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Don't bother with i_lock for the I_DIRTY_TIME check here, as races @@ -98,9 +100,9 @@ xfs_trans_log_inode( * to log the timestamps, or will clear already cleared fields in the * worst case. */ - if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) { + if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); } diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index d12bbd526e7c..b3584cd2cc16 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -196,6 +196,24 @@ xfs_calc_inode_chunk_res( return res; } +/* + * Per-extent log reservation for the btree changes involved in freeing or + * allocating a realtime extent. We have to be able to log as many rtbitmap + * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents, + * as well as the realtime summary block. + */ +unsigned int +xfs_rtalloc_log_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int rtbmp_bytes; + + rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY; + return (howmany(rtbmp_bytes, blksz) + 1) * num_ops; +} + /* * Various log reservation values. * @@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res( /* * In a write transaction we can allocate a maximum of 2 - * extents. This gives: + * extents. 
This gives (t1): * the inode getting the new extents: inode size * the inode's bmap btree: max depth * block size * the agfs of the ags from which the extents are allocated: 2 * sector * the superblock free block counter: sector size * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size - * And the bmap_finish transaction can free bmap blocks in a join: + * Or, if we're writing to a realtime file (t2): + * the inode getting the new extents: inode size + * the inode's bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 1 block + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join (t3): * the agfs of the ags containing the blocks: 2 * sector size * the agfls of the ags containing the blocks: 2 * sector size * the super block free block counter: sector size @@ -234,40 +260,72 @@ STATIC uint xfs_calc_write_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) + + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t2 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), - XFS_FSB_TO_B(mp, 1)) + + blksz) + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1)))); + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz); + } else { + t2 = 0; + } + + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* - * In truncating a file we free up to two extents at once. We can modify: + * In truncating a file we free up to two extents at once. 
We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size - * And the bmap_finish transaction can free the blocks and bmap blocks: + * And the bmap_finish transaction can free the blocks and bmap blocks (t2): * the agf for each of the ags: 4 * sector size * the agfl for each of the ags: 4 * sector size * the super block to reflect the freed blocks: sector size * worst case split in allocation btrees per extent assuming 4 extents: * 4 exts * 2 trees * (2 * max depth - 1) * block size + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size */ STATIC uint xfs_calc_itruncate_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), - XFS_FSB_TO_B(mp, 1)))); + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); + + t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + } else { + t3 = 0; + } + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index 88221c7a04cc..7ad3659c5d2a 100644 --- a/fs/xfs/libxfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h @@ -57,8 +57,8 @@ XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) #define XFS_IALLOC_SPACE_RES(mp) \ (M_IGEO(mp)->ialloc_blks + \ - (xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \ - (M_IGEO(mp)->inobt_maxlevels - 1))) + ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ + M_IGEO(mp)->inobt_maxlevels)) /* * Space reservation values for various transactions. diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index fa6ea6407992..52892f41eb2d 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -45,9 +45,27 @@ xchk_setup_inode_bmap( */ if (S_ISREG(VFS_I(sc->ip)->i_mode) && sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + inode_dio_wait(VFS_I(sc->ip)); - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping); - if (error) + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. 
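+ * (The two-step filemap_fdatawrite() + filemap_fdatawait_keep_errors()
+ * sequence below is the "don't consume errors" variant of
+ * filemap_write_and_wait(): the wait helper deliberately leaves the
+ * mapping's error state latched so that a later fsync() from the
+ * writer still observes it.)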
+ */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) goto out; } @@ -95,6 +113,8 @@ xchk_bmap_get_rmap( if (info->whichfork == XFS_ATTR_FORK) rflags |= XFS_RMAP_ATTR_FORK; + if (irec->br_state == XFS_EXT_UNWRITTEN) + rflags |= XFS_RMAP_UNWRITTEN; /* * CoW staging extents are owned (on disk) by the refcountbt, so @@ -198,13 +218,13 @@ xchk_bmap_xref_rmap( * which doesn't track unwritten state. */ if (owner != XFS_RMAP_OWN_COW && - irec->br_state == XFS_EXT_UNWRITTEN && - !(rmap.rm_flags & XFS_RMAP_UNWRITTEN)) + !!(irec->br_state == XFS_EXT_UNWRITTEN) != + !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN)) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); - if (info->whichfork == XFS_ATTR_FORK && - !(rmap.rm_flags & XFS_RMAP_ATTR_FORK)) + if (!!(info->whichfork == XFS_ATTR_FORK) != + !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK)) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK) diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c index f52a7b8256f9..debf392e0515 100644 --- a/fs/xfs/scrub/btree.c +++ b/fs/xfs/scrub/btree.c @@ -452,32 +452,41 @@ xchk_btree_check_minrecs( int level, struct xfs_btree_block *block) { - unsigned int numrecs; - int ok_level; - - numrecs = be16_to_cpu(block->bb_numrecs); + struct xfs_btree_cur *cur = bs->cur; + unsigned int root_level = cur->bc_nlevels - 1; + unsigned int numrecs = be16_to_cpu(block->bb_numrecs); /* More records than minrecs means the block is ok. */ - if (numrecs >= bs->cur->bc_ops->get_minrecs(bs->cur, level)) + if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) return; /* - * Certain btree blocks /can/ have fewer than minrecs records. Any - * level greater than or equal to the level of the highest dedicated - * btree block are allowed to violate this constraint. - * - * For a btree rooted in a block, the btree root can have fewer than - * minrecs records. If the btree is rooted in an inode and does not - * store records in the root, the direct children of the root and the - * root itself can have fewer than minrecs records. + * For btrees rooted in the inode, it's possible that the root block + * contents spilled into a regular ondisk block because there wasn't + * enough space in the inode root. The number of records in that + * child block might be less than the standard minrecs, but that's ok + * provided that there's only one direct child of the root. */ - ok_level = bs->cur->bc_nlevels - 1; - if (bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) - ok_level--; - if (level >= ok_level) - return; + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + level == cur->bc_nlevels - 2) { + struct xfs_btree_block *root_block; + struct xfs_buf *root_bp; + int root_maxrecs; - xchk_btree_set_corrupt(bs->sc, bs->cur, level); + root_block = xfs_btree_get_block(cur, root_level, &root_bp); + root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level); + if (be16_to_cpu(root_block->bb_numrecs) != 1 || + numrecs <= root_maxrecs) + xchk_btree_set_corrupt(bs->sc, cur, level); + return; + } + + /* + * Otherwise, only the root level is allowed to have fewer than minrecs + * records or keyptrs. 
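+ * As a sketch with an invented height: with bc_nlevels = 4 the root
+ * sits at level 3, and blocks at levels 0-2 must all satisfy minrecs,
+ * except for a level-2 block that is the sole child of the inode
+ * root, which the branch above has already vetted.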
+ */ + if (level < root_level) + xchk_btree_set_corrupt(bs->sc, cur, level); } /* diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index 1e2e11721eb9..20eca2d8e7c7 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -152,6 +152,9 @@ xchk_dir_actor( xname.type = XFS_DIR3_FT_UNKNOWN; error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); + /* ENOENT means the hash lookup failed and the dir is corrupt */ + if (error == -ENOENT) + error = -EFSCORRUPTED; if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, &error)) goto out; diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 6d483ab29e63..1bea029b634a 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -121,8 +121,7 @@ xchk_inode_flags( goto bad; /* rt flags require rt device */ - if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) && - !mp->m_rtdev_targp) + if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) goto bad; /* new rt bitmap flag only valid for rbmino */ diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c index 0cab11a5d390..5c6b71b75ca1 100644 --- a/fs/xfs/scrub/refcount.c +++ b/fs/xfs/scrub/refcount.c @@ -170,7 +170,6 @@ xchk_refcountbt_process_rmap_fragments( */ INIT_LIST_HEAD(&worklist); rbno = NULLAGBLOCK; - nr = 1; /* Make sure the fragments actually /are/ in agbno order. */ bno = 0; @@ -184,15 +183,14 @@ xchk_refcountbt_process_rmap_fragments( * Find all the rmaps that start at or before the refc extent, * and put them on the worklist. */ + nr = 0; list_for_each_entry_safe(frag, n, &refchk->fragments, list) { - if (frag->rm.rm_startblock > refchk->bno) - goto done; + if (frag->rm.rm_startblock > refchk->bno || nr > target_nr) + break; bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; if (bno < rbno) rbno = bno; list_move_tail(&frag->list, &worklist); - if (nr == target_nr) - break; nr++; } diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 15c8c5f3f688..720bef577998 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -167,6 +167,7 @@ xchk_teardown( xfs_irele(sc->ip); sc->ip = NULL; } + sb_end_write(sc->mp->m_super); if (sc->flags & XCHK_REAPING_DISABLED) xchk_start_reaping(sc); if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) { @@ -489,6 +490,14 @@ xfs_scrub_metadata( sc.ops = &meta_scrub_ops[sm->sm_type]; sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type); retry_op: + /* + * If freeze runs concurrently with a scrub, the freeze can be delayed + * indefinitely as we walk the filesystem and iterate over metadata + * buffers. Freeze quiesces the log (which waits for the buffer LRU to + * be emptied) and that won't happen while checking is running. + */ + sb_start_write(mp->m_super); + /* Set up for the operation. */ error = sc.ops->setup(&sc, ip); if (error) diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 4f443703065e..d6d78e127625 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1039,6 +1039,7 @@ out_trans_cancel: goto out_unlock; } +/* Caller must first wait for the completion of any pending DIOs if required. 
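+ * (The inode_dio_wait() that used to sit at the top of this function
+ * has moved out to callers such as xfs_file_fallocate() and
+ * xfs_ioc_space() - see the hunks below and in xfs_ioctl.c - so each
+ * caller can order the DIO drain against its own locking.)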
*/ int xfs_flush_unmap_range( struct xfs_inode *ip, @@ -1050,9 +1051,6 @@ xfs_flush_unmap_range( xfs_off_t rounding, start, end; int error; - /* wait for the completion of any pending DIOs */ - inode_dio_wait(inode); - rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE); start = round_down(offset, rounding); end = round_up(offset + len, rounding) - 1; @@ -1084,10 +1082,6 @@ xfs_free_file_space( if (len <= 0) /* if nothing being freed */ return 0; - error = xfs_flush_unmap_range(ip, offset, len); - if (error) - return error; - startoffset_fsb = XFS_B_TO_FSB(mp, offset); endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len); @@ -1760,7 +1754,7 @@ xfs_swap_extents( if (xfs_inode_has_cow_data(tip)) { error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true); if (error) - return error; + goto out_unlock; } /* diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 0abba171aa89..1264ac63e4e5 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1162,8 +1162,10 @@ xfs_buf_ioend( bp->b_ops->verify_read(bp); } - if (!bp->b_error) + if (!bp->b_error) { + bp->b_flags &= ~XBF_WRITE_FAIL; bp->b_flags |= XBF_DONE; + } if (bp->b_iodone) (*(bp->b_iodone))(bp); @@ -1223,7 +1225,7 @@ xfs_bwrite( bp->b_flags |= XBF_WRITE; bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | - XBF_WRITE_FAIL | XBF_DONE); + XBF_DONE); error = xfs_buf_submit(bp); if (error) @@ -1929,7 +1931,7 @@ xfs_buf_delwri_submit_buffers( * synchronously. Otherwise, drop the buffer from the delwri * queue and submit async. */ - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); + bp->b_flags &= ~_XBF_DELWRI_Q; bp->b_flags |= XBF_WRITE; if (wait_list) { bp->b_flags &= ~XBF_ASYNC; diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index aeb95e7391c1..3cbf248af51f 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -1116,13 +1116,12 @@ xfs_qm_dqflush( dqb = bp->b_addr + dqp->q_bufoffset; ddqp = &dqb->dd_diskdq; - /* - * A simple sanity check in case we got a corrupted dquot. - */ - fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0); + /* sanity check the in-core structure before we flush */ + fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id), + 0); if (fa) { xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", - be32_to_cpu(ddqp->d_id), fa); + be32_to_cpu(dqp->q_core.d_id), fa); xfs_buf_relse(bp); xfs_dqfunlock(dqp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 1ffb179f35d2..26e3384ca39f 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -188,7 +188,7 @@ xfs_file_dio_aio_read( file_accessed(iocb->ki_filp); xfs_ilock(ip, XFS_IOLOCK_SHARED); - ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL); + ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL, is_sync_kiocb(iocb)); xfs_iunlock(ip, XFS_IOLOCK_SHARED); return ret; @@ -547,7 +547,8 @@ xfs_file_dio_aio_write( } trace_xfs_file_direct_write(ip, count, iocb->ki_pos); - ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops); + ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops, + is_sync_kiocb(iocb)); /* * If unaligned, this is the only IO in-flight. If it has not yet @@ -818,6 +819,36 @@ xfs_file_fallocate( if (error) goto out_unlock; + /* + * Must wait for all AIO to complete before we continue as AIO can + * change the file size on completion without holding any locks we + * currently hold. 
We must do this first because AIO can update both + * the on disk and in memory inode sizes, and the operations that follow + * require the in-memory size to be fully up-to-date. + */ + inode_dio_wait(inode); + + /* + * Now that AIO and DIO have drained, we flush and (if necessary) invalidate + * the cached range over the first operation we are about to run. + * + * We care about zero and collapse here because they both run a hole + * punch over the range first. Because that can zero data, and the range + * of invalidation for the shift operations is much larger, we still do + * the required flush for collapse in xfs_prepare_shift(). + * + * Insert has the same range requirements as collapse, and we extend the + * file first which can zero data. Hence insert has the same + * flush/invalidate requirements as collapse and so they are both + * handled at the right time by xfs_prepare_shift(). + */ + if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE | + FALLOC_FL_COLLAPSE_RANGE)) { + error = xfs_flush_unmap_range(ip, offset, len); + if (error) + goto out_unlock; + } + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) @@ -1172,6 +1203,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1179,7 +1218,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1192,7 +1231,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index d082143feb5a..01c0933a4d10 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -26,7 +26,7 @@ #include "xfs_rtalloc.h" /* Convert an xfs_fsmap to an fsmap. */ -void +static void xfs_fsmap_from_internal( struct fsmap *dest, struct xfs_fsmap *src) @@ -154,8 +154,7 @@ xfs_fsmap_owner_from_rmap( /* getfsmap query state */ struct xfs_getfsmap_info { struct xfs_fsmap_head *head; - xfs_fsmap_format_t formatter; /* formatting fn */ - void *format_arg; /* format buffer */ + struct fsmap *fsmap_recs; /* mapping records */ struct xfs_buf *agf_bp; /* AGF, for refcount queries */ xfs_daddr_t next_daddr; /* next daddr we expect */ u64 missing_owner; /* owner of holes */ @@ -223,6 +222,20 @@ xfs_getfsmap_is_shared( return 0; } +static inline void +xfs_getfsmap_format( + struct xfs_mount *mp, + struct xfs_fsmap *xfm, + struct xfs_getfsmap_info *info) +{ + struct fsmap *rec; + + trace_xfs_getfsmap_mapping(mp, xfm); + + rec = &info->fsmap_recs[info->head->fmh_entries++]; + xfs_fsmap_from_internal(rec, xfm); +} + /* * Format a reverse mapping for getfsmap, having translated rm_startblock * into the appropriate daddr units. @@ -255,6 +268,9 @@ xfs_getfsmap_helper( /* Are we just counting mappings? 
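 * If fmh_count is zero, the caller only wants to know how many records
 * a query would return, so the branch below bumps fmh_entries without
 * formatting anything; the UINT_MAX test guards the counter itself
 * against overflow by bailing out with -ECANCELED instead of wrapping.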
*/ if (info->head->fmh_count == 0) { + if (info->head->fmh_entries == UINT_MAX) + return -ECANCELED; + if (rec_daddr > info->next_daddr) info->head->fmh_entries++; @@ -284,10 +300,7 @@ xfs_getfsmap_helper( fmr.fmr_offset = 0; fmr.fmr_length = rec_daddr - info->next_daddr; fmr.fmr_flags = FMR_OF_SPECIAL_OWNER; - error = info->formatter(&fmr, info->format_arg); - if (error) - return error; - info->head->fmh_entries++; + xfs_getfsmap_format(mp, &fmr, info); } if (info->last) @@ -319,11 +332,8 @@ xfs_getfsmap_helper( if (shared) fmr.fmr_flags |= FMR_OF_SHARED; } - error = info->formatter(&fmr, info->format_arg); - if (error) - return error; - info->head->fmh_entries++; + xfs_getfsmap_format(mp, &fmr, info); out: rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount); if (info->next_daddr < rec_daddr) @@ -791,11 +801,11 @@ xfs_getfsmap_check_keys( #endif /* CONFIG_XFS_RT */ /* - * Get filesystem's extents as described in head, and format for - * output. Calls formatter to fill the user's buffer until all - * extents are mapped, until the passed-in head->fmh_count slots have - * been filled, or until the formatter short-circuits the loop, if it - * is tracking filled-in extents on its own. + * Get filesystem's extents as described in head, and format for output. Fills + * in the supplied records array until there are no more reverse mappings to + * return or head.fmh_entries == head.fmh_count. In the second case, this + * function returns -ECANCELED to indicate that more records would have been + * returned. * * Key to Confusion * ---------------- @@ -815,8 +825,7 @@ int xfs_getfsmap( struct xfs_mount *mp, struct xfs_fsmap_head *head, - xfs_fsmap_format_t formatter, - void *arg) + struct fsmap *fsmap_recs) { struct xfs_trans *tp = NULL; struct xfs_fsmap dkeys[2]; /* per-dev keys */ @@ -891,10 +900,17 @@ xfs_getfsmap( info.next_daddr = head->fmh_keys[0].fmr_physical + head->fmh_keys[0].fmr_length; - info.formatter = formatter; - info.format_arg = arg; + info.fsmap_recs = fsmap_recs; info.head = head; + /* + * If fsmap runs concurrently with a freeze, the freeze can be delayed + * indefinitely as we walk the rmapbt and iterate over metadata + * buffers. Freeze quiesces the log (which waits for the buffer LRU to + * be emptied) and that won't happen while we're reading buffers. + */ + sb_start_write(mp->m_super); + /* For each device we support... */ for (i = 0; i < XFS_GETFSMAP_DEVS; i++) { /* Is this device within the range the user asked for? 
*/ @@ -934,6 +950,7 @@ xfs_getfsmap( if (tp) xfs_trans_cancel(tp); + sb_end_write(mp->m_super); head->fmh_oflags = FMH_OF_DEV_T; return error; } diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h index c6c57739b862..a0775788e7b1 100644 --- a/fs/xfs/xfs_fsmap.h +++ b/fs/xfs/xfs_fsmap.h @@ -27,13 +27,9 @@ struct xfs_fsmap_head { struct xfs_fsmap fmh_keys[2]; /* low and high keys */ }; -void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src); void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src); -/* fsmap to userspace formatter - copy to user & advance pointer */ -typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *); - int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head, - xfs_fsmap_format_t formatter, void *arg); + struct fsmap *out_recs); #endif /* __XFS_FSMAP_H__ */ diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 944add5ff8e0..a1135b86e79f 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -907,7 +907,12 @@ xfs_eofblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_eofblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + return; xfs_icache_free_eofblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_eofblocks(mp); } @@ -934,7 +939,12 @@ xfs_cowblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_cowblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + return; xfs_icache_free_cowblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_cowblocks(mp); } @@ -1122,7 +1132,7 @@ restart: goto out_ifunlock; xfs_iunpin_wait(ip); } - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { + if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); goto reclaim; } @@ -1209,6 +1219,7 @@ reclaim: xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); + ASSERT(xfs_inode_clean(ip)); __xfs_inode_free(ip); return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 90bf983a34e8..a3e39d343c14 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1761,10 +1761,31 @@ xfs_inactive_ifree( return error; } + /* + * We do not hold the inode locked across the entire rolling transaction + * here. We only need to hold it for the first transaction that + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode + * here breaks the relationship between cluster buffer invalidation and + * stale inode invalidation on cluster buffer item journal commit + * completion, and can result in leaving dirty stale inodes hanging + * around in memory. + * + * We have no need for serialising this inode operation against other + * operations - we freed the inode and hence reallocation is required + * and that will serialise on reallocating the space the deferops need + * to free. Hence we can unlock the inode on the first commit of + * the transaction rather than roll it right through the deferops. This + * avoids relogging the XFS_ISTALE inode. + * + * We check that xfs_ifree() hasn't grown an internal transaction roll + * by asserting that the inode is still locked when it returns. + */ xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); error = xfs_ifree(tp, ip); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (error) { /* * If we fail to free the inode, shut down. 
The cancel @@ -1777,7 +1798,6 @@ xfs_inactive_ifree( xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); } xfs_trans_cancel(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -1795,7 +1815,6 @@ xfs_inactive_ifree( xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } @@ -3196,7 +3215,7 @@ xfs_rename( struct xfs_trans *tp; struct xfs_inode *wip = NULL; /* whiteout inode */ struct xfs_inode *inodes[__XFS_SORT_INODES]; - struct xfs_buf *agibp; + int i; int num_inodes = __XFS_SORT_INODES; bool new_parent = (src_dp != target_dp); bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); @@ -3309,6 +3328,30 @@ xfs_rename( } } + /* + * Lock the AGI buffers we need to handle bumping the nlink of the + * whiteout inode off the unlinked list and to handle dropping the + * nlink of the target inode. Per locking order rules, do this in + * increasing AG order and before directory block allocation tries to + * grab AGFs because we grab AGIs before AGFs. + * + * The (vfs) caller must ensure that if src is a directory then + * target_ip is either null or an empty directory. + */ + for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { + if (inodes[i] == wip || + (inodes[i] == target_ip && + (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { + struct xfs_buf *bp; + xfs_agnumber_t agno; + + agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino); + error = xfs_read_agi(mp, tp, agno, &bp); + if (error) + goto out_trans_cancel; + } + } + /* * Directory entry creation below may acquire the AGF. Remove * the whiteout from the unlinked list first to preserve correct @@ -3362,22 +3405,6 @@ xfs_rename( * In case there is already an entry with the same * name at the destination directory, remove it first. */ - - /* - * Check whether the replace operation will need to allocate - * blocks. This happens when the shortform directory lacks - * space and we have to convert it to a block format directory. - * When more blocks are necessary, we must lock the AGI first - * to preserve locking order (AGI -> AGF). - */ - if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) { - error = xfs_read_agi(mp, tp, - XFS_INO_TO_AGNO(mp, target_ip->i_ino), - &agibp); - if (error) - goto out_trans_cancel; - } - error = xfs_dir_replace(tp, target_dp, target_name, src_ip->i_ino, spaceres); if (error) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 2a1909397cb4..b3021d9b34a5 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -667,6 +667,31 @@ xfs_ioc_space( goto out_unlock; } + /* + * Must wait for all AIO to complete before we continue as AIO can + * change the file size on completion without holding any locks we + * currently hold. We must do this first because AIO can update both + * the on disk and in memory inode sizes, and the operations that follow + * require the in-memory size to be fully up-to-date. + */ + inode_dio_wait(inode); + + /* + * Now that AIO and DIO have drained we can flush and (if necessary) + * invalidate the cached range over the first operation we are about to + * run. We include zero range here because it starts with a hole punch + * over the target range.
+ */ + switch (cmd) { + case XFS_IOC_ZERO_RANGE: + case XFS_IOC_UNRESVSP: + case XFS_IOC_UNRESVSP64: + error = xfs_flush_unmap_range(ip, bf->l_start, bf->l_len); + if (error) + goto out_unlock; + break; + } + switch (cmd) { case XFS_IOC_ZERO_RANGE: flags |= XFS_PREALLOC_SET; @@ -1831,39 +1856,17 @@ out_free_buf: return error; } -struct getfsmap_info { - struct xfs_mount *mp; - struct fsmap_head __user *data; - unsigned int idx; - __u32 last_flags; -}; - -STATIC int -xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv) -{ - struct getfsmap_info *info = priv; - struct fsmap fm; - - trace_xfs_getfsmap_mapping(info->mp, xfm); - - info->last_flags = xfm->fmr_flags; - xfs_fsmap_from_internal(&fm, xfm); - if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm, - sizeof(struct fsmap))) - return -EFAULT; - - return 0; -} - STATIC int xfs_ioc_getfsmap( struct xfs_inode *ip, struct fsmap_head __user *arg) { - struct getfsmap_info info = { NULL }; struct xfs_fsmap_head xhead = {0}; struct fsmap_head head; - bool aborted = false; + struct fsmap *recs; + unsigned int count; + __u32 last_flags = 0; + bool done = false; int error; if (copy_from_user(&head, arg, sizeof(struct fsmap_head))) @@ -1875,38 +1878,112 @@ xfs_ioc_getfsmap( sizeof(head.fmh_keys[1].fmr_reserved))) return -EINVAL; + /* + * Use an internal memory buffer so that we don't have to copy fsmap + * data to userspace while holding locks. Start by trying to allocate + * up to 128k for the buffer, but fall back to a single page if needed. + */ + count = min_t(unsigned int, head.fmh_count, + 131072 / sizeof(struct fsmap)); + recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL); + if (!recs) { + count = min_t(unsigned int, head.fmh_count, + PAGE_SIZE / sizeof(struct fsmap)); + recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL); + if (!recs) + return -ENOMEM; + } + xhead.fmh_iflags = head.fmh_iflags; - xhead.fmh_count = head.fmh_count; xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]); xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]); trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]); trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]); - info.mp = ip->i_mount; - info.data = arg; - error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info); - if (error == -ECANCELED) { - error = 0; - aborted = true; - } else if (error) - return error; + head.fmh_entries = 0; + do { + struct fsmap __user *user_recs; + struct fsmap *last_rec; - /* If we didn't abort, set the "last" flag in the last fmx */ - if (!aborted && info.idx) { - info.last_flags |= FMR_OF_LAST; - if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags, - &info.last_flags, sizeof(info.last_flags))) - return -EFAULT; + user_recs = &arg->fmh_recs[head.fmh_entries]; + xhead.fmh_entries = 0; + xhead.fmh_count = min_t(unsigned int, count, + head.fmh_count - head.fmh_entries); + + /* Run query, record how many entries we got. */ + error = xfs_getfsmap(ip->i_mount, &xhead, recs); + switch (error) { + case 0: + /* + * There are no more records in the result set. Copy + * whatever we got to userspace and break out. + */ + done = true; + break; + case -ECANCELED: + /* + * The internal memory buffer is full. Copy whatever + * records we got to userspace and go again if we have + * not yet filled the userspace buffer. 
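The buffer setup above tries a large allocation and retries with a smaller one before giving up. Outside the kernel the same shape looks like this; calloc() stands in for kvzalloc(), 4096 for PAGE_SIZE, and alloc_recs() is a made-up name:

#include <stdlib.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Try up to 128 KiB worth of records, fall back to one page's worth. */
static void *alloc_recs(size_t recsz, size_t want, size_t *got)
{
	void *p;

	*got = min_sz(want, 131072 / recsz);
	p = calloc(*got, recsz);
	if (!p) {
		*got = min_sz(want, 4096 / recsz);
		p = calloc(*got, recsz);
	}
	return p;	/* NULL here corresponds to -ENOMEM in the hunk */
}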
+ */ + error = 0; + break; + default: + goto out_free; + } + head.fmh_entries += xhead.fmh_entries; + head.fmh_oflags = xhead.fmh_oflags; + + /* + * If the caller wanted a record count or there aren't any + * new records to return, we're done. + */ + if (head.fmh_count == 0 || xhead.fmh_entries == 0) + break; + + /* Copy all the records we got out to userspace. */ + if (copy_to_user(user_recs, recs, + xhead.fmh_entries * sizeof(struct fsmap))) { + error = -EFAULT; + goto out_free; + } + + /* Remember the last record flags we copied to userspace. */ + last_rec = &recs[xhead.fmh_entries - 1]; + last_flags = last_rec->fmr_flags; + + /* Set up the low key for the next iteration. */ + xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec); + trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]); + } while (!done && head.fmh_entries < head.fmh_count); + + /* + * If there are no more records in the query result set and we're not + * in counting mode, mark the last record returned with the LAST flag. + */ + if (done && head.fmh_count > 0 && head.fmh_entries > 0) { + struct fsmap __user *user_rec; + + last_flags |= FMR_OF_LAST; + user_rec = &arg->fmh_recs[head.fmh_entries - 1]; + + if (copy_to_user(&user_rec->fmr_flags, &last_flags, + sizeof(last_flags))) { + error = -EFAULT; + goto out_free; + } } /* copy back header */ - head.fmh_entries = xhead.fmh_entries; - head.fmh_oflags = xhead.fmh_oflags; - if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) - return -EFAULT; + if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) { + error = -EFAULT; + goto out_free; + } - return 0; +out_free: + kmem_free(recs); + return error; } STATIC int @@ -2401,7 +2478,10 @@ xfs_file_ioctl( if (error) return error; - return xfs_icache_free_eofblocks(mp, &keofb); + sb_start_write(mp->m_super); + error = xfs_icache_free_eofblocks(mp, &keofb); + sb_end_write(mp->m_super); + return error; } default: diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index f780e223b118..239c9548b156 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1002,9 +1002,15 @@ xfs_file_iomap_begin( * I/O, which must be block aligned, we need to report the * newly allocated address. If the data fork has a hole, copy * the COW fork mapping to avoid allocating to the data fork. + * + * Otherwise, ensure that the imap range does not extend past + * the range allocated/found in cmap. */ if (directio || imap.br_startblock == HOLESTARTBLOCK) imap = cmap; + else + xfs_trim_extent(&imap, cmap.br_startoff, + cmap.br_blockcount); end_fsb = imap.br_startoff + imap.br_blockcount; length = XFS_FSB_TO_B(mp, end_fsb) - offset; diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index fe285d123d69..ca8c763902b9 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -839,7 +839,7 @@ xfs_setattr_size( ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); ASSERT(S_ISREG(inode->i_mode)); ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| - ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); + ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0); oldsize = inode->i_size; newsize = iattr->ia_size; @@ -885,6 +885,16 @@ xfs_setattr_size( error = iomap_zero_range(inode, oldsize, newsize - oldsize, &did_zeroing, &xfs_iomap_ops); } else { + /* + * iomap won't detect a dirty page over an unwritten block (or a + * cow block over a hole) and subsequently skips zeroing the + * newly post-EOF portion of the page. Flush the new EOF to + * convert the block before the pagecache truncate. 
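Userspace never sees the internal batching in the fsmap loop above: a caller simply re-issues FS_IOC_GETFSMAP with the last record it received as the new low key until a record carries FMR_OF_LAST. A sketch in the spirit of the ioctl_getfsmap(2) example, with most error handling trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsmap.h>

#define NR_RECS 64

int main(int argc, char **argv)
{
	struct fsmap_head *head;
	int fd, last = 0;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	head = calloc(1, sizeof(*head) + NR_RECS * sizeof(struct fsmap));
	head->fmh_count = NR_RECS;
	/* High key: everything up to the end of the filesystem. */
	head->fmh_keys[1].fmr_device = UINT_MAX;
	head->fmh_keys[1].fmr_flags = UINT_MAX;
	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
	head->fmh_keys[1].fmr_owner = ULLONG_MAX;
	head->fmh_keys[1].fmr_offset = ULLONG_MAX;

	while (!last && !ioctl(fd, FS_IOC_GETFSMAP, head)) {
		if (!head->fmh_entries)
			break;
		for (unsigned int i = 0; i < head->fmh_entries; i++) {
			struct fsmap *r = &head->fmh_recs[i];

			printf("dev %u: %llu+%llu owner %llu\n",
			       r->fmr_device,
			       (unsigned long long)r->fmr_physical,
			       (unsigned long long)r->fmr_length,
			       (unsigned long long)r->fmr_owner);
			last = r->fmr_flags & FMR_OF_LAST;
		}
		/* Resume after the last record returned. */
		head->fmh_keys[0] = head->fmh_recs[head->fmh_entries - 1];
	}
	close(fd);
	free(head);
	return 0;
}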
+ */ + error = filemap_write_and_wait_range(inode->i_mapping, newsize, + newsize); + if (error) + return error; error = iomap_truncate_page(inode, newsize, &did_zeroing, &xfs_iomap_ops); } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ba5b6f3b2b88..5a0ce0c2c4bb 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -195,21 +195,26 @@ xfs_initialize_perag( } pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL); - if (!pag) + if (!pag) { + error = -ENOMEM; goto out_unwind_new_pags; + } pag->pag_agno = index; pag->pag_mount = mp; spin_lock_init(&pag->pag_ici_lock); mutex_init(&pag->pag_ici_reclaim_lock); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); - if (xfs_buf_hash_init(pag)) + + error = xfs_buf_hash_init(pag); + if (error) goto out_free_pag; init_waitqueue_head(&pag->pagb_wait); spin_lock_init(&pag->pagb_lock); pag->pagb_count = 0; pag->pagb_tree = RB_ROOT; - if (radix_tree_preload(GFP_NOFS)) + error = radix_tree_preload(GFP_NOFS); + if (error) goto out_hash_destroy; spin_lock(&mp->m_perag_lock); diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index a339bd5fa260..f63fe8d924a3 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -134,7 +134,7 @@ xfs_fs_map_blocks( goto out_unlock; error = invalidate_inode_pages2(inode->i_mapping); if (WARN_ON_ONCE(error)) - return error; + goto out_unlock; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); offset_fsb = XFS_B_TO_FSBT(mp, offset); diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 0f08153b4994..904d8285c226 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1005,6 +1005,7 @@ xfs_reflink_remap_extent( xfs_filblks_t rlen; xfs_filblks_t unmap_len; xfs_off_t newlen; + int64_t qres; int error; unmap_len = irec->br_startoff + irec->br_blockcount - destoff; @@ -1027,13 +1028,19 @@ xfs_reflink_remap_extent( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - /* If we're not just clearing space, then do we have enough quota? */ - if (real_extent) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); - if (error) - goto out_cancel; - } + /* + * Reserve quota for this operation. We don't know if the first unmap + * in the dest file will cause a bmap btree split, so we always reserve + * at least enough blocks for that split. If the extent being mapped + * in is written, we need to reserve quota for that too. + */ + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (real_extent) + qres += irec->br_blockcount; + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_cancel; trace_xfs_reflink_remap(ip, irec->br_startoff, irec->br_blockcount, irec->br_startblock); @@ -1053,6 +1060,7 @@ xfs_reflink_remap_extent( uirec.br_startblock = irec->br_startblock + rlen; uirec.br_startoff = irec->br_startoff + rlen; uirec.br_blockcount = unmap_len - rlen; + uirec.br_state = irec->br_state; unmap_len = rlen; /* If this isn't a real mapping, we're done. */ diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 4a48a8c75b4f..6d5ddc4e5135 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -247,6 +247,9 @@ xfs_rtallocate_extent_block( end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; i <= end; i++) { + /* Make sure we don't scan off the end of the rt volume. */ + maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i; + /* * See if there's a free extent of maxlen starting at i. * If it's not so then next will contain the first non-free. 
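The clamp added above (and again in the xfs_rtallocate_extent_near() hunk that follows) is the standard idiom for bounding a scan window at the end of a device. With made-up numbers:

#include <assert.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long long rextents = 1000;	/* pretend rt volume size */
	unsigned long long start = 990, maxlen = 64;

	/* Same expression as the patch: never scan past the volume end. */
	maxlen = min(rextents, start + maxlen) - start;
	assert(maxlen == 10);	/* only 10 extents remain at offset 990 */
	return 0;
}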
@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near( */ if (bno >= mp->m_sb.sb_rextents) bno = mp->m_sb.sb_rextents - 1; + + /* Make sure we don't run off the end of the rt volume. */ + maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno; + if (maxlen < minlen) { + *rtblock = NULLRTBLOCK; + return 0; + } + /* * Try the exact allocation first. */ @@ -1010,10 +1021,13 @@ xfs_growfs_rt( xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); /* - * Update the bitmap inode's size. + * Update the bitmap inode's size ondisk and incore. We need + * to update the incore size so that inode inactivation won't + * punch what it thinks are "posteof" blocks. */ mp->m_rbmip->i_d.di_size = nsbp->sb_rbmblocks * nsbp->sb_blocksize; + i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size); xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); /* * Get the summary inode into the transaction. @@ -1021,9 +1035,12 @@ xfs_growfs_rt( xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); /* - * Update the summary inode's size. + * Update the summary inode's size. We need to update the + * incore size so that inode inactivation won't punch what it + * thinks are "posteof" blocks. */ mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; + i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size); xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); /* * Copy summary data from old to new sizes. diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h index e9f810fc6731..43585850f154 100644 --- a/fs/xfs/xfs_sysfs.h +++ b/fs/xfs/xfs_sysfs.h @@ -32,9 +32,11 @@ xfs_sysfs_init( struct xfs_kobj *parent_kobj, const char *name) { + struct kobject *parent; + + parent = parent_kobj ? &parent_kobj->kobject : NULL; init_completion(&kobj->complete); - return kobject_init_and_add(&kobj->kobject, ktype, - &parent_kobj->kobject, "%s", name); + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); } static inline void diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index f4795fdb7389..b32a66452d44 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -306,6 +306,11 @@ xfs_trans_alloc( * * Note the zero-length reservation; this transaction MUST be cancelled * without any dirty data. + * + * Callers should obtain freeze protection to avoid two conflicts with fs + * freezing: (1) having active transactions trip the m_active_trans ASSERTs; + * and (2) grabbing buffers at the same time that freeze is trying to drain + * the buffer LRU list. 
*/ int xfs_trans_alloc_empty( diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 6ccfd75d3c24..812108f6cc89 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -529,8 +529,9 @@ xfsaild( { struct xfs_ail *ailp = data; long tout = 0; /* milliseconds */ + unsigned int noreclaim_flag; - current->flags |= PF_MEMALLOC; + noreclaim_flag = memalloc_noreclaim_save(); set_freezable(); while (1) { @@ -601,6 +602,7 @@ xfsaild( tout = xfsaild_push(ailp); } + memalloc_noreclaim_restore(noreclaim_flag); return 0; } diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 16457465833b..904780dd74aa 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -646,7 +646,7 @@ xfs_trans_dqresv( } } if (ninos > 0) { - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; + total_count = dqp->q_res_icount + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 233a72f169bb..e1de0fe28164 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -59,11 +59,11 @@ struct acpi_exception_info { #define AE_OK (acpi_status) 0x0000 -#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL) -#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML) -#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER) -#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES) -#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL) +#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL) +#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML) +#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER) +#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES) +#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL) /* * Environmental exceptions diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 3f6fddeb7519..4d67a67964fa 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -232,6 +232,7 @@ struct acpi_pnp_type { struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ + int instance_no; /* Instance number of this object */ struct acpi_pnp_type type; /* ID type */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ @@ -614,7 +615,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); bool acpi_pm_device_can_wakeup(struct device *dev); int acpi_pm_device_sleep_state(struct device *, int *, int); int acpi_pm_set_device_wakeup(struct device *dev, bool enable); -int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable); #else static inline void acpi_pm_wakeup_event(struct device *dev) { @@ -645,10 +645,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable) { return -ENODEV; } -static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) -{ - return -ENODEV; -} #endif #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 4010c42e40bd..00441e24a5b9 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -748,7 +748,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) 
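The acexcep.h hunk above turns a bitwise test into a masked comparison. The class codes are field values, not independent flag bits, so `status & AE_CODE_AML` also fires for any class sharing a bit with 0x3000, and `status & AE_CODE_ENVIRONMENTAL` (0x0000) can never fire. A standalone check, using the class values as I believe ACPICA's acexcep.h defines them (copied here as an assumption):

#include <assert.h>

#define AE_CODE_ENVIRONMENTAL	0x0000
#define AE_CODE_PROGRAMMER	0x1000
#define AE_CODE_AML		0x3000
#define AE_CODE_MASK		0xF000

int main(void)
{
	unsigned int status = AE_CODE_PROGRAMMER | 0x0001;

	/* Old test: false positive, since 0x1001 & 0x3000 != 0. */
	assert(status & AE_CODE_AML);
	/* New test: correctly rejects a programmer-class status. */
	assert((status & AE_CODE_MASK) != AE_CODE_AML);
	/* And the old environmental test could never be true at all. */
	assert(!(status & AE_CODE_ENVIRONMENTAL));
	return 0;
}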
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number)) ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index e45ced27f4c3..d068272f6e56 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -3,7 +3,7 @@ * * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -502,7 +502,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, - ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ + ACPI_MADT_TYPE_RESERVED = 16, /* 16 and greater are reserved */ + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index e3f1cddb4ac8..517a5231cc1b 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -33,6 +33,9 @@ struct ghes_estatus_node { struct llist_node llnode; struct acpi_hest_generic *generic; struct ghes *ghes; + + int task_work_cpu; + struct callback_head task_work; }; struct ghes_estatus_cache { diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 47805172e73d..683e124ad517 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -297,6 +297,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx } #endif +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, + bool direct) +{ + if (direct || (is_percpu_thread() && cpu == smp_processor_id())) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + /* in processor_perflib.c */ #ifdef CONFIG_CPU_FREQ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index dd90c9792909..0e7316a86240 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -11,19 +11,19 @@ * See Documentation/atomic_bitops.txt for details. 
*/ -static inline void set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -static inline void clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -static inline void change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 384b5c835ced..c43b5906e0dc 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -3,6 +3,7 @@ #define _ASM_GENERIC_BUG_H #include <linux/compiler.h> +#include <linux/instrumentation.h> #define CUT_HERE "------------[ cut here ]------------\n" diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h index 10cd4ffc6ba2..f933d99c63e0 100644 --- a/include/asm-generic/mcs_spinlock.h +++ b/include/asm-generic/mcs_spinlock.h @@ -4,8 +4,8 @@ /* * Architectures can define their own: * - * arch_mcs_spin_lock_contended(l) - * arch_mcs_spin_unlock_contended(l) + * arch_mcs_spin_wait(l) + * arch_mcs_lock_handoff(l, val) * * See kernel/locking/mcs_spinlock.c. */ diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h index 9439ff037b2d..5698fca3bf56 100644 --- a/include/asm-generic/mmiowb.h +++ b/include/asm-generic/mmiowb.h @@ -27,7 +27,7 @@ #include <asm/smp.h> DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); -#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state) +#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) #else #define __mmiowb_state() arch_mmiowb_state() #endif /* arch_mmiowb_state */ @@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); static inline void mmiowb_set_pending(void) { struct mmiowb_state *ms = __mmiowb_state(); - ms->mmiowb_pending = ms->nesting_count; + + if (likely(ms->nesting_count)) + ms->mmiowb_pending = ms->nesting_count; } static inline void mmiowb_spin_lock(void) diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 18d8e2d8210f..53759d2b9c26 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, return nr_bank; } -void hyperv_report_panic(struct pt_regs *regs, long err); +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); void hyperv_cleanup(void); diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 818691846c90..15f77361eb13 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1159,8 +1159,17 @@ static inline bool arch_has_pfn_modify_check(void) #endif /* !__ASSEMBLY__ */ -#ifndef io_remap_pfn_range -#define io_remap_pfn_range remap_pfn_range +#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) +#ifdef CONFIG_PHYS_ADDR_T_64BIT +/* + * ZSMALLOC needs to know the highest PFN on 32-bit architectures + * with physical address space extension, but falls back to + * BITS_PER_LONG otherwise. 
+ */ +#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition +#else +#define MAX_POSSIBLE_PHYSMEM_BITS 32 +#endif #endif #ifndef has_transparent_hugepage @@ -1171,6 +1180,16 @@ static inline bool arch_has_pfn_modify_check(void) #endif #endif +#ifndef p4d_offset_lockless +#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) +#endif +#ifndef pud_offset_lockless +#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) +#endif +#ifndef pmd_offset_lockless +#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) +#endif + /* * On some architectures it depends on the mm if the p4d/pud or pmd * layer of the page table hierarchy is folded or not. diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d1779d442aa5..66397ed10acb 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end[]; /* Start and end of .opd section - used for function descriptors. */ extern char __start_opd[], __end_opd[]; +/* Start and end of instrumentation protected text section */ +extern char __noinstr_text_start[], __noinstr_text_end[]; + extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 238873739550..5aa8705df87e 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -48,7 +48,7 @@ #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else - #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a9c4e4721434..4b52a73b5910 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -306,7 +306,8 @@ #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data..page_aligned) + *(.data..page_aligned) \ + . = ALIGN(page_align); #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ @@ -339,6 +340,7 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ JUMP_TABLE_DATA \ @@ -394,7 +396,7 @@ } \ \ /* Built-in firmware blobs */ \ - .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ __start_builtin_fw = .; \ KEEP(*(.builtin_fw)) \ __end_builtin_fw = .; \ @@ -496,16 +498,27 @@ __start___modver = .; \ KEEP(*(__modver)) \ __stop___modver = .; \ - . = ALIGN((align)); \ - __end_rodata = .; \ } \ - . = ALIGN((align)); + \ + BTF \ + \ + . = ALIGN((align)); \ + __end_rodata = .; /* RODATA & RO_DATA provided for backward compatibility. * All archs are supposed to use RO_DATA() */ #define RODATA RO_DATA_SECTION(4096) #define RO_DATA(align) RO_DATA_SECTION(align) +/* + * Non-instrumentable text section + */ +#define NOINSTR_TEXT \ + ALIGN_FUNCTION(); \ + __noinstr_text_start = .; \ + *(.noinstr.text) \ + __noinstr_text_end = .; + /* * .text section. 
Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map @@ -516,7 +529,11 @@ */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + *(.text.hot .text.hot.*) \ + *(TEXT_MAIN .text.fixup) \ + *(.text.unlikely .text.unlikely.*) \ + *(.text.unknown .text.unknown.*) \ + NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ MEM_KEEP(init.text*) \ @@ -588,6 +605,24 @@ __stop___ex_table = .; \ } +/* + * .BTF + */ +#ifdef CONFIG_DEBUG_INFO_BTF +#define BTF \ + .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ + __start_BTF = .; \ + KEEP(*(.BTF)) \ + __stop_BTF = .; \ + } \ + . = ALIGN(4); \ + .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ + *(.BTF_ids) \ + } +#else +#define BTF +#endif + /* * Init task */ @@ -679,7 +714,9 @@ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ + . = ALIGN(PAGE_SIZE); \ *(.dynbss) \ *(BSS_MAIN) \ *(COMMON) \ @@ -723,8 +760,13 @@ /* DWARF 4 */ \ .debug_types 0 : { *(.debug_types) } \ /* DWARF 5 */ \ + .debug_addr 0 : { *(.debug_addr) } \ + .debug_line_str 0 : { *(.debug_line_str) } \ + .debug_loclists 0 : { *(.debug_loclists) } \ .debug_macro 0 : { *(.debug_macro) } \ - .debug_addr 0 : { *(.debug_addr) } + .debug_names 0 : { *(.debug_names) } \ + .debug_rnglists 0 : { *(.debug_rnglists) } \ + .debug_str_offsets 0 : { *(.debug_str_offsets) } /* Stabs debugging sections. */ #define STABS_DEBUG \ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index d873f999b334..3a801a7d3a0e 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -147,6 +147,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) * crypto_free_acomp() -- free ACOMPRESS tfm handle * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_acomp(struct crypto_acomp *tfm) { diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 3c245b1859e7..3b870b4e8275 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -179,6 +179,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) /** * crypto_free_aead() - zeroize and free aead handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_aead(struct crypto_aead *tfm) { diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 6924b091adec..8913b42fcb34 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm( * crypto_free_akcipher() - free AKCIPHER tfm handle * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
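The "@tfm is a NULL or error pointer" note that now appears on each crypto_free_*() helper documents frees that tolerate failed allocations, so a single cleanup label can free every handle regardless of which allocation failed. A userspace model of the convention, with ERR_PTR() and friends reimplemented along the lines of the kernel's include/linux/err.h:

#include <stdlib.h>

#define MAX_ERRNO 4095

/* Errnos live in the top 4095 values of the address space. */
static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR_OR_NULL(const void *p)
{
	return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* A free that, like the crypto_free_*() helpers documented above,
 * does nothing for NULL and error pointers. */
static void obj_free(void *obj)
{
	if (IS_ERR_OR_NULL(obj))
		return;
	free(obj);
}

int main(void)
{
	obj_free(NULL);			/* no-op */
	obj_free(ERR_PTR(-12));		/* no-op: an encoded -ENOMEM */
	obj_free(malloc(16));		/* really freed */
	return 0;
}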
*/ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) { diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h index c71f6ef47f0f..4c8d0c72f78d 100644 --- a/include/crypto/cast6.h +++ b/include/crypto/cast6.h @@ -19,7 +19,7 @@ int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen, u32 *flags); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); -void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); +void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index d52b95b75ae4..e993c6beec07 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -141,7 +141,7 @@ struct ahash_alg { struct shash_desc { struct crypto_shash *tfm; - void *__ctx[] CRYPTO_MINALIGN_ATTR; + void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); }; #define HASH_MAX_DIGESTSIZE 64 @@ -154,9 +154,9 @@ struct shash_desc { #define HASH_MAX_STATESIZE 512 -#define SHASH_DESC_ON_STACK(shash, ctx) \ - char __##shash##_desc[sizeof(struct shash_desc) + \ - HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \ +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ + __aligned(__alignof__(struct shash_desc)); \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc /** @@ -260,6 +260,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { @@ -703,6 +705,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) /** * crypto_free_shash() - zeroize and free the message digest handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_shash(struct crypto_shash *tfm) { diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 24cfa96f98ea..c1a8d4a41bb1 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -29,8 +29,8 @@ struct alg_sock { struct sock *parent; - unsigned int refcnt; - unsigned int nokey_refcnt; + atomic_t refcnt; + atomic_t nokey_refcnt; const struct af_alg_type *type; void *private; @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. 
*/ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index cd9a9b500624..19a2eadbef61 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) * crypto_free_kpp() - free KPP tfm handle * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_kpp(struct crypto_kpp *tfm) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 8b4b844b4eef..17bb3673d3c1 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_rng(struct crypto_rng *tfm) { diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h index 7dd780c5d058..75c7eaa20853 100644 --- a/include/crypto/serpent.h +++ b/include/crypto/serpent.h @@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, unsigned int keylen); int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); +void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); #endif diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index aada87916918..0bce6005d325 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -203,6 +203,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm( /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. 
*/ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) { diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 75fd96ff976b..15ae7fdc0478 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -8,8 +8,6 @@ #define XTS_BLOCK_SIZE 16 -#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) - static inline int xts_check_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h index 7aa2f93da49c..b0dcc07334a1 100644 --- a/include/drm/bridge/analogix_dp.h +++ b/include/drm/bridge/analogix_dp.h @@ -42,9 +42,10 @@ int analogix_dp_resume(struct analogix_dp_device *dp); int analogix_dp_suspend(struct analogix_dp_device *dp); struct analogix_dp_device * -analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, - struct analogix_dp_plat_data *plat_data); +analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data); +int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev); void analogix_dp_unbind(struct analogix_dp_device *dp); +void analogix_dp_remove(struct analogix_dp_device *dp); int analogix_dp_start_crc(struct drm_connector *connector); int analogix_dp_stop_crc(struct drm_connector *connector); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 3bcbe30339f0..198b9d060008 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -865,6 +865,18 @@ struct drm_mode_config { */ bool prefer_shadow_fbdev; + /** + * @fbdev_use_iomem: + * + * Set to true if the framebuffer resides in iomem. + * When set to true, memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code.
+ */ + bool fbdev_use_iomem; + /** * @quirk_addfb_prefer_xbgr_30bpp: * diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 71d81923e6b0..abfefaaf897a 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -5,6 +5,7 @@ #define _DRM_INTEL_GTT_H #include <linux/agp_backend.h> +#include <linux/intel-iommu.h> #include <linux/kernel.h> void intel_gtt_get(u64 *gtt_total, @@ -33,8 +34,4 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); /* flag for GFDT type */ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) -#ifdef CONFIG_INTEL_IOMMU -extern int intel_iommu_gfx_mapped; -#endif - #endif diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index f6a7ba4dccd4..3fee04f81439 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); -extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index d5e73266a81a..be61fcddc02a 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); -extern long user_read(const struct key *key, - char __user *buffer, size_t buflen); +extern long user_read(const struct key *key, char *buffer, size_t buflen); static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ce29a014e591..ed68f45c09f7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); +void acpi_boot_table_prepare (void); void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); +int acpi_locate_initial_tables (void); +void acpi_reserve_initial_tables (void); +void acpi_table_init_complete (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_table_parse_entries(char *id, unsigned long table_size, @@ -279,6 +283,21 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) /* Validate the processor object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); +/* Processor _CST control */ +struct acpi_processor_power; + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +bool acpi_processor_claim_cst_control(void); +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info); +#else +static inline bool acpi_processor_claim_cst_control(void) { return false; } +static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + return -ENODEV; +} +#endif #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ @@ -759,9 +778,12 @@ static inline int acpi_boot_init(void) return 0; } +static inline
void acpi_boot_table_prepare(void) +{ +} + static inline void acpi_boot_table_init(void) { - return; } static inline int acpi_mps_check(void) @@ -837,6 +859,13 @@ static inline int acpi_device_modalias(struct device *dev, return -ENODEV; } +static inline struct platform_device * +acpi_create_platform_device(struct acpi_device *adev, + struct property_entry *properties) +{ + return NULL; +} + static inline bool acpi_dma_supported(struct acpi_device *adev) { return false; diff --git a/include/linux/aer.h b/include/linux/aer.h index 514bffa11dbb..fa19e01f418a 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -46,6 +46,8 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); +void pci_save_aer_state(struct pci_dev *dev); +void pci_restore_aer_state(struct pci_dev *dev); #else static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) { @@ -63,6 +65,8 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) { return -EINVAL; } +static inline void pci_save_aer_state(struct pci_dev *dev) {} +static inline void pci_restore_aer_state(struct pci_dev *dev) {} #endif void cper_print_aer(struct pci_dev *dev, int aer_severity, diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 080012a6f025..157e4a6a83f6 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -76,6 +76,8 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 + #ifndef __ASSEMBLY__ #include <linux/linkage.h> diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 4fc87dee005a..6d13683884d0 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -13,6 +13,7 @@ #include <linux/workqueue.h> #include <linux/kref.h> #include <linux/refcount.h> +#include <linux/kabi.h> struct page; struct device; @@ -183,6 +184,10 @@ struct bdi_writeback { struct rcu_head rcu; }; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; struct backing_dev_info { @@ -220,6 +225,7 @@ struct backing_dev_info { wait_queue_head_t wb_waitq; struct device *dev; + char dev_name[64]; struct device *owner; struct timer_list laptop_mode_wb_timer; @@ -227,6 +233,11 @@ struct backing_dev_info { #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; #endif + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; enum { diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f88197c1ffc2..c9ad5c3b7b4b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) (1 << WB_async_congested)); } -extern const char *bdi_unknown_name; - -static inline const char *bdi_dev_name(struct backing_dev_info *bdi) -{ - if (!bdi || !bdi->dev) - return bdi_unknown_name; - return dev_name(bdi->dev); -} +const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b40fc633f3be..a345d9fed3d8 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -44,7 +44,13 @@ struct linux_binprm { * exec has happened. Used to sanitize execution environment * and to set AT_SECURE auxv for glibc. 
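The dev_name[64] array added to struct backing_dev_info above, together with bdi_dev_name() moving out of line, suggests the device name is snapshotted at registration so that callers can still print it after ->dev has been torn down; that reading is an inference from the hunks, not stated in them. A minimal model of the cached-name idea (all names hypothetical):

#include <stdio.h>

struct bdi_model {
	char dev_name[64];	/* snapshot taken while dev is alive */
	const void *dev;	/* cleared when the device goes away */
};

static void bdi_register_model(struct bdi_model *bdi, const void *dev,
			       const char *name)
{
	bdi->dev = dev;
	snprintf(bdi->dev_name, sizeof(bdi->dev_name), "%s", name);
}

static const char *bdi_dev_name_model(const struct bdi_model *bdi)
{
	/* Safe even after bdi->dev = NULL: we kept our own copy. */
	return bdi->dev_name[0] ? bdi->dev_name : "(unknown)";
}

int main(void)
{
	struct bdi_model bdi = { "", NULL };
	int dummy;

	bdi_register_model(&bdi, &dummy, "8:0");
	bdi.dev = NULL;				/* device disappears */
	printf("%s\n", bdi_dev_name_model(&bdi));	/* still "8:0" */
	return 0;
}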
*/ - secureexec:1; + secureexec:1, + /* + * Set by flush_old_exec, when exec_mmap has been called. + * This is past the point of no return, when the + * exec_update_mutex has been taken. + */ + called_exec_mmap:1; #ifdef __alpha__ unsigned int taso:1; #endif diff --git a/include/linux/bio.h b/include/linux/bio.h index 853d92ceee64..95d5b2677deb 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -8,6 +8,7 @@ #include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> +#include <linux/kabi.h> #ifdef CONFIG_BLOCK /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ @@ -319,6 +320,10 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; + + KABI_RESERVE(1); + KABI_RESERVE(2); + struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ }; @@ -718,6 +723,12 @@ struct bio_set { spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + struct workqueue_struct *rescue_workqueue; }; diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 4bbb5f1c8b5b..4c0224ff0a14 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -64,7 +64,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index c94a9ff9f082..4f0e62cbf2ff 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -57,7 +57,7 @@ static inline int get_bitmask_order(unsigned int count) static __always_inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); + return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); } /** diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index bca7b04eb1ab..3a15a99f6e56 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -22,6 +22,7 @@ #include <linux/atomic.h> #include <linux/kthread.h> #include <linux/fs.h> +#include <linux/kabi.h> /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) @@ -75,6 +76,11 @@ struct blkcg { unsigned int dkstats_on; struct list_head dkstats_list; struct blkcg_dkstats *dkstats_hint; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* @@ -194,6 +200,9 @@ struct blkcg_policy { blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; blkcg_pol_stat_pd_fn *pd_stat_fn; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern struct blkcg blkcg_root; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0bf056de5cc3..295c9c040ec3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -5,6 +5,7 @@ #include <linux/blkdev.h> #include <linux/sbitmap.h> #include <linux/srcu.h> +#include <linux/kabi.h> struct blk_mq_tags; struct blk_flush_queue; @@ -72,6 +73,11 @@ struct blk_mq_hw_ctx { struct list_head hctx_list; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + /* Must be the last member - see also blk_mq_hw_ctx_size(). 
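The bitfield.h hunk above passes 0ULL rather than _val into __BF_FIELD_CHECK(), so an oversized value now makes FIELD_FIT() return false at runtime instead of tripping the compile-time check that validates constants. The runtime test itself is just a shift-and-mask; a reduced model of it, with the kernel's build-time machinery stripped out:

#include <assert.h>

/* The runtime core of FIELD_FIT(), minus the compile-time checks;
 * __bf_shf() is the position of the mask's lowest set bit. */
#define __bf_shf(x)		__builtin_ctzll(x)
#define FIELD_FIT(mask, val) \
	(!(((unsigned long long)(val) << __bf_shf(mask)) & \
	   ~(unsigned long long)(mask)))

int main(void)
{
	assert(FIELD_FIT(0x70, 7));	/* 7 fits the 3-bit field at bits 4..6 */
	assert(!FIELD_FIT(0x70, 8));	/* 8 needs a fourth bit: no fit */
	return 0;
}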
*/ struct srcu_struct srcu[0]; }; @@ -113,11 +119,18 @@ struct blk_mq_tag_set { struct mutex tag_list_lock; struct list_head tag_list; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; struct blk_mq_queue_data { struct request *rq; bool last; + + KABI_RESERVE(1); }; typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, @@ -221,6 +234,10 @@ struct blk_mq_ops { */ void (*show_rq)(struct seq_file *m, struct request *rq); #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; enum { diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d688b96d1d63..5d1dc8841d18 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/bvec.h> #include <linux/ktime.h> +#include <linux/kabi.h> struct bio_set; struct bio; @@ -193,6 +194,9 @@ struct bio { struct bio_set *bi_pool; + KABI_RESERVE(1); + KABI_RESERVE(2); + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b1c591a53c47..0f2c2241269b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,7 @@ #include <linux/percpu-refcount.h> #include <linux/scatterlist.h> #include <linux/blkzoned.h> +#include <linux/kabi.h> struct module; struct scsi_ioctl_command; @@ -348,6 +349,10 @@ struct queue_limits { unsigned char discard_misaligned; unsigned char raid_partial_stripes_expensive; enum blk_zoned_model zoned; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #ifdef CONFIG_BLK_DEV_ZONED @@ -593,6 +598,7 @@ struct request_queue { u64 write_hints[BLK_MAX_WRITE_HINTS]; }; +/* Keep blk_queue_flag_name[] in sync with the definitions below */ #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ #define QUEUE_FLAG_DYING 1 /* queue being torn down */ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ @@ -1534,6 +1540,9 @@ struct blk_integrity_profile { integrity_prepare_fn *prepare_fn; integrity_complete_fn *complete_fn; const char *name; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); @@ -1713,6 +1722,9 @@ struct block_device_operations { struct blk_zone *zones, unsigned int *nr_zones); struct module *owner; const struct pr_ops *pr_ops; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, @@ -1774,6 +1786,9 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq) } #endif /* CONFIG_BLK_DEV_ZONED */ +int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, + loff_t lend); + #else /* CONFIG_BLOCK */ struct block_device; @@ -1783,6 +1798,12 @@ struct block_device; */ #define buffer_heads_over_limit 0 +static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode, + loff_t lstart, loff_t lend) +{ + return 0; +} + static inline long nr_blockdev_pages(void) { return 0; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 169fd25f6bc2..2b2b1b15a043 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -51,9 +51,16 @@ struct bpf_cgroup_storage { struct rcu_head rcu; }; +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; + struct bpf_cgroup_link *link; struct bpf_cgroup_storage 
*storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; @@ -84,18 +91,23 @@ struct cgroup_bpf { int cgroup_bpf_inherit(struct cgroup *cgrp); void cgroup_bpf_offline(struct cgroup *cgrp); -int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, +int __cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags); int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); /* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ -int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags); +int cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, + u32 flags); int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags); + enum bpf_attach_type type); int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -330,6 +342,7 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else @@ -352,6 +365,12 @@ static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, return -EINVAL; } +static inline int cgroup_bpf_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -378,6 +397,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, } #define cgroup_bpf_enabled (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h new file mode 100644 index 000000000000..cb1d849c5d4f --- /dev/null +++ b/include/linux/bpf-netns.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_NETNS_H +#define _BPF_NETNS_H + +#include <linux/mutex.h> +#include <uapi/linux/bpf.h> + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP, + MAX_NETNS_BPF_ATTACH_TYPE +}; + +static inline enum netns_bpf_attach_type +to_netns_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + case BPF_FLOW_DISSECTOR: + return NETNS_BPF_FLOW_DISSECTOR; + case BPF_SK_LOOKUP: + return NETNS_BPF_SK_LOOKUP; + default: + return NETNS_BPF_INVALID; + } +} + +/* Protects updates to netns_bpf */ +extern struct mutex netns_bpf_mutex; + +union bpf_attr; +struct bpf_prog; + +#ifdef CONFIG_NET +int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); +int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); +int netns_bpf_prog_detach(const union bpf_attr *attr); +int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog); +#else +static inline int netns_bpf_prog_query(const union 
bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_detach(const union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif + +#endif /* _BPF_NETNS_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3bf3835d0e86..93300d71a894 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -12,20 +12,44 @@ #include <linux/err.h> #include <linux/rbtree_latch.h> #include <linux/numa.h> +#include <linux/mm_types.h> #include <linux/wait.h> #include <linux/u64_stats_sync.h> +#include <linux/refcount.h> +#include <linux/mutex.h> +#include <linux/module.h> +#include <linux/capability.h> +#include <linux/poll.h> struct bpf_verifier_env; +struct bpf_verifier_log; struct perf_event; struct bpf_prog; +struct bpf_prog_aux; struct bpf_map; struct sock; struct seq_file; struct btf; struct btf_type; +struct exception_table_entry; +struct seq_operations; +struct bpf_iter_aux_info; +struct bpf_local_storage; +struct bpf_local_storage_map; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; +struct bpf_func_state; + +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { @@ -50,7 +74,7 @@ struct bpf_map_ops { void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd); void (*map_fd_put_ptr)(void *ptr); - u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); + int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); u32 (*map_fd_sys_lookup_elem)(void *ptr); void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m); @@ -59,11 +83,39 @@ struct bpf_map_ops { const struct btf_type *key_type, const struct btf_type *value_type); + /* Prog poke tracking helpers. */ + int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); + void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); + void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, + struct bpf_prog *new); + /* Direct value access helpers. 
*/ int (*map_direct_value_addr)(const struct bpf_map *map, u64 *imm, u32 off); int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off); + int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); + __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, + struct poll_table_struct *pts); + + /* Functions called by bpf_local_storage maps */ + int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, + void *owner, u32 size); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, + void *owner, u32 size); + struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); + + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee); + int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + void *callback_ctx, u64 flags); + + /* BTF name and id of struct allocated by map_alloc */ + const char * const map_btf_name; + int *map_btf_id; + /* bpf_iter info used to open a seq_file */ + const struct bpf_iter_seq_info *iter_seq_info; }; struct bpf_map_memory { @@ -86,23 +138,27 @@ struct bpf_map { u32 max_entries; u32 map_flags; int spin_lock_off; /* >=0 valid offset, <0 error */ + int timer_off; /* >=0 valid offset, <0 error */ u32 id; int numa_node; u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; struct bpf_map_memory memory; - bool unpriv_array; - bool frozen; /* write-once */ - /* 48 bytes hole */ + char name[BPF_OBJ_NAME_LEN]; + u32 btf_vmlinux_value_type_id; + bool bypass_spec_v1; + bool frozen; /* write-once; write-protected by freeze_mutex */ + /* 22 bytes hole */ /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. */ - atomic_t refcnt ____cacheline_aligned; - atomic_t usercnt; + atomic64_t refcnt ____cacheline_aligned; + atomic64_t usercnt; struct work_struct work; - char name[BPF_OBJ_NAME_LEN]; + struct mutex freeze_mutex; + u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -110,30 +166,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map) return map->spin_lock_off >= 0; } -static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) +static inline bool map_value_has_timer(const struct bpf_map *map) { - if (likely(!map_value_has_spin_lock(map))) - return; - *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = - (struct bpf_spin_lock){}; + return map->timer_off >= 0; } -/* copy everything but bpf_spin_lock */ +static inline void check_and_init_map_value(struct bpf_map *map, void *dst) +{ + if (unlikely(map_value_has_spin_lock(map))) + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; + if (unlikely(map_value_has_timer(map))) + *(struct bpf_timer *)(dst + map->timer_off) = + (struct bpf_timer){}; +} + +/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. 
*/ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) { - if (unlikely(map_value_has_spin_lock(map))) { - u32 off = map->spin_lock_off; + u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0; - memcpy(dst, src, off); - memcpy(dst + off + sizeof(struct bpf_spin_lock), - src + off + sizeof(struct bpf_spin_lock), - map->value_size - off - sizeof(struct bpf_spin_lock)); + if (unlikely(map_value_has_spin_lock(map))) { + s_off = map->spin_lock_off; + s_sz = sizeof(struct bpf_spin_lock); + } else if (unlikely(map_value_has_timer(map))) { + t_off = map->timer_off; + t_sz = sizeof(struct bpf_timer); + } + + if (unlikely(s_sz || t_sz)) { + if (s_off < t_off || !s_sz) { + swap(s_off, t_off); + swap(s_sz, t_sz); + } + memcpy(dst, src, t_off); + memcpy(dst + t_off + t_sz, + src + t_off + t_sz, + s_off - t_off - t_sz); + memcpy(dst + s_off + s_sz, + src + s_off + s_sz, + map->value_size - s_off - s_sz); } else { memcpy(dst, src, map->value_size); } } void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); +void bpf_timer_cancel_and_free(void *timer); struct bpf_offload_dev; struct bpf_offloaded_map; @@ -168,7 +247,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map) static inline bool bpf_map_support_seq_show(const struct bpf_map *map) { - return map->btf && map->ops->map_seq_show_elem; + return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && + map->ops->map_seq_show_elem; } int map_check_no_btf(const struct bpf_map *map, @@ -205,12 +285,22 @@ enum bpf_arg_type { ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ + ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ ARG_PTR_TO_INT, /* pointer to int */ ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ + ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ + ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ + ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ + ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ + ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ + __BPF_ARG_TYPE_MAX, }; /* type of values returned from helper functions */ @@ -222,6 +312,8 @@ enum bpf_return_type { RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ + RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -233,11 +325,23 @@ struct bpf_func_proto { bool gpl_only; bool pkt_access; enum bpf_return_type ret_type; - enum bpf_arg_type arg1_type; - enum bpf_arg_type arg2_type; - enum bpf_arg_type arg3_type; - enum bpf_arg_type arg4_type; - enum bpf_arg_type arg5_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; 
+ enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + int *btf_id; /* BTF ids of arguments */ + bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is + * valid. Often used if more + * than one btf id is permitted + * for this argument. + */ + int *ret_btf_id; /* return value btf_id */ }; /* bpf_context is intentionally undefined structure. Pointer to bpf_context is @@ -281,6 +385,16 @@ enum bpf_reg_type { PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ + PTR_TO_BTF_ID, /* reg points to kernel struct */ + PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ + PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ + PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ + PTR_TO_MEM, /* reg points to valid memory region */ + PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ + PTR_TO_FUNC, /* reg points to a bpf program function */ + PTR_TO_MAP_KEY, /* reg points to a map element key */ }; /* The information passed from prog-specific *_is_valid_access @@ -288,7 +402,11 @@ enum bpf_reg_type { */ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; - int ctx_field_size; + union { + int ctx_field_size; + u32 btf_id; + }; + struct bpf_verifier_log *log; /* for verbose logs */ }; static inline void @@ -297,6 +415,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) aux->ctx_field_size = size; } +static inline bool bpf_pseudo_func(const struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && + insn->src_reg == BPF_PSEUDO_FUNC; +} + struct bpf_prog_ops { int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); @@ -322,6 +446,10 @@ struct bpf_verifier_ops { const struct bpf_insn *src, struct bpf_insn *dst, struct bpf_prog *prog, u32 *target_size); + int (*btf_struct_access)(struct bpf_verifier_log *log, + const struct btf_type *t, int off, int size, + enum bpf_access_type atype, + u32 *next_btf_id); }; struct bpf_prog_offload_ops { @@ -359,14 +487,225 @@ enum bpf_cgroup_storage_type { #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX +/* The longest tracepoint has 12 args. + * See include/trace/bpf_probe.h + */ +#define MAX_BPF_FUNC_ARGS 12 + struct bpf_prog_stats { u64 cnt; u64 nsecs; struct u64_stats_sync syncp; }; +struct btf_func_model { + u8 ret_size; + u8 nr_args; + u8 arg_size[MAX_BPF_FUNC_ARGS]; +}; + +/* Restore arguments before returning from trampoline to let original function + * continue executing. This flag is used for fentry progs when there are no + * fexit progs. + */ +#define BPF_TRAMP_F_RESTORE_REGS BIT(0) +/* Call original function after fentry progs, but before fexit progs. + * Makes sense for fentry/fexit, normal calls and indirect calls. + */ +#define BPF_TRAMP_F_CALL_ORIG BIT(1) +/* Skip current frame and return to parent. Makes sense for fentry/fexit + * programs only. Should not be used with normal calls and indirect calls. + */ +#define BPF_TRAMP_F_SKIP_FRAME BIT(2) + +/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 + * bytes on x86. 
Pick a number to fit into BPF_IMAGE_SIZE / 2 + */ +#define BPF_MAX_TRAMP_PROGS 40 + +struct bpf_tramp_progs { + struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; + int nr_progs; +}; + +/* Different use cases for BPF trampoline: + * 1. replace nop at the function entry (kprobe equivalent) + * flags = BPF_TRAMP_F_RESTORE_REGS + * fentry = a set of programs to run before returning from trampoline + * + * 2. replace nop at the function entry (kprobe + kretprobe equivalent) + * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME + * orig_call = fentry_ip + MCOUNT_INSN_SIZE + * fentry = a set of programs to run before calling original function + * fexit = a set of programs to run after original function + * + * 3. replace direct call instruction anywhere in the function body + * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) + * With flags = 0 + * fentry = a set of programs to run before returning from trampoline + * With flags = BPF_TRAMP_F_CALL_ORIG + * orig_call = original callback addr or direct function addr + * fentry = a set of programs to run before calling original function + * fexit = a set of programs to run after original function + */ +int arch_prepare_bpf_trampoline(void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_progs *tprogs, + void *orig_call); +/* these two functions are called from generated trampoline */ +u64 notrace __bpf_prog_enter(void); +void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY, + BPF_TRAMP_FEXIT, + BPF_TRAMP_MODIFY_RETURN, + BPF_TRAMP_MAX, + BPF_TRAMP_REPLACE, /* more than MAX */ +}; + +struct bpf_trampoline { + /* hlist for trampoline_table */ + struct hlist_node hlist; + /* serializes access to fields of this trampoline */ + struct mutex mutex; + refcount_t refcnt; + u64 key; + struct { + struct btf_func_model model; + void *addr; + } func; + /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF + * program by replacing one of its functions. func.addr is the address + * of the function it replaced. + */ + struct bpf_prog *extension_prog; + /* list of BPF programs using this trampoline */ + struct hlist_head progs_hlist[BPF_TRAMP_MAX]; + /* Number of attached programs. A counter per kind.
*/ + int progs_cnt[BPF_TRAMP_MAX]; + /* Executable image of trampoline */ + void *image; + u64 selector; +}; + +#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + /* dispatcher mutex */ + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; + int num_progs; + void *image; + u32 image_off; +}; + +static __always_inline unsigned int bpf_dispatcher_nopfunc( + const void *ctx, + const struct bpf_insn *insnsi, + unsigned int (*bpf_func)(const void *, + const struct bpf_insn *)) +{ + return bpf_func(ctx, insnsi); +} +#ifdef CONFIG_BPF_JIT +struct bpf_trampoline *bpf_trampoline_lookup(u64 key); +int bpf_trampoline_link_prog(struct bpf_prog *prog); +int bpf_trampoline_unlink_prog(struct bpf_prog *prog); +void bpf_trampoline_put(struct bpf_trampoline *tr); +void *bpf_jit_alloc_exec_page(void); +#define BPF_DISPATCHER_INIT(name) { \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + .func = &name##func, \ + .progs = {}, \ + .num_progs = 0, \ + .image = NULL, \ + .image_off = 0 \ +} + +#define DEFINE_BPF_DISPATCHER(name) \ + noinline unsigned int name##func( \ + const void *ctx, \ + const struct bpf_insn *insnsi, \ + unsigned int (*bpf_func)(const void *, \ + const struct bpf_insn *)) \ + { \ + return bpf_func(ctx, insnsi); \ + } \ + EXPORT_SYMBOL(name##func); \ + struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name); +#define DECLARE_BPF_DISPATCHER(name) \ + unsigned int name##func( \ + const void *ctx, \ + const struct bpf_insn *insnsi, \ + unsigned int (*bpf_func)(const void *, \ + const struct bpf_insn *)); \ + extern struct bpf_dispatcher name; +#define BPF_DISPATCHER_FUNC(name) name##func +#define BPF_DISPATCHER_PTR(name) (&name) +void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, + struct bpf_prog *to); +#else +static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) +{ + return NULL; +} +static inline int bpf_trampoline_link_prog(struct bpf_prog *prog) +{ + return -ENOTSUPP; +} +static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog) +{ + return -ENOTSUPP; +} +static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} +#define DEFINE_BPF_DISPATCHER(name) +#define DECLARE_BPF_DISPATCHER(name) +#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc +#define BPF_DISPATCHER_PTR(name) NULL +static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, + struct bpf_prog *from, + struct bpf_prog *to) {} +#endif + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL, +}; + +/* Descriptor of pokes pointing /into/ the JITed image. 
*/ +struct bpf_jit_poke_descriptor { + void *ip; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool ip_stable; + u8 adj_off; + u16 reason; +}; + +/* reg_type info for ctx arguments */ +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + u32 btf_id; +}; + struct bpf_prog_aux { - atomic_t refcnt; + atomic64_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; u32 max_pkt_offset; @@ -375,10 +714,27 @@ struct bpf_prog_aux { u32 id; u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ + u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + const struct bpf_ctx_arg_aux *ctx_arg_info; + struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. */ bool offload_requested; + bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ + bool func_proto_unreliable; + enum bpf_tramp_prog_type trampoline_prog_type; + struct bpf_trampoline *trampoline; + struct hlist_node tramp_hlist; + /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ + const struct btf_type *attach_func_proto; + /* function name for valid attach_btf_id */ + const char *attach_func_name; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ + struct bpf_jit_poke_descriptor *poke_tab; + u32 size_poke_tab; struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_prog_ops *ops; @@ -394,6 +750,7 @@ struct bpf_prog_aux { struct bpf_prog_offload *offload; struct btf *btf; struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; /* bpf_line_info loaded from userspace. linfo->insn_off * has the xlated insn offset. * Both the main and sub prog share the same linfo. 
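Editor's aside, not part of the patch: the DEFINE_BPF_DISPATCHER()/DECLARE_BPF_DISPATCHER() machinery a few hunks above is easiest to read next to a caller. A hedged sketch under that assumption; the name my_disp and both wrappers are invented (the first in-tree user of this infrastructure is the XDP dispatcher):

#include <linux/bpf.h>
#include <linux/filter.h>

DEFINE_BPF_DISPATCHER(my_disp)	/* emits my_dispfunc() and struct bpf_dispatcher my_disp */

static unsigned int my_run_prog(const void *ctx, const struct bpf_prog *prog)
{
	/* BPF_DISPATCHER_FUNC(my_disp) expands to my_dispfunc(); until the
	 * dispatcher image is populated it falls back to the plain indirect
	 * call through bpf_func.
	 */
	return BPF_DISPATCHER_FUNC(my_disp)(ctx, prog->insnsi, prog->bpf_func);
}

static void my_swap_prog(struct bpf_prog *from, struct bpf_prog *to)
{
	/* Re-patch the dispatcher image so "to" is reached by a direct call
	 * rather than a retpolined indirect call.
	 */
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_disp), from, to);
}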
@@ -416,6 +773,8 @@ struct bpf_prog_aux { * main prog always has linfo_idx == 0 */ u32 linfo_idx; + u32 num_exentries; + struct exception_table_entry *extable; struct bpf_prog_stats __percpu *stats; union { struct work_struct work; @@ -423,17 +782,93 @@ struct bpf_prog_aux { }; }; +struct bpf_struct_ops_value; +struct btf_type; +struct btf_member; + +#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *btf); + int (*check_member)(const struct btf_type *t, + const struct btf_member *member); + int (*init_member)(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata); + int (*reg)(void *kdata); + void (*unreg)(void *kdata); + const struct btf_type *type; + const struct btf_type *value_type; + const char *name; + struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; + u32 type_id; + u32 value_id; +}; + +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) +#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) +const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); +void bpf_struct_ops_init(struct btf *btf); +bool bpf_struct_ops_get(const void *kdata); +void bpf_struct_ops_put(const void *kdata); +int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, + void *value); +static inline bool bpf_try_module_get(const void *data, struct module *owner) +{ + if (owner == BPF_MODULE_OWNER) + return bpf_struct_ops_get(data); + else + return try_module_get(owner); +} +static inline void bpf_module_put(const void *data, struct module *owner) +{ + if (owner == BPF_MODULE_OWNER) + bpf_struct_ops_put(data); + else + module_put(owner); +} +#else +static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) +{ + return NULL; +} +static inline void bpf_struct_ops_init(struct btf *btf) { } +static inline bool bpf_try_module_get(const void *data, struct module *owner) +{ + return try_module_get(owner); +} +static inline void bpf_module_put(const void *data, struct module *owner) +{ + module_put(owner); +} +static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, + void *key, + void *value) +{ + return -EINVAL; +} +#endif + +struct bpf_array_aux { + /* 'Ownership' of prog array is claimed by the first program that + * is going to use this map or by the first program which FD is + * stored in the map to make sure that all callers and callees have + * the same prog type and JITed flag. + */ + enum bpf_prog_type type; + bool jited; + /* Programs with direct jumps into programs part of this array. 
*/ + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + struct bpf_array { struct bpf_map map; u32 elem_size; u32 index_mask; - /* 'ownership' of prog_array is claimed by the first program that - * is going to use this map or by the first program which FD is stored - * in the map to make sure that all callers and callees have the same - * prog_type and JITed flag - */ - enum bpf_prog_type owner_prog_type; - bool owner_jited; + struct bpf_array_aux *aux; union { char value[0] __aligned(8); void *ptrs[0] __aligned(8); @@ -482,6 +917,7 @@ struct bpf_event_entry { bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp); +const char *kernel_type_name(u32 btf_type_id); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -527,6 +963,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog); int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); @@ -535,7 +974,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *include_prog, struct bpf_prog_array **new_array); -#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ +#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ ({ \ struct bpf_prog_array_item *_item; \ struct bpf_prog *_prog; \ @@ -548,7 +987,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, goto _out; \ _item = &_array->items[0]; \ while ((_prog = READ_ONCE(_item->prog))) { \ - bpf_cgroup_storage_set(_item->cgroup_storage); \ + if (set_cg_storage) \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ _ret &= func(_prog, ctx); \ _item++; \ } \ @@ -609,36 +1049,42 @@ _out: \ }) #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, false) + __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, true) + __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); +extern struct mutex bpf_stats_enabled_mutex; extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; +extern const struct file_operations bpf_iter_fops; -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ extern const struct bpf_verifier_ops _name ## _verifier_ops; #define BPF_MAP_TYPE(_id, _ops) \ extern const struct bpf_map_ops _ops; +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE extern const struct bpf_prog_ops bpf_offload_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; extern const struct bpf_verifier_ops xdp_analyzer_ops; +struct bpf_prog *bpf_prog_by_id(u32 id); + struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv); -struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_add(struct bpf_prog *prog, int i); void bpf_prog_sub(struct bpf_prog *prog, int 
i); -struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); +void bpf_prog_inc(struct bpf_prog *prog); struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); @@ -647,11 +1093,12 @@ void __bpf_prog_uncharge(struct user_struct *user, u32 pages); void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); +struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); -struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, - bool uref); +void bpf_map_inc(struct bpf_map *map); +void bpf_map_inc_with_uref(struct bpf_map *map); +struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); @@ -661,17 +1108,123 @@ void bpf_map_charge_finish(struct bpf_map_memory *mem); void bpf_map_charge_move(struct bpf_map_memory *dst, struct bpf_map_memory *src); void *bpf_map_area_alloc(u64 size, int numa_node); +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); +struct bpf_map *bpf_map_get_curr_or_next(u32 *id); +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); extern int sysctl_unprivileged_bpf_disabled; +static inline bool bpf_allow_ptr_leaks(void) +{ + return perfmon_capable(); +} + +static inline bool bpf_bypass_spec_v1(void) +{ + return perfmon_capable(); +} + +static inline bool bpf_bypass_spec_v4(void) +{ + return perfmon_capable(); +} + int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + int (*detach)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); +}; + +void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog); +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); +int bpf_link_settle(struct bpf_link_primer *primer); +void bpf_link_cleanup(struct bpf_link_primer *primer); +void bpf_link_inc(struct bpf_link *link); +void bpf_link_put(struct bpf_link *link); +int bpf_link_new_fd(struct bpf_link *link); +struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); +struct bpf_link *bpf_link_get_from_fd(u32 ufd); + int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname, int flags); +#define BPF_ITER_FUNC_PREFIX "bpf_iter_" +#define DEFINE_BPF_ITER_FUNC(target, args...) 
\ + extern int bpf_iter_ ## target(args); \ + int __init bpf_iter_ ## target(args) { return 0; } + +struct bpf_iter_aux_info { + struct bpf_map *map; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); + +#define BPF_ITER_CTX_ARG_MAX 2 +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + u32 ctx_arg_info_size; + struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + __bpf_md_ptr(struct seq_file *, seq); + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter__bpf_map_elem { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(void *, key); + __bpf_md_ptr(void *, value); +}; + +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); +bool bpf_iter_prog_supported(struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_iter_new_fd(struct bpf_link *link); +bool bpf_link_is_iter(struct bpf_link *link); +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); + +int map_set_for_each_callback_args(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee); + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, @@ -708,10 +1261,18 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) *ldst++ = *lsrc++; } +static inline bool bpf_allow_ptr_to_map_access(void) +{ + return perfmon_capable(); +} + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, union bpf_attr __user *uattr); + +#ifndef CONFIG_BPF_JIT_ALWAYS_ON void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); +#endif /* Map specifics */ struct xdp_buff; @@ -744,9 +1305,37 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +bool btf_ctx_access(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info); +int btf_struct_access(struct bpf_verifier_log *log, + const struct btf_type *t, int off, int size, + enum bpf_access_type atype, + u32 *next_btf_id); +int btf_resolve_helper_id(struct bpf_verifier_log *log, + const struct bpf_func_proto *fn, int); + +int btf_distill_func_proto(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *func_proto, + const char *func_name, + struct btf_func_model *m); + +struct bpf_reg_state; +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *regs); +int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *reg); + +int 
btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, + struct btf *btf, const struct btf_type *t); + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -760,10 +1349,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, return ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, - int i) +static inline void bpf_prog_add(struct bpf_prog *prog, int i) { - return ERR_PTR(-EOPNOTSUPP); } static inline void bpf_prog_sub(struct bpf_prog *prog, int i) @@ -774,9 +1361,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog) { } -static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) +static inline void bpf_prog_inc(struct bpf_prog *prog) { - return ERR_PTR(-EOPNOTSUPP); } static inline struct bpf_prog *__must_check @@ -871,12 +1457,28 @@ static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, return -ENOTSUPP; } +static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { return -ENOTSUPP; } + +static inline void bpf_map_put(struct bpf_map *map) +{ +} + +static inline struct bpf_prog *bpf_prog_by_id(u32 id) +{ + return ERR_PTR(-ENOTSUPP); +} #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, @@ -956,11 +1558,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); #else static inline int sock_map_prog_update(struct bpf_map *map, - struct bpf_prog *prog, u32 which) + struct bpf_prog *prog, + struct bpf_prog *old, u32 which) { return -EOPNOTSUPP; } @@ -970,7 +1577,13 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, { return -EINVAL; } -#endif + +static inline int sock_map_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_BPF_STREAM_PARSER */ #if defined(CONFIG_XDP_SOCKETS) struct xdp_sock; @@ -1042,6 +1655,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_proto; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; @@ -1055,6 +1669,28 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto; extern const struct bpf_func_proto bpf_strtol_proto; extern const struct bpf_func_proto bpf_strtoul_proto; extern const struct bpf_func_proto bpf_tcp_sock_proto; +extern const struct bpf_func_proto bpf_jiffies64_proto; +extern const struct bpf_func_proto bpf_ringbuf_output_proto; 
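Editor's aside, not part of the patch: the bpf_ringbuf_* func_proto declarations here register the kernel side of the ring buffer helpers. For orientation, a minimal BPF-program-side sketch of the reserve/submit contract in libbpf style; the map events, struct event, and the chosen tracepoint are invented for the example:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* must be a page-aligned power of 2 */
} events SEC(".maps");

SEC("tracepoint/sched/sched_process_exec")
int handle_exec(void *ctx)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
	if (!e)
		return 0;	/* reservation fails when the buffer is full */

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit(e, 0);	/* or bpf_ringbuf_discard(e, 0) to drop it */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";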
+extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; +extern const struct bpf_func_proto bpf_ringbuf_submit_proto; +extern const struct bpf_func_proto bpf_ringbuf_discard_proto; +extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; +extern const struct bpf_func_proto bpf_for_each_map_elem_proto; +extern const struct bpf_func_proto bpf_timer_init_proto; +extern const struct bpf_func_proto bpf_timer_set_callback_proto; +extern const struct bpf_func_proto bpf_timer_start_proto; +extern const struct bpf_func_proto bpf_timer_cancel_proto; + +const struct bpf_func_proto *bpf_tracing_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); + +const struct bpf_func_proto *tracing_prog_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); @@ -1095,6 +1731,15 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, #endif #ifdef CONFIG_INET +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info); @@ -1145,4 +1790,12 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, } #endif /* CONFIG_INET */ +enum bpf_text_poke_type { + BPF_MOD_CALL, + BPF_MOD_JUMP, +}; + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *addr1, void *addr2); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h new file mode 100644 index 000000000000..b2c9463f36a1 --- /dev/null +++ b/include/linux/bpf_local_storage.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 Facebook + * Copyright 2020 Google LLC. + */ + +#ifndef _BPF_LOCAL_STORAGE_H +#define _BPF_LOCAL_STORAGE_H + +#include <linux/bpf.h> +#include <linux/rculist.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <uapi/linux/btf.h> + +#define BPF_LOCAL_STORAGE_CACHE_SIZE 16 + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +/* The map is not the primary owner of a bpf_local_storage_elem. + * Instead, the container object (e.g. sk->sk_bpf_storage) is. + * + * The map (bpf_local_storage_map) is for two purposes + * 1. Define the size of the "local storage". It is + * the map's value_size. + * + * 2. Maintain a list to keep track of all elems such + * that they can be cleaned up during the map destruction. + * + * When a bpf local storage is being looked up for a + * particular object, the "bpf_map" pointer is actually used + * as the "key" to search in the list of elem in + * the respective bpf_local_storage owned by the object. + * + * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer + * as the searching key. + */ +struct bpf_local_storage_map { + struct bpf_map map; + /* Lookup elem does not require accessing the map. + * + * Updating/Deleting requires a bucket lock to + * link/unlink the elem from the map. Having
Having + * multiple buckets to improve contention. + */ + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; +}; + +struct bpf_local_storage_data { + /* smap is used as the searching key when looking up + * from the object's bpf_local_storage. + * + * Put it in the same cacheline as the data to minimize + * the number of cachelines access during the cache hit case. + */ + struct bpf_local_storage_map __rcu *smap; + u8 data[] __aligned(8); +}; + +/* Linked to bpf_local_storage and bpf_local_storage_map */ +struct bpf_local_storage_elem { + struct hlist_node map_node; /* Linked to bpf_local_storage_map */ + struct hlist_node snode; /* Linked to bpf_local_storage */ + struct bpf_local_storage __rcu *local_storage; + struct rcu_head rcu; + /* 8 bytes hole */ + /* The data is stored in aother cacheline to minimize + * the number of cachelines access during a cache hit. + */ + struct bpf_local_storage_data sdata ____cacheline_aligned; +}; + +struct bpf_local_storage { + struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE]; + struct hlist_head list; /* List of bpf_local_storage_elem */ + void *owner; /* The object that owns the above "list" of + * bpf_local_storage_elem. + */ + struct rcu_head rcu; + raw_spinlock_t lock; /* Protect adding/removing from the "list" */ +}; + +/* U16_MAX is much more than enough for sk local storage + * considering a tcp_sock is ~2k. + */ +#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \ + min_t(u32, \ + (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \ + sizeof(struct bpf_local_storage_elem)), \ + (U16_MAX - sizeof(struct bpf_local_storage_elem))) + +#define SELEM(_SDATA) \ + container_of((_SDATA), struct bpf_local_storage_elem, sdata) +#define SDATA(_SELEM) (&(_SELEM)->sdata) + +#define BPF_LOCAL_STORAGE_CACHE_SIZE 16 + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE]; +}; + +#define DEFINE_BPF_STORAGE_CACHE(name) \ +static struct bpf_local_storage_cache name = { \ + .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \ +} + +u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache); +void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache, + u16 idx); + +/* Helper functions for bpf_local_storage */ +int bpf_local_storage_map_alloc_check(union bpf_attr *attr); + +struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr); + +struct bpf_local_storage_data * +bpf_local_storage_lookup(struct bpf_local_storage *local_storage, + struct bpf_local_storage_map *smap, + bool cacheit_lockit); + +void bpf_local_storage_map_free(struct bpf_local_storage_map *smap); + +int bpf_local_storage_map_check_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type); + +void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage, + struct bpf_local_storage_elem *selem); + +bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage, + struct bpf_local_storage_elem *selem, + bool uncharge_omem); + +void bpf_selem_unlink(struct bpf_local_storage_elem *selem); + +void bpf_selem_link_map(struct bpf_local_storage_map *smap, + struct bpf_local_storage_elem *selem); + +void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem); + +struct bpf_local_storage_elem * +bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, + bool charge_mem); + +int +bpf_local_storage_alloc(void *owner, + struct 
bpf_local_storage_map *smap, + struct bpf_local_storage_elem *first_selem); + +struct bpf_local_storage_data * +bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, + void *value, u64 map_flags); + +#endif /* _BPF_LOCAL_STORAGE_H */ diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h new file mode 100644 index 000000000000..aaacb6aafc87 --- /dev/null +++ b/include/linux/bpf_lsm.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2020 Google LLC. + */ + +#ifndef _LINUX_BPF_LSM_H +#define _LINUX_BPF_LSM_H + +#include <linux/bpf.h> +#include <linux/lsm_hooks.h> + +#ifdef CONFIG_BPF_LSM + +#define LSM_HOOK(RET, DEFAULT, NAME, ...) \ + RET bpf_lsm_##NAME(__VA_ARGS__); +#include <linux/lsm_hook_defs.h> +#undef LSM_HOOK + +struct bpf_storage_blob { + struct bpf_local_storage __rcu *storage; +}; + +extern struct lsm_blob_sizes bpf_lsm_blob_sizes; + +int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog); + +static inline struct bpf_storage_blob *bpf_inode( + const struct inode *inode) +{ + if (unlikely(!inode->i_security)) + return NULL; + + return inode->i_security + bpf_lsm_blob_sizes.lbs_inode; +} + +extern const struct bpf_func_proto bpf_inode_storage_get_proto; +extern const struct bpf_func_proto bpf_inode_storage_delete_proto; +void bpf_inode_storage_free(struct inode *inode); + +#else /* !CONFIG_BPF_LSM */ + +static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline struct bpf_storage_blob *bpf_inode( + const struct inode *inode) +{ + return NULL; +} + +static inline void bpf_inode_storage_free(struct inode *inode) +{ +} + +#endif /* CONFIG_BPF_LSM */ + +#endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 36a9c2325176..2e6f568377f1 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -2,41 +2,80 @@ /* internal file - do not include directly */ #ifdef CONFIG_NET -BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) -BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) -BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) -BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp, + struct xdp_md, struct xdp_buff) #ifdef CONFIG_CGROUP_BPF -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock, + struct bpf_sock, struct sock) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr, + struct bpf_sock_addr, struct bpf_sock_addr_kern) #endif -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) -BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) -BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in, + struct __sk_buff, struct sk_buff) 
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops, + struct bpf_sock_ops, struct bpf_sock_ops_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg, + struct sk_msg_md, struct sk_msg) +BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector, + struct __sk_buff, struct bpf_flow_dissector) #endif #ifdef CONFIG_BPF_EVENTS -BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) -BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) -BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) -BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) -BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) +BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe, + bpf_user_pt_regs_t, struct pt_regs) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint, + __u64, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event, + struct bpf_perf_event_data, struct bpf_perf_event_data_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint, + struct bpf_raw_tracepoint_args, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable, + struct bpf_raw_tracepoint_args, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing, + void *, void *) #endif #ifdef CONFIG_CGROUP_BPF -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev, + struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl, + struct bpf_sysctl, struct bpf_sysctl_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt, + struct bpf_sockopt, struct bpf_sockopt_kern) #endif #ifdef CONFIG_BPF_LIRC_MODE2 -BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) +BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, + __u32, u32) #endif #ifdef CONFIG_INET -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, + struct sk_reuseport_md, struct sk_reuseport_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup, + struct bpf_sk_lookup, struct bpf_sk_lookup_kern) +#endif +#if defined(CONFIG_BPF_JIT) +BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, + void *, void *) +BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension, + void *, void *) +#ifdef CONFIG_BPF_LSM +BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, + void *, void *) +#endif /* CONFIG_BPF_LSM */ #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) @@ -68,6 +107,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) #endif +#ifdef CONFIG_BPF_LSM +BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops) +#endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) #if defined(CONFIG_XDP_SOCKETS) BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) @@ -78,3 +120,17 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) +#if defined(CONFIG_BPF_JIT) +BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops) + 
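Editor's aside, not part of the patch: bpf_types.h is an x-macro list; consumers redefine BPF_PROG_TYPE/BPF_MAP_TYPE/BPF_LINK_TYPE and then include the header, exactly as the bpf.h hunk earlier in this patch does. A hypothetical consumer that builds a prog-type name table while ignoring the two new context-type parameters:

#include <linux/bpf.h>	/* enum bpf_prog_type values for the designators */

static const char * const prog_type_names[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = #_name,
#define BPF_MAP_TYPE(_id, _ops)		/* maps not wanted here */
#define BPF_LINK_TYPE(_id, _name)	/* links not wanted here */
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};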
+BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint) +BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing) +#ifdef CONFIG_CGROUP_BPF +BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) +#endif +BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) +#ifdef CONFIG_NET +BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +#endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 26a6d58ca78c..bb76fcc79fc3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -50,10 +50,22 @@ struct bpf_reg_state { /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ - struct bpf_map *map_ptr; + struct { + struct bpf_map *map_ptr; + /* To distinguish map lookups from outer map + * the map_uid is non-zero for registers + * pointing to inner maps. + */ + u32 map_uid; + }; + + u32 btf_id; /* for PTR_TO_BTF_ID */ + u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ /* Max size from any of the above. */ unsigned long raw; + + u32 subprogno; /* for PTR_TO_FUNC */ }; /* Fixed part of pointer offset, pointer types only */ s32 off; @@ -61,6 +73,8 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation + * for the purpose of tracking that it's freed. * For PTR_TO_SOCKET this is used to share which pointers retain the * same reference to the socket, to determine proper reference freeing. */ @@ -181,6 +195,14 @@ struct bpf_func_state { * zero == main subprog */ u32 subprogno; + /* Every bpf_timer_start will increment async_entry_cnt. + * It's used to distinguish: + * void foo(void) { for(;;); } + * void foo(void) { bpf_timer_set_callback(,foo); } + */ + u32 async_entry_cnt; + bool in_callback_fn; + bool in_async_callback_fn; /* The following fields should be last. See copy_func_state() */ int acquired_refs; @@ -194,6 +216,13 @@ struct bpf_idx_pair { u32 idx; }; +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +/* Maximum number of register states that can exist at once */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ @@ -281,31 +310,42 @@ struct bpf_verifier_state_list { }; /* Possible states for alu_state member. 
*/ -#define BPF_ALU_SANITIZE_SRC 1U -#define BPF_ALU_SANITIZE_DST 2U +#define BPF_ALU_SANITIZE_SRC (1U << 0) +#define BPF_ALU_SANITIZE_DST (1U << 1) #define BPF_ALU_NEG_VALUE (1U << 2) #define BPF_ALU_NON_POINTER (1U << 3) +#define BPF_ALU_IMMEDIATE (1U << 4) #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ BPF_ALU_SANITIZE_DST) struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ - unsigned long map_state; /* pointer/poison value for maps */ + unsigned long map_ptr_state; /* pointer/poison value for maps */ s32 call_imm; /* saved imm field of call insn */ u32 alu_limit; /* limit for add/sub register with pointer */ struct { u32 map_index; /* index into used_maps[] */ u32 map_off; /* offset from value base address */ }; + struct { + enum bpf_reg_type reg_type; /* type of pseudo_btf_id */ + union { + u32 btf_id; /* btf_id for struct typed var */ + u32 mem_size; /* mem_size for non-struct typed var */ + }; + } btf_var; }; + u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ - bool seen; /* this insn was processed by the verifier */ + u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ - bool prune_point; + + /* below fields are initialized once */ unsigned int orig_idx; /* original instruction index */ + bool prune_point; }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -330,18 +370,23 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) #define BPF_LOG_STATS 4 #define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) #define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) +#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) { - return log->level && log->ubuf && !bpf_verifier_log_full(log); + return (log->level && log->ubuf && !bpf_verifier_log_full(log)) || + log->level == BPF_LOG_KERNEL; } #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { + /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. 
stack depth used by this function */ + bool has_tail_call; + bool is_async_cb; }; /* single container for all structs @@ -362,17 +407,24 @@ struct bpf_verifier_env { struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; + bool allow_ptr_to_map_access; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; struct { int *insn_state; int *insn_stack; int cur_stack; } cfg; + u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ u32 prev_insn_processed, insn_processed; @@ -397,6 +449,8 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args); __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); +__printf(2, 3) void bpf_log(struct bpf_verifier_log *log, + const char *fmt, ...); static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { @@ -420,4 +474,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, void bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); +int check_ctx_reg(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno); + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 960988d42f77..d49a5b1f5f11 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -11,6 +11,7 @@ #include <linux/blkdev.h> #include <scsi/scsi_request.h> +#include <linux/kabi.h> struct request; struct device; @@ -60,6 +61,9 @@ struct bsg_job { struct bio *bidi_bio; void *dd_data; /* Used for driver-specific storage */ + + KABI_RESERVE(1); + KABI_RESERVE(2); }; void bsg_job_done(struct bsg_job *job, int result, diff --git a/include/linux/bsg.h b/include/linux/bsg.h index dac37b6e00ec..79b27efa0c83 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -3,6 +3,7 @@ #define _LINUX_BSG_H #include <uapi/linux/bsg.h> +#include <linux/kabi.h> struct request; @@ -13,6 +14,9 @@ struct bsg_ops { fmode_t mode); int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); void (*free_rq)(struct request *rq); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct bsg_class_device { @@ -20,6 +24,9 @@ struct bsg_class_device { int minor; struct request_queue *queue; const struct bsg_ops *ops; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; int bsg_register_queue(struct request_queue *q, struct device *parent, diff --git a/include/linux/btf.h b/include/linux/btf.h index 64cdf2a23d42..a20a8c1fd86f 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -5,6 +5,9 @@ #define _LINUX_BTF_H 1 #include <linux/types.h> +#include <uapi/linux/btf.h> + +#define BTF_TYPE_EMIT(type) ((void)(type *)0) struct btf; struct btf_member; @@ -13,6 +16,7 @@ union bpf_attr; extern const struct file_operations btf_fops; +void btf_get(struct btf *btf); void btf_put(struct btf *btf); int btf_new_fd(const union bpf_attr *attr); struct btf *btf_get_by_fd(int fd); @@ -51,11 +55,109 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 
expected_offset, u32 expected_size); int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); +int btf_find_timer(const struct btf *btf, const struct btf_type *t); bool btf_type_is_void(const struct btf_type *t); +s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind); +const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, + u32 id, u32 *res_id); +const struct btf_type *btf_type_resolve_ptr(const struct btf *btf, + u32 id, u32 *res_id); +const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, + u32 id, u32 *res_id); +const struct btf_type * +btf_resolve_size(const struct btf *btf, const struct btf_type *type, + u32 *type_size, const struct btf_type **elem_type, + u32 *total_nelems); + +#define for_each_member(i, struct_type, member) \ + for (i = 0, member = btf_type_member(struct_type); \ + i < btf_type_vlen(struct_type); \ + i++, member++) + +static inline bool btf_type_is_ptr(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_PTR; +} + +static inline bool btf_type_is_int(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_INT; +} + +static inline bool btf_type_is_enum(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; +} + +static inline bool btf_type_is_typedef(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF; +} + +static inline bool btf_type_is_func(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC; +} + +static inline bool btf_type_is_func_proto(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO; +} + +static inline bool btf_type_is_var(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_VAR; +} + +/* union is only a special case of struct: + * all its offsetof(member) == 0 + */ +static inline bool btf_type_is_struct(const struct btf_type *t) +{ + u8 kind = BTF_INFO_KIND(t->info); + + return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; +} + +static inline u16 btf_type_vlen(const struct btf_type *t) +{ + return BTF_INFO_VLEN(t->info); +} + +static inline u16 btf_func_linkage(const struct btf_type *t) +{ + return BTF_INFO_VLEN(t->info); +} + +static inline bool btf_type_kflag(const struct btf_type *t) +{ + return BTF_INFO_KFLAG(t->info); +} + +static inline u32 btf_member_bit_offset(const struct btf_type *struct_type, + const struct btf_member *member) +{ + return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset) + : member->offset; +} + +static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type, + const struct btf_member *member) +{ + return btf_type_kflag(struct_type) ? 
BTF_MEMBER_BITFIELD_SIZE(member->offset) + : 0; +} + +static inline const struct btf_member *btf_type_member(const struct btf_type *t) +{ + return (const struct btf_member *)(t + 1); +} #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); +struct btf *btf_parse_vmlinux(void); +struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h new file mode 100644 index 000000000000..a1d9715cfb17 --- /dev/null +++ b/include/linux/btf_ids.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#ifdef CONFIG_DEBUG_INFO_BTF + +#include <linux/compiler.h> /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name, scope) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +"." #scope " " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ +extern u32 name[]; + +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#else + +#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID(prefix, name) +#define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; + +#endif /* CONFIG_DEBUG_INFO_BTF */ + +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
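The list macros are easiest to read next to a concrete use that matches the layout shown in the comment above __BTF_ID_LIST: every BTF_ID() emits a local, zero-filled 4-byte object into .BTF_ids, and resolve_btfids rewrites those words with real type IDs at link time. A usage sketch:

	BTF_ID_LIST(demo_btf_ids)
	BTF_ID(struct, sk_buff)
	BTF_ID_UNUSED
	BTF_ID(struct, task_struct)

	/* After linking:
	 *   demo_btf_ids[0] == BTF type ID of struct sk_buff
	 *   demo_btf_ids[1] == 0 (the deliberately unused slot)
	 *   demo_btf_ids[2] == BTF type ID of struct task_struct
	 */

With CONFIG_DEBUG_INFO_BTF=n the list degrades to a static array of zeroes, so consumers must treat ID 0 as "not resolved".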
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + +#endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 7b73ef7f902d..b56cc825f64d 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); @@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 0fe5426f2bdc..1fb55c0ad78a 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -77,4 +77,9 @@ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) +#ifdef __GENKSYMS__ +/* genksyms gets confused by _Static_assert */ +#define _Static_assert(expr, ...) +#endif + #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index a032f01e928c..d7a628e066ee 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -110,11 +110,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? 
(void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h index 2f5d731ae251..8afa92d15a66 100644 --- a/include/linux/can/can-ml.h +++ b/include/linux/can/can-ml.h @@ -44,6 +44,7 @@ #include <linux/can.h> #include <linux/list.h> +#include <linux/netdevice.h> #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) #define CAN_EFF_RCV_HASH_BITS 10 @@ -65,4 +66,15 @@ struct can_ml_priv { #endif }; +static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev) +{ + return netdev_get_ml_priv(dev, ML_PRIV_CAN); +} + +static inline void can_set_ml_priv(struct net_device *dev, + struct can_ml_priv *ml_priv) +{ + netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN); +} + #endif /* CAN_ML_H */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..1ef071e5a55e 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -49,8 +49,12 @@ static inline void can_skb_reserve(struct sk_buff *skb) static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { - if (sk) { - sock_hold(sk); + /* If the socket has already been closed by user space, the + * refcount may already be 0 (and the socket will be freed + * after the last TX skb has been freed). So only increase + * socket refcount if the refcount is > 0. + */ + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { skb->destructor = sock_efree; skb->sk = sk; } @@ -61,21 +65,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) */ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) { - if (skb_shared(skb)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *nskb; - if (likely(nskb)) { - can_skb_set_owner(nskb, skb->sk); - consume_skb(skb); - return nskb; - } else { - kfree_skb(skb); - return NULL; - } + nskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!nskb)) { + kfree_skb(skb); + return NULL; } - /* we can assume to have an unshared skb with proper owner */ - return skb; + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; } #endif /* !_CAN_SKB_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index ecce0f43c73a..b4345b38a6be 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -251,6 +251,15 @@ extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); +static inline bool perfmon_capable(void) +{ + return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN); +} + +static inline bool bpf_capable(void) +{ + return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); +} /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 430e219e3aba..eb345d08c59b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -487,8 +487,20 @@ struct cgroup { /* Used to store internal freezer state */ struct cgroup_freezer_state freezer; + /* cgroup log subsys*/ + struct mbuf_slot *mbuf; + + /* memory latency stat */ + struct sli_memlat_stat __percpu 
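perfmon_capable() and bpf_capable() gate features on the new split-out capabilities while still accepting CAP_SYS_ADMIN, so existing privileged deployments keep working unchanged. The intended call-site shape, as an illustrative fragment (the function is hypothetical; the real users are the perf and bpf syscalls):

	static int demo_expose_hw_counters(void)
	{
		if (!perfmon_capable())
			return -EPERM;	/* needs CAP_PERFMON or CAP_SYS_ADMIN */
		/* ... program raw PMU counters ... */
		return 0;
	}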
*sli_memlat_stat_percpu; + + /* sched latency stat */ + struct sli_schedlat_stat __percpu *sli_schedlat_stat_percpu; + + spinlock_t cgrp_mbuf_lock; + /* ids of the ancestors at each level including self */ int ancestor_ids[]; + }; /* @@ -797,7 +809,9 @@ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { - u8 is_data; + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; u8 padding; u16 prioidx; u32 classid; @@ -807,7 +821,9 @@ struct sock_cgroup_data { u32 classid; u16 prioidx; u8 padding; - u8 is_data; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; } __packed; #endif u64 val; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 57577075d204..6ac71a4c5789 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock; void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) @@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) */ v = READ_ONCE(skcd->val); - if (v & 1) + if (v & 3) return &cgrp_dfl_root.cgrp; return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; @@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ @@ -957,4 +959,12 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +ssize_t mbuf_print(struct cgroup *cgrp, const char *fmt, ...); +struct cgroup *get_cgroup_from_task(struct task_struct *task); +ssize_t mbuf_print_task(struct task_struct *task, const char *fmt, ...); +void *cgroup_mbuf_start(struct seq_file *s, loff_t *pos); +void *cgroup_mbuf_next(struct seq_file *s, void *v, loff_t *pos); +void cgroup_mbuf_stop(struct seq_file *s, void *v); +int cgroup_mbuf_show(struct seq_file *s, void *v); + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 333a6695a918..0e06df20928c 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -36,9 +36,3 @@ __has_builtin(__builtin_sub_overflow) #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif - -/* The following are for compatibility with GCC, from compiler-gcc.h, - * and may be redefined here because they should not be shared with other - * compilers, like ICC. - */ -#define barrier() __asm__ __volatile__("" : : : "memory") diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad48..180b52419136 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -12,26 +12,13 @@ #if GCC_VERSION < 40600 # error Sorry, your compiler is too old - please upgrade it. -#endif - -/* Optimization barrier */ - -/* The "volatile" is due to gcc bugs */ -#define barrier() __asm__ __volatile__("": : :"memory") +#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 /* - * This version is i.e. 
to prevent dead stores elimination on @ptr - * where gcc and llvm may behave differently when otherwise using - * normal barrier(): while gcc behavior gets along with a normal - * barrier(), llvm needs an explicit input variable to be assumed - * clobbered. The issue is as follows: while the inline asm might - * access any memory it wants, the compiler could have fit all of - * @ptr into memory registers instead, and since @ptr never escaped - * from that, it proved that the inline asm wasn't touching any of - * it. This version works well with both compilers, i.e. we're telling - * the compiler that the inline asm absolutely may see the contents - * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 + * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk */ -#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") +# error Sorry, your version of GCC is too old - please use 5.1 or newer. +#endif /* * This macro obfuscates arithmetic on a variable address so that gcc @@ -170,5 +157,3 @@ #else #define __diag_GCC_8(s) #endif - -#define __no_fgcse __attribute__((optimize("-fno-gcse"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5e88e7e33abe..9446e8fbe55c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Optimization barrier */ #ifndef barrier -# define barrier() __memory_barrier() +/* The "volatile" is due to gcc bugs */ +# define barrier() __asm__ __volatile__("": : :"memory") #endif #ifndef barrier_data -# define barrier_data(ptr) barrier() +/* + * This version is i.e. to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") #endif /* workaround for GCC PR82365 if needed */ @@ -120,12 +134,65 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(.rodata..c_jump_table) +#ifdef CONFIG_DEBUG_ENTRY +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. + * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... 
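The classic consumer of barrier_data() is secret scrubbing: without the "r"(ptr) clobber, LLVM may drop the final memset() as a dead store, exactly the divergence the comment above describes. A sketch of the pattern, which is essentially what the kernel's memzero_explicit() does:

	static void demo_scrub_key(void)
	{
		u8 key[32];

		/* ... derive and use key ... */
		memset(key, 0, sizeof(key));
		barrier_data(key);	/* compiler must assume key is read here */
	}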
+ * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() would be an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is with a 0-value, even though, we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction, this ensures it will be + * part of the condition block and does not escape. + */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#endif /* CONFIG_DEBUG_ENTRY */ + #else #define annotate_reachable() #define annotate_unreachable() #define __annotate_jump_table #endif +#ifndef instrumentation_begin +#define instrumentation_begin() do { } while(0) +#define instrumentation_end() do { } while(0) +#endif + #ifndef ASM_UNREACHABLE # define ASM_UNREACHABLE #endif @@ -347,7 +414,7 @@ static inline void *offset_to_ptr(const int *off) * compiler has support to do so. */ #define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) #define compiletime_assert_atomic_type(t) \ compiletime_assert(__native_word(t), \ @@ -356,4 +423,10 @@ static inline void *offset_to_ptr(const int *off) /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..080ca3230b51 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -270,4 +270,11 @@ */ #define __weak __attribute__((__weak__)) +/* + * Used by functions that use '__builtin_return_address'. These function + * don't want to be splited or made inline, which can make + * the '__builtin_return_address' get unexpected address. + */ +#define __fix_address noinline __noclone + #endif /* __LINUX_COMPILER_ATTRIBUTES_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 72393a8c1a6c..b2e50f7a17be 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -118,6 +118,10 @@ struct ftrace_likely_data { #define notrace __attribute__((__no_instrument_function__)) #endif +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) + /* * it doesn't make sense on ARM (currently the only user of __naked) * to trace naked functions because then mcount is called without @@ -176,6 +180,10 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -212,10 +220,6 @@ struct ftrace_likely_data { #define asm_inline asm #endif -#ifndef __no_fgcse -# define __no_fgcse -#endif - /* Are two types/vars the same type (ignoring qualifiers)? 
*/ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d05609ad329d..e0bece1473fd 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -5,8 +5,10 @@ #include <linux/sched.h> #include <linux/vtime.h> #include <linux/context_tracking_state.h> -#include <asm/ptrace.h> +#include <linux/instrumentation.h> +#include <asm/ptrace.h> +#include <linux/sli.h> #ifdef CONFIG_CONTEXT_TRACKING extern void context_tracking_cpu_set(int cpu); @@ -24,7 +26,6 @@ static inline void user_enter(void) { if (context_tracking_is_enabled()) context_tracking_enter(CONTEXT_USER); - } static inline void user_exit(void) { @@ -37,7 +38,6 @@ static inline void user_enter_irqoff(void) { if (context_tracking_is_enabled()) __context_tracking_enter(CONTEXT_USER); - } static inline void user_exit_irqoff(void) { @@ -85,6 +85,7 @@ static inline void user_enter(void) { } static inline void user_exit(void) { } static inline void user_enter_irqoff(void) { } static inline void user_exit_irqoff(void) { } + static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } diff --git a/include/linux/cookie.h b/include/linux/cookie.h new file mode 100644 index 000000000000..0c159f585109 --- /dev/null +++ b/include/linux/cookie.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COOKIE_H +#define __LINUX_COOKIE_H + +#include <linux/atomic.h> +#include <linux/percpu.h> +#include <asm/local.h> + +struct pcpu_gen_cookie { + local_t nesting; + u64 last; +} __aligned(16); + +struct gen_cookie { + struct pcpu_gen_cookie __percpu *local; + atomic64_t forward_last ____cacheline_aligned_in_smp; + atomic64_t reverse_last; +}; + +#define COOKIE_LOCAL_BATCH 4096 + +#define DEFINE_COOKIE(name) \ + static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \ + static struct gen_cookie name = { \ + .local = &__##name, \ + .forward_last = ATOMIC64_INIT(0), \ + .reverse_last = ATOMIC64_INIT(0), \ + } + +static __always_inline u64 gen_cookie_next(struct gen_cookie *gc) +{ + struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local); + u64 val; + + if (likely(local_inc_return(&local->nesting) == 1)) { + val = local->last; + if (__is_defined(CONFIG_SMP) && + unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) { + s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH, + &gc->forward_last); + val = next - COOKIE_LOCAL_BATCH; + } + local->last = ++val; + } else { + val = atomic64_dec_return(&gc->reverse_last); + } + local_dec(&local->nesting); + return val; +} + +#endif /* __LINUX_COOKIE_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index bc6c879bd110..4e9822cb11f3 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -138,12 +138,18 @@ static inline void get_online_cpus(void) { cpus_read_lock(); } static inline void put_online_cpus(void) { cpus_read_unlock(); } #ifdef CONFIG_PM_SLEEP_SMP -extern int freeze_secondary_cpus(int primary); +int __freeze_secondary_cpus(int primary, bool suspend); +static inline int freeze_secondary_cpus(int primary) +{ + return __freeze_secondary_cpus(primary, true); +} + static inline int disable_nonboot_cpus(void) { - return freeze_secondary_cpus(0); + return __freeze_secondary_cpus(0, false); } -extern void enable_nonboot_cpus(void); + +void enable_nonboot_cpus(void); static inline int 
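gen_cookie_next() rewards a worked example: each CPU claims a private batch of COOKIE_LOCAL_BATCH IDs with one atomic and then serves them via plain per-cpu increments, while the nesting counter detects reentrancy (an interrupt allocating mid-allocation) and diverts it to a range counted down from 2^64, which cannot collide with the forward batches. An illustrative timeline under those rules:

	DEFINE_COOKIE(demo_cookie);

	/* CPU0, 1st call:  forward_last 0 -> 4096, returns 1.
	 * CPU0, next:      returns 2..4096 with no atomics at all.
	 * CPU1, 1st call:  forward_last 4096 -> 8192, returns 4097.
	 * Nested call:     nesting > 1, so it returns 2^64-1, 2^64-2, ...
	 *                  taken from reverse_last.
	 */
	static u64 demo_next_id(void)
	{
		return gen_cookie_next(&demo_cookie);
	}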
suspend_disable_secondary_cpus(void) { diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 4b6b5bea8f79..4447967e7a9f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -29,6 +29,8 @@ struct cpuidle_driver; * CPUIDLE DEVICE INTERFACE * ****************************/ +#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0) + struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; @@ -72,6 +74,7 @@ struct cpuidle_state { #define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ #define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 19ea3a371d7b..0c720a2982ae 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -130,9 +130,12 @@ * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual * declaration) is used to ensure that the crypto_tfm context structure is * aligned correctly for the given architecture so that there are no alignment - * faults for C data types. In particular, this is required on platforms such - * as arm where pointers are 32-bit aligned but there are data types such as - * u64 which require 64-bit alignment. + * faults for C data types. On architectures that support non-cache coherent + * DMA, such as ARM or arm64, it also takes into account the minimal alignment + * that is required to ensure that the context struct member does not share any + * cachelines with the rest of the struct. This is needed to ensure that cache + * maintenance for non-coherent DMA (cache invalidation in particular) does not + * affect data that may be accessed by the CPU concurrently. */ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN diff --git a/include/linux/dax.h b/include/linux/dax.h index 9bd8528bd305..72a7f03a59f4 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -56,6 +56,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. 
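CPUIDLE_FLAG_OFF lets a driver ship a state disabled by default while users can still opt in through the per-state sysfs 'disable' attribute; CPUIDLE_STATE_DISABLED_BY_USER tracks that user-driven toggle separately from driver-imposed disables. An illustrative state-table entry:

	static struct cpuidle_state demo_states[] = {
		{
			.name			= "C6-demo",
			.desc			= "deep state, opt-in only",
			.flags			= CPUIDLE_FLAG_TIMER_STOP |
						  CPUIDLE_FLAG_OFF,
			.exit_latency		= 200,	/* us */
			.target_residency	= 600,	/* us */
		},
	};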
*/ @@ -102,6 +104,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } +static inline bool dax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t len) +{ + return false; +} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -197,14 +205,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) } #endif +#if IS_ENABLED(CONFIG_DAX) int dax_read_lock(void); void dax_read_unlock(int id); +#else +static inline int dax_read_lock(void) +{ + return 0; +} + +static inline void dax_read_unlock(int id) +{ +} +#endif /* CONFIG_DAX */ bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 10090f11ab95..f156bfd89c8f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -13,6 +13,7 @@ #include <linux/lockref.h> #include <linux/stringhash.h> #include <linux/wait.h> +#include <linux/kabi.h> struct path; struct vfsmount; @@ -118,6 +119,11 @@ struct dentry { struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; /* @@ -147,6 +153,11 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } ____cacheline_aligned; /* @@ -440,6 +451,11 @@ static inline bool d_is_negative(const struct dentry *dentry) return d_is_miss(dentry); } +static inline bool d_flags_negative(unsigned flags) +{ + return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE; +} + static inline bool d_is_positive(const struct dentry *dentry) { return !d_is_negative(dentry); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 58424eb3b329..798f0b9b43ae 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -54,6 +54,8 @@ static const struct file_operations __fops = { \ .llseek = no_llseek, \ } +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_lookup(const char *name, struct dentry *parent); @@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); -typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, @@ -203,7 +204,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, static inline struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - struct vfsmount *(*f)(void *), + debugfs_automount_t f, void *data) { return ERR_PTR(-ENODEV); diff --git 
a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 4635f95000a4..79a6e37a1d6f 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); #else /* !CONFIG_DEVFREQ_THERMAL */ -struct thermal_cooling_device * +static inline struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power) { diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 399ad8632356..a53d7d2c2d95 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -316,6 +316,11 @@ struct dm_target { * whether or not its underlying devices have support. */ bool discards_supported:1; + + /* + * Set if we need to limit the number of in-flight bios when swapping. + */ + bool limit_swap_bios:1; }; /* Each target can link one of these into the table */ @@ -420,6 +425,7 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); void dm_remap_zone_report(struct dm_target *ti, sector_t start, diff --git a/include/linux/device.h b/include/linux/device.h index 297239a08bb7..5add0c2deb87 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -27,6 +27,7 @@ #include <linux/gfp.h> #include <linux/overflow.h> #include <asm/device.h> +#include <linux/kabi.h> struct device; struct device_private; @@ -594,6 +595,8 @@ struct class { const struct dev_pm_ops *pm; struct subsys_private *p; + + KABI_RESERVE(1); }; struct class_dev_iter { @@ -1499,6 +1502,9 @@ extern void device_unregister(struct device *dev); extern void device_initialize(struct device *dev); extern int __must_check device_add(struct device *dev); extern void device_del(struct device *dev); +extern bool device_is_hidden(struct device *dev); +extern int device_hide(struct device *dev); +extern int device_unhide(struct device *dev); extern int device_for_each_child(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern int device_for_each_child_reverse(struct device *dev, void *data, diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h index 3c8b7d274bd9..45ba37aaf6b7 100644 --- a/include/linux/dm-bufio.h +++ b/include/linux/dm-bufio.h @@ -138,6 +138,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c); sector_t dm_bufio_get_block_number(struct dm_buffer *b); void *dm_bufio_get_block_data(struct dm_buffer *b); void *dm_bufio_get_aux_data(struct dm_buffer *b); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index ec212cb27fdc..12eac4293af6 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -303,6 +303,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; const char *name; + spinlock_t name_lock; /* spinlock to protect name access */ struct module *owner; struct list_head list_node; void *priv; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 
4d450672b7d6..d0d8651ee149 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -11,6 +11,7 @@ #include <linux/scatterlist.h> #include <linux/bug.h> #include <linux/mem_encrypt.h> +#include <linux/kabi.h> /** * List of possible attributes associated with a DMA mapping. The semantics @@ -132,6 +133,11 @@ struct dma_map_ops { u64 (*get_required_mask)(struct device *dev); size_t (*max_mapping_size)(struct device *dev); unsigned long (*get_merge_boundary)(struct device *dev); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #define DMA_MAPPING_ERROR (~(dma_addr_t)0) diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index eec7928ff8fe..99580c22f91a 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data { #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) /* Instructions such as EWEN are (addrlen + 2) in length. */ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) +/* Add extra cycle after address during a read */ +#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) /* * optional hooks to control additional logic diff --git a/include/linux/efi.h b/include/linux/efi.h index d87acf62958e..c82ef0eba4f8 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -63,8 +63,10 @@ typedef void *efi_handle_t; */ typedef guid_t efi_guid_t __aligned(__alignof__(u32)); -#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ - GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) +#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \ + (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, d } } /* * Generic EFI table header @@ -1039,7 +1041,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 901bda352dcb..2e107d1eb26f 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -4,6 +4,7 @@ #include <linux/percpu.h> #include <linux/hashtable.h> +#include <linux/kabi.h> #ifdef CONFIG_BLOCK @@ -50,6 +51,9 @@ struct elevator_mq_ops { struct request *(*next_request)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; #define ELV_NAME_MAX (16) @@ -86,6 +90,11 @@ struct elevator_type /* managed by elevator core */ char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ struct list_head list; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #define ELV_HASH_BITS 6 diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 4cad0e784b28..b81f9e1d74b0 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -58,6 +58,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse } #endif +#if defined(CONFIG_UM) || defined(CONFIG_IA64) /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. 
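The rewritten EFI_GUID() spells out the mixed-endian storage directly: the first three fields are emitted little-endian byte by byte and the trailing eight bytes verbatim, matching the UEFI binary representation without going through GUID_INIT. For example, the well-known EFI global-variable GUID 8be4df61-93ca-11d2-aa0d-00e098032b8c expands as:

	#define DEMO_EFI_GLOBAL_VARIABLE_GUID \
		EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
			 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)

	/* Stored byte sequence:
	 *   61 df e4 8b  ca 93  d2 11  aa 0d 00 e0 98 03 2b 8c
	 */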
Dumping its @@ -72,5 +73,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); extern int elf_core_write_extra_data(struct coredump_params *cprm); extern size_t elf_core_extra_data_size(void); +#else +static inline Elf_Half elf_core_extra_phdrs(void) +{ + return 0; +} + +static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + return 1; +} + +static inline int elf_core_write_extra_data(struct coredump_params *cprm) +{ + return 1; +} + +static inline size_t elf_core_extra_data_size(void) +{ + return 0; +} +#endif #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index f236f5b931b2..7fdd7f355b52 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -54,7 +54,7 @@ .popsection ; #define ELFNOTE(name, type, desc) \ - ELFNOTE_START(name, type, "") \ + ELFNOTE_START(name, type, "a") \ desc ; \ ELFNOTE_END diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 95991e4300bf..66409f92ca14 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -16,6 +16,7 @@ #include <linux/bitmap.h> #include <linux/compat.h> #include <uapi/linux/ethtool.h> +#include <linux/kabi.h> #ifdef CONFIG_COMPAT @@ -404,6 +405,11 @@ struct ethtool_ops { struct ethtool_fecparam *); void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; struct ethtool_rx_flow_rule { diff --git a/include/linux/extable.h b/include/linux/extable.h index 81ecfaa83ad3..4ab9e78f313b 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -33,4 +33,14 @@ search_module_extables(unsigned long addr) } #endif /*CONFIG_MODULES*/ +#ifdef CONFIG_BPF_JIT +const struct exception_table_entry *search_bpf_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_bpf_extables(unsigned long addr) +{ + return NULL; +} +#endif + #endif /* _LINUX_EXTABLE_H */ diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 2bdf643d8593..47d3e19a49ae 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -271,6 +271,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { } +static inline int extcon_register_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline int extcon_unregister_notifier_all(struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline int devm_extcon_register_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb) +{ + return 0; +} + +static inline void devm_extcon_unregister_notifier_all(struct device *dev, + struct extcon_dev *edev, + struct notifier_block *nb) { } + static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { return ERR_PTR(-ENODEV); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 284738996028..6bb6f718a102 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -124,6 +124,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_RESIZEFS_FLAG 0x00004000 #define CP_DISABLED_QUICK_FLAG 0x00002000 #define CP_DISABLED_FLAG 0x00001000 #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 diff --git a/include/linux/fb.h b/include/linux/fb.h index 756706b666a1..8221838fefd9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -400,8 +400,6 @@ struct 
fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ -#define FBINFO_MISC_USEREVENT 0x10000 /* event request - from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be diff --git a/include/linux/filter.h b/include/linux/filter.h index 0367a75f873b..7c75019718e4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -65,9 +65,17 @@ struct ctl_table_header; /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 +/* unused opcode to mark special load instruction. Same as BPF_ABS */ +#define BPF_PROBE_MEM 0x20 + /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -368,6 +376,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ @@ -511,10 +529,12 @@ struct sock_fprog_kern { struct sock_filter *filter; }; +/* Some arches need doubleword alignment for their instructions and/or data */ +#define BPF_IMAGE_ALIGNMENT 8 + struct bpf_binary_header { u32 pages; - /* Some arches need word alignment for their instructions */ - u8 image[] __aligned(4); + u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; struct bpf_prog { @@ -553,23 +573,26 @@ struct sk_filter { DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); -#define BPF_PROG_RUN(prog, ctx) ({ \ - u32 ret; \ - cant_sleep(); \ - if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ - struct bpf_prog_stats *stats; \ - u64 start = sched_clock(); \ - ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ - stats = this_cpu_ptr(prog->aux->stats); \ - u64_stats_update_begin(&stats->syncp); \ - stats->cnt++; \ - stats->nsecs += sched_clock() - start; \ - u64_stats_update_end(&stats->syncp); \ - } else { \ - ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ - } \ +#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ + u32 ret; \ + cant_sleep(); \ + if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ + struct bpf_prog_stats *stats; \ + u64 start = sched_clock(); \ + ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ + stats = this_cpu_ptr(prog->aux->stats); \ + u64_stats_update_begin(&stats->syncp); \ + stats->cnt++; \ + stats->nsecs += sched_clock() - start; \ + u64_stats_update_end(&stats->syncp); \ + } else { \ + ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ + } \ ret; }) +#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \ + bpf_dispatcher_nopfunc) + #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN struct bpf_skb_data_end { @@ -578,6 +601,14 @@ struct bpf_skb_data_end { void *data_end; }; +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + struct bpf_redirect_info { u32 flags; u32 tgt_index; @@ -585,6 +616,7 @@ struct bpf_redirect_info { struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; + struct bpf_nh_params nh; }; DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); @@ -693,6 
+725,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, return res; } +DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp) + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { @@ -702,9 +736,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, * already takes rcu_read_lock() when fetching the program, so * it's not necessary here anymore. */ - return BPF_PROG_RUN(prog, xdp); + return __BPF_PROG_RUN(prog, xdp, + BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp)); } +void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); + static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) { return prog->len * sizeof(struct bpf_insn); @@ -770,8 +807,12 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - set_vm_flush_reset_perms(fp); - set_memory_ro((unsigned long)fp, fp->pages); +#ifndef CONFIG_BPF_JIT_ALWAYS_ON + if (!fp->jited) { + set_vm_flush_reset_perms(fp); + set_memory_ro((unsigned long)fp, fp->pages); + } +#endif } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) @@ -826,6 +867,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); +const struct bpf_func_proto * +bpf_base_func_proto(enum bpf_func_id func_id); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); @@ -842,19 +885,19 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #define __bpf_call_base_args \ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ - __bpf_call_base) + (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); -static inline bool bpf_dump_raw_ok(void) +static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. 
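The BPF_NOSPEC pseudo-opcode and its BPF_ST_NOSPEC() encoding (added further up in this filter.h hunk) give the verifier a marker for stores whose speculative execution it cannot prove safe against Spectre v4; JITs lower the marker to an architectural barrier (lfence on x86, for example) and the interpreter treats it as a fence. A hedged sketch of the instruction sequence the verifier effectively produces:

	/* Conceptually: an unprovable store gets a barrier in front of it. */
	struct bpf_insn demo_patch[] = {
		BPF_ST_NOSPEC(),	/* speculation barrier */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	};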
*/ - return kallsyms_show_value() == 1; + return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, @@ -946,6 +989,9 @@ void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); +int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, + struct bpf_jit_poke_descriptor *poke); + int bpf_jit_get_func_addr(const struct bpf_prog *prog, const struct bpf_insn *insn, bool extra_pass, u64 *func_addr, bool *func_addr_fixed); @@ -1049,6 +1095,13 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) return false; } +static inline int +bpf_jit_add_poke_descriptor(struct bpf_prog *prog, + struct bpf_jit_poke_descriptor *poke) +{ + return -ENOTSUPP; +} + static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); @@ -1228,4 +1281,149 @@ struct bpf_sockopt_kern { s32 retval; }; +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + __be16 sport; + u16 dport; + struct sock *selected_sk; + bool no_reuseport; +}; + +extern struct static_key_false bpf_sk_lookup_enabled; + +/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. + * + * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and + * SK_DROP. Their meaning is as follows: + * + * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result + * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup + * SK_DROP : terminate lookup with -ECONNREFUSED + * + * This macro aggregates return values and selected sockets from + * multiple BPF programs according to following rules in order: + * + * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, + * macro result is SK_PASS and last ctx.selected_sk is used. + * 2. If any program returned SK_DROP return value, + * macro result is SK_DROP. + * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. + * + * Caller must ensure that the prog array is non-NULL, and that the + * array as well as the programs it contains remain valid. + */ +#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_sk_lookup_kern *_ctx = &(ctx); \ + struct bpf_prog_array_item *_item; \ + struct sock *_selected_sk = NULL; \ + bool _no_reuseport = false; \ + struct bpf_prog *_prog; \ + bool _all_pass = true; \ + u32 _ret; \ + \ + _item = &(array)->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + /* restore most recent selection */ \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + \ + _ret = func(_prog, _ctx); \ + if (_ret == SK_PASS && _ctx->selected_sk) { \ + /* remember last non-NULL socket */ \ + _selected_sk = _ctx->selected_sk; \ + _no_reuseport = _ctx->no_reuseport; \ + } else if (_ret == SK_DROP && _all_pass) { \ + _all_pass = false; \ + } \ + _item++; \ + } \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + _all_pass || _selected_sk ? 
SK_PASS : SK_DROP; \ + }) + +static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET, + .protocol = protocol, + .v4.saddr = saddr, + .v4.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET6, + .protocol = protocol, + .v6.saddr = saddr, + .v6.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/font.h b/include/linux/font.h index 51b91c8b69d5..59faa80f586d 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -59,4 +59,17 @@ extern const struct font_desc *get_default_font(int xres, int yres, /* Max. 
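A transport protocol is expected to consult bpf_sk_lookup_run_v4() before its normal hash-table walk: *psk carries the program verdict and the return value says whether reuseport group selection must be skipped. An illustrative call site (demo_htable_lookup() is a hypothetical stand-in for the protocol's own lookup):

	static struct sock *demo_lookup(struct net *net, __be32 saddr,
					__be16 sport, __be32 daddr, __be16 dport)
	{
		struct sock *sk = NULL;
		bool no_reuseport;

		no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr,
						    sport, daddr, ntohs(dport),
						    &sk);
		if (IS_ERR(sk))
			return NULL;	/* SK_DROP -> -ECONNREFUSED upstream */
		if (!sk)		/* SK_PASS with no selection made */
			sk = demo_htable_lookup(net, saddr, sport, daddr, dport);
		return sk;		/* no_reuseport gates reuseport selection */
	}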
length for the name of a predefined font */ #define MAX_FONT_NAME 32 +/* Extra word getters */ +#define REFCOUNT(fd) (((int *)(fd))[-1]) +#define FNTSIZE(fd) (((int *)(fd))[-2]) +#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +#define FNTSUM(fd) (((int *)(fd))[-4]) + +#define FONT_EXTRA_WORDS 4 + +struct font_data { + unsigned int extra[FONT_EXTRA_WORDS]; + const unsigned char data[]; +} __packed; + #endif /* _VIDEO_FONT_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 391f31472b44..49e12bb53690 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -40,6 +40,7 @@ #include <linux/fs_types.h> #include <linux/build_bug.h> #include <linux/stddef.h> +#include <linux/kabi.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -408,6 +409,9 @@ struct address_space_operations { int (*swap_activate)(struct swap_info_struct *sis, struct file *file, sector_t *span); void (*swap_deactivate)(struct file *file); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern const struct address_space_operations empty_aops; @@ -463,6 +467,11 @@ struct address_space { spinlock_t private_lock; struct list_head private_list; void *private_data; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -507,6 +516,11 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ @@ -736,6 +750,9 @@ struct inode { struct fsverity_info *i_verity_info; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + void *i_private; /* fs or device private pointer */ } __randomize_layout; @@ -979,7 +996,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[0]; + unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) @@ -1031,6 +1048,9 @@ struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct lock_manager_operations { @@ -1041,6 +1061,8 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + + KABI_RESERVE(1); }; struct lock_manager { @@ -1860,6 +1882,11 @@ struct file_operations { struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); int (*fadvise)(struct file *, loff_t, loff_t, int); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; struct inode_operations { @@ -1890,6 +1917,11 @@ struct inode_operations { umode_t create_mode); int (*tmpfile) (struct inode *, struct dentry *, umode_t); int (*set_acl)(struct inode *, struct posix_acl *, int); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -1974,6 +2006,11 @@ struct super_operations { struct shrink_control *); long (*free_cached_objects)(struct super_block *, struct shrink_control *); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* @@ -2141,6 +2178,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, 
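The negative indices in REFCOUNT()/FNTSIZE()/FNTCHARCNT()/FNTSUM() count backwards from a font's data pointer into the extra[] header of struct font_data, so the metadata now travels immediately in front of the bitmap instead of in loose globals. A sketch of a font defined with the new layout (sizes and glyph bytes illustrative):

	#define DEMO_FONTDATAMAX (256 * 8)

	static const struct font_data demo_font = {
		{ 0, 0, DEMO_FONTDATAMAX, 0 },	/* sum, charcount, size, refcount */
		{ 0x7e, 0x81, /* ... remaining glyph bitmap bytes ... */ }
	};

	/* FNTSIZE(demo_font.data) reads ((int *)fd)[-2], i.e. extra[2];
	 * REFCOUNT() is [-1] -> extra[3], FNTCHARCNT() [-3] -> extra[1],
	 * FNTSUM() [-4] -> extra[0].
	 */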
struct file *filp) * * I_CREATING New object's inode in the middle of setting up. * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -2158,11 +2199,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) @@ -2242,6 +2282,10 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index f622f7460ed8..032e5bcf9701 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -100,6 +100,35 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME; } +/** + * fscrypt_is_nokey_name() - test whether a dentry is a no-key name + * @dentry: the dentry to check + * + * This returns true if the dentry is a no-key dentry. A no-key dentry is a + * dentry that was created in an encrypted directory that hasn't had its + * encryption key added yet. Such dentries may be either positive or negative. + * + * When a filesystem is asked to create a new filename in an encrypted directory + * and the new filename's dentry is a no-key dentry, it must fail the operation + * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(), + * ->rename(), and ->link(). (However, ->rename() and ->link() are already + * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().) + * + * This is necessary because creating a filename requires the directory's + * encryption key, but just checking for the key on the directory inode during + * the final filesystem operation doesn't guarantee that the key was available + * during the preceding dentry lookup. And the key must have already been + * available during the dentry lookup in order for it to have been checked + * whether the filename already exists in the directory and for the new file's + * dentry not to be invalidated due to it incorrectly having the no-key flag. 
+ * + * Return: %true if the dentry is a no-key name + */ +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_ENCRYPTED_NAME; +} + /* crypto.c */ extern void fscrypt_enqueue_decrypt_work(struct work_struct *); extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t); @@ -290,6 +319,11 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) { } +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return false; +} + /* crypto.c */ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) { diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1915bdba2fad..6c200a57b4d1 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -19,6 +19,7 @@ #include <linux/atomic.h> #include <linux/user_namespace.h> #include <linux/refcount.h> +#include <linux/kabi.h> /* * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily @@ -133,8 +134,7 @@ struct fsnotify_ops { */ struct fsnotify_event { struct list_head list; - /* inode may ONLY be dereferenced during handle_event(). */ - struct inode *inode; /* either the inode the event happened to or its parent */ + unsigned long objectid; /* identifier for queue merges */ }; /* @@ -211,6 +211,11 @@ struct fsnotify_group { } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* when calling fsnotify tell it if the data is a path or inode */ @@ -500,10 +505,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); static inline void fsnotify_init_event(struct fsnotify_event *event, - struct inode *inode) + unsigned long objectid) { INIT_LIST_HEAD(&event->list); - event->inode = inode; + event->objectid = objectid; } #else diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 2f9f336b8a11..f1f3fa0f5c8a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -18,6 +18,7 @@ #include <linux/uuid.h> #include <linux/blk_types.h> #include <asm/local.h> +#include <linux/kabi.h> #ifdef CONFIG_BLOCK @@ -131,6 +132,12 @@ struct hd_struct { struct disk_stats dkstats; #endif struct percpu_ref ref; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + struct rcu_work rcu_work; }; @@ -176,6 +183,9 @@ struct blk_integrity { unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -220,6 +230,11 @@ struct gendisk { int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; static inline struct gendisk *part_to_disk(struct hd_struct *part) @@ -333,6 +348,7 @@ static inline int init_part_stats(struct hd_struct *part) part->dkstats = alloc_percpu(struct disk_stats); if (!part->dkstats) return 0; + part->stamp = jiffies - 1; return 1; } @@ -420,7 +436,8 @@ static inline void free_part_info(struct hd_struct *part) kfree(part->info); } -void update_io_ticks(struct hd_struct *part, unsigned long now); +void update_io_ticks(struct hd_struct *part, unsigned long now, bool end); +void sync_io_ticks(struct hd_struct *part, bool busy); /* block/genhd.c */ extern void device_add_disk(struct device *parent, struct gendisk *disk, @@ -751,9 +768,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct 
*part) static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) + preempt_disable(); write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); + preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); part->nr_sects = size; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index da0af631ded5..9c4ed2c4b45c 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -65,13 +65,16 @@ extern void irq_exit(void); #define arch_nmi_exit() do { } while (0) #endif +/* + * nmi_enter() can nest up to 15 times; see NMI_BITS. + */ #define nmi_enter() \ do { \ arch_nmi_enter(); \ printk_nmi_enter(); \ lockdep_off(); \ ftrace_nmi_enter(); \ - BUG_ON(in_nmi()); \ + BUG_ON(in_nmi() == NMI_MASK); \ preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ rcu_nmi_enter(); \ trace_hardirq_enter(); \ diff --git a/include/linux/hid.h b/include/linux/hid.h index 875f71132b14..ae906deb42e8 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -261,6 +261,8 @@ struct hid_item { #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_VOLUMEUP 0x000c00e9 +#define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 @@ -959,34 +961,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -1000,7 +1017,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h index 774f7d3b8f6a..369221fd5518 100644 --- a/include/linux/hil_mlc.h +++ b/include/linux/hil_mlc.h @@ -103,7 +103,7 @@ struct hilse_node { /* Methods for back-end drivers, e.g. 
hp_sdc_mlc */ typedef int (hil_mlc_cts) (hil_mlc *mlc); -typedef void (hil_mlc_out) (hil_mlc *mlc); +typedef int (hil_mlc_out) (hil_mlc *mlc); typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); struct hil_mlc_devinfo { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 53fc34f930d0..fc717aeb2b3d 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -106,7 +106,7 @@ void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx, unsigned long address); + pgoff_t idx); pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); @@ -298,7 +298,10 @@ static inline bool is_file_hugepages(struct file *file) return is_file_shm_hugepages(file); } - +static inline struct hstate *hstate_inode(struct inode *i) +{ + return HUGETLBFS_SB(i->i_sb)->hstate; +} #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) false @@ -310,6 +313,10 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, return ERR_PTR(-ENOSYS); } +static inline struct hstate *hstate_inode(struct inode *i) +{ + return NULL; +} #endif /* !CONFIG_HUGETLBFS */ #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA @@ -379,11 +386,6 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) -static inline struct hstate *hstate_inode(struct inode *i) -{ - return HUGETLBFS_SB(i->i_sb)->hstate; -} - static inline struct hstate *hstate_file(struct file *f) { return hstate_inode(file_inode(f)); @@ -588,6 +590,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, } #endif +void set_page_huge_active(struct page *page); + #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; @@ -636,11 +640,6 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma) return NULL; } -static inline struct hstate *hstate_inode(struct inode *i) -{ - return NULL; -} - static inline struct hstate *page_hstate(struct page *page) { return NULL; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b4a017093b69..67d9b5a37460 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -423,6 +423,8 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4..7c522fdd9ea7 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. 
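 * (Editorial illustration, not part of this patch: board setup code for
 * a PCA9665 might request a 100 kHz bus with
 *
 *	static struct i2c_algo_pca_data pd = {
 *		.i2c_clock = 100000,
 *	};
 *
 * where the rate and the omitted callback members are assumptions made
 * for the example.)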
*/ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index a8f888976137..0be0d68fbb00 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -3,6 +3,7 @@ #define _LINUX_ICMPV6_H #include <linux/skbuff.h> +#include <linux/ipv6.h> #include <uapi/linux/icmpv6.h> static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) @@ -13,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include <linux/netdevice.h> #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +#if IS_BUILTIN(CONFIG_IPV6) +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) +{ + icmp6_send(skb, type, code, info, NULL, parm); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + __icmpv6_send(skb, type, code, info, IP6CB(skb)); +} + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); +#if IS_ENABLED(CONFIG_NF_NAT) +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); +#else +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + __icmpv6_send(skb_in, type, code, info, &parm); +} +#endif + #else static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { +} +static inline void icmpv6_ndo_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ } #endif diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 73c66a3a33ae..624d2643bfba 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -619,6 +619,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } +/** + * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_any_nullfunc(__le16 fc) +{ + return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); +} + /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder @@ -2038,7 +2047,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) } /* HE Operation defines */ -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 #define 
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index a367ead4bf4b..e11555989090 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -42,13 +42,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, if (likely(success)) { struct vlan_pcpu_stats *pcpu_stats; - pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); + pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += len; if (multicast) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); + put_cpu_ptr(vlan->pcpu_stats); } else { this_cpu_inc(vlan->pcpu_stats->rx_errors); } diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b05e855f1ddd..41a518336673 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -25,6 +25,8 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ +#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ + /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len; + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr *vh; + struct vlan_hdr vhdr, *vh; - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + if (unlikely(!vh || !--parse_depth)) return 0; - vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); @@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 vlan_get_protocol(struct sk_buff *skb) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } +/* A getter for the SKB protocol field which will handle VLAN tags consistently + * whether VLAN acceleration is enabled or not. + */ +static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) +{ + if (!skip_vlan) + /* VLAN acceleration strips the VLAN header from the skb and + * moves it to skb->vlan_proto + */ + return skb_vlan_tag_present(skb) ? 
skb->vlan_proto : skb->protocol; + + return vlan_get_protocol(skb); +} + static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 8e132cf819e4..e5341847e3a0 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -596,7 +596,7 @@ void iio_device_unregister(struct iio_dev *indio_dev); * 0 on success, negative error number on failure. */ #define devm_iio_device_register(dev, indio_dev) \ - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE); + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index c91cf2dee12a..ce9ed1c0602f 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -10,11 +10,9 @@ struct inet_hashinfo; struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); - int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + int (*dump_one)(struct netlink_callback *cb, const struct inet_diag_req_v2 *req); void (*idiag_get_info)(struct sock *sk, @@ -35,18 +33,25 @@ struct inet_diag_handler { __u16 idiag_info_size; }; +struct bpf_sk_storage_diag; +struct inet_diag_dump_data { + struct nlattr *req_nlas[__INET_DIAG_REQ_MAX]; +#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE] +#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES] + + struct bpf_sk_storage_diag *bpf_stg_diag; +}; + struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin); + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req); struct sock *inet_diag_find_one_icsk(struct net *net, diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h index 1ecb6b45812c..520858d12680 100644 --- a/include/linux/input/elan-i2c-ids.h +++ b/include/linux/input/elan-i2c-ids.h @@ -67,8 +67,15 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN062B", 0 }, { "ELAN062C", 0 }, { "ELAN062D", 0 }, + { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */ + { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */ { "ELAN0631", 0 }, { "ELAN0632", 0 }, + { "ELAN0633", 0 }, /* Lenovo S145 */ + { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */ + { "ELAN0635", 0 }, /* Lenovo V1415-IIL */ + { "ELAN0636", 0 }, /* Lenovo V1415-Dali */ + { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */ { "ELAN1000", 0 }, { } }; diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h new file mode 100644 index 000000000000..93e2ad67fc10 --- /dev/null +++ b/include/linux/instrumentation.h @@ -0,0 +1,57 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_INSTRUMENTATION_H +#define __LINUX_INSTRUMENTATION_H + +#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION) + +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. + * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... + * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() would be an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is with a 0-value, even though, we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction, this ensures it will be + * part of the condition block and does not escape. + */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#else +# define instrumentation_begin() do { } while(0) +# define instrumentation_end() do { } while(0) +#endif + +#endif /* __LINUX_INSTRUMENTATION_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1e5dad8b8e59..88ac8edf44e3 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -359,8 +359,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ @@ -556,6 +556,8 @@ struct intel_iommu { struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ @@ -706,7 +708,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; extern int intel_iommu_enabled; -extern int intel_iommu_tboot_noforce; +extern int intel_iommu_gfx_mapped; #else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 89fc59dab57d..cf34dbaf2c11 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -235,6 +235,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type); extern int prepare_percpu_nmi(unsigned int irq); extern void teardown_percpu_nmi(unsigned int irq); +extern int irq_inject_interrupt(unsigned int irq); + /* The following three functions are for the core kernel use only. 
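 * (Editorial sketch for irq_inject_interrupt() declared above, not part
 * of this patch: an error-injection test could fire a handler from
 * process context with
 *
 *	if (irq_inject_interrupt(irq))
 *		pr_warn("irq %u: injection failed\n", irq);
 *
 * which is only expected to succeed while the interrupt is disabled and
 * the irqchip supports retriggering.)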
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 6e125e9b4187..b9c91d321240 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -108,9 +108,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/io.h b/include/linux/io.h index accac822336a..a59834bc0a11 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -64,6 +64,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size); +void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, + resource_size_t size); void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index dba15ca8e60b..1dcd9198beb7 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -8,6 +8,7 @@ enum { ICQ_EXITED = 1 << 2, + ICQ_DESTROYED = 1 << 3, }; /* diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 7aa5d6117936..76b14cb729dc 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -195,7 +195,8 @@ struct iomap_dio_ops { }; ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, const struct iomap_dio_ops *dops); + const struct iomap_ops *ops, const struct iomap_dio_ops *dops, + bool wait_for_completion); int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); #ifdef CONFIG_SWAP diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 7bddddfc76d6..982f87a4f52f 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -13,6 +13,8 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/bits.h> +#include <linux/kabi.h> + /* * Resources are tree-like, allowing * nesting etc.. 
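Editorial note on the io_mapping_init_wc() change above: mapping first and
filling in base/size only on success means a failed ioremap_wc() leaves the
struct untouched, so callers can rely on the NULL return. A minimal sketch of
the resulting caller pattern (hypothetical driver code, not part of this
patch):

	struct io_mapping iomap;

	if (!io_mapping_init_wc(&iomap, base, size))
		return -ENOMEM;	/* nothing initialised, nothing to unmap */
	/* ... io_mapping_map_wc(&iomap, offset, PAGE_SIZE) ... */
	io_mapping_fini(&iomap);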
@@ -24,6 +26,11 @@ struct resource { unsigned long flags; unsigned long desc; struct resource *parent, *sibling, *child; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* @@ -300,5 +307,11 @@ struct resource *devm_request_free_mem_region(struct device *dev, struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res); +#else +static inline void revoke_devmem(struct resource *res) { }; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ea7c7906591e..5b24e7d9dd16 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -3,6 +3,7 @@ #define _IPV6_H #include <uapi/linux/ipv6.h> +#include <linux/kabi.h> #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) @@ -75,6 +76,11 @@ struct ipv6_devconf { __s32 disable_policy; __s32 ndisc_tclass; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + struct ctl_table_header *sysctl_header; }; @@ -83,7 +89,6 @@ struct ipv6_params { __s32 autoconf; }; extern struct ipv6_params ipv6_defaults; -#include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> diff --git a/include/linux/irq.h b/include/linux/irq.h index f8755e5fcd74..3d7b04d5264c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -211,6 +211,10 @@ struct irq_data { * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required + * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked + * from actual interrupt context. + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -234,6 +238,8 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -303,6 +309,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } +static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; +} + +static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; +} + static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; @@ -408,6 +424,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 000000000000..b476a0885096 --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,655 @@ +/* + * Copyright (C) 2020 Phytium Corporation. 
+ * Author: Wang Yinfeng <wangyinfeng@phytium.com.cn> + * Chen Baozi <chenbaozi@phytium.com.cn> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. + */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_SEIR 0x0068 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). + */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_SEIR GICD_SEIR +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_MOVLPIR 0x0100 +#define GICR_MOVALLR 0x0110 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK 
(0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt 
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) 
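/*
 * (Editorial worked example, not part of this patch: the token-pasting
 * helpers above expand, e.g.,
 *
 *	GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
 *
 * to (GIC_BASER_CACHE_RaWb << GITS_CBASER_INNER_CACHEABILITY_SHIFT),
 * i.e. 3ULL << 59; each register encodes its field position once in a
 * _SHIFT define and shares the cacheability type values.)
 */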
+#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ 
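/*
 * (Editorial sketch, not part of this patch: each command below is an
 * 8-bit opcode in bits [7:0] of the first doubleword of a 32-byte ITS
 * command descriptor, so a hypothetical encoder would start with
 *
 *	cmd->raw_cmd[0] &= ~0xffULL;
 *	cmd->raw_cmd[0] |= GITS_CMD_MAPD;
 *
 * and pack the remaining arguments into raw_cmd[0..3]; the raw_cmd
 * naming is assumed for illustration.)
 */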
+#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP is the odd one, as it doesn't have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 +#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* + * Hypervisor interface registers (SRE only) + */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID 
(0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) +#define ICH_HCR_NPIE (1 << 3) +#define ICH_HCR_TC (1 << 10) +#define ICH_HCR_TALL0 (1 << 11) +#define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_EOIcount_SHIFT 27 +#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) + +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include <asm/arch_gicv3.h> + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. 
+ */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + bool lpi_enabled; + } __percpu *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + bool has_vlpis; + bool has_direct_lpi; +}; + +struct irq_domain; +struct fwnode_handle; +int phytium_its_cpu_init(void); +int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *domain); +int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); + +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + +#endif + +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index e85f714a623e..6fc9f0ac5f2b 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -35,6 +35,7 @@ #include <linux/of.h> #include <linux/mutex.h> #include <linux/radix-tree.h> +#include <linux/kabi.h> struct device_node; struct irq_domain; @@ -182,6 +183,12 @@ struct irq_domain { unsigned int revmap_size; struct radix_tree_root revmap_tree; struct mutex revmap_tree_mutex; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + unsigned int linear_revmap[]; }; @@ -382,11 +389,19 @@ extern void irq_domain_associate_many(struct irq_domain *domain, extern void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq); -extern unsigned int irq_create_mapping(struct irq_domain *host, - irq_hw_number_t hwirq); +extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); +static inline unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq) +{ + return irq_create_mapping_affinity(host, hwirq, NULL); +} + + /** * irq_linear_revmap() - Find a linux irq from a hw irq number. 
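 * (Editorial aside on the irq_create_mapping() wrapper above, not part
 * of this patch: existing callers stay source-compatible,
 *
 *	virq = irq_create_mapping(domain, hwirq);
 *
 * while affinity-aware code can pass a hint,
 *
 *	virq = irq_create_mapping_affinity(domain, hwirq, &affd);
 *
 * where affd is a struct irq_affinity_desc prepared by the caller.)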
* @domain: domain owning this hardware interrupt diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 10e6049c0ba9..b0e97e5de8ca 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1402,7 +1402,6 @@ extern int jbd2_journal_skip_recovery (journal_t *); extern void jbd2_journal_update_sb_errno(journal_t *); extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); -extern void __jbd2_journal_abort_hard (journal_t *); extern void jbd2_journal_abort (journal_t *, int); extern int jbd2_journal_errno (journal_t *); extern void jbd2_journal_ack_err (journal_t *); diff --git a/include/linux/kabi.h b/include/linux/kabi.h new file mode 100644 index 000000000000..24f31b35af88 --- /dev/null +++ b/include/linux/kabi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kabi.h + * + */ + +#ifndef _LINUX_KABI_H +#define _LINUX_KABI_H + +#define KABI_RESERVE(n) unsigned long kabi_reserved##n + +#endif /* _LINUX_KABI_H */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f0..1f96ce2b47df 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -18,6 +18,7 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? */ -extern int kallsyms_show_value(void); +extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ @@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline int kallsyms_show_value(void) +static inline bool kallsyms_show_value(const struct cred *cred) { return false; } diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index 85b5151911cf..4856706fbfeb 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -21,61 +21,61 @@ }) /* acceptable for old filesystems */ -static inline bool old_valid_dev(dev_t dev) +static __always_inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } -static inline u16 old_encode_dev(dev_t dev) +static __always_inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } -static inline dev_t old_decode_dev(u16 val) +static __always_inline dev_t old_decode_dev(u16 val) { return MKDEV((val >> 8) & 255, val & 255); } -static inline u32 new_encode_dev(dev_t dev) +static __always_inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } -static inline dev_t new_decode_dev(u32 dev) +static __always_inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } -static inline u64 huge_encode_dev(dev_t dev) +static __always_inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } -static inline dev_t huge_decode_dev(u64 dev) +static __always_inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } -static inline int sysv_valid_dev(dev_t dev) +static __always_inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } -static inline u32 
sysv_encode_dev(dev_t dev) +static __always_inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } -static inline unsigned sysv_major(u32 dev) +static __always_inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } -static inline unsigned sysv_minor(u32 dev) +static __always_inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index f797ccc650e7..b71eb456b8c5 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -16,6 +16,7 @@ #include <linux/atomic.h> #include <linux/uidgid.h> #include <linux/wait.h> +#include <linux/kabi.h> struct file; struct dentry; @@ -178,6 +179,9 @@ struct kernfs_syscall_ops { const char *new_name); int (*show_path)(struct seq_file *sf, struct kernfs_node *kn, struct kernfs_root *root); + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; struct kernfs_root { @@ -271,6 +275,8 @@ struct kernfs_ops { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); }; /* diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..a1cffce3de8c 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -293,6 +293,11 @@ struct kimage { /* Information for loading purgatory */ struct purgatory_info purgatory_info; #endif + +#ifdef CONFIG_IMA_KEXEC + /* Virtual address of IMA measurement buffer for kexec syscall */ + void *ima_buffer; +#endif }; /* kexec interface functions */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 4ded94bcf274..2ab2d6d6aeab 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -127,7 +127,7 @@ struct key_type { * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ - long (*read)(const struct key *key, char __user *buffer, size_t buflen); + long (*read)(const struct key *key, char *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) diff --git a/include/linux/key.h b/include/linux/key.h index 6cf8e71cf8b7..9c26cc9b802a 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -269,6 +269,7 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ +#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index b072aeb1fd78..4d6fe87fd38f 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -323,7 +323,7 @@ extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (raw_smp_processor_id() == atomic_read(&kgdb_active)) + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index bc45ea1efbf7..c941b7377321 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,7 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct 
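KABI_RESERVE(n) from the new kabi.h simply pastes a numbered unsigned long into a structure, and the kernfs hunk above shows it being placed at the tail of ops structures. The point is binary compatibility: a later update can turn a reserved slot into a real member without moving any existing offsets. A hypothetical consumer, with the structure invented for illustration:

    #include <linux/kabi.h>

    struct example_ops {                    /* invented structure */
            int (*existing_cb)(void *data);
            KABI_RESERVE(1);        /* -> unsigned long kabi_reserved1; */
            KABI_RESERVE(2);        /* -> unsigned long kabi_reserved2; */
    };
    /*
     * A later release can replace kabi_reserved1 with, say, a new function
     * pointer of the same size; sizeof(struct example_ops) and the offset
     * of existing_cb stay fixed, so modules built earlier still match.
     */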
*vma, unsigned long vm_flags); +extern void khugepaged_min_free_kbytes_update(void); #ifdef CONFIG_SHMEM extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); #else @@ -85,6 +86,10 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { } + +static inline void khugepaged_min_free_kbytes_update(void) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e2ca0a292e21..f1dc39d57731 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -27,6 +27,7 @@ #include <linux/atomic.h> #include <linux/workqueue.h> #include <linux/uidgid.h> +#include <linux/kabi.h> #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 32 /* number of env pointers */ @@ -78,6 +79,9 @@ struct kobject { unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; unsigned int uevent_suppress:1; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern __printf(2, 3) @@ -144,6 +148,8 @@ struct kobj_type { const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); + + KABI_RESERVE(1); }; struct kobj_uevent_env { @@ -195,6 +201,9 @@ struct kset { spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; + + KABI_RESERVE(1); + KABI_RESERVE(2); } __randomize_layout; extern void kset_init(struct kset *kset); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04bdaf01112c..a121fd8e7c3a 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -232,7 +232,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); extern bool arch_kprobe_on_func_entry(unsigned long offset); -extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); +extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); extern int kprobe_add_ksym_blacklist(unsigned long entry); @@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); @@ -365,6 +369,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); +void kprobe_free_init_mem(void); + int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); @@ -422,6 +428,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } +static inline void kprobe_free_init_mem(void) +{ +} static inline int disable_kprobe(struct kprobe *kp) { return -ENOSYS; diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 0f9da966934e..c7108ce5a051 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -31,6 +31,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), unsigned int cpu, const char *namefmt); +void 
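kprobe_busy_begin()/kprobe_busy_end() are only declared in the kprobes.h hunk above, so their exact contract lives in kernel/kprobes.c; from the names and the companion kprobe_flush_task() they read as a guard that keeps kprobe processing from recursing into the bracketed region. A heavily hedged sketch of that assumed usage (the bookkeeping helper is invented):

    static void example_flush(struct task_struct *task)
    {
            kprobe_busy_begin();            /* assumed: mark this CPU busy */
            flush_task_retprobes(task);     /* hypothetical kretprobe walk */
            kprobe_busy_end();
    }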
kthread_set_per_cpu(struct task_struct *k, int cpu); +bool kthread_is_per_cpu(struct task_struct *k); + /** * kthread_run - create and wake a thread. * @threadfn: the function to run until signal_pending(current). diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b81f0f1ded5f..0ec4ec428b9b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); @@ -1027,7 +1027,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (gfn >= memslots[start].base_gfn && + if (start < slots->used_slots && gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); return &memslots[start]; @@ -1376,8 +1376,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable); +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); diff --git a/include/linux/libata.h b/include/linux/libata.h index c44e4cfbcb16..3c3d8d6b1618 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include <linux/cdrom.h> #include <linux/sched.h> +#include <linux/async.h> /* * Define if arch has non-standard setup. 
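The search_memslots() change in the kvm_host.h hunk above is a bounds fix: the binary search can leave start pointing one past the last live slot (for instance when used_slots is zero), so the candidate must be range checked before it is dereferenced. Condensed to its fixed shape:

    if (start < slots->used_slots &&
        gfn >= memslots[start].base_gfn &&
        gfn <  memslots[start].base_gfn + memslots[start].npages)
            return &memslots[start];    /* dereferenced only when in range */
    return NULL;                        /* gfn matches no live slot */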
This is a _PCI_ standard @@ -421,6 +422,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -484,6 +486,7 @@ enum hsm_task_states { }; enum ata_completion_errors { + AC_ERR_OK = 0, /* no error */ AC_ERR_DEV = (1 << 0), /* device reported error */ AC_ERR_HSM = (1 << 1), /* host state machine violation */ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ @@ -870,6 +873,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; @@ -891,9 +896,9 @@ struct ata_port_operations { /* * Command execution */ - int (*qc_defer)(struct ata_queued_cmd *qc); - int (*check_atapi_dma)(struct ata_queued_cmd *qc); - void (*qc_prep)(struct ata_queued_cmd *qc); + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); unsigned int (*qc_issue)(struct ata_queued_cmd *qc); bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); @@ -1161,7 +1166,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); extern const char *ata_mode_string(unsigned long xfer_mask); extern unsigned long ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); -extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -1895,9 +1900,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY -extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); -extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 7e020782ade2..f3ae8f3dea2c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -75,32 +75,58 @@ #ifdef __ASSEMBLY__ +/* SYM_T_FUNC -- type used by assembler to mark functions */ +#ifndef SYM_T_FUNC +#define SYM_T_FUNC STT_FUNC +#endif + +/* SYM_T_OBJECT -- type used by assembler to mark data */ +#ifndef SYM_T_OBJECT +#define SYM_T_OBJECT STT_OBJECT +#endif + +/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */ +#ifndef SYM_T_NONE +#define SYM_T_NONE STT_NOTYPE +#endif + +/* SYM_A_* -- align the symbol? 
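With qc_prep() now returning enum ata_completion_errors and AC_ERR_OK defined as 0, a prep hook can report setup failures instead of being forced to succeed. A sketch of what a driver-side hook might look like under that contract; the DMA-table helper is invented:

    static enum ata_completion_errors sample_qc_prep(struct ata_queued_cmd *qc)
    {
            if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                    return AC_ERR_OK;           /* nothing to prepare */

            if (sample_fill_dma_table(qc))      /* hypothetical helper */
                    return AC_ERR_SYSTEM;       /* propagate the failure */

            return AC_ERR_OK;
    }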
*/ +#define SYM_A_ALIGN ALIGN +#define SYM_A_NONE /* nothing */ + +/* SYM_L_* -- linkage of symbols */ +#define SYM_L_GLOBAL(name) .globl name +#define SYM_L_WEAK(name) .weak name +#define SYM_L_LOCAL(name) /* nothing */ + #ifndef LINKER_SCRIPT #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR +/* === DEPRECATED annotations === */ + #ifndef GLOBAL +/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ #define GLOBAL(name) \ .globl name ASM_NL \ name: #endif #ifndef ENTRY +/* deprecated, use SYM_FUNC_START */ #define ENTRY(name) \ - .globl name ASM_NL \ - ALIGN ASM_NL \ - name: + SYM_FUNC_START(name) #endif #endif /* LINKER_SCRIPT */ #ifndef WEAK +/* deprecated, use SYM_FUNC_START_WEAK* */ #define WEAK(name) \ - .weak name ASM_NL \ - ALIGN ASM_NL \ - name: + SYM_FUNC_START_WEAK(name) #endif #ifndef END +/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */ #define END(name) \ .size name, .-name #endif @@ -110,11 +136,214 @@ * static analysis tools such as stack depth analyzer. */ #ifndef ENDPROC +/* deprecated, use SYM_FUNC_END */ #define ENDPROC(name) \ - .type name, @function ASM_NL \ - END(name) + SYM_FUNC_END(name) #endif +/* === generic annotations === */ + +/* SYM_ENTRY -- use only if you have to for non-paired symbols */ +#ifndef SYM_ENTRY +#define SYM_ENTRY(name, linkage, align...) \ + linkage(name) ASM_NL \ + align ASM_NL \ + name: #endif +/* SYM_START -- use only if you have to */ +#ifndef SYM_START +#define SYM_START(name, linkage, align...) \ + SYM_ENTRY(name, linkage, align) #endif + +/* SYM_END -- use only if you have to */ +#ifndef SYM_END +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name +#endif + +/* === code annotations === */ + +/* + * FUNC -- C-like functions (proper stack frame etc.) + * CODE -- non-C code (e.g. irq handlers with different, special stack etc.) + * + * Objtool validates stack for FUNC, but not for CODE. + * Objtool generates debug info for both FUNC & CODE, but needs special + * annotations for each CODE's start (to describe the actual stack frame). + * + * ALIAS -- does not generate debug info -- the aliased function will + */ + +/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */ +#ifndef SYM_INNER_LABEL_ALIGN +#define SYM_INNER_LABEL_ALIGN(name, linkage) \ + .type name SYM_T_NONE ASM_NL \ + SYM_ENTRY(name, linkage, SYM_A_ALIGN) +#endif + +/* SYM_INNER_LABEL -- only for labels in the middle of code */ +#ifndef SYM_INNER_LABEL +#define SYM_INNER_LABEL(name, linkage) \ + .type name SYM_T_NONE ASM_NL \ + SYM_ENTRY(name, linkage, SYM_A_NONE) +#endif + +/* + * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one + * function + */ +#ifndef SYM_FUNC_START_LOCAL_ALIAS +#define SYM_FUNC_START_LOCAL_ALIAS(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) +#endif + +/* + * SYM_FUNC_START_ALIAS -- use where there are two global names for one + * function + */ +#ifndef SYM_FUNC_START_ALIAS +#define SYM_FUNC_START_ALIAS(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START -- use for global functions */ +#ifndef SYM_FUNC_START +/* + * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two + * later. 
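Under the definitions above, converting the deprecated annotations is mechanical: ENTRY/ENDPROC becomes SYM_FUNC_START/SYM_FUNC_END for C-like functions, while SYM_CODE_* marks special-stack code that objtool should not stack-check. A minimal GNU-assembler sketch with invented symbols:

    SYM_FUNC_START(my_memzero)          /* was: ENTRY(my_memzero) */
            /* C-like body; objtool validates its stack usage */
            ret
    SYM_FUNC_END(my_memzero)            /* was: ENDPROC(my_memzero) */

    SYM_CODE_START_LOCAL(irq_stub)      /* non-C code, no stack validation */
            /* entry code running on a special stack */
    SYM_CODE_END(irq_stub)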
+ */ +#define SYM_FUNC_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */ +#ifndef SYM_FUNC_START_NOALIGN +#define SYM_FUNC_START_NOALIGN(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) +#endif + +/* SYM_FUNC_START_LOCAL -- use for local functions */ +#ifndef SYM_FUNC_START_LOCAL +/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */ +#ifndef SYM_FUNC_START_LOCAL_NOALIGN +#define SYM_FUNC_START_LOCAL_NOALIGN(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) +#endif + +/* SYM_FUNC_START_WEAK -- use for weak functions */ +#ifndef SYM_FUNC_START_WEAK +#define SYM_FUNC_START_WEAK(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */ +#ifndef SYM_FUNC_START_WEAK_NOALIGN +#define SYM_FUNC_START_WEAK_NOALIGN(name) \ + SYM_START(name, SYM_L_WEAK, SYM_A_NONE) +#endif + +/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ +#ifndef SYM_FUNC_END_ALIAS +#define SYM_FUNC_END_ALIAS(name) \ + SYM_END(name, SYM_T_FUNC) +#endif + +/* + * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, + * SYM_FUNC_START_WEAK, ... + */ +#ifndef SYM_FUNC_END +/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_END(name) \ + SYM_END(name, SYM_T_FUNC) +#endif + +/* SYM_CODE_START -- use for non-C (special) functions */ +#ifndef SYM_CODE_START +#define SYM_CODE_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */ +#ifndef SYM_CODE_START_NOALIGN +#define SYM_CODE_START_NOALIGN(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) +#endif + +/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */ +#ifndef SYM_CODE_START_LOCAL +#define SYM_CODE_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) +#endif + +/* + * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions, + * w/o alignment + */ +#ifndef SYM_CODE_START_LOCAL_NOALIGN +#define SYM_CODE_START_LOCAL_NOALIGN(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) +#endif + +/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */ +#ifndef SYM_CODE_END +#define SYM_CODE_END(name) \ + SYM_END(name, SYM_T_NONE) +#endif + +/* === data annotations === */ + +/* SYM_DATA_START -- global data symbol */ +#ifndef SYM_DATA_START +#define SYM_DATA_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) +#endif + +/* SYM_DATA_START -- local data symbol */ +#ifndef SYM_DATA_START_LOCAL +#define SYM_DATA_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) +#endif + +/* SYM_DATA_END -- the end of SYM_DATA_START symbol */ +#ifndef SYM_DATA_END +#define SYM_DATA_END(name) \ + SYM_END(name, SYM_T_OBJECT) +#endif + +/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */ +#ifndef SYM_DATA_END_LABEL +#define SYM_DATA_END_LABEL(name, linkage, label) \ + linkage(label) ASM_NL \ + .type label SYM_T_OBJECT ASM_NL \ + label: \ + SYM_END(name, SYM_T_OBJECT) +#endif + +/* SYM_DATA -- start+end wrapper around simple global data */ +#ifndef SYM_DATA +#define SYM_DATA(name, data...) 
\ + SYM_DATA_START(name) ASM_NL \ + data ASM_NL \ + SYM_DATA_END(name) +#endif + +/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */ +#ifndef SYM_DATA_LOCAL +#define SYM_DATA_LOCAL(name, data...) \ + SYM_DATA_START_LOCAL(name) ASM_NL \ + data ASM_NL \ + SYM_DATA_END(name) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _LINUX_LINKAGE_H */ diff --git a/include/linux/log2.h b/include/linux/log2.h index 83a4a3ca3e8a..c619ec6eff4a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h new file mode 100644 index 000000000000..ca954f784e24 --- /dev/null +++ b/include/linux/lsm_hook_defs.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Linux Security Module Hook declarations. + * + * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> + * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> + * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> + * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) + * Copyright (C) 2015 Intel Corporation. + * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> + * Copyright (C) 2016 Mellanox Techonologies + * Copyright (C) 2020 Google LLC. + */ + +/* + * The macro LSM_HOOK is used to define the data structures required by the + * the LSM framework using the pattern: + * + * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) + * + * struct security_hook_heads { + * #define LSM_HOOK(RET, DEFAULT, NAME, ...) 
struct hlist_head NAME; + * #include <linux/lsm_hook_defs.h> + * #undef LSM_HOOK + * }; + */ +LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr) +LSM_HOOK(int, 0, binder_transaction, struct task_struct *from, + struct task_struct *to) +LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from, + struct task_struct *to) +LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from, + struct task_struct *to, struct file *file) +LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child, + unsigned int mode) +LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent) +LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted) +LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, + const kernel_cap_t *effective, const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) +LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, + int cap, unsigned int opts) +LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb) +LSM_HOOK(int, 0, quota_on, struct dentry *dentry) +LSM_HOOK(int, 0, syslog, int type) +LSM_HOOK(int, 0, settime, const struct timespec64 *ts, + const struct timezone *tz) +LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) +LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, + struct fs_context *src_sc) +LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, + struct fs_parameter *param) +LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) +LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) +LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) +LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb) +LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) +LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) +LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, + const char *type, unsigned long flags, void *data) +LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags) +LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path, + const struct path *new_path) +LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts, + unsigned long kern_flags, unsigned long *set_kern_flags) +LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb, + struct super_block *newsb, unsigned long kern_flags, + unsigned long *set_kern_flags) +LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val, + int len, void **mnt_opts) +LSM_HOOK(int, 0, move_mount, const struct path *from_path, + const struct path *to_path) +LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry, + int mode, const struct qstr *name, void **ctx, u32 *ctxlen) +LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, + struct qstr *name, const struct cred *old, struct cred *new) + +#ifdef CONFIG_SECURITY_PATH +LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry) +LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry, + umode_t 
mode) +LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry) +LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev) +LSM_HOOK(int, 0, path_truncate, const struct path *path) +LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry, + const char *old_name) +LSM_HOOK(int, 0, path_link, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry) +LSM_HOOK(int, 0, path_rename, const struct path *old_dir, + struct dentry *old_dentry, const struct path *new_dir, + struct dentry *new_dentry) +LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode) +LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid) +LSM_HOOK(int, 0, path_chroot, const struct path *path) +#endif /* CONFIG_SECURITY_PATH */ + +/* Needed for inode based security check */ +LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, + unsigned int obj_type) +LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) +LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) +LSM_HOOK(int, 0, inode_init_security, struct inode *inode, + struct inode *dir, const struct qstr *qstr, const char **name, + void **value, size_t *len) +LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry, + umode_t mode) +LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry) +LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry) +LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry, + const char *old_name) +LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry, + umode_t mode) +LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry) +LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev) +LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry) +LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode, + bool rcu) +LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask) +LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr) +LSM_HOOK(int, 0, inode_getattr, const struct path *path) +LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry) +LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry) +LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry) +LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode, + const char *name, void **buffer, bool alloc) +LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, + const char *name, const void *value, size_t size, int flags) +LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, + size_t buffer_size) +LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) +LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) +LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, 0, 
kernfs_init_security, struct kernfs_node *kn_dir, + struct kernfs_node *kn) +LSM_HOOK(int, 0, file_permission, struct file *file, int mask) +LSM_HOOK(int, 0, file_alloc_security, struct file *file) +LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) +LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, + unsigned long arg) +LSM_HOOK(int, 0, mmap_addr, unsigned long addr) +LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot) +LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd) +LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd, + unsigned long arg) +LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file) +LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk, + struct fown_struct *fown, int sig) +LSM_HOOK(int, 0, file_receive, struct file *file) +LSM_HOOK(int, 0, file_open, struct file *file) +LSM_HOOK(int, 0, task_alloc, struct task_struct *task, + unsigned long clone_flags) +LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task) +LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp) +LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred) +LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, + gfp_t gfp) +LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, + const struct cred *old) +LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) +LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) +LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) +LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) +LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id) +LSM_HOOK(int, 0, kernel_read_file, struct file *file, + enum kernel_read_file_id id) +LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, + loff_t size, enum kernel_read_file_id id) +LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, + int flags) +LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) +LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) +LSM_HOOK(int, 0, task_getsid, struct task_struct *p) +LSM_HOOK(void, LSM_RET_VOID, task_getsecid, struct task_struct *p, u32 *secid) +LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) +LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) +LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) +LSM_HOOK(int, 0, task_prlimit, const struct cred *cred, + const struct cred *tcred, unsigned int flags) +LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim) +LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p) +LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p) +LSM_HOOK(int, 0, task_movememory, struct task_struct *p) +LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info, + int sig, const struct cred *cred) +LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2, + unsigned long arg3, unsigned long arg4, unsigned long arg5) +LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, + struct inode *inode) +LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) +LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp, + u32 *secid) +LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) +LSM_HOOK(void, 
LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) +LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security, + struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg) +LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm, + struct msg_msg *msg, int msqflg) +LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm, + struct msg_msg *msg, struct task_struct *target, long type, int mode) +LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg) +LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr, + int shmflg) +LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg) +LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops, + unsigned nsops, int alter) +LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) +LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, + struct inode *inode) +LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name, + char **value) +LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) +LSM_HOOK(int, 0, ismaclabel, const char *name) +LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata, + u32 *seclen) +LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) +LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) +LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) +LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) +LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, + u32 *ctxlen) + +#ifdef CONFIG_SECURITY_NETWORK +LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, + struct sock *newsk) +LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other) +LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern) +LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type, + int protocol, int kern) +LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb) +LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address, + int addrlen) +LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address, + int addrlen) +LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog) +LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock) +LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg, + int size) +LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg, + int size, int flags) +LSM_HOOK(int, 0, socket_getsockname, struct socket *sock) +LSM_HOOK(int, 0, socket_getpeername, struct socket *sock) +LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) +LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, 
int level, int optname) +LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) +LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) +LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock, + char __user *optval, int __user *optlen, unsigned len) +LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock, + struct sk_buff *skb, u32 *secid) +LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) +LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) +LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, + struct sock *newsk) +LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) +LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb, + struct request_sock *req) +LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk, + const struct request_sock *req) +LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk, + struct sk_buff *skb) +LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid) +LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) +LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) +LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, + struct flowi *fl) +LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) +LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) +LSM_HOOK(int, 0, tun_dev_create, void) +LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) +LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) +LSM_HOOK(int, 0, tun_dev_open, void *security) +LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep, + struct sk_buff *skb) +LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, + struct sockaddr *address, int addrlen) +LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep, + struct sock *sk, struct sock *newsk) +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_INFINIBAND +LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey) +LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name, + u8 port_num) +LSM_HOOK(int, 0, ib_alloc_security, void **sec) +LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec) +#endif /* CONFIG_SECURITY_INFINIBAND */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM +LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) +LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx, + struct xfrm_sec_ctx **new_ctx) +LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security, + struct xfrm_sec_ctx *ctx) +LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx) +LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) +LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) +LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) +LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, + u8 dir) +LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, + struct xfrm_policy *xp, const struct flowi *fl) +LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, + int ckall) +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + +/* key management security hooks */ +#ifdef CONFIG_KEYS 
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, + unsigned long flags) +LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) +LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, + unsigned perm) +LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer) +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT +LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr, + void **lsmrule) +LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) +LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule) +LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_BPF_SYSCALL +LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size) +LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode) +LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog) +LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map) +LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map) +LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux) +LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux) +#endif /* CONFIG_BPF_SYSCALL */ + +LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index a3763247547c..c09623b32489 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1456,610 +1456,15 @@ * @what: kernel feature being accessed */ union security_list_options { - int (*binder_set_context_mgr)(struct task_struct *mgr); - int (*binder_transaction)(struct task_struct *from, - struct task_struct *to); - int (*binder_transfer_binder)(struct task_struct *from, - struct task_struct *to); - int (*binder_transfer_file)(struct task_struct *from, - struct task_struct *to, - struct file *file); - - int (*ptrace_access_check)(struct task_struct *child, - unsigned int mode); - int (*ptrace_traceme)(struct task_struct *parent); - int (*capget)(struct task_struct *target, kernel_cap_t *effective, - kernel_cap_t *inheritable, kernel_cap_t *permitted); - int (*capset)(struct cred *new, const struct cred *old, - const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); - int (*capable)(const struct cred *cred, - struct user_namespace *ns, - int cap, - unsigned int opts); - int (*quotactl)(int cmds, int type, int id, struct super_block *sb); - int (*quota_on)(struct dentry *dentry); - int (*syslog)(int type); - int (*settime)(const struct timespec64 *ts, const struct timezone *tz); - int (*vm_enough_memory)(struct mm_struct *mm, long pages); - - int (*bprm_set_creds)(struct linux_binprm *bprm); - int (*bprm_check_security)(struct linux_binprm *bprm); - void (*bprm_committing_creds)(struct linux_binprm *bprm); - void (*bprm_committed_creds)(struct linux_binprm *bprm); - - int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc); - int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param); - - int (*sb_alloc_security)(struct super_block *sb); - void (*sb_free_security)(struct super_block *sb); - void (*sb_free_mnt_opts)(void *mnt_opts); - int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts); - int (*sb_remount)(struct super_block *sb, void *mnt_opts); - int (*sb_kern_mount)(struct super_block *sb); - int (*sb_show_options)(struct seq_file *m, struct super_block *sb); - int (*sb_statfs)(struct dentry *dentry); - int (*sb_mount)(const char *dev_name, const struct path *path, - 
const char *type, unsigned long flags, void *data); - int (*sb_umount)(struct vfsmount *mnt, int flags); - int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); - int (*sb_set_mnt_opts)(struct super_block *sb, - void *mnt_opts, - unsigned long kern_flags, - unsigned long *set_kern_flags); - int (*sb_clone_mnt_opts)(const struct super_block *oldsb, - struct super_block *newsb, - unsigned long kern_flags, - unsigned long *set_kern_flags); - int (*sb_add_mnt_opt)(const char *option, const char *val, int len, - void **mnt_opts); - int (*move_mount)(const struct path *from_path, const struct path *to_path); - int (*dentry_init_security)(struct dentry *dentry, int mode, - const struct qstr *name, void **ctx, - u32 *ctxlen); - int (*dentry_create_files_as)(struct dentry *dentry, int mode, - struct qstr *name, - const struct cred *old, - struct cred *new); - - -#ifdef CONFIG_SECURITY_PATH - int (*path_unlink)(const struct path *dir, struct dentry *dentry); - int (*path_mkdir)(const struct path *dir, struct dentry *dentry, - umode_t mode); - int (*path_rmdir)(const struct path *dir, struct dentry *dentry); - int (*path_mknod)(const struct path *dir, struct dentry *dentry, - umode_t mode, unsigned int dev); - int (*path_truncate)(const struct path *path); - int (*path_symlink)(const struct path *dir, struct dentry *dentry, - const char *old_name); - int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, - struct dentry *new_dentry); - int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, - const struct path *new_dir, - struct dentry *new_dentry); - int (*path_chmod)(const struct path *path, umode_t mode); - int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); - int (*path_chroot)(const struct path *path); -#endif - /* Needed for inode based security check */ - int (*path_notify)(const struct path *path, u64 mask, - unsigned int obj_type); - int (*inode_alloc_security)(struct inode *inode); - void (*inode_free_security)(struct inode *inode); - int (*inode_init_security)(struct inode *inode, struct inode *dir, - const struct qstr *qstr, - const char **name, void **value, - size_t *len); - int (*inode_create)(struct inode *dir, struct dentry *dentry, - umode_t mode); - int (*inode_link)(struct dentry *old_dentry, struct inode *dir, - struct dentry *new_dentry); - int (*inode_unlink)(struct inode *dir, struct dentry *dentry); - int (*inode_symlink)(struct inode *dir, struct dentry *dentry, - const char *old_name); - int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, - umode_t mode); - int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); - int (*inode_mknod)(struct inode *dir, struct dentry *dentry, - umode_t mode, dev_t dev); - int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry); - int (*inode_readlink)(struct dentry *dentry); - int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, - bool rcu); - int (*inode_permission)(struct inode *inode, int mask); - int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); - int (*inode_getattr)(const struct path *path); - int (*inode_setxattr)(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); - void (*inode_post_setxattr)(struct dentry *dentry, const char *name, - const void *value, size_t size, - int flags); - int (*inode_getxattr)(struct dentry *dentry, const char *name); - int (*inode_listxattr)(struct dentry *dentry); - int 
(*inode_removexattr)(struct dentry *dentry, const char *name); - int (*inode_need_killpriv)(struct dentry *dentry); - int (*inode_killpriv)(struct dentry *dentry); - int (*inode_getsecurity)(struct inode *inode, const char *name, - void **buffer, bool alloc); - int (*inode_setsecurity)(struct inode *inode, const char *name, - const void *value, size_t size, - int flags); - int (*inode_listsecurity)(struct inode *inode, char *buffer, - size_t buffer_size); - void (*inode_getsecid)(struct inode *inode, u32 *secid); - int (*inode_copy_up)(struct dentry *src, struct cred **new); - int (*inode_copy_up_xattr)(const char *name); - - int (*kernfs_init_security)(struct kernfs_node *kn_dir, - struct kernfs_node *kn); - - int (*file_permission)(struct file *file, int mask); - int (*file_alloc_security)(struct file *file); - void (*file_free_security)(struct file *file); - int (*file_ioctl)(struct file *file, unsigned int cmd, - unsigned long arg); - int (*mmap_addr)(unsigned long addr); - int (*mmap_file)(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags); - int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, - unsigned long prot); - int (*file_lock)(struct file *file, unsigned int cmd); - int (*file_fcntl)(struct file *file, unsigned int cmd, - unsigned long arg); - void (*file_set_fowner)(struct file *file); - int (*file_send_sigiotask)(struct task_struct *tsk, - struct fown_struct *fown, int sig); - int (*file_receive)(struct file *file); - int (*file_open)(struct file *file); - - int (*task_alloc)(struct task_struct *task, unsigned long clone_flags); - void (*task_free)(struct task_struct *task); - int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); - void (*cred_free)(struct cred *cred); - int (*cred_prepare)(struct cred *new, const struct cred *old, - gfp_t gfp); - void (*cred_transfer)(struct cred *new, const struct cred *old); - void (*cred_getsecid)(const struct cred *c, u32 *secid); - int (*kernel_act_as)(struct cred *new, u32 secid); - int (*kernel_create_files_as)(struct cred *new, struct inode *inode); - int (*kernel_module_request)(char *kmod_name); - int (*kernel_load_data)(enum kernel_load_data_id id); - int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); - int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, - enum kernel_read_file_id id); - int (*task_fix_setuid)(struct cred *new, const struct cred *old, - int flags); - int (*task_setpgid)(struct task_struct *p, pid_t pgid); - int (*task_getpgid)(struct task_struct *p); - int (*task_getsid)(struct task_struct *p); - void (*task_getsecid)(struct task_struct *p, u32 *secid); - int (*task_setnice)(struct task_struct *p, int nice); - int (*task_setioprio)(struct task_struct *p, int ioprio); - int (*task_getioprio)(struct task_struct *p); - int (*task_prlimit)(const struct cred *cred, const struct cred *tcred, - unsigned int flags); - int (*task_setrlimit)(struct task_struct *p, unsigned int resource, - struct rlimit *new_rlim); - int (*task_setscheduler)(struct task_struct *p); - int (*task_getscheduler)(struct task_struct *p); - int (*task_movememory)(struct task_struct *p); - int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info, - int sig, const struct cred *cred); - int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); - void (*task_to_inode)(struct task_struct *p, struct inode *inode); - - int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); - void 
(*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); - - int (*msg_msg_alloc_security)(struct msg_msg *msg); - void (*msg_msg_free_security)(struct msg_msg *msg); - - int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm); - void (*msg_queue_free_security)(struct kern_ipc_perm *perm); - int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg); - int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd); - int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg, - int msqflg); - int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg, - struct task_struct *target, long type, - int mode); - - int (*shm_alloc_security)(struct kern_ipc_perm *perm); - void (*shm_free_security)(struct kern_ipc_perm *perm); - int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg); - int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd); - int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr, - int shmflg); - - int (*sem_alloc_security)(struct kern_ipc_perm *perm); - void (*sem_free_security)(struct kern_ipc_perm *perm); - int (*sem_associate)(struct kern_ipc_perm *perm, int semflg); - int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd); - int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops, - unsigned nsops, int alter); - - int (*netlink_send)(struct sock *sk, struct sk_buff *skb); - - void (*d_instantiate)(struct dentry *dentry, struct inode *inode); - - int (*getprocattr)(struct task_struct *p, char *name, char **value); - int (*setprocattr)(const char *name, void *value, size_t size); - int (*ismaclabel)(const char *name); - int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); - int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); - void (*release_secctx)(char *secdata, u32 seclen); - - void (*inode_invalidate_secctx)(struct inode *inode); - int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); - int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); - int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); - -#ifdef CONFIG_SECURITY_NETWORK - int (*unix_stream_connect)(struct sock *sock, struct sock *other, - struct sock *newsk); - int (*unix_may_send)(struct socket *sock, struct socket *other); - - int (*socket_create)(int family, int type, int protocol, int kern); - int (*socket_post_create)(struct socket *sock, int family, int type, - int protocol, int kern); - int (*socket_socketpair)(struct socket *socka, struct socket *sockb); - int (*socket_bind)(struct socket *sock, struct sockaddr *address, - int addrlen); - int (*socket_connect)(struct socket *sock, struct sockaddr *address, - int addrlen); - int (*socket_listen)(struct socket *sock, int backlog); - int (*socket_accept)(struct socket *sock, struct socket *newsock); - int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, - int size); - int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, - int size, int flags); - int (*socket_getsockname)(struct socket *sock); - int (*socket_getpeername)(struct socket *sock); - int (*socket_getsockopt)(struct socket *sock, int level, int optname); - int (*socket_setsockopt)(struct socket *sock, int level, int optname); - int (*socket_shutdown)(struct socket *sock, int how); - int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); - int (*socket_getpeersec_stream)(struct socket *sock, - char __user *optval, - int __user *optlen, unsigned len); - int (*socket_getpeersec_dgram)(struct socket *sock, - struct sk_buff *skb, u32 
*secid); - int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority); - void (*sk_free_security)(struct sock *sk); - void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); - void (*sk_getsecid)(struct sock *sk, u32 *secid); - void (*sock_graft)(struct sock *sk, struct socket *parent); - int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, - struct request_sock *req); - void (*inet_csk_clone)(struct sock *newsk, - const struct request_sock *req); - void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); - int (*secmark_relabel_packet)(u32 secid); - void (*secmark_refcount_inc)(void); - void (*secmark_refcount_dec)(void); - void (*req_classify_flow)(const struct request_sock *req, - struct flowi *fl); - int (*tun_dev_alloc_security)(void **security); - void (*tun_dev_free_security)(void *security); - int (*tun_dev_create)(void); - int (*tun_dev_attach_queue)(void *security); - int (*tun_dev_attach)(struct sock *sk, void *security); - int (*tun_dev_open)(void *security); - int (*sctp_assoc_request)(struct sctp_endpoint *ep, - struct sk_buff *skb); - int (*sctp_bind_connect)(struct sock *sk, int optname, - struct sockaddr *address, int addrlen); - void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, - struct sock *newsk); -#endif /* CONFIG_SECURITY_NETWORK */ - -#ifdef CONFIG_SECURITY_INFINIBAND - int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); - int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, - u8 port_num); - int (*ib_alloc_security)(void **sec); - void (*ib_free_security)(void *sec); -#endif /* CONFIG_SECURITY_INFINIBAND */ - -#ifdef CONFIG_SECURITY_NETWORK_XFRM - int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx, - gfp_t gfp); - int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, - struct xfrm_sec_ctx **new_ctx); - void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); - int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); - int (*xfrm_state_alloc)(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx); - int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, - struct xfrm_sec_ctx *polsec, - u32 secid); - void (*xfrm_state_free_security)(struct xfrm_state *x); - int (*xfrm_state_delete_security)(struct xfrm_state *x); - int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, - u8 dir); - int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, - struct xfrm_policy *xp, - const struct flowi *fl); - int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); -#endif /* CONFIG_SECURITY_NETWORK_XFRM */ - - /* key management security hooks */ -#ifdef CONFIG_KEYS - int (*key_alloc)(struct key *key, const struct cred *cred, - unsigned long flags); - void (*key_free)(struct key *key); - int (*key_permission)(key_ref_t key_ref, const struct cred *cred, - unsigned perm); - int (*key_getsecurity)(struct key *key, char **_buffer); -#endif /* CONFIG_KEYS */ - -#ifdef CONFIG_AUDIT - int (*audit_rule_init)(u32 field, u32 op, char *rulestr, - void **lsmrule); - int (*audit_rule_known)(struct audit_krule *krule); - int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule); - void (*audit_rule_free)(void *lsmrule); -#endif /* CONFIG_AUDIT */ - -#ifdef CONFIG_BPF_SYSCALL - int (*bpf)(int cmd, union bpf_attr *attr, - unsigned int size); - int (*bpf_map)(struct bpf_map *map, fmode_t fmode); - int (*bpf_prog)(struct bpf_prog *prog); - int (*bpf_map_alloc_security)(struct bpf_map *map); - void 
(*bpf_map_free_security)(struct bpf_map *map); - int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); - void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); -#endif /* CONFIG_BPF_SYSCALL */ - int (*locked_down)(enum lockdown_reason what); + #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__); + #include "lsm_hook_defs.h" + #undef LSM_HOOK }; struct security_hook_heads { - struct hlist_head binder_set_context_mgr; - struct hlist_head binder_transaction; - struct hlist_head binder_transfer_binder; - struct hlist_head binder_transfer_file; - struct hlist_head ptrace_access_check; - struct hlist_head ptrace_traceme; - struct hlist_head capget; - struct hlist_head capset; - struct hlist_head capable; - struct hlist_head quotactl; - struct hlist_head quota_on; - struct hlist_head syslog; - struct hlist_head settime; - struct hlist_head vm_enough_memory; - struct hlist_head bprm_set_creds; - struct hlist_head bprm_check_security; - struct hlist_head bprm_committing_creds; - struct hlist_head bprm_committed_creds; - struct hlist_head fs_context_dup; - struct hlist_head fs_context_parse_param; - struct hlist_head sb_alloc_security; - struct hlist_head sb_free_security; - struct hlist_head sb_free_mnt_opts; - struct hlist_head sb_eat_lsm_opts; - struct hlist_head sb_remount; - struct hlist_head sb_kern_mount; - struct hlist_head sb_show_options; - struct hlist_head sb_statfs; - struct hlist_head sb_mount; - struct hlist_head sb_umount; - struct hlist_head sb_pivotroot; - struct hlist_head sb_set_mnt_opts; - struct hlist_head sb_clone_mnt_opts; - struct hlist_head sb_add_mnt_opt; - struct hlist_head move_mount; - struct hlist_head dentry_init_security; - struct hlist_head dentry_create_files_as; -#ifdef CONFIG_SECURITY_PATH - struct hlist_head path_unlink; - struct hlist_head path_mkdir; - struct hlist_head path_rmdir; - struct hlist_head path_mknod; - struct hlist_head path_truncate; - struct hlist_head path_symlink; - struct hlist_head path_link; - struct hlist_head path_rename; - struct hlist_head path_chmod; - struct hlist_head path_chown; - struct hlist_head path_chroot; -#endif - /* Needed for inode based modules as well */ - struct hlist_head path_notify; - struct hlist_head inode_alloc_security; - struct hlist_head inode_free_security; - struct hlist_head inode_init_security; - struct hlist_head inode_create; - struct hlist_head inode_link; - struct hlist_head inode_unlink; - struct hlist_head inode_symlink; - struct hlist_head inode_mkdir; - struct hlist_head inode_rmdir; - struct hlist_head inode_mknod; - struct hlist_head inode_rename; - struct hlist_head inode_readlink; - struct hlist_head inode_follow_link; - struct hlist_head inode_permission; - struct hlist_head inode_setattr; - struct hlist_head inode_getattr; - struct hlist_head inode_setxattr; - struct hlist_head inode_post_setxattr; - struct hlist_head inode_getxattr; - struct hlist_head inode_listxattr; - struct hlist_head inode_removexattr; - struct hlist_head inode_need_killpriv; - struct hlist_head inode_killpriv; - struct hlist_head inode_getsecurity; - struct hlist_head inode_setsecurity; - struct hlist_head inode_listsecurity; - struct hlist_head inode_getsecid; - struct hlist_head inode_copy_up; - struct hlist_head inode_copy_up_xattr; - struct hlist_head kernfs_init_security; - struct hlist_head file_permission; - struct hlist_head file_alloc_security; - struct hlist_head file_free_security; - struct hlist_head file_ioctl; - struct hlist_head mmap_addr; - struct hlist_head mmap_file; - struct 
hlist_head file_mprotect; - struct hlist_head file_lock; - struct hlist_head file_fcntl; - struct hlist_head file_set_fowner; - struct hlist_head file_send_sigiotask; - struct hlist_head file_receive; - struct hlist_head file_open; - struct hlist_head task_alloc; - struct hlist_head task_free; - struct hlist_head cred_alloc_blank; - struct hlist_head cred_free; - struct hlist_head cred_prepare; - struct hlist_head cred_transfer; - struct hlist_head cred_getsecid; - struct hlist_head kernel_act_as; - struct hlist_head kernel_create_files_as; - struct hlist_head kernel_load_data; - struct hlist_head kernel_read_file; - struct hlist_head kernel_post_read_file; - struct hlist_head kernel_module_request; - struct hlist_head task_fix_setuid; - struct hlist_head task_setpgid; - struct hlist_head task_getpgid; - struct hlist_head task_getsid; - struct hlist_head task_getsecid; - struct hlist_head task_setnice; - struct hlist_head task_setioprio; - struct hlist_head task_getioprio; - struct hlist_head task_prlimit; - struct hlist_head task_setrlimit; - struct hlist_head task_setscheduler; - struct hlist_head task_getscheduler; - struct hlist_head task_movememory; - struct hlist_head task_kill; - struct hlist_head task_prctl; - struct hlist_head task_to_inode; - struct hlist_head ipc_permission; - struct hlist_head ipc_getsecid; - struct hlist_head msg_msg_alloc_security; - struct hlist_head msg_msg_free_security; - struct hlist_head msg_queue_alloc_security; - struct hlist_head msg_queue_free_security; - struct hlist_head msg_queue_associate; - struct hlist_head msg_queue_msgctl; - struct hlist_head msg_queue_msgsnd; - struct hlist_head msg_queue_msgrcv; - struct hlist_head shm_alloc_security; - struct hlist_head shm_free_security; - struct hlist_head shm_associate; - struct hlist_head shm_shmctl; - struct hlist_head shm_shmat; - struct hlist_head sem_alloc_security; - struct hlist_head sem_free_security; - struct hlist_head sem_associate; - struct hlist_head sem_semctl; - struct hlist_head sem_semop; - struct hlist_head netlink_send; - struct hlist_head d_instantiate; - struct hlist_head getprocattr; - struct hlist_head setprocattr; - struct hlist_head ismaclabel; - struct hlist_head secid_to_secctx; - struct hlist_head secctx_to_secid; - struct hlist_head release_secctx; - struct hlist_head inode_invalidate_secctx; - struct hlist_head inode_notifysecctx; - struct hlist_head inode_setsecctx; - struct hlist_head inode_getsecctx; -#ifdef CONFIG_SECURITY_NETWORK - struct hlist_head unix_stream_connect; - struct hlist_head unix_may_send; - struct hlist_head socket_create; - struct hlist_head socket_post_create; - struct hlist_head socket_socketpair; - struct hlist_head socket_bind; - struct hlist_head socket_connect; - struct hlist_head socket_listen; - struct hlist_head socket_accept; - struct hlist_head socket_sendmsg; - struct hlist_head socket_recvmsg; - struct hlist_head socket_getsockname; - struct hlist_head socket_getpeername; - struct hlist_head socket_getsockopt; - struct hlist_head socket_setsockopt; - struct hlist_head socket_shutdown; - struct hlist_head socket_sock_rcv_skb; - struct hlist_head socket_getpeersec_stream; - struct hlist_head socket_getpeersec_dgram; - struct hlist_head sk_alloc_security; - struct hlist_head sk_free_security; - struct hlist_head sk_clone_security; - struct hlist_head sk_getsecid; - struct hlist_head sock_graft; - struct hlist_head inet_conn_request; - struct hlist_head inet_csk_clone; - struct hlist_head inet_conn_established; - struct hlist_head 
secmark_relabel_packet; - struct hlist_head secmark_refcount_inc; - struct hlist_head secmark_refcount_dec; - struct hlist_head req_classify_flow; - struct hlist_head tun_dev_alloc_security; - struct hlist_head tun_dev_free_security; - struct hlist_head tun_dev_create; - struct hlist_head tun_dev_attach_queue; - struct hlist_head tun_dev_attach; - struct hlist_head tun_dev_open; - struct hlist_head sctp_assoc_request; - struct hlist_head sctp_bind_connect; - struct hlist_head sctp_sk_clone; -#endif /* CONFIG_SECURITY_NETWORK */ -#ifdef CONFIG_SECURITY_INFINIBAND - struct hlist_head ib_pkey_access; - struct hlist_head ib_endport_manage_subnet; - struct hlist_head ib_alloc_security; - struct hlist_head ib_free_security; -#endif /* CONFIG_SECURITY_INFINIBAND */ -#ifdef CONFIG_SECURITY_NETWORK_XFRM - struct hlist_head xfrm_policy_alloc_security; - struct hlist_head xfrm_policy_clone_security; - struct hlist_head xfrm_policy_free_security; - struct hlist_head xfrm_policy_delete_security; - struct hlist_head xfrm_state_alloc; - struct hlist_head xfrm_state_alloc_acquire; - struct hlist_head xfrm_state_free_security; - struct hlist_head xfrm_state_delete_security; - struct hlist_head xfrm_policy_lookup; - struct hlist_head xfrm_state_pol_flow_match; - struct hlist_head xfrm_decode_session; -#endif /* CONFIG_SECURITY_NETWORK_XFRM */ -#ifdef CONFIG_KEYS - struct hlist_head key_alloc; - struct hlist_head key_free; - struct hlist_head key_permission; - struct hlist_head key_getsecurity; -#endif /* CONFIG_KEYS */ -#ifdef CONFIG_AUDIT - struct hlist_head audit_rule_init; - struct hlist_head audit_rule_known; - struct hlist_head audit_rule_match; - struct hlist_head audit_rule_free; -#endif /* CONFIG_AUDIT */ -#ifdef CONFIG_BPF_SYSCALL - struct hlist_head bpf; - struct hlist_head bpf_map; - struct hlist_head bpf_prog; - struct hlist_head bpf_map_alloc_security; - struct hlist_head bpf_map_free_security; - struct hlist_head bpf_prog_alloc_security; - struct hlist_head bpf_prog_free_security; -#endif /* CONFIG_BPF_SYSCALL */ - struct hlist_head locked_down; + #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME; + #include "lsm_hook_defs.h" + #undef LSM_HOOK } __randomize_layout; /* @@ -2085,6 +1490,12 @@ struct lsm_blob_sizes { int lbs_task; }; +/* + * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void + * LSM hooks (in include/linux/lsm_hook_defs.h). + */ +#define LSM_RET_VOID ((void) 0) + /* * Initializing a security_hook_list structure takes * up a lot of space in a source file. This macro takes diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index af6b11d4d673..1847a0784243 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -23,11 +23,12 @@ #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 -/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do +/* These Ethernet switch families contain embedded PHYs, but they do * not have a model ID. 
So the switch driver traps reads to the ID2 * register and returns the switch family ID */ -#define MARVELL_PHY_ID_88E6390 0x01410f90 +#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 +#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) diff --git a/include/linux/mbuf.h b/include/linux/mbuf.h new file mode 100644 index 000000000000..32779eee2ef5 --- /dev/null +++ b/include/linux/mbuf.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 bauerchen <bauerchen@tencent.com> + */ + +#ifndef _CGROUP_MBUF_H +#define _CGROUP_MBUF_H + +#include <linux/cgroup.h> +#include <linux/rwsem.h> + +struct mbuf_struct { + u32 mbuf_len; + u32 mbuf_max_slots; + u32 mbuf_frees; + u32 mbuf_next_id; + u32 mbuf_size_per_cg; + spinlock_t mbuf_lock; + char *mbuf; + unsigned long *mbuf_bitmap; +}; + +struct mbuf_ring_desc { + /* timestamp of this message */ + u64 ts_ns; + /* message total len, ring_item + ->len = next_item */ + u16 len; + /* text len text_len + sizeof(ring) = len */ + u16 text_len; +}; + +struct mbuf_ring { + u32 base_idx; + u32 first_idx; + u64 first_seq; + u32 next_idx; + u64 next_seq; + u32 end_idx; +}; + +struct mbuf_user_desc { + u64 user_seq; + u32 user_idx; + char buf[1024]; +}; + +/* each cgroup has a mbuf_slot struct */ +struct mbuf_slot { + u32 idx; + /* write op must hold this lock */ + spinlock_t slot_lock; + /* rate limit */ + struct ratelimit_state ratelimit; + struct cgroup *owner; + const struct mbuf_operations *ops; + struct mbuf_ring *mring; + struct mbuf_user_desc *udesc; +}; + +struct mbuf_operations { + /* read message */ + ssize_t (*read) (struct mbuf_slot *, struct mbuf_user_desc *); + /* get next available idx */ + u32 (*next) (struct mbuf_ring *, u32); + /* write message */ + ssize_t (*write) (struct cgroup *, const char *, va_list); +} ____cacheline_aligned; + + +void __init mbuf_bmap_init(void); +void __init setup_mbuf(void); +struct mbuf_slot *mbuf_slot_alloc(struct cgroup *cg); +void mbuf_free(struct cgroup *cg); +#endif diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2de725382784..f27b2ba743dd 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -47,6 +47,8 @@ enum memcg_memory_event { MEMCG_OOM_KILL, MEMCG_SWAP_MAX, MEMCG_SWAP_FAIL, + MEMCG_PAGECACHE_MAX, + MEMCG_PAGECACHE_OOM, MEMCG_NR_MEMORY_EVENTS, }; @@ -214,6 +216,9 @@ struct mem_cgroup { /* Accounted resources */ struct page_counter memory; struct page_counter swap; + struct page_counter pagecache; + u64 pagecache_reclaim_ratio; + u32 pagecache_max_ratio; /* Legacy consumer-oriented counters */ struct page_counter memsw; @@ -345,6 +350,21 @@ struct mem_cgroup { */ #define MEMCG_CHARGE_BATCH 32U +/* + * Iteration constructs for visiting all cgroups (under a tree). If + * loops are exited prematurely (break), mem_cgroup_iter_break() must + * be used for reference counting. 
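+ *
+ * A minimal usage sketch of the pattern this implies (should_stop() is a
+ * placeholder for any caller-specific early-exit condition):
+ *
+ *	struct mem_cgroup *iter;
+ *
+ *	for_each_mem_cgroup_tree(iter, root) {
+ *		if (should_stop(iter)) {
+ *			mem_cgroup_iter_break(root, iter);
+ *			break;
+ *		}
+ *	}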
+ */ +#define for_each_mem_cgroup_tree(iter, root) \ + for (iter = mem_cgroup_iter(root, NULL, NULL); \ + iter != NULL; \ + iter = mem_cgroup_iter(root, iter, NULL)) + +#define for_each_mem_cgroup(iter) \ + for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ + iter != NULL; \ + iter = mem_cgroup_iter(NULL, iter, NULL)) + extern struct mem_cgroup *root_mem_cgroup; static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) @@ -794,6 +814,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg, atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) + break; if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) break; } while ((memcg = parent_mem_cgroup(memcg)) && @@ -1335,6 +1357,10 @@ struct sock; bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG + +extern unsigned int vm_pagecache_limit_retry_times; +extern void mem_cgroup_shrink_pagecache(struct mem_cgroup *memcg, gfp_t gfp_mask); + extern struct static_key_false memcg_sockets_enabled_key; #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) void mem_cgroup_sk_alloc(struct sock *sk); diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 216a713bef7f..1198ea3d4012 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -281,6 +281,7 @@ struct memstick_host { struct memstick_dev *card; unsigned int retries; + bool removing; /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index 3c67983678ec..744dce63946e 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -109,6 +109,7 @@ struct stmfx { struct device *dev; struct regmap *map; struct regulator *vdd; + int irq; struct irq_domain *irq_domain; struct mutex lock; /* IRQ bus lock */ u8 irq_src; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3e80f03a387f..2b65ffb3bd76 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -299,6 +299,7 @@ struct mlx5_cmd { struct semaphore sem; struct semaphore pages_sem; int mode; + u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct dma_pool *pool; struct mlx5_cmd_debug dbg; @@ -756,6 +757,7 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -768,6 +770,8 @@ struct mlx5_cmd_work_ent { u64 ts2; u16 op; bool polling; + /* Track the max comp handlers */ + refcount_t refcnt; }; struct mlx5_pas { @@ -887,10 +891,15 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } +enum { + CMD_ALLOWED_OPCODE_ALL, +}; + int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index acd859ea09d4..36516fe86fe7 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ 
-415,11 +415,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_at_80[0x18]; + u8 reserved_at_80[0x10]; + u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8]; - u8 log_max_flow_counter[0x8]; - u8 reserved_at_a8[0x10]; + u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; u8 reserved_at_c0[0x40]; @@ -1139,6 +1139,11 @@ enum mlx5_fc_bulk_alloc_bitmask { #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) +enum { + MLX5_STEERING_FORMAT_CONNECTX_5 = 0, + MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@ -1419,7 +1424,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 general_obj_types[0x40]; - u8 reserved_at_440[0x20]; + u8 reserved_at_440[0x4]; + u8 steering_format_version[0x4]; + u8 create_qp_start_hint[0x18]; u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; @@ -4177,6 +4184,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { @@ -9661,7 +9669,7 @@ struct mlx5_ifc_pbmc_reg_bits { struct mlx5_ifc_bufferx_reg_bits buffer[10]; - u8 reserved_at_2e0[0x40]; + u8 reserved_at_2e0[0x80]; }; struct mlx5_ifc_qtct_reg_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 0affadb49092..60d9d7c5a10b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -27,6 +27,7 @@ #include <linux/memremap.h> #include <linux/overflow.h> #include <linux/sizes.h> +#include <linux/kabi.h> struct mempolicy; struct anon_vma; @@ -470,6 +471,13 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); int (*split)(struct vm_area_struct * area, unsigned long addr); int (*mremap)(struct vm_area_struct * area); + /* + * Called by mprotect() to make driver-specific permission + * checks before mprotect() is finalised. The VMA must not + * be modified. Returns 0 if mprotect() can proceed. + */ + int (*mprotect)(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long newflags); vm_fault_t (*fault)(struct vm_fault *vmf); vm_fault_t (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); @@ -525,6 +533,7 @@ struct vm_operations_struct { */ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); + KABI_RESERVE(1); }; static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) @@ -695,7 +704,13 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) } extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); +/* + * Mapcount of the compound page as a whole; it does not include mapped + * sub-pages. + * + * Must be called only for compound pages or any of their tail sub-pages. + */ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -715,10 +730,16 @@ static inline void page_mapcount_reset(struct page *page) int __page_mapcount(struct page *page); +/* + * Mapcount of a 0-order page; for a compound sub-page, this includes + * compound_mapcount(). + * + * Result is undefined for pages which cannot be mapped into userspace, + * for example SLAB or other special types of pages; see page_has_type(). + * Such pages use this place in struct page differently.
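+ *
+ * As a rough illustration (the exact rules vary by page type): an
+ * order-0 page mapped into N address spaces by regular PTEs has
+ * page_mapcount() == N, while for a sub-page of a compound page the
+ * compound_mapcount() of the head page is folded into the returned
+ * value.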
+ */ static inline int page_mapcount(struct page *page) { - VM_BUG_ON_PAGE(PageSlab(page), page); - if (unlikely(PageCompound(page))) return __page_mapcount(page); return atomic_read(&page->_mapcount) + 1; @@ -1215,13 +1236,26 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_KASAN_SW_TAGS + +/* + * KASAN per-page tags are stored xor'ed with 0xff. This makes it possible + * to avoid setting tags for all pages to the native kernel tag value 0xff, + * as the default value 0x00 maps to 0xff. + */ + static inline u8 page_kasan_tag(const struct page *page) { - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + u8 tag; + + tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; + tag ^= 0xff; + + return tag; } static inline void page_kasan_tag_set(struct page *page, u8 tag) { + tag ^= 0xff; page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; } @@ -1455,9 +1489,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); -int follow_pte_pmd(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, - pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp); +int follow_pte(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, @@ -2197,7 +2233,7 @@ static inline void zero_resv_unavail(void) {} extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum memmap_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2561,6 +2597,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +#ifndef io_remap_pfn_range +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); +} +#endif + static inline vm_fault_t vmf_error(int err) { if (err == -ENOMEM) @@ -2791,6 +2836,7 @@ enum mf_flags { }; extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); +extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); #define put_hwpoison_page(page) put_page(page) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 270aa8fd2800..34f390f45bd2 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -14,6 +14,7 @@ #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> +#include <linux/kabi.h> #include <asm/mmu.h> @@ -352,6 +353,12 @@ struct vm_area_struct { #ifdef CONFIG_NUMA struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); +
struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; @@ -526,6 +533,11 @@ struct mm_struct { struct work_struct async_put_work; } __randomize_layout; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index e459b38ef33c..cf3780a6ccc4 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -226,7 +226,7 @@ struct mmc_queue_req; * MMC Physical partitions */ struct mmc_part { - unsigned int size; /* partition size (in bytes) */ + u64 size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to make boot parts RO by default */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4c5eb3aa8e72..a0e1cf1bef4e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -281,9 +281,6 @@ struct mmc_host { u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ -#ifdef CONFIG_PM_SLEEP - struct notifier_block pm_notify; -#endif u32 max_current_330; u32 max_current_300; u32 max_current_180; diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2ad72d2c8cc5..4ed52879ce55 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -37,6 +37,18 @@ void dump_mm(const struct mm_struct *mm); BUG(); \ } \ } while (0) +#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) + #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) @@ -48,6 +60,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1bd8e6a09a3c..aa4017b33306 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -6,6 +6,7 @@ #include <linux/spinlock.h> #include <linux/mm_types.h> #include <linux/srcu.h> +#include <linux/kabi.h> struct mmu_notifier; struct mmu_notifier_ops; @@ -228,6 +229,9 @@ struct mmu_notifier_ops { */ struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm); void (*free_notifier)(struct mmu_notifier *mn); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; /* @@ -247,6 +251,8 @@ struct mmu_notifier { struct mm_struct *mm; struct rcu_head rcu; unsigned int users; + + KABI_RESERVE(1); }; static inline int mm_has_notifiers(struct mm_struct *mm) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 089963c52fd9..9598f80eecc2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -21,6 +21,7 @@ #include <linux/mm_types.h> #include <linux/page-flags.h> #include <asm/page.h> +#include <linux/kabi.h> /* Free memory management - zoned buddy allocator. 
*/ #ifndef CONFIG_FORCE_MAX_ZONEORDER @@ -569,6 +570,9 @@ struct zone { /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; + + KABI_RESERVE(1); + KABI_RESERVE(2); } ____cacheline_internodealigned_in_smp; enum pgdat_flags { @@ -717,6 +721,8 @@ typedef struct pglist_data { /* * Must be held any time you expect node_start_pfn, * node_present_pages, node_spanned_pages or nr_zones to stay constant. + * Also synchronizes pgdat->first_deferred_pfn during deferred page + * init. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG @@ -786,6 +792,9 @@ typedef struct pglist_data { /* Per-node vmstats */ struct per_cpu_nodestat __percpu *per_cpu_nodestats; atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; + + KABI_RESERVE(1); + KABI_RESERVE(2); } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) @@ -828,10 +837,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); -enum memmap_context { - MEMMAP_EARLY, - MEMMAP_HOTPLUG, +/* + * Memory initialization context, used to differentiate memory added + * statically by the platform from memory added via the memory hotplug + * interface. + */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, }; + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index e3596db077dc..4c56404e53a7 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -318,7 +318,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x0f +#define INPUT_DEVICE_ID_SW_MAX 0x10 #define INPUT_DEVICE_ID_PROP_MAX 0x1f #define INPUT_DEVICE_ID_MATCH_BUS 1 @@ -657,6 +657,10 @@ struct mips_cdmm_device_id { /* * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. * Although gcc seems to ignore this error, clang fails without this define. + * + * Note: The ordering of the struct is different from upstream because the + * static initializers in kernels < 5.7 still use C89 style while upstream + * has been converted to proper C99 initializers. */ #define x86cpu_device_id x86_cpu_id struct x86_cpu_id { @@ -665,6 +669,7 @@ struct x86_cpu_id { __u16 model; __u16 feature; /* bit index */ kernel_ulong_t driver_data; + __u16 steppings; }; #define X86_FEATURE_MATCH(x) \ @@ -673,6 +678,7 @@ struct x86_cpu_id { #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 +#define X86_STEPPING_ANY 0 #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* diff --git a/include/linux/module.h b/include/linux/module.h index a2ba67f2ac65..78b684600319 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -22,6 +22,7 @@ #include <linux/error-injection.h> #include <linux/tracepoint-defs.h> #include <linux/srcu.h> +#include <linux/kabi.h> #include <linux/percpu.h> #include <asm/module.h> @@ -447,6 +448,11 @@ struct module { keeping pointers to this stuff */ char *args; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + #ifdef CONFIG_SMP /* Per-cpu data.
*/ void __percpu *percpu; diff --git a/include/linux/msi.h b/include/linux/msi.h index 8ad679e9d9c0..d695e2eb2092 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -139,6 +139,12 @@ struct msi_desc { list_for_each_entry((desc), dev_to_msi_list((dev)), list) #define for_each_msi_entry_safe(desc, tmp, dev) \ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) +#define for_each_msi_vector(desc, __irq, dev) \ + for_each_msi_entry((desc), (dev)) \ + if ((desc)->irq) \ + for (__irq = (desc)->irq; \ + __irq < ((desc)->irq + (desc)->nvec_used); \ + __irq++) #ifdef CONFIG_IRQ_MSI_IOMMU static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 122f3439e1af..c65d7a3be3c6 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -128,7 +128,7 @@ static inline void print_drs_error(unsigned dsr) if (!(dsr & DSR_AVAILABLE)) printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); - if (prog_status & 0x03) + if ((prog_status & 0x03) == 0x03) printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " "half with 41h command\n"); else if (prog_status & 0x02) diff --git a/include/linux/mutex.h b/include/linux/mutex.h index aca8f36dfac9..479bc96c3e63 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -171,7 +171,7 @@ extern void mutex_lock_io(struct mutex *lock); # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock) +# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock) #endif /* diff --git a/include/linux/net.h b/include/linux/net.h index 9cafb5f353a9..25ce0b9cf7a0 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -21,6 +21,8 @@ #include <linux/rcupdate.h> #include <linux/once.h> #include <linux/fs.h> +#include <linux/mm.h> +#include <linux/kabi.h> #include <uapi/linux/net.h> @@ -202,6 +204,10 @@ struct proto_ops { int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #define DECLARE_SOCKADDR(type, dst, src) \ @@ -288,6 +294,21 @@ do { \ #define net_get_random_once_wait(buf, nbytes) \ get_random_once_wait((buf), (nbytes)) +/* + * E.g. XFS meta- & log-data is in slab pages, or bcache meta + * data pages, or other high order pages allocated by + * __get_free_pages() without __GFP_COMP, which have a page_count + * of 0 and/or have PageSlab() set. We cannot use send_page for + * those, as that does get_page(); put_page(); and would cause + * either a VM_BUG directly, or __page_cache_release a page that + * would actually still be referenced by someone, leading to some + * obscure delayed Oops somewhere else. 
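+ *
+ * A typical caller-side pattern looks roughly like this (sketch; error
+ * handling omitted):
+ *
+ *	if (sendpage_ok(page))
+ *		ret = kernel_sendpage(sock, page, offset, size, flags);
+ *	else
+ *		ret = sock_no_sendpage(sock, page, offset, size, flags);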
+ */ +static inline bool sendpage_ok(struct page *page) +{ + return !PageSlab(page) && page_count(page) >= 1; +} + int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9c1081208b79..60cacc183eef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -48,6 +48,7 @@ #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> +#include <linux/kabi.h> struct netpoll_info; struct device; @@ -274,6 +275,9 @@ struct header_ops { const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); __be16 (*parse_protocol)(const struct sk_buff *skb); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; /* These flag bits are private to the generic network queueing @@ -341,6 +345,11 @@ struct napi_struct { struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; enum { @@ -629,6 +638,10 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; @@ -747,6 +760,10 @@ struct netdev_rx_queue { #ifdef CONFIG_XDP_SOCKETS struct xdp_umem *umem; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } ____cacheline_aligned_in_smp; /* @@ -917,6 +934,10 @@ struct xfrmdev_ops { bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #endif @@ -1246,6 +1267,9 @@ struct tlsdev_ops; * Get devlink port instance associated with a given netdev. * Called with a reference on the netdevice and devlink locks only, * rtnl_lock is not held. + * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); + * If a device is paired with a peer device, return the peer instance. + * The caller must be under RCU read context. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1443,6 +1467,12 @@ struct net_device_ops { int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); + + KABI_RESERVE(1); + struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #define DEV_MIB_MAX __DEV_MIB_MAX @@ -1565,6 +1595,12 @@ enum netdev_priv_flags { #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK +/* Specifies the type of the struct net_device::ml_priv pointer */ +enum netdev_ml_priv_type { + ML_PRIV_NONE, + ML_PRIV_CAN, +}; + /** * struct net_device - The DEVICE structure. 
* @@ -1742,6 +1778,7 @@ enum netdev_priv_flags { * @nd_net: Network namespace this network device is inside * * @ml_priv: Mid-layer private + * @ml_priv_type: Mid-layer private type * @lstats: Loopback statistics * @tstats: Tunnel statistics * @dstats: Dummy statistics @@ -2030,8 +2067,10 @@ struct net_device { possible_net_t nd_net; /* mid-layer private */ + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + union { - void *ml_priv; struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; @@ -2077,6 +2116,11 @@ struct net_device { struct lock_class_key addr_list_lock_key; bool proto_down; unsigned wol_enabled:1; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2178,6 +2222,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev) netdev_set_rx_headroom(dev, -1); } +static inline void *netdev_get_ml_priv(struct net_device *dev, + enum netdev_ml_priv_type type) +{ + if (dev->ml_priv_type != type) + return NULL; + + return dev->ml_priv; +} + +static inline void netdev_set_ml_priv(struct net_device *dev, + void *ml_priv, + enum netdev_ml_priv_type type) +{ + WARN(dev->ml_priv_type && dev->ml_priv_type != type, + "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", + dev->ml_priv_type, type); + WARN(!dev->ml_priv_type && dev->ml_priv, + "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); + + dev->ml_priv = ml_priv; + dev->ml_priv_type = type; +} + /* * Net namespace inlines */ @@ -2378,6 +2445,11 @@ struct packet_type { struct sock *sk); void *af_packet_priv; struct list_head list; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; struct offload_callbacks { @@ -3054,7 +3126,7 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > @@ -3690,6 +3762,9 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, struct netlink_ext_ack *extack); int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack); +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); @@ -4056,6 +4131,7 @@ static inline void netif_tx_disable(struct net_device *dev) local_bh_disable(); cpu = smp_processor_id(); + spin_lock(&dev->tx_global_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -4063,6 +4139,7 @@ static inline void netif_tx_disable(struct net_device *dev) netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } + spin_unlock(&dev->tx_global_lock); local_bh_enable(); } diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index fcc409de31a4..a28aa289afdc 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -10,7 +10,7 @@ #include <net/netfilter/nf_conntrack_expect.h> #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> -extern const char *const 
pptp_msg_name[]; +const char *pptp_msg_name(u_int16_t msg); /* state of the control session */ enum pptp_ctrlsess_state { diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa82..625f491b95de 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 851425c3178f..f6267e2883f2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -24,6 +24,12 @@ struct nfnl_callback { const u_int16_t attr_count; /* number of nlattr's */ }; +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD, + NFNL_ABORT_VALIDATE, +}; + struct nfnetlink_subsystem { const char *name; __u8 subsys_id; /* nfnetlink subsystem ID */ @@ -31,7 +37,8 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); + int (*abort)(struct net *net, struct sk_buff *skb, + enum nfnl_abort_action action); void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; @@ -43,8 +50,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 1b261c51b3a3..04e7f5630509 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void) * since addend is most likely 1 */ __this_cpu_add(xt_recseq.sequence, addend); - smp_wmb(); + smp_mb(); return addend; } diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index e98028f00e47..6988cf9ffe3a 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *); int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); -void arpt_unregister_table(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); +void arpt_unregister_table(struct net *net, struct xt_table *table); +void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 162f59d0d17a..db472c9cd8e9 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net, const struct ebt_table *table, 
const struct nf_hook_ops *ops, struct ebt_table **res); -extern void ebt_unregister_table(struct net *net, struct ebt_table *table, - const struct nf_hook_ops *); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table); +void ebt_unregister_table_pre_exit(struct net *net, const char *tablename, + const struct nf_hook_ops *ops); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 082e2c41b7ff..5b70ca868bb1 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -16,7 +16,7 @@ struct ip_rt_info { u_int32_t mark; }; -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); struct nf_queue_entry; diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index aac42c28fe62..48314ade1506 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -42,7 +42,7 @@ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); - int (*route_me_harder)(struct net *net, struct sk_buff *skb); + int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); @@ -58,7 +58,6 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #include <net/netfilter/ipv6/nf_defrag_ipv6.h> -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, - u32 user) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_defrag(net, skb, user); -#elif IS_BUILTIN(CONFIG_IPV6) - return nf_ct_frag6_gather(net, skb, user); -#else - return 1; -#endif -} - int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, @@ -161,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, #endif } -int ip6_route_me_harder(struct net *net, struct sk_buff *skb); +int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); -static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) +static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); @@ -171,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) if (!v6_ops) return -EHOSTUNREACH; - return v6_ops->route_me_harder(net, skb); + return v6_ops->route_me_harder(net, sk, skb); #elif IS_BUILTIN(CONFIG_IPV6) - return ip6_route_me_harder(net, skb); + return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 
205fa7b1f07a..788969ccbbde 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -188,10 +188,10 @@ struct netlink_callback { struct module *module; struct netlink_ext_ack *extack; u16 family; - u16 min_dump_alloc; - bool strict_check; u16 answer_flags; + u32 min_dump_alloc; unsigned int prev_seq, seq; + bool strict_check; union { u8 ctx[48]; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 570a60c2f4f4..ad09c0cc5464 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -225,6 +225,7 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ +#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 0bbd587fac6a..7e9419d74b86 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9b8324ec08f3..99d327d0bccb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1307,11 +1307,13 @@ struct nfs41_impl_id { struct nfstime4 date; }; +#define MAX_BIND_CONN_TO_SESSION_RETRIES 3 struct nfs41_bind_conn_to_session_args { struct nfs_client *client; struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; + int retries; }; struct nfs41_bind_conn_to_session_res { diff --git a/include/linux/node.h b/include/linux/node.h index 4866f32a02d8..014ba3ab2efd 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,11 +99,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -extern int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn); +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, + enum meminit_context context) { return 0; } @@ -128,7 +130,8 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - error = link_mem_sections(nid, start_pfn, end_pfn); + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 6d0d70f3219c..10f81629b9ce 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -270,8 +270,6 @@ struct nvme_fc_remote_port { * * Host/Initiator Transport Entrypoints/Parameters: * - * @module: The LLDD module using the interface - * * @localport_delete: The LLDD initiates deletion of a localport via * nvme_fc_deregister_localport(). However, the teardown is * asynchronous. 
This routine is called upon the completion of the @@ -385,8 +383,6 @@ struct nvme_fc_remote_port { * Value is Mandatory. Allowed to be zero. */ struct nvme_fc_port_template { - struct module *module; - /* initiator-based functions */ void (*localport_delete)(struct nvme_fc_local_port *); void (*remoteport_delete)(struct nvme_fc_remote_port *); diff --git a/include/linux/of.h b/include/linux/of.h index 844f89e1b039..a7621e2b440a 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1282,6 +1282,7 @@ static inline int of_get_available_child_count(const struct device_node *np) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section(__##table##_of_table) \ + __aligned(__alignof__(struct of_device_id)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else diff --git a/include/linux/oom.h b/include/linux/oom.h index c696c265f019..b9df34326772 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -55,6 +55,7 @@ struct oom_control { }; extern struct mutex oom_lock; +extern struct mutex oom_adj_mutex; static inline void set_current_oom_origin(void) { diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 659045046468..50c93ca0c3d6 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -3,6 +3,7 @@ #define __LINUX_OVERFLOW_H #include <linux/compiler.h> +#include <linux/limits.h> /* * In the fallback code below, we need to compute the minimum and diff --git a/include/linux/padata.h b/include/linux/padata.h index cccab7a59787..fa35dcfbd13f 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -145,7 +145,8 @@ struct padata_shell { /** * struct padata_instance - The overall control structure. * - * @cpu_notifier: cpu hotplug notifier. + * @cpu_online_node: Linkage for CPU online callback. + * @cpu_dead_node: Linkage for CPU offline callback. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. @@ -160,7 +161,8 @@ struct padata_shell { * @flags: padata flags. 
*/ struct padata_instance { - struct hlist_node node; + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 8679ccd722e8..3468794f83d2 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern void __split_page_owner(struct page *page, unsigned int order); +extern void __split_page_owner(struct page *page, unsigned int nr); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(struct page *page); @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); } -static inline void split_page_owner(struct page *page, unsigned int order) +static inline void split_page_owner(struct page *page, unsigned int nr) { if (static_branch_unlikely(&page_owner_inited)) - __split_page_owner(page, order); + __split_page_owner(page, nr); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index a73164c85e78..d0260824dc5d 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -57,6 +57,7 @@ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read accesses only */ #endif #ifdef CONFIG_PCI_HOST_COMMON diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index f641badc2c61..0c12d69dde92 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -71,6 +71,7 @@ struct pci_epc_ops { * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region * @page_size: size of each page + * @lock: mutex to protect bitmap */ struct pci_epc_mem { phys_addr_t phys_base; @@ -78,6 +79,8 @@ struct pci_epc_mem { unsigned long *bitmap; size_t page_size; int pages; + /* mutex to protect against concurrent access for memory allocation*/ + struct mutex lock; }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index f39f22f9ee47..2665076fc842 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -39,6 +39,7 @@ #include <linux/io.h> #include <linux/resource_ext.h> #include <uapi/linux/pci.h> +#include <linux/kabi.h> #include <linux/pci_ids.h> @@ -423,6 +424,7 @@ struct pci_dev { unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ + unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -467,6 +469,11 @@ struct pci_dev { size_t romlen; /* Length if not from BAR */ char *driver_override; /* Driver name to force a match */ + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + 
KABI_RESERVE(4); + unsigned long priv_flags; /* Private flags for the PCI driver */ }; @@ -601,6 +608,11 @@ struct pci_bus { struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) @@ -838,6 +850,10 @@ struct pci_driver { const struct attribute_group **groups; struct device_driver driver; struct pci_dynids dynids; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) @@ -1216,7 +1232,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index b482e42d7153..4a2c53d9afbc 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -13,6 +13,7 @@ */ #ifndef _PCI_HOTPLUG_H #define _PCI_HOTPLUG_H +#include <linux/kabi.h> /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use @@ -45,6 +46,10 @@ struct hotplug_slot_ops { int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); int (*reset_slot) (struct hotplug_slot *slot, int probe); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; /** @@ -61,6 +66,9 @@ struct hotplug_slot { struct pci_slot *pci_slot; struct module *owner; const char *mod_name; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 21a572469a4e..0ad57693f392 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -148,6 +148,8 @@ /* Vendors and devices. Sort key: vendor first, device next. 
*/ +#define PCI_VENDOR_ID_LOONGSON 0x0014 + #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a @@ -548,7 +550,9 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -1829,6 +1833,12 @@ #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 @@ -2582,6 +2592,8 @@ #define PCI_VENDOR_ID_AMAZON 0x1d0f +#define PCI_VENDOR_ID_ZHAOXIN 0x1d17 + #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_HXT 0x1dbf @@ -3006,6 +3018,7 @@ #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 +#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_VENDOR_ID_SCALEMP 0x8686 diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 3998cdf9cd14..d0ae3940b15f 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -48,7 +48,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-size critical section. */ - __this_cpu_inc(*sem->read_count); + this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ /* @@ -66,7 +66,7 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) /* * Same as in percpu_down_read(). */ - __this_cpu_inc(*sem->read_count); + this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ preempt_enable(); @@ -88,7 +88,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem) * Same as in percpu_down_read(). */ if (likely(rcu_sync_is_idle(&sem->rss))) - __this_cpu_dec(*sem->read_count); + this_cpu_dec(*sem->read_count); else __percpu_up_read(sem); /* Unconditional memory barrier */ preempt_enable(); diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 4f052496cdfd..0a4f54dd4737 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { - s64 ret = fbc->count; + /* Prevent reloads of fbc->count */ + s64 ret = READ_ONCE(fbc->count); - barrier(); /* Prevent reloads of fbc->count */ if (ret >= 0) return ret; return 0; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 68ccc5b1913b..5862ff3f031a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -199,17 +199,26 @@ struct hw_perf_event { */ u64 sample_period; - /* - * The period we started this sample with. 
- */ - u64 last_period; + union { + struct { /* Sampling */ + /* + * The period we started this sample with. + */ + u64 last_period; - /* - * However much is left of the current period; note that this is - * a full 64bit value and allows for generation of periods longer - * than hardware might allow. - */ - local64_t period_left; + /* + * However much is left of the current period; + * note that this is a full 64bit value and + * allows for generation of periods longer + * than hardware might allow. + */ + local64_t period_left; + }; + struct { /* Topdown events counting for context switch */ + u64 saved_metric; + u64 saved_slots; + }; + }; /* * State for throttling the event, see __perf_event_overflow() and @@ -538,9 +547,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, * PERF_EV_CAP_SOFTWARE: Is a software event. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read * from any CPU in the package where it is active. + * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and + * cannot be a group leader. If an event with this flag is detached from the + * group it is scheduled out and moved into an unrecoverable ERROR state. */ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) +#define PERF_EV_CAP_SIBLING BIT(2) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) @@ -1195,6 +1208,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); +extern struct perf_callchain_entry *get_callchain_entry(int *rctx); +extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index f3eaf9ec00a1..70078be166e3 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -21,6 +21,7 @@ * @dst_id: dst request line * @m_master: memory master for transfers on allocated channel * @p_master: peripheral master for transfers on allocated channel + * @channels: mask of the channels permitted for allocation (zero value means any) * @hs_polarity:set active low polarity of handshake interface */ struct dw_dma_slave { @@ -29,6 +30,7 @@ struct dw_dma_slave { u8 dst_id; u8 m_master; u8 p_master; + u8 channels; bool hs_polarity; }; diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 8b30b14b47d3..f377817ce75c 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -85,6 +85,7 @@ * omap2+ specific GPIO registers */ #define OMAP24XX_GPIO_REVISION 0x0000 +#define OMAP24XX_GPIO_SYSCONFIG 0x0010 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c @@ -108,6 +109,7 @@ #define OMAP24XX_GPIO_SETDATAOUT 0x0094 #define OMAP4_GPIO_REVISION 0x0000 +#define OMAP4_GPIO_SYSCONFIG 0x0010 #define OMAP4_GPIO_EOI 0x0020 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 @@ -148,6 +150,7 @@ #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; + u16 sysconfig; u16 direction; u16 datain; u16 dataout; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 
2cbde6542849..5fcc9bc9e751 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) diff --git a/include/linux/pm.h b/include/linux/pm.h index 4c441be03079..c1d21e9a864f 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -598,7 +598,7 @@ struct dev_pm_info { #endif #ifdef CONFIG_PM struct hrtimer suspend_timer; - unsigned long timer_expires; + u64 timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 22af69d237a6..7145795b4b9d 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); -extern void pm_runtime_clean_up_links(struct device *dev); extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); -extern void pm_runtime_drop_link(struct device *dev); +extern void pm_runtime_drop_link(struct device_link *link); static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { @@ -173,11 +172,10 @@ static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -static inline void pm_runtime_clean_up_links(struct device *dev) {} static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} -static inline void pm_runtime_drop_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device_link *link) {} #endif /* !CONFIG_PM */ @@ -226,6 +224,27 @@ static inline int pm_runtime_get_sync(struct device *dev) return __pm_runtime_resume(dev, RPM_GET_PUT); } +/** + * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it. + * @dev: Target device. + * + * Resume @dev synchronously and if that is successful, increment its runtime + * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been + * incremented or a negative error code otherwise. 
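+ *
+ * A minimal usage sketch (illustrative only; the calling driver code
+ * and error handling shown here are assumed, not part of this patch):
+ *
+ *	ret = pm_runtime_resume_and_get(dev);
+ *	if (ret < 0)
+ *		return ret;
+ *	... access the device ...
+ *	pm_runtime_put(dev);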
+ */ +static inline int pm_runtime_resume_and_get(struct device *dev) +{ + int ret; + + ret = __pm_runtime_resume(dev, RPM_GET_PUT); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + return 0; +} + static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 3b12fd28af78..fc4df3ccefc9 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -220,10 +220,8 @@ struct pnp_card { #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) #define to_pnp_card(n) container_of(n, struct pnp_card, dev) -#define pnp_for_each_card(card) \ - for((card) = global_to_pnp_card(pnp_cards.next); \ - (card) != global_to_pnp_card(&pnp_cards); \ - (card) = global_to_pnp_card((card)->global_list.next)) +#define pnp_for_each_card(card) \ + list_for_each_entry(card, &pnp_cards, global_list) struct pnp_card_link { struct pnp_card *card; @@ -276,14 +274,9 @@ struct pnp_dev { #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) -#define pnp_for_each_dev(dev) \ - for((dev) = global_to_pnp_dev(pnp_global.next); \ - (dev) != global_to_pnp_dev(&pnp_global); \ - (dev) = global_to_pnp_dev((dev)->global_list.next)) -#define card_for_each_dev(card,dev) \ - for((dev) = card_to_pnp_dev((card)->devices.next); \ - (dev) != card_to_pnp_dev(&(card)->devices); \ - (dev) = card_to_pnp_dev((dev)->card_list.next)) +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) +#define card_for_each_dev(card, dev) \ + list_for_each_entry(dev, &(card)->devices, card_list) #define pnp_dev_name(dev) (dev)->name static inline void *pnp_get_drvdata(struct pnp_dev *pdev) @@ -437,14 +430,10 @@ struct pnp_protocol { }; #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) -#define protocol_for_each_card(protocol,card) \ - for((card) = protocol_to_pnp_card((protocol)->cards.next); \ - (card) != protocol_to_pnp_card(&(protocol)->cards); \ - (card) = protocol_to_pnp_card((card)->protocol_list.next)) -#define protocol_for_each_dev(protocol,dev) \ - for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ - (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ - (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) +#define protocol_for_each_card(protocol, card) \ + list_for_each_entry(card, &(protocol)->cards, protocol_list) +#define protocol_for_each_dev(protocol, dev) \ + list_for_each_entry(dev, &(protocol)->devices, protocol_list) extern struct bus_type pnp_bus_type; diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 507c5e214c42..7413779484d5 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -50,7 +50,6 @@ struct bq27xxx_reg_cache { int capacity; int energy; int flags; - int power_avg; int health; }; diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000000..cc1e71334e53 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. 
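+ *
+ * The generators declared here are fast but not cryptographically
+ * secure; callers that need unpredictable values should use the
+ * get_random_bytes() family instead.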
+ */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include <linux/types.h> +#include <linux/percpu.h> + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +#if BITS_PER_LONG == 64 +/* + * The core SipHash round function. Each line can be executed in + * parallel given enough CPU resources. + */ +#define PRND_SIPROUND(v0, v1, v2, v3) ( \ + v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \ + v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \ + v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \ + v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \ +) + +#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261) +#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573) + +#elif BITS_PER_LONG == 32 +/* + * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash. + * This is weaker, but 32-bit machines are not used for high-traffic + * applications, so there is less output for an attacker to analyze. + */ +#define PRND_SIPROUND(v0, v1, v2, v3) ( \ + v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \ + v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \ + v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \ + v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \ +) +#define PRND_K0 0x6c796765 +#define PRND_K1 0x74656462 + +#else +#error Unsupported BITS_PER_LONG +#endif + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. 
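+ * Implements the LCG x(n + 1) = x(n) * 1664525 + 1013904223 (mod 2^32);
+ * fine for quick mixing of values, not for anything security-sensitive.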
*/ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bbb68dba37cc..9b26069531de 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -26,13 +26,13 @@ * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 - * NMI_MASK: 0x00100000 + * NMI_MASK: 0x00f00000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 -#define NMI_BITS 1 +#define NMI_BITS 4 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) diff --git a/include/linux/printk.h b/include/linux/printk.h index c09d67edda3a..3b5cb66d8bc1 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -202,7 +202,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack(void) __cold; -extern void printk_safe_init(void); extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); #else @@ -269,10 +268,6 @@ static inline void dump_stack(void) { } -static inline void printk_safe_init(void) -{ -} - static inline void printk_safe_flush(void) { } diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index a705aa2d03f9..76e619b232f3 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -21,6 +21,7 @@ extern void proc_flush_task(struct task_struct *); extern struct proc_dir_entry *proc_symlink(const char *, struct proc_dir_entry *, const char *); +struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool); extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *); extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, struct proc_dir_entry *, void *); @@ -75,6 +76,13 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); +extern struct net init_net; +extern struct list_head sysctl_restrict_list; + +struct bpf_iter_aux_info; +extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux); +extern void bpf_iter_fini_seq_net(void *priv_data); + #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must @@ -99,6 +107,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } +static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, + struct proc_dir_entry *parent, void *data, bool force_lookup) +{ + return NULL; +} static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, @@ -136,7 +149,7 @@ struct net; static inline struct proc_dir_entry *proc_net_mkdir( struct net *net, const char *name, struct proc_dir_entry *parent) { - return proc_mkdir_data(name, 0, parent, net); + return _proc_mkdir(name, 0, parent, net, true); } struct ns_common; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index dd464943f717..5b90eff50bf6 100644 --- a/include/linux/qcom-geni-se.h 
+++ b/include/linux/qcom-geni-se.h @@ -229,6 +229,9 @@ struct geni_se { #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) #define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) +/* QUP SE VERSION value for major number 2 and minor number 5 */ +#define QUP_SE_VERSION_2_5 0x20050000 + #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 geni_se_get_qup_hw_version(struct geni_se *se); diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 2dd0a9ed5b36..6d15040c642c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -97,6 +97,11 @@ struct qed_chain_u32 { u32 cons_idx; }; +struct addr_tbl_entry { + void *virt_addr; + dma_addr_t dma_map; +}; + struct qed_chain { /* fastpath portion of the chain - required for commands such * as produce / consume. @@ -107,10 +112,11 @@ struct qed_chain { /* Fastpath portions of the PBL [if exists] */ struct { - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. + /* Table for keeping the virtual and physical addresses of the + * chain pages, respectively to the physical addresses + * in the pbl table. */ - void **pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { struct qed_chain_pbl_u16 u16; @@ -201,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } @@ -287,7 +299,7 @@ qed_chain_advance_page(struct qed_chain *p_chain, *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } - *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; } } @@ -537,7 +549,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->pbl_sp.p_phys_table = 0; p_chain->pbl_sp.p_virt_table = NULL; - p_chain->pbl.pp_virt_addr_tbl = NULL; + p_chain->pbl.pp_addr_tbl = NULL; } /** @@ -575,11 +587,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain, static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, void *p_virt_pbl, dma_addr_t p_phys_pbl, - void **pp_virt_addr_tbl) + struct addr_tbl_entry *pp_addr_tbl) { p_chain->pbl_sp.p_phys_table = p_phys_pbl; 
p_chain->pbl_sp.p_virt_table = p_virt_pbl; - p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; + p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } /** @@ -644,7 +656,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; - p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; break; } /* p_virt_addr points at this stage to the last page of the chain */ @@ -716,7 +728,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) - memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, QED_CHAIN_PAGE_SIZE); } diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b5db1ee96d78..65a7355ed07b 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -637,6 +637,7 @@ struct qed_dev_info { #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; + bool b_arfs_capable; bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; diff --git a/include/linux/quota.h b/include/linux/quota.h index 27aab84fcbaa..9dde23639625 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -47,6 +47,7 @@ #include <linux/uidgid.h> #include <linux/projid.h> #include <uapi/linux/quota.h> +#include <linux/kabi.h> #undef USRQUOTA #undef GRPQUOTA @@ -304,6 +305,11 @@ struct dquot { loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */ unsigned long dq_flags; /* See DQ_* */ struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* Operations which must be implemented by each quota format */ @@ -316,6 +322,9 @@ struct quota_format_ops { int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ + + KABI_RESERVE(1); + KABI_RESERVE(2); }; /* Operations working with dquots */ @@ -335,6 +344,9 @@ struct dquot_operations { int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct path; @@ -438,6 +450,9 @@ struct quotactl_ops { int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct quota_format_type { diff --git a/include/linux/random.h b/include/linux/random.h index f189c927fdea..5b3ec7d2791f 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -108,61 +108,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, 
(pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. - * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds + * This is designed to be standalone for just prandom + * users, but for now we include it from <linux/random.h> + * for legacy reasons. */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. - */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include <linux/prandom.h> #ifdef CONFIG_ARCH_RANDOM # include <asm/archrandom.h> @@ -193,10 +144,4 @@ static inline bool arch_has_random_seed(void) } #endif -/* Pseudo random number generator from numerical recipes. */ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/ras.h b/include/linux/ras.h index 7c3debb47c87..1f4048bf2674 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC -void __init cec_init(void); int __init parse_cec_param(char *str); -int cec_add_elem(u64 pfn); -#else -static inline void __init cec_init(void) { } -static inline int cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 75a2eded7aa2..c75b38ba4a72 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -96,8 +96,10 @@ static inline void rcu_user_exit(void) { } #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); +void rcu_nocb_flush_deferred_wakeup(void); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } +static inline void rcu_nocb_flush_deferred_wakeup(void) { } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /** @@ -382,6 +384,24 @@ do { \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) +/** + * rcu_replace_pointer() - replace an RCU pointer, returning its old value + * @rcu_ptr: RCU pointer, whose old value is returned + * @ptr: regular pointer + * @c: the lockdep conditions under which the dereference will take place + * + * Perform a replacement, where @rcu_ptr is an RCU-annotated + * pointer and @c is the lockdep argument that is passed to the + * rcu_dereference_protected() call used to read that pointer. The old + * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. 
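+ *
+ * A typical update sequence, sketched under the assumption that the
+ * caller holds my_lock (gp, new and my_lock are illustrative names):
+ *
+ *	old = rcu_replace_pointer(gp, new, lockdep_is_held(&my_lock));
+ *	synchronize_rcu();
+ *	kfree(old);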
+ */ +#define rcu_replace_pointer(rcu_ptr, ptr, c) \ +({ \ + typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ + rcu_assign_pointer((rcu_ptr), (ptr)); \ + __tmp; \ +}) + /** * rcu_swap_protected() - swap an RCU and a regular pointer * @rcu_ptr: RCU pointer diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index beb9a9da1699..c5bf21261bb1 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl, local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. */ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head *const *p) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head *const *p, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head *const *p) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } static inline void rht_assign_locked(struct rhash_lock_head **bkt, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df..d7d6d4eb1794 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -214,7 +214,8 @@ struct page_vma_mapped_walk { static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - if (pvmw->pte) + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ + if (pvmw->pte && !PageHuge(pvmw->page)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 00d6054687dd..8a3606372abc 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -125,6 +125,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) * lock for reading */ extern void down_read(struct rw_semaphore *sem); +extern int __must_check down_read_interruptible(struct rw_semaphore *sem); extern int __must_check down_read_killable(struct rw_semaphore *sem); /* @@ -173,6 +174,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * See Documentation/locking/lockdep-design.rst for more details.) 
 */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
 extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -193,6 +195,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
 extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass)		down_read(sem)
+# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
 # define down_write_nest_lock(sem, nest_lock)	down_write(sem)
 # define down_write_nested(sem, subclass)	down_write(sem)
 # define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b968d736833b..d7999c06015f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,7 +31,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers.h>
 #include <linux/rseq.h>
-
+#include <linux/kabi.h>
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
 struct backing_dev_info;
@@ -300,6 +300,15 @@ struct sched_info {
 	/* When were we last queued to run? */
 	unsigned long long		last_queued;
+	/* Count of task switches */
+	unsigned long			task_switch;
+
+	/* Timestamp when the task last started running in kernel space */
+	unsigned long			kernel_exec_start;
+
+	/* User-space execution time of the task */
+	unsigned long			utime;
+
 #endif /* CONFIG_SCHED_INFO */
 };
@@ -476,6 +485,11 @@ struct sched_entity {
 	 */
 	struct sched_avg		avg;
 #endif
+
+	KABI_RESERVE(1);
+	KABI_RESERVE(2);
+	KABI_RESERVE(3);
+	KABI_RESERVE(4);
 };
 
 struct sched_rt_entity {
@@ -563,6 +577,7 @@ struct sched_dl_entity {
 	 * time.
 	 */
 	struct hrtimer			inactive_timer;
+
 };
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -934,8 +949,8 @@ struct task_struct {
 	struct seccomp			seccomp;
 
 	/* Thread group tracking: */
-	u32				parent_exec_id;
-	u32				self_exec_id;
+	u64				parent_exec_id;
+	u64				self_exec_id;
 
 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
 	spinlock_t			alloc_lock;
@@ -1268,12 +1283,28 @@ struct task_struct {
 	unsigned long			prev_lowest_stack;
 #endif
 
+#ifdef CONFIG_X86_MCE
+	void __user			*mce_vaddr;
+	__u64				mce_kflags;
+	u64				mce_addr;
+	__u64				mce_ripv : 1,
+					mce_whole_page : 1,
+					__mce_reserved : 62;
+
+	struct callback_head		mce_kill_me;
+#endif
+
 	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
 */
 	randomized_struct_fields_end
+	KABI_RESERVE(1);
+	KABI_RESERVE(2);
+	KABI_RESERVE(3);
+	KABI_RESERVE(4);
+
 	/* CPU-specific state of this task: */
 	struct thread_struct		thread;
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ecdc6542070f..dfd82eab2902 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
 #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
 #define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS	27	/* mm is shared between processes */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index c49257a3b510..3a1d899019af 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm)
 		__mmdrop(mm);
 }
 
+void mmdrop(struct mm_struct *mm);
+
 /*
  * This has to be called after a get_task_mm()/mmget_not_zero()
  * followed by taking the mmap_sem for writing before modifying the
@@ -165,7 +167,8 @@ static inline bool in_vfork(struct task_struct *tsk)
 	 * another oom-unkillable task does this it should blame itself.
 	 */
 	rcu_read_lock();
-	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+	ret = tsk->vfork_done &&
+			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
 	rcu_read_unlock();
 
 	return ret;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 88050259c466..23351d6ace55 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -10,6 +10,7 @@
 #include <linux/cred.h>
 #include <linux/refcount.h>
 #include <linux/posix-timers.h>
+#include <linux/kabi.h>
 
 /*
  * Types defining task->signal and task->sighand and APIs using them:
@@ -224,7 +225,19 @@ struct signal_struct {
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
-					 * (notably. ptrace) */
+					 * (notably ptrace)
+					 * Deprecated, do not use in new code.
+					 * Use exec_update_lock instead.
+					 */
+	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
+						 * being updated during exec,
+						 * and may have inconsistent
+						 * permissions.
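+						 * Readers that need a stable
+						 * view of the task (procfs,
+						 * perf_event_open(), etc.)
+						 * take it for reading.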
+ */ + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; /* diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index f1185ac29e70..8e9d03f5ae97 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -15,7 +15,7 @@ DECLARE_PER_CPU(unsigned long, total_forks); extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); -extern int nr_forks(void); +extern unsigned long nr_forks(void); extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_running_cpu(int cpu); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d57e1ed4ec8d..25804152b745 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -64,6 +64,7 @@ extern unsigned int sysctl_sched_uclamp_util_max; #ifdef CONFIG_CFS_BANDWIDTH extern unsigned int sysctl_sched_cfs_bandwidth_slice; +extern unsigned int sysctl_sched_cfs_bw_burst_enabled; #endif #ifdef CONFIG_SCHED_AUTOGROUP diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 917d88edb7b9..b9d4850894eb 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -6,6 +6,7 @@ #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/ratelimit.h> +#include <linux/kabi.h> /* * Some day this will be a full-fledged user tracking system.. @@ -39,6 +40,9 @@ struct user_struct { /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; extern int uids_sysfs_init(void); diff --git a/include/linux/security.h b/include/linux/security.h index 9df7547afc0c..df90399a8af9 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -117,12 +117,14 @@ enum lockdown_reason { LOCKDOWN_MODULE_PARAMETERS, LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, + LOCKDOWN_XMON_WR, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, + LOCKDOWN_XMON_RW, LOCKDOWN_CONFIDENTIALITY_MAX, }; @@ -850,7 +852,7 @@ static inline int security_inode_killpriv(struct dentry *dentry) static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { - return -EOPNOTSUPP; + return cap_inode_getsecurity(inode, name, buffer, alloc); } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index aa5deb041c25..7cc952282e8b 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s) } static inline void -seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) { s->buffer = buf; s->size = size; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index bcf4cf26b8c8..a42a29952889 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. 
This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 86281ac7c305..860e0f843c12 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) #endif #ifndef set_mce_nospec -static inline int set_mce_nospec(unsigned long pfn) +static inline int set_mce_nospec(unsigned long pfn, bool unmap) { return 0; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 955e1370f033..ae63fc06a1a8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1018,7 +1018,7 @@ static inline bool skb_unref(struct sk_buff *skb) } void skb_release_head_state(struct sk_buff *skb); -void kfree_skb(struct sk_buff *skb); +void __fix_address kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); @@ -1251,32 +1251,6 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); -#ifdef CONFIG_NET -int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr); -int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog); - -int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); -#else -static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) -{ - return -EOPNOTSUPP; -} -#endif - struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, __be16 proto, int nhoff, int hlen, unsigned int flags); @@ -1480,6 +1454,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb) skb->next = NULL; } +/* Iterate through singly-linked GSO fragments of an skb. */ +#define skb_list_walk_safe(first, skb, next_skb) \ + for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL) + static inline void skb_list_del_init(struct sk_buff *skb) { __list_del_entry(&skb->list); @@ -1816,6 +1795,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +/** + * skb_queue_len_lockless - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + * This variant can be used in lockless contexts. 
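+ *
+ * The value may be stale as soon as it is read, so it is only suitable
+ * for heuristics, e.g. (the limit here is an illustrative threshold):
+ *
+ *	if (skb_queue_len_lockless(&sk->sk_receive_queue) > limit)
+ *		goto drop;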
+ */ +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return READ_ONCE(list_->qlen); +} + /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize @@ -2021,7 +2012,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev; - list->qlen--; + WRITE_ONCE(list->qlen, list->qlen - 1); next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; @@ -2510,6 +2501,11 @@ static inline int skb_mac_header_was_set(const struct sk_buff *skb) return skb->mac_header != (typeof(skb->mac_header))~0U; } +static inline void skb_unset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = (typeof(skb->mac_header))~0U; +} + static inline void skb_reset_mac_header(struct sk_buff *skb) { skb->mac_header = skb->data - skb->head; @@ -3185,8 +3181,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error if @free_on_error is true. */ -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, - bool free_on_error) +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) { unsigned int size = skb->len; @@ -3209,7 +3206,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) { return __skb_put_padto(skb, len, true); } diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 7eb6a8754f19..b003d2706c61 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -186,6 +186,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, dst->sg.data[which] = src->sg.data[which]; dst->sg.data[which].length = size; dst->sg.size += size; + src->sg.size -= size; src->sg.data[which].length -= size; src->sg.data[which].offset += size; } @@ -319,14 +320,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link) } struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); -#if defined(CONFIG_BPF_STREAM_PARSER) -void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link); -#else -static inline void sk_psock_unlink(struct sock *sk, - struct sk_psock_link *link) -{ -} -#endif void __sk_psock_purge_ingress_msg(struct sk_psock *psock); @@ -343,26 +336,48 @@ static inline void sk_psock_update_proto(struct sock *sk, struct sk_psock *psock, struct proto *ops) { - psock->saved_unhash = sk->sk_prot->unhash; - psock->saved_close = sk->sk_prot->close; - psock->saved_write_space = sk->sk_write_space; + /* Initialize saved callbacks and original proto only once, since this + * function may be called multiple times for a psock, e.g. when + * psock->progs.msg_parser is updated. + * + * Since we've not installed the new proto, psock is not yet in use and + * we can initialize it without synchronization. 
+ */ + if (!psock->sk_proto) { + struct proto *orig = READ_ONCE(sk->sk_prot); - psock->sk_proto = sk->sk_prot; - sk->sk_prot = ops; + psock->saved_unhash = orig->unhash; + psock->saved_close = orig->close; + psock->saved_write_space = sk->sk_write_space; + + psock->sk_proto = orig; + } + + /* Pairs with lockless read in sk_clone_lock() */ + WRITE_ONCE(sk->sk_prot, ops); } static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_prot->unhash = psock->saved_unhash; - if (psock->sk_proto) { struct inet_connection_sock *icsk = inet_csk(sk); bool has_ulp = !!icsk->icsk_ulp_data; if (has_ulp) { - tcp_update_ulp(sk, psock->sk_proto, - psock->saved_write_space); + /* TLS does not have an unhash proto in SW cases, but we need + * to ensure we stop using the sock_map unhash routine because + * the associated psock is being removed. So use the original + * unhash handler. + */ + WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); + if (inet_csk_has_ulp(sk)) { + tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); + } else { + sk->sk_write_space = psock->saved_write_space; + /* Pairs with lockless read in sk_clone_lock() */ + WRITE_ONCE(sk->sk_prot, psock->sk_proto); + } } else { sk->sk_prot = psock->sk_proto; sk->sk_write_space = psock->saved_write_space; @@ -391,26 +406,6 @@ static inline bool sk_psock_test_state(const struct sk_psock *psock, return test_bit(bit, &psock->state); } -static inline struct sk_psock *sk_psock_get_checked(struct sock *sk) -{ - struct sk_psock *psock; - - rcu_read_lock(); - psock = sk_psock(sk); - if (psock) { - if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) { - psock = ERR_PTR(-EBUSY); - goto out; - } - - if (!refcount_inc_not_zero(&psock->refcnt)) - psock = ERR_PTR(-EBUSY); - } -out: - rcu_read_unlock(); - return psock; -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; @@ -449,6 +444,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog, bpf_prog_put(prog); } +static inline int psock_replace_prog(struct bpf_prog **pprog, + struct bpf_prog *prog, + struct bpf_prog *old) +{ + if (cmpxchg(pprog, old, prog) != old) + return -ENOENT; + + if (old) + bpf_prog_put(old); + + return 0; +} + static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); @@ -456,4 +464,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs) psock_set_prog(&progs->skb_verdict, NULL); } +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); + +static inline bool sk_psock_strp_enabled(struct sk_psock *psock) +{ + if (!psock) + return false; + return psock->parser.enabled; +} #endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/sli.h b/include/linux/sli.h new file mode 100755 index 000000000000..630d8e28fd46 --- /dev/null +++ b/include/linux/sli.h @@ -0,0 +1,64 @@ +#ifndef _LINUX_SLI_H +#define _LINUX_SLI_H +#include <linux/cgroup.h> + +enum sli_memlat_stat_item { + MEM_LAT_GLOBAL_DIRECT_RECLAIM, /* global direct reclaim latency */ + MEM_LAT_MEMCG_DIRECT_RECLAIM, /* memcg direct reclaim latency */ + MEM_LAT_DIRECT_COMPACT, /* direct compact latency */ + MEM_LAT_GLOBAL_DIRECT_SWAPOUT, /* global direct swapout latency */ + MEM_LAT_MEMCG_DIRECT_SWAPOUT, /* memcg direct swapout latency */ + MEM_LAT_DIRECT_SWAPIN, /* direct swapin latency */ + MEM_LAT_PAGE_ALLOC, /* latency of page alloc */ + MEM_LAT_STAT_NR, +}; + +/* Memory latency histogram distribution, in milliseconds */ +enum sli_lat_count { + LAT_0_1, + LAT_1_4, + 
LAT_4_8,
+	LAT_8_16,
+	LAT_16_32,
+	LAT_32_64,
+	LAT_64_128,
+	LAT_128_INF,
+	LAT_COUNT_NR,
+};
+
+enum sli_schedlat_stat_item {
+	SCHEDLAT_WAIT,
+	SCHEDLAT_BLOCK,
+	SCHEDLAT_IOBLOCK,
+	SCHEDLAT_SLEEP,
+	SCHEDLAT_RUNDELAY,
+	SCHEDLAT_LONGSYS,
+	SCHEDLAT_STAT_NR
+};
+
+struct sli_memlat_stat {
+	unsigned long latency_max[MEM_LAT_STAT_NR];
+	unsigned long item[MEM_LAT_STAT_NR][LAT_COUNT_NR];
+};
+
+struct sli_schedlat_stat {
+	unsigned long latency_max[SCHEDLAT_STAT_NR];
+	unsigned long item[SCHEDLAT_STAT_NR][LAT_COUNT_NR];
+};
+
+int sli_cgroup_alloc(struct cgroup *cgroup);
+void sli_cgroup_free(struct cgroup *cgroup);
+void sli_memlat_stat_start(u64 *start);
+void sli_memlat_stat_end(enum sli_memlat_stat_item sidx, u64 start);
+int sli_memlat_stat_show(struct seq_file *m, struct cgroup *cgrp);
+int sli_memlat_max_show(struct seq_file *m, struct cgroup *cgrp);
+void sli_schedlat_stat(struct task_struct *task, enum sli_schedlat_stat_item sidx, u64 delta);
+void sli_schedlat_rundelay(struct task_struct *task, struct task_struct *prev, u64 delta);
+int sli_schedlat_stat_show(struct seq_file *m, struct cgroup *cgrp);
+int sli_schedlat_max_show(struct seq_file *m, struct cgroup *cgrp);
+#ifdef CONFIG_SCHED_INFO
+void sli_check_longsys(struct task_struct *tsk);
+#else
+static inline void sli_check_longsys(struct task_struct *tsk) {}
+#endif
+#endif /* _LINUX_SLI_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6fc856c9eda5..e3072cab1e76 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -57,7 +57,7 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 			smp_call_func_t func, void *info, bool wait,
 			gfp_t gfp_flags, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
 
 #ifdef CONFIG_SMP
 
@@ -98,6 +98,17 @@ extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
  */
 extern void smp_cpus_done(unsigned int max_cpus);
+#define smp_call_function_many_async_begin(cpumask)	\
+	preempt_disable()
+
+int smp_call_function_many_async(int cpu, call_single_data_t *csd, struct cpumask *mask);
+
+#define smp_call_function_many_async_end(cpumask) do {		\
+	if (cpumask)						\
+		arch_send_call_function_ipi_mask(cpumask);	\
+	preempt_enable(); } while (0)
+
+
 /*
  * Call a function on all other processors
 */
@@ -140,6 +151,10 @@ static inline int get_boot_cpu_id(void)
 
 static inline void smp_send_stop(void) { }
 
+#define smp_call_function_many_async_begin(cpumask)
+#define smp_call_function_many_async(cpu, csd, mask) smp_call_function_single_async(cpu, csd)
+#define smp_call_function_many_async_end(cpumask)
+
 /*
  * These macros fold the SMP functionality into a single CPU system
 */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 15fe980a27ea..0b9ecd8cf979 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -25,7 +25,19 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
-u64 sock_gen_cookie(struct sock *sk);
+u64 __sock_gen_cookie(struct sock *sk);
+
+static inline u64 sock_gen_cookie(struct sock *sk)
+{
+	u64 cookie;
+
+	preempt_disable();
+	cookie = __sock_gen_cookie(sk);
+	preempt_enable();
+
+	return cookie;
+}
+
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
 void sock_diag_save_cookie(struct sock *sk, __u32
*cookie); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h new file mode 100644 index 000000000000..700856e13ea0 --- /dev/null +++ b/include/linux/sockptr.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Christoph Hellwig. + * + * Support for "universal" pointers that can point to either kernel or userspace + * memory. + */ +#ifndef _LINUX_SOCKPTR_H +#define _LINUX_SOCKPTR_H + +#include <linux/slab.h> +#include <linux/uaccess.h> + +typedef struct { + union { + void *kernel; + void __user *user; + }; + bool is_kernel : 1; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return sockptr.is_kernel; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline sockptr_t USER_SOCKPTR(void __user *p) +{ + return (sockptr_t) { .user = p }; +} + +static inline bool sockptr_is_null(sockptr_t sockptr) +{ + return !sockptr.user && !sockptr.kernel; +} + +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + if (!sockptr_is_kernel(src)) + return copy_from_user(dst, src.user, size); + memcpy(dst, src.kernel, size); + return 0; +} + +static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +{ + if (!sockptr_is_kernel(dst)) + return copy_to_user(dst.user, src, size); + memcpy(dst.kernel, src, size); + return 0; +} + +static inline void *memdup_sockptr(sockptr_t src, size_t len) +{ + void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + return p; +} + +static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +{ + char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + p[len] = '\0'; + return p; +} + +static inline void sockptr_advance(sockptr_t sockptr, size_t len) +{ + if (sockptr_is_kernel(sockptr)) + sockptr.kernel += len; + else + sockptr.user += len; +} + +static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) +{ + if (sockptr_is_kernel(src)) { + size_t len = min(strnlen(src.kernel, count - 1) + 1, count); + + memcpy(dst, src.kernel, len); + return len; + } + return strncpy_from_user(dst, src.user, count); +} + +#endif /* _LINUX_SOCKPTR_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index af4f265d0f67..7067f85cef0b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -466,6 +466,9 @@ struct spi_controller { #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ + /* flag indicating this is a non-devres managed controller */ + bool devm_allocated; + /* flag indicating this is an SPI slave controller */ bool slave; @@ -663,6 +666,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host, return __spi_alloc_controller(host, size, true); } +struct spi_controller *__devm_spi_alloc_controller(struct device *dev, + unsigned int size, + bool slave); + +static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, + unsigned int size) +{ + return __devm_spi_alloc_controller(dev, size, false); +} + +static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, + unsigned int size) +{ + if (!IS_ENABLED(CONFIG_SPI_SLAVE)) + return NULL; + + return __devm_spi_alloc_controller(dev, size, true); +} + extern int 
spi_register_controller(struct spi_controller *ctlr); extern int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr); diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 83bd8cb475d7..9c6b127a2de5 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <asm/errno.h> +#include <linux/kabi.h> struct task_struct; struct pt_regs; @@ -65,6 +66,7 @@ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; int skip; /* input argument: How many entries to skip */ + KABI_RESERVE(1); }; extern void save_stack_trace(struct stack_trace *trace); diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index f9a0c6189852..69998fc5ffe9 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -139,7 +139,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ -static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, +static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { unsigned long flags; @@ -150,14 +150,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, return ret; } -static inline int stop_machine(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static __always_inline int +stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { return stop_machine_cpuslocked(fn, data, cpus); } -static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, - const struct cpumask *cpus) +static __always_inline int +stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, + const struct cpumask *cpus) { return stop_machine(fn, data, cpus); } diff --git a/include/linux/string.h b/include/linux/string.h index b6ccdc2c7f02..b2264355272d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -269,6 +269,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define 
__underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -276,14 +301,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_strncpy(p, q, size); + return __underlying_strncpy(p, q, size); } __FORTIFY_INLINE char *strcat(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 0); if (p_size == (size_t)-1) - return __builtin_strcat(p, q); + return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); return p; @@ -297,7 +322,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __builtin_strlen(p); + return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(__func__); @@ -330,7 +355,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __write_overflow(); if (len >= p_size) fortify_panic(__func__); - __builtin_memcpy(p, q, len); + __underlying_memcpy(p, q, len); p[len] = '\0'; } return ret; @@ -343,12 +368,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strncat(p, q, count); + return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); if (p_size < p_len + copy_len + 1) fortify_panic(__func__); - __builtin_memcpy(p + p_len, q, copy_len); + __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } @@ -360,7 +385,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memset(p, c, size); + return __underlying_memset(p, c, size); } __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) @@ -375,7 +400,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcpy(p, q, size); + return __underlying_memcpy(p, q, size); } __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) @@ -390,7 +415,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memmove(p, q, size); + return __underlying_memmove(p, q, size); } extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); @@ -416,7 +441,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcmp(p, q, size); + return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) @@ -426,7 +451,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) __read_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memchr(p, c, size); + return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, 
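/*
 * Illustrative sketch (not part of the patch): with FORTIFY_SOURCE, a copy
 * whose size provably exceeds the destination object triggers a
 * compile-time error through __write_overflow(), e.g.:
 *
 *	char buf[8];
 *	memcpy(buf, src, 16);	// __builtin_object_size(buf, 0) == 8 < 16
 *				// -> __write_overflow() at compile time
 *
 * The __underlying_* aliases above matter under KASAN: they route the
 * fortified wrappers to the instrumented out-of-line string functions
 * instead of the compiler builtins, so KASAN still sees the accesses.
 */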
size_t n) __RENAME(memchr_inv); @@ -457,11 +482,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strcpy(p, q); + return __underlying_strcpy(p, q); memcpy(p, q, strlen(q) + 1); return p; } +/* Don't use these outside the FORTIFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy #endif /** diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index abc63bd1be2b..336802acc629 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -237,5 +237,10 @@ static inline int rpc_reply_expected(struct rpc_task *task) (task->tk_msg.rpc_proc->p_decode != NULL); } +static inline void rpc_task_close_connection(struct rpc_task *task) +{ + if (task->tk_xprt) + xprt_force_disconnect(task->tk_xprt); +} #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 5ac5db4d295f..b5a4eb14f809 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -22,6 +22,7 @@ struct gss_ctx { struct gss_api_mech *mech_type; void *internal_ctx_id; + unsigned int slack, align; }; #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) @@ -67,6 +68,7 @@ u32 gss_wrap( u32 gss_unwrap( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -83,6 +85,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; @@ -127,6 +130,7 @@ struct gss_api_ops { u32 (*gss_unwrap)( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 02c0412e368c..07930bc9ad60 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -83,7 +83,7 @@ struct gss_krb5_enctype { u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); /* v2 encryption function */ - u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *headskip, u32 *tailskip); /* v2 decryption function */ }; @@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **pages); u32 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *buf); @@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct page **pages); u32 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1afe38eb33f7..82665ff360fd 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char *
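/*
 * Illustrative sketch (not part of the patch): gss_unwrap() now takes the
 * buffer length so the mech can bounds-check the wrap token; a caller
 * (hypothetical, modeled on the svcauth/gss unwrap path) would pass:
 *
 *	maj_stat = gss_unwrap(ctx, 0, buf->len, buf);
 *
 * mirroring the extra "int len" argument added to the (*gss_unwrap) op
 * and to gss_unwrap_kerberos()/gss_krb5_aes_decrypt() above.
 */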
svc_print_addr(struct svc_rqst *, char *, size_t); +int svc_encode_read_payload(struct svc_rqst *rqstp, + unsigned int offset, + unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 40f65888dd38..77589ed787f5 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -137,6 +137,8 @@ struct svc_rdma_recv_ctxt { unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; + unsigned int rc_read_payload_offset; + unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -152,9 +154,8 @@ struct svc_rdma_send_ctxt { }; /* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); @@ -162,6 +163,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); +extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ @@ -170,7 +172,9 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - __be32 *wr_ch, struct xdr_buf *xdr); + __be32 *wr_ch, struct xdr_buf *xdr, + unsigned int offset, + unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, bool writelist, struct xdr_buf *xdr); @@ -189,6 +193,8 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct xdr_buf *xdr, __be32 *wr_lst); extern int svc_rdma_sendto(struct svc_rqst *); +extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length); /* svc_rdma_transport.c */ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ea6f46be9cb7..9e1e046de176 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -21,6 +21,8 @@ struct svc_xprt_ops { int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); + int (*xpo_read_payload)(struct svc_rqst *, unsigned int, + unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index a4528b26c8aa..d229d27ab19e 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -21,7 +21,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 
f33e5013bdfb..a8d68c5a4ca6 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -27,8 +27,7 @@ struct rpc_rqst; #define XDR_QUADLEN(l) (((l) + 3) >> 2) /* - * Generic opaque `network object.' At the kernel level, this type - * is used only by lockd. + * Generic opaque `network object.' */ #define XDR_MAX_NETOBJ 1024 struct xdr_netobj { @@ -186,6 +185,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index d783e15ba898..d7ef5b97174c 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -330,6 +330,7 @@ struct xprt_class { struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; char name[32]; + const char * netid[]; }; /* diff --git a/include/linux/swap.h b/include/linux/swap.h index d4a727e06986..b9561372e757 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -13,6 +13,7 @@ #include <linux/atomic.h> #include <linux/page-flags.h> #include <asm/page.h> +#include <linux/kabi.h> struct notifier_block; @@ -275,6 +276,10 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + struct plist_node avail_lists[0]; /* * entries in swap_avail_heads, one * entry per node. */
@@ -365,6 +370,7 @@ extern int vm_swappiness; #define ADDITIONAL_RECLAIM_RATIO 2 extern unsigned long pagecache_over_limit(void); extern void shrink_page_cache(gfp_t mask, struct page *page); +extern unsigned long shrink_page_cache_memcg(gfp_t mask, struct mem_cgroup *memcg, unsigned long nr_pages); extern unsigned long vm_pagecache_limit_pages; extern unsigned long vm_pagecache_limit_reclaim_pages; extern int vm_pagecache_limit_ratio; @@ -374,6 +380,7 @@ extern unsigned int vm_pagecache_limit_async; extern unsigned int vm_pagecache_ignore_slab; extern int kpagecache_limitd_run(void); extern void kpagecache_limitd_stop(void); +extern unsigned int vm_pagecache_limit_global; extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; @@ -485,6 +492,7 @@ struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); extern struct swap_info_struct *get_swap_device(swp_entry_t entry); +sector_t swap_page_sector(struct page *page); static inline void put_swap_device(struct swap_info_struct *si) { diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 877fd239b6ff..3208a520d0be 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -348,7 +348,8 @@ static inline void num_poisoned_pages_inc(void) } #endif -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ + defined(CONFIG_DEVICE_PRIVATE) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index cde3dc18e21a..0a8fced6aaec 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -5,6 +5,7 @@ #include <linux/dma-direction.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/limits.h> struct device; struct page; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fa7ee503fb76..428707ac3c0b 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -310,6 +310,11 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) return kernfs_enable_ns(kn); } +__printf(2, 3) +int sysfs_emit(char *buf, const char *fmt, ...); +__printf(3, 4) +int sysfs_emit_at(char *buf, int at, const char *fmt, ...); + #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -522,6 +527,17 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) { } +__printf(2, 3) +static inline int sysfs_emit(char *buf, const char *fmt, ...) +{ + return 0; +} + +__printf(3, 4) +static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) +{ + return 0; +} #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/include/linux/tcp.h b/include/linux/tcp.h index bcd95a793208..3491213aa05d 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -216,6 +216,8 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; + u8 tlp_retrans:1, /* TLP is a retransmission */ + unused_1:7; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -238,7 +240,7 @@ struct tcp_sock { save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? 
*/ syn_smc:1; /* SYN includes SMC */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e93e249a4e9b..f3040b0b4b23 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/bug.h> #include <linux/restart_block.h> +#include <linux/errno.h> #ifdef CONFIG_THREAD_INFO_IN_TASK /* @@ -39,6 +40,18 @@ enum { #ifdef __KERNEL__ +#ifndef arch_set_restart_data +#define arch_set_restart_data(restart) do { } while (0) +#endif + +static inline long set_restart_fn(struct restart_block *restart, + long (*fn)(struct restart_block *)) +{ + restart->fn = fn; + arch_set_restart_data(restart); + return -ERESTART_RESTARTBLOCK; +} + #ifndef THREAD_ALIGN #define THREAD_ALIGN THREAD_SIZE #endif diff --git a/include/linux/time64.h b/include/linux/time64.h index 19125489ae94..5eab3f263518 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -132,6 +132,10 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts) */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { + /* Prevent multiplication overflow */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return KTIME_MAX; + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } diff --git a/include/linux/timer.h b/include/linux/timer.h index 1e6650ed066d..77e091c97ab1 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,6 +7,7 @@ #include <linux/stddef.h> #include <linux/debugobjects.h> #include <linux/stringify.h> +#include <linux/kabi.h> struct timer_list { /* @@ -21,6 +22,9 @@ struct timer_list { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; #ifdef CONFIG_LOCKDEP diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 53c0ea9ec9df..77fdc988c610 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -93,6 +93,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; struct tpm_bios_log { diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 131ea1bad458..f3caeeb7a0d0 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; +#define TCG_SPECID_SIG "Spec ID Event03" + struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, int i; int j; u32 count, event_type; + const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; @@ -198,10 +201,26 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, count = READ_ONCE(event->count); event_type = READ_ONCE(event->event_type); + /* Verify that it's the log header */ + if (event_header->pcr_idx != 0 || + event_header->event_type != NO_ACTION || + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { + size = 0; + goto out; + } + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; - /* Check if event is malformed. */ - if (count > efispecid->num_algs) { + /* + * Perform validation of the event in order to identify malformed + * events. 
This function may be asked to parse arbitrary byte sequences + * immediately following a valid event log. The caller expects this + * function to recognize that the byte sequence is not a valid event + * and to return an event size of 0. + */ + if (memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)) || + !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 6609b39a7232..6db257466af6 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -12,7 +12,7 @@ */ struct trace_seq { - unsigned char buffer[PAGE_SIZE]; + char buffer[PAGE_SIZE]; struct seq_buf seq; int full; }; @@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s) * that is about to be written to and then return the result * of that write. */ -static inline unsigned char * +static inline char * trace_seq_buffer_ptr(struct trace_seq *s) { return s->buffer + seq_buf_used(&s->seq); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 1fb11daa5c53..57ce5af258a3 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -362,7 +362,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace diff --git a/include/linux/tty.h b/include/linux/tty.h index bd5fe0e907e8..eb33d948788c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -66,7 +66,7 @@ struct tty_buffer { int read; int flags; /* Data points here */ - unsigned long data[0]; + unsigned long data[]; }; /* Values for .flags field of tty_buffer */ @@ -306,6 +306,10 @@ struct tty_struct { struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; struct pid *pgrp; /* Protected by ctrl lock */ + /* + * Writes protected by both ctrl lock and legacy mutex, readers must use + * at least one of them. + */ struct pid *session; unsigned long flags; int count; diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 358446247ccd..7186d77f431e 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -236,7 +236,7 @@ * * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel * structure to complete. This method is optional and will only be called - * if provided (otherwise EINVAL will be returned). + * if provided (otherwise ENOTTY will be returned). 
*/ #include <linux/export.h> diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index a27604f99ed0..11096b561dab 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -69,12 +69,13 @@ struct u64_stats_sync { }; +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) +#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) +#else static inline void u64_stats_init(struct u64_stats_sync *syncp) { -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - seqcount_init(&syncp->seq); -#endif } +#endif static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 38555435a64a..67f016010aad 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -311,6 +311,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, * happens, handle that and return -EFAULT. */ extern long probe_kernel_read(void *dst, const void *src, size_t size); +extern long probe_kernel_read_strict(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); /* @@ -350,6 +351,9 @@ extern long notrace probe_user_write(void __user *dst, const void *src, size_t s extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, + long count); +extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); diff --git a/include/linux/uio.h b/include/linux/uio.h index ab5f523bc0df..b74d292b9960 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -261,7 +261,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i); + +struct csum_state { + __wsum csum; + size_t off; +}; + +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 8675e145ea8b..2040696d75b6 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); +int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep, u8 alt); + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index a15ce99dfc2d..78e006355557 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -151,7 +151,7 @@ struct ehci_regs { #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ #define PORT_POWER (1<<12) /* true: has power (see PPC) */ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ -/* 11:10 for detecting lowspeed devices (reset vs release ownership) 
*/ +#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J) */ /* 9 reserved */ #define PORT_LPM (1<<9) /* LPM transaction */ #define PORT_RESET (1<<8) /* reset port */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 124462d65eac..67f5adc9b875 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -373,6 +373,7 @@ struct usb_gadget_ops { * @connected: True if gadget is connected. * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag * indicates that it supports LPM as per the LPM ECN & errata. + * @irq: the interrupt number for the device controller. * * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. Gadget @@ -427,6 +428,7 @@ struct usb_gadget { unsigned deactivated:1; unsigned connected:1; unsigned lpm_capable:1; + int irq; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index 145c38e351c2..6655ce32feff 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -442,6 +442,7 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */ #define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */ #define PD_T_NEWSRC 250 /* Maximum of 275ms */ +#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */ #define PD_T_DRP_TRY 100 /* 75 - 150 ms */ #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 000a5954b2e8..0a958c794832 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -84,6 +84,10 @@ /* Cannot handle REPORT_LUNS */ \ US_FLAG(ALWAYS_SYNC, 0x20000000) \ /* lies about caching, so always sync */ \ + US_FLAG(NO_SAME, 0x40000000) \ /* Cannot handle WRITE_SAME */ \ + US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index fb9f4f799554..b309a30a5db1 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -10,6 +10,7 @@ #include <linux/rwsem.h> #include <linux/sysctl.h> #include <linux/err.h> +#include <linux/kabi.h> #define UID_GID_MAP_MAX_BASE_EXTENTS 5 #define UID_GID_MAP_MAX_EXTENTS 340 @@ -85,7 +86,13 @@ struct user_namespace { struct ctl_table_header *sysctls; #endif struct ucounts *ucounts; + int ucount_max[UCOUNT_COUNTS]; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; struct ucounts { diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 0d1fe9297ac6..98775d7fa696 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_NET_H #include <linux/if_vlan.h> +#include <uapi/linux/tcp.h> +#include <uapi/linux/udp.h> #include <uapi/linux/virtio_net.h> static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, @@ -28,17 +30,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, bool little_endian) { unsigned int gso_type = 0; + unsigned int thlen = 0; + unsigned int p_off = 0; + unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type =
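/*
 * (Illustrative note, not part of the patch.) thlen records the minimal
 * transport-header length implied by the GSO type (TCP vs UDP). It is
 * used further down to compute p_off, the first byte past the headers,
 * and the packet is rejected when p_off > skb_headlen(), i.e. when the
 * claimed headers would not fit in the linear area of the skb.
 */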
SKB_GSO_TCPV6; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + ip_proto = IPPROTO_UDP; + thlen = sizeof(struct udphdr); break; default: return -EINVAL; @@ -51,22 +62,36 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, return -EINVAL; } + skb_reset_mac_header(skb); + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + + p_off = skb_transport_offset(skb) + thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. * probe and drop if does not match one of the above types. */ if (gso_type && skb->network_header) { - if (!skb->protocol) + struct flow_keys_basic keys; + + if (!skb->protocol) { + __be16 protocol = dev_parse_header_protocol(skb); + virtio_net_hdr_set_proto(skb, hdr); + if (protocol && protocol != skb->protocol) + return -EINVAL; + } retry: - skb_probe_transport_header(skb); - if (!skb_transport_header_was_set(skb)) { + if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, + NULL, 0, 0, 0, + 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { @@ -75,18 +100,33 @@ retry: } return -EINVAL; } + + p_off = keys.control.thoff + thlen; + if (p_off > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); + } else if (gso_type) { + p_off = thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + struct skb_shared_info *shinfo = skb_shinfo(skb); - skb_shinfo(skb)->gso_size = gso_size; - skb_shinfo(skb)->gso_type = gso_type; + /* Too small packets are not really GSO ones. */ + if (skb->len - p_off > gso_size) { + shinfo->gso_size = gso_size; + shinfo->gso_type = gso_type; - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + /* Header must be checked, and gso_segs computed. 
*/ + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; + } } return 0; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 07875ccc7bb5..b139f76060a6 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -150,7 +150,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk, void virtio_transport_destruct(struct vsock_sock *vsk); -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt); +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index decac0790fc1..4c7477677f21 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -93,6 +93,7 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); +extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); @@ -122,7 +123,7 @@ extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, - unsigned long size); + unsigned long pgoff, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); diff --git a/include/linux/wait.h b/include/linux/wait.h index 3eb7cae8206c..065f9b8a9813 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -1121,6 +1121,7 @@ do { \ * Waitqueues which are removed from the waitqueue_head at wakeup time */ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); +bool prepare_to_wait_exclusive_with_ret(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4261d1c6e87b..ddc6f4b15eee 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -14,6 +14,7 @@ #include <linux/atomic.h> #include <linux/cpumask.h> #include <linux/rcupdate.h> +#include <linux/kabi.h> struct workqueue_struct; @@ -106,6 +107,8 @@ struct work_struct { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) @@ -119,6 +122,8 @@ struct delayed_work { /* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; int cpu; + + KABI_RESERVE(1); }; struct rcu_work { diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a19d845dd7eb..bcf699d48bb2 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -12,6 +12,7 @@ #include <linux/backing-dev-defs.h> #include <linux/blk_types.h> #include <linux/blk-cgroup.h> +#include <linux/kabi.h> struct bio; @@ -92,6 +93,10 @@ 
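/*
 * Illustrative sketch (not part of the patch): with the new pgoff
 * parameter, remap_vmalloc_range() can be expressed in terms of
 * remap_vmalloc_range_partial(), roughly:
 *
 *	return remap_vmalloc_range_partial(vma, vma->vm_start, addr, pgoff,
 *					   vma->vm_end - vma->vm_start);
 *
 * letting callers map a sub-range of a vmalloc area at a page offset.
 */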
struct writeback_control { size_t wb_lcand_bytes; /* bytes written by last candidate */ size_t wb_tcand_bytes; /* bytes written by this candidate */ #endif + + KABI_RESERVE(1); + KABI_RESERVE(2); + }; static inline int wbc_to_write_flags(struct writeback_control *wbc) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 86eecbd98e84..3b257c97837d 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1613,6 +1613,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) { struct xa_node *node = xas->xa_node; + void *entry; unsigned int offset; if (unlikely(xas_not_node(node) || node->shift)) @@ -1624,7 +1625,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, return NULL; if (offset == XA_CHUNK_SIZE) return xas_find_marked(xas, max, mark); - return xa_entry(xas->xa, node, offset); + entry = xa_entry(xas->xa, node, offset); + if (!entry) + return xas_find_marked(xas, max, mark); + return entry; } /* diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 6dad031be3c2..3a71ad716da5 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 2219cce81ca4..2a430e713ce5 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -20,7 +20,6 @@ * zsmalloc mapping modes * * NOTE: These only make a difference when a mapped object spans pages. - * They also have no effect when PGTABLE_MAPPING is selected. 
*/ enum zs_mapmode { ZS_MM_RW, /* normal read-write mapping */ @@ -36,7 +35,7 @@ enum zs_mapmode { struct zs_pool_stats { /* How many pages were migrated (freed) */ - unsigned long pages_compacted; + atomic_long_t pages_compacted; }; struct zs_pool; diff --git a/include/media/rc-map.h b/include/media/rc-map.h index afd2ab31bdf2..a358c87a65de 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -126,6 +126,13 @@ struct rc_map_list { struct rc_map map; }; +#ifdef CONFIG_MEDIA_CEC_RC +/* + * rc_map_list from rc-cec.c + */ +extern struct rc_map_list cec_map; +#endif + /* Routines from rc-map.c */ /** @@ -271,6 +278,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" #define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" #define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro" #define RC_MAP_WETEK_HUB "rc-wetek-hub" #define RC_MAP_WETEK_PLAY2 "rc-wetek-play2" #define RC_MAP_WINFAST "rc-winfast" diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 45f88f0248c4..c072ed141811 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -78,6 +78,7 @@ * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2) * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY + * @V4L2_MBUS_INVALID: invalid bus type (keep as last) */ enum v4l2_mbus_type { V4L2_MBUS_UNKNOWN, @@ -87,6 +88,7 @@ enum v4l2_mbus_type { V4L2_MBUS_CCP2, V4L2_MBUS_CSI2_DPHY, V4L2_MBUS_CSI2_CPHY, + V4L2_MBUS_INVALID, }; /** diff --git a/include/net/act_api.h b/include/net/act_api.h index b18c699681ca..4dabe4730f00 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -69,7 +69,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) { dtm->install = jiffies_to_clock_t(jiffies - stm->install); dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); - dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); + dtm->firstuse = stm->firstuse ? 
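/*
 * (Illustrative note, not part of the patch.) firstuse == 0 means the
 * action has never matched a packet; without this check the dump would
 * report jiffies - 0, which reads as a bogus, very old first-use time.
 */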
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0; dtm->expires = jiffies_to_clock_t(stm->expires); } @@ -155,8 +156,7 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, bool cpustats); -void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); - +void tcf_idr_insert_many(struct tc_action *actions[]); void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, struct tc_action **a, int bind); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 3f62b347b04a..8d90fb9184e8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -229,7 +229,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev); void ipv6_mc_remap(struct inet6_dev *idev); void ipv6_mc_init_dev(struct inet6_dev *idev); void ipv6_mc_destroy_dev(struct inet6_dev *idev); -int ipv6_mc_check_icmpv6(struct sk_buff *skb); int ipv6_mc_check_mld(struct sk_buff *skb); void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp); @@ -273,6 +272,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 299240df79e4..55b980b21f4b 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -16,6 +16,12 @@ struct sock; struct socket; struct rxrpc_call; +enum rxrpc_interruptibility { + RXRPC_INTERRUPTIBLE, /* Call is interruptible */ + RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */ + RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */ +}; + /* * Debug ID counter for tracing. 
*/ @@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, gfp_t, rxrpc_notify_rx_t, bool, - bool, + enum rxrpc_interruptibility, unsigned int); int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, @@ -53,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); -u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b689aceb636b..325e8efb5b36 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -540,6 +540,7 @@ struct hci_chan { struct sk_buff_head data_q; unsigned int sent; __u8 state; + bool amp; }; struct hci_conn_params { @@ -1308,16 +1309,34 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) conn->security_cfm_cb(conn, status); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; + __u8 encrypt; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = BT_SECURITY_LOW; + if (conn->state == BT_CONFIG) { + if (!status) + conn->state = BT_CONNECTED; - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + return; + } + + if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + encrypt = 0x00; + else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) + encrypt = 0x02; + else + encrypt = 0x01; + + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; + + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 093aedebdf0c..8efc2419a815 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -623,6 +623,8 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); + int (*filter) (struct l2cap_chan * chan, + struct sk_buff *skb); }; struct l2cap_conn { diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index 9b73d4457d8b..db6bf26bc2e0 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -10,6 +10,7 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> +#include <linux/kabi.h> /* General definitions */ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) @@ -243,6 +244,11 @@ typedef struct port { struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ u32 transaction_id; /* continuous number for identification of Marker PDU's; */ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } port_t; /* system structure */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 2f3305315094..ce29d114c05e 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -25,6 +25,7 @@ 
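/*
 * Illustrative sketch (not part of the patch): kernel users of rxrpc now
 * pass an enum rxrpc_interruptibility value where they previously passed
 * a bool, e.g. (hypothetical caller, argument list abridged):
 *
 *	call = rxrpc_kernel_begin_call(sock, &srx, key, user_call_ID,
 *				       tx_total_len, gfp, notify_rx,
 *				       upgrade, RXRPC_INTERRUPTIBLE,
 *				       debug_id);
 *
 * RXRPC_PREINTERRUPTIBLE allows cancellation only while waiting for a
 * transmission slot; RXRPC_UNINTERRUPTIBLE disables interruption.
 */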
#include <linux/etherdevice.h> #include <linux/reciprocal_div.h> #include <linux/if_link.h> +#include <linux/kabi.h> #include <net/bond_3ad.h> #include <net/bond_alb.h> @@ -147,6 +148,9 @@ struct bond_params { /* 2 bytes of padding : see ether_addr_equal_64bits() */ u8 ad_actor_system[ETH_ALEN + 2]; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct bond_parm_tbl { @@ -185,6 +189,11 @@ struct slave { struct rtnl_link_stats64 slave_stats; }; +static inline struct slave *to_slave(struct kobject *kobj) +{ + return container_of(kobj, struct slave, kobj); +} + struct bond_up_slave { unsigned int count; struct rcu_head rcu; @@ -748,6 +757,9 @@ extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; +/* exported from bond_sysfs_slave.c */ +extern const struct sysfs_ops slave_sysfs_ops; + static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h index 8e4f831d2e52..119f4c9c3a9c 100644 --- a/include/net/bpf_sk_storage.h +++ b/include/net/bpf_sk_storage.h @@ -3,21 +3,62 @@ #ifndef _BPF_SK_STORAGE_H #define _BPF_SK_STORAGE_H +#include <linux/rculist.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/bpf.h> +#include <net/sock.h> +#include <uapi/linux/sock_diag.h> +#include <uapi/linux/btf.h> +#include <linux/bpf_local_storage.h> + struct sock; void bpf_sk_storage_free(struct sock *sk); extern const struct bpf_func_proto bpf_sk_storage_get_proto; extern const struct bpf_func_proto bpf_sk_storage_delete_proto; +extern const struct bpf_func_proto sk_storage_get_btf_proto; +extern const struct bpf_func_proto sk_storage_delete_btf_proto; + +struct bpf_local_storage_elem; +struct bpf_sk_storage_diag; +struct sk_buff; +struct nlattr; +struct sock; #ifdef CONFIG_BPF_SYSCALL int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); +struct bpf_sk_storage_diag * +bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs); +void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag); +int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, + struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size); #else static inline int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) { return 0; } +static inline struct bpf_sk_storage_diag * +bpf_sk_storage_diag_alloc(const struct nlattr *nla) +{ + return NULL; +} +static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag) +{ +} +static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, + struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size) +{ + return 0; +} #endif #endif /* _BPF_SK_STORAGE_H */ diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index e4ad58c4062c..2e3ae152bf7c 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -9,6 +9,7 @@ #define __NET_DCBNL_H__ #include <linux/dcbnl.h> +#include <linux/kabi.h> struct dcb_app_type { int ifindex; @@ -107,6 +108,11 @@ struct dcbnl_rtnl_ops { /* buffer settings */ int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #endif /* __NET_DCBNL_H__ */ diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h index 2ab668461463..f68bc373544a 
100644 --- a/include/net/drop_monitor.h +++ b/include/net/drop_monitor.h @@ -19,7 +19,7 @@ struct net_dm_hw_metadata { struct net_device *input_dev; }; -#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) +#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR) void net_dm_hw_report(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata); #else diff --git a/include/net/dst.h b/include/net/dst.h index 3448cf865ede..4d81582072a7 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -18,6 +18,7 @@ #include <linux/refcount.h> #include <net/neighbour.h> #include <asm/processor.h> +#include <linux/kabi.h> struct sk_buff; @@ -77,6 +78,8 @@ struct dst_entry { #ifndef CONFIG_64BIT atomic_t __refcnt; /* 32-bit offset 64 */ #endif + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct dst_metrics { @@ -401,7 +404,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = NULL; + + /* The packets from tunnel devices (eg bareudp) may have only + * metadata in the dst pointer of skb. Hence a pointer check of + * neigh_lookup is needed. + */ + if (dst->ops->neigh_lookup) + n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? NULL : n; } diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 443863c7b8da..130559966db4 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/percpu_counter.h> #include <linux/cache.h> +#include <linux/kabi.h> struct dst_entry; struct kmem_cachep; @@ -40,6 +41,11 @@ struct dst_ops { struct kmem_cache *kmem_cachep; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp; }; diff --git a/include/net/esp.h b/include/net/esp.h index 117652eb6ea3..465e38890ee9 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -4,6 +4,8 @@ #include <linux/skbuff.h> +#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER) + struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 7fed3193f81d..8b0c0227943e 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -10,6 +10,7 @@ #include <net/flow.h> #include <net/rtnetlink.h> #include <net/fib_notifier.h> +#include <linux/kabi.h> struct fib_kuid_range { kuid_t start; @@ -43,6 +44,11 @@ struct fib_rule { struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; struct rcu_head rcu; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; struct fib_lookup_arg { diff --git a/include/net/flow.h b/include/net/flow.h index a50fb77a0b27..456d05c2547b 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -13,6 +13,7 @@ #include <linux/atomic.h> #include <net/flow_dissector.h> #include <linux/uidgid.h> +#include <linux/kabi.h> /* * ifindex generation is per-net namespace, and loopback is @@ -41,6 +42,9 @@ struct flowi_common { kuid_t flowic_uid; struct flowi_tunnel flowic_tun_key; __u32 flowic_multipath_hash; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; union flowi_uli { @@ -93,6 +97,8 @@ struct flowi4 { #define fl4_ipsec_spi uli.spi #define fl4_mh_type uli.mht.type #define fl4_gre_key uli.gre_key + + KABI_RESERVE(1); } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline void flowi4_init_output(struct 
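/*
 * (Illustrative note, not part of the patch.) IS_REACHABLE() differs from
 * IS_ENABLED() when CONFIG_NET_DROP_MONITOR=m: built-in callers see
 * IS_ENABLED() as true yet cannot link against a module symbol, whereas
 * IS_REACHABLE() is true only where net_dm_hw_report() is actually
 * linkable - everywhere when built-in, and from module code when =m.
 */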
flowi4 *fl4, int oif, @@ -116,6 +122,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = sport; + fl4->flowi4_multipath_hash = 0; } /* Reset some input parameters after previous lookup */ @@ -153,6 +160,8 @@ struct flowi6 { #define fl6_mh_type uli.mht.type #define fl6_gre_key uli.gre_key __u32 mp_hash; + + KABI_RESERVE(1); } __attribute__((__aligned__(BITS_PER_LONG/8))); struct flowidn { diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 78f6437cbc3a..b6933326eaab 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -8,6 +8,9 @@ #include <linux/string.h> #include <uapi/linux/if_ether.h> +struct bpf_prog; +struct net; + /** * struct flow_dissector_key_control: * @thoff: Transport header offset @@ -347,4 +350,9 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control, memset(key_basic, 0, sizeof(*key_basic)); } +#ifdef CONFIG_BPF_SYSCALL +int flow_dissector_bpf_prog_attach_check(struct net *net, + struct bpf_prog *prog); +#endif /* CONFIG_BPF_SYSCALL */ + #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 9292f1c588b7..0e2ac2a1005f 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -5,6 +5,7 @@ #include <linux/genetlink.h> #include <net/netlink.h> #include <net/net_namespace.h> +#include <linux/kabi.h> #define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) @@ -35,12 +36,6 @@ struct genl_info; * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks - * @mcast_bind: a socket bound to the given multicast group (which - * is given as the offset into the groups array) - * @mcast_unbind: a socket was unbound from the given multicast group. - * Note that unbind() will not be called symmetrically if the - * generic netlink family is removed while there are still open - * sockets. 
* @attrbuf: buffer to store parsed attributes (private) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups @@ -64,8 +59,6 @@ struct genl_family { void (*post_doit)(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); - int (*mcast_bind)(struct net *net, int group); - void (*mcast_unbind)(struct net *net, int group); struct nlattr ** attrbuf; /* private */ const struct genl_ops * ops; const struct genl_multicast_group *mcgrps; @@ -73,6 +66,9 @@ struct genl_family { unsigned int n_mcgrps; unsigned int mcgrp_offset; /* private */ struct module *module; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; struct nlattr **genl_family_attrbuf(const struct genl_family *family); @@ -148,6 +144,10 @@ struct genl_ops { u8 internal_flags; u8 flags; u8 validate; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); }; int genl_register_family(struct genl_family *family); diff --git a/include/net/icmp.h b/include/net/icmp.h index 5d4bfdba9adf..fd84adc47963 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -43,6 +43,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); } +#if IS_ENABLED(CONFIG_NF_NAT) +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); +#else +static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct ip_options opts = { 0 }; + __icmp_send(skb_in, type, code, info, &opts); +} +#endif + int icmp_rcv(struct sk_buff *skb); int icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index fe96bf247aac..81b965953036 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -85,9 +85,8 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, int iif, int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb); + struct sock *sk = skb_steal_sock(skb, refcounted); - *refcounted = true; if (sk) return sk; diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 485d0b59a5e8..8b68b08657bf 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -35,8 +35,14 @@ int inet_shutdown(struct socket *sock, int how); int inet_listen(struct socket *sock, int backlog); void inet_sock_destruct(struct sock *sk); int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +/* Don't allocate port at this moment, defer to connect. */ +#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0) +/* Grab and release socket lock. */ +#define BIND_WITH_LOCK (1 << 1) +/* Called from BPF program. 
*/ +#define BIND_FROM_BPF (1 << 2) int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, - bool force_bind_address_no_port, bool with_lock); + u32 flags); int inet_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 895546058a20..e588bb273c46 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -83,6 +83,8 @@ struct inet_connection_sock_af_ops { * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options) * @icsk_ack: Delayed ACK control data * @icsk_mtup; MTU probing control data + * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack) + * @icsk_user_timeout: TCP_USER_TIMEOUT value */ struct inet_connection_sock { /* inet_sock has to be the first member! */ @@ -133,6 +135,7 @@ struct inet_connection_sock { u32 probe_timestamp; } icsk_mtup; + u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; @@ -284,7 +287,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; } -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); void inet_csk_destroy_sock(struct sock *sk); @@ -309,13 +312,17 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 3 +extern int sysctl_tcp_pingpong_thresh; static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { - inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH; + inet_csk(sk)->icsk_ack.pingpong = sysctl_tcp_pingpong_thresh; } static inline void inet_csk_exit_pingpong_mode(struct sock *sk) @@ -325,7 +332,7 @@ static inline void inet_csk_exit_pingpong_mode(struct sock *sk) static inline bool inet_csk_in_pingpong_mode(struct sock *sk) { - return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; + return inet_csk(sk)->icsk_ack.pingpong >= sysctl_tcp_pingpong_thresh; } static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) @@ -335,4 +342,10 @@ static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) if (icsk->icsk_ack.pingpong < U8_MAX) icsk->icsk_ack.pingpong++; } + +static inline bool inet_csk_has_ulp(struct sock *sk) +{ + return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; +} + #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index c8e2bebd8d93..563457fec557 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -4,6 +4,7 @@ #include <linux/ip.h> #include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <net/inet_sock.h> #include <net/dsfield.h> @@ -99,6 +100,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph) return 1; } +static inline int IP_ECN_set_ect1(struct iphdr *iph) +{ + u32 check = (__force u32)iph->check; + + if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0) + return 0; + + check += (__force u16)htons(0x1); + + iph->check = (__force __sum16)(check 
+ (check>=0xFFFF)); + iph->tos ^= INET_ECN_MASK; + return 1; +} + static inline void IP_ECN_clear(struct iphdr *iph) { iph->tos &= ~INET_ECN_MASK; @@ -134,6 +149,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) return 1; } +static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph) +{ + __be32 from, to; + + if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0) + return 0; + + from = *(__be32 *)iph; + to = from ^ htonl(INET_ECN_MASK << 20); + *(__be32 *)iph = to; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), + (__force __wsum)to); + return 1; +} + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) { dscp &= ~INET_ECN_MASK; @@ -142,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) static inline int INET_ECN_set_ce(struct sk_buff *skb) { - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb_tail_pointer(skb)) @@ -159,6 +190,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) return 0; } +static inline int INET_ECN_set_ect1(struct sk_buff *skb) +{ + switch (skb_protocol(skb, true)) { + case cpu_to_be16(ETH_P_IP): + if (skb_network_header(skb) + sizeof(struct iphdr) <= + skb_tail_pointer(skb)) + return IP_ECN_set_ect1(ip_hdr(skb)); + break; + + case cpu_to_be16(ETH_P_IPV6): + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= + skb_tail_pointer(skb)) + return IP6_ECN_set_ect1(skb, ipv6_hdr(skb)); + break; + } + + return 0; +} + /* * RFC 6040 4.2 * To decapsulate the inner header at the tunnel egress, a compliant @@ -208,8 +258,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb, int rc; rc = __INET_ECN_decapsulate(outer, inner, &set_ce); - if (!rc && set_ce) - INET_ECN_set_ce(skb); + if (!rc) { + if (set_ce) + INET_ECN_set_ce(skb); + else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1) + INET_ECN_set_ect1(skb); + } return rc; } @@ -219,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } return INET_ECN_decapsulate(skb, oiph->tos, inner); } @@ -234,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); } diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 3c95278d96ff..b4840f4d35d3 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp( int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); +static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h) +{ + kfree(h->lhash2); + h->lhash2 = NULL; +} + static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) { kvfree(hashinfo->ehash_locks); @@ -379,10 +385,9 @@ static inline struct sock 
*__inet_lookup_skb(struct inet_hashinfo *hashinfo, const int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb); + struct sock *sk = skb_steal_sock(skb, refcounted); const struct iphdr *iph = ip_hdr(skb); - *refcounted = true; if (sk) return sk; diff --git a/include/net/ip.h b/include/net/ip.h index d09a0a805c48..63e2e9531cd6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -444,12 +444,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct net *net = dev_net(dst->dev); + unsigned int mtu; if (net->ipv4.sysctl_ip_fwd_use_pmtu || ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); + /* 'forwarding = true' case should always honour route mtu */ + mtu = dst_metric_raw(dst, RTAX_MTU); + if (mtu) + return mtu; + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 4b5656c71abc..aea076167ef5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -177,6 +177,7 @@ struct fib6_info { struct rt6_info { struct dst_entry dst; struct fib6_info __rcu *from; + int sernum; struct rt6key rt6i_dst; struct rt6key rt6i_src; @@ -260,6 +261,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) struct fib6_info *from; u32 cookie = 0; + if (rt->sernum) + return rt->sernum; + rcu_read_lock(); from = rcu_dereference(rt->from); @@ -500,6 +504,13 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric)); } +#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) +struct bpf_iter__ipv6_route { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct fib6_info *, rt); +}; +#endif + #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); void fib6_rules_cleanup(void); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index b69c16cbbf71..2d0d91070268 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -254,6 +254,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, return rt->rt6i_flags & RTF_ANYCAST || (rt->rt6i_dst.plen < 127 && + !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ab1ca9e238d2..ffbae7683450 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -244,7 +244,6 @@ struct fib_dump_filter { u32 table_id; /* filter_set is an optimization that an entry is set */ bool filter_set; - bool dump_all_families; bool dump_routes; bool dump_exceptions; unsigned char protocol; @@ -423,6 +422,16 @@ static inline int fib_num_tclassid_users(struct net *net) #endif int fib_unmerge(struct net *net); +static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, +const struct net_device *dev) +{ + if (nhc->nhc_dev == dev || + l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) + return true; + + return false; +} + /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index af645604f328..56deb2501e96 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -472,9 +472,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, const void *from, int len, __be16 flags) { - memcpy(ip_tunnel_info_opts(info), from, len); info->options_len = len; - 
info->key.tun_flags |= flags; + if (len > 0) { + memcpy(ip_tunnel_info_opts(info), from, len); + info->key.tun_flags |= flags; + } } static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate) @@ -520,7 +522,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, __be16 flags) { info->options_len = 0; - info->key.tun_flags |= flags; } #endif /* CONFIG_INET */ diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 3e7d2c0e79ca..f1704385dbc1 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -63,7 +63,7 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly; /* A stub used by bpf helpers. Similarly ugly as ipv6_stub */ struct ipv6_bpf_stub { int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len, - bool force_bind_address_no_port, bool with_lock); + u32 flags); struct sock *(*udp6_lib_lookup)(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index e942372b077b..2e0124708740 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -9,6 +9,7 @@ #include <net/dst.h> #include <net/fib_rules.h> +#include <linux/kabi.h> /** * struct l3mdev_ops - l3mdev operations @@ -33,6 +34,11 @@ struct l3mdev_ops { /* IPv6 ops */ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev, struct flowi6 *fl6); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; #ifdef CONFIG_NET_L3_MASTER_DEV diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 5d6c5b1fc695..081085d94114 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -7,6 +7,7 @@ #include <linux/skbuff.h> #include <linux/types.h> #include <net/route.h> +#include <linux/kabi.h> #define LWTUNNEL_HASH_BITS 7 #define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS) @@ -30,6 +31,10 @@ struct lwtunnel_state { int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*orig_input)(struct sk_buff *); struct rcu_head rcu; + + KABI_RESERVE(1); + KABI_RESERVE(2); + __u8 data[0]; }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 523c6a09e1c8..d9ba9a77bcf2 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5933,7 +5933,9 @@ enum rate_control_capabilities { struct rate_control_ops { unsigned long capa; const char *name; - void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); + void *(*alloc)(struct ieee80211_hw *hw); + void (*add_debugfs)(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir); void (*free)(void *priv); void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 8ec77bfdc1a4..d69426a8e989 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -24,6 +24,7 @@ #include <linux/rcupdate.h> #include <linux/seq_file.h> #include <linux/bitmap.h> +#include <linux/kabi.h> #include <linux/err.h> #include <linux/sysctl.h> @@ -157,6 +158,10 @@ struct neighbour { struct list_head gc_list; struct rcu_head rcu; struct net_device *dev; + + KABI_RESERVE(1); + KABI_RESERVE(2); + u8 primary_key[0]; } __randomize_layout; @@ -204,6 +209,7 @@ struct neigh_table { int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *skb); + int (*is_multicast)(const void *pkey); bool (*allow_add)(const struct net_device *dev, struct netlink_ext_ack *extack); char 
*id; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index c7e15a213ef2..aa32674df5e7 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -33,6 +33,7 @@ #include <net/netns/mpls.h> #include <net/netns/can.h> #include <net/netns/xdp.h> +#include <net/netns/bpf.h> #include <linux/ns_common.h> #include <linux/idr.h> #include <linux/skbuff.h> @@ -159,12 +160,16 @@ struct net { #endif struct net_generic __rcu *gen; - struct bpf_prog __rcu *flow_dissector_prog; + /* Used to store attached BPF programs */ + struct netns_bpf bpf; /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM struct netns_xfrm xfrm; #endif + + u64 net_cookie; /* written once */ + #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; #endif @@ -428,6 +433,13 @@ static inline int rt_genid_ipv4(struct net *net) return atomic_read(&net->ipv4.rt_genid); } +#if IS_ENABLED(CONFIG_IPV6) +static inline int rt_genid_ipv6(const struct net *net) +{ + return atomic_read(&net->ipv6.fib6_sernum); +} +#endif + static inline void rt_genid_bump_ipv4(struct net *net) { atomic_inc(&net->ipv4.rt_genid); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 9f551f3b69c6..90690e37a56f 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -87,7 +87,7 @@ struct nf_conn { struct hlist_node nat_bysource; #endif /* all members below initialized via memset */ - u8 __nfct_init_offset[0]; + struct { } __nfct_init_offset; /* If we were expected by an expectation, this will be it */ struct nf_conn *master; diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 0d3920896d50..716db4a0fed8 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, unsigned int logflags); void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, struct sock *sk); +void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb); void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2d0275f13bbf..886866bee8b2 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } @@ -803,7 +805,7 @@ struct nft_expr_ops { int (*offload)(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr); - u32 offload_flags; + bool (*offload_action)(const struct nft_expr *expr); const struct nft_expr_type *type; void *data; }; @@ -870,6 +872,12 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) return (struct nft_expr *)&rule->data[rule->dlen]; } +static inline bool nft_expr_more(const struct nft_rule *rule, + const struct nft_expr *expr) +{ + return expr != nft_expr_last(rule) && expr->ops; +} + static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) { return (void *)&rule->data[rule->dlen]; @@ -1454,4 +1462,10 @@ void nft_chain_filter_fini(void); void __init nft_chain_route_init(void); void nft_chain_route_fini(void); + +void nf_tables_trans_destroy_flush_work(void); + +int 
nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); +__be64 nf_jiffies64_to_msecs(u64 input); + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 03cf5856d76f..a9989ca6e5af 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -37,6 +37,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx, struct nft_flow_key { struct flow_dissector_key_basic basic; + struct flow_dissector_key_control control; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; @@ -59,7 +60,8 @@ struct nft_flow_rule { struct flow_rule *rule; }; -#define NFT_OFFLOAD_F_ACTION (1 << 0) +void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, + enum flow_dissector_key_id addr_type); struct nft_rule; struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule); diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h new file mode 100644 index 000000000000..0ca6a1b87185 --- /dev/null +++ b/include/net/netns/bpf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF programs attached to network namespace + */ + +#ifndef __NETNS_BPF_H__ +#define __NETNS_BPF_H__ + +#include <linux/bpf-netns.h> + +struct bpf_prog; +struct bpf_prog_array; + +struct netns_bpf { + /* Array of programs to run compiled from progs or links */ + struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE]; + struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE]; + struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE]; +}; + +#endif /* __NETNS_BPF_H__ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index b353d9ec4661..4b2870691806 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -213,6 +213,7 @@ struct netns_ipv4 { int sysctl_fib_multipath_hash_policy; #endif + int sysctl_tw_timeout; int sysctl_tcp_max_orphans; struct fib_notifier_ops *notifier_ops; diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 59f45b1e9dac..b59d73d529ba 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -72,7 +72,9 @@ struct netns_xfrm { #if IS_ENABLED(CONFIG_IPV6) struct dst_ops xfrm6_dst_ops; #endif - spinlock_t xfrm_state_lock; + spinlock_t xfrm_state_lock; + seqcount_t xfrm_state_hash_generation; + spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; }; diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 331ebbc94fe7..18a5aca26476 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -70,6 +70,7 @@ struct nh_grp_entry { }; struct nh_group { + struct nh_group *spare; /* spare group for removals */ u16 num_nh; bool mpath; bool has_v4; @@ -136,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh) { unsigned int rc = 1; - if (nexthop_is_multipath(nh)) { + if (nh->is_group) { struct nh_group *nh_grp; nh_grp = rcu_dereference_rtnl(nh->nh_grp); - rc = nh_grp->num_nh; + if (nh_grp->mpath) + rc = nh_grp->num_nh; } return rc; } static inline -struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) +struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) { - const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); - /* for_nexthops macros in fib_semantics.c grabs a pointer to * the nexthop before checking nhsel */ @@ -185,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh) { const struct nh_info *nhi; - if 
(nexthop_is_multipath(nh)) { - if (nexthop_num_path(nh) > 1) - return false; - nh = nexthop_mpath_select(nh, 0); - if (!nh) + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + if (nh_grp->num_nh > 1) return false; + + nh = nh_grp->nh_entries[0].nh; } nhi = rcu_dereference_rtnl(nh->nh_info); @@ -216,16 +218,46 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); - if (nexthop_is_multipath(nh)) { - nh = nexthop_mpath_select(nh, nhsel); - if (!nh) - return NULL; + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + if (nh_grp->mpath) { + nh = nexthop_mpath_select(nh_grp, nhsel); + if (!nh) + return NULL; + } } nhi = rcu_dereference_rtnl(nh->nh_info); return &nhi->fib_nhc; } +static inline bool nexthop_uses_dev(const struct nexthop *nh, + const struct net_device *dev) +{ + struct nh_info *nhi; + + if (nh->is_group) { + struct nh_group *nhg = rcu_dereference(nh->nh_grp); + int i; + + for (i = 0; i < nhg->num_nh; i++) { + struct nexthop *nhe = nhg->nh_entries[i].nh; + + nhi = rcu_dereference(nhe->nh_info); + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) + return true; + } + } else { + nhi = rcu_dereference(nh->nh_info); + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) + return true; + } + + return false; +} + static inline unsigned int fib_info_num_path(const struct fib_info *fi) { if (unlikely(fi->nh)) @@ -259,12 +291,16 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel) int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, struct netlink_ext_ack *extack); +/* Caller should either hold rcu_read_lock(), or RTNL. */ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) { struct nh_info *nhi; - if (nexthop_is_multipath(nh)) { - nh = nexthop_mpath_select(nh, 0); + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + nh = nexthop_mpath_select(nh_grp, 0); if (!nh) return NULL; } @@ -276,6 +312,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) return NULL; } +/* Variant of nexthop_fib6_nh(). + * Caller should either hold rcu_read_lock_bh(), or RTNL. + */ +static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh) +{ + struct nh_info *nhi; + + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp); + nh = nexthop_mpath_select(nh_grp, 0); + if (!nh) + return NULL; + } + + nhi = rcu_dereference_bh_rtnl(nh->nh_info); + if (nhi->family == AF_INET6) + return &nhi->fib6_nh; + + return NULL; +} + static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i) { struct fib6_nh *fib6_nh; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6a70845bd9ab..cee1c084e9f4 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -128,17 +128,6 @@ static inline void qdisc_run(struct Qdisc *q) } } -static inline __be16 tc_skb_protocol(const struct sk_buff *skb) -{ - /* We need to take extra care in case the skb came via - * vlan accelerated path. In that case, use skb->vlan_proto - * as the original vlan header was already stripped. - */ - if (skb_vlan_tag_present(skb)) - return skb->vlan_proto; - return skb->protocol; -} - /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. 
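[Editor's note] The tc_skb_protocol() helper removed here encoded one subtlety: on the VLAN-accelerated receive path the tag is stripped into skb metadata, so skb->protocol no longer names the on-wire ethertype and skb->vlan_proto must be consulted instead. Callers in this patch move to skb_protocol(skb, true), which goes the other way and digs through VLAN headers to the L3 protocol (see the inet_ecn.h hunks earlier). A toy model of the removed helper's rule only, using illustrative types rather than real sk_buff fields:

#include <assert.h>
#include <stdint.h>

#define ETH_P_IP	0x0800
#define ETH_P_8021Q	0x8100

struct toy_skb {
	uint16_t protocol;	/* what the stack parsed up to */
	uint16_t vlan_proto;	/* saved ethertype when hw stripped the tag */
	int vlan_tag_present;
};

/* Model of the removed helper: report the on-wire ethertype. */
static uint16_t toy_tc_skb_protocol(const struct toy_skb *skb)
{
	if (skb->vlan_tag_present)
		return skb->vlan_proto;
	return skb->protocol;
}

int main(void)
{
	/* Accelerated RX: VLAN header stripped, tag kept out of line. */
	struct toy_skb rx = {
		.protocol = ETH_P_IP,
		.vlan_proto = ETH_P_8021Q,
		.vlan_tag_present = 1,
	};

	assert(toy_tc_skb_protocol(&rx) == ETH_P_8021Q);
	rx.vlan_tag_present = 0;
	assert(toy_tc_skb_protocol(&rx) == ETH_P_IP);
	return 0;
}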
*/ diff --git a/include/net/raw.h b/include/net/raw.h index 8ad8df594853..9f11c2950125 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -33,10 +33,19 @@ int raw_rcv(struct sock *, struct sk_buff *); #define RAW_HTABLE_SIZE MAX_INET_PROTOS struct raw_hashinfo { - rwlock_t lock; - struct hlist_head ht[RAW_HTABLE_SIZE]; + spinlock_t lock; + struct hlist_nulls_head ht[RAW_HTABLE_SIZE]; }; +static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo) +{ + int i; + + spin_lock_init(&hashinfo->lock); + for (i = 0; i < RAW_HTABLE_SIZE; i++) + INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i); +} + #ifdef CONFIG_PROC_FS int raw_proc_init(void); void raw_proc_exit(void); diff --git a/include/net/rawv6.h b/include/net/rawv6.h index 53d86b6055e8..2efa4a88616f 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -3,6 +3,7 @@ #define _NET_RAWV6_H #include <net/protocol.h> +#include <net/raw.h> extern struct raw_hashinfo raw_v6_hashinfo; struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, diff --git a/include/net/red.h b/include/net/red.h index 9665582c4687..ff07a7cedf68 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -168,14 +168,24 @@ static inline void red_set_vars(struct red_vars *v) v->qcount = -1; } -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, + u8 Scell_log, u8 *stab) { - if (fls(qth_min) + Wlog > 32) + if (fls(qth_min) + Wlog >= 32) return false; - if (fls(qth_max) + Wlog > 32) + if (fls(qth_max) + Wlog >= 32) + return false; + if (Scell_log >= 32) return false; if (qth_max < qth_min) return false; + if (stab) { + int i; + + for (i = 0; i < RED_STAB_SIZE; i++) + if (stab[i] >= 32) + return false; + } return true; } diff --git a/include/net/request_sock.h b/include/net/request_sock.h index cf8b33213bbc..c276fd16a98f 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -63,6 +63,7 @@ struct request_sock { u32 *saved_syn; u32 secid; u32 peer_secid; + u32 mark; }; static inline struct request_sock *inet_reqsk(const struct sock *sk) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index e2091bb2b3a8..b2b5856f965b 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -4,6 +4,7 @@ #include <linux/rtnetlink.h> #include <net/netlink.h> +#include <linux/kabi.h> typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); @@ -33,6 +34,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * * @list: Used internally * @kind: Identifier + * @netns_refund: Physical device, move to init_net on netns exit * @maxtype: Highest device specific netlink attribute number * @policy: Netlink policy for device specific attribute validation * @validate: Optional validation function for netlink/changelink parameters @@ -64,6 +66,7 @@ struct rtnl_link_ops { size_t priv_size; void (*setup)(struct net_device *dev); + bool netns_refund; unsigned int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr *tb[], @@ -110,6 +113,10 @@ struct rtnl_link_ops { int (*fill_linkxstats)(struct sk_buff *skb, const struct net_device *dev, int *prividx, int attr); + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; int __rtnl_link_register(struct rtnl_link_ops *ops); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 9fb7cf1cdf36..b2ceec7b280d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -407,6 +407,7 @@ struct 
tcf_block { struct mutex lock; struct list_head chain_list; u32 index; /* block index for shared blocks */ + u32 classid; /* which class this block belongs to */ refcount_t refcnt; struct net *net; struct Qdisc *q; @@ -1157,7 +1158,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, old = *pold; *pold = new; if (old != NULL) - qdisc_tree_flush_backlog(old); + qdisc_purge_queue(old); sch_tree_unlock(sch); return old; diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 823afc42a3aa..06e1deeef464 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -341,11 +341,13 @@ enum { ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by local sock family */ -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by + local sock family */ +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by peer */ -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by peer */ /* Reasons to retransmit. */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 2b6f3f13d5bc..3e8f87a3c52f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -224,12 +224,14 @@ struct sctp_sock { data_ready_signalled:1; atomic_t pd_mode; + + /* Fields after this point will be skipped on copies, like on accept + * and peeloff operations + */ + /* Receive to here while partial delivery is in effect. */ struct sk_buff_head pd_lobby; - /* These must be the last fields, as they will skipped on copies, - * like on accept and peeloff operations - */ struct list_head auto_asconf_list; int do_auto_asconf; }; diff --git a/include/net/sock.h b/include/net/sock.h index 6c5a3809483e..5fa161265a1b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -59,6 +59,7 @@ #include <linux/filter.h> #include <linux/rculist_nulls.h> #include <linux/poll.h> +#include <linux/kabi.h> #include <linux/atomic.h> #include <linux/refcount.h> @@ -231,7 +232,16 @@ struct sock_common { /* public: */ }; -struct bpf_sk_storage; +/*tvpc data*/ +struct tvpc_info { + u32 vpcid; + __be32 vmip; + __be32 vip; + __be16 sport; + __be16 vport; +}; + +struct bpf_local_storage; /** * struct sock - network layer representation of sockets @@ -422,6 +432,7 @@ struct sock { struct timer_list sk_timer; __u32 sk_priority; __u32 sk_mark; + __u32 sk_mark2; unsigned long sk_pacing_rate; /* bytes per second */ unsigned long sk_max_pacing_rate; struct page_frag sk_frag; @@ -508,9 +519,14 @@ struct sock { void (*sk_destruct)(struct sock *sk); struct sock_reuseport __rcu *sk_reuseport_cb; #ifdef CONFIG_BPF_SYSCALL - struct bpf_sk_storage __rcu *sk_bpf_storage; + struct bpf_local_storage __rcu *sk_bpf_storage; #endif struct rcu_head sk_rcu; + struct tvpc_info sk_tvpc_info; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; enum sk_pacing { @@ -519,10 +535,43 @@ enum sk_pacing { SK_PACING_FQ = 2, }; +/* Pointer stored in sk_user_data might not be suitable for copying + * when cloning the socket. For instance, it can point to a reference + * counted object. sk_user_data bottom bit is set if pointer must not + * be copied. 
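[Editor's note] The sk_user_data comment just above describes classic low-bit pointer tagging: kmalloc'd objects are at least word-aligned, so bit 0 of a valid pointer is always clear and can carry a "do not copy on clone" flag, which the SK_USER_DATA_* macros below mask off on every dereference. A standalone sketch of the pattern under that alignment assumption; the names here are illustrative, not kernel API:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define NOCOPY_BIT	1UL
#define PTR_MASK	(~NOCOPY_BIT)

static void *tag_nocopy(void *p)
{
	/* Alignment guarantees bit 0 is free to use as a flag. */
	assert(((uintptr_t)p & NOCOPY_BIT) == 0);
	return (void *)((uintptr_t)p | NOCOPY_BIT);
}

static void *untag(void *p)
{
	return (void *)((uintptr_t)p & PTR_MASK);
}

static int is_nocopy(void *p)
{
	return ((uintptr_t)p & NOCOPY_BIT) != 0;
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));
	void *stored = tag_nocopy(obj);	/* what gets kept in the slot */

	assert(is_nocopy(stored));	/* flag survives storage */
	assert(untag(stored) == (void *)obj);	/* readers see a clean pointer */
	free(obj);
	return 0;
}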
+ */ +#define SK_USER_DATA_NOCOPY 1UL +#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY) + +/** + * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied + * @sk: socket + */ +static inline bool sk_user_data_is_nocopy(const struct sock *sk) +{ + return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); +} + #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) -#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) -#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) +#define rcu_dereference_sk_user_data(sk) \ +({ \ + void *__tmp = rcu_dereference(__sk_user_data((sk))); \ + (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \ +}) +#define rcu_assign_sk_user_data(sk, ptr) \ +({ \ + uintptr_t __tmp = (uintptr_t)(ptr); \ + WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ + rcu_assign_pointer(__sk_user_data((sk)), __tmp); \ +}) +#define rcu_assign_sk_user_data_nocopy(sk, ptr) \ +({ \ + uintptr_t __tmp = (uintptr_t)(ptr); \ + WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ + rcu_assign_pointer(__sk_user_data((sk)), \ + __tmp | SK_USER_DATA_NOCOPY); \ +}) /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK @@ -849,6 +898,8 @@ static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } + +void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) @@ -856,6 +907,8 @@ static inline int sk_memalloc_socks(void) return 0; } +static inline void __receive_sock(struct file *file) +{ } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) @@ -905,11 +958,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb_dst_force(skb); if (!sk->sk_backlog.tail) - sk->sk_backlog.head = skb; + WRITE_ONCE(sk->sk_backlog.head, skb); else sk->sk_backlog.tail->next = skb; - sk->sk_backlog.tail = skb; + WRITE_ONCE(sk->sk_backlog.tail, skb); skb->next = NULL; } @@ -1184,6 +1237,11 @@ struct proto { atomic_t socks; #endif int (*diag_destroy)(struct sock *sk, int err); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } __randomize_layout; int proto_register(struct proto *prot, int alloc_slab); @@ -1617,6 +1675,7 @@ void sock_rfree(struct sk_buff *skb); void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); +void sock_pfree(struct sk_buff *skb); #else #define sock_edemux sock_efree #endif @@ -1803,7 +1862,6 @@ static inline int sk_rx_queue_get(const struct sock *sk) static inline void sk_set_socket(struct sock *sk, struct socket *sock) { - sk_tx_queue_clear(sk); sk->sk_socket = sock; } @@ -2147,6 +2205,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); } +static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) +{ + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { + skb_orphan(skb); + skb->destructor = sock_efree; + skb->sk = sk; + } +} + void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); @@ -2481,16 +2548,14 @@ void sock_net_set(struct sock *sk, struct net *net) write_pnet(&sk->sk_net, net); } -static inline struct sock *skb_steal_sock(struct sk_buff *skb) +static inline bool +skb_sk_is_prefetched(struct sk_buff *skb) { - if (skb->sk) { - struct sock *sk = skb->sk; - - skb->destructor = NULL; - skb->sk = NULL; - return sk; - } - return NULL; +#ifdef CONFIG_INET + return skb->destructor == sock_pfree; +#else 
+ return false; +#endif /* CONFIG_INET */ } /* This helper checks if a socket is a full socket, @@ -2501,6 +2566,35 @@ static inline bool sk_fullsock(const struct sock *sk) return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); } +static inline bool +sk_is_refcounted(struct sock *sk) +{ + /* Only full sockets have sk->sk_flags. */ + return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); +} + +/** + * skb_steal_sock + * @skb to steal the socket from + * @refcounted is set to true if the socket is reference-counted + */ +static inline struct sock * +skb_steal_sock(struct sk_buff *skb, bool *refcounted) +{ + if (skb->sk) { + struct sock *sk = skb->sk; + + *refcounted = true; + if (skb_sk_is_prefetched(skb)) + *refcounted = sk_is_refcounted(sk); + skb->destructor = NULL; + skb->sk = NULL; + return sk; + } + *refcounted = false; + return NULL; +} + /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. * Check decrypted mark in case skb_orphan() cleared socket. @@ -2563,6 +2657,10 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; +extern int sysctl_forced_caps_enabled; + +/* On 32bit arches, an skb frag is limited to 2^15 */ +#define SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 43f4a818d88f..3ecaa15d1850 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -55,6 +55,4 @@ static inline bool reuseport_has_conns(struct sock *sk, bool set) return ret; } -int reuseport_get_id(struct sock_reuseport *reuse); - #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index aee86a189432..64d1ed5ee360 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -11,6 +11,7 @@ #include <linux/notifier.h> #include <linux/list.h> #include <net/ip_fib.h> +#include <linux/kabi.h> #define SWITCHDEV_F_NO_RECURSE BIT(0) #define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1) @@ -71,6 +72,11 @@ struct switchdev_obj { u32 flags; void *complete_priv; void (*complete)(struct net_device *dev, int err, void *priv); + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* SWITCHDEV_OBJ_ID_PORT_VLAN */ diff --git a/include/net/tcp.h b/include/net/tcp.h index c25b9d4298bf..12a9b2c47b17 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -50,7 +50,7 @@ extern struct inet_hashinfo tcp_hashinfo; extern struct percpu_counter tcp_orphan_count; void tcp_time_wait(struct sock *sk, int state, int timeo); -#define MAX_TCP_HEADER (128 + MAX_HEADER) +#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 #define TCP_MIN_SND_MSS 48 #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) @@ -633,6 +633,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk) unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); unsigned int tcp_current_mss(struct sock *sk); +u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when); /* Bound MSS / TSO packet size with the half of the window */ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) @@ -1015,6 +1016,7 @@ enum tcp_ca_ack_event_flags { #define TCP_CONG_NON_RESTRICTED 0x1 /* Requires ECN/ECT set on all packets */ #define TCP_CONG_NEEDS_ECN 0x2 +#define TCP_CONG_MASK 
(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN) union tcp_cc_info; @@ -1109,6 +1111,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); extern struct tcp_congestion_ops tcp_reno; +struct tcp_congestion_ops *tcp_ca_find(const char *name); struct tcp_congestion_ops *tcp_ca_find_key(u32 key); u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca); #ifdef CONFIG_INET @@ -1432,6 +1435,24 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); } +/* We provision sk_rcvbuf around 200% of sk_rcvlowat. + * If 87.5 % (7/8) of the space has been consumed, we want to override + * SO_RCVLOWAT constraint, since we are receiving skbs with too small + * len/truesize ratio. + */ +static inline bool tcp_rmem_pressure(const struct sock *sk) +{ + int rcvbuf, threshold; + + if (tcp_under_memory_pressure(sk)) + return true; + + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + threshold = rcvbuf - (rcvbuf >> 3); + + return atomic_read(&sk->sk_rmem_alloc) > threshold; +} + extern void tcp_openreq_init_rwin(struct request_sock *req, const struct sock *sk_listener, const struct dst_entry *dst); @@ -1668,6 +1689,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk); void tcp_fastopen_ctx_destroy(struct net *net); int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, void *primary_key, void *backup_key); +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key); void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -1930,6 +1953,7 @@ struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; int bucket, offset, sbucket, num; loff_t last_pos; }; @@ -2046,7 +2070,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb); void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced); extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd); -extern void tcp_rack_mark_lost(struct sock *sk); +extern bool tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, u64 xmit_time); extern void tcp_rack_reo_timeout(struct sock *sk); @@ -2189,14 +2213,23 @@ void tcp_update_ulp(struct sock *sk, struct proto *p, struct sk_msg; struct sk_psock; -int tcp_bpf_init(struct sock *sk); -void tcp_bpf_reinit(struct sock *sk); +#ifdef CONFIG_BPF_STREAM_PARSER +struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); +void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); +#else +static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) +{ +} +#endif /* CONFIG_BPF_STREAM_PARSER */ + +#ifdef CONFIG_NET_SOCK_MSG int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, int flags); int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); +#endif /* CONFIG_NET_SOCK_MSG */ /* Call BPF_SOCK_OPS program that returns an int. 
If the return value * is < 0, then the BPF op failed (for example if the loaded BPF diff --git a/include/net/tls.h b/include/net/tls.h index 093abb5a3dff..78c8cdf8ec44 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -43,6 +43,7 @@ #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/rcupdate.h> +#include <linux/kabi.h> #include <net/tcp.h> #include <net/strparser.h> @@ -157,6 +158,8 @@ struct tls_sw_context_tx { struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; + /* protect crypto_wait with encrypt_pending */ + spinlock_t encrypt_compl_lock; int async_notify; int async_capable; @@ -177,6 +180,8 @@ struct tls_sw_context_rx { int async_capable; bool decrypted; atomic_t decrypt_pending; + /* protect crypto_wait with decrypt_pending*/ + spinlock_t decrypt_compl_lock; bool async_notify; }; @@ -217,11 +222,20 @@ enum tls_context_flags { * to be atomic. */ TLS_TX_SYNC_SCHED = 1, + /* tls_dev_del was called for the RX side, device state was released, + * but tls_ctx->netdev might still be kept, because TX-side driver + * resources might not be released yet. Used to prevent the second + * tls_dev_del call in tls_device_down if it happens simultaneously. + */ + TLS_RX_DEV_CLOSED = 2, }; struct cipher_context { char *iv; char *rec_seq; + + KABI_RESERVE(1); + KABI_RESERVE(2); }; union tls_crypto_context { @@ -586,6 +600,15 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) return !!tls_sw_ctx_tx(ctx); } +static inline bool tls_sw_has_ctx_rx(const struct sock *sk) +{ + struct tls_context *ctx = tls_get_ctx(sk); + + if (!ctx) + return false; + return !!tls_sw_ctx_rx(ctx); +} + void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); void tls_device_write_space(struct sock *sk, struct tls_context *ctx); diff --git a/include/net/udp.h b/include/net/udp.h index afac782bb166..95ed86fc17ac 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -441,6 +441,7 @@ struct udp_seq_afinfo { struct udp_iter_state { struct seq_net_private p; int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; }; void *udp_seq_start(struct seq_file *seq, loff_t *pos); @@ -477,6 +478,13 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, if (!inet_get_convert_csum(sk)) features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or + * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial + * packets in udp_gro_complete_segment. As does UDP GSO, verified by + * udp_send_skb. But when those packets are looped in dev_loopback_xmit + * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this + * specific case, where PARTIAL is both correct and required. 
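[Editor's note] The udp_rcv_segment() comment above describes a three-state interaction: the GSO segmenter accepts only CHECKSUM_PARTIAL or CHECKSUM_NONE, while dev_loopback_xmit() marks looped packets CHECKSUM_UNNECESSARY; for loopback, PARTIAL is both safe and required, and the hunk continues below with the actual reset. A toy restatement of that one rule, with illustrative names standing in for the kernel's:

#include <assert.h>

enum toy_csum { TOY_CSUM_NONE, TOY_CSUM_UNNECESSARY, TOY_CSUM_PARTIAL };

/* What the segmenter will take (model of __udp_gso_segment's expectation). */
static int toy_gso_accepts(enum toy_csum c)
{
	return c == TOY_CSUM_PARTIAL || c == TOY_CSUM_NONE;
}

/* The fix-up applied to packets looped back by the stack itself. */
static enum toy_csum toy_loopback_fixup(enum toy_csum c, int looped)
{
	return looped ? TOY_CSUM_PARTIAL : c;
}

int main(void)
{
	/* A looped packet arrives UNNECESSARY, which GSO would reject... */
	assert(!toy_gso_accepts(TOY_CSUM_UNNECESSARY));
	/* ...so it is reset to PARTIAL, known-correct for loopback. */
	assert(toy_gso_accepts(toy_loopback_fixup(TOY_CSUM_UNNECESSARY, 1)));
	return 0;
}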
+ */ if (skb->pkt_type == PACKET_LOOPBACK) skb->ip_summed = CHECKSUM_PARTIAL; @@ -497,4 +505,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, return segs; } +#ifdef CONFIG_BPF_STREAM_PARSER +struct sk_psock; +struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); +#endif /* BPF_STREAM_PARSER */ + #endif /* _UDP_H */ diff --git a/include/net/xdp.h b/include/net/xdp.h index 40c6d3398458..171c21b22f90 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -6,6 +6,8 @@ #ifndef __LINUX_NET_XDP_H__ #define __LINUX_NET_XDP_H__ +#include <linux/kabi.h> + /** * DOC: XDP RX-queue information * @@ -61,6 +63,11 @@ struct xdp_rxq_info { u32 queue_index; u32 reg_state; struct xdp_mem_info mem; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); } ____cacheline_aligned; /* perf critical, avoid false-sharing */ struct xdp_buff { diff --git a/include/net/xfrm.h b/include/net/xfrm.h index aa08a7a5f6ac..552d6d225272 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -24,6 +24,7 @@ #include <net/ip6_fib.h> #include <net/flow.h> #include <net/gro_cells.h> +#include <linux/kabi.h> #include <linux/interrupt.h> @@ -123,6 +124,8 @@ struct xfrm_state_walk { u8 proto; u32 seq; struct xfrm_address_filter *filter; + + KABI_RESERVE(1); }; struct xfrm_state_offload { @@ -130,6 +133,8 @@ struct xfrm_state_offload { unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; + + KABI_RESERVE(1); }; struct xfrm_mode { @@ -945,7 +950,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -957,7 +962,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1012,6 +1017,7 @@ struct xfrm_offload { #define XFRM_GRO 32 #define XFRM_ESP_NO_TRAILER 64 #define XFRM_DEV_RESUME 128 +#define XFRM_XMIT 256 __u32 status; #define CRYPTO_SUCCESS 1 @@ -1097,7 +1103,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, return __xfrm_policy_check(sk, ndir, skb, family); return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || - (skb_dst(skb)->flags & DST_NOPOLICY) || + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || __xfrm_policy_check(sk, ndir, skb, family); } @@ -1635,13 +1641,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void 
xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); @@ -1769,21 +1778,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es static inline int xfrm_replay_clone(struct xfrm_state *x, struct xfrm_state *orig) { - x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), + + x->replay_esn = kmemdup(orig->replay_esn, + xfrm_replay_state_esn_len(orig->replay_esn), GFP_KERNEL); if (!x->replay_esn) return -ENOMEM; - - x->replay_esn->bmp_len = orig->replay_esn->bmp_len; - x->replay_esn->replay_window = orig->replay_esn->replay_window; - - x->preplay_esn = kmemdup(x->replay_esn, - xfrm_replay_state_esn_len(x->replay_esn), + x->preplay_esn = kmemdup(orig->preplay_esn, + xfrm_replay_state_esn_len(orig->preplay_esn), GFP_KERNEL); - if (!x->preplay_esn) { - kfree(x->replay_esn); + if (!x->preplay_esn) return -ENOMEM; - } return 0; } diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index a91b2af64ec4..8e94279af47d 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -95,10 +95,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs size_t length) { return -EINVAL; } -static inline int ib_umem_find_best_pgsz(struct ib_umem *umem, - unsigned long pgsz_bitmap, - unsigned long virt) { - return -EINVAL; +static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt) +{ + return 0; } #endif /* CONFIG_INFINIBAND_USER_MEM */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index b550ae89bf85..6dd3b5284fd1 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -278,6 +278,25 @@ struct rvt_rq { spinlock_t lock ____cacheline_aligned_in_smp; }; +/** + * rvt_get_rq_count - count numbers of request work queue entries + * in circular buffer + * @rq: data structure for request queue entry + * @head: head indices of the circular buffer + * @tail: tail indices of the circular buffer + * + * Return - total number of entries in the Receive Queue + */ + +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) +{ + u32 count = head - tail; + + if ((s32)count < 0) + count += rq->size; + return count; +} + /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. 
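[Editor's note] rvt_get_rq_count() above counts entries between two free-running indices into a circular buffer of rq->size slots: when head has wrapped past tail, the unsigned difference comes out "negative" when viewed as s32, and one buffer size is added back. A worked standalone example of that arithmetic (plain function, not the kernel structure):

#include <assert.h>
#include <stdint.h>

static uint32_t rq_count(uint32_t size, uint32_t head, uint32_t tail)
{
	uint32_t count = head - tail;	/* wraps modulo 2^32 */

	if ((int32_t)count < 0)		/* head wrapped past tail */
		count += size;
	return count;
}

int main(void)
{
	assert(rq_count(8, 5, 2) == 3);	/* no wrap: 5 - 2 */
	assert(rq_count(8, 2, 5) == 5);	/* wrapped: (2 - 5) + 8, slots 5,6,7,0,1 */
	return 0;
}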
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 05eabfd5d0d3..9f382e7d4579 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -88,7 +88,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, static inline void uobj_put_destroy(struct ib_uobject *uobj) { - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); + rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); } static inline void uobj_put_read(struct ib_uobject *uobj) diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 2568cb0627ec..fac8e89aed81 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, struct fc_frame *); /* libfcoe funcs */ -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int); int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, const struct libfc_function_template *, int init_fcp); u32 fcoe_fc_crc(struct fc_frame *fp); diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index c25fb86ffae9..b3bbd10eb3f0 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -132,6 +132,9 @@ struct iscsi_task { void *dd_data; /* driver/transport data */ }; +/* invalid scsi_task pointer */ +#define INVALID_SCSI_TASK (struct iscsi_task *)-1l + static inline int iscsi_task_has_unsol_data(struct iscsi_task *task) { return task->unsol_r2t.data_length > task->unsol_r2t.sent; diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 91bd749a02f7..d00326f511e9 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -11,6 +11,7 @@ #include <linux/scatterlist.h> #include <scsi/scsi_device.h> #include <scsi/scsi_request.h> +#include <linux/kabi.h> struct Scsi_Host; struct scsi_driver; @@ -141,6 +142,11 @@ struct scsi_cmnd { unsigned long state; /* Command completion state */ unsigned char tag; /* SCSI-II queued command tag */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h index 731ac09ed231..5b567b43e1b1 100644 --- a/include/scsi/scsi_common.h +++ b/include/scsi/scsi_common.h @@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd) scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]); } +static inline unsigned char +scsi_command_control(const unsigned char *cmnd) +{ + return (cmnd[0] == VARIABLE_LENGTH_CMD) ? 
+ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1]; +} + /* Returns a human-readable name for the device */ extern const char *scsi_device_type(unsigned type); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 202f4d6a4342..81bd736c53d4 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -8,6 +8,7 @@ #include <linux/blkdev.h> #include <scsi/scsi.h> #include <linux/atomic.h> +#include <linux/kabi.h> struct device; struct request_queue; @@ -227,6 +228,12 @@ struct scsi_device { struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); @@ -311,6 +318,12 @@ struct scsi_target { char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + unsigned long starget_data[0]; /* for the transport */ /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 31e0d6ca1eba..c61948674006 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -10,6 +10,7 @@ #include <linux/seq_file.h> #include <linux/blk-mq.h> #include <scsi/scsi.h> +#include <linux/kabi.h> struct block_device; struct completion; @@ -486,6 +487,11 @@ struct scsi_host_template { */ unsigned int cmd_size; struct scsi_host_cmd_pool *cmd_pool; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; /* @@ -691,6 +697,11 @@ struct Scsi_Host { */ struct device *dma_dev; + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); + /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3..8c18dc6d3fde 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index bc88d6f964da..006f01922439 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -59,6 +59,7 @@ struct snd_compr_runtime { * @direction: stream direction, playback/recording * @metadata_set: metadata set flag, true when set * @next_track: has userspace signal next track transition, true when set + * @partial_drain: undergoing partial_drain for stream, true when set * @private_data: pointer to DSP private data */ struct snd_compr_stream { @@ -70,6 +71,7 @@ struct snd_compr_stream { enum snd_compr_direction direction; bool metadata_set; bool next_track; + bool partial_drain; void *private_data; }; @@ -173,7 +175,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - stream->runtime->state = SNDRV_PCM_STATE_SETUP; + /* for partial_drain case we are back to running state on success */ + if (stream->partial_drain) { + stream->runtime->state = SNDRV_PCM_STATE_RUNNING; + stream->partial_drain = false; /* clear this flag as well */ + } else { + 
stream->runtime->state = SNDRV_PCM_STATE_SETUP; + } wake_up(&stream->runtime->sleep); } diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 9a0393cf024c..8341e2c48982 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -254,6 +254,7 @@ struct hda_codec { unsigned int force_pin_prefix:1; /* Add location prefix */ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */ + unsigned int forced_resume:1; /* forced resume for jack */ #ifdef CONFIG_PM unsigned long power_on_acct; @@ -494,6 +495,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec); static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} #endif +static inline bool hda_codec_need_resume(struct hda_codec *codec) +{ + return !codec->relaxed_resume && codec->jacktbl.used; +} + #ifdef CONFIG_SND_HDA_PATCH_LOADER /* * patch firmware diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h index 5141f8ffbb12..4c1b9bebbd60 100644 --- a/include/sound/hda_regmap.h +++ b/include/sound/hda_regmap.h @@ -24,6 +24,9 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, unsigned int val); int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val); +int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val); +void snd_hdac_regmap_sync(struct hdac_device *codec); /** * snd_hdac_regmap_encode_verb - encode the verb to a pseudo register diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index fb9dce4c6928..44e57bcc4a57 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -87,6 +87,7 @@ struct hdac_device { /* regmap */ struct regmap *regmap; + struct mutex regmap_lock; struct snd_array vendor_verbs; bool lazy_cache:1; /* don't wake up for writes */ bool caps_overwriting:1; /* caps overwrite being in process */ diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h index f657fd8fc0ad..f38947b9a1b9 100644 --- a/include/sound/intel-nhlt.h +++ b/include/sound/intel-nhlt.h @@ -112,6 +112,11 @@ struct nhlt_vendor_dmic_array_config { /* TODO add vendor mic config */ } __packed; +enum { + NHLT_CONFIG_TYPE_GENERIC = 0, + NHLT_CONFIG_TYPE_MIC_ARRAY = 1 +}; + enum { NHLT_MIC_ARRAY_2CH_SMALL = 0xa, NHLT_MIC_ARRAY_2CH_BIG = 0xb, diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index a36b7227a15a..334842daa904 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -61,6 +61,7 @@ struct snd_rawmidi_runtime { size_t avail_min; /* min avail for wakeup */ size_t avail; /* max used buffer for wakeup */ size_t xruns; /* over/underruns counter */ + int buffer_ref; /* buffer reference count */ /* misc */ spinlock_t lock; wait_queue_head_t sleep; diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h index f9024c7a1600..02e1d7778354 100644 --- a/include/sound/rt5670.h +++ b/include/sound/rt5670.h @@ -12,6 +12,7 @@ struct rt5670_platform_data { int jd_mode; bool in2_diff; bool dev_gpio; + bool gpio1_is_ext_spk_en; bool dmic_en; unsigned int dmic1_data_pin; diff --git a/include/sound/soc.h b/include/sound/soc.h index f264c6509f00..6d2662c3126c 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1059,6 +1059,7 @@ struct snd_soc_card { const struct snd_soc_dapm_route *of_dapm_routes; int num_of_dapm_routes; bool fully_routed; + bool disable_route_checks; /* lists of probed devices 
belonging to this card */ struct list_head component_dev_list; diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index a49d37140a64..591cd9e4692c 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -676,7 +676,7 @@ struct iscsi_session { atomic_t session_logout; atomic_t session_reinstatement; atomic_t session_stop_active; - atomic_t sleep_on_sess_wait_comp; + atomic_t session_close; /* connection list */ struct list_head sess_conn_list; struct list_head cr_active_list; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 51b6f50eabee..0deeff9b4496 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -69,6 +69,7 @@ int transport_backend_register(const struct target_backend_ops *); void target_backend_unregister(const struct target_backend_ops *); void target_complete_cmd(struct se_cmd *, u8); +void target_set_cmd_data_length(struct se_cmd *, int); void target_complete_cmd_with_length(struct se_cmd *, u8, int); void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index d6e556c0a085..b04c29270973 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -74,11 +74,12 @@ static inline void bpf_test_probe_##call(void) \ { \ check_trace_callback_type_##call(__bpf_trace_##template); \ } \ +typedef void (*btf_trace_##call)(void *__data, proto); \ static struct bpf_raw_event_map __used \ __attribute__((section("__bpf_raw_tp_map"))) \ __bpf_trace_tp_map_##call = { \ .tp = &__tracepoint_##call, \ - .bpf_func = (void *)__bpf_trace_##template, \ + .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \ .num_args = COUNT_ARGS(args), \ .writable_size = size, \ }; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 75ae1899452b..94a3adb65b8a 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1159,25 +1159,27 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free, TRACE_EVENT(find_free_extent, - TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes, + TP_PROTO(const struct btrfs_root *root, u64 num_bytes, u64 empty_size, u64 data), - TP_ARGS(fs_info, num_bytes, empty_size, data), + TP_ARGS(root, num_bytes, empty_size, data), TP_STRUCT__entry_btrfs( + __field( u64, root_objectid ) __field( u64, num_bytes ) __field( u64, empty_size ) __field( u64, data ) ), - TP_fast_assign_btrfs(fs_info, + TP_fast_assign_btrfs(root->fs_info, + __entry->root_objectid = root->root_key.objectid; __entry->num_bytes = num_bytes; __entry->empty_size = empty_size; __entry->data = data; ), TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)", - show_root_type(BTRFS_EXTENT_TREE_OBJECTID), + show_root_type(__entry->root_objectid), __entry->num_bytes, __entry->empty_size, __entry->data, __print_flags((unsigned long)__entry->data, "|", BTRFS_GROUP_FLAGS)) diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index 7ecaa65b7106..c2f580fd371b 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset, TRACE_EVENT(iocost_ioc_vrate_adj, - TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2], + TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm, u32 rq_wait_pct, int nr_lagging, int nr_shortages, int nr_surpluses), @@ -155,8 +155,8 @@ 
TRACE_EVENT(iocost_ioc_vrate_adj, __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; - __entry->read_missed_ppm = (*missed_ppm)[READ]; - __entry->write_missed_ppm = (*missed_ppm)[WRITE]; + __entry->read_missed_ppm = missed_ppm[READ]; + __entry->write_missed_ppm = missed_ppm[WRITE]; __entry->rq_wait_pct = rq_wait_pct; __entry->nr_lagging = nr_lagging; __entry->nr_shortages = nr_shortages; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 7fd11ec1c9a4..2464311b0389 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1638,17 +1638,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr, - int status + const struct ib_send_wr *wr ), - TP_ARGS(wr, status), + TP_ARGS(wr), TP_STRUCT__entry( __field(const void *, cqe) __field(unsigned int, num_sge) __field(u32, inv_rkey) - __field(int, status) ), TP_fast_assign( @@ -1656,12 +1654,11 @@ TRACE_EVENT(svcrdma_post_send, __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? wr->ex.invalidate_rkey : 0; - __entry->status = status; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d", + TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", __entry->cqe, __entry->num_sge, - __entry->inv_rkey, __entry->status + __entry->inv_rkey ) ); @@ -1726,26 +1723,23 @@ TRACE_EVENT(svcrdma_wc_receive, TRACE_EVENT(svcrdma_post_rw, TP_PROTO( const void *cqe, - int sqecount, - int status + int sqecount ), - TP_ARGS(cqe, sqecount, status), + TP_ARGS(cqe, sqecount), TP_STRUCT__entry( __field(const void *, cqe) __field(int, sqecount) - __field(int, status) ), TP_fast_assign( __entry->cqe = cqe; __entry->sqecount = sqecount; - __entry->status = status; ), - TP_printk("cqe=%p sqecount=%d status=%d", - __entry->cqe, __entry->sqecount, __entry->status + TP_printk("cqe=%p sqecount=%d", + __entry->cqe, __entry->sqecount ) ); @@ -1841,6 +1835,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, DEFINE_SQ_EVENT(full); DEFINE_SQ_EVENT(retry); +TRACE_EVENT(svcrdma_sq_post_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + int status + ), + + TP_ARGS(rdma, status), + + TP_STRUCT__entry( + __field(int, avail) + __field(int, depth) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->avail = atomic_read(&rdma->sc_sq_avail); + __entry->depth = rdma->sc_sq_depth; + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", + __get_str(addr), __entry->avail, __entry->depth, + __entry->status + ) +); + #endif /* _TRACE_RPCRDMA_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 191fe447f990..059b6e45a028 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -400,7 +400,7 @@ enum rxrpc_tx_point { EM(rxrpc_cong_begin_retransmission, " Retrans") \ EM(rxrpc_cong_cleared_nacks, " Cleared") \ EM(rxrpc_cong_new_low_nack, " NewLowN") \ - EM(rxrpc_cong_no_change, "") \ + EM(rxrpc_cong_no_change, " -") \ EM(rxrpc_cong_progress, " Progres") \ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ EM(rxrpc_cong_rtt_window_end, " RttWinE") \ @@ -1112,18 +1112,17 @@ TRACE_EVENT(rxrpc_rtt_tx, TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, - s64 rtt, u8 nr, s64 avg), 
+ u32 rtt, u32 rto), - TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), + TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_rx_trace, why ) - __field(u8, nr ) __field(rxrpc_serial_t, send_serial ) __field(rxrpc_serial_t, resp_serial ) - __field(s64, rtt ) - __field(u64, avg ) + __field(u32, rtt ) + __field(u32, rto ) ), TP_fast_assign( @@ -1132,18 +1131,16 @@ TRACE_EVENT(rxrpc_rtt_rx, __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; - __entry->nr = nr; - __entry->avg = avg; + __entry->rto = rto; ), - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", + TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", __entry->call, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, - __entry->nr, - __entry->avg) + __entry->rto) ); TRACE_EVENT(rxrpc_timer, @@ -1544,6 +1541,41 @@ TRACE_EVENT(rxrpc_notify_socket, __entry->serial) ); +TRACE_EVENT(rxrpc_rx_discard_ack, + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, + rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, + rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), + + TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, + prev_pkt, call_ackr_prev), + + TP_STRUCT__entry( + __field(unsigned int, debug_id ) + __field(rxrpc_serial_t, serial ) + __field(rxrpc_seq_t, first_soft_ack) + __field(rxrpc_seq_t, call_ackr_first) + __field(rxrpc_seq_t, prev_pkt) + __field(rxrpc_seq_t, call_ackr_prev) + ), + + TP_fast_assign( + __entry->debug_id = debug_id; + __entry->serial = serial; + __entry->first_soft_ack = first_soft_ack; + __entry->call_ackr_first = call_ackr_first; + __entry->prev_pkt = prev_pkt; + __entry->call_ackr_prev = call_ackr_prev; + ), + + TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", + __entry->debug_id, + __entry->serial, + __entry->first_soft_ack, + __entry->call_ackr_first, + __entry->prev_pkt, + __entry->call_ackr_prev) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h index 7475c7be165a..d4aac3436595 100644 --- a/include/trace/events/sctp.h +++ b/include/trace/events/sctp.h @@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe, __entry->pathmtu = asoc->pathmtu; __entry->rwnd = asoc->peer.rwnd; __entry->unack_data = asoc->unack_data; - - if (trace_sctp_probe_path_enabled()) { - struct sctp_transport *sp; - - list_for_each_entry(sp, &asoc->peer.transport_addr_list, - transports) { - trace_sctp_probe_path(sp, asoc); - } - } ), TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ffa3c51dbb1a..f16e9fb97e9f 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -165,6 +165,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, DEFINE_RPC_RUNNING_EVENT(begin); DEFINE_RPC_RUNNING_EVENT(run_action); DEFINE_RPC_RUNNING_EVENT(complete); +DEFINE_RPC_RUNNING_EVENT(end); DECLARE_EVENT_CLASS(rpc_task_queued, @@ -356,10 +357,10 @@ TRACE_EVENT(rpc_xdr_overflow, __field(size_t, tail_len) __field(unsigned int, page_len) __field(unsigned int, len) - __string(progname, - xdr->rqst->rq_task->tk_client->cl_program->name) - __string(procedure, - xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) + __string(progname, xdr->rqst ? + xdr->rqst->rq_task->tk_client->cl_program->name : "unknown") + __string(procedure, xdr->rqst ? 
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown") ), TP_fast_assign( diff --git a/include/trace/events/target.h b/include/trace/events/target.h index 914a872dd343..e87a3716b0ac 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -140,6 +140,7 @@ TRACE_EVENT(target_sequencer_start, __field( unsigned int, opcode ) __field( unsigned int, data_length ) __field( unsigned int, task_attribute ) + __field( unsigned char, control ) __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) __string( initiator, cmd->se_sess->se_node_acl->initiatorname ) ), @@ -149,6 +150,7 @@ TRACE_EVENT(target_sequencer_start, __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; + __entry->control = scsi_command_control(cmd->t_task_cdb); memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); ), @@ -158,9 +160,7 @@ TRACE_EVENT(target_sequencer_start, show_opcode_name(__entry->opcode), __entry->data_length, __print_hex(__entry->cdb, 16), show_task_attribute_name(__entry->task_attribute), - scsi_command_size(__entry->cdb) <= 16 ? - __entry->cdb[scsi_command_size(__entry->cdb) - 1] : - __entry->cdb[1] + __entry->control ) ); @@ -175,6 +175,7 @@ TRACE_EVENT(target_cmd_complete, __field( unsigned int, opcode ) __field( unsigned int, data_length ) __field( unsigned int, task_attribute ) + __field( unsigned char, control ) __field( unsigned char, scsi_status ) __field( unsigned char, sense_length ) __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) @@ -187,6 +188,7 @@ TRACE_EVENT(target_cmd_complete, __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; + __entry->control = scsi_command_control(cmd->t_task_cdb); __entry->scsi_status = cmd->scsi_status; __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ? min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; @@ -203,9 +205,7 @@ TRACE_EVENT(target_cmd_complete, show_opcode_name(__entry->opcode), __entry->data_length, __print_hex(__entry->cdb, 16), show_task_attribute_name(__entry->task_attribute), - scsi_command_size(__entry->cdb) <= 16 ? - __entry->cdb[scsi_command_size(__entry->cdb) - 1] : - __entry->cdb[1] + __entry->control ) ); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 66282552db20..011e8faa608b 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -20,7 +20,6 @@ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ - {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ {I_REFERENCED, "I_REFERENCED"} \ ) @@ -192,7 +191,7 @@ TRACE_EVENT(inode_foreign_history, ), TP_fast_assign( - strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); + strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); __entry->history = history; @@ -221,7 +220,7 @@ TRACE_EVENT(inode_switch_wbs, ), TP_fast_assign( - strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32); + strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32); __entry->ino = inode->i_ino; __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb); __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); @@ -254,7 +253,7 @@ TRACE_EVENT(track_foreign_dirty, struct address_space *mapping = page_mapping(page); struct inode *inode = mapping ? 
mapping->host : NULL; - strncpy(__entry->name, bdi_dev_name(wb->bdi), 32); + strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->bdi_id = wb->bdi->id; __entry->ino = inode ? inode->i_ino : 0; __entry->memcg_id = wb->memcg_css->id; @@ -287,7 +286,7 @@ TRACE_EVENT(flush_foreign, ), TP_fast_assign( - strncpy(__entry->name, bdi_dev_name(wb->bdi), 32); + strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->frn_bdi_id = frn_bdi_id; __entry->frn_memcg_id = frn_memcg_id; @@ -499,8 +498,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before, int moved), - TP_ARGS(wb, work, moved), + TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -510,19 +510,17 @@ TRACE_EVENT(writeback_queue_io, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); - __entry->older = older_than_this ? *older_than_this : 0; - __entry->age = older_than_this ? - (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u", __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ + __entry->older, /* dirtied_before in jiffies */ + __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), __entry->cgroup_ino diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 77f7c1638eb1..066eda5a632a 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -119,6 +119,8 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#define SO_NETNS_COOKIE 71 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) @@ -143,5 +145,9 @@ #define SCM_TIMESTAMPING SO_TIMESTAMPING #endif +/* get the MARK of a flow which has been set by iptables */ +#define SO_MARK2 1000 +/* get the tvpcinfo which has been set by tcp_v4_syn_recv_sock_toa */ +#define SO_TVPC_INFO 5000 #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 2832134e5397..731780804c2f 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -248,6 +248,7 @@ enum transaction_flags { TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ + TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ }; struct binder_transaction_data { diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index c89c6495983d..32a5db900f47 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -116,6 +116,7 @@ #define AUDIT_FANOTIFY 1331 /* Fanotify access decision */ #define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */ #define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */ +#define AUDIT_BPF 1334 /* BPF subsystem */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
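The SO_NETNS_COOKIE option added to asm-generic/socket.h above is read with an ordinary getsockopt() call. Below is a minimal userspace sketch, assuming this backport also carries the matching sock_getsockopt() handler and that the updated header is installed (the fallback define mirrors the value added above):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 71  /* matches the define added above */
    #endif

    int main(void)
    {
        uint64_t cookie = 0;
        socklen_t len = sizeof(cookie);
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        /* The kernel reports the cookie of the socket's network namespace. */
        if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) == 0)
            printf("netns cookie: %llu\n", (unsigned long long)cookie);
        return 0;
    }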
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 77c6be96d676..fa3d7c7dbb57 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key { __u32 attach_type; /* program attach type */ }; +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -107,6 +113,17 @@ enum bpf_cmd { BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, BPF_BTF_GET_NEXT_ID, + BPF_MAP_LOOKUP_BATCH, + BPF_MAP_LOOKUP_AND_DELETE_BATCH, + BPF_MAP_UPDATE_BATCH, + BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, + BPF_LINK_DETACH, }; enum bpf_map_type { @@ -136,6 +153,9 @@ enum bpf_map_type { BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_DEVMAP_HASH, + BPF_MAP_TYPE_STRUCT_OPS, + BPF_MAP_TYPE_RINGBUF, + BPF_MAP_TYPE_INODE_STORAGE, }; /* Note that tracing related programs such as @@ -173,6 +193,11 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, + BPF_PROG_TYPE_STRUCT_OPS, + BPF_PROG_TYPE_EXT, + BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, }; enum bpf_attach_type { @@ -199,11 +224,36 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, + BPF_TRACE_FENTRY, + BPF_TRACE_FEXIT, + BPF_MODIFY_RETURN, + BPF_LSM_MAC, + BPF_TRACE_ITER, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, + BPF_SK_LOOKUP, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + + MAX_BPF_LINK_TYPE, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. @@ -227,6 +277,11 @@ enum bpf_attach_type { * When children program makes decision (like picking TCP CA or sock bind) * parent program has a chance to override it. * + * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of + * programs for a cgroup. Though it's possible to replace an old program at + * any position by also specifying BPF_F_REPLACE flag and position itself in + * replace_bpf_fd attribute. Old program at this position will be released. + * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. 
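As the attach-flags comment above explains, BPF_F_REPLACE pairs with BPF_F_ALLOW_MULTI so that one program in a cgroup's program list can be swapped in place; the descriptor of the program being replaced goes in the new replace_bpf_fd attribute (see the union bpf_attr comment further down). A hedged sketch of the raw syscall usage follows; cgroup_fd, old_prog_fd and new_prog_fd stand in for descriptors the caller already holds, and the attach type is only an example:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Replace old_prog_fd with new_prog_fd in a cgroup's multi-program list. */
    static int replace_cgroup_prog(int cgroup_fd, int old_prog_fd, int new_prog_fd)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = cgroup_fd;
        attr.attach_bpf_fd = new_prog_fd;
        attr.attach_type = BPF_CGROUP_INET_INGRESS;
        attr.attach_flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
        attr.replace_bpf_fd = old_prog_fd;  /* program being replaced */

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }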
* Ex1: @@ -245,6 +300,7 @@ enum bpf_attach_type { */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) +#define BPF_F_REPLACE (1U << 2) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel @@ -289,18 +345,45 @@ enum bpf_attach_type { #define BPF_F_TEST_STATE_FREQ (1U << 3) /* When BPF ldimm64's insn[0].src_reg != 0 then this can have - * two extensions: + * the following extensions: * - * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE - * insn[0].imm: map fd map fd - * insn[1].imm: 0 offset into value - * insn[0].off: 0 0 - * insn[1].off: 0 0 - * ldimm64 rewrite: address of map address of map[0]+offset - * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE + * insn[0].src_reg: BPF_PSEUDO_MAP_FD + * insn[0].imm: map fd + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map + * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 +/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE + * insn[0].imm: map fd + * insn[1].imm: offset into value + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map[0]+offset + * verifier type: PTR_TO_MAP_VALUE + */ #define BPF_PSEUDO_MAP_VALUE 2 +/* insn[0].src_reg: BPF_PSEUDO_BTF_ID + * insn[0].imm: kernel btd id of VAR + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the kernel variable + * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var + * is struct/union. + */ +#define BPF_PSEUDO_BTF_ID 3 +/* insn[0].src_reg: BPF_PSEUDO_FUNC + * insn[0].imm: insn offset to the func + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the function + * verifier type: PTR_TO_FUNC. + */ +#define BPF_PSEUDO_FUNC 4 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function @@ -308,45 +391,58 @@ enum bpf_attach_type { #define BPF_PSEUDO_CALL 1 /* flags for BPF_MAP_UPDATE_ELEM command */ -#define BPF_ANY 0 /* create new element or update existing */ -#define BPF_NOEXIST 1 /* create new element if it didn't exist */ -#define BPF_EXIST 2 /* update existing element */ -#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */ +enum { + BPF_ANY = 0, /* create new element or update existing */ + BPF_NOEXIST = 1, /* create new element if it didn't exist */ + BPF_EXIST = 2, /* update existing element */ + BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ +}; /* flags for BPF_MAP_CREATE command */ -#define BPF_F_NO_PREALLOC (1U << 0) +enum { + BPF_F_NO_PREALLOC = (1U << 0), /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ -#define BPF_F_NO_COMMON_LRU (1U << 1) + BPF_F_NO_COMMON_LRU = (1U << 1), /* Specify numa node during map creation */ -#define BPF_F_NUMA_NODE (1U << 2) - -#define BPF_OBJ_NAME_LEN 16U - + BPF_F_NUMA_NODE = (1U << 2), /* Flags for accessing BPF object from syscall side. */ -#define BPF_F_RDONLY (1U << 3) -#define BPF_F_WRONLY (1U << 4) + BPF_F_RDONLY = (1U << 3), + BPF_F_WRONLY = (1U << 4), /* Flag for stack_map, store build_id+offset instead of pointer */ -#define BPF_F_STACK_BUILD_ID (1U << 5) + BPF_F_STACK_BUILD_ID = (1U << 5), /* Zero-initialize hash function seed. This should only be used for testing. 
*/ -#define BPF_F_ZERO_SEED (1U << 6) + BPF_F_ZERO_SEED = (1U << 6), /* Flags for accessing BPF object from program side. */ -#define BPF_F_RDONLY_PROG (1U << 7) -#define BPF_F_WRONLY_PROG (1U << 8) + BPF_F_RDONLY_PROG = (1U << 7), + BPF_F_WRONLY_PROG = (1U << 8), /* Clone map from listener for newly accepted socket */ -#define BPF_F_CLONE (1U << 9) + BPF_F_CLONE = (1U << 9), + +/* Enable memory-mapping BPF map */ + BPF_F_MMAPABLE = (1U << 10), + +/* Create a map that is suitable to be an inner map with dynamic max entries */ + BPF_F_INNER_MAP = (1U << 12), +}; /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) +/* type for BPF_ENABLE_STATS */ +enum bpf_stats_type { + /* enabled run_time_ns and run_cnt */ + BPF_STATS_RUN_TIME = 0, +}; + enum bpf_stack_build_id_status { /* user space need an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, @@ -366,6 +462,8 @@ struct bpf_stack_build_id { }; }; +#define BPF_OBJ_NAME_LEN 16U + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -384,6 +482,10 @@ union bpf_attr { __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ + __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- + * struct stored as the + * map value + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -420,6 +522,8 @@ union bpf_attr { __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ + __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + __u32 attach_prog_fd; /* 0 to attach to vmlinux */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -433,6 +537,10 @@ union bpf_attr { __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; + __u32 replace_bpf_fd; /* previously attached eBPF + * program to replace if + * BPF_F_REPLACE is used + */ }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ @@ -462,6 +570,7 @@ union bpf_attr { __u32 prog_id; __u32 map_id; __u32 btf_id; + __u32 link_id; }; __u32 next_id; __u32 open_flags; @@ -482,7 +591,7 @@ union bpf_attr { __u32 prog_cnt; } query; - struct { + struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; } raw_tracepoint; @@ -510,6 +619,39 @@ union bpf_attr { __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; + + struct { /* struct used by BPF_LINK_CREATE command */ + __u32 prog_fd; /* eBPF program to attach */ + __u32 target_fd; /* object to attach to */ + __u32 attach_type; /* attach type */ + __u32 flags; /* extra flags */ + __aligned_u64 iter_info; /* extra bpf_iter_link_info */ + __u32 iter_info_len; /* iter_info length */ + } link_create; + + struct { /* struct used by BPF_LINK_UPDATE command */ + __u32 link_fd; /* link fd */ + /* new program fd to update link with */ + __u32 new_prog_fd; + __u32 flags; /* extra flags */ + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags */ + __u32 old_prog_fd; + } link_update; + + struct { + __u32 link_fd; + } link_detach; + + struct { /* struct used by BPF_ENABLE_STATS command */ + __u32 type; + } enable_stats; + + struct { /* struct used by BPF_ITER_CREATE command */ + __u32 link_fd; + __u32 flags; + } iter_create; + } 
__attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF @@ -560,10 +702,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * instead. * Return * 0 on success, or a negative error in case of failure. * @@ -1294,8 +1439,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if the *skb* task belongs to the cgroup2. - * * 1, if the *skb* task does not belong to the cgroup2. + * * 0, if current task belongs to the cgroup2. + * * 1, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) @@ -1425,45 +1570,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * more details. * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. - * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -1910,10 +2024,11 @@ union bpf_attr { * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or - * **AF_INET6**). 
Looking for a free port to bind to can be - * expensive, therefore binding to port is not permitted by the - * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) - * must be set to zero. + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick an unused + * port as long as 4-tuple is unique. Passing a non-zero port might + * lead to degraded performance. + * Return + * 0 on success, or a negative error in case of failure. + * @@ -2674,7 +2789,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) + * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) * Description * Get a bpf-local-storage from a *sk*. * @@ -2690,6 +2805,9 @@ union bpf_attr { * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf-local-storages residing at *sk*. * + * *sk* is a kernel **struct sock** pointer for LSM programs. + * *sk* is a **struct bpf_sock** pointer for other program types. + * * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used @@ -2702,7 +2820,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2710,6 +2828,30 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * + * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * Description + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct xdp_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. @@ -2750,6 +2892,535 @@ union bpf_attr { * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 + * * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * Description + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct sk_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, the length of the copied string is returned. This + * makes this helper useful in tracing programs for reading + * strings, and more importantly to get its length at runtime. See + * the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user**\ () helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. + * Return + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. + * + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * Description + * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. + * *rcv_nxt* is the ack_seq to be sent out. 
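The bpf_probe_read_user*()/bpf_probe_read_kernel*() split documented above is used from tracing programs in the obvious way; a minimal sketch, assuming libbpf's bpf_helpers.h/bpf_tracing.h, a kprobe-enabled kernel, and using do_unlinkat purely as an illustration (struct filename keeps its name pointer as the first member in kernels of this vintage):

    #include <linux/types.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct filename;  /* opaque; only its leading 'const char *name' is read */

    SEC("kprobe/do_unlinkat")
    int BPF_KPROBE(trace_unlink, int dfd, struct filename *name)
    {
        const char *fname;
        char buf[64];

        /* Both pointers are kernel addresses, so the _kernel variants apply. */
        bpf_probe_read_kernel(&fname, sizeof(fname), name);
        bpf_probe_read_kernel_str(buf, sizeof(buf), fname);
        bpf_printk("unlink: %s", buf);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";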
+ * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_send_signal_thread(u32 sig) + * Description + * Send signal *sig* to the thread corresponding to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if the bpf program can try again. + * + * u64 bpf_jiffies64(void) + * Description + * Obtain the 64-bit jiffies + * Return + * The 64-bit jiffies + * + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * Description + * For an eBPF program attached to a perf event, retrieve the + * branch records (**struct perf_branch_entry**) associated with *ctx* + * and store them in the buffer pointed to by *buf*, up to + * *size* bytes. + * Return + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to + * instead return the number of bytes required to store all the + * branch entries. If this flag is set, *buf* may be NULL. + * + * **-EINVAL** if arguments invalid or **size** not a multiple + * of **sizeof**\ (**struct perf_branch_entry**\ ). + * + * **-ENOENT** if architecture does not support branch records. + * + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * Description + * Returns 0 on success; values for *pid* and *tgid* as seen from the current + * *namespace* are returned in *nsdata*. + * Return + * 0 on success, or one of the following in case of failure: + * + * **-EINVAL** if dev and inum supplied don't match dev_t and inode number + * with nsfs of current task, or if dev conversion to dev_t lost high bits. + * + * **-ENOENT** if pidns does not exist for the current task. + * + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * Description + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct xdp_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) + * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. + * + * Select the *sk* as a result of a socket lookup. + * + * For the operation to succeed, the passed socket must be compatible + * with the packet description provided by the *ctx* object. + * + * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must + * be an exact match.
While IP family (**AF_INET** or + * **AF_INET6**) must be compatible, that is, IPv6 sockets + * that are not v6-only can be selected for IPv4 packets. + * + * Only TCP listeners and UDP unconnected sockets can be + * selected. *sk* can also be NULL to reset any previous + * selection. + * + * The *flags* argument can be a combination of the following values: + * + * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous + * socket selection, potentially done by a BPF program + * that ran before us. + * + * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip + * load-balancing within reuseport group for the socket + * being selected. + * + * On success *ctx->sk* will point to the selected socket. + * + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EAFNOSUPPORT** if socket family (*sk->family*) is + * not compatible with packet family (*ctx->family*). + * + * * **-EEXIST** if socket has already been selected, + * potentially by another program, and + * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. + * + * * **-EINVAL** if unsupported flags were specified. + * + * * **-EPROTOTYPE** if socket L4 protocol + * (*sk->protocol*) doesn't match packet protocol + * (*ctx->protocol*). + * + * * **-ESOCKTNOSUPPORT** if socket is not in allowed + * state (TCP listening or UDP unconnected). + * + * u64 bpf_get_netns_cookie(void *ctx) + * Description + * Retrieve the cookie (generated by the kernel) of the network + * namespace the input *ctx* is associated with. The network + * namespace cookie remains stable for its lifetime and provides + * a global identifier that can be assumed unique. If *ctx* is + * NULL, then the helper returns the cookie for the initial + * network namespace. The cookie itself is very similar to that + * of **bpf_get_socket_cookie**\ () helper, but for network + * namespaces instead of sockets. + * Return + * An 8-byte long opaque number. + * + * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) + * Description + * Get a bpf_local_storage from an *inode*. + * + * Logically, it could be thought of as getting the value from + * a *map* with *inode* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this + * helper enforces that the key must be an inode and the map must also + * be a **BPF_MAP_TYPE_INODE_STORAGE**. + * + * Underneath, the value is stored locally at *inode* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf_local_storage residing at *inode*. + * + * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf_local_storage will be + * created if one does not exist. *value* can be used + * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf_local_storage. If *value* is + * **NULL**, the new bpf_local_storage will be zero initialized. + * Return + * A bpf_local_storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf_local_storage. + * + * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) + * Description + * Delete a bpf_local_storage from an *inode*. + * Return + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found.
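Since the inode-storage helpers are new in this header, a short sketch of their intended use from a BPF LSM program may help; the map and section names are illustrative, and vmlinux.h plus libbpf's bpf_helpers.h/bpf_tracing.h are assumed:

    #include "vmlinux.h"  /* assumed BTF-generated header */
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
        __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, __u64);
    } open_count SEC(".maps");

    SEC("lsm/file_open")
    int BPF_PROG(count_file_opens, struct file *file)
    {
        __u64 *cnt;

        /* Zero-initialized storage is created on first use. */
        cnt = bpf_inode_storage_get(&open_count, file->f_inode, 0,
                                    BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (cnt)
            __sync_fetch_and_add(cnt, 1);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";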
+ * + * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * Description + * seq_printf uses seq_file seq_printf() to print out the format string. + * The *m* represents the seq_file. The *fmt* and *fmt_size* are for + * the format string itself. The *data* and *data_len* are format string + * arguments. The *data* are a u64 array and corresponding format string + * values are stored in the array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* array. + * The *data_len* is the *data* size in terms of bytes. + * + * Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory. + * Reading kernel memory may fail due to either an invalid address, or + * a valid address that would require a major memory fault. If reading kernel memory + * fails, the string for **%s** will be an empty string, and the ip + * address for **%p{i,I}{4,6}** will be 0. Not returning an error to + * the bpf program is consistent with what bpf_trace_printk() does for now. + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EBUSY** Percpu memory copy buffer is busy, can try again + * by returning 1 from bpf program. + * * **-EINVAL** Invalid arguments, or invalid/unsupported formats. + * * **-E2BIG** Too many format specifiers. + * * **-EOVERFLOW** Overflow happens, the same object will be tried again. + * + * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * Description + * seq_write uses seq_file seq_write() to write the data. + * The *m* represents the seq_file. The *data* and *len* represent the + * data to write in bytes. + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EOVERFLOW** Overflow happens, the same object will be tried again. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) + * Description + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to struct task_struct. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to a sufficiently large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack=<new value> + * Return + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. + * + * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * Description + * Copy *size* bytes from *data* into a ring buffer *ringbuf*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * If BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * 0, on success; + * < 0, on error. + * + * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) + * Description + * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * Return + * Valid pointer with *size* bytes of memory available; NULL, + * otherwise. + * + * void bpf_ringbuf_submit(void *data, u64 flags) + * Description + * Submit reserved ring buffer sample, pointed to by *data*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * If BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * Nothing. Always succeeds. + * + * void bpf_ringbuf_discard(void *data, u64 flags) + * Description + * Discard reserved ring buffer sample, pointed to by *data*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * If BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * Nothing. Always succeeds. + * + * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) + * Description + * Query various characteristics of provided ring buffer. What + * exactly is queried is determined by *flags*: + * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; + * - BPF_RB_RING_SIZE - the size of ring buffer; + * - BPF_RB_CONS_POS - consumer position (can wrap around); + * - BPF_RB_PROD_POS - producer(s) position (can wrap around); + * Data returned is just a momentary snapshot of the actual values + * and could be inaccurate, so this facility should be used to + * power heuristics and for reporting, not to make 100% correct + * calculations. + * Return + * Requested value, or 0, if flags are not recognized. + * + * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) + * Description + * Initialize the timer. + * First 4 bits of *flags* specify clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME and CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * Return + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs.
When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + * + * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) + * Description + * Configure the timer to call *callback_fn* static function. + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + * + * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) + * Description + * Set timer expiration N nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() is made. + * In such case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside map element the map + * owns the timer. The bpf_timer_set_callback() will increment refcnt + * of BPF program to make sure that callback_fn code stays valid. + * When user space reference to a map reaches zero all timers + * in a map are cancelled and corresponding program's refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If map is pinned in + * bpffs the callback_fn can re-arm itself indefinitely. + * bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if key/value layout matches across maps. + * Every bpf_timer_set_callback() can have different callback_fn. + * + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. + * + * long bpf_timer_cancel(struct bpf_timer *timer) + * Description + * Cancel the timer and wait for callback_fn to finish if it was running. + * Return + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer which would have led to a deadlock otherwise. + * + * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) + * Description + * See **bpf_get_cgroup_classid**\ () for the main description. + * This helper differs from **bpf_get_cgroup_classid**\ () in that + * the cgroup v1 net_cls class is retrieved only from the *skb*'s + * associated socket instead of the current process. + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) + * Description + * Redirect the packet to another net device of index *ifindex* + * and fill in L2 addresses from neighboring subsystem. This helper + * is somewhat similar to **bpf_redirect**\ (), except that it + * populates L2 addresses as well, meaning, internally, the helper + * relies on the neighbor lookup for the L2 address of the nexthop. 
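Two short sketches may make the new helpers above concrete (both assume vmlinux.h and libbpf's bpf_helpers.h; section, map and type names are illustrative). First, the reserve/submit pair is the zero-copy path into a BPF_MAP_TYPE_RINGBUF map:

    struct event {
        __u32 pid;
        char comm[16];
    };

    struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 256 * 1024);  /* power-of-2 multiple of page size */
    } events SEC(".maps");

    SEC("tracepoint/sched/sched_process_exec")
    int handle_exec(void *ctx)
    {
        struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);

        if (!e)
            return 0;  /* ring full, reservation failed */
        e->pid = bpf_get_current_pid_tgid() >> 32;
        bpf_get_current_comm(e->comm, sizeof(e->comm));
        bpf_ringbuf_submit(e, 0);  /* default wakeup policy */
        return 0;
    }

Second, the bpf_timer_* helpers compose as init, set_callback, start, with the struct bpf_timer embedded in a map value as the documentation requires:

    struct elem {
        struct bpf_timer timer;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct elem);
    } timers SEC(".maps");

    /* Callback signature is (map, key, value), per the doc text above. */
    static int timer_cb(void *map, int *key, struct elem *val)
    {
        bpf_printk("timer fired");
        return 0;  /* one-shot unless re-armed from the callback */
    }

    SEC("tracepoint/syscalls/sys_enter_getpid")
    int arm_timer(void *ctx)
    {
        int key = 0;
        struct elem *val = bpf_map_lookup_elem(&timers, &key);

        if (!val)
            return 0;
        /* A second init returns -EBUSY, which is harmless here. */
        bpf_timer_init(&val->timer, &timers, 1 /* CLOCK_MONOTONIC */);
        bpf_timer_set_callback(&val->timer, timer_cb);
        bpf_timer_start(&val->timer, 1000000000ULL /* 1s */, 0);
        return 0;
    }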
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ *	Description
+ *		See **bpf_get_cgroup_classid**\ () for the main description.
+ *		This helper differs from **bpf_get_cgroup_classid**\ () in that
+ *		the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ *		associated socket instead of the current process.
+ *	Return
+ *		The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
+ *	Description
+ *		Redirect the packet to another net device of index *ifindex*
+ *		and fill in L2 addresses from neighboring subsystem. This helper
+ *		is somewhat similar to **bpf_redirect**\ (), except that it
+ *		populates L2 addresses as well, meaning, internally, the helper
+ *		relies on the neighbor lookup for the L2 address of the nexthop.
+ *
+ *		The helper will perform a FIB lookup based on the skb's
+ *		networking header to get the address of the next hop, unless
+ *		this is supplied by the caller in the *params* argument. The
+ *		*plen* argument indicates the length of *params* and should be
+ *		set to 0 if *params* is NULL.
+ *
+ *		The *flags* argument is reserved and must be 0. The helper is
+ *		currently only supported for tc BPF program types.
+ *	Return
+ *		The helper returns **TC_ACT_REDIRECT** on success or
+ *		**TC_ACT_SHOT** on error.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ *	Description
+ *		Redirect the packet to another net device of index *ifindex*.
+ *		This helper is somewhat similar to **bpf_redirect**\ (), except
+ *		that the redirection happens to the *ifindex*' peer device and
+ *		the netns switch takes place from ingress to ingress without
+ *		going through the CPU's backlog queue.
+ *
+ *		The *flags* argument is reserved and must be 0. The helper is
+ *		currently only supported for tc BPF program types at the ingress
+ *		hook and for veth device types. The peer device must reside in a
+ *		different network namespace.
+ *	Return
+ *		The helper returns **TC_ACT_REDIRECT** on success or
+ *		**TC_ACT_SHOT** on error.
+ */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2862,7 +3533,69 @@ union bpf_attr {
 	FN(sk_storage_get),		\
 	FN(sk_storage_delete),		\
 	FN(send_signal),		\
-	FN(tcp_gen_syncookie),
+	FN(tcp_gen_syncookie),		\
+	FN(skb_output),			\
+	FN(probe_read_user),		\
+	FN(probe_read_kernel),		\
+	FN(probe_read_user_str),	\
+	FN(probe_read_kernel_str),	\
+	FN(tcp_send_ack),		\
+	FN(send_signal_thread),		\
+	FN(jiffies64),			\
+	FN(read_branch_records),	\
+	FN(get_ns_current_pid_tgid),	\
+	FN(xdp_output),			\
+	FN(get_netns_cookie),		\
+	FN(get_current_ancestor_cgroup_id),	\
+	FN(sk_assign),			\
+	FN(ktime_get_boot_ns),		\
+	FN(seq_printf),			\
+	FN(seq_write),			\
+	FN(sk_cgroup_id),		\
+	FN(sk_ancestor_cgroup_id),	\
+	FN(ringbuf_output),		\
+	FN(ringbuf_reserve),		\
+	FN(ringbuf_submit),		\
+	FN(ringbuf_discard),		\
+	FN(ringbuf_query),		\
+	FN(csum_level),			\
+	FN(skc_to_tcp6_sock),		\
+	FN(skc_to_tcp_sock),		\
+	FN(skc_to_tcp_timewait_sock),	\
+	FN(skc_to_tcp_request_sock),	\
+	FN(skc_to_udp6_sock),		\
+	FN(get_task_stack),		\
+	FN(load_hdr_opt),		\
+	FN(store_hdr_opt),		\
+	FN(reserve_hdr_opt),		\
+	FN(inode_storage_get),		\
+	FN(inode_storage_delete),	\
+	FN(d_path),			\
+	FN(copy_from_user),		\
+	FN(snprintf_btf),		\
+	FN(seq_printf_btf),		\
+	FN(skb_cgroup_classid),		\
+	FN(redirect_neigh),		\
+	FN(per_cpu_ptr),		\
+	FN(this_cpu_ptr),		\
+	FN(redirect_peer),		\
+	FN(task_storage_get),		\
+	FN(task_storage_delete),	\
+	FN(get_current_task_btf),	\
+	FN(bprm_opts_set),		\
+	FN(ktime_get_coarse_ns),	\
+	FN(ima_inode_hash),		\
+	FN(sock_from_file),		\
+	FN(check_mtu),			\
+	FN(for_each_map_elem),		\
+	FN(snprintf),			\
+	FN(sys_bpf),			\
+	FN(btf_find_by_name_kind),	\
+	FN(sys_close),			\
+	FN(timer_init),			\
+	FN(timer_set_callback),		\
+	FN(timer_start),		\
+	FN(timer_cancel),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -2877,69 +3610,128 @@ enum bpf_func_id {
 
 /* All flags used by eBPF helper functions, placed here. */
 
 /* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
-#define BPF_F_INVALIDATE_HASH		(1ULL << 1)
+enum {
+	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
+	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
+};
 
 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
  * First 4 bits are for passing the header field size.
*/ -#define BPF_F_HDR_FIELD_MASK 0xfULL +enum { + BPF_F_HDR_FIELD_MASK = 0xfULL, +}; /* BPF_FUNC_l4_csum_replace flags. */ -#define BPF_F_PSEUDO_HDR (1ULL << 4) -#define BPF_F_MARK_MANGLED_0 (1ULL << 5) -#define BPF_F_MARK_ENFORCE (1ULL << 6) +enum { + BPF_F_PSEUDO_HDR = (1ULL << 4), + BPF_F_MARK_MANGLED_0 = (1ULL << 5), + BPF_F_MARK_ENFORCE = (1ULL << 6), +}; /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ -#define BPF_F_INGRESS (1ULL << 0) +enum { + BPF_F_INGRESS = (1ULL << 0), +}; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ -#define BPF_F_TUNINFO_IPV6 (1ULL << 0) +enum { + BPF_F_TUNINFO_IPV6 = (1ULL << 0), +}; /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ -#define BPF_F_SKIP_FIELD_MASK 0xffULL -#define BPF_F_USER_STACK (1ULL << 8) +enum { + BPF_F_SKIP_FIELD_MASK = 0xffULL, + BPF_F_USER_STACK = (1ULL << 8), /* flags used by BPF_FUNC_get_stackid only. */ -#define BPF_F_FAST_STACK_CMP (1ULL << 9) -#define BPF_F_REUSE_STACKID (1ULL << 10) + BPF_F_FAST_STACK_CMP = (1ULL << 9), + BPF_F_REUSE_STACKID = (1ULL << 10), /* flags used by BPF_FUNC_get_stack only. */ -#define BPF_F_USER_BUILD_ID (1ULL << 11) + BPF_F_USER_BUILD_ID = (1ULL << 11), +}; /* BPF_FUNC_skb_set_tunnel_key flags. */ -#define BPF_F_ZERO_CSUM_TX (1ULL << 1) -#define BPF_F_DONT_FRAGMENT (1ULL << 2) -#define BPF_F_SEQ_NUMBER (1ULL << 3) +enum { + BPF_F_ZERO_CSUM_TX = (1ULL << 1), + BPF_F_DONT_FRAGMENT = (1ULL << 2), + BPF_F_SEQ_NUMBER = (1ULL << 3), +}; /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ -#define BPF_F_INDEX_MASK 0xffffffffULL -#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK +enum { + BPF_F_INDEX_MASK = 0xffffffffULL, + BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, /* BPF_FUNC_perf_event_output for sk_buff input context. */ -#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) + BPF_F_CTXLEN_MASK = (0xfffffULL << 32), +}; /* Current network namespace */ -#define BPF_F_CURRENT_NETNS (-1L) +enum { + BPF_F_CURRENT_NETNS = (-1L), +}; /* BPF_FUNC_skb_adjust_room flags. */ -#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0) +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), +}; -#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff -#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56 +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1) -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) -#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) -#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ BPF_ADJ_ROOM_ENCAP_L2_MASK) \ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) /* BPF_FUNC_sysctl_get_name flags. */ -#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0) +enum { + BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), +}; -/* BPF_FUNC_sk_storage_get flags */ -#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0) +/* BPF_FUNC_<kernel_obj>_storage_get flags */ +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), + /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility + * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. + */ + BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, +}; + +/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. 
 */
+enum {
+	BPF_SK_LOOKUP_F_REPLACE		= (1ULL << 0),
+	BPF_SK_LOOKUP_F_NO_REUSEPORT	= (1ULL << 1),
+};
+
+/* BPF_FUNC_bpf_ringbuf_submit, BPF_FUNC_bpf_ringbuf_discard, and
+ * BPF_FUNC_bpf_ringbuf_output flags.
+ */
+enum {
+	BPF_RB_NO_WAKEUP	= (1ULL << 0),
+	BPF_RB_FORCE_WAKEUP	= (1ULL << 1),
+};
+
+/* BPF_FUNC_bpf_ringbuf_query flags */
+enum {
+	BPF_RB_AVAIL_DATA = 0,
+	BPF_RB_RING_SIZE = 1,
+	BPF_RB_CONS_POS = 2,
+	BPF_RB_PROD_POS = 3,
+};
+
+/* BPF ring buffer constants */
+enum {
+	BPF_RINGBUF_BUSY_BIT	= (1U << 31),
+	BPF_RINGBUF_DISCARD_BIT	= (1U << 30),
+	BPF_RINGBUF_HDR_SZ	= 8,
+};
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
@@ -3187,6 +3979,8 @@ struct sk_msg_md {
 	__u32 remote_port;	/* Stored in network byte order */
 	__u32 local_port;	/* stored in host byte order */
 	__u32 size;		/* Total size of sk_msg */
+
+	__bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
 };
 
 struct sk_reuseport_md {
@@ -3263,7 +4057,7 @@ struct bpf_map_info {
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
-	__u32 :32;
+	__u32 btf_vmlinux_value_type_id;
 	__u64 netns_dev;
 	__u64 netns_ino;
 	__u32 btf_id;
@@ -3277,6 +4071,29 @@ struct bpf_btf_info {
 	__u32 id;
 } __attribute__((aligned(8)));
 
+struct bpf_link_info {
+	__u32 type;
+	__u32 id;
+	__u32 prog_id;
+	union {
+		struct {
+			__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
+			__u32 tp_name_len;     /* in/out: tp_name buffer len */
+		} raw_tracepoint;
+		struct {
+			__u32 attach_type;
+		} tracing;
+		struct {
+			__u64 cgroup_id;
+			__u32 attach_type;
+		} cgroup;
+		struct {
+			__u32 netns_ino;
+			__u32 attach_type;
+		} netns;
+	};
+} __attribute__((aligned(8)));
+
 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
  * by user and intended to be used by socket (e.g. to bind to, depends on
  * attach type).
@@ -3357,13 +4174,14 @@ struct bpf_sock_ops {
 };
 
 /* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG	(1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS	0xF		/* Mask of all currently
-							 * supported cb flags
-							 */
+enum {
+	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
+	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
+	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
+	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
+/* Mask of all currently supported cb flags */
+	BPF_SOCK_OPS_ALL_CB_FLAGS	= 0xF,
+};
 
 /* List of known BPF sock_ops operators.
  * New entries can only be added at the end
@@ -3442,8 +4260,10 @@ enum {
 	BPF_TCP_MAX_STATES	/* Leave at the end!
*/ }; -#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ -#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ +enum { + TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ + TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ +}; struct bpf_perf_event_value { __u64 counter; @@ -3451,12 +4271,16 @@ struct bpf_perf_event_value { __u64 running; }; -#define BPF_DEVCG_ACC_MKNOD (1ULL << 0) -#define BPF_DEVCG_ACC_READ (1ULL << 1) -#define BPF_DEVCG_ACC_WRITE (1ULL << 2) +enum { + BPF_DEVCG_ACC_MKNOD = (1ULL << 0), + BPF_DEVCG_ACC_READ = (1ULL << 1), + BPF_DEVCG_ACC_WRITE = (1ULL << 2), +}; -#define BPF_DEVCG_DEV_BLOCK (1ULL << 0) -#define BPF_DEVCG_DEV_CHAR (1ULL << 1) +enum { + BPF_DEVCG_DEV_BLOCK = (1ULL << 0), + BPF_DEVCG_DEV_CHAR = (1ULL << 1), +}; struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ @@ -3472,8 +4296,10 @@ struct bpf_raw_tracepoint_args { /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ -#define BPF_FIB_LOOKUP_DIRECT (1U << 0) -#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) +enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), +}; enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ @@ -3536,6 +4362,16 @@ struct bpf_fib_lookup { __u8 dmac[6]; /* ETH_ALEN */ }; +struct bpf_redir_neigh { + /* network family for lookup (AF_INET, AF_INET6) */ + __u32 nh_family; + /* network address of nexthop; skips fib lookup to find gateway */ + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; /* in6_addr; network order */ + }; +}; + enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ @@ -3545,9 +4381,11 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; -#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), +}; struct bpf_flow_keys { __u16 nhoff; @@ -3593,6 +4431,11 @@ struct bpf_spin_lock { __u32 val; }; +struct bpf_timer { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. @@ -3613,4 +4456,18 @@ struct bpf_sockopt { __s32 retval; }; +/* User accessible data for SK_LOOKUP programs. Add new fields at the end. 
 */
+struct bpf_sk_lookup {
+	__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+
+	__u32 family;		/* Protocol family (AF_INET, AF_INET6) */
+	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+	__u32 remote_ip4;	/* Network byte order */
+	__u32 remote_ip6[4];	/* Network byte order */
+	__u32 remote_port;	/* Network byte order */
+	__u32 local_ip4;	/* Network byte order */
+	__u32 local_ip6[4];	/* Network byte order */
+	__u32 local_port;	/* Host byte order */
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index c02dec97e1ce..284037e56fa8 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -145,6 +145,12 @@ enum {
 	BTF_VAR_GLOBAL_ALLOCATED,
 };
 
+enum btf_func_linkage {
+	BTF_FUNC_STATIC = 0,
+	BTF_FUNC_GLOBAL = 1,
+	BTF_FUNC_EXTERN = 2,
+};
+
 /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
  * additional information related to the variable such as its linkage.
  */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index b65c7ee75bc7..035e59eef05b 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -4,6 +4,11 @@
 
 #include <linux/btrfs.h>
 #include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
 
 /*
  * This header contains the structure definitions and constants used
@@ -650,6 +655,15 @@ struct btrfs_root_item {
 	__le64 reserved[8]; /* for future */
 } __attribute__ ((__packed__));
 
+/*
+ * Btrfs root item used to be smaller than current size. The old format ends
+ * where the member generation_v2 is.
+ */
+static inline __u32 btrfs_legacy_root_item_size(void)
+{
+	return offsetof(struct btrfs_root_item, generation_v2);
+}
+
 /*
  * this is used for both forward and backward root refs
  */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 240fdb9a60f6..9bbb14d7f7d7 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -274,6 +274,7 @@ struct vfs_ns_cap_data {
    arbitrary SCSI commands */
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
+/* Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility */
 
 #define CAP_SYS_ADMIN        21
 
@@ -366,8 +367,45 @@ struct vfs_ns_cap_data {
 
 #define CAP_AUDIT_READ		37
 
+/*
+ * Allow system performance and observability privileged operations
+ * using perf_events, i915_perf and other kernel subsystems
+ */
-#define CAP_LAST_CAP         CAP_AUDIT_READ
+#define CAP_PERFMON		38
+
+/*
+ * CAP_BPF allows the following BPF operations:
+ * - Creating all types of BPF maps
+ * - Advanced verifier features
+ *   - Indirect variable access
+ *   - Bounded loops
+ *   - BPF to BPF function calls
+ *   - Scalar precision tracking
+ *   - Larger complexity limits
+ *   - Dead code elimination
+ *   - And potentially other features
+ * - Loading BPF Type Format (BTF) data
+ * - Retrieve xlated and JITed code of BPF programs
+ * - Use bpf_spin_lock() helper
+ *
+ * CAP_PERFMON relaxes the verifier checks further:
+ * - BPF progs can use pointer-to-integer conversions
+ * - speculation attack hardening measures are bypassed
+ * - bpf_probe_read to read arbitrary kernel memory is allowed
+ * - bpf_trace_printk to print kernel memory is allowed
+ *
+ * CAP_SYS_ADMIN is required to use bpf_probe_write_user.
+ *
+ * CAP_SYS_ADMIN is required to iterate system-wide loaded
+ * programs, maps, links, BTFs and convert their IDs to file descriptors.
+ * + * CAP_PERFMON and CAP_BPF are required to load tracing programs. + * CAP_NET_ADMIN and CAP_BPF are required to load networking programs. + */ +#define CAP_BPF 39 + +#define CAP_LAST_CAP CAP_BPF #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h index 5ed721ad5b19..af2a44c08683 100644 --- a/include/uapi/linux/const.h +++ b/include/uapi/linux/const.h @@ -28,4 +28,9 @@ #define _BITUL(x) (_UL(1) << (x)) #define _BITULL(x) (_ULL(1) << (x)) +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) + +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + #endif /* _UAPI_LINUX_CONST_H */ diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index dbc7092e04b5..7f30393b92c3 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -39,6 +39,12 @@ struct dma_buf_sync { #define DMA_BUF_BASE 'b' #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +/* 32/64bitness of this uapi was botched in android, there's no difference + * between them in actual uapi, they're just different numbers. + */ #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64) #endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 8938b76c4ee3..7857aa413627 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -14,7 +14,7 @@ #ifndef _UAPI_LINUX_ETHTOOL_H #define _UAPI_LINUX_ETHTOOL_H -#include <linux/kernel.h> +#include <linux/const.h> #include <linux/types.h> #include <linux/if_ether.h> diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 39ccfe9311c3..b14f436f4ebd 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -17,7 +17,6 @@ #define FSCRYPT_POLICY_FLAGS_PAD_32 0x03 #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 -#define FSCRYPT_POLICY_FLAGS_VALID 0x07 /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 @@ -25,7 +24,7 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 -#define __FSCRYPT_MODE_MAX 9 +/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */ /* * Legacy policy version; ad-hoc KDF and no key verification. 
@@ -162,7 +161,7 @@ struct fscrypt_get_key_status_arg { #define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32 #define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK #define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY -#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID +#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */ #define FS_ENCRYPTION_MODE_INVALID 0 /* never used */ #define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS #define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 373cada89815..3ebc07e2e81a 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -870,6 +870,7 @@ struct fuse_notify_retrieve_in { /* Device ioctls: */ #define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t) +#define FUSE_DEV_IOC_RECOVERY _IOR(230, 0, uint32_t) struct fuse_lseek_in { uint64_t fh; diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index 2622b5a3e616..9a31ea2ad1cf 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -137,6 +137,7 @@ struct icmp6hdr { #define ICMPV6_HDR_FIELD 0 #define ICMPV6_UNK_NEXTHDR 1 #define ICMPV6_UNK_OPTION 2 +#define ICMPV6_HDR_INCOMP 3 /* * constants for (set|get)sockopt diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index bc2bcdec377b..769050771423 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h @@ -24,6 +24,22 @@ struct sockaddr_alg { __u8 salg_name[64]; }; +/* + * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an + * arbitrary-length field. We had to keep the original struct above for source + * compatibility with existing userspace programs, though. Use the new struct + * below if support for very long algorithm names is needed. To do this, + * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and + * copy algname (including the null terminator) into salg_name. + */ +struct sockaddr_alg_new { + __u16 salg_family; + __u8 salg_type[14]; + __u32 salg_feat; + __u32 salg_mask; + __u8 salg_name[]; +}; + struct af_alg_iv { __u32 ivlen; __u8 iv[0]; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index a1ff345b3f33..75dffd78363a 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -64,9 +64,11 @@ struct inet_diag_req_raw { enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, + INET_DIAG_REQ_SK_BPF_STORAGES, + __INET_DIAG_REQ_MAX, }; -#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE +#define INET_DIAG_REQ_MAX (__INET_DIAG_REQ_MAX - 1) /* Bytecode is sequence of 4 byte commands followed by variable arguments. 
* All the commands identified by "code" are conditional jumps forward: @@ -154,6 +156,7 @@ enum { INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ INET_DIAG_MD5SIG, INET_DIAG_ULP_INFO, + INET_DIAG_SK_BPF_STORAGES, __INET_DIAG_MAX, }; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 85387c76c24f..472cd5bc5567 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -808,7 +808,8 @@ #define SW_LINEIN_INSERT 0x0d /* set = inserted */ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ -#define SW_MAX 0x0f +#define SW_MACHINE_COVER 0x10 /* set = cover closed */ +#define SW_MAX 0x10 #define SW_CNT (SW_MAX+1) /* diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h index 0ff8f7477847..fadf2db71fe8 100644 --- a/include/uapi/linux/kernel.h +++ b/include/uapi/linux/kernel.h @@ -3,13 +3,6 @@ #define _UAPI_LINUX_KERNEL_H #include <linux/sysinfo.h> - -/* - * 'kernel.h' contains some often-used function prototypes etc - */ -#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) -#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) - -#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#include <linux/const.h> #endif /* _UAPI_LINUX_KERNEL_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 52641d8ca9e8..1b6b8e05868d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -189,9 +189,11 @@ struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; @@ -766,9 +768,10 @@ struct kvm_ppc_resize_hpt { #define KVM_VM_PPC_HV 1 #define KVM_VM_PPC_PR 2 -/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */ -#define KVM_VM_MIPS_TE 0 +/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */ +#define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 +#define KVM_VM_MIPS_TE 2 #define KVM_S390_SIE_PAGE_OFFSET 1 diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index f9a1be7fc696..ead2e72e5c88 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -21,7 +21,7 @@ #define _UAPI_LINUX_LIGHTNVM_H #ifdef __KERNEL__ -#include <linux/kernel.h> +#include <linux/const.h> #include <linux/ioctl.h> #else /* __KERNEL__ */ #include <stdio.h> diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 903cc2d2750b..84ae605c0643 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -93,6 +93,7 @@ #define BALLOON_KVM_MAGIC 0x13661366 #define ZSMALLOC_MAGIC 0x58295829 #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ +#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ #define Z3FOLD_MAGIC 0x33 #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 00c08120f3ba..27a39847d55c 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -3,6 +3,7 @@ #define LINUX_MMC_IOCTL_H #include <linux/types.h> +#include <linux/major.h> struct mmc_ioc_cmd { /* diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index c36177a86516..a1fd6173e2db 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -2,7 +2,7 @@ #ifndef _UAPI__LINUX_MROUTE6_H #define _UAPI__LINUX_MROUTE6_H -#include <linux/kernel.h> +#include <linux/const.h> #include <linux/types.h> 
#include <linux/sockios.h> #include <linux/in6.h> /* For struct sockaddr_in6. */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index ed8881ad18ed..0a995403172c 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -132,7 +132,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index 1d41810d17e2..a9de5c5f00b6 100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -55,6 +55,8 @@ enum ctattr_type { CTA_LABELS, CTA_LABELS_MASK, CTA_SYNPROXY, + CTA_VPCID, + CTA_VMIP, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h index a13137afc429..70af02092d16 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h +++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h @@ -5,7 +5,7 @@ #define NFCT_HELPER_STATUS_DISABLED 0 #define NFCT_HELPER_STATUS_ENABLED 1 -enum nfnl_acct_msg_types { +enum nfnl_cthelper_msg_types { NFNL_MSG_CTHELPER_NEW, NFNL_MSG_CTHELPER_GET, NFNL_MSG_CTHELPER_DEL, diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h index a8283f7dbc51..b8c6bb233ac1 100644 --- a/include/uapi/linux/netfilter/x_tables.h +++ b/include/uapi/linux/netfilter/x_tables.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_X_TABLES_H #define _UAPI_X_TABLES_H -#include <linux/kernel.h> +#include <linux/const.h> #include <linux/types.h> #define XT_FUNCTION_MAXNAMELEN 30 diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 21db87138514..1f4d4aeb9cf9 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -2,7 +2,7 @@ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H -#include <linux/kernel.h> +#include <linux/const.h> #include <linux/socket.h> /* for __kernel_sa_family_t */ #include <linux/types.h> diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 8572930cf5b0..54a78529c8b3 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -136,6 +136,8 @@ #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 + +#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004 /* * Since the validity of these bits depends on whether * they're set in the argument or response, have separate @@ -143,6 +145,7 @@ */ #define EXCHGID4_FLAG_MASK_A 0x40070103 #define EXCHGID4_FLAG_MASK_R 0x80070103 +#define EXCHGID4_2_FLAG_MASK_R 0x80070107 #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index bb7b271397a6..ceccd980ffcf 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1131,7 +1131,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 37 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 5011259b8f67..edbbf4bfdd9e 100644 --- 
a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1160,8 +1160,8 @@ enum {
  *	[TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
  */
 
-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	BIT(0)
-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	BIT(1)
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	_BITUL(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	_BITUL(1)
 
 enum {
 	TCA_TAPRIO_ATTR_UNSPEC,
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index a71b6e3b03eb..83ee45fa634b 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -81,7 +81,8 @@ struct seccomp_metadata {
 
 struct ptrace_syscall_info {
 	__u8 op;	/* PTRACE_SYSCALL_INFO_* */
-	__u32 arch __attribute__((__aligned__(sizeof(__u32))));
+	__u8 pad[3];
+	__u32 arch;
 	__u64 instruction_pointer;
 	__u64 stack_pointer;
 	union {
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 90734aa5aa36..b5f901af79f0 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -93,5 +93,6 @@ struct seccomp_notif_resp {
 #define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
 #define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1,	\
 						struct seccomp_notif_resp)
-#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOR(2, __u64)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOW(2, __u64)
+
 #endif /* _UAPI_LINUX_SECCOMP_H */
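A brief user-space sketch of the ioctl whose direction is corrected above (editorial illustration; assumes a seccomp user-notification listener fd obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER and a notification id read from struct seccomp_notif):

	#include <sys/ioctl.h>
	#include <linux/seccomp.h>

	/* Returns non-zero if the notifying task is still alive, i.e. it is
	 * still safe to act on its behalf. */
	static int notif_id_valid(int listener_fd, __u64 id)
	{
		return ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_ID_VALID, &id) == 0;
	}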
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index e5925009a652..5f74a5f6091d 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -36,4 +36,30 @@ enum sknetlink_groups {
 };
 #define SKNLGRP_MAX	(__SKNLGRP_MAX - 1)
 
+enum {
+	SK_DIAG_BPF_STORAGE_REQ_NONE,
+	SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+	__SK_DIAG_BPF_STORAGE_REQ_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REQ_MAX	(__SK_DIAG_BPF_STORAGE_REQ_MAX - 1)
+
+enum {
+	SK_DIAG_BPF_STORAGE_REP_NONE,
+	SK_DIAG_BPF_STORAGE,
+	__SK_DIAG_BPF_STORAGE_REP_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REP_MAX	(__SK_DIAG_BPF_STORAGE_REP_MAX - 1)
+
+enum {
+	SK_DIAG_BPF_STORAGE_NONE,
+	SK_DIAG_BPF_STORAGE_PAD,
+	SK_DIAG_BPF_STORAGE_MAP_ID,
+	SK_DIAG_BPF_STORAGE_MAP_VALUE,
+	__SK_DIAG_BPF_STORAGE_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_MAX	(__SK_DIAG_BPF_STORAGE_MAX - 1)
+
 #endif /* _UAPI__SOCK_DIAG_H__ */
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index a95349ec195c..673ed65d334b 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
 #ifndef _UAPI_LINUX_SYSCTL_H
 #define _UAPI_LINUX_SYSCTL_H
 
-#include <linux/kernel.h>
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 
@@ -429,6 +429,7 @@ enum
 	NET_TCP_TW_IGNORE_SYN_TSVAL_ZERO=126,
 	NET_IPV4_TCP_RTO_MIN=127,
 	NET_IPV4_TCP_RTO_MAX=128,
+	NET_IPV4_TCP_TW_TIMEOUT=129,
 };
 
 enum {
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 9066f2e16f7b..a3919d181afd 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -132,8 +132,8 @@ enum {
 #define TCP_FULLNAT_REAL	10000
 
 struct tcp_fullnat_real_opt {
-	u32 real_ip;
-	u16 real_port;
+	__u32 real_ip;
+	__u16 real_port;
 };
 
 #define TCP_REPAIR_ON		1
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 900a32e63424..6a3ac496a56c 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -39,7 +39,7 @@
  * WARNING: These flags are no longer used and have been superseded by the
  * TTY_PORT_ flags in the iflags field (and not userspace-visible)
  */
-#ifndef _KERNEL_
+#ifndef __KERNEL__
 #define ASYNCB_INITIALIZED	31 /* Serial port was initialized */
 #define ASYNCB_SUSPENDED	30 /* Serial port is suspended */
 #define ASYNCB_NORMAL_ACTIVE	29 /* Normal device is active */
@@ -81,7 +81,7 @@
 #define ASYNC_SPD_WARP		(ASYNC_SPD_HI|ASYNC_SPD_SHI)
 #define ASYNC_SPD_MASK		(ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
 
-#ifndef _KERNEL_
+#ifndef __KERNEL__
 /* These flags are no longer used (and were always masked from userspace) */
 #define ASYNC_INITIALIZED	(1U << ASYNCB_INITIALIZED)
 #define ASYNC_NORMAL_ACTIVE	(1U << ASYNCB_NORMAL_ACTIVE)
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 2b623f36af6b..5633c804683e 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -364,6 +364,9 @@ struct usb_config_descriptor {
 
 /*-------------------------------------------------------------------------*/
 
+/* USB String descriptors can contain at most 126 characters. */
+#define USB_MAX_STRING_LEN	126
+
 /* USB_DT_STRING: String descriptor */
 struct usb_string_descriptor {
 	__u8  bLength;
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index d854cb19c42c..bfdae12cdacf 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
 	__u8   bControlSize;
 	__u8   bmControls[2];
 	__u8   iProcessing;
+	__u8   bmVideoStandards;
 } __attribute__((__packed__));
 
-#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(9+(n))
+#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(10+(n))
 
 /* 3.7.2.6. Extension Unit Descriptor */
 struct uvc_extension_unit_descriptor {
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index 9cec58a6a5ea..f79d7abe27db 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
 
 
 /* IOCTL to perform a VMM Device request larger than 1KB. */
-#define VBG_IOCTL_VMMDEV_REQUEST_BIG	_IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG	_IO('V', 3)
 
 
 /** VBG_IOCTL_HGCM_CONNECT data structure. */
@@ -198,7 +198,7 @@ struct vbg_ioctl_log {
 	} u;
 };
 
-#define VBG_IOCTL_LOG(s)		_IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+#define VBG_IOCTL_LOG(s)		_IO('V', 9)
 
 
 /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 9e843a147ead..cabc93118f9c 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -748,6 +748,21 @@ struct vfio_iommu_type1_info_cap_iova_range {
 	struct vfio_iova_range iova_ranges[];
 };
 
+/*
+ * The DMA available capability allows reporting of the current number of
+ * simultaneously outstanding DMA mappings that are allowed.
+ *
+ * The structure below defines version 1 of this capability.
+ *
+ * avail: specifies the current number of outstanding DMA mappings allowed.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
+
+struct vfio_iommu_type1_info_dma_avail {
+	struct vfio_info_cap_header header;
+	__u32	avail;
+};
+
 #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
 
 /**
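A minimal sketch of how user space might locate the capability defined above after VFIO_IOMMU_GET_INFO (editorial illustration; assumes "fd" is a VFIO container file descriptor and "info" was allocated with argsz large enough to hold the capability chain):

	struct vfio_info_cap_header *hdr;
	__u32 off;

	if (ioctl(fd, VFIO_IOMMU_GET_INFO, info) == 0 &&
	    (info->flags & VFIO_IOMMU_INFO_CAPS)) {
		/* cap offsets are relative to the start of the info buffer */
		for (off = info->cap_offset; off; off = hdr->next) {
			hdr = (void *)info + off;
			if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL) {
				struct vfio_iommu_type1_info_dma_avail *cap =
					(void *)hdr;
				/* cap->avail: DMA mappings still available */
			}
		}
	}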
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 530638dffd93..3210b3c82a4a 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -371,9 +371,9 @@ enum v4l2_hsv_encoding {
 
 enum v4l2_quantization {
 	/*
-	 * The default for R'G'B' quantization is always full range, except
-	 * for the BT2020 colorspace. For Y'CbCr the quantization is always
-	 * limited range, except for COLORSPACE_JPEG: this is full range.
+	 * The default for R'G'B' quantization is always full range.
+	 * For Y'CbCr the quantization is always limited range, except
+	 * for COLORSPACE_JPEG: this is full range.
	 */
	V4L2_QUANTIZATION_DEFAULT     = 0,
	V4L2_QUANTIZATION_FULL_RANGE  = 1,
@@ -382,14 +382,13 @@ enum v4l2_quantization {
 
 /*
  * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
- * This depends on whether the image is RGB or not, the colorspace and the
- * Y'CbCr encoding.
+ * This depends on whether the image is RGB or not and on the colorspace.
+ * The Y'CbCr encoding is not used anymore, but is still there for backwards
+ * compatibility.
 */
 #define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
-	(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
-	 V4L2_QUANTIZATION_LIM_RANGE : \
-	 (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
-	  V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+	(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
 
 /*
  * Deprecated names for opRGB colorspace (IEC 61966-2-5)
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 86eca3208b6b..24f3371ad826 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -74,6 +74,12 @@
 #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
 #include <linux/if.h>			/* for IFNAMSIZ and co... */
 
+#ifdef __KERNEL__
+#	include <linux/stddef.h>	/* for offsetof */
+#else
+#	include <stddef.h>		/* for offsetof */
+#endif
+
 /***************************** VERSION *****************************/
 /*
  * This constant is used to know the availability of the wireless
@@ -1090,8 +1096,7 @@ struct iw_event {
 /* iw_point events are special. First, the payload (extra data) come at
 * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
 * we omit the pointer, so start at an offset.
*/ -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ - (char *) NULL) +#define IW_EV_POINT_OFF offsetof(struct iw_point, length) #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 5f3b9fec7b5f..ff7cfdc6cb44 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -304,7 +304,7 @@ enum xfrm_attr_type_t { XFRMA_PROTO, /* __u8 */ XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ XFRMA_SET_MARK, /* __u32 */ XFRMA_SET_MARK_MASK, /* __u32 */ XFRMA_IF_ID, /* __u32 */ diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index 9eee32f5e407..a93c0decfdd5 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -18,6 +18,8 @@ */ #define SKL_CONTROL_TYPE_BYTE_TLV 0x100 #define SKL_CONTROL_TYPE_MIC_SELECT 0x102 +#define SKL_CONTROL_TYPE_MULTI_IO_SELECT 0x103 +#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC 0x104 #define HDA_SST_CFG_MAX 900 /* size of copier cfg*/ #define MAX_IN_QUEUE 8 diff --git a/include/xen/events.h b/include/xen/events.h index c0e6a0598397..31952308a6d5 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -14,11 +14,16 @@ unsigned xen_evtchn_nr_channels(void); -int bind_evtchn_to_irq(unsigned int evtchn); -int bind_evtchn_to_irqhandler(unsigned int evtchn, +int bind_evtchn_to_irq(evtchn_port_t evtchn); +int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); +int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); +int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, const char *devname, + void *dev_id); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, @@ -31,13 +36,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, - unsigned int remote_port); + evtchn_port_t remote_port); +int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, - unsigned int remote_port, + evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); +int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, + void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. @@ -46,6 +59,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); +/* + * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi + * functions above. + */ +void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags); +/* Signal an event was spurious, i.e. there was no action resulting from it. 
 */
+#define XEN_EOI_FLAG_SPURIOUS	0x00000001
+
 #define XEN_IRQ_PRIORITY_MAX     EVTCHN_FIFO_PRIORITY_MAX
 #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
 #define XEN_IRQ_PRIORITY_MIN     EVTCHN_FIFO_PRIORITY_MIN
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9bc5bc07d4d3..a9978350b45b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
 	map->flags = flags;
 	map->ref = ref;
 	map->dom = domid;
+	map->status = 1; /* arbitrary positive value */
 }
 
 static inline void
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 869c816d5f8c..14d47ed4114f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -59,6 +59,15 @@ struct xenbus_watch
 	/* Path being watched. */
 	const char *node;
 
+	unsigned int nr_pending;
+
+	/*
+	 * Called just before enqueuing a new event while a spinlock is held.
+	 * The event will be discarded if this callback returns false.
+	 */
+	bool (*will_handle)(struct xenbus_watch *,
+			    const char *path, const char *token);
+
 	/* Callback (executed in a process context with no locks held). */
 	void (*callback)(struct xenbus_watch *,
 			 const char *path, const char *token);
@@ -178,8 +187,6 @@ void xs_suspend_cancel(void);
 
 struct work_struct;
 
-void xenbus_probe(struct work_struct *);
-
 #define XENBUS_IS_ERR_READ(str) ({			\
 	if (!IS_ERR(str) && strlen(str) == 0) {		\
 		kfree(str);				\
@@ -192,10 +199,14 @@ void xenbus_probe(struct work_struct *);
 
 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
 		      struct xenbus_watch *watch,
+		      bool (*will_handle)(struct xenbus_watch *,
+					  const char *, const char *),
 		      void (*callback)(struct xenbus_watch *,
 				       const char *, const char *));
-__printf(4, 5)
+__printf(5, 6)
 int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+			 bool (*will_handle)(struct xenbus_watch *,
+					     const char *, const char *),
 			 void (*callback)(struct xenbus_watch *,
 					  const char *, const char *),
 			 const char *pathfmt, ...);
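An illustrative caller sketch for the new will_handle parameter shown in the xenbus.h hunk above (editorial; my_will_handle and my_callback are hypothetical driver functions):

	/* Accept every event; a real driver could return false here to drop
	 * events that arrive while the device is being torn down. */
	static bool my_will_handle(struct xenbus_watch *watch,
				   const char *path, const char *token)
	{
		return true;
	}

	err = xenbus_watch_path(dev, path, &watch, my_will_handle,
				my_callback);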
diff --git a/init/Kconfig b/init/Kconfig
index 0bffc8fdbf3d..0babb0a34952 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -36,22 +36,6 @@ config TOOLS_SUPPORT_RELR
 config CC_HAS_ASM_INLINE
 	def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
 
-config CC_HAS_WARN_MAYBE_UNINITIALIZED
-	def_bool $(cc-option,-Wmaybe-uninitialized)
-	help
-	  GCC >= 4.7 supports this option.
-
-config CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-	bool
-	depends on CC_HAS_WARN_MAYBE_UNINITIALIZED
-	default CC_IS_GCC && GCC_VERSION < 40900  # unreliable for GCC < 4.9
-	help
-	  GCC's -Wmaybe-uninitialized is not reliable by definition.
-	  Lots of false positive warnings are produced in some cases.
-
-	  If this option is enabled, -Wno-maybe-uninitialzed is passed
-	  to the compiler to suppress maybe-uninitialized warnings.
-
 config CONSTRUCTORS
 	bool
 	depends on !UML
@@ -92,8 +76,7 @@ config INIT_ENV_ARG_LIMIT
 
 config COMPILE_TEST
 	bool "Compile also drivers which will not load"
-	depends on !UML
-	default n
+	depends on HAS_IOMEM
 	help
 	  Some drivers can be compiled on a different platform than they are
	  intended to be run on. Even though they cannot be loaded there (or even
@@ -610,7 +593,8 @@ config IKHEADERS
 
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
-	range 12 25
+	range 12 25 if !H8300
+	range 12 19 if H8300
 	default 17
 	depends on PRINTK
 	help
@@ -1226,14 +1210,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
 	bool "Optimize more for performance (-O3)"
 	depends on ARC
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
 	help
 	  Choosing this option will pass "-O3" to your compiler to optimize
 	  the kernel yet more for performance.
 
 config CC_OPTIMIZE_FOR_SIZE
 	bool "Optimize for size (-Os)"
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
 	help
 	  Choosing this option will pass "-Os" to your compiler resulting
 	  in a smaller kernel.
@@ -1608,6 +1590,18 @@ config KALLSYMS_BASE_RELATIVE
 # end of the "standard kernel features (expert users)" menu
 
 # syscall, maps, verifier
+
+config BPF_LSM
+	bool "LSM Instrumentation with BPF"
+	depends on BPF_SYSCALL
+	depends on SECURITY
+	depends on BPF_JIT
+	help
+	  Enables instrumentation of the security hooks with eBPF programs for
+	  implementing dynamic MAC and Audit Policies.
+
+	  If you are unsure how to answer this question, answer N.
+
 config BPF_SYSCALL
 	bool "Enable bpf() system call"
 	select BPF
diff --git a/init/init_task.c b/init/init_task.c
index 9e5cbe5eab7b..5d8359c44564 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -26,6 +26,7 @@ static struct signal_struct init_signals = {
 	.multiprocess	= HLIST_HEAD_INIT,
 	.rlim		= INIT_RLIMITS,
 	.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
+	.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
 #ifdef CONFIG_POSIX_TIMERS
 	.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
 	.cputimer	= {
@@ -170,7 +171,8 @@ struct task_struct init_task
 	.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.ret_stack	= NULL,
+	.ret_stack		= NULL,
+	.tracing_graph_pause	= ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
 	.trace_recursion = 0,
diff --git a/init/initramfs.c b/init/initramfs.c
index c47dad0884f7..00a32799a38b 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -527,14 +527,14 @@ extern unsigned long __initramfs_size;
 #include <linux/initrd.h>
 #include <linux/kexec.h>
 
-void __weak free_initrd_mem(unsigned long start, unsigned long end)
+void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
 			"initrd");
 }
 
 #ifdef CONFIG_KEXEC_CORE
-static bool kexec_free_initrd(void)
+static bool __init kexec_free_initrd(void)
 {
 	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
 	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
diff --git a/init/main.c b/init/main.c
index c0206c507eba..2677324e7300 100644
--- a/init/main.c
+++ b/init/main.c
@@ -32,6 +32,7 @@
 #include <linux/nmi.h>
 #include <linux/percpu.h>
 #include <linux/kmod.h>
+#include <linux/kprobes.h>
 #include <linux/vmalloc.h>
 #include <linux/kernel_stat.h>
 #include <linux/start_kernel.h>
@@ -45,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/cgroup.h>
+#include <linux/mbuf.h>
 #include <linux/efi.h>
 #include <linux/tick.h>
 #include <linux/sched/isolation.h>
@@ -622,11 +624,13 @@ asmlinkage __visible void __init start_kernel(void)
 	 * kmem_cache_init()
 	 */
 	setup_log_buf(0);
+	setup_mbuf();
 	vfs_caches_init_early();
 	sort_main_extable();
 	trap_init();
 	mm_init();
+
mbuf_bmap_init(); ftrace_init(); /* trace_printk can be enabled here */ @@ -694,7 +698,6 @@ asmlinkage __visible void __init start_kernel(void) boot_init_stack_canary(); time_init(); - printk_safe_init(); perf_event_init(); profile_init(); call_function_init(); @@ -783,6 +786,8 @@ asmlinkage __visible void __init start_kernel(void) /* Do the rest non-__init'ed, we're now alive */ arch_call_rest_init(); + + prevent_tail_call_optimization(); } /* Call all constructor functions linked into the kernel. */ @@ -1110,6 +1115,7 @@ static int __ref kernel_init(void *unused) kernel_init_freeable(); /* need to finish all async __init code before freeing the memory */ async_synchronize_full(); + kprobe_free_init_mem(); ftrace_free_init_mem(); free_initmem(); mark_readonly(); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 3d920ff15c80..2ea0c08188e6 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -82,6 +82,7 @@ struct mqueue_inode_info { struct sigevent notify; struct pid *notify_owner; + u32 notify_self_exec_id; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; @@ -709,28 +710,44 @@ static void __do_notify(struct mqueue_inode_info *info) * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { - struct kernel_siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; - case SIGEV_SIGNAL: - /* sends signal */ + case SIGEV_SIGNAL: { + struct kernel_siginfo sig_i; + struct task_struct *task; + + /* do_mq_notify() accepts sigev_signo == 0, why?? */ + if (!info->notify.sigev_signo) + break; clear_siginfo(&sig_i); sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); + /* map current pid/uid into info->owner's namespaces */ sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); - sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, + current_uid()); + /* + * We can't use kill_pid_info(), this signal should + * bypass check_kill_permission(). It is from kernel + * but si_fromuser() can't know this. + * We do check the self_exec_id, to avoid sending + * signals to programs that don't expect them. 
+ */ + task = pid_task(info->notify_owner, PIDTYPE_TGID); + if (task && task->self_exec_id == + info->notify_self_exec_id) { + do_send_sig_info(info->notify.sigev_signo, + &sig_i, task, PIDTYPE_TGID); + } rcu_read_unlock(); - - kill_pid_info(info->notify.sigev_signo, - &sig_i, info->notify_owner); break; + } case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); @@ -1315,6 +1332,7 @@ retry: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; + info->notify_self_exec_id = current->self_exec_id; break; } diff --git a/ipc/util.c b/ipc/util.c index d126d156efc6..1821b6386d3b 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, total++; } + ipc = NULL; if (total >= ids->in_use) - return NULL; + goto out; for (; pos < ipc_mni; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { - *new_pos = pos + 1; rcu_read_lock(); ipc_lock_object(ipc); - return ipc; + break; } } - - /* Out of range - return NULL to terminate iteration */ - return NULL; +out: + *new_pos = pos + 1; + return ipc; } static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) diff --git a/kernel/.gitignore b/kernel/.gitignore index 34d1e77ee9df..0f0dba6c1eed 100644 --- a/kernel/.gitignore +++ b/kernel/.gitignore @@ -1,4 +1,5 @@ # +/config_data # Generated files # kheaders.md5 diff --git a/kernel/Makefile b/kernel/Makefile index fa543a44810e..cd6db723bc1e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -95,7 +95,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o obj-$(CONFIG_TRACEPOINTS) += tracepoint.o obj-$(CONFIG_LATENCYTOP) += latencytop.o -obj-$(CONFIG_ELFCORE) += elfcore.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_TRACE_CLOCK) += trace/ @@ -117,20 +116,27 @@ obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_HAS_IOMEM) += iomem.o obj-$(CONFIG_RSEQ) += rseq.o +obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o + obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_stackleak.o := n $(obj)/configs.o: $(obj)/config_data.gz -targets += config_data.gz -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE +targets += config_data config_data.gz +$(obj)/config_data.gz: $(obj)/config_data FORCE $(call if_changed,gzip) +filechk_cat = cat $< + +$(obj)/config_data: $(KCONFIG_CONFIG) FORCE + $(call filechk,cat) + $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz - cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@ + cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@ $(obj)/kheaders_data.tar.xz: FORCE $(call cmd,genikh) diff --git a/kernel/audit.c b/kernel/audit.c index dfc45063cb56..05ae208ad442 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -879,7 +879,7 @@ main_queue: return 0; } -int audit_send_list(void *_dest) +int audit_send_list_thread(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; @@ -923,19 +923,30 @@ out_kfree_skb: return NULL; } +static void audit_free_reply(struct audit_reply *reply) +{ + if (!reply) + return; + + if (reply->skb) + kfree_skb(reply->skb); + if (reply->net) + put_net(reply->net); + kfree(reply); +} + static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply 
*)arg; - struct sock *sk = audit_get_sk(reply->net); audit_ctl_lock(); audit_ctl_unlock(); /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ - netlink_unicast(sk, reply->skb, reply->portid, 0); - put_net(reply->net); - kfree(reply); + netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); + reply->skb = NULL; + audit_free_reply(reply); return 0; } @@ -949,35 +960,32 @@ static int audit_send_reply_thread(void *arg) * @payload: payload data * @size: payload size * - * Allocates an skb, builds the netlink message, and sends it to the port id. - * No failure notifications. + * Allocates a skb, builds the netlink message, and sends it to the port id. */ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { - struct net *net = sock_net(NETLINK_CB(request_skb).sk); - struct sk_buff *skb; struct task_struct *tsk; - struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), - GFP_KERNEL); + struct audit_reply *reply; + reply = kzalloc(sizeof(*reply), GFP_KERNEL); if (!reply) return; - skb = audit_make_reply(seq, type, done, multi, payload, size); - if (!skb) - goto out; - - reply->net = get_net(net); + reply->skb = audit_make_reply(seq, type, done, multi, payload, size); + if (!reply->skb) + goto err; + reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); reply->portid = NETLINK_CB(request_skb).portid; - reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); - if (!IS_ERR(tsk)) - return; - kfree_skb(skb); -out: - kfree(reply); + if (IS_ERR(tsk)) + goto err; + + return; + +err: + audit_free_reply(reply); } /* @@ -1325,6 +1333,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_FIRST_USER_MSG2 ... 
AUDIT_LAST_USER_MSG2: if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; + /* exit early if there isn't at least one character to print */ + if (data_len < 2) + return -EINVAL; err = audit_filter(msg_type, AUDIT_FILTER_USER); if (err == 1) { /* match or error */ diff --git a/kernel/audit.h b/kernel/audit.h index 6fb7160412d4..ddc22878433d 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -229,7 +229,7 @@ struct audit_netlink_list { struct sk_buff_head q; }; -int audit_send_list(void *_dest); +int audit_send_list_thread(void *_dest); extern int selinux_audit_rule_update(void); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4508d5e0cf69..8a8fd732ff6d 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -302,8 +302,6 @@ static void audit_update_watch(struct audit_parent *parent, if (oentry->rule.exe) audit_remove_mark(oentry->rule.exe); - audit_watch_log_rule_change(r, owatch, "updated_rules"); - call_rcu(&oentry->rcu, audit_free_rule_rcu); } diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 026e34da4ace..a10e2997aa6c 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1161,11 +1161,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) */ int audit_list_rules_send(struct sk_buff *request_skb, int seq) { - u32 portid = NETLINK_CB(request_skb).portid; - struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct task_struct *tsk; struct audit_netlink_list *dest; - int err = 0; /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for @@ -1173,25 +1170,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); + dest = kmalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; - dest->net = get_net(net); - dest->portid = portid; + dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + dest->portid = NETLINK_CB(request_skb).portid; skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); audit_list_rules(seq, &dest->q); mutex_unlock(&audit_filter_mutex); - tsk = kthread_run(audit_send_list, dest, "audit_send_list"); + tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); if (IS_ERR(tsk)) { skb_queue_purge(&dest->q); + put_net(dest->net); kfree(dest); - err = PTR_ERR(tsk); + return PTR_ERR(tsk); } - return err; + return 0; } int audit_comparator(u32 left, u32 op, u32 right) diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e1d9adb212f9..5ef50f4b8adf 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,15 +1,23 @@ # SPDX-License-Identifier: GPL-2.0 obj-y := core.o -CFLAGS_core.o += $(call cc-disable-warning, override-init) +ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y) +# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details +cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse +endif +CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy) -obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o -obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o +obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o 
+obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o +obj-$(CONFIG_BPF_JIT) += trampoline.o obj-$(CONFIG_BPF_SYSCALL) += btf.o +obj-$(CONFIG_BPF_JIT) += dispatcher.o ifeq ($(CONFIG_NET),y) obj-$(CONFIG_BPF_SYSCALL) += devmap.o obj-$(CONFIG_BPF_SYSCALL) += cpumap.o +obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o ifeq ($(CONFIG_XDP_SOCKETS),y) obj-$(CONFIG_BPF_SYSCALL) += xskmap.o endif @@ -21,7 +29,12 @@ endif obj-$(CONFIG_CGROUP_BPF) += cgroup.o ifeq ($(CONFIG_INET),y) obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o +obj-$(CONFIG_BPF_SYSCALL) += net_namespace.o endif ifeq ($(CONFIG_SYSFS),y) obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o endif +ifeq ($(CONFIG_BPF_JIT),y) +obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o +obj-${CONFIG_BPF_LSM} += bpf_lsm.o +endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 1c65ce0098a9..e80220d8f764 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -14,7 +14,8 @@ #include "map_in_map.h" #define ARRAY_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) + (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \ + BPF_F_INNER_MAP) static void bpf_array_free_percpu(struct bpf_array *array) { @@ -59,6 +60,10 @@ int array_map_alloc_check(union bpf_attr *attr) (percpu && numa_node != NUMA_NO_NODE)) return -EINVAL; + if (attr->map_type != BPF_MAP_TYPE_ARRAY && + attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) + return -EINVAL; + if (attr->value_size > KMALLOC_MAX_SIZE) /* if value_size is bigger, the user space won't be able to * access the elements. @@ -73,7 +78,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; int ret, numa_node = bpf_map_attr_numa_node(attr); u32 elem_size, index_mask, max_entries; - bool unpriv = !capable(CAP_SYS_ADMIN); + bool bypass_spec_v1 = bpf_bypass_spec_v1(); u64 cost, array_size, mask64; struct bpf_map_memory mem; struct bpf_array *array; @@ -91,7 +96,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) mask64 -= 1; index_mask = mask64; - if (unpriv) { + if (!bypass_spec_v1) { /* round up array size to nearest power of 2, * since cpu will speculate within index_mask limits */ @@ -102,10 +107,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) } array_size = sizeof(*array); - if (percpu) + if (percpu) { array_size += (u64) max_entries * sizeof(void *); - else - array_size += (u64) max_entries * elem_size; + } else { + /* rely on vmalloc() to return page-aligned memory and + * ensure array->value is exactly page-aligned + */ + if (attr->map_flags & BPF_F_MMAPABLE) { + array_size = PAGE_ALIGN(array_size); + array_size += PAGE_ALIGN((u64) max_entries * elem_size); + } else { + array_size += (u64) max_entries * elem_size; + } + } /* make sure there is no u32 overflow later in round_up() */ cost = array_size; @@ -117,13 +131,26 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) return ERR_PTR(ret); /* allocate all map elements and zero-initialize them */ - array = bpf_map_area_alloc(array_size, numa_node); + if (attr->map_flags & BPF_F_MMAPABLE) { + void *data; + + /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */ + data = bpf_map_area_mmapable_alloc(array_size, numa_node); + if (!data) { + bpf_map_charge_finish(&mem); + return ERR_PTR(-ENOMEM); + } + array = data + PAGE_ALIGN(sizeof(struct bpf_array)) + - offsetof(struct bpf_array, value); + } else { + array = bpf_map_area_alloc(array_size, numa_node); + } if (!array) { 
bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); } array->index_mask = index_mask; - array->map.unpriv_array = unpriv; + array->map.bypass_spec_v1 = bypass_spec_v1; /* copy mandatory map attributes */ bpf_map_init_from_attr(&array->map, attr); @@ -182,7 +209,7 @@ static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, } /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ -static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_insn *insn = insn_buf; @@ -191,9 +218,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) const int map_ptr = BPF_REG_1; const int index = BPF_REG_2; + if (map->map_flags & BPF_F_INNER_MAP) + return -EOPNOTSUPP; + *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); - if (map->unpriv_array) { + if (!map->bypass_spec_v1) { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); } else { @@ -268,6 +298,12 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key return 0; } +static void check_and_free_timer_in_array(struct bpf_array *arr, void *val) +{ + if (unlikely(map_value_has_timer(&arr->map))) + bpf_timer_cancel_and_free(val + arr->map.timer_off); +} + /* Called from syscall or from eBPF program */ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) @@ -302,6 +338,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, copy_map_value_locked(map, val, value, false); else copy_map_value(map, val, value); + check_and_free_timer_in_array(array, val); } return 0; } @@ -350,6 +387,24 @@ static int array_map_delete_elem(struct bpf_map *map, void *key) return -EINVAL; } +static void *array_map_vmalloc_addr(struct bpf_array *array) +{ + return (void *)round_down((unsigned long)array, PAGE_SIZE); +} + +static void array_map_free_timers(struct bpf_map *map) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + int i; + + if (likely(!map_value_has_timer(map))) + return; + + for (i = 0; i < array->map.max_entries; i++) + bpf_timer_cancel_and_free(array->value + array->elem_size * i + + map->timer_off); +} + /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ static void array_map_free(struct bpf_map *map) { @@ -365,7 +420,10 @@ static void array_map_free(struct bpf_map *map) if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); - bpf_map_area_free(array); + if (array->map.map_flags & BPF_F_MMAPABLE) + bpf_map_area_free(array_map_vmalloc_addr(array)); + else + bpf_map_area_free(array); } static void array_map_seq_show_elem(struct bpf_map *map, void *key, @@ -444,21 +502,214 @@ static int array_map_check_btf(const struct bpf_map *map, return 0; } +int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; + + if (!(map->map_flags & BPF_F_MMAPABLE)) + return -EINVAL; + + if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > + PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) + return -EINVAL; + + return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), 
+ vma->vm_pgoff + pgoff); +} + +static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, + void *callback_ctx, u64 flags) +{ + u32 i, key, num_elems = 0; + struct bpf_array *array; + bool is_percpu; + u64 ret = 0; + void *val; + + if (flags != 0) + return -EINVAL; + + is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; + array = container_of(map, struct bpf_array, map); + for (i = 0; i < map->max_entries; i++) { + if (is_percpu) + val = this_cpu_ptr(array->pptrs[i]); + else + val = array->value + array->elem_size * i; + num_elems++; + key = i; + ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, + (u64)(long)&key, (u64)(long)val, + (u64)(long)callback_ctx, 0); + /* return value: 0 - continue, 1 - stop and return */ + if (ret) + break; + } + + return num_elems; +} + +static int array_map_btf_id; +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_map *map = info->map; + struct bpf_array *array; + u32 index; + + if (info->index >= map->max_entries) + return NULL; + + if (*pos == 0) + ++*pos; + array = container_of(map, struct bpf_array, map); + index = info->index & array->index_mask; + if (info->percpu_value_buf) + return array->pptrs[index]; + return array->value + array->elem_size * index; +} + +static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_map *map = info->map; + struct bpf_array *array; + u32 index; + + ++*pos; + ++info->index; + if (info->index >= map->max_entries) + return NULL; + + array = container_of(map, struct bpf_array, map); + index = info->index & array->index_mask; + if (info->percpu_value_buf) + return array->pptrs[index]; + return array->value + array->elem_size * index; +} + +static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_iter__bpf_map_elem ctx = {}; + struct bpf_map *map = info->map; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int off = 0, cpu = 0; + void __percpu **pptr; + u32 size; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, v == NULL); + if (!prog) + return 0; + + ctx.meta = &meta; + ctx.map = info->map; + if (v) { + ctx.key = &info->index; + + if (!info->percpu_value_buf) { + ctx.value = v; + } else { + pptr = v; + size = round_up(map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(info->percpu_value_buf + off, + per_cpu_ptr(pptr, cpu), + size); + off += size; + } + ctx.value = info->percpu_value_buf; + } + } + + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_array_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_array_map_seq_show(seq, v); +} + +static void bpf_array_map_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_array_map_seq_show(seq, NULL); +} + +static int bpf_iter_init_array_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_array_map_info *seq_info = priv_data; + struct bpf_map *map = aux->map; + void *value_buf; + u32 buf_size; + + if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { + buf_size = round_up(map->value_size, 8) * num_possible_cpus(); + value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); + if (!value_buf) + return -ENOMEM; + + seq_info->percpu_value_buf = value_buf; + } + + seq_info->map = map; + return 0; +} + 
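The BPF_F_MMAPABLE layout above (page-aligned value area, vmalloc-backed allocation, header offset folded into pgoff by array_map_mmap()) is what lets userspace map an array's elements directly. A minimal userspace sketch, assuming a kernel with this patch, uapi headers new enough to define BPF_F_MMAPABLE, and enough privilege to create maps; sizes and names are illustrative only:

#include <linux/bpf.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	__u64 *vals;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);
	attr.max_entries = 512;			/* 512 * 8 bytes == one page */
	attr.map_flags   = BPF_F_MMAPABLE;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		return 1;

	/* Offset 0 of the map fd is the start of the value area;
	 * array_map_mmap() adds the hidden struct bpf_array header
	 * offset internally.
	 */
	vals = mmap(NULL, 512 * sizeof(__u64), PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (vals == MAP_FAILED)
		return 1;

	vals[0] = 42;	/* direct update, no BPF_MAP_UPDATE_ELEM syscall */

	munmap(vals, 512 * sizeof(__u64));
	close(fd);
	return 0;
}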
+static void bpf_iter_fini_array_map(void *priv_data) +{ + struct bpf_iter_seq_array_map_info *seq_info = priv_data; + + kfree(seq_info->percpu_value_buf); +} + +static const struct seq_operations bpf_array_map_seq_ops = { + .start = bpf_array_map_seq_start, + .next = bpf_array_map_seq_next, + .stop = bpf_array_map_seq_stop, + .show = bpf_array_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_array_map_seq_ops, + .init_seq_private = bpf_iter_init_array_map, + .fini_seq_private = bpf_iter_fini_array_map, + .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), +}; + const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, + .map_release_uref = array_map_free_timers, .map_lookup_elem = array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, .map_gen_lookup = array_map_gen_lookup, .map_direct_value_addr = array_map_direct_value_addr, .map_direct_value_meta = array_map_direct_value_meta, + .map_mmap = array_map_mmap, .map_seq_show_elem = array_map_seq_show_elem, .map_check_btf = array_map_check_btf, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_array_elem, + .map_btf_name = "bpf_array", + .map_btf_id = &array_map_btf_id, + .iter_seq_info = &iter_seq_info, }; +static int percpu_array_map_btf_id; const struct bpf_map_ops percpu_array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -469,6 +720,11 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_delete_elem = array_map_delete_elem, .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_array_elem, + .map_btf_name = "bpf_array", + .map_btf_id = &percpu_array_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int fd_array_map_alloc_check(union bpf_attr *attr) @@ -540,10 +796,17 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, if (IS_ERR(new_ptr)) return PTR_ERR(new_ptr); - old_ptr = xchg(array->ptrs + index, new_ptr); + if (map->ops->map_poke_run) { + mutex_lock(&array->aux->poke_mutex); + old_ptr = xchg(array->ptrs + index, new_ptr); + map->ops->map_poke_run(map, index, old_ptr, new_ptr); + mutex_unlock(&array->aux->poke_mutex); + } else { + old_ptr = xchg(array->ptrs + index, new_ptr); + } + if (old_ptr) map->ops->map_fd_put_ptr(old_ptr); - return 0; } @@ -556,7 +819,15 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key) if (index >= array->map.max_entries) return -E2BIG; - old_ptr = xchg(array->ptrs + index, NULL); + if (map->ops->map_poke_run) { + mutex_lock(&array->aux->poke_mutex); + old_ptr = xchg(array->ptrs + index, NULL); + map->ops->map_poke_run(map, index, old_ptr, NULL); + mutex_unlock(&array->aux->poke_mutex); + } else { + old_ptr = xchg(array->ptrs + index, NULL); + } + if (old_ptr) { map->ops->map_fd_put_ptr(old_ptr); return 0; @@ -625,18 +896,199 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +static int prog_array_map_poke_track(struct bpf_map *map, + struct bpf_prog_aux *prog_aux) +{ + struct prog_poke_elem *elem; + struct bpf_array_aux *aux; + int ret = 0; + + aux = 
container_of(map, struct bpf_array, map)->aux; + mutex_lock(&aux->poke_mutex); + list_for_each_entry(elem, &aux->poke_progs, list) { + if (elem->aux == prog_aux) + goto out; + } + + elem = kmalloc(sizeof(*elem), GFP_KERNEL); + if (!elem) { + ret = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&elem->list); + /* We must track the program's aux info at this point in time + * since the program pointer itself may not be stable yet, see + * also comment in prog_array_map_poke_run(). + */ + elem->aux = prog_aux; + + list_add_tail(&elem->list, &aux->poke_progs); +out: + mutex_unlock(&aux->poke_mutex); + return ret; +} + +static void prog_array_map_poke_untrack(struct bpf_map *map, + struct bpf_prog_aux *prog_aux) +{ + struct prog_poke_elem *elem, *tmp; + struct bpf_array_aux *aux; + + aux = container_of(map, struct bpf_array, map)->aux; + mutex_lock(&aux->poke_mutex); + list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { + if (elem->aux == prog_aux) { + list_del_init(&elem->list); + kfree(elem); + break; + } + } + mutex_unlock(&aux->poke_mutex); +} + +static void prog_array_map_poke_run(struct bpf_map *map, u32 key, + struct bpf_prog *old, + struct bpf_prog *new) +{ + struct prog_poke_elem *elem; + struct bpf_array_aux *aux; + + aux = container_of(map, struct bpf_array, map)->aux; + WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); + + list_for_each_entry(elem, &aux->poke_progs, list) { + struct bpf_jit_poke_descriptor *poke; + int i, ret; + + for (i = 0; i < elem->aux->size_poke_tab; i++) { + poke = &elem->aux->poke_tab[i]; + + /* A few things to be aware of: + * + * 1) We can only ever access aux in this context, but + * not aux->prog since it might not be stable yet and + * there could be danger of use after free otherwise. + * 2) Initially when we start tracking aux, the program + * is not JITed yet and also does not have a kallsyms + * entry. We skip these as poke->ip_stable is not + * active yet. The JIT will do the final fixup before + * setting it stable. The various poke->ip_stable are + * successively activated, so tail call updates can + * arrive from here while JIT is still finishing its + * final fixup for non-activated poke entries. + * 3) On program teardown, the program's kallsym entry gets + * removed out of RCU callback, but we can only untrack + * from sleepable context, therefore bpf_arch_text_poke() + * might not see that this is in BPF text section and + * bails out with -EINVAL. As these are unreachable since + * RCU grace period already passed, we simply skip them. + * 4) Also, programs reaching a refcount of zero while patching + * is in progress are okay since we're protected under + * poke_mutex and untrack the programs before the JIT + * buffer is freed. When we're still in the middle of + * patching and suddenly kallsyms entry of the program + * gets evicted, we just skip the rest which is fine due + * to point 3). + * 5) Any other error happening below from bpf_arch_text_poke() + * is an unexpected bug. + */ + if (!READ_ONCE(poke->ip_stable)) + continue; + if (poke->reason != BPF_POKE_REASON_TAIL_CALL) + continue; + if (poke->tail_call.map != map || + poke->tail_call.key != key) + continue; + + ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, + old ? (u8 *)old->bpf_func + + poke->adj_off : NULL, + new ?

(u8 *)new->bpf_func + + poke->adj_off : NULL); + BUG_ON(ret < 0 && ret != -EINVAL); + } + } +} + +static void prog_array_map_clear_deferred(struct work_struct *work) +{ + struct bpf_map *map = container_of(work, struct bpf_array_aux, + work)->map; + bpf_fd_array_map_clear(map); + bpf_map_put(map); +} + +static void prog_array_map_clear(struct bpf_map *map) +{ + struct bpf_array_aux *aux = container_of(map, struct bpf_array, + map)->aux; + bpf_map_inc(map); + schedule_work(&aux->work); +} + +static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) +{ + struct bpf_array_aux *aux; + struct bpf_map *map; + + aux = kzalloc(sizeof(*aux), GFP_KERNEL); + if (!aux) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&aux->work, prog_array_map_clear_deferred); + INIT_LIST_HEAD(&aux->poke_progs); + mutex_init(&aux->poke_mutex); + + map = array_map_alloc(attr); + if (IS_ERR(map)) { + kfree(aux); + return map; + } + + container_of(map, struct bpf_array, map)->aux = aux; + aux->map = map; + + return map; +} + +static void prog_array_map_free(struct bpf_map *map) +{ + struct prog_poke_elem *elem, *tmp; + struct bpf_array_aux *aux; + + aux = container_of(map, struct bpf_array, map)->aux; + list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { + list_del_init(&elem->list); + kfree(elem); + } + kfree(aux); + fd_array_map_free(map); +} + +static int prog_array_map_btf_id; const struct bpf_map_ops prog_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, - .map_alloc = array_map_alloc, - .map_free = fd_array_map_free, + .map_alloc = prog_array_map_alloc, + .map_free = prog_array_map_free, + .map_poke_track = prog_array_map_poke_track, + .map_poke_untrack = prog_array_map_poke_untrack, + .map_poke_run = prog_array_map_poke_run, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = fd_array_map_lookup_elem, .map_delete_elem = fd_array_map_delete_elem, .map_fd_get_ptr = prog_fd_array_get_ptr, .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, - .map_release_uref = bpf_fd_array_map_clear, + .map_release_uref = prog_array_map_clear, .map_seq_show_elem = prog_array_map_seq_show_elem, + .map_btf_name = "bpf_array", + .map_btf_id = &prog_array_map_btf_id, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, @@ -715,6 +1167,7 @@ static void perf_event_fd_array_release(struct bpf_map *map, rcu_read_unlock(); } +static int perf_event_array_map_btf_id; const struct bpf_map_ops perf_event_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -726,6 +1179,8 @@ const struct bpf_map_ops perf_event_array_map_ops = { .map_fd_put_ptr = perf_event_fd_array_put_ptr, .map_release = perf_event_fd_array_release, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &perf_event_array_map_btf_id, }; #ifdef CONFIG_CGROUPS @@ -748,6 +1203,7 @@ static void cgroup_fd_array_free(struct bpf_map *map) fd_array_map_free(map); } +static int cgroup_array_map_btf_id; const struct bpf_map_ops cgroup_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -758,6 +1214,8 @@ const struct bpf_map_ops cgroup_array_map_ops = { .map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &cgroup_array_map_btf_id, }; #endif @@ -800,7 +1258,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) return READ_ONCE(*inner_map); } 
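The poke_track/poke_run machinery above serves tail calls: once a prog_array slot is addressed with a constant key, the JIT can emit a patchable direct jump instead of the generic indirect tail-call sequence, and prog_array_map_poke_run() re-targets that jump whenever the slot changes. A sketch of the BPF-side pattern being optimized, in common libbpf style (map and program names are illustrative, not part of the patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* Constant key: the JIT may turn this helper call into a direct
	 * jump that the poke infrastructure re-patches on map updates.
	 */
	bpf_tail_call(ctx, &jmp_table, 0);

	/* Only reached if slot 0 is empty or the tail call fails. */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";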
-static u32 array_of_map_gen_lookup(struct bpf_map *map, +static int array_of_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_array *array = container_of(map, struct bpf_array, map); @@ -812,7 +1270,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map, *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); - if (map->unpriv_array) { + if (!map->bypass_spec_v1) { *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); } else { @@ -831,6 +1289,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map, return insn - insn_buf; } +static int array_of_maps_map_btf_id; const struct bpf_map_ops array_of_maps_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_of_map_alloc, @@ -843,4 +1302,6 @@ const struct bpf_map_ops array_of_maps_map_ops = { .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = array_of_map_gen_lookup, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &array_of_maps_map_btf_id, }; diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c new file mode 100644 index 000000000000..f3a44e929447 --- /dev/null +++ b/kernel/bpf/bpf_inode_storage.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 Facebook + * Copyright 2020 Google LLC. + */ + +#include <linux/rculist.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/bpf.h> +#include <linux/bpf_local_storage.h> +#include <net/sock.h> +#include <uapi/linux/sock_diag.h> +#include <uapi/linux/btf.h> +#include <linux/bpf_lsm.h> +#include <linux/btf_ids.h> +#include <linux/fdtable.h> + +DEFINE_BPF_STORAGE_CACHE(inode_cache); + +static struct bpf_local_storage __rcu ** +inode_storage_ptr(void *owner) +{ + struct inode *inode = owner; + struct bpf_storage_blob *bsb; + + bsb = bpf_inode(inode); + if (!bsb) + return NULL; + return &bsb->storage; +} + +static struct bpf_local_storage_data *inode_storage_lookup(struct inode *inode, + struct bpf_map *map, + bool cacheit_lockit) +{ + struct bpf_local_storage *inode_storage; + struct bpf_local_storage_map *smap; + struct bpf_storage_blob *bsb; + + bsb = bpf_inode(inode); + if (!bsb) + return NULL; + + inode_storage = rcu_dereference(bsb->storage); + if (!inode_storage) + return NULL; + + smap = (struct bpf_local_storage_map *)map; + return bpf_local_storage_lookup(inode_storage, smap, cacheit_lockit); +} + +void bpf_inode_storage_free(struct inode *inode) +{ + struct bpf_local_storage_elem *selem; + struct bpf_local_storage *local_storage; + bool free_inode_storage = false; + struct bpf_storage_blob *bsb; + struct hlist_node *n; + + bsb = bpf_inode(inode); + if (!bsb) + return; + + rcu_read_lock(); + + local_storage = rcu_dereference(bsb->storage); + if (!local_storage) { + rcu_read_unlock(); + return; + } + + /* Neither the bpf_prog nor the bpf-map's syscall + * could be modifying the local_storage->list now. + * Thus, no elem can be added-to or deleted-from the + * local_storage->list by the bpf_prog or by the bpf-map's syscall. + * + * It is racing with bpf_local_storage_map_free() alone + * when unlinking elem from the local_storage->list and + * the map's bucket->list.
+ */ + raw_spin_lock_bh(&local_storage->lock); + hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) { + /* Always unlink from map before unlinking from + * local_storage. + */ + bpf_selem_unlink_map(selem); + free_inode_storage = bpf_selem_unlink_storage_nolock( + local_storage, selem, false); + } + raw_spin_unlock_bh(&local_storage->lock); + rcu_read_unlock(); + + /* free_inode_storage should always be true as long as + * local_storage->list was non-empty. + */ + if (free_inode_storage) + kfree_rcu(local_storage, rcu); +} + +static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_local_storage_data *sdata; + struct file *f; + int fd; + + fd = *(int *)key; + f = fget_raw(fd); + if (!f) + return NULL; + + sdata = inode_storage_lookup(f->f_inode, map, true); + fput(f); + return sdata ? sdata->data : NULL; +} + +static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags) +{ + struct bpf_local_storage_data *sdata; + struct file *f; + int fd; + + fd = *(int *)key; + f = fget_raw(fd); + if (!f || !inode_storage_ptr(f->f_inode)) + return -EBADF; + + sdata = bpf_local_storage_update(f->f_inode, + (struct bpf_local_storage_map *)map, + value, map_flags); + fput(f); + return PTR_ERR_OR_ZERO(sdata); +} + +static int inode_storage_delete(struct inode *inode, struct bpf_map *map) +{ + struct bpf_local_storage_data *sdata; + + sdata = inode_storage_lookup(inode, map, false); + if (!sdata) + return -ENOENT; + + bpf_selem_unlink(SELEM(sdata)); + + return 0; +} + +static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) +{ + struct file *f; + int fd, err; + + fd = *(int *)key; + f = fget_raw(fd); + if (!f) + return -EBADF; + + err = inode_storage_delete(f->f_inode, map); + fput(f); + return err; +} + +BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, + void *, value, u64, flags) +{ + struct bpf_local_storage_data *sdata; + + if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE)) + return (unsigned long)NULL; + + /* explicitly check that the inode_storage_ptr is not + * NULL as inode_storage_lookup returns NULL in this case and + * bpf_local_storage_update expects the owner to have a + * valid storage pointer. + */ + if (!inode_storage_ptr(inode)) + return (unsigned long)NULL; + + sdata = inode_storage_lookup(inode, map, true); + if (sdata) + return (unsigned long)sdata->data; + + /* This helper must only be called from where the inode is guaranteed + * to have a refcount and cannot be freed. + */ + if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) { + sdata = bpf_local_storage_update( + inode, (struct bpf_local_storage_map *)map, value, + BPF_NOEXIST); + return IS_ERR(sdata) ? (unsigned long)NULL : + (unsigned long)sdata->data; + } + + return (unsigned long)NULL; +} + +BPF_CALL_2(bpf_inode_storage_delete, + struct bpf_map *, map, struct inode *, inode) +{ + /* This helper must only be called from where the inode is guaranteed + * to have a refcount and cannot be freed.
+ */ + return inode_storage_delete(inode, map); +} + +static int notsupp_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + return -ENOTSUPP; +} + +static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr) +{ + struct bpf_local_storage_map *smap; + + smap = bpf_local_storage_map_alloc(attr); + if (IS_ERR(smap)) + return ERR_CAST(smap); + + smap->cache_idx = bpf_local_storage_cache_idx_get(&inode_cache); + return &smap->map; +} + +static void inode_storage_map_free(struct bpf_map *map) +{ + struct bpf_local_storage_map *smap; + + smap = (struct bpf_local_storage_map *)map; + bpf_local_storage_cache_idx_free(&inode_cache, smap->cache_idx); + bpf_local_storage_map_free(smap); +} + +static int inode_storage_map_btf_id; +const struct bpf_map_ops inode_storage_map_ops = { + .map_alloc_check = bpf_local_storage_map_alloc_check, + .map_alloc = inode_storage_map_alloc, + .map_free = inode_storage_map_free, + .map_get_next_key = notsupp_get_next_key, + .map_lookup_elem = bpf_fd_inode_storage_lookup_elem, + .map_update_elem = bpf_fd_inode_storage_update_elem, + .map_delete_elem = bpf_fd_inode_storage_delete_elem, + .map_check_btf = bpf_local_storage_map_check_btf, + .map_btf_name = "bpf_local_storage_map", + .map_btf_id = &inode_storage_map_btf_id, + .map_owner_storage_ptr = inode_storage_ptr, +}; + +BTF_ID_LIST(bpf_inode_storage_btf_ids) +BTF_ID_UNUSED +BTF_ID(struct, inode) + +const struct bpf_func_proto bpf_inode_storage_get_proto = { + .func = bpf_inode_storage_get, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_BTF_ID, + .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, + .arg4_type = ARG_ANYTHING, + .btf_id = bpf_inode_storage_btf_ids, +}; + +const struct bpf_func_proto bpf_inode_storage_delete_proto = { + .func = bpf_inode_storage_delete, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_BTF_ID, + .btf_id = bpf_inode_storage_btf_ids, +}; diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c new file mode 100644 index 000000000000..288382b352b9 --- /dev/null +++ b/kernel/bpf/bpf_iter.c @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ + +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/filter.h> +#include <linux/bpf.h> + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; /* cached value */ +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + u64 session_id; + u64 seq_num; + bool done_stop; + u8 target_private[] __aligned(8); +}; + +static struct list_head targets = LIST_HEAD_INIT(targets); +static DEFINE_MUTEX(targets_mutex); + +/* protect bpf_iter_link changes */ +static DEFINE_MUTEX(link_mutex); + +/* incremented on every opened seq_file */ +static atomic64_t session_id; + +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info); + +static void bpf_iter_inc_seq_num(struct seq_file *seq) +{ + struct bpf_iter_priv_data *iter_priv; + + iter_priv = container_of(seq->private, struct bpf_iter_priv_data, + target_private); + iter_priv->seq_num++; +} + +static void bpf_iter_dec_seq_num(struct seq_file *seq) +{ + struct bpf_iter_priv_data 
*iter_priv; + + iter_priv = container_of(seq->private, struct bpf_iter_priv_data, + target_private); + iter_priv->seq_num--; +} + +static void bpf_iter_done_stop(struct seq_file *seq) +{ + struct bpf_iter_priv_data *iter_priv; + + iter_priv = container_of(seq->private, struct bpf_iter_priv_data, + target_private); + iter_priv->done_stop = true; +} + +/* bpf_seq_read, a customized and simpler version for bpf iterator. + * no_llseek is assumed for this file. + * The following are differences from seq_read(): + * . fixed buffer size (PAGE_SIZE) + * . assuming no_llseek + * . stop() may call bpf program, handling potential overflow there + */ +static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, + loff_t *ppos) +{ + struct seq_file *seq = file->private_data; + size_t n, offs, copied = 0; + int err = 0; + void *p; + + mutex_lock(&seq->lock); + + if (!seq->buf) { + seq->size = PAGE_SIZE; + seq->buf = kmalloc(seq->size, GFP_KERNEL); + if (!seq->buf) { + err = -ENOMEM; + goto done; + } + } + + if (seq->count) { + n = min(seq->count, size); + err = copy_to_user(buf, seq->buf + seq->from, n); + if (err) { + err = -EFAULT; + goto done; + } + seq->count -= n; + seq->from += n; + copied = n; + goto done; + } + + seq->from = 0; + p = seq->op->start(seq, &seq->index); + if (!p) + goto stop; + if (IS_ERR(p)) { + err = PTR_ERR(p); + seq->op->stop(seq, p); + seq->count = 0; + goto done; + } + + err = seq->op->show(seq, p); + if (err > 0) { + /* object is skipped, decrease seq_num, so next + * valid object can reuse the same seq_num. + */ + bpf_iter_dec_seq_num(seq); + seq->count = 0; + } else if (err < 0 || seq_has_overflowed(seq)) { + if (!err) + err = -E2BIG; + seq->op->stop(seq, p); + seq->count = 0; + goto done; + } + + while (1) { + loff_t pos = seq->index; + + offs = seq->count; + p = seq->op->next(seq, p, &seq->index); + if (pos == seq->index) { + pr_info_ratelimited("buggy seq_file .next function %ps " + "did not updated position index\n", + seq->op->next); + seq->index++; + } + + if (IS_ERR_OR_NULL(p)) + break; + + /* got a valid next object, increase seq_num */ + bpf_iter_inc_seq_num(seq); + + if (seq->count >= size) + break; + + err = seq->op->show(seq, p); + if (err > 0) { + bpf_iter_dec_seq_num(seq); + seq->count = offs; + } else if (err < 0 || seq_has_overflowed(seq)) { + seq->count = offs; + if (offs == 0) { + if (!err) + err = -E2BIG; + seq->op->stop(seq, p); + goto done; + } + break; + } + } +stop: + offs = seq->count; + /* bpf program called if !p */ + seq->op->stop(seq, p); + if (!p) { + if (!seq_has_overflowed(seq)) { + bpf_iter_done_stop(seq); + } else { + seq->count = offs; + if (offs == 0) { + err = -E2BIG; + goto done; + } + } + } + + n = min(seq->count, size); + err = copy_to_user(buf, seq->buf, n); + if (err) { + err = -EFAULT; + goto done; + } + copied = n; + seq->count -= n; + seq->from = n; +done: + if (!copied) + copied = err; + else + *ppos += copied; + mutex_unlock(&seq->lock); + return copied; +} + +static const struct bpf_iter_seq_info * +__get_seq_info(struct bpf_iter_link *link) +{ + const struct bpf_iter_seq_info *seq_info; + + if (link->aux.map) { + seq_info = link->aux.map->ops->iter_seq_info; + if (seq_info) + return seq_info; + } + + return link->tinfo->reg_info->seq_info; +} + +static int iter_open(struct inode *inode, struct file *file) +{ + struct bpf_iter_link *link = inode->i_private; + + return prepare_seq_file(file, link, __get_seq_info(link)); +} + +static int iter_release(struct inode *inode, struct file *file) +{ + struct 
bpf_iter_priv_data *iter_priv; + struct seq_file *seq; + + seq = file->private_data; + if (!seq) + return 0; + + iter_priv = container_of(seq->private, struct bpf_iter_priv_data, + target_private); + + if (iter_priv->seq_info->fini_seq_private) + iter_priv->seq_info->fini_seq_private(seq->private); + + bpf_prog_put(iter_priv->prog); + seq->private = iter_priv; + + return seq_release_private(inode, file); +} + +const struct file_operations bpf_iter_fops = { + .open = iter_open, + .llseek = no_llseek, + .read = bpf_seq_read, + .release = iter_release, +}; + +/* The argument reg_info will be cached in bpf_iter_target_info. + * The common practice is to declare target reg_info as + * a const static variable and passed as an argument to + * bpf_iter_reg_target(). + */ +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info) +{ + struct bpf_iter_target_info *tinfo; + + tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); + if (!tinfo) + return -ENOMEM; + + tinfo->reg_info = reg_info; + INIT_LIST_HEAD(&tinfo->list); + + mutex_lock(&targets_mutex); + list_add(&tinfo->list, &targets); + mutex_unlock(&targets_mutex); + + return 0; +} + +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info) +{ + struct bpf_iter_target_info *tinfo; + bool found = false; + + mutex_lock(&targets_mutex); + list_for_each_entry(tinfo, &targets, list) { + if (reg_info == tinfo->reg_info) { + list_del(&tinfo->list); + kfree(tinfo); + found = true; + break; + } + } + mutex_unlock(&targets_mutex); + + WARN_ON(found == false); +} + +static void cache_btf_id(struct bpf_iter_target_info *tinfo, + struct bpf_prog *prog) +{ + tinfo->btf_id = prog->aux->attach_btf_id; +} + +bool bpf_iter_prog_supported(struct bpf_prog *prog) +{ + const char *attach_fname = prog->aux->attach_func_name; + u32 prog_btf_id = prog->aux->attach_btf_id; + const char *prefix = BPF_ITER_FUNC_PREFIX; + struct bpf_iter_target_info *tinfo; + int prefix_len = strlen(prefix); + bool supported = false; + + if (strncmp(attach_fname, prefix, prefix_len)) + return false; + + mutex_lock(&targets_mutex); + list_for_each_entry(tinfo, &targets, list) { + if (tinfo->btf_id && tinfo->btf_id == prog_btf_id) { + supported = true; + break; + } + if (!strcmp(attach_fname + prefix_len, tinfo->reg_info->target)) { + cache_btf_id(tinfo, prog); + supported = true; + break; + } + } + mutex_unlock(&targets_mutex); + + if (supported) { + prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size; + prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info; + } + + return supported; +} + +static void bpf_iter_link_release(struct bpf_link *link) +{ + struct bpf_iter_link *iter_link = + container_of(link, struct bpf_iter_link, link); + + if (iter_link->tinfo->reg_info->detach_target) + iter_link->tinfo->reg_info->detach_target(&iter_link->aux); +} + +static void bpf_iter_link_dealloc(struct bpf_link *link) +{ + struct bpf_iter_link *iter_link = + container_of(link, struct bpf_iter_link, link); + + kfree(iter_link); +} + +static int bpf_iter_link_replace(struct bpf_link *link, + struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + int ret = 0; + + mutex_lock(&link_mutex); + if (old_prog && link->prog != old_prog) { + ret = -EPERM; + goto out_unlock; + } + + if (link->prog->type != new_prog->type || + link->prog->expected_attach_type != new_prog->expected_attach_type || + link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) { + ret = -EINVAL; + goto out_unlock; + } + + old_prog = xchg(&link->prog, new_prog); + bpf_prog_put(old_prog); + +out_unlock: + 
mutex_unlock(&link_mutex); + return ret; +} + +static const struct bpf_link_ops bpf_iter_link_lops = { + .release = bpf_iter_link_release, + .dealloc = bpf_iter_link_dealloc, + .update_prog = bpf_iter_link_replace, +}; + +bool bpf_link_is_iter(struct bpf_link *link) +{ + return link->ops == &bpf_iter_link_lops; +} + +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + union bpf_iter_link_info __user *ulinfo; + struct bpf_link_primer link_primer; + struct bpf_iter_target_info *tinfo; + union bpf_iter_link_info linfo; + struct bpf_iter_link *link; + u32 prog_btf_id, linfo_len; + bool existed = false; + int err; + + if (attr->link_create.target_fd || attr->link_create.flags) + return -EINVAL; + + memset(&linfo, 0, sizeof(union bpf_iter_link_info)); + + ulinfo = u64_to_user_ptr(attr->link_create.iter_info); + linfo_len = attr->link_create.iter_info_len; + if (!ulinfo ^ !linfo_len) + return -EINVAL; + + if (ulinfo) { + err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo), + linfo_len); + if (err) + return err; + linfo_len = min_t(u32, linfo_len, sizeof(linfo)); + if (copy_from_user(&linfo, ulinfo, linfo_len)) + return -EFAULT; + } + + prog_btf_id = prog->aux->attach_btf_id; + mutex_lock(&targets_mutex); + list_for_each_entry(tinfo, &targets, list) { + if (tinfo->btf_id == prog_btf_id) { + existed = true; + break; + } + } + mutex_unlock(&targets_mutex); + if (!existed) + return -ENOENT; + + link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); + if (!link) + return -ENOMEM; + + bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog); + link->tinfo = tinfo; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + return err; + } + + if (tinfo->reg_info->attach_target) { + err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux); + if (err) { + bpf_link_cleanup(&link_primer); + return err; + } + } + + return bpf_link_settle(&link_primer); +} + +static void init_seq_meta(struct bpf_iter_priv_data *priv_data, + struct bpf_iter_target_info *tinfo, + const struct bpf_iter_seq_info *seq_info, + struct bpf_prog *prog) +{ + priv_data->tinfo = tinfo; + priv_data->seq_info = seq_info; + priv_data->prog = prog; + priv_data->session_id = atomic64_inc_return(&session_id); + priv_data->seq_num = 0; + priv_data->done_stop = false; +} + +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info) +{ + struct bpf_iter_priv_data *priv_data; + struct bpf_iter_target_info *tinfo; + struct bpf_prog *prog; + u32 total_priv_dsize; + struct seq_file *seq; + int err = 0; + + mutex_lock(&link_mutex); + prog = link->link.prog; + bpf_prog_inc(prog); + mutex_unlock(&link_mutex); + + tinfo = link->tinfo; + total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + + seq_info->seq_priv_size; + priv_data = __seq_open_private(file, seq_info->seq_ops, + total_priv_dsize); + if (!priv_data) { + err = -ENOMEM; + goto release_prog; + } + + if (seq_info->init_seq_private) { + err = seq_info->init_seq_private(priv_data->target_private, &link->aux); + if (err) + goto release_seq_file; + } + + init_seq_meta(priv_data, tinfo, seq_info, prog); + seq = file->private_data; + seq->private = priv_data->target_private; + + return 0; + +release_seq_file: + seq_release_private(file->f_inode, file); + file->private_data = NULL; +release_prog: + bpf_prog_put(prog); + return err; +} + +int bpf_iter_new_fd(struct bpf_link *link) +{ + struct bpf_iter_link *iter_link; + struct file *file; + unsigned 
int flags; + int err, fd; + + if (link->ops != &bpf_iter_link_lops) + return -EINVAL; + + flags = O_RDONLY | O_CLOEXEC; + fd = get_unused_fd_flags(flags); + if (fd < 0) + return fd; + + file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags); + if (IS_ERR(file)) { + err = PTR_ERR(file); + goto free_fd; + } + + iter_link = container_of(link, struct bpf_iter_link, link); + err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link)); + if (err) + goto free_file; + + fd_install(fd, file); + return fd; + +free_file: + fput(file); +free_fd: + put_unused_fd(fd); + return err; +} + +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop) +{ + struct bpf_iter_priv_data *iter_priv; + struct seq_file *seq; + void *seq_priv; + + seq = meta->seq; + if (seq->file->f_op != &bpf_iter_fops) + return NULL; + + seq_priv = seq->private; + iter_priv = container_of(seq_priv, struct bpf_iter_priv_data, + target_private); + + if (in_stop && iter_priv->done_stop) + return NULL; + + meta->session_id = iter_priv->session_id; + meta->seq_num = iter_priv->seq_num; + + return iter_priv->prog; +} + +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx) +{ + int ret; + + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, ctx); + rcu_read_unlock(); + + /* bpf program can only return 0 or 1: + * 0 : okay + * 1 : retry the same object + * The bpf_iter_run_prog() return value + * will be seq_ops->show() return value. + */ + return ret == 0 ? 0 : -EAGAIN; +} + +BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn, + void *, callback_ctx, u64, flags) +{ + return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags); +} + +const struct bpf_func_proto bpf_for_each_map_elem_proto = { + .func = bpf_for_each_map_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_FUNC, + .arg3_type = ARG_PTR_TO_STACK_OR_NULL, + .arg4_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c new file mode 100644 index 000000000000..ffa7d11fc2bd --- /dev/null +++ b/kernel/bpf/bpf_local_storage.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/rculist.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/bpf.h> +#include <linux/btf_ids.h> +#include <linux/bpf_local_storage.h> +#include <net/sock.h> +#include <uapi/linux/sock_diag.h> +#include <uapi/linux/btf.h> + +#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE) + +static struct bpf_local_storage_map_bucket * +select_bucket(struct bpf_local_storage_map *smap, + struct bpf_local_storage_elem *selem) +{ + return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; +} + +static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size) +{ + struct bpf_map *map = &smap->map; + + if (!map->ops->map_local_storage_charge) + return 0; + + return map->ops->map_local_storage_charge(smap, owner, size); +} + +static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner, + u32 size) +{ + struct bpf_map *map = &smap->map; + + if (map->ops->map_local_storage_uncharge) + map->ops->map_local_storage_uncharge(smap, owner, size); +} + +static struct bpf_local_storage __rcu ** +owner_storage(struct bpf_local_storage_map *smap, void *owner) +{ + struct bpf_map *map = &smap->map; + + return map->ops->map_owner_storage_ptr(owner); +} + +static bool 
selem_linked_to_storage(const struct bpf_local_storage_elem *selem) +{ + return !hlist_unhashed(&selem->snode); +} + +static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem) +{ + return !hlist_unhashed(&selem->map_node); +} + +struct bpf_local_storage_elem * +bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, + void *value, bool charge_mem) +{ + struct bpf_local_storage_elem *selem; + + if (charge_mem && mem_charge(smap, owner, smap->elem_size)) + return NULL; + + selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN); + if (selem) { + if (value) + memcpy(SDATA(selem)->data, value, smap->map.value_size); + return selem; + } + + if (charge_mem) + mem_uncharge(smap, owner, smap->elem_size); + + return NULL; +} + +/* local_storage->lock must be held and selem->local_storage == local_storage. + * The caller must ensure selem->smap is still valid to be + * dereferenced for its smap->elem_size and smap->cache_idx. + */ +bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage, + struct bpf_local_storage_elem *selem, + bool uncharge_mem) +{ + struct bpf_local_storage_map *smap; + bool free_local_storage; + void *owner; + + smap = rcu_dereference(SDATA(selem)->smap); + owner = local_storage->owner; + + /* All uncharging on the owner must be done first. + * The owner may be freed once the last selem is unlinked + * from local_storage. + */ + if (uncharge_mem) + mem_uncharge(smap, owner, smap->elem_size); + + free_local_storage = hlist_is_singular_node(&selem->snode, + &local_storage->list); + if (free_local_storage) { + mem_uncharge(smap, owner, sizeof(struct bpf_local_storage)); + local_storage->owner = NULL; + + /* After this RCU_INIT, owner may be freed and cannot be used */ + RCU_INIT_POINTER(*owner_storage(smap, owner), NULL); + + /* local_storage is not freed now. local_storage->lock is + * still held and raw_spin_unlock_bh(&local_storage->lock) + * will be done by the caller. + * + * Although the unlock will be done under + * rcu_read_lock(), it is more intuitive to + * read if kfree_rcu(local_storage, rcu) is done + * after the raw_spin_unlock_bh(&local_storage->lock). + * + * Hence, a "bool free_local_storage" is returned + * to the caller which then calls the kfree_rcu() + * after unlock.
+ */ + } + hlist_del_init_rcu(&selem->snode); + if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) == + SDATA(selem)) + RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL); + + kfree_rcu(selem, rcu); + + return free_local_storage; +} + +static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem) +{ + struct bpf_local_storage *local_storage; + bool free_local_storage = false; + + if (unlikely(!selem_linked_to_storage(selem))) + /* selem has already been unlinked from sk */ + return; + + local_storage = rcu_dereference(selem->local_storage); + raw_spin_lock_bh(&local_storage->lock); + if (likely(selem_linked_to_storage(selem))) + free_local_storage = bpf_selem_unlink_storage_nolock( + local_storage, selem, true); + raw_spin_unlock_bh(&local_storage->lock); + + if (free_local_storage) + kfree_rcu(local_storage, rcu); +} + +void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage, + struct bpf_local_storage_elem *selem) +{ + RCU_INIT_POINTER(selem->local_storage, local_storage); + hlist_add_head(&selem->snode, &local_storage->list); +} + +void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem) +{ + struct bpf_local_storage_map *smap; + struct bpf_local_storage_map_bucket *b; + + if (unlikely(!selem_linked_to_map(selem))) + /* selem has already been unlinked from smap */ + return; + + smap = rcu_dereference(SDATA(selem)->smap); + b = select_bucket(smap, selem); + raw_spin_lock_bh(&b->lock); + if (likely(selem_linked_to_map(selem))) + hlist_del_init_rcu(&selem->map_node); + raw_spin_unlock_bh(&b->lock); +} + +void bpf_selem_link_map(struct bpf_local_storage_map *smap, + struct bpf_local_storage_elem *selem) +{ + struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem); + + raw_spin_lock_bh(&b->lock); + RCU_INIT_POINTER(SDATA(selem)->smap, smap); + hlist_add_head_rcu(&selem->map_node, &b->list); + raw_spin_unlock_bh(&b->lock); +} + +void bpf_selem_unlink(struct bpf_local_storage_elem *selem) +{ + /* Always unlink from map before unlinking from local_storage + * because selem will be freed after being successfully unlinked from + * the local_storage. + */ + bpf_selem_unlink_map(selem); + __bpf_selem_unlink_storage(selem); +} + +struct bpf_local_storage_data * +bpf_local_storage_lookup(struct bpf_local_storage *local_storage, + struct bpf_local_storage_map *smap, + bool cacheit_lockit) +{ + struct bpf_local_storage_data *sdata; + struct bpf_local_storage_elem *selem; + + /* Fast path (cache hit) */ + sdata = rcu_dereference(local_storage->cache[smap->cache_idx]); + if (sdata && rcu_access_pointer(sdata->smap) == smap) + return sdata; + + /* Slow path (cache miss) */ + hlist_for_each_entry_rcu(selem, &local_storage->list, snode) + if (rcu_access_pointer(SDATA(selem)->smap) == smap) + break; + + if (!selem) + return NULL; + + sdata = SDATA(selem); + if (cacheit_lockit) { + /* spinlock is needed to avoid racing with the + * parallel delete. Otherwise, publishing an already + * deleted sdata to the cache will become a use-after-free + * problem in the next bpf_local_storage_lookup().
+ */ + raw_spin_lock_bh(&local_storage->lock); + if (selem_linked_to_storage(selem)) + rcu_assign_pointer(local_storage->cache[smap->cache_idx], + sdata); + raw_spin_unlock_bh(&local_storage->lock); + } + + return sdata; +} + +static int check_flags(const struct bpf_local_storage_data *old_sdata, + u64 map_flags) +{ + if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) + /* elem already exists */ + return -EEXIST; + + if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) + /* elem doesn't exist, cannot update it */ + return -ENOENT; + + return 0; +} + +int bpf_local_storage_alloc(void *owner, + struct bpf_local_storage_map *smap, + struct bpf_local_storage_elem *first_selem) +{ + struct bpf_local_storage *prev_storage, *storage; + struct bpf_local_storage **owner_storage_ptr; + int err; + + err = mem_charge(smap, owner, sizeof(*storage)); + if (err) + return err; + + storage = kzalloc(sizeof(*storage), GFP_ATOMIC | __GFP_NOWARN); + if (!storage) { + err = -ENOMEM; + goto uncharge; + } + + INIT_HLIST_HEAD(&storage->list); + raw_spin_lock_init(&storage->lock); + storage->owner = owner; + + bpf_selem_link_storage_nolock(storage, first_selem); + bpf_selem_link_map(smap, first_selem); + + owner_storage_ptr = + (struct bpf_local_storage **)owner_storage(smap, owner); + /* Publish storage to the owner. + * Instead of using any lock of the kernel object (i.e. owner), + * cmpxchg will work with any kernel object regardless what + * the running context is, bh, irq...etc. + * + * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage) + * is protected by the storage->lock. Hence, when freeing + * the owner->storage, the storage->lock must be held before + * setting owner->storage ptr to NULL. + */ + prev_storage = cmpxchg(owner_storage_ptr, NULL, storage); + if (unlikely(prev_storage)) { + bpf_selem_unlink_map(first_selem); + err = -EAGAIN; + goto uncharge; + + /* Note that even first_selem was linked to smap's + * bucket->list, first_selem can be freed immediately + * (instead of kfree_rcu) because + * bpf_local_storage_map_free() does a + * synchronize_rcu() before walking the bucket->list. + * Hence, no one is accessing selem from the + * bucket->list under rcu_read_lock(). + */ + } + + return 0; + +uncharge: + kfree(storage); + mem_uncharge(smap, owner, sizeof(*storage)); + return err; +} + +/* sk cannot be going away because it is linking new elem + * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0). + * Otherwise, it will become a leak (and other memory issues + * during map destruction). 
+ */ +struct bpf_local_storage_data * +bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, + void *value, u64 map_flags) +{ + struct bpf_local_storage_data *old_sdata = NULL; + struct bpf_local_storage_elem *selem; + struct bpf_local_storage *local_storage; + int err; + + /* BPF_EXIST and BPF_NOEXIST cannot be both set */ + if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) || + /* BPF_F_LOCK can only be used in a value with spin_lock */ + unlikely((map_flags & BPF_F_LOCK) && + !map_value_has_spin_lock(&smap->map))) + return ERR_PTR(-EINVAL); + + local_storage = rcu_dereference(*owner_storage(smap, owner)); + if (!local_storage || hlist_empty(&local_storage->list)) { + /* Very first elem for the owner */ + err = check_flags(NULL, map_flags); + if (err) + return ERR_PTR(err); + + selem = bpf_selem_alloc(smap, owner, value, true); + if (!selem) + return ERR_PTR(-ENOMEM); + + err = bpf_local_storage_alloc(owner, smap, selem); + if (err) { + kfree(selem); + mem_uncharge(smap, owner, smap->elem_size); + return ERR_PTR(err); + } + + return SDATA(selem); + } + + if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) { + /* Hoping to find an old_sdata to do inline update + * such that it can avoid taking the local_storage->lock + * and changing the lists. + */ + old_sdata = + bpf_local_storage_lookup(local_storage, smap, false); + err = check_flags(old_sdata, map_flags); + if (err) + return ERR_PTR(err); + if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) { + copy_map_value_locked(&smap->map, old_sdata->data, + value, false); + return old_sdata; + } + } + + raw_spin_lock_bh(&local_storage->lock); + + /* Recheck local_storage->list under local_storage->lock */ + if (unlikely(hlist_empty(&local_storage->list))) { + /* A parallel del is happening and local_storage is going + * away. It has just been checked before, so very + * unlikely. Return instead of retry to keep things + * simple. + */ + err = -EAGAIN; + goto unlock_err; + } + + old_sdata = bpf_local_storage_lookup(local_storage, smap, false); + err = check_flags(old_sdata, map_flags); + if (err) + goto unlock_err; + + if (old_sdata && (map_flags & BPF_F_LOCK)) { + copy_map_value_locked(&smap->map, old_sdata->data, value, + false); + selem = SELEM(old_sdata); + goto unlock; + } + + /* local_storage->lock is held. Hence, we are sure + * we can unlink and uncharge the old_sdata successfully + * later. Hence, instead of charging the new selem now + * and then uncharge the old selem later (which may cause + * a potential but unnecessary charge failure), avoid taking + * a charge at all here (the "!old_sdata" check) and the + * old_sdata will not be uncharged later during + * bpf_selem_unlink_storage_nolock(). 
+ */ + selem = bpf_selem_alloc(smap, owner, value, !old_sdata); + if (!selem) { + err = -ENOMEM; + goto unlock_err; + } + + /* First, link the new selem to the map */ + bpf_selem_link_map(smap, selem); + + /* Second, link (and publish) the new selem to local_storage */ + bpf_selem_link_storage_nolock(local_storage, selem); + + /* Third, remove old selem, SELEM(old_sdata) */ + if (old_sdata) { + bpf_selem_unlink_map(SELEM(old_sdata)); + bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata), + false); + } + +unlock: + raw_spin_unlock_bh(&local_storage->lock); + return SDATA(selem); + +unlock_err: + raw_spin_unlock_bh(&local_storage->lock); + return ERR_PTR(err); +} + +u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache) +{ + u64 min_usage = U64_MAX; + u16 i, res = 0; + + spin_lock(&cache->idx_lock); + + for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) { + if (cache->idx_usage_counts[i] < min_usage) { + min_usage = cache->idx_usage_counts[i]; + res = i; + + /* Found a free cache_idx */ + if (!min_usage) + break; + } + } + cache->idx_usage_counts[res]++; + + spin_unlock(&cache->idx_lock); + + return res; +} + +void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache, + u16 idx) +{ + spin_lock(&cache->idx_lock); + cache->idx_usage_counts[idx]--; + spin_unlock(&cache->idx_lock); +} + +void bpf_local_storage_map_free(struct bpf_local_storage_map *smap) +{ + struct bpf_local_storage_elem *selem; + struct bpf_local_storage_map_bucket *b; + unsigned int i; + + /* Note that this map might be concurrently cloned from + * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone + * RCU read section to finish before proceeding. New RCU + * read sections should be prevented via bpf_map_inc_not_zero. + */ + synchronize_rcu(); + + /* bpf prog and the userspace can no longer access this map + * now. No new selem (of this map) can be added + * to the owner->storage or to the map bucket's list. + * + * The elem of this map can be cleaned up here + * or when the storage is freed e.g. + * by bpf_sk_storage_free() during __sk_destruct(). + */ + for (i = 0; i < (1U << smap->bucket_log); i++) { + b = &smap->buckets[i]; + + rcu_read_lock(); + /* No one is adding to b->list now */ + while ((selem = hlist_entry_safe( + rcu_dereference_raw(hlist_first_rcu(&b->list)), + struct bpf_local_storage_elem, map_node))) { + bpf_selem_unlink(selem); + cond_resched_rcu(); + } + rcu_read_unlock(); + } + + /* While freeing the storage we may still need to access the map. + * + * e.g. when bpf_sk_storage_free() has unlinked selem from the map + * which then made the above while((selem = ...)) loop + * exit immediately. + * + * However, while freeing the storage one still needs to access the + * smap->elem_size to do the uncharging in + * bpf_selem_unlink_storage_nolock(). + * + * Hence, wait another rcu grace period for the storage to be freed. 
+ */ + synchronize_rcu(); + + kvfree(smap->buckets); + kfree(smap); +} + +int bpf_local_storage_map_alloc_check(union bpf_attr *attr) +{ + if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK || + !(attr->map_flags & BPF_F_NO_PREALLOC) || + attr->max_entries || + attr->key_size != sizeof(int) || !attr->value_size || + /* Enforce BTF for userspace sk dumping */ + !attr->btf_key_type_id || !attr->btf_value_type_id) + return -EINVAL; + + if (!bpf_capable()) + return -EPERM; + + if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE) + return -E2BIG; + + return 0; +} + +struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr) +{ + struct bpf_local_storage_map *smap; + unsigned int i; + u32 nbuckets; + u64 cost; + int ret; + + smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN); + if (!smap) + return ERR_PTR(-ENOMEM); + bpf_map_init_from_attr(&smap->map, attr); + + nbuckets = roundup_pow_of_two(num_possible_cpus()); + /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ + nbuckets = max_t(u32, 2, nbuckets); + smap->bucket_log = ilog2(nbuckets); + cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); + + ret = bpf_map_charge_init(&smap->map.memory, cost); + if (ret < 0) { + kfree(smap); + return ERR_PTR(ret); + } + + smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, + GFP_USER | __GFP_NOWARN); + if (!smap->buckets) { + bpf_map_charge_finish(&smap->map.memory); + kfree(smap); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < nbuckets; i++) { + INIT_HLIST_HEAD(&smap->buckets[i].list); + raw_spin_lock_init(&smap->buckets[i].lock); + } + + smap->elem_size = + sizeof(struct bpf_local_storage_elem) + attr->value_size; + + return smap; +} + +int bpf_local_storage_map_check_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type) +{ + u32 int_data; + + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) + return -EINVAL; + + int_data = *(u32 *)(key_type + 1); + if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) + return -EINVAL; + + return 0; +} diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index 1b6b9349cb85..d99e89f113c4 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) static void bpf_common_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) { + u8 node_type = READ_ONCE(node->type); unsigned long flags; - if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) || - WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE)) + if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) || + WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE)) return; - if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) { + if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) { struct bpf_lru_locallist *loc_l; loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c new file mode 100644 index 000000000000..9cd1428c7199 --- /dev/null +++ b/kernel/bpf/bpf_lsm.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. 
+ */ + +#include <linux/filter.h> +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/lsm_hooks.h> +#include <linux/bpf_lsm.h> +#include <linux/kallsyms.h> +#include <linux/bpf_verifier.h> +#include <net/bpf_sk_storage.h> +#include <linux/bpf_local_storage.h> + +/* For every LSM hook that allows attachment of BPF programs, declare a nop + * function where a BPF program can be attached. + */ +#define LSM_HOOK(RET, DEFAULT, NAME, ...) \ +noinline RET bpf_lsm_##NAME(__VA_ARGS__) \ +{ \ + return DEFAULT; \ +} + +#include <linux/lsm_hook_defs.h> +#undef LSM_HOOK + +#define BPF_LSM_SYM_PREFX "bpf_lsm_" + +int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog) +{ + if (!prog->gpl_compatible) { + bpf_log(vlog, + "LSM programs must have a GPL compatible license\n"); + return -EINVAL; + } + + if (strncmp(BPF_LSM_SYM_PREFX, prog->aux->attach_func_name, + sizeof(BPF_LSM_SYM_PREFX) - 1)) { + bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n", + prog->aux->attach_btf_id, prog->aux->attach_func_name); + return -EINVAL; + } + + return 0; +} + +static const struct bpf_func_proto * +bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_inode_storage_get: + return &bpf_inode_storage_get_proto; + case BPF_FUNC_inode_storage_delete: + return &bpf_inode_storage_delete_proto; + case BPF_FUNC_sk_storage_get: + return &sk_storage_get_btf_proto; + case BPF_FUNC_sk_storage_delete: + return &sk_storage_delete_btf_proto; + default: + return tracing_prog_func_proto(func_id, prog); + } +} + +const struct bpf_prog_ops lsm_prog_ops = { +}; + +const struct bpf_verifier_ops lsm_verifier_ops = { + .get_func_proto = bpf_lsm_func_proto, + .is_valid_access = btf_ctx_access, +}; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c new file mode 100644 index 000000000000..ff240ae7f353 --- /dev/null +++ b/kernel/bpf/bpf_struct_ops.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ + +#include <linux/bpf.h> +#include <linux/bpf_verifier.h> +#include <linux/btf.h> +#include <linux/filter.h> +#include <linux/slab.h> +#include <linux/numa.h> +#include <linux/seq_file.h> +#include <linux/refcount.h> +#include <linux/mutex.h> + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT, + BPF_STRUCT_OPS_STATE_INUSE, + BPF_STRUCT_OPS_STATE_TOBEFREE, +}; + +#define BPF_STRUCT_OPS_COMMON_VALUE \ + refcount_t refcnt; \ + enum bpf_struct_ops_state state + +struct bpf_struct_ops_value { + BPF_STRUCT_OPS_COMMON_VALUE; + char data[0] ____cacheline_aligned_in_smp; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops *st_ops; + /* protect map_update */ + struct mutex lock; + /* progs has all the bpf_prog that is populated + * to the func ptr of the kernel's struct + * (in kvalue.data). + */ + struct bpf_prog **progs; + /* image is a page that has all the trampolines + * that stores the func args before calling the bpf_prog. + * A PAGE_SIZE "image" is enough to store all trampoline for + * "progs[]". + */ + void *image; + /* uvalue->data stores the kernel struct + * (e.g. tcp_congestion_ops) that is more useful + * to userspace than the kvalue. For example, + * the bpf_prog's id is stored instead of the kernel + * address of a func ptr. + */ + struct bpf_struct_ops_value *uvalue; + /* kvalue.data stores the actual kernel's struct + * (e.g. tcp_congestion_ops) that will be + * registered to the kernel subsystem. 
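+	 *
+	 * Rough layout for tcp_congestion_ops (illustrative only):
+	 *
+	 *   kvalue: [refcnt][state][pad][struct tcp_congestion_ops]
+	 *   uvalue: same layout, but each func ptr slot carries the
+	 *           attached bpf_prog's id instead of a kernel address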
+ */ + struct bpf_struct_ops_value kvalue; +}; + +#define VALUE_PREFIX "bpf_struct_ops_" +#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1) + +/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is + * the map's value exposed to the userspace and its btf-type-id is + * stored at the map->btf_vmlinux_value_type_id. + * + */ +#define BPF_STRUCT_OPS_TYPE(_name) \ +extern struct bpf_struct_ops bpf_##_name; \ + \ +struct bpf_struct_ops_##_name { \ + BPF_STRUCT_OPS_COMMON_VALUE; \ + struct _name data ____cacheline_aligned_in_smp; \ +}; +#include "bpf_struct_ops_types.h" +#undef BPF_STRUCT_OPS_TYPE + +enum { +#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name, +#include "bpf_struct_ops_types.h" +#undef BPF_STRUCT_OPS_TYPE + __NR_BPF_STRUCT_OPS_TYPE, +}; + +static struct bpf_struct_ops * const bpf_struct_ops[] = { +#define BPF_STRUCT_OPS_TYPE(_name) \ + [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name, +#include "bpf_struct_ops_types.h" +#undef BPF_STRUCT_OPS_TYPE +}; + +const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = { +}; + +const struct bpf_prog_ops bpf_struct_ops_prog_ops = { +}; + +static const struct btf_type *module_type; + +void bpf_struct_ops_init(struct btf *btf) +{ + s32 type_id, value_id, module_id; + const struct btf_member *member; + struct bpf_struct_ops *st_ops; + struct bpf_verifier_log log = {}; + const struct btf_type *t; + char value_name[128]; + const char *mname; + u32 i, j; + + /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ +#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name); +#include "bpf_struct_ops_types.h" +#undef BPF_STRUCT_OPS_TYPE + + module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT); + if (module_id < 0) { + pr_warn("Cannot find struct module in btf_vmlinux\n"); + return; + } + module_type = btf_type_by_id(btf, module_id); + + for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { + st_ops = bpf_struct_ops[i]; + + if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= + sizeof(value_name)) { + pr_warn("struct_ops name %s is too long\n", + st_ops->name); + continue; + } + sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); + + value_id = btf_find_by_name_kind(btf, value_name, + BTF_KIND_STRUCT); + if (value_id < 0) { + pr_warn("Cannot find struct %s in btf_vmlinux\n", + value_name); + continue; + } + + type_id = btf_find_by_name_kind(btf, st_ops->name, + BTF_KIND_STRUCT); + if (type_id < 0) { + pr_warn("Cannot find struct %s in btf_vmlinux\n", + st_ops->name); + continue; + } + t = btf_type_by_id(btf, type_id); + if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { + pr_warn("Cannot support #%u members in struct %s\n", + btf_type_vlen(t), st_ops->name); + continue; + } + + for_each_member(j, t, member) { + const struct btf_type *func_proto; + + mname = btf_name_by_offset(btf, member->name_off); + if (!*mname) { + pr_warn("anon member in struct %s is not supported\n", + st_ops->name); + break; + } + + if (btf_member_bitfield_size(t, member)) { + pr_warn("bit field member %s in struct %s is not supported\n", + mname, st_ops->name); + break; + } + + func_proto = btf_type_resolve_func_ptr(btf, + member->type, + NULL); + if (func_proto && + btf_distill_func_proto(&log, btf, + func_proto, mname, + &st_ops->func_models[j])) { + pr_warn("Error in parsing func ptr %s in struct %s\n", + mname, st_ops->name); + break; + } + } + + if (j == btf_type_vlen(t)) { + if (st_ops->init(btf)) { + pr_warn("Error in init bpf_struct_ops %s\n", + st_ops->name); + } else { + st_ops->type_id = 
type_id; + st_ops->type = t; + st_ops->value_id = value_id; + st_ops->value_type = btf_type_by_id(btf, + value_id); + } + } + } +} + +extern struct btf *btf_vmlinux; + +static const struct bpf_struct_ops * +bpf_struct_ops_find_value(u32 value_id) +{ + unsigned int i; + + if (!value_id || !btf_vmlinux) + return NULL; + + for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { + if (bpf_struct_ops[i]->value_id == value_id) + return bpf_struct_ops[i]; + } + + return NULL; +} + +const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) +{ + unsigned int i; + + if (!type_id || !btf_vmlinux) + return NULL; + + for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { + if (bpf_struct_ops[i]->type_id == type_id) + return bpf_struct_ops[i]; + } + + return NULL; +} + +static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + if (key && *(u32 *)key == 0) + return -ENOENT; + + *(u32 *)next_key = 0; + return 0; +} + +int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, + void *value) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + struct bpf_struct_ops_value *uvalue, *kvalue; + enum bpf_struct_ops_state state; + + if (unlikely(*(u32 *)key != 0)) + return -ENOENT; + + kvalue = &st_map->kvalue; + /* Pair with smp_store_release() during map_update */ + state = smp_load_acquire(&kvalue->state); + if (state == BPF_STRUCT_OPS_STATE_INIT) { + memset(value, 0, map->value_size); + return 0; + } + + /* No lock is needed. state and refcnt do not need + * to be updated together under atomic context. + */ + uvalue = (struct bpf_struct_ops_value *)value; + memcpy(uvalue, st_map->uvalue, map->value_size); + uvalue->state = state; + refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt)); + + return 0; +} + +static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) +{ + return ERR_PTR(-EINVAL); +} + +static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) +{ + const struct btf_type *t = st_map->st_ops->type; + u32 i; + + for (i = 0; i < btf_type_vlen(t); i++) { + if (st_map->progs[i]) { + bpf_prog_put(st_map->progs[i]); + st_map->progs[i] = NULL; + } + } +} + +static int check_zero_holes(const struct btf_type *t, void *data) +{ + const struct btf_member *member; + u32 i, moff, msize, prev_mend = 0; + const struct btf_type *mtype; + + for_each_member(i, t, member) { + moff = btf_member_bit_offset(t, member) / 8; + if (moff > prev_mend && + memchr_inv(data + prev_mend, 0, moff - prev_mend)) + return -EINVAL; + + mtype = btf_type_by_id(btf_vmlinux, member->type); + mtype = btf_resolve_size(btf_vmlinux, mtype, &msize, + NULL, NULL); + if (IS_ERR(mtype)) + return PTR_ERR(mtype); + prev_mend = moff + msize; + } + + if (t->size > prev_mend && + memchr_inv(data + prev_mend, 0, t->size - prev_mend)) + return -EINVAL; + + return 0; +} + +static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + const struct bpf_struct_ops *st_ops = st_map->st_ops; + struct bpf_struct_ops_value *uvalue, *kvalue; + const struct btf_member *member; + const struct btf_type *t = st_ops->type; + struct bpf_tramp_progs *tprogs = NULL; + void *udata, *kdata; + int prog_fd, err = 0; + void *image; + u32 i; + + if (flags) + return -EINVAL; + + if (*(u32 *)key != 0) + return -E2BIG; + + err = check_zero_holes(st_ops->value_type, value); + if (err) + return err; + + uvalue = (struct bpf_struct_ops_value *)value; + err 
= check_zero_holes(t, uvalue->data); + if (err) + return err; + + if (uvalue->state || refcount_read(&uvalue->refcnt)) + return -EINVAL; + + tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); + if (!tprogs) + return -ENOMEM; + + uvalue = (struct bpf_struct_ops_value *)st_map->uvalue; + kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue; + + mutex_lock(&st_map->lock); + + if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) { + err = -EBUSY; + goto unlock; + } + + memcpy(uvalue, value, map->value_size); + + udata = &uvalue->data; + kdata = &kvalue->data; + image = st_map->image; + + for_each_member(i, t, member) { + const struct btf_type *mtype, *ptype; + struct bpf_prog *prog; + u32 moff; + + moff = btf_member_bit_offset(t, member) / 8; + ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); + if (ptype == module_type) { + if (*(void **)(udata + moff)) + goto reset_unlock; + *(void **)(kdata + moff) = BPF_MODULE_OWNER; + continue; + } + + err = st_ops->init_member(t, member, kdata, udata); + if (err < 0) + goto reset_unlock; + + /* The ->init_member() has handled this member */ + if (err > 0) + continue; + + /* If st_ops->init_member does not handle it, + * we will only handle func ptrs and zero-ed members + * here. Reject everything else. + */ + + /* All non func ptr member must be 0 */ + if (!ptype || !btf_type_is_func_proto(ptype)) { + u32 msize; + + mtype = btf_type_by_id(btf_vmlinux, member->type); + mtype = btf_resolve_size(btf_vmlinux, mtype, &msize, + NULL, NULL); + if (IS_ERR(mtype)) { + err = PTR_ERR(mtype); + goto reset_unlock; + } + + if (memchr_inv(udata + moff, 0, msize)) { + err = -EINVAL; + goto reset_unlock; + } + + continue; + } + + prog_fd = (int)(*(unsigned long *)(udata + moff)); + /* Similar check as the attr->attach_prog_fd */ + if (!prog_fd) + continue; + + prog = bpf_prog_get(prog_fd); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto reset_unlock; + } + st_map->progs[i] = prog; + + if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || + prog->aux->attach_btf_id != st_ops->type_id || + prog->expected_attach_type != i) { + err = -EINVAL; + goto reset_unlock; + } + + tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; + tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; + err = arch_prepare_bpf_trampoline(image, + st_map->image + PAGE_SIZE, + &st_ops->func_models[i], 0, + tprogs, NULL); + if (err < 0) + goto reset_unlock; + + *(void **)(kdata + moff) = image; + image += err; + + /* put prog_id to udata */ + *(unsigned long *)(udata + moff) = prog->aux->id; + } + + refcount_set(&kvalue->refcnt, 1); + bpf_map_inc(map); + + set_memory_ro((long)st_map->image, 1); + set_memory_x((long)st_map->image, 1); + err = st_ops->reg(kdata); + if (likely(!err)) { + /* Pair with smp_load_acquire() during lookup_elem(). + * It ensures the above udata updates (e.g. prog->aux->id) + * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set. + */ + smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE); + goto unlock; + } + + /* Error during st_ops->reg(). It is very unlikely since + * the above init_member() should have caught it earlier + * before reg(). The only possibility is if there was a race + * in registering the struct_ops (under the same name) to + * a sub-system through different struct_ops's maps. 
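+	 * E.g. (hypothetical): two struct_ops maps both wrapping a
+	 * tcp_congestion_ops named "bpf_cubic" race through reg();
+	 * the loser's tcp_register_congestion_control() fails with
+	 * -EEXIST and that error is returned to user space here.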
+ */
+	set_memory_nx((long)st_map->image, 1);
+	set_memory_rw((long)st_map->image, 1);
+	bpf_map_put(map);
+
+reset_unlock:
+	bpf_struct_ops_map_put_progs(st_map);
+	memset(uvalue, 0, map->value_size);
+	memset(kvalue, 0, map->value_size);
+unlock:
+	kfree(tprogs);
+	mutex_unlock(&st_map->lock);
+	return err;
+}
+
+static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
+{
+	enum bpf_struct_ops_state prev_state;
+	struct bpf_struct_ops_map *st_map;
+
+	st_map = (struct bpf_struct_ops_map *)map;
+	prev_state = cmpxchg(&st_map->kvalue.state,
+			     BPF_STRUCT_OPS_STATE_INUSE,
+			     BPF_STRUCT_OPS_STATE_TOBEFREE);
+	if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
+		st_map->st_ops->unreg(&st_map->kvalue.data);
+		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
+			bpf_map_put(map);
+	}
+
+	return 0;
+}
+
+static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
+					     struct seq_file *m)
+{
+	void *value;
+
+	value = bpf_struct_ops_map_lookup_elem(map, key);
+	if (IS_ERR_OR_NULL(value))
+		return;
+
+	btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
+			  value, m);
+	seq_puts(m, "\n");
+}
+
+static void bpf_struct_ops_map_free(struct bpf_map *map)
+{
+	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+	if (st_map->progs)
+		bpf_struct_ops_map_put_progs(st_map);
+	bpf_map_area_free(st_map->progs);
+	bpf_jit_free_exec(st_map->image);
+	bpf_map_area_free(st_map->uvalue);
+	bpf_map_area_free(st_map);
+}
+
+static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
+{
+	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
+	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
+		return -EINVAL;
+	return 0;
+}
+
+static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
+{
+	const struct bpf_struct_ops *st_ops;
+	size_t map_total_size, st_map_size;
+	struct bpf_struct_ops_map *st_map;
+	const struct btf_type *t, *vt;
+	struct bpf_map_memory mem;
+	struct bpf_map *map;
+	int err;
+
+	if (!bpf_capable())
+		return ERR_PTR(-EPERM);
+
+	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
+	if (!st_ops)
+		return ERR_PTR(-ENOTSUPP);
+
+	vt = st_ops->value_type;
+	if (attr->value_size != vt->size)
+		return ERR_PTR(-EINVAL);
+
+	t = st_ops->type;
+
+	st_map_size = sizeof(*st_map) +
+		/* kvalue stores the
+		 * struct bpf_struct_ops_tcp_congestion_ops
+		 */
+		(vt->size - sizeof(struct bpf_struct_ops_value));
+	map_total_size = st_map_size +
+		/* uvalue */
+		vt->size +
+		/* struct bpf_prog **progs */
+		btf_type_vlen(t) * sizeof(struct bpf_prog *);
+	err = bpf_map_charge_init(&mem, map_total_size);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
+	if (!st_map) {
+		bpf_map_charge_finish(&mem);
+		return ERR_PTR(-ENOMEM);
+	}
+	st_map->st_ops = st_ops;
+	map = &st_map->map;
+
+	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
+	st_map->progs =
+		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
+				   NUMA_NO_NODE);
+	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
+	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
+		bpf_struct_ops_map_free(map);
+		bpf_map_charge_finish(&mem);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_init(&st_map->lock);
+	set_vm_flush_reset_perms(st_map->image);
+	bpf_map_init_from_attr(map, attr);
+	bpf_map_charge_move(&map->memory, &mem);
+
+	return map;
+}
+
+static int bpf_struct_ops_map_btf_id;
+const struct bpf_map_ops bpf_struct_ops_map_ops = {
+	.map_alloc_check = bpf_struct_ops_map_alloc_check,
+	.map_alloc =
bpf_struct_ops_map_alloc, + .map_free = bpf_struct_ops_map_free, + .map_get_next_key = bpf_struct_ops_map_get_next_key, + .map_lookup_elem = bpf_struct_ops_map_lookup_elem, + .map_delete_elem = bpf_struct_ops_map_delete_elem, + .map_update_elem = bpf_struct_ops_map_update_elem, + .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem, + .map_btf_name = "bpf_struct_ops_map", + .map_btf_id = &bpf_struct_ops_map_btf_id, +}; + +/* "const void *" because some subsystem is + * passing a const (e.g. const struct tcp_congestion_ops *) + */ +bool bpf_struct_ops_get(const void *kdata) +{ + struct bpf_struct_ops_value *kvalue; + + kvalue = container_of(kdata, struct bpf_struct_ops_value, data); + + return refcount_inc_not_zero(&kvalue->refcnt); +} + +void bpf_struct_ops_put(const void *kdata) +{ + struct bpf_struct_ops_value *kvalue; + + kvalue = container_of(kdata, struct bpf_struct_ops_value, data); + if (refcount_dec_and_test(&kvalue->refcnt)) { + struct bpf_struct_ops_map *st_map; + + st_map = container_of(kvalue, struct bpf_struct_ops_map, + kvalue); + bpf_map_put(&st_map->map); + } +} diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h new file mode 100644 index 000000000000..066d83ea1c99 --- /dev/null +++ b/kernel/bpf/bpf_struct_ops_types.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* internal file - do not include directly */ + +#ifdef CONFIG_BPF_JIT +#ifdef CONFIG_INET +#include <net/tcp.h> +BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) +#endif +#endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index b03087f110eb..f0bf8fbb15db 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2,6 +2,8 @@ /* Copyright (c) 2018 Facebook */ #include <uapi/linux/btf.h> +#include <uapi/linux/bpf.h> +#include <uapi/linux/bpf_perf_event.h> #include <uapi/linux/types.h> #include <linux/seq_file.h> #include <linux/compiler.h> @@ -16,6 +18,10 @@ #include <linux/sort.h> #include <linux/bpf_verifier.h> #include <linux/btf.h> +#include <linux/btf_ids.h> +#include <linux/skmsg.h> +#include <linux/perf_event.h> +#include <net/sock.h> /* BTF (BPF Type Format) is the meta data format which describes * the data types of BPF program/map. 
Hence, it basically focus @@ -175,11 +181,6 @@ */ #define BTF_MAX_SIZE (16 * 1024 * 1024) -#define for_each_member(i, struct_type, member) \ - for (i = 0, member = btf_type_member(struct_type); \ - i < btf_type_vlen(struct_type); \ - i++, member++) - #define for_each_member_from(i, from, struct_type, member) \ for (i = from, member = btf_type_member(struct_type) + from; \ i < btf_type_vlen(struct_type); \ @@ -276,6 +277,11 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_DATASEC] = "DATASEC", }; +static const char *btf_type_str(const struct btf_type *t) +{ + return btf_kind_str[BTF_INFO_KIND(t->info)]; +} + struct btf_kind_operations { s32 (*check_meta)(struct btf_verifier_env *env, const struct btf_type *t, @@ -336,16 +342,6 @@ static bool btf_type_is_fwd(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_FWD; } -static bool btf_type_is_func(const struct btf_type *t) -{ - return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC; -} - -static bool btf_type_is_func_proto(const struct btf_type *t) -{ - return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO; -} - static bool btf_type_nosize(const struct btf_type *t) { return btf_type_is_void(t) || btf_type_is_fwd(t) || @@ -357,16 +353,6 @@ static bool btf_type_nosize_or_null(const struct btf_type *t) return !t || btf_type_nosize(t); } -/* union is only a special case of struct: - * all its offsetof(member) == 0 - */ -static bool btf_type_is_struct(const struct btf_type *t) -{ - u8 kind = BTF_INFO_KIND(t->info); - - return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; -} - static bool __btf_type_is_struct(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT; @@ -377,26 +363,70 @@ static bool btf_type_is_array(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY; } -static bool btf_type_is_ptr(const struct btf_type *t) -{ - return BTF_INFO_KIND(t->info) == BTF_KIND_PTR; -} - -static bool btf_type_is_int(const struct btf_type *t) -{ - return BTF_INFO_KIND(t->info) == BTF_KIND_INT; -} - -static bool btf_type_is_var(const struct btf_type *t) -{ - return BTF_INFO_KIND(t->info) == BTF_KIND_VAR; -} - static bool btf_type_is_datasec(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; } +s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) +{ + const struct btf_type *t; + const char *tname; + u32 i; + + for (i = 1; i <= btf->nr_types; i++) { + t = btf->types[i]; + if (BTF_INFO_KIND(t->info) != kind) + continue; + + tname = btf_name_by_offset(btf, t->name_off); + if (!strcmp(tname, name)) + return i; + } + + return -ENOENT; +} + +const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, + u32 id, u32 *res_id) +{ + const struct btf_type *t = btf_type_by_id(btf, id); + + while (btf_type_is_modifier(t)) { + id = t->type; + t = btf_type_by_id(btf, t->type); + } + + if (res_id) + *res_id = id; + + return t; +} + +const struct btf_type *btf_type_resolve_ptr(const struct btf *btf, + u32 id, u32 *res_id) +{ + const struct btf_type *t; + + t = btf_type_skip_modifiers(btf, id, NULL); + if (!btf_type_is_ptr(t)) + return NULL; + + return btf_type_skip_modifiers(btf, t->type, res_id); +} + +const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, + u32 id, u32 *res_id) +{ + const struct btf_type *ptype; + + ptype = btf_type_resolve_ptr(btf, id, res_id); + if (ptype && btf_type_is_func_proto(ptype)) + return ptype; + + return NULL; +} + /* Types that act only as a source, not sink or intermediate * type when 
resolving. */ @@ -461,30 +491,6 @@ static const char *btf_int_encoding_str(u8 encoding) return "UNKN"; } -static u16 btf_type_vlen(const struct btf_type *t) -{ - return BTF_INFO_VLEN(t->info); -} - -static bool btf_type_kflag(const struct btf_type *t) -{ - return BTF_INFO_KFLAG(t->info); -} - -static u32 btf_member_bit_offset(const struct btf_type *struct_type, - const struct btf_member *member) -{ - return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset) - : member->offset; -} - -static u32 btf_member_bitfield_size(const struct btf_type *struct_type, - const struct btf_member *member) -{ - return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset) - : 0; -} - static u32 btf_type_int(const struct btf_type *t) { return *(u32 *)(t + 1); @@ -495,11 +501,6 @@ static const struct btf_array *btf_type_array(const struct btf_type *t) return (const struct btf_array *)(t + 1); } -static const struct btf_member *btf_type_member(const struct btf_type *t) -{ - return (const struct btf_member *)(t + 1); -} - static const struct btf_enum *btf_type_enum(const struct btf_type *t) { return (const struct btf_enum *)(t + 1); @@ -698,6 +699,13 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; + /* btf verifier prints all types it is processing via + * btf_verifier_log_type(..., fmt = NULL). + * Skip those prints for in-kernel BTF verification. + */ + if (log->level == BPF_LOG_KERNEL && !fmt) + return; + __btf_verifier_log(log, "[%u] %s %s%s", env->log_type_id, btf_kind_str[kind], @@ -735,6 +743,8 @@ static void btf_verifier_log_member(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; + if (log->level == BPF_LOG_KERNEL && !fmt) + return; /* The CHECK_META phase already did a btf dump. * * If member is logged again, it must hit an error in @@ -777,6 +787,8 @@ static void btf_verifier_log_vsi(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; + if (log->level == BPF_LOG_KERNEL && !fmt) + return; if (env->phase != CHECK_META) btf_verifier_log_type(env, datasec_type, NULL); @@ -802,6 +814,8 @@ static void btf_verifier_log_hdr(struct btf_verifier_env *env, if (!bpf_verifier_log_needed(log)) return; + if (log->level == BPF_LOG_KERNEL) + return; hdr = &btf->hdr; __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic); __btf_verifier_log(log, "version: %u\n", hdr->version); @@ -910,6 +924,11 @@ static void btf_free_rcu(struct rcu_head *rcu) btf_free(btf); } +void btf_get(struct btf *btf) +{ + refcount_inc(&btf->refcnt); +} + void btf_put(struct btf *btf) { if (btf && refcount_dec_and_test(&btf->refcnt)) { @@ -1043,6 +1062,84 @@ static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; } +/* Resolve the size of a passed-in "type" + * + * type: is an array (e.g. u32 array[x][y]) + * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY, + * *type_size: (x * y * sizeof(u32)). Hence, *type_size always + * corresponds to the return type. + * *elem_type: u32 + * *total_nelems: (x * y). Hence, individual elem size is + * (*type_size / *total_nelems) + * + * type: is not an array (e.g. 
const struct X) + * return type: type "struct X" + * *type_size: sizeof(struct X) + * *elem_type: same as return type ("struct X") + * *total_nelems: 1 + */ +const struct btf_type * +btf_resolve_size(const struct btf *btf, const struct btf_type *type, + u32 *type_size, const struct btf_type **elem_type, + u32 *total_nelems) +{ + const struct btf_type *array_type = NULL; + const struct btf_array *array; + u32 i, size, nelems = 1; + + for (i = 0; i < MAX_RESOLVE_DEPTH; i++) { + switch (BTF_INFO_KIND(type->info)) { + /* type->size can be used */ + case BTF_KIND_INT: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + size = type->size; + goto resolved; + + case BTF_KIND_PTR: + size = sizeof(void *); + goto resolved; + + /* Modifiers */ + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + type = btf_type_by_id(btf, type->type); + break; + + case BTF_KIND_ARRAY: + if (!array_type) + array_type = type; + array = btf_type_array(type); + if (nelems && array->nelems > U32_MAX / nelems) + return ERR_PTR(-EINVAL); + nelems *= array->nelems; + type = btf_type_by_id(btf, array->type); + break; + + /* type without size */ + default: + return ERR_PTR(-EINVAL); + } + } + + return ERR_PTR(-EINVAL); + +resolved: + if (nelems && size > U32_MAX / nelems) + return ERR_PTR(-EINVAL); + + *type_size = nelems * size; + if (total_nelems) + *total_nelems = nelems; + if (elem_type) + *elem_type = type; + + return array_type ? : type; +} + /* The input param "type_id" must point to a needs_resolve type */ static const struct btf_type *btf_type_id_resolve(const struct btf *btf, u32 *type_id) @@ -1752,7 +1849,10 @@ static void btf_modifier_seq_show(const struct btf *btf, u32 type_id, void *data, u8 bits_offset, struct seq_file *m) { - t = btf_type_id_resolve(btf, &type_id); + if (btf->resolved_ids) + t = btf_type_id_resolve(btf, &type_id); + else + t = btf_type_skip_modifiers(btf, type_id, NULL); btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m); } @@ -2211,43 +2311,92 @@ static void btf_struct_log(struct btf_verifier_env *env, btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); } -/* find 'struct bpf_spin_lock' in map value. 
- * return >= 0 offset if found - * and < 0 in case of error - */ -int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) +static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, + const char *name, int sz, int align) { const struct btf_member *member; u32 i, off = -ENOENT; - if (!__btf_type_is_struct(t)) - return -EINVAL; - for_each_member(i, t, member) { const struct btf_type *member_type = btf_type_by_id(btf, member->type); if (!__btf_type_is_struct(member_type)) continue; - if (member_type->size != sizeof(struct bpf_spin_lock)) + if (member_type->size != sz) continue; - if (strcmp(__btf_name_by_offset(btf, member_type->name_off), - "bpf_spin_lock")) + if (strcmp(__btf_name_by_offset(btf, member_type->name_off), name)) continue; if (off != -ENOENT) - /* only one 'struct bpf_spin_lock' is allowed */ + /* only one such field is allowed */ return -E2BIG; off = btf_member_bit_offset(t, member); if (off % 8) /* valid C code cannot generate such BTF */ return -EINVAL; off /= 8; - if (off % __alignof__(struct bpf_spin_lock)) - /* valid struct bpf_spin_lock will be 4 byte aligned */ + if (off % align) return -EINVAL; } return off; } +static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, + const char *name, int sz, int align) +{ + const struct btf_var_secinfo *vsi; + u32 i, off = -ENOENT; + + for_each_vsi(i, t, vsi) { + const struct btf_type *var = btf_type_by_id(btf, vsi->type); + const struct btf_type *var_type = btf_type_by_id(btf, var->type); + + if (!__btf_type_is_struct(var_type)) + continue; + if (var_type->size != sz) + continue; + if (vsi->size != sz) + continue; + if (strcmp(__btf_name_by_offset(btf, var_type->name_off), name)) + continue; + if (off != -ENOENT) + /* only one such field is allowed */ + return -E2BIG; + off = vsi->offset; + if (off % align) + return -EINVAL; + } + return off; +} + +static int btf_find_field(const struct btf *btf, const struct btf_type *t, + const char *name, int sz, int align) +{ + + if (__btf_type_is_struct(t)) + return btf_find_struct_field(btf, t, name, sz, align); + else if (btf_type_is_datasec(t)) + return btf_find_datasec_var(btf, t, name, sz, align); + return -EINVAL; +} + +/* find 'struct bpf_spin_lock' in map value. 
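+ * E.g. for a value type 'struct { int a; struct bpf_spin_lock l; }'
+ * the offset returned is 4.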
+ * return >= 0 offset if found + * and < 0 in case of error + */ +int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) +{ + return btf_find_field(btf, t, "bpf_spin_lock", + sizeof(struct bpf_spin_lock), + __alignof__(struct bpf_spin_lock)); +} + +int btf_find_timer(const struct btf *btf, const struct btf_type *t) +{ + return btf_find_field(btf, t, "bpf_timer", + sizeof(struct bpf_timer), + __alignof__(struct bpf_timer)); +} + static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, u32 type_id, void *data, u8 bits_offset, struct seq_file *m) @@ -2405,7 +2554,8 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env, return -EINVAL; } - + if (env->log.level == BPF_LOG_KERNEL) + continue; btf_verifier_log(env, "\t%s val=%d\n", __btf_name_by_offset(btf, enums[i].name_off), enums[i].val); @@ -2546,8 +2696,8 @@ static s32 btf_func_check_meta(struct btf_verifier_env *env, return -EINVAL; } - if (btf_type_vlen(t)) { - btf_verifier_log_type(env, t, "vlen != 0"); + if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { + btf_verifier_log_type(env, t, "Invalid func linkage"); return -EINVAL; } @@ -3367,6 +3517,1016 @@ errout: return ERR_PTR(err); } +extern char __weak __start_BTF[]; +extern char __weak __stop_BTF[]; +extern struct btf *btf_vmlinux; + +#define BPF_MAP_TYPE(_id, _ops) +#define BPF_LINK_TYPE(_id, _name) +static union { + struct bpf_ctx_convert { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + prog_ctx_type _id##_prog; \ + kern_ctx_type _id##_kern; +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE + } *__t; + /* 't' is written once under lock. Read many times. */ + const struct btf_type *t; +} bpf_ctx_convert; +enum { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + __ctx_convert##_id, +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE +}; +static u8 bpf_ctx_convert_map[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + [_id] = __ctx_convert##_id, +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE +}; +#undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE + +static const struct btf_member * +btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, + const struct btf_type *t, enum bpf_prog_type prog_type, + int arg) +{ + const struct btf_type *conv_struct; + const struct btf_type *ctx_struct; + const struct btf_member *ctx_type; + const char *tname, *ctx_tname; + + conv_struct = bpf_ctx_convert.t; + if (!conv_struct) { + bpf_log(log, "btf_vmlinux is malformed\n"); + return NULL; + } + t = btf_type_by_id(btf, t->type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_struct(t)) { + /* Only pointer to struct is supported for now. + * That means that BPF_PROG_TYPE_TRACEPOINT with BTF + * is not supported yet. + * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. + */ + if (log->level & BPF_LOG_LEVEL) + bpf_log(log, "arg#%d type is not a struct\n", arg); + return NULL; + } + tname = btf_name_by_offset(btf, t->name_off); + if (!tname) { + bpf_log(log, "arg#%d struct doesn't have a name\n", arg); + return NULL; + } + /* prog_type is valid bpf program type. No need for bounds check. */ + ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; + /* ctx_struct is a pointer to prog_ctx_type in vmlinux. 
+ * Like 'struct __sk_buff' + */ + ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); + if (!ctx_struct) + /* should not happen */ + return NULL; + ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); + if (!ctx_tname) { + /* should not happen */ + bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); + return NULL; + } + /* only compare that prog's ctx type name is the same as + * kernel expects. No need to compare field by field. + * It's ok for bpf prog to do: + * struct __sk_buff {}; + * int socket_filter_bpf_prog(struct __sk_buff *skb) + * { // no fields of skb are ever used } + */ + if (strcmp(ctx_tname, tname)) + return NULL; + return ctx_type; +} + +static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) +#define BPF_LINK_TYPE(_id, _name) +#define BPF_MAP_TYPE(_id, _ops) \ + [_id] = &_ops, +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE +#undef BPF_LINK_TYPE +#undef BPF_MAP_TYPE +}; + +static int btf_vmlinux_map_ids_init(const struct btf *btf, + struct bpf_verifier_log *log) +{ + const struct bpf_map_ops *ops; + int i, btf_id; + + for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) { + ops = btf_vmlinux_map_ops[i]; + if (!ops || (!ops->map_btf_name && !ops->map_btf_id)) + continue; + if (!ops->map_btf_name || !ops->map_btf_id) { + bpf_log(log, "map type %d is misconfigured\n", i); + return -EINVAL; + } + btf_id = btf_find_by_name_kind(btf, ops->map_btf_name, + BTF_KIND_STRUCT); + if (btf_id < 0) + return btf_id; + *ops->map_btf_id = btf_id; + } + + return 0; +} + +static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *t, + enum bpf_prog_type prog_type, + int arg) +{ + const struct btf_member *prog_ctx_type, *kern_ctx_type; + + prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); + if (!prog_ctx_type) + return -ENOENT; + kern_ctx_type = prog_ctx_type + 1; + return kern_ctx_type->type; +} + +BTF_ID_LIST(bpf_ctx_convert_btf_id) +BTF_ID(struct, bpf_ctx_convert) + +struct btf *btf_parse_vmlinux(void) +{ + struct btf_verifier_env *env = NULL; + struct bpf_verifier_log *log; + struct btf *btf = NULL; + int err; + + env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + if (!env) + return ERR_PTR(-ENOMEM); + + log = &env->log; + log->level = BPF_LOG_KERNEL; + + btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); + if (!btf) { + err = -ENOMEM; + goto errout; + } + env->btf = btf; + + btf->data = __start_BTF; + btf->data_size = __stop_BTF - __start_BTF; + + err = btf_parse_hdr(env); + if (err) + goto errout; + + btf->nohdr_data = btf->data + btf->hdr.hdr_len; + + err = btf_parse_str_sec(env); + if (err) + goto errout; + + err = btf_check_all_metas(env); + if (err) + goto errout; + + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); + + /* find bpf map structs for map_ptr access checking */ + err = btf_vmlinux_map_ids_init(btf, log); + if (err < 0) + goto errout; + + bpf_struct_ops_init(btf); + + btf_verifier_env_free(env); + refcount_set(&btf->refcnt, 1); + return btf; + +errout: + btf_verifier_env_free(env); + if (btf) { + kvfree(btf->types); + kfree(btf); + } + return ERR_PTR(err); +} + +struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) +{ + struct bpf_prog *tgt_prog = prog->aux->linked_prog; + + if (tgt_prog) { + return tgt_prog->aux->btf; + } else { + return btf_vmlinux; + } +} + +bool btf_ctx_access(int off, int size, enum bpf_access_type type, + const struct bpf_prog 
*prog, + struct bpf_insn_access_aux *info) +{ + const struct btf_type *t = prog->aux->attach_func_proto; + struct bpf_prog *tgt_prog = prog->aux->linked_prog; + struct btf *btf = bpf_prog_get_target_btf(prog); + const char *tname = prog->aux->attach_func_name; + struct bpf_verifier_log *log = info->log; + const struct btf_param *args; + u32 nr_args, arg; + int i, ret; + + if (off % 8) { + bpf_log(log, "func '%s' offset %d is not multiple of 8\n", + tname, off); + return false; + } + arg = off / 8; + args = (const struct btf_param *)(t + 1); + /* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */ + nr_args = t ? btf_type_vlen(t) : 5; + if (prog->aux->attach_btf_trace) { + /* skip first 'void *__data' argument in btf_trace_##name typedef */ + args++; + nr_args--; + } + + if (arg == nr_args) { + if (prog->expected_attach_type == BPF_TRACE_FEXIT || + prog->expected_attach_type == BPF_LSM_MAC) { + /* When LSM programs are attached to void LSM hooks + * they use FEXIT trampolines and when attached to + * int LSM hooks, they use MODIFY_RETURN trampolines. + * + * While the LSM programs are BPF_MODIFY_RETURN-like + * the check: + * + * if (ret_type != 'int') + * return -EINVAL; + * + * is _not_ done here. This is still safe as LSM hooks + * have only void and int return types. + */ + if (!t) + /* Default prog with 5 args. 6th arg is retval. */ + return true; + t = btf_type_by_id(btf, t->type); + } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { + /* For now the BPF_MODIFY_RETURN can only be attached to + * functions that return an int. + */ + if (!t) + return false; + + t = btf_type_skip_modifiers(btf, t->type, NULL); + if (!btf_type_is_int(t)) { + bpf_log(log, + "ret type %s not allowed for fmod_ret\n", + btf_kind_str[BTF_INFO_KIND(t->info)]); + return false; + } + } + } else if (arg >= nr_args) { + bpf_log(log, "func '%s' doesn't have %d-th argument\n", + tname, arg + 1); + return false; + } else { + if (!t) + /* Default prog with 5 args */ + return true; + t = btf_type_by_id(btf, args[arg].type); + } + /* skip modifiers */ + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (btf_type_is_int(t) || btf_type_is_enum(t)) + /* accessing a scalar */ + return true; + if (!btf_type_is_ptr(t)) { + bpf_log(log, + "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", + tname, arg, + __btf_name_by_offset(btf, t->name_off), + btf_kind_str[BTF_INFO_KIND(t->info)]); + return false; + } + + /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; + + if (ctx_arg_info->offset == off && + (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL || + ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) { + info->reg_type = ctx_arg_info->reg_type; + return true; + } + } + + if (t->type == 0) + /* This is a pointer to void. + * It is the same as scalar from the verifier safety pov. + * No further pointer walking is allowed. 
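+		 * E.g. a 'void *' argument may be passed around or
+		 * compared, but never dereferenced by the program.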
+ */ + return true; + + /* this is a pointer to another type */ + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; + + if (ctx_arg_info->offset == off) { + info->reg_type = ctx_arg_info->reg_type; + info->btf_id = ctx_arg_info->btf_id; + return true; + } + } + + info->reg_type = PTR_TO_BTF_ID; + if (tgt_prog) { + ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg); + if (ret > 0) { + info->btf_id = ret; + return true; + } else { + return false; + } + } + + info->btf_id = t->type; + t = btf_type_by_id(btf, t->type); + /* skip modifiers */ + while (btf_type_is_modifier(t)) { + info->btf_id = t->type; + t = btf_type_by_id(btf, t->type); + } + if (!btf_type_is_struct(t)) { + bpf_log(log, + "func '%s' arg%d type %s is not a struct\n", + tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]); + return false; + } + bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", + tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)], + __btf_name_by_offset(btf, t->name_off)); + return true; +} + +int btf_struct_access(struct bpf_verifier_log *log, + const struct btf_type *t, int off, int size, + enum bpf_access_type atype, + u32 *next_btf_id) +{ + u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; + const struct btf_type *mtype, *elem_type = NULL; + const struct btf_member *member; + const char *tname, *mname; + u32 vlen; + +again: + tname = __btf_name_by_offset(btf_vmlinux, t->name_off); + if (!btf_type_is_struct(t)) { + bpf_log(log, "Type '%s' is not a struct\n", tname); + return -EINVAL; + } + + vlen = btf_type_vlen(t); + if (off + size > t->size) { + /* If the last element is a variable size array, we may + * need to relax the rule. + */ + struct btf_array *array_elem; + + if (vlen == 0) + goto error; + + member = btf_type_member(t) + vlen - 1; + mtype = btf_type_skip_modifiers(btf_vmlinux, member->type, + NULL); + if (!btf_type_is_array(mtype)) + goto error; + + array_elem = (struct btf_array *)(mtype + 1); + if (array_elem->nelems != 0) + goto error; + + moff = btf_member_bit_offset(t, member) / 8; + if (off < moff) + goto error; + + /* Only allow structure for now, can be relaxed for + * other types later. + */ + elem_type = btf_type_skip_modifiers(btf_vmlinux, + array_elem->type, NULL); + if (!btf_type_is_struct(elem_type)) + goto error; + + off = (off - moff) % elem_type->size; + return btf_struct_access(log, elem_type, off, size, atype, + next_btf_id); + +error: + bpf_log(log, "access beyond struct %s at off %u size %u\n", + tname, off, size); + return -EACCES; + } + + for_each_member(i, t, member) { + /* offset of the field in bytes */ + moff = btf_member_bit_offset(t, member) / 8; + if (off + size <= moff) + /* won't find anything, field is already too far */ + break; + + if (btf_member_bitfield_size(t, member)) { + u32 end_bit = btf_member_bit_offset(t, member) + + btf_member_bitfield_size(t, member); + + /* off <= moff instead of off == moff because clang + * does not generate a BTF member for anonymous + * bitfield like the ":16" here: + * struct { + * int :16; + * int x:8; + * }; + */ + if (off <= moff && + BITS_ROUNDUP_BYTES(end_bit) <= off + size) + return SCALAR_VALUE; + + /* off may be accessing a following member + * + * or + * + * Doing partial access at either end of this + * bitfield. Continue on this case also to + * treat it as not accessing this bitfield + * and eventually error out as field not + * found to keep it simple. 
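+			 * E.g. a 1-byte read at the offset of 'int x:16'
+			 * covers only half the bitfield, so it falls
+			 * through and is treated as not accessing it.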
+ * It could be relaxed if there was a legit + * partial access case later. + */ + continue; + } + + /* In case of "off" is pointing to holes of a struct */ + if (off < moff) + break; + + /* type of the field */ + mtype = btf_type_by_id(btf_vmlinux, member->type); + mname = __btf_name_by_offset(btf_vmlinux, member->name_off); + + mtype = btf_resolve_size(btf_vmlinux, mtype, &msize, + &elem_type, &total_nelems); + if (IS_ERR(mtype)) { + bpf_log(log, "field %s doesn't have size\n", mname); + return -EFAULT; + } + + mtrue_end = moff + msize; + if (off >= mtrue_end) + /* no overlap with member, keep iterating */ + continue; + + if (btf_type_is_array(mtype)) { + u32 elem_idx; + + /* btf_resolve_size() above helps to + * linearize a multi-dimensional array. + * + * The logic here is treating an array + * in a struct as the following way: + * + * struct outer { + * struct inner array[2][2]; + * }; + * + * looks like: + * + * struct outer { + * struct inner array_elem0; + * struct inner array_elem1; + * struct inner array_elem2; + * struct inner array_elem3; + * }; + * + * When accessing outer->array[1][0], it moves + * moff to "array_elem2", set mtype to + * "struct inner", and msize also becomes + * sizeof(struct inner). Then most of the + * remaining logic will fall through without + * caring the current member is an array or + * not. + * + * Unlike mtype/msize/moff, mtrue_end does not + * change. The naming difference ("_true") tells + * that it is not always corresponding to + * the current mtype/msize/moff. + * It is the true end of the current + * member (i.e. array in this case). That + * will allow an int array to be accessed like + * a scratch space, + * i.e. allow access beyond the size of + * the array's element as long as it is + * within the mtrue_end boundary. + */ + + /* skip empty array */ + if (moff == mtrue_end) + continue; + + msize /= total_nelems; + elem_idx = (off - moff) / msize; + moff += elem_idx * msize; + mtype = elem_type; + } + + /* the 'off' we're looking for is either equal to start + * of this field or inside of this struct + */ + if (btf_type_is_struct(mtype)) { + /* our field must be inside that union or struct */ + t = mtype; + + /* adjust offset we're looking for */ + off -= moff; + goto again; + } + + if (btf_type_is_ptr(mtype)) { + const struct btf_type *stype; + + if (msize != size || off != moff) { + bpf_log(log, + "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n", + mname, moff, tname, off, size); + return -EACCES; + } + + stype = btf_type_by_id(btf_vmlinux, mtype->type); + /* skip modifiers */ + while (btf_type_is_modifier(stype)) + stype = btf_type_by_id(btf_vmlinux, stype->type); + if (btf_type_is_struct(stype)) { + *next_btf_id = mtype->type; + return PTR_TO_BTF_ID; + } + } + + /* Allow more flexible access within an int as long as + * it is within mtrue_end. + * Since mtrue_end could be the end of an array, + * that also allows using an array of int as a scratch + * space. e.g. skb->cb[]. 
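+		 * E.g. a 4-byte read at byte offset 2 of 'char cb[48]'
+		 * stays within mtrue_end and is allowed even though it
+		 * crosses element boundaries.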
+ */ + if (off + size > mtrue_end) { + bpf_log(log, + "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n", + mname, mtrue_end, tname, off, size); + return -EACCES; + } + + return SCALAR_VALUE; + } + bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off); + return -EINVAL; +} + +int btf_resolve_helper_id(struct bpf_verifier_log *log, + const struct bpf_func_proto *fn, int arg) +{ + int id; + + if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) + return -EINVAL; + id = fn->btf_id[arg]; + if (!id || id > btf_vmlinux->nr_types) + return -EINVAL; + return id; +} + +static int __get_type_size(struct btf *btf, u32 btf_id, + const struct btf_type **bad_type) +{ + const struct btf_type *t; + + if (!btf_id) + /* void */ + return 0; + t = btf_type_by_id(btf, btf_id); + while (t && btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (!t) + return -EINVAL; + if (btf_type_is_ptr(t)) + /* kernel size of pointer. Not BPF's size of pointer*/ + return sizeof(void *); + if (btf_type_is_int(t) || btf_type_is_enum(t)) + return t->size; + *bad_type = t; + return -EINVAL; +} + +int btf_distill_func_proto(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *func, + const char *tname, + struct btf_func_model *m) +{ + const struct btf_param *args; + const struct btf_type *t; + u32 i, nargs; + int ret; + + if (!func) { + /* BTF function prototype doesn't match the verifier types. + * Fall back to 5 u64 args. + */ + for (i = 0; i < 5; i++) + m->arg_size[i] = 8; + m->ret_size = 8; + m->nr_args = 5; + return 0; + } + args = (const struct btf_param *)(func + 1); + nargs = btf_type_vlen(func); + if (nargs >= MAX_BPF_FUNC_ARGS) { + bpf_log(log, + "The function %s has %d arguments. Too many.\n", + tname, nargs); + return -EINVAL; + } + ret = __get_type_size(btf, func->type, &t); + if (ret < 0) { + bpf_log(log, + "The function %s return type %s is unsupported.\n", + tname, btf_kind_str[BTF_INFO_KIND(t->info)]); + return -EINVAL; + } + m->ret_size = ret; + + for (i = 0; i < nargs; i++) { + ret = __get_type_size(btf, args[i].type, &t); + if (ret < 0) { + bpf_log(log, + "The function %s arg%d type %s is unsupported.\n", + tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]); + return -EINVAL; + } + m->arg_size[i] = ret; + } + m->nr_args = nargs; + return 0; +} + +/* Compare BTFs of two functions assuming only scalars and pointers to context. + * t1 points to BTF_KIND_FUNC in btf1 + * t2 points to BTF_KIND_FUNC in btf2 + * Returns: + * EINVAL - function prototype mismatch + * EFAULT - verifier bug + * 0 - 99% match. The last 1% is validated by the verifier. 
+ */ +int btf_check_func_type_match(struct bpf_verifier_log *log, + struct btf *btf1, const struct btf_type *t1, + struct btf *btf2, const struct btf_type *t2) +{ + const struct btf_param *args1, *args2; + const char *fn1, *fn2, *s1, *s2; + u32 nargs1, nargs2, i; + + fn1 = btf_name_by_offset(btf1, t1->name_off); + fn2 = btf_name_by_offset(btf2, t2->name_off); + + if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { + bpf_log(log, "%s() is not a global function\n", fn1); + return -EINVAL; + } + if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { + bpf_log(log, "%s() is not a global function\n", fn2); + return -EINVAL; + } + + t1 = btf_type_by_id(btf1, t1->type); + if (!t1 || !btf_type_is_func_proto(t1)) + return -EFAULT; + t2 = btf_type_by_id(btf2, t2->type); + if (!t2 || !btf_type_is_func_proto(t2)) + return -EFAULT; + + args1 = (const struct btf_param *)(t1 + 1); + nargs1 = btf_type_vlen(t1); + args2 = (const struct btf_param *)(t2 + 1); + nargs2 = btf_type_vlen(t2); + + if (nargs1 != nargs2) { + bpf_log(log, "%s() has %d args while %s() has %d args\n", + fn1, nargs1, fn2, nargs2); + return -EINVAL; + } + + t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); + t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); + if (t1->info != t2->info) { + bpf_log(log, + "Return type %s of %s() doesn't match type %s of %s()\n", + btf_type_str(t1), fn1, + btf_type_str(t2), fn2); + return -EINVAL; + } + + for (i = 0; i < nargs1; i++) { + t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); + t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); + + if (t1->info != t2->info) { + bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", + i, fn1, btf_type_str(t1), + fn2, btf_type_str(t2)); + return -EINVAL; + } + if (btf_type_has_size(t1) && t1->size != t2->size) { + bpf_log(log, + "arg%d in %s() has size %d while %s() has %d\n", + i, fn1, t1->size, + fn2, t2->size); + return -EINVAL; + } + + /* global functions are validated with scalars and pointers + * to context only. And only global functions can be replaced. + * Hence type check only those types. + */ + if (btf_type_is_int(t1) || btf_type_is_enum(t1)) + continue; + if (!btf_type_is_ptr(t1)) { + bpf_log(log, + "arg%d in %s() has unrecognized type\n", + i, fn1); + return -EINVAL; + } + t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); + t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); + if (!btf_type_is_struct(t1)) { + bpf_log(log, + "arg%d in %s() is not a pointer to context\n", + i, fn1); + return -EINVAL; + } + if (!btf_type_is_struct(t2)) { + bpf_log(log, + "arg%d in %s() is not a pointer to context\n", + i, fn2); + return -EINVAL; + } + /* This is an optional check to make program writing easier. + * Compare names of structs and report an error to the user. + * btf_prepare_func_args() already checked that t2 struct + * is a context type. btf_prepare_func_args() will check + * later that t1 struct is a context type as well. 
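+		 * E.g. an extension taking 'struct sk_buff *' where the
+		 * target takes 'struct __sk_buff *' is rejected here with
+		 * both struct names in the message.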
+ */ + s1 = btf_name_by_offset(btf1, t1->name_off); + s2 = btf_name_by_offset(btf2, t2->name_off); + if (strcmp(s1, s2)) { + bpf_log(log, + "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", + i, fn1, s1, fn2, s2); + return -EINVAL; + } + } + return 0; +} + +/* Compare BTFs of given program with BTF of target program */ +int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, + struct btf *btf2, const struct btf_type *t2) +{ + struct btf *btf1 = prog->aux->btf; + const struct btf_type *t1; + u32 btf_id = 0; + + if (!prog->aux->func_info) { + bpf_log(&env->log, "Program extension requires BTF\n"); + return -EINVAL; + } + + btf_id = prog->aux->func_info[0].type_id; + if (!btf_id) + return -EFAULT; + + t1 = btf_type_by_id(btf1, btf_id); + if (!t1 || !btf_type_is_func(t1)) + return -EFAULT; + + return btf_check_func_type_match(&env->log, btf1, t1, btf2, t2); +} + +/* Compare BTF of a function with given bpf_reg_state. + * Returns: + * EFAULT - there is a verifier bug. Abort verification. + * EINVAL - there is a type mismatch or BTF is not available. + * 0 - BTF matches with what bpf_reg_state expects. + * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. + */ +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *reg) +{ + struct bpf_verifier_log *log = &env->log; + struct bpf_prog *prog = env->prog; + struct btf *btf = prog->aux->btf; + const struct btf_param *args; + const struct btf_type *t; + u32 i, nargs, btf_id; + const char *tname; + + if (!prog->aux->func_info) + return -EINVAL; + + btf_id = prog->aux->func_info[subprog].type_id; + if (!btf_id) + return -EFAULT; + + if (prog->aux->func_info_aux[subprog].unreliable) + return -EINVAL; + + t = btf_type_by_id(btf, btf_id); + if (!t || !btf_type_is_func(t)) { + /* These checks were already done by the verifier while loading + * struct bpf_func_info + */ + bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n", + subprog); + return -EFAULT; + } + tname = btf_name_by_offset(btf, t->name_off); + + t = btf_type_by_id(btf, t->type); + if (!t || !btf_type_is_func_proto(t)) { + bpf_log(log, "Invalid BTF of func %s\n", tname); + return -EFAULT; + } + args = (const struct btf_param *)(t + 1); + nargs = btf_type_vlen(t); + if (nargs > 5) { + bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs); + goto out; + } + /* check that BTF function arguments match actual types that the + * verifier sees. + */ + for (i = 0; i < nargs; i++) { + t = btf_type_by_id(btf, args[i].type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (btf_type_is_int(t) || btf_type_is_enum(t)) { + if (reg[i + 1].type == SCALAR_VALUE) + continue; + bpf_log(log, "R%d is not a scalar\n", i + 1); + goto out; + } + if (btf_type_is_ptr(t)) { + if (reg[i + 1].type == SCALAR_VALUE) { + bpf_log(log, "R%d is not a pointer\n", i + 1); + goto out; + } + /* If function expects ctx type in BTF check that caller + * is passing PTR_TO_CTX. 
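+			 * E.g. an argument declared 'struct __sk_buff *'
+			 * must be fed the program's ctx pointer, not an
+			 * arbitrary pointer or scalar.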
+ */ + if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) { + if (reg[i + 1].type != PTR_TO_CTX) { + bpf_log(log, + "arg#%d expected pointer to ctx, but got %s\n", + i, btf_kind_str[BTF_INFO_KIND(t->info)]); + goto out; + } + if (check_ctx_reg(env, &reg[i + 1], i + 1)) + goto out; + continue; + } + } + bpf_log(log, "Unrecognized arg#%d type %s\n", + i, btf_kind_str[BTF_INFO_KIND(t->info)]); + goto out; + } + return 0; +out: + /* Compiler optimizations can remove arguments from static functions + * or mismatched type can be passed into a global function. + * In such cases mark the function as unreliable from BTF point of view. + */ + prog->aux->func_info_aux[subprog].unreliable = true; + return -EINVAL; +} + +/* Convert BTF of a function into bpf_reg_state if possible + * Returns: + * EFAULT - there is a verifier bug. Abort verification. + * EINVAL - cannot convert BTF. + * 0 - Successfully converted BTF into bpf_reg_state + * (either PTR_TO_CTX or SCALAR_VALUE). + */ +int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *reg) +{ + struct bpf_verifier_log *log = &env->log; + struct bpf_prog *prog = env->prog; + enum bpf_prog_type prog_type = prog->type; + struct btf *btf = prog->aux->btf; + const struct btf_param *args; + const struct btf_type *t; + u32 i, nargs, btf_id; + const char *tname; + + if (!prog->aux->func_info || + prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) { + bpf_log(log, "Verifier bug\n"); + return -EFAULT; + } + + btf_id = prog->aux->func_info[subprog].type_id; + if (!btf_id) { + bpf_log(log, "Global functions need valid BTF\n"); + return -EFAULT; + } + + t = btf_type_by_id(btf, btf_id); + if (!t || !btf_type_is_func(t)) { + /* These checks were already done by the verifier while loading + * struct bpf_func_info + */ + bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n", + subprog); + return -EFAULT; + } + tname = btf_name_by_offset(btf, t->name_off); + + if (log->level & BPF_LOG_LEVEL) + bpf_log(log, "Validating %s() func#%d...\n", + tname, subprog); + + if (prog->aux->func_info_aux[subprog].unreliable) { + bpf_log(log, "Verifier bug in function %s()\n", tname); + return -EFAULT; + } + if (prog_type == BPF_PROG_TYPE_EXT) + prog_type = prog->aux->linked_prog->type; + + t = btf_type_by_id(btf, t->type); + if (!t || !btf_type_is_func_proto(t)) { + bpf_log(log, "Invalid type of function %s()\n", tname); + return -EFAULT; + } + args = (const struct btf_param *)(t + 1); + nargs = btf_type_vlen(t); + if (nargs > 5) { + bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n", + tname, nargs); + return -EINVAL; + } + /* check that function returns int */ + t = btf_type_by_id(btf, t->type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_int(t) && !btf_type_is_enum(t)) { + bpf_log(log, + "Global function %s() doesn't return scalar. Only those are supported.\n", + tname); + return -EINVAL; + } + /* Convert BTF function arguments into verifier types. + * Only PTR_TO_CTX and SCALAR are supported atm.
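+ * E.g. (hypothetical declaration) a global subprog
+ *     int sum(struct __sk_buff *skb, int a, int b);
+ * seeds reg[1] = PTR_TO_CTX and reg[2], reg[3] = SCALAR_VALUE in the
+ * loop below; any other argument type (struct passed by value, a
+ * non-ctx pointer, etc.) is rejected with -EINVAL.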
+ */ + for (i = 0; i < nargs; i++) { + t = btf_type_by_id(btf, args[i].type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (btf_type_is_int(t) || btf_type_is_enum(t)) { + reg[i + 1].type = SCALAR_VALUE; + continue; + } + if (btf_type_is_ptr(t) && + btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { + reg[i + 1].type = PTR_TO_CTX; + continue; + } + bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n", + i, btf_kind_str[BTF_INFO_KIND(t->info)], tname); + return -EINVAL; + } + return 0; +} + void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, struct seq_file *m) { diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 869e2e1860e8..163a89dfd70a 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -28,6 +28,69 @@ void cgroup_bpf_offline(struct cgroup *cgrp) percpu_ref_kill(&cgrp->bpf.refcnt); } +static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[]) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storages[stype]); +} + +static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[], + struct bpf_prog *prog) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) { + storages[stype] = bpf_cgroup_storage_alloc(prog, stype); + if (IS_ERR(storages[stype])) { + storages[stype] = NULL; + bpf_cgroup_storages_free(storages); + return -ENOMEM; + } + } + + return 0; +} + +static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[], + struct bpf_cgroup_storage *src[]) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) + dst[stype] = src[stype]; +} + +static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], + struct cgroup* cgrp, + enum bpf_attach_type attach_type) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_link(storages[stype], cgrp, attach_type); +} + +static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[]) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_unlink(storages[stype]); +} + +/* Called when bpf_cgroup_link is auto-detached from dying cgroup. + * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It + * doesn't free link memory, which will eventually be done by bpf_link's + * release() callback, when its last FD is closed. 
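+ * Once link->cgroup is set to NULL here, the remaining link
+ * operations treat the link as defunct: update_prog fails with
+ * -ENOLINK, and fdinfo/link_info report cgroup_id 0.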
+ */ +static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link) +{ + cgroup_put(link->cgroup); + link->cgroup = NULL; +} + /** * cgroup_bpf_release() - put references of all bpf programs and * release all cgroup bpf data @@ -37,7 +100,6 @@ static void cgroup_bpf_release(struct work_struct *work) { struct cgroup *p, *cgrp = container_of(work, struct cgroup, bpf.release_work); - enum bpf_cgroup_storage_type stype; struct bpf_prog_array *old_array; unsigned int type; @@ -49,11 +111,12 @@ static void cgroup_bpf_release(struct work_struct *work) list_for_each_entry_safe(pl, tmp, progs, node) { list_del(&pl->node); - bpf_prog_put(pl->prog); - for_each_cgroup_storage_type(stype) { - bpf_cgroup_storage_unlink(pl->storage[stype]); - bpf_cgroup_storage_free(pl->storage[stype]); - } + if (pl->prog) + bpf_prog_put(pl->prog); + if (pl->link) + bpf_cgroup_link_auto_detach(pl->link); + bpf_cgroup_storages_unlink(pl->storage); + bpf_cgroup_storages_free(pl->storage); kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -85,6 +148,18 @@ static void cgroup_bpf_release_fn(struct percpu_ref *ref) queue_work(system_wq, &cgrp->bpf.release_work); } +/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through + * link or direct prog. + */ +static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl) +{ + if (pl->prog) + return pl->prog; + if (pl->link) + return pl->link->link.prog; + return NULL; +} + /* count number of elements in the list. * it's slow but the list cannot be long */ @@ -94,7 +169,7 @@ static u32 prog_list_length(struct list_head *head) u32 cnt = 0; list_for_each_entry(pl, head, node) { - if (!pl->prog) + if (!prog_list_prog(pl)) continue; cnt++; } @@ -106,8 +181,7 @@ static u32 prog_list_length(struct list_head *head) * if parent has overridable or multi-prog, allow attaching */ static bool hierarchy_allows_attach(struct cgroup *cgrp, - enum bpf_attach_type type, - u32 new_flags) + enum bpf_attach_type type) { struct cgroup *p; @@ -139,7 +213,7 @@ static int compute_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, struct bpf_prog_array **array) { - enum bpf_cgroup_storage_type stype; + struct bpf_prog_array_item *item; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; @@ -164,13 +238,13 @@ static int compute_effective_progs(struct cgroup *cgrp, continue; list_for_each_entry(pl, &p->bpf.progs[type], node) { - if (!pl->prog) + if (!prog_list_prog(pl)) continue; - progs->items[cnt].prog = pl->prog; - for_each_cgroup_storage_type(stype) - progs->items[cnt].cgroup_storage[stype] = - pl->storage[stype]; + item = &progs->items[cnt]; + item->prog = prog_list_prog(pl); + bpf_cgroup_storages_assign(item->cgroup_storage, + pl->storage); cnt++; } } while ((p = cgroup_parent(p))); @@ -288,36 +362,85 @@ cleanup: #define BPF_CGROUP_MAX_PROGS 64 +static struct bpf_prog_list *find_attach_entry(struct list_head *progs, + struct bpf_prog *prog, + struct bpf_cgroup_link *link, + struct bpf_prog *replace_prog, + bool allow_multi) +{ + struct bpf_prog_list *pl; + + /* single-attach case */ + if (!allow_multi) { + if (list_empty(progs)) + return NULL; + return list_first_entry(progs, typeof(*pl), node); + } + + list_for_each_entry(pl, progs, node) { + if (prog && pl->prog == prog) + /* disallow attaching the same prog twice */ + return ERR_PTR(-EINVAL); + if (link && pl->link == link) + /* disallow attaching the same link twice */ + return ERR_PTR(-EINVAL); + } + + /* direct prog multi-attach w/ replacement case */ + if 
(replace_prog) { + list_for_each_entry(pl, progs, node) { + if (pl->prog == replace_prog) + /* a match found */ + return pl; + } + /* prog to replace not found for cgroup */ + return ERR_PTR(-ENOENT); + } + + return NULL; +} + /** - * __cgroup_bpf_attach() - Attach the program to a cgroup, and + * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and * propagate the change to descendants * @cgrp: The cgroup which descendants to traverse * @prog: A program to attach + * @link: A link to attach + * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set * @type: Type of attach operation * @flags: Option flags * + * Exactly one of @prog or @link can be non-null. * Must be called with cgroup_mutex held. */ -int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, +int __cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags) { + u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)); struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; - enum bpf_cgroup_storage_type stype; struct bpf_prog_list *pl; - bool pl_was_allocated; int err; - if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) + if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) || + ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI))) /* invalid combination */ return -EINVAL; + if (link && (prog || replace_prog)) + /* only either link or prog/replace_prog can be specified */ + return -EINVAL; + if (!!replace_prog != !!(flags & BPF_F_REPLACE)) + /* replace_prog implies BPF_F_REPLACE, and vice versa */ + return -EINVAL; - if (!hierarchy_allows_attach(cgrp, type, flags)) + if (!hierarchy_allows_attach(cgrp, type)) return -EPERM; - if (!list_empty(progs) && cgrp->bpf.flags[type] != flags) + if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags) /* Disallow attaching non-overridable on top * of existing overridable in this cgroup. * Disallow attaching multi-prog if overridable or none @@ -327,150 +450,228 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) return -E2BIG; - for_each_cgroup_storage_type(stype) { - storage[stype] = bpf_cgroup_storage_alloc(prog, stype); - if (IS_ERR(storage[stype])) { - storage[stype] = NULL; - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_free(storage[stype]); - return -ENOMEM; - } - } + pl = find_attach_entry(progs, prog, link, replace_prog, + flags & BPF_F_ALLOW_MULTI); + if (IS_ERR(pl)) + return PTR_ERR(pl); - if (flags & BPF_F_ALLOW_MULTI) { - list_for_each_entry(pl, progs, node) { - if (pl->prog == prog) { - /* disallow attaching the same prog twice */ - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_free(storage[stype]); - return -EINVAL; - } - } + if (bpf_cgroup_storages_alloc(storage, prog ? 
: link->link.prog)) + return -ENOMEM; + if (pl) { + old_prog = pl->prog; + bpf_cgroup_storages_unlink(pl->storage); + bpf_cgroup_storages_assign(old_storage, pl->storage); + } else { pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_free(storage[stype]); + bpf_cgroup_storages_free(storage); return -ENOMEM; } - - pl_was_allocated = true; - pl->prog = prog; - for_each_cgroup_storage_type(stype) - pl->storage[stype] = storage[stype]; list_add_tail(&pl->node, progs); - } else { - if (list_empty(progs)) { - pl = kmalloc(sizeof(*pl), GFP_KERNEL); - if (!pl) { - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_free(storage[stype]); - return -ENOMEM; - } - pl_was_allocated = true; - list_add_tail(&pl->node, progs); - } else { - pl = list_first_entry(progs, typeof(*pl), node); - old_prog = pl->prog; - for_each_cgroup_storage_type(stype) { - old_storage[stype] = pl->storage[stype]; - bpf_cgroup_storage_unlink(old_storage[stype]); - } - pl_was_allocated = false; - } - pl->prog = prog; - for_each_cgroup_storage_type(stype) - pl->storage[stype] = storage[stype]; } - cgrp->bpf.flags[type] = flags; + pl->prog = prog; + pl->link = link; + bpf_cgroup_storages_assign(pl->storage, storage); + cgrp->bpf.flags[type] = saved_flags; err = update_effective_progs(cgrp, type); if (err) goto cleanup; - static_branch_inc(&cgroup_bpf_enabled_key); - for_each_cgroup_storage_type(stype) { - if (!old_storage[stype]) - continue; - bpf_cgroup_storage_free(old_storage[stype]); - } - if (old_prog) { + bpf_cgroup_storages_free(old_storage); + if (old_prog) bpf_prog_put(old_prog); - static_branch_dec(&cgroup_bpf_enabled_key); - } - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_link(storage[stype], cgrp, type); + else + static_branch_inc(&cgroup_bpf_enabled_key); + bpf_cgroup_storages_link(pl->storage, cgrp, type); return 0; cleanup: - /* and cleanup the prog list */ - pl->prog = old_prog; - for_each_cgroup_storage_type(stype) { - bpf_cgroup_storage_free(pl->storage[stype]); - pl->storage[stype] = old_storage[stype]; - bpf_cgroup_storage_link(old_storage[stype], cgrp, type); + if (old_prog) { + pl->prog = old_prog; + pl->link = NULL; } - if (pl_was_allocated) { + bpf_cgroup_storages_free(pl->storage); + bpf_cgroup_storages_assign(pl->storage, old_storage); + bpf_cgroup_storages_link(pl->storage, cgrp, type); + if (!old_prog) { list_del(&pl->node); kfree(pl); } return err; } +/* Swap updated BPF program for given link in effective program arrays across + * all descendant cgroups. This function is guaranteed to succeed. 
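+ * It cannot fail because nothing is allocated: each descendant's
+ * effective array already holds a slot for this link, so the update
+ * is a single WRITE_ONCE() of the program pointer, done under
+ * cgroup_mutex.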
+ */ +static void replace_effective_prog(struct cgroup *cgrp, + enum bpf_attach_type type, + struct bpf_cgroup_link *link) +{ + struct bpf_prog_array_item *item; + struct cgroup_subsys_state *css; + struct bpf_prog_array *progs; + struct bpf_prog_list *pl; + struct list_head *head; + struct cgroup *cg; + int pos; + + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + if (percpu_ref_is_zero(&desc->bpf.refcnt)) + continue; + + /* find position of link in effective progs array */ + for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) { + if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI)) + continue; + + head = &cg->bpf.progs[type]; + list_for_each_entry(pl, head, node) { + if (!prog_list_prog(pl)) + continue; + if (pl->link == link) + goto found; + pos++; + } + } +found: + BUG_ON(!cg); + progs = rcu_dereference_protected( + desc->bpf.effective[type], + lockdep_is_held(&cgroup_mutex)); + item = &progs->items[pos]; + WRITE_ONCE(item->prog, link->link.prog); + } +} + /** - * __cgroup_bpf_detach() - Detach the program from a cgroup, and - * propagate the change to descendants + * __cgroup_bpf_replace() - Replace link's program and propagate the change + * to descendants * @cgrp: The cgroup which descendants to traverse - * @prog: A program to detach or NULL - * @type: Type of detach operation + * @link: A link for which to replace BPF program + * @type: Type of attach operation * * Must be called with cgroup_mutex held. */ -int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type) +static int __cgroup_bpf_replace(struct cgroup *cgrp, + struct bpf_cgroup_link *link, + struct bpf_prog *new_prog) { - struct list_head *progs = &cgrp->bpf.progs[type]; - enum bpf_cgroup_storage_type stype; - u32 flags = cgrp->bpf.flags[type]; - struct bpf_prog *old_prog = NULL; + struct list_head *progs = &cgrp->bpf.progs[link->type]; + struct bpf_prog *old_prog; struct bpf_prog_list *pl; - int err; + bool found = false; - if (flags & BPF_F_ALLOW_MULTI) { - if (!prog) - /* to detach MULTI prog the user has to specify valid FD - * of the program to be detached - */ - return -EINVAL; - } else { - if (list_empty(progs)) - /* report error when trying to detach and nothing is attached */ - return -ENOENT; - } + if (link->link.prog->type != new_prog->type) + return -EINVAL; - if (flags & BPF_F_ALLOW_MULTI) { - /* find the prog and detach it */ - list_for_each_entry(pl, progs, node) { - if (pl->prog != prog) - continue; - old_prog = prog; - /* mark it deleted, so it's ignored while - * recomputing effective - */ - pl->prog = NULL; + list_for_each_entry(pl, progs, node) { + if (pl->link == link) { + found = true; break; } - if (!old_prog) - return -ENOENT; - } else { - /* to maintain backward compatibility NONE and OVERRIDE cgroups - * allow detaching with invalid FD (prog==NULL) - */ - pl = list_first_entry(progs, typeof(*pl), node); - old_prog = pl->prog; - pl->prog = NULL; } + if (!found) + return -ENOENT; + + old_prog = xchg(&link->link.prog, new_prog); + replace_effective_prog(cgrp, link->type, link); + bpf_prog_put(old_prog); + return 0; +} + +static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + struct bpf_cgroup_link *cg_link; + int ret; + + cg_link = container_of(link, struct bpf_cgroup_link, link); + + mutex_lock(&cgroup_mutex); + /* link might have been auto-released by dying cgroup, so fail */ + if (!cg_link->cgroup) { + ret = -ENOLINK; + goto 
out_unlock; + } + if (old_prog && link->prog != old_prog) { + ret = -EPERM; + goto out_unlock; + } + ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog); +out_unlock: + mutex_unlock(&cgroup_mutex); + return ret; +} + +static struct bpf_prog_list *find_detach_entry(struct list_head *progs, + struct bpf_prog *prog, + struct bpf_cgroup_link *link, + bool allow_multi) +{ + struct bpf_prog_list *pl; + + if (!allow_multi) { + if (list_empty(progs)) + /* report error when trying to detach and nothing is attached */ + return ERR_PTR(-ENOENT); + + /* to maintain backward compatibility NONE and OVERRIDE cgroups + * allow detaching with invalid FD (prog==NULL) in legacy mode + */ + return list_first_entry(progs, typeof(*pl), node); + } + + if (!prog && !link) + /* to detach MULTI prog the user has to specify valid FD + * of the program or link to be detached + */ + return ERR_PTR(-EINVAL); + + /* find the prog or link and detach it */ + list_for_each_entry(pl, progs, node) { + if (pl->prog == prog && pl->link == link) + return pl; + } + return ERR_PTR(-ENOENT); +} + +/** + * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and + * propagate the change to descendants + * @cgrp: The cgroup which descendants to traverse + * @prog: A program to detach or NULL + * @link: A link to detach or NULL + * @type: Type of detach operation + * + * At most one of @prog or @link can be non-NULL. + * Must be called with cgroup_mutex held. + */ +int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type) +{ + struct list_head *progs = &cgrp->bpf.progs[type]; + u32 flags = cgrp->bpf.flags[type]; + struct bpf_prog_list *pl; + struct bpf_prog *old_prog; + int err; + + if (prog && link) + /* only one of prog or link can be specified */ + return -EINVAL; + + pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI); + if (IS_ERR(pl)) + return PTR_ERR(pl); + + /* mark it deleted, so it's ignored while recomputing effective */ + old_prog = pl->prog; + pl->prog = NULL; + pl->link = NULL; err = update_effective_progs(cgrp, type); if (err) @@ -478,22 +679,21 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); - for_each_cgroup_storage_type(stype) { - bpf_cgroup_storage_unlink(pl->storage[stype]); - bpf_cgroup_storage_free(pl->storage[stype]); - } + bpf_cgroup_storages_unlink(pl->storage); + bpf_cgroup_storages_free(pl->storage); kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ cgrp->bpf.flags[type] = 0; - - bpf_prog_put(old_prog); + if (old_prog) + bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key); return 0; cleanup: - /* and restore back old_prog */ + /* restore back prog or link */ pl->prog = old_prog; + pl->link = link; return err; } @@ -506,6 +706,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, struct list_head *progs = &cgrp->bpf.progs[type]; u32 flags = cgrp->bpf.flags[type]; struct bpf_prog_array *effective; + struct bpf_prog *prog; int cnt, ret = 0, i; effective = rcu_dereference_protected(cgrp->bpf.effective[type], @@ -536,7 +737,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, i = 0; list_for_each_entry(pl, progs, node) { - id = pl->prog->aux->id; + prog = prog_list_prog(pl); + id = prog->aux->id; if (copy_to_user(prog_ids + i, &id, sizeof(id))) return -EFAULT; if (++i == cnt) @@ -549,6 +751,7 @@ int
__cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog) { + struct bpf_prog *replace_prog = NULL; struct cgroup *cgrp; int ret; @@ -556,8 +759,20 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr, if (IS_ERR(cgrp)) return PTR_ERR(cgrp); - ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, - attr->attach_flags); + if ((attr->attach_flags & BPF_F_ALLOW_MULTI) && + (attr->attach_flags & BPF_F_REPLACE)) { + replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype); + if (IS_ERR(replace_prog)) { + cgroup_put(cgrp); + return PTR_ERR(replace_prog); + } + } + + ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL, + attr->attach_type, attr->attach_flags); + + if (replace_prog) + bpf_prog_put(replace_prog); cgroup_put(cgrp); return ret; } @@ -576,7 +791,7 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) if (IS_ERR(prog)) prog = NULL; - ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); + ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type); if (prog) bpf_prog_put(prog); @@ -584,6 +799,141 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) return ret; } +static void bpf_cgroup_link_release(struct bpf_link *link) +{ + struct bpf_cgroup_link *cg_link = + container_of(link, struct bpf_cgroup_link, link); + struct cgroup *cg; + + /* link might have been auto-detached by dying cgroup already, + * in that case our work is done here + */ + if (!cg_link->cgroup) + return; + + mutex_lock(&cgroup_mutex); + + /* re-check cgroup under lock again */ + if (!cg_link->cgroup) { + mutex_unlock(&cgroup_mutex); + return; + } + + WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link, + cg_link->type)); + + cg = cg_link->cgroup; + cg_link->cgroup = NULL; + + mutex_unlock(&cgroup_mutex); + + cgroup_put(cg); +} + +static void bpf_cgroup_link_dealloc(struct bpf_link *link) +{ + struct bpf_cgroup_link *cg_link = + container_of(link, struct bpf_cgroup_link, link); + + kfree(cg_link); +} + +static int bpf_cgroup_link_detach(struct bpf_link *link) +{ + bpf_cgroup_link_release(link); + + return 0; +} + +static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_cgroup_link *cg_link = + container_of(link, struct bpf_cgroup_link, link); + u64 cg_id = 0; + + mutex_lock(&cgroup_mutex); + if (cg_link->cgroup) + cg_id = cg_link->cgroup->kn->id.id; + mutex_unlock(&cgroup_mutex); + + seq_printf(seq, + "cgroup_id:\t%llu\n" + "attach_type:\t%d\n", + cg_id, + cg_link->type); +} + +static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_cgroup_link *cg_link = + container_of(link, struct bpf_cgroup_link, link); + u64 cg_id = 0; + + mutex_lock(&cgroup_mutex); + if (cg_link->cgroup) + cg_id = cg_link->cgroup->kn->id.id; + mutex_unlock(&cgroup_mutex); + + info->cgroup.cgroup_id = cg_id; + info->cgroup.attach_type = cg_link->type; + return 0; +} + +static const struct bpf_link_ops bpf_cgroup_link_lops = { + .release = bpf_cgroup_link_release, + .dealloc = bpf_cgroup_link_dealloc, + .detach = bpf_cgroup_link_detach, + .update_prog = cgroup_bpf_replace, + .show_fdinfo = bpf_cgroup_link_show_fdinfo, + .fill_link_info = bpf_cgroup_link_fill_link_info, +}; + +int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct bpf_cgroup_link *link; + struct cgroup 
*cgrp; + int err; + + if (attr->link_create.flags) + return -EINVAL; + + cgrp = cgroup_get_from_fd(attr->link_create.target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out_put_cgroup; + } + bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops, + prog); + link->cgroup = cgrp; + link->type = attr->link_create.attach_type; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + goto out_put_cgroup; + } + + err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type, + BPF_F_ALLOW_MULTI); + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_cgroup; + } + + return bpf_link_settle(&link_primer); + +out_put_cgroup: + cgroup_put(cgrp); + return err; +} + int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -966,16 +1316,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen) { - if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0) + if (unlikely(max_optlen < 0)) return -EINVAL; + if (unlikely(max_optlen > PAGE_SIZE)) { + /* We don't expose optvals that are greater than PAGE_SIZE + * to the BPF program. + */ + max_optlen = PAGE_SIZE; + } + ctx->optval = kzalloc(max_optlen, GFP_USER); if (!ctx->optval) return -ENOMEM; ctx->optval_end = ctx->optval + max_optlen; - return 0; + return max_optlen; } static void sockopt_free_buf(struct bpf_sockopt_kern *ctx) @@ -1009,13 +1366,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, */ max_optlen = max_t(int, 16, *optlen); - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; ctx.optlen = *optlen; - if (copy_from_user(ctx.optval, optval, *optlen) != 0) { + if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1043,13 +1400,20 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, /* export any potential modifications */ *level = ctx.level; *optname = ctx.optname; - *optlen = ctx.optlen; - *kernel_optval = ctx.optval; + + /* optlen == 0 from BPF indicates that we should + * use original userspace data. 
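+ * Put differently: a program that only wants to observe the option
+ * sets ctx.optlen to 0 and the original user buffer is passed
+ * through unchanged; if it rewrites the value, the kernel copy is
+ * handed to the setsockopt handler via *kernel_optval and is
+ * intentionally not freed here.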
+ */ + if (ctx.optlen != 0) { + *optlen = ctx.optlen; + *kernel_optval = ctx.optval; + /* export and don't free sockopt buf */ + return 0; + } } out: - if (ret) - sockopt_free_buf(&ctx); + sockopt_free_buf(&ctx); return ret; } EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt); @@ -1076,12 +1440,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) return retval; - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; - ctx.optlen = max_optlen; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; + if (!retval) { /* If kernel getsockopt finished successfully, * copy whatever was returned to the user back @@ -1095,10 +1459,13 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (ctx.optlen > max_optlen) - ctx.optlen = max_optlen; + if (ctx.optlen < 0) { + ret = -EFAULT; + goto out; + } - if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) { + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1114,7 +1481,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (ctx.optlen > max_optlen) { + if (ctx.optlen > max_optlen || ctx.optlen < 0) { ret = -EFAULT; goto out; } @@ -1127,10 +1494,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (copy_to_user(optval, ctx.optval, ctx.optlen) || - put_user(ctx.optlen, optlen)) { - ret = -EFAULT; - goto out; + if (ctx.optlen != 0) { + if (copy_to_user(optval, ctx.optval, ctx.optlen) || + put_user(ctx.optlen, optlen)) { + ret = -EFAULT; + goto out; + } } ret = ctx.retval; @@ -1409,11 +1778,29 @@ const struct bpf_verifier_ops cg_sysctl_verifier_ops = { const struct bpf_prog_ops cg_sysctl_prog_ops = { }; +#ifdef CONFIG_NET +BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx) +{ + const struct net *net = ctx ? 
sock_net(ctx->sk) : &init_net; + + return net->net_cookie; +} + +static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = { + .func = bpf_get_netns_cookie_sockopt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; +#endif + static const struct bpf_func_proto * cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { #ifdef CONFIG_NET + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_sockopt_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ef0e1e3e66f4..9159e381d4ca 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -31,6 +31,9 @@ #include <linux/rcupdate.h> #include <linux/perf_event.h> +#include <asm/barrier.h> +#include <linux/extable.h> +#include <linux/log2.h> #include <asm/unaligned.h> /* Registers */ @@ -255,6 +258,7 @@ void __bpf_prog_free(struct bpf_prog *fp) { if (fp->aux) { free_percpu(fp->aux->stats); + kfree(fp->aux->poke_tab); kfree(fp->aux); } vfree(fp); @@ -387,6 +391,13 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, i = end_new; insn = prog->insnsi + end_old; } + if (bpf_pseudo_func(insn)) { + ret = bpf_adj_delta_to_imm(insn, pos, end_old, + end_new, i, probe_pass); + if (ret) + return ret; + continue; + } code = insn->code; if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) || @@ -646,7 +657,7 @@ static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) void bpf_prog_kallsyms_add(struct bpf_prog *fp) { if (!bpf_prog_kallsyms_candidate(fp) || - !capable(CAP_SYS_ADMIN)) + !bpf_capable()) return; spin_lock_bh(&bpf_lock); @@ -668,9 +679,6 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) { struct latch_tree_node *n; - if (!bpf_jit_kallsyms_enabled()) - return NULL; - n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); return n ? 
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : @@ -712,6 +720,24 @@ bool is_bpf_text_address(unsigned long addr) return ret; } +const struct exception_table_entry *search_bpf_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct bpf_prog *prog; + + rcu_read_lock(); + prog = bpf_prog_kallsyms_find(addr); + if (!prog) + goto out; + if (!prog->aux->num_exentries) + goto out; + + e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); +out: + rcu_read_unlock(); + return e; +} + int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym) { @@ -740,6 +766,39 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, return ret; } +int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, + struct bpf_jit_poke_descriptor *poke) +{ + struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; + static const u32 poke_tab_max = 1024; + u32 slot = prog->aux->size_poke_tab; + u32 size = slot + 1; + + if (size > poke_tab_max) + return -ENOSPC; + if (poke->ip || poke->ip_stable || poke->adj_off) + return -EINVAL; + + switch (poke->reason) { + case BPF_POKE_REASON_TAIL_CALL: + if (!poke->tail_call.map) + return -EINVAL; + break; + default: + return -EINVAL; + } + + tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL); + if (!tab) + return -ENOMEM; + + memcpy(&tab[slot], poke, sizeof(*poke)); + prog->aux->size_poke_tab = size; + prog->aux->poke_tab = tab; + + return slot; +} + static atomic_long_t bpf_jit_current; /* Can be overridden by an arch's JIT compiler if it has a custom, @@ -800,6 +859,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, struct bpf_binary_header *hdr; u32 size, hole, start, pages; + WARN_ON_ONCE(!is_power_of_2(alignment) || + alignment > BPF_IMAGE_ALIGNMENT); + /* Most of BPF filters are really small, but if some of them * fill a page, allow at least 128 extra bytes to insert a * random section of illegal instructions. @@ -1291,6 +1353,12 @@ bool bpf_opcode_in_insntable(u8 code) } #ifndef CONFIG_BPF_JIT_ALWAYS_ON +u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) +{ + memset(dst, 0, size); + return -EFAULT; +} + /** * __bpf_prog_run - run eBPF program on a given context * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers @@ -1299,7 +1367,7 @@ bool bpf_opcode_in_insntable(u8 code) * * Decode and execute eBPF instructions. */ -static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) +static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) { #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z @@ -1310,6 +1378,11 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6 /* Non-UAPI available opcodes. 
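 * These encodings never come from userspace; they are created by
 * verifier rewrites (e.g. PROBE_MEM loads from kernel pointers and
 * the NOSPEC store barrier) or by internal call fixups (CALL_ARGS,
 * TAIL_CALL).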
*/ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, + [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B, + [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H, + [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W, + [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW, + [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, }; #undef BPF_INSN_3_LBL #undef BPF_INSN_2_LBL @@ -1321,29 +1394,54 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6 select_insn: goto *jumptable[insn->code]; - /* ALU */ -#define ALU(OPCODE, OP) \ - ALU64_##OPCODE##_X: \ - DST = DST OP SRC; \ - CONT; \ - ALU_##OPCODE##_X: \ - DST = (u32) DST OP (u32) SRC; \ - CONT; \ - ALU64_##OPCODE##_K: \ - DST = DST OP IMM; \ - CONT; \ - ALU_##OPCODE##_K: \ - DST = (u32) DST OP (u32) IMM; \ + /* Explicitly mask the register-based shift amounts with 63 or 31 + * to avoid undefined behavior. Normally this won't affect the + * generated code, for example, in case of native 64 bit archs such + * as x86-64 or arm64, the compiler is optimizing the AND away for + * the interpreter. In case of JITs, each of the JIT backends compiles + * the BPF shift operations to machine instructions which produce + * implementation-defined results in such a case; the resulting + * contents of the register may be arbitrary, but program behaviour + * as a whole remains defined. In other words, in case of JIT backends, + * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. + */ + /* ALU (shifts) */ +#define SHT(OPCODE, OP) \ + ALU64_##OPCODE##_X: \ + DST = DST OP (SRC & 63); \ + CONT; \ + ALU_##OPCODE##_X: \ + DST = (u32) DST OP ((u32) SRC & 31); \ + CONT; \ + ALU64_##OPCODE##_K: \ + DST = DST OP IMM; \ + CONT; \ + ALU_##OPCODE##_K: \ + DST = (u32) DST OP (u32) IMM; \ + CONT; + /* ALU (rest) */ +#define ALU(OPCODE, OP) \ + ALU64_##OPCODE##_X: \ + DST = DST OP SRC; \ + CONT; \ + ALU_##OPCODE##_X: \ + DST = (u32) DST OP (u32) SRC; \ + CONT; \ + ALU64_##OPCODE##_K: \ + DST = DST OP IMM; \ + CONT; \ + ALU_##OPCODE##_K: \ + DST = (u32) DST OP (u32) IMM; \ CONT; - ALU(ADD, +) ALU(SUB, -) ALU(AND, &) ALU(OR, |) - ALU(LSH, <<) - ALU(RSH, >>) ALU(XOR, ^) ALU(MUL, *) + SHT(LSH, <<) + SHT(RSH, >>) +#undef SHT #undef ALU ALU_NEG: DST = (u32) -DST; @@ -1368,13 +1466,13 @@ select_insn: insn++; CONT; ALU_ARSH_X: - DST = (u64) (u32) (((s32) DST) >> SRC); + DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); CONT; ALU_ARSH_K: DST = (u64) (u32) (((s32) DST) >> IMM); CONT; ALU64_ARSH_X: - (*(s64 *) &DST) >>= SRC; + (*(s64 *) &DST) >>= (SRC & 63); CONT; ALU64_ARSH_K: (*(s64 *) &DST) >>= IMM; @@ -1525,7 +1623,21 @@ out: COND_JMP(s, JSGE, >=) COND_JMP(s, JSLE, <=) #undef COND_JMP - /* STX and ST and LDX*/ + /* ST, STX and LDX*/ + ST_NOSPEC: + /* Speculation barrier for mitigating Speculative Store Bypass. + * In case of arm64, we rely on the firmware mitigation as + * controlled via the ssbd kernel parameter. Whenever the + * mitigation is enabled, it works for all of the kernel code + * with no need to provide any additional instructions here. + * In case of x86, we use 'lfence' insn for mitigation. We + * reuse preexisting logic from Spectre v1 mitigation that + * happens to produce the required code on x86 for v4 as well. 
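+ * On other architectures this currently falls through to a plain
+ * CONT, i.e. the nospec barrier is a no-op for the interpreter
+ * there.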
+ */ +#ifdef CONFIG_X86 + barrier_nospec(); +#endif + CONT; #define LDST(SIZEOP, SIZE) \ STX_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ @@ -1542,6 +1654,17 @@ out: LDST(W, u32) LDST(DW, u64) #undef LDST +#define LDX_PROBE(SIZEOP, SIZE, TYPE) \ + LDX_PROBE_MEM_##SIZEOP: \ + bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \ + DST = *((TYPE *)&DST); \ + CONT; + LDX_PROBE(B, 1, u8) + LDX_PROBE(H, 2, u16) + LDX_PROBE(W, 4, u32) + LDX_PROBE(DW, 8, u64) +#undef LDX_PROBE + STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ atomic_add((u32) SRC, (atomic_t *)(unsigned long) (DST + insn->off)); @@ -1652,18 +1775,17 @@ bool bpf_prog_array_compatible(struct bpf_array *array, if (fp->kprobe_override) return false; - if (!array->owner_prog_type) { + if (!array->aux->type) { /* There's no owner yet where we could check for * compatibility. */ - array->owner_prog_type = fp->type; - array->owner_jited = fp->jited; - + array->aux->type = fp->type; + array->aux->jited = fp->jited; return true; } - return array->owner_prog_type == fp->type && - array->owner_jited == fp->jited; + return array->aux->type == fp->type && + array->aux->jited == fp->jited; } static int bpf_check_tail_call(const struct bpf_prog *fp) @@ -1880,6 +2002,61 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array, } } +/** + * bpf_prog_array_delete_safe_at() - Replaces the program at the given + * index into the program array with + * a dummy no-op program. + * @array: a bpf_prog_array + * @index: the index of the program to replace + * + * Skips over dummy programs, by not counting them, when calculating + * the position of the program to replace. + * + * Return: + * * 0 - Success + * * -EINVAL - Invalid index value. Must be a non-negative integer.
+ * * -ENOENT - Index out of range + */ +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog) +{ + struct bpf_prog_array_item *item; + + if (unlikely(index < 0)) + return -EINVAL; + + for (item = array->items; item->prog; item++) { + if (item->prog == &dummy_bpf_prog.prog) + continue; + if (!index) { + WRITE_ONCE(item->prog, prog); + return 0; + } + index--; + } + return -ENOENT; +} + int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, @@ -1964,18 +2141,47 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array, : 0; } +static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) { + if (!aux->cgroup_storage[stype]) + continue; + bpf_cgroup_storage_release(aux->prog, + aux->cgroup_storage[stype]); + } +} + +static void bpf_free_used_maps(struct bpf_prog_aux *aux) +{ + struct bpf_map *map; + int i; + + bpf_free_cgroup_storage(aux); + for (i = 0; i < aux->used_map_cnt; i++) { + map = aux->used_maps[i]; + if (map->ops->map_poke_untrack) + map->ops->map_poke_untrack(map, aux); + bpf_map_put(map); + } + kfree(aux->used_maps); +} + static void bpf_prog_free_deferred(struct work_struct *work) { struct bpf_prog_aux *aux; int i; aux = container_of(work, struct bpf_prog_aux, work); + bpf_free_used_maps(aux); if (bpf_prog_is_dev_bound(aux)) bpf_prog_offload_destroy(aux->prog); #ifdef CONFIG_PERF_EVENTS if (aux->prog->has_callchain_buf) put_callchain_buffers(); #endif + bpf_trampoline_put(aux->trampoline); for (i = 0; i < aux->func_cnt; i++) bpf_jit_free(aux->func[i]); if (aux->func_cnt) { @@ -1991,6 +2197,8 @@ void bpf_prog_free(struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; + if (aux->linked_prog) + bpf_prog_put(aux->linked_prog); INIT_WORK(&aux->work, bpf_prog_free_deferred); schedule_work(&aux->work); } @@ -2031,6 +2239,7 @@ const struct bpf_func_proto bpf_map_pop_elem_proto __weak; const struct bpf_func_proto bpf_map_peek_elem_proto __weak; const struct bpf_func_proto bpf_spin_lock_proto __weak; const struct bpf_func_proto bpf_spin_unlock_proto __weak; +const struct bpf_func_proto bpf_jiffies64_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; @@ -2105,6 +2314,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, return -EFAULT; } +int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *addr1, void *addr2) +{ + return -ENOTSUPP; +} + DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); EXPORT_SYMBOL(bpf_stats_enabled_key); diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index ef49e17ae47c..637d85260e1f 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -84,7 +84,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) int ret, cpu; u64 cost; - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return ERR_PTR(-EPERM); /* check sanity of attributes */ @@ -486,7 +486,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, return -EOVERFLOW; /* Make sure CPU is a valid possible cpu */ - if (!cpu_possible(key_cpu)) + if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu)) return -ENODEV; if (qsize == 0) { @@ -589,6 +589,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +static int cpu_map_btf_id; const struct bpf_map_ops cpu_map_ops = { .map_alloc = cpu_map_alloc, 
.map_free = cpu_map_free, @@ -597,6 +598,8 @@ const struct bpf_map_ops cpu_map_ops = { .map_lookup_elem = cpu_map_lookup_elem, .map_get_next_key = cpu_map_get_next_key, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_cpu_map", + .map_btf_id = &cpu_map_btf_id, }; static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index b4b6b77f309c..f8765d1a5c39 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -88,12 +88,13 @@ struct bpf_dtab { static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); -static struct hlist_head *dev_map_create_hash(unsigned int entries) +static struct hlist_head *dev_map_create_hash(unsigned int entries, + int numa_node) { int i; struct hlist_head *hash; - hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL); + hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); if (hash != NULL) for (i = 0; i < entries; i++) INIT_HLIST_HEAD(&hash[i]); @@ -151,7 +152,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu)); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets); + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, + dtab->map.numa_node); if (!dtab->dev_index_head) goto free_percpu; @@ -249,7 +251,7 @@ static void dev_map_free(struct bpf_map *map) } } - kfree(dtab->dev_index_head); + bpf_map_area_free(dtab->dev_index_head); } else { for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; @@ -718,6 +720,7 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, map, key, value, map_flags); } +static int dev_map_btf_id; const struct bpf_map_ops dev_map_ops = { .map_alloc = dev_map_alloc, .map_free = dev_map_free, @@ -726,8 +729,11 @@ const struct bpf_map_ops dev_map_ops = { .map_update_elem = dev_map_update_elem, .map_delete_elem = dev_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_dtab", + .map_btf_id = &dev_map_btf_id, }; +static int dev_map_hash_map_btf_id; const struct bpf_map_ops dev_map_hash_ops = { .map_alloc = dev_map_alloc, .map_free = dev_map_free, @@ -736,6 +742,8 @@ const struct bpf_map_ops dev_map_hash_ops = { .map_update_elem = dev_map_hash_update_elem, .map_delete_elem = dev_map_hash_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_dtab", + .map_btf_id = &dev_map_hash_map_btf_id, }; static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab, diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index b44d8c447afd..ff1dd7d45b58 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -162,15 +162,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, else verbose(cbs->private_data, "BUG_%02x\n", insn->code); } else if (class == BPF_ST) { - if (BPF_MODE(insn->code) != BPF_MEM) { + if (BPF_MODE(insn->code) == BPF_MEM) { + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->imm); + } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) { + verbose(cbs->private_data, "(%02x) nospec\n", insn->code); + } else { verbose(cbs->private_data, "BUG_st_%02x\n", insn->code); - return; } - verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", - insn->code, - bpf_ldst_string[BPF_SIZE(insn->code) >> 3], - insn->dst_reg, - insn->off, insn->imm); } else if (class == BPF_LDX) { if 
(BPF_MODE(insn->code) != BPF_MEM) { verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code); diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c new file mode 100644 index 000000000000..204ee61a3904 --- /dev/null +++ b/kernel/bpf/dispatcher.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2019 Intel Corporation. */ + +#include <linux/hash.h> +#include <linux/bpf.h> +#include <linux/filter.h> + +/* The BPF dispatcher is a multiway branch code generator. The + * dispatcher is a mechanism to avoid the performance penalty of an + * indirect call, which is expensive when retpolines are enabled. A + * dispatch client registers a BPF program into the dispatcher, and if + * there is available room in the dispatcher a direct call to the BPF + * program will be generated. All calls to the BPF programs called via + * the dispatcher will then be a direct call, instead of an + * indirect. The dispatcher hijacks a trampoline function it via the + * __fentry__ of the trampoline. The trampoline function has the + * following signature: + * + * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi, + * unsigned int (*bpf_func)(const void *, + * const struct bpf_insn *)); + */ + +static struct bpf_dispatcher_prog *bpf_dispatcher_find_prog( + struct bpf_dispatcher *d, struct bpf_prog *prog) +{ + int i; + + for (i = 0; i < BPF_DISPATCHER_MAX; i++) { + if (prog == d->progs[i].prog) + return &d->progs[i]; + } + return NULL; +} + +static struct bpf_dispatcher_prog *bpf_dispatcher_find_free( + struct bpf_dispatcher *d) +{ + return bpf_dispatcher_find_prog(d, NULL); +} + +static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d, + struct bpf_prog *prog) +{ + struct bpf_dispatcher_prog *entry; + + if (!prog) + return false; + + entry = bpf_dispatcher_find_prog(d, prog); + if (entry) { + refcount_inc(&entry->users); + return false; + } + + entry = bpf_dispatcher_find_free(d); + if (!entry) + return false; + + bpf_prog_inc(prog); + entry->prog = prog; + refcount_set(&entry->users, 1); + d->num_progs++; + return true; +} + +static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d, + struct bpf_prog *prog) +{ + struct bpf_dispatcher_prog *entry; + + if (!prog) + return false; + + entry = bpf_dispatcher_find_prog(d, prog); + if (!entry) + return false; + + if (refcount_dec_and_test(&entry->users)) { + entry->prog = NULL; + bpf_prog_put(prog); + d->num_progs--; + return true; + } + return false; +} + +int __weak arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) +{ + return -ENOTSUPP; +} + +static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image) +{ + s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0]; + int i; + + for (i = 0; i < BPF_DISPATCHER_MAX; i++) { + if (d->progs[i].prog) + *ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func; + } + return arch_prepare_bpf_dispatcher(image, &ips[0], d->num_progs); +} + +static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) +{ + void *old, *new; + u32 noff; + int err; + + if (!prev_num_progs) { + old = NULL; + noff = 0; + } else { + old = d->image + d->image_off; + noff = d->image_off ^ (PAGE_SIZE / 2); + } + + new = d->num_progs ? 
d->image + noff : NULL; + if (new) { + if (bpf_dispatcher_prepare(d, new)) + return; + } + + err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new); + if (err || !new) + return; + + d->image_off = noff; +} + +void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, + struct bpf_prog *to) +{ + bool changed = false; + int prev_num_progs; + + if (from == to) + return; + + mutex_lock(&d->mutex); + if (!d->image) { + d->image = bpf_jit_alloc_exec_page(); + if (!d->image) + goto out; + } + + prev_num_progs = d->num_progs; + changed |= bpf_dispatcher_remove_prog(d, from); + changed |= bpf_dispatcher_add_prog(d, to); + + if (!changed) + goto out; + + bpf_dispatcher_update(d, prev_num_progs); +out: + mutex_unlock(&d->mutex); +} diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 22066a62c8c9..bc45c6f05564 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -57,6 +57,32 @@ struct htab_elem { char key[0] __aligned(8); }; +static void htab_init_buckets(struct bpf_htab *htab) +{ + unsigned i; + + for (i = 0; i < htab->n_buckets; i++) { + INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); + raw_spin_lock_init(&htab->buckets[i].lock); + } +} + +static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab, + struct bucket *b) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&b->lock, flags); + return flags; +} + +static inline void htab_unlock_bucket(const struct bpf_htab *htab, + struct bucket *b, + unsigned long flags) +{ + raw_spin_unlock_irqrestore(&b->lock, flags); +} + static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node); static bool htab_is_lru(const struct bpf_htab *htab) @@ -97,6 +123,32 @@ static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) return (struct htab_elem *) (htab->elems + i * htab->elem_size); } +static bool htab_has_extra_elems(struct bpf_htab *htab) +{ + return !htab_is_percpu(htab) && !htab_is_lru(htab); +} + +static void htab_free_prealloced_timers(struct bpf_htab *htab) +{ + u32 num_entries = htab->map.max_entries; + int i; + + if (likely(!map_value_has_timer(&htab->map))) + return; + if (htab_has_extra_elems(htab)) + num_entries += num_possible_cpus(); + + for (i = 0; i < num_entries; i++) { + struct htab_elem *elem; + + elem = get_htab_elem(htab, i); + bpf_timer_cancel_and_free(elem->key + + round_up(htab->map.key_size, 8) + + htab->map.timer_off); + cond_resched(); + } +} + static void htab_free_elems(struct bpf_htab *htab) { int i; @@ -123,8 +175,12 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, struct htab_elem *l; if (node) { + u32 key_size = htab->map.key_size; + l = container_of(node, struct htab_elem, lru_node); - memcpy(l->key, key, htab->map.key_size); + memcpy(l->key, key, key_size); + check_and_init_map_value(&htab->map, + l->key + round_up(key_size, 8)); return l; } @@ -136,7 +192,7 @@ static int prealloc_init(struct bpf_htab *htab) u32 num_entries = htab->map.max_entries; int err = -ENOMEM, i; - if (!htab_is_percpu(htab) && !htab_is_lru(htab)) + if (htab_has_extra_elems(htab)) num_entries += num_possible_cpus(); htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries, @@ -244,9 +300,9 @@ static int htab_map_alloc_check(union bpf_attr *attr) BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != offsetof(struct htab_elem, hash_node.pprev)); - if (lru && !capable(CAP_SYS_ADMIN)) + if (lru && !bpf_capable()) /* LRU implementation is much complicated than other - * maps. Hence, limit to CAP_SYS_ADMIN for now. + * maps. 
Hence, limit to CAP_BPF. */ return -EPERM; @@ -306,8 +362,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); struct bpf_htab *htab; - int err, i; u64 cost; + int err; htab = kzalloc(sizeof(*htab), GFP_USER); if (!htab) @@ -369,10 +425,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) else htab->hashrnd = get_random_int(); - for (i = 0; i < htab->n_buckets; i++) { - INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); - raw_spin_lock_init(&htab->buckets[i].lock); - } + htab_init_buckets(htab); if (prealloc) { err = prealloc_init(htab); @@ -500,7 +553,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key) * bpf_prog * __htab_map_lookup_elem */ -static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; const int ret = BPF_REG_0; @@ -539,7 +592,7 @@ static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) return __htab_lru_map_lookup_elem(map, key, false); } -static u32 htab_lru_map_gen_lookup(struct bpf_map *map, +static int htab_lru_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; @@ -564,6 +617,14 @@ static u32 htab_lru_map_gen_lookup(struct bpf_map *map, return insn - insn_buf; } +static void check_and_free_timer(struct bpf_htab *htab, struct htab_elem *elem) +{ + if (unlikely(map_value_has_timer(&htab->map))) + bpf_timer_cancel_and_free(elem->key + + round_up(htab->map.key_size, 8) + + htab->map.timer_off); +} + /* It is called from the bpf_lru_list when the LRU needs to delete * older elements from the htab. 
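 * Eviction takes the bucket lock via htab_lock_bucket() so it cannot
 * race with a concurrent update or delete on the same bucket, and
 * check_and_free_timer() cancels any armed bpf_timer in the element
 * before the node is reused.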
*/ @@ -580,15 +641,16 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) b = __select_bucket(htab, tgt_l->hash); head = &b->head; - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) if (l == tgt_l) { hlist_nulls_del_rcu(&l->hash_node); + check_and_free_timer(htab, l); break; } - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); return l == tgt_l; } @@ -656,6 +718,7 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) { if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); + check_and_free_timer(htab, l); kfree(l); } @@ -664,28 +727,26 @@ static void htab_elem_free_rcu(struct rcu_head *head) struct htab_elem *l = container_of(head, struct htab_elem, rcu); struct bpf_htab *htab = l->htab; - /* must increment bpf_prog_active to avoid kprobe+bpf triggering while - * we're calling kfree, otherwise deadlock is possible if kprobes - * are placed somewhere inside of slub - */ - preempt_disable(); - __this_cpu_inc(bpf_prog_active); htab_elem_free(htab, l); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); +} + +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) +{ + struct bpf_map *map = &htab->map; + void *ptr; + + if (map->ops->map_fd_put_ptr) { + ptr = fd_htab_map_get_ptr(map, l); + map->ops->map_fd_put_ptr(ptr); + } } static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { - struct bpf_map *map = &htab->map; - - if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - - map->ops->map_fd_put_ptr(ptr); - } + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { + check_and_free_timer(htab, l); __pcpu_freelist_push(&htab->freelist, &l->fnode); } else { atomic_dec(&htab->count); @@ -712,6 +773,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, } } +static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, + void *value, bool onallcpus) +{ + /* When using prealloc and not setting the initial value on all cpus, + * zero-fill element values for other cpus (just as what happens when + * not using prealloc). Otherwise, bpf program has no way to ensure + * known initial values for cpus other than current one + * (onallcpus=false always when coming from bpf prog). 
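+ * Concretely, for a preallocated per-cpu hash updated from a
+ * program, the current CPU's slot gets the caller's value and every
+ * other CPU's slot is zero-filled, instead of inheriting stale data
+ * from a previously freed element.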
+ */ + if (htab_is_prealloc(htab) && !onallcpus) { + u32 size = round_up(htab->map.value_size, 8); + int current_cpu = raw_smp_processor_id(); + int cpu; + + for_each_possible_cpu(cpu) { + if (cpu == current_cpu) + bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value, + size); + else + memset(per_cpu_ptr(pptr, cpu), 0, size); + } + } else { + pcpu_copy_value(htab, pptr, value, onallcpus); + } +} + static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) { return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && @@ -735,6 +822,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; @@ -761,8 +849,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, l_new = ERR_PTR(-ENOMEM); goto dec_count; } - check_and_init_map_lock(&htab->map, - l_new->key + round_up(key_size, 8)); + check_and_init_map_value(&htab->map, + l_new->key + round_up(key_size, 8)); } memcpy(l_new->key, key, key_size); @@ -781,7 +869,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, } } - pcpu_copy_value(htab, pptr, value, onallcpus); + pcpu_init_value(htab, pptr, value, onallcpus); if (!prealloc) htab_elem_set_ptr(l_new, key_size, pptr); @@ -863,7 +951,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, } /* bpf_map_update_elem() can be called in_irq() */ - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l_old = lookup_elem_raw(head, hash, key, key_size); @@ -901,13 +989,21 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, hlist_nulls_del_rcu(&l_old->hash_node); if (!htab_is_prealloc(htab)) free_htab_elem(htab, l_old); + else + check_and_free_timer(htab, l_old); } ret = 0; err: - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); return ret; } +static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) +{ + check_and_free_timer(htab, elem); + bpf_lru_push_free(&htab->lru, &elem->lru_node); +} + static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { @@ -940,10 +1036,11 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, l_new = prealloc_lru_pop(htab, key, hash); if (!l_new) return -ENOMEM; - memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size); + copy_map_value(&htab->map, + l_new->key + round_up(map->key_size, 8), value); /* bpf_map_update_elem() can be called in_irq() */ - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l_old = lookup_elem_raw(head, hash, key, key_size); @@ -962,12 +1059,12 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, ret = 0; err: - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); if (ret) - bpf_lru_push_free(&htab->lru, &l_new->lru_node); + htab_lru_push_free(htab, l_new); else if (l_old) - bpf_lru_push_free(&htab->lru, &l_old->lru_node); + htab_lru_push_free(htab, l_old); return ret; } @@ -998,7 +1095,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, head = &b->head; /* bpf_map_update_elem() can be called in_irq() */ - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1021,7 +1118,7 @@ static int __htab_percpu_map_update_elem(struct 
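/* Editorial sketch, not part of the patch: with pcpu_init_value() above, an
 * update coming from a BPF program (onallcpus=false) on a *preallocated*
 * per-cpu hash zero-fills the slots of all other CPUs instead of leaking
 * whatever a recycled element previously held. Illustrative BPF program
 * fragment; the map name, section name and vmlinux.h/libbpf build are
 * assumptions of this sketch:
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 16);
	__type(key, u32);
	__type(value, u64);
} pcpu SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int update_from_prog(void *ctx)
{
	u32 key = 0;
	u64 val = 1;

	/* writes this CPU's slot; all other CPUs' slots now read back as 0 */
	bpf_map_update_elem(&pcpu, &key, &val, BPF_ANY);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";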
bpf_map *map, void *key, } ret = 0; err: - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); return ret; } @@ -1062,7 +1159,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, } /* bpf_map_update_elem() can be called in_irq() */ - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1077,14 +1174,14 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), value, onallcpus); } else { - pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size), + pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), value, onallcpus); hlist_nulls_add_head_rcu(&l_new->hash_node, head); l_new = NULL; } ret = 0; err: - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); if (l_new) bpf_lru_push_free(&htab->lru, &l_new->lru_node); return ret; @@ -1122,7 +1219,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) b = __select_bucket(htab, hash); head = &b->head; - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l = lookup_elem_raw(head, hash, key, key_size); @@ -1132,7 +1229,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) ret = 0; } - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); return ret; } @@ -1154,7 +1251,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) b = __select_bucket(htab, hash); head = &b->head; - raw_spin_lock_irqsave(&b->lock, flags); + flags = htab_lock_bucket(htab, b); l = lookup_elem_raw(head, hash, key, key_size); @@ -1163,9 +1260,9 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) ret = 0; } - raw_spin_unlock_irqrestore(&b->lock, flags); + htab_unlock_bucket(htab, b, flags); if (l) - bpf_lru_push_free(&htab->lru, &l->lru_node); + htab_lru_push_free(htab, l); return ret; } @@ -1185,6 +1282,35 @@ static void delete_all_elements(struct bpf_htab *htab) } } +static void htab_free_malloced_timers(struct bpf_htab *htab) +{ + int i; + + rcu_read_lock(); + for (i = 0; i < htab->n_buckets; i++) { + struct hlist_nulls_head *head = select_bucket(htab, i); + struct hlist_nulls_node *n; + struct htab_elem *l; + + hlist_nulls_for_each_entry(l, n, head, hash_node) + check_and_free_timer(htab, l); + cond_resched_rcu(); + } + rcu_read_unlock(); +} + +static void htab_map_free_timers(struct bpf_map *map) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + + if (likely(!map_value_has_timer(&htab->map))) + return; + if (!htab_is_prealloc(htab)) + htab_free_malloced_timers(htab); + else + htab_free_prealloced_timers(htab); +} + /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ static void htab_map_free(struct bpf_map *map) { @@ -1232,29 +1358,283 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, + void *callback_ctx, u64 flags) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_nulls_head *head; + struct hlist_nulls_node *n; + struct htab_elem *elem; + u32 roundup_key_size; + int i, num_elems = 0; + void __percpu *pptr; + struct bucket *b; + void *key, *val; + bool is_percpu; + u64 ret = 0; + + if (flags != 0) + return -EINVAL; + + is_percpu = htab_is_percpu(htab); + + roundup_key_size = round_up(map->key_size, 8); + for 
(i = 0; i < htab->n_buckets; i++) { + b = &htab->buckets[i]; + rcu_read_lock(); + head = &b->head; + hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { + key = elem->key; + if (is_percpu) { + /* current cpu value for percpu map */ + pptr = htab_elem_get_ptr(elem, map->key_size); + val = this_cpu_ptr(pptr); + } else { + val = elem->key + roundup_key_size; + } + num_elems++; + ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, + (u64)(long)key, (u64)(long)val, + (u64)(long)callback_ctx, 0); + /* return value: 0 - continue, 1 - stop and return */ + if (ret) { + rcu_read_unlock(); + goto out; + } + } + rcu_read_unlock(); + } +out: + return num_elems; +} + +static int htab_map_btf_id; +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; // non-zero means percpu hash + unsigned long flags; + u32 bucket_id; + u32 skip_elems; +}; + +static struct htab_elem * +bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, + struct htab_elem *prev_elem) +{ + const struct bpf_htab *htab = info->htab; + unsigned long flags = info->flags; + u32 skip_elems = info->skip_elems; + u32 bucket_id = info->bucket_id; + struct hlist_nulls_head *head; + struct hlist_nulls_node *n; + struct htab_elem *elem; + struct bucket *b; + u32 i, count; + + if (bucket_id >= htab->n_buckets) + return NULL; + + /* try to find next elem in the same bucket */ + if (prev_elem) { + /* no update/deletion on this bucket, prev_elem should be still valid + * and we won't skip elements. + */ + n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); + elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node); + if (elem) + return elem; + + /* not found, unlock and go to the next bucket */ + b = &htab->buckets[bucket_id++]; + htab_unlock_bucket(htab, b, flags); + skip_elems = 0; + } + + for (i = bucket_id; i < htab->n_buckets; i++) { + b = &htab->buckets[i]; + flags = htab_lock_bucket(htab, b); + + count = 0; + head = &b->head; + hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { + if (count >= skip_elems) { + info->flags = flags; + info->bucket_id = i; + info->skip_elems = count; + return elem; + } + count++; + } + + htab_unlock_bucket(htab, b, flags); + skip_elems = 0; + } + + info->bucket_id = i; + info->skip_elems = 0; + return NULL; +} + +static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + struct htab_elem *elem; + + elem = bpf_hash_map_seq_find_next(info, NULL); + if (!elem) + return NULL; + + if (*pos == 0) + ++*pos; + return elem; +} + +static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + + ++*pos; + ++info->skip_elems; + return bpf_hash_map_seq_find_next(info, v); +} + +static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + u32 roundup_key_size, roundup_value_size; + struct bpf_iter__bpf_map_elem ctx = {}; + struct bpf_map *map = info->map; + struct bpf_iter_meta meta; + int ret = 0, off = 0, cpu; + struct bpf_prog *prog; + void __percpu *pptr; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, elem == NULL); + if (prog) { + ctx.meta = &meta; + ctx.map = info->map; + if (elem) { + roundup_key_size = round_up(map->key_size, 8); + ctx.key = elem->key; + if (!info->percpu_value_buf) { + ctx.value = elem->key + roundup_key_size; + } else { + roundup_value_size = 
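/* Editorial sketch, not part of the patch: bpf_for_each_hash_elem() above is
 * the hashtab backend of the bpf_for_each_map_elem() helper. From BPF program
 * code it is driven roughly as follows (names and the summed map are
 * assumptions of this sketch); the callback's return contract mirrors the
 * "0 - continue, 1 - stop" check in the kernel loop:
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, u32);
	__type(value, u64);
} counts SEC(".maps");

static u64 sum_cb(struct bpf_map *map, u32 *key, u64 *val, u64 *sum)
{
	*sum += *val;
	return 0;	/* 0 = keep iterating, 1 = stop early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_values(void *ctx)
{
	u64 sum = 0;

	/* the helper returns the number of elements visited */
	bpf_for_each_map_elem(&counts, sum_cb, &sum, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";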
round_up(map->value_size, 8); + pptr = htab_elem_get_ptr(elem, map->key_size); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(info->percpu_value_buf + off, + per_cpu_ptr(pptr, cpu), + roundup_value_size); + off += roundup_value_size; + } + ctx.value = info->percpu_value_buf; + } + } + ret = bpf_iter_run_prog(prog, &ctx); + } + + return ret; +} + +static int bpf_hash_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_hash_map_seq_show(seq, v); +} + +static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + + if (!v) + (void)__bpf_hash_map_seq_show(seq, NULL); + else + htab_unlock_bucket(info->htab, + &info->htab->buckets[info->bucket_id], + info->flags); +} + +static int bpf_iter_init_hash_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_hash_map_info *seq_info = priv_data; + struct bpf_map *map = aux->map; + void *value_buf; + u32 buf_size; + + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { + buf_size = round_up(map->value_size, 8) * num_possible_cpus(); + value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); + if (!value_buf) + return -ENOMEM; + + seq_info->percpu_value_buf = value_buf; + } + + seq_info->map = map; + seq_info->htab = container_of(map, struct bpf_htab, map); + return 0; +} + +static void bpf_iter_fini_hash_map(void *priv_data) +{ + struct bpf_iter_seq_hash_map_info *seq_info = priv_data; + + kfree(seq_info->percpu_value_buf); +} + +static const struct seq_operations bpf_hash_map_seq_ops = { + .start = bpf_hash_map_seq_start, + .next = bpf_hash_map_seq_next, + .stop = bpf_hash_map_seq_stop, + .show = bpf_hash_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_hash_map_seq_ops, + .init_seq_private = bpf_iter_init_hash_map, + .fini_seq_private = bpf_iter_fini_hash_map, + .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info), +}; + const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, + .map_release_uref = htab_map_free_timers, .map_lookup_elem = htab_map_lookup_elem, .map_update_elem = htab_map_update_elem, .map_delete_elem = htab_map_delete_elem, .map_gen_lookup = htab_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_hash_elem, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_map_btf_id, + .iter_seq_info = &iter_seq_info, }; +static int htab_lru_map_btf_id; const struct bpf_map_ops htab_lru_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, + .map_release_uref = htab_map_free_timers, .map_lookup_elem = htab_lru_map_lookup_elem, .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, .map_update_elem = htab_lru_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, .map_gen_lookup = htab_lru_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_hash_elem, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_lru_map_btf_id, + .iter_seq_info = &iter_seq_info, }; /* Called from eBPF program */ @@ -1359,6 +1739,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, 
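/* Editorial sketch, not part of the patch: iter_seq_info above is what lets
 * user space create a "bpf_map_elem" iterator over a hash map. A matching BPF
 * iterator program could look like this; key/value types must match the
 * target map, and the headers assume a vmlinux.h/libbpf build:
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	u32 *key = ctx->key;
	u64 *val = ctx->value;

	/* key/value are NULL in the final stop() invocation */
	if (!key || !val)
		return 0;
	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";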
rcu_read_unlock(); } +static int htab_percpu_map_btf_id; const struct bpf_map_ops htab_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1368,8 +1749,14 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_update_elem = htab_percpu_map_update_elem, .map_delete_elem = htab_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_hash_elem, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_percpu_map_btf_id, + .iter_seq_info = &iter_seq_info, }; +static int htab_lru_percpu_map_btf_id; const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1379,6 +1766,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_update_elem = htab_lru_percpu_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, + .map_set_for_each_callback_args = map_set_for_each_callback_args, + .map_for_each_callback = bpf_for_each_hash_elem, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_lru_percpu_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int fd_htab_map_alloc_check(union bpf_attr *attr) @@ -1477,7 +1869,7 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) return READ_ONCE(*inner_map); } -static u32 htab_of_map_gen_lookup(struct bpf_map *map, +static int htab_of_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; @@ -1501,6 +1893,7 @@ static void htab_of_map_free(struct bpf_map *map) fd_htab_map_free(map); } +static int htab_of_maps_map_btf_id; const struct bpf_map_ops htab_of_maps_map_ops = { .map_alloc_check = fd_htab_map_alloc_check, .map_alloc = htab_of_map_alloc, @@ -1513,4 +1906,6 @@ const struct bpf_map_ops htab_of_maps_map_ops = { .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = htab_of_map_gen_lookup, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_of_maps_map_btf_id, }; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 5e28718928ca..4f84cac28e3b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -11,6 +11,7 @@ #include <linux/uidgid.h> #include <linux/filter.h> #include <linux/ctype.h> +#include <linux/jiffies.h> #include "../../lib/kstrtox.h" @@ -105,7 +106,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) } const struct bpf_func_proto bpf_map_peek_elem_proto = { - .func = bpf_map_pop_elem, + .func = bpf_map_peek_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, @@ -262,13 +263,18 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) static DEFINE_PER_CPU(unsigned long, irqsave_flags); -notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) +static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) { unsigned long flags; local_irq_save(flags); __bpf_spin_lock(lock); __this_cpu_write(irqsave_flags, flags); +} + +notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) +{ + __bpf_spin_lock_irqsave(lock); return 0; } @@ -279,13 +285,18 @@ const struct bpf_func_proto bpf_spin_lock_proto = { .arg1_type = ARG_PTR_TO_SPIN_LOCK, }; -notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) +static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) { unsigned long flags; flags = __this_cpu_read(irqsave_flags); 
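/* Editorial note, not part of the patch: the one-line .func fix above is
 * significant - BPF_FUNC_map_peek_elem was dispatching to bpf_map_pop_elem(),
 * so every "peek" on a queue/stack map silently consumed the element. With
 * the fix, peek and pop behave as documented. Illustrative fragment (map name
 * and section are assumptions of this sketch):
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 8);
	__type(value, u64);
} q SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int peek_then_pop(void *ctx)
{
	u64 v;

	if (bpf_map_peek_elem(&q, &v) == 0) {
		/* the element is still queued here ... */
		bpf_map_pop_elem(&q, &v);	/* ... until explicitly popped */
	}
	return 0;
}

char LICENSE[] SEC("license") = "GPL";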
__bpf_spin_unlock(lock); local_irq_restore(flags); +} + +notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) +{ + __bpf_spin_unlock_irqrestore(lock); return 0; } @@ -306,12 +317,23 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, else lock = dst + map->spin_lock_off; preempt_disable(); - ____bpf_spin_lock(lock); + __bpf_spin_lock_irqsave(lock); copy_map_value(map, dst, src); - ____bpf_spin_unlock(lock); + __bpf_spin_unlock_irqrestore(lock); preempt_enable(); } +BPF_CALL_0(bpf_jiffies64) +{ + return get_jiffies_64(); +} + +const struct bpf_func_proto bpf_jiffies64_proto = { + .func = bpf_jiffies64, + .gpl_only = false, + .ret_type = RET_INTEGER, +}; + #ifdef CONFIG_CGROUPS BPF_CALL_0(bpf_get_current_cgroup_id) { @@ -486,4 +508,319 @@ const struct bpf_func_proto bpf_strtoul_proto = { .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, }; + +/* BPF map elements can contain 'struct bpf_timer'. + * Such map owns all of its BPF timers. + * 'struct bpf_timer' is allocated as part of map element allocation + * and it's zero initialized. + * That space is used to keep 'struct bpf_timer_kern'. + * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and + * remembers 'struct bpf_map *' pointer it's part of. + * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn. + * bpf_timer_start() arms the timer. + * If user space reference to a map goes to zero at this point + * ops->map_release_uref callback is responsible for cancelling the timers, + * freeing their memory, and decrementing prog's refcnts. + * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. + * Inner maps can contain bpf timers as well. ops->map_release_uref is + * freeing the timers when inner map is replaced or deleted by user space. + */ +struct bpf_hrtimer { + struct hrtimer timer; + struct bpf_map *map; + struct bpf_prog *prog; + void __rcu *callback_fn; + void *value; +}; + +/* the actual struct hidden inside uapi struct bpf_timer */ +struct bpf_timer_kern { + struct bpf_hrtimer *timer; + /* bpf_spin_lock is used here instead of spinlock_t to make + * sure that it always fits into space reserved by struct bpf_timer + * regardless of LOCKDEP and spinlock debug flags. + */ + struct bpf_spin_lock lock; +} __attribute__((aligned(8))); + +static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); + +static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) +{ + struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); + struct bpf_map *map = t->map; + void *value = t->value; + void *callback_fn; + void *key; + u32 idx; + + callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held()); + if (!callback_fn) + goto out; + + /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and + * cannot be preempted by another bpf_timer_cb() on the same cpu. + * Remember the timer this callback is servicing to prevent + * deadlock if callback_fn() calls bpf_timer_cancel() or + * bpf_map_delete_elem() on the same timer. + */ + this_cpu_write(hrtimer_running, t); + if (map->map_type == BPF_MAP_TYPE_ARRAY) { + struct bpf_array *array = container_of(map, struct bpf_array, map); + + /* compute the key */ + idx = ((char *)value - array->value) / array->elem_size; + key = &idx; + } else { /* hash or lru */ + key = value - round_up(map->key_size, 8); + } + + BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key, + (u64)(long)value, 0, 0); + /* The verifier checked that return value is zero.
*/ + + this_cpu_write(hrtimer_running, NULL); +out: + return HRTIMER_NORESTART; +} + +BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map, + u64, flags) +{ + clockid_t clockid = flags & (MAX_CLOCKS - 1); + struct bpf_hrtimer *t; + int ret = 0; + + BUILD_BUG_ON(MAX_CLOCKS != 16); + BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer)); + BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer)); + + if (in_nmi()) + return -EOPNOTSUPP; + + if (flags >= MAX_CLOCKS || + /* similar to timerfd except _ALARM variants are not supported */ + (clockid != CLOCK_MONOTONIC && + clockid != CLOCK_REALTIME && + clockid != CLOCK_BOOTTIME)) + return -EINVAL; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (t) { + ret = -EBUSY; + goto out; + } + if (!atomic64_read(&map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. + */ + ret = -EPERM; + goto out; + } + /* allocate hrtimer via map_kmalloc to use memcg accounting */ + t = kmalloc_node(sizeof(*t), GFP_ATOMIC, map->numa_node); + if (!t) { + ret = -ENOMEM; + goto out; + } + t->value = (void *)timer - map->timer_off; + t->map = map; + t->prog = NULL; + rcu_assign_pointer(t->callback_fn, NULL); + hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); + t->timer.function = bpf_timer_cb; + timer->timer = t; +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +const struct bpf_func_proto bpf_timer_init_proto = { + .func = bpf_timer_init, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn, + struct bpf_prog_aux *, aux) +{ + struct bpf_prog *prev, *prog = aux->prog; + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t) { + ret = -EINVAL; + goto out; + } + if (!atomic64_read(&t->map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. Otherwise timer might still be + * running even when bpf prog is detached and user space + * is gone, since map_release_uref won't ever be called. + */ + ret = -EPERM; + goto out; + } + prev = t->prog; + if (prev != prog) { + /* Bump prog refcnt once. Every bpf_timer_set_callback() + * can pick different callback_fn-s within the same prog. 
+ */ + prog = bpf_prog_inc_not_zero(prog); + if (IS_ERR(prog)) { + ret = PTR_ERR(prog); + goto out; + } + if (prev) + /* Drop prev prog refcnt when swapping with new prog */ + bpf_prog_put(prev); + t->prog = prog; + } + rcu_assign_pointer(t->callback_fn, callback_fn); +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +const struct bpf_func_proto bpf_timer_set_callback_proto = { + .func = bpf_timer_set_callback, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_PTR_TO_FUNC, +}; + +BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) +{ + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + if (flags) + return -EINVAL; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t || !t->prog) { + ret = -EINVAL; + goto out; + } + hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT); +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +const struct bpf_func_proto bpf_timer_start_proto = { + .func = bpf_timer_start, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + +static void drop_prog_refcnt(struct bpf_hrtimer *t) +{ + struct bpf_prog *prog = t->prog; + + if (prog) { + bpf_prog_put(prog); + t->prog = NULL; + rcu_assign_pointer(t->callback_fn, NULL); + } +} + +BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) +{ + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t) { + ret = -EINVAL; + goto out; + } + if (this_cpu_read(hrtimer_running) == t) { + /* If bpf callback_fn is trying to bpf_timer_cancel() + * its own timer the hrtimer_cancel() will deadlock + * since it waits for callback_fn to finish + */ + ret = -EDEADLK; + goto out; + } + drop_prog_refcnt(t); +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + /* Cancel the timer and wait for associated callback to finish + * if it was running. + */ + ret = ret ?: hrtimer_cancel(&t->timer); + return ret; +} + +const struct bpf_func_proto bpf_timer_cancel_proto = { + .func = bpf_timer_cancel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, +}; + +/* This function is called by map_delete/update_elem for individual element and + * by ops->map_release_uref when the user space reference to a map reaches zero. + */ +void bpf_timer_cancel_and_free(void *val) +{ + struct bpf_timer_kern *timer = val; + struct bpf_hrtimer *t; + + /* Performance optimization: read timer->timer without lock first. */ + if (!READ_ONCE(timer->timer)) + return; + + __bpf_spin_lock_irqsave(&timer->lock); + /* re-read it under lock */ + t = timer->timer; + if (!t) + goto out; + drop_prog_refcnt(t); + /* The subsequent bpf_timer_start/cancel() helpers won't be able to use + * this timer, since it won't be initialized. + */ + timer->timer = NULL; +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + if (!t) + return; + /* Cancel the timer and wait for callback to complete if it was running. + * If hrtimer_cancel() can be safely called it's safe to call kfree(t) + * right after for both preallocated and non-preallocated maps. + * The timer->timer = NULL was already done and no code path can + * see address 't' anymore. + * + * Check that bpf_map_delete/update_elem() wasn't called from timer + * callback_fn. 
In such case don't call hrtimer_cancel() (since it will + * deadlock) and don't call hrtimer_try_to_cancel() (since it will just + * return -1). Though callback_fn is still running on this cpu it's + * safe to do kfree(t) because bpf_timer_cb() read everything it needed + * from 't'. The bpf subprog callback_fn won't be able to access 't', + * since timer->timer = NULL was already done. The timer will be + * effectively cancelled because bpf_timer_cb() will return + * HRTIMER_NORESTART. + */ + if (this_cpu_read(hrtimer_running) != t) + hrtimer_cancel(&t->timer); + kfree(t); +} + #endif diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 218c09ff6a27..ccfc7c9523a0 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -25,16 +25,20 @@ enum bpf_type { BPF_TYPE_UNSPEC = 0, BPF_TYPE_PROG, BPF_TYPE_MAP, + BPF_TYPE_LINK, }; static void *bpf_any_get(void *raw, enum bpf_type type) { switch (type) { case BPF_TYPE_PROG: - raw = bpf_prog_inc(raw); + bpf_prog_inc(raw); break; case BPF_TYPE_MAP: - raw = bpf_map_inc(raw, true); + bpf_map_inc_with_uref(raw); + break; + case BPF_TYPE_LINK: + bpf_link_inc(raw); break; default: WARN_ON_ONCE(1); @@ -53,6 +57,9 @@ static void bpf_any_put(void *raw, enum bpf_type type) case BPF_TYPE_MAP: bpf_map_put_with_uref(raw); break; + case BPF_TYPE_LINK: + bpf_link_put(raw); + break; default: WARN_ON_ONCE(1); break; @@ -63,20 +70,32 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type) { void *raw; - *type = BPF_TYPE_MAP; raw = bpf_map_get_with_uref(ufd); - if (IS_ERR(raw)) { - *type = BPF_TYPE_PROG; - raw = bpf_prog_get(ufd); + if (!IS_ERR(raw)) { + *type = BPF_TYPE_MAP; + return raw; } - return raw; + raw = bpf_prog_get(ufd); + if (!IS_ERR(raw)) { + *type = BPF_TYPE_PROG; + return raw; + } + + raw = bpf_link_get_from_fd(ufd); + if (!IS_ERR(raw)) { + *type = BPF_TYPE_LINK; + return raw; + } + + return ERR_PTR(-EINVAL); } static const struct inode_operations bpf_dir_iops; static const struct inode_operations bpf_prog_iops = { }; static const struct inode_operations bpf_map_iops = { }; +static const struct inode_operations bpf_link_iops = { }; static struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir, @@ -114,6 +133,8 @@ static int bpf_inode_type(const struct inode *inode, enum bpf_type *type) *type = BPF_TYPE_PROG; else if (inode->i_op == &bpf_map_iops) *type = BPF_TYPE_MAP; + else if (inode->i_op == &bpf_link_iops) + *type = BPF_TYPE_LINK; else return -EACCES; @@ -205,10 +226,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) else prev_key = key; + rcu_read_lock(); if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; - return NULL; + key = NULL; } + rcu_read_unlock(); return key; } @@ -335,6 +358,15 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) &bpffs_map_fops : &bpffs_obj_fops); } +static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg) +{ + struct bpf_link *link = arg; + + return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops, + bpf_link_is_iter(link) ? 
+ &bpf_iter_fops : &bpffs_obj_fops); +} + static struct dentry * bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags) { @@ -411,6 +443,9 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw, case BPF_TYPE_MAP: ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw); break; + case BPF_TYPE_LINK: + ret = vfs_mkobj(dentry, mode, bpf_mklink, raw); + break; default: ret = -EPERM; } @@ -502,6 +537,8 @@ int bpf_obj_get_user(const char __user *pathname, int flags) ret = bpf_prog_new_fd(raw); else if (type == BPF_TYPE_MAP) ret = bpf_map_new_fd(raw, f_flags); + else if (type == BPF_TYPE_LINK) + ret = bpf_link_new_fd(raw); else goto out; @@ -521,6 +558,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type if (inode->i_op == &bpf_map_iops) return ERR_PTR(-EINVAL); + if (inode->i_op == &bpf_link_iops) + return ERR_PTR(-EINVAL); if (inode->i_op != &bpf_prog_iops) return ERR_PTR(-EACCES); @@ -533,7 +572,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type if (!bpf_prog_get_ok(prog, &type, false)) return ERR_PTR(-EINVAL); - return bpf_prog_inc(prog); + bpf_prog_inc(prog); + return prog; } struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index addd6fdceec8..cc9a3cd2bfed 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -159,7 +159,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, return -ENOMEM; memcpy(&new->data[0], value, map->value_size); - check_and_init_map_lock(map, new->data); + check_and_init_map_value(map, new->data); new = xchg(&storage->buf, new); kfree_rcu(new, rcu); @@ -409,6 +409,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, rcu_read_unlock(); } +static int cgroup_storage_map_btf_id; const struct bpf_map_ops cgroup_storage_map_ops = { .map_alloc = cgroup_storage_map_alloc, .map_free = cgroup_storage_map_free, @@ -418,6 +419,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = { .map_delete_elem = cgroup_storage_delete_elem, .map_check_btf = cgroup_storage_check_btf, .map_seq_show_elem = cgroup_storage_seq_show_elem, + .map_btf_name = "bpf_cgroup_storage_map", + .map_btf_id = &cgroup_storage_map_btf_id, }; int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) @@ -503,7 +506,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, storage->buf = kmalloc_node(size, flags, map->numa_node); if (!storage->buf) goto enomem; - check_and_init_map_lock(map, storage->buf->data); + check_and_init_map_value(map, storage->buf->data); } else { storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags); if (!storage->percpu_buf) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 56e6c75d354d..b5426a35b279 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -543,7 +543,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) u64 cost = sizeof(*trie), cost_per_node; int ret; - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return ERR_PTR(-EPERM); /* check sanity of attributes */ @@ -735,6 +735,7 @@ static int trie_check_btf(const struct bpf_map *map, -EINVAL : 0; } +static int trie_map_btf_id; const struct bpf_map_ops trie_map_ops = { .map_alloc = trie_alloc, .map_free = trie_free, @@ -743,4 +744,6 @@ const struct bpf_map_ops trie_map_ops = { .map_update_elem = trie_update_elem, .map_delete_elem = trie_delete_elem, .map_check_btf = trie_check_btf, + 
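/* Editorial sketch, not part of the patch: with BPF_TYPE_LINK wired into
 * inode.c above, link fds can be pinned in and retrieved from bpffs just like
 * progs and maps, which keeps a link (and thus its attachment) alive with no
 * process holding the fd. User-space fragment via libbpf's thin syscall
 * wrappers; the pin path is illustrative:
 */
#include <bpf/bpf.h>

int pin_link(int link_fd)
{
	if (bpf_obj_pin(link_fd, "/sys/fs/bpf/mylink"))
		return -1;
	/* later, possibly from another process: */
	return bpf_obj_get("/sys/fs/bpf/mylink");	/* fresh fd, same link */
}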
.map_btf_name = "lpm_trie", + .map_btf_id = &trie_map_btf_id, }; diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index fab4fb134547..6a1f326323eb 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -3,6 +3,7 @@ */ #include <linux/slab.h> #include <linux/bpf.h> +#include <linux/btf.h> #include "map_in_map.h" @@ -17,13 +18,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) if (IS_ERR(inner_map)) return inner_map; - /* prog_array->owner_prog_type and owner_jited - * is a runtime binding. Doing static check alone - * in the verifier is not enough. + /* prog_array->aux->{type,jited} is a runtime binding. + * Doing static check alone in the verifier is not enough. */ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || - inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { + inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE || + inner_map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { fdput(f); return ERR_PTR(-ENOTSUPP); } @@ -56,11 +57,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) inner_map_meta->map_flags = inner_map->map_flags; inner_map_meta->max_entries = inner_map->max_entries; inner_map_meta->spin_lock_off = inner_map->spin_lock_off; + inner_map_meta->timer_off = inner_map->timer_off; + if (inner_map->btf) { + btf_get(inner_map->btf); + inner_map_meta->btf = inner_map->btf; + } /* Misc members not needed in bpf_map_meta_equal() check. */ inner_map_meta->ops = inner_map->ops; if (inner_map->ops == &array_map_ops) { - inner_map_meta->unpriv_array = inner_map->unpriv_array; + inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; container_of(inner_map_meta, struct bpf_array, map)->index_mask = container_of(inner_map, struct bpf_array, map)->index_mask; } @@ -71,6 +77,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) void bpf_map_meta_free(struct bpf_map *map_meta) { + btf_put(map_meta->btf); kfree(map_meta); } @@ -81,6 +88,7 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0, return meta0->map_type == meta1->map_type && meta0->key_size == meta1->key_size && meta0->value_size == meta1->value_size && + meta0->timer_off == meta1->timer_off && meta0->map_flags == meta1->map_flags && meta0->max_entries == meta1->max_entries; } @@ -98,7 +106,7 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map, return inner_map; if (bpf_map_meta_equal(map->inner_map_meta, inner_map)) - inner_map = bpf_map_inc(inner_map, false); + bpf_map_inc(inner_map); else inner_map = ERR_PTR(-EINVAL); diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c new file mode 100644 index 000000000000..af86048e5afd --- /dev/null +++ b/kernel/bpf/map_iter.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <linux/bpf.h> +#include <linux/fs.h> +#include <linux/filter.h> +#include <linux/kernel.h> +#include <linux/btf_ids.h> + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_map_info *info = seq->private; + struct bpf_map *map; + + map = bpf_map_get_curr_or_next(&info->map_id); + if (!map) + return NULL; + + if (*pos == 0) + ++*pos; + return map; +} + +static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_map_info *info = seq->private; + + ++*pos; + ++info->map_id; + bpf_map_put((struct bpf_map *)v); + return bpf_map_get_curr_or_next(&info->map_id); +} + +struct bpf_iter__bpf_map { + __bpf_md_ptr(struct 
bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); +}; + +DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map) + +static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop) +{ + struct bpf_iter__bpf_map ctx; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + ctx.meta = &meta; + ctx.map = v; + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (prog) + ret = bpf_iter_run_prog(prog, &ctx); + + return ret; +} + +static int bpf_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_map_seq_show(seq, v, false); +} + +static void bpf_map_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_map_seq_show(seq, v, true); + else + bpf_map_put((struct bpf_map *)v); +} + +static const struct seq_operations bpf_map_seq_ops = { + .start = bpf_map_seq_start, + .next = bpf_map_seq_next, + .stop = bpf_map_seq_stop, + .show = bpf_map_seq_show, +}; + +BTF_ID_LIST(btf_bpf_map_id) +BTF_ID(struct, bpf_map) + +static const struct bpf_iter_seq_info bpf_map_seq_info = { + .seq_ops = &bpf_map_seq_ops, + .init_seq_private = NULL, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_map_info), +}; + +static struct bpf_iter_reg bpf_map_reg_info = { + .target = "bpf_map", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_map, map), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &bpf_map_seq_info, +}; + +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) +{ + u32 key_acc_size, value_acc_size, key_size, value_size; + struct bpf_map *map; + bool is_percpu = false; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); + + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) + is_percpu = true; + else if (map->map_type != BPF_MAP_TYPE_HASH && + map->map_type != BPF_MAP_TYPE_LRU_HASH && + map->map_type != BPF_MAP_TYPE_ARRAY) + goto put_map; + + key_acc_size = prog->aux->max_rdonly_access; + value_acc_size = prog->aux->max_rdwr_access; + key_size = map->key_size; + if (!is_percpu) + value_size = map->value_size; + else + value_size = round_up(map->value_size, 8) * num_possible_cpus(); + + if (key_acc_size > key_size || value_acc_size > value_size) { + err = -EACCES; + goto put_map; + } + + aux->map = map; + return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); +} + +DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, + struct bpf_map *map, void *key, void *value) + +static const struct bpf_iter_reg bpf_map_elem_reg_info = { + .target = "bpf_map_elem", + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_map_elem, key), + PTR_TO_RDONLY_BUF_OR_NULL }, + { offsetof(struct bpf_iter__bpf_map_elem, value), + PTR_TO_RDWR_BUF_OR_NULL }, + }, +}; + +static int __init bpf_map_iter_init(void) +{ + int ret; + + bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id; + ret = bpf_iter_reg_target(&bpf_map_reg_info); + if (ret) + return ret; + + return bpf_iter_reg_target(&bpf_map_elem_reg_info); +} + +late_initcall(bpf_map_iter_init); diff --git a/kernel/bpf/net_namespace.c 
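/* Editorial sketch, not part of the patch: once the "bpf_map_elem" target is
 * registered below, user space attaches an iterator program to a concrete map
 * and reads the rendered seq_file output through an iterator fd. Rough libbpf
 * flow; error handling is abbreviated and newer libbpf returns NULL from
 * bpf_program__attach_iter() on failure:
 */
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int read_map_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;
	char buf[4096];
	int iter_fd;

	linfo.map.map_fd = map_fd;	/* validated by bpf_iter_attach_map() */
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;	/* each read drives the seq_ops shown above */

	close(iter_fd);
	bpf_link__destroy(link);
	return 0;
}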
b/kernel/bpf/net_namespace.c new file mode 100644 index 000000000000..ffbabf0d168a --- /dev/null +++ b/kernel/bpf/net_namespace.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <net/net_namespace.h> + +/* + * Functions to manage BPF programs attached to netns + */ + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + + /* We don't hold a ref to net in order to auto-detach the link + * when netns is going away. Instead we rely on pernet + * pre_exit callback to clear this pointer. Must be accessed + * with netns_bpf_mutex held. + */ + struct net *net; + struct list_head node; /* node in list of links attached to net */ +}; + +/* Protects updates to netns_bpf */ +DEFINE_MUTEX(netns_bpf_mutex); + +static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_dec(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + +static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_inc(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + +/* Must be called with netns_bpf_mutex held. */ +static void netns_bpf_run_array_detach(struct net *net, + enum netns_bpf_attach_type type) +{ + struct bpf_prog_array *run_array; + + run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL, + lockdep_is_held(&netns_bpf_mutex)); + bpf_prog_array_free(run_array); +} + +static int link_index(struct net *net, enum netns_bpf_attach_type type, + struct bpf_netns_link *link) +{ + struct bpf_netns_link *pos; + int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + if (pos == link) + return i; + i++; + } + return -ENOENT; +} + +static int link_count(struct net *net, enum netns_bpf_attach_type type) +{ + struct list_head *pos; + int i = 0; + + list_for_each(pos, &net->bpf.links[type]) + i++; + return i; +} + +static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type, + struct bpf_prog_array *prog_array) +{ + struct bpf_netns_link *pos; + unsigned int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + prog_array->items[i].prog = pos->link.prog; + i++; + } +} + +static void bpf_netns_link_release(struct bpf_link *link) +{ + struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + enum netns_bpf_attach_type type = net_link->netns_type; + struct bpf_prog_array *old_array, *new_array; + struct net *net; + int cnt, idx; + + /* Link auto-detached by dying netns. */ + if (!net_link->net) + return; + + mutex_lock(&netns_bpf_mutex); + + /* Recheck after potential sleep. We can race with cleanup_net + * here, but if we see a non-NULL struct net pointer pre_exit + * has not happened yet and will block on netns_bpf_mutex. 
+ */ + net = net_link->net; + if (!net) + goto out_unlock; + + /* Mark attach point as unused */ + netns_bpf_attach_type_unneed(type); + + /* Remember link position in case of safe delete */ + idx = link_index(net, type, net_link); + list_del(&net_link->node); + + cnt = link_count(net, type); + if (!cnt) { + netns_bpf_run_array_detach(net, type); + goto out_unlock; + } + + old_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL); + if (!new_array) { + WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx)); + goto out_unlock; + } + fill_prog_array(net, type, new_array); + rcu_assign_pointer(net->bpf.run_array[type], new_array); + bpf_prog_array_free(old_array); + +out_unlock: + net_link->net = NULL; + mutex_unlock(&netns_bpf_mutex); +} + +static int bpf_netns_link_detach(struct bpf_link *link) +{ + bpf_netns_link_release(link); + return 0; +} + +static void bpf_netns_link_dealloc(struct bpf_link *link) +{ + struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + + kfree(net_link); +} + +static int bpf_netns_link_update_prog(struct bpf_link *link, + struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + enum netns_bpf_attach_type type = net_link->netns_type; + struct bpf_prog_array *run_array; + struct net *net; + int idx, ret; + + if (old_prog && old_prog != link->prog) + return -EPERM; + if (new_prog->type != link->prog->type) + return -EINVAL; + + mutex_lock(&netns_bpf_mutex); + + net = net_link->net; + if (!net || !check_net(net)) { + /* Link auto-detached or netns dying */ + ret = -ENOLINK; + goto out_unlock; + } + + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + idx = link_index(net, type, net_link); + ret = bpf_prog_array_update_at(run_array, idx, new_prog); + if (ret) + goto out_unlock; + + old_prog = xchg(&link->prog, new_prog); + bpf_prog_put(old_prog); + +out_unlock: + mutex_unlock(&netns_bpf_mutex); + return ret; +} + +static int bpf_netns_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + unsigned int inum = 0; + struct net *net; + + mutex_lock(&netns_bpf_mutex); + net = net_link->net; + if (net && check_net(net)) + inum = net->ns.inum; + mutex_unlock(&netns_bpf_mutex); + + info->netns.netns_ino = inum; + info->netns.attach_type = net_link->type; + return 0; +} + +static void bpf_netns_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_link_info info = {}; + + bpf_netns_link_fill_info(link, &info); + seq_printf(seq, + "netns_ino:\t%u\n" + "attach_type:\t%u\n", + info.netns.netns_ino, + info.netns.attach_type); +} + +static const struct bpf_link_ops bpf_netns_link_ops = { + .release = bpf_netns_link_release, + .dealloc = bpf_netns_link_dealloc, + .detach = bpf_netns_link_detach, + .update_prog = bpf_netns_link_update_prog, + .fill_link_info = bpf_netns_link_fill_info, + .show_fdinfo = bpf_netns_link_show_fdinfo, +}; + +/* Must be called with netns_bpf_mutex held. 
*/ +static int __netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr, + struct net *net, + enum netns_bpf_attach_type type) +{ + __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + struct bpf_prog_array *run_array; + u32 prog_cnt = 0, flags = 0; + + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + if (run_array) + prog_cnt = bpf_prog_array_length(run_array); + + if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) + return -EFAULT; + if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) + return -EFAULT; + if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) + return 0; + + return bpf_prog_array_copy_to_user(run_array, prog_ids, + attr->query.prog_cnt); +} + +int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + enum netns_bpf_attach_type type; + struct net *net; + int ret; + + if (attr->query.query_flags) + return -EINVAL; + + type = to_netns_bpf_attach_type(attr->query.attach_type); + if (type < 0) + return -EINVAL; + + net = get_net_ns_by_fd(attr->query.target_fd); + if (IS_ERR(net)) + return PTR_ERR(net); + + mutex_lock(&netns_bpf_mutex); + ret = __netns_bpf_prog_query(attr, uattr, net, type); + mutex_unlock(&netns_bpf_mutex); + + put_net(net); + return ret; +} + +int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_prog_array *run_array; + enum netns_bpf_attach_type type; + struct bpf_prog *attached; + struct net *net; + int ret; + + type = to_netns_bpf_attach_type(attr->attach_type); + if (type < 0) + return -EINVAL; + + net = current->nsproxy->net_ns; + mutex_lock(&netns_bpf_mutex); + + /* Attaching prog directly is not compatible with links */ + if (!list_empty(&net->bpf.links[type])) { + ret = -EEXIST; + goto out_unlock; + } + + switch (type) { + case NETNS_BPF_FLOW_DISSECTOR: + ret = flow_dissector_bpf_prog_attach_check(net, prog); + break; + default: + ret = -EINVAL; + break; + } + if (ret) + goto out_unlock; + + attached = net->bpf.progs[type]; + if (attached == prog) { + /* The same program cannot be attached twice */ + ret = -EINVAL; + goto out_unlock; + } + + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + if (run_array) { + WRITE_ONCE(run_array->items[0].prog, prog); + } else { + run_array = bpf_prog_array_alloc(1, GFP_KERNEL); + if (!run_array) { + ret = -ENOMEM; + goto out_unlock; + } + run_array->items[0].prog = prog; + rcu_assign_pointer(net->bpf.run_array[type], run_array); + } + + net->bpf.progs[type] = prog; + if (attached) + bpf_prog_put(attached); + +out_unlock: + mutex_unlock(&netns_bpf_mutex); + + return ret; +} + +/* Must be called with netns_bpf_mutex held. 
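/* Editorial sketch, not part of the patch: the link-based path added below
 * (netns_bpf_link_create) is driven from user space with the target being a
 * netns fd rather than a cgroup or map fd. For BPF_SK_LOOKUP that looks
 * roughly like this; path and error handling are abbreviated:
 */
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

int attach_sk_lookup(int prog_fd)
{
	int netns_fd, link_fd;

	netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (netns_fd < 0)
		return -1;

	link_fd = bpf_link_create(prog_fd, netns_fd, BPF_SK_LOOKUP, NULL);
	close(netns_fd);
	return link_fd;	/* detaches on close unless pinned in bpffs */
}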
*/ +static int __netns_bpf_prog_detach(struct net *net, + enum netns_bpf_attach_type type) +{ + struct bpf_prog *attached; + + /* Progs attached via links cannot be detached */ + if (!list_empty(&net->bpf.links[type])) + return -EINVAL; + + attached = net->bpf.progs[type]; + if (!attached) + return -ENOENT; + netns_bpf_run_array_detach(net, type); + net->bpf.progs[type] = NULL; + bpf_prog_put(attached); + return 0; +} + +int netns_bpf_prog_detach(const union bpf_attr *attr) +{ + enum netns_bpf_attach_type type; + int ret; + + type = to_netns_bpf_attach_type(attr->attach_type); + if (type < 0) + return -EINVAL; + + mutex_lock(&netns_bpf_mutex); + ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type); + mutex_unlock(&netns_bpf_mutex); + + return ret; +} + +static int netns_bpf_max_progs(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_FLOW_DISSECTOR: + return 1; + case NETNS_BPF_SK_LOOKUP: + return 64; + default: + return 0; + } +} + +static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, + enum netns_bpf_attach_type type) +{ + struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + struct bpf_prog_array *run_array; + int cnt, err; + + mutex_lock(&netns_bpf_mutex); + + cnt = link_count(net, type); + if (cnt >= netns_bpf_max_progs(type)) { + err = -E2BIG; + goto out_unlock; + } + /* Links are not compatible with attaching prog directly */ + if (net->bpf.progs[type]) { + err = -EEXIST; + goto out_unlock; + } + + switch (type) { + case NETNS_BPF_FLOW_DISSECTOR: + err = flow_dissector_bpf_prog_attach_check(net, link->prog); + break; + case NETNS_BPF_SK_LOOKUP: + err = 0; /* nothing to check */ + break; + default: + err = -EINVAL; + break; + } + if (err) + goto out_unlock; + + run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL); + if (!run_array) { + err = -ENOMEM; + goto out_unlock; + } + + list_add_tail(&net_link->node, &net->bpf.links[type]); + + fill_prog_array(net, type, run_array); + run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array, + lockdep_is_held(&netns_bpf_mutex)); + bpf_prog_array_free(run_array); + + /* Mark attach point as used */ + netns_bpf_attach_type_need(type); + +out_unlock: + mutex_unlock(&netns_bpf_mutex); + return err; +} + +int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + enum netns_bpf_attach_type netns_type; + struct bpf_link_primer link_primer; + struct bpf_netns_link *net_link; + enum bpf_attach_type type; + struct net *net; + int err; + + if (attr->link_create.flags) + return -EINVAL; + + type = attr->link_create.attach_type; + netns_type = to_netns_bpf_attach_type(type); + if (netns_type < 0) + return -EINVAL; + + net = get_net_ns_by_fd(attr->link_create.target_fd); + if (IS_ERR(net)) + return PTR_ERR(net); + + net_link = kzalloc(sizeof(*net_link), GFP_USER); + if (!net_link) { + err = -ENOMEM; + goto out_put_net; + } + bpf_link_init(&net_link->link, BPF_LINK_TYPE_NETNS, + &bpf_netns_link_ops, prog); + net_link->net = net; + net_link->type = type; + net_link->netns_type = netns_type; + + err = bpf_link_prime(&net_link->link, &link_primer); + if (err) { + kfree(net_link); + goto out_put_net; + } + + err = netns_bpf_link_attach(net, &net_link->link, netns_type); + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_net; + } + + put_net(net); + return bpf_link_settle(&link_primer); + +out_put_net: + put_net(net); + return err; +} + +static int __net_init netns_bpf_pernet_init(struct net *net) +{ + int type; + + for (type = 0; type 
< MAX_NETNS_BPF_ATTACH_TYPE; type++) + INIT_LIST_HEAD(&net->bpf.links[type]); + + return 0; +} + +static void __net_exit netns_bpf_pernet_pre_exit(struct net *net) +{ + enum netns_bpf_attach_type type; + struct bpf_netns_link *net_link; + + mutex_lock(&netns_bpf_mutex); + for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) { + netns_bpf_run_array_detach(net, type); + list_for_each_entry(net_link, &net->bpf.links[type], node) { + net_link->net = NULL; /* auto-detach link */ + netns_bpf_attach_type_unneed(type); + } + if (net->bpf.progs[type]) + bpf_prog_put(net->bpf.progs[type]); + } + mutex_unlock(&netns_bpf_mutex); +} + +static struct pernet_operations netns_bpf_pernet_ops __net_initdata = { + .init = netns_bpf_pernet_init, + .pre_exit = netns_bpf_pernet_pre_exit, +}; + +static int __init netns_bpf_init(void) +{ + return register_pernet_subsys(&netns_bpf_pernet_ops); +} + +subsys_initcall(netns_bpf_init); diff --git a/kernel/bpf/prog_iter.c b/kernel/bpf/prog_iter.c new file mode 100644 index 000000000000..53a73c841c13 --- /dev/null +++ b/kernel/bpf/prog_iter.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include <linux/bpf.h> +#include <linux/fs.h> +#include <linux/filter.h> +#include <linux/kernel.h> +#include <linux/btf_ids.h> + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +static void *bpf_prog_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + struct bpf_prog *prog; + + prog = bpf_prog_get_curr_or_next(&info->prog_id); + if (!prog) + return NULL; + + if (*pos == 0) + ++*pos; + return prog; +} + +static void *bpf_prog_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + + ++*pos; + ++info->prog_id; + bpf_prog_put((struct bpf_prog *)v); + return bpf_prog_get_curr_or_next(&info->prog_id); +} + +struct bpf_iter__bpf_prog { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_prog *, prog); +}; + +DEFINE_BPF_ITER_FUNC(bpf_prog, struct bpf_iter_meta *meta, struct bpf_prog *prog) + +static int __bpf_prog_seq_show(struct seq_file *seq, void *v, bool in_stop) +{ + struct bpf_iter__bpf_prog ctx; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + ctx.meta = &meta; + ctx.prog = v; + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (prog) + ret = bpf_iter_run_prog(prog, &ctx); + + return ret; +} + +static int bpf_prog_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_prog_seq_show(seq, v, false); +} + +static void bpf_prog_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_prog_seq_show(seq, v, true); + else + bpf_prog_put((struct bpf_prog *)v); +} + +static const struct seq_operations bpf_prog_seq_ops = { + .start = bpf_prog_seq_start, + .next = bpf_prog_seq_next, + .stop = bpf_prog_seq_stop, + .show = bpf_prog_seq_show, +}; + +BTF_ID_LIST(btf_bpf_prog_id) +BTF_ID(struct, bpf_prog) + +static const struct bpf_iter_seq_info bpf_prog_seq_info = { + .seq_ops = &bpf_prog_seq_ops, + .init_seq_private = NULL, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_prog_info), +}; + +static struct bpf_iter_reg bpf_prog_reg_info = { + .target = "bpf_prog", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_prog, prog), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &bpf_prog_seq_info, +}; + +static int __init bpf_prog_iter_init(void) +{ + bpf_prog_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_prog_id; + 
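/* Editorial sketch, not part of the patch: the "bpf_prog" iterator target
 * registered here is consumed by a BPF program of this shape, mirroring the
 * bpf_map case above (vmlinux.h/libbpf build assumed; ctx->prog is a
 * PTR_TO_BTF_ID_OR_NULL, so field reads are BTF-checked):
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/bpf_prog")
int dump_prog(struct bpf_iter__bpf_prog *ctx)
{
	struct bpf_prog *prog = ctx->prog;

	if (!prog)	/* NULL on the final stop() pass */
		return 0;
	BPF_SEQ_PRINTF(ctx->meta->seq, "prog id %u\n", prog->aux->id);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";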
return bpf_iter_reg_target(&bpf_prog_reg_info); +} + +late_initcall(bpf_prog_iter_init); diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index f697647ceb54..64b4f514532a 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -45,7 +45,7 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) /* Called from syscall */ static int queue_stack_map_alloc_check(union bpf_attr *attr) { - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return -EPERM; /* check sanity of attributes */ @@ -262,6 +262,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key, return -EINVAL; } +static int queue_map_btf_id; const struct bpf_map_ops queue_map_ops = { .map_alloc_check = queue_stack_map_alloc_check, .map_alloc = queue_stack_map_alloc, @@ -273,8 +274,11 @@ const struct bpf_map_ops queue_map_ops = { .map_pop_elem = queue_map_pop_elem, .map_peek_elem = queue_map_peek_elem, .map_get_next_key = queue_stack_map_get_next_key, + .map_btf_name = "bpf_queue_stack", + .map_btf_id = &queue_map_btf_id, }; +static int stack_map_btf_id; const struct bpf_map_ops stack_map_ops = { .map_alloc_check = queue_stack_map_alloc_check, .map_alloc = queue_stack_map_alloc, @@ -286,4 +290,6 @@ const struct bpf_map_ops stack_map_ops = { .map_pop_elem = stack_map_pop_elem, .map_peek_elem = stack_map_peek_elem, .map_get_next_key = queue_stack_map_get_next_key, + .map_btf_name = "bpf_queue_stack", + .map_btf_id = &stack_map_btf_id, }; diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 50c083ba978c..c11a9d750f97 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -154,7 +154,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) struct bpf_map_memory mem; u64 array_size; - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return ERR_PTR(-EPERM); array_size = sizeof(*array); @@ -190,7 +190,7 @@ int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, rcu_read_lock(); sk = reuseport_array_lookup_elem(map, key); if (sk) { - *(u64 *)value = sock_gen_cookie(sk); + *(u64 *)value = __sock_gen_cookie(sk); err = 0; } else { err = -ENOENT; @@ -305,11 +305,6 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, if (err) goto put_file_unlock; - /* Ensure reuse->reuseport_id is set */ - err = reuseport_get_id(reuse); - if (err < 0) - goto put_file_unlock; - WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]); rcu_assign_pointer(array->ptrs[index], nsk); free_osk = osk; @@ -350,6 +345,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key, return 0; } +static int reuseport_array_map_btf_id; const struct bpf_map_ops reuseport_array_ops = { .map_alloc_check = reuseport_array_alloc_check, .map_alloc = reuseport_array_alloc, @@ -357,4 +353,6 @@ const struct bpf_map_ops reuseport_array_ops = { .map_lookup_elem = reuseport_array_lookup_elem, .map_get_next_key = reuseport_array_get_next_key, .map_delete_elem = reuseport_array_delete_elem, + .map_btf_name = "reuseport_array", + .map_btf_id = &reuseport_array_map_btf_id, }; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c new file mode 100644 index 000000000000..dbf37aff4827 --- /dev/null +++ b/kernel/bpf/ringbuf.c @@ -0,0 +1,504 @@ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/err.h> +#include <linux/irq_work.h> +#include <linux/slab.h> +#include <linux/filter.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include 
<uapi/linux/btf.h> + +#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE) + +/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */ +#define RINGBUF_PGOFF \ + (offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT) +/* consumer page and producer page */ +#define RINGBUF_POS_PAGES 2 + +#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4) + +/* Maximum size of ring buffer area is limited by 32-bit page offset within + * record header, counted in pages. Reserve 8 bits for extensibility, and take + * into account few extra pages for consumer/producer pages and + * non-mmap()'able parts. This gives 64GB limit, which seems plenty for single + * ring buffer. + */ +#define RINGBUF_MAX_DATA_SZ \ + (((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE) + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + u64 mask; + struct page **pages; + int nr_pages; + spinlock_t spinlock ____cacheline_aligned_in_smp; + /* Consumer and producer counters are put into separate pages to allow + * mapping consumer page as r/w, but restrict producer page to r/o. + * This protects producer position from being modified by user-space + * application and ruining in-kernel position tracking. + */ + unsigned long consumer_pos __aligned(PAGE_SIZE); + unsigned long producer_pos __aligned(PAGE_SIZE); + char data[] __aligned(PAGE_SIZE); +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_map_memory memory; + struct bpf_ringbuf *rb; +}; + +/* 8-byte ring buffer record header structure */ +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node) +{ + const gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | + __GFP_ZERO; + int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES; + int nr_data_pages = data_sz >> PAGE_SHIFT; + int nr_pages = nr_meta_pages + nr_data_pages; + struct page **pages, *page; + struct bpf_ringbuf *rb; + size_t array_size; + int i; + + /* Each data page is mapped twice to allow "virtual" + * continuous read of samples wrapping around the end of ring + * buffer area: + * ------------------------------------------------------ + * | meta pages | real data pages | same data pages | + * ------------------------------------------------------ + * | | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 | + * ------------------------------------------------------ + * | | TA DA | TA DA | + * ------------------------------------------------------ + * ^^^^^^^ + * | + * Here, no need to worry about special handling of wrapped-around + * data due to double-mapped data pages. This works both in kernel and + * when mmap()'ed in user-space, simplifying both kernel and + * user-space implementations significantly. 
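+ * For example, a record that starts a few bytes before the end of data
+ * page 9 simply continues into the aliased copy of page 1, so a single
+ * memcpy() can read the whole record with no wrap-around arithmetic.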
+ */ + array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages); + if (array_size > PAGE_SIZE) + pages = vmalloc_node(array_size, numa_node); + else + pages = kmalloc_node(array_size, flags, numa_node); + if (!pages) + return NULL; + + for (i = 0; i < nr_pages; i++) { + page = alloc_pages_node(numa_node, flags, 0); + if (!page) { + nr_pages = i; + goto err_free_pages; + } + pages[i] = page; + if (i >= nr_meta_pages) + pages[nr_data_pages + i] = page; + } + + rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, + VM_ALLOC | VM_USERMAP, PAGE_KERNEL); + if (rb) { + rb->pages = pages; + rb->nr_pages = nr_pages; + return rb; + } + +err_free_pages: + for (i = 0; i < nr_pages; i++) + __free_page(pages[i]); + kvfree(pages); + return NULL; +} + +static void bpf_ringbuf_notify(struct irq_work *work) +{ + struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); + + wake_up_all(&rb->waitq); +} + +static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node) +{ + struct bpf_ringbuf *rb; + + if (!data_sz || !PAGE_ALIGNED(data_sz)) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_64BIT + /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */ + if (data_sz > RINGBUF_MAX_DATA_SZ) + return ERR_PTR(-E2BIG); +#endif + + rb = bpf_ringbuf_area_alloc(data_sz, numa_node); + if (!rb) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&rb->spinlock); + init_waitqueue_head(&rb->waitq); + init_irq_work(&rb->work, bpf_ringbuf_notify); + + rb->mask = data_sz - 1; + rb->consumer_pos = 0; + rb->producer_pos = 0; + + return rb; +} + +static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr) +{ + struct bpf_ringbuf_map *rb_map; + u64 cost; + int err; + + if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK) + return ERR_PTR(-EINVAL); + + if (attr->key_size || attr->value_size || + attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries)) + return ERR_PTR(-EINVAL); + + rb_map = kzalloc(sizeof(*rb_map), GFP_USER); + if (!rb_map) + return ERR_PTR(-ENOMEM); + + bpf_map_init_from_attr(&rb_map->map, attr); + + cost = sizeof(struct bpf_ringbuf_map) + + sizeof(struct bpf_ringbuf) + + attr->max_entries; + err = bpf_map_charge_init(&rb_map->map.memory, cost); + if (err) + goto err_free_map; + + rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); + if (IS_ERR(rb_map->rb)) { + err = PTR_ERR(rb_map->rb); + goto err_uncharge; + } + + return &rb_map->map; + +err_uncharge: + bpf_map_charge_finish(&rb_map->map.memory); +err_free_map: + kfree(rb_map); + return ERR_PTR(err); +} + +static void bpf_ringbuf_free(struct bpf_ringbuf *rb) +{ + /* copy pages pointer and nr_pages to local variable, as we are going + * to unmap rb itself with vunmap() below + */ + struct page **pages = rb->pages; + int i, nr_pages = rb->nr_pages; + + vunmap(rb); + for (i = 0; i < nr_pages; i++) + __free_page(pages[i]); + kvfree(pages); +} + +static void ringbuf_map_free(struct bpf_map *map) +{ + struct bpf_ringbuf_map *rb_map; + + /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, + * so the programs (can be more than one that used this map) were + * disconnected from events. 
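+	 * (Programs access the map under rcu_read_lock(), which is why the
+	 * RCU grace period below is sufficient.)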
Wait for outstanding critical sections in + * these programs to complete + */ + synchronize_rcu(); + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + bpf_ringbuf_free(rb_map->rb); + kfree(rb_map); +} + +static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key) +{ + return ERR_PTR(-ENOTSUPP); +} + +static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value, + u64 flags) +{ + return -ENOTSUPP; +} + +static int ringbuf_map_delete_elem(struct bpf_map *map, void *key) +{ + return -ENOTSUPP; +} + +static int ringbuf_map_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + return -ENOTSUPP; +} + +static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb) +{ + size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT; + + /* consumer page + producer page + 2 x data pages */ + return RINGBUF_POS_PAGES + 2 * data_pages; +} + +static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) +{ + struct bpf_ringbuf_map *rb_map; + size_t mmap_sz; + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT; + + if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz) + return -EINVAL; + + return remap_vmalloc_range(vma, rb_map->rb, + vma->vm_pgoff + RINGBUF_PGOFF); +} + +static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) +{ + unsigned long cons_pos, prod_pos; + + cons_pos = smp_load_acquire(&rb->consumer_pos); + prod_pos = smp_load_acquire(&rb->producer_pos); + return prod_pos - cons_pos; +} + +static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp, + struct poll_table_struct *pts) +{ + struct bpf_ringbuf_map *rb_map; + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + poll_wait(filp, &rb_map->rb->waitq, pts); + + if (ringbuf_avail_data_sz(rb_map->rb)) + return EPOLLIN | EPOLLRDNORM; + return 0; +} + +static int ringbuf_map_btf_id; +const struct bpf_map_ops ringbuf_map_ops = { + .map_alloc = ringbuf_map_alloc, + .map_free = ringbuf_map_free, + .map_mmap = ringbuf_map_mmap, + .map_poll = ringbuf_map_poll, + .map_lookup_elem = ringbuf_map_lookup_elem, + .map_update_elem = ringbuf_map_update_elem, + .map_delete_elem = ringbuf_map_delete_elem, + .map_get_next_key = ringbuf_map_get_next_key, + .map_btf_name = "bpf_ringbuf_map", + .map_btf_id = &ringbuf_map_btf_id, +}; + +/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself, + * calculate offset from record metadata to ring buffer in pages, rounded + * down. This page offset is stored as part of record metadata and allows to + * restore struct bpf_ringbuf * from record pointer. This page offset is + * stored at offset 4 of record metadata header. 
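+ * For example, a record header placed 3 pages plus 100 bytes past the
+ * start of struct bpf_ringbuf gets pg_off = 3; rounding the header
+ * address down to a page boundary and subtracting 3 pages recovers the
+ * struct bpf_ringbuf pointer, as bpf_ringbuf_restore_from_rec() does.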
+ */ +static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, + struct bpf_ringbuf_hdr *hdr) +{ + return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; +} + +/* Given pointer to ring buffer record header, restore pointer to struct + * bpf_ringbuf itself by using page offset stored at offset 4 + */ +static struct bpf_ringbuf * +bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr) +{ + unsigned long addr = (unsigned long)(void *)hdr; + unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT; + + return (void*)((addr & PAGE_MASK) - off); +} + +static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) +{ + unsigned long cons_pos, prod_pos, new_prod_pos, flags; + u32 len, pg_off; + struct bpf_ringbuf_hdr *hdr; + + if (unlikely(size > RINGBUF_MAX_RECORD_SZ)) + return NULL; + + len = round_up(size + BPF_RINGBUF_HDR_SZ, 8); + cons_pos = smp_load_acquire(&rb->consumer_pos); + + if (in_nmi()) { + if (!spin_trylock_irqsave(&rb->spinlock, flags)) + return NULL; + } else { + spin_lock_irqsave(&rb->spinlock, flags); + } + + prod_pos = rb->producer_pos; + new_prod_pos = prod_pos + len; + + /* check for out of ringbuf space by ensuring producer position + * doesn't advance more than (ringbuf_size - 1) ahead + */ + if (new_prod_pos - cons_pos > rb->mask) { + spin_unlock_irqrestore(&rb->spinlock, flags); + return NULL; + } + + hdr = (void *)rb->data + (prod_pos & rb->mask); + pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); + hdr->len = size | BPF_RINGBUF_BUSY_BIT; + hdr->pg_off = pg_off; + + /* pairs with consumer's smp_load_acquire() */ + smp_store_release(&rb->producer_pos, new_prod_pos); + + spin_unlock_irqrestore(&rb->spinlock, flags); + + return (void *)hdr + BPF_RINGBUF_HDR_SZ; +} + +BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags) +{ + struct bpf_ringbuf_map *rb_map; + + if (unlikely(flags)) + return 0; + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); +} + +const struct bpf_func_proto bpf_ringbuf_reserve_proto = { + .func = bpf_ringbuf_reserve, + .ret_type = RET_PTR_TO_ALLOC_MEM_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard) +{ + unsigned long rec_pos, cons_pos; + struct bpf_ringbuf_hdr *hdr; + struct bpf_ringbuf *rb; + u32 new_len; + + hdr = sample - BPF_RINGBUF_HDR_SZ; + rb = bpf_ringbuf_restore_from_rec(hdr); + new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT; + if (discard) + new_len |= BPF_RINGBUF_DISCARD_BIT; + + /* update record header with correct final size prefix */ + xchg(&hdr->len, new_len); + + /* if consumer caught up and is waiting for our record, notify about + * new data availability + */ + rec_pos = (void *)hdr - (void *)rb->data; + cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; + + if (flags & BPF_RB_FORCE_WAKEUP) + irq_work_queue(&rb->work); + else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP)) + irq_work_queue(&rb->work); +} + +BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags) +{ + bpf_ringbuf_commit(sample, flags, false /* discard */); + return 0; +} + +const struct bpf_func_proto bpf_ringbuf_submit_proto = { + .func = bpf_ringbuf_submit, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_ALLOC_MEM, + .arg2_type = ARG_ANYTHING, +}; + +BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags) +{ + bpf_ringbuf_commit(sample, flags, true /* discard */); + return 0; +} + +const struct 
bpf_func_proto bpf_ringbuf_discard_proto = { + .func = bpf_ringbuf_discard, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_ALLOC_MEM, + .arg2_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size, + u64, flags) +{ + struct bpf_ringbuf_map *rb_map; + void *rec; + + if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP))) + return -EINVAL; + + rb_map = container_of(map, struct bpf_ringbuf_map, map); + rec = __bpf_ringbuf_reserve(rb_map->rb, size); + if (!rec) + return -EAGAIN; + + memcpy(rec, data, size); + bpf_ringbuf_commit(rec, flags, false /* discard */); + return 0; +} + +const struct bpf_func_proto bpf_ringbuf_output_proto = { + .func = bpf_ringbuf_output, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags) +{ + struct bpf_ringbuf *rb; + + rb = container_of(map, struct bpf_ringbuf_map, map)->rb; + + switch (flags) { + case BPF_RB_AVAIL_DATA: + return ringbuf_avail_data_sz(rb); + case BPF_RB_RING_SIZE: + return rb->mask + 1; + case BPF_RB_CONS_POS: + return smp_load_acquire(&rb->consumer_pos); + case BPF_RB_PROD_POS: + return smp_load_acquire(&rb->producer_pos); + default: + return 0; + } +} + +const struct bpf_func_proto bpf_ringbuf_query_proto = { + .func = bpf_ringbuf_query, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 173e983619d7..2bd295b3673d 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -9,6 +9,7 @@ #include <linux/elf.h> #include <linux/pagemap.h> #include <linux/irq_work.h> +#include <linux/btf_ids.h> #include "percpu_freelist.h" #define STACK_CREATE_FLAG_MASK \ @@ -90,7 +91,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) u64 cost, n_buckets; int err; - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return ERR_PTR(-EPERM); if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) @@ -112,6 +113,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) /* hash table size must be power of 2 */ n_buckets = roundup_pow_of_two(attr->max_entries); + if (!n_buckets) + return ERR_PTR(-E2BIG); cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); @@ -343,6 +346,40 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } } +static struct perf_callchain_entry * +get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +{ + struct perf_callchain_entry *entry; + int rctx; + + entry = get_callchain_entry(&rctx); + + if (!entry) + return NULL; + + entry->nr = init_nr + + stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), + sysctl_perf_event_max_stack - init_nr, 0); + + /* stack_trace_save_tsk() works on unsigned long array, while + * perf_callchain_entry uses u64 array. For 32-bit systems, it is + * necessary to fix this mismatch. 
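+	 * Converting from the highest index downward allows the 4-byte
+	 * entries to be widened to 8 bytes in place without clobbering
+	 * source entries that have not been converted yet.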
+ */ + if (__BITS_PER_LONG != 64) { + unsigned long *from = (unsigned long *) entry->ip; + u64 *to = entry->ip; + int i; + + /* copy data from the end to avoid using extra buffer */ + for (i = entry->nr - 1; i >= (int)init_nr; i--) + to[i] = (u64)(from[i]); + } + + put_callchain_entry(rctx); + + return entry; +} + BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { @@ -443,8 +480,8 @@ const struct bpf_func_proto bpf_get_stackid_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, - u64, flags) +static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, + void *buf, u32 size, u64 flags) { u32 init_nr, trace_nr, copy_len, elem_size, num_elem; bool user_build_id = flags & BPF_F_USER_BUILD_ID; @@ -466,13 +503,22 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, if (unlikely(size % elem_size)) goto clear; + /* cannot get valid user stack for task without user_mode regs */ + if (task && user && !user_mode(regs)) + goto err_fault; + num_elem = size / elem_size; if (sysctl_perf_event_max_stack < num_elem) init_nr = 0; else init_nr = sysctl_perf_event_max_stack - num_elem; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + + if (kernel && task) + trace = get_callchain_entry_for_task(task, init_nr); + else + trace = get_perf_callchain(regs, init_nr, kernel, user, + sysctl_perf_event_max_stack, + false, false); if (unlikely(!trace)) goto err_fault; @@ -500,6 +546,12 @@ clear: return err; } +BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, + u64, flags) +{ + return __bpf_get_stack(regs, NULL, buf, size, flags); +} + const struct bpf_func_proto bpf_get_stack_proto = { .func = bpf_get_stack, .gpl_only = true, @@ -510,6 +562,28 @@ const struct bpf_func_proto bpf_get_stack_proto = { .arg4_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, + u32, size, u64, flags) +{ + struct pt_regs *regs = task_pt_regs(task); + + return __bpf_get_stack(regs, task, buf, size, flags); +} + +BTF_ID_LIST(bpf_get_task_stack_btf_ids) +BTF_ID(struct, task_struct) + +const struct bpf_func_proto bpf_get_task_stack_proto = { + .func = bpf_get_task_stack, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + .btf_id = bpf_get_task_stack_btf_ids, +}; + /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { @@ -608,6 +682,7 @@ static void stack_map_free(struct bpf_map *map) put_callchain_buffers(); } +static int stack_trace_map_btf_id; const struct bpf_map_ops stack_trace_map_ops = { .map_alloc = stack_map_alloc, .map_free = stack_map_free, @@ -616,6 +691,8 @@ const struct bpf_map_ops stack_trace_map_ops = { .map_update_elem = stack_map_update_elem, .map_delete_elem = stack_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_stack_map", + .map_btf_id = &stack_trace_map_btf_id, }; static int __init stack_map_init(void) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 14f4a76b44d5..9254bcf0aa9c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -23,13 +23,18 @@ #include <linux/timekeeping.h> #include <linux/ctype.h> #include <linux/nospec.h> +#include <linux/audit.h> +#include <uapi/linux/btf.h> +#include <linux/bpf-netns.h> +#include 
<linux/poll.h> -#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ - (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ - (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ - (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) +#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ + (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ + (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) +#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) -#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) +#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ + IS_FD_HASH(map)) #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) @@ -38,16 +43,20 @@ static DEFINE_IDR(prog_idr); static DEFINE_SPINLOCK(prog_idr_lock); static DEFINE_IDR(map_idr); static DEFINE_SPINLOCK(map_idr_lock); +static DEFINE_IDR(link_idr); +static DEFINE_SPINLOCK(link_idr_lock); int sysctl_unprivileged_bpf_disabled __read_mostly; static const struct bpf_map_ops * const bpf_map_types[] = { -#define BPF_PROG_TYPE(_id, _ops) +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) #define BPF_MAP_TYPE(_id, _ops) \ [_id] = &_ops, +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE }; /* @@ -63,32 +72,19 @@ int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, size_t actual_size) { - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - int err; + unsigned char __user *addr = uaddr + expected_size; + int res; if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ return -E2BIG; - if (unlikely(!access_ok(uaddr, actual_size))) - return -EFAULT; - if (actual_size <= expected_size) return 0; - addr = uaddr + expected_size; - end = uaddr + actual_size; - - for (; addr < end; addr++) { - err = get_user(val, addr); - if (err) - return err; - if (val) - return -E2BIG; - } - - return 0; + res = check_zeroed_user(addr, actual_size - expected_size); + if (res < 0) + return res; + return res ? 
0 : -E2BIG; } const struct bpf_map_ops bpf_map_offload_ops = { @@ -126,7 +122,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) return map; } -void *bpf_map_area_alloc(u64 size, int numa_node) +static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) { /* We really just want to fail instead of triggering OOM killer * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, @@ -144,18 +140,33 @@ void *bpf_map_area_alloc(u64 size, int numa_node) if (size >= SIZE_MAX) return NULL; - if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { + /* kmalloc()'ed memory can't be mmap()'ed */ + if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, numa_node); if (area != NULL) return area; } - + if (mmapable) { + BUG_ON(!PAGE_ALIGNED(size)); + return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL | + __GFP_RETRY_MAYFAIL | flags); + } return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | __GFP_RETRY_MAYFAIL | flags, __builtin_return_address(0)); } +void *bpf_map_area_alloc(u64 size, int numa_node) +{ + return __bpf_map_area_alloc(size, numa_node, false); +} + +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) +{ + return __bpf_map_area_alloc(size, numa_node, true); +} + void bpf_map_area_free(void *area) { kvfree(area); @@ -313,7 +324,7 @@ static void bpf_map_free_deferred(struct work_struct *work) static void bpf_map_put_uref(struct bpf_map *map) { - if (atomic_dec_and_test(&map->usercnt)) { + if (atomic64_dec_and_test(&map->usercnt)) { if (map->ops->map_release_uref) map->ops->map_release_uref(map); } @@ -324,7 +335,7 @@ static void bpf_map_put_uref(struct bpf_map *map) */ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) { - if (atomic_dec_and_test(&map->refcnt)) { + if (atomic64_dec_and_test(&map->refcnt)) { /* bpf_map_free_id() must be called first */ bpf_map_free_id(map, do_idr_lock); btf_put(map->btf); @@ -373,13 +384,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_map *map = filp->private_data; const struct bpf_array *array; - u32 owner_prog_type = 0; - u32 owner_jited = 0; + u32 type = 0, jited = 0; if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { array = container_of(map, struct bpf_array, map); - owner_prog_type = array->owner_prog_type; - owner_jited = array->owner_jited; + type = array->aux->type; + jited = array->aux->jited; } seq_printf(m, @@ -399,12 +409,9 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) map->memory.pages * 1ULL << PAGE_SHIFT, map->id, READ_ONCE(map->frozen)); - - if (owner_prog_type) { - seq_printf(m, "owner_prog_type:\t%u\n", - owner_prog_type); - seq_printf(m, "owner_jited:\t%u\n", - owner_jited); + if (type) { + seq_printf(m, "owner_prog_type:\t%u\n", type); + seq_printf(m, "owner_jited:\t%u\n", jited); } } #endif @@ -427,6 +434,94 @@ static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, return -EINVAL; } +/* called for any extra memory-mapped regions (except initial) */ +static void bpf_map_mmap_open(struct vm_area_struct *vma) +{ + struct bpf_map *map = vma->vm_file->private_data; + + if (vma->vm_flags & VM_MAYWRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt++; + mutex_unlock(&map->freeze_mutex); + } +} + +/* called for all unmapped memory region (including initial) */ +static void bpf_map_mmap_close(struct vm_area_struct *vma) +{ + struct bpf_map *map = vma->vm_file->private_data; + + if (vma->vm_flags & 
VM_MAYWRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt--; + mutex_unlock(&map->freeze_mutex); + } +} + +static const struct vm_operations_struct bpf_map_default_vmops = { + .open = bpf_map_mmap_open, + .close = bpf_map_mmap_close, +}; + +static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct bpf_map *map = filp->private_data; + int err; + + if (!map->ops->map_mmap || map_value_has_spin_lock(map) || + map_value_has_timer(map)) + return -ENOTSUPP; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + mutex_lock(&map->freeze_mutex); + + if (vma->vm_flags & VM_WRITE) { + if (map->frozen) { + err = -EPERM; + goto out; + } + /* map is meant to be read-only, so do not allow mapping as + * writable, because it's possible to leak a writable page + * reference and allows user-space to still modify it after + * freezing, while verifier will assume contents do not change + */ + if (map->map_flags & BPF_F_RDONLY_PROG) { + err = -EACCES; + goto out; + } + } + + /* set default open/close callbacks */ + vma->vm_ops = &bpf_map_default_vmops; + vma->vm_private_data = map; + vma->vm_flags &= ~VM_MAYEXEC; + if (!(vma->vm_flags & VM_WRITE)) + /* disallow re-mapping with PROT_WRITE */ + vma->vm_flags &= ~VM_MAYWRITE; + + err = map->ops->map_mmap(map, vma); + if (err) + goto out; + + if (vma->vm_flags & VM_MAYWRITE) + map->writecnt++; +out: + mutex_unlock(&map->freeze_mutex); + return err; +} + +static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts) +{ + struct bpf_map *map = filp->private_data; + + if (map->ops->map_poll) + return map->ops->map_poll(map, filp, pts); + + return EPOLLERR; +} + const struct file_operations bpf_map_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = bpf_map_show_fdinfo, @@ -434,6 +529,8 @@ const struct file_operations bpf_map_fops = { .release = bpf_map_release, .read = bpf_dummy_read, .write = bpf_dummy_write, + .mmap = bpf_map_mmap, + .poll = bpf_map_poll, }; int bpf_map_new_fd(struct bpf_map *map, int flags) @@ -528,7 +625,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_ARRAY && map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && - map->map_type != BPF_MAP_TYPE_SK_STORAGE) + map->map_type != BPF_MAP_TYPE_SK_STORAGE && + map->map_type != BPF_MAP_TYPE_INODE_STORAGE) return -ENOTSUPP; if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > map->value_size) { @@ -539,13 +637,23 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, } } + map->timer_off = btf_find_timer(btf, value_type); + if (map_value_has_timer(map)) { + if (map->map_flags & BPF_F_RDONLY_PROG) + return -EACCES; + if (map->map_type != BPF_MAP_TYPE_HASH && + map->map_type != BPF_MAP_TYPE_LRU_HASH && + map->map_type != BPF_MAP_TYPE_ARRAY) + return -EOPNOTSUPP; + } + if (map->ops->map_check_btf) ret = map->ops->map_check_btf(map, btf, key_type, value_type); return ret; } -#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id +#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id /* called via syscall */ static int map_create(union bpf_attr *attr) { @@ -559,6 +667,14 @@ static int map_create(union bpf_attr *attr) if (err) return -EINVAL; + if (attr->btf_vmlinux_value_type_id) { + if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || + attr->btf_key_type_id || attr->btf_value_type_id) + return -EINVAL; + } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { + return -EINVAL; + } + f_flags = bpf_get_file_flag(attr->map_flags); if (f_flags < 0) 
return f_flags; @@ -577,35 +693,40 @@ static int map_create(union bpf_attr *attr) if (err) goto free_map; - atomic_set(&map->refcnt, 1); - atomic_set(&map->usercnt, 1); + atomic64_set(&map->refcnt, 1); + atomic64_set(&map->usercnt, 1); + mutex_init(&map->freeze_mutex); - if (attr->btf_key_type_id || attr->btf_value_type_id) { + map->spin_lock_off = -EINVAL; + map->timer_off = -EINVAL; + if (attr->btf_key_type_id || attr->btf_value_type_id || + /* Even the map's value is a kernel's struct, + * the bpf_prog.o must have BTF to begin with + * to figure out the corresponding kernel's + * counter part. Thus, attr->btf_fd has + * to be valid also. + */ + attr->btf_vmlinux_value_type_id) { struct btf *btf; - if (!attr->btf_value_type_id) { - err = -EINVAL; - goto free_map; - } - btf = btf_get_by_fd(attr->btf_fd); if (IS_ERR(btf)) { err = PTR_ERR(btf); goto free_map; } - - err = map_check_btf(map, btf, attr->btf_key_type_id, - attr->btf_value_type_id); - if (err) { - btf_put(btf); - goto free_map; - } - map->btf = btf; + + if (attr->btf_value_type_id) { + err = map_check_btf(map, btf, attr->btf_key_type_id, + attr->btf_value_type_id); + if (err) + goto free_map; + } map->btf_key_type_id = attr->btf_key_type_id; map->btf_value_type_id = attr->btf_value_type_id; } else { - map->spin_lock_off = -EINVAL; + map->btf_vmlinux_value_type_id = + attr->btf_vmlinux_value_type_id; } err = security_bpf_map_alloc(map); @@ -655,21 +776,34 @@ struct bpf_map *__bpf_map_get(struct fd f) return f.file->private_data; } -/* prog's and map's refcnt limit */ -#define BPF_MAX_REFCNT 32768 - -struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) +void bpf_map_inc(struct bpf_map *map) { - if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { - atomic_dec(&map->refcnt); - return ERR_PTR(-EBUSY); - } - if (uref) - atomic_inc(&map->usercnt); - return map; + atomic64_inc(&map->refcnt); } EXPORT_SYMBOL_GPL(bpf_map_inc); +void bpf_map_inc_with_uref(struct bpf_map *map) +{ + atomic64_inc(&map->refcnt); + atomic64_inc(&map->usercnt); +} +EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); + +struct bpf_map *bpf_map_get(u32 ufd) +{ + struct fd f = fdget(ufd); + struct bpf_map *map; + + map = __bpf_map_get(f); + if (IS_ERR(map)) + return map; + + bpf_map_inc(map); + fdput(f); + + return map; +} + struct bpf_map *bpf_map_get_with_uref(u32 ufd) { struct fd f = fdget(ufd); @@ -679,38 +813,30 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) if (IS_ERR(map)) return map; - map = bpf_map_inc(map, true); + bpf_map_inc_with_uref(map); fdput(f); return map; } /* map_idr_lock should have been held */ -static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, - bool uref) +static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) { int refold; - refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); - - if (refold >= BPF_MAX_REFCNT) { - __bpf_map_put(map, false); - return ERR_PTR(-EBUSY); - } - + refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); if (!refold) return ERR_PTR(-ENOENT); - if (uref) - atomic_inc(&map->usercnt); + atomic64_inc(&map->usercnt); return map; } -struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) +struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) { spin_lock_bh(&map_idr_lock); - map = __bpf_map_inc_not_zero(map, uref); + map = __bpf_map_inc_not_zero(map, false); spin_unlock_bh(&map_idr_lock); return map; @@ -805,7 +931,7 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_percpu_cgroup_storage_copy(map, key, value); } else if (map->map_type == 
BPF_MAP_TYPE_STACK_TRACE) { err = bpf_stackmap_copy(map, key, value); - } else if (IS_FD_ARRAY(map)) { + } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { err = bpf_fd_array_map_lookup_elem(map, key, value); } else if (IS_FD_HASH(map)) { err = bpf_fd_htab_map_lookup_elem(map, key, value); @@ -814,6 +940,9 @@ static int map_lookup_elem(union bpf_attr *attr) } else if (map->map_type == BPF_MAP_TYPE_QUEUE || map->map_type == BPF_MAP_TYPE_STACK) { err = map->ops->map_peek_elem(map, value); + } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { + /* struct_ops map requires directly updating "value" */ + err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); } else { rcu_read_lock(); if (map->ops->map_lookup_elem_sys_only) @@ -831,8 +960,8 @@ static int map_lookup_elem(union bpf_attr *attr) copy_map_value_locked(map, value, ptr, true); else copy_map_value(map, value, ptr); - /* mask lock, since value wasn't zero inited */ - check_and_init_map_lock(map, value); + /* mask lock and timer, since value wasn't zero inited */ + check_and_init_map_value(map, value); } rcu_read_unlock(); } @@ -929,9 +1058,14 @@ static int map_update_elem(union bpf_attr *attr) goto out; } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || map->map_type == BPF_MAP_TYPE_SOCKHASH || - map->map_type == BPF_MAP_TYPE_SOCKMAP) { + map->map_type == BPF_MAP_TYPE_SOCKMAP || + map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { err = map->ops->map_update_elem(map, key, value, attr->flags); goto out; + } else if (IS_FD_PROG_ARRAY(map)) { + err = bpf_fd_array_map_update_elem(map, f.file, key, value, + attr->flags); + goto out; } /* must increment bpf_prog_active to avoid kprobe+bpf triggering from @@ -1014,6 +1148,11 @@ static int map_delete_elem(union bpf_attr *attr) if (bpf_map_is_dev_bound(map)) { err = bpf_map_offload_delete_elem(map, key); goto out; + } else if (IS_FD_PROG_ARRAY(map) || + map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { + /* These maps require sleepable context */ + err = map->ops->map_delete_elem(map, key); + goto out; } preempt_disable(); @@ -1118,7 +1257,8 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || + !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; goto err_put; } @@ -1146,8 +1286,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) if (err) goto free_value; - if (copy_to_user(uvalue, value, value_size) != 0) + if (copy_to_user(uvalue, value, value_size) != 0) { + err = -EFAULT; goto free_value; + } err = 0; @@ -1175,28 +1317,44 @@ static int map_freeze(const union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); + + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || + map_value_has_timer(map)) { + fdput(f); + return -ENOTSUPP; + } + + mutex_lock(&map->freeze_mutex); + + if (map->writecnt) { + err = -EBUSY; + goto err_put; + } if (READ_ONCE(map->frozen)) { err = -EBUSY; goto err_put; } - if (!capable(CAP_SYS_ADMIN)) { + if (!bpf_capable()) { err = -EPERM; goto err_put; } WRITE_ONCE(map->frozen, true); err_put: + mutex_unlock(&map->freeze_mutex); fdput(f); return err; } static const struct bpf_prog_ops * const bpf_prog_types[] = { -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ [_id] = & _name ## _prog_ops, #define BPF_MAP_TYPE(_id, _ops) +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef 
BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE }; static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) @@ -1218,23 +1376,34 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) return 0; } -/* drop refcnt on maps used by eBPF program and free auxilary data */ -static void free_used_maps(struct bpf_prog_aux *aux) +enum bpf_audit { + BPF_AUDIT_LOAD, + BPF_AUDIT_UNLOAD, + BPF_AUDIT_MAX, +}; + +static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { + [BPF_AUDIT_LOAD] = "LOAD", + [BPF_AUDIT_UNLOAD] = "UNLOAD", +}; + +static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) { - enum bpf_cgroup_storage_type stype; - int i; + struct audit_context *ctx = NULL; + struct audit_buffer *ab; - for_each_cgroup_storage_type(stype) { - if (!aux->cgroup_storage[stype]) - continue; - bpf_cgroup_storage_release(aux->prog, - aux->cgroup_storage[stype]); - } - - for (i = 0; i < aux->used_map_cnt; i++) - bpf_map_put(aux->used_maps[i]); - - kfree(aux->used_maps); + if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) + return; + if (audit_enabled == AUDIT_OFF) + return; + if (op == BPF_AUDIT_LOAD) + ctx = audit_context(); + ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); + if (unlikely(!ab)) + return; + audit_log_format(ab, "prog-id=%u op=%s", + prog->aux->id, bpf_audit_str[op]); + audit_log_end(ab); } int __bpf_prog_charge(struct user_struct *user, u32 pages) @@ -1303,6 +1472,8 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog) void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) { + unsigned long flags; + /* cBPF to eBPF migrations are currently not in the idr store. * Offloaded programs are removed from the store when their device * disappears - even if someone grabs an fd to them they are unusable, @@ -1312,7 +1483,7 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) return; if (do_idr_lock) - spin_lock_bh(&prog_idr_lock); + spin_lock_irqsave(&prog_idr_lock, flags); else __acquire(&prog_idr_lock); @@ -1320,7 +1491,7 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) prog->aux->id = 0; if (do_idr_lock) - spin_unlock_bh(&prog_idr_lock); + spin_unlock_irqrestore(&prog_idr_lock, flags); else __release(&prog_idr_lock); } @@ -1330,7 +1501,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); kvfree(aux->func_info); - free_used_maps(aux); + kfree(aux->func_info_aux); bpf_prog_uncharge_memlock(aux->prog); security_bpf_prog_free(aux); bpf_prog_free(aux->prog); @@ -1348,13 +1519,32 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) __bpf_prog_put_rcu(&prog->aux->rcu); } +static void bpf_prog_put_deferred(struct work_struct *work) +{ + struct bpf_prog_aux *aux; + struct bpf_prog *prog; + + aux = container_of(work, struct bpf_prog_aux, work); + prog = aux->prog; + perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); + bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); + __bpf_prog_put_noref(prog, true); +} + static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { - if (atomic_dec_and_test(&prog->aux->refcnt)) { - perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); + struct bpf_prog_aux *aux = prog->aux; + + if (atomic64_dec_and_test(&aux->refcnt)) { /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); - __bpf_prog_put_noref(prog, true); + + if (in_irq() || irqs_disabled()) { + INIT_WORK(&aux->work, bpf_prog_put_deferred); + schedule_work(&aux->work); + } else { + 
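+			/* Process context with IRQs enabled: the teardown
+			 * (perf unload event, audit record, freeing) can run
+			 * synchronously instead of bouncing through the
+			 * workqueue.
+			 */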
bpf_prog_put_deferred(&aux->work); + } } } @@ -1456,13 +1646,9 @@ static struct bpf_prog *____bpf_prog_get(struct fd f) return f.file->private_data; } -struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) +void bpf_prog_add(struct bpf_prog *prog, int i) { - if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { - atomic_sub(i, &prog->aux->refcnt); - return ERR_PTR(-EBUSY); - } - return prog; + atomic64_add(i, &prog->aux->refcnt); } EXPORT_SYMBOL_GPL(bpf_prog_add); @@ -1473,13 +1659,13 @@ void bpf_prog_sub(struct bpf_prog *prog, int i) * path holds a reference to the program, thus atomic_sub() can * be safely used in such cases! */ - WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); + WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); } EXPORT_SYMBOL_GPL(bpf_prog_sub); -struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) +void bpf_prog_inc(struct bpf_prog *prog) { - return bpf_prog_add(prog, 1); + atomic64_inc(&prog->aux->refcnt); } EXPORT_SYMBOL_GPL(bpf_prog_inc); @@ -1488,12 +1674,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) { int refold; - refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); - - if (refold >= BPF_MAX_REFCNT) { - __bpf_prog_put(prog, false); - return ERR_PTR(-EBUSY); - } + refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); if (!refold) return ERR_PTR(-ENOENT); @@ -1531,7 +1712,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, goto out; } - prog = bpf_prog_inc(prog); + bpf_prog_inc(prog); out: fdput(f); return prog; @@ -1576,9 +1757,29 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) } static int -bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, - enum bpf_attach_type expected_attach_type) +bpf_prog_load_check_attach(enum bpf_prog_type prog_type, + enum bpf_attach_type expected_attach_type, + u32 btf_id, u32 prog_fd) { + if (btf_id) { + if (btf_id > BTF_MAX_TYPE) + return -EINVAL; + + switch (prog_type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_STRUCT_OPS: + case BPF_PROG_TYPE_EXT: + break; + default: + return -EINVAL; + } + } + + if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING && + prog_type != BPF_PROG_TYPE_EXT) + return -EINVAL; + switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK: switch (expected_attach_type) { @@ -1595,6 +1796,10 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_INET6_CONNECT: + case BPF_CGROUP_INET4_GETPEERNAME: + case BPF_CGROUP_INET6_GETPEERNAME: + case BPF_CGROUP_INET4_GETSOCKNAME: + case BPF_CGROUP_INET6_GETSOCKNAME: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: case BPF_CGROUP_UDP4_RECVMSG: @@ -1619,13 +1824,70 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, default: return -EINVAL; } + case BPF_PROG_TYPE_SK_LOOKUP: + if (expected_attach_type == BPF_SK_LOOKUP) + return 0; + return -EINVAL; + case BPF_PROG_TYPE_EXT: + if (expected_attach_type) + return -EINVAL; + /* fallthrough */ default: return 0; } } +static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) +{ + switch (prog_type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + case BPF_PROG_TYPE_XDP: + case BPF_PROG_TYPE_LWT_IN: + case BPF_PROG_TYPE_LWT_OUT: + case BPF_PROG_TYPE_LWT_XMIT: + case BPF_PROG_TYPE_LWT_SEG6LOCAL: + case BPF_PROG_TYPE_SK_SKB: + case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_LIRC_MODE2: + case BPF_PROG_TYPE_FLOW_DISSECTOR: + case 
BPF_PROG_TYPE_CGROUP_DEVICE: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_SOCK_OPS: + case BPF_PROG_TYPE_EXT: /* extends any prog */ + return true; + case BPF_PROG_TYPE_CGROUP_SKB: + /* always unpriv */ + case BPF_PROG_TYPE_SK_REUSEPORT: + /* equivalent to SOCKET_FILTER. need CAP_BPF only */ + default: + return false; + } +} + +static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) +{ + switch (prog_type) { + case BPF_PROG_TYPE_KPROBE: + case BPF_PROG_TYPE_TRACEPOINT: + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_RAW_TRACEPOINT: + case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ + case BPF_PROG_TYPE_EXT: /* extends any prog */ + return true; + default: + return false; + } +} + /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt +#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -1646,7 +1908,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && - !capable(CAP_SYS_ADMIN)) + !bpf_capable()) return -EPERM; /* copy eBPF program license from user space */ @@ -1659,15 +1921,22 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) is_gpl = license_is_gpl_compatible(license); if (attr->insn_cnt == 0 || - attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) + attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) return -E2BIG; if (type != BPF_PROG_TYPE_SOCKET_FILTER && type != BPF_PROG_TYPE_CGROUP_SKB && - !capable(CAP_SYS_ADMIN)) + !bpf_capable()) + return -EPERM; + + if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN)) + return -EPERM; + if (is_perfmon_prog_type(type) && !perfmon_capable()) return -EPERM; bpf_prog_load_fixup_attach_type(attr); - if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type)) + if (bpf_prog_load_check_attach(type, attr->expected_attach_type, + attr->attach_btf_id, + attr->attach_prog_fd)) return -EINVAL; /* plain bpf_prog allocation */ @@ -1676,6 +1945,17 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) return -ENOMEM; prog->expected_attach_type = attr->expected_attach_type; + prog->aux->attach_btf_id = attr->attach_btf_id; + if (attr->attach_prog_fd) { + struct bpf_prog *tgt_prog; + + tgt_prog = bpf_prog_get(attr->attach_prog_fd); + if (IS_ERR(tgt_prog)) { + err = PTR_ERR(tgt_prog); + goto free_prog_nouncharge; + } + prog->aux->linked_prog = tgt_prog; + } prog->aux->offload_requested = !!attr->prog_ifindex; @@ -1697,7 +1977,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) prog->orig_prog = NULL; prog->jited = 0; - atomic_set(&prog->aux->refcnt, 1); + atomic64_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 
1 : 0; if (bpf_prog_is_dev_bound(prog->aux)) { @@ -1745,6 +2025,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) */ bpf_prog_kallsyms_add(prog); perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); + bpf_audit_prog(prog, BPF_AUDIT_LOAD); err = bpf_prog_new_fd(prog); if (err < 0) @@ -1787,87 +2068,488 @@ static int bpf_obj_get(const union bpf_attr *attr) attr->file_flags); } -struct bpf_raw_tracepoint { - struct bpf_raw_event_map *btp; - struct bpf_prog *prog; -}; - -static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) +void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog) { - struct bpf_raw_tracepoint *raw_tp = filp->private_data; + atomic64_set(&link->refcnt, 1); + link->type = type; + link->id = 0; + link->ops = ops; + link->prog = prog; +} - if (raw_tp->prog) { - bpf_probe_unregister(raw_tp->btp, raw_tp->prog); - bpf_prog_put(raw_tp->prog); +static void bpf_link_free_id(int id) +{ + if (!id) + return; + + spin_lock_bh(&link_idr_lock); + idr_remove(&link_idr, id); + spin_unlock_bh(&link_idr_lock); +} + +/* Clean up bpf_link and corresponding anon_inode file and FD. After + * anon_inode is created, bpf_link can't be just kfree()'d due to deferred + * anon_inode's release() call. This helper marks bpf_link as + * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt + * is not decremented, it's the responsibility of a calling code that failed + * to complete bpf_link initialization. + */ +void bpf_link_cleanup(struct bpf_link_primer *primer) +{ + primer->link->prog = NULL; + bpf_link_free_id(primer->id); + fput(primer->file); + put_unused_fd(primer->fd); +} + +void bpf_link_inc(struct bpf_link *link) +{ + atomic64_inc(&link->refcnt); +} + +/* bpf_link_free is guaranteed to be called from process context */ +static void bpf_link_free(struct bpf_link *link) +{ + bpf_link_free_id(link->id); + if (link->prog) { + /* detach BPF program, clean up used resources */ + link->ops->release(link); + bpf_prog_put(link->prog); } - bpf_put_raw_tracepoint(raw_tp->btp); - kfree(raw_tp); + /* free bpf_link and its containing memory */ + link->ops->dealloc(link); +} + +static void bpf_link_put_deferred(struct work_struct *work) +{ + struct bpf_link *link = container_of(work, struct bpf_link, work); + + bpf_link_free(link); +} + +/* bpf_link_put can be called from atomic context, but ensures that resources + * are freed from process context + */ +void bpf_link_put(struct bpf_link *link) +{ + if (!atomic64_dec_and_test(&link->refcnt)) + return; + + if (in_atomic()) { + INIT_WORK(&link->work, bpf_link_put_deferred); + schedule_work(&link->work); + } else { + bpf_link_free(link); + } +} + +static int bpf_link_release(struct inode *inode, struct file *filp) +{ + struct bpf_link *link = filp->private_data; + + bpf_link_put(link); return 0; } -static const struct file_operations bpf_raw_tp_fops = { - .release = bpf_raw_tracepoint_release, +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) +#define BPF_MAP_TYPE(_id, _ops) +#define BPF_LINK_TYPE(_id, _name) [_id] = #_name, +static const char *bpf_link_type_strs[] = { + [BPF_LINK_TYPE_UNSPEC] = "<invalid>", +#include <linux/bpf_types.h> +}; +#undef BPF_PROG_TYPE +#undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE + +#ifdef CONFIG_PROC_FS +static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) +{ + const struct bpf_link *link = filp->private_data; + const struct bpf_prog *prog = link->prog; + char
prog_tag[sizeof(prog->tag) * 2 + 1] = { }; + + bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); + seq_printf(m, + "link_type:\t%s\n" + "link_id:\t%u\n" + "prog_tag:\t%s\n" + "prog_id:\t%u\n", + bpf_link_type_strs[link->type], + link->id, + prog_tag, + prog->aux->id); + if (link->ops->show_fdinfo) + link->ops->show_fdinfo(link, m); +} +#endif + +const struct file_operations bpf_link_fops = { +#ifdef CONFIG_PROC_FS + .show_fdinfo = bpf_link_show_fdinfo, +#endif + .release = bpf_link_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, +}; + +static int bpf_link_alloc_id(struct bpf_link *link) +{ + int id; + + idr_preload(GFP_KERNEL); + spin_lock_bh(&link_idr_lock); + id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); + spin_unlock_bh(&link_idr_lock); + idr_preload_end(); + + return id; +} + +/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, + * reserving unused FD and allocating ID from link_idr. This is to be paired + * with bpf_link_settle() to install FD and ID and expose bpf_link to + * user-space, if bpf_link is successfully attached. If not, bpf_link and + * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the + * transient state is passed around in struct bpf_link_primer. + * This is the preferred way to create and initialize bpf_link, especially when + * there are complicated and expensive operations in between creating bpf_link + * itself and attaching it to BPF hook. By using bpf_link_prime() and + * bpf_link_settle() kernel code using bpf_link doesn't have to perform + * expensive (and potentially failing) roll back operations in a rare case + * that file, FD, or ID can't be allocated. + */ +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) +{ + struct file *file; + int fd, id; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return fd; + + file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); + if (IS_ERR(file)) { + put_unused_fd(fd); + return PTR_ERR(file); + } + + id = bpf_link_alloc_id(link); + if (id < 0) { + put_unused_fd(fd); + fput(file); + return id; + } + + primer->link = link; + primer->file = file; + primer->fd = fd; + primer->id = id; + return 0; +} + +int bpf_link_settle(struct bpf_link_primer *primer) +{ + /* make bpf_link fetchable by ID */ + spin_lock_bh(&link_idr_lock); + primer->link->id = primer->id; + spin_unlock_bh(&link_idr_lock); + /* make bpf_link fetchable by FD */ + fd_install(primer->fd, primer->file); + /* pass through installed FD */ + return primer->fd; +} + +int bpf_link_new_fd(struct bpf_link *link) +{ + return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); +} + +struct bpf_link *bpf_link_get_from_fd(u32 ufd) +{ + struct fd f = fdget(ufd); + struct bpf_link *link; + + if (!f.file) + return ERR_PTR(-EBADF); + if (f.file->f_op != &bpf_link_fops) { + fdput(f); + return ERR_PTR(-EINVAL); + } + + link = f.file->private_data; + bpf_link_inc(link); + fdput(f); + + return link; +} + +struct bpf_tracing_link { + struct bpf_link link; + enum bpf_attach_type attach_type; +}; + +static void bpf_tracing_link_release(struct bpf_link *link) +{ + WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog)); +} + +static void bpf_tracing_link_dealloc(struct bpf_link *link) +{ + struct bpf_tracing_link *tr_link = + container_of(link, struct bpf_tracing_link, link); + + kfree(tr_link); +} + +static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_tracing_link *tr_link = + container_of(link, struct
bpf_tracing_link, link); + + seq_printf(seq, + "attach_type:\t%d\n", + tr_link->attach_type); +} + +static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_tracing_link *tr_link = + container_of(link, struct bpf_tracing_link, link); + + info->tracing.attach_type = tr_link->attach_type; + + return 0; +} + +static const struct bpf_link_ops bpf_tracing_link_lops = { + .release = bpf_tracing_link_release, + .dealloc = bpf_tracing_link_dealloc, + .show_fdinfo = bpf_tracing_link_show_fdinfo, + .fill_link_info = bpf_tracing_link_fill_link_info, +}; + +static int bpf_tracing_prog_attach(struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct bpf_tracing_link *link; + int err; + + switch (prog->type) { + case BPF_PROG_TYPE_TRACING: + if (prog->expected_attach_type != BPF_TRACE_FENTRY && + prog->expected_attach_type != BPF_TRACE_FEXIT && + prog->expected_attach_type != BPF_MODIFY_RETURN) { + err = -EINVAL; + goto out_put_prog; + } + break; + case BPF_PROG_TYPE_EXT: + if (prog->expected_attach_type != 0) { + err = -EINVAL; + goto out_put_prog; + } + break; + case BPF_PROG_TYPE_LSM: + if (prog->expected_attach_type != BPF_LSM_MAC) { + err = -EINVAL; + goto out_put_prog; + } + break; + default: + err = -EINVAL; + goto out_put_prog; + } + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out_put_prog; + } + bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, + &bpf_tracing_link_lops, prog); + link->attach_type = prog->expected_attach_type; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + goto out_put_prog; + } + + err = bpf_trampoline_link_prog(prog); + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_prog; + } + + return bpf_link_settle(&link_primer); +out_put_prog: + bpf_prog_put(prog); + return err; +} + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; +}; + +static void bpf_raw_tp_link_release(struct bpf_link *link) +{ + struct bpf_raw_tp_link *raw_tp = + container_of(link, struct bpf_raw_tp_link, link); + + bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); + bpf_put_raw_tracepoint(raw_tp->btp); +} + +static void bpf_raw_tp_link_dealloc(struct bpf_link *link) +{ + struct bpf_raw_tp_link *raw_tp = + container_of(link, struct bpf_raw_tp_link, link); + + kfree(raw_tp); +} + +static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_raw_tp_link *raw_tp_link = + container_of(link, struct bpf_raw_tp_link, link); + + seq_printf(seq, + "tp_name:\t%s\n", + raw_tp_link->btp->tp->name); +} + +static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_raw_tp_link *raw_tp_link = + container_of(link, struct bpf_raw_tp_link, link); + char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); + const char *tp_name = raw_tp_link->btp->tp->name; + u32 ulen = info->raw_tracepoint.tp_name_len; + size_t tp_len = strlen(tp_name); + + if (ulen && !ubuf) + return -EINVAL; + + info->raw_tracepoint.tp_name_len = tp_len + 1; + + if (!ubuf) + return 0; + + if (ulen >= tp_len + 1) { + if (copy_to_user(ubuf, tp_name, tp_len + 1)) + return -EFAULT; + } else { + char zero = '\0'; + + if (copy_to_user(ubuf, tp_name, ulen - 1)) + return -EFAULT; + if (put_user(zero, ubuf + ulen - 1)) + return -EFAULT; + return -ENOSPC; + } + + return 0; +} + +static const struct bpf_link_ops bpf_raw_tp_link_lops = { + .release = 
bpf_raw_tp_link_release, + .dealloc = bpf_raw_tp_link_dealloc, + .show_fdinfo = bpf_raw_tp_link_show_fdinfo, + .fill_link_info = bpf_raw_tp_link_fill_link_info, +}; + #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd static int bpf_raw_tracepoint_open(const union bpf_attr *attr) { - struct bpf_raw_tracepoint *raw_tp; + struct bpf_link_primer link_primer; + struct bpf_raw_tp_link *link; struct bpf_raw_event_map *btp; struct bpf_prog *prog; - char tp_name[128]; - int tp_fd, err; + const char *tp_name; + char buf[128]; + int err; - if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name), - sizeof(tp_name) - 1) < 0) - return -EFAULT; - tp_name[sizeof(tp_name) - 1] = 0; - - btp = bpf_get_raw_tracepoint(tp_name); - if (!btp) - return -ENOENT; - - raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); - if (!raw_tp) { - err = -ENOMEM; - goto out_put_btp; - } - raw_tp->btp = btp; + if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) + return -EINVAL; prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto out_free_tp; - } - if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT && - prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) { + if (IS_ERR(prog)) + return PTR_ERR(prog); + + switch (prog->type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_LSM: + if (attr->raw_tracepoint.name) { + /* The attach point for this category of programs + * should be specified via btf_id during program load. + */ + err = -EINVAL; + goto out_put_prog; + } + if (prog->type == BPF_PROG_TYPE_TRACING && + prog->expected_attach_type == BPF_TRACE_RAW_TP) { + tp_name = prog->aux->attach_func_name; + break; + } + return bpf_tracing_prog_attach(prog); + case BPF_PROG_TYPE_RAW_TRACEPOINT: + case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: + if (strncpy_from_user(buf, + u64_to_user_ptr(attr->raw_tracepoint.name), + sizeof(buf) - 1) < 0) { + err = -EFAULT; + goto out_put_prog; + } + buf[sizeof(buf) - 1] = 0; + tp_name = buf; + break; + default: err = -EINVAL; goto out_put_prog; } - err = bpf_probe_register(raw_tp->btp, prog); - if (err) - goto out_put_prog; - - raw_tp->prog = prog; - tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, - O_CLOEXEC); - if (tp_fd < 0) { - bpf_probe_unregister(raw_tp->btp, prog); - err = tp_fd; + btp = bpf_get_raw_tracepoint(tp_name); + if (!btp) { + err = -ENOENT; goto out_put_prog; } - return tp_fd; -out_put_prog: - bpf_prog_put(prog); -out_free_tp: - kfree(raw_tp); + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out_put_btp; + } + bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, + &bpf_raw_tp_link_lops, prog); + link->btp = btp; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + goto out_put_btp; + } + + err = bpf_probe_register(link->btp, prog); + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_btp; + } + + return bpf_link_settle(&link_primer); + out_put_btp: bpf_put_raw_tracepoint(btp); +out_put_prog: + bpf_prog_put(prog); return err; } @@ -1878,8 +2560,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_SK_LOOKUP: return attach_type == prog->expected_attach_type ? 0 : -EINVAL; case BPF_PROG_TYPE_CGROUP_SKB: + if (!capable(CAP_NET_ADMIN)) + /* cg-skb progs can be loaded by unpriv user. + * check permissions at attach time. 
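+ * (The blanket capable(CAP_NET_ADMIN) test was dropped from bpf_prog_attach(), so this is now the only gate for unprivileged cg-skb progs.)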
+ */ + return -EPERM; return prog->enforce_expected_attach_type && prog->expected_attach_type != attach_type ? -EINVAL : 0; @@ -1888,10 +2576,62 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, } } -#define BPF_PROG_ATTACH_LAST_FIELD attach_flags +static enum bpf_prog_type +attach_type_to_prog_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + case BPF_CGROUP_INET_INGRESS: + case BPF_CGROUP_INET_EGRESS: + return BPF_PROG_TYPE_CGROUP_SKB; + break; + case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET4_POST_BIND: + case BPF_CGROUP_INET6_POST_BIND: + return BPF_PROG_TYPE_CGROUP_SOCK; + case BPF_CGROUP_INET4_BIND: + case BPF_CGROUP_INET6_BIND: + case BPF_CGROUP_INET4_CONNECT: + case BPF_CGROUP_INET6_CONNECT: + case BPF_CGROUP_INET4_GETPEERNAME: + case BPF_CGROUP_INET6_GETPEERNAME: + case BPF_CGROUP_INET4_GETSOCKNAME: + case BPF_CGROUP_INET6_GETSOCKNAME: + case BPF_CGROUP_UDP4_SENDMSG: + case BPF_CGROUP_UDP6_SENDMSG: + case BPF_CGROUP_UDP4_RECVMSG: + case BPF_CGROUP_UDP6_RECVMSG: + return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; + case BPF_CGROUP_SOCK_OPS: + return BPF_PROG_TYPE_SOCK_OPS; + case BPF_CGROUP_DEVICE: + return BPF_PROG_TYPE_CGROUP_DEVICE; + case BPF_SK_MSG_VERDICT: + return BPF_PROG_TYPE_SK_MSG; + case BPF_SK_SKB_STREAM_PARSER: + case BPF_SK_SKB_STREAM_VERDICT: + return BPF_PROG_TYPE_SK_SKB; + case BPF_LIRC_MODE2: + return BPF_PROG_TYPE_LIRC_MODE2; + case BPF_FLOW_DISSECTOR: + return BPF_PROG_TYPE_FLOW_DISSECTOR; + case BPF_CGROUP_SYSCTL: + return BPF_PROG_TYPE_CGROUP_SYSCTL; + case BPF_CGROUP_GETSOCKOPT: + case BPF_CGROUP_SETSOCKOPT: + return BPF_PROG_TYPE_CGROUP_SOCKOPT; + case BPF_TRACE_ITER: + return BPF_PROG_TYPE_TRACING; + case BPF_SK_LOOKUP: + return BPF_PROG_TYPE_SK_LOOKUP; + default: + return BPF_PROG_TYPE_UNSPEC; + } +} + +#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd #define BPF_F_ATTACH_MASK \ - (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) + (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) static int bpf_prog_attach(const union bpf_attr *attr) { @@ -1899,64 +2639,15 @@ static int bpf_prog_attach(const union bpf_attr *attr) struct bpf_prog *prog; int ret; - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (CHECK_ATTR(BPF_PROG_ATTACH)) return -EINVAL; if (attr->attach_flags & ~BPF_F_ATTACH_MASK) return -EINVAL; - switch (attr->attach_type) { - case BPF_CGROUP_INET_INGRESS: - case BPF_CGROUP_INET_EGRESS: - ptype = BPF_PROG_TYPE_CGROUP_SKB; - break; - case BPF_CGROUP_INET_SOCK_CREATE: - case BPF_CGROUP_INET4_POST_BIND: - case BPF_CGROUP_INET6_POST_BIND: - ptype = BPF_PROG_TYPE_CGROUP_SOCK; - break; - case BPF_CGROUP_INET4_BIND: - case BPF_CGROUP_INET6_BIND: - case BPF_CGROUP_INET4_CONNECT: - case BPF_CGROUP_INET6_CONNECT: - case BPF_CGROUP_UDP4_SENDMSG: - case BPF_CGROUP_UDP6_SENDMSG: - case BPF_CGROUP_UDP4_RECVMSG: - case BPF_CGROUP_UDP6_RECVMSG: - ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; - break; - case BPF_CGROUP_SOCK_OPS: - ptype = BPF_PROG_TYPE_SOCK_OPS; - break; - case BPF_CGROUP_DEVICE: - ptype = BPF_PROG_TYPE_CGROUP_DEVICE; - break; - case BPF_SK_MSG_VERDICT: - ptype = BPF_PROG_TYPE_SK_MSG; - break; - case BPF_SK_SKB_STREAM_PARSER: - case BPF_SK_SKB_STREAM_VERDICT: - ptype = BPF_PROG_TYPE_SK_SKB; - break; - case BPF_LIRC_MODE2: - ptype = BPF_PROG_TYPE_LIRC_MODE2; - break; - case BPF_FLOW_DISSECTOR: - ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; - break; - case BPF_CGROUP_SYSCTL: - ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; - break; - case BPF_CGROUP_GETSOCKOPT: - case BPF_CGROUP_SETSOCKOPT: - ptype = 
BPF_PROG_TYPE_CGROUP_SOCKOPT; - break; - default: + ptype = attach_type_to_prog_type(attr->attach_type); + if (ptype == BPF_PROG_TYPE_UNSPEC) return -EINVAL; - } prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); if (IS_ERR(prog)) @@ -1976,10 +2667,19 @@ static int bpf_prog_attach(const union bpf_attr *attr) ret = lirc_prog_attach(attr, prog); break; case BPF_PROG_TYPE_FLOW_DISSECTOR: - ret = skb_flow_dissector_bpf_prog_attach(attr, prog); + ret = netns_bpf_prog_attach(attr, prog); + break; + case BPF_PROG_TYPE_CGROUP_DEVICE: + case BPF_PROG_TYPE_CGROUP_SKB: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_SOCK_OPS: + ret = cgroup_bpf_prog_attach(attr, ptype, prog); break; default: - ret = cgroup_bpf_prog_attach(attr, ptype, prog); + ret = -EINVAL; } if (ret) @@ -1993,9 +2693,6 @@ static int bpf_prog_detach(const union bpf_attr *attr) { enum bpf_prog_type ptype; - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (CHECK_ATTR(BPF_PROG_DETACH)) return -EINVAL; @@ -2013,6 +2710,10 @@ static int bpf_prog_detach(const union bpf_attr *attr) case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_INET6_CONNECT: + case BPF_CGROUP_INET4_GETPEERNAME: + case BPF_CGROUP_INET6_GETPEERNAME: + case BPF_CGROUP_INET4_GETSOCKNAME: + case BPF_CGROUP_INET6_GETSOCKNAME: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: case BPF_CGROUP_UDP4_RECVMSG: @@ -2026,14 +2727,16 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sock_map_get_from_fd(attr, NULL); + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_MSG); case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sock_map_get_from_fd(attr, NULL); + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_SKB); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); case BPF_FLOW_DISSECTOR: - return skb_flow_dissector_bpf_prog_detach(attr); + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return netns_bpf_prog_detach(attr); case BPF_CGROUP_SYSCTL: ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; break; @@ -2070,6 +2773,10 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET6_POST_BIND: case BPF_CGROUP_INET4_CONNECT: case BPF_CGROUP_INET6_CONNECT: + case BPF_CGROUP_INET4_GETPEERNAME: + case BPF_CGROUP_INET6_GETPEERNAME: + case BPF_CGROUP_INET4_GETSOCKNAME: + case BPF_CGROUP_INET6_GETSOCKNAME: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP6_SENDMSG: case BPF_CGROUP_UDP4_RECVMSG: @@ -2079,16 +2786,15 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_SYSCTL: case BPF_CGROUP_GETSOCKOPT: case BPF_CGROUP_SETSOCKOPT: - break; + return cgroup_bpf_prog_query(attr, uattr); case BPF_LIRC_MODE2: return lirc_prog_query(attr, uattr); case BPF_FLOW_DISSECTOR: - return skb_flow_dissector_prog_query(attr, uattr); + case BPF_SK_LOOKUP: + return netns_bpf_prog_query(attr, uattr); default: return -EINVAL; } - - return cgroup_bpf_prog_query(attr, uattr); } #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out @@ -2099,8 +2805,6 @@ static int bpf_prog_test_run(const union bpf_attr *attr, struct bpf_prog *prog; int ret = -ENOTSUPP; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; if (CHECK_ATTR(BPF_PROG_TEST_RUN)) return -EINVAL; @@ -2151,8 +2855,63 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr, return err; } +struct bpf_map *bpf_map_get_curr_or_next(u32 *id) +{ + struct bpf_map *map; + + 
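/* if the map's refcount already hit zero, __bpf_map_inc_not_zero() fails and the walk advances to the next id */ + 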
spin_lock_bh(&map_idr_lock); +again: + map = idr_get_next(&map_idr, id); + if (map) { + map = __bpf_map_inc_not_zero(map, false); + if (IS_ERR(map)) { + (*id)++; + goto again; + } + } + spin_unlock_bh(&map_idr_lock); + + return map; +} + +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) +{ + struct bpf_prog *prog; + + spin_lock_bh(&prog_idr_lock); +again: + prog = idr_get_next(&prog_idr, id); + if (prog) { + prog = bpf_prog_inc_not_zero(prog); + if (IS_ERR(prog)) { + (*id)++; + goto again; + } + } + spin_unlock_bh(&prog_idr_lock); + + return prog; +} + #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id +struct bpf_prog *bpf_prog_by_id(u32 id) +{ + struct bpf_prog *prog; + + if (!id) + return ERR_PTR(-ENOENT); + + spin_lock_bh(&prog_idr_lock); + prog = idr_find(&prog_idr, id); + if (prog) + prog = bpf_prog_inc_not_zero(prog); + else + prog = ERR_PTR(-ENOENT); + spin_unlock_bh(&prog_idr_lock); + return prog; +} + static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) { struct bpf_prog *prog; @@ -2165,14 +2924,7 @@ static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - spin_lock_bh(&prog_idr_lock); - prog = idr_find(&prog_idr, id); - if (prog) - prog = bpf_prog_inc_not_zero(prog); - else - prog = ERR_PTR(-ENOENT); - spin_unlock_bh(&prog_idr_lock); - + prog = bpf_prog_by_id(id); if (IS_ERR(prog)) return PTR_ERR(prog); @@ -2245,7 +2997,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, return NULL; } -static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) +static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, + const struct cred *f_cred) { const struct bpf_map *map; struct bpf_insn *insns; @@ -2268,7 +3021,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) insns[i].code = BPF_JMP | BPF_CALL; - if (!bpf_dump_raw_ok()) + if (!bpf_dump_raw_ok(f_cred)) insns[i].imm = 0; continue; } @@ -2320,7 +3073,8 @@ static int set_info_rec_size(struct bpf_prog_info *info) return 0; } -static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, +static int bpf_prog_get_info_by_fd(struct file *file, + struct bpf_prog *prog, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2372,7 +3126,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, info.run_time_ns = stats.nsecs; info.run_cnt = stats.cnt; - if (!capable(CAP_SYS_ADMIN)) { + if (!bpf_capable()) { info.jited_prog_len = 0; info.xlated_prog_len = 0; info.nr_jited_ksyms = 0; @@ -2389,11 +3143,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, struct bpf_insn *insns_sanitized; bool fault; - if (prog->blinded && !bpf_dump_raw_ok()) { + if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { info.xlated_prog_insns = 0; goto done; } - insns_sanitized = bpf_insn_prepare_dump(prog); + insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); if (!insns_sanitized) return -ENOMEM; uinsns = u64_to_user_ptr(info.xlated_prog_insns); @@ -2427,7 +3181,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, } if (info.jited_prog_len && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { uinsns = u64_to_user_ptr(info.jited_prog_insns); ulen = min_t(u32, info.jited_prog_len, ulen); @@ -2462,7 +3216,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_ksyms; info.nr_jited_ksyms = prog->aux->func_cnt ? 
: 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { unsigned long ksym_addr; u64 __user *user_ksyms; u32 i; @@ -2493,7 +3247,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_func_lens; info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { u32 __user *user_lens; u32 func_len, i; @@ -2550,7 +3304,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, else info.nr_jited_line_info = 0; if (info.nr_jited_line_info && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { __u64 __user *user_linfo; u32 i; @@ -2596,7 +3350,8 @@ done: return 0; } -static int bpf_map_get_info_by_fd(struct bpf_map *map, +static int bpf_map_get_info_by_fd(struct file *file, + struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2624,6 +3379,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, info.btf_key_type_id = map->btf_key_type_id; info.btf_value_type_id = map->btf_value_type_id; } + info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; if (bpf_map_is_dev_bound(map)) { err = bpf_map_offload_info_fill(&info, map); @@ -2638,7 +3394,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, return 0; } -static int bpf_btf_get_info_by_fd(struct btf *btf, +static int bpf_btf_get_info_by_fd(struct file *file, + struct btf *btf, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2653,6 +3410,42 @@ static int bpf_btf_get_info_by_fd(struct btf *btf, return btf_get_info_by_fd(btf, attr, uattr); } +static int bpf_link_get_info_by_fd(struct bpf_link *link, + const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); + struct bpf_link_info info; + u32 info_len = attr->info.info_len; + int err; + + err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); + if (err) + return err; + info_len = min_t(u32, sizeof(info), info_len); + + memset(&info, 0, sizeof(info)); + if (copy_from_user(&info, uinfo, info_len)) + return -EFAULT; + + info.type = link->type; + info.id = link->id; + info.prog_id = link->prog->aux->id; + + if (link->ops->fill_link_info) { + err = link->ops->fill_link_info(link, &info); + if (err) + return err; + } + + if (copy_to_user(uinfo, &info, info_len) || + put_user(info_len, &uattr->info.info_len)) + return -EFAULT; + + return 0; +} + + #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, @@ -2670,13 +3463,16 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, return -EBADFD; if (f.file->f_op == &bpf_prog_fops) - err = bpf_prog_get_info_by_fd(f.file->private_data, attr, + err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &bpf_map_fops) - err = bpf_map_get_info_by_fd(f.file->private_data, attr, + err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &btf_fops) - err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); + err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); + else if (f.file->f_op == &bpf_link_fops) + err = bpf_link_get_info_by_fd(f.file->private_data, + attr, uattr); else err = -EINVAL; @@ -2691,7 +3487,7 @@ static int bpf_btf_load(const union bpf_attr *attr) if (CHECK_ATTR(BPF_BTF_LOAD)) return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return -EPERM; return 
btf_new_fd(attr); @@ -2801,15 +3597,21 @@ static int bpf_task_fd_query(const union bpf_attr *attr, if (err) goto out; - if (file->f_op == &bpf_raw_tp_fops) { - struct bpf_raw_tracepoint *raw_tp = file->private_data; - struct bpf_raw_event_map *btp = raw_tp->btp; + if (file->f_op == &bpf_link_fops) { + struct bpf_link *link = file->private_data; - err = bpf_task_fd_query_copy(attr, uattr, - raw_tp->prog->aux->id, - BPF_FD_TYPE_RAW_TRACEPOINT, - btp->tp->name, 0, 0); - goto put_file; + if (link->ops == &bpf_raw_tp_link_lops) { + struct bpf_raw_tp_link *raw_tp = + container_of(link, struct bpf_raw_tp_link, link); + struct bpf_raw_event_map *btp = raw_tp->btp; + + err = bpf_task_fd_query_copy(attr, uattr, + raw_tp->link.prog->aux->id, + BPF_FD_TYPE_RAW_TRACEPOINT, + btp->tp->name, 0, 0); + goto put_file; + } + goto out_not_supp; } event = perf_get_event(file); @@ -2829,6 +3631,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr, goto put_file; } +out_not_supp: err = -ENOTSUPP; put_file: fput(file); @@ -2836,12 +3639,266 @@ out: return err; } +static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + if (attr->link_create.attach_type == BPF_TRACE_ITER && + prog->expected_attach_type == BPF_TRACE_ITER) + return bpf_iter_link_attach(attr, prog); + + return -EINVAL; +} + +#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len +static int link_create(union bpf_attr *attr) +{ + enum bpf_prog_type ptype; + struct bpf_prog *prog; + int ret; + + if (CHECK_ATTR(BPF_LINK_CREATE)) + return -EINVAL; + + ptype = attach_type_to_prog_type(attr->link_create.attach_type); + if (ptype == BPF_PROG_TYPE_UNSPEC) + return -EINVAL; + + prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + ret = bpf_prog_attach_check_attach_type(prog, + attr->link_create.attach_type); + if (ret) + goto err_out; + + switch (ptype) { + case BPF_PROG_TYPE_CGROUP_SKB: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_SOCK_OPS: + case BPF_PROG_TYPE_CGROUP_DEVICE: + case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + ret = cgroup_bpf_link_attach(attr, prog); + break; + case BPF_PROG_TYPE_TRACING: + ret = tracing_bpf_link_attach(attr, prog); + break; + case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_SK_LOOKUP: + ret = netns_bpf_link_create(attr, prog); + break; + default: + ret = -EINVAL; + } + +err_out: + if (ret < 0) + bpf_prog_put(prog); + return ret; +} + +#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd + +static int link_update(union bpf_attr *attr) +{ + struct bpf_prog *old_prog = NULL, *new_prog; + struct bpf_link *link; + u32 flags; + int ret; + + if (CHECK_ATTR(BPF_LINK_UPDATE)) + return -EINVAL; + + flags = attr->link_update.flags; + if (flags & ~BPF_F_REPLACE) + return -EINVAL; + + link = bpf_link_get_from_fd(attr->link_update.link_fd); + if (IS_ERR(link)) + return PTR_ERR(link); + + new_prog = bpf_prog_get(attr->link_update.new_prog_fd); + if (IS_ERR(new_prog)) { + ret = PTR_ERR(new_prog); + goto out_put_link; + } + + if (flags & BPF_F_REPLACE) { + old_prog = bpf_prog_get(attr->link_update.old_prog_fd); + if (IS_ERR(old_prog)) { + ret = PTR_ERR(old_prog); + old_prog = NULL; + goto out_put_progs; + } + } else if (attr->link_update.old_prog_fd) { + ret = -EINVAL; + goto out_put_progs; + } + + if (link->ops->update_prog) + ret = link->ops->update_prog(link, new_prog, old_prog); + else + ret = -EINVAL; + +out_put_progs: + if (old_prog) + 
bpf_prog_put(old_prog); + if (ret) + bpf_prog_put(new_prog); +out_put_link: + bpf_link_put(link); + return ret; +} + +#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd + +static int link_detach(union bpf_attr *attr) +{ + struct bpf_link *link; + int ret; + + if (CHECK_ATTR(BPF_LINK_DETACH)) + return -EINVAL; + + link = bpf_link_get_from_fd(attr->link_detach.link_fd); + if (IS_ERR(link)) + return PTR_ERR(link); + + if (link->ops->detach) + ret = link->ops->detach(link); + else + ret = -EOPNOTSUPP; + + bpf_link_put(link); + return ret; +} + +static int bpf_link_inc_not_zero(struct bpf_link *link) +{ + return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT; +} + +#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id + +static int bpf_link_get_fd_by_id(const union bpf_attr *attr) +{ + struct bpf_link *link; + u32 id = attr->link_id; + int fd, err; + + if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + spin_lock_bh(&link_idr_lock); + link = idr_find(&link_idr, id); + /* before link is "settled", ID is 0, pretend it doesn't exist yet */ + if (link) { + if (link->id) + err = bpf_link_inc_not_zero(link); + else + err = -EAGAIN; + } else { + err = -ENOENT; + } + spin_unlock_bh(&link_idr_lock); + + if (err) + return err; + + fd = bpf_link_new_fd(link); + if (fd < 0) + bpf_link_put(link); + + return fd; +} + +DEFINE_MUTEX(bpf_stats_enabled_mutex); + +static int bpf_stats_release(struct inode *inode, struct file *file) +{ + mutex_lock(&bpf_stats_enabled_mutex); + static_key_slow_dec(&bpf_stats_enabled_key.key); + mutex_unlock(&bpf_stats_enabled_mutex); + return 0; +} + +static const struct file_operations bpf_stats_fops = { + .release = bpf_stats_release, +}; + +static int bpf_enable_runtime_stats(void) +{ + int fd; + + mutex_lock(&bpf_stats_enabled_mutex); + + /* Set a very high limit to avoid overflow */ + if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { + mutex_unlock(&bpf_stats_enabled_mutex); + return -EBUSY; + } + + fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); + if (fd >= 0) + static_key_slow_inc(&bpf_stats_enabled_key.key); + + mutex_unlock(&bpf_stats_enabled_mutex); + return fd; +} + +#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type + +static int bpf_enable_stats(union bpf_attr *attr) +{ + + if (CHECK_ATTR(BPF_ENABLE_STATS)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + switch (attr->enable_stats.type) { + case BPF_STATS_RUN_TIME: + return bpf_enable_runtime_stats(); + default: + break; + } + return -EINVAL; +} + +#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags + +static int bpf_iter_create(union bpf_attr *attr) +{ + struct bpf_link *link; + int err; + + if (CHECK_ATTR(BPF_ITER_CREATE)) + return -EINVAL; + + if (attr->iter_create.flags) + return -EINVAL; + + link = bpf_link_get_from_fd(attr->iter_create.link_fd); + if (IS_ERR(link)) + return PTR_ERR(link); + + err = bpf_iter_new_fd(link); + bpf_link_put(link); + + return err; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr; int err; - if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) + if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) return -EPERM; err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); @@ -2934,6 +3991,28 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_MAP_LOOKUP_AND_DELETE_ELEM: err = map_lookup_and_delete_elem(&attr); break; + case 
BPF_LINK_CREATE: + err = link_create(&attr); + break; + case BPF_LINK_UPDATE: + err = link_update(&attr); + break; + case BPF_LINK_GET_FD_BY_ID: + err = bpf_link_get_fd_by_id(&attr); + break; + case BPF_LINK_GET_NEXT_ID: + err = bpf_obj_get_next_id(&attr, uattr, + &link_idr, &link_idr_lock); + break; + case BPF_ENABLE_STATS: + err = bpf_enable_stats(&attr); + break; + case BPF_ITER_CREATE: + err = bpf_iter_create(&attr); + break; + case BPF_LINK_DETACH: + err = link_detach(&attr); + break; default: err = -EINVAL; break; diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c index 7ae5dddd1fe6..11b3380887fa 100644 --- a/kernel/bpf/sysfs_btf.c +++ b/kernel/bpf/sysfs_btf.c @@ -9,15 +9,15 @@ #include <linux/sysfs.h> /* See scripts/link-vmlinux.sh, gen_btf() func for details */ -extern char __weak _binary__btf_vmlinux_bin_start[]; -extern char __weak _binary__btf_vmlinux_bin_end[]; +extern char __weak __start_BTF[]; +extern char __weak __stop_BTF[]; static ssize_t btf_vmlinux_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) { - memcpy(buf, _binary__btf_vmlinux_bin_start + off, len); + memcpy(buf, __start_BTF + off, len); return len; } @@ -30,16 +30,15 @@ static struct kobject *btf_kobj; static int __init btf_vmlinux_init(void) { - if (!_binary__btf_vmlinux_bin_start) + bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF; + + if (!__start_BTF || bin_attr_btf_vmlinux.size == 0) return 0; btf_kobj = kobject_create_and_add("btf", kernel_kobj); if (!btf_kobj) return -ENOMEM; - bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end - - _binary__btf_vmlinux_bin_start; - return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux); } diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c new file mode 100644 index 000000000000..c22d15060eaf --- /dev/null +++ b/kernel/bpf/task_iter.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ + +#include <linux/init.h> +#include <linux/namei.h> +#include <linux/pid_namespace.h> +#include <linux/fs.h> +#include <linux/fdtable.h> +#include <linux/filter.h> +#include <linux/btf_ids.h> + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; +}; + +struct bpf_iter_seq_task_info { + /* The first field must be struct bpf_iter_seq_task_common. + * this is assumed by {init, fini}_seq_pidns() callback functions. 
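+ * (Both callbacks cast their priv_data argument straight to this common struct.)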
+ */ + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +static struct task_struct *task_seq_get_next(struct pid_namespace *ns, + u32 *tid) +{ + struct task_struct *task = NULL; + struct pid *pid; + + rcu_read_lock(); + pid = idr_get_next(&ns->idr, tid); + if (pid) + task = get_pid_task(pid, PIDTYPE_PID); + rcu_read_unlock(); + + return task; +} + +static void *task_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_task_info *info = seq->private; + struct task_struct *task; + + task = task_seq_get_next(info->common.ns, &info->tid); + if (!task) + return NULL; + + if (*pos == 0) + ++*pos; + return task; +} + +static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_task_info *info = seq->private; + struct task_struct *task; + + ++*pos; + ++info->tid; + put_task_struct((struct task_struct *)v); + task = task_seq_get_next(info->common.ns, &info->tid); + if (!task) + return NULL; + + return task; +} + +struct bpf_iter__task { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct task_struct *, task); +}; + +DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task) + +static int __task_seq_show(struct seq_file *seq, struct task_struct *task, + bool in_stop) +{ + struct bpf_iter_meta meta; + struct bpf_iter__task ctx; + struct bpf_prog *prog; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (!prog) + return 0; + + meta.seq = seq; + ctx.meta = &meta; + ctx.task = task; + return bpf_iter_run_prog(prog, &ctx); +} + +static int task_seq_show(struct seq_file *seq, void *v) +{ + return __task_seq_show(seq, v, false); +} + +static void task_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__task_seq_show(seq, v, true); + else + put_task_struct((struct task_struct *)v); +} + +static const struct seq_operations task_seq_ops = { + .start = task_seq_start, + .next = task_seq_next, + .stop = task_seq_stop, + .show = task_seq_show, +}; + +struct bpf_iter_seq_task_file_info { + /* The first field must be struct bpf_iter_seq_task_common. + * this is assumed by {init, fini}_seq_pidns() callback functions. + */ + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct files_struct *files; + u32 tid; + u32 fd; +}; + +static struct file * +task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info, + struct task_struct **task, struct files_struct **fstruct) +{ + struct pid_namespace *ns = info->common.ns; + u32 curr_tid = info->tid, max_fds; + struct files_struct *curr_files; + struct task_struct *curr_task; + int curr_fd = info->fd; + + /* If this function returns a non-NULL file object, + * it held a reference to the task/files_struct/file. + * Otherwise, it does not hold any reference. 
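+ * The references are dropped by task_file_seq_next() once iteration moves past the file, or by task_file_seq_stop() at teardown.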
+ */ +again: + if (*task) { + curr_task = *task; + curr_files = *fstruct; + curr_fd = info->fd; + } else { + curr_task = task_seq_get_next(ns, &curr_tid); + if (!curr_task) + return NULL; + + curr_files = get_files_struct(curr_task); + if (!curr_files) { + put_task_struct(curr_task); + curr_tid = ++(info->tid); + info->fd = 0; + goto again; + } + + /* set *fstruct, *task and info->tid */ + *fstruct = curr_files; + *task = curr_task; + if (curr_tid == info->tid) { + curr_fd = info->fd; + } else { + info->tid = curr_tid; + curr_fd = 0; + } + } + + rcu_read_lock(); + max_fds = files_fdtable(curr_files)->max_fds; + for (; curr_fd < max_fds; curr_fd++) { + struct file *f; + + f = fcheck_files(curr_files, curr_fd); + if (!f) + continue; + + /* set info->fd */ + info->fd = curr_fd; + get_file(f); + rcu_read_unlock(); + return f; + } + + /* the current task is done, go to the next task */ + rcu_read_unlock(); + put_files_struct(curr_files); + put_task_struct(curr_task); + *task = NULL; + *fstruct = NULL; + info->fd = 0; + curr_tid = ++(info->tid); + goto again; +} + +static void *task_file_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_task_file_info *info = seq->private; + struct files_struct *files = NULL; + struct task_struct *task = NULL; + struct file *file; + + file = task_file_seq_get_next(info, &task, &files); + if (!file) { + info->files = NULL; + info->task = NULL; + return NULL; + } + + if (*pos == 0) + ++*pos; + info->task = task; + info->files = files; + + return file; +} + +static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_task_file_info *info = seq->private; + struct files_struct *files = info->files; + struct task_struct *task = info->task; + struct file *file; + + ++*pos; + ++info->fd; + fput((struct file *)v); + file = task_file_seq_get_next(info, &task, &files); + if (!file) { + info->files = NULL; + info->task = NULL; + return NULL; + } + + info->task = task; + info->files = files; + + return file; +} + +struct bpf_iter__task_file { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct task_struct *, task); + u32 fd __aligned(8); + __bpf_md_ptr(struct file *, file); +}; + +DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta, + struct task_struct *task, u32 fd, + struct file *file) + +static int __task_file_seq_show(struct seq_file *seq, struct file *file, + bool in_stop) +{ + struct bpf_iter_seq_task_file_info *info = seq->private; + struct bpf_iter__task_file ctx; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (!prog) + return 0; + + ctx.meta = &meta; + ctx.task = info->task; + ctx.fd = info->fd; + ctx.file = file; + return bpf_iter_run_prog(prog, &ctx); +} + +static int task_file_seq_show(struct seq_file *seq, void *v) +{ + return __task_file_seq_show(seq, v, false); +} + +static void task_file_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_task_file_info *info = seq->private; + + if (!v) { + (void)__task_file_seq_show(seq, v, true); + } else { + fput((struct file *)v); + put_files_struct(info->files); + put_task_struct(info->task); + info->files = NULL; + info->task = NULL; + } +} + +static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_task_common *common = priv_data; + + common->ns = get_pid_ns(task_active_pid_ns(current)); + return 0; +} + +static void fini_seq_pidns(void *priv_data) +{ + struct bpf_iter_seq_task_common *common = priv_data; + 
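+ /* drop the pid namespace reference taken by init_seq_pidns() */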
+ put_pid_ns(common->ns); +} + +static const struct seq_operations task_file_seq_ops = { + .start = task_file_seq_start, + .next = task_file_seq_next, + .stop = task_file_seq_stop, + .show = task_file_seq_show, +}; + +BTF_ID_LIST(btf_task_file_ids) +BTF_ID(struct, task_struct) +BTF_ID(struct, file) + +static const struct bpf_iter_seq_info task_seq_info = { + .seq_ops = &task_seq_ops, + .init_seq_private = init_seq_pidns, + .fini_seq_private = fini_seq_pidns, + .seq_priv_size = sizeof(struct bpf_iter_seq_task_info), +}; + +static struct bpf_iter_reg task_reg_info = { + .target = "task", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__task, task), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &task_seq_info, +}; + +static const struct bpf_iter_seq_info task_file_seq_info = { + .seq_ops = &task_file_seq_ops, + .init_seq_private = init_seq_pidns, + .fini_seq_private = fini_seq_pidns, + .seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info), +}; + +static struct bpf_iter_reg task_file_reg_info = { + .target = "task_file", + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__task_file, task), + PTR_TO_BTF_ID_OR_NULL }, + { offsetof(struct bpf_iter__task_file, file), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &task_file_seq_info, +}; + +static int __init task_iter_init(void) +{ + int ret; + + task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; + ret = bpf_iter_reg_target(&task_reg_info); + if (ret) + return ret; + + task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; + task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1]; + return bpf_iter_reg_target(&task_file_reg_info); +} +late_initcall(task_iter_init); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c new file mode 100644 index 000000000000..366996aebff0 --- /dev/null +++ b/kernel/bpf/trampoline.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#include <linux/hash.h> +#include <linux/bpf.h> +#include <linux/filter.h> +#include <linux/btf.h> + +/* dummy _ops. The verifier will operate on target program's ops. */ +const struct bpf_verifier_ops bpf_extension_verifier_ops = { +}; +const struct bpf_prog_ops bpf_extension_prog_ops = { +}; + +/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */ +#define TRAMPOLINE_HASH_BITS 10 +#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS) + +static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE]; + +/* serializes access to trampoline_table */ +static DEFINE_MUTEX(trampoline_mutex); + +void *bpf_jit_alloc_exec_page(void) +{ + void *image; + + image = bpf_jit_alloc_exec(PAGE_SIZE); + if (!image) + return NULL; + + set_vm_flush_reset_perms(image); + /* Keep image as writeable. The alternative is to keep flipping ro/rw + * everytime new program is attached or detached. + */ + set_memory_x((long)image, 1); + return image; +} + +struct bpf_trampoline *bpf_trampoline_lookup(u64 key) +{ + struct bpf_trampoline *tr; + struct hlist_head *head; + void *image; + int i; + + mutex_lock(&trampoline_mutex); + head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)]; + hlist_for_each_entry(tr, head, hlist) { + if (tr->key == key) { + refcount_inc(&tr->refcnt); + goto out; + } + } + tr = kzalloc(sizeof(*tr), GFP_KERNEL); + if (!tr) + goto out; + + /* is_root was checked earlier. 
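Trampolines are only created on behalf of privileged program loads. 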
No need for bpf_jit_charge_modmem() */ + image = bpf_jit_alloc_exec_page(); + if (!image) { + kfree(tr); + tr = NULL; + goto out; + } + + tr->key = key; + INIT_HLIST_NODE(&tr->hlist); + hlist_add_head(&tr->hlist, head); + refcount_set(&tr->refcnt, 1); + mutex_init(&tr->mutex); + for (i = 0; i < BPF_TRAMP_MAX; i++) + INIT_HLIST_HEAD(&tr->progs_hlist[i]); + tr->image = image; +out: + mutex_unlock(&trampoline_mutex); + return tr; +} + +static struct bpf_tramp_progs * +bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total) +{ + const struct bpf_prog_aux *aux; + struct bpf_tramp_progs *tprogs; + struct bpf_prog **progs; + int kind; + + *total = 0; + tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL); + if (!tprogs) + return ERR_PTR(-ENOMEM); + + for (kind = 0; kind < BPF_TRAMP_MAX; kind++) { + tprogs[kind].nr_progs = tr->progs_cnt[kind]; + *total += tr->progs_cnt[kind]; + progs = tprogs[kind].progs; + + hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) + *progs++ = aux->prog; + } + return tprogs; +} + +static int bpf_trampoline_update(struct bpf_trampoline *tr) +{ + void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2; + void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2; + struct bpf_tramp_progs *tprogs; + u32 flags = BPF_TRAMP_F_RESTORE_REGS; + struct bpf_prog_aux *aux; + int err, total; + + tprogs = bpf_trampoline_get_progs(tr, &total); + if (IS_ERR(tprogs)) + return PTR_ERR(tprogs); + + if (total == 0) { + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, + old_image, NULL); + tr->selector = 0; + goto out; + } + + if (tprogs[BPF_TRAMP_FEXIT].nr_progs || + tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs) + flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME; + + err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2, + &tr->func.model, flags, tprogs, + tr->func.addr); + if (err < 0) + goto out; + + if (tr->selector) + /* progs already running at this address */ + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, + old_image, new_image); + else + /* first time registering */ + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL, + new_image); + if (err) + goto out; + tr->selector++; +out: + kfree(tprogs); + return err; +} + +static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) +{ + switch (prog->expected_attach_type) { + case BPF_TRACE_FENTRY: + return BPF_TRAMP_FENTRY; + case BPF_MODIFY_RETURN: + return BPF_TRAMP_MODIFY_RETURN; + case BPF_TRACE_FEXIT: + return BPF_TRAMP_FEXIT; + case BPF_LSM_MAC: + if (!prog->aux->attach_func_proto->type) + /* The function returns void, we cannot modify its + * return value. + */ + return BPF_TRAMP_FEXIT; + else + return BPF_TRAMP_MODIFY_RETURN; + default: + return BPF_TRAMP_REPLACE; + } +} + +int bpf_trampoline_link_prog(struct bpf_prog *prog) +{ + enum bpf_tramp_prog_type kind; + struct bpf_trampoline *tr; + int err = 0; + int cnt; + + tr = prog->aux->trampoline; + kind = bpf_attach_type_to_tramp(prog); + mutex_lock(&tr->mutex); + if (tr->extension_prog) { + /* cannot attach fentry/fexit if extension prog is attached. + * cannot overwrite extension prog either. + */ + err = -EBUSY; + goto out; + } + cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT]; + if (kind == BPF_TRAMP_REPLACE) { + /* Cannot attach extension if fentry/fexit are in use. 
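An extension replaces the target's body via a direct BPF_MOD_JUMP patch, which would bypass the trampoline that dispatches to fentry/fexit progs. 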
*/ + if (cnt) { + err = -EBUSY; + goto out; + } + tr->extension_prog = prog; + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL, + prog->bpf_func); + goto out; + } + if (cnt >= BPF_MAX_TRAMP_PROGS) { + err = -E2BIG; + goto out; + } + if (!hlist_unhashed(&prog->aux->tramp_hlist)) { + /* prog already linked */ + err = -EBUSY; + goto out; + } + hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]); + tr->progs_cnt[kind]++; + err = bpf_trampoline_update(prog->aux->trampoline); + if (err) { + hlist_del(&prog->aux->tramp_hlist); + tr->progs_cnt[kind]--; + } +out: + mutex_unlock(&tr->mutex); + return err; +} + +/* bpf_trampoline_unlink_prog() should never fail. */ +int bpf_trampoline_unlink_prog(struct bpf_prog *prog) +{ + enum bpf_tramp_prog_type kind; + struct bpf_trampoline *tr; + int err; + + tr = prog->aux->trampoline; + kind = bpf_attach_type_to_tramp(prog); + mutex_lock(&tr->mutex); + if (kind == BPF_TRAMP_REPLACE) { + WARN_ON_ONCE(!tr->extension_prog); + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, + tr->extension_prog->bpf_func, NULL); + tr->extension_prog = NULL; + goto out; + } + hlist_del(&prog->aux->tramp_hlist); + tr->progs_cnt[kind]--; + err = bpf_trampoline_update(prog->aux->trampoline); +out: + mutex_unlock(&tr->mutex); + return err; +} + +void bpf_trampoline_put(struct bpf_trampoline *tr) +{ + if (!tr) + return; + mutex_lock(&trampoline_mutex); + if (!refcount_dec_and_test(&tr->refcnt)) + goto out; + WARN_ON_ONCE(mutex_is_locked(&tr->mutex)); + if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY]))) + goto out; + if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT]))) + goto out; + bpf_jit_free_exec(tr->image); + hlist_del(&tr->hlist); + kfree(tr); +out: + mutex_unlock(&trampoline_mutex); +} + +/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and preempt that + * are needed for trampoline. The macro is split into + * call _bpf_prog_enter + * call prog->bpf_func + * call __bpf_prog_exit + */ +u64 notrace __bpf_prog_enter(void) +{ + u64 start = 0; + + rcu_read_lock(); + preempt_disable(); + if (static_branch_unlikely(&bpf_stats_enabled_key)) + start = sched_clock(); + return start; +} + +void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start) +{ + struct bpf_prog_stats *stats; + + if (static_branch_unlikely(&bpf_stats_enabled_key) && + /* static_key could be enabled in __bpf_prog_enter + * and disabled in __bpf_prog_exit. + * And vice versa. + * Hence check that 'start' is not zero. 
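+ * A zero 'start' means stats were off at entry, so there is no valid timestamp to subtract.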
+ */ + start) { + stats = this_cpu_ptr(prog->aux->stats); + u64_stats_update_begin(&stats->syncp); + stats->cnt++; + stats->nsecs += sched_clock() - start; + u64_stats_update_end(&stats->syncp); + } + preempt_enable(); + rcu_read_unlock(); +} + +int __weak +arch_prepare_bpf_trampoline(void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_progs *tprogs, + void *orig_call) +{ + return -ENOTSUPP; +} + +static int __init init_trampolines(void) +{ + int i; + + for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++) + INIT_HLIST_HEAD(&trampoline_table[i]); + return 0; +} +late_initcall(init_trampolines); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a0b76b360d6f..ddc36bfe13e6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19,16 +19,20 @@ #include <linux/sort.h> #include <linux/perf_event.h> #include <linux/ctype.h> +#include <linux/error-injection.h> +#include <linux/bpf_lsm.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program @@ -171,6 +175,9 @@ struct bpf_verifier_stack_elem { #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 #define BPF_COMPLEXITY_LIMIT_STATES 64 +#define BPF_MAP_KEY_POISON (1ULL << 63) +#define BPF_MAP_KEY_SEEN (1ULL << 62) + #define BPF_MAP_PTR_UNPRIV 1UL #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ POISON_POINTER_DELTA)) @@ -178,12 +185,12 @@ struct bpf_verifier_stack_elem { static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { - return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON; + return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; } static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) { - return aux->map_state & BPF_MAP_PTR_UNPRIV; + return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; } static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, @@ -191,8 +198,37 @@ static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, { BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); unpriv |= bpf_map_ptr_unpriv(aux); - aux->map_state = (unsigned long)map | - (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); + aux->map_ptr_state = (unsigned long)map | + (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); +} + +static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux) +{ + return aux->map_key_state & BPF_MAP_KEY_POISON; +} + +static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux) +{ + return !(aux->map_key_state & BPF_MAP_KEY_SEEN); +} + +static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux) +{ + return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); +} + +static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state) +{ + bool poisoned = bpf_map_key_poisoned(aux); + + aux->map_key_state = state | BPF_MAP_KEY_SEEN | + (poisoned ? 
BPF_MAP_KEY_POISON : 0ULL); +} + +static bool bpf_pseudo_call(const struct bpf_insn *insn) +{ + return insn->code == (BPF_JMP | BPF_CALL) && + insn->src_reg == BPF_PSEUDO_CALL; } struct bpf_call_arg_meta { @@ -201,12 +237,17 @@ struct bpf_call_arg_meta { bool pkt_access; int regno; int access_size; - s64 msize_smax_value; - u64 msize_umax_value; + int mem_size; + u64 msize_max_value; int ref_obj_id; + int map_uid; int func_id; + u32 btf_id; + u32 subprogno; }; +struct btf *btf_vmlinux; + static DEFINE_MUTEX(bpf_verifier_lock); static const struct bpf_line_info * @@ -243,6 +284,10 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; + if (log->level == BPF_LOG_KERNEL) { + pr_err("BPF:%s\n", log->kbuf); + return; + } if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else @@ -280,6 +325,19 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) va_end(args); } +__printf(2, 3) void bpf_log(struct bpf_verifier_log *log, + const char *fmt, ...) +{ + va_list args; + + if (!bpf_verifier_log_needed(log)) + return; + + va_start(args, fmt); + bpf_verifier_vlog(log, fmt, args); + va_end(args); +} + static const char *ltrim(const char *s) { while (isspace(*s)) @@ -316,6 +374,24 @@ __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, env->prev_linfo = linfo; } +static void verbose_invalid_scalar(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, + struct tnum *range, const char *ctx, + const char *reg_name) +{ + char tn_buf[48]; + + verbose(env, "At %s the register %s ", ctx, reg_name); + if (!tnum_is_unknown(reg->var_off)) { + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(env, "has value %s", tn_buf); + } else { + verbose(env, "has unknown scalar value"); + } + tnum_strn(tn_buf, sizeof(tn_buf), *range); + verbose(env, " should have been in %s\n", tn_buf); +} + static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || @@ -330,12 +406,26 @@ static bool type_is_sk_pointer(enum bpf_reg_type type) type == PTR_TO_XDP_SOCK; } +static bool reg_type_not_null(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET || + type == PTR_TO_TCP_SOCK || + type == PTR_TO_MAP_VALUE || + type == PTR_TO_MAP_KEY || + type == PTR_TO_SOCK_COMMON || + type == PTR_TO_BTF_ID; +} + static bool reg_type_may_be_null(enum bpf_reg_type type) { return type == PTR_TO_MAP_VALUE_OR_NULL || type == PTR_TO_SOCKET_OR_NULL || type == PTR_TO_SOCK_COMMON_OR_NULL || - type == PTR_TO_TCP_SOCK_OR_NULL; + type == PTR_TO_MEM_OR_NULL || + type == PTR_TO_TCP_SOCK_OR_NULL || + type == PTR_TO_BTF_ID_OR_NULL || + type == PTR_TO_RDONLY_BUF_OR_NULL || + type == PTR_TO_RDWR_BUF_OR_NULL; } static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) @@ -349,7 +439,9 @@ static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL || type == PTR_TO_TCP_SOCK || - type == PTR_TO_TCP_SOCK_OR_NULL; + type == PTR_TO_TCP_SOCK_OR_NULL || + type == PTR_TO_MEM || + type == PTR_TO_MEM_OR_NULL; } static bool arg_type_may_be_refcounted(enum bpf_arg_type type) @@ -363,14 +455,37 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type) */ static bool is_release_function(enum bpf_func_id func_id) { - return func_id == BPF_FUNC_sk_release; + return func_id == BPF_FUNC_sk_release || + func_id == BPF_FUNC_ringbuf_submit || + func_id == BPF_FUNC_ringbuf_discard; } -static 
bool is_acquire_function(enum bpf_func_id func_id) +static bool may_be_acquire_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_lookup_tcp || func_id == BPF_FUNC_sk_lookup_udp || - func_id == BPF_FUNC_skc_lookup_tcp; + func_id == BPF_FUNC_skc_lookup_tcp || + func_id == BPF_FUNC_map_lookup_elem || + func_id == BPF_FUNC_ringbuf_reserve; +} + +static bool is_acquire_function(enum bpf_func_id func_id, + const struct bpf_map *map) +{ + enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; + + if (func_id == BPF_FUNC_sk_lookup_tcp || + func_id == BPF_FUNC_sk_lookup_udp || + func_id == BPF_FUNC_skc_lookup_tcp || + func_id == BPF_FUNC_ringbuf_reserve) + return true; + + if (func_id == BPF_FUNC_map_lookup_elem && + (map_type == BPF_MAP_TYPE_SOCKMAP || + map_type == BPF_MAP_TYPE_SOCKHASH)) + return true; + + return false; } static bool is_ptr_cast_function(enum bpf_func_id func_id) @@ -400,6 +515,16 @@ static const char * const reg_type_str[] = { [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", [PTR_TO_TP_BUFFER] = "tp_buffer", [PTR_TO_XDP_SOCK] = "xdp_sock", + [PTR_TO_MEM] = "mem", + [PTR_TO_MEM_OR_NULL] = "mem_or_null", + [PTR_TO_BTF_ID] = "ptr_", + [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", + [PTR_TO_RDONLY_BUF] = "rdonly_buf", + [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", + [PTR_TO_RDWR_BUF] = "rdwr_buf", + [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", + [PTR_TO_FUNC] = "func", + [PTR_TO_MAP_KEY] = "map_key", }; static char slot_type_char[] = { @@ -430,6 +555,12 @@ static struct bpf_func_state *func(struct bpf_verifier_env *env, return cur->frame[reg->frameno]; } +const char *kernel_type_name(u32 id) +{ + return btf_name_by_offset(btf_vmlinux, + btf_type_by_id(btf_vmlinux, id)->name_off); +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { @@ -454,6 +585,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { + if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL) + verbose(env, "%s", kernel_type_name(reg->btf_id)); verbose(env, "(id=%d", reg->id); if (reg_type_may_be_refcounted_or_null(t)) verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); @@ -462,6 +595,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || + t == PTR_TO_MAP_KEY || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", @@ -532,6 +666,10 @@ static void print_verifier_state(struct bpf_verifier_env *env, if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } + if (state->in_callback_fn) + verbose(env, " cb"); + if (state->in_async_callback_fn) + verbose(env, " async_cb"); verbose(env, "\n"); } @@ -1000,8 +1138,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; - reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
- true : false; + reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; __mark_reg_unbounded(reg); } @@ -1038,6 +1175,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, __mark_reg_not_init(env, regs + regno); } +static void mark_btf_ld_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, u32 regno, + enum bpf_reg_type reg_type, u32 btf_id) +{ + if (reg_type == SCALAR_VALUE) { + mark_reg_unknown(env, regs, regno); + return; + } + mark_reg_known_zero(env, regs, regno); + regs[regno].type = PTR_TO_BTF_ID; + regs[regno].btf_id = btf_id; +} + #define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) @@ -1056,10 +1206,6 @@ static void init_reg_state(struct bpf_verifier_env *env, regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; - - /* 1st arg to a function */ - regs[BPF_REG_1].type = PTR_TO_CTX; - mark_reg_known_zero(env, regs, BPF_REG_1); } #define BPF_MAIN_FUNC (-1) @@ -1073,6 +1219,53 @@ static void init_func_state(struct bpf_verifier_env *env, init_reg_state(env, state); } +/* Similar to push_stack(), but for async callbacks */ +static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx, + int subprog) +{ + struct bpf_verifier_stack_elem *elem; + struct bpf_func_state *frame; + + elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); + if (!elem) + goto err; + + elem->insn_idx = insn_idx; + elem->prev_insn_idx = prev_insn_idx; + elem->next = env->head; + env->head = elem; + env->stack_size++; + if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { + verbose(env, + "The sequence of %d jumps is too complex for async cb.\n", + env->stack_size); + goto err; + } + /* Unlike push_stack() do not copy_verifier_state(). + * The caller state doesn't matter. + * This is async callback. It starts in a fresh stack. + * Initialize it similar to do_check_common(). + */ + elem->st.branches = 1; + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + goto err; + init_func_state(env, frame, + BPF_MAIN_FUNC /* callsite */, + 0 /* frameno within this callchain */, + subprog /* subprog number within this prog */); + elem->st.frame[0] = frame; + return &elem->st; +err: + free_verifier_state(env->cur_state, true); + env->cur_state = NULL; + /* pop all elements and return */ + while (!pop_stack(env, NULL, NULL)); + return NULL; +} + + enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ @@ -1108,7 +1301,7 @@ static int add_subprog(struct bpf_verifier_env *env, int off) } ret = find_subprog(env, off); if (ret >= 0) - return 0; + return ret; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; @@ -1116,7 +1309,7 @@ static int add_subprog(struct bpf_verifier_env *env, int off) env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); - return 0; + return env->subprog_cnt - 1; } static int check_subprogs(struct bpf_verifier_env *env) @@ -1133,12 +1326,24 @@ static int check_subprogs(struct bpf_verifier_env *env) /* determine subprog starts. 
The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { - if (insn[i].code != (BPF_JMP | BPF_CALL)) + if (bpf_pseudo_func(insn + i)) { + if (!env->bpf_capable) { + verbose(env, + "function pointers are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); + return -EPERM; + } + ret = add_subprog(env, i + insn[i].imm + 1); + if (ret < 0) + return ret; + /* remember subprog */ + insn[i + 1].imm = ret; continue; - if (insn[i].src_reg != BPF_PSEUDO_CALL) + } + if (!bpf_pseudo_call(insn + i)) continue; - if (!env->allow_ptr_leaks) { - verbose(env, "function calls to other bpf functions are allowed for root only\n"); + if (!env->bpf_capable) { + verbose(env, + "function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); return -EPERM; } ret = add_subprog(env, i + insn[i].imm + 1); @@ -1161,6 +1366,10 @@ static int check_subprogs(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; + if (code == (BPF_JMP | BPF_CALL) && + insn[i].imm == BPF_FUNC_tail_call && + insn[i].src_reg != BPF_PSEUDO_CALL) + subprog[cur_subprog].has_tail_call = true; if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) @@ -1672,8 +1881,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, bool new_marks = false; int i, err; - if (!env->allow_ptr_leaks) - /* backtracking is root only for now */ + if (!env->bpf_capable) return 0; func = st->frame[st->curframe]; @@ -1850,6 +2058,14 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: + case PTR_TO_BTF_ID: + case PTR_TO_BTF_ID_OR_NULL: + case PTR_TO_RDONLY_BUF: + case PTR_TO_RDONLY_BUF_OR_NULL: + case PTR_TO_RDWR_BUF: + case PTR_TO_RDWR_BUF_OR_NULL: + case PTR_TO_FUNC: + case PTR_TO_MAP_KEY: return true; default: return false; @@ -1867,6 +2083,15 @@ static bool register_is_const(struct bpf_reg_state *reg) return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); } +static bool __is_pointer_value(bool allow_ptr_leaks, + const struct bpf_reg_state *reg) +{ + if (allow_ptr_leaks) + return false; + + return reg->type != SCALAR_VALUE; +} + static void save_register_state(struct bpf_func_state *state, int spi, struct bpf_reg_state *reg) { @@ -1908,9 +2133,22 @@ static int check_stack_write(struct bpf_verifier_env *env, cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0) reg = &cur->regs[value_regno]; + if (!env->bypass_spec_v4) { + bool sanitize = reg && is_spillable_regtype(reg->type); + + for (i = 0; i < size; i++) { + if (state->stack[spi].slot_type[i] == STACK_INVALID) { + sanitize = true; + break; + } + } + + if (sanitize) + env->insn_aux_data[insn_idx].sanitize_stack_spill = true; + } if (reg && size == BPF_REG_SIZE && register_is_const(reg) && - !register_is_null(reg) && env->allow_ptr_leaks) { + !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit * stack slot address like [fp - 8]. 
Other spill of @@ -1930,47 +2168,10 @@ static int check_stack_write(struct bpf_verifier_env *env, verbose(env, "invalid size of register spill\n"); return -EACCES; } - if (state != cur && reg->type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - - if (!env->allow_ptr_leaks) { - bool sanitize = false; - - if (state->stack[spi].slot_type[0] == STACK_SPILL && - register_is_const(&state->stack[spi].spilled_ptr)) - sanitize = true; - for (i = 0; i < BPF_REG_SIZE; i++) - if (state->stack[spi].slot_type[i] == STACK_MISC) { - sanitize = true; - break; - } - if (sanitize) { - int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; - int soff = (-spi - 1) * BPF_REG_SIZE; - - /* detected reuse of integer stack slot with a pointer - * which means either llvm is reusing stack slot or - * an attacker is trying to exploit CVE-2018-3639 - * (speculative store bypass) - * Have to sanitize that slot with preemptive - * store of zero. - */ - if (*poff && *poff != soff) { - /* disallow programs where single insn stores - * into two different stack slots, since verifier - * cannot sanitize them - */ - verbose(env, - "insn %d cannot access two stack slots fp%d and fp%d", - insn_idx, *poff, soff); - return -EINVAL; - } - *poff = soff; - } - } save_register_state(state, spi, reg); } else { u8 type = STACK_MISC; @@ -2057,6 +2258,16 @@ static int check_stack_read(struct bpf_verifier_env *env, * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; + } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { + /* If value_regno==-1, the caller is asking us whether + * it is acceptable to use this value as a SCALAR_VALUE + * (e.g. for XADD). + * We must not allow unprivileged callers to do that + * with spilled pointers. 
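A minimal sketch of what the new check_stack_read() branch rejects for unprivileged users, assuming the pre-atomics BPF_STX_XADD() macro: the atomic add reads the stack slot with value_regno == -1, the slot holds a spilled pointer, so __is_pointer_value() trips and verification fails with "leaking pointer from stack off -8".

    struct bpf_insn insns[] = {
            BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),            /* r6 = fp (a pointer) */
            BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),  /* spill r6 to fp-8 */
            BPF_MOV64_IMM(BPF_REG_0, 1),
            BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* xadd on the spilled ptr */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };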
+ */ + verbose(env, "leaking pointer from stack off %d\n", + off); + return -EACCES; } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { @@ -2148,32 +2359,53 @@ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, return 0; } -/* check read/write into map element returned by bpf_map_lookup_elem() */ -static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, - int size, bool zero_size_allowed) +/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ +static int __check_mem_access(struct bpf_verifier_env *env, int regno, + int off, int size, u32 mem_size, + bool zero_size_allowed) { - struct bpf_reg_state *regs = cur_regs(env); - struct bpf_map *map = regs[regno].map_ptr; + bool size_ok = size > 0 || (size == 0 && zero_size_allowed); + struct bpf_reg_state *reg; - if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || - off + size > map->value_size) { + if (off >= 0 && size_ok && (u64)off + size <= mem_size) + return 0; + + reg = &cur_regs(env)[regno]; + switch (reg->type) { + case PTR_TO_MAP_KEY: + verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", + mem_size, off, size); + break; + case PTR_TO_MAP_VALUE: verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", - map->value_size, off, size); - return -EACCES; + mem_size, off, size); + break; + case PTR_TO_PACKET: + case PTR_TO_PACKET_META: + case PTR_TO_PACKET_END: + verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", + off, size, regno, reg->id, off, mem_size); + break; + case PTR_TO_MEM: + default: + verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", + mem_size, off, size); } - return 0; + + return -EACCES; } -/* check read/write into a map element with possible variable offset */ -static int check_map_access(struct bpf_verifier_env *env, u32 regno, - int off, int size, bool zero_size_allowed) +/* check read/write into a memory region with possible variable offset */ +static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, + int off, int size, u32 mem_size, + bool zero_size_allowed) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg = &state->regs[regno]; int err; - /* We may have adjusted the register to this map value, so we + /* We may have adjusted the register pointing to memory region, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ @@ -2194,10 +2426,10 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, regno); return -EACCES; } - err = __check_map_access(env, regno, reg->smin_value + off, size, - zero_size_allowed); + err = __check_mem_access(env, regno, reg->smin_value + off, size, + mem_size, zero_size_allowed); if (err) { - verbose(env, "R%d min value is outside of the array range\n", + verbose(env, "R%d min value is outside of the allowed memory range\n", regno); return err; } @@ -2207,18 +2439,38 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, * If reg->umax_value + off could overflow, treat that as unbounded too. 
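These paired smin/umax checks are what force the familiar clamping idiom in BPF C: both extremes of a variable offset must be provably in range before the access. A sketch, with the map and its 64-byte value size assumed:

    __u32 key = 0, idx = bpf_get_prandom_u32();   /* idx: unknown scalar */
    char *val;

    val = bpf_map_lookup_elem(&my_map, &key);
    if (!val)
            return 0;
    if (idx > 60)          /* establishes umax_value = 60 (smin already >= 0) */
            return 0;
    return val[idx];       /* min and max offsets both fall inside value_size */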
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { - verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", + verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", regno); return -EACCES; } - err = __check_map_access(env, regno, reg->umax_value + off, size, - zero_size_allowed); - if (err) - verbose(env, "R%d max value is outside of the array range\n", + err = __check_mem_access(env, regno, reg->umax_value + off, size, + mem_size, zero_size_allowed); + if (err) { + verbose(env, "R%d max value is outside of the allowed memory range\n", regno); + return err; + } - if (map_value_has_spin_lock(reg->map_ptr)) { - u32 lock = reg->map_ptr->spin_lock_off; + return 0; +} + +/* check read/write into a map element with possible variable offset */ +static int check_map_access(struct bpf_verifier_env *env, u32 regno, + int off, int size, bool zero_size_allowed) +{ + struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_func_state *state = vstate->frame[vstate->curframe]; + struct bpf_reg_state *reg = &state->regs[regno]; + struct bpf_map *map = reg->map_ptr; + int err; + + err = check_mem_region_access(env, regno, off, size, map->value_size, + zero_size_allowed); + if (err) + return err; + + if (map_value_has_spin_lock(map)) { + u32 lock = map->spin_lock_off; /* if any part of struct bpf_spin_lock can be touched by * load/store reject this program. @@ -2231,6 +2483,15 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, return -EACCES; } } + if (map_value_has_timer(map)) { + u32 t = map->timer_off; + + if (reg->smin_value + off < t + sizeof(struct bpf_timer) && + t < reg->umax_value + off + size) { + verbose(env, "bpf_timer cannot be accessed directly by load/store\n"); + return -EACCES; + } + } return err; } @@ -2276,21 +2537,6 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, } } -static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, - int off, int size, bool zero_size_allowed) -{ - struct bpf_reg_state *regs = cur_regs(env); - struct bpf_reg_state *reg = ®s[regno]; - - if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || - (u64)off + size > reg->range) { - verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", - off, size, regno, reg->id, reg->off, reg->range); - return -EACCES; - } - return 0; -} - static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { @@ -2311,16 +2557,17 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, regno); return -EACCES; } - err = __check_packet_access(env, regno, off, size, zero_size_allowed); + err = __check_mem_access(env, regno, off, size, reg->range, + zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } - /* __check_packet_access has made sure "off + size - 1" is within u16. + /* __check_mem_access has made sure "off + size - 1" is within u16. * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, * otherwise find_good_pkt_pointers would have refused to set range info - * that __check_packet_access would have rejected this pkt access. + * that __check_mem_access would have rejected this pkt access. * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 
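Concretely, the map_value_has_timer() guard above rejects any load or store whose range can overlap the embedded timer, while the rest of the value stays accessible. Layout sketch (names assumed):

    struct map_val {
            __u64 counter;           /* bytes [0, 8): freely accessible */
            struct bpf_timer timer;  /* any direct load/store overlapping this
                                      * range fails with "bpf_timer cannot be
                                      * accessed directly by load/store" */
            __u64 stats;             /* accessible again past the timer */
    };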
*/ env->prog->aux->max_pkt_offset = @@ -2332,10 +2579,12 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, - enum bpf_access_type t, enum bpf_reg_type *reg_type) + enum bpf_access_type t, enum bpf_reg_type *reg_type, + u32 *btf_id) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, + .log = &env->log, }; if (env->ops->is_valid_access && @@ -2349,7 +2598,10 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, */ *reg_type = info.reg_type; - env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; + if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) + *btf_id = info.btf_id; + else + env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; @@ -2417,15 +2669,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, return -EACCES; } -static bool __is_pointer_value(bool allow_ptr_leaks, - const struct bpf_reg_state *reg) -{ - if (allow_ptr_leaks) - return false; - - return reg->type != SCALAR_VALUE; -} - static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; @@ -2541,6 +2784,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, case PTR_TO_FLOW_KEYS: pointer_desc = "flow keys "; break; + case PTR_TO_MAP_KEY: + pointer_desc = "key "; + break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; @@ -2603,6 +2849,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env) int ret_prog[MAX_CALL_FRAMES]; process_func: + /* protect against potential stack overflow that might happen when + * bpf2bpf calls get combined with tailcalls. Limit the caller's stack + * depth for such case down to 256 so that the worst case scenario + * would result in 8k stack size (32 which is tailcall limit * 256 = + * 8k). + * + * To get the idea what might happen, see an example: + * func1 -> sub rsp, 128 + * subfunc1 -> sub rsp, 256 + * tailcall1 -> add rsp, 256 + * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) + * subfunc2 -> sub rsp, 64 + * subfunc22 -> sub rsp, 128 + * tailcall2 -> add rsp, 128 + * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) + * + * tailcall will unwind the current stack frame but it will not get rid + * of caller's stack as shown on the example above. + */ + if (idx && subprog[idx].has_tail_call && depth >= 256) { + verbose(env, + "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", + depth); + return -EACCES; + } /* round up to 32-bytes, since this is granularity * of interpreter stack size */ @@ -2615,22 +2886,32 @@ process_func: continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { - if (insn[i].code != (BPF_JMP | BPF_CALL)) - continue; - if (insn[i].src_reg != BPF_PSEUDO_CALL) + int next_insn; + + if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ - i = i + insn[i].imm + 1; - idx = find_subprog(env, i); + next_insn = i + insn[i].imm + 1; + idx = find_subprog(env, next_insn); if (idx < 0) { WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", - i); + next_insn); return -EFAULT; } + if (subprog[idx].is_async_cb) { + if (subprog[idx].has_tail_call) { + verbose(env, "verifier bug. subprog has tail_call and async cb\n"); + return -EFAULT; + } + /* async callbacks don't increase bpf prog stack size */ + continue; + } + i = next_insn; + frame++; if (frame >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep !\n", @@ -2667,8 +2948,8 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env, } #endif -static int check_ctx_reg(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, int regno) +int check_ctx_reg(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. @@ -2691,14 +2972,15 @@ static int check_ctx_reg(struct bpf_verifier_env *env, return 0; } -static int check_tp_buffer_access(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, - int regno, int off, int size) +static int __check_buffer_access(struct bpf_verifier_env *env, + const char *buf_info, + const struct bpf_reg_state *reg, + int regno, int off, int size) { if (off < 0) { verbose(env, - "R%d invalid tracepoint buffer access: off=%d, size=%d", - regno, off, size); + "R%d invalid %s buffer access: off=%d, size=%d", + regno, buf_info, off, size); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { @@ -2710,12 +2992,43 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env, regno, off, tn_buf); return -EACCES; } + return 0; +} + +static int check_tp_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size) +{ + int err; + + err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); + if (err) + return err; + if (off + size > env->prog->aux->max_tp_access) env->prog->aux->max_tp_access = off + size; return 0; } +static int check_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size, + bool zero_size_allowed, + const char *buf_info, + u32 *max_access) +{ + int err; + + err = __check_buffer_access(env, buf_info, reg, regno, off, size); + if (err) + return err; + + if (off + size > *max_access) + *max_access = off + size; + + return 0; +} /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE @@ -2740,6 +3053,147 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) reg->smax_value = reg->umax_value; } +static bool bpf_map_is_rdonly(const struct bpf_map *map) +{ + return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; +} + +static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) +{ + void *ptr; + u64 addr; + int err; + + err = map->ops->map_direct_value_addr(map, &addr, off); + if (err) + return err; + ptr = (void *)(long)addr + off; + + switch (size) { + case sizeof(u8): + *val = (u64)*(u8 *)ptr; + break; + case sizeof(u16): + *val = (u64)*(u16 *)ptr; + break; + case sizeof(u32): + *val = (u64)*(u32 *)ptr; + break; + case sizeof(u64): + *val = *(u64 *)ptr; + break; + default: + return -EINVAL; + } + return 0; +} + +static int check_ptr_to_btf_access(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + int regno, int off, int size, + enum bpf_access_type atype, + int value_regno) +{ + struct bpf_reg_state *reg = regs + regno; + const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); + const char *tname = 
btf_name_by_offset(btf_vmlinux, t->name_off); + u32 btf_id; + int ret; + + if (off < 0) { + verbose(env, + "R%d is ptr_%s invalid negative access: off=%d\n", + regno, tname, off); + return -EACCES; + } + if (!tnum_is_const(reg->var_off) || reg->var_off.value) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(env, + "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", + regno, tname, off, tn_buf); + return -EACCES; + } + + if (env->ops->btf_struct_access) { + ret = env->ops->btf_struct_access(&env->log, t, off, size, + atype, &btf_id); + } else { + if (atype != BPF_READ) { + verbose(env, "only read is supported\n"); + return -EACCES; + } + + ret = btf_struct_access(&env->log, t, off, size, atype, + &btf_id); + } + + if (ret < 0) + return ret; + + if (atype == BPF_READ && value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + + return 0; +} + +static int check_ptr_to_map_access(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + int regno, int off, int size, + enum bpf_access_type atype, + int value_regno) +{ + struct bpf_reg_state *reg = regs + regno; + struct bpf_map *map = reg->map_ptr; + const struct btf_type *t; + const char *tname; + u32 btf_id; + int ret; + + if (!btf_vmlinux) { + verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); + return -ENOTSUPP; + } + + if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { + verbose(env, "map_ptr access not supported for map type %d\n", + map->map_type); + return -ENOTSUPP; + } + + t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + + if (!env->allow_ptr_to_map_access) { + verbose(env, + "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", + tname); + return -EPERM; + } + + if (off < 0) { + verbose(env, "R%d is %s invalid negative access: off=%d\n", + regno, tname, off); + return -EACCES; + } + + if (atype != BPF_READ) { + verbose(env, "only read from %s is supported\n", tname); + return -EACCES; + } + + ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); + if (ret < 0) + return ret; + + if (value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + + return 0; +} + + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -2767,7 +3221,19 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn /* for access checks, reg->off is just part of off */ off += reg->off; - if (reg->type == PTR_TO_MAP_VALUE) { + if (reg->type == PTR_TO_MAP_KEY) { + if (t == BPF_WRITE) { + verbose(env, "write to change key R%d not allowed\n", regno); + return -EACCES; + } + + err = check_mem_region_access(env, regno, off, size, + reg->map_ptr->key_size, false); + if (err) + return err; + if (value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); @@ -2777,11 +3243,40 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (err) return err; err = check_map_access(env, regno, off, size, false); + if (!err && t == BPF_READ && value_regno >= 0) { + struct bpf_map *map = reg->map_ptr; + + /* if map is read-only, track its contents as scalars */ + if 
(tnum_is_const(reg->var_off) && + bpf_map_is_rdonly(map) && + map->ops->map_direct_value_addr) { + int map_off = off + reg->var_off.value; + u64 val = 0; + + err = bpf_map_direct_read(map, map_off, size, + &val); + if (err) + return err; + + regs[value_regno].type = SCALAR_VALUE; + __mark_reg_known(®s[value_regno], val); + } else { + mark_reg_unknown(env, regs, value_regno); + } + } + } else if (reg->type == PTR_TO_MEM) { + if (t == BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { + verbose(env, "R%d leaks addr into mem\n", value_regno); + return -EACCES; + } + err = check_mem_region_access(env, regno, off, size, + reg->mem_size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); - } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; + u32 btf_id = 0; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { @@ -2793,7 +3288,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (err < 0) return err; - err = check_ctx_access(env, insn_idx, off, size, t, ®_type); + err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf_id); + if (err) + verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter @@ -2812,6 +3309,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * a sub-register. */ regs[value_regno].subreg_def = DEF_NOT_SUBREG; + if (reg_type == PTR_TO_BTF_ID || + reg_type == PTR_TO_BTF_ID_OR_NULL) + regs[value_regno].btf_id = btf_id; } regs[value_regno].type = reg_type; } @@ -2871,6 +3371,29 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_tp_buffer_access(env, reg, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_BTF_ID) { + err = check_ptr_to_btf_access(env, regs, regno, off, size, t, + value_regno); + } else if (reg->type == CONST_PTR_TO_MAP) { + err = check_ptr_to_map_access(env, regs, regno, off, size, t, + value_regno); + } else if (reg->type == PTR_TO_RDONLY_BUF) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", + regno, reg_type_str[reg->type]); + return -EACCES; + } + err = check_buffer_access(env, reg, regno, off, size, "rdonly", + false, + &env->prog->aux->max_rdonly_access); + if (!err && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_RDWR_BUF) { + err = check_buffer_access(env, reg, regno, off, size, "rdwr", + false, + &env->prog->aux->max_rdwr_access); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -2992,7 +3515,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, * Spectre masking for stack ALU. * See also retrieve_ptr_limit(). 
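The userspace half of the read-only map path above, sketched with current libbpf APIs (error handling omitted): create the array with BPF_F_RDONLY_PROG, populate it, then freeze it so bpf_map_is_rdonly() holds and constant-offset loads fold into known scalars.

    LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_RDONLY_PROG);
    int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "cfg",
                            sizeof(__u32), 64, 1, &opts);
    __u32 key = 0;
    char value[64] = { /* configuration bytes */ };

    bpf_map_update_elem(fd, &key, value, BPF_ANY);
    bpf_map_freeze(fd);    /* no further userspace writes; map->frozen is set */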
*/ - if (!env->allow_ptr_leaks) { + if (!env->bypass_spec_v1) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); @@ -3054,6 +3577,11 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, *stype = STACK_MISC; goto mark; } + + if (state->stack[spi].slot_type[0] == STACK_SPILL && + state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) + goto mark; + if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); @@ -3096,6 +3624,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); + case PTR_TO_MAP_KEY: + return check_mem_region_access(env, regno, reg->off, access_size, + reg->map_ptr->key_size, false); case PTR_TO_MAP_VALUE: if (check_map_access_type(env, regno, reg->off, access_size, meta && meta->raw_mode ? BPF_WRITE : @@ -3103,6 +3634,22 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return -EACCES; return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); + case PTR_TO_RDONLY_BUF: + if (meta && meta->raw_mode) + return -EACCES; + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdonly", + &env->prog->aux->max_rdonly_access); + case PTR_TO_RDWR_BUF: + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdwr", + &env->prog->aux->max_rdwr_access); + case PTR_TO_MEM: + return check_mem_region_access(env, regno, reg->off, + access_size, reg->mem_size, + zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); @@ -3194,6 +3741,54 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, return 0; } +static int process_timer_func(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) +{ + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + bool is_const = tnum_is_const(reg->var_off); + struct bpf_map *map = reg->map_ptr; + u64 val = reg->var_off.value; + + if (!is_const) { + verbose(env, + "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", + regno); + return -EINVAL; + } + if (!map->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", + map->name); + return -EINVAL; + } + if (!map_value_has_timer(map)) { + if (map->timer_off == -E2BIG) + verbose(env, + "map '%s' has more than one 'struct bpf_timer'\n", + map->name); + else if (map->timer_off == -ENOENT) + verbose(env, + "map '%s' doesn't have 'struct bpf_timer'\n", + map->name); + else + verbose(env, + "map '%s' is not a struct type or bpf_timer is mangled\n", + map->name); + return -EINVAL; + } + if (map->timer_off != val + reg->off) { + verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", + val + reg->off, map->timer_off); + return -EINVAL; + } + if (meta->map_ptr) { + verbose(env, "verifier bug. 
Two map pointers in a timer helper\n"); + return -EFAULT; + } + meta->map_uid = reg->map_uid; + meta->map_ptr = map; + return 0; +} + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || @@ -3207,6 +3802,17 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) type == ARG_CONST_SIZE_OR_ZERO; } +static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type) +{ + return type == ARG_PTR_TO_ALLOC_MEM || + type == ARG_PTR_TO_ALLOC_MEM_OR_NULL; +} + +static bool arg_type_is_alloc_size(enum bpf_arg_type type) +{ + return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; +} + static bool arg_type_is_int_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_INT || @@ -3223,12 +3829,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type) return -EINVAL; } -static int check_func_arg(struct bpf_verifier_env *env, u32 regno, - enum bpf_arg_type arg_type, - struct bpf_call_arg_meta *meta) +static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + struct bpf_call_arg_meta *meta, + const struct bpf_func_proto *fn) { + u32 regno = BPF_REG_1 + arg; struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected_type, type = reg->type; + enum bpf_arg_type arg_type = fn->arg_type[arg]; int err = 0; if (arg_type == ARG_DONTCARE) @@ -3263,10 +3871,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && + type != PTR_TO_MAP_KEY && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || - arg_type == ARG_CONST_SIZE_OR_ZERO) { + arg_type == ARG_CONST_SIZE_OR_ZERO || + arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; @@ -3274,13 +3884,17 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; - } else if (arg_type == ARG_PTR_TO_CTX) { + } else if (arg_type == ARG_PTR_TO_CTX || + arg_type == ARG_PTR_TO_CTX_OR_NULL) { expected_type = PTR_TO_CTX; - if (type != expected_type) - goto err_type; - err = check_ctx_reg(env, reg, regno); - if (err < 0) - return err; + if (!(register_is_null(reg) && + arg_type == ARG_PTR_TO_CTX_OR_NULL)) { + if (type != expected_type) + goto err_type; + err = check_ctx_reg(env, reg, regno); + if (err < 0) + return err; + } } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { expected_type = PTR_TO_SOCK_COMMON; /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ @@ -3295,10 +3909,37 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, } meta->ref_obj_id = reg->ref_obj_id; } - } else if (arg_type == ARG_PTR_TO_SOCKET) { + } else if (arg_type == ARG_PTR_TO_SOCKET || + arg_type == ARG_PTR_TO_SOCKET_OR_NULL) { expected_type = PTR_TO_SOCKET; + if (!(register_is_null(reg) && + arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) { + if (type != expected_type) + goto err_type; + } + } else if (arg_type == ARG_PTR_TO_BTF_ID) { + expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; + if (!fn->check_btf_id) { + if (reg->btf_id != meta->btf_id) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + kernel_type_name(reg->btf_id), regno); + + return -EACCES; + } + } else if (!fn->check_btf_id(reg->btf_id, arg)) { + verbose(env, "Helper does not support %s in R%d\n", + kernel_type_name(reg->btf_id), regno); + + return -EACCES; + } + if (!tnum_is_const(reg->var_off) || 
reg->var_off.value || reg->off) { + verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", + regno); + return -EACCES; + } } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { if (meta->func_id == BPF_FUNC_spin_lock) { if (process_spin_lock(env, regno, true)) @@ -3317,19 +3958,49 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, * happens during stack boundary checking. */ if (register_is_null(reg) && - arg_type == ARG_PTR_TO_MEM_OR_NULL) + (arg_type == ARG_PTR_TO_MEM_OR_NULL || + arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && + type != PTR_TO_RDONLY_BUF && + type != PTR_TO_RDWR_BUF && + type != PTR_TO_MEM && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; + } else if (arg_type_is_alloc_mem_ptr(arg_type)) { + expected_type = PTR_TO_MEM; + if (register_is_null(reg) && + arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL) + /* final test in check_stack_boundary() */; + else if (type != expected_type) + goto err_type; + if (meta->ref_obj_id) { + verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", + regno, reg->ref_obj_id, + meta->ref_obj_id); + return -EFAULT; + } + meta->ref_obj_id = reg->ref_obj_id; } else if (arg_type_is_int_ptr(arg_type)) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; + } else if (arg_type == ARG_PTR_TO_FUNC) { + expected_type = PTR_TO_FUNC; + if (type != expected_type) + goto err_type; + } else if (arg_type == ARG_PTR_TO_STACK_OR_NULL) { + expected_type = PTR_TO_STACK; + if (type != expected_type) + goto err_type; + } else if (arg_type == ARG_PTR_TO_TIMER) { + expected_type = PTR_TO_MAP_VALUE; + if (type != expected_type) + goto err_type; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; @@ -3337,7 +4008,34 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ + if (meta->map_ptr) { + /* Use map_uid (which is unique id of inner map) to reject: + * inner_map1 = bpf_map_lookup_elem(outer_map, key1) + * inner_map2 = bpf_map_lookup_elem(outer_map, key2) + * if (inner_map1 && inner_map2) { + * timer = bpf_map_lookup_elem(inner_map1); + * if (timer) + * // mismatch would have been allowed + * bpf_timer_init(timer, inner_map2); + * } + * + * Comparing map_ptr is enough to distinguish normal and outer maps. + */ + if (meta->map_ptr != reg->map_ptr || + meta->map_uid != reg->map_uid) { + verbose(env, + "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", + meta->map_uid, reg->map_uid); + return -EINVAL; + } + } meta->map_ptr = reg->map_ptr; + meta->map_uid = reg->map_uid; + } else if (arg_type == ARG_PTR_TO_FUNC) { + meta->subprogno = reg->subprogno; + } else if (arg_type == ARG_PTR_TO_TIMER) { + if (process_timer_func(env, regno, meta)) + return -EACCES; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within @@ -3377,8 +4075,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, /* remember the mem_size which may be used later * to refine return values. 
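The new argument and return types handled above correspond to helper protos declared elsewhere in the tree; the ringbuf reserve helper, for instance, is wired up roughly as:

    const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
            .func           = bpf_ringbuf_reserve,
            .ret_type       = RET_PTR_TO_ALLOC_MEM_OR_NULL, /* R0: PTR_TO_MEM_OR_NULL */
            .arg1_type      = ARG_CONST_MAP_PTR,
            .arg2_type      = ARG_CONST_ALLOC_SIZE_OR_ZERO, /* must be a known constant */
            .arg3_type      = ARG_ANYTHING,
    };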
*/ - meta->msize_smax_value = reg->smax_value; - meta->msize_umax_value = reg->umax_value; + meta->msize_max_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. @@ -3415,6 +4112,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, zero_size_allowed, meta); if (!err) err = mark_chain_precision(env, regno); + } else if (arg_type_is_alloc_size(arg_type)) { + if (!tnum_is_const(reg->var_off)) { + verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n", + regno); + return -EACCES; + } + meta->mem_size = reg->var_off.value; } else if (arg_type_is_int_ptr(arg_type)) { int size = int_ptr_type_to_size(arg_type); @@ -3446,7 +4150,9 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && - func_id != BPF_FUNC_perf_event_read_value) + func_id != BPF_FUNC_skb_output && + func_id != BPF_FUNC_perf_event_read_value && + func_id != BPF_FUNC_xdp_output) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: @@ -3490,14 +4196,18 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && - func_id != BPF_FUNC_msg_redirect_map) + func_id != BPF_FUNC_msg_redirect_map && + func_id != BPF_FUNC_sk_select_reuseport && + func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && - func_id != BPF_FUNC_msg_redirect_hash) + func_id != BPF_FUNC_msg_redirect_hash && + func_id != BPF_FUNC_sk_select_reuseport && + func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: @@ -3516,6 +4226,19 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, func_id != BPF_FUNC_sk_storage_delete) goto error; break; + case BPF_MAP_TYPE_INODE_STORAGE: + if (func_id != BPF_FUNC_inode_storage_get && + func_id != BPF_FUNC_inode_storage_delete) + goto error; + break; + case BPF_MAP_TYPE_RINGBUF: + if (func_id != BPF_FUNC_ringbuf_output && + func_id != BPF_FUNC_ringbuf_reserve && + func_id != BPF_FUNC_ringbuf_submit && + func_id != BPF_FUNC_ringbuf_discard && + func_id != BPF_FUNC_ringbuf_query) + goto error; + break; default: break; } @@ -3533,6 +4256,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: + case BPF_FUNC_skb_output: + case BPF_FUNC_xdp_output: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; @@ -3570,7 +4295,9 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_FUNC_sk_select_reuseport: - if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) + if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && + map->map_type != BPF_MAP_TYPE_SOCKMAP && + map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_map_peek_elem: @@ -3585,6 +4312,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) goto error; break; + case BPF_FUNC_inode_storage_get: + case BPF_FUNC_inode_storage_delete: + if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) + goto error; + break; default: break; } @@ -3663,7 +4395,7 @@ static bool 
check_refcount_ok(const struct bpf_func_proto *fn, int func_id) /* A reference acquiring function cannot acquire * another refcounted ptr. */ - if (is_acquire_function(func_id) && count) + if (may_be_acquire_function(func_id) && count) return false; /* We only support one arg being unreferenced at the moment, @@ -3748,12 +4480,32 @@ static int release_reference(struct bpf_verifier_env *env, return 0; } -static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx) +static void clear_caller_saved_regs(struct bpf_verifier_env *env, + struct bpf_reg_state *regs) +{ + int i; + + /* after the call registers r0 - r5 were scratched */ + for (i = 0; i < CALLER_SAVED_REGS; i++) { + mark_reg_not_init(env, regs, caller_saved[i]); + check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + } +} + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx); + +static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx, int subprog, + set_callee_state_fn set_callee_state_cb) { struct bpf_verifier_state *state = env->cur_state; + struct bpf_func_info_aux *func_info_aux; struct bpf_func_state *caller, *callee; - int i, err, subprog, target_insn; + int err; + bool is_global = false; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", @@ -3761,14 +4513,6 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -E2BIG; } - target_insn = *insn_idx + insn->imm; - subprog = find_subprog(env, target_insn + 1); - if (subprog < 0) { - verbose(env, "verifier bug. No program starts at insn %d\n", - target_insn + 1); - return -EFAULT; - } - caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", @@ -3776,6 +4520,57 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EFAULT; } + func_info_aux = env->prog->aux->func_info_aux; + if (func_info_aux) + is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; + err = btf_check_func_arg_match(env, subprog, caller->regs); + if (err == -EFAULT) + return err; + if (is_global) { + if (err) { + verbose(env, "Caller passes invalid args into func#%d\n", + subprog); + return err; + } else { + if (env->log.level & BPF_LOG_LEVEL) + verbose(env, + "Func#%d is global and valid. Skipping.\n", + subprog); + clear_caller_saved_regs(env, caller->regs); + + /* All global functions return SCALAR_VALUE */ + mark_reg_unknown(env, caller->regs, BPF_REG_0); + + /* continue with next insn after call */ + return 0; + } + } + + if (insn->code == (BPF_JMP | BPF_CALL) && + insn->imm == BPF_FUNC_timer_set_callback) { + struct bpf_verifier_state *async_cb; + + /* there is no real recursion here. 
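For orientation, the async path being set up here is what a program triggers with bpf_timer_set_callback(); a BPF C usage sketch, with map, value, and section names assumed:

    struct map_val {
            struct bpf_timer timer;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, struct map_val);   /* BTF is mandatory for bpf_timer */
    } timers SEC(".maps");

    static int timer_cb(void *map, __u32 *key, struct map_val *val)
    {
            return 0;                        /* async callbacks must return 0 */
    }

    SEC("tc")
    int start_timer(struct __sk_buff *skb)
    {
            __u32 key = 0;
            struct map_val *val = bpf_map_lookup_elem(&timers, &key);

            if (!val)
                    return 0;
            bpf_timer_init(&val->timer, &timers, CLOCK_MONOTONIC);
            bpf_timer_set_callback(&val->timer, timer_cb);
            bpf_timer_start(&val->timer, 1000000 /* ns */, 0);
            return 0;
    }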
timer callbacks are async */ + env->subprog_info[subprog].is_async_cb = true; + async_cb = push_async_cb(env, env->subprog_info[subprog].start, + *insn_idx, subprog); + if (!async_cb) + return -EFAULT; + callee = async_cb->frame[0]; + callee->async_entry_cnt = caller->async_entry_cnt + 1; + + /* Convert bpf_timer_set_callback() args into timer callback args */ + err = set_callee_state_cb(env, caller, callee, *insn_idx); + if (err) + return err; + + clear_caller_saved_regs(env, caller->regs); + mark_reg_unknown(env, caller->regs, BPF_REG_0); + caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* continue with next insn after call */ + return 0; + } + callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; @@ -3796,23 +4591,17 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (err) return err; - /* copy r1 - r5 args that callee can access. The copy includes parent - * pointers, which connects us up to the liveness chain - */ - for (i = BPF_REG_1; i <= BPF_REG_5; i++) - callee->regs[i] = caller->regs[i]; + err = set_callee_state_cb(env, caller, callee, *insn_idx); + if (err) + return err; - /* after the call registers r0 - r5 were scratched */ - for (i = 0; i < CALLER_SAVED_REGS; i++) { - mark_reg_not_init(env, caller->regs, caller_saved[i]); - check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); - } + clear_caller_saved_regs(env, caller->regs); /* only increment it after check_reg_arg() finished */ state->curframe++; /* and go analyze first insn of the callee */ - *insn_idx = target_insn; + *insn_idx = env->subprog_info[subprog].start - 1; if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "caller:\n"); @@ -3823,6 +4612,121 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return 0; } +int map_set_for_each_callback_args(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee) +{ + /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, + * void *callback_ctx, u64 flags); + * callback_fn(struct bpf_map *map, void *key, void *value, + * void *callback_ctx); + */ + callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; + + callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; + + callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; + __mark_reg_known_zero(&callee->regs[BPF_REG_3]); + callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; + + /* pointer to stack or null */ + callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + return 0; +} + +static int set_callee_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, int insn_idx) +{ + int i; + + /* copy r1 - r5 args that callee can access. The copy includes parent + * pointers, which connects us up to the liveness chain + */ + for (i = BPF_REG_1; i <= BPF_REG_5; i++) + callee->regs[i] = caller->regs[i]; + return 0; +} + +static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx) +{ + int subprog, target_insn; + + target_insn = *insn_idx + insn->imm + 1; + subprog = find_subprog(env, target_insn); + if (subprog < 0) { + verbose(env, "verifier bug. 
No program starts at insn %d\n", + target_insn); + return -EFAULT; + } + + return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); +} + +static int set_map_elem_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; + struct bpf_map *map; + int err; + + if (bpf_map_ptr_poisoned(insn_aux)) { + verbose(env, "tail_call abusing map_ptr\n"); + return -EINVAL; + } + + map = BPF_MAP_PTR(insn_aux->map_ptr_state); + if (!map->ops->map_set_for_each_callback_args || + !map->ops->map_for_each_callback) { + verbose(env, "callback function not allowed for map\n"); + return -ENOTSUPP; + } + + err = map->ops->map_set_for_each_callback_args(env, caller, callee); + if (err) + return err; + + callee->in_callback_fn = true; + return 0; +} + +static int set_timer_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; + + /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); + * callback_fn(struct bpf_map *map, void *key, void *value); + */ + callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; + __mark_reg_known_zero(&callee->regs[BPF_REG_1]); + callee->regs[BPF_REG_1].map_ptr = map_ptr; + + callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].map_ptr = map_ptr; + + callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; + __mark_reg_known_zero(&callee->regs[BPF_REG_3]); + callee->regs[BPF_REG_3].map_ptr = map_ptr; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + callee->in_async_callback_fn = true; + return 0; +} + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; @@ -3845,8 +4749,22 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) state->curframe--; caller = state->frame[state->curframe]; - /* return to the caller whatever r0 had in the callee */ - caller->regs[BPF_REG_0] = *r0; + if (callee->in_callback_fn) { + /* enforce R0 return value range [0, 1]. */ + struct tnum range = tnum_range(0, 1); + + if (r0->type != SCALAR_VALUE) { + verbose(env, "R0 not a scalar value\n"); + return -EACCES; + } + if (!tnum_in(range, r0->var_off)) { + verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); + return -EINVAL; + } + } else { + /* return to the caller whatever r0 had in the callee */ + caller->regs[BPF_REG_0] = *r0; + } /* Transfer references to the caller */ err = transfer_reference_state(caller, callee); @@ -3866,21 +4784,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) return 0; } -static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, - int func_id, - struct bpf_call_arg_meta *meta) +static int do_refine_retval_range(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, int ret_type, + int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; + struct bpf_reg_state tmp_reg = *ret_reg; + bool ret; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) - return; + return 0; + + /* Error case where ret is in interval [S32MIN, -1]. 
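do_refine_retval_range() now explores the error and success ranges as two separate verifier branches, which matches how programs consume these helpers; sketch (buffer name assumed):

    char buf[64];
    long n;

    n = bpf_get_stack(ctx, buf, sizeof(buf), 0);
    if (n < 0)        /* first branch: n in [S32_MIN, -1] */
            return 0;
    /* second branch: n is known to lie in [0, sizeof(buf)], so it can
     * be used to slice buf without further clamping */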
*/ + ret_reg->smin_value = S32_MIN; + ret_reg->smax_value = -1; - ret_reg->smax_value = meta->msize_smax_value; - ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); + __update_reg_bounds(ret_reg); + + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false); + if (!ret) + return -EFAULT; + + *ret_reg = tmp_reg; + + /* Success case where ret is in range [0, msize_max_value]. */ + ret_reg->smin_value = 0; + ret_reg->smax_value = meta->msize_max_value; + ret_reg->umin_value = ret_reg->smin_value; + ret_reg->umax_value = ret_reg->smax_value; + + __reg_deduce_bounds(ret_reg); + __reg_bound_offset(ret_reg); + __update_reg_bounds(ret_reg); + + return 0; } static int @@ -3896,7 +4837,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_map_push_elem && func_id != BPF_FUNC_map_pop_elem && - func_id != BPF_FUNC_map_peek_elem) + func_id != BPF_FUNC_map_peek_elem && + func_id != BPF_FUNC_for_each_map_elem) return 0; if (map == NULL) { @@ -3917,12 +4859,51 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, return -EACCES; } - if (!BPF_MAP_PTR(aux->map_state)) + if (!BPF_MAP_PTR(aux->map_ptr_state)) bpf_map_ptr_store(aux, meta->map_ptr, - meta->map_ptr->unpriv_array); - else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) + !meta->map_ptr->bypass_spec_v1); + else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, - meta->map_ptr->unpriv_array); + !meta->map_ptr->bypass_spec_v1); + return 0; +} + +static int +record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, + int func_id, int insn_idx) +{ + struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; + struct bpf_reg_state *regs = cur_regs(env), *reg; + struct bpf_map *map = meta->map_ptr; + struct tnum range; + u64 val; + int err; + + if (func_id != BPF_FUNC_tail_call) + return 0; + if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { + verbose(env, "kernel subsystem misconfigured verifier\n"); + return -EINVAL; + } + + range = tnum_range(0, map->max_entries - 1); + reg = ®s[BPF_REG_3]; + + if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { + bpf_map_key_store(aux, BPF_MAP_KEY_POISON); + return 0; + } + + err = mark_chain_precision(env, BPF_REG_3); + if (err) + return err; + + val = reg->var_off.value; + if (bpf_map_key_unseen(aux)) + bpf_map_key_store(aux, val); + else if (!bpf_map_key_poisoned(aux) && + bpf_map_key_immediate(aux) != val) + bpf_map_key_store(aux, BPF_MAP_KEY_POISON); return 0; } @@ -3938,15 +4919,18 @@ static int check_reference_leak(struct bpf_verifier_env *env) return state->acquired_refs ? 
-EINVAL : 0; } -static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) +static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx_p) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; + int insn_idx = *insn_idx_p; bool changes_data; - int i, err; + int i, err, func_id; /* find function prototype */ + func_id = insn->imm; if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); @@ -3987,23 +4971,22 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn meta.func_id = func_id; /* check args */ - err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); - if (err) - return err; - err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); - if (err) - return err; - err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); - if (err) - return err; - err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); - if (err) - return err; - err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); + for (i = 0; i < 5; i++) { + if (!fn->check_btf_id) { + err = btf_resolve_helper_id(&env->log, fn, i); + if (err > 0) + meta.btf_id = err; + } + err = check_func_arg(env, i, &meta, fn); + if (err) + return err; + } + + err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; - err = record_func_map(env, &meta, func_id, insn_idx); + err = record_func_key(env, &meta, func_id, insn_idx); if (err) return err; @@ -4043,6 +5026,20 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } + if (func_id == BPF_FUNC_for_each_map_elem) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_map_elem_callback_state); + if (err < 0) + return -EINVAL; + } + + if (func_id == BPF_FUNC_timer_set_callback) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_timer_callback_state); + if (err < 0) + return -EINVAL; + } + /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); @@ -4072,6 +5069,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].map_uid = meta.map_uid; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; if (map_value_has_spin_lock(meta.map_ptr)) @@ -4092,6 +5090,23 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { + verbose(env, "invalid return type %d of func %s#%d\n", + fn->ret_type, func_id_name(func_id), func_id); + return -EINVAL; + } + regs[BPF_REG_0].btf_id = ret_btf_id; + } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; + regs[BPF_REG_0].id = ++env->id_gen; + regs[BPF_REG_0].mem_size = meta.mem_size; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); @@ -4101,7 +5116,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int 
insn if (is_ptr_cast_function(func_id)) { /* For release_reference() */ regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; - } else if (is_acquire_function(func_id)) { + } else if (is_acquire_function(func_id, meta.map_ptr)) { int id = acquire_reference_state(env, insn_idx); if (id < 0) @@ -4112,13 +5127,17 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].ref_obj_id = id; } - do_refine_retval_range(regs, fn->ret_type, func_id, &meta); + err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); + if (err) + return err; err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; - if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { + if ((func_id == BPF_FUNC_get_stack || + func_id == BPF_FUNC_get_task_stack) && + !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS @@ -4201,41 +5220,62 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) return &env->insn_aux_data[env->insn_idx]; } +enum { + REASON_BOUNDS = -1, + REASON_TYPE = -2, + REASON_PATHS = -3, + REASON_LIMIT = -4, + REASON_STACK = -5, +}; + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, - u32 *ptr_limit, u8 opcode, bool off_is_neg) + u32 *alu_limit, bool mask_to_left) { - bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || - (opcode == BPF_SUB && !off_is_neg); - u32 off; + u32 max = 0, ptr_limit = 0, off; switch (ptr_reg->type) { case PTR_TO_STACK: - /* Indirect variable offset stack access is prohibited in - * unprivileged mode so it's not handled here. + /* Offset 0 is out-of-bounds, but acceptable start for the + * left direction, see BPF_REG_FP. Also, unknown scalar + * offset where we would need to deal with min/max bounds is + * currently prohibited for unprivileged. */ - off = ptr_reg->off + ptr_reg->var_off.value; - if (mask_to_left) - *ptr_limit = MAX_BPF_STACK + off; - else - *ptr_limit = -off; - return 0; + max = MAX_BPF_STACK + mask_to_left; + ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); + break; case PTR_TO_MAP_VALUE: + max = ptr_reg->map_ptr->value_size; + ptr_limit = (mask_to_left ? + ptr_reg->smin_value : + ptr_reg->umax_value) + ptr_reg->off; + break; + case PTR_TO_MAP_KEY: + /* Currently, this code is not exercised as the only use + * is bpf_for_each_map_elem() helper which requires + * bpf_capble. The code has been tested manually for + * future use. + */ if (mask_to_left) { - *ptr_limit = ptr_reg->umax_value + ptr_reg->off; + ptr_limit = ptr_reg->umax_value + ptr_reg->off; } else { off = ptr_reg->smin_value + ptr_reg->off; - *ptr_limit = ptr_reg->map_ptr->value_size - off; + ptr_limit = ptr_reg->map_ptr->key_size - off; } return 0; default: - return -EINVAL; + return REASON_TYPE; } + + if (ptr_limit >= max) + return REASON_LIMIT; + *alu_limit = ptr_limit; + return 0; } static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, const struct bpf_insn *insn) { - return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; + return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; } static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, @@ -4247,7 +5287,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, if (aux->alu_state && (aux->alu_state != alu_state || aux->alu_limit != alu_limit)) - return -EACCES; + return REASON_PATHS; /* Corresponding fixup done in fixup_bpf_calls(). 
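That fixup consumes the alu_state/alu_limit recorded here and wraps the pointer ALU in a masking sequence; approximately (off_reg is the variable offset, BPF_REG_AX the verifier's scratch register):

    *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
    *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); /* ax = limit - off */
    *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);  /* sign set iff off out of range */
    *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);       /* ax = -ax */
    *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);     /* all-ones iff in range */
    *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); /* off, or 0 if out of range */

so an out-of-range offset is forced to zero before it can steer a speculative access.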
*/ aux->alu_state = alu_state; @@ -4266,19 +5306,55 @@ static int sanitize_val_alu(struct bpf_verifier_env *env, return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); } +static bool sanitize_needed(u8 opcode) +{ + return opcode == BPF_ADD || opcode == BPF_SUB; +} + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; +}; + +static struct bpf_verifier_state * +sanitize_speculative_path(struct bpf_verifier_env *env, + const struct bpf_insn *insn, + u32 next_idx, u32 curr_idx) +{ + struct bpf_verifier_state *branch; + struct bpf_reg_state *regs; + + branch = push_stack(env, next_idx, curr_idx, true); + if (branch && insn) { + regs = branch->frame[branch->curframe]->regs; + if (BPF_SRC(insn->code) == BPF_K) { + mark_reg_unknown(env, regs, insn->dst_reg); + } else if (BPF_SRC(insn->code) == BPF_X) { + mark_reg_unknown(env, regs, insn->dst_reg); + mark_reg_unknown(env, regs, insn->src_reg); + } + } + return branch; +} + static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg, struct bpf_reg_state *dst_reg, - bool off_is_neg) + struct bpf_sanitize_info *info, + const bool commit_window) { + struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; struct bpf_verifier_state *vstate = env->cur_state; - struct bpf_insn_aux_data *aux = cur_aux(env); + bool off_is_imm = tnum_is_const(off_reg->var_off); + bool off_is_neg = off_reg->smin_value < 0; bool ptr_is_dst_reg = ptr_reg == dst_reg; u8 opcode = BPF_OP(insn->code); u32 alu_state, alu_limit; struct bpf_reg_state tmp; bool ret; + int err; if (can_skip_alu_sanitation(env, insn)) return 0; @@ -4290,15 +5366,53 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, if (vstate->speculative) goto do_sim; - alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; - alu_state |= ptr_is_dst_reg ? - BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + if (!commit_window) { + if (!tnum_is_const(off_reg->var_off) && + (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) + return REASON_BOUNDS; - if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) - return 0; - if (update_alu_sanitation_state(aux, alu_state, alu_limit)) - return -EACCES; + info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || + (opcode == BPF_SUB && !off_is_neg); + } + + err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); + if (err < 0) + return err; + + if (commit_window) { + /* In commit phase we narrow the masking window based on + * the observed pointer move after the simulated operation. + */ + alu_state = info->aux.alu_state; + alu_limit = abs(info->aux.alu_limit - alu_limit); + } else { + alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; + alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; + alu_state |= ptr_is_dst_reg ? + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + + /* Limit pruning on unknown scalars to enable deep search for + * potential masking differences from other program paths. + */ + if (!off_is_imm) + env->explore_alu_limits = true; + } + + err = update_alu_sanitation_state(aux, alu_state, alu_limit); + if (err < 0) + return err; do_sim: + /* If we're in commit phase, we're done here given we already + * pushed the truncated dst_reg into the speculative verification + * stack. + * + * Also, when register is a known constant, we rewrite register-based + * operation to immediate-based, and thus do not need masking (and as + * a consequence, do not need to simulate the zero-truncation either). 
+ */ + if (commit_window || off_is_imm) + return 0; + /* Simulate and find potential out-of-bounds access under * speculative execution from truncation as a result of * masking when off was not within expected range. If off @@ -4312,10 +5426,98 @@ do_sim: tmp = *dst_reg; *dst_reg = *ptr_reg; } - ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); + ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, + env->insn_idx); if (!ptr_is_dst_reg && ret) *dst_reg = tmp; - return !ret ? -EFAULT : 0; + return !ret ? REASON_STACK : 0; +} + +static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) +{ + struct bpf_verifier_state *vstate = env->cur_state; + + /* If we simulate paths under speculation, we don't update the + * insn as 'seen' such that when we verify unreachable paths in + * the non-speculative domain, sanitize_dead_code() can still + * rewrite/sanitize them. + */ + if (!vstate->speculative) + env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; +} + +static int sanitize_err(struct bpf_verifier_env *env, + const struct bpf_insn *insn, int reason, + const struct bpf_reg_state *off_reg, + const struct bpf_reg_state *dst_reg) +{ + static const char *err = "pointer arithmetic with it prohibited for !root"; + const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; + u32 dst = insn->dst_reg, src = insn->src_reg; + + switch (reason) { + case REASON_BOUNDS: + verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", + off_reg == dst_reg ? dst : src, err); + break; + case REASON_TYPE: + verbose(env, "R%d has pointer with unsupported alu operation, %s\n", + off_reg == dst_reg ? src : dst, err); + break; + case REASON_PATHS: + verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", + dst, op, err); + break; + case REASON_LIMIT: + verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", + dst, op, err); + break; + case REASON_STACK: + verbose(env, "R%d could not be pushed for speculative verification, %s\n", + dst, err); + break; + default: + verbose(env, "verifier internal error: unknown reason (%d)\n", + reason); + break; + } + + return -EACCES; +} + +static int sanitize_check_bounds(struct bpf_verifier_env *env, + const struct bpf_insn *insn, + const struct bpf_reg_state *dst_reg) +{ + u32 dst = insn->dst_reg; + + /* For unprivileged we require that resulting offset must be in bounds + * in order to be able to sanitize access later on. + */ + if (env->bypass_spec_v1) + return 0; + + switch (dst_reg->type) { + case PTR_TO_STACK: + if (check_stack_access(env, dst_reg, dst_reg->off + + dst_reg->var_off.value, 1)) { + verbose(env, "R%d stack pointer arithmetic goes out of range, " + "prohibited for !root\n", dst); + return -EACCES; + } + break; + case PTR_TO_MAP_VALUE: + if (check_map_access(env, dst, dst_reg->off, 1, false)) { + verbose(env, "R%d pointer arithmetic of map value goes out of range, " + "prohibited for !root\n", dst); + return -EACCES; + } + break; + default: + break; + } + + return 0; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 
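The alu_limit recorded by the functions above is consumed much later, in fixup_bpf_calls(), which rewrites a sanitized pointer ALU instruction into a branch-free masking sequence over BPF_REG_AX. The following stand-alone C model of that sequence is illustrative only and not part of the patch; 'limit' plays the role of aux->alu_limit, and the negative-offset case (which first negates off_reg and swaps BPF_ADD/BPF_SUB) is left out:

	#include <stdint.h>

	/* Model of the masking emitted in fixup_bpf_calls() below:
	 *   AX = limit; AX -= off; AX |= off; AX = -AX; AX s>>= 63; off &= AX;
	 * AX ends up all-ones when off lies within the limit and all-zeros
	 * otherwise, so a speculatively out-of-range offset collapses to 0.
	 */
	static uint64_t alu_mask_model(uint64_t off, uint32_t limit)
	{
		uint64_t ax = limit;

		ax -= off;                          /* sign bit set iff off > limit */
		ax |= off;                          /* also set iff off has the sign bit set */
		ax = -ax;                           /* sign bit now set iff off was in range */
		ax = (uint64_t)((int64_t)ax >> 63); /* broadcast the sign bit */
		return off & ax;
	}

When the offset register holds a known constant (BPF_ALU_IMMEDIATE), none of this is needed: the fixup loads the constant into BPF_REG_AX directly, as visible in the fixup_bpf_calls() hunk further down.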
@@ -4336,8 +5538,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; - u32 dst = insn->dst_reg, src = insn->src_reg; + struct bpf_sanitize_info info = {}; u8 opcode = BPF_OP(insn->code); + u32 dst = insn->dst_reg; int ret; dst_reg = &regs[dst]; @@ -4365,6 +5568,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst, reg_type_str[ptr_reg->type]); return -EACCES; case CONST_PTR_TO_MAP: + /* smin_val represents the known value */ + if (known && smin_val == 0 && opcode == BPF_ADD) + break; + /* fall-through */ case PTR_TO_PACKET_END: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: @@ -4376,13 +5583,14 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; + case PTR_TO_MAP_KEY: case PTR_TO_MAP_VALUE: if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", - off_reg == dst_reg ? dst : src); + dst); return -EACCES; } - /* fall-through */ + fallthrough; default: break; } @@ -4397,13 +5605,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; + if (sanitize_needed(opcode)) { + ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, + &info, false); + if (ret < 0) + return sanitize_err(env, insn, ret, off_reg, dst_reg); + } + switch (opcode) { case BPF_ADD: - ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); - if (ret < 0) { - verbose(env, "R%d tried to add from different maps or paths\n", dst); - return ret; - } /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ @@ -4454,11 +5664,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } break; case BPF_SUB: - ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); - if (ret < 0) { - verbose(env, "R%d tried to sub from different maps or paths\n", dst); - return ret; - } if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ verbose(env, "R%d tried to subtract pointer from scalar\n", @@ -4539,22 +5744,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); - /* For unprivileged we require that resulting offset must be in bounds * in order to be able to sanitize access later on.
- */ - if (!env->allow_ptr_leaks) { - if (dst_reg->type == PTR_TO_MAP_VALUE && - check_map_access(env, dst, dst_reg->off, 1, false)) { - verbose(env, "R%d pointer arithmetic of map value goes out of range, " - "prohibited for !root\n", dst); - return -EACCES; - } else if (dst_reg->type == PTR_TO_STACK && - check_stack_access(env, dst_reg, dst_reg->off + - dst_reg->var_off.value, 1)) { - verbose(env, "R%d stack pointer arithmetic goes out of range, " - "prohibited for !root\n", dst); - return -EACCES; - } + if (sanitize_check_bounds(env, insn, dst_reg) < 0) + return -EACCES; + if (sanitize_needed(opcode)) { + ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, + &info, true); + if (ret < 0) + return sanitize_err(env, insn, ret, off_reg, dst_reg); } return 0; @@ -4575,7 +5771,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; - u32 dst = insn->dst_reg; int ret; if (insn_bitness == 32) { @@ -4609,13 +5804,14 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, return 0; } + if (sanitize_needed(opcode)) { + ret = sanitize_val_alu(env, insn); + if (ret < 0) + return sanitize_err(env, insn, ret, NULL, NULL); + } + switch (opcode) { case BPF_ADD: - ret = sanitize_val_alu(env, insn); - if (ret < 0) { - verbose(env, "R%d tried to add from different pointers or scalars\n", dst); - return ret; - } if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; @@ -4635,11 +5831,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: - ret = sanitize_val_alu(env, insn); - if (ret < 0) { - verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); - return ret; - } if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ @@ -5196,8 +6387,25 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, struct bpf_reg_state reg_lo; s64 sval; - if (__is_pointer_value(false, reg)) - return -1; + if (__is_pointer_value(false, reg)) { + if (!reg_type_not_null(reg->type)) + return -1; + + /* If pointer is valid tests against zero will fail so we can + * use this to direct branch taken. + */ + if (val != 0) + return -1; + + switch (opcode) { + case BPF_JEQ: + return 0; + case BPF_JNE: + return 1; + default: + return -1; + } + } if (is_jmp32) { reg_lo = *reg; @@ -5325,6 +6533,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg) reg->smax_value <= 0 && reg->smin_value >= S32_MIN); } +/* Constrain the possible values of @reg with unsigned upper bound @bound. + * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. + * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits + * of @reg. + */ +static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, + bool is_exclusive) +{ + if (is_exclusive) { + /* There are no values for `reg` that make `reg<0` true. */ + if (bound == 0) + return; + bound--; + } + if (is_jmp32) { + /* Constrain the register's value in the tnum representation. 
+ * For 64-bit comparisons this happens later in + * __reg_bound_offset(), but for 32-bit comparisons, we can be + * more precise than what can be derived from the updated + * numeric bounds. + */ + struct tnum t = tnum_range(0, bound); + + t.mask |= ~0xffffffffULL; /* upper half is unknown */ + reg->var_off = tnum_intersect(reg->var_off, t); + + /* Compute the 64-bit bound from the 32-bit bound. */ + bound += gen_hi_max(reg->var_off); + } + reg->umax_value = min(reg->umax_value, bound); +} + +/* Constrain the possible values of @reg with unsigned lower bound @bound. + * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. + * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits + * of @reg. + */ +static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, + bool is_exclusive) +{ + if (is_exclusive) { + /* There are no values for `reg` that make `reg>MAX` true. */ + if (bound == (is_jmp32 ? U32_MAX : U64_MAX)) + return; + bound++; + } + if (is_jmp32) { + /* Constrain the register's value in the tnum representation. + * For 64-bit comparisons this happens later in + * __reg_bound_offset(), but for 32-bit comparisons, we can be + * more precise than what can be derived from the updated + * numeric bounds. + */ + struct tnum t = tnum_range(bound, U32_MAX); + + t.mask |= ~0xffffffffULL; /* upper half is unknown */ + reg->var_off = tnum_intersect(reg->var_off, t); + + /* Compute the 64-bit bound from the 32-bit bound. */ + bound += gen_hi_min(reg->var_off); + } + reg->umin_value = max(reg->umin_value, bound); +} + /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. @@ -5380,15 +6652,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, case BPF_JGE: case BPF_JGT: { - u64 false_umax = opcode == BPF_JGT ? val : val - 1; - u64 true_umin = opcode == BPF_JGT ? val + 1 : val; - - if (is_jmp32) { - false_umax += gen_hi_max(false_reg->var_off); - true_umin += gen_hi_min(true_reg->var_off); - } - false_reg->umax_value = min(false_reg->umax_value, false_umax); - true_reg->umin_value = max(true_reg->umin_value, true_umin); + set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); + set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); break; } case BPF_JSGE: @@ -5409,15 +6674,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, case BPF_JLE: case BPF_JLT: { - u64 false_umin = opcode == BPF_JLT ? val : val + 1; - u64 true_umax = opcode == BPF_JLT ? val - 1 : val; - - if (is_jmp32) { - false_umin += gen_hi_min(false_reg->var_off); - true_umax += gen_hi_max(true_reg->var_off); - } - false_reg->umin_value = max(false_reg->umin_value, false_umin); - true_reg->umax_value = min(true_reg->umax_value, true_umax); + set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); + set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); break; } case BPF_JSLE: @@ -5492,15 +6750,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, case BPF_JGE: case BPF_JGT: { - u64 false_umin = opcode == BPF_JGT ? val : val + 1; - u64 true_umax = opcode == BPF_JGT ? 
val - 1 : val; - - if (is_jmp32) { - false_umin += gen_hi_min(false_reg->var_off); - true_umax += gen_hi_max(true_reg->var_off); - } - false_reg->umin_value = max(false_reg->umin_value, false_umin); - true_reg->umax_value = min(true_reg->umax_value, true_umax); + set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); + set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); break; } case BPF_JSGE: @@ -5518,15 +6769,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, case BPF_JLE: case BPF_JLT: { - u64 false_umax = opcode == BPF_JLT ? val : val - 1; - u64 true_umin = opcode == BPF_JLT ? val + 1 : val; - - if (is_jmp32) { - false_umax += gen_hi_max(false_reg->var_off); - true_umin += gen_hi_min(true_reg->var_off); - } - false_reg->umax_value = min(false_reg->umax_value, false_umax); - true_reg->umin_value = max(true_reg->umin_value, true_umin); + set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); + set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); break; } case BPF_JSLE: @@ -5623,12 +6867,20 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - if (reg->map_ptr->inner_map_meta) { + const struct bpf_map *map = reg->map_ptr; + + if (map->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; - reg->map_ptr = reg->map_ptr->inner_map_meta; - } else if (reg->map_ptr->map_type == - BPF_MAP_TYPE_XSKMAP) { + reg->map_ptr = map->inner_map_meta; + /* transfer reg's id which is unique for every map_lookup_elem + * as UID of the inner map. + */ + reg->map_uid = reg->id; + } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; + } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || + map->map_type == BPF_MAP_TYPE_SOCKHASH) { + reg->type = PTR_TO_SOCKET; } else { reg->type = PTR_TO_MAP_VALUE; } @@ -5638,6 +6890,14 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, reg->type = PTR_TO_SOCK_COMMON; } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { reg->type = PTR_TO_TCP_SOCK; + } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { + reg->type = PTR_TO_BTF_ID; + } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { + reg->type = PTR_TO_RDONLY_BUF; + } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { + reg->type = PTR_TO_RDWR_BUF; + } else if (reg->type == PTR_TO_MEM_OR_NULL) { + reg->type = PTR_TO_MEM; } if (is_null) { /* We don't need id and ref_obj_id from this point @@ -5852,20 +7112,38 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, pred = is_branch_taken(dst_reg, src_reg->var_off.value, opcode, is_jmp32); if (pred >= 0) { - err = mark_chain_precision(env, insn->dst_reg); + /* If we get here with a dst_reg pointer type it is because + * above is_branch_taken() special cased the 0 comparison. + */ + if (!__is_pointer_value(false, dst_reg)) + err = mark_chain_precision(env, insn->dst_reg); if (BPF_SRC(insn->code) == BPF_X && !err) err = mark_chain_precision(env, insn->src_reg); if (err) return err; } + if (pred == 1) { - /* only follow the goto, ignore fall-through */ + /* Only follow the goto, ignore fall-through. If needed, push + * the fall-through branch for simulation under speculative + * execution. 
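+ * E.g. if r1 is known to be zero, "if r1 == 0 goto +off" always takes
+ * the branch, but the CPU may still enter the fall-through under
+ * speculation, so it is verified with the compared registers marked
+ * unknown by sanitize_speculative_path().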
+ */ + if (!env->allow_ptr_leaks && + !sanitize_speculative_path(env, insn, *insn_idx + 1, + *insn_idx)) + return -EFAULT; *insn_idx += insn->off; return 0; } else if (pred == 0) { - /* only follow fall-through branch, since - * that's where the program will go + /* Only follow the fall-through branch, since that's where the + * program will go. If needed, push the goto branch for + * simulation under speculative execution. */ + if (!env->allow_ptr_leaks && + !sanitize_speculative_path(env, insn, + *insn_idx + insn->off + 1, + *insn_idx)) + return -EFAULT; return 0; } @@ -5954,6 +7232,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_insn_aux_data *aux = cur_aux(env); struct bpf_reg_state *regs = cur_regs(env); + struct bpf_reg_state *dst_reg; struct bpf_map *map; int err; @@ -5970,25 +7249,63 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; + dst_reg = &regs[insn->dst_reg]; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; - regs[insn->dst_reg].type = SCALAR_VALUE; + dst_reg->type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } + if (insn->src_reg == BPF_PSEUDO_BTF_ID) { + mark_reg_known_zero(env, regs, insn->dst_reg); + + dst_reg->type = aux->btf_var.reg_type; + switch (dst_reg->type) { + case PTR_TO_MEM: + dst_reg->mem_size = aux->btf_var.mem_size; + break; + case PTR_TO_BTF_ID: + dst_reg->btf_id = aux->btf_var.btf_id; + break; + default: + verbose(env, "bpf verifier is misconfigured\n"); + return -EFAULT; + } + return 0; + } + + if (insn->src_reg == BPF_PSEUDO_FUNC) { + struct bpf_prog_aux *aux = env->prog->aux; + u32 subprogno = find_subprog(env, + env->insn_idx + insn->imm + 1); + + if (!aux->func_info) { + verbose(env, "missing btf func_info\n"); + return -EINVAL; + } + if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { + verbose(env, "callback function not static\n"); + return -EINVAL; + } + + dst_reg->type = PTR_TO_FUNC; + dst_reg->subprogno = subprogno; + return 0; + } + map = env->used_maps[aux->map_index]; mark_reg_known_zero(env, regs, insn->dst_reg); - regs[insn->dst_reg].map_ptr = map; + dst_reg->map_ptr = map; if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { - regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; - regs[insn->dst_reg].off = aux->map_off; + dst_reg->type = PTR_TO_MAP_VALUE; + dst_reg->off = aux->map_off; if (map_value_has_spin_lock(map)) - regs[insn->dst_reg].id = ++env->id_gen; + dst_reg->id = ++env->id_gen; } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { - regs[insn->dst_reg].type = CONST_PTR_TO_MAP; + dst_reg->type = CONST_PTR_TO_MAP; } else { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } @@ -6116,13 +7433,41 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) static int check_return_code(struct bpf_verifier_env *env) { struct tnum enforce_attach_type_range = tnum_unknown; + const struct bpf_prog *prog = env->prog; struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); + int err; + struct bpf_func_state *frame = env->cur_state->frame[0]; + + /* LSM and struct_ops func-ptr's return type could be "void" */ + if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS || + env->prog->type == BPF_PROG_TYPE_LSM) && + !prog->aux->attach_func_proto->type) + return 0; + + /* eBPF calling convention is such that R0 is used + * to return the value from eBPF program.
+ * Make sure that it's readable at this time + * of bpf_exit, which means that program wrote + * something into it earlier + */ + err = check_reg_arg(env, BPF_REG_0, SRC_OP); + if (err) + return err; + + if (is_pointer_value(env, BPF_REG_0)) { + verbose(env, "R0 leaks addr as return value\n"); + return -EACCES; + } switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || - env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) + env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || + env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || + env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || + env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || + env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) range = tnum_range(1, 1); break; case BPF_PROG_TYPE_CGROUP_SKB: @@ -6137,11 +7482,39 @@ static int check_return_code(struct bpf_verifier_env *env) case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: break; + case BPF_PROG_TYPE_RAW_TRACEPOINT: + if (!env->prog->aux->attach_btf_id) + return 0; + range = tnum_const(0); + break; + case BPF_PROG_TYPE_SK_LOOKUP: + range = tnum_range(SK_DROP, SK_PASS); + break; + case BPF_PROG_TYPE_TRACING: + if (env->prog->expected_attach_type != BPF_TRACE_ITER) + return 0; + break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; + + if (frame->in_async_callback_fn) { + /* enforce return zero from async callbacks like timer */ + if (reg->type != SCALAR_VALUE) { + verbose(env, "In async callback the register R0 is not a known value (%s)\n", + reg_type_str[reg->type]); + return -EINVAL; + } + + if (!tnum_in(tnum_const(0), reg->var_off)) { + verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); + return -EINVAL; + } + return 0; + } + if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); @@ -6149,17 +7522,7 @@ static int check_return_code(struct bpf_verifier_env *env) } if (!tnum_in(range, reg->var_off)) { - char tn_buf[48]; - - verbose(env, "At program exit the register R0 "); - if (!tnum_is_unknown(reg->var_off)) { - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(env, "has value %s", tn_buf); - } else { - verbose(env, "has unknown scalar value"); - } - tnum_strn(tn_buf, sizeof(tn_buf), range); - verbose(env, " should have been in %s\n", tn_buf); + verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); return -EINVAL; } @@ -6229,6 +7592,11 @@ static void init_explored_state(struct bpf_verifier_env *env, int idx) env->insn_aux_data[idx].prune_point = true; } +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction @@ -6241,10 +7609,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, int *insn_state = env->cfg.insn_state; if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) - return 0; + return DONE_EXPLORING; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) - return 0; + return DONE_EXPLORING; if (w < 0 || w >= env->prog->len) { verbose_linfo(env, t, "%d: ", t); @@ -6263,10 +7631,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, if (env->cfg.cur_stack >= env->prog->len) return -E2BIG; insn_stack[env->cfg.cur_stack++] = w; - return 1; + return KEEP_EXPLORING; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { - if (loop_ok && 
env->allow_ptr_leaks) - return 0; + if (loop_ok && env->bpf_capable) + return DONE_EXPLORING; verbose_linfo(env, t, "%d: ", t); verbose_linfo(env, w, "%d: ", w); verbose(env, "back-edge from insn %d to %d\n", t, w); @@ -6278,7 +7646,99 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, verbose(env, "insn state internal bug\n"); return -EFAULT; } - return 0; + return DONE_EXPLORING; +} + +static int visit_func_call_insn(int t, int insn_cnt, + struct bpf_insn *insns, + struct bpf_verifier_env *env, + bool visit_callee) +{ + int ret; + + ret = push_insn(t, t + 1, FALLTHROUGH, env, false); + if (ret) + return ret; + + if (t + 1 < insn_cnt) + init_explored_state(env, t + 1); + if (visit_callee) { + init_explored_state(env, t); + ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, + /* It's ok to allow recursion from CFG point of + * view. __check_func_call() will do the actual + * check. + */ + bpf_pseudo_func(insns + t)); + } + return ret; +} + +/* Visits the instruction at index t and returns one of the following: + * < 0 - an error occurred + * DONE_EXPLORING - the instruction was fully explored + * KEEP_EXPLORING - there is still work to be done before it is fully explored + */ +static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env) +{ + struct bpf_insn *insns = env->prog->insnsi; + int ret; + + if (bpf_pseudo_func(insns + t)) + return visit_func_call_insn(t, insn_cnt, insns, env, true); + + /* All non-branch instructions have a single fall-through edge. */ + if (BPF_CLASS(insns[t].code) != BPF_JMP && + BPF_CLASS(insns[t].code) != BPF_JMP32) + return push_insn(t, t + 1, FALLTHROUGH, env, false); + + switch (BPF_OP(insns[t].code)) { + case BPF_EXIT: + return DONE_EXPLORING; + + case BPF_CALL: + if (insns[t].imm == BPF_FUNC_timer_set_callback) + /* Mark this call insn to trigger is_state_visited() check + * before call itself is processed by __check_func_call(). + * Otherwise new async state will be pushed for further + * exploration. + */ + init_explored_state(env, t); + return visit_func_call_insn(t, insn_cnt, insns, env, + insns[t].src_reg == BPF_PSEUDO_CALL); + + case BPF_JA: + if (BPF_SRC(insns[t].code) != BPF_K) + return -EINVAL; + + /* unconditional jump with single edge */ + ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, + true); + if (ret) + return ret; + + /* unconditional jmp is not a good pruning point, + * but it's marked, since backtracking needs + * to record jmp history in is_state_visited(). 
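+ * (Backtracking replays that recorded history to walk the exact
+ * path into an instruction when marking registers precise.)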
+ */ + init_explored_state(env, t + insns[t].off + 1); + /* tell verifier to check for equivalent states + * after every call and jump + */ + if (t + 1 < insn_cnt) + init_explored_state(env, t + 1); + + return ret; + + default: + /* conditional jump with two edges */ + init_explored_state(env, t); + ret = push_insn(t, t + 1, FALLTHROUGH, env, true); + if (ret) + return ret; + + return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); + } } /* non-recursive depth-first-search to detect loops in BPF program @@ -6286,11 +7746,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, */ static int check_cfg(struct bpf_verifier_env *env) { - struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int *insn_stack, *insn_state; int ret = 0; - int i, t; + int i; insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) @@ -6306,92 +7765,32 @@ static int check_cfg(struct bpf_verifier_env *env) insn_stack[0] = 0; /* 0 is the first instruction */ env->cfg.cur_stack = 1; -peek_stack: - if (env->cfg.cur_stack == 0) - goto check_state; - t = insn_stack[env->cfg.cur_stack - 1]; + while (env->cfg.cur_stack > 0) { + int t = insn_stack[env->cfg.cur_stack - 1]; - if (BPF_CLASS(insns[t].code) == BPF_JMP || - BPF_CLASS(insns[t].code) == BPF_JMP32) { - u8 opcode = BPF_OP(insns[t].code); - - if (opcode == BPF_EXIT) { - goto mark_explored; - } else if (opcode == BPF_CALL) { - ret = push_insn(t, t + 1, FALLTHROUGH, env, false); - if (ret == 1) - goto peek_stack; - else if (ret < 0) - goto err_free; - if (t + 1 < insn_cnt) - init_explored_state(env, t + 1); - if (insns[t].src_reg == BPF_PSEUDO_CALL) { - init_explored_state(env, t); - ret = push_insn(t, t + insns[t].imm + 1, BRANCH, - env, false); - if (ret == 1) - goto peek_stack; - else if (ret < 0) - goto err_free; + ret = visit_insn(t, insn_cnt, env); + switch (ret) { + case DONE_EXPLORING: + insn_state[t] = EXPLORED; + env->cfg.cur_stack--; + break; + case KEEP_EXPLORING: + break; + default: + if (ret > 0) { + verbose(env, "visit_insn internal bug\n"); + ret = -EFAULT; } - } else if (opcode == BPF_JA) { - if (BPF_SRC(insns[t].code) != BPF_K) { - ret = -EINVAL; - goto err_free; - } - /* unconditional jump with single edge */ - ret = push_insn(t, t + insns[t].off + 1, - FALLTHROUGH, env, true); - if (ret == 1) - goto peek_stack; - else if (ret < 0) - goto err_free; - /* unconditional jmp is not a good pruning point, - * but it's marked, since backtracking needs - * to record jmp history in is_state_visited(). 
- */ - init_explored_state(env, t + insns[t].off + 1); - /* tell verifier to check for equivalent states - * after every call and jump - */ - if (t + 1 < insn_cnt) - init_explored_state(env, t + 1); - } else { - /* conditional jump with two edges */ - init_explored_state(env, t); - ret = push_insn(t, t + 1, FALLTHROUGH, env, true); - if (ret == 1) - goto peek_stack; - else if (ret < 0) - goto err_free; - - ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); - if (ret == 1) - goto peek_stack; - else if (ret < 0) - goto err_free; - } - } else { - /* all other non-branch instructions with single - * fall-through edge - */ - ret = push_insn(t, t + 1, FALLTHROUGH, env, false); - if (ret == 1) - goto peek_stack; - else if (ret < 0) goto err_free; + } } -mark_explored: - insn_state[t] = EXPLORED; - if (env->cfg.cur_stack-- <= 0) { + if (env->cfg.cur_stack < 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } - goto peek_stack; -check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); @@ -6419,6 +7818,7 @@ static int check_btf_func(struct bpf_verifier_env *env, u32 i, nfuncs, urec_size, min_size; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; + struct bpf_func_info_aux *info_aux = NULL; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; @@ -6452,6 +7852,9 @@ static int check_btf_func(struct bpf_verifier_env *env, krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); if (!krecord) return -ENOMEM; + info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); + if (!info_aux) + goto err_free; for (i = 0; i < nfuncs; i++) { ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); @@ -6497,35 +7900,38 @@ static int check_btf_func(struct bpf_verifier_env *env, /* check type_id */ type = btf_type_by_id(btf, krecord[i].type_id); - if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { + if (!type || !btf_type_is_func(type)) { verbose(env, "invalid type id %d in func info", krecord[i].type_id); ret = -EINVAL; goto err_free; } - + info_aux[i].linkage = BTF_INFO_VLEN(type->info); prev_offset = krecord[i].insn_off; urecord += urec_size; } prog->aux->func_info = krecord; prog->aux->func_info_cnt = nfuncs; + prog->aux->func_info_aux = info_aux; return 0; err_free: kvfree(krecord); + kfree(info_aux); return ret; } static void adjust_btf_func(struct bpf_verifier_env *env) { + struct bpf_prog_aux *aux = env->prog->aux; int i; - if (!env->prog->aux->func_info) + if (!aux->func_info) return; for (i = 0; i < env->subprog_cnt; i++) - env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; + aux->func_info[i].insn_off = env->subprog_info[i].start; } #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ @@ -6547,6 +7953,8 @@ static int check_btf_line(struct bpf_verifier_env *env, nr_linfo = attr->line_info_cnt; if (!nr_linfo) return 0; + if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) + return -EINVAL; rec_size = attr->line_info_rec_size; if (rec_size < MIN_BPF_LINEINFO_SIZE || @@ -6690,13 +8098,6 @@ static bool range_within(struct bpf_reg_state *old, old->smax_value >= cur->smax_value; } -/* Maximum number of register states that can exist at once */ -#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) -struct idpair { - u32 old; - u32 cur; -}; - /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. 
But that id could be different from * the old state, so we need to track the mapping from old to new ids. @@ -6707,11 +8108,11 @@ struct idpair { * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ -static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) +static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) { unsigned int i; - for (i = 0; i < ID_MAP_SIZE; i++) { + for (i = 0; i < BPF_ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; @@ -6823,8 +8224,8 @@ next: } /* Returns true if (rold safe implies rcur safe) */ -static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, - struct idpair *idmap) +static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, + struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) { bool equal; @@ -6850,6 +8251,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; switch (rold->type) { case SCALAR_VALUE: + if (env->explore_alu_limits) + return false; if (rcur->type == SCALAR_VALUE) { if (!rold->precise && !rcur->precise) return true; @@ -6866,6 +8269,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, */ return false; } + case PTR_TO_MAP_KEY: case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. @@ -6939,9 +8343,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; } -static bool stacksafe(struct bpf_func_state *old, - struct bpf_func_state *cur, - struct idpair *idmap) +static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, + struct bpf_func_state *cur, struct bpf_id_pair *idmap) { int i, spi; @@ -6986,9 +8389,8 @@ static bool stacksafe(struct bpf_func_state *old, continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; - if (!regsafe(&old->stack[spi].spilled_ptr, - &cur->stack[spi].spilled_ptr, - idmap)) + if (!regsafe(env, &old->stack[spi].spilled_ptr, + &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. 
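As background for the regsafe()/stacksafe() changes above: state pruning is only sound when the already-verified old state covers every value the new state can take, which is what range_within() and tnum_in() establish. A compact sketch of the scalar rule follows; it is illustrative only, and the real check also compares signed bounds, tnums and spilled slots:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch of scalar pruning safety. An old verified range [0, 10]
	 * covers a new range [2, 5], so the new path may be pruned; the
	 * reverse would be unsound. With explore_alu_limits set, scalars
	 * never match: two paths can agree on value ranges yet have
	 * recorded different alu_limit masks for a later pointer op, so
	 * both paths must be walked to completion.
	 */
	struct range { uint64_t umin, umax; bool precise; };

	static bool scalar_prune_ok(bool explore_alu_limits,
				    const struct range *old_r,
				    const struct range *cur)
	{
		if (explore_alu_limits)
			return false;
		if (!old_r->precise && !cur->precise)
			return true; /* value never influenced a verdict */
		return old_r->umin <= cur->umin && cur->umax <= old_r->umax;
	}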
@@ -7038,32 +8440,24 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ -static bool func_states_equal(struct bpf_func_state *old, +static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur) { - struct idpair *idmap; - bool ret = false; int i; - idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); - /* If we failed to allocate the idmap, just say it's not safe */ - if (!idmap) + memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); + for (i = 0; i < MAX_BPF_REG; i++) + if (!regsafe(env, &old->regs[i], &cur->regs[i], + env->idmap_scratch)) + return false; + + if (!stacksafe(env, old, cur, env->idmap_scratch)) return false; - for (i = 0; i < MAX_BPF_REG; i++) { - if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) - goto out_free; - } - - if (!stacksafe(old, cur, idmap)) - goto out_free; - if (!refsafe(old, cur)) - goto out_free; - ret = true; -out_free: - kfree(idmap); - return ret; + return false; + + return true; } static bool states_equal(struct bpf_verifier_env *env, @@ -7090,7 +8484,7 @@ static bool states_equal(struct bpf_verifier_env *env, for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; - if (!func_states_equal(old->frame[i], cur->frame[i])) + if (!func_states_equal(env, old->frame[i], cur->frame[i])) return false; } return true; @@ -7271,9 +8665,25 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) states_cnt++; if (sl->state.insn_idx != insn_idx) goto next; + if (sl->state.branches) { - if (states_maybe_looping(&sl->state, cur) && - states_equal(env, &sl->state, cur)) { + struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; + + if (frame->in_async_callback_fn && + frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { + /* Different async_entry_cnt means that the verifier is + * processing another entry into async callback. + * Seeing the same state is not an indication of infinite + * loop or infinite recursion. + * But finding the same state doesn't mean that it's safe + * to stop processing the current state. The previous state + * hasn't yet reached bpf_exit, since state.branches > 0. + * Checking in_async_callback_fn alone is not enough either. + * Since the verifier still needs to catch infinite loops + * inside async callbacks. 
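+ * E.g. a timer callback that branches back to its own entry must
+ * still be rejected, even though each iteration revisits what looks
+ * like an equivalent state.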
+ */ + } else if (states_maybe_looping(&sl->state, cur) && + states_equal(env, &sl->state, cur)) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); return -EINVAL; @@ -7367,7 +8777,7 @@ next: if (env->max_states_per_insn < states_cnt) env->max_states_per_insn = states_cnt; - if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) + if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) return push_jmp_history(env, cur); if (!add_new_state) @@ -7453,6 +8863,8 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type) case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: + case PTR_TO_BTF_ID: + case PTR_TO_BTF_ID_OR_NULL: return false; default: return true; @@ -7479,32 +8891,13 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) static int do_check(struct bpf_verifier_env *env) { - struct bpf_verifier_state *state; + struct bpf_verifier_state *state = env->cur_state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; bool do_print_state = false; int prev_insn_idx = -1; - env->prev_linfo = NULL; - - state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); - if (!state) - return -ENOMEM; - state->curframe = 0; - state->speculative = false; - state->branches = 1; - state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); - if (!state->frame[0]) { - kfree(state); - return -ENOMEM; - } - env->cur_state = state; - init_func_state(env, state->frame[0], - BPF_MAIN_FUNC /* callsite */, - 0 /* frameno */, - 0 /* subprogno, zero == main subprog */); - for (;;) { struct bpf_insn *insn; u8 class; @@ -7582,7 +8975,7 @@ static int do_check(struct bpf_verifier_env *env) } regs = cur_regs(env); - env->insn_aux_data[env->insn_idx].seen = true; + sanitize_mark_insn_seen(env); prev_insn_idx = env->insn_idx; if (class == BPF_ALU || class == BPF_ALU64) { @@ -7723,10 +9116,9 @@ static int do_check(struct bpf_verifier_env *env) if (insn->src_reg == BPF_PSEUDO_CALL) err = check_func_call(env, insn, &env->insn_idx); else - err = check_helper_call(env, insn->imm, env->insn_idx); + err = check_helper_call(env, insn, &env->insn_idx); if (err) return err; - } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || @@ -7768,21 +9160,6 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - /* eBPF calling convetion is such that R0 is used - * to return the value from eBPF program. 
- * Make sure that it's readable at this time - * of bpf_exit, which means that program wrote - * something into it earlier - */ - err = check_reg_arg(env, BPF_REG_0, SRC_OP); - if (err) - return err; - - if (is_pointer_value(env, BPF_REG_0)) { - verbose(env, "R0 leaks addr as return value\n"); - return -EACCES; - } - err = check_return_code(env); if (err) return err; @@ -7817,7 +9194,7 @@ process_bpf_exit: return err; env->insn_idx++; - env->insn_aux_data[env->insn_idx].seen = true; + sanitize_mark_insn_seen(env); } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; @@ -7830,7 +9207,73 @@ process_bpf_exit: env->insn_idx++; } - env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; + return 0; +} + +/* replace pseudo btf_id with kernel symbol address */ +static int check_pseudo_btf_id(struct bpf_verifier_env *env, + struct bpf_insn *insn, + struct bpf_insn_aux_data *aux) +{ + u32 type, id = insn->imm; + const struct btf_type *t; + const char *sym_name; + u64 addr; + + if (!btf_vmlinux) { + verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); + return -EINVAL; + } + + if (insn[1].imm != 0) { + verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n"); + return -EINVAL; + } + + t = btf_type_by_id(btf_vmlinux, id); + if (!t) { + verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); + return -ENOENT; + } + + if (!btf_type_is_var(t)) { + verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", + id); + return -EINVAL; + } + + sym_name = btf_name_by_offset(btf_vmlinux, t->name_off); + addr = kallsyms_lookup_name(sym_name); + if (!addr) { + verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", + sym_name); + return -ENOENT; + } + + insn[0].imm = (u32)addr; + insn[1].imm = addr >> 32; + + type = t->type; + t = btf_type_skip_modifiers(btf_vmlinux, type, NULL); + if (!btf_type_is_struct(t)) { + const struct btf_type *ret; + const char *tname; + u32 tsize; + + /* resolve the type size of ksym. */ + ret = btf_resolve_size(btf_vmlinux, t, &tsize, NULL, NULL); + if (IS_ERR(ret)) { + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", + tname, PTR_ERR(ret)); + return -EINVAL; + } + aux->btf_var.reg_type = PTR_TO_MEM; + aux->btf_var.mem_size = tsize; + } else { + aux->btf_var.reg_type = PTR_TO_BTF_ID; + aux->btf_var.btf_id = type; + } return 0; } @@ -7890,6 +9333,11 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return -EINVAL; } + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { + verbose(env, "bpf_struct_ops map cannot be used in prog\n"); + return -EINVAL; + } + return 0; } @@ -7899,10 +9347,14 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map) map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); } -/* look for pseudo eBPF instructions that access map FDs and - * replace them with actual map pointers +/* find and rewrite pseudo imm in ld_imm64 instructions: + * + * 1. if it accesses map FD, replace it with actual map pointer. + * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. + * + * NOTE: btf_vmlinux is required for converting pseudo btf_id. 
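+ * E.g. a ld_imm64 whose src_reg is BPF_PSEUDO_BTF_ID and whose imm is
+ * the BTF id of a kernel VAR is rewritten by check_pseudo_btf_id()
+ * above so that insn[0].imm/insn[1].imm carry the ksym's address.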
*/ -static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) +static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; @@ -7943,6 +9395,20 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) /* valid generic load 64-bit imm */ goto next_insn; + if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { + aux = &env->insn_aux_data[i]; + err = check_pseudo_btf_id(env, insn, aux); + if (err) + return err; + goto next_insn; + } + + if (insn[0].src_reg == BPF_PSEUDO_FUNC) { + aux = &env->insn_aux_data[i]; + aux->ptr_type = PTR_TO_FUNC; + goto next_insn; + } + /* In final convert_pseudo_ld_imm64() step, this is * converted into regular 64-bit imm load insn. */ @@ -8021,11 +9487,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ - map = bpf_map_inc(map, false); - if (IS_ERR(map)) { - fdput(f); - return PTR_ERR(map); - } + bpf_map_inc(map); aux->map_index = env->used_map_cnt; env->used_maps[env->used_map_cnt++] = map; @@ -8082,20 +9544,26 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) int insn_cnt = env->prog->len; int i; - for (i = 0; i < insn_cnt; i++, insn++) - if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) - insn->src_reg = 0; + for (i = 0; i < insn_cnt; i++, insn++) { + if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) + continue; + if (insn->src_reg == BPF_PSEUDO_FUNC) + continue; + insn->src_reg = 0; + } } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ -static int adjust_insn_aux_data(struct bpf_verifier_env *env, - struct bpf_prog *new_prog, u32 off, u32 cnt) +static void adjust_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_insn_aux_data *new_data, + struct bpf_prog *new_prog, u32 off, u32 cnt) { - struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + struct bpf_insn_aux_data *old_data = env->insn_aux_data; struct bpf_insn *insn = new_prog->insnsi; + bool old_seen = old_data[off].seen; u32 prog_len; int i; @@ -8106,22 +9574,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); if (cnt == 1) - return 0; + return; prog_len = new_prog->len; - new_data = vzalloc(array_size(prog_len, - sizeof(struct bpf_insn_aux_data))); - if (!new_data) - return -ENOMEM; + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) { - new_data[i].seen = true; + /* Expand insni[off]'s seen count to the patched range. 
*/ + new_data[i].seen = old_seen; new_data[i].zext_dst = insn_has_def32(env, insn + i); } env->insn_aux_data = new_data; vfree(old_data); - return 0; } static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) @@ -8142,6 +9607,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; + struct bpf_insn_aux_data *new_data = NULL; + + if (len > 1) { + new_data = vzalloc(array_size(env->prog->len + len - 1, + sizeof(struct bpf_insn_aux_data))); + if (!new_data) + return NULL; + } new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (IS_ERR(new_prog)) { @@ -8149,10 +9622,10 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of verbose(env, "insn %d cannot be patched due to 16-bit range\n", env->insn_aux_data[off].orig_idx); + vfree(new_data); return NULL; } - if (adjust_insn_aux_data(env, new_prog, off, len)) - return NULL; + adjust_insn_aux_data(env, new_data, new_prog, off, len); adjust_subprog_starts(env, off, len); return new_prog; } @@ -8327,6 +9800,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env) if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); + aux_data[i].zext_dst = false; } } @@ -8536,35 +10010,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; + bool ctx_access; if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || - insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) + insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { type = BPF_READ; - else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || - insn->code == (BPF_STX | BPF_MEM | BPF_H) || - insn->code == (BPF_STX | BPF_MEM | BPF_W) || - insn->code == (BPF_STX | BPF_MEM | BPF_DW)) + ctx_access = true; + } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || + insn->code == (BPF_STX | BPF_MEM | BPF_H) || + insn->code == (BPF_STX | BPF_MEM | BPF_W) || + insn->code == (BPF_STX | BPF_MEM | BPF_DW) || + insn->code == (BPF_ST | BPF_MEM | BPF_B) || + insn->code == (BPF_ST | BPF_MEM | BPF_H) || + insn->code == (BPF_ST | BPF_MEM | BPF_W) || + insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { type = BPF_WRITE; - else + ctx_access = BPF_CLASS(insn->code) == BPF_STX; + } else { continue; + } if (type == BPF_WRITE && - env->insn_aux_data[i + delta].sanitize_stack_off) { + env->insn_aux_data[i + delta].sanitize_stack_spill) { struct bpf_insn patch[] = { - /* Sanitize suspicious stack slot with zero. 
- * There are no memory dependencies for this store, - * since it's only using frame pointer and immediate - * constant of zero - */ - BPF_ST_MEM(BPF_DW, BPF_REG_FP, - env->insn_aux_data[i + delta].sanitize_stack_off, - 0), - /* the original STX instruction will immediately - * overwrite the same stack slot with appropriate value - */ *insn, + BPF_ST_NOSPEC(), }; cnt = ARRAY_SIZE(patch); @@ -8578,6 +10050,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } + if (!ctx_access) + continue; + switch (env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) @@ -8594,6 +10069,16 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) case PTR_TO_XDP_SOCK: convert_ctx_access = bpf_xdp_sock_convert_ctx_access; break; + case PTR_TO_BTF_ID: + if (type == BPF_READ) { + insn->code = BPF_LDX | BPF_PROBE_MEM | + BPF_SIZE((insn)->code); + env->prog->aux->num_exentries++; + } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) { + verbose(env, "Writes through BTF pointers are not allowed\n"); + return -EINVAL; + } + continue; default: continue; } @@ -8639,6 +10124,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) if (is_narrower_load && size < target_size) { u8 shift = bpf_ctx_narrow_access_offset( off, size, size_default) * 8; + if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { + verbose(env, "bpf verifier narrow ctx load misconfigured\n"); + return -EINVAL; + } if (ctx_field_size <= 4) { if (shift) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, @@ -8682,9 +10171,9 @@ static int jit_subprogs(struct bpf_verifier_env *env) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { - if (insn->code != (BPF_JMP | BPF_CALL) || - insn->src_reg != BPF_PSEUDO_CALL) + if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) continue; + /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. @@ -8705,6 +10194,12 @@ static int jit_subprogs(struct bpf_verifier_env *env) env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; + if (bpf_pseudo_func(insn)) + /* jit (e.g. x86_64) may emit fewer instructions + * if it learns a u32 imm is the same as a u64 imm. + * Force a non zero here. + */ + insn[1].imm = 1; } err = bpf_prog_alloc_jited_linfo(prog); @@ -8765,8 +10260,13 @@ static int jit_subprogs(struct bpf_verifier_env *env) for (i = 0; i < env->subprog_cnt; i++) { insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { - if (insn->code != (BPF_JMP | BPF_CALL) || - insn->src_reg != BPF_PSEUDO_CALL) + if (bpf_pseudo_func(insn)) { + subprog = insn->off; + insn[0].imm = (u32)(long)func[subprog]->bpf_func; + insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; + continue; + } + if (!bpf_pseudo_call(insn)) continue; subprog = insn->off; insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - @@ -8811,8 +10311,13 @@ static int jit_subprogs(struct bpf_verifier_env *env) * later look the same as if they were interpreted only. 
*/ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { - if (insn->code != (BPF_JMP | BPF_CALL) || - insn->src_reg != BPF_PSEUDO_CALL) + if (bpf_pseudo_func(insn)) { + insn[0].imm = env->insn_aux_data[i].call_imm; + insn[1].imm = insn->off; + insn->off = 0; + continue; + } + if (!bpf_pseudo_call(insn)) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); @@ -8834,8 +10339,7 @@ out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { - if (insn->code != (BPF_JMP | BPF_CALL) || - insn->src_reg != BPF_PSEUDO_CALL) + if (!bpf_pseudo_call(insn)) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; @@ -8863,8 +10367,7 @@ static int fixup_call_args(struct bpf_verifier_env *env) } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { - if (insn->code != (BPF_JMP | BPF_CALL) || - insn->src_reg != BPF_PSEUDO_CALL) + if (!bpf_pseudo_call(insn)) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) @@ -8884,6 +10387,7 @@ static int fixup_call_args(struct bpf_verifier_env *env) static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; + bool expect_blinding = bpf_jit_blinding_enabled(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; @@ -8892,7 +10396,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; - int i, cnt, delta = 0; + int i, ret, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || @@ -8900,30 +10404,30 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; - struct bpf_insn mask_and_div[] = { - BPF_MOV32_REG(insn->src_reg, insn->src_reg), - /* Rx div 0 -> 0 */ - BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), + bool isdiv = BPF_OP(insn->code) == BPF_DIV; + struct bpf_insn *patchlet; + struct bpf_insn chk_and_div[] = { + /* [R,W]x div 0 -> 0 */ + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JNE | BPF_K, insn->src_reg, + 0, 2, 0), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; - struct bpf_insn mask_and_mod[] = { - BPF_MOV32_REG(insn->src_reg, insn->src_reg), - /* Rx mod 0 -> Rx */ - BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), + struct bpf_insn chk_and_mod[] = { + /* [R,W]x mod 0 -> [R,W]x */ + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JEQ | BPF_K, insn->src_reg, + 0, 1 + (is64 ? 0 : 1), 0), *insn, + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), }; - struct bpf_insn *patchlet; - if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || - insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { - patchlet = mask_and_div + (is64 ? 1 : 0); - cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); - } else { - patchlet = mask_and_mod + (is64 ? 1 : 0); - cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); - } + patchlet = isdiv ? chk_and_div : chk_and_mod; + cnt = isdiv ? ARRAY_SIZE(chk_and_div) : + ARRAY_SIZE(chk_and_mod) - (is64 ? 
2 : 0); new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) @@ -8960,7 +10464,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; struct bpf_insn insn_buf[16]; struct bpf_insn *patch = &insn_buf[0]; - bool issrc, isneg; + bool issrc, isneg, isimm; u32 off_reg; aux = &env->insn_aux_data[i + delta]; @@ -8971,28 +10475,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) isneg = aux->alu_state & BPF_ALU_NEG_VALUE; issrc = (aux->alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC; + isimm = aux->alu_state & BPF_ALU_IMMEDIATE; off_reg = issrc ? insn->src_reg : insn->dst_reg; - if (isneg) - *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); - *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); - *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); - *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); - *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); - *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); - if (issrc) { - *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, - off_reg); - insn->src_reg = BPF_REG_AX; + if (isimm) { + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); } else { - *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, - BPF_REG_AX); + if (isneg) + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); + *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); + *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); + *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); } + if (!issrc) + *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); + insn->src_reg = BPF_REG_AX; if (isneg) insn->code = insn->code == code_add ? code_sub : code_add; *patch++ = *insn; - if (issrc && isneg) + if (issrc && isneg && !isimm) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); cnt = patch - insn_buf; @@ -9050,7 +10555,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) return -EINVAL; } - map_ptr = BPF_MAP_PTR(aux->map_state); + map_ptr = BPF_MAP_PTR(aux->map_ptr_state); insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, @@ -9069,6 +10574,39 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) continue; } + if (insn->imm == BPF_FUNC_timer_set_callback) { + /* The verifier will process callback_fn as many times as necessary + * with different maps and the register states prepared by + * set_timer_callback_state will be accurate. + * + * The following use case is valid: + * map1 is shared by prog1, prog2, prog3. + * prog1 calls bpf_timer_init for some map1 elements + * prog2 calls bpf_timer_set_callback for some map1 elements. + * Those that were not bpf_timer_init-ed will return -EINVAL. + * prog3 calls bpf_timer_start for some map1 elements. + * Those that were not both bpf_timer_init-ed and + * bpf_timer_set_callback-ed will return -EINVAL. 
+ */ + struct bpf_insn ld_addrs[2] = { + BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), + }; + + insn_buf[0] = ld_addrs[0]; + insn_buf[1] = ld_addrs[1]; + insn_buf[2] = *insn; + cnt = 3; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. @@ -9084,12 +10622,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; - map_ptr = BPF_MAP_PTR(aux->map_state); + map_ptr = BPF_MAP_PTR(aux->map_ptr_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); - if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { + if (cnt == -EOPNOTSUPP) + goto patch_map_ops_generic; + if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } @@ -9119,7 +10659,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_peek_elem, (int (*)(struct bpf_map *map, void *value))NULL)); - +patch_map_ops_generic: switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - @@ -9150,6 +10690,30 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) goto patch_call_imm; } + if (prog->jit_requested && BITS_PER_LONG == 64 && + insn->imm == BPF_FUNC_jiffies64) { + struct bpf_insn ld_jiffies_addr[2] = { + BPF_LD_IMM64(BPF_REG_0, + (unsigned long)&jiffies), + }; + + insn_buf[0] = ld_jiffies_addr[0]; + insn_buf[1] = ld_jiffies_addr[1]; + insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, + BPF_REG_0, 0); + cnt = 3; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, + cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } + patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed @@ -9164,6 +10728,23 @@ patch_call_imm: insn->imm = fn->func - __bpf_call_base; } + /* Since poke tab is now finalized, publish aux to tracker. */ + for (i = 0; i < prog->aux->size_poke_tab; i++) { + map_ptr = prog->aux->poke_tab[i].tail_call.map; + if (!map_ptr->ops->map_poke_track || + !map_ptr->ops->map_poke_untrack || + !map_ptr->ops->map_poke_run) { + verbose(env, "bpf verifier is misconfigured\n"); + return -EINVAL; + } + + ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); + if (ret < 0) { + verbose(env, "tracking tail call prog failed\n"); + return ret; + } + } + return 0; } @@ -9179,6 +10760,7 @@ static void free_states(struct bpf_verifier_env *env) kfree(sl); sl = sln; } + env->free_list = NULL; if (!env->explored_states) return; @@ -9192,11 +10774,159 @@ static void free_states(struct bpf_verifier_env *env) kfree(sl); sl = sln; } + env->explored_states[i] = NULL; + } +} + +/* The verifier is using insn_aux_data[] to store temporary data during + * verification and to store information for passes that run after the + * verification like dead code sanitization. do_check_common() for subprogram N + * may analyze many other subprograms. 
sanitize_insn_aux_data() clears all + * temporary data after do_check_common() finds that subprogram N cannot be + * verified independently. pass_cnt counts the number of times + * do_check_common() was run and insn->aux->seen tells the pass number + * insn_aux_data was touched. These variables are compared to clear temporary + * data from failed pass. For testing and experiments do_check_common() can be + * run multiple times even when prior attempt to verify is unsuccessful. + */ +static void sanitize_insn_aux_data(struct bpf_verifier_env *env) +{ + struct bpf_insn *insn = env->prog->insnsi; + struct bpf_insn_aux_data *aux; + int i, class; + + for (i = 0; i < env->prog->len; i++) { + class = BPF_CLASS(insn[i].code); + if (class != BPF_LDX && class != BPF_STX) + continue; + aux = &env->insn_aux_data[i]; + if (aux->seen != env->pass_cnt) + continue; + memset(aux, 0, offsetof(typeof(*aux), orig_idx)); + } +} + +static int do_check_common(struct bpf_verifier_env *env, int subprog) +{ + struct bpf_verifier_state *state; + struct bpf_reg_state *regs; + int ret, i; + + env->prev_linfo = NULL; + env->pass_cnt++; + + state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); + if (!state) + return -ENOMEM; + state->curframe = 0; + state->speculative = false; + state->branches = 1; + state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); + if (!state->frame[0]) { + kfree(state); + return -ENOMEM; + } + env->cur_state = state; + init_func_state(env, state->frame[0], + BPF_MAIN_FUNC /* callsite */, + 0 /* frameno */, + subprog); + + regs = state->frame[state->curframe]->regs; + if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { + ret = btf_prepare_func_args(env, subprog, regs); + if (ret) + goto out; + for (i = BPF_REG_1; i <= BPF_REG_5; i++) { + if (regs[i].type == PTR_TO_CTX) + mark_reg_known_zero(env, regs, i); + else if (regs[i].type == SCALAR_VALUE) + mark_reg_unknown(env, regs, i); + } + } else { + /* 1st arg to a function */ + regs[BPF_REG_1].type = PTR_TO_CTX; + mark_reg_known_zero(env, regs, BPF_REG_1); + ret = btf_check_func_arg_match(env, subprog, regs); + if (ret == -EFAULT) + /* unlikely verifier bug. abort. + * ret == 0 and ret < 0 are sadly acceptable for + * main() function due to backward compatibility. + * Like socket filter program may be written as: + * int bpf_prog(struct pt_regs *ctx) + * and never dereference that ctx in the program. + * 'struct pt_regs' is a type mismatch for socket + * filter that should be using 'struct __sk_buff'. + */ + goto out; } - kvfree(env->explored_states); + ret = do_check(env); +out: + free_verifier_state(env->cur_state, true); + env->cur_state = NULL; + while (!pop_stack(env, NULL, NULL)); + free_states(env); + if (ret) + /* clean aux data in case subprog was rejected */ + sanitize_insn_aux_data(env); + return ret; } +/* Verify all global functions in a BPF program one by one based on their BTF. + * All global functions must pass verification. Otherwise the whole program is rejected. + * Consider: + * int bar(int); + * int foo(int f) + * { + * return bar(f); + * } + * int bar(int b) + * { + * ... + * } + * foo() will be verified first for R1=any_scalar_value. During verification it + * will be assumed that bar() already verified successfully and call to bar() + * from foo() will be checked for type match only. Later bar() will be verified + * independently to check that it's safe for R1=any_scalar_value. 
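+ *
+ * As a sketch of the initial state derived from BTF (the example
+ * prototype is illustrative only): for
+ *   int foo(struct __sk_buff *skb, int f);
+ * verification starts with R1=PTR_TO_CTX (non-NULL context pointer)
+ * and R2=SCALAR_VALUE (unknown scalar), so foo() must be proven safe
+ * for any scalar f and any valid skb.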
+ */ +static int do_check_subprogs(struct bpf_verifier_env *env) +{ + struct bpf_prog_aux *aux = env->prog->aux; + int i, ret; + + if (!aux->func_info) + return 0; + + for (i = 1; i < env->subprog_cnt; i++) { + if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) + continue; + env->insn_idx = env->subprog_info[i].start; + WARN_ON_ONCE(env->insn_idx == 0); + ret = do_check_common(env, i); + if (ret) { + return ret; + } else if (env->log.level & BPF_LOG_LEVEL) { + verbose(env, + "Func#%d is safe for any args that match its prototype\n", + i); + } + } + return 0; +} + +static int do_check_main(struct bpf_verifier_env *env) +{ + int ret; + + env->insn_idx = 0; + ret = do_check_common(env, 0); + if (!ret) + env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; + return ret; +} + + static void print_verification_stats(struct bpf_verifier_env *env) { int i; @@ -9221,6 +10951,321 @@ static void print_verification_stats(struct bpf_verifier_env *env) env->peak_states, env->longest_mark_read_walk); } +static int check_struct_ops_btf_id(struct bpf_verifier_env *env) +{ + const struct btf_type *t, *func_proto; + const struct bpf_struct_ops *st_ops; + const struct btf_member *member; + struct bpf_prog *prog = env->prog; + u32 btf_id, member_idx; + const char *mname; + + btf_id = prog->aux->attach_btf_id; + st_ops = bpf_struct_ops_find(btf_id); + if (!st_ops) { + verbose(env, "attach_btf_id %u is not a supported struct\n", + btf_id); + return -ENOTSUPP; + } + + t = st_ops->type; + member_idx = prog->expected_attach_type; + if (member_idx >= btf_type_vlen(t)) { + verbose(env, "attach to invalid member idx %u of struct %s\n", + member_idx, st_ops->name); + return -EINVAL; + } + + member = &btf_type_member(t)[member_idx]; + mname = btf_name_by_offset(btf_vmlinux, member->name_off); + func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, + NULL); + if (!func_proto) { + verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", + mname, member_idx, st_ops->name); + return -EINVAL; + } + + if (st_ops->check_member) { + int err = st_ops->check_member(t, member); + + if (err) { + verbose(env, "attach to unsupported member %s of struct %s\n", + mname, st_ops->name); + return err; + } + } + + prog->aux->attach_func_proto = func_proto; + prog->aux->attach_func_name = mname; + env->ops = st_ops->verifier_ops; + + return 0; +} +#define SECURITY_PREFIX "security_" + +static int check_attach_modify_return(struct bpf_verifier_env *env) +{ + struct bpf_prog *prog = env->prog; + unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr; + + if (within_error_injection_list(addr)) + return 0; + + /* This is expected to be cleaned up in the future with the KRSI effort + * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h. 
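+	 *
+	 * As an illustration (hook chosen only as an example): an
+	 * fmod_ret program targeting security_file_open matches the
+	 * "security_" prefix below and needs only CAP_MAC_ADMIN, while
+	 * a target that is neither on the error injection list nor a
+	 * security_*() hook is rejected with -EINVAL.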
+ */ + if (!strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, + sizeof(SECURITY_PREFIX) - 1)) { + + if (!capable(CAP_MAC_ADMIN)) + return -EPERM; + + return 0; + } + + verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n", + prog->aux->attach_btf_id, prog->aux->attach_func_name); + + return -EINVAL; +} + +static int check_attach_btf_id(struct bpf_verifier_env *env) +{ + struct bpf_prog *prog = env->prog; + bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; + struct bpf_prog *tgt_prog = prog->aux->linked_prog; + u32 btf_id = prog->aux->attach_btf_id; + const char prefix[] = "btf_trace_"; + struct btf_func_model fmodel; + int ret = 0, subprog = -1, i; + struct bpf_trampoline *tr; + const struct btf_type *t; + bool conservative = true; + const char *tname; + struct btf *btf; + long addr; + u64 key; + + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) + return check_struct_ops_btf_id(env); + + if (prog->type != BPF_PROG_TYPE_TRACING && + prog->type != BPF_PROG_TYPE_LSM && + !prog_extension) + return 0; + + if (!btf_id) { + verbose(env, "Tracing programs must provide btf_id\n"); + return -EINVAL; + } + btf = bpf_prog_get_target_btf(prog); + if (!btf) { + verbose(env, + "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); + return -EINVAL; + } + t = btf_type_by_id(btf, btf_id); + if (!t) { + verbose(env, "attach_btf_id %u is invalid\n", btf_id); + return -EINVAL; + } + tname = btf_name_by_offset(btf, t->name_off); + if (!tname) { + verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); + return -EINVAL; + } + if (tgt_prog) { + struct bpf_prog_aux *aux = tgt_prog->aux; + + for (i = 0; i < aux->func_info_cnt; i++) + if (aux->func_info[i].type_id == btf_id) { + subprog = i; + break; + } + if (subprog == -1) { + verbose(env, "Subprog %s doesn't exist\n", tname); + return -EINVAL; + } + conservative = aux->func_info_aux[subprog].unreliable; + if (prog_extension) { + if (conservative) { + verbose(env, + "Cannot replace static functions\n"); + return -EINVAL; + } + if (!prog->jit_requested) { + verbose(env, + "Extension programs should be JITed\n"); + return -EINVAL; + } + env->ops = bpf_verifier_ops[tgt_prog->type]; + } + if (!tgt_prog->jited) { + verbose(env, "Can attach to only JITed progs\n"); + return -EINVAL; + } + if (tgt_prog->type == prog->type) { + /* Cannot fentry/fexit another fentry/fexit program. + * Cannot attach program extension to another extension. + * It's ok to attach fentry/fexit to extension program. + */ + verbose(env, "Cannot recursively attach\n"); + return -EINVAL; + } + if (tgt_prog->type == BPF_PROG_TYPE_TRACING && + prog_extension && + (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || + tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { + /* Program extensions can extend all program types + * except fentry/fexit. The reason is the following. + * The fentry/fexit programs are used for performance + * analysis, stats and can be attached to any program + * type except themselves. When extension program is + * replacing XDP function it is necessary to allow + * performance analysis of all functions. Both original + * XDP program and its program extension. Hence + * attaching fentry/fexit to BPF_PROG_TYPE_EXT is + * allowed. If extending of fentry/fexit was allowed it + * would be possible to create long call chain + * fentry->extension->fentry->extension beyond + * reasonable stack size. Hence extending fentry is not + * allowed. 
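+			 *
+			 * Sketched as a rule table:
+			 *   fentry/fexit -> any non-tracing prog (incl. EXT): ok
+			 *   EXT -> XDP and other non-tracing prog types     : ok
+			 *   EXT -> fentry/fexit programs                    : rejected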
+ */ + verbose(env, "Cannot extend fentry/fexit\n"); + return -EINVAL; + } + key = ((u64)aux->id) << 32 | btf_id; + } else { + if (prog_extension) { + verbose(env, "Cannot replace kernel functions\n"); + return -EINVAL; + } + key = btf_id; + } + + switch (prog->expected_attach_type) { + case BPF_TRACE_RAW_TP: + if (tgt_prog) { + verbose(env, + "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); + return -EINVAL; + } + if (!btf_type_is_typedef(t)) { + verbose(env, "attach_btf_id %u is not a typedef\n", + btf_id); + return -EINVAL; + } + if (strncmp(prefix, tname, sizeof(prefix) - 1)) { + verbose(env, "attach_btf_id %u points to wrong type name %s\n", + btf_id, tname); + return -EINVAL; + } + tname += sizeof(prefix) - 1; + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_ptr(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_func_proto(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + + /* remember two read only pointers that are valid for + * the life time of the kernel + */ + prog->aux->attach_func_name = tname; + prog->aux->attach_func_proto = t; + prog->aux->attach_btf_trace = true; + return 0; + case BPF_TRACE_ITER: + if (!btf_type_is_func(t)) { + verbose(env, "attach_btf_id %u is not a function\n", + btf_id); + return -EINVAL; + } + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_func_proto(t)) + return -EINVAL; + prog->aux->attach_func_name = tname; + prog->aux->attach_func_proto = t; + if (!bpf_iter_prog_supported(prog)) + return -EINVAL; + ret = btf_distill_func_proto(&env->log, btf, t, + tname, &fmodel); + return ret; + default: + if (!prog_extension) + return -EINVAL; + /* fallthrough */ + case BPF_MODIFY_RETURN: + case BPF_LSM_MAC: + case BPF_TRACE_FENTRY: + case BPF_TRACE_FEXIT: + prog->aux->attach_func_name = tname; + if (prog->type == BPF_PROG_TYPE_LSM) { + ret = bpf_lsm_verify_prog(&env->log, prog); + if (ret < 0) + return ret; + } + + if (!btf_type_is_func(t)) { + verbose(env, "attach_btf_id %u is not a function\n", + btf_id); + return -EINVAL; + } + if (prog_extension && + btf_check_type_match(env, prog, btf, t)) + return -EINVAL; + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_func_proto(t)) + return -EINVAL; + tr = bpf_trampoline_lookup(key); + if (!tr) + return -ENOMEM; + /* t is either vmlinux type or another program's type */ + prog->aux->attach_func_proto = t; + mutex_lock(&tr->mutex); + if (tr->func.addr) { + prog->aux->trampoline = tr; + goto out; + } + if (tgt_prog && conservative) { + prog->aux->attach_func_proto = NULL; + t = NULL; + } + ret = btf_distill_func_proto(&env->log, btf, t, + tname, &tr->func.model); + if (ret < 0) + goto out; + if (tgt_prog) { + if (subprog == 0) + addr = (long) tgt_prog->bpf_func; + else + addr = (long) tgt_prog->aux->func[subprog]->bpf_func; + } else { + addr = kallsyms_lookup_name(tname); + if (!addr) { + verbose(env, + "The address of function %s cannot be found\n", + tname); + ret = -ENOENT; + goto out; + } + } + tr->func.addr = (void *)addr; + prog->aux->trampoline = tr; + + if (prog->expected_attach_type == BPF_MODIFY_RETURN) + ret = check_attach_modify_return(env); +out: + mutex_unlock(&tr->mutex); + if (ret) + bpf_trampoline_put(tr); + return ret; + } +} + int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -9252,7 +11297,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->insn_aux_data[i].orig_idx = i; env->prog = *prog; env->ops = 
bpf_verifier_ops[env->prog->type]; - is_priv = capable(CAP_SYS_ADMIN); + is_priv = bpf_capable(); + + if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { + mutex_lock(&bpf_verifier_lock); + if (!btf_vmlinux) + btf_vmlinux = btf_parse_vmlinux(); + mutex_unlock(&bpf_verifier_lock); + } /* grab the mutex to protect few globals used by verifier */ if (!is_priv) @@ -9273,6 +11325,13 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, goto err_unlock; } + if (IS_ERR(btf_vmlinux)) { + /* Either gcc or pahole or kernel are broken. */ + verbose(env, "in-kernel BTF is malformed\n"); + ret = PTR_ERR(btf_vmlinux); + goto skip_full_check; + } + env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; @@ -9280,14 +11339,15 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->strict_alignment = false; env->allow_ptr_leaks = is_priv; + env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); + env->allow_ptr_leaks = bpf_allow_ptr_leaks(); + env->bypass_spec_v1 = bpf_bypass_spec_v1(); + env->bypass_spec_v4 = bpf_bypass_spec_v4(); + env->bpf_capable = bpf_capable(); if (is_priv) env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; - ret = replace_map_fd_with_map_ptr(env); - if (ret < 0) - goto skip_full_check; - if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env->prog); if (ret) @@ -9309,22 +11369,26 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, if (ret < 0) goto skip_full_check; + ret = check_attach_btf_id(env); + if (ret) + goto skip_full_check; + + ret = resolve_pseudo_ldimm64(env); + if (ret < 0) + goto skip_full_check; + ret = check_cfg(env); if (ret < 0) goto skip_full_check; - ret = do_check(env); - if (env->cur_state) { - free_verifier_state(env->cur_state, true); - env->cur_state = NULL; - } + ret = do_check_subprogs(env); + ret = ret ?: do_check_main(env); if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) ret = bpf_prog_offload_finalize(env); skip_full_check: - while (!pop_stack(env, NULL, NULL)); - free_states(env); + kvfree(env->explored_states); if (ret == 0) ret = check_max_stack_depth(env); diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 82a1ffe15dfa..14156faf75f6 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -11,17 +11,15 @@ struct xsk_map { struct bpf_map map; - struct xdp_sock **xsk_map; struct list_head __percpu *flush_list; spinlock_t lock; /* Synchronize map updates */ + struct xdp_sock *xsk_map[]; }; int xsk_map_inc(struct xsk_map *map) { - struct bpf_map *m = &map->map; - - m = bpf_map_inc(m, false); - return PTR_ERR_OR_ZERO(m); + bpf_map_inc(&map->map); + return 0; } void xsk_map_put(struct xsk_map *map) @@ -80,9 +78,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs, static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) { + struct bpf_map_memory mem; + int cpu, err, numa_node; struct xsk_map *m; - int cpu, err; - u64 cost; + u64 cost, size; if (!capable(CAP_NET_ADMIN)) return ERR_PTR(-EPERM); @@ -92,44 +91,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) return ERR_PTR(-EINVAL); - m = kzalloc(sizeof(*m), GFP_USER); - if (!m) + numa_node = bpf_map_attr_numa_node(attr); + size = struct_size(m, xsk_map, attr->max_entries); + cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus()); + + err = bpf_map_charge_init(&mem, cost); + if (err < 0) + return 
ERR_PTR(err); + + m = bpf_map_area_alloc(size, numa_node); + if (!m) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } bpf_map_init_from_attr(&m->map, attr); + bpf_map_charge_move(&m->map.memory, &mem); spin_lock_init(&m->lock); - cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *); - cost += sizeof(struct list_head) * num_possible_cpus(); - - /* Notice returns -EPERM on if map size is larger than memlock limit */ - err = bpf_map_charge_init(&m->map.memory, cost); - if (err) - goto free_m; - - err = -ENOMEM; - m->flush_list = alloc_percpu(struct list_head); - if (!m->flush_list) - goto free_charge; + if (!m->flush_list) { + bpf_map_charge_finish(&m->map.memory); + bpf_map_area_free(m); + return ERR_PTR(-ENOMEM); + } for_each_possible_cpu(cpu) INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu)); - m->xsk_map = bpf_map_area_alloc(m->map.max_entries * - sizeof(struct xdp_sock *), - m->map.numa_node); - if (!m->xsk_map) - goto free_percpu; return &m->map; - -free_percpu: - free_percpu(m->flush_list); -free_charge: - bpf_map_charge_finish(&m->map.memory); -free_m: - kfree(m); - return ERR_PTR(err); } static void xsk_map_free(struct bpf_map *map) @@ -139,8 +129,7 @@ static void xsk_map_free(struct bpf_map *map) bpf_clear_redirect_map(map); synchronize_net(); free_percpu(m->flush_list); - bpf_map_area_free(m->xsk_map); - kfree(m); + bpf_map_area_free(m); } static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) @@ -172,6 +161,22 @@ struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key) return xs; } +static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +{ + const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2; + struct bpf_insn *insn = insn_buf; + + *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); + *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *))); + *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map)); + *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + *insn++ = BPF_MOV64_IMM(ret, 0); + return insn - insn_buf; +} + int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, struct xdp_sock *xs) { @@ -307,13 +312,17 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, spin_unlock_bh(&map->lock); } +static int xsk_map_btf_id; const struct bpf_map_ops xsk_map_ops = { .map_alloc = xsk_map_alloc, .map_free = xsk_map_free, .map_get_next_key = xsk_map_get_next_key, .map_lookup_elem = xsk_map_lookup_elem, + .map_gen_lookup = xsk_map_gen_lookup, .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only, .map_update_elem = xsk_map_update_elem, .map_delete_elem = xsk_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "xsk_map", + .map_btf_id = &xsk_map_btf_id, }; diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile index 5d7a76bfbbb7..cc3765e92885 100644 --- a/kernel/cgroup/Makefile +++ b/kernel/cgroup/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CGROUP_PIDS) += pids.o obj-$(CONFIG_CGROUP_RDMA) += rdma.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_CGROUP_DEBUG) += debug.o +obj-y += mbuf.o sli.o diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 809e34a3c017..e998c6872ec4 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -274,5 +274,6 @@ void cgroup1_check_for_release(struct cgroup *cgrp); int 
cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param); int cgroup1_get_tree(struct fs_context *fc); int cgroup1_reconfigure(struct fs_context *ctx); +int cgroup_id_show(struct seq_file *seq, void *v); #endif /* __CGROUP_INTERNAL_H */ diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index f684c82efc2e..77a470e66997 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -15,7 +15,6 @@ #include <linux/pid_namespace.h> #include <linux/cgroupstats.h> #include <linux/fs_parser.h> - #include <trace/events/cgroup.h> #define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__) @@ -549,6 +548,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. + */ + if ((of->file->f_cred->user_ns != &init_user_ns) || + !capable(CAP_SYS_ADMIN)) + return -EPERM; + cgrp = cgroup_kn_lock_live(of->kn, false); if (!cgrp) return -ENODEV; @@ -625,6 +632,10 @@ struct cftype cgroup1_base_files[] = { .read_u64 = cgroup_clone_children_read, .write_u64 = cgroup_clone_children_write, }, + { + .name = "cgroup.id", + .seq_show = cgroup_id_show, + }, { .name = "cgroup.sane_behavior", .flags = CFTYPE_ONLY_ON_ROOT, @@ -914,6 +925,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result); if (opt == -ENOPARAM) { if (strcmp(param->key, "source") == 0) { + if (fc->source) + return invalf(fc, "Multiple sources not supported"); fc->source = param->string; param->string = NULL; return 0; @@ -953,6 +966,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) /* Specifying two release agents is forbidden */ if (ctx->release_agent) return cg_invalf(fc, "cgroup1: release_agent respecified"); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. 
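+		 * As a sketch, a v1 mount such as
+		 *   mount -t cgroup -o cpuset,release_agent=/sbin/agent none /mnt
+		 * issued from a non-init user namespace now fails here
+		 * instead of registering a root-privileged usermode helper.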
+ */ + if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) + return cg_invalf(fc, "cgroup1: Setting release_agent not allowed"); ctx->release_agent = param->string; param->string = NULL; break; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 7c9e97553a00..d40e051b812a 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -58,6 +58,8 @@ #include <linux/sched/cputime.h> #include <linux/psi.h> #include <net/sock.h> +#include <linux/mbuf.h> +#include <linux/sli.h> #define CREATE_TRACE_POINTS #include <trace/events/cgroup.h> @@ -180,6 +182,8 @@ static u16 cgrp_dfl_threaded_ss_mask; LIST_HEAD(cgroup_roots); static int cgroup_root_count; +int sysctl_qos_mbuf_enable = 0; + /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ static DEFINE_IDR(cgroup_hierarchy_idr); @@ -3438,6 +3442,21 @@ static int cgroup_type_show(struct seq_file *seq, void *v) return 0; } +int cgroup_id_show(struct seq_file *seq, void *v) +{ + struct cgroup *cgrp = seq_css(seq)->cgroup; + int ssid; + struct cgroup_subsys *ss; + + for_each_subsys(ss, ssid) { + struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); + + if (css) + seq_printf(seq, "%s:%u\n", ss->name, css->id); + } + return 0; +} + static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -3627,6 +3646,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, { struct psi_trigger *new; struct cgroup *cgrp; + struct psi_group *psi; cgrp = cgroup_kn_lock_live(of->kn, false); if (!cgrp) @@ -3635,7 +3655,8 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, cgroup_get(cgrp); cgroup_kn_unlock(of->kn); - new = psi_trigger_create(&cgrp->psi, buf, nbytes, res); + psi = cgroup_ino(cgrp) == 1 ? 
&psi_system : &cgrp->psi;
+	new = psi_trigger_create(psi, buf, nbytes, res);
 	if (IS_ERR(new)) {
 		cgroup_put(cgrp);
 		return PTR_ERR(new);
@@ -3681,6 +3702,183 @@ static void cgroup_pressure_release(struct kernfs_open_file *of)
 }
 #endif /* CONFIG_PSI */
 
+static int cgroup_sli_memory_show(struct seq_file *seq, void *v)
+{
+	struct cgroup *cgroup = seq_css(seq)->cgroup;
+
+	return sli_memlat_stat_show(seq, cgroup);
+}
+
+static int cgroup_sli_cpu_show(struct seq_file *seq, void *v)
+{
+	struct cgroup *cgroup = seq_css(seq)->cgroup;
+
+	return sli_schedlat_stat_show(seq, cgroup);
+}
+
+static int cgroup_sli_max_show(struct seq_file *seq, void *v)
+{
+	struct cgroup *cgroup = seq_css(seq)->cgroup;
+
+	sli_schedlat_max_show(seq, cgroup);
+	return sli_memlat_max_show(seq, cgroup);
+}
+
+void *cgroup_mbuf_start(struct seq_file *s, loff_t *pos)
+{
+	struct cgroup *cgrp = seq_css(s)->cgroup;
+	struct mbuf_slot *mb = cgrp->mbuf;
+	u32 index;
+
+	if (!mb)
+		return NULL;
+
+	index = *pos;
+	/* If we already reached the end, just return */
+	if (index && index == mb->mring->next_idx)
+		return NULL;
+
+	if (!mb->udesc) {
+		mb->udesc = kmalloc(sizeof(struct mbuf_user_desc), GFP_KERNEL);
+
+		if (!mb->udesc)
+			goto out;
+
+		mb->udesc->user_idx = mb->mring->first_idx;
+		mb->udesc->user_seq = mb->mring->first_seq;
+	}
+
+	/* We may have reached the end, or the ring may be empty */
+	if (mb->udesc->user_idx == mb->mring->next_idx)
+		return NULL;
+
+out:
+	return mb->udesc;
+}
+
+void *cgroup_mbuf_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct mbuf_user_desc *udesc = (struct mbuf_user_desc *)v;
+	struct cgroup *cgrp = seq_css(s)->cgroup;
+	struct mbuf_slot *mb = cgrp->mbuf;
+
+	udesc->user_idx = mb->ops->next(mb->mring, udesc->user_idx);
+	*pos = udesc->user_idx;
+
+	if (udesc->user_idx == mb->mring->next_idx)
+		return NULL;
+
+	return udesc;
+}
+
+void cgroup_mbuf_stop(struct seq_file *s, void *v)
+{
+	struct cgroup *cgrp = seq_css(s)->cgroup;
+	struct mbuf_user_desc *desc;
+
+	if (cgrp->mbuf) {
+		desc = cgrp->mbuf->udesc;
+		if (desc && desc->user_idx == cgrp->mbuf->mring->next_idx) {
+			kfree(cgrp->mbuf->udesc);
+			cgrp->mbuf->udesc = NULL;
+		}
+	}
+}
+
+int cgroup_mbuf_show(struct seq_file *s, void *v)
+{
+	ssize_t ret;
+	struct mbuf_user_desc *udesc = (struct mbuf_user_desc *)v;
+	struct cgroup *cgrp = seq_css(s)->cgroup;
+	struct mbuf_slot *mb = cgrp->mbuf;
+
+	memset(udesc->buf, 0, sizeof(udesc->buf));
+	ret = mb->ops->read(mb, udesc);
+
+	if (ret > 0)
+		seq_printf(s, "%s", udesc->buf);
+
+	return 0;
+}
+
+/*
+ * Get the cgroup struct from a task_struct, for mbuf and sli.
+ *
+ * In cgroup v1, return the cpuacct cgroup; if the cpuacct cgroup is the
+ * root, dfl_cgrp is returned instead, which is harmless.
+ *
+ * In cgroup v2, just return task_struct.cgroups.dfl_cgrp.
+ */
+struct cgroup *get_cgroup_from_task(struct task_struct *task)
+{
+	struct cgroup *cgrp;
+
+	/* First, try to get the cpuacct cgroup for v1 */
+	cgrp = task_cgroup(task, cpuacct_cgrp_id);
+	if (cgrp && cgrp->level)
+		return cgrp;
+
+	/*
+	 * If the cpuacct cgroup cannot be found, or it is the root cgroup,
+	 * just return dfl_cgrp.
+	 */
+	cgrp = task_dfl_cgroup(task);
+
+	return cgrp;
+}
+
+/* Store info to mbuf according to the task's cgroup */
+ssize_t mbuf_print_task(struct task_struct *task, const char *fmt, ...)
+{
+	struct cgroup *cgrp;
+	struct mbuf_slot *mb;
+	va_list args;
+
+	cgrp = get_cgroup_from_task(task);
+	mb = cgrp->mbuf;
+	if (!sysctl_qos_mbuf_enable || !mb)
+		goto out;
+
+	if (!__ratelimit(&mb->ratelimit)) {
+		goto out;
+	}
+
+	if (mb->ops) {
+		va_start(args, fmt);
+		mb->ops->write(cgrp, fmt, args);
+		va_end(args);
+	}
+
+out:
+	return 0;
+}
+EXPORT_SYMBOL(mbuf_print_task);
+
+ssize_t mbuf_print(struct cgroup *cgrp, const char *fmt, ...)
+{
+	struct mbuf_slot *mb;
+	va_list args;
+
+	mb = cgrp->mbuf;
+	if (!sysctl_qos_mbuf_enable || !mb)
+		goto out;
+
+	if (!__ratelimit(&mb->ratelimit)) {
+		goto out;
+	}
+
+	if (mb->ops) {
+		va_start(args, fmt);
+		mb->ops->write(cgrp, fmt, args);
+		va_end(args);
+	}
+
+out:
+	return 0;
+}
+EXPORT_SYMBOL(mbuf_print);
+
 static int cgroup_freeze_show(struct seq_file *seq, void *v)
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
@@ -4856,6 +5054,10 @@ static struct cftype cgroup_base_files[] = {
 		.seq_show = cgroup_type_show,
 		.write = cgroup_type_write,
 	},
+	{
+		.name = "cgroup.id",
+		.seq_show = cgroup_id_show,
+	},
 	{
 		.name = "cgroup.procs",
 		.flags = CFTYPE_NS_DELEGATABLE,
@@ -4939,6 +5141,29 @@ static struct cftype cgroup_base_files[] = {
 		.release = cgroup_pressure_release,
 	},
 #endif /* CONFIG_PSI */
+	{
+		.name = "mbuf",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_mbuf_show,
+		.seq_start = cgroup_mbuf_start,
+		.seq_next = cgroup_mbuf_next,
+		.seq_stop = cgroup_mbuf_stop,
+	},
+	{
+		.name = "sli.memory",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_sli_memory_show,
+	},
+	{
+		.name = "sli.cpu",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_sli_cpu_show,
+	},
+	{
+		.name = "sli.max",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_sli_max_show,
+	},
 	{ }	/* terminate */
 };
 
@@ -5000,6 +5225,7 @@ static void css_free_rwork_fn(struct work_struct *work)
 		cgroup_put(cgroup_parent(cgrp));
 		kernfs_put(cgrp->kn);
 		psi_cgroup_free(cgrp);
+		sli_cgroup_free(cgrp);
 		if (cgroup_on_dfl(cgrp))
 			cgroup_rstat_exit(cgrp);
 		kfree(cgrp);
@@ -5256,15 +5482,20 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	cgrp->self.parent = &parent->self;
 	cgrp->root = root;
 	cgrp->level = level;
+	cgrp->mbuf = NULL;
 
 	ret = psi_cgroup_alloc(cgrp);
 	if (ret)
 		goto out_idr_free;
 
-	ret = cgroup_bpf_inherit(cgrp);
+	ret = sli_cgroup_alloc(cgrp);
 	if (ret)
 		goto out_psi_free;
 
+	ret = cgroup_bpf_inherit(cgrp);
+	if (ret)
+		goto out_sli_free;
+
 	/*
 	 * New cgroup inherits effective freeze counter, and
 	 * if the parent has to be frozen, the child has too.
@@ -5329,6 +5560,8 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
 	return cgrp;
 
+out_sli_free:
+	sli_cgroup_free(cgrp);
 out_psi_free:
 	psi_cgroup_free(cgrp);
 out_idr_free:
@@ -5366,6 +5599,42 @@ fail:
 	return ret;
 }
 
+/*
+ * mbuf only supports the cpu/memory/io/net cgroups in v2, and only the
+ * cpuacct cgroup in v1.
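+ *
+ * For example, on the v2 hierarchy a cgroup with the cpu, io, memory
+ * or net_cls controller enabled gets an mbuf slot at mkdir time (when
+ * sysctl_qos_mbuf_enable is set), while e.g. a pids-only cgroup does
+ * not.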
+ */ +static inline bool cgroup_need_mbuf(struct cgroup *cgrp) +{ + if (cgroup_on_dfl(cgrp)){ +#if IS_ENABLED(CONFIG_CGROUP_SCHED) + if (cgroup_css(cgrp, cgroup_subsys[cpu_cgrp_id])) + return true; +#endif + +#if IS_ENABLED(CONFIG_BLK_CGROUP) + if (cgroup_css(cgrp, cgroup_subsys[io_cgrp_id])) + return true; +#endif + +#if IS_ENABLED(CONFIG_MEMCG) + if (cgroup_css(cgrp, cgroup_subsys[memory_cgrp_id])) + return true; +#endif + +#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) + if (cgroup_css(cgrp, cgroup_subsys[net_cls_cgrp_id])) + return true; +#endif + } else +#if IS_ENABLED(CONFIG_CGROUP_CPUACCT) + if (cgroup_css(cgrp, cgroup_subsys[cpuacct_cgrp_id])) + return true; +#endif + + return false; +} + + int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) { struct cgroup *parent, *cgrp; @@ -5404,7 +5673,6 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) * that @cgrp->kn is always accessible. */ kernfs_get(kn); - ret = cgroup_kn_set_ugid(kn); if (ret) goto out_destroy; @@ -5417,6 +5685,9 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) if (ret) goto out_destroy; + if(sysctl_qos_mbuf_enable && cgroup_need_mbuf(cgrp)) + cgrp->mbuf = mbuf_slot_alloc(cgrp); + TRACE_CGROUP_PATH(mkdir, cgrp); /* let's create and online css's */ @@ -5599,6 +5870,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) cgroup_bpf_offline(cgrp); + if (cgrp->mbuf) + mbuf_free(cgrp); + /* put the base reference */ percpu_ref_kill(&cgrp->self.refcnt); @@ -6379,18 +6653,8 @@ void cgroup_sk_alloc_disable(void) void cgroup_sk_alloc(struct sock_cgroup_data *skcd) { - if (cgroup_sk_alloc_disabled) - return; - - /* Socket clone path */ - if (skcd->val) { - /* - * We might be cloning a socket which is left in an empty - * cgroup and the cgroup might have already been rmdir'd. - * Don't use cgroup_get_live(). - */ - cgroup_get(sock_cgroup_ptr(skcd)); - cgroup_bpf_get(sock_cgroup_ptr(skcd)); + if (cgroup_sk_alloc_disabled) { + skcd->no_refcnt = 1; return; } @@ -6415,10 +6679,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) rcu_read_unlock(); } +void cgroup_sk_clone(struct sock_cgroup_data *skcd) +{ + if (skcd->val) { + if (skcd->no_refcnt) + return; + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live(). 
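+		 * (When no_refcnt is set, the clone shares the cgroup
+		 * pointer without taking references; cgroup_sk_free()
+		 * bails out early in that case as well.)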
+ */ + cgroup_get(sock_cgroup_ptr(skcd)); + cgroup_bpf_get(sock_cgroup_ptr(skcd)); + } +} + void cgroup_sk_free(struct sock_cgroup_data *skcd) { struct cgroup *cgrp = sock_cgroup_ptr(skcd); + if (skcd->no_refcnt) + return; cgroup_bpf_put(cgrp); cgroup_put(cgrp); } @@ -6426,26 +6707,31 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd) #endif /* CONFIG_SOCK_CGROUP_DATA */ #ifdef CONFIG_CGROUP_BPF -int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags) +int cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, + enum bpf_attach_type type, + u32 flags) { int ret; mutex_lock(&cgroup_mutex); - ret = __cgroup_bpf_attach(cgrp, prog, type, flags); + ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags); mutex_unlock(&cgroup_mutex); return ret; } + int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags) + enum bpf_attach_type type) { int ret; mutex_lock(&cgroup_mutex); - ret = __cgroup_bpf_detach(cgrp, prog, type); + ret = __cgroup_bpf_detach(cgrp, prog, NULL, type); mutex_unlock(&cgroup_mutex); return ret; } + int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr) { diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 8d60a0a65086..11dc982ba51c 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -171,6 +171,14 @@ struct cpuset { /* for cpu load calc */ unsigned long calc_load_tasks; unsigned long avenrun[3]; + + /* for cpu R load calc */ + unsigned long calc_load_tasks_r; + unsigned long avenrun_r[3]; + + /* for cpu D load calc */ + unsigned long calc_load_tasks_d; + unsigned long avenrun_d[3]; }; /* @@ -995,25 +1003,48 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], */ static void rebuild_sched_domains_locked(void) { + struct cgroup_subsys_state *pos_css; struct sched_domain_attr *attr; cpumask_var_t *doms; + struct cpuset *cs; int ndoms; lockdep_assert_cpus_held(); percpu_rwsem_assert_held(&cpuset_rwsem); /* - * We have raced with CPU hotplug. Don't do anything to avoid + * If we have raced with CPU hotplug, return early to avoid * passing doms with offlined cpu to partition_sched_domains(). - * Anyways, hotplug work item will rebuild sched domains. + * Anyways, cpuset_hotplug_workfn() will rebuild sched domains. + * + * With no CPUs in any subpartitions, top_cpuset's effective CPUs + * should be the same as the active CPUs, so checking only top_cpuset + * is enough to detect racing CPU offlines. */ if (!top_cpuset.nr_subparts_cpus && !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) return; - if (top_cpuset.nr_subparts_cpus && - !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask)) - return; + /* + * With subpartition CPUs, however, the effective CPUs of a partition + * root should be only a subset of the active CPUs. Since a CPU in any + * partition root could be offlined, all must be checked. 
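+	 *
+	 * For example, with partition roots P0 (cpus 0-1) and P1
+	 * (cpus 2-3), offlining cpu 3 leaves P1's effective_cpus not a
+	 * subset of cpu_active_mask, so we return early and let
+	 * cpuset_hotplug_workfn() redo the rebuild.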
+ */ + if (top_cpuset.nr_subparts_cpus) { + rcu_read_lock(); + cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { + if (!is_partition_root(cs)) { + pos_css = css_rightmost_descendant(pos_css); + continue; + } + if (!cpumask_subset(cs->effective_cpus, + cpu_active_mask)) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + } /* Generate domain masks and attrs */ ndoms = generate_sched_domains(&doms, &attr); @@ -2526,16 +2557,19 @@ static u64 get_iowait_time(int cpu) } -static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) +extern int cpu_get_max_cpus(struct task_struct *p); +static int cpuset_cgroup_stat_show_comm(struct seq_file *sf, void *v, struct cpuset *cs, int max_cpu) { - struct cpuset *cs = css_cs(seq_css(sf)); - int i, j; + int i, j, k = 0; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice, n_ctx_switch, n_process, n_running, n_blocked; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; + bool is_top_cgrp; + int show_realinfo = cpuset_cpuinfo_show_realinfo; + is_top_cgrp = !cs->css.parent ? true : false; user = nice = system = idle = iowait = irq = softirq = steal = 0; @@ -2543,6 +2577,8 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) getboottime(&boottime); for_each_cpu(i, cs->cpus_allowed) { + if (++k > max_cpu) + break; user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; @@ -2578,8 +2614,11 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) seq_putc(sf, '\n'); j = 0; + k = 0; for_each_cpu(i, cs->cpus_allowed) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + if (++k > max_cpu) + break; user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; @@ -2590,7 +2629,10 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; - seq_printf(sf, "cpu%d", i); + if (is_top_cgrp || show_realinfo) + seq_printf(sf, "cpu%d", i); + else + seq_printf(sf, "cpu%d", j++); seq_put_decimal_ull(sf, " ", nsec_to_clock_t(user)); seq_put_decimal_ull(sf, " ", nsec_to_clock_t(nice)); seq_put_decimal_ull(sf, " ", nsec_to_clock_t(system)); @@ -2606,10 +2648,14 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) seq_put_decimal_ull(sf, "intr ", (unsigned long long)sum); /* sum again ? it could be updated? 
*/ + k = 0; for_each_irq_nr(j) { sum = 0; - for_each_cpu(i, cs->cpus_allowed) + for_each_cpu(i, cs->cpus_allowed) { + if (++k > max_cpu) + break; sum += kstat_irqs_cpu(j, i); + } seq_put_decimal_ull(sf, " ", sum); } @@ -2618,7 +2664,10 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) n_process = 0; n_running = 0; n_blocked = 0; + k = 0; for_each_cpu(i, cs->cpus_allowed) { + if (++k > max_cpu) + break; n_ctx_switch += nr_context_switches_cpu(i); n_process += per_cpu(total_forks, i); n_running += nr_running_cpu(i); @@ -2645,6 +2694,20 @@ static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) return 0; } +int cpuset_cgroupfs_stat_show(struct seq_file *m, void *v) +{ + struct cgroup_subsys_state *css = task_css(current, cpuset_cgrp_id); + struct cpuset *cs = css_cs(css); + int max_cpu = cpu_get_max_cpus(current); + return cpuset_cgroup_stat_show_comm(m, v, cs, max_cpu); +} + +static int cpuset_cgroup_stat_show(struct seq_file *sf, void *v) +{ + struct cpuset *cs = css_cs(seq_css(sf)); + return cpuset_cgroup_stat_show_comm(sf, v, cs, INT_MAX); +} + #ifdef CONFIG_X86 /* * Get CPU information for use by the procfs. @@ -2693,15 +2756,14 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) } #endif -static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v) +static int cpuset_cgroup_cpuinfo_show_comm(struct seq_file *sf, void *v, struct cpuset *cs, int max_cpu) { int i, j, k = 0; - struct cpuset *cs = css_cs(seq_css(sf)); struct cpuinfo_x86 *c; unsigned int cpu; bool is_top_cgrp; - if (!seq_css(sf)->parent) + if (!cs->css.parent) is_top_cgrp = true; else is_top_cgrp = false; @@ -2714,6 +2776,8 @@ static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v) else cpu = k; k++; + if (k > max_cpu) + break; seq_printf(sf, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" @@ -2791,6 +2855,41 @@ static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v) } return 0; } + +/* return 1 if allowed, otherwise 0 is returned */ +int cpuset_cgroups_cpu_allowed(struct task_struct *task, int cpu, int once) +{ + struct cgroup_subsys_state *css = task_css(task, cpuset_cgrp_id); + struct cpuset *cs = css_cs(css); + int max_cpu, i, k = 0; + if (!once) + return cpumask_test_cpu(cpu, cs->cpus_allowed); + max_cpu = cpu_get_max_cpus(task); + for_each_cpu(i, cs->cpus_allowed) { + if (++k > max_cpu) + return 0; + if (i == cpu) + return 1; + if (i > cpu) + return 0; + } + return 0; +} + +int cpuset_cgroupfs_cpuinfo_show(struct seq_file *m, void *v) +{ + struct cgroup_subsys_state *css = task_css(current, cpuset_cgrp_id); + struct cpuset *cs = css_cs(css); + int max_cpu = cpu_get_max_cpus(current); + return cpuset_cgroup_cpuinfo_show_comm(m, v, cs, max_cpu); +} + +static int cpuset_cgroup_cpuinfo_show(struct seq_file *sf, void *v) +{ + struct cpuset *cs = css_cs(seq_css(sf)); + return cpuset_cgroup_cpuinfo_show_comm(sf, v, cs, INT_MAX); +} + #endif static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v); @@ -3017,6 +3116,10 @@ static struct cftype dfl_files[] = { .private = FILE_SUBPARTS_CPULIST, .flags = CFTYPE_DEBUG, }, + { + .name = "loadavg", + .seq_show = cpuset_cgroup_loadavg_show, + }, { } /* terminate */ }; @@ -3957,11 +4060,23 @@ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) static void __cpuset_calc_load(struct cpuset *cs) { long active = cs->calc_load_tasks; + long active_r = cs->calc_load_tasks_r; + long active_d = cs->calc_load_tasks_d; + active = active > 0 ? 
active * FIXED_1 : 0; cs->avenrun[0] = calc_load(cs->avenrun[0], EXP_1, active); cs->avenrun[1] = calc_load(cs->avenrun[1], EXP_5, active); cs->avenrun[2] = calc_load(cs->avenrun[2], EXP_15, active); + active_r = active_r > 0 ? active_r * FIXED_1 : 0; + cs->avenrun_r[0] = calc_load(cs->avenrun_r[0], EXP_1, active_r); + cs->avenrun_r[1] = calc_load(cs->avenrun_r[1], EXP_5, active_r); + cs->avenrun_r[2] = calc_load(cs->avenrun_r[2], EXP_15, active_r); + + active_d = active_d > 0 ? active_d * FIXED_1 : 0; + cs->avenrun_d[0] = calc_load(cs->avenrun_d[0], EXP_1, active_d); + cs->avenrun_d[1] = calc_load(cs->avenrun_d[1], EXP_5, active_d); + cs->avenrun_d[2] = calc_load(cs->avenrun_d[2], EXP_15, active_d); } /*calc cpu load for this cpuset*/ void cgroup_cpuset_calc_load(void) @@ -3978,11 +4093,20 @@ void cgroup_cpuset_calc_load(void) continue; cs->calc_load_tasks = 0; + cs->calc_load_tasks_r = 0; + cs->calc_load_tasks_d = 0; css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) { - if (task->state == TASK_RUNNING || task->state == TASK_UNINTERRUPTIBLE) + if (task->state == TASK_RUNNING || task->state & TASK_UNINTERRUPTIBLE) { cs->calc_load_tasks ++; + + /* calc R/D state process separately */ + if (task->state == TASK_RUNNING) + cs->calc_load_tasks_r ++; + if (task->state & TASK_UNINTERRUPTIBLE) + cs->calc_load_tasks_d ++; + } } css_task_iter_end(&it); @@ -4013,10 +4137,11 @@ static unsigned long cpuset_nr_running(struct cpuset *cs) return nr_running; } -static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v) +static int cpuset_cgroup_loadavg_show_comm(struct seq_file *sf, void *v, struct cpuset *cs) { - struct cpuset *cs = css_cs(seq_css(sf)); unsigned long loads[3] = {0}; + unsigned long loads_r[3] = {0}; + unsigned long loads_d[3] = {0}; unsigned long offset = FIXED_1/200; int shift = 0; unsigned long n_running = cpuset_nr_running(cs); @@ -4024,6 +4149,15 @@ static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v) loads[0] = (cs->avenrun[0] + offset) << shift; loads[1] = (cs->avenrun[1] + offset) << shift; loads[2] = (cs->avenrun[2] + offset) << shift; + + loads_r[0] = (cs->avenrun_r[0] + offset) << shift; + loads_r[1] = (cs->avenrun_r[1] + offset) << shift; + loads_r[2] = (cs->avenrun_r[2] + offset) << shift; + + loads_d[0] = (cs->avenrun_d[0] + offset) << shift; + loads_d[1] = (cs->avenrun_d[1] + offset) << shift; + loads_d[2] = (cs->avenrun_d[2] + offset) << shift; + #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) @@ -4033,5 +4167,29 @@ static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v) LOAD_INT(loads[2]), LOAD_FRAC(loads[2]), n_running, nr_threads, idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); + + seq_printf(sf, "%lu.%02lu %lu.%02lu %lu.%02lu\n", + LOAD_INT(loads_r[0]), LOAD_FRAC(loads_r[0]), + LOAD_INT(loads_r[1]), LOAD_FRAC(loads_r[1]), + LOAD_INT(loads_r[2]), LOAD_FRAC(loads_r[2])); + + seq_printf(sf, "%lu.%02lu %lu.%02lu %lu.%02lu\n", + LOAD_INT(loads_d[0]), LOAD_FRAC(loads_d[0]), + LOAD_INT(loads_d[1]), LOAD_FRAC(loads_d[1]), + LOAD_INT(loads_d[2]), LOAD_FRAC(loads_d[2])); + return 0; } + +int cpuset_cgroupfs_loadavg_show(struct seq_file *m, void *v) +{ + struct cgroup_subsys_state *css = task_css(current, cpuset_cgrp_id); + struct cpuset *cs = css_cs(css); + return cpuset_cgroup_loadavg_show_comm(m, v, cs); +} + +static int cpuset_cgroup_loadavg_show(struct seq_file *sf, void *v) +{ + struct cpuset *cs = css_cs(seq_css(sf)); + return 
cpuset_cgroup_loadavg_show_comm(sf, v, cs);
+}
diff --git a/kernel/cgroup/mbuf.c b/kernel/cgroup/mbuf.c
new file mode 100644
index 000000000000..6d7cb88273aa
--- /dev/null
+++ b/kernel/cgroup/mbuf.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Quality Monitor Buffer
+ * Aims to provide a backup buffer for RQM to record critical messages.
+ * Can be used to capture critical context when abnormal jitter occurs.
+ *
+ * Author: bauerchen <bauerchen@tencent.com>
+ * Copyright (C) 2021 Tencent, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/cgroup.h>
+#include <linux/memblock.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <linux/mbuf.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/ratelimit.h>
+
+/* The max mbuf length is 8M, the min is 2M */
+#define MBUF_LEN_MAX (1 << 23)
+#define MBUF_LEN_MIN (1 << 21)
+#define MBUF_LEN_DEF MBUF_LEN_MIN
+
+#define MBUF_MSG_LEN_MAX 1024
+
+/* The monitor buffer supports at most 1024 slots */
+#define MBUF_SLOTS_MAX 1024
+#define MBUF_SLOTS_MIN 256
+#define MBUF_SLOTS_DEF 512
+
+/* Global mbuf metadata struct */
+static struct mbuf_struct g_mbuf = {
+	.mbuf_len = MBUF_LEN_DEF,
+	.mbuf_max_slots = MBUF_SLOTS_DEF,
+	.mbuf_next_id = 0,
+	.mbuf_size_per_cg = 0,
+	.mbuf = NULL,
+	.mbuf_bitmap = NULL,
+};
+
+static void __init mbuf_len_update(u64 size)
+{
+	if (size)
+		size = roundup_pow_of_two(size);
+
+	if (size > MBUF_LEN_MAX) {
+		size = (u64)MBUF_LEN_MAX;
+		pr_warn("mbuf: a monitor buffer larger than [ %llu ] is not supported.\n",
+			(u64)MBUF_LEN_MAX);
+	}
+
+	if (size < MBUF_LEN_MIN) {
+		size = (u64)MBUF_LEN_MIN;
+		pr_warn("mbuf: a monitor buffer smaller than [ %llu ] is not supported.\n",
+			(u64)MBUF_LEN_MIN);
+	}
+
+	g_mbuf.mbuf_len = size;
+}
+
+static int __init mbuf_len_setup(char *str)
+{
+	u64 size;
+
+	if (!str)
+		return -EINVAL;
+
+	size = memparse(str, &str);
+
+	mbuf_len_update(size);
+
+	return 0;
+}
+early_param("mbuf_len", mbuf_len_setup);
+
+static int __init mbuf_max_items_setup(char *str)
+{
+	int num;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!get_option(&str, &num))
+		return -EINVAL;
+
+	if (num)
+		num = roundup_pow_of_two(num);
+
+	if (num > MBUF_SLOTS_MAX)
+		num = MBUF_SLOTS_MAX;
+
+	if (num < MBUF_SLOTS_MIN)
+		num = MBUF_SLOTS_MIN;
+
+	g_mbuf.mbuf_max_slots = num;
+
+	return 0;
+}
+early_param("mbuf_max_items", mbuf_max_items_setup);
+
+/* Alloc the global mbuf bitmap; each bit stands for one mbuf slot.
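+ * With the default sizing (mbuf_len = 2M, mbuf_max_slots = 512), each
+ * slot gets mbuf_len / mbuf_max_slots = 4K bytes of buffer.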
+ */
+void __init mbuf_bmap_init(void)
+{
+	size_t alloc_size;
+	void *mbuf_bitmap;
+
+	alloc_size = max_t(size_t, g_mbuf.mbuf_max_slots / BITS_PER_BYTE + 1,
+			L1_CACHE_BYTES);
+	mbuf_bitmap = kmalloc(alloc_size, __GFP_HIGH|__GFP_ZERO);
+
+	if (!mbuf_bitmap) {
+		pr_err("mbuf: alloc mbuf_bitmap failed!\n");
+		return;
+	}
+	g_mbuf.mbuf_bitmap = mbuf_bitmap;
+	g_mbuf.mbuf_size_per_cg = g_mbuf.mbuf_len / g_mbuf.mbuf_max_slots;
+}
+
+/* Called by start_kernel() */
+void __init setup_mbuf(void)
+{
+	/* mbuf has already been allocated */
+	if (g_mbuf.mbuf)
+		return;
+
+	g_mbuf.mbuf = memblock_alloc(g_mbuf.mbuf_len, PAGE_SIZE);
+	if (unlikely(!g_mbuf.mbuf)) {
+		pr_err("mbuf: memblock_alloc [ %u ] bytes failed\n",
+			g_mbuf.mbuf_len);
+		return;
+	}
+
+	g_mbuf.mbuf_frees = g_mbuf.mbuf_max_slots;
+	spin_lock_init(&g_mbuf.mbuf_lock);
+
+	pr_info("mbuf: mbuf_len:%u\n", g_mbuf.mbuf_len);
+}
+
+/* Get the mbuf ring desc text pointer */
+static char *mbuf_text(struct mbuf_ring_desc *desc)
+{
+	return (char *)desc + sizeof(struct mbuf_ring_desc);
+}
+
+/* Get the next mbuf_slot idx */
+static u32 mbuf_next(struct mbuf_ring *mring, u32 curr_idx)
+{
+	struct mbuf_ring_desc *cdesc, *ndesc;
+	u32 frees, next_idx;
+
+	cdesc = (struct mbuf_ring_desc *)(g_mbuf.mbuf + curr_idx);
+	next_idx = curr_idx + cdesc->len;
+	/*
+	 * If the free space is not enough to store a mbuf_ring_desc
+	 * struct, just go back to the head
+	 */
+	frees = mring->end_idx - next_idx;
+	if (frees < sizeof(struct mbuf_ring_desc)) {
+		next_idx = mring->base_idx;
+		goto next;
+	}
+
+	ndesc = (struct mbuf_ring_desc *)(g_mbuf.mbuf + next_idx);
+	if (!ndesc->len && next_idx != mring->next_idx)
+		next_idx = mring->base_idx;
+
+next:
+	return next_idx;
+}
+
+static inline struct mbuf_ring_desc *get_ring_desc_from_idx(
+		struct mbuf_ring *ring, u32 idx)
+{
+	return (struct mbuf_ring_desc *)(g_mbuf.mbuf + idx);
+}
+
+/* Read an mbuf message according to its idx */
+static ssize_t mbuf_read(struct mbuf_slot *mb, struct mbuf_user_desc *udesc)
+{
+	struct mbuf_ring *mring;
+	struct mbuf_ring_desc *desc;
+	ssize_t ret;
+	size_t i, len, tbuf_len;
+
+	tbuf_len = sizeof(udesc->buf);
+	mring = mb->mring;
+
+	if (udesc->user_seq < mring->first_seq) {
+		udesc->user_seq = mring->first_seq;
+		udesc->user_idx = mring->first_idx;
+		ret = -1;
+		goto out;
+	}
+
+	desc = get_ring_desc_from_idx(mring, udesc->user_idx);
+
+	len = sprintf(udesc->buf, "%llu:", desc->ts_ns);
+	for (i = 0; i < desc->text_len; i++) {
+		unsigned char c = mbuf_text(desc)[i];
+
+		if (c < ' ' || c >= 127 || c == '\\')
+			continue;
+		else
+			udesc->buf[len++] = c;
+
+		if (len >= tbuf_len)
+			break;
+	}
+
+	len = len >= tbuf_len ?
tbuf_len - 1 : len;
+	udesc->buf[len] = '\n';
+	udesc->user_seq++;
+	ret = len;
+
+out:
+	return ret;
+}
+
+static int mbuf_prepare(struct mbuf_ring *mring, u32 msg_size)
+{
+	u32 frees;
+
+	if (unlikely(msg_size > MBUF_MSG_LEN_MAX)) {
+		return -ENOMEM;
+	}
+
+	while (mring->first_seq < mring->next_seq) {
+		if (mring->first_idx < mring->next_idx)
+			frees = max(mring->end_idx - mring->next_idx,
+				    mring->first_idx - mring->base_idx);
+		else
+			frees = mring->first_idx - mring->next_idx;
+
+		if (frees > msg_size)
+			break;
+
+		/* Drop old messages until we have enough contiguous space */
+		mring->first_idx = mbuf_next(mring, mring->first_idx);
+		mring->first_seq++;
+	}
+	return 0;
+}
+
+/* Write a monitor buffer message */
+static ssize_t do_mbuf_write(struct cgroup *cg, char *buffer, size_t size)
+{
+	struct mbuf_ring *mring;
+	struct mbuf_ring_desc *desc;
+	size_t len;
+	unsigned long flags;
+
+	if (size >= g_mbuf.mbuf_size_per_cg) {
+		pr_err("mbuf: written messages must be less than [ %u ] bytes\n",
+			g_mbuf.mbuf_size_per_cg);
+		return 0;
+	}
+
+	mring = cg->mbuf->mring;
+	len = sizeof(struct mbuf_ring_desc) + size;
+
+	spin_lock_irqsave(&cg->mbuf->slot_lock, flags);
+
+	if (mbuf_prepare(mring, len)) {
+		spin_unlock_irqrestore(&cg->mbuf->slot_lock, flags);
+		pr_err("mbuf: cannot find enough space.\n");
+		return 0;
+	}
+
+	if (mring->next_idx + len >= mring->end_idx) {
+		/* Zero the remaining buffer when wrapping to the head */
+		memset(g_mbuf.mbuf + mring->next_idx, 0, mring->end_idx - mring->next_idx);
+		mring->next_idx = mring->base_idx;
+	}
+
+	desc = (struct mbuf_ring_desc *)(g_mbuf.mbuf + mring->next_idx);
+	memcpy(mbuf_text(desc), buffer, size);
+	desc->len = size + sizeof(struct mbuf_ring_desc);
+	desc->text_len = size;
+	desc->ts_ns = local_clock();
+	mring->next_idx += desc->len;
+	mring->next_seq++;
+
+	spin_unlock_irqrestore(&cg->mbuf->slot_lock, flags);
+
+	return size;
+}
+
+void mbuf_reset(struct mbuf_ring *mring)
+{
+	mring->first_idx = mring->base_idx;
+	mring->first_seq = 0;
+	mring->next_idx = mring->base_idx;
+	mring->next_seq = 0;
+}
+
+static ssize_t mbuf_write(struct cgroup *cg, const char *fmt, va_list args)
+{
+	static char buf[MBUF_MSG_LEN_MAX];
+	char *text = buf;
+	size_t t_len;
+	ssize_t ret;
+
+	/* Store the string to the buffer */
+	t_len = vscnprintf(text, sizeof(buf), fmt, args);
+
+	/* Write the string to the mbuf */
+	ret = do_mbuf_write(cg, text, t_len);
+
+	return ret;
+}
+
+const struct mbuf_operations mbuf_ops = {
+	.read	= mbuf_read,
+	.next	= mbuf_next,
+	.write	= mbuf_write,
+};
+
+static int get_next_mbuf_id(unsigned long *addr, u32 start)
+{
+	u32 index;
+
+	index = find_next_zero_bit(addr, g_mbuf.mbuf_max_slots, start);
+	if (unlikely(index >= g_mbuf.mbuf_max_slots))
+		return g_mbuf.mbuf_max_slots;
+
+	return index;
+}
+
+static void mbuf_slot_init(struct mbuf_slot *mb, struct cgroup *cg, u32 index)
+{
+	mb->owner = cg;
+	mb->idx = index;
+	mb->ops = &mbuf_ops;
+	spin_lock_init(&mb->slot_lock);
+	ratelimit_state_init(&mb->ratelimit, 5 * HZ, 50);
+
+	mb->mring = (struct mbuf_ring *)((char *)mb + sizeof(struct mbuf_slot));
+	mb->mring->base_idx = index *
+		g_mbuf.mbuf_size_per_cg + sizeof(struct mbuf_slot) + sizeof(struct mbuf_ring);
+	mb->mring->end_idx = (index + 1) * g_mbuf.mbuf_size_per_cg - 1;
+
+	mbuf_reset(mb->mring);
+}
+
+struct mbuf_slot *mbuf_slot_alloc(struct cgroup *cg)
+{
+	struct mbuf_slot *mb;
+	u32 index = 0;
+	u32 try_times;
+	unsigned long *m_bitmap;
+	unsigned long flags;
+
+	/* If the mbuf_bitmap or mbuf is not ready, just return NULL.
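+	 * Either pointer may legitimately be NULL if the boot-time
+	 * allocations in setup_mbuf() or mbuf_bmap_init() failed.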
+	 */
+	if (!g_mbuf.mbuf_bitmap || !g_mbuf.mbuf) {
+		pr_warn_ratelimited("mbuf: mbuf bitmap or mbuf pointer is NULL, alloc failed\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&g_mbuf.mbuf_lock, flags);
+
+	if (g_mbuf.mbuf_frees == 0) {
+		pr_warn_ratelimited("mbuf: reached max num, alloc failed\n");
+		spin_unlock_irqrestore(&g_mbuf.mbuf_lock, flags);
+		return NULL;
+	}
+
+	/* Alloc a free mbuf_slot from the global mbuf, according to the mbuf_bitmap */
+	m_bitmap = g_mbuf.mbuf_bitmap;
+
+	try_times = 1;
+again:
+	index = get_next_mbuf_id(m_bitmap, g_mbuf.mbuf_next_id);
+
+	/* Rescan for the next available idx from the head once we reach the end */
+	if (index == g_mbuf.mbuf_max_slots && try_times--) {
+		g_mbuf.mbuf_next_id = 0;
+		goto again;
+	}
+
+	if (unlikely(index == g_mbuf.mbuf_max_slots)) {
+		/*
+		 * Just a protection mechanism; it must be a bug
+		 * if we reach here.
+		 */
+		pr_warn_ratelimited("mbuf: frees and bitmap are inconsistent, just return\n");
+		spin_unlock_irqrestore(&g_mbuf.mbuf_lock, flags);
+		return NULL;
+	}
+
+	__set_bit(index, m_bitmap);
+	g_mbuf.mbuf_next_id = index;
+
+	mb = (struct mbuf_slot *)(g_mbuf.mbuf + index * g_mbuf.mbuf_size_per_cg);
+	mbuf_slot_init(mb, cg, index);
+	g_mbuf.mbuf_frees--;
+
+	spin_unlock_irqrestore(&g_mbuf.mbuf_lock, flags);
+
+	return mb;
+}
+
+void mbuf_free(struct cgroup *cg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&g_mbuf.mbuf_lock, flags);
+
+	/* Make the current idx the next available buffer */
+	g_mbuf.mbuf_next_id = cg->mbuf->idx;
+	__clear_bit(g_mbuf.mbuf_next_id, g_mbuf.mbuf_bitmap);
+
+	g_mbuf.mbuf_frees++;
+	spin_unlock_irqrestore(&g_mbuf.mbuf_lock, flags);
+}
+
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index ca19b4c8acf5..4a942d4e9763 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -33,12 +33,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 		return;
 
 	/*
-	 * Paired with the one in cgroup_rstat_cpu_pop_upated(). Either we
-	 * see NULL updated_next or they see our updated stat.
-	 */
-	smp_mb();
-
-	/*
+	 * Speculative already-on-list test. This may race leading to
+	 * temporary inaccuracies, which is fine.
+	 *
 	 * Because @parent's updated_children is terminated with @parent
 	 * instead of NULL, we can tell whether @cgrp is on the list by
 	 * testing the next pointer for NULL.
@@ -134,13 +131,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 		*nextp = rstatc->updated_next;
 		rstatc->updated_next = NULL;
 
-		/*
-		 * Paired with the one in cgroup_rstat_cpu_updated().
-		 * Either they see NULL updated_next or we see their
-		 * updated stat.
-		 */
-		smp_mb();
-
 		return pos;
 	}
 
diff --git a/kernel/cgroup/sli.c b/kernel/cgroup/sli.c
new file mode 100755
index 000000000000..0cff9377699b
--- /dev/null
+++ b/kernel/cgroup/sli.c
@@ -0,0 +1,816 @@
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/psi.h>
+#include <linux/memcontrol.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/sysctl.h>
+#include <linux/stacktrace.h>
+#include <asm/irq_regs.h>
+#include "../sched/sched.h"
+#include <linux/sli.h>
+
+#define LINE_SIZE 128
+#define MAX_STACK_TRACE_DEPTH 64
+
+struct member_offset {
+	char *member;
+	int offset;
+};
+
+struct memlat_thr {
+	u64 global_direct_reclaim_thr;
+	u64 memcg_direct_reclaim_thr;
+	u64 direct_compact_thr;
+	u64 global_direct_swapout_thr;
+	u64 memcg_direct_swapout_thr;
+	u64 direct_swapin_thr;
+	u64 page_alloc_thr;
+};
+
+struct schedlat_thr {
+	u64 schedlat_wait_thr;
+	u64 schedlat_block_thr;
+	u64 schedlat_ioblock_thr;
+	u64 schedlat_sleep_thr;
+	u64 schedlat_rundelay_thr;
+	u64 schedlat_longsys_thr;
+};
+
+static DEFINE_STATIC_KEY_TRUE(sli_no_enabled);
+static struct memlat_thr memlat_threshold;
+static struct schedlat_thr schedlat_threshold;
+
+static const struct member_offset memlat_member_offset[] = {
+	{ "global_direct_reclaim_threshold=", offsetof(struct memlat_thr, global_direct_reclaim_thr)},
+	{ "memcg_direct_reclaim_threshold=", offsetof(struct memlat_thr, memcg_direct_reclaim_thr)},
+	{ "direct_compact_threshold=", offsetof(struct memlat_thr, direct_compact_thr)},
+	{ "global_direct_swapout_threshold=", offsetof(struct memlat_thr, global_direct_swapout_thr)},
+	{ "memcg_direct_swapout_threshold=", offsetof(struct memlat_thr, memcg_direct_swapout_thr)},
+	{ "direct_swapin_threshold=", offsetof(struct memlat_thr, direct_swapin_thr)},
+	{ "page_alloc_threshold=", offsetof(struct memlat_thr, page_alloc_thr)},
+	{ },
+};
+
+static const struct member_offset schedlat_member_offset[] = {
+	{ "schedlat_wait_thr=", offsetof(struct schedlat_thr, schedlat_wait_thr) },
+	{ "schedlat_block_thr=", offsetof(struct schedlat_thr, schedlat_block_thr) },
+	{ "schedlat_ioblock_thr=", offsetof(struct schedlat_thr, schedlat_ioblock_thr) },
+	{ "schedlat_sleep_thr=", offsetof(struct schedlat_thr, schedlat_sleep_thr) },
+	{ "schedlat_rundelay_thr=", offsetof(struct schedlat_thr, schedlat_rundelay_thr) },
+	{ "schedlat_longsys_thr=", offsetof(struct schedlat_thr, schedlat_longsys_thr) },
+	{ },
+};
+
+static void store_task_stack(struct task_struct *task, char *reason,
+			u64 duration, unsigned int skipnr)
+{
+	unsigned long *entries;
+	unsigned nr_entries = 0;
+	unsigned long flags;
+	int i;
+	struct cgroup *cgrp;
+
+	entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
+				GFP_ATOMIC);
+	if (!entries)
+		return;
+
+	nr_entries = stack_trace_save_tsk(task, entries, MAX_STACK_TRACE_DEPTH, skipnr);
+
+	/* Callers hold rcu_read_lock(), which get_cgroup_from_task() relies on */
+	cgrp = get_cgroup_from_task(task);
+	spin_lock_irqsave(&cgrp->cgrp_mbuf_lock, flags);
+
+	mbuf_print(cgrp, "record reason:%s comm:%s pid:%d duration=%lld\n",
+			reason, task->comm, task->pid, duration);
+
+	for (i = 0; i < nr_entries; i++)
+		mbuf_print(cgrp, "[<0>] %pB\n", (void *)entries[i]);
+
+	spin_unlock_irqrestore(&cgrp->cgrp_mbuf_lock, flags);
+
+	kfree(entries);
+}
+
+static u64 get_memlat_threshold(enum sli_memlat_stat_item sidx)
+{
+	long threshold = -1;
+
+	switch (sidx) {
+	case MEM_LAT_GLOBAL_DIRECT_RECLAIM:
+		threshold = memlat_threshold.global_direct_reclaim_thr;
+		break;
+	case MEM_LAT_MEMCG_DIRECT_RECLAIM:
+		threshold = memlat_threshold.memcg_direct_reclaim_thr;
+		break;
+	case MEM_LAT_DIRECT_COMPACT:
+		threshold = memlat_threshold.direct_compact_thr;
+		break;
+	case MEM_LAT_GLOBAL_DIRECT_SWAPOUT:
+		threshold = memlat_threshold.global_direct_swapout_thr;
+		break;
+	case MEM_LAT_MEMCG_DIRECT_SWAPOUT:
+		threshold = memlat_threshold.memcg_direct_swapout_thr;
+		break;
+	case MEM_LAT_DIRECT_SWAPIN:
+		threshold = memlat_threshold.direct_swapin_thr;
+		break;
+	case MEM_LAT_PAGE_ALLOC:
+		threshold = memlat_threshold.page_alloc_thr;
+		break;
+	default:
+		break;
+	}
+
+	if (threshold != -1)
+		threshold = threshold << 20; /* ms to ns: 2^20 = 1048576, roughly 1e6 */
+
+	return threshold;
+}
+
+static char *get_memlat_name(enum sli_memlat_stat_item sidx)
+{
+	char *name = NULL;
+
+	switch (sidx) {
+	case MEM_LAT_GLOBAL_DIRECT_RECLAIM:
+		name = "memlat_global_direct_reclaim";
+		break;
+	case MEM_LAT_MEMCG_DIRECT_RECLAIM:
+		name = "memlat_memcg_direct_reclaim";
+		break;
+	case MEM_LAT_DIRECT_COMPACT:
+		name = "memlat_direct_compact";
+		break;
+	case MEM_LAT_GLOBAL_DIRECT_SWAPOUT:
+		name = "memlat_global_direct_swapout";
+		break;
+	case MEM_LAT_MEMCG_DIRECT_SWAPOUT:
+		name = "memlat_memcg_direct_swapout";
+		break;
+	case MEM_LAT_DIRECT_SWAPIN:
+		name = "memlat_direct_swapin";
+		break;
+	case MEM_LAT_PAGE_ALLOC:
+		name = "memlat_page_alloc";
+		break;
+	default:
+		break;
+	}
+
+	return name;
+}
+
+/*
+ * Map a latency to its histogram bucket; duration >> 20 approximates
+ * ns -> ms, so e.g. a 5 ms delay lands in LAT_4_8.
+ */
+static enum sli_lat_count get_lat_count_idx(u64 duration)
+{
+	enum sli_lat_count idx;
+
+	duration = duration >> 20;
+	if (duration < 1)
+		idx = LAT_0_1;
+	else if (duration < 4)
+		idx = LAT_1_4;
+	else if (duration < 8)
+		idx = LAT_4_8;
+	else if (duration < 16)
+		idx = LAT_8_16;
+	else if (duration < 32)
+		idx = LAT_16_32;
+	else if (duration < 64)
+		idx = LAT_32_64;
+	else if (duration < 128)
+		idx = LAT_64_128;
+	else
+		idx = LAT_128_INF;
+
+	return idx;
+}
+
+static u64 get_schedlat_threshold(enum sli_schedlat_stat_item sidx)
+{
+	long threshold = -1;
+
+	switch (sidx) {
+	case SCHEDLAT_WAIT:
+		threshold = schedlat_threshold.schedlat_wait_thr;
+		break;
+	case SCHEDLAT_BLOCK:
+		threshold = schedlat_threshold.schedlat_block_thr;
+		break;
+	case SCHEDLAT_IOBLOCK:
+		threshold = schedlat_threshold.schedlat_ioblock_thr;
+		break;
+	case SCHEDLAT_SLEEP:
+		threshold = schedlat_threshold.schedlat_sleep_thr;
+		break;
+	case SCHEDLAT_RUNDELAY:
+		threshold = schedlat_threshold.schedlat_rundelay_thr;
+		break;
+	case SCHEDLAT_LONGSYS:
+		threshold = schedlat_threshold.schedlat_longsys_thr;
+		break;
+	default:
+		break;
+	}
+
+	if (threshold != -1)
+		threshold = threshold << 20; /* ms to ns: 2^20 = 1048576, roughly 1e6 */
+
+	return threshold;
+}
+
+static char *get_schedlat_name(enum sli_schedlat_stat_item sidx)
+{
+	char *name = NULL;
+
+	switch (sidx) {
+	case SCHEDLAT_WAIT:
+		name = "schedlat_wait";
+		break;
+	case SCHEDLAT_BLOCK:
+		name = "schedlat_block";
+		break;
+	case SCHEDLAT_IOBLOCK:
+		name = "schedlat_ioblock";
+		break;
+	case SCHEDLAT_SLEEP:
+		name = "schedlat_sleep";
+		break;
+	case SCHEDLAT_RUNDELAY:
+		name = "schedlat_rundelay";
+		break;
+	case SCHEDLAT_LONGSYS:
+		name = "schedlat_longsys";
+		break;
+	default:
+		break;
+	}
+
+	return name;
+}
+
+static u64 sli_memlat_stat_gather(struct cgroup *cgrp,
+				  enum sli_memlat_stat_item sidx,
+				  enum sli_lat_count cidx)
+{
+	u64 sum = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		sum += per_cpu_ptr(cgrp->sli_memlat_stat_percpu, cpu)->item[sidx][cidx];
+
+	return sum;
+}
+
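+/*
+ * Editor's note: sample output from the show helper below (format taken
+ * from its seq_printf calls; the counts are invented):
+ *
+ *	memlat_global_direct_reclaim:
+ *	0-1ms: 102
+ *	1-4ms: 3
+ *	...
+ *	>=128ms: 0
+ */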
+int sli_memlat_stat_show(struct seq_file *m, struct cgroup *cgrp)
+{
+	enum sli_memlat_stat_item sidx;
+
+	if (static_branch_likely(&sli_no_enabled)) {
+		seq_printf(m, "sli is not enabled, please echo 1 > /proc/sli/sli_enabled!\n");
+		return 0;
+	}
+
+	if (!cgrp->sli_memlat_stat_percpu)
+		return 0;
+
+	for (sidx = MEM_LAT_GLOBAL_DIRECT_RECLAIM; sidx < MEM_LAT_STAT_NR; sidx++) {
+		seq_printf(m, "%s:\n", get_memlat_name(sidx));
+		seq_printf(m, "0-1ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_0_1));
+		seq_printf(m, "1-4ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_1_4));
+		seq_printf(m, "4-8ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_4_8));
+		seq_printf(m, "8-16ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_8_16));
+		seq_printf(m, "16-32ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_16_32));
+		seq_printf(m, "32-64ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_32_64));
+		seq_printf(m, "64-128ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_64_128));
+		seq_printf(m, ">=128ms: %llu\n", sli_memlat_stat_gather(cgrp, sidx, LAT_128_INF));
+	}
+
+	return 0;
+}
+
+int sli_memlat_max_show(struct seq_file *m, struct cgroup *cgrp)
+{
+	enum sli_memlat_stat_item sidx;
+
+	if (static_branch_likely(&sli_no_enabled)) {
+		seq_printf(m, "sli is not enabled, please echo 1 > /proc/sli/sli_enabled && "
+			      "echo 1 > /proc/sys/kernel/sched_schedstats\n");
+		return 0;
+	}
+
+	if (!cgrp->sli_memlat_stat_percpu)
+		return 0;
+
+	for (sidx = MEM_LAT_GLOBAL_DIRECT_RECLAIM; sidx < MEM_LAT_STAT_NR; sidx++) {
+		int cpu;
+		unsigned long latency_max = 0, latency;
+
+		/* Report the max latency seen on any CPU, then reset it */
+		for_each_possible_cpu(cpu) {
+			latency = per_cpu_ptr(cgrp->sli_memlat_stat_percpu, cpu)->latency_max[sidx];
+			per_cpu_ptr(cgrp->sli_memlat_stat_percpu, cpu)->latency_max[sidx] = 0;
+
+			if (latency > latency_max)
+				latency_max = latency;
+		}
+
+		seq_printf(m, "%s: %lu\n", get_memlat_name(sidx), latency_max);
+	}
+
+	return 0;
+}
+
+void sli_memlat_stat_start(u64 *start)
+{
+	if (static_branch_likely(&sli_no_enabled))
+		*start = 0;
+	else
+		*start = local_clock();
+}
+
+void sli_memlat_stat_end(enum sli_memlat_stat_item sidx, u64 start)
+{
+	enum sli_lat_count cidx;
+	u64 duration, threshold;
+	struct mem_cgroup *memcg;
+	struct cgroup *cgrp;
+
+	if (static_branch_likely(&sli_no_enabled) || start == 0)
+		return;
+
+	duration = local_clock() - start;
+	threshold = get_memlat_threshold(sidx);
+	cidx = get_lat_count_idx(duration);
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_task(current);
+	if (!memcg || memcg == root_mem_cgroup) {
+		rcu_read_unlock();
+		return;
+	}
+
+	cgrp = memcg->css.cgroup;
+	if (cgrp && cgrp->sli_memlat_stat_percpu) {
+		this_cpu_inc(cgrp->sli_memlat_stat_percpu->item[sidx][cidx]);
+		if (duration >= threshold) {
+			char *lat_name;
+
+			lat_name = get_memlat_name(sidx);
+			store_task_stack(current, lat_name, duration, 0);
+		}
+
+		if (duration > this_cpu_read(cgrp->sli_memlat_stat_percpu->latency_max[sidx]))
+			this_cpu_write(cgrp->sli_memlat_stat_percpu->latency_max[sidx], duration);
+	}
+	rcu_read_unlock();
+}
+
+static void sli_memlat_thr_set(u64 value)
+{
+	memlat_threshold.global_direct_reclaim_thr = value;
+	memlat_threshold.memcg_direct_reclaim_thr = value;
+	memlat_threshold.direct_compact_thr = value;
+	memlat_threshold.global_direct_swapout_thr = value;
+	memlat_threshold.memcg_direct_swapout_thr = value;
+	memlat_threshold.direct_swapin_thr = value;
+	memlat_threshold.page_alloc_thr = value;
+}
+
+static int sli_memlat_thr_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "global_direct_reclaim_threshold = %llu ms\n"
+		      "memcg_direct_reclaim_threshold = %llu ms\n"
"direct_compact_threshold = %llu ms\n" + "global_direct_swapout_threshold = %llu ms\n" + "memcg_direct_swapout_threshold = %llu ms\n" + "direct_swapin_threshold = %llu ms\n" + "page_alloc_threshold = %llu ms\n", + memlat_threshold.global_direct_reclaim_thr, + memlat_threshold.memcg_direct_reclaim_thr, + memlat_threshold.direct_compact_thr, + memlat_threshold.global_direct_swapout_thr, + memlat_threshold.memcg_direct_swapout_thr, + memlat_threshold.direct_swapin_thr, + memlat_threshold.page_alloc_thr + ); + + return 0; +} + +static int sli_memlat_thr_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, sli_memlat_thr_show, NULL); +} + +static ssize_t sli_memlat_thr_write(struct file *file, + const char __user *buf, size_t count, loff_t *offs) +{ + char line[LINE_SIZE]; + u64 value; + char *ptr; + int i = 0; + + memset(line, 0, LINE_SIZE); + + count = min_t(size_t, count, LINE_SIZE - 1); + + if (strncpy_from_user(line, buf, count) < 0) + return -EINVAL; + + if (strlen(line) < 1) + return -EINVAL; + + if (!kstrtou64(line, 0, &value)) { + sli_memlat_thr_set(value); + return count; + } + + for ( ; memlat_member_offset[i].member ; i++) { + int lenstr = strlen(memlat_member_offset[i].member); + + ptr = strnstr(line, memlat_member_offset[i].member, lenstr); + if (lenstr && ptr) { + if (kstrtou64(ptr+lenstr, 0, &value)) + return -EINVAL; + + *((u64 *)((char *)&memlat_threshold + memlat_member_offset[i].offset)) = value; + return count; + } + } + + return -EINVAL; +} + +static const struct file_operations sli_memlat_thr_fops = { + .open = sli_memlat_thr_open, + .read = seq_read, + .write = sli_memlat_thr_write, + .release = single_release, +}; + +void sli_schedlat_stat(struct task_struct *task, enum sli_schedlat_stat_item sidx, u64 delta) +{ + enum sli_lat_count cidx; + struct cgroup *cgrp = NULL; + u64 threshold; + + if (static_branch_likely(&sli_no_enabled) || !task) + return; + + threshold = get_schedlat_threshold(sidx); + cidx = get_lat_count_idx(delta); + + rcu_read_lock(); + cgrp = get_cgroup_from_task(task); + if (cgrp && cgrp->sli_schedlat_stat_percpu) { + this_cpu_inc(cgrp->sli_schedlat_stat_percpu->item[sidx][cidx]); + if (delta >= threshold) { + char *lat_name; + + lat_name = get_schedlat_name(sidx); + store_task_stack(task, lat_name, delta, 0); + } + + if (delta > this_cpu_read(cgrp->sli_schedlat_stat_percpu->latency_max[sidx])) + this_cpu_write(cgrp->sli_schedlat_stat_percpu->latency_max[sidx], delta); + } + rcu_read_unlock(); +} + +void sli_schedlat_rundelay(struct task_struct *task, struct task_struct *prev, u64 delta) +{ + enum sli_lat_count cidx; + enum sli_schedlat_stat_item sidx = SCHEDLAT_RUNDELAY; + struct cgroup *cgrp = NULL; + u64 threshold; + + if (static_branch_likely(&sli_no_enabled) || !task || !prev) + return; + + threshold = get_schedlat_threshold(sidx); + cidx = get_lat_count_idx(delta); + + rcu_read_lock(); + cgrp = get_cgroup_from_task(task); + if (cgrp && cgrp->sli_schedlat_stat_percpu) { + this_cpu_inc(cgrp->sli_schedlat_stat_percpu->item[sidx][cidx]); + if (delta >= threshold) { + int i; + unsigned long *entries; + unsigned nr_entries = 0; + unsigned long flags; + + entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries), + GFP_ATOMIC); + if (!entries) + goto out; + + nr_entries = stack_trace_save_tsk(prev, entries, + MAX_STACK_TRACE_DEPTH, 0); + + spin_lock_irqsave(&cgrp->cgrp_mbuf_lock, flags); + + mbuf_print(cgrp, "record reason:schedlat_rundelay next_comm:%s " + "next_pid:%d prev_comm:%s prev_pid:%d duration=%lld\n", + task->comm, 
+			for (i = 0; i < nr_entries; i++)
+				mbuf_print(cgrp, "[<0>] %pB\n", (void *)entries[i]);
+
+			spin_unlock_irqrestore(&cgrp->cgrp_mbuf_lock, flags);
+			kfree(entries);
+		}
+
+		if (delta > this_cpu_read(cgrp->sli_schedlat_stat_percpu->latency_max[sidx]))
+			this_cpu_write(cgrp->sli_schedlat_stat_percpu->latency_max[sidx], delta);
+	}
+
+out:
+	rcu_read_unlock();
+}
+
+#ifdef CONFIG_SCHED_INFO
+void sli_check_longsys(struct task_struct *tsk)
+{
+	long delta;
+
+	if (static_branch_likely(&sli_no_enabled))
+		return;
+
+	if (!tsk || tsk->sched_class != &fair_sched_class)
+		return;
+
+	/* Long-syscall detection only runs when TIF_NEED_RESCHED is set */
+	if (!test_tsk_need_resched(tsk))
+		return;
+
+	/* Kthreads do not belong to any cgroup */
+	if (tsk->flags & PF_KTHREAD)
+		return;
+
+	if (!tsk->sched_info.kernel_exec_start ||
+		tsk->sched_info.task_switch != (tsk->nvcsw + tsk->nivcsw) ||
+		tsk->utime != tsk->sched_info.utime) {
+		tsk->sched_info.utime = tsk->utime;
+		tsk->sched_info.kernel_exec_start = rq_clock(this_rq());
+		tsk->sched_info.task_switch = tsk->nvcsw + tsk->nivcsw;
+		return;
+	}
+
+	delta = rq_clock(this_rq()) - tsk->sched_info.kernel_exec_start;
+	sli_schedlat_stat(tsk, SCHEDLAT_LONGSYS, delta);
+}
+#endif
+
+static void sli_schedlat_thr_set(u64 value)
+{
+	schedlat_threshold.schedlat_wait_thr = value;
+	schedlat_threshold.schedlat_block_thr = value;
+	schedlat_threshold.schedlat_ioblock_thr = value;
+	schedlat_threshold.schedlat_sleep_thr = value;
+	schedlat_threshold.schedlat_rundelay_thr = value;
+	schedlat_threshold.schedlat_longsys_thr = value;
+}
+
+static u64 sli_schedlat_stat_gather(struct cgroup *cgrp,
+		enum sli_schedlat_stat_item sidx,
+		enum sli_lat_count cidx)
+{
+	u64 sum = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		sum += per_cpu_ptr(cgrp->sli_schedlat_stat_percpu, cpu)->item[sidx][cidx];
+
+	return sum;
+}
+
+int sli_schedlat_max_show(struct seq_file *m, struct cgroup *cgrp)
+{
+	enum sli_schedlat_stat_item sidx;
+
+	if (static_branch_likely(&sli_no_enabled)) {
+		seq_printf(m, "sli is not enabled, please echo 1 > /proc/sli/sli_enabled && "
+			      "echo 1 > /proc/sys/kernel/sched_schedstats\n");
+		return 0;
+	}
+
+	if (!cgrp->sli_schedlat_stat_percpu)
+		return 0;
+
+	for (sidx = SCHEDLAT_WAIT; sidx < SCHEDLAT_STAT_NR; sidx++) {
+		int cpu;
+		unsigned long latency_max = 0, latency;
+
+		for_each_possible_cpu(cpu) {
+			latency = per_cpu_ptr(cgrp->sli_schedlat_stat_percpu, cpu)->latency_max[sidx];
+			per_cpu_ptr(cgrp->sli_schedlat_stat_percpu, cpu)->latency_max[sidx] = 0;
+
+			if (latency > latency_max)
+				latency_max = latency;
+		}
+
+		seq_printf(m, "%s: %lu\n", get_schedlat_name(sidx), latency_max);
+	}
+
+	return 0;
+}
+
+int sli_schedlat_stat_show(struct seq_file *m, struct cgroup *cgrp)
+{
+	enum sli_schedlat_stat_item sidx;
+
+	if (static_branch_likely(&sli_no_enabled)) {
+		seq_printf(m, "sli is not enabled, please echo 1 > /proc/sli/sli_enabled && "
+			      "echo 1 > /proc/sys/kernel/sched_schedstats\n");
+		return 0;
+	}
+
+	if (!cgrp->sli_schedlat_stat_percpu)
+		return 0;
+
+	for (sidx = SCHEDLAT_WAIT; sidx < SCHEDLAT_STAT_NR; sidx++) {
+		seq_printf(m, "%s:\n", get_schedlat_name(sidx));
+		seq_printf(m, "0-1ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_0_1));
+		seq_printf(m, "1-4ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_1_4));
+		seq_printf(m, "4-8ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_4_8));
+		seq_printf(m, "8-16ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_8_16));
+		seq_printf(m, "16-32ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_16_32));
+		seq_printf(m, "32-64ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_32_64));
+		seq_printf(m, "64-128ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_64_128));
+		seq_printf(m, ">=128ms: %llu\n", sli_schedlat_stat_gather(cgrp, sidx, LAT_128_INF));
+	}
+
+	return 0;
+}
+
+static int sli_schedlat_thr_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "schedlat_wait_thr = %llu ms\n"
+		      "schedlat_block_thr = %llu ms\n"
+		      "schedlat_ioblock_thr = %llu ms\n"
+		      "schedlat_sleep_thr = %llu ms\n"
+		      "schedlat_rundelay_thr = %llu ms\n"
+		      "schedlat_longsys_thr = %llu ms\n",
+		      schedlat_threshold.schedlat_wait_thr,
+		      schedlat_threshold.schedlat_block_thr,
+		      schedlat_threshold.schedlat_ioblock_thr,
+		      schedlat_threshold.schedlat_sleep_thr,
+		      schedlat_threshold.schedlat_rundelay_thr,
+		      schedlat_threshold.schedlat_longsys_thr);
+
+	return 0;
+}
+
+static int sli_schedlat_thr_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sli_schedlat_thr_show, NULL);
+}
+
+static ssize_t sli_schedlat_thr_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *offs)
+{
+	char line[LINE_SIZE];
+	u64 value;
+	char *ptr;
+	int i = 0;
+
+	memset(line, 0, LINE_SIZE);
+
+	count = min_t(size_t, count, LINE_SIZE - 1);
+
+	if (strncpy_from_user(line, buf, count) < 0)
+		return -EINVAL;
+
+	if (strlen(line) < 1)
+		return -EINVAL;
+
+	if (!kstrtou64(line, 0, &value)) {
+		sli_schedlat_thr_set(value);
+		return count;
+	}
+
+	for (; schedlat_member_offset[i].member; i++) {
+		int lenstr = strlen(schedlat_member_offset[i].member);
+
+		ptr = strnstr(line, schedlat_member_offset[i].member, lenstr);
+		if (lenstr && ptr) {
+			if (kstrtou64(ptr + lenstr, 0, &value))
+				return -EINVAL;
+
+			*((u64 *)((char *)&schedlat_threshold + schedlat_member_offset[i].offset)) = value;
+			return count;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static const struct file_operations sli_schedlat_thr_fops = {
+	.open		= sli_schedlat_thr_open,
+	.read		= seq_read,
+	.write		= sli_schedlat_thr_write,
+	.release	= single_release,
+};
+
+static int sli_enabled_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%d\n", !static_key_enabled(&sli_no_enabled));
+	return 0;
+}
+
+static int sli_enabled_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sli_enabled_show, NULL);
+}
+
+static ssize_t sli_enabled_write(struct file *file, const char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char val = -1;
+	int ret = count;
+
+	if (count < 1 || *ppos) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_from_user(&val, ubuf, 1)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	switch (val) {
+	case '0':
+		static_branch_enable(&sli_no_enabled);
+		break;
+	case '1':
+		static_branch_disable(&sli_no_enabled);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+out:
+	return ret;
+}
+
+static const struct file_operations sli_enabled_fops = {
+	.open		= sli_enabled_open,
+	.read		= seq_read,
+	.write		= sli_enabled_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int sli_cgroup_alloc(struct cgroup *cgroup)
+{
+	if (!cgroup)
+		return 0;
+
+	spin_lock_init(&cgroup->cgrp_mbuf_lock);
+	cgroup->sli_memlat_stat_percpu = alloc_percpu(struct sli_memlat_stat);
+	if (!cgroup->sli_memlat_stat_percpu)
+		return -ENOMEM;
+
+	cgroup->sli_schedlat_stat_percpu = alloc_percpu(struct sli_schedlat_stat);
+	if (!cgroup->sli_schedlat_stat_percpu) {
+		free_percpu(cgroup->sli_memlat_stat_percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
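+/*
+ * Editor's note: counterpart of sli_cgroup_alloc() above. Both per-cpu
+ * pointers are checked before free_percpu(), so this is safe even when
+ * allocation failed part-way.
+ */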
+void sli_cgroup_free(struct cgroup *cgroup)
+{
+	if (!cgroup)
+		return;
+
+	if (cgroup->sli_memlat_stat_percpu)
+		free_percpu(cgroup->sli_memlat_stat_percpu);
+	if (cgroup->sli_schedlat_stat_percpu)
+		free_percpu(cgroup->sli_schedlat_stat_percpu);
+}
+
+static int __init sli_proc_init(void)
+{
+	/* The default threshold is -1 (U64_MAX), which disables stack recording */
+	sli_memlat_thr_set(-1);
+	sli_schedlat_thr_set(-1);
+	proc_mkdir("sli", NULL);
+	/* Runtime switch: echo 1 > /proc/sli/sli_enabled turns SLI on */
+	proc_create("sli/sli_enabled", 0, NULL, &sli_enabled_fops);
+	proc_create("sli/memory_latency_threshold", 0, NULL, &sli_memlat_thr_fops);
+	proc_create("sli/sched_latency_threshold", 0, NULL, &sli_schedlat_thr_fops);
+	return 0;
+}
+
+late_initcall(sli_proc_init);
+
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 406828fb3038..fa0e5727b4d9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3,6 +3,7 @@
  *
  * This code is licenced under the GPL.
  */
+#include <linux/sched/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu)
 	return bringup_wait_for_ap(cpu);
 }
 
+static int finish_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	struct mm_struct *mm = idle->active_mm;
+
+	/*
+	 * idle_task_exit() will have switched to &init_mm, now
+	 * clean up any remaining active_mm state.
+	 */
+	if (mm != &init_mm)
+		idle->active_mm = &init_mm;
+	mmdrop(mm);
+	return 0;
+}
+
 /*
  * Hotplug state machine related functions
  */
@@ -799,6 +815,10 @@ void __init cpuhp_threads_init(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifndef arch_clear_mm_cpumask_cpu
+#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+#endif
+
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  * @cpu: a CPU id
@@ -834,7 +854,7 @@ void clear_tasks_mm_cpumask(int cpu)
 		t = find_lock_task_mm(p);
 		if (!t)
 			continue;
-		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+		arch_clear_mm_cpumask_cpu(cpu, t->mm);
 		task_unlock(t);
 	}
 	rcu_read_unlock();
@@ -1212,7 +1232,7 @@ EXPORT_SYMBOL_GPL(cpu_up);
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 
-int freeze_secondary_cpus(int primary)
+int __freeze_secondary_cpus(int primary, bool suspend)
 {
 	int cpu, error = 0;
 
@@ -1237,7 +1257,7 @@ int freeze_secondary_cpus(int primary)
 		if (cpu == primary)
 			continue;
 
-		if (pm_wakeup_pending()) {
+		if (suspend && pm_wakeup_pending()) {
 			pr_info("Wakeup pending.
Abort CPU freeze\n"); error = -EBUSY; break; @@ -1434,7 +1454,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { [CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup.single = bringup_cpu, - .teardown.single = NULL, + .teardown.single = finish_cpu, .cant_stop = true, }, /* Final state before CPU kills itself */ diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c index cbca6879ab7d..44a259338e33 100644 --- a/kernel/cpu_pm.c +++ b/kernel/cpu_pm.c @@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); */ int cpu_pm_enter(void) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); @@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); */ int cpu_cluster_pm_enter(void) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index f76d6f77dd5e..097ab02989f9 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -96,14 +96,6 @@ int dbg_switch_cpu; /* Use kdb or gdbserver mode */ int dbg_kdb_mode = 1; -static int __init opt_kgdb_con(char *str) -{ - kgdb_use_con = 1; - return 0; -} - -early_param("kgdbcon", opt_kgdb_con); - module_param(kgdb_use_con, int, 0644); module_param(kgdbreboot, int, 0644); @@ -501,6 +493,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) if (exception_level > 1) { dump_stack(); + kgdb_io_module_registered = false; panic("Recursive entry to debugger"); } @@ -545,6 +538,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, arch_kgdb_ops.disable_hw_break(regs); acquirelock: + rcu_read_lock(); /* * Interrupts will be restored by the 'trap return' code, except when * single stepping. @@ -601,6 +595,7 @@ return_normal: atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return 0; } cpu_relax(); @@ -619,6 +614,7 @@ return_normal: raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); goto acquirelock; } @@ -634,6 +630,8 @@ return_normal: if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) goto kgdb_restore; + atomic_inc(&ignore_console_lock_warning); + /* Call the I/O driver's pre_exception routine */ if (dbg_io_ops->pre_exception) dbg_io_ops->pre_exception(); @@ -706,6 +704,8 @@ cpu_master_loop: if (dbg_io_ops->post_exception) dbg_io_ops->post_exception(); + atomic_dec(&ignore_console_lock_warning); + if (!kgdb_single_step) { raw_spin_unlock(&dbg_slave_lock); /* Wait till all the CPUs have quit from the debugger. 
*/ @@ -738,6 +738,7 @@ kgdb_restore: raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return kgdb_info[cpu].ret_state; } @@ -867,6 +868,20 @@ static struct console kgdbcons = { .index = -1, }; +static int __init opt_kgdb_con(char *str) +{ + kgdb_use_con = 1; + + if (kgdb_io_module_registered && !kgdb_con_registered) { + register_console(&kgdbcons); + kgdb_con_registered = 1; + } + + return 0; +} + +early_param("kgdbcon", opt_kgdb_con); + #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_dbg(int key) { diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 3a5184eb6977..46821793637a 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -679,12 +679,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } - if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) + if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) { /* * This was a interactive search (using '/' at more - * prompt) and it has completed. Clear the flag. + * prompt) and it has completed. Replace the \0 with + * its original value to ensure multi-line strings + * are handled properly, and return to normal mode. */ + *cphold = replaced_byte; kdb_grepping_flag = 0; + } /* * at this point the string is a full line and * should be printed, up to the null. diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 2118d8258b7c..ad53b19734e9 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -233,7 +233,7 @@ extern struct task_struct *kdb_curr_task(int); #define kdb_do_each_thread(g, p) do_each_thread(g, p) #define kdb_while_each_thread(g, p) while_each_thread(g, p) -#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) +#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL) extern void *debug_kmalloc(size_t size, gfp_t flags); extern void debug_kfree(void *); diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 551b0eb7028a..2a0c4985f38e 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev, spin_lock_irqsave(&mem->spinlock, flags); - if (unlikely(size > (mem->size << PAGE_SHIFT))) + if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT))) goto err; pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); @@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev, /* * Memory was found in the coherent area. 
*/ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - ret = mem->virt_base + (pageno << PAGE_SHIFT); + *dma_handle = dma_get_device_base(dev, mem) + + ((dma_addr_t)pageno << PAGE_SHIFT); + ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT); spin_unlock_irqrestore(&mem->spinlock, flags); memset(ret, 0, size); return ret; @@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem, int order, void *vaddr) { if (mem && vaddr >= mem->virt_base && vaddr < - (mem->virt_base + (mem->size << PAGE_SHIFT))) { + (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; unsigned long flags; @@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) { if (mem && vaddr >= mem->virt_base && vaddr + size <= - (mem->virt_base + (mem->size << PAGE_SHIFT))) { + (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; - int user_count = vma_pages(vma); + unsigned long user_count = vma_pages(vma); int count = PAGE_ALIGN(size) >> PAGE_SHIFT; *ret = -ENXIO; diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 4ad74f5987ea..cb6425e52bf7 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -137,9 +137,12 @@ static const char *const maperr2str[] = { [MAP_ERR_CHECKED] = "dma map error checked", }; -static const char *type2name[5] = { "single", "page", - "scather-gather", "coherent", - "resource" }; +static const char *type2name[] = { + [dma_debug_single] = "single", + [dma_debug_sg] = "scather-gather", + [dma_debug_coherent] = "coherent", + [dma_debug_resource] = "resource", +}; static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_NONE" }; diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 867fd72cb260..0a093a675b63 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -45,7 +45,8 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev, u64 dma_direct_get_required_mask(struct device *dev) { - u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); + phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT; + u64 max_dma = phys_to_dma_direct(dev, phys); return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index d9334f31a5af..8682a5305cb3 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -169,6 +169,8 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs); */ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) { + if (force_dma_unencrypted(dev)) + prot = pgprot_decrypted(prot); if (dev_is_dma_coherent(dev) || (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && (attrs & DMA_ATTR_NON_CONSISTENT))) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 673a2cdb2656..f99b79d7e123 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -230,6 +230,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; } io_tlb_index = 0; + no_iotlb_memory = false; if (verbose) swiotlb_print_info(); @@ -261,9 +262,11 @@ swiotlb_init(int verbose) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) return; - if (io_tlb_start) + if (io_tlb_start) { memblock_free_early(io_tlb_start, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + io_tlb_start = 0; + } pr_warn("Cannot allocate buffer"); 
no_iotlb_memory = true; } @@ -361,6 +364,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; } io_tlb_index = 0; + no_iotlb_memory = false; swiotlb_print_info(); diff --git a/kernel/elfcore.c b/kernel/elfcore.c deleted file mode 100644 index 57fb4dcff434..000000000000 --- a/kernel/elfcore.c +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/elf.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/binfmts.h> -#include <linux/elfcore.h> - -Elf_Half __weak elf_core_extra_phdrs(void) -{ - return 0; -} - -int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) -{ - return 1; -} - -int __weak elf_core_write_extra_data(struct coredump_params *cprm) -{ - return 1; -} - -size_t __weak elf_core_extra_data_size(void) -{ - return 0; -} diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index c2b41a263166..162d4a3c3d29 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -149,7 +149,7 @@ void put_callchain_buffers(void) } } -static struct perf_callchain_entry *get_callchain_entry(int *rctx) +struct perf_callchain_entry *get_callchain_entry(int *rctx) { int cpu; struct callchain_cpus_entries *entries; @@ -159,8 +159,10 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) return NULL; entries = rcu_dereference(callchain_cpus_entries); - if (!entries) + if (!entries) { + put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx); return NULL; + } cpu = smp_processor_id(); @@ -168,7 +170,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) (*rctx * perf_callchain_entry__sizeof())); } -static void +void put_callchain_entry(int rctx) { put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); @@ -183,11 +185,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, int rctx; entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - if (!entry) - goto exit_put; + return NULL; ctx.entry = entry; ctx.max_stack = max_stack; diff --git a/kernel/events/core.c b/kernel/events/core.c index 15b123bdcaf5..8e04df75cb34 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -93,11 +93,11 @@ static void remote_function(void *data) * @info: the function call argument * * Calls the function @func when the task is currently running. This might - * be on the current CPU, which just calls the function directly + * be on the current CPU, which just calls the function directly. This will + * retry due to any failures in smp_call_function_single(), such as if the + * task_cpu() goes offline concurrently. * - * returns: @func return value, or - * -ESRCH - when the process isn't running - * -EAGAIN - when the process moved away + * returns @func return value or -ESRCH or -ENXIO when the process isn't running */ static int task_function_call(struct task_struct *p, remote_function_f func, void *info) @@ -110,11 +110,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) }; int ret; - do { - ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); + for (;;) { + ret = smp_call_function_single(task_cpu(p), remote_function, + &data, 1); if (!ret) ret = data.ret; - } while (ret == -EAGAIN); + + if (ret != -EAGAIN) + break; + + cond_resched(); + } return ret; } @@ -1248,7 +1254,7 @@ static void put_ctx(struct perf_event_context *ctx) * function. 
* * Lock order: - * cred_guard_mutex + * exec_update_lock * task_struct::perf_event_mutex * perf_event_context::mutex * perf_event::child_mutex; @@ -1970,8 +1976,24 @@ static int perf_get_aux_event(struct perf_event *event, return 1; } +/* + * Events that have PERF_EV_CAP_SIBLING require being part of a group and + * cannot exist on their own, schedule them out and move them into the ERROR + * state. Also see _perf_event_enable(), it will not be able to recover + * this ERROR state. + */ +static inline void perf_remove_sibling_event(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + event_sched_out(event, cpuctx, ctx); + perf_event_set_state(event, PERF_EVENT_STATE_ERROR); +} + static void perf_group_detach(struct perf_event *event) { + struct perf_event *leader = event->group_leader; struct perf_event *sibling, *tmp; struct perf_event_context *ctx = event->ctx; @@ -1990,7 +2012,7 @@ static void perf_group_detach(struct perf_event *event) /* * If this is a sibling, remove it from its group. */ - if (event->group_leader != event) { + if (leader != event) { list_del_init(&event->sibling_list); event->group_leader->nr_siblings--; goto out; @@ -2003,6 +2025,9 @@ static void perf_group_detach(struct perf_event *event) */ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { + if (sibling->event_caps & PERF_EV_CAP_SIBLING) + perf_remove_sibling_event(sibling); + sibling->group_leader = sibling; list_del_init(&sibling->sibling_list); @@ -2024,10 +2049,10 @@ static void perf_group_detach(struct perf_event *event) } out: - perf_event__header_size(event->group_leader); - - for_each_sibling_event(tmp, event->group_leader) + for_each_sibling_event(tmp, leader) perf_event__header_size(tmp); + + perf_event__header_size(leader); } static bool is_orphaned_event(struct perf_event *event) @@ -2166,6 +2191,7 @@ __perf_remove_from_context(struct perf_event *event, if (!ctx->nr_events && ctx->is_active) { ctx->is_active = 0; + ctx->rotate_necessary = 0; if (ctx->task) { WARN_ON_ONCE(cpuctx->task_ctx != ctx); cpuctx->task_ctx = NULL; @@ -2795,6 +2821,7 @@ static void _perf_event_enable(struct perf_event *event) raw_spin_lock_irq(&ctx->lock); if (event->state >= PERF_EVENT_STATE_INACTIVE || event->state < PERF_EVENT_STATE_ERROR) { +out: raw_spin_unlock_irq(&ctx->lock); return; } @@ -2806,8 +2833,16 @@ static void _perf_event_enable(struct perf_event *event) * has gone back into error state, as distinct from the task having * been scheduled away before the cross-call arrived. */ - if (event->state == PERF_EVENT_STATE_ERROR) + if (event->state == PERF_EVENT_STATE_ERROR) { + /* + * Detached SIBLING events cannot leave ERROR state. + */ + if (event->event_caps & PERF_EV_CAP_SIBLING && + event->group_leader == event) + goto out; + event->state = PERF_EVENT_STATE_OFF; + } raw_spin_unlock_irq(&ctx->lock); event_function_call(event, __perf_event_enable, NULL); @@ -3042,12 +3077,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (!ctx->nr_active || !(is_active & EVENT_ALL)) return; - /* - * If we had been multiplexing, no rotations are necessary, now no events - * are active. 
-	 */
-	ctx->rotate_necessary = 0;
-
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -3057,6 +3086,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (is_active & EVENT_FLEXIBLE) {
 		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
 			group_sched_out(event, cpuctx, ctx);
+
+		/*
+		 * Since we cleared EVENT_FLEXIBLE, also clear
+		 * rotate_necessary, it will be reset by
+		 * ctx_flexible_sched_in() when needed.
+		 */
+		ctx->rotate_necessary = 0;
 	}
 	perf_pmu_enable(ctx->pmu);
 }
@@ -3795,6 +3831,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
 				typeof(*event), group_node);
 	}
 
+	/*
+	 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
+	 * finds there are unschedulable events, it will set it again.
+	 */
+	ctx->rotate_necessary = 0;
+
 	return event;
 }
 
@@ -5582,11 +5624,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
-
 	struct ring_buffer *rb = ring_buffer_get(event);
 	struct user_struct *mmap_user = rb->mmap_user;
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
+	bool detach_rest = false;
 
 	if (event->pmu->event_unmapped)
 		event->pmu->event_unmapped(event, vma->vm_mm);
@@ -5617,7 +5659,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		mutex_unlock(&event->mmap_mutex);
 	}
 
-	atomic_dec(&rb->mmap_count);
+	if (atomic_dec_and_test(&rb->mmap_count))
+		detach_rest = true;
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
 		goto out_put;
@@ -5626,7 +5669,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	mutex_unlock(&event->mmap_mutex);
 
 	/* If there's still other mmap()s of this buffer, we're done. */
-	if (atomic_read(&rb->mmap_count))
+	if (!detach_rest)
 		goto out_put;
 
 	/*
@@ -6537,9 +6580,12 @@ static u64 perf_virt_to_phys(u64 virt)
 		 * Try IRQ-safe __get_user_pages_fast first.
 		 * If failed, leave phys_addr as 0.
 		 */
-		if ((current->mm != NULL) &&
-		    (__get_user_pages_fast(virt, 1, 0, &p) == 1))
-			phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+		if (current->mm != NULL) {
+			pagefault_disable();
+			if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
+				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+			pagefault_enable();
+		}
 
 		if (p)
 			put_page(p);
@@ -7049,10 +7095,17 @@ static void perf_event_task_output(struct perf_event *event,
 		goto out;
 
 	task_event->event_id.pid = perf_event_pid(event, task);
-	task_event->event_id.ppid = perf_event_pid(event, current);
-
 	task_event->event_id.tid = perf_event_tid(event, task);
-	task_event->event_id.ptid = perf_event_tid(event, current);
+
+	if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
+		task_event->event_id.ppid = perf_event_pid(event,
+							task->real_parent);
+		task_event->event_id.ptid = perf_event_pid(event,
+							task->real_parent);
+	} else {  /* PERF_RECORD_FORK */
+		task_event->event_id.ppid = perf_event_pid(event, current);
+		task_event->event_id.ptid = perf_event_tid(event, current);
+	}
 
 	task_event->event_id.time = perf_event_clock(event);
 
@@ -9391,6 +9444,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
 				int fpos = token == IF_SRC_FILE ?
2 : 1; + kfree(filename); filename = match_strdup(&args[fpos]); if (!filename) { ret = -ENOMEM; @@ -9437,16 +9491,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, */ ret = -EOPNOTSUPP; if (!event->ctx->task) - goto fail_free_name; + goto fail; /* look up the path and grab its inode */ ret = kern_path(filename, LOOKUP_FOLLOW, &filter->path); if (ret) - goto fail_free_name; - - kfree(filename); - filename = NULL; + goto fail; ret = -EINVAL; if (!filter->path.dentry || @@ -9466,13 +9517,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, if (state != IF_STATE_ACTION) goto fail; + kfree(filename); kfree(orig); return 0; -fail_free_name: - kfree(filename); fail: + kfree(filename); free_filters_list(filters); kfree(orig); @@ -10483,12 +10534,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, context = parent_event->overflow_handler_context; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) if (overflow_handler == bpf_overflow_handler) { - struct bpf_prog *prog = bpf_prog_inc(parent_event->prog); + struct bpf_prog *prog = parent_event->prog; - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto err_ns; - } + bpf_prog_inc(prog); event->prog = prog; event->orig_overflow_handler = parent_event->orig_overflow_handler; @@ -10930,12 +10978,12 @@ SYSCALL_DEFINE5(perf_event_open, perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; - err = security_locked_down(LOCKDOWN_PERF); - if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR)) - /* REGS_INTR can leak data, lockdown must prevent this */ - return err; - - err = 0; + /* REGS_INTR can leak data, lockdown must prevent this */ + if (attr.sample_type & PERF_SAMPLE_REGS_INTR) { + err = security_locked_down(LOCKDOWN_PERF); + if (err) + return err; + } /* * In cgroup mode, the pid argument is used to pass the fd @@ -10978,24 +11026,6 @@ SYSCALL_DEFINE5(perf_event_open, goto err_task; } - if (task) { - err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); - if (err) - goto err_task; - - /* - * Reuse ptrace permission checks for now. - * - * We must hold cred_guard_mutex across this and any potential - * perf_install_in_context() call for this new event to - * serialize against exec() altering our credentials (and the - * perf_event_exit_task() that could imply). - */ - err = -EACCES; - if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) - goto err_cred; - } - if (flags & PERF_FLAG_PID_CGROUP) cgroup_fd = pid; @@ -11003,7 +11033,7 @@ SYSCALL_DEFINE5(perf_event_open, NULL, NULL, cgroup_fd); if (IS_ERR(event)) { err = PTR_ERR(event); - goto err_cred; + goto err_task; } if (is_sampling_event(event)) { @@ -11122,6 +11152,24 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; } + if (task) { + err = down_read_interruptible(&task->signal->exec_update_lock); + if (err) + goto err_file; + + /* + * Preserve ptrace permission check for backwards compatibility. + * + * We must hold exec_update_lock across this and any potential + * perf_install_in_context() call for this new event to + * serialize against exec() altering our credentials (and the + * perf_event_exit_task() that could imply). 
+ */ + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + goto err_cred; + } + if (move_group) { gctx = __perf_event_ctx_lock_double(group_leader, ctx); @@ -11275,7 +11323,7 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&ctx->mutex); if (task) { - mutex_unlock(&task->signal->cred_guard_mutex); + up_read(&task->signal->exec_update_lock); put_task_struct(task); } @@ -11297,7 +11345,10 @@ err_locked: if (move_group) perf_event_ctx_unlock(group_leader, gctx); mutex_unlock(&ctx->mutex); -/* err_file: */ +err_cred: + if (task) + up_read(&task->signal->exec_update_lock); +err_file: fput(event_file); err_context: perf_unpin_context(ctx); @@ -11309,9 +11360,6 @@ err_alloc: */ if (!event_file) free_event(event); -err_cred: - if (task) - mutex_unlock(&task->signal->cred_guard_mutex); err_task: if (task) put_task_struct(task); @@ -11616,7 +11664,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) /* * When a child task exits, feed back event values to parent events. * - * Can be called with cred_guard_mutex held when called from + * Can be called with exec_update_lock held when called from * install_exec_creds(). */ void perf_event_exit_task(struct task_struct *child) diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 3aef4191798c..6e87b358e082 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -210,7 +210,7 @@ static inline int get_recursion_context(int *recursion) rctx = 3; else if (in_irq()) rctx = 2; - else if (in_softirq()) + else if (in_serving_softirq()) rctx = 1; else rctx = 0; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c74761004ee5..a793bd23fe56 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -211,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, try_to_free_swap(old_page); page_vma_mapped_walk_done(&pvmw); - if (vma->vm_flags & VM_LOCKED) + if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page)) munlock_vma_page(old_page); put_page(old_page); @@ -867,10 +867,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, if (ret) goto out; - /* uprobe_write_opcode() assumes we don't cross page boundary */ - BUG_ON((uprobe->offset & ~PAGE_MASK) + - UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); - smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ set_bit(UPROBE_COPY_INSN, &uprobe->flags); @@ -1166,6 +1162,15 @@ static int __uprobe_register(struct inode *inode, loff_t offset, if (offset > i_size_read(inode)) return -EINVAL; + /* + * This ensures that copy_from_page(), copy_to_page() and + * __update_ref_ctr() can't cross page boundary. + */ + if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) + return -EINVAL; + if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) + return -EINVAL; + retry: uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); if (!uprobe) @@ -2014,6 +2019,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) uprobe_opcode_t opcode; int result; + if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) + return -EINVAL; + pagefault_disable(); result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); pagefault_enable(); @@ -2197,7 +2205,7 @@ static void handle_swbp(struct pt_regs *regs) if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. 
*/ - send_sig(SIGTRAP, current, 0); + force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't diff --git a/kernel/exit.c b/kernel/exit.c index 22dfaac9e48c..ece64771a31f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -456,7 +456,10 @@ static void exit_mm(void) up_read(&mm->mmap_sem); self.task = current; - self.next = xchg(&core_state->dumper.next, &self); + if (self.task->flags & PF_SIGNALED) + self.next = xchg(&core_state->dumper.next, &self); + else + self.task = NULL; /* * Implies mb(), the result of xchg() must be visible * to core_state->dumper. @@ -713,8 +716,12 @@ void __noreturn do_exit(long code) struct task_struct *tsk = current; int group_dead; - profile_task_exit(tsk); - kcov_task_exit(tsk); + /* + * We can get here from a kernel oops, sometimes with preemption off. + * Start by checking for critical errors. + * Then fix up important state like USER_DS and preemption. + * Then do everything else. + */ WARN_ON(blk_needs_flush_plug(tsk)); @@ -732,6 +739,16 @@ void __noreturn do_exit(long code) */ set_fs(USER_DS); + if (unlikely(in_atomic())) { + pr_info("note: %s[%d] exited with preempt_count %d\n", + current->comm, task_pid_nr(current), + preempt_count()); + preempt_count_set(PREEMPT_ENABLED); + } + + profile_task_exit(tsk); + kcov_task_exit(tsk); + ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); @@ -749,13 +766,6 @@ void __noreturn do_exit(long code) exit_signals(tsk); /* sets PF_EXITING */ - if (unlikely(in_atomic())) { - pr_info("note: %s[%d] exited with preempt_count %d\n", - current->comm, task_pid_nr(current), - preempt_count()); - preempt_count_set(PREEMPT_ENABLED); - } - /* sync mm's RSS info before statistics gathering */ if (tsk->mm) sync_mm_rss(tsk->mm); diff --git a/kernel/extable.c b/kernel/extable.c index f6c9406eec7d..f6920a11e28a 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -56,6 +56,8 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) e = search_kernel_exception_table(addr); if (!e) e = search_module_extables(addr); + if (!e) + e = search_bpf_extables(addr); return e; } diff --git a/kernel/fail_function.c b/kernel/fail_function.c index 63b349168da7..b0b1ad93fa95 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -253,7 +253,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer, if (copy_from_user(buf, buffer, count)) { ret = -EFAULT; - goto out; + goto out_free; } buf[count] = '\0'; sym = strstrip(buf); @@ -307,8 +307,9 @@ static ssize_t fei_write(struct file *file, const char __user *buffer, ret = count; } out: - kfree(buf); mutex_unlock(&fei_lock); +out_free: + kfree(buf); return ret; } diff --git a/kernel/fork.c b/kernel/fork.c index f07a13d85a69..2511cd4718e7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -146,10 +146,10 @@ int lockdep_tasklist_lock_is_held(void) EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); #endif /* #ifdef CONFIG_PROVE_RCU */ -int nr_forks(void) +unsigned long nr_forks(void) { int cpu; - int total = 0; + unsigned long total = 0; for_each_possible_cpu(cpu) total += per_cpu(total_forks, cpu); @@ -1232,7 +1232,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) struct mm_struct *mm; int err; - err = mutex_lock_killable(&task->signal->cred_guard_mutex); + err = down_read_killable(&task->signal->exec_update_lock); if (err) return ERR_PTR(err); @@ -1242,7 +1242,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) mmput(mm); mm = ERR_PTR(-EACCES); } - 
mutex_unlock(&task->signal->cred_guard_mutex); + up_read(&task->signal->exec_update_lock); return mm; } @@ -1597,6 +1597,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_init(&sig->cred_guard_mutex); + init_rwsem(&sig->exec_update_lock); return 0; } @@ -1760,6 +1761,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk) free_task(tsk); } +static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) +{ + /* Skip if kernel thread */ + if (!tsk->mm) + return; + + /* Skip if spawning a thread or using vfork */ + if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) + return; + + /* We need to synchronize with __set_oom_adj */ + mutex_lock(&oom_adj_mutex); + set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); + /* Update the values in case they were changed after copy_signal */ + tsk->signal->oom_score_adj = current->signal->oom_score_adj; + tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; + mutex_unlock(&oom_adj_mutex); +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -2091,14 +2111,9 @@ static __latent_entropy struct task_struct *copy_process( /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { - p->exit_signal = -1; p->group_leader = current->group_leader; p->tgid = current->tgid; } else { - if (clone_flags & CLONE_PARENT) - p->exit_signal = current->group_leader->exit_signal; - else - p->exit_signal = args->exit_signal; p->group_leader = p; p->tgid = p->pid; } @@ -2143,9 +2158,14 @@ static __latent_entropy struct task_struct *copy_process( if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; + if (clone_flags & CLONE_THREAD) + p->exit_signal = -1; + else + p->exit_signal = current->group_leader->exit_signal; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; + p->exit_signal = args->exit_signal; } klp_copy_process(p); @@ -2232,6 +2252,8 @@ static __latent_entropy struct task_struct *copy_process( trace_task_newtask(p, clone_flags); uprobe_copy_process(p, clone_flags); + copy_oom_score_adj(clone_flags, p); + return p; bad_fork_cancel_cgroup: @@ -2423,11 +2445,11 @@ long do_fork(unsigned long clone_flags, int __user *child_tidptr) { struct kernel_clone_args args = { - .flags = (clone_flags & ~CSIGNAL), + .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, - .exit_signal = (clone_flags & CSIGNAL), + .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), .stack = stack_start, .stack_size = stack_size, }; @@ -2445,8 +2467,9 @@ long do_fork(unsigned long clone_flags, pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct kernel_clone_args args = { - .flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), - .exit_signal = (flags & CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), + .exit_signal = (lower_32_bits(flags) & CSIGNAL), .stack = (unsigned long)fn, .stack_size = (unsigned long)arg, }; @@ -2507,11 +2530,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, #endif { struct kernel_clone_args args = { - .flags = (clone_flags & ~CSIGNAL), + .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, - 
.exit_signal = (clone_flags & CSIGNAL), + .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), .stack = newsp, .tls = tls, }; diff --git a/kernel/futex.c b/kernel/futex.c index 5660c02b01b0..375e7e98e301 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -857,6 +857,29 @@ static struct futex_pi_state *alloc_pi_state(void) return pi_state; } +static void pi_state_update_owner(struct futex_pi_state *pi_state, + struct task_struct *new_owner) +{ + struct task_struct *old_owner = pi_state->owner; + + lockdep_assert_held(&pi_state->pi_mutex.wait_lock); + + if (old_owner) { + raw_spin_lock(&old_owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + raw_spin_unlock(&old_owner->pi_lock); + } + + if (new_owner) { + raw_spin_lock(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &new_owner->pi_state_list); + pi_state->owner = new_owner; + raw_spin_unlock(&new_owner->pi_lock); + } +} + static void get_pi_state(struct futex_pi_state *pi_state) { WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); @@ -879,17 +902,12 @@ static void put_pi_state(struct futex_pi_state *pi_state) * and has cleaned up the pi_state already */ if (pi_state->owner) { - struct task_struct *owner; + unsigned long flags; - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - owner = pi_state->owner; - if (owner) { - raw_spin_lock(&owner->pi_lock); - list_del_init(&pi_state->list); - raw_spin_unlock(&owner->pi_lock); - } - rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); + pi_state_update_owner(pi_state, NULL); + rt_mutex_proxy_unlock(&pi_state->pi_mutex); + raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); } if (current->pi_state_cache) { @@ -1034,7 +1052,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { } * FUTEX_OWNER_DIED bit. See [4] * * [10] There is no transient state which leaves owner and user space - * TID out of sync. + * TID out of sync. Except one error case where the kernel is denied + * write access to the user address, see fixup_pi_state_owner(). * * * Serialization and lifetime rules: @@ -1594,8 +1613,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ */ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); - if (unlikely(should_fail_futex(true))) + if (unlikely(should_fail_futex(true))) { ret = -EFAULT; + goto out_unlock; + } ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); if (!ret && (curval != uval)) { @@ -1611,26 +1632,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ ret = -EINVAL; } - if (ret) - goto out_unlock; - - /* - * This is a point of no return; once we modify the uval there is no - * going back and subsequent operations must not fail. - */ - - raw_spin_lock(&pi_state->owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - raw_spin_unlock(&pi_state->owner->pi_lock); - - raw_spin_lock(&new_owner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &new_owner->pi_state_list); - pi_state->owner = new_owner; - raw_spin_unlock(&new_owner->pi_lock); - - postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + if (!ret) { + /* + * This is a point of no return; once we modified the uval + * there is no going back and subsequent operations must + * not fail. 
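
The lower_32_bits() masking added to do_fork(), kernel_thread() and sys_clone() above matters because, on 64-bit architectures, the legacy entry points receive their flags in a full 64-bit register while the legacy ABI only defines the low 32 bits. A minimal userspace-style sketch of the extraction; LOWER_32_BITS is a hypothetical stand-in for the kernel's lower_32_bits() macro, and CSIGNAL uses its real value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's lower_32_bits() macro. */
#define LOWER_32_BITS(n) ((uint32_t)((n) & 0xffffffffULL))
/* Real value of CSIGNAL: signal mask to be sent at exit. */
#define CSIGNAL 0x000000ffUL

int main(void)
{
	/* Legacy clone(): garbage may be left in the upper half. */
	uint64_t clone_flags = 0xdeadbeef00000111ULL;

	uint32_t flags = LOWER_32_BITS(clone_flags) & ~(uint32_t)CSIGNAL;
	uint32_t exit_signal = LOWER_32_BITS(clone_flags) & CSIGNAL;

	/* Prints flags=0x100 exit_signal=0x11: the upper bits are gone. */
	printf("flags=%#x exit_signal=%#x\n", flags, exit_signal);
	return 0;
}
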
+ */ + pi_state_update_owner(pi_state, new_owner); + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + } out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); @@ -2453,18 +2463,13 @@ static void unqueue_me_pi(struct futex_q *q) spin_unlock(q->lock_ptr); } -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - struct task_struct *argowner) +static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *argowner) { + u32 uval, uninitialized_var(curval), newval, newtid; struct futex_pi_state *pi_state = q->pi_state; - u32 uval, uninitialized_var(curval), newval; struct task_struct *oldowner, *newowner; - u32 newtid; - int ret, err = 0; - - lockdep_assert_held(q->lock_ptr); - - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + int err = 0; oldowner = pi_state->owner; @@ -2498,21 +2503,31 @@ retry: * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ - ret = 0; - goto out_unlock; + return 0; } if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { - /* We got the lock after all, nothing to fix. */ - ret = 0; - goto out_unlock; + /* We got the lock. pi_state is correct. Tell caller. */ + return 1; } /* - * Since we just failed the trylock; there must be an owner. + * The trylock just failed, so either there is an owner or + * there is a higher priority waiter than this one. */ newowner = rt_mutex_owner(&pi_state->pi_mutex); - BUG_ON(!newowner); + /* + * If the higher priority waiter has not yet taken over the + * rtmutex then newowner is NULL. We can't return here with + * that state because it's inconsistent vs. the user space + * state. So drop the locks and try again. It's a valid + * situation and not any different from the other retry + * conditions. + */ + if (unlikely(!newowner)) { + err = -EAGAIN; + goto handle_err; + } } else { WARN_ON_ONCE(argowner != current); if (oldowner == current) { @@ -2520,8 +2535,7 @@ retry: * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ - ret = 0; - goto out_unlock; + return 1; } newowner = argowner; } @@ -2551,22 +2565,9 @@ retry: * We fixed up user space. Now we need to fix the pi_state * itself. */ - if (pi_state->owner != NULL) { - raw_spin_lock(&pi_state->owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - raw_spin_unlock(&pi_state->owner->pi_lock); - } + pi_state_update_owner(pi_state, newowner); - pi_state->owner = newowner; - - raw_spin_lock(&newowner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &newowner->pi_state_list); - raw_spin_unlock(&newowner->pi_lock); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - - return 0; + return argowner == current; /* * In order to reschedule or handle a page fault, we need to drop the @@ -2587,17 +2588,16 @@ handle_err: switch (err) { case -EFAULT: - ret = fault_in_user_writeable(uaddr); + err = fault_in_user_writeable(uaddr); break; case -EAGAIN: cond_resched(); - ret = 0; + err = 0; break; default: WARN_ON_ONCE(1); - ret = err; break; } @@ -2607,17 +2607,44 @@ handle_err: /* * Check if someone else fixed it for us: */ - if (pi_state->owner != oldowner) { - ret = 0; - goto out_unlock; - } + if (pi_state->owner != oldowner) + return argowner == current; - if (ret) - goto out_unlock; + /* Retry if err was -EAGAIN or the fault in succeeded */ + if (!err) + goto retry; - goto retry; + /* + * fault_in_user_writeable() failed so user state is immutable. 
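
Futex rule [10] above is easiest to see from the user side: for PI futexes the kernel keeps the TID stored in the futex word in sync with the rt_mutex owner, so userspace only handles the uncontended fast paths. A minimal sketch of that protocol, assuming the standard futex(2) FUTEX_LOCK_PI/FUTEX_UNLOCK_PI semantics (error handling omitted):

#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static _Atomic uint32_t futex_word; /* 0 = unlocked, else owner TID */

static long sys_futex(void *uaddr, int op)
{
	return syscall(SYS_futex, uaddr, op, 0, NULL, NULL, 0);
}

static void pi_lock(void)
{
	uint32_t expected = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Uncontended fast path: 0 -> TID, no kernel involvement. */
	if (atomic_compare_exchange_strong(&futex_word, &expected, tid))
		return;
	/* Contended: the kernel queues us and keeps word and owner in sync. */
	sys_futex(&futex_word, FUTEX_LOCK_PI);
}

static void pi_unlock(void)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fast path only succeeds if the FUTEX_WAITERS bit is not set. */
	if (atomic_compare_exchange_strong(&futex_word, &tid, 0))
		return;
	/* Waiters present: let the kernel hand ownership over. */
	sys_futex(&futex_word, FUTEX_UNLOCK_PI);
}

int main(void)
{
	pi_lock();
	pi_unlock();
	return 0;
}
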
At + * best we can make the kernel state consistent but user state will + * be most likely hosed and any subsequent unlock operation will be + * rejected due to PI futex rule [10]. + * + * Ensure that the rtmutex owner is also the pi_state owner despite + * the user space value claiming something different. There is no + * point in unlocking the rtmutex if current is the owner as it + * would need to wait until the next waiter has taken the rtmutex + * to guarantee consistent state. Keep it simple. Userspace asked + * for this wrecked state. + * + * The rtmutex has an owner - either current or some other + * task. See the EAGAIN loop above. + */ + pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); -out_unlock: + return err; +} + +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *argowner) +{ + struct futex_pi_state *pi_state = q->pi_state; + int ret; + + lockdep_assert_held(q->lock_ptr); + + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + ret = __fixup_pi_state_owner(uaddr, q, argowner); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); return ret; } @@ -2641,8 +2668,6 @@ static long futex_wait_restart(struct restart_block *restart); */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { - int ret = 0; - if (locked) { /* * Got the lock. We might not be the anticipated owner if we @@ -2653,8 +2678,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) * stable state, anything else needs more attention. */ if (q->pi_state->owner != current) - ret = fixup_pi_state_owner(uaddr, q, current); - goto out; + return fixup_pi_state_owner(uaddr, q, current); + return 1; } /* @@ -2665,24 +2690,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) * Another speculative read; pi_state->owner == current is unstable * but needs our attention. */ - if (q->pi_state->owner == current) { - ret = fixup_pi_state_owner(uaddr, q, NULL); - goto out; - } + if (q->pi_state->owner == current) + return fixup_pi_state_owner(uaddr, q, NULL); /* * Paranoia check. If we did not take the lock, then we should not be - * the owner of the rt_mutex. + * the owner of the rt_mutex. Warn and establish consistent state. */ - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { - printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " - "pi-state %p\n", ret, - q->pi_state->pi_mutex.owner, - q->pi_state->owner); - } + if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) + return fixup_pi_state_owner(uaddr, q, current); -out: - return ret ? ret : locked; + return 0; } /** @@ -2847,14 +2865,13 @@ retry: goto out; restart = &current->restart_block; - restart->fn = futex_wait_restart; restart->futex.uaddr = uaddr; restart->futex.val = val; restart->futex.time = *abs_time; restart->futex.bitset = bitset; restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; - ret = -ERESTART_RESTARTBLOCK; + ret = set_restart_fn(restart, futex_wait_restart); out: if (to) { @@ -2894,7 +2911,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to; - struct futex_pi_state *pi_state = NULL; struct task_struct *exiting = NULL; struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; @@ -3031,23 +3047,9 @@ no_block: if (res) ret = (res < 0) ? res : 0; - /* - * If fixup_owner() faulted and was unable to handle the fault, unlock - * it and return the fault to userspace.
- */ - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } - /* Unqueue and drop the lock */ unqueue_me_pi(&q); - if (pi_state) { - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); - } - goto out_put_key; out_unlock_put_key: @@ -3313,7 +3315,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to; - struct futex_pi_state *pi_state = NULL; struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; @@ -3391,16 +3392,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ put_pi_state(q.pi_state); spin_unlock(q.lock_ptr); + /* + * Adjust the return value. It's either -EFAULT or + * success (1) but the caller expects 0 for success. + */ + ret = ret < 0 ? ret : 0; } } else { struct rt_mutex *pi_mutex; @@ -3431,25 +3433,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; - /* - * If fixup_pi_state_owner() faulted and was unable to handle - * the fault, unlock the rt_mutex and return the fault to - * userspace. - */ - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } - /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } - if (pi_state) { - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); - } - if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling @@ -3875,8 +3862,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, if (op & FUTEX_CLOCK_REALTIME) { flags |= FLAGS_CLOCKRT; - if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \ - cmd != FUTEX_WAIT_REQUEUE_PI) + if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) return -ENOSYS; } diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index c94b820a1b62..c466c7fbdece 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -70,12 +70,16 @@ struct gcov_fn_info { u32 ident; u32 checksum; +#if CONFIG_CLANG_VERSION < 110000 u8 use_extra_checksum; +#endif u32 cfg_checksum; u32 num_counters; u64 *counters; +#if CONFIG_CLANG_VERSION < 110000 const char *function_name; +#endif }; static struct gcov_info *current_info; @@ -105,6 +109,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush) } EXPORT_SYMBOL(llvm_gcov_init); +#if CONFIG_CLANG_VERSION < 110000 void llvm_gcda_start_file(const char *orig_filename, const char version[4], u32 checksum) { @@ -113,7 +118,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4], current_info->checksum = checksum; } EXPORT_SYMBOL(llvm_gcda_start_file); +#else +void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum) +{ + current_info->filename = orig_filename; + current_info->version = version; + current_info->checksum = checksum; +} +EXPORT_SYMBOL(llvm_gcda_start_file); +#endif +#if CONFIG_CLANG_VERSION < 110000 void llvm_gcda_emit_function(u32 ident, const char *function_name, u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum) { @@ -132,6 +147,21 @@ void 
llvm_gcda_emit_function(u32 ident, const char *function_name, list_add_tail(&info->head, &current_info->functions); } +#else +void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum) +{ + struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL); + + if (!info) + return; + + INIT_LIST_HEAD(&info->head); + info->ident = ident; + info->checksum = func_checksum; + info->cfg_checksum = cfg_checksum; + list_add_tail(&info->head, &current_info->functions); +} +#endif EXPORT_SYMBOL(llvm_gcda_emit_function); void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters) @@ -262,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) !list_is_last(&fn_ptr2->head, &info2->functions)) { if (fn_ptr1->checksum != fn_ptr2->checksum) return false; +#if CONFIG_CLANG_VERSION < 110000 if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum) return false; if (fn_ptr1->use_extra_checksum && fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum) return false; +#else + if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum) + return false; +#endif fn_ptr1 = list_next_entry(fn_ptr1, head); fn_ptr2 = list_next_entry(fn_ptr2, head); } @@ -295,6 +330,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src) } } +#if CONFIG_CLANG_VERSION < 110000 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn) { size_t cv_size; /* counter values size */ @@ -322,6 +358,28 @@ err_name: kfree(fn_dup); return NULL; } +#else +static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn) +{ + size_t cv_size; /* counter values size */ + struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn), + GFP_KERNEL); + if (!fn_dup) + return NULL; + INIT_LIST_HEAD(&fn_dup->head); + + cv_size = fn->num_counters * sizeof(fn->counters[0]); + fn_dup->counters = vmalloc(cv_size); + if (!fn_dup->counters) { + kfree(fn_dup); + return NULL; + } + + memcpy(fn_dup->counters, fn->counters, cv_size); + + return fn_dup; +} +#endif /** * gcov_info_dup - duplicate profiling data set @@ -362,6 +420,7 @@ err: * gcov_info_free - release memory for profiling data set duplicate * @info: profiling data set duplicate to free */ +#if CONFIG_CLANG_VERSION < 110000 void gcov_info_free(struct gcov_info *info) { struct gcov_fn_info *fn, *tmp; @@ -375,6 +434,20 @@ void gcov_info_free(struct gcov_info *info) kfree(info->filename); kfree(info); } +#else +void gcov_info_free(struct gcov_info *info) +{ + struct gcov_fn_info *fn, *tmp; + + list_for_each_entry_safe(fn, tmp, &info->functions, head) { + vfree(fn->counters); + list_del(&fn->head); + kfree(fn); + } + kfree(info->filename); + kfree(info); +} +#endif #define ITER_STRIDE PAGE_SIZE @@ -460,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info) list_for_each_entry(fi_ptr, &info->functions, head) { u32 i; - u32 len = 2; - - if (fi_ptr->use_extra_checksum) - len++; pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, len); +#if CONFIG_CLANG_VERSION < 110000 + pos += store_gcov_u32(buffer, pos, + fi_ptr->use_extra_checksum ?
3 : 2); +#else + pos += store_gcov_u32(buffer, pos, 3); +#endif pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->checksum); +#if CONFIG_CLANG_VERSION < 110000 if (fi_ptr->use_extra_checksum) pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); +#else + pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); +#endif pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE); pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2); diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index e5eb5ea7ea59..cc4ee482d3fb 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -108,9 +108,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; + (*pos)++; if (gcov_iter_next(iter)) return NULL; - (*pos)++; return iter; } diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ec37563674d6..60c7be5ff5c8 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include <linux/vmalloc.h> #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh index 5a0fc0b0403a..c1510f0ab3ea 100755 --- a/kernel/gen_kheaders.sh +++ b/kernel/gen_kheaders.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: GPL-2.0 # This script generates an archive consisting of kernel headers @@ -21,30 +21,38 @@ arch/$SRCARCH/include/ # Uncomment it for debugging. # if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter; # else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi -# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter -# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter +# find $all_dirs -name "*.h" | xargs ls -l > /tmp/ls-$iter + +all_dirs= +if [ "$building_out_of_srctree" ]; then + for d in $dir_list; do + all_dirs="$all_dirs $srctree/$d" + done +fi +all_dirs="$all_dirs $dir_list" # include/generated/compile.h is ignored because it is touched even when none -# of the source files changed. This causes pointless regeneration, so let us -# ignore them for md5 calculation. -pushd $srctree > /dev/null -src_files_md5="$(find $dir_list -name "*.h" | - grep -v "include/generated/compile.h" | - grep -v "include/generated/autoconf.h" | - xargs ls -l | md5sum | cut -d ' ' -f1)" -popd > /dev/null -obj_files_md5="$(find $dir_list -name "*.h" | - grep -v "include/generated/compile.h" | - grep -v "include/generated/autoconf.h" | +# of the source files changed. +# +# When Kconfig regenerates include/generated/autoconf.h, its timestamp is +# updated, but the contents might be still the same. When any CONFIG option is +# changed, Kconfig touches the corresponding timestamp file include/config/*.h. +# Hence, the md5sum detects the configuration change anyway. We do not need to +# check include/generated/autoconf.h explicitly. +# +# Ignore them for md5 calculation to avoid pointless regeneration. +headers_md5="$(find $all_dirs -name "*.h" | + grep -v "include/generated/compile.h" | + grep -v "include/generated/autoconf.h" | xargs ls -l | md5sum | cut -d ' ' -f1)" + # Any changes to this script will also cause a rebuild of the archive. 
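
The convert_to_gcda() change at the top of this hunk group makes Clang 11 and later always emit a three-word function record (ident, checksum, cfg_checksum) instead of deriving the length from use_extra_checksum. A minimal sketch of that serialization; store_u32() and emit_fn_record() are hypothetical stand-ins for the kernel's store_gcov_u32()-based code:

#include <stdint.h>
#include <string.h>

#define GCOV_TAG_FUNCTION 0x01000000u /* tag value used by the gcov format */

/* Hypothetical helper mirroring the kernel's store_gcov_u32(). */
static size_t store_u32(char *buf, size_t pos, uint32_t v)
{
	if (buf)
		memcpy(buf + pos, &v, sizeof(v));
	return sizeof(v);
}

/* Emit a Clang >= 11 style function record: tag, length 3, then
 * ident, checksum and cfg_checksum (no use_extra_checksum flag). */
static size_t emit_fn_record(char *buf, size_t pos, uint32_t ident,
			     uint32_t checksum, uint32_t cfg_checksum)
{
	pos += store_u32(buf, pos, GCOV_TAG_FUNCTION);
	pos += store_u32(buf, pos, 3); /* record length in 4-byte words */
	pos += store_u32(buf, pos, ident);
	pos += store_u32(buf, pos, checksum);
	pos += store_u32(buf, pos, cfg_checksum);
	return pos;
}

int main(void)
{
	char buf[64];
	size_t end = emit_fn_record(buf, 0, 1, 0xabcd1234u, 0x5678u);

	return end == 20 ? 0 : 1; /* five words of four bytes each */
}
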
this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)" if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi if [ -f kernel/kheaders.md5 ] && - [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] && - [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] && - [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] && - [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then + [ "$(head -n 1 kernel/kheaders.md5)" = "$headers_md5" ] && + [ "$(head -n 2 kernel/kheaders.md5 | tail -n 1)" = "$this_file_md5" ] && + [ "$(tail -n 1 kernel/kheaders.md5)" = "$tarfile_md5" ]; then exit fi @@ -55,14 +63,17 @@ fi rm -rf $cpio_dir mkdir $cpio_dir -pushd $srctree > /dev/null -for f in $dir_list; - do find "$f" -name "*.h"; -done | cpio --quiet -pd $cpio_dir -popd > /dev/null +if [ "$building_out_of_srctree" ]; then + ( + cd $srctree + for f in $dir_list + do find "$f" -name "*.h"; + done | cpio --quiet -pd $cpio_dir + ) +fi -# The second CPIO can complain if files already exist which can -# happen with out of tree builds. Just silence CPIO for now. +# The second CPIO can complain if files already exist which can happen with out +# of tree builds having stale headers in srctree. Just silence CPIO for now. for f in $dir_list; do find "$f" -name "*.h"; done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1 @@ -77,10 +88,9 @@ find $cpio_dir -type f -print0 | find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \ --owner=0 --group=0 --numeric-owner --no-recursion \ - -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null + -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null -echo "$src_files_md5" > kernel/kheaders.md5 -echo "$obj_files_md5" >> kernel/kheaders.md5 +echo $headers_md5 > kernel/kheaders.md5 echo "$this_file_md5" >> kernel/kheaders.md5 echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5 diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index f92d9a687372..8fc2539eb08e 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -43,6 +43,10 @@ config GENERIC_IRQ_MIGRATION config AUTO_IRQ_AFFINITY bool +# Interrupt injection mechanism +config GENERIC_IRQ_INJECTION + bool + # Tasklet based software resend for pending interrupts on enable_irq() config HARDIRQS_SW_RESEND bool @@ -81,6 +85,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS # Generic IRQ IPI support config GENERIC_IRQ_IPI bool + select IRQ_DOMAIN_HIERARCHY # Generic MSI interrupt support config GENERIC_MSI_IRQ @@ -127,6 +132,7 @@ config SPARSE_IRQ config GENERIC_IRQ_DEBUGFS bool "Expose irq internals in debugfs" depends on DEBUG_FS + select GENERIC_IRQ_INJECTION default n ---help--- diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index b76703b2c0af..41d8818ee84f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -278,7 +278,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) } } if (resend) - check_irq_resend(desc); + check_irq_resend(desc, false); return ret; } diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index a949bd39e343..18ea484628c9 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -190,33 +190,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, return -EFAULT; if (!strncmp(buf, "trigger", size)) { - unsigned long flags; - int err; - /* Try the HW interface first */ - err = irq_set_irqchip_state(irq_desc_get_irq(desc), - IRQCHIP_STATE_PENDING, true); - if (!err) - return count; - - /* - * Otherwise, try to inject via 
the resend interface, - * which may or may not succeed. - */ - chip_bus_lock(desc); - raw_spin_lock_irqsave(&desc->lock, flags); - - if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) { - /* Can't do level nor NMIs, sorry */ - err = -EINVAL; - } else { - desc->istate |= IRQS_PENDING; - check_irq_resend(desc); - err = 0; - } - - raw_spin_unlock_irqrestore(&desc->lock, flags); - chip_bus_sync_unlock(desc); + int err = irq_inject_interrupt(irq_desc_get_irq(desc)); return err ? err : count; } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index c9d8eb7f5c02..7db284b10ac9 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc); /* Resending of interrupts :*/ -void check_irq_resend(struct irq_desc *desc); +int check_irq_resend(struct irq_desc *desc, bool inject); bool irq_wait_for_poll(struct irq_desc *desc); void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); @@ -425,6 +425,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return irqd_is_handle_enforce_irqctx(data); +} bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); #else /* CONFIG_GENERIC_PENDING_IRQ */ static inline bool irq_can_move_pcntxt(struct irq_data *data) @@ -451,6 +455,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) { return false; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return false; +} #endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 9be995fc3c5a..6a27603fe1cf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -638,9 +638,15 @@ void irq_init_desc(unsigned int irq) int generic_handle_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data; if (!desc) return -EINVAL; + + data = irq_desc_get_irq_data(desc); + if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data))) + return -EPERM; + generic_handle_irq_desc(desc); return 0; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 480df3659720..5e03cbee70d6 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -638,17 +638,19 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) EXPORT_SYMBOL_GPL(irq_create_direct_mapping); /** - * irq_create_mapping() - Map a hardware interrupt into linux irq space + * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space + * @affinity: irq affinity * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. 
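
The debugfs "trigger" path above becomes a thin wrapper around irq_inject_interrupt(), which the resend.c hunk later in this series adds and exports. A minimal sketch of a debug module using it, assuming the declaration is made available via linux/interrupt.h; the interrupt number is only a placeholder for a valid Linux irq on the test system:

#include <linux/interrupt.h>
#include <linux/module.h>

static unsigned int irq_nr = 16; /* placeholder, set via module parameter */
module_param(irq_nr, uint, 0444);

static int __init inject_init(void)
{
	/* Fails with -EINVAL for NMI, level or deactivated interrupts. */
	return irq_inject_interrupt(irq_nr);
}

static void __exit inject_exit(void)
{
}

module_init(inject_init);
module_exit(inject_exit);
MODULE_LICENSE("GPL");
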
*/ -unsigned int irq_create_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) +unsigned int irq_create_mapping_affinity(struct irq_domain *domain, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity) { struct device_node *of_node; int virq; @@ -674,7 +676,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, } /* Allocate a virtual interrupt number */ - virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL); + virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), + affinity); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; @@ -690,7 +693,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, return virq; } -EXPORT_SYMBOL_GPL(irq_create_mapping); +EXPORT_SYMBOL_GPL(irq_create_mapping_affinity); /** * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs @@ -1285,14 +1288,26 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs) { - if (domain->ops->free) - domain->ops->free(domain, irq_base, nr_irqs); + unsigned int i; + + if (!domain->ops->free) + return; + + for (i = 0; i < nr_irqs; i++) { + if (irq_domain_get_irq_data(domain, irq_base + i)) + domain->ops->free(domain, irq_base + i, 1); + } } int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { + if (!domain->ops->alloc) { + pr_debug("domain->ops->alloc() is NULL\n"); + return -ENOSYS; + } + return domain->ops->alloc(domain, irq_base, nr_irqs, arg); } @@ -1330,11 +1345,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, return -EINVAL; } - if (!domain->ops->alloc) { - pr_debug("domain->ops->alloc() is NULL\n"); - return -ENOSYS; - } - if (realloc && irq_base >= 0) { virq = irq_base; } else { diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b304c17d53a3..918fe0593386 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -194,9 +194,9 @@ void irq_set_thread_affinity(struct irq_desc *desc) set_bit(IRQTF_AFFINITY, &action->thread_flags); } +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static void irq_validate_effective_affinity(struct irq_data *data) { -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK const struct cpumask *m = irq_data_get_effective_affinity_mask(data); struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -204,9 +204,19 @@ static void irq_validate_effective_affinity(struct irq_data *data) return; pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", chip->name, data->irq); -#endif } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) +{ + cpumask_copy(irq_data_get_effective_affinity_mask(data), mask); +} +#else +static inline void irq_validate_effective_affinity(struct irq_data *data) { } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) { } +#endif + int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -265,6 +275,30 @@ static int irq_try_set_affinity(struct irq_data *data, return ret; } +static bool irq_set_affinity_deactivated(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + /* + * Handle irq chips which can handle affinity only in activated + * state correctly + * + * If the interrupt is not yet activated, just store the affinity + * mask and do not call the chip driver at all. 
On activation the + * driver has to make sure anyway that the interrupt is in a + * usable state so startup works. + */ + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || + irqd_is_activated(data) || !irqd_affinity_on_activate(data)) + return false; + + cpumask_copy(desc->irq_common_data.affinity, mask); + irq_init_effective_affinity(data, mask); + irqd_set(data, IRQD_AFFINITY_SET); + return true; +} + int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -275,6 +309,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (!chip || !chip->irq_set_affinity) return -EINVAL; + if (irq_set_affinity_deactivated(data, mask, force)) + return 0; + if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { ret = irq_try_set_affinity(data, mask, force); } else { @@ -989,11 +1026,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) irqreturn_t ret; local_bh_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_disable(); ret = action->thread_fn(action->irq, action->dev_id); if (ret == IRQ_HANDLED) atomic_inc(&desc->threads_handled); irq_finalize_oneshot(desc, action); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_irq_enable(); local_bh_enable(); return ret; } diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 30cc217b8631..8e586858bcf4 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, unsigned int cpu, bit; struct cpumap *cm; + /* + * Not required in theory, but matrix_find_best_cpu() uses + * for_each_cpu() which ignores the cpumask on UP. + */ + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; @@ -416,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) return; - clear_bit(bit, cm->alloc_map); + if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map))) + return; + cm->allocated--; if(managed) cm->managed_allocated--; diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index eb95f6106a1e..5d3da0db092f 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, can_reserve = msi_check_reservation_mode(domain, info, dev); - for_each_msi_entry(desc, dev) { - virq = desc->irq; - if (desc->nvec_used == 1) - dev_dbg(dev, "irq %d for MSI\n", virq); - else + /* + * This flag is set by the PCI layer as we need to activate + * the MSI entries before the PCI layer enables MSI in the + * card. Otherwise the card latches a random msi message.
- */ - if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) - continue; + } - irq_data = irq_domain_get_irq_data(domain, desc->irq); + irq_data = irq_domain_get_irq_data(domain, i); if (!can_reserve) { irqd_clr_can_reserve(irq_data); if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) @@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, goto cleanup; } +skip_activate: /* * If these interrupts use reservation mode, clear the activated bit * so request_irq() will assign the final vector. */ if (can_reserve) { - for_each_msi_entry(desc, dev) { - irq_data = irq_domain_get_irq_data(domain, desc->irq); + for_each_msi_vector(desc, i, dev) { + irq_data = irq_domain_get_irq_data(domain, i); irqd_clr_activated(irq_data); } } return 0; cleanup: - for_each_msi_entry(desc, dev) { - struct irq_data *irqd; - - if (desc->irq == virq) - break; - - irqd = irq_domain_get_irq_data(domain, desc->irq); - if (irqd_is_activated(irqd)) - irq_domain_deactivate_irq(irqd); + for_each_msi_vector(desc, i, dev) { + irq_data = irq_domain_get_irq_data(domain, i); + if (irqd_is_activated(irq_data)) + irq_domain_deactivate_irq(irq_data); } msi_domain_free_irqs(domain, dev); return ret; diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 8f557fa1f4fe..c6c7e187ae74 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq) unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - if (!desc || !(desc->istate & IRQS_SUSPENDED) || - !irqd_is_wakeup_set(&desc->irq_data)) + if (!desc) return; + if (!(desc->istate & IRQS_SUSPENDED) || + !irqd_is_wakeup_set(&desc->irq_data)) + goto unlock; + desc->istate &= ~IRQS_SUSPENDED; irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); __enable_irq(desc); +unlock: irq_put_desc_busunlock(desc, flags); } diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 98c04ca5fa43..27634f4022d0 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -47,6 +47,43 @@ static void resend_irqs(unsigned long arg) /* Tasklet to handle resend: */ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); +static int irq_sw_resend(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + + /* + * Validate whether this interrupt can be safely injected from + * non interrupt context + */ + if (handle_enforce_irqctx(&desc->irq_data)) + return -EINVAL; + + /* + * If the interrupt is running in the thread context of the parent + * irq we need to be careful, because we cannot trigger it + * directly. + */ + if (irq_settings_is_nested_thread(desc)) { + /* + * If the parent_irq is valid, we retrigger the parent, + * otherwise we do nothing. + */ + if (!desc->parent_irq) + return -EINVAL; + irq = desc->parent_irq; + } + + /* Set it pending and activate the softirq: */ + set_bit(irq, irqs_resend); + tasklet_schedule(&resend_tasklet); + return 0; +} + +#else +static int irq_sw_resend(struct irq_desc *desc) +{ + return -EINVAL; +} #endif /* @@ -54,49 +91,83 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); * * Is called with interrupts disabled and desc->lock held. */ -void check_irq_resend(struct irq_desc *desc) +int check_irq_resend(struct irq_desc *desc, bool inject) { + int err = 0; + /* - * We do not resend level type interrupts. Level type - * interrupts are resent by hardware when they are still - * active. Clear the pending bit so suspend/resume does not - * get confused. + * We do not resend level type interrupts. 
Level type interrupts + * are resent by hardware when they are still active. Clear the + * pending bit so suspend/resume does not get confused. */ if (irq_settings_is_level(desc)) { desc->istate &= ~IRQS_PENDING; - return; + return -EINVAL; } + if (desc->istate & IRQS_REPLAY) - return; - if (desc->istate & IRQS_PENDING) { - desc->istate &= ~IRQS_PENDING; + return -EBUSY; + + if (!(desc->istate & IRQS_PENDING) && !inject) + return 0; + + desc->istate &= ~IRQS_PENDING; + + if (!desc->irq_data.chip->irq_retrigger || + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) + err = irq_sw_resend(desc); + + /* If the retrigger was successful, mark it with the REPLAY bit */ + if (!err) desc->istate |= IRQS_REPLAY; - - if (!desc->irq_data.chip->irq_retrigger || - !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { -#ifdef CONFIG_HARDIRQS_SW_RESEND - unsigned int irq = irq_desc_get_irq(desc); - - /* - * If the interrupt is running in the thread - * context of the parent irq we need to be - * careful, because we cannot trigger it - * directly. - */ - if (irq_settings_is_nested_thread(desc)) { - /* - * If the parent_irq is valid, we - * retrigger the parent, otherwise we - * do nothing. - */ - if (!desc->parent_irq) - return; - irq = desc->parent_irq; - } - /* Set it pending and activate the softirq: */ - set_bit(irq, irqs_resend); - tasklet_schedule(&resend_tasklet); -#endif - } - } + return err; } + +#ifdef CONFIG_GENERIC_IRQ_INJECTION +/** + * irq_inject_interrupt - Inject an interrupt for testing/error injection + * @irq: The interrupt number + * + * This function must only be used for debug and testing purposes! + * + * Especially on x86 this can cause a premature completion of an interrupt + * affinity change causing the interrupt line to become stale. Very + * unlikely, but possible. + * + * The injection can fail for various reasons: + * - Interrupt is not activated + * - Interrupt is NMI type or currently replaying + * - Interrupt is level type + * - Interrupt does not support hardware retrigger and software resend is + * either not enabled or not possible for the interrupt. + */ +int irq_inject_interrupt(unsigned int irq) +{ + struct irq_desc *desc; + unsigned long flags; + int err; + + /* Try the state injection hardware interface first */ + if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)) + return 0; + + /* That failed, try via the resend mechanism */ + desc = irq_get_desc_buslock(irq, &flags, 0); + if (!desc) + return -EINVAL; + + /* + * Only try to inject when the interrupt is: + * - not NMI type + * - activated + */ + if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data)) + err = -EINVAL; + else + err = check_irq_resend(desc, true); + + irq_put_desc_busunlock(desc, flags); + return err; +} +EXPORT_SYMBOL_GPL(irq_inject_interrupt); +#endif diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 136ce049c4ad..61f9d781f70a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -645,19 +645,20 @@ static inline int kallsyms_for_perf(void) * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to * block even that).
*/ -int kallsyms_show_value(void) +bool kallsyms_show_value(const struct cred *cred) { switch (kptr_restrict) { case 0: if (kallsyms_for_perf()) - return 1; + return true; /* fallthrough */ case 1: - if (has_capability_noaudit(current, CAP_SYSLOG)) - return 1; + if (security_capable(cred, &init_user_ns, CAP_SYSLOG, + CAP_OPT_NOAUDIT) == 0) + return true; /* fallthrough */ default: - return 0; + return false; } } @@ -674,7 +675,11 @@ static int kallsyms_open(struct inode *inode, struct file *file) return -ENOMEM; reset_iter(iter, 0); - iter->show_value = kallsyms_show_value(); + /* + * Instead of checking this on every s_show() call, cache + * the result here at open time. + */ + iter->show_value = kallsyms_show_value(file->f_cred); return 0; } diff --git a/kernel/kcmp.c b/kernel/kcmp.c index a0e3d7a0e8b8..c0d2ad9b4705 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c @@ -75,25 +75,25 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx) return file; } -static void kcmp_unlock(struct mutex *m1, struct mutex *m2) +static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2) { - if (likely(m2 != m1)) - mutex_unlock(m2); - mutex_unlock(m1); + if (likely(l2 != l1)) + up_read(l2); + up_read(l1); } -static int kcmp_lock(struct mutex *m1, struct mutex *m2) +static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2) { int err; - if (m2 > m1) - swap(m1, m2); + if (l2 > l1) + swap(l1, l2); - err = mutex_lock_killable(m1); - if (!err && likely(m1 != m2)) { - err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING); + err = down_read_killable(l1); + if (!err && likely(l1 != l2)) { + err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING); if (err) - mutex_unlock(m1); + up_read(l1); } return err; @@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, /* * One should have enough rights to inspect task details. 
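
The kcmp() conversion in this hunk switches the inspection lock from cred_guard_mutex to exec_update_lock, but the userspace contract is unchanged: a return of 0 means the two kernel objects are the same. A minimal sketch comparing two descriptors of the current process, assuming the standard kcmp(2) interface (KCMP_FILE is 0 in linux/kcmp.h):

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef KCMP_FILE
#define KCMP_FILE 0 /* from linux/kcmp.h */
#endif

int main(void)
{
	pid_t pid = getpid();
	int fd1 = open("/dev/null", O_RDONLY);
	int fd2 = dup(fd1);

	/* 0 means both descriptors share the same struct file. */
	long eq = syscall(SYS_kcmp, pid, pid, KCMP_FILE,
			  (unsigned long)fd1, (unsigned long)fd2);
	printf("kcmp(fd1, fd2) = %ld\n", eq);
	return 0;
}
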
*/ - ret = kcmp_lock(&task1->signal->cred_guard_mutex, - &task2->signal->cred_guard_mutex); + ret = kcmp_lock(&task1->signal->exec_update_lock, + &task2->signal->exec_update_lock); if (ret) goto err; if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || @@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, } err_unlock: - kcmp_unlock(&task1->signal->cred_guard_mutex, - &task2->signal->cred_guard_mutex); + kcmp_unlock(&task1->signal->exec_update_lock, + &task2->signal->exec_update_lock); err: put_task_struct(task1); put_task_struct(task2); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 15d70a90b50d..d65b0fc8fb48 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -1129,7 +1129,6 @@ int kernel_kexec(void) #ifdef CONFIG_KEXEC_JUMP if (kexec_image->preserve_context) { - lock_system_sleep(); pm_prepare_console(); error = freeze_processes(); if (error) { @@ -1192,7 +1191,6 @@ int kernel_kexec(void) thaw_processes(); Restore_console: pm_restore_console(); - unlock_system_sleep(); } #endif diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 79f252af7dee..4e74db89bd23 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -165,6 +165,11 @@ void kimage_file_post_load_cleanup(struct kimage *image) vfree(pi->sechdrs); pi->sechdrs = NULL; +#ifdef CONFIG_IMA_KEXEC + vfree(image->ima_buffer); + image->ima_buffer = NULL; +#endif /* CONFIG_IMA_KEXEC */ + /* See if architecture has anything to cleanup post load */ arch_kimage_file_post_load_cleanup(image); diff --git a/kernel/kmod.c b/kernel/kmod.c index bc6addd9152b..a2de58de6ab6 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -120,7 +120,7 @@ out: * invoke it. * * If module auto-loading support is disabled then this function - * becomes a no-operation. + * simply returns -ENOENT. */ int __request_module(bool wait, const char *fmt, ...) { @@ -137,7 +137,7 @@ int __request_module(bool wait, const char *fmt, ...) WARN_ON_ONCE(wait && current_is_async()); if (!modprobe_path[0]) - return 0; + return -ENOENT; va_start(args, fmt); ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 2625c241ac00..a7812c115e48 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr) struct kprobe *p; head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry_rcu(p, head, hlist, + lockdep_is_held(&kprobe_mutex)) { if (p->addr == addr) return p; } @@ -586,11 +587,12 @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&module_mutex); mutex_unlock(&text_mutex); cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); /* Step 5: Kick optimizer again if needed */ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) kick_kprobe_optimizer(); + + mutex_unlock(&kprobe_mutex); } /* Wait for completing optimization and unoptimization */ @@ -1074,9 +1076,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p) ipmodify ? 
&kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); } #else /* !CONFIG_KPROBES_ON_FTRACE */ -#define prepare_kprobe(p) arch_prepare_kprobe(p) -#define arm_kprobe_ftrace(p) (-ENODEV) -#define disarm_kprobe_ftrace(p) (-ENODEV) +static inline int prepare_kprobe(struct kprobe *p) +{ + return arch_prepare_kprobe(p); +} + +static inline int arm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} + +static inline int disarm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} #endif /* Arm a kprobe with text_mutex */ @@ -1236,6 +1249,26 @@ __releases(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_unlock); +struct kprobe kprobe_busy = { + .addr = (void *) get_kprobe, +}; + +void kprobe_busy_begin(void) +{ + struct kprobe_ctlblk *kcb; + + preempt_disable(); + __this_cpu_write(current_kprobe, &kprobe_busy); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; +} + +void kprobe_busy_end(void) +{ + __this_cpu_write(current_kprobe, NULL); + preempt_enable(); +} + /* * This function is called from finish_task_switch when task tk becomes dead, * so that we can recycle any function-return probe instances associated @@ -1253,6 +1286,8 @@ void kprobe_flush_task(struct task_struct *tk) /* Early boot. kretprobe_table_locks not yet initialized. */ return; + kprobe_busy_begin(); + INIT_HLIST_HEAD(&empty_rp); hash = hash_ptr(tk, KPROBE_HASH_BITS); head = &kretprobe_inst_table[hash]; @@ -1266,6 +1301,8 @@ void kprobe_flush_task(struct task_struct *tk) hlist_del(&ri->hlist); kfree(ri); } + + kprobe_busy_end(); } NOKPROBE_SYMBOL(kprobe_flush_task); @@ -1911,28 +1948,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset) return !offset; } -bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) +/** + * kprobe_on_func_entry() -- check whether given address is function entry + * @addr: Target address + * @sym: Target symbol name + * @offset: The offset from the symbol or the address + * + * This checks whether the given @addr+@offset or @sym+@offset is on the + * function entry address or not. + * This returns 0 if it is the function entry, or -EINVAL if it is not. + * And also it returns -ENOENT if it fails the symbol or address lookup. + * Caller must pass @addr or @sym (either one must be NULL), or this + * returns -EINVAL. 
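
kprobe_on_func_entry(), documented above, now returns 0, -EINVAL or -ENOENT, and register_kretprobe() (next hunk) propagates those codes to its callers instead of collapsing everything to -EINVAL. A minimal sketch of a kretprobe module that surfaces the distinct codes; "do_sys_open" is only an example symbol:

#include <linux/kprobes.h>
#include <linux/module.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	return 0;
}

static struct kretprobe my_rp = {
	.kp.symbol_name = "do_sys_open", /* example symbol */
	.handler = ret_handler,
};

static int __init rp_init(void)
{
	int ret = register_kretprobe(&my_rp);

	/* -EINVAL: not a function entry; -ENOENT: symbol lookup failed. */
	if (ret)
		pr_err("register_kretprobe failed: %d\n", ret);
	return ret;
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&my_rp);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");
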
+ */ +int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) { kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); if (IS_ERR(kp_addr)) - return false; + return PTR_ERR(kp_addr); - if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || - !arch_kprobe_on_func_entry(offset)) - return false; + if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) + return -ENOENT; - return true; + if (!arch_kprobe_on_func_entry(offset)) + return -EINVAL; + + return 0; } int register_kretprobe(struct kretprobe *rp) { - int ret = 0; + int ret; struct kretprobe_instance *inst; int i; void *addr; - if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) + ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); + if (ret) + return ret; + + /* If only rp->kp.addr is specified, check reregistering kprobes */ + if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) return -EINVAL; if (kretprobe_blacklist_size) { @@ -2062,6 +2119,9 @@ static void kill_kprobe(struct kprobe *p) { struct kprobe *kp; + if (WARN_ON_ONCE(kprobe_gone(p))) + return; + p->flags |= KPROBE_FLAG_GONE; if (kprobe_aggrprobe(p)) { /* @@ -2078,6 +2138,14 @@ static void kill_kprobe(struct kprobe *p) * the original probed function (which will be freed soon) any more. */ arch_remove_kprobe(p); + + /* + * The module is going away. We should disarm the kprobe which + * is using ftrace, because ftrace framework is still available at + * MODULE_STATE_GOING notification. + */ + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) + disarm_kprobe_ftrace(p); } /* Disable one kprobe */ @@ -2237,7 +2305,10 @@ static int kprobes_module_callback(struct notifier_block *nb, mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) + hlist_for_each_entry_rcu(p, head, hlist) { + if (kprobe_gone(p)) + continue; + if (within_module_init((unsigned long)p->addr, mod) || (checkcore && within_module_core((unsigned long)p->addr, mod))) { @@ -2254,6 +2325,7 @@ static int kprobes_module_callback(struct notifier_block *nb, */ kill_kprobe(p); } + } } mutex_unlock(&kprobe_mutex); return NOTIFY_DONE; @@ -2268,6 +2340,28 @@ static struct notifier_block kprobe_module_nb = { extern unsigned long __start_kprobe_blacklist[]; extern unsigned long __stop_kprobe_blacklist[]; +void kprobe_free_init_mem(void) +{ + void *start = (void *)(&__init_begin); + void *end = (void *)(&__init_end); + struct hlist_head *head; + struct kprobe *p; + int i; + + mutex_lock(&kprobe_mutex); + + /* Kill all kprobes on initmem */ + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry(p, head, hlist) { + if (start <= (void *)p->addr && (void *)p->addr < end) + kill_kprobe(p); + } + } + + mutex_unlock(&kprobe_mutex); +} + static int __init init_kprobes(void) { int i, err = 0; @@ -2336,7 +2430,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p, else kprobe_type = "k"; - if (!kallsyms_show_value()) + if (!kallsyms_show_value(pi->file->f_cred)) addr = NULL; if (sym) @@ -2437,7 +2531,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) * If /proc/kallsyms is not showing kernel address, we won't * show them here either. 
*/ - if (!kallsyms_show_value()) + if (!kallsyms_show_value(m->file->f_cred)) seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, (void *)ent->start_addr); else diff --git a/kernel/kthread.c b/kernel/kthread.c index b262f47046ca..d6a4296384c2 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -76,6 +76,25 @@ static inline struct kthread *to_kthread(struct task_struct *k) return (__force void *)k->set_child_tid; } +/* + * Variant of to_kthread() that doesn't assume @p is a kthread. + * + * Per construction; when: + * + * (p->flags & PF_KTHREAD) && p->set_child_tid + * + * the task is both a kthread and struct kthread is persistent. However + * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and + * begin_new_exec()). + */ +static inline struct kthread *__to_kthread(struct task_struct *p) +{ + void *kthread = (__force void *)p->set_child_tid; + if (kthread && !(p->flags & PF_KTHREAD)) + kthread = NULL; + return kthread; +} + void free_kthread_struct(struct task_struct *k) { struct kthread *kthread; @@ -176,10 +195,11 @@ void *kthread_data(struct task_struct *task) */ void *kthread_probe_data(struct task_struct *task) { - struct kthread *kthread = to_kthread(task); + struct kthread *kthread = __to_kthread(task); void *data = NULL; - probe_kernel_read(&data, &kthread->data, sizeof(data)); + if (kthread) + probe_kernel_read(&data, &kthread->data, sizeof(data)); return data; } @@ -199,8 +219,15 @@ static void __kthread_parkme(struct kthread *self) if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + /* + * Thread is going to call schedule(), do not preempt it, + * or the caller of kthread_park() may spend more time in + * wait_task_inactive(). + */ + preempt_disable(); complete(&self->parked); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); } __set_current_state(TASK_RUNNING); } @@ -245,8 +272,14 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; + /* + * Thread is going to call schedule(), do not preempt it, + * or the creator may spend more time in wait_task_inactive(). + */ + preempt_disable(); complete(done); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { @@ -456,11 +489,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), return p; kthread_bind(p, cpu); /* CPU hotplug needs to bind once again when unparking the thread. */ - set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); to_kthread(p)->cpu = cpu; return p; } +void kthread_set_per_cpu(struct task_struct *k, int cpu) +{ + struct kthread *kthread = to_kthread(k); + if (!kthread) + return; + + WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY)); + + if (cpu < 0) { + clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags); + return; + } + + kthread->cpu = cpu; + set_bit(KTHREAD_IS_PER_CPU, &kthread->flags); +} + +bool kthread_is_per_cpu(struct task_struct *k) +{ + struct kthread *kthread = __to_kthread(k); + if (!kthread) + return false; + + return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags); +} + /** * kthread_unpark - unpark a thread created by kthread_create(). * @k: thread created by kthread_create(). @@ -860,7 +918,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) /* Move the work from worker->delayed_work_list.
*/ WARN_ON_ONCE(list_empty(&work->node)); list_del_init(&work->node); - kthread_insert_work(worker, work, &worker->work_list); + if (!work->canceling) + kthread_insert_work(worker, work, &worker->work_list); raw_spin_unlock_irqrestore(&worker->lock, flags); } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 35d3b6925b1e..7429f1571755 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -875,7 +875,8 @@ static bool assign_lock_key(struct lockdep_map *lock) /* Debug-check: all keys must be persistent! */ debug_locks_off(); pr_err("INFO: trying to register non-static key.\n"); - pr_err("the code is fine but needs lockdep annotation.\n"); + pr_err("The code is fine but needs lockdep annotation, or maybe\n"); + pr_err("you didn't initialize this object before use?\n"); pr_err("turning off the locking correctness validator.\n"); dump_stack(); return false; @@ -1719,9 +1720,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.class = class; raw_local_irq_save(flags); + current->lockdep_recursion = 1; arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); arch_spin_unlock(&lockdep_lock); + current->lockdep_recursion = 0; raw_local_irq_restore(flags); return ret; @@ -1746,9 +1749,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.class = class; raw_local_irq_save(flags); + current->lockdep_recursion = 1; arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); arch_spin_unlock(&lockdep_lock); + current->lockdep_recursion = 0; raw_local_irq_restore(flags); return ret; @@ -2298,18 +2303,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, return 0; } -static void inc_chains(void) -{ - if (current->hardirq_context) - nr_hardirq_chains++; - else { - if (current->softirq_context) - nr_softirq_chains++; - else - nr_process_chains++; - } -} - #else static inline int check_irq_usage(struct task_struct *curr, @@ -2317,13 +2310,27 @@ static inline int check_irq_usage(struct task_struct *curr, { return 1; } +#endif /* CONFIG_TRACE_IRQFLAGS */ -static inline void inc_chains(void) +static void inc_chains(int irq_context) { - nr_process_chains++; + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) + nr_hardirq_chains++; + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) + nr_softirq_chains++; + else + nr_process_chains++; } -#endif /* CONFIG_TRACE_IRQFLAGS */ +static void dec_chains(int irq_context) +{ + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) + nr_hardirq_chains--; + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) + nr_softirq_chains--; + else + nr_process_chains--; +} static void print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) @@ -2843,7 +2850,7 @@ static inline int add_chain_cache(struct task_struct *curr, hlist_add_head_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); - inc_chains(); + inc_chains(chain->irq_context); return 1; } @@ -3596,7 +3603,8 @@ lock_used: static inline unsigned int task_irq_context(struct task_struct *task) { - return 2 * !!task->hardirq_context + !!task->softirq_context; + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context + + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; } static int separate_irq_context(struct task_struct *curr, @@ -4801,6 +4809,8 @@ recalc: return; /* Overwrite the chain key for concurrent RCU readers. 
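
task_irq_context() above packs the hard/soft interrupt state into the LOCK_CHAIN_*_CONTEXT bits defined in lockdep_internals.h (next hunk), which inc_chains()/dec_chains() use to pick the right statistics bucket. A small worked check of the encoding, as a standalone sketch:

#include <stdio.h>

#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1)

/* Mirrors the kernel's task_irq_context() arithmetic on plain ints. */
static unsigned int task_irq_context(int hardirq, int softirq)
{
	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!hardirq +
	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!softirq;
}

int main(void)
{
	/* process context: 0, softirq: 1, hardirq: 2 */
	printf("%u %u %u\n", task_irq_context(0, 0),
	       task_irq_context(0, 1), task_irq_context(1, 0));
	return 0;
}
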
*/ WRITE_ONCE(chain->chain_key, chain_key); + dec_chains(chain->irq_context); + /* * Note: calling hlist_del_rcu() from inside a * hlist_for_each_entry_rcu() loop is safe. @@ -4822,6 +4832,7 @@ recalc: } *new_chain = *chain; hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); + inc_chains(new_chain->irq_context); #endif } diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 18d85aebbb57..a525368b8cf6 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = #define STACK_TRACE_HASH_SIZE 16384 #endif +/* + * Bit definitions for lock_chain.irq_context + */ +#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0) +#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1) + #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 9bb6d2497b04..581f81818138 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -400,7 +400,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) seq_time(m, lt->min); seq_time(m, lt->max); seq_time(m, lt->total); - seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0); + seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0); } static void seq_stats(struct seq_file *m, struct lock_stat_data *data) diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index c513031cd7e3..e09562818bb7 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -697,10 +697,10 @@ static void __torture_print_stats(char *page, if (statp[i].n_lock_fail) fail = true; sum += statp[i].n_lock_acquired; - if (max < statp[i].n_lock_fail) - max = statp[i].n_lock_fail; - if (min > statp[i].n_lock_fail) - min = statp[i].n_lock_fail; + if (max < statp[i].n_lock_acquired) + max = statp[i].n_lock_acquired; + if (min > statp[i].n_lock_acquired) + min = statp[i].n_lock_acquired; } page += sprintf(page, "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 5e10153b4d3c..05d02cbea29b 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -17,11 +17,11 @@ struct mcs_spinlock { struct mcs_spinlock *next; - int locked; /* 1 if lock acquired */ + unsigned int locked; /* 1 if lock acquired */ int count; /* nesting count, see qspinlock.c */ }; -#ifndef arch_mcs_spin_lock_contended +#ifndef arch_mcs_spin_wait /* * Using smp_cond_load_acquire() provides the acquire semantics * required so that subsequent operations happen after the @@ -29,20 +29,20 @@ struct mcs_spinlock { * ARM64 would like to do spin-waiting instead of purely * spinning, and smp_cond_load_acquire() provides that behavior. */ -#define arch_mcs_spin_lock_contended(l) \ +#define arch_mcs_spin_wait(l) \ do { \ smp_cond_load_acquire(l, VAL); \ } while (0) #endif -#ifndef arch_mcs_spin_unlock_contended +#ifndef arch_mcs_lock_handoff /* * smp_store_release() provides a memory barrier to ensure all * operations in the critical section has been completed before * unlocking. 
*/ -#define arch_mcs_spin_unlock_contended(l) \ - smp_store_release((l), 1) +#define arch_mcs_lock_handoff(l, val) \ + smp_store_release((l), (val)) #endif /* @@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) WRITE_ONCE(prev->next, node); /* Wait until the lock holder passes the lock down. */ - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); } /* @@ -115,7 +115,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) } /* Pass lock to next waiter. */ - arch_mcs_spin_unlock_contended(&next->locked); + arch_mcs_lock_handoff(&next->locked, 1); } #endif /* __LINUX_MCS_SPINLOCK_H */ diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 468a9b8422e3..07a9f9f46e03 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) */ static __always_inline bool mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, - const bool use_ww_ctx, struct mutex_waiter *waiter) + struct mutex_waiter *waiter) { if (!waiter) { /* @@ -712,7 +712,7 @@ fail: #else static __always_inline bool mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, - const bool use_ww_ctx, struct mutex_waiter *waiter) + struct mutex_waiter *waiter) { return false; } @@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct ww_mutex *ww; int ret; + if (!use_ww_ctx) + ww_ctx = NULL; + might_sleep(); #ifdef CONFIG_DEBUG_MUTEXES @@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, #endif ww = container_of(lock, struct ww_mutex, base); - if (use_ww_ctx && ww_ctx) { + if (ww_ctx) { if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) return -EALREADY; @@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); if (__mutex_trylock(lock) || - mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { + mutex_optimistic_spin(lock, ww_ctx, NULL)) { /* got the lock, yay! */ lock_acquired(&lock->dep_map, ip); - if (use_ww_ctx && ww_ctx) + if (ww_ctx) ww_mutex_set_context_fastpath(ww, ww_ctx); preempt_enable(); return 0; @@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * After waiting to acquire the wait_lock, try again. */ if (__mutex_trylock(lock)) { - if (use_ww_ctx && ww_ctx) + if (ww_ctx) __ww_mutex_check_waiters(lock, ww_ctx); goto skip_wait; @@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, goto err; } - if (use_ww_ctx && ww_ctx) { + if (ww_ctx) { ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); if (ret) goto err; @@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * ww_mutex needs to always recheck its position since its waiter * list is not FIFO ordered. */ - if ((use_ww_ctx && ww_ctx) || !first) { + if (ww_ctx || !first) { first = __mutex_waiter_is_first(lock, &waiter); if (first) __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); @@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * or we must see its unlock and acquire. 
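The renames above (arch_mcs_spin_wait(), arch_mcs_lock_handoff()) widen the MCS handoff to carry an arbitrary value rather than a hard-coded 1, which CNA later exploits to pass an encoded tail. A self-contained userspace MCS queue lock using C11 atomics sketches the protocol; it is a model under simplified assumptions, not the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_uint locked;	/* handoff value; 1 means "lock is yours" */
};

/* arch_mcs_spin_wait(): acquire-spin until a nonzero handoff value lands */
static unsigned int mcs_spin_wait(atomic_uint *l)
{
	unsigned int v;

	while (!(v = atomic_load_explicit(l, memory_order_acquire)))
		;
	return v;
}

/*
 * arch_mcs_lock_handoff(): release-store an arbitrary handoff value, which
 * is the point of the rename -- CNA hands over an encoded tail, not just 1.
 */
static void mcs_lock_handoff(atomic_uint *l, unsigned int val)
{
	atomic_store_explicit(l, val, memory_order_release);
}

static void mcs_lock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_init(&node->next, NULL);
	atomic_init(&node->locked, 0);
	prev = atomic_exchange(tail, node);	/* enqueue ourselves */
	if (prev) {
		atomic_store(&prev->next, node);
		mcs_spin_wait(&node->locked);	/* wait for the handoff */
	}
}

static void mcs_unlock(_Atomic(struct mcs_node *) *tail, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (!next) {
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(tail, &expected, NULL))
			return;			/* nobody queued behind us */
		while (!(next = atomic_load(&node->next)))
			;			/* successor is mid-enqueue */
	}
	mcs_lock_handoff(&next->locked, 1);
}

int main(void)
{
	_Atomic(struct mcs_node *) tail = NULL;
	struct mcs_node me;

	mcs_lock(&tail, &me);
	puts("in critical section");
	mcs_unlock(&tail, &me);
	return 0;
}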
*/ if (__mutex_trylock(lock) || - (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) + (first && mutex_optimistic_spin(lock, ww_ctx, &waiter))) break; spin_lock(&lock->wait_lock); @@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, acquired: __set_current_state(TASK_RUNNING); - if (use_ww_ctx && ww_ctx) { + if (ww_ctx) { /* * Wound-Wait; we stole the lock (!first_waiter), check the * waiters as anyone might want to wound us. @@ -1078,7 +1081,7 @@ skip_wait: /* got the lock - cleanup and rejoice! */ lock_acquired(&lock->dep_map, ip); - if (use_ww_ctx && ww_ctx) + if (ww_ctx) ww_mutex_lock_acquired(ww, ww_ctx); spin_unlock(&lock->wait_lock); diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index 364d38a0c444..367e002048d8 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -102,7 +102,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem) * zero, as that is the only time it matters) they will also see our * critical section. */ - __this_cpu_dec(*sem->read_count); + this_cpu_dec(*sem->read_count); /* Prod writer to recheck readers_active */ rcuwait_wake_up(&sem->writer); diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index fe9ca92faa2a..909b0bf22a1e 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c @@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); */ void queued_write_lock_slowpath(struct qrwlock *lock) { + int cnts; + /* Put the writer into the wait queue */ arch_spin_lock(&lock->wait_lock); @@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock) /* When no more readers or writers, set the locked flag */ do { - atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); - } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, - _QW_LOCKED) != _QW_WAITING); + cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING); + } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); unlock: arch_spin_unlock(&lock->wait_lock); } diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 2473f10c6956..be6748106e40 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -11,7 +11,7 @@ * Peter Zijlstra <peterz@infradead.org> */ -#ifndef _GEN_PV_LOCK_SLOWPATH +#if !defined(_GEN_PV_LOCK_SLOWPATH) && !defined(_GEN_CNA_LOCK_SLOWPATH) #include <linux/smp.h> #include <linux/bug.h> @@ -79,7 +79,7 @@ */ struct qnode { struct mcs_spinlock mcs; -#ifdef CONFIG_PARAVIRT_SPINLOCKS +#if defined(CONFIG_PARAVIRT_SPINLOCKS) || defined(CONFIG_NUMA_AWARE_SPINLOCKS) long reserved[2]; #endif }; @@ -288,7 +288,35 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath #endif -#endif /* _GEN_PV_LOCK_SLOWPATH */ +/* + * __try_clear_tail - try to clear tail by setting the lock value to + * _Q_LOCKED_VAL. 
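The qrwlock change above replaces a cmpxchg() loop with atomic_try_cmpxchg_acquire(), which updates the expected value in place and so avoids re-reading the lock word on failure; the acquire ordering also moves from the cond-load to the cmpxchg. C11's compare_exchange has the same update-in-place semantics, so the patched loop can be modeled like this (the constants are illustrative stand-ins for the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define QW_WAITING 0x100	/* stand-ins for _QW_WAITING/_QW_LOCKED */
#define QW_LOCKED  0x0ff

/*
 * Patched wait loop: relaxed cond-load until only the waiting writer is
 * left, then one acquire try_cmpxchg that reuses the loaded value as the
 * "expected" operand instead of re-reading the lock word on failure.
 */
static void write_lock_slowpath(atomic_int *cnts)
{
	int c;

	do {
		while ((c = atomic_load_explicit(cnts, memory_order_relaxed))
		       != QW_WAITING)
			;	/* readers or a writer still hold the lock */
	} while (!atomic_compare_exchange_weak_explicit(cnts, &c, QW_LOCKED,
							memory_order_acquire,
							memory_order_relaxed));
}

int main(void)
{
	atomic_int cnts = QW_WAITING;

	write_lock_slowpath(&cnts);
	printf("cnts = 0x%x\n", atomic_load(&cnts));
	return 0;
}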
+ * @lock: Pointer to the queued spinlock structure + * @val: Current value of the lock + * @node: Pointer to the MCS node of the lock holder + */ +static __always_inline bool __try_clear_tail(struct qspinlock *lock, + u32 val, + struct mcs_spinlock *node) +{ + return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL); +} + +/* + * __mcs_lock_handoff - pass the MCS lock to the next waiter + * @node: Pointer to the MCS node of the lock holder + * @next: Pointer to the MCS node of the first waiter in the MCS queue + */ +static __always_inline void __mcs_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + arch_mcs_lock_handoff(&next->locked, 1); +} + +#define try_clear_tail __try_clear_tail +#define mcs_lock_handoff __mcs_lock_handoff + +#endif /* _GEN_PV_LOCK_SLOWPATH && _GEN_CNA_LOCK_SLOWPATH */ /** * queued_spin_lock_slowpath - acquire the queued spinlock @@ -470,7 +498,7 @@ pv_queue: WRITE_ONCE(prev->next, node); pv_wait_node(node, prev); - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); /* * While waiting for the MCS lock, the next pointer may have @@ -532,7 +560,7 @@ locked: * PENDING will make the uncontended transition fail. */ if ((val & _Q_TAIL_MASK) == tail) { - if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + if (try_clear_tail(lock, val, node)) goto release; /* No contention */ } @@ -549,7 +577,7 @@ locked: if (!next) next = smp_cond_load_relaxed(&node->next, (VAL)); - arch_mcs_spin_unlock_contended(&next->locked); + mcs_lock_handoff(node, next); pv_kick_node(lock, next); release: @@ -560,6 +588,37 @@ release: } EXPORT_SYMBOL(queued_spin_lock_slowpath); +/* + * Generate the code for NUMA-aware spinlocks + */ +#if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS) +#define _GEN_CNA_LOCK_SLOWPATH + +#undef pv_init_node +#define pv_init_node cna_init_node + +#undef pv_wait_head_or_lock +#define pv_wait_head_or_lock cna_wait_head_or_lock + +#undef try_clear_tail +#define try_clear_tail cna_try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff cna_lock_handoff + +#undef queued_spin_lock_slowpath +/* + * defer defining queued_spin_lock_slowpath until after the include to + * avoid a name clash with the identically named field in pv_ops.lock + * (see cna_configure_spin_lock_slowpath()) + */ +#include "qspinlock_cna.h" +#define queued_spin_lock_slowpath __cna_queued_spin_lock_slowpath + +#include "qspinlock.c" + +#endif + /* * Generate the paravirt code for queued_spin_unlock_slowpath(). 
*/ @@ -574,10 +633,23 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath); #undef pv_kick_node #undef pv_wait_head_or_lock +#undef try_clear_tail +#define try_clear_tail __try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff __mcs_lock_handoff + #undef queued_spin_lock_slowpath #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath #include "qspinlock_paravirt.h" #include "qspinlock.c" +bool nopvspin __initdata; +static __init int parse_nopvspin(char *arg) +{ + nopvspin = true; + return 0; +} +early_param("nopvspin", parse_nopvspin); #endif diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h new file mode 100644 index 000000000000..2c252660b8a4 --- /dev/null +++ b/kernel/locking/qspinlock_cna.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GEN_CNA_LOCK_SLOWPATH +#error "do not include this file" +#endif + +#include <linux/topology.h> +#include <linux/sched/clock.h> +#include <linux/sched/rt.h> +#include <linux/moduleparam.h> +#include <linux/random.h> + +/* + * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). + * + * In CNA, spinning threads are organized in two queues, a primary queue for + * threads running on the same NUMA node as the current lock holder, and a + * secondary queue for threads running on other nodes. Schematically, it + * looks like this: + * + * cna_node + * +----------+ +--------+ +--------+ + * |mcs:next | --> |mcs:next| --> ... |mcs:next| --> NULL [Primary queue] + * |mcs:locked| -. +--------+ +--------+ + * +----------+ | + * `----------------------. + * v + * +--------+ +--------+ + * |mcs:next| --> ... |mcs:next| [Secondary queue] + * +--------+ +--------+ + * ^ | + * `--------------------' + * + * N.B. locked := 1 if secondary queue is absent. Otherwise, it contains the + * encoded pointer to the tail of the secondary queue, which is organized as a + * circular list. + * + * After acquiring the MCS lock and before acquiring the spinlock, the MCS lock + * holder checks whether the next waiter in the primary queue (if exists) is + * running on the same NUMA node. If it is not, that waiter is detached from the + * main queue and moved into the tail of the secondary queue. This way, we + * gradually filter the primary queue, leaving only waiters running on the same + * preferred NUMA node. + * + * For more details, see https://arxiv.org/abs/1810.05600. + * + * Authors: Alex Kogan <alex.kogan@oracle.com> + * Dave Dice <dave.dice@oracle.com> + */ + +#define FLUSH_SECONDARY_QUEUE 1 + +#define CNA_PRIORITY_NODE 0xffff + +#define DEFAULT_LLC_ID 0xfffe + +struct cna_node { + struct mcs_spinlock mcs; + u16 llc_id; + u16 real_llc_id; + u32 encoded_tail; /* self */ + u64 start_time; +}; + +bool use_llc_spinlock = false; + +static ulong numa_spinlock_threshold_ns = 1000000; /* 1ms, by default */ +module_param(numa_spinlock_threshold_ns, ulong, 0644); + +static inline bool intra_node_threshold_reached(struct cna_node *cn) +{ + u64 current_time = local_clock(); + u64 threshold = cn->start_time + numa_spinlock_threshold_ns; + + return current_time > threshold; +} + +/* + * Controls the probability for enabling the ordering of the main queue + * when the secondary queue is empty. The chosen value reduces the amount + * of unnecessary shuffling of threads between the two waiting queues + * when the contention is low, while responding fast enough and enabling + * the shuffling when the contention is high. 
+ */ +#define SHUFFLE_REDUCTION_PROB_ARG (7) + +/* Per-CPU pseudo-random number seed */ +static DEFINE_PER_CPU(u32, seed); + +/* + * Return false with probability 1 / 2^@num_bits. + * Intuitively, the larger @num_bits the less likely false is to be returned. + * @num_bits must be a number between 0 and 31. + */ +static bool probably(unsigned int num_bits) +{ + u32 s; + + s = this_cpu_read(seed); + s = next_pseudo_random32(s); + this_cpu_write(seed, s); + + return s & ((1 << num_bits) - 1); +} + +static void __init cna_init_nodes_per_cpu(unsigned int cpu) +{ + struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); + int i; + + for (i = 0; i < MAX_NODES; i++) { + struct cna_node *cn = (struct cna_node *)grab_mcs_node(base, i); + + /* + * cpu_llc_id is not initialized when this function is called, + * so just set a fake LLC id. + */ + cn->real_llc_id = DEFAULT_LLC_ID; + cn->encoded_tail = encode_tail(cpu, i); + /* + * make sure @encoded_tail is not confused with other valid + * values for @locked (0 or 1) + */ + WARN_ON(cn->encoded_tail <= 1); + } +} + +/* + * Must be called after cpu_llc_id is initialized. + */ +void cna_set_llc_id_per_cpu(unsigned int cpu) +{ + struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); + int i; + + if (!use_llc_spinlock) + return; + + for (i = 0; i < MAX_NODES; i++) { + struct cna_node *cn = (struct cna_node *)grab_mcs_node(base, i); + + cn->real_llc_id = per_cpu(cpu_llc_id, cpu); + } +} + +static int __init cna_init_nodes(void) +{ + unsigned int cpu; + + /* + * this will break on 32bit architectures, so we restrict + * the use of CNA to 64bit only (see arch/x86/Kconfig) + */ + BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode)); + /* we store an encoded tail word in the node's @locked field */ + BUILD_BUG_ON(sizeof(u32) > sizeof(unsigned int)); + + for_each_possible_cpu(cpu) + cna_init_nodes_per_cpu(cpu); + + return 0; +} + +static __always_inline void cna_init_node(struct mcs_spinlock *node) +{ + bool priority = !in_task() || irqs_disabled() || rt_task(current); + struct cna_node *cn = (struct cna_node *)node; + + cn->llc_id = priority ? CNA_PRIORITY_NODE : cn->real_llc_id; + cn->start_time = 0; +} + +/* + * cna_splice_head -- splice the entire secondary queue onto the head of the + * primary queue. + * + * Returns the new primary head node or NULL on failure. + */ +static struct mcs_spinlock * +cna_splice_head(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node, struct mcs_spinlock *next) +{ + struct mcs_spinlock *head_2nd, *tail_2nd; + u32 new; + + tail_2nd = decode_tail(node->locked); + head_2nd = tail_2nd->next; + + if (next) { + /* + * If the primary queue is not empty, the primary tail doesn't + * need to change and we can simply link the secondary tail to + * the old primary head. + */ + tail_2nd->next = next; + } else { + /* + * When the primary queue is empty, the secondary tail becomes + * the primary tail. + */ + + /* + * Speculatively break the secondary queue's circular link such + * that when the secondary tail becomes the primary tail it all + * works out. + */ + tail_2nd->next = NULL; + + /* + * tail_2nd->next = NULL; old = xchg_tail(lock, tail); + * prev = decode_tail(old); + * try_cmpxchg_release(...); WRITE_ONCE(prev->next, node); + * + * If the following cmpxchg() succeeds, our stores will not + * collide. + */ + new = ((struct cna_node *)tail_2nd)->encoded_tail | + _Q_LOCKED_VAL; + if (!atomic_try_cmpxchg_release(&lock->val, &val, new)) { + /* Restore the secondary queue's circular link.
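probably() above gates the shuffling work: it returns false only when the low num_bits of a per-CPU pseudo-random word all come up zero, i.e. with probability 1/2^num_bits. A runnable model, substituting xorshift32 for the kernel's next_pseudo_random32():

#include <stdint.h>
#include <stdio.h>

/* xorshift32 stands in for the kernel's next_pseudo_random32() */
static uint32_t prng_next(uint32_t s)
{
	s ^= s << 13;
	s ^= s >> 17;
	s ^= s << 5;
	return s ? s : 1;
}

/* like probably(): false only when the low num_bits are all zero */
static int probably(uint32_t *seed, unsigned int num_bits)
{
	*seed = prng_next(*seed);
	return *seed & ((1u << num_bits) - 1);
}

int main(void)
{
	uint32_t seed = 2463534242u;
	unsigned long falses = 0, trials = 1ul << 20;

	for (unsigned long i = 0; i < trials; i++)
		if (!probably(&seed, 7))	/* SHUFFLE_REDUCTION_PROB_ARG */
			falses++;
	/* expect roughly trials / 128 */
	printf("false %lu times out of %lu\n", falses, trials);
	return 0;
}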
*/ + tail_2nd->next = head_2nd; + return NULL; + } + } + + /* The primary queue head now is what was the secondary queue head. */ + return head_2nd; +} + +static inline bool cna_try_clear_tail(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node) +{ + /* + * We're here because the primary queue is empty; check the secondary + * queue for remote waiters. + */ + if (node->locked > 1) { + struct mcs_spinlock *next; + + /* + * When there are waiters on the secondary queue, try to move + * them back onto the primary queue and let them rip. + */ + next = cna_splice_head(lock, val, node, NULL); + if (next) { + arch_mcs_lock_handoff(&next->locked, 1); + return true; + } + + return false; + } + + /* Both queues are empty. Do what MCS does. */ + return __try_clear_tail(lock, val, node); +} + +/* + * cna_splice_next -- splice the next node from the primary queue onto + * the secondary queue. + */ +static void cna_splice_next(struct mcs_spinlock *node, + struct mcs_spinlock *next, + struct mcs_spinlock *nnext) +{ + /* remove 'next' from the main queue */ + node->next = nnext; + + /* stick `next` on the secondary queue tail */ + if (node->locked <= 1) { /* if secondary queue is empty */ + struct cna_node *cn = (struct cna_node *)node; + + /* create secondary queue */ + next->next = next; + + cn->start_time = local_clock(); + /* secondary queue is not empty iff start_time != 0 */ + WARN_ON(!cn->start_time); + } else { + /* add to the tail of the secondary queue */ + struct mcs_spinlock *tail_2nd = decode_tail(node->locked); + struct mcs_spinlock *head_2nd = tail_2nd->next; + + tail_2nd->next = next; + next->next = head_2nd; + } + + node->locked = ((struct cna_node *)next)->encoded_tail; +} + +/* + * cna_order_queue - check whether the next waiter in the main queue is on + * the same NUMA node as the lock holder; if not, and it has a waiter behind + * it in the main queue, move the former onto the secondary queue. + * Returns 1 if the next waiter runs on the same NUMA node; 0 otherwise. + */ +static int cna_order_queue(struct mcs_spinlock *node) +{ + struct mcs_spinlock *next = READ_ONCE(node->next); + struct cna_node *cn = (struct cna_node *)node; + int llc_id, next_llc_id; + + if (!next) + return 0; + + llc_id = cn->llc_id; + next_llc_id = ((struct cna_node *)next)->llc_id; + + if (next_llc_id != llc_id && next_llc_id != CNA_PRIORITY_NODE) { + struct mcs_spinlock *nnext = READ_ONCE(next->next); + + if (nnext) + cna_splice_next(node, next, nnext); + + return 0; + } + return 1; +} + +#define LOCK_IS_BUSY(lock) (atomic_read(&(lock)->val) & _Q_LOCKED_PENDING_MASK) + +/* Abuse the pv_wait_head_or_lock() hook to get some work done */ +static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, + struct mcs_spinlock *node) +{ + struct cna_node *cn = (struct cna_node *)node; + + if (node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) { + /* + * When the secondary queue is empty, skip the calls to + * cna_order_queue() below with high probability. This optimization + * reduces the overhead of unnecessary shuffling of threads + * between waiting queues when the lock is only lightly contended. + */ + return 0; + } + + if (!cn->start_time || !intra_node_threshold_reached(cn)) { + + /* + * We are at the head of the wait queue, no need to use + * the fake NUMA node ID. + */ + if (cn->llc_id == CNA_PRIORITY_NODE) + cn->llc_id = cn->real_llc_id; + + /* + * Try and put the time otherwise spent spin waiting on + * _Q_LOCKED_PENDING_MASK to use by sorting our lists. 
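cna_splice_next() above detaches a remote waiter from the primary queue and appends it to a circular secondary queue of which only the tail is remembered, the way CNA encodes it in mcs->locked. A toy, single-threaded model of that splice (plain pointers instead of encoded tails; names invented):

#include <stdio.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	int llc;	/* cache-domain id, like cna_node.llc_id */
};

/* move head->next onto a circular secondary queue tracked by its tail */
static void splice_next(struct waiter *head, struct waiter **tail_2nd)
{
	struct waiter *victim = head->next;

	head->next = victim->next;		/* unlink from primary queue */
	if (!*tail_2nd) {
		victim->next = victim;		/* new circular list of one */
	} else {
		victim->next = (*tail_2nd)->next;	/* old 2nd-queue head */
		(*tail_2nd)->next = victim;
	}
	*tail_2nd = victim;
}

int main(void)
{
	struct waiter c = { NULL, 0 }, b = { &c, 1 }, a = { &b, 0 };
	struct waiter *tail_2nd = NULL;

	/* b waits on another LLC and has a successor: set it aside */
	if (a.next->llc != a.llc && a.next->next)
		splice_next(&a, &tail_2nd);

	printf("primary head now points at %s\n", a.next == &c ? "c" : "?");
	printf("secondary tail is %s\n", tail_2nd == &b ? "b" : "?");
	return 0;
}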
+ */ + while (LOCK_IS_BUSY(lock) && !cna_order_queue(node)) + cpu_relax(); + } else { + cn->start_time = FLUSH_SECONDARY_QUEUE; + } + + return 0; /* we lied; we didn't wait, go do so now */ +} + +static inline void cna_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + struct cna_node *cn = (struct cna_node *)node; + u32 val = 1; + + if (cn->start_time != FLUSH_SECONDARY_QUEUE) { + if (node->locked > 1) { + val = node->locked; /* preserve secondary queue */ + + /* + * We have a local waiter, either real or fake one; + * reload @next in case it was changed by cna_order_queue(). + */ + next = node->next; + + /* + * Pass over NUMA node id of primary queue, to maintain the + * preference even if the next waiter is on a different node. + */ + ((struct cna_node *)next)->llc_id = cn->llc_id; + + ((struct cna_node *)next)->start_time = cn->start_time; + } + } else { + /* + * We decided to flush the secondary queue; + * this can only happen if that queue is not empty. + */ + + WARN_ON(node->locked <= 1); + /* + * Splice the secondary queue onto the primary queue and pass the lock + * to the longest waiting remote waiter. + */ + + next = cna_splice_head(NULL, 0, node, next); + } + + arch_mcs_lock_handoff(&next->locked, val); +} + +/* + * Constant (boot-param configurable) flag selecting the LLC-aware variant + * of spinlock. Possible values: -1 (off, the default) / 0 (auto) / 1 (on). + */ +static int llc_spinlock_flag = -1; + +static int __init llc_spinlock_setup(char *str) +{ + if (!strcmp(str, "auto")) { + llc_spinlock_flag = 0; + return 1; + } else if (!strcmp(str, "on")) { + llc_spinlock_flag = 1; + return 1; + } else if (!strcmp(str, "off")) { + llc_spinlock_flag = -1; + return 1; + } + + return 0; +} +__setup("llc_spinlock=", llc_spinlock_setup); + +void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +/* + * Switch to the llc-friendly slow path for spinlocks when we have + * multiple llc nodes in native environment, unless the user has + * overridden this default behavior by setting the llc_spinlock flag. + */ +void __init cna_configure_spin_lock_slowpath(void) +{ + if (llc_spinlock_flag < 0) + return; + + if (llc_spinlock_flag == 0 && + pv_ops.lock.queued_spin_lock_slowpath != + native_queued_spin_lock_slowpath) + return; + + cna_init_nodes(); + + use_llc_spinlock = true; + + pv_ops.lock.queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath; + + pr_info("Enabling CNA spinlock\n"); +} + diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index e84d21aa0722..619d80fd5ea8 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -368,7 +368,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) * * Matches with smp_store_mb() and cmpxchg() in pv_wait_node() * - * The write to next->locked in arch_mcs_spin_unlock_contended() + * The write to next->locked in arch_mcs_lock_handoff() * must be ordered before the read of pn->state in the cmpxchg() * below for the code to work correctly. To guarantee full ordering * irrespective of the success or failure of the cmpxchg(), diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2874bf556162..734698aec5f9 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1718,8 +1718,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, * possible because it belongs to the pi_state which is about to be freed * and it is not longer visible to other tasks.
*/ -void rt_mutex_proxy_unlock(struct rt_mutex *lock, - struct task_struct *proxy_owner) +void rt_mutex_proxy_unlock(struct rt_mutex *lock) { debug_rt_mutex_proxy_unlock(lock); rt_mutex_set_owner(lock, NULL); diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index d1d62f942be2..ca6fb489007b 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -133,8 +133,7 @@ enum rtmutex_chainwalk { extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); -extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, - struct task_struct *proxy_owner); +extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index baafa1dd9fcc..5d54ff3179b8 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1348,6 +1348,18 @@ inline void __down_read(struct rw_semaphore *sem) } } +static inline int __down_read_interruptible(struct rw_semaphore *sem) +{ + if (!rwsem_read_trylock(sem)) { + if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE))) + return -EINTR; + DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); + } else { + rwsem_set_reader_owned(sem); + } + return 0; +} + static inline int __down_read_killable(struct rw_semaphore *sem) { if (!rwsem_read_trylock(sem)) { @@ -1498,6 +1510,20 @@ void __sched down_read(struct rw_semaphore *sem) } EXPORT_SYMBOL(down_read); +int __sched down_read_interruptible(struct rw_semaphore *sem) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); + + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) { + rwsem_release(&sem->dep_map, 1, _RET_IP_); + return -EINTR; + } + + return 0; +} +EXPORT_SYMBOL(down_read_interruptible); + int __sched down_read_killable(struct rw_semaphore *sem) { might_sleep(); @@ -1608,6 +1634,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass) } EXPORT_SYMBOL(down_read_nested); +int down_read_killable_nested(struct rw_semaphore *sem, int subclass) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); + + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { + rwsem_release(&sem->dep_map, 1, _RET_IP_); + return -EINTR; + } + + return 0; +} +EXPORT_SYMBOL(down_read_killable_nested); + void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) { might_sleep(); diff --git a/kernel/module.c b/kernel/module.c index b7c4cfd3b718..13471a694d43 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -88,8 +88,9 @@ EXPORT_SYMBOL_GPL(module_mutex); static LIST_HEAD(modules); /* Work queue for freeing init sections in success case */ -static struct work_struct init_free_wq; -static struct llist_head init_free_list; +static void do_free_init(struct work_struct *w); +static DECLARE_WORK(init_free_wq, do_free_init); +static LLIST_HEAD(init_free_list); #ifdef CONFIG_MODULES_TREE_LOOKUP @@ -1365,17 +1366,39 @@ static inline int check_modstruct_version(const struct load_info *info, static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) { - int l1, l2; + int l1, l2, l3, l4, l5, l6; if (has_crcs) { l1 = strcspn(amagic, " "); l2 = strcspn(bmagic, " "); - if (l1 != l2) + l3 = strcspn(amagic, "-"); + l4 = 
strcspn(bmagic, "-"); + + if (l3 > l1 || l4 > l2) + goto check_all; + + if (l3 != l4 || memcmp(amagic, bmagic, l3)) return false; - if (memcmp(amagic, bmagic, l1)) + + amagic += l3; + bmagic += l3; + + l5 = strcspn(amagic, ". "); + l6 = strcspn(bmagic, ". "); + + if (l5 != l6) + goto check_all; + + if (memcmp(amagic, bmagic, l5)) return false; - amagic += l1; - bmagic += l2; + + l5 = strcspn(amagic, " "); + l6 = strcspn(bmagic, " "); + + amagic += l5; + bmagic += l6; } + +check_all: return strcmp(amagic, bmagic) == 0; } #else @@ -1514,8 +1537,7 @@ static inline bool sect_empty(const Elf_Shdr *sect) } struct module_sect_attr { - struct module_attribute mattr; - char *name; + struct bin_attribute battr; unsigned long address; }; @@ -1525,13 +1547,34 @@ struct module_sect_attrs { struct module_sect_attr attrs[0]; }; -static ssize_t module_sect_show(struct module_attribute *mattr, - struct module_kobject *mk, char *buf) +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) { struct module_sect_attr *sattr = - container_of(mattr, struct module_sect_attr, mattr); - return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? - (void *)sattr->address : NULL); + container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; + + if (pos != 0) + return -EINVAL; + + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? (void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -1539,7 +1582,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs) unsigned int section; for (section = 0; section < sect_attrs->nsections; section++) - kfree(sect_attrs->attrs[section].name); + kfree(sect_attrs->attrs[section].battr.attr.name); kfree(sect_attrs); } @@ -1548,42 +1591,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info) unsigned int nloaded = 0, i, size[2]; struct module_sect_attrs *sect_attrs; struct module_sect_attr *sattr; - struct attribute **gattr; + struct bin_attribute **gattr; /* Count loaded sections and allocate structures */ for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i])) nloaded++; size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), - sizeof(sect_attrs->grp.attrs[0])); - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); + sizeof(sect_attrs->grp.bin_attrs[0])); + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); if (sect_attrs == NULL) return; /* Setup section attributes. 
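The bounce buffer in module_sect_read() above exists because the s*printf family always writes a trailing NUL, which would otherwise clip the last payload byte of an exactly-sized destination. A userspace model of the same read handler (snprintf standing in for scnprintf, sizes simplified):

#include <stdio.h>
#include <string.h>

#define SECT_READ_SIZE (3 + 2 * sizeof(unsigned long))	/* "0x", "\n", hex */

/*
 * Model of module_sect_read(): format into a local bounce buffer so the
 * NUL that snprintf() appends can never eat the last byte handed back
 * in the caller's exactly-sized buffer.
 */
static size_t sect_read(char *buf, size_t count, unsigned long addr)
{
	char bounce[SECT_READ_SIZE + 1];
	size_t wrote;

	wrote = (size_t)snprintf(bounce, sizeof(bounce), "0x%lx\n", addr);
	if (wrote > SECT_READ_SIZE)
		wrote = SECT_READ_SIZE;		/* scnprintf()-like clamp */
	if (count > wrote)
		count = wrote;
	memcpy(buf, bounce, count);		/* no NUL needed in buf */
	return count;
}

int main(void)
{
	char out[SECT_READ_SIZE];		/* no room for a NUL */
	size_t n = sect_read(out, sizeof(out), 0xffffffffc0001000UL);

	fwrite(out, 1, n, stdout);
	return 0;
}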
*/ sect_attrs->grp.name = "sections"; - sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; sect_attrs->nsections = 0; sattr = &sect_attrs->attrs[0]; - gattr = &sect_attrs->grp.attrs[0]; + gattr = &sect_attrs->grp.bin_attrs[0]; for (i = 0; i < info->hdr->e_shnum; i++) { Elf_Shdr *sec = &info->sechdrs[i]; if (sect_empty(sec)) continue; + sysfs_bin_attr_init(&sattr->battr); sattr->address = sec->sh_addr; - sattr->name = kstrdup(info->secstrings + sec->sh_name, - GFP_KERNEL); - if (sattr->name == NULL) + sattr->battr.attr.name = + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); + if (sattr->battr.attr.name == NULL) goto out; sect_attrs->nsections++; - sysfs_attr_init(&sattr->mattr.attr); - sattr->mattr.show = module_sect_show; - sattr->mattr.store = NULL; - sattr->mattr.attr.name = sattr->name; - sattr->mattr.attr.mode = S_IRUSR; - *(gattr++) = &(sattr++)->mattr.attr; + sattr->battr.read = module_sect_read; + sattr->battr.size = MODULE_SECT_READ_SIZE; + sattr->battr.attr.mode = 0400; + *(gattr++) = &(sattr++)->battr; } *gattr = NULL; @@ -1673,7 +1715,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) continue; if (info->sechdrs[i].sh_type == SHT_NOTE) { sysfs_bin_attr_init(nattr); - nattr->attr.name = mod->sect_attrs->attrs[loaded].name; + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; nattr->attr.mode = S_IRUGO; nattr->size = info->sechdrs[i].sh_size; nattr->private = (void *) info->sechdrs[i].sh_addr; @@ -1850,7 +1892,6 @@ static int mod_sysfs_init(struct module *mod) if (err) mod_kobject_put(mod); - /* delay uevent until full sysfs population */ out: return err; } @@ -1887,7 +1928,6 @@ static int mod_sysfs_setup(struct module *mod, add_sect_attrs(mod, info); add_notes_attrs(mod, info); - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); return 0; out_unreg_modinfo_attrs: @@ -2379,6 +2419,21 @@ static int verify_exported_symbols(struct module *mod) return 0; } +static bool ignore_undef_symbol(Elf_Half emachine, const char *name) +{ + /* + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. + * i386 has a similar problem but may not deserve a fix. + * + * If we ever have to ignore many symbols, consider refactoring the code to + * only warn if referenced by a relocation. + */ + if (emachine == EM_386 || emachine == EM_X86_64) + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); + return false; +} + /* Change all symbols so that st_value encodes the pointer directly. */ static int simplify_symbols(struct module *mod, const struct load_info *info) { @@ -2424,8 +2479,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) break; } - /* Ok if weak. */ - if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) + /* Ok if weak or ignored.
*/ + if (!ksym && + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || + ignore_undef_symbol(info->hdr->e_machine, name))) break; ret = PTR_ERR(ksym) ?: -ENOENT; @@ -3226,10 +3283,19 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) err = try_to_force_load(mod, "bad vermagic"); if (err) return err; - } else if (!same_magic(modmagic, vermagic, info->index.vers)) { - pr_err("%s: version magic '%s' should be '%s'\n", - info->name, modmagic, vermagic); - return -ENOEXEC; + } else { + if ((strstr(mod->name, "patch") || strstr(mod->name, "hotfix")) && + strcmp(modmagic, vermagic) != 0) { + printk(KERN_ERR "%s: version magic '%s' should be '%s'\n", + mod->name, modmagic, vermagic); + return -ENOEXEC; + } + + if (!same_magic(modmagic, vermagic, info->index.vers)) { + printk(KERN_ERR "%s: version magic '%s' should be compatible with '%s'\n", + mod->name, modmagic, vermagic); + return -ENOEXEC; + } } if (!get_modinfo(info, "intree")) { @@ -3656,14 +3722,6 @@ static void do_free_init(struct work_struct *w) } } -static int __init modules_wq_init(void) -{ - INIT_WORK(&init_free_wq, do_free_init); - init_llist_head(&init_free_list); - return 0; -} -module_init(modules_wq_init); - /* * This is where the real work happens. * @@ -3708,6 +3766,9 @@ static noinline int do_init_module(struct module *mod) blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); + /* Delay uevent until module has finished its init routine */ + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + /* * We need to finish all async code before the module init sequence * is done. This has potential to deadlock. For example, a newly @@ -4062,6 +4123,7 @@ static int load_module(struct load_info *info, const char __user *uargs, MODULE_STATE_GOING, mod); klp_module_going(mod); bug_cleanup: + mod->state = MODULE_STATE_GOING; /* module_bug_cleanup needs module_mutex protection */ mutex_lock(&module_mutex); module_bug_cleanup(mod); @@ -4512,7 +4574,7 @@ static int modules_open(struct inode *inode, struct file *file) if (!err) { struct seq_file *m = file->private_data; - m->private = kallsyms_show_value() ? NULL : (void *)8ul; + m->private = kallsyms_show_value(file->f_cred) ? 
NULL : (void *)8ul; } return err; diff --git a/kernel/notifier.c b/kernel/notifier.c index 157d7c29f720..f6d5ffe4e72e 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -23,7 +23,10 @@ static int notifier_chain_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { - WARN_ONCE(((*nl) == n), "double register detected"); + if (unlikely((*nl) == n)) { + WARN(1, "double register detected"); + return 0; + } if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); diff --git a/kernel/padata.c b/kernel/padata.c index c4b774331e46..92a4867e8adc 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -782,7 +782,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, node); + pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); if (!pinst_has_cpu(pinst, cpu)) return 0; @@ -797,7 +797,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, node); + pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); if (!pinst_has_cpu(pinst, cpu)) return 0; @@ -813,8 +813,9 @@ static enum cpuhp_state hp_online; static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU - cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node); - cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); + cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, + &pinst->cpu_dead_node); + cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); #endif WARN_ON(!list_empty(&pinst->pslist)); @@ -1020,9 +1021,10 @@ static struct padata_instance *padata_alloc(const char *name, mutex_init(&pinst->lock); #ifdef CONFIG_HOTPLUG_CPU - cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); + cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, + &pinst->cpu_online_node); cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, - &pinst->node); + &pinst->cpu_dead_node); #endif put_online_cpus(); diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 0a9326f5f421..8dac32bd9089 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -74,7 +74,7 @@ static int __init em_debug_init(void) return 0; } -core_initcall(em_debug_init); +fs_initcall(em_debug_init); #else /* CONFIG_DEBUG_FS */ static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {} #endif diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 3c0a5a8170b0..69c4cd472def 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -839,17 +839,6 @@ static int software_resume(void) /* Check if the device is there */ swsusp_resume_device = name_to_dev_t(resume_file); - - /* - * name_to_dev_t is ineffective to verify parition if resume_file is in - * integer format. (e.g. 
major:minor) - */ - if (isdigit(resume_file[0]) && resume_wait) { - int partno; - while (!get_gendisk(swsusp_resume_device, &partno)) - msleep(10); - } - if (!swsusp_resume_device) { /* * Some device discovery might still be in progress; we need @@ -898,6 +887,13 @@ static int software_resume(void) error = freeze_processes(); if (error) goto Close_Finish; + + error = freeze_kernel_threads(); + if (error) { + thaw_processes(); + goto Close_Finish; + } + error = load_image_and_restore(); thaw_processes(); Finish: diff --git a/kernel/power/swap.c b/kernel/power/swap.c index ca0fcb5ced71..0516c422206d 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -489,10 +489,10 @@ static int swap_writer_finish(struct swap_map_handle *handle, unsigned int flags, int error) { if (!error) { - flush_swap_writer(handle); pr_info("S"); error = mark_swapfiles(handle, flags); pr_cont("|\n"); + flush_swap_writer(handle); } if (error) diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index c8e6ab689d42..b2b0f526f249 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -23,6 +23,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args); void __printk_safe_enter(void); void __printk_safe_exit(void); +void printk_safe_init(void); +bool printk_percpu_data_ready(void); + #define printk_safe_enter_irqsave(flags) \ do { \ local_irq_save(flags); \ @@ -64,4 +67,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } #define printk_safe_enter_irq() local_irq_disable() #define printk_safe_exit_irq() local_irq_enable() +static inline void printk_safe_init(void) { } +static inline bool printk_percpu_data_ready(void) { return false; } #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index c0a5b56aea4e..5569ef6bc183 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -460,6 +460,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; +/* + * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before + * per_cpu_areas are initialised. This variable is set to true when + * it's safe to access per-CPU data. + */ +static bool __printk_percpu_data_ready __read_mostly; + +bool printk_percpu_data_ready(void) +{ + return __printk_percpu_data_ready; +} + /* Return log buffer address */ char *log_buf_addr_get(void) { @@ -1146,12 +1158,28 @@ static void __init log_buf_add_cpu(void) static inline void log_buf_add_cpu(void) {} #endif /* CONFIG_SMP */ +static void __init set_percpu_data_ready(void) +{ + printk_safe_init(); + /* Make sure we set this flag only after printk_safe() init is done */ + barrier(); + __printk_percpu_data_ready = true; +} + void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; unsigned int free; + /* + * Some archs call setup_log_buf() multiple times - first is very + * early, e.g. from setup_arch(), and second - when percpu_areas + * are initialised. 
+ */ + if (!early) + set_percpu_data_ready(); + if (log_buf != __log_buf) return; @@ -2165,6 +2193,9 @@ static int __init console_setup(char *str) char *s, *options, *brl_options = NULL; int idx; + if (str[0] == 0) + return 1; + if (_braille_console_setup(&str, &brl_options)) return 1; @@ -2966,6 +2997,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { void wake_up_klogd(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); if (waitqueue_active(&log_wait)) { this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); @@ -2976,6 +3010,9 @@ void wake_up_klogd(void) void defer_console_output(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index b4045e782743..6cfc5a00c67d 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -27,7 +27,6 @@ * There are situations when we want to make sure that all buffers * were handled or when IRQs are blocked. */ -static int printk_safe_irq_ready __read_mostly; #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \ sizeof(atomic_t) - \ @@ -44,6 +43,8 @@ struct printk_safe_seq_buf { static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq); static DEFINE_PER_CPU(int, printk_context); +static DEFINE_RAW_SPINLOCK(safe_read_lock); + #ifdef CONFIG_PRINTK_NMI static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); #endif @@ -51,7 +52,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); /* Get flushed in a more safe context. */ static void queue_flush_work(struct printk_safe_seq_buf *s) { - if (printk_safe_irq_ready) + if (printk_percpu_data_ready()) irq_work_queue(&s->work); } @@ -179,8 +180,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s) */ static void __printk_safe_flush(struct irq_work *work) { - static raw_spinlock_t read_lock = - __RAW_SPIN_LOCK_INITIALIZER(read_lock); struct printk_safe_seq_buf *s = container_of(work, struct printk_safe_seq_buf, work); unsigned long flags; @@ -194,7 +193,7 @@ static void __printk_safe_flush(struct irq_work *work) * different CPUs. This is especially important when printing * a backtrace. */ - raw_spin_lock_irqsave(&read_lock, flags); + raw_spin_lock_irqsave(&safe_read_lock, flags); i = 0; more: @@ -231,7 +230,7 @@ more: out: report_message_lost(s); - raw_spin_unlock_irqrestore(&read_lock, flags); + raw_spin_unlock_irqrestore(&safe_read_lock, flags); } /** @@ -277,6 +276,14 @@ void printk_safe_flush_on_panic(void) raw_spin_lock_init(&logbuf_lock); } + if (raw_spin_is_locked(&safe_read_lock)) { + if (num_online_cpus() > 1) + return; + + debug_locks_off(); + raw_spin_lock_init(&safe_read_lock); + } + printk_safe_flush(); } @@ -402,14 +409,6 @@ void __init printk_safe_init(void) #endif } - /* - * In the highly unlikely event that a NMI were to trigger at - * this moment. Make sure IRQ work is set up before this - * variable is set. - */ - barrier(); - printk_safe_irq_ready = 1; - /* Flush pending messages that did not have scheduled IRQ works. 
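Both wake_up_klogd() and defer_console_output() above now bail out until printk_percpu_data_ready() reports that per-CPU areas (and thus the deferred-flush irq_work) are usable. The shape of that guard, reduced to a runnable sketch (the flag and messages are illustrative):

#include <stdio.h>
#include <stdbool.h>

static bool percpu_ready;	/* printk_percpu_data_ready() stand-in */

/* deferred work must not be queued before per-CPU state exists */
static void queue_deferred_flush(void)
{
	if (!percpu_ready) {
		puts("too early: skip the per-CPU irq_work");
		return;
	}
	puts("queued deferred flush");
}

int main(void)
{
	queue_deferred_flush();	/* early boot: silently does nothing */
	percpu_ready = true;	/* set_percpu_data_ready() equivalent */
	queue_deferred_flush();
	return 0;
}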
*/ printk_safe_flush(); } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 36943f38665b..85535465f2e6 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -264,17 +264,12 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) return ret; } -static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns, - unsigned int mode) +static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode) { - int ret; - if (mode & PTRACE_MODE_NOAUDIT) - ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT); - else - ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE); - - return ret == 0; + if (mode & PTRACE_MODE_NOAUDIT) + return ns_capable_noaudit(ns, CAP_SYS_PTRACE); + return ns_capable(ns, CAP_SYS_PTRACE); } /* Returns 0 on success, -errno on denial. */ @@ -326,7 +320,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) gid_eq(caller_gid, tcred->sgid) && gid_eq(caller_gid, tcred->gid)) goto ok; - if (ptrace_has_cap(cred, tcred->user_ns, mode)) + if (ptrace_has_cap(tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; @@ -345,7 +339,7 @@ ok: mm = task->mm; if (mm && ((get_dumpable(mm) != SUID_DUMP_USER) && - !ptrace_has_cap(cred, mm->user_ns, mode))) + !ptrace_has_cap(mm->user_ns, mode))) return -EPERM; return security_ptrace_access_check(task, mode); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 62e59596a30a..4dfa9dd47223 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -579,7 +579,6 @@ static void rcu_eqs_enter(bool user) trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); rdp = this_cpu_ptr(&rcu_data); - do_nocb_deferred_wakeup(rdp); rcu_prepare_for_idle(); rcu_preempt_deferred_qs(current); WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ @@ -618,7 +617,14 @@ void rcu_idle_enter(void) */ void rcu_user_enter(void) { + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + lockdep_assert_irqs_disabled(); + + instrumentation_begin(); + do_nocb_deferred_wakeup(rdp); + instrumentation_end(); + rcu_eqs_enter(true); } #endif /* CONFIG_NO_HZ_FULL */ @@ -3157,7 +3163,6 @@ void rcu_cpu_starting(unsigned int cpu) smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ } -#ifdef CONFIG_HOTPLUG_CPU /* * The outgoing function has no further need of RCU, so remove it from * the rcu_node tree's ->qsmaskinitnext bit masks.
@@ -3197,6 +3202,7 @@ void rcu_report_dead(unsigned int cpu) per_cpu(rcu_cpu_started, cpu) = 0; } +#ifdef CONFIG_HOTPLUG_CPU /* * The outgoing CPU has just passed through the dying-idle state, and we * are being invoked from the CPU that was IPIed to continue the offline diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index f7118842a2b8..a71a4a272515 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2190,6 +2190,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) do_nocb_deferred_wakeup_common(rdp); } +void rcu_nocb_flush_deferred_wakeup(void) +{ + do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data)); +} + void __init rcu_init_nohz(void) { int cpu; diff --git a/kernel/reboot.c b/kernel/reboot.c index 509029cbc820..01b6a32566ba 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -618,22 +618,22 @@ static int __init reboot_setup(char *str) break; case 's': - { - int rc; - - if (isdigit(*(str+1))) { - rc = kstrtoint(str+1, 0, &reboot_cpu); - if (rc) - return rc; - } else if (str[1] == 'm' && str[2] == 'p' && - isdigit(*(str+3))) { - rc = kstrtoint(str+3, 0, &reboot_cpu); - if (rc) - return rc; - } else + if (isdigit(*(str+1))) + reboot_cpu = simple_strtoul(str+1, NULL, 0); + else if (str[1] == 'm' && str[2] == 'p' && + isdigit(*(str+3))) + reboot_cpu = simple_strtoul(str+3, NULL, 0); + else *mode = REBOOT_SOFT; + if (reboot_cpu >= num_possible_cpus()) { + pr_err("Ignoring the CPU number in reboot= option. " + "CPU %d exceeds possible cpu number %d\n", + reboot_cpu, num_possible_cpus()); + reboot_cpu = 0; + break; + } break; - } + case 'g': *mode = REBOOT_GPIO; break; diff --git a/kernel/relay.c b/kernel/relay.c index ade14fb7ce2e..d3940becf2fc 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -197,6 +197,7 @@ free_buf: static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); + free_percpu(chan->buf); kfree(chan); } @@ -581,6 +582,11 @@ struct rchan *relay_open(const char *base_filename, return NULL; chan->buf = alloc_percpu(struct rchan_buf *); + if (!chan->buf) { + kfree(chan); + return NULL; + } + chan->version = RELAYFS_CHANNEL_VERSION; chan->n_subbufs = n_subbufs; chan->subbuf_size = subbuf_size; diff --git a/kernel/resource.c b/kernel/resource.c index 76036a41143b..841737bbda9e 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1126,6 +1126,7 @@ struct resource * __request_region(struct resource *parent, { DECLARE_WAITQUEUE(wait, current); struct resource *res = alloc_resource(GFP_KERNEL); + struct resource *orig_parent = parent; if (!res) return NULL; @@ -1176,6 +1177,10 @@ struct resource * __request_region(struct resource *parent, break; } write_unlock(&resource_lock); + + if (res && orig_parent == &iomem_resource) + revoke_devmem(res); + return res; } EXPORT_SYMBOL(__request_region); diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 2067080bb235..4f439cd95eec 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -5,7 +5,7 @@ #include <linux/nospec.h> #include "sched.h" -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; +unsigned int __read_mostly sysctl_sched_autogroup_enabled = 0; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ea09ebacce45..af7d04951e8d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -19,6 +19,7 @@ #include "../smpboot.h" #include "pelt.h" +#include <linux/sli.h> #define CREATE_TRACE_POINTS 
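The reboot= parsing above goes back to simple_strtoul() and adds a range check so an out-of-range CPU number degrades to CPU 0 instead of being used blindly. A userspace model of the "s"/"smp" handler (strtoul standing in for simple_strtoul, NR_CPUS standing in for num_possible_cpus()):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8	/* stand-in for num_possible_cpus() */

/* accepts "s<N>" or "smp<N>"; out-of-range CPUs fall back to 0 */
static int parse_reboot_cpu(const char *str)
{
	unsigned long cpu = 0;

	if (isdigit((unsigned char)str[1]))
		cpu = strtoul(str + 1, NULL, 0);
	else if (str[1] == 'm' && str[2] == 'p' &&
		 isdigit((unsigned char)str[3]))
		cpu = strtoul(str + 3, NULL, 0);

	if (cpu >= NR_CPUS) {
		fprintf(stderr, "CPU %lu exceeds possible cpu number %d\n",
			cpu, NR_CPUS);
		cpu = 0;
	}
	return (int)cpu;
}

int main(void)
{
	printf("s3    -> %d\n", parse_reboot_cpu("s3"));
	printf("smp42 -> %d\n", parse_reboot_cpu("smp42"));
	return 0;
}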
#include <trace/events/sched.h> @@ -36,7 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) +#ifdef CONFIG_SCHED_DEBUG /* * Debugging: various feature bits * @@ -254,8 +255,9 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) static void __hrtick_restart(struct rq *rq) { struct hrtimer *timer = &rq->hrtick_timer; + ktime_t time = rq->hrtick_time; - hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); + hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); } /* @@ -280,7 +282,6 @@ static void __hrtick_start(void *arg) void hrtick_start(struct rq *rq, u64 delay) { struct hrtimer *timer = &rq->hrtick_timer; - ktime_t time; s64 delta; /* @@ -288,9 +289,7 @@ void hrtick_start(struct rq *rq, u64 delay) * doesn't make sense and can cause timer DoS. */ delta = max_t(s64, delay, 10000LL); - time = ktime_add_ns(timer->base->get_time(), delta); - - hrtimer_set_expires(timer, time); + rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); if (rq == this_rq()) { __hrtick_restart(rq); @@ -794,6 +793,26 @@ unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; /* All clamps are required to be less or equal than these values */ static struct uclamp_se uclamp_default[UCLAMP_CNT]; +/* + * This static key is used to reduce the uclamp overhead in the fast path. It + * primarily disables the call to uclamp_rq_{inc, dec}() in + * enqueue/dequeue_task(). + * + * This allows users to continue to enable uclamp in their kernel config with + * minimum uclamp overhead in the fast path. + * + * As soon as userspace modifies any of the uclamp knobs, the static key is + * enabled, since there are actual users that make use of the uclamp + * functionality. + * + * The knobs that would enable this static key are: + * + * * A task modifying its uclamp value with sched_setattr(). + * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs. + * * An admin modifying the cgroup cpu.uclamp.{min, max} + */ +DEFINE_STATIC_KEY_FALSE(sched_uclamp_used); + /* Integer rounded range for each bucket */ #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) @@ -990,10 +1009,38 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, lockdep_assert_held(&rq->lock); + /* + * If sched_uclamp_used was enabled after task @p was enqueued, + * we could end up with an unbalanced call to uclamp_rq_dec_id(). + * + * In this case the uc_se->active flag should be false since no uclamp + * accounting was performed at enqueue time and we can just return + * here. + * + * Need to be careful of the following enqueue/dequeue ordering + * problem too + * + * enqueue(taskA) + * // sched_uclamp_used gets enabled + * enqueue(taskB) + * dequeue(taskA) + * // Must not decrement bucket->tasks here + * dequeue(taskB) + * + * where we could end up with stale data in uc_se and + * bucket[uc_se->bucket_id]. + * + * The following check here eliminates the possibility of such race. + */ + if (unlikely(!uc_se->active)) + return; + bucket = &uc_rq->bucket[uc_se->bucket_id]; + SCHED_WARN_ON(!bucket->tasks); if (likely(bucket->tasks)) bucket->tasks--; + uc_se->active = false; /* @@ -1021,6 +1068,15 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { enum uclamp_id clamp_id; + /* + * Avoid any overhead until uclamp is actually used by the userspace.
+ * + * The condition is constructed such that a NOP is generated when + * sched_uclamp_used is disabled. + */ + if (!static_branch_unlikely(&sched_uclamp_used)) + return; + if (unlikely(!p->sched_class->uclamp_enabled)) return; @@ -1036,6 +1092,15 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { enum uclamp_id clamp_id; + /* + * Avoid any overhead until uclamp is actually used by the userspace. + * + * The condition is constructed such that a NOP is generated when + * sched_uclamp_used is disabled. + */ + if (!static_branch_unlikely(&sched_uclamp_used)) + return; + if (unlikely(!p->sched_class->uclamp_enabled)) return; @@ -1145,8 +1210,10 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, update_root_tg = true; } - if (update_root_tg) + if (update_root_tg) { + static_branch_enable(&sched_uclamp_used); uclamp_update_root_tg(); + } /* * We update all RUNNABLE tasks only when task groups are in use. @@ -1181,6 +1248,15 @@ static int uclamp_validate(struct task_struct *p, if (upper_bound > SCHED_CAPACITY_SCALE) return -EINVAL; + /* + * We have valid uclamp attributes; make sure uclamp is enabled. + * + * We need to do that here, because enabling static branches is a + * blocking operation which obviously cannot be done while holding + * scheduler locks. + */ + static_branch_enable(&sched_uclamp_used); + return 0; } @@ -1233,16 +1309,25 @@ static void uclamp_fork(struct task_struct *p) return; for_each_clamp_id(clamp_id) { - unsigned int clamp_value = uclamp_none(clamp_id); - - /* By default, RT tasks always get 100% boost */ - if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) - clamp_value = uclamp_none(UCLAMP_MAX); - - uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false); + uclamp_se_set(&p->uclamp_req[clamp_id], + uclamp_none(clamp_id), false); } } +static void __init init_uclamp_rq(struct rq *rq) +{ + enum uclamp_id clamp_id; + struct uclamp_rq *uc_rq = rq->uclamp; + + for_each_clamp_id(clamp_id) { + uc_rq[clamp_id] = (struct uclamp_rq) { + .value = uclamp_none(clamp_id) + }; + } + + rq->uclamp_flags = 0; +} + static void __init init_uclamp(void) { struct uclamp_se uc_max = {}; @@ -1251,11 +1336,8 @@ static void __init init_uclamp(void) mutex_init(&uclamp_mutex); - for_each_possible_cpu(cpu) { - memset(&cpu_rq(cpu)->uclamp, 0, - sizeof(struct uclamp_rq)*UCLAMP_CNT); - cpu_rq(cpu)->uclamp_flags = 0; - } + for_each_possible_cpu(cpu) + init_uclamp_rq(cpu_rq(cpu)); for_each_clamp_id(clamp_id) { uclamp_se_set(&init_task.uclamp_req[clamp_id], @@ -1654,7 +1736,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } - if (cpumask_equal(p->cpus_ptr, new_mask)) + if (cpumask_equal(&p->cpus_mask, new_mask)) goto out; dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); @@ -2894,6 +2976,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) * Silence PROVE_RCU. */ raw_spin_lock_irqsave(&p->pi_lock, flags); + rseq_migrate(p); /* * We're setting the CPU for the first time, we don't migrate, * so use __set_task_cpu(). @@ -2958,6 +3041,7 @@ void wake_up_new_task(struct task_struct *p) * as we're not fully set-up yet. 
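The sched_uclamp_used static key above turns uclamp_rq_{inc,dec}() into (nearly) free calls until the first real user flips it via sched_setattr(), the sysctl, or the cgroup knobs. A plain-C stand-in for the pattern; note a real static key patches a NOP in the instruction stream rather than testing a variable, and __builtin_expect is a GCC/Clang extension:

#include <stdio.h>
#include <stdbool.h>

static bool uclamp_used;	/* models the sched_uclamp_used static key */

static void uclamp_rq_inc(void)
{
	/* fast path: taken on every enqueue until uclamp gets a user */
	if (!__builtin_expect(uclamp_used, 0))
		return;
	puts("uclamp bucket accounting");
}

int main(void)
{
	uclamp_rq_inc();	/* no accounting: uclamp never configured */
	uclamp_used = true;	/* sched_setattr()/sysctl/cgroup analogue */
	uclamp_rq_inc();
	return 0;
}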
*/ p->recent_used_cpu = task_cpu(p); + rseq_migrate(p); __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); #endif rq = __task_rq_lock(p, &rf); @@ -3610,6 +3694,7 @@ void scheduler_tick(void) update_rq_clock(rq); curr->sched_class->task_tick(rq, curr, 0); + sli_check_longsys(curr); calc_global_load_tick(rq); psi_task_tick(rq); @@ -3686,7 +3771,6 @@ static void sched_tick_remote(struct work_struct *work) if (cpu_is_offline(cpu)) goto out_unlock; - curr = rq->curr; update_rq_clock(rq); if (!is_idle_task(curr)) { @@ -4125,7 +4209,8 @@ static inline void sched_submit_work(struct task_struct *tsk) * it wants to wake up a task to maintain concurrency. * As this function is called inside the schedule() context, * we disable preemption to avoid it calling schedule() again - * in the possible wakeup of a kworker. + * in the possible wakeup of a kworker and because wq_worker_sleeping() + * requires it. */ if (tsk->flags & PF_WQ_WORKER) { preempt_disable(); @@ -4463,7 +4548,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) */ if (dl_prio(prio)) { if (!dl_prio(p->normal_prio) || - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; queue_flag |= ENQUEUE_REPLENISH; } else @@ -5603,12 +5689,8 @@ static void do_sched_yield(void) schedstat_inc(rq->yld_count); current->sched_class->yield_task(rq); - /* - * Since we are going to call schedule() anyway, there's - * no need to preempt or enable interrupts: - */ preempt_disable(); - rq_unlock(rq, &rf); + rq_unlock_irq(rq, &rf); sched_preempt_enable_no_resched(); schedule(); @@ -6193,13 +6275,14 @@ void idle_task_exit(void) struct mm_struct *mm = current->active_mm; BUG_ON(cpu_online(smp_processor_id())); + BUG_ON(current != this_rq()->idle); if (mm != &init_mm) { switch_mm(mm, &init_mm, current); - current->active_mm = &init_mm; finish_arch_post_lock_switch(); } - mmdrop(mm); + + /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } /* @@ -7086,6 +7169,26 @@ static inline struct task_group *css_tg(struct cgroup_subsys_state *css) return css ? container_of(css, struct task_group, css) : NULL; } +int container_cpuquota_aware; +#define cpu_quota_aware_enabled(tg) \ + (tg && tg != &root_task_group && tg->cpuquota_aware) + +int cpu_get_max_cpus(struct task_struct *p) +{ + int max_cpus = INT_MAX; + struct task_group *tg = task_group(p); + + if (!cpu_quota_aware_enabled(tg)) + return max_cpus; + + if (tg->cfs_bandwidth.quota == RUNTIME_INF) + return max_cpus; + + max_cpus = DIV_ROUND_UP(tg->cfs_bandwidth.quota, tg->cfs_bandwidth.period); + + return max_cpus; +} + static struct cgroup_subsys_state * cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -7100,6 +7203,7 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) tg = sched_create_group(parent); if (IS_ERR(tg)) return ERR_PTR(-ENOMEM); + tg->cpuquota_aware = container_cpuquota_aware; return &tg->css; } @@ -7295,6 +7399,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, if (req.ret) return req.ret; + static_branch_enable(&sched_uclamp_used); + mutex_lock(&uclamp_mutex); rcu_read_lock(); @@ -7389,10 +7495,13 @@ static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ +/* More than 203 days if BW_SHIFT equals 20. 
*/ +const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); -static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) +static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, + u64 burst) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -7416,6 +7525,18 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (period > max_cfs_quota_period) return -EINVAL; + /* + * Bound quota to defend quota against overflow during bandwidth shift. + */ + if (quota != RUNTIME_INF && quota > max_cfs_runtime) + return -EINVAL; + + /* + * Bound burst to defend burst against overflow during bandwidth shift. + */ + if (burst > max_cfs_runtime) + return -EINVAL; + /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). @@ -7437,12 +7558,16 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; - - __refill_cfs_bandwidth_runtime(cfs_b); + cfs_b->burst = burst; /* Restart the period timer (if active) to handle new period expiry: */ - if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + if (runtime_enabled) { + cfs_b->buffer = min(max_cfs_runtime, quota + burst); + cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); + cfs_b->runtime = cfs_b->quota; + cfs_b->runtime_at_period_start = cfs_b->runtime; + start_cfs_bandwidth(cfs_b, 1); + } raw_spin_unlock_irq(&cfs_b->lock); @@ -7470,9 +7595,10 @@ out_unlock: static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period; + u64 quota, period, burst; period = ktime_to_ns(tg->cfs_bandwidth.period); + burst = tg->cfs_bandwidth.burst; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -7480,7 +7606,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_quota(struct task_group *tg) @@ -7498,15 +7624,16 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period; + u64 quota, period, burst; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_period(struct task_group *tg) @@ -7519,6 +7646,35 @@ static long tg_get_cfs_period(struct task_group *tg) return cfs_period_us; } +static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) +{ + u64 quota, period, burst; + + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + if (cfs_burst_us < 0) + burst = RUNTIME_INF; + else if ((u64)cfs_burst_us <= U64_MAX / NSEC_PER_USEC) + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + else + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota, burst); +} + +static long tg_get_cfs_burst(struct task_group *tg) +{ + u64 burst_us; + + if (tg->cfs_bandwidth.burst == RUNTIME_INF) + return -1; + + burst_us = tg->cfs_bandwidth.burst; + do_div(burst_us, NSEC_PER_USEC); + + return burst_us; +} + 
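With burst, unused quota can be carried into later periods, capped by cfs_b->buffer = min(max_cfs_runtime, quota + burst) as set up above. A standalone model of the refill arithmetic that __refill_cfs_bandwidth_runtime() implements in the fair.c hunks below (a sketch over plain integers, not the patch's code; the typedef stands in for the kernel type):

    typedef unsigned long long u64;

    static u64 refill_runtime(u64 runtime, u64 quota, u64 buffer,
                              u64 overrun, u64 max_overrun)
    {
            if (overrun > max_overrun)
                    overrun = max_overrun;  /* cap credit for missed periods */
            runtime += quota * overrun;     /* one quota per elapsed period */
            if (runtime > buffer)
                    runtime = buffer;       /* burst headroom limit */
            return runtime;
    }

Userspace reaches this through the new cpu.cfs_burst_us file in the legacy hierarchy and through an optional third field of cpu.max (see the cpu_period_quota_parse() change below).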
static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -7543,6 +7699,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } +static s64 cpu_cfs_burst_read_s64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_burst(css_tg(css)); +} + +static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_burst_us) +{ + return tg_set_cfs_burst(css_tg(css), cfs_burst_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -7645,6 +7813,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "nr_burst %d\n", cfs_b->nr_burst); + seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); + return 0; } #endif /* CONFIG_CFS_BANDWIDTH */ @@ -7676,8 +7847,32 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, } #endif /* CONFIG_RT_GROUP_SCHED */ +#ifdef CONFIG_FAIR_GROUP_SCHED +static u64 cpu_quota_aware_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct task_group *tg = css_tg(css); + + return tg->cpuquota_aware; +} +static int cpu_quota_aware_write_u64(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct task_group *tg = css_tg(css); + tg->cpuquota_aware = !!val; + return 0; +} + +#endif + static struct cftype cpu_legacy_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED + { + .name = "quota_aware", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = cpu_quota_aware_read_u64, + .write_u64 = cpu_quota_aware_write_u64, + }, { .name = "shares", .read_u64 = cpu_shares_read_u64, @@ -7695,6 +7890,11 @@ static struct cftype cpu_legacy_files[] = { .read_u64 = cpu_cfs_period_read_u64, .write_u64 = cpu_cfs_period_write_u64, }, + { + .name = "cfs_burst_us", + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, + }, { .name = "stat", .seq_show = cpu_cfs_stat_show, @@ -7736,16 +7936,20 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec; + u64 throttled_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + burst_usec = cfs_b->burst_time; + do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" - "throttled_usec %llu\n", + "throttled_usec %llu\n" + "nr_burst %d\n" + "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec); + throttled_usec, cfs_b->nr_burst, burst_usec); } #endif return 0; @@ -7814,27 +8018,29 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, } #endif -static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, - long period, long quota) +static void __maybe_unused +cpu_period_quota_print(struct seq_file *sf, long period, long quota, + long burst) { if (quota < 0) seq_puts(sf, "max"); else seq_printf(sf, "%ld", quota); - seq_printf(sf, " %ld\n", period); + seq_printf(sf, " %ld %ld\n", period, burst); } -/* caller should put the current value in *@periodp before calling */ -static int __maybe_unused cpu_period_quota_parse(char *buf, - u64 *periodp, u64 *quotap) +/* caller should put the current value in *@periodp and *@burstp before calling */ +static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *periodp, + u64 *quotap, u64 *burstp) { char tok[21]; /* U64_MAX */ - if (sscanf(buf, "%20s %llu", tok, periodp) < 
1) + if (sscanf(buf, "%20s %llu %llu", tok, periodp, burstp) < 1) return -EINVAL; *periodp *= NSEC_PER_USEC; + *burstp *= NSEC_PER_USEC; if (sscanf(tok, "%llu", quotap)) *quotap *= NSEC_PER_USEC; @@ -7851,7 +8057,8 @@ static int cpu_max_show(struct seq_file *sf, void *v) { struct task_group *tg = css_tg(seq_css(sf)); - cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); + cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg), + tg_get_cfs_burst(tg)); return 0; } @@ -7860,12 +8067,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, { struct task_group *tg = css_tg(of_css(of)); u64 period = tg_get_cfs_period(tg); + u64 burst = tg_get_cfs_burst(tg); u64 quota; int ret; - ret = cpu_period_quota_parse(buf, &period, &quota); + ret = cpu_period_quota_parse(buf, &period, &quota, &burst); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst); return ret ?: nbytes; } #endif diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index e9fea3cac9f1..36a83402ff9e 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -6,6 +6,7 @@ * (balbir@in.ibm.com). */ #include "sched.h" +#include <linux/sli.h> /* Time spent by the tasks of the CPU accounting group executing in ... */ enum cpuacct_stat_index { @@ -300,9 +301,8 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } -static int cpuacct_uptime_show(struct seq_file *sf, void *v) +static int cpuacct_uptime_show_comm(struct seq_file *sf, void *v, struct cpuacct *ca) { - struct cpuacct *ca = css_ca(seq_css(sf)); struct timespec64 uptime, idle, curr; u64 idletime = 0; u32 cpu, rem; @@ -323,6 +323,32 @@ static int cpuacct_uptime_show(struct seq_file *sf, void *v) return 0; } +static int cpuacct_sli_show(struct seq_file *sf, void *v) +{ + struct cgroup *cgrp = seq_css(sf)->cgroup; + + return sli_schedlat_stat_show(sf, cgrp); +} + +static int cpuacct_sli_max_show(struct seq_file *sf, void *v) +{ + struct cgroup *cgrp = seq_css(sf)->cgroup; + + return sli_schedlat_max_show(sf, cgrp); +} + +int cpuacct_cgroupfs_uptime_show(struct seq_file *m, void *v) +{ + struct cpuacct *ca = task_ca(current); + return cpuacct_uptime_show_comm(m, v, ca); +} + +static int cpuacct_uptime_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + return cpuacct_uptime_show_comm(sf, v, ca); +} + static struct cftype files[] = { { .name = "usage", @@ -361,6 +387,24 @@ static struct cftype files[] = { .name = "uptime", .seq_show = cpuacct_uptime_show, }, + { + .name = "mbuf", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_mbuf_show, + .seq_start = cgroup_mbuf_start, + .seq_next = cgroup_mbuf_next, + .seq_stop = cgroup_mbuf_stop, + }, + { + .name = "sli", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cpuacct_sli_show, + }, + { + .name = "sli_max", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cpuacct_sli_max_show, + }, { } /* terminate */ }; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index b6f56e7c8dd1..4cb80e6042c4 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -210,7 +210,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long dl_util, util, irq; struct rq *rq = cpu_rq(cpu); - if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) && + if (!uclamp_is_used() && type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { return max; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 08bdee0480b3..4ce8c11e5e4a 100644
--- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2469,7 +2469,7 @@ int sched_dl_global_validate(void) u64 period = global_rt_period(); u64 new_bw = to_ratio(period, runtime); struct dl_bw *dl_b; - int cpu, ret = 0; + int cpu, cpus, ret = 0; unsigned long flags; /* @@ -2484,9 +2484,10 @@ int sched_dl_global_validate(void) for_each_possible_cpu(cpu) { rcu_read_lock_sched(); dl_b = dl_bw_of(cpu); + cpus = dl_bw_cpus(cpu); raw_spin_lock_irqsave(&dl_b->lock, flags); - if (new_bw < dl_b->total_bw) + if (new_bw * cpus < dl_b->total_bw) ret = -EBUSY; raw_spin_unlock_irqrestore(&dl_b->lock, flags); @@ -2693,6 +2694,7 @@ void __dl_clear_params(struct task_struct *p) dl_se->dl_bw = 0; dl_se->dl_density = 0; + dl_se->dl_boosted = 0; dl_se->dl_throttled = 0; dl_se->dl_yielded = 0; dl_se->dl_non_contending = 0; diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index f7e4579e746c..68bb7f89883d 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -8,8 +8,6 @@ */ #include "sched.h" -static DEFINE_SPINLOCK(sched_debug_lock); - /* * This allows printing both to /proc/sched_debug and * to the console @@ -417,16 +415,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group #endif #ifdef CONFIG_CGROUP_SCHED +static DEFINE_SPINLOCK(sched_debug_lock); static char group_path[PATH_MAX]; -static char *task_group_path(struct task_group *tg) +static void task_group_path(struct task_group *tg, char *path, int plen) { - if (autogroup_path(tg, group_path, PATH_MAX)) - return group_path; + if (autogroup_path(tg, path, plen)) + return; - cgroup_path(tg->css.cgroup, group_path, PATH_MAX); + cgroup_path(tg->css.cgroup, path, plen); +} - return group_path; +/* + * Only 1 SEQ_printf_task_group_path() caller can use the full length + * group_path[] for cgroup path. Other simultaneous callers will have + * to use a shorter stack buffer. A "..." suffix is appended at the end + * of the stack buffer so that it will show up in case the output length + * matches the given buffer size to indicate possible path name truncation. + */ +#define SEQ_printf_task_group_path(m, tg, fmt...) 
\ +{ \ + if (spin_trylock(&sched_debug_lock)) { \ + task_group_path(tg, group_path, sizeof(group_path)); \ + SEQ_printf(m, fmt, group_path); \ + spin_unlock(&sched_debug_lock); \ + } else { \ + char buf[128]; \ + char *bufend = buf + sizeof(buf) - 3; \ + task_group_path(tg, buf, bufend - buf); \ + strcpy(bufend - 1, "..."); \ + SEQ_printf(m, fmt, buf); \ + } \ } #endif @@ -453,7 +472,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); #endif #ifdef CONFIG_CGROUP_SCHED - SEQ_printf(m, " %s", task_group_path(task_group(p))); + SEQ_printf_task_group_path(m, task_group(p), " %s") #endif SEQ_printf(m, "\n"); @@ -490,7 +509,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, "\n"); - SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); + SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu); #else SEQ_printf(m, "\n"); SEQ_printf(m, "cfs_rq[%d]:\n", cpu); @@ -562,7 +581,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) { #ifdef CONFIG_RT_GROUP_SCHED SEQ_printf(m, "\n"); - SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); + SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu); #else SEQ_printf(m, "\n"); SEQ_printf(m, "rt_rq[%d]:\n", cpu); @@ -614,7 +633,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq) static void print_cpu(struct seq_file *m, int cpu) { struct rq *rq = cpu_rq(cpu); - unsigned long flags; #ifdef CONFIG_X86 { @@ -666,13 +684,11 @@ do { \ } #undef P - spin_lock_irqsave(&sched_debug_lock, flags); print_cfs_stats(m, cpu); print_rt_stats(m, cpu); print_dl_stats(m, cpu); print_rq(m, rq, cpu); - spin_unlock_irqrestore(&sched_debug_lock, flags); SEQ_printf(m, "\n"); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3f723c87abc4..8d3377d76554 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -23,6 +23,7 @@ #include "sched.h" #include <trace/events/sched.h> +#include <linux/sli.h> /* * Targeted preemption latency for CPU-bound tasks: @@ -116,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu) * (default: 5 msec, units: microseconds) */ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; + +/* + * A switch for cfs bandwidth burst. 
+ */ +unsigned int sysctl_sched_cfs_bw_burst_enabled; #endif static inline void update_load_add(struct load_weight *lw, unsigned long inc) @@ -911,6 +917,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) return; } trace_sched_stat_wait(p, delta); + sli_schedlat_stat(p, SCHEDLAT_WAIT, delta); } __schedstat_set(se->statistics.wait_max, @@ -948,6 +955,7 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) __schedstat_add(se->statistics.sum_sleep_runtime, delta); if (tsk) { + sli_schedlat_stat(tsk, SCHEDLAT_SLEEP, delta); account_scheduler_latency(tsk, delta >> 10, 1); trace_sched_stat_sleep(tsk, delta); } @@ -966,9 +974,12 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) if (tsk) { if (tsk->in_iowait) { + sli_schedlat_stat(tsk, SCHEDLAT_IOBLOCK, delta); __schedstat_add(se->statistics.iowait_sum, delta); __schedstat_inc(se->statistics.iowait_count); trace_sched_stat_iowait(tsk, delta); + } else { + sli_schedlat_stat(tsk, SCHEDLAT_BLOCK, delta); } trace_sched_stat_blocked(tsk, delta); @@ -2678,7 +2689,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr) /* * We don't care about NUMA placement if we don't have memory. */ - if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) + if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) return; /* @@ -3814,7 +3825,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) if (!static_branch_unlikely(&sched_asym_cpucapacity)) return; - if (!p) { + if (!p || p->nr_cpus_allowed == 1) { rq->misfit_task_load = 0; return; } @@ -3824,7 +3835,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) return; } - rq->misfit_task_load = task_h_load(p); + /* + * Make sure that misfit_task_load will not be null even if + * task_h_load() returns 0. + */ + rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); } #else /* CONFIG_SMP */ @@ -3927,6 +3942,7 @@ static inline void check_schedstat_required(void) #endif } +static inline bool cfs_bandwidth_used(void); /* * MIGRATION @@ -4005,10 +4021,16 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) __enqueue_entity(cfs_rq, se); se->on_rq = 1; - if (cfs_rq->nr_running == 1) { + /* + * When bandwidth control is enabled, cfs might have been removed + * because a parent has been throttled but cfs->nr_running > 1. Try to + * add it unconditionally.
+ */ + if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) list_add_leaf_cfs_rq(cfs_rq); + + if (cfs_rq->nr_running == 1) check_enqueue_throttle(cfs_rq); - } } static void __clear_buddies_last(struct sched_entity *se) @@ -4360,10 +4382,34 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, + u64 overrun) { - if (cfs_b->quota != RUNTIME_INF) - cfs_b->runtime = cfs_b->quota; + u64 refill, runtime; + + if (cfs_b->quota != RUNTIME_INF) { + + if (!sysctl_sched_cfs_bw_burst_enabled) { + cfs_b->runtime = cfs_b->quota; + return; + } + + if (cfs_b->runtime_at_period_start > cfs_b->runtime) { + runtime = cfs_b->runtime_at_period_start + - cfs_b->runtime; + if (runtime > cfs_b->quota) { + cfs_b->burst_time += runtime - cfs_b->quota; + cfs_b->nr_burst++; + } + } + + overrun = min(overrun, cfs_b->max_overrun); + refill = cfs_b->quota * overrun; + cfs_b->runtime += refill; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); + + cfs_b->runtime_at_period_start = cfs_b->runtime; + } } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -4372,20 +4418,20 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) } /* returns 0 on failure to allocate runtime */ -static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, + struct cfs_rq *cfs_rq, u64 target_runtime) { - struct task_group *tg = cfs_rq->tg; - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); - u64 amount = 0, min_amount; + u64 min_amount, amount = 0; + + lockdep_assert_held(&cfs_b->lock); /* note: this is a positive sum as runtime_remaining <= 0 */ - min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; + min_amount = target_runtime - cfs_rq->runtime_remaining; - raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 0); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -4393,13 +4439,25 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_b->idle = 0; } } - raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; return cfs_rq->runtime_remaining > 0; } +/* returns 0 on failure to allocate runtime */ +static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + int ret; + + raw_spin_lock(&cfs_b->lock); + ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); + raw_spin_unlock(&cfs_b->lock); + + return ret; +} + static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ @@ -4488,13 +4546,33 @@ static int tg_throttle_down(struct task_group *tg, void *data) return 0; } -static void throttle_cfs_rq(struct cfs_rq *cfs_rq) +static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, idle_task_delta, dequeue = 1; - bool empty; + + raw_spin_lock(&cfs_b->lock); + /* This will start the period timer if necessary */ + if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { + /* + * We have raced with bandwidth becoming available, and if we + * actually throttled the timer might not unthrottle us for an + * entire period. 
We additionally needed to make sure that any + * subsequent check_cfs_rq_runtime calls agree not to throttle + * us, as we may commit to do cfs put_prev+pick_next, so we ask + * for 1ns of runtime rather than just check cfs_b. + */ + dequeue = 0; + } else { + list_add_tail_rcu(&cfs_rq->throttled_list, + &cfs_b->throttled_cfs_rq); + } + raw_spin_unlock(&cfs_b->lock); + + if (!dequeue) + return false; /* Throttle no longer required. */ se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; @@ -4523,29 +4601,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) sub_nr_running(rq, task_delta); + /* + * Note: distribution will already see us throttled via the + * throttled-list. rq->lock protects completion. + */ cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); - raw_spin_lock(&cfs_b->lock); - empty = list_empty(&cfs_b->throttled_cfs_rq); - - /* - * Add to the _head_ of the list, so that an already-started - * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is - * not running add to the tail so that later runqueues don't get starved. - */ - if (cfs_b->distribute_running) - list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); - else - list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); - - /* - * If we're the first throttled task, make sure the bandwidth - * timer is running. - */ - if (empty) - start_cfs_bandwidth(cfs_b); - - raw_spin_unlock(&cfs_b->lock); + return true; } void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) @@ -4553,7 +4615,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; - int enqueue = 1; long task_delta, idle_task_delta; se = cfs_rq->tg->se[cpu_of(rq)]; @@ -4577,23 +4638,55 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) idle_task_delta = cfs_rq->idle_h_nr_running; for_each_sched_entity(se) { if (se->on_rq) - enqueue = 0; - + break; cfs_rq = cfs_rq_of(se); - if (enqueue) - enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); + cfs_rq->h_nr_running += task_delta; cfs_rq->idle_h_nr_running += idle_task_delta; + /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) + goto unthrottle_throttle; + } + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + + cfs_rq->h_nr_running += task_delta; + cfs_rq->idle_h_nr_running += idle_task_delta; + + + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) + goto unthrottle_throttle; + + /* + * One parent has been throttled and cfs_rq removed from the + * list. Add it back to not break the leaf list. + */ + if (throttled_hierarchy(cfs_rq)) + list_add_leaf_cfs_rq(cfs_rq); + } + + /* At this point se is NULL and we are at root level*/ + add_nr_running(rq, task_delta); + +unthrottle_throttle: + /* + * The cfs_rq_throttled() breaks in the above iteration can result in + * incomplete leaf list maintenance, resulting in triggering the + * assertion below. 
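Since the locking moved, the new shape of throttle_cfs_rq() is worth restating. A simplified sketch of the control flow established by the hunks above (statistics and the dequeue walk omitted; not the patch's literal code):

    static bool try_throttle(struct cfs_bandwidth *cfs_b, struct cfs_rq *cfs_rq)
    {
            bool throttle;

            raw_spin_lock(&cfs_b->lock);
            /*
             * Asking for a token 1ns both starts the period timer when
             * needed and detects bandwidth that was refilled concurrently;
             * throttling after losing that race could strand the cfs_rq
             * for an entire period.
             */
            throttle = !__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1);
            if (throttle)
                    list_add_tail_rcu(&cfs_rq->throttled_list,
                                      &cfs_b->throttled_cfs_rq);
            raw_spin_unlock(&cfs_b->lock);

            return throttle;   /* false: raced with a refill, stay runnable */
    }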
+ */ + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + + if (list_add_leaf_cfs_rq(cfs_rq)) break; } assert_list_leaf_cfs_rq(rq); - if (!se) - add_nr_running(rq, task_delta); - /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); @@ -4665,7 +4758,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u if (cfs_b->idle && !throttled) goto out_deactivate; - __refill_cfs_bandwidth_runtime(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b, overrun); if (!throttled) { /* mark as potentially idle for the upcoming period */ @@ -4893,8 +4986,7 @@ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) if (cfs_rq_throttled(cfs_rq)) return true; - throttle_cfs_rq(cfs_rq); - return true; + return throttle_cfs_rq(cfs_rq); } static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) @@ -4908,6 +5000,7 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) } extern const u64 max_cfs_quota_period; +extern const u64 max_cfs_runtime; static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { @@ -4924,6 +5017,8 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) if (!overrun) break; + idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); + if (++count > 3) { u64 new, old = ktime_to_ns(cfs_b->period); @@ -4935,7 +5030,18 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) new = old * 2; if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); - cfs_b->quota *= 2; + cfs_b->quota = min(cfs_b->quota * 2, + max_cfs_runtime); + + cfs_b->buffer = min(cfs_b->quota + cfs_b->burst, + max_cfs_runtime); + /* + * Add 1 in case max_overrun becomes 0. + * 0 max_overrun will cause no runtime being + * refilled in __refill_cfs_bandwidth_runtime(). + */ + cfs_b->max_overrun >>= 1; + cfs_b->max_overrun++; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -4953,8 +5059,6 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) /* reset count so we don't come right back in here */ count = 0; } - - idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); } if (idle) cfs_b->period_active = 0; @@ -4969,6 +5073,8 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + cfs_b->burst = 0; + cfs_b->buffer = RUNTIME_INF; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); @@ -4985,16 +5091,26 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) INIT_LIST_HEAD(&cfs_rq->throttled_list); } -void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); + + /* + * When period timer stops, quota for the following period is not + * refilled, however period timer is already forwarded. We should + * accumulate quota once more than overrun here. 
+ */ + if (!init) + __refill_cfs_bandwidth_runtime(cfs_b, overrun + 1); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -5191,6 +5307,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; int idle_h_nr_running = task_has_idle_policy(p); + int task_new = !(flags & ENQUEUE_WAKEUP); /* * The code below (indirectly) updates schedutil which looks at @@ -5214,32 +5331,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); - /* - * end evaluation on encountering a throttled cfs_rq - * - * note: in the case of encountering a throttled cfs_rq we will - * post the final h_nr_running increment below. - */ - if (cfs_rq_throttled(cfs_rq)) - break; cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) + goto enqueue_throttle; + flags = ENQUEUE_WAKEUP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); - cfs_rq->h_nr_running++; - cfs_rq->idle_h_nr_running += idle_h_nr_running; - - if (cfs_rq_throttled(cfs_rq)) - break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); + + cfs_rq->h_nr_running++; + cfs_rq->idle_h_nr_running += idle_h_nr_running; + + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) + goto enqueue_throttle; + + /* + * One parent has been throttled and cfs_rq removed from the + * list. Add it back to not break the leaf list. + */ + if (throttled_hierarchy(cfs_rq)) + list_add_leaf_cfs_rq(cfs_rq); } +enqueue_throttle: if (!se) { add_nr_running(rq, 1); /* @@ -5256,7 +5379,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) * into account, but that is not straightforward to implement, * and the following generally works well enough in practice. */ - if (flags & ENQUEUE_WAKEUP) + if (!task_new) update_overutilized_status(rq); } @@ -5299,17 +5422,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); - /* - * end evaluation on encountering a throttled cfs_rq - * - * note: in the case of encountering a throttled cfs_rq we will - * post the final h_nr_running decrement below. 
- */ - if (cfs_rq_throttled(cfs_rq)) - break; cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) + goto dequeue_throttle; + /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { /* Avoid re-evaluating load for this entity: */ @@ -5327,16 +5446,20 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); - cfs_rq->h_nr_running--; - cfs_rq->idle_h_nr_running -= idle_h_nr_running; - - if (cfs_rq_throttled(cfs_rq)) - break; update_load_avg(cfs_rq, se, UPDATE_TG); update_cfs_group(se); + + cfs_rq->h_nr_running--; + cfs_rq->idle_h_nr_running -= idle_h_nr_running; + + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(cfs_rq)) + goto dequeue_throttle; + } +dequeue_throttle: if (!se) sub_nr_running(rq, 1); @@ -5473,6 +5596,9 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) if (sync && cpu_rq(this_cpu)->nr_running == 1) return this_cpu; + if (available_idle_cpu(prev_cpu)) + return prev_cpu; + return nr_cpumask_bits; } @@ -5893,7 +6019,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int /* * Scan the local SMT mask for idle CPUs. */ -static int select_idle_smt(struct task_struct *p, int target) +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { int cpu, si_cpu = -1; @@ -5901,7 +6027,8 @@ static int select_idle_smt(struct task_struct *p, int target) return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { - if (!cpumask_test_cpu(cpu, p->cpus_ptr)) + if (!cpumask_test_cpu(cpu, p->cpus_ptr) || + !cpumask_test_cpu(cpu, sched_domain_span(sd))) continue; if (available_idle_cpu(cpu)) return cpu; @@ -5919,7 +6046,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s return -1; } -static inline int select_idle_smt(struct task_struct *p, int target) +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { return -1; } @@ -6029,7 +6156,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if ((unsigned)i < nr_cpumask_bits) return i; - i = select_idle_smt(p, target); + i = select_idle_smt(p, sd, target); if ((unsigned)i < nr_cpumask_bits) return i; @@ -7235,6 +7362,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; + /* Disregard pcpu kthreads; they are where they need to be. */ + if (kthread_is_per_cpu(p)) + return 0; + if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { int cpu; @@ -7383,7 +7514,15 @@ static int detach_tasks(struct lb_env *env) if (!can_migrate_task(p, env)) goto next; - load = task_h_load(p); + /* + * Depending on the number of CPUs and tasks and the + * cgroup hierarchy, task_h_load() can return a null + * value. Make sure that env->imbalance decreases + * otherwise detach_tasks() will stop only after + * detaching up to loop_max tasks. + */ + load = max_t(unsigned long, task_h_load(p), 1); + if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; @@ -9349,7 +9488,12 @@ static void kick_ilb(unsigned int flags) { int ilb_cpu; - nohz.next_balance++; + /* + * Increase nohz.next_balance only if a full ilb is triggered but + * not if we only update stats.
+ */ + if (flags & NOHZ_BALANCE_KICK) + nohz.next_balance = jiffies+1; ilb_cpu = find_new_ilb(); @@ -9667,6 +9811,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, } } + /* + * next_balance will be updated only when there is a need. + * When the CPU is attached to null domain for ex, it will not be + * updated. + */ + if (likely(update_next_balance)) + nohz.next_balance = next_balance; + /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); @@ -9687,14 +9839,6 @@ abort: if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); - /* - * next_balance will be updated only when there is a need. - * When the CPU is attached to null domain for ex, it will not be - * updated. - */ - if (likely(update_next_balance)) - nohz.next_balance = next_balance; - return ret; } diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 131e7c86cf06..3f8c7867c14c 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -249,6 +249,7 @@ static void do_idle(void) } arch_cpu_idle_enter(); + rcu_nocb_flush_deferred_wakeup(); /* * In poll mode we reenable interrupts and spin. Also if we diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 168479a7d61b..46c142b69598 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -30,6 +30,23 @@ static void ipi_mb(void *info) smp_mb(); /* IPIs should be serializing but paranoid. */ } +static void ipi_sync_core(void *info) +{ + /* + * The smp_mb() in membarrier after all the IPIs is supposed to + * ensure that memory on remote CPUs that occur before the IPI + * become visible to membarrier()'s caller -- see scenario B in + * the big comment at the top of this file. + * + * A sync_core() would provide this guarantee, but + * sync_core_before_usermode() might end up being deferred until + * after membarrier()'s smp_mb(). + */ + smp_mb(); /* IPIs should be serializing but paranoid. */ + + sync_core_before_usermode(); +} + static void ipi_sync_rq_state(void *info) { struct mm_struct *mm = (struct mm_struct *) info; @@ -134,6 +151,7 @@ static int membarrier_private_expedited(int flags) int cpu; cpumask_var_t tmpmask; struct mm_struct *mm = current->mm; + smp_call_func_t ipi_func = ipi_mb; if (flags & MEMBARRIER_FLAG_SYNC_CORE) { if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE)) @@ -141,6 +159,7 @@ static int membarrier_private_expedited(int flags) if (!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY)) return -EPERM; + ipi_func = ipi_sync_core; } else { if (!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) @@ -181,7 +200,7 @@ static int membarrier_private_expedited(int flags) rcu_read_unlock(); preempt_disable(); - smp_call_function_many(tmpmask, ipi_mb, NULL, 1); + smp_call_function_many(tmpmask, ipi_func, NULL, 1); preempt_enable(); free_cpumask_var(tmpmask); @@ -246,9 +265,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) } rcu_read_unlock(); - preempt_disable(); - smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1); - preempt_enable(); + on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true); free_cpumask_var(tmpmask); cpus_read_unlock(); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 8201c5a0d23d..43a224243f4d 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -9,6 +9,8 @@ int sched_rr_timeslice = RR_TIMESLICE; int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; +/* More than 4 hours if BW_SHIFT equals 20. 
*/ +static const u64 max_rt_runtime = MAX_BW; static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); @@ -2516,6 +2518,12 @@ static int tg_set_rt_bandwidth(struct task_group *tg, if (rt_period == 0) return -EINVAL; + /* + * Bound quota to defend quota against overflow during bandwidth shift. + */ + if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) + return -EINVAL; + mutex_lock(&rt_constraints_mutex); err = __rt_schedulable(tg, rt_period, rt_runtime); if (err) @@ -2633,7 +2641,9 @@ static int sched_rt_global_validate(void) return -EINVAL; if ((sysctl_sched_rt_runtime != RUNTIME_INF) && - (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) + ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || + ((u64)sysctl_sched_rt_runtime * + NSEC_PER_USEC > max_rt_runtime))) return -EINVAL; return 0; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index e5e2605778c9..406e35f0ab69 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -118,7 +118,13 @@ extern long calc_load_fold_active(struct rq *this_rq, long adjust); #ifdef CONFIG_64BIT # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) -# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) +# define scale_load_down(w) \ +({ \ + unsigned long __w = (w); \ + if (__w) \ + __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ + __w; \ +}) #else # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) # define scale_load(w) (w) @@ -241,30 +247,6 @@ struct rt_bandwidth { void __dl_clear_params(struct task_struct *p); -/* - * To keep the bandwidth of -deadline tasks and groups under control - * we need some place where: - * - store the maximum -deadline bandwidth of the system (the group); - * - cache the fraction of that bandwidth that is currently allocated. - * - * This is all done in the data structure below. It is similar to the - * one used for RT-throttling (rt_bandwidth), with the main difference - * that, since here we are only interested in admission control, we - * do not decrease any runtime while the group "executes", neither we - * need a timer to replenish it. - * - * With respect to SMP, the bandwidth is given on a per-CPU basis, - * meaning that: - * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU; - * - dl_total_bw array contains, in the i-eth element, the currently - * allocated bandwidth on the i-eth CPU. - * Moreover, groups consume bandwidth on each CPU, while tasks only - * consume bandwidth on the CPU they're running on. - * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw - * that will be shown the next time the proc or cgroup controls will - * be red. It on its turn can be changed by writing on its own - * control. - */ struct dl_bandwidth { raw_spinlock_t dl_runtime_lock; u64 dl_runtime; @@ -276,6 +258,24 @@ static inline int dl_bandwidth_enabled(void) return sysctl_sched_rt_runtime >= 0; } +/* + * To keep the bandwidth of -deadline tasks under control + * we need some place where: + * - store the maximum -deadline bandwidth of each cpu; + * - cache the fraction of bandwidth that is currently allocated in + * each root domain; + * + * This is all done in the data structure below. It is similar to the + * one used for RT-throttling (rt_bandwidth), with the main difference + * that, since here we are only interested in admission control, we + * do not decrease any runtime while the group "executes", neither we + * need a timer to replenish it. 
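Both bounds quoted in the comments ("more than 203 days" for CFS, "more than 4 hours" for RT) follow from MAX_BW, defined in the sched.h hunk below as (1ULL << (64 - BW_SHIFT)) - 1 with BW_SHIFT == 20. A standalone check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t max_bw = (1ULL << (64 - 20)) - 1;   /* MAX_BW */

            /* max_rt_runtime is MAX_BW nanoseconds: */
            printf("rt:  %.2f hours\n", (double)max_bw / 1e9 / 3600.0);
            /* max_cfs_runtime is MAX_BW microseconds: */
            printf("cfs: %.2f days\n", (double)max_bw * 1e3 / 1e9 / 86400.0);
            return 0;   /* prints ~4.89 hours and ~203.61 days */
    }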
+ * + * With respect to SMP, bandwidth is given on a per root domain basis, + * meaning that: + * - bw (< 100%) is the deadline bandwidth of each CPU; + * - total_bw is the currently allocated bandwidth in each root domain; + */ struct dl_bw { raw_spinlock_t lock; u64 bw; @@ -334,6 +334,10 @@ struct cfs_bandwidth { ktime_t period; u64 quota; u64 runtime; + u64 burst; + u64 buffer; + u64 max_overrun; + u64 runtime_at_period_start; s64 hierarchical_quota; u8 idle; @@ -347,7 +351,9 @@ struct cfs_bandwidth { /* Statistics: */ int nr_periods; int nr_throttled; + int nr_burst; u64 throttled_time; + u64 burst_time; #endif }; @@ -389,7 +395,7 @@ struct task_group { #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup *autogroup; #endif - + u64 cpuquota_aware; struct cfs_bandwidth cfs_bandwidth; #ifdef CONFIG_UCLAMP_TASK_GROUP @@ -401,6 +407,8 @@ struct task_group { struct uclamp_se uclamp[UCLAMP_CNT]; #endif + KABI_RESERVE(1); + KABI_RESERVE(2); }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -445,8 +453,7 @@ extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *parent); extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); -extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); +extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); extern void free_rt_sched_group(struct task_group *tg); @@ -575,6 +582,8 @@ struct cfs_rq { struct list_head throttled_list; #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ + KABI_RESERVE(1); + KABI_RESERVE(2); }; static inline int rt_bandwidth_enabled(void) @@ -782,6 +791,11 @@ struct root_domain { * CPUs of the rd. Protected by RCU. */ struct perf_domain __rcu *pd; + + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; extern void init_defrootdomain(void); @@ -835,6 +849,8 @@ struct uclamp_rq { unsigned int value; struct uclamp_bucket bucket[UCLAMP_BUCKETS]; }; + +DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); #endif /* CONFIG_UCLAMP_TASK */ /* @@ -971,6 +987,7 @@ struct rq { call_single_data_t hrtick_csd; #endif struct hrtimer hrtick_timer; + ktime_t hrtick_time; #endif #ifdef CONFIG_SCHEDSTATS @@ -999,6 +1016,9 @@ struct rq { /* Must be inspected within a rcu lock section */ struct cpuidle_state *idle_state; #endif + + KABI_RESERVE(1); + KABI_RESERVE(2); }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1409,6 +1429,9 @@ struct sched_group { struct sched_group_capacity *sgc; int asym_prefer_cpu; /* CPU of highest priority in group */ + KABI_RESERVE(1); + KABI_RESERVE(2); + /* * The CPUs this group covers. 
* @@ -1560,7 +1583,7 @@ enum { #undef SCHED_FEAT -#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) +#ifdef CONFIG_SCHED_DEBUG /* * To support run-time toggling of sched features, all the translation units @@ -1568,6 +1591,7 @@ enum { */ extern const_debug unsigned int sysctl_sched_features; +#ifdef CONFIG_JUMP_LABEL #define SCHED_FEAT(name, enabled) \ static __always_inline bool static_branch_##name(struct static_key *key) \ { \ @@ -1580,7 +1604,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) -#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ +#else /* !CONFIG_JUMP_LABEL */ + +#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) + +#endif /* CONFIG_JUMP_LABEL */ + +#else /* !SCHED_DEBUG */ /* * Each translation unit has its own copy of sysctl_sched_features to allow @@ -1596,7 +1626,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) -#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ +#endif /* SCHED_DEBUG */ extern struct static_key_false sched_numa_balancing; extern struct static_key_false sched_schedstats; @@ -1769,6 +1799,10 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_change_group)(struct task_struct *p, int type); #endif + KABI_RESERVE(1); + KABI_RESERVE(2); + KABI_RESERVE(3); + KABI_RESERVE(4); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -1883,6 +1917,8 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); #define BW_SHIFT 20 #define BW_UNIT (1 << BW_SHIFT) #define RATIO_SHIFT 8 +#define MAX_BW_BITS (64 - BW_SHIFT) +#define MAX_BW ((1ULL << MAX_BW_BITS) - 1) unsigned long to_ratio(u64 period, u64 runtime); extern void init_entity_runnable_average(struct sched_entity *se); @@ -2311,12 +2347,35 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} #ifdef CONFIG_UCLAMP_TASK unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); +/** + * uclamp_util_with - clamp @util with @rq and @p effective uclamp values. + * @rq: The rq to clamp against. Must not be NULL. + * @util: The util value to clamp. + * @p: The task to clamp against. Can be NULL if you want to clamp + * against @rq only. + * + * Clamps the passed @util to the max(@rq, @p) effective uclamp values. + * + * If sched_uclamp_used static key is disabled, then just return the util + * without any clamping since uclamp aggregation at the rq level in the fast + * path is disabled, rendering this operation a NOP. + * + * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It + * will return the correct effective uclamp value of the task even if the + * static key is disabled. 
+ */ static __always_inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, struct task_struct *p) { - unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); - unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); + unsigned int min_util; + unsigned int max_util; + + if (!static_branch_likely(&sched_uclamp_used)) + return util; + + min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); + max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); if (p) { min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); @@ -2338,6 +2397,19 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) { return uclamp_util_with(rq, util, NULL); } + +/* + * When uclamp is compiled in, the aggregation at rq level is 'turned off' + * by default in the fast path and only gets turned on once userspace performs + * an operation that requires it. + * + * Returns true if userspace opted-in to use uclamp and aggregation at rq level + * hence is active. + */ +static inline bool uclamp_is_used(void) +{ + return static_branch_likely(&sched_uclamp_used); +} #else /* CONFIG_UCLAMP_TASK */ static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, struct task_struct *p) @@ -2348,6 +2420,11 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) { return util; } + +static inline bool uclamp_is_used(void) +{ + return false; +} #endif /* CONFIG_UCLAMP_TASK */ #ifdef arch_scale_freq_capacity diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index ba683fe81a6e..4543af7b140c 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/sli.h> #ifdef CONFIG_SCHEDSTATS @@ -172,7 +173,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) * long it was waiting to run. We also note when it began so that we * can keep stats on how long its timeslice is. 
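Once the static key is on, the clamping that uclamp_util_with() performs reduces to the following (a sketch over plain integers; the real function derives min_util and max_util from the rq buckets and, when @p is given, the task's effective values as shown above):

    static unsigned int clamp_util(unsigned int util,
                                   unsigned int min_util,
                                   unsigned int max_util)
    {
            if (util < min_util)
                    return min_util;
            if (util > max_util)
                    return max_util;
            return util;
    }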
*/ -static void sched_info_arrive(struct rq *rq, struct task_struct *t) +static void sched_info_arrive(struct rq *rq, struct task_struct *t, struct task_struct *prev) { unsigned long long now = rq_clock(rq), delta = 0; @@ -182,8 +183,8 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) t->sched_info.run_delay += delta; t->sched_info.last_arrival = now; t->sched_info.pcount++; - rq_sched_info_arrive(rq, delta); + sli_schedlat_rundelay(t, prev, delta); } /* @@ -234,7 +235,7 @@ __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct sched_info_depart(rq, prev); if (next != rq->idle) - sched_info_arrive(rq, next); + sched_info_arrive(rq, next, prev); } static inline void @@ -249,6 +250,6 @@ sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *n # define sched_info_reset_dequeued(t) do { } while (0) # define sched_info_dequeued(rq, t) do { } while (0) # define sched_info_depart(rq, t) do { } while (0) -# define sched_info_arrive(rq, next) do { } while (0) +# define sched_info_arrive(rq, t, prev) do { } while (0) # define sched_info_switch(rq, t, next) do { } while (0) #endif /* CONFIG_SCHED_INFO */ diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1fa1e13a5944..ffaa97a8d405 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1333,7 +1333,7 @@ sd_init(struct sched_domain_topology_level *tl, sd_flags = (*tl->sd_flags)(); if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, "wrong sd_flags in topology description\n")) - sd_flags &= ~TOPOLOGY_SD_FLAGS; + sd_flags &= TOPOLOGY_SD_FLAGS; /* Apply detected topology flags */ sd_flags |= dflags; diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index c1e566a114ca..f805095ae1d0 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -232,17 +232,28 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent } EXPORT_SYMBOL(prepare_to_wait); -void -prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) +bool +prepare_to_wait_exclusive_with_ret(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) { unsigned long flags; + bool was_empty = false; wq_entry->flags |= WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&wq_head->lock, flags); - if (list_empty(&wq_entry->entry)) + if (list_empty(&wq_entry->entry)) { + was_empty = list_empty(&wq_head->head); __add_wait_queue_entry_tail(wq_head, wq_entry); + } set_current_state(state); spin_unlock_irqrestore(&wq_head->lock, flags); + return was_empty; +} + +/* Returns true if we are the first waiter in the queue, false otherwise. */ +void +prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) +{ + (void) prepare_to_wait_exclusive_with_ret(wq_head, wq_entry, state); } EXPORT_SYMBOL(prepare_to_wait_exclusive); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 614a557a0814..1d62fa2b6b91 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -37,11 +37,19 @@ #include <linux/filter.h> #include <linux/pid.h> #include <linux/ptrace.h> -#include <linux/security.h> +#include <linux/capability.h> #include <linux/tracehook.h> #include <linux/uaccess.h> #include <linux/anon_inodes.h> +/* + * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the + * wrong direction flag in the ioctl number. This is the broken one, + * which the kernel needs to keep supporting until all userspaces stop + * using the wrong command number. 
+ */ +#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64) + enum notify_state { SECCOMP_NOTIFY_INIT, SECCOMP_NOTIFY_SENT, @@ -445,8 +453,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) * behavior of privileged children. */ if (!task_no_new_privs(current) && - security_capable(current_cred(), current_user_ns(), - CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0) + !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return ERR_PTR(-EACCES); /* Allocate a new seccomp_filter */ @@ -914,6 +921,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, const bool recheck_after_trace) { BUG(); + + return -1; } #endif @@ -1168,6 +1177,7 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd, return seccomp_notify_recv(filter, buf); case SECCOMP_IOCTL_NOTIF_SEND: return seccomp_notify_send(filter, buf); + case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR: case SECCOMP_IOCTL_NOTIF_ID_VALID: return seccomp_notify_id_valid(filter, buf); default: @@ -1205,17 +1215,12 @@ static const struct file_operations seccomp_notify_ops = { .poll = seccomp_notify_poll, .release = seccomp_notify_release, .unlocked_ioctl = seccomp_notify_ioctl, + .compat_ioctl = seccomp_notify_ioctl, }; static struct file *init_listener(struct seccomp_filter *filter) { - struct file *ret = ERR_PTR(-EBUSY); - struct seccomp_filter *cur; - - for (cur = current->seccomp.filter; cur; cur = cur->prev) { - if (cur->notif) - goto out; - } + struct file *ret; ret = ERR_PTR(-ENOMEM); filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL); @@ -1242,6 +1247,31 @@ out: return ret; } +/* + * Does @new_child have a listener while an ancestor also has a listener? + * If so, we'll want to reject this filter. + * This only has to be tested for the current process, even in the TSYNC case, + * because TSYNC installs @child with the same parent on all threads. + * Note that @new_child is not hooked up to its parent at this point yet, so + * we use current->seccomp.filter. 
+ */ +static bool has_duplicate_listener(struct seccomp_filter *new_child) +{ + struct seccomp_filter *cur; + + /* must be protected against concurrent TSYNC */ + lockdep_assert_held(&current->sighand->siglock); + + if (!new_child->notif) + return false; + for (cur = current->seccomp.filter; cur; cur = cur->prev) { + if (cur->notif) + return true; + } + + return false; +} + /** * seccomp_set_mode_filter: internal function for setting seccomp filter * @flags: flags to change filter behavior @@ -1311,6 +1341,11 @@ static long seccomp_set_mode_filter(unsigned int flags, if (!seccomp_may_assign_mode(seccomp_mode)) goto out; + if (has_duplicate_listener(prepared)) { + ret = -EBUSY; + goto out; + } + ret = seccomp_attach_filter(flags, prepared); if (ret) goto out;
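The new has_duplicate_listener() check is user-visible: once any filter in the calling thread's chain owns a notification listener, attaching a second listener-backed filter fails with EBUSY at attach time. A minimal userspace sketch of that behaviour (illustration only, not part of this patch; error handling omitted):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Install a trivial allow-everything filter that requests a listener fd. */
static long install_listener_filter(void)
{
	struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
	struct sock_fprog prog = { .len = 1, .filter = &insn };

	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
}

int main(void)
{
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	printf("first:  %ld\n", install_listener_filter()); /* listener fd */
	printf("second: %ld\n", install_listener_filter()); /* -1, errno EBUSY */
	return 0;
}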
diff --git a/kernel/signal.c b/kernel/signal.c index eea748174ade..8c97fc72d78b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task) void task_join_group_stop(struct task_struct *task) { + unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; + struct signal_struct *sig = current->signal; + + if (sig->group_stop_count) { + sig->group_stop_count++; + mask |= JOBCTL_STOP_CONSUME; + } else if (!(sig->flags & SIGNAL_STOP_STOPPED)) + return; + /* Have the new thread join an on-going signal group stop */ - unsigned long jobctl = current->jobctl; - if (jobctl & JOBCTL_STOP_PENDING) { - struct signal_struct *sig = current->signal; - unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK; - unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; - if (task_set_jobctl_pending(task, signr | gstop)) { - sig->group_stop_count++; - } - } + task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); } /* @@ -1510,15 +1511,15 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, unsigned long flags; int ret = -EINVAL; + if (!valid_signal(sig)) + return ret; + clear_siginfo(&info); info.si_signo = sig; info.si_errno = errno; info.si_code = SI_ASYNCIO; *((sigval_t *)&info.si_pid) = addr; - if (!valid_signal(sig)) - return ret; - rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (!p) { @@ -1931,7 +1932,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) * This is only possible if parent == real_parent. * Check if it has changed security domain. */ - if (tsk->parent_exec_id != tsk->parent->self_exec_id) + if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) sig = SIGCHLD; } @@ -1993,8 +1994,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) sig = 0; } + /* + * Send with __send_signal as si_pid and si_uid are in the + * parent's namespaces. + */ if (valid_signal(sig) && sig) - __group_send_sig_info(sig, &info, tsk->parent); + __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); diff --git a/kernel/smp.c b/kernel/smp.c index 7dbcb402c2fc..9a957f37aef8 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -104,12 +104,12 @@ void __init call_function_init(void) * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. */ -static __always_inline void csd_lock_wait(call_single_data_t *csd) +static __always_inline void csd_lock_wait(struct __call_single_data *csd) { smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK)); } -static __always_inline void csd_lock(call_single_data_t *csd) +static __always_inline void csd_lock(struct __call_single_data *csd) { csd_lock_wait(csd); csd->flags |= CSD_FLAG_LOCK; @@ -122,7 +122,7 @@ static __always_inline void csd_lock(call_single_data_t *csd) smp_wmb(); } -static __always_inline void csd_unlock(call_single_data_t *csd) +static __always_inline void csd_unlock(struct __call_single_data *csd) { WARN_ON(!(csd->flags & CSD_FLAG_LOCK)); @@ -139,8 +139,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data); * for execution on the given CPU. data must already have * ->func, ->info, and ->flags set. */ -static int generic_exec_single(int cpu, call_single_data_t *csd, - smp_call_func_t func, void *info) +static int generic_exec_single(int cpu, struct __call_single_data *csd, + smp_call_func_t func, void *info, struct cpumask *mask) { if (cpu == smp_processor_id()) { unsigned long flags; @@ -177,7 +177,8 @@ static int generic_exec_single(int cpu, call_single_data_t *csd, * equipped to do the right thing... */ if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) - arch_send_call_function_single_ipi(cpu); + if (!mask) arch_send_call_function_single_ipi(cpu); + else __cpumask_set_cpu(cpu, mask); return 0; } @@ -305,7 +306,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, csd_lock(csd); } - err = generic_exec_single(cpu, csd, func, info); + err = generic_exec_single(cpu, csd, func, info, NULL); if (wait) csd_lock_wait(csd); @@ -332,7 +333,7 @@ EXPORT_SYMBOL(smp_call_function_single); * NOTE: Be careful, there is unfortunately no current debugging facility to * validate the correctness of this serialization. */ -int smp_call_function_single_async(int cpu, call_single_data_t *csd) +int smp_call_function_single_async(int cpu, struct __call_single_data *csd) { int err = 0; @@ -345,13 +346,48 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd) csd->flags = CSD_FLAG_LOCK; smp_wmb(); - err = generic_exec_single(cpu, csd, csd->func, csd->info); + err = generic_exec_single(cpu, csd, csd->func, csd->info, NULL); preempt_enable(); return err; } EXPORT_SYMBOL_GPL(smp_call_function_single_async); +/** + * smp_call_function_many_async(): Run an asynchronous function on a + * specific CPU. + * @cpu: The CPU to run on. + * @csd: Pre-allocated and setup data structure + * @mask: CPU mask; when non-NULL, only mark @cpu there, do not send an IPI + * + * Like smp_call_function_single(), but the call is asynchronous and + * can thus be done from contexts with disabled interrupts. + * + * The caller passes its own pre-allocated data structure + * (ie: embedded in an object) and is responsible for synchronizing it + * such that the IPIs performed on the @csd are strictly serialized. + * + * NOTE: Be careful, there is unfortunately no current debugging facility to + * validate the correctness of this serialization. + */ +int smp_call_function_many_async(int cpu, call_single_data_t *csd, struct cpumask *mask) +{ + int err = 0; + + /* We could deadlock if we have to wait here with interrupts disabled! */ + if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK)) + csd_lock_wait(csd); + + csd->flags = CSD_FLAG_LOCK; + smp_wmb(); + + err = generic_exec_single(cpu, csd, csd->func, csd->info, mask); + + return err; +} +EXPORT_SYMBOL_GPL(smp_call_function_many_async); + + /* * smp_call_function_any - Run a function on any of the given cpus * @mask: The mask of cpus it can run on.
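The @mask plumbing above turns per-CPU IPIs into a deferred batch: generic_exec_single() only marks the target CPU when @mask is non-NULL, so a caller can queue several csds and kick all marked CPUs afterwards. The caller side is not part of this hunk; the following is a sketch of the intended pattern, assuming the batch is flushed with the arch's mask-IPI hook (everything except smp_call_function_many_async() itself is this sketch's assumption):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(call_single_data_t, batch_csd);

static void run_on_cpus_batched(const struct cpumask *targets,
				smp_call_func_t func, void *info)
{
	cpumask_var_t ipi_mask;
	int cpu;

	if (!zalloc_cpumask_var(&ipi_mask, GFP_KERNEL))
		return;

	for_each_cpu(cpu, targets) {
		call_single_data_t *csd = &per_cpu(batch_csd, cpu);

		csd->func = func;
		csd->info = info;
		/* Queues the csd; marks @cpu in ipi_mask instead of an IPI. */
		smp_call_function_many_async(cpu, csd, ipi_mask);
	}

	/* One IPI covering every CPU whose queue was previously empty. */
	if (!cpumask_empty(ipi_mask))
		arch_send_call_function_ipi_mask(ipi_mask);
	free_cpumask_var(ipi_mask);
}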
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 2efe1e206167..f25208e8df83 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) kfree(td); return PTR_ERR(tsk); } + kthread_set_per_cpu(tsk, cpu); /* * Park the thread so that it could start right on the CPU * when it is available. diff --git a/kernel/sys.c b/kernel/sys.c index 40dee3547dc4..30eb78aba736 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1317,11 +1317,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) { - struct oldold_utsname tmp = {}; + struct oldold_utsname tmp; if (!name) return -EFAULT; + memset(&tmp, 0, sizeof(tmp)); + down_read(&uts_sem); memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
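The olduname change above is about structure padding: an empty-brace initializer is only guaranteed to zero the named members, while padding bytes may hold unspecified values and can leak stale stack contents when the whole struct is later copied to user space; memset() clears every byte. A standalone illustration (the struct here is hypothetical, not from the patch):

#include <string.h>

struct padded {
	char tag[9];	/* on 64-bit, typically followed by 7 padding bytes */
	long value;
};

void zero_both_ways(struct padded *out)
{
	struct padded a = { };	/* members zeroed; padding unspecified */

	memset(out, 0, sizeof(*out));	/* every byte zeroed, padding included */
	(void)a;
}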
diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c new file mode 100644 index 000000000000..2a63241a8453 --- /dev/null +++ b/kernel/sysctl-test.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test of proc sysctl. + */ + +#include <kunit/test.h> +#include <linux/sysctl.h> + +#define KUNIT_PROC_READ 0 +#define KUNIT_PROC_WRITE 1 + +static int i_zero; +static int i_one_hundred = 100; + +/* + * Test that proc_dointvec will not try to use a NULL .data field even when the + * length is non-zero. + */ +static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test) +{ + struct ctl_table null_data_table = { + .procname = "foo", + /* + * Here we are testing that proc_dointvec behaves correctly when + * we give it a NULL .data field. Normally this would point to a + * piece of memory where the value would be stored. + */ + .data = NULL, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + /* + * proc_dointvec expects a buffer in user space, so we allocate one. We + * also need to cast it to __user so sparse doesn't get mad. + */ + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + size_t len; + loff_t pos; + + /* + * We don't care what the starting length is since proc_dointvec should + * not try to read because .data is NULL. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, + KUNIT_PROC_READ, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + /* + * See above. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, + KUNIT_PROC_WRITE, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Similar to the previous test, we create a struct ctl_table that has a .data + * field that proc_dointvec cannot do anything with; however, this time it is + * because we tell proc_dointvec that the size is 0. + */ +static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test) +{ + int data = 0; + struct ctl_table data_maxlen_unset_table = { + .procname = "foo", + .data = &data, + /* + * So .data is no longer NULL, but we tell proc_dointvec its + * length is 0, so it still shouldn't try to use it. + */ + .maxlen = 0, + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + size_t len; + loff_t pos; + + /* + * As before, we don't care what buffer length is because proc_dointvec + * cannot do anything because its internal .data buffer has zero length. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, + KUNIT_PROC_READ, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + /* + * See previous comment. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, + KUNIT_PROC_WRITE, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Here we provide a valid struct ctl_table, but we try to read and write from + * it using a buffer of zero length, so it should still fail in a similar way as + * before. + */ +static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + /* + * However, now our read/write buffer has zero length. + */ + size_t len = 0; + loff_t pos; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Test that proc_dointvec refuses to read when the file position is non-zero. + */ +static void sysctl_test_api_dointvec_table_read_but_position_set( + struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + /* + * We don't care about our buffer length because we start off with a + * non-zero file position. + */ + size_t len = 1234; + /* + * proc_dointvec should refuse to read into the buffer since the file + * pos is non-zero. + */ + loff_t pos = 1; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Test that we can read a two digit number in a sufficiently sized buffer. + * Nothing fancy. + */ +static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t len = 4; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + /* Store 13 in the data field. */ + *((int *)table.data) = 13; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, (size_t)3, len); + buffer[len] = '\0'; + /* And we read 13 back out. */ + KUNIT_EXPECT_STREQ(test, "13\n", buffer); +}
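The happy-path read tests above pin down the user-visible format: proc_dointvec renders the integer as decimal text with a trailing newline, which is exactly what a read of any integer sysctl file returns. A userspace analogue (a sketch; /proc/sys/kernel/panic is just a convenient integer sysctl to demonstrate with):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = { 0 };
	int fd = open("/proc/sys/kernel/panic", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf) - 1) < 0)
		return 1;
	/* e.g. "13\n" parses back to the integer 13 */
	printf("value: %d\n", atoi(buf));
	close(fd);
	return 0;
}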
+ +/* + * Same as previous test, just now with negative numbers. + */ +static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t len = 5; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + *((int *)table.data) = -16; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, (size_t)4, len); + buffer[len] = '\0'; + KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer); +} + +/* + * Test that a simple positive write works. + */ +static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + char input[] = "9"; + size_t len = sizeof(input) - 1; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + + memcpy(buffer, input, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, 9, *((int *)table.data)); +} + +/* + * Same as previous test, but now with negative numbers. + */ +static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + char input[] = "-9"; + size_t len = sizeof(input) - 1; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + + memcpy(buffer, input, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, -9, *((int *)table.data)); +} + +/* + * Test that writing a value smaller than the minimum possible value is not + * allowed. + */ +static void sysctl_test_api_dointvec_write_single_less_int_min( + struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t max_len = 32, len = max_len; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, max_len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + unsigned long abs_of_less_than_min = (unsigned long)INT_MAX + - (INT_MAX + INT_MIN) + 1; + + /* + * We use this rigmarole to create a string that contains a value one + * less than the minimum accepted value. + */ + KUNIT_ASSERT_LT(test, + (size_t)snprintf(buffer, max_len, "-%lu", + abs_of_less_than_min), + max_len); + + KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, max_len, len); + KUNIT_EXPECT_EQ(test, 0, *((int *)table.data)); +} + +/* + * Test that writing a value greater than the maximum possible value is not + * allowed.
+ */ +static void sysctl_test_api_dointvec_write_single_greater_int_max( + struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t max_len = 32, len = max_len; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, max_len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + unsigned long greater_than_max = (unsigned long)INT_MAX + 1; + + KUNIT_ASSERT_GT(test, greater_than_max, (unsigned long)INT_MAX); + KUNIT_ASSERT_LT(test, (size_t)snprintf(buffer, max_len, "%lu", + greater_than_max), + max_len); + KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, max_len, len); + KUNIT_EXPECT_EQ(test, 0, *((int *)table.data)); +} + +static struct kunit_case sysctl_test_cases[] = { + KUNIT_CASE(sysctl_test_api_dointvec_null_tbl_data), + KUNIT_CASE(sysctl_test_api_dointvec_table_maxlen_unset), + KUNIT_CASE(sysctl_test_api_dointvec_table_len_is_zero), + KUNIT_CASE(sysctl_test_api_dointvec_table_read_but_position_set), + KUNIT_CASE(sysctl_test_dointvec_read_happy_single_positive), + KUNIT_CASE(sysctl_test_dointvec_read_happy_single_negative), + KUNIT_CASE(sysctl_test_dointvec_write_happy_single_positive), + KUNIT_CASE(sysctl_test_dointvec_write_happy_single_negative), + KUNIT_CASE(sysctl_test_api_dointvec_write_single_less_int_min), + KUNIT_CASE(sysctl_test_api_dointvec_write_single_greater_int_max), + {} +}; + +static struct kunit_suite sysctl_test_suite = { + .name = "sysctl_test", + .test_cases = sysctl_test_cases, +}; + +kunit_test_suite(sysctl_test_suite); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index fa320c937fa7..01963ced1742 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -121,6 +121,7 @@ extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; extern int sysctl_nr_trim_pages; #endif +extern int sysctl_qos_mbuf_enable; extern int sysctl_min_epoll_wait_time; extern unsigned int sysctl_softirq_accel_target; extern int sysctl_softirq_accel_mask; @@ -187,6 +188,10 @@ extern int unaligned_dump_stack; extern int no_unaligned_warning; #endif +#ifdef CONFIG_RPS +extern unsigned int sysctl_rps_using_pvipi; +#endif + #ifdef CONFIG_PROC_SYSCTL /** @@ -302,6 +307,7 @@ int accept_info_flag; int sendto_info_flag; int recvfrom_info_flag; int execve_info_flag; +extern int container_cpuquota_aware; /* The default sysctl tables: */ @@ -357,7 +363,49 @@ static int min_extfrag_threshold; static int max_extfrag_threshold = 1000; #endif + +#ifdef CONFIG_BPF_SYSCALL +static int bpf_stats_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct static_key *key = (struct static_key *)table->data; + static int saved_val; + int val, ret; + struct ctl_table tmp = { + .data = &val, + .maxlen = sizeof(val), + .mode = table->mode, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }; + + if (write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + mutex_lock(&bpf_stats_enabled_mutex); + val = saved_val; + ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + if (write && !ret && val != saved_val) { + if (val) + static_key_slow_inc(key); + else + static_key_slow_dec(key); + saved_val = val; + } + mutex_unlock(&bpf_stats_enabled_mutex); + return ret; +} +#endif + static struct ctl_table kern_table[] = { + { + .procname = "container_cpuquota_aware", + .data = &container_cpuquota_aware, + 
.maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "connect_info_switch", .data = &connect_info_flag, @@ -409,6 +457,17 @@ static struct ctl_table kern_table[] = { .extra1 = &min_softirq_accel_mask, .extra2 = &max_softirq_accel_mask, }, +#ifdef CONFIG_RPS + { + .procname = "rps_using_pvipi", + .data = &sysctl_rps_using_pvipi, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif #ifdef CONFIG_PID_NS { .procname = "watch_host_pid", @@ -603,6 +662,15 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "cpu_qos_cfs_bw_burst_enabled", + .data = &sysctl_sched_cfs_bw_burst_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #endif #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) { @@ -1390,7 +1458,7 @@ static struct ctl_table kern_table[] = { .data = &bpf_stats_enabled_key.key, .maxlen = sizeof(bpf_stats_enabled_key), .mode = 0644, - .proc_handler = proc_do_static_key, + .proc_handler = bpf_stats_handler, }, #endif #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) @@ -1433,6 +1501,15 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "qos_mbuf_enable", + .data = &sysctl_qos_mbuf_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; @@ -1605,6 +1682,24 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .procname = "pagecache_limit_global", + .data = &vm_pagecache_limit_global, + .maxlen = sizeof(vm_pagecache_limit_global), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "pagecache_limit_retry_times", + .data = &vm_pagecache_limit_retry_times, + .maxlen = sizeof(vm_pagecache_limit_retry_times), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", @@ -1754,7 +1849,7 @@ static struct ctl_table vm_table[] = { .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, { @@ -1762,7 +1857,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ @@ -1772,7 +1867,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #endif @@ -1782,7 +1877,7 @@ static struct ctl_table vm_table[] = { .data = &node_reclaim_mode, .maxlen = sizeof(node_reclaim_mode), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, { diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index bbd263836127..7e4c09c14f5a 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -359,6 +359,7 @@ static const struct 
bin_table bin_net_ipv4_table[] = { { CTL_INT, NET_IPV4_TCP_RETRIES1, "tcp_retries1" }, { CTL_INT, NET_IPV4_TCP_RETRIES2, "tcp_retries2" }, { CTL_INT, NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" }, + { CTL_INT, NET_IPV4_TCP_TW_TIMEOUT, "tcp_tw_timeout" }, { CTL_INT, NET_TCP_SYNCOOKIES, "tcp_syncookies" }, { CTL_INT, NET_TCP_TW_RECYCLE, "tcp_tw_recycle" }, { CTL_INT, NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" }, diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index b97401f6bc23..0e96c38204a8 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -838,9 +838,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, if (flags == TIMER_ABSTIME) return -ERESTARTNOHAND; - restart->fn = alarm_timer_nsleep_restart; restart->nanosleep.clockid = type; restart->nanosleep.expires = exp; + set_restart_fn(restart, alarm_timer_nsleep_restart); return ret; } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 7f31932216a1..1f3e3a17f67e 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -547,8 +547,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base, } /* - * Recomputes cpu_base::*next_timer and returns the earliest expires_next but - * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram. + * Recomputes cpu_base::*next_timer and returns the earliest expires_next + * but does not set cpu_base::*expires_next, that is done by + * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating + * cpu_base::*expires_next right away, reprogramming logic would no longer + * work. * * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases, * those timers will get run whenever the softirq gets handled, at the end of @@ -589,6 +592,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_ return expires_next; } +static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base) +{ + ktime_t expires_next, soft = KTIME_MAX; + + /* + * If the soft interrupt has already been activated, ignore the + * soft bases. They will be handled in the already raised soft + * interrupt. + */ + if (!cpu_base->softirq_activated) { + soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT); + /* + * Update the soft expiry time. clock_settime() might have + * affected it. + */ + cpu_base->softirq_expires_next = soft; + } + + expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD); + /* + * If a softirq timer is expiring first, update cpu_base->next_timer + * and program the hardware with the soft expiry time. + */ + if (expires_next > soft) { + cpu_base->next_timer = cpu_base->softirq_next_timer; + expires_next = soft; + } + + return expires_next; +} + static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) { ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; @@ -629,23 +663,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) { ktime_t expires_next; - /* - * Find the current next expiration time. - */ - expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); - - if (cpu_base->next_timer && cpu_base->next_timer->is_soft) { - /* - * When the softirq is activated, hrtimer has to be - * programmed with the first hard hrtimer because soft - * timer interrupt could occur too late. 
- */ - if (cpu_base->softirq_activated) - expires_next = __hrtimer_get_next_event(cpu_base, - HRTIMER_ACTIVE_HARD); - else - cpu_base->softirq_expires_next = expires_next; - } + expires_next = hrtimer_update_next_event(cpu_base); if (skip_equal && expires_next == cpu_base->expires_next) return; @@ -1640,8 +1658,8 @@ retry: __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); - /* Reevaluate the clock bases for the next expiry */ - expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); + /* Reevaluate the clock bases for the [soft] next expiry */ + expires_next = hrtimer_update_next_event(cpu_base); /* * Store the new expiry value so the migration code can verify * against it. @@ -1935,9 +1953,9 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp, } restart = &current->restart_block; - restart->fn = hrtimer_nanosleep_restart; restart->nanosleep.clockid = t.timer.base->clockid; restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); + set_restart_fn(restart, hrtimer_nanosleep_restart); out: destroy_hrtimer_on_stack(&t.timer); return ret; } diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 77f1e5635cc1..62dc9757118c 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -147,10 +147,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, u64 oval, nval, ointerval, ninterval; struct cpu_itimer *it = &tsk->signal->it[clock_id]; - /* - * Use the to_ktime conversion because that clamps the maximum - * value to KTIME_MAX and avoid multiplication overflows. - */ nval = ktime_to_ns(timeval_to_ktime(value->it_value)); ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval)); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 42d512fcfda2..eacb0ca30193 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1335,8 +1335,8 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags, if (flags & TIMER_ABSTIME) return -ERESTARTNOHAND; - restart_block->fn = posix_cpu_nsleep_restart; restart_block->nanosleep.clockid = which_clock; + set_restart_fn(restart_block, posix_cpu_nsleep_restart); } return error; }
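The alarmtimer, hrtimer and posix-cpu-timers hunks in this series all replace a bare restart->fn assignment with set_restart_fn(), placed after the other nanosleep fields are populated. The header side is outside this diff; the upstream helper is roughly the following, pairing the function-pointer store with an arch hook so architectures can stash extra restart state (a sketch from the upstream definition; treat details as assumptions):

#include <linux/restart_block.h>

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
				  long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);	/* e.g. x86 records compat status */
	return -ERESTART_RESTARTBLOCK;
}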
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 0ec5b7a1d769..97d4a9dcf339 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -1169,8 +1169,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock, err = do_clock_adjtime(which_clock, &ktx); - if (err >= 0) - err = put_old_timex32(utp, &ktx); + if (err >= 0 && put_old_timex32(utp, &ktx)) + return -EFAULT; return err; } diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index dbd69052eaa6..a5538dd76a81 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -207,7 +207,8 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) if (sched_clock_timer.function != NULL) { /* update timeout for clock wrap */ - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); + hrtimer_start(&sched_clock_timer, cd.wrap_kt, + HRTIMER_MODE_REL_HARD); } r = rate; @@ -251,9 +252,9 @@ void __init generic_sched_clock_init(void) * Start the timer to keep sched_clock() properly updated and * sets the initial epoch. */ - hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); sched_clock_timer.function = sched_clock_poll; - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); } /* @@ -290,7 +291,7 @@ void sched_clock_resume(void) struct clock_read_data *rd = &cd.read_data[0]; rd->epoch_cyc = cd.actual_read_sched_clock(); - hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD); rd->read_sched_clock = cd.actual_read_sched_clock; } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 59225b484e4e..7e5d3524e924 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -11,6 +11,7 @@ #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> +#include <linux/nmi.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> @@ -558,6 +559,7 @@ void tick_unfreeze(void) trace_suspend_resume(TPS("timekeeping_freeze"), smp_processor_id(), false); } else { + touch_softlockup_watchdog(); tick_resume_local(); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 4eac43db8148..cd1aa98b33aa 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -916,13 +916,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) */ if (tick_do_timer_cpu == cpu) return false; - /* - * Boot safety: make sure the timekeeping duty has been - * assigned before entering dyntick-idle mode, - * tick_do_timer_cpu is TICK_DO_TIMER_BOOT - */ - if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT)) - return false; /* Should not happen for nohz-full */ if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ca69290bee2a..4fc2af4367a7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1005,9 +1005,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) return -EOVERFLOW; tmp *= mult; - rem *= mult; - do_div(rem, div); + rem = div64_u64(rem * mult, div); *base = tmp + rem; return 0; }
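The scale64_check_overflow() change above is easy to misread as a pure cleanup, but it fixes a truncation: do_div() takes a 32-bit divisor, so a 64-bit div had its upper half silently dropped, whereas div64_u64() divides by the full 64-bit value (the earlier fls64() check already guarantees rem * mult cannot overflow). A minimal contrast of the two helpers (sketch, not from the patch):

#include <linux/math64.h>
#include <asm/div64.h>

static u64 scaled_rem_broken(u64 rem, u64 mult, u64 div)
{
	rem *= mult;
	do_div(rem, div);	/* divisor truncated to its low 32 bits */
	return rem;
}

static u64 scaled_rem_fixed(u64 rem, u64 mult, u64 div)
{
	return div64_u64(rem * mult, div);	/* full 64-bit divisor */
}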
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 4820823515e9..87fa73cdb90f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,6 +43,7 @@ #include <linux/sched/debug.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/random.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -522,8 +523,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk) * Force expire obscenely large timeouts to expire at the * capacity limit of the wheel. */ - if (expires >= WHEEL_TIMEOUT_CUTOFF) - expires = WHEEL_TIMEOUT_MAX; + if (delta >= WHEEL_TIMEOUT_CUTOFF) + expires = clk + WHEEL_TIMEOUT_MAX; idx = calc_index(expires, LVL_DEPTH - 1); } @@ -585,7 +586,15 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) * Set the next expiry time and kick the CPU so it can reevaluate the * wheel: */ - base->next_expiry = timer->expires; + if (time_before(timer->expires, base->clk)) { + /* + * Prevent forward_timer_base() from moving the base->clk + * backward + */ + base->next_expiry = base->clk; + } else { + base->next_expiry = timer->expires; + } wake_up_nohz_cpu(base->cpu); } @@ -897,10 +906,13 @@ static inline void forward_timer_base(struct timer_base *base) * If the next expiry value is > jiffies, then we fast forward to * jiffies otherwise we forward to the next expiry value. */ - if (time_after(base->next_expiry, jnow)) + if (time_after(base->next_expiry, jnow)) { base->clk = jnow; - else + } else { + if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) + return; base->clk = base->next_expiry; + } #endif }
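The three timer.c hunks above share one invariant: base->clk may only move forward, so any expiry fed to the wheel has to stay inside the window [base->clk, base->clk + WHEEL_TIMEOUT_MAX]. The old clamp used the absolute value WHEEL_TIMEOUT_MAX, which already lies in the past once jiffies itself has advanced beyond it. A condensed sketch of the corrected clamp (illustration, not from the patch):

static unsigned long wheel_clamp_expires(unsigned long expires,
					 unsigned long clk)
{
	unsigned long delta = expires - clk;

	if ((long)delta < 0)			/* already expired */
		return clk;
	if (delta >= WHEEL_TIMEOUT_CUTOFF)	/* absurdly far out */
		return clk + WHEEL_TIMEOUT_MAX;	/* furthest future slot */
	return expires;
}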
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e08527f50d2a..9fa01dad655b 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -371,7 +371,6 @@ config PROFILE_ANNOTATED_BRANCHES config PROFILE_ALL_BRANCHES bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING - imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives help This tracer profiles all branch conditions. Every if () taken in the kernel is recorded whether it hit or miss. @@ -479,7 +478,7 @@ config KPROBE_EVENTS config KPROBE_EVENTS_ON_NOTRACE bool "Do NOT protect notrace function from kprobe events" depends on KPROBE_EVENTS - depends on KPROBES_ON_FTRACE + depends on DYNAMIC_FTRACE default n help This is only for the developers who want to debug ftrace itself diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e7e483cdbea6..884333b9fc76 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -3,6 +3,9 @@ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/blktrace_api.h> @@ -495,6 +498,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, */ strreplace(buts->name, '/', '_'); + /* + * bdev can be NULL, as with scsi-generic, this is as helpful as + * we can be. + */ + if (q->blk_trace) { + pr_warn("Concurrent blktraces are not allowed on %s\n", + buts->name); + return -EBUSY; + } + bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; @@ -508,12 +521,32 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!bt->msg_data) goto err; - ret = -ENOENT; - - dir = debugfs_lookup(buts->name, blk_debugfs_root); - if (!dir) +#ifdef CONFIG_BLK_DEBUG_FS + /* + * When tracing whole make_request drivers (multiqueue) block devices, + * reuse the existing debugfs directory created by the block layer on + * init. For request-based block devices, all partitions block devices, + * and scsi-generic block devices we create a temporary new debugfs + * directory that will be removed once the trace ends. + */ + if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains) + dir = q->debugfs_dir; + else +#endif bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); + /* + * As blktrace relies on debugfs for its interface, the debugfs directory + * is required, contrary to the usual mantra of not checking for debugfs + * files or directories. + */ + if (IS_ERR_OR_NULL(dir)) { + pr_warn("debugfs_dir not present for %s so skipping\n", + buts->name); + ret = -ENOENT; + goto err; + } + bt->dev = dev; atomic_set(&bt->dropped, 0); INIT_LIST_HEAD(&bt->running_list); @@ -552,8 +585,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, ret = 0; err: - if (dir && !bt->dir) - dput(dir); if (ret) blk_trace_free(bt); return ret; @@ -999,8 +1030,10 @@ static void blk_add_trace_split(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, - BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), - &rpdu, blk_trace_bio_get_cgid(q, bio)); + BLK_TA_SPLIT, + blk_status_to_errno(bio->bi_status), + sizeof(rpdu), &rpdu, + blk_trace_bio_get_cgid(q, bio)); } rcu_read_unlock(); } @@ -1037,7 +1070,8 @@ static void blk_add_trace_bio_remap(void *ignore, r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, - bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, + blk_status_to_errno(bio->bi_status), sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); rcu_read_unlock(); } @@ -1259,21 +1293,10 @@ static inline __u16 t_error(const struct trace_entry *ent) static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) { - const __u64 *val = pdu_start(ent, has_cg); + const __be64 *val = pdu_start(ent, has_cg); return be64_to_cpu(*val); } -static void get_pdu_remap(const struct trace_entry *ent, - struct blk_io_trace_remap *r, bool has_cg) -{ - const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - __u64 sector_from = __r->sector_from; - - r->device_from = be32_to_cpu(__r->device_from); - r->device_to = be32_to_cpu(__r->device_to); - r->sector_from = be64_to_cpu(sector_from); -} - typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, bool has_cg); @@ -1399,13 +1422,13 @@ static void blk_log_with_error(struct trace_seq *s, static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { - struct blk_io_trace_remap r = { .device_from = 0, }; + const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - get_pdu_remap(ent, &r, has_cg); trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), - MAJOR(r.device_from), MINOR(r.device_from), - (unsigned long long)r.sector_from); + MAJOR(be32_to_cpu(__r->device_from)), + MINOR(be32_to_cpu(__r->device_from)), + be64_to_cpu(__r->sector_from)); } static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 89bdac61233d..5f91b242e221 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -13,6 +13,7 @@ #include <linux/kprobes.h> #include <linux/syscalls.h> #include <linux/error-injection.h> +#include <linux/btf_ids.h> #include <asm/tlb.h> @@ -138,24 +139,140 @@ static const struct bpf_func_proto bpf_override_return_proto = { }; #endif -BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) +BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, + const void __user *, unsafe_ptr) { - int ret; + int ret
= probe_user_read(dst, unsafe_ptr, size); - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; - - ret = probe_kernel_read(dst, unsafe_ptr, size); if (unlikely(ret < 0)) -out: memset(dst, 0, size); return ret; } -static const struct bpf_func_proto bpf_probe_read_proto = { - .func = bpf_probe_read, +static const struct bpf_func_proto bpf_probe_read_user_proto = { + .func = bpf_probe_read_user, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, + const void __user *, unsafe_ptr) +{ + int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size); + + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; +} + +static const struct bpf_func_proto bpf_probe_read_user_str_proto = { + .func = bpf_probe_read_user_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static __always_inline int +bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) : + probe_kernel_read_strict(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +out: + memset(dst, 0, size); + return ret; +} + +BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_proto = { + .func = bpf_probe_read_kernel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true); +} + +static const struct bpf_func_proto bpf_probe_read_compat_proto = { + .func = bpf_probe_read_compat, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static __always_inline int +bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + /* + * The strncpy_from_unsafe_*() call will likely not fill the entire + * buffer, but that's okay in this circumstance as we're probing + * arbitrary memory anyway similar to bpf_probe_read_*() and might + * as well probe the stack. Thus, memory is explicitly cleared + * only in error case, so that improper users ignoring return + * code altogether don't copy garbage; otherwise length of string + * is returned that can be used for bpf_perf_event_output() et al. + */ + ret = compat ? 
strncpy_from_unsafe(dst, unsafe_ptr, size) : + strncpy_from_unsafe_strict(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +out: + memset(dst, 0, size); + return ret; +} + +BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { + .func = bpf_probe_read_kernel_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true); +} + +static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { + .func = bpf_probe_read_compat_str, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, @@ -201,6 +318,9 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = { static const struct bpf_func_proto *bpf_get_probe_write_proto(void) { + if (!capable(CAP_SYS_ADMIN)) + return NULL; + pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", current->comm, task_pid_nr(current)); @@ -209,7 +329,7 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void) /* * Only limited trace_printk() conversion specifiers allowed: - * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s + * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s */ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, u64, arg2, u64, arg3) @@ -247,6 +367,12 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, i++; } else if (fmt[i] == 'p' || fmt[i] == 's') { mod[fmt_cnt]++; + + if (fmt[i + 1] == 'B') { + i++; + goto fmt_next; + } + /* disallow any further format extensions */ if (fmt[i + 1] != 0 && !isspace(fmt[i + 1]) && @@ -289,6 +415,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x') return -EINVAL; +fmt_next: fmt_cnt++; } @@ -343,6 +470,218 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) return &bpf_trace_printk_proto; } +#define MAX_SEQ_PRINTF_VARARGS 12 +#define MAX_SEQ_PRINTF_MAX_MEMCPY 6 +#define MAX_SEQ_PRINTF_STR_LEN 128 + +struct bpf_seq_printf_buf { + char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN]; +}; +static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf); +static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used); + +BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, + const void *, data, u32, data_len) +{ + int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0; + int i, buf_used, copy_size, num_args; + u64 params[MAX_SEQ_PRINTF_VARARGS]; + struct bpf_seq_printf_buf *bufs; + const u64 *args = data; + + buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used); + if (WARN_ON_ONCE(buf_used > 1)) { + err = -EBUSY; + goto out; + } + + bufs = this_cpu_ptr(&bpf_seq_printf_buf); + + /* + * bpf_check()->check_func_arg()->check_stack_boundary() + * guarantees that fmt points to bpf program stack, + * fmt_size bytes of it were initialized and fmt_size > 0 + */ + if (fmt[--fmt_size] != 0) + goto out; + + if (data_len & 7) + goto out; + + for (i = 0; i < fmt_size; i++) { + if (fmt[i] == '%') { + if (fmt[i + 1] == '%') + i++; + else if (!data || !data_len) + goto out; + } + } + + num_args 
= data_len / 8; + + /* check format string for allowed specifiers */ + for (i = 0; i < fmt_size; i++) { + /* only printable ascii for now. */ + if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { + err = -EINVAL; + goto out; + } + + if (fmt[i] != '%') + continue; + + if (fmt[i + 1] == '%') { + i++; + continue; + } + + if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) { + err = -E2BIG; + goto out; + } + + if (fmt_cnt >= num_args) { + err = -EINVAL; + goto out; + } + + /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ + i++; + + /* skip optional "[0 +-][num]" width formatting field */ + while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || + fmt[i] == ' ') + i++; + if (fmt[i] >= '1' && fmt[i] <= '9') { + i++; + while (fmt[i] >= '0' && fmt[i] <= '9') + i++; + } + + if (fmt[i] == 's') { + /* try our best to copy */ + if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { + err = -E2BIG; + goto out; + } + + err = strncpy_from_unsafe(bufs->buf[memcpy_cnt], + (void *) (long) args[fmt_cnt], + MAX_SEQ_PRINTF_STR_LEN); + if (err < 0) + bufs->buf[memcpy_cnt][0] = '\0'; + params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; + + fmt_cnt++; + memcpy_cnt++; + continue; + } + + if (fmt[i] == 'p') { + if (fmt[i + 1] == 0 || + fmt[i + 1] == 'K' || + fmt[i + 1] == 'x' || + fmt[i + 1] == 'B') { + /* just kernel pointers */ + params[fmt_cnt] = args[fmt_cnt]; + fmt_cnt++; + continue; + } + + /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ + if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') { + err = -EINVAL; + goto out; + } + if (fmt[i + 2] != '4' && fmt[i + 2] != '6') { + err = -EINVAL; + goto out; + } + + if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) { + err = -E2BIG; + goto out; + } + + + copy_size = (fmt[i + 2] == '4') ? 4 : 16; + + err = probe_kernel_read(bufs->buf[memcpy_cnt], + (void *) (long) args[fmt_cnt], + copy_size); + if (err < 0) + memset(bufs->buf[memcpy_cnt], 0, copy_size); + params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt]; + + i += 2; + fmt_cnt++; + memcpy_cnt++; + continue; + } + + if (fmt[i] == 'l') { + i++; + if (fmt[i] == 'l') + i++; + } + + if (fmt[i] != 'i' && fmt[i] != 'd' && + fmt[i] != 'u' && fmt[i] != 'x' && + fmt[i] != 'X') { + err = -EINVAL; + goto out; + } + + params[fmt_cnt] = args[fmt_cnt]; + fmt_cnt++; + } + + /* At most MAX_SEQ_PRINTF_VARARGS parameters are supported, so just + * hand all of them to seq_printf(). + */ + seq_printf(m, fmt, params[0], params[1], params[2], params[3], + params[4], params[5], params[6], params[7], params[8], + params[9], params[10], params[11]); + + err = seq_has_overflowed(m) ? -EOVERFLOW : 0; +out: + this_cpu_dec(bpf_seq_printf_buf_used); + return err; +} + +BTF_ID_LIST(bpf_seq_printf_btf_ids) +BTF_ID(struct, seq_file) + +static const struct bpf_func_proto bpf_seq_printf_proto = { + .func = bpf_seq_printf, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_PTR_TO_MEM_OR_NULL, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + .btf_id = bpf_seq_printf_btf_ids, +}; + +BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) +{ + return seq_write(m, data, len) ?
-EOVERFLOW : 0; +} + +BTF_ID_LIST(bpf_seq_write_btf_ids) +BTF_ID(struct, seq_file) + +static const struct bpf_func_proto bpf_seq_write_proto = { + .func = bpf_seq_write, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .btf_id = bpf_seq_write_btf_ids, +}; + static __always_inline int get_map_perf_counter(struct bpf_map *map, u64 flags, u64 *value, u64 *enabled, u64 *running) @@ -583,41 +922,6 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, - const void *, unsafe_ptr) -{ - int ret; - - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; - - /* - * The strncpy_from_unsafe() call will likely not fill the entire - * buffer, but that's okay in this circumstance as we're probing - * arbitrary memory anyway similar to bpf_probe_read() and might - * as well probe the stack. Thus, memory is explicitly cleared - * only in error case, so that improper users ignoring return - * code altogether don't copy garbage; otherwise length of string - * is returned that can be used for bpf_perf_event_output() et al. - */ - ret = strncpy_from_unsafe(dst, unsafe_ptr, size); - if (unlikely(ret < 0)) -out: - memset(dst, 0, size); - - return ret; -} - -static const struct bpf_func_proto bpf_probe_read_str_proto = { - .func = bpf_probe_read_str, - .gpl_only = true, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_UNINIT_MEM, - .arg2_type = ARG_CONST_SIZE_OR_ZERO, - .arg3_type = ARG_ANYTHING, -}; - struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; @@ -650,7 +954,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig) if (unlikely(!nmi_uaccess_okay())) return -EPERM; - if (in_nmi()) { + if (irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. 
*/ @@ -681,8 +985,8 @@ static const struct bpf_func_proto bpf_send_signal_proto = { .arg1_type = ARG_ANYTHING, }; -static const struct bpf_func_proto * -tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +const struct bpf_func_proto * +bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_map_lookup_elem: @@ -697,8 +1001,6 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; - case BPF_FUNC_probe_read: - return &bpf_probe_read_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_tail_call: @@ -725,16 +1027,40 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; + case BPF_FUNC_probe_read_user: + return &bpf_probe_read_user_proto; + case BPF_FUNC_probe_read_kernel: + return &bpf_probe_read_kernel_proto; + case BPF_FUNC_probe_read: + return &bpf_probe_read_compat_proto; + case BPF_FUNC_probe_read_user_str: + return &bpf_probe_read_user_str_proto; + case BPF_FUNC_probe_read_kernel_str: + return &bpf_probe_read_kernel_str_proto; case BPF_FUNC_probe_read_str: - return &bpf_probe_read_str_proto; + return &bpf_probe_read_compat_str_proto; #ifdef CONFIG_CGROUPS case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; #endif case BPF_FUNC_send_signal: return &bpf_send_signal_proto; + case BPF_FUNC_jiffies64: + return &bpf_jiffies64_proto; + case BPF_FUNC_ringbuf_output: + return &bpf_ringbuf_output_proto; + case BPF_FUNC_ringbuf_reserve: + return &bpf_ringbuf_reserve_proto; + case BPF_FUNC_ringbuf_submit: + return &bpf_ringbuf_submit_proto; + case BPF_FUNC_ringbuf_discard: + return &bpf_ringbuf_discard_proto; + case BPF_FUNC_ringbuf_query: + return &bpf_ringbuf_query_proto; + case BPF_FUNC_for_each_map_elem: + return &bpf_for_each_map_elem_proto; default: - return NULL; + return bpf_base_func_proto(func_id); } } @@ -755,7 +1081,7 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_override_return_proto; #endif default: - return tracing_func_proto(func_id, prog); + return bpf_tracing_func_proto(func_id, prog); } } @@ -865,7 +1191,7 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_stack: return &bpf_get_stack_proto_tp; default: - return tracing_func_proto(func_id, prog); + return bpf_tracing_func_proto(func_id, prog); } } @@ -931,7 +1257,7 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_perf_prog_read_value: return &bpf_perf_prog_read_value_proto; default: - return tracing_func_proto(func_id, prog); + return bpf_tracing_func_proto(func_id, prog); } } @@ -993,6 +1319,9 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; +extern const struct bpf_func_proto bpf_skb_output_proto; +extern const struct bpf_func_proto bpf_xdp_output_proto; + BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, struct bpf_map *, map, u64, flags) { @@ -1051,12 +1380,49 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_perf_event_output_proto_raw_tp; + case BPF_FUNC_skb_output: + return &bpf_skb_output_proto; case BPF_FUNC_get_stackid: return 
&bpf_get_stackid_proto_raw_tp; case BPF_FUNC_get_stack: return &bpf_get_stack_proto_raw_tp; default: - return tracing_func_proto(func_id, prog); + return bpf_tracing_func_proto(func_id, prog); + } +} + +const struct bpf_func_proto * +tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { +#ifdef CONFIG_NET + case BPF_FUNC_skb_output: + return &bpf_skb_output_proto; + case BPF_FUNC_xdp_output: + return &bpf_xdp_output_proto; + case BPF_FUNC_skc_to_tcp6_sock: + return &bpf_skc_to_tcp6_sock_proto; + case BPF_FUNC_skc_to_tcp_sock: + return &bpf_skc_to_tcp_sock_proto; + case BPF_FUNC_skc_to_tcp_timewait_sock: + return &bpf_skc_to_tcp_timewait_sock_proto; + case BPF_FUNC_skc_to_tcp_request_sock: + return &bpf_skc_to_tcp_request_sock_proto; + case BPF_FUNC_skc_to_udp6_sock: + return &bpf_skc_to_udp6_sock_proto; + case BPF_FUNC_get_task_stack: + return &bpf_get_task_stack_proto; +#endif + case BPF_FUNC_seq_printf: + return prog->expected_attach_type == BPF_TRACE_ITER ? + &bpf_seq_printf_proto : + NULL; + case BPF_FUNC_seq_write: + return prog->expected_attach_type == BPF_TRACE_ITER ? + &bpf_seq_write_proto : + NULL; + default: + return raw_tp_prog_func_proto(func_id, prog); } } @@ -1065,8 +1431,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - /* largest tracepoint in the kernel has 12 args */ - if (off < 0 || off >= sizeof(__u64) * 12) + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) return false; if (type != BPF_READ) return false; @@ -1075,6 +1440,20 @@ static bool raw_tp_prog_is_valid_access(int off, int size, return true; } +static bool tracing_prog_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + return btf_ctx_access(off, size, type, prog, info); +} + const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { .get_func_proto = raw_tp_prog_func_proto, .is_valid_access = raw_tp_prog_is_valid_access, @@ -1083,6 +1462,15 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { const struct bpf_prog_ops raw_tracepoint_prog_ops = { }; +const struct bpf_verifier_ops tracing_verifier_ops = { + .get_func_proto = tracing_prog_func_proto, + .is_valid_access = tracing_prog_is_valid_access, +}; + +const struct bpf_prog_ops tracing_prog_ops = { + .test_run = bpf_prog_test_run_tracing, +}; + static bool raw_tp_writable_prog_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, @@ -1320,10 +1708,12 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) { - struct module *mod = __module_address((unsigned long)btp); + struct module *mod; - if (mod) - module_put(mod); + preempt_disable(); + mod = __module_address((unsigned long)btp); + module_put(mod); + preempt_enable(); } static __always_inline diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 7950a0356042..888cd00174fe 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -367,7 +367,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) } if (t->ret_stack == NULL) { - atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->curr_ret_stack = -1; t->curr_ret_depth = -1; @@ -462,7 +461,6 @@ 
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); static void graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) { - atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->ftrace_timestamp = 0; /* make curr_ret_stack visible before we add the ret_stack */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9495d69460b2..4642f3e3b4bb 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1626,6 +1626,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) static struct ftrace_ops * ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); static struct ftrace_ops * +ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude); +static struct ftrace_ops * ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, @@ -1763,7 +1765,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, * to it. */ if (ftrace_rec_count(rec) == 1 && - ftrace_find_tramp_ops_any(rec)) + ftrace_find_tramp_ops_any_other(rec, ops)) rec->flags |= FTRACE_FL_TRAMP; else rec->flags &= ~FTRACE_FL_TRAMP; @@ -2191,6 +2193,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) return NULL; } +static struct ftrace_ops * +ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) +{ + struct ftrace_ops *op; + unsigned long ip = rec->ip; + + do_for_each_ftrace_op(op, ftrace_ops_list) { + + if (op == op_exclude || !op->trampoline) + continue; + + if (hash_contains_ip(ip, op->func_hash)) + return op; + } while_for_each_ftrace_op(op); + + return NULL; +} + static struct ftrace_ops * ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *op) @@ -5033,7 +5053,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file) parser = &iter->parser; if (trace_parser_loaded(parser)) { - ftrace_match_records(iter->hash, parser->buffer, parser->idx); + int enable = !(iter->flags & FTRACE_ITER_NOTRACE); + + ftrace_process_regex(iter, parser->buffer, + parser->idx, enable); } trace_parser_put(parser); @@ -5699,8 +5722,11 @@ static int referenced_filters(struct dyn_ftrace *rec) int cnt = 0; for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops_references_rec(ops, rec)) - cnt++; + if (ops_references_rec(ops, rec)) { + cnt++; + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + rec->flags |= FTRACE_FL_REGS; + } } return cnt; @@ -5877,8 +5903,8 @@ void ftrace_module_enable(struct module *mod) if (ftrace_start_up) cnt += referenced_filters(rec); - /* This clears FTRACE_FL_DISABLED */ - rec->flags = cnt; + rec->flags &= ~FTRACE_FL_DISABLED; + rec->flags += cnt; if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(rec, 1); @@ -6379,16 +6405,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, { int bit; - if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) - return; - bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; preempt_disable_notrace(); - op->func(ip, parent_ip, op, regs); + if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) + op->func(ip, parent_ip, op, regs); preempt_enable_notrace(); trace_clear_recursion(bit); @@ -6459,12 +6483,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + 
register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } else { unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 4bf050fcfe3b..728374166653 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -129,7 +129,16 @@ int ring_buffer_print_entry_header(struct trace_seq *s) #define RB_ALIGNMENT 4U #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ -#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT) + +#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS +# define RB_FORCE_8BYTE_ALIGNMENT 0 +# define RB_ARCH_ALIGNMENT RB_ALIGNMENT +#else +# define RB_FORCE_8BYTE_ALIGNMENT 1 +# define RB_ARCH_ALIGNMENT 8U +#endif + +#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX @@ -422,14 +431,16 @@ struct rb_event_info { /* * Used for which event context the event is in. - * NMI = 0 - * IRQ = 1 - * SOFTIRQ = 2 - * NORMAL = 3 + * TRANSITION = 0 + * NMI = 1 + * IRQ = 2 + * SOFTIRQ = 3 + * NORMAL = 4 * * See trace_recursive_lock() comment below for more details. */ enum { + RB_CTX_TRANSITION, RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, @@ -1717,18 +1728,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, { struct ring_buffer_per_cpu *cpu_buffer; unsigned long nr_pages; - int cpu, err = 0; + int cpu, err; /* * Always succeed at resizing a non-existent buffer: */ if (!buffer) - return size; + return 0; /* Make sure the requested buffer exists */ if (cpu_id != RING_BUFFER_ALL_CPUS && !cpumask_test_cpu(cpu_id, buffer->cpumask)) - return size; + return 0; nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); @@ -1868,7 +1879,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, } mutex_unlock(&buffer->mutex); - return size; + return 0; out_err: for_each_buffer_cpu(buffer, cpu) { @@ -2358,14 +2369,14 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, if (unlikely(info->add_timestamp)) { bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); - event = rb_add_time_stamp(event, info->delta, abs); + event = rb_add_time_stamp(event, abs ? info->delta : delta, abs); length -= RB_LEN_TIME_EXTEND; delta = 0; } event->time_delta = delta; length -= RB_EVNT_HDR_SIZE; - if (length > RB_MAX_SMALL_DATA) { + if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { event->type_len = 0; event->array[0] = length; } else @@ -2380,11 +2391,11 @@ static unsigned rb_calculate_event_length(unsigned length) if (!length) length++; - if (length > RB_MAX_SMALL_DATA) + if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) length += sizeof(event.array[0]); length += RB_EVNT_HDR_SIZE; - length = ALIGN(length, RB_ALIGNMENT); + length = ALIGN(length, RB_ARCH_ALIGNMENT); /* * In case the time delta is larger than the 27 bits for it @@ -2660,10 +2671,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) * a bit of overhead in something as critical as function tracing, * we use a bitmask trick. * - * bit 0 = NMI context - * bit 1 = IRQ context - * bit 2 = SoftIRQ context - * bit 3 = normal context. 
+ * bit 1 = NMI context + * bit 2 = IRQ context + * bit 3 = SoftIRQ context + * bit 4 = normal context. * * This works because this is the order of contexts that can * preempt other contexts. A SoftIRQ never preempts an IRQ @@ -2686,6 +2697,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) * The least significant bit can be cleared this way, and it * just so happens that it is the same bit corresponding to * the current context. + * + * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit + * is set when a recursion is detected at the current context, and if + * the TRANSITION bit is already set, it will fail the recursion. + * This is needed because there's a lag between the changing of + * interrupt context and updating the preempt count. In this case, + * a false positive will be found. To handle this, one extra recursion + * is allowed, and this is done by the TRANSITION bit. If the TRANSITION + * bit is already set, then it is considered a recursion and the function + * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. + * + * On the trace_recursive_unlock(), the TRANSITION bit will be the first + * to be cleared. Even if it wasn't the context that set it. That is, + * if an interrupt comes in while the NORMAL bit is set and the ring buffer + * is called before preempt_count() is updated, since the check will + * be on the NORMAL bit, the TRANSITION bit will then be set. If an + * NMI then comes in, it will set the NMI bit, but when the NMI code + * does the trace_recursive_unlock() it will clear the TRANSITION bit + * and leave the NMI bit set. But this is fine, because the interrupt + * code that set the TRANSITION bit will then clear the NMI bit when it + * calls trace_recursive_unlock(). If another NMI comes in, it will + * set the TRANSITION bit and continue. + * + * Note: The TRANSITION bit only handles a single transition between contexts. */ static __always_inline int @@ -2701,8 +2736,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) bit = pc & NMI_MASK ? RB_CTX_NMI : pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ; - if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) - return 1; + if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { + /* + * It is possible that this was called by transitioning + * between interrupt context, and preempt_count() has not + * been updated yet. In this case, use the TRANSITION bit.
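+ * For example (an illustrative scenario, not part of the original patch): an interrupt that fires while the NORMAL bit is set and writes to the ring buffer before preempt_count() shows HARDIRQ will trip the check above with bit still computed as the normal context; falling back to the TRANSITION bit lets that one nested write proceed, while a second recursion at the same point still fails.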
+ */ + bit = RB_CTX_TRANSITION; + if (val & (1 << (bit + cpu_buffer->nest))) + return 1; + } val |= (1 << (bit + cpu_buffer->nest)); cpu_buffer->current_context = val; @@ -2717,8 +2760,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->current_context - (1 << cpu_buffer->nest); } -/* The recursive locking above uses 4 bits */ -#define NESTED_BITS 4 +/* The recursive locking above uses 5 bits */ +#define NESTED_BITS 5 /** * ring_buffer_nest_start - Allow to trace while nested @@ -4405,6 +4448,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; + /* prevent another thread from changing buffer sizes */ + mutex_lock(&buffer->mutex); atomic_inc(&buffer->resize_disabled); atomic_inc(&cpu_buffer->record_disabled); @@ -4428,6 +4473,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&buffer->resize_disabled); + + mutex_unlock(&buffer->mutex); } EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c6ccaf6c62f7..5eaa46fe9ed6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -160,7 +160,8 @@ static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); -static void ftrace_trace_userstack(struct ring_buffer *buffer, +static void ftrace_trace_userstack(struct trace_array *tr, + struct ring_buffer *buffer, unsigned long flags, int pc); #define MAX_TRACER_SIZE 100 @@ -2139,14 +2140,13 @@ static void tracing_stop_tr(struct trace_array *tr) static int trace_save_cmdline(struct task_struct *tsk) { - unsigned pid, idx; + unsigned tpid, idx; /* treat recording of idle task as a success */ if (!tsk->pid) return 1; - if (unlikely(tsk->pid > PID_MAX_DEFAULT)) - return 0; + tpid = tsk->pid & (PID_MAX_DEFAULT - 1); /* * It's not the end of the world if we don't get @@ -2157,26 +2157,15 @@ static int trace_save_cmdline(struct task_struct *tsk) if (!arch_spin_trylock(&trace_cmdline_lock)) return 0; - idx = savedcmd->map_pid_to_cmdline[tsk->pid]; + idx = savedcmd->map_pid_to_cmdline[tpid]; if (idx == NO_CMDLINE_MAP) { idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; - /* - * Check whether the cmdline buffer at idx has a pid - * mapped. We are going to overwrite that entry so we - * need to clear the map_pid_to_cmdline. Otherwise we - * would read the new comm for the old pid. 
- */ - pid = savedcmd->map_cmdline_to_pid[idx]; - if (pid != NO_CMDLINE_MAP) - savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; - - savedcmd->map_cmdline_to_pid[idx] = tsk->pid; - savedcmd->map_pid_to_cmdline[tsk->pid] = idx; - + savedcmd->map_pid_to_cmdline[tpid] = idx; savedcmd->cmdline_idx = idx; } + savedcmd->map_cmdline_to_pid[idx] = tsk->pid; set_cmdline(idx, tsk->comm); arch_spin_unlock(&trace_cmdline_lock); @@ -2187,6 +2176,7 @@ static int trace_save_cmdline(struct task_struct *tsk) static void __trace_find_cmdline(int pid, char comm[]) { unsigned map; + int tpid; if (!pid) { strcpy(comm, "<idle>"); @@ -2198,16 +2188,16 @@ static void __trace_find_cmdline(int pid, char comm[]) return; } - if (pid > PID_MAX_DEFAULT) { - strcpy(comm, "<...>"); - return; + tpid = pid & (PID_MAX_DEFAULT - 1); + map = savedcmd->map_pid_to_cmdline[tpid]; + if (map != NO_CMDLINE_MAP) { + tpid = savedcmd->map_cmdline_to_pid[map]; + if (tpid == pid) { + strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); + return; + } } - - map = savedcmd->map_pid_to_cmdline[pid]; - if (map != NO_CMDLINE_MAP) - strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); - else - strcpy(comm, "<...>"); + strcpy(comm, "<...>"); } void trace_find_cmdline(int pid, char comm[]) @@ -2497,7 +2487,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, (entry = this_cpu_read(trace_buffered_event))) { /* Try to use the per cpu buffer first */ val = this_cpu_inc_return(trace_buffered_event_cnt); - if (val == 1) { + if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) { trace_event_setup(entry, type, flags, pc); entry->array[0] = len; return entry; @@ -2510,7 +2500,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, /* * If tracing is off, but we have triggers enabled * we still need to look at the event data. Use the temp_buffer - * to store the trace event for the tigger to use. It's recusive + * to store the trace event for the trigger to use. It's recursive * safe and will not be recorded anywhere. */ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { @@ -2621,7 +2611,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); - ftrace_trace_userstack(buffer, flags, pc); + ftrace_trace_userstack(tr, buffer, flags, pc); } /* @@ -2832,7 +2822,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; /* This should never happen. 
If it does, yell once and skip */ - if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING)) + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) goto out; /* @@ -2856,7 +2846,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, size = nr_entries * sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, - sizeof(*entry) + size, flags, pc); + (sizeof(*entry) - sizeof(entry->caller)) + size, + flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); @@ -2936,13 +2927,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); static void -ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) +ftrace_trace_userstack(struct trace_array *tr, + struct ring_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; - if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) + if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* @@ -2981,7 +2973,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) preempt_enable(); } #else /* CONFIG_USER_STACKTRACE_SUPPORT */ -static void ftrace_trace_userstack(struct ring_buffer *buffer, +static void ftrace_trace_userstack(struct trace_array *tr, + struct ring_buffer *buffer, unsigned long flags, int pc) { } @@ -3012,7 +3005,7 @@ static char *get_trace_buf(void) /* Interrupts must see nesting incremented before we use the buffer */ barrier(); - return &buffer->buffer[buffer->nesting][0]; + return &buffer->buffer[buffer->nesting - 1][0]; } static void put_trace_buf(void) @@ -3233,6 +3226,9 @@ int trace_array_printk(struct trace_array *tr, if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; + if (!tr) + return -ENOENT; + va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); @@ -3581,14 +3577,14 @@ unsigned long trace_total_entries(struct trace_array *tr) static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n" - "# / _-----=> irqs-off \n" - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" - "# |||| / delay \n" - "# cmd pid ||||| time | caller \n" - "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _------=> CPU# \n" + "# / _-----=> irqs-off \n" + "# | / _----=> need-resched \n" + "# || / _---=> hardirq/softirq \n" + "# ||| / _--=> preempt-depth \n" + "# |||| / delay \n" + "# cmd pid ||||| time | caller \n" + "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) @@ -3609,26 +3605,26 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, print_event_info(buf, m); - seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); - seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; - const char *space = " "; - int prec = tgid ? 10 : 2; + const char *space = " "; + int prec = tgid ? 
12 : 2; print_event_info(buf, m); - seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); - seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); - seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); - seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); - seq_printf(m, "# %.*s||| / delay\n", prec, space); - seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); - seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); + seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); + seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); + seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); + seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); + seq_printf(m, "# %.*s||| / delay\n", prec, space); + seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); + seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); } void @@ -5686,7 +5682,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) } /* If trace pipe files are being read, we can't change the tracer */ - if (tr->current_trace->ref) { + if (tr->trace_ref) { ret = -EBUSY; goto out; } @@ -5902,7 +5898,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); - tr->current_trace->ref++; + tr->trace_ref++; out: mutex_unlock(&trace_types_lock); return ret; @@ -5921,7 +5917,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) mutex_lock(&trace_types_lock); - tr->current_trace->ref--; + tr->trace_ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); @@ -7230,7 +7226,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) filp->private_data = info; - tr->current_trace->ref++; + tr->trace_ref++; mutex_unlock(&trace_types_lock); @@ -7331,7 +7327,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) mutex_lock(&trace_types_lock); - iter->tr->current_trace->ref--; + iter->tr->trace_ref--; __trace_array_put(iter->tr); @@ -8318,6 +8314,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) */ allocate_snapshot = false; #endif + + /* + * Because of some magic with the way alloc_percpu() works on + * x86_64, we need to synchronize the pgd of all the tables, + * otherwise the trace events that happen in x86_64 page fault + * handlers can't cope with the chance that alloc_percpu()'d + * memory might be touched in the page fault trace event. Oh, and + * we need to audit all other alloc_percpu() and vmalloc() calls in + * tracing, because something might get triggered within a + * page fault trace event! + */ + vmalloc_sync_mappings(); + return 0; } @@ -8457,7 +8466,7 @@ static int __remove_instance(struct trace_array *tr) { int i; - if (tr->ref || (tr->current_trace && tr->current_trace->ref)) + if (tr->ref || (tr->current_trace && tr->trace_ref)) return -EBUSY; list_del(&tr->list); @@ -8489,17 +8498,26 @@ static int __remove_instance(struct trace_array *tr) return 0; } -int trace_array_destroy(struct trace_array *tr) +int trace_array_destroy(struct trace_array *this_tr) { + struct trace_array *tr; int ret; - if (!tr) + if (!this_tr) return -EINVAL; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); - ret = __remove_instance(tr); + ret = -ENODEV; + + /* Make sure the trace array exists before destroying it.
*/ + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + if (tr == this_tr) { + ret = __remove_instance(tr); + break; + } + } mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); @@ -9121,7 +9139,7 @@ __init static int tracer_alloc_buffers(void) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ - if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) + if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a3c29d5fcc61..fc3aa81a43e3 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -309,6 +309,7 @@ struct trace_array { struct trace_event_file *trace_marker_file; cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ int ref; + int trace_ref; #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops *ops; struct trace_pid_list __rcu *function_pids; @@ -498,7 +499,6 @@ struct tracer { struct tracer *next; struct tracer_flags *flags; int enabled; - int ref; bool print_max; bool allow_instances; #ifdef CONFIG_TRACER_MAX_TRACE @@ -592,6 +592,12 @@ enum { * function is called to clear it. */ TRACE_GRAPH_NOTRACE_BIT, + + /* + * When transitioning between context, the preempt_count() may + * not be correct. Allow for a single recursion to cover this case. + */ + TRACE_TRANSITION_BIT, }; #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) @@ -646,14 +652,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max) return 0; bit = trace_get_context_bit() + start; - if (unlikely(val & (1 << bit))) - return -1; + if (unlikely(val & (1 << bit))) { + /* + * It could be that preempt_count has not been updated during + * a switch between contexts. Allow for a single recursion. + */ + bit = TRACE_TRANSITION_BIT; + if (trace_recursion_test(bit)) + return -1; + trace_recursion_set(bit); + barrier(); + return bit + 1; + } + + /* Normal check passed, clear the transition to allow it again */ + trace_recursion_clear(TRACE_TRANSITION_BIT); val |= 1 << bit; current->trace_recursion = val; barrier(); - return bit; + return bit + 1; } static __always_inline void trace_clear_recursion(int bit) @@ -663,6 +682,7 @@ static __always_inline void trace_clear_recursion(int bit) if (!bit) return; + bit--; bit = 1 << bit; val &= ~bit; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index aaf6793ededa..c1637f90c8a3 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void) { unsigned long flags; int this_cpu; - u64 now; + u64 now, prev_time; raw_local_irq_save(flags); this_cpu = raw_smp_processor_id(); - now = sched_clock_cpu(this_cpu); + /* - * If in an NMI context then dont risk lockups and return the - * cpu_clock() time: + * The global clock "guarantees" that the events are ordered + * between CPUs. But if two events on two different CPUS call + * trace_clock_global at roughly the same time, it really does + * not matter which one gets the earlier time. Just make sure + * that the same CPU will always show a monotonic clock. + * + * Use a read memory barrier to get the latest written + * time that was recorded. 
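+ * + * Worked example (an illustration, not in the original comment): if prev_time was last recorded as 1000 and this CPU's sched_clock_cpu() reads 998, the clamp below hands out 1001, so readers merging the per-CPU buffers never see this clock step backwards even though the local clocks drift.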
+ */ + smp_rmb(); + prev_time = READ_ONCE(trace_clock_struct.prev_time); + now = sched_clock_cpu(this_cpu); + + /* Make sure that now is always greater than prev_time */ + if ((s64)(now - prev_time) < 0) + now = prev_time + 1; + + /* + * If in an NMI context then don't risk lockups and simply return + * the current time. */ if (unlikely(in_nmi())) goto out; - arch_spin_lock(&trace_clock_struct.lock); + /* Tracing can cause strange recursion, always use a try lock */ + if (arch_spin_trylock(&trace_clock_struct.lock)) { + /* Reread prev_time in case it was already updated */ + prev_time = READ_ONCE(trace_clock_struct.prev_time); + if ((s64)(now - prev_time) < 0) + now = prev_time + 1; - /* - * TODO: if this happens often then maybe we should reset - * my_scd->clock to prev_time+1, to make sure - * we start ticking with the local clock from now on? - */ - if ((s64)(now - trace_clock_struct.prev_time) < 0) - now = trace_clock_struct.prev_time + 1; - - trace_clock_struct.prev_time = now; - - arch_spin_unlock(&trace_clock_struct.lock); + trace_clock_struct.prev_time = now; + /* The unlock acts as the wmb for the above rmb */ + arch_spin_unlock(&trace_clock_struct.lock); + } out: raw_local_irq_restore(flags); diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index fc8e97328e54..78c146efb862 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -174,7 +174,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, F_STRUCT( __field( int, size ) - __dynamic_array(unsigned long, caller ) + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) ), F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n" diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 995061bb2dec..e31ee325dad1 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -527,12 +527,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, tr, INT_MIN); - register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit, + register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, tr, INT_MAX); } else { unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, tr); - unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit, + unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, tr); } } @@ -793,6 +793,8 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) char *event = NULL, *sub = NULL, *match; int ret; + if (!tr) + return -ENOENT; /* * The buf format can be <subsystem>:<event-name> * *:<event-name> means any event by that name.
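The <subsystem>:<event-name> convention in that comment can be exercised in isolation. The sketch below is a hedged userspace illustration (parse_event_spec and its output format are invented here, not the kernel's locals) of the strsep()-based split that ftrace_set_clr_event() goes on to perform:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

/* "sub:event" names one event, "sub:" or "sub:*" a whole subsystem,
 * "*:event" any event of that name, and a bare "event" matches by
 * name alone (match stays set, sub/event stay NULL). */
static void parse_event_spec(char *buf)
{
	char *event = NULL, *sub = NULL, *match;

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;
		if (!strlen(sub) || !strcmp(sub, "*"))
			sub = NULL;
		if (!strlen(event) || !strcmp(event, "*"))
			event = NULL;
	}
	printf("sub=%-8s event=%-14s match=%s\n",
	       sub ? sub : "(any)", event ? event : "(any)",
	       match ? match : "(unset)");
}

int main(void)
{
	char a[] = "sched:sched_switch", b[] = "*:sched_switch";
	char c[] = "sched:", d[] = "sched_switch";

	parse_event_spec(a);	/* one event in one subsystem */
	parse_event_spec(b);	/* any subsystem, one event name */
	parse_event_spec(c);	/* the whole sched subsystem */
	parse_event_spec(d);	/* bare name: match=sched_switch */
	return 0;
}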
@@ -1105,7 +1107,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { call = file->event_call; - if (!trace_event_name(call) || !call->class || !call->class->reg) + if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || + !trace_event_name(call) || !call->class || !call->class->reg) continue; if (system && strcmp(call->class->system, system->name) != 0) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 6495800fb92a..a616b314fb7a 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2466,6 +2466,9 @@ static void __destroy_hist_field(struct hist_field *hist_field) kfree(hist_field->name); kfree(hist_field->type); + kfree(hist_field->system); + kfree(hist_field->event_name); + kfree(hist_field); } @@ -3528,6 +3531,7 @@ static struct hist_field *create_var(struct hist_trigger_data *hist_data, goto out; } + var->ref = 1; var->flags = HIST_FIELD_FL_VAR; var->var.idx = idx; var->var.hist_data = var->hist_data = hist_data; @@ -4157,6 +4161,9 @@ static void destroy_field_vars(struct hist_trigger_data *hist_data) for (i = 0; i < hist_data->n_field_vars; i++) destroy_field_var(hist_data->field_vars[i]); + + for (i = 0; i < hist_data->n_save_vars; i++) + destroy_field_var(hist_data->save_vars[i]); } static void save_field_var(struct hist_trigger_data *hist_data, @@ -4763,7 +4770,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) s = kstrdup(field_str, GFP_KERNEL); if (!s) { - kfree(hist_data->attrs->var_defs.name[n_vars]); ret = -ENOMEM; goto free; } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 287d77eae59b..e913d41a4194 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) static int trigger_process_regex(struct trace_event_file *file, char *buff) { - char *command, *next = buff; + char *command, *next; struct event_command *p; int ret = -EINVAL; + next = buff = skip_spaces(buff); command = strsep(&next, ": \t"); + if (next) { + next = skip_spaces(next); + if (!*next) + next = NULL; + } command = (command[0] != '!') ? 
command : command + 1; mutex_lock(&trigger_cmd_mutex); @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops, int ret; /* separate the trigger from the filter (t:n [if filter]) */ - if (param && isdigit(param[0])) + if (param && isdigit(param[0])) { trigger = strsep(&param, " \t"); + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } + } trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); @@ -1088,14 +1100,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *data, struct trace_event_file *file) { - int ret = register_trigger(glob, ops, data, file); + if (tracing_alloc_snapshot_instance(file->tr) != 0) + return 0; - if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { - unregister_trigger(glob, ops, data, file); - ret = 0; - } - - return ret; + return register_trigger(glob, ops, data, file); } static int @@ -1372,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, trigger = strsep(&param, " \t"); if (!trigger) return -EINVAL; + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } system = strsep(&trigger, ":"); if (!trigger) diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 862f4b0139fc..164e5c618cce 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -270,6 +270,7 @@ static bool disable_migrate; static void move_to_next_cpu(void) { struct cpumask *current_mask = &save_cpumask; + struct trace_array *tr = hwlat_trace; int next_cpu; if (disable_migrate) @@ -283,7 +284,7 @@ static void move_to_next_cpu(void) goto disable; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); next_cpu = cpumask_next(smp_processor_id(), current_mask); put_online_cpus(); @@ -354,13 +355,13 @@ static int start_kthread(struct trace_array *tr) struct task_struct *kthread; int next_cpu; - if (WARN_ON(hwlat_kthread)) + if (hwlat_kthread) return 0; /* Just pick the first CPU on first iteration */ current_mask = &save_cpumask; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); put_online_cpus(); next_cpu = cpumask_first(current_mask); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3f54dc2f6e1c..233322c77b76 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -220,9 +220,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call) { struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); - return tk ? kprobe_on_func_entry(tk->rp.kp.addr, + return tk ? (kprobe_on_func_entry(tk->rp.kp.addr, tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name, - tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false; + tk->rp.kp.addr ?
0 : tk->rp.kp.offset) == 0) : false; } bool trace_kprobe_error_injectable(struct trace_event_call *call) @@ -433,7 +433,7 @@ static int disable_trace_kprobe(struct trace_event_call *call, return 0; } -#if defined(CONFIG_KPROBES_ON_FTRACE) && \ +#if defined(CONFIG_DYNAMIC_FTRACE) && \ !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE) static bool __within_notrace_func(unsigned long addr) { @@ -454,7 +454,7 @@ static bool __within_notrace_func(unsigned long addr) static bool within_notrace_func(struct trace_kprobe *tk) { - unsigned long addr = addr = trace_kprobe_address(tk); + unsigned long addr = trace_kprobe_address(tk); char symname[KSYM_NAME_LEN], *p; if (!__within_notrace_func(addr)) @@ -811,9 +811,11 @@ static int trace_kprobe_create(int argc, const char *argv[]) trace_probe_log_err(0, BAD_PROBE_ADDR); goto parse_error; } - if (kprobe_on_func_entry(NULL, symbol, offset)) + ret = kprobe_on_func_entry(NULL, symbol, offset); + if (ret == 0) flags |= TPARG_FL_FENTRY; - if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { + /* Defer the ENOENT case until register kprobe */ + if (ret == -EINVAL && is_return) { trace_probe_log_err(0, BAD_RETPROBE); goto parse_error; } @@ -918,6 +920,8 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev) int i; seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); + if (trace_kprobe_is_return(tk) && tk->rp.maxactive) + seq_printf(m, "%d", tk->rp.maxactive); seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp), trace_probe_name(&tk->tp)); @@ -1462,7 +1466,7 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, if (perf_type_tracepoint) tk = find_trace_kprobe(pevent, group); else - tk = event->tp_event->data; + tk = trace_kprobe_primary_from_call(event->tp_event); if (!tk) return -EINVAL; diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index d54ce252b05a..a0a45901dc02 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -482,7 +482,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%8.8s-%-5d %3d", + trace_seq_printf(s, "%8.8s-%-7d %3d", comm, entry->pid, cpu); return trace_print_lat_fmt(s, entry); @@ -573,15 +573,15 @@ int trace_print_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); + trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid); if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { unsigned int tgid = trace_find_tgid(entry->pid); if (!tgid) - trace_seq_printf(s, "(-----) "); + trace_seq_printf(s, "(-------) "); else - trace_seq_printf(s, "(%5d) ", tgid); + trace_seq_printf(s, "(%7d) ", tgid); } trace_seq_printf(s, "[%03d] ", iter->cpu); @@ -624,7 +624,7 @@ int trace_print_lat_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); trace_seq_printf( - s, "%16s %5d %3d %d %08x %08lx ", + s, "%16s %7d %3d %d %08x %08lx ", comm, entry->pid, iter->cpu, entry->flags, entry->preempt_count, iter->idx); } else { @@ -905,7 +905,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, S = task_index_to_char(field->prev_state); trace_find_cmdline(field->next_pid, comm); trace_seq_printf(&iter->seq, - " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", + " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n", field->prev_pid, field->prev_prio, S, delim, diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c index 4d8e99fdbbbe..26b06b09c9f6 100644 --- 
a/kernel/trace/trace_preemptirq.c +++ b/kernel/trace/trace_preemptirq.c @@ -63,14 +63,14 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { + lockdep_hardirqs_off(CALLER_ADDR0); + if (!this_cpu_read(tracing_irq_cpu)) { this_cpu_write(tracing_irq_cpu, 1); tracer_hardirqs_off(CALLER_ADDR0, caller_addr); if (!in_nmi()) trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); } - - lockdep_hardirqs_off(CALLER_ADDR0); } EXPORT_SYMBOL(trace_hardirqs_off_caller); NOKPROBE_SYMBOL(trace_hardirqs_off_caller); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ab8b6436d53f..f98d6d94cbbf 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, ret = -EINVAL; goto fail; } - if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) || - parg->count) { + if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || + code->op == FETCH_OP_DATA) || parg->count) { /* * IMM, DATA and COMM is pointing actual address, those * must be kept, and if parg->count != 0, this is an diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 69ee8ef12cee..0838c290ac7f 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -492,8 +492,13 @@ trace_selftest_function_recursion(void) unregister_ftrace_function(&test_rec_probe); ret = -1; - if (trace_selftest_recursion_cnt != 1) { - pr_cont("*callback not called once (%d)* ", + /* + * Recursion allows for transitions between context, + * and may call the callback twice. + */ + if (trace_selftest_recursion_cnt != 1 && + trace_selftest_recursion_cnt != 2) { + pr_cont("*callback not called once (or twice) (%d)* ", trace_selftest_recursion_cnt); goto out; } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2619bc5ed520..5294843de6ef 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1405,7 +1405,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, if (perf_type_tracepoint) tu = find_probe_event(pevent, group); else - tu = event->tp_event->data; + tu = trace_uprobe_primary_from_call(event->tp_event); if (!tu) return -EINVAL; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 73956eaff8a9..be51df4508cb 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -53,6 +53,12 @@ struct tp_probes { struct tracepoint_func probes[0]; }; +/* Called in removal of a func but failed to allocate a new tp_funcs */ +static void tp_stub_func(void) +{ + return; +} + static inline void *allocate_probes(int count) { struct tp_probes *p = kmalloc(struct_size(p, probes, count), @@ -131,6 +137,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, { struct tracepoint_func *old, *new; int nr_probes = 0; + int stub_funcs = 0; int pos = -1; if (WARN_ON(!tp_func->func)) @@ -147,14 +154,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, if (old[nr_probes].func == tp_func->func && old[nr_probes].data == tp_func->data) return ERR_PTR(-EEXIST); + if (old[nr_probes].func == tp_stub_func) + stub_funcs++; } } - /* + 2 : one for new probe, one for NULL func */ - new = allocate_probes(nr_probes + 2); + /* + 2 : one for new probe, one for NULL func - stub functions */ + new = allocate_probes(nr_probes + 2 - stub_funcs); if (new == NULL) return ERR_PTR(-ENOMEM); if (old) { - if (pos < 0) { + if (stub_funcs) { + /* Need to copy one at a 
time to remove stubs */ + int probes = 0; + + pos = -1; + for (nr_probes = 0; old[nr_probes].func; nr_probes++) { + if (old[nr_probes].func == tp_stub_func) + continue; + if (pos < 0 && old[nr_probes].prio < prio) + pos = probes++; + new[probes++] = old[nr_probes]; + } + nr_probes = probes; + if (pos < 0) + pos = probes; + else + nr_probes--; /* Account for insertion */ + + } else if (pos < 0) { pos = nr_probes; memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); } else { @@ -188,8 +215,9 @@ static void *func_remove(struct tracepoint_func **funcs, /* (N -> M), (N > 1, M >= 0) probes */ if (tp_func->func) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if (old[nr_probes].func == tp_func->func && - old[nr_probes].data == tp_func->data) + if ((old[nr_probes].func == tp_func->func && + old[nr_probes].data == tp_func->data) || + old[nr_probes].func == tp_stub_func) nr_del++; } } @@ -208,14 +236,32 @@ static void *func_remove(struct tracepoint_func **funcs, /* N -> M, (N > 1, M > 0) */ /* + 1 for NULL */ new = allocate_probes(nr_probes - nr_del + 1); - if (new == NULL) - return ERR_PTR(-ENOMEM); - for (i = 0; old[i].func; i++) - if (old[i].func != tp_func->func - || old[i].data != tp_func->data) - new[j++] = old[i]; - new[nr_probes - nr_del].func = NULL; - *funcs = new; + if (new) { + for (i = 0; old[i].func; i++) + if ((old[i].func != tp_func->func + || old[i].data != tp_func->data) + && old[i].func != tp_stub_func) + new[j++] = old[i]; + new[nr_probes - nr_del].func = NULL; + *funcs = new; + } else { + /* + * Failed to allocate, replace the old function + * with calls to tp_stub_func. + */ + for (i = 0; old[i].func; i++) + if (old[i].func == tp_func->func && + old[i].data == tp_func->data) { + old[i].func = tp_stub_func; + /* Set the prio to the next event. */ + if (old[i + 1].func) + old[i].prio = + old[i + 1].prio; + else + old[i].prio = -1; + } + *funcs = old; + } } debug_print_probes(*funcs); return old; @@ -271,10 +317,12 @@ static int tracepoint_remove_func(struct tracepoint *tp, tp_funcs = rcu_dereference_protected(tp->funcs, lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); - if (IS_ERR(old)) { - WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); + if (WARN_ON_ONCE(IS_ERR(old))) return PTR_ERR(old); - } + + if (tp_funcs == old) + /* Failed allocating new tp_funcs, replaced func with stub */ + return 0; if (!tp_funcs) { /* Removed last function */ diff --git a/kernel/umh.c b/kernel/umh.c index 7f255b5a8845..b8c524dcc76f 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -14,6 +14,7 @@ #include <linux/cred.h> #include <linux/file.h> #include <linux/fdtable.h> +#include <linux/fs_struct.h> #include <linux/workqueue.h> #include <linux/security.h> #include <linux/mount.h> @@ -75,6 +76,14 @@ static int call_usermodehelper_exec_async(void *data) flush_signal_handlers(current, 1); spin_unlock_irq(&current->sighand->siglock); + /* + * Initial kernel threads share their FS with init, in order to + * get the init root directory. But we've now created a new + * thread that is going to execve a user process and has its own + * 'struct fs_struct'. Reset umask to the default. + */ + current->fs->umask = 0022; + /* * Our parent (unbound workqueue) runs with elevated scheduling * priority. Avoid propagating that into the userspace child.
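The umask reset above is easy to demonstrate from plain userspace, since the mode a file is created with is always (requested & ~umask). A minimal sketch, assuming nothing beyond POSIX (the demo file names are made up):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	int fd;

	/* umask 0 is what a helper would inherit from a kernel thread
	 * that never set one: no permission bits are masked off. */
	umask(0);
	fd = open("demo-umask0", O_CREAT | O_WRONLY, 0666);
	if (fd >= 0)
		close(fd);	/* demo-umask0 ends up 0666: world-writable */

	/* 022 is the default the patch restores before the execve(). */
	umask(022);
	fd = open("demo-umask022", O_CREAT | O_WRONLY, 0666);
	if (fd >= 0)
		close(fd);	/* demo-umask022 ends up 0644 */
	return 0;
}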
@@ -475,6 +484,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info) { struct umh_info *umh_info = info->data; + /* cleanup if umh_pipe_setup() was successful but exec failed */ + if (info->pid && info->retval) { + fput(umh_info->pipe_to_umh); + fput(umh_info->pipe_from_umh); + } + argv_free(info->argv); umh_info->pid = info->pid; } @@ -544,6 +559,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob); * Runs a user-space application. The application is started * asynchronously if wait is not set, and runs as a child of system workqueues. * (ie. it runs with full root capabilities and optimized affinity). + * + * Note: successful return value does not guarantee the helper was called at + * all. You can't rely on sub_info->{init,cleanup} being called even for + * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers + * into a successful no-op. */ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) { diff --git a/kernel/up.c b/kernel/up.c index 862b460ab97a..8e8551c8b285 100644 --- a/kernel/up.c +++ b/kernel/up.c @@ -24,7 +24,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, } EXPORT_SYMBOL(smp_call_function_single); -int smp_call_function_single_async(int cpu, call_single_data_t *csd) +int smp_call_function_single_async(int cpu, struct __call_single_data *csd) { unsigned long flags; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1a0c224af6fb..5d7092e32912 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -864,7 +864,8 @@ void wq_worker_running(struct task_struct *task) * @task: task going to sleep * * This function is called from schedule() when a busy worker is - * going to sleep. + * going to sleep. Preemption needs to be disabled to protect ->sleeping + * assignment. */ void wq_worker_sleeping(struct task_struct *task) { @@ -881,7 +882,8 @@ void wq_worker_sleeping(struct task_struct *task) pool = worker->pool; - if (WARN_ON_ONCE(worker->sleeping)) + /* Return if preempted before wq_worker_running() was reached */ + if (worker->sleeping) return; worker->sleeping = 1; @@ -1409,7 +1411,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ lockdep_assert_irqs_disabled(); - debug_work_activate(work); /* if draining, only works from the same workqueue are allowed */ if (unlikely(wq->flags & __WQ_DRAINING) && @@ -1491,6 +1492,7 @@ retry: worklist = &pwq->delayed_works; } + debug_work_activate(work); insert_work(pwq, work, worklist, work_flags); out: @@ -1845,12 +1847,6 @@ static void worker_attach_to_pool(struct worker *worker, { mutex_lock(&wq_pool_attach_mutex); - /* - * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any - * online CPUs. It'll be re-applied when any of the CPUs come up. - */ - set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); - /* * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains * stable across this function. See the comments above the flag @@ -1859,6 +1855,9 @@ static void worker_attach_to_pool(struct worker *worker, if (pool->flags & POOL_DISASSOCIATED) worker->flags |= WORKER_UNBOUND; + if (worker->rescue_wq) + set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); + list_add_tail(&worker->node, &pool->workers); worker->pool = pool; @@ -3716,17 +3715,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) * is updated and visible. 
*/ if (!freezable || !workqueue_freezing) { + bool kick = false; + pwq->max_active = wq->saved_max_active; while (!list_empty(&pwq->delayed_works) && - pwq->nr_active < pwq->max_active) + pwq->nr_active < pwq->max_active) { pwq_activate_first_delayed(pwq); + kick = true; + } /* * Need to kick a worker after thawed or an unbound wq's - * max_active is bumped. It's a slow path. Do it always. + * max_active is bumped. In realtime scenarios, always kicking a + * worker will cause interference on the isolated cpu cores, so + * let's kick iff work items were activated. */ - wake_up_worker(pwq->pool); + if (kick) + wake_up_worker(pwq->pool); } else { pwq->max_active = 0; } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f61d834e02fe..ee00c6c8a373 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -223,6 +223,8 @@ config DEBUG_INFO_DWARF4 config DEBUG_INFO_BTF bool "Generate BTF typeinfo" depends on DEBUG_INFO + depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED + depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST help Generate deduplicated BTF type information from DWARF debug info. Turning this on expects presence of pahole tool, which will convert @@ -1937,6 +1939,17 @@ config TEST_SYSCTL If unsure, say N. +config SYSCTL_KUNIT_TEST + bool "KUnit test for sysctl" + depends on KUNIT + help + This builds the proc sysctl unit test, which runs on boot. + Tests the API contract and implementation correctness of sysctl. + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config TEST_UDELAY tristate "udelay test driver" help diff --git a/lib/Makefile b/lib/Makefile index c5892807e06f..3b8977aed1b4 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -31,7 +31,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ - nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o + nmi_backtrace.o nodemask.o win_minmax.o lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o @@ -46,7 +46,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o \ once.o refcount.o usercopy.o errseq.o bucket_locks.o \ - generic-radix-tree.o + generic-radix-tree.o memcat_p.o obj-$(CONFIG_STRING_SELFTEST) += test_string.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o diff --git a/lib/bug.c b/lib/bug.c index 8c98af0bf585..c3aa22cbc876 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) file = NULL; line = 0; - warning = 0; - if (bug) { #ifdef CONFIG_DEBUG_BUGVERBOSE #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - file = bug->file; + file = bug->file; #else - file = (const char *)bug + bug->file_disp; + file = (const char *)bug + bug->file_disp; #endif - line = bug->line; + line = bug->line; #endif - warning = (bug->flags & BUGFLAG_WARNING) != 0; - once = (bug->flags & BUGFLAG_ONCE) != 0; - done = (bug->flags & BUGFLAG_DONE) != 0; + warning = (bug->flags & BUGFLAG_WARNING) != 0; + once = (bug->flags & BUGFLAG_ONCE) != 0; + done = (bug->flags & BUGFLAG_DONE) != 0; - if (warning && once) { - if (done) - return BUG_TRAP_TYPE_WARN; + if (warning && once) { + if (done) + return BUG_TRAP_TYPE_WARN; - /* - * Since this is the only store, concurrency is not an 
issue. - */ - bug->flags |= BUGFLAG_DONE; - } + /* + * Since this is the only store, concurrency is not an issue. + */ + bug->flags |= BUGFLAG_DONE; } /* diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index 8cc01a603416..c9acf1c12cfc 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -19,39 +19,46 @@ static struct crypto_shash __rcu *crct10dif_tfm; static struct static_key crct10dif_fallback __read_mostly; static DEFINE_MUTEX(crc_t10dif_mutex); +static struct work_struct crct10dif_rehash_work; -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data) +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data) { struct crypto_alg *alg = data; - struct crypto_shash *new, *old; if (val != CRYPTO_MSG_ALG_LOADED || static_key_false(&crct10dif_fallback) || strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) return 0; + schedule_work(&crct10dif_rehash_work); + return 0; +} + +static void crc_t10dif_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; + mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); if (!old) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } new = crypto_alloc_shash("crct10dif", 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); synchronize_rcu(); crypto_free_shash(old); - return 0; } static struct notifier_block crc_t10dif_nb = { - .notifier_call = crc_t10dif_rehash, + .notifier_call = crc_t10dif_notify, }; __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) @@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) { + struct crypto_shash *tfm; + + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(crct10dif_tfm)) { + mutex_lock(&crc_t10dif_mutex); + tfm = crypto_alloc_shash("crct10dif", 0, 0); + if (IS_ERR(tfm)) { static_key_slow_inc(&crct10dif_fallback); - crct10dif_tfm = NULL; + tfm = NULL; } + RCU_INIT_POINTER(crct10dif_tfm, tfm); + mutex_unlock(&crc_t10dif_mutex); return 0; } static void __exit crc_t10dif_mod_fini(void) { crypto_unregister_notifier(&crc_t10dif_nb); - crypto_free_shash(crct10dif_tfm); + cancel_work_sync(&crct10dif_rehash_work); + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1)); } module_init(crc_t10dif_mod_init); @@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { + struct crypto_shash *tfm; + const char *name; + int len; + if (static_key_false(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); - return sprintf(buffer, "%s\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm))); + rcu_read_lock(); + tfm = rcu_dereference(crct10dif_tfm); + if (!tfm) { + len = sprintf(buffer, "init\n"); + goto unlock; + } + + name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); + len = sprintf(buffer, "%s\n", name); + +unlock: + rcu_read_unlock(); + + return len; } module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); diff --git a/lib/crc32.c b/lib/crc32.c index 4a20455d1f61..bf60ef26a45c 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, return crc; } -#if CRC_LE_BITS == 1 +#if CRC_BE_BITS == 1 
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); diff --git a/lib/crc32test.c b/lib/crc32test.c index 97d6a57cefcc..61ddce2cff77 100644 --- a/lib/crc32test.c +++ b/lib/crc32test.c @@ -683,7 +683,6 @@ static int __init crc32c_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -694,7 +693,6 @@ static int __init crc32c_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); @@ -768,7 +766,6 @@ static int __init crc32_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -783,7 +780,6 @@ static int __init crc32_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", CRC_LE_BITS, CRC_BE_BITS); diff --git a/lib/devres.c b/lib/devres.c index 6a0e9bd6524a..77c80ca9e485 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -9,6 +9,7 @@ enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_NC, + DEVM_IOREMAP_UC, DEVM_IOREMAP_WC, }; @@ -39,6 +40,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, case DEVM_IOREMAP_NC: addr = ioremap_nocache(offset, size); break; + case DEVM_IOREMAP_UC: + addr = ioremap_uc(offset, size); + break; case DEVM_IOREMAP_WC: addr = ioremap_wc(offset, size); break; @@ -68,6 +72,21 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, } EXPORT_SYMBOL(devm_ioremap); +/** + * devm_ioremap_uc - Managed ioremap_uc() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed ioremap_uc(). Map is automatically unmapped on driver detach. 
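+ * + * A typical call, sketched (an illustration, not from the patch; error handling style varies by driver): + * + * base = devm_ioremap_uc(&pdev->dev, res->start, resource_size(res)); + * if (!base) + * return -ENOMEM;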
+ */ +void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, + resource_size_t size) +{ + return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC); +} +EXPORT_SYMBOL_GPL(devm_ioremap_uc); + /** * devm_ioremap_nocache - Managed ioremap_nocache() * @dev: Generic device to remap IO address for @@ -136,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, { resource_size_t size; void __iomem *dest_ptr; + char *pretty_name; BUG_ON(!dev); @@ -146,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev, size = resource_size(res); - if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) { + if (res->name) + pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", + dev_name(dev), res->name); + else + pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!pretty_name) + return IOMEM_ERR_PTR(-ENOMEM); + + if (!devm_request_mem_region(dev, res->start, size, pretty_name)) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c60409138e13..ccf05719b1ad 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -87,22 +87,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = { { _DPRINTK_FLAGS_NONE, '_' }, }; +struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; + /* format a string into buf[] which describes the _ddebug's flags */ -static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, - size_t maxlen) +static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) { - char *p = buf; + char *p = fb->buf; int i; - BUG_ON(maxlen < 6); for (i = 0; i < ARRAY_SIZE(opt_array); ++i) - if (dp->flags & opt_array[i].flag) + if (flags & opt_array[i].flag) *p++ = opt_array[i].opt_char; - if (p == buf) + if (p == fb->buf) *p++ = '_'; *p = '\0'; - return buf; + return fb->buf; } #define vpr_info(fmt, ...) 
\ @@ -144,7 +144,7 @@ static int ddebug_change(const struct ddebug_query *query, struct ddebug_table *dt; unsigned int newflags; unsigned int nfound = 0; - char flagbuf[10]; + struct flagsbuf fbuf; /* search for matching ddebugs */ mutex_lock(&ddebug_lock); @@ -201,8 +201,7 @@ static int ddebug_change(const struct ddebug_query *query, vpr_info("changed %s:%d [%s]%s =%s\n", trim_prefix(dp->filename), dp->lineno, dt->mod_name, dp->function, - ddebug_describe_flags(dp, flagbuf, - sizeof(flagbuf))); + ddebug_describe_flags(dp->flags, &fbuf)); } } mutex_unlock(&ddebug_lock); @@ -816,7 +815,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) { struct ddebug_iter *iter = m->private; struct _ddebug *dp = p; - char flagsbuf[10]; + struct flagsbuf flags; vpr_info("called m=%p p=%p\n", m, p); @@ -829,7 +828,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) seq_printf(m, "%s:%u [%s]%s =%s \"", trim_prefix(dp->filename), dp->lineno, iter->table->mod_name, dp->function, - ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); + ddebug_describe_flags(dp->flags, &flags)); seq_escape(m, dp->format, "\t\r\n\""); seq_puts(m, "\"\n"); diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c index 532f0ff89a96..e02f9df24d1e 100644 --- a/lib/fonts/font_10x18.c +++ b/lib/fonts/font_10x18.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 9216 -static const unsigned char fontdata_10x18[FONTDATAMAX] = { - +static const struct font_data fontdata_10x18 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ @@ -5129,8 +5129,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = { 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ - -}; +} }; const struct font_desc font_10x18 = { @@ -5138,7 +5137,7 @@ const struct font_desc font_10x18 = { .name = "10x18", .width = 10, .height = 18, - .data = fontdata_10x18, + .data = fontdata_10x18.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c index 09b2cc03435b..6e3c4b7691c8 100644 --- a/lib/fonts/font_6x10.c +++ b/lib/fonts/font_6x10.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/font.h> -static const unsigned char fontdata_6x10[] = { +#define FONTDATAMAX 2560 +static const struct font_data fontdata_6x10 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3074,14 +3076,13 @@ static const unsigned char fontdata_6x10[] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_6x10 = { .idx = FONT6x10_IDX, .name = "6x10", .width = 6, .height = 10, - .data = fontdata_6x10, + .data = fontdata_6x10.data, .pref = 0, }; diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c index d7136c33f1f0..2d22a24e816f 100644 --- a/lib/fonts/font_6x11.c +++ b/lib/fonts/font_6x11.c @@ -9,8 +9,8 @@ #define FONTDATAMAX (11*256) -static const unsigned char fontdata_6x11[FONTDATAMAX] = { - +static const struct font_data fontdata_6x11 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3338,8 +3338,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_6x11 = { @@ -3347,7 +3346,7 @@ const struct font_desc font_vga_6x11 = { .name = "ProFont6x11", .width = 6, .height = 11, - .data = fontdata_6x11, + .data = fontdata_6x11.data, /* Try avoiding this font if possible unless 
on MAC */ .pref = -2000, }; diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c index 89752d0b23e8..9cc7ae2e03f7 100644 --- a/lib/fonts/font_7x14.c +++ b/lib/fonts/font_7x14.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 3584 -static const unsigned char fontdata_7x14[FONTDATAMAX] = { - +static const struct font_data fontdata_7x14 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ @@ -4105,8 +4105,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = { 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ - -}; +} }; const struct font_desc font_7x14 = { @@ -4114,6 +4113,6 @@ const struct font_desc font_7x14 = { .name = "7x14", .width = 7, .height = 14, - .data = fontdata_7x14, + .data = fontdata_7x14.data, .pref = 0, }; diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c index b7ab1f5fbdb8..bab25dc59e8d 100644 --- a/lib/fonts/font_8x16.c +++ b/lib/fonts/font_8x16.c @@ -10,8 +10,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_8x16[FONTDATAMAX] = { - +static const struct font_data fontdata_8x16 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -4619,8 +4619,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x16 = { @@ -4628,7 +4627,7 @@ const struct font_desc font_vga_8x16 = { .name = "VGA8x16", .width = 8, .height = 16, - .data = fontdata_8x16, + .data = fontdata_8x16.data, .pref = 0, }; EXPORT_SYMBOL(font_vga_8x16); diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c index 2328ebc8bab5..109d0572368f 100644 --- a/lib/fonts/font_8x8.c +++ b/lib/fonts/font_8x8.c @@ -9,8 +9,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_8x8[FONTDATAMAX] = { - +static const struct font_data fontdata_8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2570,8 +2570,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x8 = { @@ -2579,6 +2578,6 @@ const struct font_desc font_vga_8x8 = { .name = "VGA8x8", .width = 8, .height = 8, - .data = fontdata_8x8, + .data = fontdata_8x8.data, .pref = 0, }; diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c index 0ff0e85d4481..fb395f0d4031 100644 --- a/lib/fonts/font_acorn_8x8.c +++ b/lib/fonts/font_acorn_8x8.c @@ -3,7 +3,10 @@ #include <linux/font.h> -static const unsigned char acorndata_8x8[] = { +#define FONTDATAMAX 2048 + +static const struct font_data acorndata_8x8 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ @@ -260,14 +263,14 @@ static const unsigned char acorndata_8x8[] = { /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -}; +} }; const struct font_desc font_acorn_8x8 = { .idx = ACORN8x8_IDX, .name = "Acorn8x8", .width = 8, .height = 8, - .data = acorndata_8x8, + .data = acorndata_8x8.data, #ifdef CONFIG_ARCH_ACORN .pref = 20, #else diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c index 838caa1cfef7..592774a90917 100644 --- a/lib/fonts/font_mini_4x6.c +++ b/lib/fonts/font_mini_4x6.c @@ -43,8 +43,8 @@ 
__END__; #define FONTDATAMAX 1536 -static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { - +static const struct font_data fontdata_mini_4x6 = { + { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ 0xee, /*= [*** ] */ @@ -2145,14 +2145,14 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ -}; +} }; const struct font_desc font_mini_4x6 = { .idx = MINI4x6_IDX, .name = "MINI4x6", .width = 4, .height = 6, - .data = fontdata_mini_4x6, + .data = fontdata_mini_4x6.data, .pref = 3, }; diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c index b15d3c342c5b..a6f95ebce950 100644 --- a/lib/fonts/font_pearl_8x8.c +++ b/lib/fonts/font_pearl_8x8.c @@ -14,8 +14,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { - +static const struct font_data fontdata_pearl8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2575,14 +2575,13 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_pearl_8x8 = { .idx = PEARL8x8_IDX, .name = "PEARL8x8", .width = 8, .height = 8, - .data = fontdata_pearl8x8, + .data = fontdata_pearl8x8.data, .pref = 2, }; diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c index 955d6eee3959..a5b65bd49604 100644 --- a/lib/fonts/font_sun12x22.c +++ b/lib/fonts/font_sun12x22.c @@ -3,8 +3,8 @@ #define FONTDATAMAX 11264 -static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { - +static const struct font_data fontdata_sun12x22 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ @@ -6148,8 +6148,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ - -}; +} }; const struct font_desc font_sun_12x22 = { @@ -6157,7 +6156,7 @@ const struct font_desc font_sun_12x22 = { .name = "SUN12x22", .width = 12, .height = 22, - .data = fontdata_sun12x22, + .data = fontdata_sun12x22.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c index 03d71e53954a..e577e76a6a7c 100644 --- a/lib/fonts/font_sun8x16.c +++ b/lib/fonts/font_sun8x16.c @@ -3,7 +3,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { +static const struct font_data fontdata_sun8x16 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, @@ -260,14 +261,14 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -}; +} }; const struct font_desc font_sun_8x16 = { .idx = SUN8x16_IDX, .name = "SUN8x16", .width = 8, .height = 16, - .data = fontdata_sun8x16, + .data = fontdata_sun8x16.data, #ifdef __sparc__ .pref = 10, #else diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c index 3f0cf1ccdf3a..f7c3abb6b99e 100644 --- a/lib/fonts/font_ter16x32.c +++ b/lib/fonts/font_ter16x32.c @@ -4,8 +4,8 @@ 
#define FONTDATAMAX 16384 -static const unsigned char fontdata_ter16x32[FONTDATAMAX] = { - +static const struct font_data fontdata_ter16x32 = { + { 0, 0, FONTDATAMAX, 0 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, @@ -2054,8 +2054,7 @@ static const unsigned char fontdata_ter16x32[FONTDATAMAX] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */ - -}; +} }; const struct font_desc font_ter_16x32 = { @@ -2063,7 +2062,7 @@ const struct font_desc font_ter_16x32 = { .name = "TER16x32", .width = 16, .height = 32, - .data = fontdata_ter16x32, + .data = fontdata_ter16x32.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/genalloc.c b/lib/genalloc.c index 9fc31292cfa1..80d10d02cf38 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) * users set the same bit, one user will return remain bits, otherwise * return 0. */ -static int bitmap_set_ll(unsigned long *map, int start, int nr) +static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) { unsigned long *p = map + BIT_WORD(start); - const int size = start + nr; + const unsigned long size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); - while (nr - bits_to_set >= 0) { + while (nr >= bits_to_set) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; @@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr) * users clear the same bit, one user will return remain bits, * otherwise return 0. 
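 *
 * Note that @start and @nr are unsigned long so chunks with more than
 * INT_MAX bits stay in range; the loop below must therefore test
 * nr >= bits_to_clear rather than checking nr - bits_to_clear against
 * zero, which would be meaningless for an unsigned value.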
*/ -static int bitmap_clear_ll(unsigned long *map, int start, int nr) +static unsigned long +bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) { unsigned long *p = map + BIT_WORD(start); - const int size = start + nr; + const unsigned long size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); - while (nr - bits_to_clear >= 0) { + while (nr >= bits_to_clear) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; @@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph size_t size, int nid, void *owner) { struct gen_pool_chunk *chunk; - int nbits = size >> pool->min_alloc_order; - int nbytes = sizeof(struct gen_pool_chunk) + + unsigned long nbits = size >> pool->min_alloc_order; + unsigned long nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); chunk = vzalloc_node(nbytes, nid); @@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool) struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; - int bit, end_bit; + unsigned long bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); @@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, struct gen_pool_chunk *chunk; unsigned long addr = 0; int order = pool->min_alloc_order; - int nbits, start_bit, end_bit, remain; + unsigned long nbits, start_bit, end_bit, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; - int start_bit, nbits, remain; + unsigned long start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -754,7 +755,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, index = bitmap_find_next_zero_area(map, size, start, nr, 0); while (index < size) { - int next_bit = find_next_bit(map, size, index + nr); + unsigned long next_bit = find_next_bit(map, size, index + nr); if ((next_bit - index) < len) { len = next_bit - index; start_bit = index; diff --git a/lib/idr.c b/lib/idr.c index c2cf2c52bbde..4d2eef0259d2 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -470,6 +470,7 @@ alloc: goto retry; nospc: xas_unlock_irqrestore(&xas, flags); + kfree(alloc); return -ENOSPC; } EXPORT_SYMBOL(ida_alloc_range); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 639d5e7014c1..9ea6f7bb8309 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -570,12 +570,13 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len, } static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, - __wsum *csum, struct iov_iter *i) + struct csum_state *csstate, + struct iov_iter *i) { struct pipe_inode_info *pipe = i->pipe; + __wsum sum = csstate->csum; + size_t off = csstate->off; size_t n, r; - size_t off = 0; - __wsum sum = *csum; int idx; if (!sanity(i)) @@ -596,7 +597,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, addr += chunk; } i->count -= bytes; - *csum = sum; + csstate->csum = sum; + csstate->off = off; return bytes; } @@ -1484,18 +1486,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, } EXPORT_SYMBOL(csum_and_copy_from_iter_full); -size_t csum_and_copy_to_iter(const void *addr, 
size_t bytes, void *csump, +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, struct iov_iter *i) { + struct csum_state *csstate = _csstate; const char *from = addr; - __wsum *csum = csump; __wsum sum, next; - size_t off = 0; + size_t off; if (unlikely(iov_iter_is_pipe(i))) - return csum_and_copy_to_pipe_iter(addr, bytes, csum, i); + return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i); - sum = *csum; + sum = csstate->csum; + off = csstate->off; if (unlikely(iov_iter_is_discard(i))) { WARN_ON(1); /* for now */ return 0; @@ -1524,7 +1527,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, off += v.iov_len; }) ) - *csum = sum; + csstate->csum = sum; + csstate->off = off; return bytes; } EXPORT_SYMBOL(csum_and_copy_to_iter); diff --git a/lib/kobject.c b/lib/kobject.c index 83198cb37d8d..0c6d17503a11 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -599,14 +599,7 @@ out: } EXPORT_SYMBOL_GPL(kobject_move); -/** - * kobject_del() - Unlink kobject from hierarchy. - * @kobj: object. - * - * This is the function that should be called to delete an object - * successfully added via kobject_add(). - */ -void kobject_del(struct kobject *kobj) +static void __kobject_del(struct kobject *kobj) { struct kernfs_node *sd; const struct kobj_type *ktype; @@ -625,9 +618,27 @@ void kobject_del(struct kobject *kobj) kobj->state_in_sysfs = 0; kobj_kset_leave(kobj); - kobject_put(kobj->parent); kobj->parent = NULL; } + +/** + * kobject_del() - Unlink kobject from hierarchy. + * @kobj: object. + * + * This is the function that should be called to delete an object + * successfully added via kobject_add(). + */ +void kobject_del(struct kobject *kobj) +{ + struct kobject *parent; + + if (!kobj) + return; + + parent = kobj->parent; + __kobject_del(kobj); + kobject_put(parent); +} EXPORT_SYMBOL(kobject_del); /** @@ -663,6 +674,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero); */ static void kobject_cleanup(struct kobject *kobj) { + struct kobject *parent = kobj->parent; struct kobj_type *t = get_ktype(kobj); const char *name = kobj->name; @@ -684,7 +696,10 @@ static void kobject_cleanup(struct kobject *kobj) if (kobj->state_in_sysfs) { pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n", kobject_name(kobj), kobj); - kobject_del(kobj); + __kobject_del(kobj); + } else { + /* avoid dropping the parent reference unnecessarily */ + parent = NULL; } if (t && t->release) { @@ -698,6 +713,8 @@ static void kobject_cleanup(struct kobject *kobj) pr_debug("kobject: '%s': free name\n", name); kfree_const(name); } + + kobject_put(parent); } #ifdef CONFIG_DEBUG_KOBJECT_RELEASE diff --git a/lib/logic_pio.c b/lib/logic_pio.c index 905027574e5d..774bb02fff10 100644 --- a/lib/logic_pio.c +++ b/lib/logic_pio.c @@ -27,6 +27,8 @@ static DEFINE_MUTEX(io_range_mutex); * @new_range: pointer to the IO range to be registered. * * Returns 0 on success, the error code in case of failure. + * If the range already exists, -EEXIST will be returned, which should be + * considered a success. * * Register a new IO range node in the IO range list. 
*/ @@ -49,6 +51,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) list_for_each_entry(range, &io_range_list, list) { if (range->fwnode == new_range->fwnode) { /* range already there */ + ret = -EEXIST; goto end_register; } if (range->flags == LOGIC_PIO_CPU_MMIO && diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 717c940112f9..8ad5ba2b86e2 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -268,6 +268,19 @@ m_len_done: *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); else { + if (unlikely(((m_off & 0x403f) == 0x403f) + && (m_len >= 261) + && (m_len <= 264)) + && likely(bitstream_version)) { + // Under lzo-rle, block copies + // for 261 <= length <= 264 and + // (distance & 0x80f3) == 0x80f3 + // can result in ambiguous + // output. Adjust length + // to 260 to prevent ambiguity. + ip -= m_len - 260; + m_len = 260; + } m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 2dceaca27489..afbd99987cf8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -653,7 +653,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. @@ -722,22 +722,22 @@ do { \ do { \ if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else \ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ @@ -747,36 +747,36 @@ do { \ do { \ if (__builtin_constant_p(ah) && (ah) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else \ __asm__ 
("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ @@ -787,7 +787,7 @@ do { \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype) ph) \ + : "=r" (ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 0083b5cc646c..d4d56ca6eafc 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -48,7 +48,7 @@ endif endif quiet_cmd_unroll = UNROLL $@ - cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@ + cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@ targets += int1.c int2.c int4.c int8.c int16.c int32.c $(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 3ab8720aa2f8..b9e6c3648be1 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -35,13 +35,13 @@ endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_SSSE3=1) CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX2=1) CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \ - gcc -c -x assembler - >&/dev/null && \ + gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX512=1) else ifeq ($(HAS_NEON),yes) OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206..9085b1172015 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -40,16 +40,6 @@ #include <linux/sched.h> #include <asm/unaligned.h> -#ifdef CONFIG_RANDOM32_SELFTEST -static void __init prandom_state_selftest(void); -#else -static inline void prandom_state_selftest(void) -{ -} -#endif - -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; - /** * prandom_u32_state - seeded pseudo-random number generator. * @state: pointer to state structure holding seeded state. @@ -69,25 +59,6 @@ u32 prandom_u32_state(struct rnd_state *state) } EXPORT_SYMBOL(prandom_u32_state); -/** - * prandom_u32 - pseudo random number generator - * - * A 32 bit pseudo-random number is generated using a fast - * algorithm suitable for simulation. This algorithm is NOT - * considered safe for cryptographic use. 
- */ -u32 prandom_u32(void) -{ - struct rnd_state *state = &get_cpu_var(net_rand_state); - u32 res; - - res = prandom_u32_state(state); - put_cpu_var(net_rand_state); - - return res; -} -EXPORT_SYMBOL(prandom_u32); - /** * prandom_bytes_state - get the requested number of pseudo-random bytes * @@ -119,20 +90,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) } EXPORT_SYMBOL(prandom_bytes_state); -/** - * prandom_bytes - get the requested number of pseudo-random bytes - * @buf: where to copy the pseudo-random bytes to - * @bytes: the requested number of bytes - */ -void prandom_bytes(void *buf, size_t bytes) -{ - struct rnd_state *state = &get_cpu_var(net_rand_state); - - prandom_bytes_state(state, buf, bytes); - put_cpu_var(net_rand_state); -} -EXPORT_SYMBOL(prandom_bytes); - static void prandom_warmup(struct rnd_state *state) { /* Calling RNG ten times to satisfy recurrence condition */ @@ -148,96 +105,6 @@ static void prandom_warmup(struct rnd_state *state) prandom_u32_state(state); } -static u32 __extract_hwseed(void) -{ - unsigned int val = 0; - - (void)(arch_get_random_seed_int(&val) || - arch_get_random_int(&val)); - - return val; -} - -static void prandom_seed_early(struct rnd_state *state, u32 seed, - bool mix_with_hwseed) -{ -#define LCG(x) ((x) * 69069U) /* super-duper LCG */ -#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) - state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); - state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); - state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); - state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); -} - -/** - * prandom_seed - add entropy to pseudo random number generator - * @entropy: entropy value - * - * Add some additional entropy to the prandom pool. - */ -void prandom_seed(u32 entropy) -{ - int i; - /* - * No locking on the CPUs, but then somewhat random results are, well, - * expected. - */ - for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state, i); - - state->s1 = __seed(state->s1 ^ entropy, 2U); - prandom_warmup(state); - } -} -EXPORT_SYMBOL(prandom_seed); - -/* - * Generate some initially weak seeding values to allow - * to start the prandom_u32() engine. - */ -static int __init prandom_init(void) -{ - int i; - - prandom_state_selftest(); - - for_each_possible_cpu(i) { - struct rnd_state *state = &per_cpu(net_rand_state, i); - u32 weak_seed = (i + jiffies) ^ random_get_entropy(); - - prandom_seed_early(state, weak_seed, true); - prandom_warmup(state); - } - - return 0; -} -core_initcall(prandom_init); - -static void __prandom_timer(struct timer_list *unused); - -static DEFINE_TIMER(seed_timer, __prandom_timer); - -static void __prandom_timer(struct timer_list *unused) -{ - u32 entropy; - unsigned long expires; - - get_random_bytes(&entropy, sizeof(entropy)); - prandom_seed(entropy); - - /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ - expires = 40 + prandom_u32_max(40); - seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); - - add_timer(&seed_timer); -} - -static void __init __prandom_start_seed_timer(void) -{ - seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); - add_timer(&seed_timer); -} - void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) { int i; @@ -257,51 +124,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) } EXPORT_SYMBOL(prandom_seed_full_state); -/* - * Generate better values after random number generator - * is fully initialized. 
- */ -static void __prandom_reseed(bool late) -{ - unsigned long flags; - static bool latch = false; - static DEFINE_SPINLOCK(lock); - - /* Asking for random bytes might result in bytes getting - * moved into the nonblocking pool and thus marking it - * as initialized. In this case we would double back into - * this function and attempt to do a late reseed. - * Ignore the pointless attempt to reseed again if we're - * already waiting for bytes when the nonblocking pool - * got initialized. - */ - - /* only allow initial seeding (late == false) once */ - if (!spin_trylock_irqsave(&lock, flags)) - return; - - if (latch && !late) - goto out; - - latch = true; - prandom_seed_full_state(&net_rand_state); -out: - spin_unlock_irqrestore(&lock, flags); -} - -void prandom_reseed_late(void) -{ - __prandom_reseed(true); -} - -static int __init prandom_reseed(void) -{ - __prandom_reseed(false); - __prandom_start_seed_timer(); - return 0; -} -late_initcall(prandom_reseed); - #ifdef CONFIG_RANDOM32_SELFTEST static struct prandom_test1 { u32 seed; @@ -421,7 +243,28 @@ static struct prandom_test2 { { 407983964U, 921U, 728767059U }, }; -static void __init prandom_state_selftest(void) +static u32 __extract_hwseed(void) +{ + unsigned int val = 0; + + (void)(arch_get_random_seed_int(&val) || + arch_get_random_int(&val)); + + return val; +} + +static void prandom_seed_early(struct rnd_state *state, u32 seed, + bool mix_with_hwseed) +{ +#define LCG(x) ((x) * 69069U) /* super-duper LCG */ +#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) + state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); + state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); + state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); + state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); +} + +static int __init prandom_state_selftest(void) { int i, j, errors = 0, runs = 0; bool error = false; @@ -461,5 +304,266 @@ static void __init prandom_state_selftest(void) pr_warn("prandom: %d/%d self tests failed\n", errors, runs); else pr_info("prandom: %d self tests passed\n", runs); + return 0; } +core_initcall(prandom_state_selftest); #endif + +/* + * The prandom_u32() implementation is now completely separate from the + * prandom_state() functions, which are retained (for now) for compatibility. + * + * Because of (ab)use in the networking code for choosing random TCP/UDP port + * numbers, which open DoS possibilities if guessable, we want something + * stronger than a standard PRNG. But the performance requirements of + * the network code do not allow robust crypto for this application. + * + * So this is a homebrew Junior Spaceman implementation, based on the + * lowest-latency trustworthy crypto primitive available, SipHash. + * (The authors of SipHash have not been consulted about this abuse of + * their work.) + * + * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to + * one word of output. This abbreviated version uses 2 rounds per word + * of output. + */ + +struct siprand_state { + unsigned long v0; + unsigned long v1; + unsigned long v2; + unsigned long v3; +}; + +static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; + +/* + * This is the core CPRNG function. As "pseudorandom", this is not used + * for truly valuable things, just intended to be a PITA to guess. + * For maximum speed, we do just two SipHash rounds per word. This is + * the same rate as 4 rounds per 64 bits that SipHash normally uses, + * so hopefully it's reasonably secure. 
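+ *
+ * (PRND_SIPROUND is assumed to expand to one full SipHash round over
+ * the four state words, or the HalfSipHash round on 32-bit builds;
+ * its definition lives in the prandom header, not in this file.)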
+ * + * There are two changes from the official SipHash finalization: + * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; + * they are there only to make the output rounds distinct from the input + * rounds, and this application has no input rounds. + * - Rather than returning v0^v1^v2^v3, return v1+v3. + * If you look at the SipHash round, the last operation on v3 is + * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. + * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but + * it still cancels out half of the bits in v2 for no benefit.) + * Second, since the last combining operation was xor, continue the + * pattern of alternating xor/add for a tiny bit of extra non-linearity. + */ +static inline u32 siprand_u32(struct siprand_state *s) +{ + unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; + + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; + return v1 + v3; +} + + +/** + * prandom_u32 - pseudo random number generator + * + * A 32 bit pseudo-random number is generated using a fast + * algorithm suitable for simulation. This algorithm is NOT + * considered safe for cryptographic use. + */ +u32 prandom_u32(void) +{ + struct siprand_state *state = get_cpu_ptr(&net_rand_state); + u32 res = siprand_u32(state); + + put_cpu_ptr(&net_rand_state); + return res; +} +EXPORT_SYMBOL(prandom_u32); + +/** + * prandom_bytes - get the requested number of pseudo-random bytes + * @buf: where to copy the pseudo-random bytes to + * @bytes: the requested number of bytes + */ +void prandom_bytes(void *buf, size_t bytes) +{ + struct siprand_state *state = get_cpu_ptr(&net_rand_state); + u8 *ptr = buf; + + while (bytes >= sizeof(u32)) { + put_unaligned(siprand_u32(state), (u32 *)ptr); + ptr += sizeof(u32); + bytes -= sizeof(u32); + } + + if (bytes > 0) { + u32 rem = siprand_u32(state); + + do { + *ptr++ = (u8)rem; + rem >>= BITS_PER_BYTE; + } while (--bytes > 0); + } + put_cpu_ptr(&net_rand_state); +} +EXPORT_SYMBOL(prandom_bytes); + +/** + * prandom_seed - add entropy to pseudo random number generator + * @entropy: entropy value + * + * Add some additional seed material to the prandom pool. + * The "entropy" is actually our IP address (the only caller is + * the network code), not for unpredictability, but to ensure that + * different machines are initialized differently. + */ +void prandom_seed(u32 entropy) +{ + int i; + + add_device_randomness(&entropy, sizeof(entropy)); + + for_each_possible_cpu(i) { + struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); + unsigned long v0 = state->v0, v1 = state->v1; + unsigned long v2 = state->v2, v3 = state->v3; + + do { + v3 ^= entropy; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= entropy; + } while (unlikely(!v0 || !v1 || !v2 || !v3)); + + WRITE_ONCE(state->v0, v0); + WRITE_ONCE(state->v1, v1); + WRITE_ONCE(state->v2, v2); + WRITE_ONCE(state->v3, v3); + } +} +EXPORT_SYMBOL(prandom_seed); + +/* + * Generate some initially weak seeding values to allow + * the prandom_u32() engine to be started. 
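+ *
+ * If the architecture supplies no hardware random longs, jiffies and
+ * the cycle counter stand in as weak seeds until the late reseed path
+ * below rekeys every CPU from get_random_long().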
+ */ +static int __init prandom_init_early(void) +{ + int i; + unsigned long v0, v1, v2, v3; + + if (!arch_get_random_long(&v0)) + v0 = jiffies; + if (!arch_get_random_long(&v1)) + v1 = random_get_entropy(); + v2 = v0 ^ PRND_K0; + v3 = v1 ^ PRND_K1; + + for_each_possible_cpu(i) { + struct siprand_state *state; + + v3 ^= i; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= i; + + state = per_cpu_ptr(&net_rand_state, i); + state->v0 = v0; state->v1 = v1; + state->v2 = v2; state->v3 = v3; + } + + return 0; +} +core_initcall(prandom_init_early); + + +/* Stronger reseeding when available, and periodically thereafter. */ +static void prandom_reseed(struct timer_list *unused); + +static DEFINE_TIMER(seed_timer, prandom_reseed); + +static void prandom_reseed(struct timer_list *unused) +{ + unsigned long expires; + int i; + + /* + * Reinitialize each CPU's PRNG with 128 bits of key. + * No locking on the CPUs, but then somewhat random results are, + * well, expected. + */ + for_each_possible_cpu(i) { + struct siprand_state *state; + unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; + unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; +#if BITS_PER_LONG == 32 + int j; + + /* + * On 32-bit machines, hash in two extra words to + * approximate 128-bit key length. Not that the hash + * has that much security, but this prevents a trivial + * 64-bit brute force. + */ + for (j = 0; j < 2; j++) { + unsigned long m = get_random_long(); + + v3 ^= m; + PRND_SIPROUND(v0, v1, v2, v3); + PRND_SIPROUND(v0, v1, v2, v3); + v0 ^= m; + } +#endif + /* + * Probably impossible in practice, but there is a + * theoretical risk that a race between this reseeding + * and the target CPU writing its state back could + * create the all-zero SipHash fixed point. + * + * To ensure that never happens, ensure the state + * we write contains no zero words. + */ + state = per_cpu_ptr(&net_rand_state, i); + WRITE_ONCE(state->v0, v0 ? v0 : -1ul); + WRITE_ONCE(state->v1, v1 ? v1 : -1ul); + WRITE_ONCE(state->v2, v2 ? v2 : -1ul); + WRITE_ONCE(state->v3, v3 ? v3 : -1ul); + } + + /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ + expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); + mod_timer(&seed_timer, expires); +} + +/* + * The random ready callback can be called from almost any interrupt. + * To avoid worrying about whether it's safe to delay that interrupt + * long enough to seed all CPUs, just schedule an immediate timer event. + */ +static void prandom_timer_start(struct random_ready_callback *unused) +{ + mod_timer(&seed_timer, jiffies); +} + +/* + * Start periodic full reseeding as soon as strong + * random numbers are available. 
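+ *
+ * add_random_ready_callback() returns -EALREADY if the CRNG was ready
+ * before we registered, in which case the first reseed is simply
+ * scheduled straight away.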
+ */ +static int __init prandom_init_late(void) +{ + static struct random_ready_callback random_ready = { + .func = prandom_timer_start + }; + int ret = add_random_ready_callback(&random_ready); + + if (ret == -EALREADY) { + prandom_timer_start(&random_ready); + ret = 0; + } + return ret; +} +late_initcall(prandom_init_late); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5813072bc589..29346184fcf2 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -514,7 +514,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length, elem_len = min_t(u64, length, PAGE_SIZE << order); page = alloc_pages(gfp, order); if (!page) { - sgl_free(sgl); + sgl_free_order(sgl, order); return NULL; } diff --git a/lib/string.c b/lib/string.c index 08ec58cc673b..abfaa05181e2 100644 --- a/lib/string.c +++ b/lib/string.c @@ -272,6 +272,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count) } EXPORT_SYMBOL(strscpy_pad); +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is a pointer + * to the new %NUL-terminating character in @dest. (For strcpy, the return + * value is a pointer to the start of @dest). This interface is considered + * unsafe as it doesn't perform bounds checking of the inputs. As such it's + * not recommended for usage. Instead, its definition is provided in case + * the compiler lowers other libcalls to stpcpy. + */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/lib/syscall.c b/lib/syscall.c index fb328e7ccb08..71ffcf5aff12 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -7,6 +7,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info) { + unsigned long args[6] = { }; struct pt_regs *regs; if (!try_get_task_stack(target)) { @@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info info->data.nr = syscall_get_nr(target, regs); if (info->data.nr != -1L) - syscall_get_arguments(target, regs, - (unsigned long *)&info->data.args[0]); + syscall_get_arguments(target, regs, args); + + info->data.args[0] = args[0]; + info->data.args[1] = args[1]; + info->data.args[2] = args[2]; + info->data.args[3] = args[3]; + info->data.args[4] = args[4]; + info->data.args[5] = args[5]; put_task_stack(target); return 0; diff --git a/lib/test_kasan.c b/lib/test_kasan.c index bd3d9ef7d39e..83344c9c38f4 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -22,6 +22,14 @@ #include <asm/page.h> +/* + * We assign some test results to these globals to make sure the tests + * are not eliminated as dead code. + */ + +int kasan_int_result; +void *kasan_ptr_result; + /* * Note: test functions are marked noinline so that their names appear in * reports. 
@@ -603,7 +611,7 @@ static noinline void __init kasan_memchr(void) if (!ptr) return; - memchr(ptr, '1', size + 1); + kasan_ptr_result = memchr(ptr, '1', size + 1); kfree(ptr); } @@ -619,7 +627,7 @@ static noinline void __init kasan_memcmp(void) return; memset(arr, 0, sizeof(arr)); - memcmp(ptr, arr, size+1); + kasan_int_result = memcmp(ptr, arr, size + 1); kfree(ptr); } @@ -642,22 +650,22 @@ static noinline void __init kasan_strings(void) * will likely point to zeroed byte. */ ptr += 16; - strchr(ptr, '1'); + kasan_ptr_result = strchr(ptr, '1'); pr_info("use-after-free in strrchr\n"); - strrchr(ptr, '1'); + kasan_ptr_result = strrchr(ptr, '1'); pr_info("use-after-free in strcmp\n"); - strcmp(ptr, "2"); + kasan_int_result = strcmp(ptr, "2"); pr_info("use-after-free in strncmp\n"); - strncmp(ptr, "2", 1); + kasan_int_result = strncmp(ptr, "2", 1); pr_info("use-after-free in strlen\n"); - strlen(ptr); + kasan_int_result = strlen(ptr); pr_info("use-after-free in strnlen\n"); - strnlen(ptr, 1); + kasan_int_result = strnlen(ptr, 1); } static noinline void __init kasan_bitops(void) @@ -724,11 +732,12 @@ static noinline void __init kasan_bitops(void) __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); pr_info("out-of-bounds in test_bit\n"); - (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); #if defined(clear_bit_unlock_is_negative_byte) pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n"); - clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG + + BITS_PER_BYTE, bits); #endif kfree(bits); } diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 9cf77628fc91..87a0cc750ea2 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev, break; case TEST_KMOD_FS_TYPE: kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; copied = config_copy_test_fs(config, test_str, strlen(test_str)); break; diff --git a/lib/test_objagg.c b/lib/test_objagg.c index 72c1abfa154d..da137939a410 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -979,10 +979,10 @@ err_check_expect_stats2: err_world2_obj_get: for (i--; i >= 0; i--) world_obj_put(&world2, objagg, hints_case->key_ids[i]); - objagg_hints_put(hints); - objagg_destroy(objagg2); i = hints_case->key_ids_count; + objagg_destroy(objagg2); err_check_expect_hints_stats: + objagg_hints_put(hints); err_hints_get: err_check_expect_stats: err_world_obj_get: diff --git a/lib/test_printf.c b/lib/test_printf.c index 5d94cbff2120..d4b711b53942 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -212,6 +212,7 @@ test_string(void) #define PTR_STR "ffff0123456789ab" #define PTR_VAL_NO_CRNG "(____ptrval____)" #define ZEROS "00000000" /* hex 32 zero bits */ +#define ONES "ffffffff" /* hex 32 one bits */ static int __init plain_format(void) @@ -243,6 +244,7 @@ plain_format(void) #define PTR_STR "456789ab" #define PTR_VAL_NO_CRNG "(ptrval)" #define ZEROS "" +#define ONES "" static int __init plain_format(void) @@ -328,14 +330,28 @@ test_hashed(const char *fmt, const void *p) test(buf, fmt, p); } +/* + * NULL pointers aren't hashed. + */ static void __init null_pointer(void) { - test_hashed("%p", NULL); + test(ZEROS "00000000", "%p", NULL); test(ZEROS "00000000", "%px", NULL); test("(null)", "%pE", NULL); } +/* + * Error pointers aren't hashed. 
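+ * (ERR_PTR(-11) is -EAGAIN; both %p and %px are expected to emit the
+ * raw value, all ones except the low bits, rather than a hash.)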
+ */ +static void __init +error_pointer(void) +{ + test(ONES "fffffff5", "%p", ERR_PTR(-11)); + test(ONES "fffffff5", "%px", ERR_PTR(-11)); + test("(efault)", "%pE", ERR_PTR(-11)); +} + #define PTR_INVALID ((void *)0x000000ab) static void __init @@ -598,6 +614,7 @@ test_pointer(void) { plain(); null_pointer(); + error_pointer(); invalid_pointer(); symbol_ptr(); kernel_ptr(); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 8c7d7a8468b8..d4f97925dbd8 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1156,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_pause(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + void *entry; + unsigned int order; + unsigned long index = 1; + unsigned int count = 0; + + for (order = 0; order < order_limit; order++) { + XA_BUG_ON(xa, xa_store_order(xa, index, order, + xa_mk_index(index), GFP_KERNEL)); + index += 1UL << order; + } + + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); + count++; + } + rcu_read_unlock(); + XA_BUG_ON(xa, count != order_limit); + + count = 0; + xas_set(&xas, 0); + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); + count++; + xas_pause(&xas); + } + rcu_read_unlock(); + XA_BUG_ON(xa, count != order_limit); + + xa_destroy(xa); +} + static noinline void check_move_tiny(struct xarray *xa) { XA_STATE(xas, xa, 0); @@ -1664,6 +1700,7 @@ static int xarray_checks(void) check_xa_alloc(); check_find(&array); check_find_entry(&array); + check_pause(&array); check_account(&array); check_destroy(&array); check_move(&array); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e78017a3e1bd..fb4af73142b4 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -746,6 +746,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr, const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)"; unsigned long hashval; + /* + * Print the real pointer value for NULL and error pointers, + * as they are not actual addresses. + */ + if (IS_ERR_OR_NULL(ptr)) + return pointer_string(buf, end, ptr, spec); + /* When debugging early boot use non-cryptographically secure hash. */ if (unlikely(debug_boot_weak_hash)) { hashval = hash_long((unsigned long)ptr, 32); diff --git a/lib/xarray.c b/lib/xarray.c index acd1fad2e862..08d71c7b7599 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas) xas->xa_node = XAS_RESTART; if (node) { - unsigned int offset = xas->xa_offset; + unsigned long offset = xas->xa_offset; while (++offset < XA_CHUNK_SIZE) { if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) break; @@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) } entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK)) + continue; if (!xa_is_node(entry)) return entry; xas->xa_node = xa_to_node(entry); diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 2c13ecc5bb2c..ed1f3df27260 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -10,17 +10,6 @@ #ifndef ASMINF -/* Allow machine dependent optimization for post-increment or pre-increment. 
- Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ union uu { unsigned short us; unsigned char b[2]; @@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) return mm.us; } -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -# define UP_UNALIGNED(a) get_unaligned16((a)++) -#else -# define OFF 1 -# define PUP(a) *++(a) -# define UP_UNALIGNED(a) get_unaligned16(++(a)) -#endif - /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is @@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) /* copy state to local variables */ state = (struct inflate_state *)strm->state; - in = strm->next_in - OFF; + in = strm->next_in; last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; + out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT @@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) input data or output space */ do { if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; @@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ - PUP(out) = (unsigned char)(this.val); + *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); @@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; } if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; @@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } } @@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) state->mode = BAD; break; } - from = window - OFF; + from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from end of window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); - from = window - OFF; + from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from window */ len -= op; do { - PUP(out) = 
PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else { @@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ - if (!((long)(out - 1 + OFF) & 1)) { - PUP(out) = PUP(from); + if (!((long)(out - 1) & 1)) { + *out++ = *from++; len--; } - sout = (unsigned short *)(out - OFF); + sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; - sfrom = (unsigned short *)(from - OFF); + sfrom = (unsigned short *)(from); loops = len >> 1; do #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - PUP(sout) = PUP(sfrom); + *sout++ = *sfrom++; #else - PUP(sout) = UP_UNALIGNED(sfrom); + *sout++ = get_unaligned16(sfrom++); #endif while (--loops); - out = (unsigned char *)sout + OFF; - from = (unsigned char *)sfrom + OFF; + out = (unsigned char *)sout; + from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; - pat16 = *(sout-1+OFF); + pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ @@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) } loops = len >> 1; do - PUP(sout) = pat16; + *sout++ = pat16; while (--loops); - out = (unsigned char *)sout + OFF; + out = (unsigned char *)sout; } if (len & 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ @@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) hold &= (1U << bits) - 1; /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; + strm->next_in = in; + strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); diff --git a/mm/Kconfig b/mm/Kconfig index a5dae9a7eb51..fbdc5c70e487 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -576,19 +576,6 @@ config ZSMALLOC returned by an alloc(). This handle must be mapped in order to access the allocated space. -config PGTABLE_MAPPING - bool "Use page table mapping to access object in zsmalloc" - depends on ZSMALLOC - help - By default, zsmalloc uses a copy-based object mapping method to - access allocations that span two pages. However, if a particular - architecture (ex, ARM) performs VM mapping faster than copying, - then you should select this. This causes zsmalloc to use page table - mapping rather than copying for object mapping. - - You can check speed with zsmalloc benchmark: - https://github.com/spartacus06/zsmapbench - config ZSMALLOC_STAT bool "Export zsmalloc statistics" depends on ZSMALLOC diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 62f05f605fb5..3f2480e4c5af 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { EXPORT_SYMBOL_GPL(noop_backing_dev_info); static struct class *bdi_class; -const char *bdi_unknown_name = "(unknown)"; +static const char *bdi_unknown_name = "(unknown)"; /* * bdi_lock protects bdi_tree and updates to bdi_list. 
bdi_list has RCU @@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) if (bdi->dev) /* The driver needs to use separate queues per device */ return 0; - dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args); + vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); + dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); if (IS_ERR(dev)) return PTR_ERR(dev); @@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi) } EXPORT_SYMBOL(bdi_put); +const char *bdi_dev_name(struct backing_dev_info *bdi) +{ + if (!bdi || !bdi->dev) + return bdi_unknown_name; + return bdi->dev_name; +} +EXPORT_SYMBOL_GPL(bdi_dev_name); + static wait_queue_head_t congestion_wqh[2] = { __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) diff --git a/mm/cma.c b/mm/cma.c index 7fe0b8356775..7de520c0a1db 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -93,19 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, mutex_unlock(&cma->lock); } -static int __init cma_activate_area(struct cma *cma) +static void __init cma_activate_area(struct cma *cma) { - int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; unsigned i = cma->count >> pageblock_order; struct zone *zone; - cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - - if (!cma->bitmap) { - cma->count = 0; - return -ENOMEM; - } + cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); + if (!cma->bitmap) + goto out_error; WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); @@ -135,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma) spin_lock_init(&cma->mem_head_lock); #endif - return 0; + return; not_in_zone: - pr_err("CMA area %s could not be activated\n", cma->name); - kfree(cma->bitmap); + bitmap_free(cma->bitmap); +out_error: cma->count = 0; - return -EINVAL; + pr_err("CMA area %s could not be activated\n", cma->name); + return; } static int __init cma_init_reserved_areas(void) { int i; - for (i = 0; i < cma_area_count; i++) { - int ret = cma_activate_area(&cma_areas[i]); - - if (ret) - return ret; - } + for (i = 0; i < cma_area_count; i++) + cma_activate_area(&cma_areas[i]); return 0; } diff --git a/mm/compaction.c b/mm/compaction.c index 672d3c78c6ab..d686887856fe 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1276,7 +1276,7 @@ fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); unsigned int nr_scanned = 0; - unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; + unsigned long low_pfn, min_pfn, highest = 0; unsigned long nr_isolated = 0; unsigned long distance; struct page *page = NULL; @@ -1321,6 +1321,7 @@ fast_isolate_freepages(struct compact_control *cc) struct page *freepage; unsigned long flags; unsigned int order_scanned = 0; + unsigned long high_pfn = 0; if (!area->nr_free) continue; @@ -1629,6 +1630,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) unsigned long pfn = cc->migrate_pfn; unsigned long high_pfn; int order; + bool found_block = false; /* Skip hints are relied on to avoid repeats on the fast search */ if (cc->ignore_skip_hint) @@ -1671,7 +1673,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); for (order = cc->order - 1; - order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit; 
+ order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; order--) { struct free_area *area = &cc->zone->free_area[order]; struct list_head *freelist; @@ -1686,7 +1688,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) list_for_each_entry(freepage, freelist, lru) { unsigned long free_pfn; - nr_scanned++; + if (nr_scanned++ >= limit) { + move_freelist_tail(freelist, freepage); + break; + } + free_pfn = page_to_pfn(freepage); if (free_pfn < high_pfn) { /* @@ -1695,12 +1701,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) * the list assumes an entry is deleted, not * reordered. */ - if (get_pageblock_skip(freepage)) { - if (list_is_last(freelist, &freepage->lru)) - break; - + if (get_pageblock_skip(freepage)) continue; - } /* Reorder to so a future search skips recent pages */ move_freelist_tail(freelist, freepage); @@ -1708,15 +1710,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) update_fast_start_pfn(cc, free_pfn); pfn = pageblock_start_pfn(free_pfn); cc->fast_search_fail = 0; + found_block = true; set_pageblock_skip(freepage); break; } - - if (nr_scanned >= limit) { - cc->fast_search_fail++; - move_freelist_tail(freelist, freepage); - break; - } } spin_unlock_irqrestore(&cc->zone->lock, flags); } @@ -1727,9 +1724,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) * If fast scanning failed then use a cached entry for a page block * that had free pages as the basis for starting a linear scan. */ - if (pfn == cc->migrate_pfn) + if (!found_block) { + cc->fast_search_fail++; pfn = reinit_migrate_pfn(cc); - + } return pfn; } @@ -2310,16 +2308,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, .page = NULL, }; - if (capture) - current->capture_control = &capc; + /* + * Make sure the structs are really initialized before we expose the + * capture control, in case we are interrupted and the interrupt handler + * frees a page. + */ + barrier(); + WRITE_ONCE(current->capture_control, &capc); ret = compact_zone(&cc, &capc); VM_BUG_ON(!list_empty(&cc.freepages)); VM_BUG_ON(!list_empty(&cc.migratepages)); - *capture = capc.page; - current->capture_control = NULL; + /* + * Make sure we hide capture control first before we read the captured + * page pointer, otherwise an interrupt could free and capture a page + * and we would leak it. + */ + WRITE_ONCE(current->capture_control, NULL); + *capture = READ_ONCE(capc.page); return ret; } @@ -2333,6 +2341,7 @@ int sysctl_extfrag_threshold = 500; * @alloc_flags: The allocation flags of the current allocation * @ac: The context of current allocation * @prio: Determines how hard direct compaction should try to succeed + * @capture: Pointer to free page created by compaction will be stored here * * This is the main entry point for direct page compaction. 
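The compact_zone_order() hunk above replaces plain stores with barrier()/WRITE_ONCE()/READ_ONCE() so that an interrupt which frees (and thereby captures) a page can neither observe a half-initialized capture_control nor store into one we have stopped looking at. A condensed sketch of that publish/retract ordering, assuming the kernel's compiler-barrier helpers; capture_slot and do_compaction_work() are hypothetical:

    struct capture { struct page *page; };

    static struct capture *capture_slot;	/* stands in for current->capture_control */

    static struct page *run_with_capture(void)
    {
    	struct capture capc = { .page = NULL };

    	barrier();			 /* capc fully initialized first... */
    	WRITE_ONCE(capture_slot, &capc); /* ...then published to interrupts */

    	do_compaction_work();		 /* hypothetical; may be interrupted */

    	/* retract the slot first, then read the result: an IRQ landing
    	 * between the two steps in the opposite order could capture a
    	 * page we would never read back, leaking it */
    	WRITE_ONCE(capture_slot, NULL);
    	return READ_ONCE(capc.page);
    }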
*/ diff --git a/mm/filemap.c b/mm/filemap.c index 5376c23c9fc1..4a830bc940d1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -198,7 +198,6 @@ static void unaccount_page_cache_page(struct address_space *mapping, return; nr = hpage_nr_pages(page); - __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); if (PageSwapBacked(page)) { __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); @@ -847,10 +846,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) } EXPORT_SYMBOL_GPL(replace_page_cache_page); -static int __add_to_page_cache_locked(struct page *page, - struct address_space *mapping, - pgoff_t offset, gfp_t gfp_mask, - void **shadowp) +noinline int __add_to_page_cache_locked(struct page *page, + struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask, + void **shadowp) { XA_STATE(xas, &mapping->i_pages, offset); int huge = PageHuge(page); @@ -867,6 +866,7 @@ static int __add_to_page_cache_locked(struct page *page, gfp_mask, &memcg, false); if (error) return error; + mem_cgroup_shrink_pagecache(memcg, gfp_mask); } get_page(page); @@ -2461,7 +2461,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, pgoff_t offset = vmf->pgoff; /* If we don't want any read-ahead, don't bother */ - if (vmf->vma->vm_flags & VM_RAND_READ) + if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) return fpin; if (ra->mmap_miss > 0) ra->mmap_miss--; @@ -2868,6 +2868,14 @@ filler: unlock_page(page); goto out; } + + /* + * A previous I/O error may have been due to temporary + * failures. + * Clear page error before actual read, PG_error will be + * set again if read page fails. + */ + ClearPageError(page); goto filler; out: diff --git a/mm/gup.c b/mm/gup.c index 745b4036cdfd..3ef769529548 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -161,13 +161,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, } /* - * FOLL_FORCE can write to even unwritable pte's, but only - * after we've gone through a COW cycle and they are dirty. + * FOLL_FORCE or a forced COW break can write even to unwritable pte's, + * but only after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { - return pte_write(pte) || - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); + return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); +} + +/* + * A (separate) COW fault might break the page the other way and + * get_user_pages() would return the page from what is now the wrong + * VM. So we need to force a COW break at GUP time even for reads. 
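The gup.c changes above pivot on one rule: a pin (FOLL_GET) on a private COW mapping must target the page the process will still own after any copy-on-write, so the fault is forced to break COW up front even for plain reads. A compressed sketch of the decision; is_cow_mapping_sketch() restates the usual VM_MAYWRITE-without-VM_SHARED test, and fixup_gup_flags() is a hypothetical caller-side wrapper:

    /* a private mapping that could ever be written is subject to COW */
    static inline bool is_cow_mapping_sketch(vm_flags_t flags)
    {
    	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

    static unsigned int fixup_gup_flags(struct vm_area_struct *vma,
    				    unsigned int flags)
    {
    	/* only callers taking a page reference need the early COW break */
    	if (is_cow_mapping_sketch(vma->vm_flags) && (flags & FOLL_GET))
    		flags |= FOLL_WRITE;	/* break COW now, pin the final page */
    	return flags;
    }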
+ */ +static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) +{ + return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); } static struct page *follow_page_pte(struct vm_area_struct *vma, @@ -823,12 +832,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, goto out; } if (is_vm_hugetlb_page(vma)) { + if (should_force_cow_break(vma, foll_flags)) + foll_flags |= FOLL_WRITE; i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, - gup_flags, nonblocking); + foll_flags, nonblocking); continue; } } + + if (should_force_cow_break(vma, foll_flags)) + foll_flags |= FOLL_WRITE; + retry: /* * If we have a pending SIGKILL, don't keep faulting pages and @@ -2169,13 +2184,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 1; } -static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, +static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; - pmdp = pmd_offset(&pud, addr); + pmdp = pmd_offset_lockless(pudp, pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); @@ -2212,13 +2227,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, return 1; } -static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, +static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; - pudp = pud_offset(&p4d, addr); + pudp = pud_offset_lockless(p4dp, p4d, addr); do { pud_t pud = READ_ONCE(*pudp); @@ -2233,20 +2248,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr)) + } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } -static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, +static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; - p4dp = p4d_offset(&pgd, addr); + p4dp = p4d_offset_lockless(pgdp, pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); @@ -2258,7 +2273,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr)) + } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); @@ -2286,7 +2301,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, flags, pages, nr)) return; - } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr)) + } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } @@ -2316,6 +2331,10 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end) * * If the architecture does not support this function, simply return with no * pages pinned. + * + * Careful, careful! COW breaking can go either way, so a non-write + * access can get ambiguous page results. 
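The gup_p?d_range() signature changes above all serve one idiom: each level snapshots its entry once with READ_ONCE() and derives the next-level table from that snapshot (via the *_offset_lockless() helpers), never re-reading through the live pointer. A sketch of one level, hugepage and special cases elided:

    static int walk_pmd_level(pud_t *pudp, pud_t pud,
    			  unsigned long addr, unsigned long end)
    {
    	unsigned long next;
    	pmd_t *pmdp;

    	/* derive the table from the caller's snapshot, not from *pudp:
    	 * a concurrent free could redirect the live pointer mid-walk */
    	pmdp = pmd_offset_lockless(pudp, pud, addr);
    	do {
    		pmd_t pmd = READ_ONCE(*pmdp);	/* snapshot this level once */

    		next = pmd_addr_end(addr, end);
    		if (pmd_none(pmd))
    			return 0;
    		/* ...act only on 'pmd'; recurse with (pmdp, pmd)... */
    	} while (pmdp++, addr = next, addr != end);
    	return 1;
    }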
If you call this function without + * 'write' set, you'd better be sure that you're ok with that ambiguity. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) @@ -2343,6 +2362,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. + * + * NOTE! We allow read-only gup_fast() here, but you'd better be + * careful about possible COW pages. You'll get _a_ COW page, but + * not necessarily the one you intended to get depending on what + * COW event happens after this. COW may break the page copy in a + * random direction. */ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && @@ -2415,10 +2440,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages, if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; + /* + * The FAST_GUP case requires FOLL_WRITE even for pure reads, + * because get_user_pages() may need to cause an early COW in + * order to avoid confusing the normal COW routines. So only + * targets that are already writable are safe to do by just + * looking at the page tables. + */ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) { local_irq_disable(); - gup_pgd_range(addr, end, gup_flags, pages, &nr); + gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr); local_irq_enable(); ret = nr; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a2d9162543a0..77b507021901 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -46,7 +46,20 @@ * Defrag is invoked by khugepaged hugepage allocations and by page faults * for all hugepage allocations. */ -unsigned long transparent_hugepage_flags __read_mostly = 0; +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_32) + unsigned long transparent_hugepage_flags __read_mostly = +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS + (1<<TRANSPARENT_HUGEPAGE_FLAG)| +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| +#endif + (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| + (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| + (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); +#else + unsigned long transparent_hugepage_flags __read_mostly = 0; +#endif static struct shrinker deferred_split_shrinker; @@ -713,7 +726,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) transparent_hugepage_use_zero_page()) { pgtable_t pgtable; struct page *zero_page; - bool set; vm_fault_t ret; pgtable = pte_alloc_one(vma->vm_mm); if (unlikely(!pgtable)) @@ -726,25 +738,25 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) } vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); ret = 0; - set = false; if (pmd_none(*vmf->pmd)) { ret = check_stable_address_space(vma->vm_mm); if (ret) { spin_unlock(vmf->ptl); + pte_free(vma->vm_mm, pgtable); } else if (userfaultfd_missing(vma)) { spin_unlock(vmf->ptl); + pte_free(vma->vm_mm, pgtable); ret = handle_userfault(vmf, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); } else { set_huge_zero_page(pgtable, vma->vm_mm, vma, haddr, vmf->pmd, zero_page); spin_unlock(vmf->ptl); - set = true; } - } else + } else { spin_unlock(vmf->ptl); - if (!set) pte_free(vma->vm_mm, pgtable); + } return ret; } gfp = alloc_hugepage_direct_gfpmask(vma); @@ -1445,13 +1457,12 @@ out_unlock: } /* - * FOLL_FORCE can write to even unwritable pmd's, but only - * after we've gone through a COW cycle and they are dirty. 
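The transparent_hugepage_flags hunk above simply composes the boot-time default at compile time: the Kconfig-selected enabling policy is OR-ed with the always-wanted defrag and zero-page bits, and non-x86 keeps the old all-zero default. Reduced to its shape, with hypothetical config switches and bit names:

    enum { F_ALWAYS, F_MADVISE, F_DEFRAG_MADV, F_DEFRAG_KHUGEPAGED, F_ZERO_PAGE };

    unsigned long thp_defaults =
    #ifdef CONFIG_THP_DEFAULT_ALWAYS	/* hypothetical switch */
    	(1UL << F_ALWAYS) |
    #endif
    #ifdef CONFIG_THP_DEFAULT_MADVISE	/* hypothetical switch */
    	(1UL << F_MADVISE) |
    #endif
    	(1UL << F_DEFRAG_MADV) |	/* unconditional tail bits */
    	(1UL << F_DEFRAG_KHUGEPAGED) |
    	(1UL << F_ZERO_PAGE);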
+ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, + * but only after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) { - return pmd_write(pmd) || - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); + return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); } struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, @@ -2166,7 +2177,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, put_page(page); add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; - } else if (is_huge_zero_pmd(*pmd)) { + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { /* * FIXME: Do we want to invalidate secondary mmu by calling * mmu_notifier_invalidate_range() see comments below inside @@ -2254,27 +2265,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - atomic_inc(&page[i]._mapcount); + if (!pmd_migration) + atomic_inc(&page[i]._mapcount); pte_unmap(pte); } - /* - * Set PG_double_map before dropping compound_mapcount to avoid - * false-negative page_mapped(). - */ - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_inc(&page[i]._mapcount); - } - - if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { - /* Last compound_mapcount is gone. */ - __dec_node_page_state(page, NR_ANON_THPS); - if (TestClearPageDoubleMap(page)) { - /* No need in mapcount reference anymore */ + if (!pmd_migration) { + /* + * Set PG_double_map before dropping compound_mapcount to avoid + * false-negative page_mapped(). + */ + if (compound_mapcount(page) > 1 && + !TestSetPageDoubleMap(page)) { for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_dec(&page[i]._mapcount); + atomic_inc(&page[i]._mapcount); } + + lock_page_memcg(page); + if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { + /* Last compound_mapcount is gone. */ + __dec_lruvec_page_state(page, NR_ANON_THPS); + if (TestClearPageDoubleMap(page)) { + /* No need in mapcount reference anymore */ + for (i = 0; i < HPAGE_PMD_NR; i++) + atomic_dec(&page[i]._mapcount); + } + } + unlock_page_memcg(page); } smp_wmb(); /* make pte visible before pmd */ @@ -2293,6 +2310,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, { spinlock_t *ptl; struct mmu_notifier_range range; + bool do_unlock_page = false; + pmd_t _pmd; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address & HPAGE_PMD_MASK, @@ -2305,11 +2324,41 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, * pmd against. Otherwise we can end up replacing wrong page. */ VM_BUG_ON(freeze && !page); - if (page && page != pmd_page(*pmd)) - goto out; + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); + if (page != pmd_page(*pmd)) + goto out; + } +repeat: if (pmd_trans_huge(*pmd)) { - page = pmd_page(*pmd); + if (!page) { + page = pmd_page(*pmd); + /* + * An anonymous page must be locked, to ensure that a + * concurrent reuse_swap_page() sees stable mapcount; + * but reuse_swap_page() is not used on shmem or file, + * and page lock must not be taken when zap_pmd_range() + * calls __split_huge_pmd() while i_mmap_lock is held. 
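The comment above motivates the retry dance in the hunk that follows: the pmd spinlock is already held but the anon page lock ranks above it, so on trylock failure the page is pinned, both locks are cycled in the correct order, and pmd_same() decides whether the world changed underneath. The bare pattern, with the split body left abstract:

    again:
    	spin_lock(ptl);
    	page = pmd_page(*pmd);			/* valid only under ptl */
    	if (!trylock_page(page)) {
    		pmd_t snap = *pmd;

    		get_page(page);			/* pin across the unlock */
    		spin_unlock(ptl);
    		lock_page(page);		/* may sleep */
    		spin_lock(ptl);
    		if (unlikely(!pmd_same(*pmd, snap))) {
    			unlock_page(page);	/* stale: drop and redo */
    			put_page(page);
    			goto again;
    		}
    		put_page(page);			/* ptl re-pins it for us */
    	}
    	/* ...split with both ptl and the page lock held... */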
+ */ + if (PageAnon(page)) { + if (unlikely(!trylock_page(page))) { + get_page(page); + _pmd = *pmd; + spin_unlock(ptl); + lock_page(page); + spin_lock(ptl); + if (unlikely(!pmd_same(*pmd, _pmd))) { + unlock_page(page); + put_page(page); + page = NULL; + goto repeat; + } + put_page(page); + } + do_unlock_page = true; + } + } if (PageMlocked(page)) clear_page_mlock(page); } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) @@ -2317,6 +2366,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, __split_huge_pmd_locked(vma, pmd, range.start, freeze); out: spin_unlock(ptl); + if (do_unlock_page) + unlock_page(page); /* * No need to double call mmu_notifier->invalidate_range() callback. * They are 3 cases to consider inside __split_huge_pmd_locked(): @@ -2488,7 +2539,7 @@ static void __split_huge_page_tail(struct page *head, int tail, } static void __split_huge_page(struct page *page, struct list_head *list, - pgoff_t end, unsigned long flags) + pgoff_t end) { struct page *head = compound_head(page); pg_data_t *pgdat = page_pgdat(head); @@ -2497,8 +2548,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, unsigned long offset = 0; int i; - lruvec = mem_cgroup_page_lruvec(head, pgdat); - /* complete memcg works before add pages to LRU */ mem_cgroup_split_huge_fixup(head); @@ -2509,6 +2558,10 @@ static void __split_huge_page(struct page *page, struct list_head *list, swap_cache = swap_address_space(entry); xa_lock(&swap_cache->i_pages); } + /* prevent PageLRU to go away from under us, and freeze lru stats */ + spin_lock(&pgdat->lru_lock); + + lruvec = mem_cgroup_page_lruvec(head, pgdat); for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { __split_huge_page_tail(head, i, lruvec, list); @@ -2529,8 +2582,9 @@ static void __split_huge_page(struct page *page, struct list_head *list, } ClearPageCompound(head); - - split_page_owner(head, HPAGE_PMD_ORDER); + spin_unlock(&pgdat->lru_lock); + /* Caller disabled irqs, so they are still disabled here */ + split_page_owner(head, HPAGE_PMD_NR); /* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { @@ -2547,7 +2601,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, xa_unlock(&head->mapping->i_pages); } - spin_unlock_irqrestore(&pgdat->lru_lock, flags); + local_irq_enable(); remap_page(head); @@ -2686,13 +2740,11 @@ bool can_split_huge_page(struct page *page, int *pextra_pins) int split_huge_page_to_list(struct page *page, struct list_head *list) { struct page *head = compound_head(page); - struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); struct deferred_split *ds_queue = get_deferred_split_queue(page); struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; int count, mapcount, extra_pins, ret; bool mlocked; - unsigned long flags; pgoff_t end; VM_BUG_ON_PAGE(is_huge_zero_page(head), head); @@ -2758,9 +2810,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (mlocked) lru_add_drain(); - /* prevent PageLRU to go away from under us, and freeze lru stats */ - spin_lock_irqsave(&pgdata->lru_lock, flags); - + /* block interrupt reentry in xa_lock and spinlock */ + local_irq_disable(); if (mapping) { XA_STATE(xas, &mapping->i_pages, page_index(head)); @@ -2790,7 +2841,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) } spin_unlock(&ds_queue->split_queue_lock); - __split_huge_page(page, list, end, flags); + __split_huge_page(page, list, end); if (PageSwapCache(head)) { swp_entry_t entry = { .val = 
page_private(head) }; @@ -2809,7 +2860,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) spin_unlock(&ds_queue->split_queue_lock); fail: if (mapping) xa_unlock(&mapping->i_pages); - spin_unlock_irqrestore(&pgdata->lru_lock, flags); + local_irq_enable(); remap_page(head); ret = -EBUSY; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e0afd582ca01..5253c67acb1d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -71,6 +71,21 @@ DEFINE_SPINLOCK(hugetlb_lock); static int num_fault_mutexes; struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; +static inline bool PageHugeFreed(struct page *head) +{ + return page_private(head + 4) == -1UL; +} + +static inline void SetPageHugeFreed(struct page *head) +{ + set_page_private(head + 4, -1UL); +} + +static inline void ClearPageHugeFreed(struct page *head) +{ + set_page_private(head + 4, 0); +} + /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); @@ -869,6 +884,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) list_move(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; + SetPageHugeFreed(page); } static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) @@ -886,6 +902,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) return NULL; list_move(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); + ClearPageHugeFreed(page); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return page; @@ -1175,14 +1192,16 @@ static inline void destroy_compound_gigantic_page(struct page *page, static void update_and_free_page(struct hstate *h, struct page *page) { int i; + struct page *subpage = page; if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; h->nr_huge_pages--; h->nr_huge_pages_node[page_to_nid(page)]--; - for (i = 0; i < pages_per_huge_page(h); i++) { - page[i].flags &= ~(1 << PG_locked | 1 << PG_error | + for (i = 0; i < pages_per_huge_page(h); + i++, subpage = mem_map_next(subpage, page, i)) { + subpage->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 1 << PG_dirty | 1 << PG_active | 1 << PG_private | 1 << PG_writeback); @@ -1217,12 +1236,11 @@ struct hstate *size_to_hstate(unsigned long size) */ bool page_huge_active(struct page *page) { - VM_BUG_ON_PAGE(!PageHuge(page), page); - return PageHead(page) && PagePrivate(&page[1]); + return PageHeadHuge(page) && PagePrivate(&page[1]); } /* never called for tail page */ -static void set_page_huge_active(struct page *page) +void set_page_huge_active(struct page *page) { VM_BUG_ON_PAGE(!PageHeadHuge(page), page); SetPagePrivate(&page[1]); @@ -1375,6 +1393,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) set_hugetlb_cgroup(page, NULL); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; + ClearPageHugeFreed(page); spin_unlock(&hugetlb_lock); } @@ -1602,6 +1621,7 @@ int dissolve_free_huge_page(struct page *page) { int rc = -EBUSY; +retry: /* Not to disrupt normal path by vainly holding hugetlb_lock */ if (!PageHuge(page)) return 0; @@ -1618,6 +1638,26 @@ int dissolve_free_huge_page(struct page *page) int nid = page_to_nid(head); if (h->free_huge_pages - h->resv_huge_pages == 0) goto out; + + /* + * We should make sure that the page is already on the free list + * when it is dissolved. 
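The PageHugeFreed helpers above park a free-list marker in the otherwise unused page_private of the fifth subpage, and the dissolve path that follows uses it to detect pages caught between allocation and enqueue. The retry shape, reduced (lock name kept, dissolve body and free-count checks elided):

    retry:
    	spin_lock(&hugetlb_lock);
    	if (PageHuge(page) && !page_count(page)) {
    		struct page *head = compound_head(page);

    		if (unlikely(!PageHugeFreed(head))) {
    			/* racing with prep_new_huge_page(): the window is
    			 * tiny, so retrying beats returning -EBUSY */
    			spin_unlock(&hugetlb_lock);
    			cond_resched();
    			goto retry;
    		}
    		/* ...actually dissolve, still under hugetlb_lock... */
    	}
    	spin_unlock(&hugetlb_lock);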
+ */ + if (unlikely(!PageHugeFreed(head))) { + spin_unlock(&hugetlb_lock); + cond_resched(); + + /* + * Theoretically, we should return -EBUSY when we + * encounter this race. In fact, we have a chance + * to successfully dissolve the page if we do a + * retry. Because the race window is quite small. + * If we seize this opportunity, it is an optimization + * for increasing the success rate of dissolving page. + */ + goto retry; + } + /* * Move PageHWPoison flag from head page to the raw error page, * which makes any subpages rather than the error page reusable. @@ -2774,8 +2814,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, return -ENOMEM; retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); - if (retval) + if (retval) { kobject_put(hstate_kobjs[hi]); + hstate_kobjs[hi] = NULL; + } return retval; } @@ -3082,6 +3124,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. + */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -3093,9 +3151,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -3139,9 +3196,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -3964,7 +4020,7 @@ retry: * handling userfault. Reacquire after handling * fault to make calling code simpler. */ - hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx); mutex_unlock(&hugetlb_fault_mutex_table[hash]); ret = handle_userfault(&vmf, VM_UFFD_MISSING); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -4022,7 +4078,7 @@ retry: * So we need to block hugepage fault by PG_hwpoison bit check. 
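proc_hugetlb_doulongvec_minmax() above avoids the classic sysctl race, two writers mutating the shared table->data, by copying the ctl_table onto the stack and pointing the copy at a per-call variable. A hypothetical handler using the same trick; read_value() and apply_value() are placeholders:

    static int my_ulong_handler(struct ctl_table *table, int write,
    			    void __user *buffer, size_t *length, loff_t *ppos)
    {
    	unsigned long tmp = read_value();	/* placeholder */
    	struct ctl_table dup = *table;		/* private stack copy */
    	int ret;

    	dup.data = &tmp;			/* never touch the shared table */
    	dup.maxlen = sizeof(tmp);
    	ret = proc_doulongvec_minmax(&dup, write, buffer, length, ppos);
    	if (!ret && write)
    		apply_value(tmp);		/* placeholder */
    	return ret;
    }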
*/ if (unlikely(PageHWPoison(page))) { - ret = VM_FAULT_HWPOISON | + ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } @@ -4092,7 +4148,7 @@ backout_unlocked: #ifdef CONFIG_SMP u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx, unsigned long address) + pgoff_t idx) { unsigned long key[2]; u32 hash; @@ -4100,7 +4156,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, key[0] = (unsigned long) mapping; key[1] = idx; - hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); + hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); return hash & (num_fault_mutexes - 1); } @@ -4110,7 +4166,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, * return 0 and avoid the hashing overhead. */ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx, unsigned long address) + pgoff_t idx) { return 0; } @@ -4154,7 +4210,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ - hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx); mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); @@ -4846,25 +4902,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - unsigned long check_addr = *start; + unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), + v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); - if (!(vma->vm_flags & VM_MAYSHARE)) + /* + * vma need span at least one aligned PUD size and the start,end range + * must at least partialy within it. + */ + if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || + (*end <= v_start) || (*start >= v_end)) return; - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { - unsigned long a_start = check_addr & PUD_MASK; - unsigned long a_end = a_start + PUD_SIZE; + /* Extend the range to be PUD aligned for a worst case scenario */ + if (*start > v_start) + *start = ALIGN_DOWN(*start, PUD_SIZE); - /* - * If sharing is possible, adjust start/end if necessary. - */ - if (range_in_vma(vma, a_start, a_end)) { - if (a_start < *start) - *start = a_start; - if (a_end > *end) - *end = a_end; - } - } + if (*end < v_end) + *end = ALIGN(*end, PUD_SIZE); } /* @@ -5016,8 +5070,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, { pgd_t *pgd; p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; + pud_t *pud, pud_entry; + pmd_t *pmd, pmd_entry; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) @@ -5027,17 +5081,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return NULL; pud = pud_offset(p4d, addr); - if (sz != PUD_SIZE && pud_none(*pud)) + pud_entry = READ_ONCE(*pud); + if (sz != PUD_SIZE && pud_none(pud_entry)) return NULL; /* hugepage or swap? */ - if (pud_huge(*pud) || !pud_present(*pud)) + if (pud_huge(pud_entry) || !pud_present(pud_entry)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); - if (sz != PMD_SIZE && pmd_none(*pmd)) + pmd_entry = READ_ONCE(*pmd); + if (sz != PMD_SIZE && pmd_none(pmd_entry)) return NULL; /* hugepage or swap? 
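The hash rework above drops the unused address argument: serialization is purely per (mapping, index), and because num_fault_mutexes is a power of two the jhash2() result is masked straight into a slot. How it is used in the fault paths above, condensed:

    u32 fault_hash(struct address_space *mapping, pgoff_t idx)
    {
    	unsigned long key[2] = { (unsigned long)mapping, idx };

    	/* power-of-two table size makes '&' a cheap modulo */
    	return jhash2((u32 *)key, sizeof(key) / sizeof(u32), 0) &
    	       (num_fault_mutexes - 1);
    }

    	/* all faults on the same (mapping, idx) funnel through one mutex */
    	hash = fault_hash(mapping, idx);
    	mutex_lock(&hugetlb_fault_mutex_table[hash]);
    	/* ...instantiate the page exactly once... */
    	mutex_unlock(&hugetlb_fault_mutex_table[hash]);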
*/ - if (pmd_huge(*pmd) || !pmd_present(*pmd)) + if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry)) return (pte_t *)pmd; return NULL; @@ -5124,9 +5180,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) { bool ret = true; - VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); - if (!page_huge_active(page) || !get_page_unless_zero(page)) { + if (!PageHeadHuge(page) || !page_huge_active(page) || + !get_page_unless_zero(page)) { ret = false; goto unlock; } diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 08b43de2383b..f36ffc090f5f 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -14,10 +14,10 @@ CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE) # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 -CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING +CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING +CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING +CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING obj-$(CONFIG_KASAN) := common.o init.o report.o obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 616f9dd82d12..76a80033e0b7 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -15,7 +15,6 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define DISABLE_BRANCH_PROFILING #include <linux/export.h> #include <linux/interrupt.h> diff --git a/mm/kasan/init.c b/mm/kasan/init.c index ce45c491ebcd..ee21e1c1150c 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -388,9 +388,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr, if (kasan_pte_table(*pmd)) { if (IS_ALIGNED(addr, PMD_SIZE) && - IS_ALIGNED(next, PMD_SIZE)) + IS_ALIGNED(next, PMD_SIZE)) { pmd_clear(pmd); - continue; + continue; + } } pte = pte_offset_kernel(pmd, addr); kasan_remove_pte_table(pte, addr, next); @@ -413,9 +414,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr, if (kasan_pmd_table(*pud)) { if (IS_ALIGNED(addr, PUD_SIZE) && - IS_ALIGNED(next, PUD_SIZE)) + IS_ALIGNED(next, PUD_SIZE)) { pud_clear(pud); - continue; + continue; + } } pmd = pmd_offset(pud, addr); pmd_base = pmd_offset(pud, 0); @@ -439,9 +441,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr, if (kasan_pud_table(*p4d)) { if (IS_ALIGNED(addr, P4D_SIZE) && - IS_ALIGNED(next, P4D_SIZE)) + IS_ALIGNED(next, P4D_SIZE)) { p4d_clear(p4d); - continue; + continue; + } } pud = pud_offset(p4d, addr); kasan_remove_pud_table(pud, addr, next); @@ -473,9 +476,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size) if (kasan_p4d_table(*pgd)) { if (IS_ALIGNED(addr, PGDIR_SIZE) && - IS_ALIGNED(next, PGDIR_SIZE)) + IS_ALIGNED(next, PGDIR_SIZE)) { pgd_clear(pgd); - continue; + continue; + } } p4d = p4d_offset(pgd, addr); @@ -499,7 +503,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size) ret = kasan_populate_early_shadow(shadow_start, shadow_end); if 
(ret) - kasan_remove_zero_shadow(shadow_start, - size >> KASAN_SHADOW_SCALE_SHIFT); + kasan_remove_zero_shadow(start, size); return ret; } diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 0e987c9ca052..caf4efd9888c 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -12,7 +12,6 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define DISABLE_BRANCH_PROFILING #include <linux/export.h> #include <linux/interrupt.h> diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a8a57bebb5fa..f0d7e6483ba3 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -54,6 +54,9 @@ enum scan_result { #define CREATE_TRACE_POINTS #include <trace/events/huge_memory.h> +static struct task_struct *khugepaged_thread __read_mostly; +static DEFINE_MUTEX(khugepaged_mutex); + /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly; static unsigned int khugepaged_pages_collapsed; @@ -401,7 +404,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, static inline int khugepaged_test_exit(struct mm_struct *mm) { - return atomic_read(&mm->mm_users) == 0; + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); } static bool hugepage_vma_check(struct vm_area_struct *vma, @@ -438,7 +441,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; @@ -832,6 +835,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait) static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) { + /* + * If the hpage allocated earlier was briefly exposed in page cache + * before collapse_file() failed, it is possible that racing lookups + * have not yet completed, and would then be unpleasantly surprised by + * finding the hpage reused for the same mapping at a different offset. + * Just release the previous allocation if there is any danger of that. + */ + if (*hpage && page_count(*hpage) > 1) { + put_page(*hpage); + *hpage = NULL; + } + if (!*hpage) *hpage = khugepaged_alloc_hugepage(wait); @@ -876,6 +891,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; + /* Anon VMA expected */ + if (!vma->anon_vma || vma->vm_ops) + return SCAN_VMA_CHECK; return 0; } @@ -1016,9 +1034,6 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. 
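The khugepaged_prealloc_page() hunk above guards against recycling a huge page that was briefly visible in the page cache: an elevated refcount means a racing lookup may still find the old incarnation, so the stale allocation is dropped rather than reused at a new offset. The check itself is tiny; the meaning of page_count() > 1 is the point:

    	if (*hpage && page_count(*hpage) > 1) {
    		put_page(*hpage);	/* racing lookups hold references */
    		*hpage = NULL;		/* force a fresh allocation instead */
    	}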
*/ down_write(&mm->mmap_sem); - result = SCAN_ANY_PROCESS; - if (!mmget_still_valid(mm)) - goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; @@ -1291,7 +1306,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { unsigned long haddr = addr & HPAGE_PMD_MASK; struct vm_area_struct *vma = find_vma(mm, haddr); - struct page *hpage = NULL; + struct page *hpage; pte_t *start_pte, *pte; pmd_t *pmd, _pmd; spinlock_t *ptl; @@ -1311,9 +1326,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) return; + hpage = find_lock_page(vma->vm_file->f_mapping, + linear_page_index(vma, haddr)); + if (!hpage) + return; + + if (!PageHead(hpage)) + goto drop_hpage; + pmd = mm_find_pmd(mm, haddr); if (!pmd) - return; + goto drop_hpage; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); @@ -1332,30 +1355,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) page = vm_normal_page(vma, addr, *pte); - if (!page || !PageCompound(page)) - goto abort; - - if (!hpage) { - hpage = compound_head(page); - /* - * The mapping of the THP should not change. - * - * Note that uprobe, debugger, or MAP_PRIVATE may - * change the page table, but the new page will - * not pass PageCompound() check. - */ - if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping)) - goto abort; - } - /* - * Confirm the page maps to the correct subpage. - * - * Note that uprobe, debugger, or MAP_PRIVATE may change - * the page table, but the new page will not pass - * PageCompound() check. + * Note that uprobe, debugger, or MAP_PRIVATE may change the + * page table, but the new page will not be a subpage of hpage. */ - if (WARN_ON(hpage + i != page)) + if (hpage + i != page) goto abort; count++; } @@ -1374,21 +1378,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) pte_unmap_unlock(start_pte, ptl); /* step 3: set proper refcount and mm_counters. */ - if (hpage) { + if (count) { page_ref_sub(hpage, count); add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); } /* step 4: collapse pmd */ ptl = pmd_lock(vma->vm_mm, pmd); - _pmd = pmdp_collapse_flush(vma, addr, pmd); + _pmd = pmdp_collapse_flush(vma, haddr, pmd); spin_unlock(ptl); mm_dec_nr_ptes(mm); pte_free(mm, pmd_pgtable(_pmd)); + +drop_hpage: + unlock_page(hpage); + put_page(hpage); return; abort: pte_unmap_unlock(start_pte, ptl); + goto drop_hpage; } static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) @@ -1417,6 +1426,7 @@ out: static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; + struct mm_struct *mm; unsigned long addr; pmd_t *pmd, _pmd; @@ -1445,7 +1455,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) continue; if (vma->vm_end < addr + HPAGE_PMD_SIZE) continue; - pmd = mm_find_pmd(vma->vm_mm, addr); + mm = vma->vm_mm; + pmd = mm_find_pmd(mm, addr); if (!pmd) continue; /* @@ -1455,17 +1466,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * mmap_sem while holding page lock. Fault path does it in * reverse order. Trylock is a way to avoid deadlock. 
*/ - if (down_write_trylock(&vma->vm_mm->mmap_sem)) { - spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); - /* assume page table is clear */ - _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); - up_write(&vma->vm_mm->mmap_sem); - mm_dec_nr_ptes(vma->vm_mm); - pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + if (down_write_trylock(&mm->mmap_sem)) { + if (!khugepaged_test_exit(mm)) { + spinlock_t *ptl = pmd_lock(mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + mm_dec_nr_ptes(mm); + pte_free(mm, pmd_pgtable(_pmd)); + } + up_write(&mm->mmap_sem); } else { /* Try again later */ - khugepaged_add_pte_mapped_thp(vma->vm_mm, addr); + khugepaged_add_pte_mapped_thp(mm, addr); } } i_mmap_unlock_write(mapping); @@ -1594,7 +1607,7 @@ static void collapse_file(struct mm_struct *mm, xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, - PAGE_SIZE); + end - index); /* drain pagevecs to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); @@ -1655,6 +1668,7 @@ static void collapse_file(struct mm_struct *mm, if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) { result = SCAN_PAGE_HAS_PRIVATE; + putback_lru_page(page); goto out_unlock; } @@ -2166,8 +2180,6 @@ static void set_recommended_min_free_kbytes(void) int start_stop_khugepaged(void) { - static struct task_struct *khugepaged_thread __read_mostly; - static DEFINE_MUTEX(khugepaged_mutex); int err = 0; mutex_lock(&khugepaged_mutex); @@ -2194,3 +2206,11 @@ fail: mutex_unlock(&khugepaged_mutex); return err; } + +void khugepaged_min_free_kbytes_update(void) +{ + mutex_lock(&khugepaged_mutex); + if (khugepaged_enabled() && khugepaged_thread) + set_recommended_min_free_kbytes(); + mutex_unlock(&khugepaged_mutex); +} diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 244607663363..312942d78405 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1947,7 +1947,7 @@ void __init kmemleak_init(void) create_object((unsigned long)__bss_start, __bss_stop - __bss_start, KMEMLEAK_GREY, GFP_ATOMIC); /* only register .data..ro_after_init if not within .data */ - if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata) create_object((unsigned long)__start_ro_after_init, __end_ro_after_init - __start_ro_after_init, KMEMLEAK_GREY, GFP_ATOMIC); diff --git a/mm/ksm.c b/mm/ksm.c index 7905934cd3ad..e486c54d921b 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2112,8 +2112,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) down_read(&mm->mmap_sem); vma = find_mergeable_vma(mm, rmap_item->address); - err = try_to_merge_one_page(vma, page, - ZERO_PAGE(rmap_item->address)); + if (vma) { + err = try_to_merge_one_page(vma, page, + ZERO_PAGE(rmap_item->address)); + } else { + /* + * If the vma is out of date, we do not need to + * continue. 
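retract_page_tables() above runs with the page lock held while the fault path takes mmap_sem first, so it may only ever trylock mmap_sem; on failure the address is queued for khugepaged to retry later without the page lock. The reworked hunk also revalidates the mm under the lock before touching page tables. Skeleton of the idiom; the exit test and queue helper are stand-ins:

    	if (down_write_trylock(&mm->mmap_sem)) {
    		if (!mm_is_exiting(mm))		/* re-check under the lock */
    			collapse_pmd_here(mm, addr);	/* stand-in */
    		up_write(&mm->mmap_sem);
    	} else {
    		/* taking mmap_sem outright here would invert the lock
    		 * order against the fault path: defer instead */
    		queue_pte_mapped_thp(mm, addr);		/* stand-in */
    	}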
+ */ + err = 0; + } up_read(&mm->mmap_sem); /* * In case of failure, the page was not really empty, so we diff --git a/mm/list_lru.c b/mm/list_lru.c index 0f1f6b06b7f3..d12c1943f6f3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -544,7 +544,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, struct list_lru_node *nlru = &lru->node[nid]; int dst_idx = dst_memcg->kmemcg_id; struct list_lru_one *src, *dst; - bool set; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, @@ -556,11 +555,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, dst = list_lru_from_memcg_idx(nlru, dst_idx); list_splice_init(&src->list, &dst->list); - set = (!dst->nr_items && src->nr_items); - dst->nr_items += src->nr_items; - if (set) + + if (src->nr_items) { + dst->nr_items += src->nr_items; memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); - src->nr_items = 0; + src->nr_items = 0; + } spin_unlock_irq(&nlru->lock); } diff --git a/mm/maccess.c b/mm/maccess.c index 2d3c3d01064c..3ca8d97e5010 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -43,11 +43,20 @@ probe_write_common(void __user *dst, const void *src, size_t size) * do_page_fault() doesn't attempt to take mmap_sem. This makes * probe_kernel_read() suitable for use within regions where the caller * already holds mmap_sem, or other locks which nest inside mmap_sem. + * + * probe_kernel_read_strict() is the same as probe_kernel_read() except for + * the case where architectures have non-overlapping user and kernel address + * ranges: probe_kernel_read_strict() will additionally return -EFAULT for + * probing memory on a user address range where probe_user_read() is supposed + * to be used instead. */ long __weak probe_kernel_read(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_read"))); +long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size) + __attribute__((alias("__probe_kernel_read"))); + long __probe_kernel_read(void *dst, const void *src, size_t size) { long ret; @@ -157,8 +166,22 @@ EXPORT_SYMBOL_GPL(probe_user_write); * * If @count is smaller than the length of the string, copies @count-1 bytes, * sets the last byte of @dst buffer to NUL and returns @count. + * + * strncpy_from_unsafe_strict() is the same as strncpy_from_unsafe() except + * for the case where architectures have non-overlapping user and kernel address + * ranges: strncpy_from_unsafe_strict() will additionally return -EFAULT for + * probing memory on a user address range where strncpy_from_unsafe_user() is + * supposed to be used instead. 
*/ -long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) + +long __weak strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) + __attribute__((alias("__strncpy_from_unsafe"))); + +long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, + long count) + __attribute__((alias("__strncpy_from_unsafe"))); + +long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) { mm_segment_t old_fs = get_fs(); const void *src = unsafe_addr; diff --git a/mm/madvise.c b/mm/madvise.c index 418a8076112d..bf85383f1247 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -300,9 +300,9 @@ static long madvise_willneed(struct vm_area_struct *vma, */ *prev = NULL; /* tell sys_madvise we drop mmap_sem */ get_file(file); - up_read(¤t->mm->mmap_sem); offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + up_read(¤t->mm->mmap_sem); vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); fput(file); down_read(¤t->mm->mmap_sem); @@ -392,9 +392,9 @@ huge_unlock: return 0; } +regular_page: if (pmd_trans_unstable(pmd)) return 0; -regular_page: #endif tlb_change_page_size(tlb, PAGE_SIZE); orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); diff --git a/mm/memblock.c b/mm/memblock.c index c4b16cae2bc9..11f6ae37d669 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -257,14 +257,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, * * Find @size free area aligned to @align in the specified range and node. * - * When allocation direction is bottom-up, the @start should be greater - * than the end of the kernel image. Otherwise, it will be trimmed. The - * reason is that we want the bottom-up allocation just near the kernel - * image so it is highly likely that the allocated memory and the kernel - * will reside in the same node. - * - * If bottom-up allocation failed, will try to allocate memory top-down. - * * Return: * Found address on success, 0 on failure. */ @@ -273,8 +265,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, phys_addr_t end, int nid, enum memblock_flags flags) { - phys_addr_t kernel_end, ret; - /* pump up @end */ if (end == MEMBLOCK_ALLOC_ACCESSIBLE || end == MEMBLOCK_ALLOC_KASAN) @@ -283,40 +273,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, /* avoid allocating the first page */ start = max_t(phys_addr_t, start, PAGE_SIZE); end = max(start, end); - kernel_end = __pa_symbol(_end); - /* - * try bottom-up allocation only when bottom-up mode - * is set and @end is above the kernel image. - */ - if (memblock_bottom_up() && end > kernel_end) { - phys_addr_t bottom_up_start; - - /* make sure we will allocate above the kernel */ - bottom_up_start = max(start, kernel_end); - - /* ok, try bottom-up allocation first */ - ret = __memblock_find_range_bottom_up(bottom_up_start, end, - size, align, nid, flags); - if (ret) - return ret; - - /* - * we always limit bottom-up allocation above the kernel, - * but top-down allocation doesn't have the limit, so - * retrying top-down allocation may succeed when bottom-up - * allocation failed. - * - * bottom-up allocation is expected to be fail very rarely, - * so we use WARN_ONCE() here to see the stack trace if - * fail happens. 
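The probe_kernel_read_strict()/strncpy_from_unsafe_strict() additions above use the weak-alias idiom: both entry points resolve to the single default body, and an architecture with disjoint user and kernel address ranges overrides just the _strict symbol with a stronger definition. The idiom in isolation, with hypothetical names:

    /* the single shared implementation */
    long __my_probe_read(void *dst, const void *src, size_t size)
    {
    	/* ...common, address-range-agnostic copy... */
    	return 0;
    }

    /* both names default to the body above... */
    long __weak my_probe_read(void *dst, const void *src, size_t size)
    	__attribute__((alias("__my_probe_read")));
    long __weak my_probe_read_strict(void *dst, const void *src, size_t size)
    	__attribute__((alias("__my_probe_read")));

    /* ...until an arch supplies a non-weak my_probe_read_strict() that
     * rejects user-range addresses with -EFAULT */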
- */ - WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), - "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); - } - - return __memblock_find_range_top_down(start, end, size, align, nid, - flags); + if (memblock_bottom_up()) + return __memblock_find_range_bottom_up(start, end, size, align, + nid, flags); + else + return __memblock_find_range_top_down(start, end, size, align, + nid, flags); } /** diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9db4f1ae0c9a..114d054b2bad 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -63,8 +63,9 @@ #include <net/sock.h> #include <net/ip.h> #include "slab.h" - +#include <linux/sli.h> #include <linux/uaccess.h> +#include <linux/sli.h> #include <trace/events/vmscan.h> @@ -74,6 +75,9 @@ EXPORT_SYMBOL(memory_cgrp_subsys); struct mem_cgroup *root_mem_cgroup __read_mostly; #define MEM_CGROUP_RECLAIM_RETRIES 5 +#define DEFAULT_PAGE_RECLAIM_RATIO 5 +#define PAGECACHE_MAX_RATIO_MIN 5 +#define PAGECACHE_MAX_RATIO_MAX 100 /* Socket memory accounting disabled? */ static bool cgroup_memory_nosocket; @@ -231,21 +235,6 @@ enum res_type { /* Used for OOM nofiier */ #define OOM_CONTROL (0) -/* - * Iteration constructs for visiting all cgroups (under a tree). If - * loops are exited prematurely (break), mem_cgroup_iter_break() must - * be used for reference counting. - */ -#define for_each_mem_cgroup_tree(iter, root) \ - for (iter = mem_cgroup_iter(root, NULL, NULL); \ - iter != NULL; \ - iter = mem_cgroup_iter(root, iter, NULL)) - -#define for_each_mem_cgroup(iter) \ - for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ - iter != NULL; \ - iter = mem_cgroup_iter(NULL, iter, NULL)) - static inline bool should_force_charge(void) { return tsk_is_oom_victim(current) || fatal_signal_pending(current) || @@ -709,6 +698,13 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) x = 0; } __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); + + if (idx == MEMCG_CACHE) { + if (val > 0) + page_counter_charge(&memcg->pagecache, val); + else + page_counter_uncharge(&memcg->pagecache, -val); + } } static struct mem_cgroup_per_node * @@ -776,8 +772,13 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) rcu_read_lock(); memcg = memcg_from_slab_page(page); - /* Untracked pages have no memcg, no lruvec. Update only the node */ - if (!memcg || memcg == root_mem_cgroup) { + /* + * Untracked pages have no memcg, no lruvec. Update only the + * node. If we reparent the slab objects to the root memcg, + * when we free the slab object, we need to update the per-memcg + * vmstats to keep it correct for the root memcg. + */ + if (!memcg) { __mod_node_page_state(pgdat, idx, val); } else { lruvec = mem_cgroup_lruvec(pgdat, memcg); @@ -1256,10 +1257,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd } memcg = page->mem_cgroup; - /* - * Swapcache readahead pages are added to the LRU - and - * possibly migrated - before they are charged. 
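The __mod_memcg_state() hook above is the accounting backbone of the new pagecache limit: every MEMCG_CACHE delta is mirrored into a dedicated page_counter, so the limit code can read current usage in O(1) instead of summing per-cpu stats. The mirroring in isolation (page_counter_uncharge() takes a positive count, hence the negation):

    	if (idx == MEMCG_CACHE) {
    		if (val > 0)
    			page_counter_charge(&memcg->pagecache, val);
    		else
    			page_counter_uncharge(&memcg->pagecache, -val);
    	}

page_counter_read(&memcg->pagecache) then yields the figure that mem_cgroup_shrink_pagecache() and the pagecache.current file report further down.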
- */ + VM_WARN_ON_ONCE_PAGE(!memcg, page); if (!memcg) memcg = root_mem_cgroup; @@ -2108,6 +2106,12 @@ again: if (unlikely(!memcg)) return NULL; +#ifdef CONFIG_PROVE_LOCKING + local_irq_save(flags); + might_lock(&memcg->move_lock); + local_irq_restore(flags); +#endif + if (atomic_read(&memcg->moving_account) <= 0) return memcg; @@ -2441,6 +2445,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg, usage = page_counter_read(&memcg->memory); high = READ_ONCE(memcg->high); + if (usage <= high) + continue; + /* * Prevent division by 0 in overage calculation by acting as if * it was a threshold of 1 page @@ -2499,10 +2506,12 @@ void mem_cgroup_handle_over_high(void) unsigned long pflags; unsigned int nr_pages = current->memcg_nr_pages_over_high; struct mem_cgroup *memcg; + u64 start; if (likely(!nr_pages)) return; + sli_memlat_stat_start(&start); memcg = get_mem_cgroup_from_mm(current->mm); reclaim_high(memcg, nr_pages, GFP_KERNEL); current->memcg_nr_pages_over_high = 0; @@ -2532,6 +2541,7 @@ void mem_cgroup_handle_over_high(void) psi_memstall_leave(&pflags); out: + sli_memlat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); css_put(&memcg->css); } @@ -2546,6 +2556,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool may_swap = true; bool drained = false; enum oom_status oom_status; + u64 start; if (mem_cgroup_is_root(memcg)) return 0; @@ -2605,9 +2616,11 @@ retry: memcg_memory_event(mem_over_limit, MEMCG_MAX); + sli_memlat_stat_start(&start); nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, gfp_mask, may_swap); + sli_memlat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; @@ -2892,8 +2905,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, return; cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) + if (!cw) { + css_put(&memcg->css); return; + } cw->memcg = memcg; cw->cachep = cachep; @@ -3162,6 +3177,8 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, } #endif +static void pagecache_set_limit(struct mem_cgroup *memcg); + static DEFINE_MUTEX(memcg_max_mutex); static int mem_cgroup_resize_max(struct mem_cgroup *memcg, @@ -3212,8 +3229,11 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg, } } while (true); - if (!ret && enlarge) - memcg_oom_recover(memcg); + if (!ret) { + if (enlarge) + memcg_oom_recover(memcg); + pagecache_set_limit(memcg); + } return ret; } @@ -3398,6 +3418,215 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, return retval; } +#define MIN_PAGECACHE_PAGES 16 + +unsigned int vm_pagecache_limit_retry_times __read_mostly = MEM_CGROUP_RECLAIM_RETRIES; +void mem_cgroup_shrink_pagecache(struct mem_cgroup *memcg, gfp_t gfp_mask) +{ + unsigned long pages_used, pages_max, pages_reclaimed, goal_pages_used, pre_used; + unsigned int retry_times = 0; + unsigned int limit_retry_times; + u32 max_ratio; + + if (!memcg || mem_cgroup_is_root(memcg)) + return; + + max_ratio = READ_ONCE(memcg->pagecache_max_ratio); + if (max_ratio == PAGECACHE_MAX_RATIO_MAX) + return; + + pages_max = READ_ONCE(memcg->pagecache.max); + if (pages_max == PAGE_COUNTER_MAX || vm_pagecache_limit_global) + return; + + if (gfp_mask & __GFP_ATOMIC) + return; + + if (unlikely(should_force_charge())) + return; + + if (unlikely(current->flags & PF_MEMALLOC)) + return; + + if (unlikely(task_in_memcg_oom(current))) + return; + + if (!gfpflags_allow_blocking(gfp_mask)) + return; + + pages_used = 
page_counter_read(&memcg->pagecache); + limit_retry_times = READ_ONCE(vm_pagecache_limit_retry_times); + goal_pages_used = (100 - READ_ONCE(memcg->pagecache_reclaim_ratio)) * pages_max / 100; + goal_pages_used = max_t(unsigned long, MIN_PAGECACHE_PAGES, goal_pages_used); + + if (pages_used > pages_max) + memcg_memory_event(memcg, MEMCG_PAGECACHE_MAX); + + while (pages_used > goal_pages_used) { + if (fatal_signal_pending(current)) + break; + + pre_used = pages_used; + pages_reclaimed = shrink_page_cache_memcg(gfp_mask, memcg, pages_used - goal_pages_used); + + if (pages_reclaimed == 0) { + congestion_wait(BLK_RW_ASYNC, HZ/10); + retry_times++; + } else + retry_times = 0; + + if (retry_times > limit_retry_times) { + memcg_memory_event(memcg, MEMCG_PAGECACHE_OOM); + mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0); + break; + } + + pages_used = page_counter_read(&memcg->pagecache); + cond_resched(); + } +} + +static u64 pagecache_reclaim_ratio_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->pagecache_reclaim_ratio; +} + +static ssize_t pagecache_reclaim_ratio_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + u64 reclaim_ratio; + int ret; + unsigned long nr_pages; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtou64(buf, 0, &reclaim_ratio); + if (ret) + return ret; + + if ((reclaim_ratio > 0) && (reclaim_ratio < 100)) { + memcg->pagecache_reclaim_ratio = reclaim_ratio; + mem_cgroup_shrink_pagecache(memcg, GFP_KERNEL); + return nbytes; + } else if (reclaim_ratio == 100) { + nr_pages = page_counter_read(&memcg->pagecache); + + //try reclaim once + shrink_page_cache_memcg(GFP_KERNEL, memcg, nr_pages); + return nbytes; + } + + return -EINVAL; +} + +static u64 pagecache_current_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return (u64)page_counter_read(&memcg->pagecache) * PAGE_SIZE; +} + +static u64 memory_pagecache_max_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->pagecache_max_ratio; +} + +unsigned long mem_cgroup_pagecache_get_reclaim_pages(struct mem_cgroup *memcg) +{ + unsigned long goal_pages_used, pages_used, pages_max; + + if ((!memcg) || (mem_cgroup_is_root(memcg))) + return 0; + + pages_max = READ_ONCE(memcg->pagecache.max); + if (pages_max == PAGE_COUNTER_MAX) + return 0; + + goal_pages_used = (100 - READ_ONCE(memcg->pagecache_reclaim_ratio)) * pages_max / 100; + goal_pages_used = max_t(unsigned long, MIN_PAGECACHE_PAGES, goal_pages_used); + pages_used = page_counter_read(&memcg->pagecache); + + return pages_used > pages_max ? 
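The shrink loop above aims below the ceiling, not at it: the target is goal = (100 - reclaim_ratio) * max / 100, floored at MIN_PAGECACHE_PAGES, and reclaim escalates to memcg OOM only after vm_pagecache_limit_retry_times consecutive zero-progress passes. A worked example of the arithmetic with the default ratio of 5:

    	/* max = 100000 pages, reclaim_ratio = 5 (DEFAULT_PAGE_RECLAIM_RATIO) */
    	unsigned long goal = (100 - 5) * 100000UL / 100;   /* = 95000 pages */

    	/* with usage at 98000, still below max but above goal, the loop
    	 * asks reclaim for 3000 pages; that 5% headroom keeps the very
    	 * next page-cache charge from re-entering reclaim immediately */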
pages_used - goal_pages_used : 0; +} + +static void pagecache_set_limit(struct mem_cgroup *memcg) +{ + unsigned long max, pre, pages_max; + u32 max_ratio; + + pages_max = READ_ONCE(memcg->memory.max); + max_ratio = READ_ONCE(memcg->pagecache_max_ratio); + max = ((pages_max * max_ratio) / 100); + xchg(&memcg->pagecache.max, max); +} + +static ssize_t memory_pagecache_max_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; + unsigned long max, pages_reclaimed; + int err, ret = 0; + u64 max_ratio; + + if (!buf) + return -EINVAL; + + ret = kstrtou64(buf, 0, &max_ratio); + if (ret) + return ret; + + if (max_ratio > PAGECACHE_MAX_RATIO_MAX || + max_ratio < PAGECACHE_MAX_RATIO_MIN) + return -EINVAL; + + if (READ_ONCE(memcg->memory.max) == PAGE_COUNTER_MAX) { + printk(KERN_WARNING "pagecache limit not allowed for cgroup without memory limit set\n"); + return -EPERM; + } + + memcg->pagecache_max_ratio = max_ratio; + pagecache_set_limit(memcg); + max = READ_ONCE(memcg->pagecache.max); + + for (;;) { + unsigned long pages_used = page_counter_read(&memcg->pagecache); + + if (pages_used <= max) + break; + + if (fatal_signal_pending(current)) { + ret = -EINTR; + break; + } + + if (nr_reclaims) { + pages_reclaimed = shrink_page_cache_memcg(GFP_KERNEL, memcg, mem_cgroup_pagecache_get_reclaim_pages(memcg)); + if (pages_reclaimed == 0) { + congestion_wait(BLK_RW_ASYNC, HZ/10); + nr_reclaims--; + } + continue; + } + + memcg_memory_event(memcg, MEMCG_OOM); + if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) + return -EINVAL; + } + + return ret ? : nbytes; +} + static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) { unsigned long val; @@ -3843,10 +4072,12 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v) for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); seq_printf(m, "%s=%lu", stat->name, nr); + cond_resched(); for_each_node_state(nid, N_MEMORY) { nr = mem_cgroup_node_nr_lru_pages(memcg, nid, stat->lru_mask); seq_printf(m, " N%d=%lu", nid, nr); + cond_resched(); } seq_putc(m, '\n'); } @@ -3855,14 +4086,18 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v) struct mem_cgroup *iter; nr = 0; - for_each_mem_cgroup_tree(iter, memcg) + for_each_mem_cgroup_tree(iter, memcg) { nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); + cond_resched(); + } seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); for_each_node_state(nid, N_MEMORY) { nr = 0; - for_each_mem_cgroup_tree(iter, memcg) + for_each_mem_cgroup_tree(iter, memcg) { nr += mem_cgroup_node_nr_lru_pages( iter, nid, stat->lru_mask); + cond_resched(); + } seq_printf(m, " N%d=%lu", nid, nr); } seq_putc(m, '\n'); @@ -4867,9 +5102,8 @@ static unsigned long memcg_nr_lru_pages(struct mem_cgroup *memcg, return nr; } -static int mem_cgroup_meminfo_read(struct seq_file *m, void *v) +static int mem_cgroup_meminfo_read_comm(struct seq_file *m, void *v, struct mem_cgroup *memcg) { - struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); u64 mem_limit, mem_usage; u64 mem_swap, mem_swap_usage; unsigned long mem_cache, mem_swap_cache; @@ -5053,15 +5287,26 @@ static int mem_cgroup_meminfo_read(struct seq_file *m, void *v) return 0; } +int mem_cgroupfs_meminfo_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_task(current); + return mem_cgroup_meminfo_read_comm(m, v, memcg); +} + 
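mem_cgroup_meminfo_read_comm() above (and its vmstat twin further down) follow a simple refactor: the seq_file rendering body takes the memcg explicitly, and thin wrappers differ only in how they resolve the subject, from the cgroup file's css or from the calling task. The shape, with the rendering elided:

    static int meminfo_render(struct seq_file *m, struct mem_cgroup *memcg)
    {
    	/* ...all the seq_printf() work, written once... */
    	return 0;
    }

    /* cgroup file: subject comes from the file's css */
    static int cgroup_meminfo_show(struct seq_file *m, void *v)
    {
    	return meminfo_render(m, mem_cgroup_from_css(seq_css(m)));
    }

    /* per-task /proc view: subject is the reader's own memcg */
    int task_meminfo_show(struct seq_file *m, void *v)
    {
    	return meminfo_render(m, mem_cgroup_from_task(current));
    }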
+static int mem_cgroup_meminfo_read(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + return mem_cgroup_meminfo_read_comm(m, v, memcg); +} + #define NR_VM_WRITEBACK_STAT_ITEMS 2 extern const char * const vmstat_text[]; extern unsigned int vmstat_text_size; -static int mem_cgroup_vmstat_read(struct seq_file *m, void *vv) +static int mem_cgroup_vmstat_read_comm(struct seq_file *m, void *vv, struct mem_cgroup *memcg) { unsigned long *v1, *v; int i, stat_items_size; - struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); u64 mem_limit, mem_usage; mem_limit = memcg->memory.max; @@ -5122,7 +5367,54 @@ static int mem_cgroup_vmstat_read(struct seq_file *m, void *vv) return 0; } +static int mem_cgroup_sli_max_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + struct cgroup *cgrp; + cgrp = memcg->css.cgroup; + + return sli_memlat_max_show(m, cgrp); +} + +static int mem_cgroup_sli_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + struct cgroup *cgrp; + cgrp = memcg->css.cgroup; + + return sli_memlat_stat_show(m, cgrp); +} + +int mem_cgroupfs_vmstat_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_task(current); + return mem_cgroup_vmstat_read_comm(m, v, memcg); +} + +static int mem_cgroup_vmstat_read(struct seq_file *m, void *vv) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + return mem_cgroup_vmstat_read_comm(m, vv, memcg); +} + static struct cftype mem_cgroup_legacy_files[] = { + { + .name = "pagecache.reclaim_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = pagecache_reclaim_ratio_read, + .write = pagecache_reclaim_ratio_write, + }, + { + .name = "pagecache.max_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = memory_pagecache_max_read, + .write = memory_pagecache_max_write, + }, + { + .name = "pagecache.current", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = pagecache_current_read, + }, { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), @@ -5263,6 +5555,16 @@ static struct cftype mem_cgroup_legacy_files[] = { .write = mem_cgroup_reset, .read_u64 = mem_cgroup_read_u64, }, + { + .name = "sli", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = mem_cgroup_sli_show, + }, + { + .name = "sli_max", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = mem_cgroup_sli_max_show, + }, { }, /* terminate */ }; @@ -5413,19 +5715,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void) unsigned int size; int node; int __maybe_unused i; + long error = -ENOMEM; size = sizeof(struct mem_cgroup); size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); memcg = kzalloc(size, GFP_KERNEL); if (!memcg) - return NULL; + return ERR_PTR(error); memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX, GFP_KERNEL); - if (memcg->id.id < 0) + if (memcg->id.id < 0) { + error = memcg->id.id; goto fail; + } memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); if (!memcg->vmstats_local) @@ -5470,7 +5775,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) fail: mem_cgroup_id_remove(memcg); __mem_cgroup_free(memcg); - return NULL; + return ERR_PTR(error); } static struct cgroup_subsys_state * __ref @@ -5481,11 +5786,13 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) long error = -ENOMEM; memcg = mem_cgroup_alloc(); - if (!memcg) - return ERR_PTR(error); + if (IS_ERR(memcg)) + return ERR_CAST(memcg); memcg->high = PAGE_COUNTER_MAX; memcg->soft_limit = PAGE_COUNTER_MAX; + 
memcg->pagecache_reclaim_ratio = DEFAULT_PAGE_RECLAIM_RATIO; + memcg->pagecache_max_ratio = PAGECACHE_MAX_RATIO_MAX; if (parent) { memcg->swappiness = mem_cgroup_swappiness(parent); memcg->oom_kill_disable = parent->oom_kill_disable; @@ -5497,12 +5804,14 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) page_counter_init(&memcg->memsw, &parent->memsw); page_counter_init(&memcg->kmem, &parent->kmem); page_counter_init(&memcg->tcpmem, &parent->tcpmem); + page_counter_init(&memcg->pagecache, &parent->pagecache); } else { page_counter_init(&memcg->memory, NULL); page_counter_init(&memcg->swap, NULL); page_counter_init(&memcg->memsw, NULL); page_counter_init(&memcg->kmem, NULL); page_counter_init(&memcg->tcpmem, NULL); + page_counter_init(&memcg->pagecache, NULL); /* * Deeper hierarchy with use_hierarchy == false doesn't make * much sense so let the cgroup subsystem know about this @@ -5532,7 +5841,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) fail: mem_cgroup_id_remove(memcg); mem_cgroup_free(memcg); - return ERR_PTR(-ENOMEM); + return ERR_PTR(error); } static int mem_cgroup_css_online(struct cgroup_subsys_state *css) @@ -5635,6 +5944,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); + page_counter_set_max(&memcg->pagecache, PAGE_COUNTER_MAX); page_counter_set_min(&memcg->memory, 0); page_counter_set_low(&memcg->memory, 0); memcg->high = PAGE_COUNTER_MAX; @@ -5705,7 +6015,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, struct page *page = NULL; swp_entry_t ent = pte_to_swp_entry(ptent); - if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) + if (!(mc.flags & MOVE_ANON)) return NULL; /* @@ -5724,6 +6034,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, return page; } + if (non_swap_entry(ent)) + return NULL; + /* * Because lookup_swap_cache() updates some statistics counter, * we call find_get_page() with swapper_space directly. @@ -5796,7 +6109,6 @@ static int mem_cgroup_move_account(struct page *page, { struct lruvec *from_vec, *to_vec; struct pglist_data *pgdat; - unsigned long flags; unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; int ret; bool anon; @@ -5823,18 +6135,13 @@ static int mem_cgroup_move_account(struct page *page, from_vec = mem_cgroup_lruvec(pgdat, from); to_vec = mem_cgroup_lruvec(pgdat, to); - spin_lock_irqsave(&from->move_lock, flags); + lock_page_memcg(page); if (!anon && page_mapped(page)) { __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); } - /* - * move_lock grabbed above and caller set from->moving_account, so - * mod_memcg_page_state will serialize updates to PageDirty. - * So mapping should be stable for dirty pages. - */ if (!anon && PageDirty(page)) { struct address_space *mapping = page_mapping(page); @@ -5850,15 +6157,23 @@ static int mem_cgroup_move_account(struct page *page, } /* + * All state has been migrated, let's switch to the new memcg. + * * It is safe to change page->mem_cgroup here because the page - * is referenced, charged, and isolated - we can't race with - * uncharging, charging, migration, or LRU putback. + * is referenced, charged, isolated, and locked: we can't race + * with (un)charging, migration, LRU putback, or anything else + * that would rely on a stable page->mem_cgroup.
+ * + * Note that lock_page_memcg is a memcg lock, not a page lock, + * to save space. As soon as we switch page->mem_cgroup to a + * new memcg that isn't locked, the above state can change + * concurrently again. Make sure we're truly done with it. */ + smp_mb(); - /* caller should have done css_get */ - page->mem_cgroup = to; + page->mem_cgroup = to; /* caller should have done css_get */ - spin_unlock_irqrestore(&from->move_lock, flags); + __unlock_page_memcg(from); ret = 0; @@ -6077,7 +6392,6 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - mem_cgroup_id_get_many(mc.to, mc.moved_swap); css_put_many(&mc.to->css, mc.moved_swap); mc.moved_swap = 0; @@ -6268,7 +6582,8 @@ put: /* get_mctgt_type() gets the page */ ent = target.ent; if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { mc.precharge--; - /* we fixup refcnts and charges later. */ + mem_cgroup_id_get_many(mc.to, 1); + /* we fixup other refcnts and charges later. */ mc.moved_swap++; } break; @@ -6514,6 +6829,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, break; } + pagecache_set_limit(memcg); + memcg_wb_domain_size_changed(memcg); return nbytes; } @@ -6526,6 +6843,8 @@ static void __memory_events_show(struct seq_file *m, atomic_long_t *events) seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); seq_printf(m, "oom_kill %lu\n", atomic_long_read(&events[MEMCG_OOM_KILL])); + seq_printf(m, "pagecache_max %lu\n", atomic_long_read(&events[MEMCG_PAGECACHE_MAX])); + seq_printf(m, "pagecache_oom %lu\n", atomic_long_read(&events[MEMCG_PAGECACHE_OOM])); } static int memory_events_show(struct seq_file *m, void *v) @@ -6589,6 +6908,23 @@ static ssize_t memory_oom_group_write(struct kernfs_open_file *of, } static struct cftype memory_files[] = { + { + .name = "pagecache.reclaim_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = pagecache_reclaim_ratio_read, + .write = pagecache_reclaim_ratio_write, + }, + { + .name = "pagecache.max_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = memory_pagecache_max_read, + .write = memory_pagecache_max_write, + }, + { + .name = "pagecache.current", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = pagecache_current_read, + }, { .name = "current", .flags = CFTYPE_NOT_ON_ROOT, @@ -7142,8 +7478,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) if (newpage->mem_cgroup) return; - /* Swapcache readahead pages can get replaced before being charged */ memcg = oldpage->mem_cgroup; + VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); if (!memcg) return; @@ -7347,12 +7683,14 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); + if (mem_cgroup_disabled()) + return; if (!do_memsw_account()) return; memcg = page->mem_cgroup; - /* Readahead page, never charged */ + VM_WARN_ON_ONCE_PAGE(!memcg, page); if (!memcg) return; @@ -7413,12 +7751,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg; unsigned short oldid; + if (mem_cgroup_disabled()) + return 0; if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account) return 0; memcg = page->mem_cgroup; - /* Readahead page, never charged */ + VM_WARN_ON_ONCE_PAGE(!memcg, page); if (!memcg) return 0; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 3151c87dff73..63e188a7d40b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1215,7 +1215,7 @@ static int memory_failure_dev_pagemap(unsigned
long pfn, int flags, * communicated in siginfo, see kill_proc() */ start = (page->index << PAGE_SHIFT) & ~(size - 1); - unmap_mapping_range(page->mapping, start, start + size, 0); + unmap_mapping_range(page->mapping, start, size, 0); } kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags); rc = 0; @@ -1482,7 +1482,7 @@ static void memory_failure_work_func(struct work_struct *work) unsigned long proc_flags; int gotten; - mf_cpu = this_cpu_ptr(&memory_failure_cpu); + mf_cpu = container_of(work, struct memory_failure_cpu, work); for (;;) { spin_lock_irqsave(&mf_cpu->lock, proc_flags); gotten = kfifo_get(&mf_cpu->fifo, &entry); @@ -1496,6 +1496,19 @@ static void memory_failure_work_func(struct work_struct *work) } } +/* + * Process memory_failure work queued on the specified CPU. + * Used to avoid return-to-userspace racing with the memory_failure workqueue. + */ +void memory_failure_queue_kick(int cpu) +{ + struct memory_failure_cpu *mf_cpu; + + mf_cpu = &per_cpu(memory_failure_cpu, cpu); + cancel_work_sync(&mf_cpu->work); + memory_failure_work_func(&mf_cpu->work); +} + static int __init memory_failure_init(void) { struct memory_failure_cpu *mf_cpu; diff --git a/mm/memory.c b/mm/memory.c index cb7c940cf800..465c6b6bbeb0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -79,6 +79,7 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> +#include <linux/sli.h> #include "internal.h" @@ -118,6 +119,18 @@ int randomize_va_space __read_mostly = 2; #endif +#ifndef arch_faults_on_old_pte +static inline bool arch_faults_on_old_pte(void) +{ + /* + * Those arches which don't have hw access flag feature need to + * implement their own helper. By default, "true" means pagefault + * will be hit on old pte. + */ + return true; +} +#endif + static int __init disable_randmaps(char *s) { randomize_va_space = 0; @@ -138,7 +151,7 @@ static int __init init_zero_pfn(void) zero_pfn = page_to_pfn(ZERO_PAGE(0)); return 0; } -core_initcall(init_zero_pfn); +early_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) @@ -1792,11 +1805,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { - pte_t *pte; + pte_t *pte, *mapped_pte; spinlock_t *ptl; int err = 0; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); @@ -1810,7 +1823,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(pte - 1, ptl); + pte_unmap_unlock(mapped_pte, ptl); return err; } @@ -2145,32 +2158,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, return same; } -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) +static inline bool cow_user_page(struct page *dst, struct page *src, + struct vm_fault *vmf) { + bool ret; + void *kaddr; + void __user *uaddr; + bool locked = false; + struct vm_area_struct *vma = vmf->vma; + struct mm_struct *mm = vma->vm_mm; + unsigned long addr = vmf->address; + debug_dma_assert_idle(src); + if (likely(src)) { + copy_user_highpage(dst, src, addr, vma); + return true; + } + /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. 
*/ - if (unlikely(!src)) { - void *kaddr = kmap_atomic(dst); - void __user *uaddr = (void __user *)(va & PAGE_MASK); + kaddr = kmap_atomic(dst); + uaddr = (void __user *)(addr & PAGE_MASK); + + /* + * On architectures with software "accessed" bits, we would + * take a double page fault, so mark it accessed here. + */ + if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { + pte_t entry; + + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* + * Other thread has already handled the fault + * and we don't need to do anything. If it's + * not the case, the fault will be triggered + * again on the same address. + */ + ret = false; + goto pte_unlock; + } + + entry = pte_mkyoung(vmf->orig_pte); + if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) + update_mmu_cache(vma, addr, vmf->pte); + } + + /* + * This really shouldn't fail, because the page is there + * in the page tables. But it might just be unreadable, + * in which case we just give up and fill the result with + * zeroes. + */ + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + if (locked) + goto warn; + + /* Re-validate under PTL if the page is still mapped */ + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* The PTE changed under us. Retry page fault. */ + ret = false; + goto pte_unlock; + } /* - * This really shouldn't fail, because the page is there - * in the page tables. But it might just be unreadable, - * in which case we just give up and fill the result with - * zeroes. + * The same page can be mapped back since last copy attempt. + * Try to copy again under PTL. */ - if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + /* + * Give a warning in case there is some obscure + * use-case + */ +warn: + WARN_ON_ONCE(1); clear_page(kaddr); - kunmap_atomic(kaddr); - flush_dcache_page(dst); - } else - copy_user_highpage(dst, src, va, vma); + } + } + + ret = true; + +pte_unlock: + if (locked) + pte_unmap_unlock(vmf->pte, vmf->ptl); + kunmap_atomic(kaddr); + flush_dcache_page(dst); + + return ret; } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) @@ -2342,7 +2424,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) vmf->address); if (!new_page) goto oom; - cow_user_page(new_page, old_page, vmf->address, vma); + + if (!cow_user_page(new_page, old_page, vmf)) { + /* + * COW failed; if the fault was resolved by someone else, + * that's fine. If not, userspace will re-fault on the + * same address and we will handle the fault from the + * second attempt.
+ */ + put_page(new_page); + if (old_page) + put_page(old_page); + return 0; + } } if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false)) @@ -3865,8 +3959,15 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) return do_fault(vmf); } - if (!pte_present(vmf->orig_pte)) - return do_swap_page(vmf); + if (!pte_present(vmf->orig_pte)) { + vm_fault_t retval; + u64 start; + + sli_memlat_stat_start(&start); + retval = do_swap_page(vmf); + sli_memlat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); + return retval; + } if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); @@ -4129,9 +4230,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) } #endif /* __PAGETABLE_PMD_FOLDED */ -static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, - pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp) { pgd_t *pgd; p4d_t *p4d; @@ -4196,31 +4297,33 @@ out: return -EINVAL; } -static inline int follow_pte(struct mm_struct *mm, unsigned long address, - pte_t **ptepp, spinlock_t **ptlp) +/** + * follow_pte - look up PTE at a user virtual address + * @mm: the mm_struct of the target address space + * @address: user virtual address + * @ptepp: location to store found PTE + * @ptlp: location to store the lock for the PTE + * + * On a successful return, the pointer to the PTE is stored in @ptepp; + * the corresponding lock is taken and its location is stored in @ptlp. + * The contents of the PTE are only stable until @ptlp is released; + * any further use, if any, must be protected against invalidation + * with MMU notifiers. + * + * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore + * should be taken for read. + * + * KVM uses this function. While it is arguably less bad than ``follow_pfn``, + * it is not a good general-purpose API. + * + * Return: zero on success, -ve otherwise. + */ +int follow_pte(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, spinlock_t **ptlp) { - int res; - - /* (void) is needed to make gcc happy */ - (void) __cond_lock(*ptlp, - !(res = __follow_pte_pmd(mm, address, NULL, - ptepp, NULL, ptlp))); - return res; + return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp); } - -int follow_pte_pmd(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, - pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) -{ - int res; - - /* (void) is needed to make gcc happy */ - (void) __cond_lock(*ptlp, - !(res = __follow_pte_pmd(mm, address, range, - ptepp, pmdpp, ptlp))); - return res; -} -EXPORT_SYMBOL(follow_pte_pmd); +EXPORT_SYMBOL_GPL(follow_pte); /** * follow_pfn - look up PFN at a user virtual address @@ -4230,6 +4333,9 @@ EXPORT_SYMBOL(follow_pte_pmd); * * Only IO mappings and raw PFN mappings are allowed. * + * This function does not allow the caller to read the permissions + * of the PTE. Do not use it. + * * Return: zero and the pfn at @pfn on success, -ve otherwise. 
*/ int follow_pfn(struct vm_area_struct *vma, unsigned long address, @@ -4620,17 +4726,19 @@ long copy_huge_page_from_user(struct page *dst_page, void *page_kaddr; unsigned long i, rc = 0; unsigned long ret_val = pages_per_huge_page * PAGE_SIZE; + struct page *subpage = dst_page; - for (i = 0; i < pages_per_huge_page; i++) { + for (i = 0; i < pages_per_huge_page; + i++, subpage = mem_map_next(subpage, dst_page, i)) { if (allow_pagefault) - page_kaddr = kmap(dst_page + i); + page_kaddr = kmap(subpage); else - page_kaddr = kmap_atomic(dst_page + i); + page_kaddr = kmap_atomic(subpage); rc = copy_from_user(page_kaddr, (const void __user *)(src + i * PAGE_SIZE), PAGE_SIZE); if (allow_pagefault) - kunmap(dst_page + i); + kunmap(subpage); else kunmap_atomic(page_kaddr); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c054945a9a74..308beca3ffeb 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -725,7 +725,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, * are reserved so nobody should be touching them so we should be safe */ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, - MEMMAP_HOTPLUG, altmap); + MEMINIT_HOTPLUG, altmap); set_zone_contiguous(zone); } @@ -1082,7 +1082,8 @@ int __ref add_memory_resource(int nid, struct resource *res) } /* link memory sections under this node.*/ - ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); + ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), + MEMINIT_HOTPLUG); BUG_ON(ret); /* create new memmap entry */ @@ -1566,6 +1567,20 @@ static int __ref __offline_pages(unsigned long start_pfn, /* check again */ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, check_pages_isolated_cb); + /* + * per-cpu pages are drained in start_isolate_page_range, but if + * there are still pages that are not free, make sure that we + * drain again, because when we isolated range we might + * have raced with another thread that was adding pages to pcp + * list. + * + * Forward progress should be still guaranteed because + * pages on the pcp list can only belong to MOVABLE_ZONE + * because has_unmovable_pages explicitly checks for + * PageBuddy on freed pages on other zones. + */ + if (ret) + drain_all_pages(zone); } while (ret); /* Ok, all of our target is isolated. 
@@ -1751,7 +1766,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) */ rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb); if (rc) - goto done; + return rc; /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); @@ -1771,9 +1786,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) try_offline_node(nid); -done: mem_hotplug_done(); - return rc; + return 0; } /** diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 787c5fc91b21..87d165923fee 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -496,7 +496,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long flags = qp->flags; int ret; bool has_unmovable = false; - pte_t *pte; + pte_t *pte, *mapped_pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); @@ -510,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; @@ -542,7 +542,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, } else break; } - pte_unmap_unlock(pte - 1, ptl); + pte_unmap_unlock(mapped_pte, ptl); cond_resched(); if (has_unmovable) diff --git a/mm/mlock.c b/mm/mlock.c index a72c1eeded77..7476a50600aa 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -102,26 +102,6 @@ void mlock_vma_page(struct page *page) } } -/* - * Isolate a page from LRU with optional get_page() pin. - * Assumes lru_lock already held and page already pinned. - */ -static bool __munlock_isolate_lru_page(struct page *page, bool getpage) -{ - if (PageLRU(page)) { - struct lruvec *lruvec; - - lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); - if (getpage) - get_page(page); - ClearPageLRU(page); - del_page_from_lru_list(page, lruvec, page_lru(page)); - return true; - } - - return false; -} - /* * Finish munlock after successful page isolation * @@ -182,40 +162,25 @@ static void __munlock_isolation_failed(struct page *page) unsigned int munlock_vma_page(struct page *page) { int nr_pages; - pg_data_t *pgdat = page_pgdat(page); /* For try_to_munlock() and to serialize with page migration */ BUG_ON(!PageLocked(page)); VM_BUG_ON_PAGE(PageTail(page), page); - /* - * Serialize with any parallel __split_huge_page_refcount() which - * might otherwise copy PageMlocked to part of the tail pages before - * we clear it in the head page. It also stabilizes hpage_nr_pages(). - */ - spin_lock_irq(&pgdat->lru_lock); - if (!TestClearPageMlocked(page)) { /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ - nr_pages = 1; - goto unlock_out; + return 0; } nr_pages = hpage_nr_pages(page); - __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); + mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); - if (__munlock_isolate_lru_page(page, true)) { - spin_unlock_irq(&pgdat->lru_lock); + if (!isolate_lru_page(page)) __munlock_isolated_page(page); - goto out; - } - __munlock_isolation_failed(page); + else + __munlock_isolation_failed(page); -unlock_out: - spin_unlock_irq(&pgdat->lru_lock); - -out: return nr_pages - 1; } @@ -307,9 +272,16 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) * We already have pin from follow_page_mask() * so we can spare the get_page() here. 
*/ - if (__munlock_isolate_lru_page(page, false)) + if (PageLRU(page)) { + struct lruvec *lruvec; + + ClearPageLRU(page); + lruvec = mem_cgroup_page_lruvec(page, + page_pgdat(page)); + del_page_from_lru_list(page, lruvec, + page_lru(page)); continue; - else + } else __munlock_isolation_failed(page); } else { delta_munlocked++; diff --git a/mm/mmap.c b/mm/mmap.c index e8d3504be68b..44a6ab34cd73 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2132,6 +2132,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; + info.align_offset = 0; return vm_unmapped_area(&info); } #endif @@ -2173,6 +2174,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; + info.align_offset = 0; addr = vm_unmapped_area(&info); /* @@ -2628,7 +2630,7 @@ static void unmap_region(struct mm_struct *mm, * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ -static void +static bool detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { @@ -2653,6 +2655,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* Kill the cache */ vmacache_invalidate(mm); + + /* + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or + * VM_GROWSUP VMA. Such VMAs can change their size under + * down_read(mmap_lock) and collide with the VMA we are about to unmap. + */ + if (vma && (vma->vm_flags & VM_GROWSDOWN)) + return false; + if (prev && (prev->vm_flags & VM_GROWSUP)) + return false; + return true; } /* @@ -2846,7 +2859,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, } /* Detach vmas from rbtree */ - detach_vmas_to_be_unmapped(mm, vma, prev, end); + if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) + downgrade = false; if (downgrade) downgrade_write(&mm->mmap_sem); @@ -3185,6 +3199,7 @@ void exit_mmap(struct mm_struct *mm) if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); + cond_resched(); } vm_unacct_memory(nr_accounted); } diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 3e612ae748e9..a1da47e02747 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -25,13 +25,16 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + /* Hold off tlb flush IPIs while switching mm's */ + local_irq_disable(); active_mm = tsk->active_mm; if (active_mm != mm) { mmgrab(mm); tsk->active_mm = mm; } tsk->mm = mm; - switch_mm(active_mm, mm, tsk); + switch_mm_irqs_off(active_mm, mm, tsk); + local_irq_enable(); task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); @@ -56,9 +59,11 @@ void unuse_mm(struct mm_struct *mm) task_lock(tsk); sync_mm_rss(mm); + local_irq_disable(); tsk->mm = NULL; /* active_mm is still 'mm' */ enter_lazy_tlb(mm, tsk); + local_irq_enable(); task_unlock(tsk); } EXPORT_SYMBOL_GPL(unuse_mm); diff --git a/mm/mprotect.c b/mm/mprotect.c index 95dee88f782b..410e2c27a491 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -584,9 +584,16 @@ static int do_mprotect_pkey(unsigned long start, size_t len, tmp = vma->vm_end; if (tmp > end) tmp = end; + + if (vma->vm_ops && vma->vm_ops->mprotect) + error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); + if (error) + goto out; + error = mprotect_fixup(vma, &prev, nstart, tmp, 
newflags); if (error) goto out; + nstart = tmp; if (nstart < prev->vm_end) diff --git a/mm/mremap.c b/mm/mremap.c index be399486dc81..926e24d70bf6 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -266,7 +266,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; - if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) { + if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE) { bool moved; /* See comment in move_ptes() */ @@ -616,6 +616,16 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, LIST_HEAD(uf_unmap_early); LIST_HEAD(uf_unmap); + /* + * There is a deliberate asymmetry here: we strip the pointer tag + * from the old address but leave the new address alone. This is + * for consistency with mmap(), where we prevent the creation of + * aliasing mappings in userspace by leaving the tag bits of the + * mapping address intact. A non-zero tag will cause the subsequent + * range checks to reject the address as invalid. + * + * See Documentation/arm64/tagged-address-abi.rst for more information. + */ addr = untagged_addr(addr); if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) diff --git a/mm/nommu.c b/mm/nommu.c index 8800fc689061..2308e8a24b78 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -155,11 +155,11 @@ void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags) return __vmalloc(size, flags, PAGE_KERNEL); } -void *vmalloc_user(unsigned long size) +static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) { void *ret; - ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + ret = __vmalloc(size, flags, PAGE_KERNEL); if (ret) { struct vm_area_struct *vma; @@ -172,8 +172,19 @@ void *vmalloc_user(unsigned long size) return ret; } + +void *vmalloc_user(unsigned long size) +{ + return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO); +} EXPORT_SYMBOL(vmalloc_user); +void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags) +{ + return __vmalloc_user_flags(size, flags | __GFP_ZERO); +} +EXPORT_SYMBOL(vmalloc_user_node_flags); + struct page *vmalloc_to_page(const void *addr) { return virt_to_page(addr); } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d58c481b3df8..212e71874301 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -63,6 +63,8 @@ int sysctl_oom_dump_tasks = 1; * and mark_oom_victim */ DEFINE_MUTEX(oom_lock); +/* Serializes oom_score_adj and oom_score_adj_min updates */ +DEFINE_MUTEX(oom_adj_mutex); static inline bool is_memcg_oom(struct oom_control *oc) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 266a3fb73b70..f0ec43034538 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -68,6 +68,8 @@ #include <linux/lockdep.h> #include <linux/nmi.h> #include <linux/psi.h> +#include <linux/khugepaged.h> +#include <linux/sli.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -1256,6 +1258,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct page *page, *tmp; LIST_HEAD(head); + /* + * Ensure a proper count is passed; otherwise we would get stuck + * in the while (list_empty(list)) loop below.
+ */ + count = min(pcp->count, count); while (count) { struct list_head *list; @@ -1555,6 +1562,7 @@ void set_zone_contiguous(struct zone *zone) if (!__pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone)) return; + cond_resched(); } /* We confirm that there is no hole */ @@ -1639,7 +1647,6 @@ static void __init deferred_free_pages(unsigned long pfn, } else if (!(pfn & nr_pgmask)) { deferred_free_range(pfn - nr_free, nr_free); nr_free = 1; - touch_nmi_watchdog(); } else { nr_free++; } @@ -1669,7 +1676,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone, continue; } else if (!page || !(pfn & nr_pgmask)) { page = pfn_to_page(pfn); - touch_nmi_watchdog(); } else { page++; } @@ -1792,6 +1798,13 @@ static int __init deferred_init_memmap(void *data) BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); pgdat->first_deferred_pfn = ULONG_MAX; + /* + * Once we unlock here, the zone cannot be grown anymore, thus if an + * interrupt thread must allocate this early in boot, zone must be + * pre-grown prior to start of deferred page initialization. + */ + pgdat_resize_unlock(pgdat, &flags); + /* Only the highest zone is deferred so find it */ for (zid = 0; zid < MAX_NR_ZONES; zid++) { zone = pgdat->node_zones + zid; @@ -1809,11 +1822,11 @@ static int __init deferred_init_memmap(void *data) * that we can avoid introducing any issues with the buddy * allocator. */ - while (spfn < epfn) + while (spfn < epfn) { nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); + cond_resched(); + } zone_empty: - pgdat_resize_unlock(pgdat, &flags); - /* Sanity check that the next zone really is unpopulated */ WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); @@ -1855,17 +1868,6 @@ deferred_grow_zone(struct zone *zone, unsigned int order) pgdat_resize_lock(pgdat, &flags); - /* - * If deferred pages have been initialized while we were waiting for - * the lock, return true, as the zone was grown. The caller will retry - * this zone. We won't return to this function since the caller also - * has this static branch. - */ - if (!static_branch_unlikely(&deferred_pages)) { - pgdat_resize_unlock(pgdat, &flags); - return true; - } - /* * If someone grew this zone while we were waiting for spinlock, return * true, as there might be enough pages already. @@ -1894,6 +1896,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order) first_deferred_pfn = spfn; nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); + touch_nmi_watchdog(); /* We should only stop along section boundaries */ if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) @@ -2344,12 +2347,20 @@ static bool can_steal_fallback(unsigned int order, int start_mt) return false; } -static inline void boost_watermark(struct zone *zone) +static inline bool boost_watermark(struct zone *zone) { unsigned long max_boost; if (!watermark_boost_factor) - return; + return false; + /* + * Don't bother in zones that are unlikely to produce results. + * On small machines, including kdump capture kernels running + * in a small area, boosting the watermark can cause an out of + * memory situation immediately. + */ + if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) + return false; max_boost = mult_frac(zone->_watermark[WMARK_HIGH], watermark_boost_factor, 10000); @@ -2363,12 +2374,14 @@ static inline void boost_watermark(struct zone *zone) * boosted watermark resulting in a hang. 
*/ if (!max_boost) - return; + return false; max_boost = max(pageblock_nr_pages, max_boost); zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, max_boost); + + return true; } /* @@ -2407,8 +2420,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, * likelihood of future fallbacks. Wake kswapd now as the node * may be balanced overall and kswapd will not wake naturally. */ - boost_watermark(zone); - if (alloc_flags & ALLOC_KSWAPD) + if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); /* We are not allowed to try stealing from the whole block */ @@ -3120,7 +3132,7 @@ void split_page(struct page *page, unsigned int order) for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); - split_page_owner(page, order); + split_page_owner(page, 1 << order); } EXPORT_SYMBOL_GPL(split_page); @@ -3375,7 +3387,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) #endif /* CONFIG_FAIL_PAGE_ALLOC */ -static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); } @@ -3474,7 +3486,8 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, } static inline bool zone_watermark_fast(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, unsigned int alloc_flags) + unsigned long mark, int classzone_idx, + unsigned int alloc_flags, gfp_t gfp_mask) { long free_pages = zone_page_state(z, NR_FREE_PAGES); long cma_pages = 0; @@ -3495,8 +3508,23 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order, if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) return true; - return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, - free_pages); + if (__zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + free_pages)) + return true; + /* + * Ignore watermark boosting for GFP_ATOMIC order-0 allocations + * when checking the min watermark. The min watermark is the + * point where boosting is ignored so that kswapd is woken up + * when below the low watermark. 
+ */ + if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost + && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { + mark = z->_watermark[WMARK_MIN]; + return __zone_watermark_ok(z, order, mark, classzone_idx, + alloc_flags, free_pages); + } + + return false; } bool zone_watermark_ok_safe(struct zone *z, unsigned int order, @@ -3637,7 +3665,8 @@ retry: mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); if (!zone_watermark_fast(zone, order, mark, - ac_classzone_idx(ac), alloc_flags)) { + ac_classzone_idx(ac), alloc_flags, + gfp_mask)) { int ret; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -3880,17 +3909,19 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; unsigned long pflags; unsigned int noreclaim_flag; + u64 start; if (!order) return NULL; psi_memstall_enter(&pflags); + sli_memlat_stat_start(&start); noreclaim_flag = memalloc_noreclaim_save(); - *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, prio, &page); memalloc_noreclaim_restore(noreclaim_flag); + sli_memlat_stat_end(MEM_LAT_DIRECT_COMPACT, start); psi_memstall_leave(&pflags); /* @@ -4101,12 +4132,14 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, int progress; unsigned int noreclaim_flag; unsigned long pflags; + u64 start; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); psi_memstall_enter(&pflags); + sli_memlat_stat_start(&start); fs_reclaim_acquire(gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); @@ -4115,6 +4148,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(gfp_mask); + sli_memlat_stat_end(MEM_LAT_GLOBAL_DIRECT_RECLAIM, start); psi_memstall_leave(&pflags); cond_resched(); @@ -4716,6 +4750,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, { struct page *page; unsigned int alloc_flags = ALLOC_WMARK_LOW; + unsigned long long alloc_entry_time; gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ struct alloc_context ac = { }; @@ -4733,6 +4768,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) return NULL; + sli_memlat_stat_start(&alloc_entry_time); finalise_ac(gfp_mask, &ac); /* @@ -4773,6 +4809,7 @@ out: trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); + sli_memlat_stat_end(MEM_LAT_PAGE_ALLOC, alloc_entry_time); return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); @@ -4900,6 +4937,11 @@ refill: if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) goto refill; + if (unlikely(nc->pfmemalloc)) { + free_the_page(page, compound_order(page)); + goto refill; + } + #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) /* if size can vary use size else just use PAGE_SIZE */ size = nc->size; @@ -5866,7 +5908,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn) * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn, enum memmap_context context, + unsigned long start_pfn, enum meminit_context context, struct vmem_altmap *altmap) { unsigned long pfn, end_pfn = start_pfn + size; @@ -5898,7 +5940,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * There can be holes in boot-time mem_map[]s handed to this * function. They do not exist on hotplugged memory. 
*/ - if (context == MEMMAP_EARLY) { + if (context == MEMINIT_EARLY) { if (!early_pfn_valid(pfn)) continue; if (!early_pfn_in_nid(pfn, nid)) @@ -5911,7 +5953,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, page = pfn_to_page(pfn); __init_single_page(page, pfn, zone, nid); - if (context == MEMMAP_HOTPLUG) + if (context == MEMINIT_HOTPLUG) __SetPageReserved(page); /* @@ -5993,7 +6035,7 @@ void __ref memmap_init_zone_device(struct zone *zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. * - * Please note that MEMMAP_HOTPLUG path doesn't clear memmap + * Please note that MEMINIT_HOTPLUG path doesn't clear memmap * because this is done early in section_activate() */ if (!(pfn & (pageblock_nr_pages - 1))) { @@ -6019,7 +6061,7 @@ static void __meminit zone_init_free_lists(struct zone *zone) void __meminit __weak memmap_init(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL); + memmap_init_zone(size, nid, zone, start_pfn, MEMINIT_EARLY, NULL); } static int zone_batchsize(struct zone *zone) @@ -7861,9 +7903,11 @@ int __meminit init_per_zone_wmark_min(void) setup_min_slab_ratio(); #endif + khugepaged_min_free_kbytes_update(); + return 0; } -core_initcall(init_per_zone_wmark_min) +postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so diff --git a/mm/page_counter.c b/mm/page_counter.c index de31470655f6..147ff99187b8 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) long new; new = atomic_long_add_return(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. @@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is racy, but we can live with some * inaccuracy in the failcnt. @@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter, *fail = c; goto failed; } - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * Just like with failcnt, we can live with some * inaccuracy in the watermark. 
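The propagate_protected_usage() fixes above correct an ancestor-walk bug: page_counter_charge() and page_counter_try_charge() walk from the leaf counter up through c->parent, and the protected-usage bookkeeping must be applied to the level currently being charged (c), not to the leaf (counter) on every iteration. A simplified, single-threaded sketch of the corrected walk follows; the struct and the propagate stub are illustrative, not the kernel's types:

#include <stdio.h>

struct counter {
	unsigned long usage;
	struct counter *parent;
};

/* stand-in for the kernel's propagate_protected_usage() */
static void propagate_protected_usage(struct counter *c, unsigned long new_usage)
{
	printf("propagating for level %p, usage now %lu\n", (void *)c, new_usage);
}

static void counter_charge(struct counter *counter, unsigned long nr_pages)
{
	struct counter *c;

	for (c = counter; c; c = c->parent) {
		unsigned long new_usage = c->usage + nr_pages;

		c->usage = new_usage;
		/* the fix: act on the current level c, not on the leaf counter */
		propagate_protected_usage(c, new_usage);
	}
}

int main(void)
{
	struct counter root = { 0, NULL };
	struct counter child = { 0, &root };

	counter_charge(&child, 32);
	return 0;
}

Before the fix, each iteration propagated against the leaf counter while carrying the ancestor's usage value, so the ancestors' own protected-usage state was never updated.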
diff --git a/mm/page_idle.c b/mm/page_idle.c index 295512465065..51acae4d3f3f 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -31,7 +31,6 @@ static struct page *page_idle_get_page(unsigned long pfn) { struct page *page; - pg_data_t *pgdat; if (!pfn_valid(pfn)) return NULL; @@ -41,13 +40,11 @@ static struct page *page_idle_get_page(unsigned long pfn) !get_page_unless_zero(page)) return NULL; - pgdat = page_pgdat(page); - spin_lock_irq(&pgdat->lru_lock); if (unlikely(!PageLRU(page))) { put_page(page); page = NULL; } - spin_unlock_irq(&pgdat->lru_lock); + return page; } diff --git a/mm/page_io.c b/mm/page_io.c index 60a66a58b9bf..bcf27d057253 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -260,11 +260,6 @@ out: return ret; } -static sector_t swap_page_sector(struct page *page) -{ - return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9); -} - static inline void count_swpout_vm_event(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 89c19c0feadb..da0f6e1ae01e 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -187,6 +187,14 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * pageblocks we may have modified and return -EBUSY to caller. This * prevents two threads from simultaneously working on overlapping ranges. * + * Please note that there is no strong synchronization with the page allocator + * either. Pages might be freed while their page blocks are marked ISOLATED. + * In some cases pages might still end up on pcp lists and that would allow + * for their allocation even when they are in fact isolated already. Depending + * on how strong a guarantee the caller needs, drain_all_pages might be needed + * (e.g. __offline_pages will need to call it after the isolation check, before + * the next retry). + * * Return: the number of isolated pageblocks on success and -EBUSY if any part * of range cannot be isolated.
*/ diff --git a/mm/page_owner.c b/mm/page_owner.c index 18ecde9f45b2..83d08943bcde 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) page_owner->last_migrate_reason = reason; } -void __split_page_owner(struct page *page, unsigned int order) +void __split_page_owner(struct page *page, unsigned int nr) { int i; struct page_ext *page_ext = lookup_page_ext(page); @@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order) if (unlikely(!page_ext)) return; - for (i = 0; i < (1 << order); i++) { + for (i = 0; i < nr; i++) { page_owner = get_page_owner(page_ext); page_owner->order = 0; page_ext = page_ext_next(page_ext); diff --git a/mm/pagewalk.c b/mm/pagewalk.c index d48c2a986ea3..4eb09e089881 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -16,9 +16,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; - addr += PAGE_SIZE; - if (addr == end) + if (addr >= end - PAGE_SIZE) break; + addr += PAGE_SIZE; pte++; } diff --git a/mm/percpu.c b/mm/percpu.c index 7e06a1e58720..806bc16f88eb 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1328,7 +1328,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ alloc_size = sizeof(struct pcpu_chunk) + - BITS_TO_LONGS(region_size >> PAGE_SHIFT); + BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long); chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk) panic("%s: Failed to allocate %zu bytes\n", __func__, diff --git a/mm/shmem.c b/mm/shmem.c index 3de91bd95b56..9699ecd1a3f9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -38,6 +38,7 @@ #include <linux/hugetlb.h> #include <linux/frontswap.h> #include <linux/fs_parser.h> +#include <linux/sli.h> #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */ @@ -1649,6 +1650,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, struct page *page; swp_entry_t swap; int error; + u64 start; VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); swap = radix_to_swp_entry(*pagep); @@ -1664,7 +1666,9 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, count_memcg_event_mm(charge_mm, PGMAJFAULT); } /* Here we actually start the io */ + sli_memlat_stat_start(&start); page = shmem_swapin(swap, gfp, info, index); + sli_memlat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); if (!page) { error = -ENOMEM; goto failed; @@ -2197,7 +2201,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) struct shmem_inode_info *info = SHMEM_I(inode); int retval = -ENOMEM; - spin_lock_irq(&info->lock); + /* + * What serializes the accesses to info->flags? + * ipc_lock_object() when called from shmctl_do_lock(), + * no serialization needed when called from shm_destroy(). 
+ */ if (lock && !(info->flags & VM_LOCKED)) { if (!user_shm_lock(inode->i_size, user)) goto out_nomem; @@ -2212,7 +2220,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) retval = 0; out_nomem: - spin_unlock_irq(&info->lock); return retval; } @@ -2417,11 +2424,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, lru_cache_add_anon(page); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->alloced++; inode->i_blocks += BLOCKS_PER_PAGE; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); inc_mm_counter(dst_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/shuffle.c b/mm/shuffle.c index b3fe97fd6654..56958ffa5a3a 100644 --- a/mm/shuffle.c +++ b/mm/shuffle.c @@ -58,25 +58,25 @@ module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400); * For two pages to be swapped in the shuffle, they must be free (on a * 'free_area' lru), have the same order, and have the same migratetype. */ -static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order) +static struct page * __meminit shuffle_valid_page(struct zone *zone, + unsigned long pfn, int order) { - struct page *page; + struct page *page = pfn_to_online_page(pfn); /* * Given we're dealing with randomly selected pfns in a zone we * need to ask questions like... */ - /* ...is the pfn even in the memmap? */ - if (!pfn_valid_within(pfn)) + /* ... is the page managed by the buddy? */ + if (!page) return NULL; - /* ...is the pfn in a present section or a hole? */ - if (!pfn_present(pfn)) + /* ... is the page assigned to the same zone? */ + if (page_zone(page) != zone) return NULL; /* ...is the page free and currently on a free_area list? */ - page = pfn_to_page(pfn); if (!PageBuddy(page)) return NULL; @@ -123,7 +123,7 @@ void __meminit __shuffle_zone(struct zone *z) * page_j randomly selected in the span @zone_start_pfn to * @spanned_pages. */ - page_i = shuffle_valid_page(i, order); + page_i = shuffle_valid_page(z, i, order); if (!page_i) continue; @@ -137,7 +137,7 @@ void __meminit __shuffle_zone(struct zone *z) j = z->zone_start_pfn + ALIGN_DOWN(get_random_long() % z->spanned_pages, order_pages); - page_j = shuffle_valid_page(j, order); + page_j = shuffle_valid_page(z, j, order); if (page_j && page_j != page_i) break; } diff --git a/mm/slab_common.c b/mm/slab_common.c index ade6c257d4b4..e36dd36c7076 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; +#ifdef CONFIG_MEMCG_KMEM + /* + * Skip the dying kmem_cache. + */ + if (s->memcg_params.dying) + return 1; +#endif + return 0; } @@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void memcg_set_kmem_cache_dying(struct kmem_cache *s) { spin_lock_irq(&memcg_kmem_wq_lock); s->memcg_params.dying = true; spin_unlock_irq(&memcg_kmem_wq_lock); +} +static void flush_memcg_workqueue(struct kmem_cache *s) +{ /* * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make * sure all registered rcu callbacks have been invoked. 
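The memcg_set_kmem_cache_dying()/flush_memcg_workqueue() split above exists so that kmem_cache_destroy(), in the hunks below, can mark the cache dying while holding slab_mutex but flush the deferred memcg work with the mutex dropped: the deferred work takes slab_mutex itself, so flushing it under the lock could deadlock, and the dying flag (also checked by slab_unmergeable() above) keeps new aliases from appearing in the window where the lock is released. A user-space sketch of the pattern, with a pthread join standing in for the workqueue flush (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t slab_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool cache_dying;
static pthread_t deferred;

/* the deferred work takes slab_mutex itself, like the memcg workqueue items */
static void *deferred_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&slab_mutex);
	if (!cache_dying)
		puts("deferred: cache is live, safe to touch it");
	pthread_mutex_unlock(&slab_mutex);
	return NULL;
}

static void cache_destroy(void)
{
	/* 1) mark dying under the lock so no new users or aliases appear */
	pthread_mutex_lock(&slab_mutex);
	cache_dying = true;
	pthread_mutex_unlock(&slab_mutex);

	/*
	 * 2) flush with the lock dropped; joining here while holding
	 * slab_mutex would deadlock against deferred_work()
	 */
	pthread_join(deferred, NULL);

	/* 3) retake the lock for the actual teardown */
	pthread_mutex_lock(&slab_mutex);
	puts("destroy: shutting the cache down");
	pthread_mutex_unlock(&slab_mutex);
}

int main(void)
{
	pthread_create(&deferred, NULL, deferred_work, NULL);
	cache_destroy();
	return 0;
}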
@@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } - -static inline void flush_memcg_workqueue(struct kmem_cache *s) -{ -} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) @@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); - get_online_cpus(); get_online_mems(); @@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; +#ifdef CONFIG_MEMCG_KMEM + memcg_set_kmem_cache_dying(s); + + mutex_unlock(&slab_mutex); + + put_online_mems(); + put_online_cpus(); + + flush_memcg_workqueue(s); + + get_online_cpus(); + get_online_mems(); + + mutex_lock(&slab_mutex); +#endif + err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); @@ -1740,7 +1761,7 @@ void kzfree(const void *p) if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); - memset(mem, 0, ks); + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); diff --git a/mm/slub.c b/mm/slub.c index af44807d5b05..52ded855b4ed 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -533,15 +533,32 @@ static void print_section(char *level, char *text, u8 *addr, metadata_access_disable(); } +/* + * See comment in calculate_sizes(). + */ +static inline bool freeptr_outside_object(struct kmem_cache *s) +{ + return s->offset >= s->inuse; +} + +/* + * Return offset of the end of info block which is inuse + free pointer if + * not overlapping with object. + */ +static inline unsigned int get_info_end(struct kmem_cache *s) +{ + if (freeptr_outside_object(s)) + return s->inuse + sizeof(void *); + else + return s->inuse; +} + static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { struct track *p; - if (s->offset) - p = object + s->offset + sizeof(void *); - else - p = object + s->inuse; + p = object + get_info_end(s); return p + alloc; } @@ -644,6 +661,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) va_end(args); } +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void **freelist, void *nextfree) +{ + if ((s->flags & SLAB_CONSISTENCY_CHECKS) && + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; + slab_fix(s, "Isolate corrupted freechain"); + return true; + } + + return false; +} + static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) { unsigned int off; /* Offset of last byte */ @@ -668,10 +699,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) print_section(KERN_ERR, "Redzone ", p + s->object_size, s->inuse - s->object_size); - if (s->offset) - off = s->offset + sizeof(void *); - else - off = s->inuse; + off = get_info_end(s); if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); @@ -762,7 +790,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, * object address * Bytes of the object to be managed. * If the freepointer may overlay the object then the free - * pointer is the first word of the object. + * pointer is at the middle of the object. * * Poisoning uses 0x6b (POISON_FREE) and the last byte is * 0xa5 (POISON_END) @@ -796,11 +824,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) { - unsigned long off = s->inuse; /* The end of info */ - - if (s->offset) - /* Freepointer is placed after the object. 
*/ - off += sizeof(void *); + unsigned long off = get_info_end(s); /* The end of info */ if (s->flags & SLAB_STORE_USER) /* We also have user information there */ @@ -886,7 +910,7 @@ static int check_object(struct kmem_cache *s, struct page *page, check_pad_bytes(s, page, p); } - if (!s->offset && val == SLUB_RED_ACTIVE) + if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) /* * Object and freepointer overlap. Cannot check * freepointer while object is allocated. @@ -1379,6 +1403,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void **freelist, void *nextfree) +{ + return false; +} #endif /* CONFIG_SLUB_DEBUG */ /* @@ -2062,6 +2091,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *prior; unsigned long counters; + /* + * If 'nextfree' is invalid, it is possible that the object at + * 'freelist' is already corrupted. So isolate all objects + * starting at 'freelist'. + */ + if (freelist_corrupted(s, page, &freelist, nextfree)) + break; + do { prior = page->freelist; counters = page->counters; @@ -2726,7 +2763,7 @@ redo: object = c->freelist; page = c->page; - if (unlikely(!object || !node_match(page, node))) { + if (unlikely(!object || !page || !node_match(page, node))) { object = __slab_alloc(s, gfpflags, node, addr, c); stat(s, ALLOC_SLOWPATH); } else { @@ -3558,6 +3595,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) * * This is the case if we do RCU, have a constructor or * destructor or are poisoning the objects. + * + * The assumption that s->offset >= s->inuse means free + * pointer is outside of the object is used in the + * freeptr_outside_object() function. If that is no + * longer true, the function needs to be modified. */ s->offset = size; size += sizeof(void *); @@ -5621,7 +5663,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) */ if (buffer) buf = buffer; - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) + else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && + !IS_ENABLED(CONFIG_SLUB_STATS)) buf = mbuf; else { buffer = (char *) get_zeroed_page(GFP_KERNEL); diff --git a/mm/sparse.c b/mm/sparse.c index 78bbecd904c3..191e29cca3fb 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -551,6 +551,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, pr_err("%s: node[%d] memory map backing failed. 
Some memory will not be available.", __func__, nid); pnum_begin = pnum; + sparse_buffer_fini(); goto failed; } check_usemap_section_nr(nid, usage); diff --git a/mm/swap.c b/mm/swap.c index 38c3fa4308e2..21efae62b03b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -188,8 +188,7 @@ int get_kernel_page(unsigned long start, int write, struct page **pages) EXPORT_SYMBOL_GPL(get_kernel_page); static void pagevec_lru_move_fn(struct pagevec *pvec, - void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), - void *arg) + void (*move_fn)(struct page *page, struct lruvec *lruvec)) { int i; struct pglist_data *pgdat = NULL; @@ -208,7 +207,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec, } lruvec = mem_cgroup_page_lruvec(page, pgdat); - (*move_fn)(page, lruvec, arg); + (*move_fn)(page, lruvec); } if (pgdat) spin_unlock_irqrestore(&pgdat->lru_lock, flags); @@ -216,35 +215,21 @@ static void pagevec_lru_move_fn(struct pagevec *pvec, pagevec_reinit(pvec); } -static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, - void *arg) +static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec) { - int *pgmoved = arg; - if (PageLRU(page) && !PageUnevictable(page)) { del_page_from_lru_list(page, lruvec, page_lru(page)); ClearPageActive(page); add_page_to_lru_list_tail(page, lruvec, page_lru(page)); - (*pgmoved)++; + __count_vm_events(PGROTATED, hpage_nr_pages(page)); } } -/* - * pagevec_move_tail() must be called with IRQ disabled. - * Otherwise this may cause nasty races. - */ -static void pagevec_move_tail(struct pagevec *pvec) -{ - int pgmoved = 0; - - pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); - __count_vm_events(PGROTATED, pgmoved); -} - /* * Writeback is about to end against a page which has been marked for immediate * reclaim. If it still appears to be reclaimable, move it to the tail of the * inactive list. + * rotate_reclaimable_page() must disable IRQs, to prevent nasty races. 
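
The swap.c refactor above removes the void *arg that pagevec_lru_move_fn() used to thread through every callback; the one real user, the PGROTATED counter, moves into pagevec_move_tail_fn() itself. A hedged sketch of the same signature cleanup, using invented types rather than the kernel's:

#include <stdio.h>

struct page { int id; };

/* Before: a generic callback dragging an opaque argument everywhere. */
typedef void (*move_fn_old)(struct page *page, void *arg);

/* After: no argument; per-page accounting lives in the callback. */
typedef void (*move_fn_new)(struct page *page);

static void move_tail(struct page *page)
{
	/* Event counting happens here, per page, instead of via an
	 * int * threaded through the caller. */
	printf("rotated page %d\n", page->id);
}

static void for_each_page(struct page *pages, int n, move_fn_new fn)
{
	int i;

	for (i = 0; i < n; i++)
		fn(&pages[i]);
}

int main(void)
{
	struct page pvec[2] = { { 1 }, { 2 } };

	for_each_page(pvec, 2, move_tail);
	return 0;
}
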
*/ void rotate_reclaimable_page(struct page *page) { @@ -257,7 +242,7 @@ void rotate_reclaimable_page(struct page *page) local_irq_save(flags); pvec = this_cpu_ptr(&lru_rotate_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_move_tail(pvec); + pagevec_lru_move_fn(pvec, pagevec_move_tail_fn); local_irq_restore(flags); } } @@ -272,8 +257,7 @@ static void update_page_reclaim_stat(struct lruvec *lruvec, reclaim_stat->recent_rotated[file]++; } -static void __activate_page(struct page *page, struct lruvec *lruvec, - void *arg) +static void __activate_page(struct page *page, struct lruvec *lruvec) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int file = page_is_file_cache(page); @@ -296,7 +280,7 @@ static void activate_page_drain(int cpu) struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); if (pagevec_count(pvec)) - pagevec_lru_move_fn(pvec, __activate_page, NULL); + pagevec_lru_move_fn(pvec, __activate_page); } static bool need_activate_page_drain(int cpu) @@ -312,7 +296,7 @@ void activate_page(struct page *page) get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, __activate_page, NULL); + pagevec_lru_move_fn(pvec, __activate_page); put_cpu_var(activate_page_pvecs); } } @@ -328,7 +312,7 @@ void activate_page(struct page *page) page = compound_head(page); spin_lock_irq(&pgdat->lru_lock); - __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL); + __activate_page(page, mem_cgroup_page_lruvec(page, pgdat)); spin_unlock_irq(&pgdat->lru_lock); } #endif @@ -493,8 +477,7 @@ void lru_cache_add_active_or_unevictable(struct page *page, * be write it out by flusher threads as this is much more effective * than the single-page writeout from reclaim. */ -static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, - void *arg) +static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec) { int lru, file; bool active; @@ -539,8 +522,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, update_page_reclaim_stat(lruvec, file, 0); } -static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, - void *arg) +static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec) { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { int file = page_is_file_cache(page); @@ -556,8 +538,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, } } -static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, - void *arg) +static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec) { if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page) && !PageUnevictable(page)) { @@ -599,21 +580,21 @@ void lru_add_drain_cpu(int cpu) /* No harm done if a racing interrupt already did this */ local_irq_save(flags); - pagevec_move_tail(pvec); + pagevec_lru_move_fn(pvec, pagevec_move_tail_fn); local_irq_restore(flags); } pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); if (pagevec_count(pvec)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); pvec = &per_cpu(lru_deactivate_pvecs, cpu); if (pagevec_count(pvec)) - pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); + pagevec_lru_move_fn(pvec, lru_deactivate_fn); pvec = &per_cpu(lru_lazyfree_pvecs, cpu); if (pagevec_count(pvec)) - pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); + pagevec_lru_move_fn(pvec, lru_lazyfree_fn); activate_page_drain(cpu); } @@ -639,7 +620,7 @@ 
void deactivate_file_page(struct page *page) struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); put_cpu_var(lru_deactivate_file_pvecs); } } @@ -659,7 +640,7 @@ void deactivate_page(struct page *page) get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); + pagevec_lru_move_fn(pvec, lru_deactivate_fn); put_cpu_var(lru_deactivate_pvecs); } } @@ -679,7 +660,7 @@ void mark_page_lazyfree(struct page *page) get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); + pagevec_lru_move_fn(pvec, lru_lazyfree_fn); put_cpu_var(lru_lazyfree_pvecs); } } @@ -876,25 +857,16 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, VM_BUG_ON_PAGE(PageLRU(page_tail), page); lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock); - if (!list) - SetPageLRU(page_tail); - - if (likely(PageLRU(page))) - list_add_tail(&page_tail->lru, &page->lru); - else if (list) { + if (list) { /* page reclaim is reclaiming a huge page */ get_page(page_tail); + VM_WARN_ON(PageLRU(page)); list_add_tail(&page_tail->lru, list); } else { - /* - * Head page has not yet been counted, as an hpage, - * so we must account for each subpage individually. - * - * Put page_tail on the list at the correct position - * so they all end up in order. - */ - add_page_to_lru_list_tail(page_tail, lruvec, - page_lru(page_tail)); + /* head is still on lru (and we have it frozen) */ + VM_WARN_ON(!PageLRU(page)); + SetPageLRU(page_tail); + list_add_tail(&page_tail->lru, &page->lru); } if (!PageUnevictable(page)) @@ -902,8 +874,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, - void *arg) +static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec) { enum lru_list lru; int was_unevictable = TestClearPageUnevictable(page); @@ -963,7 +934,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, */ void __pagevec_lru_add(struct pagevec *pvec) { - pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL); + pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn); } EXPORT_SYMBOL(__pagevec_lru_add); diff --git a/mm/swap_state.c b/mm/swap_state.c index 8e7ce9a9bc5e..7c434fcfff0d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -23,6 +23,7 @@ #include <linux/huge_mm.h> #include <asm/pgtable.h> +#include "internal.h" /* * swapper_space is a fiction, retained to simplify the path through @@ -418,7 +419,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* May fail (-ENOMEM) if XArray node allocation failed. 
*/ __SetPageLocked(new_page); __SetPageSwapBacked(new_page); - err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); + err = add_to_swap_cache(new_page, entry, + gfp_mask & GFP_RECLAIM_MASK); if (likely(!err)) { /* Initiate read into locked page */ SetPageWorkingset(new_page); @@ -509,10 +511,11 @@ static unsigned long swapin_nr_pages(unsigned long offset) return 1; hits = atomic_xchg(&swapin_readahead_hits, 0); - pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages, + pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, + max_pages, atomic_read(&last_readahead_pages)); if (!hits) - prev_offset = offset; + WRITE_ONCE(prev_offset, offset); atomic_set(&last_readahead_pages, pages); return pages; diff --git a/mm/swapfile.c b/mm/swapfile.c index 891a3ef48651..f6964212c6c8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -221,6 +221,19 @@ offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset) BUG(); } +sector_t swap_page_sector(struct page *page) +{ + struct swap_info_struct *sis = page_swap_info(page); + struct swap_extent *se; + sector_t sector; + pgoff_t offset; + + offset = __page_file_index(page); + se = offset_to_swap_extent(sis, offset); + sector = se->start_block + (offset - se->start_page); + return sector << (PAGE_SHIFT - 9); +} + /* * swap allocation tell device that a cluster of swap can now be discarded, * to allow the swap device to optimize its wear-levelling. @@ -1038,7 +1051,7 @@ start_over: goto nextsi; } if (size == SWAPFILE_CLUSTER) { - if (!(si->flags & SWP_FS)) + if (si->flags & SWP_BLKDEV) n_ret = swap_alloc_cluster(si, swp_entries); } else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, @@ -2132,7 +2145,7 @@ int try_to_unuse(unsigned int type, bool frontswap, swp_entry_t entry; unsigned int i; - if (!si->inuse_pages) + if (!READ_ONCE(si->inuse_pages)) return 0; if (!frontswap) @@ -2148,7 +2161,7 @@ retry: spin_lock(&mmlist_lock); p = &init_mm.mmlist; - while (si->inuse_pages && + while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) { @@ -2177,7 +2190,7 @@ retry: mmput(prev_mm); i = 0; - while (si->inuse_pages && + while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (i = find_next_to_unuse(si, i, frontswap)) != 0) { @@ -2219,7 +2232,7 @@ retry: * been preempted after get_swap_page(), temporarily hiding that swap. * It's easy and robust (though cpu-intensive) just to keep retrying. 
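
The new swap_page_sector() helper in swapfile.c converts a page's swap offset to a 512-byte device sector: locate the extent covering the offset, add the delta into the extent to its start_block, then shift left by PAGE_SHIFT - 9 (with 4 KiB pages, 8 sectors per page). A worked model of that arithmetic, assuming 4 KiB pages and an invented extent:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

struct swap_extent {
	unsigned long start_page;	/* first swap offset covered */
	unsigned long start_block;	/* backing block for start_page */
};

static unsigned long long page_offset_to_sector(const struct swap_extent *se,
						unsigned long offset)
{
	unsigned long long block = se->start_block + (offset - se->start_page);

	/* A page is 2^(PAGE_SHIFT - 9) sectors, i.e. 8 here. */
	return block << (PAGE_SHIFT - 9);
}

int main(void)
{
	struct swap_extent se = { .start_page = 100, .start_block = 5000 };

	/* offset 103 -> block 5003 -> sector 40024 */
	printf("%llu\n", page_offset_to_sector(&se, 103));
	return 0;
}
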
*/ - if (si->inuse_pages) { + if (READ_ONCE(si->inuse_pages)) { if (!signal_pending(current)) goto retry; retval = -EINTR; @@ -2737,10 +2750,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) else type = si->type + 1; + ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; - ++*pos; return si; } @@ -2824,6 +2837,7 @@ late_initcall(max_swapfiles_check); static struct swap_info_struct *alloc_swap_info(void) { struct swap_info_struct *p; + struct swap_info_struct *defer = NULL; unsigned int type; int i; @@ -2852,7 +2866,7 @@ static struct swap_info_struct *alloc_swap_info(void) smp_wmb(); WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1); } else { - kvfree(p); + defer = p; p = swap_info[type]; /* * Do not memset this entry: a racing procfs swap_next() @@ -2865,6 +2879,7 @@ static struct swap_info_struct *alloc_swap_info(void) plist_node_init(&p->avail_lists[i], 0); p->flags = SWP_USED; spin_unlock(&swap_lock); + kvfree(defer); spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); @@ -3284,7 +3299,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = inode_drain_writes(inode); if (error) { inode->i_flags &= ~S_SWAPFILE; - goto bad_swap_unlock_inode; + goto free_swap_address_space; } mutex_lock(&swapon_mutex); @@ -3309,6 +3324,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = 0; goto out; +free_swap_address_space: + exit_swap_address_space(p->type); bad_swap_unlock_inode: inode_unlock(inode); bad_swap: diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index c7ae74ce5ff3..640ff2bd9a69 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -269,7 +269,7 @@ retry: */ idx = linear_page_index(dst_vma, dst_addr); mapping = dst_vma->vm_file->f_mapping; - hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr); + hash = hugetlb_fault_mutex_hash(h, mapping, idx); mutex_lock(&hugetlb_fault_mutex_table[hash]); err = -ENOMEM; diff --git a/mm/util.c b/mm/util.c index 38a90a8f96f6..3c85c75bfa6a 100644 --- a/mm/util.c +++ b/mm/util.c @@ -594,6 +594,24 @@ void kvfree(const void *addr) } EXPORT_SYMBOL(kvfree); +/** + * kvfree_sensitive - Free a data object containing sensitive information. + * @addr: address of the data object to be freed. + * @len: length of the data object. + * + * Use the special memzero_explicit() function to clear the content of a + * kvmalloc'ed object containing sensitive data to make sure that the + * compiler won't optimize out the data clearing. 
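
The reason kvfree_sensitive() (like the kzfree() change earlier in this series) goes through memzero_explicit() is that a plain memset() immediately before a free is a dead store the optimizer may legally delete, leaving the secret in memory. A userspace approximation of the idea; memzero_explicit_model() is an invented stand-in, and the kernel's real implementation relies on a compiler barrier instead:

#include <stdlib.h>
#include <string.h>

/* Stand-in for memzero_explicit(): the volatile-qualified pointer
 * keeps the compiler from eliding the "dead" clearing stores. */
static void memzero_explicit_model(void *s, size_t n)
{
	volatile unsigned char *p = s;

	while (n--)
		*p++ = 0;
}

/* Sketch of the kvfree_sensitive() pattern: clear, then free. */
static void free_sensitive(void *addr, size_t len)
{
	if (addr) {
		memzero_explicit_model(addr, len);
		free(addr);
	}
}

int main(void)
{
	char *key = malloc(32);

	if (!key)
		return 1;
	memcpy(key, "hypothetical-secret-material!!", 31);
	/* memset(key, 0, 32) followed by free(key) could legally be
	 * optimized down to just free(key); the model above cannot. */
	free_sensitive(key, 32);
	return 0;
}
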
+ */ +void kvfree_sensitive(const void *addr, size_t len) +{ + if (likely(!ZERO_OR_NULL_PTR(addr))) { + memzero_explicit((void *)addr, len); + kvfree(addr); + } +} +EXPORT_SYMBOL(kvfree_sensitive); + static inline void *__page_rmapping(struct page *page) { unsigned long mapping; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7d05834e594c..50b132913316 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -34,6 +34,7 @@ #include <linux/llist.h> #include <linux/bitops.h> #include <linux/rbtree_augmented.h> +#include <linux/overflow.h> #include <linux/uaccess.h> #include <asm/tlbflush.h> @@ -84,6 +85,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next); + + cond_resched(); } while (pmd++, addr = next, addr != end); } @@ -2671,6 +2674,26 @@ void *vzalloc_node(unsigned long size, int node) } EXPORT_SYMBOL(vzalloc_node); +/** + * vmalloc_user_node_flags - allocate memory for userspace on a specific node + * @size: allocation size + * @node: numa node + * @flags: flags for the page level allocator + * + * The resulting memory area is zeroed so it can be mapped to userspace + * without leaking data. + * + * Return: pointer to the allocated memory or %NULL on error + */ +void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags) +{ + return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, + flags | __GFP_ZERO, PAGE_KERNEL, + VM_USERMAP, node, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(vmalloc_user_node_flags); + /** * vmalloc_exec - allocate virtually contiguous, executable memory * @size: allocation size @@ -2976,6 +2999,7 @@ finished: * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory + * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure @@ -2988,9 +3012,15 @@ finished: * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, - void *kaddr, unsigned long size) + void *kaddr, unsigned long pgoff, + unsigned long size) { struct vm_struct *area; + unsigned long off; + unsigned long end_index; + + if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) + return -EINVAL; size = PAGE_ALIGN(size); @@ -3004,8 +3034,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) return -EINVAL; - if (kaddr + size > area->addr + get_vm_area_size(area)) + if (check_add_overflow(size, off, &end_index) || + end_index > get_vm_area_size(area)) return -EINVAL; + kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); @@ -3044,7 +3076,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, - addr + (pgoff << PAGE_SHIFT), + addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); diff --git a/mm/vmscan.c b/mm/vmscan.c index d7ab98cd98ab..bd8cb4e8c392 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -57,6 +57,7 @@ #include <linux/swapops.h> #include <linux/balloon_compaction.h> +#include <linux/sli.h> #include "internal.h" @@ -174,6 +175,7 @@ unsigned int vm_pagecache_limit_async __read_mostly = 0; unsigned int vm_pagecache_ignore_slab __read_mostly = 1; static struct task_struct *kpclimitd = NULL; static bool kpclimitd_context = false; +unsigned int vm_pagecache_limit_global __read_mostly = 0; /* * 
The total number of pages which are beyond the high watermark within all * zones. @@ -756,6 +758,9 @@ void drop_slab_node(int nid) do { struct mem_cgroup *memcg = NULL; + if (fatal_signal_pending(current)) + return; + freed = 0; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { @@ -835,6 +840,7 @@ typedef enum { static pageout_t pageout(struct page *page, struct address_space *mapping, struct scan_control *sc) { + u64 start; /* * If the page is dirty, only perform writeback if that write * will be non-blocking. To prevent this allocation from being @@ -883,7 +889,14 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, }; SetPageReclaim(page); + if (!current_is_kswapd()) + sli_memlat_stat_start(&start); res = mapping->a_ops->writepage(page, &wbc); + if (!current_is_kswapd()) + sli_memlat_stat_end(global_reclaim(sc) ? + MEM_LAT_GLOBAL_DIRECT_SWAPOUT : + MEM_LAT_MEMCG_DIRECT_SWAPOUT, + start); if (res < 0) handle_write_error(mapping, page, res); if (res == AOP_WRITEPAGE_ACTIVATE) { @@ -1896,26 +1909,29 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, while (!list_empty(list)) { page = lru_to_page(list); VM_BUG_ON_PAGE(PageLRU(page), page); + list_del(&page->lru); if (unlikely(!page_evictable(page))) { - list_del(&page->lru); spin_unlock_irq(&pgdat->lru_lock); putback_lru_page(page); spin_lock_irq(&pgdat->lru_lock); continue; } - lruvec = mem_cgroup_page_lruvec(page, pgdat); + /* The SetPageLRU needs to be kept here for list integrity. + * Otherwise: + * #0 move_pages_to_lru #1 release_pages + * if !put_page_testzero + * if (put_page_testzero()) + * !PageLRU //skip lru_lock + * SetPageLRU() + * list_add(&page->lru,) + * list_add(&page->lru,) + */ SetPageLRU(page); - lru = page_lru(page); - nr_pages = hpage_nr_pages(page); - update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); - list_move(&page->lru, &lruvec->lists[lru]); - - if (put_page_testzero(page)) { + if (unlikely(put_page_testzero(page))) { __ClearPageLRU(page); __ClearPageActive(page); - del_page_from_lru_list(page, lruvec, lru); if (unlikely(PageCompound(page))) { spin_unlock_irq(&pgdat->lru_lock); @@ -1923,9 +1939,16 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, spin_lock_irq(&pgdat->lru_lock); } else list_add(&page->lru, &pages_to_free); - } else { - nr_moved += nr_pages; + continue; } + VM_BUG_ON_PAGE(mem_cgroup_page_lruvec(page, page_pgdat(page)) + != lruvec, page); + lru = page_lru(page); + nr_pages = hpage_nr_pages(page); + + update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); + list_add(&page->lru, &lruvec->lists[lru]); + nr_moved += nr_pages; } /* @@ -2784,6 +2807,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) unsigned long reclaimed; unsigned long scanned; + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. 
+ */ + cond_resched(); + switch (mem_cgroup_protected(root, memcg)) { case MEMCG_PROT_MIN: /* @@ -3169,8 +3200,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { - pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, - (enum zone_type)ZONE_NORMAL); + if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL) + WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL); + wake_up_interruptible(&pgdat->kswapd_wait); } @@ -3571,7 +3603,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, return sc->nr_scanned >= sc->nr_to_reclaim; } -static void __shrink_page_cache(gfp_t mask); +static unsigned long __shrink_page_cache(gfp_t mask, struct mem_cgroup *memcg, unsigned long nr_pages); /* * For kswapd, balance_pgdat() will reclaim pages across a node from zones @@ -3601,6 +3633,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) .order = order, .may_unmap = 1, }; + unsigned long nr_pages; set_task_reclaim_state(current, &sc.reclaim_state); psi_memstall_enter(&pflags); @@ -3609,8 +3642,11 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) count_vm_event(PAGEOUTRUN); /* this reclaims from all zones so don't count to sc.nr_reclaimed */ - if (unlikely(vm_pagecache_limit_pages) && pagecache_over_limit() > 0) - __shrink_page_cache(GFP_KERNEL); + if (unlikely(vm_pagecache_limit_pages)) { + nr_pages = pagecache_over_limit(); + if (nr_pages) + __shrink_page_cache(GFP_KERNEL, NULL, nr_pages); + } /* * Account for the reclaim boost. Note that the zone boost is left in @@ -3808,9 +3844,9 @@ out: static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, enum zone_type prev_classzone_idx) { - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - return prev_classzone_idx; - return pgdat->kswapd_classzone_idx; + enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, @@ -3854,8 +3890,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * the previous request that slept prematurely. 
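
These kswapd hunks wrap every unlocked access to pgdat->kswapd_classzone_idx and pgdat->kswapd_order in READ_ONCE()/WRITE_ONCE(), so the compiler emits exactly one untorn, unfused access per annotation while kswapd and its wakers race. A minimal model of the annotation only (it demonstrates the single-access guarantee, not the kernel's full memory-model semantics), using simplified macro definitions:

#include <pthread.h>
#include <stdio.h>

/* Simplified forms of the kernel macros: force exactly one access
 * through a volatile-qualified lvalue. */
#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int kswapd_order;	/* written by wakers, read by kswapd */

static void *waker(void *arg)
{
	int order = *(int *)arg;

	/* Only ever raise the order, as wakeup_kswapd() does. */
	if (READ_ONCE(kswapd_order) < order)
		WRITE_ONCE(kswapd_order, order);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int order = 3;

	pthread_create(&t, NULL, waker, &order);
	pthread_join(t, NULL);
	printf("order = %d\n", READ_ONCE(kswapd_order));
	return 0;
}
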
*/ if (remaining) { - pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); + WRITE_ONCE(pgdat->kswapd_classzone_idx, + kswapd_classzone_idx(pgdat, classzone_idx)); + + if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) + WRITE_ONCE(pgdat->kswapd_order, reclaim_order); } finish_wait(&pgdat->kswapd_wait, &wait); @@ -3938,12 +3977,12 @@ static int kswapd(void *p) tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); for ( ; ; ) { bool ret; - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); kswapd_try_sleep: @@ -3951,10 +3990,10 @@ kswapd_try_sleep: classzone_idx); /* Read the new order and classzone_idx */ - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); ret = try_to_freeze(); if (kthread_should_stop()) @@ -3998,20 +4037,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; + enum zone_type curr_idx; if (!managed_zone(zone)) return; if (!cpuset_zone_allowed(zone, gfp_flags)) return; - pgdat = zone->zone_pgdat; - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - pgdat->kswapd_classzone_idx = classzone_idx; - else - pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, - classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, order); + pgdat = zone->zone_pgdat; + curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx) + WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx); + + if (READ_ONCE(pgdat->kswapd_order) < order) + WRITE_ONCE(pgdat->kswapd_order, order); + if (!waitqueue_active(&pgdat->kswapd_wait)) return; @@ -4036,27 +4078,6 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, wake_up_interruptible(&pgdat->kswapd_wait); } -/* - * * The reclaimable count would be mostly accurate. - * * The less reclaimable pages may be - * * - mlocked pages, which will be moved to unevictable list when encountered - * * - mapped pages, which may require several travels to be reclaimed - * * - dirty pages, which is not "instantly" reclaimable - * * */ - -static unsigned long global_reclaimable_pages(void) -{ - int nr; - - nr = global_node_page_state(NR_ACTIVE_FILE) + - global_node_page_state(NR_INACTIVE_FILE); - - if (get_nr_swap_pages() > 0) - nr += global_node_page_state(NR_ACTIVE_ANON) + - global_node_page_state(NR_INACTIVE_ANON); - return nr; -} - #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of @@ -4279,7 +4300,7 @@ out: * This function is similar to shrink_all_memory, except that it may never * swap out mapped pages and only does four passes. 
*/ -static void __shrink_page_cache(gfp_t mask) +static unsigned long __shrink_page_cache(gfp_t mask, struct mem_cgroup *memcg, unsigned long nr_pages) { unsigned long ret = 0; int pass = 0; @@ -4290,11 +4311,10 @@ static void __shrink_page_cache(gfp_t mask) .priority = DEF_PRIORITY, .may_unmap = 0, .may_writepage = 0, - .target_mem_cgroup = NULL, + .target_mem_cgroup = memcg, .reclaim_idx = MAX_NR_ZONES, }; struct reclaim_state *old_rs = current->reclaim_state; - long nr_pages; /* We might sleep during direct reclaim so make atomic context * is certainly a bug. @@ -4303,7 +4323,7 @@ static void __shrink_page_cache(gfp_t mask) retry: /* How many pages are we over the limit?*/ - nr_pages = pagecache_over_limit(); +// nr_pages = pagecache_get_reclaim_pages(memcg); /* * Return early if there's no work to do. @@ -4312,7 +4332,7 @@ retry: * This makes sure that no sleeping reclaimer will stay behind. * Allow breaching the limit if the task is on the way out. */ - if (nr_pages <= 0 || fatal_signal_pending(current)) { + if (nr_pages == 0 || fatal_signal_pending(current)) { wake_up_interruptible(&pagecache_reclaim_wq); goto out; } @@ -4333,7 +4353,6 @@ retry: for (; pass <= 3; pass++) { for (sc.priority = DEF_PRIORITY; sc.priority >= 0; sc.priority--) { unsigned long nr_to_scan = nr_pages - ret; - struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL); int nid; sc.nr_scanned = 0; @@ -4351,9 +4370,11 @@ retry: reclaim_state.reclaimed_slab = 0; for_each_online_node(nid) { - do { - shrink_slab(mask, nid, memcg, sc.priority); - } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); + struct mem_cgroup *iter; + + for_each_mem_cgroup_tree(iter, memcg) { + shrink_slab(mask, nid, iter, sc.priority); + } } ret += reclaim_state.reclaimed_slab; @@ -4372,6 +4393,8 @@ retry: out: current->reclaim_state = old_rs; + + return sc.nr_reclaimed; } static int kpagecache_limitd(void *data) @@ -4386,7 +4409,7 @@ static int kpagecache_limitd(void *data) wake_up_interruptible(&pagecache_reclaim_wq); for ( ; ; ) { - __shrink_page_cache(GFP_KERNEL); + __shrink_page_cache(GFP_KERNEL, NULL, pagecache_over_limit()); prepare_to_wait(&kpagecache_limitd_wq, &wait, TASK_INTERRUPTIBLE); if (!kthread_should_stop()) @@ -4410,10 +4433,20 @@ static void wakeup_kpclimitd(gfp_t mask) void shrink_page_cache(gfp_t mask, struct page *page) { - if (0 == vm_pagecache_limit_async) - __shrink_page_cache(mask); - else - wakeup_kpclimitd(mask); + if (vm_pagecache_limit_global) { + if (0 == vm_pagecache_limit_async) + __shrink_page_cache(mask, NULL, pagecache_over_limit()); + else + wakeup_kpclimitd(mask); + } +} + +unsigned long shrink_page_cache_memcg(gfp_t mask, struct mem_cgroup *memcg, unsigned long nr_pages) +{ + if (!vm_pagecache_limit_global) + return __shrink_page_cache(mask, memcg, nr_pages); + + return 0; } /* It's optimal to keep kswapds on the same CPUs as their memory, but diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 22d17ecfe7df..443b3b1c9581 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -293,11 +293,7 @@ struct zspage { }; struct mapping_area { -#ifdef CONFIG_PGTABLE_MAPPING - struct vm_struct *vm; /* vm area for mapping object that span pages */ -#else char *vm_buf; /* copy buffer for objects that span pages */ -#endif char *vm_addr; /* address of kmap_atomic()'ed pages */ enum zs_mapmode vm_mm; /* mapping mode */ }; @@ -1113,46 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class) return zspage; } -#ifdef CONFIG_PGTABLE_MAPPING -static inline int __zs_cpu_up(struct mapping_area *area) 
-{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm) - return 0; - area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); - if (!area->vm) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - if (area->vm) - free_vm_area(area->vm); - area->vm = NULL; -} - -static inline void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); - area->vm_addr = area->vm->addr; - return area->vm_addr + off; -} - -static inline void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - unsigned long addr = (unsigned long)area->vm_addr; - - unmap_kernel_range(addr, PAGE_SIZE * 2); -} - -#else /* CONFIG_PGTABLE_MAPPING */ - static inline int __zs_cpu_up(struct mapping_area *area) { /* @@ -1233,8 +1189,6 @@ out: pagefault_enable(); } -#endif /* CONFIG_PGTABLE_MAPPING */ - static int zs_cpu_prepare(unsigned int cpu) { struct mapping_area *area; @@ -2262,11 +2216,13 @@ static unsigned long zs_can_compact(struct size_class *class) return obj_wasted * class->pages_per_zspage; } -static void __zs_compact(struct zs_pool *pool, struct size_class *class) +static unsigned long __zs_compact(struct zs_pool *pool, + struct size_class *class) { struct zs_compact_control cc; struct zspage *src_zspage; struct zspage *dst_zspage = NULL; + unsigned long pages_freed = 0; spin_lock(&class->lock); while ((src_zspage = isolate_zspage(class, true))) { @@ -2296,7 +2252,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(class, dst_zspage); if (putback_zspage(class, src_zspage) == ZS_EMPTY) { free_zspage(pool, class, src_zspage); - pool->stats.pages_compacted += class->pages_per_zspage; + pages_freed += class->pages_per_zspage; } spin_unlock(&class->lock); cond_resched(); @@ -2307,12 +2263,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(class, src_zspage); spin_unlock(&class->lock); + + return pages_freed; } unsigned long zs_compact(struct zs_pool *pool) { int i; struct size_class *class; + unsigned long pages_freed = 0; for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { class = pool->size_class[i]; @@ -2320,10 +2279,11 @@ unsigned long zs_compact(struct zs_pool *pool) continue; if (class->index != i) continue; - __zs_compact(pool, class); + pages_freed += __zs_compact(pool, class); } + atomic_long_add(pages_freed, &pool->stats.pages_compacted); - return pool->stats.pages_compacted; + return pages_freed; } EXPORT_SYMBOL_GPL(zs_compact); @@ -2340,13 +2300,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker, struct zs_pool *pool = container_of(shrinker, struct zs_pool, shrinker); - pages_freed = pool->stats.pages_compacted; /* * Compact classes and calculate compaction delta. * Can run concurrently with a manually triggered * (by user) compaction. */ - pages_freed = zs_compact(pool) - pages_freed; + pages_freed = zs_compact(pool); return pages_freed ? 
pages_freed : SHRINK_STOP; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index d4bcfd8f95bf..3f47abf9ef4a 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -280,7 +280,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) return 0; out_free_newdev: - if (new_dev->reg_state == NETREG_UNINITIALIZED) + if (new_dev->reg_state == NETREG_UNINITIALIZED || + new_dev->reg_state == NETREG_UNREGISTERED) free_netdev(new_dev); return err; } diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 13cd683a658a..60eb9a2b209b 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. */ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) @@ -1002,7 +1023,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) csocket = NULL; - if (addr == NULL) + if (!addr || !strlen(addr)) return -EINVAL; if (strlen(addr) >= UNIX_PATH_MAX) { diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 550c6ca007cc..9c1241292d1d 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -229,6 +229,8 @@ int __init atalk_proc_init(void) sizeof(struct aarp_iter_state), NULL)) goto out; + return 0; + out: remove_proc_subtree("atalk", init_net.proc_net); return -ENOMEM; diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index b41375d4d295..4610c352849b 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1568,8 +1568,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct sk_buff *skb; struct net_device *dev; struct ddpehdr *ddp; - int size; - struct atalk_route *rt; + int size, hard_header_len; + struct atalk_route *rt, *rt_lo = NULL; int err; if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) @@ -1632,7 +1632,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name); - size += 
dev->hard_header_len; + hard_header_len = dev->hard_header_len; + /* Leave room for loopback hardware header if necessary */ + if (usat->sat_addr.s_node == ATADDR_BCAST && + (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) { + struct atalk_addr at_lo; + + at_lo.s_node = 0; + at_lo.s_net = 0; + + rt_lo = atrtr_find(&at_lo); + + if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len) + hard_header_len = rt_lo->dev->hard_header_len; + } + + size += hard_header_len; release_sock(sk); skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); lock_sock(sk); @@ -1640,7 +1655,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) goto out; skb_reserve(skb, ddp_dl->header_length); - skb_reserve(skb, dev->hard_header_len); + skb_reserve(skb, hard_header_len); skb->dev = dev; SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk); @@ -1691,18 +1706,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) /* loop back */ skb_orphan(skb); if (ddp->deh_dnode == ATADDR_BCAST) { - struct atalk_addr at_lo; - - at_lo.s_node = 0; - at_lo.s_net = 0; - - rt = atrtr_find(&at_lo); - if (!rt) { + if (!rt_lo) { kfree_skb(skb); err = -ENETUNREACH; goto out; } - dev = rt->dev; + dev = rt_lo->dev; skb->dev = dev; } ddp_dl->request(ddp_dl, skb, dev->dev_addr); diff --git a/net/atm/lec.c b/net/atm/lec.c index 5a77c235a212..3625a04a6c70 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1269,6 +1269,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) entry->vcc = NULL; } if (entry->recv_vcc) { + struct atm_vcc *vcc = entry->recv_vcc; + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); + + kfree(vpriv); + vcc->user_back = NULL; + entry->recv_vcc->push = entry->old_recv_push; vcc_release_async(entry->recv_vcc, -EPIPE); entry->recv_vcc = NULL; diff --git a/net/atm/proc.c b/net/atm/proc.c index d79221fd4dae..c31896707313 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v) static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = vcc_walk(seq, 1); - if (v) - (*pos)++; + (*pos)++; return v; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index bb222b882b67..2fdb1b573e8c 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -635,8 +635,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case SO_BINDTODEVICE: - if (optlen > IFNAMSIZ) - optlen = IFNAMSIZ; + if (optlen > IFNAMSIZ - 1) + optlen = IFNAMSIZ - 1; + + memset(devname, 0, sizeof(devname)); if (copy_from_user(devname, optval, optlen)) { res = -EFAULT; @@ -1185,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock, if (addr_len > sizeof(struct sockaddr_ax25) && fsa->fsa_ax25.sax25_ndigis != 0) { /* Valid number of digipeaters ? */ - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { + if (fsa->fsa_ax25.sax25_ndigis < 1 || + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { err = -EINVAL; goto out_release; } @@ -1505,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; /* Valid number of digipeaters ? 
*/ - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { + if (usax->sax25_ndigis < 1 || + usax->sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * usax->sax25_ndigis) { err = -EINVAL; goto out; } diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 2614a9caee00..a39af0eefad3 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -120,20 +120,7 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) rtnl_lock(); ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings); rtnl_unlock(); - - /* Virtual interface drivers such as tun / tap interfaces, VLAN, etc - * tend to initialize the interface throughput with some value for the - * sake of having a throughput number to export via ethtool. This - * exported throughput leaves batman-adv to conclude the interface - * throughput is genuine (reflecting reality), thus no measurements - * are necessary. - * - * Based on the observation that those interface types also tend to set - * the link auto-negotiation to 'off', batman-adv shall check this - * setting to differentiate between genuine link throughput information - * and placeholders installed by virtual interfaces. - */ - if (ret == 0 && link_settings.base.autoneg == AUTONEG_ENABLE) { + if (ret == 0) { /* link characteristics might change over time */ if (link_settings.base.duplex == DUPLEX_FULL) hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 8033f24f506c..3165f6ff8ee7 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -878,6 +878,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); + if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. 
*/ @@ -897,7 +903,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) - return; + goto out; neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, ethhdr->h_source); @@ -1005,11 +1011,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; - ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 663a53b6d36e..a6b26ca5c697 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -25,6 +25,7 @@ #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/netlink.h> +#include <linux/preempt.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> @@ -83,11 +84,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) */ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) { - const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; + const struct batadv_bla_backbone_gw *gw; u32 hash = 0; - hash = jhash(&claim->addr, sizeof(claim->addr), hash); - hash = jhash(&claim->vid, sizeof(claim->vid), hash); + gw = (struct batadv_bla_backbone_gw *)data; + hash = jhash(&gw->orig, sizeof(gw->orig), hash); + hash = jhash(&gw->vid, sizeof(gw->vid), hash); return hash % size; } @@ -437,7 +439,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); @@ -1576,13 +1581,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv) } /** - * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the bcast_packet to be checked + * @skb: contains the multicast packet to be checked + * @payload_ptr: pointer to position inside the head buffer of the skb + * marking the start of the data to be CRC'ed + * @orig: originator mac address, NULL if unknown * - * check if it is on our broadcast list. Another gateway might - * have sent the same packet because it is connected to the same backbone, - * so we have to remove this duplicate. + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore @@ -1591,19 +1599,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv) * * Return: true if a packet is in the duplicate list, false otherwise. 
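
The batadv_choose_backbone_gw() fix above matters because the hash function derived its key from a struct batadv_bla_claim layout while the table actually stores struct batadv_bla_backbone_gw entries, so the vid was read from the wrong offset and lookups could miss existing entries. A toy reproduction of that class of bug, with invented structs and a trivial stand-in hash:

#include <stdio.h>
#include <string.h>

/* Two structs whose key fields sit at different offsets. */
struct claim    { unsigned char addr[6]; short vid; };
struct backbone { unsigned char orig[6]; int extra; short vid; };

/* Trivial stand-in for jhash: sum the key bytes. */
static unsigned int toy_hash(const void *key, size_t len)
{
	const unsigned char *p = key;
	unsigned int h = 0;

	while (len--)
		h += *p++;
	return h;
}

int main(void)
{
	struct backbone gw;
	const struct claim *wrong;

	memset(&gw, 0, sizeof(gw));
	memcpy(gw.orig, "\x01\x02\x03\x04\x05\x06", 6);
	gw.extra = 7;
	gw.vid = 9;

	/* Correct: key taken from the type actually stored. */
	printf("good bucket %u\n", (toy_hash(gw.orig, 6) + gw.vid) % 16);

	/* Buggy: reinterpret the entry as the wrong struct, as the old
	 * code effectively did; vid is read from padding at offset 6,
	 * so the entry lands in a different bucket. */
	wrong = (const struct claim *)&gw;
	printf("bad bucket  %u\n", (toy_hash(wrong->addr, 6) + wrong->vid) % 16);
	return 0;
}
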
*/ -bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) +static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb, u8 *payload_ptr, + const u8 *orig) { - int i, curr; - __be32 crc; - struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; bool ret = false; - - bcast_packet = (struct batadv_bcast_packet *)skb->data; + int i, curr; + __be32 crc; /* calculate the crc ... */ - crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1)); + crc = batadv_skb_crc32(skb, payload_ptr); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); @@ -1622,8 +1628,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, if (entry->crc != crc) continue; - if (batadv_compare_eth(entry->orig, bcast_packet->orig)) - continue; + /* are the originators both known and not anonymous? */ + if (orig && !is_zero_ether_addr(orig) && + !is_zero_ether_addr(entry->orig)) { + /* If known, check if the new frame came from + * the same originator: + * We are safe to take identical frames from the + * same orig, if known, as multiplications in + * the mesh are detected via the (orig, seqno) pair. + * So we can be a bit more liberal here and allow + * identical frames from the same orig which the source + * host might have sent multiple times on purpose. + */ + if (batadv_compare_eth(entry->orig, orig)) + continue; + } /* this entry seems to match: same crc, not too old, * and from another gw. therefore return true to forbid it. @@ -1639,7 +1658,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; - ether_addr_copy(entry->orig, bcast_packet->orig); + + /* known originator */ + if (orig) + ether_addr_copy(entry->orig, orig); + /* anonymous originator */ + else + eth_zero_addr(entry->orig); + bat_priv->bla.bcast_duplist_curr = curr; out: @@ -1648,6 +1674,48 @@ out: return ret; } +/** + * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the multicast packet to be checked, decapsulated from a + * unicast_packet + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); +} + +/** + * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the bcast_packet to be checked + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. 
+ */ +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + struct batadv_bcast_packet *bcast_packet; + u8 *payload_ptr; + + bcast_packet = (struct batadv_bcast_packet *)skb->data; + payload_ptr = (u8 *)(bcast_packet + 1); + + return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, + bcast_packet->orig); +} + /** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. @@ -1809,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @is_bcast: the packet came in a broadcast packet type. + * @packet_type: the batman packet type this frame came in * * batadv_bla_rx avoidance checks if: * * we have to race for a claim @@ -1821,7 +1889,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * further process the skb. */ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast) + unsigned short vid, int packet_type) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; @@ -1843,9 +1911,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto handled; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) - /* don't allow broadcasts while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) - goto handled; + /* don't allow multicast packets while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + /* Both broadcast flooding or multicast-via-unicasts + * delivery might send to multiple backbone gateways + * sharing the same LAN and therefore need to coordinate + * which backbone gateway forwards into the LAN, + * by claiming the payload source address. + * + * Broadcast flooding and multicast-via-unicasts + * delivery use the following two batman packet types. + * Note: explicitly exclude BATADV_UNICAST_4ADDR, + * as the DHCP gateway feature will send explicitly + * to only one BLA gateway, so the claiming process + * should be avoided there. + */ + if (packet_type == BATADV_BCAST || + packet_type == BATADV_UNICAST) + goto handled; + + /* potential duplicates from foreign BLA backbone gateways via + * multicast-in-unicast packets + */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + packet_type == BATADV_UNICAST && + batadv_bla_check_ucast_duplist(bat_priv, skb)) + goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; @@ -1880,13 +1971,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; } - /* if it is a broadcast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { + /* if it is a multicast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { /* ... drop it. the responsible gateway is in charge. * - * We need to check is_bcast because with the gateway + * We need to check packet type because with the gateway * feature, broadcasts (like DHCP requests) may be sent - * using a unicast packet type. + * using a unicast 4 address packet type. See comment above. 
*/ goto handled; } else { diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 02b24a861a85..9370be015813 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -35,7 +35,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast); + unsigned short vid, int packet_type); bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); bool batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -66,7 +66,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, - bool is_bcast) + int packet_type) { return false; } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 47df4c678988..89c9097007c3 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index 11941cf1adcc..87f3de3d4e4c 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -180,6 +180,7 @@ static const struct file_operations batadv_log_fops = { .read = batadv_log_read, .poll = batadv_log_poll, .llseek = no_llseek, + .owner = THIS_MODULE, }; /** diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 1d5bdf3a4b65..f5bf931252c4 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -51,6 +51,7 @@ #include <uapi/linux/batadv_packet.h> #include <uapi/linux/batman_adv.h> +#include "bridge_loop_avoidance.h" #include "hard-interface.h" #include "hash.h" #include "log.h" @@ -1434,6 +1435,35 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, return BATADV_FORW_ALL; } +/** + * batadv_mcast_forw_send_orig() - send a multicast packet to an originator + * @bat_priv: the bat priv with all the soft interface information + * @skb: the multicast packet to send + * @vid: the vlan identifier + * @orig_node: the originator to send the packet to + * + * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. + */ +int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node) +{ + /* Avoid sending multicast-in-unicast packets to other BLA + * gateways - they already got the frame from the LAN side + * we share with them. + * TODO: Refactor to take BLA into account earlier, to avoid + * reducing the mcast_fanout count. 
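
batadv_mcast_forw_send_orig() gives all multicast-to-unicast senders one choke point: if the destination originator is a BLA backbone gateway on our shared LAN the frame is dropped as already delivered, but the skb is still consumed and success is still reported, so callers need no special casing. A sketch of that filter-then-delegate shape with hypothetical names (is_backbone_gw() stands in for the BLA query):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sk_buff { int len; };

#define NET_XMIT_SUCCESS 0

static bool is_backbone_gw(int orig_id)
{
	/* Hypothetical predicate; the patch asks BLA instead. */
	return orig_id == 42;
}

static int send_unicast(struct sk_buff *skb, int orig_id)
{
	printf("sent %d bytes to originator %d\n", skb->len, orig_id);
	free(skb);
	return NET_XMIT_SUCCESS;
}

/* Wrapper: skip peers that already saw the frame on the shared LAN,
 * but keep the "consumes skb, returns a NET_XMIT code" contract. */
static int forw_send_orig(struct sk_buff *skb, int orig_id)
{
	if (is_backbone_gw(orig_id)) {
		free(skb);
		return NET_XMIT_SUCCESS;
	}
	return send_unicast(skb, orig_id);
}

int main(void)
{
	struct sk_buff *a = malloc(sizeof(*a));
	struct sk_buff *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->len = 100;
	b->len = 100;
	forw_send_orig(a, 7);	/* forwarded */
	forw_send_orig(b, 42);	/* silently dropped, reported as success */
	return 0;
}
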
+ */ + if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) { + dev_kfree_skb(skb); + return NET_XMIT_SUCCESS; + } + + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, + orig_node, vid); +} + /** * batadv_mcast_forw_tt() - forwards a packet to multicast listeners * @bat_priv: the bat priv with all the soft interface information @@ -1471,8 +1501,8 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_entry->orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, + orig_entry->orig_node); } rcu_read_unlock(); @@ -1513,8 +1543,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1551,8 +1580,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1618,8 +1646,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1656,8 +1683,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index 5d9e2bb29c97..403929013ac4 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -46,6 +46,11 @@ enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node **mcast_single_orig); +int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node); + int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); @@ -71,6 +76,16 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, return BATADV_FORW_ALL; } +static inline int +batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node) +{ + kfree_skb(skb); + return NET_XMIT_DROP; +} + static inline int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 580609389f0f..70e3b161c663 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, */ static u8 batadv_nc_random_weight_tq(u8 tq) { - u8 rand_val, rand_tq; - - get_random_bytes(&rand_val, sizeof(rand_val)); - /* randomize the estimated packet loss (max TQ - estimated TQ) */ - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); - - /* normalize the randomized packet loss */ - rand_tq /= BATADV_TQ_MAX_VALUE; + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); /* convert to (randomized) estimated tq again */ return BATADV_TQ_MAX_VALUE - rand_tq; diff 
--git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index f0f864820dea..708e90cb18a6 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -826,6 +826,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, hdr_len); ethhdr = (struct ethhdr *)(skb->data + hdr_len); + /* do not reroute multicast frames in a unicast header */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + return true; + /* check if the destination client was served by this node and it is now * roaming. In this case, it means that the node has got a ROAM_ADV * message and that it knows the new destination in the mesh to re-route diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5ee8e9a100f9..7f209390069e 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -364,9 +364,8 @@ send: goto dropped; ret = batadv_send_skb_via_gw(bat_priv, skb, vid); } else if (mcast_single_orig) { - ret = batadv_send_skb_unicast(bat_priv, skb, - BATADV_UNICAST, 0, - mcast_single_orig, vid); + ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid, + mcast_single_orig); } else if (forw_mode == BATADV_FORW_SOME) { ret = batadv_mcast_forw_send(bat_priv, skb, vid); } else { @@ -425,10 +424,10 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; - bool is_bcast; + int packet_type; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; - is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); + packet_type = batadv_bcast_packet->packet_type; skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); @@ -471,7 +470,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. If it will * not handle it, we can safely push it up.
*/ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) goto out; if (orig_node) diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index e5bbc28ed12c..079a13493880 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, ret = batadv_parse_throughput(net_dev, buff, "throughput_override", &tp_override); if (!ret) - return count; + goto out; old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); if (old_tp_override == tp_override) @@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj, tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + batadv_hardif_put(hard_iface); return sprintf(buff, "%u.%u MBit\n", tp_override / 10, tp_override % 10); } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 8a482c5ec67b..c5271ea4dc83 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -891,6 +891,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); + tt_vlan->reserved = 0; tt_vlan++; } @@ -974,6 +975,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); + tt_vlan->reserved = 0; tt_vlan++; } diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 4febc82a7c76..52fb6d6d6d58 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -50,6 +50,7 @@ static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; +static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; @@ -1070,12 +1071,14 @@ static void do_enable_set(struct work_struct *work) enable_6lowpan = set_enable->flag; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); kfree(set_enable); } @@ -1127,11 +1130,13 @@ static ssize_t lowpan_control_write(struct file *fp, if (ret == -EINVAL) return ret; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } + mutex_unlock(&set_lock); if (conn) { struct lowpan_peer *peer; diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 26526be579c7..463bad58478b 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -226,6 +226,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_info_req req; found = true; + + memset(&req, 0, sizeof(req)); + req.id = cl->id; a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), sizeof(req), &req); @@ -305,6 +308,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, if (!hdev || hdev->dev_type != HCI_AMP) { struct a2mp_info_rsp rsp; + memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -348,6 +353,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, if (!ctrl) return -ENOMEM; + memset(&req, 0, sizeof(req)); + req.id = rsp->id; a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), &req); @@ -374,6 +381,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, hdev = hci_dev_get(req->id); 
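The a2mp hunks in this region all apply one hardening pattern: each stack-allocated request/response struct is zeroed with memset() before its fields are filled in, so compiler-inserted padding (and any field the code forgets to set) can no longer leak kernel stack contents to the remote peer. A minimal standalone sketch of the idea, using a hypothetical struct layout rather than the real A2MP definitions:

#include <string.h>

struct rsp {
	unsigned char id;	/* offset 0; the compiler pads 1 byte after this */
	unsigned short status;	/* offset 2 */
};

static void fill_rsp(struct rsp *r, unsigned char id, unsigned short status)
{
	/* Zero the whole struct first: without this, the padding byte
	 * keeps stale stack data and would go out on the wire verbatim.
	 */
	memset(r, 0, sizeof(*r));
	r->id = id;
	r->status = status;
}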
if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { struct a2mp_amp_assoc_rsp rsp; + + memset(&rsp, 0, sizeof(rsp)); rsp.id = req->id; if (tmp) { @@ -464,7 +473,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; struct hci_dev *hdev; struct hci_conn *hcon; @@ -475,6 +483,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; @@ -502,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); if (!assoc) { amp_ctrl_put(ctrl); + hci_dev_put(hdev); return -ENOMEM; } @@ -553,6 +564,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; rsp.status = A2MP_STATUS_SUCCESS; @@ -675,6 +688,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) if (err) { struct a2mp_cmd_rej rej; + memset(&rej, 0, sizeof(rej)); + rej.reason = cpu_to_le16(0); hdr = (void *) skb->data; @@ -898,6 +913,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev) BT_DBG("%s mgr %p", hdev->name, mgr); + memset(&rsp, 0, sizeof(rsp)); + rsp.id = hdev->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -995,6 +1012,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) if (!mgr) return; + memset(&rsp, 0, sizeof(rsp)); + hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); if (!hs_hcon) { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; @@ -1027,6 +1046,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan) mgr->bredr_chan = chan; + memset(&req, 0, sizeof(req)); + req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); req.ext_feat = 0; a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 9c711f0dfae3..be2d469d6369 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -297,6 +297,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, struct hci_request req; int err; + if (!mgr) + return; + cp.phy_handle = hcon->handle; cp.len_so_far = cpu_to_le16(0); cp.max_len = cpu_to_le16(hdev->amp_assoc_size); diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h index a6f8d03d4aaf..830723971cf8 100644 --- a/net/bluetooth/ecdh_helper.h +++ b/net/bluetooth/ecdh_helper.h @@ -25,6 +25,6 @@ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64], u8 secret[32]); -int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key); +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]); int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]); int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 87691404d0c6..ee57fa20bac3 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1285,6 +1285,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using 
FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 9e19d5a3aac8..83b324419ad3 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1317,8 +1317,10 @@ int hci_inquiry(void __user *arg) * cleared). If it is interrupted by a signal, return -EINTR. */ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, - TASK_INTERRUPTIBLE)) - return -EINTR; + TASK_INTERRUPTIBLE)) { + err = -EINTR; + goto done; + } } /* for unlimited number of responses we will use buffer with diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index c1d3a303d97f..bd678ffdaef7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -41,12 +41,27 @@ /* Handle HCI Event packets */ -static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, + u8 *new_status) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%2.2x", hdev->name, status); + /* It is possible that we receive Inquiry Complete event right + * before we receive Inquiry Cancel Command Complete event, in + * which case the latter event should have status of Command + * Disallowed (0x0c). This should not be treated as error, since + * we actually achieve what Inquiry Cancel wants to achieve, + * which is to end the last Inquiry session. + */ + if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + status = 0x00; + } + + *new_status = status; + if (status) return; @@ -1274,6 +1289,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -2441,7 +2459,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -2822,7 +2840,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); - hci_encrypt_cfm(conn, ev->status, 0x00); + hci_encrypt_cfm(conn, ev->status); } } @@ -2907,22 +2925,7 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, conn->enc_key_size = rp->key_size; } - if (conn->state == BT_CONFIG) { - conn->state = BT_CONNECTED; - hci_connect_cfm(conn, 0); - hci_conn_drop(conn); - } else { - u8 encrypt; - - if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) - encrypt = 0x00; - else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) - encrypt = 0x02; - else - encrypt = 0x01; - - hci_encrypt_cfm(conn, 0, encrypt); - } + hci_encrypt_cfm(conn, 0); unlock: hci_dev_unlock(hdev); @@ -2971,27 +2974,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = 
HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. - */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; @@ -3040,14 +3039,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) } notify: - if (conn->state == BT_CONFIG) { - if (!ev->status) - conn->state = BT_CONNECTED; - - hci_connect_cfm(conn, ev->status); - hci_conn_drop(conn); - } else - hci_encrypt_cfm(conn, ev->status, ev->encrypt); + hci_encrypt_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); @@ -3139,7 +3131,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, switch (*opcode) { case HCI_OP_INQUIRY_CANCEL: - hci_cc_inquiry_cancel(hdev, skb); + hci_cc_inquiry_cancel(hdev, skb, status); break; case HCI_OP_PERIODIC_INQ: @@ -4064,6 +4056,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4085,6 +4080,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4105,6 +4103,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } } +unlock: hci_dev_unlock(hdev); } @@ -4216,6 +4215,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ + case 0x1e: /* Invalid LMP Parameters */ case 0x1f: /* Unspecified error */ case 0x20: /* Unsupported LMP Parameter value */ if (conn->out) { @@ -4266,7 +4266,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -4791,6 +4791,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, return; } + if (!hcon->amp_mgr) { + hci_dev_unlock(hdev); + return; + } + if (ev->status) { hci_conn_del(hcon); hci_dev_unlock(hdev); @@ -4835,6 +4840,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) return; hchan->handle = le16_to_cpu(ev->handle); + hchan->amp = true; BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); @@ -4867,7 +4873,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, hci_dev_lock(hdev); hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); - if (!hchan) + if (!hchan || !hchan->amp) goto 
unlock; amp_destroy_logical_link(hchan, ev->reason); @@ -5230,7 +5236,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5252,6 +5259,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5311,7 +5323,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5365,7 +5377,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ @@ -5400,7 +5412,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5440,7 +5453,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5514,7 +5527,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length; @@ -5703,19 +5717,18 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; - void *ptr = &skb->data[1]; + struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; + + if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) + return; hci_dev_lock(hdev); - while (num_reports--) { - struct hci_ev_le_direct_adv_info *ev = ptr; - + for (; num_reports; num_reports--, ev++) process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); - - ptr += sizeof(*ev); - } + ev->direct_addr_type, ev->rssi, NULL, 0, + false); hci_dev_unlock(hdev); } @@ -5833,6 +5846,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) u8 status = 0, event = hdr->evt, req_evt = 0; u16 opcode = HCI_OP_NOP; + if (!event) { + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + goto done; + } 
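For context on the advertising-report changes above in hci_event.c: legacy advertising PDUs carry at most HCI_MAX_AD_LENGTH (31) bytes of data, while extended advertising PDUs may legitimately be longer, so the new ext_adv flag gates both the length check and the store-for-later-merging paths. A simplified sketch of that dispatch rule (illustrative only, not the kernel function):

#define HCI_MAX_AD_LENGTH 31

/* Return 1 if the report may be parsed, 0 if it must be dropped. */
static int adv_report_len_ok(unsigned int len, int ext_adv)
{
	/* Only legacy PDUs are bounded by the 31-byte limit; a "legacy"
	 * report claiming more data than that is malformed.
	 */
	if (!ext_adv && len > HCI_MAX_AD_LENGTH)
		return 0;
	return 1;
}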
+ if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(cmd_hdr->opcode); @@ -6044,6 +6062,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) req_complete_skb(hdev, status, opcode, orig_skb); } +done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 3d25dbf10b26..7f3f4ea56d44 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -271,12 +271,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, { int ret; - if (!test_bit(HCI_UP, &hdev->flags)) - return -ENETDOWN; - /* Serialize all requests */ hci_req_sync_lock(hdev); - ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); + /* check the state after obtaining the lock to protect the HCI_UP + * against any races from hci_dev_do_close when the controller + * gets removed. + */ + if (test_bit(HCI_UP, &hdev->flags)) + ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); + else + ret = -ENETDOWN; hci_req_sync_unlock(hdev); return ret; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a845786258a0..3499bace25ec 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -419,6 +419,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this work. No need to call l2cap_chan_hold(chan) here again. + */ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) @@ -431,12 +434,12 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_close(chan, reason); - l2cap_chan_unlock(chan); - chan->ops->close(chan); - mutex_unlock(&conn->chan_lock); + l2cap_chan_unlock(chan); l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -1734,9 +1737,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_del(chan, err); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -4131,7 +4134,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, return 0; } - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; @@ -4355,6 +4359,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); rsp.dcid = cpu_to_le16(chan->scid); @@ -4363,12 +4368,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, chan->ops->set_shutdown(chan); - l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -4400,20 +4404,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); return 0; } - l2cap_chan_hold(chan); l2cap_chan_del(chan, 0); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan);
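The l2cap_core.c hunks above converge on a single teardown ordering: take an extra channel reference before locking, run the close callback while the channel lock is still held, and drop the reference only after unlocking, so the channel cannot be freed underneath a lock holder. A self-contained sketch of that ordering with a toy refcounted object (illustrative only, not the kernel API):

#include <stdlib.h>

struct chan {
	int refs;
};

static void chan_hold(struct chan *c) { c->refs++; }

static void chan_put(struct chan *c)
{
	/* Dropping the last reference frees the object, which is why
	 * chan_put() must run strictly after the lock is released.
	 */
	if (--c->refs == 0)
		free(c);
}

static void teardown(struct chan *c)
{
	chan_hold(c);	/* keep the channel alive across the close path */
	/* lock the channel here */
	/* ... close/cleanup work runs under the lock ... */
	/* unlock the channel here */
	chan_put(c);	/* only now may the channel actually be freed */
}

int main(void)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->refs = 1;
	teardown(c);	/* drops the initial reference and frees c */
	return 0;
}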
mutex_unlock(&conn->chan_lock); @@ -6696,9 +6701,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) goto drop; } - if ((chan->mode == L2CAP_MODE_ERTM || - chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb)) - goto drop; + if (chan->ops->filter) { + if (chan->ops->filter(chan, skb)) + goto drop; + } if (!control->sframe) { int err; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a7be8b59b3c2..8648c5211ebe 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1042,7 +1042,7 @@ done: } /* Kill socket (only if zapped and orphan) - * Must be called on unlocked socket. + * Must be called on unlocked socket, with l2cap channel lock. */ static void l2cap_sock_kill(struct sock *sk) { @@ -1193,6 +1193,7 @@ static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; + struct l2cap_chan *chan; BT_DBG("sock %p, sk %p", sock, sk); @@ -1202,9 +1203,17 @@ static int l2cap_sock_release(struct socket *sock) bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, 2); + chan = l2cap_pi(sk)->chan; + + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); sock_orphan(sk); l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return err; } @@ -1222,12 +1231,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) BT_DBG("child chan %p state %s", chan, state_to_string(chan->state)); + l2cap_chan_hold(chan); l2cap_chan_lock(chan); + __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } } @@ -1332,8 +1344,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) parent = bt_sk(sk)->parent; - sock_set_flag(sk, SOCK_ZAPPED); - switch (chan->state) { case BT_OPEN: case BT_BOUND: @@ -1360,8 +1370,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) break; } - release_sock(sk); + + /* Only zap after cleanup to avoid use after free race */ + sock_set_flag(sk, SOCK_ZAPPED); + } static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, @@ -1467,6 +1480,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) sk->sk_state_change(sk); } +static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct sock *sk = chan->data; + + switch (chan->mode) { + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + return sk_filter(sk, skb); + } + + return 0; +} + static const struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, @@ -1481,6 +1507,7 @@ static const struct l2cap_ops l2cap_chan_ops = { .set_shutdown = l2cap_sock_set_shutdown_cb, .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, + .filter = l2cap_sock_filter, }; static void l2cap_sock_destruct(struct sock *sk) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index acb7c6d5643f..5fce559a61bf 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -756,7 +756,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_ssp_capable(hdev)) { settings |= MGMT_SETTING_SSP; - settings |= MGMT_SETTING_HS; + if (IS_ENABLED(CONFIG_BT_HS)) + settings |= MGMT_SETTING_HS; } if (lmp_sc_capable(hdev)) @@ -1771,6 +1772,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) BT_DBG("request for %s", hdev->name); + if (!IS_ENABLED(CONFIG_BT_HS)) + return mgmt_cmd_status(sk, hdev->id, 
MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); + status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 1153bbcdff72..8ceebe872661 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -10,12 +10,13 @@ #include <net/bpf_sk_storage.h> #include <net/sock.h> #include <net/tcp.h> +#include <linux/error-injection.h> #define CREATE_TRACE_POINTS #include <trace/events/bpf_test_run.h> static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, - u32 *retval, u32 *time) + u32 *retval, u32 *time, bool xdp) { struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL }; enum bpf_cgroup_storage_type stype; @@ -41,7 +42,11 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, time_start = ktime_get_ns(); for (i = 0; i < repeat; i++) { bpf_cgroup_storage_set(storage); - *retval = BPF_PROG_RUN(prog, ctx); + + if (xdp) + *retval = bpf_prog_run_xdp(prog, ctx); + else + *retval = BPF_PROG_RUN(prog, ctx); if (signal_pending(current)) { ret = -EINTR; @@ -105,6 +110,48 @@ out: return err; } +/* Integer types of various sizes and pointer combinations cover a variety of + * architecture dependent calling conventions. 7+ can be supported in the + * future. + */ +int noinline bpf_fentry_test1(int a) +{ + return a + 1; +} + +int noinline bpf_fentry_test2(int a, u64 b) +{ + return a + b; +} + +int noinline bpf_fentry_test3(char a, int b, u64 c) +{ + return a + b + c; +} + +int noinline bpf_fentry_test4(void *a, char b, int c, u64 d) +{ + return (long)a + b + c + d; +} + +int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e) +{ + return a + (long)b + c + d + e; +} + +int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f) +{ + return a + (long)b + c + d + (long)e + f; +} + +int noinline bpf_modify_return_test(int a, int *b) +{ + *b += 1; + return a + *b; +} + +ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); + static void *bpf_test_init(const union bpf_attr *kattr, u32 size, u32 headroom, u32 tailroom) { @@ -122,9 +169,48 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size, kfree(data); return ERR_PTR(-EFAULT); } + return data; } +int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + u16 side_effect = 0, ret = 0; + int b = 2, err = -EFAULT; + u32 retval = 0; + + switch (prog->expected_attach_type) { + case BPF_TRACE_FENTRY: + case BPF_TRACE_FEXIT: + if (bpf_fentry_test1(1) != 2 || + bpf_fentry_test2(2, 3) != 5 || + bpf_fentry_test3(4, 5, 6) != 15 || + bpf_fentry_test4((void *)7, 8, 9, 10) != 34 || + bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 || + bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) + goto out; + break; + case BPF_MODIFY_RETURN: + ret = bpf_modify_return_test(1, &b); + if (b != 2) + side_effect = 1; + break; + default: + goto out; + } + + retval = ((u32)side_effect << 16) | ret; + if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) + goto out; + + err = 0; +out: + trace_bpf_test_finish(&err); + return err; +} + static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size) { void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in); @@ -307,7 +393,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, ret = convert___skb_to_skb(skb, ctx); if (ret) goto out; - ret = bpf_test_run(prog, skb, repeat, &retval, &duration); + ret = bpf_test_run(prog, skb, repeat,
&retval, &duration, false); if (ret) goto out; if (!is_l2) { @@ -364,8 +450,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); xdp.rxq = &rxqueue->xdp_rxq; - - ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration); + bpf_prog_change_xdp(NULL, prog); + ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); if (ret) goto out; if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || @@ -373,6 +459,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, size = xdp.data_end - xdp.data; ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); out: + bpf_prog_change_xdp(prog, NULL); kfree(data); return ret; } diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c index efea4874743e..05e1cfc1e5cd 100644 --- a/net/bpfilter/main.c +++ b/net/bpfilter/main.c @@ -35,7 +35,6 @@ static void loop(void) struct mbox_reply reply; int n; - fprintf(debug_f, "testing the buffer\n"); n = read(0, &req, sizeof(req)); if (n != sizeof(req)) { fprintf(debug_f, "invalid request %d\n", n); diff --git a/net/bridge/br.c b/net/bridge/br.c index 8a8f9e5f264f..cccbb9bf3ca4 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -43,7 +43,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v if (event == NETDEV_REGISTER) { /* register of bridge completed, add sysfs entries */ - br_sysfs_addbr(dev); + err = br_sysfs_addbr(dev); + if (err) + return notifier_from_errno(err); + return NOTIFY_DONE; } } diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c index 37908561a64b..b18cdf03edb3 100644 --- a/net/bridge/br_arp_nd_proxy.c +++ b/net/bridge/br_arp_nd_proxy.c @@ -276,6 +276,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, ns_olen = request->len - (skb_network_offset(request) + sizeof(struct ipv6hdr)) - sizeof(*ns); for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) { + if (!ns->opt[i + 1]) { + kfree_skb(reply); + return; + } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 022dc6e504c4..0dd8984a261d 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -168,6 +168,9 @@ static int br_dev_open(struct net_device *dev) br_stp_enable_bridge(br); br_multicast_open(br); + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_join_snoopers(br); + return 0; } @@ -188,6 +191,9 @@ static int br_dev_stop(struct net_device *dev) br_stp_disable_bridge(br); br_multicast_stop(br); + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_leave_snoopers(br); + netif_stop_queue(dev); return 0; @@ -214,6 +220,7 @@ static void br_get_stats64(struct net_device *dev, sum.rx_packets += tmp.rx_packets; } + netdev_stats_to_stats64(stats, &dev->stats); stats->tx_bytes = sum.tx_bytes; stats->tx_packets = sum.tx_packets; stats->rx_bytes = sum.rx_bytes; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ad12fe3fca8c..6276030f5854 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); if (skb_transport_offset(skb) + ipv6_transport_len(skb) < - nsrcs_offset + sizeof(_nsrcs)) + nsrcs_offset + sizeof(__nsrcs)) return -EINVAL; _nsrcs = skb_header_pointer(skb, nsrcs_offset, @@ -1647,25 
+1647,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, } #if IS_ENABLED(CONFIG_IPV6) -static int br_ip6_multicast_mrd_rcv(struct net_bridge *br, - struct net_bridge_port *port, - struct sk_buff *skb) +static void br_ip6_multicast_mrd_rcv(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) { - int ret; - - if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) - return -ENOMSG; - - ret = ipv6_mc_check_icmpv6(skb); - if (ret < 0) - return ret; - if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV) - return -ENOMSG; + return; br_multicast_mark_router(br, port); - - return 0; } static int br_multicast_ipv6_rcv(struct net_bridge *br, @@ -1679,18 +1668,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, err = ipv6_mc_check_mld(skb); - if (err == -ENOMSG) { + if (err == -ENOMSG || err == -ENODATA) { if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr)) BR_INPUT_SKB_CB(skb)->mrouters_only = 1; - - if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) { - err = br_ip6_multicast_mrd_rcv(br, port, skb); - - if (err < 0 && err != -ENOMSG) { - br_multicast_err_count(br, port, skb->protocol); - return err; - } - } + if (err == -ENODATA && + ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) + br_ip6_multicast_mrd_rcv(br, port, skb); return 0; } else if (err < 0) { @@ -1848,7 +1831,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) } #endif -static void br_multicast_join_snoopers(struct net_bridge *br) +void br_multicast_join_snoopers(struct net_bridge *br) { br_ip4_multicast_join_snoopers(br); br_ip6_multicast_join_snoopers(br); @@ -1879,7 +1862,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) } #endif -static void br_multicast_leave_snoopers(struct net_bridge *br) +void br_multicast_leave_snoopers(struct net_bridge *br) { br_ip4_multicast_leave_snoopers(br); br_ip6_multicast_leave_snoopers(br); @@ -1898,9 +1881,6 @@ static void __br_multicast_open(struct net_bridge *br, void br_multicast_open(struct net_bridge *br) { - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) - br_multicast_join_snoopers(br); - __br_multicast_open(br, &br->ip4_own_query); #if IS_ENABLED(CONFIG_IPV6) __br_multicast_open(br, &br->ip6_own_query); @@ -1916,9 +1896,6 @@ void br_multicast_stop(struct net_bridge *br) del_timer_sync(&br->ip6_other_query.timer); del_timer_sync(&br->ip6_own_query.timer); #endif - - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) - br_multicast_leave_snoopers(br); } void br_multicast_dev_del(struct net_bridge *br) @@ -2049,6 +2026,7 @@ static void br_multicast_start_querier(struct net_bridge *br, int br_multicast_toggle(struct net_bridge *br, unsigned long val) { struct net_bridge_port *port; + bool change_snoopers = false; spin_lock_bh(&br->multicast_lock); if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) @@ -2057,7 +2035,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) br_mc_disabled_update(br->dev, val); br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { - br_multicast_leave_snoopers(br); + change_snoopers = true; goto unlock; } @@ -2068,9 +2046,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) list_for_each_entry(port, &br->port_list, list) __br_multicast_enable_port(port); + change_snoopers = true; + unlock: spin_unlock_bh(&br->multicast_lock); + /* br_multicast_join_snoopers has the potential to cause + * an MLD Report/Leave to be delivered to br_multicast_rcv, + * which would in turn call br_multicast_add_group, 
which would + * attempt to acquire multicast_lock. This function should be + * called after the lock has been released to avoid deadlocks on + * multicast_lock. + * + * br_multicast_leave_snoopers does not have the problem since + * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and + * returns without calling br_multicast_ipv4/6_rcv if it's not + * enabled. Moved both functions out just for symmetry. + */ + if (change_snoopers) { + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_join_snoopers(br); + else + br_multicast_leave_snoopers(br); + } + return 0; } diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 59980ecfc962..2371b833b2bc 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff mtu_reserved = nf_bridge_mtu_reduction(skb); mtu = skb->dev->mtu; + if (nf_bridge->pkt_otherhost) { + skb->pkt_type = PACKET_OTHERHOST; + nf_bridge->pkt_otherhost = false; + } + if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; @@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv, else return NF_ACCEPT; - /* We assume any code from br_dev_queue_push_xmit onwards doesn't care - * about the value of skb->pkt_type. */ if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->pkt_otherhost = true; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index ce2ab14ee605..7615c2210e0d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -208,8 +208,8 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct br_ip addr; + unsigned char eth_addr[ETH_ALEN] __aligned(2); unsigned char flags; - unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry { @@ -665,6 +665,8 @@ void br_multicast_del_port(struct net_bridge_port *port); void br_multicast_enable_port(struct net_bridge_port *port); void br_multicast_disable_port(struct net_bridge_port *port); void br_multicast_init(struct net_bridge *br); +void br_multicast_join_snoopers(struct net_bridge *br); +void br_multicast_leave_snoopers(struct net_bridge *br); void br_multicast_open(struct net_bridge *br); void br_multicast_stop(struct net_bridge *br); void br_multicast_dev_del(struct net_bridge *br); @@ -792,6 +794,14 @@ static inline void br_multicast_init(struct net_bridge *br) { } +static inline void br_multicast_join_snoopers(struct net_bridge *br) +{ +} + +static inline void br_multicast_leave_snoopers(struct net_bridge *br) +{ +} + static inline void br_multicast_open(struct net_bridge *br) { } diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 7a59cdddd3ce..5047e9c2333a 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -55,9 +55,8 @@ static BRPORT_ATTR(_name, 0644, \ static int store_flag(struct net_bridge_port *p, unsigned long v, unsigned long mask) { - unsigned long flags; - - flags = p->flags; + unsigned long flags = p->flags; + int err; if (v) flags |= mask; @@ -65,6 +64,10 @@ static int store_flag(struct net_bridge_port *p, unsigned long v, flags &= ~mask; if (flags != p->flags) { + err = br_switchdev_set_port_flag(p, flags, mask); + if (err) + return err; + p->flags = flags; br_port_flags_change(p, mask); } diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index bb98984cd27d..9257292bd1ae 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -260,8 +260,10 @@ static int 
__vlan_add(struct net_bridge_vlan *v, u16 flags, } masterv = br_vlan_get_master(br, v->vid, extack); - if (!masterv) + if (!masterv) { + err = -ENOMEM; goto out_filt; + } v->brvlan = masterv; if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) { v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats); @@ -1229,11 +1231,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v, } } -static int __br_vlan_get_pvid(const struct net_device *dev, - struct net_bridge_port *p, u16 *p_pvid) +int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) { struct net_bridge_vlan_group *vg; + struct net_bridge_port *p; + ASSERT_RTNL(); + p = br_port_get_check_rtnl(dev); if (p) vg = nbp_vlan_group(p); else if (netif_is_bridge_master(dev)) @@ -1244,18 +1248,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev, *p_pvid = br_get_pvid(vg); return 0; } - -int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) -{ - ASSERT_RTNL(); - - return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid); -} EXPORT_SYMBOL_GPL(br_vlan_get_pvid); int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) { - return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid); + struct net_bridge_vlan_group *vg; + struct net_bridge_port *p; + + p = br_port_get_check_rcu(dev); + if (p) + vg = nbp_vlan_group_rcu(p); + else if (netif_is_bridge_master(dev)) + vg = br_vlan_group_rcu(netdev_priv(dev)); + else + return -EINVAL; + + *p_pvid = br_get_pvid(vg); + return 0; } EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu); diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c index 12a4f4d93681..3fda71a8579d 100644 --- a/net/bridge/netfilter/ebt_dnat.c +++ b/net/bridge/netfilter/ebt_dnat.c @@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct ebt_nat_info *info = par->targinfo; - if (skb_ensure_writable(skb, ETH_ALEN)) + if (skb_ensure_writable(skb, 0)) return EBT_DROP; ether_addr_copy(eth_hdr(skb)->h_dest, info->mac); diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index 0cad62a4052b..307790562b49 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c @@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct ebt_redirect_info *info = par->targinfo; - if (skb_ensure_writable(skb, ETH_ALEN)) + if (skb_ensure_writable(skb, 0)) return EBT_DROP; if (xt_hooknum(par) != NF_BR_BROUTING) diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index 27443bf229a3..7dfbcdfc30e5 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c @@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct ebt_nat_info *info = par->targinfo; - if (skb_ensure_writable(skb, ETH_ALEN * 2)) + if (skb_ensure_writable(skb, 0)) return EBT_DROP; ether_addr_copy(eth_hdr(skb)->h_source, info->mac); diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 66e7af165494..32bc2821027f 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net) &net->xt.broute_table); } +static void __net_exit broute_net_pre_exit(struct net *net) +{ + ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute); +} + static void __net_exit broute_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.broute_table, 
&ebt_ops_broute); + ebt_unregister_table(net, net->xt.broute_table); } static struct pernet_operations broute_net_ops = { .init = broute_net_init, .exit = broute_net_exit, + .pre_exit = broute_net_pre_exit, }; static int __init ebtable_broute_init(void) diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 78cb9b21022d..bcf982e12f16 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net) &net->xt.frame_filter); } +static void __net_exit frame_filter_net_pre_exit(struct net *net) +{ + ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter); +} + static void __net_exit frame_filter_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter); + ebt_unregister_table(net, net->xt.frame_filter); } static struct pernet_operations frame_filter_net_ops = { .init = frame_filter_net_init, .exit = frame_filter_net_exit, + .pre_exit = frame_filter_net_pre_exit, }; static int __init ebtable_filter_init(void) diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 0888936ef853..0d092773f816 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net) &net->xt.frame_nat); } +static void __net_exit frame_nat_net_pre_exit(struct net *net) +{ + ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat); +} + static void __net_exit frame_nat_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat); + ebt_unregister_table(net, net->xt.frame_nat); } static struct pernet_operations frame_nat_net_ops = { .init = frame_nat_net_init, .exit = frame_nat_net_exit, + .pre_exit = frame_nat_net_pre_exit, }; static int __init ebtable_nat_init(void) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index e1256e03a9a8..d9375c52f50e 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1237,10 +1237,34 @@ out: return ret; } -void ebt_unregister_table(struct net *net, struct ebt_table *table, - const struct nf_hook_ops *ops) +static struct ebt_table *__ebt_find_table(struct net *net, const char *name) +{ + struct ebt_table *t; + + mutex_lock(&ebt_mutex); + + list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) { + if (strcmp(t->name, name) == 0) { + mutex_unlock(&ebt_mutex); + return t; + } + } + + mutex_unlock(&ebt_mutex); + return NULL; +} + +void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops) +{ + struct ebt_table *table = __ebt_find_table(net, name); + + if (table) + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); +} +EXPORT_SYMBOL(ebt_unregister_table_pre_exit); + +void ebt_unregister_table(struct net *net, struct ebt_table *table) { - nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ebt_unregister_table(net, table); } diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 809673222382..8d033a75a766 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb, static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, const struct nf_hook_state *state) { +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) u16 zone_id = NF_CT_DEFAULT_ZONE_ID; enum 
ip_conntrack_info ctinfo; struct br_input_skb_cb cb; @@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm)); - err = nf_ipv6_br_defrag(state->net, skb, - IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); + err = nf_ct_frag6_gather(state->net, skb, + IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); /* queued */ if (err == -EINPROGRESS) return NF_STOLEN; br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size); return err == 0 ? NF_ACCEPT : NF_DROP; +#else + return NF_ACCEPT; +#endif } static int nf_ct_br_ip_check(const struct sk_buff *skb) diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index b325b569e761..f48cf4cfb80f 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -31,6 +31,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); eth->h_proto = eth_hdr(oldskb)->h_proto; skb_pull(nskb, ETH_HLEN); + + if (skb_vlan_tag_present(oldskb)) { + u16 vid = skb_vlan_tag_get(oldskb); + + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); + } } static int nft_bridge_iphdr_validate(struct sk_buff *skb) diff --git a/net/can/af_can.c b/net/can/af_can.c index 128d37a4c2e0..c758a12ffe46 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net, struct net_device *dev) { if (dev) { - struct can_ml_priv *ml_priv = dev->ml_priv; - return &ml_priv->dev_rcv_lists; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + return &can_ml->dev_rcv_lists; } else { return net->can.rx_alldev_list; } @@ -539,10 +539,13 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id, /* Check for bugs in CAN protocol implementations using af_can.c: * 'rcv' will be NULL if no matching list item was found for removal. + * As this case may potentially happen when closing a socket while + * the notifier for removing the CAN netdev is running, we just print + * a warning here. */ if (!rcv) { - WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n", - DNAME(dev), can_id, mask); + pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n", + DNAME(dev), can_id, mask); goto out; } @@ -675,16 +678,25 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN)) { - pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n", + if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) { + pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", + dev->type, skb->len); + goto free_skb; + } + + /* This check is made separately since cfd->len would be uninitialized if skb->len = 0.
*/ + if (unlikely(cfd->len > CAN_MAX_DLEN)) { + pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n", dev->type, skb->len, cfd->len); - kfree_skb(skb); - return NET_RX_DROP; + goto free_skb; } can_receive(skb, dev); return NET_RX_SUCCESS; + +free_skb: + kfree_skb(skb); + return NET_RX_DROP; } static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, @@ -692,16 +704,25 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN)) { - pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n", + if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) { + pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", + dev->type, skb->len); + goto free_skb; + } + + /* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */ + if (unlikely(cfd->len > CANFD_MAX_DLEN)) { + pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n", dev->type, skb->len, cfd->len); - kfree_skb(skb); - return NET_RX_DROP; + goto free_skb; } can_receive(skb, dev); return NET_RX_SUCCESS; + +free_skb: + kfree_skb(skb); + return NET_RX_DROP; } /* af_can protocol functions */ @@ -767,25 +788,6 @@ void can_proto_unregister(const struct can_proto *cp) } EXPORT_SYMBOL(can_proto_unregister); -/* af_can notifier to create/remove CAN netdevice specific structs */ -static int can_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (dev->type != ARPHRD_CAN) - return NOTIFY_DONE; - - switch (msg) { - case NETDEV_REGISTER: - WARN(!dev->ml_priv, - "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n"); - break; - } - - return NOTIFY_DONE; -} - static int can_pernet_init(struct net *net) { spin_lock_init(&net->can.rcvlists_lock); @@ -853,11 +855,6 @@ static const struct net_proto_family can_family_ops = { .owner = THIS_MODULE, }; -/* notifier block for netdevice event */ -static struct notifier_block can_netdev_notifier __read_mostly = { - .notifier_call = can_notifier, -}; - static struct pernet_operations can_pernet_ops __read_mostly = { .init = can_pernet_init, .exit = can_pernet_exit, @@ -888,17 +885,12 @@ static __init int can_init(void) err = sock_register(&can_family_ops); if (err) goto out_sock; - err = register_netdevice_notifier(&can_netdev_notifier); - if (err) - goto out_notifier; dev_add_pack(&can_packet); dev_add_pack(&canfd_packet); return 0; -out_notifier: - sock_unregister(PF_CAN); out_sock: unregister_pernet_subsys(&can_pernet_ops); out_pernet: @@ -912,7 +904,6 @@ static __exit void can_exit(void) /* protocol unregister */ dev_remove_pack(&canfd_packet); dev_remove_pack(&can_packet); - unregister_netdevice_notifier(&can_netdev_notifier); sock_unregister(PF_CAN); unregister_pernet_subsys(&can_pernet_ops); diff --git a/net/can/bcm.c b/net/can/bcm.c index c96fa0f33db3..af81804f8e46 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -88,6 +88,8 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-proto-2"); +#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) + /* * easy access to the first 64 bit of can(fd)_frame payload. 
cp->data is * 64 bit aligned so the offset has to be multiples of 8 which is ensured @@ -778,6 +780,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, bcm_rx_handler, op); list_del(&op->list); + synchronize_rcu(); bcm_remove_op(op); return 1; /* done */ } @@ -1294,7 +1297,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* no bound device as default => check msg_name */ DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); - if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (msg->msg_namelen < BCM_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) @@ -1503,9 +1506,13 @@ static int bcm_release(struct socket *sock) REGMASK(op->can_id), bcm_rx_handler, op); - bcm_remove_op(op); } + synchronize_rcu(); + + list_for_each_entry_safe(op, next, &bo->rx_ops, list) + bcm_remove_op(op); + #if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ if (net->can.bcmproc_dir && bo->bcm_proc_read) @@ -1536,7 +1543,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct net *net = sock_net(sk); int ret = 0; - if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (len < BCM_MIN_NAMELEN) return -EINVAL; lock_sock(sk); @@ -1618,8 +1625,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { - __sockaddr_check_size(sizeof(struct sockaddr_can)); - msg->msg_namelen = sizeof(struct sockaddr_can); + __sockaddr_check_size(BCM_MIN_NAMELEN); + msg->msg_namelen = BCM_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c index 137054bff9ec..e52330f628c9 100644 --- a/net/can/j1939/main.c +++ b/net/can/j1939/main.c @@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev) static inline void j1939_priv_set(struct net_device *ndev, struct j1939_priv *priv) { - struct can_ml_priv *can_ml_priv = ndev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); - can_ml_priv->j1939_priv = priv; + can_ml->j1939_priv = priv; } static void __j1939_priv_release(struct kref *kref) @@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref) /* get pointer to priv without increasing ref counter */ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev) { - struct can_ml_priv *can_ml_priv = ndev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); - if (!can_ml_priv) - return NULL; - - return can_ml_priv->j1939_priv; + return can_ml->j1939_priv; } static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) @@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) lockdep_assert_held(&j1939_netdev_lock); - if (ndev->type != ARPHRD_CAN) - return NULL; - priv = j1939_ndev_to_priv(ndev); if (priv) j1939_priv_get(priv); @@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb, unsigned long msg, void *data) { struct net_device *ndev = netdev_notifier_info_to_dev(data); + struct can_ml_priv *can_ml = can_get_ml_priv(ndev); struct j1939_priv *priv; + if (!can_ml) + goto notify_done; + priv = j1939_priv_get_by_ndev(ndev); if (!priv) goto notify_done; - if (ndev->type != ARPHRD_CAN) - goto notify_put; - switch (msg) { case NETDEV_DOWN: j1939_cancel_active_session(priv, NULL); @@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb, break; } -notify_put: j1939_priv_put(priv); 
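The recurring change across the af_can.c and j1939 hunks is the retirement of raw dev->ml_priv dereferences guarded by scattered dev->type != ARPHRD_CAN checks in favour of a single accessor that returns the CAN midlayer private data, or NULL for non-CAN devices. A schematic of that accessor pattern with simplified stand-in types (not the kernel's definitions):

#include <stddef.h>

enum dev_type { DEV_CAN, DEV_OTHER };

struct net_device {
	enum dev_type type;
	void *ml_priv;		/* midlayer private data, type-dependent */
};

struct can_ml_priv {
	int dev_rcv_lists;	/* stand-in for the real receive lists */
};

/* Centralized, type-checked accessor: callers get NULL for non-CAN
 * devices instead of each open-coding the dev->type check.
 */
static struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
{
	return dev->type == DEV_CAN ? dev->ml_priv : NULL;
}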
notify_done: diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index f7587428febd..d57475c8ba07 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/can/can-ml.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/errqueue.h> @@ -398,6 +399,7 @@ static int j1939_sk_init(struct sock *sk) spin_lock_init(&jsk->sk_session_queue_lock); INIT_LIST_HEAD(&jsk->sk_session_queue); sk->sk_destruct = j1939_sk_sock_destruct; + sk->sk_protocol = CAN_J1939; return 0; } @@ -452,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) j1939_jsk_del(priv, jsk); j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa); } else { + struct can_ml_priv *can_ml; struct net_device *ndev; ndev = dev_get_by_index(net, addr->can_ifindex); @@ -460,12 +463,19 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out_release_sock; } - if (ndev->type != ARPHRD_CAN) { + can_ml = can_get_ml_priv(ndev); + if (!can_ml) { dev_put(ndev); ret = -ENODEV; goto out_release_sock; } + if (!(ndev->flags & IFF_UP)) { + dev_put(ndev); + ret = -ENETDOWN; + goto out_release_sock; + } + priv = j1939_netdev_start(ndev); dev_put(ndev); if (IS_ERR(priv)) { @@ -553,6 +563,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr, static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr, const struct j1939_sock *jsk, int peer) { + /* There are two holes (2 bytes and 3 bytes) to clear to avoid + * leaking kernel information to user space. + */ + memset(addr, 0, J1939_MIN_NAMELEN); + addr->can_family = AF_CAN; addr->can_ifindex = jsk->ifindex; addr->can_addr.j1939.pgn = jsk->addr.pgn; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 9f99af5b0b11..916fdf2464bc 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session, skb_queue_tail(&session->skb_queue, skb); } -static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +static struct +sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session, + unsigned int offset_start) { struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *do_skcb; struct sk_buff *skb = NULL; struct sk_buff *do_skb; - struct j1939_sk_buff_cb *do_skcb; - unsigned int offset_start; unsigned long flags; - offset_start = session->pkt.dpo * 7; - spin_lock_irqsave(&session->skb_queue.lock, flags); skb_queue_walk(&session->skb_queue, do_skb) { do_skcb = j1939_skb_to_cb(do_skb); @@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) return skb; } +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +{ + unsigned int offset_start; + + offset_start = session->pkt.dpo * 7; + return j1939_session_skb_find_by_offset(session, offset_start); +} + /* see if we are receiver * returns 0 for broadcasts, although we will receive them */ @@ -573,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv, skb->dev = priv->ndev; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + can_skb_prv(skb)->skbcnt = 0; /* reserve CAN header */ skb_reserve(skb, offsetof(struct can_frame, data)); @@ -716,10 +724,12 @@ static int j1939_session_tx_rts(struct j1939_session *session) return ret; session->last_txcmd = dat[0]; - if (dat[0] == J1939_TP_CMD_BAM) + if (dat[0] == J1939_TP_CMD_BAM) { 
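/* BAM is a connectionless broadcast: no CTS handshake follows, so data
 * frames are paced purely by the tx timer, and the rx timeout acts only
 * as a session watchdog.
 */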
j1939_tp_schedule_txtimer(session, 50); - - j1939_tp_set_rxtimeout(session, 1250); + j1939_tp_set_rxtimeout(session, 250); + } else { + j1939_tp_set_rxtimeout(session, 1250); + } netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); @@ -766,7 +776,7 @@ static int j1939_session_tx_dat(struct j1939_session *session) int ret = 0; u8 dat[8]; - se_skb = j1939_session_skb_find(session); + se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7); if (!se_skb) return -ENOBUFS; @@ -787,6 +797,18 @@ static int j1939_session_tx_dat(struct j1939_session *session) if (len > 7) len = 7; + if (offset + len > se_skb->len) { + netdev_err_once(priv->ndev, + "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n", + __func__, session, skcb->offset, se_skb->len , session->pkt.tx); + return -EOVERFLOW; + } + + if (!len) { + ret = -ENOBUFS; + break; + } + memcpy(&dat[1], &tpdat[offset], len); ret = j1939_tp_tx_dat(session, dat, len + 1); if (ret < 0) { @@ -1055,9 +1077,9 @@ static void __j1939_session_cancel(struct j1939_session *session, lockdep_assert_held(&session->priv->active_session_list_lock); session->err = j1939_xtp_abort_to_errno(priv, err); + session->state = J1939_SESSION_WAITING_ABORT; /* do not send aborts on incoming broadcasts */ if (!j1939_cb_is_broadcast(&session->skcb)) { - session->state = J1939_SESSION_WAITING_ABORT; j1939_xtp_tx_abort(priv, &session->skcb, !session->transmission, err, session->skcb.addr.pgn); @@ -1120,6 +1142,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) * cleanup including propagation of the error to user space. */ break; + case -EOVERFLOW: + j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG); + break; case 0: session->tx_retry = 0; break; @@ -1463,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv, skb->dev = priv->ndev; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + can_skb_prv(skb)->skbcnt = 0; skcb = j1939_skb_to_cb(skb); memcpy(skcb, rel_skcb, sizeof(*skcb)); @@ -1651,8 +1677,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, return; } session = j1939_xtp_rx_rts_session_new(priv, skb); - if (!session) + if (!session) { + if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb)) + netdev_info(priv->ndev, "%s: failed to create TP BAM session\n", + __func__); return; + } } else { if (j1939_xtp_rx_rts_session_active(session, skb)) { j1939_session_put(session); @@ -1661,11 +1691,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, } session->last_cmd = cmd; - j1939_tp_set_rxtimeout(session, 1250); - - if (cmd != J1939_TP_CMD_BAM && !session->transmission) { - j1939_session_txtimer_cancel(session); - j1939_tp_schedule_txtimer(session, 0); + if (cmd == J1939_TP_CMD_BAM) { + if (!session->transmission) + j1939_tp_set_rxtimeout(session, 750); + } else { + if (!session->transmission) { + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + j1939_tp_set_rxtimeout(session, 1250); } j1939_session_put(session); @@ -1716,6 +1750,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, int offset; int nbytes; bool final = false; + bool remain = false; bool do_cts_eoma = false; int packet; @@ -1750,7 +1785,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, __func__, session); goto out_session_cancel; } - se_skb = j1939_session_skb_find(session); + + se_skb = j1939_session_skb_find_by_offset(session, packet * 7); if 
(!se_skb) { netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__, session); @@ -1769,7 +1805,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, } tpdat = se_skb->data; - memcpy(&tpdat[offset], &dat[1], nbytes); + if (!session->transmission) { + memcpy(&tpdat[offset], &dat[1], nbytes); + } else { + int err; + + err = memcmp(&tpdat[offset], &dat[1], nbytes); + if (err) + netdev_err_once(priv->ndev, + "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n", + __func__, session, + nbytes, &dat[1], + nbytes, &tpdat[offset]); + } + if (packet == session->pkt.rx) session->pkt.rx++; @@ -1777,6 +1826,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, j1939_cb_is_broadcast(&session->skcb)) { if (session->pkt.rx >= session->pkt.total) final = true; + else + remain = true; } else { /* never final, an EOMA must follow */ if (session->pkt.rx >= session->pkt.last) @@ -1784,7 +1835,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, } if (final) { + j1939_session_timers_cancel(session); j1939_session_completed(session); + } else if (remain) { + if (!session->transmission) + j1939_tp_set_rxtimeout(session, 750); } else if (do_cts_eoma) { j1939_tp_set_rxtimeout(session, 1250); if (!session->transmission) @@ -1829,6 +1884,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb) else j1939_xtp_rx_dat_one(session, skb); } + + if (j1939_cb_is_broadcast(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + false); + if (session) + j1939_xtp_rx_dat_one(session, skb); + } } /* j1939 main intf */ @@ -1920,7 +1982,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) if (j1939_tp_im_transmitter(skcb)) j1939_xtp_rx_rts(priv, skb, true); - if (j1939_tp_im_receiver(skcb)) + if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb)) j1939_xtp_rx_rts(priv, skb, false); break; @@ -1984,7 +2046,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) { struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); - if (!j1939_tp_im_involved_anydir(skcb)) + if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb)) return 0; switch (skcb->addr.pgn) { @@ -2017,6 +2079,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb) if (!skb->sk) return; + if (skb->sk->sk_family != AF_CAN || + skb->sk->sk_protocol != CAN_J1939) + return; + j1939_session_list_lock(priv); session = j1939_session_get_simple(priv, skb); j1939_session_list_unlock(priv); diff --git a/net/can/proc.c b/net/can/proc.c index e6881bfc3ed1..a5fc63c78370 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -329,8 +329,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v) /* receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) - can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv); + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) + can_rcvlist_proc_show_one(m, idx, dev, + &can_ml->dev_rcv_lists); } rcu_read_unlock(); @@ -382,8 +385,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) /* sff receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) { - dev_rcv_lists = dev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) { + dev_rcv_lists = &can_ml->dev_rcv_lists; can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff, ARRAY_SIZE(dev_rcv_lists->rx_sff)); } 
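The proc handlers above illustrate the pattern used throughout these hunks: every open-coded "dev->type == ARPHRD_CAN && dev->ml_priv" test is funneled through a single accessor, can_get_ml_priv(). Its implementation is not part of this diff; as a hedged sketch (the in-tree helper may be structured differently), an accessor with the behavior the callers assume amounts to:

#include <linux/netdevice.h>  /* struct net_device */
#include <linux/if_arp.h>     /* ARPHRD_CAN */
#include <linux/can/can-ml.h> /* struct can_ml_priv */

/* Hedged sketch of the accessor assumed by the callers above: return the
 * CAN mid-layer private data only for CAN netdevices, so the device-type
 * check and the ml_priv check collapse into one NULL test at each site.
 */
static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
{
	if (dev->type != ARPHRD_CAN)
		return NULL;
	return dev->ml_priv;
}

Callers such as can_rcvlist_proc_show() then test the returned pointer once and take &can_ml->dev_rcv_lists from it, rather than dereferencing dev->ml_priv directly at every site.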
@@ -413,8 +418,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) /* eff receive list for registered CAN devices */ for_each_netdev_rcu(net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) { - dev_rcv_lists = dev->ml_priv; + struct can_ml_priv *can_ml = can_get_ml_priv(dev); + + if (can_ml) { + dev_rcv_lists = &can_ml->dev_rcv_lists; can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff, ARRAY_SIZE(dev_rcv_lists->rx_eff)); } @@ -471,6 +478,9 @@ void can_init_proc(struct net *net) */ void can_remove_proc(struct net *net) { + if (!net->can.proc_dir) + return; + if (net->can.pde_version) remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir); @@ -498,6 +508,5 @@ void can_remove_proc(struct net *net) if (net->can.pde_rcvlist_sff) remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir); - if (net->can.proc_dir) - remove_proc_entry("can", net->proc_net); + remove_proc_entry("can", net->proc_net); } diff --git a/net/can/raw.c b/net/can/raw.c index 59c039d73c6d..af513d0957c7 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -62,6 +62,8 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); MODULE_ALIAS("can-proto-1"); +#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) + #define MASK_ALL 0 /* A raw socket has a list of can_filters attached to it, each receiving @@ -396,7 +398,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) int err = 0; int notify_enetdown = 0; - if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (len < RAW_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; @@ -477,11 +479,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr, if (peer) return -EOPNOTSUPP; - memset(addr, 0, sizeof(*addr)); + memset(addr, 0, RAW_MIN_NAMELEN); addr->can_family = AF_CAN; addr->can_ifindex = ro->ifindex; - return sizeof(*addr); + return RAW_MIN_NAMELEN; } static int raw_setsockopt(struct socket *sock, int level, int optname, @@ -733,7 +735,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); - if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (msg->msg_namelen < RAW_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) @@ -822,8 +824,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { - __sockaddr_check_size(sizeof(struct sockaddr_can)); - msg->msg_namelen = sizeof(struct sockaddr_can); + __sockaddr_check_size(RAW_MIN_NAMELEN); + msg->msg_namelen = RAW_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 9a8de6ef451a..8e9964b7ec6b 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -810,6 +810,7 @@ static struct ctl_table ceph_table[] = { .mode = 0644, .proc_handler = proc_douintvec_minmax, }, + {} }; static struct ctl_table ceph_dir_table[] = { diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 4f956fabe462..1554d84f0527 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -3013,6 +3013,11 @@ static void con_fault(struct ceph_connection *con) ceph_msg_put(con->in_msg); con->in_msg = NULL; } + if (con->out_msg) { + BUG_ON(con->out_msg->con != con); + ceph_msg_put(con->out_msg); + con->out_msg = NULL; + } /* Requeue anything that hasn't been acked */ list_splice_init(&con->out_sent, 
&con->out_queue); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 76aa45a74439..e2f938f3a1b1 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -456,6 +456,7 @@ static void target_copy(struct ceph_osd_request_target *dest, dest->size = src->size; dest->min_size = src->min_size; dest->sort_bitwise = src->sort_bitwise; + dest->recovery_deletes = src->recovery_deletes; dest->flags = src->flags; dest->paused = src->paused; @@ -3683,7 +3684,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) * supported. */ req->r_t.target_oloc.pool = m.redirect.oloc.pool; - req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; + req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | + CEPH_OSD_FLAG_IGNORE_OVERLAY | + CEPH_OSD_FLAG_IGNORE_CACHE; req->r_tid = 0; __submit_request(req, false); goto out_unlock_osdc; diff --git a/net/compat.c b/net/compat.c index 0f7ded26059e..c848bcb517f3 100644 --- a/net/compat.c +++ b/net/compat.c @@ -291,6 +291,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) break; } /* Bump the usage count and install the file. */ + __receive_sock(fp[i]); fd_install(new_fd, get_file(fp[i])); } diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 0147b26f585a..5c64e4627862 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -6,95 +6,15 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/bpf.h> +#include <linux/btf_ids.h> +#include <linux/bpf_local_storage.h> #include <net/bpf_sk_storage.h> #include <net/sock.h> +#include <uapi/linux/sock_diag.h> #include <uapi/linux/btf.h> +#include <linux/btf_ids.h> -static atomic_t cache_idx; - -#define SK_STORAGE_CREATE_FLAG_MASK \ - (BPF_F_NO_PREALLOC | BPF_F_CLONE) - -struct bucket { - struct hlist_head list; - raw_spinlock_t lock; -}; - -/* Thp map is not the primary owner of a bpf_sk_storage_elem. - * Instead, the sk->sk_bpf_storage is. - * - * The map (bpf_sk_storage_map) is for two purposes - * 1. Define the size of the "sk local storage". It is - * the map's value_size. - * - * 2. Maintain a list to keep track of all elems such - * that they can be cleaned up during the map destruction. - * - * When a bpf local storage is being looked up for a - * particular sk, the "bpf_map" pointer is actually used - * as the "key" to search in the list of elem in - * sk->sk_bpf_storage. - * - * Hence, consider sk->sk_bpf_storage is the mini-map - * with the "bpf_map" pointer as the searching key. - */ -struct bpf_sk_storage_map { - struct bpf_map map; - /* Lookup elem does not require accessing the map. - * - * Updating/Deleting requires a bucket lock to - * link/unlink the elem from the map. Having - * multiple buckets to improve contention. - */ - struct bucket *buckets; - u32 bucket_log; - u16 elem_size; - u16 cache_idx; -}; - -struct bpf_sk_storage_data { - /* smap is used as the searching key when looking up - * from sk->sk_bpf_storage. - * - * Put it in the same cacheline as the data to minimize - * the number of cachelines access during the cache hit case. - */ - struct bpf_sk_storage_map __rcu *smap; - u8 data[0] __aligned(8); -}; - -/* Linked to bpf_sk_storage and bpf_sk_storage_map */ -struct bpf_sk_storage_elem { - struct hlist_node map_node; /* Linked to bpf_sk_storage_map */ - struct hlist_node snode; /* Linked to bpf_sk_storage */ - struct bpf_sk_storage __rcu *sk_storage; - struct rcu_head rcu; - /* 8 bytes hole */ - /* The data is stored in aother cacheline to minimize - * the number of cachelines access during a cache hit. 
- */ - struct bpf_sk_storage_data sdata ____cacheline_aligned; -}; - -#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata) -#define SDATA(_SELEM) (&(_SELEM)->sdata) -#define BPF_SK_STORAGE_CACHE_SIZE 16 - -struct bpf_sk_storage { - struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE]; - struct hlist_head list; /* List of bpf_sk_storage_elem */ - struct sock *sk; /* The sk that owns the the above "list" of - * bpf_sk_storage_elem. - */ - struct rcu_head rcu; - raw_spinlock_t lock; /* Protect adding/removing from the "list" */ -}; - -static struct bucket *select_bucket(struct bpf_sk_storage_map *smap, - struct bpf_sk_storage_elem *selem) -{ - return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; -} +DEFINE_BPF_STORAGE_CACHE(sk_cache); static int omem_charge(struct sock *sk, unsigned int size) { @@ -108,405 +28,29 @@ static int omem_charge(struct sock *sk, unsigned int size) return -ENOMEM; } -static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem) -{ - return !hlist_unhashed(&selem->snode); -} - -static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem) -{ - return !hlist_unhashed(&selem->map_node); -} - -static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap, - struct sock *sk, void *value, - bool charge_omem) -{ - struct bpf_sk_storage_elem *selem; - - if (charge_omem && omem_charge(sk, smap->elem_size)) - return NULL; - - selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN); - if (selem) { - if (value) - memcpy(SDATA(selem)->data, value, smap->map.value_size); - return selem; - } - - if (charge_omem) - atomic_sub(smap->elem_size, &sk->sk_omem_alloc); - - return NULL; -} - -/* sk_storage->lock must be held and selem->sk_storage == sk_storage. - * The caller must ensure selem->smap is still valid to be - * dereferenced for its smap->elem_size and smap->cache_idx. - */ -static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage, - struct bpf_sk_storage_elem *selem, - bool uncharge_omem) -{ - struct bpf_sk_storage_map *smap; - bool free_sk_storage; - struct sock *sk; - - smap = rcu_dereference(SDATA(selem)->smap); - sk = sk_storage->sk; - - /* All uncharging on sk->sk_omem_alloc must be done first. - * sk may be freed once the last selem is unlinked from sk_storage. - */ - if (uncharge_omem) - atomic_sub(smap->elem_size, &sk->sk_omem_alloc); - - free_sk_storage = hlist_is_singular_node(&selem->snode, - &sk_storage->list); - if (free_sk_storage) { - atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc); - sk_storage->sk = NULL; - /* After this RCU_INIT, sk may be freed and cannot be used */ - RCU_INIT_POINTER(sk->sk_bpf_storage, NULL); - - /* sk_storage is not freed now. sk_storage->lock is - * still held and raw_spin_unlock_bh(&sk_storage->lock) - * will be done by the caller. - * - * Although the unlock will be done under - * rcu_read_lock(), it is more intutivie to - * read if kfree_rcu(sk_storage, rcu) is done - * after the raw_spin_unlock_bh(&sk_storage->lock). - * - * Hence, a "bool free_sk_storage" is returned - * to the caller which then calls the kfree_rcu() - * after unlock. 
- */ - } - hlist_del_init_rcu(&selem->snode); - if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) == - SDATA(selem)) - RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL); - - kfree_rcu(selem, rcu); - - return free_sk_storage; -} - -static void selem_unlink_sk(struct bpf_sk_storage_elem *selem) -{ - struct bpf_sk_storage *sk_storage; - bool free_sk_storage = false; - - if (unlikely(!selem_linked_to_sk(selem))) - /* selem has already been unlinked from sk */ - return; - - sk_storage = rcu_dereference(selem->sk_storage); - raw_spin_lock_bh(&sk_storage->lock); - if (likely(selem_linked_to_sk(selem))) - free_sk_storage = __selem_unlink_sk(sk_storage, selem, true); - raw_spin_unlock_bh(&sk_storage->lock); - - if (free_sk_storage) - kfree_rcu(sk_storage, rcu); -} - -static void __selem_link_sk(struct bpf_sk_storage *sk_storage, - struct bpf_sk_storage_elem *selem) -{ - RCU_INIT_POINTER(selem->sk_storage, sk_storage); - hlist_add_head(&selem->snode, &sk_storage->list); -} - -static void selem_unlink_map(struct bpf_sk_storage_elem *selem) -{ - struct bpf_sk_storage_map *smap; - struct bucket *b; - - if (unlikely(!selem_linked_to_map(selem))) - /* selem has already be unlinked from smap */ - return; - - smap = rcu_dereference(SDATA(selem)->smap); - b = select_bucket(smap, selem); - raw_spin_lock_bh(&b->lock); - if (likely(selem_linked_to_map(selem))) - hlist_del_init_rcu(&selem->map_node); - raw_spin_unlock_bh(&b->lock); -} - -static void selem_link_map(struct bpf_sk_storage_map *smap, - struct bpf_sk_storage_elem *selem) -{ - struct bucket *b = select_bucket(smap, selem); - - raw_spin_lock_bh(&b->lock); - RCU_INIT_POINTER(SDATA(selem)->smap, smap); - hlist_add_head_rcu(&selem->map_node, &b->list); - raw_spin_unlock_bh(&b->lock); -} - -static void selem_unlink(struct bpf_sk_storage_elem *selem) -{ - /* Always unlink from map before unlinking from sk_storage - * because selem will be freed after successfully unlinked from - * the sk_storage. - */ - selem_unlink_map(selem); - selem_unlink_sk(selem); -} - -static struct bpf_sk_storage_data * -__sk_storage_lookup(struct bpf_sk_storage *sk_storage, - struct bpf_sk_storage_map *smap, - bool cacheit_lockit) -{ - struct bpf_sk_storage_data *sdata; - struct bpf_sk_storage_elem *selem; - - /* Fast path (cache hit) */ - sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]); - if (sdata && rcu_access_pointer(sdata->smap) == smap) - return sdata; - - /* Slow path (cache miss) */ - hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) - if (rcu_access_pointer(SDATA(selem)->smap) == smap) - break; - - if (!selem) - return NULL; - - sdata = SDATA(selem); - if (cacheit_lockit) { - /* spinlock is needed to avoid racing with the - * parallel delete. Otherwise, publishing an already - * deleted sdata to the cache will become a use-after-free - * problem in the next __sk_storage_lookup(). 
- */ - raw_spin_lock_bh(&sk_storage->lock); - if (selem_linked_to_sk(selem)) - rcu_assign_pointer(sk_storage->cache[smap->cache_idx], - sdata); - raw_spin_unlock_bh(&sk_storage->lock); - } - - return sdata; -} - -static struct bpf_sk_storage_data * +static struct bpf_local_storage_data * sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit) { - struct bpf_sk_storage *sk_storage; - struct bpf_sk_storage_map *smap; + struct bpf_local_storage *sk_storage; + struct bpf_local_storage_map *smap; sk_storage = rcu_dereference(sk->sk_bpf_storage); if (!sk_storage) return NULL; - smap = (struct bpf_sk_storage_map *)map; - return __sk_storage_lookup(sk_storage, smap, cacheit_lockit); -} - -static int check_flags(const struct bpf_sk_storage_data *old_sdata, - u64 map_flags) -{ - if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) - /* elem already exists */ - return -EEXIST; - - if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) - /* elem doesn't exist, cannot update it */ - return -ENOENT; - - return 0; -} - -static int sk_storage_alloc(struct sock *sk, - struct bpf_sk_storage_map *smap, - struct bpf_sk_storage_elem *first_selem) -{ - struct bpf_sk_storage *prev_sk_storage, *sk_storage; - int err; - - err = omem_charge(sk, sizeof(*sk_storage)); - if (err) - return err; - - sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN); - if (!sk_storage) { - err = -ENOMEM; - goto uncharge; - } - INIT_HLIST_HEAD(&sk_storage->list); - raw_spin_lock_init(&sk_storage->lock); - sk_storage->sk = sk; - - __selem_link_sk(sk_storage, first_selem); - selem_link_map(smap, first_selem); - /* Publish sk_storage to sk. sk->sk_lock cannot be acquired. - * Hence, atomic ops is used to set sk->sk_bpf_storage - * from NULL to the newly allocated sk_storage ptr. - * - * From now on, the sk->sk_bpf_storage pointer is protected - * by the sk_storage->lock. Hence, when freeing - * the sk->sk_bpf_storage, the sk_storage->lock must - * be held before setting sk->sk_bpf_storage to NULL. - */ - prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage, - NULL, sk_storage); - if (unlikely(prev_sk_storage)) { - selem_unlink_map(first_selem); - err = -EAGAIN; - goto uncharge; - - /* Note that even first_selem was linked to smap's - * bucket->list, first_selem can be freed immediately - * (instead of kfree_rcu) because - * bpf_sk_storage_map_free() does a - * synchronize_rcu() before walking the bucket->list. - * Hence, no one is accessing selem from the - * bucket->list under rcu_read_lock(). - */ - } - - return 0; - -uncharge: - kfree(sk_storage); - atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc); - return err; -} - -/* sk cannot be going away because it is linking new elem - * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0). - * Otherwise, it will become a leak (and other memory issues - * during map destruction). 
- */ -static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk, - struct bpf_map *map, - void *value, - u64 map_flags) -{ - struct bpf_sk_storage_data *old_sdata = NULL; - struct bpf_sk_storage_elem *selem; - struct bpf_sk_storage *sk_storage; - struct bpf_sk_storage_map *smap; - int err; - - /* BPF_EXIST and BPF_NOEXIST cannot be both set */ - if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) || - /* BPF_F_LOCK can only be used in a value with spin_lock */ - unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map))) - return ERR_PTR(-EINVAL); - - smap = (struct bpf_sk_storage_map *)map; - sk_storage = rcu_dereference(sk->sk_bpf_storage); - if (!sk_storage || hlist_empty(&sk_storage->list)) { - /* Very first elem for this sk */ - err = check_flags(NULL, map_flags); - if (err) - return ERR_PTR(err); - - selem = selem_alloc(smap, sk, value, true); - if (!selem) - return ERR_PTR(-ENOMEM); - - err = sk_storage_alloc(sk, smap, selem); - if (err) { - kfree(selem); - atomic_sub(smap->elem_size, &sk->sk_omem_alloc); - return ERR_PTR(err); - } - - return SDATA(selem); - } - - if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) { - /* Hoping to find an old_sdata to do inline update - * such that it can avoid taking the sk_storage->lock - * and changing the lists. - */ - old_sdata = __sk_storage_lookup(sk_storage, smap, false); - err = check_flags(old_sdata, map_flags); - if (err) - return ERR_PTR(err); - if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) { - copy_map_value_locked(map, old_sdata->data, - value, false); - return old_sdata; - } - } - - raw_spin_lock_bh(&sk_storage->lock); - - /* Recheck sk_storage->list under sk_storage->lock */ - if (unlikely(hlist_empty(&sk_storage->list))) { - /* A parallel del is happening and sk_storage is going - * away. It has just been checked before, so very - * unlikely. Return instead of retry to keep things - * simple. - */ - err = -EAGAIN; - goto unlock_err; - } - - old_sdata = __sk_storage_lookup(sk_storage, smap, false); - err = check_flags(old_sdata, map_flags); - if (err) - goto unlock_err; - - if (old_sdata && (map_flags & BPF_F_LOCK)) { - copy_map_value_locked(map, old_sdata->data, value, false); - selem = SELEM(old_sdata); - goto unlock; - } - - /* sk_storage->lock is held. Hence, we are sure - * we can unlink and uncharge the old_sdata successfully - * later. Hence, instead of charging the new selem now - * and then uncharge the old selem later (which may cause - * a potential but unnecessary charge failure), avoid taking - * a charge at all here (the "!old_sdata" check) and the - * old_sdata will not be uncharged later during __selem_unlink_sk(). 
- */ - selem = selem_alloc(smap, sk, value, !old_sdata); - if (!selem) { - err = -ENOMEM; - goto unlock_err; - } - - /* First, link the new selem to the map */ - selem_link_map(smap, selem); - - /* Second, link (and publish) the new selem to sk_storage */ - __selem_link_sk(sk_storage, selem); - - /* Third, remove old selem, SELEM(old_sdata) */ - if (old_sdata) { - selem_unlink_map(SELEM(old_sdata)); - __selem_unlink_sk(sk_storage, SELEM(old_sdata), false); - } - -unlock: - raw_spin_unlock_bh(&sk_storage->lock); - return SDATA(selem); - -unlock_err: - raw_spin_unlock_bh(&sk_storage->lock); - return ERR_PTR(err); + smap = (struct bpf_local_storage_map *)map; + return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit); } static int sk_storage_delete(struct sock *sk, struct bpf_map *map) { - struct bpf_sk_storage_data *sdata; + struct bpf_local_storage_data *sdata; sdata = sk_storage_lookup(sk, map, false); if (!sdata) return -ENOENT; - selem_unlink(SELEM(sdata)); + bpf_selem_unlink(SELEM(sdata)); return 0; } @@ -514,8 +58,8 @@ static int sk_storage_delete(struct sock *sk, struct bpf_map *map) /* Called by __sk_destruct() & bpf_sk_storage_clone() */ void bpf_sk_storage_free(struct sock *sk) { - struct bpf_sk_storage_elem *selem; - struct bpf_sk_storage *sk_storage; + struct bpf_local_storage_elem *selem; + struct bpf_local_storage *sk_storage; bool free_sk_storage = false; struct hlist_node *n; @@ -531,7 +75,7 @@ void bpf_sk_storage_free(struct sock *sk) * Thus, no elem can be added-to or deleted-from the * sk_storage->list by the bpf_prog or by the bpf-map's syscall. * - * It is racing with bpf_sk_storage_map_free() alone + * It is racing with bpf_local_storage_map_free() alone * when unlinking elem from the sk_storage->list and * the map's bucket->list. */ @@ -540,8 +84,9 @@ void bpf_sk_storage_free(struct sock *sk) /* Always unlink from map before unlinking from * sk_storage. */ - selem_unlink_map(selem); - free_sk_storage = __selem_unlink_sk(sk_storage, selem, true); + bpf_selem_unlink_map(selem); + free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage, + selem, true); } raw_spin_unlock_bh(&sk_storage->lock); rcu_read_unlock(); @@ -550,128 +95,24 @@ void bpf_sk_storage_free(struct sock *sk) kfree_rcu(sk_storage, rcu); } -static void bpf_sk_storage_map_free(struct bpf_map *map) +static void sk_storage_map_free(struct bpf_map *map) { - struct bpf_sk_storage_elem *selem; - struct bpf_sk_storage_map *smap; - struct bucket *b; - unsigned int i; + struct bpf_local_storage_map *smap; - smap = (struct bpf_sk_storage_map *)map; - - /* Note that this map might be concurrently cloned from - * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone - * RCU read section to finish before proceeding. New RCU - * read sections should be prevented via bpf_map_inc_not_zero. - */ - synchronize_rcu(); - - /* bpf prog and the userspace can no longer access this map - * now. No new selem (of this map) can be added - * to the sk->sk_bpf_storage or to the map bucket's list. - * - * The elem of this map can be cleaned up here - * or - * by bpf_sk_storage_free() during __sk_destruct(). - */ - for (i = 0; i < (1U << smap->bucket_log); i++) { - b = &smap->buckets[i]; - - rcu_read_lock(); - /* No one is adding to b->list now */ - while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)), - struct bpf_sk_storage_elem, - map_node))) { - selem_unlink(selem); - cond_resched_rcu(); - } - rcu_read_unlock(); - } - - /* bpf_sk_storage_free() may still need to access the map. 
- * e.g. bpf_sk_storage_free() has unlinked selem from the map - * which then made the above while((selem = ...)) loop - * exited immediately. - * - * However, the bpf_sk_storage_free() still needs to access - * the smap->elem_size to do the uncharging in - * __selem_unlink_sk(). - * - * Hence, wait another rcu grace period for the - * bpf_sk_storage_free() to finish. - */ - synchronize_rcu(); - - kvfree(smap->buckets); - kfree(map); + smap = (struct bpf_local_storage_map *)map; + bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx); + bpf_local_storage_map_free(smap); } -static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr) +static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr) { - if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK || - !(attr->map_flags & BPF_F_NO_PREALLOC) || - attr->max_entries || - attr->key_size != sizeof(int) || !attr->value_size || - /* Enforce BTF for userspace sk dumping */ - !attr->btf_key_type_id || !attr->btf_value_type_id) - return -EINVAL; + struct bpf_local_storage_map *smap; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (attr->value_size >= KMALLOC_MAX_SIZE - - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) || - /* U16_MAX is much more than enough for sk local storage - * considering a tcp_sock is ~2k. - */ - attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem)) - return -E2BIG; - - return 0; -} - -static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) -{ - struct bpf_sk_storage_map *smap; - unsigned int i; - u32 nbuckets; - u64 cost; - int ret; - - smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN); - if (!smap) - return ERR_PTR(-ENOMEM); - bpf_map_init_from_attr(&smap->map, attr); - - nbuckets = roundup_pow_of_two(num_possible_cpus()); - /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ - nbuckets = max_t(u32, 2, nbuckets); - smap->bucket_log = ilog2(nbuckets); - cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); - - ret = bpf_map_charge_init(&smap->map.memory, cost); - if (ret < 0) { - kfree(smap); - return ERR_PTR(ret); - } - - smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets, - GFP_USER | __GFP_NOWARN); - if (!smap->buckets) { - bpf_map_charge_finish(&smap->map.memory); - kfree(smap); - return ERR_PTR(-ENOMEM); - } - - for (i = 0; i < nbuckets; i++) { - INIT_HLIST_HEAD(&smap->buckets[i].list); - raw_spin_lock_init(&smap->buckets[i].lock); - } - - smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size; - smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) % - BPF_SK_STORAGE_CACHE_SIZE; + smap = bpf_local_storage_map_alloc(attr); + if (IS_ERR(smap)) + return ERR_CAST(smap); + smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache); return &smap->map; } @@ -681,26 +122,9 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, return -ENOTSUPP; } -static int bpf_sk_storage_map_check_btf(const struct bpf_map *map, - const struct btf *btf, - const struct btf_type *key_type, - const struct btf_type *value_type) -{ - u32 int_data; - - if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) - return -EINVAL; - - int_data = *(u32 *)(key_type + 1); - if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) - return -EINVAL; - - return 0; -} - static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key) { - struct bpf_sk_storage_data *sdata; + struct bpf_local_storage_data *sdata; struct socket *sock; int fd, err; @@ -718,14 +142,16 @@ static void *bpf_fd_sk_storage_lookup_elem(struct 
bpf_map *map, void *key) static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { - struct bpf_sk_storage_data *sdata; + struct bpf_local_storage_data *sdata; struct socket *sock; int fd, err; fd = *(int *)key; sock = sockfd_lookup(fd, &err); if (sock) { - sdata = sk_storage_update(sock->sk, map, value, map_flags); + sdata = bpf_local_storage_update( + sock->sk, (struct bpf_local_storage_map *)map, value, + map_flags); sockfd_put(sock); return PTR_ERR_OR_ZERO(sdata); } @@ -749,14 +175,14 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key) return err; } -static struct bpf_sk_storage_elem * +static struct bpf_local_storage_elem * bpf_sk_storage_clone_elem(struct sock *newsk, - struct bpf_sk_storage_map *smap, - struct bpf_sk_storage_elem *selem) + struct bpf_local_storage_map *smap, + struct bpf_local_storage_elem *selem) { - struct bpf_sk_storage_elem *copy_selem; + struct bpf_local_storage_elem *copy_selem; - copy_selem = selem_alloc(smap, newsk, NULL, true); + copy_selem = bpf_selem_alloc(smap, newsk, NULL, true); if (!copy_selem) return NULL; @@ -772,9 +198,9 @@ bpf_sk_storage_clone_elem(struct sock *newsk, int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) { - struct bpf_sk_storage *new_sk_storage = NULL; - struct bpf_sk_storage *sk_storage; - struct bpf_sk_storage_elem *selem; + struct bpf_local_storage *new_sk_storage = NULL; + struct bpf_local_storage *sk_storage; + struct bpf_local_storage_elem *selem; int ret = 0; RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); @@ -786,8 +212,8 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) goto out; hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) { - struct bpf_sk_storage_elem *copy_selem; - struct bpf_sk_storage_map *smap; + struct bpf_local_storage_elem *copy_selem; + struct bpf_local_storage_map *smap; struct bpf_map *map; smap = rcu_dereference(SDATA(selem)->smap); @@ -795,11 +221,11 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) continue; /* Note that for lockless listeners adding new element - * here can race with cleanup in bpf_sk_storage_map_free. + * here can race with cleanup in bpf_local_storage_map_free. * Try to grab map refcnt to make sure that it's still * alive and prevent concurrent removal. 
*/ - map = bpf_map_inc_not_zero(&smap->map, false); + map = bpf_map_inc_not_zero(&smap->map); if (IS_ERR(map)) continue; @@ -811,10 +237,10 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) } if (new_sk_storage) { - selem_link_map(smap, copy_selem); - __selem_link_sk(new_sk_storage, copy_selem); + bpf_selem_link_map(smap, copy_selem); + bpf_selem_link_storage_nolock(new_sk_storage, copy_selem); } else { - ret = sk_storage_alloc(newsk, smap, copy_selem); + ret = bpf_local_storage_alloc(newsk, smap, copy_selem); if (ret) { kfree(copy_selem); atomic_sub(smap->elem_size, @@ -823,7 +249,8 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) goto out; } - new_sk_storage = rcu_dereference(copy_selem->sk_storage); + new_sk_storage = + rcu_dereference(copy_selem->local_storage); } bpf_map_put(map); } @@ -841,7 +268,7 @@ out: BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, void *, value, u64, flags) { - struct bpf_sk_storage_data *sdata; + struct bpf_local_storage_data *sdata; if (flags > BPF_SK_STORAGE_GET_F_CREATE) return (unsigned long)NULL; @@ -857,7 +284,9 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, * destruction). */ refcount_inc_not_zero(&sk->sk_refcnt)) { - sdata = sk_storage_update(sk, map, value, BPF_NOEXIST); + sdata = bpf_local_storage_update( + sk, (struct bpf_local_storage_map *)map, value, + BPF_NOEXIST); /* sk must be a fullsock (guaranteed by verifier), * so sock_gen_put() is unnecessary. */ @@ -882,15 +311,43 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) return -ENOENT; } +static int sk_storage_charge(struct bpf_local_storage_map *smap, + void *owner, u32 size) +{ + return omem_charge(owner, size); +} + +static void sk_storage_uncharge(struct bpf_local_storage_map *smap, + void *owner, u32 size) +{ + struct sock *sk = owner; + + atomic_sub(size, &sk->sk_omem_alloc); +} + +static struct bpf_local_storage __rcu ** +sk_storage_ptr(void *owner) +{ + struct sock *sk = owner; + + return &sk->sk_bpf_storage; +} + +static int sk_storage_map_btf_id; const struct bpf_map_ops sk_storage_map_ops = { - .map_alloc_check = bpf_sk_storage_map_alloc_check, - .map_alloc = bpf_sk_storage_map_alloc, - .map_free = bpf_sk_storage_map_free, + .map_alloc_check = bpf_local_storage_map_alloc_check, + .map_alloc = sk_storage_map_alloc, + .map_free = sk_storage_map_free, .map_get_next_key = notsupp_get_next_key, .map_lookup_elem = bpf_fd_sk_storage_lookup_elem, .map_update_elem = bpf_fd_sk_storage_update_elem, .map_delete_elem = bpf_fd_sk_storage_delete_elem, - .map_check_btf = bpf_sk_storage_map_check_btf, + .map_check_btf = bpf_local_storage_map_check_btf, + .map_btf_name = "bpf_local_storage_map", + .map_btf_id = &sk_storage_map_btf_id, + .map_local_storage_charge = sk_storage_charge, + .map_local_storage_uncharge = sk_storage_uncharge, + .map_owner_storage_ptr = sk_storage_ptr, }; const struct bpf_func_proto bpf_sk_storage_get_proto = { @@ -910,3 +367,520 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = { .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_SOCKET, }; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned skip_elems; +}; + +static struct bpf_local_storage_elem * +bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info, + struct bpf_local_storage_elem *prev_selem) +{ + struct bpf_local_storage *sk_storage; + struct bpf_local_storage_elem *selem; + u32 skip_elems = 
info->skip_elems; + struct bpf_local_storage_map *smap; + u32 bucket_id = info->bucket_id; + u32 i, count, n_buckets; + struct bpf_local_storage_map_bucket *b; + + smap = (struct bpf_local_storage_map *)info->map; + n_buckets = 1U << smap->bucket_log; + if (bucket_id >= n_buckets) + return NULL; + + /* try to find next selem in the same bucket */ + selem = prev_selem; + count = 0; + while (selem) { + selem = hlist_entry_safe(selem->map_node.next, + struct bpf_local_storage_elem, map_node); + if (!selem) { + /* not found, unlock and go to the next bucket */ + b = &smap->buckets[bucket_id++]; + raw_spin_unlock_bh(&b->lock); + skip_elems = 0; + break; + } + sk_storage = rcu_dereference_raw(selem->local_storage); + if (sk_storage) { + info->skip_elems = skip_elems + count; + return selem; + } + count++; + } + + for (i = bucket_id; i < (1U << smap->bucket_log); i++) { + b = &smap->buckets[i]; + raw_spin_lock_bh(&b->lock); + count = 0; + hlist_for_each_entry(selem, &b->list, map_node) { + sk_storage = rcu_dereference_raw(selem->local_storage); + if (sk_storage && count >= skip_elems) { + info->bucket_id = i; + info->skip_elems = count; + return selem; + } + count++; + } + raw_spin_unlock_bh(&b->lock); + skip_elems = 0; + } + + info->bucket_id = i; + info->skip_elems = 0; + return NULL; +} + +static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_local_storage_elem *selem; + + selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL); + if (!selem) + return NULL; + + if (*pos == 0) + ++*pos; + return selem; +} + +static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + + ++*pos; + ++info->skip_elems; + return bpf_sk_storage_map_seq_find_next(seq->private, v); +} + +struct bpf_iter__bpf_sk_storage_map { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(struct sock *, sk); + __bpf_md_ptr(void *, value); +}; + +DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta, + struct bpf_map *map, struct sock *sk, + void *value) + +static int __bpf_sk_storage_map_seq_show(struct seq_file *seq, + struct bpf_local_storage_elem *selem) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + struct bpf_iter__bpf_sk_storage_map ctx = {}; + struct bpf_local_storage *sk_storage; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, selem == NULL); + if (prog) { + ctx.meta = &meta; + ctx.map = info->map; + if (selem) { + sk_storage = rcu_dereference_raw(selem->local_storage); + ctx.sk = sk_storage->owner; + ctx.value = SDATA(selem)->data; + } + ret = bpf_iter_run_prog(prog, &ctx); + } + + return ret; +} + +static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_sk_storage_map_seq_show(seq, v); +} + +static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + struct bpf_local_storage_map *smap; + struct bpf_local_storage_map_bucket *b; + + if (!v) { + (void)__bpf_sk_storage_map_seq_show(seq, v); + } else { + smap = (struct bpf_local_storage_map *)info->map; + b = &smap->buckets[info->bucket_id]; + raw_spin_unlock_bh(&b->lock); + } +} + +static int bpf_iter_init_sk_storage_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data; + + seq_info->map = aux->map; + 
return 0; +} + +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) +{ + struct bpf_map *map; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); + + if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) + goto put_map; + + if (prog->aux->max_rdonly_access > map->value_size) { + err = -EACCES; + goto put_map; + } + + aux->map = map; + return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); +} + +static const struct seq_operations bpf_sk_storage_map_seq_ops = { + .start = bpf_sk_storage_map_seq_start, + .next = bpf_sk_storage_map_seq_next, + .stop = bpf_sk_storage_map_seq_stop, + .show = bpf_sk_storage_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_sk_storage_map_seq_ops, + .init_seq_private = bpf_iter_init_sk_storage_map, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info), +}; + +static struct bpf_iter_reg bpf_sk_storage_map_reg_info = { + .target = "bpf_sk_storage_map", + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), + PTR_TO_BTF_ID_OR_NULL }, + { offsetof(struct bpf_iter__bpf_sk_storage_map, value), + PTR_TO_RDWR_BUF_OR_NULL }, + }, + .seq_info = &iter_seq_info, +}; + +static int __init bpf_sk_storage_map_iter_init(void) +{ + bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id = + btf_sock_ids[BTF_SOCK_TYPE_SOCK]; + return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info); +} +late_initcall(bpf_sk_storage_map_iter_init); + +BTF_ID_LIST(sk_storage_btf_ids) +BTF_ID_UNUSED +BTF_ID(struct, sock) + +const struct bpf_func_proto sk_storage_get_btf_proto = { + .func = bpf_sk_storage_get, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_BTF_ID, + .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, + .arg4_type = ARG_ANYTHING, + .btf_id = sk_storage_btf_ids, +}; + +const struct bpf_func_proto sk_storage_delete_btf_proto = { + .func = bpf_sk_storage_delete, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_BTF_ID, + .btf_id = sk_storage_btf_ids, +}; + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[]; +}; + +/* The reply will be like: + * INET_DIAG_BPF_SK_STORAGES (nla_nest) + * SK_DIAG_BPF_STORAGE (nla_nest) + * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32) + * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit) + * SK_DIAG_BPF_STORAGE (nla_nest) + * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32) + * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit) + * .... 
+ */ +static int nla_value_size(u32 value_size) +{ + /* SK_DIAG_BPF_STORAGE (nla_nest) + * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32) + * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit) + */ + return nla_total_size(0) + nla_total_size(sizeof(u32)) + + nla_total_size_64bit(value_size); +} + +void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag) +{ + u32 i; + + if (!diag) + return; + + for (i = 0; i < diag->nr_maps; i++) + bpf_map_put(diag->maps[i]); + + kfree(diag); +} +EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free); + +static bool diag_check_dup(const struct bpf_sk_storage_diag *diag, + const struct bpf_map *map) +{ + u32 i; + + for (i = 0; i < diag->nr_maps; i++) { + if (diag->maps[i] == map) + return true; + } + + return false; +} + +struct bpf_sk_storage_diag * +bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs) +{ + struct bpf_sk_storage_diag *diag; + struct nlattr *nla; + u32 nr_maps = 0; + int rem, err; + + /* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as + * the map_alloc_check() side also does. + */ + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + nla_for_each_nested(nla, nla_stgs, rem) { + if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) + nr_maps++; + } + + diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps, + GFP_KERNEL); + if (!diag) + return ERR_PTR(-ENOMEM); + + nla_for_each_nested(nla, nla_stgs, rem) { + struct bpf_map *map; + int map_fd; + + if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD) + continue; + + map_fd = nla_get_u32(nla); + map = bpf_map_get(map_fd); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto err_free; + } + if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) { + bpf_map_put(map); + err = -EINVAL; + goto err_free; + } + if (diag_check_dup(diag, map)) { + bpf_map_put(map); + err = -EEXIST; + goto err_free; + } + diag->maps[diag->nr_maps++] = map; + } + + return diag; + +err_free: + bpf_sk_storage_diag_free(diag); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc); + +static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb) +{ + struct nlattr *nla_stg, *nla_value; + struct bpf_local_storage_map *smap; + + /* It cannot exceed max nlattr's payload */ + BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE); + + nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE); + if (!nla_stg) + return -EMSGSIZE; + + smap = rcu_dereference(sdata->smap); + if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id)) + goto errout; + + nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE, + smap->map.value_size, + SK_DIAG_BPF_STORAGE_PAD); + if (!nla_value) + goto errout; + + if (map_value_has_spin_lock(&smap->map)) + copy_map_value_locked(&smap->map, nla_data(nla_value), + sdata->data, true); + else + copy_map_value(&smap->map, nla_data(nla_value), sdata->data); + + nla_nest_end(skb, nla_stg); + return 0; + +errout: + nla_nest_cancel(skb, nla_stg); + return -EMSGSIZE; +} + +static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size) +{ + /* stg_array_type (e.g. 
INET_DIAG_BPF_SK_STORAGES) */ + unsigned int diag_size = nla_total_size(0); + struct bpf_local_storage *sk_storage; + struct bpf_local_storage_elem *selem; + struct bpf_local_storage_map *smap; + struct nlattr *nla_stgs; + unsigned int saved_len; + int err = 0; + + rcu_read_lock(); + + sk_storage = rcu_dereference(sk->sk_bpf_storage); + if (!sk_storage || hlist_empty(&sk_storage->list)) { + rcu_read_unlock(); + return 0; + } + + nla_stgs = nla_nest_start(skb, stg_array_type); + if (!nla_stgs) + /* Continue to learn diag_size */ + err = -EMSGSIZE; + + saved_len = skb->len; + hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) { + smap = rcu_dereference(SDATA(selem)->smap); + diag_size += nla_value_size(smap->map.value_size); + + if (nla_stgs && diag_get(SDATA(selem), skb)) + /* Continue to learn diag_size */ + err = -EMSGSIZE; + } + + rcu_read_unlock(); + + if (nla_stgs) { + if (saved_len == skb->len) + nla_nest_cancel(skb, nla_stgs); + else + nla_nest_end(skb, nla_stgs); + } + + if (diag_size == nla_total_size(0)) { + *res_diag_size = 0; + return 0; + } + + *res_diag_size = diag_size; + return err; +} + +int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, + struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size) +{ + /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */ + unsigned int diag_size = nla_total_size(0); + struct bpf_local_storage *sk_storage; + struct bpf_local_storage_data *sdata; + struct nlattr *nla_stgs; + unsigned int saved_len; + int err = 0; + u32 i; + + *res_diag_size = 0; + + /* No map has been specified. Dump all. */ + if (!diag->nr_maps) + return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type, + res_diag_size); + + rcu_read_lock(); + sk_storage = rcu_dereference(sk->sk_bpf_storage); + if (!sk_storage || hlist_empty(&sk_storage->list)) { + rcu_read_unlock(); + return 0; + } + + nla_stgs = nla_nest_start(skb, stg_array_type); + if (!nla_stgs) + /* Continue to learn diag_size */ + err = -EMSGSIZE; + + saved_len = skb->len; + for (i = 0; i < diag->nr_maps; i++) { + sdata = bpf_local_storage_lookup(sk_storage, + (struct bpf_local_storage_map *)diag->maps[i], + false); + + if (!sdata) + continue; + + diag_size += nla_value_size(diag->maps[i]->value_size); + + if (nla_stgs && diag_get(sdata, skb)) + /* Continue to learn diag_size */ + err = -EMSGSIZE; + } + rcu_read_unlock(); + + if (nla_stgs) { + if (saved_len == skb->len) + nla_nest_cancel(skb, nla_stgs); + else + nla_nest_end(skb, nla_stgs); + } + + if (diag_size == nla_total_size(0)) { + *res_diag_size = 0; + return 0; + } + + *res_diag_size = diag_size; + return err; +} +EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put); diff --git a/net/core/datagram.c b/net/core/datagram.c index da3c24ed129c..b0488f30f2c4 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -51,6 +51,7 @@ #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/uio.h> +#include <linux/indirect_call_wrapper.h> #include <net/protocol.h> #include <linux/skbuff.h> @@ -407,6 +408,11 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) } EXPORT_SYMBOL(skb_kill_datagram); +INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr, + size_t bytes, + void *data __always_unused, + struct iov_iter *i)); + static int __skb_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len, bool fault_short, size_t (*cb)(const void *, size_t, void *, @@ -420,7 +426,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int 
offset, if (copy > 0) { if (copy > len) copy = len; - n = cb(skb->data + offset, copy, data, to); + n = INDIRECT_CALL_1(cb, simple_copy_to_iter, + skb->data + offset, copy, data, to); offset += n; if (n != copy) goto short_copy; @@ -442,8 +449,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - n = cb(vaddr + skb_frag_off(frag) + offset - start, - copy, data, to); + n = INDIRECT_CALL_1(cb, simple_copy_to_iter, + vaddr + skb_frag_off(frag) + offset - start, + copy, data, to); kunmap(page); offset += n; if (n != copy) @@ -692,8 +700,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, struct iov_iter *to, int len, __wsum *csump) { - return __skb_datagram_iter(skb, offset, to, len, true, - csum_and_copy_to_iter, csump); + struct csum_state csdata = { .csum = *csump }; + int ret; + + ret = __skb_datagram_iter(skb, offset, to, len, true, + csum_and_copy_to_iter, &csdata); + if (ret) + return ret; + + *csump = csdata.csum; + return 0; } /** diff --git a/net/core/dev.c b/net/core/dev.c index 8c5a9cfd6e6f..102b2ec47b55 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -79,6 +79,7 @@ #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> @@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); static unsigned int napi_gen_id = NR_CPUS; static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); -static seqcount_t devnet_rename_seq; +static DECLARE_RWSEM(devnet_rename_sem); static inline void dev_base_seq_inc(struct net *net) { @@ -816,33 +817,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id); * @net: network namespace * @name: a pointer to the buffer where the name will be stored. * @ifindex: the ifindex of the interface to get the name from. - * - * The use of raw_seqcount_begin() and cond_resched() before - * retrying is required as we want to give the writers a chance - * to complete when CONFIG_PREEMPT is not set. 
*/ int netdev_get_name(struct net *net, char *name, int ifindex) { struct net_device *dev; - unsigned int seq; + int ret; -retry: - seq = raw_seqcount_begin(&devnet_rename_seq); + down_read(&devnet_rename_sem); rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifindex); if (!dev) { - rcu_read_unlock(); - return -ENODEV; + ret = -ENODEV; + goto out; } strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); - goto retry; - } - return 0; + ret = 0; +out: + rcu_read_unlock(); + up_read(&devnet_rename_sem); + return ret; } /** @@ -1115,10 +1111,10 @@ int dev_change_name(struct net_device *dev, const char *newname) likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) return -EBUSY; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return 0; } @@ -1126,7 +1122,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_get_valid_name(net, dev, newname); if (err < 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return err; } @@ -1141,11 +1137,11 @@ rollback: if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return ret; } - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); netdev_adjacent_rename_links(dev, oldname); @@ -1166,7 +1162,7 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -3836,10 +3832,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) local_bh_disable(); + dev_xmit_recursion_inc(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); + dev_xmit_recursion_dec(); local_bh_enable(); @@ -3863,7 +3861,8 @@ EXPORT_SYMBOL(netdev_max_backlog); int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; -unsigned int __read_mostly netdev_budget_usecs = 2000; +/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */ +unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; int weight_p __read_mostly = 64; /* old backlog weight */ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ @@ -4082,7 +4081,7 @@ EXPORT_SYMBOL(rps_may_expire_flow); /* Called from hardirq (IPI) context */ static void rps_trigger_softirq(void *data) { - struct softnet_data *sd = data; + struct softnet_data *sd = &per_cpu(softnet_data, smp_processor_id()); ____napi_schedule(sd, &sd->backlog); sd->received_rps++; @@ -4550,7 +4549,7 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); static inline struct sk_buff * sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, - struct net_device *orig_dev) + struct net_device *orig_dev, bool *another) { #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); @@ -4593,7 +4592,11 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, * redirecting to another netdev */ __skb_push(skb, skb->mac_len); -
skb_do_redirect(skb); + if (skb_do_redirect(skb) == -EAGAIN) { + __skb_pull(skb, skb->mac_len); + *another = true; + break; + } return NULL; case TC_ACT_CONSUMED: return NULL; @@ -4713,11 +4716,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, return 0; } -static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, +static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, struct packet_type **ppt_prev) { struct packet_type *ptype, *pt_prev; rx_handler_func_t *rx_handler; + struct sk_buff *skb = *pskb; struct net_device *orig_dev; bool deliver_exact = false; int ret = NET_RX_DROP; @@ -4748,8 +4752,10 @@ another_round: ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); preempt_enable(); - if (ret2 != XDP_PASS) - return NET_RX_DROP; + if (ret2 != XDP_PASS) { + ret = NET_RX_DROP; + goto out; + } skb_reset_mac_len(skb); } @@ -4781,7 +4787,12 @@ another_round: skip_taps: #ifdef CONFIG_NET_INGRESS if (static_branch_unlikely(&ingress_needed_key)) { - skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); + bool another = false; + + skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, + &another); + if (another) + goto another_round; if (!skb) goto out; @@ -4899,6 +4910,13 @@ drop: } out: + /* The invariant here is that if *ppt_prev is not NULL + * then skb should also be non-NULL. + * + * Apparently *ppt_prev assignment above holds this invariant due to + * skb dereferencing near it. + */ + *pskb = skb; return ret; } @@ -4908,7 +4926,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) struct packet_type *pt_prev = NULL; int ret; - ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); if (pt_prev) ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, skb->dev, pt_prev, orig_dev); @@ -4986,7 +5004,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo struct packet_type *pt_prev = NULL; skb_list_del_init(skb); - __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); + __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); if (!pt_prev) continue; if (pt_curr != pt_prev || od_curr != orig_dev) { @@ -5221,7 +5239,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + dev_kfree_skb_irq(skb); input_queue_head_incr(sd); } } @@ -5267,10 +5285,11 @@ static void gro_normal_list(struct napi_struct *napi) /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, * pass the whole batch up to the stack. 
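The -EAGAIN handling above is what makes bpf_redirect_peer() work: skb_do_redirect() retargets the skb to the peer device and returns -EAGAIN, and __netif_receive_skb_core() then restarts its receive loop as if the packet had arrived on that peer. A hedged control-flow summary:

/* Hedged summary of the new ingress redirect loop:
 *
 * another_round:
 *      taps, ingress classification ...
 *      sch_handle_ingress()
 *              skb_do_redirect() == -EAGAIN   (BPF_F_PEER case)
 *              skb->dev = peer device
 *      goto another_round     (re-run RX on behalf of the peer)
 */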
*/ -static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb) +static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) { list_add_tail(&skb->list, &napi->rx_list); - if (++napi->rx_count >= gro_normal_batch) + napi->rx_count += segs; + if (napi->rx_count >= gro_normal_batch) gro_normal_list(napi); } @@ -5309,7 +5328,7 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) } out: - gro_normal_one(napi, skb); + gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); return NET_RX_SUCCESS; } @@ -5386,7 +5405,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi, return head; } -static void skb_gro_reset_offset(struct sk_buff *skb) +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) { const struct skb_shared_info *pinfo = skb_shinfo(skb); const skb_frag_t *frag0 = &pinfo->frags[0]; @@ -5397,7 +5416,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb) if (skb_mac_header(skb) == skb_tail_pointer(skb) && pinfo->nr_frags && - !PageHighMem(skb_frag_page(frag0))) { + !PageHighMem(skb_frag_page(frag0)) && + (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, skb_frag_size(frag0), @@ -5594,12 +5614,13 @@ static void napi_skb_free_stolen_head(struct sk_buff *skb) kmem_cache_free(skbuff_head_cache, skb); } -static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) +static gro_result_t napi_skb_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) { switch (ret) { case GRO_NORMAL: - if (netif_receive_skb_internal(skb)) - ret = GRO_DROP; + gro_normal_one(napi, skb, 1); break; case GRO_DROP: @@ -5629,9 +5650,9 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) skb_mark_napi_id(skb, napi); trace_napi_gro_receive_entry(skb); - skb_gro_reset_offset(skb); + skb_gro_reset_offset(skb, 0); - ret = napi_skb_finish(dev_gro_receive(napi, skb), skb); + ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); trace_napi_gro_receive_exit(ret); return ret; @@ -5687,7 +5708,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_NORMAL) - gro_normal_one(napi, skb); + gro_normal_one(napi, skb, 1); break; case GRO_DROP: @@ -5722,7 +5743,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) napi->skb = NULL; skb_reset_mac_header(skb); - skb_gro_reset_offset(skb); + skb_gro_reset_offset(skb, hlen); if (unlikely(skb_gro_header_hard(skb, hlen))) { eth = skb_gro_header_slow(skb, hlen, 0); @@ -5793,16 +5814,34 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) } EXPORT_SYMBOL(__skb_gro_checksum_complete); +#ifdef CONFIG_RPS +static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpumask, ipi_mask); +unsigned int sysctl_rps_using_pvipi = 1; +#endif + static void net_rps_send_ipi(struct softnet_data *remsd) { #ifdef CONFIG_RPS - while (remsd) { - struct softnet_data *next = remsd->rps_ipi_next; + if (sysctl_rps_using_pvipi) { + struct cpumask *tmpmask = this_cpu_ptr(&ipi_mask); + cpumask_clear(tmpmask); + smp_call_function_many_async_begin(tmpmask); + while (remsd) { + struct softnet_data *next = remsd->rps_ipi_next; - if (cpu_online(remsd->cpu)) - smp_call_function_single_async(remsd->cpu, &remsd->csd); - remsd = next; - } + if (cpu_online(remsd->cpu)) + smp_call_function_many_async(remsd->cpu, &remsd->csd, tmpmask); + remsd = next; 
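gro_normal_one() now charges a GRO-coalesced skb for every segment it carries, so gro_normal_batch counts wire packets rather than merged super-packets. A worked example, assuming the default gro_normal_batch of 8:

/* rx_count accounting with gro_normal_batch == 8 (default):
 *      gro_normal_one(napi, a, 1);    // plain skb:  rx_count = 1
 *      gro_normal_one(napi, b, 6);    // 6-seg GRO:  rx_count = 7
 *      gro_normal_one(napi, c, 2);    // rx_count = 9 >= 8
 *                                     // -> gro_normal_list() flushes
 */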
+ } + smp_call_function_many_async_end(tmpmask); + } else + while (remsd) { + struct softnet_data *next = remsd->rps_ipi_next; + + if (cpu_online(remsd->cpu)) + smp_call_function_single_async(remsd->cpu, &remsd->csd); + remsd = next; + } #endif } @@ -6222,12 +6261,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, netdev_err_once(dev, "%s() called with weight %d\n", __func__, weight); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); @@ -8133,6 +8173,48 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, } EXPORT_SYMBOL(dev_set_mac_address); +static DECLARE_RWSEM(dev_addr_sem); + +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack) +{ + int ret; + + down_write(&dev_addr_sem); + ret = dev_set_mac_address(dev, sa, extack); + up_write(&dev_addr_sem); + return ret; +} +EXPORT_SYMBOL(dev_set_mac_address_user); + +int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) +{ + size_t size = sizeof(sa->sa_data); + struct net_device *dev; + int ret = 0; + + down_read(&dev_addr_sem); + rcu_read_lock(); + + dev = dev_get_by_name_rcu(net, dev_name); + if (!dev) { + ret = -ENODEV; + goto unlock; + } + if (!dev->addr_len) + memset(sa->sa_data, 0, size); + else + memcpy(sa->sa_data, dev->dev_addr, + min_t(size_t, size, dev->addr_len)); + sa->sa_family = dev->type; + +unlock: + rcu_read_unlock(); + up_read(&dev_addr_sem); + return ret; +} +EXPORT_SYMBOL(dev_get_mac_address); + /** * dev_change_carrier - Change device carrier * @dev: device @@ -8231,7 +8313,7 @@ int dev_get_port_parent_id(struct net_device *dev, if (!first.id_len) first = *ppid; else if (memcmp(&first, ppid, sizeof(*ppid))) - return -ENODATA; + return -EOPNOTSUPP; } return err; @@ -8317,7 +8399,17 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, struct netlink_ext_ack *extack, u32 flags, struct bpf_prog *prog) { + bool non_hw = !(flags & XDP_FLAGS_HW_MODE); + struct bpf_prog *prev_prog = NULL; struct netdev_bpf xdp; + int err; + + if (non_hw) { + prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op, + XDP_QUERY_PROG)); + if (IS_ERR(prev_prog)) + prev_prog = NULL; + } memset(&xdp, 0, sizeof(xdp)); if (flags & XDP_FLAGS_HW_MODE) @@ -8328,7 +8420,11 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, xdp.flags = flags; xdp.prog = prog; - return bpf_op(dev, &xdp); + err = bpf_op(dev, &xdp); + if (prev_prog) + bpf_prog_put(prev_prog); + + return err; } static void dev_xdp_uninstall(struct net_device *dev) @@ -8595,11 +8691,13 @@ static void netdev_sync_lower_features(struct net_device *upper, netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", &feature, lower->name); lower->wanted_features &= ~feature; - netdev_update_features(lower); + __netdev_update_features(lower); if (unlikely(lower->features & feature)) netdev_WARN(upper, "failed to disable %pNF on %s!\n", &feature, lower->name); + else + netdev_features_change(lower); } } } @@ -8680,6 +8778,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, } } + if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { + netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); + features &= 
~NETIF_F_HW_TLS_RX; + } + return features; } @@ -9106,6 +9209,13 @@ int register_netdevice(struct net_device *dev) rcu_barrier(); dev->reg_state = NETREG_UNREGISTERED; + /* We should put the kobject held in + * netdev_unregister_kobject(); otherwise + * the net device cannot be freed when + * the driver calls free_netdev(), because + * the kobject is still held. + */ + kobject_put(&dev->dev.kobj); } /* * Prevent userspace races by waiting until the network @@ -10060,7 +10170,7 @@ static void __net_exit default_device_exit(struct net *net) continue; /* Leave virtual devices for the generic cleanup */ - if (dev->rtnl_link_ops) + if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) continue; /* Push remaining network devices to init_net */ diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 5163d900bb4f..69fb9219d51d 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -122,17 +122,6 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm ifr->ifr_mtu = dev->mtu; return 0; - case SIOCGIFHWADDR: - if (!dev->addr_len) - memset(ifr->ifr_hwaddr.sa_data, 0, - sizeof(ifr->ifr_hwaddr.sa_data)); - else - memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, - min(sizeof(ifr->ifr_hwaddr.sa_data), - (size_t)dev->addr_len)); - ifr->ifr_hwaddr.sa_family = dev->type; - return 0; - case SIOCGIFSLAVE: err = -EINVAL; break; @@ -246,7 +235,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) case SIOCSIFHWADDR: if (dev->addr_len > sizeof(struct sockaddr)) return -EINVAL; - return dev_set_mac_address(dev, &ifr->ifr_hwaddr, NULL); + return dev_set_mac_address_user(dev, &ifr->ifr_hwaddr, NULL); case SIOCSIFHWBROADCAST: if (ifr->ifr_hwaddr.sa_family != dev->type) @@ -396,6 +385,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c */ switch (cmd) { + case SIOCGIFHWADDR: + dev_load(net, ifr->ifr_name); + ret = dev_get_mac_address(&ifr->ifr_hwaddr, net, ifr->ifr_name); + if (colon) + *colon = ':'; + return ret; /* * These ioctl calls: * - can be done by all. @@ -405,7 +400,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: - case SIOCGIFHWADDR: case SIOCGIFSLAVE: case SIOCGIFMAP: case SIOCGIFINDEX: diff --git a/net/core/devlink.c b/net/core/devlink.c index 4c25f1aa2d37..0ac02cab3087 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -562,6 +562,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) goto nla_put_failure; + /* Hold rtnl lock while accessing port's netdev attributes.
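With dev_get_mac_address() above, SIOCGIFHWADDR readers take dev_addr_sem and are serialized against SIOCSIFHWADDR writers instead of copying dev->dev_addr while it may be rewritten. The userspace side is unchanged; a minimal sketch of the read path being exercised:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

/* Print the MAC of 'ifname' via SIOCGIFHWADDR. */
static int print_mac(const char *ifname)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        if (ioctl(fd, SIOCGIFHWADDR, &ifr) == 0) {
                const unsigned char *a =
                        (const unsigned char *)ifr.ifr_hwaddr.sa_data;

                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       a[0], a[1], a[2], a[3], a[4], a[5]);
        }
        close(fd);
        return 0;
}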
*/ + rtnl_lock(); spin_lock_bh(&devlink_port->type_lock); if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type)) goto nla_put_failure_type_locked; @@ -588,6 +590,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, goto nla_put_failure_type_locked; } spin_unlock_bh(&devlink_port->type_lock); + rtnl_unlock(); if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; @@ -596,6 +599,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, nla_put_failure_type_locked: spin_unlock_bh(&devlink_port->type_lock); + rtnl_unlock(); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; @@ -1144,7 +1148,7 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index, pool_index, &cur, &max); if (err && err != -EOPNOTSUPP) - return err; + goto sb_occ_get_failure; if (!err) { if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) goto nla_put_failure; @@ -1157,8 +1161,10 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, return 0; nla_put_failure: + err = -EMSGSIZE; +sb_occ_get_failure: genlmsg_cancel(msg, hdr); - return -EMSGSIZE; + return err; } static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, @@ -3907,6 +3913,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); dump = false; + + if (start_offset == end_offset) { + err = 0; + goto nla_put_failure; + } } err = devlink_nl_region_read_snapshot_fill(skb, devlink, @@ -4818,6 +4829,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter, { enum devlink_health_reporter_state prev_health_state; struct devlink *devlink = reporter->devlink; + unsigned long recover_ts_threshold; /* write a log message of the current error */ WARN_ON(!msg); @@ -4827,10 +4839,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter, reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; /* abort if the previous error wasn't recovered */ + recover_ts_threshold = reporter->last_recovery_ts + + msecs_to_jiffies(reporter->graceful_period); if (reporter->auto_recover && (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY || - jiffies - reporter->last_recovery_ts < - msecs_to_jiffies(reporter->graceful_period))) { + (reporter->last_recovery_ts && reporter->recovery_count && + time_is_after_jiffies(recover_ts_threshold)))) { trace_devlink_health_recover_aborted(devlink, reporter->ops->name, reporter->health_state, diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 246a258b1fac..af0130039f37 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -212,6 +212,7 @@ static void sched_send_work(struct timer_list *t) static void trace_drop_common(struct sk_buff *skb, void *location) { struct net_dm_alert_msg *msg; + struct net_dm_drop_point *point; struct nlmsghdr *nlh; struct nlattr *nla; int i; @@ -230,11 +231,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); + point = msg->points; for (i = 0; i < msg->entries; i++) { - if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { - msg->points[i].count++; + if (!memcmp(&location, &point->pc, sizeof(void *))) { + point->count++; goto out; } + point++; } if (msg->entries == dm_hit_limit) goto out; @@ -243,8 +246,8 @@ static 
void trace_drop_common(struct sk_buff *skb, void *location) */ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); - memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); - msg->points[msg->entries].count = 1; + memcpy(point->pc, &location, sizeof(void *)); + point->count = 1; msg->entries++; if (!timer_pending(&data->send_timer)) { diff --git a/net/core/filter.c b/net/core/filter.c index d59dbc88fef5..3cc74be4ab60 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -47,6 +47,7 @@ #include <linux/seccomp.h> #include <linux/if_vlan.h> #include <linux/bpf.h> +#include <linux/btf.h> #include <net/sch_generic.h> #include <net/cls_cgroup.h> #include <net/dst_metadata.h> @@ -73,6 +74,8 @@ #include <net/lwtunnel.h> #include <net/ipv6_stubs.h> #include <net/bpf_sk_storage.h> +#include <net/transp_v6.h> +#include <linux/btf_ids.h> /** * sk_filter_trim_cap - run a packet through a socket filter @@ -1475,7 +1478,7 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) if (copy_from_user(prog->insns, fprog->filter, fsize)) { __bpf_prog_free(prog); - return ERR_PTR(-EFAULT); + return ERR_PTR(-EINVAL); } prog->len = fprog->len; @@ -1766,25 +1769,27 @@ BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { u8 *end = skb_tail_pointer(skb); - u8 *net = skb_network_header(skb); - u8 *mac = skb_mac_header(skb); - u8 *ptr; + u8 *start, *ptr; - if (unlikely(offset > 0xffff || len > (end - mac))) + if (unlikely(offset > 0xffff)) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: - ptr = mac + offset; + if (unlikely(!skb_mac_header_was_set(skb))) + goto err_clear; + start = skb_mac_header(skb); break; case BPF_HDR_START_NET: - ptr = net + offset; + start = skb_network_header(skb); break; default: goto err_clear; } - if (likely(ptr >= mac && ptr + len <= end)) { + ptr = start + offset; + + if (likely(ptr + len <= end)) { memcpy(to, ptr, len); return 0; } @@ -2109,13 +2114,266 @@ static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, return __bpf_redirect_no_mac(skb, dev, flags); } +#if IS_ENABLED(CONFIG_IPV6) +static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, + struct net_device *dev, struct bpf_nh_params *nh) +{ + u32 hh_len = LL_RESERVED_SPACE(dev); + const struct in6_addr *nexthop; + struct dst_entry *dst = NULL; + struct neighbour *neigh; + + if (dev_xmit_recursion()) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + goto out_drop; + } + + skb->dev = dev; + skb->tstamp = 0; + + if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { + struct sk_buff *skb2; + + skb2 = skb_realloc_headroom(skb, hh_len); + if (unlikely(!skb2)) { + kfree_skb(skb); + return -ENOMEM; + } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); + consume_skb(skb); + skb = skb2; + } + + rcu_read_lock_bh(); + if (!nh) { + dst = skb_dst(skb); + nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst), + &ipv6_hdr(skb)->daddr); + } else { + nexthop = &nh->ipv6_nh; + } + neigh = ip_neigh_gw6(dev, nexthop); + if (likely(!IS_ERR(neigh))) { + int ret; + + sock_confirm_neigh(skb, neigh); + dev_xmit_recursion_inc(); + ret = neigh_output(neigh, skb, false); + dev_xmit_recursion_dec(); + rcu_read_unlock_bh(); + return ret; + } + rcu_read_unlock_bh(); + if (dst) + IP6_INC_STATS(dev_net(dst->dev), + ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); +out_drop: + kfree_skb(skb); + return 
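The rewrite of bpf_skb_load_bytes_relative() above drops the assumption that a MAC header exists (BPF_HDR_START_MAC now fails cleanly on MAC-less skbs) and rechecks bounds from the chosen start header. A hedged tc-bpf sketch of the helper as used from a program:

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int read_iph(struct __sk_buff *skb)
{
        struct iphdr iph;

        /* Offset 0 from the network header; works on MAC-less skbs. */
        if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
                                        BPF_HDR_START_NET) < 0)
                return TC_ACT_OK;
        /* iph.saddr / iph.daddr now usable, network byte order. */
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";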
-ENETDOWN; +} + +static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, + struct bpf_nh_params *nh) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct net *net = dev_net(dev); + int err, ret = NET_XMIT_DROP; + + if (!nh) { + struct dst_entry *dst; + struct flowi6 fl6 = { + .flowi6_flags = FLOWI_FLAG_ANYSRC, + .flowi6_mark = skb->mark, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_oif = dev->ifindex, + .flowi6_proto = ip6h->nexthdr, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + }; + + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + goto out_drop; + + skb_dst_set(skb, dst); + } else if (nh->nh_family != AF_INET6) { + goto out_drop; + } + + err = bpf_out_neigh_v6(net, skb, dev, nh); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out_xmit; +out_drop: + dev->stats.tx_errors++; + kfree_skb(skb); +out_xmit: + return ret; +} +#else +static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, + struct bpf_nh_params *nh) +{ + kfree_skb(skb); + return NET_XMIT_DROP; +} +#endif /* CONFIG_IPV6 */ + +#if IS_ENABLED(CONFIG_INET) +static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, + struct net_device *dev, struct bpf_nh_params *nh) +{ + u32 hh_len = LL_RESERVED_SPACE(dev); + struct neighbour *neigh; + bool is_v6gw = false; + + if (dev_xmit_recursion()) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + goto out_drop; + } + + skb->dev = dev; + skb->tstamp = 0; + + if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { + struct sk_buff *skb2; + + skb2 = skb_realloc_headroom(skb, hh_len); + if (unlikely(!skb2)) { + kfree_skb(skb); + return -ENOMEM; + } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); + consume_skb(skb); + skb = skb2; + } + + rcu_read_lock_bh(); + if (!nh) { + struct dst_entry *dst = skb_dst(skb); + struct rtable *rt = container_of(dst, struct rtable, dst); + + neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); + } else if (nh->nh_family == AF_INET6) { + neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); + is_v6gw = true; + } else if (nh->nh_family == AF_INET) { + neigh = ip_neigh_gw4(dev, nh->ipv4_nh); + } else { + rcu_read_unlock_bh(); + goto out_drop; + } + + if (likely(!IS_ERR(neigh))) { + int ret; + + sock_confirm_neigh(skb, neigh); + dev_xmit_recursion_inc(); + ret = neigh_output(neigh, skb, is_v6gw); + dev_xmit_recursion_dec(); + rcu_read_unlock_bh(); + return ret; + } + rcu_read_unlock_bh(); +out_drop: + kfree_skb(skb); + return -ENETDOWN; +} + +static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, + struct bpf_nh_params *nh) +{ + const struct iphdr *ip4h = ip_hdr(skb); + struct net *net = dev_net(dev); + int err, ret = NET_XMIT_DROP; + + if (!nh) { + struct flowi4 fl4 = { + .flowi4_flags = FLOWI_FLAG_ANYSRC, + .flowi4_mark = skb->mark, + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_oif = dev->ifindex, + .flowi4_proto = ip4h->protocol, + .daddr = ip4h->daddr, + .saddr = ip4h->saddr, + }; + struct rtable *rt; + + rt = ip_route_output_flow(net, &fl4, NULL); + if (IS_ERR(rt)) + goto out_drop; + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto out_drop; + } + + skb_dst_set(skb, &rt->dst); + } + + err = bpf_out_neigh_v4(net, skb, dev, nh); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out_xmit; +out_drop: + dev->stats.tx_errors++; + kfree_skb(skb); +out_xmit: + return ret; +} +#else 
+static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, + struct bpf_nh_params *nh) +{ + kfree_skb(skb); + return NET_XMIT_DROP; +} +#endif /* CONFIG_INET */ + +static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev, + struct bpf_nh_params *nh) +{ + struct ethhdr *ethh = eth_hdr(skb); + + if (unlikely(skb->mac_header >= skb->network_header)) + goto out; + bpf_push_mac_rcsum(skb); + if (is_multicast_ether_addr(ethh->h_dest)) + goto out; + + skb_pull(skb, sizeof(*ethh)); + skb_unset_mac_header(skb); + skb_reset_network_header(skb); + + if (skb->protocol == htons(ETH_P_IP)) + return __bpf_redirect_neigh_v4(skb, dev, nh); + else if (skb->protocol == htons(ETH_P_IPV6)) + return __bpf_redirect_neigh_v6(skb, dev, nh); +out: + kfree_skb(skb); + return -ENOTSUPP; +} + +/* Internal, non-exposed redirect flags. */ +enum { + BPF_F_NEIGH = (1ULL << 1), + BPF_F_PEER = (1ULL << 2), + BPF_F_NEXTHOP = (1ULL << 3), +#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) +}; + BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) { struct net_device *dev; struct sk_buff *clone; int ret; - if (unlikely(flags & ~(BPF_F_INGRESS))) + if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) return -EINVAL; dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); @@ -2152,11 +2410,46 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = { DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); +int skb_do_redirect(struct sk_buff *skb) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct net *net = dev_net(skb->dev); + struct net_device *dev; + u32 flags = ri->flags; + + dev = dev_get_by_index_rcu(net, ri->tgt_index); + ri->tgt_index = 0; + ri->flags = 0; + if (unlikely(!dev)) + goto out_drop; + if (flags & BPF_F_PEER) { + const struct net_device_ops *ops = dev->netdev_ops; + + if (unlikely(!ops->ndo_get_peer_dev || + !skb_at_tc_ingress(skb))) + goto out_drop; + dev = ops->ndo_get_peer_dev(dev); + if (unlikely(!dev || + !is_skb_forwardable(dev, skb) || + net_eq(net, dev_net(dev)))) + goto out_drop; + skb->dev = dev; + return -EAGAIN; + } + return flags & BPF_F_NEIGH ? + __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ? 
+ &ri->nh : NULL) : + __bpf_redirect(skb, dev, flags); +out_drop: + kfree_skb(skb); + return -EINVAL; +} + BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - if (unlikely(flags & ~(BPF_F_INGRESS))) + if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) return TC_ACT_SHOT; ri->flags = flags; @@ -2165,21 +2458,6 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) return TC_ACT_REDIRECT; } -int skb_do_redirect(struct sk_buff *skb) -{ - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); - struct net_device *dev; - - dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index); - ri->tgt_index = 0; - if (unlikely(!dev)) { - kfree_skb(skb); - return -EINVAL; - } - - return __bpf_redirect(skb, dev, ri->flags); -} - static const struct bpf_func_proto bpf_redirect_proto = { .func = bpf_redirect, .gpl_only = false, @@ -2188,6 +2466,55 @@ static const struct bpf_func_proto bpf_redirect_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + if (unlikely(flags)) + return TC_ACT_SHOT; + + ri->flags = BPF_F_PEER; + ri->tgt_index = ifindex; + + return TC_ACT_REDIRECT; +} + +static const struct bpf_func_proto bpf_redirect_peer_proto = { + .func = bpf_redirect_peer, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, + .arg2_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, + int, plen, u64, flags) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + if (unlikely((plen && plen < sizeof(*params)) || flags)) + return TC_ACT_SHOT; + + ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0); + ri->tgt_index = ifindex; + + BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params)); + if (plen) + memcpy(&ri->nh, params, sizeof(ri->nh)); + + return TC_ACT_REDIRECT; +} + +static const struct bpf_func_proto bpf_redirect_neigh_proto = { + .func = bpf_redirect_neigh, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, + .arg2_type = ARG_PTR_TO_MEM_OR_NULL, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) { msg->apply_bytes = bytes; @@ -2590,8 +2917,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, } pop = 0; } else if (pop >= sge->length - a) { - sge->length = a; pop -= (sge->length - a); + sge->length = a; } } @@ -2642,6 +2969,25 @@ static const struct bpf_func_proto bpf_msg_pop_data_proto = { .arg4_type = ARG_ANYTHING, }; +#ifdef CONFIG_CGROUP_NET_CLASSID +BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb) +{ + struct sock *sk = skb_to_full_sk(skb); + + if (!sk || !sk_fullsock(sk)) + return 0; + + return sock_cgroup_classid(&sk->sk_cgrp_data); +} + +static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = { + .func = bpf_skb_cgroup_classid, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; +#endif + BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) { return task_get_classid(skb); @@ -3144,18 +3490,14 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, return 0; } -static u32 __bpf_skb_max_len(const struct sk_buff *skb) -{ - return skb->dev ? 
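bpf_redirect_peer() and bpf_redirect_neigh() above are meant to be returned from a tc classifier/action program: the former hops straight into the peer device of a veth-style pair at ingress, the latter hands the packet to the neighbor subsystem for L2 resolution at egress. A hedged sketch (the IFINDEX_* constants are hypothetical placeholders):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define IFINDEX_UPLINK          3       /* hypothetical egress device  */
#define IFINDEX_VETH_PEER       7       /* hypothetical host-side veth */

SEC("tc")
int redirect_example(struct __sk_buff *skb)
{
        /* Let the stack resolve the L2 neighbor on the uplink. */
        if (skb->mark == 1)
                return bpf_redirect_neigh(IFINDEX_UPLINK, NULL, 0, 0);

        /* Ingress-to-ingress hop into the veth peer's namespace. */
        return bpf_redirect_peer(IFINDEX_VETH_PEER, 0);
}

char _license[] SEC("license") = "GPL";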
skb->dev->mtu + skb->dev->hard_header_len : - SKB_MAX_ALLOC; -} +#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, u32, mode, u64, flags) { u32 len_cur, len_diff_abs = abs(len_diff); u32 len_min = bpf_skb_net_base_len(skb); - u32 len_max = __bpf_skb_max_len(skb); + u32 len_max = BPF_SKB_MAX_LEN; __be16 proto = skb->protocol; bool shrink = len_diff < 0; u32 off; @@ -3235,7 +3577,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, u64 flags) { - u32 max_len = __bpf_skb_max_len(skb); + u32 max_len = BPF_SKB_MAX_LEN; u32 min_len = __bpf_skb_min_len(skb); int ret; @@ -3311,7 +3653,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = { static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, u64 flags) { - u32 max_len = __bpf_skb_max_len(skb); + u32 max_len = BPF_SKB_MAX_LEN; u32 new_len = skb->len + head_room; int ret; @@ -3800,7 +4142,7 @@ BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; - if (unlikely(skb_size > skb->len)) + if (unlikely(!skb || skb_size > skb->len)) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, @@ -3818,6 +4160,21 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; +BTF_ID_LIST(bpf_skb_output_btf_ids) +BTF_ID(struct, sk_buff) + +const struct bpf_func_proto bpf_skb_output_proto = { + .func = bpf_skb_event_output, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + .btf_id = bpf_skb_output_btf_ids, +}; + static unsigned short bpf_tunnel_key_af(u64 flags) { return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; @@ -4142,7 +4499,8 @@ BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; - if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) + if (unlikely(!xdp || + xdp_size > (unsigned long)(xdp->data_end - xdp->data))) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, xdp->data, @@ -4160,9 +4518,24 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; +BTF_ID_LIST(bpf_xdp_output_btf_ids) +BTF_ID(struct, xdp_buff) + +const struct bpf_func_proto bpf_xdp_output_proto = { + .func = bpf_xdp_event_output, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + .btf_id = bpf_xdp_output_btf_ids, +}; + BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) { - return skb->sk ? sock_gen_cookie(skb->sk) : 0; + return skb->sk ? 
__sock_gen_cookie(skb->sk) : 0; } static const struct bpf_func_proto bpf_get_socket_cookie_proto = { @@ -4174,7 +4547,7 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = { BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) { - return sock_gen_cookie(ctx->sk); + return __sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { @@ -4184,9 +4557,21 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx) +{ + return __sock_gen_cookie(ctx); +} + +static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = { + .func = bpf_get_socket_cookie_sock, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) { - return sock_gen_cookie(ctx->sk); + return __sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { @@ -4196,6 +4581,61 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +static u64 __bpf_get_netns_cookie(struct sock *sk) +{ + const struct net *net = sk ? sock_net(sk) : &init_net; + + return net->net_cookie; +} + +BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx) +{ + return __bpf_get_netns_cookie(ctx); +} + +static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = { + .func = bpf_get_netns_cookie_sock, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; + +BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) +{ + return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); +} + +static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = { + .func = bpf_get_netns_cookie_sock_addr, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; + +BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) +{ + return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL); +} + +static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = { + .func = bpf_get_netns_cookie_sock_ops, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; + +BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx) +{ + return __bpf_get_netns_cookie(ctx ? 
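The bpf_get_netns_cookie() variants above all funnel into __bpf_get_netns_cookie(), which returns a per-namespace identifier that stays stable for the life of the netns (falling back to init_net for a NULL context). A hedged sketch keying per-netns counters from a cgroup sock hook:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u64);     /* netns cookie */
        __type(value, __u64);   /* socket count */
} socks_per_netns SEC(".maps");

SEC("cgroup/sock")
int count_socks(struct bpf_sock *ctx)
{
        __u64 cookie = bpf_get_netns_cookie(ctx);
        __u64 one = 1, *cnt;

        cnt = bpf_map_lookup_elem(&socks_per_netns, &cookie);
        if (cnt)
                __sync_fetch_and_add(cnt, 1);
        else
                bpf_map_update_elem(&socks_per_netns, &cookie, &one,
                                    BPF_ANY);
        return 1;       /* allow the socket */
}

char _license[] SEC("license") = "GPL";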
ctx->sk : NULL); +} + +static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = { + .func = bpf_get_netns_cookie_sk_msg, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX_OR_NULL, +}; + BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) { struct sock *sk = sk_to_full_sk(skb->sk); @@ -4214,8 +4654,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = { .arg1_type = ARG_PTR_TO_CTX, }; -BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock, - struct bpf_map *, map, u64, flags, void *, data, u64, size) +BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, u64, flags, + void *, data, u64, size) { if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; @@ -4223,8 +4663,8 @@ BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock, return bpf_event_output(map, flags, data, size, NULL, 0, NULL); } -static const struct bpf_func_proto bpf_sockopt_event_output_proto = { - .func = bpf_sockopt_event_output, +static const struct bpf_func_proto bpf_event_output_data_proto = { + .func = bpf_event_output_data, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, @@ -4268,7 +4708,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); - sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; + sk->sk_max_pacing_rate = (val == ~0U) ? + ~0UL : (unsigned int)val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); break; @@ -4509,30 +4950,28 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, { #ifdef CONFIG_INET struct sock *sk = ctx->sk; + u32 flags = BIND_FROM_BPF; int err; - /* Binding to port can be expensive so it's prohibited in the helper. - * Only binding to IP is supported. - */ err = -EINVAL; if (addr_len < offsetofend(struct sockaddr, sa_family)) return err; if (addr->sa_family == AF_INET) { if (addr_len < sizeof(struct sockaddr_in)) return err; - if (((struct sockaddr_in *)addr)->sin_port != htons(0)) - return err; - return __inet_bind(sk, addr, addr_len, true, false); + if (((struct sockaddr_in *)addr)->sin_port == htons(0)) + flags |= BIND_FORCE_ADDRESS_NO_PORT; + return __inet_bind(sk, addr, addr_len, flags); #if IS_ENABLED(CONFIG_IPV6) } else if (addr->sa_family == AF_INET6) { if (addr_len < SIN6_LEN_RFC2133) return err; - if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) - return err; + if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0)) + flags |= BIND_FORCE_ADDRESS_NO_PORT; /* ipv6_bpf_stub cannot be NULL, since it's called from * bpf_cgroup_inet6_connect hook and ipv6 is already loaded */ - return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false); + return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags); #endif /* CONFIG_IPV6 */ } #endif /* CONFIG_INET */ @@ -4648,6 +5087,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, fl4.saddr = params->ipv4_src; fl4.fl4_sport = params->sport; fl4.fl4_dport = params->dport; + fl4.flowi4_multipath_hash = 0; if (flags & BPF_FIB_LOOKUP_DIRECT) { u32 tbid = l3mdev_fib_table_rcu(dev) ? 
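The relaxed bpf_bind() above no longer rejects a non-zero port outright; a zero port instead selects BIND_FORCE_ADDRESS_NO_PORT, deferring port allocation to connect(). A hedged connect4 hook sketch (the 10.0.0.1 source address is a hypothetical example):

#include <sys/socket.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/connect4")
int set_src_addr(struct bpf_sock_addr *ctx)
{
        struct sockaddr_in sa = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = bpf_htonl(0x0a000001), /* 10.0.0.1 */
                /* sin_port left 0: address-only bind, port at connect() */
        };

        if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
                return 0;       /* reject the connect on failure */
        return 1;
}

char _license[] SEC("license") = "GPL";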
: RT_TABLE_MAIN; @@ -4876,6 +5316,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, { struct net *net = dev_net(skb->dev); int rc = -EAFNOSUPPORT; + bool check_mtu = false; if (plen < sizeof(*params)) return -EINVAL; @@ -4883,22 +5324,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) return -EINVAL; + if (params->tot_len) + check_mtu = true; + switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: - rc = bpf_ipv4_fib_lookup(net, params, flags, false); + rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - rc = bpf_ipv6_fib_lookup(net, params, flags, false); + rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu); break; #endif } - if (!rc) { + if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) { struct net_device *dev; + /* When tot_len isn't provided by user, check skb + * against MTU of FIB lookup resulting net_device + */ dev = dev_get_by_index_rcu(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; @@ -5410,8 +5857,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { BPF_CALL_1(bpf_sk_release, struct sock *, sk) { - /* Only full sockets have sk->sk_flags. */ - if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE)) + if (sk_is_refcounted(sk)) sock_gen_put(sk); return 0; } @@ -5728,12 +6174,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) { unsigned int iphdr_len; - if (skb->protocol == cpu_to_be16(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case cpu_to_be16(ETH_P_IP): iphdr_len = sizeof(struct iphdr); - else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) + break; + case cpu_to_be16(ETH_P_IPV6): iphdr_len = sizeof(struct ipv6hdr); - else + break; + default: return 0; + } if (skb_headlen(skb) < iphdr_len) return 0; @@ -5927,6 +6377,36 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { .arg5_type = ARG_CONST_SIZE, }; +BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags) +{ + if (flags != 0) + return -EINVAL; + if (!skb_at_tc_ingress(skb)) + return -EOPNOTSUPP; + if (unlikely(dev_net(skb->dev) != sock_net(sk))) + return -ENETUNREACH; + if (unlikely(sk->sk_reuseport)) + return -ESOCKTNOSUPPORT; + if (sk_is_refcounted(sk) && + unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) + return -ENOENT; + + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sock_pfree; + + return 0; +} + +static const struct bpf_func_proto bpf_sk_assign_proto = { + .func = bpf_sk_assign, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_SOCK_COMMON, + .arg3_type = ARG_ANYTHING, +}; + #endif /* CONFIG_INET */ bool bpf_helper_changes_pkt_data(void *func) @@ -5963,7 +6443,7 @@ bool bpf_helper_changes_pkt_data(void *func) return false; } -static const struct bpf_func_proto * +const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id) { switch (func_id) { @@ -5989,6 +6469,26 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_tail_call_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; + case BPF_FUNC_ringbuf_output: + return &bpf_ringbuf_output_proto; + case BPF_FUNC_ringbuf_reserve: + return &bpf_ringbuf_reserve_proto; + case BPF_FUNC_ringbuf_submit: + return &bpf_ringbuf_submit_proto; + case BPF_FUNC_ringbuf_discard: + return &bpf_ringbuf_discard_proto; + case BPF_FUNC_ringbuf_query: + return &bpf_ringbuf_query_proto; + case BPF_FUNC_for_each_map_elem: 
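bpf_sk_assign() above gives tc ingress programs TPROXY-like steering: look the destination socket up from BPF and pin it to the skb so the later demux is skipped. A hedged sketch (a real program would parse the 4-tuple from the packet; the loopback listener here is a hypothetical stand-in):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int steer(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {
                .ipv4.daddr = bpf_htonl(0x7f000001),    /* 127.0.0.1 */
                .ipv4.dport = bpf_htons(8080),
        };
        struct bpf_sock *sk;

        sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return TC_ACT_OK;
        bpf_sk_assign(skb, sk, 0);      /* pin; harmless if it fails */
        bpf_sk_release(sk);
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";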
+ return &bpf_for_each_map_elem_proto; + case BPF_FUNC_timer_init: + return &bpf_timer_init_proto; + case BPF_FUNC_timer_set_callback: + return &bpf_timer_set_callback_proto; + case BPF_FUNC_timer_start: + return &bpf_timer_start_proto; + case BPF_FUNC_timer_cancel: + return &bpf_timer_cancel_proto; default: break; } @@ -6003,6 +6503,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_spin_unlock_proto; case BPF_FUNC_trace_printk: return bpf_get_trace_printk_proto(); + case BPF_FUNC_jiffies64: + return &bpf_jiffies64_proto; default: return NULL; } @@ -6019,6 +6521,12 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_uid_gid_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_sock_proto; + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_sock_proto; + case BPF_FUNC_perf_event_output: + return &bpf_event_output_data_proto; default: return bpf_base_func_proto(func_id); } @@ -6043,8 +6551,12 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_addr_proto; + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_sock_addr_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; + case BPF_FUNC_perf_event_output: + return &bpf_event_output_data_proto; #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: return &bpf_sock_addr_sk_lookup_tcp_proto; @@ -6163,6 +6675,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return bpf_get_skb_set_tunnel_proto(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; + case BPF_FUNC_redirect_neigh: + return &bpf_redirect_neigh_proto; + case BPF_FUNC_redirect_peer: + return &bpf_redirect_peer_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; case BPF_FUNC_get_hash_recalc: @@ -6193,6 +6709,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_get_xfrm_state: return &bpf_skb_get_xfrm_state_proto; #endif +#ifdef CONFIG_CGROUP_NET_CLASSID + case BPF_FUNC_skb_cgroup_classid: + return &bpf_skb_cgroup_classid_proto; +#endif #ifdef CONFIG_SOCK_CGROUP_DATA case BPF_FUNC_skb_cgroup_id: return &bpf_skb_cgroup_id_proto; @@ -6218,6 +6738,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skb_ecn_set_ce_proto; case BPF_FUNC_tcp_gen_syncookie: return &bpf_tcp_gen_syncookie_proto; + case BPF_FUNC_sk_assign: + return &bpf_sk_assign_proto; #endif default: return bpf_base_func_proto(func_id); @@ -6287,11 +6809,13 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: - return &bpf_sockopt_event_output_proto; + return &bpf_event_output_data_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_sock_ops_proto; #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; @@ -6322,6 +6846,12 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_msg_push_data_proto; case BPF_FUNC_msg_pop_data: return &bpf_msg_pop_data_proto; + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return 
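bpf_base_func_proto() above also exposes the bpf_timer_*() helpers (which appear to be backported into this tree; upstream they landed later than the surrounding code). Timers live inside map values; a hedged one-shot sketch:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct elem {
        struct bpf_timer t;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, __u32 *key, struct elem *val)
{
        return 0;       /* one-shot: do not re-arm */
}

SEC("tc")
int arm_timer(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct elem *val = bpf_map_lookup_elem(&timers, &key);

        if (!val)
                return TC_ACT_OK;
        bpf_timer_init(&val->t, &timers, 1 /* CLOCK_MONOTONIC */);
        bpf_timer_set_callback(&val->t, timer_cb);
        bpf_timer_start(&val->t, 1000000000ULL /* 1s */, 0);
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";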
&bpf_sk_storage_delete_proto; + case BPF_FUNC_get_netns_cookie: + return &bpf_get_netns_cookie_sk_msg_proto; default: return bpf_base_func_proto(func_id); } @@ -6566,7 +7096,7 @@ static bool cg_skb_is_valid_access(int off, int size, return false; case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return false; break; } @@ -6578,7 +7108,7 @@ static bool cg_skb_is_valid_access(int off, int size, case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; case bpf_ctx_range(struct __sk_buff, tstamp): - if (!capable(CAP_SYS_ADMIN)) + if (!bpf_capable()) return false; break; default: @@ -6784,8 +7314,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; - /* We're guaranteed here that CTX is in R6. */ - *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); } else { @@ -6793,6 +7321,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, if (orig->imm) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); } + /* We're guaranteed here that CTX is in R6. */ + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); switch (BPF_SIZE(orig->code)) { case BPF_B: @@ -6928,6 +7458,8 @@ static bool sock_addr_is_valid_access(int off, int size, switch (prog->expected_attach_type) { case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET4_CONNECT: + case BPF_CGROUP_INET4_GETPEERNAME: + case BPF_CGROUP_INET4_GETSOCKNAME: case BPF_CGROUP_UDP4_SENDMSG: case BPF_CGROUP_UDP4_RECVMSG: break; @@ -6939,6 +7471,8 @@ static bool sock_addr_is_valid_access(int off, int size, switch (prog->expected_attach_type) { case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET6_CONNECT: + case BPF_CGROUP_INET6_GETPEERNAME: + case BPF_CGROUP_INET6_GETSOCKNAME: case BPF_CGROUP_UDP6_SENDMSG: case BPF_CGROUP_UDP6_RECVMSG: break; @@ -7135,6 +7669,11 @@ static bool sk_msg_is_valid_access(int off, int size, if (size != sizeof(__u64)) return false; break; + case offsetof(struct sk_msg_md, sk): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET; + break; case bpf_ctx_range(struct sk_msg_md, family): case bpf_ctx_range(struct sk_msg_md, remote_ip4): case bpf_ctx_range(struct sk_msg_md, local_ip4): @@ -8004,6 +8543,43 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, offsetof(OBJ, OBJ_FIELD)); \ } while (0) +#define SOCK_OPS_GET_SK() \ + do { \ + int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + fullsock_reg = reg; \ + jmp += 2; \ + } \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, \ + is_fullsock), \ + fullsock_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + is_fullsock)); \ + *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ + if (si->dst_reg == si->src_reg) \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, sk),\ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, sk));\ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_JMP_A(1); \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct 
bpf_sock_ops_kern, \ + temp)); \ + } \ + } while (0) + #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) @@ -8288,17 +8864,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); break; case offsetof(struct bpf_sock_ops, sk): - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, - is_fullsock), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, - is_fullsock)); - *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, sk), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, sk)); + SOCK_OPS_GET_SK(); break; } return insn - insn_buf; @@ -8460,6 +9026,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, offsetof(struct sk_msg_sg, size)); break; + + case offsetof(struct sk_msg_md, sk): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_msg, sk)); + break; } return insn - insn_buf; @@ -8672,16 +9244,6 @@ out: } #ifdef CONFIG_INET -struct sk_reuseport_kern { - struct sk_buff *skb; - struct sock *sk; - struct sock *selected_sk; - void *data_end; - u32 hash; - u32 reuseport_id; - bool bind_inany; -}; - static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, struct sock_reuseport *reuse, struct sock *sk, struct sk_buff *skb, @@ -8715,6 +9277,7 @@ struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, struct bpf_map *, map, void *, key, u32, flags) { + bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; struct sock_reuseport *reuse; struct sock *selected_sk; @@ -8723,26 +9286,24 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, return -ENOENT; reuse = rcu_dereference(selected_sk->sk_reuseport_cb); - if (!reuse) - /* selected_sk is unhashed (e.g. by close()) after the - * above map_lookup_elem(). Treat selected_sk has already - * been removed from the map. + if (!reuse) { + /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ + if (sk_is_refcounted(selected_sk)) + sock_put(selected_sk); + + /* reuseport_array has only sk with non NULL sk_reuseport_cb. + * The only (!reuse) case here is - the sk has already been + * unhashed (e.g. by close()), so treat it as -ENOENT. + * + * Other maps (e.g. sock_map) do not provide this guarantee and + * the sk may never be in the reuseport group to begin with. */ - return -ENOENT; + return is_sockarray ? -ENOENT : -EINVAL; + } if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { - struct sock *sk; + struct sock *sk = reuse_kern->sk; - if (unlikely(!reuse_kern->reuseport_id)) - /* There is a small race between adding the - * sk to the map and setting the - * reuse_kern->reuseport_id. - * Treat it as the sk has not been added to - * the bpf map yet. 
- */ - return -ENOENT; - - sk = reuse_kern->sk; if (sk->sk_protocol != selected_sk->sk_protocol) return -EPROTOTYPE; else if (sk->sk_family != selected_sk->sk_family) @@ -8937,4 +9498,320 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = { const struct bpf_prog_ops sk_reuseport_prog_ops = { }; + +DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); +EXPORT_SYMBOL(bpf_sk_lookup_enabled); + +BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, + struct sock *, sk, u64, flags) +{ + if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | + BPF_SK_LOOKUP_F_NO_REUSEPORT))) + return -EINVAL; + if (unlikely(sk && sk_is_refcounted(sk))) + return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ + if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED)) + return -ESOCKTNOSUPPORT; /* reject connected sockets */ + + /* Check if socket is suitable for packet L3/L4 protocol */ + if (sk && sk->sk_protocol != ctx->protocol) + return -EPROTOTYPE; + if (sk && sk->sk_family != ctx->family && + (sk->sk_family == AF_INET || ipv6_only_sock(sk))) + return -EAFNOSUPPORT; + + if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) + return -EEXIST; + + /* Select socket as lookup result */ + ctx->selected_sk = sk; + ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; + return 0; +} + +static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { + .func = bpf_sk_lookup_assign, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, + .arg3_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_perf_event_output: + return &bpf_event_output_data_proto; + case BPF_FUNC_sk_assign: + return &bpf_sk_lookup_assign_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +static bool sk_lookup_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) + return false; + if (off % size != 0) + return false; + if (type != BPF_READ) + return false; + + switch (off) { + case offsetof(struct bpf_sk_lookup, sk): + info->reg_type = PTR_TO_SOCKET_OR_NULL; + return size == sizeof(__u64); + + case bpf_ctx_range(struct bpf_sk_lookup, family): + case bpf_ctx_range(struct bpf_sk_lookup, protocol): + case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): + case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): + case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): + case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): + case bpf_ctx_range(struct bpf_sk_lookup, remote_port): + case bpf_ctx_range(struct bpf_sk_lookup, local_port): + bpf_ctx_record_field_size(info, sizeof(__u32)); + return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); + + default: + return false; + } +} + +static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + + switch (si->off) { + case offsetof(struct bpf_sk_lookup, sk): + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, selected_sk)); + break; + + case offsetof(struct bpf_sk_lookup, family): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + 
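The sk_lookup plumbing above and below (bpf_sk_lookup_assign(), the access checker, and the context conversion) backs the new BPF_PROG_TYPE_SK_LOOKUP program type. A hedged sketch of such a program steering every lookup to one listener stored in a sockmap:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} dispatch SEC(".maps");

SEC("sk_lookup")
int dispatch_all(struct bpf_sk_lookup *ctx)
{
        struct bpf_sock *sk;
        __u32 key = 0;

        sk = bpf_map_lookup_elem(&dispatch, &key);
        if (!sk)
                return SK_PASS;
        bpf_sk_assign(ctx, sk, 0);
        bpf_sk_release(sk);
        return SK_PASS;
}

char _license[] SEC("license") = "GPL";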
bpf_target_off(struct bpf_sk_lookup_kern, + family, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, protocol): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + protocol, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, remote_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.saddr, 4, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.daddr, 4, target_size)); + break; + + case bpf_ctx_range_till(struct bpf_sk_lookup, + remote_ip6[0], remote_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.saddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case bpf_ctx_range_till(struct bpf_sk_lookup, + local_ip6[0], local_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.daddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case offsetof(struct bpf_sk_lookup, remote_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + sport, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + dport, 2, target_size)); + break; + } + + return insn - insn_buf; +} + +const struct bpf_prog_ops sk_lookup_prog_ops = { +}; + +const struct bpf_verifier_ops sk_lookup_verifier_ops = { + .get_func_proto = sk_lookup_func_proto, + .is_valid_access = sk_lookup_is_valid_access, + .convert_ctx_access = sk_lookup_convert_ctx_access, +}; + #endif /* CONFIG_INET */ + +DEFINE_BPF_DISPATCHER(bpf_dispatcher_xdp) + +void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) +{ + bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(bpf_dispatcher_xdp), + prev_prog, prog); +} + +#ifdef CONFIG_DEBUG_INFO_BTF +BTF_ID_LIST_GLOBAL(btf_sock_ids) +#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +#else +u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; +#endif + +static bool check_arg_btf_id(u32 btf_id, u32 arg) +{ + int i; + + /* only one argument, no need to check arg */ + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) + if (btf_sock_ids[i] == btf_id) + return true; + return false; +} + +BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) +{ + /* tcp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
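
/*
 * What the two-instruction sequence emitted above for remote_ip6/local_ip6
 * reads boils down to, written as plain C over a simplified context (a
 * sketch; the struct is reduced to the one field used). The BPF_LDX of the
 * pointer followed by BPF_JMP_IMM(BPF_JEQ, reg, 0, 1) means a NULL address
 * pointer yields 0 rather than a fault; with CONFIG_IPV6 disabled the
 * conversion emits BPF_MOV32_IMM(dst, 0), i.e. the field always reads 0.
 */
#include <stdint.h>

struct in6_addr_sketch { uint32_t s6_addr32[4]; };
struct sk_lookup_kern_sketch {
	const struct in6_addr_sketch *saddr;	/* may be NULL */
};

static uint32_t read_remote_ip6_word(const struct sk_lookup_kern_sketch *kctx,
				     int i)
{
	const struct in6_addr_sketch *a = kctx->saddr;

	return a ? a->s6_addr32[i] : 0;
}
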
+ */ + BTF_TYPE_EMIT(struct tcp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && + sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { + .func = bpf_skc_to_tcp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], +}; + +BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) +{ + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { + .func = bpf_skc_to_tcp_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], +}; + +BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { + .func = bpf_skc_to_tcp_timewait_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], +}; + +BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { + .func = bpf_skc_to_tcp_request_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], +}; + +BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) +{ + /* udp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
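
/*
 * Hypothetical consumer of the bpf_skc_to_tcp_sock() cast defined above (a
 * sketch, not part of this patch: the "iter/tcp" program type, its context
 * struct and a generated vmlinux.h are assumed to be available in this tree,
 * as they were introduced alongside these helpers upstream). The helper
 * returns NULL unless the socket really is a full TCP socket, so the
 * verifier forces a NULL check before any tcp_sock field is read.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u32 last_srtt_us;

SEC("iter/tcp")
int srtt_probe(struct bpf_iter__tcp *ctx)
{
	struct sock_common *skc = ctx->sk_common;
	struct tcp_sock *tp;

	if (!skc)
		return 0;

	tp = bpf_skc_to_tcp_sock(skc);
	if (!tp)	/* e.g. request or timewait socks fail the cast */
		return 0;

	last_srtt_us = tp->srtt_us;
	return 0;
}

char _license[] SEC("license") = "GPL";
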
+ */ + BTF_TYPE_EMIT(struct udp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && + sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { + .func = bpf_skc_to_udp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], +}; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 96b2566c298d..5a9dd2099236 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -31,8 +31,7 @@ #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_labels.h> #endif - -static DEFINE_MUTEX(flow_dissector_mutex); +#include <linux/bpf-netns.h> static void dissector_set_key(struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) @@ -70,83 +69,37 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, } EXPORT_SYMBOL(skb_flow_dissector_init); -int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr) +#ifdef CONFIG_BPF_SYSCALL +int flow_dissector_bpf_prog_attach_check(struct net *net, + struct bpf_prog *prog) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); - u32 prog_id, prog_cnt = 0, flags = 0; - struct bpf_prog *attached; - struct net *net; + enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; - if (attr->query.query_flags) - return -EINVAL; + if (net == &init_net) { + /* BPF flow dissector in the root namespace overrides + * any per-net-namespace one. When attaching to root, + * make sure we don't have any BPF program attached + * to the non-root namespaces. + */ + struct net *ns; - net = get_net_ns_by_fd(attr->query.target_fd); - if (IS_ERR(net)) - return PTR_ERR(net); - - rcu_read_lock(); - attached = rcu_dereference(net->flow_dissector_prog); - if (attached) { - prog_cnt = 1; - prog_id = attached->aux->id; + for_each_net(ns) { + if (ns == &init_net) + continue; + if (rcu_access_pointer(ns->bpf.run_array[type])) + return -EEXIST; + } + } else { + /* Make sure root flow dissector is not attached + * when attaching to the non-root namespace. 
+ */ + if (rcu_access_pointer(init_net.bpf.run_array[type])) + return -EEXIST; } - rcu_read_unlock(); - - put_net(net); - - if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) - return -EFAULT; - if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) - return -EFAULT; - - if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) - return 0; - - if (copy_to_user(prog_ids, &prog_id, sizeof(u32))) - return -EFAULT; return 0; } -int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog) -{ - struct bpf_prog *attached; - struct net *net; - - net = current->nsproxy->net_ns; - mutex_lock(&flow_dissector_mutex); - attached = rcu_dereference_protected(net->flow_dissector_prog, - lockdep_is_held(&flow_dissector_mutex)); - if (attached) { - /* Only one BPF program can be attached at a time */ - mutex_unlock(&flow_dissector_mutex); - return -EEXIST; - } - rcu_assign_pointer(net->flow_dissector_prog, prog); - mutex_unlock(&flow_dissector_mutex); - return 0; -} - -int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) -{ - struct bpf_prog *attached; - struct net *net; - - net = current->nsproxy->net_ns; - mutex_lock(&flow_dissector_mutex); - attached = rcu_dereference_protected(net->flow_dissector_prog, - lockdep_is_held(&flow_dissector_mutex)); - if (!attached) { - mutex_unlock(&flow_dissector_mutex); - return -ENOENT; - } - RCU_INIT_POINTER(net->flow_dissector_prog, NULL); - bpf_prog_put(attached); - mutex_unlock(&flow_dissector_mutex); - return 0; -} /** * skb_flow_get_be16 - extract be16 entity * @skb: sk_buff to extract from @@ -169,6 +122,8 @@ static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, return 0; } +#endif /* CONFIG_BPF_SYSCALL */ + /** * __skb_flow_get_ports - extract the upper layer ports and return them * @skb: sk_buff to extract the ports from @@ -887,7 +842,6 @@ bool __skb_flow_dissect(const struct net *net, struct flow_dissector_key_icmp *key_icmp; struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; - struct bpf_prog *attached = NULL; enum flow_dissect_ret fdret; enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; int num_hdrs = 0; @@ -941,10 +895,15 @@ bool __skb_flow_dissect(const struct net *net, WARN_ON_ONCE(!net); if (net) { - rcu_read_lock(); - attached = rcu_dereference(net->flow_dissector_prog); + enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; + struct bpf_prog_array *run_array; - if (attached) { + rcu_read_lock(); + run_array = rcu_dereference(init_net.bpf.run_array[type]); + if (!run_array) + run_array = rcu_dereference(net->bpf.run_array[type]); + + if (run_array) { struct bpf_flow_keys flow_keys; struct bpf_flow_dissector ctx = { .flow_keys = &flow_keys, @@ -952,6 +911,7 @@ bool __skb_flow_dissect(const struct net *net, .data_end = data + hlen, }; __be16 n_proto = proto; + struct bpf_prog *prog; if (skb) { ctx.skb = skb; @@ -962,7 +922,8 @@ bool __skb_flow_dissect(const struct net *net, n_proto = skb->protocol; } - ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff, + prog = READ_ONCE(run_array->items[0].prog); + ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, hlen, flags); __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); @@ -1012,6 +973,9 @@ proto_again: key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; } + __skb_flow_dissect_ipv4(skb, flow_dissector, + target_container, data, iph); + if (ip_is_fragment(iph)) { key_control->flags |= FLOW_DIS_IS_FRAGMENT; @@ -1028,9 +992,6 @@ proto_again: 
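
/*
 * The attach rule enforced by flow_dissector_bpf_prog_attach_check() above,
 * restated as a standalone decision function (a sketch; the booleans stand
 * in for the rcu_access_pointer() tests on the per-netns run arrays). The
 * same precedence governs dissection itself: __skb_flow_dissect() runs the
 * init_net program when one is attached, and only falls back to the skb's
 * own namespace otherwise.
 */
#include <errno.h>
#include <stdbool.h>

static int flow_dissector_may_attach(bool target_is_init_net,
				     bool init_net_has_prog,
				     bool other_netns_has_prog)
{
	if (target_is_init_net)
		return other_netns_has_prog ? -EEXIST : 0;
	return init_net_has_prog ? -EEXIST : 0;
}
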
} } - __skb_flow_dissect_ipv4(skb, flow_dissector, - target_container, data, iph); - break; } case htons(ETH_P_IPV6): { @@ -1761,5 +1722,4 @@ static int __init init_default_flow_dissectors(void) ARRAY_SIZE(flow_keys_basic_dissector_keys)); return 0; } - core_initcall(init_default_flow_dissectors); diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index bfe7bdd4c340..98c396769be9 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t) u64 rate, brate; est_fetch_counters(est, &b); - brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log); - brate -= (est->avbps >> est->ewma_log); + brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log); + brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log); - rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log); - rate -= (est->avpps >> est->ewma_log); + rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log); + rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log); write_seqcount_begin(&est->seq); est->avbps += brate; @@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, if (parm->interval < -2 || parm->interval > 3) return -EINVAL; + if (parm->ewma_log == 0 || parm->ewma_log >= 31) + return -EINVAL; + est = kzalloc(sizeof(*est), GFP_KERNEL); if (!est) return -ENOBUFS; diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 99a6de52b21d..a5502c5aa44e 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, { int ret; - /* Preempt disable is needed to protect per-cpu redirect_info between - * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and - * access to maps strictly require a rcu_read_lock() for protection, - * mixing with BH RCU lock doesn't work. + /* Preempt disable and BH disable are needed to protect per-cpu + * redirect_info between BPF prog and skb_do_redirect(). */ preempt_disable(); + local_bh_disable(); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(lwt->prog, skb); @@ -78,6 +77,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, break; } + local_bh_enable(); preempt_enable(); return ret; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 920784a9b7ff..6635b83113f8 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -235,6 +235,8 @@ static int neigh_forced_gc(struct neigh_table *tbl) write_lock(&n->lock); if ((n->nud_state == NUD_FAILED) || + (tbl->is_multicast && + tbl->is_multicast(n->primary_key)) || time_after(tref, n->updated)) remove = true; write_unlock(&n->lock); @@ -1242,13 +1244,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, old = neigh->nud_state; err = -EPERM; + if (neigh->dead) { + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); + new = old; + goto out; + } if (!(flags & NEIGH_UPDATE_F_ADMIN) && (old & (NUD_NOARP | NUD_PERMANENT))) goto out; - if (neigh->dead) { - NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); - goto out; - } ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); @@ -1376,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, * we can reinject the packet there. 
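
/*
 * The exponentially weighted moving average performed by est_timer() above,
 * isolated into plain C (a sketch; u64 arithmetic as in the kernel). The old
 * code folded ewma_log into the scaling shift, (10 - ewma_log - intvl_log),
 * which becomes an invalid negative shift count once ewma_log + intvl_log
 * exceeds 10; the fixed code scales by (10 - intvl_log) only and applies the
 * EWMA decay as two separate right shifts, with gen_new_estimator() now
 * rejecting ewma_log == 0 and ewma_log >= 31. Each period this computes
 * avg += (sample - avg) / 2^ewma_log.
 */
#include <stdint.h>

static uint64_t ewma_step(uint64_t avg, uint64_t scaled_sample,
			  unsigned int ewma_log)
{
	return avg + (scaled_sample >> ewma_log) - (avg >> ewma_log);
}
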
*/ n2 = NULL; - if (dst) { + if (dst && dst->obsolete != DST_OBSOLETE_DEAD) { n2 = dst_neigh_lookup_skb(dst, skb); if (n2) n1 = n2; @@ -1954,6 +1957,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, NEIGH_UPDATE_F_OVERRIDE_ISROUTER); } + if (protocol) + neigh->protocol = protocol; + if (ndm->ndm_flags & NTF_EXT_LEARNED) flags |= NEIGH_UPDATE_F_EXT_LEARNED; @@ -1967,9 +1973,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, NETLINK_CB(skb).portid, extack); - if (protocol) - neigh->protocol = protocol; - neigh_release(neigh); out: @@ -3290,6 +3293,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return per_cpu_ptr(tbl->stats, cpu); } + (*pos)++; return NULL; } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 4c826b8bf9b1..98474d85fb51 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1036,7 +1036,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); - return sprintf(buf, "%lu", trans_timeout); + return sprintf(buf, fmt_ulong, trans_timeout); } static unsigned int get_netdev_queue_index(struct netdev_queue *queue) @@ -1235,8 +1235,8 @@ static const struct attribute_group dql_group = { static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf) { + int cpu, len, ret, num_tc = 1, tc = 0; struct net_device *dev = queue->dev; - int cpu, len, num_tc = 1, tc = 0; struct xps_dev_maps *dev_maps; cpumask_var_t mask; unsigned long index; @@ -1246,22 +1246,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, index = get_netdev_queue_index(queue); + if (!rtnl_trylock()) + return restart_syscall(); + if (dev->num_tc) { /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; - if (num_tc < 0) - return -EINVAL; + if (num_tc < 0) { + ret = -EINVAL; + goto err_rtnl_unlock; + } /* If queue belongs to subordinate dev use its map */ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; tc = netdev_txq_to_tc(dev, index); - if (tc < 0) - return -EINVAL; + if (tc < 0) { + ret = -EINVAL; + goto err_rtnl_unlock; + } } - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) - return -ENOMEM; + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { + ret = -ENOMEM; + goto err_rtnl_unlock; + } rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_cpus_map); @@ -1284,9 +1293,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, } rcu_read_unlock(); + rtnl_unlock(); + len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); free_cpumask_var(mask); return len < PAGE_SIZE ? 
len : -EINVAL; + +err_rtnl_unlock: + rtnl_unlock(); + return ret; } static ssize_t xps_cpus_store(struct netdev_queue *queue, @@ -1314,7 +1329,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, return err; } + if (!rtnl_trylock()) { + free_cpumask_var(mask); + return restart_syscall(); + } + err = netif_set_xps_queue(dev, mask, index); + rtnl_unlock(); free_cpumask_var(mask); @@ -1326,22 +1347,29 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) { + int j, len, ret, num_tc = 1, tc = 0; struct net_device *dev = queue->dev; struct xps_dev_maps *dev_maps; unsigned long *mask, index; - int j, len, num_tc = 1, tc = 0; index = get_netdev_queue_index(queue); + if (!rtnl_trylock()) + return restart_syscall(); + if (dev->num_tc) { num_tc = dev->num_tc; tc = netdev_txq_to_tc(dev, index); - if (tc < 0) - return -EINVAL; + if (tc < 0) { + ret = -EINVAL; + goto err_rtnl_unlock; + } } mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); - if (!mask) - return -ENOMEM; + if (!mask) { + ret = -ENOMEM; + goto err_rtnl_unlock; + } rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_rxqs_map); @@ -1367,10 +1395,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) out_no_maps: rcu_read_unlock(); + rtnl_unlock(); + len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); bitmap_free(mask); return len < PAGE_SIZE ? len : -EINVAL; + +err_rtnl_unlock: + rtnl_unlock(); + return ret; } static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, @@ -1396,10 +1430,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, return err; } + if (!rtnl_trylock()) { + bitmap_free(mask); + return restart_syscall(); + } + cpus_read_lock(); err = __netif_set_xps_queue(dev, mask, index, true); cpus_read_unlock(); + rtnl_unlock(); + bitmap_free(mask); return err ? 
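
/*
 * The locking pattern the xps_cpus/xps_rxqs handlers above adopt, shown in
 * isolation (a sketch; kernel context assumed, fill_buf_under_rtnl() is a
 * hypothetical stand-in). A sysfs handler that blocks unconditionally on
 * rtnl_lock() risks deadlocking against teardown paths that hold the RTNL
 * while waiting for these sysfs files to quiesce, so it backs off and lets
 * the syscall be restarted instead:
 */
static ssize_t guarded_show(char *buf)
{
	ssize_t ret;

	if (!rtnl_trylock())
		return restart_syscall();	/* retried from syscall entry */

	ret = fill_buf_under_rtnl(buf);		/* hypothetical worker */

	rtnl_unlock();
	return ret;
}
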
: len; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 39402840025e..2d135b03b18b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -19,6 +19,7 @@ #include <linux/net_namespace.h> #include <linux/sched/task.h> #include <linux/uidgid.h> +#include <linux/cookie.h> #include <net/sock.h> #include <net/netlink.h> @@ -69,6 +70,8 @@ EXPORT_SYMBOL_GPL(pernet_ops_rwsem); static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; +DEFINE_COOKIE(net_cookie); + static struct net_generic *net_alloc_generic(void) { struct net_generic *ng; @@ -325,6 +328,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) refcount_set(&net->count, 1); refcount_set(&net->passive, 1); get_random_bytes(&net->hash_mix, sizeof(u32)); + preempt_disable(); + net->net_cookie = gen_cookie_next(&net_cookie); + preempt_enable(); net->dev_base_seq = 1; net->user_ns = user_ns; idr_init(&net->netns_ids); diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index b4c87fe31be2..41b24cd31562 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -127,10 +127,8 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, cs->classid = (u32)value; css_task_iter_start(css, 0, &it); - while ((p = css_task_iter_next(&it))) { + while ((p = css_task_iter_next(&it))) update_classid_task(p, cs->classid); - cond_resched(); - } css_task_iter_end(&it); return 0; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 849380a622ef..78bbb912e502 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/if_vlan.h> +#include <net/dsa.h> #include <net/tcp.h> #include <net/udp.h> #include <net/addrconf.h> @@ -161,7 +162,7 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int cpu = smp_processor_id(); - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); @@ -637,15 +638,15 @@ EXPORT_SYMBOL_GPL(__netpoll_setup); int netpoll_setup(struct netpoll *np) { - struct net_device *ndev = NULL; + struct net_device *ndev = NULL, *dev = NULL; + struct net *net = current->nsproxy->net_ns; struct in_device *in_dev; int err; rtnl_lock(); - if (np->dev_name[0]) { - struct net *net = current->nsproxy->net_ns; + if (np->dev_name[0]) ndev = __dev_get_by_name(net, np->dev_name); - } + if (!ndev) { np_err(np, "%s doesn't exist, aborting\n", np->dev_name); err = -ENODEV; @@ -653,6 +654,19 @@ int netpoll_setup(struct netpoll *np) } dev_hold(ndev); + /* bring up DSA management network devices up first */ + for_each_netdev(net, dev) { + if (!netdev_uses_dsa(dev)) + continue; + + err = dev_change_flags(dev, dev->flags | IFF_UP, NULL); + if (err < 0) { + np_err(np, "%s failed to open %s\n", + np->dev_name, dev->name); + goto put; + } + } + if (netdev_master_upper_dev_get(ndev)) { np_err(np, "%s is a slave device, aborting\n", np->dev_name); err = -EBUSY; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 256b7954b720..8618242c677a 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset) struct task_struct *p; struct cgroup_subsys_state *css; + cgroup_sk_alloc_disable(); + cgroup_taskset_for_each(p, css, tset) { void *v = (void *)(unsigned 
long)css->cgroup->id; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index cb3b565ff5ad..1d20dd70879b 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3465,7 +3465,7 @@ static int pktgen_thread_worker(void *arg) struct pktgen_dev *pkt_dev = NULL; int cpu = t->cpu; - BUG_ON(smp_processor_id() != cpu); + WARN_ON(smp_processor_id() != cpu); init_waitqueue_head(&t->queue); complete(&t->start_done); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 944acb1a9f29..73c09b5864d7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2471,7 +2471,7 @@ static int do_setlink(const struct sk_buff *skb, sa->sa_family = dev->type; memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), dev->addr_len); - err = dev_set_mac_address(dev, sa, extack); + err = dev_set_mac_address_user(dev, sa, extack); kfree(sa); if (err) goto errout; @@ -3231,7 +3231,8 @@ replay: */ if (err < 0) { /* If device is not registered at all, free it now */ - if (dev->reg_state == NETREG_UNINITIALIZED) + if (dev->reg_state == NETREG_UNINITIALIZED || + dev->reg_state == NETREG_UNREGISTERED) free_netdev(dev); goto out; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 13f916ae0d29..407761f1debc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -431,7 +431,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, len += NET_SKB_PAD; - if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || + /* If requested length is either too small or too big, + * we use kmalloc() for skb->head allocation. + */ + if (len <= SKB_WITH_OVERHEAD(1024) || + len > SKB_WITH_OVERHEAD(PAGE_SIZE) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) || !netdev_pagefrag_enabled) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); @@ -497,13 +501,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb); struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, gfp_t gfp_mask) { - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + struct napi_alloc_cache *nc; struct sk_buff *skb; void *data; len += NET_SKB_PAD + NET_IP_ALIGN; - if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || + /* If requested length is either too small or too big, + * we use kmalloc() for skb->head allocation. + */ + if (len <= SKB_WITH_OVERHEAD(1024) || + len > SKB_WITH_OVERHEAD(PAGE_SIZE) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (!skb) @@ -511,6 +519,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, goto skb_success; } + nc = this_cpu_ptr(&napi_alloc_cache); len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); len = SKB_DATA_ALIGN(len); @@ -689,7 +698,7 @@ EXPORT_SYMBOL(__kfree_skb); * Drop a reference to the buffer and free it if the usage count has * hit zero. */ -void kfree_skb(struct sk_buff *skb) +void __fix_address kfree_skb(struct sk_buff *skb) { if (!skb_unref(skb)) return; @@ -2018,6 +2027,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) skb->csum = csum_block_sub(skb->csum, skb_checksum(skb, len, delta, 0), len); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; + int offset = skb_checksum_start_offset(skb) + skb->csum_offset; + + if (offset + sizeof(__sum16) > hdlen) + return -EINVAL; } return __pskb_trim(skb, len); } @@ -3276,7 +3291,19 @@ EXPORT_SYMBOL(skb_split); */ static int skb_prepare_for_shift(struct sk_buff *skb) { - return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + int ret = 0; + + if (skb_cloned(skb)) { + /* Save and restore truesize: pskb_expand_head() may reallocate + * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we + * cannot change truesize at this point. + */ + unsigned int save_truesize = skb->truesize; + + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + skb->truesize = save_truesize; + } + return ret; } /** @@ -4453,7 +4480,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk) if (skb && (skb_next = skb_peek(q))) { icmp_next = is_icmp_err_skb(skb_next); if (icmp_next) - sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; + sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; } spin_unlock_irqrestore(&q->lock, flags); @@ -5318,8 +5345,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; - - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; @@ -5516,7 +5543,7 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, lse->label_stack_entry = mpls_lse; skb_postpush_rcsum(skb, lse, MPLS_HLEN); - if (ethernet) + if (ethernet && mac_len >= ETH_HLEN) skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); skb->protocol = mpls_proto; @@ -5556,7 +5583,7 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); - if (ethernet) { + if (ethernet && mac_len >= ETH_HLEN) { struct ethhdr *hdr; /* use mpls_hdr() to get ethertype to account for VLANs. */ @@ -5619,6 +5646,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb) if (unlikely(!eth_p_mpls(skb->protocol))) return -EINVAL; + if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) + return -ENOMEM; + lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; if (!--ttl) @@ -5883,9 +5913,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - if (k == 0) { - /* split line is in frag list */ - pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + /* split line is in frag list */ + if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { + /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ + if (skb_has_frag_list(skb)) + kfree_skb_list(skb_shinfo(skb)->frag_list); + kfree(data); + return -ENOMEM; } skb_release_data(skb); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ded2d5227678..c839d9b1ae48 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -7,6 +7,7 @@ #include <net/sock.h> #include <net/tcp.h> +#include <net/tls.h> static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce) { @@ -169,10 +170,12 @@ static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i, struct scatterlist *sge = sk_msg_elem(msg, i); u32 len = sge->length; - if (charge) - sk_mem_uncharge(sk, len); - if (!msg->skb) + /* When the skb owns the memory we free it from consume_skb path. 
*/ + if (!msg->skb) { + if (charge) + sk_mem_uncharge(sk, len); put_page(sg_page(sge)); + } memset(sge, 0, sizeof(*sge)); return len; } @@ -396,28 +399,38 @@ out: } EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter); -static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) +static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk, + struct sk_buff *skb) { - struct sock *sk = psock->sk; - int copied = 0, num_sge; struct sk_msg *msg; + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) + return NULL; + + if (!sk_rmem_schedule(sk, skb, skb->truesize)) + return NULL; + msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); if (unlikely(!msg)) - return -EAGAIN; - if (!sk_rmem_schedule(sk, skb, skb->len)) { - kfree(msg); - return -EAGAIN; - } + return NULL; sk_msg_init(msg); - num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); + return msg; +} + +static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, + struct sk_psock *psock, + struct sock *sk, + struct sk_msg *msg) +{ + int num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); + int copied; + if (unlikely(num_sge < 0)) { kfree(msg); return num_sge; } - sk_mem_charge(sk, skb->len); copied = skb->len; msg->sg.start = 0; msg->sg.size = copied; @@ -429,6 +442,40 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) return copied; } +static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) +{ + struct sock *sk = psock->sk; + struct sk_msg *msg; + + msg = sk_psock_create_ingress_msg(sk, skb); + if (!msg) + return -EAGAIN; + + /* This will transition ownership of the data from the socket where + * the BPF program initiating the redirect was run to the socket + * we will eventually receive this data on. The data will be released + * by consume_skb(), reached from __tcp_bpf_recvmsg(), after it's been + * copied into user buffers. + */ + skb_set_owner_r(skb, sk); + return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); +} + +/* Puts an skb on the ingress queue of the socket already assigned to the + * skb. In this case we do not need to check memory limits or skb_set_owner_r + * because the skb is already accounted for here.
+ */ +static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb) +{ + struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); + struct sock *sk = psock->sk; + + if (unlikely(!msg)) + return -EAGAIN; + sk_msg_init(msg); + return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); +} + static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len, bool ingress) { @@ -512,7 +559,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED); refcount_set(&psock->refcnt, 1); - rcu_assign_sk_user_data(sk, psock); + rcu_assign_sk_user_data_nocopy(sk, psock); sock_hold(sk); return psock; @@ -686,13 +733,76 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp) return container_of(parser, struct sk_psock, parser); } -static void sk_psock_verdict_apply(struct sk_psock *psock, - struct sk_buff *skb, int verdict) +static void sk_psock_skb_redirect(struct sk_buff *skb) { struct sk_psock *psock_other; struct sock *sk_other; bool ingress; + sk_other = tcp_skb_bpf_redirect_fetch(skb); + if (unlikely(!sk_other)) { + kfree_skb(skb); + return; + } + psock_other = sk_psock(sk_other); + if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || + !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) { + kfree_skb(skb); + return; + } + + ingress = tcp_skb_bpf_ingress(skb); + if ((!ingress && sock_writeable(sk_other)) || + (ingress && + atomic_read(&sk_other->sk_rmem_alloc) <= + sk_other->sk_rcvbuf)) { + if (!ingress) + skb_set_owner_w(skb, sk_other); + skb_queue_tail(&psock_other->ingress_skb, skb); + schedule_work(&psock_other->work); + } else { + kfree_skb(skb); + } +} + +static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict) +{ + switch (verdict) { + case __SK_REDIRECT: + sk_psock_skb_redirect(skb); + break; + case __SK_PASS: + case __SK_DROP: + default: + break; + } +} + +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) +{ + struct bpf_prog *prog; + int ret = __SK_PASS; + + rcu_read_lock(); + prog = READ_ONCE(psock->progs.skb_verdict); + if (likely(prog)) { + tcp_skb_bpf_redirect_clear(skb); + ret = sk_psock_bpf_run(psock, prog, skb); + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); + } + sk_psock_tls_verdict_apply(skb, ret); + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); + +static void sk_psock_verdict_apply(struct sk_psock *psock, + struct sk_buff *skb, int verdict) +{ + struct tcp_skb_cb *tcp; + struct sock *sk_other; + int err = -EIO; + switch (verdict) { case __SK_PASS: sk_other = psock->sk; @@ -700,36 +810,27 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { goto out_free; } - if (atomic_read(&sk_other->sk_rmem_alloc) <= - sk_other->sk_rcvbuf) { - struct tcp_skb_cb *tcp = TCP_SKB_CB(skb); - tcp->bpf.flags |= BPF_F_INGRESS; + tcp = TCP_SKB_CB(skb); + tcp->bpf.flags |= BPF_F_INGRESS; + + /* If the queue is empty then we can submit directly + * into the msg queue. If it's not empty we have to + * queue work; otherwise we may get out-of-order (OOO) + * data. Any error from sk_psock_skb_ingress is handled + * by retrying later from the workqueue.
+ */ + if (skb_queue_empty(&psock->ingress_skb)) { + err = sk_psock_skb_ingress_self(psock, skb); + } + if (err < 0) { skb_queue_tail(&psock->ingress_skb, skb); schedule_work(&psock->work); - break; } - goto out_free; + break; case __SK_REDIRECT: - sk_other = tcp_skb_bpf_redirect_fetch(skb); - if (unlikely(!sk_other)) - goto out_free; - psock_other = sk_psock(sk_other); - if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || - !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) - goto out_free; - ingress = tcp_skb_bpf_ingress(skb); - if ((!ingress && sock_writeable(sk_other)) || - (ingress && - atomic_read(&sk_other->sk_rmem_alloc) <= - sk_other->sk_rcvbuf)) { - if (!ingress) - skb_set_owner_w(skb, sk_other); - skb_queue_tail(&psock_other->ingress_skb, skb); - schedule_work(&psock_other->work); - break; - } - /* fall-through */ + sk_psock_skb_redirect(skb); + break; case __SK_DROP: /* fall-through */ default: @@ -740,11 +841,18 @@ out_free: static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) { - struct sk_psock *psock = sk_psock_from_strp(strp); + struct sk_psock *psock; struct bpf_prog *prog; int ret = __SK_DROP; + struct sock *sk; rcu_read_lock(); + sk = strp->sk; + psock = sk_psock(sk); + if (unlikely(!psock)) { + kfree_skb(skb); + goto out; + } prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { skb_orphan(skb); @@ -752,8 +860,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); } - rcu_read_unlock(); sk_psock_verdict_apply(psock, skb, ret); +out: + rcu_read_unlock(); } static int sk_psock_strp_read_done(struct strparser *strp, int err) @@ -783,9 +892,13 @@ static void sk_psock_strp_data_ready(struct sock *sk) rcu_read_lock(); psock = sk_psock(sk); if (likely(psock)) { - write_lock_bh(&sk->sk_callback_lock); - strp_data_ready(&psock->parser.strp); - write_unlock_bh(&sk->sk_callback_lock); + if (tls_sw_has_ctx_rx(sk)) { + psock->parser.saved_data_ready(sk); + } else { + write_lock_bh(&sk->sk_callback_lock); + strp_data_ready(&psock->parser.strp); + write_unlock_bh(&sk->sk_callback_lock); + } } rcu_read_unlock(); } diff --git a/net/core/sock.c b/net/core/sock.c index 0adf7a9e5a90..6cfe52e61777 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -281,6 +281,9 @@ int sysctl_tstamp_allow_data __read_mostly = 1; DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); EXPORT_SYMBOL_GPL(memalloc_socks_key); +int sysctl_forced_caps_enabled __read_mostly = 0; +EXPORT_SYMBOL(sysctl_forced_caps_enabled); + /** * sk_set_memalloc - sets %SOCK_MEMALLOC * @sk: socket to set it on @@ -709,7 +712,7 @@ bool sk_mc_loop(struct sock *sk) return inet6_sk(sk)->mc_loop; #endif } - WARN_ON(1); + WARN_ON_ONCE(1); return true; } EXPORT_SYMBOL(sk_mc_loop); @@ -923,13 +926,10 @@ set_rcvbuf: } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); sock_reset_flag(sk, SOCK_RCVTSTAMPNS); - sock_reset_flag(sk, SOCK_TSTAMP_NEW); } break; case SO_TIMESTAMPING_NEW: - sock_set_flag(sk, SOCK_TSTAMP_NEW); - /* fall through */ case SO_TIMESTAMPING_OLD: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; @@ -958,16 +958,14 @@ set_rcvbuf: } sk->sk_tsflags = val; + sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); + if (val & SOF_TIMESTAMPING_RX_SOFTWARE) sock_enable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); - else { - if (optname == SO_TIMESTAMPING_NEW) - sock_reset_flag(sk, SOCK_TSTAMP_NEW); - + else sock_disable_timestamp(sk, (1UL << 
SOCK_TIMESTAMPING_RX_SOFTWARE)); - } break; case SO_RCVLOWAT: @@ -1110,7 +1108,7 @@ set_rcvbuf: case SO_MAX_PACING_RATE: { - unsigned long ulval = (val == ~0U) ? ~0UL : val; + unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val; if (sizeof(ulval) != sizeof(val) && optlen >= sizeof(ulval) && @@ -1419,6 +1417,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_mark; break; + case SO_MARK2: + v.val = sk->sk_mark2; + break; + case SO_RXQ_OVFL: v.val = sock_flag(sk, SOCK_RXQ_OVFL); break; @@ -1527,6 +1529,21 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_bound_dev_if; break; + case SO_NETNS_COOKIE: + lv = sizeof(u64); + if (len != lv) + return -EINVAL; + v.val64 = sock_net(sk)->net_cookie; + break; + + case SO_TVPC_INFO: + if (len > sizeof(sk->sk_tvpc_info)) + len = sizeof(sk->sk_tvpc_info); + + if (copy_to_user(optval, &sk->sk_tvpc_info, len)) + return -EFAULT; + goto lenout; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). @@ -1574,13 +1591,14 @@ static inline void sock_lock_init(struct sock *sk) */ static void sock_copy(struct sock *nsk, const struct sock *osk) { + const struct proto *prot = READ_ONCE(osk->sk_prot); #ifdef CONFIG_SECURITY_NETWORK void *sptr = nsk->sk_security; #endif memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, - osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); + prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); #ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; @@ -1679,6 +1697,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); + sk_tx_queue_clear(sk); } return sk; @@ -1794,16 +1813,17 @@ static void sk_init_common(struct sock *sk) */ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) { + struct proto *prot = READ_ONCE(sk->sk_prot); struct sock *newsk; bool is_charged = true; - newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); + newsk = sk_prot_alloc(prot, priority, sk->sk_family); if (newsk != NULL) { struct sk_filter *filter; sock_copy(newsk, sk); - newsk->sk_prot_creator = sk->sk_prot; + newsk->sk_prot_creator = prot; /* SANITY */ if (likely(newsk->sk_net_refcnt)) @@ -1836,7 +1856,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* sk->sk_memcg will be populated at accept() time */ newsk->sk_memcg = NULL; - cgroup_sk_alloc(&newsk->sk_cgrp_data); + cgroup_sk_clone(&newsk->sk_cgrp_data); rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); @@ -1868,6 +1888,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) goto out; } + /* Clear sk_user_data if parent had the pointer tagged + * as not suitable for copying when cloning. 
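
/*
 * Userspace view of the SO_NETNS_COOKIE getsockopt added above (a sketch;
 * the numeric option value below matches mainline but is an assumption for
 * this tree, so take it from the matching uapi headers). The kernel insists
 * on an exact 8-byte option length, so anything but a u64-sized buffer gets
 * -EINVAL.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71	/* assumed value; see the real uapi headers */
#endif

static int print_netns_cookie(int fd)
{
	uint64_t cookie;
	socklen_t len = sizeof(cookie);	/* must be exactly sizeof(u64) */

	if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) < 0)
		return -1;
	printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}
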
+ */ + if (sk_user_data_is_nocopy(newsk)) + RCU_INIT_POINTER(newsk->sk_user_data, NULL); + newsk->sk_err = 0; newsk->sk_err_soft = 0; newsk->sk_priority = 0; @@ -1895,6 +1921,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); + sk_tx_queue_clear(newsk); RCU_INIT_POINTER(newsk->sk_wq, NULL); if (newsk->sk_prot->sockets_allocated) @@ -1924,7 +1951,8 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) u32 max_segs = 1; sk_dst_set(sk, dst); - sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; + sk->sk_route_caps = dst->dev->features | + (sysctl_forced_caps_enabled ? sk->sk_route_forced_caps : 0); if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; sk->sk_route_caps &= ~sk->sk_route_nocaps; @@ -2029,16 +2057,10 @@ void skb_orphan_partial(struct sk_buff *skb) if (skb_is_tcp_pure_ack(skb)) return; - if (can_skb_orphan_partial(skb)) { - struct sock *sk = skb->sk; - - if (refcount_inc_not_zero(&sk->sk_refcnt)) { - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); - skb->destructor = sock_efree; - } - } else { + if (can_skb_orphan_partial(skb)) + skb_set_owner_sk_safe(skb, skb->sk); + else skb_orphan(skb); - } } EXPORT_SYMBOL(skb_orphan_partial); @@ -2065,6 +2087,18 @@ void sock_efree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_efree); +/* Buffer destructor for prefetch/receive path where reference count may + * not be held, e.g. for listen sockets. + */ +#ifdef CONFIG_INET +void sock_pfree(struct sk_buff *skb) +{ + if (sk_is_refcounted(skb->sk)) + sock_gen_put(skb->sk); +} +EXPORT_SYMBOL(sock_pfree); +#endif /* CONFIG_INET */ + kuid_t sock_i_uid(struct sock *sk) { kuid_t uid; @@ -2346,8 +2380,6 @@ static void sk_leave_memory_pressure(struct sock *sk) } } -/* On 32bit arches, an skb frag is limited to 2^15 */ -#define SKB_FRAG_PAGE_ORDER get_order(32768) DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); /** @@ -2734,6 +2766,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * } EXPORT_SYMBOL(sock_no_mmap); +/* + * When a file is received (via SCM_RIGHTS, etc), we must bump the + * various sock-based usage counts. + */ +void __receive_sock(struct file *file) +{ + struct socket *sock; + int error; + + /* + * The resulting value of "error" is ignored here since we only + * need to take action when the file is a socket and testing + * "sock" for NULL is sufficient. 
+ */ + sock = sock_from_file(file, &error); + if (sock) { + sock_update_netprioidx(&sock->sk->sk_cgrp_data); + sock_update_classid(&sock->sk->sk_cgrp_data); + } +} + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; @@ -3335,6 +3388,16 @@ static void sock_inuse_add(struct net *net, int val) } #endif +static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) +{ + if (!twsk_prot) + return; + kfree(twsk_prot->twsk_slab_name); + twsk_prot->twsk_slab_name = NULL; + kmem_cache_destroy(twsk_prot->twsk_slab); + twsk_prot->twsk_slab = NULL; +} + static void req_prot_cleanup(struct request_sock_ops *rsk_prot) { if (!rsk_prot) @@ -3405,7 +3468,7 @@ int proto_register(struct proto *prot, int alloc_slab) prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) - goto out_free_timewait_sock_slab_name; + goto out_free_timewait_sock_slab; } } @@ -3413,15 +3476,15 @@ int proto_register(struct proto *prot, int alloc_slab) ret = assign_proto_idx(prot); if (ret) { mutex_unlock(&proto_list_mutex); - goto out_free_timewait_sock_slab_name; + goto out_free_timewait_sock_slab; } list_add(&prot->node, &proto_list); mutex_unlock(&proto_list_mutex); return ret; -out_free_timewait_sock_slab_name: +out_free_timewait_sock_slab: if (alloc_slab && prot->twsk_prot) - kfree(prot->twsk_prot->twsk_slab_name); + tw_prot_cleanup(prot->twsk_prot); out_free_request_sock_slab: if (alloc_slab) { req_prot_cleanup(prot->rsk_prot); @@ -3445,12 +3508,7 @@ void proto_unregister(struct proto *prot) prot->slab = NULL; req_prot_cleanup(prot->rsk_prot); - - if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { - kmem_cache_destroy(prot->twsk_prot->twsk_slab); - kfree(prot->twsk_prot->twsk_slab_name); - prot->twsk_prot->twsk_slab = NULL; - } + tw_prot_cleanup(prot->twsk_prot); } EXPORT_SYMBOL(proto_unregister); diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index c13ffbd33d8d..c9c45b935f99 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -11,7 +11,7 @@ #include <linux/tcp.h> #include <linux/workqueue.h> #include <linux/nospec.h> - +#include <linux/cookie.h> #include <linux/inet_diag.h> #include <linux/sock_diag.h> @@ -19,16 +19,17 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); static struct workqueue_struct *broadcast_wq; -static atomic64_t cookie_gen; -u64 sock_gen_cookie(struct sock *sk) +DEFINE_COOKIE(sock_cookie); + +u64 __sock_gen_cookie(struct sock *sk) { while (1) { u64 res = atomic64_read(&sk->sk_cookie); if (res) return res; - res = atomic64_inc_return(&cookie_gen); + res = gen_cookie_next(&sock_cookie); atomic64_cmpxchg(&sk->sk_cookie, 0, res); } } diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 8291568b707f..e96407018401 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -10,6 +10,8 @@ #include <linux/skmsg.h> #include <linux/list.h> #include <linux/jhash.h> +#include <linux/sock_diag.h> +#include <net/udp.h> struct bpf_stab { struct bpf_map map; @@ -31,7 +33,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || + (attr->value_size != sizeof(u32) && + attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); @@ -71,7 +74,42 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct 
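
/*
 * The lock-free, assign-once cookie scheme __sock_gen_cookie() above uses,
 * transcribed to C11 atomics (a sketch; gen_next() stands in for
 * gen_cookie_next() and only needs to return unique non-zero values).
 * Racing callers may each generate a candidate, but only the first
 * compare-exchange from 0 wins, and every caller then re-reads and returns
 * the winning value.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t cookie_gen = 1;

static uint64_t gen_next(void)
{
	/* monotonically increasing, never 0 (until u64 wrap) */
	return atomic_fetch_add(&cookie_gen, 1);
}

static uint64_t get_cookie(_Atomic uint64_t *slot)
{
	for (;;) {
		uint64_t zero = 0;
		uint64_t res = atomic_load(slot);

		if (res)
			return res;
		atomic_compare_exchange_strong(slot, &zero, gen_next());
	}
}
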
bpf_prog *prog) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - ret = sock_map_prog_update(map, prog, attr->attach_type); + ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); + fdput(f); + return ret; +} + +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) +{ + u32 ufd = attr->target_fd; + struct bpf_prog *prog; + struct bpf_map *map; + struct fd f; + int ret; + + if (attr->attach_flags) + return -EINVAL; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + prog = bpf_prog_get(attr->attach_bpf_fd); + if (IS_ERR(prog)) { + ret = PTR_ERR(prog); + goto put_map; + } + + if (prog->type != ptype) { + ret = -EINVAL; + goto put_prog; + } + + ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); +put_prog: + bpf_prog_put(prog); +put_map: fdput(f); return ret; } @@ -139,12 +177,58 @@ static void sock_map_unref(struct sock *sk, void *link_raw) } } +static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock) +{ + struct proto *prot; + + sock_owned_by_me(sk); + + switch (sk->sk_type) { + case SOCK_STREAM: + prot = tcp_bpf_get_proto(sk, psock); + break; + + case SOCK_DGRAM: + prot = udp_bpf_get_proto(sk, psock); + break; + + default: + return -EINVAL; + } + + if (IS_ERR(prot)) + return PTR_ERR(prot); + + sk_psock_update_proto(sk, psock, prot); + return 0; +} + +static struct sk_psock *sock_map_psock_get_checked(struct sock *sk) +{ + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (psock) { + if (sk->sk_prot->close != sock_map_close) { + psock = ERR_PTR(-EBUSY); + goto out; + } + + if (!refcount_inc_not_zero(&psock->refcnt)) + psock = ERR_PTR(-EBUSY); + } +out: + rcu_read_unlock(); + return psock; +} + static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs, struct sock *sk) { struct bpf_prog *msg_parser, *skb_parser, *skb_verdict; - bool skb_progs, sk_psock_is_new = false; struct sk_psock *psock; + bool skb_progs; int ret; skb_verdict = READ_ONCE(progs->skb_verdict); @@ -170,7 +254,7 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs, } } - psock = sk_psock_get_checked(sk); + psock = sock_map_psock_get_checked(sk); if (IS_ERR(psock)) { ret = PTR_ERR(psock); goto out_progs; @@ -189,18 +273,14 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs, ret = -ENOMEM; goto out_progs; } - sk_psock_is_new = true; } if (msg_parser) psock_set_prog(&psock->progs.msg_parser, msg_parser); - if (sk_psock_is_new) { - ret = tcp_bpf_init(sk); - if (ret < 0) - goto out_drop; - } else { - tcp_bpf_reinit(sk); - } + + ret = sock_map_init_proto(sk, psock); + if (ret < 0) + goto out_drop; write_lock_bh(&sk->sk_callback_lock); if (skb_progs && !psock->parser.enabled) { @@ -228,6 +308,27 @@ out: return ret; } +static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk) +{ + struct sk_psock *psock; + int ret; + + psock = sock_map_psock_get_checked(sk); + if (IS_ERR(psock)) + return PTR_ERR(psock); + + if (!psock) { + psock = sk_psock_init(sk, map->numa_node); + if (!psock) + return -ENOMEM; + } + + ret = sock_map_init_proto(sk, psock); + if (ret < 0) + sk_psock_put(sk, psock); + return ret; +} + static void sock_map_free(struct bpf_map *map) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); @@ -277,7 +378,29 @@ static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) static void *sock_map_lookup(struct bpf_map *map, void *key) { - return ERR_PTR(-EOPNOTSUPP); + struct sock 
*sk; + + sk = __sock_map_lookup_elem(map, *(u32 *)key); + if (!sk || !sk_fullsock(sk)) + return NULL; + if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt)) + return NULL; + return sk; +} + +static void *sock_map_lookup_sys(struct bpf_map *map, void *key) +{ + struct sock *sk; + + if (map->value_size != sizeof(u64)) + return ERR_PTR(-ENOSPC); + + sk = __sock_map_lookup_elem(map, *(u32 *)key); + if (!sk) + return ERR_PTR(-ENOENT); + + __sock_gen_cookie(sk); + return &sk->sk_cookie; } static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, @@ -336,11 +459,15 @@ static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next) return 0; } +static bool sock_map_redirect_allowed(const struct sock *sk) +{ + return sk->sk_state != TCP_LISTEN; +} + static int sock_map_update_common(struct bpf_map *map, u32 idx, struct sock *sk, u64 flags) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - struct inet_connection_sock *icsk = inet_csk(sk); struct sk_psock_link *link; struct sk_psock *psock; struct sock *osk; @@ -351,14 +478,21 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx, return -EINVAL; if (unlikely(idx >= map->max_entries)) return -E2BIG; - if (unlikely(rcu_access_pointer(icsk->icsk_ulp_data))) + if (inet_csk_has_ulp(sk)) return -EINVAL; link = sk_psock_init_link(); if (!link) return -ENOMEM; - ret = sock_map_link(map, &stab->progs, sk); + /* Only sockets we can redirect into/from in BPF need to hold + * refs to parser/verdict progs and have their sk_data_ready + * and sk_write_space callbacks overridden. + */ + if (sock_map_redirect_allowed(sk)) + ret = sock_map_link(map, &stab->progs, sk); + else + ret = sock_map_link_no_progs(map, sk); if (ret < 0) goto out_free; @@ -393,23 +527,52 @@ out_free: static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops) { return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || - ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; + ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB || + ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB; } -static bool sock_map_sk_is_suitable(const struct sock *sk) +static bool sk_is_tcp(const struct sock *sk) { return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; } +static bool sk_is_udp(const struct sock *sk) +{ + return sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP; +} + +static bool sock_map_sk_is_suitable(const struct sock *sk) +{ + return sk_is_tcp(sk) || sk_is_udp(sk); +} + +static bool sock_map_sk_state_allowed(const struct sock *sk) +{ + if (sk_is_tcp(sk)) + return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN); + else if (sk_is_udp(sk)) + return sk_hashed(sk); + + return false; +} + static int sock_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - u32 ufd = *(u32 *)value; u32 idx = *(u32 *)key; struct socket *sock; struct sock *sk; int ret; + u64 ufd; + + if (map->value_size == sizeof(u64)) + ufd = *(u64 *)value; + else + ufd = *(u32 *)value; + if (ufd > S32_MAX) + return -EINVAL; sock = sockfd_lookup(ufd, &ret); if (!sock) @@ -425,7 +588,7 @@ static int sock_map_update_elem(struct bpf_map *map, void *key, } sock_map_sk_acquire(sk); - if (sk->sk_state != TCP_ESTABLISHED) + if (!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else ret = sock_map_update_common(map, idx, sk, flags); @@ -462,13 +625,17 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, struct bpf_map *, map, u32, key, u64, flags) { struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + struct sock *sk; if (unlikely(flags & 
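
/*
 * What the map_lookup_elem_sys_only hook above means for userspace (a
 * sketch; map creation and error handling trimmed): a sockmap created with
 * an 8-byte value_size can now be read from the syscall side, and the value
 * returned is the socket's cookie rather than a kernel pointer. With a
 * 4-byte value_size the same lookup fails with -ENOSPC.
 */
#include <stdint.h>
#include <bpf/bpf.h>

static int read_sock_cookie(int sockmap_fd, uint32_t key, uint64_t *cookie)
{
	/* requires the map to have value_size == sizeof(__u64) */
	return bpf_map_lookup_elem(sockmap_fd, &key, cookie);
}
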
~(BPF_F_INGRESS))) return SK_DROP; - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) + + sk = __sock_map_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = sk; return SK_PASS; } @@ -485,12 +652,17 @@ const struct bpf_func_proto bpf_sk_redirect_map_proto = { BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg, struct bpf_map *, map, u32, key, u64, flags) { + struct sock *sk; + if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - msg->flags = flags; - msg->sk_redir = __sock_map_lookup_elem(map, key); - if (!msg->sk_redir) + + sk = __sock_map_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; return SK_PASS; } @@ -504,18 +676,22 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = { .arg4_type = ARG_ANYTHING, }; +static int sock_map_btf_id; const struct bpf_map_ops sock_map_ops = { .map_alloc = sock_map_alloc, .map_free = sock_map_free, .map_get_next_key = sock_map_get_next_key, + .map_lookup_elem_sys_only = sock_map_lookup_sys, .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_map_delete_elem, .map_lookup_elem = sock_map_lookup, .map_release_uref = sock_map_release_progs, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_stab", + .map_btf_id = &sock_map_btf_id, }; -struct bpf_htab_elem { +struct bpf_shtab_elem { struct rcu_head rcu; u32 hash; struct sock *sk; @@ -523,14 +699,14 @@ struct bpf_htab_elem { u8 key[0]; }; -struct bpf_htab_bucket { +struct bpf_shtab_bucket { struct hlist_head head; raw_spinlock_t lock; }; -struct bpf_htab { +struct bpf_shtab { struct bpf_map map; - struct bpf_htab_bucket *buckets; + struct bpf_shtab_bucket *buckets; u32 buckets_num; u32 elem_size; struct sk_psock_progs progs; @@ -542,17 +718,17 @@ static inline u32 sock_hash_bucket_hash(const void *key, u32 len) return jhash(key, len, 0); } -static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab, - u32 hash) +static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab, + u32 hash) { return &htab->buckets[hash & (htab->buckets_num - 1)]; } -static struct bpf_htab_elem * +static struct bpf_shtab_elem * sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, u32 key_size) { - struct bpf_htab_elem *elem; + struct bpf_shtab_elem *elem; hlist_for_each_entry_rcu(elem, head, node) { if (elem->hash == hash && @@ -565,10 +741,10 @@ sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; - struct bpf_htab_bucket *bucket; - struct bpf_htab_elem *elem; + struct bpf_shtab_bucket *bucket; + struct bpf_shtab_elem *elem; WARN_ON_ONCE(!rcu_read_lock_held()); @@ -579,8 +755,8 @@ static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) return elem ? 
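
/*
 * A hypothetical sk_skb verdict program driving the bpf_sk_redirect_map()
 * path shown above (a sketch; the map and section names are illustrative).
 * With the changes above the helper looks the socket up first and returns
 * SK_DROP when it is absent or redirect to it is not allowed (e.g. a
 * listening TCP socket), instead of stashing a bad pointer in the skb.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u64);
} redir_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int verdict_prog(struct __sk_buff *skb)
{
	__u32 key = 0;

	/* deliver to the ingress queue of the socket stored at slot 0 */
	return bpf_sk_redirect_map(skb, &redir_map, key, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";
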
elem->sk : NULL; } -static void sock_hash_free_elem(struct bpf_htab *htab, - struct bpf_htab_elem *elem) +static void sock_hash_free_elem(struct bpf_shtab *htab, + struct bpf_shtab_elem *elem) { atomic_dec(&htab->count); kfree_rcu(elem, rcu); @@ -589,9 +765,9 @@ static void sock_hash_free_elem(struct bpf_htab *htab, static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, void *link_raw) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_elem *elem_probe, *elem = link_raw; - struct bpf_htab_bucket *bucket; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_elem *elem_probe, *elem = link_raw; + struct bpf_shtab_bucket *bucket; WARN_ON_ONCE(!rcu_read_lock_held()); bucket = sock_hash_select_bucket(htab, elem->hash); @@ -613,10 +789,10 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, static int sock_hash_delete_elem(struct bpf_map *map, void *key) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 hash, key_size = map->key_size; - struct bpf_htab_bucket *bucket; - struct bpf_htab_elem *elem; + struct bpf_shtab_bucket *bucket; + struct bpf_shtab_elem *elem; int ret = -ENOENT; hash = sock_hash_bucket_hash(key, key_size); @@ -634,12 +810,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) return ret; } -static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab, - void *key, u32 key_size, - u32 hash, struct sock *sk, - struct bpf_htab_elem *old) +static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab, + void *key, u32 key_size, + u32 hash, struct sock *sk, + struct bpf_shtab_elem *old) { - struct bpf_htab_elem *new; + struct bpf_shtab_elem *new; if (atomic_inc_return(&htab->count) > htab->map.max_entries) { if (!old) { @@ -663,11 +839,10 @@ static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab, static int sock_hash_update_common(struct bpf_map *map, void *key, struct sock *sk, u64 flags) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct inet_connection_sock *icsk = inet_csk(sk); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; - struct bpf_htab_elem *elem, *elem_new; - struct bpf_htab_bucket *bucket; + struct bpf_shtab_elem *elem, *elem_new; + struct bpf_shtab_bucket *bucket; struct sk_psock_link *link; struct sk_psock *psock; int ret; @@ -675,14 +850,21 @@ static int sock_hash_update_common(struct bpf_map *map, void *key, WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(flags > BPF_EXIST)) return -EINVAL; - if (unlikely(icsk->icsk_ulp_data)) + if (inet_csk_has_ulp(sk)) return -EINVAL; link = sk_psock_init_link(); if (!link) return -ENOMEM; - ret = sock_map_link(map, &htab->progs, sk); + /* Only sockets we can redirect into/from in BPF need to hold + * refs to parser/verdict progs and have their sk_data_ready + * and sk_write_space callbacks overridden. 
+ */ + if (sock_map_redirect_allowed(sk)) + ret = sock_map_link(map, &htab->progs, sk); + else + ret = sock_map_link_no_progs(map, sk); if (ret < 0) goto out_free; @@ -731,10 +913,17 @@ out_free: static int sock_hash_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - u32 ufd = *(u32 *)value; struct socket *sock; struct sock *sk; int ret; + u64 ufd; + + if (map->value_size == sizeof(u64)) + ufd = *(u64 *)value; + else + ufd = *(u32 *)value; + if (ufd > S32_MAX) + return -EINVAL; sock = sockfd_lookup(ufd, &ret); if (!sock) @@ -750,7 +939,7 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key, } sock_map_sk_acquire(sk); - if (sk->sk_state != TCP_ESTABLISHED) + if (!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else ret = sock_hash_update_common(map, key, sk, flags); @@ -763,8 +952,8 @@ out: static int sock_hash_get_next_key(struct bpf_map *map, void *key, void *key_next) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_elem *elem, *elem_next; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_elem *elem, *elem_next; u32 hash, key_size = map->key_size; struct hlist_head *head; int i = 0; @@ -778,7 +967,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key, goto find_first_elem; elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)), - struct bpf_htab_elem, node); + struct bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; @@ -790,7 +979,7 @@ find_first_elem: for (; i < htab->buckets_num; i++) { head = &sock_hash_select_bucket(htab, i)->head; elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), - struct bpf_htab_elem, node); + struct bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; @@ -802,7 +991,7 @@ find_first_elem: static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) { - struct bpf_htab *htab; + struct bpf_shtab *htab; int i, err; u64 cost; @@ -810,7 +999,8 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size == 0 || - attr->value_size != 4 || + (attr->value_size != sizeof(u32) && + attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); if (attr->key_size > MAX_BPF_STACK) @@ -823,25 +1013,29 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) bpf_map_init_from_attr(&htab->map, attr); htab->buckets_num = roundup_pow_of_two(htab->map.max_entries); - htab->elem_size = sizeof(struct bpf_htab_elem) + + htab->elem_size = sizeof(struct bpf_shtab_elem) + round_up(htab->map.key_size, 8); if (htab->buckets_num == 0 || - htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) { + htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) { err = -EINVAL; goto free_htab; } - cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) + + cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) + (u64) htab->elem_size * htab->map.max_entries; if (cost >= U32_MAX - PAGE_SIZE) { err = -EINVAL; goto free_htab; } + err = bpf_map_charge_init(&htab->map.memory, cost); + if (err) + goto free_htab; htab->buckets = bpf_map_area_alloc(htab->buckets_num * - sizeof(struct bpf_htab_bucket), + sizeof(struct bpf_shtab_bucket), htab->map.numa_node); if (!htab->buckets) { + bpf_map_charge_finish(&htab->map.memory); err = -ENOMEM; goto free_htab; } @@ -859,9 +1053,10 @@ free_htab: static void 
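Both update paths in this hunk now accept the socket fd as either a 4-byte or an 8-byte map value, and reject anything above S32_MAX since it cannot be a file descriptor. The decoding rule in isolation (decode_sock_fd() is an illustrative name, not a kernel helper):

#include <stdint.h>
#include <errno.h>
#include <string.h>

#define S32_MAX 0x7fffffff

/* Decode a socket fd from a map value that may be 4 or 8 bytes wide,
 * mirroring the checks in sock_{map,hash}_update_elem(): a u64 value
 * larger than S32_MAX can never be a valid file descriptor.
 */
static int decode_sock_fd(const void *value, uint32_t value_size,
                          int64_t *fd_out)
{
        uint64_t ufd;

        if (value_size == sizeof(uint64_t)) {
                memcpy(&ufd, value, sizeof(uint64_t));
        } else if (value_size == sizeof(uint32_t)) {
                uint32_t v32;

                memcpy(&v32, value, sizeof(uint32_t));
                ufd = v32;
        } else {
                return -EINVAL;
        }

        if (ufd > S32_MAX)
                return -EINVAL;

        *fd_out = (int64_t)ufd;
        return 0;
}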
sock_hash_free(struct bpf_map *map) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_bucket *bucket; - struct bpf_htab_elem *elem; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_bucket *bucket; + struct hlist_head unlink_list; + struct bpf_shtab_elem *elem; struct hlist_node *node; int i; @@ -872,13 +1067,32 @@ static void sock_hash_free(struct bpf_map *map) synchronize_rcu(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); - hlist_for_each_entry_safe(elem, node, &bucket->head, node) { - hlist_del_rcu(&elem->node); + + /* We are racing with sock_hash_delete_from_link to + * enter the spin-lock critical section. Every socket on + * the list is still linked to sockhash. Since link + * exists, psock exists and holds a ref to socket. That + * lets us to grab a socket ref too. + */ + raw_spin_lock_bh(&bucket->lock); + hlist_for_each_entry(elem, &bucket->head, node) + sock_hold(elem->sk); + hlist_move_list(&bucket->head, &unlink_list); + raw_spin_unlock_bh(&bucket->lock); + + /* Process removed entries out of atomic context to + * block for socket lock before deleting the psock's + * link to sockhash. + */ + hlist_for_each_entry_safe(elem, node, &unlink_list, node) { + hlist_del(&elem->node); lock_sock(elem->sk); rcu_read_lock(); sock_map_unref(elem->sk, elem); rcu_read_unlock(); release_sock(elem->sk); + sock_put(elem->sk); + sock_hash_free_elem(htab, elem); } } @@ -892,9 +1106,36 @@ static void sock_hash_free(struct bpf_map *map) kfree(htab); } +static void *sock_hash_lookup_sys(struct bpf_map *map, void *key) +{ + struct sock *sk; + + if (map->value_size != sizeof(u64)) + return ERR_PTR(-ENOSPC); + + sk = __sock_hash_lookup_elem(map, key); + if (!sk) + return ERR_PTR(-ENOENT); + + __sock_gen_cookie(sk); + return &sk->sk_cookie; +} + +static void *sock_hash_lookup(struct bpf_map *map, void *key) +{ + struct sock *sk; + + sk = __sock_hash_lookup_elem(map, key); + if (!sk || !sk_fullsock(sk)) + return NULL; + if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt)) + return NULL; + return sk; +} + static void sock_hash_release_progs(struct bpf_map *map) { - psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs); + psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs); } BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops, @@ -923,13 +1164,17 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, struct bpf_map *, map, void *, key, u64, flags) { struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + struct sock *sk; if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) + + sk = __sock_hash_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = sk; return SK_PASS; } @@ -946,12 +1191,17 @@ const struct bpf_func_proto bpf_sk_redirect_hash_proto = { BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg, struct bpf_map *, map, void *, key, u64, flags) { + struct sock *sk; + if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; - msg->flags = flags; - msg->sk_redir = __sock_hash_lookup_elem(map, key); - if (!msg->sk_redir) + + sk = __sock_hash_lookup_elem(map, key); + if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; + + msg->flags = flags; + msg->sk_redir = sk; return SK_PASS; } @@ -965,15 +1215,19 @@ const struct bpf_func_proto 
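The rewritten sock_hash_free() pins each socket under the bucket spinlock, splices the whole bucket onto a private list, and only then takes the sleeping socket lock outside the atomic section. The same two-phase teardown pattern in portable C, with a mutex standing in for the raw spinlock and a plain counter for the socket refcount:

#include <pthread.h>
#include <stdlib.h>

struct elem {
        struct elem *next;
        int refs;               /* stands in for the socket refcount */
};

struct bucket {
        pthread_mutex_t lock;   /* stands in for the raw spinlock */
        struct elem *head;
};

/* Phase 1: under the bucket lock, take a reference on every element
 * and move the whole chain onto a private list.  Phase 2: walk the
 * private list with the lock dropped, where sleeping operations
 * (lock_sock() in the kernel code) are allowed, then put the
 * references and free the elements.
 */
static void bucket_free_all(struct bucket *b)
{
        struct elem *unlink_list, *e, *next;

        pthread_mutex_lock(&b->lock);
        for (e = b->head; e; e = e->next)
                e->refs++;
        unlink_list = b->head;
        b->head = NULL;
        pthread_mutex_unlock(&b->lock);

        for (e = unlink_list; e; e = next) {
                next = e->next;
                /* ...sleeping teardown work would go here... */
                e->refs--;
                free(e);
        }
}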
bpf_msg_redirect_hash_proto = { .arg4_type = ARG_ANYTHING, }; +static int sock_hash_map_btf_id; const struct bpf_map_ops sock_hash_ops = { .map_alloc = sock_hash_alloc, .map_free = sock_hash_free, .map_get_next_key = sock_hash_get_next_key, .map_update_elem = sock_hash_update_elem, .map_delete_elem = sock_hash_delete_elem, - .map_lookup_elem = sock_map_lookup, + .map_lookup_elem = sock_hash_lookup, + .map_lookup_elem_sys_only = sock_hash_lookup_sys, .map_release_uref = sock_hash_release_progs, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_shtab", + .map_btf_id = &sock_hash_map_btf_id, }; static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) @@ -982,7 +1236,7 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) case BPF_MAP_TYPE_SOCKMAP: return &container_of(map, struct bpf_stab, map)->progs; case BPF_MAP_TYPE_SOCKHASH: - return &container_of(map, struct bpf_htab, map)->progs; + return &container_of(map, struct bpf_shtab, map)->progs; default: break; } @@ -991,31 +1245,36 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) } int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - u32 which) + struct bpf_prog *old, u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); + struct bpf_prog **pprog; if (!progs) return -EOPNOTSUPP; switch (which) { case BPF_SK_MSG_VERDICT: - psock_set_prog(&progs->msg_parser, prog); + pprog = &progs->msg_parser; break; case BPF_SK_SKB_STREAM_PARSER: - psock_set_prog(&progs->skb_parser, prog); + pprog = &progs->skb_parser; break; case BPF_SK_SKB_STREAM_VERDICT: - psock_set_prog(&progs->skb_verdict, prog); + pprog = &progs->skb_verdict; break; default: return -EOPNOTSUPP; } + if (old) + return psock_replace_prog(pprog, prog, old); + + psock_set_prog(pprog, prog); return 0; } -void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link) +static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link) { switch (link->map->map_type) { case BPF_MAP_TYPE_SOCKMAP: @@ -1028,3 +1287,54 @@ void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link) break; } } + +static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock) +{ + struct sk_psock_link *link; + + while ((link = sk_psock_link_pop(psock))) { + sock_map_unlink(sk, link); + sk_psock_free_link(link); + } +} + +void sock_map_unhash(struct sock *sk) +{ + void (*saved_unhash)(struct sock *sk); + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + if (sk->sk_prot->unhash) + sk->sk_prot->unhash(sk); + return; + } + + saved_unhash = psock->saved_unhash; + sock_map_remove_links(sk, psock); + rcu_read_unlock(); + saved_unhash(sk); +} + +void sock_map_close(struct sock *sk, long timeout) +{ + void (*saved_close)(struct sock *sk, long timeout); + struct sk_psock *psock; + + lock_sock(sk); + rcu_read_lock(); + psock = sk_psock(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + release_sock(sk); + return sk->sk_prot->close(sk, timeout); + } + + saved_close = psock->saved_close; + sock_map_remove_links(sk, psock); + rcu_read_unlock(); + release_sock(sk); + saved_close(sk, timeout); +} diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index f3ceec93f392..7227c7a35314 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -16,27 +16,8 @@ DEFINE_SPINLOCK(reuseport_lock); -#define REUSEPORT_MIN_ID 1 static DEFINE_IDA(reuseport_ida); -int reuseport_get_id(struct sock_reuseport *reuse) -{ - int id; - - if 
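sock_map_prog_update() now threads the caller's expected 'old' program through, so a detach succeeds only if that expectation still holds, i.e. compare-and-exchange semantics. A minimal sketch with C11 atomics; the errno on mismatch is illustrative, and psock_replace_prog() itself is a kernel-internal helper:

#include <stdatomic.h>
#include <errno.h>

struct prog;    /* opaque stand-in for struct bpf_prog */

/* Unconditionally install @prog, like psock_set_prog(). */
static void set_prog(_Atomic(struct prog *) *pprog, struct prog *prog)
{
        atomic_store(pprog, prog);
}

/* Install @prog only if the slot still holds @old; otherwise the
 * caller raced with another update and gets an error back, which is
 * the expected-old behaviour psock_replace_prog() provides.
 */
static int replace_prog(_Atomic(struct prog *) *pprog,
                        struct prog *prog, struct prog *old)
{
        struct prog *expected = old;

        if (!atomic_compare_exchange_strong(pprog, &expected, prog))
                return -EBUSY;
        return 0;
}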
(reuse->reuseport_id) - return reuse->reuseport_id; - - id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0, - /* Called under reuseport_lock */ - GFP_ATOMIC); - if (id < 0) - return id; - - reuse->reuseport_id = id; - - return reuse->reuseport_id; -} - static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { unsigned int size = sizeof(struct sock_reuseport) + @@ -55,6 +36,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) int reuseport_alloc(struct sock *sk, bool bind_inany) { struct sock_reuseport *reuse; + int id, ret = 0; /* bh lock used since this function call may precede hlist lock in * soft irq of receive path or setsockopt from process context @@ -78,10 +60,18 @@ int reuseport_alloc(struct sock *sk, bool bind_inany) reuse = __reuseport_alloc(INIT_SOCKS); if (!reuse) { - spin_unlock_bh(&reuseport_lock); - return -ENOMEM; + ret = -ENOMEM; + goto out; } + id = ida_alloc(&reuseport_ida, GFP_ATOMIC); + if (id < 0) { + kfree(reuse); + ret = id; + goto out; + } + + reuse->reuseport_id = id; reuse->socks[0] = sk; reuse->num_socks = 1; reuse->bind_inany = bind_inany; @@ -90,7 +80,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany) out: spin_unlock_bh(&reuseport_lock); - return 0; + return ret; } EXPORT_SYMBOL(reuseport_alloc); @@ -112,6 +102,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; + more_reuse->has_conns = reuse->has_conns; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); @@ -135,8 +126,7 @@ static void reuseport_free_rcu(struct rcu_head *head) reuse = container_of(head, struct sock_reuseport, rcu); sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1)); - if (reuse->reuseport_id) - ida_simple_remove(&reuseport_ida, reuse->reuseport_id); + ida_free(&reuseport_ida, reuse->reuseport_id); kfree(reuse); } @@ -200,12 +190,15 @@ void reuseport_detach_sock(struct sock *sk) reuse = rcu_dereference_protected(sk->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)); - /* At least one of the sk in this reuseport group is added to - * a bpf map. Notify the bpf side. The bpf map logic will - * remove the sk if it is indeed added to a bpf map. + /* Notify the bpf side. The sk may be added to a sockarray + * map. If so, sockarray logic will remove it from the map. + * + * Other bpf map types that work with reuseport, like sockmap, + * don't need an explicit callback from here. They override sk + * unhash/close ops to remove the sk from the map before we + * get to this point. 
*/ - if (reuse->reuseport_id) - bpf_sk_reuseport_detach(sk); + bpf_sk_reuseport_detach(sk); rcu_assign_pointer(sk->sk_reuseport_cb, NULL); @@ -301,7 +294,7 @@ select_by_hash: i = j = reciprocal_scale(hash, socks); while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) { i++; - if (i >= reuse->num_socks) + if (i >= socks) i = 0; if (i == j) goto out; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 8dcb6ce98d3a..12fbe51e5679 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -21,6 +21,7 @@ #include <net/net_ratelimit.h> #include <net/busy_poll.h> #include <net/pkt_sched.h> +#include <net/inet_connection_sock.h> static int two __maybe_unused = 2; static int min_sndbuf = SOCK_MIN_SNDBUF; @@ -277,7 +278,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && !ret) { if (jit_enable < 2 || - (jit_enable == 2 && bpf_dump_raw_ok())) { + (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) { *(int *)table->data = jit_enable; if (jit_enable == 2) pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n"); @@ -584,6 +585,22 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_pingpong_thresh", + .data = &sysctl_tcp_pingpong_thresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + }, + { + .procname = "forced_caps_enabled", + .data = &sysctl_forced_caps_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d2a4553bcf39..e9ecbb57df45 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; + int prio; int err; if (!ops) @@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); + for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { + if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { + err = -EINVAL; + goto err; + } + } + err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; @@ -1757,6 +1765,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, fn = &reply_funcs[dcb->cmd]; if (!fn->cb) return -EOPNOTSUPP; + if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) + return -EPERM; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; diff --git a/net/dccp/diag.c b/net/dccp/diag.c index 73ef73a218ff..8a82c5a2c5a8 100644 --- a/net/dccp/diag.c +++ b/net/dccp/diag.c @@ -46,16 +46,15 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, } static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc); + inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r); } -static int dccp_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int dccp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req); + return inet_diag_dump_one_icsk(&dccp_hashinfo, cb, req); } static const 
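The reuseport selection fix compares the wraparound bound against the same snapshot ('socks') that scaled the hash, so concurrent growth of the socket array cannot push the walk past the range that was hashed over. Standalone sketch with reciprocal_scale() spelled out as the kernel defines it:

#include <stdint.h>

/* reciprocal_scale(): map a 32-bit hash uniformly onto [0, n) without
 * a modulo, exactly as the kernel helper does.
 */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t n)
{
        return (uint32_t)(((uint64_t)hash * n) >> 32);
}

/* Pick the first usable socket at or after the hashed start index.
 * Both the start index and the wraparound bound use the same snapshot
 * @socks, so growth of the array during the walk cannot be observed.
 */
static int select_sock(const int *usable, uint32_t socks, uint32_t hash)
{
        uint32_t i, j;

        i = j = reciprocal_scale(hash, socks);
        while (!usable[i]) {
                i++;
                if (i >= socks)
                        i = 0;
                if (i == j)
                        return -1;      /* wrapped: nothing usable */
        }
        return (int)i;
}

int main(void)
{
        int usable[4] = { 0, 0, 1, 0 };

        return select_sock(usable, 4, 0xdeadbeefu) == 2 ? 0 : 1;
}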
struct inet_diag_handler dccp_diag_handler = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 1e5e08cc0bfc..9f73ccf46c9b 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) return 0; /* discard, don't send a reset here */ + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); + return 0; + } + if (dccp_bad_service_code(sk, service)) { dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 5bad08dc4316..cb61a9d281f6 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1139,14 +1139,14 @@ static int __init dccp_init(void) inet_hashinfo_init(&dccp_hashinfo); rc = inet_hashinfo2_init_mod(&dccp_hashinfo); if (rc) - goto out_fail; + goto out_free_percpu; rc = -ENOBUFS; dccp_hashinfo.bind_bucket_cachep = kmem_cache_create("dccp_bind_bucket", sizeof(struct inet_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!dccp_hashinfo.bind_bucket_cachep) - goto out_free_percpu; + goto out_free_hashinfo2; /* * Size and allocate the main established and bind bucket @@ -1242,6 +1242,8 @@ out_free_dccp_ehash: free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); out_free_bind_bucket_cachep: kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); +out_free_hashinfo2: + inet_hashinfo2_free_mod(&dccp_hashinfo); out_free_percpu: percpu_counter_destroy(&dccp_orphan_count); out_fail: @@ -1265,6 +1267,7 @@ static void __exit dccp_fini(void) kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); dccp_ackvec_exit(); dccp_sysctl_exit(); + inet_hashinfo2_free_mod(&dccp_hashinfo); percpu_counter_destroy(&dccp_orphan_count); } diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 3e1a90669006..ad53eb31d40f 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -302,7 +302,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { int err = PTR_ERR(key->payload.data[dns_key_error]); diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 29e2bd5cc5af..7dce11ab2806 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -9,6 +9,7 @@ menuconfig NET_DSA tristate "Distributed Switch Architecture" depends on HAVE_NET_DSA depends on BRIDGE || BRIDGE=n + select GRO_CELLS select NET_SWITCHDEV select PHYLINK select NET_DEVLINK diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 43120a3fb06f..ca80f86995e6 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -238,7 +238,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, if (dsa_skb_defer_rx_timestamp(p, skb)) return 0; - netif_receive_skb(skb); + gro_cells_receive(&p->gcells, skb); return 0; } diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 716d265ba8ca..70e6fc2edd30 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -399,18 +399,21 @@ static int dsa_switch_setup(struct dsa_switch *ds) ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev); if (!ds->slave_mii_bus) { err = -ENOMEM; - goto unregister_notifier; + goto teardown; } dsa_slave_mii_bus_init(ds); err = mdiobus_register(ds->slave_mii_bus); if (err < 0) - goto unregister_notifier; + goto teardown; } return 0; +teardown: + if (ds->ops->teardown) + ds->ops->teardown(ds); unregister_notifier: dsa_switch_unregister_notifier(ds); 
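The dsa_switch_setup() and dccp_init() changes above both tighten goto-based error unwinding so the labels mirror setup order exactly. The canonical shape of that pattern, reduced to a runnable sketch:

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/* Each failure jumps to the label that unwinds exactly what has been
 * set up so far; the labels run in the reverse of acquisition order.
 */
static int setup_three(struct res **a, struct res **b, struct res **c)
{
        *a = acquire();
        if (!*a)
                goto err;
        *b = acquire();
        if (!*b)
                goto err_free_a;
        *c = acquire();
        if (!*c)
                goto err_free_b;
        return 0;

err_free_b:
        release(*b);
err_free_a:
        release(*a);
err:
        return -1;
}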
unregister_devlink: @@ -461,18 +464,12 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) err = dsa_port_setup(dp); if (err) - goto ports_teardown; + continue; } } return 0; -ports_teardown: - for (i = 0; i < port; i++) - dsa_port_teardown(&ds->ports[i]); - - dsa_switch_teardown(ds); - switch_teardown: for (i = 0; i < device; i++) { ds = dst->ds[i]; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index bf9947c577b6..d8e850724d13 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/netpoll.h> #include <net/dsa.h> +#include <net/gro_cells.h> enum { DSA_NOTIFIER_AGEING_TIME, @@ -68,6 +69,8 @@ struct dsa_slave_priv { struct pcpu_sw_netstats *stats64; + struct gro_cells gcells; + /* DSA port data, such as switch, port index, etc. */ struct dsa_port *dp; diff --git a/net/dsa/master.c b/net/dsa/master.c index 3255dfc97f86..be0b4ed3b7d8 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -259,7 +259,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev) { struct dsa_port *cpu_dp = dev->dsa_ptr; - dev->netdev_ops = cpu_dp->orig_ndo_ops; + if (cpu_dp->orig_ndo_ops) + dev->netdev_ops = cpu_dp->orig_ndo_ops; cpu_dp->orig_ndo_ops = NULL; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 23c2210fa7ec..06f8874d53ee 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1409,6 +1409,7 @@ int dsa_slave_create(struct dsa_port *port) if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; slave_dev->hw_features |= NETIF_F_HW_TC; + slave_dev->features |= NETIF_F_LLTX; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; if (!IS_ERR_OR_NULL(port->mac)) ether_addr_copy(slave_dev->dev_addr, port->mac); @@ -1430,6 +1431,11 @@ int dsa_slave_create(struct dsa_port *port) free_netdev(slave_dev); return -ENOMEM; } + + ret = gro_cells_init(&p->gcells, slave_dev); + if (ret) + goto out_free; + p->dp = port; INIT_LIST_HEAD(&p->mall_tc_list); INIT_WORK(&port->xmit_work, dsa_port_xmit_work); @@ -1442,7 +1448,7 @@ int dsa_slave_create(struct dsa_port *port) ret = dsa_slave_phy_setup(slave_dev); if (ret) { netdev_err(master, "error %d setting up slave phy\n", ret); - goto out_free; + goto out_gcells; } dsa_slave_notify(slave_dev, DSA_PORT_REGISTER); @@ -1461,6 +1467,8 @@ out_phy: phylink_disconnect_phy(p->dp->pl); rtnl_unlock(); phylink_destroy(p->dp->pl); +out_gcells: + gro_cells_destroy(&p->gcells); out_free: free_percpu(p->stats64); free_netdev(slave_dev); @@ -1481,6 +1489,7 @@ void dsa_slave_destroy(struct net_device *slave_dev) dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); unregister_netdev(slave_dev); phylink_destroy(dp->pl); + gro_cells_destroy(&p->gcells); free_percpu(p->stats64); free_netdev(slave_dev); } diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index e8eaa804ccb9..d6200ff98200 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -13,6 +13,16 @@ #define DSA_HLEN 4 #define EDSA_HLEN 8 +#define FRAME_TYPE_TO_CPU 0x00 +#define FRAME_TYPE_FORWARD 0x03 + +#define TO_CPU_CODE_MGMT_TRAP 0x00 +#define TO_CPU_CODE_FRAME2REG 0x01 +#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02 +#define TO_CPU_CODE_POLICY_TRAP 0x03 +#define TO_CPU_CODE_ARP_MIRROR 0x04 +#define TO_CPU_CODE_POLICY_MIRROR 0x05 + static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); @@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 *edsa_header; + int 
frame_type; + int code; int source_device; int source_port; @@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, /* * Check that frame type is either TO_CPU or FORWARD. */ - if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0) + frame_type = edsa_header[0] >> 6; + + switch (frame_type) { + case FRAME_TYPE_TO_CPU: + code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1); + + /* + * Mark the frame to never egress on any port of the same switch + * unless it's a trapped IGMP/MLD packet, in which case the + * bridge might want to forward it. + */ + if (code != TO_CPU_CODE_IGMP_MLD_TRAP) + skb->offload_fwd_mark = 1; + + break; + + case FRAME_TYPE_FORWARD: + skb->offload_fwd_mark = 1; + break; + + default: return NULL; + } /* * Determine source device and port. @@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, 2 * ETH_ALEN); } - skb->offload_fwd_mark = 1; - return skb; } diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c index b5705cba8318..edc505e07125 100644 --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c @@ -13,15 +13,20 @@ #define MTK_HDR_LEN 4 #define MTK_HDR_XMIT_UNTAGGED 0 #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 +#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2 #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) +#define MTK_HDR_XMIT_SA_DIS BIT(6) static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); + u8 xmit_tpid; u8 *mtk_tag; - bool is_vlan_skb = true; + unsigned char *dest = eth_hdr(skb)->h_dest; + bool is_multicast_skb = is_multicast_ether_addr(dest) && + !is_broadcast_ether_addr(dest); /* Build the special tag after the MAC Source Address. If VLAN header * is present, it's required that VLAN header and special tag is @@ -29,13 +34,20 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, * the both special and VLAN tag at the same time and then look up VLAN * table with VID. */ - if (!skb_vlan_tagged(skb)) { + switch (skb->protocol) { + case htons(ETH_P_8021Q): + xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100; + break; + case htons(ETH_P_8021AD): + xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8; + break; + default: if (skb_cow_head(skb, MTK_HDR_LEN) < 0) return NULL; + xmit_tpid = MTK_HDR_XMIT_UNTAGGED; skb_push(skb, MTK_HDR_LEN); memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN); - is_vlan_skb = false; } mtk_tag = skb->data + 2 * ETH_ALEN; @@ -43,12 +55,15 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, /* Mark tag attribute on special tag insertion to notify hardware * whether that's a combined special tag with 802.1Q header. */ - mtk_tag[0] = is_vlan_skb ? 
MTK_HDR_XMIT_TAGGED_TPID_8100 : - MTK_HDR_XMIT_UNTAGGED; + mtk_tag[0] = xmit_tpid; mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; + /* Disable SA learning for multicast frames */ + if (unlikely(is_multicast_skb)) + mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; + /* Tag control information is kept for 802.1Q */ - if (!is_vlan_skb) { + if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) { mtk_tag[2] = 0; mtk_tag[3] = 0; } @@ -61,6 +76,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, { int port; __be16 *phdr, hdr; + unsigned char *dest = eth_hdr(skb)->h_dest; + bool is_multicast_skb = is_multicast_ether_addr(dest) && + !is_broadcast_ether_addr(dest); if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) return NULL; @@ -86,6 +104,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb->dev) return NULL; + /* Only unicast or broadcast frames are offloaded */ + if (likely(!is_multicast_skb)) + skb->offload_fwd_mark = 1; + return skb; } diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index c7bd6c49fadf..5dd463a18e4c 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -229,6 +229,7 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); if (master) { skb->dev = master->dev; + skb_reset_mac_header(skb); hsr_forward_skb(skb, master); } else { atomic_long_inc(&dev->tx_dropped); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ddd9605bad04..bf3ecf792688 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -349,12 +349,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) { struct hsr_frame_info frame; - if (skb_mac_header(skb) != skb->data) { - WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); - goto out_drop; - } - if (hsr_fill_frame_info(&frame, skb, port) < 0) goto out_drop; hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 002f341f3564..4a9200729a32 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -318,7 +318,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, node_dst = find_node_by_addr_A(&port->hsr->node_db, eth_hdr(skb)->h_dest); if (!node_dst) { - WARN_ONCE(1, "%s: Unknown node\n", __func__); + if (net_ratelimit()) + netdev_err(skb->dev, "%s: Unknown node\n", __func__); return; } if (port->type != node_dst->addr_B_port) diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index fae21c863b1f..55c0b2e872a5 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -61,10 +61,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, else multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); - if (!data[IFLA_HSR_VERSION]) + if (!data[IFLA_HSR_VERSION]) { hsr_version = 0; - else + } else { hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); + if (hsr_version > 1) { + NL_SET_ERR_MSG_MOD(extack, + "Only versions 0..1 are supported"); + return -EINVAL; + } + } return hsr_dev_finalize(dev, link, multicast_spec, hsr_version); } diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 6d091e419d3e..d19c40c684e8 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]); if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { - if (!info->attrs[IEEE802154_ATTR_PAN_ID] 
&& - !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] || - info->attrs[IEEE802154_ATTR_HW_ADDR])) + if (!info->attrs[IEEE802154_ATTR_PAN_ID]) return -EINVAL; desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); @@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->device_addr.mode = IEEE802154_ADDR_SHORT; desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); } else { + if (!info->attrs[IEEE802154_ATTR_HW_ADDR]) + return -EINVAL; + desc->device_addr.mode = IEEE802154_ADDR_LONG; desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); } diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index ffcfcef76291..328bb9f5342e 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -836,8 +836,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, goto nla_put_failure; #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + goto out; + if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) goto nla_put_failure; + +out: #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ genlmsg_end(msg, hdr); @@ -1400,6 +1405,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb, u32 changed = 0; int ret; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { u8 enabled; @@ -1506,6 +1514,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { + err = skb->len; + goto out_err; + } + if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1560,7 +1573,11 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) struct ieee802154_llsec_key_id id = { }; u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { }; - if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_SEC_KEY] || + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] || @@ -1608,7 +1625,11 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; struct ieee802154_llsec_key_id id; - if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_SEC_KEY] || + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) return -EINVAL; if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) @@ -1672,6 +1693,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { + err = skb->len; + goto out_err; + } + if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1758,6 +1784,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info) struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_device dev_desc; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return 
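The ieee802154 and nl802154 fixes in this stretch share one shape: verify that an optional netlink attribute is actually present before passing it to nla_parse_nested_deprecated() or nla_get_*(). A toy version of the guard, with a stand-in parser:

#include <errno.h>
#include <stddef.h>

struct nlattr;  /* opaque stand-in */

/* Stand-in for nla_parse_nested_deprecated(). */
static int parse_nested(const struct nlattr *attr)
{
        return attr ? 0 : -EINVAL;
}

/* The broken pattern handed a possibly-NULL attribute straight to the
 * parser; the fixed pattern rejects the request up front when the
 * attribute is absent.
 */
static int handle_request(const struct nlattr *sec_key_attr)
{
        if (!sec_key_attr || parse_nested(sec_key_attr) < 0)
                return -EINVAL;

        /* ...use the parsed attribute... */
        return 0;
}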
-EOPNOTSUPP; + if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE], &dev_desc) < 0) return -EINVAL; @@ -1773,7 +1802,11 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; __le64 extended_addr; - if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack)) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_SEC_DEVICE] || + nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]) @@ -1841,6 +1874,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { + err = skb->len; + goto out_err; + } + if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1898,6 +1936,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info struct ieee802154_llsec_device_key key; __le64 extended_addr; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0) return -EINVAL; @@ -1929,7 +1970,11 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info struct ieee802154_llsec_device_key key; __le64 extended_addr; - if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack)) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]) @@ -2002,6 +2047,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { + err = skb->len; + goto out_err; + } + if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -2086,6 +2136,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb, struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_seclevel sl; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], &sl) < 0) return -EINVAL; @@ -2101,6 +2154,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb, struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_seclevel sl; + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (!info->attrs[NL802154_ATTR_SEC_LEVEL] || llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], &sl) < 0) diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index d57ecfaf89d4..9e1a186a3671 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -61,7 +61,12 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o obj-$(CONFIG_NET_SOCK_MSG) += tcp_bpf.o +obj-$(CONFIG_BPF_STREAM_PARSER) += udp_bpf.o obj-$(CONFIG_NETLABEL) += cipso_ipv4.o obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ 
xfrm4_output.o xfrm4_protocol.o + +ifeq ($(CONFIG_BPF_JIT),y) +obj-$(CONFIG_BPF_SYSCALL) += bpf_tcp_ca.o +endif diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0a25205ee69a..5226b0aeb073 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -451,12 +451,12 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (err) return err; - return __inet_bind(sk, uaddr, addr_len, false, true); + return __inet_bind(sk, uaddr, addr_len, BIND_WITH_LOCK); } EXPORT_SYMBOL(inet_bind); int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, - bool force_bind_address_no_port, bool with_lock) + u32 flags) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); @@ -507,7 +507,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, * would be illegal to use them (multicast/broadcast) in * which case the sending device address is used. */ - if (with_lock) + if (flags & BIND_WITH_LOCK) lock_sock(sk); /* Check these errors (active socket, double bind). */ @@ -521,16 +521,18 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, /* Make sure we are allowed to bind here. */ if (snum || !(inet->bind_address_no_port || - force_bind_address_no_port)) { + (flags & BIND_FORCE_ADDRESS_NO_PORT))) { if (sk->sk_prot->get_port(sk, snum)) { inet->inet_saddr = inet->inet_rcv_saddr = 0; err = -EADDRINUSE; goto out_release_sock; } - err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk); - if (err) { - inet->inet_saddr = inet->inet_rcv_saddr = 0; - goto out_release_sock; + if (!(flags & BIND_FROM_BPF)) { + err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk); + if (err) { + inet->inet_saddr = inet->inet_rcv_saddr = 0; + goto out_release_sock; + } } } @@ -544,7 +546,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, sk_dst_reset(sk); err = 0; out_release_sock: - if (with_lock) + if (flags & BIND_WITH_LOCK) release_sock(sk); out: return err; @@ -754,12 +756,11 @@ do_err: } EXPORT_SYMBOL(inet_accept); - /* * This does both peername and sockname. */ int inet_getname(struct socket *sock, struct sockaddr *uaddr, - int peer) + int peer) { struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); @@ -780,6 +781,11 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, sin->sin_port = inet->inet_sport; sin->sin_addr.s_addr = addr; } + if (cgroup_bpf_enabled) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, + peer ? 
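__inet_bind() trades two bool parameters for a single u32 flags word, which scales better as the BPF bind hook adds a third caller variation. A sketch of the same refactor; the BIND_* names match the diff, but the bit positions and the body here are illustrative:

#include <stdint.h>

#define BIND_FORCE_ADDRESS_NO_PORT      (1u << 0)
#define BIND_WITH_LOCK                  (1u << 1)
#define BIND_FROM_BPF                   (1u << 2)

/* One flags word replaces a growing list of bool parameters, so call
 * sites read as bind_demo(fd, BIND_WITH_LOCK | BIND_FROM_BPF) instead
 * of an opaque bind_demo(fd, false, true, true).
 */
static int bind_demo(int sock, uint32_t flags)
{
        (void)sock;

        if (flags & BIND_WITH_LOCK) {
                /* lock_sock(sk) in the kernel code */
        }
        if (!(flags & BIND_FROM_BPF)) {
                /* run the cgroup BPF post-bind hook */
        }
        return 0;
}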
BPF_CGROUP_INET4_GETPEERNAME : + BPF_CGROUP_INET4_GETSOCKNAME, + NULL); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); return sizeof(*sin); } @@ -1921,6 +1927,8 @@ static int __init inet_init(void) sock_skb_cb_check_size(sizeof(struct inet_skb_parm)); + raw_hashinfo_init(&raw_v4_hashinfo); + rc = proto_register(&tcp_prot, 1); if (rc) goto out; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 05eb42f347e8..7b951992c372 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -125,6 +125,7 @@ static int arp_constructor(struct neighbour *neigh); static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb); static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); static void parp_redo(struct sk_buff *skb); +static int arp_is_multicast(const void *pkey); static const struct neigh_ops arp_generic_ops = { .family = AF_INET, @@ -156,6 +157,7 @@ struct neigh_table arp_tbl = { .key_eq = arp_key_eq, .constructor = arp_constructor, .proxy_redo = parp_redo, + .is_multicast = arp_is_multicast, .id = "arp_cache", .parms = { .tbl = &arp_tbl, @@ -928,6 +930,10 @@ static void parp_redo(struct sk_buff *skb) arp_process(dev_net(skb->dev), NULL, skb); } +static int arp_is_multicast(const void *pkey) +{ + return ipv4_is_multicast(*((__be32 *)pkey)); +} /* * Receive an arp request from the device layer. diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c new file mode 100644 index 000000000000..574972bc7299 --- /dev/null +++ b/net/ipv4/bpf_tcp_ca.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <linux/types.h> +#include <linux/bpf_verifier.h> +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/filter.h> +#include <net/tcp.h> + +static u32 optional_ops[] = { + offsetof(struct tcp_congestion_ops, init), + offsetof(struct tcp_congestion_ops, release), + offsetof(struct tcp_congestion_ops, set_state), + offsetof(struct tcp_congestion_ops, cwnd_event), + offsetof(struct tcp_congestion_ops, in_ack_event), + offsetof(struct tcp_congestion_ops, pkts_acked), + offsetof(struct tcp_congestion_ops, min_tso_segs), + offsetof(struct tcp_congestion_ops, sndbuf_expand), + offsetof(struct tcp_congestion_ops, cong_control), +}; + +static u32 unsupported_ops[] = { + offsetof(struct tcp_congestion_ops, get_info), +}; + +static const struct btf_type *tcp_sock_type; +static u32 tcp_sock_id, sock_id; + +static int bpf_tcp_ca_init(struct btf *btf) +{ + s32 type_id; + + type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + sock_id = type_id; + + type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + tcp_sock_id = type_id; + tcp_sock_type = btf_type_by_id(btf, tcp_sock_id); + + return 0; +} + +static bool is_optional(u32 member_offset) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(optional_ops); i++) { + if (member_offset == optional_ops[i]) + return true; + } + + return false; +} + +static bool is_unsupported(u32 member_offset) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) { + if (member_offset == unsupported_ops[i]) + return true; + } + + return false; +} + +extern struct btf *btf_vmlinux; + +static bool bpf_tcp_ca_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + + if 
(!btf_ctx_access(off, size, type, prog, info)) + return false; + + if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id) + /* promote it to tcp_sock */ + info->btf_id = tcp_sock_id; + + return true; +} + +static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log, + const struct btf_type *t, int off, + int size, enum bpf_access_type atype, + u32 *next_btf_id) +{ + size_t end; + + if (atype == BPF_READ) + return btf_struct_access(log, t, off, size, atype, next_btf_id); + + if (t != tcp_sock_type) { + bpf_log(log, "only read is supported\n"); + return -EACCES; + } + + switch (off) { + case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv): + end = offsetofend(struct inet_connection_sock, icsk_ca_priv); + break; + case offsetof(struct inet_connection_sock, icsk_ack.pending): + end = offsetofend(struct inet_connection_sock, + icsk_ack.pending); + break; + case offsetof(struct tcp_sock, snd_cwnd): + end = offsetofend(struct tcp_sock, snd_cwnd); + break; + case offsetof(struct tcp_sock, snd_cwnd_cnt): + end = offsetofend(struct tcp_sock, snd_cwnd_cnt); + break; + case offsetof(struct tcp_sock, snd_ssthresh): + end = offsetofend(struct tcp_sock, snd_ssthresh); + break; + case offsetof(struct tcp_sock, ecn_flags): + end = offsetofend(struct tcp_sock, ecn_flags); + break; + default: + bpf_log(log, "no write support to tcp_sock at off %d\n", off); + return -EACCES; + } + + if (off + size > end) { + bpf_log(log, + "write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n", + off, size, end); + return -EACCES; + } + + return NOT_INIT; +} + +BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt) +{ + /* bpf_tcp_ca prog cannot have NULL tp */ + __tcp_send_ack((struct sock *)tp, rcv_nxt); + return 0; +} + +static const struct bpf_func_proto bpf_tcp_send_ack_proto = { + .func = bpf_tcp_send_ack, + .gpl_only = false, + /* In case we want to report error later */ + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_ANYTHING, + .btf_id = &tcp_sock_id, +}; + +static const struct bpf_func_proto * +bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, + const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_tcp_send_ack: + return &bpf_tcp_send_ack_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = { + .get_func_proto = bpf_tcp_ca_get_func_proto, + .is_valid_access = bpf_tcp_ca_is_valid_access, + .btf_struct_access = bpf_tcp_ca_btf_struct_access, +}; + +static int bpf_tcp_ca_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + const struct tcp_congestion_ops *utcp_ca; + struct tcp_congestion_ops *tcp_ca; + size_t tcp_ca_name_len; + int prog_fd; + u32 moff; + + utcp_ca = (const struct tcp_congestion_ops *)udata; + tcp_ca = (struct tcp_congestion_ops *)kdata; + + moff = btf_member_bit_offset(t, member) / 8; + switch (moff) { + case offsetof(struct tcp_congestion_ops, flags): + if (utcp_ca->flags & ~TCP_CONG_MASK) + return -EINVAL; + tcp_ca->flags = utcp_ca->flags; + return 1; + case offsetof(struct tcp_congestion_ops, name): + tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name)); + if (!tcp_ca_name_len || + tcp_ca_name_len == sizeof(utcp_ca->name)) + return -EINVAL; + if (tcp_ca_find(utcp_ca->name)) + return -EEXIST; + memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name)); + return 1; + } + + if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL)) + return 
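The struct_ops write filter above whitelists a few tcp_sock members and then checks that the access window [off, off + size) does not spill past the matched member. The same check as a standalone helper, with offsetofend() defined locally as the kernel does and a toy struct in place of tcp_sock:

#include <stddef.h>
#include <errno.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct demo_sock {
        uint32_t snd_cwnd;
        uint32_t snd_ssthresh;
        uint8_t  ecn_flags;
};

/* Allow a write only if it starts at a whitelisted member and does not
 * run past that member's end - the shape of the check in
 * bpf_tcp_ca_btf_struct_access().
 */
static int check_write(int off, int size)
{
        size_t end;

        switch (off) {
        case offsetof(struct demo_sock, snd_cwnd):
                end = offsetofend(struct demo_sock, snd_cwnd);
                break;
        case offsetof(struct demo_sock, snd_ssthresh):
                end = offsetofend(struct demo_sock, snd_ssthresh);
                break;
        default:
                return -EACCES; /* member not writable */
        }

        if ((size_t)off + size > end)
                return -EACCES; /* write spills past the member */

        return 0;
}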
0; + + /* Ensure bpf_prog is provided for compulsory func ptr */ + prog_fd = (int)(*(unsigned long *)(udata + moff)); + if (!prog_fd && !is_optional(moff) && !is_unsupported(moff)) + return -EINVAL; + + return 0; +} + +static int bpf_tcp_ca_check_member(const struct btf_type *t, + const struct btf_member *member) +{ + if (is_unsupported(btf_member_bit_offset(t, member) / 8)) + return -ENOTSUPP; + return 0; +} + +static int bpf_tcp_ca_reg(void *kdata) +{ + return tcp_register_congestion_control(kdata); +} + +static void bpf_tcp_ca_unreg(void *kdata) +{ + tcp_unregister_congestion_control(kdata); +} + +/* Avoid sparse warning. It is only used in bpf_struct_ops.c. */ +extern struct bpf_struct_ops bpf_tcp_congestion_ops; + +struct bpf_struct_ops bpf_tcp_congestion_ops = { + .verifier_ops = &bpf_tcp_ca_verifier_ops, + .reg = bpf_tcp_ca_reg, + .unreg = bpf_tcp_ca_unreg, + .check_member = bpf_tcp_ca_check_member, + .init_member = bpf_tcp_ca_init_member, + .init = bpf_tcp_ca_init, + .name = "tcp_congestion_ops", +}; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 0bd10a1f477f..e290a0c9e928 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -519,16 +519,10 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) ret_val = -ENOENT; goto doi_remove_return; } - if (!refcount_dec_and_test(&doi_def->refcount)) { - spin_unlock(&cipso_v4_doi_list_lock); - ret_val = -EBUSY; - goto doi_remove_return; - } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); - cipso_v4_cache_invalidate(); - call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); + cipso_v4_doi_putdef(doi_def); ret_val = 0; doi_remove_return: @@ -585,9 +579,6 @@ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) if (!refcount_dec_and_test(&doi_def->refcount)) return; - spin_lock(&cipso_v4_doi_list_lock); - list_del_rcu(&doi_def->list); - spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); @@ -1258,7 +1249,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; @@ -1439,7 +1431,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index e4632bd2026d..a27d034c85cc 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -276,6 +276,7 @@ static struct in_device *inetdev_init(struct net_device *dev) err = devinet_sysctl_register(in_dev); if (err) { in_dev->dead = 1; + neigh_parms_release(&arp_tbl, in_dev->arp_parms); in_dev_put(in_dev); in_dev = NULL; goto out; @@ -614,12 +615,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, return NULL; } -static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) +static int ip_mc_autojoin_config(struct net *net, bool join, + const struct in_ifaddr *ifa) { +#if defined(CONFIG_IP_MULTICAST) struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ifa->ifa_address, .imr_ifindex = ifa->ifa_dev->dev->ifindex, }; + struct sock *sk = net->ipv4.mc_autojoin_sk; int ret; ASSERT_RTNL(); @@ -632,6 +636,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) release_sock(sk); return ret; +#else + return -EOPNOTSUPP; +#endif } static int 
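The name-member initialisation in bpf_tcp_ca_init_member() accepts a string only if it is non-empty and NUL-terminated inside the fixed-size buffer; strnlen() returning the buffer size means no terminator was found. The same test in isolation (CA_NAME_MAX is a stand-in for the kernel's TCP_CA_NAME_MAX):

#include <string.h>
#include <errno.h>

#define CA_NAME_MAX 16  /* stand-in for TCP_CA_NAME_MAX */

static int copy_ca_name(char dst[CA_NAME_MAX], const char *src)
{
        size_t len = strnlen(src, CA_NAME_MAX);

        /* Empty, or no NUL found within the buffer: reject, as
         * bpf_tcp_ca_init_member() does before its memcpy().
         */
        if (!len || len == CA_NAME_MAX)
                return -EINVAL;

        memset(dst, 0, CA_NAME_MAX);
        memcpy(dst, src, len);
        return 0;
}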
inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -675,7 +682,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, continue; if (ipv4_is_multicast(ifa->ifa_address)) - ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa); + ip_mc_autojoin_config(net, false, ifa); __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); return 0; } @@ -940,8 +947,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, */ set_ifa_lifetime(ifa, valid_lft, prefered_lft); if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) { - int ret = ip_mc_config(net->ipv4.mc_autojoin_sk, - true, ifa); + int ret = ip_mc_autojoin_config(net, true, ifa); if (ret < 0) { inet_free_ifa(ifa); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 5c967764041f..ef20f550d2f8 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -272,12 +272,12 @@ static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struc int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { u8 *tail; - u8 *vaddr; int nfrags; int esph_offset; struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; + unsigned int allocsz; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { @@ -287,6 +287,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * return err; } + allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); + if (allocsz > ESP_SKB_FRAG_MAXSIZE) + goto cow; + if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { nfrags = 1; @@ -314,14 +318,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * page = pfrag->page; get_page(page); - vaddr = kmap_atomic(page); - - tail = vaddr + pfrag->offset; + tail = page_address(page) + pfrag->offset; esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); - kunmap_atomic(vaddr); - nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index e2e219c7854a..8c0af30fb067 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -63,10 +63,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head, sp->olen++; xo = xfrm_offload(skb); - if (!xo) { - xfrm_state_put(x); + if (!xo) goto out_reset; - } } xo->flags |= XFRM_GRO; @@ -179,10 +177,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) && !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev) - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); else if (!(features & NETIF_F_HW_ESP_TX_CSUM) && !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM)) - esp_features = features & ~NETIF_F_CSUM_MASK; + esp_features = features & ~(NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); xo->flags |= XFRM_GSO_SEGMENT; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 48bf3b9be475..2ce191019526 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -302,7 +302,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, - .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), + .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK, .flowi4_scope = scope, .flowi4_mark = vmark ? 
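The fib_compute_spec_dst() change in this hunk swaps RT_TOS() for an explicit mask with IPTOS_RT_MASK, which additionally clears bit 1 of the TOS byte before the route lookup. With the mask definitions reproduced here for illustration (verify against the tree), the difference shows in a few lines:

#include <stdio.h>
#include <stdint.h>

/* Definitions as in the kernel headers, reproduced for illustration. */
#define IPTOS_TOS_MASK  0x1E
#define RT_TOS(tos)     ((tos) & IPTOS_TOS_MASK)
#define IPTOS_RT_MASK   (IPTOS_TOS_MASK & ~3)   /* 0x1C */

int main(void)
{
        uint8_t tos = 0x17;     /* arbitrary header TOS with low bits set */

        /* RT_TOS() keeps bit 1; IPTOS_RT_MASK clears bits 0-1, so the
         * routing lookup no longer sees the ECN-adjacent bit.
         */
        printf("RT_TOS:        0x%02x\n", RT_TOS(tos));          /* 0x16 */
        printf("IPTOS_RT_MASK: 0x%02x\n", tos & IPTOS_RT_MASK);  /* 0x14 */
        return 0;
}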
skb->mark : 0, }; @@ -319,17 +319,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev) { bool dev_match = false; #ifdef CONFIG_IP_ROUTE_MULTIPATH - int ret; + if (unlikely(fi->nh)) { + dev_match = nexthop_uses_dev(fi->nh, dev); + } else { + int ret; - for (ret = 0; ret < fib_info_num_path(fi); ret++) { - const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); + for (ret = 0; ret < fib_info_num_path(fi); ret++) { + const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); - if (nhc->nhc_dev == dev) { - dev_match = true; - break; - } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) { - dev_match = true; - break; + if (nhc_l3mdev_matches_dev(nhc, dev)) { + dev_match = true; + break; + } } } #else @@ -371,6 +372,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_flags = 0; fl4.flowi4_uid = sock_net_uid(net, NULL); + fl4.flowi4_multipath_hash = 0; no_addr = idev->ifa_list == NULL; @@ -704,7 +706,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla, cfg->fc_gw4 = *((__be32 *)via->rtvia_addr); break; case AF_INET6: -#ifdef CONFIG_IPV6 +#if IS_ENABLED(CONFIG_IPV6) if (alen != sizeof(struct in6_addr)) { NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA"); return -EINVAL; @@ -833,7 +835,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, if (has_gw && has_via) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); - goto errout; + return -EINVAL; } return 0; @@ -928,7 +930,6 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, else filter->dump_exceptions = false; - filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC); filter->flags = rtm->rtm_flags; filter->protocol = rtm->rtm_protocol; filter->rt_type = rtm->rtm_type; @@ -1000,7 +1001,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) if (filter.table_id) { tb = fib_get_table(net, filter.table_id); if (!tb) { - if (filter.dump_all_families) + if (rtnl_msg_family(cb->nlh) != PF_INET) return skb->len; NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist"); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f1888c683426..b1b3220917ca 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1100,7 +1100,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - if (table) + if (table && table != RT_TABLE_MAIN) tbl = fib_get_table(net, table); if (tbl) @@ -1999,7 +1999,7 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; - struct fib_nh *nh; + struct fib_nh_common *nhc; if (fa->fa_slen != slen) continue; @@ -2022,8 +2022,8 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) fa->fa_type != RTN_UNICAST) continue; - nh = fib_info_nh(next_fi, 0); - if (!nh->fib_nh_gw4 || nh->fib_nh_scope != RT_SCOPE_LINK) + nhc = fib_info_nhc(next_fi, 0); + if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK) continue; fib_alias_accessed(fa); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index f12fa8da6127..51673d00bbea 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1751,7 +1751,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector 
*local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) @@ -2010,7 +2010,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info) struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; - hlist_for_each_entry_rcu(tb, head, tb_hlist) + hlist_for_each_entry_rcu(tb, head, tb_hlist, + lockdep_rtnl_is_held()) __fib_info_notify_update(net, tb, info); } } @@ -2455,6 +2456,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) " %zd bytes, size of tnode: %zd bytes.\n", LEAF_SIZE, TNODE_SIZE(0)); + rcu_read_lock(); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; @@ -2474,7 +2476,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) trie_show_usage(seq, t->stats); #endif } + cond_resched_rcu(); } + rcu_read_unlock(); return 0; } diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 66fdbfe5447c..5d1e6fe9d838 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -128,7 +128,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, * to 0 and sets the configured key in the * inner erspan header field */ - if (greh->protocol == htons(ETH_P_ERSPAN) || + if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) || greh->protocol == htons(ETH_P_ERSPAN2)) { struct erspan_base_hdr *ershdr; diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 4de7e962d3da..c840141876bc 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ac95ba78b903..dd8fae89be72 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -239,7 +239,7 @@ static struct { /** * icmp_global_allow - Are we allowed to send one more ICMP message ? * - * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec. + * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. * Returns false if we reached the limit and can not send another packet. 
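
The comment above is the entire algorithm: credits accrue at roughly sysctl_icmp_msgs_per_sec up to a burst cap, and each allowed ICMP message spends about one credit; the hunk just below additionally randomizes the spend so a remote observer cannot count messages to learn the exact bucket state. A compilable, single-threaded toy of the same bucket (constants and names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MSGS_PER_SEC 1000   /* stand-in for sysctl_icmp_msgs_per_sec */
    #define MSGS_BURST     50   /* stand-in for sysctl_icmp_msgs_burst   */

    static uint64_t stamp;      /* time of last refill, in ms */
    static uint32_t credit;     /* tokens currently available */

    static bool toy_icmp_global_allow(uint64_t now_ms)
    {
            uint64_t delta = now_ms - stamp;
            uint32_t incr = (uint32_t)(MSGS_PER_SEC * delta / 1000);

            if (incr) {         /* refill, capped at the burst size */
                    stamp = now_ms;
                    credit = credit + incr < MSGS_BURST ? credit + incr
                                                        : MSGS_BURST;
            }
            if (!credit)
                    return false;
            /* spend 0..2 tokens, one on average, mirroring the
             * prandom_u32_max(3) randomization added below */
            uint32_t spend = (uint32_t)(rand() % 3);

            credit = credit > spend ? credit - spend : 0;
            return true;
    }

    int main(void)
    {
            int sent = 0;

            for (int i = 0; i < 200; i++)   /* a burst of attempts in one ms */
                    sent += toy_icmp_global_allow(1);
            printf("allowed %d of 200 bursty sends\n", sent);
            return 0;
    }

Because the spend is randomized, even an attacker who can trigger ICMP replies at will only sees a noisy view of the remaining credit.
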
* Note: called with BH disabled */ @@ -267,7 +267,10 @@ bool icmp_global_allow(void) } credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); if (credit) { - credit--; + /* We want to use a credit of one in average, but need to randomize + * it for security reasons. + */ + credit = max_t(int, credit - prandom_u32_max(3), 0); rc = true; } WRITE_ONCE(icmp_global.credit, credit); @@ -427,7 +430,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ipcm_init(&ipc); inet->tos = ip_hdr(skb)->tos; - sk->sk_mark = mark; + ipc.sockc.mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); @@ -709,10 +712,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; - sk->sk_mark = mark; ipcm_init(&ipc); ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; + ipc.sockc.mark = mark; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); @@ -747,6 +750,40 @@ out:; } EXPORT_SYMBOL(__icmp_send); +#if IS_ENABLED(CONFIG_NF_NAT) +#include <net/netfilter/nf_conntrack.h> +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct sk_buff *cloned_skb = NULL; + struct ip_options opts = { 0 }; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + __icmp_send(skb_in, type, code, info, &opts); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct iphdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct iphdr)))) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; + __icmp_send(skb_in, type, code, info, &opts); + ip_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +EXPORT_SYMBOL(icmp_ndo_send); +#endif static void icmp_socket_deliver(struct sk_buff *skb, u32 info) { diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index b0010c710802..d4583fe25865 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -23,18 +23,23 @@ #include <net/sock_reuseport.h> #include <net/addrconf.h> +int sysctl_tcp_pingpong_thresh __read_mostly = 3; +EXPORT_SYMBOL(sysctl_tcp_pingpong_thresh); + #if IS_ENABLED(CONFIG_IPV6) -/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6 - * only, and any IPv4 addresses if not IPv6 only - * match_wildcard == false: addresses must be exactly the same, i.e. - * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, - * and 0.0.0.0 equals to 0.0.0.0 only +/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses + * if IPv6 only, and any IPv4 addresses + * if not IPv6 only + * match_sk*_wildcard == false: addresses must be exactly the same, i.e. 
+ * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, + * and 0.0.0.0 equals to 0.0.0.0 only */ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, const struct in6_addr *sk2_rcv_saddr6, __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, bool sk1_ipv6only, bool sk2_ipv6only, - bool match_wildcard) + bool match_sk1_wildcard, + bool match_sk2_wildcard) { int addr_type = ipv6_addr_type(sk1_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; @@ -44,8 +49,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, if (!sk2_ipv6only) { if (sk1_rcv_saddr == sk2_rcv_saddr) return true; - if (!sk1_rcv_saddr || !sk2_rcv_saddr) - return match_wildcard; + return (match_sk1_wildcard && !sk1_rcv_saddr) || + (match_sk2_wildcard && !sk2_rcv_saddr); } return false; } @@ -53,11 +58,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) return true; - if (addr_type2 == IPV6_ADDR_ANY && match_wildcard && + if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) return true; - if (addr_type == IPV6_ADDR_ANY && match_wildcard && + if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard && !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) return true; @@ -69,18 +74,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, } #endif -/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses - * match_wildcard == false: addresses must be exactly the same, i.e. - * 0.0.0.0 only equals to 0.0.0.0 +/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses + * match_sk*_wildcard == false: addresses must be exactly the same, i.e. + * 0.0.0.0 only equals to 0.0.0.0 */ static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, - bool sk2_ipv6only, bool match_wildcard) + bool sk2_ipv6only, bool match_sk1_wildcard, + bool match_sk2_wildcard) { if (!sk2_ipv6only) { if (sk1_rcv_saddr == sk2_rcv_saddr) return true; - if (!sk1_rcv_saddr || !sk2_rcv_saddr) - return match_wildcard; + return (match_sk1_wildcard && !sk1_rcv_saddr) || + (match_sk2_wildcard && !sk2_rcv_saddr); } return false; } @@ -96,10 +102,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, sk2->sk_rcv_saddr, ipv6_only_sock(sk), ipv6_only_sock(sk2), + match_wildcard, match_wildcard); #endif return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, - ipv6_only_sock(sk2), match_wildcard); + ipv6_only_sock(sk2), match_wildcard, + match_wildcard); } EXPORT_SYMBOL(inet_rcv_saddr_equal); @@ -273,61 +281,18 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, tb->fast_rcv_saddr, sk->sk_rcv_saddr, tb->fast_ipv6_only, - ipv6_only_sock(sk), true); + ipv6_only_sock(sk), true, false); #endif return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, - ipv6_only_sock(sk), true); + ipv6_only_sock(sk), true, false); } -/* Obtain a reference to a local port for the given sock, - * if snum is zero it means select any available local port. 
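
The directional flags are the substance of this change: a wildcard address now only matches when its own side's match_sk*_wildcard permits it. That is what lets sk_reuseport_match() above pass (true, false) - the cached fast-path address may be a wildcard, but a new wildcard socket must not reuse the fast path of a socket bound to a specific address. A minimal self-checking sketch of the IPv4 rule (hypothetical names, plain C):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Address 0 is the 0.0.0.0 wildcard; sk1/sk2 mirror the two sockets
     * being compared and each side has its own wildcard permission. */
    static bool toy_rcv_saddr_equal(uint32_t sk1_addr, uint32_t sk2_addr,
                                    bool match_sk1_wildcard,
                                    bool match_sk2_wildcard)
    {
            if (sk1_addr == sk2_addr)
                    return true;
            return (match_sk1_wildcard && !sk1_addr) ||
                   (match_sk2_wildcard && !sk2_addr);
    }

    int main(void)
    {
            uint32_t any = 0, loopback = 0x7f000001;

            /* bind-time conflict check: wildcards match in both directions */
            assert(toy_rcv_saddr_equal(any, loopback, true, true));

            /* sk_reuseport_match() passes (true, false): a cached wildcard
             * (sk1 side) still matches any new address... */
            assert(toy_rcv_saddr_equal(any, loopback, true, false));

            /* ...but a new wildcard socket (sk2 side) no longer matches a
             * cached specific address - the case the patch closes off */
            assert(!toy_rcv_saddr_equal(loopback, any, true, false));
            return 0;
    }
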
- * We try to allocate an odd port (and leave even ports for connect()) - */ -int inet_csk_get_port(struct sock *sk, unsigned short snum) +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) { - bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - int ret = 1, port = snum; - struct inet_bind_hashbucket *head; - struct net *net = sock_net(sk); - struct inet_bind_bucket *tb = NULL; kuid_t uid = sock_i_uid(sk); - int l3mdev; + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - l3mdev = inet_sk_bound_l3mdev(sk); - - if (!port) { - head = inet_csk_find_open_port(sk, &tb, &port); - if (!head) - return ret; - if (!tb) - goto tb_not_found; - goto success; - } - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && - tb->port == port) - goto tb_found; -tb_not_found: - tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, - net, head, port, l3mdev); - if (!tb) - goto fail_unlock; -tb_found: - if (!hlist_empty(&tb->owners)) { - if (sk->sk_reuse == SK_FORCE_REUSE) - goto success; - - if ((tb->fastreuse > 0 && reuse) || - sk_reuseport_match(tb, sk)) - goto success; - if (inet_csk_bind_conflict(sk, tb, true, true)) - goto fail_unlock; - } -success: if (hlist_empty(&tb->owners)) { tb->fastreuse = reuse; if (sk->sk_reuseport) { @@ -371,6 +336,58 @@ success: tb->fastreuseport = 0; } } +} + +/* Obtain a reference to a local port for the given sock, + * if snum is zero it means select any available local port. + * We try to allocate an odd port (and leave even ports for connect()) + */ +int inet_csk_get_port(struct sock *sk, unsigned short snum) +{ + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + int ret = 1, port = snum; + struct inet_bind_hashbucket *head; + struct net *net = sock_net(sk); + struct inet_bind_bucket *tb = NULL; + int l3mdev; + + l3mdev = inet_sk_bound_l3mdev(sk); + + if (!port) { + head = inet_csk_find_open_port(sk, &tb, &port); + if (!head) + return ret; + if (!tb) + goto tb_not_found; + goto success; + } + head = &hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && + tb->port == port) + goto tb_found; +tb_not_found: + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, + net, head, port, l3mdev); + if (!tb) + goto fail_unlock; +tb_found: + if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE) + goto success; + + if ((tb->fastreuse > 0 && reuse) || + sk_reuseport_match(tb, sk)) + goto success; + if (inet_csk_bind_conflict(sk, tb, true, true)) + goto fail_unlock; + } +success: + inet_csk_update_fastreuse(tb, sk); + if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); @@ -686,12 +703,15 @@ static bool reqsk_queue_unlink(struct request_sock *req) return found; } -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) { - if (reqsk_queue_unlink(req)) { + bool unlinked = reqsk_queue_unlink(req); + + if (unlinked) { reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); reqsk_put(req); } + return unlinked; } EXPORT_SYMBOL(inet_csk_reqsk_queue_drop); @@ -808,6 +828,7 @@ struct 
sock *inet_csk_clone_lock(const struct sock *sk, struct inet_connection_sock *newicsk = inet_csk(newsk); inet_sk_set_state(newsk, TCP_SYN_RECV); + newsk->sk_mark2 = req->mark; newicsk->icsk_bind_hash = NULL; inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; @@ -826,6 +847,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, newicsk->icsk_retransmits = 0; newicsk->icsk_backoff = 0; newicsk->icsk_probes_out = 0; + newicsk->icsk_probes_tstamp = 0; /* Deinitialize accept_queue to trap illegal accesses. */ memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 5b68bdaa8bff..49d0b552ecfa 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -23,6 +23,7 @@ #include <net/inet_hashtables.h> #include <net/inet_timewait_sock.h> #include <net/inet6_hashtables.h> +#include <net/bpf_sk_storage.h> #include <net/netlink.h> #include <linux/inet.h> @@ -170,26 +171,28 @@ errout: } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); +#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin) { const struct tcp_congestion_ops *ca_ops; const struct inet_diag_handler *handler; + struct inet_diag_dump_data *cb_data; int ext = req->idiag_ext; struct inet_diag_msg *r; struct nlmsghdr *nlh; struct nlattr *attr; void *info = NULL; + cb_data = cb->data; handler = inet_diag_table[req->sdiag_protocol]; BUG_ON(!handler); - nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; @@ -201,7 +204,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, r->idiag_timer = 0; r->idiag_retrans = 0; - if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, + sk_user_ns(NETLINK_CB(cb->skb).sk), + net_admin)) goto errout; if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { @@ -298,6 +303,48 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto errout; } + /* Keep it at the end for potential retry with a larger skb, + * or else do best-effort fitting, which is only done for the + * first_nlmsg. 
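
A compilable miniature of the retry contract this comment describes: the fill step records how much room it wanted in a min_dump_alloc analogue, and the dump loop (see __inet_diag_dump() further down) grows the buffer and retries only when nothing has been emitted yet - otherwise it sends what fits and resumes at the next socket. Names are illustrative; the kernel grows the netlink skb with pskb_expand_head():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_dump {
            char *buf;
            size_t cap;
            size_t len;         /* bytes produced so far */
            size_t min_alloc;   /* capacity requested for a retry */
    };

    static int toy_fill(struct toy_dump *d, size_t rec_len)
    {
            if (d->len + rec_len > d->cap) {
                    if (d->len + rec_len > d->min_alloc)
                            d->min_alloc = d->len + rec_len;
                    return -1;  /* record did not fit */
            }
            memset(d->buf + d->len, 'x', rec_len);
            d->len += rec_len;
            return 0;
    }

    int main(void)
    {
            struct toy_dump d = { .buf = malloc(16), .cap = 16 };
            size_t prev;

    again:
            prev = d.min_alloc;
            if (toy_fill(&d, 64) < 0 && !d.len && d.min_alloc > prev) {
                    /* pskb_expand_head() analogue (error handling elided) */
                    d.buf = realloc(d.buf, d.min_alloc);
                    d.cap = d.min_alloc;
                    goto again;
            }
            printf("produced %zu bytes in a %zu-byte buffer\n", d.len, d.cap);
            free(d.buf);
            return 0;
    }
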
+ */ + if (cb_data->bpf_stg_diag) { + bool first_nlmsg = ((unsigned char *)nlh == skb->data); + unsigned int prev_min_dump_alloc; + unsigned int total_nla_size = 0; + unsigned int msg_len; + int err; + + msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; + err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb, + INET_DIAG_SK_BPF_STORAGES, + &total_nla_size); + + if (!err) + goto out; + + total_nla_size += msg_len; + prev_min_dump_alloc = cb->min_dump_alloc; + if (total_nla_size > prev_min_dump_alloc) + cb->min_dump_alloc = min_t(u32, total_nla_size, + MAX_DUMP_ALLOC_SIZE); + + if (!first_nlmsg) + goto errout; + + if (cb->min_dump_alloc > prev_min_dump_alloc) + /* Retry with pskb_expand_head() with + * __GFP_DIRECT_RECLAIM + */ + goto errout; + + WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc); + + /* Send what we have for this sk + * and move on to the next sk in the following + * dump() + */ + } + out: nlmsg_end(skb, nlh); return 0; @@ -308,30 +355,19 @@ errout: } EXPORT_SYMBOL_GPL(inet_sk_diag_fill); -static int inet_csk_diag_fill(struct sock *sk, - struct sk_buff *skb, - const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, - bool net_admin) -{ - return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns, - portid, seq, nlmsg_flags, unlh, net_admin); -} - static int inet_twsk_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh) + struct netlink_callback *cb, + u16 nlmsg_flags) { struct inet_timewait_sock *tw = inet_twsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; - nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, + sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; @@ -358,16 +394,16 @@ static int inet_twsk_diag_fill(struct sock *sk, } static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + struct netlink_callback *cb, + u16 nlmsg_flags, bool net_admin) { struct request_sock *reqsk = inet_reqsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; - nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - nlmsg_flags); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags); if (!nlh) return -EMSGSIZE; @@ -388,29 +424,28 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, r->idiag_inode = 0; if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, - inet_rsk(reqsk)->ir_mark)) + inet_rsk(reqsk)->ir_mark)) { + nlmsg_cancel(skb, nlh); return -EMSGSIZE; + } nlmsg_end(skb, nlh); return 0; } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, const struct inet_diag_req_v2 *r, - struct user_namespace *user_ns, - u32 portid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin) + u16 nlmsg_flags, bool net_admin) { if (sk->sk_state == TCP_TIME_WAIT) - return inet_twsk_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh); + return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags); if (sk->sk_state == TCP_NEW_SYN_RECV) - return inet_req_diag_fill(sk, skb, portid, seq, - nlmsg_flags, unlh, net_admin); + return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin); - return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, - nlmsg_flags, 
unlh, net_admin); + return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags, + net_admin); } struct sock *inet_diag_find_one_icsk(struct net *net, @@ -458,10 +493,10 @@ struct sock *inet_diag_find_one_icsk(struct net *net, EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN); struct net *net = sock_net(in_skb->sk); struct sk_buff *rep; @@ -478,10 +513,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, goto out; } - err = sk_diag_fill(sk, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, net_admin); + err = sk_diag_fill(sk, rep, cb, req, 0, net_admin); if (err < 0) { WARN_ON(err == -EMSGSIZE); nlmsg_free(rep); @@ -508,14 +540,21 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb, int err; handler = inet_diag_lock_handler(req->sdiag_protocol); - if (IS_ERR(handler)) + if (IS_ERR(handler)) { err = PTR_ERR(handler); - else if (cmd == SOCK_DIAG_BY_FAMILY) - err = handler->dump_one(in_skb, nlh, req); - else if (cmd == SOCK_DESTROY && handler->destroy) + } else if (cmd == SOCK_DIAG_BY_FAMILY) { + struct inet_diag_dump_data empty_dump_data = {}; + struct netlink_callback cb = { + .nlh = nlh, + .skb = in_skb, + .data = &empty_dump_data, + }; + err = handler->dump_one(&cb, req); + } else if (cmd == SOCK_DESTROY && handler->destroy) { err = handler->destroy(in_skb, req); - else + } else { err = -EOPNOTSUPP; + } inet_diag_unlock_handler(handler); return err; @@ -846,23 +885,6 @@ static int inet_diag_bc_audit(const struct nlattr *attr, return len == 0 ? 
0 : -EINVAL; } -static int inet_csk_diag_dump(struct sock *sk, - struct sk_buff *skb, - struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - const struct nlattr *bc, - bool net_admin) -{ - if (!inet_diag_bc_sk(bc, sk)) - return 0; - - return inet_csk_diag_fill(sk, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, - net_admin); -} - static void twsk_build_assert(void) { BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) != @@ -891,14 +913,17 @@ static void twsk_build_assert(void) void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); + struct inet_diag_dump_data *cb_data = cb->data; struct net *net = sock_net(skb->sk); u32 idiag_states = r->idiag_states; int i, num, s_i, s_num; + struct nlattr *bc; struct sock *sk; + bc = cb_data->inet_diag_nla_bc; if (idiag_states & TCPF_SYN_RECV) idiag_states |= TCPF_NEW_SYN_RECV; s_i = cb->args[1]; @@ -934,8 +959,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, r->id.idiag_sport) goto next_listen; - if (inet_csk_diag_dump(sk, skb, cb, r, - bc, net_admin) < 0) { + if (!inet_diag_bc_sk(bc, sk)) + goto next_listen; + + if (inet_sk_diag_fill(sk, inet_csk(sk), skb, + cb, r, NLM_F_MULTI, + net_admin) < 0) { spin_unlock(&ilb->lock); goto done; } @@ -1013,11 +1042,8 @@ next_normal: res = 0; for (idx = 0; idx < accum; idx++) { if (res >= 0) { - res = sk_diag_fill(sk_arr[idx], skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + res = sk_diag_fill(sk_arr[idx], skb, cb, r, + NLM_F_MULTI, net_admin); if (res < 0) num = num_arr[idx]; } @@ -1041,31 +1067,101 @@ out: EXPORT_SYMBOL_GPL(inet_diag_dump_icsk); static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { const struct inet_diag_handler *handler; + u32 prev_min_dump_alloc; int err = 0; +again: + prev_min_dump_alloc = cb->min_dump_alloc; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) - handler->dump(skb, cb, r, bc); + handler->dump(skb, cb, r); else err = PTR_ERR(handler); inet_diag_unlock_handler(handler); + /* The skb is not large enough to fit one sk info and + * inet_sk_diag_fill() has requested for a larger skb. + */ + if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) { + err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL); + if (!err) + goto again; + } + return err ? 
: skb->len; } static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { - int hdrlen = sizeof(struct inet_diag_req_v2); - struct nlattr *bc = NULL; + return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh)); +} - if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); +static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) +{ + const struct nlmsghdr *nlh = cb->nlh; + struct inet_diag_dump_data *cb_data; + struct sk_buff *skb = cb->skb; + struct nlattr *nla; + int rem, err; - return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc); + cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + + nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), rem) { + int type = nla_type(nla); + + if (type < __INET_DIAG_REQ_MAX) + cb_data->req_nlas[type] = nla; + } + + nla = cb_data->inet_diag_nla_bc; + if (nla) { + err = inet_diag_bc_audit(nla, skb); + if (err) { + kfree(cb_data); + return err; + } + } + + nla = cb_data->inet_diag_nla_bpf_stgs; + if (nla) { + struct bpf_sk_storage_diag *bpf_stg_diag; + + bpf_stg_diag = bpf_sk_storage_diag_alloc(nla); + if (IS_ERR(bpf_stg_diag)) { + kfree(cb_data); + return PTR_ERR(bpf_stg_diag); + } + cb_data->bpf_stg_diag = bpf_stg_diag; + } + + cb->data = cb_data; + return 0; +} + +static int inet_diag_dump_start(struct netlink_callback *cb) +{ + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2)); +} + +static int inet_diag_dump_start_compat(struct netlink_callback *cb) +{ + return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req)); +} + +static int inet_diag_dump_done(struct netlink_callback *cb) +{ + struct inet_diag_dump_data *cb_data = cb->data; + + bpf_sk_storage_diag_free(cb_data->bpf_stg_diag); + kfree(cb->data); + + return 0; } static int inet_diag_type2proto(int type) @@ -1084,9 +1180,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb) { struct inet_diag_req *rc = nlmsg_data(cb->nlh); - int hdrlen = sizeof(struct inet_diag_req); struct inet_diag_req_v2 req; - struct nlattr *bc = NULL; req.sdiag_family = AF_UNSPEC; /* compatibility */ req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type); @@ -1094,10 +1188,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, req.idiag_states = rc->idiag_states; req.id = rc->id; - if (nlmsg_attrlen(cb->nlh, hdrlen)) - bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE); - - return __inet_diag_dump(skb, cb, &req, bc); + return __inet_diag_dump(skb, cb, &req); } static int inet_diag_get_exact_compat(struct sk_buff *in_skb, @@ -1125,22 +1216,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; if (nlh->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(nlh, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(nlh, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump_compat, - }; - return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start_compat, + .done = inet_diag_dump_done, + .dump = inet_diag_dump_compat, + }; + return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); } return inet_diag_get_exact_compat(skb, nlh); @@ -1156,22 +1237,12 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && 
h->nlmsg_flags & NLM_F_DUMP) { - if (nlmsg_attrlen(h, hdrlen)) { - struct nlattr *attr; - int err; - - attr = nlmsg_find_attr(h, hdrlen, - INET_DIAG_REQ_BYTECODE); - err = inet_diag_bc_audit(attr, skb); - if (err) - return err; - } - { - struct netlink_dump_control c = { - .dump = inet_diag_dump, - }; - return netlink_dump_start(net->diag_nlsk, skb, h, &c); - } + struct netlink_dump_control c = { + .start = inet_diag_dump_start, + .done = inet_diag_dump_done, + .dump = inet_diag_dump, + }; + return netlink_dump_start(net->diag_nlsk, skb, h, &c); } return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index d9803a36b86f..fdc52aa50200 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) return -ENOMEM; } } + inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); @@ -246,6 +247,21 @@ static inline int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 phash; + + if (sk->sk_reuseport) { + phash = inet_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, phash, skb, doff); + } + return reuse_sk; +} + /* * Here are some nice properties to exploit here. The BSD API * does not allow a listening sock to specify the remote port nor the @@ -265,21 +281,17 @@ static struct sock *inet_lhash2_lookup(struct net *net, struct inet_connection_sock *icsk; struct sock *sk, *result = NULL; int score, hiscore = 0; - u32 phash = 0; inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { sk = (struct sock *)icsk; score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif); if (score > hiscore) { - if (sk->sk_reuseport) { - phash = inet_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, phash, - skb, doff); - if (result) - return result; - } + result = lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, hnum); + if (result) + return result; + result = sk; hiscore = score; } @@ -288,6 +300,29 @@ static struct sock *inet_lhash2_lookup(struct net *net, return result; } +static inline struct sock *inet_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -299,6 +334,14 @@ struct sock *__inet_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv4_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); diff --git 
a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 85ba1453ba5c..fedad3a3e61b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -603,9 +603,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, } if (dev->header_ops) { - /* Need space for new headers */ - if (skb_cow_head(skb, dev->needed_headroom - - (tunnel->hlen + sizeof(struct iphdr)))) + if (skb_cow_head(skb, 0)) goto free_skb; tnl_params = (const struct iphdr *)skb->data; @@ -723,7 +721,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu) len = tunnel->tun_hlen - len; tunnel->hlen = tunnel->hlen + len; - dev->needed_headroom = dev->needed_headroom + len; + if (dev->header_ops) + dev->hard_header_len += len; + else + dev->needed_headroom += len; + if (set_mtu) dev->mtu = max_t(int, dev->mtu - len, 68); @@ -926,6 +928,7 @@ static void __gre_tunnel_init(struct net_device *dev) tunnel->parms.iph.protocol = IPPROTO_GRE; tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; + dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph); dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; @@ -969,10 +972,14 @@ static int ipgre_tunnel_init(struct net_device *dev) return -EINVAL; dev->flags = IFF_BROADCAST; dev->header_ops = &ipgre_header_ops; + dev->hard_header_len = tunnel->hlen + sizeof(*iph); + dev->needed_headroom = 0; } #endif } else if (!tunnel->collect_md) { dev->header_ops = &ipgre_header_ops; + dev->hard_header_len = tunnel->hlen + sizeof(*iph); + dev->needed_headroom = 0; } return ip_tunnel_init(dev); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index e665ee937719..94acd533d097 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -500,7 +500,8 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) IPCB(skb)->iif = skb->skb_iif; /* Must drop socket now because of tproxy. 
*/ - skb_orphan(skb); + if (!skb_sk_is_prefetched(skb)) + skb_orphan(skb); return skb; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 77189800e539..11a87f7d67db 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -74,6 +74,7 @@ #include <net/icmp.h> #include <net/checksum.h> #include <net/inetpeer.h> +#include <net/inet_ecn.h> #include <net/lwtunnel.h> #include <linux/bpf-cgroup.h> #include <linux/igmp.h> @@ -306,7 +307,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff * if (skb_is_gso(skb)) return ip_finish_output_gso(net, sk, skb, mtu); - if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) + if (skb->len > mtu || IPCB(skb)->frag_max_size) return ip_fragment(net, sk, skb, mtu, ip_finish_output2); return ip_finish_output2(net, sk, skb); @@ -1703,12 +1704,12 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet_sk(sk)->tos = arg->tos; + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; sk->sk_sndbuf = sysctl_wmem_default; - sk->sk_mark = fl4.flowi4_mark; + ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); if (unlikely(err)) { diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 74e1d964a615..f64d1743b86d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key) { - unsigned int hash; struct ip_tunnel *t, *cand = NULL; struct hlist_head *head; + struct net_device *ndev; + unsigned int hash; hash = ip_tunnel_hash(key, remote); head = &itn->tunnels[hash]; @@ -142,11 +143,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } - if (flags & TUNNEL_NO_KEY) - goto skip_key_lookup; - hlist_for_each_entry_rcu(t, head, hash_node) { - if (t->parms.i_key != key || + if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) || t->parms.iph.saddr != 0 || t->parms.iph.daddr != 0 || !(t->dev->flags & IFF_UP)) @@ -158,7 +156,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } -skip_key_lookup: if (cand) return cand; @@ -166,8 +163,9 @@ skip_key_lookup: if (t && t->dev->flags & IFF_UP) return t; - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) - return netdev_priv(itn->fb_tunnel_dev); + ndev = READ_ONCE(itn->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -319,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) } dev->needed_headroom = t_hlen + hlen; - mtu -= (dev->hard_header_len + t_hlen); + mtu -= t_hlen; if (mtu < IPV4_MIN_MTU) mtu = IPV4_MIN_MTU; @@ -349,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, nt = netdev_priv(dev); t_hlen = nt->hlen + sizeof(struct iphdr); dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; + dev->max_mtu = IP_MAX_MTU - t_hlen; ip_tunnel_add(itn, nt); return nt; @@ -496,11 +494,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, int mtu; tunnel_hlen = md ? 
tunnel_hlen : tunnel->hlen; - pkt_size = skb->len - tunnel_hlen - dev->hard_header_len; + pkt_size = skb->len - tunnel_hlen; if (df) - mtu = dst_mtu(&rt->dst) - dev->hard_header_len - - sizeof(struct iphdr) - tunnel_hlen; + mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); else mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; @@ -616,9 +613,6 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ttl = ip4_dst_hoplimit(&rt->dst); } - if (!df && skb->protocol == htons(ETH_P_IP)) - df = inner_iph->frag_off & htons(IP_DF); - headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; if (headroom > dev->needed_headroom) dev->needed_headroom = headroom; @@ -770,8 +764,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph, - 0, 0, false)) { + df = tnl_params->frag_off; + if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) + df |= (inner_iph->frag_off & htons(IP_DF)); + + if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) { ip_rt_put(rt); goto tx_error; } @@ -799,10 +796,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ttl = ip4_dst_hoplimit(&rt->dst); } - df = tnl_params->frag_off; - if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) - df |= (inner_iph->frag_off&htons(IP_DF)); - max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); if (max_headroom > dev->needed_headroom) @@ -970,7 +963,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) { struct ip_tunnel *tunnel = netdev_priv(dev); int t_hlen = tunnel->hlen + sizeof(struct iphdr); - int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; + int max_mtu = IP_MAX_MTU - t_hlen; if (new_mtu < ETH_MIN_MTU) return -EINVAL; @@ -1147,10 +1140,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], mtu = ip_tunnel_bind_dev(dev); if (tb[IFLA_MTU]) { - unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; + unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr)); - mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, - (unsigned int)(max - sizeof(struct iphdr))); + mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max); } err = dev_set_mtu(dev, mtu); @@ -1249,9 +1241,9 @@ void ip_tunnel_uninit(struct net_device *dev) struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); - /* fb_tunnel_dev will be unregisted in net-exit call. 
*/ - if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(itn, netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); + if (itn->fb_tunnel_dev == dev) + WRITE_ONCE(itn->fb_tunnel_dev, NULL); dst_cache_reset(&tunnel->dst_cache); } diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 8ecaf0f26973..bd41354ed8c1 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -93,7 +93,28 @@ static int vti_rcv_proto(struct sk_buff *skb) static int vti_rcv_tunnel(struct sk_buff *skb) { - return vti_rcv(skb, ip_hdr(skb)->saddr, true); + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); + const struct iphdr *iph = ip_hdr(skb); + struct ip_tunnel *tunnel; + + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, + iph->saddr, iph->daddr, 0); + if (tunnel) { + struct tnl_ptk_info tpi = { + .proto = htons(ETH_P_IP), + }; + + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + goto drop; + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) + goto drop; + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); + } + + return -EINVAL; +drop: + kfree_skb(skb); + return 0; } static int vti_rcv_cb(struct sk_buff *skb, int err) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 2f01cf6fa0de..678575adaf3b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -698,7 +698,7 @@ out: rtnl_link_failed: #if IS_ENABLED(CONFIG_MPLS) - xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); + xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); xfrm_tunnel_mplsip_failed: #endif diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 58007439cffd..d71935618871 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2609,7 +2609,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { - if (filter.dump_all_families) + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) return skb->len; NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist"); diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index a058213b77a7..7c841037c533 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -17,17 +17,19 @@ #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type) +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) { const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - const struct sock *sk = skb_to_full_sk(skb); - __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; + __u8 flags; struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; + sk = sk_to_full_sk(sk); + flags = sk ? 
inet_sk_flowi_flags(sk) : 0; + if (addr_type == RTN_UNSPEC) addr_type = inet_addr_type_dev_table(net, dev, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index f1f78a742b36..a6f2e5bf7045 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1196,6 +1196,8 @@ static int translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; + memset(newinfo->entries, 0, size); + newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; @@ -1578,10 +1580,15 @@ out_free: return ret; } -void arpt_unregister_table(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops) +void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); +} +EXPORT_SYMBOL(arpt_unregister_table_pre_exit); + +void arpt_unregister_table(struct net *net, struct xt_table *table) +{ __arpt_unregister_table(net, table); } diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index c216b9ad3bb2..6c300ba5634e 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c @@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net) return err; } +static void __net_exit arptable_filter_net_pre_exit(struct net *net) +{ + if (net->ipv4.arptable_filter) + arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter, + arpfilter_ops); +} + static void __net_exit arptable_filter_net_exit(struct net *net) { if (!net->ipv4.arptable_filter) return; - arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops); + arpt_unregister_table(net, net->ipv4.arptable_filter); net->ipv4.arptable_filter = NULL; } static struct pernet_operations arptable_filter_net_ops = { .exit = arptable_filter_net_exit, + .pre_exit = arptable_filter_net_pre_exit, }; static int __init arptable_filter_init(void) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 10b91ebdf213..0076449eea35 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1430,6 +1430,8 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; + memset(newinfo->entries, 0, size); + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index cc23f1ce239c..8cd3224d913e 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.daddr = iph->saddr; flow.saddr = rpfilter_get_saddr(iph->daddr); flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? 
skb->mark : 0; - flow.flowi4_tos = RT_TOS(iph->tos); + flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; flow.flowi4_scope = RT_SCOPE_UNIVERSE; flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par)); diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index bb9266ea3785..ae45bcdd335e 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -62,7 +62,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) iph->daddr != daddr || skb->mark != mark || iph->tos != tos) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c index 7a83f881efa9..136030ad2e54 100644 --- a/net/ipv4/netfilter/nf_log_arp.c +++ b/net/ipv4/netfilter/nf_log_arp.c @@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int nhoff) { - const struct arphdr *ah; - struct arphdr _arph; const struct arppayload *ap; struct arppayload _arpp; + const struct arphdr *ah; + unsigned int logflags; + struct arphdr _arph; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL) { nf_log_buf_add(m, "TRUNCATED"); return; } + + if (info->type == NF_LOG_TYPE_LOG) + logflags = info->u.log.logflags; + else + logflags = NF_LOG_DEFAULT_MASK; + + if (logflags & NF_LOG_MACDECODE) { + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); + nf_log_dump_vlan(m, skb); + nf_log_buf_add(m, "MACPROTO=%04x ", + ntohs(eth_hdr(skb)->h_proto)); + } + nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index 4b2d49cc9f1a..cb288ffbcfde 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m, switch (dev->type) { case ARPHRD_ETHER: - nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); + nf_log_dump_vlan(m, skb); + nf_log_buf_add(m, "MACPROTO=%04x ", ntohs(eth_hdr(skb)->h_proto)); return; default: diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index b2aeb7bf5dac..2a1e10f4ae93 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -166,8 +166,7 @@ pptp_outbound_pkt(struct sk_buff *skb, break; default: pr_debug("unknown outbound packet 0x%04x:%s\n", msg, - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : - pptp_msg_name[0]); + pptp_msg_name(msg)); /* fall through */ case PPTP_SET_LINK_INFO: /* only need to NAT in case PAC is behind NAT box */ @@ -268,9 +267,7 @@ pptp_inbound_pkt(struct sk_buff *skb, pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); break; default: - pr_debug("unknown inbound packet %s\n", - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : - pptp_msg_name[0]); + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); /* fall through */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 2361fdac2c43..57817313a85c 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -127,7 +127,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) ip4_dst_hoplimit(skb_dst(nskb))); nf_reject_ip_tcphdr_put(nskb, oldskb, oth); - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) goto free_nskb; niph = ip_hdr(nskb); diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 3737d32ad11a..f5f4369c131c 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -64,9 +64,16 @@ static void nexthop_free_mpath(struct nexthop *nh) int i; nhg = rcu_dereference_raw(nh->nh_grp); - for (i = 0; i < nhg->num_nh; ++i) - WARN_ON(nhg->nh_entries[i].nh); + for (i = 0; i < nhg->num_nh; ++i) { + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; + WARN_ON(!list_empty(&nhge->nh_list)); + nexthop_put(nhge->nh); + } + + WARN_ON(nhg->spare == nhg); + + kfree(nhg->spare); kfree(nhg); } @@ -277,6 +284,7 @@ out: return 0; nla_put_failure: + nlmsg_cancel(skb, nlh); return -EMSGSIZE; } @@ -395,7 +403,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[], struct nexthop_grp *nhg; unsigned int i, j; - if (len & (sizeof(struct nexthop_grp) - 1)) { + if (!len || len & (sizeof(struct nexthop_grp) - 1)) { NL_SET_ERR_MSG(extack, "Invalid length for nexthop group attribute"); return -EINVAL; @@ -434,7 +442,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[], if (!valid_group_nh(nh, len, extack)) return -EINVAL; } - for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) { + for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) { if (!tb[i]) continue; @@ -694,41 +702,56 @@ static void nh_group_rebalance(struct nh_group *nhg) } } -static void remove_nh_grp_entry(struct nh_grp_entry *nhge, - struct nh_group *nhg, +static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, struct nl_info *nlinfo) { + struct nh_grp_entry *nhges, *new_nhges; + struct nexthop *nhp = nhge->nh_parent; struct nexthop *nh = nhge->nh; - struct nh_grp_entry *nhges; - bool found = false; - int i; + struct nh_group *nhg, *newg; + int i, j; WARN_ON(!nh); - nhges = nhg->nh_entries; - for (i = 0; i < nhg->num_nh; ++i) { - if (found) { - nhges[i-1].nh = nhges[i].nh; - nhges[i-1].weight = nhges[i].weight; - list_del(&nhges[i].nh_list); - list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list); - } else if (nhg->nh_entries[i].nh == nh) { - found = true; - } + nhg = rtnl_dereference(nhp->nh_grp); + newg = nhg->spare; + + /* last entry, keep it visible and remove the parent */ + if (nhg->num_nh == 1) { + remove_nexthop(net, nhp, nlinfo); + return; } - if (WARN_ON(!found)) - return; + newg->has_v4 = nhg->has_v4; + newg->mpath = nhg->mpath; + newg->num_nh = nhg->num_nh; - nhg->num_nh--; - nhg->nh_entries[nhg->num_nh].nh = NULL; + /* copy old entries to new except the one getting removed */ + nhges = nhg->nh_entries; + new_nhges = newg->nh_entries; + for (i = 0, j = 0; i < nhg->num_nh; ++i) { + /* current nexthop getting removed */ + if (nhg->nh_entries[i].nh == nh) { + newg->num_nh--; + continue; + } - nh_group_rebalance(nhg); + list_del(&nhges[i].nh_list); + new_nhges[j].nh_parent = nhges[i].nh_parent; + new_nhges[j].nh = nhges[i].nh; + 
new_nhges[j].weight = nhges[i].weight; + list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); + j++; + } - nexthop_put(nh); + nh_group_rebalance(newg); + rcu_assign_pointer(nhp->nh_grp, newg); + + list_del(&nhge->nh_list); + nexthop_put(nhge->nh); if (nlinfo) - nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo); + nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); } static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, @@ -736,17 +759,11 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, { struct nh_grp_entry *nhge, *tmp; - list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) { - struct nh_group *nhg; + list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) + remove_nh_grp_entry(net, nhge, nlinfo); - list_del(&nhge->nh_list); - nhg = rtnl_dereference(nhge->nh_parent->nh_grp); - remove_nh_grp_entry(nhge, nhg, nlinfo); - - /* if this group has no more entries then remove it */ - if (!nhg->num_nh) - remove_nexthop(net, nhge->nh_parent, nlinfo); - } + /* make sure all see the newly published array before releasing rtnl */ + synchronize_net(); } static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) @@ -760,10 +777,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) if (WARN_ON(!nhge->nh)) continue; - list_del(&nhge->nh_list); - nexthop_put(nhge->nh); - nhge->nh = NULL; - nhg->num_nh--; + list_del_init(&nhge->nh_list); } } @@ -1051,7 +1065,7 @@ out: /* rtnl */ /* remove all nexthops tied to a device being deleted */ -static void nexthop_flush_dev(struct net_device *dev) +static void nexthop_flush_dev(struct net_device *dev, unsigned long event) { unsigned int hash = nh_dev_hashfn(dev->ifindex); struct net *net = dev_net(dev); @@ -1063,6 +1077,10 @@ static void nexthop_flush_dev(struct net_device *dev) if (nhi->fib_nhc.nhc_dev != dev) continue; + if (nhi->reject_nh && + (event == NETDEV_DOWN || event == NETDEV_CHANGE)) + continue; + remove_nexthop(net, nhi->nh_parent, NULL); } } @@ -1086,22 +1104,35 @@ static struct nexthop *nexthop_create_group(struct net *net, { struct nlattr *grps_attr = cfg->nh_grp; struct nexthop_grp *entry = nla_data(grps_attr); + u16 num_nh = nla_len(grps_attr) / sizeof(*entry); struct nh_group *nhg; struct nexthop *nh; int i; + if (WARN_ON(!num_nh)) + return ERR_PTR(-EINVAL); + nh = nexthop_alloc(); if (!nh) return ERR_PTR(-ENOMEM); nh->is_group = 1; - nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry)); + nhg = nexthop_grp_alloc(num_nh); if (!nhg) { kfree(nh); return ERR_PTR(-ENOMEM); } + /* spare group used for removals */ + nhg->spare = nexthop_grp_alloc(num_nh); + if (!nhg->spare) { + kfree(nhg); + kfree(nh); + return ERR_PTR(-ENOMEM); + } + nhg->spare->spare = nhg; + for (i = 0; i < nhg->num_nh; ++i) { struct nexthop *nhe; struct nh_info *nhi; @@ -1130,9 +1161,12 @@ static struct nexthop *nexthop_create_group(struct net *net, return nh; out_no_nh: - for (; i >= 0; --i) + for (i--; i >= 0; --i) { + list_del(&nhg->nh_entries[i].nh_list); nexthop_put(nhg->nh_entries[i].nh); + } + kfree(nhg->spare); kfree(nhg); kfree(nh); @@ -1764,11 +1798,11 @@ static int nh_netdev_event(struct notifier_block *this, switch (event) { case NETDEV_DOWN: case NETDEV_UNREGISTER: - nexthop_flush_dev(dev); + nexthop_flush_dev(dev, event); break; case NETDEV_CHANGE: if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP))) - nexthop_flush_dev(dev); + nexthop_flush_dev(dev, event); break; case NETDEV_CHANGEMTU: info_ext = ptr; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index
535427292194..df6fbefe44d4 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = user_icmph.type; + fl4.fl4_icmp_code = user_icmph.code; + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 3183413ebc6c..bb3665441054 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -85,22 +85,21 @@ struct raw_frag_vec { int hlen; }; -struct raw_hashinfo raw_v4_hashinfo = { - .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), -}; +struct raw_hashinfo raw_v4_hashinfo; EXPORT_SYMBOL_GPL(raw_v4_hashinfo); int raw_hash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; - struct hlist_head *head; + struct hlist_nulls_head *hlist; - head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; + hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; - write_lock_bh(&h->lock); - sk_add_node(sk, head); + spin_lock(&h->lock); + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist); + sock_set_flag(sk, SOCK_RCU_FREE); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - write_unlock_bh(&h->lock); + spin_unlock(&h->lock); return 0; } @@ -110,10 +109,10 @@ void raw_unhash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; - write_lock_bh(&h->lock); - if (sk_del_node_init(sk)) + spin_lock(&h->lock); + if (__sk_nulls_del_node_init_rcu(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - write_unlock_bh(&h->lock); + spin_unlock(&h->lock); } EXPORT_SYMBOL_GPL(raw_unhash_sk); @@ -121,7 +120,9 @@ struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, unsigned short num, __be32 raddr, __be32 laddr, int dif, int sdif) { - sk_for_each_from(sk) { + struct hlist_nulls_node *hnode; + + sk_nulls_for_each_from(sk, hnode) { struct inet_sock *inet = inet_sk(sk); if (net_eq(sock_net(sk), net) && inet->inet_num == num && @@ -171,17 +172,14 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) int sdif = inet_sdif(skb); int dif = inet_iif(skb); struct sock *sk; - struct hlist_head *head; + struct hlist_nulls_head *hlist; int delivered = 0; - struct net *net; + struct net *net = dev_net(skb->dev); - read_lock(&raw_v4_hashinfo.lock); - head = &raw_v4_hashinfo.ht[hash]; - if (hlist_empty(head)) - goto out; + hlist = &raw_v4_hashinfo.ht[hash]; + rcu_read_lock(); - net = dev_net(skb->dev); - sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, + sk = __raw_v4_lookup(net, __sk_nulls_head(hlist), iph->protocol, iph->saddr, iph->daddr, dif, sdif); while (sk) { @@ -195,12 +193,12 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) if (clone) raw_rcv(sk, clone); } - sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, + sk = __raw_v4_lookup(net, sk_nulls_next(sk), iph->protocol, iph->saddr, iph->daddr, dif, sdif); } -out: - read_unlock(&raw_v4_hashinfo.lock); + + rcu_read_unlock(); return delivered; } @@ -210,7 +208,7 @@ int raw_local_deliver(struct sk_buff *skb, int protocol) struct sock *raw_sk; hash = protocol & (RAW_HTABLE_SIZE - 1); - raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); + raw_sk = sk_nulls_head(&raw_v4_hashinfo.ht[hash]); /* If there maybe a raw socket we must check - if not we * don't care less @@ -292,8 +290,8 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) hash = protocol & (RAW_HTABLE_SIZE - 1); - 
read_lock(&raw_v4_hashinfo.lock); - raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); + rcu_read_lock(); + raw_sk = sk_nulls_head(&raw_v4_hashinfo.ht[hash]); if (raw_sk) { int dif = skb->dev->ifindex; int sdif = inet_sdif(skb); @@ -305,11 +303,11 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) iph->daddr, iph->saddr, dif, sdif)) != NULL) { raw_err(raw_sk, skb, info); - raw_sk = sk_next(raw_sk); + raw_sk = sk_nulls_next(raw_sk); iph = (const struct iphdr *)skb->data; } } - read_unlock(&raw_v4_hashinfo.lock); + rcu_read_unlock(); } static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) @@ -993,10 +991,13 @@ static struct sock *raw_get_first(struct seq_file *seq) struct sock *sk; struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); struct raw_iter_state *state = raw_seq_private(seq); + struct hlist_nulls_head *hlist; + struct hlist_nulls_node *hnode; for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { - sk_for_each(sk, &h->ht[state->bucket]) + hlist = &h->ht[state->bucket]; + hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) if (sock_net(sk) == seq_file_net(seq)) goto found; } @@ -1011,13 +1012,13 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) struct raw_iter_state *state = raw_seq_private(seq); do { - sk = sk_next(sk); + sk = sk_nulls_next(sk); try_again: ; } while (sk && sock_net(sk) != seq_file_net(seq)); if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { - sk = sk_head(&h->ht[state->bucket]); + sk = sk_nulls_head(&h->ht[state->bucket]); goto try_again; } return sk; @@ -1035,9 +1036,7 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos) void *raw_seq_start(struct seq_file *seq, loff_t *pos) { - struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); - - read_lock(&h->lock); + rcu_read_lock(); return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(raw_seq_start); @@ -1057,9 +1056,7 @@ EXPORT_SYMBOL_GPL(raw_seq_next); void raw_seq_stop(struct seq_file *seq, void *v) { - struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file)); - - read_unlock(&h->lock); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(raw_seq_stop); diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index a93e7d1e1251..fd7a0fcb8bd8 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -58,15 +58,18 @@ static struct sock *raw_lookup(struct net *net, struct sock *from, static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r) { struct raw_hashinfo *hashinfo = raw_get_hashinfo(r); + struct hlist_nulls_head *hlist; + struct hlist_nulls_node *hnode; struct sock *sk = NULL, *s; int slot; if (IS_ERR(hashinfo)) return ERR_CAST(hashinfo); - read_lock(&hashinfo->lock); + rcu_read_lock(); for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) { - sk_for_each(s, &hashinfo->ht[slot]) { + hlist = &hashinfo->ht[slot]; + hlist_nulls_for_each_entry(s, hnode, hlist, sk_nulls_node) { sk = raw_lookup(net, s, r); if (sk) { /* @@ -76,26 +79,27 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 * We can do that because we're keeping * hashinfo->lock here. */ - sock_hold(sk); - goto out_unlock; + if (refcount_inc_not_zero(&sk->sk_refcnt)) + goto out_unlock; } } } out_unlock: - read_unlock(&hashinfo->lock); + rcu_read_unlock(); return sk ? 
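Worth spelling out from the raw-socket conversion above: under rcu_read_lock() a matching socket may already be losing its last reference, so the old sock_hold() is replaced by refcount_inc_not_zero(), which fails rather than resurrect a dying object. A condensed sketch of the idiom, where match() stands in for the real raw_lookup() predicate:

static struct sock *pin_matching(struct hlist_nulls_head *hlist, const void *req)
{
        struct hlist_nulls_node *hnode;
        struct sock *s;

        rcu_read_lock();
        sk_nulls_for_each(s, hnode, hlist) {
                if (!match(s, req))             /* hypothetical predicate */
                        continue;
                /* Only pin sockets whose refcount is still non-zero. */
                if (refcount_inc_not_zero(&s->sk_refcnt))
                        goto out;
        }
        s = NULL;
out:
        rcu_read_unlock();
        return s;                               /* caller must sock_put() */
}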
sk : ERR_PTR(-ENOENT); } -static int raw_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int raw_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { - struct net *net = sock_net(in_skb->sk); + struct sk_buff *in_skb = cb->skb; struct sk_buff *rep; struct sock *sk; + struct net *net; int err; + net = sock_net(in_skb->sk); sk = raw_sock_get(net, r); if (IS_ERR(sk)) return PTR_ERR(sk); @@ -109,10 +113,7 @@ static int raw_diag_dump_one(struct sk_buff *in_skb, return -ENOMEM; } - err = inet_sk_diag_fill(sk, NULL, rep, r, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, + err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0, netlink_net_capable(in_skb, CAP_NET_ADMIN)); sock_put(sk); @@ -137,33 +138,36 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, if (!inet_diag_bc_sk(bc, sk)) return 0; - return inet_sk_diag_fill(sk, NULL, skb, r, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin); } static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct raw_hashinfo *hashinfo = raw_get_hashinfo(r); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; + struct hlist_nulls_head *hlist; + struct hlist_nulls_node *hnode; int num, s_num, slot, s_slot; struct sock *sk = NULL; + struct nlattr *bc; if (IS_ERR(hashinfo)) return; + cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1]; - read_lock(&hashinfo->lock); + rcu_read_lock(); for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) { num = 0; - sk_for_each(sk, &hashinfo->ht[slot]) { + hlist = &hashinfo->ht[slot]; + hlist_nulls_for_each_entry(sk, hnode, hlist, sk_nulls_node) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) @@ -186,7 +190,7 @@ next: } out_unlock: - read_unlock(&hashinfo->lock); + rcu_read_unlock(); cb->args[0] = slot; cb->args[1] = num; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fe34e9e0912a..3ff702380b62 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -66,6 +66,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/memblock.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> @@ -271,6 +272,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } + (*pos)++; return NULL; } @@ -475,8 +477,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr) __ipv4_confirm_neigh(dev, *(__force u32 *)pkey); } -#define IP_IDENTS_SZ 2048u - +/* Hash tables of size 2048..262144 depending on RAM size. + * Each bucket uses 8 bytes. 
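Because the ident table above is always sized to a power of two, 'hash % size' reduces to 'hash & ip_idents_mask', and each bucket is one atomic counter next to one timestamp (hence 8 bytes). A runnable userspace analogue of the reservation arithmetic, with C11 stdatomic standing in for the kernel's atomic_t and an illustrative fixed size:

#include <stdatomic.h>
#include <stdio.h>

#define TABLE_SIZE 2048u                        /* power of two, as required */
static const unsigned int mask = TABLE_SIZE - 1;
static atomic_uint ids[TABLE_SIZE];

/* Reserve 'segs' consecutive IDs; the counter's old value is the first
 * reserved ID, the same add-then-subtract trick as
 * atomic_add_return(segs + delta, p_id) - segs in the patch. */
static unsigned int reserve_ids(unsigned int hash, unsigned int segs)
{
        atomic_uint *p = &ids[hash & mask];     /* == hash % TABLE_SIZE */

        return atomic_fetch_add(p, segs);
}

int main(void)
{
        printf("first=%u\n", reserve_ids(0x12345678u, 3));      /* 0 */
        printf("first=%u\n", reserve_ids(0x12345678u, 3));      /* 3 */
        return 0;
}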
+ */ +static u32 ip_idents_mask __read_mostly; static atomic_t *ip_idents __read_mostly; static u32 *ip_tstamps __read_mostly; @@ -486,22 +490,24 @@ static u32 *ip_tstamps __read_mostly; */ u32 ip_idents_reserve(u32 hash, int segs) { - u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; - atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; - u32 old = READ_ONCE(*p_tstamp); - u32 now = (u32)jiffies; - u32 new, delta = 0; + u32 bucket, old, now = (u32)jiffies; + atomic_t *p_id; + u32 *p_tstamp; + u32 delta = 0; + + bucket = hash & ip_idents_mask; + p_tstamp = ip_tstamps + bucket; + p_id = ip_idents + bucket; + old = READ_ONCE(*p_tstamp); if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); - /* Do not use atomic_add_return() as it makes UBSAN unhappy */ - do { - old = (u32)atomic_read(p_id); - new = old + delta + segs; - } while (atomic_cmpxchg(p_id, old, new) != old); - - return new - segs; + /* If UBSAN reports an error there, please make sure your compiler + * supports -fno-strict-overflow before reporting it that was a bug + * in UBSAN, and it has been fixed in GCC-8. + */ + return atomic_add_return(segs + delta, p_id) - segs; } EXPORT_SYMBOL(ip_idents_reserve); @@ -787,8 +793,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res, 0) == 0) { - struct fib_nh_common *nhc = FIB_RES_NHC(res); + struct fib_nh_common *nhc; + fib_select_path(net, &res, fl4, skb); + nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, new_gw, 0, false, jiffies + ip_rt_gc_timeout); @@ -914,7 +922,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) /* Check for load limit; set rate_last to the latest sent * redirect. */ - if (peer->rate_tokens == 0 || + if (peer->n_redirects == 0 || time_after(jiffies, (peer->rate_last + (ip_rt_redirect_load << peer->n_redirects)))) { @@ -1014,6 +1022,7 @@ out: kfree_skb(skb); static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; + struct net *net = dev_net(dst->dev); u32 old_mtu = ipv4_mtu(dst); struct fib_result res; bool lock = false; @@ -1034,9 +1043,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) return; rcu_read_lock(); - if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { - struct fib_nh_common *nhc = FIB_RES_NHC(res); + if (fib_lookup(net, fl4, &res, 0) == 0) { + struct fib_nh_common *nhc; + fib_select_path(net, &res, fl4, NULL); + nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, jiffies + ip_rt_mtu_expires); } @@ -2106,6 +2117,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.daddr = daddr; fl4.saddr = saddr; fl4.flowi4_uid = sock_net_uid(net, NULL); + fl4.flowi4_multipath_hash = 0; if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { flkeys = &_flkeys; @@ -2627,8 +2639,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, fib_select_path(net, res, fl4, skb); dev_out = FIB_RES_DEV(*res); - fl4->flowi4_oif = dev_out->ifindex; - make_route: rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); @@ -2725,10 +2735,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, if (IS_ERR(rt)) return rt; - if (flp4->flowi4_proto) + if (flp4->flowi4_proto) { + flp4->flowi4_oif = rt->dst.dev->ifindex; rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, flowi4_to_flowi(flp4), sk, 0); + } return rt; } @@ 
-3127,7 +3139,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, fl4.daddr = dst; fl4.saddr = src; - fl4.flowi4_tos = rtm->rtm_tos; + fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK; fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; fl4.flowi4_mark = mark; fl4.flowi4_uid = uid; @@ -3151,8 +3163,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, fl4.flowi4_iif = iif; /* for rt_fill_info */ skb->dev = dev; skb->mark = mark; - err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos, - dev, &res); + err = ip_route_input_rcu(skb, dst, src, + rtm->rtm_tos & IPTOS_RT_MASK, dev, + &res); rt = skb_rtable(skb); if (err == 0 && rt->dst.error) @@ -3453,18 +3466,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; int __init ip_rt_init(void) { + void *idents_hash; int cpu; - ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents), - GFP_KERNEL); - if (!ip_idents) - panic("IP: failed to allocate ip_idents\n"); + /* For modern hosts, this will use 2 MB of memory */ + idents_hash = alloc_large_system_hash("IP idents", + sizeof(*ip_idents) + sizeof(*ip_tstamps), + 0, + 16, /* one bucket per 64 KB */ + HASH_ZERO, + NULL, + &ip_idents_mask, + 2048, + 256*1024); - prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); + ip_idents = idents_hash; - ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL); - if (!ip_tstamps) - panic("IP: failed to allocate ip_tstamps\n"); + prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); + + ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents); for_each_possible_cpu(cpu) { struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 535b69326f66..2b45d1455592 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -291,7 +291,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) __u32 cookie = ntohl(th->ack_seq) - 1; struct sock *ret = sk; struct request_sock *req; - int mss; + int full_space, mss; struct rtable *rt; __u8 rcv_wscale; struct flowi4 fl4; @@ -386,8 +386,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* Try to redo what tcp_v4_send_synack did. */ req->rsk_window_clamp = tp->window_clamp ? 
:dst_metric(&rt->dst, RTAX_WINDOW); + /* limit the window selection if the user enforces a smaller rx buffer */ + full_space = tcp_full_space(sk); + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && + (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) + req->rsk_window_clamp = full_space; - tcp_select_initial_window(sk, tcp_full_space(sk), req->mss, + tcp_select_initial_window(sk, full_space, req->mss, &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index eee828590abc..119eb6e55fd3 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -50,6 +50,8 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; static int comp_sack_nr_max = 255; static u32 u32_max_div_HZ = UINT_MAX / HZ; static int one_day_secs = 24 * 3600; +static int tw_timeout_min = 10; +static int tw_timeout_max = 60; /* obsolete */ static int sysctl_tcp_low_latency __read_mostly; @@ -307,24 +309,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2 * TCP_FASTOPEN_KEY_MAX) + (TCP_FASTOPEN_KEY_MAX * 5)) }; - struct tcp_fastopen_context *ctx; - u32 user_key[TCP_FASTOPEN_KEY_MAX * 4]; - __le32 key[TCP_FASTOPEN_KEY_MAX * 4]; + u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)]; + __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)]; char *backup_data; - int ret, i = 0, off = 0, n_keys = 0; + int ret, i = 0, off = 0, n_keys; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) return -ENOMEM; - rcu_read_lock(); - ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); - if (ctx) { - n_keys = tcp_fastopen_context_len(ctx); - memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys); - } - rcu_read_unlock(); - + n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key); if (!n_keys) { memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH); n_keys = 1; @@ -477,6 +471,37 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write, } #endif +static int proc_tcp_tw_timeout(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int val, ret; + struct ctl_table tmp = { + .data = &val, + .maxlen = sizeof(val), + .extra1 = &tw_timeout_min, + .extra2 = &tw_timeout_max + }; + + if (!write) { + int tw_timeout = *((int *)table->data) / HZ; + tmp.data = &tw_timeout; + + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + } + + ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + if (write && ret == 0) { + struct net *net; + + net = container_of(table->data, struct net, + ipv4.sysctl_tw_timeout); + net->ipv4.sysctl_tw_timeout = val * HZ; + } + + return ret; +} + static struct ctl_table ipv4_table[] = { { .procname = "inet_peer_threshold", @@ -656,6 +683,36 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "tcp_loss_init_cwnd", + .data = &sysctl_tcp_loss_init_cwnd, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "tcp_inherit_buffsize", + .data = &sysctl_tcp_inherit_buffsize, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "tcp_init_rto", + .data = &sysctl_tcp_init_rto, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &four + }, + { + .procname = 
"tcp_synack_rto_interval", + .data = &sysctl_tcp_synack_rto_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &four + }, { } }; @@ -1283,13 +1340,6 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ONE, .extra2 = &gso_max_segs, }, - { - .procname = "tcp_loss_init_cwnd", - .data = &sysctl_tcp_loss_init_cwnd, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, { .procname = "tcp_min_rtt_wlen", .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen, @@ -1382,27 +1432,13 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ONE }, { - .procname = "tcp_inherit_buffsize", - .data = &sysctl_tcp_inherit_buffsize, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "tcp_init_rto", - .data = &sysctl_tcp_init_rto, + .procname = "tcp_tw_timeout", + .data = &init_net.ipv4.sysctl_tw_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &four - }, - { - .procname = "tcp_synack_rto_interval", - .data = &sysctl_tcp_synack_rto_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &four + .proc_handler = proc_tcp_tw_timeout, + .extra1 = &tw_timeout_min, + .extra2 = &tw_timeout_max }, { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8e9e6cddc393..09e3edaff45e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -477,9 +477,19 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags) static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, int target, struct sock *sk) { - return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) || - (sk->sk_prot->stream_memory_read ? - sk->sk_prot->stream_memory_read(sk) : false); + int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq); + + if (avail > 0) { + if (avail >= target) + return true; + if (tcp_rmem_pressure(sk)) + return true; + if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss) + return true; + } + if (sk->sk_prot->stream_memory_read) + return sk->sk_prot->stream_memory_read(sk); + return false; } /* @@ -963,7 +973,8 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if (IS_ENABLED(CONFIG_DEBUG_VM) && - WARN_ONCE(PageSlab(page), "page must not be a Slab one")) + WARN_ONCE(!sendpage_ok(page), + "page must not be a Slab one and have page_count > 0")) return -EINVAL; /* Wait for a connection to finish. One exception is TCP Fast Open @@ -1757,10 +1768,11 @@ static int tcp_zerocopy_receive(struct sock *sk, down_read(&current->mm->mmap_sem); - ret = -EINVAL; vma = find_vma(current->mm, address); - if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) - goto out; + if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) { + up_read(&current->mm->mmap_sem); + return -EINVAL; + } zc->length = min_t(unsigned long, zc->length, vma->vm_end - address); tp = tcp_sk(sk); @@ -2044,7 +2056,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* Well, if we have backlog, try to process it now yet. 
*/ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -2149,13 +2161,15 @@ skip_copy: tp->urg_data = 0; tcp_fast_path_check(sk); } - if (used + offset < skb->len) - continue; if (TCP_SKB_CB(skb)->has_rxtstamp) { tcp_update_recv_tstamps(skb, &tss); cmsg_flags |= 2; } + + if (used + offset < skb->len) + continue; + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) @@ -2461,10 +2475,10 @@ adjudge_to_death: LINUX_MIB_TCPABORTONLINGER); } else { const int tmo = tcp_fin_time(sk); - - if (tmo > TCP_TIMEWAIT_LEN) { + int tw_timeout = sock_net(sk)->ipv4.sysctl_tw_timeout; + if (tmo > tw_timeout) { inet_csk_reset_keepalive_timer(sk, - tmo - TCP_TIMEWAIT_LEN); + tmo - tw_timeout); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto out; @@ -2613,6 +2627,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; + icsk->icsk_probes_tstamp = 0; icsk->icsk_rto = TCP_TIMEOUT_INIT; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd = TCP_INIT_CWND; @@ -2620,6 +2635,9 @@ int tcp_disconnect(struct sock *sk, int flags) tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; + if (icsk->icsk_ca_ops->release) + icsk->icsk_ca_ops->release(sk); + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; tcp_clear_retrans(tp); @@ -2939,16 +2957,23 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_QUEUE_SEQ: - if (sk->sk_state != TCP_CLOSE) + if (sk->sk_state != TCP_CLOSE) { err = -EPERM; - else if (tp->repair_queue == TCP_SEND_QUEUE) - WRITE_ONCE(tp->write_seq, val); - else if (tp->repair_queue == TCP_RECV_QUEUE) { - WRITE_ONCE(tp->rcv_nxt, val); - WRITE_ONCE(tp->copied_seq, val); - } - else + } else if (tp->repair_queue == TCP_SEND_QUEUE) { + if (!tcp_rtx_queue_empty(sk)) + err = -EPERM; + else + WRITE_ONCE(tp->write_seq, val); + } else if (tp->repair_queue == TCP_RECV_QUEUE) { + if (tp->rcv_nxt != tp->copied_seq) { + err = -EPERM; + } else { + WRITE_ONCE(tp->rcv_nxt, val); + WRITE_ONCE(tp->copied_seq, val); + } + } else { err = -EINVAL; + } break; case TCP_REPAIR_OPTIONS: @@ -3074,10 +3099,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); - else - err = -EINVAL; + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); break; #endif case TCP_USER_TIMEOUT: @@ -3530,22 +3552,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return 0; case TCP_FASTOPEN_KEY: { - __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; - struct tcp_fastopen_context *ctx; - unsigned int key_len = 0; + u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; + unsigned int key_len; if (get_user(len, optlen)) return -EFAULT; - rcu_read_lock(); - ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); - if (ctx) { - key_len = tcp_fastopen_context_len(ctx) * - TCP_FASTOPEN_KEY_LENGTH; - memcpy(&key[0], &ctx->key[0], key_len); - } - rcu_read_unlock(); - + key_len = tcp_fastopen_get_cipher(net, icsk, key) * + TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); if (put_user(len, optlen)) return -EFAULT; @@ -3847,10 +3861,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data); int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) { + u8 keylen 
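A note on the tcp_tw_timeout plumbing above: the tunable is kept internally in jiffies, so proc_tcp_tw_timeout() multiplies the user's seconds by HZ on write and divides on read, with proc_dointvec_minmax clamping to the 10..60 s range. A runnable toy version of that round-trip; the HZ value and function names here are illustrative, not the kernel's:

#include <stdio.h>

#define HZ 1000                                 /* illustrative; kernels vary */
static const int tw_min = 10, tw_max = 60;      /* seconds, from the patch */
static int sysctl_tw_timeout;                   /* stored in jiffies */

static int tw_timeout_write(int seconds)
{
        if (seconds < tw_min || seconds > tw_max)
                return -1;                      /* proc_dointvec_minmax rejects */
        sysctl_tw_timeout = seconds * HZ;
        return 0;
}

static int tw_timeout_read(void)
{
        return sysctl_tw_timeout / HZ;          /* shown to userspace in seconds */
}

int main(void)
{
        tw_timeout_write(30);
        printf("tcp_tw_timeout = %ds (%d jiffies)\n",
               tw_timeout_read(), sysctl_tw_timeout);
        return 0;
}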
= READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ struct scatterlist sg; - sg_init_one(&sg, key->key, key->keylen); - ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); + sg_init_one(&sg, key->key, keylen); + ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); + + /* tcp_md5_do_add() might change key->key under us */ return crypto_ahash_update(hp->md5_req); } EXPORT_SYMBOL(tcp_md5_hash_key); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 6c4d79baff26..6ea3dc2e4219 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -945,7 +945,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) filter_expired = after(tcp_jiffies32, bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ); if (rs->rtt_us >= 0 && - (rs->rtt_us <= bbr->min_rtt_us || + (rs->rtt_us < bbr->min_rtt_us || (filter_expired && !rs->is_ack_delayed))) { bbr->min_rtt_us = rs->rtt_us; bbr->min_rtt_stamp = tcp_jiffies32; diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 8a01428f80c1..63dd21cd240a 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -47,8 +47,8 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, { struct iov_iter *iter = &msg->msg_iter; int peek = flags & MSG_PEEK; - int i, ret, copied = 0; struct sk_msg *msg_rx; + int i, copied = 0; msg_rx = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); @@ -69,17 +69,16 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, page = sg_page(sge); if (copied + copy > len) copy = len - copied; - ret = copy_page_to_iter(page, sge->offset, copy, iter); - if (ret != copy) { - msg_rx->sg.start = i; - return -EFAULT; - } + copy = copy_page_to_iter(page, sge->offset, copy, iter); + if (!copy) + return copied ? copied : -EFAULT; copied += copy; if (likely(!peek)) { sge->offset += copy; sge->length -= copy; - sk_mem_uncharge(sk, copy); + if (!msg_rx->skb) + sk_mem_uncharge(sk, copy); msg_rx->sg.size -= copy; if (!sge->length) { @@ -88,6 +87,11 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, put_page(page); } } else { + /* Lets not optimize peek case if copy_page_to_iter + * didn't copy the entire length lets just break. + */ + if (copy != sge->length) + return copied; sk_msg_iter_var_next(i); } @@ -96,6 +100,9 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, } while (i != msg_rx->sg.end); if (unlikely(peek)) { + if (msg_rx == list_last_entry(&psock->ingress_msg, + struct sk_msg, list)) + break; msg_rx = list_next_entry(msg_rx, list); continue; } @@ -121,14 +128,17 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct sk_psock *psock; int copied, ret; + if (unlikely(flags & MSG_ERRQUEUE)) + return inet_recv_error(sk, msg, len, addr_len); + psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - if (unlikely(flags & MSG_ERRQUEUE)) - return inet_recv_error(sk, msg, len, addr_len); if (!skb_queue_empty(&sk->sk_receive_queue) && - sk_psock_queue_empty(psock)) + sk_psock_queue_empty(psock)) { + sk_psock_put(sk, psock); return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + } lock_sock(sk); msg_bytes_ready: copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags); @@ -200,7 +210,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, if (!ret) { msg->sg.start = i; - msg->sg.size -= apply_bytes; sk_psock_queue_msg(psock, tmp); sk_psock_data_ready(sk, psock); } else { @@ -528,57 +537,7 @@ out_err: return copied ? 
copied : err; } -static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock) -{ - struct sk_psock_link *link; - - while ((link = sk_psock_link_pop(psock))) { - sk_psock_unlink(sk, link); - sk_psock_free_link(link); - } -} - -static void tcp_bpf_unhash(struct sock *sk) -{ - void (*saved_unhash)(struct sock *sk); - struct sk_psock *psock; - - rcu_read_lock(); - psock = sk_psock(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - if (sk->sk_prot->unhash) - sk->sk_prot->unhash(sk); - return; - } - - saved_unhash = psock->saved_unhash; - tcp_bpf_remove(sk, psock); - rcu_read_unlock(); - saved_unhash(sk); -} - -static void tcp_bpf_close(struct sock *sk, long timeout) -{ - void (*saved_close)(struct sock *sk, long timeout); - struct sk_psock *psock; - - lock_sock(sk); - rcu_read_lock(); - psock = sk_psock(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - release_sock(sk); - return sk->sk_prot->close(sk, timeout); - } - - saved_close = psock->saved_close; - tcp_bpf_remove(sk, psock); - rcu_read_unlock(); - release_sock(sk); - saved_close(sk, timeout); -} - +#ifdef CONFIG_BPF_STREAM_PARSER enum { TCP_BPF_IPV4, TCP_BPF_IPV6, @@ -599,8 +558,8 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS], struct proto *base) { prot[TCP_BPF_BASE] = *base; - prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash; - prot[TCP_BPF_BASE].close = tcp_bpf_close; + prot[TCP_BPF_BASE].unhash = sock_map_unhash; + prot[TCP_BPF_BASE].close = sock_map_close; prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg; prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read; @@ -629,26 +588,6 @@ static int __init tcp_bpf_v4_build_proto(void) } core_initcall(tcp_bpf_v4_build_proto); -static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock) -{ - int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; - int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; - - sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]); -} - -static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock) -{ - int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; - int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; - - /* Reinit occurs when program types change e.g. TCP_BPF_TX is removed - * or added requiring sk_prot hook updates. We keep original saved - * hooks in this case. - */ - sk->sk_prot = &tcp_bpf_prots[family][config]; -} - static int tcp_bpf_assert_proto_ops(struct proto *ops) { /* In order to avoid retpoline, we make assumptions when we call @@ -660,34 +599,34 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops) ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP; } -void tcp_bpf_reinit(struct sock *sk) +struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock) { - struct sk_psock *psock; + int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; + int config = psock->progs.msg_parser ? 
TCP_BPF_TX : TCP_BPF_BASE; - sock_owned_by_me(sk); + if (!psock->sk_proto) { + struct proto *ops = READ_ONCE(sk->sk_prot); - rcu_read_lock(); - psock = sk_psock(sk); - tcp_bpf_reinit_sk_prot(sk, psock); - rcu_read_unlock(); -} + if (tcp_bpf_assert_proto_ops(ops)) + return ERR_PTR(-EINVAL); -int tcp_bpf_init(struct sock *sk) -{ - struct proto *ops = READ_ONCE(sk->sk_prot); - struct sk_psock *psock; - - sock_owned_by_me(sk); - - rcu_read_lock(); - psock = sk_psock(sk); - if (unlikely(!psock || psock->sk_proto || - tcp_bpf_assert_proto_ops(ops))) { - rcu_read_unlock(); - return -EINVAL; + tcp_bpf_check_v6_needs_rebuild(sk, ops); } - tcp_bpf_check_v6_needs_rebuild(sk, ops); - tcp_bpf_update_sk_prot(sk, psock); - rcu_read_unlock(); - return 0; + + return &tcp_bpf_prots[family][config]; } + +/* If a child got cloned from a listening socket that had tcp_bpf + * protocol callbacks installed, we need to restore the callbacks to + * the default ones because the child does not inherit the psock state + * that tcp_bpf callbacks expect. + */ +void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) +{ + int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; + struct proto *prot = newsk->sk_prot; + + if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE]) + newsk->sk_prot = sk->sk_prot_creator; +} +#endif /* CONFIG_BPF_STREAM_PARSER */ diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index c445a81d144e..62e156b914d1 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -21,7 +21,7 @@ static DEFINE_SPINLOCK(tcp_cong_list_lock); static LIST_HEAD(tcp_cong_list); /* Simple linear search, don't expect many entries! */ -static struct tcp_congestion_ops *tcp_ca_find(const char *name) +struct tcp_congestion_ops *tcp_ca_find(const char *name) { struct tcp_congestion_ops *e; @@ -162,7 +162,7 @@ void tcp_assign_congestion_control(struct sock *sk) rcu_read_lock(); ca = rcu_dereference(net->ipv4.tcp_congestion_control); - if (unlikely(!try_module_get(ca->owner))) + if (unlikely(!bpf_try_module_get(ca, ca->owner))) ca = &tcp_reno; icsk->icsk_ca_ops = ca; rcu_read_unlock(); @@ -197,7 +197,12 @@ static void tcp_reinit_congestion_control(struct sock *sk, icsk->icsk_ca_setsockopt = 1; memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); - if (sk->sk_state != TCP_CLOSE) + if (ca->flags & TCP_CONG_NEEDS_ECN) + INET_ECN_xmit(sk); + else + INET_ECN_dontxmit(sk); + + if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) tcp_init_congestion_control(sk); } @@ -208,7 +213,7 @@ void tcp_cleanup_congestion_control(struct sock *sk) if (icsk->icsk_ca_ops->release) icsk->icsk_ca_ops->release(sk); - module_put(icsk->icsk_ca_ops->owner); + bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); } /* Used by sysctl to change default congestion control */ @@ -222,12 +227,16 @@ int tcp_set_default_congestion_control(struct net *net, const char *name) ca = tcp_ca_find_autoload(net, name); if (!ca) { ret = -ENOENT; - } else if (!try_module_get(ca->owner)) { + } else if (!bpf_try_module_get(ca, ca->owner)) { ret = -EBUSY; + } else if (!net_eq(net, &init_net) && + !(ca->flags & TCP_CONG_NON_RESTRICTED)) { + /* Only init netns can set default to a restricted algorithm */ + ret = -EPERM; } else { prev = xchg(&net->ipv4.tcp_congestion_control, ca); if (prev) - module_put(prev->owner); + bpf_module_put(prev, prev->owner); ca->flags |= TCP_CONG_NON_RESTRICTED; ret = 0; @@ -360,19 +369,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, } else if (!load) { const struct 
tcp_congestion_ops *old_ca = icsk->icsk_ca_ops; - if (try_module_get(ca->owner)) { + if (bpf_try_module_get(ca, ca->owner)) { if (reinit) { tcp_reinit_congestion_control(sk, ca); } else { icsk->icsk_ca_ops = ca; - module_put(old_ca->owner); + bpf_module_put(old_ca, old_ca->owner); } } else { err = -EBUSY; } } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) { err = -EPERM; - } else if (!try_module_get(ca->owner)) { + } else if (!bpf_try_module_get(ca, ca->owner)) { err = -EBUSY; } else { tcp_reinit_congestion_control(sk, ca); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 1b3d032a4df2..ee6c38a73325 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -404,6 +404,8 @@ static void hystart_update(struct sock *sk, u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) ca->curr_rtt = delay; diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 549506162dde..b80fde5f1fc6 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -179,15 +179,15 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin) } static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc); + inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r); } -static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, +static int tcp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req); + return inet_diag_dump_one_icsk(&tcp_hashinfo, cb, req); } #ifdef CONFIG_INET_DIAG_DESTROY diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index a915ade0c818..a9971e41f31b 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -108,6 +108,29 @@ out: return err; } +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key) +{ + struct tcp_fastopen_context *ctx; + int n_keys = 0, i; + + rcu_read_lock(); + if (icsk) + ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); + else + ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); + if (ctx) { + n_keys = tcp_fastopen_context_len(ctx); + for (i = 0; i < n_keys; i++) { + put_unaligned_le64(ctx->key[i].key[0], key + (i * 2)); + put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1); + } + } + rcu_read_unlock(); + + return n_keys; +} + static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req, struct sk_buff *syn, const siphash_key_t *key, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 25a66d436222..29b4cf6f79bf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -263,7 +263,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb) * cwnd may be very low (even just 1 packet), so we should ACK * immediately. 
*/ - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } } @@ -448,7 +449,6 @@ void tcp_init_buffer_space(struct sock *sk) if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_sndbuf_expand(sk); - tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); tcp_mstamp_refresh(tp); tp->rcvq_space.time = tp->tcp_mstamp; tp->rcvq_space.seq = tp->copied_seq; @@ -472,6 +472,8 @@ void tcp_init_buffer_space(struct sock *sk) tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_jiffies32; + tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, + (u32)TCP_INIT_CWND * tp->advmss); } /* 4. Recalculate window clamp after socket hit its memory bounds. */ @@ -2765,7 +2767,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag) } else if (tcp_is_rack(sk)) { u32 prior_retrans = tp->retrans_out; - tcp_rack_mark_lost(sk); + if (tcp_rack_mark_lost(sk)) + *ack_flag &= ~FLAG_SET_XMIT_TIMER; if (prior_retrans > tp->retrans_out) *ack_flag |= FLAG_LOST_RETRANS; } @@ -2946,6 +2949,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + if (!delta) + delta = 1; seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ); ca_rtt_us = seq_rtt_us; } @@ -3285,6 +3290,7 @@ static void tcp_ack_probe(struct sock *sk) return; if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { icsk->icsk_backoff = 0; + icsk->icsk_probes_tstamp = 0; inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); /* Socket must be waked up by subsequent tcp_data_snd_check(). * This function is not for random using! @@ -3292,6 +3298,7 @@ static void tcp_ack_probe(struct sock *sk) } else { unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); + when = tcp_clamp_probe0_to_user_timeout(sk, when); tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX, NULL); } @@ -3507,10 +3514,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) } } -/* This routine deals with acks during a TLP episode. - * We mark the end of a TLP episode on receiving TLP dupack or when - * ack is after tlp_high_seq. - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. +/* This routine deals with acks during a TLP episode and ends an episode by + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack */ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { @@ -3519,7 +3524,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) if (before(ack, tp->tlp_high_seq)) return; - if (flag & FLAG_DSACKING_ACK) { + if (!tp->tlp_retrans) { + /* TLP of new data has been acknowledged */ + tp->tlp_high_seq = 0; + } else if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { @@ -3685,6 +3693,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_in_ack_event(sk, ack_ev_flags); } + /* This is a deviation from RFC3168 since it states that: + * "When the TCP data sender is ready to set the CWR bit after reducing + * the congestion window, it SHOULD set the CWR bit only on the first + * new data packet that it transmits." + * We accept CWR on pure ACKs to be more robust + * with widely-deployed TCP implementations that do this. 
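Both ECN hunks here rely on one invariant: a segment occupies sequence space (payload, SYN or FIN) exactly when its end sequence differs from its start sequence, so seq == end_seq identifies a pure ACK. A tiny sketch of the test with simplified userspace types; only the two fields mirrored from TCP_SKB_CB() are real:

#include <stdbool.h>
#include <stdint.h>

struct seg {
        uint32_t seq;           /* mirrors TCP_SKB_CB(skb)->seq */
        uint32_t end_seq;       /* mirrors TCP_SKB_CB(skb)->end_seq */
};

/* Pure ACKs advance no sequence space; forcing an immediate ACK (as in
 * tcp_ecn_accept_cwr above) only makes sense when this returns true. */
static bool occupies_seq_space(const struct seg *s)
{
        return s->seq != s->end_seq;
}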
+ */ + tcp_ecn_accept_cwr(sk, skb); + /* We passed data and got it acked, remove any soft error * log. Something worked... */ @@ -3701,9 +3718,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); - /* If needed, reset TLP/RTO timer; RACK may later override this. */ - if (flag & FLAG_SET_XMIT_TIMER) - tcp_set_xmit_timer(sk); if (tcp_ack_is_dubious(sk, flag)) { if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) { @@ -3716,6 +3730,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) &rexmit); } + /* If needed, reset TLP/RTO timer when RACK doesn't set. */ + if (flag & FLAG_SET_XMIT_TIMER) + tcp_set_xmit_timer(sk); + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) sk_dst_confirm(sk); @@ -4561,6 +4579,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); + sk->sk_data_ready(sk); tcp_drop(sk, skb); return; } @@ -4594,7 +4613,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { coalesce_done: - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4678,7 +4701,11 @@ add_sack: tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); skb_condense(skb); skb_set_owner_r(skb, sk); } @@ -4758,7 +4785,9 @@ void tcp_data_ready(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); int avail = tp->rcv_nxt - tp->copied_seq; - if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE)) + if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && + !sock_flag(sk, SOCK_DONE) && + tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss) return; sk->sk_data_ready(sk); @@ -4778,8 +4807,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); - tcp_ecn_accept_cwr(sk, skb); - tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. @@ -4798,6 +4825,7 @@ queue_and_out: sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); + sk->sk_data_ready(sk); goto drop; } @@ -5689,6 +5717,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; + } else { + tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); } __tcp_ack_snd_check(sk, 0); @@ -6284,7 +6314,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) break; case TCP_FIN_WAIT1: { - int tmo; + int tmo, tw_timeout; if (req) tcp_rcv_synrecv_state_fastopen(sk); @@ -6319,8 +6349,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) } tmo = tcp_fin_time(sk); - if (tmo > TCP_TIMEWAIT_LEN) { - inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); + tw_timeout = sock_net(sk)->ipv4.sysctl_tw_timeout; + + if (tmo > tw_timeout) { + inet_csk_reset_keepalive_timer(sk, tmo - tw_timeout); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. 
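The FIN_WAIT1 branch above (like the earlier tcp_close() hunk) splits the FIN_WAIT2 linger time against the per-netns tw_timeout: the excess is slept on the keepalive timer first, and only the final tw_timeout jiffies go through the timewait machinery. A kernel-context sketch of just that selection, assuming jiffies-valued inputs:

/* Sketch: scheduling a FIN_WAIT2 linger of 'tmo' jiffies once the fixed
 * TCP_TIMEWAIT_LEN is replaced by a per-netns tw_timeout. */
static void schedule_fin_wait2(struct sock *sk, int tmo, int tw_timeout)
{
        if (tmo > tw_timeout)
                /* burn the excess in FIN_WAIT2 proper first ... */
                inet_csk_reset_keepalive_timer(sk, tmo - tw_timeout);
        else
                /* ... otherwise hand the whole remainder to timewait */
                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
}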
* It is not a big problem, but it looks confusing @@ -6461,6 +6493,7 @@ static void tcp_openreq_init(struct request_sock *req, req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ req->cookie_ts = 0; + req->mark = skb->mark; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tcp_rsk(req)->snt_synack = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 86861041323f..5f301ffe956b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -76,6 +76,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/inetdevice.h> +#include <linux/btf_ids.h> #include <crypto/hash.h> #include <linux/scatterlist.h> @@ -1079,9 +1080,18 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen); if (key) { - /* Pre-existing entry - just update that one. */ + /* Pre-existing entry - just update that one. + * Note that the key might be used concurrently. + */ memcpy(key->key, newkey, newkeylen); - key->keylen = newkeylen; + + /* Pairs with READ_ONCE() in tcp_md5_hash_key(). + * Also note that a reader could catch new key->keylen value + * but old key->key[], this is the reason we use __GFP_ZERO + * at sock_kmalloc() time below these lines. + */ + WRITE_ONCE(key->keylen, newkeylen); + return 0; } @@ -1097,7 +1107,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, rcu_assign_pointer(tp->md5sig_info, md5sig); } - key = sock_kmalloc(sk, sizeof(*key), gfp); + key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); if (!key) return -ENOMEM; if (!tcp_alloc_md5sig_pool()) { @@ -1654,6 +1664,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); + u32 tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; @@ -1661,6 +1672,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) unsigned int hdrlen; bool fragstolen; u32 gso_segs; + u32 gso_size; int delta; /* In case all data was pulled from skb frags (in __pskb_pull_tail()), @@ -1686,13 +1698,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) */ th = (const struct tcphdr *)skb->data; hdrlen = th->doff * 4; - shinfo = skb_shinfo(skb); - - if (!shinfo->gso_size) - shinfo->gso_size = skb->len - hdrlen; - - if (!shinfo->gso_segs) - shinfo->gso_segs = 1; tail = sk->sk_backlog.tail; if (!tail) @@ -1715,13 +1720,22 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) goto no_coalesce; __skb_pull(skb, hdrlen); - if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) { - thtail->window = th->window; + shinfo = skb_shinfo(skb); + gso_size = shinfo->gso_size ?: skb->len; + gso_segs = shinfo->gso_segs ?: 1; + + shinfo = skb_shinfo(tail); + tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); + tail_gso_segs = shinfo->gso_segs ?: 1; + + if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) { TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq; - if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq)) + if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) { TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq; + thtail->window = th->window; + } /* We have to update both TCP_SKB_CB(tail)->tcp_flags and * thtail->fin, so that the fast path in tcp_rcv_established() @@ -1741,11 +1755,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) } /* Not as strict as GRO. 
We only need to carry mss max value */ - skb_shinfo(tail)->gso_size = max(shinfo->gso_size, - skb_shinfo(tail)->gso_size); - - gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs; - skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF); + shinfo->gso_size = max(gso_size, tail_gso_size); + shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF); sk->sk_backlog.len += delta; __NET_INC_STATS(sock_net(sk), @@ -2165,13 +2176,18 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); */ static void *listening_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; struct sock *sk = cur; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; @@ -2189,7 +2205,8 @@ get_sk: sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) return sk; } spin_unlock(&ilb->lock); @@ -2228,11 +2245,16 @@ static inline bool empty_bucket(const struct tcp_iter_state *st) */ static void *established_get_first(struct seq_file *seq) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; @@ -2245,7 +2267,8 @@ static void *established_get_first(struct seq_file *seq) spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { - if (sk->sk_family != afinfo->family || + if ((afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family) || !net_eq(sock_net(sk), net)) { continue; } @@ -2262,19 +2285,25 @@ out: static void *established_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct sock *sk = cur; struct hlist_nulls_node *node; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { - if (sk->sk_family == afinfo->family && + if ((afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) && net_eq(sock_net(sk), net)) return sk; } @@ -2553,6 +2582,74 @@ out: return 0; } +#ifdef CONFIG_BPF_SYSCALL +struct bpf_iter__tcp { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct sock_common *, sk_common); + uid_t uid __aligned(8); +}; + +static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, + struct sock_common *sk_common, uid_t uid) +{ + struct bpf_iter__tcp ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.sk_common = sk_common; + ctx.uid = uid; + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + struct sock *sk = v; + uid_t uid; + + if (v == SEQ_START_TOKEN) + return 0; + + 
if (sk->sk_state == TCP_TIME_WAIT) { + uid = 0; + } else if (sk->sk_state == TCP_NEW_SYN_RECV) { + const struct request_sock *req = v; + + uid = from_kuid_munged(seq_user_ns(seq), + sock_i_uid(req->rsk_listener)); + } else { + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); + } + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + return tcp_prog_seq_show(prog, &meta, v, uid); +} + +static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)tcp_prog_seq_show(prog, &meta, v, 0); + } + + tcp_seq_stop(seq, v); +} + +static const struct seq_operations bpf_iter_tcp_seq_ops = { + .show = bpf_iter_tcp_seq_show, + .start = tcp_seq_start, + .next = tcp_seq_next, + .stop = bpf_iter_tcp_seq_stop, +}; +#endif + static const struct seq_operations tcp4_seq_ops = { .show = tcp4_seq_show, .start = tcp_seq_start, @@ -2646,7 +2743,8 @@ static void __net_exit tcp_sk_exit(struct net *net) int cpu; if (net->ipv4.tcp_congestion_control) - module_put(net->ipv4.tcp_congestion_control->owner); + bpf_module_put(net->ipv4.tcp_congestion_control, + net->ipv4.tcp_congestion_control->owner); for_each_possible_cpu(cpu) inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); @@ -2735,6 +2833,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2; net->ipv4.sysctl_tcp_pacing_ss_ratio = 200; net->ipv4.sysctl_tcp_pacing_ca_ratio = 120; + net->ipv4.sysctl_tw_timeout = TCP_TIMEWAIT_LEN; if (net != &init_net) { net->ipv4.sysctl_tcp_max_orphans = NR_FILE; memcpy(net->ipv4.sysctl_tcp_rmem, @@ -2753,7 +2852,8 @@ static int __net_init tcp_sk_init(struct net *net) /* Reno is always built in */ if (!net_eq(net, &init_net) && - try_module_get(init_net.ipv4.tcp_congestion_control->owner)) + bpf_try_module_get(init_net.ipv4.tcp_congestion_control, + init_net.ipv4.tcp_congestion_control->owner)) net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control; else net->ipv4.tcp_congestion_control = &tcp_reno; @@ -2781,8 +2881,68 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { .exit_batch = tcp_sk_exit_batch, }; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta, + struct sock_common *sk_common, uid_t uid) + +static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux) +{ + struct tcp_iter_state *st = priv_data; + struct tcp_seq_afinfo *afinfo; + int ret; + + afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); + if (!afinfo) + return -ENOMEM; + + afinfo->family = AF_UNSPEC; + st->bpf_seq_afinfo = afinfo; + ret = bpf_iter_init_seq_net(priv_data, aux); + if (ret) + kfree(afinfo); + return ret; +} + +static void bpf_iter_fini_tcp(void *priv_data) +{ + struct tcp_iter_state *st = priv_data; + + kfree(st->bpf_seq_afinfo); + bpf_iter_fini_seq_net(priv_data); +} + +static const struct bpf_iter_seq_info tcp_seq_info = { + .seq_ops = &bpf_iter_tcp_seq_ops, + .init_seq_private = bpf_iter_init_tcp, + .fini_seq_private = bpf_iter_fini_tcp, + .seq_priv_size = sizeof(struct tcp_iter_state), +}; + +static struct bpf_iter_reg tcp_reg_info = { + .target = "tcp", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__tcp, sk_common), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &tcp_seq_info, +}; + +static void __init bpf_iter_register(void) +{ + tcp_reg_info.ctx_arg_info[0].btf_id = 
btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON]; + if (bpf_iter_reg_target(&tcp_reg_info)) + pr_warn("Warning: could not register bpf iterator tcp\n"); +} + +#endif + void __init tcp_v4_init(void) { if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); + +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_register(); +#endif } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 24ac727df7bd..b3507c1a9139 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -97,6 +97,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); bool paws_reject = false; + struct net *net = sock_net((struct sock *)tw); + int tw_timeout = net->ipv4.sysctl_tw_timeout; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { @@ -151,7 +153,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, tw_timeout); return TCP_TW_ACK; } @@ -188,7 +190,7 @@ kill: return TCP_TW_SUCCESS; } } else { - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, tw_timeout); } if (tmp_opt.saw_tstamp) { @@ -239,7 +241,7 @@ kill: * Do not reschedule in the last case. */ if (paws_reject || th->ack) - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, tw_timeout); return tcp_timewait_check_oow_rate_limit( tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); @@ -257,7 +259,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); struct inet_timewait_sock *tw; - struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; + struct net *net = sock_net(sk); + struct inet_timewait_death_row *tcp_death_row = &net->ipv4.tcp_death_row; tw = inet_twsk_alloc(sk, tcp_death_row, state); @@ -265,6 +268,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); struct inet_sock *inet = inet_sk(sk); + int tw_timeout = net->ipv4.sysctl_tw_timeout; tw->tw_transparent = inet->transparent; tw->tw_mark = sk->sk_mark; @@ -317,7 +321,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) timeo = rto; if (state == TCP_TIME_WAIT) - timeo = TCP_TIMEWAIT_LEN; + timeo = tw_timeout; /* tw_timer is pinned, so we need to make sure BH are disabled * in following section, otherwise timer handler could run before @@ -416,7 +420,7 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) rcu_read_lock(); ca = tcp_ca_find_key(ca_key); - if (likely(ca && try_module_get(ca->owner))) { + if (likely(ca && bpf_try_module_get(ca, ca->owner))) { icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); icsk->icsk_ca_ops = ca; ca_got_dst = true; @@ -427,7 +431,7 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) /* If no valid choice made yet, assign current system default ca. 
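For the "tcp" iterator target registered in tcp_v4_init() above, the consumer is an ordinary BPF program attached via SEC("iter/tcp"); its context is exactly the bpf_iter__tcp struct from this patch. A minimal sketch modeled on the kernel's selftests; the header names and print format are illustrative:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/tcp")
int dump_tcp(struct bpf_iter__tcp *ctx)
{
        struct sock_common *sk_common = ctx->sk_common;
        struct seq_file *seq = ctx->meta->seq;

        if (!sk_common)
                return 0;       /* end-of-iteration callback, nothing to show */

        /* uid is filled in by tcp_prog_seq_show() in the patch above */
        BPF_SEQ_PRINTF(seq, "family=%d uid=%u\n",
                       sk_common->skc_family, ctx->uid);
        return 0;
}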
*/ if (!ca_got_dst && (!icsk->icsk_ca_setsockopt || - !try_module_get(icsk->icsk_ca_ops->owner))) + !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner))) tcp_assign_congestion_control(sk); tcp_set_ca_state(sk, TCP_CA_Open); @@ -555,6 +559,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->fastopen_req = NULL; RCU_INIT_POINTER(newtp->fastopen_rsk, NULL); + tcp_bpf_clone(sk, newsk); + __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); return newsk; @@ -803,8 +809,11 @@ embryonic_reset: tcp_reset(sk); } if (!fastopen) { - inet_csk_reqsk_queue_drop(sk, req); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); + bool unlinked = inet_csk_reqsk_queue_drop(sk, req); + + if (unlinked) + __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); + *req_stolen = !unlinked; } return NULL; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a9c34a4ef216..f9d3c5d3f867 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -687,7 +687,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, const struct tcp_md5sig_key *md5, - struct tcp_fastopen_cookie *foc) + struct tcp_fastopen_cookie *foc, + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; @@ -702,7 +703,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. */ - ireq->tstamp_ok &= !ireq->sack_ok; + if (synack_type != TCP_SYNACK_COOKIE) + ireq->tstamp_ok &= !ireq->sack_ok; } #endif @@ -1698,7 +1700,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) * window, and remember whether we were cwnd-limited then. */ if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out) { + tp->packets_out > tp->max_packets_out || + is_cwnd_limited) { tp->max_packets_out = tp->packets_out; tp->max_packets_seq = tp->snd_nxt; tp->is_cwnd_limited = is_cwnd_limited; @@ -2520,6 +2523,10 @@ repair: else tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); + is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); + if (likely(sent_pkts || is_cwnd_limited)) + tcp_cwnd_validate(sk, is_cwnd_limited); + if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; @@ -2527,8 +2534,6 @@ repair: /* Send one loss probe per tail loss episode. */ if (push_one != 2) tcp_schedule_loss_probe(sk, false); - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); - tcp_cwnd_validate(sk, is_cwnd_limited); return false; } return !tp->packets_out && !tcp_write_queue_empty(sk); @@ -2608,6 +2613,11 @@ void tcp_send_loss_probe(struct sock *sk) int pcount; int mss = tcp_current_mss(sk); + /* At most one outstanding TLP */ + if (tp->tlp_high_seq) + goto rearm_timer; + + tp->tlp_retrans = 0; skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -2625,10 +2635,6 @@ void tcp_send_loss_probe(struct sock *sk) return; } - /* At most one outstanding TLP retransmission. */ - if (tp->tlp_high_seq) - goto rearm_timer; - if (skb_still_in_host_queue(sk, skb)) goto rearm_timer; @@ -2650,10 +2656,12 @@ void tcp_send_loss_probe(struct sock *sk) if (__tcp_retransmit_skb(sk, skb, 1)) goto rearm_timer; + tp->tlp_retrans = 1; + +probe_sent: /* Record snd_nxt for loss detection. 
*/ tp->tlp_high_seq = tp->snd_nxt; -probe_sent: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; @@ -3372,7 +3380,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, #endif skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, - foc) + sizeof(*th); + foc, synack_type) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); @@ -3422,8 +3430,8 @@ static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) rcu_read_lock(); ca = tcp_ca_find_key(ca_key); - if (likely(ca && try_module_get(ca->owner))) { - module_put(icsk->icsk_ca_ops->owner); + if (likely(ca && bpf_try_module_get(ca, ca->owner))) { + bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); icsk->icsk_ca_ops = ca; } @@ -3873,6 +3881,7 @@ void tcp_send_probe0(struct sock *sk) /* Cancel probe timer, if it is not required. */ icsk->icsk_probes_out = 0; icsk->icsk_backoff = 0; + icsk->icsk_probes_tstamp = 0; return; } @@ -3887,6 +3896,8 @@ void tcp_send_probe0(struct sock *sk) */ timeout = TCP_RESOURCE_PROBE_INTERVAL; } + + timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL); } diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index fdb715bdd2d1..8757bb6cb1d9 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -110,13 +110,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) } } -void tcp_rack_mark_lost(struct sock *sk) +bool tcp_rack_mark_lost(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); u32 timeout; if (!tp->rack.advanced) - return; + return false; /* Reset the advanced flag to avoid unnecessary queue scanning */ tp->rack.advanced = 0; @@ -126,6 +126,7 @@ void tcp_rack_mark_lost(struct sock *sk) inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, timeout, inet_csk(sk)->icsk_rto); } + return !!timeout; } /* Record the most recently (re)sent time among the (s)acked packets diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 3c787dea6603..ee3bb8bb0e48 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -56,6 +56,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); } +u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + u32 remaining; + s32 elapsed; + + if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp) + return when; + + elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp; + if (unlikely(elapsed < 0)) + elapsed = 0; + remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed; + remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN); + + return min_t(u32, remaining, when); +} + /** * tcp_write_err() - close socket and save error info * @sk: The socket the error has appeared on. @@ -112,9 +130,11 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) shift++; if (tcp_check_oom(sk, shift)) { + int tw_timeout = sock_net(sk)->ipv4.sysctl_tw_timeout; + /* Catch exceptional cases, when connection requires reset. * 1. Last segment was sent recently. */ - if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN || + if ((s32)(tcp_jiffies32 - tp->lsndtime) <= tw_timeout || /* 2. Window is closed. 
*/ (!tp->snd_wnd && !tp->packets_out)) do_reset = true; @@ -360,6 +380,7 @@ static void tcp_probe_timer(struct sock *sk) if (tp->packets_out || !skb) { icsk->icsk_probes_out = 0; + icsk->icsk_probes_tstamp = 0; return; } @@ -371,13 +392,12 @@ static void tcp_probe_timer(struct sock *sk) * corresponding system limit. We also implement similar policy when * we use RTO to probe window in tcp_retransmit_timer(). */ - if (icsk->icsk_user_timeout) { - u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out, - tcp_probe0_base(sk)); - - if (elapsed >= icsk->icsk_user_timeout) - goto abort; - } + if (!icsk->icsk_probes_tstamp) + icsk->icsk_probes_tstamp = tcp_jiffies32; + else if (icsk->icsk_user_timeout && + (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >= + msecs_to_jiffies(icsk->icsk_user_timeout)) + goto abort; max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { @@ -686,7 +706,8 @@ static void tcp_keepalive_timer (struct timer_list *t) tcp_mstamp_refresh(tp); if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { if (tp->linger2 >= 0) { - const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; + int tw_timeout = sock_net(sk)->ipv4.sysctl_tw_timeout; + const int tmo = tcp_fin_time(sk) - tw_timeout; if (tmo > 0) { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 9168645b760e..30a0169a28f8 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -101,12 +101,6 @@ void tcp_update_ulp(struct sock *sk, struct proto *proto, { struct inet_connection_sock *icsk = inet_csk(sk); - if (!icsk->icsk_ulp_ops) { - sk->sk_write_space = write_space; - sk->sk_prot = proto; - return; - } - if (icsk->icsk_ulp_ops->update) icsk->icsk_ulp_ops->update(sk, proto, write_space); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6ad20c6b463d..33555ac41582 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -106,6 +106,7 @@ #include <net/xfrm.h> #include <trace/events/udp.h> #include <linux/static_key.h> +#include <linux/btf_ids.h> #include <trace/events/skb.h> #include <net/busy_poll.h> #include "udp_impl.h" @@ -407,6 +408,25 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr, udp_ehash_secret + net_hash_mix(net)); } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 hash; + + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { + hash = udp_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + /* Fall back to scoring if group has connections */ + if (reuseport_has_conns(sk, false)) + return NULL; + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, @@ -417,7 +437,6 @@ static struct sock *udp4_lib_lookup2(struct net *net, { struct sock *sk, *result; int score, badness; - u32 hash = 0; result = NULL; badness = 0; @@ -425,15 +444,11 @@ static struct sock *udp4_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { - if (sk->sk_reuseport && - sk->sk_state != TCP_ESTABLISHED) { - hash = udp_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; - } + result = lookup_reuseport(net, sk, skb, + saddr, sport, 
daddr, hnum); + if (result) + return result; + badness = score; result = sk; } @@ -441,6 +456,29 @@ static struct sock *udp4_lib_lookup2(struct net *net, return result; } +static inline struct sock *udp4_lookup_run_bpf(struct net *net, + struct udp_table *udptable, + struct sk_buff *skb, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (udptable != &udp_table) + return NULL; /* only UDP is supported */ + + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ @@ -448,27 +486,45 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, int sdif, struct udp_table *udptable, struct sk_buff *skb) { - struct sock *result; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2; struct udp_hslot *hslot2; + struct sock *result, *sk; hash2 = ipv4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; + /* Lookup connected or non-wildcard socket */ result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, hslot2, skb); - if (!result) { - hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) + goto done; - result = udp4_lib_lookup2(net, saddr, sport, - htonl(INADDR_ANY), hnum, dif, sdif, - hslot2, skb); + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + sk = udp4_lookup_run_bpf(net, udptable, skb, + saddr, sport, daddr, hnum); + if (sk) { + result = sk; + goto done; + } } + + /* Got non-wildcard socket or error on first lookup */ + if (result) + goto done; + + /* Lookup wildcard sockets */ + hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); + slot2 = hash2 & udptable->mask; + hslot2 = &udptable->hash2[slot2]; + + result = udp4_lib_lookup2(net, saddr, sport, + htonl(INADDR_ANY), hnum, dif, sdif, + hslot2, skb); +done: if (IS_ERR(result)) return NULL; return result; @@ -2047,7 +2103,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets */ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are @@ -2116,7 +2172,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); ret = udp_queue_rcv_one_skb(sk, skb); if (ret > 0) - ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret); + ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); } return 0; } @@ -2288,6 +2344,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct rtable *rt = skb_rtable(skb); __be32 saddr, daddr; struct net *net = dev_net(skb->dev); + bool refcounted; /* * Validate the packet. 
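
[For readers unfamiliar with the hook wired in above: the new udp4_lookup_run_bpf() path lets a BPF_PROG_TYPE_SK_LOOKUP program select the receiving socket before the wildcard lookup runs, and lookup_reuseport() is still consulted afterwards so SO_REUSEPORT groups keep working. A minimal sketch of such a program, modeled on the kernel selftests; the map name, key, and port below are illustrative and not part of this patch:

	/* SPDX-License-Identifier: GPL-2.0 */
	#include <linux/bpf.h>
	#include <linux/in.h>		/* IPPROTO_UDP */
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_SOCKMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u64);
	} redir_map SEC(".maps");

	SEC("sk_lookup")
	int select_sock(struct bpf_sk_lookup *ctx)
	{
		struct bpf_sock *sk;
		__u32 key = 0;
		long err;

		/* Only steer UDP packets to an illustrative local port. */
		if (ctx->protocol != IPPROTO_UDP || ctx->local_port != 5353)
			return SK_PASS;	/* leave verdict to built-in lookup */

		sk = bpf_map_lookup_elem(&redir_map, &key);
		if (!sk)
			return SK_PASS;

		err = bpf_sk_assign(ctx, sk, 0);	/* steer to this socket */
		bpf_sk_release(sk);
		return err ? SK_DROP : SK_PASS;
	}

	char _license[] SEC("license") = "GPL";

The program attaches to a network namespace via a BPF link (BPF_LINK_CREATE with attach type BPF_SK_LOOKUP). Returning SK_PASS without calling bpf_sk_assign() leaves the verdict to the built-in lookup, which is why the kernel side above only takes the BPF result when a socket was actually selected.]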
@@ -2313,7 +2370,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp4_csum_init(skb, uh, proto)) goto csum_error; - sk = skb_steal_sock(skb); + sk = skb_steal_sock(skb, &refcounted); if (sk) { struct dst_entry *dst = skb_dst(skb); int ret; @@ -2322,7 +2379,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, udp_sk_rx_dst_set(sk, dst); ret = udp_unicast_rcv_skb(sk, skb, uh); - sock_put(sk); + if (refcounted) + sock_put(sk); return ret; } @@ -2494,7 +2552,8 @@ int udp_v4_early_demux(struct sk_buff *skb) */ if (!inet_sk(sk)->inet_daddr && in_dev) return ip_mc_validate_source(skb, iph->daddr, - iph->saddr, iph->tos, + iph->saddr, + iph->tos & IPTOS_RT_MASK, skb->dev, in_dev, &itag); } return 0; @@ -2690,6 +2749,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, val = up->gso_size; break; + case UDP_GRO: + val = up->gro_enabled; + break; + /* The following two cannot be changed on UDP sockets, the return is * always 0 (which corresponds to the full checksum coverage of UDP). */ case UDPLITE_SEND_CSCOV: @@ -2814,10 +2877,15 @@ EXPORT_SYMBOL(udp_prot); static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; ++state->bucket) { struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket]; @@ -2829,7 +2897,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) goto found; } spin_unlock_bh(&hslot->lock); @@ -2843,13 +2912,20 @@ found: static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + do { sk = sk_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + (afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family))); if (!sk) { if (state->bucket <= afinfo->udp_table->mask) @@ -2894,9 +2970,14 @@ EXPORT_SYMBOL(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (state->bucket <= afinfo->udp_table->mask) spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); } @@ -2940,6 +3021,67 @@ int udp4_seq_show(struct seq_file *seq, void *v) return 0; } +#ifdef CONFIG_BPF_SYSCALL +struct bpf_iter__udp { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct udp_sock *, udp_sk); + uid_t uid __aligned(8); + int bucket __aligned(8); +}; + +static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, + struct udp_sock *udp_sk, uid_t uid, int 
bucket) +{ + struct bpf_iter__udp ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.udp_sk = udp_sk; + ctx.uid = uid; + ctx.bucket = bucket; + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v) +{ + struct udp_iter_state *state = seq->private; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + struct sock *sk = v; + uid_t uid; + + if (v == SEQ_START_TOKEN) + return 0; + + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + return udp_prog_seq_show(prog, &meta, v, uid, state->bucket); +} + +static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)udp_prog_seq_show(prog, &meta, v, 0, 0); + } + + udp_seq_stop(seq, v); +} + +static const struct seq_operations bpf_iter_udp_seq_ops = { + .start = udp_seq_start, + .next = udp_seq_next, + .stop = bpf_iter_udp_seq_stop, + .show = bpf_iter_udp_seq_show, +}; +#endif + const struct seq_operations udp_seq_ops = { .start = udp_seq_start, .next = udp_seq_next, @@ -3057,6 +3199,62 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = { .init = udp_sysctl_init, }; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, + struct udp_sock *udp_sk, uid_t uid, int bucket) + +static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) +{ + struct udp_iter_state *st = priv_data; + struct udp_seq_afinfo *afinfo; + int ret; + + afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); + if (!afinfo) + return -ENOMEM; + + afinfo->family = AF_UNSPEC; + afinfo->udp_table = &udp_table; + st->bpf_seq_afinfo = afinfo; + ret = bpf_iter_init_seq_net(priv_data, aux); + if (ret) + kfree(afinfo); + return ret; +} + +static void bpf_iter_fini_udp(void *priv_data) +{ + struct udp_iter_state *st = priv_data; + + kfree(st->bpf_seq_afinfo); + bpf_iter_fini_seq_net(priv_data); +} + +static const struct bpf_iter_seq_info udp_seq_info = { + .seq_ops = &bpf_iter_udp_seq_ops, + .init_seq_private = bpf_iter_init_udp, + .fini_seq_private = bpf_iter_fini_udp, + .seq_priv_size = sizeof(struct udp_iter_state), +}; + +static struct bpf_iter_reg udp_reg_info = { + .target = "udp", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__udp, udp_sk), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &udp_seq_info, +}; + +static void __init bpf_iter_register(void) +{ + udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP]; + if (bpf_iter_reg_target(&udp_reg_info)) + pr_warn("Warning: could not register bpf iterator udp\n"); +} +#endif + void __init udp_init(void) { unsigned long limit; @@ -3082,4 +3280,8 @@ void __init udp_init(void) if (register_pernet_subsys(&udp_sysctl_ops)) panic("UDP: failed to init sysctl parameters.\n"); + +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_register(); +#endif } diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c new file mode 100644 index 000000000000..eddd973e6575 --- /dev/null +++ b/net/ipv4/udp_bpf.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */ + +#include <linux/skmsg.h> +#include <net/sock.h> +#include <net/udp.h> + +enum { + UDP_BPF_IPV4, + UDP_BPF_IPV6, + UDP_BPF_NUM_PROTS, +}; + +static struct proto *udpv6_prot_saved 
__read_mostly; +static DEFINE_SPINLOCK(udpv6_prot_lock); +static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS]; + +static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base) +{ + *prot = *base; + prot->unhash = sock_map_unhash; + prot->close = sock_map_close; +} + +static void udp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops) +{ + if (sk->sk_family == AF_INET6 && + unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) { + spin_lock_bh(&udpv6_prot_lock); + if (likely(ops != udpv6_prot_saved)) { + udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops); + smp_store_release(&udpv6_prot_saved, ops); + } + spin_unlock_bh(&udpv6_prot_lock); + } +} + +static int __init udp_bpf_v4_build_proto(void) +{ + udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot); + return 0; +} +core_initcall(udp_bpf_v4_build_proto); + +struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock) +{ + int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6; + + if (!psock->sk_proto) + udp_bpf_check_v6_needs_rebuild(sk, READ_ONCE(sk->sk_prot)); + + return &udp_bpf_prots[family]; +} diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index dccd2286bc28..1dbece34496e 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -21,16 +21,15 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, if (!inet_diag_bc_sk(bc, sk)) return 0; - return inet_sk_diag_fill(sk, NULL, skb, req, - sk_user_ns(NETLINK_CB(cb->skb).sk), - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin); + return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI, + net_admin); } -static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int udp_dump_one(struct udp_table *tbl, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; int err = -EINVAL; struct sock *sk = NULL; struct sk_buff *rep; @@ -71,11 +70,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, if (!rep) goto out; - err = inet_sk_diag_fill(sk, NULL, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, - netlink_net_capable(in_skb, CAP_NET_ADMIN)); + err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(rep); @@ -94,12 +90,16 @@ out_nosk: static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; int num, s_num, slot, s_slot; + struct nlattr *bc; + cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; s_slot = cb->args[0]; num = s_num = cb->args[1]; @@ -147,15 +147,15 @@ done: } static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udp_table, skb, cb, r, bc); + udp_dump(&udp_table, skb, cb, r); } -static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, +static int udp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udp_table, in_skb, nlh, req); + return udp_dump_one(&udp_table, cb, req); } static void udp_diag_get_info(struct sock *sk, 
struct inet_diag_msg *r, @@ -250,16 +250,15 @@ static const struct inet_diag_handler udp_diag_handler = { }; static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc) + const struct inet_diag_req_v2 *r) { - udp_dump(&udplite_table, skb, cb, r, bc); + udp_dump(&udplite_table, skb, cb, r); } -static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, +static int udplite_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - return udp_dump_one(&udplite_table, in_skb, nlh, req); + return udp_dump_one(&udplite_table, cb, req); } static const struct inet_diag_handler udplite_diag_handler = { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index a3908e55ed89..c463eebdc8fe 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -349,7 +349,7 @@ out: static struct sk_buff *udp_gro_receive_segment(struct list_head *head, struct sk_buff *skb) { - struct udphdr *uh = udp_hdr(skb); + struct udphdr *uh = udp_gro_udphdr(skb); struct sk_buff *pp = NULL; struct udphdr *uh2; struct sk_buff *p; @@ -426,7 +426,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, } if (NAPI_GRO_CB(skb)->encap_mark || - (skb->ip_summed != CHECKSUM_PARTIAL && + (uh->check && skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid) || !udp_sk(sk)->gro_receive) diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index ecff3fce9807..ab343ae686d4 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -58,9 +58,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); -#ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ae1344e4cec5..dce14470b15a 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -289,6 +289,7 @@ config IPV6_SEG6_LWTUNNEL config IPV6_SEG6_HMAC bool "IPv6: Segment Routing HMAC support" depends on IPV6 + select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6e1d200f30c1..52feab2baeee 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2452,8 +2452,9 @@ static void addrconf_add_mroute(struct net_device *dev) .fc_ifindex = dev->ifindex, .fc_dst_len = 8, .fc_flags = RTF_UP, - .fc_type = RTN_UNICAST, + .fc_type = RTN_MULTICAST, .fc_nlinfo.nl_net = dev_net(dev), + .fc_protocol = RTPROT_KERNEL, }; ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); @@ -4984,8 +4985,10 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, return -EMSGSIZE; if (args->netnsid >= 0 && - nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { + nlmsg_cancel(skb, nlh); return -EMSGSIZE; + } put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || @@ -5016,8 +5019,10 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, return -EMSGSIZE; if (args->netnsid >= 0 && - nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { + nlmsg_cancel(skb, nlh); return -EMSGSIZE; + } put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || diff --git 
a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 642fc6ac13d2..8a22486cf270 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -306,7 +306,9 @@ static int ip6addrlbl_del(struct net *net, /* add default label */ static int __net_init ip6addrlbl_net_init(struct net *net) { - int err = 0; + struct ip6addrlbl_entry *p = NULL; + struct hlist_node *n; + int err; int i; ADDRLABEL(KERN_DEBUG "%s\n", __func__); @@ -315,14 +317,20 @@ static int __net_init ip6addrlbl_net_init(struct net *net) INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head); for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { - int ret = ip6addrlbl_add(net, - ip6addrlbl_init_table[i].prefix, - ip6addrlbl_init_table[i].prefixlen, - 0, - ip6addrlbl_init_table[i].label, 0); - /* XXX: should we free all rules when we catch an error? */ - if (ret && (!err || err != -ENOMEM)) - err = ret; + err = ip6addrlbl_add(net, + ip6addrlbl_init_table[i].prefix, + ip6addrlbl_init_table[i].prefixlen, + 0, + ip6addrlbl_init_table[i].label, 0); + if (err) + goto err_ip6addrlbl_add; + } + return 0; + +err_ip6addrlbl_add: + hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { + hlist_del_rcu(&p->list); + kfree_rcu(p, rcu); } return err; } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 7a976aad30b9..650e49104413 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -59,6 +59,7 @@ #endif #include <net/calipso.h> #include <net/seg6.h> +#include <net/rawv6.h> #include <linux/uaccess.h> #include <linux/mroute6.h> @@ -273,7 +274,7 @@ out_rcu_unlock: } static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, - bool force_bind_address_no_port, bool with_lock) + u32 flags) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr; struct inet_sock *inet = inet_sk(sk); @@ -297,7 +298,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; - if (with_lock) + if (flags & BIND_WITH_LOCK) lock_sock(sk); /* Check these errors (active socket, double bind). */ @@ -400,18 +401,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, /* Make sure we are allowed to bind here. */ if (snum || !(inet->bind_address_no_port || - force_bind_address_no_port)) { + (flags & BIND_FORCE_ADDRESS_NO_PORT))) { if (sk->sk_prot->get_port(sk, snum)) { sk->sk_ipv6only = saved_ipv6only; inet_reset_saddr(sk); err = -EADDRINUSE; goto out; } - err = BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk); - if (err) { - sk->sk_ipv6only = saved_ipv6only; - inet_reset_saddr(sk); - goto out; + if (!(flags & BIND_FROM_BPF)) { + err = BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk); + if (err) { + sk->sk_ipv6only = saved_ipv6only; + inet_reset_saddr(sk); + goto out; + } } } @@ -423,7 +426,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, inet->inet_dport = 0; inet->inet_daddr = 0; out: - if (with_lock) + if (flags & BIND_WITH_LOCK) release_sock(sk); return err; out_unlock: @@ -451,7 +454,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (err) return err; - return __inet6_bind(sk, uaddr, addr_len, false, true); + return __inet6_bind(sk, uaddr, addr_len, BIND_WITH_LOCK); } EXPORT_SYMBOL(inet6_bind); @@ -502,9 +505,8 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock); /* * This does both peername and sockname. 
*/ - int inet6_getname(struct socket *sock, struct sockaddr *uaddr, - int peer) + int peer) { struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; @@ -529,9 +531,13 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, sin->sin6_addr = np->saddr; else sin->sin6_addr = sk->sk_v6_rcv_saddr; - sin->sin6_port = inet->inet_sport; } + if (cgroup_bpf_enabled) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin, + peer ? BPF_CGROUP_INET6_GETPEERNAME : + BPF_CGROUP_INET6_GETSOCKNAME, + NULL); sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr, sk->sk_bound_dev_if); return sizeof(*sin); @@ -987,6 +993,8 @@ static int __init inet6_init(void) goto out; } + raw_hashinfo_init(&raw_v6_hashinfo); + err = proto_register(&tcpv6_prot, 1); if (err) goto out; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 95835e8d99aa..1c5ecd07a43e 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -588,7 +588,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); memset(ah->auth_data, 0, ahp->icv_trunc_len); - if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) + err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN); + if (err) goto out_free; ip6h->priority = 0; diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index fed91ab7ec46..cf3a88a10ddd 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 221c81f85cbf..7426e33686d1 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -83,6 +83,9 @@ struct calipso_map_cache_entry { static struct calipso_map_cache_bkt *calipso_cache; +static void calipso_cache_invalidate(void); +static void calipso_doi_putdef(struct calipso_doi *doi_def); + /* Label Mapping Cache Functions */ @@ -444,15 +447,10 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) ret_val = -ENOENT; goto doi_remove_return; } - if (!refcount_dec_and_test(&doi_def->refcount)) { - spin_unlock(&calipso_doi_list_lock); - ret_val = -EBUSY; - goto doi_remove_return; - } list_del_rcu(&doi_def->list); spin_unlock(&calipso_doi_list_lock); - call_rcu(&doi_def->rcu, calipso_doi_free_rcu); + calipso_doi_putdef(doi_def); ret_val = 0; doi_remove_return: @@ -508,10 +506,8 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def) if (!refcount_dec_and_test(&doi_def->refcount)) return; - spin_lock(&calipso_doi_list_lock); - list_del_rcu(&doi_def->list); - spin_unlock(&calipso_doi_list_lock); + calipso_cache_invalidate(); call_rcu(&doi_def->rcu, calipso_doi_free_rcu); } @@ -1047,7 +1043,8 @@ static int calipso_opt_getattr(const unsigned char *calipso, goto getattr_return; } - secattr->flags |= 
NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } secattr->type = NETLBL_NLTYPE_CALIPSO; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index a3b403ba8f8f..79f117e33b80 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -226,11 +226,15 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { u8 *tail; - u8 *vaddr; int nfrags; struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; + unsigned int allocsz; + + allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); + if (allocsz > ESP_SKB_FRAG_MAXSIZE) + goto cow; if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { @@ -259,14 +263,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info page = pfrag->page; get_page(page); - vaddr = kmap_atomic(page); - - tail = vaddr + pfrag->offset; + tail = page_address(page) + pfrag->offset; esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); - kunmap_atomic(vaddr); - nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index fd535053245b..1c532638b2ad 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -85,10 +85,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head, sp->olen++; xo = xfrm_offload(skb); - if (!xo) { - xfrm_state_put(x); + if (!xo) goto out_reset; - } } xo->flags |= XFRM_GRO; @@ -123,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) struct ip_esp_hdr *esph; struct ipv6hdr *iph = ipv6_hdr(skb); struct xfrm_offload *xo = xfrm_offload(skb); - int proto = iph->nexthdr; + u8 proto = iph->nexthdr; skb_push(skb, -skb_network_offset(skb)); + + if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) { + __be16 frag; + + ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); + } + esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; @@ -205,9 +210,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, skb->encap_hdr_csum = 1; if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); else if (!(features & NETIF_F_HW_ESP_TX_CSUM)) - esp_features = features & ~NETIF_F_CSUM_MASK; + esp_features = features & ~(NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); xo->flags |= XFRM_GSO_SEGMENT; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 62c997201970..3db10cae7b17 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -158,7 +158,13 @@ static bool is_ineligible(const struct sk_buff *skb) tp = skb_header_pointer(skb, ptr+offsetof(struct icmp6hdr, icmp6_type), sizeof(_type), &_type); - if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) + + /* Based on RFC 8200, Section 4.5 Fragment Header, return + * false if this is a fragment packet with no icmp header info. 
+ */ + if (!tp && frag_off != 0) + return false; + else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) return true; } return false; @@ -306,10 +312,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st } #if IS_ENABLED(CONFIG_IPV6_MIP6) -static void mip6_addr_swap(struct sk_buff *skb) +static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) { struct ipv6hdr *iph = ipv6_hdr(skb); - struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; @@ -326,7 +331,7 @@ static void mip6_addr_swap(struct sk_buff *skb) } } #else -static inline void mip6_addr_swap(struct sk_buff *skb) {} +static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {} #endif static struct dst_entry *icmpv6_route_lookup(struct net *net, @@ -420,8 +425,9 @@ static int icmp6_iif(const struct sk_buff *skb) /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -514,7 +520,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) goto out_bh_enable; - mip6_addr_swap(skb); + mip6_addr_swap(skb, parm); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; @@ -535,7 +541,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (!sk) goto out_bh_enable; - sk->sk_mark = mark; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) @@ -552,6 +557,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, fl6.flowi6_oif = np->ucast_oif; ipcm6_init_sk(&ipc6, np); + ipc6.sockc.mark = mark; fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = icmpv6_route_lookup(net, skb, sk, &fl6); @@ -594,12 +600,13 @@ out: out_bh_enable: local_bh_enable(); } +EXPORT_SYMBOL(icmp6_send); /* Slightly more convenient version of icmp6_send. 
*/ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb)); kfree_skb(skb); } @@ -656,10 +663,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, } if (type == ICMP_TIME_EXCEEDED) icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); else icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); if (rt) ip6_rt_put(rt); @@ -720,7 +727,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) sk = icmpv6_xmit_lock(net); if (!sk) goto out_bh_enable; - sk->sk_mark = mark; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) @@ -748,6 +754,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) ipcm6_init_sk(&ipc6, np); ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + ipc6.sockc.mark = mark; if (ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index fbe9d4295eac..2d3add9e6116 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -21,6 +21,8 @@ #include <net/ip.h> #include <net/sock_reuseport.h> +extern struct inet_hashinfo tcp_hashinfo; + u32 inet6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) @@ -111,6 +113,23 @@ static inline int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 phash; + + if (sk->sk_reuseport) { + phash = inet6_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, phash, skb, doff); + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *inet6_lhash2_lookup(struct net *net, struct inet_listen_hashbucket *ilb2, @@ -123,21 +142,17 @@ static struct sock *inet6_lhash2_lookup(struct net *net, struct inet_connection_sock *icsk; struct sock *sk, *result = NULL; int score, hiscore = 0; - u32 phash = 0; inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { sk = (struct sock *)icsk; score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif); if (score > hiscore) { - if (sk->sk_reuseport) { - phash = inet6_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, phash, - skb, doff); - if (result) - return result; - } + result = lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, hnum); + if (result) + return result; + result = sk; hiscore = score; } @@ -146,6 +161,31 @@ static struct sock *inet6_lhash2_lookup(struct net *net, return result; } +static inline struct sock *inet6_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, 
doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -157,6 +197,14 @@ struct sock *inet6_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv6_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index c75274e0745c..e392941e5738 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -613,7 +613,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) if (arg.filter.table_id) { tb = fib6_get_table(net, arg.filter.table_id); if (!tb) { - if (arg.filter.dump_all_families) + if (rtnl_msg_family(cb->nlh) != PF_INET6) goto out; NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist"); @@ -973,6 +973,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, { struct fib6_table *table = rt->fib6_table; + /* Flush all cached dst in exception table */ + rt6_flush_exceptions(rt); fib6_drop_pcpu_from(rt, table); if (rt->nh && !list_empty(&rt->nh_list)) @@ -1839,9 +1841,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, net->ipv6.rt6_stats->fib_rt_entries--; net->ipv6.rt6_stats->fib_discarded_routes++; - /* Flush all cached dst in exception table */ - rt6_flush_exceptions(rt); - /* Reset round-robin state, if necessary */ if (rcu_access_pointer(fn->rr_ptr) == rt) fn->rr_ptr = NULL; @@ -1896,14 +1895,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { - struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, - lockdep_is_held(&rt->fib6_table->tb6_lock)); - struct fib6_table *table = rt->fib6_table; struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; + struct fib6_table *table; + struct fib6_node *fn; - if (!fn || rt == net->ipv6.fib6_null_entry) + if (rt == net->ipv6.fib6_null_entry) + return -ENOENT; + + table = rt->fib6_table; + fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&table->tb6_lock)); + if (!fn) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); @@ -2369,7 +2373,7 @@ void fib6_gc_cleanup(void) } #ifdef CONFIG_PROC_FS -static int ipv6_route_seq_show(struct seq_file *seq, void *v) +static int ipv6_route_native_seq_show(struct seq_file *seq, void *v) { struct fib6_info *rt = v; struct ipv6_route_iter *iter = seq->private; @@ -2378,7 +2382,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v) const struct net_device *dev; if (rt->nh) - fib6_nh = nexthop_fib6_nh(rt->nh); + fib6_nh = nexthop_fib6_nh_bh(rt->nh); seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); @@ -2474,14 +2478,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; + ++(*pos); if (!v) goto iter_table; n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); - if (n) { - ++*pos; + if (n) return n; - } iter_table: ipv6_route_check_sernum(iter); @@ -2489,8 +2492,6 @@ iter_table: r = fib6_walk_continue(&iter->w); spin_unlock_bh(&iter->tbl->tb6_lock); if 
(r > 0) { - if (v) - ++*pos; return iter->w.leaf; } else if (r < 0) { fib6_walker_unlink(net, &iter->w); @@ -2517,8 +2518,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) iter->skip = *pos; if (iter->tbl) { + loff_t p = 0; + ipv6_route_seq_setup_walk(iter, net); - return ipv6_route_seq_next(seq, NULL, pos); + return ipv6_route_seq_next(seq, NULL, &p); } else { return NULL; } @@ -2530,7 +2533,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) return w->node && !(w->state == FWS_U && w->node == w->root); } -static void ipv6_route_seq_stop(struct seq_file *seq, void *v) +static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) __releases(RCU_BH) { struct net *net = seq_file_net(seq); @@ -2542,6 +2545,62 @@ static void ipv6_route_seq_stop(struct seq_file *seq, void *v) rcu_read_unlock_bh(); } +#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) +static int ipv6_route_prog_seq_show(struct bpf_prog *prog, + struct bpf_iter_meta *meta, + void *v) +{ + struct bpf_iter__ipv6_route ctx; + + ctx.meta = meta; + ctx.rt = v; + return bpf_iter_run_prog(prog, &ctx); +} + +static int ipv6_route_seq_show(struct seq_file *seq, void *v) +{ + struct ipv6_route_iter *iter = seq->private; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + if (!prog) + return ipv6_route_native_seq_show(seq, v); + + ret = ipv6_route_prog_seq_show(prog, &meta, v); + iter->w.leaf = NULL; + + return ret; +} + +static void ipv6_route_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)ipv6_route_prog_seq_show(prog, &meta, v); + } + + ipv6_route_native_seq_stop(seq, v); +} +#else +static int ipv6_route_seq_show(struct seq_file *seq, void *v) +{ + return ipv6_route_native_seq_show(seq, v); +} + +static void ipv6_route_seq_stop(struct seq_file *seq, void *v) +{ + ipv6_route_native_seq_stop(seq, v); +} +#endif + const struct seq_operations ipv6_route_seq_ops = { .start = ipv6_route_seq_start, .next = ipv6_route_seq_next, diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9ec05a1df5e1..e4a43a8941c8 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, gre_proto == htons(ETH_P_ERSPAN2)) ? 
ARPHRD_ETHER : ARPHRD_IP6GRE; int score, cand_score = 4; + struct net_device *ndev; for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { if (!ipv6_addr_equal(local, &t->parms.laddr) || @@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, if (t && t->dev->flags & IFF_UP) return t; - dev = ign->fb_tunnel_dev; - if (dev && dev->flags & IFF_UP) - return netdev_priv(dev); + ndev = READ_ONCE(ign->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) ip6gre_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); + if (ign->fb_tunnel_dev == dev) + WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); dev_put(dev); } @@ -1117,8 +1120,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, return; if (rt->dst.dev) { - dev->needed_headroom = rt->dst.dev->hard_header_len + - t_hlen; + unsigned short dst_len = rt->dst.dev->hard_header_len + + t_hlen; + + if (t->dev->header_ops) + dev->hard_header_len = dst_len; + else + dev->needed_headroom = dst_len; if (set_mtu) { dev->mtu = rt->dst.dev->mtu - t_hlen; @@ -1143,7 +1151,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel) tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); - tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; + + if (tunnel->dev->header_ops) + tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; + else + tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen; + return t_hlen; } @@ -1557,17 +1570,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) static int __net_init ip6gre_init_net(struct net *net) { struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); + struct net_device *ndev; int err; if (!net_has_fallback_tunnels(net)) return 0; - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", - NET_NAME_UNKNOWN, - ip6gre_tunnel_setup); - if (!ign->fb_tunnel_dev) { + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", + NET_NAME_UNKNOWN, ip6gre_tunnel_setup); + if (!ndev) { err = -ENOMEM; goto err_alloc_dev; } + ign->fb_tunnel_dev = ndev; dev_net_set(ign->fb_tunnel_dev, net); /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. 
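
[The ip6gre hunks just above follow the standard lockless-pointer discipline: ip6gre_tunnel_lookup() runs under RCU and can race with ip6gre_tunnel_uninit() clearing the fallback device, so the writer now publishes NULL with WRITE_ONCE() and the reader takes a single READ_ONCE() snapshot into a local variable before testing and using it. A condensed sketch of the pattern, with hypothetical names rather than code from this patch:

	static struct net_device *fb_dev;	/* shared, read locklessly */

	static struct net_device *lookup_fallback(void)
	{
		/* One marked load; every later use goes through the local
		 * copy, so the pointer cannot change (or become NULL)
		 * between the IFF_UP check and the dereference.
		 */
		struct net_device *ndev = READ_ONCE(fb_dev);

		if (ndev && ndev->flags & IFF_UP)
			return ndev;
		return NULL;
	}

	static void teardown_fallback(struct net_device *dev)
	{
		if (fb_dev == dev)
			WRITE_ONCE(fb_dev, NULL);	/* one store, visible to readers */
	}

Without the marked accesses the compiler is free to reload fb_dev after the IFF_UP test, which is exactly the stale-pointer window the hunks above close.]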
@@ -1587,7 +1601,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + free_netdev(ndev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index 02045494c24c..9e3574880cb0 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -9,6 +9,8 @@ #if IS_ENABLED(CONFIG_IPV6) +#if !IS_BUILTIN(CONFIG_IPV6) + static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) @@ -31,18 +33,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) } EXPORT_SYMBOL(inet6_unregister_icmp_sender); -void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) { ip6_icmp_send_t *send; rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); - - if (!send) - goto out; - send(skb, type, code, info, NULL); -out: + if (send) + send(skb, type, code, info, NULL, parm); rcu_read_unlock(); } -EXPORT_SYMBOL(icmpv6_send); +EXPORT_SYMBOL(__icmpv6_send); +#endif + +#if IS_ENABLED(CONFIG_NF_NAT) +#include <net/netfilter/nf_conntrack.h> +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct in6_addr orig_ip; + struct nf_conn *ct; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + __icmpv6_send(skb_in, type, code, info, &parm); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) + goto out; + + orig_ip = ipv6_hdr(skb_in)->saddr; + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; + __icmpv6_send(skb_in, type, code, info, &parm); + ipv6_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +EXPORT_SYMBOL(icmpv6_ndo_send); +#endif #endif diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index abf66ee02754..7dcac77e37a4 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -229,16 +229,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; - /* While RFC4291 is not explicit about v4mapped addresses - * in IPv6 headers, it seems clear linux dual-stack - * model can not deal properly with these. - * Security models could be fooled by ::ffff:127.0.0.1 for example. - * - * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 - */ - if (ipv6_addr_v4mapped(&hdr->saddr)) - goto err; - skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); @@ -269,7 +259,8 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, rcu_read_unlock(); /* Must drop socket now because of tproxy. 
*/ - skb_orphan(skb); + if (!skb_sk_is_prefetched(skb)) + skb_orphan(skb); return skb; err: diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f93d3bcbdfbe..49abde2e690e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -141,8 +141,43 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * return -EINVAL; } +static int +ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk, + struct sk_buff *skb, unsigned int mtu) +{ + struct sk_buff *segs, *nskb; + netdev_features_t features; + int ret = 0; + + /* Please see corresponding comment in ip_finish_output_gso + * describing the cases where GSO segment length exceeds the + * egress MTU. + */ + features = netif_skb_features(skb); + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); + if (IS_ERR_OR_NULL(segs)) { + kfree_skb(skb); + return -ENOMEM; + } + + consume_skb(skb); + + skb_list_walk_safe(segs, segs, nskb) { + int err; + + skb_mark_not_on_list(segs); + err = ip6_fragment(net, sk, segs, ip6_finish_output2); + if (err && ret == 0) + ret = err; + } + + return ret; +} + static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { + unsigned int mtu; + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { @@ -151,7 +186,11 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff } #endif - if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || + mtu = ip6_skb_dst_mtu(skb); + if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)) + return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu); + + if ((skb->len > mtu && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)) || (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) return ip6_fragment(net, sk, skb, ip6_finish_output2); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b5dd20c4599b..69799b612ee8 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -860,7 +860,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, struct metadata_dst *tun_dst, bool log_ecn_err) { - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb); + + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; + if (tpi->proto == htons(ETH_P_IP)) + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; + + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_err); } EXPORT_SYMBOL(ip6_tnl_rcv); @@ -2209,6 +2217,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head t = rtnl_dereference(t->next); } } + + t = rtnl_dereference(ip6n->tnls_wc[0]); + while (t) { + /* If dev is in the same netns, it has already + * been added to the list by the previous loop. 
+ */ + if (!net_eq(dev_net(t->dev), net)) + unregister_netdevice_queue(t->dev, list); + t = rtnl_dereference(t->next); + } } static int __net_init ip6_tnl_init_net(struct net *net) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 857a89ad4d6c..dd41313d7fa5 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2498,7 +2498,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { - if (filter.dump_all_families) + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) return skb->len; NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist"); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index debdaeba5d8c..5352c7e68c42 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -188,10 +188,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EBUSY; break; } - break; } else { break; } + if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; @@ -205,6 +205,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index eaa4c2cc2fbb..c875c9b6edbe 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2618,6 +2618,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) idev->mc_list = i->next; write_unlock_bh(&idev->lock); + ip6_mc_clear_src(i); ma_put(i); write_lock_bh(&idev->lock); } diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c index d3d6b6a66e5f..04d5fcdfa6e0 100644 --- a/net/ipv6/mcast_snoop.c +++ b/net/ipv6/mcast_snoop.c @@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb) struct mld_msg *mld; if (!ipv6_mc_may_pull(skb, len)) - return -EINVAL; + return -ENODATA; mld = (struct mld_msg *)skb_transport_header(skb); @@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb) case ICMPV6_MGM_QUERY: return ipv6_mc_check_mld_query(skb); default: - return -ENOMSG; + return -ENODATA; } } @@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb) return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo); } -int ipv6_mc_check_icmpv6(struct sk_buff *skb) +static int ipv6_mc_check_icmpv6(struct sk_buff *skb) { unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr); unsigned int transport_len = ipv6_transport_len(skb); @@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb) return 0; } -EXPORT_SYMBOL(ipv6_mc_check_icmpv6); /** * ipv6_mc_check_mld - checks whether this is a sane MLD packet @@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6); * * -EINVAL: A broken packet was detected, i.e. it violates some internet * standard - * -ENOMSG: IP header validation succeeded but it is not an MLD packet. + * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet + * with a hop-by-hop option. + * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded + * but it is not an MLD packet. * -ENOMEM: A memory allocation failure happened. 
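/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The mcast_snoop hunks above split one overloaded error into
 * the three-way contract the kernel-doc here documents: callers can now
 * tell a broken packet (-EINVAL) from a valid non-ICMPv6/hop-by-hop
 * packet (-ENOMSG) and from valid ICMPv6 that is simply not MLD
 * (-ENODATA). A compilable model of that contract, hypothetical names:
 */
#include <errno.h>
#include <stdio.h>

enum pkt_kind { PKT_BROKEN, PKT_NOT_ICMPV6_HBH, PKT_ICMPV6_NOT_MLD, PKT_MLD };

static int check_mld(enum pkt_kind k)
{
    switch (k) {
    case PKT_BROKEN:         return -EINVAL;
    case PKT_NOT_ICMPV6_HBH: return -ENOMSG;
    case PKT_ICMPV6_NOT_MLD: return -ENODATA;
    case PKT_MLD:            return 0;
    }
    return -EINVAL;
}

int main(void)
{
    /* a bridge snooper would only react to 0 or -ENODATA, for example */
    printf("%d %d %d %d\n", check_mld(PKT_BROKEN),
           check_mld(PKT_NOT_ICMPV6_HBH),
           check_mld(PKT_ICMPV6_NOT_MLD), check_mld(PKT_MLD));
    return 0;
}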
* * Caller needs to set the skb network header and free any returned skb if it diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 53caf59c591e..118e19cabb72 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -81,6 +81,7 @@ static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb); static int pndisc_constructor(struct pneigh_entry *n); static void pndisc_destructor(struct pneigh_entry *n); static void pndisc_redo(struct sk_buff *skb); +static int ndisc_is_multicast(const void *pkey); static const struct neigh_ops ndisc_generic_ops = { .family = AF_INET6, @@ -115,6 +116,7 @@ struct neigh_table nd_tbl = { .pconstructor = pndisc_constructor, .pdestructor = pndisc_destructor, .proxy_redo = pndisc_redo, + .is_multicast = ndisc_is_multicast, .allow_add = ndisc_allow_add, .id = "ndisc_cache", .parms = { @@ -1705,6 +1707,11 @@ static void pndisc_redo(struct sk_buff *skb) kfree_skb(skb); } +static int ndisc_is_multicast(const void *pkey) +{ + return ipv6_addr_is_multicast((struct in6_addr *)pkey); +} + static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb) { struct inet6_dev *idev = __in6_dev_get(skb->dev); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 409e79b84a83..ab9a279dd6d4 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -20,10 +20,10 @@ #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #include "../bridge/br_private.h" -int ip6_route_me_harder(struct net *net, struct sk_buff *skb) +int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb) { const struct ipv6hdr *iph = ipv6_hdr(skb); - struct sock *sk = sk_to_full_sk(skb->sk); + struct sock *sk = sk_to_full_sk(sk_partial); unsigned int hh_len; struct dst_entry *dst; int strict = (ipv6_addr_type(&iph->daddr) & @@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff *skb, if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || skb->mark != rt_info->mark) - return ip6_route_me_harder(entry->state.net, skb); + return ip6_route_me_harder(entry->state.net, entry->state.sk, skb); } return 0; } @@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = { .route_input = ip6_route_input, .fragment = ip6_fragment, .reroute = nf_ip6_reroute, -#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - .br_defrag = nf_ct_frag6_gather, -#endif #if IS_MODULE(CONFIG_IPV6) .br_fragment = br_ip6_fragment, #endif diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index c973ace208c5..8bb543b0e775 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1445,6 +1445,8 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; + memset(newinfo->entries, 0, size); + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 070afb97fa2b..401e8dcb2c84 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -57,7 +57,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) { - err = ip6_route_me_harder(state->net, skb); + err = ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index 
22b80db6d882..5b40258d3a5e 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m, switch (dev->type) { case ARPHRD_ETHER: - nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, - ntohs(eth_hdr(skb)->h_proto)); + nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); + nf_log_dump_vlan(m, skb); + nf_log_buf_add(m, "MACPROTO=%04x ", + ntohs(eth_hdr(skb)->h_proto)); return; default: break; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index dfe5e603ffe1..a5df873fbaab 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -61,18 +61,17 @@ #define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */ -struct raw_hashinfo raw_v6_hashinfo = { - .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock), -}; +struct raw_hashinfo raw_v6_hashinfo; EXPORT_SYMBOL_GPL(raw_v6_hashinfo); struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, const struct in6_addr *loc_addr, const struct in6_addr *rmt_addr, int dif, int sdif) { + struct hlist_nulls_node *hnode; bool is_multicast = ipv6_addr_is_multicast(loc_addr); - sk_for_each_from(sk) + sk_nulls_for_each_from(sk, hnode) if (inet_sk(sk)->inet_num == num) { if (!net_eq(sock_net(sk), net)) @@ -168,8 +167,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) hash = nexthdr & (RAW_HTABLE_SIZE - 1); - read_lock(&raw_v6_hashinfo.lock); - sk = sk_head(&raw_v6_hashinfo.ht[hash]); + rcu_read_lock(); + sk = sk_nulls_head(&raw_v6_hashinfo.ht[hash]); if (!sk) goto out; @@ -219,11 +218,11 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) rawv6_rcv(sk, clone); } } - sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr, + sk = __raw_v6_lookup(net, sk_nulls_next(sk), nexthdr, daddr, saddr, inet6_iif(skb), inet6_sdif(skb)); } out: - read_unlock(&raw_v6_hashinfo.lock); + rcu_read_unlock(); return delivered; } @@ -231,7 +230,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; - raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]); + raw_sk = sk_nulls_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) raw_sk = NULL; @@ -298,7 +297,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST) && - !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) { + !ipv6_can_nonlocal_bind(sock_net(sk), inet)) { err = -EADDRNOTAVAIL; if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, dev, 0)) { @@ -368,8 +367,8 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr, hash = nexthdr & (RAW_HTABLE_SIZE - 1); - read_lock(&raw_v6_hashinfo.lock); - sk = sk_head(&raw_v6_hashinfo.ht[hash]); + rcu_read_lock(); + sk = sk_nulls_head(&raw_v6_hashinfo.ht[hash]); if (sk) { /* Note: ipv6_hdr(skb) != skb->data */ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data; @@ -381,10 +380,10 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr, inet6_iif(skb), inet6_iif(skb)))) { rawv6_err(sk, skb, NULL, type, code, inner_offset, info); - sk = sk_next(sk); + sk = sk_nulls_next(sk); } } - read_unlock(&raw_v6_hashinfo.lock); + rcu_read_unlock(); } static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1f5d4d196dcc..c8cf1bbad74a 100644 --- a/net/ipv6/reassembly.c +++ 
b/net/ipv6/reassembly.c @@ -42,6 +42,8 @@ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/tcp.h> +#include <linux/udp.h> #include <net/sock.h> #include <net/snmp.h> @@ -322,7 +324,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) struct frag_queue *fq; const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); - int iif; + __be16 frag_off; + int iif, offset; + u8 nexthdr; if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; @@ -351,6 +355,33 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return 1; } + /* RFC 8200, Section 4.5 Fragment Header: + * If the first fragment does not include all headers through an + * Upper-Layer header, then that fragment should be discarded and + * an ICMP Parameter Problem, Code 3, message should be sent to + * the source of the fragment, with the Pointer field set to zero. + */ + nexthdr = hdr->nexthdr; + offset = ipv6_skip_exthdr(skb, skb_transport_offset(skb), &nexthdr, &frag_off); + if (offset >= 0) { + /* Check some common protocols' header */ + if (nexthdr == IPPROTO_TCP) + offset += sizeof(struct tcphdr); + else if (nexthdr == IPPROTO_UDP) + offset += sizeof(struct udphdr); + else if (nexthdr == IPPROTO_ICMPV6) + offset += sizeof(struct icmp6hdr); + else + offset += 1; + + if (!(frag_off & htons(IP6_OFFSET)) && offset > skb->len) { + __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev), + IPSTATS_MIB_INHDRERRORS); + icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0); + return -1; + } + } + iif = skb->dev ? skb->dev->ifindex : 0; fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 894c7370c1bd..94063bb11912 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -61,6 +61,7 @@ #include <net/l3mdev.h> #include <net/ip.h> #include <linux/uaccess.h> +#include <linux/btf_ids.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> @@ -431,9 +432,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, struct fib6_info *sibling, *next_sibling; struct fib6_info *match = res->f6i; - if ((!match->fib6_nsiblings && !match->nh) || have_oif_match) + if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; + if (match->nh && have_oif_match && res->nh) + return; + /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. */ @@ -1388,9 +1392,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res) } ip6_rt_copy_init(pcpu_rt, res); pcpu_rt->rt6i_flags |= RTF_PCPU; + + if (f6i->nh) + pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev)); + return pcpu_rt; } +static bool rt6_is_valid(const struct rt6_info *rt6) +{ + return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev)); +} + /* It should be called with rcu_read_lock() acquired */ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) { @@ -1398,6 +1411,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); + if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) { + struct rt6_info *prev, **p; + + p = this_cpu_ptr(res->nh->rt6i_pcpu); + prev = xchg(p, NULL); + if (prev) { + dst_dev_put(&prev->dst); + dst_release(&prev->dst); + } + + pcpu_rt = NULL; + } + return pcpu_rt; } @@ -2599,6 +2625,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) rt = container_of(dst, struct rt6_info, dst); + if (rt->sernum) + return rt6_is_valid(rt) ? 
dst : NULL; + rcu_read_lock(); /* All IPV6 dsts are created with ->obsolete set to the value @@ -2703,8 +2732,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; - if (dst_metric_locked(dst, RTAX_MTU)) - return; + /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU) + * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it. + * [see also comment in rt6_mtu_change_route()] + */ if (iph) { daddr = &iph->daddr; @@ -3379,7 +3410,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type) if ((flags & RTF_REJECT) || (dev && (dev->flags & IFF_LOOPBACK) && !(addr_type & IPV6_ADDR_LOOPBACK) && - !(flags & RTF_LOCAL))) + !(flags & (RTF_ANYCAST | RTF_LOCAL)))) return true; return false; @@ -3656,14 +3687,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - goto out; - } if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); goto out; } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + goto out; + } rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); } else { @@ -5153,9 +5184,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, * nexthops have been replaced by first new, the rest should * be added to it. */ - cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | - NLM_F_REPLACE); - cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; + if (cfg->fc_nlinfo.nlh) { + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | + NLM_F_REPLACE); + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; + } nhn++; } @@ -6329,6 +6362,43 @@ void __init ip6_route_init_special_entries(void) #endif } +#if IS_BUILTIN(CONFIG_IPV6) +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt) + +BTF_ID_LIST(btf_fib6_info_id) +BTF_ID(struct, fib6_info) + +static const struct bpf_iter_seq_info ipv6_route_seq_info = { + .seq_ops = &ipv6_route_seq_ops, + .init_seq_private = bpf_iter_init_seq_net, + .fini_seq_private = bpf_iter_fini_seq_net, + .seq_priv_size = sizeof(struct ipv6_route_iter), +}; + +static struct bpf_iter_reg ipv6_route_reg_info = { + .target = "ipv6_route", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__ipv6_route, rt), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &ipv6_route_seq_info, +}; + +static int __init bpf_iter_register(void) +{ + ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id; + return bpf_iter_reg_target(&ipv6_route_reg_info); +} + +static void bpf_iter_unregister(void) +{ + bpf_iter_unreg_target(&ipv6_route_reg_info); +} +#endif +#endif + int __init ip6_route_init(void) { int ret; @@ -6391,6 +6461,14 @@ int __init ip6_route_init(void) if (ret) goto out_register_late_subsys; +#if IS_BUILTIN(CONFIG_IPV6) +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + ret = bpf_iter_register(); + if (ret) + goto out_register_late_subsys; +#endif +#endif + for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); @@ -6423,6 +6501,11 @@ out_kmem_cache: void ip6_route_cleanup(void) { +#if IS_BUILTIN(CONFIG_IPV6) +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_unregister(); +#endif +#endif unregister_netdevice_notifier(&ip6_route_dev_notifier); 
unregister_pernet_subsys(&ip6_route_net_late_ops); fib6_rules_cleanup(); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 98954830c40b..2710f3bc856f 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1088,7 +1088,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) if (tdev && !netif_is_l3_master(tdev)) { int t_hlen = tunnel->hlen + sizeof(struct iphdr); - dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); dev->mtu = tdev->mtu - t_hlen; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; @@ -1378,7 +1377,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) dev->priv_destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; - dev->hard_header_len = LL_MAX_HEADER + t_hlen; dev->mtu = ETH_DATA_LEN - t_hlen; dev->min_mtu = IPV6_MIN_MTU; dev->max_mtu = IP6_MAX_MTU - t_hlen; @@ -1599,8 +1597,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, } #ifdef CONFIG_IPV6_SIT_6RD - if (ipip6_netlink_6rd_parms(data, &ip6rd)) + if (ipip6_netlink_6rd_parms(data, &ip6rd)) { err = ipip6_tunnel_update_6rd(nt, &ip6rd); + if (err < 0) + unregister_netdevice_queue(dev, NULL); + } #endif return err; @@ -1818,9 +1819,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net, if (dev->rtnl_link_ops == &sit_link_ops) unregister_netdevice_queue(dev, head); - for (prio = 1; prio < 4; prio++) { + for (prio = 0; prio < 4; prio++) { int h; - for (h = 0; h < IP6_SIT_HASH_SIZE; h++) { + for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) { struct ip_tunnel *t; t = rtnl_dereference(sitn->tunnels[prio][h]); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 30915f6f31e3..ec155844012b 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -136,7 +136,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) __u32 cookie = ntohl(th->ack_seq) - 1; struct sock *ret = sk; struct request_sock *req; - int mss; + int full_space, mss; struct dst_entry *dst; __u8 rcv_wscale; u32 tsoff = 0; @@ -241,7 +241,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) } req->rsk_window_clamp = tp->window_clamp ? 
:dst_metric(dst, RTAX_WINDOW); - tcp_select_initial_window(sk, tcp_full_space(sk), req->mss, + /* limit the window selection if the user enforce a smaller rx buffer */ + full_space = tcp_full_space(sk); + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && + (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) + req->rsk_window_clamp = full_space; + + tcp_select_initial_window(sk, full_space, req->mss, &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 0f02f6dc517c..e76b1d697a7c 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -21,6 +21,7 @@ #include <net/calipso.h> #endif +static int two = 2; static int flowlabel_reflect_max = 0x7; static int auto_flowlabels_min; static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX; @@ -151,7 +152,7 @@ static struct ctl_table ipv6_table_template[] = { .mode = 0644, .proc_handler = proc_rt6_multipath_hash_policy, .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .extra2 = &two, }, { .procname = "seg6_flowlabel", diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0f754a2b0258..b7ea65255ee1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1093,6 +1093,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) goto drop; + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); + return 0; + } + return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9fec580c968e..b2b06333b0ed 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -141,6 +141,27 @@ static int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum) +{ + struct sock *reuse_sk = NULL; + u32 hash; + + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { + hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + /* Fall back to scoring if group has connections */ + if (reuseport_has_conns(sk, false)) + return NULL; + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -150,7 +171,6 @@ static struct sock *udp6_lib_lookup2(struct net *net, { struct sock *sk, *result; int score, badness; - u32 hash = 0; result = NULL; badness = -1; @@ -158,16 +178,11 @@ static struct sock *udp6_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { - if (sk->sk_reuseport && - sk->sk_state != TCP_ESTABLISHED) { - hash = udp6_ehashfn(net, daddr, hnum, - saddr, sport); + result = lookup_reuseport(net, sk, skb, + saddr, sport, daddr, hnum); + if (result) + return result; - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; - } result = sk; badness = score; } @@ -175,6 +190,31 @@ static struct sock *udp6_lib_lookup2(struct net *net, return result; } +static inline struct sock *udp6_lookup_run_bpf(struct net *net, + struct udp_table *udptable, + struct sk_buff *skb, + const struct in6_addr *saddr, + __be16 
sport, + const struct in6_addr *daddr, + u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (udptable != &udp_table) + return NULL; /* only UDP is supported */ + + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + /* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -185,25 +225,42 @@ struct sock *__udp6_lib_lookup(struct net *net, unsigned short hnum = ntohs(dport); unsigned int hash2, slot2; struct udp_hslot *hslot2; - struct sock *result; + struct sock *result, *sk; hash2 = ipv6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; + /* Lookup connected or non-wildcard sockets */ result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, hslot2, skb); - if (!result) { - hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); - slot2 = hash2 & udptable->mask; + if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) + goto done; - hslot2 = &udptable->hash2[slot2]; - - result = udp6_lib_lookup2(net, saddr, sport, - &in6addr_any, hnum, dif, sdif, - hslot2, skb); + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + sk = udp6_lookup_run_bpf(net, udptable, skb, + saddr, sport, daddr, hnum); + if (sk) { + result = sk; + goto done; + } } + + /* Got non-wildcard socket or error on first lookup */ + if (result) + goto done; + + /* Lookup wildcard sockets */ + hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); + slot2 = hash2 & udptable->mask; + hslot2 = &udptable->hash2[slot2]; + + result = udp6_lib_lookup2(net, saddr, sport, + &in6addr_any, hnum, dif, sdif, + hslot2, skb); +done: if (IS_ERR(result)) return NULL; return result; @@ -643,7 +700,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). */ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", @@ -844,6 +901,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct net *net = dev_net(skb->dev); struct udphdr *uh; struct sock *sk; + bool refcounted; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) @@ -880,7 +938,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; /* Check if the socket is already available, e.g. 
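/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The rewritten __udp6_lib_lookup() above layers its lookups in
 * a fixed order: a connected (established) match always wins, then a BPF
 * sk_lookup program may redirect, then an unconnected non-wildcard match
 * is used, and only last the in6addr_any bucket. Hypothetical names, and
 * the hash buckets reduced to three globals:
 */
#include <stdio.h>

struct msock { const char *name; int established; };

static struct msock *sk_exact, *sk_bpf, *sk_wild;

static struct msock *udp6_lookup(void)
{
    struct msock *sk = sk_exact;    /* connected/non-wildcard hash */

    if (sk && sk->established)
        return sk;                  /* connected socket always wins */
    if (sk_bpf)
        return sk_bpf;              /* BPF sk_lookup may redirect */
    if (sk)
        return sk;                  /* unconnected, non-wildcard */
    return sk_wild;                 /* finally the in6addr_any bucket */
}

int main(void)
{
    struct msock bpf = { "bpf-redirect", 0 };
    struct msock *sk;

    sk_bpf = &bpf;
    sk = udp6_lookup();
    printf("%s\n", sk ? sk->name : "none");
    return 0;
}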
due to early demux */ - sk = skb_steal_sock(skb); + sk = skb_steal_sock(skb, &refcounted); if (sk) { struct dst_entry *dst = skb_dst(skb); int ret; @@ -889,12 +947,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, udp6_sk_rx_dst_set(sk, dst); if (!uh->check && !udp_sk(sk)->no_check6_rx) { - sock_put(sk); + if (refcounted) + sock_put(sk); goto report_csum_error; } ret = udp6_unicast_rcv_skb(sk, skb, uh); - sock_put(sk); + if (refcounted) + sock_put(sk); return ret; } diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index eecac1b7148e..cf2a0ce15c1c 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -111,9 +111,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ebb62a4ebe30..f6e194da92ac 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1784,7 +1784,7 @@ static int iucv_callback_connreq(struct iucv_path *path, } /* Create the new socket */ - nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); + nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); if (!nsk) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); @@ -1990,7 +1990,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) goto out; } - nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); + nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); bh_lock_sock(sk); if ((sk->sk_state != IUCV_LISTEN) || sk_acceptq_is_full(sk) || @@ -2175,7 +2175,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, char nullstring[8]; if (!pskb_may_pull(skb, sizeof(*trans_hdr))) { - WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len); kfree_skb(skb); return NET_RX_SUCCESS; } diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c..907d04a47459 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); @@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; @@ -2895,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) break; if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } return sz + 
sizeof(struct sadb_prop); @@ -2913,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!ealg->pfkey_supported) continue; - if (!(ealg_tmpl_set(t, ealg) && ealg->available)) + if (!(ealg_tmpl_set(t, ealg))) continue; for (k = 1; ; k++) { @@ -2924,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg) && aalg->available) + if (aalg_tmpl_set(t, aalg)) sz += sizeof(struct sadb_comb); } } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 425b95eb7e87..95805a6331be 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1030,6 +1030,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->ignore_df = 1; + skb_dst_drop(skb); #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(tunnel->sock)) error = inet6_csk_xmit(tunnel->sock, skb, NULL); @@ -1101,10 +1102,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len goto out_unlock; } - /* Get routing info from the tunnel socket */ - skb_dst_drop(skb); - skb_dst_set(skb, sk_dst_check(sk, 0)); - inet = inet_sk(sk); fl = &inet->cork.fl; switch (tunnel->encap) { @@ -1460,6 +1457,9 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net, if (sk->sk_type != SOCK_DGRAM) return -EPROTONOSUPPORT; + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) + return -EPROTONOSUPPORT; + if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) return -EPROTONOSUPPORT; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0d7c887a2b75..955662a6dee7 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -20,7 +20,6 @@ #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> -#include <net/inet_hashtables.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> @@ -209,15 +208,31 @@ discard: return 0; } +static int l2tp_ip_hash(struct sock *sk) +{ + if (sk_unhashed(sk)) { + write_lock_bh(&l2tp_ip_lock); + sk_add_node(sk, &l2tp_ip_table); + write_unlock_bh(&l2tp_ip_lock); + } + return 0; +} + +static void l2tp_ip_unhash(struct sock *sk) +{ + if (sk_unhashed(sk)) + return; + write_lock_bh(&l2tp_ip_lock); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip_lock); +} + static int l2tp_ip_open(struct sock *sk) { /* Prevent autobind. We don't have ports. 
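/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The l2tp_ip hunks above give the protocol its own idempotent
 * ->hash()/->unhash() pair instead of the inet hash tables: hash only if
 * not already on the list, and unhash is a no-op for an unhashed socket.
 * A userspace model with a hand-rolled pprev list, hypothetical names:
 */
#include <stdio.h>

struct lsock { struct lsock *next; struct lsock **pprev; };

static struct lsock *l2tp_table;

static int hashed(const struct lsock *sk) { return sk->pprev != 0; }

static void l2tp_hash(struct lsock *sk)
{
    if (hashed(sk))             /* already on the list: nothing to do */
        return;
    sk->next = l2tp_table;
    if (l2tp_table)
        l2tp_table->pprev = &sk->next;
    sk->pprev = &l2tp_table;
    l2tp_table = sk;
}

static void l2tp_unhash(struct lsock *sk)
{
    if (!hashed(sk))            /* mirror of the kernel's early return */
        return;
    *sk->pprev = sk->next;
    if (sk->next)
        sk->next->pprev = sk->pprev;
    sk->next = 0;
    sk->pprev = 0;
}

int main(void)
{
    struct lsock a = { 0, 0 };

    l2tp_hash(&a);
    l2tp_hash(&a);              /* idempotent second add */
    l2tp_unhash(&a);
    l2tp_unhash(&a);            /* safe double unhash */
    printf("table empty: %d\n", l2tp_table == 0);
    return 0;
}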
*/ inet_sk(sk)->inet_num = IPPROTO_L2TP; - write_lock_bh(&l2tp_ip_lock); - sk_add_node(sk, &l2tp_ip_table); - write_unlock_bh(&l2tp_ip_lock); - + l2tp_ip_hash(sk); return 0; } @@ -594,8 +609,8 @@ static struct proto l2tp_ip_prot = { .sendmsg = l2tp_ip_sendmsg, .recvmsg = l2tp_ip_recvmsg, .backlog_rcv = l2tp_ip_backlog_recv, - .hash = inet_hash, - .unhash = inet_unhash, + .hash = l2tp_ip_hash, + .unhash = l2tp_ip_unhash, .obj_size = sizeof(struct l2tp_ip_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index d148766f40d1..0fa694bd3f6a 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -20,8 +20,6 @@ #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> -#include <net/inet_hashtables.h> -#include <net/inet6_hashtables.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> @@ -222,15 +220,31 @@ discard: return 0; } +static int l2tp_ip6_hash(struct sock *sk) +{ + if (sk_unhashed(sk)) { + write_lock_bh(&l2tp_ip6_lock); + sk_add_node(sk, &l2tp_ip6_table); + write_unlock_bh(&l2tp_ip6_lock); + } + return 0; +} + +static void l2tp_ip6_unhash(struct sock *sk) +{ + if (sk_unhashed(sk)) + return; + write_lock_bh(&l2tp_ip6_lock); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); +} + static int l2tp_ip6_open(struct sock *sk) { /* Prevent autobind. We don't have ports. */ inet_sk(sk)->inet_num = IPPROTO_L2TP; - write_lock_bh(&l2tp_ip6_lock); - sk_add_node(sk, &l2tp_ip6_table); - write_unlock_bh(&l2tp_ip6_lock); - + l2tp_ip6_hash(sk); return 0; } @@ -728,8 +742,8 @@ static struct proto l2tp_ip6_prot = { .sendmsg = l2tp_ip6_sendmsg, .recvmsg = l2tp_ip6_recvmsg, .backlog_rcv = l2tp_ip6_backlog_recv, - .hash = inet6_hash, - .unhash = inet_unhash, + .hash = l2tp_ip6_hash, + .unhash = l2tp_ip6_unhash, .obj_size = sizeof(struct l2tp_ip6_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index f5a9bdc4980c..ebb381c3f1b9 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -920,51 +920,51 @@ static const struct genl_ops l2tp_nl_ops[] = { .cmd = L2TP_CMD_TUNNEL_CREATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_tunnel_create, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_DELETE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_tunnel_delete, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_MODIFY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_tunnel_modify, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_tunnel_get, .dumpit = l2tp_nl_cmd_tunnel_dump, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_CREATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_session_create, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_DELETE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_session_delete, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_MODIFY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_session_modify, - .flags = 
GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = l2tp_nl_cmd_session_get, .dumpit = l2tp_nl_cmd_session_dump, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, }; diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index 7a4d0715d1c3..a966d29c772d 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c @@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb) skb = skb_dequeue(&lapb->write_queue); do { - if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { + skbn = skb_copy(skb, GFP_ATOMIC); + if (!skbn) { skb_queue_head(&lapb->write_queue, skb); break; } diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index c74f44dfaa22..fa0f3c1543ba 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (addr->sllc_arphrd != ARPHRD_ETHER) + goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); @@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (unlikely(addr->sllc_family != AF_LLC)) + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) goto out; dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); rc = -ENODEV; @@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { - if (!addr->sllc_arphrd) - addr->sllc_arphrd = llc->dev->type; if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); @@ -780,7 +784,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } /* Well, if we have backlog, try to process it now yet. 
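/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The lapb_kick() hunk above switches skb_clone() to skb_copy()
 * because a clone shares the payload buffer with the queued original, so
 * rewriting header bytes in the transmitted copy would corrupt the copy
 * still sitting on the write queue. A toy illustration of shared versus
 * private data, hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; };

static struct buf clone_buf(struct buf *b)  /* shares data, like skb_clone */
{
    struct buf c = { b->data };
    return c;
}

static struct buf copy_buf(const struct buf *b) /* owns data, like skb_copy */
{
    struct buf c = { malloc(4) };

    if (c.data)
        memcpy(c.data, b->data, 4);
    return c;
}

int main(void)
{
    unsigned char payload[4] = { 1, 2, 3, 4 };
    struct buf orig = { payload };
    struct buf shared = clone_buf(&orig);
    struct buf deep = copy_buf(&orig);

    shared.data[0] = 99;        /* also visible through orig: the bug */
    printf("orig after clone write: %u\n", orig.data[0]);
    if (deep.data) {
        deep.data[0] = 77;      /* private copy: the fix */
        printf("orig after copy write: %u\n", orig.data[0]);
        free(deep.data);
    }
    return 0;
}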
*/ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 0daaf7e37a21..1b50bbf030ed 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -698,7 +698,8 @@ void sta_set_rate_info_tx(struct sta_info *sta, u16 brate; sband = ieee80211_get_sband(sta->sdata); - if (sband) { + WARN_ON_ONCE(sband && !sband->bitrates); + if (sband && sband->bitrates) { brate = sband->bitrates[rate->idx].bitrate; rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); } @@ -1669,8 +1670,10 @@ static int ieee80211_change_station(struct wiphy *wiphy, } if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && - sta->sdata->u.vlan.sta) + sta->sdata->u.vlan.sta) { + ieee80211_clear_fast_rx(sta); RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL); + } if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) ieee80211_vif_dec_num_mcast(sta->sdata); @@ -2140,6 +2143,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; @@ -2904,14 +2908,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, continue; for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { - if (~sdata->rc_rateidx_mcs_mask[i][j]) { + if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { sdata->rc_has_mcs_mask[i] = true; break; } } for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { - if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { + if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { sdata->rc_has_vht_mcs_mask[i] = true; break; } diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index c9a8a2433e8a..48322e45e7dd 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local, } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { ret = drv_sta_add(local, sdata, &sta->sta); - if (ret == 0) + if (ret == 0) { sta->uploaded = true; + if (rcu_access_pointer(sta->sta.rates)) + drv_sta_rate_tbl_update(local, sdata, &sta->sta); + } } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { drv_sta_remove(local, sdata, &sta->sta); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 0a6ff01c68a9..0e26c83b6b41 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1868,6 +1868,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) /* remove beacon */ kfree(sdata->u.ibss.ie); + sdata->u.ibss.ie = NULL; + sdata->u.ibss.ie_len = 0; /* on the next join, re-program HT parameters */ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 05406e9c05b3..268f1d8f440b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1061,6 +1061,7 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_FLUSH, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, IEEE80211_QUEUE_STOP_REASONS, }; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index af8b09214786..6089b09ec13b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1537,6 +1537,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, if (ret) return ret; + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + 
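/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The two calls added around this point implement a classic
 * quiesce sequence for the interface-type change: stop the vif queues so
 * no new tx enters the driver, synchronize so in-flight tx drains, do
 * the reconfiguration, then wake the queues with the same reason token
 * so stop and wake stay balanced. A compilable model, hypothetical names:
 */
#include <stdio.h>

enum stop_reason { STOP_IFTYPE_CHANGE };

static void stop_queues(enum stop_reason r) { printf("stop(%d)\n", r); }
static void drain_inflight(void)            { printf("drain\n"); }
static void reconfigure(void)               { printf("reconfig\n"); }
static void wake_queues(enum stop_reason r) { printf("wake(%d)\n", r); }

static void change_iftype(void)
{
    stop_queues(STOP_IFTYPE_CHANGE);  /* no new frames reach the driver */
    drain_inflight();                 /* the role of synchronize_net() */
    reconfigure();                    /* do_stop + teardown + reopen */
    wake_queues(STOP_IFTYPE_CHANGE);  /* matching reason re-enables tx */
}

int main(void) { change_iftype(); return 0; }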
synchronize_net(); + ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata); @@ -1557,6 +1561,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, err = ieee80211_do_open(&sdata->wdev, false); WARN(err, "type change: do_open returned %d", err); + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); return ret; } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 2d05c4cfaf6d..f215218a88c9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -954,8 +954,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) continue; if (!dflt_chandef.chan) { + /* + * Assign the first enabled channel to dflt_chandef + * from the list of channels + */ + for (i = 0; i < sband->n_channels; i++) + if (!(sband->channels[i].flags & + IEEE80211_CHAN_DISABLED)) + break; + /* if none found then use the first anyway */ + if (i == sband->n_channels) + i = 0; cfg80211_chandef_create(&dflt_chandef, - &sband->channels[0], + &sband->channels[i], NL80211_CHAN_NO_HT); /* init channel we're on */ if (!local->use_chanctx && !local->_oper_chandef.chan) { @@ -1045,7 +1056,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; if (hw->max_signal <= 0) { result = -EINVAL; - goto fail_wiphy_register; + goto fail_workqueue; } } @@ -1102,12 +1113,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; - WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, - local->hw.n_cipher_schemes)); + if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, + local->hw.n_cipher_schemes))) { + result = -EINVAL; + goto fail_workqueue; + } result = ieee80211_init_cipher_suites(local); if (result < 0) - goto fail_wiphy_register; + goto fail_workqueue; if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; @@ -1133,10 +1147,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM; - result = wiphy_register(local->hw.wiphy); - if (result < 0) - goto fail_wiphy_register; - /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. 
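/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The ieee80211_register_hw() rework in the surrounding hunks
 * is largely about keeping the error unwind correct once wiphy_register()
 * moves later in the sequence. This compilable sketch shows the
 * goto-ladder idiom the patch preserves: each failure jumps to the label
 * that undoes exactly the steps already completed, in reverse order.
 * Step names are hypothetical.
 */
#include <stdio.h>

static void undo(const char *s) { printf("undo %s\n", s); }

static int register_hw(int fail_at)
{
    if (fail_at == 1)
        return -1;                  /* nothing to unwind yet */
    printf("workqueue created\n");

    if (fail_at == 2)
        goto fail_workqueue;
    printf("rate control initialized\n");

    if (fail_at == 3)
        goto fail_rate;
    printf("wiphy registered\n");
    return 0;

fail_rate:
    undo("rate control");
fail_workqueue:
    undo("workqueue");
    return -1;
}

int main(void)
{
    return register_hw(3) ? 1 : 0;  /* exercise the deepest unwind */
}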
@@ -1159,8 +1169,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); - debugfs_hw_add(local); - /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default @@ -1192,9 +1200,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) goto fail_flows; rtnl_lock(); - result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); + rtnl_unlock(); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); @@ -1248,6 +1256,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->sband_allocated |= BIT(band); } + result = wiphy_register(local->hw.wiphy); + if (result < 0) + goto fail_wiphy_register; + + debugfs_hw_add(local); + rate_control_add_debugfs(local); + + rtnl_lock(); + /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { @@ -1287,17 +1304,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: #endif + wiphy_unregister(local->hw.wiphy); + fail_wiphy_register: rtnl_lock(); rate_control_deinitialize(local); ieee80211_remove_interfaces(local); - fail_rate: rtnl_unlock(); + fail_rate: fail_flows: ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: - wiphy_unregister(local->hw.wiphy); - fail_wiphy_register: if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); kfree(local->int_scan_req); @@ -1347,8 +1364,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) skb_queue_purge(&local->skb_queue_unreliable); skb_queue_purge(&local->skb_queue_tdls_chsw); - destroy_workqueue(local->workqueue); wiphy_unregister(local->hw.wiphy); + destroy_workqueue(local->workqueue); ieee80211_led_exit(local); kfree(local->int_scan_req); } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d09b3c789314..36978a0e5000 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1257,15 +1257,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) mesh_neighbour_update(sdata, mgmt->sa, &elems, rx_status); + + if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && + !sdata->vif.csa_active) + ieee80211_mesh_process_chnswitch(sdata, &elems, true); } if (ifmsh->sync_ops) ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, &elems, rx_status); - - if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && - !sdata->vif.csa_active) - ieee80211_mesh_process_chnswitch(sdata, &elems, true); } int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) @@ -1373,6 +1373,9 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems(pos, len - baselen, true, &elems, mgmt->bssid, NULL); + if (!mesh_matches_local(sdata, &elems)) + return; + ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 38a0383dfbcf..b5b728a71ab5 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -356,7 +356,7 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); + result = ((u64)tx_time * 
estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; } @@ -1103,7 +1103,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, target_flags, mpath->dst, mpath->sn, da, 0, ttl, lifetime, 0, ifmsh->preq_id++, sdata); + + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_DELETED) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); + spin_unlock_bh(&mpath->state_lock); enddiscovery: rcu_read_unlock(); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 117519bf33d6..1708b64d4109 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void) atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); spin_lock_init(&newtbl->walk_lock); + rhashtable_init(&newtbl->rhead, &mesh_rht_params); return newtbl; } @@ -521,6 +522,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } @@ -774,9 +776,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) goto free_path; } - rhashtable_init(&tbl_path->rhead, &mesh_rht_params); - rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); - sdata->u.mesh.mesh_paths = tbl_path; sdata->u.mesh.mpp_paths = tbl_mpp; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c7d8044ff0fa..17a3a1c938be 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2460,7 +2460,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, if (!ieee80211_is_data(hdr->frame_control)) return; - if (ieee80211_is_nullfunc(hdr->frame_control) && + if (ieee80211_is_any_nullfunc(hdr->frame_control) && sdata->u.mgd.probe_send_count > 0) { if (ack) ieee80211_sta_reset_conn_monitor(sdata); diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index a1e9fc7878aa..9841db84bce0 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -214,17 +214,16 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf, ref->ops->name, len); } -static const struct file_operations rcname_ops = { +const struct file_operations rcname_ops = { .read = rcname_read, .open = simple_open, .llseek = default_llseek, }; #endif -static struct rate_control_ref *rate_control_alloc(const char *name, - struct ieee80211_local *local) +static struct rate_control_ref * +rate_control_alloc(const char *name, struct ieee80211_local *local) { - struct dentry *debugfsdir = NULL; struct rate_control_ref *ref; ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); @@ -234,13 +233,7 @@ static struct rate_control_ref *rate_control_alloc(const char *name, if (!ref->ops) goto free; -#ifdef CONFIG_MAC80211_DEBUGFS - debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); - local->debugfs.rcdir = debugfsdir; - debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); -#endif - - ref->priv = ref->ops->alloc(&local->hw, debugfsdir); + ref->priv = ref->ops->alloc(&local->hw); if (!ref->priv) goto free; return ref; @@ -941,7 +934,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, if (old) kfree_rcu(old, rcu_head); - drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + if (sta->uploaded) + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ieee80211_sta_set_expected_throughput(pubsta, 
sta_get_expected_throughput(sta)); diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 5397c6dad056..79b44d3db171 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -60,6 +60,29 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta) #endif } +extern const struct file_operations rcname_ops; + +static inline void rate_control_add_debugfs(struct ieee80211_local *local) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *debugfsdir; + + if (!local->rate_ctrl) + return; + + if (!local->rate_ctrl->ops->add_debugfs) + return; + + debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); + local->debugfs.rcdir = debugfsdir; + debugfs_create_file("name", 0400, debugfsdir, + local->rate_ctrl, &rcname_ops); + + local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv, + debugfsdir); +#endif +} + void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata); /* Get a reference to the rate control algorithm. If `name' is NULL, get the diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index ee86c3333999..7227343551e9 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -270,7 +270,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, success = !!(info->flags & IEEE80211_TX_STAT_ACK); for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (ar[i].idx < 0) + if (ar[i].idx < 0 || !ar[i].count) break; ndx = rix_to_ndx(mi, ar[i].idx); @@ -283,12 +283,6 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, mi->r[ndx].stats.success += success; } - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0)) - mi->sample_packets++; - - if (mi->sample_deferred > 0) - mi->sample_deferred--; - if (time_after(jiffies, mi->last_stats_update + (mp->update_interval * HZ) / 1000)) minstrel_update_stats(mp, mi); @@ -363,7 +357,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, return; delta = (mi->total_packets * sampling_ratio / 100) - - (mi->sample_packets + mi->sample_deferred / 2); + mi->sample_packets; /* delta < 0: no sampling required */ prev_sample = mi->prev_sample; @@ -372,7 +366,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, return; if (mi->total_packets >= 10000) { - mi->sample_deferred = 0; mi->sample_packets = 0; mi->total_packets = 0; } else if (delta > mi->n_rates * 2) { @@ -397,19 +390,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, * rate sampling method should be used. * Respect such rates that are not sampled for 20 interations. */ - if (mrr_capable && - msr->perfect_tx_time > mr->perfect_tx_time && - msr->stats.sample_skipped < 20) { - /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark - * packets that have the sampling rate deferred to the - * second MRR stage. Increase the sample counter only - * if the deferred sample rate was actually used. 
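/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] The minstrel hunks above delete the deferred-sampling
 * bookkeeping; what remains is a plain budget check: aim to spend
 * sampling_ratio percent of packets probing other rates, and probe only
 * while the station is behind that budget. Names and numbers below are
 * illustrative only.
 */
#include <stdio.h>

static int need_sample(unsigned int total_packets,
                       unsigned int sample_packets,
                       unsigned int sampling_ratio /* percent */)
{
    long delta = (long)(total_packets * sampling_ratio / 100)
                 - (long)sample_packets;

    return delta > 0;   /* behind budget: probe a non-current rate */
}

int main(void)
{
    printf("%d\n", need_sample(1000, 80, 10));   /* 100 - 80 > 0 -> 1 */
    printf("%d\n", need_sample(1000, 120, 10));  /* 100 - 120     -> 0 */
    return 0;
}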
- * Use the sample_deferred counter to make sure that - * the sampling is not done in large bursts */ - info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; - rate++; - mi->sample_deferred++; - } else { + if (msr->perfect_tx_time < mr->perfect_tx_time || + msr->stats.sample_skipped >= 20) { if (!msr->sample_limit) return; @@ -429,6 +411,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, rate->idx = mi->r[ndx].rix; rate->count = minstrel_get_retry_count(&mi->r[ndx], info); + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 51d8b2c846e7..3112d85a014d 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -79,7 +79,6 @@ struct minstrel_sta_info { u8 max_prob_rate; unsigned int total_packets; unsigned int sample_packets; - int sample_deferred; unsigned int sample_row; unsigned int sample_column; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 0ef2633349b5..f2abe1f77dfc 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -1631,7 +1631,7 @@ minstrel_ht_init_cck_rates(struct minstrel_priv *mp) } static void * -minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +minstrel_ht_alloc(struct ieee80211_hw *hw) { struct minstrel_priv *mp; @@ -1668,18 +1668,24 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) mp->hw = hw; mp->update_interval = 100; + minstrel_ht_init_cck_rates(mp); + + return mp; +} + #ifdef CONFIG_MAC80211_DEBUGFS +static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir) +{ + struct minstrel_priv *mp = priv; + mp->fixed_rate_idx = (u32) -1; debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir, &mp->sample_switch); -#endif - - minstrel_ht_init_cck_rates(mp); - - return mp; } +#endif static void minstrel_ht_free(void *priv) @@ -1718,6 +1724,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = { .alloc = minstrel_ht_alloc, .free = minstrel_ht_free, #ifdef CONFIG_MAC80211_DEBUGFS + .add_debugfs = minstrel_ht_add_debugfs, .add_sta_debugfs = minstrel_ht_add_sta_debugfs, #endif .get_expected_throughput = minstrel_ht_get_expected_throughput, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0ba98ad9bc85..1a15e7bae106 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -419,7 +419,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, else if (status->bw == RATE_INFO_BW_5) channel_flags |= IEEE80211_CHAN_QUARTER; - if (status->band == NL80211_BAND_5GHZ) + if (status->band == NL80211_BAND_5GHZ || + status->band == NL80211_BAND_6GHZ) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; else if (status->encoding != RX_ENC_LEGACY) channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; @@ -1450,8 +1451,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (ieee80211_is_ctl(hdr->frame_control) || - ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control) || + ieee80211_is_any_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return RX_CONTINUE; @@ -1838,8 +1838,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Drop (qos-)data::nullfunc frames silently, since they * are used only to control station power saving mode. 
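/*
 * [Editor's note: illustrative standalone sketch, not part of the patch
 * stream.] Several hunks in this section fold the repeated
 * "is_nullfunc || is_qos_nullfunc" test into one ieee80211_is_any_nullfunc()
 * helper. A standalone equivalent over the 802.11 frame-control word,
 * using the standard type/subtype bit layout (host byte order here for
 * simplicity; the kernel operates on __le16):
 */
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FCTL_STYPE      0x00f0
#define FTYPE_DATA      0x0008
#define STYPE_NULLFUNC  0x0040
#define STYPE_QOS_NULL  0x00c0

static int is_any_nullfunc(uint16_t fc)
{
    uint16_t masked = fc & (FCTL_FTYPE | FCTL_STYPE);

    return masked == (FTYPE_DATA | STYPE_NULLFUNC) ||
           masked == (FTYPE_DATA | STYPE_QOS_NULL);
}

int main(void)
{
    /* nullfunc, QoS nullfunc, plain data: expect 1 1 0 */
    printf("%d %d %d\n", is_any_nullfunc(0x0048),
           is_any_nullfunc(0x00c8), is_any_nullfunc(0x0008));
    return 0;
}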
*/ - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { + if (ieee80211_is_any_nullfunc(hdr->frame_control)) { I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); /* @@ -2307,6 +2306,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) { + struct ieee80211_hdr *hdr = (void *)rx->skb->data; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -2317,9 +2317,34 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) if (status->flag & RX_FLAG_DECRYPTED) return 0; + /* check mesh EAPOL frames first */ + if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && + ieee80211_is_data(fc))) { + struct ieee80211s_hdr *mesh_hdr; + u16 hdr_len = ieee80211_hdrlen(fc); + u16 ethertype_offset; + __be16 ethertype; + + if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) + goto drop_check; + + /* make sure fixed part of mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, hdr_len + 6)) + goto drop_check; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); + ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + + sizeof(rfc1042_header); + + if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 && + ethertype == rx->sdata->control_port_protocol) + return 0; + } + +drop_check: /* Drop unencrypted frames if key is set. */ if (unlikely(!ieee80211_has_protected(fc) && - !ieee80211_is_nullfunc(fc) && + !ieee80211_is_any_nullfunc(fc) && ieee80211_is_data(fc) && rx->key)) return -EACCES; @@ -4055,6 +4080,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) rcu_read_lock(); key = rcu_dereference(sta->ptk[sta->ptk_idx]); + if (!key) + key = rcu_dereference(sdata->default_unicast_key); if (key) { switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 5fe2b645912f..132f8423adda 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -132,16 +132,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, } if (wide_bw_chansw_ie) { + u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1; struct ieee80211_vht_operation vht_oper = { .chan_width = wide_bw_chansw_ie->new_channel_width, .center_freq_seg0_idx = wide_bw_chansw_ie->new_center_freq_seg0, - .center_freq_seg1_idx = - wide_bw_chansw_ie->new_center_freq_seg1, + .center_freq_seg1_idx = new_seg1, /* .basic_mcs_set doesn't matter */ }; - struct ieee80211_ht_operation ht_oper = {}; + struct ieee80211_ht_operation ht_oper = { + .operation_mode = + cpu_to_le16(new_seg1 << + IEEE80211_HT_OP_MODE_CCFS2_SHIFT), + }; /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, * to the previously parsed chandef diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 21b1422b1b1c..4a23996dce04 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -217,7 +217,8 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; int i = 0; - list_for_each_entry_rcu(sta, &local->sta_list, list) { + list_for_each_entry_rcu(sta, &local->sta_list, list, + lockdep_is_held(&local->sta_mtx)) { if (sdata != sta->sdata) continue; if (i < idx) { @@ -243,6 +244,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, */ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) { + /* + * If we had used
sta_info_pre_move_state() then we might not + * have gone through the state transitions down again, so do + * it here now (and warn if it's inserted). + * + * This will clear state such as fast TX/RX that may have been + * allocated during state transitions. + */ + while (sta->sta_state > IEEE80211_STA_NONE) { + int ret; + + WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); + + ret = sta_info_move_state(sta, sta->sta_state - 1); + if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) + break; + } + if (sta->rate_ctrl) rate_control_free_sta(sta); @@ -669,7 +688,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) out_drop_sta: local->num_sta--; synchronize_net(); - __cleanup_single_sta(sta); + cleanup_single_sta(sta); out_err: mutex_unlock(&local->sta_mtx); kfree(sinfo); @@ -688,19 +707,13 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) err = sta_info_insert_check(sta); if (err) { + sta_info_free(local, sta); mutex_unlock(&local->sta_mtx); rcu_read_lock(); - goto out_free; + return err; } - err = sta_info_insert_finish(sta); - if (err) - goto out_free; - - return 0; - out_free: - sta_info_free(local, sta); - return err; + return sta_info_insert_finish(sta); } int sta_info_insert(struct sta_info *sta) @@ -1032,7 +1045,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); WARN_ON_ONCE(ret); } @@ -2082,6 +2095,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); sband = local->hw.wiphy->bands[band]; + + if (WARN_ON_ONCE(!sband->bitrates)) + break; + brate = sband->bitrates[rate_idx].bitrate; if (rinfo->bw == RATE_INFO_BW_5) shift = 2; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5a3d645fe1bc..c56831797655 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -643,8 +643,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) + if (ieee80211_is_any_nullfunc(hdr->frame_control)) cfg80211_probe_status(sdata->dev, hdr->addr1, cookie, acked, info->status.ack_signal, @@ -1030,7 +1029,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, I802_DEBUG_INC(local->dot11FailedCount); } - if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && + if (ieee80211_is_any_nullfunc(fc) && ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 41da41cb5c40..538722522ffe 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -297,7 +297,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && !ieee80211_is_probe_req(hdr->frame_control) && - !ieee80211_is_nullfunc(hdr->frame_control)) + !ieee80211_is_any_nullfunc(hdr->frame_control)) /* * When software scanning only nullfunc frames (to notify * the sleep state to the AP) and probe requests (for the @@ -657,7 +657,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) if (!skip_hw && tx->key && tx->key->flags & 
KEY_FLAG_UPLOADED_TO_HARDWARE) info->control.hw_key = &tx->key->conf; - } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta && + } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta && test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) { return TX_DROP; } @@ -1944,19 +1944,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, /* device xmit handlers */ +enum ieee80211_encrypt { + ENCRYPT_NO, + ENCRYPT_MGMT, + ENCRYPT_DATA, +}; + static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, - int head_need, bool may_encrypt) + int head_need, + enum ieee80211_encrypt encrypt) { struct ieee80211_local *local = sdata->local; - struct ieee80211_hdr *hdr; bool enc_tailroom; int tail_need = 0; - hdr = (struct ieee80211_hdr *) skb->data; - enc_tailroom = may_encrypt && - (sdata->crypto_tx_tailroom_needed_cnt || - ieee80211_is_mgmt(hdr->frame_control)); + enc_tailroom = encrypt == ENCRYPT_MGMT || + (encrypt == ENCRYPT_DATA && + sdata->crypto_tx_tailroom_needed_cnt); if (enc_tailroom) { tail_need = IEEE80211_ENCRYPT_TAILROOM; @@ -1988,23 +1993,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; int headroom; - bool may_encrypt; + enum ieee80211_encrypt encrypt; - may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); + if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) + encrypt = ENCRYPT_NO; + else if (ieee80211_is_mgmt(hdr->frame_control)) + encrypt = ENCRYPT_MGMT; + else + encrypt = ENCRYPT_DATA; headroom = local->tx_headroom; - if (may_encrypt) + if (encrypt != ENCRYPT_NO) headroom += sdata->encrypt_headroom; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); - if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { + if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { ieee80211_free_txskb(&local->hw, skb); return; } + /* reload after potential resize */ hdr = (struct ieee80211_hdr *) skb->data; info->control.vif = &sdata->vif; @@ -2808,7 +2819,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, head_need += sdata->encrypt_headroom; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); - if (ieee80211_skb_resize(sdata, skb, head_need, true)) { + if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; return ERR_PTR(-ENOMEM); @@ -3482,7 +3493,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, if (unlikely(ieee80211_skb_resize(sdata, skb, max_t(int, extra_head + hw_headroom - skb_headroom(skb), 0), - false))) { + ENCRYPT_NO))) { kfree_skb(skb); return true; } @@ -3571,7 +3582,7 @@ begin: test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) goto out; - if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) { + if (vif->txqs_stopped[txq->ac]) { set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); goto out; } @@ -3762,7 +3773,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw, * get immediately moved to the back of the list on the next * call to ieee80211_next_txq(). 
*/ - if (txqi->txq.sta && + if (txqi->txq.sta && local->airtime_flags && wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) list_add(&txqi->schedule_order, @@ -3913,6 +3924,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, skb->prev = NULL; skb->next = NULL; + if (skb->protocol == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; + skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, ctrl_flags); if (IS_ERR(skb)) @@ -5096,7 +5110,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, return -EINVAL; if (proto == sdata->control_port_protocol) - ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; if (unencrypted) flags = IEEE80211_TX_INTFL_DONT_ENCRYPT; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index ccdcb9ad9ac7..cea83fa5fc5b 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; - vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | - IEEE80211_VHT_CAP_RXLDPC | + vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | @@ -180,6 +177,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, + own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); + /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: @@ -446,12 +446,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) * IEEE80211-2016 specification makes higher bandwidth operation * possible on the TDLS link if the peers have wider bandwidth * capability. + * + * However, in this case, and only if the TDLS peer is authorized, + * limit to the tdls_chandef so that the configuration here isn't + * wider than what's actually requested on the channel context. 
*/ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && - test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) - return bw; - - bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) && + test_sta_flag(sta, WLAN_STA_AUTHORIZED) && + sta->tdls_chandef.chan) + bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width)); + else + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); return bw; } diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index c079ee69d3d0..346a9c86bcf8 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -152,7 +152,7 @@ err_tfm0: crypto_free_sync_skcipher(key->tfm0); err_tfm: for (i = 0; i < ARRAY_SIZE(key->tfm); i++) - if (key->tfm[i]) + if (!IS_ERR_OR_NULL(key->tfm[i])) crypto_free_aead(key->tfm[i]); kzfree(key); diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index ab52811523e9..c829e4a75325 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -34,11 +34,11 @@ void ieee802154_xmit_worker(struct work_struct *work) if (res) goto err_tx; - ieee802154_xmit_complete(&local->hw, skb, false); - dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + ieee802154_xmit_complete(&local->hw, skb, false); + return; err_tx: @@ -78,6 +78,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) /* async is priority, otherwise sync is fallback */ if (local->ops->xmit_async) { + unsigned int len = skb->len; + ret = drv_xmit_async(local, skb); if (ret) { ieee802154_wake_queue(&local->hw); @@ -85,7 +87,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } else { local->tx_skb = skb; queue_work(local->workqueue, &local->tx_work); diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index b1690149b6fa..1482259de9b5 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -14,6 +14,7 @@ #include <linux/netdev_features.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <net/mpls.h> static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, netdev_features_t features) @@ -27,6 +28,8 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, skb_reset_network_header(skb); mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb); + if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN)) + goto out; if (unlikely(!pskb_may_pull(skb, mpls_hlen))) goto out; diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 70fe02697544..9bd12f7517ed 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -103,13 +103,20 @@ static void ncsi_channel_monitor(struct timer_list *t) monitor_state = nc->monitor.state; spin_unlock_irqrestore(&nc->lock, flags); - if (!enabled || chained) { - ncsi_stop_channel_monitor(nc); - return; - } + if (!enabled) + return; /* expected race disabling timer */ + if (WARN_ON_ONCE(chained)) + goto bad_state; + if (state != NCSI_CHANNEL_INACTIVE && state != NCSI_CHANNEL_ACTIVE) { - ncsi_stop_channel_monitor(nc); +bad_state: + netdev_warn(ndp->ndev.dev, + "Bad NCSI monitor state channel %d 0x%x %s queue\n", + nc->id, state, chained ? 
"on" : "off"); + spin_lock_irqsave(&nc->lock, flags); + nc->monitor.enabled = false; + spin_unlock_irqrestore(&nc->lock, flags); return; } @@ -134,10 +141,9 @@ static void ncsi_channel_monitor(struct timer_list *t) ncsi_report_link(ndp, true); ndp->flags |= NCSI_DEV_RESHUFFLE; - ncsi_stop_channel_monitor(nc); - ncm = &nc->modes[NCSI_MODE_LINK]; spin_lock_irqsave(&nc->lock, flags); + nc->monitor.enabled = false; nc->state = NCSI_CHANNEL_INVISIBLE; ncm->data[2] &= ~0x1; spin_unlock_irqrestore(&nc->lock, flags); @@ -1667,9 +1673,6 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev, ndp->ptype.dev = dev; dev_add_pack(&ndp->ptype); - /* Set up generic netlink interface */ - ncsi_init_netlink(dev); - return nd; } EXPORT_SYMBOL_GPL(ncsi_register_dev); @@ -1826,8 +1829,6 @@ void ncsi_unregister_dev(struct ncsi_dev *nd) list_del_rcu(&ndp->node); spin_unlock_irqrestore(&ncsi_dev_lock, flags); - ncsi_unregister_netlink(nd->dev); - kfree(ndp); } EXPORT_SYMBOL_GPL(ncsi_unregister_dev); diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 8b386d766e7d..a33ea45dec05 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -766,24 +766,8 @@ static struct genl_family ncsi_genl_family __ro_after_init = { .n_ops = ARRAY_SIZE(ncsi_ops), }; -int ncsi_init_netlink(struct net_device *dev) +static int __init ncsi_init_netlink(void) { - int rc; - - rc = genl_register_family(&ncsi_genl_family); - if (rc) - netdev_err(dev, "ncsi: failed to register netlink family\n"); - - return rc; -} - -int ncsi_unregister_netlink(struct net_device *dev) -{ - int rc; - - rc = genl_unregister_family(&ncsi_genl_family); - if (rc) - netdev_err(dev, "ncsi: failed to unregister netlink family\n"); - - return rc; + return genl_register_family(&ncsi_genl_family); } +subsys_initcall(ncsi_init_netlink); diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h index 7502723fba83..39a1a9d7bf77 100644 --- a/net/ncsi/ncsi-netlink.h +++ b/net/ncsi/ncsi-netlink.h @@ -22,7 +22,4 @@ int ncsi_send_netlink_err(struct net_device *dev, struct nlmsghdr *nlhdr, int err); -int ncsi_init_netlink(struct net_device *dev); -int ncsi_unregister_netlink(struct net_device *dev); - #endif /* __NCSI_NETLINK_H__ */ diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index d5611f04926d..7c893c379920 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -1114,7 +1114,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev, int payload, i, ret; /* Find the NCSI device */ - nd = ncsi_find_dev(dev); + nd = ncsi_find_dev(orig_dev); ndp = nd ? 
TO_NCSI_DEV_PRIV(nd) : NULL; if (!ndp) return -ENODEV; diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index d934384f31ad..6e3cf4d19ce8 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -314,7 +314,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], set->variant = &bitmap_ip; if (!init_map_ip(set, map, first_ip, last_ip, elements, hosts, netmask)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index e8532783b43a..ae7cdc0d0f29 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_ipmac; if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index e3ac914fff1a..d4a14750f5c4 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -247,7 +247,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_port; if (!init_map_port(set, map, first_port, last_port)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 75da200aa5d8..16ae770f049d 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -285,8 +285,7 @@ flag_nested(const struct nlattr *nla) static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = { [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 }, - [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY, - .len = sizeof(struct in6_addr) }, + [IPSET_ATTR_IPADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), }; int @@ -382,6 +381,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; + if (align < ip_set_extensions[id].align) + align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; @@ -483,13 +484,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, if (SET_WITH_COUNTER(set)) { struct ip_set_counter *counter = ext_counter(data, set); + ip_set_update_counter(counter, ext, flags); + if (flags & IPSET_FLAG_MATCH_COUNTERS && !(ip_set_match_counter(ip_set_get_packets(counter), mext->packets, mext->packets_op) && ip_set_match_counter(ip_set_get_bytes(counter), mext->bytes, mext->bytes_op))) return false; - ip_set_update_counter(counter, ext, flags); } if (SET_WITH_SKBINFO(set)) ip_set_get_skbinfo(ext_skbinfo(data, set), diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 2389c9f89e48..500de37858ac 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -143,20 +143,6 @@ htable_size(u8 hbits) return hsize * sizeof(struct hbucket *) + sizeof(struct htable); } -/* Compute htable_bits from the 
user input parameter hashsize */ -static u8 -htable_bits(u32 hashsize) -{ - /* Assume that hashsize == 2^htable_bits */ - u8 bits = fls(hashsize - 1); - - if (jhash_size(bits) != hashsize) - /* Round up to the first 2^n value */ - bits = fls(hashsize); - - return bits; -} - #ifdef IP_SET_HASH_WITH_NETS #if IPSET_NET_COUNT > 1 #define __CIDR(cidr, i) (cidr[i]) @@ -644,7 +630,7 @@ mtype_resize(struct ip_set *set, bool retried) struct htype *h = set->data; struct htable *t, *orig; u8 htable_bits; - size_t dsize = set->dsize; + size_t hsize, dsize = set->dsize; #ifdef IP_SET_HASH_WITH_NETS u8 flags; struct mtype_elem *tmp; @@ -668,21 +654,19 @@ mtype_resize(struct ip_set *set, bool retried) retry: ret = 0; htable_bits++; - if (!htable_bits) { - /* In case we have plenty of memory :-) */ - pr_warn("Cannot increase the hashsize of set %s further\n", - set->name); - ret = -IPSET_ERR_HASH_FULL; - goto out; - } - t = ip_set_alloc(htable_size(htable_bits)); + if (!htable_bits) + goto hbwarn; + hsize = htable_size(htable_bits); + if (!hsize) + goto hbwarn; + t = ip_set_alloc(hsize); if (!t) { ret = -ENOMEM; goto out; } t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); ret = -ENOMEM; goto out; } @@ -817,6 +801,12 @@ cleanup: if (ret == -EAGAIN) goto retry; goto out; + +hbwarn: + /* In case we have plenty of memory :-) */ + pr_warn("Cannot increase the hashsize of set %s further\n", set->name); + ret = -IPSET_ERR_HASH_FULL; + goto out; } /* Get the current number of elements and ext_size in the set */ @@ -1520,7 +1510,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, if (!h) return -ENOMEM; - hbits = htable_bits(hashsize); + /* Compute htable_bits from the user input parameter hashsize. + * Assume that hashsize == 2^htable_bits, + * otherwise round up to the first 2^n value. 
+ */ + hbits = fls(hashsize - 1); hsize = htable_size(hbits); if (hsize == 0) { kfree(h); @@ -1533,7 +1527,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, } t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); kfree(h); return -ENOMEM; } diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 67ac50104e6f..63908123f7ba 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -59,7 +59,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, /* Don't lookup sub-counters at all */ opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) - opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; + opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; list_for_each_entry_rcu(e, &map->members, list) { ret = ip_set_test(e->id, skb, par, opt); if (ret <= 0) diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index a28f64c4dc57..1030d9bd1bcb 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -752,12 +752,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af, struct dst_entry *dst = skb_dst(skb); if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && - ip6_route_me_harder(ipvs->net, skb) != 0) + ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0) return 1; } else #endif if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && - ip_route_me_harder(net, skb, RTN_LOCAL) != 0) + ip_route_me_harder(net, skb->sk, skb, RTN_LOCAL) != 0) return 1; return 0; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 9644321713f8..09995691ccf6 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2483,6 +2483,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) /* Set timeout values for (tcp tcpfin udp) */ ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg); goto out_unlock; + } else if (!len) { + /* No more commands with len == 0 below */ + ret = -EINVAL; + goto out_unlock; } usvc_compat = (struct ip_vs_service_user *)arg; @@ -2559,9 +2563,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; case IP_VS_SO_SET_DELDEST: ret = ip_vs_del_dest(svc, &udest); - break; - default: - ret = -EINVAL; } out_unlock: diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8dc892a9dc91..0c1bc654245c 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; + struct sock *sk = tinfo->sock->sk; + struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " @@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data) ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) - || kthread_should_stop()); + wait_event_interruptible(*sk_sleep(sk), + !skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue) || + kthread_should_stop()); /* do we have data now? 
*/ - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index a57fffd92274..41a812177b5f 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -623,6 +623,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, if (ret == NF_ACCEPT) { nf_reset_ct(skb); skb_forward_csum(skb); + if (skb->dev) + skb->tstamp = 0; } return ret; } @@ -668,6 +670,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, if (!local) { skb_forward_csum(skb); + if (skb->dev) + skb->tstamp = 0; NF_HOOK(pf, NF_INET_LOCAL_OUT, net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else @@ -692,6 +696,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, if (!local) { ip_vs_drop_early_demux_sk(skb); skb_forward_csum(skb); + if (skb->dev) + skb->tstamp = 0; NF_HOOK(pf, NF_INET_LOCAL_OUT, net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 459e10ce042d..3f494cf4f80b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1091,7 +1091,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, * Let nf_ct_resolve_clash() deal with this later. */ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && + nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) continue; NF_CT_STAT_INC_ATOMIC(net, found); @@ -1381,9 +1382,9 @@ __nf_conntrack_alloc(struct net *net, ct->status = 0; ct->timeout = 0; write_pnet(&ct->ct_net, net); - memset(&ct->__nfct_init_offset[0], 0, + memset(&ct->__nfct_init_offset, 0, offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, __nfct_init_offset[0])); + offsetof(struct nf_conn, __nfct_init_offset)); nf_ct_zone_add(ct, zone); @@ -1879,22 +1880,18 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) nf_conntrack_get(skb_nfct(nskb)); } -static int nf_conntrack_update(struct net *net, struct sk_buff *skb) +static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; - enum ip_conntrack_info ctinfo; struct nf_nat_hook *nat_hook; unsigned int status; - struct nf_conn *ct; int dataoff; u16 l3num; u8 l4num; - ct = nf_ct_get(skb, &ctinfo); - if (!ct || nf_ct_is_confirmed(ct)) - return 0; - l3num = nf_ct_l3num(ct); dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num); @@ -1951,6 +1948,78 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) return 0; } +/* This packet is coming from userspace via nf_queue, complete the packet + * processing after the helper invocation in nf_confirm(). 
+ */ +static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo) +{ + const struct nf_conntrack_helper *helper; + const struct nf_conn_help *help; + int protoff; + + help = nfct_help(ct); + if (!help) + return 0; + + helper = rcu_dereference(help->helper); + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) + return 0; + + switch (nf_ct_l3num(ct)) { + case NFPROTO_IPV4: + protoff = skb_network_offset(skb) + ip_hdrlen(skb); + break; +#if IS_ENABLED(CONFIG_IPV6) + case NFPROTO_IPV6: { + __be16 frag_off; + u8 pnum; + + pnum = ipv6_hdr(skb)->nexthdr; + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, + &frag_off); + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) + return 0; + break; + } +#endif + default: + return 0; + } + + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && + !nf_is_loopback_packet(skb)) { + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); + return -1; + } + } + + /* We've seen it coming out the other side: confirm it */ + return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0; +} + +static int nf_conntrack_update(struct net *net, struct sk_buff *skb) +{ + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + int err; + + ct = nf_ct_get(skb, &ctinfo); + if (!ct) + return 0; + + if (!nf_ct_is_confirmed(ct)) { + err = __nf_conntrack_update(net, skb, ct, ctinfo); + if (err < 0) + return err; + + ct = nf_ct_get(skb, &ctinfo); + } + + return nf_confirm_cthelper(skb, ct, ctinfo); +} + static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) { @@ -2340,7 +2409,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); static __always_inline unsigned int total_extension_size(void) { /* remember to add new extensions below */ - BUILD_BUG_ON(NF_CT_EXT_NUM > 9); + BUILD_BUG_ON(NF_CT_EXT_NUM > 10); return sizeof(struct nf_ct_ext) + sizeof(struct nf_conn_help) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index aa8adf930b3c..a7de00ccf37a 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -506,6 +506,31 @@ nla_put_failure: return -1; } +extern void (*ct_get_vmip_vpcid)(const struct nf_conn *ct, __be32 *vmip, u32 *vpcid); + +static inline int +ctnetlink_dump_vpcid_vmip(struct sk_buff *skb, const struct nf_conn *ct) +{ + int ret = 0; + void (*ct_get_vmip_vpcid_tmp)(const struct nf_conn *ct, __be32 *vmip, u32 *vpcid) = ct_get_vmip_vpcid; + + if (ct_get_vmip_vpcid_tmp) + { + __be32 vmip = 0; + u32 vpcid = 0; + ct_get_vmip_vpcid_tmp(ct, &vmip, &vpcid); + ret = nla_put_be32(skb, CTA_VPCID, htonl(vpcid)); + if (ret) + goto nla_put_failure; + ret = nla_put_be32(skb, CTA_VMIP, vmip); + if (ret) + goto nla_put_failure; + } + +nla_put_failure: + return ret; +} + static int ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nf_conn *ct) @@ -562,6 +587,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || ctnetlink_dump_master(skb, ct) < 0 || + ctnetlink_dump_vpcid_vmip(skb, ct) < 0 || ctnetlink_dump_ct_seq_adj(skb, ct) < 0 || ctnetlink_dump_ct_synproxy(skb, ct) < 0) goto nla_put_failure; @@ -1141,6 +1167,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], if (!tb[CTA_TUPLE_IP]) return -EINVAL; + if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) + return -EOPNOTSUPP; tuple->src.l3num = l3num; err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP],
tuple); @@ -1220,6 +1248,8 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { .len = NF_CT_LABELS_MAX_SIZE }, [CTA_LABELS_MASK] = { .type = NLA_BINARY, .len = NF_CT_LABELS_MAX_SIZE }, + [CTA_VPCID] = { .type = NLA_U32 }, + [CTA_VMIP] = { .type = NLA_U32 }, }; static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data) @@ -2678,6 +2708,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb, memset(&m, 0xFF, sizeof(m)); memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); m.src.u.all = mask->src.u.all; + m.src.l3num = tuple->src.l3num; m.dst.protonum = tuple->dst.protonum; nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index a971183f11af..1f44d523b512 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -72,24 +72,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ -const char *const pptp_msg_name[] = { - "UNKNOWN_MESSAGE", - "START_SESSION_REQUEST", - "START_SESSION_REPLY", - "STOP_SESSION_REQUEST", - "STOP_SESSION_REPLY", - "ECHO_REQUEST", - "ECHO_REPLY", - "OUT_CALL_REQUEST", - "OUT_CALL_REPLY", - "IN_CALL_REQUEST", - "IN_CALL_REPLY", - "IN_CALL_CONNECT", - "CALL_CLEAR_REQUEST", - "CALL_DISCONNECT_NOTIFY", - "WAN_ERROR_NOTIFY", - "SET_LINK_INFO" +static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { + [0] = "UNKNOWN_MESSAGE", + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", + [PPTP_ECHO_REPLY] = "ECHO_REPLY", + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", + [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" }; + +const char *pptp_msg_name(u_int16_t msg) +{ + if (msg > PPTP_MSG_MAX) + return pptp_msg_name_array[0]; + + return pptp_msg_name_array[msg]; +} EXPORT_SYMBOL(pptp_msg_name); #endif @@ -276,7 +284,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REPLY: @@ -311,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->ocack.peersCallID; if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { @@ -328,7 +336,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, goto invalid; cid = pptpReq->icreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_IN_REQ; info->pac_call_id = cid; break; @@ -347,7 +355,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, 
PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); info->cstate = PPTP_CALL_IN_CONF; /* we expect a GRE connection from PAC to PNS */ @@ -357,7 +365,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, case PPTP_CALL_DISCONNECT_NOTIFY: /* server confirms disconnect */ cid = pptpReq->disc.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_NONE; /* untrack this call id, unexpect GRE packets */ @@ -384,7 +392,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; @@ -404,7 +412,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REQUEST: @@ -426,7 +434,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, info->cstate = PPTP_CALL_OUT_REQ; /* track PNS call id */ cid = pptpReq->ocreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->pns_call_id = cid; break; @@ -440,7 +448,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->icack.peersCallID; if (info->pac_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { @@ -480,7 +488,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index a0560d175a7f..aaf4293ddd45 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -565,6 +565,7 @@ static int nf_ct_netns_inet_get(struct net *net) int err; err = nf_ct_netns_do_get(net, NFPROTO_IPV4); +#if IS_ENABLED(CONFIG_IPV6) if (err < 0) goto err1; err = nf_ct_netns_do_get(net, NFPROTO_IPV6); @@ -575,6 +576,7 @@ static int nf_ct_netns_inet_get(struct net *net) err2: nf_ct_netns_put(net, NFPROTO_IPV4); err1: +#endif return err; } diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 5b05487a60d2..db11e403d818 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { - if (state->pf != NFPROTO_IPV4) - return -NF_ACCEPT; - if (!nf_ct_is_confirmed(ct)) { unsigned int *timeouts = nf_ct_timeout_lookup(ct); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4f897b14b606..810cca24b399 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; if (sctp_error(skb, dataoff, state)) return -NF_ACCEPT; @@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= 
~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 1926fd56df56..848b137151c2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct, swin = win << sender->td_scale; sender->td_maxwin = (swin == 0 ? 1 : swin); sender->td_maxend = end + sender->td_maxwin; - /* - * We haven't seen traffic in the other direction yet - * but we have to tweak window tracking to pass III - * and IV until that happens. - */ - if (receiver->td_maxwin == 0) + if (receiver->td_maxwin == 0) { + /* We haven't seen traffic in the other + * direction yet but we have to tweak window + * tracking to pass III and IV until that + * happens. + */ receiver->td_end = receiver->td_maxend = sack; + } else if (sack == receiver->td_end + 1) { + /* Likely a reply to a keepalive. + * Needed for III. + */ + receiver->td_end++; + } + } } else if (((state->state == TCP_CONNTRACK_SYN_SENT && dir == IP_CT_DIR_ORIGINAL) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index c6d29db08bae..bf6cb46a78d6 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -270,6 +270,7 @@ static const char* l4proto_name(u16 proto) case IPPROTO_GRE: return "gre"; case IPPROTO_SCTP: return "sctp"; case IPPROTO_UDPLITE: return "udplite"; + case IPPROTO_ICMPV6: return "icmpv6"; } return "unknown"; @@ -293,6 +294,9 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) return 0; } +void (*ct_get_vmip_vpcid)(const struct nf_conn *ct, __be32 *vmip, u32 *vpcid) = NULL; +EXPORT_SYMBOL_GPL(ct_get_vmip_vpcid); + /* return 0 on success, 1 in case of error */ static int ct_seq_show(struct seq_file *s, void *v) { @@ -368,6 +372,15 @@ static int ct_seq_show(struct seq_file *s, void *v) ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR); ct_show_delta_time(s, ct); + void (*ct_get_vmip_vpcid_tmp)(const struct nf_conn *ct, __be32 *vmip, u32 *vpcid) = ct_get_vmip_vpcid; + if (ct_get_vmip_vpcid_tmp) { + __be32 vmip = 0; + u32 vpcid = 0; + ct_get_vmip_vpcid_tmp(ct, &vmip, &vpcid); + seq_printf(s, "vpcid=%u ", vpcid); + seq_printf(s, "vmip=%pI4 ", &vmip); + } + seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)); if (seq_has_overflowed(s)) @@ -525,6 +538,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write, { int ret; + /* module_param hashsize could have changed value */ + nf_conntrack_htable_size_user = nf_conntrack_htable_size; + ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret < 0 || !write) return ret; diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c index f108a76925dd..ec6e7d686016 100644 --- a/net/netfilter/nf_dup_netdev.c +++ b/net/netfilter/nf_dup_netdev.c @@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev) skb_push(skb, skb->mac_len); skb->dev = dev; + skb->tstamp = 0; dev_queue_xmit(skb); } diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 128245efe84a..e05e5df803d6 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c
@@ -354,7 +354,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, return -1; tcph = (void *)(skb_network_header(skb) + thoff); - inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true); + inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false); return 0; } @@ -371,7 +371,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff, udph = (void *)(skb_network_header(skb) + thoff); if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace2(&udph->check, skb, port, - new_port, true); + new_port, false); if (!udph->check) udph->check = CSUM_MANGLED_0; } diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c index ae5628ddbe6d..fd7c5f0f5c25 100644 --- a/net/netfilter/nf_log_common.c +++ b/net/netfilter/nf_log_common.c @@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, } EXPORT_SYMBOL_GPL(nf_log_dump_packet_common); +void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb) +{ + u16 vid; + + if (!skb_vlan_tag_present(skb)) + return; + + vid = skb_vlan_tag_get(skb); + nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid); +} +EXPORT_SYMBOL_GPL(nf_log_dump_vlan); + /* bridge and netdev logging families share this code. */ void nf_log_l2packet(struct net *net, u_int8_t pf, __be16 protocol, diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index bfc555fcbc72..89b58aa890a7 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -1174,6 +1174,7 @@ static int __init nf_nat_init(void) ret = register_pernet_subsys(&nat_net_ops); if (ret < 0) { nf_ct_extend_unregister(&nat_extend); + kvfree(nf_nat_bysource); return ret; } diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c index 64eedc17037a..4731d21fc3ad 100644 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb, enum nf_nat_manip_type maniptype) { struct udphdr *hdr; - bool do_csum; if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct udphdr *)(skb->data + hdroff); - do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; + __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check); - __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum); return true; } @@ -648,8 +646,8 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, } static unsigned int -nf_nat_ipv4_in(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) +nf_nat_ipv4_pre_routing(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) { unsigned int ret; __be32 daddr = ip_hdr(skb)->daddr; @@ -661,6 +659,23 @@ nf_nat_ipv4_in(void *priv, struct sk_buff *skb, return ret; } +static unsigned int +nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + __be32 saddr = ip_hdr(skb)->saddr; + struct sock *sk = skb->sk; + unsigned int ret; + + ret = nf_nat_ipv4_fn(priv, skb, state); + + if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr && + !inet_sk_transparent(sk)) + skb_orphan(skb); /* TCP edemux obtained wrong socket */ + + return ret; +} + static unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -717,7 +732,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, if (ct->tuplehash[dir].tuple.dst.u3.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = 
ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } @@ -738,7 +753,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, static const struct nf_hook_ops nf_nat_ipv4_ops[] = { /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv4_in, + .hook = nf_nat_ipv4_pre_routing, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_NAT_DST, @@ -759,7 +774,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv4_fn, + .hook = nf_nat_ipv4_local_in, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC, @@ -955,7 +970,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &ct->tuplehash[!dir].tuple.src.u3)) { - err = nf_ip6_route_me_harder(state->net, skb); + err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } @@ -1035,8 +1050,8 @@ int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops) ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops)); if (ret) - nf_nat_ipv6_unregister_fn(net, ops); - + nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, + ARRAY_SIZE(nf_nat_ipv6_ops)); return ret; } EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn); diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index b9cbe1e2453e..4bb4cfde28b4 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -446,7 +446,7 @@ synproxy_send_tcp(struct net *net, skb_dst_set_noref(nskb, skb_dst(skb)); nskb->protocol = htons(ETH_P_IP); - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) goto free_nskb; if (nfct) { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 068daff41f6e..2aa28377a3b3 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -254,7 +254,7 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_expr *expr; expr = nft_expr_first(rule); - while (expr != nft_expr_last(rule) && expr->ops) { + while (nft_expr_more(rule, expr)) { if (expr->ops->activate) expr->ops->activate(ctx, expr); @@ -269,7 +269,7 @@ static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_expr *expr; expr = nft_expr_first(rule); - while (expr != nft_expr_last(rule) && expr->ops) { + while (nft_expr_more(rule, expr)) { if (expr->ops->deactivate) expr->ops->deactivate(ctx, expr, phase); @@ -456,7 +456,8 @@ static struct nft_table *nft_table_lookup(const struct net *net, if (nla == NULL) return ERR_PTR(-EINVAL); - list_for_each_entry_rcu(table, &net->nft.tables, list) { + list_for_each_entry_rcu(table, &net->nft.tables, list, + lockdep_is_held(&net->nft.commit_mutex)) { if (!nla_strcmp(nla, table->name) && table->family == family && nft_active_genmask(table, genmask)) @@ -559,7 +560,8 @@ static int nft_request_module(struct net *net, const char *fmt, ...) 
static void lockdep_nfnl_nft_mutex_not_held(void) { #ifdef CONFIG_PROVE_LOCKING - WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); + if (debug_locks) + WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES)); #endif } @@ -744,11 +746,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1443,11 +1445,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -2622,11 +2624,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule, NULL); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -2641,7 +2643,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, * is called on error from nf_tables_newrule(). */ expr = nft_expr_first(rule); - while (expr != nft_expr_last(rule) && expr->ops) { + while (nft_expr_more(rule, expr)) { next = nft_expr_next(expr); nf_tables_expr_destroy(ctx, expr); expr = next; @@ -2707,6 +2709,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table) } static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nft_chain *chain, const struct nlattr *nla); #define NFT_RULE_MAXEXPRS 128 @@ -2780,7 +2783,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return PTR_ERR(old_rule); } } else if (nla[NFTA_RULE_POSITION_ID]) { - old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]); + old_rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_POSITION_ID]); if (IS_ERR(old_rule)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]); return PTR_ERR(old_rule); @@ -2915,6 +2918,7 @@ err1: } static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nft_chain *chain, const struct nlattr *nla) { u32 id = ntohl(nla_get_be32(nla)); @@ -2924,6 +2928,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net, struct nft_rule *rule = nft_trans_rule(trans); if (trans->msg_type == NFT_MSG_NEWRULE && + trans->ctx.chain == chain && id == nft_trans_rule_id(trans)) return rule; } @@ -2970,7 +2975,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, err = nft_delrule(&ctx, rule); } else if (nla[NFTA_RULE_ID]) { - rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]); + rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]); if (IS_ERR(rule)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]); return PTR_ERR(rule); @@ -3185,6 +3190,7 @@ static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table, } static struct nft_set *nft_set_lookup_byid(const struct net *net, + const struct nft_table *table, const struct nlattr *nla, u8 genmask) { struct nft_trans *trans; @@ -3195,6 +3201,7 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net, struct nft_set *set = 
nft_trans_set(trans); if (id == nft_trans_set_id(trans) && + set->table == table && nft_active_genmask(set, genmask)) return set; } @@ -3215,7 +3222,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net, if (!nla_set_id) return set; - set = nft_set_lookup_byid(net, nla_set_id, genmask); + set = nft_set_lookup_byid(net, table, nla_set_id, genmask); } return set; } @@ -3275,7 +3282,7 @@ cont: return 0; } -static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) +int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) { u64 ms = be64_to_cpu(nla_get_be64(nla)); u64 max = (u64)(~((u64)0)); @@ -3289,7 +3296,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) return 0; } -static __be64 nf_jiffies64_to_msecs(u64 input) +__be64 nf_jiffies64_to_msecs(u64 input) { return cpu_to_be64(jiffies64_to_msecs(input)); } @@ -3353,7 +3360,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; desc = nla_nest_start_noflag(skb, NFTA_SET_DESC); @@ -3525,11 +3533,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -3598,7 +3606,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, NFT_SET_INTERVAL | NFT_SET_TIMEOUT | NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) - return -EINVAL; + return -EOPNOTSUPP; /* Only one of these operations is supported */ if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) == (NFT_SET_MAP | NFT_SET_OBJECT)) @@ -3636,7 +3644,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); if (objtype == NFT_OBJECT_UNSPEC || objtype > NFT_OBJECT_MAX) - return -EINVAL; + return -EOPNOTSUPP; } else if (flags & NFT_SET_OBJECT) return -EINVAL; else @@ -4304,24 +4312,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -5498,10 +5500,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -6173,10 +6176,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -6337,10 +6341,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } @@ -6605,6 +6610,12 @@ static void nf_tables_trans_destroy_work(struct work_struct *w) } } +void nf_tables_trans_destroy_flush_work(void) +{ + flush_work(&trans_destroy_work); +} +EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work); + static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain) { struct nft_rule *rule; @@ -6776,9 +6787,9 @@ static void nf_tables_commit_release(struct net *net) spin_unlock(&nf_tables_destroy_list_lock); nf_tables_module_autoload_cleanup(net); - mutex_unlock(&net->nft.commit_mutex); - schedule_work(&trans_destroy_work); + + mutex_unlock(&net->nft.commit_mutex); } static int nf_tables_commit(struct net *net, struct sk_buff *skb) @@ -7011,11 +7022,15 @@ static void nf_tables_abort_release(struct nft_trans *trans) kfree(trans); } -static int __nf_tables_abort(struct net *net, bool autoload) +static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) { struct nft_trans *trans, *next; struct nft_trans_elem *te; + if (action == NFNL_ABORT_VALIDATE && + nf_tables_validate(net) < 0) + return -EAGAIN; + list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { switch (trans->msg_type) { @@ -7133,7 +7148,7 @@ static int __nf_tables_abort(struct net *net, bool autoload) nf_tables_abort_release(trans); } - if (autoload) + if (action == NFNL_ABORT_AUTOLOAD) nf_tables_module_autoload(net); else nf_tables_module_autoload_cleanup(net); @@ -7146,9 +7161,10 @@ static void nf_tables_cleanup(struct net *net) nft_validate_state_update(net, NFT_VALIDATE_SKIP); } -static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload) +static int nf_tables_abort(struct net *net, struct sk_buff *skb, + enum nfnl_abort_action action) { - int ret = __nf_tables_abort(net, autoload); + int ret = __nf_tables_abort(net, action); mutex_unlock(&net->nft.commit_mutex); @@ -7685,6 +7701,17 @@ int __nft_release_basechain(struct nft_ctx *ctx) } EXPORT_SYMBOL_GPL(__nft_release_basechain); +static void __nft_release_hooks(struct net *net) +{ + struct nft_table *table; + struct nft_chain *chain; + + list_for_each_entry(table, &net->nft.tables, list) { + list_for_each_entry(chain, &table->chains, list) + nf_tables_unregister_hook(net, table, chain); + } +} + static void __nft_release_tables(struct net *net) { 
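The same mechanical conversion is applied to each GET handler above. Distilled into a minimal sketch (the handler shape and label name here are illustrative, not taken verbatim from any one hunk):

static int nft_get_example(struct net *net, struct sk_buff *skb,
			   struct sk_buff *skb2, int err)
{
	if (err < 0)
		goto err_fill_example_info;

	/* nfnetlink_unicast() now maps -EAGAIN to -ENOBUFS itself,
	 * so handlers return its result directly instead of going
	 * through nlmsg_unicast() and a generic "err" label.
	 */
	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);

err_fill_example_info:
	kfree_skb(skb2);
	return err;
}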
struct nft_flowtable *flowtable, *nf; @@ -7700,10 +7727,6 @@ static void __nft_release_tables(struct net *net) list_for_each_entry_safe(table, nt, &net->nft.tables, list) { ctx.family = table->family; - - list_for_each_entry(chain, &table->chains, list) - nf_tables_unregister_hook(net, table, chain); - /* No packets are walking on these chains anymore. */ ctx.table = table; list_for_each_entry(chain, &table->chains, list) { ctx.chain = chain; @@ -7751,11 +7774,16 @@ static int __net_init nf_tables_init_net(struct net *net) return 0; } +static void __net_exit nf_tables_pre_exit_net(struct net *net) +{ + __nft_release_hooks(net); +} + static void __net_exit nf_tables_exit_net(struct net *net) { mutex_lock(&net->nft.commit_mutex); if (!list_empty(&net->nft.commit_list)) - __nf_tables_abort(net, false); + __nf_tables_abort(net, NFNL_ABORT_NONE); __nft_release_tables(net); mutex_unlock(&net->nft.commit_mutex); WARN_ON_ONCE(!list_empty(&net->nft.tables)); @@ -7763,8 +7791,9 @@ static void __net_exit nf_tables_exit_net(struct net *net) } static struct pernet_operations nf_tables_net_ops = { - .init = nf_tables_init_net, - .exit = nf_tables_exit_net, + .init = nf_tables_init_net, + .pre_exit = nf_tables_pre_exit_net, + .exit = nf_tables_exit_net, }; static int __init nf_tables_module_init(void) diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 914cd0618d5a..2d3bc22c855c 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -28,6 +28,23 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions) return flow; } +void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, + enum flow_dissector_key_id addr_type) +{ + struct nft_flow_match *match = &flow->match; + struct nft_flow_key *mask = &match->mask; + struct nft_flow_key *key = &match->key; + + if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) + return; + + key->control.addr_type = addr_type; + mask->control.addr_type = 0xffff; + match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL); + match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] = + offsetof(struct nft_flow_key, control); +} + struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule) { @@ -37,8 +54,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, struct nft_expr *expr; expr = nft_expr_first(rule); - while (expr->ops && expr != nft_expr_last(rule)) { - if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION) + while (nft_expr_more(rule, expr)) { + if (expr->ops->offload_action && + expr->ops->offload_action(expr)) num_actions++; expr = nft_expr_next(expr); @@ -61,7 +79,7 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, ctx->net = net; ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC; - while (expr->ops && expr != nft_expr_last(rule)) { + while (nft_expr_more(rule, expr)) { if (!expr->ops->offload) { err = -EOPNOTSUPP; goto err_out; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 99127e2d95a8..81c86a156c6c 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = 
-ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); @@ -310,7 +315,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, return netlink_ack(skb, nlh, -EINVAL, NULL); replay: status = 0; - +replay_abort: skb = netlink_skb_clone(oskb, GFP_KERNEL); if (!skb) return netlink_ack(oskb, nlh, -ENOMEM, NULL); @@ -476,7 +481,7 @@ ack: } done: if (status & NFNL_BATCH_REPLAY) { - ss->abort(net, oskb, true); + ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD); nfnl_err_reset(&err_list); kfree_skb(skb); module_put(ss->owner); @@ -487,11 +492,25 @@ done: status |= NFNL_BATCH_REPLAY; goto done; } else if (err) { - ss->abort(net, oskb, false); + ss->abort(net, oskb, NFNL_ABORT_NONE); netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL); } } else { - ss->abort(net, oskb, false); + enum nfnl_abort_action abort_action; + + if (status & NFNL_BATCH_FAILURE) + abort_action = NFNL_ABORT_NONE; + else + abort_action = NFNL_ABORT_VALIDATE; + + err = ss->abort(net, oskb, abort_action); + if (err == -EAGAIN) { + nfnl_err_reset(&err_list); + kfree_skb(skb); + module_put(ss->owner); + status |= NFNL_BATCH_FAILURE; + goto replay_abort; + } } if (ss->cleanup) ss->cleanup(net); diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 60838d5fb8e0..81406b93f126 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -103,7 +103,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) if (help->helper->data_len == 0) return -EINVAL; - nla_memcpy(help->data, nla_data(attr), sizeof(help->data)); + nla_memcpy(help->data, attr, sizeof(help->data)); return 0; } @@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[], ret = -ENOMEM; goto err2; } + helper->data_len = size; helper->flags |= NF_CT_HELPER_F_USERSPACE; memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple)); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 0ba020ca38e6..7ca2ca4bba05 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 9f5dea0064ea..916a3c7f9eaf 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb, static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, const struct sk_buff *skb, const struct iphdr *ip, - unsigned char *opts) + unsigned char *opts, + struct tcphdr *_tcph) { const struct tcphdr *tcp; - struct tcphdr _tcph; - tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); + tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph); if (!tcp) return NULL; @@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, int fmatch = FMATCH_WRONG; struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; + struct tcphdr _tcph; memset(&ctx, 0, sizeof(ctx)); - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return false; @@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb, const struct nf_osf_finger *kf; struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; + struct tcphdr _tcph; 
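The extra _tcph parameter above fixes a lifetime bug: skb_header_pointer() may copy the header into the buffer it is handed and return a pointer to that buffer, so the storage must outlive every use of the returned pointer. Keeping it on nf_osf_hdr_ctx_init()'s stack meant the callers kept dereferencing memory from a frame that had already been popped. A minimal sketch of the corrected ownership (simplified signature, illustrative helper name):

static const struct tcphdr *osf_peek_tcp(const struct sk_buff *skb,
					 struct tcphdr *_tcph)
{
	/* may return _tcph itself, so the caller owns the storage */
	return skb_header_pointer(skb, ip_hdrlen(skb),
				  sizeof(struct tcphdr), _tcph);
}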
memset(&ctx, 0, sizeof(ctx)); - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return false; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index feabdfb22920..6f0a2bad8ad5 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c index 8826bbe71136..edd02cda57fc 100644 --- a/net/netfilter/nft_chain_route.c +++ b/net/netfilter/nft_chain_route.c @@ -42,7 +42,7 @@ static unsigned int nf_route_table_hook4(void *priv, iph->daddr != daddr || skb->mark != mark || iph->tos != tos) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } @@ -92,7 +92,7 @@ static unsigned int nf_route_table_hook6(void *priv, skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u32 *)ipv6_hdr(skb)))) { - err = nf_ip6_route_me_harder(state->net, skb); + err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index f9adca62ccb3..bbe03b9a03b1 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -213,6 +213,17 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) return 0; } +static void nft_compat_wait_for_destructors(void) +{ + /* xtables matches or targets can have side effects, e.g. + * creation/destruction of /proc files. + * The xt ->destroy functions are run asynchronously from + * a work queue. If we have pending invocations we thus + * need to wait for those to finish.
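+	 * For example, xt_hashlimit registers named entries under
+	 * /proc/net/ipt_hashlimit; re-creating a rule before the
+	 * pending ->destroy has removed the old entry would fail.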
+ */ + nf_tables_trans_destroy_flush_work(); +} + static int nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -236,6 +247,8 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); + nft_compat_wait_for_destructors(); + ret = xt_check_target(&par, size, proto, inv); if (ret < 0) return ret; @@ -247,6 +260,12 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return 0; } +static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr) +{ + module_put(me); + kfree(expr->ops); +} + static void nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { @@ -262,8 +281,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) if (par.target->destroy != NULL) par.target->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static int nft_extension_dump_info(struct sk_buff *skb, int attr, @@ -451,6 +469,8 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); + nft_compat_wait_for_destructors(); + return xt_check_match(&par, size, proto, inv); } @@ -494,8 +514,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, if (par.match->destroy != NULL) par.match->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static void diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 46ca8bcca1bd..2042c6f4629c 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -177,8 +177,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr, } #endif case NFT_CT_ID: - if (!nf_ct_is_confirmed(ct)) - goto err; *dest = nf_ct_get_id(ct); return; default: diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c index c2e78c160fd7..6007089e1c2f 100644 --- a/net/netfilter/nft_dup_netdev.c +++ b/net/netfilter/nft_dup_netdev.c @@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx, return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif); } +static bool nft_dup_netdev_offload_action(const struct nft_expr *expr) +{ + return true; +} + static struct nft_expr_type nft_dup_netdev_type; static const struct nft_expr_ops nft_dup_netdev_ops = { .type = &nft_dup_netdev_type, @@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = { .init = nft_dup_netdev_init, .dump = nft_dup_netdev_dump, .offload = nft_dup_netdev_offload, + .offload_action = nft_dup_netdev_offload_action, }; static struct nft_expr_type nft_dup_netdev_type __read_mostly = { diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 8887295414dc..95415d2b81c9 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -146,7 +146,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS])); if (flags & ~NFT_DYNSET_F_INV) - return -EINVAL; + return -EOPNOTSUPP; if (flags & NFT_DYNSET_F_INV) priv->invert = true; } @@ -179,9 +179,11 @@ static int nft_dynset_init(const struct nft_ctx *ctx, timeout = 0; if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) - return -EINVAL; - timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( - tb[NFTA_DYNSET_TIMEOUT]))); + return -EOPNOTSUPP; + + err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout); + if (err) + 
return err; } priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); @@ -191,7 +193,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (tb[NFTA_DYNSET_SREG_DATA] != NULL) { if (!(set->flags & NFT_SET_MAP)) - return -EINVAL; + return -EOPNOTSUPP; if (set->dtype == NFT_DATA_VERDICT) return -EOPNOTSUPP; @@ -231,8 +233,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, priv->expr->ops->size); if (set->flags & NFT_SET_TIMEOUT) { - if (timeout || set->timeout) + if (timeout || set->timeout) { + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); + } } priv->timeout = timeout; @@ -296,7 +300,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) goto nla_put_failure; if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, - cpu_to_be64(jiffies_to_msecs(priv->timeout)), + nf_jiffies64_to_msecs(priv->timeout), NFTA_DYNSET_PAD)) goto nla_put_failure; if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a5e8469859e3..427d77b111b1 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); if (priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; @@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr, err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type); if (priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index 3087e23297db..3b0dcd170551 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -77,6 +77,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx, return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif); } +static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr) +{ + return true; +} + struct nft_fwd_neigh { enum nft_registers sreg_dev:8; enum nft_registers sreg_addr:8; @@ -138,6 +143,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr, return; skb->dev = dev; + skb->tstamp = 0; neigh_xmit(neigh_table, dev, addr, skb); out: regs->verdict.code = verdict; @@ -218,6 +224,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = { .dump = nft_fwd_netdev_dump, .validate = nft_fwd_validate, .offload = nft_fwd_netdev_offload, + .offload_action = nft_fwd_netdev_offload_action, }; static const struct nft_expr_ops * diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index c7f0ef73d939..ee71d44f1b74 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -163,6 +163,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx, return 0; } +static bool nft_immediate_offload_action(const struct nft_expr *expr) +{ + const struct nft_immediate_expr *priv = nft_expr_priv(expr); + + if (priv->dreg == NFT_REG_VERDICT) + return true; + + return false; +} + static const struct nft_expr_ops nft_imm_ops = { .type = &nft_imm_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), @@ -173,7 +183,7 @@ static const struct nft_expr_ops nft_imm_ops = 
{ .dump = nft_immediate_dump, .validate = nft_immediate_validate, .offload = nft_immediate_offload, - .offload_flags = NFT_OFFLOAD_F_ACTION, + .offload_action = nft_immediate_offload_action, }; struct nft_expr_type nft_imm_type __read_mostly = { diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index 35b67d7e3694..6e7b92e6f424 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c @@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit, return -EOVERFLOW; if (pkts) { - tokens = div_u64(limit->nsecs, limit->rate) * limit->burst; + tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst; } else { /* The token bucket size limits the number of tokens that can be * accumulated. tokens_max specifies the bucket size. * tokens_max = unit * (rate + burst) / rate. */ - tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), + tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst), limit->rate); } diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index c3c93e95b46e..243e8107f456 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -129,7 +129,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->type = NF_NAT_MANIP_DST; break; default: - return -EINVAL; + return -EOPNOTSUPP; } if (tb[NFTA_NAT_FAMILY] == NULL) @@ -196,7 +196,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (tb[NFTA_NAT_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); if (priv->flags & ~NF_NAT_RANGE_MASK) - return -EINVAL; + return -EOPNOTSUPP; } return nf_ct_netns_get(ctx->net, family); diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 0e3bfbc26e79..921f8f45b17f 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -79,7 +79,9 @@ void nft_payload_eval(const struct nft_expr *expr, u32 *dest = &regs->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) @@ -195,6 +197,7 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx, NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src, sizeof(struct in_addr), reg); + nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS); break; case offsetof(struct iphdr, daddr): if (priv->len != sizeof(struct in_addr)) @@ -202,6 +205,7 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx, NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst, sizeof(struct in_addr), reg); + nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS); break; case offsetof(struct iphdr, protocol): if (priv->len != sizeof(__u8)) @@ -231,6 +235,7 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx, NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src, sizeof(struct in6_addr), reg); + nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS); break; case offsetof(struct ipv6hdr, daddr): if (priv->len != sizeof(struct in6_addr)) @@ -238,6 +243,7 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx, NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst, sizeof(struct in6_addr), reg); + nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS); break; case offsetof(struct ipv6hdr, nexthdr): if (priv->len != sizeof(__u8)) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index a9f804f7a04a..ee7c29e0a9d7 100644
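The nft_limit change above matters because div_u64() takes only a 32-bit divisor, so a user-supplied rate above U32_MAX was silently truncated before the division. A sketch of the corrected token-bucket math (helper name and values are illustrative):

#include <linux/math64.h>

/* tokens_max = unit * (rate + burst) / rate, computed with a full
 * 64-bit divisor: e.g. rate = 5000000000 no longer has its upper
 * bits dropped the way div_u64() would drop them.
 */
static u64 limit_tokens_max(u64 nsecs, u64 rate, u64 burst)
{
	return div64_u64(nsecs * (rate + burst), rate);
}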
--- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe) (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END); } +static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe) +{ + return !nft_rbtree_interval_end(rbe); +} + static bool nft_rbtree_equal(const struct nft_set *set, const void *this, const struct nft_rbtree_elem *interval) { @@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (interval && nft_rbtree_equal(set, this, interval) && nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(interval)) + nft_rbtree_interval_start(interval)) continue; interval = rbe; } else if (d > 0) @@ -74,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set parent = rcu_dereference_raw(parent->rb_left); continue; } + + if (nft_set_elem_expired(&rbe->ext)) + return false; + if (nft_rbtree_interval_end(rbe)) { if (nft_set_is_anonymous(set)) return false; @@ -89,7 +98,8 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && - !nft_rbtree_interval_end(interval)) { + !nft_set_elem_expired(&interval->ext) && + nft_rbtree_interval_start(interval)) { *ext = &interval->ext; return true; } @@ -149,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, continue; } + if (nft_set_elem_expired(&rbe->ext)) + return false; + if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) || (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) == (flags & NFT_SET_ELEM_INTERVAL_END)) { @@ -165,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && + !nft_set_elem_expired(&interval->ext) && ((!nft_rbtree_interval_end(interval) && !(flags & NFT_SET_ELEM_INTERVAL_END)) || (nft_rbtree_interval_end(interval) && @@ -224,9 +238,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, p = &parent->rb_right; else { if (nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(new)) { + nft_rbtree_interval_start(new)) { p = &parent->rb_left; - } else if (!nft_rbtree_interval_end(rbe) && + } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(new)) { p = &parent->rb_right; } else if (nft_set_elem_active(&rbe->ext, genmask)) { @@ -317,10 +331,10 @@ static void *nft_rbtree_deactivate(const struct net *net, parent = parent->rb_right; else { if (nft_rbtree_interval_end(rbe) && - !nft_rbtree_interval_end(this)) { + nft_rbtree_interval_start(this)) { parent = parent->rb_left; continue; - } else if (!nft_rbtree_interval_end(rbe) && + } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(this)) { parent = parent->rb_right; continue; @@ -350,6 +364,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, if (iter->count < iter->skip) goto cont; + if (nft_set_elem_expired(&rbe->ext)) + goto cont; if (!nft_set_elem_active(&rbe->ext, iter->genmask)) goto cont; diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c index 51b454d8fa9c..924195861faf 100644 --- a/net/netfilter/utils.c +++ b/net/netfilter/utils.c @@ -191,8 +191,8 @@ static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry skb->mark == rt_info->mark && iph->daddr == 
rt_info->daddr && iph->saddr == rt_info->saddr)) - return ip_route_me_harder(entry->state.net, skb, - RTN_UNSPEC); + return ip_route_me_harder(entry->state.net, entry->state.sk, + skb, RTN_UNSPEC); } #endif return 0; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 44f971f31992..9cfee6664040 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -330,6 +330,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) const struct xt_match *m; int have_rev = 0; + mutex_lock(&xt[af].mutex); list_for_each_entry(m, &xt[af].match, list) { if (strcmp(m->name, name) == 0) { if (m->revision > *bestp) @@ -338,6 +339,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) have_rev = 1; } } + mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); @@ -350,6 +352,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) const struct xt_target *t; int have_rev = 0; + mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt[af].target, list) { if (strcmp(t->name, name) == 0) { if (t->revision > *bestp) @@ -358,6 +361,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) have_rev = 1; } } + mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); @@ -371,12 +375,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, { int have_rev, best = -1; - mutex_lock(&xt[af].mutex); if (target == 1) have_rev = target_revfn(af, name, revision, &best); else have_rev = match_revfn(af, name, revision, &best); - mutex_unlock(&xt[af].mutex); /* Nothing at all? Return 0 to try loading module. */ if (best == -1) { @@ -731,7 +733,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; - int pad, off = xt_compat_match_offset(match); + int off = xt_compat_match_offset(match); u_int16_t msize = cm->u.user.match_size; char name[sizeof(m->u.user.name)]; @@ -741,9 +743,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, match->compat_from_user(m->data, cm->data); else memcpy(m->data, cm->data, msize - sizeof(*cm)); - pad = XT_ALIGN(match->matchsize) - match->matchsize; - if (pad > 0) - memset(m->data + match->matchsize, 0, pad); msize += off; m->u.user.match_size = msize; @@ -1114,7 +1113,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, { const struct xt_target *target = t->u.kernel.target; struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; - int pad, off = xt_compat_target_offset(target); + int off = xt_compat_target_offset(target); u_int16_t tsize = ct->u.user.target_size; char name[sizeof(t->u.user.name)]; @@ -1124,9 +1123,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, target->compat_from_user(t->data, ct->data); else memcpy(t->data, ct->data, tsize - sizeof(*ct)); - pad = XT_ALIGN(target->targetsize) - target->targetsize; - if (pad > 0) - memset(t->data + target->targetsize, 0, pad); tsize += off; t->u.user.target_size = tsize; @@ -1387,7 +1383,7 @@ xt_replace_table(struct xt_table *table, table->private = newinfo; /* make sure all cpus see new ->private value */ - smp_wmb(); + smp_mb(); /* * Even though table entries have now been swapped, other CPU's diff --git a/net/netfilter/xt_RATEEST.c 
b/net/netfilter/xt_RATEEST.c index 2236455b10a3..182c1285b4ad 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -115,6 +115,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) } cfg; int ret; + if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) + return -ENAMETOOLONG; + net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); mutex_lock(&xn->hash_lock); diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 6c2582a19766..3469b6073610 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -152,7 +152,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) /* * Drop entries with timestamps older than 'time'. */ -static void recent_entry_reap(struct recent_table *t, unsigned long time) +static void recent_entry_reap(struct recent_table *t, unsigned long time, + struct recent_entry *working, bool update) { struct recent_entry *e; @@ -161,6 +162,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time) */ e = list_entry(t->lru_list.next, struct recent_entry, lru_list); + /* + * Do not reap the entry which is going to be updated. + */ + if (e == working && update) + return; + /* * The last time stamp is the most recent. */ @@ -303,7 +310,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) /* info->seconds must be non-zero */ if (info->check_set & XT_RECENT_REAP) - recent_entry_reap(t, time); + recent_entry_reap(t, time, e, + info->check_set & XT_RECENT_UPDATE && ret); } if (info->check_set & XT_RECENT_SET || diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 0f16080b87cb..4cb43a2c07d1 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -575,6 +575,7 @@ list_start: break; } + cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); genlmsg_end(ans_skb, data); @@ -583,12 +584,14 @@ list_start: list_retry: /* XXX - this limit is a guesstimate */ if (nlsze_mult < 4) { + cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); kfree_skb(ans_skb); nlsze_mult *= 2; goto list_start; } list_failure_lock: + cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); list_failure: kfree_skb(ans_skb); diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index f5d34da0646e..12aa803b2f68 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -536,6 +537,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -579,6 +582,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -596,6 +605,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -605,40 +617,29 @@ static int
netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } -#if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } -#endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; -#if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; -#endif /* IPv6 */ + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); +#if IS_ENABLED(CONFIG_IPV6) + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } +#endif /* IPv6 */ + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; +#if IS_ENABLED(CONFIG_IPV6) + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; +#endif /* IPv6 */ } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 409a3ae47ce2..5e1239cef000 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, if ((off & (BITS_PER_LONG - 1)) != 0) return -EINVAL; + /* a null catmap is equivalent to an empty one */ + if (!catmap) { + *offset = (u32)-1; + return 0; + } + if (off < catmap->startbit) { off = catmap->startbit; *offset = off; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index d2e4ab8d1cb1..7b62cdea6163 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1165,12 +1165,13 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, struct netlbl_unlhsh_walk_arg cb_arg; u32 skip_bkt = cb->args[0]; u32 skip_chain = cb->args[1]; - u32 iter_bkt; - u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; + u32 skip_addr4 = cb->args[2]; + u32 iter_bkt, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; struct netlbl_unlhsh_iface *iface; struct list_head *iter_list; struct netlbl_af4list *addr4; #if IS_ENABLED(CONFIG_IPV6) + u32 skip_addr6 = cb->args[3]; struct netlbl_af6list *addr6; #endif @@ -1181,7 +1182,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, rcu_read_lock(); for (iter_bkt = skip_bkt; iter_bkt < rcu_dereference(netlbl_unlhsh)->size; - iter_bkt++, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0) { + iter_bkt++) { iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt]; list_for_each_entry_rcu(iface, iter_list, list) { if (!iface->valid || @@ -1189,7 +1190,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, continue; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < cb->args[2]) + if (iter_addr4++ < skip_addr4) continue; if 
(netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1202,10 +1203,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, goto unlabel_staticlist_return; } } + iter_addr4 = 0; + skip_addr4 = 0; #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < cb->args[3]) + if (iter_addr6++ < skip_addr6) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1218,8 +1221,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, goto unlabel_staticlist_return; } } + iter_addr6 = 0; + skip_addr6 = 0; #endif /* IPv6 */ } + iter_chain = 0; + skip_chain = 0; } unlabel_staticlist_return: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 474723bde1cc..87a0efe0c70c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -60,6 +60,7 @@ #include <linux/genetlink.h> #include <linux/net_namespace.h> #include <linux/nospec.h> +#include <linux/btf_ids.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -2604,7 +2605,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) return __netlink_seq_next(seq); } -static void netlink_seq_stop(struct seq_file *seq, void *v) +static void netlink_native_seq_stop(struct seq_file *seq, void *v) { struct nl_seq_iter *iter = seq->private; @@ -2615,7 +2616,7 @@ static void netlink_seq_stop(struct seq_file *seq, void *v) } -static int netlink_seq_show(struct seq_file *seq, void *v) +static int netlink_native_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -2642,6 +2643,68 @@ static int netlink_seq_show(struct seq_file *seq, void *v) return 0; } +#ifdef CONFIG_BPF_SYSCALL +struct bpf_iter__netlink { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct netlink_sock *, sk); +}; + +DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk) + +static int netlink_prog_seq_show(struct bpf_prog *prog, + struct bpf_iter_meta *meta, + void *v) +{ + struct bpf_iter__netlink ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.sk = nlk_sk((struct sock *)v); + return bpf_iter_run_prog(prog, &ctx); +} + +static int netlink_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + if (!prog) + return netlink_native_seq_show(seq, v); + + if (v != SEQ_START_TOKEN) + return netlink_prog_seq_show(prog, &meta, v); + + return 0; +} + +static void netlink_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)netlink_prog_seq_show(prog, &meta, v); + } + + netlink_native_seq_stop(seq, v); +} +#else +static int netlink_seq_show(struct seq_file *seq, void *v) +{ + return netlink_native_seq_show(seq, v); +} + +static void netlink_seq_stop(struct seq_file *seq, void *v) +{ + netlink_native_seq_stop(seq, v); +} +#endif + static const struct seq_operations netlink_seq_ops = { .start = netlink_seq_start, .next = netlink_seq_next, @@ -2748,6 +2811,34 @@ static const struct rhashtable_params netlink_rhashtable_params = { .automatic_shrinking = true, }; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +BTF_ID_LIST(btf_netlink_sock_id) +BTF_ID(struct, netlink_sock) + +static const struct bpf_iter_seq_info netlink_seq_info = { + .seq_ops = &netlink_seq_ops, + .init_seq_private = bpf_iter_init_seq_net, 
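+	/* bpf_iter_init_seq_net()/bpf_iter_fini_seq_net() take and
+	 * release the netns reference kept in the seq_file private
+	 * data sized below.
+	 */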
+ .fini_seq_private = bpf_iter_fini_seq_net, + .seq_priv_size = sizeof(struct nl_seq_iter), +}; + +static struct bpf_iter_reg netlink_reg_info = { + .target = "netlink", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__netlink, sk), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &netlink_seq_info, +}; + +static int __init bpf_iter_register(void) +{ + netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id; + return bpf_iter_reg_target(&netlink_reg_info); +} +#endif + static int __init netlink_proto_init(void) { int i; @@ -2756,6 +2847,12 @@ static int __init netlink_proto_init(void) if (err != 0) goto out; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + err = bpf_iter_register(); + if (err) + goto out; +#endif + BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb)); nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index efccd1ac9a66..102b8d6b5612 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -989,60 +989,11 @@ static struct genl_family genl_ctrl __ro_after_init = { .netnsok = true, }; -static int genl_bind(struct net *net, int group) -{ - struct genl_family *f; - int err = -ENOENT; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (!f->netnsok && net != &init_net) - err = -ENOENT; - else if (f->mcast_bind) - err = f->mcast_bind(net, fam_grp); - else - err = 0; - break; - } - } - up_read(&cb_lock); - - return err; -} - -static void genl_unbind(struct net *net, int group) -{ - struct genl_family *f; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (f->mcast_unbind) - f->mcast_unbind(net, fam_grp); - break; - } - } - up_read(&cb_lock); -} - static int __net_init genl_pernet_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = genl_rcv, .flags = NL_CFG_F_NONROOT_RECV, - .bind = genl_bind, - .unbind = genl_unbind, }; /* we'll bump the group number right afterwards */ diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index d41335bad1f8..89cd9de21594 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -208,6 +208,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, /* refcount initialized at 1 */ spin_unlock_bh(&nr_node_list_lock); + nr_neigh_put(nr_neigh); return 0; } nr_node_lock(nr_node); diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c index 65aaa9d7c813..bcd4d74e8a82 100644 --- a/net/nfc/digital_dep.c +++ b/net/nfc/digital_dep.c @@ -1276,6 +1276,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, } rc = nfc_tm_data_received(ddev->nfc_dev, resp); + if (rc) + resp = NULL; exit: kfree_skb(ddev->chaining_skb); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 28604414dec1..0d4246af6c02 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -108,11 +108,15 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { + nfc_llcp_local_put(llcp_sock->local); + llcp_sock->local = NULL; ret = -ENOMEM; goto put_dev; } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if 
(llcp_sock->ssap == LLCP_SAP_MAX) { + nfc_llcp_local_put(llcp_sock->local); + llcp_sock->local = NULL; kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; ret = -EADDRINUSE; @@ -671,6 +675,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, ret = -EISCONN; goto error; } + if (sk->sk_state == LLCP_CONNECTING) { + ret = -EINPROGRESS; + goto error; + } dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { @@ -702,6 +710,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, llcp_sock->local = nfc_llcp_local_get(local); llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { + nfc_llcp_local_put(llcp_sock->local); + llcp_sock->local = NULL; ret = -ENOMEM; goto put_dev; } @@ -743,9 +753,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, sock_unlink: nfc_llcp_sock_unlink(&local->connecting_sockets, sk); + kfree(llcp_sock->service_name); + llcp_sock->service_name = NULL; sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); + nfc_llcp_local_put(llcp_sock->local); + llcp_sock->local = NULL; put_dev: nfc_put_device(dev); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 1b261375722e..99b06a16b808 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -860,6 +860,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) if (!dev->polling) { device_unlock(&dev->dev); + nfc_put_device(dev); return -EINVAL; } @@ -1225,7 +1226,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) u32 idx; char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index ba5ffd3badd3..23d5e56306a4 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -105,7 +105,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; - goto error; + goto put_dev; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); @@ -332,10 +332,13 @@ static int rawsock_create(struct net *net, struct socket *sock, if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; sock->ops = &rawsock_raw_ops; - else + } else { sock->ops = &rawsock_ops; + } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 99352f09deaa..5c68f9ea9881 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -196,6 +196,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, __be32 lse; int err; + if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) + return -ENOMEM; + stack = mpls_hdr(skb); lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask); err = skb_mpls_update_lse(skb, lse); @@ -828,17 +831,17 @@ static void ovs_fragment(struct net *net, struct vport *vport, } if (key->eth.type == htons(ETH_P_IP)) { - struct dst_entry ovs_dst; + struct rtable ovs_rt = { 0 }; unsigned long orig_dst; prepare_frag(vport, skb, orig_network_offset, ovs_key_mac_proto(key)); - dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1, + 
dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1, DST_OBSOLETE_NONE, DST_NOCOUNT); - ovs_dst.dev = vport->dev; + ovs_rt.dst.dev = vport->dev; orig_dst = skb->_skb_refdst; - skb_dst_set_noref(skb, &ovs_dst); + skb_dst_set_noref(skb, &ovs_rt.dst); IPCB(skb)->frag_max_size = mru; ip_do_fragment(net, skb->sk, skb, ovs_vport_output); @@ -1146,9 +1149,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, bool last) { + struct ovs_skb_cb *ovs_cb = OVS_CB(skb); const struct nlattr *actions, *cpl_arg; + int len, max_len, rem = nla_len(attr); const struct check_pkt_len_arg *arg; - int rem = nla_len(attr); bool clone_flow_key; /* The first netlink attribute in 'attr' is always @@ -1157,7 +1161,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, cpl_arg = nla_data(attr); arg = nla_data(cpl_arg); - if (skb->len <= arg->pkt_len) { + len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len; + max_len = arg->pkt_len; + + if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) || + len <= max_len) { /* Second netlink attribute in 'attr' is always * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'. */ diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 283e8f9a5fd2..b6f98eba71f1 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -#define IN6_ADDR_INITIALIZER(ADDR) \ - { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ - (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } - int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { @@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ct_tuple_ipv4 orig = { - output->ipv4.ct_orig.src, - output->ipv4.ct_orig.dst, - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv4 orig; + + memset(&orig, 0, sizeof(orig)); + orig.ipv4_src = output->ipv4.ct_orig.src; + orig.ipv4_dst = output->ipv4.ct_orig.dst; + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv4_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) return -EMSGSIZE; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ct_tuple_ipv6 orig = { - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv6 orig; + + memset(&orig, 0, sizeof(orig)); + memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32, + sizeof(orig.ipv6_src)); + memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32, + sizeof(orig.ipv6_dst)); + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv6_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) return -EMSGSIZE; @@ -903,15 +905,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); - if (err == NF_ACCEPT && - ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { - if (maniptype == NF_NAT_MANIP_SRC) - maniptype = NF_NAT_MANIP_DST; - else - maniptype = 
NF_NAT_MANIP_SRC; + if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { + if (ct->status & IPS_SRC_NAT) { + if (maniptype == NF_NAT_MANIP_SRC) + maniptype = NF_NAT_MANIP_DST; + else + maniptype = NF_NAT_MANIP_SRC; - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, - maniptype); + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, + maniptype); + } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { + err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, + NF_NAT_MANIP_SRC); + } } /* Mark NAT done if successful and update the flow key. */ @@ -1890,7 +1896,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net) struct hlist_head *head = &info->limits[i]; struct ovs_ct_limit *ct_limit; - hlist_for_each_entry_rcu(ct_limit, head, hlist_node) + hlist_for_each_entry_rcu(ct_limit, head, hlist_node, + lockdep_ovsl_is_held()) kfree_rcu(ct_limit, rcu); } kfree(ovs_net->ct_limit_info->limits); @@ -2012,16 +2019,12 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit, static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info, struct sk_buff *reply) { - struct ovs_zone_limit zone_limit; - int err; + struct ovs_zone_limit zone_limit = { + .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE, + .limit = info->default_limit, + }; - zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE; - zone_limit.limit = info->default_limit; - err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit); - if (err) - return err; - - return 0; + return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit); } static int __ovs_ct_limit_get_zone_limit(struct net *net, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 3eed90bfa2bf..4f097bd3339e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2430,8 +2430,10 @@ static void __net_exit ovs_exit_net(struct net *dnet) struct net *net; LIST_HEAD(head); - ovs_ct_exit(dnet); ovs_lock(); + + ovs_ct_exit(dnet); + list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) __dp_destroy(dp); diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index 3323b79ff548..541eea74ef7a 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -251,8 +251,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) * * Start with a full bucket. 
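 * For example, burst_size = 3000000 and rate = 2000000 give a
 * bucket of (3000000 + 2000000) * 1000 = 5e9, which no longer fits
 * in the old u32 field; hence the 1000ULL multiplier, the u64
 * bucket in struct dp_meter_band, and div_u64() for the division.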
*/ - band->bucket = (band->burst_size + band->rate) * 1000; - band_max_delta_t = band->bucket / band->rate; + band->bucket = (band->burst_size + band->rate) * 1000ULL; + band_max_delta_t = div_u64(band->bucket, band->rate); if (band_max_delta_t > meter->max_delta_t) meter->max_delta_t = band_max_delta_t; band++; diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h index f645913870bd..2e3fd6f1d7eb 100644 --- a/net/openvswitch/meter.h +++ b/net/openvswitch/meter.h @@ -23,7 +23,7 @@ struct dp_meter_band { u32 type; u32 rate; u32 burst_size; - u32 bucket; /* 1/1000 packets, or in bits */ + u64 bucket; /* 1/1000 packets, or in bits */ struct ovs_flow_stats stats; }; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 56084a16d0f9..9165e004878b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) + __releases(&pkc->blk_fill_in_prog_lock) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); @@ -988,6 +989,7 @@ static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) + __acquires(&pkc->blk_fill_in_prog_lock) { struct tpacket3_hdr *ppd; @@ -1726,7 +1728,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) err = -ENOSPC; if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); - po->fanout = match; + + /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ + WRITE_ONCE(po->fanout, match); + po->rollover = rollover; rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); @@ -2251,8 +2256,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, copy_skb = skb_get(skb); skb_head = skb->data; } - if (copy_skb) + if (copy_skb) { + memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, + sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); skb_set_owner_r(copy_skb, sk); + } } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { @@ -2290,8 +2298,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (do_vnet && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) + vio_le(), true, 0)) { + if (po->tp_version == TPACKET_V3) + prb_clear_blk_fill_status(&po->rx_ring); goto drop_n_account; + } if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); @@ -2397,7 +2408,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __clear_bit(slot_id, po->rx_ring.rx_owner_map); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); - } else { + } else if (po->tp_version == TPACKET_V3) { prb_clear_blk_fill_status(&po->rx_ring); } @@ -2651,7 +2662,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); - proto = po->num; + proto = READ_ONCE(po->num); } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) @@ -2779,8 +2790,9 @@ tpacket_error: status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); - if (unlikely(err > 0)) { - err = net_xmit_errno(err); + if (unlikely(err != 0)) { + if (err > 0) + err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ @@ -2864,7 +2876,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (likely(saddr == NULL)) 
{ dev = packet_cached_dev_get(po); - proto = po->num; + proto = READ_ONCE(po->num); } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) @@ -2981,8 +2993,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->no_fcs = 1; err = po->xmit(skb); - if (err > 0 && (err = net_xmit_errno(err)) != 0) - goto out_unlock; + if (unlikely(err != 0)) { + if (err > 0) + err = net_xmit_errno(err); + if (err) + goto out_unlock; + } dev_put(dev); @@ -3136,7 +3152,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, /* prevents packet_notifier() from calling * register_prot_hook() */ - po->num = 0; + WRITE_ONCE(po->num, 0); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; @@ -3146,17 +3162,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, } BUG_ON(po->running); - po->num = proto; + WRITE_ONCE(po->num, proto); po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; - po->ifindex = -1; + WRITE_ONCE(po->ifindex, -1); packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; - po->ifindex = dev ? dev->ifindex : 0; + WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); packet_cached_dev_assign(po, dev); } } @@ -3395,6 +3411,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { + const size_t max_len = min(sizeof(skb->cb), + sizeof(struct sockaddr_storage)); int copy_len; /* If the address length field is there to be filled @@ -3417,6 +3435,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, msg->msg_namelen = sizeof(struct sockaddr_ll); } } + if (WARN_ON_ONCE(copy_len > max_len)) { + copy_len = max_len; + msg->msg_namelen = copy_len; + } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); } @@ -3470,7 +3492,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); - dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); + dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); @@ -3485,16 +3507,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); + int ifindex; if (peer) return -EOPNOTSUPP; + ifindex = READ_ONCE(po->ifindex); sll->sll_family = AF_PACKET; - sll->sll_ifindex = po->ifindex; - sll->sll_protocol = po->num; + sll->sll_ifindex = ifindex; + sll->sll_protocol = READ_ONCE(po->num); sll->sll_pkttype = 0; rcu_read_lock(); - dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); + dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; @@ -3867,7 +3891,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv } case PACKET_FANOUT_DATA: { - if (!po->fanout) + /* Paired with the WRITE_ONCE() in fanout_add() */ + if (!READ_ONCE(po->fanout)) return -EINVAL; return fanout_set_data(po, optval, optlen); @@ -4094,7 +4119,7 @@ static int packet_notifier(struct notifier_block *this, } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); - po->ifindex = -1; + WRITE_ONCE(po->ifindex, -1); if (po->prot_hook.dev) dev_put(po->prot_hook.dev); 
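				/* The WRITE_ONCE() above pairs with the
				 * READ_ONCE() readers of po->ifindex added in
				 * packet_getname(), packet_getname_spkt() and
				 * packet_seq_show(); these are data-race
				 * annotations for lockless readers and impose
				 * no memory ordering.
				 */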
po->prot_hook.dev = NULL; @@ -4400,7 +4425,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, was_running = po->running; num = po->num; if (was_running) { - po->num = 0; + WRITE_ONCE(po->num, 0); __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock); @@ -4435,7 +4460,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, spin_lock(&po->bind_lock); if (was_running) { - po->num = num; + WRITE_ONCE(po->num, num); register_prot_hook(sk); } spin_unlock(&po->bind_lock); @@ -4446,9 +4471,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } out_free_pg_vec: - bitmap_free(rx_owner_map); - if (pg_vec) + if (pg_vec) { + bitmap_free(rx_owner_map); free_pg_vec(pg_vec, order, req->tp_block_nr); + } out: return err; } @@ -4608,8 +4634,8 @@ static int packet_seq_show(struct seq_file *seq, void *v) s, refcount_read(&s->sk_refcnt), s->sk_type, - ntohs(po->num), - po->ifindex, + ntohs(READ_ONCE(po->num)), + READ_ONCE(po->ifindex), po->running, atomic_read(&s->sk_rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 3d24d45be5f4..46273a838361 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -178,7 +178,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, { struct qrtr_hdr_v1 *hdr; size_t len = skb->len; - int rc = -ENODEV; + int rc; hdr = skb_push(skb, sizeof(*hdr)); hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); @@ -187,7 +187,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->src_port_id = cpu_to_le32(from->sq_port); if (to->sq_port == QRTR_PORT_CTRL) { hdr->dst_node_id = cpu_to_le32(node->nid); - hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST); + hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); } else { hdr->dst_node_id = cpu_to_le32(to->sq_node); hdr->dst_port_id = cpu_to_le32(to->sq_port); @@ -196,15 +196,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->size = cpu_to_le32(len); hdr->confirm_rx = 0; - skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); - - mutex_lock(&node->ep_lock); - if (node->ep) - rc = node->ep->xmit(node->ep, skb); - else - kfree_skb(skb); - mutex_unlock(&node->ep_lock); + rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); + if (!rc) { + mutex_lock(&node->ep_lock); + rc = -ENODEV; + if (node->ep) + rc = node->ep->xmit(node->ep, skb); + else + kfree_skb(skb); + mutex_unlock(&node->ep_lock); + } return rc; } @@ -259,10 +261,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) unsigned int ver; size_t hdrlen; - if (len & 3) + if (len == 0 || len & 3) return -EINVAL; - skb = netdev_alloc_skb(NULL, len); + skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN); if (!skb) return -ENOMEM; @@ -273,6 +275,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) switch (ver) { case QRTR_PROTO_VER_1: + if (len < sizeof(*v1)) + goto err; v1 = data; hdrlen = sizeof(*v1); @@ -286,6 +290,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) size = le32_to_cpu(v1->size); break; case QRTR_PROTO_VER_2: + if (len < sizeof(*v2)) + goto err; v2 = data; hdrlen = sizeof(*v2) + v2->optlen; @@ -543,23 +549,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { + u32 min_port; int rc; mutex_lock(&qrtr_port_lock); if (!*port) { - rc = idr_alloc(&qrtr_ports, ipc, - QRTR_MIN_EPH_SOCKET, 
QRTR_MAX_EPH_SOCKET + 1, - GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = QRTR_MIN_EPH_SOCKET; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC); + if (!rc) + *port = min_port; } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + min_port = 0; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); } else { - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = *port; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); + if (!rc) + *port = min_port; } mutex_unlock(&qrtr_port_lock); @@ -711,7 +719,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, } mutex_unlock(&qrtr_node_lock); - qrtr_local_enqueue(node, skb, type, from, to); + qrtr_local_enqueue(NULL, skb, type, from, to); return 0; } @@ -763,27 +771,30 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) node = NULL; if (addr->sq_node == QRTR_NODE_BCAST) { - enqueue_fn = qrtr_bcast_enqueue; - if (addr->sq_port != QRTR_PORT_CTRL) { + if (addr->sq_port != QRTR_PORT_CTRL && + qrtr_local_nid != QRTR_NODE_BCAST) { release_sock(sk); return -ENOTCONN; } + enqueue_fn = qrtr_bcast_enqueue; } else if (addr->sq_node == ipc->us.sq_node) { enqueue_fn = qrtr_local_enqueue; } else { - enqueue_fn = qrtr_node_enqueue; node = qrtr_node_lookup(addr->sq_node); if (!node) { release_sock(sk); return -ECONNRESET; } + enqueue_fn = qrtr_node_enqueue; } plen = (len + 3) & ~3; skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE, msg->msg_flags & MSG_DONTWAIT, &rc); - if (!skb) + if (!skb) { + rc = -ENOMEM; goto out_node; + } skb_reserve(skb, QRTR_HDR_MAX_SIZE); @@ -851,6 +862,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, rc = copied; if (addr) { + /* There is an anonymous 2-byte hole after sq_family, + * make sure to clear it. 
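+		 *
+		 * Layout sketch (see include/uapi/linux/qrtr.h):
+		 *
+		 *	struct sockaddr_qrtr {
+		 *		__kernel_sa_family_t sq_family;
+		 *			<- 2 bytes of padding here
+		 *		__u32 sq_node;
+		 *		__u32 sq_port;
+		 *	};
+		 *
+		 * Without this memset() the padding would carry stale
+		 * kernel stack bytes back to userspace via msg_name.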
+ */ + memset(addr, 0, sizeof(*addr)); + cb = (struct qrtr_cb *)skb->cb; addr->sq_family = AF_QIPCRTR; addr->sq_node = cb->src_node; @@ -999,6 +1015,7 @@ static int qrtr_release(struct socket *sock) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); + sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c index e35869e81766..cb425e216d46 100644 --- a/net/qrtr/tun.c +++ b/net/qrtr/tun.c @@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb) static int qrtr_tun_open(struct inode *inode, struct file *filp) { struct qrtr_tun *tun; + int ret; tun = kzalloc(sizeof(*tun), GFP_KERNEL); if (!tun) @@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp) filp->private_data = tun; - return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + if (ret) + goto out; + + return 0; + +out: + filp->private_data = NULL; + kfree(tun); + return ret; } static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to) @@ -80,6 +90,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t ret; void *kbuf; + if (!len) + return -EINVAL; + + if (len > KMALLOC_MAX_SIZE) + return -ENOMEM; + kbuf = kzalloc(len, GFP_KERNEL); if (!kbuf) return -ENOMEM; diff --git a/net/rds/message.c b/net/rds/message.c index 50f13f1d4ae0..92b6b22884d4 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -308,26 +308,20 @@ out: /* * RDS ops use this to grab SG entries from the rm's sg pool. */ -struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents, - int *ret) +struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) { struct scatterlist *sg_first = (struct scatterlist *) &rm[1]; struct scatterlist *sg_ret; - if (WARN_ON(!ret)) - return NULL; - if (nents <= 0) { pr_warn("rds: alloc sgs failed! nents <= 0\n"); - *ret = -EINVAL; - return NULL; + return ERR_PTR(-EINVAL); } if (rm->m_used_sgs + nents > rm->m_total_sgs) { pr_warn("rds: alloc sgs failed! 
total %d used %d nents %d\n", rm->m_total_sgs, rm->m_used_sgs, nents); - *ret = -ENOMEM; - return NULL; + return ERR_PTR(-ENOMEM); } sg_ret = &sg_first[rm->m_used_sgs]; @@ -343,7 +337,6 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in unsigned int i; int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE); int extra_bytes = num_sgs * sizeof(struct scatterlist); - int ret; rm = rds_message_alloc(extra_bytes, GFP_NOWAIT); if (!rm) @@ -352,10 +345,11 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); - rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); - if (!rm->data.op_sg) { + rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); + if (IS_ERR(rm->data.op_sg)) { + void *err = ERR_CAST(rm->data.op_sg); rds_message_put(rm); - return ERR_PTR(ret); + return err; } for (i = 0; i < rm->data.op_nents; ++i) { diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 916f5ec373d8..1c42a600fe7f 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -532,6 +532,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args, if (args->nr_local == 0) return -EINVAL; + if (args->nr_local > UIO_MAXIOV) + return -EMSGSIZE; + iov->iov = kcalloc(args->nr_local, sizeof(struct rds_iovec), GFP_KERNEL); @@ -624,9 +627,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, op->op_active = 1; op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); - op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret); - if (!op->op_sg) + op->op_sg = rds_message_alloc_sgs(rm, nr_pages); + if (IS_ERR(op->op_sg)) { + ret = PTR_ERR(op->op_sg); goto out_pages; + } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because @@ -828,9 +833,11 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT); rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; - rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret); - if (!rm->atomic.op_sg) + rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); + if (IS_ERR(rm->atomic.op_sg)) { + ret = PTR_ERR(rm->atomic.op_sg); goto err; + } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { diff --git a/net/rds/rds.h b/net/rds/rds.h index 53e86911773a..2ac5b5e55901 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -849,8 +849,7 @@ rds_conn_connecting(struct rds_connection *conn) /* message.c */ struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp); -struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents, - int *ret); +struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents); int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from, bool zcopy); struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len); diff --git a/net/rds/recv.c b/net/rds/recv.c index c8404971d5ab..aba4afe4dfed 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* 
put_cmsg copies to user space and thus may sleep. We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rds/send.c b/net/rds/send.c index 82dcd8b84fe7..68e2bdb08fd0 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1274,9 +1274,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) /* Attach data to the rm */ if (payload_len) { - rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); - if (!rm->data.op_sg) + rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); + if (IS_ERR(rm->data.op_sg)) { + ret = PTR_ERR(rm->data.op_sg); goto out; + } ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy); if (ret) goto out; diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 6c089320ae4f..5bba7c36ac74 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -876,6 +876,9 @@ static int rfkill_resume(struct device *dev) rfkill->suspended = false; + if (!rfkill->registered) + return 0; + if (!rfkill->persistent) { cur = !!(rfkill->state & RFKILL_BLOCK_SW); rfkill_set_block(rfkill, cur); diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 7b094275ea8b..11c45c8c6c16 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -96,10 +96,19 @@ static void rose_loopback_timer(struct timer_list *unused) } if (frametype == ROSE_CALL_REQUEST) { - if ((dev = rose_dev_get(dest)) != NULL) { - if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) - kfree_skb(skb); - } else { + if (!rose_loopback_neigh->dev) { + kfree_skb(skb); + continue; + } + + dev = rose_dev_get(dest); + if (!dev) { + kfree_skb(skb); + continue; + } + + if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) { + dev_put(dev); kfree_skb(skb); } } else { diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 6ffb7e9887ce..ddd0f95713a9 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -25,6 +25,7 @@ rxrpc-y := \ peer_event.o \ peer_object.o \ recvmsg.o \ + rtt.o \ security.o \ sendmsg.o \ skbuff.o \ diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a293238fe1e7..9bacec6653ba 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, gfp_t gfp, rxrpc_notify_rx_t notify_rx, bool upgrade, - bool intr, + enum rxrpc_interruptibility interruptibility, unsigned int debug_id) { struct rxrpc_conn_parameters cp; @@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, memset(&p, 0, sizeof(p)); p.user_call_ID = user_call_ID; p.tx_total_len = tx_total_len; - p.intr = intr; + p.interruptibility = interruptibility; memset(&cp, 0, sizeof(cp)); cp.local = rx->local; @@ -976,7 +976,7 @@ static int __init af_rxrpc_init(void) goto error_security; } - ret = register_pernet_subsys(&rxrpc_net_ops); + ret = register_pernet_device(&rxrpc_net_ops); if (ret) goto error_pernet; @@ -1021,7 +1021,7 @@ error_key_type: error_sock: proto_unregister(&rxrpc_proto); error_proto: - unregister_pernet_subsys(&rxrpc_net_ops); + unregister_pernet_device(&rxrpc_net_ops); error_pernet: rxrpc_exit_security(); error_security: @@ -1043,7 +1043,7 @@ static void __exit af_rxrpc_exit(void) unregister_key_type(&key_type_rxrpc); sock_unregister(PF_RXRPC); proto_unregister(&rxrpc_proto); - unregister_pernet_subsys(&rxrpc_net_ops); + unregister_pernet_device(&rxrpc_net_ops); ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0); ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0); diff --git a/net/rxrpc/ar-internal.h 
b/net/rxrpc/ar-internal.h index 394d18857979..9fe264bec70c 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -7,6 +7,7 @@
 #include <linux/atomic.h>
 #include <linux/seqlock.h>
+#include <linux/win_minmax.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -311,11 +312,14 @@ struct rxrpc_peer {
 #define RXRPC_RTT_CACHE_SIZE 32
 	spinlock_t	rtt_input_lock;	/* RTT lock for input routine */
 	ktime_t		rtt_last_req;	/* Time of last RTT request */
-	u64		rtt;		/* Current RTT estimate (in nS) */
-	u64		rtt_sum;	/* Sum of cache contents */
-	u64		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
-	u8		rtt_cursor;	/* next entry at which to insert */
-	u8		rtt_usage;	/* amount of cache actually used */
+	unsigned int	rtt_count;	/* Number of samples we've got */
+
+	u32		srtt_us;	/* smoothed round trip time << 3 in usecs */
+	u32		mdev_us;	/* mean deviation */
+	u32		mdev_max_us;	/* maximal mdev for the last rtt period */
+	u32		rttvar_us;	/* smoothed mdev_max */
+	u32		rto_j;		/* Retransmission timeout in jiffies */
+	u8		backoff;	/* Backoff timeout */
 	u8		cong_cwnd;	/* Congestion window size */
 };
@@ -489,7 +493,6 @@ enum rxrpc_call_flag {
 	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
 	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
 	RXRPC_CALL_RX_UNDERRUN,		/* Got data underrun */
-	RXRPC_CALL_IS_INTR,		/* The call is interruptible */
 	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
 };
@@ -598,6 +601,7 @@ struct rxrpc_call {
 	atomic_t	usage;
 	u16		service_id;	/* service ID */
 	u8		security_ix;	/* Security type */
+	enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
 	u32		call_id;	/* call ID on connection */
 	u32		cid;		/* connection ID plus channel index */
 	int		debug_id;	/* debug ID for printks */
@@ -720,7 +724,7 @@ struct rxrpc_call_params {
 		u32	normal;		/* Max time since last call packet (msec) */
 	} timeouts;
 	u8		nr_timeouts;	/* Number of timeouts specified */
-	bool		intr;		/* The call is interruptible */
+	enum rxrpc_interruptibility interruptibility; /* How interruptible is the call?
*/ }; struct rxrpc_send_params { @@ -1041,7 +1045,6 @@ extern unsigned long rxrpc_idle_ack_delay; extern unsigned int rxrpc_rx_window_size; extern unsigned int rxrpc_rx_mtu; extern unsigned int rxrpc_rx_jumbo_max; -extern unsigned long rxrpc_resend_timeout; extern const s8 rxrpc_ack_priority[]; @@ -1069,8 +1072,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *); * peer_event.c */ void rxrpc_error_report(struct sock *); -void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, - rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); void rxrpc_peer_keepalive_worker(struct work_struct *); /* @@ -1102,6 +1103,14 @@ extern const struct seq_operations rxrpc_peer_seq_ops; void rxrpc_notify_socket(struct rxrpc_call *); int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); +/* + * rtt.c + */ +void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); +unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); +void rxrpc_peer_init_rtt(struct rxrpc_peer *); + /* * rxkad.c */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 70e44abf106c..55fb3744552d 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -22,6 +22,11 @@ #include <net/ip.h> #include "ar-internal.h" +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, + unsigned long user_call_ID) +{ +} + /* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them. @@ -202,6 +207,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) tail = b->peer_backlog_tail; while (CIRC_CNT(head, tail, size) > 0) { struct rxrpc_peer *peer = b->peer_backlog[tail]; + rxrpc_put_local(peer->local); kfree(peer); tail = (tail + 1) & (size - 1); } @@ -228,6 +234,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); + if (call->notify_rx) + call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); @@ -248,7 +256,7 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) struct rxrpc_skb_priv *sp = rxrpc_skb(skb); ktime_t now = skb->tstamp; - if (call->peer->rtt_usage < 3 || + if (call->peer->rtt_count < 3 || ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, true, true, diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index cedbbb3a7c2e..9ff85ee8337c 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -111,8 +111,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, } else { unsigned long now = jiffies, ack_at; - if (call->peer->rtt_usage > 0) - ack_at = nsecs_to_jiffies(call->peer->rtt); + if (call->peer->srtt_us != 0) + ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3); else ack_at = expiry; @@ -157,24 +157,18 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) { struct sk_buff *skb; - unsigned long resend_at; + unsigned long resend_at, rto_j; rxrpc_seq_t cursor, seq, top; - ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; + ktime_t now, max_age, oldest, ack_ts; int ix; u8 annotation, anno_type, retrans = 0, unacked = 0; _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); - if (call->peer->rtt_usage > 1) - timeout = 
ns_to_ktime(call->peer->rtt * 3 / 2); - else - timeout = ms_to_ktime(rxrpc_resend_timeout); - min_timeo = ns_to_ktime((1000000000 / HZ) * 4); - if (ktime_before(timeout, min_timeo)) - timeout = min_timeo; + rto_j = call->peer->rto_j; now = ktime_get_real(); - max_age = ktime_sub(now, timeout); + max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); spin_lock_bh(&call->lock); @@ -219,7 +213,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) } resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); - resend_at += jiffies + rxrpc_resend_timeout; + resend_at += jiffies + rto_j; WRITE_ONCE(call->resend_at, resend_at); if (unacked) @@ -234,7 +228,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) rxrpc_timer_set_for_resend); spin_unlock_bh(&call->lock); ack_ts = ktime_sub(now, call->acks_latest_ts); - if (ktime_to_ns(ack_ts) < call->peer->rtt) + if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3)) goto out; rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, rxrpc_propose_ack_ping_for_lost_ack); @@ -254,7 +248,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) if (anno_type != RXRPC_TX_ANNO_RETRANS) continue; + /* We need to reset the retransmission state, but we need to do + * so before we drop the lock as a new ACK/NAK may come in and + * confuse things + */ + annotation &= ~RXRPC_TX_ANNO_MASK; + annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT; + call->rxtx_annotations[ix] = annotation; + skb = call->rxtx_buffer[ix]; + if (!skb) + continue; + rxrpc_get_skb(skb, rxrpc_skb_got); spin_unlock_bh(&call->lock); @@ -268,24 +273,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) rxrpc_free_skb(skb, rxrpc_skb_freed); spin_lock_bh(&call->lock); - - /* We need to clear the retransmit state, but there are two - * things we need to be aware of: A new ACK/NAK might have been - * received and the packet might have been hard-ACK'd (in which - * case it will no longer be in the buffer). 
- */ - if (after(seq, call->tx_hard_ack)) { - annotation = call->rxtx_annotations[ix]; - anno_type = annotation & RXRPC_TX_ANNO_MASK; - if (anno_type == RXRPC_TX_ANNO_RETRANS || - anno_type == RXRPC_TX_ANNO_NAK) { - annotation &= ~RXRPC_TX_ANNO_MASK; - annotation |= RXRPC_TX_ANNO_UNACK; - } - annotation |= RXRPC_TX_ANNO_RESENT; - call->rxtx_annotations[ix] = annotation; - } - if (after(call->tx_hard_ack, seq)) seq = call->tx_hard_ack; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index c9f34b0a11df..f8233bc76c0e 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, return call; } - if (p->intr) - __set_bit(RXRPC_CALL_IS_INTR, &call->flags); + call->interruptibility = p->interruptibility; call->tx_total_len = p->tx_total_len; trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, atomic_read(&call->usage), @@ -289,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -309,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call->debug_id, rxrpc_call_error, + atomic_read(&call->usage), here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* @@ -497,8 +507,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) rxrpc_disconnect_call(call); if (call->security) call->security->free_call_crypto(call); - - rxrpc_cleanup_ring(call); _leave(""); } diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index ea7d4c21f889..f2a1a5dbb5a7 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp) add_wait_queue_exclusive(&call->waitq, &myself); for (;;) { - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags)) + switch (call->interruptibility) { + case RXRPC_INTERRUPTIBLE: + case RXRPC_PREINTERRUPTIBLE: set_current_state(TASK_INTERRUPTIBLE); - else + break; + case RXRPC_UNINTERRUPTIBLE: + default: set_current_state(TASK_UNINTERRUPTIBLE); + break; + } if (call->call_id) break; - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && + if ((call->interruptibility == RXRPC_INTERRUPTIBLE || + call->interruptibility == RXRPC_PREINTERRUPTIBLE) && signal_pending(current)) { ret = -ERESTARTSYS; break; diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 06fcff2ebbba..b5864683f200 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -341,18 +341,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, return ret; spin_lock(&conn->channel_lock); - spin_lock(&conn->state_lock); + spin_lock_bh(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { conn->state = RXRPC_CONN_SERVICE; - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure( rcu_dereference_protected( conn->channels[loop].call, lockdep_is_held(&conn->channel_lock))); } else { - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); } spin_unlock(&conn->channel_lock); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 19e141eeed17..8cbe0bf20ed5 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 69e09d69c896..916d1f455b21 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -91,11 +91,11 @@ static void rxrpc_congestion_management(struct rxrpc_call *call, /* We analyse the number of packets that get ACK'd per RTT * period and increase the window if we managed to fill it. 
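	 *
	 * Units note: srtt_us is the smoothed RTT scaled by 8 (as in TCP),
	 * so srtt_us >> 3 below is in microseconds; hence the switch from
	 * ktime_add_ns() on the old raw nanosecond estimate to
	 * ktime_add_us() here.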
*/ - if (call->peer->rtt_usage == 0) + if (call->peer->rtt_count == 0) goto out; if (ktime_before(skb->tstamp, - ktime_add_ns(call->cong_tstamp, - call->peer->rtt))) + ktime_add_us(call->cong_tstamp, + call->peer->srtt_us >> 3))) goto out_no_clear_ca; change = rxrpc_cong_rtt_window_end; call->cong_tstamp = skb->tstamp; @@ -431,7 +431,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) return; } - if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { + if (state == RXRPC_CALL_SERVER_RECV_REQUEST) { unsigned long timo = READ_ONCE(call->next_req_timo); unsigned long now, expect_req_by; @@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (call->tx_winsize != rwind) { - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, - ntohl(ackinfo->rwind), wake); + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); call->tx_winsize = rwind; } @@ -802,6 +801,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, } } +/* + * Return true if the ACK is valid - ie. it doesn't appear to have regressed + * with respect to the ack state conveyed by preceding ACKs. + */ +static bool rxrpc_is_ack_valid(struct rxrpc_call *call, + rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) +{ + rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); + + if (after(first_pkt, base)) + return true; /* The window advanced */ + + if (before(first_pkt, base)) + return false; /* firstPacket regressed */ + + if (after_eq(prev_pkt, call->ackr_prev_seq)) + return true; /* previousPacket hasn't regressed. */ + + /* Some rx implementations put a serial number in previousPacket. */ + if (after_eq(prev_pkt, base + call->tx_winsize)) + return false; + return true; +} + /* * Process an ACK packet. * @@ -821,7 +844,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) struct rxrpc_ackinfo info; u8 acks[RXRPC_MAXACKS]; } buf; - rxrpc_serial_t acked_serial; + rxrpc_serial_t ack_serial, acked_serial; rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; int nr_acks, offset, ioffset; @@ -834,6 +857,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) } offset += sizeof(buf.ack); + ack_serial = sp->hdr.serial; acked_serial = ntohl(buf.ack.serial); first_soft_ack = ntohl(buf.ack.firstPacket); prev_pkt = ntohl(buf.ack.previousPacket); @@ -842,32 +866,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? 
buf.ack.reason : RXRPC_ACK__INVALID); - trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, + trace_rxrpc_rx_ack(call, ack_serial, acked_serial, first_soft_ack, prev_pkt, summary.ack_reason, nr_acks); if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) rxrpc_input_ping_response(call, skb->tstamp, acked_serial, - sp->hdr.serial); + ack_serial); if (buf.ack.reason == RXRPC_ACK_REQUESTED) rxrpc_input_requested_ack(call, skb->tstamp, acked_serial, - sp->hdr.serial); + ack_serial); if (buf.ack.reason == RXRPC_ACK_PING) { - _proto("Rx ACK %%%u PING Request", sp->hdr.serial); + _proto("Rx ACK %%%u PING Request", ack_serial); rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ping); } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ack); } /* Discard any out-of-order or duplicate ACKs (outside lock). */ - if (before(first_soft_ack, call->ackr_first_seq) || - before(prev_pkt, call->ackr_prev_seq)) + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, + first_soft_ack, call->ackr_first_seq, + prev_pkt, call->ackr_prev_seq); return; + } buf.info.rxMTU = 0; ioffset = offset + nr_acks + 3; @@ -878,9 +905,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) spin_lock(&call->input_lock); /* Discard any out-of-order or duplicate ACKs (inside lock). */ - if (before(first_soft_ack, call->ackr_first_seq) || - before(prev_pkt, call->ackr_prev_seq)) + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, + first_soft_ack, call->ackr_first_seq, + prev_pkt, call->ackr_prev_seq); goto out; + } call->acks_latest_ts = skb->tstamp; call->ackr_first_seq = first_soft_ack; @@ -936,7 +966,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) RXRPC_TX_ANNO_LAST && summary.nr_acks == call->tx_top - hard_ack && rxrpc_is_client_call(call)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial, false, true, rxrpc_propose_ack_ping_for_lost_reply); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 6c3f35fac42d..2b26c4d229a5 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -31,7 +31,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *); static void rxrpc_destroy(struct key *); static void rxrpc_destroy_s(struct key *); static void rxrpc_describe(const struct key *, struct seq_file *); -static long rxrpc_read(const struct key *, char __user *, size_t); +static long rxrpc_read(const struct key *, char *, size_t); /* * rxrpc defined keys take an arbitrary string as the description and an @@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) _enter(""); - if (optlen <= 0 || optlen > PAGE_SIZE - 1) + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) return -EINVAL; description = memdup_user_nul(optval, optlen); @@ -941,7 +941,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, if (IS_ERR(description)) return PTR_ERR(description); - key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL); + key = request_key(&key_type_keyring, description, NULL); if (IS_ERR(key)) { kfree(description); _leave(" = %ld", PTR_ERR(key)); @@ -1042,12 +1042,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); * 
- this returns the result in XDR form */ static long rxrpc_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { const struct rxrpc_key_token *token; const struct krb5_principal *princ; size_t size; - __be32 __user *xdr, *oldxdr; + __be32 *xdr, *oldxdr; u32 cnlen, toksize, ntoks, tok, zero; u16 toksizes[AFSTOKEN_MAX]; int loop; @@ -1073,7 +1073,7 @@ static long rxrpc_read(const struct key *key, switch (token->security_index) { case RXRPC_SECURITY_RXKAD: - toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, + toksize += 8 * 4; /* viceid, kvno, key*2, begin, * end, primary, tktlen */ toksize += RND(token->kad->ticket_len); break; @@ -1108,8 +1108,9 @@ static long rxrpc_read(const struct key *key, break; default: /* we have a ticket we can't encode */ - BUG(); - continue; + pr_err("Unsupported key token type (%u)\n", + token->security_index); + return -ENOPKG; } _debug("token[%u]: toksize=%u", ntoks, toksize); @@ -1124,30 +1125,33 @@ static long rxrpc_read(const struct key *key, if (!buffer || buflen < size) return size; - xdr = (__be32 __user *) buffer; + xdr = (__be32 *)buffer; zero = 0; #define ENCODE(x) \ do { \ - __be32 y = htonl(x); \ - if (put_user(y, xdr++) < 0) \ - goto fault; \ + *xdr++ = htonl(x); \ } while(0) #define ENCODE_DATA(l, s) \ do { \ u32 _l = (l); \ ENCODE(l); \ - if (copy_to_user(xdr, (s), _l) != 0) \ - goto fault; \ - if (_l & 3 && \ - copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ - goto fault; \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) +#define ENCODE_BYTES(l, s) \ + do { \ + u32 _l = (l); \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ xdr += (_l + 3) >> 2; \ } while(0) #define ENCODE64(x) \ do { \ __be64 y = cpu_to_be64(x); \ - if (copy_to_user(xdr, &y, 8) != 0) \ - goto fault; \ + memcpy(xdr, &y, 8); \ xdr += 8 >> 2; \ } while(0) #define ENCODE_STR(s) \ @@ -1171,7 +1175,7 @@ static long rxrpc_read(const struct key *key, case RXRPC_SECURITY_RXKAD: ENCODE(token->kad->vice_id); ENCODE(token->kad->kvno); - ENCODE_DATA(8, token->kad->session_key); + ENCODE_BYTES(8, token->kad->session_key); ENCODE(token->kad->start); ENCODE(token->kad->expiry); ENCODE(token->kad->primary_flag); @@ -1221,8 +1225,9 @@ static long rxrpc_read(const struct key *key, break; default: - BUG(); - break; + pr_err("Unsupported key token type (%u)\n", + token->security_index); + return -ENOPKG; } ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==, @@ -1238,8 +1243,4 @@ static long rxrpc_read(const struct key *key, ASSERTCMP((char __user *) xdr - buffer, ==, size); _leave(" = %zu", size); return size; - -fault: - _leave(" = -EFAULT"); - return -EFAULT; } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index a6c1349e965d..01135e54d95d 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -165,15 +165,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) goto error; } - /* we want to set the don't fragment bit */ - opt = IPV6_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; - } - /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. 
*/ diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c index 214405f75346..d4144fd86f84 100644 --- a/net/rxrpc/misc.c +++ b/net/rxrpc/misc.c @@ -63,11 +63,6 @@ unsigned int rxrpc_rx_mtu = 5692; */ unsigned int rxrpc_rx_jumbo_max = 4; -/* - * Time till packet resend (in milliseconds). - */ -unsigned long rxrpc_resend_timeout = 4 * HZ; - const s8 rxrpc_ack_priority[] = { [0] = 0, [RXRPC_ACK_DELAY] = 1, diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index bad3d2420344..f8b632a5c619 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -369,7 +369,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || retrans || call->cong_mode == RXRPC_CALL_SLOW_START || - (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || + (call->peer->rtt_count < 3 && sp->hdr.seq & 1) || ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))) whdr.flags |= RXRPC_REQUEST_ACK; @@ -423,13 +423,10 @@ done: if (whdr.flags & RXRPC_REQUEST_ACK) { call->peer->rtt_last_req = skb->tstamp; trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); - if (call->peer->rtt_usage > 1) { + if (call->peer->rtt_count > 1) { unsigned long nowj = jiffies, ack_lost_at; - ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); - if (ack_lost_at < 1) - ack_lost_at = 1; - + ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); ack_lost_at += nowj; WRITE_ONCE(call->ack_lost_at, ack_lost_at); rxrpc_reduce_call_timer(call, ack_lost_at, nowj, @@ -474,42 +471,22 @@ send_fragmentable: skb->tstamp = ktime_get_real(); switch (conn->params.local->srx.transport.family) { + case AF_INET6: case AF_INET: opt = IP_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IP, IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); + ret = kernel_sendmsg(conn->params.local->socket, &msg, + iov, 2, len); + conn->params.peer->last_tx_at = ktime_get_seconds(); - opt = IP_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, SOL_IP, - IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } + opt = IP_PMTUDISC_DO; + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); break; -#ifdef CONFIG_AF_RXRPC_IPV6 - case AF_INET6: - opt = IPV6_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); - - opt = IPV6_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } - break; -#endif - default: BUG(); } diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 923b263c401b..b1449d971883 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -295,52 +295,6 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error, } } -/* - * Add RTT information to cache. This is called in softirq mode and has - * exclusive access to the peer RTT data. 
- */ -void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, - rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, - ktime_t send_time, ktime_t resp_time) -{ - struct rxrpc_peer *peer = call->peer; - s64 rtt; - u64 sum = peer->rtt_sum, avg; - u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage; - - rtt = ktime_to_ns(ktime_sub(resp_time, send_time)); - if (rtt < 0) - return; - - spin_lock(&peer->rtt_input_lock); - - /* Replace the oldest datum in the RTT buffer */ - sum -= peer->rtt_cache[cursor]; - sum += rtt; - peer->rtt_cache[cursor] = rtt; - peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1); - peer->rtt_sum = sum; - if (usage < RXRPC_RTT_CACHE_SIZE) { - usage++; - peer->rtt_usage = usage; - } - - spin_unlock(&peer->rtt_input_lock); - - /* Now recalculate the average */ - if (usage == RXRPC_RTT_CACHE_SIZE) { - avg = sum / RXRPC_RTT_CACHE_SIZE; - } else { - avg = sum; - do_div(avg, usage); - } - - /* Don't need to update this under lock */ - peer->rtt = avg; - trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, - usage, avg); -} - /* * Perform keep-alive pings. */ diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 64830d8c1fdb..e011594adcd1 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -224,6 +224,8 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) spin_lock_init(&peer->rtt_input_lock); peer->debug_id = atomic_inc_return(&rxrpc_debug_id); + rxrpc_peer_init_rtt(peer); + if (RXRPC_TX_SMSS > 2190) peer->cong_cwnd = 2; else if (RXRPC_TX_SMSS > 1095) @@ -495,14 +497,24 @@ void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call, EXPORT_SYMBOL(rxrpc_kernel_get_peer); /** - * rxrpc_kernel_get_rtt - Get a call's peer RTT + * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT * @sock: The socket on which the call is in progress. * @call: The call to query + * @_srtt: Where to store the SRTT value. * - * Get the call's peer RTT. + * Get the call's peer smoothed RTT in uS. 
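+ *
+ * Returns true if at least one RTT sample has been taken; otherwise
+ * *_srtt is set to a conservative 1 second and false is returned. A
+ * hedged usage sketch (the doubling policy is illustrative only):
+ *
+ *	u32 srtt_us;
+ *	unsigned long timeout;
+ *
+ *	if (rxrpc_kernel_get_srtt(sock, call, &srtt_us))
+ *		timeout = usecs_to_jiffies(srtt_us) * 2;
+ *	else
+ *		timeout = 2 * HZ;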
*/ -u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call) +bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call, + u32 *_srtt) { - return call->peer->rtt; + struct rxrpc_peer *peer = call->peer; + + if (peer->rtt_count == 0) { + *_srtt = 1000000; /* 1S */ + return false; + } + + *_srtt = call->peer->srtt_us >> 3; + return true; } -EXPORT_SYMBOL(rxrpc_kernel_get_rtt); +EXPORT_SYMBOL(rxrpc_kernel_get_srtt); diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index b9d053e42821..543afd9bd664 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -68,7 +68,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) "Proto Local " " Remote " " SvID ConnID CallID End Use State Abort " - " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n"); + " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n"); return 0; } @@ -100,7 +100,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) rx_hard_ack = READ_ONCE(call->rx_hard_ack); seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" - " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n", + " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n", lbuff, rbuff, call->service_id, @@ -110,7 +110,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) atomic_read(&call->usage), rxrpc_call_states[call->state], call->abort_code, - call->user_call_ID, + call->debug_id, tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack, call->rx_serial, @@ -222,7 +222,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "Proto Local " " Remote " - " Use CW MTU LastUse RTT Rc\n" + " Use CW MTU LastUse RTT RTO\n" ); return 0; } @@ -236,15 +236,15 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) now = ktime_get_seconds(); seq_printf(seq, "UDP %-47.47s %-47.47s %3u" - " %3u %5u %6llus %12llu %2u\n", + " %3u %5u %6llus %8u %8u\n", lbuff, rbuff, atomic_read(&peer->usage), peer->cong_cwnd, peer->mtu, now - peer->last_tx_at, - peer->rtt, - peer->rtt_cursor); + peer->srtt_us >> 3, + jiffies_to_usecs(peer->rto_j)); return 0; } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 8578c39ec839..4f48e3bdd4b4 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -464,7 +464,7 @@ try_again: list_empty(&rx->recvmsg_q) && rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); - return -ENODATA; + return -EAGAIN; } if (list_empty(&rx->recvmsg_q)) { @@ -541,7 +541,7 @@ try_again: goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c new file mode 100644 index 000000000000..928d8b34a3ee --- /dev/null +++ b/net/rxrpc/rtt.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 +/* RTT/RTO calculation. 
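+ *
+ * In brief: srtt_us stores the smoothed RTT scaled by 8, and the
+ * unbacked-off RTO is (srtt_us >> 3) + rttvar_us converted to jiffies
+ * and clamped to RXRPC_RTO_MAX. Worked example (hypothetical first
+ * sample of 50000us): srtt_us = 50000 << 3, mdev_us = 50000 << 1 =
+ * 100000, rttvar_us = 100000, so the RTO covers 50000 + 100000 =
+ * 150000us, three times the first measured RTT, as the estimator's
+ * comments note below.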
+ *
+ * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
+ *
+ * https://tools.ietf.org/html/rfc6298
+ * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
+ * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
+ */
+
+#include <linux/net.h>
+#include "ar-internal.h"
+
+#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
+#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define rxrpc_jiffies32 ((u32)jiffies) /* As tcp_jiffies32 */
+#define rxrpc_min_rtt_wlen 300 /* As sysctl_tcp_min_rtt_wlen */
+
+static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
+{
+	return 200;
+}
+
+static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
+{
+	return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+}
+
+static u32 rxrpc_bound_rto(u32 rto)
+{
+	return min(rto, RXRPC_RTO_MAX);
+}
+
+/*
+ * Called to compute a smoothed rtt estimate. The data fed to this
+ * routine either comes from timestamps, or from segments that were
+ * known _not_ to have been retransmitted [see Karn/Partridge
+ * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
+ * piece by Van Jacobson.
+ * NOTE: the next three routines used to be one big routine.
+ * To save cycles in the RFC 1323 implementation it was better to break
+ * it up into three procedures. -- erics
+ */
+static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
+{
+	long m = sample_rtt_us; /* RTT */
+	u32 srtt = peer->srtt_us;
+
+	/* The following amusing code comes from Jacobson's
+	 * article in SIGCOMM '88. Note that rtt and mdev
+	 * are scaled versions of rtt and mean deviation.
+	 * This is designed to be as fast as possible;
+	 * m stands for "measurement".
+	 *
+	 * In a 1990 paper the rto value is changed to:
+	 * RTO = rtt + 4 * mdev
+	 *
+	 * Funny. This algorithm seems to be very broken.
+	 * These formulae increase RTO, when it should be decreased, increase
+	 * too slowly, when it should be increased quickly, decrease too quickly
+	 * etc. I guess in BSD RTO takes ONE value, so it absolutely
+	 * does not matter how to _calculate_ it. Seems it was a trap
+	 * that VJ failed to avoid. 8)
+	 */
+	if (srtt != 0) {
+		m -= (srtt >> 3); /* m is now error in rtt est */
+		srtt += m; /* rtt = 7/8 rtt + 1/8 new */
+		if (m < 0) {
+			m = -m; /* m is now abs(error) */
+			m -= (peer->mdev_us >> 2); /* similar update on mdev */
+			/* This is similar to one of Eifel findings.
+			 * Eifel blocks mdev updates when rtt decreases.
+			 * This solution is a bit different: we use finer gain
+			 * for mdev in this case (alpha*beta).
+			 * Like Eifel it also prevents growth of rto,
+			 * but also it limits too fast rto decreases,
+			 * happening in pure Eifel.
+			 */
+			if (m > 0)
+				m >>= 3;
+		} else {
+			m -= (peer->mdev_us >> 2); /* similar update on mdev */
+		}
+
+		peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
+		if (peer->mdev_us > peer->mdev_max_us) {
+			peer->mdev_max_us = peer->mdev_us;
+			if (peer->mdev_max_us > peer->rttvar_us)
+				peer->rttvar_us = peer->mdev_max_us;
+		}
+	} else {
+		/* no previous measure. */
+		srtt = m << 3; /* take the measured time to be rtt */
+		peer->mdev_us = m << 1; /* make sure rto = 3*rtt */
+		peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
+		peer->mdev_max_us = peer->rttvar_us;
+	}
+
+	peer->srtt_us = max(1U, srtt);
+}
+
+/*
+ * Calculate rto without backoff. This is the second half of Van Jacobson's
+ * routine referred to above.
+ */
+static void rxrpc_set_rto(struct rxrpc_peer *peer)
+{
+	u32 rto;
+
+	/* 1.
If rtt variance happened to be less than 50msec, it is hallucination.
+	 * It cannot be less due to utterly erratic ACK generation made
+	 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_
+	 * to do with delayed acks, because at cwnd>2 true delack timeout
+	 * is invisible. Actually, Linux-2.4 also generates erratic
+	 * ACKs in some circumstances.
+	 */
+	rto = __rxrpc_set_rto(peer);
+
+	/* 2. Fixups made earlier cannot be right.
+	 * If we do not estimate RTO correctly without them,
+	 * all the algo is pure shit and should be replaced
+	 * with a correct one, which is exactly what we pretend to do.
+	 */
+
+	/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
+	 * guarantees that rto is higher.
+	 */
+	peer->rto_j = rxrpc_bound_rto(rto);
+}
+
+static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
+{
+	if (rtt_us < 0)
+		return;
+
+	//rxrpc_update_rtt_min(peer, rtt_us);
+	rxrpc_rtt_estimator(peer, rtt_us);
+	rxrpc_set_rto(peer);
+
+	/* RFC6298: only reset backoff on valid RTT measurement. */
+	peer->backoff = 0;
+}
+
+/*
+ * Add RTT information to cache. This is called in softirq mode and has
+ * exclusive access to the peer RTT data.
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+			ktime_t send_time, ktime_t resp_time)
+{
+	struct rxrpc_peer *peer = call->peer;
+	s64 rtt_us;
+
+	rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
+	if (rtt_us < 0)
+		return;
+
+	spin_lock(&peer->rtt_input_lock);
+	rxrpc_ack_update_rtt(peer, rtt_us);
+	if (peer->rtt_count < 3)
+		peer->rtt_count++;
+	spin_unlock(&peer->rtt_input_lock);
+
+	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+			   peer->srtt_us >> 3, peer->rto_j);
+}
+
+/*
+ * Get the retransmission timeout to set in jiffies, backing it off each time
+ * we retransmit.
+ */
+unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+{
+	u64 timo_j;
+	u8 backoff = READ_ONCE(peer->backoff);
+
+	timo_j = peer->rto_j;
+	timo_j <<= backoff;
+	if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
+		WRITE_ONCE(peer->backoff, backoff + 1);
+
+	if (timo_j < 1)
+		timo_j = 1;
+
+	return timo_j;
+}
+
+void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
+{
+	peer->rto_j = RXRPC_TIMEOUT_INIT;
+	peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
+	peer->backoff = 0;
+	//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
+}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 098f1f9ec53b..52a24d4ef5d8 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1148,7 +1148,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 	ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
 				   &expiry, _abort_code);
 	if (ret < 0)
-		goto temporary_error_free_resp;
+		goto temporary_error_free_ticket;
 
 	/* use the session key from inside the ticket to decrypt the
 	 * response */
@@ -1230,7 +1230,6 @@ protocol_error:
 temporary_error_free_ticket:
 	kfree(ticket);
-temporary_error_free_resp:
 	kfree(response);
 temporary_error:
 	/* Ignore the response packet if we got a temporary error such as
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 136eb465bfcb..1a340eb0abf7 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -17,6 +17,21 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Return true if there's sufficient Tx queue space.
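+ *
+ * The usable window is min(call->tx_winsize, cong_cwnd + cong_extra);
+ * there is space while tx_top - tx_hard_ack stays below it. For
+ * example, with tx_winsize 64, cong_cwnd 10 and cong_extra 0, at most
+ * 10 packets may be awaiting hard-ACK before the sender must wait.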
+ */ +static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win) +{ + unsigned int win_size = + min_t(unsigned int, call->tx_winsize, + call->cong_cwnd + call->cong_extra); + rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack); + + if (_tx_win) + *_tx_win = tx_win; + return call->tx_top - tx_win < win_size; +} + /* * Wait for space to appear in the Tx queue or a signal to occur. */ @@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, { for (;;) { set_current_state(TASK_INTERRUPTIBLE); - if (call->tx_top - call->tx_hard_ack < - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) + if (rxrpc_check_tx_space(call, NULL)) return 0; if (call->state >= RXRPC_CALL_COMPLETE) @@ -49,40 +62,36 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, * Wait for space to appear in the Tx queue uninterruptibly, but with * a timeout of 2*RTT if no progress was made and a signal occurred. */ -static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, +static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx, struct rxrpc_call *call) { rxrpc_seq_t tx_start, tx_win; - signed long rtt2, timeout; - u64 rtt; + signed long rtt, timeout; - rtt = READ_ONCE(call->peer->rtt); - rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 2) - rtt2 = 2; + rtt = READ_ONCE(call->peer->srtt_us) >> 3; + rtt = usecs_to_jiffies(rtt) * 2; + if (rtt < 2) + rtt = 2; - timeout = rtt2; + timeout = rtt; tx_start = READ_ONCE(call->tx_hard_ack); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); tx_win = READ_ONCE(call->tx_hard_ack); - if (call->tx_top - tx_win < - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) + if (rxrpc_check_tx_space(call, &tx_win)) return 0; if (call->state >= RXRPC_CALL_COMPLETE) return call->error; - if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) && - timeout == 0 && + if (timeout == 0 && tx_win == tx_start && signal_pending(current)) return -EINTR; if (tx_win != tx_start) { - timeout = rtt2; + timeout = rtt; tx_start = tx_win; } @@ -91,6 +100,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, } } +/* + * Wait for space to appear in the Tx queue uninterruptibly. 
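+ *
+ * Unlike the two variants above, this one never checks for signals and
+ * so never returns -EINTR: it keeps sleeping, via schedule_timeout()
+ * which updates *timeo, until space appears or the call completes.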
+ */ +static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, + struct rxrpc_call *call, + long *timeo) +{ + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (rxrpc_check_tx_space(call, NULL)) + return 0; + + if (call->state >= RXRPC_CALL_COMPLETE) + return call->error; + + trace_rxrpc_transmit(call, rxrpc_transmit_wait); + *timeo = schedule_timeout(*timeo); + } +} + /* * wait for space to appear in the transmit/ACK window * - caller holds the socket locked @@ -108,10 +137,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, add_wait_queue(&call->waitq, &myself); - if (waitall) - ret = rxrpc_wait_for_tx_window_nonintr(rx, call); - else - ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); + switch (call->interruptibility) { + case RXRPC_INTERRUPTIBLE: + if (waitall) + ret = rxrpc_wait_for_tx_window_waitall(rx, call); + else + ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo); + break; + case RXRPC_PREINTERRUPTIBLE: + case RXRPC_UNINTERRUPTIBLE: + default: + ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo); + break; + } remove_wait_queue(&call->waitq, &myself); set_current_state(TASK_RUNNING); @@ -232,16 +270,9 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, _debug("need instant resend %d", ret); rxrpc_instant_resend(call, ix); } else { - unsigned long now = jiffies, resend_at; + unsigned long now = jiffies; + unsigned long resend_at = now + call->peer->rto_j; - if (call->peer->rtt_usage > 1) - resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); - else - resend_at = rxrpc_resend_timeout; - if (resend_at < 1) - resend_at = 1; - - resend_at += now; WRITE_ONCE(call->resend_at, resend_at); rxrpc_reduce_call_timer(call, resend_at, now, rxrpc_timer_set_for_send); @@ -275,7 +306,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; more = msg->msg_flags & MSG_MORE; @@ -302,9 +333,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, _debug("alloc"); - if (call->tx_top - call->tx_hard_ack >= - min_t(unsigned int, call->tx_winsize, - call->cong_cwnd + call->cong_extra)) { + if (!rxrpc_check_tx_space(call, NULL)) { ret = -EAGAIN; if (msg->msg_flags & MSG_DONTWAIT) goto maybe_error; @@ -619,7 +648,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) .call.tx_total_len = -1, .call.user_call_ID = 0, .call.nr_timeouts = 0, - .call.intr = true, + .call.interruptibility = RXRPC_INTERRUPTIBLE, .abort_code = 0, .command = RXRPC_CMD_SEND_DATA, .exclusive = false, @@ -654,6 +683,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. 
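+	 * The call may already have completed, e.g. on a remote abort, so
+	 * check the state here rather than queueing data on a dead call.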
*/ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index 2bbb38161851..18dade4e6f9a 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -71,15 +71,6 @@ static struct ctl_table rxrpc_sysctl_table[] = { .extra1 = (void *)&one_jiffy, .extra2 = (void *)&max_jiffies, }, - { - .procname = "resend_timeout", - .data = &rxrpc_resend_timeout, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = proc_doulongvec_ms_jiffies_minmax, - .extra1 = (void *)&one_jiffy, - .extra2 = (void *)&max_jiffies, - }, /* Non-time values */ { diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 69d4676a402f..716cad677318 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -303,6 +303,8 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, mutex_lock(&idrinfo->lock); idr_for_each_entry_ul(idr, p, tmp, id) { + if (IS_ERR(p)) + continue; ret = tcf_idr_release_unsafe(p); if (ret == ACT_P_DELETED) { module_put(ops->owner); @@ -451,17 +453,6 @@ err1: } EXPORT_SYMBOL(tcf_idr_create); -void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) -{ - struct tcf_idrinfo *idrinfo = tn->idrinfo; - - mutex_lock(&idrinfo->lock); - /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ - WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index))); - mutex_unlock(&idrinfo->lock); -} -EXPORT_SYMBOL(tcf_idr_insert); - /* Cleanup idr index that was allocated but not initialized. */ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index) @@ -715,13 +706,6 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) return ret; } -static int tcf_action_destroy_1(struct tc_action *a, int bind) -{ - struct tc_action *actions[] = { a, NULL }; - - return tcf_action_destroy(actions, bind); -} - static int tcf_action_put(struct tc_action *p) { return __tcf_action_put(p, false); @@ -839,6 +823,26 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = { [TCA_ACT_OPTIONS] = { .type = NLA_NESTED }, }; +void tcf_idr_insert_many(struct tc_action *actions[]) +{ + int i; + + for (i = 0; i < TCA_ACT_MAX_PRIO; i++) { + struct tc_action *a = actions[i]; + struct tcf_idrinfo *idrinfo; + + if (!a) + continue; + idrinfo = a->idrinfo; + mutex_lock(&idrinfo->lock); + /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if + * it is just created, otherwise this is just a nop. + */ + idr_replace(&idrinfo->action_idr, a, a->tcfa_index); + mutex_unlock(&idrinfo->lock); + } +} + struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, @@ -931,12 +935,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (err != ACT_P_CREATED) module_put(a_o->owner); - if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) && - !rcu_access_pointer(a->goto_chain)) { - tcf_action_destroy_1(a, bind); - NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain"); - return ERR_PTR(-EINVAL); - } + if (!bind && ovr && err == ACT_P_CREATED) + refcount_set(&a->tcfa_refcnt, 2); return a; @@ -981,6 +981,11 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, actions[i - 1] = act; } + /* We have to commit them all together, because if any error happened in + * between, we could not handle the failure gracefully. 
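+	 * Each action still sits in the idr as the ERR_PTR(-EBUSY)
+	 * placeholder left by tcf_idr_check_alloc(); tcf_idr_insert_many()
+	 * merely swaps the placeholders for the fully-initialised actions,
+	 * so nothing half-built ever becomes visible to concurrent readers.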
+ */ + tcf_idr_insert_many(actions); + *attr_size = tcf_action_full_attrs_size(sz); return i - 1; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 04b7bd4ec751..0b015ce802a4 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -12,6 +12,7 @@ #include <linux/bpf.h> #include <net/netlink.h> +#include <net/sock.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> @@ -53,6 +54,8 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, bpf_compute_data_pointers(skb); filter_res = BPF_PROG_RUN(filter, skb); } + if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK) + skb_orphan(skb); rcu_read_unlock(); /* A BPF program may overwrite the default action opcode. @@ -361,9 +364,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (res == ACT_P_CREATED) { - tcf_idr_insert(tn, *act); - } else { + if (res != ACT_P_CREATED) { /* make sure the program being replaced is no longer executing */ synchronize_rcu(); tcf_bpf_cfg_cleanup(&old); diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 2b43cacf82af..b00105e62334 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&ca->tcf_tm); bstats_update(&ca->tcf_bstats, skb); - if (skb->protocol == htons(ETH_P_IP)) { + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): if (skb->len < sizeof(struct iphdr)) goto out; proto = NFPROTO_IPV4; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + break; + case htons(ETH_P_IPV6): if (skb->len < sizeof(struct ipv6hdr)) goto out; proto = NFPROTO_IPV6; - } else { + break; + default: goto out; } @@ -136,7 +139,6 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ci->net = net; ci->zone = parm->zone; - tcf_idr_insert(tn, *a); ret = ACT_P_CREATED; } else if (ret > 0) { ci = to_connmark(*a); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index d3cfad88dc3a..fa1b1fd10c44 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -110,9 +110,6 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, if (params_new) kfree_rcu(params_new, rcu); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); - return ret; put_chain: if (goto_ch) @@ -587,7 +584,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, goto drop; update_flags = params->update_flags; - protocol = tc_skb_protocol(skb); + protocol = skb_protocol(skb, false); again: switch (protocol) { case cpu_to_be16(ETH_P_IP): diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 0586546c20d7..6119c31dcd07 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -100,7 +100,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb) { u8 family = NFPROTO_UNSPEC; - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): family = NFPROTO_IPV4; break; @@ -186,7 +186,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); err = nf_ct_frag6_gather(net, skb, user); if (err && err != -EINPROGRESS) - goto out_free; + return err; #else err = -EOPNOTSUPP; goto out_free; @@ -222,6 +222,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, const struct nf_nat_range2 *range, enum nf_nat_manip_type maniptype) { + __be16 proto = skb_protocol(skb, true); int hooknum, err = NF_ACCEPT; /* See HOOK2MANIP(). 
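+	 * (HOOK2MANIP() derives the manip type from the hook number:
+	 * NF_INET_POST_ROUTING and NF_INET_LOCAL_IN imply source NAT,
+	 * the remaining hooks destination NAT.)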
 */
@@ -233,14 +234,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
 	switch (ctinfo) {
 	case IP_CT_RELATED:
 	case IP_CT_RELATED_REPLY:
-		if (skb->protocol == htons(ETH_P_IP) &&
+		if (proto == htons(ETH_P_IP) &&
 		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
 							   hooknum))
 				err = NF_DROP;
 			goto out;
-		} else if (IS_ENABLED(CONFIG_IPV6) &&
-			   skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
 			__be16 frag_off;
 			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 			int hdrlen = ipv6_skip_exthdr(skb,
@@ -740,8 +740,6 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
 		tcf_chain_put_by_act(goto_ch);
 	if (params)
 		call_rcu(&params->rcu, tcf_ct_params_free);
-	if (res == ACT_P_CREATED)
-		tcf_idr_insert(tn, *a);
 
 	return res;
 
@@ -993,4 +991,3 @@ MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
 MODULE_DESCRIPTION("Connection tracking action");
 MODULE_LICENSE("GPL v2");
-
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index f45995a6237a..9722204399a4 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
 	action = READ_ONCE(ca->tcf_action);
 
 	wlen = skb_network_offset(skb);
-	if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+	switch (skb_protocol(skb, true)) {
+	case htons(ETH_P_IP):
 		wlen += sizeof(struct iphdr);
 		if (!pskb_may_pull(skb, wlen))
 			goto out;
 
 		proto = NFPROTO_IPV4;
-	} else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+		break;
+	case htons(ETH_P_IPV6):
 		wlen += sizeof(struct ipv6hdr);
 		if (!pskb_may_pull(skb, wlen))
 			goto out;
 
 		proto = NFPROTO_IPV6;
-	} else {
+		break;
+	default:
 		goto out;
 	}
@@ -266,9 +269,6 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
 	if (cp_new)
 		kfree_rcu(cp_new, rcu);
 
-	if (ret == ACT_P_CREATED)
-		tcf_idr_insert(tn, *a);
-
 	return ret;
 
 put_chain:
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 324f1d1f6d47..faf68a44b845 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -139,8 +139,6 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 	if (goto_ch)
 		tcf_chain_put_by_act(goto_ch);
 
-	if (ret == ACT_P_CREATED)
-		tcf_idr_insert(tn, *a);
 	return ret;
 release_idr:
 	tcf_idr_release(*a, bind);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index a0cfb4793c93..488d10476e85 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a)
 	kfree_rcu(p, rcu);
 }
 
+static int load_metalist(struct nlattr **tb, bool rtnl_held)
+{
+	int i;
+
+	for (i = 1; i < max_metacnt; i++) {
+		if (tb[i]) {
+			void *val = nla_data(tb[i]);
+			int len = nla_len(tb[i]);
+			int rc;
+
+			rc = load_metaops_and_vet(i, val, len, rtnl_held);
+			if (rc != 0)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 			     bool exists, bool rtnl_held)
 {
@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 			val = nla_data(tb[i]);
 			len = nla_len(tb[i]);
 
-			rc = load_metaops_and_vet(i, val, len, rtnl_held);
-			if (rc != 0)
-				return rc;
-
 			rc = add_metainfo(ife, i, val, len, exists);
 			if (rc)
 				return rc;
@@ -508,6 +523,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 	if (!p)
 		return -ENOMEM;
 
+	if (tb[TCA_IFE_METALST]) {
+		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+						  tb[TCA_IFE_METALST], NULL,
+
NULL); + if (err) { + kfree(p); + return err; + } + err = load_metalist(tb2, rtnl_held); + if (err) { + kfree(p); + return err; + } + } + index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) { @@ -569,15 +599,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, } if (tb[TCA_IFE_METALST]) { - err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, - tb[TCA_IFE_METALST], NULL, - NULL); - if (err) - goto metadata_parse_err; err = populate_metalist(ife, tb2, exists, rtnl_held); if (err) goto metadata_parse_err; - } else { /* if no passed metadata allow list or passed allow-all * then here we process by adding as many supported metadatum @@ -602,9 +626,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (p) kfree_rcu(p, rcu); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); - return ret; metadata_parse_err: if (goto_ch) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 214a03d405cf..02b0cb67643e 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -189,8 +189,6 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, ipt->tcfi_t = t; ipt->tcfi_hook = hook; spin_unlock_bh(&ipt->tcf_lock); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; err3: diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 27f624971121..8327ef9793ef 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -194,8 +194,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, spin_lock(&mirred_list_lock); list_add(&m->tcfm_list, &mirred_list); spin_unlock(&mirred_list_lock); - - tcf_idr_insert(tn, *a); } return ret; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index db570d2bd0e0..0fccae356dc1 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -82,12 +82,15 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, goto drop; break; case TCA_MPLS_ACT_PUSH: - new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true))); if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; break; case TCA_MPLS_ACT_MODIFY: + if (!pskb_may_pull(skb, + skb_network_offset(skb) + MPLS_HLEN)) + goto drop; new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false); if (skb_mpls_update_lse(skb, new_lse)) goto drop; @@ -273,8 +276,6 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, if (p) kfree_rcu(p, rcu); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) @@ -410,6 +411,7 @@ static void __exit mpls_cleanup_module(void) module_init(mpls_init_module); module_exit(mpls_cleanup_module); +MODULE_SOFTDEP("post: mpls_gso"); MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MPLS manipulation actions"); diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index ea4c5359e7df..8b5d2360a4dc 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -93,9 +93,6 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); - return ret; release_idr: tcf_idr_release(*a, bind); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b5bc631b96b7..ff4f2437b592 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -237,8 +237,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr 
*nla, spin_unlock_bh(&p->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 89c04c52af3d..8fd23a8b88a5 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -201,8 +201,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (new) kfree_rcu(new, rcu); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; failure: diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 514456a0b9a8..74450b0f69fc 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -116,8 +116,6 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 6120e56117ca..6b0617de71c0 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -156,8 +156,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, goto release_idr; } - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 6a8d3337c577..f736da513d53 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, if (params->flags & SKBEDIT_F_INHERITDSFIELD) { int wlen = skb_network_offset(skb); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) @@ -214,8 +214,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 888437f97ba6..e858a0a9c045 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -190,8 +190,6 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index d55669e14741..a5a2bf01eb9b 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -315,7 +315,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port, 0, flags, - key_id, 0); + key_id, opts_len); } else { NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst"); ret = -EINVAL; @@ -392,9 +392,6 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); - return ret; put_chain: diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 08aaf719a70f..3c26042f4ea6 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -228,8 +228,6 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (p) kfree_rcu(p, rcu); - if (ret == ACT_P_CREATED) - tcf_idr_insert(tn, *a); return ret; put_chain: if (goto_ch) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index c2cdd0fc2e70..83e5a8aa2fb1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1571,7 +1571,7 @@ int tcf_classify(struct sk_buff *skb, const 
struct tcf_proto *tp, reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { - __be16 protocol = tc_skb_protocol(skb); + __be16 protocol = skb_protocol(skb, false); int err; if (tp->protocol != protocol && @@ -2005,6 +2005,7 @@ replay: err = PTR_ERR(block); goto errout; } + block->classid = parent; chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; if (chain_index > TC_ACT_EXT_VAL_MASK) { @@ -2547,12 +2548,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; parent = tcm->tcm_parent; - if (!parent) { + if (!parent) q = dev->qdisc; - parent = q->handle; - } else { + else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); - } if (!q) goto out; cops = q->ops->cl_ops; @@ -2568,6 +2567,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) block = cops->tcf_block(q, cl, NULL); if (!block) goto out; + parent = block->classid; if (tcf_block_shared(block)) q = NULL; } @@ -3026,6 +3026,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, act->type = exts->type = TCA_OLD_COMPAT; exts->actions[0] = act; exts->nr_actions = 1; + tcf_idr_insert_many(exts->actions); } else if (exts->action && tb[exts->action]) { int err; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 80ae7b9fa90a..ab53a93b2f2b 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) if (dst) return ntohl(dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_proto(const struct sk_buff *skb, @@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, if (flow->ports.ports) return ntohs(flow->ports.dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_iif(const struct sk_buff *skb) @@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, src.u3.ip)); case htons(ETH_P_IPV6): @@ -164,7 +164,7 @@ fallback: static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, dst.u3.ip)); case htons(ETH_P_IPV6): diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1d270540e74d..c5a0f2c2635e 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -310,7 +310,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* skb_flow_dissect() does not set n_proto in case an unknown * protocol, so do it rather here. 
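+	 * With skip_vlan == false, skb_protocol() behaves like the old
+	 * tc_skb_protocol() helper: for a VLAN-tagged frame it reports
+	 * the VLAN ethertype rather than digging down to the encapsulated
+	 * protocol, which passing true would do.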
*/ - skb_key.basic.n_proto = skb->protocol; + skb_key.basic.n_proto = skb_protocol(skb, false); skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 5efa3e7ace15..315ca2b7e2ed 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -526,7 +526,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, rcu_assign_pointer(f->next, f1); rcu_assign_pointer(*fp, f); - if (fold && fold->handle && f->handle != fold->handle) { + if (fold) { th = to_hash(fold->handle); h = from_hash(fold->handle >> 16); b = rtnl_dereference(head->table[th]); diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 61e95029c18f..c9399e81c505 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (tb[TCA_TCINDEX_MASK]) cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); - if (tb[TCA_TCINDEX_SHIFT]) + if (tb[TCA_TCINDEX_SHIFT]) { cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); - + if (cp->shift > 16) { + err = -EINVAL; + goto errout; + } + } if (!cp->hash) { /* Hash not specified, use perfect hash if the upper limit * of the hashing index is below the threshold. diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index df00566d327d..c95cf86fb431 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em, }; int ret, network_offset; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): state.pf = NFPROTO_IPV4; if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c index 9fff6480acc6..e2c157df3f8b 100644 --- a/net/sched/em_ipt.c +++ b/net/sched/em_ipt.c @@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em, struct nf_hook_state state; int ret; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) return 0; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 3177dcb17316..ad007cdcec97 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -195,7 +195,7 @@ META_COLLECTOR(int_priority) META_COLLECTOR(int_protocol) { /* Let userspace take care of the byte ordering */ - dst->value = tc_skb_protocol(skb); + dst->value = skb_protocol(skb, false); } META_COLLECTOR(int_pkttype) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 50794125bf02..3b1b5ee52137 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -409,7 +409,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, { struct qdisc_rate_table *rtab; - if (tab == NULL || r->rate == 0 || r->cell_log == 0 || + if (tab == NULL || r->rate == 0 || + r->cell_log == 0 || r->cell_log >= 32 || nla_len(tab) != TC_RTAB_SIZE) { NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching"); return NULL; @@ -2156,7 +2157,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, struct tcmsg *tcm, struct netlink_callback *cb, - int *t_p, int s_t) + int *t_p, int s_t, bool recur) { struct Qdisc *q; int b; @@ -2167,7 +2168,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) 
< 0) return -1; - if (!qdisc_dev(root)) + if (!qdisc_dev(root) || !recur) return 0; if (tcm->tcm_parent) { @@ -2202,13 +2203,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; t = 0; - if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) + if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, - &t, s_t) < 0) + &t, s_t, false) < 0) goto done; done: diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index f4f9b8cdbffb..6385995dc700 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt, if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); + p->link.vcc = NULL; + p->link.sock = NULL; + p->link.common.classid = sch->handle; + p->link.ref = 1; err = tcf_block_get(&p->link.block, &p->link.filter_list, sch, extack); if (err) return err; - p->link.vcc = NULL; - p->link.sock = NULL; - p->link.common.classid = sch->handle; - p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 2277369feae5..896c0562cb42 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -592,7 +592,7 @@ static void cake_update_flowkeys(struct flow_keys *keys, struct nf_conntrack_tuple tuple = {}; bool rev = !skb->_nfct; - if (tc_skb_protocol(skb) != htons(ETH_P_IP)) + if (skb_protocol(skb, true) != htons(ETH_P_IP)) return; if (!nf_ct_get_tuple_skb(&tuple, skb)) @@ -1515,32 +1515,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) return idx + (tin << 16); } -static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) +static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash) { - int wlen = skb_network_offset(skb); + const int offset = skb_network_offset(skb); + u16 *buf, buf_; u8 dscp; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): - wlen += sizeof(struct iphdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; - if (wash && dscp) + /* ToS is in the second byte of iphdr */ + dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct iphdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_IPV6): - wlen += sizeof(struct ipv6hdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; - if (wash && dscp) + /* Traffic class is in the first and second bytes of ipv6hdr */ + dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct ipv6hdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_ARP): @@ -1557,14 +1576,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, { struct cake_sched_data *q = qdisc_priv(sch); 
u32 tin, mark; + bool wash; u8 dscp; /* Tin selection: Default to diffserv-based selection, allow overriding - * using firewall marks or skb->priority. + * using firewall marks or skb->priority. Call DSCP parsing early if + * wash is enabled, otherwise defer to below to skip unneeded parsing. */ - dscp = cake_handle_diffserv(skb, - q->rate_flags & CAKE_FLAG_WASH); mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; + wash = !!(q->rate_flags & CAKE_FLAG_WASH); + if (wash) + dscp = cake_handle_diffserv(skb, wash); if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) tin = 0; @@ -1578,6 +1600,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; else { + if (!wash) + dscp = cake_handle_diffserv(skb, wash); tin = q->tin_index[dscp]; if (unlikely(tin >= q->tin_cnt)) @@ -2679,7 +2703,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, qdisc_watchdog_init(&q->watchdog, sch); if (opt) { - int err = cake_change(sch, opt, extack); + err = cake_change(sch, opt, extack); if (err) return err; @@ -2996,7 +3020,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, PUT_STAT_S32(BLUE_TIMER_US, ktime_to_us( ktime_sub(now, - flow->cvars.blue_timer))); + flow->cvars.blue_timer))); } if (flow->cvars.dropping) { PUT_STAT_S32(DROP_NEXT_US, diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index dba70377bbd9..e54f6eabfa0c 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch) sch->q.qlen = 0; sch->qstats.backlog = 0; - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + if (q->tab) + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; red_restart(&q->vars); } @@ -350,6 +351,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt, struct sk_buff **old = NULL; unsigned int mask; u32 max_P; + u8 *stab; if (opt == NULL) return -EINVAL; @@ -366,8 +368,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt, max_P = tb[TCA_CHOKE_MAX_P] ? 
nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; ctl = nla_data(tb[TCA_CHOKE_PARMS]); - - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) + stab = nla_data(tb[TCA_CHOKE_STAB]); + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) return -EINVAL; if (ctl->limit > CHOKE_MAX_QUEUE) @@ -417,7 +419,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt, red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, - nla_data(tb[TCA_CHOKE_STAB]), + stab, max_P); red_set_vars(&q->vars); diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 05605b30bef3..2b88710994d7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (p->set_tc_index) { int wlen = skb_network_offset(skb); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen) || @@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) index = skb->tc_index & (p->indices - 1); pr_debug("index %d->%d\n", skb->tc_index, index); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask, p->mv[index].value); @@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) */ if (p->mv[index].mask != 0xff || p->mv[index].value) pr_warn("%s: unsupported protocol %d\n", - __func__, ntohs(tc_skb_protocol(skb))); + __func__, ntohs(skb_protocol(skb, true))); break; } diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index b1da5589a0c6..c48f91075b5c 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -82,7 +82,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) if (q->skip_sock_check) goto skip; - if (!sk) + if (!sk || !sk_fullsock(sk)) return false; if (!sock_flag(sk, SOCK_TXTIME)) @@ -137,8 +137,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) struct sock_exterr_skb *serr; struct sk_buff *clone; ktime_t txtime = skb->tstamp; + struct sock *sk = skb->sk; - if (!skb->sk || !(skb->sk->sk_txtime_report_errors)) + if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors)) return; clone = skb_clone(skb, GFP_ATOMIC); @@ -154,7 +155,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */ serr->ee.ee_info = txtime; /* low part of tstamp */ - if (sock_queue_err_skb(skb->sk, clone)) + if (sock_queue_err_skb(sk, clone)) kfree_skb(clone); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index c261c0a18868..76d72c3f52ed 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -417,7 +417,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) - q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 7c3c5fdb82a9..6e6147a81bc3 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -469,6 +469,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev_hold(dev); } } 
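+/* Exported for code outside this file that needs to re-arm the
+ * transmit watchdog directly.
+ */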
+EXPORT_SYMBOL_GPL(__netdev_watchdog_up); static void dev_watchdog_up(struct net_device *dev) { @@ -1125,29 +1126,41 @@ static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default) { + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); struct Qdisc *qdisc_default = _qdisc_default; - struct Qdisc *qdisc; - qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { - bool nolock = qdisc->flags & TCQ_F_NOLOCK; - - if (nolock) - spin_lock_bh(&qdisc->seqlock); - spin_lock_bh(qdisc_lock(qdisc)); - if (!(qdisc->flags & TCQ_F_BUILTIN)) set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - qdisc_reset(qdisc); - - spin_unlock_bh(qdisc_lock(qdisc)); - if (nolock) - spin_unlock_bh(&qdisc->seqlock); } } +static void dev_reset_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_unused) +{ + struct Qdisc *qdisc; + bool nolock; + + qdisc = dev_queue->qdisc_sleeping; + if (!qdisc) + return; + + nolock = qdisc->flags & TCQ_F_NOLOCK; + + if (nolock) + spin_lock_bh(&qdisc->seqlock); + spin_lock_bh(qdisc_lock(qdisc)); + + qdisc_reset(qdisc); + + spin_unlock_bh(qdisc_lock(qdisc)); + if (nolock) + spin_unlock_bh(&qdisc->seqlock); +} + static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; @@ -1206,12 +1219,20 @@ void dev_deactivate_many(struct list_head *head) dev_watchdog_down(dev); } - /* Wait for outstanding qdisc-less dev_queue_xmit calls. + /* Wait for outstanding qdisc-less dev_queue_xmit calls or + * outstanding qdisc enqueuing calls. * This is avoided if all devices are in dismantle phase : * Caller will call synchronize_net() for us */ synchronize_net(); + list_for_each_entry(dev, head, close_list) { + netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); + + if (dev_ingress_queue(dev)) + dev_reset_queue(dev, dev_ingress_queue(dev), NULL); + } + /* Wait for outstanding qdisc_run calls. 
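+	 * Every queue has already been reset above, and only after
+	 * synchronize_net() guaranteed that no enqueue was still in
+	 * flight, so this only has to wait out dequeue activity.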
*/ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 8599c6f31b05..f4132dc25ac0 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, struct gred_sched *table = qdisc_priv(sch); struct gred_sched_data *q = table->tab[dp]; - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) { + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) { NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters"); return -EINVAL; } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 42e557d48e4e..f4101a920d1f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma, /* default uniform distribution */ if (dist == NULL) - return ((rnd % (2 * sigma)) + mu) - sigma; + return ((rnd % (2 * (u32)sigma)) + mu) - sigma; t = dist->table[rnd % dist->size]; x = (sigma % NETEM_DIST_SCALE) * t; @@ -812,6 +812,10 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) q->slot_config.max_packets = INT_MAX; if (q->slot_config.max_bytes == 0) q->slot_config.max_bytes = INT_MAX; + + /* capping dist_jitter to the range acceptable by tabledist() */ + q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter)); + q->slot.packets_left = q->slot_config.max_packets; q->slot.bytes_left = q->slot_config.max_bytes; if (q->slot_config.min_delay | q->slot_config.max_delay | @@ -1037,6 +1041,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_NETEM_SLOT]) get_slot(q, tb[TCA_NETEM_SLOT]); + /* capping jitter to the range acceptable by tabledist() */ + q->jitter = min_t(s64, abs(q->jitter), INT_MAX); + return ret; get_table_failure: diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 1695421333e3..7741f102be4a 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -197,6 +197,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, struct tc_red_qopt *ctl; int err; u32 max_P; + u8 *stab; if (opt == NULL) return -EINVAL; @@ -213,7 +214,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; ctl = nla_data(tb[TCA_RED_PARMS]); - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) + stab = nla_data(tb[TCA_RED_STAB]); + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, + ctl->Scell_log, stab)) return -EINVAL; if (ctl->limit > 0) { @@ -238,7 +241,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, - nla_data(tb[TCA_RED_STAB]), + stab, max_P); red_set_vars(&q->vars); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index c787d4d46017..b92bafaf83f3 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -637,8 +637,17 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; + + /* slot->allot is a short, make sure quantum is not too big. 
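+	 * SFQ_ALLOT_SIZE() scales the quantum down by SFQ_ALLOT_SHIFT
+	 * bits; assuming the usual shift of 3, the largest quantum that
+	 * still fits is roughly SHRT_MAX << 3, i.e. about 256 KiB.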
*/ + if (ctl->quantum) { + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); + + if (scaled <= 0 || scaled > SHRT_MAX) + return -EINVAL; + } + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, - ctl_v1->Wlog)) + ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; if (ctl_v1 && ctl_v1->qth_min) { p = kmalloc(sizeof(*p), GFP_KERNEL); diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 0fb10abf7579..7a5e4c454715 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, { struct tc_skbprio_qopt *ctl = nla_data(opt); + if (opt->nla_len != nla_attr_size(sizeof(*ctl))) + return -EINVAL; + sch->limit = ctl->limit; return 0; } diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index b1eb12d33b9a..09116be99511 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, }; -static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, +static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, + struct sched_entry *entry, struct netlink_ext_ack *extack) { + int min_duration = length_to_duration(q, ETH_ZLEN); u32 interval = 0; if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) @@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, interval = nla_get_u32( tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); - if (interval == 0) { + /* The interval should allow at least the minimum ethernet + * frame to go out. + */ + if (interval < min_duration) { NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); return -EINVAL; } @@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, return 0; } -static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, - int index, struct netlink_ext_ack *extack) +static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, + struct sched_entry *entry, int index, + struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; int err; @@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, entry->index = index; - return fill_sched_entry(tb, entry, extack); + return fill_sched_entry(q, tb, entry, extack); } -static int parse_sched_list(struct nlattr *list, +static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, struct sched_gate_list *sched, struct netlink_ext_ack *extack) { @@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list, return -ENOMEM; } - err = parse_sched_entry(n, entry, i, extack); + err = parse_sched_entry(q, n, entry, i, extack); if (err < 0) { kfree(entry); return err; @@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list, return i; } -static int parse_taprio_schedule(struct nlattr **tb, +static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, struct sched_gate_list *new, struct netlink_ext_ack *extack) { @@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb, new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) - err = parse_sched_list( - tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack); + err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], + new, extack); if (err < 0) return err; @@ -1177,9 +1183,27 @@ static void 
taprio_offload_config_changed(struct taprio_sched *q) spin_unlock(&q->current_entry_lock); } -static void taprio_sched_to_offload(struct taprio_sched *q, +static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) +{ + u32 i, queue_mask = 0; + + for (i = 0; i < dev->num_tc; i++) { + u32 offset, count; + + if (!(tc_mask & BIT(i))) + continue; + + offset = dev->tc_to_txq[i].offset; + count = dev->tc_to_txq[i].count; + + queue_mask |= GENMASK(offset + count - 1, offset); + } + + return queue_mask; +} + +static void taprio_sched_to_offload(struct net_device *dev, struct sched_gate_list *sched, - const struct tc_mqprio_qopt *mqprio, struct tc_taprio_qopt_offload *offload) { struct sched_entry *entry; @@ -1194,7 +1218,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q, e->command = entry->command; e->interval = entry->interval; - e->gate_mask = entry->gate_mask; + e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask); + i++; } @@ -1202,7 +1227,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q, } static int taprio_enable_offload(struct net_device *dev, - struct tc_mqprio_qopt *mqprio, struct taprio_sched *q, struct sched_gate_list *sched, struct netlink_ext_ack *extack) @@ -1224,7 +1248,7 @@ static int taprio_enable_offload(struct net_device *dev, return -ENOMEM; } offload->enable = 1; - taprio_sched_to_offload(q, sched, mqprio, offload); + taprio_sched_to_offload(dev, sched, offload); err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); if (err < 0) { @@ -1456,7 +1480,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto free_sched; } - err = parse_taprio_schedule(tb, new_admin, extack); + err = parse_taprio_schedule(q, tb, new_admin, extack); if (err < 0) goto free_sched; @@ -1486,7 +1510,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, } if (FULL_OFFLOAD_IS_ENABLED(q->flags)) - err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); + err = taprio_enable_offload(dev, q, new_admin, extack); else err = taprio_disable_offload(dev, q, extack); if (err) @@ -1573,6 +1597,21 @@ free_sched: return err; } +static void taprio_reset(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + int i; + + hrtimer_cancel(&q->advance_timer); + if (q->qdiscs) { + for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) + qdisc_reset(q->qdiscs[i]); + } + sch->qstats.backlog = 0; + sch->q.qlen = 0; +} + static void taprio_destroy(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); @@ -1583,12 +1622,11 @@ static void taprio_destroy(struct Qdisc *sch) list_del(&q->taprio_list); spin_unlock(&taprio_list_lock); - hrtimer_cancel(&q->advance_timer); taprio_disable_offload(dev, q, NULL); if (q->qdiscs) { - for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) + for (i = 0; i < dev->num_tx_queues; i++) qdisc_put(q->qdiscs[i]); kfree(q->qdiscs); @@ -1930,6 +1968,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { .init = taprio_init, .change = taprio_change, .destroy = taprio_destroy, + .reset = taprio_reset, .peek = taprio_peek, .dequeue = taprio_dequeue, .enqueue = taprio_enqueue, diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 689ef6f3ded8..6af6b95bdb67 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); struct teql_master *master = dat->m; + if (!master) + return; + prev = master->slaves; if (prev) { do { @@ -239,7 +242,7 
@@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, char haddr[MAX_ADDR_LEN]; neigh_ha_snapshot(haddr, n, dev); - err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)), + err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)), haddr, NULL, skb->len); if (err < 0) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 41839b85c268..fb6f62264e87 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1569,12 +1569,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, enum sctp_scope scope, gfp_t gfp) { + struct sock *sk = asoc->base.sk; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + if (!inet_v6_ipv6only(sk)) + flags |= SCTP_ADDR4_ALLOWED; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 4278764d82b8..1d898ee4018c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -494,6 +494,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) out_err: /* Clean up any successful allocations */ sctp_auth_destroy_hmacs(ep->auth_hmacs); + ep->auth_hmacs = NULL; return -ENOMEM; } diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 53bc61537f44..701c5a4e441d 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, * well as the remote peer. */ if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_ALLOWED) && (flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && diff --git a/net/sctp/diag.c b/net/sctp/diag.c index ba9f64fdfd23..0eba74b94e6f 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -428,11 +428,12 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo); } -static int sctp_diag_dump_one(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, +static int sctp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { + struct sk_buff *in_skb = cb->skb; struct net *net = sock_net(in_skb->sk); + const struct nlmsghdr *nlh = cb->nlh; union sctp_addr laddr, paddr; struct sctp_comm_param commp = { .skb = in_skb, @@ -466,7 +467,7 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb, } static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, struct nlattr *bc) + const struct inet_diag_req_v2 *r) { u32 idiag_states = r->idiag_states; struct net *net = sock_net(skb->sk); diff --git a/net/sctp/input.c b/net/sctp/input.c index 4d2bcfc9d7f8..7807754f69c5 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -449,7 +449,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk, else { if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ/20))) - sctp_association_hold(asoc); + sctp_transport_hold(t); } } else { struct net *net = sock_net(sk); @@ -458,7 +458,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk, "encountered!\n", __func__); if (del_timer(&t->proto_unreach_timer)) - sctp_association_put(asoc); + sctp_transport_put(t); sctp_do_sm(net, SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 
bc734cfaa29e..52c92b8d827f 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct dst_entry *dst = NULL; - struct flowi6 *fl6 = &fl->u.ip6; + struct flowi _fl; + struct flowi6 *fl6 = &_fl.u.ip6; struct sctp_bind_addr *bp; struct ipv6_pinfo *np = inet6_sk(sk); struct sctp_sockaddr_entry *laddr; @@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, enum sctp_scope scope; __u8 matchlen = 0; - memset(fl6, 0, sizeof(struct flowi6)); + memset(&_fl, 0, sizeof(_fl)); fl6->daddr = daddr->v6.sin6_addr; fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; @@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, rcu_read_unlock(); dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); - if (!asoc || saddr) + if (!asoc || saddr) { + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); goto out; + } bp = &asoc->base.bind_addr; scope = sctp_scope(daddr); @@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if ((laddr->a.sa.sa_family == AF_INET6) && (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { rcu_read_unlock(); + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); goto out; } } @@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (!IS_ERR_OR_NULL(dst)) dst_release(dst); dst = bdst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); break; } @@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, dst_release(dst); dst = bdst; matchlen = bmatchlen; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); } rcu_read_unlock(); @@ -359,14 +369,12 @@ out: struct rt6_info *rt; rt = (struct rt6_info *)dst; - t->dst = dst; t->dst_cookie = rt6_get_cookie(rt); pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n", &rt->rt6i_dst.addr, rt->rt6i_dst.plen, - &fl6->saddr); + &fl->u.ip6.saddr); } else { t->dst = NULL; - pr_debug("no route\n"); } } @@ -635,8 +643,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) if (!(type & IPV6_ADDR_UNICAST)) return 0; - return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind || - ipv6_chk_addr(net, in6, NULL, 0); + return ipv6_can_nonlocal_bind(net, &sp->inet) || + ipv6_chk_addr(net, in6, NULL, 0); } /* This function checks if the address is a valid address to be used for @@ -925,8 +933,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) net = sock_net(&opt->inet.sk); rcu_read_lock(); dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); - if (!dev || !(opt->inet.freebind || - net->ipv6.sysctl.ip_nonlocal_bind || + if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) || ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0))) { rcu_read_unlock(); diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 0dab62b67b9a..adceb226ffab 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -36,6 +36,7 @@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> +#include <trace/events/sctp.h> /* Declare internal functions here. */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); @@ -1238,6 +1239,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) /* Grab the association's destination address list. */ transport_list = &asoc->peer.transport_addr_list; + /* SCTP path tracepoint for congestion control debugging. 
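+	 * It fires once per peer transport each time a SACK is processed,
+	 * so per-path congestion state such as cwnd and flight size can
+	 * be observed from userspace without patching the kernel.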
*/ + list_for_each_entry(transport, transport_list, transports) { + trace_sctp_probe_path(transport, asoc); + } + sack_ctsn = ntohl(sack->cum_tsn_ack); gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); asoc->stats.gapcnt += gap_ack_blocks; diff --git a/net/sctp/proc.c b/net/sctp/proc.c index f7da88ae20a5..982a87b3e11f 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) { struct sctp_ht_iter *iter = seq->private; + if (v && v != SEQ_START_TOKEN) { + struct sctp_transport *transport = v; + + sctp_transport_put(transport); + } + sctp_transport_walk_stop(&iter->hti); } @@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sctp_ht_iter *iter = seq->private; + if (v && v != SEQ_START_TOKEN) { + struct sctp_transport *transport = v; + + sctp_transport_put(transport); + } + ++*pos; return sctp_transport_get_next(seq_file_net(seq), &iter->hti); @@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) sk->sk_rcvbuf); seq_printf(seq, "\n"); - sctp_transport_put(transport); - return 0; } @@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); } - sctp_transport_put(transport); - return 0; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 681ffb3545db..981c7cbca46a 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, * sock as well as the remote peer. */ if (addr->a.sa.sa_family == AF_INET && - !(copy_flags & SCTP_ADDR4_PEERSUPP)) + (!(copy_flags & SCTP_ADDR4_ALLOWED) || + !(copy_flags & SCTP_ADDR4_PEERSUPP))) continue; if (addr->a.sa.sa_family == AF_INET6 && (!(copy_flags & SCTP_ADDR6_ALLOWED) || @@ -409,7 +410,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct rtable *rt; - struct flowi4 *fl4 = &fl->u.ip4; + struct flowi _fl; + struct flowi4 *fl4 = &_fl.u.ip4; struct sctp_bind_addr *bp; struct sctp_sockaddr_entry *laddr; struct dst_entry *dst = NULL; @@ -419,7 +421,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (t->dscp & SCTP_DSCP_SET_MASK) tos = t->dscp & SCTP_DSCP_VAL_MASK; - memset(fl4, 0x0, sizeof(struct flowi4)); + memset(&_fl, 0x0, sizeof(_fl)); fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; @@ -438,8 +440,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, &fl4->saddr); rt = ip_route_output_key(sock_net(sk), fl4); - if (!IS_ERR(rt)) + if (!IS_ERR(rt)) { dst = &rt->dst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); + } /* If there is no association or if a source address is passed, no * more validation is required. 
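The sctp_v6_get_dst() hunk above and the sctp_v4_get_dst() hunk continuing below apply the same fix: the flow key is assembled in an on-stack `_fl` and copied into the caller's `fl` only at the points where a route has actually been selected, so a failed or superseded lookup can no longer leave `t->fl` describing a different route than the one cached in `t->dst`. A minimal userspace sketch of the idiom; the types and route_lookup() are illustrative stand-ins, not kernel APIs:

```c
#include <stdio.h>
#include <string.h>

struct flow_key { unsigned int saddr, daddr; };
struct transport { struct flow_key fl; void *dst; };

static void *route_lookup(const struct flow_key *fl)
{
    /* stand-in for ip_route_output_key(): fails for daddr == 0 */
    return fl->daddr ? (void *)0x1 : NULL;
}

static void get_dst(struct transport *t, unsigned int daddr)
{
    struct flow_key _fl;    /* scratch copy, never published early */
    void *dst;

    memset(&_fl, 0, sizeof(_fl));
    _fl.daddr = daddr;

    dst = route_lookup(&_fl);
    if (dst) {
        /* commit the route and its flow key together, on success only */
        t->dst = dst;
        memcpy(&t->fl, &_fl, sizeof(_fl));
    } else {
        t->dst = NULL;      /* t->fl keeps its last good value */
    }
}

int main(void)
{
    struct transport t = { { 0, 0 }, NULL };

    get_dst(&t, 0x7f000001);
    printf("dst=%p daddr=%#x\n", t.dst, t.fl.daddr);
    get_dst(&t, 0);             /* failed lookup: fl untouched */
    printf("dst=%p daddr=%#x\n", t.dst, t.fl.daddr);
    return 0;
}
```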
@@ -502,27 +507,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, false); if (!odev || odev->ifindex != fl4->flowi4_oif) { - if (!dst) + if (!dst) { dst = &rt->dst; - else + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); + } else { dst_release(&rt->dst); + } continue; } dst_release(dst); dst = &rt->dst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); break; } out_unlock: rcu_read_unlock(); out: - t->dst = dst; - if (dst) + if (dst) { pr_debug("rt_dst:%pI4, rt_src:%pI4\n", - &fl4->daddr, &fl4->saddr); - else + &fl->u.ip4.daddr, &fl->u.ip4.saddr); + } else { + t->dst = NULL; pr_debug("no route\n"); + } } /* For v4, the source address is cached in the route entry(dst). So no need diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 48d63956a68c..d5eda966a706 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -858,7 +858,11 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, struct sctp_chunk *retval; __u32 ctsn; - ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + if (chunk && chunk->asoc) + ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map); + else + ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b06cae508158..0d225f891b61 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -419,7 +419,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t) /* Try again later. */ if (!mod_timer(&transport->proto_unreach_timer, jiffies + (HZ/20))) - sctp_association_hold(asoc); + sctp_transport_hold(transport); goto out_unlock; } @@ -435,7 +435,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t) out_unlock: bh_unlock_sock(sk); - sctp_association_put(asoc); + sctp_transport_put(transport); } /* Handle the timeout of the RE-CONFIG timer. */ @@ -1522,9 +1522,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, timeout = asoc->timeouts[cmd->obj.to]; BUG_ON(!timeout); - timer->expires = jiffies + timeout; - sctp_association_hold(asoc); - add_timer(timer); + /* + * SCTP has a hard time with timer starts. Because we process + * timer starts as side effects, it can be hard to tell if we + * have already started a timer or not, which leads to BUG + * halts when we call add_timer. So here, instead of just starting + * the timer, we take a reference only if the timer is not already + * pending, and use timer_reduce() to arm it with the shorter of + * the two expiration times. + */ + if (!timer_pending(timer)) + sctp_association_hold(asoc); + timer_reduce(timer, jiffies + timeout); break; case SCTP_CMD_TIMER_RESTART: @@ -1592,12 +1600,12 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, break; case SCTP_CMD_INIT_FAILED: - sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); + sctp_cmd_init_failed(commands, asoc, cmd->obj.u16); break; case SCTP_CMD_ASSOC_FAILED: sctp_cmd_assoc_failed(commands, asoc, event_type, - subtype, chunk, cmd->obj.u32); + subtype, chunk, cmd->obj.u16); break; case SCTP_CMD_INIT_COUNTER_INC: diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index c6d83a64eac3..84138a07e936 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1856,16 +1856,17 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( /* Update the content of current association. 
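The SCTP_CMD_TIMER_START hunk above swaps an unconditional add_timer(), which BUGs if the timer is already pending, for timer_pending() plus timer_reduce(), and takes the association reference only when the timer was not yet armed. A kernel-context sketch of the idiom; this is not a standalone program, and struct my_obj / my_obj_hold() are assumed placeholders standing in for the association and sctp_association_hold():

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_obj;
static void my_obj_hold(struct my_obj *obj);   /* assumed helper */

static void start_or_shorten_timer(struct my_obj *obj,
                                   struct timer_list *timer,
                                   unsigned long timeout)
{
    /* add_timer() BUGs on an already-pending timer, so it must
     * never be called blindly from a path that can run twice.
     */
    if (!timer_pending(timer))
        my_obj_hold(obj);       /* reference dropped by the handler */

    /* arm the timer, or shorten it if it already expires later */
    timer_reduce(timer, jiffies + timeout);
}
```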
*/ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - if (sctp_state(asoc, SHUTDOWN_PENDING) && + if ((sctp_state(asoc, SHUTDOWN_PENDING) || + sctp_state(asoc, SHUTDOWN_SENT)) && (sctp_sstate(asoc->base.sk, CLOSING) || sock_flag(asoc->base.sk, SOCK_DEAD))) { - /* if were currently in SHUTDOWN_PENDING, but the socket - * has been closed by user, don't transition to ESTABLISHED. - * Instead trigger SHUTDOWN bundled with COOKIE_ACK. + /* If the socket has been closed by user, don't + * transition to ESTABLISHED. Instead trigger SHUTDOWN + * bundled with COOKIE_ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, - SCTP_ST_CHUNK(0), NULL, + SCTP_ST_CHUNK(0), repl, commands); } else { sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -5470,7 +5471,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown( * in the Cumulative TSN Ack field the last sequential TSN it * has received from the peer. */ - reply = sctp_make_shutdown(asoc, NULL); + reply = sctp_make_shutdown(asoc, arg); if (!reply) goto nomem; @@ -6068,7 +6069,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire( disposition = SCTP_DISPOSITION_CONSUME; if (sctp_outq_is_empty(&asoc->outqueue)) { disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, - arg, commands); + NULL, commands); } return disposition; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ffd3262b7a41..2146372adff4 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk) skb_orphan(chunk->skb); } +#define traverse_and_process() \ +do { \ + msg = chunk->msg; \ + if (msg == prev_msg) \ + continue; \ + list_for_each_entry(c, &msg->chunks, frag_list) { \ + if ((clear && asoc->base.sk == c->skb->sk) || \ + (!clear && asoc->base.sk != c->skb->sk)) \ + cb(c); \ + } \ + prev_msg = msg; \ +} while (0) + static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, + bool clear, void (*cb)(struct sctp_chunk *)) { + struct sctp_datamsg *msg, *prev_msg = NULL; struct sctp_outq *q = &asoc->outqueue; + struct sctp_chunk *chunk, *c; struct sctp_transport *t; - struct sctp_chunk *chunk; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) list_for_each_entry(chunk, &t->transmitted, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->retransmit, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->sacked, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->abandoned, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->out_chunk_list, list) - cb(chunk); + traverse_and_process(); } static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk, @@ -342,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, return af; } +static void sctp_auto_asconf_init(struct sctp_sock *sp) +{ + struct net *net = sock_net(&sp->inet.sk); + + if (net->sctp.default_auto_asconf) { + spin_lock(&net->sctp.addr_wq_lock); + list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); + spin_unlock(&net->sctp.addr_wq_lock); + sp->do_auto_asconf = 1; + } +} + /* Bind a local address either to an endpoint or to an association. 
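The traverse_and_process() macro above deduplicates work per sctp_datamsg: chunks of one message sit adjacently on the transmit lists, so the walker skips a chunk whose parent message was just handled and instead visits each message's own chunk list once, filtering on socket ownership. It is a macro rather than a helper because the `continue` inside the do/while(0) ends the body and lets each of the five enclosing list_for_each_entry() loops advance. A self-contained userspace model with illustrative types:

```c
#include <stdbool.h>
#include <stdio.h>

struct msg;
struct chunk { struct msg *msg; int owned; };
struct msg { struct chunk *chunks[4]; int nr; };

static void for_each_tx_chunk(struct chunk **list, int n, bool clear,
                              void (*cb)(struct chunk *))
{
    struct msg *prev = NULL;

    for (int i = 0; i < n; i++) {
        struct msg *m = list[i]->msg;

        if (m == prev)          /* this message was just processed */
            continue;
        for (int j = 0; j < m->nr; j++) {
            struct chunk *c = m->chunks[j];

            /* mirror the clear/set ownership filter in the macro */
            if ((clear && c->owned) || (!clear && !c->owned))
                cb(c);
        }
        prev = m;
    }
}

static void toggle(struct chunk *c) { c->owned = !c->owned; }

int main(void)
{
    struct chunk c0 = { 0 }, c1 = { 0 };
    struct msg m = { { &c0, &c1 }, 2 };
    struct chunk *txlist[] = { &c0, &c1 };

    c0.msg = c1.msg = &m;
    c0.owned = 1;
    for_each_tx_chunk(txlist, 2, true, toggle);
    printf("c0.owned=%d c1.owned=%d\n", c0.owned, c1.owned);
    return 0;
}
```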
*/ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { @@ -403,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) return -EADDRINUSE; /* Refresh ephemeral port. */ - if (!bp->port) + if (!bp->port) { bp->port = inet_sk(sk)->inet_num; + sctp_auto_asconf_init(sp); + } /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. @@ -1304,7 +1333,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, kaddrs = memdup_user(addrs, addrs_size); if (IS_ERR(kaddrs)) - return PTR_ERR(kaddrs); + return PTR_ERR(kaddrs) == -EFAULT ? -EINVAL : PTR_ERR(kaddrs); /* Allow security module to validate connectx addresses. */ err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX, @@ -5100,19 +5129,6 @@ static int sctp_init_sock(struct sock *sk) sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); - /* Nothing can fail after this block, otherwise - * sctp_destroy_sock() will be called without addr_wq_lock held - */ - if (net->sctp.default_auto_asconf) { - spin_lock(&sock_net(sk)->sctp.addr_wq_lock); - list_add_tail(&sp->auto_asconf_list, - &net->sctp.auto_asconf_splist); - sp->do_auto_asconf = 1; - spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); - } else { - sp->do_auto_asconf = 0; - } - local_bh_enable(); return 0; @@ -8161,8 +8177,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; @@ -8181,20 +8195,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -8209,7 +8224,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. 
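The sctp_get_port_local() changes above replace one long local_bh_disable() section with per-bucket spin_lock_bh() and a cond_resched() between probes, so scanning a large ephemeral-port range no longer keeps bottom halves disabled for the whole search. A kernel-context sketch of the resulting loop shape; not standalone, and struct bucket, bucket_for() and port_in_use() are assumed helpers rather than kernel APIs:

```c
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int find_free_port(int low, int high)
{
    int rover, remaining = high - low + 1;

    for (rover = low; remaining > 0; remaining--, rover++) {
        struct bucket *head;

        if (rover > high)
            rover = low;
        head = bucket_for(rover);

        /* lock one bucket, with BHs off, for one probe only */
        spin_lock_bh(&head->lock);
        if (!port_in_use(head, rover)) {
            spin_unlock_bh(&head->lock);
            return rover;
        }
        spin_unlock_bh(&head->lock);

        cond_resched();     /* long search: stay preemptible */
    }
    return -EADDRINUSE;
}
```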
*/ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; @@ -8309,10 +8324,7 @@ success: ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } @@ -9326,13 +9338,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { - int ancestor_size = sizeof(struct inet_sock) + - sizeof(struct sctp_sock) - - offsetof(struct sctp_sock, pd_lobby); - - if (sk_from->sk_family == PF_INET6) - ancestor_size += sizeof(struct ipv6_pinfo); + size_t ancestor_size = sizeof(struct inet_sock); + ancestor_size += sk_from->sk_prot->obj_size; + ancestor_size -= offsetof(struct sctp_sock, pd_lobby); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } @@ -9394,6 +9403,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, return err; } + sctp_auto_asconf_init(newsp); + /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ @@ -9461,9 +9472,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); - sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); + sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w); sctp_assoc_migrate(assoc, newsk); - sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); + sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. diff --git a/net/sctp/stream.c b/net/sctp/stream.c index c1a100d2fed3..cd20638b6151 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -22,17 +22,11 @@ #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> -/* Migrates chunks from stream queues to new stream queues if needed, - * but not across associations. Also, removes those chunks to streams - * higher than the new max. - */ -static void sctp_stream_outq_migrate(struct sctp_stream *stream, - struct sctp_stream *new, __u16 outcnt) +static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) { struct sctp_association *asoc; struct sctp_chunk *ch, *temp; struct sctp_outq *outq; - int i; asoc = container_of(stream, struct sctp_association, stream); outq = &asoc->outqueue; @@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, sctp_chunk_free(ch); } +} + +/* Migrates chunks from stream queues to new stream queues if needed, + * but not across associations. Also, removes those chunks to streams + * higher than the new max. 
+ */ +static void sctp_stream_outq_migrate(struct sctp_stream *stream, + struct sctp_stream *new, __u16 outcnt) +{ + int i; + + if (stream->outcnt > outcnt) + sctp_stream_shrink_out(stream, outcnt); if (new) { /* Here we actually move the old ext stuff into the new @@ -81,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, int ret; if (outcnt <= stream->outcnt) - return 0; + goto out; ret = genradix_prealloc(&stream->out, outcnt, gfp); if (ret) return ret; +out: stream->outcnt = outcnt; return 0; } @@ -97,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt, int ret; if (incnt <= stream->incnt) - return 0; + goto out; ret = genradix_prealloc(&stream->in, incnt, gfp); if (ret) return ret; +out: stream->incnt = incnt; return 0; } @@ -1038,11 +1047,13 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = ntohs(addstrm->number_of_streams); number = stream->outcnt - nums; - if (result == SCTP_STRRESET_PERFORMED) + if (result == SCTP_STRRESET_PERFORMED) { for (i = number; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; - else + } else { + sctp_stream_shrink_out(stream, number); stream->outcnt = number; + } *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, 0, nums, GFP_ATOMIC); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3bbe1a58ec87..a3dc5037c1eb 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -133,7 +133,7 @@ void sctp_transport_free(struct sctp_transport *transport) /* Delete the ICMP proto unreachable timer if it's active. */ if (del_timer(&transport->proto_unreach_timer)) - sctp_association_put(transport->asoc); + sctp_transport_put(transport); sctp_transport_put(transport); } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 0c5fcb8ed404..aeea67f90841 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -795,7 +795,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, return buf_desc; } -#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ +#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 
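The sctp_stream_alloc_out()/sctp_stream_alloc_in() hunks above turn an early `return 0` into `goto out` so that a shrink (a new count at or below the current one) still commits the new count; only growth needs a genradix preallocation. A compilable userspace model of that control flow, with the genradix call reduced to a capacity stand-in:

```c
#include <stdio.h>

struct stream { unsigned short outcnt, capacity; };

static int stream_alloc_out(struct stream *s, unsigned short outcnt)
{
    if (outcnt <= s->outcnt)
        goto out;               /* shrink: no allocation needed */

    if (outcnt > s->capacity)   /* stand-in for genradix_prealloc() */
        s->capacity = outcnt;
out:
    s->outcnt = outcnt;         /* always commit the new count */
    return 0;
}

int main(void)
{
    struct stream s = { .outcnt = 17, .capacity = 17 };

    stream_alloc_out(&s, 5);    /* shrink now takes effect */
    printf("outcnt=%hu capacity=%hu\n", s.outcnt, s.capacity);
    return 0;
}
```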
6 -> 1MB */ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, bool is_dmb, int bufsize) diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index e1f64f4ba236..da9ba6d1679b 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; - struct smcd_diag_dmbinfo dinfo = { - .linkid = *((u32 *)conn->lgr->id), - .peer_gid = conn->lgr->peer_gid, - .my_gid = conn->lgr->smcd->local_gid, - .token = conn->rmb_desc->token, - .peer_token = conn->peer_token - }; + struct smcd_diag_dmbinfo dinfo; + + memset(&dinfo, 0, sizeof(dinfo)); + + dinfo.linkid = *((u32 *)conn->lgr->id); + dinfo.peer_gid = conn->lgr->peer_gid; + dinfo.my_gid = conn->lgr->smcd->local_gid; + dinfo.token = conn->rmb_desc->token; + dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index d74a71dff5b8..a2bc7e096ef8 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -191,9 +191,9 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(attr); if (!IS_ERR(ndev) && - ((!vlan_id && !is_vlan_dev(attr->ndev)) || - (vlan_id && is_vlan_dev(attr->ndev) && - vlan_dev_vlan_id(attr->ndev) == vlan_id)) && + ((!vlan_id && !is_vlan_dev(ndev)) || + (vlan_id && is_vlan_dev(ndev) && + vlan_dev_vlan_id(ndev) == vlan_id)) && attr->gid_type == IB_GID_TYPE_ROCE) { rcu_read_unlock(); if (gid) diff --git a/net/socket.c b/net/socket.c index f5784503b865..3a95b0a377b0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -538,7 +538,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) if (f.file) { sock = sock_from_file(f.file, err); if (likely(sock)) { - *fput_needed = f.flags; + *fput_needed = f.flags & FDPUT_FPUT; return sock; } fdput(f); diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index d024af4be85e..0d4a2bb09589 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); - if (unlikely((size_t)rc > sizeof(scopebuf))) + if (unlikely((size_t)rc >= sizeof(scopebuf))) return 0; len += rc; - if (unlikely(len > buflen)) + if (unlikely(len >= buflen)) return 0; strcat(buf, scopebuf); @@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, scope_id = dev->ifindex; dev_put(dev); } else { - if (kstrtou32(p, 10, &scope_id) == 0) { + if (kstrtou32(p, 10, &scope_id) != 0) { kfree(p); return 0; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index d75fddca44c9..b7a71578bd98 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -20,6 +20,7 @@ #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/auth_gss.h> +#include <linux/sunrpc/gss_krb5.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/gss_err.h> #include <linux/workqueue.h> @@ -28,6 +29,7 @@ #include <linux/uaccess.h> #include <linux/hashtable.h> +#include "auth_gss_internal.h" #include "../netns.h" #include <trace/events/rpcgss.h> @@ -124,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); } -static const void * 
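The smcd_diag_dmbinfo hunk above trades a designated initializer for memset() plus member assignments because an initializer only guarantees zeroed members, not zeroed padding, and the struct's raw bytes are copied to userspace via netlink. A small userspace program making the distinction concrete; the padding layout noted in the comments is an assumption about typical ABIs:

```c
#include <stdio.h>
#include <string.h>

struct info {
    unsigned char tag;      /* 3 padding bytes usually follow */
    unsigned int value;
};

static void fill_initializer(struct info *out)
{
    struct info tmp = { .tag = 1, .value = 2 }; /* padding unspecified */

    memcpy(out, &tmp, sizeof(tmp));
}

static void fill_memset(struct info *out)
{
    struct info tmp;

    memset(&tmp, 0, sizeof(tmp));   /* zeroes members *and* padding */
    tmp.tag = 1;
    tmp.value = 2;
    memcpy(out, &tmp, sizeof(tmp));
}

int main(void)
{
    struct info a, b;

    fill_initializer(&a);   /* padding bytes may hold stale stack data */
    fill_memset(&b);        /* all bytes deterministic */
    printf("sizeof(struct info)=%zu\n", sizeof(struct info));
    return 0;
}
```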
-simple_get_bytes(const void *p, const void *end, void *res, size_t len) -{ - const void *q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - memcpy(res, p, len); - return q; -} - -static inline const void * -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) -{ - const void *q; - unsigned int len; - - p = simple_get_bytes(p, end, &len, sizeof(len)); - if (IS_ERR(p)) - return p; - q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - dest->data = kmemdup(p, len, GFP_NOFS); - if (unlikely(dest->data == NULL)) - return ERR_PTR(-ENOMEM); - dest->len = len; - return q; -} - static struct gss_cl_ctx * gss_cred_get_ctx(struct rpc_cred *cred) { @@ -1050,7 +1023,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt) goto err_put_mech; auth = &gss_auth->rpc_auth; auth->au_cslack = GSS_CRED_SLACK >> 2; - auth->au_rslack = GSS_VERF_SLACK >> 2; + auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2; auth->au_verfsize = GSS_VERF_SLACK >> 2; auth->au_ralign = GSS_VERF_SLACK >> 2; auth->au_flags = 0; @@ -1934,35 +1907,69 @@ gss_unwrap_resp_auth(struct rpc_cred *cred) return 0; } +/* + * RFC 2203, Section 5.3.2.2 + * + * struct rpc_gss_integ_data { + * opaque databody_integ<>; + * opaque checksum<>; + * }; + * + * struct rpc_gss_data_t { + * unsigned int seq_num; + * proc_req_arg_t arg; + * }; + */ static int gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp, struct xdr_stream *xdr) { - struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf; - u32 data_offset, mic_offset, integ_len, maj_stat; + struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf; struct rpc_auth *auth = cred->cr_auth; + u32 len, offset, seqno, maj_stat; struct xdr_netobj mic; - __be32 *p; + int ret; - p = xdr_inline_decode(xdr, 2 * sizeof(*p)); - if (unlikely(!p)) + ret = -EIO; + mic.data = NULL; + + /* opaque databody_integ<>; */ + if (xdr_stream_decode_u32(xdr, &len)) goto unwrap_failed; - integ_len = be32_to_cpup(p++); - if (integ_len & 3) + if (len & 3) goto unwrap_failed; - data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base; - mic_offset = integ_len + data_offset; - if (mic_offset > rcv_buf->len) + offset = rcv_buf->len - xdr_stream_remaining(xdr); + if (xdr_stream_decode_u32(xdr, &seqno)) goto unwrap_failed; - if (be32_to_cpup(p) != rqstp->rq_seqno) + if (seqno != rqstp->rq_seqno) goto bad_seqno; + if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) + goto unwrap_failed; - if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len)) + /* + * The xdr_stream now points to the beginning of the + * upper layer payload, to be passed below to + * rpcauth_unwrap_resp_decode(). The checksum, which + * follows the upper layer payload in @rcv_buf, is + * located and parsed without updating the xdr_stream. 
+ */ + + /* opaque checksum<>; */ + offset += len; + if (xdr_decode_word(rcv_buf, offset, &len)) goto unwrap_failed; - if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset)) + offset += sizeof(__be32); + if (offset + len > rcv_buf->len) goto unwrap_failed; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); + mic.len = len; + mic.data = kmalloc(len, GFP_NOFS); + if (!mic.data) + goto unwrap_failed; + if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len)) + goto unwrap_failed; + + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); if (maj_stat != GSS_S_COMPLETE) @@ -1970,16 +1977,21 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred, auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len); auth->au_ralign = auth->au_verfsize + 2; - return 0; + ret = 0; + +out: + kfree(mic.data); + return ret; + unwrap_failed: trace_rpcgss_unwrap_failed(task); - return -EIO; + goto out; bad_seqno: - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p)); - return -EIO; + trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); + goto out; bad_mic: trace_rpcgss_verify_mic(task, maj_stat); - return -EIO; + goto out; } static int @@ -1990,7 +2002,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; struct kvec *head = rqstp->rq_rcv_buf.head; struct rpc_auth *auth = cred->cr_auth; - unsigned int savedlen = rcv_buf->len; u32 offset, opaque_len, maj_stat; __be32 *p; @@ -2001,9 +2012,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, offset = (u8 *)(p) - (u8 *)head->iov_base; if (offset + opaque_len > rcv_buf->len) goto unwrap_failed; - rcv_buf->len = offset + opaque_len; - maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); + maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, + offset + opaque_len, rcv_buf); if (maj_stat == GSS_S_CONTEXT_EXPIRED) clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); if (maj_stat != GSS_S_COMPLETE) @@ -2017,10 +2028,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred, */ xdr_init_decode(xdr, rcv_buf, p, rqstp); - auth->au_rslack = auth->au_verfsize + 2 + - XDR_QUADLEN(savedlen - rcv_buf->len); - auth->au_ralign = auth->au_verfsize + 2 + - XDR_QUADLEN(savedlen - rcv_buf->len); + auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack; + auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align; + return 0; unwrap_failed: trace_rpcgss_unwrap_failed(task); diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h new file mode 100644 index 000000000000..f6d9631bd9d0 --- /dev/null +++ b/net/sunrpc/auth_gss/auth_gss_internal.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * linux/net/sunrpc/auth_gss/auth_gss_internal.h + * + * Internal definitions for RPCSEC_GSS client authentication + * + * Copyright (c) 2000 The Regents of the University of Michigan. + * All rights reserved. 
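The rewritten gss_unwrap_resp_integ() above walks the RFC 2203 rpc_gss_integ_data layout with explicit offsets: a length-prefixed integrity body (sequence number plus payload) followed by a length-prefixed checksum, with every step bounds-checked against the receive buffer. A simplified, compilable userspace parser of the same shape; byte-order handling and the actual MIC verification are elided, as the comments note:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int get_u32(const uint8_t *buf, size_t buflen, size_t off, uint32_t *v)
{
    if (off + 4 > buflen)
        return -1;
    memcpy(v, buf + off, 4);    /* wire byte order ignored for brevity */
    return 0;
}

static int unwrap_integ(const uint8_t *buf, size_t buflen, uint32_t expect_seq)
{
    uint32_t body_len, seq, mic_len;
    size_t off = 0;

    /* opaque databody_integ<>; length must be a 4-byte multiple */
    if (get_u32(buf, buflen, off, &body_len) || (body_len & 3))
        return -1;
    off += 4;
    /* rpc_gss_data_t begins with the sequence number */
    if (get_u32(buf, buflen, off, &seq) || seq != expect_seq)
        return -1;
    if (body_len > buflen - off)        /* body must fit in the buffer */
        return -1;
    /* opaque checksum<>; its length word sits right after the body */
    if (get_u32(buf, buflen, off + body_len, &mic_len))
        return -1;
    if (mic_len > buflen - off - body_len - 4)
        return -1;
    /* verify_mic(buf + off, body_len, buf + off + body_len + 4, mic_len) */
    return 0;
}

int main(void)
{
    uint8_t msg[16] = { 8, 0, 0, 0,  7, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0 };

    printf("unwrap: %d\n", unwrap_integ(msg, sizeof(msg), 7));
    return 0;
}
```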
+ * + */ +#include <linux/err.h> +#include <linux/string.h> +#include <linux/sunrpc/xdr.h> + +static inline const void * +simple_get_bytes(const void *p, const void *end, void *res, size_t len) +{ + const void *q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + memcpy(res, p, len); + return q; +} + +static inline const void * +simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +{ + const void *q; + unsigned int len; + + p = simple_get_bytes(p, end, &len, sizeof(len)); + if (IS_ERR(p)) + return p; + q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + if (len) { + dest->data = kmemdup(p, len, GFP_NOFS); + if (unlikely(dest->data == NULL)) + return ERR_PTR(-ENOMEM); + } else + dest->data = NULL; + dest->len = len; + return q; +} diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 6f2d30d7b766..e7180da1fc6a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -851,8 +851,8 @@ out_err: } u32 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, - u32 *headskip, u32 *tailskip) +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, + struct xdr_buf *buf, u32 *headskip, u32 *tailskip) { struct xdr_buf subbuf; u32 ret = 0; @@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, /* create a segment skipping the header and leaving out the checksum */ xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, - (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - + (len - offset - GSS_KRB5_TOK_HDR_LEN - kctx->gk5e->cksumlength)); nblocks = (subbuf.len + blocksize - 1) / blocksize; @@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, goto out_err; /* Get the packet's hmac value */ - ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, + ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength, pkt_hmac, kctx->gk5e->cksumlength); if (ret) goto out_err; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 6e5d6d240215..b552dd4f32f8 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -21,6 +21,8 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/gss_krb5_enctypes.h> +#include "auth_gss_internal.h" + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_AUTH #endif @@ -164,35 +166,6 @@ get_gss_krb5_enctype(int etype) return NULL; } -static const void * -simple_get_bytes(const void *p, const void *end, void *res, int len) -{ - const void *q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - memcpy(res, p, len); - return q; -} - -static const void * -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) -{ - const void *q; - unsigned int len; - - p = simple_get_bytes(p, end, &len, sizeof(len)); - if (IS_ERR(p)) - return p; - q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - res->data = kmemdup(p, len, GFP_NOFS); - if (unlikely(res->data == NULL)) - return ERR_PTR(-ENOMEM); - res->len = len; - return q; -} - static inline const void * get_key(const void *p, const void *end, struct krb5_ctx *ctx, struct crypto_sync_skcipher **res) diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 
14a0aff0cd84..78ad41656996 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, } static u32 -gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) +gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len, + struct xdr_buf *buf, unsigned int *slack, + unsigned int *align) { int signalg; int sealalg; @@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) u32 conflen = kctx->gk5e->conflen; int crypt_offset; u8 *cksumkey; + unsigned int saved_len = buf->len; dprintk("RPC: gss_unwrap_kerberos\n"); ptr = (u8 *)buf->head[0].iov_base + offset; if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, - buf->len - offset)) + len - offset)) return GSS_S_DEFECTIVE_TOKEN; if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) || @@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) (!kctx->initiate && direction != 0)) return GSS_S_BAD_SIG; + buf->len = len; if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { struct crypto_sync_skcipher *cipher; int err; @@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; memmove(orig_start, data_start, data_len); buf->head[0].iov_len -= (data_start - orig_start); - buf->len -= (data_start - orig_start); + buf->len = len - (data_start - orig_start); if (gss_krb5_remove_padding(buf, blocksize)) return GSS_S_DEFECTIVE_TOKEN; + /* slack must include room for krb5 padding */ + *slack = XDR_QUADLEN(saved_len - buf->len); + /* The GSS blob always precedes the RPC message payload */ + *align = *slack; return GSS_S_COMPLETE; } @@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, } static u32 -gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) +gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len, + struct xdr_buf *buf, unsigned int *slack, + unsigned int *align) { s32 now; u8 *ptr; @@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) if (rrc != 0) rotate_left(offset + 16, buf, rrc); - err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, + err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf, &headskip, &tailskip); if (err) return GSS_S_FAILURE; @@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) * it against the original */ err = read_bytes_from_xdr_buf(buf, - buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip, + len - GSS_KRB5_TOK_HDR_LEN - tailskip, decrypted_hdr, GSS_KRB5_TOK_HDR_LEN); if (err) { dprintk("%s: error %u getting decrypted_hdr\n", __func__, err); @@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) * Note that buf->head[0].iov_len may indicate the available * head buffer space rather than that actually occupied. 
*/ - movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); + movelen = min_t(unsigned int, buf->head[0].iov_len, len); movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip; - if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > - buf->head[0].iov_len) - return GSS_S_FAILURE; + BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > + buf->head[0].iov_len); memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; - buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; + buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip); /* Trim off the trailing "extra count" and checksum blob */ - buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip; + xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip); + *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip); + *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip); return GSS_S_COMPLETE; } @@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, } u32 -gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) +gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, + int len, struct xdr_buf *buf) { struct krb5_ctx *kctx = gctx->internal_ctx_id; @@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: - return gss_unwrap_kerberos_v1(kctx, offset, buf); + return gss_unwrap_kerberos_v1(kctx, offset, len, buf, + &gctx->slack, &gctx->align); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: - return gss_unwrap_kerberos_v2(kctx, offset, buf); + return gss_unwrap_kerberos_v2(kctx, offset, len, buf, + &gctx->slack, &gctx->align); } } diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 82060099a429..9314999bf095 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -36,6 +36,8 @@ gss_mech_free(struct gss_api_mech *gm) for (i = 0; i < gm->gm_pf_num; i++) { pf = &gm->gm_pfs[i]; + if (pf->domain) + auth_domain_put(pf->domain); kfree(pf->auth_domain_name); pf->auth_domain_name = NULL; } @@ -58,6 +60,7 @@ make_auth_domain_name(char *name) static int gss_mech_svc_setup(struct gss_api_mech *gm) { + struct auth_domain *dom; struct pf_desc *pf; int i, status; @@ -67,10 +70,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) status = -ENOMEM; if (pf->auth_domain_name == NULL) goto out; - status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, - pf->auth_domain_name); - if (status) + dom = svcauth_gss_register_pseudoflavor( + pf->pseudoflavor, pf->auth_domain_name); + if (IS_ERR(dom)) { + status = PTR_ERR(dom); goto out; + } + pf->domain = dom; } return 0; out: @@ -438,10 +444,11 @@ gss_wrap(struct gss_ctx *ctx_id, u32 gss_unwrap(struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *buf) { return ctx_id->mech_type->gm_ops - ->gss_unwrap(ctx_id, offset, buf); + ->gss_unwrap(ctx_id, offset, len, buf); } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index ed20fa8a6f70..d5470c7fe879 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -800,7 +800,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) EXPORT_SYMBOL_GPL(svcauth_gss_flavor); -int +struct auth_domain * svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { struct gss_domain *new; @@ -817,21 +817,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new->h.flavour = 
&svcauthops_gss; new->pseudoflavor = pseudoflavor; - stat = 0; test = auth_domain_lookup(name, &new->h); - if (test != &new->h) { /* Duplicate registration */ + if (test != &new->h) { + pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", + name); + stat = -EADDRINUSE; auth_domain_put(test); - kfree(new->h.name); - goto out_free_dom; + goto out_free_name; } - return 0; + return test; +out_free_name: + kfree(new->h.name); out_free_dom: kfree(new); out: - return stat; + return ERR_PTR(stat); } - EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); static inline int @@ -897,7 +899,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g if (svc_getnl(&buf->head[0]) != seq) goto out; /* trim off the mic and padding at the end before returning */ - buf->len -= 4 + round_up_to_quad(mic.len); + xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4); stat = 0; out: kfree(mic.data); @@ -925,7 +927,7 @@ static int unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) { u32 priv_len, maj_stat; - int pad, saved_len, remaining_len, offset; + int pad, remaining_len, offset; clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); @@ -945,13 +947,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs buf->len -= pad; fix_priv_head(buf, pad); - /* Maybe it would be better to give gss_unwrap a length parameter: */ - saved_len = buf->len; - buf->len = priv_len; - maj_stat = gss_unwrap(ctx, 0, buf); + maj_stat = gss_unwrap(ctx, 0, priv_len, buf); pad = priv_len - buf->len; - buf->len = saved_len; - buf->len -= pad; /* The upper layers assume the buffer is aligned on 4-byte boundaries. * In the krb5p case, at least, the data ends up offset, so we need to * move it around. */ @@ -1098,9 +1095,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp, struct gssp_in_token *in_token) { struct kvec *argv = &rqstp->rq_arg.head[0]; - unsigned int page_base, length; - int pages, i, res; - size_t inlen; + unsigned int length, pgto_offs, pgfrom_offs; + int pages, i, res, pgto, pgfrom; + size_t inlen, to_offs, from_offs; res = gss_read_common_verf(gc, argv, authp, in_handle); if (res) @@ -1128,17 +1125,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp, memcpy(page_address(in_token->pages[0]), argv->iov_base, length); inlen -= length; - i = 1; - page_base = rqstp->rq_arg.page_base; + to_offs = length; + from_offs = rqstp->rq_arg.page_base; while (inlen) { - length = min_t(unsigned int, inlen, PAGE_SIZE); - memcpy(page_address(in_token->pages[i]), - page_address(rqstp->rq_arg.pages[i]) + page_base, + pgto = to_offs >> PAGE_SHIFT; + pgfrom = from_offs >> PAGE_SHIFT; + pgto_offs = to_offs & ~PAGE_MASK; + pgfrom_offs = from_offs & ~PAGE_MASK; + + length = min_t(unsigned int, inlen, + min_t(unsigned int, PAGE_SIZE - pgto_offs, + PAGE_SIZE - pgfrom_offs)); + memcpy(page_address(in_token->pages[pgto]) + pgto_offs, + page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs, length); + to_offs += length; + from_offs += length; inlen -= length; - page_base = 0; - i++; } return 0; } @@ -1778,11 +1782,14 @@ static int svcauth_gss_release(struct svc_rqst *rqstp) { struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data; - struct rpc_gss_wire_cred *gc = &gsd->clcred; + struct rpc_gss_wire_cred *gc; struct xdr_buf *resbuf = &rqstp->rq_res; int stat = -EINVAL; struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); + if (!gsd) + goto out; + gc = &gsd->clcred; if (gc->gc_proc != RPC_GSS_PROC_DATA) goto out; /* 
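The unwrap_priv_data() hunk above completes what its own removed comment asked for: gss_unwrap() now takes the valid length as an argument, so callers no longer save buf->len, overwrite it, and patch it back afterwards. A call-site fragment (not compilable on its own) contrasting the two shapes, taken directly from the diff:

```c
/* before: every caller had to lie about the buffer length */
saved_len = buf->len;
buf->len = priv_len;                    /* pretend only priv_len is valid */
maj_stat = gss_unwrap(ctx, 0, buf);
pad = priv_len - buf->len;
buf->len = saved_len;
buf->len -= pad;                        /* re-apply the trim by hand */

/* after: the valid region is explicit; the callee trims buf itself
 * (via xdr_buf_trim() elsewhere in this series)
 */
maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
pad = priv_len - buf->len;
```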
Release can be called twice, but we only wrap once. */ @@ -1823,10 +1830,10 @@ out_err: if (rqstp->rq_cred.cr_group_info) put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; - if (gsd->rsci) + if (gsd && gsd->rsci) { cache_put(&gsd->rsci->h, sn->rsc_cache); - gsd->rsci = NULL; - + gsd->rsci = NULL; + } return stat; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f7f78566be46..f1088ca39d44 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2422,6 +2422,11 @@ rpc_check_timeout(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; + if (RPC_SIGNALLED(task)) { + rpc_call_rpcerror(task, -ERESTARTSYS); + return; + } + if (xprt_adjust_timeout(task->tk_rqstp) == 0) return; diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index fd9bca242724..56029e3af6ff 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -128,13 +128,13 @@ static int do_xprt_debugfs(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *n return 0; len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", xprt->debugfs->d_name.name); - if (len > sizeof(name)) + if (len >= sizeof(name)) return -1; if (*nump == 0) strcpy(link, "xprt"); else { len = snprintf(link, sizeof(link), "xprt%d", *nump); - if (len > sizeof(link)) + if (len >= sizeof(link)) return -1; } debugfs_create_symlink(link, clnt->cl_debugfs, name); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index b71a39ded930..37792675ed57 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) q.len = strlen(gssd_dummy_clnt_dir[0].name); clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); if (!clnt_dentry) { + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); goto out; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 4a020b688860..1db9f62e466d 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -988,8 +988,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; - dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, - req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); + dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, + req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, sap, sizeof(address)) == 0) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 987c4b1f0b17..4beb6d2957c3 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -204,10 +204,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) { - WARN_ON_ONCE(RPC_IS_QUEUED(task)); - if (RPC_IS_QUEUED(task)) - return; - INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); @@ -382,7 +378,7 @@ static void rpc_make_runnable(struct workqueue_struct *wq, * NB: An RPC task will only receive interrupt-driven events as long * as it's on a wait queue. 
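The debugfs hunks above, like the rpc_ntop6() ones earlier, fix the same off-by-one: snprintf() returns the length the output would have had, so a return value equal to the buffer size already means the string was truncated, and only a `>=` comparison catches that case. A compilable demonstration:

```c
#include <stdio.h>

int main(void)
{
    char name[8];
    int len = snprintf(name, sizeof(name), "%s", "12345678");

    /* len == 8 here: exactly sizeof(name), yet the buffer holds only
     * 7 characters plus the NUL. The fixed checks use >= so this
     * silently-truncated case is rejected.
     */
    if (len >= (int)sizeof(name))
        printf("truncated (len=%d): \"%s\"\n", len, name);
    return 0;
}
```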
*/ -static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, +static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, unsigned char queue_priority) { @@ -395,12 +391,23 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, } +static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, + struct rpc_task *task, + unsigned char queue_priority) +{ + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) + return; + __rpc_do_sleep_on_priority(q, task, queue_priority); +} + static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, struct rpc_task *task, unsigned long timeout, unsigned char queue_priority) { + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) + return; if (time_is_after_jiffies(timeout)) { - __rpc_sleep_on_priority(q, task, queue_priority); + __rpc_do_sleep_on_priority(q, task, queue_priority); __rpc_add_timer(q, task, timeout); } else task->tk_status = -ETIMEDOUT; @@ -692,6 +699,23 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) } EXPORT_SYMBOL_GPL(rpc_wake_up_next); +/** + * rpc_wake_up_locked - wake up all rpc_tasks + * @queue: rpc_wait_queue on which the tasks are sleeping + * + */ +static void rpc_wake_up_locked(struct rpc_wait_queue *queue) +{ + struct rpc_task *task; + + for (;;) { + task = __rpc_find_next_queued(queue); + if (task == NULL) + break; + rpc_wake_up_task_queue_locked(queue, task); + } +} + /** * rpc_wake_up - wake up all rpc_tasks * @queue: rpc_wait_queue on which the tasks are sleeping @@ -700,26 +724,29 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next); */ void rpc_wake_up(struct rpc_wait_queue *queue) { - struct list_head *head; - spin_lock(&queue->lock); - head = &queue->tasks[queue->maxpriority]; - for (;;) { - while (!list_empty(head)) { - struct rpc_task *task; - task = list_first_entry(head, - struct rpc_task, - u.tk_wait.list); - rpc_wake_up_task_queue_locked(queue, task); - } - if (head == &queue->tasks[0]) - break; - head--; - } + rpc_wake_up_locked(queue); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up); +/** + * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value. + * @queue: rpc_wait_queue on which the tasks are sleeping + * @status: status value to set + */ +static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status) +{ + struct rpc_task *task; + + for (;;) { + task = __rpc_find_next_queued(queue); + if (task == NULL) + break; + rpc_wake_up_task_queue_set_status_locked(queue, task, status); + } +} + /** * rpc_wake_up_status - wake up all rpc_tasks and set their status value. 
* @queue: rpc_wait_queue on which the tasks are sleeping @@ -729,23 +756,8 @@ EXPORT_SYMBOL_GPL(rpc_wake_up); */ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) { - struct list_head *head; - spin_lock(&queue->lock); - head = &queue->tasks[queue->maxpriority]; - for (;;) { - while (!list_empty(head)) { - struct rpc_task *task; - task = list_first_entry(head, - struct rpc_task, - u.tk_wait.list); - task->tk_status = status; - rpc_wake_up_task_queue_locked(queue, task); - } - if (head == &queue->tasks[0]) - break; - head--; - } + rpc_wake_up_status_locked(queue, status); spin_unlock(&queue->lock); } EXPORT_SYMBOL_GPL(rpc_wake_up_status); @@ -824,6 +836,7 @@ rpc_reset_task_statistics(struct rpc_task *task) */ void rpc_exit_task(struct rpc_task *task) { + trace_rpc_task_end(task, task->tk_action); task->tk_action = NULL; if (task->tk_ops->rpc_count_stats) task->tk_ops->rpc_count_stats(task, task->tk_calldata); @@ -977,8 +990,11 @@ void rpc_execute(struct rpc_task *task) rpc_set_active(task); rpc_make_runnable(rpciod_workqueue, task); - if (!is_async) + if (!is_async) { + unsigned int pflags = memalloc_nofs_save(); __rpc_execute(task); + memalloc_nofs_restore(pflags); + } } static void rpc_async_schedule(struct work_struct *work) diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index c9bacb3c930f..82035fa65b8f 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h @@ -56,4 +56,5 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, int rpc_clients_notifier_register(void); void rpc_clients_notifier_unregister(void); +void auth_domain_cleanup(void); #endif /* _NET_SUNRPC_SUNRPC_H */ diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index f9edaa9174a4..236fadc4a439 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -23,6 +23,7 @@ #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/xprtsock.h> +#include "sunrpc.h" #include "netns.h" unsigned int sunrpc_net_id; @@ -131,6 +132,7 @@ cleanup_sunrpc(void) unregister_rpc_pipefs(); rpc_destroy_mempool(); unregister_pernet_subsys(&sunrpc_net_ops); + auth_domain_cleanup(); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) rpc_unregister_sysctl(); #endif diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index d11b70552c33..1741f114e8ff 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1417,7 +1417,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) sendit: if (svc_authorise(rqstp)) - goto close; + goto close_xprt; return 1; /* Caller can now send it */ release_dropit: @@ -1429,6 +1429,8 @@ release_dropit: return 0; close: + svc_authorise(rqstp); +close_xprt: if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) svc_close_xprt(rqstp->rq_xprt); dprintk("svc: svc_process close\n"); @@ -1437,7 +1439,7 @@ release_dropit: err_short_len: svc_printk(rqstp, "short len %zd, dropping request\n", argv->iov_len); - goto close; + goto close_xprt; err_bad_rpc: serv->sv_stats->rpcbadfmt++; @@ -1634,6 +1636,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp) } EXPORT_SYMBOL_GPL(svc_max_payload); +/** + * svc_encode_read_payload - mark a range of bytes as a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in rqstp->rq_res + * @length: size of payload, in bytes + * + * Returns zero on success, or a negative errno if a permanent + * error occurred. 
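The scheduler hunks above collapse two hand-rolled walks over the priority levels into one loop (find the next queued task, wake it, repeat until empty) shared by rpc_wake_up() and rpc_wake_up_status(). A self-contained userspace model of that loop, with illustrative types rather than the kernel API:

```c
#include <stdio.h>

#define NTASKS 3

struct queue { int queued[NTASKS]; int status[NTASKS]; };

/* stand-in for __rpc_find_next_queued() */
static int find_next_queued(struct queue *q)
{
    for (int i = 0; i < NTASKS; i++)
        if (q->queued[i])
            return i;
    return -1;
}

static void wake_all_status(struct queue *q, int status)
{
    int t;

    while ((t = find_next_queued(q)) >= 0) {
        q->status[t] = status;  /* the set-status variant's extra step */
        q->queued[t] = 0;       /* "wake" the task */
    }
}

int main(void)
{
    struct queue q = { .queued = { 1, 0, 1 } };

    wake_all_status(&q, -5 /* e.g. -EIO */);
    printf("queued: %d %d %d\n", q.queued[0], q.queued[1], q.queued[2]);
    return 0;
}
```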
+ */ +int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length); +} +EXPORT_SYMBOL_GPL(svc_encode_read_payload); + /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index de3c077733a7..f911153339a9 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -104,8 +104,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); -/* - * Format the transport list for printing +/** + * svc_print_xprts - Format the transport list for printing + * @buf: target buffer for formatted address + * @maxlen: length of target buffer + * + * Fills in @buf with a string containing a list of transport names, each name + * terminated with '\n'. If the buffer is too small, some entries may be + * missing, but it is guaranteed that all lines in the output buffer are + * complete. + * + * Returns positive length of the filled-in string. */ int svc_print_xprts(char *buf, int maxlen) { @@ -118,9 +127,9 @@ int svc_print_xprts(char *buf, int maxlen) list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen; - sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); - slen = strlen(tmpstr); - if (len + slen > maxlen) + slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", + xcl->xcl_name, xcl->xcl_max_payload); + if (slen >= sizeof(tmpstr) || len + slen >= maxlen) break; len += slen; strcat(buf, tmpstr); @@ -897,9 +906,6 @@ int svc_send(struct svc_rqst *rqstp) if (!xprt) goto out; - /* release the receive skb before sending the reply */ - xprt->xpt_ops->xpo_release_rqst(rqstp); - /* calculate over-all length */ xb = &rqstp->rq_res; xb->len = xb->head[0].iov_len + @@ -1028,6 +1034,8 @@ static void svc_delete_xprt(struct svc_xprt *xprt) dprintk("svc: svc_delete_xprt(%p)\n", xprt); xprt->xpt_ops->xpo_detach(xprt); + if (xprt->xpt_bc_xprt) + xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt); spin_lock_bh(&serv->sv_lock); list_del_init(&xprt->xpt_list); @@ -1064,7 +1072,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st struct svc_xprt *xprt; int ret = 0; - spin_lock(&serv->sv_lock); + spin_lock_bh(&serv->sv_lock); list_for_each_entry(xprt, xprt_list, xpt_list) { if (xprt->xpt_net != net) continue; @@ -1072,7 +1080,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); } - spin_unlock(&serv->sv_lock); + spin_unlock_bh(&serv->sv_lock); return ret; } diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 550b214cb001..998b196b6176 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -19,6 +19,10 @@ #include <linux/err.h> #include <linux/hash.h> +#include <trace/events/sunrpc.h> + +#include "sunrpc.h" + #define RPCDBG_FACILITY RPCDBG_AUTH @@ -203,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name) return NULL; } EXPORT_SYMBOL_GPL(auth_domain_find); + +/** + * auth_domain_cleanup - check that the auth_domain table is empty + * + * On module unload the auth_domain_table must be empty. To make it + * easier to catch bugs which don't clean up domains properly, we + * warn if anything remains in the table at cleanup time. + * + * Note that we cannot proactively remove the domains at this stage. + * The ->release() function might be in a module that has already been + * unloaded. 
+ */ + +void auth_domain_cleanup(void) +{ + int h; + struct auth_domain *hp; + + for (h = 0; h < DN_HASHMAX; h++) + hlist_for_each_entry(hp, &auth_domain_table[h], hash) + pr_warn("svc: domain %s still present at module unload.\n", + hp->name); +} diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2934dd711715..d52abde51f1b 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -279,6 +279,12 @@ out: return len; } +static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return 0; +} + /* * Report socket names for nfsdfs */ @@ -605,6 +611,8 @@ svc_udp_sendto(struct svc_rqst *rqstp) { int error; + svc_release_udp_skb(rqstp); + error = svc_sendto(rqstp, &rqstp->rq_res); if (error == -ECONNREFUSED) /* ICMP error on earlier request. */ @@ -653,6 +661,7 @@ static const struct svc_xprt_ops svc_udp_ops = { .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_udp_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, @@ -1137,6 +1146,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) int sent; __be32 reclen; + svc_release_skb(rqstp); + /* Set up the first element of the reply kvec. * Any other kvecs that may be in use have been taken * care of by the server implementation itself. @@ -1171,6 +1182,7 @@ static const struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_tcp_sock_detach, .xpo_free = svc_sock_free, diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index f3104be8ff5d..7ef37054071f 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->head[0].iov_len; + subbuf->head[0].iov_base = buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } @@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->page_len; + subbuf->pages = buf->pages; + subbuf->page_base = 0; subbuf->page_len = 0; } @@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->tail[0].iov_len; + subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } @@ -1150,6 +1154,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, } EXPORT_SYMBOL_GPL(xdr_buf_subsegment); +/** + * xdr_buf_trim - lop at most "len" bytes off the end of "buf" + * @buf: buf to be trimmed + * @len: number of bytes to reduce "buf" by + * + * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note + * that it's possible that we'll trim less than that amount if the xdr_buf is + * too small, or if (for instance) it's all in the head and the parser has + * already read too far into it. 
+ */ +void xdr_buf_trim(struct xdr_buf *buf, unsigned int len) +{ + size_t cur; + unsigned int trim = len; + + if (buf->tail[0].iov_len) { + cur = min_t(size_t, buf->tail[0].iov_len, trim); + buf->tail[0].iov_len -= cur; + trim -= cur; + if (!trim) + goto fix_len; + } + + if (buf->page_len) { + cur = min_t(unsigned int, buf->page_len, trim); + buf->page_len -= cur; + trim -= cur; + if (!trim) + goto fix_len; + } + + if (buf->head[0].iov_len) { + cur = min_t(size_t, buf->head[0].iov_len, trim); + buf->head[0].iov_len -= cur; + trim -= cur; + } +fix_len: + buf->len -= (len - trim); +} +EXPORT_SYMBOL_GPL(xdr_buf_trim); + static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) { unsigned int this_len; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 41df4c507193..639837b3a5d9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -151,31 +151,64 @@ out: } EXPORT_SYMBOL_GPL(xprt_unregister_transport); +static void +xprt_class_release(const struct xprt_class *t) +{ + module_put(t->owner); +} + +static const struct xprt_class * +xprt_class_find_by_netid_locked(const char *netid) +{ + const struct xprt_class *t; + unsigned int i; + + list_for_each_entry(t, &xprt_list, list) { + for (i = 0; t->netid[i][0] != '\0'; i++) { + if (strcmp(t->netid[i], netid) != 0) + continue; + if (!try_module_get(t->owner)) + continue; + return t; + } + } + return NULL; +} + +static const struct xprt_class * +xprt_class_find_by_netid(const char *netid) +{ + const struct xprt_class *t; + + spin_lock(&xprt_list_lock); + t = xprt_class_find_by_netid_locked(netid); + if (!t) { + spin_unlock(&xprt_list_lock); + request_module("rpc%s", netid); + spin_lock(&xprt_list_lock); + t = xprt_class_find_by_netid_locked(netid); + } + spin_unlock(&xprt_list_lock); + return t; +} + /** * xprt_load_transport - load a transport implementation - * @transport_name: transport to load + * @netid: transport to load * * Returns: * 0: transport successfully loaded * -ENOENT: transport module not available */ -int xprt_load_transport(const char *transport_name) +int xprt_load_transport(const char *netid) { - struct xprt_class *t; - int result; + const struct xprt_class *t; - result = 0; - spin_lock(&xprt_list_lock); - list_for_each_entry(t, &xprt_list, list) { - if (strcmp(t->name, transport_name) == 0) { - spin_unlock(&xprt_list_lock); - goto out; - } - } - spin_unlock(&xprt_list_lock); - result = request_module("xprt%s", transport_name); -out: - return result; + t = xprt_class_find_by_netid(netid); + if (!t) + return -ENOENT; + xprt_class_release(t); + return 0; } EXPORT_SYMBOL_GPL(xprt_load_transport); @@ -1503,10 +1536,13 @@ xprt_transmit(struct rpc_task *task) { struct rpc_rqst *next, *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - int status; + int counter, status; spin_lock(&xprt->queue_lock); + counter = 0; while (!list_empty(&xprt->xmit_queue)) { + if (++counter == 20) + break; next = list_first_entry(&xprt->xmit_queue, struct rpc_rqst, rq_xmit); xprt_pin_rqst(next); @@ -1514,7 +1550,6 @@ xprt_transmit(struct rpc_task *task) status = xprt_request_transmit(next, task); if (status == -EBADMSG && next != req) status = 0; - cond_resched(); spin_lock(&xprt->queue_lock); xprt_unpin_rqst(next); if (status == 0) { diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c index 620327c01302..45c5b41ac8dc 100644 --- a/net/sunrpc/xprtrdma/module.c +++ b/net/sunrpc/xprtrdma/module.c @@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport"); MODULE_LICENSE("Dual BSD/GPL"); 
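xdr_buf_trim(), added above, removes bytes from the logical end of a three-segment buffer: the tail shrinks first, then the page section, then the head, and buf->len drops by exactly the amount actually trimmed. A compilable userspace model using plain byte counters in place of iovecs and pages:

```c
#include <stdio.h>

struct buf3 { unsigned int head, pages, tail, len; };

static void buf_trim(struct buf3 *b, unsigned int len)
{
    unsigned int trim = len, cur;

    cur = b->tail < trim ? b->tail : trim;      /* tail first */
    b->tail -= cur;
    trim -= cur;
    if (trim) {
        cur = b->pages < trim ? b->pages : trim; /* then pages */
        b->pages -= cur;
        trim -= cur;
    }
    if (trim) {
        cur = b->head < trim ? b->head : trim;  /* head last */
        b->head -= cur;
        trim -= cur;
    }
    b->len -= len - trim;   /* never trims more than was present */
}

int main(void)
{
    struct buf3 b = { .head = 10, .pages = 20, .tail = 4, .len = 34 };

    buf_trim(&b, 12);       /* eats the tail (4), then 8 page bytes */
    printf("head=%u pages=%u tail=%u len=%u\n",
           b.head, b.pages, b.tail, b.len);
    return 0;
}
```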
MODULE_ALIAS("svcrdma"); MODULE_ALIAS("xprtrdma"); +MODULE_ALIAS("rpcrdma6"); static void __exit rpc_rdma_cleanup(void) { diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index ef5102b60589..c091417bd799 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Read list size */ - size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); + size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); /* Minimal Read chunk size */ size += sizeof(__be32); /* segment count */ @@ -96,7 +96,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Write list size */ - size = sizeof(__be32); /* segment count */ + size += sizeof(__be32); /* segment count */ size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32); size += sizeof(__be32); /* list discriminator */ @@ -183,6 +183,31 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, r_xprt->rx_ep.rep_max_inline_recv; } +/* ACL likes to be lazy in allocating pages. For TCP, these + * pages can be allocated during receive processing. Not true + * for RDMA, which must always provision receive buffers + * up front. + */ +static noinline int +rpcrdma_alloc_sparse_pages(struct xdr_buf *buf) +{ + struct page **ppages; + int len; + + len = buf->page_len; + ppages = buf->pages + (buf->page_base >> PAGE_SHIFT); + while (len > 0) { + if (!*ppages) + *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); + if (!*ppages) + return -ENOBUFS; + ppages++; + len -= PAGE_SIZE; + } + + return 0; +} + /* Split @vec on page boundaries into SGEs. FMR registers pages, not * a byte range. Other modes coalesce these SGEs into a single MR * when they can. @@ -237,15 +262,6 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); page_base = offset_in_page(xdrbuf->page_base); while (len) { - /* ACL likes to be lazy in allocating pages - ACLs - * are small by default but can get huge. - */ - if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) { - if (!*ppages) - *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN); - if (!*ppages) - return -ENOBUFS; - } seg->mr_page = *ppages; seg->mr_offset = (char *)page_base; seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len); @@ -800,6 +816,12 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) __be32 *p; int ret; + if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) { + ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf); + if (ret) + return ret; + } + rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0); xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf), rqst); @@ -1246,8 +1268,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, be32_to_cpup(p), be32_to_cpu(rep->rr_xid)); } - r_xprt->rx_stats.bad_reply_count++; - return -EREMOTEIO; + return -EIO; } /* Perform XID lookup, reconstruction of the RPC reply, and @@ -1284,13 +1305,11 @@ out: spin_unlock(&xprt->queue_lock); return; -/* If the incoming reply terminated a pending RPC, the next - * RPC call will post a replacement receive buffer as it is - * being marshaled. 
- */ out_badheader: trace_xprtrdma_reply_hdr(rep); r_xprt->rx_stats.bad_reply_count++; + rqst->rq_task->tk_status = status; + status = 0; goto out; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 908e78bb87c6..0ff5c5971ddd 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -15,26 +15,25 @@ #undef SVCRDMA_BACKCHANNEL_DEBUG /** - * svc_rdma_handle_bc_reply - Process incoming backchannel reply - * @xprt: controlling backchannel transport - * @rdma_resp: pointer to incoming transport header - * @rcvbuf: XDR buffer into which to decode the reply + * svc_rdma_handle_bc_reply - Process incoming backchannel Reply + * @rqstp: resources for handling the Reply + * @rctxt: Received message * - * Returns: - * %0 if @rcvbuf is filled in, xprt_complete_rqst called, - * %-EAGAIN if server should call ->recvfrom again. */ -int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, - struct xdr_buf *rcvbuf) +void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt) { + struct svc_xprt *sxprt = rqstp->rq_xprt; + struct rpc_xprt *xprt = sxprt->xpt_bc_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); + struct xdr_buf *rcvbuf = &rqstp->rq_arg; struct kvec *dst, *src = &rcvbuf->head[0]; + __be32 *rdma_resp = rctxt->rc_recv_buf; struct rpc_rqst *req; u32 credits; size_t len; __be32 xid; __be32 *p; - int ret; p = (__be32 *)src->iov_base; len = src->iov_len; @@ -49,14 +48,10 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, __func__, (int)len, p); #endif - ret = -EAGAIN; - if (src->iov_len < 24) - goto out_shortreply; - spin_lock(&xprt->queue_lock); req = xprt_lookup_rqst(xprt, xid); if (!req) - goto out_notfound; + goto out_unlock; dst = &req->rq_private_buf.head[0]; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); @@ -77,25 +72,12 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, spin_unlock(&xprt->transport_lock); spin_lock(&xprt->queue_lock); - ret = 0; xprt_complete_rqst(req->rq_task, rcvbuf->len); xprt_unpin_rqst(req); rcvbuf->len = 0; out_unlock: spin_unlock(&xprt->queue_lock); -out: - return ret; - -out_shortreply: - dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n", - xprt, src->iov_len); - goto out; - -out_notfound: - dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", - xprt, be32_to_cpu(xid)); - goto out_unlock; } /* Send a backwards direction RPC call. 
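The refactored handler above now returns void: it completes a pending request matched by xid under the transport's queue lock and quietly ignores replies it cannot match, rather than returning -EAGAIN. A toy userspace model of that lookup-and-complete step (the pending_req table, its contents and all names here are assumptions for illustration only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct pending_req {
	uint32_t xid;
	int	 completed;
};

/* Stand-in for the transport's pending-request list. */
static struct pending_req reqs[] = { { 0x1001, 0 }, { 0x1002, 0 } };
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror the handler's shape: look the xid up under the queue lock,
 * complete on a hit, and silently drop unmatched replies. */
static void handle_bc_reply(uint32_t xid)
{
	size_t i;

	pthread_mutex_lock(&queue_lock);
	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		if (reqs[i].xid == xid) {
			reqs[i].completed = 1;
			break;
		}
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	handle_bc_reply(0x1002);	/* matches a pending request */
	handle_bc_reply(0xdead);	/* unknown xid: dropped, no error */
	printf("0x1002 completed: %d\n", reqs[1].completed);
	return 0;
}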
@@ -242,6 +224,8 @@ static void xprt_rdma_bc_close(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + + xprt_disconnect_done(xprt); xprt->cwnd = RPC_CWNDSHIFT; } @@ -250,6 +234,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + xprt_rdma_free_addresses(xprt); xprt_free(xprt); } @@ -300,9 +285,9 @@ xprt_setup_rdma_bc(struct xprt_create *args) xprt->timeout = &xprt_rdma_bc_timeout; xprt_set_bound(xprt); xprt_set_connected(xprt); - xprt->bind_timeout = RPCRDMA_BIND_TO; - xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; - xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; + xprt->bind_timeout = 0; + xprt->reestablish_timeout = 0; + xprt->idle_timeout = 0; xprt->prot = XPRT_TRANSPORT_BC_RDMA; xprt->ops = &xprt_rdma_bc_procs; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 96bccd398469..fd5c1f1bb988 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma) out: ctxt->rc_page_count = 0; + ctxt->rc_read_payload_length = 0; return ctxt; out_empty: @@ -222,6 +223,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, svc_rdma_recv_ctxt_destroy(rdma, ctxt); } +/** + * svc_rdma_release_rqst - Release transport-specific per-rqst resources + * @rqstp: svc_rqst being released + * + * Ensure that the recv_ctxt is released whether or not a Reply + * was sent. For example, the client could close the connection, + * or svc_process could drop an RPC, before the Reply is sent. + */ +void svc_rdma_release_rqst(struct svc_rqst *rqstp) +{ + struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt; + struct svc_xprt *xprt = rqstp->rq_xprt; + struct svcxprt_rdma *rdma = + container_of(xprt, struct svcxprt_rdma, sc_xprt); + + rqstp->rq_xprt_ctxt = NULL; + if (ctxt) + svc_rdma_recv_ctxt_put(rdma, ctxt); +} + static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt) { @@ -244,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma) { struct svc_rdma_recv_ctxt *ctxt; + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) + return 0; ctxt = svc_rdma_recv_ctxt_get(rdma); if (!ctxt) return -ENOMEM; @@ -756,6 +779,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) __be32 *p; int ret; + rqstp->rq_xprt_ctxt = NULL; + spin_lock(&rdma_xprt->sc_rq_dto_lock); ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q); if (ctxt) { @@ -792,12 +817,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) goto out_drop; rqstp->rq_xprt_hlen = ret; - if (svc_rdma_is_backchannel_reply(xprt, p)) { - ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, - &rqstp->rq_arg); - svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); - return ret; - } + if (svc_rdma_is_backchannel_reply(xprt, p)) + goto out_backchannel; + svc_rdma_get_inv_rkey(rdma_xprt, ctxt); p += rpcrdma_fixed_maxsz; @@ -827,6 +849,8 @@ out_postfail: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return ret; +out_backchannel: + svc_rdma_handle_bc_reply(rqstp, ctxt); out_drop: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return 0; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 48fe3b16b0d9..0bb3f0dca80d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -323,8 +323,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { ret = ib_post_send(rdma->sc_qp, first_wr, 
&bad_wr); - trace_svcrdma_post_rw(&cc->cc_cqe, - cc->cc_sqecount, ret); if (ret) break; return 0; @@ -337,6 +335,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) trace_svcrdma_sq_retry(rdma); } while (1); + trace_svcrdma_sq_post_err(rdma, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); /* If even one was posted, there will be a completion. */ @@ -482,18 +481,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info, vec->iov_len); } -/* Send an xdr_buf's page list by itself. A Write chunk is - * just the page list. a Reply chunk is the head, page list, - * and tail. This function is shared between the two types - * of chunk. +/* Send an xdr_buf's page list by itself. A Write chunk is just + * the page list. A Reply chunk is @xdr's head, page list, and + * tail. This function is shared between the two types of chunk. */ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, + unsigned long length) { info->wi_xdr = xdr; - info->wi_next_off = 0; + info->wi_next_off = offset - xdr->head[0].iov_len; return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg, - xdr->page_len); + length); } /** @@ -501,6 +501,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * @rdma: controlling RDMA transport * @wr_ch: Write chunk provided by client * @xdr: xdr_buf containing the data payload + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes * * Returns a non-negative number of bytes the chunk consumed, or * %-E2BIG if the payload was larger than the Write chunk, @@ -510,19 +512,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * %-EIO if rdma_rw initialization failed (DMA mapping, etc). */ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, unsigned long length) { struct svc_rdma_write_info *info; int ret; - if (!xdr->page_len) + if (!length) return 0; info = svc_rdma_write_info_alloc(rdma, wr_ch); if (!info) return -ENOMEM; - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length); if (ret < 0) goto out_err; @@ -531,7 +534,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, goto out_err; trace_svcrdma_encode_write(xdr->page_len); - return xdr->page_len; + return length; out_err: svc_rdma_write_info_free(info); @@ -571,7 +574,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, * client did not provide Write chunks. */ if (!writelist && xdr->page_len) { - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, + xdr->head[0].iov_len, + xdr->page_len); if (ret < 0) goto out_err; consumed += xdr->page_len; @@ -672,7 +677,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, struct svc_rdma_read_info *info, __be32 *p) { - unsigned int i; int ret; ret = -EINVAL; @@ -695,12 +699,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, info->ri_chunklen += rs_length; } - /* Pages under I/O have been copied to head->rc_pages. - * Prevent their premature release by svc_xprt_release() . - */ - for (i = 0; i < info->ri_readctxt->rc_page_count; i++) - rqstp->rq_pages[i] = NULL; - return ret; } @@ -795,6 +793,26 @@ out: return ret; } +/* Pages under I/O have been copied to head->rc_pages. Ensure they + * are not released by svc_xprt_release() until the I/O is complete. 
+ * + * This has to be done after all Read WRs are constructed to properly + * handle a page that is part of I/O on behalf of two different RDMA + * segments. + * + * Do this only if I/O has been posted. Otherwise, we do indeed want + * svc_xprt_release() to clean things up properly. + */ +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + const unsigned int start, + const unsigned int num_pages) +{ + unsigned int i; + + for (i = start; i < num_pages + start; i++) + rqstp->rq_pages[i] = NULL; +} + /** * svc_rdma_recv_read_chunk - Pull a Read chunk from the client * @rdma: controlling RDMA transport @@ -848,6 +866,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, ret = svc_rdma_post_chunk_ctxt(&info->ri_cc); if (ret < 0) goto out_err; + svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count); return 0; out_err: diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 6fdba72f89f4..25e8922c10b2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -306,15 +306,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); + trace_svcrdma_post_send(wr); ret = ib_post_send(rdma->sc_qp, wr, NULL); - trace_svcrdma_post_send(wr, ret); - if (ret) { - set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); - svc_xprt_put(&rdma->sc_xprt); - wake_up(&rdma->sc_send_wait); - } - break; + if (ret) + break; + return 0; } + + trace_svcrdma_sq_post_err(rdma, ret); + set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + svc_xprt_put(&rdma->sc_xprt); + wake_up(&rdma->sc_send_wait); return ret; } @@ -607,10 +609,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, while (remaining) { len = min_t(u32, PAGE_SIZE - pageoff, remaining); - memcpy(dst, page_address(*ppages), len); + memcpy(dst, page_address(*ppages) + pageoff, len); remaining -= len; dst += len; pageoff = 0; + ppages++; } } @@ -854,7 +857,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) if (wr_lst) { /* XXX: Presume the client sent only one Write chunk */ - ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr); + unsigned long offset; + unsigned int length; + + if (rctxt->rc_read_payload_length) { + offset = rctxt->rc_read_payload_offset; + length = rctxt->rc_read_payload_length; + } else { + offset = xdr->head[0].iov_len; + length = xdr->page_len; + } + ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset, + length); if (ret < 0) goto err2; svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); @@ -871,12 +885,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) wr_lst, rp_ch); if (ret < 0) goto err1; - ret = 0; - -out: - rqstp->rq_xprt_ctxt = NULL; - svc_rdma_recv_ctxt_put(rdma, rctxt); - return ret; + return 0; err2: if (ret != -E2BIG && ret != -EINVAL) @@ -885,14 +894,39 @@ out: ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp); if (ret < 0) goto err1; - ret = 0; - goto out; + return 0; err1: svc_rdma_send_ctxt_put(rdma, sctxt); err0: trace_svcrdma_send_failed(rqstp, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); - ret = -ENOTCONN; - goto out; + return -ENOTCONN; +} + +/** + * svc_rdma_read_payload - special processing for a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes + * + * Returns zero on success. + * + * For the moment, just record the xdr_buf location of the READ + * payload. svc_rdma_sendto will use that location later when + * we actually send the payload. 
+ */ +int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; + + /* XXX: Just one READ payload slot for now, since our + * transport implementation currently supports only one + * Write chunk. + */ + rctxt->rc_read_payload_offset = offset; + rctxt->rc_read_payload_length = length; + + return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 145a3615c319..89a12676c59d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct sockaddr *sa, int salen, int flags); static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); -static void svc_rdma_release_rqst(struct svc_rqst *); static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct svc_xprt *xprt); static int svc_rdma_has_wspace(struct svc_xprt *xprt); @@ -82,6 +81,7 @@ static const struct svc_xprt_ops svc_rdma_ops = { .xpo_create = svc_rdma_create, .xpo_recvfrom = svc_rdma_recvfrom, .xpo_sendto = svc_rdma_sendto, + .xpo_read_payload = svc_rdma_read_payload, .xpo_release_rqst = svc_rdma_release_rqst, .xpo_detach = svc_rdma_detach, .xpo_free = svc_rdma_free, @@ -558,10 +558,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) return NULL; } -static void svc_rdma_release_rqst(struct svc_rqst *rqstp) -{ -} - /* * When connected, an svc_xprt has at least two references: * diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index c67d465dc062..2f21e3c52bfc 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -827,6 +827,7 @@ static struct xprt_class xprt_rdma = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_RDMA, .setup = xprt_setup_rdma, + .netid = { "rdma", "rdma6", "" }, }; void xprt_rdma_cleanup(void) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 5361b98f31ae..8ffc54b6661f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -432,7 +432,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, if (ret <= 0) goto sock_err; xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); - offset += ret - buf->page_base; + ret -= buf->page_base; + offset += ret; if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) goto out; if (ret != want) @@ -2714,6 +2715,7 @@ static int bc_send_request(struct rpc_rqst *req) static void bc_close(struct rpc_xprt *xprt) { + xprt_disconnect_done(xprt); } /* @@ -3203,6 +3205,7 @@ static struct xprt_class xs_local_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_LOCAL, .setup = xs_setup_local, + .netid = { "" }, }; static struct xprt_class xs_udp_transport = { @@ -3211,6 +3214,7 @@ static struct xprt_class xs_udp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_UDP, .setup = xs_setup_udp, + .netid = { "udp", "udp6", "" }, }; static struct xprt_class xs_tcp_transport = { @@ -3219,6 +3223,7 @@ static struct xprt_class xs_tcp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_TCP, .setup = xs_setup_tcp, + .netid = { "tcp", "tcp6", "" }, }; static struct xprt_class xs_bc_tcp_transport = { @@ -3227,6 +3232,7 @@ static struct xprt_class xs_bc_tcp_transport = { .owner = THIS_MODULE, .ident = XPRT_TRANSPORT_BC_TCP, .setup = xs_setup_bc_tcp, + .netid = { "" }, }; /** diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 
3a1d428c1336..ea9ddea35a88 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -461,10 +461,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev, extack = switchdev_notifier_info_to_extack(&port_obj_info->info); if (check_cb(dev)) { - /* This flag is only checked if the return value is success. */ - port_obj_info->handled = true; - return add_cb(dev, port_obj_info->obj, port_obj_info->trans, - extack); + err = add_cb(dev, port_obj_info->obj, port_obj_info->trans, + extack); + if (err != -EOPNOTSUPP) + port_obj_info->handled = true; + return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the @@ -513,9 +514,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev, int err = -EOPNOTSUPP; if (check_cb(dev)) { - /* This flag is only checked if the return value is success. */ - port_obj_info->handled = true; - return del_cb(dev, port_obj_info->obj); + err = del_cb(dev, port_obj_info->obj); + if (err != -EOPNOTSUPP) + port_obj_info->handled = true; + return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the @@ -563,9 +565,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev, int err = -EOPNOTSUPP; if (check_cb(dev)) { - port_attr_info->handled = true; - return set_cb(dev, port_attr_info->attr, - port_attr_info->trans); + err = set_cb(dev, port_attr_info->attr, port_attr_info->trans); + if (err != -EOPNOTSUPP) + port_attr_info->handled = true; + return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the diff --git a/net/tipc/core.c b/net/tipc/core.c index 12192e7f4050..e3d79f8b69d8 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -59,6 +59,7 @@ static int __net_init tipc_init_net(struct net *net) tn->trial_addr = 0; tn->addr_trial_end = 0; tn->capabilities = TIPC_NODE_CAPABILITIES; + INIT_WORK(&tn->final_work.work, tipc_net_finalize_work); memset(tn->node_id, 0, sizeof(tn->node_id)); memset(tn->node_id_string, 0, sizeof(tn->node_id_string)); tn->mon_threshold = TIPC_DEF_MON_THRESHOLD; @@ -96,8 +97,13 @@ out_sk_rht: static void __net_exit tipc_exit_net(struct net *net) { + struct tipc_net *tn = tipc_net(net); + tipc_detach_loopback(net); + /* Make sure the tipc_net_finalize_work() finished */ + cancel_work_sync(&tn->final_work.work); tipc_net_stop(net); + tipc_bcast_stop(net); tipc_nametbl_stop(net); tipc_sk_rht_destroy(net); diff --git a/net/tipc/core.h b/net/tipc/core.h index 3042f654e0af..e119c4a88d63 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -86,6 +86,12 @@ extern unsigned int tipc_net_id __read_mostly; extern int sysctl_tipc_rmem[3] __read_mostly; extern int sysctl_tipc_named_timeout __read_mostly; +struct tipc_net_work { + struct work_struct work; + struct net *net; + u32 addr; +}; + struct tipc_net { u8 node_id[NODE_ID_LEN]; u32 node_addr; @@ -134,6 +140,9 @@ struct tipc_net { /* Tracing of node internal messages */ struct packet_type loopback_pt; + + /* Work item for net finalize */ + struct tipc_net_work final_work; }; static inline struct tipc_net *tipc_net(struct net *net) diff --git a/net/tipc/group.c b/net/tipc/group.c index 89257e2a980d..f53871baa42e 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, return NULL; } -static void tipc_group_add_to_tree(struct tipc_group *grp, - struct tipc_member *m) +static int tipc_group_add_to_tree(struct tipc_group *grp, + struct tipc_member *m) { u64 nkey, key = (u64)m->node << 32 | m->port; struct rb_node **n, 
*parent = NULL; @@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, else if (key > nkey) n = &(*n)->rb_right; else - return; + return -EEXIST; } rb_link_node(&m->tree_node, parent, n); rb_insert_color(&m->tree_node, &grp->members); + return 0; } static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, @@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, u32 instance, int state) { struct tipc_member *m; + int ret; m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) @@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, m->port = port; m->instance = instance; m->bc_acked = grp->bc_snd_nxt - 1; + ret = tipc_group_add_to_tree(grp, m); + if (ret < 0) { + kfree(m); + return NULL; + } grp->member_cnt++; - tipc_group_add_to_tree(grp, m); tipc_nlist_add(&grp->dests, m->node); m->state = state; return m; diff --git a/net/tipc/link.c b/net/tipc/link.c index a9d8a81e80cf..f25010261a9e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -939,9 +939,7 @@ void tipc_link_reset(struct tipc_link *l) int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *xmitq) { - struct tipc_msg *hdr = buf_msg(skb_peek(list)); unsigned int maxwin = l->window; - int imp = msg_importance(hdr); unsigned int mtu = l->mtu; u16 ack = l->rcv_nxt - 1; u16 seqno = l->snd_nxt; @@ -950,8 +948,14 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *backlogq = &l->backlogq; struct sk_buff *skb, *_skb, **tskb; int pkt_cnt = skb_queue_len(list); + struct tipc_msg *hdr; int rc = 0; + int imp; + if (pkt_cnt <= 0) + return 0; + + hdr = buf_msg(skb_peek(list)); if (unlikely(msg_size(hdr) > mtu)) { pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n", skb_queue_len(list), msg_user(hdr), @@ -960,6 +964,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, return -EMSGSIZE; } + imp = msg_importance(hdr); /* Allow oversubscription of one data msg per source at congestion */ if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { if (imp == TIPC_SYSTEM_IMPORTANCE) { diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 922d262e153f..46e89c992c2d 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -140,10 +140,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) + *buf = NULL; + frag = skb_unshare(frag, GFP_ATOMIC); + if (unlikely(!frag)) goto err; head = *headbuf = frag; - *buf = NULL; TIPC_SKB_CB(head)->tail = NULL; if (skb_is_nonlinear(head)) { skb_walk_frags(head, tail) { diff --git a/net/tipc/net.c b/net/tipc/net.c index 2de3cec9929d..2498ce8b83c1 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -105,12 +105,6 @@ * - A local spin_lock protecting the queue of subscriber events. 
*/ -struct tipc_net_work { - struct work_struct work; - struct net *net; - u32 addr; -}; - static void tipc_net_finalize(struct net *net, u32 addr); int tipc_net_init(struct net *net, u8 *node_id, u32 addr) @@ -142,25 +136,21 @@ static void tipc_net_finalize(struct net *net, u32 addr) TIPC_CLUSTER_SCOPE, 0, addr); } -static void tipc_net_finalize_work(struct work_struct *work) +void tipc_net_finalize_work(struct work_struct *work) { struct tipc_net_work *fwork; fwork = container_of(work, struct tipc_net_work, work); tipc_net_finalize(fwork->net, fwork->addr); - kfree(fwork); } void tipc_sched_net_finalize(struct net *net, u32 addr) { - struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC); + struct tipc_net *tn = tipc_net(net); - if (!fwork) - return; - INIT_WORK(&fwork->work, tipc_net_finalize_work); - fwork->net = net; - fwork->addr = addr; - schedule_work(&fwork->work); + tn->final_work.net = net; + tn->final_work.addr = addr; + schedule_work(&tn->final_work.work); } void tipc_net_stop(struct net *net) diff --git a/net/tipc/net.h b/net/tipc/net.h index b7f2e364eb99..a6a4dba13673 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -42,6 +42,7 @@ extern const struct nla_policy tipc_nl_net_policy[]; int tipc_net_init(struct net *net, u8 *node_id, u32 addr); +void tipc_net_finalize_work(struct work_struct *work); void tipc_sched_net_finalize(struct net *net, u32 addr); void tipc_net_stop(struct net *net); int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index d4d2928424e2..11be9a84f8de 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -255,8 +255,9 @@ err_out: static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) { - int err; + struct nlmsghdr *nlh; struct sk_buff *arg; + int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) @@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); + if (!nlh) { + kfree_skb(arg); + kfree_skb(msg->rep); + msg->rep = NULL; + return -EMSGSIZE; + } + nlmsg_end(arg, nlh); + err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index aea951a1f805..b2c36dcfc8e2 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -260,12 +260,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) * * Caller must hold socket lock */ -static void tsk_rej_rx_queue(struct sock *sk) +static void tsk_rej_rx_queue(struct sock *sk, int error) { struct sk_buff *skb; while ((skb = __skb_dequeue(&sk->sk_receive_queue))) - tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); + tipc_sk_respond(sk, skb, error); } static bool tipc_sk_connected(struct sock *sk) @@ -515,34 +515,45 @@ static void __tipc_shutdown(struct socket *sock, int error) /* Remove any pending SYN message */ __skb_queue_purge(&sk->sk_write_queue); - /* Reject all unreceived messages, except on an active connection - * (which disconnects locally & sends a 'FIN+' to peer). 
- */ - while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - if (TIPC_SKB_CB(skb)->bytes_read) { - kfree_skb(skb); - continue; - } - if (!tipc_sk_type_connectionless(sk) && - sk->sk_state != TIPC_DISCONNECTING) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - tipc_node_remove_conn(net, dnode, tsk->portid); - } - tipc_sk_respond(sk, skb, error); + /* Remove partially received buffer if any */ + skb = skb_peek(&sk->sk_receive_queue); + if (skb && TIPC_SKB_CB(skb)->bytes_read) { + __skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); } - if (tipc_sk_type_connectionless(sk)) + /* Reject all unreceived messages if connectionless */ + if (tipc_sk_type_connectionless(sk)) { + tsk_rej_rx_queue(sk, error); return; + } - if (sk->sk_state != TIPC_DISCONNECTING) { + switch (sk->sk_state) { + case TIPC_CONNECTING: + case TIPC_ESTABLISHED: + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, dnode, tsk->portid); + /* Send a FIN+/- to its peer */ + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb) { + __skb_queue_purge(&sk->sk_receive_queue); + tipc_sk_respond(sk, skb, error); + break; + } skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tsk_own_node(tsk), tsk_peer_port(tsk), tsk->portid, error); if (skb) tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - tipc_node_remove_conn(net, dnode, tsk->portid); - tipc_set_sk_state(sk, TIPC_DISCONNECTING); + break; + case TIPC_LISTEN: + /* Reject all SYN messages */ + tsk_rej_rx_queue(sk, error); + break; + default: + __skb_queue_purge(&sk->sk_receive_queue); + break; } } @@ -1199,7 +1210,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, spin_lock_bh(&inputq->lock); if (skb_peek(arrvq) == skb) { skb_queue_splice_tail_init(&tmpq, inputq); - kfree_skb(__skb_dequeue(arrvq)); + __skb_dequeue(arrvq); } spin_unlock_bh(&inputq->lock); __skb_queue_purge(&tmpq); @@ -2564,7 +2575,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - tsk_rej_rx_queue(new_sk); + tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT); /* Connect new socket to it's peer */ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); @@ -2616,18 +2627,18 @@ static int tipc_shutdown(struct socket *sock, int how) trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); - /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. 
*/ + sk->sk_state_change(sk); release_sock(sk); return res; diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 3a12fc18239b..444e1792d02c 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -400,12 +400,15 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) return -EWOULDBLOCK; if (ret == sizeof(s)) { read_lock_bh(&sk->sk_callback_lock); - ret = tipc_conn_rcv_sub(srv, con, &s); + /* RACE: the connection can be closed in the meantime */ + if (likely(connected(con))) + ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); + if (!ret) + return 0; } - if (ret < 0) - tipc_conn_close(con); + tipc_conn_close(con); return ret; } @@ -661,12 +664,18 @@ static int tipc_topsrv_start(struct net *net) ret = tipc_topsrv_work_start(srv); if (ret < 0) - return ret; + goto err_start; ret = tipc_topsrv_create_listener(srv); if (ret < 0) - tipc_topsrv_work_stop(srv); + goto err_create; + return 0; + +err_create: + tipc_topsrv_work_stop(srv); +err_start: + kfree(srv); return ret; } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 186c78431217..8f0977a9d423 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -161,9 +161,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, struct udp_bearer *ub, struct udp_media_addr *src, struct udp_media_addr *dst, struct dst_cache *cache) { - struct dst_entry *ndst = dst_cache_get(cache); + struct dst_entry *ndst; int ttl, err = 0; + local_bh_disable(); + ndst = dst_cache_get(cache); if (dst->proto == htons(ETH_P_IP)) { struct rtable *rt = (struct rtable *)ndst; @@ -210,9 +212,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, src->port, dst->port, false); #endif } + local_bh_enable(); return err; tx_error: + local_bh_enable(); kfree_skb(skb); return err; } diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 1adeb1c0473b..0f034c3bc37d 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -405,14 +405,14 @@ static int tls_push_data(struct sock *sk, struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; int tls_push_record_flags; struct page_frag *pfrag; size_t orig_size = size; u32 max_open_record_len; - int copy, rc = 0; + bool more = false; bool done = false; + int copy, rc = 0; long timeo; if (flags & @@ -480,9 +480,8 @@ handle_error: if (!size) { last_record: tls_push_record_flags = flags; - if (more) { - tls_ctx->pending_open_record_frags = - !!record->num_frags; + if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) { + more = true; break; } @@ -514,6 +513,8 @@ last_record: } } while (!done); + tls_ctx->pending_open_record_frags = more; + if (orig_size - size > 0) rc = orig_size - size; @@ -549,7 +550,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct iov_iter msg_iter; - char *kaddr = kmap(page); + char *kaddr; struct kvec iov; int rc; @@ -564,6 +565,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, goto out; } + kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size); @@ -1161,6 +1163,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk) if (tls_ctx->tx_conf != TLS_HW) { dev_put(netdev); tls_ctx->netdev = NULL; + } else { + set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags); } out: 
up_read(&device_offload_lock); @@ -1190,7 +1194,8 @@ static int tls_device_down(struct net_device *netdev) if (ctx->tx_conf == TLS_HW) netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); - if (ctx->rx_conf == TLS_HW) + if (ctx->rx_conf == TLS_HW && + !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_RX); WRITE_ONCE(ctx->netdev, NULL); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 7aba4ee77aba..4b51090ce45b 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -808,7 +808,8 @@ static void tls_update(struct sock *sk, struct proto *p, ctx->sk_write_space = write_space; ctx->sk_proto = p; } else { - sk->sk_prot = p; + /* Pairs with lockless read in sk_clone_lock(). */ + WRITE_ONCE(sk->sk_prot, p); sk->sk_write_space = write_space; } } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 41e9c2932b34..0d524ef0d8c8 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -203,10 +203,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err) kfree(aead_req); + spin_lock_bh(&ctx->decrypt_compl_lock); pending = atomic_dec_return(&ctx->decrypt_pending); - if (!pending && READ_ONCE(ctx->async_notify)) + if (!pending && ctx->async_notify) complete(&ctx->async_wait.completion); + spin_unlock_bh(&ctx->decrypt_compl_lock); } static int tls_do_decryption(struct sock *sk, @@ -464,10 +466,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err) ready = true; } + spin_lock_bh(&ctx->encrypt_compl_lock); pending = atomic_dec_return(&ctx->encrypt_pending); - if (!pending && READ_ONCE(ctx->async_notify)) + if (!pending && ctx->async_notify) complete(&ctx->async_wait.completion); + spin_unlock_bh(&ctx->encrypt_compl_lock); if (!ready) return; @@ -777,7 +781,7 @@ static int tls_push_record(struct sock *sk, int flags, static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, bool full_record, u8 record_type, - size_t *copied, int flags) + ssize_t *copied, int flags) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); @@ -793,10 +797,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, psock = sk_psock_get(sk); if (!psock || !policy) { err = tls_push_record(sk, flags, record_type); - if (err && err != -EINPROGRESS) { + if (err && sk->sk_err == EBADMSG) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); + err = -sk->sk_err; } + if (psock) + sk_psock_put(sk, psock); return err; } more_data: @@ -819,9 +826,10 @@ more_data: switch (psock->eval) { case __SK_PASS: err = tls_push_record(sk, flags, record_type); - if (err && err != -EINPROGRESS) { + if (err && sk->sk_err == EBADMSG) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); + err = -sk->sk_err; goto out_err; } break; @@ -911,7 +919,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) unsigned char record_type = TLS_RECORD_TYPE_DATA; bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); bool eor = !(msg->msg_flags & MSG_MORE); - size_t try_to_copy, copied = 0; + size_t try_to_copy; + ssize_t copied = 0; struct sk_msg *msg_pl, *msg_en; struct tls_rec *rec; int required_size; @@ -921,6 +930,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int num_zc = 0; int orig_size; int ret = 0; + int pending; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) return -EOPNOTSUPP; @@ -1087,13 +1097,19 @@ trim_sgl: goto send_end; } else if (num_zc) { /* Wait for pending encryptions to get completed 
*/ - smp_store_mb(ctx->async_notify, true); + spin_lock_bh(&ctx->encrypt_compl_lock); + ctx->async_notify = true; - if (atomic_read(&ctx->encrypt_pending)) + pending = atomic_read(&ctx->encrypt_pending); + spin_unlock_bh(&ctx->encrypt_compl_lock); + if (pending) crypto_wait_req(-EINPROGRESS, &ctx->async_wait); else reinit_completion(&ctx->async_wait.completion); + /* There can be no concurrent accesses, since we have no + * pending encrypt operations + */ WRITE_ONCE(ctx->async_notify, false); if (ctx->async_wait.err) { @@ -1113,7 +1129,7 @@ send_end: release_sock(sk); mutex_unlock(&tls_ctx->tx_lock); - return copied ? copied : ret; + return copied > 0 ? copied : ret; } static int tls_sw_do_sendpage(struct sock *sk, struct page *page, @@ -1127,7 +1143,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page, struct sk_msg *msg_pl; struct tls_rec *rec; int num_async = 0; - size_t copied = 0; + ssize_t copied = 0; bool full_record; int record_room; int ret = 0; @@ -1229,7 +1245,7 @@ wait_for_memory: } sendpage_end: ret = sk_stream_error(sk, flags, ret); - return copied ? copied : ret; + return copied > 0 ? copied : ret; } int tls_sw_sendpage_locked(struct sock *sk, struct page *page, @@ -1275,6 +1291,12 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock, return NULL; } + if (!skb_queue_empty(&sk->sk_receive_queue)) { + __strp_unpause(&ctx->strp); + if (ctx->recv_pkt) + return ctx->recv_pkt; + } + if (sk->sk_shutdown & RCV_SHUTDOWN) return NULL; @@ -1721,7 +1743,9 @@ int tls_sw_recvmsg(struct sock *sk, long timeo; bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); bool is_peek = flags & MSG_PEEK; + bool bpf_strp_enabled; int num_async = 0; + int pending; flags |= nonblock; @@ -1730,6 +1754,7 @@ int tls_sw_recvmsg(struct sock *sk, psock = sk_psock_get(sk); lock_sock(sk); + bpf_strp_enabled = sk_psock_strp_enabled(psock); /* Process pending decrypted records. 
It must be non-zero-copy */ err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false, @@ -1783,11 +1808,12 @@ int tls_sw_recvmsg(struct sock *sk, if (to_decrypt <= len && !is_kvec && !is_peek && ctx->control == TLS_RECORD_TYPE_DATA && - prot->version != TLS_1_3_VERSION) + prot->version != TLS_1_3_VERSION && + !bpf_strp_enabled) zc = true; /* Do not use async mode if record is non-data */ - if (ctx->control == TLS_RECORD_TYPE_DATA) + if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) async_capable = ctx->async_capable; else async_capable = false; @@ -1837,6 +1863,19 @@ int tls_sw_recvmsg(struct sock *sk, goto pick_next_record; if (!zc) { + if (bpf_strp_enabled) { + err = sk_psock_tls_strp_read(psock, skb); + if (err != __SK_PASS) { + rxm->offset = rxm->offset + rxm->full_len; + rxm->full_len = 0; + if (err == __SK_DROP) + consume_skb(skb); + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + continue; + } + } + if (rxm->full_len > len) { retain_skb = true; chunk = len; @@ -1874,7 +1913,7 @@ pick_next_record: * another message type */ msg->msg_flags |= MSG_EOR; - if (ctx->control != TLS_RECORD_TYPE_DATA) + if (control != TLS_RECORD_TYPE_DATA) goto recv_end; } else { break; @@ -1884,8 +1923,11 @@ pick_next_record: recv_end: if (num_async) { /* Wait for all previously submitted records to be decrypted */ - smp_store_mb(ctx->async_notify, true); - if (atomic_read(&ctx->decrypt_pending)) { + spin_lock_bh(&ctx->decrypt_compl_lock); + ctx->async_notify = true; + pending = atomic_read(&ctx->decrypt_pending); + spin_unlock_bh(&ctx->decrypt_compl_lock); + if (pending) { err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); if (err) { /* one of async decrypt failed */ @@ -1897,6 +1939,10 @@ recv_end: } else { reinit_completion(&ctx->async_wait.completion); } + + /* There can be no concurrent accesses, since we have no + * pending decrypt operations + */ WRITE_ONCE(ctx->async_notify, false); /* Drain records from the rx_list & copy if required */ @@ -2076,8 +2122,9 @@ static void tls_data_ready(struct sock *sk) strp_data_ready(&ctx->strp); psock = sk_psock_get(sk); - if (psock && !list_empty(&psock->ingress_msg)) { - ctx->saved_data_ready(sk); + if (psock) { + if (!list_empty(&psock->ingress_msg)) + ctx->saved_data_ready(sk); sk_psock_put(sk, psock); } } @@ -2096,10 +2143,15 @@ void tls_sw_release_resources_tx(struct sock *sk) struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); struct tls_rec *rec, *tmp; + int pending; /* Wait for any pending async encryptions to complete */ - smp_store_mb(ctx->async_notify, true); - if (atomic_read(&ctx->encrypt_pending)) + spin_lock_bh(&ctx->encrypt_compl_lock); + ctx->async_notify = true; + pending = atomic_read(&ctx->encrypt_pending); + spin_unlock_bh(&ctx->encrypt_compl_lock); + + if (pending) crypto_wait_req(-EINPROGRESS, &ctx->async_wait); tls_tx_records(sk, -1); @@ -2282,6 +2334,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) if (tx) { crypto_init_wait(&sw_ctx_tx->async_wait); + spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); crypto_info = &ctx->crypto_send.info; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; @@ -2290,6 +2343,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) sw_ctx_tx->tx_work.sk = sk; } else { crypto_init_wait(&sw_ctx_rx->async_wait); + spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); crypto_info = &ctx->crypto_recv.info; cctx = &ctx->rx; skb_queue_head_init(&sw_ctx_rx->rx_list); diff --git a/net/toa/toa.c 
b/net/toa/toa.c index 82084e59597a..e0acaab03bad 100644 --- a/net/toa/toa.c +++ b/net/toa/toa.c @@ -1,4 +1,15 @@ #include "toa.h" +#include <net/genetlink.h> +#include <net/inet_common.h> +#include <net/inet_connection_sock.h> +#include <net/inet_hashtables.h> + +#define NIPQUAD_FMT "%u.%u.%u.%u" +#define NIPQUAD(addr) \ + ((unsigned char *)&addr)[0], \ + ((unsigned char *)&addr)[1], \ + ((unsigned char *)&addr)[2], \ + ((unsigned char *)&addr)[3] /* * TOA a new Tcp Option as Address, * 2 of the License, or (at your option) any later version. * */ +static int vtoa; +module_param(vtoa, int, 0600); +MODULE_PARM_DESC(vtoa, "vtoa module control. Value can be 0 or 1; default is 0."); + +#define VTOA_GETPEERNAME_IPV4_DISABLE (0x1 << 0) +#define VTOA_GETPEERNAME_IPV6_DISABLE (0x1 << 1) + +static int disable_getpeername; +module_param(disable_getpeername, int, 0600); +MODULE_PARM_DESC(disable_getpeername, \ + "disable the vtoa getpeername() override: 1 ipv4, 2 ipv6, 3 ipv4 & ipv6; default is 0."); + +static int dbg; +module_param(dbg, int, 0600); +MODULE_PARM_DESC(dbg, "debug log switch"); unsigned long sk_data_ready_addr = 0; @@ -33,12 +59,348 @@ struct toa_stats_entry toa_stats[] = { TOA_STAT_ITEM("getname_toa_mismatch", GETNAME_TOA_MISMATCH_CNT), TOA_STAT_ITEM("getname_toa_bypass", GETNAME_TOA_BYPASS_CNT), TOA_STAT_ITEM("getname_toa_empty", GETNAME_TOA_EMPTY_CNT), + TOA_STAT_ITEM("vtoa_get", GETNAME_VTOA_GET_CNT), + TOA_STAT_ITEM("vtoa_attr_err", GETNAME_VTOA_GET_ATTR_ERR_CNT), + TOA_STAT_ITEM("vtoa_lookup_err", GETNAME_VTOA_GET_LOCKUP_ERR_CNT), + TOA_STAT_ITEM("vtoa_netlink_err", GETNAME_VTOA_GET_NETLINK_ERR_CNT), + TOA_STAT_ITEM("getname_vtoa_ipv4_err", GETNAME_VTOA_GETPEERNAME_IPV4_ERR_CNT), + TOA_STAT_ITEM("getname_vtoa_ipv6_err", GETNAME_VTOA_GETPEERNAME_IPV6_ERR_CNT), TOA_STAT_END }; DEFINE_TOA_STAT(struct toa_stat_mib, ext_stats); +enum { + VTOA_CMD_UNSPEC, + VTOA_CMD_GET, + + __VTOA_CMD_MAX +}; +#define VTOA_CMD_MAX (__VTOA_CMD_MAX - 1) + +enum { + VTOA_ATTR_UNSPEC, + VTOA_ATTR_REQ, + + VTOA_ATTR_I_VPCID, + VTOA_ATTR_I_VMIP, + VTOA_ATTR_I_SPORT, + VTOA_ATTR_I_VIP, + VTOA_ATTR_I_VPORT, + __VTOA_ATTR_MAX +}; +#define VTOA_ATTR_MAX (__VTOA_ATTR_MAX - 1) + +static struct nla_policy vtoa_policy[VTOA_ATTR_MAX + 1] = { + [VTOA_ATTR_REQ] = { .type = NLA_STRING }, + [VTOA_ATTR_I_VPCID] = { .type = NLA_U32 }, + [VTOA_ATTR_I_VMIP] = { .type = NLA_U32 }, + [VTOA_ATTR_I_SPORT] = { .type = NLA_U16 }, + [VTOA_ATTR_I_VIP] = { .type = NLA_U32 }, + [VTOA_ATTR_I_VPORT] = { .type = NLA_U16 }, +}; +static struct genl_family vtoa_genl_family; + +struct vtoa_req { + u32 if_idx; + __be32 sip; + __be32 dip; + __be16 sport; + __be16 dport; +}; + +struct vtoa_info { + u32 i_vpcid; + __be32 i_vmip; + __be32 i_vip; + __be16 i_sport; + __be16 i_vport; +}; + +/* + * Funcs for vtoa hooks + */ + +/* Parse TCP options in skb; try to get the client ip, port, vpcid, vmip and vport. + * @param skb [in] received skb; it should be an ack/get-ack packet. + * @return 0 if we got no vtoa data; + * a positive count of the vtoa options parsed; + * -1 if something went wrong.
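+ * + * For illustration only: a handshake packet carrying all three private + * options (client ip/port, vpcid and vip/vport) makes this parser return 3, + * while a packet carrying none of them returns 0.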
+ */ +static int get_vtoa_data(struct sk_buff *skb, struct sock *sk) +{ + struct tcphdr *th; + int length; + unsigned char *ptr; + int is_toa = 0; + + /*TOA_DBG("get_vtoa_data called\n");*/ + + if (NULL != skb) { + th = tcp_hdr(skb); + length = (th->doff * 4) - sizeof(struct tcphdr); + ptr = (unsigned char *) (th + 1); + + while (length > 0) { + int opcode = *ptr++; + int opsize; + switch (opcode) { + case TCPOPT_EOL: + return -1; + case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ + length--; + continue; + default: + opsize = *ptr++; + if (opsize < 2) /* "silly options" */ + return -1; + if (opsize > length) + /* don't parse partial options */ + return -1; + if (TCPOPT_REAL_CLIENTIP == opcode + && TCPOLEN_REAL_CLIENTIP == opsize) { + sk->sk_tvpc_info.sport = + *((__u16 *)(ptr)); + sk->sk_tvpc_info.vmip = + *((__u32 *)(ptr + 2)); + is_toa++; + } else if (TCPOPT_VM_VPCID == opcode + && TCPOLEN_VM_VPCID == opsize) { + sk->sk_tvpc_info.vpcid = + *((__u32 *)(ptr)); + is_toa++; + } else if (TCPOPT_VIP == opcode + && TCPOLEN_VIP == opsize) { + sk->sk_tvpc_info.vport = + *((__u16 *)(ptr)); + sk->sk_tvpc_info.vip = + *((__u32 *)(ptr + 2)); + is_toa++; + } + ptr += opsize - 2; + length -= opsize; + } + } + TOA_DBG("%s tcp source:%u dest:%u vpcid:%u vmip:%pI4 sport:%u vip:%pI4 vport:%u\n", + __func__, ntohs(th->source), ntohs(th->dest), sk->sk_tvpc_info.vpcid, + &sk->sk_tvpc_info.vmip, sk->sk_tvpc_info.sport, + &sk->sk_tvpc_info.vip, sk->sk_tvpc_info.vport); + } + return is_toa; +} + +/* Get the client address from a socket. + * @param sock [in] the socket to getpeername() or getsockname() + * @param uaddr [out] the place to put the client ip and port + * @param peer [in] if (peer), try to get the remote address; if (!peer), try to get the local address + * @return whatever the original inet_getname() returns.
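+ * + * Usage sketch (an illustrative assumption, matching the hook wiring below): + * once this function replaces inet_stream_ops.getname, a getpeername() call + * on an accepted socket reports the vtoa-recovered client address instead of + * the proxy address, unless the disable_getpeername parameter suppresses it.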
+ */ +static int +inet_getname_vtoa(struct socket *sock, + struct sockaddr *uaddr, + int peer) +{ + int retval = 0; + struct sock *sk = sock->sk; + struct sockaddr_in *sin = (struct sockaddr_in *) uaddr; + + /* call the original one */ + retval = inet_getname(sock, uaddr, peer); + if (disable_getpeername & VTOA_GETPEERNAME_IPV4_DISABLE) { + TOA_INC_STATS(ext_stats, + GETNAME_VTOA_GETPEERNAME_IPV4_ERR_CNT); + TOA_DBG("%s called, toa ipv4 getpeername disabled\n", __func__); + return retval; + } + + TOA_DBG("%s called, sk_data_ready:%px retval:%d peer:%d vmip:%x sport:%u vip:%x vport:%u\n", + __func__, sk?sk->sk_data_ready:NULL, retval, peer, + sk?sk->sk_tvpc_info.vmip:0, sk?ntohs(sk->sk_tvpc_info.sport):0, + sk?sk->sk_tvpc_info.vip:0, sk?ntohs(sk->sk_tvpc_info.vport):0); + + /* set our value if needed */ + if (retval == sizeof(struct sockaddr_in) && peer && sk) { + if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { + /* syscall accept must go here, + * so change the client ip/port and vip/vport here + */ + if (0 != sk->sk_tvpc_info.vmip + && 0 != sk->sk_tvpc_info.sport) { + TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); + sin->sin_port = sk->sk_tvpc_info.sport; + sin->sin_addr.s_addr = sk->sk_tvpc_info.vmip; + } else if (0 == sk->sk_tvpc_info.vmip + && 0 == sk->sk_tvpc_info.sport) { + TOA_INC_STATS(ext_stats, + SYN_RECV_SOCK_NO_TOA_CNT); + sk->sk_tvpc_info.sport = sin->sin_port; + sk->sk_tvpc_info.vmip = sin->sin_addr.s_addr; + } else + TOA_INFO("vmip or sport cannot be 0.\n"); + + /* we did not get vip and vport; + * maybe vpcgw did not pass them, or + * maybe it is a flow just for this host + */ + if (0 == sk->sk_tvpc_info.vip + && 0 == sk->sk_tvpc_info.vport) { + struct sockaddr_in addr; + if (inet_getname(sock, (struct sockaddr *)&addr, + 0) == sizeof(struct sockaddr_in)) { + sk->sk_tvpc_info.vport = addr.sin_port; + sk->sk_tvpc_info.vip = + addr.sin_addr.s_addr; + } + } + } else { + TOA_INC_STATS(ext_stats, GETNAME_TOA_BYPASS_CNT); + } + } else { /* no need to get client ip */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_EMPTY_CNT); + } + + return retval; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int +inet6_getname_vtoa(struct socket *sock, + struct sockaddr *uaddr, + int peer) +{ + int retval = 0; + struct sock *sk = sock->sk; + struct sockaddr_in6 *sin = (struct sockaddr_in6 *) uaddr; + + /* call the original one */ + retval = inet6_getname(sock, uaddr, peer); + if (disable_getpeername & VTOA_GETPEERNAME_IPV6_DISABLE) { + TOA_INC_STATS(ext_stats, + GETNAME_VTOA_GETPEERNAME_IPV6_ERR_CNT); + TOA_DBG("%s called, toa ipv6 getpeername disabled\n", __func__); + return retval; + } + + TOA_DBG("%s called, sk_data_ready:%px retval:%d peer:%d vmip:%x sport:%u vip:%x vport:%u\n", + __func__, sk?sk->sk_data_ready:NULL, retval, peer, + sk?sk->sk_tvpc_info.vmip:0, sk?ntohs(sk->sk_tvpc_info.sport):0, + sk?sk->sk_tvpc_info.vip:0, sk?ntohs(sk->sk_tvpc_info.vport):0); + + /* set our value if needed */ + if (retval == sizeof(struct sockaddr_in6) && peer && sk) { + if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { + if (0 != sk->sk_tvpc_info.vmip + && 0 != sk->sk_tvpc_info.sport) { + TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); + sin->sin6_port = sk->sk_tvpc_info.sport; + ipv6_addr_set(&sin->sin6_addr, 0, 0, + htonl(0x0000FFFF), + sk->sk_tvpc_info.vmip); + } else if (0 == sk->sk_tvpc_info.vmip + && 0 == sk->sk_tvpc_info.sport) { + TOA_INC_STATS(ext_stats, + SYN_RECV_SOCK_NO_TOA_CNT); + sk->sk_tvpc_info.sport = sin->sin6_port; + sk->sk_tvpc_info.vmip = + sin->sin6_addr.s6_addr32[3]; + } else
+ TOA_INFO("vmip or sport can not 0.\n"); + + if (0 == sk->sk_tvpc_info.vip + && 0 == sk->sk_tvpc_info.vport) { + struct sockaddr_in6 addr; + if (inet6_getname(sock, + (struct sockaddr *)&addr, + 0) == sizeof(struct sockaddr_in6)) { + sk->sk_tvpc_info.vport = + addr.sin6_port; + sk->sk_tvpc_info.vip = + addr.sin6_addr.s6_addr32[3]; + } + } + } else { + TOA_INC_STATS(ext_stats, GETNAME_TOA_BYPASS_CNT); + } + } else { /* no need to get client ip */ + TOA_INC_STATS(ext_stats, GETNAME_TOA_EMPTY_CNT); + } + + return retval; +} +#endif + +/* The three way handshake has completed - we got a valid synack - + * now create the new socket. + * We need to save toa data into the new socket. + * @param sk [out] the socket + * @param skb [in] the ack/ack-get packet + * @param req [in] the open request for this connection + * @param dst [out] route cache entry + * @return NULL if fail new socket if succeed. + */ +static struct sock * +tcp_v4_syn_recv_sock_vtoa(const struct sock *sk, + struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) +{ + struct sock *newsock = NULL; + int ret = 0; + + TOA_DBG("%s called\n", __func__); + + /* call orginal one */ + newsock = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + /* set our value if need */ + if (NULL != newsock) { + memset(&newsock->sk_tvpc_info, 0, + sizeof(newsock->sk_tvpc_info)); + ret = get_vtoa_data(skb, newsock); + if (ret > 0) + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_TOA_CNT); + else + TOA_INC_STATS(ext_stats, + SYN_RECV_SOCK_NO_TOA_CNT); + } + return newsock; +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static struct sock * +tcp_v6_syn_recv_sock_vtoa(const struct sock *sk, + struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req) +{ + struct sock *newsock = NULL; + int ret = 0; + TOA_DBG("%s called\n", __func__); + + /* call orginal one */ + newsock = tcp_v6_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + + /* set our value if need */ + if (NULL != newsock) { + memset(&newsock->sk_tvpc_info, 0, + sizeof(newsock->sk_tvpc_info)); + ret = get_vtoa_data(skb, newsock); + if (ret > 0) + TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_TOA_CNT); + else + TOA_INC_STATS(ext_stats, + SYN_RECV_SOCK_NO_TOA_CNT); + } + return newsock; +} +#endif + /* * Funcs for toa hooks */ @@ -79,12 +441,12 @@ static void * get_toa_data(struct sk_buff *skb) return NULL; if (opsize > length) return NULL; /* don't parse partial options */ - if (TCPOPT_TOA == opcode && TCPOLEN_TOA == opsize) { + if (TCPOPT_REAL_CLIENTIP == opcode && TCPOLEN_REAL_CLIENTIP == opsize) { memcpy(&tdata, ptr - 2, sizeof (tdata)); - //TOA_DBG("find toa data: ip = %u.%u.%u.%u, port = %u\n", NIPQUAD(tdata.ip), - //ntohs(tdata.port)); + TOA_DBG("find toa data: ip = %u.%u.%u.%u, port = %u\n", NIPQUAD(tdata.ip), + ntohs(tdata.port)); memcpy(&ret_ptr, &tdata, sizeof (ret_ptr)); - //TOA_DBG("coded toa data: %p\n", ret_ptr); + TOA_DBG("coded toa data: %px\n", ret_ptr); return ret_ptr; } ptr += opsize - 2; @@ -110,16 +472,17 @@ inet_getname_toa(struct socket *sock, struct sockaddr *uaddr, int peer) struct sockaddr_in *sin = (struct sockaddr_in *) uaddr; struct toa_data tdata; - //TOA_DBG("inet_getname_toa called, sk->sk_user_data is %p\n", sk->sk_user_data); - /* call orginal one */ retval = inet_getname(sock, uaddr, peer); + + TOA_DBG("%s called, sk_user_data:%px sk_data_ready:%px retval:%d peer:%d\n", + __func__, sk?sk->sk_user_data:NULL, 
sk?sk->sk_data_ready:NULL, retval, peer); /* set our value if need */ - if (retval == 0 && NULL != sk->sk_user_data && peer) { + if (sk && retval == sizeof(struct sockaddr_in) && NULL != sk->sk_user_data && peer) { if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { memcpy(&tdata, &sk->sk_user_data, sizeof (tdata)); - if (TCPOPT_TOA == tdata.opcode && TCPOLEN_TOA == tdata.opsize) { + if (TCPOPT_REAL_CLIENTIP == tdata.opcode && TCPOLEN_REAL_CLIENTIP == tdata.opsize) { TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); //TOA_DBG("inet_getname_toa: set new sockaddr, ip %u.%u.%u.%u -> %u.%u.%u.%u, port %u -> %u\n", // NIPQUAD(sin->sin_addr.s_addr), NIPQUAD(tdata.ip), ntohs(sin->sin_port), @@ -150,16 +513,17 @@ inet6_getname_toa(struct socket *sock, struct sockaddr *uaddr, int peer) struct sockaddr_in6 *sin = (struct sockaddr_in6 *) uaddr; struct toa_data tdata; - //TOA_DBG("inet6_getname_toa called, sk->sk_user_data is %p\n", sk->sk_user_data); - /* call orginal one */ retval = inet6_getname(sock, uaddr, peer); + TOA_DBG("%s called, sk_user_data:%px sk_data_ready:%px retval:%d peer:%d\n", + __func__, sk?sk->sk_user_data:NULL, sk?sk->sk_data_ready:NULL, retval, peer); + /* set our value if need */ - if (retval == 0 && NULL != sk->sk_user_data && peer) { + if (sk && retval == sizeof(struct sockaddr_in6) && NULL != sk->sk_user_data && peer) { if (sk_data_ready_addr == (unsigned long) sk->sk_data_ready) { memcpy(&tdata, &sk->sk_user_data, sizeof (tdata)); - if (TCPOPT_TOA == tdata.opcode && TCPOLEN_TOA == tdata.opsize) { + if (TCPOPT_REAL_CLIENTIP == tdata.opcode && TCPOLEN_REAL_CLIENTIP == tdata.opsize) { TOA_INC_STATS(ext_stats, GETNAME_TOA_OK_CNT); sin->sin6_port = tdata.port; ipv6_addr_set(&sin->sin6_addr, 0, 0, htonl(0x0000FFFF), tdata.ip); @@ -195,10 +559,11 @@ tcp_v4_syn_recv_sock_toa(const struct sock *sk, struct sk_buff *skb, { struct sock *newsock = NULL; - //TOA_DBG("tcp_v4_syn_recv_sock_toa called\n"); - /* call orginal one */ newsock = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + + TOA_DBG("%s called, newsock:%px sk_user_data:%px\n", + __func__, newsock, newsock?newsock->sk_user_data:NULL); /* set our value if need */ if (NULL != newsock && NULL == newsock->sk_user_data) { @@ -208,7 +573,6 @@ tcp_v4_syn_recv_sock_toa(const struct sock *sk, struct sk_buff *skb, } else { TOA_INC_STATS(ext_stats, SYN_RECV_SOCK_NO_TOA_CNT); } - //TOA_DBG("tcp_v4_syn_recv_sock_toa: set sk->sk_user_data to %p\n", newsock->sk_user_data); } return newsock; } @@ -223,10 +587,11 @@ tcp_v6_syn_recv_sock_toa(const struct sock *sk, struct sk_buff *skb, { struct sock *newsock = NULL; - //TOA_DBG("tcp_v4_syn_recv_sock_toa called\n"); - /* call orginal one */ newsock = tcp_v6_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); + + TOA_DBG("%s called, newsock:%px sk_user_data:%px\n", + __func__, newsock, newsock?newsock->sk_user_data:NULL); /* set our value if need */ if (NULL != newsock && NULL == newsock->sk_user_data) { @@ -256,29 +621,45 @@ hook_toa_functions(void) /* hook inet_getname for ipv4 */ inet_stream_ops_p = (struct proto_ops *)&inet_stream_ops; - inet_stream_ops_p->getname = inet_getname_toa; - TOA_INFO("CPU [%u] hooked inet_getname <%p> --> <%p>\n", smp_processor_id(), inet_getname, + if (vtoa) + inet_stream_ops_p->getname = inet_getname_vtoa; + else + inet_stream_ops_p->getname = inet_getname_toa; + + TOA_INFO("CPU [%u] hooked inet_getname <%px> --> <%px>\n", smp_processor_id(), inet_getname, inet_stream_ops_p->getname); #if defined(CONFIG_IPV6) || 
defined(CONFIG_IPV6_MODULE) /* hook inet6_getname for ipv6 */ inet6_stream_ops_p = (struct proto_ops *)&inet6_stream_ops; - inet6_stream_ops_p->getname = inet6_getname_toa; - TOA_INFO("CPU [%u] hooked inet6_getname <%p> --> <%p>\n", smp_processor_id(), inet6_getname, + if (vtoa) + inet6_stream_ops_p->getname = inet6_getname_vtoa; + else + inet6_stream_ops_p->getname = inet6_getname_toa; + + TOA_INFO("CPU [%u] hooked inet6_getname <%px> --> <%px>\n", smp_processor_id(), inet6_getname, inet6_stream_ops_p->getname); #endif /* hook tcp_v4_syn_recv_sock for ipv4 */ ipv4_specific_p = (struct inet_connection_sock_af_ops *)&ipv4_specific; - ipv4_specific_p->syn_recv_sock = tcp_v4_syn_recv_sock_toa; - TOA_INFO("CPU [%u] hooked tcp_v4_syn_recv_sock <%p> --> <%p>\n", smp_processor_id(), tcp_v4_syn_recv_sock, + if (vtoa) + ipv4_specific_p->syn_recv_sock = tcp_v4_syn_recv_sock_vtoa; + else + ipv4_specific_p->syn_recv_sock = tcp_v4_syn_recv_sock_toa; + + TOA_INFO("CPU [%u] hooked tcp_v4_syn_recv_sock <%px> --> <%px>\n", smp_processor_id(), tcp_v4_syn_recv_sock, ipv4_specific_p->syn_recv_sock); #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /* hook tcp_v6_syn_recv_sock for ipv6 */ ipv6_specific_p = (struct inet_connection_sock_af_ops *)&ipv6_specific; - ipv6_specific_p->syn_recv_sock = tcp_v6_syn_recv_sock_toa; - TOA_INFO("CPU [%u] hooked tcp_v6_syn_recv_sock <%p> --> <%p>\n", smp_processor_id(), tcp_v6_syn_recv_sock, + if (vtoa) + ipv6_specific_p->syn_recv_sock = tcp_v6_syn_recv_sock_vtoa; + else + ipv6_specific_p->syn_recv_sock = tcp_v6_syn_recv_sock_toa; + + TOA_INFO("CPU [%u] hooked tcp_v6_syn_recv_sock <%px> --> <%px>\n", smp_processor_id(), tcp_v6_syn_recv_sock, ipv6_specific_p->syn_recv_sock); #endif @@ -362,6 +743,157 @@ static const struct file_operations toa_stats_fops = { .release = single_release, }; +static int vpc_inet_dump_one_sk(struct vtoa_req *req, struct vtoa_info *info) +{ + struct sock *sk; + int err = -EINVAL; + + sk = inet_lookup(&init_net, &tcp_hashinfo, NULL, 0, req->sip, + req->sport, req->dip, + req->dport, req->if_idx); + if (!sk) { + TOA_INFO("inet_lookup fail(%d), if_idx %u sip %pI4 dip %pI4 sport %u dport %u.\n", + err, req->if_idx, &req->sip, &req->dip, + ntohs(req->sport), ntohs(req->dport)); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_LOCKUP_ERR_CNT); + goto out_nosk; + } + + if (sk->sk_state == TCP_LISTEN) { + TOA_INFO("socket listen fail(%d), if_idx %u sip %pI4 dip %pI4 sport %u dport %u sk_state %u.\n", + err, req->if_idx, &req->sip, &req->dip, + ntohs(req->sport), ntohs(req->dport), sk->sk_state); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_LOCKUP_ERR_CNT); + goto out_listen; + } + + info->i_vpcid = sk->sk_tvpc_info.vpcid; + info->i_vmip = sk->sk_tvpc_info.vmip; + info->i_sport = sk->sk_tvpc_info.sport; + info->i_vip = sk->sk_tvpc_info.vip; + info->i_vport = sk->sk_tvpc_info.vport; + err = 0; + +out_listen: + if (sk) { + if (sk->sk_state == TCP_TIME_WAIT) + inet_twsk_put((struct inet_timewait_sock *)sk); + else + sock_put(sk); + } +out_nosk: + return err; +} + + +static int vpc_vtoa_netlink_send(struct nlmsghdr *nlh, struct vtoa_info *info) +{ + struct sk_buff *skb; + void *hdr; + int err; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) { + TOA_INFO("nlmsg_new fail, vpcid %d vmip %pI4 sport %u vip %pI4 vport %u.\n", + info->i_vpcid, &info->i_vmip, ntohs(info->i_sport), + &info->i_vip, ntohs(info->i_vport)); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_NETLINK_ERR_CNT); + return -ENOMEM; + } + + hdr = genlmsg_put(skb, nlh->nlmsg_pid, 0, 
&vtoa_genl_family, 0, VTOA_CMD_GET); + if (!hdr){ + TOA_INFO("genlmsg_put fail, vpcid %d vmip %pI4 sport %u vip %pI4 vport %u.\n", + info->i_vpcid, &info->i_vmip, ntohs(info->i_sport), + &info->i_vip, ntohs(info->i_vport)); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_NETLINK_ERR_CNT); + nlmsg_free(skb); + return -ENOMEM; + } + + nla_put_u32(skb, VTOA_ATTR_I_VPCID, info->i_vpcid); + nla_put_u32(skb, VTOA_ATTR_I_VMIP, info->i_vmip); + nla_put_u16(skb, VTOA_ATTR_I_SPORT, info->i_sport); + nla_put_u32(skb, VTOA_ATTR_I_VIP, info->i_vip); + nla_put_u16(skb, VTOA_ATTR_I_VPORT, info->i_vport); + genlmsg_end(skb, hdr); + + err = genlmsg_unicast(&init_net, skb, nlh->nlmsg_pid); + if (err < 0){ + TOA_INFO("genlmsg_unicast fail(%d), vpcid %d vmip %pI4 sport %u vip %pI4 vport %u.\n", + err, + info->i_vpcid, &info->i_vmip, ntohs(info->i_sport), + &info->i_vip, ntohs(info->i_vport)); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_NETLINK_ERR_CNT); + return -EAGAIN; + } + + return 0; +} + +static int vpc_vtoa_get(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct nlmsghdr *nlh = nlmsg_hdr(skb); + struct vtoa_req *req; + struct vtoa_info vinfo; + int err; + + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_CNT); + + if (!attrs || + !attrs[VTOA_ATTR_REQ]) { + TOA_INFO("attrs null.\n"); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_ATTR_ERR_CNT); + return -EINVAL; + } + if (nla_len(attrs[VTOA_ATTR_REQ]) != sizeof(struct vtoa_req)){ + TOA_INFO("attrs len(%u) err.\n", nla_len(attrs[VTOA_ATTR_REQ])); + TOA_INC_STATS(ext_stats, GETNAME_VTOA_GET_ATTR_ERR_CNT); + return -EINVAL; + } + + req = nla_data(attrs[VTOA_ATTR_REQ]); + + memset(&vinfo, 0, sizeof(vinfo)); + err = vpc_inet_dump_one_sk(req, &vinfo); + if (err != 0){ + return err; + } + + err = vpc_vtoa_netlink_send(nlh, &vinfo); + + TOA_DBG("req[if_idx:%u sip:%pI4 dip:%pI4 sport:%u dport:%u] info[" + "vpcid:%u vmip:%pI4 sport:%u vip:%pI4 vport:%u] err:%d\n", + req->if_idx, &req->sip, &req->dip, + ntohs(req->sport), ntohs(req->dport), vinfo.i_vpcid, &vinfo.i_vmip, + ntohs(vinfo.i_sport), &vinfo.i_vip, ntohs(vinfo.i_vport), err); + + return err; +} + +static struct genl_ops vtoa_genl_ops[] = +{ + { + .cmd = VTOA_CMD_GET, + .flags = 0, + .doit = vpc_vtoa_get, + .dumpit = NULL, + }, +}; + +static struct genl_family vtoa_genl_family = { + .hdrsize = 0, + .name = "VPC_VTOA", + .version = 1, + .maxattr = VTOA_ATTR_MAX, + .policy = vtoa_policy, + .module = THIS_MODULE, + .ops = vtoa_genl_ops, + .n_ops = ARRAY_SIZE(vtoa_genl_ops), +}; + + /* * TOA module init and destory */ @@ -389,6 +921,11 @@ toa_init(void) goto err; } + if (genl_register_family(&vtoa_genl_family) != 0){ + TOA_INFO("vtoa_genl_family register fail.\n"); + goto err; + } + /* hook funcs for parse and get toa */ hook_toa_functions(); @@ -411,7 +948,8 @@ toa_exit(void) { unhook_toa_functions(); synchronize_net(); - + + genl_unregister_family(&vtoa_genl_family); remove_proc_entry("toa_stats", init_net.proc_net); if (NULL != ext_stats) { free_percpu(ext_stats); diff --git a/net/toa/toa.h b/net/toa/toa.h index 615ae8b25bc2..7c0986449f38 100644 --- a/net/toa/toa.h +++ b/net/toa/toa.h @@ -20,11 +20,11 @@ #include <net/ipv6.h> #include <net/transp_v6.h> -#define TOA_VERSION "1.0.0.0" +#define TOA_VERSION "2.0.0.0" -#define TOA_DBG(msg...) \ +#define TOA_DBG(msg,...) \ do { \ - printk(KERN_DEBUG "[DEBUG] TOA: " msg); \ + if (0xff==dbg){printk(KERN_DEBUG "[DEBUG] TOA: comm:%s pid:%d " msg, current->comm, (int)current->pid, ##__VA_ARGS__);} \ } while (0) #define TOA_INFO(msg...) 
\ @@ -33,10 +33,15 @@ printk(KERN_INFO "TOA: " msg);\ } while(0) -#define TCPOPT_TOA 200 - -/* MUST be 4n !!!! */ -#define TCPOLEN_TOA 8 /* |opcode|size|ip+port| = 1 + 1 + 6 */ +#define TCPOPT_REAL_CLIENTIP 200 +/* |opcode|size|vmip+sport| = 1 + 1 + 6 */ +#define TCPOLEN_REAL_CLIENTIP 8 +#define TCPOPT_VM_VPCID 201 +/* |nop|nop|opcode|size|vpcid|; two leading NOPs (kind 1) pad for alignment, length = 1 + 1 + 4 */ +#define TCPOLEN_VM_VPCID 6 +#define TCPOPT_VIP 202 +/* |opcode|size|vip+vport| = 1 + 1 + 6 */ +#define TCPOLEN_VIP 8 /* MUST be 4 bytes alignment */ struct toa_data { @@ -54,6 +59,12 @@ enum { GETNAME_TOA_MISMATCH_CNT, GETNAME_TOA_BYPASS_CNT, GETNAME_TOA_EMPTY_CNT, + GETNAME_VTOA_GET_CNT, + GETNAME_VTOA_GET_ATTR_ERR_CNT, + GETNAME_VTOA_GET_LOCKUP_ERR_CNT, + GETNAME_VTOA_GET_NETLINK_ERR_CNT, + GETNAME_VTOA_GETPEERNAME_IPV4_ERR_CNT, + GETNAME_VTOA_GETPEERNAME_IPV6_ERR_CNT, TOA_STAT_LAST }; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b3369d678f1a..ecadd9e482c4 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return unix_peer(osk) == NULL || unix_our_peer(sk, osk); } -static inline int unix_recvq_full(struct sock const *sk) +static inline int unix_recvq_full(const struct sock *sk) { return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } +static inline int unix_recvq_full_lockless(const struct sock *sk) +{ + return skb_queue_len_lockless(&sk->sk_receive_queue) > + READ_ONCE(sk->sk_max_ack_backlog); +} + struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -1724,7 +1730,8 @@ restart_locked: * - unix_peer(sk) == sk by time of get but disconnected before lock */ if (other != sk && - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { + unlikely(unix_peer(other) != sk && + unix_recvq_full_lockless(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 582a3e4dfce2..c82e7b52ab1f 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -620,8 +620,9 @@ struct sock *__vsock_create(struct net *net, vsk->trusted = psk->trusted; vsk->owner = get_cred(psk->owner); vsk->connect_timeout = psk->connect_timeout; + security_sk_clone(parent, sk); } else { - vsk->trusted = capable(CAP_NET_ADMIN); + vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); vsk->owner = get_current_cred(); vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; } @@ -808,10 +809,12 @@ static int vsock_shutdown(struct socket *sock, int mode) */ sk = sock->sk; + + lock_sock(sk); if (sock->state == SS_UNCONNECTED) { err = -ENOTCONN; if (sk->sk_type == SOCK_STREAM) - return err; + goto out; } else { sock->state = SS_DISCONNECTING; err = 0; } @@ -820,10 +823,8 @@ static int vsock_shutdown(struct socket *sock, int mode) /* Receive and send shutdowns are treated alike. 
*/ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); if (mode) { - lock_sock(sk); sk->sk_shutdown |= mode; sk->sk_state_change(sk); - release_sock(sk); if (sk->sk_type == SOCK_STREAM) { sock_reset_flag(sk, SOCK_DONE); @@ -831,6 +832,8 @@ static int vsock_shutdown(struct socket *sock, int mode) } } +out: + release_sock(sk); return err; } @@ -1099,7 +1102,6 @@ static void vsock_connect_timeout(struct work_struct *work) { struct sock *sk; struct vsock_sock *vsk; - int cancel = 0; vsk = container_of(work, struct vsock_sock, connect_work.work); sk = sk_vsock(vsk); @@ -1110,11 +1112,9 @@ static void vsock_connect_timeout(struct work_struct *work) sk->sk_state = TCP_CLOSE; sk->sk_err = ETIMEDOUT; sk->sk_error_report(sk); - cancel = 1; + vsock_transport_cancel_pkt(vsk); } release_sock(sk); - if (cancel) - vsock_transport_cancel_pkt(vsk); sock_put(sk); } @@ -1275,7 +1275,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags, /* Wait for children sockets to appear; these are the new sockets * created upon connection establishment. */ - timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); + timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); while ((connected = vsock_dequeue_accept(listener)) == NULL && diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 463cefc1e5ae..a3c57c048cbd 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -464,14 +464,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode) static int hvs_shutdown(struct vsock_sock *vsk, int mode) { - struct sock *sk = sk_vsock(vsk); - if (!(mode & SEND_SHUTDOWN)) return 0; - lock_sock(sk); hvs_shutdown_lock_held(vsk->trans, mode); - release_sock(sk); return 0; } diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 082a30936690..5905f0cddc89 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -22,7 +22,7 @@ #include <net/af_vsock.h> static struct workqueue_struct *virtio_vsock_workqueue; -static struct virtio_vsock *the_virtio_vsock; +static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ struct virtio_vsock { @@ -86,33 +86,6 @@ out_rcu: return ret; } -static void virtio_transport_loopback_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, loopback_work); - LIST_HEAD(pkts); - - spin_lock_bh(&vsock->loopback_list_lock); - list_splice_init(&vsock->loopback_list, &pkts); - spin_unlock_bh(&vsock->loopback_list_lock); - - mutex_lock(&vsock->rx_lock); - - if (!vsock->rx_run) - goto out; - - while (!list_empty(&pkts)) { - struct virtio_vsock_pkt *pkt; - - pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); - list_del_init(&pkt->list); - - virtio_transport_recv_pkt(pkt); - } -out: - mutex_unlock(&vsock->rx_lock); -} - static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock, struct virtio_vsock_pkt *pkt) { @@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock) return val < virtqueue_get_vring_size(vq); } -static void virtio_transport_rx_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, rx_work); - struct virtqueue *vq; - - vq = vsock->vqs[VSOCK_VQ_RX]; - - mutex_lock(&vsock->rx_lock); - - if (!vsock->rx_run) - goto out; - - do { - virtqueue_disable_cb(vq); - 
for (;;) { - struct virtio_vsock_pkt *pkt; - unsigned int len; - - if (!virtio_transport_more_replies(vsock)) { - /* Stop rx until the device processes already - * pending replies. Leave rx virtqueue - * callbacks disabled. - */ - goto out; - } - - pkt = virtqueue_get_buf(vq, &len); - if (!pkt) { - break; - } - - vsock->rx_buf_nr--; - - /* Drop short/long packets */ - if (unlikely(len < sizeof(pkt->hdr) || - len > sizeof(pkt->hdr) + pkt->len)) { - virtio_transport_free_pkt(pkt); - continue; - } - - pkt->len = len - sizeof(pkt->hdr); - virtio_transport_deliver_tap_pkt(pkt); - virtio_transport_recv_pkt(pkt); - } - } while (!virtqueue_enable_cb(vq)); - -out: - if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) - virtio_vsock_rx_fill(vsock); - mutex_unlock(&vsock->rx_lock); -} - /* event_lock must be held */ static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, struct virtio_vsock_event *event) @@ -586,6 +506,86 @@ static struct virtio_transport virtio_transport = { .send_pkt = virtio_transport_send_pkt, }; +static void virtio_transport_loopback_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, loopback_work); + LIST_HEAD(pkts); + + spin_lock_bh(&vsock->loopback_list_lock); + list_splice_init(&vsock->loopback_list, &pkts); + spin_unlock_bh(&vsock->loopback_list_lock); + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + while (!list_empty(&pkts)) { + struct virtio_vsock_pkt *pkt; + + pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); + list_del_init(&pkt->list); + + virtio_transport_recv_pkt(&virtio_transport, pkt); + } +out: + mutex_unlock(&vsock->rx_lock); +} + +static void virtio_transport_rx_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, rx_work); + struct virtqueue *vq; + + vq = vsock->vqs[VSOCK_VQ_RX]; + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + do { + virtqueue_disable_cb(vq); + for (;;) { + struct virtio_vsock_pkt *pkt; + unsigned int len; + + if (!virtio_transport_more_replies(vsock)) { + /* Stop rx until the device processes already + * pending replies. Leave rx virtqueue + * callbacks disabled. + */ + goto out; + } + + pkt = virtqueue_get_buf(vq, &len); + if (!pkt) { + break; + } + + vsock->rx_buf_nr--; + + /* Drop short/long packets */ + if (unlikely(len < sizeof(pkt->hdr) || + len > sizeof(pkt->hdr) + pkt->len)) { + virtio_transport_free_pkt(pkt); + continue; + } + + pkt->len = len - sizeof(pkt->hdr); + virtio_transport_deliver_tap_pkt(pkt); + virtio_transport_recv_pkt(&virtio_transport, pkt); + } + } while (!virtqueue_enable_cb(vq)); + +out: + if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) + virtio_vsock_rx_fill(vsock); + mutex_unlock(&vsock->rx_lock); +} + static int virtio_vsock_probe(struct virtio_device *vdev) { vq_callback_t *callbacks[] = { diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index fb2060dffb0a..dde16a033a09 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -696,9 +696,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk, /* Normally packets are associated with a socket. There may be no socket if an * attempt was made to connect to a socket that does not exist. 
*/ -static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) +static int virtio_transport_reset_no_sock(const struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { - const struct virtio_transport *t; struct virtio_vsock_pkt *reply; struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RST, @@ -718,7 +718,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) if (!reply) return -ENOMEM; - t = virtio_transport_get_ops(); if (!t) { virtio_transport_free_pkt(reply); return -ENOTCONN; @@ -1060,7 +1059,8 @@ static bool virtio_transport_space_update(struct sock *sk, /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex * lock. */ -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { struct sockaddr_vm src, dst; struct vsock_sock *vsk; @@ -1082,7 +1082,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) le32_to_cpu(pkt->hdr.fwd_cnt)); if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } @@ -1093,17 +1093,17 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) if (!sk) { sk = vsock_find_bound_socket(&dst); if (!sk) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } } vsk = vsock_sk(sk); - space_available = virtio_transport_space_update(sk, pkt); - lock_sock(sk); + space_available = virtio_transport_space_update(sk, pkt); + /* Update CID in case it has changed after a transport reset event */ vsk->local_addr.svm_cid = dst.svm_cid; @@ -1127,6 +1127,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) virtio_transport_free_pkt(pkt); break; default: + (void)virtio_transport_reset_no_sock(t, pkt); virtio_transport_free_pkt(pkt); break; } diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 8c9c4ed90fa7..aaabcd84268a 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -576,8 +576,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair, peer, flags, VMCI_NO_PRIVILEGE_FLAGS); out: if (err < 0) { - pr_err("Could not attach to queue pair with %d\n", - err); + pr_err_once("Could not attach to queue pair with %d\n", err); err = vmci_transport_error_to_vsock_error(err); } diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 63cf7131f601..211007c091d5 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -217,6 +217,7 @@ config LIB80211_CRYPT_WEP config LIB80211_CRYPT_CCMP tristate + select CRYPTO select CRYPTO_AES select CRYPTO_CCM diff --git a/net/wireless/core.c b/net/wireless/core.c index 3e25229a059d..5d151e8f8932 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -142,7 +142,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, if (result) return result; - if (rdev->wiphy.debugfsdir) + if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) debugfs_rename(rdev->wiphy.debugfsdir->d_parent, rdev->wiphy.debugfsdir, rdev->wiphy.debugfsdir->d_parent, newname); @@ -1224,8 +1224,7 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, } EXPORT_SYMBOL(cfg80211_stop_iface); -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev) +void cfg80211_init_wdev(struct wireless_dev *wdev) { mutex_init(&wdev->mtx); INIT_LIST_HEAD(&wdev->event_list); @@ -1236,6 +1235,30 @@ void 
cfg80211_init_wdev(struct cfg80211_registered_device *rdev, spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); +#ifdef CONFIG_CFG80211_WEXT + wdev->wext.default_key = -1; + wdev->wext.default_mgmt_key = -1; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; +#endif + + if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) + wdev->ps = true; + else + wdev->ps = false; + /* allow mac80211 to determine the timeout */ + wdev->ps_timeout = -1; + + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || + wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) + wdev->netdev->priv_flags |= IFF_DONT_BRIDGE; + + INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); +} + +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ /* * We get here also when the interface changes network namespaces, * as it's registered into the new one, but we don't want it to @@ -1269,6 +1292,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, switch (state) { case NETDEV_POST_INIT: SET_NETDEV_DEVTYPE(dev, &wiphy_type); + wdev->netdev = dev; + /* can only change netns with wiphy */ + dev->features |= NETIF_F_NETNS_LOCAL; + + cfg80211_init_wdev(wdev); break; case NETDEV_REGISTER: /* @@ -1276,35 +1304,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * called within code protected by it when interfaces * are added with nl80211. */ - /* can only change netns with wiphy */ - dev->features |= NETIF_F_NETNS_LOCAL; - if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, "phy80211")) { pr_err("failed to add phy80211 symlink to netdev!\n"); } - wdev->netdev = dev; -#ifdef CONFIG_CFG80211_WEXT - wdev->wext.default_key = -1; - wdev->wext.default_mgmt_key = -1; - wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; -#endif - if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) - wdev->ps = true; - else - wdev->ps = false; - /* allow mac80211 to determine the timeout */ - wdev->ps_timeout = -1; - - if ((wdev->iftype == NL80211_IFTYPE_STATION || - wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || - wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) - dev->priv_flags |= IFF_DONT_BRIDGE; - - INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); - - cfg80211_init_wdev(rdev, wdev); + cfg80211_register_wdev(rdev, wdev); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); diff --git a/net/wireless/core.h b/net/wireless/core.h index ed487e324571..d83c8e009448 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -210,8 +210,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev); +void cfg80211_init_wdev(struct wireless_dev *wdev); +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index dbb6a14968ef..5bb2316befb9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -618,10 +618,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY, .len = NL80211_HE_MAX_CAPABILITY_LEN }, - [NL80211_ATTR_FTM_RESPONDER] = { - .type = NLA_NESTED, - .validation_data = nl80211_ftm_responder_policy, - }, + 
[NL80211_ATTR_FTM_RESPONDER] = + NLA_POLICY_NESTED(nl80211_ftm_responder_policy), [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), [NL80211_ATTR_PEER_MEASUREMENTS] = NLA_POLICY_NESTED(nl80211_pmsr_attr_policy), @@ -2229,7 +2227,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, * case we'll continue with more data in the next round, * but break unconditionally so unsplit data stops here. */ - state->split_start++; + if (state->split) + state->split_start++; + else + state->split_start = 0; break; case 9: if (rdev->wiphy.extended_capabilities && @@ -3653,7 +3654,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) * P2P Device and NAN do not have a netdev, so don't go * through the netdev notifier and must be added here */ - cfg80211_init_wdev(rdev, wdev); + cfg80211_init_wdev(wdev); + cfg80211_register_wdev(rdev, wdev); break; default: break; @@ -3977,6 +3979,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (key.idx < 0) + return -EINVAL; + if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -4495,16 +4500,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs, if (err) return err; - if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] || - !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]) - return -EINVAL; + if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]) + he_obss_pd->min_offset = + nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]); + if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]) + he_obss_pd->max_offset = + nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]); - he_obss_pd->min_offset = - nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]); - he_obss_pd->max_offset = - nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]); - - if (he_obss_pd->min_offset >= he_obss_pd->max_offset) + if (he_obss_pd->min_offset > he_obss_pd->max_offset) return -EINVAL; he_obss_pd->enable = true; @@ -4800,7 +4803,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], ¶ms.he_obss_pd); - goto out; + if (err) + goto out; } nl80211_calculate_ap_params(¶ms); @@ -12029,7 +12033,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; - struct cfg80211_gtk_rekey_data rekey_data; + struct cfg80211_gtk_rekey_data rekey_data = {}; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) @@ -12950,13 +12954,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 1a8218f1bbe0..0f3b57a73670 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2941,6 +2941,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; @@ -3402,7 +3405,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) power_rule = ®_rule->power_rule; if (reg_rule->flags & 
NL80211_RRF_AUTO_BW) - snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO", + snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", freq_range->max_bandwidth_khz, reg_get_max_bandwidth(rd, reg_rule)); else diff --git a/net/wireless/scan.c b/net/wireless/scan.c index aef240fdf8df..83297832744a 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1257,6 +1257,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, if (rdev->bss_entries >= bss_entries_limit && !cfg80211_bss_expire_oldest(rdev)) { + if (!list_empty(&new->hidden_list)) + list_del(&new->hidden_list); kfree(new); goto drop; } @@ -2022,7 +2024,11 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, spin_lock_bh(&rdev->bss_lock); - if (WARN_ON(cbss->pub.channel == chan)) + /* + * Some APs use CSA also for bandwidth changes, i.e., without actually + * changing the control channel, so no need to update in such a case. + */ + if (cbss->pub.channel == chan) goto done; /* use transmitting bss */ diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d32a2ec4d96a..63f89687a018 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -530,7 +530,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, cfg80211_sme_free(wdev); } - if (WARN_ON(wdev->conn)) + if (wdev->conn) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); diff --git a/net/wireless/util.c b/net/wireless/util.c index 8481e9ac33da..9abafd76ec50 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -116,11 +116,13 @@ int ieee80211_frequency_to_channel(int freq) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; - else if (freq < 5945) + else if (freq < 5925) return (freq - 5000) / 5; + else if (freq == 5935) + return 2; else if (freq <= 45000) /* DMG band lower limit */ - /* see 802.11ax D4.1 27.3.22.2 */ - return (freq - 5940) / 5; + /* see 802.11ax D6.1 27.3.22.2 */ + return (freq - 5950) / 5; else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 69102fda9ebd..76a80a41615b 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -896,8 +896,9 @@ out: int call_commit_handler(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT - if ((netif_running(dev)) && - (dev->wireless_handlers->standard[0] != NULL)) + if (netif_running(dev) && + dev->wireless_handlers && + dev->wireless_handlers->standard[0]) /* Call the commit handler on the driver */ return dev->wireless_handlers->standard[0](dev, NULL, NULL, NULL); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 256f3e97d1f3..cb1f5016c433 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -675,7 +675,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) int len, i, rc = 0; if (addr_len != sizeof(struct sockaddr_x25) || - addr->sx25_family != AF_X25) { + addr->sx25_family != AF_X25 || + strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) { rc = -EINVAL; goto out; } @@ -769,7 +770,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || - addr->sx25_family != AF_X25) + addr->sx25_family != AF_X25 || + strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) goto out; rc = -ENETUNREACH; @@ -819,7 +821,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, sock->state = SS_CONNECTED; rc = 0; out_put_neigh: - if (rc) { + if (rc && x25->neighbour) { 
read_lock_bh(&x25_list_lock); x25_neigh_put(x25->neighbour); x25->neighbour = NULL; @@ -1044,6 +1046,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, makex25->lci = lci; makex25->dest_addr = dest_addr; makex25->source_addr = source_addr; + x25_neigh_hold(nb); makex25->neighbour = nb; makex25->facilities = facilities; makex25->dte_facilities= dte_facilities; diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 00e782335cb0..25bf72ee6cad 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -115,8 +115,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, goto drop; } - if (!pskb_may_pull(skb, 1)) + if (!pskb_may_pull(skb, 1)) { + x25_neigh_put(nb); return 0; + } switch (skb->data[0]) { diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 8aa415a38814..0285aaa1e93c 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -357,6 +357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } + if (x25->neighbour) { + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; + read_unlock_bh(&x25_list_lock); + } } /* diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 3049af269fbf..993f14acbb9f 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -341,9 +341,9 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) { bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; u32 chunk_size = mr->chunk_size, headroom = mr->headroom; + u64 npgs, addr = mr->addr, size = mr->len; unsigned int chunks, chunks_per_page; - u64 addr = mr->addr, size = mr->len; - int size_chk, err; + int err; if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { /* Strictly speaking we could support this, if: @@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) if ((addr + size) < addr) return -EINVAL; + npgs = div_u64(size, PAGE_SIZE); + if (npgs > U32_MAX) + return -EINVAL; + chunks = (unsigned int)div_u64(size, chunk_size); if (chunks == 0) return -EINVAL; @@ -382,8 +386,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) return -EINVAL; } - size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM; - if (size_chk < 0) + if (headroom >= chunk_size - XDP_PACKET_HEADROOM) return -EINVAL; umem->address = (unsigned long)addr; @@ -392,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) umem->size = size; umem->headroom = headroom; umem->chunk_size_nohr = chunk_size - headroom; - umem->npgs = size / PAGE_SIZE; + umem->npgs = (u32)npgs; umem->pgs = NULL; umem->user = NULL; umem->flags = mr->flags; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index d426fc01c529..2bc0d6e3e124 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -129,8 +129,9 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf, u64 page_start = addr & ~(PAGE_SIZE - 1); u64 first_len = PAGE_SIZE - (addr - page_start); - memcpy(to_buf, from_buf, first_len + metalen); - memcpy(next_pg_addr, from_buf + first_len, len - first_len); + memcpy(to_buf, from_buf, first_len); + memcpy(next_pg_addr, from_buf + first_len, + len + metalen - first_len); return; } @@ -361,10 +362,8 @@ static int xsk_generic_xmit(struct sock *sk) len = desc.len; skb = sock_alloc_send_skb(sk, len, 1, &err); - if (unlikely(!skb)) { - err = -EAGAIN; + if (unlikely(!skb)) goto out; - } skb_put(skb, len); addr = desc.addr; @@ -427,14 +426,16 @@ static int 
xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return __xsk_sendmsg(sk); } -static unsigned int xsk_poll(struct file *file, struct socket *sock, +static __poll_t xsk_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait) { - unsigned int mask = datagram_poll(file, sock, wait); + __poll_t mask = 0; struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); struct xdp_umem *umem; + sock_poll_wait(file, sock, wait); + if (unlikely(!xsk_is_bound(xs))) return mask; @@ -449,9 +450,9 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock, } if (xs->rx && !xskq_empty_desc(xs->rx)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (xs->tx && !xskq_full_desc(xs->tx)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 64486ad81341..bb2292b5260c 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -25,12 +25,10 @@ static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb, struct xfrm_offload *xo = xfrm_offload(skb); skb_reset_mac_len(skb); - pskb_pull(skb, skb->mac_len + hsize + x->props.header_len); - - if (xo->flags & XFRM_GSO_SEGMENT) { - skb_reset_transport_header(skb); + if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header -= x->props.header_len; - } + + pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); } static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb, @@ -84,7 +82,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct xfrm_offload *xo = xfrm_offload(skb); struct sec_path *sp; - if (!xo) + if (!xo || (xo->flags & XFRM_XMIT)) return skb; if (!(features & NETIF_F_HW_ESP)) @@ -105,6 +103,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } + xo->flags |= XFRM_XMIT; + if (skb_is_gso(skb)) { struct net_device *dev = skb->dev; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 2c86a2fc3915..e120df0a6da1 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -643,7 +643,7 @@ resume: dev_put(skb->dev); spin_lock(&x->lock); - if (nexthdr <= 0) { + if (nexthdr < 0) { if (nexthdr == -EBADMSG) { xfrm_audit_state_icvfail(x, skb, x->type->proto); @@ -656,7 +656,7 @@ resume: /* only the first xfrm gets the encap type */ encap_type = 0; - if (async && x->repl->recheck(x, skb, seq)) { + if (x->repl->recheck(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 4d5627e274fe..74e90d78c3b4 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -293,23 +293,26 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) } mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); } else { - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); + if (!(ip_hdr(skb)->frag_off & htons(IP_DF))) + goto xmit; + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); } dst_release(dst); return -EMSGSIZE; } +xmit: xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev))); skb_dst_set(skb, dst); skb->dev = tdev; @@ -772,7 +775,28 @@ static void 
__net_exit xfrmi_exit_net(struct net *net) rtnl_unlock(); } +static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) +{ + struct net *net; + LIST_HEAD(list); + + rtnl_lock(); + list_for_each_entry(net, net_exit_list, exit_list) { + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); + struct xfrm_if __rcu **xip; + struct xfrm_if *xi; + + for (xip = &xfrmn->xfrmi[0]; + (xi = rtnl_dereference(*xip)) != NULL; + xip = &xi->next) + unregister_netdevice_queue(xi->dev, &list); + } + unregister_netdevice_many(&list); + rtnl_unlock(); +} + static struct pernet_operations xfrmi_net_ops = { + .exit_batch = xfrmi_exit_batch_net, .exit = xfrmi_exit_net, .id = &xfrmi_net_id, .size = sizeof(struct xfrmi_net), diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index b1db55b50ba1..9a6a8c4008ab 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -586,18 +586,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) xfrm_state_hold(x); if (skb_is_gso(skb)) { - skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; + if (skb->inner_protocol) + return xfrm_output_gso(net, sk, skb); - return xfrm_output2(net, sk, skb); + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; + goto out; } if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) goto out; + } else { + if (skb_is_gso(skb)) + return xfrm_output_gso(net, sk, skb); } - if (skb_is_gso(skb)) - return xfrm_output_gso(net, sk, skb); - if (skb->ip_summed == CHECKSUM_PARTIAL) { err = skb_checksum_help(skb); if (err) { @@ -643,7 +645,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) if (skb->protocol == htons(ETH_P_IP)) proto = AF_INET; - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (skb->protocol == htons(ETH_P_IPV6) && + skb->sk->sk_family == AF_INET6) proto = AF_INET6; else return; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 264cf05a4eaa..32c816342797 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -790,15 +790,22 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a, const xfrm_address_t *b, u8 prefixlen, u16 family) { + u32 ma, mb, mask; unsigned int pdw, pbi; int delta = 0; switch (family) { case AF_INET: - if (sizeof(long) == 4 && prefixlen == 0) - return ntohl(a->a4) - ntohl(b->a4); - return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) - - (ntohl(b->a4) & ((~0UL << (32 - prefixlen)))); + if (prefixlen == 0) + return 0; + mask = ~0U << (32 - prefixlen); + ma = ntohl(a->a4) & mask; + mb = ntohl(b->a4) & mask; + if (ma < mb) + delta = -1; + else if (ma > mb) + delta = 1; + break; case AF_INET6: pdw = prefixlen >> 5; pbi = prefixlen & 0x1f; @@ -809,10 +816,13 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a, return delta; } if (pbi) { - u32 mask = ~0u << (32 - pbi); - - delta = (ntohl(a->a6[pdw]) & mask) - - (ntohl(b->a6[pdw]) & mask); + mask = ~0U << (32 - pbi); + ma = ntohl(a->a6[pdw]) & mask; + mb = ntohl(b->a6[pdw]) & mask; + if (ma < mb) + delta = -1; + else if (ma > mb) + delta = 1; } break; default: @@ -1430,19 +1440,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - u32 mark = policy->mark.v & policy->mark.m; - - if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) - return true; - - if ((mark & pol->mark.m) == pol->mark.v && - policy->priority == pol->priority) - return 
true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1505,7 +1506,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { delpol = pol; @@ -1540,7 +1541,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1612,9 +1613,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1625,7 +1625,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) return pol; @@ -1634,11 +1634,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1705,9 +1704,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1722,8 +1721,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( @@ -3077,8 +3075,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net, xflo.flags = flags; /* To accelerate a bit... 
*/ - if ((dst_orig->flags & DST_NOXFRM) || - !net->xfrm.policy_count[XFRM_POLICY_OUT]) + if (!if_id && ((dst_orig->flags & DST_NOXFRM) || + !net->xfrm.policy_count[XFRM_POLICY_OUT])) goto nopol; xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f3423562d933..1423e2b7cb42 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work); */ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; -static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation); static struct kmem_cache *xfrm_state_cache __ro_after_init; static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); @@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work) } spin_lock_bh(&net->xfrm.xfrm_state_lock); - write_seqcount_begin(&xfrm_state_hash_generation); + write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net); @@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work) rcu_assign_pointer(net->xfrm.state_byspi, nspi); net->xfrm.state_hmask = nhashmask; - write_seqcount_end(&xfrm_state_hash_generation); + write_seqcount_end(&net->xfrm.xfrm_state_hash_generation); spin_unlock_bh(&net->xfrm.xfrm_state_lock); osize = (ohashmask + 1) * sizeof(struct hlist_head); @@ -1016,7 +1015,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, */ if (x->km.state == XFRM_STATE_VALID) { if ((x->sel.family && - !xfrm_selector_match(&x->sel, fl, x->sel.family)) || + (x->sel.family != family || + !xfrm_selector_match(&x->sel, fl, family))) || !security_xfrm_state_pol_flow_match(x, pol, fl)) return; @@ -1029,7 +1029,9 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, *acq_in_progress = 1; } else if (x->km.state == XFRM_STATE_ERROR || x->km.state == XFRM_STATE_EXPIRED) { - if (xfrm_selector_match(&x->sel, fl, x->sel.family) && + if ((!x->sel.family || + (x->sel.family == family && + xfrm_selector_match(&x->sel, fl, family))) && security_xfrm_state_pol_flow_match(x, pol, fl)) *error = -ESRCH; } @@ -1055,7 +1057,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, to_put = NULL; - sequence = read_seqcount_begin(&xfrm_state_hash_generation); + sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); rcu_read_lock(); h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); @@ -1069,7 +1071,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } if (best || acquire_in_progress) @@ -1086,7 +1088,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } @@ -1168,7 +1170,7 @@ out: if (to_put) xfrm_state_put(to_put); - if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) { + if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) { *err = -EAGAIN; if (x) { 
xfrm_state_put(x); @@ -1438,6 +1440,30 @@ out: EXPORT_SYMBOL(xfrm_state_add); #ifdef CONFIG_XFRM_MIGRATE +static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security) +{ + struct xfrm_user_sec_ctx *uctx; + int size = sizeof(*uctx) + security->ctx_len; + int err; + + uctx = kmalloc(size, GFP_KERNEL); + if (!uctx) + return -ENOMEM; + + uctx->exttype = XFRMA_SEC_CTX; + uctx->len = size; + uctx->ctx_doi = security->ctx_doi; + uctx->ctx_alg = security->ctx_alg; + uctx->ctx_len = security->ctx_len; + memcpy(uctx + 1, security->ctx_str, security->ctx_len); + err = security_xfrm_state_alloc(x, uctx); + kfree(uctx); + if (err) + return err; + + return 0; +} + static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, struct xfrm_encap_tmpl *encap) { @@ -1494,6 +1520,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, goto error; } + if (orig->security) + if (clone_security(x, orig->security)) + goto error; + if (orig->coaddr) { x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), GFP_KERNEL); @@ -1507,6 +1537,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, } memcpy(&x->mark, &orig->mark, sizeof(x->mark)); + memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); if (xfrm_init_state(x) < 0) goto error; @@ -1518,7 +1549,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->tfcpad = orig->tfcpad; x->replay_maxdiff = orig->replay_maxdiff; x->replay_maxage = orig->replay_maxage; - x->curlft.add_time = orig->curlft.add_time; + memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft)); x->km.state = orig->km.state; x->km.seq = orig->km.seq; x->replay = orig->replay; @@ -1969,6 +2000,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) int err = -ENOENT; __be32 minspi = htonl(low); __be32 maxspi = htonl(high); + __be32 newspi = 0; u32 mark = x->mark.v & x->mark.m; spin_lock_bh(&x->lock); @@ -1987,21 +2019,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) xfrm_state_put(x0); goto unlock; } - x->id.spi = minspi; + newspi = minspi; } else { u32 spi = 0; for (h = 0; h < high-low+1; h++) { spi = low + prandom_u32()%(high-low+1); x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); if (x0 == NULL) { - x->id.spi = htonl(spi); + newspi = htonl(spi); break; } xfrm_state_put(x0); } } - if (x->id.spi) { + if (newspi) { spin_lock_bh(&net->xfrm.xfrm_state_lock); + x->id.spi = newspi; h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); spin_unlock_bh(&net->xfrm.xfrm_state_lock); @@ -2554,6 +2587,7 @@ int __net_init xfrm_state_init(struct net *net) net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); spin_lock_init(&net->xfrm.xfrm_state_lock); + seqcount_init(&net->xfrm.xfrm_state_hash_generation); return 0; out_byspi: diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3..fbb7d9d06478 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, 
p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); @@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } diff --git a/package/arm/Module.kabi_aarch64 b/package/arm/Module.kabi_aarch64 new file mode 100644 index 000000000000..ae084261fe9b --- /dev/null +++ b/package/arm/Module.kabi_aarch64 @@ -0,0 +1,14869 @@ +0x981efe92 mpt_deregister drivers/message/fusion/mptbase EXPORT_SYMBOL +0xdbc42a8b iscsi_host_add drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x51c42f58 ipv6_chk_custom_prefix vmlinux EXPORT_SYMBOL +0x4abcf4d7 sata_pmp_error_handler vmlinux EXPORT_SYMBOL_GPL +0x55417264 unregister_vt_notifier vmlinux EXPORT_SYMBOL_GPL +0xc8ab2aa2 set_anon_super vmlinux EXPORT_SYMBOL +0x25ba14db kmem_cache_alloc vmlinux EXPORT_SYMBOL +0xffa8bcd1 replace_page_cache_page vmlinux EXPORT_SYMBOL_GPL +0x1381d4f3 net_cls_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x5fd441df st21nfca_dep_deinit drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x57ce2d36 xdp_attachment_setup vmlinux EXPORT_SYMBOL_GPL +0x23f9c5ce xps_needed vmlinux EXPORT_SYMBOL +0x9feab7d8 of_get_property vmlinux EXPORT_SYMBOL +0xfca181d1 i2c_put_adapter vmlinux EXPORT_SYMBOL +0x014a2b1a rtc_class_open vmlinux EXPORT_SYMBOL_GPL +0x96cd2b04 scsi_sense_key_string vmlinux EXPORT_SYMBOL +0x350ea558 dma_fence_default_wait vmlinux EXPORT_SYMBOL +0x013f26ae dma_fence_get_stub vmlinux EXPORT_SYMBOL +0xda895fe2 request_firmware vmlinux EXPORT_SYMBOL +0x7f5bfc68 dev_pm_enable_wake_irq vmlinux EXPORT_SYMBOL_GPL +0x6ca1e564 ttm_mem_io_reserve vmlinux EXPORT_SYMBOL +0x7e0406d0 drm_atomic_set_crtc_for_connector vmlinux EXPORT_SYMBOL +0xada93cb2 irq_to_desc vmlinux EXPORT_SYMBOL +0xa25b93c3 ib_destroy_cm_id drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x90e807c7 ib_cache_gid_parse_type_str drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5e00aaf6 netlbl_calipso_ops_register vmlinux EXPORT_SYMBOL +0xc4cdb29c dev_mc_sync vmlinux EXPORT_SYMBOL +0x0e60add9 dev_uc_sync vmlinux EXPORT_SYMBOL +0x0170da3c hwmon_device_unregister vmlinux EXPORT_SYMBOL_GPL +0x07242d92 put_dax vmlinux EXPORT_SYMBOL_GPL +0x3f9a7cec device_set_of_node_from_dev vmlinux EXPORT_SYMBOL_GPL 
+0xc8594d3d reset_control_acquire vmlinux EXPORT_SYMBOL_GPL +0xbc86f7f5 acpi_pm_device_sleep_state vmlinux EXPORT_SYMBOL +0xe113bbbc csum_partial vmlinux EXPORT_SYMBOL +0x80ca5026 _bin2bcd vmlinux EXPORT_SYMBOL +0xfbaaf01e console_lock vmlinux EXPORT_SYMBOL +0x9a73b032 ZSTD_initDStream_usingDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x430ecc96 ZSTD_initCStream_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0x7d64c0eb wpan_phy_find net/ieee802154/ieee802154 EXPORT_SYMBOL +0x31bf3ca3 rxrpc_debug_id net/rxrpc/rxrpc EXPORT_SYMBOL +0x1638c357 hinic_unregister_uld drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x84a1e575 hinic_migrate_report drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x4b56368a hinic_create_qps drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x88638552 simd_skcipher_create_compat crypto/crypto_simd EXPORT_SYMBOL_GPL +0x4cc823ef o2hb_setup_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x4d8a96ab xas_set_mark vmlinux EXPORT_SYMBOL_GPL +0xc87fb025 xas_get_mark vmlinux EXPORT_SYMBOL_GPL +0xb58b247f tcp_v4_mtu_reduced vmlinux EXPORT_SYMBOL +0x2c88efbb mbox_send_message vmlinux EXPORT_SYMBOL_GPL +0x201d87c7 __drm_atomic_helper_crtc_duplicate_state vmlinux EXPORT_SYMBOL +0x759139cb of_pci_check_probe_only vmlinux EXPORT_SYMBOL_GPL +0x139f2189 __kfifo_alloc vmlinux EXPORT_SYMBOL +0xc366bfa1 ceph_pagelist_truncate net/ceph/libceph EXPORT_SYMBOL +0xb9caeb9a ps2_end_command drivers/input/serio/libps2 EXPORT_SYMBOL +0x190d8f8f iscsi_tcp_r2tpool_free drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x7672f942 nf_log_register vmlinux EXPORT_SYMBOL +0xdc200e25 serial8250_do_shutdown vmlinux EXPORT_SYMBOL_GPL +0x779a18af kstrtoll vmlinux EXPORT_SYMBOL +0x28accde2 cryptd_skcipher_queued vmlinux EXPORT_SYMBOL_GPL +0x65f24eea decode_rs8 lib/reed_solomon/reed_solomon EXPORT_SYMBOL_GPL +0x015d4d49 ib_device_get_by_name drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc1fd1dbc __tracepoint_bcache_btree_insert_key drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xcbef74fe hinic_dbg_lt_wr_16byte_mask drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x9ff6f403 qlt_abort_cmd drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x06cd2130 nfnl_ct_hook vmlinux EXPORT_SYMBOL_GPL +0x852234fe device_del vmlinux EXPORT_SYMBOL_GPL +0xb4f7c4ba device_add vmlinux EXPORT_SYMBOL_GPL +0x63106ada drm_fb_xrgb8888_to_rgb565 vmlinux EXPORT_SYMBOL +0x9f688a03 drm_dp_dpcd_read vmlinux EXPORT_SYMBOL +0x170ddf79 acpi_install_notify_handler vmlinux EXPORT_SYMBOL +0x44469a76 crc_ccitt_false_table vmlinux EXPORT_SYMBOL +0x52f56e49 crypto_hash_walk_done vmlinux EXPORT_SYMBOL_GPL +0xdd391eff profile_event_unregister vmlinux EXPORT_SYMBOL_GPL +0xf5b89236 init_uts_ns vmlinux EXPORT_SYMBOL_GPL +0x4d7e3b8b vringh_need_notify_kern drivers/vhost/vringh EXPORT_SYMBOL +0x44ee231d mlx5_core_destroy_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x2ca0a2da mlx5_core_destroy_qp drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x428a344a mlx5_core_destroy_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xbb603ef8 dst_release vmlinux EXPORT_SYMBOL +0x5e539899 dst_discard_out vmlinux EXPORT_SYMBOL +0x9dc65bf0 sock_no_mmap vmlinux EXPORT_SYMBOL +0x7c80974c led_trigger_store vmlinux EXPORT_SYMBOL_GPL +0x161c5d62 drm_fb_memcpy_dstclip vmlinux EXPORT_SYMBOL +0xacb4d88c clk_rate_exclusive_put vmlinux EXPORT_SYMBOL_GPL +0x461d16ca sg_nents vmlinux EXPORT_SYMBOL +0xc1ff6fd3 disk_map_sector_rcu vmlinux EXPORT_SYMBOL_GPL +0x27866435 
security_xfrm_state_alloc vmlinux EXPORT_SYMBOL +0x04505f68 sysfs_update_group vmlinux EXPORT_SYMBOL_GPL +0xa38602cd drain_workqueue vmlinux EXPORT_SYMBOL_GPL +0xe3c6fb94 mlxsw_core_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xf103adbb mlx5_set_port_prio_tc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc2dd6d57 tcp_initialize_rcv_mss vmlinux EXPORT_SYMBOL +0x54e6fcdd net_enable_timestamp vmlinux EXPORT_SYMBOL +0xff1e957d sockfd_lookup vmlinux EXPORT_SYMBOL +0xb5c3dbfa cpufreq_disable_fast_switch vmlinux EXPORT_SYMBOL_GPL +0xd95e5ac3 usb_serial_generic_write_bulk_callback vmlinux EXPORT_SYMBOL_GPL +0xf7eb7286 ata_sff_tf_read vmlinux EXPORT_SYMBOL_GPL +0x731c4a9c dma_fence_signal vmlinux EXPORT_SYMBOL +0xedff4be5 acpi_load_table vmlinux EXPORT_SYMBOL +0xe6c4fe7c fb_validate_mode vmlinux EXPORT_SYMBOL +0x8b910be2 errseq_sample vmlinux EXPORT_SYMBOL +0x6bc3fbc0 __unregister_chrdev vmlinux EXPORT_SYMBOL +0xe4971ade tracing_alloc_snapshot vmlinux EXPORT_SYMBOL_GPL +0x39237717 srcu_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL +0x6004858d LZ4_compress_fast lib/lz4/lz4_compress EXPORT_SYMBOL +0xd0f86e0a nfc_llc_start net/nfc/hci/hci EXPORT_SYMBOL +0xfa4ef052 nfc_target_lost net/nfc/nfc EXPORT_SYMBOL +0x482ac5a4 g_token_size net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0x39e18bf7 ax25_find_cb net/ax25/ax25 EXPORT_SYMBOL +0xd8fa57a6 dlm_unregister_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0xbdb535f5 ping_seq_next vmlinux EXPORT_SYMBOL_GPL +0x135fc1ff dev_graft_qdisc vmlinux EXPORT_SYMBOL +0xc61d5ab0 devlink_resource_register vmlinux EXPORT_SYMBOL_GPL +0x4c3a69d4 acpi_device_fix_up_power vmlinux EXPORT_SYMBOL_GPL +0xb91c7737 badblocks_store vmlinux EXPORT_SYMBOL_GPL +0xa851973a raw_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x4786edf0 kvm_vcpu_gfn_to_page vmlinux EXPORT_SYMBOL_GPL +0xdbc2e2a4 nf_ct_delete net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xd10a2248 rdma_restrack_uadd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x74f592c8 rdma_restrack_kadd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x38819c08 __parport_register_driver drivers/parport/parport EXPORT_SYMBOL +0xd51ba1c0 md_finish_reshape drivers/md/md-mod EXPORT_SYMBOL +0xf6e82e38 mlx4_unregister_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x32fc4963 skb_ensure_writable vmlinux EXPORT_SYMBOL +0x380513ba skb_splice_bits vmlinux EXPORT_SYMBOL_GPL +0xdf270fa0 of_thermal_get_ntrips vmlinux EXPORT_SYMBOL_GPL +0x1182d844 sas_eh_device_reset_handler vmlinux EXPORT_SYMBOL_GPL +0x06e74fe1 scsi_target_quiesce vmlinux EXPORT_SYMBOL +0x1f385b9b transport_class_register vmlinux EXPORT_SYMBOL_GPL +0x7812c20f drm_gem_fb_get_obj vmlinux EXPORT_SYMBOL_GPL +0x08fde2cd regulator_bulk_unregister_supply_alias vmlinux EXPORT_SYMBOL_GPL +0x7c9ca58f __sg_page_iter_next vmlinux EXPORT_SYMBOL +0x99f2d00a sysfs_emit_at vmlinux EXPORT_SYMBOL_GPL +0x88348393 dccp_ctl_make_reset net/dccp/dccp EXPORT_SYMBOL_GPL +0xcb40f744 mpt_free_fw_memory drivers/message/fusion/mptbase EXPORT_SYMBOL +0xb1623638 can_rx_offload_add_timestamp drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x6dc521c9 hinic_get_dcb_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xb353d6cb iscsi_conn_setup drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x7e30be48 fcoe_ctlr_destroy drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x311edb04 mr_table_alloc vmlinux EXPORT_SYMBOL +0x03816e1f devlink_alloc vmlinux EXPORT_SYMBOL_GPL +0x38652311 dm_put_device vmlinux EXPORT_SYMBOL +0x8681c8f3 
dm_get_device vmlinux EXPORT_SYMBOL +0x74934ea1 input_handler_for_each_handle vmlinux EXPORT_SYMBOL +0x6d613c50 fwnode_find_reference vmlinux EXPORT_SYMBOL_GPL +0xb827897b transport_class_unregister vmlinux EXPORT_SYMBOL_GPL +0x12f6f69c fb_videomode_to_var vmlinux EXPORT_SYMBOL +0x567851e4 jbd2_journal_try_to_free_buffers vmlinux EXPORT_SYMBOL +0xefa2c27d register_tracepoint_module_notifier vmlinux EXPORT_SYMBOL_GPL +0xea231bdc down_write_killable vmlinux EXPORT_SYMBOL +0xaad8c7d6 default_wake_function vmlinux EXPORT_SYMBOL +0xfe9ecede nfc_hci_send_cmd_async net/nfc/hci/hci EXPORT_SYMBOL +0x607e15fd p9_client_statfs net/9p/9pnet EXPORT_SYMBOL +0x7d053fc5 dm_rh_start_recovery drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x8473cca3 mlx5_cmd_destroy_vport_lag drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x9cc63dc1 hinic_update_rq_local_ci drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x32cd6e55 hinic_update_sq_local_ci drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x35a4a1b2 fc_remote_port_add drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xecb2109c input_mt_assign_slots vmlinux EXPORT_SYMBOL +0xdb499b09 dev_attr_link_power_management_policy vmlinux EXPORT_SYMBOL_GPL +0x91fec1cc drm_rect_calc_vscale vmlinux EXPORT_SYMBOL +0x2d50570f drm_rect_calc_hscale vmlinux EXPORT_SYMBOL +0xaa341905 acpi_bios_exception vmlinux EXPORT_SYMBOL +0x55453055 of_find_backlight vmlinux EXPORT_SYMBOL +0xf1fedae8 pci_bus_read_config_byte vmlinux EXPORT_SYMBOL +0x677566f7 blkcg_root vmlinux EXPORT_SYMBOL_GPL +0xeda52d95 mount_subtree vmlinux EXPORT_SYMBOL +0x03d6d09e iget_locked vmlinux EXPORT_SYMBOL +0x5bc92e85 LZ4_compress_destSize lib/lz4/lz4_compress EXPORT_SYMBOL +0x7384872b hinic_update_rq_delta drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7632b723 nfs_sb_active fs/nfs/nfs EXPORT_SYMBOL_GPL +0x99e00602 of_graph_get_next_endpoint vmlinux EXPORT_SYMBOL +0xc104c356 class_interface_register vmlinux EXPORT_SYMBOL_GPL +0x33835e33 iommu_group_add_device vmlinux EXPORT_SYMBOL_GPL +0xae0f51bf clk_hw_unregister_fixed_factor vmlinux EXPORT_SYMBOL_GPL +0x95b35cae blk_queue_max_write_zeroes_sectors vmlinux EXPORT_SYMBOL +0xec0271f0 key_type_keyring vmlinux EXPORT_SYMBOL +0x00b8efcb nfc_tm_activated net/nfc/nfc EXPORT_SYMBOL +0x09471db8 nfs_revalidate_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xee079e2e fscache_operation_init fs/fscache/fscache EXPORT_SYMBOL +0xae39f80e dst_cache_init vmlinux EXPORT_SYMBOL_GPL +0xfc61ca8a ir_raw_event_handle vmlinux EXPORT_SYMBOL_GPL +0x6649e760 rc_keyup vmlinux EXPORT_SYMBOL_GPL +0xf1b73374 spi_get_device_id vmlinux EXPORT_SYMBOL_GPL +0x5693ba1d sas_release_transport vmlinux EXPORT_SYMBOL +0x7e492096 class_interface_unregister vmlinux EXPORT_SYMBOL_GPL +0x270805fc drm_vma_node_allow vmlinux EXPORT_SYMBOL +0x2b220dea of_get_named_gpio_flags vmlinux EXPORT_SYMBOL_GPL +0xc617f82c unregister_oom_notifier vmlinux EXPORT_SYMBOL_GPL +0xbc10c1d8 xdr_decode_word net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4dac77f0 xdr_encode_netobj net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1f17d11d snd_power_wait sound/core/snd EXPORT_SYMBOL +0xf3c2dfe0 transport_set_vpd_proto_id drivers/target/target_core_mod EXPORT_SYMBOL +0xe9c085d7 mptscsih_scandv_complete drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x4676dd88 alloc_sja1000dev drivers/net/can/sja1000/sja1000 EXPORT_SYMBOL_GPL +0x6af3cf73 tcp_v4_md5_lookup vmlinux EXPORT_SYMBOL +0x9e00b059 i2c_generic_scl_recovery vmlinux EXPORT_SYMBOL_GPL +0xe3312a6e tty_register_device vmlinux EXPORT_SYMBOL +0xbb4f4766 
simple_write_to_buffer vmlinux EXPORT_SYMBOL +0xea6eecf6 read_code vmlinux EXPORT_SYMBOL +0xf1531088 __srcu_read_unlock vmlinux EXPORT_SYMBOL_GPL +0xc2e587d1 reset_devices vmlinux EXPORT_SYMBOL +0xcf5627b3 mrp_unregister_application net/802/mrp EXPORT_SYMBOL_GPL +0x8f8d8b2e usb_otg_descriptor_init drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xc99fb88f mlx5_core_res_put drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x413741df pnfs_layout_mark_request_commit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x5a9f1d63 memmove vmlinux EXPORT_SYMBOL +0x7b771533 of_find_all_nodes vmlinux EXPORT_SYMBOL +0xb34ee8b7 dma_resv_add_shared_fence vmlinux EXPORT_SYMBOL +0x086f0dc0 regulator_map_voltage_linear vmlinux EXPORT_SYMBOL_GPL +0xfa39b4be sha224_update vmlinux EXPORT_SYMBOL +0xc5f7e801 sg_last vmlinux EXPORT_SYMBOL +0xd046c3c8 param_get_string vmlinux EXPORT_SYMBOL +0x1a9fbbbe kvm_debugfs_dir vmlinux EXPORT_SYMBOL_GPL +0x02996e8b nci_core_conn_close net/nfc/nci/nci EXPORT_SYMBOL +0x56ca20cc rpc_d_lookup_sb net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4e6beddc tc_setup_cb_add vmlinux EXPORT_SYMBOL +0x2a0e2a90 devm_thermal_zone_of_sensor_unregister vmlinux EXPORT_SYMBOL_GPL +0xe39d0794 usb_phy_roothub_exit vmlinux EXPORT_SYMBOL_GPL +0xc93ee1e7 usb_phy_roothub_init vmlinux EXPORT_SYMBOL_GPL +0x4d11a55e pm_clk_remove vmlinux EXPORT_SYMBOL_GPL +0x3baccc62 drm_atomic_bridge_post_disable vmlinux EXPORT_SYMBOL +0x84afaa5b tpm_pm_resume vmlinux EXPORT_SYMBOL_GPL +0xe6f83837 acpi_bus_attach_private_data vmlinux EXPORT_SYMBOL_GPL +0x2b71f373 jbd2_journal_start_commit vmlinux EXPORT_SYMBOL +0x76d3cd60 laptop_mode vmlinux EXPORT_SYMBOL +0x11957c5d generic_file_direct_write vmlinux EXPORT_SYMBOL +0x1e3d3b4e end_page_writeback vmlinux EXPORT_SYMBOL +0x2b76663d vsock_remove_pending net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x242852b9 ax25_uid_policy net/ax25/ax25 EXPORT_SYMBOL +0x052be10b ip_tunnel_delete_nets net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xd0a561c2 skb_recv_datagram vmlinux EXPORT_SYMBOL +0xeeb8f1ea drm_gem_prime_mmap vmlinux EXPORT_SYMBOL +0xe23a6673 clk_hw_round_rate vmlinux EXPORT_SYMBOL_GPL +0x5e78d1ad fiemap_check_flags vmlinux EXPORT_SYMBOL +0xe4534b0a generic_file_readonly_mmap vmlinux EXPORT_SYMBOL +0xd06524ba raw_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0xb917b6d7 return_address vmlinux EXPORT_SYMBOL_GPL +0xa0ca0bbf nci_hci_get_param net/nfc/nci/nci EXPORT_SYMBOL +0x566b56d8 nci_hci_set_param net/nfc/nci/nci EXPORT_SYMBOL +0x7ea104b7 vhost_discard_vq_desc drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x0d542439 __ipv6_addr_type vmlinux EXPORT_SYMBOL +0xa237ad6d unregister_qdisc vmlinux EXPORT_SYMBOL +0xb62203a3 cpufreq_table_index_unsorted vmlinux EXPORT_SYMBOL_GPL +0x2b953cb5 usb_autopm_get_interface vmlinux EXPORT_SYMBOL_GPL +0x3be7202f phy_device_register vmlinux EXPORT_SYMBOL +0xbcbb9e1c ttm_dma_tt_fini vmlinux EXPORT_SYMBOL +0xf43b1453 drm_client_modeset_commit vmlinux EXPORT_SYMBOL +0xfad93e04 drm_client_modeset_commit_force vmlinux EXPORT_SYMBOL +0xbf4625ab acpi_pci_find_root vmlinux EXPORT_SYMBOL_GPL +0x51f702e0 pci_map_rom vmlinux EXPORT_SYMBOL +0xfee64246 crypto_shash_finup vmlinux EXPORT_SYMBOL_GPL +0xa7d4c56b crypto_shash_final vmlinux EXPORT_SYMBOL_GPL +0x6934d674 crypto_ahash_finup vmlinux EXPORT_SYMBOL_GPL +0x8b3078dd crypto_ahash_final vmlinux EXPORT_SYMBOL_GPL +0x221eab6d scatterwalk_copychunks vmlinux EXPORT_SYMBOL_GPL +0x052c9aed ktime_get_real_fast_ns vmlinux EXPORT_SYMBOL_GPL +0x7679f3fb devm_irq_alloc_generic_chip vmlinux EXPORT_SYMBOL_GPL +0x33a0acb2 rpc_free 
net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x159032f1 can_rx_unregister net/can/can EXPORT_SYMBOL +0x61d24c52 ib_rate_to_mbps drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xae956dce ib_rate_to_mult drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x947f597c mlx5_set_port_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xfc973272 mlx4_multicast_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3f0096cc fc_vport_terminate drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xe53986b3 sock_diag_put_meminfo vmlinux EXPORT_SYMBOL_GPL +0x0d9f435b uhid_hid_driver vmlinux EXPORT_SYMBOL_GPL +0x6dd17e7b acpi_get_table_header vmlinux EXPORT_SYMBOL +0x64bbc288 string_unescape vmlinux EXPORT_SYMBOL +0xc6cb465a __kfifo_max_r vmlinux EXPORT_SYMBOL +0xdedbfc09 vm_insert_page vmlinux EXPORT_SYMBOL +0xd4888676 bpf_prog_sub vmlinux EXPORT_SYMBOL_GPL +0x1d6c8a65 irq_domain_add_legacy vmlinux EXPORT_SYMBOL_GPL +0xd41a9d39 irq_chip_disable_parent vmlinux EXPORT_SYMBOL_GPL +0x0405fdd9 register_console vmlinux EXPORT_SYMBOL +0x65aaf037 crc7_be_syndrome_table lib/crc7 EXPORT_SYMBOL +0xaf12c1e5 __ip_tunnel_change_mtu net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xc64f6d2d st21nfca_se_deinit drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x8974a22d inet_reqsk_alloc vmlinux EXPORT_SYMBOL +0x8d9bf0d4 sock_no_socketpair vmlinux EXPORT_SYMBOL +0x2aadad1a efi_capsule_update vmlinux EXPORT_SYMBOL_GPL +0x40652df3 input_ff_event vmlinux EXPORT_SYMBOL_GPL +0x825c7340 phylink_get_eee_err vmlinux EXPORT_SYMBOL_GPL +0xa20e40c4 ata_pci_bmdma_init_one vmlinux EXPORT_SYMBOL_GPL +0x836e9b00 drm_property_create_signed_range vmlinux EXPORT_SYMBOL +0xd54ea66e drm_framebuffer_plane_height vmlinux EXPORT_SYMBOL +0x019323eb devm_gpio_request vmlinux EXPORT_SYMBOL_GPL +0xefbe18d4 __filemap_set_wb_err vmlinux EXPORT_SYMBOL +0x3192d768 cpufreq_remove_update_util_hook vmlinux EXPORT_SYMBOL_GPL +0x2ed36c74 arpt_do_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL +0x610f0287 mlx5_eq_destroy_generic drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x2c5f4d01 async_tx_submit crypto/async_tx/async_tx EXPORT_SYMBOL_GPL +0x5cf1923b nfs_retry_commit fs/nfs/nfs EXPORT_SYMBOL_GPL +0xf2292466 nfs_client_init_status fs/nfs/nfs EXPORT_SYMBOL_GPL +0x44c3cd82 dev_addr_del vmlinux EXPORT_SYMBOL +0xe97ee911 gnet_stats_start_copy vmlinux EXPORT_SYMBOL +0x3b8a340c devm_of_platform_depopulate vmlinux EXPORT_SYMBOL_GPL +0x33f0768c cpufreq_quick_get_max vmlinux EXPORT_SYMBOL +0xc3d5b8d3 iscsi_is_session_online vmlinux EXPORT_SYMBOL_GPL +0x1c5ff742 clk_get_phase vmlinux EXPORT_SYMBOL_GPL +0xb7329c06 clk_set_phase vmlinux EXPORT_SYMBOL_GPL +0x9975dc22 acpi_get_handle vmlinux EXPORT_SYMBOL +0xcfb6a3da unregister_atmdevice_notifier net/atm/atm EXPORT_SYMBOL_GPL +0x07e08ab2 mlx5_fs_add_rx_underlay_qpn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x588a6c3b st_nci_discover_se drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL +0xf40e7a73 __xa_alloc vmlinux EXPORT_SYMBOL +0x46013233 net_dec_ingress_queue vmlinux EXPORT_SYMBOL_GPL +0x14477e52 usb_gadget_set_state vmlinux EXPORT_SYMBOL_GPL +0x72093bf7 platform_device_del vmlinux EXPORT_SYMBOL_GPL +0x7465e23e platform_device_put vmlinux EXPORT_SYMBOL_GPL +0xca6d8491 drm_atomic_add_affected_connectors vmlinux EXPORT_SYMBOL +0x86e6e941 drm_gem_vram_create vmlinux EXPORT_SYMBOL +0xd36dc10c get_random_u32 vmlinux EXPORT_SYMBOL +0xe3ff2c41 get_random_u64 vmlinux EXPORT_SYMBOL +0xb6ea086b devm_clk_hw_register_clkdev vmlinux EXPORT_SYMBOL +0x400ca303 
dw_pcie_find_capability vmlinux EXPORT_SYMBOL_GPL +0x556b5d62 __kfifo_dma_in_prepare_r vmlinux EXPORT_SYMBOL +0xb33a6fa4 blk_mq_stop_hw_queues vmlinux EXPORT_SYMBOL +0x486daa26 __fat_fs_error vmlinux EXPORT_SYMBOL_GPL +0x09616c21 __krealloc vmlinux EXPORT_SYMBOL +0x995d1071 prof_on vmlinux EXPORT_SYMBOL_GPL +0x55247f64 devm_free_irq vmlinux EXPORT_SYMBOL +0x2914ea2d ZSTD_compressBlock lib/zstd/zstd_compress EXPORT_SYMBOL +0xefce3c3b ceph_pagelist_reserve net/ceph/libceph EXPORT_SYMBOL +0xd3660fb4 rpc_exit net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x739670d7 mdev_set_iommu_device drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xf5eee0a4 capilib_data_b3_req drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xc401d489 bch_btree_insert_key drivers/md/bcache/bcache EXPORT_SYMBOL +0x995fa8fb mlx5_core_query_vendor_id drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x55686530 __arch_clear_user vmlinux EXPORT_SYMBOL +0xe411ade3 kset_create_and_add vmlinux EXPORT_SYMBOL_GPL +0x082adae1 kernel_accept vmlinux EXPORT_SYMBOL +0xf227a766 drm_dp_mst_topology_mgr_set_mst vmlinux EXPORT_SYMBOL +0x9e2737f0 acpi_install_interface_handler vmlinux EXPORT_SYMBOL +0xceac4ff5 pcim_iounmap vmlinux EXPORT_SYMBOL +0x62a83f63 mm_kobj vmlinux EXPORT_SYMBOL_GPL +0x570571f8 wb_writeout_inc vmlinux EXPORT_SYMBOL_GPL +0x5b2f27fb do_wait_intr vmlinux EXPORT_SYMBOL +0xb470ca30 iscsit_get_datain_values drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x0069fb14 mlx4_mr_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xc2a63f83 hinic_del_mac drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd2f2a4ea sysctl_tcp_rto_max vmlinux EXPORT_SYMBOL +0xc0794085 sysctl_tcp_rto_min vmlinux EXPORT_SYMBOL +0xa79db787 xt_proto_fini vmlinux EXPORT_SYMBOL_GPL +0xdf9c8cbf usb_gadget_map_request vmlinux EXPORT_SYMBOL_GPL +0x8ef02a61 sata_scr_write_flush vmlinux EXPORT_SYMBOL_GPL +0x8c9a9f44 driver_find vmlinux EXPORT_SYMBOL_GPL +0xb86c364b clk_register_divider vmlinux EXPORT_SYMBOL_GPL +0xd194ddf9 acpi_gpe_count vmlinux EXPORT_SYMBOL +0xfaa33018 pci_disable_pasid vmlinux EXPORT_SYMBOL_GPL +0x9144f0ec devm_gpio_request_one vmlinux EXPORT_SYMBOL_GPL +0x48653502 gpiod_set_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0xd9d5d879 sbitmap_queue_resize vmlinux EXPORT_SYMBOL_GPL +0x2f4d9b4e devm_ioremap_resource vmlinux EXPORT_SYMBOL +0xd5da108e sg_miter_start vmlinux EXPORT_SYMBOL +0xfc115259 freq_qos_remove_request vmlinux EXPORT_SYMBOL_GPL +0x56d697ce cpu_up vmlinux EXPORT_SYMBOL_GPL +0x4cf5f5a8 lc_is_used lib/lru_cache EXPORT_SYMBOL +0x6d2ba09d snd_card_free sound/core/snd EXPORT_SYMBOL +0x44fde533 iw_create_cm_id drivers/infiniband/core/iw_cm EXPORT_SYMBOL +0x3646e38f dm_tm_issue_prefetches drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x944f934b mlx4_SET_PORT_SCHEDULER drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xcfa4868a fc_block_scsi_eh drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xc879966d nfs_show_stats fs/nfs/nfs EXPORT_SYMBOL_GPL +0xf5f6c9c7 tso_build_data vmlinux EXPORT_SYMBOL +0x74ef1458 sock_zerocopy_alloc vmlinux EXPORT_SYMBOL_GPL +0xf5a238a7 dma_buf_map_attachment vmlinux EXPORT_SYMBOL_GPL +0xaf4ad733 tty_mode_ioctl vmlinux EXPORT_SYMBOL_GPL +0x06a4d0c5 pnp_stop_dev vmlinux EXPORT_SYMBOL +0xf5b6419f fb_class vmlinux EXPORT_SYMBOL +0xdbe80cc6 cpci_hp_register_controller vmlinux EXPORT_SYMBOL_GPL +0x7bc6642d load_nls vmlinux EXPORT_SYMBOL +0xdef13c6c __ib_alloc_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf5dedb30 rdma_node_get_transport 
drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa64c5ef0 dm_exception_store_destroy drivers/md/dm-snapshot EXPORT_SYMBOL +0x8fffaf28 mlx5_core_attach_mcg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x97999817 rfkill_set_hw_state vmlinux EXPORT_SYMBOL +0x659ded26 xfrm_flush_gc vmlinux EXPORT_SYMBOL +0x4859b8bb rtc_year_days vmlinux EXPORT_SYMBOL +0xbc9b8588 ehci_cf_port_reset_rwsem vmlinux EXPORT_SYMBOL_GPL +0xfbad3cf0 scsi_normalize_sense vmlinux EXPORT_SYMBOL +0x24aaecc5 ata_host_suspend vmlinux EXPORT_SYMBOL_GPL +0x19c876d0 ata_link_offline vmlinux EXPORT_SYMBOL_GPL +0xf6e88b4e __pm_runtime_use_autosuspend vmlinux EXPORT_SYMBOL_GPL +0x0bd2f2f6 drm_fb_helper_cfb_copyarea vmlinux EXPORT_SYMBOL +0xb2f35c6a xxh64 vmlinux EXPORT_SYMBOL +0x8888f1fe xxh32 vmlinux EXPORT_SYMBOL +0x864a8619 bio_advance vmlinux EXPORT_SYMBOL +0xb8294737 from_kuid vmlinux EXPORT_SYMBOL +0x33209528 add_timer_on vmlinux EXPORT_SYMBOL_GPL +0x40af2754 ceph_osdc_alloc_request net/ceph/libceph EXPORT_SYMBOL +0x80580c89 nf_conntrack_eventmask_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xda862b79 register_pppox_proto drivers/net/ppp/pppox EXPORT_SYMBOL +0x819d72cb klist_iter_exit vmlinux EXPORT_SYMBOL_GPL +0x0a172cff ip6_local_out vmlinux EXPORT_SYMBOL_GPL +0x884deb9d eeprom_93cx6_read vmlinux EXPORT_SYMBOL_GPL +0x8daeff0f pm_runtime_set_autosuspend_delay vmlinux EXPORT_SYMBOL_GPL +0xfb6eedf9 power_group_name vmlinux EXPORT_SYMBOL_GPL +0x7202dac0 pci_slots_kset vmlinux EXPORT_SYMBOL_GPL +0x03f24f1f pci_find_next_ext_capability vmlinux EXPORT_SYMBOL_GPL +0x74b5ea68 lcm_not_zero vmlinux EXPORT_SYMBOL_GPL +0xe13d5d07 kstrtou16_from_user vmlinux EXPORT_SYMBOL +0x54245b39 kstrtoull_from_user vmlinux EXPORT_SYMBOL +0x3f63e65e crypto_skcipher_decrypt vmlinux EXPORT_SYMBOL_GPL +0x81626907 xdr_init_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5d999fc3 snd_pcm_new_internal sound/core/snd-pcm EXPORT_SYMBOL +0x64e66d2c ib_sa_path_rec_get drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7ae13a41 ib_check_mr_status drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc1d989c5 vfio_external_check_extension drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xebf4e1ec md_check_no_bitmap drivers/md/md-mod EXPORT_SYMBOL +0xdeff4950 mlxsw_core_event_listener_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x9c611910 mlx5_lag_is_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x160a7715 nfs_remove_bad_delegation fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x99f018c4 nvmem_cell_read vmlinux EXPORT_SYMBOL_GPL +0x72e3f3a0 scsi_is_sas_rphy vmlinux EXPORT_SYMBOL +0xeaf7884c device_unregister vmlinux EXPORT_SYMBOL_GPL +0x51460f46 drm_plane_create_color_properties vmlinux EXPORT_SYMBOL +0x5d128fcd virtqueue_get_vring vmlinux EXPORT_SYMBOL_GPL +0xfe6ce451 jbd2_journal_load vmlinux EXPORT_SYMBOL +0xbcb71e0c seq_escape_mem_ascii vmlinux EXPORT_SYMBOL +0x1a985df7 vsock_enqueue_accept net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x65a32650 xfrm_state_add vmlinux EXPORT_SYMBOL +0xc6444f2f ping_rcv vmlinux EXPORT_SYMBOL_GPL +0x3b4c256b __netlink_kernel_create vmlinux EXPORT_SYMBOL +0xec8bbb65 of_property_read_u64_index vmlinux EXPORT_SYMBOL_GPL +0xc561386b of_property_read_u32_index vmlinux EXPORT_SYMBOL_GPL +0xff5a8cfe cn_del_callback vmlinux EXPORT_SYMBOL_GPL +0x19c2521f regulator_is_enabled vmlinux EXPORT_SYMBOL_GPL +0xf3b95d79 btree_remove vmlinux EXPORT_SYMBOL_GPL +0x1e6613aa iov_iter_bvec vmlinux EXPORT_SYMBOL +0xccad4e5d iov_iter_kvec vmlinux EXPORT_SYMBOL +0xdfa796a4 p9_client_lock_dotl net/9p/9pnet 
EXPORT_SYMBOL +0x03c857a6 pn544_hci_probe drivers/nfc/pn544/pn544 EXPORT_SYMBOL +0x82d7af19 devlink_param_driverinit_value_set vmlinux EXPORT_SYMBOL_GPL +0xb83e014c devlink_param_driverinit_value_get vmlinux EXPORT_SYMBOL_GPL +0x62810e3c led_get_default_pattern vmlinux EXPORT_SYMBOL_GPL +0xa5395256 usb_serial_deregister_drivers vmlinux EXPORT_SYMBOL_GPL +0xaf1bed78 scsicam_bios_param vmlinux EXPORT_SYMBOL +0xec1e89f9 pci_generic_config_read32 vmlinux EXPORT_SYMBOL_GPL +0x75a6375a fscrypt_ioctl_get_key_status vmlinux EXPORT_SYMBOL_GPL +0xbb1bd29b I_BDEV vmlinux EXPORT_SYMBOL +0x7729cbdd task_handoff_register vmlinux EXPORT_SYMBOL_GPL +0x48cef5e2 irq_create_fwspec_mapping vmlinux EXPORT_SYMBOL_GPL +0x8518bdc3 hinic_get_sq_free_wqebbs drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd6ce244c iommu_sva_set_ops vmlinux EXPORT_SYMBOL_GPL +0xbeb5d114 tty_port_tty_set vmlinux EXPORT_SYMBOL +0xb226fa89 tty_port_tty_get vmlinux EXPORT_SYMBOL +0xe3ef769a devm_regulator_get vmlinux EXPORT_SYMBOL_GPL +0x4f1cd128 security_tun_dev_create vmlinux EXPORT_SYMBOL +0x589737b3 fs_context_for_mount vmlinux EXPORT_SYMBOL +0x3b6d4e3c dma_direct_sync_sg_for_device vmlinux EXPORT_SYMBOL +0x12d5dd2b nfc_get_local_general_bytes net/nfc/nfc EXPORT_SYMBOL +0xde23402a md_bitmap_resize drivers/md/md-mod EXPORT_SYMBOL_GPL +0x38a9f7c5 in6addr_loopback vmlinux EXPORT_SYMBOL +0x6423c71a inet_csk_route_child_sock vmlinux EXPORT_SYMBOL_GPL +0x04fc6810 netdev_set_num_tc vmlinux EXPORT_SYMBOL +0x30a9de3c skb_dump vmlinux EXPORT_SYMBOL +0xe9f0013b mipi_dsi_dcs_get_power_mode vmlinux EXPORT_SYMBOL +0xe00bea84 drm_format_info_min_pitch vmlinux EXPORT_SYMBOL +0x43f62bb6 lookup_user_key vmlinux EXPORT_SYMBOL +0x967a4204 vfs_path_lookup vmlinux EXPORT_SYMBOL +0xd03eaf4c schedule_hrtimeout_range vmlinux EXPORT_SYMBOL_GPL +0xee4aa69d kvm_vcpu_mark_page_dirty vmlinux EXPORT_SYMBOL_GPL +0x96612ba6 ib_unregister_device_and_put drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbc222a8d mlxsw_afk_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x6f720406 set_phv_bit drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x6d7e88c6 qtree_release_dquot fs/quota/quota_tree EXPORT_SYMBOL +0x2dacbbe9 ce_aes_setkey arch/arm64/crypto/aes-ce-cipher EXPORT_SYMBOL +0x9ed4808c fib6_new_table vmlinux EXPORT_SYMBOL_GPL +0x5f173521 devlink_params_unpublish vmlinux EXPORT_SYMBOL_GPL +0xb09d7570 usb_sg_wait vmlinux EXPORT_SYMBOL_GPL +0x3d7ea7b9 usb_sg_init vmlinux EXPORT_SYMBOL_GPL +0x153d4c96 usb_urb_ep_type_check vmlinux EXPORT_SYMBOL_GPL +0x29b31b43 sas_phy_add vmlinux EXPORT_SYMBOL +0xd4d94c1e dev_attr_ncq_prio_enable vmlinux EXPORT_SYMBOL_GPL +0xb7e56b30 drm_kms_helper_hotplug_event vmlinux EXPORT_SYMBOL +0xe40976c0 pnp_range_reserved vmlinux EXPORT_SYMBOL +0x3fc39b9e jbd2_journal_flush vmlinux EXPORT_SYMBOL +0xe5902e17 invalidate_bdev vmlinux EXPORT_SYMBOL +0x944375db _totalram_pages vmlinux EXPORT_SYMBOL +0x9aeacb87 ring_buffer_iter_empty vmlinux EXPORT_SYMBOL_GPL +0x07fcca73 put_pid_ns vmlinux EXPORT_SYMBOL_GPL +0xc83db67c ceph_put_page_vector net/ceph/libceph EXPORT_SYMBOL +0xa86481ba p9_client_fsync net/9p/9pnet EXPORT_SYMBOL +0x81fff2d1 ip_set_netmask_map net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x45d28643 ip_set_put_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xd099974a vringh_getdesc_user drivers/vhost/vringh EXPORT_SYMBOL +0x610b257e mpt_reset_deregister drivers/message/fusion/mptbase EXPORT_SYMBOL +0xcdbdeca7 mdio45_ethtool_gset_npage drivers/net/mdio EXPORT_SYMBOL +0x49855f00 
hinic_aeq_unregister_hw_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xcd55edfe hnae_get_handle drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0x88093487 kobject_uevent vmlinux EXPORT_SYMBOL_GPL +0x8ee30432 tcp_twsk_unique vmlinux EXPORT_SYMBOL_GPL +0x400bd564 netdev_info vmlinux EXPORT_SYMBOL +0xd8a994eb scsi_extd_sense_format vmlinux EXPORT_SYMBOL +0x54b1e753 drm_get_format_info vmlinux EXPORT_SYMBOL +0x6c61ce70 num_registered_fb vmlinux EXPORT_SYMBOL +0x98a20b74 pci_stop_root_bus vmlinux EXPORT_SYMBOL_GPL +0x37114700 __fscrypt_prepare_rename vmlinux EXPORT_SYMBOL_GPL +0xc56e3d24 vfs_ioctl vmlinux EXPORT_SYMBOL +0xf14031fb xdr_buf_from_iov net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9adb7399 nf_conntrack_expect_lock net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xec83dabe inet_send_prepare vmlinux EXPORT_SYMBOL_GPL +0x7c2d9b49 of_n_addr_cells vmlinux EXPORT_SYMBOL +0x868cdbd7 regmap_get_raw_write_max vmlinux EXPORT_SYMBOL_GPL +0xc203fc74 wakeup_source_add vmlinux EXPORT_SYMBOL_GPL +0x7ef97de0 drm_dp_atomic_release_vcpi_slots vmlinux EXPORT_SYMBOL +0xb76a5898 pci_bus_write_config_dword vmlinux EXPORT_SYMBOL +0x3e31d9c3 net_prio_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xffdb1a44 snd_pcm_lib_default_mmap sound/core/snd-pcm EXPORT_SYMBOL_GPL +0x2fd976d9 vhost_chr_read_iter drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x31fa8f23 parport_wait_peripheral drivers/parport/parport EXPORT_SYMBOL +0xbf1a2968 btracker_create drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x21806072 mlx5_core_res_hold drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb21ed212 nfs_set_sb_security fs/nfs/nfs EXPORT_SYMBOL_GPL +0x3b83b501 dst_cache_get_ip6 vmlinux EXPORT_SYMBOL_GPL +0x274bbbb3 netdev_lower_state_changed vmlinux EXPORT_SYMBOL +0x8f691058 drm_atomic_helper_prepare_planes vmlinux EXPORT_SYMBOL +0xb8f25b78 drm_dp_aux_init vmlinux EXPORT_SYMBOL +0x948ebc54 tty_hung_up_p vmlinux EXPORT_SYMBOL +0xd77c4939 blk_cleanup_queue vmlinux EXPORT_SYMBOL +0x5839f677 iomap_is_partially_uptodate vmlinux EXPORT_SYMBOL_GPL +0xfbc21200 generic_setlease vmlinux EXPORT_SYMBOL +0x5adc2807 btracker_destroy drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x213e4965 ps2_is_keyboard_id drivers/input/serio/libps2 EXPORT_SYMBOL +0x77ca0e4f mpt_GetIocState drivers/message/fusion/mptbase EXPORT_SYMBOL +0x5a49dbc9 timerqueue_del vmlinux EXPORT_SYMBOL_GPL +0x26325bd2 ipv6_getsockopt vmlinux EXPORT_SYMBOL +0x2733eaf7 scsi_dev_info_list_add_keyed vmlinux EXPORT_SYMBOL +0xdc97af2e syscore_suspend vmlinux EXPORT_SYMBOL_GPL +0x569abcca acpi_walk_resources vmlinux EXPORT_SYMBOL +0xe1314f8b ipmi_platform_add vmlinux EXPORT_SYMBOL +0xa35f5542 pcim_enable_device vmlinux EXPORT_SYMBOL +0x1301f4ff snd_ctl_sync_vmaster sound/core/snd EXPORT_SYMBOL_GPL +0xd059beeb vhost_dev_cleanup drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xdd97606d nf_register_net_hooks vmlinux EXPORT_SYMBOL +0x28deaa04 usb_driver_release_interface vmlinux EXPORT_SYMBOL_GPL +0x715c8d18 ata_sff_queue_work vmlinux EXPORT_SYMBOL_GPL +0xf6e874f5 ata_timing_merge vmlinux EXPORT_SYMBOL_GPL +0x87ef5728 proc_mkdir_data vmlinux EXPORT_SYMBOL_GPL +0x79862412 mpage_writepages vmlinux EXPORT_SYMBOL +0x7cf59a3b write_one_page vmlinux EXPORT_SYMBOL +0x7a81541b async_synchronize_cookie vmlinux EXPORT_SYMBOL_GPL +0xca80437b ceph_extent_to_file net/ceph/libceph EXPORT_SYMBOL +0xc7e88ad5 hnae3_unregister_ae_algo drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL +0x46fe485d st21nfca_connectivity_event_received drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0xed614c08 
o2nm_node_get fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x035e430b nlmsvc_ops fs/lockd/lockd EXPORT_SYMBOL_GPL +0xc838c3f5 __ashrti3 vmlinux EXPORT_SYMBOL +0xc2f52274 __lshrti3 vmlinux EXPORT_SYMBOL +0x350a3d6c udp6_set_csum vmlinux EXPORT_SYMBOL +0x10eee642 inet_add_protocol vmlinux EXPORT_SYMBOL +0xcd6f2dc9 nf_log_buf_add vmlinux EXPORT_SYMBOL_GPL +0x5441c067 ata_host_activate vmlinux EXPORT_SYMBOL_GPL +0xe10ec800 n_tty_ioctl_helper vmlinux EXPORT_SYMBOL +0xa4168a80 virtqueue_enable_cb_delayed vmlinux EXPORT_SYMBOL_GPL +0xe454edad acpi_set_modalias vmlinux EXPORT_SYMBOL_GPL +0x3ac59fdc sync_blockdev vmlinux EXPORT_SYMBOL +0x25240986 nobh_write_end vmlinux EXPORT_SYMBOL +0xa60fcee9 __tracepoint_bcache_read drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xe8277acb can_put_echo_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xc1a81991 nfs_server_remove_lists fs/nfs/nfs EXPORT_SYMBOL_GPL +0x431e6927 __next_node_in vmlinux EXPORT_SYMBOL +0x85b5e625 rfkill_set_states vmlinux EXPORT_SYMBOL +0x6e720ff2 rtnl_unlock vmlinux EXPORT_SYMBOL +0xffa9acd7 sock_rfree vmlinux EXPORT_SYMBOL +0xde84e6df kernel_recvmsg vmlinux EXPORT_SYMBOL +0x331ae5bf usb_hub_find_child vmlinux EXPORT_SYMBOL_GPL +0x78efbb26 phy_request_interrupt vmlinux EXPORT_SYMBOL +0x63943dd3 __root_device_register vmlinux EXPORT_SYMBOL_GPL +0xc0a3d105 find_next_bit vmlinux EXPORT_SYMBOL +0xf8d07858 bitmap_from_arr32 vmlinux EXPORT_SYMBOL +0x1a8d5b19 disk_part_iter_init vmlinux EXPORT_SYMBOL_GPL +0xf98d8181 ima_file_check vmlinux EXPORT_SYMBOL_GPL +0x0fb5c42c dquot_reclaim_space_nodirty vmlinux EXPORT_SYMBOL +0xe648ec2b freeze_bdev vmlinux EXPORT_SYMBOL +0xcb5bd81d clockevents_unbind_device vmlinux EXPORT_SYMBOL_GPL +0x0eefa2a0 srcu_batches_completed vmlinux EXPORT_SYMBOL_GPL +0x5a7f46f7 vcc_process_recv_queue net/atm/atm EXPORT_SYMBOL +0x2cc2d52d vcc_hash net/atm/atm EXPORT_SYMBOL +0xeae9d08b nft_meta_set_destroy net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x07b02873 nf_nat_alloc_null_binding net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xea5bad6b mlx4_srq_lookup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x9e63e4ca skb_kill_datagram vmlinux EXPORT_SYMBOL +0x38cbd3c4 kfree_skb vmlinux EXPORT_SYMBOL +0xa86d052e phy_resume vmlinux EXPORT_SYMBOL +0xa2394c99 kernel_kobj vmlinux EXPORT_SYMBOL_GPL +0x2b9997fb atomic_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL +0x00a30ed2 param_get_long vmlinux EXPORT_SYMBOL +0x6c23f4ef free_rs lib/reed_solomon/reed_solomon EXPORT_SYMBOL_GPL +0x2c1d9e60 ib_send_cm_mra drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x88a8bc54 can_rx_offload_get_echo_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x81ca7216 nfs_atomic_open fs/nfs/nfs EXPORT_SYMBOL_GPL +0x98868c81 udp_seq_next vmlinux EXPORT_SYMBOL +0xaf3a20e4 i2c_smbus_read_i2c_block_data_or_emulated vmlinux EXPORT_SYMBOL +0xcc389048 drm_atomic_set_fence_for_plane vmlinux EXPORT_SYMBOL +0xc307a10c drm_mm_takedown vmlinux EXPORT_SYMBOL +0xe393aaa9 regulator_get_voltage vmlinux EXPORT_SYMBOL_GPL +0xd3bd32b5 amba_driver_unregister vmlinux EXPORT_SYMBOL +0x84502a47 blk_status_to_errno vmlinux EXPORT_SYMBOL_GPL +0xbee380ba posix_acl_alloc vmlinux EXPORT_SYMBOL +0x4b17e177 kernel_read_file_from_fd vmlinux EXPORT_SYMBOL_GPL +0xb02ca40f svc_set_num_threads net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6565850d nf_reject_ip6_tcphdr_get net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL +0xe04dfaba synproxy_send_client_synack_ipv6 net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xe084f0be mrp_request_leave net/802/mrp EXPORT_SYMBOL_GPL 
+0xd72e8499 get_mtd_device_nm drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xf3e8b89e nfs_permission fs/nfs/nfs EXPORT_SYMBOL_GPL +0x86c5a07d rtnl_delete_link vmlinux EXPORT_SYMBOL_GPL +0x5cf00e0f __kfree_skb vmlinux EXPORT_SYMBOL +0xcb465920 led_compose_name vmlinux EXPORT_SYMBOL_GPL +0x3a576961 stmmac_remove_config_dt vmlinux EXPORT_SYMBOL_GPL +0x29d1c3ce drm_crtc_vblank_on vmlinux EXPORT_SYMBOL +0x28cf7a2b drm_dp_mst_hpd_irq vmlinux EXPORT_SYMBOL +0xb12536f9 tty_port_register_device vmlinux EXPORT_SYMBOL_GPL +0xc9822234 clk_register_clkdev vmlinux EXPORT_SYMBOL +0xec5af25f pci_find_parent_resource vmlinux EXPORT_SYMBOL +0x38aea28c pinctrl_get_group_pins vmlinux EXPORT_SYMBOL_GPL +0xa6265bb9 fuse_do_open vmlinux EXPORT_SYMBOL_GPL +0x7c6baf03 sysfs_update_groups vmlinux EXPORT_SYMBOL_GPL +0x0640e35e register_trace_event vmlinux EXPORT_SYMBOL_GPL +0xa34e8397 ceph_print_client_options net/ceph/libceph EXPORT_SYMBOL +0x5e3da0c8 rds_send_drop_acked net/rds/rds EXPORT_SYMBOL_GPL +0x4c449a0b core_tpg_set_initiator_node_tag drivers/target/target_core_mod EXPORT_SYMBOL +0xcbc1dd45 __hw_addr_unsync_dev vmlinux EXPORT_SYMBOL +0x923446f8 gpiod_get_raw_array_value vmlinux EXPORT_SYMBOL_GPL +0x305c0b47 dma_set_coherent_mask vmlinux EXPORT_SYMBOL +0xb6c3cae8 nft_register_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x5ae737d9 vhost_exceeds_weight drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xbb9abdaf rdma_restrack_get_byid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x52d67a4e neon_aes_cbc_encrypt arch/arm64/crypto/aes-neon-blk EXPORT_SYMBOL +0x8e17b3ae idr_destroy vmlinux EXPORT_SYMBOL +0xa7d5f92e ida_destroy vmlinux EXPORT_SYMBOL +0x279dcb1f tcp_reno_undo_cwnd vmlinux EXPORT_SYMBOL_GPL +0x6b24ed81 ip_defrag vmlinux EXPORT_SYMBOL +0x46155c71 register_qdisc vmlinux EXPORT_SYMBOL +0x954d046e scsi_flush_work vmlinux EXPORT_SYMBOL_GPL +0xe441e95a refcount_dec_not_one vmlinux EXPORT_SYMBOL +0xf4da71b3 public_key_verify_signature vmlinux EXPORT_SYMBOL_GPL +0xa3b958ce reset_hung_task_detector vmlinux EXPORT_SYMBOL_GPL +0xcf06fd14 xdr_stream_decode_string_dup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa9a164ff xprt_wait_for_buffer_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2640a458 arpt_alloc_initial_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL_GPL +0x5bd6c9d1 nf_nat_ipv6_register_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x6efca79d sbc_attrib_attrs drivers/target/target_core_mod EXPORT_SYMBOL +0x56885f69 mlx4_get_roce_gid_from_slave drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xcde42430 mlx4_config_roce_v2_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xbe39fdc2 default_qdisc_ops vmlinux EXPORT_SYMBOL +0x08e94300 __tracepoint_br_fdb_update vmlinux EXPORT_SYMBOL_GPL +0xb9b7dadf free_netdev vmlinux EXPORT_SYMBOL +0xd96b425a __drm_printfn_coredump vmlinux EXPORT_SYMBOL +0xb9b461b8 drm_dp_link_probe vmlinux EXPORT_SYMBOL +0x75871f5e acpi_get_next_object vmlinux EXPORT_SYMBOL +0x279b2821 aead_register_instance vmlinux EXPORT_SYMBOL_GPL +0xc9ec4e21 free_percpu vmlinux EXPORT_SYMBOL_GPL +0x3737d9a9 ZSTD_DStreamWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL +0x17dd39d6 dm_deferred_set_create drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xaf655a5f mlx5_query_port_link_width_oper drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x5e2f8fd7 ip6_datagram_recv_ctl vmlinux EXPORT_SYMBOL_GPL +0x10c11f50 ip6_dst_alloc vmlinux EXPORT_SYMBOL +0xc9a36456 devlink_port_attrs_set vmlinux EXPORT_SYMBOL_GPL +0x11e05453 netdev_class_remove_file_ns vmlinux EXPORT_SYMBOL 
+0xca0370fc metadata_dst_free vmlinux EXPORT_SYMBOL_GPL +0xb9c0f82a __zerocopy_sg_from_iter vmlinux EXPORT_SYMBOL +0xe764be9f sk_setup_caps vmlinux EXPORT_SYMBOL_GPL +0x72d267dc nvmem_del_cell_lookups vmlinux EXPORT_SYMBOL_GPL +0xbd68d1c4 spi_sync_locked vmlinux EXPORT_SYMBOL_GPL +0x14b3414b scsi_host_get vmlinux EXPORT_SYMBOL +0xdae98b04 drm_gem_cma_prime_vunmap vmlinux EXPORT_SYMBOL_GPL +0x34febb0f drm_event_reserve_init vmlinux EXPORT_SYMBOL +0x4b50f3e6 drm_helper_move_panel_connectors_to_head vmlinux EXPORT_SYMBOL +0x81c9eeeb regulator_bulk_get vmlinux EXPORT_SYMBOL_GPL +0x424d3620 zlib_inflateIncomp vmlinux EXPORT_SYMBOL +0x2871986e fuse_send_init vmlinux EXPORT_SYMBOL_GPL +0x24e1094c iomap_releasepage vmlinux EXPORT_SYMBOL_GPL +0xf932015f __raw_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x6a5fa363 sigprocmask vmlinux EXPORT_SYMBOL +0xe34ea834 ieee802154_unregister_hw net/mac802154/mac802154 EXPORT_SYMBOL +0x7242d129 ax25_listen_register net/ax25/ax25 EXPORT_SYMBOL +0xdb4698cd mtd_concat_destroy drivers/mtd/mtd EXPORT_SYMBOL +0xb9a65c95 parport_ieee1284_epp_write_addr drivers/parport/parport EXPORT_SYMBOL +0x8707b3d3 parport_ieee1284_ecp_write_addr drivers/parport/parport EXPORT_SYMBOL +0x8472467a hinic_pf_id_of_vf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x2eaa55b1 devm_hwrng_register drivers/char/hw_random/rng-core EXPORT_SYMBOL_GPL +0x9e13f6f6 gf128mul_lle crypto/gf128mul EXPORT_SYMBOL +0xa32619c3 nfs4_mark_deviceid_available fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xbf98847b __skb_flow_get_ports vmlinux EXPORT_SYMBOL +0x1614334f sk_stream_wait_connect vmlinux EXPORT_SYMBOL +0x2c97f8a2 of_reconfig_notifier_register vmlinux EXPORT_SYMBOL_GPL +0xaf5b48a7 phy_set_sym_pause vmlinux EXPORT_SYMBOL +0x8d4c1398 devres_alloc_node vmlinux EXPORT_SYMBOL_GPL +0xce2a837d drm_vblank_init vmlinux EXPORT_SYMBOL +0x634d0339 drm_dp_dual_mode_detect vmlinux EXPORT_SYMBOL +0x554ae3a4 irq_poll_sched vmlinux EXPORT_SYMBOL +0xfbb22e5d nvme_remove_namespaces drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xcc02b008 __async_tx_find_channel crypto/async_tx/async_tx EXPORT_SYMBOL_GPL +0xb8f11603 idr_alloc vmlinux EXPORT_SYMBOL_GPL +0xf2bf09ed sock_no_sendmsg vmlinux EXPORT_SYMBOL +0x2ebe2b9d phy_basic_features vmlinux EXPORT_SYMBOL_GPL +0x79dcad6d drm_hdcp_update_content_protection vmlinux EXPORT_SYMBOL +0xa6aee438 drm_atomic_helper_commit_duplicated_state vmlinux EXPORT_SYMBOL +0x7cef58bd jbd2_journal_update_sb_errno vmlinux EXPORT_SYMBOL +0x21344304 vfs_unlink vmlinux EXPORT_SYMBOL +0x3ba01b47 get_compat_sigset vmlinux EXPORT_SYMBOL_GPL +0xaa970ebb get_task_exe_file vmlinux EXPORT_SYMBOL +0xc6c80a55 nfc_hci_get_param net/nfc/hci/hci EXPORT_SYMBOL +0x4fcf6a91 nfc_hci_set_param net/nfc/hci/hci EXPORT_SYMBOL +0xab3f1bc1 transport_init_se_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0xa5b35479 fcoe_check_wait_queue drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x5d7a64c7 srp_start_tl_fail_timers drivers/scsi/scsi_transport_srp EXPORT_SYMBOL +0x65cfd537 nfs_mkdir fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9241652d xfrm_input_unregister_afinfo vmlinux EXPORT_SYMBOL +0x2b7b3927 tcp_req_err vmlinux EXPORT_SYMBOL +0xc7fae024 xt_compat_calc_jump vmlinux EXPORT_SYMBOL_GPL +0x6b58b4c9 __skb_gro_checksum_complete vmlinux EXPORT_SYMBOL +0xb2405efc secure_tcp_seq vmlinux EXPORT_SYMBOL_GPL +0x6ca3a433 of_get_child_by_name vmlinux EXPORT_SYMBOL +0x269175a7 cpufreq_frequency_table_get_index vmlinux EXPORT_SYMBOL_GPL +0x08808536 phy_aneg_done vmlinux EXPORT_SYMBOL +0xb4a9df12 ata_acpi_stm vmlinux EXPORT_SYMBOL_GPL 
+0xebc2dcfc ata_acpi_gtm vmlinux EXPORT_SYMBOL_GPL +0x9c23d3bb key_revoke vmlinux EXPORT_SYMBOL +0x4ad671be pipe_unlock vmlinux EXPORT_SYMBOL +0x7d9863bc irq_generic_chip_ops vmlinux EXPORT_SYMBOL_GPL +0xc528a49a queued_write_lock_slowpath vmlinux EXPORT_SYMBOL +0x68fd4fe8 rds_message_addref net/rds/rds EXPORT_SYMBOL_GPL +0xf9a1eb37 xprt_reserve_xprt_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x804d53ac genlmsg_multicast_allns vmlinux EXPORT_SYMBOL +0x168bdd84 __sock_recv_wifi_status vmlinux EXPORT_SYMBOL_GPL +0x7d3c97ea devm_mbox_controller_register vmlinux EXPORT_SYMBOL_GPL +0x1f02c706 efivar_entry_delete vmlinux EXPORT_SYMBOL_GPL +0x9ea65596 pm_generic_restore vmlinux EXPORT_SYMBOL_GPL +0x2d587633 bus_rescan_devices vmlinux EXPORT_SYMBOL_GPL +0xa0c8f566 ttm_mem_global_alloc vmlinux EXPORT_SYMBOL +0x629cee20 put_tty_driver vmlinux EXPORT_SYMBOL +0xd2d15d06 vring_create_virtqueue vmlinux EXPORT_SYMBOL_GPL +0xecfd68ef acpi_get_node vmlinux EXPORT_SYMBOL +0xdde1e33f backlight_force_update vmlinux EXPORT_SYMBOL +0x6ff54e15 gpiod_toggle_active_low vmlinux EXPORT_SYMBOL_GPL +0xde87f512 debugfs_read_file_bool vmlinux EXPORT_SYMBOL_GPL +0xab4c9dac __tracepoint_rpm_resume vmlinux EXPORT_SYMBOL_GPL +0xdb262a9e queue_work_on vmlinux EXPORT_SYMBOL +0x264c0106 ioport_resource vmlinux EXPORT_SYMBOL +0xd07a6b57 nfs_release_request fs/nfs/nfs EXPORT_SYMBOL_GPL +0x84329603 __fscache_maybe_release_page fs/fscache/fscache EXPORT_SYMBOL +0xf9b17706 devlink_sb_register vmlinux EXPORT_SYMBOL_GPL +0x41943734 dst_cache_set_ip6 vmlinux EXPORT_SYMBOL_GPL +0xc7a4fbed rtnl_lock vmlinux EXPORT_SYMBOL +0xf13f59ac scsi_device_from_queue vmlinux EXPORT_SYMBOL_GPL +0xe7008324 ata_scsi_slave_destroy vmlinux EXPORT_SYMBOL_GPL +0x81412f6e drm_sched_entity_fini vmlinux EXPORT_SYMBOL +0x13b723c4 drm_crtc_vblank_restore vmlinux EXPORT_SYMBOL +0x3a96937e __drm_atomic_helper_connector_reset vmlinux EXPORT_SYMBOL +0xc394403e pci_probe_reset_bus vmlinux EXPORT_SYMBOL_GPL +0xb2b87031 debugfs_create_bool vmlinux EXPORT_SYMBOL_GPL +0x634fee06 generic_file_splice_read vmlinux EXPORT_SYMBOL +0xf414a2bb get_tree_single_reconf vmlinux EXPORT_SYMBOL +0xdd2efc0f ring_buffer_reset_cpu vmlinux EXPORT_SYMBOL_GPL +0x9bcd8616 __irq_domain_add vmlinux EXPORT_SYMBOL_GPL +0x1bddb5c1 prepare_kernel_cred vmlinux EXPORT_SYMBOL +0x015af7f4 system_state vmlinux EXPORT_SYMBOL +0x5c00d810 ZSTD_CDictWorkspaceBound lib/zstd/zstd_compress EXPORT_SYMBOL +0xf35e4d36 llc_add_pack net/llc/llc EXPORT_SYMBOL +0xc8e62c5c xfrm6_tunnel_alloc_spi net/ipv6/xfrm6_tunnel EXPORT_SYMBOL +0x1aa77f31 vfio_del_group_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xb4bea853 pnfs_update_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x497a6f8a nfs_access_zap_cache fs/nfs/nfs EXPORT_SYMBOL_GPL +0x98348c7f __icmp_send vmlinux EXPORT_SYMBOL +0x13009844 udp_push_pending_frames vmlinux EXPORT_SYMBOL +0xa66b44b0 hidinput_connect vmlinux EXPORT_SYMBOL_GPL +0x6726a46d __platform_driver_probe vmlinux EXPORT_SYMBOL_GPL +0x45558f56 clk_unregister_fixed_factor vmlinux EXPORT_SYMBOL_GPL +0x15bed7a5 LZ4_decompress_safe_partial vmlinux EXPORT_SYMBOL +0xc4652d8d rhashtable_destroy vmlinux EXPORT_SYMBOL_GPL +0x773539c1 del_timer_sync vmlinux EXPORT_SYMBOL +0xfcf2c319 param_ops_uint vmlinux EXPORT_SYMBOL +0x6088a5b8 kvm_set_memory_region vmlinux EXPORT_SYMBOL_GPL +0xe870a553 dccp_setsockopt net/dccp/dccp EXPORT_SYMBOL_GPL +0xeb2525e6 mpls_stats_inc_outucastpkts net/mpls/mpls_router EXPORT_SYMBOL_GPL +0x2c66ac85 devlink_info_serial_number_put vmlinux EXPORT_SYMBOL_GPL +0xdf45e46f 
of_parse_phandle_with_fixed_args vmlinux EXPORT_SYMBOL +0xe28e4207 __tracepoint_dma_fence_emit vmlinux EXPORT_SYMBOL +0x171d32d0 drm_atomic_get_new_connector_for_encoder vmlinux EXPORT_SYMBOL +0x67d9b4a2 con_set_default_unimap vmlinux EXPORT_SYMBOL +0x2f384db3 acpi_is_video_device vmlinux EXPORT_SYMBOL +0x7150a9a0 fb_deferred_io_open vmlinux EXPORT_SYMBOL_GPL +0xbb578f80 gpiod_unexport vmlinux EXPORT_SYMBOL_GPL +0xdaba2f3c fscrypt_get_ctx vmlinux EXPORT_SYMBOL +0x7a95e5ae do_settimeofday64 vmlinux EXPORT_SYMBOL +0xa9946938 devm_request_resource vmlinux EXPORT_SYMBOL +0xe33df29f l2tp_tunnel_delete net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xfbcb1feb inet_diag_bc_sk net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x1da93d8f nf_osf_match net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL +0xc52981ce iscsit_build_datain_pdu drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x638a5a55 hinic_enable_nic_rss drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x517e939d srp_remove_host drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0xf7b3ddbe ipmi_get_smi_info drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xc8e3e3ef __qdisc_calculate_pkt_len vmlinux EXPORT_SYMBOL +0xc642990e dev_get_by_index vmlinux EXPORT_SYMBOL +0x4d6d21aa hid_compare_device_paths vmlinux EXPORT_SYMBOL_GPL +0x567a10e3 dma_buf_unmap_attachment vmlinux EXPORT_SYMBOL_GPL +0x4332e7ea serial8250_rpm_get_tx vmlinux EXPORT_SYMBOL_GPL +0x8757c9f5 pci_enable_ats vmlinux EXPORT_SYMBOL_GPL +0x3539f11b match_strlcpy vmlinux EXPORT_SYMBOL +0xf46378f6 ioctl_by_bdev vmlinux EXPORT_SYMBOL +0xc9827693 __bpf_call_base vmlinux EXPORT_SYMBOL_GPL +0x0294c6a2 nfs_callback_nr_threads fs/nfs/nfs EXPORT_SYMBOL_GPL +0x4e71151b configfs_unregister_subsystem fs/configfs/configfs EXPORT_SYMBOL +0x924c46f8 zs_unmap_object mm/zsmalloc EXPORT_SYMBOL_GPL +0x79709cc3 inet6_hash vmlinux EXPORT_SYMBOL_GPL +0x5e2d69db init_net vmlinux EXPORT_SYMBOL +0x9de69171 cpufreq_freq_attr_scaling_boost_freqs vmlinux EXPORT_SYMBOL_GPL +0x1d734ebc usb_of_get_companion_dev vmlinux EXPORT_SYMBOL_GPL +0x068a07d5 __phy_modify_mmd_changed vmlinux EXPORT_SYMBOL_GPL +0x028e177c class_create_file_ns vmlinux EXPORT_SYMBOL_GPL +0x0059b617 regulator_force_disable vmlinux EXPORT_SYMBOL_GPL +0x99b9e998 pci_stop_and_remove_bus_device_locked vmlinux EXPORT_SYMBOL_GPL +0xa843abc1 textsearch_find_continuous vmlinux EXPORT_SYMBOL +0xf67d67dc __crypto_alloc_tfm vmlinux EXPORT_SYMBOL_GPL +0x319ee24c key_unlink vmlinux EXPORT_SYMBOL +0x4da0ac9b keyring_search vmlinux EXPORT_SYMBOL +0x41ed3cec eventfd_ctx_remove_wait_queue vmlinux EXPORT_SYMBOL_GPL +0x9d9db14f generic_delete_inode vmlinux EXPORT_SYMBOL +0xab9bd884 perf_register_guest_info_callbacks vmlinux EXPORT_SYMBOL_GPL +0xc80ab559 swake_up_one vmlinux EXPORT_SYMBOL +0xc9c84f2e br_vlan_get_pvid_rcu net/bridge/bridge EXPORT_SYMBOL_GPL +0x503bd137 snd_interval_ranges sound/core/snd-pcm EXPORT_SYMBOL +0xffd8f8cb fc_vport_setlink drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xee91879b rb_first_postorder vmlinux EXPORT_SYMBOL +0x3b20fb95 dma_fence_remove_callback vmlinux EXPORT_SYMBOL +0x7d290c8c drm_atomic_helper_connector_tv_reset vmlinux EXPORT_SYMBOL +0x7de65a03 acpi_lpat_free_conversion_table vmlinux EXPORT_SYMBOL_GPL +0x666f61f9 gpiod_direction_output_raw vmlinux EXPORT_SYMBOL_GPL +0xd3a10c31 pinctrl_pm_select_idle_state vmlinux EXPORT_SYMBOL_GPL +0xb633f115 irq_poll_enable vmlinux EXPORT_SYMBOL +0xfa7cef41 ceph_release_page_vector net/ceph/libceph EXPORT_SYMBOL +0x62c347fb ceph_wait_for_latest_osdmap net/ceph/libceph EXPORT_SYMBOL +0xb206b50b 
nf_nat_pptp_hook_expectfn net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL +0x955a8696 dev_change_proto_down vmlinux EXPORT_SYMBOL +0x9c49e6d9 sock_gettstamp vmlinux EXPORT_SYMBOL +0xa908d367 regulator_set_voltage vmlinux EXPORT_SYMBOL_GPL +0x66af1fd1 lockref_put_or_lock vmlinux EXPORT_SYMBOL +0x3100cff9 lockref_get_or_lock vmlinux EXPORT_SYMBOL +0x53a6faeb deregister_atm_ioctl net/atm/atm EXPORT_SYMBOL +0xa9a6f37c fc_frame_crc_check drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x189f28be i2c_smbus_write_i2c_block_data vmlinux EXPORT_SYMBOL +0xe646555d of_clk_del_provider vmlinux EXPORT_SYMBOL_GPL +0x253a2f45 of_clk_add_provider vmlinux EXPORT_SYMBOL_GPL +0xf2c43f3f zlib_deflate vmlinux EXPORT_SYMBOL +0x89f845be shash_no_setkey vmlinux EXPORT_SYMBOL_GPL +0xf5e1a77c trace_seq_putmem_hex vmlinux EXPORT_SYMBOL_GPL +0xca431c05 wake_bit_function vmlinux EXPORT_SYMBOL +0xa0a2e84e snd_rawmidi_drain_output sound/core/snd-rawmidi EXPORT_SYMBOL +0xa79b642b ib_map_mr_sg drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x8352c8f7 m_can_class_get_clocks drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL +0xa4e901ee hinic_attach_roce drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x970335f7 inet_csk_route_req vmlinux EXPORT_SYMBOL_GPL +0xd042475c qdisc_get_rtab vmlinux EXPORT_SYMBOL +0xc2344452 dev_change_proto_down_generic vmlinux EXPORT_SYMBOL +0x173bb1dd skb_orphan_partial vmlinux EXPORT_SYMBOL +0xbcd19c8b hid_set_field vmlinux EXPORT_SYMBOL_GPL +0x56a2d14e ata_sff_dev_classify vmlinux EXPORT_SYMBOL_GPL +0x5a84ec45 bus_remove_file vmlinux EXPORT_SYMBOL_GPL +0xb97e6100 mipi_dsi_dcs_set_column_address vmlinux EXPORT_SYMBOL +0x86dd63d9 drm_gem_shmem_pin vmlinux EXPORT_SYMBOL +0xfc3cdbc3 locks_lock_inode_wait vmlinux EXPORT_SYMBOL +0x57345fc7 locks_release_private vmlinux EXPORT_SYMBOL_GPL +0x310cb807 __fscrypt_encrypt_symlink vmlinux EXPORT_SYMBOL_GPL +0x2aa0843e mempool_resize vmlinux EXPORT_SYMBOL +0xae2d4d20 __ftrace_vprintk vmlinux EXPORT_SYMBOL_GPL +0x758a507a snd_pcm_hw_constraint_mask64 sound/core/snd-pcm EXPORT_SYMBOL +0x54cc5b04 mlx5_query_port_max_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x9a4a2896 mlx4_get_slave_default_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3ec9407d nvme_wait_freeze drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x1a9a226c xfrm_unregister_km vmlinux EXPORT_SYMBOL +0x3fa122aa in_dev_finish_destroy vmlinux EXPORT_SYMBOL +0x596c1891 of_find_node_by_phandle vmlinux EXPORT_SYMBOL +0x00654c5d hid_destroy_device vmlinux EXPORT_SYMBOL_GPL +0x2a0a40fa mdio_bus_init vmlinux EXPORT_SYMBOL_GPL +0x0dd0d42a device_get_next_child_node vmlinux EXPORT_SYMBOL_GPL +0xc75da655 drm_atomic_get_crtc_state vmlinux EXPORT_SYMBOL +0x87b8798d sg_next vmlinux EXPORT_SYMBOL +0x0b07abe2 unshare_fs_struct vmlinux EXPORT_SYMBOL_GPL +0x1ca4a930 smp_call_function_any vmlinux EXPORT_SYMBOL_GPL +0x6f9d540b freq_qos_add_request vmlinux EXPORT_SYMBOL_GPL +0x26b4a1c9 __nf_conntrack_confirm net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x00b5eff9 rdma_move_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2315e880 core_tpg_check_initiator_node_acl drivers/target/target_core_mod EXPORT_SYMBOL +0x9b9314c6 config_ep_by_speed drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xaf969565 ocfs2_dlm_lock fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x4be30aa0 nfs_pageio_init_write fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb7d289d1 unix_inq_len vmlinux EXPORT_SYMBOL_GPL +0x136209db power_supply_reg_notifier vmlinux EXPORT_SYMBOL_GPL +0xf5d22d48 mdiobus_scan vmlinux 
EXPORT_SYMBOL
+0x713cb4ba phy_gbit_features vmlinux EXPORT_SYMBOL_GPL
+0x858b3fe3 free_iova_mem vmlinux EXPORT_SYMBOL
+0x9d4c15e7 tty_port_carrier_raised vmlinux EXPORT_SYMBOL
+0x66136f40 regulator_set_voltage_time_sel vmlinux EXPORT_SYMBOL_GPL
+0x827f283b __brelse vmlinux EXPORT_SYMBOL
+0x7a283543 pipe_lock vmlinux EXPORT_SYMBOL
+0x7de53067 rpc_init_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6f906065 ip_set_name_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x10763f4c ib_register_mad_agent drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xa82b2066 dm_bufio_write_dirty_buffers drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xcecdc28d mlx5_rl_remove_rate drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x82ca2f9b scsi_device_set_state vmlinux EXPORT_SYMBOL
+0x2e2dd02b drm_panel_enable vmlinux EXPORT_SYMBOL
+0x8a59a5e0 drm_gem_fb_create_handle vmlinux EXPORT_SYMBOL
+0xe0953c22 gpiod_get_direction vmlinux EXPORT_SYMBOL_GPL
+0x020dbf27 bitmap_alloc vmlinux EXPORT_SYMBOL
+0x941f2aaa eventfd_ctx_put vmlinux EXPORT_SYMBOL_GPL
+0xd593f7a8 mark_buffer_async_write vmlinux EXPORT_SYMBOL
+0xa33c0eac wait_for_completion_interruptible_timeout vmlinux EXPORT_SYMBOL
+0xfbc4f89e io_schedule_timeout vmlinux EXPORT_SYMBOL
+0x28e23139 xfrm_probe_algs net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL
+0xf13914b3 gue_encap_hlen net/ipv4/fou EXPORT_SYMBOL
+0x798ffe1b ip_route_me_harder vmlinux EXPORT_SYMBOL
+0x11e10ccc inet_sock_destruct vmlinux EXPORT_SYMBOL
+0xd6bece6d skb_udp_tunnel_segment vmlinux EXPORT_SYMBOL
+0xe4e76fb4 nf_reinject vmlinux EXPORT_SYMBOL
+0x84a005f1 iscsi_get_router_state_name vmlinux EXPORT_SYMBOL_GPL
+0xc9c6bb25 eeprom_93cx6_readb vmlinux EXPORT_SYMBOL_GPL
+0x1d2931ae dev_pm_qos_update_request vmlinux EXPORT_SYMBOL_GPL
+0x62585351 tty_chars_in_buffer vmlinux EXPORT_SYMBOL
+0x815588a6 clk_enable vmlinux EXPORT_SYMBOL_GPL
+0x5c4265f6 blk_unregister_region vmlinux EXPORT_SYMBOL
+0x71eee1ed elv_rqhash_del vmlinux EXPORT_SYMBOL_GPL
+0x2fcf68f6 unregister_nls vmlinux EXPORT_SYMBOL
+0xb2cb5f11 bpf_prog_get_type_dev vmlinux EXPORT_SYMBOL_GPL
+0xae0592ef ring_buffer_discard_commit vmlinux EXPORT_SYMBOL_GPL
+0x0310a613 ceph_auth_verify_authorizer_reply net/ceph/libceph EXPORT_SYMBOL
+0xaba3fca8 dccp_feat_nn_get net/dccp/dccp EXPORT_SYMBOL_GPL
+0xe2c84666 nft_reject_icmp_code net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x2d0ed6a0 ib_init_ah_attr_from_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x170602a0 md_integrity_register drivers/md/md-mod EXPORT_SYMBOL
+0x2194fbbb hinic_glb_pf_vf_offset drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xcd0f8d0f __fscache_check_consistency fs/fscache/fscache EXPORT_SYMBOL
+0x73a96c94 devlink_resource_occ_get_register vmlinux EXPORT_SYMBOL_GPL
+0xbe0ba450 thermal_zone_get_slope vmlinux EXPORT_SYMBOL_GPL
+0xddf3fbbb thermal_zone_set_trips vmlinux EXPORT_SYMBOL_GPL
+0x53d8c50d __serio_register_port vmlinux EXPORT_SYMBOL
+0x4a2315f5 usb_ep_set_maxpacket_limit vmlinux EXPORT_SYMBOL_GPL
+0xc17515d7 usb_hcds_loaded vmlinux EXPORT_SYMBOL_GPL
+0xd62f7373 regmap_write_async vmlinux EXPORT_SYMBOL_GPL
+0x4db5d6bf verify_signature vmlinux EXPORT_SYMBOL_GPL
+0x27cb788b skcipher_register_instance vmlinux EXPORT_SYMBOL_GPL
+0x6e1fd004 fscrypt_setup_filename vmlinux EXPORT_SYMBOL
+0x3fcc53db snd_ctl_unregister_ioctl sound/core/snd EXPORT_SYMBOL
+0xccba3069 target_remove_session drivers/target/target_core_mod EXPORT_SYMBOL
+0xe8b3f44b parport_claim drivers/parport/parport EXPORT_SYMBOL
+0xac70a11b mlx5_core_query_mkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x19163eeb nfs_instantiate fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x16acd0be skb_pull_rcsum vmlinux EXPORT_SYMBOL_GPL
+0xd6a86801 __scsi_device_lookup_by_target vmlinux EXPORT_SYMBOL
+0xfe9ebbbb acpi_osi_is_win8 vmlinux EXPORT_SYMBOL
+0x1fa1d95c sha256_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x4430be78 fscrypt_has_permitted_context vmlinux EXPORT_SYMBOL
+0xa55c3daa current_work vmlinux EXPORT_SYMBOL
+0xfc03d97a page_is_ram vmlinux EXPORT_SYMBOL_GPL
+0x729aeb96 ib_dealloc_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x38c8b3ae mdev_parent_dev drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x6af8a872 dm_array_info_init drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xc9472089 nvme_init_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x95ec11b4 dlm_posix_unlock fs/dlm/dlm EXPORT_SYMBOL_GPL
+0x6b27729b radix_tree_gang_lookup vmlinux EXPORT_SYMBOL
+0x930dc3ba dm_bio_get_target_bio_nr vmlinux EXPORT_SYMBOL_GPL
+0x842e2e50 devm_i2c_new_dummy_device vmlinux EXPORT_SYMBOL_GPL
+0xa926b4d6 usb_amd_pt_check_port vmlinux EXPORT_SYMBOL_GPL
+0x2856d21a pm_generic_suspend vmlinux EXPORT_SYMBOL_GPL
+0x16ac2597 drm_printf vmlinux EXPORT_SYMBOL
+0xefebe2f0 tty_buffer_lock_exclusive vmlinux EXPORT_SYMBOL_GPL
+0xf814b0b1 locks_alloc_lock vmlinux EXPORT_SYMBOL_GPL
+0xb121390a probe_irq_on vmlinux EXPORT_SYMBOL
+0xb0aed408 ZSTD_compressStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x16e38de8 cache_create_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd85c3067 svc_generic_init_request net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf30f9d1e svc_prepare_thread net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x1f1cfc6b dm_get_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xec568141 vxlan_fdb_clear_offload drivers/net/vxlan EXPORT_SYMBOL_GPL
+0x15801382 mlxsw_afk_key_info_put drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x98c3a6c1 fscache_mark_pages_cached fs/fscache/fscache EXPORT_SYMBOL
+0x93b3fc74 register_dcbevent_notifier vmlinux EXPORT_SYMBOL
+0x4cdd111e xfrm_state_lookup vmlinux EXPORT_SYMBOL
+0x978bf8a4 tcp_parse_options vmlinux EXPORT_SYMBOL
+0xb35b214f sk_filter_trim_cap vmlinux EXPORT_SYMBOL
+0xa18e4d46 rps_may_expire_flow vmlinux EXPORT_SYMBOL
+0x57566fd6 i2c_smbus_write_block_data vmlinux EXPORT_SYMBOL
+0x99975573 usb_stor_suspend vmlinux EXPORT_SYMBOL_GPL
+0xae8c38ae key_move vmlinux EXPORT_SYMBOL
+0x1ba59527 __kmalloc_node vmlinux EXPORT_SYMBOL
+0x721ad5f6 __nfc_alloc_vendor_cmd_reply_skb net/nfc/nfc EXPORT_SYMBOL
+0x8108dafd rpc_unlink net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x200bf9aa spc_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL
+0xac00c714 usb_composite_probe drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x2c998fe1 mlxsw_i2c_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_i2c EXPORT_SYMBOL
+0x45a39f8b ndlc_probe drivers/nfc/st-nci/st-nci EXPORT_SYMBOL
+0x6aed0d8b locks_in_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL
+0x5afedb80 pskb_trim_rcsum_slow vmlinux EXPORT_SYMBOL
+0xe3ebf9bd arch_set_freq_scale vmlinux EXPORT_SYMBOL_GPL
+0xcf9b558d touchscreen_set_mt_pos vmlinux EXPORT_SYMBOL
+0xa54d3858 module_refcount vmlinux EXPORT_SYMBOL
+0xf77555cd __memcpy_toio vmlinux EXPORT_SYMBOL
+0x17031524 sunrpc_destroy_cache_detail net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf12c9da8 dlm_register_domain fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0xcb5b1f7e dlm_unregister_domain fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0xf7636996 inet_ctl_sock_create vmlinux EXPORT_SYMBOL_GPL
+0x5252d875 power_supply_find_ocv2cap_table vmlinux EXPORT_SYMBOL_GPL
+0x7c6501ae firmware_kobj vmlinux EXPORT_SYMBOL_GPL
+0xac46792f drm_mode_create_from_cmdline_mode vmlinux EXPORT_SYMBOL
+0x4f4a7ae7 __tty_insert_flip_char vmlinux EXPORT_SYMBOL
+0x85935a61 acpi_dev_irq_flags vmlinux EXPORT_SYMBOL_GPL
+0x4aad0293 pci_store_saved_state vmlinux EXPORT_SYMBOL_GPL
+0x6ccf5acb handle_edge_irq vmlinux EXPORT_SYMBOL
+0xc58acd9a ip_vs_conn_in_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x7a9d0678 l3mdev_update_flow vmlinux EXPORT_SYMBOL_GPL
+0x18ca222e usb_autopm_put_interface_async vmlinux EXPORT_SYMBOL_GPL
+0x4e72d4ab usb_lock_device_for_reset vmlinux EXPORT_SYMBOL_GPL
+0x0c5c0fc8 phy_ethtool_nway_reset vmlinux EXPORT_SYMBOL
+0xae8a1f82 clk_hw_register_mux vmlinux EXPORT_SYMBOL_GPL
+0x7ad1ded1 pinctrl_register_mappings vmlinux EXPORT_SYMBOL_GPL
+0x1bc5eebe pinctrl_gpio_direction_input vmlinux EXPORT_SYMBOL_GPL
+0x18e60984 __do_once_start vmlinux EXPORT_SYMBOL
+0xfc980a58 __account_locked_vm vmlinux EXPORT_SYMBOL_GPL
+0x2101cbc9 ceph_oid_destroy net/ceph/libceph EXPORT_SYMBOL
+0x235ca7bb snd_timer_global_new sound/core/snd-timer EXPORT_SYMBOL
+0xa75f49d6 mlx5_fill_page_array drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xe2d5255a strcmp vmlinux EXPORT_SYMBOL
+0x85df9b6c strsep vmlinux EXPORT_SYMBOL
+0x84406619 ___pskb_trim vmlinux EXPORT_SYMBOL
+0xbb4b4efe __pskb_copy_fclone vmlinux EXPORT_SYMBOL
+0x9a1e0532 ata_std_prereset vmlinux EXPORT_SYMBOL_GPL
+0x75352938 drm_mode_create_tile_group vmlinux EXPORT_SYMBOL
+0x4a80ba1d tty_driver_flush_buffer vmlinux EXPORT_SYMBOL
+0xdef7c893 fb_match_mode vmlinux EXPORT_SYMBOL
+0x0f7ca3e5 pci_setup_cardbus vmlinux EXPORT_SYMBOL
+0x4b28201d kdb_current_task vmlinux EXPORT_SYMBOL
+0x86a1664f iomem_resource vmlinux EXPORT_SYMBOL
+0x111a7422 nci_hci_connect_gate net/nfc/nci/nci EXPORT_SYMBOL
+0xce6e7ce8 sctp_do_peeloff net/sctp/sctp EXPORT_SYMBOL
+0xf266a0d6 snd_ctl_add sound/core/snd EXPORT_SYMBOL
+0xae10731f hinic_support_rdma drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xbc6720ba sata_scr_read vmlinux EXPORT_SYMBOL_GPL
+0x927a47b1 devm_kfree vmlinux EXPORT_SYMBOL_GPL
+0x18fb9aa6 mipi_dsi_driver_unregister vmlinux EXPORT_SYMBOL
+0x2ddeee2e drm_fb_helper_pan_display vmlinux EXPORT_SYMBOL
+0x402bdfd4 drm_atomic_helper_wait_for_dependencies vmlinux EXPORT_SYMBOL
+0xa079e200 drm_dp_downstream_id vmlinux EXPORT_SYMBOL
+0x0e696dbe register_virtio_driver vmlinux EXPORT_SYMBOL_GPL
+0x2ef637d0 hdmi_infoframe_log vmlinux EXPORT_SYMBOL
+0x82b3b874 gpiochip_set_nested_irqchip vmlinux EXPORT_SYMBOL_GPL
+0xc791f0ed generic_start_io_acct vmlinux EXPORT_SYMBOL
+0xf0032c73 vfs_create vmlinux EXPORT_SYMBOL
+0xefce991c ceph_pagelist_append net/ceph/libceph EXPORT_SYMBOL
+0xa3360569 dccp_done net/dccp/dccp EXPORT_SYMBOL_GPL
+0xf6ed3334 ib_event_msg drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x234c6f2d ptp_schedule_worker vmlinux EXPORT_SYMBOL
+0xd2c2440f ohci_hub_status_data vmlinux EXPORT_SYMBOL_GPL
+0x0b4e7407 devm_device_add_group vmlinux EXPORT_SYMBOL_GPL
+0x2093f4dd clk_register_divider_table vmlinux EXPORT_SYMBOL_GPL
+0xb9056bb6 remove_conflicting_framebuffers vmlinux EXPORT_SYMBOL
+0x9f374385 devm_acpi_dev_remove_driver_gpios vmlinux EXPORT_SYMBOL_GPL
+0xa1affab0 nla_reserve_64bit vmlinux EXPORT_SYMBOL
+0xfbab3e58 __ablkcipher_walk_complete vmlinux EXPORT_SYMBOL_GPL
+0x3d959cdd sb_min_blocksize vmlinux EXPORT_SYMBOL
+0xa1ca5d73 nfc_llc_stop net/nfc/hci/hci EXPORT_SYMBOL
+0xbcf1f0e6 zs_create_pool mm/zsmalloc EXPORT_SYMBOL_GPL
+0xdf606907 devlink_dpipe_headers_unregister vmlinux EXPORT_SYMBOL_GPL
+0x9282f433 __tracepoint_devlink_hwmsg vmlinux EXPORT_SYMBOL_GPL
+0x154c6338 dm_kcopyd_client_destroy vmlinux EXPORT_SYMBOL
+0x2ad91f6e ir_raw_gen_pl vmlinux EXPORT_SYMBOL
+0x9e12e343 ir_raw_gen_pd vmlinux EXPORT_SYMBOL
+0x415623d3 sas_phy_alloc vmlinux EXPORT_SYMBOL
+0x2e0b1deb dma_fence_get_status vmlinux EXPORT_SYMBOL
+0xabb228e0 regulator_get_mode vmlinux EXPORT_SYMBOL_GPL
+0xbcf64fe4 devm_backlight_device_unregister vmlinux EXPORT_SYMBOL
+0x500161e9 page_cache_prev_miss vmlinux EXPORT_SYMBOL
+0xf353a698 register_module_notifier vmlinux EXPORT_SYMBOL
+0x74fe3670 nfc_hci_result_to_errno net/nfc/hci/hci EXPORT_SYMBOL
+0x58b8cd53 get_bitmap_from_slot drivers/md/md-mod EXPORT_SYMBOL
+0xc8821dac tcp_rate_check_app_limited vmlinux EXPORT_SYMBOL_GPL
+0x6cff3b90 register_fib_notifier vmlinux EXPORT_SYMBOL
+0xa06f135f cpuidle_disable_device vmlinux EXPORT_SYMBOL_GPL
+0xe766a83e ttm_bo_create vmlinux EXPORT_SYMBOL
+0x4afc04e5 drm_client_init vmlinux EXPORT_SYMBOL
+0xe8376e2f drm_atomic_helper_check vmlinux EXPORT_SYMBOL
+0xc7208c3a serial8250_resume_port vmlinux EXPORT_SYMBOL
+0x09d0cc49 vm_map_pages_zero vmlinux EXPORT_SYMBOL
+0xbaa6a8e2 nft_unregister_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x727e0e3d rdma_consumer_reject_data drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x6c28be5a vfio_info_add_capability drivers/vfio/vfio EXPORT_SYMBOL
+0x024e16a6 iscsi_target_check_login_request drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x04773b60 ccp_present drivers/crypto/ccp/ccp EXPORT_SYMBOL_GPL
+0x7c1e7807 bch_bset_sort_state_init drivers/md/bcache/bcache EXPORT_SYMBOL
+0x8a416a8a hinic_func_rx_tx_flush drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x6a782d59 inet_csk_clone_lock vmlinux EXPORT_SYMBOL_GPL
+0x03bf2e32 devlink_register vmlinux EXPORT_SYMBOL_GPL
+0x9bab1933 hid_input_report vmlinux EXPORT_SYMBOL_GPL
+0x4d36e7a6 pcix_set_mmrbc vmlinux EXPORT_SYMBOL
+0xecf82a90 gpiod_export_link vmlinux EXPORT_SYMBOL_GPL
+0xea12c7cc gpiochip_generic_config vmlinux EXPORT_SYMBOL_GPL
+0x6be0d38b unregister_sysctl_table vmlinux EXPORT_SYMBOL
+0x203534e0 rdma_set_service_type drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x93b65e01 _ib_alloc_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xe2056a6f mlx5_core_create_tir_out drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x798b7682 klist_prev vmlinux EXPORT_SYMBOL_GPL
+0x08cfaa3b devlink_region_create vmlinux EXPORT_SYMBOL_GPL
+0xa8bad19b lwtunnel_xmit vmlinux EXPORT_SYMBOL_GPL
+0x5c24a651 of_find_property vmlinux EXPORT_SYMBOL
+0xbc6336fe usb_gadget_map_request_by_dev vmlinux EXPORT_SYMBOL_GPL
+0x1448a42c drm_atomic_helper_plane_duplicate_state vmlinux EXPORT_SYMBOL
+0xab1d0f63 devm_clk_release_clkdev vmlinux EXPORT_SYMBOL
+0x40b43bd0 sbitmap_add_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x3b321462 LZ4_setStreamDecode vmlinux EXPORT_SYMBOL
+0x99c1c85c del_gendisk vmlinux EXPORT_SYMBOL
+0x700b3c12 block_write_full_page vmlinux EXPORT_SYMBOL
+0xfe731af8 nf_ct_invert_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xa61aa028 snd_pcm_format_unsigned sound/core/snd-pcm EXPORT_SYMBOL
+0x4a4cb558 dm_btree_insert_notify drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x1199fe29 lockd_up fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x8c0b11c9 xfrm_register_type vmlinux EXPORT_SYMBOL
+0x36cf5e8e __tcp_md5_do_lookup vmlinux EXPORT_SYMBOL
+0x437a0d6d __sock_tx_timestamp vmlinux EXPORT_SYMBOL
+0x9305f8e6 cpufreq_get vmlinux EXPORT_SYMBOL
+0xb544227a pci_set_vpd_size vmlinux EXPORT_SYMBOL
+0x2892105d devm_gpiod_get_index vmlinux EXPORT_SYMBOL_GPL
+0x59ccac96 fat_sync_inode vmlinux EXPORT_SYMBOL_GPL
+0x387d24c9 walk_iomem_res_desc vmlinux EXPORT_SYMBOL_GPL
+0x9688de8b memstart_addr vmlinux EXPORT_SYMBOL
+0x618a30ab mlxsw_afa_block_commit drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x261d4b74 sock_no_getsockopt vmlinux EXPORT_SYMBOL
+0x2e30777d sock_no_setsockopt vmlinux EXPORT_SYMBOL
+0x31cad253 drm_crtc_vblank_off vmlinux EXPORT_SYMBOL
+0x94bc9b85 drm_crtc_vblank_get vmlinux EXPORT_SYMBOL
+0xd2064d6b clk_hw_register_gpio_mux vmlinux EXPORT_SYMBOL_GPL
+0xa7b74543 fscrypt_decrypt_pagecache_blocks vmlinux EXPORT_SYMBOL
+0x0c2bffdf fscrypt_encrypt_pagecache_blocks vmlinux EXPORT_SYMBOL
+0x4a09362a __blockdev_direct_IO vmlinux EXPORT_SYMBOL
+0x8a01d0e3 migrate_page vmlinux EXPORT_SYMBOL
+0x68952493 rcu_note_context_switch vmlinux EXPORT_SYMBOL_GPL
+0xf937dc94 vhost_poll_queue drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x47c37583 ip6_route_me_harder vmlinux EXPORT_SYMBOL
+0x9afabab7 unix_destruct_scm vmlinux EXPORT_SYMBOL
+0x89026b46 thermal_zone_device_update vmlinux EXPORT_SYMBOL_GPL
+0xbf0fb030 rc_map_get vmlinux EXPORT_SYMBOL_GPL
+0x320eec4d sas_rphy_delete vmlinux EXPORT_SYMBOL
+0x64a95c65 dev_fwnode vmlinux EXPORT_SYMBOL_GPL
+0x6703288c drm_property_create_bool vmlinux EXPORT_SYMBOL
+0x9b307228 regulator_map_voltage_iterate vmlinux EXPORT_SYMBOL_GPL
+0xabc640f3 list_lru_isolate vmlinux EXPORT_SYMBOL_GPL
+0x9c212a54 bpf_prog_alloc vmlinux EXPORT_SYMBOL_GPL
+0x60b57b62 snd_device_new sound/core/snd EXPORT_SYMBOL
+0xe1d0a9f5 hinic_get_port_info drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x86a9a7c5 fcoe_start_io drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0x375f8bbd fc_get_event_number drivers/scsi/scsi_transport_fc EXPORT_SYMBOL
+0x4670ada5 km_new_mapping vmlinux EXPORT_SYMBOL
+0x38620ddb phy_attach_direct vmlinux EXPORT_SYMBOL
+0x042db083 phy_gbit_all_ports_features vmlinux EXPORT_SYMBOL_GPL
+0xa56d429c iscsi_get_port_state_name vmlinux EXPORT_SYMBOL_GPL
+0xe964c56a iscsi_get_port_speed_name vmlinux EXPORT_SYMBOL_GPL
+0x949644e0 pm_generic_thaw_early vmlinux EXPORT_SYMBOL_GPL
+0x949f5146 drm_gem_cma_dumb_create vmlinux EXPORT_SYMBOL_GPL
+0x8a0967e0 drm_crtc_enable_color_mgmt vmlinux EXPORT_SYMBOL
+0xee477e80 of_clk_get_parent_name vmlinux EXPORT_SYMBOL_GPL
+0x3bbb52dc acpi_check_resource_conflict vmlinux EXPORT_SYMBOL
+0x47229b5c gpio_request vmlinux EXPORT_SYMBOL_GPL
+0x3283e6b0 prandom_seed_full_state vmlinux EXPORT_SYMBOL
+0x000f55d5 blk_queue_write_cache vmlinux EXPORT_SYMBOL_GPL
+0x73709463 generic_block_bmap vmlinux EXPORT_SYMBOL
+0x868784cb __symbol_get vmlinux EXPORT_SYMBOL_GPL
+0x6ef6b54f ktime_get_boot_fast_ns vmlinux EXPORT_SYMBOL_GPL
+0x4507f4a8 cpuhp_tasks_frozen vmlinux EXPORT_SYMBOL_GPL
+0x2e883979 p9_client_disconnect net/9p/9pnet EXPORT_SYMBOL
+0x2ccfce5e snd_pcm_hw_constraint_list sound/core/snd-pcm EXPORT_SYMBOL
+0xafd7047f rndis_register drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL
+0x47270b12 mlx4_phys_to_slave_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x34db072d fdp_nci_recv_frame drivers/nfc/fdp/fdp EXPORT_SYMBOL
+0x92ee3238 nfs_put_client fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x0783dfbf __ip_dev_find vmlinux EXPORT_SYMBOL
+0x42230915 sbitmap_any_bit_set vmlinux EXPORT_SYMBOL_GPL
+0x652ce9aa nla_memcmp vmlinux EXPORT_SYMBOL
+0xf1db1704 nla_memcpy vmlinux EXPORT_SYMBOL
+0x3bba251a kmem_cache_free vmlinux EXPORT_SYMBOL
+0x73879664 vsock_addr_init net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xec2b53b6 nci_uart_set_config net/nfc/nci/nci_uart EXPORT_SYMBOL_GPL
+0x7cd5b584 nf_ct_bridge_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x86851df0 sbc_get_write_same_sectors drivers/target/target_core_mod EXPORT_SYMBOL
+0xccfe6409 btracker_nr_demotions_queued drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x0418aad6 mlx4_get_counter_stats drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xaca90ebd ipmi_request_supply_msgs drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x0d4ff75b netdev_set_sb_channel vmlinux EXPORT_SYMBOL
+0x59354eb4 usb_serial_generic_get_icount vmlinux EXPORT_SYMBOL_GPL
+0x002d525b get_cpu_device vmlinux EXPORT_SYMBOL_GPL
+0x923b1276 dmaengine_get vmlinux EXPORT_SYMBOL
+0x6b70cac1 amba_device_register vmlinux EXPORT_SYMBOL
+0xcb9e1a22 acpi_os_map_generic_address vmlinux EXPORT_SYMBOL
+0x06f7c4f6 pci_restore_pri_state vmlinux EXPORT_SYMBOL_GPL
+0xd9de3df6 pci_restore_ats_state vmlinux EXPORT_SYMBOL_GPL
+0x716de01a pci_restore_msi_state vmlinux EXPORT_SYMBOL_GPL
+0xaba11ed1 pcie_capability_clear_and_set_dword vmlinux EXPORT_SYMBOL
+0xfc72c3d6 vfs_fadvise vmlinux EXPORT_SYMBOL
+0xd5fd90f1 prepare_to_wait vmlinux EXPORT_SYMBOL
+0x9f46df2f __request_region vmlinux EXPORT_SYMBOL
+0x38e6a5d2 roce_gid_type_mask_support drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x139fd993 target_setup_session drivers/target/target_core_mod EXPORT_SYMBOL
+0xc2fd77e8 core_tpg_deregister drivers/target/target_core_mod EXPORT_SYMBOL
+0xd9491c14 xa_destroy vmlinux EXPORT_SYMBOL
+0xc81e91a8 napi_busy_loop vmlinux EXPORT_SYMBOL
+0x2f69a9dc dev_get_valid_name vmlinux EXPORT_SYMBOL
+0x5f8685fd of_irq_get_byname vmlinux EXPORT_SYMBOL_GPL
+0xe0ed45e0 fixed_phy_set_link_update vmlinux EXPORT_SYMBOL_GPL
+0xb031bcfa ata_host_detach vmlinux EXPORT_SYMBOL_GPL
+0xa80a6e8f drm_connector_attach_vrr_capable_property vmlinux EXPORT_SYMBOL
+0xf8395776 drm_dp_mst_reset_vcpi_slots vmlinux EXPORT_SYMBOL
+0x843d70ef acpi_is_root_bridge vmlinux EXPORT_SYMBOL_GPL
+0xebd226ae acpi_ec_add_query_handler vmlinux EXPORT_SYMBOL_GPL
+0xf8524c30 bdev_read_only vmlinux EXPORT_SYMBOL
+0xf0009fee put_pages_list vmlinux EXPORT_SYMBOL
+0x46e89e77 from_kprojid vmlinux EXPORT_SYMBOL
+0x64895338 rdma_roce_rescan_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x302efd40 target_configure_unmap_from_queue drivers/target/target_core_mod EXPORT_SYMBOL
+0x9e41f494 mlxsw_afk_encode drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x21ee81c5 skb_copy_datagram_iter vmlinux EXPORT_SYMBOL
+0x9a097df1 sock_wfree vmlinux EXPORT_SYMBOL
+0x79240c9d i2c_new_ancillary_device vmlinux EXPORT_SYMBOL_GPL
+0xa23f684b __tracepoint_io_page_fault vmlinux EXPORT_SYMBOL_GPL
+0xd4a2bf33 __posix_acl_create vmlinux EXPORT_SYMBOL
+0x066c3641 __page_cache_alloc vmlinux EXPORT_SYMBOL
+0x091eb9b4 round_jiffies vmlinux EXPORT_SYMBOL_GPL
+0xe913ff31 nci_spi_send net/nfc/nci/nci_spi EXPORT_SYMBOL_GPL
+0xbf1613cf p9_is_proto_dotl net/9p/9pnet EXPORT_SYMBOL
+0xadfd3f84 rpcauth_list_flavors net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa19de6c1 nft_unregister_chain_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x1488ac31 dm_exception_store_create drivers/md/dm-snapshot EXPORT_SYMBOL
+0x25d04d5a inet_proto_csum_replace16 vmlinux EXPORT_SYMBOL
+0xe99ed15a usb_gadget_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x67369d46 phylink_of_phy_connect vmlinux EXPORT_SYMBOL_GPL
+0x1c35bf34 __dynamic_ibdev_dbg vmlinux EXPORT_SYMBOL
+0x66c39615 crypto_sha512_finup vmlinux EXPORT_SYMBOL
+0x9b7cc26a dquot_commit vmlinux EXPORT_SYMBOL
+0x7f8261e2 __block_write_full_page vmlinux EXPORT_SYMBOL
+0xf953b793 truncate_inode_pages vmlinux EXPORT_SYMBOL
+0xdb84558d uprobe_unregister vmlinux EXPORT_SYMBOL_GPL
+0x3d98d3c9 ip6_tnl_xmit net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x03feea40 cpumask_next vmlinux EXPORT_SYMBOL
+0xe336acb0 fib_nh_common_release vmlinux EXPORT_SYMBOL_GPL
+0xf7f495c1 input_flush_device vmlinux EXPORT_SYMBOL
+0x9032ef94 iscsi_conn_login_event vmlinux EXPORT_SYMBOL_GPL
+0x2cd24050 regmap_bulk_write vmlinux EXPORT_SYMBOL_GPL
+0x5c3b9bfa drm_gem_shmem_free_object vmlinux EXPORT_SYMBOL_GPL
+0x85b38978 percpu_ref_reinit vmlinux EXPORT_SYMBOL_GPL
+0xa5d93c5c sysfs_notify vmlinux EXPORT_SYMBOL_GPL
+0xbd0f699d vsock_addr_bound net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x0bd578d5 osd_req_op_cls_request_data_pagelist net/ceph/libceph EXPORT_SYMBOL
+0x726182cc ceph_monc_got_map net/ceph/libceph EXPORT_SYMBOL
+0xf6e102ff p9_client_renameat net/9p/9pnet EXPORT_SYMBOL
+0x270052be nf_confirm net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xdc80606f transport_copy_sense_to_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xd5f41819 neon_aes_ecb_encrypt arch/arm64/crypto/aes-neon-blk EXPORT_SYMBOL
+0x9a1dfd65 strpbrk vmlinux EXPORT_SYMBOL
+0x3906f515 inet_hash vmlinux EXPORT_SYMBOL_GPL
+0xf099eb9d mm_account_pinned_pages vmlinux EXPORT_SYMBOL_GPL
+0xe1c502a7 fs_dax_get_by_bdev vmlinux EXPORT_SYMBOL_GPL
+0x95efa1db pkcs7_free_message vmlinux EXPORT_SYMBOL_GPL
+0x54964375 seq_lseek vmlinux EXPORT_SYMBOL
+0x27eb86be ip_vs_conn_out_get_proto net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL
+0x9e6d79f8 snd_info_get_str sound/core/snd EXPORT_SYMBOL
+0xaab0ef04 dm_bitset_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x469b2479 cdrom_get_last_written drivers/cdrom/cdrom EXPORT_SYMBOL
+0x10dda3ca simd_unregister_skciphers crypto/crypto_simd EXPORT_SYMBOL_GPL
+0x6560f011 inet_csk_reqsk_queue_add vmlinux EXPORT_SYMBOL
+0x5decc1ac devlink_reload_enable vmlinux EXPORT_SYMBOL_GPL
+0xb0c99448 dma_resv_reserve_shared vmlinux EXPORT_SYMBOL
+0xbd7735fe drm_crtc_vblank_count vmlinux EXPORT_SYMBOL
+0x8bebe51a drm_crtc_init vmlinux EXPORT_SYMBOL
+0x66afbda1 drm_dp_mst_connector_early_unregister vmlinux EXPORT_SYMBOL
+0x85a49dc7 pci_vpd_find_info_keyword vmlinux EXPORT_SYMBOL_GPL
+0x63a7c28c bitmap_find_free_region vmlinux EXPORT_SYMBOL
+0xd722b69b blk_mq_queue_inflight vmlinux EXPORT_SYMBOL_GPL
+0x11afbf99 kernfs_find_and_get_ns vmlinux EXPORT_SYMBOL_GPL
+0x787c882b lzo1x_1_compress lib/lzo/lzo_compress EXPORT_SYMBOL_GPL
+0x71fa908a cache_flush net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x420663fd bc_svc_process net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xebbf1f7e ip_set_match_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x1ebc320d ubi_leb_erase drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL
+0x7d5e1815 dm_rh_get_region_key drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x00af95a1 __tracepoint_bcache_btree_set_root drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xbc9a86e1 mlx5_set_port_wol drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x51b10865 crypto_finalize_aead_request crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x4ea38276 vlan_vid_add vmlinux EXPORT_SYMBOL
+0x3f7ae9d9 ipv6_proxy_select_ident vmlinux EXPORT_SYMBOL_GPL
+0x0c11e5d6 xfrm_parse_spi vmlinux EXPORT_SYMBOL
+0x4a1367ea udp_gro_receive vmlinux EXPORT_SYMBOL
+0x682c75b5 napi_schedule_prep vmlinux EXPORT_SYMBOL
+0x2e1b5229 of_graph_parse_endpoint vmlinux EXPORT_SYMBOL
+0x1869f60a of_property_read_variable_u8_array vmlinux EXPORT_SYMBOL_GPL
+0xd92f0791 leds_list_lock vmlinux EXPORT_SYMBOL_GPL
+0xabcefca7 drm_dp_dual_mode_write vmlinux EXPORT_SYMBOL
+0x424bad65 drm_dp_find_vcpi_slots vmlinux EXPORT_SYMBOL
+0x043e9f02 regulator_set_mode vmlinux EXPORT_SYMBOL_GPL
+0x46328789 set_disk_ro vmlinux EXPORT_SYMBOL
+0x46abaa1e dquot_enable vmlinux EXPORT_SYMBOL
+0x88ab6fe3 kgdb_active vmlinux EXPORT_SYMBOL_GPL
+0x48877aa5 svc_alien_sock net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x230b494e share_ns net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL
+0x5b5c39c7 nf_conncount_init net/netfilter/nf_conncount EXPORT_SYMBOL_GPL
+0xb60e5e5f capi_cmsg_header drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0xe64e0a0f xfrm6_rcv_tnl vmlinux EXPORT_SYMBOL
+0x5a6cdb52 nf_ct_zone_dflt vmlinux EXPORT_SYMBOL_GPL
+0xc7d094b5 dm_read_arg_group vmlinux EXPORT_SYMBOL
+0x4a2a2057 bus_find_device vmlinux EXPORT_SYMBOL_GPL
+0x3b54c397 drm_gem_handle_create vmlinux EXPORT_SYMBOL
+0xeedb1c22 sg_alloc_table_from_pages vmlinux EXPORT_SYMBOL
+0x6dfaa048 blk_rq_unprep_clone vmlinux EXPORT_SYMBOL_GPL
+0xcaa6e2be bpf_offload_dev_netdev_unregister vmlinux EXPORT_SYMBOL_GPL
+0xdddf0a51 trace_raw_output_prep vmlinux EXPORT_SYMBOL
+0xde5d21ee relay_switch_subbuf vmlinux EXPORT_SYMBOL_GPL
+0x463b898f rxrpc_get_server_data_key net/rxrpc/rxrpc EXPORT_SYMBOL
+0x97990f9e rpc_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5c699441 xfrm_aalg_get_byid net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL
+0x57693d10 esp_output_head net/ipv4/esp4 EXPORT_SYMBOL_GPL
+0x682595a4 register_ip_vs_app_inc net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xb06064df nvme_set_queue_count drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x47726e5e tcp_create_openreq_child vmlinux EXPORT_SYMBOL
+0xa05d995c sk_common_release vmlinux EXPORT_SYMBOL
+0x09345bcf led_trigger_register vmlinux EXPORT_SYMBOL_GPL
+0xa3edd23d drm_mode_set_config_internal vmlinux EXPORT_SYMBOL
+0xa1c4231f kvm_set_pfn_dirty vmlinux EXPORT_SYMBOL_GPL
+0xc3a98c1d lc_try_lock lib/lru_cache EXPORT_SYMBOL
+0x430bb528 snd_pcm_hw_rule_noresample sound/core/snd-pcm EXPORT_SYMBOL
+0x16da9949 capi_ctr_resume_output drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0xf65461f8 lwtunnel_valid_encap_type_attr vmlinux EXPORT_SYMBOL_GPL
+0x462d5bec skb_prepare_seq_read vmlinux EXPORT_SYMBOL
+0xa115cf0a skb_queue_head vmlinux EXPORT_SYMBOL
+0xe4f407d9 sk_reset_timer vmlinux EXPORT_SYMBOL
+0x5acfb47e drm_connector_list_iter_begin vmlinux EXPORT_SYMBOL
+0x9e8938c7 drm_helper_mode_fill_fb_struct vmlinux EXPORT_SYMBOL
+0xb599cb6f regulator_bulk_enable vmlinux EXPORT_SYMBOL_GPL
+0xc389518e fuse_conn_put vmlinux EXPORT_SYMBOL_GPL
+0xad765b7e devm_irq_setup_generic_chip vmlinux EXPORT_SYMBOL_GPL
+0x1b578ae2 irq_get_irq_data vmlinux EXPORT_SYMBOL_GPL
+0x09a34a2b crc_itu_t lib/crc-itu-t EXPORT_SYMBOL
+0xb0f162e2 mpt_Soft_Hard_ResetHandler drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xe5f24abf inet_frag_pull_head vmlinux EXPORT_SYMBOL
+0x63480467 tcp_seq_next vmlinux EXPORT_SYMBOL
+0xedf60e06 netdev_has_upper_dev vmlinux EXPORT_SYMBOL
+0xff73be6f ahci_start_fis_rx vmlinux EXPORT_SYMBOL_GPL
+0x8c27f7bb drm_pci_free vmlinux EXPORT_SYMBOL
+0x92e1f627 iov_iter_npages vmlinux EXPORT_SYMBOL
+0x820266eb bio_alloc_bioset vmlinux EXPORT_SYMBOL
+0xce3864eb ZSTD_compress_usingDict lib/zstd/zstd_compress EXPORT_SYMBOL
+0x176031a7 devlink_fmsg_string_put vmlinux EXPORT_SYMBOL_GPL
+0x3914a44b of_irq_parse_one vmlinux EXPORT_SYMBOL_GPL
+0xde9a5d9b usb_driver_set_configuration vmlinux EXPORT_SYMBOL_GPL
+0x0c72a365 mii_ethtool_sset vmlinux EXPORT_SYMBOL
+0x7e9be3f4 mii_ethtool_gset vmlinux EXPORT_SYMBOL
+0x04370e4b class_compat_create_link vmlinux EXPORT_SYMBOL_GPL
+0x6a89746f ttm_bo_mem_compat vmlinux EXPORT_SYMBOL
+0x2a8dbc25 mipi_dsi_set_maximum_return_packet_size vmlinux EXPORT_SYMBOL
+0x54fc931a drm_gem_vram_mm_funcs vmlinux EXPORT_SYMBOL
+0x23175e48 __devm_reset_control_get vmlinux EXPORT_SYMBOL_GPL
+0x7b6f9536 acpi_register_wakeup_handler vmlinux EXPORT_SYMBOL_GPL
+0xbc31c5f9 pstore_unregister vmlinux EXPORT_SYMBOL_GPL
+0x3903cc7d dump_emit vmlinux EXPORT_SYMBOL
+0xaf4e0ba2 vfs_cancel_lock vmlinux EXPORT_SYMBOL_GPL
+0xbe788cf9 __kernel_write vmlinux EXPORT_SYMBOL
+0x9d871fac pm_qos_remove_request vmlinux EXPORT_SYMBOL_GPL
+0xa22e94f1 kthread_cancel_work_sync vmlinux EXPORT_SYMBOL_GPL
+0x5d4efc1f svc_fill_write_vector net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x56eec831 ib_get_cached_lmc drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf89d5d85 transport_kunmap_data_sg drivers/target/target_core_mod EXPORT_SYMBOL
+0x61e58b53 ipvlan_link_setup drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x81231a51 netdev_pick_tx vmlinux EXPORT_SYMBOL
+0x70933830 release_sock vmlinux EXPORT_SYMBOL
+0x50e7193a __i2c_first_dynamic_bus_num vmlinux EXPORT_SYMBOL_GPL
+0xc0968d3b phy_validate_pause vmlinux EXPORT_SYMBOL
+0x17f0e983 pm_clk_suspend vmlinux EXPORT_SYMBOL_GPL
+0x19f35758 drm_i2c_encoder_init vmlinux EXPORT_SYMBOL
+0x8a092a34 textsearch_destroy vmlinux EXPORT_SYMBOL
+0x8b058dd2 alloc_pages_current vmlinux EXPORT_SYMBOL
+0xbbe27b6e nft_fib6_eval net/ipv6/netfilter/nft_fib_ipv6 EXPORT_SYMBOL_GPL
+0xf6d6ef6e nft_fib4_eval net/ipv4/netfilter/nft_fib_ipv4 EXPORT_SYMBOL_GPL
+0x345a6868 __snd_rawmidi_transmit_peek sound/core/snd-rawmidi EXPORT_SYMBOL
+0xfe949f6c snd_pcm_hw_constraint_msbits sound/core/snd-pcm EXPORT_SYMBOL
+0xabf51ef5 parport_register_dev_model drivers/parport/parport EXPORT_SYMBOL
+0x35df519d __netlink_ns_capable vmlinux EXPORT_SYMBOL
+0x5afdfb16 __sk_receive_skb vmlinux EXPORT_SYMBOL
+0xa0277f7a mdiobus_setup_mdiodev_from_board_info vmlinux EXPORT_SYMBOL
+0x38e5bc5a clk_set_rate_range vmlinux EXPORT_SYMBOL_GPL
+0x2c82c36a security_secmark_relabel_packet vmlinux EXPORT_SYMBOL
+0xccf5903d sync_inode vmlinux EXPORT_SYMBOL
+0xb1e12d81 krealloc vmlinux EXPORT_SYMBOL
+0x165ad395 trace_print_symbols_seq vmlinux EXPORT_SYMBOL
+0xc3cd034d crc8_populate_lsb lib/crc8 EXPORT_SYMBOL
+0xaa8106bc crc8_populate_msb lib/crc8 EXPORT_SYMBOL
+0x8d490167 base_inv_false_key lib/test_static_key_base EXPORT_SYMBOL_GPL
+0xa295f969 osd_req_op_extent_dup_last net/ceph/libceph EXPORT_SYMBOL
+0x73076315 snd_pci_quirk_lookup_id sound/core/snd EXPORT_SYMBOL
+0x03671405 iscsit_process_scsi_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xcdee6ce3 input_register_device vmlinux EXPORT_SYMBOL
+0xff5d901b drm_print_regset32 vmlinux EXPORT_SYMBOL
+0x6df9d673 drm_fb_helper_fini vmlinux EXPORT_SYMBOL
+0x31266931 con_debug_leave vmlinux EXPORT_SYMBOL_GPL
+0x726b19c4 crypto_inst_setname vmlinux EXPORT_SYMBOL_GPL
+0xaf81032b __invalidate_device vmlinux EXPORT_SYMBOL
+0x51921fd3 seq_put_decimal_ull vmlinux EXPORT_SYMBOL
+0x3916ccd0 mark_page_dirty vmlinux EXPORT_SYMBOL_GPL
+0x5133dfe3 nf_ct_remove_expect net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xf1eeca40 __tracepoint_mlx5_fs_del_fg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x90018bd2 __tracepoint_mlx5_fs_del_ft drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x6919deac mlx5_set_port_tc_group drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x222a283c __fscache_write_page fs/fscache/fscache EXPORT_SYMBOL
+0x9114a9ac led_classdev_unregister vmlinux EXPORT_SYMBOL_GPL
+0x39ca0fef spi_split_transfers_maxsize vmlinux EXPORT_SYMBOL_GPL
+0x1d4c335f ata_sff_qc_issue vmlinux EXPORT_SYMBOL_GPL
+0x02bc2890 devm_regmap_field_alloc vmlinux EXPORT_SYMBOL_GPL
+0x31195776 drm_send_event_locked vmlinux EXPORT_SYMBOL
+0x1163f0a7 blk_max_low_pfn vmlinux EXPORT_SYMBOL
+0xf97d7de2 register_sysctl_table vmlinux EXPORT_SYMBOL
+0x67b78eb3 seq_hlist_next_rcu vmlinux EXPORT_SYMBOL
+0x8e62b2c4 bdi_register_va vmlinux EXPORT_SYMBOL
+0x7d1bb1d4 tnum_strn vmlinux EXPORT_SYMBOL_GPL
+0x42014ea6 xdr_stream_decode_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xffadba96 rpc_clnt_xprt_switch_has_addr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe8cd16a6 nf_ct_get_id net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x63e0fee5 mdio45_links_ok drivers/net/mdio EXPORT_SYMBOL
+0x47c9cce6 hinic_physical_port_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x35bbb9d7 hinic_ppf_tmr_stop drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xaef302c2 nvme_cancel_admin_tagset drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x05fd4105 nfs_sops fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x16ce46fd mr_mfc_find_any vmlinux EXPORT_SYMBOL
+0x23a4ba54 ipv4_redirect vmlinux EXPORT_SYMBOL_GPL
+0x6fdc8fea flow_block_cb_priv vmlinux EXPORT_SYMBOL
+0xb49fa9bc flow_block_cb_free vmlinux EXPORT_SYMBOL
+0xf79ca0f8 netdev_adjacent_change_prepare vmlinux EXPORT_SYMBOL
+0x9130b62b __netif_set_xps_queue vmlinux EXPORT_SYMBOL_GPL
+0xf9be270b drm_syncobj_get_fd vmlinux EXPORT_SYMBOL
+0x79d766f2 drm_connector_attach_edid_property vmlinux EXPORT_SYMBOL
+0xf54f1190 tty_flip_buffer_push vmlinux EXPORT_SYMBOL
+0xaf6bc3d0 posix_acl_init vmlinux EXPORT_SYMBOL
+0x22d148b8 ceph_osdc_wait_request net/ceph/libceph EXPORT_SYMBOL
+0xbfee43fb atm_dev_register net/atm/atm EXPORT_SYMBOL
+0xf2b1bd0b qlt_lport_register drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL
+0xe613a798 inet_addr_is_any vmlinux EXPORT_SYMBOL
+0x6f0f0675 hidinput_calc_abs_res vmlinux EXPORT_SYMBOL_GPL
+0xc66b77b1 iommu_group_set_iommudata vmlinux EXPORT_SYMBOL_GPL
+0xd55ad93b iommu_group_get_iommudata vmlinux EXPORT_SYMBOL_GPL
+0x0e9fc7ce uart_get_divisor vmlinux EXPORT_SYMBOL
+0xfdcb4ed3 acpi_os_get_line vmlinux EXPORT_SYMBOL
+0xab74cbd9 pinctrl_utils_add_map_mux vmlinux EXPORT_SYMBOL_GPL
+0xc8ef18cd io_cgrp_subsys vmlinux EXPORT_SYMBOL_GPL
+0xffd0f302 bio_copy_data vmlinux EXPORT_SYMBOL
+0x7d8188cf fscrypt_inherit_context vmlinux EXPORT_SYMBOL
+0x03952887 ktime_add_safe vmlinux EXPORT_SYMBOL_GPL
+0xf9a482f9 msleep vmlinux EXPORT_SYMBOL
+0x18356efe ceph_zero_page_vector_range net/ceph/libceph EXPORT_SYMBOL
+0x04aba826 sunrpc_cache_lookup_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3493e6f6 validate_xmit_xfrm vmlinux EXPORT_SYMBOL_GPL
+0xf42cfc48 cookie_timestamp_decode vmlinux EXPORT_SYMBOL
+0x726af5c0 unregister_netdev vmlinux EXPORT_SYMBOL
+0x983276da phylink_disconnect_phy vmlinux EXPORT_SYMBOL_GPL
+0x88368e9c sata_async_notification vmlinux EXPORT_SYMBOL_GPL
+0xfcf8491c __bforget vmlinux EXPORT_SYMBOL
+0x1a3052dd generic_file_write_iter vmlinux EXPORT_SYMBOL
+0xcab3b87e __audit_inode_child vmlinux EXPORT_SYMBOL_GPL
+0x30e02e5c param_set_ullong vmlinux EXPORT_SYMBOL
+0xadcba50b ZSTD_findFrameCompressedSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x41ce9823 p9stat_read net/9p/9pnet EXPORT_SYMBOL
+0x19bb5df6 xdr_reserve_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x1bf5b385 lapb_disconnect_request net/lapb/lapb EXPORT_SYMBOL
+0xf9177557 set_h225_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0xec1fe316 md_unregister_thread drivers/md/md-mod EXPORT_SYMBOL
+0xe0ff7a18 unregister_pppox_proto drivers/net/ppp/pppox EXPORT_SYMBOL
+0x5663d135 inet6_add_protocol vmlinux EXPORT_SYMBOL
+0xf5c26ced free_fib_info vmlinux EXPORT_SYMBOL_GPL
+0x1ddae1ea raw_unhash_sk vmlinux EXPORT_SYMBOL_GPL
+0xaf5a2fe9 xt_register_table vmlinux EXPORT_SYMBOL_GPL
+0xdca05882 sock_no_listen vmlinux EXPORT_SYMBOL
+0x36f50142 of_phy_deregister_fixed_link vmlinux EXPORT_SYMBOL
+0x0b6e332f rc_map_unregister vmlinux EXPORT_SYMBOL_GPL
+0xa809c867 input_mt_destroy_slots vmlinux EXPORT_SYMBOL
+0x576fb83c usb_serial_generic_unthrottle vmlinux EXPORT_SYMBOL_GPL
+0x86b13d2a usb_unpoison_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0x6fbd2bf6 phy_support_sym_pause vmlinux EXPORT_SYMBOL
+0xeca7184b sas_alloc_task vmlinux EXPORT_SYMBOL_GPL
+0x625dcbf9 scsi_device_lookup_by_target vmlinux EXPORT_SYMBOL
+0x195e5237 drm_panel_add vmlinux EXPORT_SYMBOL
+0x7e08d3c8 __tracepoint_remove_device_from_group vmlinux EXPORT_SYMBOL_GPL
+0x79aa04a2 get_random_bytes vmlinux EXPORT_SYMBOL
+0xcd9cd2ff wakeme_after_rcu vmlinux EXPORT_SYMBOL_GPL
+0xab600421 probe_irq_off vmlinux EXPORT_SYMBOL
+0xe83c7327 nfc_hci_connect_gate net/nfc/hci/hci EXPORT_SYMBOL
+0x77750d97 rpcauth_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x92be170e snd_ctl_activate_id sound/core/snd EXPORT_SYMBOL_GPL
+0xcf8c2590 dm_cache_policy_get_hint_size drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xbee8b868 hinic_slq_get_addr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x53b77a80 hinic_vf_in_pf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x23785156 fc_eh_host_reset drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xd52e53a0 nfs_setattr_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x7a55c609 nf_route vmlinux EXPORT_SYMBOL_GPL
+0xae35ddc4 tcf_exts_destroy vmlinux EXPORT_SYMBOL
+0xad6a4be5 dev_get_iflink vmlinux EXPORT_SYMBOL
+0x0f077cdd input_enable_softrepeat vmlinux EXPORT_SYMBOL
+0xde26d771 usb_serial_generic_submit_read_urbs vmlinux EXPORT_SYMBOL_GPL
+0x225bfdcf regcache_sync_region vmlinux EXPORT_SYMBOL_GPL
+0xa356c454 pm_runtime_allow vmlinux EXPORT_SYMBOL_GPL
+0x7caf306c property_entries_dup vmlinux EXPORT_SYMBOL_GPL
+0x0c872a24 do_unbind_con_driver vmlinux EXPORT_SYMBOL_GPL
+0x64d8b36b hisi_clk_alloc vmlinux EXPORT_SYMBOL_GPL
+0xd09911a6 acpi_dev_get_irq_type vmlinux EXPORT_SYMBOL_GPL
+0xdaf4dfb3 fb_mode_option vmlinux EXPORT_SYMBOL_GPL
+0xf1361941 pci_vpd_find_tag vmlinux EXPORT_SYMBOL_GPL
+0xa8a95e24 iomap_truncate_page vmlinux EXPORT_SYMBOL_GPL
+0xa63a71ea seq_vprintf vmlinux EXPORT_SYMBOL
+0x94e15e66 ceph_create_client net/ceph/libceph EXPORT_SYMBOL
+0x85e4e520 rds_stats_info_copy net/rds/rds EXPORT_SYMBOL_GPL
+0xd239c2e1 xprt_request_get_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6e528fff nf_br_ops net/bridge/bridge EXPORT_SYMBOL_GPL
+0xbecf5d14 nfnl_acct_put net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL
+0xa6e79322 dm_exception_store_type_unregister drivers/md/dm-snapshot EXPORT_SYMBOL
+0x3c2b68f7 of_changeset_apply vmlinux EXPORT_SYMBOL_GPL
+0xc4b36b3d usb_serial_generic_close vmlinux EXPORT_SYMBOL_GPL
+0x2406197a regulator_get vmlinux EXPORT_SYMBOL_GPL
+0x47c95405 pci_write_config_word vmlinux EXPORT_SYMBOL
+0x7a898aac blk_rq_prep_clone vmlinux EXPORT_SYMBOL_GPL
+0xe74edde3 crypto_alloc_shash vmlinux EXPORT_SYMBOL_GPL
+0xf14493f2 crypto_alloc_ahash vmlinux EXPORT_SYMBOL_GPL
+0xb74e70a8 crypto_alg_extsize vmlinux EXPORT_SYMBOL_GPL
+0xe5d780f1 irq_chip_enable_parent vmlinux EXPORT_SYMBOL_GPL
+0xcf9123e8 osd_req_op_init net/ceph/libceph EXPORT_SYMBOL
+0xd3074cdc unregister_ip_vs_scheduler net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x834dc955 snd_pcm_format_size sound/core/snd-pcm EXPORT_SYMBOL
+0x83ce0c0f dm_cache_policy_register drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xaf507de1 __arch_copy_from_user vmlinux EXPORT_SYMBOL
+0x4b2c325f ipv6_mc_check_mld vmlinux EXPORT_SYMBOL
+0xe6dc163a xfrm_state_afinfo_get_rcu vmlinux EXPORT_SYMBOL_GPL
+0x37c2bd0c tcf_action_check_ctrlact vmlinux EXPORT_SYMBOL
+0xf7a0d045 vlan_ioctl_set vmlinux EXPORT_SYMBOL
+0x92c84638 of_alias_get_id vmlinux EXPORT_SYMBOL_GPL
+0xa5bda8a1 efi_capsule_supported vmlinux EXPORT_SYMBOL_GPL
+0xf86dcbe4 bgpio_init vmlinux EXPORT_SYMBOL_GPL
+0x9bd86421 __page_mapcount vmlinux EXPORT_SYMBOL_GPL
+0x6a037cf1 mempool_kfree vmlinux EXPORT_SYMBOL
+0x75d0deb9 nsecs_to_jiffies64 vmlinux EXPORT_SYMBOL
+0x40d84a37 ZSTD_getFrameParams lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x72ad9f29 dm_cache_policy_unregister drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xc28e8799 __fscache_invalidate fs/fscache/fscache EXPORT_SYMBOL
+0x1bcf97ac of_graph_get_remote_port_parent vmlinux EXPORT_SYMBOL
+0x7e84e76c usb_wakeup_enabled_descendants vmlinux EXPORT_SYMBOL_GPL
+0x85173433 scsi_scan_target vmlinux EXPORT_SYMBOL
+0x8be25a09 subsys_dev_iter_exit vmlinux EXPORT_SYMBOL_GPL
+0xf8e50b45 subsys_dev_iter_init vmlinux EXPORT_SYMBOL_GPL
+0x0b44d6b8 mipi_dsi_host_unregister vmlinux EXPORT_SYMBOL
+0x090d8a94 drm_property_create_object vmlinux EXPORT_SYMBOL
+0x06c49551 drm_dp_dsc_sink_line_buf_depth vmlinux EXPORT_SYMBOL
+0xdab25c47 virtqueue_is_broken vmlinux EXPORT_SYMBOL_GPL
+0xcba4abe3 list_sort vmlinux EXPORT_SYMBOL
+0xd0963bbf register_quota_format vmlinux EXPORT_SYMBOL
+0x20397714 remove_arg_zero vmlinux EXPORT_SYMBOL
+0x5b56860c vm_munmap vmlinux EXPORT_SYMBOL
+0x08cba34c remap_pfn_range vmlinux EXPORT_SYMBOL
+0x344717e7 nci_hci_send_cmd net/nfc/nci/nci EXPORT_SYMBOL
+0x2e2c0c48 dccp_v4_conn_request net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL
+0xcda143c2 ib_alloc_mr_integrity drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x890723f7 dm_cache_policy_get_version drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xb22e16d5 radix_tree_maybe_preload vmlinux EXPORT_SYMBOL
+0x1a36a09a phy_register_fixup vmlinux EXPORT_SYMBOL
+0x49d72d36 device_bind_driver vmlinux EXPORT_SYMBOL_GPL
+0xafa1d234 device_store_ulong vmlinux EXPORT_SYMBOL_GPL
+0xabad2ade __dax_zero_page_range vmlinux EXPORT_SYMBOL_GPL
+0x5a553003 file_update_time vmlinux EXPORT_SYMBOL
+0xd51f843a osd_req_op_extent_osd_data net/ceph/libceph EXPORT_SYMBOL
+0xf49bc67a atm_pcr_goal net/atm/atm EXPORT_SYMBOL
+0x641f10f4 closure_wait drivers/md/bcache/bcache EXPORT_SYMBOL
+0x23eddc68 mlxsw_core_cpu_port_init drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x464f336f kobject_uevent_env vmlinux EXPORT_SYMBOL_GPL
+0xb8cd3a7f nf_logger_put vmlinux EXPORT_SYMBOL_GPL
+0xb647fabe dev_mc_init vmlinux EXPORT_SYMBOL
+0xab9be3b8 dev_uc_init vmlinux EXPORT_SYMBOL
+0x865029ac __hw_addr_sync vmlinux EXPORT_SYMBOL
+0xe1649faa drm_gem_dmabuf_vmap vmlinux EXPORT_SYMBOL
+0x337cb0a3 devm_gpiod_get_optional vmlinux EXPORT_SYMBOL_GPL
+0x04ea56f9 _kstrtol vmlinux EXPORT_SYMBOL
+0xd2f1fadd pagevec_lookup_range_nr_tag vmlinux EXPORT_SYMBOL
+0x168df522 perf_trace_run_bpf_submit vmlinux EXPORT_SYMBOL_GPL
+0x18f94265 relay_file_operations vmlinux EXPORT_SYMBOL_GPL
+0xc6905fdf init_timer_key vmlinux EXPORT_SYMBOL
+0x94e481cf ZSTD_adjustCParams lib/zstd/zstd_compress EXPORT_SYMBOL
+0x93aeac62 rpc_wake_up_status net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe8ec1d9c sound_class sound/soundcore EXPORT_SYMBOL
+0x8e34f5bb tap_destroy_cdev drivers/net/tap EXPORT_SYMBOL_GPL
+0xbeece01b mlx5_vector2eqn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xe5b2cb1e mlx4_xrcd_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xe5cf06b5 fc_lport_notifier_head drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x8abc1e10 dev_set_mac_address_user vmlinux EXPORT_SYMBOL
+0xc48bc5e6 put_cmsg vmlinux EXPORT_SYMBOL
+0xdadd6125 skb_free_datagram vmlinux EXPORT_SYMBOL
+0x1e146d98 skb_vlan_push vmlinux EXPORT_SYMBOL
+0xcfd9d11a of_fdt_unflatten_tree vmlinux EXPORT_SYMBOL_GPL
+0x0c6864e5 usb_set_configuration vmlinux EXPORT_SYMBOL_GPL
+0xd9436cf5 ttm_eu_fence_buffer_objects vmlinux EXPORT_SYMBOL
+0xf9f346a4 drm_gem_cma_mmap vmlinux EXPORT_SYMBOL_GPL
+0x1af14102 pci_wake_from_d3 vmlinux EXPORT_SYMBOL
+0x7c32d6a7 gpiod_set_consumer_name vmlinux EXPORT_SYMBOL_GPL
+0x416c2f50 __tracepoint_wbc_writepage vmlinux EXPORT_SYMBOL_GPL
+0xc9c4745f vfs_clone_file_range vmlinux EXPORT_SYMBOL
+0x1a5b66f8 filp_close vmlinux EXPORT_SYMBOL
+0xd428c998 truncate_setsize vmlinux EXPORT_SYMBOL
+0x262e823a wait_for_completion_interruptible vmlinux EXPORT_SYMBOL
+0x19184f5c svcauth_gss_register_pseudoflavor net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x32b581cf svcauth_gss_flavor net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x863e8f63 rdma_rw_ctx_signature_init drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xee926d8f __tracepoint_bcache_writeback_collision drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xab03bbda __phy_modify_mmd vmlinux EXPORT_SYMBOL_GPL
+0xe868df08 drm_object_property_get_value vmlinux EXPORT_SYMBOL
+0x67b27ec1 tty_std_termios vmlinux EXPORT_SYMBOL
+0xd8d24416 hisi_clk_register_mux vmlinux EXPORT_SYMBOL_GPL
+0x76d33847 pci_create_root_bus vmlinux EXPORT_SYMBOL_GPL
+0x1c0f6b88 d_obtain_root vmlinux EXPORT_SYMBOL
+0xe2e0c7c6 __flush_icache_range vmlinux EXPORT_SYMBOL
+0x1341423e vsock_find_connected_socket net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x1000f0b6 lowpan_header_decompress net/6lowpan/6lowpan EXPORT_SYMBOL_GPL
+0xe834d403 rpc_task_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6be7a92b capi20_get_manufacturer drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x7db84a25 mdio45_ethtool_ksettings_get_npage drivers/net/mdio EXPORT_SYMBOL
+0xf1b45212 mr_table_dump vmlinux EXPORT_SYMBOL
+0x9aee6154 ether_setup vmlinux EXPORT_SYMBOL
+0x0433daad page_pool_unmap_page vmlinux EXPORT_SYMBOL
+0xfbce8339 usb_deregister_device_driver vmlinux EXPORT_SYMBOL_GPL
+0x783dc2ec device_create_file vmlinux EXPORT_SYMBOL_GPL
+0x73348c4f drm_client_buffer_vunmap vmlinux EXPORT_SYMBOL
+0x4ba8f946 __break_lease vmlinux EXPORT_SYMBOL
+0x912b54ea fc_mount vmlinux EXPORT_SYMBOL
+0xf20a6ad7 __get_vm_area vmlinux EXPORT_SYMBOL_GPL
+0xdb02fa60 list_lru_add vmlinux EXPORT_SYMBOL_GPL
+0x2e32888b param_ops_bint vmlinux EXPORT_SYMBOL
+0x1e2af33a param_set_bool vmlinux EXPORT_SYMBOL
+0x3c43daa2 param_ops_byte vmlinux EXPORT_SYMBOL
+0x6aaad46e xprt_free net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x58d8bbae ibdev_printk drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x2a74e9d1 async_tx_quiesce crypto/async_tx/async_tx EXPORT_SYMBOL_GPL
+0x3393d479 nfs_alloc_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xe0cb8fc7 xfrm_lookup_with_ifid vmlinux EXPORT_SYMBOL
+0xccfb9e07 dst_default_metrics vmlinux EXPORT_SYMBOL
+0x8298f8cd sock_dequeue_err_skb vmlinux EXPORT_SYMBOL
+0xb0e218ee fwnode_property_read_u64_array vmlinux EXPORT_SYMBOL_GPL
+0x8285093d fwnode_property_read_u32_array vmlinux EXPORT_SYMBOL_GPL
+0x3725aa50 fwnode_property_read_u16_array vmlinux EXPORT_SYMBOL_GPL
+0x87a93024 drm_lspcon_get_mode vmlinux EXPORT_SYMBOL
+0xeea659d5 put_iova_domain vmlinux EXPORT_SYMBOL_GPL
+0xf8269c69 would_dump vmlinux EXPORT_SYMBOL
+0x1a45b951 truncate_inode_pages_range vmlinux EXPORT_SYMBOL
+0x6ea9363b force_sig vmlinux EXPORT_SYMBOL
+0x16756dc0 snd_usbmidi_input_start sound/usb/snd-usbmidi-lib EXPORT_SYMBOL
+0x926b8518 parport_ieee1284_read_nibble drivers/parport/parport EXPORT_SYMBOL
+0x2f40da68 dm_bm_set_read_write drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x72f07bf4 dm_bufio_set_minimum_buffers drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xf3a26f23 nfs4_pnfs_ds_put fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x4c70a139 nfs4_fs_type fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x64d3cc4e xas_load vmlinux EXPORT_SYMBOL_GPL
+0xa36ea15e usb_ep_free_request vmlinux EXPORT_SYMBOL_GPL
+0x7d340917 fwnode_get_next_parent vmlinux EXPORT_SYMBOL_GPL
+0x05240ee7 percpu_counter_batch vmlinux EXPORT_SYMBOL
+0x2bb4fa71 d_path vmlinux EXPORT_SYMBOL
+0x68edd94e kvm_get_kvm vmlinux EXPORT_SYMBOL_GPL
+0xeef6cfa3 ceph_iterate_extents net/ceph/libceph EXPORT_SYMBOL
+0x5cbb4f31 mpls_pkt_too_big net/mpls/mpls_router EXPORT_SYMBOL_GPL
+0x58d2ae3f iscsit_setup_nop_out drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xd511df1f devlink_params_register vmlinux EXPORT_SYMBOL_GPL
+0xe19a9b6d skb_page_frag_refill vmlinux EXPORT_SYMBOL
+0xe4118762 sdev_prefix_printk vmlinux EXPORT_SYMBOL
+0xfa67d465 vring_del_virtqueue vmlinux EXPORT_SYMBOL_GPL
+0xd09fda64 acpi_notifier_call_chain vmlinux EXPORT_SYMBOL
+0x09b53e14 interval_tree_remove vmlinux EXPORT_SYMBOL_GPL
+0x3062976f p9_show_client_options net/9p/9pnet EXPORT_SYMBOL
+0x17ab934b mlx5_query_port_tc_bw_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xcf80f41f hinic_l2nic_reset_base drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x1603cc7e devlink_dpipe_action_put vmlinux EXPORT_SYMBOL_GPL
+0xb4a8d4a7 genphy_read_abilities vmlinux EXPORT_SYMBOL
+0x502a50ae sdev_evt_send vmlinux EXPORT_SYMBOL_GPL
+0xa25e8e90 csum_and_copy_to_iter vmlinux EXPORT_SYMBOL
+0xceec8abd __mb_cache_entry_free vmlinux EXPORT_SYMBOL
+0xd8432aac igrab vmlinux EXPORT_SYMBOL
+0xf03fe862 ceph_pagelist_set_cursor net/ceph/libceph EXPORT_SYMBOL
+0x8986a1a0 snd_pcm_open_substream sound/core/snd-pcm EXPORT_SYMBOL
+0x18b1f4bf mlx5_core_query_vport_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x7ee0b105 mlx5_query_nic_vport_min_inline drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc71e64a9 snmp_get_cpu_field vmlinux EXPORT_SYMBOL_GPL
+0x663bb448 devlink_fmsg_obj_nest_end vmlinux EXPORT_SYMBOL_GPL
+0x254c097a rtnl_notify vmlinux EXPORT_SYMBOL
+0x030cff8a __rtnl_link_register vmlinux EXPORT_SYMBOL_GPL
+0x5e89ec1f zerocopy_sg_from_iter vmlinux EXPORT_SYMBOL
+0xe6b10832 sock_recvmsg vmlinux EXPORT_SYMBOL
+0x81a01e08 pcc_mbox_free_channel vmlinux EXPORT_SYMBOL_GPL
+0x52babded thermal_zone_get_offset vmlinux EXPORT_SYMBOL_GPL
+0x42041512 i2c_get_dma_safe_msg_buf vmlinux EXPORT_SYMBOL_GPL
+0xc79bc32c usb_hcd_unmap_urb_for_dma vmlinux EXPORT_SYMBOL_GPL
+0x8a63dd71 phy_select_page vmlinux EXPORT_SYMBOL_GPL
+0x9de6909d pci_back_from_sleep vmlinux EXPORT_SYMBOL
+0x243f0b4b crypto_check_attr_type vmlinux EXPORT_SYMBOL_GPL
+0x11b48dac snd_device_free sound/core/snd EXPORT_SYMBOL
+0x96808093 ps2_sliced_command drivers/input/serio/libps2 EXPORT_SYMBOL
+0x5b38177e linkwatch_fire_event vmlinux EXPORT_SYMBOL
+0x8b13a8b8 hid_snto32 vmlinux EXPORT_SYMBOL_GPL
+0xcc328a5c reservation_ww_class vmlinux EXPORT_SYMBOL
+0xce6d3fe7 dev_pm_set_dedicated_wake_irq vmlinux EXPORT_SYMBOL_GPL
+0x647b7f42 drm_of_component_probe vmlinux EXPORT_SYMBOL
+0xb0662838 drm_connector_set_link_status_property vmlinux EXPORT_SYMBOL
+0x50ca9985 rhashtable_walk_peek vmlinux EXPORT_SYMBOL_GPL
+0xa3f6f171 _copy_from_iter vmlinux EXPORT_SYMBOL
+0x43f1a054 fs_bio_set vmlinux EXPORT_SYMBOL
+0x4be32bb8 __get_task_comm vmlinux EXPORT_SYMBOL_GPL
+0x27479d14 param_free_charp vmlinux EXPORT_SYMBOL
+0xb0e60b11 sctp_transport_lookup_process net/sctp/sctp EXPORT_SYMBOL_GPL
+0xcd7ac7cd nf_tables_deactivate_flowtable net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x950d35e4 iscsit_check_dataout_payload drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x2da6a51e dm_cell_release_no_holder drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x83794326 alloc_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x9d1a5e3a __memcpy vmlinux EXPORT_SYMBOL
+0x4fe1eddf unregister_netevent_notifier vmlinux EXPORT_SYMBOL_GPL
+0x2951575a ethtool_rx_flow_rule_create vmlinux EXPORT_SYMBOL
+0x84f3eb8f input_mt_report_slot_state vmlinux EXPORT_SYMBOL
+0x881d4cf1 scsi_init_io vmlinux EXPORT_SYMBOL
+0x59f9cdbb platform_get_irq_optional vmlinux EXPORT_SYMBOL_GPL
+0xdad8ec37 clk_mux_determine_rate_flags vmlinux EXPORT_SYMBOL_GPL
+0xa720b873 rpc_queue_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x49df1139 nf_ct_seq_adjust net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x10e87a23 mddev_congested drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x383d72f8 mlx4_unicast_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x539dce66 mlx4_unicast_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x295b982a hisi_clk_register_fixed_rate vmlinux EXPORT_SYMBOL_GPL
+0xb407c1df percpu_ref_switch_to_atomic vmlinux EXPORT_SYMBOL_GPL
+0x409df5ec __blkg_prfill_u64 vmlinux EXPORT_SYMBOL_GPL
+0x10a4ea53 vmf_insert_pfn vmlinux EXPORT_SYMBOL
+0x165b1948 ceph_pagelist_free_reserve net/ceph/libceph EXPORT_SYMBOL
+0xfdb40ca8 snd_pcm_period_elapsed sound/core/snd-pcm EXPORT_SYMBOL
+0x024c2d4d fcoe_ctlr_link_up drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL
+0x42bed8d4 unix_gc_lock vmlinux EXPORT_SYMBOL
+0x41350168 nf_queue_nf_hook_drop vmlinux EXPORT_SYMBOL_GPL
+0x004415f5 nr_hook_count vmlinux EXPORT_SYMBOL
+0x58f8bb7c spi_controller_suspend vmlinux EXPORT_SYMBOL_GPL
+0xdc6b85e3 drm_av_sync_delay vmlinux EXPORT_SYMBOL
+0x5f544771 of_dma_controller_free vmlinux EXPORT_SYMBOL_GPL
+0x49568958 debugfs_create_x8 vmlinux EXPORT_SYMBOL_GPL
+0x0c524129 cgroup_get_from_path vmlinux EXPORT_SYMBOL_GPL
+0x4c788bc9 irq_domain_xlate_twocell vmlinux EXPORT_SYMBOL_GPL
+0x8e517503 irq_domain_xlate_onecell vmlinux EXPORT_SYMBOL_GPL
+0xca9fc082 synproxy_net_id net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x8f699913 capilib_release drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x904907ba capi20_get_serial drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x62c74501 dm_cell_quiesce_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x2de3329e mlx4_qp_query drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x191e6549 hinic_aeq_register_hw_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xe4683409 fib_info_nh_uses_dev vmlinux EXPORT_SYMBOL_GPL
+0xf8864348 netdev_update_lockdep_key vmlinux EXPORT_SYMBOL
+0x5b3b2fe3 power_supply_get_drvdata vmlinux EXPORT_SYMBOL_GPL
+0xce2a8b4f phylink_fixed_state_cb vmlinux EXPORT_SYMBOL_GPL
+0x21f34cf1 drm_panel_remove vmlinux EXPORT_SYMBOL
+0xd2f46039 drm_object_attach_property vmlinux EXPORT_SYMBOL
+0xf5f31d2c uart_write_wakeup vmlinux EXPORT_SYMBOL
+0x4a441ace pci_bus_read_config_word vmlinux EXPORT_SYMBOL
+0x465e24ff ucs2_utf8size vmlinux EXPORT_SYMBOL
+0x7308e713 crypto_ahash_walk_first vmlinux EXPORT_SYMBOL_GPL
+0xb26a1add elfcorehdr_addr vmlinux EXPORT_SYMBOL_GPL
+0x29236fef rdma_dev_access_netns drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x24935482 __tracepoint_bcache_bypass_sequential drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xd9faf6b1 hinic_detach_roce drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x3a593c10 nfs_pgio_current_mirror fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xea0d7320 devlink_dpipe_table_unregister vmlinux EXPORT_SYMBOL_GPL
+0x0de82f74 netdev_lower_get_next vmlinux EXPORT_SYMBOL
+0x4de24f37 skb_append vmlinux EXPORT_SYMBOL
+0x31c7970f pciserial_suspend_ports vmlinux EXPORT_SYMBOL_GPL
+0x50cea9f8 read_cache_page_gfp vmlinux EXPORT_SYMBOL
+0x1f1202b2 kvm_vcpu_on_spin vmlinux EXPORT_SYMBOL_GPL
+0xae6f5103 setup_udp_tunnel_sock net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0xadd25548 iscsi_tcp_segment_unmap drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x18896f7f sk_stream_error vmlinux EXPORT_SYMBOL
+0x4a252f31 usb_stor_CB_reset vmlinux EXPORT_SYMBOL_GPL
+0x363877fd drm_fb_helper_debug_leave vmlinux EXPORT_SYMBOL
+0x0328cc5d del_random_ready_callback vmlinux EXPORT_SYMBOL
+0x49d94542 pci_ioremap_wc_bar vmlinux EXPORT_SYMBOL_GPL
+0xf0129822 crypto_sha1_finup vmlinux EXPORT_SYMBOL
+0x3c2707df put_user_pages vmlinux EXPORT_SYMBOL
+0x71ab3f9a audit_log_start vmlinux EXPORT_SYMBOL
+0xd41f563a nf_flow_table_cleanup net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0xdcb764ad memset vmlinux EXPORT_SYMBOL
+0xf6cf68a5 netdev_crit vmlinux EXPORT_SYMBOL
+0xe173c7c7 netdev_bonding_info_change vmlinux EXPORT_SYMBOL
+0x0999cec2 devm_nvmem_unregister vmlinux EXPORT_SYMBOL
+0x90573d51 of_console_check vmlinux EXPORT_SYMBOL_GPL
+0xd3bfa753 usb_bus_idr_lock vmlinux EXPORT_SYMBOL_GPL
+0xc708f1fe ec_write vmlinux EXPORT_SYMBOL
+0xb26cedb4 sysfs_create_files vmlinux EXPORT_SYMBOL_GPL
+0xf578eb83 dquot_get_state vmlinux EXPORT_SYMBOL
+0xeeff0579 sget vmlinux EXPORT_SYMBOL
+0xc9f313f2 rxrpc_kernel_charge_accept net/rxrpc/rxrpc EXPORT_SYMBOL
+0xce2b6828 nf_ct_frag6_gather net/ipv6/netfilter/nf_defrag_ipv6 EXPORT_SYMBOL_GPL
+0x0476f9e1 tcp_vegas_pkts_acked net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL
+0x39e01fcd ip_tunnel_setup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0xb77840b0 mtd_kmalloc_up_to drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x11e943a1 target_set_cmd_data_length drivers/target/target_core_mod EXPORT_SYMBOL
+0x4199f50b md_find_rdev_nr_rcu drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x25c84fbe cdrom_media_changed drivers/cdrom/cdrom EXPORT_SYMBOL
+0x252480ff mlx4_get_vf_stats drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xcf3ba5d5 iscsi_tcp_dgst_header drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x26882314 nfs4_sequence_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xcfdd3eff ahci_start_engine vmlinux EXPORT_SYMBOL_GPL
+0xe7aef668 ata_platform_remove_one vmlinux EXPORT_SYMBOL_GPL
+0x155cb492 component_master_add_with_match vmlinux EXPORT_SYMBOL_GPL
+0x96b4c354 ttm_bo_unmap_virtual vmlinux EXPORT_SYMBOL
+0x3130be71 tty_save_termios vmlinux EXPORT_SYMBOL_GPL
+0x2020d8e2 pcie_get_mps vmlinux EXPORT_SYMBOL
+0xdc3fcbc9 __sw_hweight8 vmlinux EXPORT_SYMBOL
+0x3a13f54a sgl_alloc vmlinux EXPORT_SYMBOL
+0xeb44339a free_pages_exact vmlinux EXPORT_SYMBOL
+0xe4183f3c snd_card_rw_proc_new sound/core/snd EXPORT_SYMBOL_GPL
+0xb2f65850 mlx4_cq_modify drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x77e4db1b nvme_disable_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x75de0f77 nfs4_mark_deviceid_unavailable fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x56e31445 __fscache_uncache_all_inode_pages fs/fscache/fscache EXPORT_SYMBOL
+0xe9eda174 gpiochip_request_own_desc vmlinux EXPORT_SYMBOL_GPL
+0x68d84686 pinctrl_pm_select_default_state vmlinux EXPORT_SYMBOL_GPL
+0xfc684c1d debugfs_create_atomic_t vmlinux EXPORT_SYMBOL_GPL
+0x064db9a5 mark_mounts_for_expiry vmlinux EXPORT_SYMBOL_GPL
+0xf98d99ec bdi_put vmlinux EXPORT_SYMBOL
+0xc8a82465 file_check_and_advance_wb_err vmlinux EXPORT_SYMBOL
+0x0b2c64a3 raid6_vgfmul lib/raid6/raid6_pq EXPORT_SYMBOL
+0xba490602 nci_to_errno net/nfc/nci/nci EXPORT_SYMBOL
+0xf7e6ed7d nvme_cleanup_cmd drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xc9957204 __arch_copy_in_user vmlinux EXPORT_SYMBOL
+0x6b2941b2 __arch_copy_to_user vmlinux EXPORT_SYMBOL
+0x31f1d7aa inet_csk_reqsk_queue_drop_and_put vmlinux EXPORT_SYMBOL
+0x04bca599 drm_crtc_vblank_count_and_time vmlinux EXPORT_SYMBOL
+0xc47e5140 drm_send_event vmlinux EXPORT_SYMBOL
+0x6d340f64 tty_termios_input_baud_rate vmlinux EXPORT_SYMBOL
+0x0312b3b0 reset_controller_add_lookup vmlinux EXPORT_SYMBOL_GPL
+0xc64a697f regulator_bulk_set_supply_names vmlinux EXPORT_SYMBOL_GPL
+0xa8fb6321 vfs_get_fsid vmlinux EXPORT_SYMBOL
+0x39981d5d d_set_d_op vmlinux EXPORT_SYMBOL
+0x3771961d ns_capable_noaudit vmlinux EXPORT_SYMBOL
+0xc6b1fdbe xfrm_aalg_get_byidx net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL
+0xfe95ffc5 register_md_cluster_operations drivers/md/md-mod EXPORT_SYMBOL
+0x2ad3dea1 ps2_drain drivers/input/serio/libps2 EXPORT_SYMBOL
+0xf8e4e0d3 unregister_cdrom drivers/cdrom/cdrom EXPORT_SYMBOL
+0x380e0d30 fc_lport_recv drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xbc28e227 spi_attach_transport drivers/scsi/scsi_transport_spi EXPORT_SYMBOL
+0x3d5c2d1a unregister_tcf_proto_ops vmlinux EXPORT_SYMBOL
+0x1f458f5b devlink_port_register vmlinux EXPORT_SYMBOL_GPL
+0x91529bc0 __dev_get_by_flags vmlinux EXPORT_SYMBOL
+0x436a03b4 devm_regulator_get_exclusive vmlinux EXPORT_SYMBOL_GPL
+0xf991baff pci_cfg_access_unlock vmlinux EXPORT_SYMBOL_GPL
+0x922f45a6 __bitmap_clear vmlinux EXPORT_SYMBOL
+0xb6936ffe _bcd2bin vmlinux EXPORT_SYMBOL
+0xc03deaac finalize_exec vmlinux EXPORT_SYMBOL
+0x69824a14 ibcm_reject_msg drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0xd859ac8c o2net_fill_node_map fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x95a67b07 udp_table vmlinux EXPORT_SYMBOL
+0x7ac722bd phy_all_ports_features_array vmlinux EXPORT_SYMBOL_GPL
+0x935aebaa pnp_get_resource vmlinux EXPORT_SYMBOL
+0x9eef5122 acpi_bus_get_status vmlinux EXPORT_SYMBOL
+0xfbc9ea56 iterate_dir vmlinux EXPORT_SYMBOL
+0xd5529682 trace_event_buffer_reserve vmlinux EXPORT_SYMBOL_GPL
+0x2e2f1740 ring_buffer_record_disable_cpu vmlinux EXPORT_SYMBOL_GPL
+0x585f567b rds_message_populate_header net/rds/rds EXPORT_SYMBOL_GPL
+0x080a7c55 hinic_disable_nic_rss drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x162e4bf4 nfcmrvl_nci_unregister_dev drivers/nfc/nfcmrvl/nfcmrvl EXPORT_SYMBOL_GPL
+0x7b2702b9 skb_coalesce_rx_frag vmlinux EXPORT_SYMBOL
+0xe3001bfb power_supply_am_i_supplied vmlinux EXPORT_SYMBOL_GPL
+0xaf3dd7dc scsi_logging_level vmlinux EXPORT_SYMBOL
+0x1052600a dax_supported vmlinux EXPORT_SYMBOL_GPL
+0xee596df3 regcache_mark_dirty vmlinux EXPORT_SYMBOL_GPL
+0x42112a49 transport_configure_device vmlinux EXPORT_SYMBOL_GPL
+0xb4577003 acpi_dev_present vmlinux EXPORT_SYMBOL
+0xbf1b7e99 nla_put vmlinux EXPORT_SYMBOL
+0x5e855e56 gen_pool_first_fit_align vmlinux EXPORT_SYMBOL
+0xf23fcb99 __kfifo_in vmlinux EXPORT_SYMBOL
+0x75c50ff1 revalidate_disk vmlinux EXPORT_SYMBOL
+0xac0f8c45 register_shrinker vmlinux EXPORT_SYMBOL
+0x5fb8848b halt_poll_ns_grow_start vmlinux EXPORT_SYMBOL_GPL
+0x1b9a0453 xprt_wait_for_reply_request_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x359edbea ip_tunnel_dellink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x77218bf9 register_sound_dsp sound/soundcore EXPORT_SYMBOL
+0x60f0a272 rdma_res_to_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x5af078ae fcoe_get_wwn drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0x6cfb19d0 hwrng_register drivers/char/hw_random/rng-core EXPORT_SYMBOL_GPL
+0xc7a5be4e af_alg_poll crypto/af_alg EXPORT_SYMBOL_GPL
+0x1765ea1f __xa_alloc_cyclic vmlinux EXPORT_SYMBOL
+0xa3f18504 inet6_csk_route_req vmlinux EXPORT_SYMBOL
+0xa3c7c966 report_iommu_fault vmlinux EXPORT_SYMBOL_GPL
+0x30502647 misc_register vmlinux EXPORT_SYMBOL
+0xc2a13882 blk_post_runtime_suspend vmlinux EXPORT_SYMBOL
+0x5d1a4957 block_invalidatepage vmlinux EXPORT_SYMBOL
+0x7c1af440 may_umount_tree vmlinux EXPORT_SYMBOL
+0x9d8cc799 d_set_fallthru vmlinux EXPORT_SYMBOL
+0xdf9208c0 alloc_workqueue vmlinux EXPORT_SYMBOL_GPL
+0x8deb6cdc ip_tunnel_init_net net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x8ed0adc9 synproxy_recv_client_ack_ipv6 net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x51e0b041 ib_init_ah_from_mcmember drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6dc11b4d alloc_ep_req drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x1fe1e1ad locks_end_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL
+0x9aca7aa6 udp_lib_get_port vmlinux EXPORT_SYMBOL
+0x5ea44e51 ip_route_input_noref vmlinux EXPORT_SYMBOL
+0x44ac3631 skb_defer_rx_timestamp vmlinux EXPORT_SYMBOL_GPL
+0xe0b5cd16 netdev_walk_all_lower_dev_rcu vmlinux EXPORT_SYMBOL_GPL
+0x9fd7814e ata_sff_wait_ready vmlinux EXPORT_SYMBOL_GPL
+0xf2b5a157 dev_attr_unload_heads vmlinux EXPORT_SYMBOL_GPL
+0x88386f74 drm_modeset_backoff vmlinux EXPORT_SYMBOL
+0xc618567e tty_ldisc_ref vmlinux EXPORT_SYMBOL_GPL
+0x68730a39 fb_deferred_io_mmap vmlinux EXPORT_SYMBOL
+0xb2c064f6 devm_ioport_map vmlinux EXPORT_SYMBOL
+0xe23040f2 fscrypt_decrypt_bio vmlinux EXPORT_SYMBOL
+0x1f6b105a file_modified vmlinux EXPORT_SYMBOL
+0xc807af42 kernel_write vmlinux EXPORT_SYMBOL
+0x78b44fb3 __devm_irq_alloc_descs vmlinux EXPORT_SYMBOL_GPL
+0x4522d82a lc_reset lib/lru_cache EXPORT_SYMBOL
+0x749849d8 LZ4_loadDict lib/lz4/lz4_compress EXPORT_SYMBOL
+0xc9b72abf ceph_msg_data_add_pagelist net/ceph/libceph EXPORT_SYMBOL
+0xf42258d3 dccp_sendmsg net/dccp/dccp EXPORT_SYMBOL_GPL
+0xf728e62b rpc_put_sb_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xce238924 mtd_ooblayout_set_eccbytes drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x2c73b27c mtd_ooblayout_get_eccbytes drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0xb9589a26 hinic_api_cmd_write_nack drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x5b2c113c hinic_get_sq_wqe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xf8d3ef3d nfs_show_devname fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x33d36959 tcp_md5_do_add vmlinux EXPORT_SYMBOL
+0xd6995174 rtnl_af_unregister vmlinux EXPORT_SYMBOL_GPL
+0xbfee3ad5 loop_unregister_transfer vmlinux EXPORT_SYMBOL
+0x2d838e37 class_dev_iter_exit vmlinux EXPORT_SYMBOL_GPL
+0xf87b236d device_find_child_by_name vmlinux EXPORT_SYMBOL_GPL
+0x307c84b8 blk_integrity_unregister vmlinux EXPORT_SYMBOL
+0xf1b1ede9 iget_failed vmlinux EXPORT_SYMBOL
+0xce678a59 xdr_decode_netobj net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6f7f9428 __fscache_acquire_cookie fs/fscache/fscache EXPORT_SYMBOL
+0x958df3ac zs_free mm/zsmalloc EXPORT_SYMBOL_GPL
+0x4ccb971f ata_bmdma_dumb_qc_prep vmlinux EXPORT_SYMBOL_GPL
+0xe5bdad41 sata_set_spd vmlinux EXPORT_SYMBOL_GPL
+0xfe8ed830 devm_clk_bulk_get_optional vmlinux EXPORT_SYMBOL_GPL
+0xf05c5a07 pci_restore_state vmlinux EXPORT_SYMBOL
+0x652032cb mac_pton vmlinux EXPORT_SYMBOL
+0xf331236f btree_geo32 vmlinux EXPORT_SYMBOL_GPL
+0x9a53f74c filemap_fdatawait_range vmlinux EXPORT_SYMBOL
+0xebf8ad08 register_atm_ioctl net/atm/atm EXPORT_SYMBOL
+0x0c2fc676 __tracepoint_mlx5_fs_del_fte drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x2b63ff68 mlx5_modify_header_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x82d066ea mlx5_eq_notifier_register drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x92f6277c tc_setup_flow_action vmlinux EXPORT_SYMBOL
+0x493af25f ethtool_intersect_link_masks vmlinux EXPORT_SYMBOL
+0x264b18be skb_complete_tx_timestamp vmlinux EXPORT_SYMBOL_GPL
+0xdd0e3a7d acpi_bus_trim vmlinux EXPORT_SYMBOL_GPL
+0xe5e9190a debugfs_create_file_unsafe vmlinux EXPORT_SYMBOL_GPL
+0x539e7c1f fuse_free_conn vmlinux EXPORT_SYMBOL_GPL
+0x1bb80f6c inode_set_bytes vmlinux EXPORT_SYMBOL
+0x14a9c30a inode_sub_bytes vmlinux EXPORT_SYMBOL
+0x8d568868 ceph_alloc_page_vector net/ceph/libceph EXPORT_SYMBOL
+0xb721e508 svc_unreg_xprt_class net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe2927bd8 rpc_prepare_reply_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5a7597a9 vhost_dev_check_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xde1f8ab5 ib_cm_notify drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x2fee222a rdma_nl_unicast drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3cbed0c5 mlx5_modify_header_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xa9be2548 mlx5_buf_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x63fb21d6 mlx4_buf_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x001602c4 dlmlock fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x7e74bf2d arp_xmit vmlinux EXPORT_SYMBOL
+0x1d6b2ca9 flow_rule_match_tcp vmlinux EXPORT_SYMBOL
+0x3fa3bc4f __dev_getfirstbyhwtype vmlinux EXPORT_SYMBOL
+0x0230719c __dev_get_by_index vmlinux EXPORT_SYMBOL
+0x1464ad67 sock_inuse_get vmlinux EXPORT_SYMBOL_GPL
+0x99691d14 pps_register_source vmlinux EXPORT_SYMBOL
+0x87553d16 usb_serial_generic_write vmlinux EXPORT_SYMBOL_GPL
+0x03546234 drm_gem_shmem_prime_import_sg_table vmlinux EXPORT_SYMBOL_GPL
+0xfb63f72c drm_gem_shmem_get_pages vmlinux EXPORT_SYMBOL
+0x0db6209c regulator_desc_list_voltage_linear_range vmlinux EXPORT_SYMBOL_GPL
+0xa3a7f566 clkdev_hw_create vmlinux EXPORT_SYMBOL_GPL
+0xe03b7519 pci_release_selected_regions vmlinux EXPORT_SYMBOL
+0x9ec24137 debugfs_real_fops vmlinux EXPORT_SYMBOL_GPL
+0x08ea343a cgroup_rstat_updated vmlinux EXPORT_SYMBOL_GPL
+0x2fc49f86 kvm_io_bus_get_dev vmlinux EXPORT_SYMBOL_GPL
+0xafb8a407 ceph_msgr_flush net/ceph/libceph EXPORT_SYMBOL
+0xedcf6be4 qword_add net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xda0b93b9 rpc_max_bc_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x99547910 ip6_tnl_encap_setup net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL
+0x9fa8a0b4 snd_timer_interrupt sound/core/snd-timer EXPORT_SYMBOL
+0x3b321c9b capi_ctr_suspend_output drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0xc2392a50 ppp_register_net_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x6b77f3b0 mlx4_get_protocol_dev drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x7662f737 fc_eh_device_reset drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x165a84ec __inet6_lookup_established vmlinux EXPORT_SYMBOL
+0x1a528bdc ipv6_stub vmlinux EXPORT_SYMBOL_GPL
+0xda6e1342 dm_bio_from_per_bio_data vmlinux EXPORT_SYMBOL_GPL
+0xd6d3ef93 clk_get vmlinux EXPORT_SYMBOL
+0x7981274f __nla_put_nohdr vmlinux EXPORT_SYMBOL
+0xbab24c74 blk_mq_end_request vmlinux EXPORT_SYMBOL
+0x4bc40fa0 blk_register_queue vmlinux EXPORT_SYMBOL_GPL
+0x84b5365e crypto_unregister_instance vmlinux EXPORT_SYMBOL_GPL
+0xc802bdf1 mrp_uninit_applicant net/802/mrp EXPORT_SYMBOL_GPL
+0xefb69c71 md_register_thread drivers/md/md-mod EXPORT_SYMBOL
+0x0fcee300 mlx5_modify_nic_vport_vlans drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x48d87a74 mr_mfc_find_parent vmlinux EXPORT_SYMBOL
+0x40973662 sysctl_udp_mem vmlinux EXPORT_SYMBOL
+0xa0ebd14c sysctl_tcp_mem vmlinux EXPORT_SYMBOL
+0x6e286604 hdmi_drm_infoframe_pack vmlinux EXPORT_SYMBOL
+0xdab5a1eb interval_tree_insert vmlinux EXPORT_SYMBOL_GPL
+0xbe9f327e bio_chain vmlinux EXPORT_SYMBOL
+0x5e51cd74 swiotlb_nr_tbl vmlinux EXPORT_SYMBOL_GPL
+0xfd748217 __cpuhp_remove_state_cpuslocked vmlinux EXPORT_SYMBOL
+0x67c07714 nfc_hci_send_cmd net/nfc/hci/hci EXPORT_SYMBOL
+0x575a5a74 mlx5_get_flow_namespace drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x25d04e8b inet_csk_accept vmlinux EXPORT_SYMBOL
+0x677f3ac8 inet_hash_connect vmlinux EXPORT_SYMBOL_GPL
+0x47dbfdfb nf_log_trace vmlinux EXPORT_SYMBOL
+0xc88bf1be reuseport_add_sock vmlinux EXPORT_SYMBOL
+0x6b81c38b power_supply_unreg_notifier vmlinux EXPORT_SYMBOL_GPL
+0x380105c8 generic_mii_ioctl vmlinux EXPORT_SYMBOL
+0x6e8848d9 pci_add_resource vmlinux EXPORT_SYMBOL
+0x0334da4e scsi_command_size_tbl vmlinux EXPORT_SYMBOL
+0x2152bd22 skcipher_walk_async vmlinux EXPORT_SYMBOL_GPL
+0x08835883 do_splice_direct vmlinux EXPORT_SYMBOL
+0xce0c0edd inode_congested vmlinux EXPORT_SYMBOL_GPL
+0xa843805a get_unused_fd_flags vmlinux EXPORT_SYMBOL
+0x01553371 vm_brk_flags vmlinux EXPORT_SYMBOL
+0xb1e25684 __trace_bputs vmlinux EXPORT_SYMBOL_GPL
+0x16e297c3 bit_wait vmlinux EXPORT_SYMBOL
+0x9b5de0de ceph_osdc_call net/ceph/libceph EXPORT_SYMBOL
+0x5ca466ee p9_client_wstat net/9p/9pnet EXPORT_SYMBOL
+0xee3e5550 xprt_wake_pending_tasks net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc248bde2 dm_disk_bitset_init drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x63fc449c fc_lport_init drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x5db4b833 nf_hook_slow_list vmlinux EXPORT_SYMBOL
+0xcd0cc408 tcf_unregister_action vmlinux EXPORT_SYMBOL
+0x3a1a26c0 usb_of_get_device_node vmlinux EXPORT_SYMBOL_GPL
+0x7a92cf3a usb_bulk_msg vmlinux EXPORT_SYMBOL_GPL
+0x1035d58c drm_atomic_get_old_connector_for_encoder vmlinux EXPORT_SYMBOL
+0x19032d6d trace_handle_return vmlinux EXPORT_SYMBOL_GPL
+0x18282638 ceph_messenger_fini net/ceph/libceph EXPORT_SYMBOL
+0x831f800f svc_generic_rpcbind_set net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xda4a8ec1 vq_meta_prefetch drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x6bd663e8 rdma_modify_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xcfd7c7f3 hdlcdrv_unregister drivers/net/hamradio/hdlcdrv EXPORT_SYMBOL
+0x11bea438 crypto_engine_start crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x63e14e3c __tracepoint_nfs_fsync_exit fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x669bf73f xfrm_state_delete_tunnel vmlinux EXPORT_SYMBOL
+0xd31c473d sock_edemux vmlinux EXPORT_SYMBOL
+0x06548e3e phy_attached_print vmlinux EXPORT_SYMBOL
+0x1854c654 device_remove_properties vmlinux EXPORT_SYMBOL_GPL
+0x7d587851 platform_device_add_resources vmlinux EXPORT_SYMBOL_GPL
+0xcf818899 drm_gem_vram_mmap_offset
vmlinux EXPORT_SYMBOL +0x1b340fc6 acpiphp_register_attention vmlinux EXPORT_SYMBOL_GPL +0x4688d7ec pvclock_gtod_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x5625fb09 irq_chip_eoi_parent vmlinux EXPORT_SYMBOL_GPL +0xfda0eda1 gfn_to_pfn_prot vmlinux EXPORT_SYMBOL_GPL +0xd2ac2470 ceph_osdc_cancel_request net/ceph/libceph EXPORT_SYMBOL +0x575454ae p9_client_xattrcreate net/9p/9pnet EXPORT_SYMBOL_GPL +0x986c35da ib_set_vf_link_state drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x28c1de5c fscache_add_cache fs/fscache/fscache EXPORT_SYMBOL +0xcf9f3328 dlm_release_lockspace fs/dlm/dlm EXPORT_SYMBOL_GPL +0xf82ec573 rb_prev vmlinux EXPORT_SYMBOL +0x3322ae96 in6_dev_finish_destroy vmlinux EXPORT_SYMBOL +0x1ee6d0dc eth_header vmlinux EXPORT_SYMBOL +0xe590dea3 sk_busy_loop_end vmlinux EXPORT_SYMBOL +0xde07799d of_irq_parse_raw vmlinux EXPORT_SYMBOL_GPL +0x29e831cf of_property_count_elems_of_size vmlinux EXPORT_SYMBOL_GPL +0xf262965f sas_expander_alloc vmlinux EXPORT_SYMBOL +0x7fc1a0c6 scsi_rescan_device vmlinux EXPORT_SYMBOL +0x02ea111e scsi_driverbyte_string vmlinux EXPORT_SYMBOL +0xa14ad165 drm_modeset_lock_init vmlinux EXPORT_SYMBOL +0xf9a72c71 drm_gem_put_pages vmlinux EXPORT_SYMBOL +0xe62f9637 drm_gem_get_pages vmlinux EXPORT_SYMBOL +0x59ac0532 drm_fbdev_generic_setup vmlinux EXPORT_SYMBOL +0x9ed554b3 unregister_keyboard_notifier vmlinux EXPORT_SYMBOL_GPL +0x2853c455 unregister_framebuffer vmlinux EXPORT_SYMBOL +0x0e2fc9cd jbd2_journal_init_dev vmlinux EXPORT_SYMBOL +0x80b7869d forget_cached_acl vmlinux EXPORT_SYMBOL +0xe75c620b bd_unlink_disk_holder vmlinux EXPORT_SYMBOL_GPL +0xa9e18049 task_handoff_unregister vmlinux EXPORT_SYMBOL_GPL +0xfb746cc9 down_killable vmlinux EXPORT_SYMBOL +0x143de511 rdma_nl_put_driver_u32 drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd02205d2 capi20_register drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xe3c17af1 __closure_wake_up drivers/md/bcache/bcache EXPORT_SYMBOL +0xb3b0a473 netdev_master_upper_dev_get_rcu vmlinux EXPORT_SYMBOL +0x87b03ae6 napi_gro_receive vmlinux EXPORT_SYMBOL +0x8ac07c49 drm_master_internal_release vmlinux EXPORT_SYMBOL +0xd476bd57 drm_gem_fb_destroy vmlinux EXPORT_SYMBOL +0x9c607698 mctrl_gpio_free vmlinux EXPORT_SYMBOL_GPL +0x60091316 clk_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x6b3ae022 acpi_os_unmap_iomem vmlinux EXPORT_SYMBOL_GPL +0x0a3f1e91 pci_reset_function vmlinux EXPORT_SYMBOL_GPL +0x34e053e2 rhashtable_walk_start_check vmlinux EXPORT_SYMBOL_GPL +0x5bbba043 debugfs_print_regs32 vmlinux EXPORT_SYMBOL_GPL +0x5302d6c7 buffer_check_dirty_writeback vmlinux EXPORT_SYMBOL +0x4be982a8 phonet_stream_ops net/phonet/phonet EXPORT_SYMBOL +0xfb4e2726 cdrom_check_events drivers/cdrom/cdrom EXPORT_SYMBOL +0xeae3dfd6 __const_udelay vmlinux EXPORT_SYMBOL +0x469a6ec7 tcp_parse_md5sig_option vmlinux EXPORT_SYMBOL +0x40b68000 nf_unregister_queue_handler vmlinux EXPORT_SYMBOL +0x24eb7e32 leds_list vmlinux EXPORT_SYMBOL_GPL +0x679b00d5 kill_dev_dax vmlinux EXPORT_SYMBOL_GPL +0x79567066 ttm_dma_tt_init vmlinux EXPORT_SYMBOL +0xd92deb6b acpi_evaluate_object vmlinux EXPORT_SYMBOL +0xd7b33adf do_clone_file_range vmlinux EXPORT_SYMBOL +0xe47a0283 dump_page vmlinux EXPORT_SYMBOL +0x638aff11 proc_douintvec_minmax vmlinux EXPORT_SYMBOL_GPL +0xa9d16f34 kvm_release_page_clean vmlinux EXPORT_SYMBOL_GPL +0x8946ea72 fpsimd_context_busy vmlinux EXPORT_SYMBOL +0xc7d62e22 capi_ctr_handle_message drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x84a3ae81 mlx4_ALLOCATE_VPP_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xa77da2f3 
mlx4_ALLOCATE_VPP_get drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x59eaadf9 s3fwrn5_probe drivers/nfc/s3fwrn5/s3fwrn5 EXPORT_SYMBOL +0x31cab048 ipmi_smi_msg_received drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x7cee71e7 qtree_get_next_id fs/quota/quota_tree EXPORT_SYMBOL +0xf44a904a net_ns_barrier vmlinux EXPORT_SYMBOL +0x684ce1ff drm_atomic_private_obj_fini vmlinux EXPORT_SYMBOL +0x7647726c handle_sysrq vmlinux EXPORT_SYMBOL +0xb970ac15 pcie_port_bus_type vmlinux EXPORT_SYMBOL_GPL +0x54a9db5f _kstrtoul vmlinux EXPORT_SYMBOL +0x3faafb8c bioset_init_from_src vmlinux EXPORT_SYMBOL +0x2cea32ee unregister_oldmem_pfn_is_ram vmlinux EXPORT_SYMBOL_GPL +0x193cadaa __block_write_begin vmlinux EXPORT_SYMBOL +0x3517383e register_reboot_notifier vmlinux EXPORT_SYMBOL +0x95bd8f96 p9dirent_read net/9p/9pnet EXPORT_SYMBOL +0xb649aa40 p9_client_readdir net/9p/9pnet EXPORT_SYMBOL +0xddd46aa5 dccp_send_ack net/dccp/dccp EXPORT_SYMBOL_GPL +0x19567d06 vfio_info_cap_shift drivers/vfio/vfio EXPORT_SYMBOL +0xb456eb02 iscsit_handle_logout_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x16022bdc iscsit_find_cmd_from_itt drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xd41c5408 mlx5_nic_vport_unaffiliate_multiport drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xebae76b9 serpent_setkey crypto/serpent_generic EXPORT_SYMBOL_GPL +0xb53f2810 tcp_sockets_allocated vmlinux EXPORT_SYMBOL +0x41b329e5 sk_free_unlock_clone vmlinux EXPORT_SYMBOL_GPL +0x27253a2c hisi_sas_sync_rst_work_handler vmlinux EXPORT_SYMBOL_GPL +0xcd8b879a drm_err vmlinux EXPORT_SYMBOL +0x5572123f tpm_chip_unregister vmlinux EXPORT_SYMBOL_GPL +0x151269e0 inode_owner_or_capable vmlinux EXPORT_SYMBOL +0x253486fd generic_pipe_buf_confirm vmlinux EXPORT_SYMBOL +0xeceb821c get_super_thawed vmlinux EXPORT_SYMBOL +0xd6df0627 l2tp_ioctl net/l2tp/l2tp_ip EXPORT_SYMBOL +0xb2b5673c mlx5_query_nic_vport_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc13dd97a usb_gadget_vbus_connect vmlinux EXPORT_SYMBOL_GPL +0x217bf750 usb_gadget_frame_number vmlinux EXPORT_SYMBOL_GPL +0xef8c31e5 scsi_device_quiesce vmlinux EXPORT_SYMBOL +0xe0deeda0 drm_modeset_unlock_all vmlinux EXPORT_SYMBOL +0x8aafe76d gssd_running net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5fc6a8e9 synproxy_parse_options net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x19129e97 snd_pcm_hw_param_first sound/core/snd-pcm EXPORT_SYMBOL +0x39535b1f usb_put_function drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x5ab4883a mlx5_core_query_sq_state drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb1bfcad6 inet6_protos vmlinux EXPORT_SYMBOL +0x5e3b1a12 of_irq_find_parent vmlinux EXPORT_SYMBOL_GPL +0x324dbdc0 drm_bridge_mode_valid vmlinux EXPORT_SYMBOL +0x4400b009 pci_select_bars vmlinux EXPORT_SYMBOL +0xf184d189 kernel_power_off vmlinux EXPORT_SYMBOL_GPL +0x8bf260cd svc_create_pooled net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x852a30a2 rpc_clnt_xprt_switch_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd732bb35 ip6t_unregister_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL +0x3cbdb2e4 snd_pcm_new sound/core/snd-pcm EXPORT_SYMBOL +0x5988a7be rdma_set_reuseaddr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xae1de20f ib_find_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd1ef9fc0 mlx5_register_interface drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7af9556c mlx4_register_interface drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6f134246 nvme_delete_ctrl 
drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xd74552ca qtree_write_dquot fs/quota/quota_tree EXPORT_SYMBOL +0x00ba10fe devlink_dpipe_entry_ctx_close vmlinux EXPORT_SYMBOL_GPL +0x3c826a28 xhci_dbg_trace vmlinux EXPORT_SYMBOL_GPL +0xb56392a7 sas_unregister_ha vmlinux EXPORT_SYMBOL_GPL +0xeabce505 sas_read_port_mode_page vmlinux EXPORT_SYMBOL +0x78250d83 __ata_ehi_push_desc vmlinux EXPORT_SYMBOL_GPL +0xc461bb2d drm_panel_attach vmlinux EXPORT_SYMBOL +0xf54c1686 con_copy_unimap vmlinux EXPORT_SYMBOL +0xda79ee8b tty_ldisc_ref_wait vmlinux EXPORT_SYMBOL_GPL +0xcce9ec68 blk_rq_count_integrity_sg vmlinux EXPORT_SYMBOL +0x612fac42 debugfs_create_u64 vmlinux EXPORT_SYMBOL_GPL +0x71b06da3 dquot_quota_on_mount vmlinux EXPORT_SYMBOL +0x10ff5d8c block_commit_write vmlinux EXPORT_SYMBOL +0x6dc433ce clone_private_mount vmlinux EXPORT_SYMBOL_GPL +0x8e8cb701 lapb_setparms net/lapb/lapb EXPORT_SYMBOL +0xdfc3e7cf _snd_pcm_lib_alloc_vmalloc_buffer sound/core/snd-pcm EXPORT_SYMBOL +0xa31217b6 mlx5_eq_create_generic drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x87eef177 __netdev_alloc_skb vmlinux EXPORT_SYMBOL +0x982713d6 pm_clk_resume vmlinux EXPORT_SYMBOL_GPL +0x76943503 drm_sched_dependency_optimized vmlinux EXPORT_SYMBOL +0x4be00bfc pcie_capability_read_word vmlinux EXPORT_SYMBOL +0xaa6b38c5 perf_event_create_kernel_counter vmlinux EXPORT_SYMBOL_GPL +0xeb31352e irq_chip_release_resources_parent vmlinux EXPORT_SYMBOL_GPL +0x76213e43 kvm_io_bus_write vmlinux EXPORT_SYMBOL_GPL +0x99dce33b mlx4_free_hwq_res drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x74643659 hinic_free_cmd_buf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x529a712f hinic_get_pf_dcb_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8caa72ff nvme_sync_queues drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xfc304d80 drm_fb_helper_fbdev_teardown vmlinux EXPORT_SYMBOL +0x71bcc545 tty_kref_put vmlinux EXPORT_SYMBOL +0x1b6a46f6 regulator_list_voltage_linear vmlinux EXPORT_SYMBOL_GPL +0x56310925 regulator_mode_to_status vmlinux EXPORT_SYMBOL_GPL +0x6c1971cd fb_set_suspend vmlinux EXPORT_SYMBOL +0x506dff1a __genradix_free vmlinux EXPORT_SYMBOL +0xa6d096ba locks_delete_block vmlinux EXPORT_SYMBOL +0x86fe0aef __lookup_constant vmlinux EXPORT_SYMBOL +0x20f78f0d dput vmlinux EXPORT_SYMBOL +0x0816bb29 try_module_get vmlinux EXPORT_SYMBOL +0x453e9bea rpc_proc_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb08cbfa8 rpc_delay net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x51610314 mlxfw_firmware_flash drivers/net/ethernet/mellanox/mlxfw/mlxfw EXPORT_SYMBOL +0xdc415cf1 mlxsw_afa_block_continue drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xd2a37853 register_net_sysctl vmlinux EXPORT_SYMBOL_GPL +0x8a490c90 rfkill_set_sw_state vmlinux EXPORT_SYMBOL +0x29585d27 dst_dev_put vmlinux EXPORT_SYMBOL +0x260e1622 fill_inquiry_response vmlinux EXPORT_SYMBOL_GPL +0xaa2cab02 usb_match_one_id vmlinux EXPORT_SYMBOL_GPL +0xc428068d sata_deb_timing_long vmlinux EXPORT_SYMBOL_GPL +0x791f74c8 screen_glyph vmlinux EXPORT_SYMBOL_GPL +0x665e92a0 clk_set_duty_cycle vmlinux EXPORT_SYMBOL_GPL +0x92095d1d srcu_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x20de5193 kvm_disable_largepages vmlinux EXPORT_SYMBOL_GPL +0x7a2af7b4 cpu_number vmlinux EXPORT_SYMBOL +0x1e6b75a3 nft_parse_register net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x305e5701 rdma_addr_size_kss drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb74d3ef0 vlan_vids_del_by_dev vmlinux EXPORT_SYMBOL +0xdf62ebc5 ip6_route_output_flags_noref vmlinux 
EXPORT_SYMBOL_GPL +0x4322b791 sk_free vmlinux EXPORT_SYMBOL +0x2cd9f2f7 scsi_is_sdev_device vmlinux EXPORT_SYMBOL +0x56af5d78 regmap_check_range_table vmlinux EXPORT_SYMBOL_GPL +0x9065f72f fwnode_graph_get_port_parent vmlinux EXPORT_SYMBOL_GPL +0x355adf0f drm_atomic_get_mst_topology_state vmlinux EXPORT_SYMBOL +0xdf141b89 clk_gpio_gate_ops vmlinux EXPORT_SYMBOL_GPL +0xdba0583f clk_hw_register_divider vmlinux EXPORT_SYMBOL_GPL +0x2a303d4d check_signature vmlinux EXPORT_SYMBOL +0xc30192f6 gpu_clean vmlinux EXPORT_SYMBOL_GPL +0x5da1992b parport_set_timeout drivers/parport/parport EXPORT_SYMBOL +0x04879a3b mlx5_frag_buf_alloc_node drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xd46f9fd7 kobject_init vmlinux EXPORT_SYMBOL +0x081cd720 skb_scrub_packet vmlinux EXPORT_SYMBOL_GPL +0x84c538f1 regulator_list_voltage_table vmlinux EXPORT_SYMBOL_GPL +0xbf991613 pci_bus_max_busnr vmlinux EXPORT_SYMBOL_GPL +0x819cccdf blk_set_pm_only vmlinux EXPORT_SYMBOL_GPL +0x785a93b4 si_mem_available vmlinux EXPORT_SYMBOL_GPL +0xb2e764e8 suspend_valid_only_mem vmlinux EXPORT_SYMBOL_GPL +0x42160169 flush_workqueue vmlinux EXPORT_SYMBOL +0xda666773 dccp_ioctl net/dccp/dccp EXPORT_SYMBOL_GPL +0x31622008 mdev_from_dev drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xff5cb3a3 parport_read drivers/parport/parport EXPORT_SYMBOL +0xed7761e2 gether_get_dev_addr drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x6e248eff gether_set_dev_addr drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0xac5d9d18 mpt_suspend drivers/message/fusion/mptbase EXPORT_SYMBOL +0xd75fbc81 ping_close vmlinux EXPORT_SYMBOL_GPL +0xe1502545 __tracepoint_neigh_timer_handler vmlinux EXPORT_SYMBOL_GPL +0xa048296a skb_flow_dissect_ct vmlinux EXPORT_SYMBOL +0xd59f8e36 ata_link_online vmlinux EXPORT_SYMBOL_GPL +0xca1988f5 drm_plane_create_rotation_property vmlinux EXPORT_SYMBOL +0x34a84df3 __tracepoint_detach_device_from_domain vmlinux EXPORT_SYMBOL_GPL +0x6cbd400c key_type_user vmlinux EXPORT_SYMBOL_GPL +0x076a1d8c nosteal_pipe_buf_ops vmlinux EXPORT_SYMBOL +0xc59d0243 simple_lookup vmlinux EXPORT_SYMBOL +0xfe217190 get_user_pages_locked vmlinux EXPORT_SYMBOL +0xf5d7eb5a register_ftrace_export vmlinux EXPORT_SYMBOL_GPL +0x5e3240a0 __cpu_online_mask vmlinux EXPORT_SYMBOL +0xc95beba9 p9_client_clunk net/9p/9pnet EXPORT_SYMBOL +0xe7792ba6 key_type_rxrpc net/rxrpc/rxrpc EXPORT_SYMBOL +0x2f700e3c ib_query_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfa599bb2 netlink_register_notifier vmlinux EXPORT_SYMBOL +0xa326d3a1 __skb_get_hash vmlinux EXPORT_SYMBOL +0x12db1031 dm_io vmlinux EXPORT_SYMBOL +0x10c3f894 usb_match_id vmlinux EXPORT_SYMBOL_GPL +0x56be148a phylink_mii_ioctl vmlinux EXPORT_SYMBOL_GPL +0xb97bc6f7 __scsi_execute vmlinux EXPORT_SYMBOL +0x72aad2a8 gpiochip_disable_irq vmlinux EXPORT_SYMBOL_GPL +0x21daf769 ct_get_vmip_vpcid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xe9e799fc ib_ud_header_pack drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4eaf0c3d hinic_get_board_info drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x0eb59364 xfrm_policy_insert vmlinux EXPORT_SYMBOL +0xad54e423 lwtunnel_input vmlinux EXPORT_SYMBOL_GPL +0x181aba6d skb_segment vmlinux EXPORT_SYMBOL_GPL +0xdb7e1b73 dm_suspended vmlinux EXPORT_SYMBOL_GPL +0x12d01ee8 usb_add_hcd vmlinux EXPORT_SYMBOL_GPL +0x33f66d09 phylink_create vmlinux EXPORT_SYMBOL_GPL +0x4fc22123 hisi_sas_stt vmlinux EXPORT_SYMBOL_GPL +0x598a4638 blk_queue_max_segments vmlinux EXPORT_SYMBOL +0x8eac6833 unlock_rename vmlinux EXPORT_SYMBOL +0x0105b595 des_encrypt 
lib/crypto/libdes EXPORT_SYMBOL_GPL +0x145f0782 ip_vs_conn_put net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x79e966e2 ib_device_set_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x9968f211 capi20_put_message drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x2e0774dc dm_bufio_get_block_number drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0xfd062399 md_update_sb drivers/md/md-mod EXPORT_SYMBOL +0x9ea94e8c tap_create_cdev drivers/net/tap EXPORT_SYMBOL_GPL +0x09d60dba skb_push vmlinux EXPORT_SYMBOL +0xc7477bbb iscsi_session_chkready vmlinux EXPORT_SYMBOL_GPL +0x08135613 dax_write_cache vmlinux EXPORT_SYMBOL_GPL +0xb4d97efd device_create vmlinux EXPORT_SYMBOL_GPL +0x4150510b drm_gem_dmabuf_export vmlinux EXPORT_SYMBOL +0x100dd8b7 simple_rmdir vmlinux EXPORT_SYMBOL +0x1f53a078 free_task vmlinux EXPORT_SYMBOL +0x3af0a0ab tcp_vegas_get_info net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0xad407a27 rdma_nl_put_driver_u64 drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3f11622d mtd_write drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xf849b997 passthrough_attrib_attrs drivers/target/target_core_mod EXPORT_SYMBOL +0x28991160 __tracepoint_bcache_read_retry drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xd13e4292 nvme_uninit_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x65267047 crypto_poly1305_setdesckey crypto/poly1305_generic EXPORT_SYMBOL_GPL +0x0b2cb334 psched_ratecfg_precompute vmlinux EXPORT_SYMBOL +0x575ad1c2 i2c_del_adapter vmlinux EXPORT_SYMBOL +0x61a71507 i2c_adapter_depth vmlinux EXPORT_SYMBOL_GPL +0x5838f6c9 rtc_valid_tm vmlinux EXPORT_SYMBOL +0x12105090 usb_register_driver vmlinux EXPORT_SYMBOL_GPL +0x43c5fe83 iscsi_find_flashnode_conn vmlinux EXPORT_SYMBOL_GPL +0x6a7ff48d scsi_block_requests vmlinux EXPORT_SYMBOL +0x30781ff2 drm_atomic_helper_commit_modeset_disables vmlinux EXPORT_SYMBOL +0x582f248e drm_dp_get_adjust_request_pre_emphasis vmlinux EXPORT_SYMBOL +0x773fa409 __kfifo_dma_in_finish_r vmlinux EXPORT_SYMBOL +0x82854dc5 blk_poll vmlinux EXPORT_SYMBOL_GPL +0x543ef284 seq_hlist_start vmlinux EXPORT_SYMBOL +0x26ab4755 put_old_itimerspec32 vmlinux EXPORT_SYMBOL_GPL +0x44bae227 bit_wait_timeout vmlinux EXPORT_SYMBOL_GPL +0x99c95fa5 unregister_sound_special sound/soundcore EXPORT_SYMBOL +0x7c4a6dcb led_blink_set_oneshot vmlinux EXPORT_SYMBOL_GPL +0x2f6c13a4 sas_end_device_alloc vmlinux EXPORT_SYMBOL +0x17f7f3a5 sdev_disable_disk_events vmlinux EXPORT_SYMBOL +0x914af09a ahci_save_initial_config vmlinux EXPORT_SYMBOL_GPL +0x2ed28f06 drm_bridge_attach vmlinux EXPORT_SYMBOL +0x25461144 tty_ldisc_release vmlinux EXPORT_SYMBOL_GPL +0xe51a53c1 acpi_dma_configure vmlinux EXPORT_SYMBOL_GPL +0x46c6f793 t10_pi_type3_ip vmlinux EXPORT_SYMBOL +0x2a3a256b fat_dir_empty vmlinux EXPORT_SYMBOL_GPL +0x3c7c8cb3 cdev_alloc vmlinux EXPORT_SYMBOL +0x5752f31b lc_del lib/lru_cache EXPORT_SYMBOL +0xd34e5358 rpc_put_task_async net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc4617807 iw_cm_connect drivers/infiniband/core/iw_cm EXPORT_SYMBOL +0x1b037462 __rdma_block_iter_start drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x0b3b5be4 mlx5_eq_update_ci drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x903226e0 mlx4_qp_reserve_range drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x9613c89f ocfs2_plock fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xf586deef usb_hcd_unlink_urb_from_ep vmlinux EXPORT_SYMBOL_GPL +0x2989205c device_wakeup_enable vmlinux EXPORT_SYMBOL_GPL +0x3b9144c9 acpi_get_current_resources vmlinux EXPORT_SYMBOL +0xdbf7cb70 mpi_get_nbits vmlinux EXPORT_SYMBOL_GPL +0x5d771d6a security_path_mkdir 
vmlinux EXPORT_SYMBOL +0xc4913442 vfio_group_put_external_user drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x072b40fb get_phy_device vmlinux EXPORT_SYMBOL +0xa410ca8e ata_cable_sata vmlinux EXPORT_SYMBOL_GPL +0xb5b981a8 wakeup_source_register vmlinux EXPORT_SYMBOL_GPL +0x81a73033 drm_dev_alloc vmlinux EXPORT_SYMBOL +0xbbbd48f9 drm_fb_helper_cfb_fillrect vmlinux EXPORT_SYMBOL +0x9349aa89 devprop_gpiochip_set_names vmlinux EXPORT_SYMBOL_GPL +0x9f46ced8 __sw_hweight64 vmlinux EXPORT_SYMBOL +0x57674fd7 __sw_hweight16 vmlinux EXPORT_SYMBOL +0x74c134b9 __sw_hweight32 vmlinux EXPORT_SYMBOL +0xce4e47b6 __kfifo_skip_r vmlinux EXPORT_SYMBOL +0x95c2f1fb fat_truncate_time vmlinux EXPORT_SYMBOL_GPL +0x3d8ab6c9 posix_acl_default_xattr_handler vmlinux EXPORT_SYMBOL_GPL +0x6892e3c3 kvm_set_pfn_accessed vmlinux EXPORT_SYMBOL_GPL +0xa155c071 ZSTD_compressBegin_usingDict lib/zstd/zstd_compress EXPORT_SYMBOL +0xeb9770da rds_message_unmapped net/rds/rds EXPORT_SYMBOL_GPL +0x8b1d6c5f svc_exit_thread net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa11225c4 ubi_leb_map drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x230094ac ipmi_smi_watchdog_pretimeout drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x1f000f06 nfs_lock fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5993e232 dlm_posix_lock fs/dlm/dlm EXPORT_SYMBOL_GPL +0x39e61495 nf_logger_request_module vmlinux EXPORT_SYMBOL_GPL +0xbb59d5e8 tcf_get_next_proto vmlinux EXPORT_SYMBOL +0x75dd4ebe of_overlay_remove vmlinux EXPORT_SYMBOL_GPL +0x5f532994 drm_hdcp_check_ksvs_revoked vmlinux EXPORT_SYMBOL_GPL +0xeff178d5 drm_gem_map_detach vmlinux EXPORT_SYMBOL +0x287eef1d drm_gem_map_attach vmlinux EXPORT_SYMBOL +0x7b5452b8 acpi_unregister_gsi vmlinux EXPORT_SYMBOL_GPL +0xd1e82b54 gpiod_direction_input vmlinux EXPORT_SYMBOL_GPL +0x6e0371de rpc_clnt_show_stats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9527c0c2 print_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x29ef0066 __tracepoint_bcache_btree_gc_coalesce drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x72678d69 hinic_msg_to_mgmt_async drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xe16e5014 hinic_alloc_cmd_buf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8df92f66 memchr_inv vmlinux EXPORT_SYMBOL +0xfa325df6 tcp_reno_cong_avoid vmlinux EXPORT_SYMBOL_GPL +0x607c4683 devlink_info_version_fixed_put vmlinux EXPORT_SYMBOL_GPL +0x531641ab hid_dump_report vmlinux EXPORT_SYMBOL_GPL +0xb25decd8 led_trigger_blink vmlinux EXPORT_SYMBOL_GPL +0x60806523 i2c_acpi_get_i2c_resource vmlinux EXPORT_SYMBOL_GPL +0xe0e5b2e5 usb_reset_device vmlinux EXPORT_SYMBOL_GPL +0x67009ee4 ata_sas_queuecmd vmlinux EXPORT_SYMBOL_GPL +0x6c1bdca6 drm_gem_shmem_purge vmlinux EXPORT_SYMBOL +0x769f6e64 errseq_check vmlinux EXPORT_SYMBOL +0x651922c9 kernfs_put vmlinux EXPORT_SYMBOL_GPL +0xf48e64c9 kernfs_get vmlinux EXPORT_SYMBOL_GPL +0xe6064097 __dquot_free_space vmlinux EXPORT_SYMBOL +0x890320be locks_copy_lock vmlinux EXPORT_SYMBOL +0xf403571c irq_domain_associate vmlinux EXPORT_SYMBOL_GPL +0x3e1f72e0 kvm_vcpu_cache vmlinux EXPORT_SYMBOL_GPL +0x1e119a79 rpcauth_get_pseudoflavor net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7e344609 usb_add_config_only drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xd0b18943 fc_lport_reset drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xca6984b1 flow_block_cb_lookup vmlinux EXPORT_SYMBOL +0xca662dc6 gnet_stats_start_copy_compat vmlinux EXPORT_SYMBOL +0x760dd282 rtc_alarm_irq_enable vmlinux EXPORT_SYMBOL_GPL +0x3d9c43b6 ttm_bo_pipeline_move vmlinux EXPORT_SYMBOL +0x412fc57c gpiod_get_index vmlinux EXPORT_SYMBOL_GPL +0x6f2353f4 
pcim_iounmap_regions vmlinux EXPORT_SYMBOL +0x0ee867ce check_move_unevictable_pages vmlinux EXPORT_SYMBOL_GPL +0x7522f3ba irq_modify_status vmlinux EXPORT_SYMBOL_GPL +0xcd2ba798 dm_bufio_forget drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x8f2feb10 dcbnl_ieee_notify vmlinux EXPORT_SYMBOL +0x1649c788 inet6_del_protocol vmlinux EXPORT_SYMBOL +0xdbe4d2b1 xfrm_input_resume vmlinux EXPORT_SYMBOL +0xd83c2bf4 udplite_prot vmlinux EXPORT_SYMBOL +0xdadf6f9f sock_create_kern vmlinux EXPORT_SYMBOL +0x51868aa8 stmmac_dvr_probe vmlinux EXPORT_SYMBOL_GPL +0x8fd6f9d5 scsi_eh_restore_cmnd vmlinux EXPORT_SYMBOL +0x610823e1 transport_remove_device vmlinux EXPORT_SYMBOL_GPL +0xede2149c drm_color_lut_extract vmlinux EXPORT_SYMBOL +0xfed84aa1 drm_class_device_register vmlinux EXPORT_SYMBOL_GPL +0x2f4606f0 bdev_write_page vmlinux EXPORT_SYMBOL_GPL +0x0c79d5ef vm_sockets_get_local_cid net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xcd5e7c95 cache_destroy_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x143ee839 mr_dump vmlinux EXPORT_SYMBOL +0x49f3ca81 sk_clone_lock vmlinux EXPORT_SYMBOL_GPL +0x2516f0c9 kernel_sock_ip_overhead vmlinux EXPORT_SYMBOL +0x25cbe3ce pcc_mbox_request_channel vmlinux EXPORT_SYMBOL_GPL +0x54c2af08 thermal_zone_get_zone_by_name vmlinux EXPORT_SYMBOL_GPL +0x2a678a13 __suspend_report_result vmlinux EXPORT_SYMBOL_GPL +0x13f7f483 vga_tryget vmlinux EXPORT_SYMBOL +0x134a2dbe drm_sched_job_init vmlinux EXPORT_SYMBOL +0xe2154ab8 drm_gtf_mode vmlinux EXPORT_SYMBOL +0x9cdb1012 regulator_count_voltages vmlinux EXPORT_SYMBOL_GPL +0x3ad7a5d5 acpi_evaluate_reference vmlinux EXPORT_SYMBOL +0x76b0a770 debugfs_file_put vmlinux EXPORT_SYMBOL_GPL +0x551009db debugfs_file_get vmlinux EXPORT_SYMBOL_GPL +0xe95b40a5 d_find_any_alias vmlinux EXPORT_SYMBOL +0x9f65c857 ZSTD_checkCParams lib/zstd/zstd_compress EXPORT_SYMBOL +0x7e8e4fad ceph_destroy_options net/ceph/libceph EXPORT_SYMBOL +0xec7ff13e snd_rawmidi_transmit_empty sound/core/snd-rawmidi EXPORT_SYMBOL +0xfaa137a7 snd_timer_open sound/core/snd-timer EXPORT_SYMBOL +0x969c73d9 vfio_device_put drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x0ca34ccf mlxsw_core_max_ports drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x6d5f5b91 radix_tree_tagged vmlinux EXPORT_SYMBOL +0xa609df6c vlan_uses_dev vmlinux EXPORT_SYMBOL +0xff819af1 sk_detach_filter vmlinux EXPORT_SYMBOL_GPL +0xd0e19e3f sk_attach_filter vmlinux EXPORT_SYMBOL_GPL +0xbd7504d8 i2c_dw_probe vmlinux EXPORT_SYMBOL_GPL +0x2a3900ea drm_bridge_remove vmlinux EXPORT_SYMBOL +0x4c96bf18 drm_atomic_helper_crtc_destroy_state vmlinux EXPORT_SYMBOL +0xf2c86a1c drm_dp_dpcd_write vmlinux EXPORT_SYMBOL +0xa5759b0b clk_register_gpio_gate vmlinux EXPORT_SYMBOL_GPL +0xab4462f7 fb_set_cmap vmlinux EXPORT_SYMBOL +0xb9a31234 pci_unregister_driver vmlinux EXPORT_SYMBOL +0xc890c008 zlib_deflateEnd vmlinux EXPORT_SYMBOL +0x33736a1d __genradix_ptr_alloc vmlinux EXPORT_SYMBOL +0x3d144c64 blk_mq_pci_map_queues vmlinux EXPORT_SYMBOL_GPL +0x16ac3077 unload_nls vmlinux EXPORT_SYMBOL +0x5f6ee4fa seq_open_private vmlinux EXPORT_SYMBOL +0x953e1b9e ktime_get_real_seconds vmlinux EXPORT_SYMBOL_GPL +0x54693645 v9fs_get_trans_by_name net/9p/9pnet EXPORT_SYMBOL +0xd1a5bc34 sunrpc_cache_update net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3a8fa37d ax25_listen_release net/ax25/ax25 EXPORT_SYMBOL +0xf6f9efbd ib_map_mr_sg_pi drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5420e6ac iscsit_aborted_task drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x878c6d6b tcp_conn_request vmlinux EXPORT_SYMBOL +0x742a4231 kernel_listen vmlinux EXPORT_SYMBOL +0x74324d52 
serio_unregister_driver vmlinux EXPORT_SYMBOL +0x25e2c466 drm_modeset_lock_all vmlinux EXPORT_SYMBOL +0x1f6b9064 tpm_seal_trusted vmlinux EXPORT_SYMBOL_GPL +0x89b00a48 pnp_register_card_driver vmlinux EXPORT_SYMBOL +0x2b593aa8 gen_pool_alloc_algo_owner vmlinux EXPORT_SYMBOL +0x3b644591 __bitmap_shift_left vmlinux EXPORT_SYMBOL +0xf1a3e43c gss_mech_get net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL +0x759b3f1e ebt_unregister_table net/bridge/netfilter/ebtables EXPORT_SYMBOL +0x06b60dc9 ip_set_type_register net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x9d79efb9 nat_rtp_rtcp_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x03f49fa4 hnae3_set_client_init_flag drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL +0x43f0217d fc_eh_abort drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xf8202a4b fc_set_rport_loss_tmo drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x4829a47e memcpy vmlinux EXPORT_SYMBOL +0x6a5e2bde __cookie_v6_init_sequence vmlinux EXPORT_SYMBOL_GPL +0x707297b4 rc_repeat vmlinux EXPORT_SYMBOL_GPL +0xf3083a1d phylink_destroy vmlinux EXPORT_SYMBOL_GPL +0x0dbe6a3f __scsi_device_lookup vmlinux EXPORT_SYMBOL +0xc9980c42 ata_bmdma_stop vmlinux EXPORT_SYMBOL_GPL +0x50674de7 drm_timeout_abs_to_jiffies vmlinux EXPORT_SYMBOL +0xe780d3f9 clk_hw_register_gpio_gate vmlinux EXPORT_SYMBOL_GPL +0xbe5c888b crypto_chain vmlinux EXPORT_SYMBOL_GPL +0x7412ed5b kvfree_sensitive vmlinux EXPORT_SYMBOL +0xcf5c6c05 perf_aux_output_begin vmlinux EXPORT_SYMBOL_GPL +0xd819a524 crc_itu_t_table lib/crc-itu-t EXPORT_SYMBOL +0x26c65e89 rpc_run_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe3853423 ip6t_do_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL +0x5efa6669 ib_sa_unregister_client drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x190e30bd closure_put drivers/md/bcache/bcache EXPORT_SYMBOL +0x320c68dc i8042_remove_filter drivers/input/serio/i8042 EXPORT_SYMBOL +0xf65486e9 spi_release_transport drivers/scsi/scsi_transport_spi EXPORT_SYMBOL +0x7cc6470f simd_register_skciphers_compat crypto/crypto_simd EXPORT_SYMBOL_GPL +0xd0d8718f skb_flow_dissect_tunnel_info vmlinux EXPORT_SYMBOL +0x046f359e of_overlay_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x81b03377 efivar_entry_set_safe vmlinux EXPORT_SYMBOL_GPL +0x7fe03a32 cpufreq_dbs_governor_exit vmlinux EXPORT_SYMBOL_GPL +0x5d7561c1 cpufreq_dbs_governor_init vmlinux EXPORT_SYMBOL_GPL +0xfc14bb2e dm_get_dev_t vmlinux EXPORT_SYMBOL_GPL +0xf7c20b5a ata_std_postreset vmlinux EXPORT_SYMBOL_GPL +0x394348e6 drm_dp_read_desc vmlinux EXPORT_SYMBOL +0xfbe8ee28 acpi_get_table_by_index vmlinux EXPORT_SYMBOL +0x1c80d27d btree_geo128 vmlinux EXPORT_SYMBOL_GPL +0xb27f1cf1 bdevname vmlinux EXPORT_SYMBOL +0x7a9b37e8 blk_start_plug vmlinux EXPORT_SYMBOL +0xd8655c8d cdev_init vmlinux EXPORT_SYMBOL +0xa8e0180f request_resource vmlinux EXPORT_SYMBOL +0xe0f21de2 xprt_unpin_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa589c19c nf_ct_expect_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x04e1b99f snd_pcm_std_chmaps sound/core/snd-pcm EXPORT_SYMBOL_GPL +0x09ed702d snd_unregister_oss_device sound/core/snd EXPORT_SYMBOL +0x25cf37ab can_rx_offload_reset drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xb805e34a __nvme_submit_sync_cmd drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x93434b77 inet_getname vmlinux EXPORT_SYMBOL +0xe9e8faeb efi_tpm_final_log_size vmlinux EXPORT_SYMBOL +0x37e13f5b regcache_sync vmlinux EXPORT_SYMBOL_GPL +0xaa71dab3 devm_pinctrl_register vmlinux EXPORT_SYMBOL_GPL +0xa98439d9 iov_iter_fault_in_readable vmlinux EXPORT_SYMBOL +0xa9bc8b74 module_mutex vmlinux 
EXPORT_SYMBOL_GPL +0x67f71ed5 nci_set_config net/nfc/nci/nci EXPORT_SYMBOL +0x96b4dd27 put_mtd_device drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x706fdda7 mddev_suspend drivers/md/md-mod EXPORT_SYMBOL_GPL +0xa67189dc usb_ep_autoconfig_reset drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x18d8fef7 rndis_set_param_vendor drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x7a1bcd59 gf128mul_x8_ble crypto/gf128mul EXPORT_SYMBOL +0x4f080577 drm_mode_config_init vmlinux EXPORT_SYMBOL +0xd089875e timestamp_truncate vmlinux EXPORT_SYMBOL +0xa72ef0b2 modify_user_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0xbf9bcc8d __cap_empty_set vmlinux EXPORT_SYMBOL +0xd36f57fe xprt_register_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5360ad81 ip_tunnel_changelink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xbce16a32 mtd_get_fact_prot_info drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x533373a1 st_nci_enable_se drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL +0x1e8cb469 nfs_show_path fs/nfs/nfs EXPORT_SYMBOL_GPL +0x4d72d3aa chacha_block vmlinux EXPORT_SYMBOL +0x8b100dea inet_rcv_saddr_equal vmlinux EXPORT_SYMBOL +0x510b061d hisi_sas_phy_down vmlinux EXPORT_SYMBOL_GPL +0xb056b904 platform_get_resource vmlinux EXPORT_SYMBOL_GPL +0x0038ce63 drm_object_property_set_value vmlinux EXPORT_SYMBOL +0x52db9da1 pci_disable_pcie_error_reporting vmlinux EXPORT_SYMBOL_GPL +0x9dbe032f iput vmlinux EXPORT_SYMBOL +0x02a8eb19 nci_nfcee_discover net/nfc/nci/nci EXPORT_SYMBOL +0x09fc378e pn_sock_unhash net/phonet/phonet EXPORT_SYMBOL +0xea8bcda5 xprt_complete_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9f62bcd3 capi_ctr_down drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xd37f735c crypto_transfer_ablkcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL +0x561613cc input_ff_upload vmlinux EXPORT_SYMBOL_GPL +0x3f3bc448 iscsi_alloc_session vmlinux EXPORT_SYMBOL_GPL +0x2d193ef4 iommu_aux_get_pasid vmlinux EXPORT_SYMBOL_GPL +0xe3d1616f devm_regulator_bulk_register_supply_alias vmlinux EXPORT_SYMBOL_GPL +0x2c2d5471 open_related_ns vmlinux EXPORT_SYMBOL_GPL +0x03fd2571 vm_unmap_ram vmlinux EXPORT_SYMBOL +0x0aa309cf synchronize_hardirq vmlinux EXPORT_SYMBOL +0x5efde8e6 proc_doulongvec_ms_jiffies_minmax vmlinux EXPORT_SYMBOL +0xb500e267 __snd_pcm_lib_xfer sound/core/snd-pcm EXPORT_SYMBOL +0xdc2d9f94 vringh_getdesc_kern drivers/vhost/vringh EXPORT_SYMBOL +0x08b3e9cb hinic_enable_tx_irq drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xa74e01ef xfrm_output vmlinux EXPORT_SYMBOL_GPL +0x73d5b08c of_phy_connect vmlinux EXPORT_SYMBOL +0xc9e4cd99 devm_led_classdev_unregister vmlinux EXPORT_SYMBOL_GPL +0x92d31cfb fixed_phy_add vmlinux EXPORT_SYMBOL_GPL +0x271985e0 drm_mode_equal_no_clocks_no_stereo vmlinux EXPORT_SYMBOL +0xfe63d9df pci_enable_device_mem vmlinux EXPORT_SYMBOL +0xf474c21c bitmap_print_to_pagebuf vmlinux EXPORT_SYMBOL +0x71a50dbc register_blkdev vmlinux EXPORT_SYMBOL +0xb43efd69 fuse_sync_release vmlinux EXPORT_SYMBOL_GPL +0x0ebfa7f6 ceph_osdc_clear_abort_err net/ceph/libceph EXPORT_SYMBOL +0x00a467af rds_wq net/rds/rds EXPORT_SYMBOL_GPL +0x69f1f5cc nf_conntrack_alter_reply net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xd4f1bfc0 snd_rawmidi_proceed sound/core/snd-rawmidi EXPORT_SYMBOL +0xd7731337 mlx4_hw_rule_sz drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6918fc44 ipv6_select_ident vmlinux EXPORT_SYMBOL +0x1df7968b inet_offloads vmlinux EXPORT_SYMBOL +0x67601d27 neigh_event_ns vmlinux EXPORT_SYMBOL +0xac2af902 iscsi_flashnode_bus_match vmlinux EXPORT_SYMBOL_GPL +0xf0784f1a dpm_for_each_dev vmlinux 
EXPORT_SYMBOL_GPL +0x9b1d3367 drm_crtc_handle_vblank vmlinux EXPORT_SYMBOL +0x5507e7de drm_framebuffer_lookup vmlinux EXPORT_SYMBOL +0x005e74c9 fb_firmware_edid vmlinux EXPORT_SYMBOL +0xaba86685 jbd2_journal_blocks_per_page vmlinux EXPORT_SYMBOL +0x0fd902db mb_cache_entry_create vmlinux EXPORT_SYMBOL +0x4f588d38 iterate_supers_type vmlinux EXPORT_SYMBOL +0xd0654aba woken_wake_function vmlinux EXPORT_SYMBOL +0xc4be8e01 iw_cm_init_qp_attr drivers/infiniband/core/iw_cm EXPORT_SYMBOL +0xed9fae7f iscsi_queuecommand drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x49224181 nvme_reset_wq drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x36245dd5 fscache_check_aux fs/fscache/fscache EXPORT_SYMBOL +0xcd224e1d dlm_new_lockspace fs/dlm/dlm EXPORT_SYMBOL_GPL +0xd523f75f nf_queue_entry_get_refs vmlinux EXPORT_SYMBOL_GPL +0x962c8ae1 usb_kill_anchored_urbs vmlinux EXPORT_SYMBOL_GPL +0xccf93a24 ata_scsi_unlock_native_capacity vmlinux EXPORT_SYMBOL_GPL +0x3a426085 request_firmware_nowait vmlinux EXPORT_SYMBOL +0x96647fe2 drm_gem_handle_delete vmlinux EXPORT_SYMBOL +0x2688b7bb kmem_cache_free_bulk vmlinux EXPORT_SYMBOL +0x1e5c8bad percpu_down_write vmlinux EXPORT_SYMBOL_GPL +0x1403ad09 cpufreq_add_update_util_hook vmlinux EXPORT_SYMBOL_GPL +0x9646c915 svc_xprt_do_enqueue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8c4cb9c3 nf_conncount_list_init net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x6db4ebf1 target_undepend_item drivers/target/target_core_mod EXPORT_SYMBOL +0x3dbae082 __cast6_decrypt crypto/cast6_generic EXPORT_SYMBOL_GPL +0xcfce512f __cast6_encrypt crypto/cast6_generic EXPORT_SYMBOL_GPL +0x57191690 nfs4_put_deviceid_node fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x8819befe init_dummy_netdev vmlinux EXPORT_SYMBOL_GPL +0xe2f7586e netdev_change_features vmlinux EXPORT_SYMBOL +0x59514be8 __hid_request vmlinux EXPORT_SYMBOL_GPL +0xc633d82d phy_unregister_fixup vmlinux EXPORT_SYMBOL +0xd680fda9 ttm_bo_init_mm vmlinux EXPORT_SYMBOL +0x02a0d747 drm_connector_init_panel_orientation_property vmlinux EXPORT_SYMBOL +0x6b79a590 pci_assign_unassigned_bus_resources vmlinux EXPORT_SYMBOL_GPL +0xbd5cb8b9 ring_buffer_resize vmlinux EXPORT_SYMBOL_GPL +0xe5538557 ftrace_ops_set_global_filter vmlinux EXPORT_SYMBOL_GPL +0xf65aeb16 on_each_cpu_cond_mask vmlinux EXPORT_SYMBOL +0xe516c1b8 dma_direct_sync_single_for_device vmlinux EXPORT_SYMBOL +0xd617f5a3 p9_client_xattrwalk net/9p/9pnet EXPORT_SYMBOL_GPL +0x6986ab60 ip_tunnel_xmit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x6db19af1 dst_destroy vmlinux EXPORT_SYMBOL +0x3200cd00 tpm_chip_stop vmlinux EXPORT_SYMBOL_GPL +0x0791896c pci_disable_pri vmlinux EXPORT_SYMBOL_GPL +0xa3c69e7f pci_disable_ats vmlinux EXPORT_SYMBOL_GPL +0x939c1291 pci_disable_msi vmlinux EXPORT_SYMBOL +0xd2de82a1 pci_disable_rom vmlinux EXPORT_SYMBOL_GPL +0x84168246 svc_wake_up net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf1ba88aa xprt_write_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x60bc42b3 nf_ct_helper_expectfn_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xbd13ee5d o2hb_check_node_heartbeating_no_sem fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x647d6170 dlm_lock fs/dlm/dlm EXPORT_SYMBOL_GPL +0xb6a7f4b0 compat_tcp_setsockopt vmlinux EXPORT_SYMBOL +0x618b0287 scsi_mode_sense vmlinux EXPORT_SYMBOL +0xd406d266 fb_mode_is_equal vmlinux EXPORT_SYMBOL +0xf4af35c2 rcu_gp_is_normal vmlinux EXPORT_SYMBOL_GPL +0xd9a5ea54 __init_waitqueue_head vmlinux EXPORT_SYMBOL +0xb386bd69 ns_capable_setid vmlinux EXPORT_SYMBOL +0x2719b91f xfrm6_tunnel_register net/ipv6/tunnel6 EXPORT_SYMBOL +0x11918c68 xfrm4_tunnel_register 
net/ipv4/tunnel4 EXPORT_SYMBOL +0x936262d2 mlx5_core_alloc_pd drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xdf92bc8c mlx4_FLOW_STEERING_IB_UC_QP_RANGE drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0db73d1f wait_until_doorbell_flush_states drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x5927cb77 zpool_unregister_driver mm/zpool EXPORT_SYMBOL +0x118f5984 of_property_read_u64 vmlinux EXPORT_SYMBOL_GPL +0x99b80b7a iscsi_find_flashnode_sess vmlinux EXPORT_SYMBOL_GPL +0xcbec7cfe drm_crtc_from_index vmlinux EXPORT_SYMBOL +0x9c98418a drm_atomic_helper_shutdown vmlinux EXPORT_SYMBOL +0x3809078f drm_dp_mst_topology_mgr_init vmlinux EXPORT_SYMBOL +0x65a726d9 iommu_domain_window_enable vmlinux EXPORT_SYMBOL_GPL +0x4a7e23f2 blk_queue_virt_boundary vmlinux EXPORT_SYMBOL +0x6ea825cd blk_insert_cloned_request vmlinux EXPORT_SYMBOL_GPL +0x76b8d127 security_sb_set_mnt_opts vmlinux EXPORT_SYMBOL +0x72dc5cd8 p9_client_readlink net/9p/9pnet EXPORT_SYMBOL +0xadc964c0 nf_nat_icmpv6_reply_translation net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x3511e50b snd_pcm_lib_ioctl sound/core/snd-pcm EXPORT_SYMBOL +0x31741494 mlx5_query_port_wol drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc41d451f __mlx4_register_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x17000a32 qlt_xmit_tm_rsp drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x682ca1ff ir_raw_event_store_edge vmlinux EXPORT_SYMBOL_GPL +0x85300de2 drm_gem_vram_unpin vmlinux EXPORT_SYMBOL +0x8d277631 regulator_enable vmlinux EXPORT_SYMBOL_GPL +0x4f2fe91e gpiochip_reqres_irq vmlinux EXPORT_SYMBOL_GPL +0x20eadeb6 ip_compute_csum vmlinux EXPORT_SYMBOL +0x931802f0 blk_mq_kick_requeue_list vmlinux EXPORT_SYMBOL +0xcd974f00 rcu_all_qs vmlinux EXPORT_SYMBOL_GPL +0x9ac11b74 suspend_set_ops vmlinux EXPORT_SYMBOL_GPL +0x43965406 lc_seq_printf_stats lib/lru_cache EXPORT_SYMBOL +0x8d8ddc42 ceph_cls_lock_info net/ceph/libceph EXPORT_SYMBOL +0x36f90813 hinic_mbox_to_host_sync drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x3ffdacf3 timerqueue_iterate_next vmlinux EXPORT_SYMBOL_GPL +0xef8be036 dst_alloc vmlinux EXPORT_SYMBOL +0x6214aef2 cpufreq_unregister_notifier vmlinux EXPORT_SYMBOL +0x64f36620 dax_flush vmlinux EXPORT_SYMBOL_GPL +0xe93e49c3 devres_free vmlinux EXPORT_SYMBOL_GPL +0x9c9cc10b mipi_dsi_device_register_full vmlinux EXPORT_SYMBOL +0x2630a731 of_find_backlight_by_node vmlinux EXPORT_SYMBOL +0x7f7f7bb4 irq_poll_disable vmlinux EXPORT_SYMBOL +0x1ed84287 blk_put_queue vmlinux EXPORT_SYMBOL +0x62632527 proc_create_mount_point vmlinux EXPORT_SYMBOL +0x3d613394 single_release vmlinux EXPORT_SYMBOL +0x6255c27a get_user_pages vmlinux EXPORT_SYMBOL +0xc6823891 set_wb_congested vmlinux EXPORT_SYMBOL +0x42635d55 pm_suspend_global_flags vmlinux EXPORT_SYMBOL_GPL +0x10d121a1 housekeeping_affine vmlinux EXPORT_SYMBOL_GPL +0x34e5340f mlx4_SYNC_TPT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x95834687 ip6_append_data vmlinux EXPORT_SYMBOL_GPL +0xa108eb4d sysctl_optmem_max vmlinux EXPORT_SYMBOL +0x5972a6b6 stmmac_pltfr_pm_ops vmlinux EXPORT_SYMBOL_GPL +0xb8bdb3f5 drm_puts vmlinux EXPORT_SYMBOL +0xab735372 ipmi_dmi_get_slave_addr vmlinux EXPORT_SYMBOL +0xdc49c198 reciprocal_value_adv vmlinux EXPORT_SYMBOL +0x6fb51e7f akcipher_register_instance vmlinux EXPORT_SYMBOL_GPL +0x58d5c219 jbd2_journal_destroy vmlinux EXPORT_SYMBOL +0xb36fadf3 jbd2_journal_restart vmlinux EXPORT_SYMBOL +0xa5f7cf37 __cpu_possible_mask vmlinux EXPORT_SYMBOL +0xfabe36f0 ibnl_put_msg 
drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf212d94c cast6_setkey crypto/cast6_generic EXPORT_SYMBOL_GPL +0x268976e3 cast5_setkey crypto/cast5_generic EXPORT_SYMBOL_GPL +0x9c995c69 xt_percpu_counter_alloc vmlinux EXPORT_SYMBOL_GPL +0x609bcd98 in6_pton vmlinux EXPORT_SYMBOL +0xac5fcec0 in4_pton vmlinux EXPORT_SYMBOL +0x388aa3c9 neigh_proc_dointvec_ms_jiffies vmlinux EXPORT_SYMBOL +0xe2be3d5b of_get_mac_address vmlinux EXPORT_SYMBOL +0x6286d9fc of_get_pci_address vmlinux EXPORT_SYMBOL +0xf6555f19 scsi_mode_select vmlinux EXPORT_SYMBOL_GPL +0x8b149c36 clk_is_match vmlinux EXPORT_SYMBOL_GPL +0xefee932c acpi_get_data_full vmlinux EXPORT_SYMBOL +0xf28bd43a cfb_copyarea vmlinux EXPORT_SYMBOL +0xf6fc8791 __bitmap_xor vmlinux EXPORT_SYMBOL +0xb3b93338 mnt_want_write vmlinux EXPORT_SYMBOL_GPL +0x20ee2e7e config_group_init fs/configfs/configfs EXPORT_SYMBOL +0xe0017b89 __dev_remove_pack vmlinux EXPORT_SYMBOL +0xe1a4f16a secure_ipv6_port_ephemeral vmlinux EXPORT_SYMBOL +0x00565f18 pernet_ops_rwsem vmlinux EXPORT_SYMBOL_GPL +0x5c66e90c efivar_run_worker vmlinux EXPORT_SYMBOL_GPL +0xbaf75595 hisi_sas_stop_phys vmlinux EXPORT_SYMBOL_GPL +0xe51f5716 ata_sff_softreset vmlinux EXPORT_SYMBOL_GPL +0x04f48db1 drm_atomic_helper_cleanup_planes vmlinux EXPORT_SYMBOL +0x4cd7f3f5 iommu_unmap vmlinux EXPORT_SYMBOL_GPL +0xd0febb12 pcie_relaxed_ordering_enabled vmlinux EXPORT_SYMBOL +0x0ec26385 devm_gpiod_unhinge vmlinux EXPORT_SYMBOL_GPL +0x03151321 iov_iter_init vmlinux EXPORT_SYMBOL +0xab8df795 posix_acl_to_xattr vmlinux EXPORT_SYMBOL +0x41a5edab path_is_mountpoint vmlinux EXPORT_SYMBOL +0x3b867124 vfs_submount vmlinux EXPORT_SYMBOL_GPL +0xb18429eb suspend_device_irqs vmlinux EXPORT_SYMBOL_GPL +0x7c85e783 devm_register_reboot_notifier vmlinux EXPORT_SYMBOL +0xa5ac3e33 ZSTD_DCtxWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL +0x42017b3d dccp_invalid_packet net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL +0x1ad4f052 vringh_notify_enable_kern drivers/vhost/vringh EXPORT_SYMBOL +0xe52c844b mlx4_mw_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x2c541e7b radix_tree_next_chunk vmlinux EXPORT_SYMBOL +0x2d46ae15 tc_setup_cb_reoffload vmlinux EXPORT_SYMBOL +0x683dc278 __starget_for_each_device vmlinux EXPORT_SYMBOL +0xa2110424 drm_mode_config_cleanup vmlinux EXPORT_SYMBOL +0xd651a227 bioset_integrity_create vmlinux EXPORT_SYMBOL +0x951a2773 crypto_has_alg vmlinux EXPORT_SYMBOL_GPL +0x39a65c57 splice_to_pipe vmlinux EXPORT_SYMBOL_GPL +0x99ad9580 iget5_locked vmlinux EXPORT_SYMBOL +0xea1f6e0e hugetlb_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x3dcb88a0 irq_set_handler_data vmlinux EXPORT_SYMBOL +0x7550860c yield_to vmlinux EXPORT_SYMBOL_GPL +0xf76df3e2 mlxsw_afa_block_append_drop drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x5cac191b udp_pre_connect vmlinux EXPORT_SYMBOL +0x830bf41c call_fib_notifier vmlinux EXPORT_SYMBOL +0xa0c9d928 sk_page_frag_refill vmlinux EXPORT_SYMBOL +0x7d83af04 __phy_write_mmd vmlinux EXPORT_SYMBOL +0x600741f6 mii_check_media vmlinux EXPORT_SYMBOL +0x036cb9fa drm_of_encoder_active_endpoint vmlinux EXPORT_SYMBOL_GPL +0x9d1c0160 drm_ioctl_permit vmlinux EXPORT_SYMBOL +0x2f57861d dw_pcie_read_dbi vmlinux EXPORT_SYMBOL_GPL +0x81a85002 user_revoke vmlinux EXPORT_SYMBOL +0x3550ae5e debugfs_create_dir vmlinux EXPORT_SYMBOL_GPL +0x1c5cea04 dcache_dir_lseek vmlinux EXPORT_SYMBOL +0x5d114123 timer_reduce vmlinux EXPORT_SYMBOL +0x651a4139 test_taint vmlinux EXPORT_SYMBOL +0x17f54263 raid6_gfexp lib/raid6/raid6_pq EXPORT_SYMBOL +0xaa024146 sonet_copy_stats 
net/atm/atm EXPORT_SYMBOL +0xf0294cbc rdma_is_consumer_reject drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xba1ef701 mlx5_core_alloc_transport_domain drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xce739bcc vlan_filter_drop_vids vmlinux EXPORT_SYMBOL +0x8462d2f3 netdev_printk vmlinux EXPORT_SYMBOL +0x4f87794d of_address_to_resource vmlinux EXPORT_SYMBOL_GPL +0x5449ae14 of_graph_get_remote_port vmlinux EXPORT_SYMBOL +0x3394ccfc thermal_zone_device_register vmlinux EXPORT_SYMBOL_GPL +0x092e26bf acpi_remove_address_space_handler vmlinux EXPORT_SYMBOL +0xf5a691cd invalidate_bh_lrus vmlinux EXPORT_SYMBOL_GPL +0xc1b309bb vfs_iter_read vmlinux EXPORT_SYMBOL +0x3e09b381 unregister_trace_event vmlinux EXPORT_SYMBOL_GPL +0x970c17ce can_rx_register net/can/can EXPORT_SYMBOL +0x5e3eefb8 ipcomp_init_state net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL +0xe1a3be55 nf_nat_helper_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x4e19a82e usb_ep_autoconfig drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xccdcaf51 mlx4_bond drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xff520946 xt_check_match vmlinux EXPORT_SYMBOL_GPL +0x32c3cb4e class_compat_register vmlinux EXPORT_SYMBOL_GPL +0x4e03db81 drm_syncobj_find_fence vmlinux EXPORT_SYMBOL +0x527e3205 drm_mode_match vmlinux EXPORT_SYMBOL +0xd303c314 drm_atomic_helper_check_planes vmlinux EXPORT_SYMBOL +0x544b0c11 acpi_lid_notifier_register vmlinux EXPORT_SYMBOL +0x973fa82e register_acpi_notifier vmlinux EXPORT_SYMBOL +0xe8748e95 blk_mq_freeze_queue_wait vmlinux EXPORT_SYMBOL_GPL +0xb580b753 put_fs_context vmlinux EXPORT_SYMBOL +0xc4f2377e add_to_pipe vmlinux EXPORT_SYMBOL +0xe9bde96f d_splice_alias vmlinux EXPORT_SYMBOL +0xc5d789df alarm_expires_remaining vmlinux EXPORT_SYMBOL_GPL +0x0366307a console_suspend_enabled vmlinux EXPORT_SYMBOL +0x322a9620 svc_reg_xprt_class net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x45b3ba71 nf_reject_iphdr_put net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL +0x122b81e8 c_can_power_up drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL +0x68d4ca5a kobject_rename vmlinux EXPORT_SYMBOL_GPL +0xc768cc7b devm_alloc_etherdev_mqs vmlinux EXPORT_SYMBOL +0x9c168cbd pps_unregister_source vmlinux EXPORT_SYMBOL +0x871ffd42 drm_mm_replace_node vmlinux EXPORT_SYMBOL +0x43f81957 clk_round_rate vmlinux EXPORT_SYMBOL_GPL +0x9b72478f acpi_unload_parent_table vmlinux EXPORT_SYMBOL +0x3b0ed89d acpi_irq_get vmlinux EXPORT_SYMBOL_GPL +0xdd6ba623 acpi_subsys_suspend_late vmlinux EXPORT_SYMBOL_GPL +0x1c726d2c gpiochip_generic_request vmlinux EXPORT_SYMBOL_GPL +0x83416cc0 param_ops_charp vmlinux EXPORT_SYMBOL +0x2a7e1ded gfn_to_pfn_memslot_atomic vmlinux EXPORT_SYMBOL_GPL +0x732e425f l2tp_session_get net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x6e9be7da rpc_count_iostats_metrics net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd7ba575e dlm_errmsg fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0xad5737fc efivar_init vmlinux EXPORT_SYMBOL_GPL +0xe0cc702e dma_resv_copy_fences vmlinux EXPORT_SYMBOL +0xe0ccdadb drm_mode_config_helper_resume vmlinux EXPORT_SYMBOL +0x4c20c56f __inode_add_bytes vmlinux EXPORT_SYMBOL +0x2c66017f cad_pid vmlinux EXPORT_SYMBOL +0xfd684519 rndis_free_response drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0xb6ebf62a o2nm_this_node fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x268d1a4a inet_twsk_deschedule_put vmlinux EXPORT_SYMBOL +0x4d081eed tc_setup_cb_replace vmlinux EXPORT_SYMBOL +0x750089d6 phy_set_max_speed vmlinux EXPORT_SYMBOL +0xfb6710d6 ata_host_start vmlinux EXPORT_SYMBOL_GPL +0x30275bfb 
__tracepoint_dma_fence_enable_signal vmlinux EXPORT_SYMBOL +0x0361e68a drm_scdc_read vmlinux EXPORT_SYMBOL +0x400a024b acpi_scan_lock_release vmlinux EXPORT_SYMBOL_GPL +0x46045dd7 kstrtou8 vmlinux EXPORT_SYMBOL +0x7fdfc567 blk_rq_map_integrity_sg vmlinux EXPORT_SYMBOL +0x794b7271 orderly_reboot vmlinux EXPORT_SYMBOL_GPL +0x0f0edcab get_task_pid vmlinux EXPORT_SYMBOL_GPL +0xda72a7ec ZSTD_nextInputType lib/zstd/zstd_decompress EXPORT_SYMBOL +0xeaa89ced ceph_auth_invalidate_authorizer net/ceph/libceph EXPORT_SYMBOL +0x4443d399 atm_proc_root net/atm/atm EXPORT_SYMBOL +0xde7741f0 snd_component_add sound/core/snd EXPORT_SYMBOL +0xeb84d55b register_c_can_dev drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL +0xbb78fc37 mlxsw_pci_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_pci EXPORT_SYMBOL +0x5c5c4394 of_nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL +0xdf63ce29 touchscreen_report_pos vmlinux EXPORT_SYMBOL +0x9a3f527c of_usb_host_tpl_support vmlinux EXPORT_SYMBOL_GPL +0xd2f181d4 device_get_child_node_count vmlinux EXPORT_SYMBOL_GPL +0x0582dc3e serial8250_rx_chars vmlinux EXPORT_SYMBOL_GPL +0x5a7bfe41 crypto_probing_notify vmlinux EXPORT_SYMBOL_GPL +0xf738d1be register_blocking_lsm_notifier vmlinux EXPORT_SYMBOL +0x8977592f ip6t_alloc_initial_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL_GPL +0x12392507 mtd_writev drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xccf9b0d1 mtd_read_oob drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xd7e12064 mlx5_core_create_sq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x9de36f5e mlx5_core_create_rq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xc6fbae3d mlx5_put_uars_page drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xdf72569b mlx4_gen_guid_change_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x7579f0b6 __inet_twsk_schedule vmlinux EXPORT_SYMBOL_GPL +0x775cc23f xt_compat_match_to_user vmlinux EXPORT_SYMBOL_GPL +0xa47676ea netif_receive_skb vmlinux EXPORT_SYMBOL +0x00ff1355 spi_new_device vmlinux EXPORT_SYMBOL_GPL +0xf7ae0667 ahci_platform_enable_phys vmlinux EXPORT_SYMBOL_GPL +0x5ec62549 ata_dev_classify vmlinux EXPORT_SYMBOL_GPL +0x222ebac9 pm_runtime_enable vmlinux EXPORT_SYMBOL_GPL +0x45b504e7 drm_of_crtc_port_mask vmlinux EXPORT_SYMBOL +0xb2e2e7fc drm_gem_vram_driver_dumb_mmap_offset vmlinux EXPORT_SYMBOL +0x759e3802 of_regulator_match vmlinux EXPORT_SYMBOL_GPL +0x36872b14 jbd2_journal_inode_ranged_write vmlinux EXPORT_SYMBOL +0x9d3ce3eb dax_writeback_mapping_range vmlinux EXPORT_SYMBOL_GPL +0x7ea75c24 __wake_up_locked_key_bookmark vmlinux EXPORT_SYMBOL_GPL +0xfb6af58d recalc_sigpending vmlinux EXPORT_SYMBOL +0x397f6231 ip_set_free net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xa9594679 nf_nat_mangle_udp_packet net/netfilter/nf_nat EXPORT_SYMBOL +0x643848df st_nci_hci_cmd_received drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL +0x16baf07c wimax_reset vmlinux EXPORT_SYMBOL +0xc3525002 input_match_device_id vmlinux EXPORT_SYMBOL +0xc845530a genphy_c45_read_pma vmlinux EXPORT_SYMBOL_GPL +0xc43c600d genphy_c45_read_lpa vmlinux EXPORT_SYMBOL_GPL +0xec472d45 ata_acpi_gtm_xfermask vmlinux EXPORT_SYMBOL_GPL +0x47f4cc73 serial8250_em485_init vmlinux EXPORT_SYMBOL_GPL +0x41cb4f3e pci_user_read_config_dword vmlinux EXPORT_SYMBOL_GPL +0xb3f548ad kmemdup_nul vmlinux EXPORT_SYMBOL +0xe6e40502 rcu_get_gp_seq vmlinux EXPORT_SYMBOL_GPL +0x41b7a263 nfc_class net/nfc/nfc EXPORT_SYMBOL +0x95286aa1 __tracepoint_bcache_invalidate drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x68cdace7 
mlx5_create_auto_grouped_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x605f7d1d mlx5_nic_vport_disable_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x9b98bc55 mlx5_core_xrcd_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x789b75b7 mlx4_flow_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xc4234790 hinic_host_oq_id_mask drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8bde8e75 iscsi_tcp_recv_segment_is_hdr drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x2e06f9a9 iscsi_itt_to_ctask drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xcbd4898c fortify_panic vmlinux EXPORT_SYMBOL +0xada38766 dst_cache_destroy vmlinux EXPORT_SYMBOL_GPL +0xb1425b32 dm_table_add_target_callbacks vmlinux EXPORT_SYMBOL_GPL +0x5111f306 ptp_clock_index vmlinux EXPORT_SYMBOL +0x79e1b94d anon_transport_class_register vmlinux EXPORT_SYMBOL_GPL +0x24111cc0 virtqueue_get_used_addr vmlinux EXPORT_SYMBOL_GPL +0x258d2f76 net_dim_get_tx_moderation vmlinux EXPORT_SYMBOL +0x99d472b1 net_dim_get_rx_moderation vmlinux EXPORT_SYMBOL +0xb7969feb disk_part_iter_exit vmlinux EXPORT_SYMBOL_GPL +0xe9d26bc5 __tracepoint_block_bio_complete vmlinux EXPORT_SYMBOL_GPL +0x34407691 crypto_has_ahash vmlinux EXPORT_SYMBOL_GPL +0xeae665a1 sysfs_create_file_ns vmlinux EXPORT_SYMBOL_GPL +0x59b727fa follow_down_one vmlinux EXPORT_SYMBOL +0x7944e0fc tracing_off vmlinux EXPORT_SYMBOL_GPL +0x526c8fc1 rpcauth_init_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8b98a7f5 vhost_vq_init_access drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x14b82aef raw_seq_next vmlinux EXPORT_SYMBOL_GPL +0xeb176f5b ip_getsockopt vmlinux EXPORT_SYMBOL +0xfe207514 ip_setsockopt vmlinux EXPORT_SYMBOL +0xdce63147 tty_encode_baud_rate vmlinux EXPORT_SYMBOL_GPL +0x2b65cc07 clk_hw_set_rate_range vmlinux EXPORT_SYMBOL_GPL +0x797976bd page_endio vmlinux EXPORT_SYMBOL_GPL +0xf0f080fe xdr_shift_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x883ed868 rdma_disconnect drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x2e09263f usb_copy_descriptors drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xd32ffad2 mlx4_get_base_gid_ix drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xdfc107fe ip6_redirect vmlinux EXPORT_SYMBOL_GPL +0x752a153b ip_options_rcv_srr vmlinux EXPORT_SYMBOL +0x6ede3493 i2c_bit_add_numbered_bus vmlinux EXPORT_SYMBOL +0x4693d6d7 ata_sas_tport_delete vmlinux EXPORT_SYMBOL_GPL +0x3d07c71b device_register vmlinux EXPORT_SYMBOL_GPL +0x474f127c drm_dp_aux_unregister vmlinux EXPORT_SYMBOL +0x9d8c5c3e pnp_request_card_device vmlinux EXPORT_SYMBOL +0x0120ce33 acpi_irq_create_hierarchy vmlinux EXPORT_SYMBOL_GPL +0x661620e4 acpi_device_uevent_modalias vmlinux EXPORT_SYMBOL_GPL +0xc7c1107a LZ4_decompress_safe vmlinux EXPORT_SYMBOL +0xbefa51a3 gen_pool_add_owner vmlinux EXPORT_SYMBOL +0x1e277674 pcim_iomap_regions_request_all vmlinux EXPORT_SYMBOL +0xceef6090 blk_mq_rq_cpu vmlinux EXPORT_SYMBOL +0x4cedb821 dentry_open vmlinux EXPORT_SYMBOL +0xd4296f89 __SetPageMovable vmlinux EXPORT_SYMBOL +0x8deb69c7 freq_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL +0xf8bf74bc mlx5_cmd_exec drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x6890c58d unregister_netdevice_queue vmlinux EXPORT_SYMBOL +0xd338a01c skb_checksum_help vmlinux EXPORT_SYMBOL +0x8c89e3b8 usb_phy_roothub_power_off vmlinux EXPORT_SYMBOL_GPL +0x97bdfa60 scsi_dev_info_remove_list vmlinux EXPORT_SYMBOL +0x3773f977 regmap_write vmlinux EXPORT_SYMBOL_GPL +0xc95aceda drm_gem_shmem_vunmap vmlinux 
EXPORT_SYMBOL
+0x0a72f765 drm_clflush_virt_range vmlinux EXPORT_SYMBOL
+0xc14dc168 acpi_get_data vmlinux EXPORT_SYMBOL
+0xcdc39c9e security_ismaclabel vmlinux EXPORT_SYMBOL
+0x1d07e365 memdup_user_nul vmlinux EXPORT_SYMBOL
+0x9545af6d tasklet_init vmlinux EXPORT_SYMBOL
+0x2f809002 rpc_sleep_on_priority net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8bdec826 nf_nat_setup_info net/netfilter/nf_nat EXPORT_SYMBOL
+0x42c20961 ib_dealloc_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x8391b169 mlx4_bf_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x8f996a30 ethtool_convert_legacy_u32_to_link_mode vmlinux EXPORT_SYMBOL
+0xc59e4715 led_trigger_set_default vmlinux EXPORT_SYMBOL_GPL
+0xf553318d cpuidle_pause_and_lock vmlinux EXPORT_SYMBOL_GPL
+0x66d33511 thermal_zone_of_sensor_unregister vmlinux EXPORT_SYMBOL_GPL
+0x1b05afee ata_sff_hsm_move vmlinux EXPORT_SYMBOL_GPL
+0x91f3c354 sata_link_debounce vmlinux EXPORT_SYMBOL_GPL
+0xe50604ec virtqueue_enable_cb vmlinux EXPORT_SYMBOL_GPL
+0x4fbc3cde amba_bustype vmlinux EXPORT_SYMBOL_GPL
+0x399f0dd1 jbd2_journal_get_undo_access vmlinux EXPORT_SYMBOL
+0x8381e894 padata_unregister_cpumask_notifier vmlinux EXPORT_SYMBOL
+0xabd45848 stop_machine vmlinux EXPORT_SYMBOL_GPL
+0xb6261484 register_die_notifier vmlinux EXPORT_SYMBOL_GPL
+0x6d603b05 nf_ct_unlink_expect_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xe6024e59 dm_bufio_release drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xf51aa222 wimax_dev_add vmlinux EXPORT_SYMBOL_GPL
+0x33b81d1e wimax_msg vmlinux EXPORT_SYMBOL_GPL
+0x6162206b ip6_find_1stfragopt vmlinux EXPORT_SYMBOL
+0x13f43a1d __tcf_idr_release vmlinux EXPORT_SYMBOL
+0xe032bf09 sock_no_recvmsg vmlinux EXPORT_SYMBOL
+0xe4db3cd1 sk_ns_capable vmlinux EXPORT_SYMBOL
+0x2c057775 spi_controller_resume vmlinux EXPORT_SYMBOL_GPL
+0x80a8ddae ata_do_set_mode vmlinux EXPORT_SYMBOL_GPL
+0x9a37ade2 devm_regulator_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x8284ce04 devm_regulator_bulk_unregister_supply_alias vmlinux EXPORT_SYMBOL_GPL
+0xa0c6befa hrtimer_cancel vmlinux EXPORT_SYMBOL_GPL
+0x07dbd992 ib_set_vf_guid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4b60d426 nvme_start_freeze drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xab63baa5 unregister_inetaddr_validator_notifier vmlinux EXPORT_SYMBOL
+0x22f739b7 ip_frag_next vmlinux EXPORT_SYMBOL
+0x3cf2fda5 xt_unregister_matches vmlinux EXPORT_SYMBOL
+0xeec47464 netlink_net_capable vmlinux EXPORT_SYMBOL
+0x234cf416 devlink_fmsg_string_pair_put vmlinux EXPORT_SYMBOL_GPL
+0x82c359ee mdio_driver_unregister vmlinux EXPORT_SYMBOL
+0x4b51f74c ata_xfer_mode2mask vmlinux EXPORT_SYMBOL_GPL
+0x4cf74e6b component_unbind_all vmlinux EXPORT_SYMBOL_GPL
+0xe0875eb1 kstrtobool vmlinux EXPORT_SYMBOL
+0x1bb63678 security_inode_setattr vmlinux EXPORT_SYMBOL_GPL
+0xe36716b5 fuse_kill_sb_anon vmlinux EXPORT_SYMBOL_GPL
+0xc4f04e92 dquot_file_open vmlinux EXPORT_SYMBOL
+0x8f786bee fs_umode_to_dtype vmlinux EXPORT_SYMBOL_GPL
+0x84264ced fs_umode_to_ftype vmlinux EXPORT_SYMBOL_GPL
+0x605790dc fiemap_fill_next_extent vmlinux EXPORT_SYMBOL
+0xca510975 trace_event_ignore_this_pid vmlinux EXPORT_SYMBOL_GPL
+0x88295b96 dm_tm_unlock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x48e323be dm_bm_unlock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa1d07eb3 mlx4_uar_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb9af1d0d __xa_clear_mark vmlinux EXPORT_SYMBOL
+0x017b9e65 qdisc_watchdog_cancel vmlinux EXPORT_SYMBOL
+0xde8ec2bf dev_queue_xmit_accel vmlinux EXPORT_SYMBOL
+0x08ecc98c devm_device_remove_groups vmlinux EXPORT_SYMBOL_GPL
+0xd6cdf048 drm_gem_shmem_get_pages_sgt vmlinux EXPORT_SYMBOL_GPL
+0x3a9b9942 drm_modeset_acquire_fini vmlinux EXPORT_SYMBOL
+0x7a89428b t10_pi_type1_crc vmlinux EXPORT_SYMBOL
+0x71ea9442 crypto_shash_digest vmlinux EXPORT_SYMBOL_GPL
+0x972075fb crypto_ahash_digest vmlinux EXPORT_SYMBOL_GPL
+0x3de9cae1 crypto_remove_final vmlinux EXPORT_SYMBOL_GPL
+0x4f7a6f5c get_user_pages_unlocked vmlinux EXPORT_SYMBOL
+0x7f4f1c0d rxrpc_kernel_begin_call net/rxrpc/rxrpc EXPORT_SYMBOL
+0x8541b291 svc_authenticate net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0f19abcb mlx4_put_slave_node_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xdf0a4815 iscsi_update_cmdsn drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xcf78d314 nfs_file_operations fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x44daca58 rtnl_set_sk_err vmlinux EXPORT_SYMBOL
+0xb209b7a3 __sock_queue_rcv_skb vmlinux EXPORT_SYMBOL
+0x2f866100 kernel_sendpage_locked vmlinux EXPORT_SYMBOL
+0x16ec863a usb_hcd_pci_remove vmlinux EXPORT_SYMBOL_GPL
+0x9b585a91 phy_device_remove vmlinux EXPORT_SYMBOL
+0x919689bb class_remove_file_ns vmlinux EXPORT_SYMBOL_GPL
+0x2b7874a2 drm_atomic_helper_wait_for_vblanks vmlinux EXPORT_SYMBOL
+0x5ff9eb0e lockref_mark_dead vmlinux EXPORT_SYMBOL
+0xee718a14 irq_domain_free_fwnode vmlinux EXPORT_SYMBOL_GPL
+0x18888d00 downgrade_write vmlinux EXPORT_SYMBOL
+0x29361773 complete vmlinux EXPORT_SYMBOL
+0x44cd670e kvm_clear_dirty_log_protect vmlinux EXPORT_SYMBOL_GPL
+0x93ff008c LZ4_loadDictHC lib/lz4/lz4hc_compress EXPORT_SYMBOL
+0xcdc6f992 gether_get_host_addr_u8 drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0xbad5c3c4 fc_fcp_destroy drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x70b19be5 st21nfca_apdu_reader_event_received drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0xf608e179 config_item_init_type_name fs/configfs/configfs EXPORT_SYMBOL
+0x8a455d40 ip_check_defrag vmlinux EXPORT_SYMBOL
+0xe40bb23e devlink_health_reporter_priv vmlinux EXPORT_SYMBOL_GPL
+0xbc3bdc7f flow_get_u32_src vmlinux EXPORT_SYMBOL
+0x0a154d9f iscsi_block_session vmlinux EXPORT_SYMBOL_GPL
+0xe9e9a5e8 ata_bmdma_start vmlinux EXPORT_SYMBOL_GPL
+0x426c3f5c ata_port_freeze vmlinux EXPORT_SYMBOL_GPL
+0x8d83737c mipi_dsi_dcs_read vmlinux EXPORT_SYMBOL
+0x617176cf rdev_get_dev vmlinux EXPORT_SYMBOL_GPL
+0x3a4b3e53 iov_iter_single_seg_count vmlinux EXPORT_SYMBOL
+0xd7534e80 adjust_managed_page_count vmlinux EXPORT_SYMBOL
+0x9bb6fd09 vsock_connected_table net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xab04f3e8 l2tp_xmit_skb net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x15103ae4 snd_pcm_new_stream sound/core/snd-pcm EXPORT_SYMBOL
+0x2dcb01c6 sync_page_io drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x58f677d8 mlx5_dm_sw_icm_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xef9294ff mlx5_query_port_tc_group drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x5f1d2c55 hinic_alloc_db_phy_addr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x185d926c usb_acpi_power_manageable vmlinux EXPORT_SYMBOL_GPL
+0xf67cb4c9 scsi_test_unit_ready vmlinux EXPORT_SYMBOL
+0xbfbfc320 sata_link_resume vmlinux EXPORT_SYMBOL_GPL
+0x2cc3d7a8 serial8250_read_char vmlinux EXPORT_SYMBOL_GPL
+0x8a7d1c31 high_memory vmlinux EXPORT_SYMBOL
+0xc02eaf1d perf_pmu_register vmlinux EXPORT_SYMBOL_GPL
+0x94fc8d93 smp_call_function_many vmlinux EXPORT_SYMBOL
+0xd2b03f1f ceph_monc_blacklist_add net/ceph/libceph EXPORT_SYMBOL
+0x64d8f754 xprt_reconnect_backoff net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x15ef66c3 iscsi_itt_to_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xb2e5bd00 st_register drivers/misc/ti-st/st_drv EXPORT_SYMBOL_GPL
+0xa03055fa inet6_csk_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0x492d9ce8 spi_finalize_current_transfer vmlinux EXPORT_SYMBOL_GPL
+0xb7bf0a49 regmap_parse_val vmlinux EXPORT_SYMBOL_GPL
+0x0686439a drm_dp_mst_port_has_audio vmlinux EXPORT_SYMBOL
+0x7cba6b3b clk_hw_get_rate vmlinux EXPORT_SYMBOL_GPL
+0xf29403e5 acpi_install_table_handler vmlinux EXPORT_SYMBOL
+0x0484c6c4 acpi_enter_sleep_state_prep vmlinux EXPORT_SYMBOL
+0xf742f9c7 simple_link vmlinux EXPORT_SYMBOL
+0x45a5d73e vfs_whiteout vmlinux EXPORT_SYMBOL
+0x34d5af2e ceph_osdc_notify net/ceph/libceph EXPORT_SYMBOL
+0xd1bdd0ed ib_create_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xacfd3b10 usb_otg_descriptor_alloc drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x2b827430 hidinput_find_field vmlinux EXPORT_SYMBOL_GPL
+0x8b29a80a usb_serial_suspend vmlinux EXPORT_SYMBOL
+0x475c4760 scsi_remove_target vmlinux EXPORT_SYMBOL
+0x2c39b27e regmap_noinc_read vmlinux EXPORT_SYMBOL_GPL
+0xc73cda3c drm_vma_offset_manager_init vmlinux EXPORT_SYMBOL
+0xb14ab1ef hdmi_audio_infoframe_init vmlinux EXPORT_SYMBOL
+0x6ca2924c find_asymmetric_key vmlinux EXPORT_SYMBOL_GPL
+0x0d47f7c4 fscrypt_zeroout_range vmlinux EXPORT_SYMBOL
+0x0d49cf14 page_cache_async_readahead vmlinux EXPORT_SYMBOL_GPL
+0x1953c958 mempool_create vmlinux EXPORT_SYMBOL
+0x2762dd49 padata_alloc_possible vmlinux EXPORT_SYMBOL
+0x57231f45 ring_buffer_record_on vmlinux EXPORT_SYMBOL_GPL
+0x95a52abd dm_bm_is_read_only drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa98181d9 mlx5_set_port_pause drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x6821f5c6 crypto_transfer_skcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xd0a61e5e nfs4_init_ds_session fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x034bf75b tcf_get_next_chain vmlinux EXPORT_SYMBOL
+0x9b27fbf7 of_cpu_node_to_id vmlinux EXPORT_SYMBOL
+0xd7399d2a efivar_entry_iter_end vmlinux EXPORT_SYMBOL_GPL
+0xfa97e360 phy_init_eee vmlinux EXPORT_SYMBOL
+0x8a8ec276 ahci_platform_enable_resources vmlinux EXPORT_SYMBOL_GPL
+0x2bfb9ccb drm_cvt_mode vmlinux EXPORT_SYMBOL
+0x0272b4e9 clkdev_drop vmlinux EXPORT_SYMBOL
+0xadb7e079 kblockd_schedule_work vmlinux EXPORT_SYMBOL
+0x220e55d0 mem_section vmlinux EXPORT_SYMBOL
+0x70e790fb send_sig_info vmlinux EXPORT_SYMBOL
+0xedc03953 iounmap vmlinux EXPORT_SYMBOL
+0xed083427 cache_seq_next_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x26dd8f2c xprt_release_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf3dd8514 nf_nat_ipv4_register_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x1407abba mptscsih_get_scsi_lookup drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x27111fbb mlx4_qp_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xc2c34fe4 nfs_file_mmap fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x9c7fe3ac __neigh_event_send vmlinux EXPORT_SYMBOL
+0xf1745041 sk_stream_wait_close vmlinux EXPORT_SYMBOL
+0x8df6c972 skb_mpls_dec_ttl vmlinux EXPORT_SYMBOL_GPL
+0xde4a7234 i2c_parse_fw_timings vmlinux EXPORT_SYMBOL_GPL
+0x8135960b dev_pm_qos_hide_latency_tolerance vmlinux EXPORT_SYMBOL_GPL
+0x15e06c85 show_class_attr_string vmlinux EXPORT_SYMBOL_GPL
+0x56f650d6 drm_bridge_mode_fixup vmlinux EXPORT_SYMBOL
+0x43ad1cb4 clk_hw_unregister_gate vmlinux EXPORT_SYMBOL_GPL
+0x151f4898 schedule_timeout_uninterruptible vmlinux EXPORT_SYMBOL
+0x4e29218b nci_prop_cmd net/nfc/nci/nci EXPORT_SYMBOL
+0x91fcdabf ceph_file_layout_from_legacy net/ceph/libceph EXPORT_SYMBOL
+0x38fb35d3 ceph_client_gid net/ceph/libceph EXPORT_SYMBOL
+0xc425a35d snd_rawmidi_info_select sound/core/snd-rawmidi EXPORT_SYMBOL
+0x05769604 snd_timer_global_register sound/core/snd-timer EXPORT_SYMBOL
+0x18a23286 vhost_add_used drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x228ad5f7 hinic_get_fw_version drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x80b4b45e fc_fabric_logoff drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xebcaf169 fqdir_exit vmlinux EXPORT_SYMBOL
+0xf4be3dfd ndo_dflt_fdb_add vmlinux EXPORT_SYMBOL
+0x77be49bd pskb_extract vmlinux EXPORT_SYMBOL
+0xa7b9c61a ir_raw_event_set_idle vmlinux EXPORT_SYMBOL_GPL
+0x7886316a sas_remove_host vmlinux EXPORT_SYMBOL
+0xca945b7b ata_host_register vmlinux EXPORT_SYMBOL_GPL
+0x211dd666 platform_get_irq vmlinux EXPORT_SYMBOL_GPL
+0x03892ffd drm_connector_unregister vmlinux EXPORT_SYMBOL
+0x5eaa99da blk_mq_unquiesce_queue vmlinux EXPORT_SYMBOL_GPL
+0x28d57950 crypto_register_rng vmlinux EXPORT_SYMBOL_GPL
+0x86e8875f kill_anon_super vmlinux EXPORT_SYMBOL
+0xb88dbfce irq_set_irqchip_state vmlinux EXPORT_SYMBOL_GPL
+0x1d222ced irq_get_irqchip_state vmlinux EXPORT_SYMBOL_GPL
+0xfe6e9416 ppp_output_wakeup drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x1e439a08 xfrm6_rcv vmlinux EXPORT_SYMBOL
+0x8167bad2 xfrm4_rcv vmlinux EXPORT_SYMBOL
+0xbe4eb6ed secure_dccpv6_sequence_number vmlinux EXPORT_SYMBOL
+0x1b93bb8c ata_msleep vmlinux EXPORT_SYMBOL_GPL
+0xd0db0f12 run_dax vmlinux EXPORT_SYMBOL_GPL
+0x2772c48b virtqueue_add_inbuf_ctx vmlinux EXPORT_SYMBOL_GPL
+0xb289c2a9 acpi_subsys_runtime_suspend vmlinux EXPORT_SYMBOL_GPL
+0x0bfb6cbb pci_enable_pcie_error_reporting vmlinux EXPORT_SYMBOL_GPL
+0x2efe0d36 of_pci_get_devfn vmlinux EXPORT_SYMBOL_GPL
+0x12dbc8f6 percpu_ref_switch_to_atomic_sync vmlinux EXPORT_SYMBOL_GPL
+0x52c13d46 _copy_from_iter_full vmlinux EXPORT_SYMBOL
+0xb1134de3 request_key_rcu vmlinux EXPORT_SYMBOL
+0x7c9fba96 request_key_tag vmlinux EXPORT_SYMBOL
+0xc89a579b kthread_mod_delayed_work vmlinux EXPORT_SYMBOL_GPL
+0x900ad949 xdr_buf_subsegment net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6a9355e9 nf_tables_bind_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xb6ec997d async_raid6_2data_recov crypto/async_tx/async_raid6_recov EXPORT_SYMBOL_GPL
+0x57c89069 nfs_unlink fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xc72bc70e fscache_init_cache fs/fscache/fscache EXPORT_SYMBOL
+0x4dca08ee sync_file_get_fence vmlinux EXPORT_SYMBOL
+0x68acd054 drm_legacy_ioremap vmlinux EXPORT_SYMBOL
+0x4074065b drm_master_internal_acquire vmlinux EXPORT_SYMBOL
+0x09f36207 bio_integrity_trim vmlinux EXPORT_SYMBOL
+0x73a2ae25 bio_integrity_prep vmlinux EXPORT_SYMBOL
+0x5ad506cc nf_nat_inet_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xd6d968c8 usb_put_function_instance drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xaf83bfcd hinic_get_interrupt_cfg drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xe434c88d st21nfca_se_init drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0xa85a3e6d xa_load vmlinux EXPORT_SYMBOL
+0x1d112686 inet_dgram_connect vmlinux EXPORT_SYMBOL
+0x7746bafd usb_find_alt_setting vmlinux EXPORT_SYMBOL_GPL
+0x98394780 phy_connect vmlinux EXPORT_SYMBOL
+0x1cc1cbdb dev_pm_disable_wake_irq vmlinux EXPORT_SYMBOL_GPL
+0xdebc6613 device_connection_find_match vmlinux EXPORT_SYMBOL_GPL
+0x7063c0fd driver_unregister vmlinux EXPORT_SYMBOL_GPL
+0x4aa09034 ttm_bo_dma_acc_size vmlinux EXPORT_SYMBOL
+0x5130b276 virtqueue_get_buf vmlinux EXPORT_SYMBOL_GPL
+0x4336fcca ucs2_as_utf8 vmlinux EXPORT_SYMBOL
+0x2d9cd416 key_type_encrypted vmlinux EXPORT_SYMBOL_GPL
+0x463611c0 set_cached_acl vmlinux EXPORT_SYMBOL
+0x751e3457 generic_pipe_buf_steal vmlinux EXPORT_SYMBOL
+0x532e8237 param_ops_ulong vmlinux EXPORT_SYMBOL
+0xad9262fa vsock_insert_connected net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xacdddcb8 rds_inc_init net/rds/rds EXPORT_SYMBOL_GPL
+0x3b304ebb vringh_iov_push_user drivers/vhost/vringh EXPORT_SYMBOL
+0x4311cd91 vringh_iov_pull_user drivers/vhost/vringh EXPORT_SYMBOL
+0xd7e01597 mlx5_fpga_sbu_conn_sendmsg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x25e6b134 fcoe_ctlr_recv drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL
+0x88557131 nfs_kill_super fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xde924773 nfs_fill_super fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xfe4b8648 kobject_put vmlinux EXPORT_SYMBOL
+0xac1af11a kobject_get vmlinux EXPORT_SYMBOL
+0x9c09c2ef iscsi_unblock_session vmlinux EXPORT_SYMBOL_GPL
+0x14acec1c ttm_dma_populate vmlinux EXPORT_SYMBOL_GPL
+0x90dea622 drm_i2c_encoder_commit vmlinux EXPORT_SYMBOL
+0x0e0eba61 tty_buffer_set_limit vmlinux EXPORT_SYMBOL_GPL
+0x603d0d51 acpi_os_map_iomem vmlinux EXPORT_SYMBOL_GPL
+0xa154d6a4 fwnode_get_named_gpiod vmlinux EXPORT_SYMBOL_GPL
+0xd67732a8 relay_open vmlinux EXPORT_SYMBOL_GPL
+0x412163b4 param_set_copystring vmlinux EXPORT_SYMBOL
+0xaa6f987d br_vlan_enabled net/bridge/bridge EXPORT_SYMBOL_GPL
+0x5edf5e78 nf_synproxy_ipv4_init net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0xd2ea2134 usb_descriptor_fillbuf drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xc8ed2e83 c_can_power_down drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL
+0xc9634df9 in6addr_linklocal_allrouters vmlinux EXPORT_SYMBOL
+0x04c2173a reuseport_select_sock vmlinux EXPORT_SYMBOL
+0x807766ea usb_scuttle_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0xc4b29d8e drm_sysfs_connector_status_event vmlinux EXPORT_SYMBOL
+0x58b73bc7 match_wildcard vmlinux EXPORT_SYMBOL
+0x975519c1 asymmetric_key_id_same vmlinux EXPORT_SYMBOL_GPL
+0xa76d807f relay_close vmlinux EXPORT_SYMBOL_GPL
+0xf0c3ab3b can_sock_destruct net/can/can EXPORT_SYMBOL
+0x87ae529c nf_l4proto_log_invalid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xd991e3b9 dm_bufio_get_device_size drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0x2e869911 mlx4_is_eq_vector_valid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x5e5c05f5 fc_exch_mgr_free drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x3e095c46 st_nci_hci_event_received drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL
+0x9171bede net_ns_get_ownership vmlinux EXPORT_SYMBOL_GPL
+0x696f2b63 of_changeset_init vmlinux EXPORT_SYMBOL_GPL
+0xadf6d2e9 usb_reset_configuration vmlinux EXPORT_SYMBOL_GPL
+0xb16900ad cmdline_parts_parse vmlinux EXPORT_SYMBOL
+0xd005f277 sysfs_merge_group vmlinux EXPORT_SYMBOL_GPL
+0x2bb6099e dq_data_lock vmlinux EXPORT_SYMBOL
+0xc84a0a7e seq_hlist_start_rcu vmlinux EXPORT_SYMBOL
+0x51c1adb5 __get_user_pages_fast vmlinux EXPORT_SYMBOL_GPL
+0xfc7e2596 down_trylock vmlinux EXPORT_SYMBOL
+0x73dd8e8a ceph_pg_to_acting_primary net/ceph/libceph EXPORT_SYMBOL
+0x89960c37 rpc_clnt_setup_test_and_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x332b6021 ib_cm_listen drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0xba2d20f9 af_alg_pull_tsgl crypto/af_alg EXPORT_SYMBOL_GPL
+0x060f0f60 wimax_report_rfkill_sw vmlinux EXPORT_SYMBOL_GPL
+0xcca212a8 genl_register_family vmlinux EXPORT_SYMBOL
+0x4cf0f2ed of_phy_get_and_connect vmlinux EXPORT_SYMBOL
+0x41377d49 phy_suspend vmlinux EXPORT_SYMBOL
+0xf8fd1e68 iommu_dma_get_resv_regions vmlinux EXPORT_SYMBOL
+0xf3e6402e __bitmap_equal vmlinux EXPORT_SYMBOL
+0xd9c9857b page_zero_new_buffers vmlinux EXPORT_SYMBOL
+0x25f3342f mntget vmlinux EXPORT_SYMBOL
+0x4a7d7993 mntput vmlinux EXPORT_SYMBOL
+0x28ba994d fasync_helper vmlinux EXPORT_SYMBOL
+0xf3b451ca kdb_poll_funcs vmlinux EXPORT_SYMBOL_GPL
+0x9b9071cb get_old_itimerspec32 vmlinux EXPORT_SYMBOL_GPL
+0x7b4da6ff __init_rwsem vmlinux EXPORT_SYMBOL
+0xd0e5fc93 nla_put_labels net/mpls/mpls_router EXPORT_SYMBOL_GPL
+0x6842653e nft_register_flowtable_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x1ebe96b8 sbc_get_device_type drivers/target/target_core_mod EXPORT_SYMBOL
+0x502108d0 mpt_fwfault_debug drivers/message/fusion/mptbase EXPORT_SYMBOL
+0x810be37c ipvlan_link_new drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x358ac9bc mlx4_fmr_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb761506f hinic_get_toe_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xbd3e7542 cast_s1 crypto/cast_common EXPORT_SYMBOL_GPL
+0x3a3fc46c pnfs_ld_write_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xbcf58541 nvmem_device_cell_read vmlinux EXPORT_SYMBOL_GPL
+0x6ac6a42d phy_resolve_aneg_linkmode vmlinux EXPORT_SYMBOL_GPL
+0xed3b6bc3 drm_mode_is_420_also vmlinux EXPORT_SYMBOL
+0x6a4df47e drm_gem_unlock_reservations vmlinux EXPORT_SYMBOL
+0x7016714c iommu_domain_free vmlinux EXPORT_SYMBOL_GPL
+0x525870dc tpm_pcr_read vmlinux EXPORT_SYMBOL_GPL
+0xfe69325f percpu_ref_resurrect vmlinux EXPORT_SYMBOL_GPL
+0xe4ce275a blk_mq_flush_busy_ctxs vmlinux EXPORT_SYMBOL_GPL
+0x2f4113a2 dcookie_register vmlinux EXPORT_SYMBOL_GPL
+0xf58447f2 locks_mandatory_area vmlinux EXPORT_SYMBOL
+0xda8956fa nft_reject_init net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x6fc65d87 capi20_get_version drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x02dfd3d0 mlxsw_afk_key_info_block_encoding_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xac642a25 mlx4_counter_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x90d5d34d iscsi_tcp_recv_skb drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0xb8588a3a s3fwrn5_remove drivers/nfc/s3fwrn5/s3fwrn5 EXPORT_SYMBOL
+0xcb8c4a7c nfs_request_add_commit_list_locked fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x3bf89a91 xt_table_unlock vmlinux EXPORT_SYMBOL_GPL
+0x4b4d164c tcf_exts_dump_stats vmlinux EXPORT_SYMBOL
+0xf7acd823 sock_alloc_send_pskb vmlinux EXPORT_SYMBOL
+0x9cff037a xhci_resume vmlinux EXPORT_SYMBOL_GPL
+0xda769fba sas_port_add vmlinux EXPORT_SYMBOL
+0x0a2f0637 alloc_iova vmlinux EXPORT_SYMBOL_GPL
+0xaad0ae78 __bitmap_shift_right vmlinux EXPORT_SYMBOL
+0x4a6ec0d9 exportfs_encode_inode_fh vmlinux EXPORT_SYMBOL_GPL
+0xaaa918c9 ftrace_dump vmlinux EXPORT_SYMBOL_GPL
+0xed1bcb5d alarm_init vmlinux EXPORT_SYMBOL_GPL
+0xa24f23d8 __request_module vmlinux EXPORT_SYMBOL
+0xa3c9e92d ib_device_put drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x811811e0 fc_exch_done drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x12ab31f1 idr_alloc_u32 vmlinux EXPORT_SYMBOL_GPL
+0x4193ea49 fib_nexthop_info vmlinux EXPORT_SYMBOL_GPL
+0xf925e24e ip_valid_fib_dump_req vmlinux EXPORT_SYMBOL_GPL
+0x336c6f0d inet_register_protosw vmlinux EXPORT_SYMBOL
+0xf6f729ef __inet_inherit_port vmlinux EXPORT_SYMBOL_GPL
+0x441bf169 dev_deactivate vmlinux EXPORT_SYMBOL
+0x63e14d80 usb_ep_clear_halt vmlinux EXPORT_SYMBOL_GPL
+0x18d58837 usb_asmedia_modifyflowcontrol vmlinux EXPORT_SYMBOL_GPL
+0xad55d906 phy_find_first vmlinux EXPORT_SYMBOL
+0x3fe375d1 ata_sff_error_handler vmlinux EXPORT_SYMBOL_GPL
+0xd55da61c pm_runtime_force_suspend vmlinux EXPORT_SYMBOL_GPL
+0x5e417069 blkdev_reread_part vmlinux EXPORT_SYMBOL
+0x7bfdf0ff submit_bio_wait vmlinux EXPORT_SYMBOL
+0x23fd3028 vmalloc_node vmlinux EXPORT_SYMBOL
+0x9034a696 mempool_destroy vmlinux EXPORT_SYMBOL
+0x054e550b kernel_halt vmlinux EXPORT_SYMBOL_GPL
+0x37fa8c7d p9_client_begin_disconnect net/9p/9pnet EXPORT_SYMBOL
+0x655ead01 xfrm6_tunnel_spi_lookup net/ipv6/xfrm6_tunnel EXPORT_SYMBOL
+0xbdc5fb1e iw_destroy_cm_id drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x8192efb0 qtree_read_dquot fs/quota/quota_tree EXPORT_SYMBOL
+0x019036ac scsi_ioctl vmlinux EXPORT_SYMBOL
+0xebe921dc drm_fb_helper_set_suspend vmlinux EXPORT_SYMBOL
+0x36075bb5 iommu_group_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x88574459 set_binfmt vmlinux EXPORT_SYMBOL
+0xceab0311 strchrnul vmlinux EXPORT_SYMBOL
+0xeb4c425f km_policy_expired vmlinux EXPORT_SYMBOL
+0x9e4faeef dm_io_client_destroy vmlinux EXPORT_SYMBOL
+0x642d8b60 i2c_client_type vmlinux EXPORT_SYMBOL_GPL
+0xcf393335 usb_add_gadget_udc_release vmlinux EXPORT_SYMBOL_GPL
+0x73583fcf ttm_fbdev_mmap vmlinux EXPORT_SYMBOL
+0xe02abfbb drm_dp_downstream_max_bpc vmlinux EXPORT_SYMBOL
+0xdde3da20 tpm_default_chip vmlinux EXPORT_SYMBOL_GPL
+0x244e09b9 hvc_poll vmlinux EXPORT_SYMBOL_GPL
+0xff635952 pci_user_write_config_word vmlinux EXPORT_SYMBOL_GPL
+0x04557507 pci_iomap_wc vmlinux EXPORT_SYMBOL_GPL
+0xf094079b elv_rb_find vmlinux EXPORT_SYMBOL
+0x54e76a65 padata_register_cpumask_notifier vmlinux EXPORT_SYMBOL
+0xa1141a48 mptscsih_IssueTaskMgmt drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0xe46d6550 fscache_object_mark_killed fs/fscache/fscache EXPORT_SYMBOL
+0xa717e238 flow_rule_match_basic vmlinux EXPORT_SYMBOL
+0x8eb41573 call_fib_notifiers vmlinux EXPORT_SYMBOL
+0xfbbe0f79 __drm_atomic_helper_connector_destroy_state vmlinux EXPORT_SYMBOL
+0xd304b2bf devm_reset_controller_register vmlinux EXPORT_SYMBOL_GPL
+0x1c322959 regulator_bulk_force_disable vmlinux EXPORT_SYMBOL_GPL
+0x4f425f3e kstrdup_quotable_cmdline vmlinux EXPORT_SYMBOL_GPL
+0x9829fc11 __kfifo_out_peek_r vmlinux EXPORT_SYMBOL
+0x198620d7 security_add_mnt_opt vmlinux EXPORT_SYMBOL
+0xd5bead23 ib_cm_insert_listen drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x833bfbae target_send_busy drivers/target/target_core_mod EXPORT_SYMBOL
+0x8df7e28f ip6_frag_next vmlinux EXPORT_SYMBOL
+0x0841b62b netlink_ns_capable vmlinux EXPORT_SYMBOL
+0xc4212ab9 qdisc_class_hash_insert vmlinux EXPORT_SYMBOL
+0xc60f0e62 eth_commit_mac_addr_change vmlinux EXPORT_SYMBOL
+0xe5867808 dlci_ioctl_set vmlinux EXPORT_SYMBOL
+0xd3b5a9d4 set_get_info_func vmlinux EXPORT_SYMBOL
+0x990b06af pci_set_host_bridge_release vmlinux EXPORT_SYMBOL_GPL
+0x7a1d6af4 acpi_gpiochip_free_interrupts vmlinux EXPORT_SYMBOL_GPL
+0x3d5bb3fd refcount_dec_and_lock_irqsave vmlinux EXPORT_SYMBOL
+0x26de4ef5 generic_perform_write vmlinux EXPORT_SYMBOL
+0xfbe65c56 phonet_header_ops net/phonet/phonet EXPORT_SYMBOL
+0xed216a35 __rpc_wait_for_completion_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf3f1c1d1 rdma_init_qp_attr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xc806617f ib_redirect_mad_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x823e0696 macvlan_common_setup drivers/net/macvlan EXPORT_SYMBOL_GPL
+0x1ea31bb7 hinic_func_max_qnum drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x054bef45 layoutstats_timer fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xce5bfa33 pnfs_read_resend_pnfs fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x33548855 config_item_get_unless_zero fs/configfs/configfs EXPORT_SYMBOL
+0x0b22c69e opens_in_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL
+0xcb4cb7bf udp_set_csum vmlinux EXPORT_SYMBOL
+0x819a72ce scsi_print_sense vmlinux EXPORT_SYMBOL
+0xef01d1d1 scsi_report_device_reset vmlinux EXPORT_SYMBOL
+0x57a56afe of_drm_find_bridge vmlinux EXPORT_SYMBOL
+0x4f310ce2 locks_free_lock vmlinux EXPORT_SYMBOL
+0x50a2c7fa fixed_size_llseek vmlinux EXPORT_SYMBOL
+0x41286299 ip_set_get_ip6_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xc707ffb6 ip_set_get_ip4_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x01eb5e15 register_sound_mixer sound/soundcore EXPORT_SYMBOL
+0xeab7bf03 core_alua_check_nonop_delay drivers/target/target_core_mod EXPORT_SYMBOL
+0x5ba397f0 ndlc_close drivers/nfc/st-nci/st-nci EXPORT_SYMBOL
+0xf7d7541d inet6_add_offload vmlinux EXPORT_SYMBOL
+0xa6841fb6 tun_ptr_to_xdp vmlinux EXPORT_SYMBOL
+0x0a0a254e drm_dev_register vmlinux EXPORT_SYMBOL
+0x8cb15b57 regulator_allow_bypass vmlinux EXPORT_SYMBOL_GPL
+0x3537101e regulator_get_optional vmlinux EXPORT_SYMBOL_GPL
+0xefd2ae5f gpiochip_set_chained_irqchip vmlinux EXPORT_SYMBOL_GPL
+0x46410823 sg_miter_skip vmlinux EXPORT_SYMBOL
+0x95af43c7 simple_attr_release vmlinux EXPORT_SYMBOL_GPL
+0x047a3732 perf_aux_output_flag vmlinux EXPORT_SYMBOL_GPL
+0x4260ea1f mptscsih_bus_reset drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x57daadab fcoe_ctlr_device_delete drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0x949b614f reuseport_detach_prog vmlinux EXPORT_SYMBOL
+0x4381693b led_sysfs_disable vmlinux EXPORT_SYMBOL_GPL
+0x0d70b7ec usb_submit_urb vmlinux EXPORT_SYMBOL_GPL
+0x4b1293f7 ata_link_abort vmlinux EXPORT_SYMBOL_GPL
+0xc4c17350 fwnode_get_next_available_child_node vmlinux EXPORT_SYMBOL_GPL
+0xb2ddebba cpci_hp_unregister_controller vmlinux EXPORT_SYMBOL_GPL
+0x7ec78bdd rename_lock vmlinux EXPORT_SYMBOL
+0x85aa3a13 page_symlink_inode_operations vmlinux EXPORT_SYMBOL
+0x7f628ab6 file_path vmlinux EXPORT_SYMBOL
+0x8ddb7698 dma_dummy_ops vmlinux EXPORT_SYMBOL
+0xf5db326a xprt_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf56c2017 mlog_not_bits fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x1fbd16da ip_tos2prio vmlinux EXPORT_SYMBOL
+0x2f5c1223 __tracepoint_neigh_cleanup_and_release vmlinux EXPORT_SYMBOL_GPL
+0x27ab8e74 regmap_field_alloc vmlinux EXPORT_SYMBOL_GPL
+0x4e1b30fd register_virtio_device vmlinux EXPORT_SYMBOL_GPL
+0xb50f7dc8 pinctrl_register vmlinux EXPORT_SYMBOL_GPL
+0x96e5d30f gen_pool_set_algo vmlinux EXPORT_SYMBOL
+0x7b0192da kstrtou16 vmlinux EXPORT_SYMBOL
+0x3c80c06c kstrtoull vmlinux EXPORT_SYMBOL
+0xacd81eb3 jbd2_inode_cache vmlinux EXPORT_SYMBOL
+0x94961283 vunmap vmlinux EXPORT_SYMBOL
+0x4e6e8a72 write_cache_pages vmlinux EXPORT_SYMBOL
+0x12b709a9 dma_direct_sync_sg_for_cpu vmlinux EXPORT_SYMBOL
+0x346dd3fd iw_cm_disconnect drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x66555cd7 mlx5_cmd_exec_cb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x2db0fd9f xfrm_policy_alloc vmlinux EXPORT_SYMBOL
+0xf6afff8c led_blink_set vmlinux EXPORT_SYMBOL_GPL
+0x138724e3 devm_thermal_of_cooling_device_register vmlinux EXPORT_SYMBOL_GPL
+0xc950d2e4 mipi_dsi_dcs_enter_sleep_mode vmlinux EXPORT_SYMBOL
+0xe5c78a99 do_blank_screen vmlinux EXPORT_SYMBOL
+0xaa7c44f1 unregister_virtio_device vmlinux EXPORT_SYMBOL_GPL
+0x5fdf7e2f hisi_clk_register_phase vmlinux EXPORT_SYMBOL_GPL
+0x22887694 acpi_dev_pm_attach vmlinux EXPORT_SYMBOL_GPL
+0xd0bd487b hdmi_drm_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0xa970f289 pci_check_and_unmask_intx vmlinux EXPORT_SYMBOL_GPL
+0x67da9f7c sha512_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x9a8e35d4 fscrypt_get_symlink vmlinux EXPORT_SYMBOL_GPL
+0x5b029105 finish_open vmlinux EXPORT_SYMBOL
+0xe250ac42 kmem_cache_create_usercopy vmlinux EXPORT_SYMBOL
+0x788bfcd4 line6_disconnect sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL
+0x1772b540 mtd_del_partition drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0xb5c2723a bch_bset_init_next drivers/md/bcache/bcache EXPORT_SYMBOL
+0xde794b91 mlx5_accel_esp_destroy_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x4b62826c dlm_unlock fs/dlm/dlm EXPORT_SYMBOL_GPL
+0x117093be qdisc_class_hash_init vmlinux EXPORT_SYMBOL
+0xe9d4ede6 fwnode_property_read_string vmlinux EXPORT_SYMBOL_GPL
+0x9676eead drm_dp_dual_mode_max_tmds_clock vmlinux EXPORT_SYMBOL
+0xfa873ad0 prandom_seed vmlinux EXPORT_SYMBOL
+0xcff88c00 fuse_dev_install vmlinux EXPORT_SYMBOL_GPL
+0x79595c71 get_acl vmlinux EXPORT_SYMBOL
+0xb798bbea set_page_dirty_lock vmlinux EXPORT_SYMBOL
+0xd5fd02c0 rds_conn_create net/rds/rds EXPORT_SYMBOL_GPL
+0x56cf5fde cache_unregister_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc95ffbd8 ib_get_device_fw_str drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xeb0b618d get_mtd_device drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x5692cc3f hinic_support_toe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xcdc528ac fcoe_ctlr_link_down drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL
+0xb2ead97c kimage_vaddr vmlinux EXPORT_SYMBOL
+0xebf801ec tcp_v4_conn_request vmlinux EXPORT_SYMBOL
+0x1e3e1097 rtc_add_group vmlinux EXPORT_SYMBOL
+0x1422940e usb_get_descriptor vmlinux EXPORT_SYMBOL_GPL
+0x3567a36e genphy_c45_read_link vmlinux EXPORT_SYMBOL_GPL
+0x97b64a11 spi_async_locked vmlinux EXPORT_SYMBOL_GPL
+0x369a2bcb __tracepoint_spi_transfer_stop vmlinux EXPORT_SYMBOL
+0x16d105f2 ata_sas_port_alloc vmlinux EXPORT_SYMBOL_GPL
+0x390addd9 ata_port_abort vmlinux EXPORT_SYMBOL_GPL
+0x09c94fd9 acpi_dev_remove_driver_gpios vmlinux EXPORT_SYMBOL_GPL
+0x35e7950c blk_rq_init vmlinux EXPORT_SYMBOL
+0xca174e21 ceph_monc_get_version net/ceph/libceph EXPORT_SYMBOL
+0xf75550d4 rds_conn_destroy net/rds/rds EXPORT_SYMBOL_GPL
+0x86c12ebb icmp_ndo_send vmlinux EXPORT_SYMBOL
+0xda0d4f80 tcp_v4_syn_recv_sock vmlinux EXPORT_SYMBOL
+0x5d205914 usb_put_dev vmlinux EXPORT_SYMBOL_GPL
+0x2570a138 reservation_seqcount_string vmlinux EXPORT_SYMBOL
+0x1b965958 drm_panel_init vmlinux EXPORT_SYMBOL
+0x26815dbc drm_dp_link_rate_to_bw_code vmlinux EXPORT_SYMBOL
+0xdd742d72 __sg_free_table vmlinux EXPORT_SYMBOL
+0x030ea467 cryptd_ahash_queued vmlinux EXPORT_SYMBOL_GPL
+0x09bc430c kmem_cache_shrink vmlinux EXPORT_SYMBOL
+0x5d49aabc init_wait_var_entry vmlinux EXPORT_SYMBOL
+0x32bc0fcf preempt_notifier_dec vmlinux EXPORT_SYMBOL_GPL
+0x0402cbbf preempt_notifier_inc vmlinux EXPORT_SYMBOL_GPL
+0xf5b7fa8d kvm_read_guest_atomic vmlinux EXPORT_SYMBOL_GPL
+0xe2aae5cc crc8 lib/crc8 EXPORT_SYMBOL
+0xb5bef543 __iscsit_check_dataout_hdr drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x49045426 icmp_err_convert vmlinux EXPORT_SYMBOL
+0x601f665f dm_io_client_create vmlinux EXPORT_SYMBOL
+0x4842096d mdio_device_create vmlinux EXPORT_SYMBOL
+0x7e6f7cc1 mii_check_link vmlinux EXPORT_SYMBOL
+0x39064ff2 drm_atomic_helper_legacy_gamma_set vmlinux EXPORT_SYMBOL
+0xdb82b182 clk_gpio_mux_ops vmlinux EXPORT_SYMBOL_GPL
+0xd83a6302 acpi_device_fwnode_ops vmlinux EXPORT_SYMBOL_GPL
+0x928443b2 acpi_subsys_complete vmlinux EXPORT_SYMBOL_GPL
+0xcb733bf2 acpi_bus_set_power vmlinux EXPORT_SYMBOL
+0x5ce6bd32 gpiod_get_optional vmlinux EXPORT_SYMBOL_GPL
+0x28ab4fb9 pinctrl_gpio_free vmlinux EXPORT_SYMBOL_GPL
+0x1072a394 csum_partial_copy_from_user vmlinux EXPORT_SYMBOL
+0xa72957cc __dynamic_pr_debug vmlinux EXPORT_SYMBOL
+0xb7f990e9 rht_bucket_nested vmlinux EXPORT_SYMBOL_GPL
+0x597e4f49 blkcg_policy_register vmlinux EXPORT_SYMBOL_GPL
+0x1e5b03dc pm_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL
+0xf3bbd580 inet_diag_dump_one_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0x5f8246a7 rdma_query_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x78e75182 vfio_device_get_from_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x89724b81 mtd_is_locked drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x1a9fd0c9 mlx4_qp_release_range drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xbcba5d69 nlmclnt_init fs/lockd/lockd EXPORT_SYMBOL_GPL
+0xe5a7264c __tracepoint_nfs4_pnfs_commit_ds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xbfaa476f tcp_hashinfo vmlinux EXPORT_SYMBOL
+0x2d140a58 genl_unlock vmlinux EXPORT_SYMBOL
+0x9e90a4ac devlink_params_publish vmlinux EXPORT_SYMBOL_GPL
+0x948595ec of_changeset_action vmlinux EXPORT_SYMBOL_GPL
+0xa3be88ec of_n_size_cells vmlinux EXPORT_SYMBOL
+0x1a24a918 hid_report_raw_event vmlinux EXPORT_SYMBOL_GPL
+0x9d3c6860 dma_buf_begin_cpu_access vmlinux EXPORT_SYMBOL_GPL
+0x9fea720b blk_mq_requeue_request vmlinux EXPORT_SYMBOL
+0x852b91a3 crypto_aead_setauthsize vmlinux EXPORT_SYMBOL_GPL
+0x38746b25 fd_install vmlinux EXPORT_SYMBOL
+0x8b1843ac nf_nat_packet net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xe56d463b parport_release drivers/parport/parport EXPORT_SYMBOL
+0xebcc64a4 dm_bufio_get_block_data drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0x7c818e0f compat_ipv6_getsockopt vmlinux EXPORT_SYMBOL
+0xf19f5064 compat_ipv6_setsockopt vmlinux EXPORT_SYMBOL
+0x0b2e016c __phy_resume vmlinux EXPORT_SYMBOL
+0x69a0ff8d bio_devname vmlinux EXPORT_SYMBOL
+0xeff9bb6d direct_make_request vmlinux EXPORT_SYMBOL_GPL
+0x5c8caa0c vfs_fsync_range vmlinux EXPORT_SYMBOL
+0xc88087e1 memory_cgrp_subsys vmlinux EXPORT_SYMBOL
+0x3ce4ca6f disable_irq vmlinux EXPORT_SYMBOL
+0x55eecff4 bit_wait_io_timeout vmlinux EXPORT_SYMBOL_GPL
+0xdc9fa232 raw_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL
+0xd7a16516 vhost_add_used_n drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x3a20a9d7 transport_set_vpd_ident_type drivers/target/target_core_mod EXPORT_SYMBOL
+0x875dd680 cdrom_get_media_event drivers/cdrom/cdrom EXPORT_SYMBOL
+0x116dc0ae mlx5_core_dct_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x2dec793e mlx4_free_cmd_mailbox drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x0102f950 hinic_msg_to_mgmt_sync drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x9af151ba iscsi_tcp_hdr_recv_prep drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x33fe7c6e fcoe_fcf_device_delete drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0x405b6e05 srp_parse_tmo drivers/scsi/scsi_transport_srp EXPORT_SYMBOL
+0xf2d74d83 af_alg_async_cb crypto/af_alg EXPORT_SYMBOL_GPL
+0x5ef1870c poly1305_core_emit crypto/poly1305_generic EXPORT_SYMBOL_GPL
+0x206c38a9 nfs_create fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xfbb8a761 strscpy_pad vmlinux EXPORT_SYMBOL
+0x16da1f88 devlink_fmsg_u32_put vmlinux EXPORT_SYMBOL_GPL
+0x404cd4e1 dm_set_target_max_io_len vmlinux EXPORT_SYMBOL_GPL
+0x660965d1 hisi_sas_phy_oob_ready vmlinux EXPORT_SYMBOL_GPL
+0xb91e2fe8 scsi_host_alloc vmlinux EXPORT_SYMBOL
+0xdc380b9c bus_get_device_klist vmlinux EXPORT_SYMBOL_GPL
+0xe61d5fec fscrypt_drop_inode vmlinux EXPORT_SYMBOL_GPL
+0x40be5db5 path_put vmlinux EXPORT_SYMBOL
+0x56697b05 apply_to_page_range vmlinux EXPORT_SYMBOL_GPL
+0x1984d421 out_of_line_wait_on_bit vmlinux EXPORT_SYMBOL
+0x261adf16 tcp_vegas_state net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL
+0x03a7544b nft_register_expr net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x2899d726 mlx5_core_modify_hca_vport_context drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x7b7ed234 fc_remote_port_delete drivers/scsi/scsi_transport_fc EXPORT_SYMBOL
+0xf8f23ccb xfrm6_protocol_deregister vmlinux EXPORT_SYMBOL
+0xdc1e95de xfrm4_protocol_deregister vmlinux EXPORT_SYMBOL
+0x09986ac5 led_set_brightness_sync vmlinux EXPORT_SYMBOL_GPL
+0x001fb82d gen10g_config_aneg vmlinux EXPORT_SYMBOL_GPL
+0x4828d4f5 drm_irq_uninstall vmlinux EXPORT_SYMBOL
+0xc3b9c9a4 pci_request_irq vmlinux EXPORT_SYMBOL
+0x73c2554f __iowrite64_copy vmlinux EXPORT_SYMBOL_GPL
+0x28daca8b invalidate_partition vmlinux EXPORT_SYMBOL
+0x01aac0ae handle_mm_fault vmlinux EXPORT_SYMBOL_GPL
+0x13b65f27 probe_user_read vmlinux EXPORT_SYMBOL_GPL
+0xd217e9e6 trace_set_clr_event vmlinux EXPORT_SYMBOL_GPL
+0x3c9d114c blk_add_driver_data vmlinux EXPORT_SYMBOL_GPL
+0xf6449ec8 kmsg_dump_rewind vmlinux EXPORT_SYMBOL_GPL
+0xbdc33f2f kick_process vmlinux EXPORT_SYMBOL_GPL
+0x2d47ac83 lc_committed lib/lru_cache EXPORT_SYMBOL
+0xa698f998 ceph_free_lockers net/ceph/libceph EXPORT_SYMBOL
+0xea58f1be __l2tp_session_unhash net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x47077bae usb_function_register drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x3686ea09 spi_print_msg drivers/scsi/scsi_transport_spi EXPORT_SYMBOL
+0x5506b8f7 netlbl_bitmap_setbit vmlinux EXPORT_SYMBOL
+0x7d4b176a netlbl_catmap_setbit vmlinux EXPORT_SYMBOL
+0x85c54b61 efivar_validate vmlinux EXPORT_SYMBOL_GPL
+0x50389929 dm_remap_zone_report vmlinux EXPORT_SYMBOL_GPL
+0x3fa970d0 __devm_spi_alloc_controller vmlinux EXPORT_SYMBOL_GPL
+0xbfbc5434 pciserial_resume_ports vmlinux EXPORT_SYMBOL_GPL
+0xe84f6e5c pciserial_remove_ports vmlinux EXPORT_SYMBOL_GPL
+0xd8026f68 is_acpi_device_node vmlinux EXPORT_SYMBOL
+0x0239aa9c sg_miter_stop vmlinux EXPORT_SYMBOL
+0x0da10ec3 security_sock_graft vmlinux EXPORT_SYMBOL
+0x2ee0bcd3 key_reject_and_link vmlinux EXPORT_SYMBOL
+0x3f9d50de jbd2_journal_init_jbd_inode vmlinux EXPORT_SYMBOL
+0x5f5bad65 hinic_set_fcoe_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xdad35cb4 fscache_put_operation fs/fscache/fscache EXPORT_SYMBOL
+0xdd4c814b xt_proto_init vmlinux EXPORT_SYMBOL_GPL
+0x91c403bf devlink_port_unregister vmlinux EXPORT_SYMBOL_GPL
+0x34a4640a drm_dbg vmlinux EXPORT_SYMBOL
+0x59923626 __sg_alloc_table_from_pages vmlinux EXPORT_SYMBOL
+0xf2215f74 blk_finish_plug vmlinux EXPORT_SYMBOL
+0x0b1beb31 vmalloc_32_user vmlinux EXPORT_SYMBOL
+0x03bc993e ipmi_set_my_LUN drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xc89813a1 ping_recvmsg vmlinux EXPORT_SYMBOL_GPL
+0xfab30dc0 mdio_bus_exit vmlinux EXPORT_SYMBOL_GPL
+0x961fb0e3 ahci_platform_disable_phys vmlinux EXPORT_SYMBOL_GPL
+0x4d18f256 sata_pmp_qc_defer_cmd_switch vmlinux EXPORT_SYMBOL_GPL
+0x1322adff ata_sff_port_intr vmlinux EXPORT_SYMBOL_GPL
+0xb4c33f92 ata_scsi_change_queue_depth vmlinux EXPORT_SYMBOL_GPL
+0x26bbbceb pm_clk_add_clk vmlinux EXPORT_SYMBOL_GPL
+0xa90086c4 drm_master_put vmlinux EXPORT_SYMBOL
+0x9ae3be5d pci_get_subsys vmlinux EXPORT_SYMBOL
+0x220f6228 rcutorture_get_gp_data vmlinux EXPORT_SYMBOL_GPL
+0xd5824c65 nfc_hci_target_discovered net/nfc/hci/hci EXPORT_SYMBOL
+0x657533eb target_submit_tmr drivers/target/target_core_mod EXPORT_SYMBOL
+0x16180de5 gether_get_host_addr_cdc drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0x970c2bfb mlxsw_core_trap_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x3d9ee9f0 clear_page vmlinux EXPORT_SYMBOL
+0x81188c30 match_string vmlinux EXPORT_SYMBOL
+0xffb6cc58 ip_local_out vmlinux EXPORT_SYMBOL_GPL
+0x3ef4f570 xt_compat_match_from_user vmlinux EXPORT_SYMBOL_GPL
+0xa63706fa ehci_suspend vmlinux EXPORT_SYMBOL_GPL
+0x1c5b9579 usb_hcd_check_unlink_urb vmlinux EXPORT_SYMBOL_GPL
+0x987520e2 usb_find_common_endpoints_reverse vmlinux EXPORT_SYMBOL_GPL
+0x68ac8802 phy_device_free vmlinux EXPORT_SYMBOL
+0x8c3b9688 regcache_cache_bypass vmlinux EXPORT_SYMBOL_GPL
+0x51f09aeb fwnode_graph_get_remote_endpoint vmlinux EXPORT_SYMBOL_GPL
+0x0ce19729 mb_cache_entry_touch vmlinux EXPORT_SYMBOL
+0x16f5cbdc generic_permission vmlinux EXPORT_SYMBOL
+0x64faf25c vprintk_default vmlinux EXPORT_SYMBOL_GPL
+0x63c8129d nmi_panic vmlinux EXPORT_SYMBOL
+0x40b9855c mlxsw_core_trap_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xfb11d469 mlx5_query_nic_vport_node_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x0532dbf5 mroute6_is_socket vmlinux EXPORT_SYMBOL
+0x85540ebc nvmem_cell_put vmlinux EXPORT_SYMBOL_GPL
+0x165ddcce pci_test_config_bits vmlinux EXPORT_SYMBOL_GPL
+0x6615069e drm_dp_dsc_sink_max_slice_count vmlinux EXPORT_SYMBOL
+0x6f887704 pci_msi_mask_irq vmlinux EXPORT_SYMBOL_GPL
+0x06197a15 fuse_dev_fiq_ops vmlinux EXPORT_SYMBOL_GPL
+0xbb4c7570 pids_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0xdd93b427 udp_tunnel_notify_del_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0x4a0716d2 snd_ctl_notify sound/core/snd EXPORT_SYMBOL
+0xd4f81b51 ibdev_warn drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf521e0c3 qlt_free_cmd drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL
+0xe39a6f0b fc_get_host_stats drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x22f7c1f3 xt_compat_target_from_user vmlinux EXPORT_SYMBOL_GPL
+0x0ffaa944 usb_ep_queue vmlinux EXPORT_SYMBOL_GPL
+0xf24b00e5 mdio_device_register vmlinux EXPORT_SYMBOL
+0x7a6f040f regmap_async_complete vmlinux EXPORT_SYMBOL_GPL
+0xd7e8b31d uart_get_baud_rate vmlinux EXPORT_SYMBOL
+0x24f39c39 reset_control_reset vmlinux EXPORT_SYMBOL_GPL
+0x4100a662 clk_get_scaled_duty_cycle vmlinux EXPORT_SYMBOL_GPL
+0x30c992c2 pci_find_next_bus vmlinux EXPORT_SYMBOL
+0x57900416 gen_pool_fixed_alloc vmlinux EXPORT_SYMBOL
+0x7181db30 atomic_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL
+0x50b36f82 snd_pcm_hw_constraint_step sound/core/snd-pcm EXPORT_SYMBOL
+0x4f477261 dm_bm_checksum drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x83131a43 unregister_cc770dev drivers/net/can/cc770/cc770 EXPORT_SYMBOL_GPL
+0x3c221a29 mlx4_get_admin_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xc74e053c scsi_vpd_tpg_id vmlinux EXPORT_SYMBOL
+0x6b73e2da ahci_platform_suspend vmlinux EXPORT_SYMBOL_GPL
+0xf576b6ed drm_framebuffer_init vmlinux EXPORT_SYMBOL
+0x92295424 clk_register_gate vmlinux EXPORT_SYMBOL_GPL
+0x97a57333 crc_t10dif_update vmlinux EXPORT_SYMBOL
+0x15314771 trace_call_bpf vmlinux EXPORT_SYMBOL_GPL
+0xc1704284 kgdb_register_io_module vmlinux EXPORT_SYMBOL_GPL
+0x0fff5afc time64_to_tm vmlinux EXPORT_SYMBOL
+0xbe8d5de2 set_user_nice vmlinux EXPORT_SYMBOL
+0xd8325946 pcibus_to_node vmlinux EXPORT_SYMBOL
+0x957b0113 gss_pseudoflavor_to_service net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL
+0x925a1963 esp6_output_tail net/ipv6/esp6 EXPORT_SYMBOL_GPL
+0x8d864069 snd_pcm_rate_range_to_bits sound/core/snd-pcm EXPORT_SYMBOL_GPL
+0x224a352e ib_modify_port drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd28256cf mlxsw_afa_block_append_allocated_counter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x3e0a4d63 hinic_get_func_mode drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xc0667d11 netdev_notify_peers vmlinux EXPORT_SYMBOL
+0x233e83f4 iscsi_create_flashnode_conn vmlinux EXPORT_SYMBOL_GPL
+0x8cbc73a3 drm_atomic_set_crtc_for_plane vmlinux EXPORT_SYMBOL
+0xb5607d33 splice_direct_to_actor vmlinux EXPORT_SYMBOL
+0x2c338c0d rpc_release_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x7991ee42 br_fdb_find_port net/bridge/bridge EXPORT_SYMBOL_GPL
+0x6c95e586 nf_nat_pptp_hook_inbound net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL
+0x2b9da7a4 genl_lock vmlinux EXPORT_SYMBOL
+0x904db4bb qdisc_class_hash_grow vmlinux EXPORT_SYMBOL
+0x1d686167 __dev_kfree_skb_any vmlinux EXPORT_SYMBOL
+0x66f702d1 __dev_kfree_skb_irq vmlinux EXPORT_SYMBOL
+0xc2fff2cf sock_setsockopt vmlinux EXPORT_SYMBOL
+0xad0690ef usb_unlocked_enable_lpm vmlinux EXPORT_SYMBOL_GPL
+0x40a07af5 drm_dp_mst_deallocate_vcpi vmlinux EXPORT_SYMBOL
+0xd1bfaecb tty_port_close_start vmlinux EXPORT_SYMBOL
+0x60998b5a dma_find_channel vmlinux EXPORT_SYMBOL
+0x96f4d748 acpi_subsys_runtime_resume vmlinux EXPORT_SYMBOL_GPL
+0xef4f0560 fb_find_mode vmlinux EXPORT_SYMBOL
+0x7e2f804a unlock_buffer vmlinux EXPORT_SYMBOL
+0xd817d9c8 simple_transaction_release vmlinux EXPORT_SYMBOL
+0x25345913 vhost_dev_has_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x6c20d72e ib_alloc_mr_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x142072c0 transport_set_vpd_assoc drivers/target/target_core_mod EXPORT_SYMBOL
+0xe29304e6 iavf_register_client drivers/net/ethernet/intel/iavf/iavf EXPORT_SYMBOL
+0x6ee6b976 crypto_xchacha_crypt crypto/chacha_generic EXPORT_SYMBOL_GPL
+0xa8613464 nfs_commit_free fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xceccd9fe nfs_try_mount fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x40f8bd4e klist_add_before vmlinux EXPORT_SYMBOL_GPL
+0xa61ced89 qdisc_put_rtab vmlinux EXPORT_SYMBOL
+0xa172a570 __phy_modify_changed vmlinux EXPORT_SYMBOL_GPL
+0xe22fcb8a sas_queuecommand vmlinux EXPORT_SYMBOL_GPL
+0x3efd1889 dax_direct_access vmlinux EXPORT_SYMBOL_GPL
+0x09eb109a subsys_find_device_by_id vmlinux EXPORT_SYMBOL_GPL
+0x37b53d41 vga_put vmlinux EXPORT_SYMBOL
+0x09769037 dmt_modes vmlinux EXPORT_SYMBOL
+0x8ec0e873 crypto_register_rngs vmlinux EXPORT_SYMBOL_GPL
+0xc34046dd crypto_attr_alg2 vmlinux EXPORT_SYMBOL_GPL
+0xdb3b9bb6 key_invalidate vmlinux EXPORT_SYMBOL
+0xa1839690 __tracepoint_kmem_cache_alloc vmlinux EXPORT_SYMBOL
+0x68a94ab0 freq_qos_update_request vmlinux EXPORT_SYMBOL_GPL
+0x3928efe9 __per_cpu_offset vmlinux EXPORT_SYMBOL
+0x220478f2 set_sig_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x1762eee4 proto_register vmlinux EXPORT_SYMBOL
+0xfbb5dcf8 spi_replace_transfers vmlinux EXPORT_SYMBOL_GPL
+0xa1dbf09b to_software_node vmlinux EXPORT_SYMBOL_GPL
+0xe5dc1a2a _dev_crit vmlinux EXPORT_SYMBOL
+0x7429b156 drm_crtc_helper_set_mode vmlinux EXPORT_SYMBOL
+0x95ef63d0 tty_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x96d95059 amba_ahb_device_add vmlinux EXPORT_SYMBOL_GPL
+0x7d0ba682 gen_pool_virt_to_phys vmlinux EXPORT_SYMBOL
+0x51d12d4e acpi_pci_disabled vmlinux EXPORT_SYMBOL
+0x29de71fb nf_reject_ip_tcphdr_get net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL
+0x5ba42bff tcf_block_put vmlinux EXPORT_SYMBOL
+0x8b2a4c12 devlink_dpipe_entry_ctx_append vmlinux EXPORT_SYMBOL_GPL
+0xc176e980 usb_acpi_set_power_state vmlinux EXPORT_SYMBOL_GPL
+0xab5ae163 genphy_write_mmd_unsupported vmlinux EXPORT_SYMBOL
+0x20ae78c9 drm_plane_cleanup vmlinux EXPORT_SYMBOL
+0x0fb2f8a4 mktime64 vmlinux EXPORT_SYMBOL
+0x668b19a1 down_read vmlinux EXPORT_SYMBOL
+0x609280f1 nfs_alloc_server fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xbe47515c km_report vmlinux EXPORT_SYMBOL
+0x2be28d8a skb_copy_expand vmlinux EXPORT_SYMBOL
+0x09b0d427 of_nvmem_device_get vmlinux EXPORT_SYMBOL_GPL
+0x70b1bb99 sas_phy_reset vmlinux EXPORT_SYMBOL_GPL
+0x197e26b5 device_property_read_string_array vmlinux EXPORT_SYMBOL_GPL
+0x4f82311f transport_setup_device vmlinux EXPORT_SYMBOL_GPL
+0x950ee7d1 fb_find_logo vmlinux EXPORT_SYMBOL_GPL
+0xd6daa834 pci_hp_destroy vmlinux EXPORT_SYMBOL_GPL
+0x33b63487 pci_release_resource vmlinux EXPORT_SYMBOL
+0xc1d8cfaf __fdget vmlinux EXPORT_SYMBOL
+0x9edf11d4 d_lookup vmlinux EXPORT_SYMBOL
+0xa6491eb7 dmam_free_coherent vmlinux EXPORT_SYMBOL
+0xca15413f ZSTD_resetDStream lib/zstd/zstd_decompress EXPORT_SYMBOL
+0xe34580d8 nf_tables_destroy_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x22ff0236 ubi_leb_change drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL
+0xf334bc8b nfs_create_server fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x614150ff __tracepoint_devlink_hwerr vmlinux EXPORT_SYMBOL_GPL
+0xb84eea4a phy_10gbit_fec_features vmlinux EXPORT_SYMBOL_GPL
+0xc9672b0b phy_write_paged vmlinux EXPORT_SYMBOL
+0x538d073d phy_duplex_to_str vmlinux EXPORT_SYMBOL_GPL
+0xf811e69d scsi_eh_flush_done_q vmlinux EXPORT_SYMBOL
+0x5063c5ae drm_atomic_bridge_pre_enable vmlinux EXPORT_SYMBOL
+0xab47fdea virtio_check_driver_offered_feature vmlinux EXPORT_SYMBOL_GPL
+0xdcc6c2dd clk_hw_register_fixed_rate_with_accuracy vmlinux EXPORT_SYMBOL_GPL
+0x834b44bf pci_iomap vmlinux EXPORT_SYMBOL
+0xd45cc6ca bin2hex vmlinux EXPORT_SYMBOL
+0x13c4bda6 path_is_under vmlinux EXPORT_SYMBOL
+0x3dc59255 vsock_core_get_transport net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xa76126fd transport_wait_for_tasks drivers/target/target_core_mod EXPORT_SYMBOL
+0x16081ffb can_dlc2len drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0x584b8482 nfs_inc_attr_generation_counter fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x91a8b052 unix_attach_fds vmlinux EXPORT_SYMBOL
+0xd7a37eea skb_morph vmlinux EXPORT_SYMBOL_GPL
+0x7c46233a cpufreq_quick_get vmlinux EXPORT_SYMBOL
+0xceb5c427 serio_unregister_port vmlinux EXPORT_SYMBOL
+0x9c8d1457 drm_mode_equal vmlinux EXPORT_SYMBOL
+0xe5c60bd2 percpu_counter_set vmlinux EXPORT_SYMBOL
+0xfe6e4041 rht_bucket_nested_insert vmlinux EXPORT_SYMBOL_GPL
+0xb0747ed2 rcu_cpu_stall_suppress vmlinux EXPORT_SYMBOL_GPL
+0xab8bc1a2 snd_pcm_rate_mask_intersect sound/core/snd-pcm EXPORT_SYMBOL_GPL
+0x68a24153 snd_pcm_format_physical_width sound/core/snd-pcm EXPORT_SYMBOL
+0xb5493df1 transport_generic_handle_tmr drivers/target/target_core_mod EXPORT_SYMBOL
+0xdc3f2860 fc_exch_mgr_reset drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xbbf5e5c4 pnfs_ld_read_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x6185b747 radix_tree_gang_lookup_tag vmlinux EXPORT_SYMBOL
+0xe6f52443 klist_add_head vmlinux EXPORT_SYMBOL_GPL
+0xff53bd7a hwmon_device_register_with_info vmlinux EXPORT_SYMBOL_GPL
+0x85af8c17 tun_get_tx_ring vmlinux EXPORT_SYMBOL_GPL
+0xc00fc70f scsi_remove_host vmlinux EXPORT_SYMBOL
+0x0eaa9152 drm_atomic_get_connector_state vmlinux EXPORT_SYMBOL
+0xc26a64ff of_clk_get vmlinux EXPORT_SYMBOL
+0x27e0de54 pci_request_selected_regions_exclusive vmlinux EXPORT_SYMBOL
+0xb05d4552 acpi_dev_add_driver_gpios vmlinux EXPORT_SYMBOL_GPL
+0x76d698ac pinctrl_register_and_init vmlinux EXPORT_SYMBOL_GPL
+0x69e683de uuid_gen vmlinux EXPORT_SYMBOL_GPL
+0x8d293b39 gfn_to_pfn vmlinux EXPORT_SYMBOL_GPL
+0x68a41696 gfn_to_hva vmlinux EXPORT_SYMBOL_GPL
+0xc38f87c1 flush_dcache_page vmlinux EXPORT_SYMBOL
+0x87bee547 btracker_queue drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x01d2f9ac dm_rh_recovery_start drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x2be8d135 mlx4_get_slave_from_roce_gid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xd374eb82 af_alg_release_parent crypto/af_alg EXPORT_SYMBOL_GPL
+0xfb19838a scsi_is_host_device vmlinux EXPORT_SYMBOL
+0x04dc488a ata_sas_tport_add vmlinux EXPORT_SYMBOL_GPL
+0x719e17ff clk_notifier_unregister vmlinux EXPORT_SYMBOL_GPL
+0xa23f4b1e gpiochip_irqchip_irq_valid vmlinux EXPORT_SYMBOL_GPL
+0xbe0036bb devm_ioremap_nocache vmlinux EXPORT_SYMBOL
+0x639d52c5 sync_dirty_buffer vmlinux EXPORT_SYMBOL
+0x5106aca8 ebt_register_table net/bridge/netfilter/ebtables EXPORT_SYMBOL
+0xcfa6c9a9 vhost_add_used_and_signal_n drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xc042ba8b ib_fmr_pool_map_phys drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x19acd14e __tracepoint_bcache_journal_entry_full drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x414436c5 hinic_ceq_num drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x82c3031e tcp_reno_ssthresh vmlinux EXPORT_SYMBOL_GPL
+0xca0e2f15 netlink_has_listeners vmlinux EXPORT_SYMBOL_GPL
+0xc025016c flow_keys_dissector vmlinux EXPORT_SYMBOL
+0xcbfde1a8 of_irq_to_resource_table vmlinux EXPORT_SYMBOL_GPL
+0xcedba996 of_detach_node vmlinux EXPORT_SYMBOL_GPL
+0xf1421d13 drm_mode_sort vmlinux EXPORT_SYMBOL
+0xc16be39d iter_div_u64_rem vmlinux EXPORT_SYMBOL
+0x535c7ca6 dquot_get_next_id vmlinux EXPORT_SYMBOL
+0xdedd50bb dma_supported vmlinux EXPORT_SYMBOL
+0xaf20b267 atm_init_aal5 net/atm/atm EXPORT_SYMBOL
+0xf96eb6bd rdma_resolve_route drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x77d3a3b8 sbc_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL
+0x17f341a0 i8042_lock_chip drivers/input/serio/i8042 EXPORT_SYMBOL
+0x5b85321f mlx5_comp_vectors_count drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x66a585cc tcf_exts_num_actions vmlinux EXPORT_SYMBOL
+0x64c4ad82 flow_indr_block_call vmlinux EXPORT_SYMBOL_GPL
+0x977c24b9 usb_alloc_urb vmlinux EXPORT_SYMBOL_GPL
+0x6185dbda phy_driver_is_genphy vmlinux EXPORT_SYMBOL_GPL
+0x09019059 spi_setup vmlinux EXPORT_SYMBOL_GPL
+0x74b968d1 add_disk_randomness vmlinux EXPORT_SYMBOL_GPL
+0x55177fee serial8250_get_port vmlinux EXPORT_SYMBOL_GPL
+0x4f2593f0 btree_update vmlinux EXPORT_SYMBOL_GPL
+0x67adbb2b inode_init_once vmlinux EXPORT_SYMBOL
+0x3bad6d56 uprobe_register vmlinux EXPORT_SYMBOL_GPL
+0x369fcd70 tracing_snapshot vmlinux EXPORT_SYMBOL_GPL
+0x73d69364 ring_buffer_change_overwrite vmlinux EXPORT_SYMBOL_GPL
+0xbe687e88 wake_up_all_idle_cpus vmlinux EXPORT_SYMBOL_GPL
+0x05900f85 dma_cache_sync vmlinux EXPORT_SYMBOL
+0x426309be __srcu_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0xe4b051cf raid6_datap_recov lib/raid6/raid6_pq EXPORT_SYMBOL_GPL
+0xc8aafd39 rdma_reject_msg drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x434d8ccd transport_backend_register drivers/target/target_core_mod EXPORT_SYMBOL
+0x2f1dfbef md_start drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x4f146550 nf_ip6_checksum vmlinux EXPORT_SYMBOL
+0x0b86a51d get_net_ns vmlinux EXPORT_SYMBOL_GPL
+0x598b4e39 thermal_add_hwmon_sysfs vmlinux EXPORT_SYMBOL_GPL
+0xa478f791 device_move vmlinux EXPORT_SYMBOL_GPL
+0xd9148fa6 ttm_pool_unpopulate vmlinux EXPORT_SYMBOL
+0x6257dda7 clk_rate_exclusive_get vmlinux EXPORT_SYMBOL_GPL
+0xdc6699cb acpi_dev_free_resource_list vmlinux EXPORT_SYMBOL_GPL
+0x47143a3d gpiod_cansleep vmlinux EXPORT_SYMBOL_GPL
+0xc2615ec2 scsi_cmd_ioctl vmlinux EXPORT_SYMBOL
+0xd178e211 mtd_block_isbad drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x2ac29773 alloc_can_err_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0xa6303da8 tcp_md5_do_del vmlinux EXPORT_SYMBOL
+0x4c3edf4a neigh_seq_stop vmlinux EXPORT_SYMBOL
+0x1635528b usb_stor_clear_halt vmlinux EXPORT_SYMBOL_GPL
+0x40e0ac49 mipi_dsi_detach vmlinux EXPORT_SYMBOL
+0x4aaa57dc drm_of_component_match_add vmlinux EXPORT_SYMBOL_GPL
+0xfc05f052 pci_user_write_config_dword vmlinux EXPORT_SYMBOL_GPL
+0xf17b19dc current_in_userns vmlinux EXPORT_SYMBOL
+0x66decfd5 ns_to_timespec vmlinux EXPORT_SYMBOL
+0xf92f0f4c p9_tag_lookup net/9p/9pnet EXPORT_SYMBOL
+0xb51a3724 rpc_pipefs_notifier_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xaeb7451e ax25_defaddr net/ax25/ax25 EXPORT_SYMBOL_GPL
+0xad265d98 mtd_read_user_prot_reg drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x46c38a2e mtd_get_user_prot_info drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x691bdbeb target_put_sess_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0x73fc1a10 target_get_sess_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0x06d94b0a __tracepoint_bcache_gc_start drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x489eda10 memset32 vmlinux EXPORT_SYMBOL
+0x3e0742ee ip6_fraglist_prepare vmlinux EXPORT_SYMBOL
+0xdaee9906 flow_rule_match_enc_control vmlinux EXPORT_SYMBOL
+0x364aa717 dev_mc_del_global vmlinux EXPORT_SYMBOL
+0x1bff6a3f dev_mc_add_global vmlinux EXPORT_SYMBOL
+0xd9e92d4e drm_framebuffer_cleanup vmlinux EXPORT_SYMBOL
+0xeafe07b8 clk_bulk_prepare vmlinux EXPORT_SYMBOL_GPL
+0x306f5b7f pnp_device_attach vmlinux EXPORT_SYMBOL
+0x90e5701e acpi_dev_suspend vmlinux EXPORT_SYMBOL_GPL
+0x33d04508 fb_prepare_logo vmlinux EXPORT_SYMBOL
+0x4ece3615 blocking_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL
+0xaf21632f notifier_err_inject_init lib/notifier-error-inject EXPORT_SYMBOL_GPL
+0xba55d23e crc7_be lib/crc7 EXPORT_SYMBOL
+0x99d81171 hinic_api_csr_wr32 drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x2a7f16d2 hinic_set_ci_table drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x4dbd31a8 fcoe_ctlr_device_add drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0xc0025176 libfc_vport_create drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x460a9857 fifo_create_dflt vmlinux EXPORT_SYMBOL
+0x6ffa38c1 devlink_flash_update_end_notify vmlinux EXPORT_SYMBOL_GPL
+0xee3dc9e1 neigh_table_init vmlinux EXPORT_SYMBOL
+0x0a936085 cpuidle_register_device vmlinux EXPORT_SYMBOL_GPL
+0x59b2adbf input_ff_effect_from_user vmlinux EXPORT_SYMBOL_GPL
+0x465a3565 attribute_container_register vmlinux EXPORT_SYMBOL_GPL
+0x4ea9c122 blk_queue_segment_boundary vmlinux EXPORT_SYMBOL
+0xcb421170 acomp_request_alloc vmlinux EXPORT_SYMBOL_GPL
+0x1a44758e blkcipher_walk_virt_block vmlinux EXPORT_SYMBOL_GPL
+0x88e52c64 sysfs_create_link vmlinux EXPORT_SYMBOL_GPL
+0x55ed1d9b task_active_pid_ns vmlinux EXPORT_SYMBOL_GPL
+0xb502ea05 lc_seq_dump_details lib/lru_cache EXPORT_SYMBOL
+0x66d0fc7b svc_set_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x80aa4656 ipmi_free_recv_msg drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xbfa89921 tcp_fastopen_defer_connect vmlinux EXPORT_SYMBOL
+0xc8e7622c inetpeer_invalidate_tree vmlinux EXPORT_SYMBOL
+0xdb1695a3 __neigh_create vmlinux EXPORT_SYMBOL
+0x730af10a cpufreq_freq_transition_end vmlinux EXPORT_SYMBOL_GPL
+0xccf9b37c power_supply_powers vmlinux EXPORT_SYMBOL_GPL
+0xe126553f __tracepoint_unmap vmlinux EXPORT_SYMBOL_GPL
+0x809712ff hdmi_avi_infoframe_pack vmlinux EXPORT_SYMBOL
+0x676bd4ca pci_add_dynid vmlinux EXPORT_SYMBOL_GPL
+0xa38fbe80 gpiod_set_debounce vmlinux EXPORT_SYMBOL_GPL
+0x4e6e61f1 trace_array_destroy vmlinux EXPORT_SYMBOL_GPL
+0x6d294e43 clock_t_to_jiffies vmlinux EXPORT_SYMBOL
+0x9a14a594 capable_wrt_inode_uidgid vmlinux EXPORT_SYMBOL
+0xa7b4cb0d kvm_vcpu_block vmlinux EXPORT_SYMBOL_GPL
+0xf8bf8e22 ZSTD_DDictWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL
+0xe97f4ce5 qword_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc8332c06 ip_vs_nfct_expect_related net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x5e223438 snd_pcm_lib_preallocate_pages_for_all sound/core/snd-pcm EXPORT_SYMBOL
+0xc520b616 vringh_init_kern drivers/vhost/vringh EXPORT_SYMBOL
+0xc5156bf3 fanout_mutex vmlinux EXPORT_SYMBOL_GPL
+0x2251eb44 mr_vif_seq_next vmlinux EXPORT_SYMBOL
+0xffb1076d skb_mac_gso_segment vmlinux EXPORT_SYMBOL
+0xc7d2b7e5 fixed_phy_register_with_gpiod vmlinux EXPORT_SYMBOL_GPL
+0xb4f8e20a drm_gem_shmem_vm_ops vmlinux EXPORT_SYMBOL_GPL
+0x070b28aa drm_ht_remove_item vmlinux EXPORT_SYMBOL
+0x01c5101b dim_on_top vmlinux EXPORT_SYMBOL
+0x9ff130f9 pcim_iomap_table vmlinux EXPORT_SYMBOL
+0xdfce35c6 crypto_larval_alloc vmlinux EXPORT_SYMBOL_GPL
+0xffad426b debugfs_attr_write vmlinux EXPORT_SYMBOL_GPL
+0x018574a1 mb_cache_entry_delete vmlinux EXPORT_SYMBOL
+0x61b55d39 seq_release vmlinux EXPORT_SYMBOL
+0x636ee3c9 insert_inode_locked vmlinux EXPORT_SYMBOL
+0x1866cec2 ring_buffer_size vmlinux EXPORT_SYMBOL_GPL
+0x838b13e7 ring_buffer_free vmlinux EXPORT_SYMBOL_GPL
+0xb1bd233a module_layout vmlinux EXPORT_SYMBOL
+0x5c5a1b16 tick_broadcast_control vmlinux EXPORT_SYMBOL_GPL
+0xcc4ee841 raid6_gfexi lib/raid6/raid6_pq EXPORT_SYMBOL
+0x38701a7c cfcnfg_del_phy_layer net/caif/caif EXPORT_SYMBOL
+0xc6d0fee9 cfcnfg_add_phy_layer net/caif/caif EXPORT_SYMBOL
+0xfd552f7a capi_message2str drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0xc7dcf1e9 qtree_entry_unused fs/quota/quota_tree EXPORT_SYMBOL
+0xe34e7874 xfrm_audit_state_notfound vmlinux EXPORT_SYMBOL_GPL
+0xc7f0309b lwtstate_free vmlinux EXPORT_SYMBOL_GPL
+0xebf7eb4e hidinput_disconnect vmlinux EXPORT_SYMBOL_GPL
+0x651ffa62 pinctrl_utils_add_map_configs vmlinux EXPORT_SYMBOL_GPL
+0x80a717a8 __percpu_counter_compare vmlinux EXPORT_SYMBOL
+0x7586d0c7 blk_mq_tagset_wait_completed_request vmlinux EXPORT_SYMBOL
+0xc0af6074 fuse_dev_free vmlinux EXPORT_SYMBOL_GPL
+0xe88bee40 __sb_start_write vmlinux EXPORT_SYMBOL
+0x01bf55fc paddr_vmcoreinfo_note vmlinux EXPORT_SYMBOL
+0x07f2926e udp_tunnel_push_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0x5ceb0146 rdma_restrack_set_task drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x424d4f17 dm_snap_origin drivers/md/dm-snapshot EXPORT_SYMBOL
+0x1311c433 tun_get_socket vmlinux EXPORT_SYMBOL_GPL
+0x4cb97052 sas_resume_ha vmlinux EXPORT_SYMBOL
+0x73a48b4a ata_sff_std_ports vmlinux EXPORT_SYMBOL_GPL
+0x7bf779c0 pm_clk_add vmlinux EXPORT_SYMBOL_GPL
+0x3d388324 dpm_resume_end vmlinux EXPORT_SYMBOL_GPL
+0xb9396f7c pinctrl_remove_gpio_range vmlinux EXPORT_SYMBOL_GPL
+0x4f17a6af elv_rb_latter_request vmlinux EXPORT_SYMBOL
+0x261b9df7 sysfs_create_bin_file vmlinux EXPORT_SYMBOL_GPL
+0x27fa66e1 nr_free_buffer_pages vmlinux EXPORT_SYMBOL_GPL
+0xcc03b139 mod_delayed_work_on vmlinux EXPORT_SYMBOL_GPL
+0xbd94541e dccp_destroy_sock net/dccp/dccp EXPORT_SYMBOL_GPL
+0x0f4fc6a9 rxrpc_kernel_get_srtt net/rxrpc/rxrpc EXPORT_SYMBOL
+0x6135389d md_bitmap_end_sync drivers/md/md-mod EXPORT_SYMBOL
+0xbfd26f15 simd_aead_free crypto/crypto_simd EXPORT_SYMBOL_GPL
+0xd24e9e8c klist_init vmlinux EXPORT_SYMBOL_GPL
+0xe76fc75d xfrm_state_unregister_afinfo vmlinux EXPORT_SYMBOL
+0x3b651eab of_find_node_by_name vmlinux EXPORT_SYMBOL
+0x304a03aa dm_noflush_suspending vmlinux EXPORT_SYMBOL_GPL
+0x5e842746 _dev_emerg vmlinux EXPORT_SYMBOL
+0x30b29ad9 drm_sched_entity_push_job vmlinux EXPORT_SYMBOL
+0xacf6d3f9 serial8250_do_pm vmlinux EXPORT_SYMBOL
+0x6b640864 nla_strlcpy vmlinux EXPORT_SYMBOL
+0x69ad2f20 kstrtouint vmlinux EXPORT_SYMBOL
+0xa8c92547 pstore_register vmlinux EXPORT_SYMBOL_GPL
+0xeef322b4 fs_context_for_submount vmlinux EXPORT_SYMBOL
+0xb8b043f2 kfree_link vmlinux EXPORT_SYMBOL
+0xce807a25 up_write vmlinux EXPORT_SYMBOL
+0x16e7e2cb cpu_all_bits vmlinux EXPORT_SYMBOL
+0xed881195 p9_client_rename net/9p/9pnet EXPORT_SYMBOL
+0x797711ea p9_client_remove net/9p/9pnet EXPORT_SYMBOL
+0xc4b2481e auth_domain_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x7361beb7 snd_rawmidi_kernel_open sound/core/snd-rawmidi EXPORT_SYMBOL
+0x92aefdbf mpt_halt_firmware drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xf102033e slhc_remember drivers/net/slip/slhc EXPORT_SYMBOL
+0x2decde87 mlxsw_core_fw_flash_start drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb9296a74 mlxsw_core_ptp_transmitted drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x78305586 mlx5_rdma_rn_get_params drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xf1043b52 cpufreq_cpu_put vmlinux EXPORT_SYMBOL_GPL
+0x7a6057c6 cpufreq_cpu_get vmlinux EXPORT_SYMBOL_GPL
+0x55bd1f86 power_supply_external_power_changed vmlinux EXPORT_SYMBOL_GPL
+0xbd8df6f1 scsi_target_block vmlinux EXPORT_SYMBOL_GPL
+0x858e33f9 drm_property_replace_global_blob vmlinux EXPORT_SYMBOL
+0x84cb1b84 free_buffer_head vmlinux EXPORT_SYMBOL
+0x1991b596 fget vmlinux EXPORT_SYMBOL
+0xfe476039 ktime_get_resolution_ns vmlinux EXPORT_SYMBOL_GPL
+0x139cee21 wait_for_completion_io_timeout vmlinux EXPORT_SYMBOL
+0x1dbf1f49 kthread_associate_blkcg vmlinux EXPORT_SYMBOL
+0x878469bd ZSTD_decompressStream lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x429a199a rds_inc_path_init net/rds/rds EXPORT_SYMBOL_GPL
+0xa55edd77 can_send net/can/can EXPORT_SYMBOL
+0x25f8c591 snd_ctl_boolean_mono_info sound/core/snd EXPORT_SYMBOL
+0x6b958320 ib_ud_ip4_csum drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x2e31d36f iscsi_complete_pdu drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xec7166ac fc_queuecommand drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xb064469f of_dma_get_range vmlinux EXPORT_SYMBOL_GPL
+0x0e973431 firmware_request_nowarn vmlinux EXPORT_SYMBOL_GPL
+0xfa1eb910 unregister_syscore_ops vmlinux EXPORT_SYMBOL_GPL
+0x66c41c21 screen_pos vmlinux EXPORT_SYMBOL_GPL
+0xdba1e39a pci_reset_bus vmlinux EXPORT_SYMBOL_GPL
+0xe957a9ea jbd2_log_start_commit vmlinux EXPORT_SYMBOL
+0xe01eda1d remove_proc_subtree vmlinux EXPORT_SYMBOL
+0x07b21f85 kdb_get_kbd_char vmlinux EXPORT_SYMBOL_GPL
+0xce6db656 rcu_is_watching vmlinux EXPORT_SYMBOL_GPL
+0xe2763d98 ieee802154_stop_queue net/mac802154/mac802154 EXPORT_SYMBOL
+0x79ea31ad hinic_register_uld drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x03ff30fd hinic_dcb_set_rq_iq_mapping drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xb0ddd5a1 fc_rport_flush_queue drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xea0e05ed rtc_add_groups vmlinux EXPORT_SYMBOL
+0x5831ad71 drm_atomic_helper_check_modeset vmlinux EXPORT_SYMBOL
+0x120910f3 mctrl_gpio_init vmlinux EXPORT_SYMBOL_GPL
+0x29e1e204 hdmi_audio_infoframe_pack vmlinux EXPORT_SYMBOL
+0x1ee8d6d4 refcount_inc_checked vmlinux EXPORT_SYMBOL
+0x44e9a829 match_token vmlinux EXPORT_SYMBOL
+0xe71d021b jbd2_journal_abort vmlinux EXPORT_SYMBOL
+0x42214614 __ftrace_vbprintk vmlinux EXPORT_SYMBOL_GPL
+0x57bc19d2 down_write vmlinux EXPORT_SYMBOL
+0x318d6fec mutex_is_locked vmlinux EXPORT_SYMBOL
+0x83ee922f hinic_ceq_unregister_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x60f356eb compat_ip_getsockopt vmlinux EXPORT_SYMBOL
+0x5d307bcd ip_route_output_flow vmlinux EXPORT_SYMBOL_GPL
+0x55a25494 compat_nf_getsockopt vmlinux EXPORT_SYMBOL
+0xd2ef68f6 compat_mc_getsockopt vmlinux EXPORT_SYMBOL
+0xea3c8e4e scsilun_to_int vmlinux EXPORT_SYMBOL
+0x37fb0cf2 drm_prime_pages_to_sg vmlinux EXPORT_SYMBOL
+0xb1728ddd key_type_logon vmlinux EXPORT_SYMBOL_GPL
+0xc59ed19d proc_create_seq_private vmlinux EXPORT_SYMBOL
+0x969ddb0b posix_test_lock vmlinux EXPORT_SYMBOL
+0x06d8877a kill_block_super vmlinux EXPORT_SYMBOL
+0x447c7aac flush_signals vmlinux EXPORT_SYMBOL
+0x4184666b nf_ct_remove_expectations net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xa9b1f9ca __inet_stream_connect vmlinux EXPORT_SYMBOL
+0xe9d6e402 neigh_lookup_nodev vmlinux EXPORT_SYMBOL
+0x75137775 kernel_setsockopt vmlinux EXPORT_SYMBOL
+0x22a81c75 kernel_getsockopt vmlinux EXPORT_SYMBOL
+0xc95a9812 usb_stor_disconnect vmlinux EXPORT_SYMBOL_GPL
+0x9367aed1 scsi_change_queue_depth vmlinux EXPORT_SYMBOL
+0x153e2389 inode_dax vmlinux EXPORT_SYMBOL_GPL
+0x020bd208 fwnode_get_mac_address vmlinux EXPORT_SYMBOL
+0x7791e066 dw_pcie_read vmlinux EXPORT_SYMBOL_GPL
+0x3224b2a9 mpi_read_raw_from_sgl vmlinux EXPORT_SYMBOL_GPL
+0xbd6841d4 crc16 vmlinux EXPORT_SYMBOL
+0x7ff23675 dquot_operations vmlinux EXPORT_SYMBOL
+0xb92952ae padata_start vmlinux EXPORT_SYMBOL
+0x92dbde52 kallsyms_on_each_symbol vmlinux EXPORT_SYMBOL_GPL
+0xe2948c8a sched_show_task vmlinux EXPORT_SYMBOL_GPL
+0xf82f3657 work_on_cpu vmlinux EXPORT_SYMBOL_GPL
+0xec19b64a nfnetlink_unicast net/netfilter/nfnetlink EXPORT_SYMBOL_GPL
+0x3779672a snd_mixer_oss_ioctl_card sound/core/oss/snd-mixer-oss EXPORT_SYMBOL
+0xbd7596a2 vhost_log_write drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xe52975ab mpt_raid_phys_disk_pg1 drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xe57d8a4b nvme_start_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x4deb4c40 devm_nvmem_cell_put vmlinux EXPORT_SYMBOL
+0xbf48f8d5 dev_pm_qos_hide_latency_limit vmlinux EXPORT_SYMBOL_GPL
+0xb980b086 drm_of_find_possible_crtcs vmlinux EXPORT_SYMBOL
+0x5ff8e053 drm_primary_helper_destroy vmlinux EXPORT_SYMBOL
+0xe16fb6d9 iommu_iova_to_phys vmlinux EXPORT_SYMBOL_GPL
+0x1118fa05 gpiochip_unlock_as_irq vmlinux EXPORT_SYMBOL_GPL
+0x6b09cfd3 rhltable_init vmlinux EXPORT_SYMBOL_GPL
+0x50a90e8d bsearch vmlinux EXPORT_SYMBOL
+0x709fbaf6 blk_put_request vmlinux EXPORT_SYMBOL
+0x6ca4bf88 async_synchronize_full_domain vmlinux EXPORT_SYMBOL_GPL
+0xce50e5de ZSTD_compress_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL
+0x7b54d907 ib_set_device_ops drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0efbca4c btracker_promotion_already_present drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x70d4dc59 cdrom_mode_select drivers/cdrom/cdrom EXPORT_SYMBOL
+0xf9aaad5e m_can_class_suspend drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL
+0x9d92566e iscsi_tcp_task_xmit drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x7f4739c0 sock_zerocopy_put_abort vmlinux EXPORT_SYMBOL_GPL
+0xf8ba6cf3 of_get_cpu_node vmlinux EXPORT_SYMBOL
+0x7ff5841e input_get_timestamp vmlinux EXPORT_SYMBOL
+0xf33ff825 usb_serial_generic_throttle vmlinux EXPORT_SYMBOL_GPL
+0xfab992f0 drm_syncobj_create vmlinux EXPORT_SYMBOL
+0xe861f156 crypto_create_tfm vmlinux EXPORT_SYMBOL_GPL
+0x9134ea25 nf_conntrack_in net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x68a91bbd snd_pcm_lib_preallocate_free_for_all sound/core/snd-pcm EXPORT_SYMBOL
+0x76877a98 ib_sa_join_multicast drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x1d6184bc i2c_root_adapter drivers/i2c/i2c-mux EXPORT_SYMBOL_GPL
+0xb4ae7c2d iscsi_tcp_conn_setup drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x3af049d9 nvme_start_queues drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x1149caa7 metadata_dst_alloc_percpu vmlinux EXPORT_SYMBOL_GPL
+0x2e22d93e sock_queue_rcv_skb vmlinux EXPORT_SYMBOL
+0xd688716b dm_kcopyd_client_create vmlinux EXPORT_SYMBOL
+0x366ac4a3 rc_map_register vmlinux EXPORT_SYMBOL_GPL
+0xbfbd656f ata_port_schedule_eh vmlinux EXPORT_SYMBOL_GPL
+0xea7fcf17 drm_scdc_set_scrambling vmlinux EXPORT_SYMBOL
+0x5009c71d glob_match vmlinux EXPORT_SYMBOL
+0x567e8503 rhashtable_init vmlinux EXPORT_SYMBOL_GPL
+0x7ad65f6b blk_limits_io_min vmlinux EXPORT_SYMBOL
+0x331d2914 sysfs_remove_files vmlinux EXPORT_SYMBOL_GPL
+0xa67e5951 percpu_up_write vmlinux EXPORT_SYMBOL_GPL
+0x021d729c osd_req_op_cls_init net/ceph/libceph EXPORT_SYMBOL
+0x7505cded rpc_setbufsize net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0692af34 aarp_send_ddp net/appletalk/appletalk EXPORT_SYMBOL
+0x26651e86 __transport_register_session drivers/target/target_core_mod EXPORT_SYMBOL
+0xcb427774 hinic_attach_nic drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x902926bd hinic_alloc_ceqs drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xd34ef309 hinic_alloc_irqs drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xad0f2b6c unix_table_lock vmlinux EXPORT_SYMBOL_GPL
+0x1921da72 rc_unregister_device vmlinux EXPORT_SYMBOL_GPL
+0xeff888f7 serio_interrupt vmlinux EXPORT_SYMBOL
+0x13a2e304 scsi_internal_device_unblock_nowait vmlinux EXPORT_SYMBOL_GPL
+0x863a276a color_table vmlinux EXPORT_SYMBOL
+0x8f76d347 devm_clk_hw_register vmlinux EXPORT_SYMBOL_GPL
+0x3ada9e06 acpi_check_region vmlinux EXPORT_SYMBOL
+0x21c5f645 put_disk vmlinux EXPORT_SYMBOL
+0x9bf421fd add_timer vmlinux EXPORT_SYMBOL
+0xef55ba30 can_proto_register net/can/can EXPORT_SYMBOL
+0x9f044771 nf_nat_helper_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x333f8d0b md_set_array_sectors drivers/md/md-mod EXPORT_SYMBOL
+0x7380dffa argv_split vmlinux EXPORT_SYMBOL
+0xdc9c0e77 __ndisc_fill_addr_option vmlinux EXPORT_SYMBOL_GPL
+0x3a49898d lwtunnel_get_encap_size vmlinux EXPORT_SYMBOL_GPL
+0x660c7c87 hid_allocate_device vmlinux EXPORT_SYMBOL_GPL
+0x34936b1d spi_sync vmlinux EXPORT_SYMBOL_GPL
+0xc7061ef3 iova_cache_put vmlinux EXPORT_SYMBOL_GPL
+0x35693da2 dma_sync_wait vmlinux EXPORT_SYMBOL
+0x8b5b25c8 irq_cpu_rmap_add vmlinux EXPORT_SYMBOL
+0x34f3484e security_tun_dev_attach_queue vmlinux EXPORT_SYMBOL
+0x0832e264 iomap_bmap vmlinux EXPORT_SYMBOL_GPL
+0x3feae1a5 irq_chip_mask_ack_parent vmlinux EXPORT_SYMBOL_GPL
+0xd52a9dc9 xprt_reconnect_delay net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x71219115 vhost_work_flush drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xef8097be iscsit_logout_post_handler drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x09cc81fa dm_btree_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xe115e663 hinic_set_vport_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x1b7f00f3 udp_abort vmlinux EXPORT_SYMBOL_GPL
+0xa7904be1 __gnet_stats_copy_basic vmlinux EXPORT_SYMBOL
+0xf7720074 sock_kzfree_s vmlinux EXPORT_SYMBOL
+0xc289e46d cpufreq_generic_frequency_table_verify vmlinux EXPORT_SYMBOL_GPL
+0x615f4273 usb_alloc_dev vmlinux EXPORT_SYMBOL_GPL
+0xca797674 ahci_handle_port_intr vmlinux EXPORT_SYMBOL_GPL
+0xd1f8459d drm_add_modes_noedid vmlinux
EXPORT_SYMBOL +0xb737b185 gen_pool_best_fit vmlinux EXPORT_SYMBOL +0x121317d8 blk_queue_max_hw_sectors vmlinux EXPORT_SYMBOL +0xf0ba147c handle_bad_irq vmlinux EXPORT_SYMBOL_GPL +0xa6aa9857 des_decrypt lib/crypto/libdes EXPORT_SYMBOL_GPL +0x4e2c4a55 rds_inc_put net/rds/rds EXPORT_SYMBOL_GPL +0x31a89d59 rpc_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf4b9f93f svc_xprt_enqueue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x84553d1e of_device_request_module vmlinux EXPORT_SYMBOL_GPL +0xbc058e23 devm_kvasprintf vmlinux EXPORT_SYMBOL +0xdfedecd0 bus_create_file vmlinux EXPORT_SYMBOL_GPL +0xef7e47df drm_warn_on_modeset_not_all_locked vmlinux EXPORT_SYMBOL +0x2d182856 bus_set_iommu vmlinux EXPORT_SYMBOL_GPL +0x1a39872a crypto_aes_set_key vmlinux EXPORT_SYMBOL_GPL +0x3bdb5d28 alg_test vmlinux EXPORT_SYMBOL_GPL +0x4cbfcf4c jbd2__journal_restart vmlinux EXPORT_SYMBOL +0x7ce18c9f from_kqid vmlinux EXPORT_SYMBOL +0xbef43296 console_conditional_schedule vmlinux EXPORT_SYMBOL +0x29eba37f current_is_async vmlinux EXPORT_SYMBOL_GPL +0xe2ea4776 svc_destroy net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9086c229 snd_unregister_device sound/core/snd EXPORT_SYMBOL +0xb0075cb8 af_alg_accept crypto/af_alg EXPORT_SYMBOL_GPL +0x0c7b743d nfs_fattr_init fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd519069f __fscache_update_cookie fs/fscache/fscache EXPORT_SYMBOL +0xb28cd1ae qdisc_offload_graft_helper vmlinux EXPORT_SYMBOL +0x34d43346 thermal_remove_hwmon_sysfs vmlinux EXPORT_SYMBOL_GPL +0x4baa66cd ata_scsi_cmd_error_handler vmlinux EXPORT_SYMBOL +0xe020181a blkdev_issue_discard vmlinux EXPORT_SYMBOL +0xd8e7966e crypto_unregister_scomp vmlinux EXPORT_SYMBOL_GPL +0x5cafaaff crypto_unregister_acomp vmlinux EXPORT_SYMBOL_GPL +0xa1c842b4 debugfs_create_symlink vmlinux EXPORT_SYMBOL_GPL +0x790dbfd6 fat_detach vmlinux EXPORT_SYMBOL_GPL +0x93d61b49 ring_buffer_peek vmlinux EXPORT_SYMBOL_GPL +0x7305d41b handle_untracked_irq vmlinux EXPORT_SYMBOL_GPL +0xc3b6a26c dccp_orphan_count net/dccp/dccp EXPORT_SYMBOL_GPL +0xb0ad34ee capilib_new_ncci drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x5a69063f iscsi_put_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xc899320d vlan_for_each vmlinux EXPORT_SYMBOL +0x8db6a71f __ip_options_compile vmlinux EXPORT_SYMBOL +0xa0693a1d xt_request_find_match vmlinux EXPORT_SYMBOL_GPL +0x9034db24 fib_notifier_ops_unregister vmlinux EXPORT_SYMBOL +0xfd948c17 dev_pm_domain_set vmlinux EXPORT_SYMBOL_GPL +0x5f18fb39 device_match_devt vmlinux EXPORT_SYMBOL_GPL +0x2e439142 drm_get_panel_orientation_quirk vmlinux EXPORT_SYMBOL +0xc8474eec drm_ioctl vmlinux EXPORT_SYMBOL +0x6228c21f smp_call_function_single vmlinux EXPORT_SYMBOL +0xc0bca0f1 ZSTD_nextSrcSizeToDecompress lib/zstd/zstd_decompress EXPORT_SYMBOL +0x86369528 mlx5_cmd_init_async_ctx drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4d9b652b rb_erase vmlinux EXPORT_SYMBOL +0x2ad16603 cookie_ecn_ok vmlinux EXPORT_SYMBOL +0x44710827 drm_atomic_bridge_disable vmlinux EXPORT_SYMBOL +0xadacddfe drm_mode_duplicate vmlinux EXPORT_SYMBOL +0x2e621cc4 fsnotify_alloc_group vmlinux EXPORT_SYMBOL_GPL +0x56dbf27e __cpuhp_state_add_instance vmlinux EXPORT_SYMBOL_GPL +0x3f9a2b0b rpcauth_get_gssinfo net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2541acee can_proto_unregister net/can/can EXPORT_SYMBOL +0x9cebd72b vhost_dev_reset_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xcac9675a __tracepoint_nfs4_pnfs_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xaafdc258 strcasecmp vmlinux EXPORT_SYMBOL +0x0d6b6a2f __netdev_watchdog_up vmlinux EXPORT_SYMBOL_GPL +0xfa355613 hid_quirks_init vmlinux 
EXPORT_SYMBOL_GPL +0xce8a476b ata_sff_busy_sleep vmlinux EXPORT_SYMBOL_GPL +0xbf4c164c ttm_bo_synccpu_write_release vmlinux EXPORT_SYMBOL +0xcadef538 drm_vma_node_is_allowed vmlinux EXPORT_SYMBOL +0x44abbca4 drm_legacy_pci_init vmlinux EXPORT_SYMBOL +0x058b582a vt_get_leds vmlinux EXPORT_SYMBOL_GPL +0xf7aa4071 blk_mq_start_hw_queue vmlinux EXPORT_SYMBOL +0x04569255 security_cred_getsecid vmlinux EXPORT_SYMBOL +0x04b7b445 fuse_dequeue_forget vmlinux EXPORT_SYMBOL +0x42fd25cd unregister_kretprobe vmlinux EXPORT_SYMBOL_GPL +0xb2fbf2cf clockevents_register_device vmlinux EXPORT_SYMBOL_GPL +0xb7566a29 nci_recv_frame net/nfc/nci/nci EXPORT_SYMBOL +0x2b5f4bf3 cache_check net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa335e94f nf_conntrack_set_hashsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xe3529f7a nvme_stop_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x5b17be06 cast_s4 crypto/cast_common EXPORT_SYMBOL_GPL +0x44067f3e tcf_idr_search vmlinux EXPORT_SYMBOL +0x69a3a631 fib_rules_seq_read vmlinux EXPORT_SYMBOL_GPL +0xa0f493d9 efi vmlinux EXPORT_SYMBOL +0x40f79de7 usb_ep_set_halt vmlinux EXPORT_SYMBOL_GPL +0xc8eccf07 phy_modify_paged_changed vmlinux EXPORT_SYMBOL +0xb5ba8263 phy_speed_down vmlinux EXPORT_SYMBOL_GPL +0xd0dce15f tty_unthrottle vmlinux EXPORT_SYMBOL +0xbe884637 gpiod_get_raw_value vmlinux EXPORT_SYMBOL_GPL +0xea842067 gpiochip_add_data_with_key vmlinux EXPORT_SYMBOL_GPL +0xea161f3c security_socket_getpeersec_dgram vmlinux EXPORT_SYMBOL +0x5914c295 set_blocksize vmlinux EXPORT_SYMBOL +0x3793b1ec block_page_mkwrite vmlinux EXPORT_SYMBOL +0xb04d1f7b perf_event_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xcc2dbfd8 irq_domain_check_msi_remap vmlinux EXPORT_SYMBOL_GPL +0x65cf8831 ZSTD_decompress_usingDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x2f02925c ib_mr_pool_put drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x97183af8 md_done_sync drivers/md/md-mod EXPORT_SYMBOL +0xcf477fb3 dst_release_immediate vmlinux EXPORT_SYMBOL +0x615014d0 class_compat_remove_link vmlinux EXPORT_SYMBOL_GPL +0x6320c1ab uart_unregister_driver vmlinux EXPORT_SYMBOL +0x0acf7679 dma_issue_pending_all vmlinux EXPORT_SYMBOL +0x59bb85f1 cpci_hp_unregister_bus vmlinux EXPORT_SYMBOL_GPL +0xf7e94768 gpiochip_irq_unmap vmlinux EXPORT_SYMBOL_GPL +0xa12f974e devm_pinctrl_unregister vmlinux EXPORT_SYMBOL_GPL +0xb4f555e3 bsg_job_get vmlinux EXPORT_SYMBOL_GPL +0x9d191ee0 bsg_job_put vmlinux EXPORT_SYMBOL_GPL +0xc5d296e0 crypto_spawn_tfm vmlinux EXPORT_SYMBOL_GPL +0x4af17ba6 security_inode_notifysecctx vmlinux EXPORT_SYMBOL +0xedfe59ec generic_error_remove_page vmlinux EXPORT_SYMBOL +0x10a840d3 mdev_bus_type drivers/vfio/mdev/mdev EXPORT_SYMBOL_GPL +0xe8c580d9 ipv6_chk_prefix vmlinux EXPORT_SYMBOL +0xf7489406 ip6_dst_lookup_flow vmlinux EXPORT_SYMBOL_GPL +0xeab9e42e inet_unhash vmlinux EXPORT_SYMBOL_GPL +0x3a3d2ece ip_fraglist_prepare vmlinux EXPORT_SYMBOL +0x8b48aeb8 dev_nit_active vmlinux EXPORT_SYMBOL_GPL +0xd4835ef8 dmi_check_system vmlinux EXPORT_SYMBOL +0x41f30d14 mdio_bus_type vmlinux EXPORT_SYMBOL +0xd2bab2ee blk_execute_rq vmlinux EXPORT_SYMBOL +0x42d4efa3 crypto_register_scomp vmlinux EXPORT_SYMBOL_GPL +0x25e9d4bd resource_list_free vmlinux EXPORT_SYMBOL +0x10c32754 rpc_call_sync net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1b815614 ip_tunnel_init net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x74a4af57 dm_cell_visit_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xca9beaa4 __xa_store vmlinux EXPORT_SYMBOL +0x59588850 vsscanf vmlinux EXPORT_SYMBOL +0xda06fddb ip6_xmit vmlinux EXPORT_SYMBOL +0x1a2c3cf4 
compat_ip_setsockopt vmlinux EXPORT_SYMBOL +0x02fa5455 compat_nf_setsockopt vmlinux EXPORT_SYMBOL +0x65afba55 tcf_exts_dump vmlinux EXPORT_SYMBOL +0x8f5970fe compat_mc_setsockopt vmlinux EXPORT_SYMBOL +0x075dfa4c i2c_acpi_find_adapter_by_handle vmlinux EXPORT_SYMBOL_GPL +0xc38ce3f7 ata_sff_dev_select vmlinux EXPORT_SYMBOL_GPL +0x01e89261 ata_noop_qc_prep vmlinux EXPORT_SYMBOL_GPL +0xd2f9d7d7 dev_pm_qos_expose_flags vmlinux EXPORT_SYMBOL_GPL +0xb43d67e2 drm_hdmi_avi_infoframe_quant_range vmlinux EXPORT_SYMBOL +0xded6a415 acpi_get_object_info vmlinux EXPORT_SYMBOL +0xce8634b0 jbd2_journal_unlock_updates vmlinux EXPORT_SYMBOL +0x82d79b51 sysctl_vfs_cache_pressure vmlinux EXPORT_SYMBOL_GPL +0xf1e046cc panic vmlinux EXPORT_SYMBOL +0xadb3b921 caif_enroll_dev net/caif/caif EXPORT_SYMBOL +0x1c800d38 mtd_ooblayout_ecc drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x24e46f18 o2nm_get_node_by_ip fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xa56fda14 udp6_lib_lookup_skb vmlinux EXPORT_SYMBOL_GPL +0x7849f2a7 udp4_lib_lookup_skb vmlinux EXPORT_SYMBOL_GPL +0x1edfebab led_trigger_remove vmlinux EXPORT_SYMBOL_GPL +0x24149409 fwnode_property_match_string vmlinux EXPORT_SYMBOL_GPL +0xb320cc0e sg_init_one vmlinux EXPORT_SYMBOL +0xeb233a45 __kmalloc vmlinux EXPORT_SYMBOL +0xcddf45c2 __vmalloc vmlinux EXPORT_SYMBOL +0x55f0b6ef xdr_truncate_encode net/sunrpc/sunrpc EXPORT_SYMBOL +0x8afad01d line6_suspend sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0xdb518aab __fscache_relinquish_cookie fs/fscache/fscache EXPORT_SYMBOL +0xcf1189a7 dlm_posix_get fs/dlm/dlm EXPORT_SYMBOL_GPL +0xf643d104 hsiphash_4u32 vmlinux EXPORT_SYMBOL +0x9e0fa5ae hsiphash_3u32 vmlinux EXPORT_SYMBOL +0x30acfde9 hsiphash_2u32 vmlinux EXPORT_SYMBOL +0x6481ffe0 hsiphash_1u32 vmlinux EXPORT_SYMBOL +0x01597038 kobject_init_and_add vmlinux EXPORT_SYMBOL_GPL +0x7702349d tcp_set_keepalive vmlinux EXPORT_SYMBOL_GPL +0xd40181c4 ohci_suspend vmlinux EXPORT_SYMBOL_GPL +0x22e6dddf ata_sas_port_start vmlinux EXPORT_SYMBOL_GPL +0x1dfca3ba drm_property_replace_blob vmlinux EXPORT_SYMBOL +0x8f969f20 reset_controller_register vmlinux EXPORT_SYMBOL_GPL +0xce76c257 acpi_get_irq_routing_table vmlinux EXPORT_SYMBOL +0xfeac1a4c devm_pci_alloc_host_bridge vmlinux EXPORT_SYMBOL +0xa7b454a8 rpc_wake_up_next net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5cb0a24a __tracepoint_bcache_bypass_congested drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x3755f990 gf128mul_init_64k_bbe crypto/gf128mul EXPORT_SYMBOL +0xfd27e670 __ip_select_ident vmlinux EXPORT_SYMBOL +0xd2744401 skb_gro_receive vmlinux EXPORT_SYMBOL_GPL +0x4ded36d5 hook_info_flag vmlinux EXPORT_SYMBOL +0xb70c1d22 of_parse_phandle_with_args_map vmlinux EXPORT_SYMBOL +0xc72c5f48 ata_sg_init vmlinux EXPORT_SYMBOL_GPL +0xb11d9000 tty_dev_name_to_number vmlinux EXPORT_SYMBOL_GPL +0x83c2e3d6 pci_find_ht_capability vmlinux EXPORT_SYMBOL_GPL +0xa32cc108 pci_find_next_ht_capability vmlinux EXPORT_SYMBOL_GPL +0x90468464 padata_alloc_shell vmlinux EXPORT_SYMBOL +0x62c786ad osd_req_op_extent_osd_data_pages net/ceph/libceph EXPORT_SYMBOL +0x449ad0a7 memcmp vmlinux EXPORT_SYMBOL +0x1d40b6f3 idr_for_each vmlinux EXPORT_SYMBOL +0x265112da xt_find_match vmlinux EXPORT_SYMBOL +0xaf8a10a0 nf_nat_hook vmlinux EXPORT_SYMBOL_GPL +0x17da288a __skb_warn_lro_forwarding vmlinux EXPORT_SYMBOL +0x95ff7071 regmap_exit vmlinux EXPORT_SYMBOL_GPL +0x96c17136 fb_var_to_videomode vmlinux EXPORT_SYMBOL +0xd672ae94 bio_add_pc_page vmlinux EXPORT_SYMBOL +0xf917a318 query_asymmetric_key vmlinux EXPORT_SYMBOL_GPL +0x04a64c3b shash_ahash_digest vmlinux 
EXPORT_SYMBOL_GPL +0xe73ae160 locks_init_lock vmlinux EXPORT_SYMBOL +0x140a1340 split_page vmlinux EXPORT_SYMBOL_GPL +0xff9c4b56 ZSTD_compressBound lib/zstd/zstd_compress EXPORT_SYMBOL +0x67753fc1 ceph_msg_data_add_bvecs net/ceph/libceph EXPORT_SYMBOL +0x01602524 nf_ct_helper_expectfn_find_by_name net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xbc9fbf47 snd_card_set_id sound/core/snd EXPORT_SYMBOL +0x95258207 vfio_device_data drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x42f9a952 hinic_dma_attr_entry_num drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x37fac285 fc_slave_alloc drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x15463219 nfs4_label_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe7a02573 ida_alloc_range vmlinux EXPORT_SYMBOL +0x51796291 inet6_del_offload vmlinux EXPORT_SYMBOL +0xda89ea3b ip_idents_reserve vmlinux EXPORT_SYMBOL +0x091c1692 netif_stacked_transfer_operstate vmlinux EXPORT_SYMBOL +0xa4067ee3 serio_reconnect vmlinux EXPORT_SYMBOL +0xf7b77d3c genphy_read_mmd_unsupported vmlinux EXPORT_SYMBOL +0x8df9dd10 guid_null vmlinux EXPORT_SYMBOL +0xe361f49d crypto_type_has_alg vmlinux EXPORT_SYMBOL_GPL +0x0c270e25 key_set_timeout vmlinux EXPORT_SYMBOL_GPL +0x8edead4b d_alloc_name vmlinux EXPORT_SYMBOL +0x6fa6b2a6 dmam_alloc_attrs vmlinux EXPORT_SYMBOL +0xc3cd9fc5 of_translate_address vmlinux EXPORT_SYMBOL +0xc7044e23 power_supply_property_is_writeable vmlinux EXPORT_SYMBOL_GPL +0x4ab2c6d2 ttm_pool_populate vmlinux EXPORT_SYMBOL +0x0c8b65cd drm_encoder_cleanup vmlinux EXPORT_SYMBOL +0x74215253 pinctrl_dev_get_drvdata vmlinux EXPORT_SYMBOL_GPL +0x1c910ed5 devm_ioport_unmap vmlinux EXPORT_SYMBOL +0x8d55bb8a qid_eq vmlinux EXPORT_SYMBOL +0x66f9bc73 cont_write_begin vmlinux EXPORT_SYMBOL +0x08d3bf02 trace_vprintk vmlinux EXPORT_SYMBOL_GPL +0x98e508ef ignore_console_lock_warning vmlinux EXPORT_SYMBOL +0x7b399e66 rds_page_remainder_alloc net/rds/rds EXPORT_SYMBOL_GPL +0x91178ea2 udp_tunnel_xmit_skb net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0xe5b75e57 vfio_virqfd_enable drivers/vfio/vfio_virqfd EXPORT_SYMBOL_GPL +0x34d45c77 dm_btree_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xd262dfcb vscnprintf vmlinux EXPORT_SYMBOL +0x079e0620 pktgen_xfrm_outer_mode_output vmlinux EXPORT_SYMBOL_GPL +0x80c68137 nf_log_buf_close vmlinux EXPORT_SYMBOL_GPL +0xa2f812f9 phy_10gbit_fec_features_array vmlinux EXPORT_SYMBOL_GPL +0xc48b7ccf ata_mode_string vmlinux EXPORT_SYMBOL_GPL +0x00e6711e drm_crtc_send_vblank_event vmlinux EXPORT_SYMBOL +0xefc75f05 dmaenginem_async_device_register vmlinux EXPORT_SYMBOL +0x923bbf12 pci_d3cold_disable vmlinux EXPORT_SYMBOL_GPL +0x581f98da zlib_inflate vmlinux EXPORT_SYMBOL +0x52ecbc75 crc_ccitt vmlinux EXPORT_SYMBOL +0x0c9b0f2d simple_statfs vmlinux EXPORT_SYMBOL +0xf5b49e96 kmem_cache_alloc_node vmlinux EXPORT_SYMBOL +0x365e7911 kstrdup_const vmlinux EXPORT_SYMBOL +0x68f0b86f param_set_long vmlinux EXPORT_SYMBOL +0xf6798bec xfrm6_tunnel_deregister net/ipv6/tunnel6 EXPORT_SYMBOL +0xd69cc1b6 xfrm4_tunnel_deregister net/ipv4/tunnel4 EXPORT_SYMBOL +0xcedfc878 dm_bitset_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x05cf0f13 bch_btree_iter_init drivers/md/bcache/bcache EXPORT_SYMBOL +0x7f976c09 vxlan_fdb_find_uc drivers/net/vxlan EXPORT_SYMBOL_GPL +0x07c6447f hinic_get_speed drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xe69a6204 nxp_nci_probe drivers/nfc/nxp-nci/nxp-nci EXPORT_SYMBOL +0xfa8cc342 configfs_undepend_item fs/configfs/configfs EXPORT_SYMBOL +0xa17a4fd0 km_policy_notify vmlinux EXPORT_SYMBOL 
+0x4ed378af ip_mc_join_group vmlinux EXPORT_SYMBOL +0x6cfa7433 usb_string vmlinux EXPORT_SYMBOL_GPL +0x1220c9a1 usb_hub_release_port vmlinux EXPORT_SYMBOL_GPL +0xd2b10a05 ata_timing_find_mode vmlinux EXPORT_SYMBOL_GPL +0x6154269c drm_fb_swab16 vmlinux EXPORT_SYMBOL +0xdb760f52 __kfifo_free vmlinux EXPORT_SYMBOL +0xa263892b fscrypt_fname_free_buffer vmlinux EXPORT_SYMBOL +0x7c72ef2e p9_client_attach net/9p/9pnet EXPORT_SYMBOL +0x9f87ad35 nf_ct_unexpect_related net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x9d3c41cb can_rx_offload_irq_offload_timestamp drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x2d10e14b qlt_xmit_response drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x74afb58e inet_csk_reqsk_queue_hash_add vmlinux EXPORT_SYMBOL_GPL +0xd2202cfe flow_indr_add_block_cb vmlinux EXPORT_SYMBOL_GPL +0xdbcc03c6 drm_bridge_add vmlinux EXPORT_SYMBOL +0x28b895ff debugfs_remove vmlinux EXPORT_SYMBOL_GPL +0xc6368782 lease_modify vmlinux EXPORT_SYMBOL +0xa1b59b01 drop_nlink vmlinux EXPORT_SYMBOL +0xf75f02cc vmf_insert_mixed_mkwrite vmlinux EXPORT_SYMBOL +0x1a45affe kthread_cancel_delayed_work_sync vmlinux EXPORT_SYMBOL_GPL +0xfdf70093 ZSTD_CStreamOutSize lib/zstd/zstd_compress EXPORT_SYMBOL +0x24234986 be_roce_unregister_driver drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL +0x0a0ebc08 __xa_cmpxchg vmlinux EXPORT_SYMBOL +0xe5df0201 xfrm_dst_ifdown vmlinux EXPORT_SYMBOL +0x3f08a2dd neigh_seq_start vmlinux EXPORT_SYMBOL +0x6e7943ec iommu_group_id vmlinux EXPORT_SYMBOL_GPL +0xfadf1085 pci_assign_unassigned_bridge_resources vmlinux EXPORT_SYMBOL_GPL +0x09ee65cc gpiod_set_raw_array_value vmlinux EXPORT_SYMBOL_GPL +0x354dcf1c osd_req_op_alloc_hint_init net/ceph/libceph EXPORT_SYMBOL +0xd84fbd76 rpc_put_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3d6a93d7 snd_pcm_hw_constraint_ratdens sound/core/snd-pcm EXPORT_SYMBOL +0x1e7cf01d release_and_free_resource sound/core/snd EXPORT_SYMBOL +0x5882c978 iscsi_tcp_conn_teardown drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0xdd64e639 strscpy vmlinux EXPORT_SYMBOL +0x5792f848 strlcpy vmlinux EXPORT_SYMBOL +0x9166fada strncpy vmlinux EXPORT_SYMBOL +0x620f5f11 eth_header_cache_update vmlinux EXPORT_SYMBOL +0x9691baa8 usb_get_dr_mode vmlinux EXPORT_SYMBOL_GPL +0x8db2c04f software_node_register_nodes vmlinux EXPORT_SYMBOL_GPL +0xb0cc166a regulator_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x1b736142 virtqueue_disable_cb vmlinux EXPORT_SYMBOL_GPL +0x957b1fb6 refcount_inc_not_zero_checked vmlinux EXPORT_SYMBOL +0x905695ab sg_copy_from_buffer vmlinux EXPORT_SYMBOL +0x8abacc47 get_max_files vmlinux EXPORT_SYMBOL_GPL +0x1a45cb6c acpi_disabled vmlinux EXPORT_SYMBOL +0x2b2a9425 ceph_osdc_maybe_request_map net/ceph/libceph EXPORT_SYMBOL +0x1bc651c2 nf_reject_ip_tcphdr_put net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL +0x787ccc4c ib_get_mad_data_offset drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xff05e262 __tracepoint_mlx5_fs_del_rule drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x97ccad14 mlx5_frag_buf_free drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x7cceaf92 zs_pool_stats mm/zsmalloc EXPORT_SYMBOL_GPL +0xed7c7b91 raw_v6_hashinfo vmlinux EXPORT_SYMBOL_GPL +0x9c336312 tcf_em_tree_destroy vmlinux EXPORT_SYMBOL +0x22ffe0f1 drm_mode_get_tile_group vmlinux EXPORT_SYMBOL +0x40f0683e reset_control_put vmlinux EXPORT_SYMBOL_GPL +0x515083bf acpi_release_mutex vmlinux EXPORT_SYMBOL +0xf195c682 fb_invert_cmaps vmlinux EXPORT_SYMBOL +0xebea629b of_pci_parse_bus_range vmlinux EXPORT_SYMBOL_GPL +0x4562a134 
__tracepoint_kmem_cache_free vmlinux EXPORT_SYMBOL +0x7b708beb vsock_remove_bound net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xb2d47df4 p9_client_cb net/9p/9pnet EXPORT_SYMBOL +0x0ec44410 ib_create_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x749556a2 mlxsw_afk_key_info_subset drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x3a41d437 tcp_register_ulp vmlinux EXPORT_SYMBOL_GPL +0xe9b49a26 driver_create_file vmlinux EXPORT_SYMBOL_GPL +0x44ae63d1 serial8250_do_startup vmlinux EXPORT_SYMBOL_GPL +0x66de1f09 clk_register_hisi_phase vmlinux EXPORT_SYMBOL_GPL +0x4d0015e2 cpu_hotplug_disable vmlinux EXPORT_SYMBOL_GPL +0x3c0c3cf8 ceph_msg_put net/ceph/libceph EXPORT_SYMBOL +0x067b9ef2 usb_ep_autoconfig_ss drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x6fffa6b4 fc_frame_alloc_fill drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x083eb21c rfkill_unregister vmlinux EXPORT_SYMBOL +0x7d60be0a devlink_port_param_driverinit_value_set vmlinux EXPORT_SYMBOL_GPL +0x84e7fd54 devlink_port_param_driverinit_value_get vmlinux EXPORT_SYMBOL_GPL +0xdd3c5d83 skb_get_hash_perturb vmlinux EXPORT_SYMBOL +0x8b37f0d7 of_map_rid vmlinux EXPORT_SYMBOL_GPL +0x793469c0 sas_drain_work vmlinux EXPORT_SYMBOL_GPL +0x3ca0ef20 drm_writeback_cleanup_job vmlinux EXPORT_SYMBOL +0x19254e2a drm_plane_force_disable vmlinux EXPORT_SYMBOL +0x098b71c6 fb_dealloc_cmap vmlinux EXPORT_SYMBOL +0x130dcfec blk_mq_run_hw_queue vmlinux EXPORT_SYMBOL +0x344f5fd0 jbd2_journal_start_reserved vmlinux EXPORT_SYMBOL +0x0c345852 blkdev_write_iter vmlinux EXPORT_SYMBOL_GPL +0x48eb2e56 insert_inode_locked4 vmlinux EXPORT_SYMBOL +0x41482d8b strndup_user vmlinux EXPORT_SYMBOL +0xddd58dc0 ring_buffer_reset vmlinux EXPORT_SYMBOL_GPL +0xbd3fe1e3 disable_hardirq vmlinux EXPORT_SYMBOL_GPL +0xd7c06029 ceph_file_layout_to_legacy net/ceph/libceph EXPORT_SYMBOL +0xa74bac25 rds_conn_create_outgoing net/rds/rds EXPORT_SYMBOL_GPL +0x81c9d214 cypress_load_firmware drivers/media/common/cypress_firmware EXPORT_SYMBOL +0x02657543 mptscsih_remove drivers/message/fusion/mptscsih EXPORT_SYMBOL +0xf13609ef devlink_free vmlinux EXPORT_SYMBOL_GPL +0xc37f9c6e cpufreq_update_policy vmlinux EXPORT_SYMBOL +0xc5e96693 drm_atomic_add_affected_planes vmlinux EXPORT_SYMBOL +0xe07e5f44 acpi_reconfig_notifier_unregister vmlinux EXPORT_SYMBOL +0xdd37f9dc pcie_get_width_cap vmlinux EXPORT_SYMBOL +0x7a1e5019 crypto_init_spawn vmlinux EXPORT_SYMBOL_GPL +0x11ec3e51 simple_nosetlease vmlinux EXPORT_SYMBOL +0x5810eccf mount_nodev vmlinux EXPORT_SYMBOL +0xf23c8e29 ip6_tnl_get_iflink net/ipv6/ip6_tunnel EXPORT_SYMBOL +0xd8cb4ddd nf_dup_ipv6 net/ipv6/netfilter/nf_dup_ipv6 EXPORT_SYMBOL_GPL +0x2b35849e nf_nat_ipv6_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x4e0217a9 mlx4_query_diag_counters drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x08ad13d6 qlt_unreg_sess drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x5fc235e5 nfs_clear_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb911bb58 minmax_running_max vmlinux EXPORT_SYMBOL +0x909c76a2 xfrm_find_acq vmlinux EXPORT_SYMBOL +0xffdd216e eth_mac_addr vmlinux EXPORT_SYMBOL +0xc0031728 scsi_scan_host vmlinux EXPORT_SYMBOL +0xb053adda drm_rect_rotate vmlinux EXPORT_SYMBOL +0x99849f36 virtio_device_restore vmlinux EXPORT_SYMBOL_GPL +0xa8f7a406 security_inode_listsecurity vmlinux EXPORT_SYMBOL +0x2098d48e padata_free vmlinux EXPORT_SYMBOL +0x207e49e1 unregister_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0xcf50fc60 ceph_cls_break_lock net/ceph/libceph EXPORT_SYMBOL +0x1de558c1 nft_reject_icmpv6_code net/netfilter/nft_reject 
EXPORT_SYMBOL_GPL +0xd90e327d iscsit_build_reject drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xf2513ef5 iscsit_handle_snack drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x2417c5c4 dm_btree_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x41efdeaf radix_tree_lookup_slot vmlinux EXPORT_SYMBOL +0xe88fcfcf dcb_ieee_getapp_prio_dscp_mask_map vmlinux EXPORT_SYMBOL +0x2626859d tcp_sendpage vmlinux EXPORT_SYMBOL +0x6aea3daa ata_slave_link_init vmlinux EXPORT_SYMBOL_GPL +0xe5883bd9 class_compat_unregister vmlinux EXPORT_SYMBOL_GPL +0x01ed152e allocate_resource vmlinux EXPORT_SYMBOL +0xc067732c ceph_parse_ips net/ceph/libceph EXPORT_SYMBOL +0x2a09f713 cfpkt_fromnative net/caif/caif EXPORT_SYMBOL +0x35368e58 uio_event_notify drivers/uio/uio EXPORT_SYMBOL_GPL +0xc7856a3d inet6addr_notifier_call_chain vmlinux EXPORT_SYMBOL +0xbb6a3cbd devlink_fmsg_arr_pair_nest_start vmlinux EXPORT_SYMBOL_GPL +0x29010f14 cpuidle_unregister vmlinux EXPORT_SYMBOL_GPL +0xc3255142 phy_10gbit_features vmlinux EXPORT_SYMBOL_GPL +0x588a2e1b pm_schedule_suspend vmlinux EXPORT_SYMBOL_GPL +0xa26b2a78 ttm_mem_io_unlock vmlinux EXPORT_SYMBOL +0xb5ae1d2d clk_bulk_get_all vmlinux EXPORT_SYMBOL +0x86df8998 pci_ats_queue_depth vmlinux EXPORT_SYMBOL_GPL +0xf1e036a5 fscrypt_ioctl_remove_key_all_users vmlinux EXPORT_SYMBOL_GPL +0xc08647ff ring_buffer_bytes_cpu vmlinux EXPORT_SYMBOL_GPL +0xcbbf0a6f audit_log_task_context vmlinux EXPORT_SYMBOL +0x32b01970 __vsock_core_init net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xe5d4646c dccp_init_sock net/dccp/dccp EXPORT_SYMBOL_GPL +0xab6c0e76 ib_drain_rq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3281c69c ib_drain_sq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7df82768 ubi_get_volume_info drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x5c0ec78c mtd_device_parse_register drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x2618ef53 nvme_stop_keep_alive drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x389f9379 tcf_block_netif_keep_dst vmlinux EXPORT_SYMBOL +0xd24af559 hid_alloc_report_buf vmlinux EXPORT_SYMBOL_GPL +0xe5284f17 spi_write_then_read vmlinux EXPORT_SYMBOL_GPL +0x3f6e43bc drm_connector_attach_max_bpc_property vmlinux EXPORT_SYMBOL +0xa47826e4 drm_dp_calc_pbn_mode vmlinux EXPORT_SYMBOL +0x6e1ad79e backlight_device_get_by_type vmlinux EXPORT_SYMBOL +0x3f140ede pinctrl_lookup_state vmlinux EXPORT_SYMBOL_GPL +0xf9a054b5 __round_jiffies vmlinux EXPORT_SYMBOL_GPL +0xba886e29 kthread_create_worker vmlinux EXPORT_SYMBOL +0xc0f31557 rdma_rw_ctx_wrs drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x31b8ba8f ib_process_cq_direct drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x1bf0a125 target_nacl_find_deve drivers/target/target_core_mod EXPORT_SYMBOL +0xf4909bea mlxsw_core_port_type_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xefe73979 simd_skcipher_free crypto/crypto_simd EXPORT_SYMBOL_GPL +0x63ae67a0 nfs_write_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb307c909 devlink_fmsg_u64_pair_put vmlinux EXPORT_SYMBOL_GPL +0x147e912a page_pool_alloc_pages vmlinux EXPORT_SYMBOL +0xf37694a1 regmap_get_max_register vmlinux EXPORT_SYMBOL_GPL +0x658f6e5d serial8250_em485_destroy vmlinux EXPORT_SYMBOL_GPL +0x0011fc34 devm_reset_control_array_get vmlinux EXPORT_SYMBOL_GPL +0xce84f7b7 security_sb_clone_mnt_opts vmlinux EXPORT_SYMBOL +0x16edcd54 vfs_rmdir vmlinux EXPORT_SYMBOL +0x6f74a31b generic_shutdown_super vmlinux EXPORT_SYMBOL +0x374f9363 ptrace_pre_hook vmlinux EXPORT_SYMBOL +0x779c9f95 ocfs2_stack_glue_unregister fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL 
+0xb1645c94 xt_compat_target_to_user vmlinux EXPORT_SYMBOL_GPL +0x8a18de6d netdev_master_upper_dev_link vmlinux EXPORT_SYMBOL +0xdfebcab4 sas_port_delete_phy vmlinux EXPORT_SYMBOL +0xcbf946f0 sas_disable_tlr vmlinux EXPORT_SYMBOL_GPL +0xd48037d8 scsi_track_queue_full vmlinux EXPORT_SYMBOL +0x924a017b tpm_unseal_trusted vmlinux EXPORT_SYMBOL_GPL +0x735e6a81 acpi_evaluate_integer vmlinux EXPORT_SYMBOL +0x161619de pinctrl_count_index_with_args vmlinux EXPORT_SYMBOL_GPL +0x146289b7 crc16_table vmlinux EXPORT_SYMBOL +0x937733e3 qid_valid vmlinux EXPORT_SYMBOL +0xfe82b998 d_alloc_parallel vmlinux EXPORT_SYMBOL +0xf21e1f9b disable_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0x1cbd92b0 cpu_mitigations_off vmlinux EXPORT_SYMBOL_GPL +0xcd47fcc4 arc4_crypt lib/crypto/libarc4 EXPORT_SYMBOL +0x9a83caec svc_fill_symlink_pathname net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8596e94a snd_card_new sound/core/snd EXPORT_SYMBOL +0x19583581 ubi_close_volume drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x42c0d06a mlx5_cmd_cleanup_async_ctx drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4fee010d o2hb_unregister_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x8cb0a29a skb_copy_and_hash_datagram_iter vmlinux EXPORT_SYMBOL +0x4ecb920a __sock_recv_ts_and_drops vmlinux EXPORT_SYMBOL_GPL +0x47372313 sas_enable_tlr vmlinux EXPORT_SYMBOL_GPL +0xdaec6e1f device_add_properties vmlinux EXPORT_SYMBOL_GPL +0x5660bf02 jbd2__journal_start vmlinux EXPORT_SYMBOL +0x8658cd41 noop_set_page_dirty vmlinux EXPORT_SYMBOL_GPL +0x06a7eccb vfs_copy_file_range vmlinux EXPORT_SYMBOL +0x0f09cc34 schedule_timeout_killable vmlinux EXPORT_SYMBOL +0x4c7d71e8 pm_qos_request_active vmlinux EXPORT_SYMBOL_GPL +0xc5f13142 rpcauth_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3bc17c49 nf_nat_masquerade_inet_register_notifiers net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x7377a96b mlx4_mr_rereg_mem_write drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0a2ff08b mlx4_get_default_counter_index drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6d70ede7 fcoe_validate_vport_create drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0xcb5a258e rtm_getroute_parse_ip_proto vmlinux EXPORT_SYMBOL_GPL +0x762fe52d sock_no_getname vmlinux EXPORT_SYMBOL +0xd67348c1 of_device_is_big_endian vmlinux EXPORT_SYMBOL +0x9b8a53f5 drm_ht_just_insert_please vmlinux EXPORT_SYMBOL +0xfbcef0d1 acpi_dev_get_first_match_dev vmlinux EXPORT_SYMBOL +0x9738533c pci_msix_vec_count vmlinux EXPORT_SYMBOL +0xbf4b0e8f gpiochip_relres_irq vmlinux EXPORT_SYMBOL_GPL +0x8b0088d1 LZ4_decompress_safe_usingDict vmlinux EXPORT_SYMBOL +0x0b03e654 vfs_link vmlinux EXPORT_SYMBOL +0x0c242011 from_kuid_munged vmlinux EXPORT_SYMBOL +0x365acda7 set_normalized_timespec64 vmlinux EXPORT_SYMBOL +0x655e4879 __irq_alloc_descs vmlinux EXPORT_SYMBOL_GPL +0x4f21cbcc call_usermodehelper_setup vmlinux EXPORT_SYMBOL +0xccdd613b netlink_rcv_skb vmlinux EXPORT_SYMBOL +0x743e14b2 phy_driver_unregister vmlinux EXPORT_SYMBOL +0x82829280 of_clk_src_onecell_get vmlinux EXPORT_SYMBOL_GPL +0xf82abc1d isa_dma_bridge_buggy vmlinux EXPORT_SYMBOL +0x519db7fe crypto_alloc_acomp vmlinux EXPORT_SYMBOL_GPL +0x69b79d92 trace_event_reg vmlinux EXPORT_SYMBOL_GPL +0x07ceeac9 panic_notifier_list vmlinux EXPORT_SYMBOL +0xff7d12b0 ceph_check_fsid net/ceph/libceph EXPORT_SYMBOL +0xbb411e7a ip_set_elem_len net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xc6c266c0 rdma_rw_ctx_post drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb873573d rdma_rw_ctx_init drivers/infiniband/core/ib_core EXPORT_SYMBOL 
+0x69c64858 unregister_mtd_chip_driver drivers/mtd/chips/chipreg EXPORT_SYMBOL +0x9f177e74 rndis_set_host_mac drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x50783142 mlx5_core_destroy_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x43375003 __fcoe_get_lesb drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x5f3dc9d3 st21nfca_vendor_cmds_init drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x0b53c9ca __xfrm_state_delete vmlinux EXPORT_SYMBOL +0xb5467b8e mbox_client_peek_data vmlinux EXPORT_SYMBOL_GPL +0x0d75562b drm_fb_cma_get_gem_obj vmlinux EXPORT_SYMBOL_GPL +0xd768e985 regulator_has_full_constraints vmlinux EXPORT_SYMBOL_GPL +0x5b5bed30 __acpi_handle_debug vmlinux EXPORT_SYMBOL +0xed719890 gpiod_get_raw_array_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0x3801776b __ioread32_copy vmlinux EXPORT_SYMBOL_GPL +0xcdb19d2b d_delete vmlinux EXPORT_SYMBOL +0x8b2b9357 mod_timer vmlinux EXPORT_SYMBOL +0xccea7bd8 send_sig_mceerr vmlinux EXPORT_SYMBOL +0x0ea977fb line6_pcm_release sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0xfe9be996 ib_open_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7665a95b idr_remove vmlinux EXPORT_SYMBOL_GPL +0xcbc6e9e3 drm_mode_create_suggested_offset_properties vmlinux EXPORT_SYMBOL +0x012db0f2 vc_scrolldelta_helper vmlinux EXPORT_SYMBOL_GPL +0xbe49252c acpi_os_write_port vmlinux EXPORT_SYMBOL +0x26d46449 mnt_set_expiry vmlinux EXPORT_SYMBOL +0x16879620 use_mm vmlinux EXPORT_SYMBOL_GPL +0x2d41e6f5 __trace_puts vmlinux EXPORT_SYMBOL_GPL +0xa2df6c92 param_get_invbool vmlinux EXPORT_SYMBOL +0xaba85da3 save_stack_trace_tsk vmlinux EXPORT_SYMBOL_GPL +0xbf9d1b96 nfsd_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x63839c69 ip_tunnel_newlink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xfd67e300 ct_sip_parse_numerical_param net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0xfe40f5c1 genl_notify vmlinux EXPORT_SYMBOL +0xa0dad88e netdev_adjacent_get_private vmlinux EXPORT_SYMBOL +0x414c27c6 devm_rtc_device_register vmlinux EXPORT_SYMBOL_GPL +0xc2c6cf55 devres_destroy vmlinux EXPORT_SYMBOL_GPL +0x7aefded5 drm_connector_list_iter_next vmlinux EXPORT_SYMBOL +0x7715d942 tty_hangup vmlinux EXPORT_SYMBOL +0x67c13ea0 acpi_read vmlinux EXPORT_SYMBOL +0x7a98dea9 pci_disable_device vmlinux EXPORT_SYMBOL +0xabb62dc2 gpiod_get_array_value vmlinux EXPORT_SYMBOL_GPL +0x8ac3334b net_dim_get_def_rx_moderation vmlinux EXPORT_SYMBOL +0x61dda52c add_page_wait_queue vmlinux EXPORT_SYMBOL_GPL +0x9504df26 irq_wake_thread vmlinux EXPORT_SYMBOL_GPL +0xeffd862b prepare_to_swait_exclusive vmlinux EXPORT_SYMBOL +0xe58a3360 p9_error_init net/9p/9pnet EXPORT_SYMBOL +0xb1e70801 __twofish_setkey crypto/twofish_common EXPORT_SYMBOL_GPL +0x58a67f57 fscache_obtained_object fs/fscache/fscache EXPORT_SYMBOL +0x8ebb0aca xfrm_lookup_route vmlinux EXPORT_SYMBOL +0xd37014dd mdiobus_read vmlinux EXPORT_SYMBOL +0x244173dc pm_runtime_forbid vmlinux EXPORT_SYMBOL_GPL +0x30e74134 tty_termios_copy_hw vmlinux EXPORT_SYMBOL +0xd2593a92 regulator_disable_deferred vmlinux EXPORT_SYMBOL_GPL +0x5af2b5c8 dma_release_channel vmlinux EXPORT_SYMBOL_GPL +0x9489f981 acpi_dev_get_resources vmlinux EXPORT_SYMBOL_GPL +0xa391afe9 badblocks_set vmlinux EXPORT_SYMBOL_GPL +0x69775f2b cryptd_free_aead vmlinux EXPORT_SYMBOL_GPL +0xe689eee8 seq_put_decimal_ll vmlinux EXPORT_SYMBOL +0xc18015cd ceph_osdc_list_watchers net/ceph/libceph EXPORT_SYMBOL +0x81fd77f4 rpc_pton net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1f2a9970 rpc_ntop net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb4f1f819 nf_tproxy_laddr4 
net/ipv4/netfilter/nf_tproxy_ipv4 EXPORT_SYMBOL_GPL +0x86cef180 rdma_addr_size drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbfe58b76 ib_query_port drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x74a0134a mpt_device_driver_deregister drivers/message/fusion/mptbase EXPORT_SYMBOL +0x86887362 mlx4_wol_read drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xa0368db9 nfs_clone_server fs/nfs/nfs EXPORT_SYMBOL_GPL +0x28cf4cc3 efivars_kobject vmlinux EXPORT_SYMBOL_GPL +0x37af3190 dm_table_run_md_queue_async vmlinux EXPORT_SYMBOL +0x2ab9f636 usb_get_maximum_speed vmlinux EXPORT_SYMBOL_GPL +0x9607722f fwnode_graph_get_remote_node vmlinux EXPORT_SYMBOL_GPL +0x79750389 drm_sched_resume_timeout vmlinux EXPORT_SYMBOL +0xc1f9a3d3 drm_atomic_get_plane_state vmlinux EXPORT_SYMBOL +0x8628ae27 drm_gem_prime_import_dev vmlinux EXPORT_SYMBOL +0xa6ff9496 drm_dp_link_train_clock_recovery_delay vmlinux EXPORT_SYMBOL +0xace9dc2f virtqueue_poll vmlinux EXPORT_SYMBOL_GPL +0x2e3b6b83 devm_clk_get vmlinux EXPORT_SYMBOL +0xfdb6e77e pcie_capability_clear_and_set_word vmlinux EXPORT_SYMBOL +0x0b3d617b PageHuge vmlinux EXPORT_SYMBOL_GPL +0xf7ae63ca irq_domain_remove vmlinux EXPORT_SYMBOL_GPL +0xd099b3ea ceph_osdc_abort_requests net/ceph/libceph EXPORT_SYMBOL +0x310183d9 rndis_signal_disconnect drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0xdaceb7a6 mdio_mii_ioctl drivers/net/mdio EXPORT_SYMBOL +0xc72cecfa mlx5_get_uars_page drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xd96e508d hinic_max_num_cos drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x19df990c fc_eh_timed_out drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xc90a9bd4 ir_raw_event_store vmlinux EXPORT_SYMBOL_GPL +0xb5fbe55a ahci_error_handler vmlinux EXPORT_SYMBOL_GPL +0x98698714 request_firmware_into_buf vmlinux EXPORT_SYMBOL +0x2eb8fbc3 drm_client_rotation vmlinux EXPORT_SYMBOL +0x19ed325a drm_irq_install vmlinux EXPORT_SYMBOL +0xe4df2008 con_is_bound vmlinux EXPORT_SYMBOL +0xa00aca2a dql_completed vmlinux EXPORT_SYMBOL +0x6e2f22eb __bio_try_merge_page vmlinux EXPORT_SYMBOL_GPL +0xa3a04f4a crypto_unregister_scomps vmlinux EXPORT_SYMBOL_GPL +0x9008b5d4 crypto_unregister_acomps vmlinux EXPORT_SYMBOL_GPL +0xd9f2a5ff blkcipher_aead_walk_virt_block vmlinux EXPORT_SYMBOL_GPL +0x84eebb1d key_link vmlinux EXPORT_SYMBOL +0xcb3bc9a3 read_cache_page vmlinux EXPORT_SYMBOL +0xbfe5616d tick_broadcast_oneshot_control vmlinux EXPORT_SYMBOL_GPL +0x5bdbac4e rcu_unexpedite_gp vmlinux EXPORT_SYMBOL_GPL +0x8c2921e2 __tracepoint_pelt_irq_tp vmlinux EXPORT_SYMBOL_GPL +0x8dc42c89 transport_alloc_session_tags drivers/target/target_core_mod EXPORT_SYMBOL +0xb79a54ee mdio45_nway_restart drivers/net/mdio EXPORT_SYMBOL +0xd57d1c21 mlx4_max_tc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x1773f4de nfs4_find_get_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x8e7656f1 __udp_gso_segment vmlinux EXPORT_SYMBOL_GPL +0x06cf2bf1 dev_change_flags vmlinux EXPORT_SYMBOL +0xcde624e0 __skb_gso_segment vmlinux EXPORT_SYMBOL +0xd07294e6 ata_dev_set_feature vmlinux EXPORT_SYMBOL_GPL +0x06ae9045 dquot_drop vmlinux EXPORT_SYMBOL +0x752f4621 lapb_unregister net/lapb/lapb EXPORT_SYMBOL +0xd660aaee dm_cell_unlock_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x7666a51e xfrm_state_walk vmlinux EXPORT_SYMBOL +0x74076833 inet_gso_segment vmlinux EXPORT_SYMBOL +0xd863c0ae mbox_chan_txdone vmlinux EXPORT_SYMBOL_GPL +0xe13cd8a7 dmi_name_in_vendors vmlinux EXPORT_SYMBOL +0xc8ea074a usb_ep_dequeue vmlinux EXPORT_SYMBOL_GPL +0x51b79c8a 
fwnode_get_parent vmlinux EXPORT_SYMBOL_GPL +0xefcea2e7 acpi_warning vmlinux EXPORT_SYMBOL +0xf6c9228c sbitmap_queue_wake_all vmlinux EXPORT_SYMBOL_GPL +0x96cd77cc nla_put_nohdr vmlinux EXPORT_SYMBOL +0x1cb5a971 pid_nr_ns vmlinux EXPORT_SYMBOL_GPL +0x49e86a0e ib_get_gids_from_rdma_hdr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb0717797 mlxsw_afa_block_append_fid_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x29aabd5f napi_gro_flush vmlinux EXPORT_SYMBOL +0x649e3cf1 sas_free_task vmlinux EXPORT_SYMBOL_GPL +0xf2b5c750 scsi_set_medium_removal vmlinux EXPORT_SYMBOL +0xdb7c44bc ahci_platform_suspend_host vmlinux EXPORT_SYMBOL_GPL +0x8b989cf9 acpi_bus_can_wakeup vmlinux EXPORT_SYMBOL +0x1b47546e pinctrl_pm_select_sleep_state vmlinux EXPORT_SYMBOL_GPL +0xbc446ca4 blk_stat_enable_accounting vmlinux EXPORT_SYMBOL_GPL +0x9f90fac7 keyring_clear vmlinux EXPORT_SYMBOL +0xf22a8d83 profile_pc vmlinux EXPORT_SYMBOL +0xa0c5e8f1 snd_timer_resolution sound/core/snd-timer EXPORT_SYMBOL +0x6b43b62f rdma_restrack_del drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7b50270e rdma_restrack_get drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x65e16da4 mlxsw_afk_key_info_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x551f281d devm_regulator_register_supply_alias vmlinux EXPORT_SYMBOL_GPL +0x90fe97c6 devm_clk_bulk_get_all vmlinux EXPORT_SYMBOL_GPL +0x770daf4a acpi_pm_wakeup_event vmlinux EXPORT_SYMBOL_GPL +0xb7b1394b jbd2_journal_wipe vmlinux EXPORT_SYMBOL +0x444f1735 cpu_pm_register_notifier vmlinux EXPORT_SYMBOL_GPL +0xb36e4148 ib_sa_pack_path drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd20d16d8 __ps2_command drivers/input/serio/libps2 EXPORT_SYMBOL +0xa4e6010a mlx4_fmr_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xc310b981 strnstr vmlinux EXPORT_SYMBOL +0x35b2132d netdev_lower_get_next_private_rcu vmlinux EXPORT_SYMBOL +0x5aec6c08 __skb_pad vmlinux EXPORT_SYMBOL +0x51edd551 usb_block_urb vmlinux EXPORT_SYMBOL_GPL +0xd7767e9a hisi_sas_release_tasks vmlinux EXPORT_SYMBOL_GPL +0x8c743fb6 reset_control_status vmlinux EXPORT_SYMBOL_GPL +0x509a7c6d clk_register_gpio_mux vmlinux EXPORT_SYMBOL_GPL +0x32e9f0e8 fb_set_var vmlinux EXPORT_SYMBOL +0x50572e1a __nla_put vmlinux EXPORT_SYMBOL +0x0f80c4ec __list_lru_init vmlinux EXPORT_SYMBOL_GPL +0xb8a2f75b perf_pmu_migrate_context vmlinux EXPORT_SYMBOL_GPL +0xdbef8559 inet_diag_register net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x641f71ff nf_ct_helper_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xdf3d615c dm_rh_dirty_log drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xcf57df99 pnfs_generic_layout_insert_lseg fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x169938c1 __sysfs_match_string vmlinux EXPORT_SYMBOL +0x2cd544a6 sas_port_mark_backlink vmlinux EXPORT_SYMBOL +0x6f81f395 ata_pci_sff_prepare_host vmlinux EXPORT_SYMBOL_GPL +0x2bbeef0a tpm_is_tpm2 vmlinux EXPORT_SYMBOL_GPL +0x0cc3b29e acpi_dev_filter_resource_type vmlinux EXPORT_SYMBOL_GPL +0x563afd4d dw_pcie_host_deinit vmlinux EXPORT_SYMBOL_GPL +0xe0132e3c devm_pci_remap_cfg_resource vmlinux EXPORT_SYMBOL +0x6322a3af crypto_register_acomps vmlinux EXPORT_SYMBOL_GPL +0x1ba99b44 blk_trace_startstop vmlinux EXPORT_SYMBOL_GPL +0xbc24b371 snd_pcm_hw_rule_add sound/core/snd-pcm EXPORT_SYMBOL +0x029cea78 vringh_complete_kern drivers/vhost/vringh EXPORT_SYMBOL +0xc5aaae76 mtd_ooblayout_find_eccregion drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x08c9b84b tcp_done vmlinux EXPORT_SYMBOL_GPL +0x9f6d1284 drm_gem_cma_prime_import_sg_table vmlinux EXPORT_SYMBOL_GPL +0x2efdeac0 
drm_dp_mst_get_vcpi_slots vmlinux EXPORT_SYMBOL +0x2fb6de5d add_device_randomness vmlinux EXPORT_SYMBOL +0x20d65e40 fb_find_nearest_mode vmlinux EXPORT_SYMBOL +0x45f9453b gpiod_get_array vmlinux EXPORT_SYMBOL_GPL +0x2b0d543c rds_message_add_extension net/rds/rds EXPORT_SYMBOL_GPL +0xc2dab779 rds_info_copy net/rds/rds EXPORT_SYMBOL_GPL +0x0a575945 xfrm_count_pfkey_auth_supported net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x19debf81 snd_card_file_remove sound/core/snd EXPORT_SYMBOL +0xbc66815e vringh_notify_disable_user drivers/vhost/vringh EXPORT_SYMBOL +0x4d92bfd7 iscsi_change_param_sprintf drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x7ade1071 dm_tm_destroy drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x9c67cda2 __iscsi_complete_pdu drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x7fabcaa7 o2nm_get_node_by_num fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x6d178c95 unregister_nfs_version fs/nfs/nfs EXPORT_SYMBOL_GPL +0xed8a2d95 memset64 vmlinux EXPORT_SYMBOL +0x0c25ec48 secure_tcpv6_seq vmlinux EXPORT_SYMBOL +0x6d253dca dmi_match vmlinux EXPORT_SYMBOL_GPL +0x578ab41b usb_of_has_combined_node vmlinux EXPORT_SYMBOL_GPL +0xb2aa4f39 usb_init_urb vmlinux EXPORT_SYMBOL_GPL +0x02fe7272 genphy_aneg_done vmlinux EXPORT_SYMBOL +0x6217e847 iscsi_create_iface vmlinux EXPORT_SYMBOL_GPL +0x71650819 add_bootloader_randomness vmlinux EXPORT_SYMBOL_GPL +0x8d2c0fca serial8250_rpm_get vmlinux EXPORT_SYMBOL_GPL +0x7ae8dee6 devm_acpi_dev_add_driver_gpios vmlinux EXPORT_SYMBOL_GPL +0x2ceb37ec badblocks_clear vmlinux EXPORT_SYMBOL_GPL +0x562f06ee crypto_alloc_kpp vmlinux EXPORT_SYMBOL_GPL +0x56d14fdc security_inode_getsecctx vmlinux EXPORT_SYMBOL +0x4f970f18 fscrypt_enqueue_decrypt_work vmlinux EXPORT_SYMBOL +0xd43bcc28 wait_on_page_bit_killable vmlinux EXPORT_SYMBOL +0x274fff39 padata_remove_cpu vmlinux EXPORT_SYMBOL +0x1d62db33 irq_domain_free_irqs_common vmlinux EXPORT_SYMBOL_GPL +0xcb50c7f2 __irq_domain_alloc_fwnode vmlinux EXPORT_SYMBOL_GPL +0x2bb32ad1 arc4_setkey lib/crypto/libarc4 EXPORT_SYMBOL +0xac437f7b snd_interval_ratnum sound/core/snd-pcm EXPORT_SYMBOL +0xaa3c4a6a mtd_write_oob drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xb8fbe0c4 udp_init_sock vmlinux EXPORT_SYMBOL_GPL +0x7858bf8f i2c_acpi_new_device vmlinux EXPORT_SYMBOL_GPL +0x7f40d34c usb_hcd_unmap_urb_setup_for_dma vmlinux EXPORT_SYMBOL_GPL +0xd151f4ad scsi_device_put vmlinux EXPORT_SYMBOL +0xe11676de scsi_device_get vmlinux EXPORT_SYMBOL +0xcccfb2fa sata_deb_timing_hotplug vmlinux EXPORT_SYMBOL_GPL +0x436f62f4 of_pm_clk_add_clk vmlinux EXPORT_SYMBOL_GPL +0x051919a2 __pm_runtime_set_status vmlinux EXPORT_SYMBOL_GPL +0x5f937e45 drm_prime_sg_to_page_addr_arrays vmlinux EXPORT_SYMBOL +0x65d1bab2 acpi_bios_warning vmlinux EXPORT_SYMBOL +0x15896d67 balloon_page_dequeue vmlinux EXPORT_SYMBOL_GPL +0x23f8733a balloon_page_enqueue vmlinux EXPORT_SYMBOL_GPL +0xacab2e3b kmem_cache_alloc_bulk vmlinux EXPORT_SYMBOL +0x6d5480db map_vm_area vmlinux EXPORT_SYMBOL_GPL +0xd622c084 unregister_shrinker vmlinux EXPORT_SYMBOL +0xe037666e unregister_ftrace_function vmlinux EXPORT_SYMBOL_GPL +0x2e2360b1 ftrace_set_global_notrace vmlinux EXPORT_SYMBOL_GPL +0x6826a1bc br_vlan_get_proto net/bridge/bridge EXPORT_SYMBOL_GPL +0xca58e446 dm_register_path_selector drivers/md/dm-multipath EXPORT_SYMBOL_GPL +0x4031bb7f mlx4_get_slave_node_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xb649bc4d wimax_msg_send vmlinux EXPORT_SYMBOL_GPL +0xa5e9ba3f ipv6_dev_get_saddr vmlinux EXPORT_SYMBOL +0x634b300e xfrm_policy_flush vmlinux EXPORT_SYMBOL 
+0x4e5be9ea skb_tx_error vmlinux EXPORT_SYMBOL +0x27eb5cd3 sock_efree vmlinux EXPORT_SYMBOL +0xe5cb1943 hisi_clk_register_divider vmlinux EXPORT_SYMBOL_GPL +0x908cc69f rds_atomic_send_complete net/rds/rds EXPORT_SYMBOL_GPL +0x0fad2c85 pn_sock_hash net/phonet/phonet EXPORT_SYMBOL +0x2b422d63 mlx5_core_destroy_psv drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x6a32f367 napi_gro_frags vmlinux EXPORT_SYMBOL +0x76b30eec netdev_unbind_sb_channel vmlinux EXPORT_SYMBOL +0x4cf89b20 skb_copy_datagram_from_iter vmlinux EXPORT_SYMBOL +0xc26b3202 cpufreq_cpu_get_raw vmlinux EXPORT_SYMBOL_GPL +0xaf54ef75 ttm_bo_init_reserved vmlinux EXPORT_SYMBOL +0x0ec15e50 pci_load_saved_state vmlinux EXPORT_SYMBOL_GPL +0xb129270a key_task_permission vmlinux EXPORT_SYMBOL +0xcd60563e page_cache_next_miss vmlinux EXPORT_SYMBOL +0x09437748 ring_buffer_read_events_cpu vmlinux EXPORT_SYMBOL_GPL +0x9b555c8c pm_suspend_default_s2idle vmlinux EXPORT_SYMBOL_GPL +0x767b8ba8 base_true_key lib/test_static_key_base EXPORT_SYMBOL_GPL +0x0af751c3 inet_diag_unregister net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x0e1a39c6 nf_ct_netns_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xc2355452 nf_ct_netns_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x6036936b vringh_complete_multi_user drivers/vhost/vringh EXPORT_SYMBOL +0xb6e62054 detach_capi_ctr drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x0357d4ff mlx4_mr_hw_get_mpt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x9635e611 iavf_unregister_client drivers/net/ethernet/intel/iavf/iavf EXPORT_SYMBOL +0x0d4961de nf_log_buf_open vmlinux EXPORT_SYMBOL_GPL +0x5a4bcd27 netdev_has_upper_dev_all_rcu vmlinux EXPORT_SYMBOL +0x322e1725 sock_sendmsg vmlinux EXPORT_SYMBOL +0x6158a214 sas_domain_attach_transport vmlinux EXPORT_SYMBOL_GPL +0xfec3d37f iscsi_offload_mesg vmlinux EXPORT_SYMBOL_GPL +0x8b12829c import_iovec vmlinux EXPORT_SYMBOL +0x109264c4 disk_get_part vmlinux EXPORT_SYMBOL_GPL +0x81cf5282 trace_seq_bprintf vmlinux EXPORT_SYMBOL_GPL +0x2cbaae1e l2tp_recv_common net/l2tp/l2tp_core EXPORT_SYMBOL +0x10583a4e snd_pcm_stop_xrun sound/core/snd-pcm EXPORT_SYMBOL_GPL +0xba3222c1 snd_hwdep_new sound/core/snd-hwdep EXPORT_SYMBOL +0xbcafd740 netlink_kernel_release vmlinux EXPORT_SYMBOL +0x88dd1c91 __sock_create vmlinux EXPORT_SYMBOL +0xad1b6697 regulator_suspend_disable vmlinux EXPORT_SYMBOL_GPL +0x1ea0fc00 pci_bus_resource_n vmlinux EXPORT_SYMBOL_GPL +0xf5a20ed2 __genradix_prealloc vmlinux EXPORT_SYMBOL +0x35e3ee80 bio_free_pages vmlinux EXPORT_SYMBOL +0x69585523 __ksize vmlinux EXPORT_SYMBOL +0x81b395b3 down_interruptible vmlinux EXPORT_SYMBOL +0x001263d7 kvm_read_guest_cached vmlinux EXPORT_SYMBOL_GPL +0xf8f072f6 ieee802154_hdr_peek_addrs net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL +0x50f65edf ipmi_set_gets_events drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x6c5f427f tcp_abort vmlinux EXPORT_SYMBOL_GPL +0x299b38ca netif_carrier_off vmlinux EXPORT_SYMBOL +0x13ed8784 sdev_evt_alloc vmlinux EXPORT_SYMBOL_GPL +0x4378f1be drm_do_get_edid vmlinux EXPORT_SYMBOL_GPL +0x1627bce2 pinctrl_force_default vmlinux EXPORT_SYMBOL_GPL +0x3c9a4cce lease_get_mtime vmlinux EXPORT_SYMBOL +0x0c4cfea4 sb_set_blocksize vmlinux EXPORT_SYMBOL +0x43b679b5 srcu_barrier vmlinux EXPORT_SYMBOL_GPL +0x6f31caa5 kthread_unpark vmlinux EXPORT_SYMBOL_GPL +0x2805376a cache_seq_start_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x75ad5352 nf_dup_netdev_egress net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL +0x10678a1b snd_ctl_boolean_stereo_info sound/core/snd EXPORT_SYMBOL +0x447f348f 
parport_ieee1284_epp_read_addr drivers/parport/parport EXPORT_SYMBOL +0xcaebea91 inet_csk_reqsk_queue_drop vmlinux EXPORT_SYMBOL +0x0e075306 nf_ct_hook vmlinux EXPORT_SYMBOL_GPL +0xdd372dbb flow_block_cb_incref vmlinux EXPORT_SYMBOL +0x8c0215f2 pm_system_wakeup vmlinux EXPORT_SYMBOL_GPL +0xd34653a3 pinctrl_utils_free_map vmlinux EXPORT_SYMBOL_GPL +0x3744cf36 vmalloc_to_pfn vmlinux EXPORT_SYMBOL +0x3e402957 access_process_vm vmlinux EXPORT_SYMBOL_GPL +0xa44fbefa __tracepoint_xdp_exception vmlinux EXPORT_SYMBOL_GPL +0xead0cd8f get_task_mm vmlinux EXPORT_SYMBOL_GPL +0x2fc384c5 mlx5_modify_nic_vport_promisc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xd4c5697b hinic_register_fault_recover drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x42f2c81f nfs4_client_id_uniquifier fs/nfs/nfs EXPORT_SYMBOL_GPL +0x551bd071 __rb_erase_color vmlinux EXPORT_SYMBOL +0xfeb5d0aa verify_spi_info vmlinux EXPORT_SYMBOL +0x09ee8933 udp_skb_destructor vmlinux EXPORT_SYMBOL +0xb0e240e7 inet_csk_prepare_forced_close vmlinux EXPORT_SYMBOL +0x2379345b devlink_dpipe_match_put vmlinux EXPORT_SYMBOL_GPL +0x7bb50b88 acpi_write vmlinux EXPORT_SYMBOL +0x8e16419b trace_clock_local vmlinux EXPORT_SYMBOL_GPL +0x40e33ba2 rpc_task_release_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x677a2e39 nft_unregister_flowtable_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xa4e576bc nf_ct_expect_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x97cc71b1 iscsit_register_transport drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x652c6067 md_handle_request drivers/md/md-mod EXPORT_SYMBOL +0x8854d198 mlxsw_reg_write drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x30673a1d register_tcf_proto_ops vmlinux EXPORT_SYMBOL +0xa186639e xdp_attachment_query vmlinux EXPORT_SYMBOL_GPL +0xb9b9df41 usb_amd_dev_put vmlinux EXPORT_SYMBOL_GPL +0xb067f42f drm_plane_from_index vmlinux EXPORT_SYMBOL +0x89485687 iommu_group_put vmlinux EXPORT_SYMBOL_GPL +0x23710db3 iommu_group_get vmlinux EXPORT_SYMBOL_GPL +0xcde781ef pci_pasid_features vmlinux EXPORT_SYMBOL_GPL +0xef9a36e0 pci_ignore_hotplug vmlinux EXPORT_SYMBOL_GPL +0x91c2409f crypto_unregister_rng vmlinux EXPORT_SYMBOL_GPL +0x9b69ddaa crypto_unregister_alg vmlinux EXPORT_SYMBOL_GPL +0xe2b78f48 lru_cache_add_file vmlinux EXPORT_SYMBOL +0xbec5307e sched_trace_rq_avg_rt vmlinux EXPORT_SYMBOL_GPL +0x14cbd518 synproxy_send_client_synack net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x6fe6e6f4 unregister_md_personality drivers/md/md-mod EXPORT_SYMBOL +0x4a6ed376 mlxsw_core_port_fini drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x284bfc79 mlx4_set_vf_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe428873c iscsi_suspend_tx drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xe7436d31 crypto_chacha_init crypto/chacha_generic EXPORT_SYMBOL_GPL +0x5e95a4b2 o2net_send_message_vec fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x3d81a5ff wimax_dev_init vmlinux EXPORT_SYMBOL_GPL +0x6c1bb310 qdisc_watchdog_schedule_ns vmlinux EXPORT_SYMBOL +0x38b7ce11 nvmem_get_mac_address vmlinux EXPORT_SYMBOL +0xe6e2c2fd netdev_walk_all_lower_dev vmlinux EXPORT_SYMBOL_GPL +0xe4da7181 __skb_vlan_pop vmlinux EXPORT_SYMBOL +0x605a8fd5 hwmon_device_register_with_groups vmlinux EXPORT_SYMBOL_GPL +0x428a34c1 __class_create vmlinux EXPORT_SYMBOL_GPL +0x556d2606 clk_register_mux_table vmlinux EXPORT_SYMBOL_GPL +0x27810361 acpi_os_wait_events_complete vmlinux EXPORT_SYMBOL +0x4c416eb9 LZ4_decompress_fast vmlinux EXPORT_SYMBOL +0x34f4c06e 
fuse_conn_get vmlinux EXPORT_SYMBOL_GPL +0xb2b8b60d kern_path_mountpoint vmlinux EXPORT_SYMBOL +0x2c5d5eb5 filp_open vmlinux EXPORT_SYMBOL +0x6ce10eb0 trace_clock_jiffies vmlinux EXPORT_SYMBOL_GPL +0x48ade5f8 smpboot_register_percpu_thread vmlinux EXPORT_SYMBOL_GPL +0x80993155 dccp_timestamp net/dccp/dccp EXPORT_SYMBOL_GPL +0xaab23340 xfrm_calg_get_byname net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x3281ee32 ip_vs_tcp_conn_listen net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x5a288c68 nf_flow_offload_ipv6_hook net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x07a3591f snd_rawmidi_set_ops sound/core/snd-rawmidi EXPORT_SYMBOL +0x5adc0751 _snd_pcm_stream_lock_irqsave sound/core/snd-pcm EXPORT_SYMBOL_GPL +0xa65c4cfa tap_get_ptr_ring drivers/net/tap EXPORT_SYMBOL_GPL +0x2ebe3135 cpu_is_hotpluggable vmlinux EXPORT_SYMBOL_GPL +0x973c4923 pci_read_vpd vmlinux EXPORT_SYMBOL +0x3d210724 gen_pool_dma_zalloc_align vmlinux EXPORT_SYMBOL +0x3207772e crypto_sha256_finup vmlinux EXPORT_SYMBOL +0x6636c3c9 irq_set_vcpu_affinity vmlinux EXPORT_SYMBOL_GPL +0xfed09e7f ipv6_synproxy_hook net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xdaf7bf09 ipv4_synproxy_hook net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x5cea54d4 nfnl_acct_overquota net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL +0xeca7949e dm_bufio_client_destroy drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x96172afd md_bitmap_unplug drivers/md/md-mod EXPORT_SYMBOL +0x26cc17ed hinic_get_slave_host_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x822728e3 hinic_func_max_vf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x19876274 __tracepoint_nvme_sq drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x20b581e3 pingv6_prot vmlinux EXPORT_SYMBOL_GPL +0xe7eee3d5 __cookie_v4_init_sequence vmlinux EXPORT_SYMBOL_GPL +0x3154115f of_property_match_string vmlinux EXPORT_SYMBOL_GPL +0xbcd03d36 regulator_bulk_register_supply_alias vmlinux EXPORT_SYMBOL_GPL +0x8f70e1aa __clk_get_hw vmlinux EXPORT_SYMBOL_GPL +0xf43d44cb blk_pm_runtime_init vmlinux EXPORT_SYMBOL +0x1c5b8100 bsg_setup_queue vmlinux EXPORT_SYMBOL_GPL +0xbab1ff50 d_exact_alias vmlinux EXPORT_SYMBOL +0x3f437288 __free_pages vmlinux EXPORT_SYMBOL +0x54e9052c tracepoint_probe_register vmlinux EXPORT_SYMBOL_GPL +0x098d2ed9 lc_try_get lib/lru_cache EXPORT_SYMBOL +0x9a2a8114 nft_obj_notify net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xa1a5b85e nf_nat_amanda_hook net/netfilter/nf_conntrack_amanda EXPORT_SYMBOL_GPL +0xe0beb96e parport_put_port drivers/parport/parport EXPORT_SYMBOL +0x9081b15b parport_get_port drivers/parport/parport EXPORT_SYMBOL +0x202693f0 mlxsw_afa_block_cur_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x70caffba mlx5_toggle_port_link drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xfa8201a0 mlx5_core_qp_modify drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x5d87843b hinic_free_ceq drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x3679ddc5 inet_bind vmlinux EXPORT_SYMBOL +0x0b2e05e6 tc_setup_cb_call vmlinux EXPORT_SYMBOL +0x221434f7 qdisc_watchdog_init vmlinux EXPORT_SYMBOL +0xd1a36c5c power_supply_class vmlinux EXPORT_SYMBOL_GPL +0x9219b5cb rtc_nvmem_register vmlinux EXPORT_SYMBOL_GPL +0x2f9a439c usb_gadget_probe_driver vmlinux EXPORT_SYMBOL_GPL +0x704f0248 usb_hcd_end_port_resume vmlinux EXPORT_SYMBOL_GPL +0x7073c04f phy_10_100_features_array vmlinux EXPORT_SYMBOL_GPL +0x3ddd9228 class_find_device vmlinux EXPORT_SYMBOL_GPL +0x8227c8a2 iommu_aux_attach_device vmlinux EXPORT_SYMBOL_GPL +0x5921ab8c 
msi_desc_to_pci_sysdata vmlinux EXPORT_SYMBOL_GPL +0x91f3f2a3 pci_scan_child_bus vmlinux EXPORT_SYMBOL_GPL +0x24ce7b06 blk_mq_run_hw_queues vmlinux EXPORT_SYMBOL +0xbf4063d3 __register_binfmt vmlinux EXPORT_SYMBOL +0xbfbe53d5 lc_get_cumulative lib/lru_cache EXPORT_SYMBOL +0xb8e702b2 v9fs_register_trans net/9p/9pnet EXPORT_SYMBOL +0xf3078e61 hinic_alloc_db_addr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xdede9792 xfrm_init_state vmlinux EXPORT_SYMBOL +0xe1e7e40c rtnl_nla_parse_ifla vmlinux EXPORT_SYMBOL +0x4e05a80a alloc_skb_with_frags vmlinux EXPORT_SYMBOL +0xc385cb58 perf_num_counters vmlinux EXPORT_SYMBOL_GPL +0xd969a458 sb800_prefetch vmlinux EXPORT_SYMBOL_GPL +0x392872de drm_fb_memcpy vmlinux EXPORT_SYMBOL +0xc3bb0cb4 pci_assign_resource vmlinux EXPORT_SYMBOL +0xb417f082 kstrtos8_from_user vmlinux EXPORT_SYMBOL +0x755213d9 crypto_unregister_aead vmlinux EXPORT_SYMBOL_GPL +0xc23c5a80 crypto_init_spawn2 vmlinux EXPORT_SYMBOL_GPL +0xbbe21e03 linear_hugepage_index vmlinux EXPORT_SYMBOL_GPL +0x01c6cb0c cpu_cluster_pm_enter vmlinux EXPORT_SYMBOL_GPL +0x43b082bd ceph_osdc_copy_from net/ceph/libceph EXPORT_SYMBOL +0x6a6e014c caif_connect_client net/caif/caif EXPORT_SYMBOL +0xc9c53fcc rdma_destroy_ah_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfbfeaafc rdma_destroy_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd1a82f0b mlxsw_core_lag_mapping_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x8ba5fa7e mlxsw_core_lag_mapping_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xdf2b99fb hinic_detach_nic drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x897a4d75 nfs_file_write fs/nfs/nfs EXPORT_SYMBOL_GPL +0x0b07dee7 xfrm_user_policy vmlinux EXPORT_SYMBOL +0xd00ebe2f netif_napi_add vmlinux EXPORT_SYMBOL +0x524967b2 skb_split vmlinux EXPORT_SYMBOL +0xc6f59848 input_grab_device vmlinux EXPORT_SYMBOL +0x82be80e3 xhci_init_driver vmlinux EXPORT_SYMBOL_GPL +0x50fdb89c phy_modify_paged vmlinux EXPORT_SYMBOL +0x78a95c58 tty_port_block_til_ready vmlinux EXPORT_SYMBOL +0xc3e8f2d0 regulator_get_init_drvdata vmlinux EXPORT_SYMBOL_GPL +0x338ed1a5 regulator_get_current_limit vmlinux EXPORT_SYMBOL_GPL +0x148499dd hinic_set_mac drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x17ac1e80 vif_device_init vmlinux EXPORT_SYMBOL +0x5ce49ca5 netlink_broadcast vmlinux EXPORT_SYMBOL +0xf477bb90 __skb_get_hash_symmetric vmlinux EXPORT_SYMBOL_GPL +0x362faef7 nvmem_cell_read_u32 vmlinux EXPORT_SYMBOL_GPL +0xebc9c720 nvmem_cell_read_u16 vmlinux EXPORT_SYMBOL_GPL +0x0343bdf1 __i2c_board_list vmlinux EXPORT_SYMBOL_GPL +0x9860a0e1 ttm_get_kernel_zone_memory_size vmlinux EXPORT_SYMBOL +0x8e61820b drm_crtc_accurate_vblank_count vmlinux EXPORT_SYMBOL +0x2305a994 shmem_file_setup vmlinux EXPORT_SYMBOL_GPL +0xc631580a console_unlock vmlinux EXPORT_SYMBOL +0x8b8059bd in_group_p vmlinux EXPORT_SYMBOL +0x29ae3dba nfc_digital_unregister_device net/nfc/nfc_digital EXPORT_SYMBOL +0xf06ea7a6 ax25_send_frame net/ax25/ax25 EXPORT_SYMBOL +0x66f49ef9 mtd_block_markbad drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xef6e079e hnae_ae_unregister drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0x529bbbff udp_sendmsg vmlinux EXPORT_SYMBOL +0x00a8248d tcp_md5_hash_key vmlinux EXPORT_SYMBOL +0x5f9e1a1a __tracepoint_neigh_event_send_dead vmlinux EXPORT_SYMBOL_GPL +0x22648c02 __tracepoint_neigh_event_send_done vmlinux EXPORT_SYMBOL_GPL +0xa9d78514 ipv6_bpf_stub vmlinux EXPORT_SYMBOL_GPL +0xe4b818c3 phy_speed_to_str vmlinux EXPORT_SYMBOL_GPL +0xf29f8515 __kfifo_dma_out_prepare_r 
vmlinux EXPORT_SYMBOL +0x9a9c858b blkg_prfill_rwstat vmlinux EXPORT_SYMBOL_GPL +0x75b5906e elv_rb_former_request vmlinux EXPORT_SYMBOL +0x433ae21c user_preparse vmlinux EXPORT_SYMBOL_GPL +0xf2d05ccb bpf_trace_run9 vmlinux EXPORT_SYMBOL_GPL +0x0e2d42f9 bpf_trace_run8 vmlinux EXPORT_SYMBOL_GPL +0x03f75a7d bpf_trace_run7 vmlinux EXPORT_SYMBOL_GPL +0xd3051e07 bpf_trace_run6 vmlinux EXPORT_SYMBOL_GPL +0xa4bbd6c3 bpf_trace_run5 vmlinux EXPORT_SYMBOL_GPL +0x93146097 bpf_trace_run4 vmlinux EXPORT_SYMBOL_GPL +0x1967dbda bpf_trace_run3 vmlinux EXPORT_SYMBOL_GPL +0x4d8ee6b0 bpf_trace_run2 vmlinux EXPORT_SYMBOL_GPL +0x414ba4bc bpf_trace_run1 vmlinux EXPORT_SYMBOL_GPL +0x854b0c86 snd_timer_stop sound/core/snd-timer EXPORT_SYMBOL +0x47618e90 nfs4_test_deviceid_unavailable fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x5a2e1d38 dcb_ieee_delapp vmlinux EXPORT_SYMBOL +0x1b211823 dcb_ieee_setapp vmlinux EXPORT_SYMBOL +0xf53d4c26 qdisc_class_hash_destroy vmlinux EXPORT_SYMBOL +0xfe090118 usb_kill_urb vmlinux EXPORT_SYMBOL_GPL +0xb474f2f6 usb_get_intf vmlinux EXPORT_SYMBOL_GPL +0x3e974c69 iscsi_destroy_all_flashnode vmlinux EXPORT_SYMBOL_GPL +0x9bc1d573 ata_sff_lost_interrupt vmlinux EXPORT_SYMBOL_GPL +0x9b6710ce drm_crtc_vblank_reset vmlinux EXPORT_SYMBOL +0x4df48267 regulator_get_voltage_rdev vmlinux EXPORT_SYMBOL_GPL +0x7807eb10 devm_request_pci_bus_resources vmlinux EXPORT_SYMBOL_GPL +0x2f3ae586 blk_mq_bio_list_merge vmlinux EXPORT_SYMBOL_GPL +0xa6e1a69d kick_all_cpus_sync vmlinux EXPORT_SYMBOL_GPL +0x38f7b6e0 LZ4_compress_HC_continue lib/lz4/lz4hc_compress EXPORT_SYMBOL +0x6d9fa93f dm_bio_prison_alloc_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x6b82ae5b crypto_finalize_hash_request crypto/crypto_engine EXPORT_SYMBOL_GPL +0x23f1d190 hisi_sas_get_fw_info vmlinux EXPORT_SYMBOL_GPL +0xada36112 drm_gem_prime_import vmlinux EXPORT_SYMBOL +0xb015c05f drm_helper_encoder_in_use vmlinux EXPORT_SYMBOL +0xeaad96f9 sbitmap_queue_clear vmlinux EXPORT_SYMBOL_GPL +0xbe6763d7 key_type_asymmetric vmlinux EXPORT_SYMBOL_GPL +0x32ba3776 proc_get_parent_data vmlinux EXPORT_SYMBOL_GPL +0x335bf912 audit_log vmlinux EXPORT_SYMBOL +0xdd77c4dc set_security_override_from_ctx vmlinux EXPORT_SYMBOL +0x48005728 nfc_hci_register_device net/nfc/hci/hci EXPORT_SYMBOL +0xb7e88189 xdr_process_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0601f99b ip_tunnel_uninit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x0871ffd6 transport_set_vpd_ident drivers/target/target_core_mod EXPORT_SYMBOL +0x87c934be dm_tm_inc drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x26d5808a hinic_pcie_itf_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x1c56f0e9 sk_wait_data vmlinux EXPORT_SYMBOL +0x44a47cc2 usb_autopm_put_interface_no_suspend vmlinux EXPORT_SYMBOL_GPL +0x2a793e6e ata_sff_wait_after_reset vmlinux EXPORT_SYMBOL_GPL +0xc324efb9 drm_edid_to_sad vmlinux EXPORT_SYMBOL +0x9e09c219 tpm_try_get_ops vmlinux EXPORT_SYMBOL_GPL +0xfb8ea87f pci_enable_wake vmlinux EXPORT_SYMBOL +0x08a8fa0a __blk_mq_end_request vmlinux EXPORT_SYMBOL +0x2a6fe918 skcipher_walk_complete vmlinux EXPORT_SYMBOL_GPL +0xfa4abc48 crypto_remove_spawns vmlinux EXPORT_SYMBOL_GPL +0x3dd1f8a9 ring_buffer_empty_cpu vmlinux EXPORT_SYMBOL_GPL +0xbf63bb61 nft_set_gc_batch_release net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x8a7b7c26 mlx5_core_modify_sq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x51e61cf3 mlx5_core_modify_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x6c010899 mlx5_core_modify_cq 
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x8c0bfeb9 mlx4_unbond drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xcdf4e8dd mlx4_set_vf_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x5bdee8ae ip6_input vmlinux EXPORT_SYMBOL_GPL +0x9117ed06 ip6tun_encaps vmlinux EXPORT_SYMBOL +0x3f600e55 xt_replace_table vmlinux EXPORT_SYMBOL_GPL +0x37f576b0 lwtunnel_state_alloc vmlinux EXPORT_SYMBOL_GPL +0x422c6e46 put_cmsg_scm_timestamping vmlinux EXPORT_SYMBOL +0x7e53de2f ttm_eu_reserve_buffers vmlinux EXPORT_SYMBOL +0x47bf87ef drm_atomic_get_new_private_obj_state vmlinux EXPORT_SYMBOL +0xb95212b8 drm_atomic_helper_commit_tail_rpm vmlinux EXPORT_SYMBOL +0x7211d712 pci_free_host_bridge vmlinux EXPORT_SYMBOL +0x20cbb30a __percpu_counter_init vmlinux EXPORT_SYMBOL +0x920cc389 visitorl vmlinux EXPORT_SYMBOL_GPL +0xd8c56a0f get_user_pages_remote vmlinux EXPORT_SYMBOL +0x9cb948ed irq_get_domain_generic_chip vmlinux EXPORT_SYMBOL_GPL +0x6380a4e0 irq_set_affinity_notifier vmlinux EXPORT_SYMBOL_GPL +0xea6ac84b ceph_monc_want_map net/ceph/libceph EXPORT_SYMBOL +0x1421f36c rdma_resolve_ip drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x05943563 mlx5_modify_nic_vport_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xfa963644 hns_dsaf_roce_reset drivers/net/ethernet/hisilicon/hns/hns_dsaf EXPORT_SYMBOL +0x7293dbfa iscsi_host_set_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xefb5015a pnfs_generic_prepare_to_resend_writes fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xa1691b63 xas_find_marked vmlinux EXPORT_SYMBOL_GPL +0x8a7b3ea4 inet_hashinfo_init vmlinux EXPORT_SYMBOL_GPL +0x2a7dde65 ipv4_sk_redirect vmlinux EXPORT_SYMBOL_GPL +0xa99ef899 devlink_fmsg_bool_pair_put vmlinux EXPORT_SYMBOL_GPL +0xd5b77d9c input_mt_report_pointer_emulation vmlinux EXPORT_SYMBOL +0x209942b2 usb_stor_bulk_transfer_sg vmlinux EXPORT_SYMBOL_GPL +0x15a94f80 sas_task_abort vmlinux EXPORT_SYMBOL_GPL +0x9a9b3949 tpm_put_ops vmlinux EXPORT_SYMBOL_GPL +0xbcbc0560 tty_ldisc_receive_buf vmlinux EXPORT_SYMBOL_GPL +0x2234ca51 acpi_match_platform_list vmlinux EXPORT_SYMBOL +0x7e3c2a68 pci_prg_resp_pasid_required vmlinux EXPORT_SYMBOL_GPL +0xcad4f3de pci_ioremap_bar vmlinux EXPORT_SYMBOL_GPL +0x449997f9 end_buffer_read_sync vmlinux EXPORT_SYMBOL +0x475253db d_add vmlinux EXPORT_SYMBOL +0x3a0a70ce set_cpus_allowed_ptr vmlinux EXPORT_SYMBOL_GPL +0x7f758adb nci_spi_allocate_spi net/nfc/nci/nci_spi EXPORT_SYMBOL_GPL +0x00880a1a crypto_finalize_ablkcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL +0x50b73ce2 rfkill_find_type vmlinux EXPORT_SYMBOL +0x825e3af5 tcf_em_tree_dump vmlinux EXPORT_SYMBOL +0x9f4a6bfb neigh_for_each vmlinux EXPORT_SYMBOL +0x451625f4 rtc_initialize_alarm vmlinux EXPORT_SYMBOL_GPL +0x6971447a rtc_month_days vmlinux EXPORT_SYMBOL +0x7e64181d usb_calc_bus_time vmlinux EXPORT_SYMBOL_GPL +0xf9bb0327 sas_register_ha vmlinux EXPORT_SYMBOL_GPL +0xf7ab6567 drm_calc_vbltimestamp_from_scanoutpos vmlinux EXPORT_SYMBOL +0x10c62b61 __drm_printfn_debug vmlinux EXPORT_SYMBOL +0xcc7170c0 regulator_is_enabled_regmap vmlinux EXPORT_SYMBOL_GPL +0xf1b7c46b pci_sriov_get_totalvfs vmlinux EXPORT_SYMBOL_GPL +0x1afb296e devm_pinctrl_register_and_init vmlinux EXPORT_SYMBOL_GPL +0x77e9eb37 aes_encrypt vmlinux EXPORT_SYMBOL +0x0584be63 sg_miter_next vmlinux EXPORT_SYMBOL +0x9400900a crypto_init_shash_spawn vmlinux EXPORT_SYMBOL_GPL +0xfe065352 crypto_init_ahash_spawn vmlinux EXPORT_SYMBOL_GPL +0x85efc7e0 zero_pfn vmlinux EXPORT_SYMBOL +0x54069917 param_get_ushort vmlinux 
EXPORT_SYMBOL +0x1d9f92b3 vcc_insert_socket net/atm/atm EXPORT_SYMBOL +0xa08bea8b svc_sock_update_bufs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa7d6c15a nft_reject_dump net/netfilter/nft_reject EXPORT_SYMBOL_GPL +0xb3753a1f vhost_dev_set_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x24e8a715 ubi_open_volume_path drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x813cf212 nvme_io_timeout drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x1566ede0 skb_pull vmlinux EXPORT_SYMBOL +0xf467f58a usb_find_interface vmlinux EXPORT_SYMBOL_GPL +0x900aa2d9 ata_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL +0xdcbec937 device_get_match_data vmlinux EXPORT_SYMBOL_GPL +0x4678e8e5 clk_register vmlinux EXPORT_SYMBOL_GPL +0x2de7181c amba_find_device vmlinux EXPORT_SYMBOL +0x75659a22 pci_add_new_bus vmlinux EXPORT_SYMBOL +0x9ba2bb2b gpio_request_array vmlinux EXPORT_SYMBOL_GPL +0x09c8eb55 font_vga_8x16 vmlinux EXPORT_SYMBOL +0xa44a1307 interval_tree_iter_first vmlinux EXPORT_SYMBOL_GPL +0x2688ec10 bitmap_zalloc vmlinux EXPORT_SYMBOL +0xadee1426 blkcg_policy_unregister vmlinux EXPORT_SYMBOL_GPL +0x6f5db262 sysfs_remove_file_ns vmlinux EXPORT_SYMBOL_GPL +0x4e8f6ca7 sunrpc_net_id net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5e1a4d37 mtd_ooblayout_set_databytes drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x1dd1ab4c mtd_ooblayout_get_databytes drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xc1a806b2 iscsit_setup_scsi_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x6243cd67 neigh_app_ns vmlinux EXPORT_SYMBOL +0xc988a449 netdev_lower_get_first_private_rcu vmlinux EXPORT_SYMBOL +0x0e2467d4 iscsi_block_scsi_eh vmlinux EXPORT_SYMBOL_GPL +0xbb16a79b scsi_get_vpd_page vmlinux EXPORT_SYMBOL_GPL +0xe726b806 pm_clk_runtime_suspend vmlinux EXPORT_SYMBOL_GPL +0x385f103d pm_generic_restore_noirq vmlinux EXPORT_SYMBOL_GPL +0x4bfba5f1 software_node_unregister_nodes vmlinux EXPORT_SYMBOL_GPL +0x01a5a658 drm_gem_cma_vm_ops vmlinux EXPORT_SYMBOL_GPL +0x3ae72d08 drm_i2c_encoder_dpms vmlinux EXPORT_SYMBOL +0xec4f7c10 register_acpi_bus_type vmlinux EXPORT_SYMBOL_GPL +0x29f2bc98 pci_enable_pasid vmlinux EXPORT_SYMBOL_GPL +0xb0d1656c gpio_free_array vmlinux EXPORT_SYMBOL_GPL +0x2ddcdfa4 irq_remove_generic_chip vmlinux EXPORT_SYMBOL_GPL +0x7b178afe unlock_system_sleep vmlinux EXPORT_SYMBOL_GPL +0xae7e3a35 mutex_trylock_recursive vmlinux EXPORT_SYMBOL +0xf61edc91 ib_sa_guid_info_rec_query drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbd0de4a6 parport_register_port drivers/parport/parport EXPORT_SYMBOL +0x8684f658 mlx5_query_hca_vport_pkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x00153356 fc_fcp_init drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x64292beb st_nci_se_init drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0x47dd3571 xfrm6_protocol_register vmlinux EXPORT_SYMBOL +0xe57948eb xfrm_state_check_expire vmlinux EXPORT_SYMBOL +0xbf32dccb xfrm4_protocol_register vmlinux EXPORT_SYMBOL +0xfa7cca89 sock_diag_unregister_inet_compat vmlinux EXPORT_SYMBOL_GPL +0x8ad2cacd netdev_boot_setup_check vmlinux EXPORT_SYMBOL +0x08238ca2 sock_i_ino vmlinux EXPORT_SYMBOL +0xbf2d5911 sock_i_uid vmlinux EXPORT_SYMBOL +0x21a1fcea cpufreq_global_kobject vmlinux EXPORT_SYMBOL +0xb33fc7b3 phy_attached_info vmlinux EXPORT_SYMBOL +0xe3605aac regulator_set_voltage_time vmlinux EXPORT_SYMBOL_GPL +0x2bd60ab9 acpi_reset vmlinux EXPORT_SYMBOL +0xadddc11b kvm_vcpu_unmap vmlinux EXPORT_SYMBOL_GPL +0xfe4e149b xprt_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1eef80f0 snd_register_device sound/core/snd EXPORT_SYMBOL +0xf3df871f iwcm_reject_msg drivers/infiniband/core/iw_cm EXPORT_SYMBOL 
+0xc2797b61 bch_bkey_try_merge drivers/md/bcache/bcache EXPORT_SYMBOL +0x510a606b ipvlan_count_rx drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL +0xb0e102e4 ip6_pol_route vmlinux EXPORT_SYMBOL_GPL +0x68d302d4 ip_build_and_send_pkt vmlinux EXPORT_SYMBOL_GPL +0x3dd6faa2 register_gifconf vmlinux EXPORT_SYMBOL +0x329bd79b cpufreq_unregister_driver vmlinux EXPORT_SYMBOL_GPL +0xdc56a9e1 regulator_get_voltage_sel_regmap vmlinux EXPORT_SYMBOL_GPL +0x8b67f8ca clk_gate_is_enabled vmlinux EXPORT_SYMBOL_GPL +0x160e4a00 __clk_mux_determine_rate_closest vmlinux EXPORT_SYMBOL_GPL +0xec4d9e3a clk_get_sys vmlinux EXPORT_SYMBOL +0x03833a6d pcie_print_link_status vmlinux EXPORT_SYMBOL +0xc6b2aabb gpiochip_find vmlinux EXPORT_SYMBOL_GPL +0x33fcf44a __kfifo_out_r vmlinux EXPORT_SYMBOL +0xe236f0e2 crypto_req_done vmlinux EXPORT_SYMBOL_GPL +0x457594fa crypto_alg_list vmlinux EXPORT_SYMBOL_GPL +0x21094da9 sync_inode_metadata vmlinux EXPORT_SYMBOL +0x878955a0 vm_map_pages vmlinux EXPORT_SYMBOL +0xc3762aec mempool_alloc vmlinux EXPORT_SYMBOL +0xd0beb171 unregister_wide_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0x7b0577ea free_cgroup_ns vmlinux EXPORT_SYMBOL +0x86f6b99d synchronize_rcu_expedited vmlinux EXPORT_SYMBOL_GPL +0x255ab3b7 inet_dccp_listen net/dccp/dccp EXPORT_SYMBOL_GPL +0xc0f0458a ip_tunnel_unneed_metadata vmlinux EXPORT_SYMBOL_GPL +0x4ffeebaf hid_setup_resolution_multiplier vmlinux EXPORT_SYMBOL_GPL +0x57c13964 ata_do_dev_read_id vmlinux EXPORT_SYMBOL_GPL +0x20a1b519 acpi_resource_to_address64 vmlinux EXPORT_SYMBOL +0x6bff618f __srcu_read_lock vmlinux EXPORT_SYMBOL_GPL +0xc642671d irq_domain_alloc_irqs_parent vmlinux EXPORT_SYMBOL_GPL +0xbbc82f76 vfio_unregister_notifier drivers/vfio/vfio EXPORT_SYMBOL +0x2d5695dd usb_function_unregister drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xb290a5e5 mlx5_add_flow_rules drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xbbbbc14b iscsi_conn_stop drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xcd8c0cb2 nfs_submount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x57f70547 secure_ipv4_port_ephemeral vmlinux EXPORT_SYMBOL_GPL +0x545025e5 nvmem_add_cell_table vmlinux EXPORT_SYMBOL_GPL +0x16dee44d dma_fence_init vmlinux EXPORT_SYMBOL +0xc0659ed6 __drm_atomic_helper_set_config vmlinux EXPORT_SYMBOL +0x0aa3b785 iomap_seek_data vmlinux EXPORT_SYMBOL_GPL +0x85803e37 pagecache_get_page vmlinux EXPORT_SYMBOL +0x2cdf87a1 proc_dointvec_minmax vmlinux EXPORT_SYMBOL +0x8c008546 atm_charge net/atm/atm EXPORT_SYMBOL +0x7694cc97 mlx4_is_slave_active drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x244f994b mr_vif_seq_idx vmlinux EXPORT_SYMBOL +0x5746c0c5 dev_mc_add_excl vmlinux EXPORT_SYMBOL +0xc95a8907 dev_uc_add_excl vmlinux EXPORT_SYMBOL +0x141b049d i2c_bus_type vmlinux EXPORT_SYMBOL_GPL +0xe03a689d dma_fence_array_ops vmlinux EXPORT_SYMBOL +0x8157009e devm_drm_dev_init vmlinux EXPORT_SYMBOL +0xebfd0491 pci_msi_create_irq_domain vmlinux EXPORT_SYMBOL_GPL +0xcea3906e pci_unmap_iospace vmlinux EXPORT_SYMBOL +0x9cb5469d blkcg_print_blkgs vmlinux EXPORT_SYMBOL_GPL +0x30602868 blk_mq_alloc_request_hctx vmlinux EXPORT_SYMBOL_GPL +0x3efa965a kmalloc_caches vmlinux EXPORT_SYMBOL +0x3f19e28d module_enable_ro vmlinux EXPORT_SYMBOL_GPL +0xdfb2e4e0 rpc_malloc net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xdad35e82 bch_btree_keys_alloc drivers/md/bcache/bcache EXPORT_SYMBOL +0x931e8c7d mlx4_get_active_ports drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe0d74651 hinic_cmdq_async drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xdaf75ada hnae_reinit_handle 
drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0x65ade826 rt_dst_alloc vmlinux EXPORT_SYMBOL +0x0d7f5fcd xt_alloc_entry_offsets vmlinux EXPORT_SYMBOL +0xb18f9eec phylink_ethtool_ksettings_set vmlinux EXPORT_SYMBOL_GPL +0xdccfdecd phylink_ethtool_ksettings_get vmlinux EXPORT_SYMBOL_GPL +0x14143f93 do_take_over_console vmlinux EXPORT_SYMBOL_GPL +0x30c6c017 regulator_set_current_limit vmlinux EXPORT_SYMBOL_GPL +0xac28461f of_mm_gpiochip_add_data vmlinux EXPORT_SYMBOL_GPL +0x7b47e0df blk_queue_make_request vmlinux EXPORT_SYMBOL +0xff169dc3 sysfs_add_link_to_group vmlinux EXPORT_SYMBOL_GPL +0x1a5638bd truncate_pagecache_range vmlinux EXPORT_SYMBOL +0x6f9e763b timecounter_read vmlinux EXPORT_SYMBOL_GPL +0x1e77bb30 param_ops_ullong vmlinux EXPORT_SYMBOL +0xa4adedf1 sw842_decompress lib/842/842_decompress EXPORT_SYMBOL_GPL +0xdba7326b nf_conntrack_lock net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x5e63ab6a dm_cell_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x3cf0bd18 mlx5_query_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xe75630fa __mlx4_replace_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xba75404e led_classdev_resume vmlinux EXPORT_SYMBOL_GPL +0x60ad42a7 input_reset_device vmlinux EXPORT_SYMBOL +0x5312a826 fwnode_graph_get_remote_port_parent vmlinux EXPORT_SYMBOL_GPL +0x149e45e5 tty_ldisc_deref vmlinux EXPORT_SYMBOL_GPL +0x2ee4c2b1 hdmi_avi_infoframe_pack_only vmlinux EXPORT_SYMBOL +0xf4e9478f security_inode_setsecctx vmlinux EXPORT_SYMBOL +0xfe02bb7c dquot_scan_active vmlinux EXPORT_SYMBOL +0x705b0d4e vfs_getattr vmlinux EXPORT_SYMBOL +0x9fbba67f ceph_buffer_new net/ceph/libceph EXPORT_SYMBOL +0xc97fd13e p9_client_walk net/9p/9pnet EXPORT_SYMBOL +0xc8791e8c rxrpc_kernel_set_max_life net/rxrpc/rxrpc EXPORT_SYMBOL +0x4ffc9b69 mpt_set_taskmgmt_in_progress_flag drivers/message/fusion/mptbase EXPORT_SYMBOL +0xe6334541 nfs_writeback_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x902f5199 cpumask_next_wrap vmlinux EXPORT_SYMBOL +0xc872fd85 in6addr_interfacelocal_allnodes vmlinux EXPORT_SYMBOL +0xc6ec131b inet_del_protocol vmlinux EXPORT_SYMBOL +0x5280708a neigh_update vmlinux EXPORT_SYMBOL +0x0c99fae4 phy_stop vmlinux EXPORT_SYMBOL +0xabaf7004 ata_dummy_port_info vmlinux EXPORT_SYMBOL_GPL +0xeec13851 drm_kms_helper_poll_enable vmlinux EXPORT_SYMBOL +0xf4edb126 tty_kclose vmlinux EXPORT_SYMBOL_GPL +0x6e8434d8 acpi_dma_simple_xlate vmlinux EXPORT_SYMBOL_GPL +0xd4fa5a87 __kfifo_dma_out_prepare vmlinux EXPORT_SYMBOL +0xfd744678 blk_queue_io_min vmlinux EXPORT_SYMBOL +0xc4c50f22 nobh_truncate_page vmlinux EXPORT_SYMBOL +0x14225685 ceph_monc_init net/ceph/libceph EXPORT_SYMBOL +0x3fa84493 cfpkt_add_head net/caif/caif EXPORT_SYMBOL +0x36ea7eed nf_ct_port_nla_policy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x46c6e2c0 mdev_set_drvdata drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xdef248f2 bch_btree_sort_lazy drivers/md/bcache/bcache EXPORT_SYMBOL +0xfd09420b iscsi_conn_start drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xc4d99852 o2hb_check_node_heartbeating_from_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x8be1ac01 nfs_pgio_header_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL +0x4d202b8c __xas_prev vmlinux EXPORT_SYMBOL_GPL +0x324b21b7 regulator_set_voltage_rdev vmlinux EXPORT_SYMBOL_GPL +0xb8d8b42d securityfs_create_symlink vmlinux EXPORT_SYMBOL_GPL +0xc559b8e8 __vfs_getxattr vmlinux EXPORT_SYMBOL +0xb74ab73b __put_page vmlinux EXPORT_SYMBOL +0xcf2a6966 up vmlinux EXPORT_SYMBOL +0x2520308e rpc_call_start net/sunrpc/sunrpc 
EXPORT_SYMBOL_GPL +0x434c5d10 zgid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x027c1a68 ubi_leb_unmap drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0xcd09d157 st21nfca_dep_event_received drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x4a0165b6 nfs4_set_ds_client fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xc2fb95f6 nfs3_set_ds_client fs/nfs/nfsv3 EXPORT_SYMBOL_GPL +0xb9dba537 of_pci_range_parser_one vmlinux EXPORT_SYMBOL_GPL +0x94ae6770 devm_spi_register_controller vmlinux EXPORT_SYMBOL_GPL +0x826a5b5b iscsi_lookup_endpoint vmlinux EXPORT_SYMBOL_GPL +0xd7615727 drm_property_create_bitmask vmlinux EXPORT_SYMBOL +0x7102df73 pci_hp_remove_module_link vmlinux EXPORT_SYMBOL_GPL +0xd736b4ab iov_iter_copy_from_user_atomic vmlinux EXPORT_SYMBOL +0x0836e375 path_get vmlinux EXPORT_SYMBOL +0xde21c1af __inode_sub_bytes vmlinux EXPORT_SYMBOL +0x157aa73e __tracepoint_bcache_gc_copy drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x27adf24b mlx5_lag_get_roce_netdev drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x53be17ee mlx4_uar_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x8b4c51b8 addrconf_add_linklocal vmlinux EXPORT_SYMBOL_GPL +0xea4492d1 xfrm_state_update vmlinux EXPORT_SYMBOL +0x69207fb8 usb_deregister_dev vmlinux EXPORT_SYMBOL_GPL +0x18524c60 usb_hcd_is_primary_hcd vmlinux EXPORT_SYMBOL_GPL +0x75365310 pm_generic_resume_noirq vmlinux EXPORT_SYMBOL_GPL +0xc415cd53 pci_bridge_secondary_bus_reset vmlinux EXPORT_SYMBOL_GPL +0x13ce87e8 asn1_ber_decoder vmlinux EXPORT_SYMBOL_GPL +0x81eea5d0 crypto_grab_aead vmlinux EXPORT_SYMBOL_GPL +0x79a821e5 no_seek_end_llseek_size vmlinux EXPORT_SYMBOL +0x23864ce7 cpuset_mem_spread_node vmlinux EXPORT_SYMBOL_GPL +0x38b2279a get_pid_task vmlinux EXPORT_SYMBOL_GPL +0x47884890 system_power_efficient_wq vmlinux EXPORT_SYMBOL_GPL +0x42efc36c __ceph_open_session net/ceph/libceph EXPORT_SYMBOL +0x81b4c8e0 sctp_for_each_transport net/sctp/sctp EXPORT_SYMBOL_GPL +0x084cba95 nft_data_release net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x1915005d transport_deregister_session_configfs drivers/target/target_core_mod EXPORT_SYMBOL +0xfec955e3 devlink_param_value_changed vmlinux EXPORT_SYMBOL_GPL +0x5de751dd dev_open vmlinux EXPORT_SYMBOL +0xa4baa5a9 __skb_free_datagram_locked vmlinux EXPORT_SYMBOL +0x37e8a123 lock_sock_fast vmlinux EXPORT_SYMBOL +0x472665ce cn_add_callback vmlinux EXPORT_SYMBOL_GPL +0x8dbb3701 alloc_iova_fast vmlinux EXPORT_SYMBOL_GPL +0xf563af5c ilookup vmlinux EXPORT_SYMBOL +0x4302d0eb free_pages vmlinux EXPORT_SYMBOL +0x06ab03bd irq_create_of_mapping vmlinux EXPORT_SYMBOL_GPL +0x8f21beff osd_req_op_extent_init net/ceph/libceph EXPORT_SYMBOL +0xec195e42 nat_q931_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x1ce19a89 mrp_init_applicant net/802/mrp EXPORT_SYMBOL_GPL +0xdb928d8f i8042_install_filter drivers/input/serio/i8042 EXPORT_SYMBOL +0x6e9431b5 unregister_c_can_dev drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL +0x40739385 nfs_wait_bit_killable fs/nfs/nfs EXPORT_SYMBOL_GPL +0x81e6b37f dmi_get_system_info vmlinux EXPORT_SYMBOL +0xd4193f16 thermal_of_cooling_device_register vmlinux EXPORT_SYMBOL_GPL +0xe11a274e mdiobus_free vmlinux EXPORT_SYMBOL +0x7d148e99 sas_suspend_ha vmlinux EXPORT_SYMBOL +0x9c242488 of_pm_clk_add_clks vmlinux EXPORT_SYMBOL_GPL +0xc77eb907 drm_gem_vram_offset vmlinux EXPORT_SYMBOL +0x75422dd0 of_dma_request_slave_channel vmlinux EXPORT_SYMBOL_GPL +0x765ff474 crc_t10dif_generic vmlinux EXPORT_SYMBOL +0x03592ea0 security_unix_stream_connect vmlinux EXPORT_SYMBOL +0x39f512cd generic_file_fsync 
vmlinux EXPORT_SYMBOL +0x2bd06fd6 unlock_new_inode vmlinux EXPORT_SYMBOL +0xe500b48d __f_setown vmlinux EXPORT_SYMBOL +0xf9a0a52f irq_create_direct_mapping vmlinux EXPORT_SYMBOL_GPL +0x554b647b execute_in_process_context vmlinux EXPORT_SYMBOL_GPL +0x23745b93 nf_reject_ip6hdr_put net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL +0x51f0db80 vhost_has_work drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3702b60e ppp_dev_name drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0x954f099c idr_preload vmlinux EXPORT_SYMBOL +0x96242c43 kset_register vmlinux EXPORT_SYMBOL +0xa8f6c843 ip_frag_ecn_table vmlinux EXPORT_SYMBOL +0xa7ae4212 xt_find_table_lock vmlinux EXPORT_SYMBOL_GPL +0x62737e1d sock_unregister vmlinux EXPORT_SYMBOL +0x265c4ef5 i2c_new_client_device vmlinux EXPORT_SYMBOL_GPL +0xabfc6724 devm_regmap_field_free vmlinux EXPORT_SYMBOL_GPL +0xf93fd09c fb_find_mode_cvt vmlinux EXPORT_SYMBOL +0x003685f9 pci_sriov_set_totalvfs vmlinux EXPORT_SYMBOL_GPL +0x4053e668 devm_gpiod_get_index_optional vmlinux EXPORT_SYMBOL_GPL +0x022d6822 fput vmlinux EXPORT_SYMBOL +0x517488a3 nf_connlabels_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x1f528a3a mtd_ooblayout_free drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xf2ca3bae mlxsw_core_res_query_enabled drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xebee7ac6 of_pci_range_parser_init vmlinux EXPORT_SYMBOL_GPL +0x14bad15f efivars_register vmlinux EXPORT_SYMBOL_GPL +0xe6eae593 sas_phy_delete vmlinux EXPORT_SYMBOL +0xb19997f5 platform_device_alloc vmlinux EXPORT_SYMBOL_GPL +0x48e8c371 dev_printk_emit vmlinux EXPORT_SYMBOL +0x95dcb403 clkdev_add vmlinux EXPORT_SYMBOL +0xcfd30d71 acpi_os_map_memory vmlinux EXPORT_SYMBOL_GPL +0xb8551a1a iov_iter_get_pages_alloc vmlinux EXPORT_SYMBOL +0xfdfc51fb uprobe_register_refctr vmlinux EXPORT_SYMBOL_GPL +0xd62b1e45 trace_print_flags_seq vmlinux EXPORT_SYMBOL +0x284e07d8 vsock_bind_table net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x69cc2628 snd_ctl_apply_vmaster_slaves sound/core/snd EXPORT_SYMBOL_GPL +0x9b27134d wimax_msg_len vmlinux EXPORT_SYMBOL_GPL +0x23ce6731 ip6_frag_init vmlinux EXPORT_SYMBOL +0x41849ef1 ir_raw_handler_unregister vmlinux EXPORT_SYMBOL +0x155e3c19 pm_generic_suspend_noirq vmlinux EXPORT_SYMBOL_GPL +0x1872e704 component_add_typed vmlinux EXPORT_SYMBOL_GPL +0x6b567544 dma_get_slave_caps vmlinux EXPORT_SYMBOL_GPL +0x2ddb9fb8 clk_hw_unregister_mux vmlinux EXPORT_SYMBOL_GPL +0x9ee435ac __fsnotify_parent vmlinux EXPORT_SYMBOL_GPL +0xc4eae733 perf_trace_buf_alloc vmlinux EXPORT_SYMBOL_GPL +0x944a564d is_console_locked vmlinux EXPORT_SYMBOL +0x1e0528a2 pm_qos_add_request vmlinux EXPORT_SYMBOL_GPL +0x88802539 param_ops_bool vmlinux EXPORT_SYMBOL +0x9433243e ceph_monc_open_session net/ceph/libceph EXPORT_SYMBOL +0xb7b6874e caif_free_client net/caif/caif EXPORT_SYMBOL +0x219b5660 l2tp_session_delete net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x5cd9b6b2 iscsit_release_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x9b096204 usb_ep0_reinit vmlinux EXPORT_SYMBOL_GPL +0x389779c7 sas_phy_free vmlinux EXPORT_SYMBOL +0x8563a554 drm_ht_remove vmlinux EXPORT_SYMBOL +0xc4176384 regulator_get_drvdata vmlinux EXPORT_SYMBOL_GPL +0xb163b6d6 xt_rateest_put net/netfilter/xt_RATEEST EXPORT_SYMBOL_GPL +0x53d9c16f mptscsih_info drivers/message/fusion/mptscsih EXPORT_SYMBOL +0xcfb4bd80 __mlx4_unregister_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe6fa3a9c __udp6_lib_lookup vmlinux EXPORT_SYMBOL_GPL +0x7f7cbc64 ip_tunnel_need_metadata vmlinux EXPORT_SYMBOL_GPL +0x97bd2940 inet_shutdown vmlinux EXPORT_SYMBOL 
+0x3458149d power_supply_set_property vmlinux EXPORT_SYMBOL_GPL +0x7da9894c power_supply_get_property vmlinux EXPORT_SYMBOL_GPL +0x7034d795 syscon_node_to_regmap vmlinux EXPORT_SYMBOL_GPL +0xa2b3a96f drm_atomic_set_fb_for_plane vmlinux EXPORT_SYMBOL +0x38374815 clear_selection vmlinux EXPORT_SYMBOL_GPL +0xa79fe90a regulator_set_voltage_sel_regmap vmlinux EXPORT_SYMBOL_GPL +0x80f7d128 __tracepoint_block_bio_remap vmlinux EXPORT_SYMBOL_GPL +0xc6be5e55 cryptd_shash_desc vmlinux EXPORT_SYMBOL_GPL +0xefec290a lc_find lib/lru_cache EXPORT_SYMBOL +0xd3129228 notifier_err_inject_dir lib/notifier-error-inject EXPORT_SYMBOL_GPL +0x9cce9460 nfc_hci_disconnect_gate net/nfc/hci/hci EXPORT_SYMBOL +0x839ddb7b cfcnfg_set_phy_state net/caif/caif EXPORT_SYMBOL +0x93feddae vhost_poll_stop drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x31e13052 ib_destroy_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2e052584 mlx4_qp_modify drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x1e5896b8 iscsi_requeue_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x98c972a4 pnfs_generic_sync fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x717159e6 xt_unregister_table vmlinux EXPORT_SYMBOL_GPL +0x5cc70c5f flow_rule_match_cvlan vmlinux EXPORT_SYMBOL +0x68380da3 drm_handle_vblank vmlinux EXPORT_SYMBOL +0x30729710 uart_remove_one_port vmlinux EXPORT_SYMBOL +0x167d7113 acpi_bus_register_early_device vmlinux EXPORT_SYMBOL_GPL +0x977d4fdc vmalloc_to_page vmlinux EXPORT_SYMBOL +0x1d027e4b snd_pcm_format_signed sound/core/snd-pcm EXPORT_SYMBOL +0x80f3c3c3 xfrm_trans_queue vmlinux EXPORT_SYMBOL +0x1c5541bd cpufreq_boost_enabled vmlinux EXPORT_SYMBOL_GPL +0x821772ce usb_get_hcd vmlinux EXPORT_SYMBOL_GPL +0xa0713087 drm_ht_find_item vmlinux EXPORT_SYMBOL +0xdaa06dc1 acpi_lpat_raw_to_temp vmlinux EXPORT_SYMBOL_GPL +0xb055b8fc fbcon_set_bitops vmlinux EXPORT_SYMBOL +0xed7cc0cb blk_integrity_merge_rq vmlinux EXPORT_SYMBOL +0xfac9ed22 blk_update_request vmlinux EXPORT_SYMBOL_GPL +0x02649054 security_sock_rcv_skb vmlinux EXPORT_SYMBOL +0xfc336d2e __wake_up_bit vmlinux EXPORT_SYMBOL +0xccff9d21 get_task_cred vmlinux EXPORT_SYMBOL +0x329dbd06 cfpkt_info net/caif/caif EXPORT_SYMBOL +0x49a7a72d xdr_encode_array2 net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x752d5ab9 rdma_alloc_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x588ea78a hchacha_block vmlinux EXPORT_SYMBOL +0x0e0ee2b0 sock_release vmlinux EXPORT_SYMBOL +0x2dca064f power_supply_get_by_phandle vmlinux EXPORT_SYMBOL_GPL +0x1b002aaa usb_hcd_giveback_urb vmlinux EXPORT_SYMBOL_GPL +0x1a3f6ce5 iscsi_is_session_dev vmlinux EXPORT_SYMBOL_GPL +0x39bbc557 drm_gem_cma_dumb_create_internal vmlinux EXPORT_SYMBOL_GPL +0xb63bdcde drm_prime_gem_destroy vmlinux EXPORT_SYMBOL +0x4e786e9c drm_i2c_encoder_mode_fixup vmlinux EXPORT_SYMBOL +0xc109c751 tty_port_tty_wakeup vmlinux EXPORT_SYMBOL_GPL +0xfe2d911c regulator_map_voltage_ascend vmlinux EXPORT_SYMBOL_GPL +0x92b99a33 acpi_put_table vmlinux EXPORT_SYMBOL +0xfd4c0b45 gpiochip_lock_as_irq vmlinux EXPORT_SYMBOL_GPL +0x284fe794 percpu_ref_exit vmlinux EXPORT_SYMBOL_GPL +0xde293f9e add_wait_queue_exclusive vmlinux EXPORT_SYMBOL +0x9746eb89 ZSTD_decompressBegin_usingDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0xcbec3f4a nf_nat_icmp_reply_translation net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x442b500a ct_sip_get_sdp_header net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0xbed43a41 snd_usbmidi_suspend sound/usb/snd-usbmidi-lib EXPORT_SYMBOL +0x5c0ed9f7 mlx4_mtt_addr drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x47decb54 nxp_nci_remove 
drivers/nfc/nxp-nci/nxp-nci EXPORT_SYMBOL +0x063de633 nlmclnt_proc fs/lockd/lockd EXPORT_SYMBOL_GPL +0x48b52b31 pnfs_generic_write_commit_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x7466c19b led_trigger_rename_static vmlinux EXPORT_SYMBOL_GPL +0x2a8dabc3 mipi_dsi_dcs_soft_reset vmlinux EXPORT_SYMBOL +0x7fe42344 drm_debugfs_create_files vmlinux EXPORT_SYMBOL +0x3e5230c3 iommu_dev_feature_enabled vmlinux EXPORT_SYMBOL_GPL +0x819531b6 tty_register_device_attr vmlinux EXPORT_SYMBOL_GPL +0x70867e0b pci_sriov_configure_simple vmlinux EXPORT_SYMBOL_GPL +0x7f03b6a9 crc_ccitt_table vmlinux EXPORT_SYMBOL +0xad7e706b rhashtable_walk_stop vmlinux EXPORT_SYMBOL_GPL +0x625410fa d_add_ci vmlinux EXPORT_SYMBOL +0xa2bd25da tracepoint_probe_register_prio vmlinux EXPORT_SYMBOL_GPL +0xe2c347b5 phys_mem_access_prot vmlinux EXPORT_SYMBOL +0xa9cce4c9 rdma_find_gid_by_port drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd176685a mtd_block_isreserved drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x0b25f6bc dm_array_resize drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x12b2ad06 iscsi_switch_str_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x9b2560b9 gf128mul_init_4k_bbe crypto/gf128mul EXPORT_SYMBOL +0x83581089 gf128mul_init_4k_lle crypto/gf128mul EXPORT_SYMBOL +0xc69b7ee5 zs_destroy_pool mm/zsmalloc EXPORT_SYMBOL_GPL +0x9b33e0d7 unregister_dcbevent_notifier vmlinux EXPORT_SYMBOL +0xc744ed6d __skb_checksum_complete_head vmlinux EXPORT_SYMBOL +0x83659591 skb_send_sock_locked vmlinux EXPORT_SYMBOL_GPL +0x076c7b13 usb_gadget_unmap_request_by_dev vmlinux EXPORT_SYMBOL_GPL +0x26bc5373 xhci_shutdown vmlinux EXPORT_SYMBOL_GPL +0xcca72da0 phy_ethtool_get_wol vmlinux EXPORT_SYMBOL +0x68edd203 phy_ethtool_set_wol vmlinux EXPORT_SYMBOL +0x1485a307 free_io_pgtable_ops vmlinux EXPORT_SYMBOL_GPL +0x8d804e2c unlink_framebuffer vmlinux EXPORT_SYMBOL +0x53126ecc __percpu_counter_sum vmlinux EXPORT_SYMBOL +0x2bceaa82 mpage_readpage vmlinux EXPORT_SYMBOL +0xd6ee688f vmalloc vmlinux EXPORT_SYMBOL +0xff3a5e48 perf_pmu_unregister vmlinux EXPORT_SYMBOL_GPL +0x4392584e dma_direct_unmap_page vmlinux EXPORT_SYMBOL +0xf31b3fd1 workqueue_set_max_active vmlinux EXPORT_SYMBOL_GPL +0xe3c10267 vhost_poll_start drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xa8a2b57d raid5_set_cache_size drivers/md/raid456 EXPORT_SYMBOL +0x3c84f082 macvlan_dellink drivers/net/macvlan EXPORT_SYMBOL_GPL +0x02998acf mlxsw_afa_block_append_counter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x10052b4d flow_rule_match_enc_ports vmlinux EXPORT_SYMBOL +0x202d4ed6 nvmem_cell_write vmlinux EXPORT_SYMBOL_GPL +0x142a7b03 of_get_compatible_child vmlinux EXPORT_SYMBOL +0x462e1e9e ata_bmdma_error_handler vmlinux EXPORT_SYMBOL_GPL +0xe669dd23 tty_port_raise_dtr_rts vmlinux EXPORT_SYMBOL +0xd4f61eac devm_lcd_device_register vmlinux EXPORT_SYMBOL +0x61ca70c5 pci_request_region vmlinux EXPORT_SYMBOL +0x8c505cbb blk_queue_io_opt vmlinux EXPORT_SYMBOL +0x17df411d bdgrab vmlinux EXPORT_SYMBOL +0xd4c14632 system_unbound_wq vmlinux EXPORT_SYMBOL_GPL +0x5ac007ac udp_tunnel6_xmit_skb net/ipv6/ip6_udp_tunnel EXPORT_SYMBOL_GPL +0x7e6e4622 ct_sip_parse_request net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0xe9cace74 mtd_lock drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xcbf802a2 nfs_fscache_open_file fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd05d67de dcb_ieee_getapp_default_prio_mask vmlinux EXPORT_SYMBOL +0x50cbf9c9 flow_block_cb_alloc vmlinux EXPORT_SYMBOL +0xb7c6db70 sysctl_max_skb_frags vmlinux EXPORT_SYMBOL +0x1e9f794f hisi_sas_remove vmlinux EXPORT_SYMBOL_GPL +0xa741da40 
ata_sff_irq_on vmlinux EXPORT_SYMBOL_GPL +0xb8a9dcb3 dma_resv_test_signaled_rcu vmlinux EXPORT_SYMBOL_GPL +0xd680a377 drm_gem_object_free vmlinux EXPORT_SYMBOL +0x66772039 clk_hw_unregister_fixed_rate vmlinux EXPORT_SYMBOL_GPL +0x1ab04987 devm_get_clk_from_child vmlinux EXPORT_SYMBOL +0xbfdfd39a devm_lcd_device_unregister vmlinux EXPORT_SYMBOL +0xa2ab8df8 gpiod_direction_output vmlinux EXPORT_SYMBOL_GPL +0x9f50b770 keyring_restrict vmlinux EXPORT_SYMBOL +0x903b627c list_lru_isolate_move vmlinux EXPORT_SYMBOL_GPL +0x22ad68cc kvm_clear_guest_page vmlinux EXPORT_SYMBOL_GPL +0x1c13faff xprt_wait_for_reply_request_def net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb716c6d3 rpc_clnt_test_and_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc99a4bd5 ib_detach_mcast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x6aa9ac77 ib_attach_mcast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x8fac259e usb_function_activate drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xa57f9663 component_match_add_typed vmlinux EXPORT_SYMBOL +0x9cb7c2af drm_get_edid vmlinux EXPORT_SYMBOL +0x3a4f9d28 rng_is_initialized vmlinux EXPORT_SYMBOL +0xcf000c7e hdmi_infoframe_check vmlinux EXPORT_SYMBOL +0x3157aec8 iov_iter_revert vmlinux EXPORT_SYMBOL +0x1d9cbdc2 crypto_unregister_aeads vmlinux EXPORT_SYMBOL_GPL +0x23ee13fd mb_cache_entry_find_first vmlinux EXPORT_SYMBOL +0xb9852623 __bread_gfp vmlinux EXPORT_SYMBOL +0xb79f25fd p9_parse_header net/9p/9pnet EXPORT_SYMBOL +0x0171da26 gre_parse_header net/ipv4/gre EXPORT_SYMBOL +0x32aa05f2 capi20_isinstalled drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x504c71f1 ppp_unregister_compressor drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0x19b7ade0 tcp_unregister_congestion_control vmlinux EXPORT_SYMBOL_GPL +0x87ad8bd0 nf_checksum_partial vmlinux EXPORT_SYMBOL_GPL +0x0274dc2b netif_get_num_default_rss_queues vmlinux EXPORT_SYMBOL +0x94816c92 usb_ep_set_wedge vmlinux EXPORT_SYMBOL_GPL +0x61f67c92 phy_gbit_features_array vmlinux EXPORT_SYMBOL_GPL +0x535833ee __phy_modify vmlinux EXPORT_SYMBOL_GPL +0xf702c91b clk_gate_restore_context vmlinux EXPORT_SYMBOL_GPL +0x41978417 gpiochip_line_is_open_drain vmlinux EXPORT_SYMBOL_GPL +0xcd55667f bio_split vmlinux EXPORT_SYMBOL +0x58a726cc lock_rename vmlinux EXPORT_SYMBOL +0x8d333f8f preempt_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x838bbe6f lowpan_unregister_netdev net/6lowpan/6lowpan EXPORT_SYMBOL +0x0fd067d1 dccp_set_state net/dccp/dccp EXPORT_SYMBOL_GPL +0x7f2595a7 usb_function_deactivate drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xe9af7397 __xa_set_mark vmlinux EXPORT_SYMBOL +0x043b25d3 netlink_remove_tap vmlinux EXPORT_SYMBOL_GPL +0x04b7adb1 i2c_bit_add_bus vmlinux EXPORT_SYMBOL +0xb6e615ea class_destroy vmlinux EXPORT_SYMBOL_GPL +0x414422a4 drm_gem_shmem_get_sg_table vmlinux EXPORT_SYMBOL_GPL +0xf43d2caa acpi_remove_interface vmlinux EXPORT_SYMBOL +0x1d1495d2 pci_pme_active vmlinux EXPORT_SYMBOL +0x595c592d gpiochip_remove vmlinux EXPORT_SYMBOL_GPL +0x6d0ae550 pinctrl_gpio_request vmlinux EXPORT_SYMBOL_GPL +0x2fc83bd9 proc_create_net_single vmlinux EXPORT_SYMBOL_GPL +0x7f8153ab proc_create_net_data_write vmlinux EXPORT_SYMBOL_GPL +0x2237045d put_user_pages_dirty_lock vmlinux EXPORT_SYMBOL +0xb061a98a mutex_lock_killable vmlinux EXPORT_SYMBOL +0xcd6b2843 xdr_inline_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9a9c9a94 vhost_vring_ioctl drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xc90df7e5 dm_bufio_prefetch drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x232159f0 md_rdev_init drivers/md/md-mod EXPORT_SYMBOL_GPL +0xb32e8931 mlx4_register_vlan 
drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x16fbc3c0 __fscache_check_page_write fs/fscache/fscache EXPORT_SYMBOL +0xab32fcaa tcp_check_req vmlinux EXPORT_SYMBOL +0xb3165e34 tcp_init_sock vmlinux EXPORT_SYMBOL +0x560b52ca netlink_unicast vmlinux EXPORT_SYMBOL +0xf4cc30aa neigh_sysctl_register vmlinux EXPORT_SYMBOL +0x8f408660 ata_bmdma_status vmlinux EXPORT_SYMBOL_GPL +0x5eb4b976 drm_dev_dbg vmlinux EXPORT_SYMBOL +0xa61cd3ac tty_port_tty_hangup vmlinux EXPORT_SYMBOL_GPL +0x6adafb8c __bio_add_page vmlinux EXPORT_SYMBOL_GPL +0x5feb6092 bpf_trace_run12 vmlinux EXPORT_SYMBOL_GPL +0x41607375 bpf_trace_run11 vmlinux EXPORT_SYMBOL_GPL +0x7eab8ec3 bpf_trace_run10 vmlinux EXPORT_SYMBOL_GPL +0x787c6152 kvm_write_guest_page vmlinux EXPORT_SYMBOL_GPL +0xaa9575df atrtr_get_dev net/appletalk/appletalk EXPORT_SYMBOL +0x8e6f9ae0 snd_timer_start sound/core/snd-timer EXPORT_SYMBOL +0x07abcc0c mlxsw_afa_block_append_trap drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x2427e3ae hinic_register_micro_log drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8e876807 rps_needed vmlinux EXPORT_SYMBOL +0xfe045baa i2c_setup_smbus_alert vmlinux EXPORT_SYMBOL_GPL +0x895439f1 usb_serial_generic_read_bulk_callback vmlinux EXPORT_SYMBOL_GPL +0x0054bdae drm_gem_shmem_create_with_handle vmlinux EXPORT_SYMBOL +0xcc9e748d drm_gem_vram_driver_dumb_create vmlinux EXPORT_SYMBOL +0x628d4535 import_single_range vmlinux EXPORT_SYMBOL +0xbc794b62 security_sctp_bind_connect vmlinux EXPORT_SYMBOL +0x1c1b6781 set_nlink vmlinux EXPORT_SYMBOL +0xaf348da7 cpu_pm_exit vmlinux EXPORT_SYMBOL_GPL +0xbb6b1048 kill_pgrp vmlinux EXPORT_SYMBOL +0x7292ab34 cdebbuf_free drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x6ecf28d6 mptscsih_flush_running_cmds drivers/message/fusion/mptscsih EXPORT_SYMBOL +0xe4f402f3 mlx4_qp_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x66983e96 simd_skcipher_create crypto/crypto_simd EXPORT_SYMBOL_GPL +0x2d3423e7 device_wakeup_disable vmlinux EXPORT_SYMBOL_GPL +0x3f9780d1 battery_hook_register vmlinux EXPORT_SYMBOL_GPL +0x7fa7e813 dw_pcie_wait_for_link vmlinux EXPORT_SYMBOL_GPL +0x1bdd57a6 fat_build_inode vmlinux EXPORT_SYMBOL_GPL +0x4673d6d4 seq_read vmlinux EXPORT_SYMBOL +0x9e2b440d is_bad_inode vmlinux EXPORT_SYMBOL +0x15b19f9a param_get_int vmlinux EXPORT_SYMBOL +0xcce645b3 flow_offload_add net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x7bbf3062 vxlan_dev_create drivers/net/vxlan EXPORT_SYMBOL_GPL +0x916b9f17 hdlcdrv_receiver drivers/net/hamradio/hdlcdrv EXPORT_SYMBOL +0xb5e6a161 configfs_unregister_group fs/configfs/configfs EXPORT_SYMBOL +0x3dfe3498 skb_ext_add vmlinux EXPORT_SYMBOL +0xac110127 start_tty vmlinux EXPORT_SYMBOL +0x9fb21855 acpi_dev_resource_ext_address_space vmlinux EXPORT_SYMBOL_GPL +0x6513a3fa fb_get_color_depth vmlinux EXPORT_SYMBOL +0xb39bda27 crypto_unregister_kpp vmlinux EXPORT_SYMBOL_GPL +0x4b8283a6 shash_ahash_finup vmlinux EXPORT_SYMBOL_GPL +0x53e940e6 user_update vmlinux EXPORT_SYMBOL_GPL +0xc4f0da12 ktime_get_with_offset vmlinux EXPORT_SYMBOL_GPL +0x9662db1c phonet_proto_register net/phonet/phonet EXPORT_SYMBOL +0x329094c5 nf_ct_get_tuplepr net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa8679d3f snd_pcm_add_chmap_ctls sound/core/snd-pcm EXPORT_SYMBOL_GPL +0xa6c6e73d snd_pcm_notify sound/core/snd-pcm EXPORT_SYMBOL +0x821ce839 mlx5_core_modify_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x9859a529 kernel_sock_shutdown vmlinux EXPORT_SYMBOL +0xc66d919f dm_table_get_mode vmlinux EXPORT_SYMBOL 
+0x48d924b9 of_find_i2c_adapter_by_node vmlinux EXPORT_SYMBOL +0x62d7270a regulator_set_drvdata vmlinux EXPORT_SYMBOL_GPL +0x719df0d5 virtqueue_add_sgs vmlinux EXPORT_SYMBOL_GPL +0xbf38d60d pnp_unregister_card_driver vmlinux EXPORT_SYMBOL +0x54215db5 visitor64 vmlinux EXPORT_SYMBOL_GPL +0xc9641b48 visitor32 vmlinux EXPORT_SYMBOL_GPL +0xfda9581f prandom_u32 vmlinux EXPORT_SYMBOL +0x40ea55c1 posix_acl_chmod vmlinux EXPORT_SYMBOL +0x89e965ce notify_change vmlinux EXPORT_SYMBOL +0x8a79a9f9 follow_up vmlinux EXPORT_SYMBOL +0x5ecdd238 param_get_short vmlinux EXPORT_SYMBOL +0xd29859fa kvm_read_guest vmlinux EXPORT_SYMBOL_GPL +0x3585e72a lowpan_register_netdev net/6lowpan/6lowpan EXPORT_SYMBOL +0x48fa62bc garp_request_join net/802/garp EXPORT_SYMBOL_GPL +0x01b33592 line6_send_sysex_message sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x0054f69d dm_tm_pre_commit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x72289260 dm_block_manager_destroy drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x04b8849e inet_sk_set_state vmlinux EXPORT_SYMBOL +0xc456f1d9 nf_log_bind_pf vmlinux EXPORT_SYMBOL +0xb6749f87 dev_pick_tx_zero vmlinux EXPORT_SYMBOL +0xbb0aed7e sock_no_ioctl vmlinux EXPORT_SYMBOL +0x6f9d8f33 of_reconfig_get_state_change vmlinux EXPORT_SYMBOL_GPL +0x9fe899b7 get_cpu_idle_time vmlinux EXPORT_SYMBOL_GPL +0x85a2bcc2 __tracepoint_iscsi_dbg_tcp vmlinux EXPORT_SYMBOL_GPL +0xd9e00a00 mipi_dsi_dcs_set_tear_off vmlinux EXPORT_SYMBOL +0x275f3d49 hdmi_vendor_infoframe_check vmlinux EXPORT_SYMBOL +0x59452791 get_user_pages_fast vmlinux EXPORT_SYMBOL_GPL +0x8c4ac56b kernel_param_lock vmlinux EXPORT_SYMBOL +0x4137ed8a nf_conntrack_find_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x69482268 snd_rawmidi_transmit_ack sound/core/snd-rawmidi EXPORT_SYMBOL +0x0743cc81 snd_card_disconnect_sync sound/core/snd EXPORT_SYMBOL_GPL +0x2a7b7398 md_write_start drivers/md/md-mod EXPORT_SYMBOL +0xfbf8d085 mpt_verify_adapter drivers/message/fusion/mptbase EXPORT_SYMBOL +0xa87bc9e7 o2nm_configured_node_map fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x377adaa8 serio_unregister_child_port vmlinux EXPORT_SYMBOL +0x59edcdb6 scsi_remove_device vmlinux EXPORT_SYMBOL +0xb88d935c simple_empty vmlinux EXPORT_SYMBOL +0x001d84ab page_frag_alloc vmlinux EXPORT_SYMBOL +0x1bfaa4bc find_extend_vma vmlinux EXPORT_SYMBOL_GPL +0xfdd149d1 vm_mmap vmlinux EXPORT_SYMBOL +0x5347f95c shmem_read_mapping_page_gfp vmlinux EXPORT_SYMBOL_GPL +0x96e1106b dma_alloc_attrs vmlinux EXPORT_SYMBOL +0x2c696641 dccp_make_response net/dccp/dccp EXPORT_SYMBOL_GPL +0x49824962 nf_sk_lookup_slow_v6 net/ipv6/netfilter/nf_socket_ipv6 EXPORT_SYMBOL_GPL +0xcee5e80b nf_sk_lookup_slow_v4 net/ipv4/netfilter/nf_socket_ipv4 EXPORT_SYMBOL_GPL +0x0225406b mlxsw_i2c_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_i2c EXPORT_SYMBOL +0x5ad95356 srp_timed_out drivers/scsi/scsi_transport_srp EXPORT_SYMBOL +0x9a22391e radix_tree_gang_lookup_tag_slot vmlinux EXPORT_SYMBOL +0x71b3f374 __sk_mem_schedule vmlinux EXPORT_SYMBOL +0x72287455 __hid_register_driver vmlinux EXPORT_SYMBOL_GPL +0x5f2817cb hid_disconnect vmlinux EXPORT_SYMBOL_GPL +0x70cf032f usb_hcd_irq vmlinux EXPORT_SYMBOL_GPL +0x97ea1521 __spi_register_driver vmlinux EXPORT_SYMBOL_GPL +0xa1d29f73 __pci_register_driver vmlinux EXPORT_SYMBOL +0x028043f5 fuse_file_poll vmlinux EXPORT_SYMBOL_GPL +0x50a21c3e mnt_want_write_file vmlinux EXPORT_SYMBOL_GPL +0x6a7a38a0 ceph_pr_addr net/ceph/libceph EXPORT_SYMBOL +0x7304c8f1 ibdev_notice drivers/infiniband/core/ib_core 
EXPORT_SYMBOL +0x172abe82 dm_snap_cow drivers/md/dm-snapshot EXPORT_SYMBOL +0x58b03449 usb_string_ids_tab drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x833bf30f nfs_file_set_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd40d48f6 nfs_free_server fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5516f956 ip_ct_attach vmlinux EXPORT_SYMBOL +0xca43edd5 dev_set_mtu vmlinux EXPORT_SYMBOL +0x1c989357 ahci_fill_cmd_slot vmlinux EXPORT_SYMBOL_GPL +0x38ee3178 fwnode_handle_get vmlinux EXPORT_SYMBOL_GPL +0x41b42d60 crypto_unregister_template vmlinux EXPORT_SYMBOL_GPL +0x286e3981 security_sk_classify_flow vmlinux EXPORT_SYMBOL +0x5b2681ee bd_link_disk_holder vmlinux EXPORT_SYMBOL_GPL +0x2919b156 xdr_decode_string_inplace net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x079b6ab7 vhost_dev_init drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xe21ffb51 tap_handle_frame drivers/net/tap EXPORT_SYMBOL_GPL +0x0ebb7d2b fcoe_ctlr_set_fip_mode drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x6acefa42 fscache_cache_cleared_wq fs/fscache/fscache EXPORT_SYMBOL +0xe8c3533e wimax_state_get vmlinux EXPORT_SYMBOL_GPL +0xebdde5bc of_fwnode_ops vmlinux EXPORT_SYMBOL_GPL +0x99d0cd8d regulator_set_soft_start_regmap vmlinux EXPORT_SYMBOL_GPL +0x6e47f4f1 regulator_get_error_flags vmlinux EXPORT_SYMBOL_GPL +0xdc512134 backlight_register_notifier vmlinux EXPORT_SYMBOL +0x335c570f enable_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0x0a11df1b tipc_nl_sk_walk net/tipc/tipc EXPORT_SYMBOL +0xc3a2eb76 svc_seq_show net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x235dd975 hinic_host_total_func drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd544b78f fc_rport_create drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x08d38ff1 fc_vport_create drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0x0626da5f od_unregister_powersave_bias_handler vmlinux EXPORT_SYMBOL_GPL +0xd87fc0a0 usb_amd_prefetch_quirk vmlinux EXPORT_SYMBOL_GPL +0x6bd97a81 scsi_free_host_dev vmlinux EXPORT_SYMBOL +0x4065d168 pm_print_active_wakeup_sources vmlinux EXPORT_SYMBOL_GPL +0x91073b4c driver_register vmlinux EXPORT_SYMBOL_GPL +0x0231f6b7 pnp_device_detach vmlinux EXPORT_SYMBOL +0x6188ba92 should_remove_suid vmlinux EXPORT_SYMBOL +0xc92f7f50 vsock_table_lock net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x6057c6f3 capi_message2cmsg drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x3a797d19 dm_btree_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb5a256e5 mlx5_nic_vport_affiliate_multiport drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x9d50ed7e fc_rport_destroy drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xe48d975d fc_lport_destroy drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xe2860258 input_ff_create vmlinux EXPORT_SYMBOL_GPL +0xccd86806 ata_id_string vmlinux EXPORT_SYMBOL_GPL +0xa246587c device_destroy vmlinux EXPORT_SYMBOL_GPL +0xcb9231b1 drm_mode_create_tv_margin_properties vmlinux EXPORT_SYMBOL +0xe71a6a77 divider_ro_round_rate_parent vmlinux EXPORT_SYMBOL_GPL +0x66b3d0b9 srcu_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0xb83129db ZSTD_decompressContinue lib/zstd/zstd_decompress EXPORT_SYMBOL +0x88c9fd31 nft_dump_register net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x3ff55ad3 nf_conncount_cache_free net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0xb5e762fa mlxsw_afk_values_add_buf drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x47041e4e mlxsw_afk_key_info_blocks_count_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xa9dbe80b hinic_register_mgmt_msg_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x1df7d2d6 inet6_stream_ops vmlinux 
EXPORT_SYMBOL +0x538616bb xfrm_if_register_cb vmlinux EXPORT_SYMBOL +0xacd7353d unregister_pernet_device vmlinux EXPORT_SYMBOL_GPL +0xc8da1c99 of_dma_is_coherent vmlinux EXPORT_SYMBOL_GPL +0x4d60b747 input_ff_destroy vmlinux EXPORT_SYMBOL_GPL +0x5f6f1e9e dax_get_private vmlinux EXPORT_SYMBOL_GPL +0x14a6f6b0 platform_get_irq_byname vmlinux EXPORT_SYMBOL_GPL +0x7970f50c seq_dentry vmlinux EXPORT_SYMBOL +0x37110088 remove_wait_queue vmlinux EXPORT_SYMBOL +0xfeb21f91 param_array_ops vmlinux EXPORT_SYMBOL +0xc760f22d ceph_copy_from_page_vector net/ceph/libceph EXPORT_SYMBOL +0xe94bce5d lowpan_nhc_add net/6lowpan/6lowpan EXPORT_SYMBOL +0x5f5cb460 flow_offload_alloc net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x9bbfc253 ib_send_cm_sidr_rep drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xc3da6487 ib_send_cm_sidr_req drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xcfbde139 mtd_get_unmapped_area drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xdfc00457 iscsi_tcp_set_max_r2t drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x1b1ce863 iscsi_conn_queue_work drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x7438c058 pnfs_generic_clear_request_commit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x10393d3c configfs_register_group fs/configfs/configfs EXPORT_SYMBOL +0xe6f5e6f5 xas_clear_mark vmlinux EXPORT_SYMBOL_GPL +0x7dd23ad3 kset_find_obj vmlinux EXPORT_SYMBOL_GPL +0xab059c15 napi_get_frags vmlinux EXPORT_SYMBOL +0x8bfb49cb efivar_entry_remove vmlinux EXPORT_SYMBOL_GPL +0x86e86f9d dev_pm_domain_detach vmlinux EXPORT_SYMBOL_GPL +0x65ca3bfb device_connection_remove vmlinux EXPORT_SYMBOL_GPL +0x0e363bee drm_connector_set_vrr_capable_property vmlinux EXPORT_SYMBOL +0xc73d0bf4 scsi_verify_blk_ioctl vmlinux EXPORT_SYMBOL +0x8af90dc6 kill_bdev vmlinux EXPORT_SYMBOL +0x6f3cd704 pagecache_isize_extended vmlinux EXPORT_SYMBOL +0x256a784c disable_kprobe vmlinux EXPORT_SYMBOL_GPL +0x56470118 __warn_printk vmlinux EXPORT_SYMBOL +0xf1212a2f hinic_update_rq_hw_pi drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7117e89e wimax_rfkill vmlinux EXPORT_SYMBOL +0x225da241 ipv6_dup_options vmlinux EXPORT_SYMBOL_GPL +0x7efb5fc6 phy_register_fixup_for_id vmlinux EXPORT_SYMBOL +0x0a5cb41d phylink_connect_phy vmlinux EXPORT_SYMBOL_GPL +0x05d33f02 sys_copyarea vmlinux EXPORT_SYMBOL +0x4c663363 cgroup_attach_task_all vmlinux EXPORT_SYMBOL_GPL +0xc3182c1f delayed_work_timer_fn vmlinux EXPORT_SYMBOL +0xaf0847f0 nf_conntrack_locks net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x6f04d50c tap_get_socket drivers/net/tap EXPORT_SYMBOL_GPL +0xeae53252 configfs_remove_default_groups fs/configfs/configfs EXPORT_SYMBOL +0xa16e60d6 first_ec vmlinux EXPORT_SYMBOL +0x5c32a897 dim_park_tired vmlinux EXPORT_SYMBOL +0x6f8dc77b zap_vma_ptes vmlinux EXPORT_SYMBOL_GPL +0x485cd7f6 kvm_rebooting vmlinux EXPORT_SYMBOL_GPL +0xdc204bb1 rpc_init_priority_wait_queue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x85430a76 nft_validate_register_load net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x9e798e22 dm_bm_set_read_only drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xfb0a7083 md_bitmap_free drivers/md/md-mod EXPORT_SYMBOL +0xae4a5741 md_bitmap_endwrite drivers/md/md-mod EXPORT_SYMBOL +0x8a29c1a1 mlxsw_core_rx_listener_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc12e35ee mlx4_ACCESS_PTYS_REG drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6972e465 xfrm_policy_bysel_ctx vmlinux EXPORT_SYMBOL +0x52e0146b tcp_sendmsg vmlinux EXPORT_SYMBOL +0xdae4c731 i2c_smbus_write_word_data vmlinux EXPORT_SYMBOL +0xf3cd9dad ata_bmdma_port_start 
vmlinux EXPORT_SYMBOL_GPL
+0x9749916b drm_panel_bridge_remove vmlinux EXPORT_SYMBOL
+0x114c9b9c drm_dp_send_power_updown_phy vmlinux EXPORT_SYMBOL
+0x959874d8 find_iova vmlinux EXPORT_SYMBOL_GPL
+0x2b952517 clk_has_parent vmlinux EXPORT_SYMBOL_GPL
+0xb29ef899 dim_park_on_top vmlinux EXPORT_SYMBOL
+0x27ddfd0f elv_rb_del vmlinux EXPORT_SYMBOL
+0xd4083181 elv_rb_add vmlinux EXPORT_SYMBOL
+0xc43df6ea init_user_ns vmlinux EXPORT_SYMBOL_GPL
+0x28b18398 wpan_phy_free net/ieee802154/ieee802154 EXPORT_SYMBOL
+0xdae7340e rpc_init_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4dc38c70 mptscsih_qcmd drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x21d2a3a9 mptscsih_is_phys_disk drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0xd593d707 ip_mc_leave_group vmlinux EXPORT_SYMBOL
+0x45860eef lwtunnel_cmp_encap vmlinux EXPORT_SYMBOL_GPL
+0x8a01e5f8 netif_skb_features vmlinux EXPORT_SYMBOL
+0x7d461d00 pm_runtime_set_memalloc_noio vmlinux EXPORT_SYMBOL_GPL
+0x5625ec45 iommu_fwspec_init vmlinux EXPORT_SYMBOL_GPL
+0x821f299b devm_gpiod_get_array vmlinux EXPORT_SYMBOL_GPL
+0x7165885d fuse_simple_background vmlinux EXPORT_SYMBOL_GPL
+0x6dbaafd3 put_old_timespec32 vmlinux EXPORT_SYMBOL_GPL
+0x5199e680 work_busy vmlinux EXPORT_SYMBOL_GPL
+0x4a5d0960 dccp_disconnect net/dccp/dccp EXPORT_SYMBOL_GPL
+0x3f73f0f9 ib_sa_get_mcmember_rec drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xfc7dbf7a __sk_mem_reclaim vmlinux EXPORT_SYMBOL
+0x16c98b0b drm_dp_stop_crc vmlinux EXPORT_SYMBOL
+0x26da4eab tty_set_ldisc vmlinux EXPORT_SYMBOL_GPL
+0xde8079cf lcd_device_unregister vmlinux EXPORT_SYMBOL
+0x04863e28 hdmi_audio_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0x62f015d1 pcim_pin_device vmlinux EXPORT_SYMBOL
+0xdd4c61e0 pci_cfg_access_lock vmlinux EXPORT_SYMBOL_GPL
+0xc15b730a crypto_register_skcipher vmlinux EXPORT_SYMBOL_GPL
+0xb07ff7fc d_prune_aliases vmlinux EXPORT_SYMBOL
+0x12a38747 usleep_range vmlinux EXPORT_SYMBOL
+0x2e78702e kmsg_dump_get_line vmlinux EXPORT_SYMBOL_GPL
+0x2ab93744 ip_set_put_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x1b14cb7d ip_set_get_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xa3d0d2b6 mlxsw_afa_block_append_fwd drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x32fde95c mlx4_tunnel_steer_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xa0c7ca94 fib_nh_common_init vmlinux EXPORT_SYMBOL_GPL
+0xec353f29 tcp_mss_to_mtu vmlinux EXPORT_SYMBOL
+0x61d23f14 gro_cells_receive vmlinux EXPORT_SYMBOL
+0x24e86fd0 pci_common_swizzle vmlinux EXPORT_SYMBOL_GPL
+0x9cb12639 blkcg_dkstats_find_create vmlinux EXPORT_SYMBOL_GPL
+0x6cd9cae0 blk_mq_init_sq_queue vmlinux EXPORT_SYMBOL
+0x44271790 __pagevec_release vmlinux EXPORT_SYMBOL
+0xf3d9cc60 cache_register_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf1a5611d o2net_unregister_handler_list fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0xc2aeae18 metadata_dst_free_percpu vmlinux EXPORT_SYMBOL_GPL
+0x5e8d0f7d hid_unregister_driver vmlinux EXPORT_SYMBOL_GPL
+0x95ef1ccc dmi_memdev_size vmlinux EXPORT_SYMBOL_GPL
+0xd94ba5f4 usb_autopm_put_interface vmlinux EXPORT_SYMBOL_GPL
+0xe7014aae hisi_sas_debugfs_work_handler vmlinux EXPORT_SYMBOL_GPL
+0x5bb754d8 drm_fb_helper_unregister_fbi vmlinux EXPORT_SYMBOL
+0x1d911b23 drm_dp_link_configure vmlinux EXPORT_SYMBOL
+0x5158935c pcie_set_readrq vmlinux EXPORT_SYMBOL
+0x3c9785f4 pcie_bus_configure_settings vmlinux EXPORT_SYMBOL_GPL
+0x52476975 nla_reserve_nohdr vmlinux EXPORT_SYMBOL
+0x4230a8d7 sg_nents_for_len vmlinux EXPORT_SYMBOL
+0x00b9b19d device_add_disk vmlinux EXPORT_SYMBOL
+0x2daa2177 x509_free_certificate vmlinux EXPORT_SYMBOL_GPL
+0xe849104b securityfs_create_file vmlinux EXPORT_SYMBOL_GPL
+0xe863b9df exportfs_encode_fh vmlinux EXPORT_SYMBOL_GPL
+0x200363c1 iomap_invalidatepage vmlinux EXPORT_SYMBOL_GPL
+0x3ef14899 ilookup5 vmlinux EXPORT_SYMBOL
+0x0e8a574a cpuacct_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x5e515be6 ktime_get_ts64 vmlinux EXPORT_SYMBOL_GPL
+0xc0a96e14 rcu_gp_is_expedited vmlinux EXPORT_SYMBOL_GPL
+0x409bcb62 mutex_unlock vmlinux EXPORT_SYMBOL
+0x591a1367 prepare_creds vmlinux EXPORT_SYMBOL
+0x0788ecc8 alloc_can_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0x4cb9e001 recover_lost_locks fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xfd37c49c register_nfs_version fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xe515874a ip6_err_gen_icmpv6_unreach vmlinux EXPORT_SYMBOL
+0x8473c7e1 dev_addr_flush vmlinux EXPORT_SYMBOL
+0xbdf22fa2 nvmem_device_write vmlinux EXPORT_SYMBOL_GPL
+0xfeca68f3 thermal_notify_framework vmlinux EXPORT_SYMBOL_GPL
+0x532fbe0a drm_sched_increase_karma vmlinux EXPORT_SYMBOL
+0xb8eee93c ttm_bo_eviction_valuable vmlinux EXPORT_SYMBOL
+0x04dabb09 dquot_quota_sync vmlinux EXPORT_SYMBOL
+0xcb7f015a sunrpc_cache_pipe_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9108bbe9 can_rx_offload_del drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0x9883ff12 mlx4_cq_resize drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x834976bc st21nfca_hci_se_io drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0xa731f387 nl_table_lock vmlinux EXPORT_SYMBOL_GPL
+0x6642b2e3 usb_phy_roothub_set_mode vmlinux EXPORT_SYMBOL_GPL
+0xafca4c93 of_find_spi_device_by_node vmlinux EXPORT_SYMBOL_GPL
+0x12ee19e7 drm_writeback_connector_init vmlinux EXPORT_SYMBOL
+0x5cfb26a0 acpi_enter_sleep_state vmlinux EXPORT_SYMBOL
+0x0c372aa3 blkdev_get_by_dev vmlinux EXPORT_SYMBOL
+0xa574a02c get_tree_keyed vmlinux EXPORT_SYMBOL
+0xd89da37f movable_zone vmlinux EXPORT_SYMBOL
+0x618911fc numa_node vmlinux EXPORT_SYMBOL
+0xa83f641e ring_buffer_unlock_commit vmlinux EXPORT_SYMBOL_GPL
+0x41237f71 cpu_have_feature vmlinux EXPORT_SYMBOL_GPL
+0x1757d1a4 fou_encap_hlen net/ipv4/fou EXPORT_SYMBOL
+0xdbd019c4 ib_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL_GPL
+0xe0aca834 mlx4_map_sw_to_hw_steering_id drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x488d6de4 nfs_scan_commit_list fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x1153ac35 nfs4_dentry_operations fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x7a2f5411 ping_queue_rcv_skb vmlinux EXPORT_SYMBOL_GPL
+0x6b55acd0 rtnl_lock_killable vmlinux EXPORT_SYMBOL
+0xe2fd014e rc_allocate_device vmlinux EXPORT_SYMBOL_GPL
+0x412ec453 drm_sched_fini vmlinux EXPORT_SYMBOL
+0xc5996c8c pkcs7_get_content_data vmlinux EXPORT_SYMBOL_GPL
+0x2705305e simple_fill_super vmlinux EXPORT_SYMBOL
+0x18fd8bde event_triggers_call vmlinux EXPORT_SYMBOL_GPL
+0x07cc4a5d printk_timed_ratelimit vmlinux EXPORT_SYMBOL
+0xadbefda4 dm_cache_policy_destroy drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x3a18389a dm_rh_update_states drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xed3283a4 dm_bufio_set_sector_offset drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xab7b7e02 hinic_aeq_register_swe_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xe48c3726 pnfs_register_layoutdriver fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x95e878fe sock_queue_err_skb vmlinux EXPORT_SYMBOL
+0x4baf7e59 sha256_final vmlinux EXPORT_SYMBOL
+0xfe1d2e94 key_create_or_update vmlinux EXPORT_SYMBOL
+0x1f8db7f9 ring_buffer_overrun_cpu vmlinux EXPORT_SYMBOL_GPL
+0x0faef0ed __tasklet_schedule vmlinux EXPORT_SYMBOL
+0x56466e42 ZSTD_CStreamInSize lib/zstd/zstd_compress EXPORT_SYMBOL
+0xb611ed73 ceph_messenger_init net/ceph/libceph EXPORT_SYMBOL
+0xdc89670e xprt_adjust_cwnd net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xfdf54210 ib_process_mad_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x2d9df2a4 dev_load vmlinux EXPORT_SYMBOL
+0xf4f14de6 rtnl_trylock vmlinux EXPORT_SYMBOL
+0x85cf899e skb_flow_dissect_meta vmlinux EXPORT_SYMBOL
+0xba2b7f64 cpufreq_generic_get vmlinux EXPORT_SYMBOL_GPL
+0xae44fb40 rc_keydown vmlinux EXPORT_SYMBOL_GPL
+0x818416e1 scsi_set_sense_information vmlinux EXPORT_SYMBOL
+0xb47e54c0 bio_add_page vmlinux EXPORT_SYMBOL
+0xfdac2ec2 debugfs_create_u16 vmlinux EXPORT_SYMBOL_GPL
+0xd206b24e __d_lookup_done vmlinux EXPORT_SYMBOL
+0xa9086c55 init_on_alloc vmlinux EXPORT_SYMBOL
+0x5ba46264 init_pid_ns vmlinux EXPORT_SYMBOL_GPL
+0x8263a6d9 proc_douintvec vmlinux EXPORT_SYMBOL
+0x9bdf4b52 ipt_unregister_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL
+0x6b73ef57 mptscsih_slave_destroy drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x40f2b10c ipmi_alloc_smi_msg drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x263f039e xas_nomem vmlinux EXPORT_SYMBOL_GPL
+0x8be0bf65 gro_find_receive_by_type vmlinux EXPORT_SYMBOL
+0x40d5c894 __sk_mem_raise_allocated vmlinux EXPORT_SYMBOL
+0x933f75e0 usb_unlink_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0x4afc9955 fb_bl_default_curve vmlinux EXPORT_SYMBOL_GPL
+0x107e5878 zlib_inflateEnd vmlinux EXPORT_SYMBOL
+0x87b8d2ef __blkdev_issue_discard vmlinux EXPORT_SYMBOL
+0x7583122f skcipher_walk_aead vmlinux EXPORT_SYMBOL_GPL
+0xf9c4a82d p9_client_getlock_dotl net/9p/9pnet EXPORT_SYMBOL
+0x232f40da rpc_num_bc_slots net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5407d097 i2c_mux_add_adapter drivers/i2c/i2c-mux EXPORT_SYMBOL_GPL
+0x48012e28 xt_check_proc_name vmlinux EXPORT_SYMBOL
+0x922f0511 xt_unregister_target vmlinux EXPORT_SYMBOL
+0xee3a52d6 led_trigger_show vmlinux EXPORT_SYMBOL_GPL
+0x74860b83 ehci_adjust_port_wakeup_flags vmlinux EXPORT_SYMBOL_GPL
+0xb423dba1 console_blanked vmlinux EXPORT_SYMBOL
+0x904f34eb pci_bus_add_device vmlinux EXPORT_SYMBOL_GPL
+0x481f0785 ack_all_badblocks vmlinux EXPORT_SYMBOL_GPL
+0x7179cd3c handle_fasteoi_nmi vmlinux EXPORT_SYMBOL_GPL
+0xee02e420 ax25_findbyuid net/ax25/ax25 EXPORT_SYMBOL
+0x259e897c config_group_init_type_name fs/configfs/configfs EXPORT_SYMBOL
+0x9cf37c44 __iowrite32_copy vmlinux EXPORT_SYMBOL_GPL
+0x9def727f __sb_end_write vmlinux EXPORT_SYMBOL
+0xeb4221e4 trace_clock vmlinux EXPORT_SYMBOL_GPL
+0x741d97a7 from_kgid vmlinux EXPORT_SYMBOL
+0x8c26d495 prepare_to_wait_event vmlinux EXPORT_SYMBOL
+0x3edbfa30 xdr_enter_page net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa4720a81 ct_sip_parse_address_param net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x6393d4f3 nf_conntrack_unregister_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xc346e3c1 nf_conntrack_helpers_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x745d99f4 fib_rules_register vmlinux EXPORT_SYMBOL_GPL
+0x7adab7fb cpufreq_generic_suspend vmlinux EXPORT_SYMBOL
+0xc2628c50 iscsi_create_conn vmlinux EXPORT_SYMBOL_GPL
+0x2c0bea9d ata_do_eh vmlinux EXPORT_SYMBOL_GPL
+0x9b435704 ttm_bo_mem_space vmlinux EXPORT_SYMBOL
+0x509b64ea acpi_has_method vmlinux EXPORT_SYMBOL
+0x679956aa simple_transaction_read vmlinux EXPORT_SYMBOL
+0x8a99a016 mempool_free_slab vmlinux EXPORT_SYMBOL
+0x18b48e28 __memset_io vmlinux EXPORT_SYMBOL
+0xc4ff32da snd_ctl_register_ioctl_compat sound/core/snd EXPORT_SYMBOL
+0x6d3f57bd dm_bufio_get_client drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xa1f0e5fd md_bitmap_copy_from_slot drivers/md/md-mod EXPORT_SYMBOL_GPL
+0xb81bf015 mlx4_write_mtt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x4ff3ffd4 kpatch_shadow_free kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL_GPL
+0x2e60cd58 devlink_dpipe_entry_ctx_prepare vmlinux EXPORT_SYMBOL_GPL
+0x77ae495d usb_speed_string vmlinux EXPORT_SYMBOL_GPL
+0x319f393d mii_ethtool_set_link_ksettings vmlinux EXPORT_SYMBOL
+0x1dccb3e2 mii_ethtool_get_link_ksettings vmlinux EXPORT_SYMBOL
+0x584670d5 pm_generic_resume_early vmlinux EXPORT_SYMBOL_GPL
+0xdc6d324d pm_generic_thaw vmlinux EXPORT_SYMBOL_GPL
+0xee70a14a drm_dp_check_act_status vmlinux EXPORT_SYMBOL
+0x272e9d77 hisi_reset_exit vmlinux EXPORT_SYMBOL_GPL
+0x054da9b1 acpi_create_platform_device vmlinux EXPORT_SYMBOL_GPL
+0xaa00fdc0 ec_transaction vmlinux EXPORT_SYMBOL
+0x669a229e pci_scan_slot vmlinux EXPORT_SYMBOL
+0x486bd25a net_dim vmlinux EXPORT_SYMBOL
+0xbe6ed06e ceph_osdc_unwatch net/ceph/libceph EXPORT_SYMBOL
+0x63a95777 gss_mech_register net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x0cb3a124 vfio_external_group_match_file drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0xbe36e175 __ip_mc_dec_group vmlinux EXPORT_SYMBOL
+0x83b56658 __ip_mc_inc_group vmlinux EXPORT_SYMBOL
+0xfa39ad04 spi_unregister_controller vmlinux EXPORT_SYMBOL_GPL
+0x7efb155a __pm_runtime_resume vmlinux EXPORT_SYMBOL_GPL
+0x2b79f990 devm_kstrdup_const vmlinux EXPORT_SYMBOL_GPL
+0xbe3709cc of_reset_control_array_get vmlinux EXPORT_SYMBOL_GPL
+0x91165d2f processors vmlinux EXPORT_SYMBOL
+0xcf3d615f jbd2_journal_set_features vmlinux EXPORT_SYMBOL
+0x9b45c22e mpage_readpages vmlinux EXPORT_SYMBOL
+0xe7d6d2d4 filter_match_preds vmlinux EXPORT_SYMBOL_GPL
+0xde0e6eb2 base_old_true_key lib/test_static_key_base EXPORT_SYMBOL_GPL
+0x469c402c mlx5_query_hca_vport_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x393190be iscsi_conn_get_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x12dd1e77 ipmi_set_maintenance_mode drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xb85de48c __tracepoint_pnfs_mds_fallback_read_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x2288fc63 regmap_raw_read vmlinux EXPORT_SYMBOL_GPL
+0x9e1d0072 device_get_phy_mode vmlinux EXPORT_SYMBOL_GPL
+0xcd9aa640 pci_request_regions vmlinux EXPORT_SYMBOL
+0x4df119fa __bitmap_parse vmlinux EXPORT_SYMBOL
+0x3f749801 blk_mq_unique_tag vmlinux EXPORT_SYMBOL
+0x00cbae78 __wait_on_buffer vmlinux EXPORT_SYMBOL
+0xa0d3456d nr_swap_pages vmlinux EXPORT_SYMBOL_GPL
+0xb1464dc4 cancel_delayed_work_sync vmlinux EXPORT_SYMBOL
+0xc6d2ffb6 rds_conn_path_drop net/rds/rds EXPORT_SYMBOL_GPL
+0x7ae26f56 iscsit_find_cmd_from_itt_or_dump drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x9cedcd57 __tracepoint_bcache_alloc_fail drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xe477abc7 mlx5_create_lag_demux_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x7bc3d931 microread_probe drivers/nfc/microread/microread EXPORT_SYMBOL
+0xe8b6096b lwtunnel_encap_add_ops vmlinux EXPORT_SYMBOL_GPL
+0xa569e9eb sock_create vmlinux EXPORT_SYMBOL
+0xad84bef8 dm_table_event vmlinux EXPORT_SYMBOL
+0xe05af514 usb_serial_handle_sysrq_char vmlinux EXPORT_SYMBOL_GPL
+0x743a165e ata_pack_xfermask vmlinux EXPORT_SYMBOL_GPL
+0xb6b4e3c3 class_dev_iter_next vmlinux EXPORT_SYMBOL_GPL
+0x6289a5a0 iommu_domain_window_disable vmlinux EXPORT_SYMBOL_GPL
+0xce9b744a clk_add_alias vmlinux EXPORT_SYMBOL
+0x373e2978 skcipher_walk_atomise vmlinux EXPORT_SYMBOL_GPL
+0xfdd19ec7 debugfs_create_x32 vmlinux EXPORT_SYMBOL_GPL
+0xacfc2e19 bprm_change_interp vmlinux EXPORT_SYMBOL
+0xe06f52ba kill_litter_super vmlinux EXPORT_SYMBOL
+0x91f9e156 resource_list_create_entry vmlinux EXPORT_SYMBOL
+0x21517117 rxrpc_kernel_check_life net/rxrpc/rxrpc EXPORT_SYMBOL
+0x19f1af03 ip_vs_conn_new net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xbc6bcd15 dm_region_hash_create drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xbe82d6cc mlxsw_env_get_module_info drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x2235fb24 hnae3_register_ae_dev drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL
+0x00b264e1 __fscache_read_or_alloc_page fs/fscache/fscache EXPORT_SYMBOL
+0x4fbe795b xfrm_dev_offload_ok vmlinux EXPORT_SYMBOL_GPL
+0xd506738a inet_proto_csum_replace4 vmlinux EXPORT_SYMBOL
+0xa0d32d11 neigh_direct_output vmlinux EXPORT_SYMBOL
+0xedb64a83 usb_free_urb vmlinux EXPORT_SYMBOL_GPL
+0x2520df7c drm_panel_disable vmlinux EXPORT_SYMBOL
+0x2472584d drm_connector_attach_encoder vmlinux EXPORT_SYMBOL
+0xd67a08b9 tty_prepare_flip_string vmlinux EXPORT_SYMBOL_GPL
+0x8cb0cfbe pci_add_resource_offset vmlinux EXPORT_SYMBOL
+0xb95711c8 __breadahead_gfp vmlinux EXPORT_SYMBOL
+0x75bda77a seq_hlist_next vmlinux EXPORT_SYMBOL
+0xfe4450c7 __lock_page_killable vmlinux EXPORT_SYMBOL_GPL
+0xd4671463 xor_block_inner_neon arch/arm64/lib/xor-neon EXPORT_SYMBOL
+0x764c7363 nfc_fw_download_done net/nfc/nfc EXPORT_SYMBOL
+0xc62f01ad nf_ct_expect_iterate_net net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xc68ddd16 __rdma_accept drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x4711c158 cdrom_number_of_slots drivers/cdrom/cdrom EXPORT_SYMBOL
+0xf4641d47 fib_rules_unregister vmlinux EXPORT_SYMBOL_GPL
+0x7ae78b73 skb_copy_bits vmlinux EXPORT_SYMBOL
+0x49c85ef3 sock_no_sendpage_locked vmlinux EXPORT_SYMBOL
+0x16c66718 drm_match_cea_mode vmlinux EXPORT_SYMBOL
+0xd9943c4c drm_atomic_helper_disable_all vmlinux EXPORT_SYMBOL
+0x5d830297 get_random_bytes_arch vmlinux EXPORT_SYMBOL
+0xcca38340 clk_hw_register_fractional_divider vmlinux EXPORT_SYMBOL_GPL
+0x7d81226a pci_irq_get_affinity vmlinux EXPORT_SYMBOL
+0x681406d9 pcim_iomap vmlinux EXPORT_SYMBOL
+0x4934b7b3 crypto_register_ahash vmlinux EXPORT_SYMBOL_GPL
+0x1bfd79ad sysfs_remove_link vmlinux EXPORT_SYMBOL_GPL
+0x9cb31acc kernfs_notify vmlinux EXPORT_SYMBOL_GPL
+0x62703bb2 noop_direct_IO vmlinux EXPORT_SYMBOL_GPL
+0x5b735e56 shmem_file_setup_with_mnt vmlinux EXPORT_SYMBOL_GPL
+0x8924eb1e rcu_force_quiescent_state vmlinux EXPORT_SYMBOL_GPL
+0xc08bf111 irq_domain_push_irq vmlinux EXPORT_SYMBOL_GPL
+0x9e98722b ip_set_get_ipaddr6 net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xa293f8a6 ip_set_get_ipaddr4 net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xb6430297 ib_free_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf9966c2f mptscsih_taskmgmt_response_code drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x0c2cc712 tcp_make_synack vmlinux EXPORT_SYMBOL
+0x7470e572 __skb_checksum vmlinux EXPORT_SYMBOL
+0x5eacf9e9 hid_hw_stop vmlinux EXPORT_SYMBOL_GPL
+0xa471982d dm_table_set_type vmlinux EXPORT_SYMBOL_GPL
+0xb491386b mii_check_gmii_support vmlinux EXPORT_SYMBOL
+0xbb05d2c2 ata_ehi_clear_desc vmlinux EXPORT_SYMBOL_GPL
+0x5fd43696 drm_vma_node_revoke vmlinux EXPORT_SYMBOL
+0x53423f41 drm_atomic_helper_damage_iter_init vmlinux EXPORT_SYMBOL
+0x855c0aae dma_async_device_register vmlinux EXPORT_SYMBOL
+0x26928c42 pci_resize_resource vmlinux EXPORT_SYMBOL
+0x3c06a4f3 t10_pi_type1_ip vmlinux EXPORT_SYMBOL
+0xdc21e866 hrtimer_forward vmlinux EXPORT_SYMBOL_GPL
+0x7d59dd46 pm_wq vmlinux EXPORT_SYMBOL_GPL
+0xad9901ae bit_waitqueue vmlinux EXPORT_SYMBOL
+0x8a5793d0 kvm_release_page_dirty vmlinux EXPORT_SYMBOL_GPL
+0xe043bb36 dm_cell_put_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x6d1bed26 dm_cell_get_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xd45434ee admin_timeout drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xf7446ee7 sock_common_recvmsg vmlinux EXPORT_SYMBOL
+0xda838668 ptp_clock_register vmlinux EXPORT_SYMBOL
+0xa50fcd09 ata_wait_register vmlinux EXPORT_SYMBOL_GPL
+0xef52d869 drm_mode_validate_ycbcr420 vmlinux EXPORT_SYMBOL
+0x73731c17 drm_ioctl_kernel vmlinux EXPORT_SYMBOL
+0xfe12bcb9 drm_dsc_compute_rc_parameters vmlinux EXPORT_SYMBOL
+0xc614c3ce dma_request_chan vmlinux EXPORT_SYMBOL_GPL
+0x5aeeee62 ceph_oid_aprintf net/ceph/libceph EXPORT_SYMBOL
+0xe7efe990 nf_tproxy_get_sock_v4 net/ipv4/netfilter/nf_tproxy_ipv4 EXPORT_SYMBOL_GPL
+0xb7f2d169 mtd_pairing_groups drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0xee1d8e7e md_stop_writes drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x8339df73 klist_add_behind vmlinux EXPORT_SYMBOL_GPL
+0xdd20e68b inet_csk_addr2sockaddr vmlinux EXPORT_SYMBOL_GPL
+0xdfc86068 inet_ehash_locks_alloc vmlinux EXPORT_SYMBOL_GPL
+0xa28cfcc0 gen_estimator_active vmlinux EXPORT_SYMBOL
+0xbdf9ef3e drm_crtc_check_viewport vmlinux EXPORT_SYMBOL
+0x14d01690 clk_mux_index_to_val vmlinux EXPORT_SYMBOL_GPL
+0x574c2e74 bitmap_release_region vmlinux EXPORT_SYMBOL
+0xc622752f __blk_mq_debugfs_rq_show vmlinux EXPORT_SYMBOL_GPL
+0xab2e3fc2 jbd2_transaction_committed vmlinux EXPORT_SYMBOL
+0xb58aeaab kernel_cpustat vmlinux EXPORT_SYMBOL
+0xbe38a431 dm_rh_recovery_prepare drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x5ff17b5c mlxsw_afa_block_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x049142eb mlxsw_core_bus_device_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x8d303b1b iscsi_pool_free drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x8679b88d ip_mc_inc_group vmlinux EXPORT_SYMBOL
+0xab00d0e4 tcp_orphan_count vmlinux EXPORT_SYMBOL_GPL
+0x875582b7 nvmem_del_cell_table vmlinux EXPORT_SYMBOL_GPL
+0x7befed31 input_setup_polling vmlinux EXPORT_SYMBOL
+0x8d235818 usb_enable_intel_xhci_ports vmlinux EXPORT_SYMBOL_GPL
+0xe9155d91 ata_sff_data_xfer vmlinux EXPORT_SYMBOL_GPL
+0xa603e4e8 drm_dp_link_power_up vmlinux EXPORT_SYMBOL
+0x15049889 fb_center_logo vmlinux EXPORT_SYMBOL
+0xfd06d862 request_key_with_auxdata vmlinux EXPORT_SYMBOL
+0x996b7825 fat_attach vmlinux EXPORT_SYMBOL_GPL
+0xe9c9f023 sysfs_remove_bin_file vmlinux EXPORT_SYMBOL_GPL
+0x41b5e2df inode_nohighmem vmlinux EXPORT_SYMBOL
+0x92f09fd2 file_write_and_wait_range vmlinux EXPORT_SYMBOL
+0x0d7d10ea register_wide_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL
+0x4b75bb56 osd_req_op_cls_request_data_pages net/ceph/libceph EXPORT_SYMBOL
+0xfc700b11 ieee802154_register_hw net/mac802154/mac802154 EXPORT_SYMBOL
+0xab004353 nft_set_lookup_global net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x01744556 line6_read_data sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL
+0xcc567869 md_new_event drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x7e308fc9 mlx4_xrcd_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xf8b85454 mlx4_gen_slaves_port_mgt_ev drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x656e4a6e snprintf vmlinux EXPORT_SYMBOL
+0x711e9723 tcf_exts_validate vmlinux EXPORT_SYMBOL
+0x7388cf68 hid_dump_device vmlinux EXPORT_SYMBOL_GPL
+0x6a13bf66 wakeup_source_unregister vmlinux EXPORT_SYMBOL_GPL
+0xac499e46 fwnode_graph_get_endpoint_by_id vmlinux EXPORT_SYMBOL_GPL
+0xabcfa03b __tracepoint_block_rq_remap vmlinux EXPORT_SYMBOL_GPL
+0xa313cbb8 proc_mkdir_mode vmlinux EXPORT_SYMBOL
+0x62d26c55 tag_pages_for_writeback vmlinux EXPORT_SYMBOL
+0x182d935a rpcauth_destroy_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x2fa0c514 hinic_support_fc drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x35156909 nvmem_register vmlinux EXPORT_SYMBOL_GPL
+0x87564cb5 ata_scsi_simulate vmlinux EXPORT_SYMBOL_GPL
+0x4e9ec3ce tty_port_link_device vmlinux EXPORT_SYMBOL_GPL
+0x684289ba pinctrl_find_and_add_gpio_range vmlinux EXPORT_SYMBOL_GPL
+0x1eaec09e sbitmap_get vmlinux EXPORT_SYMBOL_GPL
+0x75133f6e visitor128 vmlinux EXPORT_SYMBOL_GPL
+0x11c23240 crypto_attr_u32 vmlinux EXPORT_SYMBOL_GPL
+0x1ed2147c PDE_DATA vmlinux EXPORT_SYMBOL
+0xe3c7d63c fscrypt_free_bounce_page vmlinux EXPORT_SYMBOL
+0x9a362598 follow_pfn vmlinux EXPORT_SYMBOL
+0x5cd4fcf4 follow_pte vmlinux EXPORT_SYMBOL_GPL
+0xb24c129b make_kuid vmlinux EXPORT_SYMBOL
+0x1ee7d3cd hrtimer_init vmlinux EXPORT_SYMBOL_GPL
+0x0ea34a66 free_sja1000dev drivers/net/can/sja1000/sja1000 EXPORT_SYMBOL_GPL
+0x85ad70cc m_can_class_resume drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL
+0x048cc907 iscsi_session_teardown drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x0b3a31de __napi_alloc_skb vmlinux EXPORT_SYMBOL
+0x32621043 usb_disable_xhci_ports vmlinux EXPORT_SYMBOL_GPL
+0xebfae55c hisi_sas_get_ata_protocol vmlinux EXPORT_SYMBOL_GPL
+0x0036f712 scsi_report_opcode vmlinux EXPORT_SYMBOL
+0x492aad70 pm_clk_create vmlinux EXPORT_SYMBOL_GPL
+0x9305bf68 find_next_and_bit vmlinux EXPORT_SYMBOL
+0x59d36b60 blk_mq_freeze_queue_wait_timeout vmlinux EXPORT_SYMBOL_GPL
+0x4f254150 file_remove_privs vmlinux EXPORT_SYMBOL
+0xf2117a0d rds_rdma_send_complete net/rds/rds EXPORT_SYMBOL_GPL
+0xd51cc493 nft_set_elem_destroy net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x774b0ba2 snd_card_register sound/core/snd EXPORT_SYMBOL
+0xe0a5d77f nfs_pageio_init_read fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x7bc4f569 udp_seq_ops vmlinux EXPORT_SYMBOL
+0x1b6314fd in_aton vmlinux EXPORT_SYMBOL
+0x0e1c12c5 neigh_resolve_output vmlinux EXPORT_SYMBOL
+0x8516c14a flow_get_u32_dst vmlinux EXPORT_SYMBOL
+0xcd37479e ptp_clock_unregister vmlinux EXPORT_SYMBOL
+0xfc4152fc ec_read vmlinux EXPORT_SYMBOL
+0xdb0aa06d cfb_imageblit vmlinux EXPORT_SYMBOL
+0xa424c4e3 fuse_fill_super_common vmlinux EXPORT_SYMBOL_GPL
+0x0f6c0950 fuse_dev_alloc vmlinux EXPORT_SYMBOL_GPL
+0x5b024f89 sysfs_unbreak_active_protection vmlinux EXPORT_SYMBOL_GPL
+0x0d043075 __mnt_is_readonly vmlinux EXPORT_SYMBOL_GPL
+0x02b24cd3 __tracepoint_cpu_idle vmlinux EXPORT_SYMBOL_GPL
+0x5172d067 vsock_deliver_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xa714246d nf_send_unreach net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL
+0x07dd0e85 nf_nat_irc_hook net/netfilter/nf_conntrack_irc EXPORT_SYMBOL_GPL
+0xa1bc096c nf_nat_ftp_hook net/netfilter/nf_conntrack_ftp EXPORT_SYMBOL_GPL
+0xf7f464db nf_ct_ext_add net/netfilter/nf_conntrack EXPORT_SYMBOL
+0xf92682b5 mddev_resume drivers/md/md-mod EXPORT_SYMBOL_GPL
+0xf65e0b62 config_ep_by_speed_and_alt drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xb72651fe ipmi_add_smi drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xf955e9c5 bprintf vmlinux EXPORT_SYMBOL_GPL
+0xc22720c5 tcp_close vmlinux EXPORT_SYMBOL
+0x6c66c53c inet_add_offload vmlinux EXPORT_SYMBOL
+0x724b1178 __scm_send vmlinux EXPORT_SYMBOL
+0xa3733f19 skb_try_coalesce vmlinux EXPORT_SYMBOL
+0x2964a727 get_thermal_instance vmlinux EXPORT_SYMBOL
+0x953fabf6 usb_register_dev vmlinux EXPORT_SYMBOL_GPL
+0x1bd0673f scsi_dma_map vmlinux EXPORT_SYMBOL
+0x44f5bb6a pnp_register_driver vmlinux EXPORT_SYMBOL
+0x52d717da xz_dec_init vmlinux EXPORT_SYMBOL
+0xec0c727a try_to_free_buffers vmlinux EXPORT_SYMBOL
+0x032ed7c9 xattr_full_name vmlinux EXPORT_SYMBOL
+0x1d2d6d15 wait_iff_congested vmlinux EXPORT_SYMBOL
+0x7e5261de blk_trace_setup vmlinux EXPORT_SYMBOL_GPL
+0x4f6a07fe show_rcu_gp_kthreads vmlinux EXPORT_SYMBOL_GPL
+0xfe039e4d __put_cred vmlinux EXPORT_SYMBOL
+0x07245aa8 ceph_auth_add_authorizer_challenge net/ceph/libceph EXPORT_SYMBOL
+0x3bdb4d54 iscsit_cause_connection_reinstatement drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x88d20737 ping_getfrag vmlinux EXPORT_SYMBOL_GPL
+0xa410a295 devlink_region_destroy vmlinux EXPORT_SYMBOL_GPL
+0xbe2f4456 skb_csum_hwoffload_help vmlinux EXPORT_SYMBOL
+0x2dd369d5 of_root vmlinux EXPORT_SYMBOL
+0x96d6ccc0 dm_path_uevent vmlinux EXPORT_SYMBOL_GPL
+0x47f985aa drm_edid_duplicate vmlinux EXPORT_SYMBOL
+0x5fc3a33f __drm_mm_interval_first vmlinux EXPORT_SYMBOL
+0x61e21286 serial8250_clear_and_reinit_fifos vmlinux EXPORT_SYMBOL_GPL
+0xe94f05d9 register_framebuffer vmlinux EXPORT_SYMBOL
+0x69838c28 crypto_unregister_templates vmlinux EXPORT_SYMBOL_GPL
+0x83378b3b dquot_free_inode vmlinux EXPORT_SYMBOL
+0x2531bb17 migrate_page_states vmlinux EXPORT_SYMBOL
+0x5cf70c95 vsock_find_bound_socket net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xedd092d5 power_supply_notifier vmlinux EXPORT_SYMBOL_GPL
+0xf1669344 ata_std_sched_eh vmlinux EXPORT_SYMBOL_GPL
+0x4574d65a pm_generic_suspend_late vmlinux EXPORT_SYMBOL_GPL
+0xecfca568 drm_fb_helper_lastclose vmlinux EXPORT_SYMBOL
+0x3cd06035 add_input_randomness vmlinux EXPORT_SYMBOL_GPL
+0x568995d8 regulator_enable_regmap vmlinux EXPORT_SYMBOL_GPL
+0xa52cc31c clk_gate_ops vmlinux EXPORT_SYMBOL_GPL
+0x0fcc3c25 pinctrl_force_sleep vmlinux EXPORT_SYMBOL_GPL
+0xbca858b2 iov_iter_advance vmlinux EXPORT_SYMBOL
+0x4bef1c67 empty_name vmlinux EXPORT_SYMBOL
+0x2c81494e xdr_set_scratch_buffer net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8bed3afc nf_log_dump_vlan net/netfilter/nf_log_common EXPORT_SYMBOL_GPL
+0x06ce2bbd transport_lookup_tmr_lun drivers/target/target_core_mod EXPORT_SYMBOL
+0x665d64c7 transport_lookup_cmd_lun drivers/target/target_core_mod EXPORT_SYMBOL
+0x65bd0519 register_cdrom drivers/cdrom/cdrom EXPORT_SYMBOL
+0x9d71f325 hisi_sas_scan_finished vmlinux EXPORT_SYMBOL_GPL
+0xe834ddfa scsi_unblock_requests vmlinux EXPORT_SYMBOL
+0xa831f1f5 device_remove_bin_file vmlinux EXPORT_SYMBOL_GPL
+0x7e548e96 mipi_dsi_generic_read vmlinux EXPORT_SYMBOL
+0x42defe70 drm_hdmi_avi_infoframe_content_type vmlinux EXPORT_SYMBOL
+0x20720f9c tty_port_free_xmit_buf vmlinux EXPORT_SYMBOL
+0x4e3541e9 pci_irq_get_node vmlinux EXPORT_SYMBOL
+0x8cccfe81 devm_gpiochip_add_data vmlinux EXPORT_SYMBOL_GPL
+0xc97a00c9 alarm_try_to_cancel vmlinux EXPORT_SYMBOL_GPL
+0xaf5bf6ef nfs_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd8192873 nf_ct_deliver_cached_events net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x113d203e mtd_unlock drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0xa2ea5542 dm_bitset_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa78c5aaa usb_composite_unregister drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x30a2d54a xfrm_replay_seqhi vmlinux EXPORT_SYMBOL
+0x9486b6bd tcp_enter_cwr vmlinux EXPORT_SYMBOL
+0x343e8e2f hidraw_report_event vmlinux EXPORT_SYMBOL_GPL
+0x28cb7cef usb_ep_fifo_status vmlinux EXPORT_SYMBOL_GPL
+0xa40ff01b acpi_dbg_layer vmlinux EXPORT_SYMBOL
+0x3c212744 sbitmap_del_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x9e9fdd9d memunmap vmlinux EXPORT_SYMBOL
+0x4d924f20 memremap vmlinux EXPORT_SYMBOL
+0x28aa6a67 call_rcu vmlinux EXPORT_SYMBOL_GPL
+0xc3bf1dc1 devm_request_any_context_irq vmlinux EXPORT_SYMBOL
+0xc2d217e3 ieee802154_hdr_peek net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0x9f1fd519 ieee802154_hdr_pull net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0x7d6290eb ieee802154_hdr_push net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0xbb461fb7 dm_bitset_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x378bd458 mlx5_cmd_free_uar drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x60361bca mlx4_mtt_init drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x45975c42 mlx4_get_cpu_rmap drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x8d4e047b nvme_alloc_request drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xb605aeff hwrng_unregister drivers/char/hw_random/rng-core EXPORT_SYMBOL_GPL
+0x7e86076b pci_try_reset_function vmlinux EXPORT_SYMBOL_GPL
+0x46d23956 fs_parse vmlinux EXPORT_SYMBOL
+0x47599cf0 dccp_reqsk_init net/dccp/dccp EXPORT_SYMBOL_GPL
+0x2be626d3 ip_tunnel_change_mtu net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x92dcaed9 snd_pcm_release_substream sound/core/snd-pcm EXPORT_SYMBOL
+0x0e1a933c parport_remove_port drivers/parport/parport EXPORT_SYMBOL
+0xec75766f ipvlan_link_register drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x97d0ba59 mlx5_set_port_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x0f44ad32 xfrm6_rcv_spi vmlinux EXPORT_SYMBOL
+0x00db2663 tcf_exts_change vmlinux EXPORT_SYMBOL
+0xaca162b5 of_device_modalias vmlinux EXPORT_SYMBOL_GPL
+0x09a4ade0 drm_dp_mst_get_port_malloc vmlinux EXPORT_SYMBOL
+0xa53f61b8 disk_stack_limits vmlinux EXPORT_SYMBOL
+0x9879932b crypto_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x02900331 padata_free_shell vmlinux EXPORT_SYMBOL
+0x6be37568 dm_dirty_log_destroy drivers/md/dm-log EXPORT_SYMBOL
+0xbad420f2 nvme_submit_sync_cmd drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xf49293d4 fib_table_lookup vmlinux EXPORT_SYMBOL_GPL
+0xcb2361ef skb_mpls_update_lse vmlinux EXPORT_SYMBOL_GPL
+0x5d2631d5 pskb_put vmlinux EXPORT_SYMBOL_GPL
+0xa68281d8 __efivar_entry_iter vmlinux EXPORT_SYMBOL_GPL
+0x816a41ca cpufreq_update_limits vmlinux EXPORT_SYMBOL_GPL
+0x6e0f6fe0 ata_sas_slave_configure vmlinux EXPORT_SYMBOL_GPL
+0x0b57b8fe ata_qc_complete vmlinux EXPORT_SYMBOL_GPL
+0x8ad48baf device_reprobe vmlinux EXPORT_SYMBOL_GPL
+0xe7ac8cc9 drm_client_framebuffer_delete vmlinux EXPORT_SYMBOL
+0x319a1b56 virtio_finalize_features vmlinux EXPORT_SYMBOL_GPL
+0x2f5619d8 virtio_config_enable vmlinux EXPORT_SYMBOL_GPL
+0x479c3c86 find_next_zero_bit vmlinux EXPORT_SYMBOL
+0xd9d0c64a inc_nlink vmlinux EXPORT_SYMBOL
+0x8a1ab4ee timeval_to_jiffies vmlinux EXPORT_SYMBOL
+0xc3dc42fd smpboot_unregister_percpu_thread vmlinux EXPORT_SYMBOL_GPL
+0xcf048a91 sw842_compress lib/842/842_compress EXPORT_SYMBOL_GPL
+0xa5f71562 nft_reject_policy net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x7138a0f8 snd_ctl_new1 sound/core/snd EXPORT_SYMBOL
+0x1e491a04 ib_unmap_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7774620f dm_rh_stop_recovery drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x9a5bc377 __netlink_dump_start vmlinux EXPORT_SYMBOL
+0xfc81da5d phy_write_mmd vmlinux EXPORT_SYMBOL
+0x017939f8 devm_drm_panel_bridge_add vmlinux EXPORT_SYMBOL
+0xbcdd5b99 iommu_group_set_name vmlinux EXPORT_SYMBOL_GPL
+0x1f5a2cff security_task_getsecid vmlinux EXPORT_SYMBOL
+0xe310bae4 seq_open vmlinux EXPORT_SYMBOL
+0x032b8f30 mnt_drop_write vmlinux EXPORT_SYMBOL_GPL
+0x92c70418 vmf_insert_pfn_prot vmlinux EXPORT_SYMBOL
+0xaf793668 __alloc_percpu_gfp vmlinux EXPORT_SYMBOL_GPL
+0xb359980c ceph_osdc_readpages net/ceph/libceph EXPORT_SYMBOL
+0xdec4793b rpc_uaddr2sockaddr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x85a187b4 nft_fib6_eval_type net/ipv6/netfilter/nft_fib_ipv6 EXPORT_SYMBOL_GPL
+0xdbec9686 nft_fib4_eval_type net/ipv4/netfilter/nft_fib_ipv4 EXPORT_SYMBOL_GPL
+0xee4397c4 tap_free_minor drivers/net/tap EXPORT_SYMBOL_GPL
+0x9000200f iscsi_eh_device_reset drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x19e016ce fscache_mark_page_cached fs/fscache/fscache EXPORT_SYMBOL
+0xd1e246a2 xt_compat_unlock vmlinux EXPORT_SYMBOL_GPL
+0x6ae0f4d5 cpufreq_freq_transition_begin vmlinux EXPORT_SYMBOL_GPL
+0x8d9ca0e6 dma_fence_enable_sw_signaling vmlinux EXPORT_SYMBOL
+0x5f27d6ab pci_bus_type vmlinux EXPORT_SYMBOL
+0x9f9cca9f simple_readpage vmlinux EXPORT_SYMBOL
+0x74080358 __page_frag_cache_drain vmlinux EXPORT_SYMBOL
+0x0465956b __lock_page vmlinux EXPORT_SYMBOL
+0x0514bc90 ring_buffer_read vmlinux EXPORT_SYMBOL_GPL
+0x9e676280 synchronize_srcu_expedited vmlinux EXPORT_SYMBOL_GPL
+0xc8e3332b raid6_gflog lib/raid6/raid6_pq EXPORT_SYMBOL
+0x7ebf8210 osd_req_op_cls_request_data_bvecs net/ceph/libceph EXPORT_SYMBOL
+0xa909cfc5 vhost_work_init drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xda0d50ec ib_sa_cancel_query drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x81351174 nfs_close_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x9f306e11 tcp_cong_avoid_ai vmlinux EXPORT_SYMBOL_GPL
+0xcabe04de cpuidle_resume_and_unlock vmlinux EXPORT_SYMBOL_GPL
+0xe1090e41 usb_altnum_to_altsetting vmlinux EXPORT_SYMBOL_GPL
+0x41e5d246 pm_clk_runtime_resume vmlinux EXPORT_SYMBOL_GPL
+0x3d99abf3 dev_pm_qos_update_user_latency_tolerance vmlinux EXPORT_SYMBOL_GPL
+0x4ca63b3b pci_create_slot vmlinux EXPORT_SYMBOL_GPL
+0xd774957d mpi_write_to_sgl vmlinux EXPORT_SYMBOL_GPL
+0xf44d53da security_secid_to_secctx vmlinux EXPORT_SYMBOL
+0xa6bed8b8 follow_down vmlinux EXPORT_SYMBOL
+0xb5862f46 filemap_write_and_wait_range vmlinux EXPORT_SYMBOL
+0xf767998e rds_info_register_func net/rds/rds EXPORT_SYMBOL_GPL
+0xb0d858f3 rpcauth_lookupcred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x408254f9 vhost_chr_write_iter drivers/vhost/vhost EXPORT_SYMBOL
+0xaac32b9f md_reap_sync_thread drivers/md/md-mod EXPORT_SYMBOL
+0x941f3795 eth_get_headlen vmlinux EXPORT_SYMBOL
+0x22620a6a __regmap_init vmlinux EXPORT_SYMBOL_GPL
+0x409760b3 drm_atomic_get_old_private_obj_state vmlinux EXPORT_SYMBOL
+0xfab53ed9 pinctrl_gpio_can_use_line vmlinux EXPORT_SYMBOL_GPL
+0x360b1afe probe_irq_mask vmlinux EXPORT_SYMBOL
+0x59814a84 dccp_statistics net/dccp/dccp EXPORT_SYMBOL_GPL
+0xc4d05a2c ubi_leb_read drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL
+0x93fc8810 usb_gadget_vbus_draw vmlinux EXPORT_SYMBOL_GPL
+0x8d6f7dc6 genphy_read_status vmlinux EXPORT_SYMBOL
+0x24f63dcf ata_xfer_mask2mode vmlinux EXPORT_SYMBOL_GPL
+0x57aeb30f redraw_screen vmlinux EXPORT_SYMBOL
+0x9edbc5a9 tty_unregister_driver vmlinux EXPORT_SYMBOL
+0xe739ad94 virtqueue_detach_unused_buf vmlinux EXPORT_SYMBOL_GPL
+0xbd52541f pci_reenable_device vmlinux EXPORT_SYMBOL
+0x3603aff2 d_instantiate_new vmlinux EXPORT_SYMBOL
+0x76cd1189 invalidate_mapping_pages vmlinux EXPORT_SYMBOL
+0xd4a09fbf devm_release_resource vmlinux EXPORT_SYMBOL
+0xa4c8127c ZSTD_maxCLevel lib/zstd/zstd_compress EXPORT_SYMBOL
+0x5b5a4341 ieee802154_alloc_hw net/mac802154/mac802154 EXPORT_SYMBOL
+0x4f1c24c8 qlt_stop_phase2 drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL
+0x3ed59a51 qlt_stop_phase1 drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL
+0x4bf25041 st21nfca_dep_init drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0x8100a444 napi_disable vmlinux EXPORT_SYMBOL
+0xd93a5cb1 efivar_variable_is_removable vmlinux EXPORT_SYMBOL_GPL
+0x931a85ce efivar_work vmlinux EXPORT_SYMBOL_GPL
+0x56323a02 drm_connector_attach_content_protection_property vmlinux EXPORT_SYMBOL
+0x5096c6e4 drm_event_reserve_init_locked vmlinux EXPORT_SYMBOL
+0xf11c46ee drm_poll vmlinux EXPORT_SYMBOL
+0x411c57f5 __drm_atomic_helper_crtc_reset vmlinux EXPORT_SYMBOL
+0x44227b9c pci_enable_rom vmlinux EXPORT_SYMBOL_GPL
+0x5e82cc43 pinctrl_find_gpio_range_from_pin vmlinux EXPORT_SYMBOL_GPL
+0x7c173634 __bitmap_complement vmlinux EXPORT_SYMBOL
+0x9291cd3b memdup_user vmlinux EXPORT_SYMBOL
+0x90b3feb5 synproxy_recv_client_ack net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0xf0588047 hinic_get_chip_name_by_hwdev drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xcaabdade crypto_engine_alloc_init crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x4c3b4947 ping_get_port vmlinux EXPORT_SYMBOL_GPL
+0x9f135d1f raw_abort vmlinux EXPORT_SYMBOL_GPL
+0xcdb6adcc ras_userspace_consumers vmlinux EXPORT_SYMBOL_GPL
+0x72e8d59b thermal_cooling_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0xdf7f925e ttm_bo_add_to_lru vmlinux EXPORT_SYMBOL
+0xe3b4f8eb drm_framebuffer_plane_width vmlinux EXPORT_SYMBOL
+0x9797984e drm_gem_dmabuf_vunmap vmlinux EXPORT_SYMBOL
+0xec763dda drm_atomic_helper_connector_reset vmlinux EXPORT_SYMBOL
+0x6b70ba96 __clk_mux_determine_rate vmlinux EXPORT_SYMBOL_GPL
+0x1a806979 blk_mq_free_request vmlinux EXPORT_SYMBOL_GPL
+0xb48d4d22 security_sb_eat_lsm_opts vmlinux EXPORT_SYMBOL
+0xc280fb46 kdb_register vmlinux EXPORT_SYMBOL_GPL
+0x6563b018 del_timer vmlinux EXPORT_SYMBOL
+0x3a728fe2 dma_get_required_mask vmlinux EXPORT_SYMBOL_GPL
+0x4a40170b osd_req_op_xattr_init net/ceph/libceph EXPORT_SYMBOL
+0xc7cb6d22 map_destroy drivers/mtd/chips/chipreg EXPORT_SYMBOL
+0x93102584 target_register_template drivers/target/target_core_mod EXPORT_SYMBOL
+0xfaaa4831 ipmi_set_my_address drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xd3b4757c nfs_init_cinfo fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xf6533477 nf_log_unbind_pf vmlinux EXPORT_SYMBOL
+0xc654d3f4 lwtunnel_valid_encap_type vmlinux EXPORT_SYMBOL_GPL
+0xcca0ec3c usb_get_current_frame_number vmlinux EXPORT_SYMBOL_GPL
+0x5da7afa9 phy_resolve_aneg_pause vmlinux EXPORT_SYMBOL_GPL
+0x72ba599d pcie_update_link_speed vmlinux EXPORT_SYMBOL_GPL
+0x4389ddd5 blk_mq_start_request vmlinux EXPORT_SYMBOL
+0x49864401 rds_recv_incoming net/rds/rds EXPORT_SYMBOL_GPL
+0xf8ac8e17 vcc_release_async net/atm/atm EXPORT_SYMBOL
+0xed895597 nf_ct_expect_find_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xdb456df3 nf_ct_iterate_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xe80fd383 snd_pcm_lib_free_pages sound/core/snd-pcm EXPORT_SYMBOL
+0x79302720 transport_generic_request_failure drivers/target/target_core_mod EXPORT_SYMBOL
+0x65835607 __tracepoint_bcache_btree_write drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x51e8bdf3 pnfs_nfs_generic_sync fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x3f89f1ff tcp_v4_send_check vmlinux EXPORT_SYMBOL
+0x0d22d728 devlink_region_snapshot_create vmlinux EXPORT_SYMBOL_GPL
+0xddd56be0 dev_close vmlinux EXPORT_SYMBOL
+0x35255dbb dma_buf_put vmlinux EXPORT_SYMBOL_GPL
+0xceb99340 dma_buf_get vmlinux EXPORT_SYMBOL_GPL
+0x88e7fef6 drm_sched_fault vmlinux EXPORT_SYMBOL
+0x4da733f0 ttm_bo_manager_func vmlinux EXPORT_SYMBOL
+0xaf6cea1d drm_display_info_set_bus_formats vmlinux EXPORT_SYMBOL
+0xc378d0b4 pci_hp_add_bridge vmlinux EXPORT_SYMBOL_GPL
+0xa8c8d1e9 __mark_inode_dirty vmlinux EXPORT_SYMBOL
+0xdafcdc3a ktime_get_snapshot vmlinux EXPORT_SYMBOL_GPL
+0x188ea314 jiffies_to_timespec64 vmlinux EXPORT_SYMBOL
+0x4e85a397 snd_card_disconnect sound/core/snd EXPORT_SYMBOL
+0xce4b4daf iscsit_process_text_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xa916b694 strnlen vmlinux EXPORT_SYMBOL
+0x8c85e13b ip6_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0x8f031045 tcp_set_rcvlowat vmlinux EXPORT_SYMBOL
+0x4ea19d7f noop_qdisc vmlinux EXPORT_SYMBOL
+0x0e87ba78 sock_alloc_send_skb vmlinux EXPORT_SYMBOL
+0xff116fc4 dm_internal_suspend_noflush vmlinux EXPORT_SYMBOL_GPL
+0x0d2a1e73 dm_put vmlinux EXPORT_SYMBOL_GPL
+0xa3122741 firmware_request_cache vmlinux EXPORT_SYMBOL_GPL
+0x223596c0 acpi_bus_get_device vmlinux EXPORT_SYMBOL
+0x727242a9 sha256_update vmlinux EXPORT_SYMBOL
+0x4d779cbf blk_mq_init_allocated_queue vmlinux EXPORT_SYMBOL
+0x9498120f blk_mq_quiesce_queue_nowait vmlinux EXPORT_SYMBOL_GPL
+0x339f79c6 unregister_filesystem vmlinux EXPORT_SYMBOL
+0xce7bfe70 vm_brk vmlinux EXPORT_SYMBOL
+0x1d264e8d irq_domain_add_simple vmlinux EXPORT_SYMBOL_GPL
+0xb81685cf adjust_resource vmlinux EXPORT_SYMBOL
+0x0bc00f80 nf_nat_helper_try_module_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x637e061a dm_dirty_log_create drivers/md/dm-log EXPORT_SYMBOL
+0x275da71a crypto_poly1305_init crypto/poly1305_generic EXPORT_SYMBOL_GPL
+0x6de58162 configfs_unregister_default_group fs/configfs/configfs EXPORT_SYMBOL
+0x223bdfa3 xfrm_state_insert vmlinux EXPORT_SYMBOL
+0x1bb8cc09 metadata_dst_alloc vmlinux EXPORT_SYMBOL_GPL
+0x616966d3 devm_rc_allocate_device vmlinux EXPORT_SYMBOL_GPL
+0x67789648 __of_reset_control_get vmlinux EXPORT_SYMBOL_GPL
+0x9b4f6661 kmsg_dump_register vmlinux EXPORT_SYMBOL_GPL
+0x641b8f34 ceph_msg_data_add_bio net/ceph/libceph EXPORT_SYMBOL
+0x027ce1bf xprt_setup_backchannel net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x38000eb3 rpc_peeraddr2str net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x66d801b5 rdma_hold_gid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7bf7060b mlx4_SET_MCAST_FLTR drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xcbe56bc2 zs_get_total_pages mm/zsmalloc EXPORT_SYMBOL_GPL
+0xddf68fc6 xt_find_revision vmlinux EXPORT_SYMBOL_GPL
+0xda9f1f57 of_node_put vmlinux EXPORT_SYMBOL
+0x3541ef8e of_node_get vmlinux EXPORT_SYMBOL
+0xd860755b __tracepoint_spi_transfer_start vmlinux EXPORT_SYMBOL
+0x0ad2e80a ahci_reset_controller vmlinux EXPORT_SYMBOL_GPL
+0xfe80b89c platform_irq_count vmlinux EXPORT_SYMBOL_GPL
+0x9e1fcce8 __destroy_inode vmlinux EXPORT_SYMBOL
+0xf59e2113 nf_osf_find net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL
+0x432ab8f5 rdma_join_multicast drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x9ec7c2dc iw_cm_listen drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0xb51a2a35 mpt_event_register drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xf3164075 mlx4_srq_arm drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb415933e mdio_device_remove vmlinux EXPORT_SYMBOL
+0x9c2caa41 dma_resv_add_excl_fence vmlinux EXPORT_SYMBOL
+0x02293ac3 dma_fence_chain_ops vmlinux EXPORT_SYMBOL
+0xa54fd072 device_property_present vmlinux EXPORT_SYMBOL_GPL
+0x95e4bef3 drm_plane_create_blend_mode_property vmlinux EXPORT_SYMBOL
+0x6d2e7058 drm_legacy_pci_exit vmlinux EXPORT_SYMBOL
+0x8b24b47b pci_bus_add_devices vmlinux EXPORT_SYMBOL
+0xf8a9ecff blk_rq_unmap_user vmlinux EXPORT_SYMBOL
+0x54078814 crypto_mod_get vmlinux EXPORT_SYMBOL_GPL
+0x225f68b9 dma_direct_unmap_sg vmlinux EXPORT_SYMBOL
+0x0bc477a2 irq_set_irq_type vmlinux EXPORT_SYMBOL
+0xce2840e7 irq_set_irq_wake vmlinux EXPORT_SYMBOL
+0x4053e2de cpu_hwcap_keys vmlinux EXPORT_SYMBOL
+0x38861500 init_rs_gfp lib/reed_solomon/reed_solomon EXPORT_SYMBOL_GPL
+0x76d7893c nf_flow_table_init net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0xc31f71f6 snd_rawmidi_drop_output sound/core/snd-rawmidi EXPORT_SYMBOL
+0xbc3134ad dm_bio_prison_alloc_cell_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x9cbf026d mlxsw_afa_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x0e81c09c mlxsw_afk_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xc3331d02 fc_remove_host drivers/scsi/scsi_transport_fc EXPORT_SYMBOL
+0x75d8ef8d of_platform_device_destroy vmlinux EXPORT_SYMBOL_GPL
+0x90e68723 spi_res_add vmlinux EXPORT_SYMBOL_GPL
+0xade31c6b ata_sas_port_init vmlinux EXPORT_SYMBOL_GPL
+0x853eada1 drm_get_cea_aspect_ratio vmlinux EXPORT_SYMBOL
+0x11287c34 drm_dp_link_power_down vmlinux EXPORT_SYMBOL
+0x3eff93ff __remove_inode_hash vmlinux EXPORT_SYMBOL
+0x16b3acb1 l2tp_session_free net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0xf52e2c38 mlx5_core_destroy_mkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x481bc57f device_match_acpi_dev vmlinux EXPORT_SYMBOL
+0x8ed7488e __pci_reset_function_locked vmlinux EXPORT_SYMBOL_GPL
+0x6128b5fc __printk_ratelimit vmlinux EXPORT_SYMBOL
+0x5e83c235 __percpu_up_read vmlinux EXPORT_SYMBOL_GPL
+0xe9fe18b0 init_rs_non_canonical lib/reed_solomon/reed_solomon EXPORT_SYMBOL_GPL
+0xfeefc65f xdr_read_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x73ac90ee mlx5_get_fdb_sub_ns drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x5d9ddace hinic_get_sq_hw_ci drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xcb64d07b tcf_chain_put_by_act vmlinux EXPORT_SYMBOL
+0xd04c1a64 sysctl_devconf_inherit_init_net vmlinux EXPORT_SYMBOL
+0x30bdffe4 iscsi_dbg_trace vmlinux EXPORT_SYMBOL_GPL
+0xdc9d36bd bdev_dax_pgoff vmlinux EXPORT_SYMBOL
+0x152f33f8 pm_generic_restore_early vmlinux EXPORT_SYMBOL_GPL
+0x3dd9df98 drm_property_lookup_blob vmlinux EXPORT_SYMBOL
+0x8900489a drm_modeset_unlock vmlinux EXPORT_SYMBOL
+0x78f2fde0 acpi_dma_request_slave_chan_by_name vmlinux EXPORT_SYMBOL_GPL
+0x5f22c39f clk_hw_get_parent_by_index vmlinux EXPORT_SYMBOL_GPL
+0xb78debe3 LZ4_decompress_fast_usingDict vmlinux EXPORT_SYMBOL
+0xe227569e blk_mq_can_queue vmlinux EXPORT_SYMBOL
+0x94bf03ca utf8_to_utf32 vmlinux EXPORT_SYMBOL
+0x9d09e8ae ring_buffer_event_data vmlinux EXPORT_SYMBOL_GPL
+0xe48e10b5 dm_dirty_log_type_unregister drivers/md/dm-log EXPORT_SYMBOL
+0xb750a951 hinic_set_func_capture_en drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xc4f437e8 fcoe_fcf_device_add drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0x61186c5b skb_vlan_pop vmlinux EXPORT_SYMBOL
+0x3bc18f08 iscsi_create_session vmlinux EXPORT_SYMBOL_GPL
+0x97213938 __pm_runtime_disable vmlinux EXPORT_SYMBOL_GPL
+0x127ce0fa paste_selection vmlinux EXPORT_SYMBOL_GPL
+0xd9440aa5 crypto_sha1_update vmlinux EXPORT_SYMBOL
+0xd04c7414 __tracepoint_kfree vmlinux EXPORT_SYMBOL
+0x47b6de70 perf_aux_output_end vmlinux EXPORT_SYMBOL_GPL
+0xd4387233 bpf_prog_select_runtime vmlinux EXPORT_SYMBOL_GPL
+0xed814ec1 kmsg_dump_unregister vmlinux EXPORT_SYMBOL_GPL
+0xb9f012b1 override_creds vmlinux EXPORT_SYMBOL
+0x131db64a system_long_wq vmlinux EXPORT_SYMBOL_GPL
+0xa555f7b2 rxrpc_kernel_set_tx_length net/rxrpc/rxrpc EXPORT_SYMBOL
+0xed44280b nf_nat_helper_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x50dbc447 ib_flush_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7cb0533d mlx4_get_base_qpn drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xc078482c fcoe_ctlr_get_lesb drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL
+0xdb55c076 radix_tree_iter_resume vmlinux EXPORT_SYMBOL
+0xb5996861 __ip_queue_xmit vmlinux EXPORT_SYMBOL
+0x3b5b01b1 input_register_handle vmlinux EXPORT_SYMBOL
+0x8d22bb58 iommu_group_alloc vmlinux EXPORT_SYMBOL_GPL
+0x89008c09 clk_mux_ops vmlinux EXPORT_SYMBOL_GPL
+0x6259d291 clk_restore_context vmlinux EXPORT_SYMBOL_GPL
+0x10598c69 acpi_device_hid vmlinux EXPORT_SYMBOL
+0x8a45a555 acpi_unregister_wakeup_handler vmlinux EXPORT_SYMBOL_GPL
+0x73cb2bf8 blkdev_issue_zeroout vmlinux EXPORT_SYMBOL
+0xf9b7396e get_tree_bdev vmlinux EXPORT_SYMBOL
+0xf8990351 handle_level_irq vmlinux EXPORT_SYMBOL_GPL
+0x3c8d7111 ceph_get_num_objects net/ceph/libceph EXPORT_SYMBOL
+0x2e094bc4 snd_pcm_stream_unlock_irq sound/core/snd-pcm EXPORT_SYMBOL_GPL
+0x5a7ad8fc bch_bset_insert drivers/md/bcache/bcache EXPORT_SYMBOL
+0x48530ab2 mlxsw_core_skb_receive drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb98c5b60 hinic_be32_to_cpu drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x96eb18e6 iscsi_eh_abort drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xfb86b96f dlm_errname fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x19e26182 nfs4_init_deviceid_node fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xc43e03f3 __fscache_register_netfs fs/fscache/fscache EXPORT_SYMBOL
+0xc781bd9f rfkill_resume_polling vmlinux EXPORT_SYMBOL
+0xd31ccb06 of_machine_is_compatible vmlinux EXPORT_SYMBOL
+0x92fb13b3 phy_mac_interrupt vmlinux EXPORT_SYMBOL
+0x03943e2a ata_dev_printk vmlinux EXPORT_SYMBOL
+0x35418619 drm_sched_entity_init vmlinux EXPORT_SYMBOL
+0x04df8fbc lzo1x_decompress_safe vmlinux EXPORT_SYMBOL_GPL
+0xdfe0f93e blkcg_deactivate_policy vmlinux EXPORT_SYMBOL_GPL
+0xabc9daae __bio_clone_fast vmlinux EXPORT_SYMBOL
+0x1338028b kern_unmount vmlinux EXPORT_SYMBOL
+0x0cd40f45 ceph_osdc_put_request net/ceph/libceph EXPORT_SYMBOL
+0xa488334d ceph_con_init net/ceph/libceph EXPORT_SYMBOL
+0xadb4caa8 ip_vs_proto_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xe62c1ea0 mtd_table_mutex drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x9f40b894 iscsit_add_reject drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xe278bd6d __tracepoint_bcache_cache_insert drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xf18798e8 __fscache_read_or_alloc_pages fs/fscache/fscache EXPORT_SYMBOL
+0x41926498 usb_control_msg vmlinux EXPORT_SYMBOL_GPL
+0x3b6fc6a1 drm_atomic_set_mode_prop_for_crtc vmlinux EXPORT_SYMBOL
+0x14de4b4d nla_append vmlinux EXPORT_SYMBOL
+0x7fba95cb pcim_iomap_regions vmlinux EXPORT_SYMBOL
+0xe151bae5 jbd2_journal_lock_updates vmlinux EXPORT_SYMBOL
+0x279052ba vfs_ioc_setflags_prepare vmlinux EXPORT_SYMBOL
+0x44710b27 vfs_llseek vmlinux EXPORT_SYMBOL
+0xdc1c9cbb __page_file_index vmlinux EXPORT_SYMBOL_GPL
+0x14b7d52d __irq_set_handler vmlinux EXPORT_SYMBOL_GPL
+0xbcc86d96 snd_dma_free_pages sound/core/snd-pcm EXPORT_SYMBOL
+0x867e87eb dm_bufio_get_dm_io_client drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xede2c946 mlx4_phys_to_slaves_pport_actv drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x1ce98a99 of_platform_depopulate vmlinux EXPORT_SYMBOL_GPL
+0x11e0ec41 dm_read_arg vmlinux EXPORT_SYMBOL
+0x81b453b5 ttm_io_prot vmlinux EXPORT_SYMBOL
+0x00db08f6 ttm_bo_wait vmlinux EXPORT_SYMBOL
+0xf6156052 ttm_bo_init vmlinux EXPORT_SYMBOL
+0x9c1fee2a devm_regulator_get_optional vmlinux EXPORT_SYMBOL_GPL
+0x5150f0dd iterate_fd vmlinux EXPORT_SYMBOL
+0x7ceaf0d5 generic_handle_irq vmlinux EXPORT_SYMBOL_GPL
+0x8b2ffd83 __cpu_present_mask vmlinux EXPORT_SYMBOL
+0x1f03912b ZSTD_flushStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x9ca0fd3d vsock_stream_has_space net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xd21b44da snd_pcm_stream_lock_irq sound/core/snd-pcm EXPORT_SYMBOL_GPL
+0xecd63253 can_free_echo_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0x806b4846 nfs4_schedule_lease_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xbfacb837 xt_percpu_counter_free vmlinux EXPORT_SYMBOL_GPL
+0x5c2bcd37 bpf_warn_invalid_xdp_action vmlinux EXPORT_SYMBOL_GPL
+0x6d112619 of_find_node_opts_by_path vmlinux EXPORT_SYMBOL
+0x4347d9d9 iscsi_add_session vmlinux EXPORT_SYMBOL_GPL
+0x3956df6c device_property_read_u8_array vmlinux EXPORT_SYMBOL_GPL
+0xb2210d64 reset_control_deassert vmlinux EXPORT_SYMBOL_GPL
+0xb5230dd0 dquot_resume vmlinux EXPORT_SYMBOL
+0xe17e5220 blocking_notifier_chain_cond_register vmlinux EXPORT_SYMBOL_GPL
+0xd95ac116 vsock_addr_validate net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xf50473f6 nfc_hci_disconnect_all_gates net/nfc/hci/hci EXPORT_SYMBOL
+0x50bedc37 mddev_init_writes_pending drivers/md/md-mod EXPORT_SYMBOL_GPL
+0xa60e5aaf __xfrm_state_destroy vmlinux EXPORT_SYMBOL
+0x42f9fc30 tcp_v4_md5_hash_skb vmlinux EXPORT_SYMBOL
+0xfec14c45 bfifo_qdisc_ops vmlinux EXPORT_SYMBOL
+0x527e6a71 of_prop_next_u32 vmlinux EXPORT_SYMBOL_GPL
+0x0ec7740c usb_unpoison_urb vmlinux EXPORT_SYMBOL_GPL
+0x38461c83 tty_port_init vmlinux EXPORT_SYMBOL
+0xedd14fe0 complete_request_key vmlinux EXPORT_SYMBOL
+0x1f8544b8 panic_timeout vmlinux EXPORT_SYMBOL_GPL
+0x37746fde ZSTD_initDStream lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x30af45a1 ZSTD_initCStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x32b3fd58 mlx5_eq_notifier_unregister drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xb0e92fc2 hinic_free_db_addr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xc9fae756 ocfs2_cluster_connect fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x7107ea1a nf_queue vmlinux EXPORT_SYMBOL_GPL
+0x66017e2b __sock_recv_timestamp vmlinux EXPORT_SYMBOL_GPL
+0x51ca4829 usb_hcd_resume_root_hub vmlinux EXPORT_SYMBOL_GPL
+0xbe8c277c aead_init_geniv vmlinux EXPORT_SYMBOL_GPL
+0xe4873080 securityfs_remove vmlinux EXPORT_SYMBOL_GPL
+0x5dfb39a4 noop_fsync vmlinux EXPORT_SYMBOL
+0xca40fd51 list_lru_destroy vmlinux EXPORT_SYMBOL_GPL
+0xf008c2c2 rpc_count_iostats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xcc8dbd89 rpc_destroy_pipe_data net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0c29dfba mlx4_unicast_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x9c13f307 ndlc_open drivers/nfc/st-nci/st-nci EXPORT_SYMBOL
+0x481b1826 crypto_engine_exit crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xc8293284 pnfs_generic_pg_cleanup fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xa09d4e7f nfs4_proc_getdeviceinfo fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb6f1e3ec fib_rule_matchall vmlinux EXPORT_SYMBOL_GPL
+0x6579afca scsi_add_host_with_dma vmlinux EXPORT_SYMBOL
+0xd3d598b4 devres_remove_group vmlinux EXPORT_SYMBOL_GPL
+0xb35341c7 drm_atomic_private_obj_init vmlinux EXPORT_SYMBOL
+0x42e0a0d1 tpm_calc_ordinal_duration vmlinux EXPORT_SYMBOL_GPL
+0x6aa11aa6 sgl_free_n_order vmlinux EXPORT_SYMBOL
+0x28fdac57 blkdev_issue_write_same vmlinux EXPORT_SYMBOL
+0x12dc93e8 crypto_hash_walk_first vmlinux EXPORT_SYMBOL_GPL
+0x099b220e alloc_anon_inode vmlinux EXPORT_SYMBOL
+0xd39ba308 vfs_mkobj vmlinux EXPORT_SYMBOL
+0x8e751abc __ClearPageMovable vmlinux EXPORT_SYMBOL
+0x42493d75 vsock_remove_connected net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x62bbe76b snd_ctl_remove_id sound/core/snd EXPORT_SYMBOL
+0xe60e8bc1 mlxsw_env_get_module_eeprom drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x829e8851 mlxsw_afa_block_first_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x7d6e7cc9 hinic_set_func_nic_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x12807277 pnfs_report_layoutstat fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xf313da4e sha_transform vmlinux EXPORT_SYMBOL
+0x9cc8cea1 inet_sendpage vmlinux EXPORT_SYMBOL
+0x27d3d4d2 dev_set_allmulti vmlinux EXPORT_SYMBOL
+0xfc024026 dev_get_by_index_rcu vmlinux EXPORT_SYMBOL
+0x9b1d7d85 i2c_add_adapter vmlinux EXPORT_SYMBOL
+0x9db032e9 input_mt_report_finger_count vmlinux EXPORT_SYMBOL
+0x83013954 drm_kms_helper_poll_fini vmlinux EXPORT_SYMBOL
+0xee7d7deb gen_pool_dma_zalloc vmlinux EXPORT_SYMBOL
+0xe32ab4d8 xxh64_digest vmlinux EXPORT_SYMBOL
+0x4a96a8eb xxh32_digest vmlinux EXPORT_SYMBOL
+0xbb35675b __bitmap_intersects vmlinux EXPORT_SYMBOL
+0x04ea5d10 ksize vmlinux EXPORT_SYMBOL
+0x38869d88 kstat vmlinux EXPORT_SYMBOL
+0xcd792797 osd_req_op_extent_osd_data_bio net/ceph/libceph EXPORT_SYMBOL
+0xe02a4956 tap_get_minor drivers/net/tap EXPORT_SYMBOL_GPL
+0x372308c9 hinic_support_acl drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xfebb6207 hinic_clean_root_ctxt drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x061fe0a1 kobject_set_name vmlinux EXPORT_SYMBOL
+0x643467b5 dm_send_uevents vmlinux EXPORT_SYMBOL_GPL
+0xaf1d965f shmem_truncate_range vmlinux EXPORT_SYMBOL_GPL
+0x5b21ceff ring_buffer_iter_peek vmlinux EXPORT_SYMBOL_GPL
+0x6f12560a get_old_timespec32 vmlinux EXPORT_SYMBOL_GPL
+0xa5efbf4c async_synchronize_full vmlinux EXPORT_SYMBOL_GPL
+0x21dd46b3 mptscsih_bios_param drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x7182a7ae ipmi_smi_watcher_register drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x908a2c2e nfs4_schedule_migration_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x48c093fb _atomic_dec_and_lock_irqsave vmlinux EXPORT_SYMBOL
+0x0b93bf64 phy_modify_mmd_changed vmlinux EXPORT_SYMBOL_GPL
+0x886c587b ata_bmdma_irq_clear vmlinux EXPORT_SYMBOL_GPL
+0x77a9a843 of_clk_src_simple_get vmlinux EXPORT_SYMBOL_GPL
+0x1c58427f acpi_remove_notify_handler vmlinux EXPORT_SYMBOL
+0xef72e930 set_device_ro vmlinux EXPORT_SYMBOL
+0xaa230f88 perf_unregister_guest_info_callbacks vmlinux EXPORT_SYMBOL_GPL
+0x3d510a7b rcu_jiffies_till_stall_check vmlinux EXPORT_SYMBOL_GPL
+0x608741b5 __init_swait_queue_head vmlinux EXPORT_SYMBOL
+0xa3dfd14b atm_dev_release_vccs net/atm/atm EXPORT_SYMBOL
+0x24621ca3 dm_sm_disk_open drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xffc4444d fc_rport_recv_req drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x3258efbb ethtool_rx_flow_rule_destroy vmlinux EXPORT_SYMBOL
+0x165e8ed9 usb_anchor_urb vmlinux EXPORT_SYMBOL_GPL
+0x70c22fb9 drm_mode_object_put vmlinux EXPORT_SYMBOL
+0x5dc8ac6a tpmm_chip_alloc vmlinux EXPORT_SYMBOL_GPL
+0x28054e24 pci_clear_mwi vmlinux EXPORT_SYMBOL
+0x0ddb1cd7 llist_reverse_order vmlinux EXPORT_SYMBOL_GPL
+0x89207ac0 blk_rq_append_bio vmlinux EXPORT_SYMBOL
+0x3386572f shash_attr_alg vmlinux EXPORT_SYMBOL_GPL
+0x8e0001e7 migrate_page_move_mapping vmlinux EXPORT_SYMBOL
+0x63eb9355 panic_blink vmlinux EXPORT_SYMBOL
+0x11581d32 rds_info_deregister_func net/rds/rds EXPORT_SYMBOL_GPL
+0x9fc4fb42 ppp_register_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x21d7d648 st_nci_hci_load_session drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL
+0x1c039c69 ata_sas_port_stop vmlinux EXPORT_SYMBOL_GPL
+0xe62d6881 pci_cfg_access_trylock vmlinux EXPORT_SYMBOL_GPL
+0x82e0a733 __sync_dirty_buffer vmlinux EXPORT_SYMBOL
+0xf4fc2d6c __ring_buffer_alloc vmlinux EXPORT_SYMBOL_GPL
+0x5e332b52 __var_waitqueue vmlinux EXPORT_SYMBOL
+0x82072614 tasklet_kill vmlinux EXPORT_SYMBOL
+0x4ead79b2 ib_get_vf_config drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6d26bf52 md_bitmap_start_sync drivers/md/md-mod EXPORT_SYMBOL
+0x94de519d md_reload_sb drivers/md/md-mod EXPORT_SYMBOL
+0xb8691c85 fc_get_host_speed drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x69d9cfd9 fc_fc4_deregister_provider drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x252969a5 async_syndrome_val crypto/async_tx/async_pq EXPORT_SYMBOL_GPL
+0xffb7c514 ida_free vmlinux EXPORT_SYMBOL
+0xb6507e11 nf_ip_route vmlinux EXPORT_SYMBOL_GPL
+0x73264583 i2c_use_client vmlinux EXPORT_SYMBOL
+0xee3eeef4 scsi_print_command vmlinux EXPORT_SYMBOL
+0x025aa216 copy_reserved_iova vmlinux EXPORT_SYMBOL_GPL
+0xe2d0dfbc amba_driver_register vmlinux EXPORT_SYMBOL
+0xb0078c3d pci_try_set_mwi vmlinux EXPORT_SYMBOL
+0xf915179e refcount_dec_if_one vmlinux EXPORT_SYMBOL
+0x0d8e7083 blk_post_runtime_resume vmlinux EXPORT_SYMBOL
+0xa6c66d82 bio_endio vmlinux EXPORT_SYMBOL
+0x6d26f9e5 nr_execve_count vmlinux EXPORT_SYMBOL
+0xb7c69a63 unregister_vmap_purge_notifier vmlinux EXPORT_SYMBOL_GPL
+0xd08ef93d account_page_redirty vmlinux EXPORT_SYMBOL
+0x1e5b16ce ring_buffer_normalize_time_stamp vmlinux EXPORT_SYMBOL_GPL
+0x38b35e32 nft_fib_validate net/netfilter/nft_fib EXPORT_SYMBOL_GPL
+0x756ba3b9 nf_ct_expect_unregister_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x806c14c9 snd_pcm_stream_unlock_irqrestore sound/core/snd-pcm EXPORT_SYMBOL_GPL
+0x99e6aa60 __register_mtd_parser drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x7a1d0d3c target_alloc_sgl drivers/target/target_core_mod EXPORT_SYMBOL
+0x111ab12a dm_bufio_mark_partial_buffer_dirty drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0xc8bd466e mlxsw_core_port_eth_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x858a36f7 nfcmrvl_nci_recv_frame drivers/nfc/nfcmrvl/nfcmrvl EXPORT_SYMBOL_GPL
+0xd60736ec gf128mul_free_64k crypto/gf128mul EXPORT_SYMBOL
+0xff580d99 l3mdev_fib_table_rcu vmlinux EXPORT_SYMBOL_GPL
+0x8f3e4641 eth_prepare_mac_addr_change vmlinux EXPORT_SYMBOL
+0x7d7ed892 sk_stop_timer vmlinux EXPORT_SYMBOL
+0x618b93f7 of_platform_bus_probe vmlinux EXPORT_SYMBOL
+0xec02ebe0 phylink_init_eee vmlinux EXPORT_SYMBOL_GPL
+0x02744327 sas_rphy_remove vmlinux EXPORT_SYMBOL
+0x3adf897f drm_flip_work_queue_task vmlinux EXPORT_SYMBOL
+0x7f5b4fe4 sg_free_table vmlinux EXPORT_SYMBOL
+0x70845ca8 locks_remove_posix vmlinux EXPORT_SYMBOL
+0xb0b85f47 ring_buffer_iter_reset vmlinux EXPORT_SYMBOL_GPL
+0xf48ceebd net_cls_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x7b83128f flow_offload_teardown net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x1be0a316 nft_data_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x4e6e4b41 radix_tree_delete vmlinux EXPORT_SYMBOL
+0x1c98e0dc __skb_recv_udp vmlinux EXPORT_SYMBOL
+0x14a0c2d0 __phy_read_mmd vmlinux EXPORT_SYMBOL
+0xd0ab8d98 iscsi_unregister_transport vmlinux EXPORT_SYMBOL_GPL
+0xb9e66725 pm_generic_thaw_noirq vmlinux EXPORT_SYMBOL_GPL
+0x7a4b3a67 drm_atomic_state_init vmlinux EXPORT_SYMBOL
+0x28225658 pcie_port_service_unregister vmlinux EXPORT_SYMBOL
+0xb67fec0e uuid_parse vmlinux EXPORT_SYMBOL
+0x7e235eb6 sched_trace_rq_cpu vmlinux EXPORT_SYMBOL_GPL
+0x078b2336 nfnl_acct_find_get net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL
+0x85922237 mlx4_gen_pkey_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xccc1edbf hinic_mbox_ppf_to_vf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x8cff6a57 pn544_hci_remove drivers/nfc/pn544/pn544 EXPORT_SYMBOL
+0xd85c20cf tcp_slow_start vmlinux EXPORT_SYMBOL_GPL
+0x2ac3fb50 dev_activate vmlinux EXPORT_SYMBOL
+0xea4a02c7 __pneigh_lookup vmlinux EXPORT_SYMBOL_GPL
+0xc82812c9 device_get_named_child_node vmlinux EXPORT_SYMBOL_GPL
+0xff101efb fwnode_get_next_child_node vmlinux EXPORT_SYMBOL_GPL
+0x4f4ecb8d mipi_dsi_dcs_set_tear_scanline vmlinux EXPORT_SYMBOL
+0xcf11a549 drm_flip_work_allocate_task vmlinux EXPORT_SYMBOL
+0xd169c28c drm_driver_legacy_fb_format vmlinux EXPORT_SYMBOL
+0x41a5ca4a devm_iounmap vmlinux EXPORT_SYMBOL
+0x5bb837d5 devm_ioremap vmlinux EXPORT_SYMBOL
+0xf749debc md5_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x4bcc2662 mempool_init_node vmlinux EXPORT_SYMBOL
+0x294b9ea1 on_each_cpu vmlinux EXPORT_SYMBOL
+0x967e35d8 dma_set_mask vmlinux EXPORT_SYMBOL
+0x99a10d40 kvm_put_kvm vmlinux EXPORT_SYMBOL_GPL
+0x2747a86e snd_pcm_set_sync sound/core/snd-pcm EXPORT_SYMBOL
+0x638bffb7 target_free_sgl drivers/target/target_core_mod EXPORT_SYMBOL
+0xc7d6ba6a parport_find_number drivers/parport/parport EXPORT_SYMBOL
+0x822d1676 safe_candev_priv drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL
+0x74e5ff1a udpv6_encap_enable vmlinux EXPORT_SYMBOL
+0x7435f491 napi_hash_del vmlinux EXPORT_SYMBOL_GPL
+0x9d769630 skb_zerocopy vmlinux EXPORT_SYMBOL_GPL
+0xa7856098 cpu_topology vmlinux EXPORT_SYMBOL_GPL
+0xdf666902 drm_rotation_simplify vmlinux EXPORT_SYMBOL
+0xad09ee2c drm_gem_object_lookup vmlinux EXPORT_SYMBOL
+0x19f1260e nla_reserve vmlinux EXPORT_SYMBOL
+0x92ca567b bpf_offload_dev_match vmlinux EXPORT_SYMBOL_GPL
+0xb6a424e4 tracepoint_srcu vmlinux EXPORT_SYMBOL_GPL
+0x16316a10 ZSTD_getFrameContentSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x7cb1aea1 devlink_dpipe_header_ethernet vmlinux EXPORT_SYMBOL
+0xeee6f4cc rtnl_put_cacheinfo vmlinux EXPORT_SYMBOL_GPL
+0x93444164 __cpufreq_driver_target vmlinux EXPORT_SYMBOL_GPL
+0xea124bd1 gcd vmlinux EXPORT_SYMBOL_GPL
+0xec29fcdb crypto_hash_alg_has_setkey vmlinux EXPORT_SYMBOL_GPL
+0xde448ecd ihold vmlinux EXPORT_SYMBOL
+0x3096be16 names_cachep vmlinux EXPORT_SYMBOL
+0xa24ce73d invalidate_inode_pages2_range vmlinux EXPORT_SYMBOL_GPL
+0xa45c7b90 stack_trace_print vmlinux EXPORT_SYMBOL_GPL
+0xe87af6bd irq_domain_get_irq_data vmlinux EXPORT_SYMBOL_GPL
+0xbbcb339e cancel_delayed_work vmlinux EXPORT_SYMBOL
+0x2b4846a1 raid6_empty_zero_page lib/raid6/raid6_pq EXPORT_SYMBOL
+0x2d107b5e base_inv_old_true_key lib/test_static_key_base EXPORT_SYMBOL_GPL
+0xc8e96dea qword_addhex net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xeb8edf5a snd_info_create_card_entry sound/core/snd EXPORT_SYMBOL
+0x3f2690f2 nfs_check_flags fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xe86d8991 xfrm_policy_register_afinfo vmlinux EXPORT_SYMBOL
+0x4dae16e4 i2c_put_dma_safe_msg_buf vmlinux EXPORT_SYMBOL_GPL
+0xa63b5850 ata_qc_complete_multiple vmlinux EXPORT_SYMBOL_GPL
+0x2702ce07 pci_unmap_rom vmlinux EXPORT_SYMBOL
+0xa0ebd8ed sbitmap_queue_show vmlinux EXPORT_SYMBOL_GPL
+0x73a488e9 iov_iter_alignment vmlinux EXPORT_SYMBOL
+0xfba15807 crypto_unregister_shashes vmlinux EXPORT_SYMBOL_GPL
+0x706230fb crypto_unregister_ahashes vmlinux EXPORT_SYMBOL_GPL
+0x03031bdf key_instantiate_and_link vmlinux EXPORT_SYMBOL
+0x54431da3 dentry_path_raw vmlinux EXPORT_SYMBOL
+0x265bbef9 kexec_crash_loaded vmlinux EXPORT_SYMBOL_GPL
+0x9e579c03 mpt_get_msg_frame drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xc30f4de0 mlx4_mr_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xa941321f wimax_msg_alloc vmlinux EXPORT_SYMBOL_GPL
+0x2caf1724 ipv6_sock_mc_drop vmlinux EXPORT_SYMBOL
+0xef0568ba xfrm_sad_getinfo vmlinux EXPORT_SYMBOL
+0xd1f2eee2 nf_logger_find_get vmlinux EXPORT_SYMBOL_GPL
+0x83dd4c3b netif_napi_del vmlinux EXPORT_SYMBOL
+0xe233762a input_event_from_user vmlinux EXPORT_SYMBOL_GPL
+0x4484a5a4 wait_for_device_probe vmlinux EXPORT_SYMBOL_GPL
+0x52f6f2ec drm_crtc_add_crc_entry vmlinux EXPORT_SYMBOL_GPL
+0x5d8af088 tty_port_put vmlinux EXPORT_SYMBOL
+0xe91028b3 clk_register_fixed_factor vmlinux EXPORT_SYMBOL_GPL
+0xee58e970 fb_add_videomode vmlinux EXPORT_SYMBOL
+0x57f40167 crypto_register_alg vmlinux EXPORT_SYMBOL_GPL
+0x55171f9b debugfs_rename vmlinux EXPORT_SYMBOL_GPL
+0x4f1eb3a0 simple_open vmlinux EXPORT_SYMBOL
+0xfc957e7f single_open vmlinux EXPORT_SYMBOL
+0x42074830 __page_file_mapping vmlinux EXPORT_SYMBOL_GPL
+0x249f3fe1 fork_usermode_blob vmlinux EXPORT_SYMBOL_GPL
+0xfcdad6c3 nft_register_chain_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x198788b4 snd_lookup_oss_minor_data sound/core/snd EXPORT_SYMBOL
+0x4a3ea5c0 snd_request_card sound/core/snd EXPORT_SYMBOL
+0xfa31f885 mlx4_mw_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xfd063c1d __fscache_wait_on_invalidate fs/fscache/fscache EXPORT_SYMBOL
+0x7c8506f9 xfrm6_find_1stfragopt vmlinux EXPORT_SYMBOL
+0x5a1447ad sysctl_tcp_synack_rto_interval vmlinux EXPORT_SYMBOL
+0xe039c812 __page_pool_put_page vmlinux EXPORT_SYMBOL
+0x45334c8d of_graph_get_port_parent vmlinux EXPORT_SYMBOL
+0x4ebb2e40 of_get_next_parent vmlinux EXPORT_SYMBOL
+0x767a4603 drm_helper_disable_unused_functions vmlinux EXPORT_SYMBOL
+0x7757b51a clk_unregister vmlinux EXPORT_SYMBOL_GPL
+0xd0b74705 acpi_install_interface vmlinux EXPORT_SYMBOL
+0xfbbd41ca no_action vmlinux EXPORT_SYMBOL_GPL
+0xb9ed72f0 kthread_stop vmlinux EXPORT_SYMBOL
+0x0c950496 l2tp_nl_register_ops net/l2tp/l2tp_netlink EXPORT_SYMBOL_GPL
+0x44e9b446 vhost_poll_flush drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x5b4d6714 mlx4_pd_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xa81e9760 lockd_down fs/lockd/lockd EXPORT_SYMBOL_GPL
+0xdba979d2 tcp_sendmsg_locked vmlinux EXPORT_SYMBOL_GPL
+0xee1e4a4f dm_register_target vmlinux EXPORT_SYMBOL
+0x67334cad drm_fb_helper_set_suspend_unlocked vmlinux EXPORT_SYMBOL
+0x45081703 ec_get_handle vmlinux EXPORT_SYMBOL
+0x78a16f48 aes_decrypt vmlinux EXPORT_SYMBOL
+0x7481b148 refcount_add_checked vmlinux EXPORT_SYMBOL
+0x4389224b security_inode_create vmlinux EXPORT_SYMBOL_GPL
+0xdbf17a51 xprt_reserve_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xfe0ffe42 ax25_display_timer net/ax25/ax25 EXPORT_SYMBOL
+0x8bcae6ed nf_dup_ipv4 net/ipv4/netfilter/nf_dup_ipv4 EXPORT_SYMBOL_GPL
+0xe2d20225 nf_nat_ipv4_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x9094ab3e nf_ct_seqadj_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xfa24dcc7 hinic_set_interrupt_cfg drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x3fe35aea irq_bypass_unregister_consumer vmlinux EXPORT_SYMBOL_GPL
+0xc6d1b2a1 hid_register_report vmlinux EXPORT_SYMBOL_GPL
+0xef5d29ec rtc_set_alarm vmlinux EXPORT_SYMBOL_GPL
+0x34e08544 ahci_kick_engine vmlinux EXPORT_SYMBOL_GPL
+0x4e095f11 drm_get_format_name vmlinux EXPORT_SYMBOL
+0x76a19c62 screen_glyph_unicode vmlinux EXPORT_SYMBOL_GPL
+0xc31ee8fd devm_fwnode_get_index_gpiod_from_child vmlinux EXPORT_SYMBOL_GPL
+0x2edbeaf7 hex2bin vmlinux EXPORT_SYMBOL
+0xa3949095 crypto_default_rng vmlinux EXPORT_SYMBOL_GPL
+0xccc90c7d locks_copy_conflock vmlinux EXPORT_SYMBOL
+0x0cf5cd56 d_make_root vmlinux EXPORT_SYMBOL
+0x6f2dd198 vfs_get_super vmlinux EXPORT_SYMBOL
+0xeeb31631 nfc_digital_register_device net/nfc/nfc_digital EXPORT_SYMBOL
+0x9de41185 ip_set_test net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x0b9f96d3 ib_sa_register_client drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xcefe01d4 inet_csk_listen_start vmlinux EXPORT_SYMBOL_GPL
+0x2875a315 utf32_to_utf8 vmlinux EXPORT_SYMBOL
+0xbac9fa96 blk_trace_remove vmlinux EXPORT_SYMBOL_GPL
+0xa3a6dbed cleanup_srcu_struct vmlinux EXPORT_SYMBOL_GPL
+0x643d9ba1 groups_free vmlinux EXPORT_SYMBOL
+0xac1a55be unregister_reboot_notifier vmlinux EXPORT_SYMBOL
+0x1af0a9b6 ceph_osdc_writepages net/ceph/libceph EXPORT_SYMBOL
+0xbd178539 capilib_release_appl drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x6da48cdb mptscsih_taskmgmt_complete drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0xbfb7df3c mlxsw_core_driver_priv drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x5e373fb4 gf128mul_64k_bbe crypto/gf128mul EXPORT_SYMBOL
+0x7410aba2 strreplace vmlinux EXPORT_SYMBOL
+0x026d2559 ip6_flush_pending_frames vmlinux EXPORT_SYMBOL_GPL
+0xcee88e7a of_overlay_fdt_apply vmlinux EXPORT_SYMBOL_GPL
+0x19d47cde __efivar_entry_get vmlinux EXPORT_SYMBOL_GPL
+0xe6f61369 regmap_register_patch vmlinux EXPORT_SYMBOL_GPL
+0x1b6e7907 device_add_groups vmlinux EXPORT_SYMBOL_GPL
+0x415a6565 tty_perform_flush vmlinux EXPORT_SYMBOL_GPL
+0x11329fab acpi_subsys_freeze vmlinux EXPORT_SYMBOL_GPL
+0x9f75839b dw_pcie_write_dbi vmlinux EXPORT_SYMBOL_GPL
+0xf0696401 acpi_pci_detect_ejectable vmlinux EXPORT_SYMBOL_GPL
+0x08e4fee3 gpiod_set_transitory vmlinux EXPORT_SYMBOL_GPL
+0x689ec7fe ip_vs_scheduler_err net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x03037654 nft_chain_validate net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xe4330a39 ipmi_unregister_smi drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x2a32cb5f __fscache_readpages_cancel fs/fscache/fscache EXPORT_SYMBOL
+0xd4bb4a82 inet6addr_validator_notifier_call_chain vmlinux EXPORT_SYMBOL
+0xc65e4e97 secure_dccp_sequence_number vmlinux EXPORT_SYMBOL
+0xee7eb9e1 pnp_platform_devices vmlinux EXPORT_SYMBOL
+0xcee1108f debugfs_remove_recursive vmlinux EXPORT_SYMBOL_GPL
+0x3bee8120 ebt_unregister_table_pre_exit net/bridge/netfilter/ebtables EXPORT_SYMBOL
+0xa3cc1157 dm_btree_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xf076bc57 bch_bset_fix_invalidated_key drivers/md/bcache/bcache EXPORT_SYMBOL
+0x83865af5 usb_os_desc_prepare_interf_dir drivers/usb/gadget/libcomposite EXPORT_SYMBOL
+0xec37501c mlx5_notifier_register drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x7d49ced1 nvme_set_features drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xf9c5d110 nfs_rmdir fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x25277497 rfkill_register vmlinux EXPORT_SYMBOL
+0x94e8cafb devlink_port_params_register vmlinux EXPORT_SYMBOL_GPL
+0xee2ff353 neigh_parms_alloc vmlinux EXPORT_SYMBOL
+0x8e97b6c0 cpuidle_unregister_device vmlinux EXPORT_SYMBOL_GPL
+0x19a304ba usb_disabled vmlinux EXPORT_SYMBOL_GPL
+0x0117a155 drm_atomic_commit vmlinux EXPORT_SYMBOL
+0x086aba99 drm_fb_helper_fbdev_setup vmlinux EXPORT_SYMBOL
+0x34456b62 drm_atomic_helper_check_plane_state vmlinux EXPORT_SYMBOL
+0x7c280b93 clk_hw_is_prepared vmlinux EXPORT_SYMBOL_GPL
+0x433cabfb acpi_decode_pld_buffer vmlinux EXPORT_SYMBOL
+0x66ae4cac mount_single vmlinux EXPORT_SYMBOL
+0xbb1f9397 of_css vmlinux EXPORT_SYMBOL_GPL
+0x5e7f4920 snd_pcm_format_set_silence sound/core/snd-pcm EXPORT_SYMBOL
+0xa06c07c9 hinic_set_toe_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0xd25918ce netif_receive_skb_core vmlinux EXPORT_SYMBOL
+0x67742df2 phy_restart_aneg vmlinux EXPORT_SYMBOL_GPL
+0xb9c425de register_syscore_ops vmlinux EXPORT_SYMBOL_GPL
+0xb02bce8d drm_gem_mmap_obj vmlinux EXPORT_SYMBOL
+0x3a9be019 asymmetric_key_id_partial vmlinux EXPORT_SYMBOL_GPL
+0x4901bd80 bpf_prog_inc vmlinux EXPORT_SYMBOL_GPL
+0x1e90d6b0 remove_irq vmlinux EXPORT_SYMBOL_GPL
+0x93a6e0b2 io_schedule vmlinux EXPORT_SYMBOL
+0x061286f1 is_huawei vmlinux EXPORT_SYMBOL
+0x4e925b76 ib_create_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5f872c87 ata_pci_bmdma_init vmlinux EXPORT_SYMBOL_GPL
+0x8ce2d93a cfb_fillrect vmlinux EXPORT_SYMBOL
+0x10b28383 pci_remap_iospace vmlinux EXPORT_SYMBOL
+0x21ef32b1 disk_part_iter_next vmlinux EXPORT_SYMBOL_GPL
+0xbbf39004 block_write_end vmlinux EXPORT_SYMBOL
+0xd2da5504 pid_vnr vmlinux EXPORT_SYMBOL_GPL
+0x5a389951 nfs_link fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x3ab60428 __xfrm_decode_session vmlinux EXPORT_SYMBOL
+0xc26a5b7b pm_wakeup_ws_event vmlinux EXPORT_SYMBOL_GPL
+0xb7b53a06 tty_port_lower_dtr_rts vmlinux EXPORT_SYMBOL
+0x5d0b4168 is_acpi_data_node vmlinux EXPORT_SYMBOL
+0xcd24e146 hash_digest_size vmlinux EXPORT_SYMBOL_GPL
+0x9b3b00b9 __pagevec_lru_add vmlinux EXPORT_SYMBOL
+0x22072508 kvm_vcpu_kick vmlinux EXPORT_SYMBOL_GPL
+0x6a45fdf1 rdma_read_gid_l2_fields drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6791a44e dm_deferred_entry_dec drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xca2e3a88 dm_deferred_entry_inc drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x4e21b837 mlx5_accel_esp_modify_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc8cc68a6 mlx4_fmr_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x42bee086 fc_attach_transport drivers/scsi/scsi_transport_fc EXPORT_SYMBOL
+0x2b44ea43 netlink_set_err vmlinux EXPORT_SYMBOL
+0xb550a102 alloc_netdev_mqs vmlinux EXPORT_SYMBOL
+0xb9c62071 bus_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x3ce77caf register_kprobe vmlinux EXPORT_SYMBOL_GPL
+0x27d1240c make_kprojid vmlinux EXPORT_SYMBOL
+0x466b85b8 libceph_compatible net/ceph/libceph EXPORT_SYMBOL
+0x35d3a277 register_snap_client net/802/psnap EXPORT_SYMBOL
+0xc6bc5cb4 mtd_pairing_info_to_wunit drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x933f10ca mlx4_srq_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xa4dff983 hinic_set_msix_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x263c3152 bcmp vmlinux EXPORT_SYMBOL
+0x21ee5ea7 inet_listen vmlinux EXPORT_SYMBOL
+0x0a59d774 __dst_destroy_metrics_generic vmlinux EXPORT_SYMBOL
+0xf02f48e5 skb_queue_purge vmlinux EXPORT_SYMBOL
+0x0b290ada dma_fence_chain_walk vmlinux EXPORT_SYMBOL
+0x29ca06c8 iommu_present vmlinux EXPORT_SYMBOL_GPL
+0xbf041102 register_vt_notifier vmlinux EXPORT_SYMBOL_GPL
+0x092fd2df badblocks_show vmlinux EXPORT_SYMBOL_GPL
+0x87a8c4c9 get_unmapped_area vmlinux EXPORT_SYMBOL
+0x02e685f3 add_to_page_cache_lru vmlinux EXPORT_SYMBOL_GPL
+0xe34b2fda nci_unregister_device net/nfc/nci/nci EXPORT_SYMBOL
+0x82e803b9 ib_mr_pool_destroy drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x8878cfa6 gether_cleanup drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0xb9e8e2cc in6addr_sitelocal_allrouters vmlinux EXPORT_SYMBOL
+0xbd8648c7 flow_rule_match_ports vmlinux EXPORT_SYMBOL
+0x07b52e38 rtnl_unregister vmlinux EXPORT_SYMBOL_GPL +0x90bf9f89 genphy_config_eee_advert vmlinux EXPORT_SYMBOL +0x97550d76 platform_bus_type vmlinux EXPORT_SYMBOL_GPL +0x0b1aad59 drm_fb_helper_check_var vmlinux EXPORT_SYMBOL +0x47de0dc7 clk_unregister_mux vmlinux EXPORT_SYMBOL_GPL +0xb12e9f63 blk_rq_map_user vmlinux EXPORT_SYMBOL +0x0a2c57ac crypto_sha512_update vmlinux EXPORT_SYMBOL +0x55b86687 jbd2_journal_clear_features vmlinux EXPORT_SYMBOL +0x81e2b625 generic_pipe_buf_release vmlinux EXPORT_SYMBOL +0x1b114735 kthread_queue_delayed_work vmlinux EXPORT_SYMBOL_GPL +0x704952f5 ip6_tnl_encap_del_ops net/ipv6/ip6_tunnel EXPORT_SYMBOL +0x70b82697 nf_ct_gre_keymap_add net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x7a3205f8 line6_version_request_async sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x75a729a0 rdma_nl_unregister drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x51304384 tap_del_queues drivers/net/tap EXPORT_SYMBOL_GPL +0xe0e25235 qlt_free_mcmd drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x51892d61 nfs_callback_set_tcpport fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1fb0e97f iommu_sva_unbind_device vmlinux EXPORT_SYMBOL_GPL +0x435b4217 d_find_alias vmlinux EXPORT_SYMBOL +0x1fee7136 trace_seq_putc vmlinux EXPORT_SYMBOL_GPL +0x7d558968 trace_seq_puts vmlinux EXPORT_SYMBOL_GPL +0x760a0f4f yield vmlinux EXPORT_SYMBOL +0x1cb9884d rdma_listen drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x96e756bb iscsit_process_nop_out drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x68c6e324 xt_check_target vmlinux EXPORT_SYMBOL_GPL +0x3d49fc73 __tracepoint_napi_poll vmlinux EXPORT_SYMBOL_GPL +0x5df778c5 __tracepoint_kfree_skb vmlinux EXPORT_SYMBOL_GPL +0x64999cd6 netdev_upper_dev_unlink vmlinux EXPORT_SYMBOL +0xc8699335 devm_power_supply_get_by_phandle vmlinux EXPORT_SYMBOL_GPL +0xb4d2403b alloc_dax_region vmlinux EXPORT_SYMBOL_GPL +0xdec34075 dax_inode vmlinux EXPORT_SYMBOL_GPL +0xd157769e regmap_get_raw_read_max vmlinux EXPORT_SYMBOL_GPL +0x0e50df05 acpi_dma_request_slave_chan_by_index vmlinux EXPORT_SYMBOL_GPL +0xc07b0863 fb_destroy_modedb vmlinux EXPORT_SYMBOL +0xccd4c999 __sg_page_iter_start vmlinux EXPORT_SYMBOL +0xa98a7194 fsstack_copy_attr_all vmlinux EXPORT_SYMBOL_GPL +0x7b470506 kvm_write_guest_offset_cached vmlinux EXPORT_SYMBOL_GPL +0xc2784ac4 atm_dev_signal_change net/atm/atm EXPORT_SYMBOL +0x0a9be76a __nf_ct_try_assign_helper net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x79c3bfba transport_init_session drivers/target/target_core_mod EXPORT_SYMBOL +0x8ee990ed mlx5_nic_vport_enable_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x5da67adc zs_compact mm/zsmalloc EXPORT_SYMBOL_GPL +0x4d0d163d copy_page vmlinux EXPORT_SYMBOL +0x7bbabe30 udp6_lib_lookup vmlinux EXPORT_SYMBOL_GPL +0xd8d6d68a xfrm_init_replay vmlinux EXPORT_SYMBOL +0xc3b5d74d udp4_lib_lookup vmlinux EXPORT_SYMBOL_GPL +0xfa3d9165 sock_zerocopy_put vmlinux EXPORT_SYMBOL_GPL +0x4560fc35 scsi_command_normalize_sense vmlinux EXPORT_SYMBOL +0x25f0a3e8 devm_kmalloc vmlinux EXPORT_SYMBOL_GPL +0x74595920 __drm_atomic_helper_disable_plane vmlinux EXPORT_SYMBOL +0x0f37ca89 lockref_put_not_zero vmlinux EXPORT_SYMBOL +0x3ad5cda3 lockref_get_not_zero vmlinux EXPORT_SYMBOL +0xb95c9c26 inode_sb_list_add vmlinux EXPORT_SYMBOL_GPL +0xc8e28677 inode_init_always vmlinux EXPORT_SYMBOL +0xfba29a50 d_invalidate vmlinux EXPORT_SYMBOL +0xa0b04675 vmalloc_32 vmlinux EXPORT_SYMBOL +0x39d3695a pid_task vmlinux EXPORT_SYMBOL +0xe11ca997 ZSTD_getDictID_fromDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x28e4593f 
compat_dccp_getsockopt net/dccp/dccp EXPORT_SYMBOL_GPL +0xaf88734e compat_dccp_setsockopt net/dccp/dccp EXPORT_SYMBOL_GPL +0x376c1170 set_h245_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x28eff409 nf_conntrack_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x046de2fa stp_proto_register net/802/stp EXPORT_SYMBOL_GPL +0x7f8c307e mdev_unregister_driver drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xcecd61a7 tcp_time_wait vmlinux EXPORT_SYMBOL +0xf0e54cb2 dm_internal_resume_fast vmlinux EXPORT_SYMBOL_GPL +0x48a1b227 of_get_i2c_adapter_by_node vmlinux EXPORT_SYMBOL +0x8b273d4b input_register_handler vmlinux EXPORT_SYMBOL +0xd1d87e92 scsi_mlreturn_string vmlinux EXPORT_SYMBOL +0x9375deb2 dax_attribute_group vmlinux EXPORT_SYMBOL_GPL +0xd2c209a5 drm_writeback_queue_job vmlinux EXPORT_SYMBOL +0xc5ec429d drm_class_device_unregister vmlinux EXPORT_SYMBOL_GPL +0xad04bce9 textsearch_unregister vmlinux EXPORT_SYMBOL +0xd0fe8d51 sg_pcopy_from_buffer vmlinux EXPORT_SYMBOL +0x2f7754a8 dma_pool_free vmlinux EXPORT_SYMBOL +0xb3de405c nft_fib_store_result net/netfilter/nft_fib EXPORT_SYMBOL_GPL +0x35061a91 garp_register_application net/802/garp EXPORT_SYMBOL_GPL +0x63ebafdb nvme_kill_queues drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xbb0d2870 netif_carrier_on vmlinux EXPORT_SYMBOL +0x48450376 fib_rules_lookup vmlinux EXPORT_SYMBOL_GPL +0x1e175b31 spi_res_alloc vmlinux EXPORT_SYMBOL_GPL +0xe4ab05cf drm_dp_mst_topology_mgr_suspend vmlinux EXPORT_SYMBOL +0xa384202a acpi_pm_set_device_wakeup vmlinux EXPORT_SYMBOL_GPL +0xae7c231d mpi_cmp vmlinux EXPORT_SYMBOL_GPL +0x167c5967 print_hex_dump vmlinux EXPORT_SYMBOL +0x9d51c97b rds_send_xmit net/rds/rds EXPORT_SYMBOL_GPL +0x696fa2fa o2net_register_handler fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x27756bc8 scsi_sanitize_inquiry_string vmlinux EXPORT_SYMBOL +0x77f63fdd dev_pm_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL +0x1df7caca drm_crtc_helper_set_config vmlinux EXPORT_SYMBOL +0xa69297fc tty_find_polling_driver vmlinux EXPORT_SYMBOL_GPL +0xe866518f pcie_port_find_device vmlinux EXPORT_SYMBOL_GPL +0xeaf7fe0f sbitmap_resize vmlinux EXPORT_SYMBOL_GPL +0x91a55068 public_key_free vmlinux EXPORT_SYMBOL_GPL +0x6c1631b8 inode_newsize_ok vmlinux EXPORT_SYMBOL +0x57673276 nfc_digital_allocate_device net/nfc/nfc_digital EXPORT_SYMBOL +0x7bbb6588 pn_sock_get_port net/phonet/phonet EXPORT_SYMBOL +0xf474a207 usb_gadget_config_buf drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xc9d84db6 mlx5_cmd_exec_polling drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa1ec0fad fcoe_ctlr_els_send drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x874fc9de st_nci_vendor_cmds_init drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0xcb79b45d nfs_pageio_reset_read_mds fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5a921311 strncmp vmlinux EXPORT_SYMBOL +0x25cebba2 inet_csk_destroy_sock vmlinux EXPORT_SYMBOL +0x0a0c9d45 inet_peer_base_init vmlinux EXPORT_SYMBOL_GPL +0xc423266e iscsi_post_host_event vmlinux EXPORT_SYMBOL_GPL +0xe7735def __pm_runtime_idle vmlinux EXPORT_SYMBOL_GPL +0x795a462b virtqueue_kick_prepare vmlinux EXPORT_SYMBOL_GPL +0x274cf5e1 __clk_get_flags vmlinux EXPORT_SYMBOL_GPL +0x2e1ca751 clk_put vmlinux EXPORT_SYMBOL +0x78df6bd7 no_pci_devices vmlinux EXPORT_SYMBOL +0xbc74dd8a crypto_register_shashes vmlinux EXPORT_SYMBOL_GPL +0x0d07f543 get_anon_bdev vmlinux EXPORT_SYMBOL +0xb4eda0da ring_buffer_event_length vmlinux EXPORT_SYMBOL_GPL +0x984c5e73 p9_fcall_fini net/9p/9pnet EXPORT_SYMBOL +0x8a141580 dccp_reqsk_send_ack net/dccp/dccp EXPORT_SYMBOL_GPL 
+0x3796bdcc snd_pcm_format_little_endian sound/core/snd-pcm EXPORT_SYMBOL +0x0c3ce2b4 rdma_create_user_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe4fa4591 nfs_add_or_obtain fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb29533ee zs_malloc mm/zsmalloc EXPORT_SYMBOL_GPL +0xa5056338 __hsiphash_aligned vmlinux EXPORT_SYMBOL +0xe753b68d devlink_fmsg_arr_pair_nest_end vmlinux EXPORT_SYMBOL_GPL +0xc330910c __flow_indr_block_cb_register vmlinux EXPORT_SYMBOL_GPL +0x3e833026 sock_kfree_s vmlinux EXPORT_SYMBOL +0x76daebca usb_stor_access_xfer_buf vmlinux EXPORT_SYMBOL_GPL +0x48a3d20b mctrl_gpio_get vmlinux EXPORT_SYMBOL_GPL +0x69dd3b5b crc32_le vmlinux EXPORT_SYMBOL +0xf54bd49b lcm vmlinux EXPORT_SYMBOL_GPL +0x845dbf3b scatterwalk_map_and_copy vmlinux EXPORT_SYMBOL_GPL +0xc1af009f empty_aops vmlinux EXPORT_SYMBOL +0x484f6edf ktime_get_coarse_real_ts64 vmlinux EXPORT_SYMBOL +0x446940eb irq_domain_pop_irq vmlinux EXPORT_SYMBOL_GPL +0x10c44b71 __task_pid_nr_ns vmlinux EXPORT_SYMBOL +0x1909784a br_forward net/bridge/bridge EXPORT_SYMBOL_GPL +0x8dc34cf0 udp_tunnel_sock_release net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0xcd267799 nft_unregister_obj net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xd9415b6a usb_string_id drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x5c8b7402 iscsi_tcp_task_init drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0xf7bc95b0 devlink_fmsg_pair_nest_start vmlinux EXPORT_SYMBOL_GPL +0x0cc23b21 ethtool_op_get_link vmlinux EXPORT_SYMBOL +0x088eea0c pps_lookup_dev vmlinux EXPORT_SYMBOL +0xf7455c16 input_event_to_user vmlinux EXPORT_SYMBOL_GPL +0xfbd98903 input_unregister_device vmlinux EXPORT_SYMBOL +0x1f553802 sas_port_free vmlinux EXPORT_SYMBOL +0xcaf2c603 scsi_sd_pm_domain vmlinux EXPORT_SYMBOL +0x89250e20 tty_buffer_space_avail vmlinux EXPORT_SYMBOL_GPL +0xe535e5b4 regulator_disable_regmap vmlinux EXPORT_SYMBOL_GPL +0xabbcdf98 clk_hw_set_parent vmlinux EXPORT_SYMBOL_GPL +0xac53a7b2 clk_hw_get_parent vmlinux EXPORT_SYMBOL_GPL +0x44390e70 clk_bulk_get_optional vmlinux EXPORT_SYMBOL_GPL +0xd2ea49b8 acpi_leave_sleep_state_prep vmlinux EXPORT_SYMBOL +0xa36443e6 read_cache_pages vmlinux EXPORT_SYMBOL +0x4aadeb9a ring_buffer_alloc_read_page vmlinux EXPORT_SYMBOL_GPL +0x1f774f46 cpuset_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xa8a8110c kernel_neon_end vmlinux EXPORT_SYMBOL +0x53445f68 nlm_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf3e66033 stp_proto_unregister net/802/stp EXPORT_SYMBOL_GPL +0x63343b1d snd_usbmidi_input_stop sound/usb/snd-usbmidi-lib EXPORT_SYMBOL +0x8265f153 mlxsw_core_schedule_work drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc9c2e4cc mlxsw_core_lag_mapping_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x9ececd6d ip_generic_getfrag vmlinux EXPORT_SYMBOL +0x3f2cf2a2 call_netdevice_notifiers vmlinux EXPORT_SYMBOL +0x40e74dbb ehci_reset vmlinux EXPORT_SYMBOL_GPL +0xdc825d6c usb_amd_quirk_pll_disable vmlinux EXPORT_SYMBOL_GPL +0x3d8553c1 usb_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x5464f6ea mii_nway_restart vmlinux EXPORT_SYMBOL +0x34d46fad sas_port_alloc_num vmlinux EXPORT_SYMBOL +0xa6b21ef2 dpm_suspend_end vmlinux EXPORT_SYMBOL_GPL +0xf07cc1d6 free_iova vmlinux EXPORT_SYMBOL_GPL +0xadf9699b pci_write_msi_msg vmlinux EXPORT_SYMBOL_GPL +0x09be2e32 bio_integrity_alloc vmlinux EXPORT_SYMBOL +0xe2526705 crypto_dequeue_request vmlinux EXPORT_SYMBOL_GPL +0x679d4931 crypto_enqueue_request vmlinux EXPORT_SYMBOL_GPL +0xd45718d3 fat_add_entries vmlinux EXPORT_SYMBOL_GPL +0x1e251ab3 thaw_bdev vmlinux EXPORT_SYMBOL +0x6971c1ed vfs_fallocate 
vmlinux EXPORT_SYMBOL_GPL +0x21830ad7 kmem_cache_alloc_trace vmlinux EXPORT_SYMBOL +0x1e7d6157 freezer_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xe41476d9 ZSTD_getParams lib/zstd/zstd_compress EXPORT_SYMBOL +0x2f3ac812 nf_tables_deactivate_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x233c9893 iscsit_allocate_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x3fca81f8 mlx5_fpga_sbu_conn_destroy drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x22d795a8 _nfs_display_fhandle_hash fs/nfs/nfs EXPORT_SYMBOL_GPL +0x7be2e929 nfs_lookup fs/nfs/nfs EXPORT_SYMBOL_GPL +0x81cbb7f1 nfs_force_lookup_revalidate fs/nfs/nfs EXPORT_SYMBOL_GPL +0x37981539 netdev_rx_csum_fault vmlinux EXPORT_SYMBOL +0xe09f3888 of_pci_address_to_resource vmlinux EXPORT_SYMBOL_GPL +0xb081224b usb_phy_roothub_suspend vmlinux EXPORT_SYMBOL_GPL +0x4baac716 ata_sff_data_xfer32 vmlinux EXPORT_SYMBOL_GPL +0x6546b6b4 devres_release_group vmlinux EXPORT_SYMBOL_GPL +0xcdf7f775 drm_bridge_disable vmlinux EXPORT_SYMBOL +0x2140e4c4 drm_atomic_helper_commit_cleanup_done vmlinux EXPORT_SYMBOL +0x8f7bd0a6 btree_init_mempool vmlinux EXPORT_SYMBOL_GPL +0x9e61bb05 set_freezable vmlinux EXPORT_SYMBOL +0x45329cde rds_send_path_reset net/rds/rds EXPORT_SYMBOL_GPL +0xa0b14117 xdr_decode_array2 net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4a03d2fd ipt_register_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL +0xe56a9336 snd_pcm_format_width sound/core/snd-pcm EXPORT_SYMBOL +0xbee28976 nfs_fhget fs/nfs/nfs EXPORT_SYMBOL_GPL +0x68f275ad ce_aes_expandkey arch/arm64/crypto/aes-ce-cipher EXPORT_SYMBOL +0xbb5261ef neigh_sysctl_unregister vmlinux EXPORT_SYMBOL +0x60b3071f neigh_proc_dointvec vmlinux EXPORT_SYMBOL +0xa0e62d4b usb_ep_alloc_request vmlinux EXPORT_SYMBOL_GPL +0xe4e48b12 swphy_validate_state vmlinux EXPORT_SYMBOL_GPL +0x6311792f ata_pio_need_iordy vmlinux EXPORT_SYMBOL_GPL +0x6c482edc dev_pm_clear_wake_irq vmlinux EXPORT_SYMBOL_GPL +0x16cdc340 acpi_get_table vmlinux EXPORT_SYMBOL +0xc9561772 fb_destroy_modelist vmlinux EXPORT_SYMBOL_GPL +0xf9b24fa2 blk_abort_request vmlinux EXPORT_SYMBOL_GPL +0x60591fdc blk_set_queue_depth vmlinux EXPORT_SYMBOL +0x5986d190 kdb_printf vmlinux EXPORT_SYMBOL_GPL +0x3bfda043 snd_mixer_oss_notify_callback sound/core/snd EXPORT_SYMBOL +0x9f5e414e __scm_destroy vmlinux EXPORT_SYMBOL +0xcd1f6dc3 mdiobus_get_phy vmlinux EXPORT_SYMBOL +0x50bfff20 device_connection_find vmlinux EXPORT_SYMBOL_GPL +0x15dce304 drm_crtc_cleanup vmlinux EXPORT_SYMBOL +0xe820ee34 drm_atomic_helper_commit vmlinux EXPORT_SYMBOL +0x932b96d1 tty_port_register_device_serdev vmlinux EXPORT_SYMBOL_GPL +0x04fb5eea amba_ahb_device_add_res vmlinux EXPORT_SYMBOL_GPL +0xde4d4ace dim_calc_stats vmlinux EXPORT_SYMBOL +0xeb9e913d sgl_alloc_order vmlinux EXPORT_SYMBOL +0x54c160ba aead_exit_geniv vmlinux EXPORT_SYMBOL_GPL +0x1b223421 d_move vmlinux EXPORT_SYMBOL +0x5091b823 ring_buffer_read_start vmlinux EXPORT_SYMBOL_GPL +0x82676cd5 __vsock_create net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x94b182be rpc_wake_up_first net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xde95bdc1 ipcomp_destroy net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL +0x5ac06930 rdma_reject drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x04b3d619 mlx4_flow_steer_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x5048fae0 fc_lport_set_local_id drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x89a5279a ipmi_get_version drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x4d7e476a ping_common_sendmsg vmlinux EXPORT_SYMBOL_GPL +0xaa1aadf5 tcf_block_get vmlinux 
EXPORT_SYMBOL +0x54bbf99a ata_std_end_eh vmlinux EXPORT_SYMBOL +0xb2c3e445 battery_hook_unregister vmlinux EXPORT_SYMBOL_GPL +0x42200c4b touch_atime vmlinux EXPORT_SYMBOL +0x1e8f9b3f evict_inodes vmlinux EXPORT_SYMBOL_GPL +0x7b9793a2 get_cpu_idle_time_us vmlinux EXPORT_SYMBOL_GPL +0x9f9d7ad5 target_execute_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0xd70d35a1 gf128mul_4k_bbe crypto/gf128mul EXPORT_SYMBOL +0x5a4d313e gf128mul_4k_lle crypto/gf128mul EXPORT_SYMBOL +0xe64a3cae kernel_sendmsg_locked vmlinux EXPORT_SYMBOL +0x0d3677a0 device_set_wakeup_capable vmlinux EXPORT_SYMBOL_GPL +0xdfb2618c device_dma_supported vmlinux EXPORT_SYMBOL_GPL +0x6419629b dw_pcie_setup_rc vmlinux EXPORT_SYMBOL_GPL +0x6394221d generic_end_io_acct vmlinux EXPORT_SYMBOL +0x2d551271 padata_do_serial vmlinux EXPORT_SYMBOL +0x81a805e7 mod_timer_pending vmlinux EXPORT_SYMBOL +0xc69f1bc6 phonet_proto_unregister net/phonet/phonet EXPORT_SYMBOL +0x48a1b2a9 ib_unregister_device drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb2478f09 target_tpg_has_node_acl drivers/target/target_core_mod EXPORT_SYMBOL +0x2eb01e04 dm_deferred_set_destroy drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x349cba85 strchr vmlinux EXPORT_SYMBOL +0x1e6d26a8 strstr vmlinux EXPORT_SYMBOL +0xf4e47853 qdisc_tree_reduce_backlog vmlinux EXPORT_SYMBOL +0xbbe9ce97 devlink_port_type_clear vmlinux EXPORT_SYMBOL_GPL +0x9af7a57a rtnl_configure_link vmlinux EXPORT_SYMBOL +0x6aa7b7e6 sock_wmalloc vmlinux EXPORT_SYMBOL +0x4371d51b of_msi_configure vmlinux EXPORT_SYMBOL_GPL +0x4de17ab3 usb_state_string vmlinux EXPORT_SYMBOL_GPL +0x252e3cd5 ttm_tt_init vmlinux EXPORT_SYMBOL +0x19e81304 btree_alloc vmlinux EXPORT_SYMBOL_GPL +0x31ffd761 security_kernel_post_read_file vmlinux EXPORT_SYMBOL_GPL +0x9961b21e task_user_regset_view vmlinux EXPORT_SYMBOL_GPL +0x42a06d22 snd_pcm_stream_unlock sound/core/snd-pcm EXPORT_SYMBOL_GPL +0x188d9d26 __cast5_decrypt crypto/cast5_generic EXPORT_SYMBOL_GPL +0xef81a4af __cast5_encrypt crypto/cast5_generic EXPORT_SYMBOL_GPL +0x81871de7 crypto_transfer_aead_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL +0x3c827728 flow_indr_block_cb_unregister vmlinux EXPORT_SYMBOL_GPL +0xe4a2964a netif_tx_stop_all_queues vmlinux EXPORT_SYMBOL +0x9caab9ef acpi_gpio_get_irq_resource vmlinux EXPORT_SYMBOL_GPL +0xb299eb29 refcount_add_not_zero_checked vmlinux EXPORT_SYMBOL +0xc82a1701 perf_event_disable vmlinux EXPORT_SYMBOL_GPL +0x697397ee rpcauth_stringify_acceptor net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb2438d54 dm_bufio_release_move drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0xa923da6e ppp_channel_index drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0xdeb1dc2e mlxsw_afa_block_first_kvdl_index drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x29812d00 hinic_sm_ctr_rd32_clear drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xbf8ee4bd kobject_create_and_add vmlinux EXPORT_SYMBOL_GPL +0x37a2f16e inet_csk_listen_stop vmlinux EXPORT_SYMBOL_GPL +0xc57341b1 i2c_handle_smbus_host_notify vmlinux EXPORT_SYMBOL_GPL +0xaab84040 usb_serial_generic_resume vmlinux EXPORT_SYMBOL_GPL +0x407af304 usb_wait_anchor_empty_timeout vmlinux EXPORT_SYMBOL_GPL +0x2bd854d3 ttm_bo_validate vmlinux EXPORT_SYMBOL +0x4f72a987 uart_parse_options vmlinux EXPORT_SYMBOL_GPL +0x26710a6d virtio_add_status vmlinux EXPORT_SYMBOL_GPL +0x179a7de6 pnp_release_card_device vmlinux EXPORT_SYMBOL +0xc20c0023 pci_iomap_range vmlinux EXPORT_SYMBOL +0xb4b90270 put_disk_and_module vmlinux EXPORT_SYMBOL +0xf79ea36d setup_new_exec vmlinux EXPORT_SYMBOL +0xdb8a1b3f 
usermodehelper_read_trylock vmlinux EXPORT_SYMBOL_GPL +0x9de7334e md_check_recovery drivers/md/md-mod EXPORT_SYMBOL +0x166a038e macvlan_link_register drivers/net/macvlan EXPORT_SYMBOL_GPL +0xab7f0776 mlx5_set_port_admin_status drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x287d8d0d xfrm_state_lookup_byaddr vmlinux EXPORT_SYMBOL +0xa40876ba pskb_expand_head vmlinux EXPORT_SYMBOL +0x0c397332 drm_noop vmlinux EXPORT_SYMBOL +0x5867d4a3 drm_atomic_helper_damage_merged vmlinux EXPORT_SYMBOL +0xa3cac746 blkg_conf_prep vmlinux EXPORT_SYMBOL_GPL +0x64dac7e2 keyring_alloc vmlinux EXPORT_SYMBOL +0xb672aa97 lc_create lib/lru_cache EXPORT_SYMBOL +0x193e42e1 ibdev_crit drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb6f01c1d ib_sg_to_pages drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x19a50641 __tracepoint_bcache_journal_full drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xd252e62d mlxsw_core_skb_transmit_busy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x89998572 mlx5_core_query_sq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x410735b7 mlx5_core_query_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x3416dc9e __iscsi_put_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x2c71bebd __iscsi_get_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xb5073334 spi_display_xfer_agreement drivers/scsi/scsi_transport_spi EXPORT_SYMBOL +0x212133db xps_rxqs_needed vmlinux EXPORT_SYMBOL +0x015098b2 i2c_dw_prepare_clk vmlinux EXPORT_SYMBOL_GPL +0x7f190f57 i2c_release_client vmlinux EXPORT_SYMBOL +0x797fed8e drm_gem_dma_resv_wait vmlinux EXPORT_SYMBOL +0x25379e73 clk_set_min_rate vmlinux EXPORT_SYMBOL_GPL +0x6121835c skcipher_walk_virt vmlinux EXPORT_SYMBOL_GPL +0xcb278b97 fuse_do_ioctl vmlinux EXPORT_SYMBOL_GPL +0x5e5df7d5 thp_get_unmapped_area vmlinux EXPORT_SYMBOL_GPL +0xddd6a7be devices_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x6d9ee2a0 __request_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0x426fe71a pm_qos_update_request vmlinux EXPORT_SYMBOL_GPL +0xb09faf79 register_atmdevice_notifier net/atm/atm EXPORT_SYMBOL_GPL +0x43a7e36a st_nci_disable_se drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL +0x64ad172b ip6_fraglist_init vmlinux EXPORT_SYMBOL +0xcc66b961 devlink_port_type_ib_set vmlinux EXPORT_SYMBOL_GPL +0x6e413bc0 rtnl_link_register vmlinux EXPORT_SYMBOL_GPL +0x325a2a81 make_flow_keys_digest vmlinux EXPORT_SYMBOL +0x96375e9d sk_set_memalloc vmlinux EXPORT_SYMBOL_GPL +0x212c9389 gov_update_cpu_data vmlinux EXPORT_SYMBOL_GPL +0x1ea8f244 usb_gadget_activate vmlinux EXPORT_SYMBOL_GPL +0x68b3a4e5 ata_bmdma_port_intr vmlinux EXPORT_SYMBOL_GPL +0x12f5d51a dma_buf_export vmlinux EXPORT_SYMBOL_GPL +0x8506baa8 clk_unregister_gate vmlinux EXPORT_SYMBOL_GPL +0x369ef4c8 bh_submit_read vmlinux EXPORT_SYMBOL +0x5fa4138c vfs_dedupe_file_range_one vmlinux EXPORT_SYMBOL +0xa16a32d2 ftrace_set_clr_event vmlinux EXPORT_SYMBOL_GPL +0x43c2a786 __cpu_clear_user_page vmlinux EXPORT_SYMBOL_GPL +0x55eca261 osd_req_op_extent_osd_data_bvecs net/ceph/libceph EXPORT_SYMBOL +0x4b8b966e rpc_localaddr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9cdd20f9 ip_tunnel_get_link_net net/ipv4/ip_tunnel EXPORT_SYMBOL +0xe7ca1d5e nft_fwd_dup_netdev_offload net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL +0x564243ad vhost_get_vq_desc drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xc6b67f14 hinic_ppf_ext_db_deinit drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xfaad63e9 hinic_return_rq_wqe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xc9a6976f hinic_return_sq_wqe 
drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x82dfe9f3 ip_mc_check_igmp vmlinux EXPORT_SYMBOL +0x30b53461 eth_change_mtu vmlinux EXPORT_SYMBOL +0x259775b4 eth_header_parse vmlinux EXPORT_SYMBOL +0x11ef520e drm_writeback_signal_completion vmlinux EXPORT_SYMBOL +0xad30290d drm_plane_create_zpos_property vmlinux EXPORT_SYMBOL +0xf4c2a888 of_dma_router_register vmlinux EXPORT_SYMBOL_GPL +0x7023bea8 unregister_acpi_notifier vmlinux EXPORT_SYMBOL +0x06bd88b5 ucs2_strnlen vmlinux EXPORT_SYMBOL +0xf6d58c6a bdev_stack_limits vmlinux EXPORT_SYMBOL +0xe64d79b5 discard_new_inode vmlinux EXPORT_SYMBOL +0x374c2088 kmsg_dump_get_buffer vmlinux EXPORT_SYMBOL_GPL +0x643ec7d1 nf_nat_pptp_hook_exp_gre net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL +0x16196e3a transport_handle_cdb_direct drivers/target/target_core_mod EXPORT_SYMBOL +0x4bb13331 hinic_aeq_unregister_swe_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x522ca818 srp_stop_rport_timers drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0xa99467e8 crypto_finalize_skcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL +0xc68fab12 sock_wake_async vmlinux EXPORT_SYMBOL +0xcf57151b usb_gadget_set_selfpowered vmlinux EXPORT_SYMBOL_GPL +0xc6faa68e ata_bmdma_qc_prep vmlinux EXPORT_SYMBOL_GPL +0xae5a04bb acpi_evaluate_dsm vmlinux EXPORT_SYMBOL +0x8875e4fe blk_set_runtime_active vmlinux EXPORT_SYMBOL +0xa92ea4cd bpf_offload_dev_netdev_register vmlinux EXPORT_SYMBOL_GPL +0x425603ec __wait_rcu_gp vmlinux EXPORT_SYMBOL_GPL +0x8369bd92 nf_synproxy_ipv6_fini net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xdd497e51 dm_cell_lock_promote_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xce37c1e1 nvme_cancel_request drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x131a6146 xa_clear_mark vmlinux EXPORT_SYMBOL +0xee5ad41f of_platform_device_create vmlinux EXPORT_SYMBOL +0xcb8adaae usb_gadget_connect vmlinux EXPORT_SYMBOL_GPL +0xe8821d15 drm_client_modeset_dpms vmlinux EXPORT_SYMBOL +0x91dd8055 rhashtable_walk_enter vmlinux EXPORT_SYMBOL_GPL +0xa14de503 crypto_register_aead vmlinux EXPORT_SYMBOL_GPL +0xc3d9aead crypto_register_algs vmlinux EXPORT_SYMBOL_GPL +0x51452b1c fat_free_clusters vmlinux EXPORT_SYMBOL_GPL +0x9be3c273 jbd2_complete_transaction vmlinux EXPORT_SYMBOL +0xbf2557c6 __set_page_dirty_buffers vmlinux EXPORT_SYMBOL +0xbe57afd3 unlock_two_nondirectories vmlinux EXPORT_SYMBOL +0x2773c485 __wake_up_locked vmlinux EXPORT_SYMBOL_GPL +0xb331b41b put_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x96848186 scnprintf vmlinux EXPORT_SYMBOL +0x001c824e tc_cleanup_flow_action vmlinux EXPORT_SYMBOL +0x9f093061 dev_pre_changeaddr_notify vmlinux EXPORT_SYMBOL +0xb2905e02 dev_queue_xmit_nit vmlinux EXPORT_SYMBOL_GPL +0xd797d8e7 sock_prot_inuse_add vmlinux EXPORT_SYMBOL_GPL +0x5fff8d9f usb_gadget_clear_selfpowered vmlinux EXPORT_SYMBOL_GPL +0x1b1a8f1d usb_hcd_platform_shutdown vmlinux EXPORT_SYMBOL_GPL +0x3014170d mod_zone_page_state vmlinux EXPORT_SYMBOL +0xb971a353 perf_event_refresh vmlinux EXPORT_SYMBOL_GPL +0x116d0a3b rdma_restrack_count drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdd049f01 mlx5_core_set_delay_drop drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc2e3f347 tcf_register_action vmlinux EXPORT_SYMBOL +0x5a485bff mini_qdisc_pair_swap vmlinux EXPORT_SYMBOL +0x15d374d1 netdev_reset_tc vmlinux EXPORT_SYMBOL +0x067ae63a of_property_read_string_helper vmlinux EXPORT_SYMBOL_GPL +0x40267068 usb_anchor_resume_wakeups vmlinux EXPORT_SYMBOL_GPL +0x9c191eb7 dev_pm_set_wake_irq vmlinux EXPORT_SYMBOL_GPL 
+0x8e552a8a pm_runtime_autosuspend_expiration vmlinux EXPORT_SYMBOL_GPL +0x622988b4 kvm_irq_has_notifier vmlinux EXPORT_SYMBOL_GPL +0x4a692e3a lapb_connect_request net/lapb/lapb EXPORT_SYMBOL +0x0cf0d304 md_bitmap_sync_with_cluster drivers/md/md-mod EXPORT_SYMBOL +0xf37b7c1f mlx5_rl_is_in_range drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x387aff99 rtnl_link_unregister vmlinux EXPORT_SYMBOL_GPL +0xdb993821 sata_port_ops vmlinux EXPORT_SYMBOL_GPL +0x399fbf54 __free_iova vmlinux EXPORT_SYMBOL_GPL +0x63a94614 d_alloc_anon vmlinux EXPORT_SYMBOL +0xab33820a copy_strings_kernel vmlinux EXPORT_SYMBOL +0x5439e786 generic_copy_file_range vmlinux EXPORT_SYMBOL +0xbe6a866f __wait_on_bit vmlinux EXPORT_SYMBOL +0xa1f8ca74 line6_send_raw_message_async sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x04403fcf unregister_capi_driver drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x3ca4fb99 vlan_filter_push_vids vmlinux EXPORT_SYMBOL +0x4fad7abf led_trigger_register_simple vmlinux EXPORT_SYMBOL_GPL +0x690f585e phy_basic_ports_array vmlinux EXPORT_SYMBOL_GPL +0x95228607 iscsi_recv_pdu vmlinux EXPORT_SYMBOL_GPL +0x55621edc __platform_register_drivers vmlinux EXPORT_SYMBOL_GPL +0x506c845b drm_bridge_post_disable vmlinux EXPORT_SYMBOL +0x20645642 drm_debug vmlinux EXPORT_SYMBOL +0xf424dc3b rdev_get_id vmlinux EXPORT_SYMBOL_GPL +0xe3f16633 rhashtable_free_and_destroy vmlinux EXPORT_SYMBOL_GPL +0x853ad413 device_add_disk_no_queue_reg vmlinux EXPORT_SYMBOL +0x3ab37cec is_phytium2000 vmlinux EXPORT_SYMBOL +0xccbfb7f0 md_kick_rdev_from_array drivers/md/md-mod EXPORT_SYMBOL_GPL +0x2479193e crypto_authenc_extractkeys crypto/authenc EXPORT_SYMBOL_GPL +0xb988fd60 tcp_enter_memory_pressure vmlinux EXPORT_SYMBOL_GPL +0xed9da189 xt_compat_match_offset vmlinux EXPORT_SYMBOL_GPL +0xaf8b64fb dst_init vmlinux EXPORT_SYMBOL +0xc4a641d1 skb_add_rx_frag vmlinux EXPORT_SYMBOL +0x8426fdcc usb_hcd_amd_remote_wakeup_quirk vmlinux EXPORT_SYMBOL_GPL +0x811dc334 usb_unregister_notify vmlinux EXPORT_SYMBOL_GPL +0xcb525da1 genphy_c45_an_disable_aneg vmlinux EXPORT_SYMBOL_GPL +0x3b1f2c2c sas_ioctl vmlinux EXPORT_SYMBOL_GPL +0x77456e0a acpi_root_dir vmlinux EXPORT_SYMBOL +0x2211a4c0 blk_op_str vmlinux EXPORT_SYMBOL_GPL +0x0e944a52 probe_kernel_write vmlinux EXPORT_SYMBOL_GPL +0xccef37e4 ZSTD_DStreamOutSize lib/zstd/zstd_decompress EXPORT_SYMBOL +0xc5eff808 nf_ct_iterate_cleanup_net net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x1d417ce9 bch_btree_keys_init drivers/md/bcache/bcache EXPORT_SYMBOL +0xbc6ff75e mlx4_multicast_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf1e43985 hinic_er_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x76441ce6 hinic_ep_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x1f605af3 spi_schedule_dv_device drivers/scsi/scsi_transport_spi EXPORT_SYMBOL +0xd5430dce arp_create vmlinux EXPORT_SYMBOL +0xa258dd37 of_modalias_node vmlinux EXPORT_SYMBOL_GPL +0x22256cc0 genphy_c45_an_config_aneg vmlinux EXPORT_SYMBOL_GPL +0xfa9e7ca2 ata_bmdma_post_internal_cmd vmlinux EXPORT_SYMBOL_GPL +0x489fd019 drm_atomic_helper_suspend vmlinux EXPORT_SYMBOL +0x916a3cfa tty_port_alloc_xmit_buf vmlinux EXPORT_SYMBOL +0xcd01b8e6 acpi_attach_data vmlinux EXPORT_SYMBOL +0x88822d38 unregister_blocking_lsm_notifier vmlinux EXPORT_SYMBOL +0xa7308694 jbd2_journal_get_create_access vmlinux EXPORT_SYMBOL +0x5dd46014 no_llseek vmlinux EXPORT_SYMBOL +0x23b8597e rt_mutex_timed_lock vmlinux EXPORT_SYMBOL_GPL +0x40babbe0 cfpkt_extr_head net/caif/caif EXPORT_SYMBOL +0x7dcc9a6a 
ib_init_ah_attr_from_path drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x038a26fe ib_modify_qp_with_udata drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3addba9a __vfio_platform_register_reset drivers/vfio/platform/vfio-platform-base EXPORT_SYMBOL_GPL +0x5375ca71 dm_bm_write_lock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x0fb1126e of_find_mipi_dsi_device_by_node vmlinux EXPORT_SYMBOL +0x50a4698c fb_videomode_to_modelist vmlinux EXPORT_SYMBOL +0x95ad7a46 pci_generic_config_write vmlinux EXPORT_SYMBOL_GPL +0xe1d92e91 iov_iter_for_each_range vmlinux EXPORT_SYMBOL +0x138fea60 vfs_truncate vmlinux EXPORT_SYMBOL_GPL +0x77eb2fe7 __hrtimer_get_remaining vmlinux EXPORT_SYMBOL_GPL +0xb2d4b263 gfn_to_page vmlinux EXPORT_SYMBOL_GPL +0x1a3e9c57 nfc_unregister_device net/nfc/nfc EXPORT_SYMBOL +0x42d5005e fc_lport_config drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x41e309e0 efivar_entry_add vmlinux EXPORT_SYMBOL_GPL +0x9c803020 usb_phy_roothub_power_on vmlinux EXPORT_SYMBOL_GPL +0xaf4a0282 mdio_device_free vmlinux EXPORT_SYMBOL +0x78c81484 device_match_name vmlinux EXPORT_SYMBOL_GPL +0xa33ab01d drm_mode_copy vmlinux EXPORT_SYMBOL +0xff070f3d acpi_pci_check_ejectable vmlinux EXPORT_SYMBOL_GPL +0x2d192c70 sg_zero_buffer vmlinux EXPORT_SYMBOL +0x0fd377bd register_sysctl_paths vmlinux EXPORT_SYMBOL +0x72a1c5e0 iomap_migrate_page vmlinux EXPORT_SYMBOL_GPL +0x35611e79 pagecache_write_begin vmlinux EXPORT_SYMBOL +0x0d459213 work_on_cpu_safe vmlinux EXPORT_SYMBOL_GPL +0x61577694 ZSTD_compressEnd lib/zstd/zstd_compress EXPORT_SYMBOL +0xdfe94e23 rpc_clnt_xprt_switch_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6398da2b iscsi_segment_init_linear drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x71f6670b __cookie_v6_check vmlinux EXPORT_SYMBOL_GPL +0xde71ed03 spi_finalize_current_message vmlinux EXPORT_SYMBOL_GPL +0x6e8bdc92 scmd_printk vmlinux EXPORT_SYMBOL +0x387ce948 ata_sff_pause vmlinux EXPORT_SYMBOL_GPL +0x17beaf26 dev_pm_qos_add_request vmlinux EXPORT_SYMBOL_GPL +0x4fd9ef73 ttm_bo_swapout vmlinux EXPORT_SYMBOL +0xc73c2954 acpi_bind_one vmlinux EXPORT_SYMBOL_GPL +0xa8223179 refcount_dec_checked vmlinux EXPORT_SYMBOL +0xfeea58a1 bio_associate_blkg_from_css vmlinux EXPORT_SYMBOL_GPL +0xcbd7a164 padata_set_cpumask vmlinux EXPORT_SYMBOL +0xc6cbbc89 capable vmlinux EXPORT_SYMBOL +0xc68b2ba9 v9fs_unregister_trans net/9p/9pnet EXPORT_SYMBOL +0x0f99e50f ps2_cmd_aborted drivers/input/serio/libps2 EXPORT_SYMBOL +0xf77692a1 can_change_state drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xd97e46f0 mlx5_core_dealloc_pd drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xdf36914b xa_find_after vmlinux EXPORT_SYMBOL +0x87bfc61f gro_cells_init vmlinux EXPORT_SYMBOL +0xc0b2664d devlink_dpipe_header_ipv4 vmlinux EXPORT_SYMBOL +0x90e78b98 of_irq_get vmlinux EXPORT_SYMBOL_GPL +0x428f96f5 hid_debug_event vmlinux EXPORT_SYMBOL_GPL +0xa89a4734 input_set_capability vmlinux EXPORT_SYMBOL +0x357ad39e phy_queue_state_machine vmlinux EXPORT_SYMBOL +0xee547c2c drm_mode_create_tv_properties vmlinux EXPORT_SYMBOL +0xca2e080c crypto_register_templates vmlinux EXPORT_SYMBOL_GPL +0x4ef5bcf4 perf_swevent_get_recursion_context vmlinux EXPORT_SYMBOL_GPL +0x64546725 irq_find_mapping vmlinux EXPORT_SYMBOL_GPL +0xad73041f autoremove_wake_function vmlinux EXPORT_SYMBOL +0x963d42aa __wake_up_sync vmlinux EXPORT_SYMBOL_GPL +0x5d300a3f sched_trace_cfs_rq_cpu vmlinux EXPORT_SYMBOL_GPL +0xa1cf5cfc nf_ct_port_tuple_to_nlattr net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x0ab91879 fcoe_get_paged_crc_eof 
drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x7e801cb3 vlan_vid_del vmlinux EXPORT_SYMBOL +0x0a041659 skb_mpls_push vmlinux EXPORT_SYMBOL_GPL +0x578a1876 tun_xdp_to_ptr vmlinux EXPORT_SYMBOL +0xa231c1a5 drm_fb_helper_hotplug_event vmlinux EXPORT_SYMBOL +0x0426f62d drm_fb_helper_initial_config vmlinux EXPORT_SYMBOL +0x840ddc72 iommu_report_device_fault vmlinux EXPORT_SYMBOL_GPL +0x488e13c8 give_up_console vmlinux EXPORT_SYMBOL +0xd71558b5 gpiod_put_array vmlinux EXPORT_SYMBOL_GPL +0xe7ffe877 pcpu_base_addr vmlinux EXPORT_SYMBOL_GPL +0xad55075b nf_conntrack_helper_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xb5cbf326 snd_seq_root sound/core/snd EXPORT_SYMBOL +0xad99f5a8 rdma_resolve_addr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x54d7de3f mpt_reset_register drivers/message/fusion/mptbase EXPORT_SYMBOL +0x43747d82 drm_mode_find_dmt vmlinux EXPORT_SYMBOL +0x12f95bf6 drm_atomic_helper_async_commit vmlinux EXPORT_SYMBOL +0xca81075e regulator_get_current_limit_regmap vmlinux EXPORT_SYMBOL_GPL +0x38f11a97 acpi_dev_resource_memory vmlinux EXPORT_SYMBOL_GPL +0x1b32c8c2 skcipher_walk_done vmlinux EXPORT_SYMBOL_GPL +0x9a11a0fc crypto_attr_alg_name vmlinux EXPORT_SYMBOL_GPL +0xb57eb610 debugfs_create_regset32 vmlinux EXPORT_SYMBOL_GPL +0x1395e0f5 __test_set_page_writeback vmlinux EXPORT_SYMBOL +0x83d7f222 irq_chip_set_wake_parent vmlinux EXPORT_SYMBOL_GPL +0x72395dc1 xfrm_calg_get_byid net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x06c8f2de slhc_compress drivers/net/slip/slhc EXPORT_SYMBOL +0x7a825c16 hinic_cos_valid_bitmap drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x366b321f ir_raw_handler_register vmlinux EXPORT_SYMBOL +0x0eb6bc85 uhci_check_and_reset_hc vmlinux EXPORT_SYMBOL_GPL +0xee5cae52 usb_hc_died vmlinux EXPORT_SYMBOL_GPL +0x27f14b5b pci_max_pasids vmlinux EXPORT_SYMBOL_GPL +0x3081aaab pci_free_irq vmlinux EXPORT_SYMBOL +0x4b0ff2b5 pinctrl_dev_get_name vmlinux EXPORT_SYMBOL_GPL +0xc1514a3b free_irq vmlinux EXPORT_SYMBOL +0x6b4b2933 __ioremap vmlinux EXPORT_SYMBOL +0x87a65b5c l2tp_session_create net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xb2af19e1 snd_usbmidi_resume sound/usb/snd-usbmidi-lib EXPORT_SYMBOL +0xbd902944 drm_gem_shmem_put_pages vmlinux EXPORT_SYMBOL +0x176cc19f acpi_subsys_restore_early vmlinux EXPORT_SYMBOL_GPL +0xa5af0c52 jbd2_journal_init_inode vmlinux EXPORT_SYMBOL +0xf45a0a58 take_dentry_name_snapshot vmlinux EXPORT_SYMBOL +0x7891dadf deactivate_locked_super vmlinux EXPORT_SYMBOL +0xaa9bddd8 ftrace_set_filter vmlinux EXPORT_SYMBOL_GPL +0xab9e4a9b lowpan_nhc_del net/6lowpan/6lowpan EXPORT_SYMBOL +0xa55a25f6 sunrpc_cache_unhash net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5a888176 rpcauth_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4f816e9b snd_pcm_format_big_endian sound/core/snd-pcm EXPORT_SYMBOL +0x85bcc355 xfrm_audit_policy_delete vmlinux EXPORT_SYMBOL_GPL +0x561b9752 inet_twsk_hashdance vmlinux EXPORT_SYMBOL_GPL +0xd3433e25 sock_zerocopy_realloc vmlinux EXPORT_SYMBOL_GPL +0xc3dcd0ed init_iova_flush_queue vmlinux EXPORT_SYMBOL_GPL +0x03b47a4d clkdev_create vmlinux EXPORT_SYMBOL_GPL +0x95bc9078 btree_free vmlinux EXPORT_SYMBOL_GPL +0x9c942adc vprintk_emit vmlinux EXPORT_SYMBOL +0x09d44df9 in_lock_functions vmlinux EXPORT_SYMBOL +0xbed46c2b static_relocate vmlinux EXPORT_SYMBOL_GPL +0xf3b8b6cb rpc_wake_up net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x021bfcc5 ip6_tnl_xmit_ctl net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL +0x30c37cc0 dm_bm_write_lock_zero drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xbfd01f33 mlxsw_core_port_ib_set 
drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x58c8a2d4 mlx4_assign_eq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xa2e9f53f ndlc_recv drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0x851c747c simd_aead_create crypto/crypto_simd EXPORT_SYMBOL_GPL +0x927808e9 ip6_sk_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0x30c9b983 netdev_rx_handler_unregister vmlinux EXPORT_SYMBOL_GPL +0x11e42007 kernel_getpeername vmlinux EXPORT_SYMBOL +0x37b5b843 sas_port_add_phy vmlinux EXPORT_SYMBOL +0x1f781d68 drm_default_rgb_quant_range vmlinux EXPORT_SYMBOL +0x5407ae9e drm_dp_get_dual_mode_type_name vmlinux EXPORT_SYMBOL +0xcecabc7d serial8250_do_set_termios vmlinux EXPORT_SYMBOL +0xda02c254 clk_register_fixed_rate_with_accuracy vmlinux EXPORT_SYMBOL_GPL +0x4ac7774c sysfs_chmod_file vmlinux EXPORT_SYMBOL_GPL +0xbccfd4d8 register_oldmem_pfn_is_ram vmlinux EXPORT_SYMBOL_GPL +0xea754cbe find_module vmlinux EXPORT_SYMBOL_GPL +0xd88e2b74 rt_mutex_lock vmlinux EXPORT_SYMBOL_GPL +0xb49de221 __tracepoint_sched_overutilized_tp vmlinux EXPORT_SYMBOL_GPL +0x9e6cec6d ib_sa_service_rec_query drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2bb32529 ibnl_put_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x1e979047 vfio_register_notifier drivers/vfio/vfio EXPORT_SYMBOL +0xc0dc1b7d l3mdev_link_scope_lookup vmlinux EXPORT_SYMBOL_GPL +0x821cf59d alloc_fcdev vmlinux EXPORT_SYMBOL +0x4eb58cb5 skb_complete_wifi_ack vmlinux EXPORT_SYMBOL_GPL +0x75b5c964 input_event vmlinux EXPORT_SYMBOL +0x190c00c4 dev_get_regmap vmlinux EXPORT_SYMBOL_GPL +0x74f796d1 pm_runtime_suspended_time vmlinux EXPORT_SYMBOL_GPL +0xfec38ab5 mipi_dsi_device_unregister vmlinux EXPORT_SYMBOL +0x06536e27 of_clk_hw_onecell_get vmlinux EXPORT_SYMBOL_GPL +0x5d8741f1 devm_pinctrl_put vmlinux EXPORT_SYMBOL_GPL +0x2897561a devm_pinctrl_get vmlinux EXPORT_SYMBOL_GPL +0x11f7ed4c hex_to_bin vmlinux EXPORT_SYMBOL +0xc8827b75 sysctl_vals vmlinux EXPORT_SYMBOL +0x19230142 simple_pin_fs vmlinux EXPORT_SYMBOL +0x5635a60a vmalloc_user vmlinux EXPORT_SYMBOL +0x00318772 posix_clock_unregister vmlinux EXPORT_SYMBOL_GPL +0xa139d1ff kthread_create_worker_on_cpu vmlinux EXPORT_SYMBOL +0x7257beee lc_element_by_index lib/lru_cache EXPORT_SYMBOL +0x5b899639 rxrpc_kernel_end_call net/rxrpc/rxrpc EXPORT_SYMBOL +0x570e3441 xprt_pin_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf7c1f573 nf_ct_nat_ext_add net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xa9c0ec8c vhost_enqueue_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xff3d955e ib_unregister_device_queued drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x272a8933 udp_memory_allocated vmlinux EXPORT_SYMBOL +0x24c8e482 xt_copy_counters_from_user vmlinux EXPORT_SYMBOL_GPL +0xed1259ed bpf_prog_create_from_user vmlinux EXPORT_SYMBOL_GPL +0x895ce93a sock_no_sendpage vmlinux EXPORT_SYMBOL +0x7617ab38 thermal_generate_netlink_event vmlinux EXPORT_SYMBOL_GPL +0x3258f843 ttm_bo_synccpu_write_grab vmlinux EXPORT_SYMBOL +0x395fc01a acpi_dev_resource_address_space vmlinux EXPORT_SYMBOL_GPL +0xe4329092 __ctzdi2 vmlinux EXPORT_SYMBOL +0x6fd9c35a __clzdi2 vmlinux EXPORT_SYMBOL +0xa579548d blk_pre_runtime_suspend vmlinux EXPORT_SYMBOL +0xe4bbc1dd kimage_voffset vmlinux EXPORT_SYMBOL +0x11fcfd5b nci_hci_send_event net/nfc/nci/nci EXPORT_SYMBOL +0x194e2efb mdev_get_iommu_device drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x3c52e37a rdev_set_badblocks drivers/md/md-mod EXPORT_SYMBOL_GPL +0xccc90e97 mlx5_free_bfreg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x2816a2df get_current_tty vmlinux EXPORT_SYMBOL_GPL +0x0cc4b4b6 
crc_ccitt_false vmlinux EXPORT_SYMBOL +0x198e6145 proc_create_data vmlinux EXPORT_SYMBOL +0xf1b7cd05 filemap_fdatawait_range_keep_errors vmlinux EXPORT_SYMBOL +0x696a6663 filemap_range_has_page vmlinux EXPORT_SYMBOL +0x689978c3 param_ops_string vmlinux EXPORT_SYMBOL +0xd91319d6 raid6_gfmul lib/raid6/raid6_pq EXPORT_SYMBOL +0x13055a9a nci_conn_max_data_pkt_payload_size net/nfc/nci/nci EXPORT_SYMBOL +0x32446896 l2tp_tunnel_get_session net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xf0fe2e72 l2tp_tunnel_get net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x073c9e35 sunrpc_cache_register_pipefs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3971b4df snd_ecards_limit sound/core/snd EXPORT_SYMBOL +0xd47da9d5 unregister_gadget_item drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xf501d530 nfs_getattr fs/nfs/nfs EXPORT_SYMBOL_GPL +0xc319595a nfs_setattr fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdf7fa33b __tracepoint_tcp_send_reset vmlinux EXPORT_SYMBOL_GPL +0x51199396 kernel_getsockname vmlinux EXPORT_SYMBOL +0x4cf9fdaa kernel_sendmsg vmlinux EXPORT_SYMBOL +0x021c12fb uart_register_driver vmlinux EXPORT_SYMBOL +0x41628a87 divider_round_rate_parent vmlinux EXPORT_SYMBOL_GPL +0x39b52d19 __bitmap_and vmlinux EXPORT_SYMBOL +0x0641307b lc_destroy lib/lru_cache EXPORT_SYMBOL +0xc32b59ff ib_destroy_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xcb99dc63 mlx5_eq_get_eqe drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xd06f23d4 mlx4_eq_get_irq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xfe306c22 xfrm_alloc_spi vmlinux EXPORT_SYMBOL +0x6dc8a381 nvmem_device_put vmlinux EXPORT_SYMBOL_GPL +0x97f3a2b3 nvmem_device_get vmlinux EXPORT_SYMBOL_GPL +0x7fa1dfd6 hisi_sas_probe vmlinux EXPORT_SYMBOL_GPL +0x84bf2d68 fwnode_remove_software_node vmlinux EXPORT_SYMBOL_GPL +0xc32042d4 drm_fb_helper_defio_init vmlinux EXPORT_SYMBOL +0xcefbfd8b drm_atomic_helper_resume vmlinux EXPORT_SYMBOL +0x2df67af7 tpm_get_random vmlinux EXPORT_SYMBOL_GPL +0xa50326e6 pci_enable_sriov vmlinux EXPORT_SYMBOL_GPL +0xb62b74af refcount_dec_and_test_checked vmlinux EXPORT_SYMBOL +0x9b991f65 blk_rq_err_bytes vmlinux EXPORT_SYMBOL_GPL +0x60c3fef7 crypto_alloc_sync_skcipher vmlinux EXPORT_SYMBOL_GPL +0xc77de193 crypto_blkcipher_type vmlinux EXPORT_SYMBOL_GPL +0x6e3facae nfc_targets_found net/nfc/nfc EXPORT_SYMBOL +0x4502c65a asc2ax net/ax25/ax25 EXPORT_SYMBOL +0x9b7c44b6 bch_bset_build_written_tree drivers/md/bcache/bcache EXPORT_SYMBOL +0xe37dd406 hinic_func_max_nic_qnum drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xcafd1a52 hinic_slq_alloc drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x79b97370 hnae3_unregister_ae_dev drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL +0x3e9bad0d fcoe_get_lesb drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x6c0b643e nf_log_packet vmlinux EXPORT_SYMBOL +0x5efa53ca dev_remove_pack vmlinux EXPORT_SYMBOL +0xc7dbbfab of_get_next_cpu_node vmlinux EXPORT_SYMBOL +0x0f7ca236 dmi_memdev_name vmlinux EXPORT_SYMBOL_GPL +0x059a261d led_set_brightness_nopm vmlinux EXPORT_SYMBOL_GPL +0x55e5c9a2 i2c_bit_algo vmlinux EXPORT_SYMBOL +0x2c256e1f input_scancode_to_scalar vmlinux EXPORT_SYMBOL +0x0991a167 spi_get_next_queued_message vmlinux EXPORT_SYMBOL_GPL +0x80cc612b ata_eh_freeze_port vmlinux EXPORT_SYMBOL_GPL +0x098dd227 __devm_regmap_init vmlinux EXPORT_SYMBOL_GPL +0xf9dcca96 drm_gem_dumb_map_offset vmlinux EXPORT_SYMBOL_GPL +0x759bfe36 btree_destroy vmlinux EXPORT_SYMBOL_GPL +0xd8bcc3ef rq_flush_dcache_pages vmlinux EXPORT_SYMBOL_GPL +0xf1adfdc6 blkcipher_walk_virt vmlinux 
EXPORT_SYMBOL_GPL +0x4c36828a posix_lock_file vmlinux EXPORT_SYMBOL +0x71a672ef dmam_pool_destroy vmlinux EXPORT_SYMBOL +0x6da043b0 set_page_dirty vmlinux EXPORT_SYMBOL +0xf60f9f89 kthread_destroy_worker vmlinux EXPORT_SYMBOL +0x2a91bb33 ib_cache_gid_type_str drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xace9b57b dm_bio_prison_destroy_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x3c8c6b0e mlx5_cmd_init drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7cf52901 ir_raw_gen_manchester vmlinux EXPORT_SYMBOL +0x784cac32 acpi_match_device vmlinux EXPORT_SYMBOL_GPL +0x12285f09 cryptd_alloc_aead vmlinux EXPORT_SYMBOL_GPL +0x5831e062 cpus_read_trylock vmlinux EXPORT_SYMBOL_GPL +0xd384c683 p9stat_free net/9p/9pnet EXPORT_SYMBOL +0xf5e0f4cf ubi_do_get_device_info drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0xe2b51bff dm_rh_inc_pending drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xa35ff830 mlx4_INIT_PORT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe4c7b15e hinic_get_rq_local_ci drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xec367346 fcoe_transport_attach drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0xe02e480f st21nfca_hci_probe drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x2aed7ebb async_gen_syndrome crypto/async_tx/async_pq EXPORT_SYMBOL_GPL +0x58c88ff2 o2hb_get_all_regions fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x8cfdc9ec udp_disconnect vmlinux EXPORT_SYMBOL +0x2397e2e0 dev_mc_unsync vmlinux EXPORT_SYMBOL +0x23919ef8 dev_uc_unsync vmlinux EXPORT_SYMBOL +0xb163e40f devm_nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL +0x0f414879 usb_choose_configuration vmlinux EXPORT_SYMBOL_GPL +0x5f595b1f usb_free_streams vmlinux EXPORT_SYMBOL_GPL +0x1d8e8352 ata_sff_prereset vmlinux EXPORT_SYMBOL_GPL +0xcad2f56e regcache_cache_only vmlinux EXPORT_SYMBOL_GPL +0x9b92d16e pinctrl_gpio_set_config vmlinux EXPORT_SYMBOL_GPL +0x7b4c9ba9 sbitmap_queue_min_shallow_depth vmlinux EXPORT_SYMBOL_GPL +0x916b1713 ip_set_get_byname net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xac52632e mtd_add_partition drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x5f3d8d74 hinic_intr_num drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x78aed958 addrconf_prefix_rcv_add_addr vmlinux EXPORT_SYMBOL_GPL +0xa7c94f1d xt_compat_lock vmlinux EXPORT_SYMBOL_GPL +0x5f9dbb69 input_set_timestamp vmlinux EXPORT_SYMBOL +0x46ef6f9f phy_set_asym_pause vmlinux EXPORT_SYMBOL +0xaf8120db fwnode_graph_get_remote_port vmlinux EXPORT_SYMBOL_GPL +0x4a40510b bus_get_kset vmlinux EXPORT_SYMBOL_GPL +0xdb69e476 drm_mode_create_colorspace_property vmlinux EXPORT_SYMBOL +0x11ba004e drm_gem_fb_simple_display_pipe_prepare_fb vmlinux EXPORT_SYMBOL +0xf67411a7 regulator_set_active_discharge_regmap vmlinux EXPORT_SYMBOL_GPL +0x7d12d76d acpi_get_parent vmlinux EXPORT_SYMBOL +0x077c1735 fb_pan_display vmlinux EXPORT_SYMBOL +0x2b21b430 fb_show_logo vmlinux EXPORT_SYMBOL +0x7ed36522 blk_mq_virtio_map_queues vmlinux EXPORT_SYMBOL_GPL +0x56249086 __register_chrdev vmlinux EXPORT_SYMBOL +0xd39e9848 put_itimerspec64 vmlinux EXPORT_SYMBOL_GPL +0x59e640c0 halt_poll_ns vmlinux EXPORT_SYMBOL_GPL +0xb54676fa ceph_msg_type_name net/ceph/libceph EXPORT_SYMBOL +0xe5919cb1 xdr_encode_opaque net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x613b1e2e ib_is_mad_class_rmpp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4e90435c ib_sa_free_multicast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe23aa988 mlxsw_core_resources_query drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xe1f79cc0 iscsi_session_recovery_timedout 
drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xc6a494ea rtc_class_close vmlinux EXPORT_SYMBOL_GPL
+0x033832c7 usb_amd_hang_symptom_quirk vmlinux EXPORT_SYMBOL_GPL
+0x002b5ede drm_atomic_set_mode_for_crtc vmlinux EXPORT_SYMBOL
+0x2cf73cb7 kstrtoll_from_user vmlinux EXPORT_SYMBOL
+0xf1969a8e __usecs_to_jiffies vmlinux EXPORT_SYMBOL
+0x7f02188f __msecs_to_jiffies vmlinux EXPORT_SYMBOL
+0xffb5e612 sched_trace_rq_avg_irq vmlinux EXPORT_SYMBOL_GPL
+0x959bb381 svc_max_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6d7b02b0 snd_rawmidi_new sound/core/snd-rawmidi EXPORT_SYMBOL
+0xd8aa4284 dm_rh_region_context drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x77bd8844 vlan_vids_add_by_dev vmlinux EXPORT_SYMBOL
+0x483ee9b8 devm_of_platform_populate vmlinux EXPORT_SYMBOL_GPL
+0xdc14eda7 pci_pci_problems vmlinux EXPORT_SYMBOL
+0xb535549d pcie_capability_read_dword vmlinux EXPORT_SYMBOL
+0x040b96f7 blk_queue_stack_limits vmlinux EXPORT_SYMBOL
+0xefa18228 drop_super_exclusive vmlinux EXPORT_SYMBOL
+0x0148a840 free_vm_area vmlinux EXPORT_SYMBOL_GPL
+0x2cd96c7d tracing_snapshot_cond_disable vmlinux EXPORT_SYMBOL_GPL
+0x9feaf287 sonet_subtract_stats net/atm/atm EXPORT_SYMBOL
+0xaf934e73 nf_conntrack_free net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xdb2a3162 macvlan_common_newlink drivers/net/macvlan EXPORT_SYMBOL_GPL
+0xd8560185 nfs41_sequence_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x092cf98e fscache_object_sleep_till_congested fs/fscache/fscache EXPORT_SYMBOL_GPL
+0x2e678211 xas_find_conflict vmlinux EXPORT_SYMBOL_GPL
+0xe394701d udp_lib_getsockopt vmlinux EXPORT_SYMBOL
+0xf6365203 udp_lib_setsockopt vmlinux EXPORT_SYMBOL
+0x7bce4603 xt_data_to_user vmlinux EXPORT_SYMBOL_GPL
+0x81533963 sysfs_format_mac vmlinux EXPORT_SYMBOL
+0x2836032c get_net_ns_by_fd vmlinux EXPORT_SYMBOL_GPL
+0x3b85057c sk_send_sigurg vmlinux EXPORT_SYMBOL
+0x4b5fd49e dm_kcopyd_do_callback vmlinux EXPORT_SYMBOL
+0xf98cfa69 ptp_clock_event vmlinux EXPORT_SYMBOL
+0x4482e5e2 mdiobus_alloc_size vmlinux EXPORT_SYMBOL
+0xfbfd0876 spi_bus_lock vmlinux EXPORT_SYMBOL_GPL
+0xe6b6f7b0 sas_ata_schedule_reset vmlinux EXPORT_SYMBOL_GPL
+0x038cd9ef ata_eh_thaw_port vmlinux EXPORT_SYMBOL_GPL
+0xb703ddd0 drm_dp_downstream_debug vmlinux EXPORT_SYMBOL
+0x892e4e5e regulator_set_current_limit_regmap vmlinux EXPORT_SYMBOL_GPL
+0x3d83c94b pnp_is_active vmlinux EXPORT_SYMBOL
+0x881a07ac proc_set_size vmlinux EXPORT_SYMBOL
+0xbdc7a050 atalk_find_dev_addr net/appletalk/appletalk EXPORT_SYMBOL
+0x2adee13f dm_btree_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xbbb5df05 dm_array_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x697bb311 mlx4_slave_convert_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x6e9f44ad pm_generic_poweroff_noirq vmlinux EXPORT_SYMBOL_GPL
+0x7c1c6e85 drm_sched_init vmlinux EXPORT_SYMBOL
+0x0fd60df2 drm_get_connector_status_name vmlinux EXPORT_SYMBOL
+0x49bdaa57 drm_gem_object_release vmlinux EXPORT_SYMBOL
+0x91a88e57 drop_super vmlinux EXPORT_SYMBOL
+0xc5850110 printk vmlinux EXPORT_SYMBOL
+0x41ecf87a base_inv_true_key lib/test_static_key_base EXPORT_SYMBOL_GPL
+0xf961eb2c vhost_add_used_and_signal drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x2b946ed9 md_run drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x39e05ac3 nfs_idmap_cache_timeout fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xa2bbe0fc devlink_health_reporter_create vmlinux EXPORT_SYMBOL_GPL
+0xafa375ab flow_hash_from_keys vmlinux EXPORT_SYMBOL
+0xe1a8d7c9 net_rwsem vmlinux EXPORT_SYMBOL_GPL
+0x2208f837 sk_alloc vmlinux EXPORT_SYMBOL
+0xc26de416 sas_is_tlr_enabled vmlinux EXPORT_SYMBOL_GPL
+0xfd056437 ahci_init_controller vmlinux EXPORT_SYMBOL_GPL
+0xb21016eb sata_std_hardreset vmlinux EXPORT_SYMBOL_GPL
+0x7fb5d62e bus_unregister_notifier vmlinux EXPORT_SYMBOL_GPL
+0x82c00143 drm_property_add_enum vmlinux EXPORT_SYMBOL
+0x402fd7bb __drm_atomic_helper_plane_destroy_state vmlinux EXPORT_SYMBOL
+0x7f928743 drm_helper_resume_force_mode vmlinux EXPORT_SYMBOL
+0x82b44a34 regulator_disable vmlinux EXPORT_SYMBOL_GPL
+0xa21455fe try_to_release_page vmlinux EXPORT_SYMBOL
+0xf54823ac synchronize_srcu vmlinux EXPORT_SYMBOL_GPL
+0x498e9128 ZSTD_findDecompressedSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x8e65e5f3 rpcauth_lookup_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x15bafe29 unregister_md_cluster_operations drivers/md/md-mod EXPORT_SYMBOL
+0xae83acd5 spi_dv_device drivers/scsi/scsi_transport_spi EXPORT_SYMBOL
+0x07a26aac nfs_commitdata_release fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x6f7737d5 kobject_add vmlinux EXPORT_SYMBOL
+0xe3cd5fae klist_iter_init vmlinux EXPORT_SYMBOL_GPL
+0x5611ea5c usb_gadget_disconnect vmlinux EXPORT_SYMBOL_GPL
+0x60ab6954 phy_restore_page vmlinux EXPORT_SYMBOL_GPL
+0x9ee62648 dma_buf_vmap vmlinux EXPORT_SYMBOL_GPL
+0x4152fcbc dma_buf_mmap vmlinux EXPORT_SYMBOL_GPL
+0xc8ef146b dma_buf_kmap vmlinux EXPORT_SYMBOL_GPL
+0x9187efd6 __platform_driver_register vmlinux EXPORT_SYMBOL_GPL
+0x781c0978 mipi_dsi_attach vmlinux EXPORT_SYMBOL
+0x35afae24 __drm_puts_seq_file vmlinux EXPORT_SYMBOL
+0xacbdadb8 drm_fb_helper_init vmlinux EXPORT_SYMBOL
+0x73011db0 drm_dp_bw_code_to_link_rate vmlinux EXPORT_SYMBOL
+0x82987001 pci_disable_msix vmlinux EXPORT_SYMBOL
+0xb9fb6839 bio_list_copy_data vmlinux EXPORT_SYMBOL
+0x10138352 tracing_on vmlinux EXPORT_SYMBOL_GPL
+0x3a3724cc parport_register_device drivers/parport/parport EXPORT_SYMBOL
+0x73193ce9 __tracepoint_mlx5_fw drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x2a9c17ec xfrm_audit_policy_add vmlinux EXPORT_SYMBOL_GPL
+0x315282b3 sock_gen_put vmlinux EXPORT_SYMBOL_GPL
+0xfa75cff8 thermal_zone_get_temp vmlinux EXPORT_SYMBOL_GPL
+0x0dc05d7b platform_driver_unregister vmlinux EXPORT_SYMBOL_GPL
+0x89e340cf acpi_bus_get_ejd vmlinux EXPORT_SYMBOL_GPL
+0x34d00e35 fscrypt_enqueue_decrypt_bio vmlinux EXPORT_SYMBOL
+0x1dee7e3b vhost_signal drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x3bcd5645 nfs_file_release fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x9cab34a6 rfkill_set_led_trigger_name vmlinux EXPORT_SYMBOL
+0xaa6f23ad rfkill_get_led_trigger_name vmlinux EXPORT_SYMBOL
+0x6d824271 xfrm_state_mtu vmlinux EXPORT_SYMBOL_GPL
+0xf68ba113 gnet_stats_copy_queue vmlinux EXPORT_SYMBOL
+0x1243d868 of_match_device vmlinux EXPORT_SYMBOL
+0x714ffbfb of_node_name_eq vmlinux EXPORT_SYMBOL
+0x97521036 hid_ignore vmlinux EXPORT_SYMBOL_GPL
+0x0ad426bf usb_serial_generic_wait_until_sent vmlinux EXPORT_SYMBOL_GPL
+0x356d2d88 xhci_gen_setup vmlinux EXPORT_SYMBOL_GPL
+0x8a3f21d7 usb_get_urb vmlinux EXPORT_SYMBOL_GPL
+0x5746e804 pci_scan_single_device vmlinux EXPORT_SYMBOL
+0x461ac773 kstrtol_from_user vmlinux EXPORT_SYMBOL
+0x011ff478 simple_dir_operations vmlinux EXPORT_SYMBOL
+0x162893fd hashlen_string vmlinux EXPORT_SYMBOL
+0x5af1e3b9 list_lru_del vmlinux EXPORT_SYMBOL_GPL
+0xfe4c36e2 l2tp_session_set_header_len net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0xec8beba6 nf_ct_expect_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xedab174d mlx5_fpga_get_sbu_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x277c6896 inet_frag_reasm_finish vmlinux EXPORT_SYMBOL
+0xc2a814db tcp_memory_pressure vmlinux EXPORT_SYMBOL_GPL
+0xa467a944 devlink_resource_size_get vmlinux EXPORT_SYMBOL_GPL
+0x28114268 drm_color_lut_check vmlinux EXPORT_SYMBOL
+0x840342c6 sgl_free vmlinux EXPORT_SYMBOL
+0x04674aa7 dqget vmlinux EXPORT_SYMBOL
+0x3146bbb7 dqput vmlinux EXPORT_SYMBOL
+0x4bd0e7dd always_delete_dentry vmlinux EXPORT_SYMBOL
+0x67a9941a setattr_copy vmlinux EXPORT_SYMBOL
+0xb4ea7cf7 kgdb_connected vmlinux EXPORT_SYMBOL_GPL
+0xa37fd0cb nfc_hci_driver_failure net/nfc/hci/hci EXPORT_SYMBOL
+0x79932243 br_multicast_list_adjacent net/bridge/bridge EXPORT_SYMBOL_GPL
+0xec2bec0a line6_resume sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL
+0x6090c0cb snd_register_oss_device sound/core/snd EXPORT_SYMBOL
+0x24a94b26 snd_info_get_line sound/core/snd EXPORT_SYMBOL
+0xb48aa87b mtd_point drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x72795279 iscsit_build_logout_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x3e6d6efc icmpv6_ndo_send vmlinux EXPORT_SYMBOL
+0x4b296e30 inet_frags_fini vmlinux EXPORT_SYMBOL
+0x65d9e877 cpufreq_register_notifier vmlinux EXPORT_SYMBOL
+0x2e18d28a scsi_queue_work vmlinux EXPORT_SYMBOL_GPL
+0x64bc557b ahci_qc_issue vmlinux EXPORT_SYMBOL_GPL
+0xab8ae765 iommu_capable vmlinux EXPORT_SYMBOL_GPL
+0xa7e6d8b5 gpiod_set_raw_value vmlinux EXPORT_SYMBOL_GPL
+0x9be7bde4 security_tun_dev_attach vmlinux EXPORT_SYMBOL
+0xf346231f seq_list_start_head vmlinux EXPORT_SYMBOL
+0x36b0c8dd mmu_notifier_register vmlinux EXPORT_SYMBOL_GPL
+0x9f6d78fc kvm_get_pfn vmlinux EXPORT_SYMBOL_GPL
+0xbbba652e iscsit_check_dataout_hdr drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xfbd03183 __tracepoint_bcache_btree_node_free drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x71453f87 gether_connect drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0xe7def4b1 mlx5_nic_vport_query_local_lb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x192f9e68 mlx4_SET_VPORT_QOS_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x76d3d058 mlx4_SET_VPORT_QOS_get drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x0b010634 nfs4_setup_sequence fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb79e547e dns_query vmlinux EXPORT_SYMBOL
+0xfcfccffb netdev_set_default_ethtool_ops vmlinux EXPORT_SYMBOL_GPL
+0x1eba5e1e netif_device_attach vmlinux EXPORT_SYMBOL
+0x813e6d66 netif_device_detach vmlinux EXPORT_SYMBOL
+0x106a2db6 device_rename vmlinux EXPORT_SYMBOL_GPL
+0x7f220ca1 mipi_dsi_dcs_set_display_on vmlinux EXPORT_SYMBOL
+0x5b3326ff drm_i2c_encoder_restore vmlinux EXPORT_SYMBOL
+0xce6477b2 acpi_pci_osc_control_set vmlinux EXPORT_SYMBOL
+0xad3670eb simple_attr_read vmlinux EXPORT_SYMBOL_GPL
+0xbb639b81 relay_flush vmlinux EXPORT_SYMBOL_GPL
+0xc12435e3 rpc_calc_rto net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8d35a728 kernel_bind vmlinux EXPORT_SYMBOL
+0xde5e63e1 ttm_sg_tt_init vmlinux EXPORT_SYMBOL
+0x6d3a5548 drm_atomic_helper_swap_state vmlinux EXPORT_SYMBOL
+0x7006586e iommu_get_group_resv_regions vmlinux EXPORT_SYMBOL_GPL
+0xcc248d26 serial8250_suspend_port vmlinux EXPORT_SYMBOL
+0x4e6e8ea7 fg_console vmlinux EXPORT_SYMBOL
+0xcd279169 nla_find vmlinux EXPORT_SYMBOL
+0x86a7d883 kernel_read vmlinux EXPORT_SYMBOL
+0xc8add232 ring_buffer_record_disable vmlinux EXPORT_SYMBOL_GPL
+0xdfed1cd3 smp_call_function_many_async vmlinux EXPORT_SYMBOL_GPL
+0xffae8e8b nsecs_to_jiffies vmlinux EXPORT_SYMBOL_GPL
+0x2aa35350 dma_get_merge_boundary vmlinux EXPORT_SYMBOL_GPL
+0x87d786ba __devm_release_region vmlinux EXPORT_SYMBOL
+0x578a408b ZSTD_initDCtx lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x0e27a2dd ZSTD_initCCtx lib/zstd/zstd_compress EXPORT_SYMBOL
+0xae001cad parport_write drivers/parport/parport EXPORT_SYMBOL
+0x9642e41b fl6_merge_options vmlinux EXPORT_SYMBOL_GPL
+0x6532c50f device_link_del vmlinux EXPORT_SYMBOL_GPL
+0x72f89e0a device_link_add vmlinux EXPORT_SYMBOL_GPL
+0xeb9f43bc pci_bus_find_capability vmlinux EXPORT_SYMBOL
+0xd437ae9b dax_iomap_fault vmlinux EXPORT_SYMBOL_GPL
+0x1032b126 kvm_is_visible_gfn vmlinux EXPORT_SYMBOL_GPL
+0xe691ac7f ZSTD_decompressBegin lib/zstd/zstd_decompress EXPORT_SYMBOL
+0xdba4feef base_inv_old_false_key lib/test_static_key_base EXPORT_SYMBOL_GPL
+0xe7f88f10 ceph_osdc_watch net/ceph/libceph EXPORT_SYMBOL
+0x6dc9c259 tipc_dump_done net/tipc/tipc EXPORT_SYMBOL
+0x7e1dba31 rxrpc_kernel_send_data net/rxrpc/rxrpc EXPORT_SYMBOL
+0x117b5cda nf_ct_l4proto_find net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x9f0dd49c mdev_uuid drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x869236ad mlx5_query_nic_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x7e601f11 nfs4_delete_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x06dc0460 vlan_dev_vlan_id vmlinux EXPORT_SYMBOL
+0x59710473 __skb_try_recv_datagram vmlinux EXPORT_SYMBOL
+0x44b9729d input_set_max_poll_interval vmlinux EXPORT_SYMBOL
+0x4fdaa9aa input_set_min_poll_interval vmlinux EXPORT_SYMBOL
+0x3f55dd81 ata_base_port_ops vmlinux EXPORT_SYMBOL_GPL
+0x54e9fd3c drm_gem_prime_export vmlinux EXPORT_SYMBOL
+0x960faab3 security_skb_classify_flow vmlinux EXPORT_SYMBOL
+0xfc695572 pagevec_lookup_range_tag vmlinux EXPORT_SYMBOL
+0x1f7676e0 sunrpc_cache_unregister_pipefs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf3b4d4ae ip_set_alloc net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x8368118b transport_register_session drivers/target/target_core_mod EXPORT_SYMBOL
+0x8e07e4fd pnfs_generic_scan_commit_lists fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xce646eae dst_cache_get_ip4 vmlinux EXPORT_SYMBOL_GPL
+0xb46b4ffc ehci_hub_control vmlinux EXPORT_SYMBOL_GPL
+0x034b45ee __class_register vmlinux EXPORT_SYMBOL_GPL
+0xd147b292 drm_sched_suspend_timeout vmlinux EXPORT_SYMBOL
+0xe891c01e drm_atomic_state_default_release vmlinux EXPORT_SYMBOL
+0xdf6da3a1 gpiochip_populate_parent_fwspec_twocell vmlinux EXPORT_SYMBOL_GPL
+0x6a5cb5ee __get_free_pages vmlinux EXPORT_SYMBOL
+0x65cbeb22 nfc_driver_failure net/nfc/nfc EXPORT_SYMBOL
+0x68295a20 rdma_read_gids drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x1c080b9b ubi_leb_read_sg drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL
+0x3bef2f73 iscsi_pool_init drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0xac002bd7 rawv6_mh_filter_register vmlinux EXPORT_SYMBOL
+0x74250390 nf_unregister_net_hook vmlinux EXPORT_SYMBOL
+0xc29c94bf usb_free_coherent vmlinux EXPORT_SYMBOL_GPL
+0x4099f919 tun_ptr_free vmlinux EXPORT_SYMBOL_GPL
+0xadbeed61 mipi_dsi_packet_format_is_long vmlinux EXPORT_SYMBOL
+0x603b57f2 pciserial_init_ports vmlinux EXPORT_SYMBOL_GPL
+0x41ee6b05 pcie_has_flr vmlinux EXPORT_SYMBOL_GPL
+0x97adb487 utf8s_to_utf16s vmlinux EXPORT_SYMBOL
+0xb4b97c90 pvclock_gtod_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x37967fb4 ns_capable vmlinux EXPORT_SYMBOL
+0x2f832992 dm_cell_error drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xb04f56ab dm_bufio_read drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0x68b117dc rawv6_mh_filter_unregister vmlinux EXPORT_SYMBOL
+0x02748db2 ip_route_output_key_hash vmlinux EXPORT_SYMBOL_GPL
+0x9e3b9668 is_skb_forwardable vmlinux EXPORT_SYMBOL_GPL
+0x1912be44 clk_hw_register_clkdev vmlinux EXPORT_SYMBOL
+0xc47ce8ac acpi_subsys_poweroff vmlinux EXPORT_SYMBOL_GPL
+0x2c2aaa38 fb_deferred_io_init vmlinux EXPORT_SYMBOL_GPL
+0x67063911 debugfs_attr_read vmlinux EXPORT_SYMBOL_GPL
+0x92b9b180 slash_name vmlinux EXPORT_SYMBOL
+0x7bbccd05 nr_node_ids vmlinux EXPORT_SYMBOL
+0xbcc15e75 ktime_get_coarse_with_offset vmlinux EXPORT_SYMBOL_GPL
+0xb043d659 sched_trace_cfs_rq_avg vmlinux EXPORT_SYMBOL_GPL
+0x6df1aaf1 kernel_sigaction vmlinux EXPORT_SYMBOL
+0xd06f1d82 register_mtd_chip_driver drivers/mtd/chips/chipreg EXPORT_SYMBOL
+0x943c7dc7 parport_ieee1284_write_compat drivers/parport/parport EXPORT_SYMBOL
+0xadde7ce5 dm_bio_prison_free_cell_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x46d3186b st_unregister drivers/misc/ti-st/st_drv EXPORT_SYMBOL_GPL
+0x02ab8a96 ipv6_specific vmlinux EXPORT_SYMBOL
+0xe0c9197e ipv4_specific vmlinux EXPORT_SYMBOL
+0xa6eafcaa nf_queue_entry_release_refs vmlinux EXPORT_SYMBOL_GPL
+0x67721453 proto_unregister vmlinux EXPORT_SYMBOL
+0x8a531943 i2c_new_dummy vmlinux EXPORT_SYMBOL_GPL
+0x899289d4 blackhole_netdev vmlinux EXPORT_SYMBOL
+0x1a6ae622 drm_atomic_normalize_zpos vmlinux EXPORT_SYMBOL
+0xcc4fadf6 drm_fb_helper_sys_read vmlinux EXPORT_SYMBOL
+0xf05406ce regulator_sync_voltage vmlinux EXPORT_SYMBOL_GPL
+0x5359d43e csum_and_copy_from_iter vmlinux EXPORT_SYMBOL
+0xfc148ba9 __fsnotify_inode_delete vmlinux EXPORT_SYMBOL_GPL
+0x5ad0e769 failover_register net/core/failover EXPORT_SYMBOL_GPL
+0xa0dedc3b deregister_mtd_parser drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0xb19fda8d capi_cmd2str drivers/isdn/capi/kernelcapi EXPORT_SYMBOL
+0x6b74a2d5 dm_rh_mark_nosync drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xc9989482 nfs_remount fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x1f9bca4c device_node_to_regmap vmlinux EXPORT_SYMBOL_GPL
+0x42319baa pci_disable_sriov vmlinux EXPORT_SYMBOL_GPL
+0x7bed0278 bsg_scsi_register_queue vmlinux EXPORT_SYMBOL_GPL
+0xe5f6997b blk_mq_free_tag_set vmlinux EXPORT_SYMBOL
+0xa2c74331 list_lru_count_node vmlinux EXPORT_SYMBOL_GPL
+0x46b16182 from_kprojid_munged vmlinux EXPORT_SYMBOL
+0x57baf885 ceph_str_hash net/ceph/libceph EXPORT_SYMBOL
+0xe30c68fd lowpan_register_netdevice net/6lowpan/6lowpan EXPORT_SYMBOL
+0xdab1e90e rpc_find_or_alloc_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x19c0ab0e rpcauth_unwrap_resp_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4506478a register_ip_vs_app net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x36a34e58 dm_array_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa00f576e mlx5_modify_nic_vport_mac_list drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x7b7fc86b flow_block_cb_setup_simple vmlinux EXPORT_SYMBOL
+0xb3184084 flow_rule_match_enc_ip vmlinux EXPORT_SYMBOL
+0x397f7d44 rtnl_af_register vmlinux EXPORT_SYMBOL_GPL
+0xda78c05a mdio_device_reset vmlinux EXPORT_SYMBOL
+0x33ef395a spi_statistics_add_transfer_stats vmlinux EXPORT_SYMBOL_GPL
+0xedcf81ce drm_dp_channel_eq_ok vmlinux EXPORT_SYMBOL
+0x84f69ca3 fb_sys_write vmlinux EXPORT_SYMBOL_GPL
+0x63cf4bb7 pci_num_vf vmlinux EXPORT_SYMBOL_GPL
+0x1a6546f9 fuse_dev_alloc_install vmlinux EXPORT_SYMBOL_GPL
+0xd58628a4 param_ops_long vmlinux EXPORT_SYMBOL
+0x01d5f261 rpc_add_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4dc78752 lapb_data_request net/lapb/lapb EXPORT_SYMBOL
+0xba8d1763 ib_send_cm_apr drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0xc2b0340b usb_composite_overwrite_options drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x70ad75fb radix_tree_lookup vmlinux EXPORT_SYMBOL
+0x796ec9a8 inet_csk_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0xf82be16e sata_scr_write vmlinux EXPORT_SYMBOL_GPL
+0x06f81bad drm_format_info_block_height vmlinux EXPORT_SYMBOL
+0x6f41a428 acpi_get_vendor_resource vmlinux EXPORT_SYMBOL
+0xd5346bfc acpi_get_possible_resources vmlinux EXPORT_SYMBOL
+0x3e7080cb mpi_read_from_buffer vmlinux EXPORT_SYMBOL_GPL
+0x4aea463f crc32_le_shift vmlinux EXPORT_SYMBOL
+0xacf4d843 match_strdup vmlinux EXPORT_SYMBOL
+0xc3805cd1 fs_ftype_to_dtype vmlinux EXPORT_SYMBOL_GPL
+0xb2ff3ad0 ring_buffer_free_read_page vmlinux EXPORT_SYMBOL_GPL
+0x7dbff292 rt_mutex_unlock vmlinux EXPORT_SYMBOL_GPL
+0x5fe93b77 nf_reject_ip6_tcphdr_put net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL
+0xef6c17f7 neigh_seq_next vmlinux EXPORT_SYMBOL
+0xc1d15a4c phylink_set_port_modes vmlinux EXPORT_SYMBOL_GPL
+0xa74ccf24 ahci_pmp_retry_srst_ops vmlinux EXPORT_SYMBOL_GPL
+0x1a411479 drm_syncobj_free vmlinux EXPORT_SYMBOL
+0x8ed338e0 badblocks_init vmlinux EXPORT_SYMBOL_GPL
+0x43566edb vfs_rename vmlinux EXPORT_SYMBOL
+0x001b9d1d generic_ro_fops vmlinux EXPORT_SYMBOL
+0x15af5338 nci_register_device net/nfc/nci/nci EXPORT_SYMBOL
+0x0d4902d8 ceph_client_addr net/ceph/libceph EXPORT_SYMBOL
+0xe7bc3072 usb_string_ids_n drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0x8f682109 m_can_class_unregister drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL
+0x740c5701 mlx5_fpga_sbu_conn_create drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xb342869a mlx5_cmd_alloc_uar drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4e6036af nexthop_select_path vmlinux EXPORT_SYMBOL_GPL
+0x6101a6e7 gnet_stats_copy_basic_hw vmlinux EXPORT_SYMBOL
+0xbc830d3f phy_save_page vmlinux EXPORT_SYMBOL_GPL
+0x30feaa24 iscsi_free_session vmlinux EXPORT_SYMBOL_GPL
+0x4f7fd62b pm_runtime_force_resume vmlinux EXPORT_SYMBOL_GPL
+0x5675af99 devm_pci_remap_cfgspace vmlinux EXPORT_SYMBOL
+0x54aac16c iomap_page_mkwrite vmlinux EXPORT_SYMBOL_GPL
+0x2b8ab2bf user_path_create vmlinux EXPORT_SYMBOL
+0x50d3bf89 kthread_bind vmlinux EXPORT_SYMBOL
+0x610ff434 vsock_remove_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x52e27f09 xprt_disconnect_done net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x2aa762a1 mlx5_query_nic_vport_promisc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xf806fec5 nvme_enable_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x8aaf227f ipv6_setsockopt vmlinux EXPORT_SYMBOL
+0x1ed8ddc2 dm_kcopyd_zero vmlinux EXPORT_SYMBOL
+0x29df5a12 usb_serial_port_softint vmlinux EXPORT_SYMBOL_GPL
+0x698409f2 phy_modify_mmd vmlinux EXPORT_SYMBOL_GPL
+0xf1c560a3 drm_property_destroy vmlinux EXPORT_SYMBOL
+0x84210ecb blk_queue_flag_test_and_set vmlinux EXPORT_SYMBOL_GPL
+0x6b48b340 forget_all_cached_acls vmlinux EXPORT_SYMBOL
+0x5ed06cf8 dccp_getsockopt net/dccp/dccp EXPORT_SYMBOL_GPL
+0xe0fef28a mlx5_fc_create drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x0160bfc4 mlx5_core_create_mkey_cb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xfd39bbde zpool_register_driver mm/zpool EXPORT_SYMBOL
+0xdebd1c65 inet_frag_find vmlinux EXPORT_SYMBOL
+0xec210055 inetdev_by_index vmlinux EXPORT_SYMBOL
+0xb109b263 __sk_backlog_rcv vmlinux EXPORT_SYMBOL
+0x842f046d usb_poison_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0x178b26ac drm_gem_vram_kunmap vmlinux EXPORT_SYMBOL
+0x6173d64f con_is_visible vmlinux EXPORT_SYMBOL
+0x0dba0868 devm_of_clk_add_hw_provider vmlinux EXPORT_SYMBOL_GPL
+0x230c034c acpi_dev_resource_interrupt vmlinux EXPORT_SYMBOL_GPL
+0xdf0f75c6 eventfd_signal vmlinux EXPORT_SYMBOL_GPL
+0xbc1844fc clocksource_unregister vmlinux EXPORT_SYMBOL
+0x92ee8b52 nfc_hci_send_event net/nfc/hci/hci EXPORT_SYMBOL
+0xcc86cde2 rxrpc_kernel_get_reply_time net/rxrpc/rxrpc EXPORT_SYMBOL
+0xf3f99595 fifo_set_limit vmlinux EXPORT_SYMBOL
+0x58b4645c dev_close_many vmlinux EXPORT_SYMBOL
+0x28b030d2 of_overlay_notifier_unregister vmlinux EXPORT_SYMBOL_GPL
+0x301ce68b phy_drivers_unregister vmlinux EXPORT_SYMBOL
+0x8d657e5e genphy_c45_check_and_restart_aneg vmlinux EXPORT_SYMBOL_GPL
+0x295c827b con_debug_enter vmlinux EXPORT_SYMBOL_GPL
+0xed55f929 acpi_os_unmap_generic_address vmlinux EXPORT_SYMBOL
+0xff71a2e7 pci_write_config_dword vmlinux EXPORT_SYMBOL
+0x599fb41c kvmalloc_node vmlinux EXPORT_SYMBOL
+0x0b6d0d83 filemap_fdatawrite vmlinux EXPORT_SYMBOL
+0x9cc4f70a register_pm_notifier vmlinux EXPORT_SYMBOL_GPL
+0x8823e31e rpc_clone_client_set_auth net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9ae974b1 mptscsih_event_process drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0xe9cc9672 iscsi_eh_session_reset drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x4c2054d7 ipmi_request_settime drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x807d2b2c xt_recseq vmlinux EXPORT_SYMBOL_GPL
+0x61f3adf9 dev_fill_metadata_dst vmlinux EXPORT_SYMBOL_GPL
+0x203ac72b dev_add_offload vmlinux EXPORT_SYMBOL
+0xa5976e4f dev_base_lock vmlinux EXPORT_SYMBOL
+0x50a3fc67 dm_hold vmlinux EXPORT_SYMBOL_GPL
+0x2bc41466 usb_del_gadget_udc vmlinux EXPORT_SYMBOL_GPL
+0x857f4ae5 mdiobus_is_registered_device vmlinux EXPORT_SYMBOL
+0x089b8407 __devm_regmap_init_mmio_clk vmlinux EXPORT_SYMBOL_GPL
+0x8e5ed512 drm_i2c_encoder_destroy vmlinux EXPORT_SYMBOL
+0xd25d4f74 console_blank_hook vmlinux EXPORT_SYMBOL
+0x7aa51698 devm_clk_register vmlinux EXPORT_SYMBOL_GPL
+0xe85f2123 acpi_tb_unload_table vmlinux EXPORT_SYMBOL
+0xfd0d3553 iov_iter_discard vmlinux EXPORT_SYMBOL
+0x247ef831 kdb_unregister vmlinux EXPORT_SYMBOL_GPL
+0x457a0f36 cgroup_get_from_fd vmlinux EXPORT_SYMBOL_GPL
+0x9183b5dc nfc_hci_unregister_device net/nfc/hci/hci EXPORT_SYMBOL
+0x47b21477 ceph_cls_lock net/ceph/libceph EXPORT_SYMBOL
+0x56782944 rds_send_path_drop_acked net/rds/rds EXPORT_SYMBOL_GPL
+0x9359c0ed mptscsih_resume drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x53279f0e net_failover_destroy drivers/net/net_failover EXPORT_SYMBOL_GPL
+0x4eb2692c hdlcdrv_register drivers/net/hamradio/hdlcdrv EXPORT_SYMBOL
+0x43f2aa63 xt_request_find_target vmlinux EXPORT_SYMBOL_GPL
+0x5a8f09ab tcf_action_set_ctrlact vmlinux EXPORT_SYMBOL
+0x33364f32 iommu_sva_get_pasid vmlinux EXPORT_SYMBOL_GPL
+0x455438e9 sg_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL
+0x8b585c29 __alloc_disk_node vmlinux EXPORT_SYMBOL
+0xc22a3091 vm_unmap_aliases vmlinux EXPORT_SYMBOL_GPL
+0xbaafad00 sctp_get_sctp_info net/sctp/sctp EXPORT_SYMBOL_GPL
+0x3dfc1499 __ib_create_cq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xbb8e52f8 dm_rh_delay drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xff584cea md_write_inc drivers/md/md-mod EXPORT_SYMBOL
+0x262f736a register_md_personality drivers/md/md-mod EXPORT_SYMBOL
+0xc5356478 mddev_unlock drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x9ab67e85 mlx5_db_alloc_node drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xe4a2620b st21nfca_im_send_dep_req drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0xbace43af st21nfca_im_send_atr_req drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0x00ab88d8 st21nfca_tm_send_dep_res drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL
+0x6e16b52a __tracepoint_pnfs_mds_fallback_read_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xc379552a br_ip6_fragment vmlinux EXPORT_SYMBOL_GPL
+0xe8f5fb24 xfrm_state_lookup_byspi vmlinux EXPORT_SYMBOL
+0x3a298ee8 netlink_capable vmlinux EXPORT_SYMBOL
+0x320ee11c gnet_stats_finish_copy vmlinux EXPORT_SYMBOL
+0xfc9ce079 skb_checksum_trimmed vmlinux EXPORT_SYMBOL
+0x6add5c9a dmi_find_device vmlinux EXPORT_SYMBOL
+0x02fc7cea scsi_get_device_flags_keyed vmlinux EXPORT_SYMBOL
+0x83c03d40 device_show_int vmlinux EXPORT_SYMBOL_GPL
+0x179aa5d1 init_iova_domain vmlinux EXPORT_SYMBOL_GPL
+0xf14c0977 uart_handle_cts_change vmlinux EXPORT_SYMBOL_GPL
+0xd4736295 uart_handle_dcd_change vmlinux EXPORT_SYMBOL_GPL
+0x1f4cd130 free_inode_nonrcu vmlinux EXPORT_SYMBOL
+0xcb65ab5b dma_pool_create vmlinux EXPORT_SYMBOL
+0xf0e9c79a bpf_map_inc_with_uref vmlinux EXPORT_SYMBOL_GPL
+0x2ea52b08 ceph_osdc_start_request net/ceph/libceph EXPORT_SYMBOL
+0x0b36102c dm_array_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa64e9761 gether_disconnect drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0x72eafa54 gether_set_gadget drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0x16260659 __ip6_local_out vmlinux EXPORT_SYMBOL_GPL
+0x4fa3e5a7 napi_complete_done vmlinux EXPORT_SYMBOL
+0x41bb486d of_resolve_phandles vmlinux EXPORT_SYMBOL_GPL
+0xde62e1cc usb_hcd_pci_probe vmlinux EXPORT_SYMBOL_GPL
+0xddef37f8 drm_sched_entity_destroy vmlinux EXPORT_SYMBOL
+0xbd19737c tty_name vmlinux EXPORT_SYMBOL
+0x548ba223 get_disk_and_module vmlinux EXPORT_SYMBOL
+0x99b1d8bc vfs_parse_fs_param vmlinux EXPORT_SYMBOL
+0xb5aa7165 dma_pool_destroy vmlinux EXPORT_SYMBOL
+0x45560b6b irq_create_strict_mappings vmlinux EXPORT_SYMBOL_GPL
+0x3d1b9ceb vcpu_put vmlinux EXPORT_SYMBOL_GPL
+0xff6104d0 snd_pcm_rate_bit_to_rate sound/core/snd-pcm EXPORT_SYMBOL
+0x58356972 usb_assign_descriptors drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xed0acf20 mlx4_vf_set_enable_smi_admin drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x69fe1276 mlx4_vf_get_enable_smi_admin drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xe8f6ca8e udp6_csum_init vmlinux EXPORT_SYMBOL
+0x395c1cfe dst_cache_set_ip4 vmlinux EXPORT_SYMBOL_GPL
+0x4ceaee92 of_dev_put vmlinux EXPORT_SYMBOL
+0xfb29d29e of_dev_get vmlinux EXPORT_SYMBOL
+0x2d912bca dmi_get_bios_year vmlinux EXPORT_SYMBOL
+0x39752b10 power_supply_register vmlinux EXPORT_SYMBOL_GPL
+0xaafd4256 serial8250_register_8250_port vmlinux EXPORT_SYMBOL
+0x4cb75ac3 virtqueue_add_inbuf vmlinux EXPORT_SYMBOL_GPL
+0xc13a7ba6 __tracepoint_kmalloc vmlinux EXPORT_SYMBOL
+0xc0e2c5bb perf_event_release_kernel vmlinux EXPORT_SYMBOL_GPL
+0x8c03d20c destroy_workqueue vmlinux EXPORT_SYMBOL_GPL
+0x53ffd84a snd_ctl_get_preferred_subdevice sound/core/snd EXPORT_SYMBOL_GPL
+0x936918b4 mptscsih_host_reset drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0xb5a9c22c nfs_pgio_header_free fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x9447988f inet_twsk_put vmlinux EXPORT_SYMBOL_GPL
+0x32bd2c55 fib_default_rule_add vmlinux EXPORT_SYMBOL
+0xe8d0a4f3 usb_serial_generic_write_start vmlinux EXPORT_SYMBOL_GPL
+0xd129e788 ata_ehi_push_desc vmlinux EXPORT_SYMBOL_GPL
+0xd25a6da1 regcache_drop_region vmlinux EXPORT_SYMBOL_GPL
+0x377ff80c pm_generic_runtime_suspend vmlinux EXPORT_SYMBOL_GPL
+0x05f34402 do_SAK vmlinux EXPORT_SYMBOL
+0xb3f8dd58 clk_hw_register_fixed_factor vmlinux EXPORT_SYMBOL_GPL
+0x8eb16230 pci_load_and_free_saved_state vmlinux EXPORT_SYMBOL_GPL
+0xdb4a4b11 pci_save_state vmlinux EXPORT_SYMBOL
+0xa3c8855c fat_get_dotdot_entry vmlinux EXPORT_SYMBOL_GPL
+0x27df8bcb check_disk_change vmlinux EXPORT_SYMBOL
+0x73ee1083 writeback_inodes_sb vmlinux EXPORT_SYMBOL
+0xa2e1b3ef trace_printk_init_buffers vmlinux EXPORT_SYMBOL_GPL
+0x910a9d2a param_get_uint vmlinux EXPORT_SYMBOL
+0x9bc6b539 ceph_find_or_create_string net/ceph/libceph EXPORT_SYMBOL
+0xbeb335a1 wimax_msg_data vmlinux EXPORT_SYMBOL_GPL
+0xeca080c4 sas_rphy_add vmlinux EXPORT_SYMBOL
+0x8879fc76 ttm_bo_del_sub_from_lru vmlinux EXPORT_SYMBOL
+0x4edfe9b9 drm_mm_scan_color_evict vmlinux EXPORT_SYMBOL
+0x2ab7b21d elv_register vmlinux EXPORT_SYMBOL_GPL
+0xc599a772 security_xfrm_state_delete vmlinux EXPORT_SYMBOL
+0xd1893b92 compat_ptr_ioctl vmlinux EXPORT_SYMBOL
+0x3c185c61 page_put_link vmlinux EXPORT_SYMBOL
+0x593783eb release_pages vmlinux EXPORT_SYMBOL
+0x2663cb64 vsock_addr_equals_addr net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x1e16b33b p9_client_mkdir_dotl net/9p/9pnet EXPORT_SYMBOL
+0x16ccbb9a p9_client_mknod_dotl net/9p/9pnet EXPORT_SYMBOL
+0x337f2432 l2tp_nl_unregister_ops net/l2tp/l2tp_netlink EXPORT_SYMBOL_GPL
+0x5547011e rpc_sleep_on net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe295cee8 rdma_rw_ctx_destroy_signature drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x267a1996 mlx5_fpga_mem_write drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xd24c8576 pnfs_generic_commit_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x25fdbe8c fixed_phy_register vmlinux EXPORT_SYMBOL_GPL
+0xafddd545 ata_id_c_string vmlinux EXPORT_SYMBOL_GPL
+0xb1bed25d dpm_resume_start vmlinux EXPORT_SYMBOL_GPL
+0xc8b58472 drm_syncobj_replace_fence vmlinux EXPORT_SYMBOL
+0x5b326511 devm_regulator_put vmlinux EXPORT_SYMBOL_GPL
+0x1327fa41 pci_enable_msi vmlinux EXPORT_SYMBOL
+0xd75b20aa rsa_parse_priv_key vmlinux EXPORT_SYMBOL_GPL
+0xc65304b3 security_xfrm_policy_alloc vmlinux EXPORT_SYMBOL
+0xde6abac4 sctp_for_each_endpoint net/sctp/sctp EXPORT_SYMBOL_GPL
+0x5c9f3353 ip_tunnel_get_iflink net/ipv4/ip_tunnel EXPORT_SYMBOL
+0xf5ee9ba8 ubi_register_volume_notifier drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL
+0x01f778c2 nfs4_find_or_create_ds_client fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb0e10781 get_option vmlinux EXPORT_SYMBOL
+0xde941f72 km_state_expired vmlinux EXPORT_SYMBOL
+0x6911a3d2 of_phandle_iterator_next vmlinux EXPORT_SYMBOL_GPL
+0x96cfa39a platform_msi_domain_alloc_irqs vmlinux EXPORT_SYMBOL_GPL
+0x091453b5 component_bind_all vmlinux EXPORT_SYMBOL_GPL
+0xbeaf75ca drm_property_create vmlinux EXPORT_SYMBOL
+0xff05fa13 vring_interrupt vmlinux EXPORT_SYMBOL_GPL
+0x3d80975e clk_hw_rate_is_protected vmlinux EXPORT_SYMBOL_GPL
+0x9722304a blk_mq_freeze_queue vmlinux EXPORT_SYMBOL_GPL
+0x14a98a21 cpu_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0xca189d7a kvm_write_guest_cached vmlinux EXPORT_SYMBOL_GPL
+0xee120c03 ceph_release_string net/ceph/libceph EXPORT_SYMBOL
+0x2954f8fe ceph_con_send net/ceph/libceph EXPORT_SYMBOL
+0x2d0d0621 nf_ct_gre_keymap_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x8233d382 hinic_api_csr_rd32 drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x2bfbab10 __memmove vmlinux EXPORT_SYMBOL
+0x60352082 register_inet6addr_notifier vmlinux EXPORT_SYMBOL
+0xb4d65d41 inet_accept vmlinux EXPORT_SYMBOL
+0x5a0b3fac tcp_select_initial_window vmlinux EXPORT_SYMBOL
+0xda02a7ae register_pernet_device vmlinux EXPORT_SYMBOL_GPL
+0xb6e5cb26 hidraw_connect vmlinux EXPORT_SYMBOL_GPL
+0x7b5e1d6c class_for_each_device vmlinux EXPORT_SYMBOL_GPL
+0xcd8ce890 acpi_format_exception vmlinux EXPORT_SYMBOL
+0xb347dd97 pci_free_irq_vectors vmlinux EXPORT_SYMBOL
+0x558b281d aes_expandkey vmlinux EXPORT_SYMBOL
+0x2d30596c from_kqid_munged vmlinux EXPORT_SYMBOL
+0x44c088e0 padata_do_parallel vmlinux EXPORT_SYMBOL
+0x700e0546 bpf_offload_dev_create vmlinux EXPORT_SYMBOL_GPL
+0x53b954a2 up_read vmlinux EXPORT_SYMBOL
+0xa1c76e0a _cond_resched vmlinux EXPORT_SYMBOL
+0x5a956b5b empty_zero_page vmlinux EXPORT_SYMBOL
+0x63758856 ceph_str_hash_name net/ceph/libceph EXPORT_SYMBOL
+0xc2c49363 esp6_output_head net/ipv6/esp6 EXPORT_SYMBOL_GPL
+0xd097de31 nfnetlink_subsys_register net/netfilter/nfnetlink EXPORT_SYMBOL_GPL
+0x493aaef7 snd_pcm_lib_free_vmalloc_buffer sound/core/snd-pcm EXPORT_SYMBOL
+0xdca43564 ib_create_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x2c1c1129 mtd_is_partition drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x34f9621d __closure_sync drivers/md/bcache/bcache EXPORT_SYMBOL
+0x0bfc1956 mlxsw_core_res_valid drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb67a7bfe devm_hwrng_unregister drivers/char/hw_random/rng-core EXPORT_SYMBOL_GPL
+0x87116df5 get_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x636fb251 fscache_object_init fs/fscache/fscache EXPORT_SYMBOL
+0xb7a30f86 tso_count_descs vmlinux EXPORT_SYMBOL
+0x38372172 ndo_dflt_bridge_getlink vmlinux EXPORT_SYMBOL_GPL
+0xb87357b7 __pskb_pull_tail vmlinux EXPORT_SYMBOL
+0xec774acb cpufreq_frequency_table_verify vmlinux EXPORT_SYMBOL_GPL
+0xbf2ac035 i2c_match_id vmlinux EXPORT_SYMBOL_GPL
+0x1e8609c6 usb_gadget_ep_match_desc vmlinux EXPORT_SYMBOL_GPL
+0xdaf2ee1f drm_dp_mst_topology_mgr_destroy vmlinux EXPORT_SYMBOL
+0x82428593 tty_wait_until_sent vmlinux EXPORT_SYMBOL
+0xab9aebdb bio_disassociate_blkg vmlinux EXPORT_SYMBOL_GPL
+0x56256e8a orderly_poweroff vmlinux EXPORT_SYMBOL_GPL
+0x8a8f2e48 br_fdb_clear_offload net/bridge/bridge EXPORT_SYMBOL_GPL
+0x33da4ca0 nf_conncount_destroy net/netfilter/nf_conncount EXPORT_SYMBOL_GPL
+0x688d422d dm_bm_block_size drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x0ad0dc4f dm_bufio_mark_buffer_dirty drivers/md/dm-bufio EXPORT_SYMBOL_GPL
+0x813598e6 gether_setup_name drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL
+0x5c3100bb mlx5_fc_destroy drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x0a726931 ocfs2_cluster_this_node fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x462ff591 skb_consume_udp vmlinux EXPORT_SYMBOL_GPL
+0x0ec84adc tcp_leave_memory_pressure vmlinux EXPORT_SYMBOL_GPL
+0xe1bde572 device_remove_groups vmlinux EXPORT_SYMBOL_GPL
+0xdffc80fc vesa_modes vmlinux EXPORT_SYMBOL
+0x9147d670 gpiod_set_raw_array_value_cansleep vmlinux EXPORT_SYMBOL_GPL
+0x01790e94 csum_partial_copy vmlinux EXPORT_SYMBOL
+0xad06596d remap_vmalloc_range_partial vmlinux EXPORT_SYMBOL
+0xda15a15d alarm_forward vmlinux EXPORT_SYMBOL_GPL
+0x0869e83f p9_client_destroy net/9p/9pnet EXPORT_SYMBOL
+0x66718b15 dccp_shutdown net/dccp/dccp EXPORT_SYMBOL_GPL
+0x52e86624 pn_skb_send net/phonet/phonet EXPORT_SYMBOL
+0x051177a4 nf_log_dump_packet_common net/netfilter/nf_log_common EXPORT_SYMBOL_GPL
+0xa63c83e7 net_failover_create drivers/net/net_failover EXPORT_SYMBOL_GPL
+0xf5192388 iscsi_target_alloc drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x868a7af1 __netif_schedule vmlinux EXPORT_SYMBOL
+0xbe96dfd8 of_reconfig_notifier_unregister vmlinux EXPORT_SYMBOL_GPL
+0xa5997b62 cpuidle_get_driver vmlinux EXPORT_SYMBOL_GPL
+0xdcafd388 clk_fractional_divider_ops vmlinux EXPORT_SYMBOL_GPL
+0x90608e58 pci_pr3_present vmlinux EXPORT_SYMBOL_GPL
+0xf203b046 generic_fh_to_parent vmlinux EXPORT_SYMBOL_GPL
+0x871a597a generic_fillattr vmlinux EXPORT_SYMBOL
+0xc972449f mempool_alloc_slab vmlinux EXPORT_SYMBOL
+0x9bb35fed param_set_invbool vmlinux EXPORT_SYMBOL
+0x0c0f79af ZSTD_getDictID_fromFrame lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x31a8b1cf rdma_bind_addr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x693faba0 __inet_hash vmlinux EXPORT_SYMBOL
+0x0611117e cpuidle_unregister_driver vmlinux EXPORT_SYMBOL_GPL
+0xd9bf779f i2c_register_driver vmlinux EXPORT_SYMBOL
+0x41dcc6ba fixed_phy_unregister vmlinux EXPORT_SYMBOL_GPL
+0x4609d778 pci_bus_alloc_resource vmlinux EXPORT_SYMBOL
+0x3887c969 gpiod_set_array_value vmlinux EXPORT_SYMBOL_GPL
+0x2072ee9b request_threaded_irq vmlinux EXPORT_SYMBOL
+0x56104c3a has_capability vmlinux EXPORT_SYMBOL
+0xb4f13d2a abort vmlinux EXPORT_SYMBOL
+0x5599a7ec svc_drop net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd3f0fced ib_free_recv_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4ec7ca75 rdma_copy_src_l2_addr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5201f700 tcp_sendpage_locked vmlinux EXPORT_SYMBOL_GPL
+0x8e1ea7f8 devlink_sb_unregister vmlinux EXPORT_SYMBOL_GPL
+0x93edef07 devlink_health_report vmlinux EXPORT_SYMBOL_GPL
+0x16f13765 mbox_flush vmlinux EXPORT_SYMBOL_GPL
+0xcfa97da6 ata_bmdma32_port_ops vmlinux EXPORT_SYMBOL_GPL
+0xd170d71c sata_scr_valid vmlinux EXPORT_SYMBOL_GPL
+0x250ecc36 pm_clk_add_notifier vmlinux EXPORT_SYMBOL_GPL
+0xb0924319 pci_find_pcie_root_port vmlinux EXPORT_SYMBOL
+0xc575c737 debug_locks_off vmlinux EXPORT_SYMBOL_GPL
+0x13f8dc02 jbd2_journal_forget vmlinux EXPORT_SYMBOL
+0x2db3d320 mutex_lock_interruptible vmlinux EXPORT_SYMBOL
+0xe8c743d4 md_bitmap_cond_end_sync drivers/md/md-mod EXPORT_SYMBOL
+0x0745a981 xa_erase vmlinux EXPORT_SYMBOL
+0x86585a33 devlink_fmsg_obj_nest_start vmlinux EXPORT_SYMBOL_GPL
+0xbc781e3a skb_clone vmlinux EXPORT_SYMBOL
+0xde349107 devm_mbox_controller_unregister vmlinux EXPORT_SYMBOL_GPL
+0x3395257c input_get_keycode vmlinux EXPORT_SYMBOL
+0x299e161a regmap_can_raw_write vmlinux EXPORT_SYMBOL_GPL
+0xcd59cd6f wakeup_source_destroy vmlinux EXPORT_SYMBOL_GPL
+0x95ead333 virtqueue_enable_cb_prepare vmlinux EXPORT_SYMBOL_GPL
+0xfdcc8a0e fb_find_best_display vmlinux EXPORT_SYMBOL
+0xf861c41b __pci_hp_initialize vmlinux EXPORT_SYMBOL_GPL
+0x05a63d93 set_task_ioprio vmlinux EXPORT_SYMBOL_GPL
+0x420849af crypto_find_alg vmlinux EXPORT_SYMBOL_GPL
+0xeb45b0cd setup_arg_pages vmlinux EXPORT_SYMBOL
+0x31203b6c pagevec_lookup_range vmlinux EXPORT_SYMBOL
+0xa7e38f12 flow_keys_basic_dissector vmlinux EXPORT_SYMBOL
+0x8deb82cd usb_serial_generic_process_read_urb vmlinux EXPORT_SYMBOL_GPL
+0x648d953b drm_dsc_dp_pps_header_init vmlinux EXPORT_SYMBOL
+0xee4f976f of_get_regulator_init_data vmlinux EXPORT_SYMBOL_GPL
+0xef29fcdd clk_bulk_put vmlinux EXPORT_SYMBOL_GPL
+0x2310b1ff amba_request_regions vmlinux EXPORT_SYMBOL
+0x63ae5c2f inode_dio_wait vmlinux EXPORT_SYMBOL
+0x87d2a943 irq_chip_request_resources_parent vmlinux EXPORT_SYMBOL_GPL
+0x9e472f5f snmp_fold_field vmlinux EXPORT_SYMBOL_GPL
+0x7fe2ecb3 phy_ethtool_sset vmlinux EXPORT_SYMBOL
+0x5ff23a51 iscsi_session_event vmlinux EXPORT_SYMBOL_GPL
+0x56270a6f drm_encoder_init vmlinux EXPORT_SYMBOL
+0x7ca8674a pci_disable_link_state vmlinux EXPORT_SYMBOL
+0xd11fcff0 crypto_unregister_akcipher vmlinux EXPORT_SYMBOL_GPL
+0xb6f52baa crypto_unregister_skcipher vmlinux EXPORT_SYMBOL_GPL
+0x3a42acea jbd2_journal_start vmlinux EXPORT_SYMBOL
+0x2344ea2c dquot_quota_on vmlinux EXPORT_SYMBOL
+0xd67364f7 eventfd_ctx_fdget vmlinux EXPORT_SYMBOL_GPL
+0xbffde8ec compat_alloc_user_space vmlinux EXPORT_SYMBOL_GPL
+0x0ffcd64c vsock_stream_has_data net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x2750c622 l2tp_udp_encap_recv net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x4f6491ef garp_unregister_application net/802/garp EXPORT_SYMBOL_GPL
+0xfa021e3c cpufreq_policy_transition_delay_us vmlinux EXPORT_SYMBOL_GPL
+0x6d949faf thermal_zone_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0x6b675214 __i2c_transfer vmlinux EXPORT_SYMBOL
+0x37cc6804 ata_host_alloc_pinfo vmlinux EXPORT_SYMBOL_GPL
+0x3b7e833a devm_ioremap_wc vmlinux EXPORT_SYMBOL
+0x75ecee86 devm_ioremap_uc vmlinux EXPORT_SYMBOL_GPL
+0x25c0b5d3 vfs_lock_file vmlinux EXPORT_SYMBOL_GPL
+0xad83149c make_bad_inode vmlinux EXPORT_SYMBOL
+0x4e53e9eb __d_drop vmlinux EXPORT_SYMBOL
+0x819d5980 __mmdrop vmlinux EXPORT_SYMBOL_GPL
+0x45fd8f03 gss_mech_unregister net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0xdc194996 unix_detach_fds vmlinux EXPORT_SYMBOL
+0x59b4ac3e tcp_memory_allocated vmlinux EXPORT_SYMBOL
+0xdcbea7ed hid_check_keys_pressed vmlinux EXPORT_SYMBOL_GPL
+0x47aad3b9 have_governor_per_policy vmlinux EXPORT_SYMBOL_GPL
+0xa6c7a364 scsi_bus_type vmlinux EXPORT_SYMBOL_GPL
+0x90237a25 ata_sff_qc_fill_rtf vmlinux EXPORT_SYMBOL_GPL
+0xcdf77557 fwnode_graph_parse_endpoint vmlinux EXPORT_SYMBOL
+0xe1fe151c regulator_map_voltage_pickable_linear_range vmlinux EXPORT_SYMBOL_GPL
+0x10a1762a jbd2_journal_force_commit_nested vmlinux EXPORT_SYMBOL
+0x94b1325f seq_pad vmlinux EXPORT_SYMBOL
+0x0424c6b5 seq_release_private vmlinux EXPORT_SYMBOL
+0x5b75f568 ww_mutex_lock vmlinux EXPORT_SYMBOL
+0xd7433e81 snd_ctl_remove sound/core/snd EXPORT_SYMBOL
+0xc3ddb37c target_sess_cmd_list_set_waiting drivers/target/target_core_mod EXPORT_SYMBOL
+0x46f42be0 devlink_fmsg_u8_put vmlinux EXPORT_SYMBOL_GPL
+0x10c8d25f ata_pci_remove_one vmlinux EXPORT_SYMBOL_GPL
+0x9213f5c3 drm_edid_to_speaker_allocation vmlinux EXPORT_SYMBOL
+0x75cbfb09 add_interrupt_randomness vmlinux EXPORT_SYMBOL_GPL
+0x1f34b8cf uart_console_write vmlinux EXPORT_SYMBOL_GPL
+0x377d8004 acpi_error vmlinux EXPORT_SYMBOL
+0xe713c38f gpiod_get_array_value_cansleep vmlinux EXPORT_SYMBOL_GPL
+0x1bee4974 sg_alloc_table_chained vmlinux EXPORT_SYMBOL_GPL
+0x526683fd writeback_inodes_sb_nr vmlinux EXPORT_SYMBOL
+0xa204933c vfs_removexattr vmlinux EXPORT_SYMBOL_GPL
+0x80d7dacb lock_page_memcg vmlinux EXPORT_SYMBOL
+0x8a53475d find_symbol vmlinux EXPORT_SYMBOL_GPL
+0x977f511b __mutex_init vmlinux EXPORT_SYMBOL
+0xd9e6dd63 nci_core_conn_create net/nfc/nci/nci EXPORT_SYMBOL
+0xfa0b8b91 mlx5_core_create_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x14062bab mlx5_core_create_qp drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x6728b839 mlx5_core_create_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x1bae35ab mlx5_debugfs_root drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xf7801360 nfs41_maxgetdevinfo_overhead fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xefc32a9b neon_aes_xts_decrypt arch/arm64/crypto/aes-neon-blk EXPORT_SYMBOL
+0xea11590c neon_aes_xts_encrypt arch/arm64/crypto/aes-neon-blk EXPORT_SYMBOL
+0xed6ffe6e rtc_set_time vmlinux EXPORT_SYMBOL_GPL
+0xb4f8fe45 usb_stor_host_template_init vmlinux EXPORT_SYMBOL_GPL
+0x9260cfbc usb_phy_roothub_alloc vmlinux EXPORT_SYMBOL_GPL
+0x2d26e667 mipi_dsi_dcs_set_display_off vmlinux EXPORT_SYMBOL
+0xb69e8eb4 drm_dev_printk vmlinux EXPORT_SYMBOL
+0x0e3de4f7 drm_connector_attach_content_type_property vmlinux EXPORT_SYMBOL
+0xed546afd drm_i2c_encoder_mode_set vmlinux EXPORT_SYMBOL
+0x8edd566c pcix_get_mmrbc vmlinux EXPORT_SYMBOL
+0x5a680b22 jbd2_journal_check_used_features vmlinux EXPORT_SYMBOL
+0xcd91b127 system_highpri_wq vmlinux EXPORT_SYMBOL_GPL
+0x7e431c15 cordic_calc_iq lib/math/cordic EXPORT_SYMBOL
+0xc9144ff8 auth_domain_lookup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3008c8a8 mrp_request_join net/802/mrp EXPORT_SYMBOL_GPL
+0x68e9767d nfs4_schedule_session_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xae316c11 icmpv6_err_convert vmlinux EXPORT_SYMBOL
+0x2f10213e xfrm_audit_state_delete vmlinux EXPORT_SYMBOL_GPL
+0x3fa0e928 phylink_helper_basex_speed vmlinux EXPORT_SYMBOL_GPL
+0x5dbdb6b9 iommu_fwspec_add_ids vmlinux EXPORT_SYMBOL_GPL
+0xf4916ced dw8250_setup_port vmlinux EXPORT_SYMBOL_GPL
+0xb2e618e3 divider_recalc_rate vmlinux EXPORT_SYMBOL_GPL
+0xdba61271 fb_get_mode vmlinux EXPORT_SYMBOL
+0xdf6ef4a1 ceph_oid_printf net/ceph/libceph EXPORT_SYMBOL
+0xa0edae80 lowpan_header_compress net/6lowpan/6lowpan EXPORT_SYMBOL_GPL
+0xa9200e5e inet_diag_find_one_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0xd7eba8d5 fib6_get_table vmlinux EXPORT_SYMBOL_GPL
+0xd11fc0aa gov_attr_set_init vmlinux EXPORT_SYMBOL_GPL
+0x59751e20 drm_debugfs_remove_files vmlinux EXPORT_SYMBOL
+0x25b8600a crypto_shash_setkey vmlinux EXPORT_SYMBOL_GPL
+0x63ff708e crypto_ahash_setkey vmlinux EXPORT_SYMBOL_GPL
+0x09a0109e search_binary_handler vmlinux EXPORT_SYMBOL
+0x6cfcbd06 balloon_page_alloc vmlinux EXPORT_SYMBOL_GPL
+0x25f545ca tracing_cond_snapshot_data vmlinux EXPORT_SYMBOL_GPL
+0x3b72bbf7 rt_mutex_trylock vmlinux EXPORT_SYMBOL_GPL
+0x1f0cb5bf pm_power_off_prepare vmlinux EXPORT_SYMBOL_GPL
+0x1eff927c osd_req_op_extent_update net/ceph/libceph EXPORT_SYMBOL
+0x6d4e9860 nfs_fs_type fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xd5bd372b nfs_server_insert_lists fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x582b6275 xfrm_if_unregister_cb vmlinux EXPORT_SYMBOL
+0xaabf7332 rtnl_link_get_net vmlinux EXPORT_SYMBOL
+0x238b5877 cpufreq_freq_attr_scaling_available_freqs vmlinux EXPORT_SYMBOL_GPL
+0x1078b83f usb_remove_hcd vmlinux EXPORT_SYMBOL_GPL
+0xa417f261 device_store_bool vmlinux EXPORT_SYMBOL_GPL
+0x1d1abdf0 acpi_get_physical_device_location vmlinux EXPORT_SYMBOL
+0xfa1e8ea5 ahash_attr_alg vmlinux EXPORT_SYMBOL_GPL
+0xcfb5871c irq_work_queue vmlinux EXPORT_SYMBOL_GPL
+0xc49c815c snd_card_add_dev_attr sound/core/snd EXPORT_SYMBOL_GPL
+0x0cd73d72 ib_destroy_srq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7fca83ba __bch_bset_search drivers/md/bcache/bcache EXPORT_SYMBOL
+0xfe9512e8 mlx4_CLOSE_PORT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x7002d3bc tcp_disconnect vmlinux EXPORT_SYMBOL
+0x837ee68d __skb_ext_del vmlinux EXPORT_SYMBOL
+0xfdab7565 power_supply_put_battery_info vmlinux EXPORT_SYMBOL_GPL
+0x6469fe4f power_supply_get_battery_info vmlinux EXPORT_SYMBOL_GPL
+0xb387ce5c put_device vmlinux EXPORT_SYMBOL_GPL
+0x81992424 drm_edid_header_is_valid vmlinux EXPORT_SYMBOL
+0xec68b308 drm_gtf_mode_complex vmlinux EXPORT_SYMBOL
+0x79868e31 mark_info_dirty vmlinux EXPORT_SYMBOL
+0x0748d3ae nft_meta_set_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xbb030f42 nft_meta_get_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xa7eadcb5 btracker_complete drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xb4111f2e ppp_input drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x648eb59d gc_inflight_list vmlinux EXPORT_SYMBOL
+0xfd3b3538 nf_unregister_net_hooks vmlinux EXPORT_SYMBOL
+0x872ba171 hid_hw_open vmlinux EXPORT_SYMBOL_GPL
+0x63f7cbba devm_power_supply_register_no_ws vmlinux EXPORT_SYMBOL_GPL
+0xdc78e741 devm_input_allocate_device vmlinux EXPORT_SYMBOL
+0xcf6a3d59 usb_phy_roothub_resume vmlinux EXPORT_SYMBOL_GPL
+0xdc35ffb0 spi_alloc_device vmlinux EXPORT_SYMBOL_GPL
+0x02238547 hisi_sas_rst_work_handler vmlinux EXPORT_SYMBOL_GPL
+0x4003bae2 ata_port_wait_eh vmlinux EXPORT_SYMBOL_GPL
+0x3891ffc8 ecryptfs_fill_auth_tok vmlinux EXPORT_SYMBOL
+0xc367d3f6 register_filesystem vmlinux EXPORT_SYMBOL
+0x93170790 blk_fill_rwbs vmlinux EXPORT_SYMBOL_GPL
+0xba497f13 loops_per_jiffy vmlinux EXPORT_SYMBOL
+0x794e21e7 rds_for_each_conn_info net/rds/rds EXPORT_SYMBOL_GPL
+0xe17edbfd fc_disc_config drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xfb4e10a9 inet_confirm_addr vmlinux EXPORT_SYMBOL
+0x01b77195 eth_validate_addr vmlinux EXPORT_SYMBOL
+0x1f9c607b nvmem_dev_name vmlinux EXPORT_SYMBOL_GPL
+0xb179ece3 hid_hw_close vmlinux EXPORT_SYMBOL_GPL
+0x2c208607 power_supply_is_system_supplied vmlinux EXPORT_SYMBOL_GPL
+0xe628bb9f phy_fibre_port_array vmlinux EXPORT_SYMBOL_GPL
+0xe2834f82 sas_remove_children vmlinux EXPORT_SYMBOL
+0x26a8397a drm_gem_object_put_unlocked vmlinux EXPORT_SYMBOL
+0xb69c3d88 clk_hw_unregister_divider vmlinux EXPORT_SYMBOL_GPL
+0xaebd12f0 acpi_get_name vmlinux EXPORT_SYMBOL
+0xe2582a12 btree_init vmlinux EXPORT_SYMBOL_GPL
+0x02b8ab42 sg_copy_to_buffer vmlinux EXPORT_SYMBOL
+0x7d0db45c jiffies_to_clock_t vmlinux EXPORT_SYMBOL
+0x7ab88a45 system_freezing_cnt vmlinux EXPORT_SYMBOL
+0x1d0dd48f sched_setscheduler_nocheck vmlinux EXPORT_SYMBOL_GPL
+0x853138c3 nfc_register_device net/nfc/nfc EXPORT_SYMBOL
+0x32359e69 l2tp_tunnel_get_nth net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x55ce384e mpls_dev_mtu net/mpls/mpls_router EXPORT_SYMBOL_GPL
+0x5d0bb8e2 target_put_nacl drivers/target/target_core_mod EXPORT_SYMBOL
+0xcbe51a34 core_tmr_alloc_req drivers/target/target_core_mod EXPORT_SYMBOL
+0x980e6680 mlx5_rl_add_rate drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x844221a3 xt_hook_ops_alloc vmlinux EXPORT_SYMBOL_GPL
+0xa74c9877 refcount_dec_and_rtnl_lock vmlinux EXPORT_SYMBOL
+0x33d8d694 sock_no_sendmsg_locked vmlinux EXPORT_SYMBOL
+0xcfe25993 hidraw_disconnect vmlinux EXPORT_SYMBOL_GPL
+0xc9099da1 usb_debug_root vmlinux EXPORT_SYMBOL_GPL
+0x2edcf426 devm_mdiobus_free vmlinux EXPORT_SYMBOL_GPL
+0xff7a0fdf eeprom_93cx6_multiread vmlinux EXPORT_SYMBOL_GPL
+0x1c63535e drm_mode_crtc_set_gamma_size vmlinux EXPORT_SYMBOL
+0xfa5e497c tty_kopen vmlinux EXPORT_SYMBOL_GPL
+0x6277dfff regulator_get_exclusive vmlinux EXPORT_SYMBOL_GPL
+0xb520eb79 btree_merge vmlinux EXPORT_SYMBOL_GPL
+0x3e41d362 generic_make_request vmlinux EXPORT_SYMBOL
+0xacab29b7 seq_hlist_start_percpu vmlinux EXPORT_SYMBOL
+0x234e0290 generic_file_llseek_size vmlinux EXPORT_SYMBOL
+0x1c87a811 __round_jiffies_up vmlinux EXPORT_SYMBOL_GPL
+0xb3cf2e28 garp_uninit_applicant net/802/garp EXPORT_SYMBOL_GPL
+0x71010a2a iscsit_build_text_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x11f637ec usb_composite_setup_continue drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xf3c6ef2f rndis_borrow_net drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL
+0x5f770644 hinic_slq_init drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x594f3ef6 nexthop_for_each_fib6_nh vmlinux EXPORT_SYMBOL_GPL
+0x02124474 ip_send_check vmlinux EXPORT_SYMBOL
+0xcfc5108a devlink_fmsg_u8_pair_put vmlinux EXPORT_SYMBOL_GPL
+0xdc7630c3 dev_attr_sw_activity vmlinux EXPORT_SYMBOL_GPL
+0x7fc447ef __vfs_setxattr_locked vmlinux EXPORT_SYMBOL_GPL
+0x23b9d6e2 mangle_path vmlinux EXPORT_SYMBOL
+0xbd2f79ae ceph_oloc_copy net/ceph/libceph EXPORT_SYMBOL
+0x87e2553b ieee802154_max_payload net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0xab6a24a5 br_port_flag_is_set net/bridge/bridge EXPORT_SYMBOL_GPL
+0x4df1e1b5 nf_flow_dnat_port net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x13364258 nf_flow_snat_port net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x8e7528da __rdma_block_iter_next drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x23a5fa3f dm_block_manager_create drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x330fd4f8 mbox_controller_register vmlinux EXPORT_SYMBOL_GPL
+0xc991a0bc __genphy_config_aneg vmlinux EXPORT_SYMBOL
+0xc5604800 clk_set_rate_exclusive vmlinux EXPORT_SYMBOL_GPL
+0x2906d51d pci_restore_pasid_state vmlinux EXPORT_SYMBOL_GPL
+0xa129d53e pci_get_domain_bus_and_slot vmlinux EXPORT_SYMBOL
+0xa74aa9f4 pinctrl_find_gpio_range_from_pin_nolock vmlinux EXPORT_SYMBOL_GPL
+0x798fb434 __blkdev_reread_part vmlinux EXPORT_SYMBOL
+0xc313873f anon_inode_getfd vmlinux EXPORT_SYMBOL_GPL
+0x94727805 bd_abort_claiming vmlinux EXPORT_SYMBOL
+0xf96371be filemap_flush vmlinux EXPORT_SYMBOL
+0x8e9c135c ceph_cls_unlock net/ceph/libceph EXPORT_SYMBOL
+0x5dca77ee mpt_alloc_fw_memory drivers/message/fusion/mptbase EXPORT_SYMBOL
+0x6405dcd3 slhc_toss drivers/net/slip/slhc EXPORT_SYMBOL
+0xe7858333 tcf_idr_create vmlinux EXPORT_SYMBOL
+0xe9e1a8c0 mbox_controller_unregister vmlinux EXPORT_SYMBOL_GPL
+0x9b682ea8 pm_generic_freeze_late vmlinux EXPORT_SYMBOL_GPL
+0x48621538 drm_gem_vm_close vmlinux EXPORT_SYMBOL
+0x573ef9b2 serial8250_rpm_put_tx vmlinux EXPORT_SYMBOL_GPL
+0xa2326c49 acpi_remove_table_handler vmlinux EXPORT_SYMBOL
+0x2c5a74d1 blk_stack_limits vmlinux EXPORT_SYMBOL
+0x6925738e fuse_conn_init vmlinux EXPORT_SYMBOL_GPL
+0x80ae0701 probe_user_write vmlinux EXPORT_SYMBOL_GPL
+0x69b9598e rpc_mkpipe_data net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x1f5ca824 unix_domain_find net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x7a1211f8 dlm_setup_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0xd9d1af08 __fib_lookup vmlinux EXPORT_SYMBOL_GPL
+0x77662917 tcp_set_state vmlinux EXPORT_SYMBOL_GPL
+0x50326b98 i2c_smbus_read_i2c_block_data vmlinux EXPORT_SYMBOL
+0x5be8eac0 drm_kms_helper_poll_disable vmlinux EXPORT_SYMBOL
+0xbcc308bb strnlen_user vmlinux EXPORT_SYMBOL
+0x49e3f804 __fscrypt_prepare_symlink vmlinux EXPORT_SYMBOL_GPL
+0x997c4347 unmap_kernel_range vmlinux EXPORT_SYMBOL_GPL
+0x0723a4be br_handle_frame_finish net/bridge/bridge EXPORT_SYMBOL_GPL
+0x66335dd3 mpt_print_ioc_summary drivers/message/fusion/mptbase EXPORT_SYMBOL
+0xac2c83a9 mlx5_db_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xf5566ef0 mlx4_db_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x1f809534 async_raid6_datap_recov crypto/async_tx/async_raid6_recov EXPORT_SYMBOL_GPL
+0x881df2ef xt_free_table_info vmlinux EXPORT_SYMBOL
+0xaaa19e87 input_set_poll_interval vmlinux EXPORT_SYMBOL
+0xea50dad3 ahci_ignore_sss vmlinux EXPORT_SYMBOL_GPL
+0x136beacf ata_bmdma_setup vmlinux EXPORT_SYMBOL_GPL
+0x0a6c4041 cn_netlink_send vmlinux EXPORT_SYMBOL_GPL
+0x6a4e51b1 drm_property_create_blob vmlinux EXPORT_SYMBOL
+0xf2329e89 drm_dp_downstream_max_clock vmlinux EXPORT_SYMBOL
+0xfbb7f5bb serial8250_modem_status vmlinux EXPORT_SYMBOL_GPL
+0xcf32c7bb set_create_files_as vmlinux EXPORT_SYMBOL
+0x53f6b930 dccp_poll net/dccp/dccp EXPORT_SYMBOL_GPL
+0xd43ecbf1 null_ax25_address net/ax25/ax25 EXPORT_SYMBOL
+0x0245b9dc snd_dma_alloc_pages_fallback sound/core/snd-pcm EXPORT_SYMBOL
+0x76a65e3b mlxsw_core_port_init drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xdc6aa6c8 fdp_nci_probe drivers/nfc/fdp/fdp EXPORT_SYMBOL
+0x804f922a ipmi_addr_length drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xdf68850b kpatch_root_kobj kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL_GPL
+0x268c6833 dev_get_by_name vmlinux EXPORT_SYMBOL
+0x89f07352 power_supply_set_battery_charged vmlinux EXPORT_SYMBOL_GPL
+0x282fd598 ohci_hub_control vmlinux EXPORT_SYMBOL_GPL
+0x9476a8da serial8250_init_port vmlinux EXPORT_SYMBOL_GPL
+0x2b2b37b9 pci_enable_msix_range vmlinux EXPORT_SYMBOL
+0x1a375d54 iov_iter_get_pages vmlinux EXPORT_SYMBOL
+0x14997d31 __page_symlink vmlinux EXPORT_SYMBOL
+0x4aeb6679 install_exec_creds vmlinux EXPORT_SYMBOL
+0xa38a9f71 get_itimerspec64 vmlinux EXPORT_SYMBOL_GPL
+0x996af138 sched_setscheduler vmlinux EXPORT_SYMBOL_GPL
+0x4dd4999b rxrpc_get_null_key net/rxrpc/rxrpc EXPORT_SYMBOL
+0x4c077b75 ib_unregister_event_handler drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xfde5e105 iscsi_segment_seek_sg drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL
+0x3ba17eab drm_ht_insert_item vmlinux EXPORT_SYMBOL
+0x30a80826 __kfifo_from_user vmlinux EXPORT_SYMBOL
+0x67271c0e cdev_set_parent vmlinux EXPORT_SYMBOL
+0x6923ce63 irq_work_sync vmlinux EXPORT_SYMBOL_GPL
+0xc7e64691 unregister_kretprobes vmlinux EXPORT_SYMBOL_GPL
+0xbba95463 ipcomp_input net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL
+0x2a913de4 nft_reject_validate net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0xbd3050b4 nat_h245_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x73cc23d3 snd_ctl_register_ioctl sound/core/snd EXPORT_SYMBOL
+0xa70b1d9a rndis_get_next_response drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL
+0x9b90d4d0 mlx5_core_reserved_gids_count drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xb0f36a3f hinic_support_ovs drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x4f7687ce udp_prot vmlinux EXPORT_SYMBOL
+0x1b92e41d inet_putpeer vmlinux EXPORT_SYMBOL_GPL
+0x473e4b6c __hw_addr_ref_unsync_dev vmlinux EXPORT_SYMBOL
+0x4e81b8e0 br_fdb_test_addr_hook vmlinux EXPORT_SYMBOL_GPL
+0x47cadd96 netif_set_xps_queue vmlinux EXPORT_SYMBOL
+0xe12beed9 i2c_adapter_type vmlinux EXPORT_SYMBOL_GPL
+0xb78fbb0c sas_prep_resume_ha vmlinux EXPORT_SYMBOL
+0xdd592615 drm_vma_offset_remove vmlinux EXPORT_SYMBOL
+0x41da8e9d pci_msi_vec_count vmlinux EXPORT_SYMBOL
+0xc6e40132 rds_conn_connect_if_down net/rds/rds EXPORT_SYMBOL_GPL
+0x2bd6b47d hinic_free_irq drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x710ab631 hinic_free_qps drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x5243c8dc inet6_sk_rebuild_header vmlinux EXPORT_SYMBOL_GPL
+0x591786b3 __dev_set_mtu vmlinux EXPORT_SYMBOL
+0xf4748c56 rc_keydown_notimeout vmlinux EXPORT_SYMBOL_GPL
+0x36ded1f5 device_init_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x263d9da8 clkdev_hw_alloc vmlinux EXPORT_SYMBOL
+0x47d0eea2 acpi_lpat_temp_to_raw vmlinux EXPORT_SYMBOL_GPL
+0x47f7d01c unregister_quota_format vmlinux EXPORT_SYMBOL
+0xf1e98c74 avenrun vmlinux EXPORT_SYMBOL
+0x2d3385d3 system_wq vmlinux EXPORT_SYMBOL
+0x32d5dbb3 kvm_vcpu_yield_to vmlinux EXPORT_SYMBOL_GPL
+0xf998de85 nf_ct_extend_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x2a4c1fdc drm_gem_dmabuf_mmap vmlinux EXPORT_SYMBOL
+0x2dc56bfc mark_page_accessed vmlinux EXPORT_SYMBOL
+0x1edb69d6 ktime_get_raw_ts64 vmlinux EXPORT_SYMBOL
+0x23e83ff6 l2tp_session_get_nth net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0xae71716c nf_nat_sip_hooks net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x5a490bc1 transport_kmap_data_sg drivers/target/target_core_mod EXPORT_SYMBOL
+0xfd93482e dm_rh_recovery_in_flight drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x83075f46 mlx4_gen_port_state_change_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xbc981806 fc_vport_id_lookup drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0xf5531bea ipmi_poll_interface drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x200b2041 in6addr_any vmlinux EXPORT_SYMBOL
+0x9b968291 inet_protos vmlinux EXPORT_SYMBOL
+0x02a18c74 nf_conntrack_destroy vmlinux EXPORT_SYMBOL
+0xb8adfc50 scsi_report_bus_reset vmlinux EXPORT_SYMBOL
+0x34bb7cad tty_port_install vmlinux EXPORT_SYMBOL_GPL
+0xd643b316 ceph_compare_options net/ceph/libceph EXPORT_SYMBOL
+0x2973e12c __snd_usbmidi_create sound/usb/snd-usbmidi-lib EXPORT_SYMBOL
+0x5e5eb201 fc_fill_reply_hdr drivers/scsi/libfc/libfc EXPORT_SYMBOL
+0x399a8b53 ip6_sk_dst_lookup_flow vmlinux EXPORT_SYMBOL_GPL
+0x686a722d of_get_next_available_child vmlinux EXPORT_SYMBOL
+0x0cb56398 hid_output_report vmlinux EXPORT_SYMBOL_GPL
+0x41465de7 cpuidle_get_cpu_driver vmlinux EXPORT_SYMBOL_GPL
+0x508bc822 __usb_create_hcd vmlinux EXPORT_SYMBOL_GPL
+0x9bece81b mpi_cmp_ui vmlinux EXPORT_SYMBOL_GPL
+0x2ec6bba0 errseq_set vmlinux EXPORT_SYMBOL
+0x261c0eb2 vfs_parse_fs_string vmlinux EXPORT_SYMBOL
+0xcfa36e59 cdev_add vmlinux EXPORT_SYMBOL
+0x2f66c85e ring_buffer_empty vmlinux EXPORT_SYMBOL_GPL
+0x69447467 ring_buffer_write vmlinux EXPORT_SYMBOL_GPL
+0x05bc332b kvm_vcpu_write_guest vmlinux EXPORT_SYMBOL_GPL
+0xd4eb7735 ceph_decode_entity_addr net/ceph/libceph EXPORT_SYMBOL
+0xeeacab69 rpc_update_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xed017ac2 snd_timer_new sound/core/snd-timer EXPORT_SYMBOL
+0xf3496929 ib_modify_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3cd27197 mptscsih_change_queue_depth drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x94e6d381 xfrm_input vmlinux EXPORT_SYMBOL
+0x11c1fd1b dev_mc_del vmlinux EXPORT_SYMBOL
+0x89fba96c dev_mc_add vmlinux EXPORT_SYMBOL
+0x17bde51b dev_uc_del vmlinux EXPORT_SYMBOL
+0x8f87b16c dev_uc_add vmlinux EXPORT_SYMBOL
+0xea401b3f register_pernet_subsys vmlinux EXPORT_SYMBOL_GPL
+0x1999cf1b __skb_ext_put vmlinux EXPORT_SYMBOL
+0xbdc93ee2 rtc_update_irq vmlinux EXPORT_SYMBOL_GPL
+0x0cfa8bcd dev_attr_em_message_type vmlinux EXPORT_SYMBOL_GPL
+0xcb811a46 __devm_create_dev_dax vmlinux EXPORT_SYMBOL_GPL
+0x4e0280b6 drm_mode_hsync vmlinux EXPORT_SYMBOL
+0x8d701329 drm_dp_clock_recovery_ok vmlinux EXPORT_SYMBOL
+0x6be1c1f8 acpi_install_method vmlinux EXPORT_SYMBOL
+0x3aefdd36 fb_blank vmlinux EXPORT_SYMBOL
+0x14faa514 devm_gpiod_get vmlinux EXPORT_SYMBOL_GPL
+0xeab761f3 blk_queue_update_dma_pad vmlinux EXPORT_SYMBOL
+0xc5480317 blkdev_get_by_path vmlinux EXPORT_SYMBOL
+0x638f923c open_exec vmlinux EXPORT_SYMBOL
+0xa77b3b62 des3_ede_expand_key lib/crypto/libdes EXPORT_SYMBOL_GPL
+0x66bdb822 inet_gro_receive vmlinux EXPORT_SYMBOL
+0xd25f1d29 iscsi_remove_session vmlinux EXPORT_SYMBOL_GPL
+0x4e8b0ba3 ahci_platform_disable_resources vmlinux EXPORT_SYMBOL_GPL
+0xfe576131 cpu_subsys vmlinux EXPORT_SYMBOL_GPL
+0xd8df08ac acpi_handle_printk vmlinux EXPORT_SYMBOL
+0x9d61e994 ucs2_strncmp vmlinux EXPORT_SYMBOL
+0xbd462b55 __kfifo_init vmlinux EXPORT_SYMBOL
+0xb0e824a5 blkcg_activate_policy vmlinux EXPORT_SYMBOL_GPL
+0x47a0cdcb mb_cache_entry_find_next vmlinux EXPORT_SYMBOL
+0x4415dbe5 inode_init_owner vmlinux EXPORT_SYMBOL
+0x305c2cbd file_ns_capable vmlinux EXPORT_SYMBOL
+0x2b30f429 raid6_call lib/raid6/raid6_pq EXPORT_SYMBOL_GPL
+0x296cb509 __xa_insert vmlinux EXPORT_SYMBOL
+0xf7625473 xfrm_dev_state_add vmlinux EXPORT_SYMBOL_GPL
+0x43f56e82 ata_xfer_mode2shift vmlinux EXPORT_SYMBOL_GPL
+0x54e97a4f of_clk_set_defaults vmlinux EXPORT_SYMBOL_GPL
+0xd02ae940 pnp_activate_dev vmlinux EXPORT_SYMBOL
+0xc73136c6 pci_find_next_capability vmlinux EXPORT_SYMBOL_GPL
+0x85e551ad fs_context_for_reconfigure vmlinux EXPORT_SYMBOL
+0x6a72007c list_lru_count_one vmlinux EXPORT_SYMBOL_GPL
+0x80b7609e rt_mutex_destroy vmlinux EXPORT_SYMBOL_GPL
+0x7b6d70b2 queue_delayed_work_on vmlinux EXPORT_SYMBOL
+0xd4034828 system_freezable_wq vmlinux EXPORT_SYMBOL_GPL
+0xbc028f64 rdma_nl_put_driver_u32_hex drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4a5bd169 ib_unregister_client drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xb7500cb5 __tracepoint_bcache_gc_end drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x31121a10 hinic_send_sq_wqe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL
+0x06fcb1f7 qdisc_put_unlocked vmlinux EXPORT_SYMBOL
+0x0b3a622c skb_gso_validate_mac_len vmlinux EXPORT_SYMBOL_GPL
+0x4453b248 iscsi_destroy_endpoint vmlinux EXPORT_SYMBOL_GPL
+0xc4027ae7 devres_open_group vmlinux EXPORT_SYMBOL_GPL
+0xea7b61ed drm_atomic_helper_disable_planes_on_crtc vmlinux EXPORT_SYMBOL
+0x35b43aa2 drm_dp_dpcd_read_link_status vmlinux EXPORT_SYMBOL
+0x95bfb962 pci_read_config_dword vmlinux EXPORT_SYMBOL
+0x126c9149 __set_page_dirty_nobuffers vmlinux EXPORT_SYMBOL
+0x1c27e09f bpf_prog_inc_not_zero vmlinux EXPORT_SYMBOL_GPL
+0x0e037a95 svc_xprt_copy_addrs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x330ad70d esp_output_tail net/ipv4/esp4 EXPORT_SYMBOL_GPL
+0xe7691352 mtd_ooblayout_count_eccbytes drivers/mtd/mtd EXPORT_SYMBOL_GPL
+0x215a8ec8 slhc_init drivers/net/slip/slhc EXPORT_SYMBOL
+0x01729617 af_alg_get_rsgl crypto/af_alg EXPORT_SYMBOL_GPL
+0x649e6a07 drm_vram_mm_mmap vmlinux EXPORT_SYMBOL
+0x3efc18d2 clk_hw_unregister vmlinux EXPORT_SYMBOL_GPL
+0xc4072008 gpiochip_line_is_open_source vmlinux EXPORT_SYMBOL_GPL
+0x98503a63 mpi_alloc vmlinux EXPORT_SYMBOL_GPL
+0x9c122bcf mempool_create_node vmlinux EXPORT_SYMBOL
+0x3b65ea37 find_get_pid vmlinux EXPORT_SYMBOL_GPL
+0x0863160e nf_conntrack_helper_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xff007c25 mlxsw_core_cpu_port_fini drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x5d6936ff iscsi_verify_itt drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x58d19cb7 configfs_depend_item_unlocked fs/configfs/configfs EXPORT_SYMBOL
+0x0df9890d dev_set_promiscuity vmlinux EXPORT_SYMBOL
+0xe86b464e usb_stor_transparent_scsi_command vmlinux EXPORT_SYMBOL_GPL
+0x65520488 scsi_bios_ptable vmlinux EXPORT_SYMBOL
+0x9f486c07 pm_generic_resume vmlinux EXPORT_SYMBOL_GPL
+0x23daa989 mipi_dsi_create_packet vmlinux EXPORT_SYMBOL
+0x08a89d5d pci_write_vpd vmlinux EXPORT_SYMBOL
+0x010e7b66 pci_generic_config_write32 vmlinux EXPORT_SYMBOL_GPL
+0x3680eebe nf_send_unreach6 net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL
+0x18c12104 esp_input_done2 net/ipv4/esp4 EXPORT_SYMBOL_GPL
+0xe5a78c1e ib_sa_unpack_path drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x1f8bbe0a nvme_reset_ctrl_sync drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0xb9cba57f cast_s3 crypto/cast_common EXPORT_SYMBOL_GPL
+0xb9ecef2c tcp_unregister_ulp vmlinux EXPORT_SYMBOL_GPL
+0x394fbc80 neigh_connected_output vmlinux EXPORT_SYMBOL
+0xf92c9fa1 drm_plane_init vmlinux EXPORT_SYMBOL
+0xe9f7149c zlib_deflate_workspacesize vmlinux EXPORT_SYMBOL
+0x33087cc8 posix_acl_create vmlinux EXPORT_SYMBOL_GPL
+0x14bd7b31 irq_domain_translate_twocell vmlinux EXPORT_SYMBOL_GPL
+0x004af87f irq_set_chained_handler_and_data vmlinux EXPORT_SYMBOL_GPL
+0x511af7d1 __percpu_init_rwsem vmlinux EXPORT_SYMBOL_GPL
+0xdfb14029 down_read_killable vmlinux EXPORT_SYMBOL
+0x98bf71fb param_set_ushort vmlinux EXPORT_SYMBOL
+0x6d8730d7 svc_recv net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4274d993 snd_timer_notify sound/core/snd-timer EXPORT_SYMBOL
+0x9a098389 mdev_register_device drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x0af439e2 iscsit_queue_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x24952eb9 usb_add_function drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xd5fdbdc1 usb_ep_autoconfig_release drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL
+0xa8e1cd6d pnfs_generic_pg_writepages fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x4e68e9be rb_next_postorder vmlinux EXPORT_SYMBOL
+0x8f1de180 udp_flush_pending_frames vmlinux EXPORT_SYMBOL
+0x47260f01 xdp_rxq_info_is_reg vmlinux EXPORT_SYMBOL_GPL
+0x40d4eead dev_get_stats vmlinux EXPORT_SYMBOL
+0x456c3a37 skb_vlan_untag vmlinux EXPORT_SYMBOL
+0xb0cdfe23 of_property_read_variable_u64_array vmlinux EXPORT_SYMBOL_GPL
+0x67906e79 of_property_read_variable_u32_array vmlinux EXPORT_SYMBOL_GPL
+0x9bddad0e of_property_read_variable_u16_array vmlinux EXPORT_SYMBOL_GPL
+0x851e6003 usb_phy_roothub_calibrate vmlinux EXPORT_SYMBOL_GPL
+0xb62d5171 genphy_c45_pma_setup_forced vmlinux EXPORT_SYMBOL_GPL
+0xb78956f0 pm_generic_poweroff vmlinux EXPORT_SYMBOL_GPL
+0x6052860c ttm_round_pot vmlinux EXPORT_SYMBOL
+0x86fb9b05 bitmap_parse_user vmlinux EXPORT_SYMBOL
+0x01000e51 schedule vmlinux EXPORT_SYMBOL
+0x3fc2387c ib_ud_header_unpack drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3f67f701 nlmsg_notify vmlinux EXPORT_SYMBOL
+0xdd8166a1 dma_fence_free vmlinux EXPORT_SYMBOL
+0x0e20e616 drm_property_create_enum vmlinux EXPORT_SYMBOL
+0x18899e09 drm_atomic_bridge_enable vmlinux EXPORT_SYMBOL
+0xb2b0a3b6 drm_i2c_encoder_save vmlinux EXPORT_SYMBOL
+0x5a4734d1 drm_kms_helper_is_poll_worker vmlinux EXPORT_SYMBOL
+0x7269dddd serial8250_do_set_divisor vmlinux EXPORT_SYMBOL_GPL
+0xa64e7585 blk_mq_tag_to_rq vmlinux EXPORT_SYMBOL
+0xb089700f proc_create_net_single_write vmlinux EXPORT_SYMBOL_GPL
+0xed2afa11 ww_mutex_unlock vmlinux EXPORT_SYMBOL
+0xf1c67340 mmput vmlinux EXPORT_SYMBOL_GPL
+0xe9832cc1 init_task vmlinux EXPORT_SYMBOL
+0x6bd1c2ae snd_rawmidi_transmit_peek sound/core/snd-rawmidi EXPORT_SYMBOL
+0xb3549bff transport_generic_free_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xa201bd86 nvme_complete_async_event drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL
+0x1378280f __fscache_wait_on_page_write fs/fscache/fscache EXPORT_SYMBOL
+0xd9f45c5f vlan_dev_vlan_proto vmlinux EXPORT_SYMBOL
+0x167ac886 __hw_addr_sync_dev vmlinux EXPORT_SYMBOL
+0x64f1d250 wakeup_source_create vmlinux EXPORT_SYMBOL_GPL
+0x75837adb ttm_mem_io_free vmlinux EXPORT_SYMBOL
+0xae59ea0b drm_probe_ddc vmlinux EXPORT_SYMBOL
+0x193055d3 pcibios_resource_to_bus vmlinux EXPORT_SYMBOL
+0xd9e8aee7 refcount_dec_and_lock vmlinux EXPORT_SYMBOL
+0x11ab0477 csum_and_copy_from_iter_full vmlinux EXPORT_SYMBOL
+0xe6f748d8 iomap_file_dirty vmlinux EXPORT_SYMBOL_GPL
+0xf6c8dc62 cpu_hotplug_enable vmlinux EXPORT_SYMBOL_GPL
+0x050f9329 rpc_sleep_on_priority_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x48ad92b7 target_wait_for_sess_cmds drivers/target/target_core_mod EXPORT_SYMBOL
+0xc1c92f93 unregister_sja1000dev drivers/net/can/sja1000/sja1000 EXPORT_SYMBOL_GPL
+0x53d68f2c mlx5_query_min_inline drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xa0fe86fc gnet_stats_copy_app vmlinux EXPORT_SYMBOL
+0xd681af60 i2c_verify_client vmlinux EXPORT_SYMBOL
+0x33817879 input_allocate_device vmlinux EXPORT_SYMBOL
+0xd1efa98b pm_runtime_barrier vmlinux EXPORT_SYMBOL_GPL
+0x6f944f8f dev_printk vmlinux EXPORT_SYMBOL
+0x3a8063f3 drm_dp_dsc_sink_supported_input_bpcs vmlinux EXPORT_SYMBOL
+0xbcb7b7aa pci_set_pcie_reset_state vmlinux EXPORT_SYMBOL_GPL
+0x1b015d25 bitmap_parselist vmlinux EXPORT_SYMBOL
+0xd7537fbe __insert_inode_hash vmlinux EXPORT_SYMBOL
+0xf474fdcb kfree_const vmlinux EXPORT_SYMBOL
+0xd63ce82a __tracepoint_cpu_frequency vmlinux EXPORT_SYMBOL_GPL
+0x65f19c38 revert_creds vmlinux EXPORT_SYMBOL
+0xdf929370 fs_overflowgid vmlinux EXPORT_SYMBOL
+0x36087aa4 rds_stats net/rds/rds EXPORT_SYMBOL_GPL
+0xe3528bb2 md_do_sync drivers/md/md-mod EXPORT_SYMBOL_GPL
+0x58fa7d83 __tracepoint_mlx5_fs_add_fg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x39153c11 __tracepoint_mlx5_fs_add_ft drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc8a91f5b cpumask_local_spread vmlinux EXPORT_SYMBOL
+0xa32727d9 reuseport_attach_prog vmlinux EXPORT_SYMBOL
+0x2b523ff5 drm_fb_helper_sys_imageblit vmlinux EXPORT_SYMBOL
+0xd3d03355 reset_controller_unregister vmlinux EXPORT_SYMBOL_GPL
+0x5153b6d1 fb_ddc_read vmlinux EXPORT_SYMBOL_GPL
+0x714c5157 cpu_rmap_add vmlinux EXPORT_SYMBOL
+0xdecd0b29 __stack_chk_fail vmlinux EXPORT_SYMBOL
+0x932a6ffc dm_tm_shadow_block drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x45797650 nfs_client_init_is_complete fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x92fc8e92 xfrm_policy_walk vmlinux EXPORT_SYMBOL
+0x3e393970 nf_log_unset vmlinux EXPORT_SYMBOL
+0x345d0d23 pcie_get_speed_cap vmlinux EXPORT_SYMBOL
+0x97de2b83 debug_locks_silent vmlinux EXPORT_SYMBOL_GPL
+0xa25cd051 bdi_alloc_node vmlinux EXPORT_SYMBOL
+0x823eae06 blocking_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0x4d7c0f58 line6_init_midi sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL
+0xa51fbedc dm_bitset_new drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x5a116e33 nfs_init_commit fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x8a9a34a6 ping_hash vmlinux EXPORT_SYMBOL_GPL
+0x3fed55a8 __napi_schedule_irqoff vmlinux EXPORT_SYMBOL
+0x9fccddf5 regmap_read vmlinux EXPORT_SYMBOL_GPL
+0x7ca0833d drm_vma_offset_manager_destroy vmlinux EXPORT_SYMBOL
+0x9b2492c3 iommu_map_sg vmlinux EXPORT_SYMBOL_GPL
+0xe961f0b4 regulator_set_suspend_voltage vmlinux EXPORT_SYMBOL_GPL
+0x6a22b289 clk_hw_register_fixed_rate vmlinux EXPORT_SYMBOL_GPL
+0x4bbd232c crypto_unregister_skciphers vmlinux EXPORT_SYMBOL_GPL
+0xc0f15126 dquot_quota_off vmlinux EXPORT_SYMBOL
+0x8bd07707 kthread_flush_work vmlinux EXPORT_SYMBOL_GPL
+0x69f82f38 mptscsih_shutdown drivers/message/fusion/mptscsih EXPORT_SYMBOL
+0x10adfccb ppp_unregister_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x8a9271f7 mlx5_nic_vport_update_local_lb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x37aa53ae mlx5_comp_irq_get_affinity_mask drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x6005c351 zpool_has_pool mm/zpool EXPORT_SYMBOL
+0x50f85302 __arm_smccc_hvc vmlinux EXPORT_SYMBOL
+0xf93aae46 __arm_smccc_smc vmlinux EXPORT_SYMBOL
+0x4fd149fb xfrm_audit_state_add vmlinux EXPORT_SYMBOL_GPL
+0x52f40a55 __skb_checksum_complete vmlinux EXPORT_SYMBOL
+0x20377c5c power_supply_register_no_ws vmlinux EXPORT_SYMBOL_GPL
+0x8f4c8fd6 genphy_read_lpa vmlinux EXPORT_SYMBOL
+0xf0a9354c ata_pci_device_do_suspend vmlinux EXPORT_SYMBOL_GPL
+0x3c3aa1bc tty_lock vmlinux EXPORT_SYMBOL
+0x6d940f96 regulator_register vmlinux EXPORT_SYMBOL_GPL
+0xd5b3d0d5 xxh64_copy_state vmlinux EXPORT_SYMBOL
+0xbe5a24e9 xxh32_copy_state vmlinux EXPORT_SYMBOL
+0xb5a459dc unregister_blkdev vmlinux EXPORT_SYMBOL
+0xe2858b8f key_alloc vmlinux EXPORT_SYMBOL
+0x95695399 write_inode_now vmlinux EXPORT_SYMBOL
+0xe313615b init_pseudo vmlinux EXPORT_SYMBOL
+0x10075f38 housekeeping_any_cpu vmlinux EXPORT_SYMBOL_GPL
+0x33dea223 nci_nfcc_loopback net/nfc/nci/nci EXPORT_SYMBOL
+0xbc0d7c56 ceph_osdc_notify_ack net/ceph/libceph EXPORT_SYMBOL
+0x69049cd2 radix_tree_replace_slot vmlinux EXPORT_SYMBOL
+0x2f0d9053 usb_otg_state_string vmlinux EXPORT_SYMBOL_GPL
+0x6c257ac0 tty_termios_hw_change vmlinux EXPORT_SYMBOL
+0x43bbebba gpiochip_line_is_valid vmlinux EXPORT_SYMBOL_GPL
+0x99eba351 crypto_rng_reset vmlinux EXPORT_SYMBOL_GPL
+0x7b5a4926 sha1_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0xc67350b5 security_dentry_create_files_as vmlinux EXPORT_SYMBOL
+0xe0adcbf0 fat_search_long vmlinux EXPORT_SYMBOL_GPL
+0x40ce179e seq_path vmlinux EXPORT_SYMBOL
+0xcc5005fe msleep_interruptible vmlinux EXPORT_SYMBOL
+0xdc6596fa irq_set_parent vmlinux EXPORT_SYMBOL_GPL
+0x411ef214 rxrpc_kernel_new_call_notification net/rxrpc/rxrpc EXPORT_SYMBOL
+0xd7673035 g_verify_token_header net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x1f099794 synproxy_init_timestamp_cookie net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0xfaa3b50b iscsi_session_setup drivers/scsi/libiscsi EXPORT_SYMBOL_GPL
+0x89434b4b radix_tree_tag_clear vmlinux EXPORT_SYMBOL
+0x5536c930 dev_addr_add vmlinux EXPORT_SYMBOL
+0x0b217848 netdev_err vmlinux EXPORT_SYMBOL
+0x62849ac7 dev_valid_name vmlinux EXPORT_SYMBOL
+0x411e540e kfree_skb_partial vmlinux EXPORT_SYMBOL
+0xc6e2261b __sk_mem_reduce_allocated vmlinux EXPORT_SYMBOL
+0xfc3973d8 __tracepoint_mc_event vmlinux EXPORT_SYMBOL_GPL
+0xc7b791b2 of_node_name_prefix vmlinux EXPORT_SYMBOL
+0x89bbafc6 usb_register_notify vmlinux EXPORT_SYMBOL_GPL
+0xb0a0da0c rational_best_approximation vmlinux EXPORT_SYMBOL
+0x1af267f8 int_pow vmlinux EXPORT_SYMBOL_GPL
+0xf888ca21 sg_init_table vmlinux EXPORT_SYMBOL
+0x39fe5a63 blk_pre_runtime_resume vmlinux EXPORT_SYMBOL
+0xcc5a24fe fsnotify vmlinux EXPORT_SYMBOL_GPL
+0xbf2ef395 generic_file_open vmlinux EXPORT_SYMBOL
+0xa9da2563 fixup_user_fault vmlinux EXPORT_SYMBOL_GPL
+0xb43f9365 ktime_get vmlinux EXPORT_SYMBOL_GPL
+0xc54586d0 ip_set_get_ip_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x1851abb6 vringh_notify_enable_user drivers/vhost/vringh EXPORT_SYMBOL
+0x5209c75e mlx5_core_create_rqt drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x2c7d16b9 mlx5_core_create_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xda1fad82 mlx5_core_create_tir drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc7b173bf mlx5_core_create_psv drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x15254a13 mlx5_core_create_dct drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xbfbefdc9 crypto_engine_stop crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xebb3761d inet_addr_type_dev_table vmlinux EXPORT_SYMBOL
+0x33033f82 __mdiobus_register vmlinux EXPORT_SYMBOL
+0xb4616ace genphy_update_link vmlinux EXPORT_SYMBOL
+0x238dc59a ata_sff_freeze vmlinux EXPORT_SYMBOL_GPL
+0xf29cdf27 software_node_register vmlinux EXPORT_SYMBOL_GPL
+0x5289ed11 drm_get_pci_dev vmlinux EXPORT_SYMBOL
+0x6749d53f hdmi_vendor_infoframe_init vmlinux EXPORT_SYMBOL
+0x74c172f2 generic_cont_expand_simple vmlinux EXPORT_SYMBOL
+0x50212007 __vfs_setxattr vmlinux EXPORT_SYMBOL
+0xb2b2a9d7 d_instantiate vmlinux EXPORT_SYMBOL
+0x4e8dfeea irq_chip_unmask_parent vmlinux EXPORT_SYMBOL_GPL
+0x5b1a8694 nfc_hci_recv_frame net/nfc/hci/hci EXPORT_SYMBOL
+0x8ed2fffa
rpc_clnt_swap_deactivate net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x41969256 line6_write_data sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x07e2fac9 register_sound_special_device sound/soundcore EXPORT_SYMBOL +0x0765a1e4 vringh_init_user drivers/vhost/vringh EXPORT_SYMBOL +0x34757645 mlx5_core_dealloc_transport_domain drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xe1cfa261 __tracepoint_fdb_delete vmlinux EXPORT_SYMBOL_GPL +0xec9a6fb0 skb_set_owner_w vmlinux EXPORT_SYMBOL +0x7a1aeabd thermal_zone_of_sensor_register vmlinux EXPORT_SYMBOL_GPL +0x548aaf7f iscsi_ping_comp_event vmlinux EXPORT_SYMBOL_GPL +0xc25f3e50 drm_mm_scan_remove_block vmlinux EXPORT_SYMBOL +0xd3f66517 __nla_put_64bit vmlinux EXPORT_SYMBOL +0x984ce9bd __nla_parse vmlinux EXPORT_SYMBOL +0xc19d7942 page_mapping vmlinux EXPORT_SYMBOL +0xeaf3cb23 crc64_be lib/crc64 EXPORT_SYMBOL_GPL +0xd6524723 rdma_nl_put_driver_u64_hex drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4ab67754 rdma_read_gid_attr_ndev_rcu drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7b56b034 ib_unregister_driver drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2ea2839b m_can_init_ram drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL +0x01e05702 mlx4_SET_PORT_qpn_calc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xbd6f6605 af_alg_wmem_wakeup crypto/af_alg EXPORT_SYMBOL_GPL +0x8e45b4f1 __fscache_unregister_netfs fs/fscache/fscache EXPORT_SYMBOL +0xc75d5ab2 xt_alloc_table_info vmlinux EXPORT_SYMBOL +0x71e0ab9f usb_of_get_interface_node vmlinux EXPORT_SYMBOL_GPL +0x4ef72968 ttm_bo_bulk_move_lru_tail vmlinux EXPORT_SYMBOL +0xc9af9a15 iommu_domain_alloc vmlinux EXPORT_SYMBOL_GPL +0xba56cc0b clk_hw_register_gate vmlinux EXPORT_SYMBOL_GPL +0x4df02057 crc32_be vmlinux EXPORT_SYMBOL +0x5bbdfa26 scatterwalk_ffwd vmlinux EXPORT_SYMBOL_GPL +0xb89a6e9f iomap_readpage vmlinux EXPORT_SYMBOL_GPL +0x26520970 vm_memory_committed vmlinux EXPORT_SYMBOL_GPL +0x20fff6ec ZSTD_DStreamInSize lib/zstd/zstd_decompress EXPORT_SYMBOL +0x9f10b785 ceph_osdc_new_request net/ceph/libceph EXPORT_SYMBOL +0xdc3eb437 rpc_restart_call net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x48d1c7dc dm_btree_find_lowest_key drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x44e5bc9b xfrm_output_resume vmlinux EXPORT_SYMBOL_GPL +0x97f9ef64 dev_get_phys_port_name vmlinux EXPORT_SYMBOL +0xab905125 skb_find_text vmlinux EXPORT_SYMBOL +0x4b2fd901 i2c_unregister_device vmlinux EXPORT_SYMBOL_GPL +0x3a4f6a32 ata_sff_interrupt vmlinux EXPORT_SYMBOL_GPL +0xde0ed454 ttm_kunmap_atomic_prot vmlinux EXPORT_SYMBOL +0xcea67211 drm_helper_crtc_in_use vmlinux EXPORT_SYMBOL +0x37c3c5b2 tpm_pcr_extend vmlinux EXPORT_SYMBOL_GPL +0xd462b0c8 regulator_unregister vmlinux EXPORT_SYMBOL_GPL +0x7603e395 pci_clear_master vmlinux EXPORT_SYMBOL +0xee4237b1 bio_integrity_add_page vmlinux EXPORT_SYMBOL +0x065922ac dump_align vmlinux EXPORT_SYMBOL +0xbbe80fdb kmalloc_order vmlinux EXPORT_SYMBOL +0x0f5f1ac0 __dec_node_page_state vmlinux EXPORT_SYMBOL +0x2cdb8788 __inc_node_page_state vmlinux EXPORT_SYMBOL +0x3b4b0f23 __mod_node_page_state vmlinux EXPORT_SYMBOL +0x751ff010 vsock_addr_unbind net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x23ebd5fb dm_bitset_set_bit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xfc619abd _nfs_display_fhandle fs/nfs/nfs EXPORT_SYMBOL_GPL +0x134cdd34 inet_frag_rbtree_purge vmlinux EXPORT_SYMBOL +0xd6ded98a skb_queue_tail vmlinux EXPORT_SYMBOL +0x6e30ba8e drm_rect_rotate_inv vmlinux EXPORT_SYMBOL +0x0810be09 free_irq_cpu_rmap vmlinux EXPORT_SYMBOL +0x161fe2f7 
crypto_lookup_template vmlinux EXPORT_SYMBOL_GPL +0x9a35e343 __register_nls vmlinux EXPORT_SYMBOL +0x6041d3b9 register_sysctl vmlinux EXPORT_SYMBOL +0x4cb796d5 vfs_test_lock vmlinux EXPORT_SYMBOL_GPL +0x952664c5 do_exit vmlinux EXPORT_SYMBOL_GPL +0xfe07dc8e p9_is_proto_dotu net/9p/9pnet EXPORT_SYMBOL +0x6085edbd nfs_map_string_to_numeric fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x62a946bc l3mdev_master_upper_ifindex_by_index_rcu vmlinux EXPORT_SYMBOL_GPL +0x3709e2e0 devm_device_add_groups vmlinux EXPORT_SYMBOL_GPL +0x99fd20aa drm_rect_clip_scaled vmlinux EXPORT_SYMBOL +0xee4c5000 drm_self_refresh_helper_alter_state vmlinux EXPORT_SYMBOL +0x799aebb1 sbitmap_get_shallow vmlinux EXPORT_SYMBOL_GPL +0xec753a23 mark_buffer_dirty_inode vmlinux EXPORT_SYMBOL +0xabcffa08 ceph_msg_get net/ceph/libceph EXPORT_SYMBOL +0xef2df1b9 ip_tunnel_lookup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xbc804745 ib_get_net_dev_by_params drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xba7bbff3 __put_mtd_device drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x7fa53a5c __get_mtd_device drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xc296c374 mtd_wunit_to_pairing_info drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x7c381a76 dm_bufio_get_block_size drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x162c58dd ata_sff_thaw vmlinux EXPORT_SYMBOL_GPL +0xa88d9d49 dev_pm_qos_remove_request vmlinux EXPORT_SYMBOL_GPL +0x8f46f879 fwnode_connection_find_match vmlinux EXPORT_SYMBOL_GPL +0x51098a60 drm_connector_set_path_property vmlinux EXPORT_SYMBOL +0xd4db2c1e dma_wait_for_async_tx vmlinux EXPORT_SYMBOL_GPL +0xdd6f877f devm_clk_bulk_get vmlinux EXPORT_SYMBOL_GPL +0x0b26b8c8 acpi_run_osc vmlinux EXPORT_SYMBOL +0x1a0fa45c backlight_device_register vmlinux EXPORT_SYMBOL +0x512afa23 simple_dentry_operations vmlinux EXPORT_SYMBOL +0xb1c3a01a oops_in_progress vmlinux EXPORT_SYMBOL +0x54c372cf nf_ct_helper_log net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x418873cc irq_bypass_register_producer vmlinux EXPORT_SYMBOL_GPL +0x7ac49768 netdev_lower_dev_get_private vmlinux EXPORT_SYMBOL +0x3cb83d5b eeprom_93cx6_multireadb vmlinux EXPORT_SYMBOL_GPL +0x72d73b22 tty_check_change vmlinux EXPORT_SYMBOL +0x3c3478cc tty_insert_flip_string_fixed_flag vmlinux EXPORT_SYMBOL +0x7e6b35d5 tty_register_ldisc vmlinux EXPORT_SYMBOL +0x0bb1c74d tty_throttle vmlinux EXPORT_SYMBOL +0xede9a09a btree_lookup vmlinux EXPORT_SYMBOL_GPL +0x70238735 public_key_subtype vmlinux EXPORT_SYMBOL_GPL +0x82013333 kill_fasync vmlinux EXPORT_SYMBOL +0xe6e5d754 sget_fc vmlinux EXPORT_SYMBOL +0x75d56387 param_get_byte vmlinux EXPORT_SYMBOL +0x2b8fc976 gfn_to_pfn_atomic vmlinux EXPORT_SYMBOL_GPL +0xf513044f save_stack_trace_regs vmlinux EXPORT_SYMBOL_GPL +0x1803a6ed raid6_2data_recov lib/raid6/raid6_pq EXPORT_SYMBOL_GPL +0x4b4f1e21 nci_uart_register net/nfc/nci/nci_uart EXPORT_SYMBOL_GPL +0x1718301d m_can_class_free_dev drivers/net/can/m_can/m_can EXPORT_SYMBOL_GPL +0x4dabac9e fc_fill_hdr drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x0cfd3fc5 ocfs2_cluster_connect_agnostic fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xad995dac netdev_stats_to_stats64 vmlinux EXPORT_SYMBOL +0x892b4a89 dev_disable_lro vmlinux EXPORT_SYMBOL +0x78e8a516 sock_alloc vmlinux EXPORT_SYMBOL +0x98fa1e20 dm_get_reserved_rq_based_ios vmlinux EXPORT_SYMBOL_GPL +0x67c11466 attribute_container_classdev_to_container vmlinux EXPORT_SYMBOL_GPL +0xc1670094 drm_put_dev vmlinux EXPORT_SYMBOL +0x2274ea85 drm_gem_vm_open vmlinux EXPORT_SYMBOL +0xcb7e240e drm_fb_helper_setcmap vmlinux EXPORT_SYMBOL +0x06d54d35 drm_lspcon_set_mode vmlinux EXPORT_SYMBOL +0x21bdb523 
errseq_check_and_advance vmlinux EXPORT_SYMBOL +0xd9b85ef6 lockref_get vmlinux EXPORT_SYMBOL +0x283768c9 debugfs_create_u8 vmlinux EXPORT_SYMBOL_GPL +0x94ece1fa kill_pid_usb_asyncio vmlinux EXPORT_SYMBOL_GPL +0xaa4db45c ib_unregister_mad_agent drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc9a3422d dm_bufio_write_dirty_buffers_async drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x46a2e26f md_rdev_clear drivers/md/md-mod EXPORT_SYMBOL_GPL +0x4f0216b8 __tracepoint_bcache_btree_node_compact drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xd4af6d8a mlx4_map_phys_fmr drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x501dc1bf hinic_get_rq_free_wqebbs drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x55d0871c store_sampling_rate vmlinux EXPORT_SYMBOL_GPL +0xd3dd2679 regulator_list_voltage vmlinux EXPORT_SYMBOL_GPL +0x2446f6f5 __acpi_node_get_property_reference vmlinux EXPORT_SYMBOL_GPL +0xa5dbdcde pci_wait_for_pending_transaction vmlinux EXPORT_SYMBOL +0xced0f4d4 gen_pool_create vmlinux EXPORT_SYMBOL +0xcb3ae215 call_blocking_lsm_notifier vmlinux EXPORT_SYMBOL +0xdd0762df set_worker_desc vmlinux EXPORT_SYMBOL_GPL +0x9caef258 vhost_poll_init drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xc933d243 rdma_set_afonly drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xe3b55f17 ib_destroy_qp_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5cf3dd79 mlxsw_core_bus_device_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x061ac30d mlx5_set_port_tc_bw_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x25c51cbb hinic_set_arm_bit drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7adab903 scsi_is_fc_rport drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xf1caafa7 nfs_file_llseek fs/nfs/nfs EXPORT_SYMBOL_GPL +0x09da0ba4 xa_set_mark vmlinux EXPORT_SYMBOL +0x01b6865c xa_get_mark vmlinux EXPORT_SYMBOL +0x14101101 tcf_idr_cleanup vmlinux EXPORT_SYMBOL +0xcd21f001 devm_nvmem_register vmlinux EXPORT_SYMBOL_GPL +0xfef2639d mdio_driver_register vmlinux EXPORT_SYMBOL +0xc99afa7b regmap_get_reg_stride vmlinux EXPORT_SYMBOL_GPL +0xa531471e clk_save_context vmlinux EXPORT_SYMBOL_GPL +0xd25bc5d4 csum_tcpudp_nofold vmlinux EXPORT_SYMBOL +0x6c224cda gen_pool_destroy vmlinux EXPORT_SYMBOL +0x5149b748 blk_mq_tagset_busy_iter vmlinux EXPORT_SYMBOL +0x3f9e3b5f vma_kernel_pagesize vmlinux EXPORT_SYMBOL_GPL +0xad6f7144 __tracepoint_kmalloc_node vmlinux EXPORT_SYMBOL +0x085bffd1 trace_seq_bitmask vmlinux EXPORT_SYMBOL_GPL +0x8ddd8aad schedule_timeout vmlinux EXPORT_SYMBOL +0x2ecda3ca irq_chip_mask_parent vmlinux EXPORT_SYMBOL_GPL +0x45727260 mtd_ooblayout_count_freebytes drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x7baca7fe __tracepoint_bcache_btree_read drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xaa51b464 mlx5_core_dealloc_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xea66fff5 nfs4_decode_mp_ds_addr fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x65ccb6f0 call_netevent_notifiers vmlinux EXPORT_SYMBOL_GPL +0x0720fa61 ata_scsi_port_error_handler vmlinux EXPORT_SYMBOL_GPL +0x97e2f3cf crypto_alloc_base vmlinux EXPORT_SYMBOL_GPL +0xbe9755d0 is_subdir vmlinux EXPORT_SYMBOL +0x1b1471f3 alarm_start vmlinux EXPORT_SYMBOL_GPL +0xfdeebe63 rdma_notify drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x68b2f180 __tracepoint_bcache_btree_cache_cannibalize drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xb4b03b2e __tracepoint_bcache_request_end drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x452ba683 ipv6_ext_hdr vmlinux EXPORT_SYMBOL +0x9b059aeb neigh_destroy vmlinux 
EXPORT_SYMBOL +0x929bd67a netdev_warn vmlinux EXPORT_SYMBOL +0x3b9b2506 hid_validate_values vmlinux EXPORT_SYMBOL_GPL +0x3e2af4ee ata_pci_device_do_resume vmlinux EXPORT_SYMBOL_GPL +0xcefcd99a serial8250_unregister_port vmlinux EXPORT_SYMBOL +0x0db177c6 snd_ctl_rename_id sound/core/snd EXPORT_SYMBOL +0xbe645d83 free_c_can_dev drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL +0xc5a1ff18 __xfrm_policy_check vmlinux EXPORT_SYMBOL +0xc26504a2 ata_pci_sff_init_host vmlinux EXPORT_SYMBOL_GPL +0x4d2c7133 acpi_info vmlinux EXPORT_SYMBOL +0x3aff3200 acpi_evaluate_object_typed vmlinux EXPORT_SYMBOL +0x21cde9d6 devm_pci_remap_iospace vmlinux EXPORT_SYMBOL +0x9ecb7b5c pci_read_config_word vmlinux EXPORT_SYMBOL +0x479b16c0 gpiod_count vmlinux EXPORT_SYMBOL_GPL +0xb49d1df2 rhashtable_walk_next vmlinux EXPORT_SYMBOL_GPL +0x6de41a91 blk_lld_busy vmlinux EXPORT_SYMBOL_GPL +0x1b864766 blk_sync_queue vmlinux EXPORT_SYMBOL +0xfea52655 cryptd_aead_child vmlinux EXPORT_SYMBOL_GPL +0x0d2f04e8 __kthread_init_worker vmlinux EXPORT_SYMBOL_GPL +0xf5491ec1 gre_del_protocol net/ipv4/gre EXPORT_SYMBOL_GPL +0x5338bad0 line6_pcm_acquire sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0xeda8226d ibdev_info drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2a4e7ffc ip6_route_input_lookup vmlinux EXPORT_SYMBOL_GPL +0x93922111 get_compat_bpf_fprog vmlinux EXPORT_SYMBOL_GPL +0x2e8b6151 lwtunnel_output vmlinux EXPORT_SYMBOL_GPL +0x7571420a skb_clone_tx_timestamp vmlinux EXPORT_SYMBOL_GPL +0xa882a7f1 hid_open_report vmlinux EXPORT_SYMBOL_GPL +0x6b54dc4c phy_advertise_supported vmlinux EXPORT_SYMBOL +0xe7a675d5 scsi_schedule_eh vmlinux EXPORT_SYMBOL_GPL +0x4ffd1605 regmap_attach_dev vmlinux EXPORT_SYMBOL_GPL +0x1ecdfce5 pm_runtime_irq_safe vmlinux EXPORT_SYMBOL_GPL +0x1db047cd class_unregister vmlinux EXPORT_SYMBOL_GPL +0x57173530 drm_gem_private_object_init vmlinux EXPORT_SYMBOL +0x2093624d fat_alloc_new_dir vmlinux EXPORT_SYMBOL_GPL +0xb6449f00 simple_write_end vmlinux EXPORT_SYMBOL +0x1234e483 get_cpu_iowait_time_us vmlinux EXPORT_SYMBOL_GPL +0x1bc9aff9 node_data vmlinux EXPORT_SYMBOL +0x38393d63 nfs_async_iocounter_wait fs/nfs/nfs EXPORT_SYMBOL_GPL +0x71ce9962 unix_get_socket vmlinux EXPORT_SYMBOL +0xefeefb57 ping_init_sock vmlinux EXPORT_SYMBOL_GPL +0x33162797 netdev_update_features vmlinux EXPORT_SYMBOL +0x08e301c5 dev_get_port_parent_id vmlinux EXPORT_SYMBOL +0x877a1126 spi_async vmlinux EXPORT_SYMBOL_GPL +0x679862a1 ata_sff_postreset vmlinux EXPORT_SYMBOL_GPL +0xbb97a746 ata_sas_port_resume vmlinux EXPORT_SYMBOL_GPL +0xb5c35b1f regmap_mmio_detach_clk vmlinux EXPORT_SYMBOL_GPL +0xec887d54 regmap_mmio_attach_clk vmlinux EXPORT_SYMBOL_GPL +0xd03b3bbb __regmap_init_i2c vmlinux EXPORT_SYMBOL_GPL +0x27eb9996 regmap_raw_write_async vmlinux EXPORT_SYMBOL_GPL +0xb048162b devm_acpi_dma_controller_free vmlinux EXPORT_SYMBOL_GPL +0x44db28f5 acpi_dev_get_property vmlinux EXPORT_SYMBOL_GPL +0xd23efa0c sys_imageblit vmlinux EXPORT_SYMBOL +0x274dd1a3 sg_free_table_chained vmlinux EXPORT_SYMBOL_GPL +0x59fe70a8 ftrace_set_filter_ip vmlinux EXPORT_SYMBOL_GPL +0xa0eae826 smp_call_function vmlinux EXPORT_SYMBOL +0xef6c3f70 round_jiffies_up_relative vmlinux EXPORT_SYMBOL_GPL +0xd9c25654 nf_nat_masquerade_inet_unregister_notifiers net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xa948fbae ps2_handle_response drivers/input/serio/libps2 EXPORT_SYMBOL +0x3d4b4367 ps2_command drivers/input/serio/libps2 EXPORT_SYMBOL +0x2940d571 eth_header_cache vmlinux EXPORT_SYMBOL +0x5cad8fc3 power_supply_ocv2cap_simple vmlinux EXPORT_SYMBOL_GPL +0xc0e8ce43 
i2c_smbus_read_byte_data vmlinux EXPORT_SYMBOL +0x36464d7b scsi_print_sense_hdr vmlinux EXPORT_SYMBOL +0xe4ccbb9a scsi_host_lookup vmlinux EXPORT_SYMBOL +0x558794a5 iommu_dev_disable_feature vmlinux EXPORT_SYMBOL_GPL +0x932d1c8c uart_set_options vmlinux EXPORT_SYMBOL_GPL +0x45e02e2a regulator_is_supported_voltage vmlinux EXPORT_SYMBOL_GPL +0x402343de pci_find_capability vmlinux EXPORT_SYMBOL +0xb678366f int_sqrt vmlinux EXPORT_SYMBOL +0xb9d025c9 llist_del_first vmlinux EXPORT_SYMBOL_GPL +0x5808e7cb dec_zone_page_state vmlinux EXPORT_SYMBOL +0xdf93b9d8 timespec64_to_jiffies vmlinux EXPORT_SYMBOL +0x42c25846 llc_mac_hdr_init net/llc/llc EXPORT_SYMBOL +0xf38bcdf3 nf_conntrack_max net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x05f53153 mlxsw_core_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x76a38f41 mlx4_test_interrupt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xf4da0aab srp_rport_del drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0x7647201b config_item_put fs/configfs/configfs EXPORT_SYMBOL +0x2220b0b1 config_item_get fs/configfs/configfs EXPORT_SYMBOL +0xc59639a4 drm_mode_create_aspect_ratio_property vmlinux EXPORT_SYMBOL +0xec68ba70 clk_bulk_enable vmlinux EXPORT_SYMBOL_GPL +0xd96babb4 interval_tree_iter_next vmlinux EXPORT_SYMBOL_GPL +0x9c4d7411 badblocks_exit vmlinux EXPORT_SYMBOL_GPL +0x5ec906b0 mnt_drop_write_file vmlinux EXPORT_SYMBOL +0xdd5ce2b1 generic_update_time vmlinux EXPORT_SYMBOL +0x10f8772b __tracepoint_module_get vmlinux EXPORT_SYMBOL +0x1eb9516e round_jiffies_relative vmlinux EXPORT_SYMBOL_GPL +0xa7eedcc4 call_usermodehelper vmlinux EXPORT_SYMBOL +0xb88e5bb4 ceph_monc_renew_subs net/ceph/libceph EXPORT_SYMBOL +0x453efa54 svc_pool_map net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x61a7e5eb fc_linkup drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x30b56bcd __cast6_setkey crypto/cast6_generic EXPORT_SYMBOL_GPL +0x30c5106e pnfs_layoutcommit_inode fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xad167af1 __fscache_disable_cookie fs/fscache/fscache EXPORT_SYMBOL +0x977be5c7 klist_iter_init_node vmlinux EXPORT_SYMBOL_GPL +0xbbacf278 inet_csk_compat_setsockopt vmlinux EXPORT_SYMBOL_GPL +0x90523184 inet_csk_compat_getsockopt vmlinux EXPORT_SYMBOL_GPL +0xcd4d3d74 mbox_client_txdone vmlinux EXPORT_SYMBOL_GPL +0x6816c283 dm_put_table_device vmlinux EXPORT_SYMBOL +0xa10b28e9 dm_get_table_device vmlinux EXPORT_SYMBOL_GPL +0xf4bdad2a __i2c_smbus_xfer vmlinux EXPORT_SYMBOL +0xf8665cd8 ttm_bo_device_release vmlinux EXPORT_SYMBOL +0x238b099f mipi_dsi_packet_format_is_short vmlinux EXPORT_SYMBOL +0xcc17e0df drm_atomic_helper_dirtyfb vmlinux EXPORT_SYMBOL +0xc8ddd5b5 kstrdup_quotable vmlinux EXPORT_SYMBOL_GPL +0x0634100a bitmap_parselist_user vmlinux EXPORT_SYMBOL +0xacacef2a blk_queue_update_dma_alignment vmlinux EXPORT_SYMBOL +0xbaf6850c fsnotify_wait_marks_destroyed vmlinux EXPORT_SYMBOL_GPL +0x07c23703 hrtimer_try_to_cancel vmlinux EXPORT_SYMBOL_GPL +0x1262c51b nfc_add_se net/nfc/nfc EXPORT_SYMBOL +0xa15aaa62 rds_connect_complete net/rds/rds EXPORT_SYMBOL_GPL +0x026edbcf nf_ct_seqadj_set net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x4b54fc7f vhost_dev_reset_owner_prepare drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3e0d7b42 st21nfca_hci_disable_se drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x7b82b9a1 idr_replace vmlinux EXPORT_SYMBOL +0x2119d11e __dev_get_by_name vmlinux EXPORT_SYMBOL +0x5abe6f63 skb_copy_and_csum_dev vmlinux EXPORT_SYMBOL +0x090d7381 input_unregister_handle vmlinux EXPORT_SYMBOL +0x5bffeeb7 usb_hcd_map_urb_for_dma vmlinux 
EXPORT_SYMBOL_GPL +0x6bec4c0e drm_hdmi_avi_infoframe_from_display_mode vmlinux EXPORT_SYMBOL +0x656c1a0e string_escape_mem vmlinux EXPORT_SYMBOL +0x4a6973a7 blk_mq_quiesce_queue vmlinux EXPORT_SYMBOL_GPL +0x33b8cdb1 register_asymmetric_key_parser vmlinux EXPORT_SYMBOL_GPL +0xae91fa0d mod_node_page_state vmlinux EXPORT_SYMBOL +0x2c7db649 irq_dispose_mapping vmlinux EXPORT_SYMBOL_GPL +0x530b1e98 pm_suspend vmlinux EXPORT_SYMBOL +0xae69b1c1 usermodehelper_read_unlock vmlinux EXPORT_SYMBOL_GPL +0x331de019 xdr_stream_decode_opaque_dup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd9d2bb03 snd_usbmidi_disconnect sound/usb/snd-usbmidi-lib EXPORT_SYMBOL +0x5aa9860b snd_timer_continue sound/core/snd-timer EXPORT_SYMBOL +0x12a9c3f1 ib_resize_cq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x451e32b6 nvme_unfreeze drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x127a5901 drbd_set_st_err_str drivers/block/drbd/drbd EXPORT_SYMBOL +0x79b25946 flow_rule_match_control vmlinux EXPORT_SYMBOL +0xac9a7f99 register_netdev vmlinux EXPORT_SYMBOL +0xd27f2525 mdiobus_write_nested vmlinux EXPORT_SYMBOL +0xc6dbbfa1 hisi_sas_host_reset vmlinux EXPORT_SYMBOL_GPL +0xf20c80af device_get_dma_attr vmlinux EXPORT_SYMBOL_GPL +0x596886bc blk_limits_io_opt vmlinux EXPORT_SYMBOL +0x20fea737 proc_create vmlinux EXPORT_SYMBOL +0x7aa1756e kvfree vmlinux EXPORT_SYMBOL +0x97f3ccb0 hinic_support_ft drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xad6ba40e radix_tree_tag_get vmlinux EXPORT_SYMBOL +0xe9663046 ping_err vmlinux EXPORT_SYMBOL_GPL +0xedda4705 inet_twsk_purge vmlinux EXPORT_SYMBOL_GPL +0x37db8f19 dmi_get_date vmlinux EXPORT_SYMBOL +0x0dd439e0 dm_internal_resume vmlinux EXPORT_SYMBOL_GPL +0x3069809a __tracepoint_xhci_dbg_quirks vmlinux EXPORT_SYMBOL_GPL +0x0272aa15 iscsi_create_endpoint vmlinux EXPORT_SYMBOL_GPL +0xeb785040 component_del vmlinux EXPORT_SYMBOL_GPL +0xadfdfcef __bitmap_andnot vmlinux EXPORT_SYMBOL +0x81f4c17e blkg_rwstat_recursive_sum vmlinux EXPORT_SYMBOL_GPL +0x99df8f5a posix_acl_from_xattr vmlinux EXPORT_SYMBOL +0x24ef4172 clear_nlink vmlinux EXPORT_SYMBOL +0x6d2da735 set_anon_super_fc vmlinux EXPORT_SYMBOL +0x74c7bffa stack_trace_snprint vmlinux EXPORT_SYMBOL_GPL +0xa4a4b05c ceph_osdc_alloc_messages net/ceph/libceph EXPORT_SYMBOL +0xf1038bdf nf_xfrm_me_harder net/netfilter/nf_nat EXPORT_SYMBOL +0xfdb3027c ib_dereg_mr_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x190f2525 mlx4_srq_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6ad11ef7 qlt_lport_deregister drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x5b47de02 dev_loopback_xmit vmlinux EXPORT_SYMBOL +0xc5f7bee7 skb_seq_read vmlinux EXPORT_SYMBOL +0x8d2b1f4a i2c_transfer vmlinux EXPORT_SYMBOL +0xb6bfc6c9 scsi_device_lookup vmlinux EXPORT_SYMBOL +0x81a3b013 drm_i2c_encoder_prepare vmlinux EXPORT_SYMBOL +0xdd18a993 acpi_check_dsm vmlinux EXPORT_SYMBOL +0xed00c4fb acpi_os_printf vmlinux EXPORT_SYMBOL +0x7220c422 of_irq_parse_and_map_pci vmlinux EXPORT_SYMBOL_GPL +0x92fb0254 gpiod_get_raw_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0x64127b67 bitmap_find_next_zero_area_off vmlinux EXPORT_SYMBOL +0x72aa55f9 skcipher_walk_aead_decrypt vmlinux EXPORT_SYMBOL_GPL +0x24946c5c fscrypt_fname_alloc_buffer vmlinux EXPORT_SYMBOL +0xd93206d8 wbc_detach_inode vmlinux EXPORT_SYMBOL_GPL +0xd73a85d6 cgroup_path_ns vmlinux EXPORT_SYMBOL_GPL +0xb6bcf005 param_set_int vmlinux EXPORT_SYMBOL +0xb455038f vcpu_load vmlinux EXPORT_SYMBOL_GPL +0x8912b986 xdr_inline_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x81d29dd4 snd_card_file_add sound/core/snd 
EXPORT_SYMBOL +0x24772bfe dm_bufio_get drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0xfcc6c6f8 cdrom_dummy_generic_packet drivers/cdrom/cdrom EXPORT_SYMBOL +0xcfccd991 pppox_compat_ioctl drivers/net/ppp/pppox EXPORT_SYMBOL +0x356461c8 rtc_time64_to_tm vmlinux EXPORT_SYMBOL +0xbe7e05a8 acpi_tb_install_and_load_table vmlinux EXPORT_SYMBOL +0x5a820fd9 crypto_drop_spawn vmlinux EXPORT_SYMBOL_GPL +0x716ddb7a security_socket_socketpair vmlinux EXPORT_SYMBOL +0x9cfbd5f4 dquot_writeback_dquots vmlinux EXPORT_SYMBOL +0x420a88f8 bdev_read_page vmlinux EXPORT_SYMBOL_GPL +0x0cc3d0ca mptbase_sas_persist_operation drivers/message/fusion/mptbase EXPORT_SYMBOL +0xfbacb46a qlt_enable_vha drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x67369b42 ipmi_addr_src_to_str drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xb454f07e ata_cable_80wire vmlinux EXPORT_SYMBOL_GPL +0x57b6efe3 drm_ioctl_flags vmlinux EXPORT_SYMBOL +0x3da2687f drm_helper_probe_detect vmlinux EXPORT_SYMBOL +0x1c0087ce tty_port_register_device_attr_serdev vmlinux EXPORT_SYMBOL_GPL +0x9821d8d4 blk_mq_init_queue vmlinux EXPORT_SYMBOL +0x89ae7aa0 rsa_parse_pub_key vmlinux EXPORT_SYMBOL_GPL +0x5f71c8e0 param_set_short vmlinux EXPORT_SYMBOL +0x15306407 nf_nat_tftp_hook net/netfilter/nf_conntrack_tftp EXPORT_SYMBOL_GPL +0x6a6ac0b9 nf_nat_snmp_hook net/netfilter/nf_conntrack_snmp EXPORT_SYMBOL_GPL +0x4f5d1987 get_h225_addr net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x91c33fbd iscsit_tmr_post_handler drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xf6bc9182 target_to_linux_sector drivers/target/target_core_mod EXPORT_SYMBOL +0x9b1079c7 can_rx_offload_queue_tail drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x440122ee hinic_get_stateful_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x4b4af651 fc_exch_mgr_list_clone drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x77bc13a0 strim vmlinux EXPORT_SYMBOL +0x58d8fcaa drm_dsc_pps_payload_pack vmlinux EXPORT_SYMBOL +0xf35e42bb pci_scan_bus vmlinux EXPORT_SYMBOL +0x89a0cd52 crc32c_impl vmlinux EXPORT_SYMBOL +0xc761153d shash_register_instance vmlinux EXPORT_SYMBOL_GPL +0x402d2ca5 debugfs_lookup vmlinux EXPORT_SYMBOL_GPL +0xd26eaa3a seq_write vmlinux EXPORT_SYMBOL +0x7cebec86 no_seek_end_llseek vmlinux EXPORT_SYMBOL +0xbcc1f0e5 generic_file_llseek vmlinux EXPORT_SYMBOL +0x5102a30b do_wait_intr_irq vmlinux EXPORT_SYMBOL +0x9217cbf1 get_mm_exe_file vmlinux EXPORT_SYMBOL +0xaff95073 ip6_tnl_parse_tlv_enc_lim net/ipv6/ip6_tunnel EXPORT_SYMBOL +0x122d3bc1 snd_info_create_module_entry sound/core/snd EXPORT_SYMBOL +0xa6c6113c mpt_config drivers/message/fusion/mptbase EXPORT_SYMBOL +0xa1bcd198 fc_cpu_mask drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x1a33113b tc_setup_cb_destroy vmlinux EXPORT_SYMBOL +0x8b5521c1 nvmem_device_cell_write vmlinux EXPORT_SYMBOL_GPL +0x13197cad rtc_update_irq_enable vmlinux EXPORT_SYMBOL_GPL +0xde9ef2e3 _dev_err vmlinux EXPORT_SYMBOL +0x798a5205 drm_plane_create_zpos_immutable_property vmlinux EXPORT_SYMBOL +0xa2f36447 dma_request_slave_channel vmlinux EXPORT_SYMBOL_GPL +0x7dbbc249 acpi_dev_resource_io vmlinux EXPORT_SYMBOL_GPL +0x0805f2c8 ecryptfs_get_auth_tok_key vmlinux EXPORT_SYMBOL +0x999e8297 vfree vmlinux EXPORT_SYMBOL +0x1e0a759c wait_on_page_bit vmlinux EXPORT_SYMBOL +0x9dcff49e hrtimer_sleeper_start_expires vmlinux EXPORT_SYMBOL_GPL +0x2ab7989d mutex_lock vmlinux EXPORT_SYMBOL +0x8aa47640 dccp_req_err net/dccp/dccp_ipv4 EXPORT_SYMBOL +0x4e155af0 ib_response_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x8be5887c pnfs_generic_pg_readpages fs/nfs/nfsv4 
EXPORT_SYMBOL_GPL +0x82128453 fscache_object_retrying_stale fs/fscache/fscache EXPORT_SYMBOL +0x120b336a __rb_insert_augmented vmlinux EXPORT_SYMBOL +0x60745f01 scsi_get_host_dev vmlinux EXPORT_SYMBOL +0xeffba0c9 ata_acpi_cbl_80wire vmlinux EXPORT_SYMBOL_GPL +0x6c585b87 simple_symlink_inode_operations vmlinux EXPORT_SYMBOL +0x678b96ec dma_pool_alloc vmlinux EXPORT_SYMBOL +0x10773da3 mlx4_mr_hw_change_pd drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3eaf37aa ip6_route_lookup vmlinux EXPORT_SYMBOL_GPL +0x2c559c88 tcp_v4_do_rcv vmlinux EXPORT_SYMBOL +0x893abbdd devlink_fmsg_u32_pair_put vmlinux EXPORT_SYMBOL_GPL +0xfb6f6f50 of_device_get_match_data vmlinux EXPORT_SYMBOL +0xa1e6ba18 ehci_handshake vmlinux EXPORT_SYMBOL_GPL +0xcde2641c ahci_platform_enable_regulators vmlinux EXPORT_SYMBOL_GPL +0x72c0f437 get_device vmlinux EXPORT_SYMBOL_GPL +0xc1b6a968 hvc_alloc vmlinux EXPORT_SYMBOL_GPL +0xd2144008 virtio_config_changed vmlinux EXPORT_SYMBOL_GPL +0x6bdef35c acpi_ec_mark_gpe_for_wake vmlinux EXPORT_SYMBOL_GPL +0x2d4c773a hdmi_spd_infoframe_init vmlinux EXPORT_SYMBOL +0x2415cd50 iov_iter_gap_alignment vmlinux EXPORT_SYMBOL +0x9db5308f blk_dump_rq_flags vmlinux EXPORT_SYMBOL +0x0a2f4fc3 exportfs_decode_fh vmlinux EXPORT_SYMBOL_GPL +0x26cd74a4 sysfs_create_group vmlinux EXPORT_SYMBOL_GPL +0xb9651acb ceph_msg_data_add_pages net/ceph/libceph EXPORT_SYMBOL +0x38d3dce5 g_make_token_header net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0xa41f3b0b xprt_release_rqst_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2a79ef70 target_submit_cmd_map_sgls drivers/target/target_core_mod EXPORT_SYMBOL +0x0774c894 mlx4_get_eqs_per_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xbae23b98 hinic_slq_uninit drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x1edc9f17 nfs_sync_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe4b68ae2 secure_tcpv6_ts_off vmlinux EXPORT_SYMBOL +0xfdeeda58 consume_skb vmlinux EXPORT_SYMBOL +0x36a64f91 hid_lookup_quirk vmlinux EXPORT_SYMBOL_GPL +0x44392be6 scsi_add_device vmlinux EXPORT_SYMBOL +0x80785490 __pm_stay_awake vmlinux EXPORT_SYMBOL_GPL +0x46fb7a34 drm_gem_shmem_print_info vmlinux EXPORT_SYMBOL +0x8ed554b7 drm_atomic_state_alloc vmlinux EXPORT_SYMBOL +0x2e9b8df6 pci_match_id vmlinux EXPORT_SYMBOL +0x564dee11 pcie_set_mps vmlinux EXPORT_SYMBOL +0x48f15861 pci_dev_run_wake vmlinux EXPORT_SYMBOL_GPL +0x7d84a990 generic_fadvise vmlinux EXPORT_SYMBOL +0x7c44c23f rpc_alloc_iostats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa9ffc821 nft_trace_enabled net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x8d0e9b8c snd_pcm_lib_get_vmalloc_page sound/core/snd-pcm EXPORT_SYMBOL +0x5cdf64d9 mdev_unregister_device drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x045129a7 mlxsw_core_port_devlink_port_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xf3688daa __fib6_flush_trees vmlinux EXPORT_SYMBOL +0x24f98d5a tcf_action_dump_1 vmlinux EXPORT_SYMBOL +0xc510858b flow_rule_match_enc_ipv6_addrs vmlinux EXPORT_SYMBOL +0x3cd63ff2 flow_rule_match_enc_ipv4_addrs vmlinux EXPORT_SYMBOL +0x8efd6da5 i2c_add_numbered_adapter vmlinux EXPORT_SYMBOL_GPL +0x5659f27d usb_unanchor_urb vmlinux EXPORT_SYMBOL_GPL +0xd62da90b scsi_check_sense vmlinux EXPORT_SYMBOL_GPL +0xd84d35bd dax_read_lock vmlinux EXPORT_SYMBOL_GPL +0xd353870c drm_gem_fb_prepare_fb vmlinux EXPORT_SYMBOL_GPL +0x39b1c852 dma_get_slave_channel vmlinux EXPORT_SYMBOL_GPL +0x12d473d4 pcie_aspm_enabled vmlinux EXPORT_SYMBOL_GPL +0xc91277a1 kgdb_schedule_breakpoint vmlinux EXPORT_SYMBOL_GPL +0x0a410fe4 p9_client_open 
net/9p/9pnet EXPORT_SYMBOL +0x09b66aed nf_ct_tmpl_alloc net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa432baf6 ib_unpack drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe36e6aaf vfio_iommu_group_put drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x92a87411 vfio_iommu_group_get drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x36b84cda dm_array_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb6d5c65d dm_deferred_set_add_work drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x2a0079ff rndis_msg_parser drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x95d47eb2 __tracepoint_mlx5_fs_set_fte drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7d9e6258 st21nfca_hci_remove drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x118a2193 mr_mfc_seq_next vmlinux EXPORT_SYMBOL +0x7b829452 tcp_prot vmlinux EXPORT_SYMBOL +0x8451168f of_graph_get_remote_endpoint vmlinux EXPORT_SYMBOL +0xf27fa80d root_device_unregister vmlinux EXPORT_SYMBOL_GPL +0x06a82c16 mipi_dsi_turn_on_peripheral vmlinux EXPORT_SYMBOL +0x9fe64809 vring_transport_features vmlinux EXPORT_SYMBOL_GPL +0xea978ccf textsearch_prepare vmlinux EXPORT_SYMBOL +0x9f3d5199 fuse_direct_io vmlinux EXPORT_SYMBOL_GPL +0xa6b0a81e migrate_page_copy vmlinux EXPORT_SYMBOL +0x617c452b queued_read_lock_slowpath vmlinux EXPORT_SYMBOL +0x84c1c552 proc_dointvec_ms_jiffies vmlinux EXPORT_SYMBOL +0x3cd98daa ceph_con_keepalive net/ceph/libceph EXPORT_SYMBOL +0xf4097f14 hinic_ppf_tmr_start drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x74c35e7a async_memcpy crypto/async_tx/async_memcpy EXPORT_SYMBOL_GPL +0xb504d55e inet_frag_kill vmlinux EXPORT_SYMBOL +0xaab56be9 led_update_brightness vmlinux EXPORT_SYMBOL_GPL +0xda642058 sas_port_alloc vmlinux EXPORT_SYMBOL +0xbd066d06 drm_flip_work_cleanup vmlinux EXPORT_SYMBOL +0x0e36ad75 drm_get_edid_switcheroo vmlinux EXPORT_SYMBOL +0x3ca07f3a cryptd_free_skcipher vmlinux EXPORT_SYMBOL_GPL +0x646185a7 simple_setattr vmlinux EXPORT_SYMBOL +0xd2709580 simple_getattr vmlinux EXPORT_SYMBOL +0xa897e3e7 mempool_free vmlinux EXPORT_SYMBOL +0xf600270e nfc_dep_link_is_up net/nfc/nfc EXPORT_SYMBOL +0x0afd6834 llc_sap_open net/llc/llc EXPORT_SYMBOL +0x98d4012c mlx5_core_destroy_tir drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xdf24adef hnae_unregister_notifier drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0xbc00d299 srp_rport_put drivers/scsi/scsi_transport_srp EXPORT_SYMBOL +0xc0e98f6a configfs_register_subsystem fs/configfs/configfs EXPORT_SYMBOL +0xe7d4bae4 tcf_em_tree_validate vmlinux EXPORT_SYMBOL +0xae3600ef neigh_carrier_down vmlinux EXPORT_SYMBOL +0x787351f7 usb_stor_Bulk_reset vmlinux EXPORT_SYMBOL_GPL +0x1250616e attribute_container_unregister vmlinux EXPORT_SYMBOL_GPL +0x2e99a442 driver_for_each_device vmlinux EXPORT_SYMBOL_GPL +0xfcd1819a hdmi_spd_infoframe_check vmlinux EXPORT_SYMBOL +0x3f4bd846 gen_pool_first_fit_order_align vmlinux EXPORT_SYMBOL +0xef92ef33 btree_last vmlinux EXPORT_SYMBOL_GPL +0xab48e33d bdget_disk vmlinux EXPORT_SYMBOL +0xaa03a1ef __compat_only_sysfs_link_entry_to_kobj vmlinux EXPORT_SYMBOL_GPL +0xf1b31314 delayacct_on vmlinux EXPORT_SYMBOL_GPL +0xb07308b9 rdma_get_service_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x77e35ccc nlmsvc_unlock_all_by_ip fs/lockd/lockd EXPORT_SYMBOL_GPL +0x00edb19a nlmsvc_unlock_all_by_sb fs/lockd/lockd EXPORT_SYMBOL_GPL +0xbcab6ee6 sscanf vmlinux EXPORT_SYMBOL +0xff429d06 fib6_rule_default vmlinux EXPORT_SYMBOL_GPL +0x2e92f1b1 fib4_rule_default vmlinux EXPORT_SYMBOL_GPL +0xc3dcdff4 tcp_rcv_state_process 
vmlinux EXPORT_SYMBOL +0x0c72928c skb_to_sgvec vmlinux EXPORT_SYMBOL_GPL +0x5c656623 drm_bridge_mode_set vmlinux EXPORT_SYMBOL +0xd9e8b470 drm_atomic_helper_connector_destroy_state vmlinux EXPORT_SYMBOL +0x9829c577 __breadahead vmlinux EXPORT_SYMBOL +0x13c7c0f5 perf_event_sysfs_show vmlinux EXPORT_SYMBOL_GPL +0x67ab7520 rpc_bind_new_program net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7a029029 ip6_tnl_encap_add_ops net/ipv6/ip6_tunnel EXPORT_SYMBOL +0x1923c125 snd_pcm_hw_constraint_integer sound/core/snd-pcm EXPORT_SYMBOL +0xa7f0dbd3 ibdev_alert drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd87ae60d xt_check_entry_offsets vmlinux EXPORT_SYMBOL +0xc0f78843 pfifo_fast_ops vmlinux EXPORT_SYMBOL +0xfc3bba0f unregister_fib_notifier vmlinux EXPORT_SYMBOL +0xcb9d81c4 led_classdev_suspend vmlinux EXPORT_SYMBOL_GPL +0xdf55b3f9 iscsi_destroy_iface vmlinux EXPORT_SYMBOL_GPL +0x94165303 platform_device_register_full vmlinux EXPORT_SYMBOL_GPL +0x28d2f4e5 drm_gem_dmabuf_release vmlinux EXPORT_SYMBOL +0x48193639 acpi_lid_open vmlinux EXPORT_SYMBOL +0xbfd5fe2d devm_gpio_free vmlinux EXPORT_SYMBOL_GPL +0x2db3bc61 check_zeroed_user vmlinux EXPORT_SYMBOL +0x53fcd85b blk_mq_queue_stopped vmlinux EXPORT_SYMBOL +0x47899e91 vmf_insert_pfn_pmd vmlinux EXPORT_SYMBOL_GPL +0x08bc0870 compat_put_timespec vmlinux EXPORT_SYMBOL_GPL +0xfa901b31 compat_get_timespec vmlinux EXPORT_SYMBOL_GPL +0x6de13801 wait_for_completion vmlinux EXPORT_SYMBOL +0xe3d6084a __cpuhp_setup_state vmlinux EXPORT_SYMBOL +0x79f697e4 lzorle1x_1_compress lib/lzo/lzo_compress EXPORT_SYMBOL_GPL +0x24f0e37c csum_partial_copy_to_xdr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x108a0acd bstr_printf vmlinux EXPORT_SYMBOL_GPL +0x1d24c881 ___ratelimit vmlinux EXPORT_SYMBOL +0x22a6b674 dev_get_by_name_rcu vmlinux EXPORT_SYMBOL +0xf5da1e40 i2c_smbus_write_byte_data vmlinux EXPORT_SYMBOL +0xfdb49435 usb_unlink_urb vmlinux EXPORT_SYMBOL_GPL +0x6fa426d2 phylink_ethtool_nway_reset vmlinux EXPORT_SYMBOL_GPL +0xa2e4bf0c drm_mm_reserve_node vmlinux EXPORT_SYMBOL +0x3e50b109 drm_gem_fence_array_add vmlinux EXPORT_SYMBOL +0x2b3bd5d0 virtio_config_disable vmlinux EXPORT_SYMBOL_GPL +0x99025a2b gpiod_get_from_of_node vmlinux EXPORT_SYMBOL_GPL +0x78aeb2a7 iomap_fiemap vmlinux EXPORT_SYMBOL_GPL +0x82c87ad5 nr_online_nodes vmlinux EXPORT_SYMBOL +0xa8fb743d des_expand_key lib/crypto/libdes EXPORT_SYMBOL_GPL +0x1209c844 __nf_nat_mangle_tcp_packet net/netfilter/nf_nat EXPORT_SYMBOL +0xc1741674 snd_dma_alloc_pages sound/core/snd-pcm EXPORT_SYMBOL +0xbcb4c446 iscsit_stop_dataout_timer drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x49ebd0d2 sja1000_interrupt drivers/net/can/sja1000/sja1000 EXPORT_SYMBOL_GPL +0xc8805705 sock_diag_destroy vmlinux EXPORT_SYMBOL_GPL +0x6b8bf149 netif_receive_skb_list vmlinux EXPORT_SYMBOL +0x6c5dae23 scsi_kmap_atomic_sg vmlinux EXPORT_SYMBOL +0xf154d8d1 platform_device_add_data vmlinux EXPORT_SYMBOL_GPL +0xb23ae8a7 drm_modeset_lock_all_ctx vmlinux EXPORT_SYMBOL +0x29d8ccb1 __vring_new_virtqueue vmlinux EXPORT_SYMBOL_GPL +0x757a7856 fscrypt_ioctl_get_policy vmlinux EXPORT_SYMBOL +0xc40b0eec fscrypt_ioctl_set_policy vmlinux EXPORT_SYMBOL +0xc79413b2 register_kprobes vmlinux EXPORT_SYMBOL_GPL +0xd3e97c21 hugetlb_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xcdc86b55 sched_clock vmlinux EXPORT_SYMBOL_GPL +0xb7fde10e rxrpc_kernel_abort_call net/rxrpc/rxrpc EXPORT_SYMBOL +0x42a662a1 put_rpccred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4757a9fa rpcauth_init_cred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x537de725 lapb_getparms net/lapb/lapb EXPORT_SYMBOL 
+0x8c3c3916 xfrm_audit_state_replay_overflow vmlinux EXPORT_SYMBOL_GPL +0xa08d0403 sock_create_lite vmlinux EXPORT_SYMBOL +0x7b59ce88 ata_std_qc_defer vmlinux EXPORT_SYMBOL_GPL +0xde01f9e2 drm_gem_create_mmap_offset vmlinux EXPORT_SYMBOL +0x21f1399c drm_helper_force_disable_all vmlinux EXPORT_SYMBOL +0x7071a4f2 cmdline_parts_free vmlinux EXPORT_SYMBOL +0xda872864 security_locked_down vmlinux EXPORT_SYMBOL +0xa7eafbdc jbd2_journal_free_reserved vmlinux EXPORT_SYMBOL +0x466e5342 net_prio_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xc270ba4e irq_domain_free_irqs_parent vmlinux EXPORT_SYMBOL_GPL +0x58e3306d bit_wait_io vmlinux EXPORT_SYMBOL +0x69098d9b nfc_proto_register net/nfc/nfc EXPORT_SYMBOL +0x6c36e95b p9_req_put net/9p/9pnet EXPORT_SYMBOL +0x374b4cff cdrom_release drivers/cdrom/cdrom EXPORT_SYMBOL +0x42ffc5b0 mlx4_register_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xd7ef12db fcoe_clean_pending_queue drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x2777dbe7 _fc_frame_alloc drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xa2a76008 cpufreq_unregister_governor vmlinux EXPORT_SYMBOL_GPL +0x1409161d pm_clk_destroy vmlinux EXPORT_SYMBOL_GPL +0xb818fac8 drm_gem_cma_create vmlinux EXPORT_SYMBOL_GPL +0xb1c9c5c4 regulator_suspend_enable vmlinux EXPORT_SYMBOL_GPL +0x7f2db89d pci_iomap_wc_range vmlinux EXPORT_SYMBOL_GPL +0x4171ce05 iomap_readpages vmlinux EXPORT_SYMBOL_GPL +0x99c5abf1 d_obtain_alias vmlinux EXPORT_SYMBOL +0x839d7d57 mmu_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0x6a6fb035 irq_domain_set_info vmlinux EXPORT_SYMBOL +0x1927513b nf_conntrack_helper_try_module_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x4f154c54 ip6_datagram_connect vmlinux EXPORT_SYMBOL_GPL +0x437eb1df ipv6_mod_enabled vmlinux EXPORT_SYMBOL_GPL +0xe6cea4a7 ip4_datagram_connect vmlinux EXPORT_SYMBOL +0x72288126 tcp_timewait_state_process vmlinux EXPORT_SYMBOL +0x968ef361 __tcp_send_ack vmlinux EXPORT_SYMBOL_GPL +0xd76fe6dc devlink_resources_unregister vmlinux EXPORT_SYMBOL_GPL +0x1d3047d3 stmmac_suspend vmlinux EXPORT_SYMBOL_GPL +0x0acaa6d4 ata_pci_sff_init_one vmlinux EXPORT_SYMBOL_GPL +0xca537efc pnp_start_dev vmlinux EXPORT_SYMBOL +0x99d8939b security_inode_init_security vmlinux EXPORT_SYMBOL +0x6a460dc5 schedule_hrtimeout vmlinux EXPORT_SYMBOL_GPL +0x635ff76d LZ4_saveDict lib/lz4/lz4_compress EXPORT_SYMBOL +0x61a557d7 parport_wait_event drivers/parport/parport EXPORT_SYMBOL +0x6dc35b25 radix_tree_iter_delete vmlinux EXPORT_SYMBOL +0x916f7e1a flow_rule_match_enc_opts vmlinux EXPORT_SYMBOL +0x99384fc7 starget_for_each_device vmlinux EXPORT_SYMBOL +0x25bc0aa4 subsys_interface_register vmlinux EXPORT_SYMBOL_GPL +0xf3916987 global_cursor_default vmlinux EXPORT_SYMBOL +0xb6e6d99d clk_disable vmlinux EXPORT_SYMBOL_GPL +0x62e087cf nobh_writepage vmlinux EXPORT_SYMBOL +0xfedcdb60 seq_hlist_next_percpu vmlinux EXPORT_SYMBOL +0xa2eb9dfe clear_page_dirty_for_io vmlinux EXPORT_SYMBOL +0x9a1fc4b4 jiffies_to_timeval vmlinux EXPORT_SYMBOL +0xcd8b21ed srcu_init_notifier_head vmlinux EXPORT_SYMBOL_GPL +0x5d112304 __memcpy_fromio vmlinux EXPORT_SYMBOL +0x89125c64 xdr_commit_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x038a1a03 snd_rawmidi_kernel_write sound/core/snd-rawmidi EXPORT_SYMBOL +0x3fa9d639 mlxsw_core_schedule_dw drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x90e15eb6 mlx5_query_nic_vport_qkey_viol_cntr drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc1e56a80 s3fwrn5_recv_frame drivers/nfc/s3fwrn5/s3fwrn5 EXPORT_SYMBOL +0xaae9ad30 locks_start_grace 
fs/nfs_common/grace EXPORT_SYMBOL_GPL +0xf5397921 wimax_dev_rm vmlinux EXPORT_SYMBOL_GPL +0x2b4509dd devlink_health_reporter_state_update vmlinux EXPORT_SYMBOL_GPL +0x07b7090f of_phy_register_fixed_link vmlinux EXPORT_SYMBOL +0x1ca99c9f ahci_platform_resume vmlinux EXPORT_SYMBOL_GPL +0xccafb0a8 devm_free_pages vmlinux EXPORT_SYMBOL_GPL +0x3a2c7e8e drm_syncobj_add_point vmlinux EXPORT_SYMBOL +0xfdbeedc7 tty_schedule_flip vmlinux EXPORT_SYMBOL +0x016aeb0d dump_skip vmlinux EXPORT_SYMBOL +0x22a52ab9 tracepoint_probe_unregister vmlinux EXPORT_SYMBOL_GPL +0x8da7ff9a nci_core_cmd net/nfc/nci/nci EXPORT_SYMBOL +0x14b18ccb svc_create_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x32b10fed ib_port_unregister_module_stat drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xffecc1f6 closure_sub drivers/md/bcache/bcache EXPORT_SYMBOL +0xb8898363 pnfs_write_done_resend_to_mds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x5062af22 nfs_server_copy_userdata fs/nfs/nfs EXPORT_SYMBOL_GPL +0x8e21c9a1 dma_fence_add_callback vmlinux EXPORT_SYMBOL +0x8d8400ca fwnode_property_read_string_array vmlinux EXPORT_SYMBOL_GPL +0x955bc291 drm_gem_vram_fill_create_dumb vmlinux EXPORT_SYMBOL +0x734f750e tty_devnum vmlinux EXPORT_SYMBOL +0x87d840e8 pcie_flr vmlinux EXPORT_SYMBOL_GPL +0xc3c4c6cc hash_algo_name vmlinux EXPORT_SYMBOL_GPL +0x1e262ef8 __vfs_removexattr vmlinux EXPORT_SYMBOL +0x547aac4c ib_post_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x6f5f303d mlx4_get_slave_port_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x01c4e200 page_pool_destroy vmlinux EXPORT_SYMBOL +0x8566c181 usb_serial_generic_tiocmiwait vmlinux EXPORT_SYMBOL_GPL +0x543ef733 genphy_c45_config_aneg vmlinux EXPORT_SYMBOL_GPL +0x4a1d81a3 dax_driver_unregister vmlinux EXPORT_SYMBOL_GPL +0x4c85c171 iommu_group_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x32e6f1a0 acpi_video_backlight_string vmlinux EXPORT_SYMBOL +0x76eeeb0f sha384_zero_message_hash vmlinux EXPORT_SYMBOL_GPL +0x946dd559 sha224_zero_message_hash vmlinux EXPORT_SYMBOL_GPL +0x423b127c write_dirty_buffer vmlinux EXPORT_SYMBOL +0x2dad1ee6 noop_llseek vmlinux EXPORT_SYMBOL +0xc65d3eed ring_buffer_entries_cpu vmlinux EXPORT_SYMBOL_GPL +0xe9aeb042 nfc_proto_unregister net/nfc/nfc EXPORT_SYMBOL +0x4af04e71 llc_sap_find net/llc/llc EXPORT_SYMBOL +0xa97463c9 __siphash_aligned vmlinux EXPORT_SYMBOL +0x34590b05 __udp_enqueue_schedule_skb vmlinux EXPORT_SYMBOL_GPL +0xd4ad07eb cpufreq_driver_fast_switch vmlinux EXPORT_SYMBOL_GPL +0x6b498da1 input_free_device vmlinux EXPORT_SYMBOL +0x6ffb3b2d phy_remove_link_mode vmlinux EXPORT_SYMBOL +0x7048283f ata_host_init vmlinux EXPORT_SYMBOL_GPL +0x7d251dfa drm_gem_vram_bo_driver_evict_flags vmlinux EXPORT_SYMBOL +0x21ac8b77 iommu_group_get_by_id vmlinux EXPORT_SYMBOL_GPL +0x91bb3558 regulator_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x6a9ef117 skcipher_alloc_instance_simple vmlinux EXPORT_SYMBOL_GPL +0x0cc9116c ceph_reset_client_addr net/ceph/libceph EXPORT_SYMBOL +0x1e256777 alloc_ltalkdev net/appletalk/appletalk EXPORT_SYMBOL +0x390383d3 pppox_unbind_sock drivers/net/ppp/pppox EXPORT_SYMBOL +0xdbd73520 mlx5_query_nic_vport_mac_list drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x13f2928b hinic_func_tmr_bitmap_set drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x75de98b8 hinic_unregister_mgmt_msg_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xbbec2c5e st_nci_se_io drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0x7f7f12bb async_trigger_callback crypto/async_tx/async_tx EXPORT_SYMBOL_GPL +0x575aadfd 
ip6_route_output_flags vmlinux EXPORT_SYMBOL_GPL +0x3e9110fa __hw_addr_unsync vmlinux EXPORT_SYMBOL +0x3a9e1a1c of_platform_default_populate vmlinux EXPORT_SYMBOL_GPL +0x0416986c usb_hid_driver vmlinux EXPORT_SYMBOL_GPL +0x7deff673 dm_consume_args vmlinux EXPORT_SYMBOL +0x06652a7d i2c_verify_adapter vmlinux EXPORT_SYMBOL +0x7eac9cda drm_atomic_helper_setup_commit vmlinux EXPORT_SYMBOL +0xbe9a83d5 dw_pcie_write vmlinux EXPORT_SYMBOL_GPL +0x919c58f3 __clzsi2 vmlinux EXPORT_SYMBOL +0xc4777aa9 __ctzsi2 vmlinux EXPORT_SYMBOL +0xb8a30a28 security_kernel_read_file vmlinux EXPORT_SYMBOL_GPL +0x15ba50a6 jiffies vmlinux EXPORT_SYMBOL +0x6468b0ae kvm_vcpu_write_guest_page vmlinux EXPORT_SYMBOL_GPL +0x9cab5651 do_map_probe drivers/mtd/chips/chipreg EXPORT_SYMBOL +0x76f40744 ocfs2_dlm_lvb fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xf999267e pnfs_generic_commit_release fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xce2c1311 nfs_get_lock_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x20edb7ac tcp_v6_syn_recv_sock vmlinux EXPORT_SYMBOL +0x84cc5323 dev_queue_xmit vmlinux EXPORT_SYMBOL +0xd472d996 of_device_alloc vmlinux EXPORT_SYMBOL +0xc96c9a3f devm_device_remove_group vmlinux EXPORT_SYMBOL_GPL +0x0f8d79ff drm_dp_mst_allocate_vcpi vmlinux EXPORT_SYMBOL +0x13d0adf7 __kfifo_out vmlinux EXPORT_SYMBOL +0xe06141e9 security_sk_clone vmlinux EXPORT_SYMBOL +0x9649a4bf proc_mkdir vmlinux EXPORT_SYMBOL +0x3c0da71b bd_finish_claiming vmlinux EXPORT_SYMBOL +0x0db1236e dummy_irq_chip vmlinux EXPORT_SYMBOL_GPL +0x4eac5fc1 cpu_mitigations_auto_nosmt vmlinux EXPORT_SYMBOL_GPL +0x8f678b07 __stack_chk_guard vmlinux EXPORT_SYMBOL +0x21d9ff34 rdma_iw_cm_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x1ad20414 parport_find_base drivers/parport/parport EXPORT_SYMBOL +0x089a5324 mlx5_fpga_mem_read drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x1364d63e mlx5_query_hca_vport_context drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xd94a3fb2 mlx4_flow_steer_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x67be703a hinic_set_root_ctxt drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7e92994b fcoe_wwn_from_mac drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0xd4c9681a __serpent_setkey crypto/serpent_generic EXPORT_SYMBOL_GPL +0x90688bcd devlink_info_driver_name_put vmlinux EXPORT_SYMBOL_GPL +0xe37ec3a4 usb_hcd_start_port_resume vmlinux EXPORT_SYMBOL_GPL +0x8b0f9009 blk_integrity_merge_bio vmlinux EXPORT_SYMBOL +0x25f75d49 blk_rq_map_sg vmlinux EXPORT_SYMBOL +0x00f4f268 get_cached_acl_rcu vmlinux EXPORT_SYMBOL +0x12178cae bd_start_claiming vmlinux EXPORT_SYMBOL +0xaa702dfd get_mem_cgroup_from_mm vmlinux EXPORT_SYMBOL +0x54955855 alarm_start_relative vmlinux EXPORT_SYMBOL_GPL +0xdf6b082f proc_dointvec_jiffies vmlinux EXPORT_SYMBOL +0x8b595dd7 nci_spi_read net/nfc/nci/nci_spi EXPORT_SYMBOL_GPL +0x5d9792cb hinic_global_func_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x5205f7ed __tracepoint_pnfs_mds_fallback_write_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xb3d8fd38 tcp_peek_len vmlinux EXPORT_SYMBOL +0x385de89f devlink_traps_register vmlinux EXPORT_SYMBOL_GPL +0x2dd923aa dev_change_carrier vmlinux EXPORT_SYMBOL +0x9ccf98f8 __napi_schedule vmlinux EXPORT_SYMBOL +0xd84c3c75 sk_stream_wait_memory vmlinux EXPORT_SYMBOL +0xeadaf8b9 hid_bus_type vmlinux EXPORT_SYMBOL +0x096092e4 usb_set_interface vmlinux EXPORT_SYMBOL_GPL +0xc2a17ebe seqno_fence_ops vmlinux EXPORT_SYMBOL +0x06b79e08 dma_resv_wait_timeout_rcu vmlinux EXPORT_SYMBOL_GPL +0x41558d74 tty_unlock vmlinux 
EXPORT_SYMBOL +0xeff608e0 kstrtos16_from_user vmlinux EXPORT_SYMBOL +0xa084749a __bitmap_or vmlinux EXPORT_SYMBOL +0x3df90aae security_req_classify_flow vmlinux EXPORT_SYMBOL +0xad7b7398 get_dcookie vmlinux EXPORT_SYMBOL_GPL +0x45fd812f trace_event_raw_init vmlinux EXPORT_SYMBOL_GPL +0x921dc520 relay_late_setup_files vmlinux EXPORT_SYMBOL_GPL +0xcf02c707 nf_ct_kill_acct net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x8d720bfb __fscache_alloc_page fs/fscache/fscache EXPORT_SYMBOL +0x98cf60b3 strlen vmlinux EXPORT_SYMBOL +0xc29bf967 strspn vmlinux EXPORT_SYMBOL +0x79feb026 netlink_broadcast_filtered vmlinux EXPORT_SYMBOL +0x4761f17c register_netevent_notifier vmlinux EXPORT_SYMBOL_GPL +0xc8ffe164 of_device_uevent_modalias vmlinux EXPORT_SYMBOL_GPL +0xf6418f34 sata_lpm_ignore_phy_events vmlinux EXPORT_SYMBOL_GPL +0xc16410b9 ZSTD_getDictID_fromDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0xf1f3182c iscsit_handle_task_mgt_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xdb68bbad rfkill_destroy vmlinux EXPORT_SYMBOL +0x021c5c26 drm_vma_offset_add vmlinux EXPORT_SYMBOL +0xfc37df11 regulator_set_pull_down_regmap vmlinux EXPORT_SYMBOL_GPL +0xedbaee5e nla_strcmp vmlinux EXPORT_SYMBOL +0xc0ff12fb nla_strdup vmlinux EXPORT_SYMBOL +0x2464da17 gen_pool_size vmlinux EXPORT_SYMBOL_GPL +0xfeebc7c4 __kfifo_from_user_r vmlinux EXPORT_SYMBOL +0xcc445ceb __sg_page_iter_dma_next vmlinux EXPORT_SYMBOL +0x31ced4e0 crypto_ahash_type vmlinux EXPORT_SYMBOL_GPL +0x85bdffdd dccp_hashinfo net/dccp/dccp EXPORT_SYMBOL_GPL +0xa696d2fd svc_return_autherr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x531468bd parport_announce_port drivers/parport/parport EXPORT_SYMBOL +0xf7831122 mlx5_query_nic_vport_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x87a45267 mlx5_core_detach_mcg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x02d3d0b9 fc_seq_assign drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xa4834547 tcp_get_cookie_sock vmlinux EXPORT_SYMBOL +0x0f09a034 fib_add_nexthop vmlinux EXPORT_SYMBOL_GPL +0x4a319722 __skb_flow_dissect vmlinux EXPORT_SYMBOL +0x596ebc7e skb_trim vmlinux EXPORT_SYMBOL +0x2c04828a usb_enable_autosuspend vmlinux EXPORT_SYMBOL_GPL +0x72c17b77 drm_simple_display_pipe_init vmlinux EXPORT_SYMBOL +0x403f9529 gpio_request_one vmlinux EXPORT_SYMBOL_GPL +0x55940256 blk_mq_unfreeze_queue vmlinux EXPORT_SYMBOL_GPL +0xcc3c2d1e perf_tp_event vmlinux EXPORT_SYMBOL_GPL +0x2326717e gfn_to_pfn_memslot vmlinux EXPORT_SYMBOL_GPL +0x4a2e1dae gfn_to_hva_memslot vmlinux EXPORT_SYMBOL_GPL +0x53f79f4e svc_proc_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2b830871 fdp_nci_remove drivers/nfc/fdp/fdp EXPORT_SYMBOL +0x4e20bcf8 radix_tree_tag_set vmlinux EXPORT_SYMBOL +0xac226a12 xfrm_policy_delete vmlinux EXPORT_SYMBOL +0x870d13db inet_frags_init vmlinux EXPORT_SYMBOL +0x271bbdb6 power_supply_changed vmlinux EXPORT_SYMBOL_GPL +0x1467989c i2c_get_device_id vmlinux EXPORT_SYMBOL_GPL +0x066bba4c ata_qc_get_active vmlinux EXPORT_SYMBOL_GPL +0x63a6da27 pci_scan_root_bus_bridge vmlinux EXPORT_SYMBOL +0x4e3567f7 match_int vmlinux EXPORT_SYMBOL +0xca7a4180 blk_queue_required_elevator_features vmlinux EXPORT_SYMBOL_GPL +0x27859a7c shash_free_instance vmlinux EXPORT_SYMBOL_GPL +0xde78a496 debugfs_create_x64 vmlinux EXPORT_SYMBOL_GPL +0x2e89f33f fscrypt_ioctl_remove_key vmlinux EXPORT_SYMBOL_GPL +0xe90f285f nfc_hci_free_device net/nfc/hci/hci EXPORT_SYMBOL +0x8dc1715d snd_pcm_kernel_ioctl sound/core/snd-pcm EXPORT_SYMBOL +0xe2fefeb9 rdma_put_gid_attr 
drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5475ba9e dm_block_location drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xfe107303 tcf_idrinfo_destroy vmlinux EXPORT_SYMBOL +0xc70faf2c skb_cow_data vmlinux EXPORT_SYMBOL_GPL +0xba2fa337 scsi_internal_device_block_nowait vmlinux EXPORT_SYMBOL_GPL +0x8e4b63a6 hisi_clk_register_gate_sep vmlinux EXPORT_SYMBOL_GPL +0xc008e229 pci_fixup_cardbus vmlinux EXPORT_SYMBOL +0xaf986f08 encrypt_blob vmlinux EXPORT_SYMBOL_GPL +0x331390be posix_acl_update_mode vmlinux EXPORT_SYMBOL +0xcade6d41 __tracepoint_pelt_cfs_tp vmlinux EXPORT_SYMBOL_GPL +0x382aeb28 nf_conncount_count net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x525c3818 snd_card_free_when_closed sound/core/snd EXPORT_SYMBOL +0x286e239b rdma_connect drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xc24d0e96 mtd_erase drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xd4bddf5c dm_bufio_issue_flush drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x0c0a5ebb hinic_dbg_lt_rd_16byte drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x868acba5 get_options vmlinux EXPORT_SYMBOL +0x318511a4 ip6_datagram_release_cb vmlinux EXPORT_SYMBOL_GPL +0xca6db0ac ip4_datagram_release_cb vmlinux EXPORT_SYMBOL_GPL +0xb3d34237 i2c_get_adapter vmlinux EXPORT_SYMBOL +0xd13f9985 drm_edid_block_valid vmlinux EXPORT_SYMBOL +0x5e71d44b timespec64_trunc vmlinux EXPORT_SYMBOL +0x236357d3 vfs_iter_write vmlinux EXPORT_SYMBOL +0xf5e7ea40 ktime_get_coarse_ts64 vmlinux EXPORT_SYMBOL +0xe5094cd2 setup_irq vmlinux EXPORT_SYMBOL_GPL +0x769c985f udp_sock_create6 net/ipv6/ip6_udp_tunnel EXPORT_SYMBOL_GPL +0xabd7cd1e nft_unregister_expr net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x79ed4d6d fib_new_table vmlinux EXPORT_SYMBOL_GPL +0xe30602e8 xdp_do_redirect vmlinux EXPORT_SYMBOL_GPL +0xea6a29fe input_unregister_handler vmlinux EXPORT_SYMBOL +0xfd9c411c drm_fb_helper_modinit vmlinux EXPORT_SYMBOL +0x6b1e8e2c drm_fb_helper_sys_copyarea vmlinux EXPORT_SYMBOL +0xf137c8fb acpi_get_pci_dev vmlinux EXPORT_SYMBOL_GPL +0x3955fcf6 __kfifo_in_r vmlinux EXPORT_SYMBOL +0x8a8485b5 iomap_dio_iopoll vmlinux EXPORT_SYMBOL_GPL +0xccb45b95 bh_uptodate_or_lock vmlinux EXPORT_SYMBOL +0xaf94b083 mem_cgroup_from_task vmlinux EXPORT_SYMBOL +0x559f3046 spc_emulate_evpd_83 drivers/target/target_core_mod EXPORT_SYMBOL +0x77f51c64 xfrm_state_walk_init vmlinux EXPORT_SYMBOL +0x014a03a9 udp4_hwcsum vmlinux EXPORT_SYMBOL_GPL +0x4f81b817 __tracepoint_br_fdb_add vmlinux EXPORT_SYMBOL_GPL +0xca6372c6 of_graph_get_port_by_id vmlinux EXPORT_SYMBOL +0x8f0da365 rc_register_device vmlinux EXPORT_SYMBOL_GPL +0xed038d0d rtc_read_alarm vmlinux EXPORT_SYMBOL_GPL +0x0b0b6fff device_match_of_node vmlinux EXPORT_SYMBOL_GPL +0xb768aed1 __drm_atomic_helper_crtc_destroy_state vmlinux EXPORT_SYMBOL +0x48a91171 string_get_size vmlinux EXPORT_SYMBOL +0x6709a879 security_path_mknod vmlinux EXPORT_SYMBOL +0x2eec7d9d fat_fill_super vmlinux EXPORT_SYMBOL_GPL +0x5e41139b dquot_initialize vmlinux EXPORT_SYMBOL +0xecc6821b vmf_insert_mixed vmlinux EXPORT_SYMBOL +0xde57b5f5 nft_parse_u32_check net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x92f9df0b nf_log_l2packet net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xf2d5d5eb snd_pci_quirk_lookup sound/core/snd EXPORT_SYMBOL +0xf536d04d vhost_init_device_iotlb drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3acf5cec of_can_transceiver drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xf808ce49 __sock_cmsg_send vmlinux EXPORT_SYMBOL +0x3955bdaa i2c_dw_read_comp_param vmlinux EXPORT_SYMBOL_GPL +0x37dd37d2 of_mdio_find_bus vmlinux EXPORT_SYMBOL +0xb43146d8 
drm_vram_mm_cleanup vmlinux EXPORT_SYMBOL +0xe50f53bb pci_bus_write_config_word vmlinux EXPORT_SYMBOL +0x3a2f6702 sg_alloc_table vmlinux EXPORT_SYMBOL +0xe7373223 set_get_execve_info_func vmlinux EXPORT_SYMBOL +0xca2ab29c handle_simple_irq vmlinux EXPORT_SYMBOL_GPL +0x286cc647 async_synchronize_cookie_domain vmlinux EXPORT_SYMBOL_GPL +0xba7ceeb7 call_usermodehelper_exec vmlinux EXPORT_SYMBOL +0xfbf17fde ceph_open_session net/ceph/libceph EXPORT_SYMBOL +0x49587fad br_vlan_get_pvid net/bridge/bridge EXPORT_SYMBOL_GPL +0x3825c655 snd_info_free_entry sound/core/snd EXPORT_SYMBOL +0x199f70fe transport_send_check_condition_and_sense drivers/target/target_core_mod EXPORT_SYMBOL +0x4c6bc93c fc_set_mfs drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xe6e198dd fc_disc_init drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xb455924d sha256_block_data_order arch/arm64/crypto/sha256-arm64 EXPORT_SYMBOL +0x26a84c58 power_supply_batinfo_ocv2cap vmlinux EXPORT_SYMBOL_GPL +0x56ba6d3a drm_gem_prime_fd_to_handle vmlinux EXPORT_SYMBOL +0x84b61fdf drm_scdc_write vmlinux EXPORT_SYMBOL +0x149c93f9 pci_release_region vmlinux EXPORT_SYMBOL +0x371583d4 io_uring_get_socket vmlinux EXPORT_SYMBOL +0x141f38bf ktime_get_raw_fast_ns vmlinux EXPORT_SYMBOL_GPL +0xbcac6160 pm_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL +0xd1ad0786 snd_pcm_hw_constraint_ranges sound/core/snd-pcm EXPORT_SYMBOL +0x52a9cfee vfio_platform_probe_common drivers/vfio/platform/vfio-platform-base EXPORT_SYMBOL_GPL +0xc414905b fc_elsct_init drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xe4f4665b ipmi_validate_addr drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x74530ecd fscache_op_debug_id fs/fscache/fscache EXPORT_SYMBOL +0x65252744 inet_addr_type_table vmlinux EXPORT_SYMBOL +0x4b38910e bpf_redirect_info vmlinux EXPORT_SYMBOL_GPL +0x82ad3cb8 power_supply_unregister vmlinux EXPORT_SYMBOL_GPL +0xa45d49ed genphy_c45_read_status vmlinux EXPORT_SYMBOL_GPL +0x12d280f8 ata_host_put vmlinux EXPORT_SYMBOL_GPL +0x8eee3399 dax_read_unlock vmlinux EXPORT_SYMBOL_GPL +0xcbace93a drm_release vmlinux EXPORT_SYMBOL +0xc569d8ce __clk_get_name vmlinux EXPORT_SYMBOL_GPL +0x486075c8 gen_pool_dma_alloc vmlinux EXPORT_SYMBOL +0xff1e9dd8 seq_list_start vmlinux EXPORT_SYMBOL +0xf8fe094f irq_find_matching_fwspec vmlinux EXPORT_SYMBOL_GPL +0x50d3c229 kthread_queue_work vmlinux EXPORT_SYMBOL_GPL +0x0049ca83 xfrm_aead_get_byname net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x6d47a74a register_ip_vs_scheduler net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xc6b23eef nf_ct_port_nlattr_to_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xcc0ce39a snd_pcm_set_ops sound/core/snd-pcm EXPORT_SYMBOL +0x907df803 rdma_event_msg drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x3a2ee17b mlx5_del_flow_rules drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa9d66ab5 km_state_notify vmlinux EXPORT_SYMBOL +0xda624ec3 lwtunnel_encap_del_ops vmlinux EXPORT_SYMBOL_GPL +0x58d13ea7 cpuidle_enable_device vmlinux EXPORT_SYMBOL_GPL +0x8d2600bc dma_run_dependencies vmlinux EXPORT_SYMBOL_GPL +0xd32694be sbitmap_prepare_to_wait vmlinux EXPORT_SYMBOL_GPL +0x7cb803de btree_grim_visitor vmlinux EXPORT_SYMBOL_GPL +0x401d1699 bpf_prog_add vmlinux EXPORT_SYMBOL_GPL +0xc071b3c5 trace_seq_putmem vmlinux EXPORT_SYMBOL_GPL +0x51bd55b5 completion_done vmlinux EXPORT_SYMBOL +0x5ddc0029 mptscsih_dev_reset drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x437d11e9 mpt_detach drivers/message/fusion/mptbase EXPORT_SYMBOL +0x25039d2a mlx5_eq_enable drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4a849a47 
mlx4_alloc_cmd_mailbox drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe1373f53 hinic_mpf_idx drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x88bb14ef pnfs_set_layoutcommit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xb83ca541 nfs_setsecurity fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb94339c4 qdisc_put_stab vmlinux EXPORT_SYMBOL +0x567bdf97 devm_led_trigger_register vmlinux EXPORT_SYMBOL_GPL +0xdcc1cb45 drm_connector_cleanup vmlinux EXPORT_SYMBOL +0xce4cdb8e fb_find_best_mode vmlinux EXPORT_SYMBOL +0xae3520e8 seq_printf vmlinux EXPORT_SYMBOL +0xf17fba2e ax25_linkfail_release net/ax25/ax25 EXPORT_SYMBOL +0xc7406f29 inet_diag_msg_common_fill net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0xbe548922 mptscsih_raid_id_to_num drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x47fd6eee mlxsw_core_fw_flash_end drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x97623558 xas_create_range vmlinux EXPORT_SYMBOL_GPL +0x0b742fd7 simple_strtol vmlinux EXPORT_SYMBOL +0x84037559 devres_release vmlinux EXPORT_SYMBOL_GPL +0x9b36ec1d drm_mode_validate_size vmlinux EXPORT_SYMBOL +0xcc8b950b acpi_device_set_power vmlinux EXPORT_SYMBOL +0x0a4af942 pci_dev_driver vmlinux EXPORT_SYMBOL +0xbd66e658 setattr_prepare vmlinux EXPORT_SYMBOL +0x095632c6 ilookup5_nowait vmlinux EXPORT_SYMBOL +0xc3eae2c7 irq_gc_mask_clr_bit vmlinux EXPORT_SYMBOL_GPL +0xbe2058c1 irq_gc_mask_set_bit vmlinux EXPORT_SYMBOL_GPL +0x6aa95b75 gretap_fb_dev_create net/ipv4/ip_gre EXPORT_SYMBOL_GPL +0x672ffbe8 ib_send_cm_lap drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x93326b89 mlx5_query_hca_vport_gid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xf06806d2 nxp_nci_fw_recv_frame drivers/nfc/nxp-nci/nxp-nci EXPORT_SYMBOL +0x7c9722ba nfs_dreq_bytes_left fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa8e9e1ae send_implementation_id fs/nfs/nfs EXPORT_SYMBOL_GPL +0xded39a6b gen_kill_estimator vmlinux EXPORT_SYMBOL +0x40b4cda8 devm_add_action vmlinux EXPORT_SYMBOL_GPL +0x7b982195 drm_mm_init vmlinux EXPORT_SYMBOL +0x6db49dfc fbcon_set_tileops vmlinux EXPORT_SYMBOL +0xc7554cfc pci_set_power_state vmlinux EXPORT_SYMBOL +0x7f9d7ba6 blk_queue_alignment_offset vmlinux EXPORT_SYMBOL +0x1875eede fuse_dev_operations vmlinux EXPORT_SYMBOL_GPL +0xc4658f64 dquot_alloc vmlinux EXPORT_SYMBOL +0x9853c0db wait_on_page_writeback vmlinux EXPORT_SYMBOL_GPL +0x4adf0c4c attach_capi_ctr drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xa2365f44 btracker_issue drivers/md/dm-cache EXPORT_SYMBOL_GPL +0xd366e0d6 fc_lport_logo_resp drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xa7e3ba1c kpatch_unregister kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL +0xb41c2b3f devlink_unregister vmlinux EXPORT_SYMBOL_GPL +0xdd849d51 scsi_get_sense_info_fld vmlinux EXPORT_SYMBOL +0x99b74309 mipi_dsi_shutdown_peripheral vmlinux EXPORT_SYMBOL +0x4f55166f acpi_set_current_resources vmlinux EXPORT_SYMBOL +0xdd9ed4de pci_bus_size_bridges vmlinux EXPORT_SYMBOL +0x32f8c0dc blk_mq_sched_free_hctx_data vmlinux EXPORT_SYMBOL_GPL +0x1c10d295 sysfs_create_groups vmlinux EXPORT_SYMBOL_GPL +0x7d2e6074 vfs_setpos vmlinux EXPORT_SYMBOL +0xe5c4cf93 vm_node_stat vmlinux EXPORT_SYMBOL +0xa0fbac79 wake_up_bit vmlinux EXPORT_SYMBOL +0x53d27c9c snd_pcm_lib_malloc_pages sound/core/snd-pcm EXPORT_SYMBOL +0x9f77e649 transport_alloc_session drivers/target/target_core_mod EXPORT_SYMBOL +0x097245e5 mlx5_modify_nic_vport_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x25298364 drm_mode_create_content_type_property vmlinux EXPORT_SYMBOL +0xb62d7497 __hvc_resize vmlinux 
EXPORT_SYMBOL_GPL +0xb8e84258 tty_do_resize vmlinux EXPORT_SYMBOL +0x6aeefac4 zlib_deflateReset vmlinux EXPORT_SYMBOL +0x4584c152 fscrypt_put_encryption_info vmlinux EXPORT_SYMBOL +0xef055323 fscrypt_get_encryption_info vmlinux EXPORT_SYMBOL +0xcdca3691 nr_irqs vmlinux EXPORT_SYMBOL_GPL +0x51e77c97 pfn_valid vmlinux EXPORT_SYMBOL +0x64f75637 osd_req_op_extent_osd_data_bvec_pos net/ceph/libceph EXPORT_SYMBOL +0x6fecbd43 rds_connect_path_complete net/rds/rds EXPORT_SYMBOL_GPL +0x532a6885 ib_cancel_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd7f2d992 nfs_post_op_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdaf5c16e __cookie_v4_check vmlinux EXPORT_SYMBOL_GPL +0x94780084 xt_match_to_user vmlinux EXPORT_SYMBOL_GPL +0x56c8799d scsi_kunmap_atomic_sg vmlinux EXPORT_SYMBOL +0x6c015bad scsi_block_when_processing_errors vmlinux EXPORT_SYMBOL +0x871ab41a drm_rect_intersect vmlinux EXPORT_SYMBOL +0xecd1f850 wait_for_key_construction vmlinux EXPORT_SYMBOL +0x9b7c498c jbd2_journal_set_triggers vmlinux EXPORT_SYMBOL +0x120178a3 f_setown vmlinux EXPORT_SYMBOL +0x1a1bac9c ZSTD_decompressDCtx lib/zstd/zstd_decompress EXPORT_SYMBOL +0x30c3463f wpan_phy_new net/ieee802154/ieee802154 EXPORT_SYMBOL +0x05e807a9 xdr_encode_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x42801d20 ubi_sync drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x1769daa7 mlx4_sync_pkey_table drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x02a97220 fc_linkdown drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xb2115d1a ip6_dst_hoplimit vmlinux EXPORT_SYMBOL +0x49891ab8 nvmem_device_read vmlinux EXPORT_SYMBOL_GPL +0xae6b0da5 usb_clear_halt vmlinux EXPORT_SYMBOL_GPL +0x18e4f8aa swphy_read_reg vmlinux EXPORT_SYMBOL_GPL +0xa0736220 ttm_check_under_lowerlimit vmlinux EXPORT_SYMBOL +0xe22f6bd0 iommu_get_domain_for_dev vmlinux EXPORT_SYMBOL_GPL +0x6b26bb89 amba_device_unregister vmlinux EXPORT_SYMBOL +0xd643239a acpi_leave_sleep_state vmlinux EXPORT_SYMBOL +0xb12b67ac elv_unregister vmlinux EXPORT_SYMBOL_GPL +0x7c57421b x509_cert_parse vmlinux EXPORT_SYMBOL_GPL +0xadc044b7 vfio_set_irqs_validate_and_prepare drivers/vfio/vfio EXPORT_SYMBOL +0x13964ffa ip6_datagram_connect_v6_only vmlinux EXPORT_SYMBOL_GPL +0x5fbbdb33 flow_rule_match_vlan vmlinux EXPORT_SYMBOL +0x6e4c9ccc dev_mc_sync_multiple vmlinux EXPORT_SYMBOL +0xc9dd767f dev_uc_sync_multiple vmlinux EXPORT_SYMBOL +0x98366b5f of_pci_dma_range_parser_init vmlinux EXPORT_SYMBOL_GPL +0x05a0c61c free_iova_fast vmlinux EXPORT_SYMBOL_GPL +0x993938f9 devm_acpi_dma_controller_register vmlinux EXPORT_SYMBOL_GPL +0x14a39346 devm_gpiod_get_array_optional vmlinux EXPORT_SYMBOL_GPL +0x3832be9a submit_bh vmlinux EXPORT_SYMBOL +0xb2a1a79a mnt_clone_write vmlinux EXPORT_SYMBOL_GPL +0x8f034750 page_mkclean vmlinux EXPORT_SYMBOL_GPL +0xb40c6376 cpuset_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xccf8ef1f unregister_8022_client net/802/p8022 EXPORT_SYMBOL +0x45fb8985 rdma_nl_chk_listeners drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd1989065 md_cluster_ops drivers/md/md-mod EXPORT_SYMBOL +0xf56c8ca1 fc_seq_set_resp drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xa5526619 rb_insert_color vmlinux EXPORT_SYMBOL +0xe5e88194 mr_mfc_find_any_parent vmlinux EXPORT_SYMBOL +0x4b1e1a2b passthru_features_check vmlinux EXPORT_SYMBOL +0x80909a0e skb_checksum_setup vmlinux EXPORT_SYMBOL +0x7e8d8619 usb_anchor_empty vmlinux EXPORT_SYMBOL_GPL +0x138e0957 dax_write_cache_enabled vmlinux EXPORT_SYMBOL_GPL +0xe0b1c103 clk_set_max_rate vmlinux EXPORT_SYMBOL_GPL +0x7bba3202 sys_fillrect vmlinux EXPORT_SYMBOL +0x1cb2c6d8 
kvasprintf vmlinux EXPORT_SYMBOL +0xe26f70c5 skcipher_walk_aead_encrypt vmlinux EXPORT_SYMBOL_GPL +0x2081acd7 debugfs_write_file_bool vmlinux EXPORT_SYMBOL_GPL +0x510d4bb3 page_get_link vmlinux EXPORT_SYMBOL +0x755fbaa1 dma_direct_sync_single_for_cpu vmlinux EXPORT_SYMBOL +0x91164d59 kthread_flush_worker vmlinux EXPORT_SYMBOL_GPL +0x837b12c0 svcauth_unix_set_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x35eff5e0 nf_osf_fingers net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL +0xde42f933 kpatch_shadow_alloc kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL_GPL +0xffe243a4 __xfrm_init_state vmlinux EXPORT_SYMBOL +0x0a1c7271 xdp_rxq_info_reg vmlinux EXPORT_SYMBOL_GPL +0xbb1bb6e3 hid_dump_field vmlinux EXPORT_SYMBOL_GPL +0x76dc9f5b __scsi_init_queue vmlinux EXPORT_SYMBOL_GPL +0xae47fd8a ata_sff_dma_pause vmlinux EXPORT_SYMBOL_GPL +0x07646cee ata_tf_to_fis vmlinux EXPORT_SYMBOL_GPL +0x1f77739c drm_helper_probe_single_connector_modes vmlinux EXPORT_SYMBOL +0x5790e7a0 pci_unlock_rescan_remove vmlinux EXPORT_SYMBOL_GPL +0xf2405fe3 __module_text_address vmlinux EXPORT_SYMBOL_GPL +0x66d87d38 symbol_put_addr vmlinux EXPORT_SYMBOL_GPL +0xfbdfc558 hrtimer_start_range_ns vmlinux EXPORT_SYMBOL_GPL +0x38f2d94e ceph_file_to_extents net/ceph/libceph EXPORT_SYMBOL +0x1904093f ieee802154_free_hw net/mac802154/mac802154 EXPORT_SYMBOL +0x0dbe7ca7 llc_set_station_handler net/llc/llc EXPORT_SYMBOL +0x25323632 snd_ctl_add_vmaster_hook sound/core/snd EXPORT_SYMBOL_GPL +0x5eb3cf9a i2c_smbus_read_word_data vmlinux EXPORT_SYMBOL +0x3c62a682 device_for_each_child_reverse vmlinux EXPORT_SYMBOL_GPL +0xa5919950 drm_property_blob_get vmlinux EXPORT_SYMBOL +0x7e2bfdc9 drm_property_blob_put vmlinux EXPORT_SYMBOL +0xd54b52d8 drm_connector_init_with_ddc vmlinux EXPORT_SYMBOL +0x27596222 tty_port_unregister_device vmlinux EXPORT_SYMBOL_GPL +0x9a6ea1d2 tty_buffer_request_room vmlinux EXPORT_SYMBOL_GPL +0xac762b8b d_alloc vmlinux EXPORT_SYMBOL +0x883762f4 kmem_cache_alloc_node_trace vmlinux EXPORT_SYMBOL +0x9126dfe2 __alloc_pages_nodemask vmlinux EXPORT_SYMBOL +0x098a8587 unuse_mm vmlinux EXPORT_SYMBOL_GPL +0x511f28b3 fc_host_post_event drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0xa2baf754 regmap_multi_reg_write_bypassed vmlinux EXPORT_SYMBOL_GPL +0xb66a72a9 ttm_mem_glob vmlinux EXPORT_SYMBOL +0xcb2340b8 drm_rect_debug_print vmlinux EXPORT_SYMBOL +0xe324a623 drm_simple_display_pipe_attach_bridge vmlinux EXPORT_SYMBOL +0x1a551022 ring_buffer_lock_reserve vmlinux EXPORT_SYMBOL_GPL +0x28f26684 irq_set_affinity_hint vmlinux EXPORT_SYMBOL_GPL +0x65b7acd5 finish_swait vmlinux EXPORT_SYMBOL +0x56bd5947 __tracepoint_bcache_gc_copy_collision drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x445afb43 fscache_object_lookup_negative fs/fscache/fscache EXPORT_SYMBOL +0x9d30d762 inet_sk_rx_dst_set vmlinux EXPORT_SYMBOL +0xf8b9c077 netdev_port_same_parent_id vmlinux EXPORT_SYMBOL +0xb7fe2f50 drm_cma_gem_create_object_default_funcs vmlinux EXPORT_SYMBOL +0xdfe9c884 of_clk_get_from_provider vmlinux EXPORT_SYMBOL_GPL +0x76d9b876 clk_set_rate vmlinux EXPORT_SYMBOL_GPL +0x556e4390 clk_get_rate vmlinux EXPORT_SYMBOL_GPL +0x231e83ac acpi_driver_match_device vmlinux EXPORT_SYMBOL_GPL +0xc219f6a1 gpiod_remove_lookup_table vmlinux EXPORT_SYMBOL_GPL +0x2f414e43 blk_execute_rq_nowait vmlinux EXPORT_SYMBOL_GPL +0x1cde80af blk_set_stacking_limits vmlinux EXPORT_SYMBOL +0x98923cc4 security_inode_mkdir vmlinux EXPORT_SYMBOL_GPL +0xf348ff41 bpf_stats_enabled_key vmlinux EXPORT_SYMBOL +0x3471ae52 nci_nfcee_mode_set net/nfc/nci/nci EXPORT_SYMBOL +0xb72c162e 
ceph_buffer_release net/ceph/libceph EXPORT_SYMBOL +0xee30a438 xdr_stream_decode_opaque net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd922ca8b usb_gstrings_attach drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xf388b18b ipmi_destroy_user drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x56ca6d7c nfs_access_add_cache fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9fc19106 tcf_chain_get_by_act vmlinux EXPORT_SYMBOL +0xf8dbac5f kernel_sendpage vmlinux EXPORT_SYMBOL +0x3b7ed52d ata_print_version vmlinux EXPORT_SYMBOL +0x9be33e98 pci_choose_state vmlinux EXPORT_SYMBOL +0xf13cfedf dquot_quotactl_sysfile_ops vmlinux EXPORT_SYMBOL +0x1646b709 delete_from_page_cache vmlinux EXPORT_SYMBOL +0xa9320d27 ktime_get_seconds vmlinux EXPORT_SYMBOL_GPL +0xa096b889 wait_for_completion_killable vmlinux EXPORT_SYMBOL +0x693c3961 nf_ct_helper_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x06293d83 iscsi_complete_scsi_task drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x53f36614 ahci_platform_shutdown vmlinux EXPORT_SYMBOL_GPL +0xf0024ad2 uart_insert_char vmlinux EXPORT_SYMBOL_GPL +0x26519b9d logfc vmlinux EXPORT_SYMBOL +0x949f7342 __alloc_percpu vmlinux EXPORT_SYMBOL_GPL +0x624aa681 trace_print_hex_seq vmlinux EXPORT_SYMBOL +0x6ebe366f ktime_get_mono_fast_ns vmlinux EXPORT_SYMBOL_GPL +0x19f462ab kfree_call_rcu vmlinux EXPORT_SYMBOL_GPL +0xf1fb2184 __percpu_down_read vmlinux EXPORT_SYMBOL_GPL +0x58752a2e br_multicast_enabled net/bridge/bridge EXPORT_SYMBOL_GPL +0x93dcd07a ip_tunnel_encap_add_ops net/ipv4/ip_tunnel EXPORT_SYMBOL +0xaf83d245 rdma_leave_multicast drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x9f823278 register_capi_driver drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x77d83398 mlxsw_core_read_frc_l drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xd806a273 ocfs2_dlm_dump_lksb fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xbd8d896c __raw_v6_lookup vmlinux EXPORT_SYMBOL_GPL +0x63986f20 __raw_v4_lookup vmlinux EXPORT_SYMBOL_GPL +0xf698ebc7 compat_tcp_getsockopt vmlinux EXPORT_SYMBOL +0x81043d0f led_trigger_unregister vmlinux EXPORT_SYMBOL_GPL +0x8b209213 dm_device_name vmlinux EXPORT_SYMBOL_GPL +0x0004e6a4 ata_eh_qc_retry vmlinux EXPORT_SYMBOL_GPL +0x7a5f6fd3 drm_helper_hpd_irq_event vmlinux EXPORT_SYMBOL +0x7a56f3e9 tpm_get_timeouts vmlinux EXPORT_SYMBOL_GPL +0x5296fe8e pci_enable_pri vmlinux EXPORT_SYMBOL_GPL +0x550bf96c cryptd_alloc_skcipher vmlinux EXPORT_SYMBOL_GPL +0xc811de9c clear_inode vmlinux EXPORT_SYMBOL +0x052ab70b svc_close_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x84b9c78b target_show_dynamic_sessions drivers/target/target_core_mod EXPORT_SYMBOL +0x54a71430 __mlx4_cmd drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0b8b5af5 iscsi_eh_cmd_timed_out drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x99517682 udp_encap_enable vmlinux EXPORT_SYMBOL +0x9479a63e qdisc_hash_add vmlinux EXPORT_SYMBOL +0x00e7ee81 neigh_parms_release vmlinux EXPORT_SYMBOL +0x3db5ae6c led_set_brightness_nosleep vmlinux EXPORT_SYMBOL_GPL +0x7aef5b1e usb_hcd_link_urb_to_ep vmlinux EXPORT_SYMBOL_GPL +0x5f7ffc72 device_release_driver vmlinux EXPORT_SYMBOL_GPL +0x1c0863d3 acpi_initialize_hp_context vmlinux EXPORT_SYMBOL_GPL +0xb2af650c blk_lookup_devt vmlinux EXPORT_SYMBOL +0x7a87af12 crypto_grab_akcipher vmlinux EXPORT_SYMBOL_GPL +0x50c318b5 crypto_grab_skcipher vmlinux EXPORT_SYMBOL_GPL +0xb7872b80 security_path_rename vmlinux EXPORT_SYMBOL +0xe953b21f get_next_ino vmlinux EXPORT_SYMBOL +0xec519735 pm_vt_switch_required vmlinux EXPORT_SYMBOL +0x53c70595 queue_rcu_work vmlinux EXPORT_SYMBOL +0xc03f4efd 
ceph_osdc_get_request net/ceph/libceph EXPORT_SYMBOL +0xfffd89db copy_from_user_toio sound/core/snd EXPORT_SYMBOL +0x87280c3f iscsit_build_task_mgt_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x5875ee62 iscsi_suspend_queue drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xb80b47a4 nfs4_schedule_lease_moved_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x25b7a8de __fl6_sock_lookup vmlinux EXPORT_SYMBOL_GPL +0x0c81c759 efivar_entry_size vmlinux EXPORT_SYMBOL_GPL +0xa299870e dm_kcopyd_copy vmlinux EXPORT_SYMBOL +0xde87d9c4 usb_serial_handle_dcd_change vmlinux EXPORT_SYMBOL_GPL +0xaa0bb15c usb_stor_bulk_srb vmlinux EXPORT_SYMBOL_GPL +0x452101b3 usb_alloc_streams vmlinux EXPORT_SYMBOL_GPL +0x04280970 fixed_phy_change_carrier vmlinux EXPORT_SYMBOL_GPL +0x466cb984 scsi_register_interface vmlinux EXPORT_SYMBOL +0x61b63cb9 iommu_attach_device vmlinux EXPORT_SYMBOL_GPL +0x76cb7d4d pinctrl_utils_add_config vmlinux EXPORT_SYMBOL_GPL +0x4e1cdfad vfs_setlease vmlinux EXPORT_SYMBOL_GPL +0x31ec67a1 find_get_pages_range_tag vmlinux EXPORT_SYMBOL +0x10e81426 from_kgid_munged vmlinux EXPORT_SYMBOL +0x40d04664 console_trylock vmlinux EXPORT_SYMBOL +0xaa0134dc rds_send_ping net/rds/rds EXPORT_SYMBOL_GPL +0x25f5ec84 mlx4_fmr_unmap drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xce8f7ede xdp_do_generic_redirect vmlinux EXPORT_SYMBOL_GPL +0x472124b6 pps_event vmlinux EXPORT_SYMBOL +0x5c838bac i2c_smbus_read_block_data vmlinux EXPORT_SYMBOL +0x37e54273 usb_put_intf vmlinux EXPORT_SYMBOL_GPL +0x7e673cbc ata_sff_queue_pio_task vmlinux EXPORT_SYMBOL_GPL +0x3b907625 __regmap_init_mmio_clk vmlinux EXPORT_SYMBOL_GPL +0x6cc4f15a drm_sched_resubmit_jobs vmlinux EXPORT_SYMBOL +0x6c3b884a clk_multiplier_ops vmlinux EXPORT_SYMBOL_GPL +0xdd18ac37 nla_put_64bit vmlinux EXPORT_SYMBOL +0xb1dabc1e unregister_ftrace_export vmlinux EXPORT_SYMBOL_GPL +0x4d1ff60a wait_for_completion_timeout vmlinux EXPORT_SYMBOL +0x025483b1 set_current_groups vmlinux EXPORT_SYMBOL +0x99d80a33 line6_read_serial_number sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x118d5c69 core_tpg_register drivers/target/target_core_mod EXPORT_SYMBOL +0xa1048d62 mlx4_cq_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xfcb44d23 pnfs_generic_pg_test fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x026dfe4d nfs_show_options fs/nfs/nfs EXPORT_SYMBOL_GPL +0x08cde756 dcb_ieee_getapp_mask vmlinux EXPORT_SYMBOL +0x451f67c0 xfrm6_input_addr vmlinux EXPORT_SYMBOL +0x00c291f0 rt_dst_clone vmlinux EXPORT_SYMBOL +0x097af021 neigh_proc_dointvec_jiffies vmlinux EXPORT_SYMBOL +0x3a3b50f4 skb_copy_header vmlinux EXPORT_SYMBOL +0x0f9850eb usb_stor_probe2 vmlinux EXPORT_SYMBOL_GPL +0xb44e1b01 usb_stor_probe1 vmlinux EXPORT_SYMBOL_GPL +0x34ef9fbd update_region vmlinux EXPORT_SYMBOL +0x87e57166 dma_request_chan_by_mask vmlinux EXPORT_SYMBOL_GPL +0x81db6ebb xz_dec_reset vmlinux EXPORT_SYMBOL +0x1ac458a1 public_key_signature_free vmlinux EXPORT_SYMBOL_GPL +0x88e1d0f0 page_frag_free vmlinux EXPORT_SYMBOL +0x661601de sprint_symbol vmlinux EXPORT_SYMBOL_GPL +0xfe48eecb __tracepoint_pelt_se_tp vmlinux EXPORT_SYMBOL_GPL +0xed8bbe99 __tracepoint_pelt_dl_tp vmlinux EXPORT_SYMBOL_GPL +0x6894835c __tracepoint_pelt_rt_tp vmlinux EXPORT_SYMBOL_GPL +0xf9eced44 LZ4_compress_fast_continue lib/lz4/lz4_compress EXPORT_SYMBOL +0x91bf6753 capi20_release drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x16d3873a blowfish_setkey crypto/blowfish_common EXPORT_SYMBOL_GPL +0x1290be79 tcf_em_unregister vmlinux EXPORT_SYMBOL +0xaf11510f scm_fp_dup vmlinux EXPORT_SYMBOL +0x05610897 
of_changeset_destroy vmlinux EXPORT_SYMBOL_GPL +0xfc64dbb2 led_stop_software_blink vmlinux EXPORT_SYMBOL_GPL +0x37701f5f rc_g_keycode_from_table vmlinux EXPORT_SYMBOL_GPL +0x32c92976 devres_remove vmlinux EXPORT_SYMBOL_GPL +0x23c3b187 mipi_dsi_dcs_set_tear_on vmlinux EXPORT_SYMBOL +0x0e26591c drm_fb_helper_fill_info vmlinux EXPORT_SYMBOL +0x5f97e44c tty_vhangup vmlinux EXPORT_SYMBOL +0xda9fc7ae kstrtobool_from_user vmlinux EXPORT_SYMBOL +0xdd626ee3 fuse_len_args vmlinux EXPORT_SYMBOL_GPL +0x55ecb221 shrink_dcache_parent vmlinux EXPORT_SYMBOL +0xb4dea6cb trace_seq_vprintf vmlinux EXPORT_SYMBOL_GPL +0x2557dcad preempt_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0xf04b2c72 nft_register_obj net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x28a8ba71 ib_close_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x94788376 iscsit_reject_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x2cd73f7b rndis_deregister drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x61492bb7 mlx5_rl_are_equal drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x9a2ca2b4 fc_lport_iterate drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x609f1c7e synchronize_net vmlinux EXPORT_SYMBOL +0x29a24450 netdev_adjacent_change_abort vmlinux EXPORT_SYMBOL +0x69f298ff mdiobus_register_device vmlinux EXPORT_SYMBOL +0x0d393e81 bus_for_each_drv vmlinux EXPORT_SYMBOL_GPL +0x52315bd9 bus_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x2e686a05 drm_gem_cma_print_info vmlinux EXPORT_SYMBOL +0xa32bfd21 bsg_job_done vmlinux EXPORT_SYMBOL_GPL +0x6091797f synchronize_rcu vmlinux EXPORT_SYMBOL_GPL +0x42825ce2 rcu_scheduler_active vmlinux EXPORT_SYMBOL_GPL +0xe523ad75 synchronize_irq vmlinux EXPORT_SYMBOL +0xe009e338 parport_negotiate drivers/parport/parport EXPORT_SYMBOL +0x74dcd98c dm_bufio_get_aux_data drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0xdf8c695a __ndelay vmlinux EXPORT_SYMBOL +0x9e7d6bd0 __udelay vmlinux EXPORT_SYMBOL +0x20b4f809 flow_rule_alloc vmlinux EXPORT_SYMBOL +0x8469f24d pneigh_lookup vmlinux EXPORT_SYMBOL +0x4db55120 usb_gadget_udc_reset vmlinux EXPORT_SYMBOL_GPL +0x9dc54773 component_master_del vmlinux EXPORT_SYMBOL_GPL +0x2bab379a ttm_bo_move_to_lru_tail vmlinux EXPORT_SYMBOL +0x985453e1 lease_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0xe741f48c kern_path_create vmlinux EXPORT_SYMBOL +0xa3a54979 init_on_free vmlinux EXPORT_SYMBOL +0xa9129894 irq_create_mapping_affinity vmlinux EXPORT_SYMBOL_GPL +0x1611823c xdr_terminate_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9f7666ca rpc_net_ns net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9639214d snd_timer_close sound/core/snd-timer EXPORT_SYMBOL +0x4eb4c55e __serpent_encrypt crypto/serpent_generic EXPORT_SYMBOL_GPL +0x2daaadd6 fib_rules_dump vmlinux EXPORT_SYMBOL_GPL +0x01939535 drm_mode_create_scaling_mode_property vmlinux EXPORT_SYMBOL +0x97594501 drm_gem_vram_pin vmlinux EXPORT_SYMBOL +0x62dbff55 pci_irq_vector vmlinux EXPORT_SYMBOL +0xe2ffeef3 scsi_req_init vmlinux EXPORT_SYMBOL +0x84d5d2b5 fsnotify_find_mark vmlinux EXPORT_SYMBOL_GPL +0x5733e70e super_setup_bdi vmlinux EXPORT_SYMBOL +0xcd1c1a53 trace_seq_printf vmlinux EXPORT_SYMBOL_GPL +0xb44e18ea audit_enabled vmlinux EXPORT_SYMBOL_GPL +0x39461d6a in_egroup_p vmlinux EXPORT_SYMBOL +0x4e2e10d2 be_roce_mcc_cmd drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL +0xabe2de09 af_alg_release crypto/af_alg EXPORT_SYMBOL_GPL +0xfd617650 eth_gro_complete vmlinux EXPORT_SYMBOL +0x8b242b4c drm_fb_cma_get_gem_addr vmlinux EXPORT_SYMBOL_GPL +0xef92b16a drm_atomic_helper_fake_vblank vmlinux EXPORT_SYMBOL +0x5b503390 
drm_dp_aux_register vmlinux EXPORT_SYMBOL +0x409873e3 tty_termios_baud_rate vmlinux EXPORT_SYMBOL +0x1240f2bc rdev_get_regmap vmlinux EXPORT_SYMBOL_GPL +0x07a890c8 fb_alloc_cmap vmlinux EXPORT_SYMBOL +0x22dc2e14 perf_event_addr_filters_sync vmlinux EXPORT_SYMBOL_GPL +0x8485cf18 irq_chip_set_type_parent vmlinux EXPORT_SYMBOL_GPL +0x12068dc2 dccp_check_req net/dccp/dccp EXPORT_SYMBOL_GPL +0x9a55ca7e nf_conntrack_broadcast_help net/netfilter/nf_conntrack_broadcast EXPORT_SYMBOL_GPL +0x00ab62e1 line6_init_pcm sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x683b2ca2 ping_seq_start vmlinux EXPORT_SYMBOL_GPL +0x9f54ead7 gro_cells_destroy vmlinux EXPORT_SYMBOL +0x41611304 input_mt_sync_frame vmlinux EXPORT_SYMBOL +0xddcc92a3 phy_speed_up vmlinux EXPORT_SYMBOL_GPL +0x47554880 dax_copy_from_iter vmlinux EXPORT_SYMBOL_GPL +0xc982c12e release_firmware vmlinux EXPORT_SYMBOL +0x6f116cf3 drm_gem_shmem_mmap vmlinux EXPORT_SYMBOL_GPL +0xa5ceda35 drm_gem_shmem_vmap vmlinux EXPORT_SYMBOL +0xc0092a9c blk_mq_start_stopped_hw_queue vmlinux EXPORT_SYMBOL_GPL +0xeb0809f0 set_posix_acl vmlinux EXPORT_SYMBOL +0x9e0c711d vzalloc_node vmlinux EXPORT_SYMBOL +0x7f24de73 jiffies_to_usecs vmlinux EXPORT_SYMBOL +0x37befc70 jiffies_to_msecs vmlinux EXPORT_SYMBOL +0x4eca26e4 iscsit_build_nopin_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x21272424 mlx5_query_port_oper_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc4ece23c hnae3_unregister_client drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL +0x95ab8542 iptun_encaps vmlinux EXPORT_SYMBOL +0xb31d3875 inet_frag_queue_insert vmlinux EXPORT_SYMBOL +0xcfb4343f tcp_child_process vmlinux EXPORT_SYMBOL +0xe766de7e sock_diag_put_filterinfo vmlinux EXPORT_SYMBOL +0x6106be94 netdev_set_tc_queue vmlinux EXPORT_SYMBOL +0xe2661656 devm_power_supply_register vmlinux EXPORT_SYMBOL_GPL +0x2262cdd2 devm_mdiobus_alloc_size vmlinux EXPORT_SYMBOL_GPL +0x55eaad3c phy_start_machine vmlinux EXPORT_SYMBOL_GPL +0x770c3281 __pm_runtime_suspend vmlinux EXPORT_SYMBOL_GPL +0xa8ad5d01 drm_detect_hdmi_monitor vmlinux EXPORT_SYMBOL +0x7c6c868e drm_atomic_helper_update_plane vmlinux EXPORT_SYMBOL +0xc666a132 crc_t10dif vmlinux EXPORT_SYMBOL +0xc6055c9e kvasprintf_const vmlinux EXPORT_SYMBOL +0xaa09aa8c param_get_charp vmlinux EXPORT_SYMBOL +0xa4ec1f29 nf_nat_redirect_ipv6 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xfcbccfda nf_nat_redirect_ipv4 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x53196a7b mtd_unpoint drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x1826892d xdp_rxq_info_unreg vmlinux EXPORT_SYMBOL_GPL +0x3f4f75cf netdev_next_lower_dev_rcu vmlinux EXPORT_SYMBOL +0xe6337e9e validate_xmit_skb_list vmlinux EXPORT_SYMBOL_GPL +0xc0737cf3 of_find_matching_node_and_match vmlinux EXPORT_SYMBOL +0x0c92db21 mdiobus_write vmlinux EXPORT_SYMBOL +0x52ae771f tty_port_destroy vmlinux EXPORT_SYMBOL +0x15f02ad3 bsg_unregister_queue vmlinux EXPORT_SYMBOL_GPL +0xb21da642 bio_trim vmlinux EXPORT_SYMBOL_GPL +0x50e67370 dquot_claim_space_nodirty vmlinux EXPORT_SYMBOL +0xaa6c6668 dax_iomap_rw vmlinux EXPORT_SYMBOL_GPL +0x9009a29c blkdev_read_iter vmlinux EXPORT_SYMBOL_GPL +0x5a8f6974 prepare_binprm vmlinux EXPORT_SYMBOL +0x7bc93f7c nfc_digital_free_device net/nfc/nfc_digital EXPORT_SYMBOL +0xde98baf2 atm_dev_deregister net/atm/atm EXPORT_SYMBOL +0xd85a9700 rdma_restrack_put drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdf2c2742 rb_last vmlinux EXPORT_SYMBOL +0x817573dd tcp_v4_destroy_sock vmlinux EXPORT_SYMBOL +0x2754a7fa ehci_setup vmlinux EXPORT_SYMBOL_GPL +0xcafc063d 
ahci_port_resume vmlinux EXPORT_SYMBOL_GPL +0x69f83dc8 pm_clk_remove_clk vmlinux EXPORT_SYMBOL_GPL +0x1876d4c2 acpi_debugfs_dir vmlinux EXPORT_SYMBOL_GPL +0x104d7f85 remove_conflicting_pci_framebuffers vmlinux EXPORT_SYMBOL +0x6d8aafc2 gpiod_export vmlinux EXPORT_SYMBOL_GPL +0x04c4f603 mpi_get_buffer vmlinux EXPORT_SYMBOL_GPL +0x98a882a5 sysfs_create_mount_point vmlinux EXPORT_SYMBOL_GPL +0x085da3a6 mount_bdev vmlinux EXPORT_SYMBOL +0xcc5d22d9 can_do_mlock vmlinux EXPORT_SYMBOL +0x60a13e90 rcu_barrier vmlinux EXPORT_SYMBOL_GPL +0xe76e7226 ceph_pagelist_alloc net/ceph/libceph EXPORT_SYMBOL +0xd0b634ed nft_do_chain net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x9b6eba81 nf_nat_masquerade_ipv6 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x468d85f6 nf_nat_masquerade_ipv4 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xaa144d0c nf_ct_helper_expectfn_find_by_symbol net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x3ede1448 ib_mr_pool_init drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x368320d8 nfs_umount_begin fs/nfs/nfs EXPORT_SYMBOL_GPL +0x3c3ff9fd sprintf vmlinux EXPORT_SYMBOL +0xd7542307 ndisc_mc_map vmlinux EXPORT_SYMBOL +0x1bd031df dm_internal_suspend_fast vmlinux EXPORT_SYMBOL_GPL +0x01d757cc hisi_sas_kill_tasklets vmlinux EXPORT_SYMBOL_GPL +0x9a2ecf37 ttm_dma_unpopulate vmlinux EXPORT_SYMBOL_GPL +0xec2b8a42 acpi_walk_namespace vmlinux EXPORT_SYMBOL +0x7df9327c dup_iter vmlinux EXPORT_SYMBOL +0xda3d10a8 security_tun_dev_open vmlinux EXPORT_SYMBOL +0xb6bfc03d simple_attr_write vmlinux EXPORT_SYMBOL_GPL +0x1fa8c9a2 simple_unlink vmlinux EXPORT_SYMBOL +0xfc93b5f5 generic_listxattr vmlinux EXPORT_SYMBOL +0x5379cea3 ceph_caps_for_mode net/ceph/libceph EXPORT_SYMBOL +0x2d1c6e4e usb_add_config drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x01c96f3b mptscsih_io_done drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x05e1501c wimax_state_change vmlinux EXPORT_SYMBOL_GPL +0xf6ebc03b net_ratelimit vmlinux EXPORT_SYMBOL +0xe391f5e8 dev_addr_init vmlinux EXPORT_SYMBOL +0xdb9ef326 ahci_platform_enable_clks vmlinux EXPORT_SYMBOL_GPL +0x12b5171b platform_device_add_properties vmlinux EXPORT_SYMBOL_GPL +0x7d0ed711 ttm_mem_global_free vmlinux EXPORT_SYMBOL +0x28482614 drm_event_cancel_free vmlinux EXPORT_SYMBOL +0x1ebf6c2a pci_power_names vmlinux EXPORT_SYMBOL_GPL +0x7af43f4a shash_ahash_update vmlinux EXPORT_SYMBOL_GPL +0xf2e5bd87 security_free_mnt_opts vmlinux EXPORT_SYMBOL +0xa9f041e3 posix_acl_valid vmlinux EXPORT_SYMBOL +0xe15b71bf invalidate_inode_buffers vmlinux EXPORT_SYMBOL +0xd7834d8c PageMovable vmlinux EXPORT_SYMBOL +0x3d8560e4 cpu_hwcaps vmlinux EXPORT_SYMBOL +0x5551e6db ip_tunnel_ioctl net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xc22cd18c mlx4_SET_PORT_PRIO2TC drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x1d77b0f8 unix_socket_table vmlinux EXPORT_SYMBOL_GPL +0x4e87e6b8 nl_table vmlinux EXPORT_SYMBOL_GPL +0x7013d792 usb_autopm_get_interface_async vmlinux EXPORT_SYMBOL_GPL +0x0ee55954 raid_class_release vmlinux EXPORT_SYMBOL +0xbf09c4d5 mipi_dsi_dcs_exit_sleep_mode vmlinux EXPORT_SYMBOL +0x6e0bbabe drm_mm_print vmlinux EXPORT_SYMBOL +0x742578a5 wait_for_random_bytes vmlinux EXPORT_SYMBOL +0xbde7dc15 regulator_register_notifier vmlinux EXPORT_SYMBOL_GPL +0x9db61d1b crypto_shash_update vmlinux EXPORT_SYMBOL_GPL +0x8040873e find_vma vmlinux EXPORT_SYMBOL +0xf79e8ec5 gfn_to_page_many_atomic vmlinux EXPORT_SYMBOL_GPL +0x59a2712d raid6_gfinv lib/raid6/raid6_pq EXPORT_SYMBOL +0x994ffbb4 svc_print_addr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x925a9513 nf_ct_extend_unregister net/netfilter/nf_conntrack 
EXPORT_SYMBOL_GPL +0xe104c5e7 can_get_echo_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xe3a2b60d hdlcdrv_transmitter drivers/net/hamradio/hdlcdrv EXPORT_SYMBOL +0xabc023c6 mlx4_multicast_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x07b133d2 dcb_ieee_getapp_dscp_prio_mask_map vmlinux EXPORT_SYMBOL +0x082c071a kernel_connect vmlinux EXPORT_SYMBOL +0x1bb071d9 of_phandle_iterator_init vmlinux EXPORT_SYMBOL_GPL +0x3af473d5 sas_request_addr vmlinux EXPORT_SYMBOL_GPL +0x48ecf76e drm_gem_cma_prime_import_sg_table_vmap vmlinux EXPORT_SYMBOL +0x2976a755 drm_dp_mst_connector_late_register vmlinux EXPORT_SYMBOL +0x05d08a0c drm_vram_mm_file_operations_mmap vmlinux EXPORT_SYMBOL +0x48687702 devm_regulator_unregister vmlinux EXPORT_SYMBOL_GPL +0xcad1aca8 acpi_exception vmlinux EXPORT_SYMBOL +0xa7068b12 gen_pool_get vmlinux EXPORT_SYMBOL_GPL +0x49a9467e dquot_destroy vmlinux EXPORT_SYMBOL +0xae1446a6 single_open_size vmlinux EXPORT_SYMBOL +0x87bd2e61 alloc_canfd_skb drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x025fb358 fcoe_ctlr_recv_flogi drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x6d4b5941 inet_stream_connect vmlinux EXPORT_SYMBOL +0x8ad28f9d peernet2id vmlinux EXPORT_SYMBOL +0x4c45e7f8 stmmac_resume vmlinux EXPORT_SYMBOL_GPL +0xb0311ebd mdiobus_read_nested vmlinux EXPORT_SYMBOL +0x5b97df19 dma_buf_end_cpu_access vmlinux EXPORT_SYMBOL_GPL +0xdebed918 virtqueue_notify vmlinux EXPORT_SYMBOL_GPL +0xd35a6d31 mempool_kmalloc vmlinux EXPORT_SYMBOL +0xcbd98a6d rpc_mkpipe_dentry net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7c21a2dc hinic_get_chip_present_flag drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xb13a8b8c qdisc_warn_nonwc vmlinux EXPORT_SYMBOL +0xe0fe04d7 devlink_port_type_eth_set vmlinux EXPORT_SYMBOL_GPL +0xab8cc2ba ahci_stop_engine vmlinux EXPORT_SYMBOL_GPL +0xe98ece32 ata_eh_qc_complete vmlinux EXPORT_SYMBOL_GPL +0xf64a43a9 vga_remove_vgacon vmlinux EXPORT_SYMBOL +0xbe36c87b pci_release_regions vmlinux EXPORT_SYMBOL +0xaa78a429 find_lock_entry vmlinux EXPORT_SYMBOL +0xe8e38e48 mpt_device_driver_register drivers/message/fusion/mptbase EXPORT_SYMBOL +0x5fdbb4f7 nlmclnt_done fs/lockd/lockd EXPORT_SYMBOL_GPL +0x75fb9062 arch_timer_read_counter vmlinux EXPORT_SYMBOL_GPL +0x553b49a4 cpufreq_get_driver_data vmlinux EXPORT_SYMBOL_GPL +0x2cad8a81 tpm_send vmlinux EXPORT_SYMBOL_GPL +0xa8e6933a qdf2400_e44_present vmlinux EXPORT_SYMBOL +0x7bf0428f blk_queue_physical_block_size vmlinux EXPORT_SYMBOL +0x98ca60a0 dio_end_io vmlinux EXPORT_SYMBOL_GPL +0xd5a63cce relay_subbufs_consumed vmlinux EXPORT_SYMBOL_GPL +0xb8212341 timecounter_cyc2time vmlinux EXPORT_SYMBOL_GPL +0xd707d016 dccp_feat_signal_nn_change net/dccp/dccp EXPORT_SYMBOL_GPL +0x7bc525d6 ip_vs_new_conn_out net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xe0cb5a00 target_setup_cmd_from_cdb drivers/target/target_core_mod EXPORT_SYMBOL +0x6a114af5 mlx4_mr_hw_put_mpt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xcba377cf fc_seq_els_rsp_send drivers/scsi/libfc/libfc EXPORT_SYMBOL_GPL +0x838d2bc8 siphash_3u32 vmlinux EXPORT_SYMBOL +0x70002fe8 siphash_1u32 vmlinux EXPORT_SYMBOL +0xd7bead7a sock_diag_check_cookie vmlinux EXPORT_SYMBOL_GPL +0x23412816 rtc_tm_to_ktime vmlinux EXPORT_SYMBOL_GPL +0xa095e02e generic_check_addressable vmlinux EXPORT_SYMBOL +0x0b7cf853 get_tree_nodev vmlinux EXPORT_SYMBOL +0x1fa18781 perf_event_read_value vmlinux EXPORT_SYMBOL_GPL +0x04482cdb __refrigerator vmlinux EXPORT_SYMBOL +0x39b8057a irqchip_fwnode_ops vmlinux EXPORT_SYMBOL_GPL +0x7cb1ae69 cpu_down vmlinux 
EXPORT_SYMBOL +0xd78f432d nci_hci_dev_session_init net/nfc/nci/nci EXPORT_SYMBOL +0x25458acd nft_validate_register_store net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x93fc0c12 inet6_csk_addr2sockaddr vmlinux EXPORT_SYMBOL_GPL +0xf14cd232 netdev_refcnt_read vmlinux EXPORT_SYMBOL +0x597b583f i2c_recover_bus vmlinux EXPORT_SYMBOL_GPL +0x014d19fe input_mt_init_slots vmlinux EXPORT_SYMBOL +0x21071eb9 usb_disable_lpm vmlinux EXPORT_SYMBOL_GPL +0xb16cec52 usb_disable_ltm vmlinux EXPORT_SYMBOL_GPL +0xa39f6999 phy_10gbit_full_features vmlinux EXPORT_SYMBOL_GPL +0xcadb5ac8 ata_link_printk vmlinux EXPORT_SYMBOL +0xb2bb948a devres_close_group vmlinux EXPORT_SYMBOL_GPL +0x3ba048f4 drm_mode_parse_command_line_for_connector vmlinux EXPORT_SYMBOL +0xf6ad8ad9 devm_of_find_backlight vmlinux EXPORT_SYMBOL +0xf5f370e0 async_schedule_node vmlinux EXPORT_SYMBOL_GPL +0x62fec4c2 failover_unregister net/core/failover EXPORT_SYMBOL_GPL +0x8c060b0f register_candev drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xe6aeebaf nfs_initiate_commit fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd7e56a4e simple_strtoll vmlinux EXPORT_SYMBOL +0x20000329 simple_strtoul vmlinux EXPORT_SYMBOL +0x355bc89a klist_next vmlinux EXPORT_SYMBOL_GPL +0xbd344149 tcp_rcv_established vmlinux EXPORT_SYMBOL +0x4aa58bea inet_getpeer vmlinux EXPORT_SYMBOL_GPL +0xd7904511 ipv4_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0xd7cade2b xt_unregister_targets vmlinux EXPORT_SYMBOL +0xbf259d98 usb_enable_lpm vmlinux EXPORT_SYMBOL_GPL +0x2f4e6f73 usb_enable_ltm vmlinux EXPORT_SYMBOL_GPL +0x43ce21c5 ahci_set_em_messages vmlinux EXPORT_SYMBOL_GPL +0xf5a08d2d drm_mode_object_get vmlinux EXPORT_SYMBOL +0xde25f88c __tracepoint_add_device_to_group vmlinux EXPORT_SYMBOL_GPL +0x39655269 acpi_ec_remove_query_handler vmlinux EXPORT_SYMBOL_GPL +0x8e7f0a9c acpi_get_phys_id vmlinux EXPORT_SYMBOL_GPL +0x3aca3c87 fsnotify_destroy_mark vmlinux EXPORT_SYMBOL_GPL +0x39d96b92 balance_dirty_pages_ratelimited vmlinux EXPORT_SYMBOL +0x99c9cd6d ib_send_cm_drep drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x9370e53a ib_send_cm_dreq drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xe3e3386f ib_sa_sendonly_fullmem_support drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x54344b1d net_ns_type_operations vmlinux EXPORT_SYMBOL_GPL +0xbe6ba231 i2c_detect_slave_mode vmlinux EXPORT_SYMBOL_GPL +0xf34fa5a8 phy_free_interrupt vmlinux EXPORT_SYMBOL +0xcf2df258 mipi_dsi_dcs_write_buffer vmlinux EXPORT_SYMBOL +0xb9cad492 __drm_atomic_state_free vmlinux EXPORT_SYMBOL +0x1d5d45e9 drm_panel_bridge_add vmlinux EXPORT_SYMBOL +0x646a6b3c drm_gem_vram_put vmlinux EXPORT_SYMBOL +0x8a9d4e45 iommu_unregister_device_fault_handler vmlinux EXPORT_SYMBOL_GPL +0x2de1ef4a proc_symlink vmlinux EXPORT_SYMBOL +0x74f14b6c encode_rs8 lib/reed_solomon/reed_solomon EXPORT_SYMBOL_GPL +0x9ca95932 ceph_create_snap_context net/ceph/libceph EXPORT_SYMBOL +0x2373ef1f ceph_copy_user_to_page_vector net/ceph/libceph EXPORT_SYMBOL +0xb9bd9478 parport_ieee1284_epp_write_data drivers/parport/parport EXPORT_SYMBOL +0x871c7b3e parport_ieee1284_ecp_write_data drivers/parport/parport EXPORT_SYMBOL +0x6bd07ec9 spi_populate_tag_msg drivers/scsi/scsi_transport_spi EXPORT_SYMBOL_GPL +0x0e17678a siphash_4u64 vmlinux EXPORT_SYMBOL +0xa0ae1e73 siphash_3u64 vmlinux EXPORT_SYMBOL +0x12cabc89 siphash_2u64 vmlinux EXPORT_SYMBOL +0x3126a9e8 siphash_1u64 vmlinux EXPORT_SYMBOL +0x8c7bd877 __tracepoint_br_fdb_external_learn_add vmlinux EXPORT_SYMBOL_GPL +0xbaaa203e __rtnl_link_unregister vmlinux EXPORT_SYMBOL_GPL +0x38feb0a4 rtnl_kfree_skbs vmlinux EXPORT_SYMBOL 
+0x5d1d1895 usb_hcd_pci_pm_ops vmlinux EXPORT_SYMBOL_GPL +0x88478416 cdc_parse_cdc_header vmlinux EXPORT_SYMBOL +0xc4a49ca3 drm_gem_cma_prime_get_sg_table vmlinux EXPORT_SYMBOL_GPL +0xb51a629d drm_fb_xrgb8888_to_rgb888_dstclip vmlinux EXPORT_SYMBOL +0x9fbe77f7 drm_gem_vram_bo_driver_verify_access vmlinux EXPORT_SYMBOL +0xc6a4a872 __clk_is_enabled vmlinux EXPORT_SYMBOL_GPL +0x564f7608 acpi_reconfig_notifier_register vmlinux EXPORT_SYMBOL +0x05b27a8d cpu_rmap_put vmlinux EXPORT_SYMBOL +0x3af90b12 blk_integrity_compare vmlinux EXPORT_SYMBOL +0x5323541d blk_queue_dma_alignment vmlinux EXPORT_SYMBOL +0xc31db02c security_path_unlink vmlinux EXPORT_SYMBOL +0xfef8cf74 vfs_statx vmlinux EXPORT_SYMBOL +0xd15b2dd5 remap_vmalloc_range vmlinux EXPORT_SYMBOL +0x53457aa9 noop_backing_dev_info vmlinux EXPORT_SYMBOL_GPL +0xd985dc99 mempool_free_pages vmlinux EXPORT_SYMBOL +0x49f80d87 irq_domain_set_hwirq_and_chip vmlinux EXPORT_SYMBOL_GPL +0xfcec0987 enable_irq vmlinux EXPORT_SYMBOL +0x1e7bbcb3 kernel_restart vmlinux EXPORT_SYMBOL_GPL +0xd7689f13 ib_modify_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x117fb211 mlx5_core_alloc_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x93bfdbaf iscsi_set_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xfb1d0bad ip_cmsg_recv_offset vmlinux EXPORT_SYMBOL +0x1e62643b skb_flow_dissector_init vmlinux EXPORT_SYMBOL +0xd2695833 nvmem_unregister vmlinux EXPORT_SYMBOL_GPL +0xc4f9c78f ata_port_printk vmlinux EXPORT_SYMBOL +0xdb6536a2 of_pci_get_max_link_speed vmlinux EXPORT_SYMBOL_GPL +0x90576ec4 vmemdup_user vmlinux EXPORT_SYMBOL +0x1e8faf90 filemap_page_mkwrite vmlinux EXPORT_SYMBOL +0x170cc36c put_timespec64 vmlinux EXPORT_SYMBOL_GPL +0xe452a262 ax25_ip_xmit net/ax25/ax25 EXPORT_SYMBOL +0xf1dbb9f6 fc_rport_terminate_io drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xa07a37f0 memchr vmlinux EXPORT_SYMBOL +0x90f8e489 tcf_block_put_ext vmlinux EXPORT_SYMBOL +0x6fa38b9d __get_hash_from_flowi6 vmlinux EXPORT_SYMBOL +0xb4a18f65 raid_class_attach vmlinux EXPORT_SYMBOL +0xfad9c827 kill_dax vmlinux EXPORT_SYMBOL_GPL +0x0ad561a5 mipi_dsi_generic_write vmlinux EXPORT_SYMBOL +0x1b2eaf92 drm_crtc_arm_vblank_event vmlinux EXPORT_SYMBOL +0xecbf3de0 drm_mode_create vmlinux EXPORT_SYMBOL +0x69f011b1 __fscrypt_prepare_lookup vmlinux EXPORT_SYMBOL_GPL +0x7c508d68 clear_wb_congested vmlinux EXPORT_SYMBOL +0xc18ac88d nf_ct_expect_hsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x8b246811 nfs_request_add_commit_list fs/nfs/nfs EXPORT_SYMBOL_GPL +0xaef507f4 nfs_invalidate_atime fs/nfs/nfs EXPORT_SYMBOL_GPL +0xbb273b36 nfs_rename fs/nfs/nfs EXPORT_SYMBOL_GPL +0xf7495491 inet6_release vmlinux EXPORT_SYMBOL +0x5a8c1266 of_get_parent vmlinux EXPORT_SYMBOL +0xd8602b6a tun_is_xdp_frame vmlinux EXPORT_SYMBOL +0xed5f5714 drm_syncobj_find vmlinux EXPORT_SYMBOL +0x69d34858 drm_mode_destroy vmlinux EXPORT_SYMBOL +0xad8a346e drm_dp_mst_topology_mgr_resume vmlinux EXPORT_SYMBOL +0x03bf8b0a clk_hw_get_name vmlinux EXPORT_SYMBOL_GPL +0x736ed2de pci_destroy_slot vmlinux EXPORT_SYMBOL_GPL +0x404c96a2 vfs_kern_mount vmlinux EXPORT_SYMBOL_GPL +0xd5bd7dac ring_buffer_record_enable_cpu vmlinux EXPORT_SYMBOL_GPL +0x267df662 smp_call_on_cpu vmlinux EXPORT_SYMBOL_GPL +0x9ec6ca96 ktime_get_real_ts64 vmlinux EXPORT_SYMBOL +0x7406bb23 insert_resource vmlinux EXPORT_SYMBOL_GPL +0x6f1269f0 nfc_set_remote_general_bytes net/nfc/nfc EXPORT_SYMBOL +0x56e1fcad ib_get_cached_port_state drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x484489a4 mlxsw_cmd_exec 
drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x4c3a464b hinic_misx_intr_clear_resend_bit drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xdbe97161 flow_indr_block_cb_register vmlinux EXPORT_SYMBOL_GPL +0x74931d09 sock_diag_register vmlinux EXPORT_SYMBOL_GPL +0x7753d7e3 sk_net_capable vmlinux EXPORT_SYMBOL +0xb54e2b45 spi_bus_type vmlinux EXPORT_SYMBOL_GPL +0x898175e5 anon_transport_class_unregister vmlinux EXPORT_SYMBOL_GPL +0xfc38c89a mipi_dsi_dcs_nop vmlinux EXPORT_SYMBOL +0x45c152b9 drm_dp_mst_atomic_check vmlinux EXPORT_SYMBOL +0xb2bcb088 acpi_current_gpe_count vmlinux EXPORT_SYMBOL +0xa4f8ee4a acpi_device_modalias vmlinux EXPORT_SYMBOL_GPL +0x5a0b73d0 zlib_deflateInit2 vmlinux EXPORT_SYMBOL +0x7a908fb5 submit_bio vmlinux EXPORT_SYMBOL +0x75d819ce alloc_page_buffers vmlinux EXPORT_SYMBOL_GPL +0x622c7922 register_oom_notifier vmlinux EXPORT_SYMBOL_GPL +0xda7c78bb trace_event_buffer_lock_reserve vmlinux EXPORT_SYMBOL_GPL +0x25a65511 on_each_cpu_mask vmlinux EXPORT_SYMBOL +0x549525ef handle_nested_irq vmlinux EXPORT_SYMBOL_GPL +0x2592fc6c console_printk vmlinux EXPORT_SYMBOL_GPL +0x55c76a23 ksys_sync_helper vmlinux EXPORT_SYMBOL_GPL +0xbddd70b6 ceph_osdc_update_epoch_barrier net/ceph/libceph EXPORT_SYMBOL +0xf311ca57 ceph_msg_dump net/ceph/libceph EXPORT_SYMBOL +0xa7098c9e br_dev_queue_push_xmit net/bridge/bridge EXPORT_SYMBOL_GPL +0x34766d9d crypto_transfer_akcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL +0xd344e4ee ocfs2_stack_glue_set_max_proto_version fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xf2741b88 fl6_update_dst vmlinux EXPORT_SYMBOL_GPL +0x73c3e508 neigh_lookup vmlinux EXPORT_SYMBOL +0x9338e670 usb_put_hcd vmlinux EXPORT_SYMBOL_GPL +0x5f10b54c sas_alloc_slow_task vmlinux EXPORT_SYMBOL_GPL +0xccfd2ebc scsi_dev_info_list_del_keyed vmlinux EXPORT_SYMBOL +0x41b094ed ata_host_resume vmlinux EXPORT_SYMBOL_GPL +0xef8e88d9 uart_get_rs485_mode vmlinux EXPORT_SYMBOL_GPL +0xcdb8f16c debugfs_create_u32_array vmlinux EXPORT_SYMBOL_GPL +0x3b475f93 bpf_map_inc_not_zero vmlinux EXPORT_SYMBOL_GPL +0x572c5d69 trace_array_printk vmlinux EXPORT_SYMBOL_GPL +0x0e1e7a4d mpt_HardResetHandler drivers/message/fusion/mptbase EXPORT_SYMBOL +0x827a2f1f mlxsw_afa_block_jump drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x688fc5bd hinic_get_pci_device_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x98b0ece8 nfs_init_timeout_values fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd7452eed tcp_mtup_init vmlinux EXPORT_SYMBOL +0x46066e5b perf_pmu_name vmlinux EXPORT_SYMBOL_GPL +0x122b6304 cpuidle_register vmlinux EXPORT_SYMBOL_GPL +0xdaa64358 devm_clk_unregister vmlinux EXPORT_SYMBOL_GPL +0xc1179daa kstrtou8_from_user vmlinux EXPORT_SYMBOL +0x8a6af65c kstrtoul_from_user vmlinux EXPORT_SYMBOL +0x3e5afcec __clocksource_update_freq_scale vmlinux EXPORT_SYMBOL_GPL +0xbea5ff1e static_key_initialized vmlinux EXPORT_SYMBOL_GPL +0x38e2ebd7 l2tp_tunnel_create net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xd7654df0 cache_purge net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf0057322 rpc_sleep_on_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd163cade dm_tm_commit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x71e1d813 mlxsw_core_port_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x18007d59 crypto_chacha_crypt crypto/chacha_generic EXPORT_SYMBOL_GPL +0x222e7ce2 sysfs_streq vmlinux EXPORT_SYMBOL +0x413f7eed dbs_update vmlinux EXPORT_SYMBOL_GPL +0xf18bf519 drm_dev_fini vmlinux EXPORT_SYMBOL +0x48b62a57 drm_dp_link_train_channel_eq_delay 
vmlinux EXPORT_SYMBOL +0xf5e5a87b hdmi_infoframe_pack_only vmlinux EXPORT_SYMBOL +0xafe1981f sbitmap_bitmap_show vmlinux EXPORT_SYMBOL_GPL +0x62f9a25c bio_associate_blkg vmlinux EXPORT_SYMBOL_GPL +0xea7154b6 alloc_buffer_head vmlinux EXPORT_SYMBOL +0x2d52de4b bpf_event_output vmlinux EXPORT_SYMBOL_GPL +0xd067d3c5 system_freezable_power_efficient_wq vmlinux EXPORT_SYMBOL_GPL +0x0209ba50 xdr_write_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbc20350e mdev_register_driver drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x47e8eefa nvme_sync_io_queues drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x26884ff7 nfs_alloc_fhandle fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1fe912f1 netdev_alloc_frag vmlinux EXPORT_SYMBOL +0xb2fecfac dm_accept_partial_bio vmlinux EXPORT_SYMBOL_GPL +0x2b2c43a6 pci_bus_read_dev_vendor_id vmlinux EXPORT_SYMBOL +0xd2f7f282 fscrypt_decrypt_block_inplace vmlinux EXPORT_SYMBOL +0x30e7ccea fscrypt_encrypt_block_inplace vmlinux EXPORT_SYMBOL +0x884af21c vhost_disable_notify drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x6287243b vhost_vq_avail_empty drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xb1150977 iscsi_conn_teardown drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x9170acaf nvme_stop_queues drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xcafdd707 ocfs2_dlm_lock_status fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xaf6f2a1c nd_tbl vmlinux EXPORT_SYMBOL_GPL +0xf43e71fd device_remove_file vmlinux EXPORT_SYMBOL_GPL +0xbba8f618 acpi_dev_get_dma_resources vmlinux EXPORT_SYMBOL_GPL +0x1b8822d8 pinctrl_gpio_direction_output vmlinux EXPORT_SYMBOL_GPL +0x1db59f2d security_inode_invalidate_secctx vmlinux EXPORT_SYMBOL +0x60506751 unmap_kernel_range_noflush vmlinux EXPORT_SYMBOL_GPL +0x02af1a9b param_set_bool_enable_only vmlinux EXPORT_SYMBOL_GPL +0x289c3714 nf_ct_alloc_hashtable net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x71620ba0 ipv6_skip_exthdr vmlinux EXPORT_SYMBOL +0xf7d914c7 tcp_md5_hash_skb_data vmlinux EXPORT_SYMBOL +0x016c86d2 ip_options_compile vmlinux EXPORT_SYMBOL +0xe0db36fa sock_diag_unregister vmlinux EXPORT_SYMBOL_GPL +0xe082e88d acpi_check_address_range vmlinux EXPORT_SYMBOL +0x2e868f9a pcibios_bus_to_resource vmlinux EXPORT_SYMBOL +0xcb46065c param_get_ulong vmlinux EXPORT_SYMBOL +0x45850703 kill_pid vmlinux EXPORT_SYMBOL +0x97fa07e9 vringh_iov_push_kern drivers/vhost/vringh EXPORT_SYMBOL +0xde804bb9 vringh_iov_pull_kern drivers/vhost/vringh EXPORT_SYMBOL +0xe5bc9a53 slhc_free drivers/net/slip/slhc EXPORT_SYMBOL +0x7862ad31 hinic_get_rq_wqe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x0a2bc980 rt6_lookup vmlinux EXPORT_SYMBOL +0x304eb671 unix_outq_len vmlinux EXPORT_SYMBOL_GPL +0x6bfc9b3b udp_seq_start vmlinux EXPORT_SYMBOL +0x08ce14f5 device_store_int vmlinux EXPORT_SYMBOL_GPL +0x53fb5893 drm_gem_mmap vmlinux EXPORT_SYMBOL +0x71c0b65e dim_turn vmlinux EXPORT_SYMBOL +0x64999478 congestion_wait vmlinux EXPORT_SYMBOL +0xa6f963ee ring_buffer_read_prepare vmlinux EXPORT_SYMBOL_GPL +0x8ea32aa3 tipc_dump_start net/tipc/tipc EXPORT_SYMBOL +0x8d4970c4 dccp_v4_connect net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL +0x5a636665 nf_ct_tmpl_free net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x8eebde07 mlx4_get_is_vlan_offload_disabled drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x335cf3f7 hinic_support_fic drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x724f924c hinic_support_nic drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xc3aaa291 config_item_set_name fs/configfs/configfs EXPORT_SYMBOL +0x38a44c3e tcp_get_syncookie_mss vmlinux EXPORT_SYMBOL_GPL +0xf28404cf 
devlink_dpipe_header_ipv6 vmlinux EXPORT_SYMBOL +0xf6ee31a1 inet_pton_with_scope vmlinux EXPORT_SYMBOL +0xaed73a55 i2c_del_driver vmlinux EXPORT_SYMBOL +0x7450060d device_property_read_u64_array vmlinux EXPORT_SYMBOL_GPL +0x6cc5cccf device_property_read_u32_array vmlinux EXPORT_SYMBOL_GPL +0x1bbab909 device_property_read_u16_array vmlinux EXPORT_SYMBOL_GPL +0x943cfef5 drm_fb_helper_restore_fbdev_mode_unlocked vmlinux EXPORT_SYMBOL +0xeee3234e blk_queue_dma_drain vmlinux EXPORT_SYMBOL_GPL +0x7fd2fab2 dquot_release vmlinux EXPORT_SYMBOL +0x6a6cafd2 ring_buffer_read_page vmlinux EXPORT_SYMBOL_GPL +0x5de7447d __atomic_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x21a9d793 __gfn_to_pfn_memslot vmlinux EXPORT_SYMBOL_GPL +0x9cef495b LZ4_saveDictHC lib/lz4/lz4hc_compress EXPORT_SYMBOL +0xfa8f19fa vfio_pin_pages drivers/vfio/vfio EXPORT_SYMBOL +0xd2002790 nfs_generic_pgio fs/nfs/nfs EXPORT_SYMBOL_GPL +0x53737800 usb_hcd_setup_local_mem vmlinux EXPORT_SYMBOL_GPL +0xcafde0ae sas_slave_configure vmlinux EXPORT_SYMBOL_GPL +0x0d6fd66f drm_legacy_ioremapfree vmlinux EXPORT_SYMBOL +0x83793866 __dma_request_channel vmlinux EXPORT_SYMBOL_GPL +0xab49819e clk_hw_is_enabled vmlinux EXPORT_SYMBOL_GPL +0xfe916dc6 hex_dump_to_buffer vmlinux EXPORT_SYMBOL +0x404cd7f4 file_ra_state_init vmlinux EXPORT_SYMBOL_GPL +0x9138172c get_tree_mtd drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x432b8178 dm_array_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x258c9fdf mlx5_core_modify_cq_moderation drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x805b92a0 mlx4_unicast_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x59f96e35 usb_alloc_coherent vmlinux EXPORT_SYMBOL_GPL +0x9f635d8c crypto_larval_kill vmlinux EXPORT_SYMBOL_GPL +0xb0dd4f2b irq_set_default_host vmlinux EXPORT_SYMBOL_GPL +0xf553d1c5 nft_flowtable_lookup net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xecfe0a59 snd_rawmidi_kernel_release sound/core/snd-rawmidi EXPORT_SYMBOL +0xe7b52e5f mult_to_ib_rate drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5d100583 inet_csk_get_port vmlinux EXPORT_SYMBOL_GPL +0x329d3548 netdev_is_rx_handler_busy vmlinux EXPORT_SYMBOL_GPL +0xc277e891 hid_match_device vmlinux EXPORT_SYMBOL_GPL +0x4eb3c1dd i2c_clients_command vmlinux EXPORT_SYMBOL +0x642dcd84 touchscreen_parse_properties vmlinux EXPORT_SYMBOL +0x17e1afd7 sas_tlr_supported vmlinux EXPORT_SYMBOL_GPL +0x74b094ba drm_modeset_acquire_init vmlinux EXPORT_SYMBOL +0x03fa11c6 drm_mode_is_420_only vmlinux EXPORT_SYMBOL +0xbf636428 rdma_dim vmlinux EXPORT_SYMBOL +0x8b590374 kiocb_set_cancel_fn vmlinux EXPORT_SYMBOL +0x73f84434 add_to_page_cache vmlinux EXPORT_SYMBOL +0xeb3f8466 unregister_kprobe vmlinux EXPORT_SYMBOL_GPL +0xdd7f64f0 cpu_logical_map vmlinux EXPORT_SYMBOL_GPL +0xbf7f18ef ceph_pg_pool_flags net/ceph/libceph EXPORT_SYMBOL +0x4bbf47d3 capi20_get_profile drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x4d7c8690 mptscsih_ioc_reset drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x18eadc3c mlx5_notifier_unregister drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7887b249 mlx5_query_port_ib_proto_oper drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xc82c721f klist_remove vmlinux EXPORT_SYMBOL_GPL +0x6536be68 devlink_port_param_value_changed vmlinux EXPORT_SYMBOL_GPL +0xf7c38aab dev_get_flags vmlinux EXPORT_SYMBOL +0x21ea1b49 usb_get_status vmlinux EXPORT_SYMBOL_GPL +0x331342c0 crypto_sha256_update vmlinux EXPORT_SYMBOL +0x9d92f3ad __wait_on_bit_lock vmlinux EXPORT_SYMBOL 
+0xcca1814b l2tp_tunnel_register net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xabcb9b8d svc_auth_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xda3396e9 nf_ct_seq_offset net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x22f3cf93 ib_pack drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfc911fcf usb_remove_function drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x8eed985e fib_nl_delrule vmlinux EXPORT_SYMBOL_GPL +0xc6f64812 fib_nl_newrule vmlinux EXPORT_SYMBOL_GPL +0xbac58131 gen_new_estimator vmlinux EXPORT_SYMBOL +0x7cd6f042 cpufreq_get_current_driver vmlinux EXPORT_SYMBOL_GPL +0xbdaf7a60 input_class vmlinux EXPORT_SYMBOL_GPL +0x2fa0fbbd hisi_sas_sata_done vmlinux EXPORT_SYMBOL_GPL +0x7c61019b platform_unregister_drivers vmlinux EXPORT_SYMBOL_GPL +0xfc3b4246 acpi_bus_update_power vmlinux EXPORT_SYMBOL_GPL +0x30b0d3d2 pci_set_master vmlinux EXPORT_SYMBOL +0xa2cb7bc8 iov_iter_pipe vmlinux EXPORT_SYMBOL +0xcc7863b2 blk_mq_stop_hw_queue vmlinux EXPORT_SYMBOL +0x5510cde8 crypto_alg_mod_lookup vmlinux EXPORT_SYMBOL_GPL +0x36aed951 debugfs_create_ulong vmlinux EXPORT_SYMBOL_GPL +0x31330872 jbd2_journal_inode_ranged_wait vmlinux EXPORT_SYMBOL +0x94e0c153 freeze_super vmlinux EXPORT_SYMBOL +0xe9af0bd1 __devm_request_region vmlinux EXPORT_SYMBOL +0xd114fddf iw_cm_accept drivers/infiniband/core/iw_cm EXPORT_SYMBOL +0x440b4830 bch_btree_iter_next drivers/md/bcache/bcache EXPORT_SYMBOL +0x86c3b58f ndlc_send drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0x73128faa nfs_pageio_resend fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9e00c88b l3mdev_fib_table_by_index vmlinux EXPORT_SYMBOL_GPL +0xd02d37bb hid_connect vmlinux EXPORT_SYMBOL_GPL +0xab48bbab i2c_new_dummy_device vmlinux EXPORT_SYMBOL_GPL +0xe87c0613 usb_set_device_state vmlinux EXPORT_SYMBOL_GPL +0xce631c70 stmmac_probe_config_dt vmlinux EXPORT_SYMBOL_GPL +0x9be07b34 scsi_autopm_put_device vmlinux EXPORT_SYMBOL_GPL +0xa9c27232 scsi_autopm_get_device vmlinux EXPORT_SYMBOL_GPL +0x64a3e5a6 scsi_eh_finish_cmd vmlinux EXPORT_SYMBOL +0xb059996b drm_wait_one_vblank vmlinux EXPORT_SYMBOL +0x82929f99 vc_cons vmlinux EXPORT_SYMBOL +0xb33926f2 dma_async_device_unregister vmlinux EXPORT_SYMBOL +0xca21ebd3 bitmap_free vmlinux EXPORT_SYMBOL +0xd1a696a8 find_inode_nowait vmlinux EXPORT_SYMBOL +0xe3bd068a transport_free_session drivers/target/target_core_mod EXPORT_SYMBOL +0xf9c0b663 strlcat vmlinux EXPORT_SYMBOL +0x2e2b40d2 strncat vmlinux EXPORT_SYMBOL +0x6956f73a ata_common_sdev_attrs vmlinux EXPORT_SYMBOL_GPL +0x27371363 drm_add_edid_modes vmlinux EXPORT_SYMBOL +0x6913ce97 fsl8250_handle_irq vmlinux EXPORT_SYMBOL_GPL +0xebda9f66 devm_of_clk_del_provider vmlinux EXPORT_SYMBOL +0x9a0d7d99 acpi_kobj vmlinux EXPORT_SYMBOL_GPL +0x52f3910b gpiod_get_value vmlinux EXPORT_SYMBOL_GPL +0x3fbfa89c cpu_rmap_update vmlinux EXPORT_SYMBOL +0x8d863953 blk_queue_max_discard_sectors vmlinux EXPORT_SYMBOL +0xbb6f025a asymmetric_key_generate_id vmlinux EXPORT_SYMBOL_GPL +0xb3f7646e kthread_should_stop vmlinux EXPORT_SYMBOL +0x605d5bfa cache_line_size vmlinux EXPORT_SYMBOL_GPL +0x6df33caf nfnetlink_set_err net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xb82d1a76 mtd_lock_user_prot_reg drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x71f515db mddev_init drivers/md/md-mod EXPORT_SYMBOL_GPL +0x59dfd314 mlx4_config_vxlan_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x20598db6 mr_mfc_seq_idx vmlinux EXPORT_SYMBOL +0xd704acdb netdev_notice vmlinux EXPORT_SYMBOL +0xd70b3ad4 ata_pci_device_suspend vmlinux EXPORT_SYMBOL_GPL +0x4633b3d1 pm_generic_freeze_noirq vmlinux EXPORT_SYMBOL_GPL +0xb6190d2b 
blk_rq_map_kern vmlinux EXPORT_SYMBOL +0x42541abf sync_mapping_buffers vmlinux EXPORT_SYMBOL +0xae9a388a noop_invalidatepage vmlinux EXPORT_SYMBOL_GPL +0xeb1e7667 ceph_auth_is_authenticated net/ceph/libceph EXPORT_SYMBOL +0x401a2a1b target_lun_is_rdonly drivers/target/target_core_mod EXPORT_SYMBOL +0x0cce5352 mpt_raid_phys_disk_get_num_paths drivers/message/fusion/mptbase EXPORT_SYMBOL +0x23bed8cc mlx4_get_parav_qkey drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x37dc8559 inet_ehash_nolisten vmlinux EXPORT_SYMBOL_GPL +0x7878dcf9 genl_family_attrbuf vmlinux EXPORT_SYMBOL +0xb62cc701 usb_serial_handle_break vmlinux EXPORT_SYMBOL_GPL +0x85317d87 __scsi_add_device vmlinux EXPORT_SYMBOL +0x514b2392 drm_hdmi_vendor_infoframe_from_display_mode vmlinux EXPORT_SYMBOL +0xbc9c459b virtqueue_get_avail_addr vmlinux EXPORT_SYMBOL_GPL +0x97240d7c desc_to_gpio vmlinux EXPORT_SYMBOL_GPL +0x4c076b22 fat_scan vmlinux EXPORT_SYMBOL_GPL +0x522c9994 snd_pcm_hw_constraint_minmax sound/core/snd-pcm EXPORT_SYMBOL +0x9d83a396 datagram_poll vmlinux EXPORT_SYMBOL +0xa257347d __scsi_print_sense vmlinux EXPORT_SYMBOL +0x1c256b24 regmap_update_bits_base vmlinux EXPORT_SYMBOL_GPL +0xebd8d392 __tty_alloc_driver vmlinux EXPORT_SYMBOL +0x422784a3 gpiochip_irq_domain_deactivate vmlinux EXPORT_SYMBOL_GPL +0x4d4d7b79 blk_mq_map_queues vmlinux EXPORT_SYMBOL_GPL +0x1e9edfb7 seq_hlist_start_head_rcu vmlinux EXPORT_SYMBOL +0x1b1a9634 current_time vmlinux EXPORT_SYMBOL +0x13c8fd5d user_path_at_empty vmlinux EXPORT_SYMBOL +0xa384b7c8 __tcf_em_tree_match vmlinux EXPORT_SYMBOL +0xc28a85fc dev_mc_flush vmlinux EXPORT_SYMBOL +0x2bf4fcd0 dev_uc_flush vmlinux EXPORT_SYMBOL +0x363a418d usb_stor_Bulk_transport vmlinux EXPORT_SYMBOL_GPL +0x6d70c91f xhci_suspend vmlinux EXPORT_SYMBOL_GPL +0xc17e9946 usb_show_dynids vmlinux EXPORT_SYMBOL_GPL +0x6296f12a hisi_sas_controller_reset_done vmlinux EXPORT_SYMBOL_GPL +0xe8efbb51 drm_atomic_state_clear vmlinux EXPORT_SYMBOL +0xf2f96ade pinctrl_enable vmlinux EXPORT_SYMBOL_GPL +0xed61f6b3 security_release_secctx vmlinux EXPORT_SYMBOL +0x2d994605 security_inode_copy_up_xattr vmlinux EXPORT_SYMBOL +0xb4366fb5 jbd2_journal_check_available_features vmlinux EXPORT_SYMBOL +0xe5fa1ffc flush_old_exec vmlinux EXPORT_SYMBOL +0x862258db timecounter_init vmlinux EXPORT_SYMBOL_GPL +0x98be1702 dma_free_attrs vmlinux EXPORT_SYMBOL +0x5ec65d24 llc_sap_close net/llc/llc EXPORT_SYMBOL +0x429d48f5 garp_request_leave net/802/garp EXPORT_SYMBOL_GPL +0x25443d81 mlx4_set_vf_link_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xc55e9466 hinic_ppf_idx drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8efa8aee be_roce_register_driver drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL +0x17e58b72 st21nfca_hci_enable_se drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x53b8eeeb inet_addr_type vmlinux EXPORT_SYMBOL +0x7b0fb5e6 inet_recvmsg vmlinux EXPORT_SYMBOL +0x55e31703 ethtool_convert_link_mode_to_legacy_u32 vmlinux EXPORT_SYMBOL +0x917c5fec stmmac_get_platform_resources vmlinux EXPORT_SYMBOL_GPL +0x26c90686 mdiobus_unregister vmlinux EXPORT_SYMBOL +0x580669f6 ata_bmdma_port_start32 vmlinux EXPORT_SYMBOL_GPL +0xe123f3d9 dma_fence_release vmlinux EXPORT_SYMBOL +0xfeb5b745 drm_client_register vmlinux EXPORT_SYMBOL +0x08c73234 __tracepoint_rpm_suspend vmlinux EXPORT_SYMBOL_GPL +0x92db8f68 do_trace_rcu_torture_read vmlinux EXPORT_SYMBOL_GPL +0x4fd67fc3 ceph_auth_update_authorizer net/ceph/libceph EXPORT_SYMBOL +0x457d778e nfs_commitdata_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb192c76c 
phy_detach vmlinux EXPORT_SYMBOL +0x8495ef4e phy_driver_is_genphy_10g vmlinux EXPORT_SYMBOL_GPL +0x585532ab mii_link_ok vmlinux EXPORT_SYMBOL +0xed38c848 __tracepoint_rpm_idle vmlinux EXPORT_SYMBOL_GPL +0xa4b9d947 nf_ct_ext_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL +0x89142682 transport_deregister_session drivers/target/target_core_mod EXPORT_SYMBOL +0x2ff9bd9b mlx5_alloc_bfreg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x28db75a3 pnfs_unregister_layoutdriver fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xf6f2fac2 fscache_fsdef_index fs/fscache/fscache EXPORT_SYMBOL +0x2dce2f1c __irq_regs vmlinux EXPORT_SYMBOL +0xe522ef2f wimax_msg_data_len vmlinux EXPORT_SYMBOL_GPL +0x4faf6057 mr_fill_mroute vmlinux EXPORT_SYMBOL +0xb172ffac efivars_sysfs_init vmlinux EXPORT_SYMBOL_GPL +0xa96941e9 gov_attr_set_put vmlinux EXPORT_SYMBOL_GPL +0xee47bf7a gov_attr_set_get vmlinux EXPORT_SYMBOL_GPL +0x48d88a03 wakeup_source_remove vmlinux EXPORT_SYMBOL_GPL +0xb2dcbd40 stop_tty vmlinux EXPORT_SYMBOL +0x2ee7c52b btree_visitor vmlinux EXPORT_SYMBOL_GPL +0x66e0d568 dquot_get_dqblk vmlinux EXPORT_SYMBOL +0x856a5ef3 des3_ede_encrypt lib/crypto/libdes EXPORT_SYMBOL_GPL +0xd6636ca6 rdma_addr_size_in6 drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2da62542 nfs_pageio_reset_write_mds fs/nfs/nfs EXPORT_SYMBOL_GPL +0x238d7687 ipv6_push_frag_opts vmlinux EXPORT_SYMBOL +0x4725eda1 ir_raw_encode_carrier vmlinux EXPORT_SYMBOL +0x42578e80 acpi_get_type vmlinux EXPORT_SYMBOL +0x05f97904 unlock_page_memcg vmlinux EXPORT_SYMBOL +0xe7afde0e hinic_get_netdev_by_pcidev drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7730f22d drbd_conn_str drivers/block/drbd/drbd EXPORT_SYMBOL +0xaba79a6f xfrm_policy_destroy vmlinux EXPORT_SYMBOL +0xcac8f3b0 inet_del_offload vmlinux EXPORT_SYMBOL +0x9cdfb3f7 sysctl_fb_tunnels_only_for_init_net vmlinux EXPORT_SYMBOL +0xdf7b28b0 devm_hwmon_device_register_with_info vmlinux EXPORT_SYMBOL_GPL +0x1c9f9495 hisi_sas_free vmlinux EXPORT_SYMBOL_GPL +0x79c976ad device_connection_add vmlinux EXPORT_SYMBOL_GPL +0x18ef6df6 __drm_atomic_helper_plane_duplicate_state vmlinux EXPORT_SYMBOL +0xf57d7802 gpiod_get_index_optional vmlinux EXPORT_SYMBOL_GPL +0x0cad4570 security_kernel_load_data vmlinux EXPORT_SYMBOL_GPL +0x03659bac jbd2_journal_stop vmlinux EXPORT_SYMBOL +0xeefd7a9d __trace_note_message vmlinux EXPORT_SYMBOL_GPL +0x74cde968 p9_client_unlinkat net/9p/9pnet EXPORT_SYMBOL +0xab701981 snd_pcm_hw_constraint_ratnums sound/core/snd-pcm EXPORT_SYMBOL +0x7d0aa051 snd_ctl_replace sound/core/snd EXPORT_SYMBOL +0xc42d9ec1 capi20_manufacturer drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xa6943929 hinic_update_sq_pi drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x74778a80 ipmi_get_my_LUN drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xe6ef8d4c twofish_setkey crypto/twofish_common EXPORT_SYMBOL_GPL +0x1ac5d3cb strcspn vmlinux EXPORT_SYMBOL +0x8382be3e sysctl_tcp_init_rto vmlinux EXPORT_SYMBOL +0x0333f705 devlink_is_reload_failed vmlinux EXPORT_SYMBOL_GPL +0x3ada832e neigh_ifdown vmlinux EXPORT_SYMBOL +0x702c8377 ahci_platform_ops vmlinux EXPORT_SYMBOL_GPL +0x6cf972a8 zero_fill_bio_iter vmlinux EXPORT_SYMBOL +0x62632161 freq_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL +0x95680a0b md_allow_write drivers/md/md-mod EXPORT_SYMBOL_GPL +0x0faf4ed5 mddev_create_wb_pool drivers/md/md-mod EXPORT_SYMBOL_GPL +0x7bb86d27 hinic_free_qp_ctxts drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xc9fcd097 thermal_zone_unbind_cooling_device vmlinux EXPORT_SYMBOL_GPL +0x191ccaad 
__tracepoint_iscsi_dbg_sw_tcp vmlinux EXPORT_SYMBOL_GPL +0x3be4d56d ata_cable_ignore vmlinux EXPORT_SYMBOL_GPL +0x34700491 devm_get_free_pages vmlinux EXPORT_SYMBOL_GPL +0x5e798ffb divider_get_val vmlinux EXPORT_SYMBOL_GPL +0x3ab8d82d pci_scan_bridge vmlinux EXPORT_SYMBOL +0x0936fd52 pci_generic_config_read vmlinux EXPORT_SYMBOL_GPL +0xa1ea667c bio_integrity_clone vmlinux EXPORT_SYMBOL +0x65f8a9ce blk_mq_delay_kick_requeue_list vmlinux EXPORT_SYMBOL +0x0a164955 jbd2_journal_extend vmlinux EXPORT_SYMBOL +0xa3c00c06 memcg_sockets_enabled_key vmlinux EXPORT_SYMBOL +0x7fb90806 ip_md_tunnel_xmit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xc9dd5186 skb_copy_and_csum_datagram_msg vmlinux EXPORT_SYMBOL +0x0ef09b2b hid_parse_report vmlinux EXPORT_SYMBOL_GPL +0xf7ba9332 usb_serial_resume vmlinux EXPORT_SYMBOL +0x9c629295 ehci_resume vmlinux EXPORT_SYMBOL_GPL +0x0e789807 drm_invalid_op vmlinux EXPORT_SYMBOL +0xe3948ff4 acpi_walk_dep_device_list vmlinux EXPORT_SYMBOL_GPL +0x9ae4191f sbitmap_queue_init_node vmlinux EXPORT_SYMBOL_GPL +0x060ba97c gen_pool_free_owner vmlinux EXPORT_SYMBOL +0x5fc72f0e alloc_pages_exact vmlinux EXPORT_SYMBOL +0x685e31ca groups_sort vmlinux EXPORT_SYMBOL +0xfa9b6df3 ceph_msg_new net/ceph/libceph EXPORT_SYMBOL +0xa13acdb8 svc_pool_map_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb312d0c4 svc_pool_map_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xebd1ca96 core_allocate_nexus_loss_ua drivers/target/target_core_mod EXPORT_SYMBOL +0xbb9d4fce nvme_wait_reset drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xb0e27c23 xdp_rxq_info_unused vmlinux EXPORT_SYMBOL_GPL +0xf79f2a76 ata_timing_compute vmlinux EXPORT_SYMBOL_GPL +0x29afdf46 regmap_get_device vmlinux EXPORT_SYMBOL_GPL +0x19a146a7 drm_connector_set_tile_property vmlinux EXPORT_SYMBOL +0x3fd400d1 drm_atomic_helper_plane_reset vmlinux EXPORT_SYMBOL +0xdee38994 devm_regulator_bulk_get vmlinux EXPORT_SYMBOL_GPL +0xd8ecb6b3 regulator_get_hardware_vsel_register vmlinux EXPORT_SYMBOL_GPL +0x056dd2a2 virtio_break_device vmlinux EXPORT_SYMBOL_GPL +0x2612fc84 of_clk_hw_simple_get vmlinux EXPORT_SYMBOL_GPL +0xb0495c23 fs_lookup_param vmlinux EXPORT_SYMBOL +0xa647e4bf dccp_rcv_established net/dccp/dccp EXPORT_SYMBOL_GPL +0x04279b9e svc_find_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xec7e42a4 svc_process net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x55f5b658 rpc_force_rebind net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5a8d7874 register_sound_special sound/soundcore EXPORT_SYMBOL +0xd71b5e01 parport_unregister_device drivers/parport/parport EXPORT_SYMBOL +0x2ea739f2 hinic_support_fcoe drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xa2224c1f nf_register_net_hook vmlinux EXPORT_SYMBOL +0xa7936265 of_parse_phandle_with_args vmlinux EXPORT_SYMBOL +0x9a2a5639 ttm_bo_swapout_all vmlinux EXPORT_SYMBOL +0x2790e4c5 drm_gem_objects_lookup vmlinux EXPORT_SYMBOL +0x0317767f gpiod_to_chip vmlinux EXPORT_SYMBOL_GPL +0x160651f0 __nla_reserve vmlinux EXPORT_SYMBOL +0xe3ec2f2b alloc_chrdev_region vmlinux EXPORT_SYMBOL +0x14915823 inc_zone_page_state vmlinux EXPORT_SYMBOL +0xb11625b9 cpu_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xe9e3b427 rpc_killall_tasks net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x92e971e2 mpt_clear_taskmgmt_in_progress_flag drivers/message/fusion/mptbase EXPORT_SYMBOL +0xca9360b5 rb_next vmlinux EXPORT_SYMBOL +0x04e27719 xt_compat_flush_offsets vmlinux EXPORT_SYMBOL_GPL +0xf857f1d6 netdev_bind_sb_channel_queue vmlinux EXPORT_SYMBOL +0x88fa8083 skb_checksum vmlinux EXPORT_SYMBOL +0xe4f6c674 kfree_skb_list vmlinux EXPORT_SYMBOL +0x4e9978c3 
ohci_setup vmlinux EXPORT_SYMBOL_GPL +0xa1fefe6a drm_dp_psr_setup_time vmlinux EXPORT_SYMBOL +0x4021acdd iommu_dev_enable_feature vmlinux EXPORT_SYMBOL_GPL +0x9570532e pci_pme_capable vmlinux EXPORT_SYMBOL +0x2f51e40a fscrypt_fname_disk_to_usr vmlinux EXPORT_SYMBOL +0xd5769828 inode_permission vmlinux EXPORT_SYMBOL +0x9c730612 buffer_migrate_page vmlinux EXPORT_SYMBOL +0x92540fbf finish_wait vmlinux EXPORT_SYMBOL +0x7cc01082 svc_xprt_init net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x52d7b2fd llc_sap_list net/llc/llc EXPORT_SYMBOL +0xa58a0cc4 vfio_add_group_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xf8818701 gether_get_ifname drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x2dddca19 nfs4_pnfs_ds_add fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xca6b8477 nfs_get_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x61b7b126 simple_strtoull vmlinux EXPORT_SYMBOL +0x6b4024b4 cpumask_any_but vmlinux EXPORT_SYMBOL +0xfd577a02 xdp_return_frame_rx_napi vmlinux EXPORT_SYMBOL_GPL +0xf4a7d998 phy_start_aneg vmlinux EXPORT_SYMBOL +0xae485f37 scsi_print_result vmlinux EXPORT_SYMBOL +0x63c4d61f __bitmap_weight vmlinux EXPORT_SYMBOL +0x619cb7dd simple_read_from_buffer vmlinux EXPORT_SYMBOL +0x3ae815fd clockevent_delta2ns vmlinux EXPORT_SYMBOL_GPL +0x1262b439 irq_domain_simple_ops vmlinux EXPORT_SYMBOL_GPL +0x2f3dcecb lc_index_of lib/lru_cache EXPORT_SYMBOL +0x992e21d6 nf_defrag_ipv6_enable net/ipv6/netfilter/nf_defrag_ipv6 EXPORT_SYMBOL_GPL +0x14378bf2 nf_defrag_ipv4_enable net/ipv4/netfilter/nf_defrag_ipv4 EXPORT_SYMBOL_GPL +0x2da27c13 target_complete_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0x0a7e77f3 dm_btree_cursor_end drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x42a77821 mlx5_cmd_create_vport_lag drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xc6ecdd6d mlx4_SET_PORT_user_mtu drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x3d4a0a89 nf_checksum vmlinux EXPORT_SYMBOL_GPL +0x420f3d01 nvmem_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x45006cee default_red vmlinux EXPORT_SYMBOL +0x6632f2bb clk_mux_val_to_index vmlinux EXPORT_SYMBOL_GPL +0x0397edd5 fb_edid_to_monspecs vmlinux EXPORT_SYMBOL +0x9071219a pci_d3cold_enable vmlinux EXPORT_SYMBOL_GPL +0x786c2487 gpiochip_add_pin_range vmlinux EXPORT_SYMBOL_GPL +0xa681fe88 generate_random_uuid vmlinux EXPORT_SYMBOL +0xe1639925 get_mem_cgroup_from_page vmlinux EXPORT_SYMBOL +0x182ec2bf dccp_ackvec_parsed_add net/dccp/dccp EXPORT_SYMBOL_GPL +0x880c6ee6 nf_ct_tcp_seqadj_set net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x1e382318 __tracepoint_bcache_btree_node_split drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x1321ac8a crypto_transfer_hash_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL +0x2f7912d1 xfrm_policy_hash_rebuild vmlinux EXPORT_SYMBOL +0x95f4cd16 get_net_ns_by_pid vmlinux EXPORT_SYMBOL_GPL +0x45850268 mbox_chan_received_data vmlinux EXPORT_SYMBOL_GPL +0x404874d5 _dev_warn vmlinux EXPORT_SYMBOL +0x3966a636 drm_gem_cma_prime_vmap vmlinux EXPORT_SYMBOL_GPL +0xf2bf26bd drm_gem_cma_prime_mmap vmlinux EXPORT_SYMBOL_GPL +0x8a4f3e82 fb_deferred_io_cleanup vmlinux EXPORT_SYMBOL_GPL +0x182ce40d vfs_statfs vmlinux EXPORT_SYMBOL +0xe3f86de3 target_submit_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0x804f5ff3 af_alg_register_type crypto/af_alg EXPORT_SYMBOL_GPL +0x47d4e987 __mdiobus_read vmlinux EXPORT_SYMBOL +0xc0fd77a3 _dev_alert vmlinux EXPORT_SYMBOL +0x388ca95b mipi_dsi_driver_register_full vmlinux EXPORT_SYMBOL +0xac908223 drm_writeback_get_out_fence vmlinux EXPORT_SYMBOL +0x235c8c2b 
drm_fb_helper_output_poll_changed vmlinux EXPORT_SYMBOL +0x607c3c1c drm_dp_update_payload_part2 vmlinux EXPORT_SYMBOL +0xb20d51d0 drm_dp_update_payload_part1 vmlinux EXPORT_SYMBOL +0x8384647a acpi_map_pxm_to_online_node vmlinux EXPORT_SYMBOL +0x438610bd security_tun_dev_alloc_security vmlinux EXPORT_SYMBOL +0x8a4f2dc2 list_lru_walk_one vmlinux EXPORT_SYMBOL_GPL +0x7afe324e halt_poll_ns_grow vmlinux EXPORT_SYMBOL_GPL +0x81b7f8f8 xdr_buf_trim net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb00c74a3 iscsit_sequence_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x26d23e16 mlx5_accel_ipsec_device_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xe02d4619 async_xor_val crypto/async_tx/async_xor EXPORT_SYMBOL_GPL +0xd430ebee nfs4_pnfs_ds_connect fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x47cd52e6 ip6_datagram_send_ctl vmlinux EXPORT_SYMBOL_GPL +0x43912da2 xfrm_input_register_afinfo vmlinux EXPORT_SYMBOL +0x0cdae61b udp_ioctl vmlinux EXPORT_SYMBOL +0x5fa5c734 devlink_reload_disable vmlinux EXPORT_SYMBOL_GPL +0x57d5853c i2c_new_probed_device vmlinux EXPORT_SYMBOL_GPL +0x95be28ba __mdiobus_write vmlinux EXPORT_SYMBOL +0x302f8d5d genphy_loopback vmlinux EXPORT_SYMBOL +0x0497328b tty_port_default_client_ops vmlinux EXPORT_SYMBOL_GPL +0x5ac66957 __pci_complete_power_transition vmlinux EXPORT_SYMBOL_GPL +0xceb1f126 mpi_read_raw_data vmlinux EXPORT_SYMBOL_GPL +0xa9f2955a __nla_reserve_nohdr vmlinux EXPORT_SYMBOL +0x7dda30af unregister_tracepoint_module_notifier vmlinux EXPORT_SYMBOL_GPL +0x13c19ee7 alarm_forward_now vmlinux EXPORT_SYMBOL_GPL +0xfbb0f0bb rdma_find_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd51c29f1 dm_sm_disk_create drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xe545a9fa mlx5_core_query_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xcd1a68d0 rtnetlink_put_metrics vmlinux EXPORT_SYMBOL +0x5ed90adc int_to_scsilun vmlinux EXPORT_SYMBOL +0x2d13cbd9 blk_queue_max_write_same_sectors vmlinux EXPORT_SYMBOL +0xb1936dba may_umount vmlinux EXPORT_SYMBOL +0xf61baa65 pids_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xc06e5f67 rpc_switch_client_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xca54ea36 md_integrity_add_rdev drivers/md/md-mod EXPORT_SYMBOL +0x9a83b497 iscsi_tcp_r2tpool_alloc drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x699fe53e iscsi_get_discovery_parent_name vmlinux EXPORT_SYMBOL_GPL +0x0dea066a drm_dev_unplug vmlinux EXPORT_SYMBOL +0x6ef49034 iommu_detach_device vmlinux EXPORT_SYMBOL_GPL +0x07f6ca2a tty_buffer_unlock_exclusive vmlinux EXPORT_SYMBOL_GPL +0x142db01d regulator_map_voltage_linear_range vmlinux EXPORT_SYMBOL_GPL +0x2a2e12e6 pcix_get_max_mmrbc vmlinux EXPORT_SYMBOL +0xf8e13b32 block_read_full_page vmlinux EXPORT_SYMBOL +0xec79755c console_drivers vmlinux EXPORT_SYMBOL_GPL +0xdfc367f7 hinic_func_type drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x45e6f753 nfs_mark_client_ready fs/nfs/nfs EXPORT_SYMBOL_GPL +0xcffa9c4f devlink_dpipe_table_register vmlinux EXPORT_SYMBOL_GPL +0x58c3345f bus_sort_breadthfirst vmlinux EXPORT_SYMBOL_GPL +0x0ecb0d63 pci_alloc_host_bridge vmlinux EXPORT_SYMBOL +0x9fe939e1 mpi_powm vmlinux EXPORT_SYMBOL_GPL +0x37a8d476 dec_node_page_state vmlinux EXPORT_SYMBOL +0xff091a91 ipt_alloc_initial_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL_GPL +0x19e1fbec ct_sip_get_header net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0xb003d11f rdma_replace_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x19eb9434 hnae_ae_register 
drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0xb589a2dd nfs_open fs/nfs/nfs EXPORT_SYMBOL_GPL +0xf097d619 tcp_v4_connect vmlinux EXPORT_SYMBOL +0x9d72dd58 tcp_sync_mss vmlinux EXPORT_SYMBOL +0xb8acff05 devlink_resource_occ_get_unregister vmlinux EXPORT_SYMBOL_GPL +0xd0931854 ir_raw_event_store_with_timeout vmlinux EXPORT_SYMBOL_GPL +0x6918ea43 rc_free_device vmlinux EXPORT_SYMBOL_GPL +0x26a6eeac sas_port_delete vmlinux EXPORT_SYMBOL +0x9afd0931 alloc_dax vmlinux EXPORT_SYMBOL_GPL +0x72538e2e regmap_raw_write vmlinux EXPORT_SYMBOL_GPL +0xd06f10a3 drm_dev_init vmlinux EXPORT_SYMBOL +0xe8a034df drm_dev_exit vmlinux EXPORT_SYMBOL +0x33fd1739 tpm_chip_alloc vmlinux EXPORT_SYMBOL_GPL +0x74dc32a4 devm_regulator_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x5157831d regulator_bulk_disable vmlinux EXPORT_SYMBOL_GPL +0xc42dcb99 acpi_evaluate_ost vmlinux EXPORT_SYMBOL +0x3fe2ccbe memweight vmlinux EXPORT_SYMBOL +0x3393046f jbd2_journal_ack_err vmlinux EXPORT_SYMBOL +0xd0c05159 emergency_restart vmlinux EXPORT_SYMBOL_GPL +0x3d73a797 p9_errstr2errno net/9p/9pnet EXPORT_SYMBOL +0xd3991a6c snd_rawmidi_input_params sound/core/snd-rawmidi EXPORT_SYMBOL +0x1e3f728d dm_block_data drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x32cfd4e1 md_error drivers/md/md-mod EXPORT_SYMBOL +0x072460c4 mlx5_fill_page_frag_array drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x28c76cc5 netdev_upper_dev_link vmlinux EXPORT_SYMBOL +0x72bd1ee1 serio_rescan vmlinux EXPORT_SYMBOL +0x5fcab59b regmap_field_read vmlinux EXPORT_SYMBOL_GPL +0x42f728aa mctrl_gpio_get_outputs vmlinux EXPORT_SYMBOL_GPL +0xd0ea1669 reset_control_get_count vmlinux EXPORT_SYMBOL_GPL +0x1d318001 clk_fixed_factor_ops vmlinux EXPORT_SYMBOL_GPL +0x479f7d4b clk_bulk_disable vmlinux EXPORT_SYMBOL_GPL +0x4a420d09 acpi_bus_detach_private_data vmlinux EXPORT_SYMBOL_GPL +0x1c19ab42 framebuffer_alloc vmlinux EXPORT_SYMBOL +0xdc980692 dw_pcie_host_init vmlinux EXPORT_SYMBOL_GPL +0xfcb926cd kstrtouint_from_user vmlinux EXPORT_SYMBOL +0xc8cf9a87 rds_trans_register net/rds/rds EXPORT_SYMBOL_GPL +0x86be7924 dccp_packet_name net/dccp/dccp EXPORT_SYMBOL_GPL +0xc7be5110 mtd_panic_write drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x38efaf5a dm_region_hash_destroy drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x24c79556 hinic_ovs_set_vf_nic_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xb8a309d6 xfrm_policy_byid vmlinux EXPORT_SYMBOL +0x061a5fb9 mbox_free_channel vmlinux EXPORT_SYMBOL_GPL +0x6ccd549d of_io_request_and_map vmlinux EXPORT_SYMBOL +0x46eaef3b cpufreq_driver_target vmlinux EXPORT_SYMBOL_GPL +0x9d47be80 dm_post_suspending vmlinux EXPORT_SYMBOL_GPL +0x7f622d62 tpm_transmit_cmd vmlinux EXPORT_SYMBOL_GPL +0x6b08d892 amba_apb_device_add vmlinux EXPORT_SYMBOL_GPL +0x9f116919 pci_prepare_to_sleep vmlinux EXPORT_SYMBOL +0x38ae1486 sbitmap_any_bit_clear vmlinux EXPORT_SYMBOL_GPL +0x1c36d695 ahash_register_instance vmlinux EXPORT_SYMBOL_GPL +0x679e9f54 load_nls_default vmlinux EXPORT_SYMBOL +0x6694d00b nonseekable_open vmlinux EXPORT_SYMBOL +0xab09c1c9 kmem_cache_create vmlinux EXPORT_SYMBOL +0xfe53ac04 queue_work_node vmlinux EXPORT_SYMBOL_GPL +0xf30be85a kvm_vcpu_read_guest_atomic vmlinux EXPORT_SYMBOL_GPL +0x57e16c3e dm_rh_get_state drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x0be67537 dm_btree_walk drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xf2878302 mlxsw_afa_create drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xbeac05cd mlxsw_afk_create 
drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x88389a84 mlx4_mr_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x7f3407a7 fc_seq_start_next drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x4fe11d70 nvme_reset_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x4b313ea1 inet_get_local_port_range vmlinux EXPORT_SYMBOL +0xdac2af17 devm_nvmem_device_get vmlinux EXPORT_SYMBOL_GPL +0x09ed6b80 devm_nvmem_device_put vmlinux EXPORT_SYMBOL_GPL +0xa99defc1 of_graph_get_endpoint_count vmlinux EXPORT_SYMBOL +0x0e0d768c ohci_restart vmlinux EXPORT_SYMBOL_GPL +0x768b612c phy_get_eee_err vmlinux EXPORT_SYMBOL +0x25954896 drm_self_refresh_helper_init vmlinux EXPORT_SYMBOL +0xf77337a1 hdmi_audio_infoframe_check vmlinux EXPORT_SYMBOL +0x426d2ceb pci_bus_claim_resources vmlinux EXPORT_SYMBOL +0xd0d3f0a4 gen_pool_avail vmlinux EXPORT_SYMBOL_GPL +0xd329412d __seq_open_private vmlinux EXPORT_SYMBOL +0xa5e9158f seq_escape vmlinux EXPORT_SYMBOL +0xd6f50cf7 xfrm_ealg_get_byname net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x4c973131 ubi_open_volume drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0xcf38f4fe open_candev drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xf7c448cc mlx5_core_destroy_sq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x09844db3 mlx5_core_destroy_rq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xff260e26 kpatch_shadow_get kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL_GPL +0x559b27f8 xdp_do_flush_map vmlinux EXPORT_SYMBOL_GPL +0xeb10e240 __sk_queue_drop_skb vmlinux EXPORT_SYMBOL +0x1c636fd7 sock_alloc_file vmlinux EXPORT_SYMBOL +0xe15efd7c of_mdiobus_register vmlinux EXPORT_SYMBOL +0x5eb24829 dm_shift_arg vmlinux EXPORT_SYMBOL +0x58efbb08 usb_stor_resume vmlinux EXPORT_SYMBOL_GPL +0xe5dc6da3 device_match_any vmlinux EXPORT_SYMBOL_GPL +0xc5a7285b drm_client_dev_hotplug vmlinux EXPORT_SYMBOL +0xe4bda8e5 __generic_file_write_iter vmlinux EXPORT_SYMBOL +0xcbe73e76 mlx4_pd_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x22b90986 fc_exch_mgr_del drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x5a29b55d fc_exch_mgr_add drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x08f272e1 nfs_access_set_mask fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9592944d xfrm_lookup vmlinux EXPORT_SYMBOL +0x15510a89 devlink_fmsg_binary_put vmlinux EXPORT_SYMBOL_GPL +0xb939f6d1 skb_store_bits vmlinux EXPORT_SYMBOL +0x5def0078 ata_sas_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL +0xac08cbfb dma_resv_fini vmlinux EXPORT_SYMBOL +0x870b53e9 eeprom_93cx6_write vmlinux EXPORT_SYMBOL_GPL +0xa977ad06 page_cache_sync_readahead vmlinux EXPORT_SYMBOL_GPL +0x7681946c unregister_pm_notifier vmlinux EXPORT_SYMBOL_GPL +0x694edbe2 kvm_vcpu_read_guest_page vmlinux EXPORT_SYMBOL_GPL +0x65e01af9 __sync_icache_dcache vmlinux EXPORT_SYMBOL_GPL +0xc418b801 nfc_send_to_raw_sock net/nfc/nfc EXPORT_SYMBOL +0x47894fb9 unregister_ip_vs_pe net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0xe2e2b008 nft_fib_init net/netfilter/nft_fib EXPORT_SYMBOL_GPL +0xc0d7df85 dm_bufio_new drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0xcfe06ca4 mlx4_set_admin_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6c19b08f inet_unregister_protosw vmlinux EXPORT_SYMBOL +0x81c0c9bd cpufreq_dbs_governor_stop vmlinux EXPORT_SYMBOL_GPL +0xed93ab19 i2c_smbus_read_byte vmlinux EXPORT_SYMBOL +0x33683824 i2c_probe_func_quick_read vmlinux EXPORT_SYMBOL_GPL +0xd9b7c026 iscsi_scan_finished vmlinux EXPORT_SYMBOL_GPL +0x6520fec2 __tracepoint_iscsi_dbg_session vmlinux EXPORT_SYMBOL_GPL +0xb9af4815 _dev_info vmlinux 
EXPORT_SYMBOL +0xa1ed9c8b add_hwgenerator_randomness vmlinux EXPORT_SYMBOL_GPL +0x6dcf857f uuid_null vmlinux EXPORT_SYMBOL +0xd6d0ea88 __posix_acl_chmod vmlinux EXPORT_SYMBOL +0x1c798d9f for_each_kernel_tracepoint vmlinux EXPORT_SYMBOL_GPL +0x37963dbd svc_xprt_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7cf7b1bb led_classdev_register_ext vmlinux EXPORT_SYMBOL_GPL +0xd67a4b00 devres_for_each_res vmlinux EXPORT_SYMBOL_GPL +0x4323a319 device_remove_file_self vmlinux EXPORT_SYMBOL_GPL +0x7be01d67 drm_plane_create_alpha_property vmlinux EXPORT_SYMBOL +0x283d6875 pci_alloc_dev vmlinux EXPORT_SYMBOL +0x1f9ba4f0 blkcipher_walk_phys vmlinux EXPORT_SYMBOL_GPL +0x7da9e8ce security_inet_conn_established vmlinux EXPORT_SYMBOL +0x3dfc897c seq_hlist_start_head vmlinux EXPORT_SYMBOL +0x68171f0a clocksource_change_rating vmlinux EXPORT_SYMBOL +0x49ed86a0 ZSTD_endStream lib/zstd/zstd_compress EXPORT_SYMBOL +0x8aacd5ab rds_trans_unregister net/rds/rds EXPORT_SYMBOL_GPL +0x94adae52 dccp_v4_send_check net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL +0x513b1ba1 ip6_tnl_rcv_ctl net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL +0xe75e7ee3 snd_pcm_stream_lock sound/core/snd-pcm EXPORT_SYMBOL_GPL +0x2dacd348 rdma_create_qp drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xf9a50d20 rdma_create_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x8762619a can_len2dlc drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x1cb8f858 mlxsw_reg_trans_query drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x43707c6e mlx4_read_clock drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xdbca5ae8 nfcmrvl_nci_register_dev drivers/nfc/nfcmrvl/nfcmrvl EXPORT_SYMBOL_GPL +0xbfd6782f nfs_file_fsync fs/nfs/nfs EXPORT_SYMBOL_GPL +0x39991865 icmp_global_allow vmlinux EXPORT_SYMBOL +0x0cf217e0 peernet2id_alloc vmlinux EXPORT_SYMBOL_GPL +0x97105282 usb_stor_post_reset vmlinux EXPORT_SYMBOL_GPL +0xd36760ef __usb_get_extra_descriptor vmlinux EXPORT_SYMBOL_GPL +0x87b2b34e __tracepoint_attach_device_to_domain vmlinux EXPORT_SYMBOL_GPL +0xf0e5bc48 pci_hp_create_module_link vmlinux EXPORT_SYMBOL_GPL +0x5bf3dd02 pci_bus_assign_resources vmlinux EXPORT_SYMBOL +0xfab14bba crypto_aead_decrypt vmlinux EXPORT_SYMBOL_GPL +0xc2e1f1ef crypto_aead_encrypt vmlinux EXPORT_SYMBOL_GPL +0xbf0fee80 mark_buffer_write_io_error vmlinux EXPORT_SYMBOL +0x77c1cb88 find_get_entry vmlinux EXPORT_SYMBOL +0xa389a49a profile_event_register vmlinux EXPORT_SYMBOL_GPL +0x774d1836 nf_tproxy_get_sock_v6 net/ipv6/netfilter/nf_tproxy_ipv6 EXPORT_SYMBOL_GPL +0x50b9b989 __nf_conntrack_helper_find net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xf4770c32 gether_register_netdev drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x7674b73b ppp_input_error drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0xad1b6248 ppp_unit_number drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0xa731e8a2 can_change_mtu drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0xbbc4ef97 ocfs2_stack_supports_plocks fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x451a5643 nfs_generic_pg_test fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa77c14b2 nfs_init_server_rpcclient fs/nfs/nfs EXPORT_SYMBOL_GPL +0x4648b2aa phy_gbit_fibre_features vmlinux EXPORT_SYMBOL_GPL +0xa89b9ad9 drm_dev_enter vmlinux EXPORT_SYMBOL +0x615911d7 __bitmap_set vmlinux EXPORT_SYMBOL +0x2f4013e9 sysfs_rename_link_ns vmlinux EXPORT_SYMBOL_GPL +0x7daece67 quota_send_warning vmlinux EXPORT_SYMBOL +0xd1cf68ba __dquot_transfer vmlinux EXPORT_SYMBOL +0x1a6bf28f fsnotify_get_cookie vmlinux EXPORT_SYMBOL_GPL +0xc74470a0 lock_two_nondirectories vmlinux EXPORT_SYMBOL +0x6869c8eb 
unregister_kprobes vmlinux EXPORT_SYMBOL_GPL +0x5d1945d6 p9_client_link net/9p/9pnet EXPORT_SYMBOL +0x5fc81396 ps2_init drivers/input/serio/libps2 EXPORT_SYMBOL +0xeff93820 can_rx_offload_queue_sorted drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x6e271b9c fc_exch_init drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x71c8b74e dcb_setapp vmlinux EXPORT_SYMBOL +0x7b3d7695 dcb_getapp vmlinux EXPORT_SYMBOL +0xe013c12d register_netdevice vmlinux EXPORT_SYMBOL +0x05489621 usb_serial_generic_open vmlinux EXPORT_SYMBOL_GPL +0xb909573c ahci_reset_em vmlinux EXPORT_SYMBOL_GPL +0x86c961b3 __set_dax_synchronous vmlinux EXPORT_SYMBOL_GPL +0xb2cfe14c drm_universal_plane_init vmlinux EXPORT_SYMBOL +0x483488c5 drm_connector_update_edid_property vmlinux EXPORT_SYMBOL +0x7027f6fc drm_gem_create_mmap_offset_size vmlinux EXPORT_SYMBOL +0xd2851b24 drm_primary_helper_funcs vmlinux EXPORT_SYMBOL +0x23c5d9b5 user_describe vmlinux EXPORT_SYMBOL_GPL +0x0cba4061 blkdev_put vmlinux EXPORT_SYMBOL +0xee2704ea blkdev_get vmlinux EXPORT_SYMBOL +0xda3ea271 finish_no_open vmlinux EXPORT_SYMBOL +0xbf8813f8 bpf_prog_get_type_path vmlinux EXPORT_SYMBOL +0x294cde61 ebt_do_table net/bridge/netfilter/ebtables EXPORT_SYMBOL +0x36fee19a ip_set_type_unregister net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x675742b8 md_wait_for_blocked_rdev drivers/md/md-mod EXPORT_SYMBOL +0x1b046bc7 ipvlan_link_delete drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL +0xab3eae28 fscache_enqueue_operation fs/fscache/fscache EXPORT_SYMBOL +0x2d2dd36f kobj_ns_grab_current vmlinux EXPORT_SYMBOL_GPL +0x6598e185 __neigh_set_probe_once vmlinux EXPORT_SYMBOL +0x5851cbf9 build_skb vmlinux EXPORT_SYMBOL +0xd3654580 get_tz_trend vmlinux EXPORT_SYMBOL +0xe1ad6a18 __devm_regmap_init_i2c vmlinux EXPORT_SYMBOL_GPL +0x346dd2eb fwnode_handle_put vmlinux EXPORT_SYMBOL_GPL +0x52252316 clk_unregister_fixed_rate vmlinux EXPORT_SYMBOL_GPL +0x88da112c nexthop_find_by_id vmlinux EXPORT_SYMBOL_GPL +0x07747073 tcp_get_info vmlinux EXPORT_SYMBOL_GPL +0xdda41c02 skb_to_sgvec_nomark vmlinux EXPORT_SYMBOL_GPL +0x9b17b9d6 __alloc_skb vmlinux EXPORT_SYMBOL +0x90641b39 ata_sff_tf_load vmlinux EXPORT_SYMBOL_GPL +0xd8b126a7 regmap_get_val_bytes vmlinux EXPORT_SYMBOL_GPL +0x997e4e55 drm_atomic_nonblocking_commit vmlinux EXPORT_SYMBOL +0xccd199c0 of_get_dma_window vmlinux EXPORT_SYMBOL_GPL +0x56b4f54c jbd2_trans_will_send_data_barrier vmlinux EXPORT_SYMBOL +0x94ce8b2c jbd2_journal_begin_ordered_truncate vmlinux EXPORT_SYMBOL +0x100fbe69 vm_zone_stat vmlinux EXPORT_SYMBOL +0xf1736fa2 file_fdatawait_range vmlinux EXPORT_SYMBOL +0x055e77e8 jiffies_64 vmlinux EXPORT_SYMBOL +0xf21017d9 mutex_trylock vmlinux EXPORT_SYMBOL +0x065bd1db housekeeping_cpumask vmlinux EXPORT_SYMBOL_GPL +0xf7ff7a92 p9_client_setattr net/9p/9pnet EXPORT_SYMBOL +0x97653815 svc_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x10e6ccea dm_bitset_clear_bit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x59704f3c mptscsih_abort drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x4ab3cb50 iscsi_tcp_segment_done drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x9507547f ocfs2_cluster_disconnect fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xb4844afc blk_mq_start_stopped_hw_queues vmlinux EXPORT_SYMBOL +0xca80eb72 ceph_pg_pool_name_by_id net/ceph/libceph EXPORT_SYMBOL +0x885b0024 dm_array_new drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x8c41e294 led_set_brightness vmlinux EXPORT_SYMBOL_GPL +0x37b0d5c6 pci_cleanup_aer_uncorrect_error_status vmlinux EXPORT_SYMBOL_GPL +0x4d5f3312 dcache_readdir vmlinux 
EXPORT_SYMBOL +0x86a8c7ca inode_add_bytes vmlinux EXPORT_SYMBOL +0x97a89bf9 thaw_super vmlinux EXPORT_SYMBOL +0xc51f3564 generic_file_mmap vmlinux EXPORT_SYMBOL +0x5b6b0329 swiotlb_max_segment vmlinux EXPORT_SYMBOL_GPL +0x0f053cba abort_creds vmlinux EXPORT_SYMBOL +0x38b92846 llc_remove_pack net/llc/llc EXPORT_SYMBOL +0x769872cc rndis_add_hdr drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x04ace206 mptscsih_slave_configure drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x20978fb9 idr_find vmlinux EXPORT_SYMBOL_GPL +0x204c19f5 tcp_alloc_md5sig_pool vmlinux EXPORT_SYMBOL +0x9f466587 xt_target_to_user vmlinux EXPORT_SYMBOL_GPL +0x4188d439 neigh_rand_reach_time vmlinux EXPORT_SYMBOL +0x954d345c dev_forward_skb vmlinux EXPORT_SYMBOL_GPL +0x4df2ea84 gen_estimator_read vmlinux EXPORT_SYMBOL +0xe1d611d3 sock_common_setsockopt vmlinux EXPORT_SYMBOL +0xe772e6ea sock_common_getsockopt vmlinux EXPORT_SYMBOL +0xe987d9aa hisi_sas_debugfs_enable vmlinux EXPORT_SYMBOL_GPL +0xb17bbe56 drm_mode_create_dvi_i_properties vmlinux EXPORT_SYMBOL +0x95dbc76d drm_ht_create vmlinux EXPORT_SYMBOL +0xa78910e5 n_tty_inherit_ops vmlinux EXPORT_SYMBOL_GPL +0xf04429b4 acpi_bus_get_status_handle vmlinux EXPORT_SYMBOL_GPL +0x65408378 zlib_inflate_blob vmlinux EXPORT_SYMBOL +0xcea0c0ff security_sctp_sk_clone vmlinux EXPORT_SYMBOL +0x9b037359 try_to_writeback_inodes_sb vmlinux EXPORT_SYMBOL +0xafeb7351 vfs_mknod vmlinux EXPORT_SYMBOL +0xd2738174 nfc_hci_get_clientdata net/nfc/hci/hci EXPORT_SYMBOL +0x4505ebd5 nfc_hci_set_clientdata net/nfc/hci/hci EXPORT_SYMBOL +0x2f3d0c66 nft_chain_validate_hooks net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x94f30e58 ib_get_rmpp_segment drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4beb505d usb_gadget_get_string drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xa0a63437 mlx4_multicast_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xa1d6af1e skb_dequeue vmlinux EXPORT_SYMBOL +0xa8306b78 scaled_ppm_to_ppb vmlinux EXPORT_SYMBOL +0x0bb028d4 hisi_clk_register_fixed_factor vmlinux EXPORT_SYMBOL_GPL +0x0f43c507 clk_hw_register vmlinux EXPORT_SYMBOL_GPL +0x9f4f2aa3 acpi_gbl_FADT vmlinux EXPORT_SYMBOL +0xc5a69227 filemap_write_and_wait vmlinux EXPORT_SYMBOL +0x27bbf221 disable_irq_nosync vmlinux EXPORT_SYMBOL +0x9ff9d33b dccp_create_openreq_child net/dccp/dccp EXPORT_SYMBOL_GPL +0x29cebe4d nf_send_reset net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL +0x0e08ed1d dm_cell_promote_or_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xaaf60b56 mlx4_srq_query drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xcc3e8966 pnfs_destroy_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x719e0e44 add_uevent_var vmlinux EXPORT_SYMBOL_GPL +0x56abc5b9 inet_rtx_syn_ack vmlinux EXPORT_SYMBOL +0xa437816a __pm_relax vmlinux EXPORT_SYMBOL_GPL +0xbfdb2318 drm_framebuffer_unregister_private vmlinux EXPORT_SYMBOL +0xf7584a9c find_font vmlinux EXPORT_SYMBOL +0x6f791233 alloc_cpu_rmap vmlinux EXPORT_SYMBOL +0x29d09bea blk_queue_chunk_sectors vmlinux EXPORT_SYMBOL +0xfc39b69c blk_clear_pm_only vmlinux EXPORT_SYMBOL_GPL +0x844712df perf_event_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xdf7e4882 p9_client_read net/9p/9pnet EXPORT_SYMBOL +0x09842752 dccp_child_process net/dccp/dccp EXPORT_SYMBOL_GPL +0x0bfafadb udp_tunnel_drop_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x3251d762 nf_tables_trans_destroy_flush_work net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x70a3641a ipmi_smi_watcher_unregister drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xd45a7377 
tcp_filter vmlinux EXPORT_SYMBOL +0x7ee0c56d netdev_features_change vmlinux EXPORT_SYMBOL +0xbcbc7add platform_get_resource_byname vmlinux EXPORT_SYMBOL_GPL +0x407693b7 acpi_unbind_one vmlinux EXPORT_SYMBOL_GPL +0x1808491e fbcon_update_vcs vmlinux EXPORT_SYMBOL +0x2978395d p9_client_write net/9p/9pnet EXPORT_SYMBOL +0xd505c3e0 nf_ct_port_nlattr_tuple_size net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x62561307 mpt_register drivers/message/fusion/mptbase EXPORT_SYMBOL +0xb80a1f52 of_find_node_by_type vmlinux EXPORT_SYMBOL +0x02ff933a usb_stor_adjust_quirks vmlinux EXPORT_SYMBOL_GPL +0x7c9b84dc phy_start vmlinux EXPORT_SYMBOL +0xd3fafe62 hisi_sas_slot_task_free vmlinux EXPORT_SYMBOL_GPL +0x3ab7b1cc scsi_set_sense_field_pointer vmlinux EXPORT_SYMBOL +0x4d351840 devres_find vmlinux EXPORT_SYMBOL_GPL +0x15d8aa41 __drm_printfn_seq_file vmlinux EXPORT_SYMBOL +0x77d6def7 drm_mode_probed_add vmlinux EXPORT_SYMBOL +0xf7a2687e user_free_preparse vmlinux EXPORT_SYMBOL_GPL +0xcfaab29d page_mapped vmlinux EXPORT_SYMBOL +0x00513f58 get_timespec64 vmlinux EXPORT_SYMBOL_GPL +0x319d493d proc_dostring vmlinux EXPORT_SYMBOL +0x64c0925e kvm_init vmlinux EXPORT_SYMBOL_GPL +0xc49842fb ib_query_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xec79d31a fc_fc4_register_provider drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x504cb053 simd_aead_create_compat crypto/crypto_simd EXPORT_SYMBOL_GPL +0x444cc8ed tcp_md5_needed vmlinux EXPORT_SYMBOL +0xfbcdae06 genlmsg_put vmlinux EXPORT_SYMBOL +0xc7171ec7 ttm_kmap_atomic_prot vmlinux EXPORT_SYMBOL +0xd3e5267a drm_crtc_set_max_vblank_count vmlinux EXPORT_SYMBOL +0x8fdfc9a0 drm_dp_mst_detect_port vmlinux EXPORT_SYMBOL +0xff2d257b pci_bus_write_config_byte vmlinux EXPORT_SYMBOL +0x8960aa40 devm_gpiod_put_array vmlinux EXPORT_SYMBOL_GPL +0xd7d280ad irq_poll_complete vmlinux EXPORT_SYMBOL +0x47709e42 free_anon_bdev vmlinux EXPORT_SYMBOL +0x2e15b5bd __kvm_set_memory_region vmlinux EXPORT_SYMBOL_GPL +0x09b48155 md_bitmap_close_sync drivers/md/md-mod EXPORT_SYMBOL +0xae80d656 md_bitmap_startwrite drivers/md/md-mod EXPORT_SYMBOL +0x0e230a7f raw_hash_sk vmlinux EXPORT_SYMBOL_GPL +0x3b81b0ce tcp_seq_start vmlinux EXPORT_SYMBOL +0xdf5b9568 dst_cache_get vmlinux EXPORT_SYMBOL_GPL +0xf24bc9d7 rps_sock_flow_table vmlinux EXPORT_SYMBOL +0x90bb0451 usb_stor_control_msg vmlinux EXPORT_SYMBOL_GPL +0x6e4bc056 spi_res_free vmlinux EXPORT_SYMBOL_GPL +0x04a1e165 virtio_max_dma_size vmlinux EXPORT_SYMBOL_GPL +0x6123d2eb blk_mq_complete_request vmlinux EXPORT_SYMBOL +0x6d7e951e rcu_exp_batches_completed vmlinux EXPORT_SYMBOL_GPL +0xe3043ff9 request_any_context_irq vmlinux EXPORT_SYMBOL_GPL +0x2970ec11 ieee802154_wake_queue net/mac802154/mac802154 EXPORT_SYMBOL +0x7f2688b3 rpc_clnt_swap_activate net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xaa37d3d2 uio_unregister_device drivers/uio/uio EXPORT_SYMBOL_GPL +0xb6aab79b mlx5_core_query_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x15d7f6c2 mlx4_port_map_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x7ab80a37 hinic_support_roce drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x46369830 nfs_wb_all fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd4d1983c udplite_table vmlinux EXPORT_SYMBOL +0xf55f929d cpufreq_driver_resolve_freq vmlinux EXPORT_SYMBOL_GPL +0xb022ad52 pm_runtime_get_if_in_use vmlinux EXPORT_SYMBOL_GPL +0x2e492668 mipi_dsi_dcs_set_page_address vmlinux EXPORT_SYMBOL +0x88547e70 drm_atomic_helper_duplicate_state vmlinux EXPORT_SYMBOL +0x8639d8fe serial8250_do_set_mctrl vmlinux EXPORT_SYMBOL_GPL 
+0x72c9da5d serial8250_do_get_mctrl vmlinux EXPORT_SYMBOL_GPL +0xdb63a944 acpi_lpat_get_conversion_table vmlinux EXPORT_SYMBOL_GPL +0xad93cfa1 pci_ats_page_aligned vmlinux EXPORT_SYMBOL_GPL +0x71eb57c2 msi_desc_to_pci_dev vmlinux EXPORT_SYMBOL +0x6c3e63f5 blk_queue_max_segment_size vmlinux EXPORT_SYMBOL +0xd790d790 mmu_notifier_get_locked vmlinux EXPORT_SYMBOL_GPL +0x53ef1c3a vhost_new_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xc1b21c30 ib_destroy_rwq_ind_table drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xcf677ce1 kill_mtd_super drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x55b4fa08 gether_get_host_addr drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x34658cf5 gether_set_host_addr drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x7fdde0b4 mlx4_handle_eth_header_mcast_prio drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x611cfa85 klist_add_tail vmlinux EXPORT_SYMBOL_GPL +0x9fca13d7 ip_fraglist_init vmlinux EXPORT_SYMBOL +0x050877b9 dmi_first_match vmlinux EXPORT_SYMBOL +0x2473f47e dm_table_get_size vmlinux EXPORT_SYMBOL +0x1023bc23 usb_gadget_unmap_request vmlinux EXPORT_SYMBOL_GPL +0x2db5901e ahci_shost_attrs vmlinux EXPORT_SYMBOL_GPL +0x600683d3 do_unblank_screen vmlinux EXPORT_SYMBOL +0x7691bbfb pci_find_ext_capability vmlinux EXPORT_SYMBOL_GPL +0x34bab869 look_up_OID vmlinux EXPORT_SYMBOL_GPL +0xfa50bd29 fscrypt_release_ctx vmlinux EXPORT_SYMBOL +0x37f5b94b kernel_read_file vmlinux EXPORT_SYMBOL_GPL +0x6e9dd606 __symbol_put vmlinux EXPORT_SYMBOL +0x8193f512 iscsit_increment_maxcmdsn drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x3a212d2f md_bitmap_update_sb drivers/md/md-mod EXPORT_SYMBOL +0xe93dfa01 hinic_set_link_status_follow drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xfa4667d1 fc_seq_release drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x7ebf4ace __tracepoint_nfs_xdr_status fs/nfs/nfs EXPORT_SYMBOL_GPL +0x20835a9f __xdp_release_frame vmlinux EXPORT_SYMBOL_GPL +0xf665f74f sock_load_diag_module vmlinux EXPORT_SYMBOL +0xfca723a5 genphy_restart_aneg vmlinux EXPORT_SYMBOL +0xbaeba429 drm_compat_ioctl vmlinux EXPORT_SYMBOL +0x209565f5 drm_connector_attach_scaling_mode_property vmlinux EXPORT_SYMBOL +0xe1b4ab34 pci_claim_resource vmlinux EXPORT_SYMBOL +0x996ef6c4 mmu_notifier_range_update_to_read_only vmlinux EXPORT_SYMBOL_GPL +0xf09b5d9a get_zeroed_page vmlinux EXPORT_SYMBOL +0xd95e829b get_kernel_page vmlinux EXPORT_SYMBOL_GPL +0xe42cba51 bpf_prog_put vmlinux EXPORT_SYMBOL_GPL +0xa0c0ca6c mpt_raid_phys_disk_pg0 drivers/message/fusion/mptbase EXPORT_SYMBOL +0x487e60c7 af_alg_sendmsg crypto/af_alg EXPORT_SYMBOL_GPL +0x4d3af7fa ocfs2_cluster_hangup fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x3d844ce2 devlink_port_attrs_pci_vf_set vmlinux EXPORT_SYMBOL_GPL +0xdf90c978 devlink_port_attrs_pci_pf_set vmlinux EXPORT_SYMBOL_GPL +0x3f3947ac ndo_dflt_fdb_dump vmlinux EXPORT_SYMBOL +0x1872ef78 mbox_request_channel vmlinux EXPORT_SYMBOL_GPL +0x49560855 of_translate_dma_address vmlinux EXPORT_SYMBOL +0x3812c51b of_device_is_available vmlinux EXPORT_SYMBOL +0x0fdca8fc input_close_device vmlinux EXPORT_SYMBOL +0x563e6dff sort_r vmlinux EXPORT_SYMBOL +0x2364da19 key_validate vmlinux EXPORT_SYMBOL +0x52da2b65 arm64_const_caps_ready vmlinux EXPORT_SYMBOL +0x484a760e ceph_msg_new2 net/ceph/libceph EXPORT_SYMBOL +0xac317db8 rpc_init_pipe_dir_head net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x33de91bb tcp_vegas_init net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0x650f8603 snd_pcm_format_silence_64 sound/core/snd-pcm EXPORT_SYMBOL +0x50b33ca4 capi_cmsg2message 
drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xe6a636ee md_stop drivers/md/md-mod EXPORT_SYMBOL_GPL +0x06a1e8e5 iscsi_eh_recover_target drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x49b0a088 rtnl_create_link vmlinux EXPORT_SYMBOL +0x6f7922e2 of_usb_get_phy_mode vmlinux EXPORT_SYMBOL_GPL +0x963a0ce8 phy_connect_direct vmlinux EXPORT_SYMBOL +0x9e8affa9 ata_std_error_handler vmlinux EXPORT_SYMBOL_GPL +0x3a8d7e28 _dev_notice vmlinux EXPORT_SYMBOL +0xf9fdc927 drm_crtc_vblank_waitqueue vmlinux EXPORT_SYMBOL +0xed22de12 tty_insert_flip_string_flags vmlinux EXPORT_SYMBOL +0xb2efee8c virtio_device_freeze vmlinux EXPORT_SYMBOL_GPL +0x42fba1c7 __sbitmap_queue_get_shallow vmlinux EXPORT_SYMBOL_GPL +0x87d6ac49 address_space_init_once vmlinux EXPORT_SYMBOL +0xcf8b6f4c bdi_dev_name vmlinux EXPORT_SYMBOL_GPL +0x9714e0bb ktime_get_raw vmlinux EXPORT_SYMBOL_GPL +0x45e69e01 prepare_to_wait_exclusive vmlinux EXPORT_SYMBOL +0x85a8ee76 ubi_unregister_volume_notifier drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0xe43c531d ubi_open_volume_nm drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x00f18877 md_bitmap_load drivers/md/md-mod EXPORT_SYMBOL_GPL +0xc7fa4aa9 kobj_ns_drop vmlinux EXPORT_SYMBOL_GPL +0x31978096 xfrm_audit_state_replay vmlinux EXPORT_SYMBOL_GPL +0x8321cc25 inet_dev_addr_type vmlinux EXPORT_SYMBOL +0x8839da99 tcf_action_exec vmlinux EXPORT_SYMBOL +0xac9f83c7 ata_pci_shutdown_one vmlinux EXPORT_SYMBOL_GPL +0x2f909a34 platform_device_add vmlinux EXPORT_SYMBOL_GPL +0xfb08d7c4 ttm_bo_mmap vmlinux EXPORT_SYMBOL +0x150a2a8e ttm_bo_kmap vmlinux EXPORT_SYMBOL +0xb9bfcd02 clk_hw_get_num_parents vmlinux EXPORT_SYMBOL_GPL +0xb853d95b debugfs_create_size_t vmlinux EXPORT_SYMBOL_GPL +0xd9892453 remove_proc_entry vmlinux EXPORT_SYMBOL +0x51d15382 fsnotify_add_mark vmlinux EXPORT_SYMBOL_GPL +0x2a983d26 ceph_pagelist_release net/ceph/libceph EXPORT_SYMBOL +0x1b871f2d arpt_register_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL +0xd1ec683c ipv6_opt_accepted vmlinux EXPORT_SYMBOL_GPL +0x4c1f233e xfrm_spd_getinfo vmlinux EXPORT_SYMBOL +0xb2d25e32 dm_kobject_release vmlinux EXPORT_SYMBOL +0x610e6ec1 ptp_find_pin vmlinux EXPORT_SYMBOL +0x71812b5c genphy_setup_forced vmlinux EXPORT_SYMBOL +0xf3ac5c82 ttm_bo_lock_delayed_workqueue vmlinux EXPORT_SYMBOL +0x6c53b778 drm_modeset_lock vmlinux EXPORT_SYMBOL +0xe415ff9e pci_walk_bus vmlinux EXPORT_SYMBOL_GPL +0x92b79728 block_is_partially_uptodate vmlinux EXPORT_SYMBOL +0xd9ecb670 ring_buffer_overruns vmlinux EXPORT_SYMBOL_GPL +0x9eb90164 nci_uart_unregister net/nfc/nci/nci_uart EXPORT_SYMBOL_GPL +0x66f0ed1e atm_alloc_charge net/atm/atm EXPORT_SYMBOL +0xcee467f3 xprt_load_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x39bf9301 _snd_pcm_hw_param_setempty sound/core/snd-pcm EXPORT_SYMBOL +0xe514b0c3 ib_get_eth_speed drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x44cb211e hinic_rx_tx_flush drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x605347e2 alloc_etherdev_mqs vmlinux EXPORT_SYMBOL +0x5be63c5b crc32c_csum_stub vmlinux EXPORT_SYMBOL +0xacbccf55 drm_atomic_helper_commit_hw_done vmlinux EXPORT_SYMBOL +0x0047886a uart_resume_port vmlinux EXPORT_SYMBOL +0xd9cf11bf gpiod_get_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0x49e0fd21 __cpu_copy_user_page vmlinux EXPORT_SYMBOL_GPL +0x3c0eb79e nfc_tm_data_received net/nfc/nfc EXPORT_SYMBOL +0x84d63af0 ieee802154_rx_irqsafe net/mac802154/mac802154 EXPORT_SYMBOL +0x69e2f583 rds_cong_map_updated net/rds/rds EXPORT_SYMBOL_GPL +0x8ede9e26 ax25_protocol_release net/ax25/ax25 EXPORT_SYMBOL +0x9c4cd2f4 ib_port_register_module_stat 
drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4b22c8a1 iscsit_setup_text_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x185768d0 parport_ieee1284_epp_read_data drivers/parport/parport EXPORT_SYMBOL +0x823b8310 parport_ieee1284_ecp_read_data drivers/parport/parport EXPORT_SYMBOL +0xada907a4 capilib_free_ncci drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x6bb4bf8f dm_array_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x7d6bcaa8 ps2_handle_ack drivers/input/serio/libps2 EXPORT_SYMBOL +0x8005cf66 netlbl_audit_start vmlinux EXPORT_SYMBOL +0xb05fc310 sysctl_rmem_max vmlinux EXPORT_SYMBOL +0xfac8865f sysctl_wmem_max vmlinux EXPORT_SYMBOL +0x06209f49 phy_lookup_setting vmlinux EXPORT_SYMBOL_GPL +0x20ef1bfd ahci_platform_resume_host vmlinux EXPORT_SYMBOL_GPL +0x68d59f59 pm_relax vmlinux EXPORT_SYMBOL_GPL +0x62aadf5e ttm_bo_mem_put vmlinux EXPORT_SYMBOL +0xe02caa98 of_drm_find_panel vmlinux EXPORT_SYMBOL +0x4ab208ba acpi_walk_resource_buffer vmlinux EXPORT_SYMBOL +0xd8db9d66 pci_user_write_config_byte vmlinux EXPORT_SYMBOL_GPL +0x0d116ad0 __sbitmap_queue_get vmlinux EXPORT_SYMBOL_GPL +0xbb51afc1 ahash_free_instance vmlinux EXPORT_SYMBOL_GPL +0x09e31b43 crypto_grab_spawn vmlinux EXPORT_SYMBOL_GPL +0x51727532 jbd2_journal_errno vmlinux EXPORT_SYMBOL +0x5332460a dquot_transfer vmlinux EXPORT_SYMBOL +0x26ed2186 register_vmap_purge_notifier vmlinux EXPORT_SYMBOL_GPL +0x87dd4159 kvm_clear_guest vmlinux EXPORT_SYMBOL_GPL +0xe9392b37 lapb_register net/lapb/lapb EXPORT_SYMBOL +0x9c1f53b7 xt_rateest_lookup net/netfilter/xt_RATEEST EXPORT_SYMBOL_GPL +0x337a85aa rdma_get_gid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd6711a58 dm_bitset_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x9c20eec3 md_wakeup_thread drivers/md/md-mod EXPORT_SYMBOL +0xc7ba971b mlx4_SET_PORT_BEACON drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xc5b7dbe3 fc_elsct_send drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x467df16d netdev_rss_key_fill vmlinux EXPORT_SYMBOL +0x5cf53ce2 input_free_minor vmlinux EXPORT_SYMBOL +0x4d2f5e2e phy_ethtool_ksettings_get vmlinux EXPORT_SYMBOL +0xb1478751 phy_ethtool_ksettings_set vmlinux EXPORT_SYMBOL +0x554361b8 ata_std_bios_param vmlinux EXPORT_SYMBOL_GPL +0xd291d727 pm_wakeup_dev_event vmlinux EXPORT_SYMBOL_GPL +0x1f86da8c drm_gem_cma_free_object vmlinux EXPORT_SYMBOL_GPL +0x5f1fa4be drm_gem_map_dma_buf vmlinux EXPORT_SYMBOL +0xa770ca60 drm_clflush_pages vmlinux EXPORT_SYMBOL +0xabb6e9f0 drm_gem_fb_create_with_funcs vmlinux EXPORT_SYMBOL_GPL +0xc88f8572 pci_write_config_byte vmlinux EXPORT_SYMBOL +0xbc049492 pagecache_write_end vmlinux EXPORT_SYMBOL +0x68ee5d98 rpc_clone_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9ec2b128 arpt_unregister_table_pre_exit net/ipv4/netfilter/arp_tables EXPORT_SYMBOL +0x5b35c4f9 vfio_group_set_kvm drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x3e17f466 mdio_set_flag drivers/net/mdio EXPORT_SYMBOL +0xce7c44aa xfrm_audit_state_icvfail vmlinux EXPORT_SYMBOL_GPL +0x07be6905 net_inc_egress_queue vmlinux EXPORT_SYMBOL_GPL +0x28dbbfa5 of_get_next_child vmlinux EXPORT_SYMBOL +0x5bb000b2 hid_resolv_usage vmlinux EXPORT_SYMBOL_GPL +0x2df26ea5 hidinput_get_led_field vmlinux EXPORT_SYMBOL_GPL +0xba2f45a2 ahci_platform_disable_clks vmlinux EXPORT_SYMBOL_GPL +0x9bbd29a9 drm_panel_detach vmlinux EXPORT_SYMBOL +0x5bb2453d drm_gem_shmem_dumb_create vmlinux EXPORT_SYMBOL_GPL +0x6ff141ec drm_read vmlinux EXPORT_SYMBOL +0xb077e70a clk_unprepare vmlinux EXPORT_SYMBOL_GPL +0xb12cbacb fb_unregister_client 
vmlinux EXPORT_SYMBOL +0x23c9891c kstrtoint_from_user vmlinux EXPORT_SYMBOL +0x122a7d5f crypto_skcipher_encrypt vmlinux EXPORT_SYMBOL_GPL +0xd5d63cf2 perf_get_aux vmlinux EXPORT_SYMBOL_GPL +0x1446b60a caif_client_register_refcnt net/caif/caif EXPORT_SYMBOL +0x13ee3274 get_cfcnfg net/caif/caif EXPORT_SYMBOL +0x44c6e633 vcc_sklist_lock net/atm/atm EXPORT_SYMBOL +0x30b93c5d ip_vs_conn_out_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x202a1b1b line6_midi_id sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0xd9f711ae mlxsw_afa_block_append_mcrouter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xceca99c0 i40e_register_client drivers/net/ethernet/intel/i40e/i40e EXPORT_SYMBOL +0x372ce0fe hinic_ceq_register_cb drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xef0fbd7c fcoe_transport_detach drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x1cf07398 inet_csk_init_xmit_timers vmlinux EXPORT_SYMBOL +0x9ce3946c drm_fb_helper_debug_enter vmlinux EXPORT_SYMBOL +0xdec4f0d0 hisi_clk_init vmlinux EXPORT_SYMBOL_GPL +0x9318586c clk_hw_register_mux_table vmlinux EXPORT_SYMBOL_GPL +0x8f1b4353 crypto_has_skcipher2 vmlinux EXPORT_SYMBOL_GPL +0x2e1da9fb probe_kernel_read vmlinux EXPORT_SYMBOL_GPL +0xa1c856ab irq_set_chip_and_handler_name vmlinux EXPORT_SYMBOL_GPL +0x4bbedaf4 ceph_con_open net/ceph/libceph EXPORT_SYMBOL +0xf8ce8f2a p9_release_pages net/9p/9pnet EXPORT_SYMBOL +0x9a71d92e ip_tunnel_encap_setup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0xc4204a6d vfio_platform_remove_common drivers/vfio/platform/vfio-platform-base EXPORT_SYMBOL_GPL +0x7ac1578c get_phv_bit drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x5dc4a64d ohci_resume vmlinux EXPORT_SYMBOL_GPL +0x3efe1703 phy_unregister_fixup_for_id vmlinux EXPORT_SYMBOL +0xfc2f38d5 dev_pm_put_subsys_data vmlinux EXPORT_SYMBOL_GPL +0x064083cd dev_pm_get_subsys_data vmlinux EXPORT_SYMBOL_GPL +0x490253e3 transport_add_device vmlinux EXPORT_SYMBOL_GPL +0x7d8aaeec kill_device vmlinux EXPORT_SYMBOL_GPL +0x72095557 drm_mode_prune_invalid vmlinux EXPORT_SYMBOL +0x814e371c vfs_readlink vmlinux EXPORT_SYMBOL +0x8133c67d complete_and_exit vmlinux EXPORT_SYMBOL +0xbd2d9004 kvm_write_guest vmlinux EXPORT_SYMBOL_GPL +0xedd38d19 capi_ctr_ready drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0xc8d0a51f i2c_mux_del_adapters drivers/i2c/i2c-mux EXPORT_SYMBOL_GPL +0x3d14aaf1 mlx4_buf_write_mtt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x570decd5 inet6_lookup vmlinux EXPORT_SYMBOL_GPL +0x9c16688c udp_seq_stop vmlinux EXPORT_SYMBOL +0x19d52f1f hid_quirks_exit vmlinux EXPORT_SYMBOL_GPL +0xa4be8413 scsi_nl_sock vmlinux EXPORT_SYMBOL_GPL +0x4d3f0a26 regmap_reinit_cache vmlinux EXPORT_SYMBOL_GPL +0x530f4d42 of_clk_parent_fill vmlinux EXPORT_SYMBOL_GPL +0x16516798 osc_pc_lpi_support_confirmed vmlinux EXPORT_SYMBOL_GPL +0xff6878cf fb_default_cmap vmlinux EXPORT_SYMBOL +0x11ae6c4f sysfs_remove_group vmlinux EXPORT_SYMBOL_GPL +0x6d7ea044 sysfs_remove_file_from_group vmlinux EXPORT_SYMBOL_GPL +0xc3ff38c2 down_read_trylock vmlinux EXPORT_SYMBOL +0x12d53864 wpan_phy_register net/ieee802154/ieee802154 EXPORT_SYMBOL +0xf28c4adb ip_set_nfnl_get_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x6905901d nf_flow_table_free net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x3685a07b spc_emulate_inquiry_std drivers/target/target_core_mod EXPORT_SYMBOL +0x2d686cd7 dcbnl_cee_notify vmlinux EXPORT_SYMBOL +0x823edea5 xt_compat_add_offset vmlinux EXPORT_SYMBOL_GPL +0x08b5fb63 hid_hw_start vmlinux EXPORT_SYMBOL_GPL +0x77154b0c cpufreq_register_driver vmlinux 
EXPORT_SYMBOL_GPL +0x6fbfbb28 serial8250_handle_irq vmlinux EXPORT_SYMBOL_GPL +0x9833bc0c hvc_kick vmlinux EXPORT_SYMBOL_GPL +0x4ec91cb7 path_has_submounts vmlinux EXPORT_SYMBOL +0x3ef1a059 open_with_fake_path vmlinux EXPORT_SYMBOL +0xcdb04a76 ib_register_client drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa9f693c7 ps2_begin_command drivers/input/serio/libps2 EXPORT_SYMBOL +0xcbbeba94 mlx4_wol_write drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x02a029bb inet6_offloads vmlinux EXPORT_SYMBOL +0xee20ee2f nf_getsockopt vmlinux EXPORT_SYMBOL +0x9a8c5575 nf_setsockopt vmlinux EXPORT_SYMBOL +0x579e0bf5 rtnl_unregister_all vmlinux EXPORT_SYMBOL_GPL +0xcb6cf7e1 netif_rx_ni vmlinux EXPORT_SYMBOL +0x423db847 netif_tx_wake_queue vmlinux EXPORT_SYMBOL +0x3131b773 ir_raw_encode_scancode vmlinux EXPORT_SYMBOL +0xfa691d74 i2c_smbus_xfer vmlinux EXPORT_SYMBOL +0xfe8c3c1b hisi_sas_debugfs_exit vmlinux EXPORT_SYMBOL_GPL +0xb616cf3d hisi_sas_debugfs_init vmlinux EXPORT_SYMBOL_GPL +0x09c0c902 dev_pm_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL +0x3db2f222 device_attach vmlinux EXPORT_SYMBOL_GPL +0x0ecb0461 drm_gem_fence_array_add_implicit vmlinux EXPORT_SYMBOL +0xd2173e2f drm_is_current_master vmlinux EXPORT_SYMBOL +0x4ec57c40 tty_port_open vmlinux EXPORT_SYMBOL +0x41f16601 dma_get_any_slave_channel vmlinux EXPORT_SYMBOL_GPL +0x95ae4056 blk_rq_map_user_iov vmlinux EXPORT_SYMBOL +0xa9349a70 ioc_lookup_icq vmlinux EXPORT_SYMBOL +0x1f006946 dquot_commit_info vmlinux EXPORT_SYMBOL +0x3110fb25 seq_puts vmlinux EXPORT_SYMBOL +0xac9fc194 seq_putc vmlinux EXPORT_SYMBOL +0x06d9ff85 super_setup_bdi_name vmlinux EXPORT_SYMBOL +0x0af20eae down_read_interruptible vmlinux EXPORT_SYMBOL +0xb3092242 nf_nat_inet_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xdc7442b3 snd_pcm_hw_param_last sound/core/snd-pcm EXPORT_SYMBOL +0x814d64e9 crypto_poly1305_final crypto/poly1305_generic EXPORT_SYMBOL_GPL +0xdae9b5d7 nfs4_disable_idmapping fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe09a473f usb_create_shared_hcd vmlinux EXPORT_SYMBOL_GPL +0x5508e1f1 ata_dev_disable vmlinux EXPORT_SYMBOL_GPL +0xe05893a9 uart_update_timeout vmlinux EXPORT_SYMBOL +0xa73f717b pci_vfs_assigned vmlinux EXPORT_SYMBOL_GPL +0x44cbac99 crypto_register_acomp vmlinux EXPORT_SYMBOL_GPL +0xdbab143b d_genocide vmlinux EXPORT_SYMBOL +0xe81ddad6 tracing_generic_entry_update vmlinux EXPORT_SYMBOL_GPL +0xc2d3ae4b __module_put_and_exit vmlinux EXPORT_SYMBOL +0x6b853d06 ns_to_kernel_old_timeval vmlinux EXPORT_SYMBOL +0xb93ec1ee ceph_monc_stop net/ceph/libceph EXPORT_SYMBOL +0x7da14e47 __gue_build_header net/ipv4/fou EXPORT_SYMBOL +0x82730e87 __fou_build_header net/ipv4/fou EXPORT_SYMBOL +0xce0acaff nf_ct_l4proto_log_invalid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x5d8392e2 hinic_get_hwdev_by_netdev drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xcffa2aff spi_populate_width_msg drivers/scsi/scsi_transport_spi EXPORT_SYMBOL_GPL +0xc9028001 netlink_add_tap vmlinux EXPORT_SYMBOL_GPL +0xd56ab2bf __skb_wait_for_more_packets vmlinux EXPORT_SYMBOL +0xd00c1c13 pm_clk_init vmlinux EXPORT_SYMBOL_GPL +0xc5283a24 iommu_get_dma_cookie vmlinux EXPORT_SYMBOL +0x8e774df3 of_clk_hw_register vmlinux EXPORT_SYMBOL_GPL +0x7a15d473 blkg_print_stat_ios_recursive vmlinux EXPORT_SYMBOL_GPL +0x53fa36d1 ZSTD_decompressBlock lib/zstd/zstd_decompress EXPORT_SYMBOL +0x7d066dc6 rds_conn_path_connect_if_down net/rds/rds EXPORT_SYMBOL_GPL +0x8b7b9b7d tcp_vegas_cwnd_event net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0x035b16a0 nf_log_dump_sk_uid_gid net/netfilter/nf_log_common 
EXPORT_SYMBOL_GPL +0x81b69e41 snd_ctl_enum_info sound/core/snd EXPORT_SYMBOL +0x171ab2b4 ib_create_qp_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x083dedcb nvme_shutdown_ctrl drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xe14a2958 af_alg_count_tsgl crypto/af_alg EXPORT_SYMBOL_GPL +0xd7ff1b8a __ashlti3 vmlinux EXPORT_SYMBOL +0xd234944c netdev_txq_to_tc vmlinux EXPORT_SYMBOL +0x7c983a5d dmi_walk vmlinux EXPORT_SYMBOL_GPL +0xf1374d71 fwnode_graph_get_next_endpoint vmlinux EXPORT_SYMBOL_GPL +0xba1008c8 __crc32c_le vmlinux EXPORT_SYMBOL +0x7a463c36 end_buffer_write_sync vmlinux EXPORT_SYMBOL +0xc4ccf5fc d_rehash vmlinux EXPORT_SYMBOL +0x4b9b80c5 d_drop vmlinux EXPORT_SYMBOL +0xba3cafab file_open_root vmlinux EXPORT_SYMBOL +0x9f413221 vfio_group_get_external_user drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x762eed75 nfs_initiate_pgio fs/nfs/nfs EXPORT_SYMBOL_GPL +0x7ec17a4f nfs_create_rpc_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x83a335c9 tso_build_hdr vmlinux EXPORT_SYMBOL +0x077f8e61 sock_recv_errqueue vmlinux EXPORT_SYMBOL +0xdccdb1b1 usb_poison_urb vmlinux EXPORT_SYMBOL_GPL +0xc55ff962 phy_basic_t1_features_array vmlinux EXPORT_SYMBOL_GPL +0xc2a3e570 errata vmlinux EXPORT_SYMBOL_GPL +0x1ec400aa pci_get_class vmlinux EXPORT_SYMBOL +0xe569f4be __bdevname vmlinux EXPORT_SYMBOL +0xf6a28554 region_intersects vmlinux EXPORT_SYMBOL_GPL +0xc3cf1e55 snd_pcm_hw_refine sound/core/snd-pcm EXPORT_SYMBOL +0x7551b46e dm_tm_open_with_sm drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xc47b54e9 hinic_flush_sq_res drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x0375691a netif_rx vmlinux EXPORT_SYMBOL +0x4fd2e446 i2c_acpi_find_bus_speed vmlinux EXPORT_SYMBOL_GPL +0xd8281fd1 bio_clone_fast vmlinux EXPORT_SYMBOL +0x955b0e2e kthread_worker_fn vmlinux EXPORT_SYMBOL_GPL +0xc5533ac8 vsock_add_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xfa9e6399 unregister_ip_vs_app net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xa83cb8b4 mlx5_core_xrcd_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xde8fb6a5 nfs_dentry_operations fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd0a2847c sha_init vmlinux EXPORT_SYMBOL +0xde28890b xfrm_stateonly_find vmlinux EXPORT_SYMBOL +0x6ae7bc2a usb_create_hcd vmlinux EXPORT_SYMBOL_GPL +0x91293466 sas_eh_abort_handler vmlinux EXPORT_SYMBOL_GPL +0x72ea7b2d scsi_device_type vmlinux EXPORT_SYMBOL +0x9b45ed1e elv_bio_merge_ok vmlinux EXPORT_SYMBOL +0x597639e5 rpc_get_sb_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3a1a3979 ccp_version drivers/crypto/ccp/ccp EXPORT_SYMBOL_GPL +0x622359c9 can_bus_off drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x35c1e153 mlx4_get_slave_pkey_gid_tbl_len drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xd6bfc521 nvme_cancel_tagset drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x2af27cee usb_autopm_get_interface_no_resume vmlinux EXPORT_SYMBOL_GPL +0xaf545418 gpiochip_get_data vmlinux EXPORT_SYMBOL_GPL +0xa965ca81 reciprocal_value vmlinux EXPORT_SYMBOL +0xc5640808 unregister_binfmt vmlinux EXPORT_SYMBOL +0xd602ab6f alloc_vm_area vmlinux EXPORT_SYMBOL_GPL +0xe87ffc05 make_kgid vmlinux EXPORT_SYMBOL +0x359ec42f _raw_read_trylock vmlinux EXPORT_SYMBOL +0xb83b70f2 housekeeping_enabled vmlinux EXPORT_SYMBOL_GPL +0x2524ba17 ZSTD_getCParams lib/zstd/zstd_compress EXPORT_SYMBOL +0xdd9dfdd8 xdr_init_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xec3ceb34 hinic_cpu_to_be32 drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xb31a3731 tcf_idr_check_alloc vmlinux EXPORT_SYMBOL +0x854378ae sock_no_accept vmlinux EXPORT_SYMBOL 
+0x87477d6f of_pci_range_to_resource vmlinux EXPORT_SYMBOL +0x004b1e9a usb_serial_register_drivers vmlinux EXPORT_SYMBOL_GPL +0xac979463 ata_sff_check_status vmlinux EXPORT_SYMBOL_GPL +0xf4b754fd acpi_resources_are_enforced vmlinux EXPORT_SYMBOL +0x2bc11c9b pinctrl_dev_get_devname vmlinux EXPORT_SYMBOL_GPL +0x6c3f70e0 guid_gen vmlinux EXPORT_SYMBOL_GPL +0x3e729b33 cryptd_ahash_child vmlinux EXPORT_SYMBOL_GPL +0xe1d4fcf4 fuse_get_unique vmlinux EXPORT_SYMBOL_GPL +0xa48196c8 kdb_poll_idx vmlinux EXPORT_SYMBOL_GPL +0x3d69e338 set_groups vmlinux EXPORT_SYMBOL +0x153b60a6 klist_del vmlinux EXPORT_SYMBOL_GPL +0xbc380da9 tcf_generic_walker vmlinux EXPORT_SYMBOL +0xab213b95 flow_rule_match_eth_addrs vmlinux EXPORT_SYMBOL +0x36959692 sock_diag_register_inet_compat vmlinux EXPORT_SYMBOL_GPL +0x97865df4 netif_schedule_queue vmlinux EXPORT_SYMBOL +0x4184f10f dev_remove_offload vmlinux EXPORT_SYMBOL +0x1f8be10c genphy_soft_reset vmlinux EXPORT_SYMBOL +0xe98c2500 phy_modify_changed vmlinux EXPORT_SYMBOL_GPL +0x7f27db2a __lock_buffer vmlinux EXPORT_SYMBOL +0xdf255dcf memory_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x9629d686 esp6_input_done2 net/ipv6/esp6 EXPORT_SYMBOL_GPL +0x79af81ea passthrough_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL +0xf43f5a2c mlx4_mr_hw_change_access drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x06b949c6 tcp_openreq_init_rwin vmlinux EXPORT_SYMBOL +0xb55da6de xdp_rxq_info_reg_mem_model vmlinux EXPORT_SYMBOL_GPL +0xa168d9f2 dev_direct_xmit vmlinux EXPORT_SYMBOL +0xbb0ab47b debug_locks vmlinux EXPORT_SYMBOL_GPL +0x7ec35b8b ablkcipher_walk_phys vmlinux EXPORT_SYMBOL_GPL +0x817f0809 ablkcipher_walk_done vmlinux EXPORT_SYMBOL_GPL +0xe145a5d5 aead_geniv_alloc vmlinux EXPORT_SYMBOL_GPL +0xb8da0378 perf_event_update_userpage vmlinux EXPORT_SYMBOL_GPL +0x6485cd35 trace_print_bitmask_seq vmlinux EXPORT_SYMBOL_GPL +0x7924b6de ip_set_hostmask_map net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x8d897519 fib6_check_nexthop vmlinux EXPORT_SYMBOL_GPL +0x015c8ef3 eth_header_parse_protocol vmlinux EXPORT_SYMBOL +0x3856a09a xdp_return_frame vmlinux EXPORT_SYMBOL_GPL +0x4f83e115 to_hisi_sas_port vmlinux EXPORT_SYMBOL_GPL +0x99f095ef iscsi_destroy_flashnode_sess vmlinux EXPORT_SYMBOL_GPL +0x20a789ac irq_set_chip_data vmlinux EXPORT_SYMBOL +0x63197685 s2idle_wake vmlinux EXPORT_SYMBOL_GPL +0x61a7b238 nfc_find_se net/nfc/nfc EXPORT_SYMBOL +0xd8d4b516 ip6_tnl_get_link_net net/ipv6/ip6_tunnel EXPORT_SYMBOL +0xe73785ba register_ip_vs_pe net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0xb26beada vhost_dev_ioctl drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x9674c051 dm_unregister_path_selector drivers/md/dm-multipath EXPORT_SYMBOL_GPL +0x055b5454 inet6_csk_xmit vmlinux EXPORT_SYMBOL_GPL +0xa89ad683 tcp_ioctl vmlinux EXPORT_SYMBOL +0x67a0ae65 ata_eh_analyze_ncq_error vmlinux EXPORT_SYMBOL_GPL +0xdb713423 drm_fb_helper_sys_fillrect vmlinux EXPORT_SYMBOL +0x1f55df3d iommu_attach_group vmlinux EXPORT_SYMBOL_GPL +0x4a372220 registered_fb vmlinux EXPORT_SYMBOL +0xe194fa0d vfs_symlink vmlinux EXPORT_SYMBOL +0xd273b1b1 __round_jiffies_up_relative vmlinux EXPORT_SYMBOL_GPL +0x35465e15 wait_for_completion_io vmlinux EXPORT_SYMBOL +0x457f3082 find_vpid vmlinux EXPORT_SYMBOL_GPL +0x6f3614b6 rdma_is_zero_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa0c71dac spi_populate_sync_msg drivers/scsi/scsi_transport_spi EXPORT_SYMBOL_GPL +0xd58bbbcb nvme_delete_wq drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xd98999d8 ttm_tt_set_placement_caching vmlinux EXPORT_SYMBOL +0x9081b5db 
btree_insert vmlinux EXPORT_SYMBOL_GPL +0xb0c5e247 lockref_put_return vmlinux EXPORT_SYMBOL +0x263beb75 ecryptfs_get_versions vmlinux EXPORT_SYMBOL +0xea1ff4f8 unmap_mapping_range vmlinux EXPORT_SYMBOL +0x7171121c overflowgid vmlinux EXPORT_SYMBOL +0x8b618d08 overflowuid vmlinux EXPORT_SYMBOL +0xebc229ea mpls_output_possible net/mpls/mpls_router EXPORT_SYMBOL_GPL +0x1369dba6 free_cc770dev drivers/net/can/cc770/cc770 EXPORT_SYMBOL_GPL +0x4ad7e9d4 __tracepoint_mlx5_fs_add_rule drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x1c382120 mlx4_unregister_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x68f1eada hinic_init_qp_ctxts drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x6d78355b crypto_chacha12_setkey crypto/chacha_generic EXPORT_SYMBOL_GPL +0x276c3b79 crypto_chacha20_setkey crypto/chacha_generic EXPORT_SYMBOL_GPL +0xed15fb41 nfs4_print_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x4c07eadf mr_rtm_dumproute vmlinux EXPORT_SYMBOL +0x83b4fb67 ping_unhash vmlinux EXPORT_SYMBOL_GPL +0xdbe11bfe inet_select_addr vmlinux EXPORT_SYMBOL +0xefb134b3 dev_getbyhwaddr_rcu vmlinux EXPORT_SYMBOL +0x1a146ec3 usb_ep_type_string vmlinux EXPORT_SYMBOL_GPL +0x62e53760 iommu_get_msi_cookie vmlinux EXPORT_SYMBOL +0x83fbb072 dma_async_tx_descriptor_init vmlinux EXPORT_SYMBOL +0x798d202e pci_enable_device vmlinux EXPORT_SYMBOL +0x1fc7a59b __nla_validate vmlinux EXPORT_SYMBOL +0x7fbb960b vfs_dedupe_file_range vmlinux EXPORT_SYMBOL +0xd91e09a8 xprt_release_xprt_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xde09a94d xas_find vmlinux EXPORT_SYMBOL_GPL +0xfce91d7e ipv6_recv_error vmlinux EXPORT_SYMBOL_GPL +0x3e30cd57 udp_sk_rx_dst_set vmlinux EXPORT_SYMBOL +0x968f9a23 efivar_entry_iter_begin vmlinux EXPORT_SYMBOL_GPL +0xdae943d2 phy_read_mmd vmlinux EXPORT_SYMBOL +0x0465a073 regmap_reg_in_ranges vmlinux EXPORT_SYMBOL_GPL +0xd6f19436 iommu_fwspec_free vmlinux EXPORT_SYMBOL_GPL +0x1f449588 mctrl_gpio_disable_ms vmlinux EXPORT_SYMBOL_GPL +0xa50335f4 sbitmap_finish_wait vmlinux EXPORT_SYMBOL_GPL +0x2ac48128 blk_mq_debugfs_rq_show vmlinux EXPORT_SYMBOL_GPL +0xdf2e2327 cryptd_skcipher_child vmlinux EXPORT_SYMBOL_GPL +0x7bc4b597 security_sb_remount vmlinux EXPORT_SYMBOL +0x95dd790a dcache_dir_close vmlinux EXPORT_SYMBOL +0x2c91e17c vm_get_page_prot vmlinux EXPORT_SYMBOL +0x1f563160 bpf_offload_dev_priv vmlinux EXPORT_SYMBOL_GPL +0x5af471c3 rpc_wake_up_queued_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc4575727 unregister_snap_client net/802/psnap EXPORT_SYMBOL +0x2df096f9 snd_ctl_unregister_ioctl_compat sound/core/snd EXPORT_SYMBOL +0x7485935a dm_btree_lookup drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xf8bb4242 mlx5_dm_sw_icm_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xa4e67f03 tcp_enter_quickack_mode vmlinux EXPORT_SYMBOL +0x3eb66973 phy_driver_register vmlinux EXPORT_SYMBOL +0xc5a261ae scsi_dma_unmap vmlinux EXPORT_SYMBOL +0x0c5fddd2 vga_set_legacy_decoding vmlinux EXPORT_SYMBOL +0xf0f2af21 ttm_bo_move_accel_cleanup vmlinux EXPORT_SYMBOL +0xdcb19aeb drm_atomic_state_default_clear vmlinux EXPORT_SYMBOL +0x50b4e726 drm_mode_config_helper_suspend vmlinux EXPORT_SYMBOL +0xfc833099 drm_dp_mst_put_port_malloc vmlinux EXPORT_SYMBOL +0x49cc44d1 regulator_get_voltage_sel_pickable_regmap vmlinux EXPORT_SYMBOL_GPL +0xe58b6bcc crypto_alloc_rng vmlinux EXPORT_SYMBOL_GPL +0xe54d31fa relay_reset vmlinux EXPORT_SYMBOL_GPL +0xbefb44b8 nf_send_reset6 net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL +0x12d09123 
nf_conntrack_register_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x70a675a6 dm_exception_store_type_register drivers/md/dm-snapshot EXPORT_SYMBOL +0x9798e452 fscache_withdraw_cache fs/fscache/fscache EXPORT_SYMBOL +0x2fdff90b inet_put_port vmlinux EXPORT_SYMBOL +0x80a00484 sock_no_connect vmlinux EXPORT_SYMBOL +0xdcea72c0 sk_set_peek_off vmlinux EXPORT_SYMBOL_GPL +0x5591473d usb_ep_fifo_flush vmlinux EXPORT_SYMBOL_GPL +0xc0da7810 phy_print_status vmlinux EXPORT_SYMBOL +0xb68f8f6d cpu_device_create vmlinux EXPORT_SYMBOL_GPL +0xa44ef64a clk_register_fractional_divider vmlinux EXPORT_SYMBOL_GPL +0x366fd49c pci_remove_bus vmlinux EXPORT_SYMBOL +0x206c0037 pinctrl_select_state vmlinux EXPORT_SYMBOL_GPL +0x63f964e1 iov_iter_zero vmlinux EXPORT_SYMBOL +0x9a623550 crypto_register_aeads vmlinux EXPORT_SYMBOL_GPL +0xe3c3b53d dquot_mark_dquot_dirty vmlinux EXPORT_SYMBOL +0x1c338147 vm_numa_stat vmlinux EXPORT_SYMBOL +0xe85a9fd3 cpu_cluster_pm_exit vmlinux EXPORT_SYMBOL_GPL +0x1e1e140e ns_to_timespec64 vmlinux EXPORT_SYMBOL +0x0917490b wait_for_completion_killable_timeout vmlinux EXPORT_SYMBOL +0x9b144970 mpt_attach drivers/message/fusion/mptbase EXPORT_SYMBOL +0xc24894f8 ndlc_remove drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0x10ecc52c usb_amd_quirk_pll_enable vmlinux EXPORT_SYMBOL_GPL +0xf0a33001 of_clk_add_hw_provider vmlinux EXPORT_SYMBOL_GPL +0x88948819 generic_file_read_iter vmlinux EXPORT_SYMBOL +0xd521693d get_device_system_crosststamp vmlinux EXPORT_SYMBOL_GPL +0x86fca7e4 ceph_put_snap_context net/ceph/libceph EXPORT_SYMBOL +0x62bdff6e xdr_stream_pos net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5de3a991 ib_destroy_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdf4307f6 rdma_port_get_link_layer drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x996c5d6d mlxsw_reg_trans_bulk_wait drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xb1266858 hnae_register_notifier drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0xcf9d0aa6 o2nm_node_put fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x5b3e282f xa_store vmlinux EXPORT_SYMBOL +0x69d2df6d dev_get_phys_port_id vmlinux EXPORT_SYMBOL +0x942f9c0a i2c_of_match_device vmlinux EXPORT_SYMBOL_GPL +0x6f4f1407 spi_res_release vmlinux EXPORT_SYMBOL_GPL +0xaf345336 drm_state_dump vmlinux EXPORT_SYMBOL +0x90e5ca33 iommu_map vmlinux EXPORT_SYMBOL_GPL +0x0d61eeee __bitmap_subset vmlinux EXPORT_SYMBOL +0x697c5d0d tracing_snapshot_alloc vmlinux EXPORT_SYMBOL_GPL +0x77d9ba4c alloc_cc770dev drivers/net/can/cc770/cc770 EXPORT_SYMBOL_GPL +0x6204ab06 eth_type_trans vmlinux EXPORT_SYMBOL +0x19492289 of_get_address vmlinux EXPORT_SYMBOL +0x62b17d60 fwnode_get_phy_mode vmlinux EXPORT_SYMBOL_GPL +0x10daad27 ttm_tt_bind vmlinux EXPORT_SYMBOL +0xbc565ab9 drm_atomic_helper_crtc_duplicate_state vmlinux EXPORT_SYMBOL +0xf17e6282 clk_register_mux vmlinux EXPORT_SYMBOL_GPL +0xc5237e38 gpiochip_free_own_desc vmlinux EXPORT_SYMBOL_GPL +0x94bb7ec3 gen_pool_dma_zalloc_algo vmlinux EXPORT_SYMBOL +0x34b3195b debugfs_create_blob vmlinux EXPORT_SYMBOL_GPL +0x6276bb7b posix_acl_access_xattr_handler vmlinux EXPORT_SYMBOL_GPL +0xac8597d5 mb_cache_entry_get vmlinux EXPORT_SYMBOL +0x37ea3358 send_sig vmlinux EXPORT_SYMBOL +0x342a2354 copy_to_user_fromio sound/core/snd EXPORT_SYMBOL +0x38e10c1d ubi_flush drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x98db2687 dm_bitset_cursor_end drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb5bf65d0 hinic_api_cmd_read_ack drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd428466a tcp_shutdown vmlinux 
EXPORT_SYMBOL +0x09ddbcab devlink_flash_update_begin_notify vmlinux EXPORT_SYMBOL_GPL +0x580ddd4f usb_stor_set_xfer_buf vmlinux EXPORT_SYMBOL_GPL +0xcbae6c7a acpi_lid_notifier_unregister vmlinux EXPORT_SYMBOL +0x03b4e119 blk_mq_alloc_request vmlinux EXPORT_SYMBOL +0xe40c37ea down_write_trylock vmlinux EXPORT_SYMBOL +0xcb25fa93 param_set_uint vmlinux EXPORT_SYMBOL +0xddf86133 LZ4_compress_HC lib/lz4/lz4hc_compress EXPORT_SYMBOL +0x5538aea1 rpc_init_wait_queue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x287aacd9 usb_get_function drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xab4edc14 gether_get_qmult drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0xbf8bf4d9 gether_set_qmult drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x6d6b8b93 cdrom_open drivers/cdrom/cdrom EXPORT_SYMBOL +0x573b5453 ipv6_fixup_options vmlinux EXPORT_SYMBOL_GPL +0xf389fe60 __hw_addr_init vmlinux EXPORT_SYMBOL +0x199ed0cd net_disable_timestamp vmlinux EXPORT_SYMBOL +0xaec762f6 sock_cmsg_send vmlinux EXPORT_SYMBOL +0x94122827 usb_stor_ctrl_transfer vmlinux EXPORT_SYMBOL_GPL +0xbe509b9b drm_panel_unprepare vmlinux EXPORT_SYMBOL +0x36a45a80 drm_dp_dual_mode_read vmlinux EXPORT_SYMBOL +0xf5ea612f drm_vram_helper_release_mm vmlinux EXPORT_SYMBOL +0x391d7c30 blk_queue_max_discard_segments vmlinux EXPORT_SYMBOL_GPL +0x923d9dba pkcs7_parse_message vmlinux EXPORT_SYMBOL_GPL +0x6a4f623b mmu_notifier_synchronize vmlinux EXPORT_SYMBOL_GPL +0xe42f476d vringh_notify_disable_kern drivers/vhost/vringh EXPORT_SYMBOL +0x803d031e mpt_put_msg_frame drivers/message/fusion/mptbase EXPORT_SYMBOL +0x565d33e1 mlxsw_core_trap_action_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xb6dfddf2 mlx5_query_port_vl_hw_cap drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xfdf3d2c2 mlx4_map_sw_to_hw_steering_mode drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xfd8a823a ip_fib_metrics_init vmlinux EXPORT_SYMBOL_GPL +0x1ead6fea flow_block_cb_decref vmlinux EXPORT_SYMBOL +0x86c11d40 of_prop_next_string vmlinux EXPORT_SYMBOL_GPL +0x0cf29211 cpufreq_enable_fast_switch vmlinux EXPORT_SYMBOL_GPL +0x92d778bb stmmac_get_mac_addr vmlinux EXPORT_SYMBOL_GPL +0xd1cc24db stmmac_set_mac_addr vmlinux EXPORT_SYMBOL_GPL +0x32fd99f4 spi_register_controller vmlinux EXPORT_SYMBOL_GPL +0x4afe9a77 scsi_partsize vmlinux EXPORT_SYMBOL +0x66992180 ata_pci_device_resume vmlinux EXPORT_SYMBOL_GPL +0x241ab711 ata_scsi_slave_config vmlinux EXPORT_SYMBOL_GPL +0x3c257a44 drm_client_buffer_vmap vmlinux EXPORT_SYMBOL +0x426f9384 drm_connector_attach_tv_margin_properties vmlinux EXPORT_SYMBOL +0x3c24d861 backlight_device_set_brightness vmlinux EXPORT_SYMBOL +0x24e15c5a pcie_capability_write_word vmlinux EXPORT_SYMBOL +0x78db3618 copy_page_to_iter vmlinux EXPORT_SYMBOL +0x0be6862e dquot_set_dqinfo vmlinux EXPORT_SYMBOL +0x622fb78a css_next_descendant_pre vmlinux EXPORT_SYMBOL_GPL +0xe7698027 ioremap_cache vmlinux EXPORT_SYMBOL +0x93767274 qdisc_hash_del vmlinux EXPORT_SYMBOL +0x5239c798 of_find_device_by_node vmlinux EXPORT_SYMBOL +0xa507245c iscsi_conn_error_event vmlinux EXPORT_SYMBOL_GPL +0xa3ab0268 ata_port_pbar_desc vmlinux EXPORT_SYMBOL_GPL +0xe0e86aa9 ttm_populate_and_map_pages vmlinux EXPORT_SYMBOL +0x28eacabf drm_pci_alloc vmlinux EXPORT_SYMBOL +0xbfe53ed4 __blkdev_issue_zeroout vmlinux EXPORT_SYMBOL +0xaed4adbc kblockd_mod_delayed_work_on vmlinux EXPORT_SYMBOL +0xa35d99e8 cryptd_alloc_ahash vmlinux EXPORT_SYMBOL_GPL +0x6cb0ce87 irq_get_percpu_devid_partition vmlinux EXPORT_SYMBOL_GPL +0x3e2b0ba6 groups_alloc 
vmlinux EXPORT_SYMBOL +0x638ce6eb nci_send_cmd net/nfc/nci/nci EXPORT_SYMBOL +0xfde5f229 p9_client_stat net/9p/9pnet EXPORT_SYMBOL +0x6ef646e3 svc_rqst_free net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2924ab0c ib_create_cm_id drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xbbc50884 hinic_api_csr_rd64 drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xdf54a8f7 netlink_unregister_notifier vmlinux EXPORT_SYMBOL +0x80e6bb1c lwtunnel_build_state vmlinux EXPORT_SYMBOL_GPL +0xfdf637af dm_table_device_name vmlinux EXPORT_SYMBOL_GPL +0xdba632ce devm_rtc_allocate_device vmlinux EXPORT_SYMBOL_GPL +0xbecf886d drm_sched_entity_flush vmlinux EXPORT_SYMBOL +0xa2ff5e75 drm_syncobj_get_handle vmlinux EXPORT_SYMBOL +0xbb860b8e __dynamic_dev_dbg vmlinux EXPORT_SYMBOL +0x0d3e0246 simple_release_fs vmlinux EXPORT_SYMBOL +0x6626afca down vmlinux EXPORT_SYMBOL +0x679daf1c mlx4_set_vf_rate drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x9c777edd hinic_intr_type drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xae71627d ipmi_create_user drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x4846884a __spi_alloc_controller vmlinux EXPORT_SYMBOL_GPL +0xf258b567 sync_file_create vmlinux EXPORT_SYMBOL +0x9440c733 drm_dev_put vmlinux EXPORT_SYMBOL +0x35f2494c drm_dev_get vmlinux EXPORT_SYMBOL +0x7a63693d clk_divider_ro_ops vmlinux EXPORT_SYMBOL_GPL +0xb2ee9f8f irq_chip_set_affinity_parent vmlinux EXPORT_SYMBOL_GPL +0x21c804ee nfc_se_transaction net/nfc/nfc EXPORT_SYMBOL +0x4ce08913 inet_diag_dump_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0xb8357ac5 line6_alloc_sysex_buffer sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x14e07780 ubi_leb_write drivers/mtd/ubi/ubi EXPORT_SYMBOL_GPL +0x7a27581b mtd_concat_create drivers/mtd/mtd EXPORT_SYMBOL +0xc30b9232 tcp_rtx_synack vmlinux EXPORT_SYMBOL +0xc7c5c039 skb_put vmlinux EXPORT_SYMBOL +0xaf55b11c scsi_host_put vmlinux EXPORT_SYMBOL +0xe60e9eb4 vga_default_device vmlinux EXPORT_SYMBOL_GPL +0x9074c7a7 pcim_set_mwi vmlinux EXPORT_SYMBOL +0xe495926a alarm_restart vmlinux EXPORT_SYMBOL_GPL +0x56d24308 kvm_gfn_to_hva_cache_init vmlinux EXPORT_SYMBOL_GPL +0xdd231c55 nfc_hci_sak_to_protocol net/nfc/hci/hci EXPORT_SYMBOL +0xdb065657 nfnl_unlock net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0x2cc9a897 core_tpg_set_initiator_node_queue_depth drivers/target/target_core_mod EXPORT_SYMBOL +0xcde1026b capilib_data_b3_conf drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x387f2c17 configfs_depend_item fs/configfs/configfs EXPORT_SYMBOL +0xff9a667e ethtool_op_get_ts_info vmlinux EXPORT_SYMBOL +0x8bd5c9f9 sk_capable vmlinux EXPORT_SYMBOL +0xe4f3e8cc get_governor_parent_kobj vmlinux EXPORT_SYMBOL_GPL +0x4828e77b acpi_scan_lock_acquire vmlinux EXPORT_SYMBOL_GPL +0xec74680a pinctrl_put vmlinux EXPORT_SYMBOL_GPL +0xf3ae4ad9 pinctrl_get vmlinux EXPORT_SYMBOL_GPL +0x30f82fd6 generic_block_fiemap vmlinux EXPORT_SYMBOL +0x4afb2238 add_wait_queue vmlinux EXPORT_SYMBOL +0xc39e6e87 param_ops_invbool vmlinux EXPORT_SYMBOL +0x702b5351 snd_device_disconnect sound/core/snd EXPORT_SYMBOL_GPL +0x11b58247 __ib_alloc_cq_any drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x97263968 dm_bitset_resize drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x9f984513 strrchr vmlinux EXPORT_SYMBOL +0xe7257ab8 xa_store_range vmlinux EXPORT_SYMBOL +0x4841bdee strnchr vmlinux EXPORT_SYMBOL +0x2ca4d0ff dev_add_pack vmlinux EXPORT_SYMBOL +0x613efcb3 usb_hub_clear_tt_buffer vmlinux EXPORT_SYMBOL_GPL +0x824c2756 ahci_sdev_attrs vmlinux EXPORT_SYMBOL_GPL +0x4812c1cd ttm_bo_move_memcpy vmlinux 
EXPORT_SYMBOL +0xd938fe68 drm_modeset_drop_locks vmlinux EXPORT_SYMBOL +0x40e27379 drm_helper_connector_dpms vmlinux EXPORT_SYMBOL +0xbfe84422 regulator_list_voltage_linear_range vmlinux EXPORT_SYMBOL_GPL +0x5add9202 gpiod_get vmlinux EXPORT_SYMBOL_GPL +0x7cfe368d net_dim_get_def_tx_moderation vmlinux EXPORT_SYMBOL +0xd20bf6ba dcookie_unregister vmlinux EXPORT_SYMBOL_GPL +0x38b467fb kvm_vcpu_init vmlinux EXPORT_SYMBOL_GPL +0x77bfdae1 bio_alloc_mddev drivers/md/md-mod EXPORT_SYMBOL_GPL +0x96eb2c99 mlx5_fc_id drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x900ed007 mlx4_set_vf_spoofchk drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe1eca9bd __tracepoint_pnfs_mds_fallback_pg_get_mirror_count fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x36751a61 __tracepoint_pnfs_mds_fallback_pg_init_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xe5974249 __fscache_uncache_page fs/fscache/fscache EXPORT_SYMBOL +0x9ea53d7f vsnprintf vmlinux EXPORT_SYMBOL +0x7fe32873 rb_replace_node vmlinux EXPORT_SYMBOL +0xb18110e0 __tracepoint_arm_event vmlinux EXPORT_SYMBOL_GPL +0x5ad469f7 drm_gem_shmem_unpin vmlinux EXPORT_SYMBOL +0x3f17580b alloc_io_pgtable_ops vmlinux EXPORT_SYMBOL_GPL +0x5bd0748f crypto_del_default_rng vmlinux EXPORT_SYMBOL_GPL +0x668402aa crypto_put_default_rng vmlinux EXPORT_SYMBOL_GPL +0x6ff607b6 crypto_get_default_rng vmlinux EXPORT_SYMBOL_GPL +0xe3b70886 crypto_mod_put vmlinux EXPORT_SYMBOL_GPL +0x1e09b013 filemap_map_pages vmlinux EXPORT_SYMBOL +0xdee365b0 _raw_write_trylock vmlinux EXPORT_SYMBOL +0xebf82d8a cancel_work_sync vmlinux EXPORT_SYMBOL_GPL +0x47ea0e0e snd_pcm_hw_constraint_pow2 sound/core/snd-pcm EXPORT_SYMBOL +0x418d0815 vhost_dev_stop drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xe8d3f3f4 st21nfca_hci_loopback_event_received drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0x221b9eda dlm_register_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0x32ce3777 radix_tree_preload vmlinux EXPORT_SYMBOL +0x2709f56e kobject_get_unless_zero vmlinux EXPORT_SYMBOL +0xa604012a pingv6_ops vmlinux EXPORT_SYMBOL_GPL +0xffae4174 nf_log_set vmlinux EXPORT_SYMBOL +0x4e8d6f20 input_set_keycode vmlinux EXPORT_SYMBOL +0x4db5d3cc subsys_virtual_register vmlinux EXPORT_SYMBOL_GPL +0xf48cf0c9 drm_gem_free_mmap_offset vmlinux EXPORT_SYMBOL +0x60b5073a blkdev_ioctl vmlinux EXPORT_SYMBOL_GPL +0x3eac93be __quota_error vmlinux EXPORT_SYMBOL +0xe5377f60 vfs_dup_fs_context vmlinux EXPORT_SYMBOL +0xf74a28e2 nfc_hci_allocate_device net/nfc/hci/hci EXPORT_SYMBOL +0x5e3a3912 parport_irq_handler drivers/parport/parport EXPORT_SYMBOL +0x5065be62 mlx5_packet_reformat_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x10962bc2 nfs_auth_info_match fs/nfs/nfs EXPORT_SYMBOL_GPL +0x91f44510 idr_alloc_cyclic vmlinux EXPORT_SYMBOL +0xd14e36e6 dev_set_alias vmlinux EXPORT_SYMBOL +0x696340a5 __i2c_board_lock vmlinux EXPORT_SYMBOL_GPL +0x8eae8dfd usb_find_common_endpoints vmlinux EXPORT_SYMBOL_GPL +0x4437de01 phy_basic_t1_features vmlinux EXPORT_SYMBOL_GPL +0xe9ea3910 drm_panel_prepare vmlinux EXPORT_SYMBOL +0x87724981 drm_atomic_helper_wait_for_flip_done vmlinux EXPORT_SYMBOL +0xad0d207b set_selection_kernel vmlinux EXPORT_SYMBOL_GPL +0x6486c79d tty_ldisc_flush vmlinux EXPORT_SYMBOL_GPL +0x6386acac regulator_set_voltage_sel_pickable_regmap vmlinux EXPORT_SYMBOL_GPL +0xdfbe1405 __dynamic_netdev_dbg vmlinux EXPORT_SYMBOL +0x07e71b93 __blkg_prfill_rwstat vmlinux EXPORT_SYMBOL_GPL +0xdd4945ec crypto_register_instance vmlinux EXPORT_SYMBOL_GPL +0xf7d1f02f crypto_alloc_tfm vmlinux 
EXPORT_SYMBOL_GPL +0x909cf5f5 rpc_machine_cred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8a073a37 hinic_ovs_set_vf_load_state drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x6d03aa2d fscache_object_destroy fs/fscache/fscache EXPORT_SYMBOL +0xf812cff6 memscan vmlinux EXPORT_SYMBOL +0x888c5be5 irq_bypass_register_consumer vmlinux EXPORT_SYMBOL_GPL +0xb10d964d devlink_fmsg_pair_nest_end vmlinux EXPORT_SYMBOL_GPL +0x0e8ffea1 skb_tstamp_tx vmlinux EXPORT_SYMBOL_GPL +0xa62892c6 efivar_sysfs_list vmlinux EXPORT_SYMBOL_GPL +0x93022ba6 __scsi_format_command vmlinux EXPORT_SYMBOL +0x3c00aed2 tty_set_operations vmlinux EXPORT_SYMBOL +0x0d082982 regulator_register_supply_alias vmlinux EXPORT_SYMBOL_GPL +0xab67a0ac dql_init vmlinux EXPORT_SYMBOL +0x7744fdc6 mark_buffer_dirty vmlinux EXPORT_SYMBOL +0x61969601 get_super vmlinux EXPORT_SYMBOL +0x9c1e5bf5 queued_spin_lock_slowpath vmlinux EXPORT_SYMBOL +0x2e413337 write_bytes_to_xdr_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2241372e ib_create_ah_from_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4ff500ef mlx4_get_module_info drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x32b63d30 srp_attach_transport drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0xaf27bebf drbd_disk_str drivers/block/drbd/drbd EXPORT_SYMBOL +0x2b70362c skb_copy_and_csum_bits vmlinux EXPORT_SYMBOL +0x16ddac57 devm_hwmon_device_register_with_groups vmlinux EXPORT_SYMBOL_GPL +0x355110b5 ahci_check_ready vmlinux EXPORT_SYMBOL_GPL +0xae277372 __drm_crtc_commit_free vmlinux EXPORT_SYMBOL +0x3fe490d0 clk_mux_ro_ops vmlinux EXPORT_SYMBOL_GPL +0x11b5abdb ll_rw_block vmlinux EXPORT_SYMBOL +0x7d8a0d31 block_write_begin vmlinux EXPORT_SYMBOL +0xc0176970 irq_domain_associate_many vmlinux EXPORT_SYMBOL_GPL +0xbc6bec66 free_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0x155217c5 ps2_sendbyte drivers/input/serio/libps2 EXPORT_SYMBOL +0xdc31781e mlxsw_reg_trans_write drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x9d253614 mlx5_set_port_pfc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x4917f8cd mlx4_release_eq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x21a3dc2a fc_release_transport drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0x45f1bc79 __tracepoint_non_standard_event vmlinux EXPORT_SYMBOL_GPL +0xec8d8837 sas_rphy_free vmlinux EXPORT_SYMBOL +0xbe1887e4 ata_unpack_xfermask vmlinux EXPORT_SYMBOL_GPL +0x610ec9f5 drm_mode_validate_driver vmlinux EXPORT_SYMBOL +0xb1be8818 fb_deferred_io_fsync vmlinux EXPORT_SYMBOL_GPL +0xe73fb169 pci_alloc_irq_vectors_affinity vmlinux EXPORT_SYMBOL +0xc27c0d16 blk_mq_sched_mark_restart_hctx vmlinux EXPORT_SYMBOL_GPL +0xb16c2e8d blk_mq_start_hw_queues vmlinux EXPORT_SYMBOL +0xe1761617 security_inet_conn_request vmlinux EXPORT_SYMBOL +0xef074558 sysfs_remove_groups vmlinux EXPORT_SYMBOL_GPL +0x1268f357 resume_device_irqs vmlinux EXPORT_SYMBOL_GPL +0x5dae806b iscsit_unregister_transport drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x150d4d8d fcoe_fcf_get_selected drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x6087986b fc_lport_flogi_resp drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x7858006a pnfs_generic_recover_commit_reqs fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x349dc2b3 of_dma_configure vmlinux EXPORT_SYMBOL_GPL +0xf25ab022 of_usb_update_otg_caps vmlinux EXPORT_SYMBOL_GPL +0x4f99b3a5 dev_pm_domain_attach_by_id vmlinux EXPORT_SYMBOL_GPL +0x9c100806 component_match_add_release vmlinux EXPORT_SYMBOL +0x857a34c3 drm_add_override_edid_modes vmlinux EXPORT_SYMBOL +0xecc3a879 drm_fb_helper_alloc_fbi 
vmlinux EXPORT_SYMBOL +0x7b340485 regulator_list_voltage_pickable_linear_range vmlinux EXPORT_SYMBOL_GPL +0x1e0cd7fe acpi_detach_data vmlinux EXPORT_SYMBOL +0x139e3e2e gpiod_set_array_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0xbcbdf60f kstrtos8 vmlinux EXPORT_SYMBOL +0xdb80423c kblockd_schedule_work_on vmlinux EXPORT_SYMBOL +0xfdbd7a17 crypto_get_attr_type vmlinux EXPORT_SYMBOL_GPL +0x88abb78b ZSTD_insertBlock lib/zstd/zstd_decompress EXPORT_SYMBOL +0x077a3922 svc_proc_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe4907368 svc_rpcbind_set_version net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4b7a5177 nfnetlink_send net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xbdd50fe0 ib_fmr_pool_unmap drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa14bc89c st_nci_remove drivers/nfc/st-nci/st-nci EXPORT_SYMBOL_GPL +0x5dac07fd pnfs_generic_pg_init_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x30a91597 pnfs_error_mark_layout_for_return fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x6735d56e tcp_ca_get_name_by_key vmlinux EXPORT_SYMBOL_GPL +0xccc21f5e input_get_new_minor vmlinux EXPORT_SYMBOL +0x44c19129 phy_modify vmlinux EXPORT_SYMBOL_GPL +0x26c90ea4 scsi_eh_get_sense vmlinux EXPORT_SYMBOL_GPL +0x03a91e66 device_create_bin_file vmlinux EXPORT_SYMBOL_GPL +0x8e8aa811 drm_crtc_wait_one_vblank vmlinux EXPORT_SYMBOL +0x1ec37a6b drm_dev_unregister vmlinux EXPORT_SYMBOL +0x00096581 drm_scdc_set_high_tmds_clock_ratio vmlinux EXPORT_SYMBOL +0x2a43b5f8 pci_intx vmlinux EXPORT_SYMBOL_GPL +0x0ea1d8d0 crypto_alloc_aead vmlinux EXPORT_SYMBOL_GPL +0x7eaa6b0e crypto_spawn_tfm2 vmlinux EXPORT_SYMBOL_GPL +0x50fad434 round_jiffies_up vmlinux EXPORT_SYMBOL_GPL +0x2444dcaa ceph_cls_assert_locked net/ceph/libceph EXPORT_SYMBOL +0xce1414b2 register_capictr_notifier drivers/isdn/capi/kernelcapi EXPORT_SYMBOL_GPL +0xa53387c7 dm_rh_flush drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x3ad0f55b dm_bm_flush drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x71e05813 mlx4_mtt_cleanup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x5e40a9f9 hinic_unregister_micro_log drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x8fb1790c hinic_get_netdev_by_lld drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x75f0e875 xas_store vmlinux EXPORT_SYMBOL_GPL +0x6139ab67 xt_request_find_table_lock vmlinux EXPORT_SYMBOL_GPL +0x3f9ce155 dev_get_mac_address vmlinux EXPORT_SYMBOL +0x3ccf0061 dev_set_mac_address vmlinux EXPORT_SYMBOL +0xe4bdb134 sk_mc_loop vmlinux EXPORT_SYMBOL +0xbd76f4b4 serio_open vmlinux EXPORT_SYMBOL +0x22b325d5 kd_mksound vmlinux EXPORT_SYMBOL +0x34331f04 acpi_os_unmap_memory vmlinux EXPORT_SYMBOL_GPL +0xabe270b7 crypto_register_scomps vmlinux EXPORT_SYMBOL_GPL +0xe783e261 sysfs_emit vmlinux EXPORT_SYMBOL_GPL +0x100ab093 __tracepoint_powernv_throttle vmlinux EXPORT_SYMBOL_GPL +0xe1dcf64a audit_log_format vmlinux EXPORT_SYMBOL +0xc9af0c18 nf_fwd_netdev_egress net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL +0x831227bb vringh_complete_user drivers/vhost/vringh EXPORT_SYMBOL +0xb6517b2e mlxsw_afa_block_append_trap_and_forward drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x6d2fc5a6 net_namespace_list vmlinux EXPORT_SYMBOL_GPL +0xff75b697 sock_register vmlinux EXPORT_SYMBOL +0x15fb269a serio_close vmlinux EXPORT_SYMBOL +0x7fcf6aae ata_link_next vmlinux EXPORT_SYMBOL_GPL +0x0b520439 loop_register_transfer vmlinux EXPORT_SYMBOL +0x24269e54 device_set_wakeup_enable vmlinux EXPORT_SYMBOL_GPL +0xca260278 drm_vblank_restore vmlinux EXPORT_SYMBOL +0xb6ef393e tpm2_probe vmlinux EXPORT_SYMBOL_GPL +0x82529d8d 
regulator_get_bypass_regmap vmlinux EXPORT_SYMBOL_GPL +0x33c84ea0 pci_find_bus vmlinux EXPORT_SYMBOL +0xe9aff520 pinctrl_unregister vmlinux EXPORT_SYMBOL_GPL +0xa1059e07 _proc_mkdir vmlinux EXPORT_SYMBOL_GPL +0xe0c7e9c6 register_ftrace_function vmlinux EXPORT_SYMBOL_GPL +0xd06fc8d3 cgrp_dfl_root vmlinux EXPORT_SYMBOL_GPL +0x91c8b5b5 mutex_lock_io vmlinux EXPORT_SYMBOL_GPL +0xc3d02e20 kvm_vcpu_read_guest vmlinux EXPORT_SYMBOL_GPL +0x5ce3b588 nfnl_lock net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xd4cd3c1a __tracepoint_bcache_writeback drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xe6a5d217 nfs_flock fs/nfs/nfs EXPORT_SYMBOL_GPL +0xed4da719 inet6_register_protosw vmlinux EXPORT_SYMBOL +0x3410807d xdp_attachment_flags_ok vmlinux EXPORT_SYMBOL_GPL +0x650778bf dev_set_group vmlinux EXPORT_SYMBOL +0x3df903dc netif_set_real_num_rx_queues vmlinux EXPORT_SYMBOL +0x73b70f5e netif_set_real_num_tx_queues vmlinux EXPORT_SYMBOL +0x9b9df714 phy_init_hw vmlinux EXPORT_SYMBOL +0x16f42e3d phy_register_fixup_for_uid vmlinux EXPORT_SYMBOL +0x2aa5e907 hisi_sas_controller_reset_prepare vmlinux EXPORT_SYMBOL_GPL +0x204c5067 scsi_dev_info_add_list vmlinux EXPORT_SYMBOL +0x1d12fd6e drm_mm_insert_node_in_range vmlinux EXPORT_SYMBOL +0x30da0bb0 pci_find_resource vmlinux EXPORT_SYMBOL +0x614ef9ea pci_user_read_config_word vmlinux EXPORT_SYMBOL_GPL +0x740d7f1f badblocks_check vmlinux EXPORT_SYMBOL_GPL +0x32d7baa4 proc_remove vmlinux EXPORT_SYMBOL +0xde653bef end_buffer_async_write vmlinux EXPORT_SYMBOL +0x39fd83db halt_poll_ns_shrink vmlinux EXPORT_SYMBOL_GPL +0xc4878267 mlx4_SET_PORT_fcs_check drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x1751b5bd hinic_slq_free drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x9a583306 netlbl_bitmap_walk vmlinux EXPORT_SYMBOL +0x6cb46525 netlbl_catmap_walk vmlinux EXPORT_SYMBOL +0x90344072 drm_fb_helper_unlink_fbi vmlinux EXPORT_SYMBOL +0xf222794c drm_fb_xrgb8888_to_gray8 vmlinux EXPORT_SYMBOL +0x08f53ba9 drm_atomic_helper_update_legacy_modeset_state vmlinux EXPORT_SYMBOL +0x9258c776 hdmi_vendor_infoframe_pack_only vmlinux EXPORT_SYMBOL +0x50097088 security_tun_dev_free_security vmlinux EXPORT_SYMBOL +0x2f7b1029 add_swap_extent vmlinux EXPORT_SYMBOL_GPL +0xc994e0a8 svc_rpcb_cleanup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe622057d iscsi_tcp_conn_get_stats drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x58a85bba dm_disk vmlinux EXPORT_SYMBOL_GPL +0x911fcd6c phylink_start vmlinux EXPORT_SYMBOL_GPL +0x8a47043d LZ4_decompress_safe_continue vmlinux EXPORT_SYMBOL +0xfc70c5d8 ring_buffer_consume vmlinux EXPORT_SYMBOL_GPL +0x1c5b1f28 irq_free_descs vmlinux EXPORT_SYMBOL_GPL +0xa8181adf proc_dointvec vmlinux EXPORT_SYMBOL +0xc56a41e6 vabits_actual vmlinux EXPORT_SYMBOL +0x782b43c2 qlt_rdy_to_xfer drivers/scsi/qla2xxx/qla2xxx EXPORT_SYMBOL +0x81a17396 mlog_and_bits fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x5abd92bb __inet_lookup_established vmlinux EXPORT_SYMBOL_GPL +0xb2e7751b netdev_alert vmlinux EXPORT_SYMBOL +0xc9888822 netdev_has_any_upper_dev vmlinux EXPORT_SYMBOL +0x0321cdbf of_alias_get_highest_id vmlinux EXPORT_SYMBOL_GPL +0x6be81f16 drm_atomic_helper_async_check vmlinux EXPORT_SYMBOL +0xe14765b2 drm_dp_atomic_find_vcpi_slots vmlinux EXPORT_SYMBOL +0x5351d825 __reset_control_get vmlinux EXPORT_SYMBOL_GPL +0x9430b198 trace_dump_stack vmlinux EXPORT_SYMBOL_GPL +0x4248ae3c single_task_running vmlinux EXPORT_SYMBOL +0x2003c5a1 ceph_osdc_flush_notifies net/ceph/libceph EXPORT_SYMBOL +0x9804bb1f p9_client_fcreate net/9p/9pnet EXPORT_SYMBOL +0xee5f65b8 
atm_dev_lookup net/atm/atm EXPORT_SYMBOL +0x44f19e1a svc_rpcb_setup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa8ba5946 br_forward_finish net/bridge/bridge EXPORT_SYMBOL_GPL +0xe57bfa95 nf_synproxy_ipv4_fini net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xdbf58f38 mlx4_flow_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x4d0040a0 cpumask_next_and vmlinux EXPORT_SYMBOL +0x39042641 udp_lib_rehash vmlinux EXPORT_SYMBOL +0xfecb7759 udp_lib_unhash vmlinux EXPORT_SYMBOL +0x05495392 hid_debug vmlinux EXPORT_SYMBOL_GPL +0x2647717e pm_stay_awake vmlinux EXPORT_SYMBOL_GPL +0xb0276c67 device_show_ulong vmlinux EXPORT_SYMBOL_GPL +0x3a5aefbd drm_hdmi_infoframe_set_hdr_metadata vmlinux EXPORT_SYMBOL +0xd025474d acpi_bus_register_driver vmlinux EXPORT_SYMBOL +0x8750b2d4 of_pci_find_child_device vmlinux EXPORT_SYMBOL_GPL +0x9e9d711f set_ras_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x1f939050 tcp_ca_get_key_by_name vmlinux EXPORT_SYMBOL_GPL +0xcc30f0f1 tcp_tx_delay_enabled vmlinux EXPORT_SYMBOL +0xb8fab6db nf_hook_slow vmlinux EXPORT_SYMBOL +0x3cb73b9d task_cls_state vmlinux EXPORT_SYMBOL_GPL +0x6bd45ada dev_pick_tx_cpu_id vmlinux EXPORT_SYMBOL +0xbc071179 iscsi_get_ipaddress_state_name vmlinux EXPORT_SYMBOL_GPL +0xd2b51e43 drm_open vmlinux EXPORT_SYMBOL +0x8583b308 tpm1_do_selftest vmlinux EXPORT_SYMBOL_GPL +0xce5ac24f zlib_inflate_workspacesize vmlinux EXPORT_SYMBOL +0x629ebee9 of_gen_pool_get vmlinux EXPORT_SYMBOL_GPL +0xc60d0620 __num_online_cpus vmlinux EXPORT_SYMBOL +0x4e3fd1b4 kvm_release_pfn_clean vmlinux EXPORT_SYMBOL_GPL +0x43af8509 nci_send_data net/nfc/nci/nci EXPORT_SYMBOL +0xce7f62f9 ceph_osdc_sync net/ceph/libceph EXPORT_SYMBOL +0x6b88cb38 svc_age_temp_xprts_now net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe08cb051 ip6_tnl_change_mtu net/ipv6/ip6_tunnel EXPORT_SYMBOL +0x08995031 snd_pcm_limit_hw_rates sound/core/snd-pcm EXPORT_SYMBOL +0x740c784c vhost_dequeue_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3537f78b dm_dirty_log_type_register drivers/md/dm-log EXPORT_SYMBOL +0xafae4e81 __tracepoint_bcache_btree_node_alloc_fail drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xf982e6db o2net_send_message fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x646bbf34 pnfs_put_lseg fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x43e3882f nfs_sb_deactive fs/nfs/nfs EXPORT_SYMBOL_GPL +0x14c8335d wimax_report_rfkill_hw vmlinux EXPORT_SYMBOL_GPL +0x9f41feb9 tcp_recvmsg vmlinux EXPORT_SYMBOL +0xa25fc115 xt_compat_check_entry_offsets vmlinux EXPORT_SYMBOL +0xee71bf66 xdp_return_buff vmlinux EXPORT_SYMBOL_GPL +0x374c53e1 ata_get_cmd_descript vmlinux EXPORT_SYMBOL_GPL +0xdf50cba7 vga_client_register vmlinux EXPORT_SYMBOL +0x62e8b33d bioset_init vmlinux EXPORT_SYMBOL +0x7db5227b bioset_exit vmlinux EXPORT_SYMBOL +0x9cfd805d irq_alloc_generic_chip vmlinux EXPORT_SYMBOL_GPL +0xfce4ff70 v9fs_get_default_trans net/9p/9pnet EXPORT_SYMBOL +0xbd75cfb2 fcoe_ctlr_init drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL +0x0f8ddd5e xfrm_audit_state_notfound_simple vmlinux EXPORT_SYMBOL_GPL +0x5a829aaf udp_destruct_sock vmlinux EXPORT_SYMBOL_GPL +0x66a65a62 inet_hashinfo2_init_mod vmlinux EXPORT_SYMBOL_GPL +0xd7ca1402 ata_pci_sff_activate_host vmlinux EXPORT_SYMBOL_GPL +0xd4682ee2 ata_timing_cycle2mode vmlinux EXPORT_SYMBOL_GPL +0xf1775b63 ata_scsi_queuecmd vmlinux EXPORT_SYMBOL_GPL +0x66551bc7 drm_detect_monitor_audio vmlinux EXPORT_SYMBOL +0x0bcb5764 bpf_verifier_log_write vmlinux EXPORT_SYMBOL_GPL +0xb676353d console_start vmlinux EXPORT_SYMBOL +0x3dd9b230 proc_dointvec_userhz_jiffies vmlinux 
EXPORT_SYMBOL +0x8fd180e7 kernel_neon_begin vmlinux EXPORT_SYMBOL +0x65dc10ea nf_flow_offload_ip_hook net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x59233430 parport_unregister_driver drivers/parport/parport EXPORT_SYMBOL +0x3a165a9c show_mem vmlinux EXPORT_SYMBOL +0xa489c472 tcp_simple_retransmit vmlinux EXPORT_SYMBOL +0xb842863f xt_register_match vmlinux EXPORT_SYMBOL +0x36453cc8 of_find_node_with_property vmlinux EXPORT_SYMBOL +0x9fa00108 drm_crtc_vblank_put vmlinux EXPORT_SYMBOL +0x856d840e drm_gem_unmap_dma_buf vmlinux EXPORT_SYMBOL +0x54bc9a3c pci_check_and_mask_intx vmlinux EXPORT_SYMBOL_GPL +0x1650a6fc proc_set_user vmlinux EXPORT_SYMBOL +0x0f71ec5e anon_inode_getfile vmlinux EXPORT_SYMBOL_GPL +0xdf16e472 simple_dir_inode_operations vmlinux EXPORT_SYMBOL +0xd0e487ab trace_output_call vmlinux EXPORT_SYMBOL_GPL +0x3f3e9eed irq_domain_xlate_onetwocell vmlinux EXPORT_SYMBOL_GPL +0xf76606d2 cred_fscmp vmlinux EXPORT_SYMBOL +0xe8ce54e5 napi_consume_skb vmlinux EXPORT_SYMBOL +0xc73333f9 sas_get_local_phy vmlinux EXPORT_SYMBOL_GPL +0x204d49d8 ahci_do_softreset vmlinux EXPORT_SYMBOL_GPL +0x2595dd4b pci_host_probe vmlinux EXPORT_SYMBOL_GPL +0x9b51c5a8 refcount_sub_and_test_checked vmlinux EXPORT_SYMBOL +0xe2ef509d vfs_get_tree vmlinux EXPORT_SYMBOL +0x3b5a3e16 dmam_pool_create vmlinux EXPORT_SYMBOL +0x2e7fe630 register_kretprobe vmlinux EXPORT_SYMBOL_GPL +0x3a08ec70 iscsi_host_alloc drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x9cf6cffb input_mt_get_slot_by_key vmlinux EXPORT_SYMBOL +0x183cc32d input_mt_drop_unused vmlinux EXPORT_SYMBOL +0xc41263c7 usb_ep_disable vmlinux EXPORT_SYMBOL_GPL +0xa9284e44 genphy_suspend vmlinux EXPORT_SYMBOL +0xf278bcb8 __platform_create_bundle vmlinux EXPORT_SYMBOL_GPL +0x201611a1 subsys_system_register vmlinux EXPORT_SYMBOL_GPL +0x646288df ttm_unmap_and_unpopulate_pages vmlinux EXPORT_SYMBOL +0xaac39ff3 __drm_printfn_info vmlinux EXPORT_SYMBOL +0xc6f5aeda drm_atomic_check_only vmlinux EXPORT_SYMBOL +0x1dd03340 acpi_find_child_device vmlinux EXPORT_SYMBOL_GPL +0x0c660876 cpci_hp_register_bus vmlinux EXPORT_SYMBOL_GPL +0x2eb3ab87 __find_get_block vmlinux EXPORT_SYMBOL +0x2197a615 dcache_dir_open vmlinux EXPORT_SYMBOL +0x3f4547a7 put_unused_fd vmlinux EXPORT_SYMBOL +0xb1ddf995 jiffies_64_to_clock_t vmlinux EXPORT_SYMBOL +0x6231e52f irq_domain_create_hierarchy vmlinux EXPORT_SYMBOL_GPL +0xfd525ec7 lc_put lib/lru_cache EXPORT_SYMBOL +0x9134feb7 lc_get lib/lru_cache EXPORT_SYMBOL +0xcc979a91 lc_set lib/lru_cache EXPORT_SYMBOL +0x4ac2b34b ibdev_err drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfe9fe1b3 mlx4_counter_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x874d05a2 srp_reconnect_rport drivers/scsi/scsi_transport_srp EXPORT_SYMBOL +0xc3a2be67 nfs_net_id fs/nfs/nfs EXPORT_SYMBOL_GPL +0xece784c2 rb_first vmlinux EXPORT_SYMBOL +0xeabd377c inet6_lookup_listener vmlinux EXPORT_SYMBOL_GPL +0xf4cdf14f tcp_seq_stop vmlinux EXPORT_SYMBOL +0x56df260e hisi_sas_slave_configure vmlinux EXPORT_SYMBOL_GPL +0xd04f6038 dev_pm_qos_expose_latency_limit vmlinux EXPORT_SYMBOL_GPL +0xf278799d of_dma_simple_xlate vmlinux EXPORT_SYMBOL_GPL +0xcdbf1c3d gpiochip_generic_free vmlinux EXPORT_SYMBOL_GPL +0xa20d01ba __trace_bprintk vmlinux EXPORT_SYMBOL_GPL +0x6a5ecb18 unregister_module_notifier vmlinux EXPORT_SYMBOL +0x3c1c3725 rcu_fwd_progress_check vmlinux EXPORT_SYMBOL_GPL +0x3dc619d3 swake_up_locked vmlinux EXPORT_SYMBOL +0xf8f61ebc wake_up_var vmlinux EXPORT_SYMBOL +0xf1160be9 flush_delayed_work vmlinux EXPORT_SYMBOL +0xb4985beb ZSTD_resetCStream 
lib/zstd/zstd_compress EXPORT_SYMBOL +0x59d943c8 vsock_addr_cast net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xd2c107bb ceph_flags_to_mode net/ceph/libceph EXPORT_SYMBOL +0xbf85deac nf_conntrack_hash_check_insert net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xe2acbd58 ib_find_exact_cached_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf85018fb microread_remove drivers/nfc/microread/microread EXPORT_SYMBOL +0xaa5a0d93 skb_zerocopy_headlen vmlinux EXPORT_SYMBOL_GPL +0x805ce241 sas_change_queue_depth vmlinux EXPORT_SYMBOL_GPL +0x4bde9b8c ata_dummy_port_ops vmlinux EXPORT_SYMBOL_GPL +0x85e44b4e fwnode_property_present vmlinux EXPORT_SYMBOL_GPL +0x2cc01f12 devm_release_action vmlinux EXPORT_SYMBOL_GPL +0x1d05ad83 drm_of_find_panel_or_bridge vmlinux EXPORT_SYMBOL_GPL +0xf767597d iscsit_response_queue drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xfd132d92 unix_peer_get vmlinux EXPORT_SYMBOL_GPL +0xd2eb32b9 tcp_rx_skb_cache_key vmlinux EXPORT_SYMBOL +0xeedd987e phy_10gbit_features_array vmlinux EXPORT_SYMBOL_GPL +0x6ab52305 device_property_read_string vmlinux EXPORT_SYMBOL_GPL +0xa77bbef9 bus_register vmlinux EXPORT_SYMBOL_GPL +0x8b111e90 clk_hw_register_divider_table vmlinux EXPORT_SYMBOL_GPL +0x082c3213 pci_root_buses vmlinux EXPORT_SYMBOL +0xfdf48fe1 blk_freeze_queue_start vmlinux EXPORT_SYMBOL_GPL +0xe887de6f balloon_aops vmlinux EXPORT_SYMBOL_GPL +0xacf649bf audit_log_task_info vmlinux EXPORT_SYMBOL +0x685366c5 nft_meta_set_validate net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x95d23273 tcp_syn_ack_timeout vmlinux EXPORT_SYMBOL +0xb11c3a25 nf_log_unregister vmlinux EXPORT_SYMBOL +0xff786d43 scsi_eh_ready_devs vmlinux EXPORT_SYMBOL_GPL +0x5cd70894 drm_atomic_helper_connector_duplicate_state vmlinux EXPORT_SYMBOL +0x9bfda69f of_clk_get_by_name vmlinux EXPORT_SYMBOL +0x141271bf acpi_dev_found vmlinux EXPORT_SYMBOL +0x42f1b900 fb_pad_unaligned_buffer vmlinux EXPORT_SYMBOL +0xf7e29129 gpiochip_irqchip_add_key vmlinux EXPORT_SYMBOL_GPL +0x33c9834f d_hash_and_lookup vmlinux EXPORT_SYMBOL +0x1df63e88 ZSTD_compressBegin lib/zstd/zstd_compress EXPORT_SYMBOL +0xf51d25c5 dccp_death_row net/dccp/dccp EXPORT_SYMBOL_GPL +0xc6d4678a lapb_data_received net/lapb/lapb EXPORT_SYMBOL +0xe4bc0fcc snd_rawmidi_drain_input sound/core/snd-rawmidi EXPORT_SYMBOL +0x7ea07d14 of_property_read_string vmlinux EXPORT_SYMBOL_GPL +0x4a7abbb2 timer_unstable_counter_workaround vmlinux EXPORT_SYMBOL_GPL +0xe7544b08 dmi_kobj vmlinux EXPORT_SYMBOL_GPL +0x82fc0fc6 thermal_cooling_device_register vmlinux EXPORT_SYMBOL_GPL +0x8a470bc9 input_open_device vmlinux EXPORT_SYMBOL +0x3b9f478e ahci_platform_disable_regulators vmlinux EXPORT_SYMBOL_GPL +0x75c323df drm_atomic_helper_commit_planes vmlinux EXPORT_SYMBOL +0x86f278db regulator_set_bypass_regmap vmlinux EXPORT_SYMBOL_GPL +0xfe990052 gpio_free vmlinux EXPORT_SYMBOL_GPL +0x2a2fa260 nla_policy_len vmlinux EXPORT_SYMBOL +0xad0acf48 _copy_from_iter_nocache vmlinux EXPORT_SYMBOL +0x83535fc2 blk_mq_rdma_map_queues vmlinux EXPORT_SYMBOL_GPL +0x1152d95e fsync_bdev vmlinux EXPORT_SYMBOL +0x6a982dd8 simple_get_link vmlinux EXPORT_SYMBOL +0x92ec510d jiffies64_to_msecs vmlinux EXPORT_SYMBOL +0xee8d74d6 jiffies64_to_nsecs vmlinux EXPORT_SYMBOL +0xb2612501 hinic_unregister_fault_recover drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xb4859740 icmp6_send vmlinux EXPORT_SYMBOL +0x637c17fb ip6_dst_lookup vmlinux EXPORT_SYMBOL_GPL +0xea41184e efivar_entry_set_get_size vmlinux EXPORT_SYMBOL_GPL +0xdcf9646a i2c_new_device vmlinux EXPORT_SYMBOL_GPL +0x025d1cbf device_for_each_child 
vmlinux EXPORT_SYMBOL_GPL +0x1627a02c drm_flip_work_queue vmlinux EXPORT_SYMBOL +0x8c06514e drm_vma_offset_lookup_locked vmlinux EXPORT_SYMBOL +0x8c2a17d8 drm_mode_set_name vmlinux EXPORT_SYMBOL +0x957ce2c3 drm_legacy_ioremap_wc vmlinux EXPORT_SYMBOL +0x40a9b349 vzalloc vmlinux EXPORT_SYMBOL +0xbaffff96 ZSTD_CStreamWorkspaceBound lib/zstd/zstd_compress EXPORT_SYMBOL +0xee02a67f dccp_close net/dccp/dccp EXPORT_SYMBOL_GPL +0xf7210ebf __ib_alloc_pd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xcff1a284 mlx4_find_cached_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x1902178a srp_tmo_valid drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0x73f9cb65 nfs_commit_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x56dc5be3 kpatch_register kernel/tkernel/kpatch/kpatch EXPORT_SYMBOL +0x3527cc90 pm_generic_poweroff_late vmlinux EXPORT_SYMBOL_GPL +0xaddf6eec pci_probe_reset_slot vmlinux EXPORT_SYMBOL_GPL +0x422ce162 unregister_key_type vmlinux EXPORT_SYMBOL +0xd6740159 nci_allocate_device net/nfc/nci/nci EXPORT_SYMBOL +0x7f44295c rpc_shutdown_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf38f04ed failover_slave_unregister net/core/failover EXPORT_SYMBOL_GPL +0x2e0989c7 vringh_abandon_user drivers/vhost/vringh EXPORT_SYMBOL +0xe5840ec6 ib_wc_status_msg drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfc324d71 transport_generic_new_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0x5bde6064 pppox_ioctl drivers/net/ppp/pppox EXPORT_SYMBOL +0xdad85bf6 mlx4_get_internal_clock_params drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x54ef680b hinic_get_ppf_uld_by_pdev drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xf371fda3 af_alg_wait_for_data crypto/af_alg EXPORT_SYMBOL_GPL +0xd01480f7 dlmunlock fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0x11089ac7 _ctype vmlinux EXPORT_SYMBOL +0x0d1ab37c skb_mpls_pop vmlinux EXPORT_SYMBOL_GPL +0x1f7bdf2e usb_add_gadget_udc vmlinux EXPORT_SYMBOL_GPL +0xd0ef875a drm_fb_helper_prepare vmlinux EXPORT_SYMBOL +0x34e15d21 __drm_atomic_helper_connector_duplicate_state vmlinux EXPORT_SYMBOL +0xefe96a89 blk_get_queue vmlinux EXPORT_SYMBOL +0xae139d43 bd_set_size vmlinux EXPORT_SYMBOL +0x7bd4cbbd pm_vt_switch_unregister vmlinux EXPORT_SYMBOL +0xca7d8764 kthread_freezable_should_stop vmlinux EXPORT_SYMBOL_GPL +0x18fb2caf cpus_read_unlock vmlinux EXPORT_SYMBOL_GPL +0x4f35a702 rxrpc_kernel_recv_data net/rxrpc/rxrpc EXPORT_SYMBOL +0x4f7eee62 nf_conncount_gc_list net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x389b3807 mtd_device_unregister drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x3ae1afd1 __tracepoint_bcache_journal_write drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x1b99051e __fscache_enable_cookie fs/fscache/fscache EXPORT_SYMBOL +0x0853d0cd power_supply_get_by_name vmlinux EXPORT_SYMBOL_GPL +0x5c84a5a2 regulator_set_load vmlinux EXPORT_SYMBOL_GPL +0xf205fa65 pcie_capability_write_dword vmlinux EXPORT_SYMBOL +0x67cca8dc gpiod_is_active_low vmlinux EXPORT_SYMBOL_GPL +0x7846af3e __kfifo_len_r vmlinux EXPORT_SYMBOL +0xd8d2876e vfs_create_mount vmlinux EXPORT_SYMBOL +0x009933b9 srcutorture_get_gp_data vmlinux EXPORT_SYMBOL_GPL +0x3a9afbf5 svc_xprt_names net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8663dd4f snd_pcm_stop sound/core/snd-pcm EXPORT_SYMBOL +0x51dbdd3f rdma_rw_mr_factor drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd19958f4 ib_alloc_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa18b7ee0 target_backend_unregister drivers/target/target_core_mod EXPORT_SYMBOL +0x4e573d17 dm_cell_lock_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x3b67ccff 
close_candev drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x27864d57 memparse vmlinux EXPORT_SYMBOL +0xa2060911 inet_current_timestamp vmlinux EXPORT_SYMBOL +0x62ddf4a6 inet_sk_rebuild_header vmlinux EXPORT_SYMBOL +0x3706d9f2 devlink_dpipe_table_counter_enabled vmlinux EXPORT_SYMBOL_GPL +0xe2cecc56 flow_block_cb_is_busy vmlinux EXPORT_SYMBOL +0x25ab9ba8 skb_copy vmlinux EXPORT_SYMBOL +0xeecde34b input_ff_flush vmlinux EXPORT_SYMBOL_GPL +0xc1ebbf92 input_inject_event vmlinux EXPORT_SYMBOL +0xe6f7988a gadget_find_ep_by_name vmlinux EXPORT_SYMBOL_GPL +0x1b451393 regmap_field_free vmlinux EXPORT_SYMBOL_GPL +0xe5a6c866 acpi_is_pnp_device vmlinux EXPORT_SYMBOL_GPL +0x8c23d0b1 pci_bus_read_config_dword vmlinux EXPORT_SYMBOL +0x4ffebfa9 sysfs_remove_mount_point vmlinux EXPORT_SYMBOL_GPL +0xe816e452 block_truncate_page vmlinux EXPORT_SYMBOL +0x7a056171 vfs_tmpfile vmlinux EXPORT_SYMBOL +0xf6d08ae9 generic_write_checks vmlinux EXPORT_SYMBOL +0x4c6b52f9 kthread_park vmlinux EXPORT_SYMBOL_GPL +0x529dfa48 snd_device_initialize sound/core/snd EXPORT_SYMBOL_GPL +0xe94729f4 mptscsih_host_attrs drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x1df8597c iscsi_session_get_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x42cdf2aa xfrm_dev_state_flush vmlinux EXPORT_SYMBOL +0x69668826 netdev_increment_features vmlinux EXPORT_SYMBOL +0x4f2250ba rtc_tm_to_time64 vmlinux EXPORT_SYMBOL +0xe9b2822d sas_rphy_unlink vmlinux EXPORT_SYMBOL +0x3f9a5c8c fwnode_property_read_u8_array vmlinux EXPORT_SYMBOL_GPL +0x7d57b6f0 drm_atomic_helper_page_flip_target vmlinux EXPORT_SYMBOL +0x91b9d5f2 uart_match_port vmlinux EXPORT_SYMBOL +0xed149286 acpi_register_gsi vmlinux EXPORT_SYMBOL_GPL +0xa642686c debugfs_create_file_size vmlinux EXPORT_SYMBOL_GPL +0x24f8c2ec vfs_get_link vmlinux EXPORT_SYMBOL +0xaa905851 trace_array_create vmlinux EXPORT_SYMBOL_GPL +0x03372453 force_irqthreads vmlinux EXPORT_SYMBOL_GPL +0x061d0f76 svc_shutdown_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe72c5b5b iscsi_host_get_param drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xccae368c of_thermal_get_trip_points vmlinux EXPORT_SYMBOL_GPL +0xfb2da00e ttm_eu_backoff_reservation vmlinux EXPORT_SYMBOL +0x5ec7dbb3 iommu_register_device_fault_handler vmlinux EXPORT_SYMBOL_GPL +0x95cc86d5 regulator_get_linear_step vmlinux EXPORT_SYMBOL_GPL +0xe1894942 acpi_device_get_match_data vmlinux EXPORT_SYMBOL_GPL +0x917b0c7b pcie_port_service_register vmlinux EXPORT_SYMBOL +0x5d26b579 pci_request_selected_regions vmlinux EXPORT_SYMBOL +0xe080baa3 __splice_from_pipe vmlinux EXPORT_SYMBOL +0x07c0fa4c nci_send_frame net/nfc/nci/nci EXPORT_SYMBOL +0x1d826391 fc_rport_login drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x02b61070 nfcmrvl_parse_dt drivers/nfc/nfcmrvl/nfcmrvl EXPORT_SYMBOL_GPL +0xc57c6d80 unregister_net_sysctl_table vmlinux EXPORT_SYMBOL_GPL +0xefc671fb inet_gro_complete vmlinux EXPORT_SYMBOL +0x0552b967 xt_check_table_hooks vmlinux EXPORT_SYMBOL +0xb8cfe1ee __skb_tstamp_tx vmlinux EXPORT_SYMBOL_GPL +0x0d565051 cpufreq_register_governor vmlinux EXPORT_SYMBOL_GPL +0xa9afa624 scsi_is_sas_phy vmlinux EXPORT_SYMBOL +0x1b5059ce ata_id_xfermask vmlinux EXPORT_SYMBOL_GPL +0xf0777d66 tty_write_room vmlinux EXPORT_SYMBOL +0xe138fb8c percpu_counter_add_batch vmlinux EXPORT_SYMBOL +0x5fcd5845 sysfs_add_file_to_group vmlinux EXPORT_SYMBOL_GPL +0x6da4b638 param_ops_ushort vmlinux EXPORT_SYMBOL +0x73242dcd cpu_set_feature vmlinux EXPORT_SYMBOL_GPL +0xbfd7d7a2 o2hb_global_heartbeat_active fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL +0xa7de111c dev_trans_start vmlinux EXPORT_SYMBOL 
+0x542fdd19 devlink_port_params_unregister vmlinux EXPORT_SYMBOL_GPL +0x3accbefe of_graph_get_remote_node vmlinux EXPORT_SYMBOL +0xc954302a usb_unlocked_disable_lpm vmlinux EXPORT_SYMBOL_GPL +0x9f7202f6 serial8250_rpm_put vmlinux EXPORT_SYMBOL_GPL +0x64a9c928 default_blu vmlinux EXPORT_SYMBOL +0xd9accb62 devm_regulator_register vmlinux EXPORT_SYMBOL_GPL +0x57575f08 dmaengine_put vmlinux EXPORT_SYMBOL +0xaae8ab0e acpi_bus_power_manageable vmlinux EXPORT_SYMBOL +0xf681acfc hdmi_infoframe_unpack vmlinux EXPORT_SYMBOL +0x4ea25709 dql_reset vmlinux EXPORT_SYMBOL +0x3f1626ea blk_register_region vmlinux EXPORT_SYMBOL +0x095893b4 bio_put vmlinux EXPORT_SYMBOL +0x36ca0116 devm_memunmap vmlinux EXPORT_SYMBOL +0x05777c8a devm_memremap vmlinux EXPORT_SYMBOL +0x036b2e31 bpf_map_inc vmlinux EXPORT_SYMBOL_GPL +0x27bd5a47 trace_print_array_seq vmlinux EXPORT_SYMBOL +0x1b597b7a swake_up_all vmlinux EXPORT_SYMBOL +0x5de611a9 btracker_nr_writebacks_queued drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x3e73f10c __tracepoint_pnfs_mds_fallback_pg_init_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x4735e6a5 of_find_compatible_node vmlinux EXPORT_SYMBOL +0xd6814560 input_alloc_absinfo vmlinux EXPORT_SYMBOL +0xad1a9ecf serio_bus vmlinux EXPORT_SYMBOL +0xf0de6af3 scsi_is_target_device vmlinux EXPORT_SYMBOL +0x77b4fe21 ahci_do_hardreset vmlinux EXPORT_SYMBOL_GPL +0x21cd536a crypto_put_default_null_skcipher vmlinux EXPORT_SYMBOL_GPL +0x2a6fd642 crypto_get_default_null_skcipher vmlinux EXPORT_SYMBOL_GPL +0x65a1e7ee each_symbol_section vmlinux EXPORT_SYMBOL_GPL +0x582fe5cf rds_message_add_rdma_dest_extension net/rds/rds EXPORT_SYMBOL_GPL +0xac93ae05 ax25_bcast net/ax25/ax25 EXPORT_SYMBOL_GPL +0x99b23286 ib_modify_qp_is_ok drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x58239d49 mdev_get_drvdata drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x37960d94 mlx5_core_access_reg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb2a96b14 nfs_init_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb73f253b ping_bind vmlinux EXPORT_SYMBOL_GPL +0x4aac52b3 nf_hook_entries_insert_raw vmlinux EXPORT_SYMBOL_GPL +0x4e1bd637 sk_dst_check vmlinux EXPORT_SYMBOL +0x975f54fb ttm_mem_io_lock vmlinux EXPORT_SYMBOL +0x6b5c2b06 drm_atomic_helper_damage_iter_next vmlinux EXPORT_SYMBOL +0x453c8403 pci_msi_enabled vmlinux EXPORT_SYMBOL +0xd36e3d59 prandom_bytes_state vmlinux EXPORT_SYMBOL +0x9ead0c3a inode_needs_sync vmlinux EXPORT_SYMBOL +0x7668db4a dma_direct_map_resource vmlinux EXPORT_SYMBOL +0x399bb8c0 __wake_up_sync_key vmlinux EXPORT_SYMBOL_GPL +0xfbcd7bb0 snd_rawmidi_receive sound/core/snd-rawmidi EXPORT_SYMBOL +0x527928ca inet_csk_complete_hashdance vmlinux EXPORT_SYMBOL +0x283d3677 dev_getfirstbyhwtype vmlinux EXPORT_SYMBOL +0x4e2267ab ahci_print_info vmlinux EXPORT_SYMBOL_GPL +0x217ff10b drm_framebuffer_remove vmlinux EXPORT_SYMBOL +0x6f3a4838 dma_can_mmap vmlinux EXPORT_SYMBOL_GPL +0x8ff18803 wake_up_process vmlinux EXPORT_SYMBOL +0x25820c64 fs_overflowuid vmlinux EXPORT_SYMBOL +0x77c73de3 l2tp_session_register net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x53dea1ff ax2asc net/ax25/ax25 EXPORT_SYMBOL +0x4cc9bbb4 __nf_ct_refresh_acct net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x328ffea6 vhost_enable_notify drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xa15c0bcc ib_modify_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbb24f607 init_cdrom_command drivers/cdrom/cdrom EXPORT_SYMBOL +0x73273760 nfs_do_submount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6f125b4e gro_find_complete_by_type vmlinux EXPORT_SYMBOL +0xe2d38193 skb_dequeue_tail vmlinux EXPORT_SYMBOL 
+0x223969aa mm_unaccount_pinned_pages vmlinux EXPORT_SYMBOL_GPL +0xe1fe7bf2 hidinput_count_leds vmlinux EXPORT_SYMBOL_GPL +0x104f27eb input_ff_erase vmlinux EXPORT_SYMBOL_GPL +0xf3579867 scsi_is_sas_port vmlinux EXPORT_SYMBOL +0xb65a8ab1 scsi_target_resume vmlinux EXPORT_SYMBOL +0x6e710769 ata_sff_queue_delayed_work vmlinux EXPORT_SYMBOL_GPL +0x8462cb62 atapi_cmd_type vmlinux EXPORT_SYMBOL_GPL +0x8b7f2eea pci_device_is_present vmlinux EXPORT_SYMBOL_GPL +0x95368d33 memcg_kmem_enabled_key vmlinux EXPORT_SYMBOL +0xaf012ff7 __module_address vmlinux EXPORT_SYMBOL_GPL +0xdfc091f9 ceph_entity_type_name net/ceph/libceph EXPORT_SYMBOL +0x28051bcb mlx5_eq_disable drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4b949e85 inet_stream_ops vmlinux EXPORT_SYMBOL +0x8fecdc2c qdisc_reset vmlinux EXPORT_SYMBOL +0x4d85d7ff flow_rule_match_ip vmlinux EXPORT_SYMBOL +0xd2da1048 register_netdevice_notifier vmlinux EXPORT_SYMBOL +0xde8b09b1 syscon_regmap_lookup_by_phandle vmlinux EXPORT_SYMBOL_GPL +0xcb643068 regmap_multi_reg_write vmlinux EXPORT_SYMBOL_GPL +0x5a7e732a of_dma_controller_register vmlinux EXPORT_SYMBOL_GPL +0x953b360a pci_get_device vmlinux EXPORT_SYMBOL +0xb086ba69 blk_mq_delay_run_hw_queue vmlinux EXPORT_SYMBOL +0x808ec1a3 crypto_alg_tested vmlinux EXPORT_SYMBOL_GPL +0xb6c5a524 iomap_seek_hole vmlinux EXPORT_SYMBOL_GPL +0xe769232e sprint_symbol_no_offset vmlinux EXPORT_SYMBOL_GPL +0x3fa6d956 nci_free_device net/nfc/nci/nci EXPORT_SYMBOL +0x9a48cd92 dm_rh_bio_to_region drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xb66e6e19 mlxsw_afa_block_append_mirror drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc57c48a3 idr_get_next vmlinux EXPORT_SYMBOL +0x5c2e3473 led_init_core vmlinux EXPORT_SYMBOL_GPL +0xec333652 ehci_init_driver vmlinux EXPORT_SYMBOL_GPL +0x7bae3521 of_usb_get_dr_mode_by_phy vmlinux EXPORT_SYMBOL_GPL +0x5873122c ata_sff_drain_fifo vmlinux EXPORT_SYMBOL_GPL +0x9b4463d1 tpm1_getcap vmlinux EXPORT_SYMBOL_GPL +0x881c4413 gen_pool_first_fit vmlinux EXPORT_SYMBOL +0x8df17d75 dquot_initialize_needed vmlinux EXPORT_SYMBOL +0x2f548802 ns_to_timeval vmlinux EXPORT_SYMBOL +0x0dc2a373 dccp_syn_ack_timeout net/dccp/dccp_ipv4 EXPORT_SYMBOL +0x2599b036 dccp_connect net/dccp/dccp EXPORT_SYMBOL_GPL +0x3bb87a97 nf_ct_expect_register_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x14080637 nfs42_proc_layouterror fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xd0a91bab skip_spaces vmlinux EXPORT_SYMBOL +0x466c14a7 __delay vmlinux EXPORT_SYMBOL +0x4d65cbd5 csum_ipv6_magic vmlinux EXPORT_SYMBOL +0xc2c56a7f do_xdp_generic vmlinux EXPORT_SYMBOL_GPL +0xbbbf2d9b regmap_async_complete_cb vmlinux EXPORT_SYMBOL_GPL +0xb6127243 drm_need_swiotlb vmlinux EXPORT_SYMBOL +0xd0f4cf9f dw_pcie_msi_init vmlinux EXPORT_SYMBOL_GPL +0xab02a177 param_get_ullong vmlinux EXPORT_SYMBOL +0x76d451c4 add_taint vmlinux EXPORT_SYMBOL +0x60a32ea9 pm_power_off vmlinux EXPORT_SYMBOL_GPL +0xbe94e1b4 iscsit_build_rsp_pdu drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x283f91b9 mlx5_core_roce_gid_set drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x844bf8f5 nfs4_set_rw_stateid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x45d6bb6a __neigh_for_each_release vmlinux EXPORT_SYMBOL +0x6479ce20 irq_of_parse_and_map vmlinux EXPORT_SYMBOL_GPL +0xb4ec3fe8 spi_busnum_to_master vmlinux EXPORT_SYMBOL_GPL +0xe862c4b7 dpm_suspend_start vmlinux EXPORT_SYMBOL_GPL +0x5d1fc66c device_initialize vmlinux EXPORT_SYMBOL_GPL +0x4039fee6 drm_sched_job_cleanup vmlinux EXPORT_SYMBOL +0x207cd451 user_destroy vmlinux 
EXPORT_SYMBOL_GPL +0xce07d995 dquot_alloc_inode vmlinux EXPORT_SYMBOL +0x7837860a bdput vmlinux EXPORT_SYMBOL +0x55995fcd bdget vmlinux EXPORT_SYMBOL +0xfe9c9ba0 fsstack_copy_inode_size vmlinux EXPORT_SYMBOL_GPL +0x7631b1bd mlxsw_core_skb_transmit drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc2cf700b dev_get_by_napi_id vmlinux EXPORT_SYMBOL +0xefcffb3a ata_pci_bmdma_prepare_host vmlinux EXPORT_SYMBOL_GPL +0x75361d60 fwnode_get_named_child_node vmlinux EXPORT_SYMBOL_GPL +0x69493b1a kstrtos16 vmlinux EXPORT_SYMBOL +0xfb32b30f ring_buffer_read_prepare_sync vmlinux EXPORT_SYMBOL_GPL +0x19fa5852 mlxsw_core_flush_owq drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xff632039 mlx5_query_module_eeprom drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xbcdfc4a3 tcf_em_register vmlinux EXPORT_SYMBOL +0x92f77f12 dm_get_md vmlinux EXPORT_SYMBOL_GPL +0xe84f7108 hisi_sas_phy_enable vmlinux EXPORT_SYMBOL_GPL +0xd6e67efc mipi_dsi_host_register vmlinux EXPORT_SYMBOL +0x5b7e9cde devm_init_badblocks vmlinux EXPORT_SYMBOL_GPL +0xda1129c8 __tracepoint_block_unplug vmlinux EXPORT_SYMBOL_GPL +0x958d71ef iomap_set_page_dirty vmlinux EXPORT_SYMBOL_GPL +0xd69da809 dump_truncate vmlinux EXPORT_SYMBOL +0xcd2e7879 fs_kobj vmlinux EXPORT_SYMBOL_GPL +0x27046576 kvm_exit vmlinux EXPORT_SYMBOL_GPL +0xc40f284c nf_ct_helper_hsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xebf2206d cdrom_ioctl drivers/cdrom/cdrom EXPORT_SYMBOL +0xc2d9bd71 ahci_platform_init_host vmlinux EXPORT_SYMBOL_GPL +0xa83450fc sata_link_scr_lpm vmlinux EXPORT_SYMBOL_GPL +0xc5992401 __drm_puts_coredump vmlinux EXPORT_SYMBOL +0x0768e1a2 drm_connector_list_update vmlinux EXPORT_SYMBOL +0x521c94e8 drm_mode_debug_printmodeline vmlinux EXPORT_SYMBOL +0x24428be5 strncpy_from_user vmlinux EXPORT_SYMBOL +0x5ed2969e string_escape_mem_ascii vmlinux EXPORT_SYMBOL +0xdfee6c1d bio_init vmlinux EXPORT_SYMBOL +0x7278d328 all_vm_events vmlinux EXPORT_SYMBOL_GPL +0x41814cb8 dirty_writeback_interval vmlinux EXPORT_SYMBOL_GPL +0xb4a1b73d gss_mech_put net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL +0x1082ac6f rpc_max_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7061facd rdma_move_grh_sgid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x45c4fd86 vfio_unregister_iommu_driver drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xadeaff67 i40e_unregister_client drivers/net/ethernet/intel/i40e/i40e EXPORT_SYMBOL +0x51a6670a inet6_hash_connect vmlinux EXPORT_SYMBOL_GPL +0xfa26ec23 iptunnel_handle_offloads vmlinux EXPORT_SYMBOL_GPL +0x519b70a5 gnet_stats_copy_rate_est vmlinux EXPORT_SYMBOL +0x3b1ca9d5 skb_partial_csum_set vmlinux EXPORT_SYMBOL_GPL +0x1ed44bf4 genphy_c45_aneg_done vmlinux EXPORT_SYMBOL_GPL +0x42c303c1 pm_runtime_no_callbacks vmlinux EXPORT_SYMBOL_GPL +0x6115c764 subsys_dev_iter_next vmlinux EXPORT_SYMBOL_GPL +0x9dd26089 drm_mm_scan_init_with_range vmlinux EXPORT_SYMBOL +0xc934163b drm_gem_vram_kmap vmlinux EXPORT_SYMBOL +0x82f425c9 iommu_detach_group vmlinux EXPORT_SYMBOL_GPL +0xf57789cc pci_scan_root_bus vmlinux EXPORT_SYMBOL +0x684ab828 create_empty_buffers vmlinux EXPORT_SYMBOL +0x5da12e95 init_special_inode vmlinux EXPORT_SYMBOL +0x2910f4cb on_each_cpu_cond vmlinux EXPORT_SYMBOL +0xb74e58e9 br_vlan_get_info net/bridge/bridge EXPORT_SYMBOL_GPL +0x201c3399 nfnl_acct_update net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL +0x8f595b11 snd_major sound/core/snd EXPORT_SYMBOL +0xe482c5cd srp_release_transport drivers/scsi/scsi_transport_srp EXPORT_SYMBOL_GPL +0x75c5f5c1 nfs_drop_inode fs/nfs/nfs EXPORT_SYMBOL_GPL 
+0xa273d5c6 configfs_register_default_group fs/configfs/configfs EXPORT_SYMBOL +0x788cc37e __xfrm_dst_lookup vmlinux EXPORT_SYMBOL +0xe9ea75ad ipmr_rule_default vmlinux EXPORT_SYMBOL +0x3d0386ef iptunnel_xmit vmlinux EXPORT_SYMBOL_GPL +0x484ed9fe inet_frag_destroy vmlinux EXPORT_SYMBOL +0xe8bca1f6 ipv4_sk_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0x3f1ef70a xt_tee_enabled vmlinux EXPORT_SYMBOL_GPL +0xc79bcd36 dm_vcalloc vmlinux EXPORT_SYMBOL +0xae876f5d of_i2c_get_board_info vmlinux EXPORT_SYMBOL_GPL +0x4531624f usb_decode_ctrl vmlinux EXPORT_SYMBOL_GPL +0xe673e202 drm_flip_work_commit vmlinux EXPORT_SYMBOL +0x813a57a8 __dec_zone_page_state vmlinux EXPORT_SYMBOL +0xcda3e840 __inc_zone_page_state vmlinux EXPORT_SYMBOL +0x103d4ff5 __mod_zone_page_state vmlinux EXPORT_SYMBOL +0x27f4f029 ftrace_set_global_filter vmlinux EXPORT_SYMBOL_GPL +0xa0c8ddd1 __put_user_ns vmlinux EXPORT_SYMBOL +0xab6babaf pm_qos_request vmlinux EXPORT_SYMBOL_GPL +0x15fe9aab release_resource vmlinux EXPORT_SYMBOL +0x09e913c1 snd_pcm_alt_chmaps sound/core/snd-pcm EXPORT_SYMBOL_GPL +0x51aa0bee sbc_dif_verify drivers/target/target_core_mod EXPORT_SYMBOL +0x56d6cd81 ppp_register_compressor drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0x86fef008 register_sja1000dev drivers/net/can/sja1000/sja1000 EXPORT_SYMBOL_GPL +0x00c84a93 __nf_ip6_route vmlinux EXPORT_SYMBOL_GPL +0x5d0090d7 devlink_fmsg_binary_pair_put vmlinux EXPORT_SYMBOL_GPL +0xd18c0cf4 brioctl_set vmlinux EXPORT_SYMBOL +0x6d066b14 input_set_abs_params vmlinux EXPORT_SYMBOL +0x7c674212 spi_bus_unlock vmlinux EXPORT_SYMBOL_GPL +0x07f88521 drm_edid_is_valid vmlinux EXPORT_SYMBOL +0x88d1caae drm_dp_dual_mode_set_tmds_output vmlinux EXPORT_SYMBOL +0x9a81e730 drm_dp_dual_mode_get_tmds_output vmlinux EXPORT_SYMBOL +0xfa297415 acpi_map_pxm_to_node vmlinux EXPORT_SYMBOL +0x5e0b3f66 gpiod_add_lookup_table vmlinux EXPORT_SYMBOL_GPL +0xb3687850 out_of_line_wait_on_bit_lock vmlinux EXPORT_SYMBOL +0xf3784dcc xprt_free_slot net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x814aeadb spc_emulate_report_luns drivers/target/target_core_mod EXPORT_SYMBOL +0x86bec324 target_unregister_template drivers/target/target_core_mod EXPORT_SYMBOL +0x8f68fe5e hinic_mbox_to_vf drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x7be9eb9b nfs_alloc_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x3939f8f0 rfkill_pause_polling vmlinux EXPORT_SYMBOL +0x18ceb194 xfrm_state_alloc vmlinux EXPORT_SYMBOL +0x976f6377 netdev_class_create_file_ns vmlinux EXPORT_SYMBOL +0x85670f1d rtnl_is_locked vmlinux EXPORT_SYMBOL +0x4af35562 devm_kmemdup vmlinux EXPORT_SYMBOL_GPL +0x53ba3289 fat_remove_entries vmlinux EXPORT_SYMBOL_GPL +0x404d0705 page_symlink vmlinux EXPORT_SYMBOL +0xa26d9b4f workqueue_congested vmlinux EXPORT_SYMBOL_GPL +0x76cba743 o2hb_register_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x36adec5e nfsacl_decode fs/nfs_common/nfs_acl EXPORT_SYMBOL_GPL +0x5e874a2e nfsacl_encode fs/nfs_common/nfs_acl EXPORT_SYMBOL_GPL +0xb04976d9 __xfrm_route_forward vmlinux EXPORT_SYMBOL +0x5f2d8f8d udp_gro_complete vmlinux EXPORT_SYMBOL +0x52d54fce devlink_info_version_stored_put vmlinux EXPORT_SYMBOL_GPL +0x1b3b6cec rtnl_get_net_ns_capable vmlinux EXPORT_SYMBOL_GPL +0x49f5e8fd compat_sock_common_setsockopt vmlinux EXPORT_SYMBOL +0xeb78ea92 compat_sock_common_getsockopt vmlinux EXPORT_SYMBOL +0xf30a5502 cpufreq_enable_boost_support vmlinux EXPORT_SYMBOL_GPL +0xbea53762 dm_get_queue_limits vmlinux EXPORT_SYMBOL_GPL +0x1c0520d3 dm_per_bio_data vmlinux EXPORT_SYMBOL_GPL +0x15c4e3e2 phylink_ethtool_set_pauseparam vmlinux 
EXPORT_SYMBOL_GPL +0xf8fe5642 phylink_ethtool_get_pauseparam vmlinux EXPORT_SYMBOL_GPL +0x6c153ce1 drm_dp_mst_get_edid vmlinux EXPORT_SYMBOL +0x7c70f677 dax_finish_sync_fault vmlinux EXPORT_SYMBOL_GPL +0x6921aa34 compat_put_timeval vmlinux EXPORT_SYMBOL_GPL +0x321bdbb1 compat_get_timeval vmlinux EXPORT_SYMBOL_GPL +0x38764c0b kvm_vcpu_wake_up vmlinux EXPORT_SYMBOL_GPL +0x2562aae3 p9_client_getattr_dotl net/9p/9pnet EXPORT_SYMBOL +0x061a0e94 nf_ct_expect_iterate_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xd388b08f fc_get_host_port_state drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x37a1909f gen_replace_estimator vmlinux EXPORT_SYMBOL +0x3c055c66 scsi_target_unblock vmlinux EXPORT_SYMBOL_GPL +0x1d4b18ae devm_clk_hw_unregister vmlinux EXPORT_SYMBOL_GPL +0xf0518aea blk_mq_request_completed vmlinux EXPORT_SYMBOL_GPL +0x19bd383b security_secmark_refcount_dec vmlinux EXPORT_SYMBOL +0x2f03fc4b security_secmark_refcount_inc vmlinux EXPORT_SYMBOL +0x476dbc82 vfs_ioc_fssetxattr_check vmlinux EXPORT_SYMBOL +0x142c493e inc_node_page_state vmlinux EXPORT_SYMBOL +0xe2aaef08 param_set_bint vmlinux EXPORT_SYMBOL +0xb3551238 param_get_bool vmlinux EXPORT_SYMBOL +0x497fc72c param_set_byte vmlinux EXPORT_SYMBOL +0x53cdbc60 rpc_call_null net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1455657e br_multicast_router net/bridge/bridge EXPORT_SYMBOL_GPL +0xb8dc6d64 fc_exch_mgr_alloc drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x2a1d51ee __ip6_datagram_connect vmlinux EXPORT_SYMBOL_GPL +0x200036a3 ip_tunnel_metadata_cnt vmlinux EXPORT_SYMBOL +0xd97dcce2 __ip4_datagram_connect vmlinux EXPORT_SYMBOL +0x69d3dc82 nf_ipv6_ops vmlinux EXPORT_SYMBOL_GPL +0x9fdecc31 unregister_netdevice_many vmlinux EXPORT_SYMBOL +0x803546d9 skb_clone_sk vmlinux EXPORT_SYMBOL +0xeb7f6046 acpi_get_devices vmlinux EXPORT_SYMBOL +0x11166aaa acpi_gpiochip_request_interrupts vmlinux EXPORT_SYMBOL_GPL +0xea778fab sg_pcopy_to_buffer vmlinux EXPORT_SYMBOL +0x68c26a20 blk_queue_logical_block_size vmlinux EXPORT_SYMBOL +0x3e49aa20 init_srcu_struct vmlinux EXPORT_SYMBOL_GPL +0x5a8ae15a ZSTD_initDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x371e7f3a ZSTD_initCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0x4d2a941b parport_ieee1284_interrupt drivers/parport/parport EXPORT_SYMBOL +0xe16986dd mlxsw_afa_block_activity_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x15e9de69 mlx5_query_port_pause drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x56928284 hinic_free_nic_hwdev drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd9562e26 arp_tbl vmlinux EXPORT_SYMBOL +0xa9964bbb __skb_recv_datagram vmlinux EXPORT_SYMBOL +0x4537a8fd phy_attach vmlinux EXPORT_SYMBOL +0x718f108a sas_get_address vmlinux EXPORT_SYMBOL +0x3c186c9e gpiochip_remove_pin_ranges vmlinux EXPORT_SYMBOL_GPL +0x688d0e03 list_lru_walk_node vmlinux EXPORT_SYMBOL_GPL +0x017de3d5 nr_cpu_ids vmlinux EXPORT_SYMBOL +0x982583d9 p9_client_create_dotl net/9p/9pnet EXPORT_SYMBOL +0x065994f1 xdr_encode_opaque_fixed net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xfedbeeb0 rdma_translate_ip drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x39f334d9 mptscsih_show_info drivers/message/fusion/mptscsih EXPORT_SYMBOL +0xc341ae6d zs_map_object mm/zsmalloc EXPORT_SYMBOL_GPL +0x6695aea9 tcp_splice_read vmlinux EXPORT_SYMBOL +0xdafd1d08 xt_unregister_match vmlinux EXPORT_SYMBOL +0x012bd742 regmap_fields_update_bits_base vmlinux EXPORT_SYMBOL_GPL +0x656f3781 request_firmware_direct vmlinux EXPORT_SYMBOL_GPL +0x0c4c678f drm_plane_enable_fb_damage_clips vmlinux EXPORT_SYMBOL +0x21a563da 
clk_get_accuracy vmlinux EXPORT_SYMBOL_GPL +0x4de77dec acpiphp_unregister_attention vmlinux EXPORT_SYMBOL_GPL +0x50f91491 __genradix_ptr vmlinux EXPORT_SYMBOL +0x3113d93f irq_setup_generic_chip vmlinux EXPORT_SYMBOL_GPL +0xc10fddb8 name_to_dev_t vmlinux EXPORT_SYMBOL_GPL +0xcbc88a23 ZSTD_isFrame lib/zstd/zstd_decompress EXPORT_SYMBOL +0xc7024280 ip_vs_conn_in_get_proto net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0xc3569c3f ip_set_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x3567743b vfio_external_user_iommu_id drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x43ca49fd can_rx_offload_add_fifo drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x4af71e4c vxlan_fdb_replay drivers/net/vxlan EXPORT_SYMBOL_GPL +0xbbd6b59f hnae3_register_ae_algo drivers/net/ethernet/hisilicon/hns3/hnae3 EXPORT_SYMBOL +0xee0b1411 iscsi_prep_data_out_pdu drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xc32c71af register_inetaddr_validator_notifier vmlinux EXPORT_SYMBOL +0x9aaeefce sysctl_nf_log_all_netns vmlinux EXPORT_SYMBOL +0x5d9d2b2c __tracepoint_iscsi_dbg_eh vmlinux EXPORT_SYMBOL_GPL +0x4f1b5e11 platform_msi_domain_free_irqs vmlinux EXPORT_SYMBOL_GPL +0x26a1bf9b drm_master_get vmlinux EXPORT_SYMBOL +0x49b163b8 acpi_bus_scan vmlinux EXPORT_SYMBOL +0xd70f62b6 acpi_os_execute vmlinux EXPORT_SYMBOL +0xe0c3371c mptscsih_suspend drivers/message/fusion/mptscsih EXPORT_SYMBOL +0x35131b36 drbd_role_str drivers/block/drbd/drbd EXPORT_SYMBOL +0x94f35bc4 nfs_file_read fs/nfs/nfs EXPORT_SYMBOL_GPL +0x8955e48e of_irq_to_resource vmlinux EXPORT_SYMBOL_GPL +0x05213312 i2c_slave_register vmlinux EXPORT_SYMBOL_GPL +0xcbc9557f unregister_sysrq_key vmlinux EXPORT_SYMBOL +0x93d2bce4 regulator_put vmlinux EXPORT_SYMBOL_GPL +0x41b200f9 percpu_ref_init vmlinux EXPORT_SYMBOL_GPL +0x255a1bf6 crypto_unregister_rngs vmlinux EXPORT_SYMBOL_GPL +0x639240d9 crypto_aead_setkey vmlinux EXPORT_SYMBOL_GPL +0x1ee23ead crypto_unregister_algs vmlinux EXPORT_SYMBOL_GPL +0x51db8bac simple_transaction_get vmlinux EXPORT_SYMBOL +0xf2de2580 simple_transaction_set vmlinux EXPORT_SYMBOL +0x34c91672 register_kretprobes vmlinux EXPORT_SYMBOL_GPL +0x3d1b0fbf find_pid_ns vmlinux EXPORT_SYMBOL_GPL +0x2864abc9 klist_node_attached vmlinux EXPORT_SYMBOL_GPL +0xa79b46e8 rfkill_alloc vmlinux EXPORT_SYMBOL +0xc2aa11cc __nlmsg_put vmlinux EXPORT_SYMBOL +0x8fdb8e8b lock_sock_nested vmlinux EXPORT_SYMBOL +0x180b6ecc nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL +0xddd4e730 hid_add_device vmlinux EXPORT_SYMBOL_GPL +0xc3a8526a power_supply_set_input_current_limit_from_supplier vmlinux EXPORT_SYMBOL_GPL +0x3ae8ad00 platform_add_devices vmlinux EXPORT_SYMBOL_GPL +0x92dc960b device_show_bool vmlinux EXPORT_SYMBOL_GPL +0xeec0dd04 pci_read_config_byte vmlinux EXPORT_SYMBOL +0x421880b0 gpiochip_add_pingroup_range vmlinux EXPORT_SYMBOL_GPL +0xde8db364 rhashtable_walk_exit vmlinux EXPORT_SYMBOL_GPL +0xefca1f79 sched_trace_rd_span vmlinux EXPORT_SYMBOL_GPL +0xd354ee57 flush_work vmlinux EXPORT_SYMBOL_GPL +0xc0b0c273 udp_tunnel_notify_add_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x49081644 dm_btree_remove drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb372e54d mlx5_db_free drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc41c141d mlx4_update_qp drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xd26af501 mlx4_db_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x8c92d425 fc_seq_send drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x1068004b gf128mul_bbe crypto/gf128mul EXPORT_SYMBOL +0x01af9562 of_thermal_is_trip_valid 
vmlinux EXPORT_SYMBOL_GPL +0xeae26c6c usb_deregister vmlinux EXPORT_SYMBOL_GPL +0x29b612b8 phy_ethtool_set_eee vmlinux EXPORT_SYMBOL +0x19442dba phy_ethtool_get_eee vmlinux EXPORT_SYMBOL +0x738f8e17 ata_sff_port_ops vmlinux EXPORT_SYMBOL_GPL +0x0d83e429 gpiochip_line_is_persistent vmlinux EXPORT_SYMBOL_GPL +0xcf1c6ca3 cmdline_parts_find vmlinux EXPORT_SYMBOL +0x6dc1c0b6 create_signature vmlinux EXPORT_SYMBOL_GPL +0x5775d078 __generic_file_fsync vmlinux EXPORT_SYMBOL +0x2087719e ceph_oid_copy net/ceph/libceph EXPORT_SYMBOL +0x04cda566 snd_interval_refine sound/core/snd-pcm EXPORT_SYMBOL +0xb4cd5628 nvme_complete_rq drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xd1c43391 nfs_fs_mount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6b2dc060 dump_stack vmlinux EXPORT_SYMBOL +0x10c3f57e __gnet_stats_copy_queue vmlinux EXPORT_SYMBOL +0x614adcb7 of_overlay_remove_all vmlinux EXPORT_SYMBOL_GPL +0x45dc0f40 of_device_is_compatible vmlinux EXPORT_SYMBOL +0xa8bc1596 led_colors vmlinux EXPORT_SYMBOL_GPL +0x2c287154 drm_mm_remove_node vmlinux EXPORT_SYMBOL +0xfa3102d3 rdev_get_drvdata vmlinux EXPORT_SYMBOL_GPL +0x878d89e0 acpi_bus_unregister_driver vmlinux EXPORT_SYMBOL +0xe64e57ed pci_msi_unmask_irq vmlinux EXPORT_SYMBOL_GPL +0x18826384 crypto_alloc_instance vmlinux EXPORT_SYMBOL_GPL +0xbdfa625e mpage_writepage vmlinux EXPORT_SYMBOL +0xd942d353 ring_buffer_record_off vmlinux EXPORT_SYMBOL_GPL +0xc53e3755 set_security_override vmlinux EXPORT_SYMBOL +0xd56c7790 nfc_allocate_device net/nfc/nfc EXPORT_SYMBOL +0x650bcce4 nfnetlink_subsys_unregister net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xd0592e7c usb_get_function_instance drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x200eb0c9 mlx4_qp_to_ready drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x7adc0fbf rb_replace_node_rcu vmlinux EXPORT_SYMBOL +0xed3636bb raw_seq_start vmlinux EXPORT_SYMBOL_GPL +0xf78f8ea5 dst_cow_metrics_generic vmlinux EXPORT_SYMBOL +0x40c263af ir_raw_event_store_with_filter vmlinux EXPORT_SYMBOL_GPL +0x75906f07 rtc_read_time vmlinux EXPORT_SYMBOL_GPL +0x7c44f20d ata_dev_pair vmlinux EXPORT_SYMBOL_GPL +0xba239141 tpm2_get_tpm_pt vmlinux EXPORT_SYMBOL_GPL +0xe21f18ac __genradix_iter_peek vmlinux EXPORT_SYMBOL +0xe52fdf7a clean_bdev_aliases vmlinux EXPORT_SYMBOL +0xbf9c8972 p9_client_create net/9p/9pnet EXPORT_SYMBOL +0x299fbb2e poly1305_core_setkey crypto/poly1305_generic EXPORT_SYMBOL_GPL +0x590706bc kobject_del vmlinux EXPORT_SYMBOL +0xcbebf52a netdev_master_upper_dev_get vmlinux EXPORT_SYMBOL +0x63d2ff63 eeprom_93cx6_wren vmlinux EXPORT_SYMBOL_GPL +0x533d62f5 dmaengine_get_unmap_data vmlinux EXPORT_SYMBOL +0x422ffd76 pci_hp_del vmlinux EXPORT_SYMBOL_GPL +0x389695ff pci_hp_add vmlinux EXPORT_SYMBOL_GPL +0x4158cbe8 crypto_unregister_shash vmlinux EXPORT_SYMBOL_GPL +0xa0a38df1 crypto_unregister_ahash vmlinux EXPORT_SYMBOL_GPL +0xb8b9f817 kmalloc_order_trace vmlinux EXPORT_SYMBOL +0x61684262 rpc_remove_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf32da854 rpc_clnt_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x262bb9d6 nat_callforwarding_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x823f1197 snd_ctl_find_numid sound/core/snd EXPORT_SYMBOL +0xadedd2fd dm_bio_detain drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x35aad713 devlink_dpipe_headers_register vmlinux EXPORT_SYMBOL_GPL +0x1fb1950b cpufreq_dbs_governor_start vmlinux EXPORT_SYMBOL_GPL +0x02215323 policy_has_boost_freq vmlinux EXPORT_SYMBOL_GPL +0x60bf3f84 i2c_slave_unregister vmlinux EXPORT_SYMBOL_GPL +0x1d712780 usb_reset_endpoint vmlinux 
EXPORT_SYMBOL_GPL +0x7073f980 mctrl_gpio_init_noauto vmlinux EXPORT_SYMBOL_GPL +0x96554810 register_keyboard_notifier vmlinux EXPORT_SYMBOL_GPL +0x3debb435 devm_backlight_device_register vmlinux EXPORT_SYMBOL +0xc096e23d hdmi_drm_infoframe_init vmlinux EXPORT_SYMBOL +0xb6a68816 find_last_bit vmlinux EXPORT_SYMBOL +0xb68930fd poll_initwait vmlinux EXPORT_SYMBOL +0x853bbde4 dma_direct_map_sg vmlinux EXPORT_SYMBOL +0xccfa143c irq_gc_ack_set_bit vmlinux EXPORT_SYMBOL_GPL +0x519ca136 unregister_console vmlinux EXPORT_SYMBOL +0xbdf2ff24 ceph_monc_wait_osdmap net/ceph/libceph EXPORT_SYMBOL +0x59f33102 nf_ct_helper_ext_add net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x3467b606 nf_ct_unconfirmed_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xf438022f __tracepoint_bcache_btree_node_alloc drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x752efd66 mlx4_SET_PORT_VXLAN drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xb2e09a1f hinic_free_db_phy_addr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x4900035b o2hb_stop_all_regions fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xc389f1dc led_trigger_unregister_simple vmlinux EXPORT_SYMBOL_GPL +0x84b9c052 usb_stor_reset_resume vmlinux EXPORT_SYMBOL_GPL +0x790be0b9 usb_bus_idr vmlinux EXPORT_SYMBOL_GPL +0xe734b1da phy_disconnect vmlinux EXPORT_SYMBOL +0xc0492492 phy_ethtool_set_link_ksettings vmlinux EXPORT_SYMBOL +0xf8b31ba4 phy_ethtool_get_link_ksettings vmlinux EXPORT_SYMBOL +0x63e0697b device_get_mac_address vmlinux EXPORT_SYMBOL +0xb5a8c226 acpi_gsi_to_irq vmlinux EXPORT_SYMBOL_GPL +0xce4111fd aead_geniv_free vmlinux EXPORT_SYMBOL_GPL +0x3fd78f3b register_chrdev_region vmlinux EXPORT_SYMBOL +0xe7b0353b __cpu_active_mask vmlinux EXPORT_SYMBOL +0xde267df1 rpcauth_wrap_req_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8f64be30 nft_set_ext_types net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x7f0e0889 mlx5_unregister_interface drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x80444adf mlx4_unregister_interface drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf5c6b2d8 netdev_state_change vmlinux EXPORT_SYMBOL +0x9b6d47ca drm_mode_is_420 vmlinux EXPORT_SYMBOL +0xe3a53f4c sort vmlinux EXPORT_SYMBOL +0xa3f12f69 __crypto_xor vmlinux EXPORT_SYMBOL_GPL +0xa8fef7bb security_unix_may_send vmlinux EXPORT_SYMBOL +0x439ad018 vfs_getattr_nosec vmlinux EXPORT_SYMBOL +0x282abe03 mmu_notifier_put vmlinux EXPORT_SYMBOL_GPL +0x541bd60a irq_work_run vmlinux EXPORT_SYMBOL_GPL +0xd9f3e65f __tracepoint_suspend_resume vmlinux EXPORT_SYMBOL_GPL +0xe881cb4b tracing_snapshot_cond vmlinux EXPORT_SYMBOL_GPL +0x1278221d ZSTD_compressBegin_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0xe413b0a4 nla_get_labels net/mpls/mpls_router EXPORT_SYMBOL_GPL +0x014a257c __nft_release_basechain net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x72b4fac5 nf_conncount_add net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x49ca02e0 snd_rawmidi_kernel_read sound/core/snd-rawmidi EXPORT_SYMBOL +0x91f4b5aa ib_register_mad_snoop drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x780b10de mlx4_SET_PORT_general drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xc59135a2 iscsi_host_remove drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x3d2eeb46 srp_rport_get drivers/scsi/scsi_transport_srp EXPORT_SYMBOL +0x9014389f nfs_probe_fsinfo fs/nfs/nfs EXPORT_SYMBOL_GPL +0x22282c98 fscache_io_error fs/fscache/fscache EXPORT_SYMBOL +0x73a1c69a usb_serial_generic_chars_in_buffer vmlinux EXPORT_SYMBOL_GPL +0xe377053b usb_sg_cancel vmlinux EXPORT_SYMBOL_GPL 
+0xed6e98cf drm_crtc_init_with_planes vmlinux EXPORT_SYMBOL +0xd1823156 devm_gpiod_get_from_of_node vmlinux EXPORT_SYMBOL_GPL +0xf65dd7ba debugfs_create_devm_seqfile vmlinux EXPORT_SYMBOL_GPL +0x80bd94b2 kern_path vmlinux EXPORT_SYMBOL +0x680bba4f irq_chip_ack_parent vmlinux EXPORT_SYMBOL_GPL +0x042a5c28 rpc_set_connect_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1cec67e0 ib_register_device drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x9246f91d nfs_clone_sb_security fs/nfs/nfs EXPORT_SYMBOL_GPL +0xaa43caa2 xfrm_dev_resume vmlinux EXPORT_SYMBOL_GPL +0xa78ae6e3 nf_register_queue_handler vmlinux EXPORT_SYMBOL +0x6f4385af iscsi_destroy_conn vmlinux EXPORT_SYMBOL_GPL +0xe4309905 syscore_resume vmlinux EXPORT_SYMBOL_GPL +0x72c35d79 drm_gem_shmem_madvise vmlinux EXPORT_SYMBOL +0x8f3f8a90 drm_client_framebuffer_create vmlinux EXPORT_SYMBOL +0xeb9bda2b clk_hw_get_flags vmlinux EXPORT_SYMBOL_GPL +0x499fcc15 clk_bulk_get vmlinux EXPORT_SYMBOL +0xe6db03ed gpiod_set_raw_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0x260a095a __sg_alloc_table vmlinux EXPORT_SYMBOL +0x8cf35c7d shrink_dcache_sb vmlinux EXPORT_SYMBOL +0x137e2312 __tracepoint_xdp_bulk_tx vmlinux EXPORT_SYMBOL_GPL +0x2ae50477 percpu_free_rwsem vmlinux EXPORT_SYMBOL_GPL +0x878a84c9 param_ops_int vmlinux EXPORT_SYMBOL +0xbeb19aff ib_register_event_handler drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xaa3db489 alloc_c_can_dev drivers/net/can/c_can/c_can EXPORT_SYMBOL_GPL +0xd54a5050 ipmi_unregister_for_cmd drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x1915f170 efivar_entry_iter vmlinux EXPORT_SYMBOL_GPL +0x3ea48336 regmap_fields_read vmlinux EXPORT_SYMBOL_GPL +0x5ebe4982 drm_client_modeset_probe vmlinux EXPORT_SYMBOL +0xd4b9c370 serial8250_do_set_ldisc vmlinux EXPORT_SYMBOL_GPL +0x6ed8a5fc hdmi_drm_infoframe_check vmlinux EXPORT_SYMBOL +0xfb0ca0dd __pci_hp_register vmlinux EXPORT_SYMBOL_GPL +0x2fb72e9b sbitmap_init_node vmlinux EXPORT_SYMBOL_GPL +0x11c886d3 blkdev_issue_flush vmlinux EXPORT_SYMBOL +0x499043d3 crypto_init_queue vmlinux EXPORT_SYMBOL_GPL +0xb510c250 raw_v4_hashinfo vmlinux EXPORT_SYMBOL_GPL +0x16000b12 drm_gem_object_init vmlinux EXPORT_SYMBOL +0xb46538c2 acpi_subsys_suspend_noirq vmlinux EXPORT_SYMBOL_GPL +0xd64ed259 __memcat_p vmlinux EXPORT_SYMBOL_GPL +0x8eddab20 sysfs_break_active_protection vmlinux EXPORT_SYMBOL_GPL +0x5ee360fe iomap_zero_range vmlinux EXPORT_SYMBOL_GPL +0xca38285e fsnotify_put_mark vmlinux EXPORT_SYMBOL_GPL +0x32a3ea9d blockdev_superblock vmlinux EXPORT_SYMBOL_GPL +0xb308c97d wait_woken vmlinux EXPORT_SYMBOL +0x0424dc6c param_ops_short vmlinux EXPORT_SYMBOL +0xee9a4a5b dccp_v4_request_recv_sock net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL +0xa87f624b rxrpc_kernel_get_epoch net/rxrpc/rxrpc EXPORT_SYMBOL +0xe81efba0 rpcb_getport_async net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc4701b25 ib_get_vf_stats drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x57f86275 mpt_findImVolumes drivers/message/fusion/mptbase EXPORT_SYMBOL +0x85fbc931 slhc_uncompress drivers/net/slip/slhc EXPORT_SYMBOL +0x3f4c5242 nvme_try_sched_reset drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xaad6d92f rfkill_init_sw_state vmlinux EXPORT_SYMBOL +0x5114fa29 skb_zerocopy_iter_dgram vmlinux EXPORT_SYMBOL_GPL +0xd71ea702 usb_gadget_deactivate vmlinux EXPORT_SYMBOL_GPL +0x76db4550 fwnode_irq_get vmlinux EXPORT_SYMBOL +0x713805de amba_device_put vmlinux EXPORT_SYMBOL_GPL +0x500b2d76 amba_device_add vmlinux EXPORT_SYMBOL_GPL +0x2484adc3 __kfifo_to_user_r vmlinux EXPORT_SYMBOL +0x0e198232 dm_btree_insert 
drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x2cd1be34 __tracepoint_bcache_request_start drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x5c9a238c fc_rport_logoff drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xca3c5290 fc_rport_lookup drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xac3201b0 udp_flow_hashrnd vmlinux EXPORT_SYMBOL +0xcb3a95d6 devlink_trap_report vmlinux EXPORT_SYMBOL_GPL +0xa9cf612f softnet_data vmlinux EXPORT_SYMBOL +0x24f4a0b4 efivar_entry_get vmlinux EXPORT_SYMBOL_GPL +0x99b7e7b3 efivar_entry_set vmlinux EXPORT_SYMBOL_GPL +0x86700220 acpi_get_cpuid vmlinux EXPORT_SYMBOL_GPL +0xddca840a gpiod_to_irq vmlinux EXPORT_SYMBOL_GPL +0xfe5d4bb2 sys_tz vmlinux EXPORT_SYMBOL +0x172a866e dma_get_sgtable_attrs vmlinux EXPORT_SYMBOL +0x1297d169 osd_req_op_extent_osd_data_pagelist net/ceph/libceph EXPORT_SYMBOL +0x55ab27ce rpc_clnt_iterate_for_each_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x764567c8 dm_btree_find_highest_key drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x5a49e4cd mlxsw_pci_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_pci EXPORT_SYMBOL +0x9e49a306 fcoe_queue_timer drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x9242d55e ip6_sk_redirect vmlinux EXPORT_SYMBOL_GPL +0x9ffa3a75 netdev_max_backlog vmlinux EXPORT_SYMBOL +0xa0bc9bb1 dev_pm_domain_attach_by_name vmlinux EXPORT_SYMBOL_GPL +0xda0a91fa drm_fb_helper_generic_probe vmlinux EXPORT_SYMBOL +0xddbeeecc pci_lock_rescan_remove vmlinux EXPORT_SYMBOL_GPL +0x6f0d2a5d blk_integrity_register vmlinux EXPORT_SYMBOL +0x1df4c2e6 kernfs_path_from_node vmlinux EXPORT_SYMBOL_GPL +0xcc00c622 unlock_page vmlinux EXPORT_SYMBOL +0x4f98d766 cpu_pm_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0xadad9a94 osd_req_op_cls_response_data_pages net/ceph/libceph EXPORT_SYMBOL +0x387e1639 rpc_pipefs_notifier_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7eff16d4 nft_data_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xd99e003d dm_bio_prison_create_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x5997a5ee devlink_region_shapshot_id_get vmlinux EXPORT_SYMBOL_GPL +0x0e3f92c7 devlink_dpipe_table_resource_set vmlinux EXPORT_SYMBOL_GPL +0x4aee5f6e netdev_walk_all_upper_dev_rcu vmlinux EXPORT_SYMBOL_GPL +0x4127cdfa drm_sysfs_hotplug_event vmlinux EXPORT_SYMBOL +0xe0c48a68 lcd_device_register vmlinux EXPORT_SYMBOL +0xf3a57892 release_dentry_name_snapshot vmlinux EXPORT_SYMBOL +0xdb735885 alarm_cancel vmlinux EXPORT_SYMBOL_GPL +0x38e7b1cb osd_req_op_raw_data_in_pages net/ceph/libceph EXPORT_SYMBOL +0x5a45ba31 svc_auth_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4cab4f4b snd_info_register sound/core/snd EXPORT_SYMBOL +0xee484d38 rdma_nl_register drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x16af9071 dm_array_set_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb6949944 dm_array_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xc151e45f __tracepoint_pnfs_mds_fallback_write_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xd9a87840 xfrm_unregister_type vmlinux EXPORT_SYMBOL +0x462fc3bf nf_ip_checksum vmlinux EXPORT_SYMBOL +0xbd04c2b0 sock_diag_save_cookie vmlinux EXPORT_SYMBOL_GPL +0xcd8f8489 led_sysfs_enable vmlinux EXPORT_SYMBOL_GPL +0x55073acf pci_request_regions_exclusive vmlinux EXPORT_SYMBOL +0x594bb503 pkcs7_validate_trust vmlinux EXPORT_SYMBOL_GPL +0x68c44b21 jbd2_log_wait_commit vmlinux EXPORT_SYMBOL +0x8e9ef217 hrtimer_init_sleeper vmlinux EXPORT_SYMBOL_GPL +0x48c2ae53 kvm_vcpu_uninit vmlinux EXPORT_SYMBOL_GPL +0x0283dfe3 _snd_pcm_hw_params_any 
sound/core/snd-pcm EXPORT_SYMBOL +0x5a68668e mlx5_core_qp_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x654449c3 memset16 vmlinux EXPORT_SYMBOL +0xa36be6d3 xhci_ext_cap_init vmlinux EXPORT_SYMBOL_GPL +0x57727285 phylink_ethtool_set_eee vmlinux EXPORT_SYMBOL_GPL +0x2c8e28ee phylink_ethtool_get_eee vmlinux EXPORT_SYMBOL_GPL +0x060d5fdd hisi_sas_init_mem vmlinux EXPORT_SYMBOL_GPL +0x22544e18 drm_self_refresh_helper_update_avg_times vmlinux EXPORT_SYMBOL +0xfc4641ae acpi_dma_controller_free vmlinux EXPORT_SYMBOL_GPL +0x526eef2c hdmi_vendor_infoframe_pack vmlinux EXPORT_SYMBOL +0xe3ecb28c blk_queue_can_use_dma_map_merging vmlinux EXPORT_SYMBOL_GPL +0xea24db2e dquot_disable vmlinux EXPORT_SYMBOL +0x31e72320 trace_event_buffer_commit vmlinux EXPORT_SYMBOL_GPL +0xdbb88b93 module_put vmlinux EXPORT_SYMBOL +0x2459bbcc console_set_on_cmdline vmlinux EXPORT_SYMBOL +0xf8cbe7fb ip_tunnel_rcv net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x2c1636c6 ib_free_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3eb2010a ib_drain_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x38972f23 dm_rh_region_to_sector drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x2fdc9563 mlx5_fc_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xe8f03644 fc_host_fpin_rcv drivers/scsi/scsi_transport_fc EXPORT_SYMBOL +0x0705dd14 ipmi_register_for_cmd drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xb08a22a3 cpufreq_show_cpus vmlinux EXPORT_SYMBOL_GPL +0xd558127d usb_get_from_anchor vmlinux EXPORT_SYMBOL_GPL +0x0f8a3b69 genphy_resume vmlinux EXPORT_SYMBOL +0xa2896228 hisi_sas_alloc vmlinux EXPORT_SYMBOL_GPL +0x93712ea8 sata_link_hardreset vmlinux EXPORT_SYMBOL_GPL +0x9d9ac853 regulator_unlock vmlinux EXPORT_SYMBOL_GPL +0xdbcf041a acpi_install_address_space_handler vmlinux EXPORT_SYMBOL +0x50208460 inode_get_bytes vmlinux EXPORT_SYMBOL +0x3eeb2322 __wake_up vmlinux EXPORT_SYMBOL +0x7d4ffb38 nft_meta_set_eval net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x3ed12c0b nft_meta_get_eval net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x1fee9cc5 nf_conntrack_helper_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xd76105c9 mpt_free_msg_frame drivers/message/fusion/mptbase EXPORT_SYMBOL +0xb9c691a0 mpt_put_msg_frame_hi_pri drivers/message/fusion/mptbase EXPORT_SYMBOL +0x35ba2254 mlxsw_afk_values_add_u32 drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x0419e175 vbin_printf vmlinux EXPORT_SYMBOL_GPL +0x9c1f0a10 xdp_rxq_info_unreg_mem_model vmlinux EXPORT_SYMBOL_GPL +0xa6fbe4d3 i2c_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x335e2e86 dev_driver_string vmlinux EXPORT_SYMBOL +0x6230470a pcie_get_readrq vmlinux EXPORT_SYMBOL +0xc7a1840e llist_add_batch vmlinux EXPORT_SYMBOL_GPL +0x26f3e9f7 blkg_print_stat_ios vmlinux EXPORT_SYMBOL_GPL +0x5061f457 blk_mq_update_nr_hw_queues vmlinux EXPORT_SYMBOL_GPL +0x675fd43e bio_reset vmlinux EXPORT_SYMBOL +0xbd968cd1 security_d_instantiate vmlinux EXPORT_SYMBOL +0xf821d767 iter_file_splice_write vmlinux EXPORT_SYMBOL +0x256f9852 vfs_mkdir vmlinux EXPORT_SYMBOL +0x1b4ae5d8 srcu_torture_stats_print vmlinux EXPORT_SYMBOL_GPL +0x9900edd1 of_match_node vmlinux EXPORT_SYMBOL +0x6c0f42ea usb_driver_claim_interface vmlinux EXPORT_SYMBOL_GPL +0x74c0a6cf stmmac_pltfr_remove vmlinux EXPORT_SYMBOL_GPL +0x0f4c15b7 fwnode_property_get_reference_args vmlinux EXPORT_SYMBOL_GPL +0x425bb062 drm_gem_prime_handle_to_fd vmlinux EXPORT_SYMBOL +0xc5a5c678 uart_parse_earlycon vmlinux EXPORT_SYMBOL_GPL +0xfc5c46e2 acpi_buffer_to_resource vmlinux EXPORT_SYMBOL 
+0x8353dfff acpi_os_get_iomem vmlinux EXPORT_SYMBOL_GPL +0x26022abb pinctrl_add_gpio_range vmlinux EXPORT_SYMBOL_GPL +0x01a0ec5b cryptd_free_ahash vmlinux EXPORT_SYMBOL_GPL +0x28f7f275 eventfd_ctx_fileget vmlinux EXPORT_SYMBOL_GPL +0xf0f3ab04 rpc_pipe_generic_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd2d05958 xprt_unregister_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x38cd197f nf_synproxy_ipv6_init net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x28ae460d iscsi_conn_failure drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0xac7d090f led_trigger_set vmlinux EXPORT_SYMBOL_GPL +0xc42ccb19 of_find_i2c_device_by_node vmlinux EXPORT_SYMBOL +0x3a7010a4 __ata_change_queue_depth vmlinux EXPORT_SYMBOL_GPL +0x63187451 pcie_aspm_support_enabled vmlinux EXPORT_SYMBOL +0x1bde197f gpiod_get_array_optional vmlinux EXPORT_SYMBOL_GPL +0xf163f6d7 eventfd_fget vmlinux EXPORT_SYMBOL_GPL +0xf09b7d51 dget_parent vmlinux EXPORT_SYMBOL +0xaf8cf75d dccp_insert_option net/dccp/dccp EXPORT_SYMBOL_GPL +0xfaf1275c snd_pcm_mmap_data sound/core/snd-pcm EXPORT_SYMBOL +0xa0436e98 in6addr_linklocal_allnodes vmlinux EXPORT_SYMBOL +0x6bd5fe10 do_tcp_sendpages vmlinux EXPORT_SYMBOL_GPL +0x6795a1ca hid_dump_input vmlinux EXPORT_SYMBOL_GPL +0x327f51af usb_disable_autosuspend vmlinux EXPORT_SYMBOL_GPL +0x87792a3e usb_get_dev vmlinux EXPORT_SYMBOL_GPL +0xdcb0a2c0 phylink_stop vmlinux EXPORT_SYMBOL_GPL +0xeca5d40d software_node_find_by_name vmlinux EXPORT_SYMBOL_GPL +0xe6112b0a device_property_match_string vmlinux EXPORT_SYMBOL_GPL +0x81d7c5b7 percpu_ref_kill_and_confirm vmlinux EXPORT_SYMBOL_GPL +0x65323570 securityfs_create_dir vmlinux EXPORT_SYMBOL_GPL +0xdd2c169b mb_cache_create vmlinux EXPORT_SYMBOL +0x379cf02a iunique vmlinux EXPORT_SYMBOL +0xa13efa7d try_lookup_one_len vmlinux EXPORT_SYMBOL +0x121d958a unregister_die_notifier vmlinux EXPORT_SYMBOL_GPL +0x3e7c3ca4 nfs4_schedule_stateid_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x134f705d xfrm_policy_walk_done vmlinux EXPORT_SYMBOL +0xa24ad64c inet_dgram_ops vmlinux EXPORT_SYMBOL +0xd8dd6a9c input_release_device vmlinux EXPORT_SYMBOL +0x21139ba8 ata_dev_next vmlinux EXPORT_SYMBOL_GPL +0xb7de7124 drm_format_info vmlinux EXPORT_SYMBOL +0xc93e8461 acpi_get_event_resources vmlinux EXPORT_SYMBOL +0x1271bbc0 __do_once_done vmlinux EXPORT_SYMBOL +0x2f91a395 blk_alloc_queue vmlinux EXPORT_SYMBOL +0xd5263820 mb_cache_destroy vmlinux EXPORT_SYMBOL +0xd396d406 node_to_cpumask_map vmlinux EXPORT_SYMBOL +0x54e60827 ip_set_nfnl_put net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xdbc2e2f7 nf_conntrack_helpers_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x04f6a142 dm_bufio_client_create drivers/md/dm-bufio EXPORT_SYMBOL_GPL +0x5e5c36b1 mlx5_destroy_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x6b2f9b4f mlx4_get_devlink_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3d8baf3b zs_huge_class_size mm/zsmalloc EXPORT_SYMBOL_GPL +0xf3ab6c51 drm_fb_xrgb8888_to_rgb565_dstclip vmlinux EXPORT_SYMBOL +0x7a377f5c drm_atomic_helper_set_config vmlinux EXPORT_SYMBOL +0xc3ea5305 iommu_default_passthrough vmlinux EXPORT_SYMBOL_GPL +0x2bb9af28 gen_pool_for_each_chunk vmlinux EXPORT_SYMBOL +0x60fbba99 get_gendisk vmlinux EXPORT_SYMBOL +0xec2e1c8f proc_doulongvec_minmax vmlinux EXPORT_SYMBOL +0xec1488b5 nfc_hci_reset_pipes net/nfc/hci/hci EXPORT_SYMBOL +0xacd2fc7b nft_chain_validate_dependency net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xb3172894 usb_interface_id drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0x3c31dec8 
pnfs_generic_pg_init_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xcb3e91cc xt_counters_alloc vmlinux EXPORT_SYMBOL +0x73ea1660 hwmon_device_register vmlinux EXPORT_SYMBOL_GPL +0x5d4e673e usb_store_new_id vmlinux EXPORT_SYMBOL_GPL +0x00eb4f56 sdev_evt_send_simple vmlinux EXPORT_SYMBOL_GPL +0xc9c6a043 ttm_tt_fini vmlinux EXPORT_SYMBOL +0x5ea6d07a of_find_mipi_dsi_host_by_node vmlinux EXPORT_SYMBOL +0xbf5944a3 drm_fb_helper_blank vmlinux EXPORT_SYMBOL +0x8f030c0d crypto_register_akcipher vmlinux EXPORT_SYMBOL_GPL +0x91421d5d simple_attr_open vmlinux EXPORT_SYMBOL_GPL +0x8c44487a seq_hex_dump vmlinux EXPORT_SYMBOL +0x9d80ccad truncate_inode_pages_final vmlinux EXPORT_SYMBOL +0x56e9103b cpu_pm_enter vmlinux EXPORT_SYMBOL_GPL +0x09337cd0 __wake_up_locked_key vmlinux EXPORT_SYMBOL_GPL +0x2d1b02d2 usermodehelper_read_lock_wait vmlinux EXPORT_SYMBOL_GPL +0x82d07161 __cpuhp_setup_state_cpuslocked vmlinux EXPORT_SYMBOL +0x417a9131 ceph_oloc_destroy net/ceph/libceph EXPORT_SYMBOL +0xd48fa0df vhost_work_queue drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xf398644f dm_btree_lookup_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x6a7e9e78 rndis_uninit drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0xcfb6f654 netdev_adjacent_change_commit vmlinux EXPORT_SYMBOL +0xf72c0597 sk_clear_memalloc vmlinux EXPORT_SYMBOL_GPL +0x11bd2e96 usb_root_hub_lost_power vmlinux EXPORT_SYMBOL_GPL +0x5a109dee sas_ssp_task_response vmlinux EXPORT_SYMBOL_GPL +0x6c3ebf04 ata_pci_bmdma_clear_simplex vmlinux EXPORT_SYMBOL_GPL +0x1ed8f406 hvc_remove vmlinux EXPORT_SYMBOL_GPL +0xefdee9cb pci_stop_and_remove_bus_device vmlinux EXPORT_SYMBOL +0x7d5e1008 __crc32c_le_shift vmlinux EXPORT_SYMBOL +0x4578f528 __kfifo_to_user vmlinux EXPORT_SYMBOL +0xf202c1ac blk_mq_sched_request_inserted vmlinux EXPORT_SYMBOL_GPL +0x6d4c2944 done_path_create vmlinux EXPORT_SYMBOL +0x60687e1f get_super_exclusive_thawed vmlinux EXPORT_SYMBOL +0x33153bbf ceph_con_close net/ceph/libceph EXPORT_SYMBOL +0xf34332f0 iscsi_find_param_from_key drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x5589a245 xfrm4_rcv_encap vmlinux EXPORT_SYMBOL +0xceb15d7c udp_poll vmlinux EXPORT_SYMBOL +0xe1d03c86 skb_copy_ubufs vmlinux EXPORT_SYMBOL_GPL +0x0594e040 mdiobus_unregister_device vmlinux EXPORT_SYMBOL +0x5b3252a4 ahci_host_activate vmlinux EXPORT_SYMBOL_GPL +0x5fd23199 iommu_group_remove_device vmlinux EXPORT_SYMBOL_GPL +0xf1f18f52 pci_hp_deregister vmlinux EXPORT_SYMBOL_GPL +0xd106b924 pci_remove_root_bus vmlinux EXPORT_SYMBOL_GPL +0x1a809853 posix_acl_from_mode vmlinux EXPORT_SYMBOL +0x9eacf8a5 kstrndup vmlinux EXPORT_SYMBOL +0x687b6a16 kdbgetsymval vmlinux EXPORT_SYMBOL +0x7840ad2c irq_domain_reset_irq_data vmlinux EXPORT_SYMBOL_GPL +0x2d3b2369 irq_set_chip vmlinux EXPORT_SYMBOL +0x4362158c console_stop vmlinux EXPORT_SYMBOL +0x64e9301d nf_conntrack_tuple_taken net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x2de41ee9 snd_device_register sound/core/snd EXPORT_SYMBOL +0xe3f07315 ib_find_cached_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x6f2fe3c4 dm_btree_remove_leaves drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x3df59123 mlx4_mr_rereg_mem_cleanup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xbcc074f3 __serpent_decrypt crypto/serpent_generic EXPORT_SYMBOL_GPL +0x91d1fe52 max_session_slots fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa77bfd29 register_inet6addr_validator_notifier vmlinux EXPORT_SYMBOL +0x90d27d8a inet6_getname vmlinux EXPORT_SYMBOL +0x42508e58 power_supply_put vmlinux EXPORT_SYMBOL_GPL 
+0x96caecee sata_pmp_port_ops vmlinux EXPORT_SYMBOL_GPL +0xad2cc58a drm_client_release vmlinux EXPORT_SYMBOL +0x4d22b807 drm_mode_equal_no_clocks vmlinux EXPORT_SYMBOL +0x3c8e7a00 fscrypt_ioctl_get_policy_ex vmlinux EXPORT_SYMBOL_GPL +0xa8dc52ed rds_message_put net/rds/rds EXPORT_SYMBOL_GPL +0x83d8bb47 sunrpc_init_cache_detail net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbc6f1970 xdr_encode_word net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x598bfcee auth_domain_find net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x22d966c6 ip_set_range_to_cidr net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x0223b0c1 register_mtd_user drivers/mtd/mtd EXPORT_SYMBOL_GPL +0xc35e8aef mlx4_find_cached_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xa70bf20f ohci_init_driver vmlinux EXPORT_SYMBOL_GPL +0x4fdc945d sata_deb_timing_normal vmlinux EXPORT_SYMBOL_GPL +0xb431df61 drm_mode_config_reset vmlinux EXPORT_SYMBOL +0x8d73278e hex_asc_upper vmlinux EXPORT_SYMBOL +0x6a03751f sgl_free_order vmlinux EXPORT_SYMBOL +0xeb8456ba debugfs_create_file vmlinux EXPORT_SYMBOL_GPL +0x113a0d8b new_inode vmlinux EXPORT_SYMBOL +0x682f00f9 rpc_restart_call_prepare net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xdb067ed1 snd_ctl_find_id sound/core/snd EXPORT_SYMBOL +0x14cb9863 __uio_register_device drivers/uio/uio EXPORT_SYMBOL_GPL +0xe0b13336 argv_free vmlinux EXPORT_SYMBOL +0x86f85114 net_dec_egress_queue vmlinux EXPORT_SYMBOL_GPL +0x9dc555e0 build_skb_around vmlinux EXPORT_SYMBOL +0x69340a39 of_get_phy_mode vmlinux EXPORT_SYMBOL_GPL +0xd0ae90b0 __rtc_register_device vmlinux EXPORT_SYMBOL_GPL +0x3fd37f68 xhci_run vmlinux EXPORT_SYMBOL_GPL +0x8112b3d2 scsi_build_sense_buffer vmlinux EXPORT_SYMBOL +0xb15ab250 dma_fence_array_create vmlinux EXPORT_SYMBOL +0x3e309477 regmap_field_update_bits_base vmlinux EXPORT_SYMBOL_GPL +0x552156b2 platform_find_device_by_driver vmlinux EXPORT_SYMBOL_GPL +0x666ca9c5 drm_dev_set_unique vmlinux EXPORT_SYMBOL +0x52bf053e drm_dp_start_crc vmlinux EXPORT_SYMBOL +0x183690c9 tty_port_close vmlinux EXPORT_SYMBOL +0x65dccf13 xz_dec_end vmlinux EXPORT_SYMBOL +0x6e5b8651 xz_dec_run vmlinux EXPORT_SYMBOL +0xbe651e79 jbd2_journal_clear_err vmlinux EXPORT_SYMBOL +0x3f123442 mlxsw_core_kvd_sizes_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xfe0f2369 ipmi_get_maintenance_mode drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x88e45d64 pnfs_read_done_resend_to_mds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xa5d3bea5 nfs_refresh_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9b828c90 genl_unregister_family vmlinux EXPORT_SYMBOL +0xdc7de679 netdev_upper_get_next_dev_rcu vmlinux EXPORT_SYMBOL +0x9d0d6206 unregister_netdevice_notifier vmlinux EXPORT_SYMBOL +0xc9d09a72 of_graph_get_endpoint_by_regs vmlinux EXPORT_SYMBOL +0xb048c043 hisi_sas_scan_start vmlinux EXPORT_SYMBOL_GPL +0x35f11fd3 ttm_bo_clean_mm vmlinux EXPORT_SYMBOL +0xf06a4ebd drm_gem_fb_create_with_dirty vmlinux EXPORT_SYMBOL_GPL +0x32e4cb4e tty_register_driver vmlinux EXPORT_SYMBOL +0x0fab1ab0 hdmi_spd_infoframe_pack vmlinux EXPORT_SYMBOL +0xd1363cc1 ucs2_strsize vmlinux EXPORT_SYMBOL +0x00c723af wbc_attach_and_unlock_inode vmlinux EXPORT_SYMBOL_GPL +0x264950bc generic_remap_file_range_prep vmlinux EXPORT_SYMBOL +0x217e68d7 padata_stop vmlinux EXPORT_SYMBOL +0xad703657 ceph_auth_destroy_authorizer net/ceph/libceph EXPORT_SYMBOL +0xc7e57ddb mlx4_mw_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0d42a8c4 mlx4_config_dev_retrieval drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xba2974fe kset_unregister vmlinux EXPORT_SYMBOL 
+0x4295d4c8 ndo_dflt_fdb_del vmlinux EXPORT_SYMBOL +0x3fac1b7e driver_remove_file vmlinux EXPORT_SYMBOL_GPL +0x3bb64f23 iommu_sva_bind_device vmlinux EXPORT_SYMBOL_GPL +0x8c9e338f acpi_bios_error vmlinux EXPORT_SYMBOL +0x6a8441be cpci_hp_start vmlinux EXPORT_SYMBOL_GPL +0x42869613 debugfs_create_u32 vmlinux EXPORT_SYMBOL_GPL +0xf4a3ce96 llc_build_and_send_ui_pkt net/llc/llc EXPORT_SYMBOL +0x2976f912 core_tpg_get_initiator_node_acl drivers/target/target_core_mod EXPORT_SYMBOL +0xfe77a02b set_and_calc_slave_port_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x0ef06974 spi_populate_ppr_msg drivers/scsi/scsi_transport_spi EXPORT_SYMBOL_GPL +0xd3559ef4 __memset vmlinux EXPORT_SYMBOL +0x65cea145 netdev_rx_handler_register vmlinux EXPORT_SYMBOL_GPL +0x55ea8fb4 devm_thermal_zone_of_sensor_register vmlinux EXPORT_SYMBOL_GPL +0x2030658c drm_fb_helper_ioctl vmlinux EXPORT_SYMBOL +0x438d8df2 iova_cache_get vmlinux EXPORT_SYMBOL_GPL +0x9ad56d2d __tracepoint_block_split vmlinux EXPORT_SYMBOL_GPL +0x8d8f4eae jbd2_journal_release_jbd_inode vmlinux EXPORT_SYMBOL +0xd170139c wbc_account_cgroup_owner vmlinux EXPORT_SYMBOL_GPL +0x6bd1aa56 stack_trace_save vmlinux EXPORT_SYMBOL_GPL +0x35a39657 inet_csk_reset_keepalive_timer vmlinux EXPORT_SYMBOL +0x2ce16d7f efivar_entry_find vmlinux EXPORT_SYMBOL_GPL +0x50b88f41 usb_get_gadget_udc_name vmlinux EXPORT_SYMBOL_GPL +0xa5e4a492 dev_attr_phy_event_threshold vmlinux EXPORT_SYMBOL_GPL +0x813dd259 dev_vprintk_emit vmlinux EXPORT_SYMBOL +0x7c9a7371 clk_prepare vmlinux EXPORT_SYMBOL_GPL +0xeeab8274 rhashtable_insert_slow vmlinux EXPORT_SYMBOL_GPL +0x40dc94d8 user_read vmlinux EXPORT_SYMBOL_GPL +0xea751d2b balloon_page_list_dequeue vmlinux EXPORT_SYMBOL_GPL +0x1e5502e7 balloon_page_list_enqueue vmlinux EXPORT_SYMBOL_GPL +0x766a0927 mempool_alloc_pages vmlinux EXPORT_SYMBOL +0xafe15f0b trace_define_field vmlinux EXPORT_SYMBOL_GPL +0x76d9da86 kvm_vcpu_gfn_to_pfn_atomic vmlinux EXPORT_SYMBOL_GPL +0x47939e0d __tasklet_hi_schedule vmlinux EXPORT_SYMBOL +0x31cc115f read_bytes_from_xdr_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0d11a495 line6_probe sound/usb/line6/snd-usb-line6 EXPORT_SYMBOL_GPL +0x15bbc608 fcoe_fc_crc drivers/scsi/fcoe/libfcoe EXPORT_SYMBOL_GPL +0x492cc640 arp_send vmlinux EXPORT_SYMBOL +0x7e880bcd xt_register_matches vmlinux EXPORT_SYMBOL +0x8255a6c8 sata_sff_hardreset vmlinux EXPORT_SYMBOL_GPL +0x3d02cd70 dma_fence_signal_locked vmlinux EXPORT_SYMBOL +0x2accba8f ttm_bo_unlock_delayed_workqueue vmlinux EXPORT_SYMBOL +0x4223bdf1 unregister_acpi_bus_type vmlinux EXPORT_SYMBOL_GPL +0xbb0540aa zlib_inflateReset vmlinux EXPORT_SYMBOL +0x281823c5 __kfifo_out_peek vmlinux EXPORT_SYMBOL +0xc64416e5 security_sctp_assoc_request vmlinux EXPORT_SYMBOL +0xa2baa27b cdev_del vmlinux EXPORT_SYMBOL +0x377bbcbc pm_suspend_target_state vmlinux EXPORT_SYMBOL_GPL +0x122e2120 dccp_parse_options net/dccp/dccp EXPORT_SYMBOL_GPL +0x8bab4778 i2c_mux_alloc drivers/i2c/i2c-mux EXPORT_SYMBOL_GPL +0x35dd9004 async_xor crypto/async_tx/async_xor EXPORT_SYMBOL_GPL +0x7a015c9f pneigh_enqueue vmlinux EXPORT_SYMBOL +0xf4c0790c dev_alloc_name vmlinux EXPORT_SYMBOL +0x3606182b raid_component_add vmlinux EXPORT_SYMBOL +0xeeb73cf1 drm_gem_dumb_destroy vmlinux EXPORT_SYMBOL +0x7b02057b drm_atomic_helper_check_plane_damage vmlinux EXPORT_SYMBOL +0xd576c2fb soft_cursor vmlinux EXPORT_SYMBOL +0xd0d156e9 __rht_bucket_nested vmlinux EXPORT_SYMBOL_GPL +0x9354e0f5 dax_layout_busy_page vmlinux EXPORT_SYMBOL_GPL +0x963c359b fsnotify_init_mark vmlinux EXPORT_SYMBOL_GPL +0x5f4a6e61 dm_rh_dec 
drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xe781f874 dm_tm_dec drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xe443bab4 usb_free_all_descriptors drivers/usb/gadget/libcomposite EXPORT_SYMBOL_GPL +0xf5fffdf2 ping_seq_stop vmlinux EXPORT_SYMBOL_GPL +0x1396ecaa tcp_add_backlog vmlinux EXPORT_SYMBOL +0x2c3054f9 net_inc_ingress_queue vmlinux EXPORT_SYMBOL_GPL +0xf48493c5 vga_get vmlinux EXPORT_SYMBOL +0x79d81544 mipi_dsi_dcs_get_display_brightness vmlinux EXPORT_SYMBOL +0x2cffd24c mipi_dsi_dcs_set_display_brightness vmlinux EXPORT_SYMBOL +0x3aa0a275 drm_gem_fb_create vmlinux EXPORT_SYMBOL_GPL +0x6a1733eb iommu_group_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x05668697 hisi_reset_init vmlinux EXPORT_SYMBOL_GPL +0x4963a2b4 __clk_determine_rate vmlinux EXPORT_SYMBOL_GPL +0x35664931 security_dentry_init_security vmlinux EXPORT_SYMBOL +0x5265c87f generic_fh_to_dentry vmlinux EXPORT_SYMBOL_GPL +0x38a3b865 filemap_fdatawait_keep_errors vmlinux EXPORT_SYMBOL +0xbdfb7e86 try_to_del_timer_sync vmlinux EXPORT_SYMBOL +0x408d2a04 play_idle vmlinux EXPORT_SYMBOL_GPL +0x4cdd391d dccp_feat_list_purge net/dccp/dccp EXPORT_SYMBOL_GPL +0x63e32eb0 svc_addsock net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x96c8f4c5 ipcomp_output net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL +0x70575ad5 nf_ct_expect_related_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xc917fbf3 mlx5_query_port_prio_tc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xdf6bcae0 nfs_statfs fs/nfs/nfs EXPORT_SYMBOL_GPL +0x4c17dfb8 qdisc_offload_dump_helper vmlinux EXPORT_SYMBOL +0x333fff6e devlink_traps_unregister vmlinux EXPORT_SYMBOL_GPL +0x52e5cde1 dm_unregister_target vmlinux EXPORT_SYMBOL +0x916ccd14 usb_stor_bulk_transfer_buf vmlinux EXPORT_SYMBOL_GPL +0xaca86f4c tty_init_termios vmlinux EXPORT_SYMBOL_GPL +0x4b66e5f7 pci_enable_device_io vmlinux EXPORT_SYMBOL +0xf0500331 alloc_pages_vma vmlinux EXPORT_SYMBOL +0xe8ec17e9 __clocksource_register_scale vmlinux EXPORT_SYMBOL_GPL +0x7807f0f8 schedule_timeout_idle vmlinux EXPORT_SYMBOL +0xfe487975 init_wait_entry vmlinux EXPORT_SYMBOL +0x74725e69 ZSTD_compressContinue lib/zstd/zstd_compress EXPORT_SYMBOL +0xc55faaf7 ceph_monc_get_version_async net/ceph/libceph EXPORT_SYMBOL +0x8a9ad44c mtd_read drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x1c9e7140 hinic_slq_get_first_pageaddr drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xc986467c config_group_find_item fs/configfs/configfs EXPORT_SYMBOL +0x982433c3 tcp_get_md5sig_pool vmlinux EXPORT_SYMBOL +0x21ff3f74 dm_mq_kick_requeue_list vmlinux EXPORT_SYMBOL +0xe1775ee7 dm_get_reserved_bio_based_ios vmlinux EXPORT_SYMBOL_GPL +0x1dfde267 ata_sas_port_destroy vmlinux EXPORT_SYMBOL_GPL +0xdaa2552a ata_sas_port_suspend vmlinux EXPORT_SYMBOL_GPL +0x3145216f pci_dev_present vmlinux EXPORT_SYMBOL +0x4dee836c pci_dev_put vmlinux EXPORT_SYMBOL +0x336dfd75 pci_dev_get vmlinux EXPORT_SYMBOL +0x4b4e2e09 scsi_cmd_blk_ioctl vmlinux EXPORT_SYMBOL +0x6ed3ffbd read_dev_sector vmlinux EXPORT_SYMBOL +0x6cbdf2f1 jbd2_journal_revoke vmlinux EXPORT_SYMBOL +0x8be6b107 ceph_monc_do_statfs net/ceph/libceph EXPORT_SYMBOL +0xf1b94ebc ib_device_get_by_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf78de769 nvme_change_ctrl_state drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x634ed040 af_alg_alloc_areq crypto/af_alg EXPORT_SYMBOL_GPL +0x5609ce41 cast_s2 crypto/cast_common EXPORT_SYMBOL_GPL +0x84a8d0eb of_changeset_revert vmlinux EXPORT_SYMBOL_GPL +0x1184dbda usb_ifnum_to_if vmlinux EXPORT_SYMBOL_GPL +0xc0cfe22f 
drm_atomic_helper_disable_plane vmlinux EXPORT_SYMBOL +0x4de995ec gen_pool_dma_alloc_algo vmlinux EXPORT_SYMBOL +0x890fa0fa btree_get_prev vmlinux EXPORT_SYMBOL_GPL +0xfba7ddd2 match_u64 vmlinux EXPORT_SYMBOL +0xab273e63 security_old_inode_init_security vmlinux EXPORT_SYMBOL +0x42fb2616 debugfs_create_x16 vmlinux EXPORT_SYMBOL_GPL +0xfc564771 fsnotify_put_group vmlinux EXPORT_SYMBOL_GPL +0x08690bbf __tracepoint_kmem_cache_alloc_node vmlinux EXPORT_SYMBOL +0xd81de62c ring_buffer_record_enable vmlinux EXPORT_SYMBOL_GPL +0x5170e7f3 nft_meta_policy net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xf1f002be snd_rawmidi_transmit sound/core/snd-rawmidi EXPORT_SYMBOL +0xbe047dc1 ib_mr_pool_get drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x466e5c70 md_find_rdev_rcu drivers/md/md-mod EXPORT_SYMBOL_GPL +0x34da08a0 cdrom_mode_sense drivers/cdrom/cdrom EXPORT_SYMBOL +0x660a88c8 iscsi_tcp_cleanup_task drivers/scsi/libiscsi_tcp EXPORT_SYMBOL_GPL +0x265d4b86 af_alg_unregister_type crypto/af_alg EXPORT_SYMBOL_GPL +0x52b90a04 qtree_delete_dquot fs/quota/quota_tree EXPORT_SYMBOL +0x061651be strcat vmlinux EXPORT_SYMBOL +0xc7a55bcd inet6_destroy_sock vmlinux EXPORT_SYMBOL_GPL +0x6905d23f phy_device_create vmlinux EXPORT_SYMBOL +0x24784f25 drm_gem_shmem_purge_locked vmlinux EXPORT_SYMBOL +0x08162c74 free_bucket_spinlocks vmlinux EXPORT_SYMBOL +0x6f915a45 dqstats vmlinux EXPORT_SYMBOL +0x6d0bfed3 rdma_set_ack_timeout drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x85655fda ib_query_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x72a25a72 capi_cmsg2str drivers/isdn/capi/kernelcapi EXPORT_SYMBOL +0x679d4777 iscsi_conn_send_pdu drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x50873741 xt_compat_init_offsets vmlinux EXPORT_SYMBOL +0x285e9c83 tcf_classify vmlinux EXPORT_SYMBOL +0x805667cf devlink_fmsg_u64_put vmlinux EXPORT_SYMBOL_GPL +0x51b65fc7 usb_gadget_unregister_driver vmlinux EXPORT_SYMBOL_GPL +0x71c7155e dma_buf_vunmap vmlinux EXPORT_SYMBOL_GPL +0x48641d2f dma_buf_kunmap vmlinux EXPORT_SYMBOL_GPL +0xb109b8a9 drm_sched_entity_set_priority vmlinux EXPORT_SYMBOL +0x8df943b4 crypto_register_shash vmlinux EXPORT_SYMBOL_GPL +0xbbfc68ab jbd2_journal_invalidatepage vmlinux EXPORT_SYMBOL +0x8ce69555 jbd2_journal_get_write_access vmlinux EXPORT_SYMBOL +0xef75156a rds_conn_drop net/rds/rds EXPORT_SYMBOL_GPL +0x6dee5986 gre_add_protocol net/ipv4/gre EXPORT_SYMBOL_GPL +0xd831a1a2 ip_vs_proto_name net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xd9f80b0f vfio_platform_unregister_reset drivers/vfio/platform/vfio-platform-base EXPORT_SYMBOL_GPL +0x08b25b62 alloc_candev_mqs drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x374d43ba fc_fabric_login drivers/scsi/libfc/libfc EXPORT_SYMBOL +0x6fbc6a00 radix_tree_insert vmlinux EXPORT_SYMBOL +0x6d0d4648 inet6_unregister_protosw vmlinux EXPORT_SYMBOL +0x9cc8bc80 xfrm_register_type_offload vmlinux EXPORT_SYMBOL +0x9f4657b8 pfifo_qdisc_ops vmlinux EXPORT_SYMBOL +0x7880c781 dm_kcopyd_prepare_callback vmlinux EXPORT_SYMBOL +0x09685385 i2c_transfer_buffer_flags vmlinux EXPORT_SYMBOL +0x54e712d0 vfs_listxattr vmlinux EXPORT_SYMBOL_GPL +0x7bc8ab8d kern_mount vmlinux EXPORT_SYMBOL_GPL +0x8eadc9ae __check_sticky vmlinux EXPORT_SYMBOL +0xd09aa7f5 find_get_pages_contig vmlinux EXPORT_SYMBOL +0x133969d7 __trace_printk vmlinux EXPORT_SYMBOL_GPL +0xd3c9d88c dccp_v4_do_rcv net/dccp/dccp_ipv4 EXPORT_SYMBOL_GPL +0x137dd4d8 ib_dispatch_event drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3483f668 __ib_alloc_xrcd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x0975a194 mtd_write_user_prot_reg 
drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x36bf0a3c target_depend_item drivers/target/target_core_mod EXPORT_SYMBOL +0x179cc403 ipv6_dev_mc_dec vmlinux EXPORT_SYMBOL +0x5f2436e7 ipv6_dev_mc_inc vmlinux EXPORT_SYMBOL +0x1796f164 xfrm_state_walk_done vmlinux EXPORT_SYMBOL +0xef9d62fb qdisc_create_dflt vmlinux EXPORT_SYMBOL +0x18f23201 flow_rule_match_ipv6_addrs vmlinux EXPORT_SYMBOL +0xb876ead6 flow_rule_match_ipv4_addrs vmlinux EXPORT_SYMBOL +0x4402fdfe of_reserved_mem_device_release vmlinux EXPORT_SYMBOL_GPL +0x46eaacad uart_add_one_port vmlinux EXPORT_SYMBOL +0x8a3a22fc clk_divider_ops vmlinux EXPORT_SYMBOL_GPL +0x75e60613 key_put vmlinux EXPORT_SYMBOL +0x1e738f79 bmap vmlinux EXPORT_SYMBOL +0x9d52a761 bdi_register_owner vmlinux EXPORT_SYMBOL +0x66b4cc41 kmemdup vmlinux EXPORT_SYMBOL +0x1c44b902 __blocking_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0xad7e3e66 nci_hci_clear_all_pipes net/nfc/nci/nci EXPORT_SYMBOL +0xe7f29750 dccp_sync_mss net/dccp/dccp EXPORT_SYMBOL_GPL +0xfb29f913 _copy_from_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9e98460e dm_bitset_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb70b342a dm_bio_prison_destroy drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xb26bf25d drm_mode_set_crtcinfo vmlinux EXPORT_SYMBOL +0x6661bd33 drm_mode_vrefresh vmlinux EXPORT_SYMBOL +0x6030401c __drm_atomic_helper_plane_reset vmlinux EXPORT_SYMBOL +0xa99b39c2 prandom_bytes vmlinux EXPORT_SYMBOL +0xee9c6406 generic_parse_monolithic vmlinux EXPORT_SYMBOL +0x2833f577 ZSTD_compressBegin_advanced lib/zstd/zstd_compress EXPORT_SYMBOL +0x007ad8d4 nf_conntrack_alloc net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x358e23fd ib_create_rwq_ind_table drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x9ef7446e mlx4_bf_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0187eded iscsi_host_free drivers/scsi/libiscsi EXPORT_SYMBOL_GPL +0x6181e79f timerqueue_add vmlinux EXPORT_SYMBOL_GPL +0xf6e772c3 irq_bypass_unregister_producer vmlinux EXPORT_SYMBOL_GPL +0x5add0a18 tcp_gro_complete vmlinux EXPORT_SYMBOL +0x09aa850e tcp_register_congestion_control vmlinux EXPORT_SYMBOL_GPL +0xb52aae52 tcp_release_cb vmlinux EXPORT_SYMBOL +0x0ba744c6 devlink_flash_update_status_notify vmlinux EXPORT_SYMBOL_GPL +0xfe6dca78 skb_gso_validate_network_len vmlinux EXPORT_SYMBOL_GPL +0xba5604fb stmmac_dvr_remove vmlinux EXPORT_SYMBOL_GPL +0x0a50e447 dma_buf_fd vmlinux EXPORT_SYMBOL_GPL +0xf8dbffd1 __dax_driver_register vmlinux EXPORT_SYMBOL_GPL +0xead54924 mctrl_gpio_to_gpiod vmlinux EXPORT_SYMBOL_GPL +0x4d974b9c register_sysrq_key vmlinux EXPORT_SYMBOL +0xdc340caa kstrdup_quotable_file vmlinux EXPORT_SYMBOL_GPL +0xc156c981 refcount_dec_and_mutex_lock vmlinux EXPORT_SYMBOL +0xf58eeb8e sync_filesystem vmlinux EXPORT_SYMBOL +0xee277350 nci_hci_open_pipe net/nfc/nci/nci EXPORT_SYMBOL +0x1537fdb8 vringh_need_notify_user drivers/vhost/vringh EXPORT_SYMBOL +0x64643266 iscsit_immediate_queue drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x043c229f dm_bio_prison_free_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x93cea380 pnfs_generic_pg_check_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x1eda497d __tracepoint_nfs_fsync_enter fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe64306dc tcp_connect vmlinux EXPORT_SYMBOL +0xe0862050 qdisc_watchdog_init_clockid vmlinux EXPORT_SYMBOL +0xb6eda198 neigh_table_clear vmlinux EXPORT_SYMBOL +0x06b53bd2 memalloc_socks_key vmlinux EXPORT_SYMBOL_GPL +0xd98dfd10 of_phy_find_device vmlinux EXPORT_SYMBOL +0xd4ccab04 led_trigger_event vmlinux EXPORT_SYMBOL_GPL +0x03638acd 
thermal_zone_bind_cooling_device vmlinux EXPORT_SYMBOL_GPL +0xef83b18d device_match_fwnode vmlinux EXPORT_SYMBOL_GPL +0x2396c7f0 clk_set_parent vmlinux EXPORT_SYMBOL_GPL +0x63150e06 clk_get_parent vmlinux EXPORT_SYMBOL_GPL +0xf95322f4 kthread_parkme vmlinux EXPORT_SYMBOL_GPL +0x627f576f __cpuhp_remove_state vmlinux EXPORT_SYMBOL +0x4e2ac9e6 wpan_phy_for_each net/ieee802154/ieee802154 EXPORT_SYMBOL +0x37a02412 xfrm_aalg_get_byname net/xfrm/xfrm_algo EXPORT_SYMBOL_GPL +0x46091331 __snd_rawmidi_transmit_ack sound/core/snd-rawmidi EXPORT_SYMBOL +0x94098ff8 snd_interval_list sound/core/snd-pcm EXPORT_SYMBOL +0xb7f73ef8 xas_init_marks vmlinux EXPORT_SYMBOL_GPL +0xb2d75089 xfrm_state_flush vmlinux EXPORT_SYMBOL +0x9b896724 devlink_param_value_str_fill vmlinux EXPORT_SYMBOL_GPL +0xf1356bbb reserve_iova vmlinux EXPORT_SYMBOL_GPL +0xe1ad9486 pci_fixup_device vmlinux EXPORT_SYMBOL +0x35c55639 blk_mq_request_started vmlinux EXPORT_SYMBOL_GPL +0xceafe0b5 fat_flush_inodes vmlinux EXPORT_SYMBOL_GPL +0x24b9f8a2 __dquot_alloc_space vmlinux EXPORT_SYMBOL +0xeb092198 stream_open vmlinux EXPORT_SYMBOL +0xf6c72c11 register_cc770dev drivers/net/can/cc770/cc770 EXPORT_SYMBOL_GPL +0xf1a532ae hinic_sm_ctr_rd32 drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xaa694823 hnae_put_handle drivers/net/ethernet/hisilicon/hns/hnae EXPORT_SYMBOL +0xad3de6ff nfs_wait_on_request fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1517dacc xfrm_find_acq_byseq vmlinux EXPORT_SYMBOL +0x323d7095 xfrm_policy_unregister_afinfo vmlinux EXPORT_SYMBOL +0x3461086f reuseport_detach_sock vmlinux EXPORT_SYMBOL +0x9938528e skb_zerocopy_iter_stream vmlinux EXPORT_SYMBOL_GPL +0xf08c67de napi_alloc_frag vmlinux EXPORT_SYMBOL +0x88649cca governor_sysfs_ops vmlinux EXPORT_SYMBOL_GPL +0xe499c36b drm_atomic_helper_commit_modeset_enables vmlinux EXPORT_SYMBOL +0xe250c3d4 __vfs_removexattr_locked vmlinux EXPORT_SYMBOL_GPL +0x054496b4 schedule_timeout_interruptible vmlinux EXPORT_SYMBOL +0xf6397e77 save_stack_trace vmlinux EXPORT_SYMBOL_GPL +0xb53dc45e snd_ctl_make_virtual_master sound/core/snd EXPORT_SYMBOL +0x8c6b0dfe rdma_set_ib_path drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x509512b2 rndis_signal_connect drivers/usb/gadget/function/usb_f_rndis EXPORT_SYMBOL_GPL +0x846cba25 secpath_set vmlinux EXPORT_SYMBOL +0xd0d49947 dma_buf_detach vmlinux EXPORT_SYMBOL_GPL +0x9c63e75e dma_buf_attach vmlinux EXPORT_SYMBOL_GPL +0xbfc81f58 fwnode_create_software_node vmlinux EXPORT_SYMBOL_GPL +0x5a9ef579 devm_kasprintf vmlinux EXPORT_SYMBOL_GPL +0x07427527 iommu_set_fault_handler vmlinux EXPORT_SYMBOL_GPL +0xdac4913a bitmap_allocate_region vmlinux EXPORT_SYMBOL +0x65e0d6d7 memory_read_from_buffer vmlinux EXPORT_SYMBOL +0x6c30f0d6 alarmtimer_get_rtcdev vmlinux EXPORT_SYMBOL_GPL +0x5dffb495 ZSTD_decompress_usingDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x1a107de2 ZSTD_compressCCtx lib/zstd/zstd_compress EXPORT_SYMBOL +0xff1eb119 xprt_force_disconnect net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6ce848f8 nft_meta_set_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xfcf2ded7 nft_meta_get_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xd981eaa7 ib_send_cm_rej drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x606e0612 ib_send_cm_rtu drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x1c1993e9 ib_send_cm_rep drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x8193bcb3 ib_send_cm_req drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x5ad3cee5 rdma_rw_ctx_destroy drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x0c2048fd hdlcdrv_arbitrate drivers/net/hamradio/hdlcdrv EXPORT_SYMBOL +0x94a31763 
mlx5_query_nic_vport_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x2172c00c mlx5_cmd_cleanup drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xaafd4acc max_session_cb_slots fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb01bebf9 xfrm_get_acqseq vmlinux EXPORT_SYMBOL +0xb3d2c76d scsi_hostbyte_string vmlinux EXPORT_SYMBOL +0x1789c9ab __scsi_iterate_devices vmlinux EXPORT_SYMBOL +0x14605535 dma_fence_context_alloc vmlinux EXPORT_SYMBOL +0xb5021d52 mipi_dsi_dcs_set_pixel_format vmlinux EXPORT_SYMBOL +0xf9b007ec mipi_dsi_dcs_get_pixel_format vmlinux EXPORT_SYMBOL +0xab9b5618 devm_of_pci_get_host_bridge_resources vmlinux EXPORT_SYMBOL_GPL +0x7ad050b9 qid_lt vmlinux EXPORT_SYMBOL +0xd54744c0 svc_reserve net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x293edd46 ip6_tnl_get_cap net/ipv6/ip6_tunnel EXPORT_SYMBOL +0x358626fd mlx5_eswitch_get_total_vports drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa853396b xa_extract vmlinux EXPORT_SYMBOL +0x90f289a5 qdisc_put vmlinux EXPORT_SYMBOL +0x3710aa64 eth_gro_receive vmlinux EXPORT_SYMBOL +0x901c6c99 __tracepoint_neigh_update_done vmlinux EXPORT_SYMBOL_GPL +0x2444f2f3 genphy_c45_restart_aneg vmlinux EXPORT_SYMBOL_GPL +0x12135396 phylink_mac_change vmlinux EXPORT_SYMBOL_GPL +0x4e4f0f16 dma_fence_chain_find_seqno vmlinux EXPORT_SYMBOL +0x58a981d2 platform_bus vmlinux EXPORT_SYMBOL_GPL +0xfae7d056 blk_queue_bounce_limit vmlinux EXPORT_SYMBOL +0x55339365 flush_delayed_fput vmlinux EXPORT_SYMBOL_GPL +0x1e8eb889 hinic_set_port_enable drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0xd4a1d058 ip6mr_rule_default vmlinux EXPORT_SYMBOL +0x8c71f8c1 skb_abort_seq_read vmlinux EXPORT_SYMBOL +0xc0e8e540 iscsi_host_for_each_session vmlinux EXPORT_SYMBOL_GPL +0xb9b9b8a3 iscsi_create_flashnode_sess vmlinux EXPORT_SYMBOL_GPL +0x10d9f885 scsi_sense_desc_find vmlinux EXPORT_SYMBOL +0x52af6cd5 drm_atomic_helper_crtc_reset vmlinux EXPORT_SYMBOL +0x72b9d287 default_grn vmlinux EXPORT_SYMBOL +0xfa061327 devm_clk_get_optional vmlinux EXPORT_SYMBOL +0xd9db94f5 blk_queue_split vmlinux EXPORT_SYMBOL +0x1cc59d2f remove_resource vmlinux EXPORT_SYMBOL_GPL +0xbe3879aa ceph_get_snap_context net/ceph/libceph EXPORT_SYMBOL +0x6ef8fcd8 snd_pcm_format_linear sound/core/snd-pcm EXPORT_SYMBOL +0x2d135d0a iscsit_build_r2ts_for_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xdd0a718b sbc_dif_copy_prot drivers/target/target_core_mod EXPORT_SYMBOL +0xdbab6b29 target_complete_cmd_with_length drivers/target/target_core_mod EXPORT_SYMBOL +0xbdf9cf36 mlx5_fs_remove_rx_underlay_qpn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x0d09d27c hinic_dev_ver_info drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x4a2cb6e4 inet6_ioctl vmlinux EXPORT_SYMBOL +0x61609ef8 tcp_getsockopt vmlinux EXPORT_SYMBOL +0xebb07ff8 tcp_setsockopt vmlinux EXPORT_SYMBOL +0x3752c0bd rtnl_unicast vmlinux EXPORT_SYMBOL +0xc586dfde refresh_frequency_limits vmlinux EXPORT_SYMBOL +0xf8383aba drm_property_create_range vmlinux EXPORT_SYMBOL +0x0e40ea59 pnpacpi_protocol vmlinux EXPORT_SYMBOL +0xef464c28 getboottime64 vmlinux EXPORT_SYMBOL_GPL +0x76a89305 dequeue_signal vmlinux EXPORT_SYMBOL_GPL +0x48ddd102 rxrpc_kernel_get_peer net/rxrpc/rxrpc EXPORT_SYMBOL +0x84106f36 devlink_trap_ctx_priv vmlinux EXPORT_SYMBOL_GPL +0xa33c1781 acpi_get_hp_hw_control_from_firmware vmlinux EXPORT_SYMBOL +0x2666556f crypto_ablkcipher_type vmlinux EXPORT_SYMBOL_GPL +0x62bb09bf clocks_calc_mult_shift vmlinux EXPORT_SYMBOL_GPL +0x31cc8452 ip6t_register_table 
net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL +0xb7852a05 ib_ud_header_init drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4d1e67cb mlx4_qp_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xd4cca1af nvme_setup_cmd drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0x22d73f8e pnfs_set_lo_fail fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x318b600f flow_indr_del_block_cb vmlinux EXPORT_SYMBOL_GPL +0x5f8632da flow_rule_match_mpls vmlinux EXPORT_SYMBOL +0x6e1b6fc8 flow_rule_match_icmp vmlinux EXPORT_SYMBOL +0x32b2ab1b flow_rule_match_meta vmlinux EXPORT_SYMBOL +0x4a8e3d34 try_test_sas_gpio_gp_bit vmlinux EXPORT_SYMBOL +0x9384cd49 ata_tf_from_fis vmlinux EXPORT_SYMBOL_GPL +0xf623d8fb __nla_reserve_64bit vmlinux EXPORT_SYMBOL +0x7a1cd970 fget_raw vmlinux EXPORT_SYMBOL +0xd3752c27 atomic_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x61f7a526 caif_disconnect_client net/caif/caif EXPORT_SYMBOL +0xde6075f1 hinic_host_id drivers/net/ethernet/huawei/hinic/hinic EXPORT_SYMBOL +0x64b62862 nvme_wq drivers/nvme/host/nvme-core EXPORT_SYMBOL_GPL +0xda752c63 st_nci_se_deinit drivers/nfc/st-nci/st-nci EXPORT_SYMBOL +0xe27847cc spi_add_device vmlinux EXPORT_SYMBOL_GPL +0xa91cb4f9 drm_writeback_prepare_job vmlinux EXPORT_SYMBOL +0x54bbbe7c backlight_device_unregister vmlinux EXPORT_SYMBOL +0x12d0af77 pci_reset_function_locked vmlinux EXPORT_SYMBOL_GPL +0xf66b86eb pinctrl_add_gpio_ranges vmlinux EXPORT_SYMBOL_GPL +0x250ecee5 dccp_recvmsg net/dccp/dccp EXPORT_SYMBOL_GPL +0xa811a95b nft_fib_dump net/netfilter/nft_fib EXPORT_SYMBOL_GPL +0x11ba83b1 unregister_mtd_user drivers/mtd/mtd EXPORT_SYMBOL_GPL +0x6b76fb41 iscsit_set_unsolicited_dataout drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x7604ff03 mlx5_lag_is_sriov drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xac7b22f7 mlx5_query_port_admin_status drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xec1c2a90 ipmi_get_my_address drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x6a0c3847 __mlog_printk fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x1176b399 nfs_path fs/nfs/nfs EXPORT_SYMBOL_GPL +0xc0763484 rfkill_blocked vmlinux EXPORT_SYMBOL +0xb6d6e81c ip6_push_pending_frames vmlinux EXPORT_SYMBOL_GPL +0x81e1f191 dev_change_net_namespace vmlinux EXPORT_SYMBOL_GPL +0x2884e311 sas_eh_target_reset_handler vmlinux EXPORT_SYMBOL_GPL +0x7e4db32d ahci_dev_classify vmlinux EXPORT_SYMBOL_GPL +0x40965b2e ttm_bo_put vmlinux EXPORT_SYMBOL +0xe2ebd755 pci_rescan_bus vmlinux EXPORT_SYMBOL_GPL +0xd3d9e8fb blk_queue_rq_timeout vmlinux EXPORT_SYMBOL_GPL +0x8430aa46 blk_queue_flag_clear vmlinux EXPORT_SYMBOL +0x0d6375ae cache_seq_stop_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xcd762548 xdr_init_decode_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9b32f8cf md_write_end drivers/md/md-mod EXPORT_SYMBOL +0x07aa4424 mlx5_accel_esp_create_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xd96294b5 st21nfca_hci_discover_se drivers/nfc/st21nfca/st21nfca_hci EXPORT_SYMBOL +0xb4895436 __tracepoint_nfs4_pnfs_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x143c3426 nfs_mknod fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1f6bd401 tcp_twsk_destructor vmlinux EXPORT_SYMBOL_GPL +0xb5b02be8 xt_compat_target_offset vmlinux EXPORT_SYMBOL_GPL +0x8443fd36 scm_detach_fds vmlinux EXPORT_SYMBOL +0xe4c2c66c rtc_ktime_to_tm vmlinux EXPORT_SYMBOL_GPL +0x130cc864 usb_stor_pre_reset vmlinux EXPORT_SYMBOL_GPL +0xc8a60d07 scsi_ioctl_block_when_processing_errors vmlinux EXPORT_SYMBOL_GPL +0x4dd9d14d cn_netlink_send_mult vmlinux EXPORT_SYMBOL_GPL 
+0x1e0670c6	reset_control_release	vmlinux	EXPORT_SYMBOL_GPL
+0x48760c63	put_pid	vmlinux	EXPORT_SYMBOL_GPL
+0x02edd3ed	xdr_buf_read_mic	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xa286a234	snd_pcm_format_name	sound/core/snd-pcm	EXPORT_SYMBOL_GPL
+0xb3960631	hinic_init_nic_hwdev	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0xfd78c012	of_count_phandle_with_args	vmlinux	EXPORT_SYMBOL
+0x7992ec49	__efivar_entry_delete	vmlinux	EXPORT_SYMBOL_GPL
+0x5417f09d	regmap_bulk_read	vmlinux	EXPORT_SYMBOL_GPL
+0x49f068f7	dev_pm_domain_attach	vmlinux	EXPORT_SYMBOL_GPL
+0x7d92133d	fwnode_device_is_available	vmlinux	EXPORT_SYMBOL_GPL
+0x78796f90	driver_find_device	vmlinux	EXPORT_SYMBOL_GPL
+0x02aa6aa6	regulator_is_equal	vmlinux	EXPORT_SYMBOL_GPL
+0xac5d2ad0	pci_user_read_config_byte	vmlinux	EXPORT_SYMBOL_GPL
+0x9d2d9640	pinctrl_parse_index_with_args	vmlinux	EXPORT_SYMBOL_GPL
+0xe7cdd8b1	__inode_attach_wb	vmlinux	EXPORT_SYMBOL_GPL
+0xc7e39bca	ring_buffer_dropped_events_cpu	vmlinux	EXPORT_SYMBOL_GPL
+0x1fd07fff	kdb_grepping_flag	vmlinux	EXPORT_SYMBOL
+0x81168355	posix_clock_register	vmlinux	EXPORT_SYMBOL_GPL
+0xd21b61bd	async_schedule_node_domain	vmlinux	EXPORT_SYMBOL_GPL
+0x755a6301	drm_mode_plane_set_obj_prop	vmlinux	EXPORT_SYMBOL
+0x68a90b51	get_default_font	vmlinux	EXPORT_SYMBOL
+0xbf104af0	blk_get_request	vmlinux	EXPORT_SYMBOL
+0x04345403	cryptd_aead_queued	vmlinux	EXPORT_SYMBOL_GPL
+0x916faced	__set_page_dirty	vmlinux	EXPORT_SYMBOL_GPL
+0x8748fb66	vmalloc_user_node_flags	vmlinux	EXPORT_SYMBOL
+0xd381a463	bpf_prog_free	vmlinux	EXPORT_SYMBOL_GPL
+0x00c80741	xfrm_ealg_get_byid	net/xfrm/xfrm_algo	EXPORT_SYMBOL_GPL
+0x1526b301	unix_tot_inflight	vmlinux	EXPORT_SYMBOL
+0xee6b71c4	syscon_regmap_lookup_by_compatible	vmlinux	EXPORT_SYMBOL_GPL
+0x8f141fef	device_pm_wait_for_dev	vmlinux	EXPORT_SYMBOL_GPL
+0x97ad96d9	ttm_bo_device_init	vmlinux	EXPORT_SYMBOL
+0x5dae754f	drm_connector_list_iter_end	vmlinux	EXPORT_SYMBOL
+0x7bca1049	drm_set_preferred_mode	vmlinux	EXPORT_SYMBOL
+0xda58f6ff	of_mm_gpiochip_remove	vmlinux	EXPORT_SYMBOL_GPL
+0xd6098337	bio_uninit	vmlinux	EXPORT_SYMBOL
+0x56054c05	crypto_it_tab	vmlinux	EXPORT_SYMBOL_GPL
+0x1a10c32b	crypto_ft_tab	vmlinux	EXPORT_SYMBOL_GPL
+0x5a44f8cb	__crypto_memneq	vmlinux	EXPORT_SYMBOL
+0x6d6fec1f	ktime_mono_to_any	vmlinux	EXPORT_SYMBOL_GPL
+0x09682235	down_timeout	vmlinux	EXPORT_SYMBOL
+0x47960bc4	proc_do_large_bitmap	vmlinux	EXPORT_SYMBOL
+0x821d1569	xprt_update_rtt	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xd357480a	nfnetlink_has_listeners	net/netfilter/nfnetlink	EXPORT_SYMBOL_GPL
+0x82c7ba91	__fscache_attr_changed	fs/fscache/fscache	EXPORT_SYMBOL
+0xc9a802c4	of_reserved_mem_lookup	vmlinux	EXPORT_SYMBOL_GPL
+0x274c250d	ahci_ops	vmlinux	EXPORT_SYMBOL_GPL
+0x20e7ce51	ata_wait_after_reset	vmlinux	EXPORT_SYMBOL_GPL
+0xe721cc0d	devm_platform_ioremap_resource	vmlinux	EXPORT_SYMBOL_GPL
+0xb4e21fb9	mipi_dsi_dcs_write	vmlinux	EXPORT_SYMBOL
+0x1fca0b38	housekeeping_overridden	vmlinux	EXPORT_SYMBOL_GPL
+0x0374a0b3	ax25_header_ops	net/ax25/ax25	EXPORT_SYMBOL
+0x3c34ba9a	ip_tunnel_encap_del_ops	net/ipv4/ip_tunnel	EXPORT_SYMBOL
+0xf6be6dac	snd_pcm_suspend_all	sound/core/snd-pcm	EXPORT_SYMBOL
+0x90a5530f	nfsiod_workqueue	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xe23aba0f	iommu_dev_has_feature	vmlinux	EXPORT_SYMBOL_GPL
+0x09e5d2aa	misc_deregister	vmlinux	EXPORT_SYMBOL
+0x33abb34d	acpi_match_device_ids	vmlinux	EXPORT_SYMBOL
+0x3f0eabd2	xxh64_update	vmlinux	EXPORT_SYMBOL
+0x45535485	xxh32_update	vmlinux	EXPORT_SYMBOL
+0xf7c14e93	blkdev_fsync	vmlinux	EXPORT_SYMBOL
+0xf27081f1	lookup_one_len	vmlinux	EXPORT_SYMBOL
+0x7cda3b20	relay_buf_full	vmlinux	EXPORT_SYMBOL_GPL
+0x32ab06cc	irq_percpu_is_enabled	vmlinux	EXPORT_SYMBOL_GPL
+0xb9330e1c	flow_offload_lookup	net/netfilter/nf_flow_table	EXPORT_SYMBOL_GPL
+0x563a6e2e	snd_card_ref	sound/core/snd	EXPORT_SYMBOL_GPL
+0x7afc9d8a	unregister_sound_mixer	sound/soundcore	EXPORT_SYMBOL
+0x8fa25c24	xa_find	vmlinux	EXPORT_SYMBOL
+0x47c65bfc	unregister_inet6addr_validator_notifier	vmlinux	EXPORT_SYMBOL
+0x7d0e974b	inet6_bind	vmlinux	EXPORT_SYMBOL
+0xf68285c0	register_inetaddr_notifier	vmlinux	EXPORT_SYMBOL
+0x9277ccf8	skb_headers_offset_update	vmlinux	EXPORT_SYMBOL
+0x3e9a6646	hisi_sas_notify_phy_event	vmlinux	EXPORT_SYMBOL_GPL
+0xdeec9f18	sas_port_get_phy	vmlinux	EXPORT_SYMBOL
+0xce7a7fd7	acpi_device_update_power	vmlinux	EXPORT_SYMBOL_GPL
+0x4351577a	fb_parse_edid	vmlinux	EXPORT_SYMBOL
+0x2e694cb1	fscrypt_ioctl_add_key	vmlinux	EXPORT_SYMBOL_GPL
+0x4c4c3d11	kgdb_unregister_io_module	vmlinux	EXPORT_SYMBOL_GPL
+0x7e0826e2	atomic_dec_and_mutex_lock	vmlinux	EXPORT_SYMBOL
+0x2a895cad	kthread_delayed_work_timer_fn	vmlinux	EXPORT_SYMBOL
+0xd90cb249	ZSTD_getBlockSizeMax	lib/zstd/zstd_compress	EXPORT_SYMBOL
+0x7c0cdc30	mlx5_query_port_pfc	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0x4702e767	af_alg_sendpage	crypto/af_alg	EXPORT_SYMBOL_GPL
+0x8c704eab	regmap_noinc_write	vmlinux	EXPORT_SYMBOL_GPL
+0xbac3944e	devres_get	vmlinux	EXPORT_SYMBOL_GPL
+0x713342d0	devres_add	vmlinux	EXPORT_SYMBOL_GPL
+0x9cc247d8	device_link_remove	vmlinux	EXPORT_SYMBOL_GPL
+0x3e89afa0	serial8250_set_defaults	vmlinux	EXPORT_SYMBOL_GPL
+0xfdc17369	fscrypt_free_inode	vmlinux	EXPORT_SYMBOL
+0x5a614898	inode_set_flags	vmlinux	EXPORT_SYMBOL
+0xd0a7a7e9	dma_mmap_attrs	vmlinux	EXPORT_SYMBOL
+0x95b13ff8	prot_sock_flag	vmlinux	EXPORT_SYMBOL
+0x4a237e57	cfpkt_tonative	net/caif/caif	EXPORT_SYMBOL
+0x2b0a7de9	mlx5_modify_port_ets_rate_limit	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0xb356026f	fc_host_post_vendor_event	drivers/scsi/scsi_transport_fc	EXPORT_SYMBOL
+0xe9010c5c	nfs_free_client	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xef26443d	ip_frag_init	vmlinux	EXPORT_SYMBOL
+0x5d7eb133	led_trigger_blink_oneshot	vmlinux	EXPORT_SYMBOL_GPL
+0x3f84bcd7	dax_alive	vmlinux	EXPORT_SYMBOL_GPL
+0x35efb040	drm_bridge_enable	vmlinux	EXPORT_SYMBOL
+0x00d4dc7f	clk_fixed_rate_ops	vmlinux	EXPORT_SYMBOL_GPL
+0xd5ad8526	pci_set_mwi	vmlinux	EXPORT_SYMBOL
+0x1d8d8fab	sync_inodes_sb	vmlinux	EXPORT_SYMBOL
+0x6091b333	unregister_chrdev_region	vmlinux	EXPORT_SYMBOL
+0x758fbe24	generic_writepages	vmlinux	EXPORT_SYMBOL
+0x15c85de3	mempool_init	vmlinux	EXPORT_SYMBOL
+0x38e46431	mempool_exit	vmlinux	EXPORT_SYMBOL
+0xdbe0d0b6	ceph_monc_validate_auth	net/ceph/libceph	EXPORT_SYMBOL
+0xa2d83bc8	register_8022_client	net/802/p8022	EXPORT_SYMBOL
+0x551ea60e	vhost_vq_access_ok	drivers/vhost/vhost	EXPORT_SYMBOL_GPL
+0x7b047bd9	dm_tm_create_non_blocking_clone	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0xeed6f087	simd_register_aeads_compat	crypto/crypto_simd	EXPORT_SYMBOL_GPL
+0x36646941	udp_cmsg_send	vmlinux	EXPORT_SYMBOL_GPL
+0x58acf24b	mdiobus_register_board_info	vmlinux	EXPORT_SYMBOL
+0x798a13b1	drm_i2c_encoder_detect	vmlinux	EXPORT_SYMBOL
+0x271cba95	acpi_bus_private_data_handler	vmlinux	EXPORT_SYMBOL
+0x10df3786	gpiochip_is_requested	vmlinux	EXPORT_SYMBOL_GPL
+0xc8d1d433	blk_set_default_limits	vmlinux	EXPORT_SYMBOL
+0x74f4c6a0	fuse_request_end	vmlinux	EXPORT_SYMBOL_GPL
+0xc3293638	dquot_get_next_dqblk	vmlinux	EXPORT_SYMBOL
+0x0c725fb8	posix_acl_equiv_mode	vmlinux	EXPORT_SYMBOL
+0x9731f360	kvm_read_guest_page	vmlinux	EXPORT_SYMBOL_GPL
+0xace81187	ceph_pg_poolid_by_name	net/ceph/libceph	EXPORT_SYMBOL
+0x23359aff	unregister_capictr_notifier	drivers/isdn/capi/kernelcapi	EXPORT_SYMBOL_GPL
+0x2d1624d5	__udp_disconnect	vmlinux	EXPORT_SYMBOL
+0xb20b5250	sock_from_file	vmlinux	EXPORT_SYMBOL
+0x6486c0d8	cpufreq_generic_attr	vmlinux	EXPORT_SYMBOL_GPL
+0xda5693fc	cpufreq_generic_init	vmlinux	EXPORT_SYMBOL_GPL
+0xae0ecfae	lookup_bdev	vmlinux	EXPORT_SYMBOL
+0xa22d9548	trace_seq_to_user	vmlinux	EXPORT_SYMBOL_GPL
+0xf44447de	clockevents_config_and_register	vmlinux	EXPORT_SYMBOL_GPL
+0xa04f945a	cpus_read_lock	vmlinux	EXPORT_SYMBOL_GPL
+0x39f9769f	irq_stat	vmlinux	EXPORT_SYMBOL
+0xa49016d5	ceph_destroy_client	net/ceph/libceph	EXPORT_SYMBOL
+0x7a8ca627	xfrm_count_pfkey_enc_supported	net/xfrm/xfrm_algo	EXPORT_SYMBOL_GPL
+0x5f098b2a	in6addr_interfacelocal_allrouters	vmlinux	EXPORT_SYMBOL
+0x836147e0	raw_seq_stop	vmlinux	EXPORT_SYMBOL_GPL
+0xf5f587df	inet_proto_csum_replace_by_diff	vmlinux	EXPORT_SYMBOL
+0xfa690589	netdev_cmd_to_name	vmlinux	EXPORT_SYMBOL_GPL
+0x2a3a3485	gnet_stats_copy_basic	vmlinux	EXPORT_SYMBOL
+0xbfc43a48	drm_vram_mm_init	vmlinux	EXPORT_SYMBOL
+0x3bf17755	mpi_read_buffer	vmlinux	EXPORT_SYMBOL_GPL
+0xac537ac2	percpu_counter_destroy	vmlinux	EXPORT_SYMBOL
+0xced7f0c6	vm_map_ram	vmlinux	EXPORT_SYMBOL
+0x030ce007	filemap_fdatawrite_range	vmlinux	EXPORT_SYMBOL
+0xeab9cbd5	iscsi_conn_get_addr_param	drivers/scsi/libiscsi	EXPORT_SYMBOL_GPL
+0x83fbe0d7	inet_sendmsg	vmlinux	EXPORT_SYMBOL
+0xc7ecd590	hidinput_report_event	vmlinux	EXPORT_SYMBOL_GPL
+0xfe3d0164	ttm_bo_acc_size	vmlinux	EXPORT_SYMBOL
+0x41b342fe	elevator_alloc	vmlinux	EXPORT_SYMBOL
+0x35d3dc46	crypto_alg_sem	vmlinux	EXPORT_SYMBOL_GPL
+0x43aa319e	lease_register_notifier	vmlinux	EXPORT_SYMBOL_GPL
+0xbd2458d1	__getblk_gfp	vmlinux	EXPORT_SYMBOL
+0xc10d541d	dma_direct_map_page	vmlinux	EXPORT_SYMBOL
+0x3d23c66c	rpc_call_async	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x74f59c0c	br_multicast_has_querier_adjacent	net/bridge/bridge	EXPORT_SYMBOL_GPL
+0x9a320aac	mlx5_core_create_mkey	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL
+0xb6510888	netlink_strict_get_check	vmlinux	EXPORT_SYMBOL_GPL
+0xa43799a8	rfs_needed	vmlinux	EXPORT_SYMBOL
+0x56802ae8	rps_cpu_mask	vmlinux	EXPORT_SYMBOL
+0x8b477a15	inverse_translate	vmlinux	EXPORT_SYMBOL_GPL
+0xb654ef65	acpi_os_read_port	vmlinux	EXPORT_SYMBOL
+0x65537437	freezer_cgrp_subsys_on_dfl_key	vmlinux	EXPORT_SYMBOL_GPL
+0x3b3e1ae2	flush_rcu_work	vmlinux	EXPORT_SYMBOL
+0xe8b3f0d8	__cpuhp_state_remove_instance	vmlinux	EXPORT_SYMBOL_GPL
+0x75ed16c7	__rdma_create_id	drivers/infiniband/core/rdma_cm	EXPORT_SYMBOL
+0x87c0687a	hinic_get_sq_local_ci	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x0907da3f	af_alg_free_sg	crypto/af_alg	EXPORT_SYMBOL_GPL
+0x9b7c7d09	af_alg_make_sg	crypto/af_alg	EXPORT_SYMBOL_GPL
+0xfe029963	unregister_inetaddr_notifier	vmlinux	EXPORT_SYMBOL
+0x61643fdb	ata_sff_exec_command	vmlinux	EXPORT_SYMBOL_GPL
+0x5908a33c	hvc_instantiate	vmlinux	EXPORT_SYMBOL_GPL
+0x9b38c84b	amba_release_regions	vmlinux	EXPORT_SYMBOL
+0x61ea189b	fb_pad_aligned_buffer	vmlinux	EXPORT_SYMBOL
+0x1a9a433c	prandom_u32_state	vmlinux	EXPORT_SYMBOL
+0xdfb915bd	elv_rqhash_add	vmlinux	EXPORT_SYMBOL_GPL
+0xa40e361f	get_kernel_pages	vmlinux	EXPORT_SYMBOL_GPL
+0x8415de8b	perf_event_enable	vmlinux	EXPORT_SYMBOL_GPL
+0xe05e2f85	nexthop_free_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0x2838afc6	od_register_powersave_bias_handler	vmlinux	EXPORT_SYMBOL_GPL
+0xd6547cd1	phy_reset_after_clk_enable	vmlinux	EXPORT_SYMBOL
+0xd6c1e25c	virtqueue_add_outbuf	vmlinux	EXPORT_SYMBOL_GPL
+0x612bfd89	errno_to_blk_status	vmlinux	EXPORT_SYMBOL_GPL
+0x02fa05e8	nobh_write_begin	vmlinux	EXPORT_SYMBOL
+0x15003e67	filemap_check_errors	vmlinux	EXPORT_SYMBOL
+0x30a2b5f5	cpuacct_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL
+0xe284c11b	nfs_post_op_update_inode_force_wcc	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xb8752e4d	__tracepoint_fib6_table_lookup	vmlinux	EXPORT_SYMBOL_GPL
+0xa7d57f32	xfrm_state_free	vmlinux	EXPORT_SYMBOL
+0x3bcd7a64	nf_hook_entries_delete_raw	vmlinux	EXPORT_SYMBOL_GPL
+0xd41fdbd4	netlink_ack	vmlinux	EXPORT_SYMBOL
+0xa9f3dc06	sock_no_bind	vmlinux	EXPORT_SYMBOL
+0xd738ca1b	phy_unregister_fixup_for_uid	vmlinux	EXPORT_SYMBOL
+0xf1dd76e4	iscsi_register_transport	vmlinux	EXPORT_SYMBOL_GPL
+0xe162b81a	dmaengine_unmap_put	vmlinux	EXPORT_SYMBOL_GPL
+0x63c08029	clk_bulk_unprepare	vmlinux	EXPORT_SYMBOL_GPL
+0x5f93525c	acpi_extract_package	vmlinux	EXPORT_SYMBOL
+0x35a88f28	zlib_inflateInit2	vmlinux	EXPORT_SYMBOL
+0xa5d7c388	pstore_type_to_name	vmlinux	EXPORT_SYMBOL_GPL
+0x5c6a90d0	param_set_charp	vmlinux	EXPORT_SYMBOL
+0x5cf0d0bb	dm_tm_create_with_sm	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0xfa4c77d5	tap_queue_resize	drivers/net/tap	EXPORT_SYMBOL_GPL
+0x6c22307b	iscsi_session_failure	drivers/scsi/libiscsi	EXPORT_SYMBOL_GPL
+0x0fd13ba5	xfrm_unregister_type_offload	vmlinux	EXPORT_SYMBOL
+0x7e6648f8	dax_copy_to_iter	vmlinux	EXPORT_SYMBOL_GPL
+0xc255d356	drm_gem_shmem_create	vmlinux	EXPORT_SYMBOL_GPL
+0x3962789c	acpi_dev_resume	vmlinux	EXPORT_SYMBOL_GPL
+0xb7de145d	blkg_lookup_slowpath	vmlinux	EXPORT_SYMBOL_GPL
+0x9101d3b1	__mmu_notifier_register	vmlinux	EXPORT_SYMBOL_GPL
+0x4071b517	out_of_line_wait_on_bit_timeout	vmlinux	EXPORT_SYMBOL_GPL
+0x036e60ee	flow_rule_match_enc_keyid	vmlinux	EXPORT_SYMBOL
+0xfb44ff47	ata_sas_sync_probe	vmlinux	EXPORT_SYMBOL_GPL
+0xdf6b0216	set_primary_fwnode	vmlinux	EXPORT_SYMBOL_GPL
+0x08d795e9	drm_atomic_helper_page_flip	vmlinux	EXPORT_SYMBOL
+0x5bfb9d90	fb_sys_read	vmlinux	EXPORT_SYMBOL_GPL
+0xada31e57	gen_pool_dma_alloc_align	vmlinux	EXPORT_SYMBOL
+0xa9c63b69	security_xfrm_policy_free	vmlinux	EXPORT_SYMBOL
+0x949f5a61	nci_req_complete	net/nfc/nci/nci	EXPORT_SYMBOL
+0x567120e2	wpan_phy_unregister	net/ieee802154/ieee802154	EXPORT_SYMBOL
+0x8df3789f	snd_oss_info_register	sound/core/snd	EXPORT_SYMBOL
+0xa8d9c2c2	vfio_unpin_pages	drivers/vfio/vfio	EXPORT_SYMBOL
+0x7086e988	mtd_get_device_size	drivers/mtd/mtd	EXPORT_SYMBOL_GPL
+0x4bc2a91c	fc_lport_bsg_request	drivers/scsi/libfc/libfc	EXPORT_SYMBOL
+0x0c36014e	simd_unregister_aeads	crypto/crypto_simd	EXPORT_SYMBOL_GPL
+0x91a7b1da	qdisc_class_hash_remove	vmlinux	EXPORT_SYMBOL
+0x4bc52111	__put_net	vmlinux	EXPORT_SYMBOL_GPL
+0xa2b90256	usb_gadget_vbus_disconnect	vmlinux	EXPORT_SYMBOL_GPL
+0x98c039dc	dma_fence_wait_timeout	vmlinux	EXPORT_SYMBOL
+0x64925270	__dax_synchronous	vmlinux	EXPORT_SYMBOL_GPL
+0x238027e1	transport_destroy_device	vmlinux	EXPORT_SYMBOL_GPL
+0xfbc7fc2e	drm_gem_object_put	vmlinux	EXPORT_SYMBOL
+0xba058617	serial8250_tx_chars	vmlinux	EXPORT_SYMBOL_GPL
+0xd52ca944	gpiochip_enable_irq	vmlinux	EXPORT_SYMBOL_GPL
+0xbfa48604	blk_mq_alloc_tag_set	vmlinux	EXPORT_SYMBOL
+0xb0a6ff6e	blk_steal_bios	vmlinux	EXPORT_SYMBOL_GPL
+0x2760a52b	ip_set_del	net/netfilter/ipset/ip_set	EXPORT_SYMBOL_GPL
+0x0038c67b	ip_set_add	net/netfilter/ipset/ip_set	EXPORT_SYMBOL_GPL
+0x5fd516fd	m_can_class_allocate_dev	drivers/net/can/m_can/m_can	EXPORT_SYMBOL_GPL
+0xb2b0d885	mlx5_lag_is_active	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL
+0x6476d839	mlx4_replace_zero_macs	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0x48298514	crypto_poly1305_update	crypto/poly1305_generic	EXPORT_SYMBOL_GPL
+0xa63109ee	tcf_block_get_ext	vmlinux	EXPORT_SYMBOL
+0x6ce0cb76	thermal_cdev_update	vmlinux	EXPORT_SYMBOL
+0xa946db21	ttm_bo_move_ttm	vmlinux	EXPORT_SYMBOL
+0x06d88c7a	tty_unregister_device	vmlinux	EXPORT_SYMBOL
+0x4ec54e78	bitmap_to_arr32	vmlinux	EXPORT_SYMBOL
+0xf311e156	key_being_used_for	vmlinux	EXPORT_SYMBOL_GPL
+0xd534c30a	crypto_register_skciphers	vmlinux	EXPORT_SYMBOL_GPL
+0xd6fde043	is_module_sig_enforced	vmlinux	EXPORT_SYMBOL
+0xf3808cb1	get_state_synchronize_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0xfb481954	vprintk	vmlinux	EXPORT_SYMBOL
+0x8899eb50	vsock_for_each_connected_socket	net/vmw_vsock/vsock	EXPORT_SYMBOL_GPL
+0x753e20b2	dm_bio_prison_create	drivers/md/dm-bio-prison	EXPORT_SYMBOL_GPL
+0x272392d3	hinic_get_card_present_state	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0xac94e86b	vsprintf	vmlinux	EXPORT_SYMBOL
+0x85207575	ipv6_sock_mc_join	vmlinux	EXPORT_SYMBOL
+0xd293f4e5	__udp4_lib_lookup	vmlinux	EXPORT_SYMBOL_GPL
+0x7b420d15	tcp_poll	vmlinux	EXPORT_SYMBOL
+0x8c29bc66	netdev_lower_get_next_private	vmlinux	EXPORT_SYMBOL
+0xd8d9c271	generic_splice_sendpage	vmlinux	EXPORT_SYMBOL
+0xbe5d367e	vfs_statx_fd	vmlinux	EXPORT_SYMBOL
+0x110b41b8	dma_max_mapping_size	vmlinux	EXPORT_SYMBOL_GPL
+0xdf513d1c	kvm_map_gfn	vmlinux	EXPORT_SYMBOL_GPL
+0xc01c7bfa	udp_tun_rx_dst	net/ipv4/udp_tunnel	EXPORT_SYMBOL_GPL
+0x1f59657d	mdev_dev	drivers/vfio/mdev/mdev	EXPORT_SYMBOL
+0xc03fd6bb	mlxsw_core_res_get	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x5cab06b1	ip_do_fragment	vmlinux	EXPORT_SYMBOL
+0xd3eaf1ed	devlink_dpipe_entry_clear	vmlinux	EXPORT_SYMBOL
+0x95225e1d	attribute_container_find_class_device	vmlinux	EXPORT_SYMBOL_GPL
+0x58604e4d	alloc_iova_mem	vmlinux	EXPORT_SYMBOL
+0xca3ab270	__tracepoint_map	vmlinux	EXPORT_SYMBOL_GPL
+0xfc8e3c98	register_key_type	vmlinux	EXPORT_SYMBOL
+0xb56fbed9	grab_cache_page_write_begin	vmlinux	EXPORT_SYMBOL
+0xebc9a09f	lock_system_sleep	vmlinux	EXPORT_SYMBOL_GPL
+0x0ca14884	rt_mutex_lock_interruptible	vmlinux	EXPORT_SYMBOL_GPL
+0x5f5f3f59	nfc_vendor_cmd_reply	net/nfc/nfc	EXPORT_SYMBOL
+0x6335e1cf	__mtd_next_device	drivers/mtd/mtd	EXPORT_SYMBOL_GPL
+0xa7765e88	mlxsw_reg_query	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x62097177	mlx4_phys_to_slaves_pport	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0x792aa337	ttm_page_alloc_debugfs	vmlinux	EXPORT_SYMBOL
+0xc8a3051e	acpi_subsys_suspend	vmlinux	EXPORT_SYMBOL_GPL
+0x2b7b8987	acpi_subsys_prepare	vmlinux	EXPORT_SYMBOL_GPL
+0x0a5400c7	pci_disable_link_state_locked	vmlinux	EXPORT_SYMBOL
+0x66fff6af	_copy_to_iter	vmlinux	EXPORT_SYMBOL
+0x6390f209	blkg_conf_finish	vmlinux	EXPORT_SYMBOL_GPL
+0xabf32f29	utf16s_to_utf8s	vmlinux	EXPORT_SYMBOL
+0x78b97f3c	iomap_swapfile_activate	vmlinux	EXPORT_SYMBOL_GPL
+0xc21b3cca	devices_cgrp_subsys_on_dfl_key	vmlinux	EXPORT_SYMBOL_GPL
+0xa02aa74a	__cond_resched_lock	vmlinux	EXPORT_SYMBOL
+0x5d09a70e	__put_task_struct	vmlinux	EXPORT_SYMBOL_GPL
+0xf43e176e	kvm_get_dirty_log_protect	vmlinux	EXPORT_SYMBOL_GPL
+0x9e3e305d	cfpkt_set_prio	net/caif/caif	EXPORT_SYMBOL
+0x18b27dcd	mrp_register_application	net/802/mrp	EXPORT_SYMBOL_GPL
+0x47b996fd	snd_rawmidi_output_params	sound/core/snd-rawmidi	EXPORT_SYMBOL
+0x02aa8894	rdma_query_ah	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xf30b9aa6	__tracepoint_bcache_journal_replay_key	drivers/md/bcache/bcache	EXPORT_SYMBOL_GPL
+0x0e2b5842	mlxsw_afa_block_append_vlan_modify	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x9c9589ac	unregister_pernet_subsys	vmlinux	EXPORT_SYMBOL_GPL
+0xebecc084	devm_led_classdev_register_ext	vmlinux	EXPORT_SYMBOL_GPL
+0xc5c99a79	drm_dp_get_adjust_request_voltage	vmlinux	EXPORT_SYMBOL
+0x5568af51	pnp_unregister_driver	vmlinux	EXPORT_SYMBOL
+0xacbbbb3a	simple_write_begin	vmlinux	EXPORT_SYMBOL
+0x51f1a54d	redirty_page_for_writepage	vmlinux	EXPORT_SYMBOL
+0xa4a9be59	bdi_set_max_ratio	vmlinux	EXPORT_SYMBOL
+0x8ffe7e89	nf_conntrack_htable_size	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x65f5a00e	can_rx_offload_enable	drivers/net/can/dev/can-dev	EXPORT_SYMBOL_GPL
+0x63874d4c	mlxsw_core_port_driver_priv	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0xc71906c9	fc_block_rport	drivers/scsi/scsi_transport_fc	EXPORT_SYMBOL
+0x08a616c6	netdev_emerg	vmlinux	EXPORT_SYMBOL
+0x0d881280	sk_stream_kill_queues	vmlinux	EXPORT_SYMBOL
+0xc76f334d	ata_port_desc	vmlinux	EXPORT_SYMBOL_GPL
+0x64e167c4	dev_set_name	vmlinux	EXPORT_SYMBOL_GPL
+0x6b5bcdda	drm_flip_work_init	vmlinux	EXPORT_SYMBOL
+0xeb37101c	audit_log_end	vmlinux	EXPORT_SYMBOL
+0x36d69557	ipv6_flowlabel_exclusive	vmlinux	EXPORT_SYMBOL
+0xf3a089ed	lwtunnel_fill_encap	vmlinux	EXPORT_SYMBOL_GPL
+0x3c3a648e	__sk_dst_check	vmlinux	EXPORT_SYMBOL
+0x92708d13	of_platform_populate	vmlinux	EXPORT_SYMBOL_GPL
+0xa1b23e1c	usb_queue_reset_device	vmlinux	EXPORT_SYMBOL_GPL
+0xedeb59d9	__tracepoint_dma_fence_signaled	vmlinux	EXPORT_SYMBOL
+0x80935c79	drm_mm_scan_add_block	vmlinux	EXPORT_SYMBOL
+0x49c8035e	drm_fb_helper_sys_write	vmlinux	EXPORT_SYMBOL
+0x762a0433	drm_scdc_get_scrambling_status	vmlinux	EXPORT_SYMBOL
+0x5027bde2	acpi_acquire_mutex	vmlinux	EXPORT_SYMBOL
+0x960c65e7	devm_of_iomap	vmlinux	EXPORT_SYMBOL
+0xb10e7df4	__kfifo_dma_in_prepare	vmlinux	EXPORT_SYMBOL
+0xbc892785	hash_and_copy_to_iter	vmlinux	EXPORT_SYMBOL
+0xd27b25dd	blk_check_plugged	vmlinux	EXPORT_SYMBOL
+0x716265c7	debugfs_initialized	vmlinux	EXPORT_SYMBOL_GPL
+0xa2261de6	lookup_one_len_unlocked	vmlinux	EXPORT_SYMBOL
+0x49e4b95b	bpf_map_put	vmlinux	EXPORT_SYMBOL_GPL
+0x2089f94c	tracing_snapshot_cond_enable	vmlinux	EXPORT_SYMBOL_GPL
+0x7cc6c56d	task_cputime_adjusted	vmlinux	EXPORT_SYMBOL_GPL
+0xec2b5bd8	ax25_register_pid	net/ax25/ax25	EXPORT_SYMBOL_GPL
+0x2b2062ad	snd_pcm_lib_preallocate_pages	sound/core/snd-pcm	EXPORT_SYMBOL
+0xf2ae70d7	xdp_convert_zc_to_xdp_frame	vmlinux	EXPORT_SYMBOL_GPL
+0xaf4014ff	usb_amd_quirk_pll_check	vmlinux	EXPORT_SYMBOL_GPL
+0x752afaa3	usb_interrupt_msg	vmlinux	EXPORT_SYMBOL_GPL
+0x6ce114d4	pm_generic_runtime_resume	vmlinux	EXPORT_SYMBOL_GPL
+0x5e0477a4	bus_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0x93a0c39d	set_bh_page	vmlinux	EXPORT_SYMBOL
+0x3df70c99	trace_clock_global	vmlinux	EXPORT_SYMBOL_GPL
+0x14066e2d	kthread_blkcg	vmlinux	EXPORT_SYMBOL
+0xf04ec1d8	ax25_linkfail_register	net/ax25/ax25	EXPORT_SYMBOL
+0x9290e07a	dm_tm_read_lock	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0x7b6b3af5	dm_bm_read_lock	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0x2911f58b	nfs_zap_acl_cache	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xf4db35bc	stpcpy	vmlinux	EXPORT_SYMBOL
+0xe914e41e	strcpy	vmlinux	EXPORT_SYMBOL
+0xdce26129	kobj_sysfs_ops	vmlinux	EXPORT_SYMBOL_GPL
+0x3f8ab72e	devlink_fmsg_bool_put	vmlinux	EXPORT_SYMBOL_GPL
+0x2fb13488	efivars_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0x51fc3803	drm_format_info_block_width	vmlinux	EXPORT_SYMBOL
+0xabc9ddbe	clkdev_alloc	vmlinux	EXPORT_SYMBOL
+0x609b2853	hdmi_infoframe_pack	vmlinux	EXPORT_SYMBOL
+0xa802ddf0	crypto_destroy_tfm	vmlinux	EXPORT_SYMBOL_GPL
+0x35b0e4c1	sysfs_remove_link_from_group	vmlinux	EXPORT_SYMBOL_GPL
+0x5cb57c84	filemap_fault	vmlinux	EXPORT_SYMBOL
+0xe5a7f1f9	nfc_tm_deactivated	net/nfc/nfc	EXPORT_SYMBOL
+0x6d5bf833	nft_fib_policy	net/netfilter/nft_fib	EXPORT_SYMBOL
+0xe6a4dd07	parport_ieee1284_read_byte	drivers/parport/parport	EXPORT_SYMBOL
+0x4d31b8a6	parport_claim_or_block	drivers/parport/parport	EXPORT_SYMBOL
+0x7e13d233	rndis_set_param_medium	drivers/usb/gadget/function/usb_f_rndis	EXPORT_SYMBOL_GPL
+0x9d074f65	mlx4_is_eq_shared	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL
+0x1475f64b	ocfs2_dlm_lvb_valid	fs/ocfs2/ocfs2_stackglue	EXPORT_SYMBOL_GPL
+0x1bc3edc2	usb_stor_sense_invalidCDB	vmlinux	EXPORT_SYMBOL_GPL
+0x7e5cf8ae	sdev_enable_disk_events	vmlinux	EXPORT_SYMBOL
+0x864e1c67	fb_get_buffer_offset	vmlinux	EXPORT_SYMBOL
+0x3741e380	t10_pi_type3_crc	vmlinux	EXPORT_SYMBOL
+0x733ed24a	key_payload_reserve	vmlinux	EXPORT_SYMBOL
+0xdccd03e5	ftrace_set_notrace	vmlinux	EXPORT_SYMBOL_GPL
+0x0907d14d	blocking_notifier_chain_register	vmlinux	EXPORT_SYMBOL_GPL
+0xa52fcc79	ieee802154_xmit_complete	net/mac802154/mac802154	EXPORT_SYMBOL
+0x63b0c22d	dm_bitset_flush	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0x21828bdf	hinic_ppf_ext_db_init	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x13a9feca	nfs_filemap_write_and_wait_range	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xd03309ec	skb_unlink	vmlinux	EXPORT_SYMBOL
+0x815bccf0	sock_zerocopy_callback	vmlinux	EXPORT_SYMBOL_GPL
+0xff291ecf	clk_unregister_divider	vmlinux	EXPORT_SYMBOL_GPL
+0x7108bded	blk_set_queue_dying	vmlinux	EXPORT_SYMBOL_GPL
+0x3fa81eba	crypto_register_template	vmlinux	EXPORT_SYMBOL_GPL
+0xe2a90d71	fuse_abort_conn	vmlinux	EXPORT_SYMBOL_GPL
+0xacb020ce	node_states	vmlinux	EXPORT_SYMBOL
+0x1dd8d178	vm_iomap_memory	vmlinux	EXPORT_SYMBOL
+0x49e96999	cond_synchronize_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0x732c9928	irq_setup_alt_chip	vmlinux	EXPORT_SYMBOL_GPL
+0xf511376b	ceph_auth_create_authorizer	net/ceph/libceph	EXPORT_SYMBOL
+0xa283256e	dccp_send_sync	net/dccp/dccp	EXPORT_SYMBOL_GPL
+0x64cde3e8	nf_tproxy_handle_time_wait6	net/ipv6/netfilter/nf_tproxy_ipv6	EXPORT_SYMBOL_GPL
+0x23495426	nf_tproxy_handle_time_wait4	net/ipv4/netfilter/nf_tproxy_ipv4	EXPORT_SYMBOL_GPL
+0xb2e5ae4a	snd_lookup_minor_data	sound/core/snd	EXPORT_SYMBOL
+0x2f69f29e	ibdev_emerg	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xe1bdae47	dm_cache_policy_create	drivers/md/dm-cache	EXPORT_SYMBOL_GPL
+0x253569d1	ptp_classify_raw	vmlinux	EXPORT_SYMBOL_GPL
+0x88be2f58	__ethtool_get_link_ksettings	vmlinux	EXPORT_SYMBOL
+0xbc9ab803	of_phy_attach	vmlinux	EXPORT_SYMBOL
+0x6a000dab	spi_set_cs_timing	vmlinux	EXPORT_SYMBOL_GPL
+0x0e43c839	drm_sched_stop	vmlinux	EXPORT_SYMBOL
+0x5641485b	tty_termios_encode_baud_rate	vmlinux	EXPORT_SYMBOL_GPL
+0x4a02c7aa	tty_release_struct	vmlinux	EXPORT_SYMBOL_GPL
+0xb4043948	acpi_execute_simple_method	vmlinux	EXPORT_SYMBOL
+0xc6a27775	smp_call_function_single_async	vmlinux	EXPORT_SYMBOL_GPL
+0x8a70c74d	sched_trace_rq_avg_dl	vmlinux	EXPORT_SYMBOL_GPL
+0xde425730	gpu_create_wait	vmlinux	EXPORT_SYMBOL_GPL
+0x332306b7	m_can_class_register	drivers/net/can/m_can/m_can	EXPORT_SYMBOL_GPL
+0x9cff847d	fc_host_post_fc_event	drivers/scsi/scsi_transport_fc	EXPORT_SYMBOL
+0x16fe2354	nfs_wait_client_init_complete	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xa709c835	fib6_info_destroy_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0x317976b7	inet_csk_delete_keepalive_timer	vmlinux	EXPORT_SYMBOL
+0x458a24cd	eth_platform_get_mac_address	vmlinux	EXPORT_SYMBOL
+0xfb2d0da5	skb_append_pagefrags	vmlinux	EXPORT_SYMBOL_GPL
+0x81d61db2	mbox_request_channel_byname	vmlinux	EXPORT_SYMBOL_GPL
+0x2b9e3588	of_reserved_mem_device_init_by_idx	vmlinux	EXPORT_SYMBOL_GPL
+0x1b85771c	of_parse_phandle	vmlinux	EXPORT_SYMBOL
+0xea688cbc	usb_hcd_pci_shutdown	vmlinux	EXPORT_SYMBOL_GPL
+0x3b288db1	ata_cable_40wire	vmlinux	EXPORT_SYMBOL_GPL
+0x6958ae23	dax_get_by_host	vmlinux	EXPORT_SYMBOL_GPL
+0xad084e94	__bdev_dax_supported	vmlinux	EXPORT_SYMBOL_GPL
+0xfb581501	dev_pm_qos_add_ancestor_request	vmlinux	EXPORT_SYMBOL_GPL
+0x0bc169a9	drm_fb_helper_set_par	vmlinux	EXPORT_SYMBOL
+0x271ad4ea	pci_bus_set_ops	vmlinux	EXPORT_SYMBOL
+0x389617b0	LZ4_decompress_fast_continue	vmlinux	EXPORT_SYMBOL
+0x658ce1a8	xxh64_reset	vmlinux	EXPORT_SYMBOL
+0x6673f96d	xxh32_reset	vmlinux	EXPORT_SYMBOL
+0x7e5db80b	pstore_name_to_type	vmlinux	EXPORT_SYMBOL_GPL
+0x0df26207	proc_create_single_data	vmlinux	EXPORT_SYMBOL
+0x0d756b59	nf_log_dump_tcp_header	net/netfilter/nf_log_common	EXPORT_SYMBOL_GPL
+0x001d39a4	nf_log_dump_udp_header	net/netfilter/nf_log_common	EXPORT_SYMBOL_GPL
+0x4fdee897	i8042_command	drivers/input/serio/i8042	EXPORT_SYMBOL
+0x26451667	xfrm_state_register_afinfo	vmlinux	EXPORT_SYMBOL
+0x649e6aa0	usb_register_device_driver	vmlinux	EXPORT_SYMBOL_GPL
+0xd8851b13	phy_loopback	vmlinux	EXPORT_SYMBOL
+0xbc488bee	ata_cable_unknown	vmlinux	EXPORT_SYMBOL_GPL
+0x68f2e635	subsys_interface_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0xd8e26441	drm_atomic_helper_commit_planes_on_crtc	vmlinux	EXPORT_SYMBOL
+0xa753a2e2	regulator_list_hardware_vsel	vmlinux	EXPORT_SYMBOL_GPL
+0xeee16073	kmem_cache_size	vmlinux	EXPORT_SYMBOL
+0x27715e4a	enable_kprobe	vmlinux	EXPORT_SYMBOL_GPL
+0x7f730c80	vsock_core_exit	net/vmw_vsock/vsock	EXPORT_SYMBOL_GPL
+0x69324e97	l2tp_session_get_by_ifname	net/l2tp/l2tp_core	EXPORT_SYMBOL_GPL
+0xaa25b7cc	ib_cm_init_qp_attr	drivers/infiniband/core/ib_cm	EXPORT_SYMBOL
+0xe2d7e96f	mlx4_get_vf_config	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0xc9901bfa	nfs_symlink	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x1de4978e	kobject_get_path	vmlinux	EXPORT_SYMBOL_GPL
+0xfa8ec279	inet_ioctl	vmlinux	EXPORT_SYMBOL
+0x415d499c	of_device_register	vmlinux	EXPORT_SYMBOL
+0x6c0a4379	genphy_c45_pma_read_abilities	vmlinux	EXPORT_SYMBOL_GPL
+0xd19cb888	regmap_get_val_endian	vmlinux	EXPORT_SYMBOL_GPL
+0x74754435	acpi_bus_generate_netlink_event	vmlinux	EXPORT_SYMBOL
+0x5bfdea24	is_phytium1500	vmlinux	EXPORT_SYMBOL
+0x9480d128	hinic_cmdq_direct_resp	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x6cba78b4	nvme_wait_freeze_timeout	drivers/nvme/host/nvme-core	EXPORT_SYMBOL_GPL
+0xcc1b882a	idr_get_next_ul	vmlinux	EXPORT_SYMBOL
+0x65f58d56	xt_register_target	vmlinux	EXPORT_SYMBOL
+0xbbb11a31	page_pool_create	vmlinux	EXPORT_SYMBOL
+0xc44dac23	phy_drivers_register	vmlinux	EXPORT_SYMBOL
+0x83561e2e	scsi_vpd_lun_id	vmlinux	EXPORT_SYMBOL
+0x1fe679ec	ata_sas_async_probe	vmlinux	EXPORT_SYMBOL_GPL
+0x24a8be86	drm_any_plane_has_format	vmlinux	EXPORT_SYMBOL
+0xf71466c4	drm_connector_register	vmlinux	EXPORT_SYMBOL
+0x3fea029c	hisi_clk_register_gate	vmlinux	EXPORT_SYMBOL_GPL
+0x9bbc9a6e	decrypt_blob	vmlinux	EXPORT_SYMBOL_GPL
+0xd6eaaea1	full_name_hash	vmlinux	EXPORT_SYMBOL
+0xb4ff6bb6	hrtimer_active	vmlinux	EXPORT_SYMBOL_GPL
+0x406c4cb1	hrtimer_resolution	vmlinux	EXPORT_SYMBOL_GPL
+0x31abadab	kvm_get_dirty_log	vmlinux	EXPORT_SYMBOL_GPL
+0xfd039e48	nfc_remove_se	net/nfc/nfc	EXPORT_SYMBOL
+0x895d3aa3	svc_rqst_alloc	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x26b393b8	ip_vs_proto_data_get	net/netfilter/ipvs/ip_vs	EXPORT_SYMBOL
+0x820db7e0	nf_nat_inet_register_fn	net/netfilter/nf_nat	EXPORT_SYMBOL_GPL
+0x24a73f8f	ib_get_cached_pkey	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x68e8e5d8	mlx5_core_query_ib_ppcnt	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0x17053745	mlx4_mr_hw_write_mpt	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0x7c7a4c04	nvme_get_features	drivers/nvme/host/nvme-core	EXPORT_SYMBOL_GPL
+0xc3cba0ca	ping_prot	vmlinux	EXPORT_SYMBOL
+0x0a292872	reservation_seqcount_class	vmlinux	EXPORT_SYMBOL
+0xba525cf4	pm_generic_freeze	vmlinux	EXPORT_SYMBOL_GPL
+0x5055972d	device_create_vargs	vmlinux	EXPORT_SYMBOL_GPL
+0xc65764ac	drm_bridge_pre_enable	vmlinux	EXPORT_SYMBOL
+0x278757c5	iommu_unmap_fast	vmlinux	EXPORT_SYMBOL_GPL
+0xc5596b0f	tty_port_hangup	vmlinux	EXPORT_SYMBOL
+0x390120a4	pnp_disable_dev	vmlinux	EXPORT_SYMBOL
+0xfc23b41c	framebuffer_release	vmlinux	EXPORT_SYMBOL
+0x1fe6e504	gpiod_add_hogs	vmlinux	EXPORT_SYMBOL_GPL
+0x72d9fb46	pinctrl_utils_reserve_map	vmlinux	EXPORT_SYMBOL_GPL
+0x3ef051c8	crypto_inc	vmlinux	EXPORT_SYMBOL_GPL
+0xa09e722b	iomap_file_buffered_write	vmlinux	EXPORT_SYMBOL_GPL
+0x8c7bfa86	get_cached_acl	vmlinux	EXPORT_SYMBOL
+0x1aafee41	wait_for_stable_page	vmlinux	EXPORT_SYMBOL_GPL
+0x2e6eae97	event_triggers_post_call	vmlinux	EXPORT_SYMBOL_GPL
+0xa38c1436	cpu_bit_bitmap	vmlinux	EXPORT_SYMBOL_GPL
+0x9670b5a1	rpc_free_iostats	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x48f82ef1	rpc_destroy_wait_queue	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x4557b425	dm_bitset_test_bit	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0x1b8b95ad	i8042_unlock_chip	drivers/input/serio/i8042	EXPORT_SYMBOL
+0x192caa5d	mlx5_packet_reformat_alloc	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL
+0xd7d6e4ab	mlx4_test_async	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL
+0x57287ef3	fqdir_init	vmlinux	EXPORT_SYMBOL
+0x5c35dc07	drm_connector_has_possible_encoder	vmlinux	EXPORT_SYMBOL
+0x8d28c09e	drm_atomic_helper_wait_for_fences	vmlinux	EXPORT_SYMBOL
+0x38a71b7e	pci_free_resource_list	vmlinux	EXPORT_SYMBOL
+0xd6234359	blk_mq_sched_try_merge	vmlinux	EXPORT_SYMBOL_GPL
+0x306f5576	pkcs7_verify	vmlinux	EXPORT_SYMBOL_GPL
+0x9157ec31	crypto_register_kpp	vmlinux	EXPORT_SYMBOL_GPL
+0xce0b66b6	touch_buffer	vmlinux	EXPORT_SYMBOL
+0xb8149779	alloc_file_pseudo	vmlinux	EXPORT_SYMBOL
+0xdfa76acf	prepare_to_swait_event	vmlinux	EXPORT_SYMBOL
+0xd260bea9	__kthread_should_park	vmlinux	EXPORT_SYMBOL_GPL
+0x46c47fb6	__node_distance	vmlinux	EXPORT_SYMBOL
+0xfc0b3d72	nci_get_conn_info_by_dest_type_params	net/nfc/nci/nci	EXPORT_SYMBOL
+0xb40f590e	nfc_se_connectivity	net/nfc/nfc	EXPORT_SYMBOL
+0xf3830d20	svcauth_unix_purge	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xe6c1e126	mpt_event_deregister	drivers/message/fusion/mptbase	EXPORT_SYMBOL
+0x3001cb64	kobject_move	vmlinux	EXPORT_SYMBOL_GPL
+0x808da16c	of_phy_is_fixed_link	vmlinux	EXPORT_SYMBOL
+0x2c03ddfb	dm_table_get_md	vmlinux	EXPORT_SYMBOL
+0x932e810f	devm_rc_register_device	vmlinux	EXPORT_SYMBOL_GPL
+0x6d7fd059	usb_hcd_poll_rh_status	vmlinux	EXPORT_SYMBOL_GPL
+0x654d6e97	dev_pm_qos_hide_flags	vmlinux	EXPORT_SYMBOL_GPL
+0x518e5ba5	device_find_child	vmlinux	EXPORT_SYMBOL_GPL
+0x8ac743de	sg_copy_buffer	vmlinux	EXPORT_SYMBOL
+0x4fe841bc	fat_setattr	vmlinux	EXPORT_SYMBOL_GPL
+0xea79dab2	fat_getattr	vmlinux	EXPORT_SYMBOL_GPL
+0xb477e8e0	__fscrypt_prepare_link	vmlinux	EXPORT_SYMBOL_GPL
+0x3892e9da	fscrypt_file_open	vmlinux	EXPORT_SYMBOL_GPL
+0x79593bee	generic_read_dir	vmlinux	EXPORT_SYMBOL
+0xe007de41	kallsyms_lookup_name	vmlinux	EXPORT_SYMBOL_GPL
+0xe9c856f2	xprt_lookup_rqst	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xceb2b2f7	ct_sip_parse_header_uri	net/netfilter/nf_conntrack_sip	EXPORT_SYMBOL_GPL
+0x8f2ac572	fc_exch_update_stats	drivers/scsi/libfc/libfc	EXPORT_SYMBOL
+0xbd05750f	fc_exch_seq_send	drivers/scsi/libfc/libfc	EXPORT_SYMBOL
+0x6bfad17f	usb_ep_enable	vmlinux	EXPORT_SYMBOL_GPL
+0xbdc7510a	drm_dp_mst_dump_topology	vmlinux	EXPORT_SYMBOL
+0x8cbd9c68	iommu_aux_detach_device	vmlinux	EXPORT_SYMBOL_GPL
+0x6ee811ca	tpm_pm_suspend	vmlinux	EXPORT_SYMBOL_GPL
+0x176d0954	devm_gpiod_put	vmlinux	EXPORT_SYMBOL_GPL
+0xb63072fa	bsg_remove_queue	vmlinux	EXPORT_SYMBOL_GPL
+0x0ac046bc	truncate_pagecache	vmlinux	EXPORT_SYMBOL
+0xa376d145	ring_buffer_time_stamp	vmlinux	EXPORT_SYMBOL_GPL
+0xbaa657c9	param_ops_bool_enable_only	vmlinux	EXPORT_SYMBOL_GPL
+0x68be9d9b	vsock_add_pending	net/vmw_vsock/vsock	EXPORT_SYMBOL_GPL
+0xa8174239	vsock_remove_sock	net/vmw_vsock/vsock	EXPORT_SYMBOL_GPL
+0xb73be794	xfrm_ealg_get_byidx	net/xfrm/xfrm_algo	EXPORT_SYMBOL_GPL
+0x51ec2869	ocfs2_kset	fs/ocfs2/ocfs2_stackglue	EXPORT_SYMBOL_GPL
+0x69d3558d	pnfs_generic_rw_release	fs/nfs/nfsv4	EXPORT_SYMBOL_GPL
+0x871114bf	nfs4_test_session_trunk	fs/nfs/nfsv4	EXPORT_SYMBOL_GPL
+0x837942b7	tso_start	vmlinux	EXPORT_SYMBOL
+0x4b7d06f5	sock_no_shutdown	vmlinux	EXPORT_SYMBOL
+0xacc1a824	of_device_unregister	vmlinux	EXPORT_SYMBOL
+0xa0d3c233	vc_resize	vmlinux	EXPORT_SYMBOL
+0x94ef4d05	cpci_hp_stop	vmlinux	EXPORT_SYMBOL_GPL
+0xcf1048ae	fat_update_time	vmlinux	EXPORT_SYMBOL_GPL
+0xa028c9ec	__generic_block_fiemap	vmlinux	EXPORT_SYMBOL
+0x4005f38c	try_wait_for_completion	vmlinux	EXPORT_SYMBOL
+0x71e11714	param_set_ulong	vmlinux	EXPORT_SYMBOL
+0x6e311058	gfn_to_memslot	vmlinux	EXPORT_SYMBOL_GPL
+0xee2d0fc7	_local_bh_enable	vmlinux	EXPORT_SYMBOL
+0x750d0d33	nf_ct_expect_alloc	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x66403f08	tcp_mmap	vmlinux	EXPORT_SYMBOL
+0x1a77903a	of_alias_get_alias_list	vmlinux	EXPORT_SYMBOL_GPL
+0x66a3cb5f	spi_unregister_device	vmlinux	EXPORT_SYMBOL_GPL
+0x9b4b54a9	ttm_dma_page_alloc_debugfs	vmlinux	EXPORT_SYMBOL_GPL
+0x4bc2faad	drm_fb_helper_deferred_io	vmlinux	EXPORT_SYMBOL
+0xef50c2de	drm_kms_helper_poll_init	vmlinux	EXPORT_SYMBOL
+0xcc35186e	tpm_chip_start	vmlinux	EXPORT_SYMBOL_GPL
+0xec01448a	generic_key_instantiate	vmlinux	EXPORT_SYMBOL
+0x52400cc9	sysfs_unmerge_group	vmlinux	EXPORT_SYMBOL_GPL
+0xe305235d	__cancel_dirty_page	vmlinux	EXPORT_SYMBOL
+0x79ee513e	sched_trace_cfs_rq_path	vmlinux	EXPORT_SYMBOL_GPL
+0x79defbe1	kthread_should_park	vmlinux	EXPORT_SYMBOL_GPL
+0x33df2423	ceph_copy_to_page_vector	net/ceph/libceph	EXPORT_SYMBOL
+0xf3346bfe	ib_mad_kernel_rmpp_agent	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x8668f1c8	ib_modify_qp	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xd764a60f	can_rx_offload_irq_offload_fifo	drivers/net/can/dev/can-dev	EXPORT_SYMBOL_GPL
+0xf3ad003f	hinic_get_hw_pf_infos	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x1b89c6ee	o2hb_fill_node_map	fs/ocfs2/cluster/ocfs2_nodemanager	EXPORT_SYMBOL_GPL
+0xc5196999	ocfs2_dlm_unlock	fs/ocfs2/ocfs2_stackglue	EXPORT_SYMBOL_GPL
+0x70c52dc5	nf_skb_duplicated	vmlinux	EXPORT_SYMBOL_GPL
+0xc87d80df	devlink_params_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0xd6a2e5a7	fib_notifier_ops_register	vmlinux	EXPORT_SYMBOL
+0xcb550fb6	dma_resv_init	vmlinux	EXPORT_SYMBOL
+0x227d1fd5	ttm_bo_kunmap	vmlinux	EXPORT_SYMBOL
+0x5ac568c8	drm_calc_timestamping_constants	vmlinux	EXPORT_SYMBOL
+0x05b2173b	drm_fb_helper_cfb_imageblit	vmlinux	EXPORT_SYMBOL
+0xa16cdb4d	add_random_ready_callback	vmlinux	EXPORT_SYMBOL
+0xb549e125	dummy_con	vmlinux	EXPORT_SYMBOL_GPL
+0xf9c1f9ab	security_secctx_to_secid	vmlinux	EXPORT_SYMBOL
+0x99f9e7e6	sysfs_create_link_nowarn	vmlinux	EXPORT_SYMBOL_GPL
+0xf36554a4	kvm_vcpu_map	vmlinux	EXPORT_SYMBOL_GPL
+0x0bd0b09f	svc_encode_read_payload	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x4b1c48a9	snd_timer_global_free	sound/core/snd-timer	EXPORT_SYMBOL
+0x81939661	mlx5_query_port_ets_rate_limit	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0xddba5e96	fcoe_link_speed_update	drivers/scsi/fcoe/libfcoe	EXPORT_SYMBOL_GPL
+0x21e13cb3	inet_peer_xrlim_allow	vmlinux	EXPORT_SYMBOL
+0x2966cd22	nf_ct_attach	vmlinux	EXPORT_SYMBOL
+0xc24f34c4	tcf_queue_work	vmlinux	EXPORT_SYMBOL
+0xe818b32b	ata_bmdma_interrupt	vmlinux	EXPORT_SYMBOL_GPL
+0x450963b7	drm_vram_helper_alloc_mm	vmlinux	EXPORT_SYMBOL
+0xdce23a83	sbitmap_queue_wake_up	vmlinux	EXPORT_SYMBOL_GPL
+0xeb924dc6	register_user_hw_breakpoint	vmlinux	EXPORT_SYMBOL_GPL
+0x7c94c99a	kvm_release_pfn_dirty	vmlinux	EXPORT_SYMBOL_GPL
+0x8b9ea582	ZSTD_copyDCtx	lib/zstd/zstd_decompress	EXPORT_SYMBOL
+0x279be432	ZSTD_copyCCtx	lib/zstd/zstd_compress	EXPORT_SYMBOL
+0x2dea50f7	mlx5_query_port_ptys	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0x8d4c8fdf	nfs_inode_attach_open_context	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x9f8da45b	fscache_op_complete	fs/fscache/fscache	EXPORT_SYMBOL
+0xc9df055a	xfrm_policy_walk_init	vmlinux	EXPORT_SYMBOL
+0x8bc0cbd9	cpufreq_dbs_governor_limits	vmlinux	EXPORT_SYMBOL_GPL
+0xb02a4dd2	sas_target_destroy	vmlinux	EXPORT_SYMBOL_GPL
+0xbd6e3454	driver_attach	vmlinux	EXPORT_SYMBOL_GPL
+0x093712e5	acpi_purge_cached_objects	vmlinux	EXPORT_SYMBOL
+0xddad7952	acpi_dbg_level	vmlinux	EXPORT_SYMBOL
+0xc9640104	gpiochip_irq_map	vmlinux	EXPORT_SYMBOL_GPL
+0x762411a3	textsearch_register	vmlinux	EXPORT_SYMBOL
+0x10cde69c	bio_clone_blkg_association	vmlinux	EXPORT_SYMBOL_GPL
+0x74baf17a	tracing_is_on	vmlinux	EXPORT_SYMBOL_GPL
+0x381a798a	setup_max_cpus	vmlinux	EXPORT_SYMBOL
+0x40183f79	commit_creds	vmlinux	EXPORT_SYMBOL
+0x574eda34	des3_ede_decrypt	lib/crypto/libdes	EXPORT_SYMBOL_GPL
+0xfeb16652	dccp_rcv_state_process	net/dccp/dccp	EXPORT_SYMBOL_GPL
+0x95a32ce9	__nf_ct_expect_find	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x31406cf9	garp_init_applicant	net/802/garp	EXPORT_SYMBOL_GPL
+0x6171b8f6	iptunnel_metadata_reply	vmlinux	EXPORT_SYMBOL_GPL
+0x1cf62720	platform_device_register	vmlinux	EXPORT_SYMBOL_GPL
+0x975673d2	drm_atomic_helper_commit_tail	vmlinux	EXPORT_SYMBOL
+0x80d68d3e	fb_register_client	vmlinux	EXPORT_SYMBOL
+0xad0413d4	match_hex	vmlinux	EXPORT_SYMBOL
+0xa929ab9a	rdma_nl_multicast	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x858c30d0	mlxsw_afa_block_create	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0xe33bed2b	__inet_lookup_listener	vmlinux	EXPORT_SYMBOL_GPL
+0xfef779fa	xt_find_jump_offset	vmlinux	EXPORT_SYMBOL
+0xd92fe489	nf_unregister_sockopt	vmlinux	EXPORT_SYMBOL
+0x62a1de16	skb_realloc_headroom	vmlinux	EXPORT_SYMBOL
+0x5c8c7403	dev_attr_em_message	vmlinux	EXPORT_SYMBOL_GPL
+0x686053ba	software_node_fwnode	vmlinux	EXPORT_SYMBOL_GPL
+0xf97bcaff	platform_device_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0xb1fd63cf	serial8250_set_isa_configurator	vmlinux	EXPORT_SYMBOL
+0x0676a6dd	acpi_data_fwnode_ops	vmlinux	EXPORT_SYMBOL_GPL
+0xa3a04602	btree_geo64	vmlinux	EXPORT_SYMBOL_GPL
+0x87d3b5af	generic_pipe_buf_get	vmlinux	EXPORT_SYMBOL
+0x79d0e906	nfs_may_open	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xbf2fb6a3	ipv6_find_hdr	vmlinux	EXPORT_SYMBOL
+0xc6c6f5bd	ipv6_chk_addr_and_flags	vmlinux	EXPORT_SYMBOL
+0xbf430564	__dev_forward_skb	vmlinux	EXPORT_SYMBOL_GPL
+0xcdfee3a6	uhci_reset_hc	vmlinux	EXPORT_SYMBOL_GPL
+0xfa2d291c	drm_mode_get_hv_timing	vmlinux	EXPORT_SYMBOL
+0xd67402e2	iommu_page_response	vmlinux	EXPORT_SYMBOL_GPL
+0xe2484d98	tty_port_close_end	vmlinux	EXPORT_SYMBOL
+0x74e46056	pcie_bandwidth_available	vmlinux	EXPORT_SYMBOL
+0x702946da	ucs2_strlen	vmlinux	EXPORT_SYMBOL
+0x8bcf1611	jbd2_journal_dirty_metadata	vmlinux	EXPORT_SYMBOL
+0x4f79383a	account_locked_vm	vmlinux	EXPORT_SYMBOL_GPL
+0xebbe1622	io_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL
+0xdc7004d8	handle_fasteoi_irq	vmlinux	EXPORT_SYMBOL_GPL
+0xc945dc8f	xdr_restrict_buflen	net/sunrpc/sunrpc	EXPORT_SYMBOL
+0xa21c4531	nf_ct_bridge_register	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0xa405fd80	ib_set_client_data	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xc04c9c1c	rdma_copy_ah_attr	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x58bcc56e	md_flush_request	drivers/md/md-mod	EXPORT_SYMBOL
+0x8889b3f3	mlx4_cq_free	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0x363ec6de	__vlan_find_dev_deep_rcu	vmlinux	EXPORT_SYMBOL
+0x79bc842c	usb_anchor_suspend_wakeups	vmlinux	EXPORT_SYMBOL_GPL
+0x9b807c91	hisi_sas_get_prog_phy_linkrate_mask	vmlinux	EXPORT_SYMBOL_GPL
+0x1f171102	sas_bios_param	vmlinux	EXPORT_SYMBOL_GPL
+0x5733e316	sas_attach_transport	vmlinux	EXPORT_SYMBOL
+0x9a7e1be0	iommu_put_dma_cookie	vmlinux	EXPORT_SYMBOL
+0x207a88af	blkcipher_walk_done	vmlinux	EXPORT_SYMBOL_GPL
+0x447aca6f	__close_fd	vmlinux	EXPORT_SYMBOL
+0xd39b339c	page_readlink	vmlinux	EXPORT_SYMBOL
+0x07bf29cd	get_cached_msi_msg	vmlinux	EXPORT_SYMBOL_GPL
+0xc06dc5e1	rdma_set_cq_moderation	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x6476fe9e	ubi_is_mapped	drivers/mtd/ubi/ubi	EXPORT_SYMBOL_GPL
+0x30dbed6e	poly1305_core_blocks	crypto/poly1305_generic	EXPORT_SYMBOL_GPL
+0x61130578	inet_frag_reasm_prepare	vmlinux	EXPORT_SYMBOL
+0x97431fc6	dma_fence_chain_init	vmlinux	EXPORT_SYMBOL
+0x583f0e58	virtqueue_kick	vmlinux	EXPORT_SYMBOL_GPL
+0x1dd571e6	fb_copy_cmap	vmlinux	EXPORT_SYMBOL
+0x2ee9ed8e	gpiod_set_value	vmlinux	EXPORT_SYMBOL_GPL
+0xd9d952d1	crypto_aes_sbox	vmlinux	EXPORT_SYMBOL
+0x27639220	blk_verify_command	vmlinux	EXPORT_SYMBOL
+0x2d39b0a7	kstrdup	vmlinux	EXPORT_SYMBOL
+0x32c78bbc	freezing_slow_path	vmlinux	EXPORT_SYMBOL
+0xd271351a	kvm_vcpu_gfn_to_pfn	vmlinux	EXPORT_SYMBOL_GPL
+0x0407a624	kvm_vcpu_gfn_to_hva	vmlinux	EXPORT_SYMBOL_GPL
+0x1c8dde58	mlx4_SET_PORT_user_mac	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL
+0xd551432d	fcoe_libfc_config	drivers/scsi/fcoe/libfcoe	EXPORT_SYMBOL_GPL
+0xa47af033	st_nci_probe	drivers/nfc/st-nci/st-nci	EXPORT_SYMBOL_GPL
+0x61ae1d2d	xas_pause	vmlinux	EXPORT_SYMBOL_GPL
+0xcf4fdd4d	_atomic_dec_and_lock	vmlinux	EXPORT_SYMBOL
+0x68f37e9f	__tracepoint_neigh_update	vmlinux	EXPORT_SYMBOL_GPL
+0x3e1fb0b4	of_iomap	vmlinux	EXPORT_SYMBOL
+0xa1fc6b28	ata_bmdma_port_ops	vmlinux	EXPORT_SYMBOL_GPL
+0xb53a480f	ata_host_alloc	vmlinux	EXPORT_SYMBOL_GPL
+0xe8ea71b0	is_software_node	vmlinux	EXPORT_SYMBOL_GPL
+0x72763355	blkg_print_stat_bytes	vmlinux	EXPORT_SYMBOL_GPL
+0x7984eefc	key_update	vmlinux	EXPORT_SYMBOL
+0xc1444946	ax25cmp	net/ax25/ax25	EXPORT_SYMBOL
+0x23af2922	ib_get_cached_subnet_prefix	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x589e12c6	hinic_cmdq_detail_resp	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x0b52aa49	iscsi_conn_bind	drivers/scsi/libiscsi	EXPORT_SYMBOL_GPL
+0xee9e53bf	l3mdev_master_ifindex_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0x336f6def	xfrm_register_km	vmlinux	EXPORT_SYMBOL
+0x810947e3	__generic_fsdax_supported	vmlinux	EXPORT_SYMBOL_GPL
+0x26414388	device_create_with_groups	vmlinux	EXPORT_SYMBOL_GPL
+0x7d8ec67f	virtqueue_get_vring_size	vmlinux	EXPORT_SYMBOL_GPL
+0xab781570	fb_get_options	vmlinux	EXPORT_SYMBOL
+0xfb384d37	kasprintf	vmlinux	EXPORT_SYMBOL
+0xf3b3b137	debugfs_create_automount	vmlinux	EXPORT_SYMBOL
+0x7feed8cd	simple_rename	vmlinux	EXPORT_SYMBOL
+0x0333af60	vfs_getxattr	vmlinux	EXPORT_SYMBOL_GPL
+0x5cb574d9	vfs_setxattr	vmlinux	EXPORT_SYMBOL_GPL
+0xf714d989	trace_seq_path	vmlinux	EXPORT_SYMBOL_GPL
+0x6d46bca0	sched_setattr	vmlinux	EXPORT_SYMBOL_GPL
+0xa53767c8	kernel_param_unlock	vmlinux	EXPORT_SYMBOL
+0x4e71e62f	nci_core_init	net/nfc/nci/nci	EXPORT_SYMBOL
+0x7f63ec98	snd_ctl_free_one	sound/core/snd	EXPORT_SYMBOL
+0xde82714e	iw_cm_reject	drivers/infiniband/core/iw_cm	EXPORT_SYMBOL
+0x850bb6db	devlink_health_reporter_destroy	vmlinux	EXPORT_SYMBOL_GPL
+0xa2143e2a	sock_init_data	vmlinux	EXPORT_SYMBOL
+0xe347ecc7	ata_host_get	vmlinux	EXPORT_SYMBOL_GPL
+0xc541a58b	tty_set_termios	vmlinux	EXPORT_SYMBOL_GPL
+0x130afd75	acpi_get_sleep_type_data	vmlinux	EXPORT_SYMBOL
+0x330f6186	of_get_pci_domain_nr	vmlinux	EXPORT_SYMBOL_GPL
+0x3a536bd7	ring_buffer_read_finish	vmlinux	EXPORT_SYMBOL_GPL
+0xe02eb6d0	ring_buffer_commit_overrun_cpu	vmlinux	EXPORT_SYMBOL_GPL
+0x72eb4ea9	base_old_false_key	lib/test_static_key_base	EXPORT_SYMBOL_GPL
+0xad0a855d	unregister_candev	drivers/net/can/dev/can-dev	EXPORT_SYMBOL_GPL
+0x02855ada	mlx4_vf_smi_enabled	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0xb6f2cd65	dlm_print_one_lock	fs/ocfs2/dlm/ocfs2_dlm	EXPORT_SYMBOL_GPL
+0x189ce9f8	rtnl_register_module	vmlinux	EXPORT_SYMBOL_GPL
+0x2afe765a	put_cmsg_scm_timestamping64	vmlinux	EXPORT_SYMBOL
+0xf92f1c91	class_dev_iter_init	vmlinux	EXPORT_SYMBOL_GPL
+0xbcc626a5	clk_register_fixed_rate	vmlinux	EXPORT_SYMBOL_GPL
+0x42e4deb1	pci_get_slot	vmlinux	EXPORT_SYMBOL
+0x4973f7eb	devm_gen_pool_create	vmlinux	EXPORT_SYMBOL
+0xff87cd18	lockref_get_not_dead	vmlinux	EXPORT_SYMBOL
+0x129a923f	blk_queue_flag_set	vmlinux	EXPORT_SYMBOL
+0xe3c8e136	perf_aux_output_skip	vmlinux	EXPORT_SYMBOL_GPL
+0x06ffc516	task_cgroup_path	vmlinux	EXPORT_SYMBOL_GPL
+0xe2936650	module_disable_ro	vmlinux	EXPORT_SYMBOL_GPL
+0xefe4f679	ZSTD_CCtxWorkspaceBound	lib/zstd/zstd_compress	EXPORT_SYMBOL
+0xe99ed704	inet_diag_msg_attrs_fill	net/ipv4/inet_diag	EXPORT_SYMBOL_GPL
+0xfd448c32	nf_ct_helper_expectfn_register	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x6a6244fc	vhost_log_access_ok	drivers/vhost/vhost	EXPORT_SYMBOL_GPL
+0xf813ed77	ib_find_pkey	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x51d9feb4	ib_dealloc_pd_user	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x53140e94	vfio_register_iommu_driver	drivers/vfio/vfio	EXPORT_SYMBOL_GPL
+0x26eca71d	mpt_resume	drivers/message/fusion/mptbase	EXPORT_SYMBOL
+0xcc6decc9	km_query	vmlinux	EXPORT_SYMBOL
+0x679e0b3a	inet_release	vmlinux	EXPORT_SYMBOL
+0x026aae74	usb_udc_vbus_handler	vmlinux	EXPORT_SYMBOL_GPL
+0x62104126	phylink_ethtool_set_wol	vmlinux	EXPORT_SYMBOL_GPL
+0x08213956	phylink_ethtool_get_wol	vmlinux	EXPORT_SYMBOL_GPL
+0x7cd6ce32	ahci_platform_get_resources	vmlinux	EXPORT_SYMBOL_GPL
+0x457d6223	jbd2_journal_force_commit	vmlinux	EXPORT_SYMBOL
+0x1c844d9b	xprt_alloc	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x8e5ea150	rdma_addr_cancel	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xa83588eb	dm_rh_recovery_end	drivers/md/dm-region-hash	EXPORT_SYMBOL_GPL
+0xa8a5afa3	bch_btree_sort_partial	drivers/md/bcache/bcache	EXPORT_SYMBOL
+0x68fd6e79	usb_stor_CB_transport	vmlinux	EXPORT_SYMBOL_GPL
+0x51c35f8d	phy_read_paged	vmlinux	EXPORT_SYMBOL
+0xe9f594a4	drm_connector_init	vmlinux	EXPORT_SYMBOL
+0x9fce80db	fb_notifier_call_chain	vmlinux	EXPORT_SYMBOL_GPL
+0x2276db98	kstrtoint	vmlinux	EXPORT_SYMBOL
+0x7cd7b71d	blk_mq_sched_try_insert_merge	vmlinux	EXPORT_SYMBOL_GPL
+0x0408083d	crypto_alloc_akcipher	vmlinux	EXPORT_SYMBOL_GPL
+0xc8eb42ef	crypto_alloc_skcipher	vmlinux	EXPORT_SYMBOL_GPL
+0x8f67e93e	get_tree_single	vmlinux	EXPORT_SYMBOL
+0x01e1a8de	kgdb_breakpoint	vmlinux	EXPORT_SYMBOL_GPL
+0x91dc2451	call_srcu	vmlinux	EXPORT_SYMBOL_GPL
+0xc9b4a67b	vringh_abandon_kern	drivers/vhost/vringh	EXPORT_SYMBOL
+0x60a634c4	vfio_info_cap_add	drivers/vfio/vfio	EXPORT_SYMBOL_GPL
+0xdd805159	ioc_list	drivers/message/fusion/mptbase	EXPORT_SYMBOL
+0x60443957	mdio45_probe	drivers/net/mdio	EXPORT_SYMBOL
+0x1f93326b	mlxsw_core_event_listener_unregister	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x8a240bff	__xas_next	vmlinux	EXPORT_SYMBOL_GPL
+0x08ea012f	cpufreq_get_policy	vmlinux	EXPORT_SYMBOL
+0xebd4cc11	mctrl_gpio_enable_ms	vmlinux	EXPORT_SYMBOL_GPL
+0x89b3ade2	sha224_final	vmlinux	EXPORT_SYMBOL
+0x38aa0278	dquot_set_dqblk	vmlinux	EXPORT_SYMBOL
+0x5e12ecc1	__tracepoint_rpm_return_int	vmlinux	EXPORT_SYMBOL_GPL
+0x8f6cee77	__round_jiffies_relative	vmlinux	EXPORT_SYMBOL_GPL
+0x8f0748af	rcu_expedite_gp	vmlinux	EXPORT_SYMBOL_GPL
+0xe3008a1a	svc_pool_stats_open	net/sunrpc/sunrpc	EXPORT_SYMBOL
+0x36642dcb	rdma_nl_unicast_wait	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x1c2d157f	mlx5_core_destroy_rqt	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL
+0x0c24462b	mlx5_core_destroy_dct	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0x805e2339	inet_twsk_alloc	vmlinux	EXPORT_SYMBOL_GPL
+0x4c910e6a	__serio_register_driver	vmlinux	EXPORT_SYMBOL
+0xcc76d80f	dev_pm_qos_expose_latency_tolerance	vmlinux	EXPORT_SYMBOL_GPL
+0xa2ad61b3	tty_put_char	vmlinux	EXPORT_SYMBOL_GPL
+0x56953eeb	regulator_lock	vmlinux	EXPORT_SYMBOL_GPL
+0x09501b7f	acpi_release_memory	vmlinux	EXPORT_SYMBOL_GPL
+0x26c622ee	percpu_ref_switch_to_percpu	vmlinux	EXPORT_SYMBOL_GPL
+0x77e28955	crypto_register_ahashes	vmlinux	EXPORT_SYMBOL_GPL
+0x037a0cba	kfree	vmlinux	EXPORT_SYMBOL
+0x5c724709	memory_cgrp_subsys_on_dfl_key	vmlinux	EXPORT_SYMBOL_GPL
+0xa0712998	__rt_mutex_init	vmlinux	EXPORT_SYMBOL_GPL
+0xd9d8fd16	register_restart_handler	vmlinux	EXPORT_SYMBOL
+0xb9638db4	snd_pcm_rate_to_rate_bit	sound/core/snd-pcm	EXPORT_SYMBOL
+0xa0c8df3d	ib_modify_device	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x8f2936aa	target_show_cmd	drivers/target/target_core_mod	EXPORT_SYMBOL
+0x1ea1a1e0	mlx5_lag_query_cong_counters	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL
+0xb1d7acce	vlan_dev_real_dev	vmlinux	EXPORT_SYMBOL
+0x9f320723	xfrm_state_delete	vmlinux	EXPORT_SYMBOL
+0x3ddd307a	nf_register_sockopt	vmlinux	EXPORT_SYMBOL
+0x7313e10e	sock_kmalloc	vmlinux	EXPORT_SYMBOL
+0x0b644e6e	genphy_c45_read_mdix	vmlinux	EXPORT_SYMBOL_GPL
+0x5e5d378b	sas_target_alloc	vmlinux	EXPORT_SYMBOL_GPL
+0x7cd7d6be	__tracepoint_iscsi_dbg_conn	vmlinux	EXPORT_SYMBOL_GPL
+0xdfcb6c90	mctrl_gpio_set	vmlinux	EXPORT_SYMBOL_GPL
+0xe8fbf4fa	__alloc_bucket_spinlocks	vmlinux	EXPORT_SYMBOL
+0xfa600a92	housekeeping_test_cpu	vmlinux	EXPORT_SYMBOL_GPL
+0x9af6b231	base_false_key	lib/test_static_key_base	EXPORT_SYMBOL_GPL
+0x9fefa3cb	ceph_calc_file_object_mapping	net/ceph/libceph	EXPORT_SYMBOL
+0x893ea44e	tipc_sk_fill_sock_diag	net/tipc/tipc	EXPORT_SYMBOL
+0xb8364d94	l2tp_tunnel_free	net/l2tp/l2tp_core	EXPORT_SYMBOL
+0xc7a9169d	xprt_destroy_backchannel	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xe31c6ca0	br_multicast_has_querier_anywhere	net/bridge/bridge	EXPORT_SYMBOL_GPL
+0xf540e352	rdma_init_netdev	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x66011ab6	ubi_get_device_info	drivers/mtd/ubi/ubi	EXPORT_SYMBOL_GPL
+0x262f3609	mlx5_buf_free	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0xf6484658	mlx4_buf_free	drivers/net/ethernet/mellanox/mlx4/mlx4_core	EXPORT_SYMBOL_GPL
+0x26553f41	nfs_free_inode	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x9a44dcda	ip_tunnel_get_stats64	vmlinux	EXPORT_SYMBOL_GPL
+0x732fad4c	dax_region_put	vmlinux	EXPORT_SYMBOL_GPL
+0x107742a9	drm_get_subpixel_order_name	vmlinux	EXPORT_SYMBOL
+0x0712e21d	drm_edid_get_monitor_name	vmlinux	EXPORT_SYMBOL
+0xf00f5d27	drm_gem_lock_reservations	vmlinux	EXPORT_SYMBOL
+0x6c4b6684	reset_control_assert	vmlinux	EXPORT_SYMBOL_GPL
+0xcae61d48	acpi_dma_controller_register	vmlinux	EXPORT_SYMBOL_GPL
+0x9939eba0	backlight_unregister_notifier	vmlinux	EXPORT_SYMBOL
+0x93da13df	dw_pcie_find_ext_capability	vmlinux	EXPORT_SYMBOL_GPL
+0x9749edb0	pci_lost_interrupt	vmlinux	EXPORT_SYMBOL
+0xe7d4daac	seq_list_next	vmlinux	EXPORT_SYMBOL
+0x91d227a0	__irq_alloc_domain_generic_chips	vmlinux	EXPORT_SYMBOL_GPL
+0xfd7adc4f	nfs_alloc_fattr	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xe02c9c92	__xa_erase	vmlinux	EXPORT_SYMBOL
+0x2fe252cc	unregister_inet6addr_notifier	vmlinux	EXPORT_SYMBOL
+0xc72a7485	devm_hwmon_device_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0x43aa1cff	ttm_bo_evict_mm	vmlinux	EXPORT_SYMBOL
+0x9b3fed98	drm_mode_put_tile_group	vmlinux	EXPORT_SYMBOL
+0x00ca40d0	tpm_chip_register	vmlinux	EXPORT_SYMBOL_GPL
+0x138f1890	virtqueue_get_buf_ctx	vmlinux	EXPORT_SYMBOL_GPL
+0x2480d572	gpiochip_line_is_irq	vmlinux	EXPORT_SYMBOL_GPL
+0x399ad043	__kfifo_dma_out_finish_r	vmlinux	EXPORT_SYMBOL
+0x57c39727	kdb_register_flags	vmlinux	EXPORT_SYMBOL_GPL
+0x4ceb2654	lowpan_unregister_netdevice	net/6lowpan/6lowpan	EXPORT_SYMBOL
+0x45a4781e	rds_addr_cmp	net/rds/rds	EXPORT_SYMBOL_GPL
+0x4125d864	ip6_tnl_rcv	net/ipv6/ip6_tunnel	EXPORT_SYMBOL
+0xa0ad69fc	nf_tproxy_laddr6	net/ipv6/netfilter/nf_tproxy_ipv6	EXPORT_SYMBOL_GPL
+0x78a42efe	mlxsw_core_rx_listener_unregister	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x2aff6ae3	ipv6_chk_addr	vmlinux	EXPORT_SYMBOL
+0x1942f306	neigh_xmit	vmlinux	EXPORT_SYMBOL
+0xb73713d7	nvmem_add_cell_lookups	vmlinux	EXPORT_SYMBOL_GPL
+0xa5c71b19	dev_pm_qos_flags	vmlinux	EXPORT_SYMBOL_GPL
+0x583c088f	__devm_alloc_percpu	vmlinux	EXPORT_SYMBOL_GPL
+0x200dcd99	devm_kstrdup	vmlinux	EXPORT_SYMBOL_GPL
+0xec506e18	drm_hdmi_avi_infoframe_colorspace	vmlinux	EXPORT_SYMBOL
+0xddd42ab0	uart_suspend_port	vmlinux	EXPORT_SYMBOL
+0x68d5d5c9	of_dma_xlate_by_chan_id	vmlinux	EXPORT_SYMBOL_GPL
+0xab3697e4	irq_poll_init	vmlinux	EXPORT_SYMBOL
+0x74980587	__blkdev_driver_ioctl	vmlinux	EXPORT_SYMBOL_GPL
+0xfae8b7e6	blk_alloc_queue_node	vmlinux	EXPORT_SYMBOL
+0xe5dc9245	ceph_parse_options	net/ceph/libceph	EXPORT_SYMBOL
+0x9598d24d	dccp_ackvec_parsed_cleanup	net/dccp/dccp	EXPORT_SYMBOL_GPL
+0x4afedab1	xprtiod_workqueue	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xe2b5c22d	nf_connlabels_get	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x80fb2286	mtd_read_fact_prot_reg	drivers/mtd/mtd	EXPORT_SYMBOL_GPL
+0xf92b8a3d	dm_rh_get_region_size	drivers/md/dm-region-hash	EXPORT_SYMBOL_GPL
+0x6ec668c8	rndis_set_param_dev	drivers/usb/gadget/function/usb_f_rndis	EXPORT_SYMBOL_GPL
+0x9f606a68	hnae3_register_client	drivers/net/ethernet/hisilicon/hns3/hnae3	EXPORT_SYMBOL
+0xd8603912	hid_field_extract	vmlinux	EXPORT_SYMBOL_GPL
+0x5bad9606	scsi_device_resume	vmlinux	EXPORT_SYMBOL
+0x708d0770	ata_bmdma_qc_issue	vmlinux	EXPORT_SYMBOL_GPL
+0x6010d1e8	platform_get_irq_byname_optional	vmlinux	EXPORT_SYMBOL_GPL
+0x932b6202	queue_iova	vmlinux	EXPORT_SYMBOL_GPL
+0x2b585704	sbitmap_show	vmlinux	EXPORT_SYMBOL_GPL
+0xdd8585d7	kernel_read_file_from_path	vmlinux	EXPORT_SYMBOL_GPL
+0x3c2ac92f	ib_dealloc_xrcd	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x26626990	ipv6_find_tlv	vmlinux	EXPORT_SYMBOL_GPL
+0x8c9648a9	neigh_changeaddr	vmlinux	EXPORT_SYMBOL
+0xfea66ed4	devm_free_percpu	vmlinux	EXPORT_SYMBOL_GPL
+0x6a2bdf53	do_unregister_con_driver	vmlinux	EXPORT_SYMBOL_GPL
+0xa120d33c	tty_unregister_ldisc	vmlinux	EXPORT_SYMBOL
+0xee35a437	tty_standard_install	vmlinux	EXPORT_SYMBOL_GPL
+0x67739059	amba_apb_device_add_res	vmlinux	EXPORT_SYMBOL_GPL
+0xc17db87a	gpiochip_populate_parent_fwspec_fourcell	vmlinux	EXPORT_SYMBOL_GPL
+0x710c73b6	crypto_unregister_notifier	vmlinux	EXPORT_SYMBOL_GPL
+0xe4526ab4	invalidate_inode_pages2	vmlinux	EXPORT_SYMBOL_GPL
+0x97e7f902	trace_vbprintk	vmlinux	EXPORT_SYMBOL_GPL
+0x216de4e1	rcu_get_gp_kthreads_prio	vmlinux	EXPORT_SYMBOL_GPL
+0x4bd50f56	udp_sock_create4	net/ipv4/udp_tunnel	EXPORT_SYMBOL
+0xdd0797bc	ipt_do_table	net/ipv4/netfilter/ip_tables	EXPORT_SYMBOL
+0x4b654829	_snd_ctl_add_slave	sound/core/snd	EXPORT_SYMBOL
+0x4d7aeca4	hinic_net_port_mode	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x1089152f	__hw_addr_ref_sync_dev	vmlinux	EXPORT_SYMBOL
+0x8be55e2b	generic_xdp_tx	vmlinux	EXPORT_SYMBOL_GPL
+0x1263ef4f	dma_resv_get_fences_rcu	vmlinux	EXPORT_SYMBOL_GPL
+0xc3f6d72d	pnp_possible_config	vmlinux	EXPORT_SYMBOL
+0x79831b76	kmem_cache_destroy	vmlinux	EXPORT_SYMBOL
+0x55b7faa0	add_to_page_cache_locked	vmlinux	EXPORT_SYMBOL
+0x4e109192	ring_buffer_entries	vmlinux	EXPORT_SYMBOL_GPL
+0x69fa6dc0	nft_set_gc_batch_alloc	net/netfilter/nf_tables	EXPORT_SYMBOL_GPL
+0xe63a1539	nfs_request_remove_commit_list	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x4f96a31d	alloc_skb_for_msg	vmlinux	EXPORT_SYMBOL_GPL
+0xc1db1070	drm_mode_object_find	vmlinux	EXPORT_SYMBOL
+0xb0b05ff5	drm_clflush_sg	vmlinux	EXPORT_SYMBOL
+0x14fb2365	cmdline_parts_set	vmlinux	EXPORT_SYMBOL
+0x3aac3041	blkcg_root_css	vmlinux	EXPORT_SYMBOL_GPL
+0x46e8ff6b	proc_create_net_data	vmlinux	EXPORT_SYMBOL_GPL
+0xc19cafc6	d_instantiate_anon	vmlinux	EXPORT_SYMBOL
+0x121dae8f	rpc_proc_register	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x70797159	svc_set_num_threads_sync	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xf2a36612	pptp_msg_name	net/netfilter/nf_conntrack_pptp	EXPORT_SYMBOL
+0x01e500b4	nf_connlabels_replace	net/netfilter/nf_conntrack	EXPORT_SYMBOL_GPL
+0x89783bda	dm_array_cursor_end	drivers/md/persistent-data/dm-persistent-data	EXPORT_SYMBOL_GPL
+0xe68d70a9	__tracepoint_bcache_write	drivers/md/bcache/bcache	EXPORT_SYMBOL_GPL
+0x28c9cb6c	hinic_get_fcoe_enable	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0xf167cb7a	fcoe_wwn_to_str	drivers/scsi/fcoe/libfcoe	EXPORT_SYMBOL_GPL
+0xb556b743	nfs_fs_mount_common	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x4630e74e	nf_ct_get_tuple_skb	vmlinux	EXPORT_SYMBOL
+0xcb2bfe2b	nvmem_register_notifier	vmlinux	EXPORT_SYMBOL_GPL
+0x16931144	phy_mii_ioctl	vmlinux	EXPORT_SYMBOL
+0x976bce9e	drm_atomic_helper_plane_destroy_state	vmlinux	EXPORT_SYMBOL
+0xdca28ee3	vring_new_virtqueue	vmlinux	EXPORT_SYMBOL_GPL
+0xa8caa845	clk_bulk_put_all	vmlinux	EXPORT_SYMBOL
+0xd2e2a9d0	hdmi_spd_infoframe_pack_only	vmlinux	EXPORT_SYMBOL
+0xf7eeb433	gpiochip_irq_domain_activate	vmlinux	EXPORT_SYMBOL_GPL
+0x18728552	sprint_OID	vmlinux	EXPORT_SYMBOL_GPL
+0xfc201b66	sprint_oid	vmlinux	EXPORT_SYMBOL_GPL
+0x2321d76c	acomp_request_free	vmlinux	EXPORT_SYMBOL_GPL
+0x0c46da66	vm_event_states	vmlinux	EXPORT_SYMBOL
+0x9e66203f	__module_get	vmlinux	EXPORT_SYMBOL
+0xfd94814e	complete_all	vmlinux	EXPORT_SYMBOL
+0x3c1902d9	__release_region	vmlinux	EXPORT_SYMBOL
+0x7838c5d1	nfc_hci_reset_pipes_per_host	net/nfc/hci/hci	EXPORT_SYMBOL
+0xf7d46060	nfc_alloc_recv_skb	net/nfc/nfc	EXPORT_SYMBOL
+0x6d4ff834	parport_del_port	drivers/parport/parport	EXPORT_SYMBOL
+0xc322ca88	ccp_enqueue_cmd	drivers/crypto/ccp/ccp	EXPORT_SYMBOL_GPL
+0x3910c0fa	r5c_journal_mode_set	drivers/md/raid456	EXPORT_SYMBOL
+0xcaba0fc2	rndis_rm_hdr	drivers/usb/gadget/function/usb_f_rndis	EXPORT_SYMBOL_GPL
+0x5ed040b0	pm_set_vt_switch	vmlinux	EXPORT_SYMBOL
+0x3be74138	amba_device_alloc	vmlinux	EXPORT_SYMBOL_GPL
+0x0a5fdcf7	acpi_dev_gpio_irq_get	vmlinux	EXPORT_SYMBOL_GPL
+0x8a6f5b13	bdi_register	vmlinux	EXPORT_SYMBOL
+0xcf54ea93	async_unregister_domain	vmlinux	EXPORT_SYMBOL_GPL
+0xf40701ab	ceph_object_locator_to_pg	net/ceph/libceph	EXPORT_SYMBOL
+0x046b6472	xprt_alloc_slot	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x52e3e4a5	snd_pcm_hw_param_value	sound/core/snd-pcm	EXPORT_SYMBOL
+0x810f59da	rdma_destroy_id	drivers/infiniband/core/rdma_cm	EXPORT_SYMBOL
+0xce74511e	rdma_destroy_qp	drivers/infiniband/core/rdma_cm	EXPORT_SYMBOL
+0xee1bf44f	vfio_virqfd_disable	drivers/vfio/vfio_virqfd	EXPORT_SYMBOL_GPL
+0x54808bb9	iscsit_add_cmd_to_immediate_queue	drivers/target/iscsi/iscsi_target_mod	EXPORT_SYMBOL
+0x43a9b87e	mlxsw_afa_block_terminate	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x3149fac0	hinic_support_iwarp	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x2083abae	hinic_vector_to_irq	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0x4032cf28	hinic_vector_to_eqn	drivers/net/ethernet/huawei/hinic/hinic	EXPORT_SYMBOL
+0xfe7ab934	nfs_pgheader_init	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0x83c52fba	xfrm4_protocol_init	vmlinux	EXPORT_SYMBOL
+0x3e950dad	__iptunnel_pull_header	vmlinux	EXPORT_SYMBOL_GPL
+0xbfed91af	inet_csk_clear_xmit_timers	vmlinux	EXPORT_SYMBOL
+0x7c4defb9	reuseport_alloc	vmlinux	EXPORT_SYMBOL
+0x2210cb04	drm_sched_start	vmlinux	EXPORT_SYMBOL
+0x58fe5db2	drm_panel_get_modes	vmlinux	EXPORT_SYMBOL
+0xbb0c591b	__drm_atomic_helper_private_obj_duplicate_state	vmlinux	EXPORT_SYMBOL
+0x718e4799	__device_reset	vmlinux	EXPORT_SYMBOL_GPL
+0x897aa602	regulator_unregister_supply_alias	vmlinux	EXPORT_SYMBOL_GPL
+0x7129e5f8	hex_asc	vmlinux	EXPORT_SYMBOL
+0x131e626e	fat_time_unix2fat	vmlinux	EXPORT_SYMBOL_GPL
+0xd57fe544	d_tmpfile	vmlinux	EXPORT_SYMBOL
+0x43a1cf7c	nf_nat_follow_master	net/netfilter/nf_nat	EXPORT_SYMBOL
+0x184621a4	ib_get_rdma_header_version	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0x2c68ced3	mlxsw_core_read_frc_h	drivers/net/ethernet/mellanox/mlxsw/mlxsw_core	EXPORT_SYMBOL
+0x44929d1f	mlx5_query_hca_vport_node_guid	drivers/net/ethernet/mellanox/mlx5/core/mlx5_core	EXPORT_SYMBOL_GPL
+0xae55cf1c	srp_rport_add	drivers/scsi/scsi_transport_srp	EXPORT_SYMBOL_GPL
+0x6ff1551d	fc_remote_port_rolechg	drivers/scsi/scsi_transport_fc	EXPORT_SYMBOL
+0x66243619	nvme_init_identify	drivers/nvme/host/nvme-core	EXPORT_SYMBOL_GPL
+0xf550572b	ocfs2_stack_glue_register	fs/ocfs2/ocfs2_stackglue	EXPORT_SYMBOL_GPL
+0x96b29254	strncasecmp	vmlinux	EXPORT_SYMBOL
+0xf8f3a0fb	ata_ratelimit	vmlinux	EXPORT_SYMBOL_GPL
+0x2f5e5dc4	drm_modeset_lock_single_interruptible	vmlinux	EXPORT_SYMBOL
+0xdf031257	tty_get_pgrp	vmlinux	EXPORT_SYMBOL_GPL
+0xd5a7b561	of_clk_get_parent_count	vmlinux	EXPORT_SYMBOL_GPL
+0x9fb1d0ed	uuid_is_valid	vmlinux	EXPORT_SYMBOL
+0x32f93d42	blkcg_dkstats_find	vmlinux	EXPORT_SYMBOL_GPL
+0xd2db1920	unregister_asymmetric_key_parser	vmlinux	EXPORT_SYMBOL_GPL
+0x24077555	fuse_dev_release	vmlinux	EXPORT_SYMBOL_GPL
+0xbb024b82	default_llseek	vmlinux	EXPORT_SYMBOL
+0xdeb32765	ring_buffer_oldest_event_ts	vmlinux	EXPORT_SYMBOL_GPL
+0x125a6e20	p9_client_symlink	net/9p/9pnet	EXPORT_SYMBOL
+0x8cee750e	svc_bind	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0x9ea9922b	cm_class	drivers/infiniband/core/ib_cm	EXPORT_SYMBOL
+0x461f6ceb	tcp_read_sock	vmlinux	EXPORT_SYMBOL
+0xb1d2b465	of_find_net_device_by_node	vmlinux	EXPORT_SYMBOL
+0x7162bd28	__flow_indr_block_cb_unregister	vmlinux	EXPORT_SYMBOL_GPL
+0xf0979a46	usb_gadget_giveback_request	vmlinux	EXPORT_SYMBOL_GPL
+0x6321da4b	scsi_host_busy	vmlinux	EXPORT_SYMBOL
+0x051d58e8	dma_fence_wait_any_timeout	vmlinux	EXPORT_SYMBOL
+0x5b925657	virtqueue_get_desc_addr	vmlinux	EXPORT_SYMBOL_GPL
+0x7dc84c57	pci_reset_pri	vmlinux	EXPORT_SYMBOL_GPL
+0xb89b6e6b	guid_parse	vmlinux	EXPORT_SYMBOL
+0xa9e05660	io_cgrp_subsys_on_dfl_key	vmlinux	EXPORT_SYMBOL_GPL
+0x8d18270b	rpc_peeraddr	net/sunrpc/sunrpc	EXPORT_SYMBOL_GPL
+0xc4d16f04	nft_obj_lookup	net/netfilter/nf_tables	EXPORT_SYMBOL_GPL
+0x0667188c	rdma_link_register	drivers/infiniband/core/ib_core	EXPORT_SYMBOL
+0xde17c810	rdev_clear_badblocks	drivers/md/md-mod	EXPORT_SYMBOL_GPL
+0x4fcc3f9e	nfs_put_lock_context	fs/nfs/nfs	EXPORT_SYMBOL_GPL
+0xb70ec65f	cpuidle_register_driver	vmlinux	EXPORT_SYMBOL_GPL
+0x5196975b	scsi_eh_prep_cmnd	vmlinux	EXPORT_SYMBOL
+0x74de85ed	to_drm_sched_fence	vmlinux	EXPORT_SYMBOL
+0x3bc05133	drm_self_refresh_helper_cleanup	vmlinux	EXPORT_SYMBOL
+0x459971ee	tty_port_register_device_attr	vmlinux	EXPORT_SYMBOL_GPL
+0xb8832fb7	devm_regulator_unregister_supply_alias	vmlinux	EXPORT_SYMBOL_GPL
+0xfd9e5b32	pci_set_cacheline_size	vmlinux	EXPORT_SYMBOL_GPL
+0x4045d6e7	copy_page_from_iter	vmlinux	EXPORT_SYMBOL
+0x815b5dd4	match_octal	vmlinux	EXPORT_SYMBOL
+0xf273e6cd	cdev_device_del	vmlinux	EXPORT_SYMBOL
+0x0ba9e42e	cdev_device_add	vmlinux	EXPORT_SYMBOL
+0x6081c558	bch_btree_keys_free	drivers/md/bcache/bcache	EXPORT_SYMBOL
+0x94e14db7	xt_register_targets	vmlinux	EXPORT_SYMBOL
+0xf7736d45	i2c_smbus_write_byte	vmlinux	EXPORT_SYMBOL
+0x31544ebb	scsi_register_driver	vmlinux	EXPORT_SYMBOL
+0xacaa4c72	dma_fence_match_context	vmlinux	EXPORT_SYMBOL
+0x3a9cedab	unregister_virtio_driver	vmlinux	EXPORT_SYMBOL_GPL
+0x148f46e3	hdmi_avi_infoframe_init	vmlinux	EXPORT_SYMBOL
+0xb15b4109	crc32c	vmlinux	EXPORT_SYMBOL
+0x54723aa1	iomap_dio_rw	vmlinux	EXPORT_SYMBOL_GPL
+0xc45fb9ab	vfs_fsync	vmlinux	EXPORT_SYMBOL
+0x4e3e17fc	poll_freewait	vmlinux	EXPORT_SYMBOL
+0x40c7247c	si_meminfo	vmlinux	EXPORT_SYMBOL
+0x2135d6fc	kthread_create_on_node	vmlinux	EXPORT_SYMBOL
+0x5e2bb1a5	is_virtual	vmlinux	EXPORT_SYMBOL
+0xbf77f558	snd_timer_pause	sound/core/snd-timer	EXPORT_SYMBOL
+0xc4bc6f11	vhost_chr_poll	drivers/vhost/vhost	EXPORT_SYMBOL
+0xc6283e9a	iscsit_free_cmd	drivers/target/iscsi/iscsi_target_mod	EXPORT_SYMBOL
+0x7890d535	dm_cache_policy_get_name	drivers/md/dm-cache	EXPORT_SYMBOL_GPL
+0x5b6c00e6	xor_blocks	crypto/xor	EXPORT_SYMBOL
+0x04e381a3	af_alg_free_resources	crypto/af_alg	EXPORT_SYMBOL_GPL
+0x10fa0ff2 crypto_finalize_akcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL +0xd2237016 radix_tree_delete_item vmlinux EXPORT_SYMBOL +0xb1647fc2 devlink_info_version_running_put vmlinux EXPORT_SYMBOL_GPL +0xb193bd15 component_add vmlinux EXPORT_SYMBOL_GPL +0xa71da068 iommu_domain_set_attr vmlinux EXPORT_SYMBOL_GPL +0x1a2265ed iommu_domain_get_attr vmlinux EXPORT_SYMBOL_GPL +0x19734eed _copy_from_iter_full_nocache vmlinux EXPORT_SYMBOL +0x9ad08322 blkg_print_stat_bytes_recursive vmlinux EXPORT_SYMBOL_GPL +0x5e95b1cd current_umask vmlinux EXPORT_SYMBOL +0xe345a9fe get_fs_type vmlinux EXPORT_SYMBOL +0xbffbc928 ref_module vmlinux EXPORT_SYMBOL_GPL +0x40d59096 unregister_restart_handler vmlinux EXPORT_SYMBOL +0x3e42d080 kvm_unmap_gfn vmlinux EXPORT_SYMBOL_GPL +0x3a5643f7 nf_nat_pptp_hook_outbound net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL +0xdda2126e free_candev drivers/net/can/dev/can-dev EXPORT_SYMBOL_GPL +0x3f0010f8 mlx4_alloc_hwq_res drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x890b2c07 usb_hub_claim_port vmlinux EXPORT_SYMBOL_GPL +0x29f078d1 drm_mode_legacy_fb_format vmlinux EXPORT_SYMBOL +0x3eac676f tty_driver_kref_put vmlinux EXPORT_SYMBOL +0x514fefa4 regulator_bulk_free vmlinux EXPORT_SYMBOL_GPL +0x6c389761 acpi_bus_get_private_data vmlinux EXPORT_SYMBOL_GPL +0x80d5e57a mpi_free vmlinux EXPORT_SYMBOL_GPL +0x92b52aaf dquot_acquire vmlinux EXPORT_SYMBOL +0x3c3fce39 __local_bh_enable_ip vmlinux EXPORT_SYMBOL +0x06f1c4ef mpt_send_handshake_request drivers/message/fusion/mptbase EXPORT_SYMBOL +0x24b0fc81 fc_exch_recv drivers/scsi/libfc/libfc EXPORT_SYMBOL +0xc803a301 xfrm_local_error vmlinux EXPORT_SYMBOL_GPL +0x8d84c693 sock_prot_inuse_get vmlinux EXPORT_SYMBOL_GPL +0x55947a6e usb_wakeup_notification vmlinux EXPORT_SYMBOL_GPL +0x66cd738d phy_support_asym_pause vmlinux EXPORT_SYMBOL +0x5805c088 drm_atomic_get_private_obj_state vmlinux EXPORT_SYMBOL +0x9dc8c778 drm_dp_mst_topology_state_funcs vmlinux EXPORT_SYMBOL +0x91db7b09 gpiod_put vmlinux EXPORT_SYMBOL_GPL +0x998d79d6 x509_decode_time vmlinux EXPORT_SYMBOL_GPL +0x40611138 generic_write_end vmlinux EXPORT_SYMBOL +0x30be6731 seq_file_path vmlinux EXPORT_SYMBOL +0x67955ce6 profile_hits vmlinux EXPORT_SYMBOL_GPL +0x442169ef devm_request_threaded_irq vmlinux EXPORT_SYMBOL +0x56fd5051 flow_offload_free net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x13d12a18 nat_t120_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0xcd083b10 unregister_sound_dsp sound/soundcore EXPORT_SYMBOL +0x6e1e3821 dm_array_walk drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x4d9718c5 udp6_seq_ops vmlinux EXPORT_SYMBOL +0x78e2fe95 mini_qdisc_pair_init vmlinux EXPORT_SYMBOL +0x4d0f07c3 bpf_prog_create vmlinux EXPORT_SYMBOL_GPL +0x2a4cf402 property_entries_free vmlinux EXPORT_SYMBOL_GPL +0x08b21ccc gpio_to_desc vmlinux EXPORT_SYMBOL_GPL +0x5b5007bd security_inode_copy_up vmlinux EXPORT_SYMBOL +0x7a4497db kzfree vmlinux EXPORT_SYMBOL +0x352ec68b bpf_offload_dev_destroy vmlinux EXPORT_SYMBOL_GPL +0x4f4d78c5 LZ4_compress_default lib/lz4/lz4_compress EXPORT_SYMBOL +0x99cef094 nci_core_reset net/nfc/nci/nci EXPORT_SYMBOL +0x45082580 ceph_cls_set_cookie net/ceph/libceph EXPORT_SYMBOL +0x3eae62c1 inet_sk_diag_fill net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x5a32a44f arpt_unregister_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL +0x590e0202 rdma_link_unregister drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdffaf441 gether_setup_name_default drivers/usb/gadget/function/u_ether EXPORT_SYMBOL_GPL +0x72728a6e 
tcp_ca_openreq_child vmlinux EXPORT_SYMBOL_GPL +0xc5894550 bpf_prog_destroy vmlinux EXPORT_SYMBOL_GPL +0x4048a787 devm_remove_action vmlinux EXPORT_SYMBOL_GPL +0x1e5b56dd devm_clk_put vmlinux EXPORT_SYMBOL +0x21be37e1 hdmi_avi_infoframe_check vmlinux EXPORT_SYMBOL +0x889c0f8c pci_enable_atomic_ops_to_root vmlinux EXPORT_SYMBOL +0xa84ce9e0 crypto_aes_inv_sbox vmlinux EXPORT_SYMBOL +0xfa6fde5e bio_copy_data_iter vmlinux EXPORT_SYMBOL +0x7b78a43b inode_insert5 vmlinux EXPORT_SYMBOL +0x9684dfb9 deactivate_super vmlinux EXPORT_SYMBOL +0x2838e39f vmap vmlinux EXPORT_SYMBOL +0x42c5e6b6 ww_mutex_lock_interruptible vmlinux EXPORT_SYMBOL diff --git a/package/arm/config.default b/package/arm/config.default deleted file mode 100644 index b8107d3471eb..000000000000 --- a/package/arm/config.default +++ /dev/null @@ -1,6068 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/arm64 5.4.32-1 Kernel Configuration -# - -# -# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) -# -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=80301 -CONFIG_CLANG_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_HAS_ASM_GOTO=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-tlinux4" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_USELIB=y -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_GENERIC_IRQ_CHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_IRQ_MSI_IOMMU=y -CONFIG_HANDLE_DOMAIN_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_GENERIC_IRQ_MULTI_HANDLER=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -# CONFIG_PSI is not set -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=18 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 
-CONFIG_GENERIC_SCHED_CLOCK=y - -# -# Scheduler features -# -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_CGROUP_PIDS=y -# CONFIG_CGROUP_RDMA is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -# CONFIG_CGROUP_BPF is not set -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -# CONFIG_SCHED_AUTOGROUP is not set -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_BPF_SYSCALL=y -# CONFIG_BPF_JIT_ALWAYS_ON is not set -CONFIG_USERFAULTFD=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_RSEQ=y -# CONFIG_DEBUG_RSEQ is not set -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_MEMCG_SYSFS_ON is not set -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -CONFIG_SLAB_MERGE_DEFAULT=y -# CONFIG_SLAB_FREELIST_RANDOM is not set -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set -CONFIG_SLUB_CPU_PARTIAL=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -# end of General setup - -CONFIG_ARM64=y -CONFIG_64BIT=y -CONFIG_MMU=y -CONFIG_ARM64_PAGE_SHIFT=16 -CONFIG_ARM64_CONT_SHIFT=5 -CONFIG_ARCH_MMAP_RND_BITS_MIN=14 -CONFIG_ARCH_MMAP_RND_BITS_MAX=29 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=7 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA32=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_SMP=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=3 -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_PROC_KCORE_TEXT=y - -# -# Platform selection -# 
-# CONFIG_ARCH_ACTIONS is not set -# CONFIG_ARCH_AGILEX is not set -# CONFIG_ARCH_SUNXI is not set -# CONFIG_ARCH_ALPINE is not set -# CONFIG_ARCH_BCM2835 is not set -# CONFIG_ARCH_BCM_IPROC is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_BITMAIN is not set -# CONFIG_ARCH_BRCMSTB is not set -# CONFIG_ARCH_EXYNOS is not set -# CONFIG_ARCH_K3 is not set -# CONFIG_ARCH_LAYERSCAPE is not set -# CONFIG_ARCH_LG1K is not set -CONFIG_ARCH_HISI=y -# CONFIG_ARCH_MEDIATEK is not set -# CONFIG_ARCH_MESON is not set -# CONFIG_ARCH_MVEBU is not set -# CONFIG_ARCH_MXC is not set -# CONFIG_ARCH_QCOM is not set -# CONFIG_ARCH_REALTEK is not set -# CONFIG_ARCH_RENESAS is not set -# CONFIG_ARCH_ROCKCHIP is not set -# CONFIG_ARCH_SEATTLE is not set -# CONFIG_ARCH_STRATIX10 is not set -# CONFIG_ARCH_SYNQUACER is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -# CONFIG_ARCH_THUNDER is not set -# CONFIG_ARCH_THUNDER2 is not set -# CONFIG_ARCH_UNIPHIER is not set -# CONFIG_ARCH_VEXPRESS is not set -# CONFIG_ARCH_XGENE is not set -# CONFIG_ARCH_ZX is not set -# CONFIG_ARCH_ZYNQMP is not set -# end of Platform selection - -CONFIG_KPATCH=m - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_834220=y -CONFIG_ARM64_ERRATUM_845719=y -# CONFIG_ARM64_ERRATUM_843419 is not set -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_ARM64_ERRATUM_1418040=y -CONFIG_ARM64_ERRATUM_1165522=y -CONFIG_ARM64_ERRATUM_1286807=y -CONFIG_ARM64_ERRATUM_1463225=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23144=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_CAVIUM_ERRATUM_30115=y -CONFIG_CAVIUM_TX2_ERRATUM_219=y -CONFIG_QCOM_FALKOR_ERRATUM_1003=y -CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y -CONFIG_QCOM_FALKOR_ERRATUM_1009=y -CONFIG_QCOM_QDF2400_ERRATUM_0065=y -CONFIG_SOCIONEXT_SYNQUACER_PREITS=y -CONFIG_HISILICON_ERRATUM_161600802=y -CONFIG_QCOM_FALKOR_ERRATUM_E1041=y -CONFIG_FUJITSU_ERRATUM_010001=y -# end of ARM errata workarounds via the alternatives framework - -# CONFIG_ARM64_4K_PAGES is not set -# CONFIG_ARM64_16K_PAGES is not set -CONFIG_ARM64_64K_PAGES=y -# CONFIG_ARM64_VA_BITS_42 is not set -CONFIG_ARM64_VA_BITS_48=y -# CONFIG_ARM64_VA_BITS_52 is not set -CONFIG_ARM64_VA_BITS=48 -CONFIG_ARM64_PA_BITS_48=y -# CONFIG_ARM64_PA_BITS_52 is not set -CONFIG_ARM64_PA_BITS=48 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_SCHED_MC=y -# CONFIG_SCHED_SMT is not set -CONFIG_NR_CPUS=256 -CONFIG_HOTPLUG_CPU=y -CONFIG_NUMA=y -CONFIG_NODES_SHIFT=4 -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_HOLES_IN_ZONE=y -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_SYS_SUPPORTS_HUGETLBFS=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_SECCOMP=y -# CONFIG_PARAVIRT is not set -# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set -CONFIG_KEXEC=y -# CONFIG_KEXEC_FILE is not set -CONFIG_CRASH_DUMP=y -# CONFIG_XEN is not set -CONFIG_FORCE_MAX_ZONEORDER=14 
-CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_HARDEN_EL2_VECTORS=y -CONFIG_ARM64_SSBD=y -CONFIG_RODATA_FULL_DEFAULT_ENABLED=y -# CONFIG_ARM64_SW_TTBR0_PAN is not set -CONFIG_ARM64_TAGGED_ADDR_ABI=y -CONFIG_COMPAT=y -CONFIG_KUSER_HELPERS=y -# CONFIG_ARMV8_DEPRECATED is not set - -# -# ARMv8.1 architectural features -# -# CONFIG_ARM64_HW_AFDBM is not set -# CONFIG_ARM64_PAN is not set -# CONFIG_ARM64_VHE is not set -# end of ARMv8.1 architectural features - -# -# ARMv8.2 architectural features -# -CONFIG_ARM64_UAO=y -# CONFIG_ARM64_PMEM is not set -CONFIG_ARM64_RAS_EXTN=y -CONFIG_ARM64_CNP=y -# end of ARMv8.2 architectural features - -# -# ARMv8.3 architectural features -# -# end of ARMv8.3 architectural features - -CONFIG_ARM64_MODULE_PLTS=y -# CONFIG_ARM64_PSEUDO_NMI is not set -CONFIG_RELOCATABLE=y -# CONFIG_RANDOMIZE_BASE is not set -# end of Kernel Features - -# -# Boot options -# -# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set -CONFIG_CMDLINE="" -# CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_DMI=y -# end of Boot options - -CONFIG_SYSVIPC_COMPAT=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -# CONFIG_SUSPEND_SKIP_SYNC is not set -# CONFIG_HIBERNATION is not set -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=100 -CONFIG_PM_WAKELOCKS_GC=y -CONFIG_PM=y -CONFIG_PM_DEBUG=y -CONFIG_PM_ADVANCED_DEBUG=y -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_PM_SLEEP_DEBUG=y -# CONFIG_DPM_WATCHDOG is not set -CONFIG_PM_CLK=y -CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y -CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# end of Power management options - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set - -# -# ARM CPU Idle Drivers -# -# CONFIG_ARM_CPUIDLE is not set -# CONFIG_ARM_PSCI_CPUIDLE is not set -# end of ARM CPU Idle Drivers -# end of CPU Idle - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_QORIQ_CPUFREQ is not set -# end of CPU Frequency scaling -# end of CPU Power Management - -# -# Firmware Drivers -# -# CONFIG_ARM_SDE_INTERFACE is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=m -# CONFIG_ISCSI_IBFT is not set -# CONFIG_FW_CFG_SYSFS is not set -CONFIG_HAVE_ARM_SMCCC=y -CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=y -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_ARMSTUB=y 
-CONFIG_EFI_ARMSTUB_DTB_LOADER=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_EFI_EARLYCON=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -# CONFIG_ACPI_FAN is not set -# CONFIG_ACPI_TAD is not set -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_MCFG=y -# CONFIG_ACPI_PROCESSOR is not set -# CONFIG_ACPI_IPMI is not set -CONFIG_ACPI_NUMA=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_PCI_SLOT is not set -# CONFIG_ACPI_CONTAINER is not set -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_BGRT is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -# CONFIG_ACPI_HMAT is not set -CONFIG_HAVE_ACPI_APEI=y -# CONFIG_ACPI_APEI is not set -# CONFIG_PMIC_OPREGION is not set -# CONFIG_ACPI_CONFIGFS is not set -CONFIG_ACPI_IORT=y -CONFIG_ACPI_GTDT=y -CONFIG_ACPI_PPTT=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y -CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_KVM_ARM_HOST=y -CONFIG_KVM_ARM_PMU=y -CONFIG_KVM_INDIRECT_VECTORS=y -CONFIG_VHOST_NET=m -# CONFIG_VHOST_SCSI is not set -# CONFIG_VHOST_VSOCK is not set -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m -# CONFIG_CRYPTO_SHA512_ARM64 is not set -CONFIG_CRYPTO_SHA1_ARM64_CE=m -CONFIG_CRYPTO_SHA2_ARM64_CE=m -# CONFIG_CRYPTO_SHA512_ARM64_CE is not set -# CONFIG_CRYPTO_SHA3_ARM64 is not set -# CONFIG_CRYPTO_SM3_ARM64_CE is not set -# CONFIG_CRYPTO_SM4_ARM64_CE is not set -CONFIG_CRYPTO_GHASH_ARM64_CE=m -# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set -CONFIG_CRYPTO_AES_ARM64=m -CONFIG_CRYPTO_AES_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64_CE_CCM=m -CONFIG_CRYPTO_AES_ARM64_CE_BLK=m -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m -# CONFIG_CRYPTO_CHACHA20_NEON is not set -# CONFIG_CRYPTO_NHPOLY1305_NEON is not set -# CONFIG_CRYPTO_AES_ARM64_BS is not set - -# -# General architecture-dependent options -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_KPROBES=y -# CONFIG_JUMP_LABEL is not set -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_KEEPINITRD=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_REGS=y 
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_CLONE_BACKWARDS=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_64BIT_TIME=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_REFCOUNT_FULL=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_HAS_RELR=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_PLUGIN_HOSTCC="" -CONFIG_HAVE_GCC_PLUGINS=y -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_MODULE_SIG is not set -# CONFIG_MODULE_COMPRESS is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_UNUSED_SYMBOLS=y -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -CONFIG_BLK_CMDLINE_PARSER=y -# CONFIG_BLK_WBT is not set -# CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_CGROUP_IOCOST is not set -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -CONFIG_AIX_PARTITION=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -# end of Partition Types - -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y -CONFIG_BLK_PM=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -# CONFIG_IOSCHED_BFQ is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y 
-CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_SPIN_TRYLOCK=y -CONFIG_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_INLINE_SPIN_LOCK=y -CONFIG_INLINE_SPIN_LOCK_BH=y -CONFIG_INLINE_SPIN_LOCK_IRQ=y -CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_INLINE_SPIN_UNLOCK_BH=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_READ_LOCK=y -CONFIG_INLINE_READ_LOCK_BH=y -CONFIG_INLINE_READ_LOCK_IRQ=y -CONFIG_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_BH=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_WRITE_LOCK=y -CONFIG_INLINE_WRITE_LOCK_BH=y -CONFIG_INLINE_WRITE_LOCK_IRQ=y -CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_BH=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_ARCH_KEEP_MEMBLOCK=y -# CONFIG_MEMORY_HOTPLUG is not set -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_MEMORY_FAILURE is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -# CONFIG_CMA is not set -CONFIG_ZPOOL=m -CONFIG_ZBUD=m -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=m -# CONFIG_PGTABLE_MAPPING is not set -CONFIG_ZSMALLOC_STAT=y -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_PTE_DEVMAP=y -# 
CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -# CONFIG_READ_ONLY_THP_FOR_FS is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_UNIX_DIAG=m -# CONFIG_TLS is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=m -CONFIG_XFRM_USER=m -# CONFIG_XFRM_INTERFACE is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -# CONFIG_NET_KEY_MIGRATE is not set -# CONFIG_SMC is not set -# CONFIG_XDP_SOCKETS is not set -CONFIG_TOA=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -CONFIG_NET_FOU=m -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_INET_ESP_OFFLOAD is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -# CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -# CONFIG_INET6_ESP_OFFLOAD is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -# CONFIG_IPV6_VTI is not set -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_FOU=m -CONFIG_IPV6_FOU_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -# CONFIG_IPV6_SUBTREES is not set -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_COMMON=m -# CONFIG_NF_LOG_NETDEV is not set -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y 
-CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -# CONFIG_NF_CONNTRACK_TIMEOUT is not set -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -# CONFIG_NF_CT_PROTO_DCCP is not set -CONFIG_NF_CT_PROTO_GRE=y -# CONFIG_NF_CT_PROTO_SCTP is not set -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_SET=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_COUNTER=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_TUNNEL=m -CONFIG_NFT_OBJREF=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=m -CONFIG_NFT_OSF=m -CONFIG_NFT_TPROXY=m -CONFIG_NFT_SYNPROXY=m -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -# CONFIG_NETFILTER_XT_MATCH_DCCP is not set -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 
-CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -# CONFIG_NETFILTER_XT_MATCH_SCTP is not set -# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -# CONFIG_IP_SET_HASH_IPMARK is not set -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -# CONFIG_IP_SET_HASH_IPMAC is not set -# CONFIG_IP_SET_HASH_MAC is not set -# CONFIG_IP_SET_HASH_NETPORTNET is not set -CONFIG_IP_SET_HASH_NET=m -# CONFIG_IP_SET_HASH_NETNET is not set -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -# CONFIG_IP_VS_PROTO_SCTP is not set - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -# CONFIG_IP_VS_MH is not set -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_FLOW_TABLE_IPV4=m -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -# CONFIG_IP_NF_TARGET_SYNPROXY is not set -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -# CONFIG_IP_NF_TARGET_NETMAP is not set -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# 
IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -# CONFIG_IP6_NF_MATCH_SRH is not set -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -# CONFIG_IP6_NF_TARGET_SYNPROXY is not set -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -# CONFIG_IP6_NF_NAT is not set -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m - -# -# DECnet: Netfilter Configuration -# -CONFIG_DECNET_NF_GRABULATOR=m -# end of DECnet: Netfilter Configuration - -CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NFT_BRIDGE_META=m -CONFIG_NFT_BRIDGE_REJECT=m -CONFIG_NF_LOG_BRIDGE=m -# CONFIG_NF_CONNTRACK_BRIDGE is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_BPFILTER is not set -CONFIG_IP_DCCP=m -CONFIG_INET_DCCP_DIAG=m - -# -# DCCP CCIDs Configuration -# -# CONFIG_IP_DCCP_CCID2_DEBUG is not set -# CONFIG_IP_DCCP_CCID3 is not set -# end of DCCP CCIDs Configuration - -# -# DCCP Kernel Hacking -# -# CONFIG_IP_DCCP_DEBUG is not set -# end of DCCP Kernel Hacking - -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -CONFIG_RDS=m -CONFIG_RDS_RDMA=m -CONFIG_RDS_TCP=m -# CONFIG_RDS_DEBUG is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -CONFIG_ATM_MPOA=m -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_DECNET=m -# CONFIG_DECNET_ROUTER is not set -CONFIG_LLC=m -CONFIG_LLC2=m -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -# CONFIG_IPDDP_ENCAP is not set -CONFIG_X25=m -CONFIG_LAPB=m -CONFIG_PHONET=m -CONFIG_6LOWPAN=m -CONFIG_6LOWPAN_DEBUGFS=y -CONFIG_6LOWPAN_NHC=m -CONFIG_6LOWPAN_NHC_DEST=m -CONFIG_6LOWPAN_NHC_FRAGMENT=m -CONFIG_6LOWPAN_NHC_HOP=m -CONFIG_6LOWPAN_NHC_IPV6=m -CONFIG_6LOWPAN_NHC_MOBILITY=m -CONFIG_6LOWPAN_NHC_ROUTING=m -CONFIG_6LOWPAN_NHC_UDP=m -CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m -CONFIG_6LOWPAN_GHC_UDP=m 
-CONFIG_6LOWPAN_GHC_ICMPV6=m -CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m -CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m -CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m -CONFIG_IEEE802154=m -CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -# CONFIG_CLS_U32_PERF is not set -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=m -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -# CONFIG_NET_CLS_MATCHALL is not set -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_CANID=m -CONFIG_NET_EMATCH_IPSET=m -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -# CONFIG_NET_ACT_SAMPLE is not set -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -# CONFIG_NET_ACT_MPLS is not set -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -CONFIG_NET_ACT_CONNMARK=m -# CONFIG_NET_ACT_CTINFO is not set -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -# CONFIG_NET_ACT_TUNNEL_KEY is not set -# CONFIG_NET_ACT_CT is not set -# CONFIG_NET_TC_SKB_EXT is not set -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=y -CONFIG_BATMAN_ADV=m -# CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_BLA=y -CONFIG_BATMAN_ADV_DAT=y -CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y -CONFIG_BATMAN_ADV_DEBUGFS=y -# CONFIG_BATMAN_ADV_DEBUG is not set -CONFIG_BATMAN_ADV_SYSFS=y -# CONFIG_BATMAN_ADV_TRACING is not set -# CONFIG_OPENVSWITCH is not set -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -# CONFIG_VIRTIO_VSOCKETS is not set -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=m -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -# CONFIG_NET_NSH is not set -CONFIG_HSR=m -# CONFIG_NET_SWITCHDEV is not set -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_NET_NCSI is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_JIT=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -# CONFIG_NET_DROP_MONITOR is not set -# end of Network testing -# end of Networking options - -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -CONFIG_AX25=m -CONFIG_AX25_DAMA_SLAVE=y -CONFIG_NETROM=m 
-CONFIG_ROSE=m - -# -# AX.25 network device drivers -# -CONFIG_MKISS=m -CONFIG_6PACK=m -CONFIG_BPQETHER=m -CONFIG_BAYCOM_SER_FDX=m -CONFIG_BAYCOM_SER_HDX=m -# CONFIG_BAYCOM_PAR is not set -CONFIG_YAM=m -# end of AX.25 network device drivers - -CONFIG_CAN=m -CONFIG_CAN_RAW=m -CONFIG_CAN_BCM=m -CONFIG_CAN_GW=m -# CONFIG_CAN_J1939 is not set - -# -# CAN Device Drivers -# -CONFIG_CAN_VCAN=m -# CONFIG_CAN_VXCAN is not set -CONFIG_CAN_SLCAN=m -CONFIG_CAN_DEV=m -CONFIG_CAN_CALC_BITTIMING=y -# CONFIG_CAN_FLEXCAN is not set -CONFIG_CAN_GRCAN=m -# CONFIG_CAN_KVASER_PCIEFD is not set -CONFIG_CAN_XILINXCAN=m -CONFIG_CAN_C_CAN=m -CONFIG_CAN_C_CAN_PLATFORM=m -CONFIG_CAN_C_CAN_PCI=m -CONFIG_CAN_CC770=m -CONFIG_CAN_CC770_ISA=m -CONFIG_CAN_CC770_PLATFORM=m -# CONFIG_CAN_IFI_CANFD is not set -CONFIG_CAN_M_CAN=m -# CONFIG_CAN_M_CAN_PLATFORM is not set -# CONFIG_CAN_PEAK_PCIEFD is not set -CONFIG_CAN_SJA1000=m -CONFIG_CAN_EMS_PCI=m -# CONFIG_CAN_F81601 is not set -CONFIG_CAN_KVASER_PCI=m -CONFIG_CAN_PEAK_PCI=m -CONFIG_CAN_PEAK_PCIEC=y -CONFIG_CAN_PLX_PCI=m -CONFIG_CAN_SJA1000_ISA=m -CONFIG_CAN_SJA1000_PLATFORM=m -CONFIG_CAN_SOFTING=m - -# -# CAN SPI interfaces -# -# CONFIG_CAN_HI311X is not set -CONFIG_CAN_MCP251X=m -# end of CAN SPI interfaces - -# -# CAN USB interfaces -# -CONFIG_CAN_8DEV_USB=m -CONFIG_CAN_EMS_USB=m -CONFIG_CAN_ESD_USB2=m -CONFIG_CAN_GS_USB=m -CONFIG_CAN_KVASER_USB=m -# CONFIG_CAN_MCBA_USB is not set -CONFIG_CAN_PEAK_USB=m -# CONFIG_CAN_UCAN is not set -# end of CAN USB interfaces - -# CONFIG_CAN_DEBUG_DEVICES is not set -# end of CAN Device Drivers - -# CONFIG_BT is not set -CONFIG_AF_RXRPC=m -CONFIG_AF_RXRPC_IPV6=y -# CONFIG_AF_RXRPC_INJECT_LOSS is not set -# CONFIG_AF_RXRPC_DEBUG is not set -# CONFIG_RXKAD is not set -# CONFIG_AF_KCM is not set -CONFIG_FIB_RULES=y -# CONFIG_WIRELESS is not set -CONFIG_WIMAX=y -CONFIG_WIMAX_DEBUG_LEVEL=8 -CONFIG_RFKILL=y -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=m -CONFIG_NET_9P=m -CONFIG_NET_9P_VIRTIO=m -CONFIG_NET_9P_RDMA=m -# CONFIG_NET_9P_DEBUG is not set -CONFIG_CAIF=m -# CONFIG_CAIF_DEBUG is not set -CONFIG_CAIF_NETDEV=m -CONFIG_CAIF_USB=m -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -CONFIG_NFC=m -CONFIG_NFC_DIGITAL=m -CONFIG_NFC_NCI=m -CONFIG_NFC_NCI_SPI=m -CONFIG_NFC_NCI_UART=m -CONFIG_NFC_HCI=m -CONFIG_NFC_SHDLC=y - -# -# Near Field Communication (NFC) devices -# -CONFIG_NFC_TRF7970A=m -CONFIG_NFC_SIM=m -CONFIG_NFC_PORT100=m -CONFIG_NFC_FDP=m -CONFIG_NFC_FDP_I2C=m -CONFIG_NFC_PN544=m -CONFIG_NFC_PN544_I2C=m -# CONFIG_NFC_PN533_USB is not set -# CONFIG_NFC_PN533_I2C is not set -CONFIG_NFC_MICROREAD=m -CONFIG_NFC_MICROREAD_I2C=m -CONFIG_NFC_MRVL=m -CONFIG_NFC_MRVL_USB=m -CONFIG_NFC_MRVL_UART=m -CONFIG_NFC_MRVL_I2C=m -CONFIG_NFC_MRVL_SPI=m -CONFIG_NFC_ST21NFCA=m -CONFIG_NFC_ST21NFCA_I2C=m -CONFIG_NFC_ST_NCI=m -CONFIG_NFC_ST_NCI_I2C=m -CONFIG_NFC_ST_NCI_SPI=m -CONFIG_NFC_NXP_NCI=m -CONFIG_NFC_NXP_NCI_I2C=m -CONFIG_NFC_S3FWRN5=m -CONFIG_NFC_S3FWRN5_I2C=m -# CONFIG_NFC_ST95HF is not set -# end of Near Field Communication (NFC) devices - -# CONFIG_PSAMPLE is not set -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_PAGE_POOL=y -CONFIG_FAILOVER=m -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -# CONFIG_PCIEAER_INJECT is 
not set -# CONFIG_PCIE_ECRC is not set -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -# CONFIG_PCIE_DPC is not set -# CONFIG_PCIE_PTM is not set -# CONFIG_PCIE_BW is not set -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=m -# CONFIG_PCI_PF_STUB is not set -CONFIG_PCI_ATS=y -CONFIG_PCI_ECAM=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_HOTPLUG_PCI=y -# CONFIG_HOTPLUG_PCI_ACPI is not set -CONFIG_HOTPLUG_PCI_CPCI=y -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# PCI controller drivers -# - -# -# Cadence PCIe controllers support -# -# CONFIG_PCIE_CADENCE_HOST is not set -# end of Cadence PCIe controllers support - -# CONFIG_PCI_FTPCI100 is not set -CONFIG_PCI_HOST_COMMON=y -CONFIG_PCI_HOST_GENERIC=y -# CONFIG_PCIE_XILINX is not set -# CONFIG_PCI_XGENE is not set -# CONFIG_PCIE_ALTERA is not set -# CONFIG_PCI_HOST_THUNDER_PEM is not set -# CONFIG_PCI_HOST_THUNDER_ECAM is not set - -# -# DesignWare PCI Core Support -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -# CONFIG_PCIE_DW_PLAT_HOST is not set -CONFIG_PCI_HISI=y -CONFIG_PCIE_KIRIN=y -# CONFIG_PCIE_HISI_STB is not set -# CONFIG_PCI_MESON is not set -# CONFIG_PCIE_AL is not set -# end of DesignWare PCI Core Support -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -# CONFIG_PCCARD is not set -# CONFIG_RAPIDIO is not set - -# -# Generic Driver Options -# -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set -# end of Firmware loader - -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=y -CONFIG_REGMAP_MMIO=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -CONFIG_GENERIC_ARCH_TOPOLOGY=y -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_BRCMSTB_GISB_ARB is not set -# CONFIG_MOXTET is not set -# CONFIG_HISILICON_LPC is not set -# CONFIG_SIMPLE_PM_BUS is not set -# CONFIG_VEXPRESS_CONFIG is not set -# end of Bus devices - -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_GNSS is not set -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -CONFIG_MTD_OF_PARTS=m -# CONFIG_MTD_AFS_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -# CONFIG_MTD_BLOCK is not set -# CONFIG_MTD_BLOCK_RO is not set -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# 
RAM/ROM/Flash chip drivers -# -# CONFIG_MTD_CFI is not set -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -# CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -# CONFIG_MTD_HYPERBUS is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_DYNAMIC=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_NET=y -CONFIG_OF_MDIO=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -CONFIG_OF_NUMA=y -CONFIG_PARPORT=m -# CONFIG_PARPORT_AX88796 is not set -# CONFIG_PARPORT_1284 is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -# CONFIG_ZRAM is not set -# CONFIG_BLK_DEV_UMEM is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -CONFIG_BLK_DEV_DRBD=m -CONFIG_DRBD_FAULT_INJECTION=y -CONFIG_BLK_DEV_NBD=m -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=y -# CONFIG_VIRTIO_BLK_SCSI is not set -CONFIG_BLK_DEV_RBD=m -# CONFIG_BLK_DEV_RSXX is not set - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -# CONFIG_NVME_MULTIPATH is not set -# CONFIG_NVME_RDMA is not set -# CONFIG_NVME_FC is not set -# CONFIG_NVME_TCP is not set -# CONFIG_NVME_TARGET is not set -# end of NVME Support - -# -# Misc devices -# -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# 
CONFIG_XILINX_SDFEC is not set -# CONFIG_PVPANIC is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -CONFIG_EEPROM_93CX6=y -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -# CONFIG_CB710_CORE is not set - -# -# Texas Instruments shared transport line discipline -# -CONFIG_TI_ST=m -# end of Texas Instruments shared transport line discipline - -# CONFIG_SENSORS_LIS3_I2C is not set -# CONFIG_ALTERA_STAPL is not set - -# -# Intel MIC & related support -# - -# -# Intel MIC Bus Driver -# - -# -# SCIF Bus Driver -# - -# -# VOP Bus Driver -# -# CONFIG_VOP_BUS is not set - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# - -# -# VOP Driver -# -CONFIG_VHOST_RING=m -# end of Intel MIC & related support - -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MISC_RTSX_PCI is not set -# CONFIG_MISC_RTSX_USB is not set -# CONFIG_HABANA_AI is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=y -CONFIG_SCSI_SAS_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=y -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -# CONFIG_ISCSI_BOOT_SYSFS is not set -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_SCSI_BNX2X_FCOE is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -CONFIG_SCSI_HISI_SAS=y -CONFIG_SCSI_HISI_SAS_PCI=y -CONFIG_SCSI_MVSAS=m -CONFIG_SCSI_MVSAS_DEBUG=y -CONFIG_SCSI_MVSAS_TASKLET=y -CONFIG_SCSI_MVUMI=m -# CONFIG_SCSI_ADVANSYS is not set -CONFIG_SCSI_ARCMSR=m -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_SMARTPQI is not set -# CONFIG_SCSI_UFSHCD is not set -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_GDTH is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_QLA_FC=m -CONFIG_TCM_QLA2XXX=m -# 
CONFIG_TCM_QLA2XXX_DEBUG is not set -# CONFIG_SCSI_QLA_ISCSI is not set -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -# CONFIG_SCSI_CHELSIO_FCOE is not set -# CONFIG_SCSI_DH is not set -# end of SCSI device support - -CONFIG_HAVE_PATA_PLATFORM=y -CONFIG_ATA=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=y -# CONFIG_AHCI_CEVA is not set -# CONFIG_AHCI_QORIQ is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -CONFIG_SATA_SIL24=y -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -# CONFIG_ATA_PIIX is not set -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -# CONFIG_ATA_GENERIC is not set -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -# CONFIG_MD_CLUSTER is not set -CONFIG_BCACHE=m -# CONFIG_BCACHE_DEBUG is not set -# CONFIG_BCACHE_CLOSURES_DEBUG is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=m 
-CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -# CONFIG_DM_WRITECACHE is not set -CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set -# CONFIG_DM_INIT is not set -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -# CONFIG_DM_INTEGRITY is not set -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -CONFIG_TCM_FC=m -CONFIG_ISCSI_TARGET=m -CONFIG_FUSION=y -# CONFIG_FUSION_SPI is not set -# CONFIG_FUSION_FC is not set -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -# CONFIG_FUSION_LOGGING is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -# CONFIG_IFB is not set -# CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=m -# CONFIG_MACVTAP is not set -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -# CONFIG_GENEVE is not set -# CONFIG_GTP is not set -# CONFIG_MACSEC is not set -# CONFIG_NETCONSOLE is not set -CONFIG_TUN=y -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -# CONFIG_NLMON is not set -# CONFIG_NET_VRF is not set -# CONFIG_ARCNET is not set -CONFIG_ATM_DRIVERS=y -# CONFIG_ATM_DUMMY is not set -# CONFIG_ATM_TCP is not set -# CONFIG_ATM_LANAI is not set -# CONFIG_ATM_ENI is not set -# CONFIG_ATM_NICSTAR is not set -# CONFIG_ATM_IDT77252 is not set -# CONFIG_ATM_IA is not set -# CONFIG_ATM_FORE200E is not set -# CONFIG_ATM_HE is not set -# CONFIG_ATM_SOLOS is not set - -# -# CAIF transport drivers -# -# CONFIG_CAIF_TTY is not set -# CONFIG_CAIF_SPI_SLAVE is not set -# CONFIG_CAIF_HSI is not set -CONFIG_CAIF_VIRTIO=m - -# -# Distributed Switch Architecture drivers -# -# end of Distributed Switch Architecture drivers - -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -CONFIG_NET_VENDOR_ALACRITECH=y -# CONFIG_SLICOSS is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set -# CONFIG_NET_VENDOR_AMD is not set -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQTION is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -# CONFIG_BNX2 is not set -# CONFIG_CNIC is not set -# CONFIG_TIGON3 is not set -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -# CONFIG_BNXT is not set -# CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_CADENCE=y -# CONFIG_MACB is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CISCO is not set -CONFIG_NET_VENDOR_CORTINA=y -# CONFIG_GEMINI_ETHERNET is not set -# CONFIG_DNET is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set 
-CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_HWMON=y -CONFIG_BE2NET_BE2=y -CONFIG_BE2NET_BE3=y -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_GOOGLE=y -# CONFIG_GVE is not set -CONFIG_NET_VENDOR_HISILICON=y -CONFIG_HIX5HD2_GMAC=m -CONFIG_HISI_FEMAC=m -CONFIG_HIP04_ETH=m -# CONFIG_HI13X1_GMAC is not set -CONFIG_HNS_MDIO=m -CONFIG_HNS=m -CONFIG_HNS_DSAF=m -CONFIG_HNS_ENET=m -CONFIG_HNS3=m -CONFIG_HNS3_HCLGE=m -CONFIG_HNS3_DCB=y -CONFIG_HNS3_HCLGEVF=m -CONFIG_HNS3_ENET=m -# CONFIG_NET_VENDOR_HP is not set -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -CONFIG_NET_VENDOR_I825XX=y -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IGB=y -CONFIG_IGB_HWMON=y -# CONFIG_IGBVF is not set -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -# CONFIG_IXGBE_DCB is not set -CONFIG_IXGBEVF=m -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_IAVF=m -CONFIG_I40EVF=m -# CONFIG_ICE is not set -CONFIG_FM10K=m -# CONFIG_IGC is not set -# CONFIG_JME is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_NET_VENDOR_MICROCHIP=y -# CONFIG_ENC28J60 is not set -# CONFIG_ENCX24J600 is not set -# CONFIG_LAN743X is not set -CONFIG_NET_VENDOR_MICROSEMI=y -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_NET_VENDOR_NETERION=y -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -CONFIG_NET_VENDOR_NETRONOME=y -# CONFIG_NFP is not set -CONFIG_NET_VENDOR_NI=y -# CONFIG_NI_XGE_MANAGEMENT_ENET is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_ETHOC is not set -CONFIG_NET_VENDOR_PACKET_ENGINES=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_8139CP is not set -# CONFIG_8139TOO is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SMSC is not set -CONFIG_NET_VENDOR_SOCIONEXT=y -CONFIG_NET_VENDOR_STMICRO=y -CONFIG_STMMAC_ETH=y -# CONFIG_STMMAC_SELFTESTS is not set -CONFIG_STMMAC_PLATFORM=y -# CONFIG_DWMAC_DWC_QOS_ETH is not set -CONFIG_DWMAC_GENERIC=y -CONFIG_STMMAC_PCI=m -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -# CONFIG_MDIO_BCM_UNIMAC is not set -# CONFIG_MDIO_BITBANG is not set -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -CONFIG_MDIO_HISI_FEMAC=y -# CONFIG_MDIO_MSCC_MIIM is not set -# CONFIG_MDIO_OCTEON is not set -# CONFIG_MDIO_THUNDER is not set -CONFIG_PHYLINK=y -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -# CONFIG_LED_TRIGGER_PHY is not set - -# -# MII PHY device drivers -# -# CONFIG_SFP is not set -# 
CONFIG_ADIN_PHY is not set -# CONFIG_AMD_PHY is not set -# CONFIG_AQUANTIA_PHY is not set -# CONFIG_AX88796B_PHY is not set -# CONFIG_AT803X_PHY is not set -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_BROADCOM_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_CORTINA_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_DP83822_PHY is not set -# CONFIG_DP83TC811_PHY is not set -# CONFIG_DP83848_PHY is not set -# CONFIG_DP83867_PHY is not set -CONFIG_FIXED_PHY=y -# CONFIG_ICPLUS_PHY is not set -# CONFIG_INTEL_XWAY_PHY is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_LXT_PHY is not set -CONFIG_MARVELL_PHY=m -# CONFIG_MARVELL_10G_PHY is not set -CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_PHY is not set -# CONFIG_MICROCHIP_T1_PHY is not set -# CONFIG_MICROSEMI_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_QSEMI_PHY is not set -CONFIG_REALTEK_PHY=m -# CONFIG_RENESAS_PHY is not set -# CONFIG_ROCKCHIP_PHY is not set -# CONFIG_SMSC_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_TERANETICS_PHY is not set -# CONFIG_VITESSE_PHY is not set -# CONFIG_XILINX_GMII2RGMII is not set -# CONFIG_MICREL_KS8995MA is not set -CONFIG_PLIP=m -CONFIG_PPP=m -# CONFIG_PPP_BSDCOMP is not set -# CONFIG_PPP_DEFLATE is not set -# CONFIG_PPP_FILTER is not set -# CONFIG_PPP_MPPE is not set -# CONFIG_PPP_MULTILINK is not set -# CONFIG_PPPOATM is not set -CONFIG_PPPOE=m -# CONFIG_PPTP is not set -# CONFIG_PPPOL2TP is not set -# CONFIG_PPP_ASYNC is not set -# CONFIG_PPP_SYNC_TTY is not set -CONFIG_SLIP=m -CONFIG_SLHC=m -# CONFIG_SLIP_COMPRESSED is not set -# CONFIG_SLIP_SMART is not set -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=m -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -CONFIG_USB_RTL8152=m -# CONFIG_USB_LAN78XX is not set -# CONFIG_USB_USBNET is not set -# CONFIG_USB_HSO is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_WLAN is not set - -# -# WiMAX Wireless Broadband devices -# -# CONFIG_WIMAX_I2400M_USB is not set -# end of WiMAX Wireless Broadband devices - -# CONFIG_WAN is not set -CONFIG_IEEE802154_DRIVERS=m -# CONFIG_IEEE802154_FAKELB is not set -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# CONFIG_IEEE802154_HWSIM is not set -# CONFIG_FUJITSU_ES is not set -# CONFIG_NETDEVSIM is not set -CONFIG_NET_FAILOVER=m -CONFIG_ISDN=y -CONFIG_ISDN_CAPI=m -CONFIG_CAPI_TRACE=y -# CONFIG_ISDN_CAPI_CAPI20 is not set -# CONFIG_MISDN is not set -# CONFIG_NVM is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_LEDS is not set -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set -# CONFIG_INPUT_SPARSEKMAP is not set -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is 
not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_BYD=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set -CONFIG_MOUSE_PS2_SMBUS=y -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -# CONFIG_MOUSE_ELAN_I2C is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -# CONFIG_MOUSE_SYNAPTICS_USB is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_PROPERTIES=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_AR1021_I2C is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_BU21029 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX is not set -# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set -# CONFIG_TOUCHSCREEN_EXC3000 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GOODIX is not set -# CONFIG_TOUCHSCREEN_HIDEEP is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_S6SY761 is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_EKTF2127 is not set -# CONFIG_TOUCHSCREEN_ELAN is not set -# CONFIG_TOUCHSCREEN_ELO is not set -# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set -# CONFIG_TOUCHSCREEN_WACOM_I2C is not set -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_MK712 is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# 
CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -CONFIG_TOUCHSCREEN_USB_COMPOSITE=y -CONFIG_TOUCHSCREEN_USB_EGALAX=y -CONFIG_TOUCHSCREEN_USB_PANJIT=y -CONFIG_TOUCHSCREEN_USB_3M=y -CONFIG_TOUCHSCREEN_USB_ITM=y -CONFIG_TOUCHSCREEN_USB_ETURBO=y -CONFIG_TOUCHSCREEN_USB_GUNZE=y -CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y -CONFIG_TOUCHSCREEN_USB_IRTOUCH=y -CONFIG_TOUCHSCREEN_USB_IDEALTEK=y -CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y -CONFIG_TOUCHSCREEN_USB_GOTOP=y -CONFIG_TOUCHSCREEN_USB_JASTEC=y -CONFIG_TOUCHSCREEN_USB_ELO=y -CONFIG_TOUCHSCREEN_USB_E2I=y -CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y -CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y -CONFIG_TOUCHSCREEN_USB_NEXIO=y -CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_RM_TS is not set -# CONFIG_TOUCHSCREEN_SILEAD is not set -# CONFIG_TOUCHSCREEN_SIS_I2C is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set -# CONFIG_TOUCHSCREEN_SX8654 is not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZET6223 is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_IQS5XX is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_ATMEL_CAPTOUCH is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -# CONFIG_INPUT_MSM_VIBRATOR is not set -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_GP2A is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is not set -# CONFIG_INPUT_POWERMATE is not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -# CONFIG_INPUT_REGULATOR_HAPTIC is not set -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_CMA3000 is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -CONFIG_INPUT_HISI_POWERKEY=y -# CONFIG_RMI4_CORE is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_SERIO_PARKBD is not set -CONFIG_SERIO_AMBAKMI=y -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_SERIO_ARC_PS2 is not set -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=0 -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_NOZOMI is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -# CONFIG_NULL_TTY is not set -CONFIG_LDISC_AUTOLOAD=y -CONFIG_DEVMEM=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y 
-CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -# CONFIG_SERIAL_8250_DMA is not set -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=16 -CONFIG_SERIAL_8250_RUNTIME_UARTS=16 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_ASPEED_VUART is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_8250_DETECT_IRQ=y -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y -CONFIG_SERIAL_8250_FSL=y -CONFIG_SERIAL_8250_DW=y -# CONFIG_SERIAL_8250_RT288X is not set -CONFIG_SERIAL_OF_PLATFORM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_IFX6X60 is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y -# CONFIG_SERIAL_DEV_BUS is not set -# CONFIG_TTY_PRINTK is not set -# CONFIG_PRINTER is not set -# CONFIG_PPDEV is not set -CONFIG_HVC_DRIVER=y -# CONFIG_HVC_DCC is not set -CONFIG_VIRTIO_CONSOLE=y -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y -# CONFIG_IPMI_PANIC_EVENT is not set -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -# CONFIG_IPMI_SSIF is not set -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -# CONFIG_IPMB_DEVICE_INTERFACE is not set -CONFIG_HW_RANDOM=m -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_HW_RANDOM_HISI=m -CONFIG_HW_RANDOM_CAVIUM=m -# CONFIG_APPLICOM is not set -CONFIG_RAW_DRIVER=m -CONFIG_MAX_RAW_DEVS=256 -CONFIG_TCG_TPM=y -# CONFIG_TCG_TIS is not set -# CONFIG_TCG_TIS_SPI is not set -# CONFIG_TCG_TIS_I2C_ATMEL is not set -# CONFIG_TCG_TIS_I2C_INFINEON is not set -# CONFIG_TCG_TIS_I2C_NUVOTON is not set -# CONFIG_TCG_ATMEL is not set -# CONFIG_TCG_INFINEON is not set -CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -# CONFIG_TCG_TIS_ST33ZP24_I2C is not set -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -CONFIG_DEVPORT=y -# CONFIG_XILLYBUS is not set -# end of Character devices - -# CONFIG_RANDOM_TRUST_BOOTLOADER is not set - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_GPMUX is not set -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -# CONFIG_I2C_MUX_PINCTRL is not set -# CONFIG_I2C_MUX_REG is not set -# CONFIG_I2C_DEMUX_PINCTRL is not set -# CONFIG_I2C_MUX_MLXCPLD is not set -# end of Multiplexer I2C Chip support - -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is 
not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_AMD_MP2 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=y -CONFIG_I2C_DESIGNWARE_PLATFORM=y -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PCI=y -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_RK3X is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_THUNDERX is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# end of I2C Hardware Bus support - -# CONFIG_I2C_STUB is not set -CONFIG_I2C_SLAVE=y -# CONFIG_I2C_SLAVE_EEPROM is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -# CONFIG_SPI_MEM is not set - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_BUTTERFLY is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_NXP_FLEXSPI is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_LM70_LLP is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PL022 is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_ROCKCHIP is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_THUNDERX is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -# CONFIG_PPS_CLIENT_LDISC is not set -# CONFIG_PPS_CLIENT_PARPORT is not set -# CONFIG_PPS_CLIENT_GPIO is not set - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -# CONFIG_DP83640_PHY is not set -# end of PTP clock support - -CONFIG_PINCTRL=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SX150X is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PINCTRL_OCELOT is not set -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# 
CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -# CONFIG_GPIO_AMDPT is not set -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DWAPB=y -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_MB86S7X is not set -CONFIG_GPIO_PL061=y -# CONFIG_GPIO_SAMA5D2_PIOBU is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_XGENE is not set -# CONFIG_GPIO_XILINX is not set -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -# end of USB GPIO expanders - -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_W1 is not set -# CONFIG_POWER_AVS is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMSTB is not set -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -CONFIG_POWER_RESET_HISI=y -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_RESTART is not set -# CONFIG_POWER_RESET_XGENE is not set -# CONFIG_POWER_RESET_SYSCON is not set -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_NVMEM_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_UCS1002 is not set -CONFIG_HWMON=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_AD7314 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# 
CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_AS370 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_ASPEED is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_IBMAEM is not set -# CONFIG_SENSORS_IBMPEX is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4222 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4260 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX6621 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_ADS7871 is not set -# CONFIG_SENSORS_AMC6821 
is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83773G is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -CONFIG_SENSORS_W83795=y -# CONFIG_SENSORS_W83795_FANCTRL is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -CONFIG_THERMAL=y -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -# CONFIG_THERMAL_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -# CONFIG_CPU_THERMAL is not set -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_THERMAL_MMIO is not set -CONFIG_HISI_THERMAL=y -# CONFIG_QORIQ_THERMAL is not set -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -# CONFIG_BCMA is not set - -# -# Multifunction device drivers -# -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# 
CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TPS68470 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TQMX86 is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD70528 is not set -# CONFIG_MFD_STPMIC1 is not set -# CONFIG_MFD_STMFX is not set -# end of Multifunction device drivers - -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_FIXED_VOLTAGE=m -CONFIG_REGULATOR_VIRTUAL_CONSUMER=m -CONFIG_REGULATOR_USERSPACE_CONSUMER=m -# CONFIG_REGULATOR_88PG86X is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_ANATOP is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -# CONFIG_REGULATOR_FAN53555 is not set -CONFIG_REGULATOR_GPIO=m -# CONFIG_REGULATOR_ISL9305 is not set -# CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_LTC3676 is not set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX8649 is not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_MCP16502 is not set -# CONFIG_REGULATOR_MT6311 is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_PV88060 is not set -# CONFIG_REGULATOR_PV88080 is not set -# CONFIG_REGULATOR_PV88090 is not set -# CONFIG_REGULATOR_SLG51000 is not set -# CONFIG_REGULATOR_SY8106A is not set -# CONFIG_REGULATOR_SY8824X is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS65132 is not set -# CONFIG_REGULATOR_TPS6524X is not set -# CONFIG_REGULATOR_VCTRL is not set -CONFIG_RC_CORE=y -CONFIG_RC_MAP=y -# CONFIG_LIRC is not set -CONFIG_RC_DECODERS=y -CONFIG_IR_NEC_DECODER=y -CONFIG_IR_RC5_DECODER=y -CONFIG_IR_RC6_DECODER=y 
-CONFIG_IR_JVC_DECODER=y -CONFIG_IR_SONY_DECODER=y -CONFIG_IR_SANYO_DECODER=y -CONFIG_IR_SHARP_DECODER=y -CONFIG_IR_MCE_KBD_DECODER=y -CONFIG_IR_XMP_DECODER=y -# CONFIG_IR_IMON_DECODER is not set -# CONFIG_IR_RCMM_DECODER is not set -# CONFIG_RC_DEVICES is not set -CONFIG_MEDIA_SUPPORT=y - -# -# Multimedia core support -# -# CONFIG_MEDIA_CAMERA_SUPPORT is not set -# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set -# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set -# CONFIG_MEDIA_RADIO_SUPPORT is not set -# CONFIG_MEDIA_SDR_SUPPORT is not set -# CONFIG_MEDIA_CEC_SUPPORT is not set -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set - -# -# Media drivers -# -# CONFIG_MEDIA_USB_SUPPORT is not set -# CONFIG_MEDIA_PCI_SUPPORT is not set - -# -# Supported MMC/SDIO adapters -# -CONFIG_CYPRESS_FIRMWARE=m - -# -# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) -# - -# -# Media SPI Adapters -# -# end of Media SPI Adapters - -# -# Customise DVB Frontends -# - -# -# Tools to develop new frontends -# -# end of Customise DVB Frontends - -# -# Graphics support -# -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_DRM=y -CONFIG_DRM_MIPI_DSI=y -# CONFIG_DRM_DP_AUX_CHARDEV is not set -# CONFIG_DRM_DEBUG_MM is not set -# CONFIG_DRM_DEBUG_SELFTEST is not set -CONFIG_DRM_KMS_HELPER=y -CONFIG_DRM_KMS_FB_HELPER=y -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set -# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set -# CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_TTM=y -CONFIG_DRM_VRAM_HELPER=y -CONFIG_DRM_GEM_CMA_HELPER=y -CONFIG_DRM_KMS_CMA_HELPER=y -CONFIG_DRM_GEM_SHMEM_HELPER=y -CONFIG_DRM_SCHED=y - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# CONFIG_DRM_HDLCD is not set -# CONFIG_DRM_MALI_DISPLAY is not set -# CONFIG_DRM_KOMEDA is not set -# end of ARM devices - -CONFIG_DRM_RADEON=y -# CONFIG_DRM_RADEON_USERPTR is not set -CONFIG_DRM_AMDGPU=y -# CONFIG_DRM_AMDGPU_SI is not set -# CONFIG_DRM_AMDGPU_CIK is not set -# CONFIG_DRM_AMDGPU_USERPTR is not set -CONFIG_DRM_AMDGPU_GART_DEBUGFS=y - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_AMD_ACP is not set -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -# CONFIG_DEBUG_KERNEL_DC is not set -# end of Display Engine Configuration - -# CONFIG_HSA_AMD is not set -# CONFIG_DRM_NOUVEAU is not set -# CONFIG_DRM_VGEM is not set -# CONFIG_DRM_VKMS is not set -CONFIG_DRM_UDL=y -CONFIG_DRM_AST=y -# CONFIG_DRM_MGAG200 is not set -CONFIG_DRM_CIRRUS_QEMU=y -# CONFIG_DRM_RCAR_DW_HDMI is not set -# CONFIG_DRM_RCAR_LVDS is not set -CONFIG_DRM_RCAR_WRITEBACK=y -CONFIG_DRM_QXL=m -CONFIG_DRM_BOCHS=y -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_ARM_VERSATILE is not set -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set -# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set -# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set -# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set -# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LB035Q02 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# 
CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set -# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set -# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set -# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set -# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set -# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set -# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set -# CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set -# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set -# CONFIG_DRM_PANEL_ROCKTECH_JH057N00900 is not set -# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set -# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set -# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set -# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set -# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set -# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set -# CONFIG_DRM_PANEL_TPO_TPG110 is not set -# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_CDNS_DSI is not set -# CONFIG_DRM_DUMB_VGA_DAC is not set -# CONFIG_DRM_LVDS_ENCODER is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_SII9234 is not set -# CONFIG_DRM_THINE_THC63LVD1024 is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set -# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_TI_SN65DSI86 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# end of Display Interface Bridges - -# CONFIG_DRM_ETNAVIV is not set -# CONFIG_DRM_ARCPGU is not set -CONFIG_DRM_HISI_HIBMC=y -CONFIG_DRM_HISI_KIRIN=y -# CONFIG_DRM_MXSFB is not set -# CONFIG_DRM_GM12U320 is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_PL111 is not set -# CONFIG_DRM_LIMA is not set -# CONFIG_DRM_PANFROST is not set -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y - -# -# Frame buffer Devices -# -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_DDC=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -CONFIG_FB_CIRRUS=y -# CONFIG_FB_PM2 is not set -# CONFIG_FB_ARMCLCD is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -CONFIG_FB_UVESA=m -CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# 
CONFIG_FB_MATROX is not set
-CONFIG_FB_RADEON=y
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-CONFIG_FB_RADEON_DEBUG=y
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-CONFIG_FB_VIRTUAL=y
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-CONFIG_FB_SIMPLE=y
-# CONFIG_FB_SSD1307 is not set
-# CONFIG_FB_SM712 is not set
-# end of Frame buffer Devices
-
-#
-# Backlight & LCD device support
-#
-CONFIG_LCD_CLASS_DEVICE=y
-# CONFIG_LCD_L4F00242T03 is not set
-# CONFIG_LCD_LMS283GF05 is not set
-# CONFIG_LCD_LTV350QV is not set
-# CONFIG_LCD_ILI922X is not set
-# CONFIG_LCD_ILI9320 is not set
-# CONFIG_LCD_TDO24M is not set
-# CONFIG_LCD_VGG2432A4 is not set
-# CONFIG_LCD_PLATFORM is not set
-# CONFIG_LCD_AMS369FG06 is not set
-# CONFIG_LCD_LMS501KF03 is not set
-# CONFIG_LCD_HX8357 is not set
-# CONFIG_LCD_OTM3225A is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=y
-# CONFIG_BACKLIGHT_PM8941_WLED is not set
-# CONFIG_BACKLIGHT_ADP8860 is not set
-# CONFIG_BACKLIGHT_ADP8870 is not set
-# CONFIG_BACKLIGHT_LM3639 is not set
-# CONFIG_BACKLIGHT_GPIO is not set
-# CONFIG_BACKLIGHT_LV5207LP is not set
-# CONFIG_BACKLIGHT_BD6107 is not set
-# CONFIG_BACKLIGHT_ARCXCNN is not set
-# end of Backlight & LCD device support
-
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
-# end of Console display driver support
-
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_LOGO_LINUX_CLUT224=y
-# end of Graphics support
-
-CONFIG_SOUND=m
-CONFIG_SOUND_OSS_CORE=y
-CONFIG_SOUND_OSS_CORE_PRECLAIM=y
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_PCM_OSS_PLUGINS=y
-CONFIG_SND_PCM_TIMER=y
-# CONFIG_SND_HRTIMER is not set
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_MAX_CARDS=32
-CONFIG_SND_SUPPORT_OLD_API=y
-CONFIG_SND_PROC_FS=y
-CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-CONFIG_SND_VMASTER=y
-# CONFIG_SND_SEQUENCER is not set
-CONFIG_SND_DRIVERS=y
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_ALOOP is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_MTS64 is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
-# CONFIG_SND_PORTMAN2X4 is not set
-CONFIG_SND_PCI=y
-# CONFIG_SND_AD1889 is not set
-# CONFIG_SND_ATIIXP is not set
-# CONFIG_SND_ATIIXP_MODEM is not set
-# CONFIG_SND_AU8810 is not set
-# CONFIG_SND_AU8820 is not set
-# CONFIG_SND_AU8830 is not set
-# CONFIG_SND_AW2 is not set
-# CONFIG_SND_BT87X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_CMIPCI is not set
-# CONFIG_SND_OXYGEN is not set
-# CONFIG_SND_CS4281 is not set
-# CONFIG_SND_CS46XX is not set
-# CONFIG_SND_CTXFI is not set
-# CONFIG_SND_DARLA20 is not set
-# CONFIG_SND_GINA20 is not set
-# CONFIG_SND_LAYLA20 is not set
-# CONFIG_SND_DARLA24 is not set
-# CONFIG_SND_GINA24 is not set
-# CONFIG_SND_LAYLA24 is not set
-# CONFIG_SND_MONA is not set
-# CONFIG_SND_MIA is not set
-# CONFIG_SND_ECHO3G is not set
-# CONFIG_SND_INDIGO is not set
-# CONFIG_SND_INDIGOIO is not set
-# CONFIG_SND_INDIGODJ is not set
-# CONFIG_SND_INDIGOIOX is not set
-# CONFIG_SND_INDIGODJX is not set
-# CONFIG_SND_ENS1370 is not set
-# CONFIG_SND_ENS1371 is not set
-# CONFIG_SND_FM801 is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_ICE1724 is not set
-# CONFIG_SND_INTEL8X0 is not set
-# CONFIG_SND_INTEL8X0M is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_LOLA is not set
-# CONFIG_SND_LX6464ES is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_PCXHR is not set
-# CONFIG_SND_RIPTIDE is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_SE6X is not set
-# CONFIG_SND_VIA82XX is not set
-# CONFIG_SND_VIA82XX_MODEM is not set
-# CONFIG_SND_VIRTUOSO is not set
-# CONFIG_SND_VX222 is not set
-# CONFIG_SND_YMFPCI is not set
-
-#
-# HD-Audio
-#
-# CONFIG_SND_HDA_INTEL is not set
-# end of HD-Audio
-
-CONFIG_SND_HDA_PREALLOC_SIZE=64
-CONFIG_SND_SPI=y
-CONFIG_SND_USB=y
-CONFIG_SND_USB_AUDIO=m
-CONFIG_SND_USB_UA101=m
-CONFIG_SND_USB_CAIAQ=m
-CONFIG_SND_USB_CAIAQ_INPUT=y
-CONFIG_SND_USB_6FIRE=m
-CONFIG_SND_USB_HIFACE=m
-CONFIG_SND_BCD2000=m
-CONFIG_SND_USB_LINE6=m
-CONFIG_SND_USB_POD=m
-CONFIG_SND_USB_PODHD=m
-CONFIG_SND_USB_TONEPORT=m
-CONFIG_SND_USB_VARIAX=m
-# CONFIG_SND_SOC is not set
-
-#
-# HID support
-#
-CONFIG_HID=y
-# CONFIG_HID_BATTERY_STRENGTH is not set
-CONFIG_HIDRAW=y
-CONFIG_UHID=y
-CONFIG_HID_GENERIC=y
-
-#
-# Special HID drivers
-#
-# CONFIG_HID_A4TECH is not set
-# CONFIG_HID_ACCUTOUCH is not set
-# CONFIG_HID_ACRUX is not set
-# CONFIG_HID_APPLE is not set
-# CONFIG_HID_APPLEIR is not set
-# CONFIG_HID_ASUS is not set
-# CONFIG_HID_AUREAL is not set
-# CONFIG_HID_BELKIN is not set
-# CONFIG_HID_BETOP_FF is not set
-# CONFIG_HID_BIGBEN_FF is not set
-# CONFIG_HID_CHERRY is not set
-# CONFIG_HID_CHICONY is not set
-# CONFIG_HID_CORSAIR is not set
-# CONFIG_HID_COUGAR is not set
-# CONFIG_HID_MACALLY is not set
-# CONFIG_HID_PRODIKEYS is not set
-# CONFIG_HID_CMEDIA is not set
-# CONFIG_HID_CP2112 is not set
-# CONFIG_HID_CREATIVE_SB0540 is not set
-# CONFIG_HID_CYPRESS is not set
-# CONFIG_HID_DRAGONRISE is not set
-# CONFIG_HID_EMS_FF is not set
-# CONFIG_HID_ELAN is not set
-# CONFIG_HID_ELECOM is not set
-# CONFIG_HID_ELO is not set
-# CONFIG_HID_EZKEY is not set
-# CONFIG_HID_GEMBIRD is not set
-# CONFIG_HID_GFRM is not set
-# CONFIG_HID_HOLTEK is not set
-# CONFIG_HID_GT683R is not set
-# CONFIG_HID_KEYTOUCH is not set
-# CONFIG_HID_KYE is not set
-# CONFIG_HID_UCLOGIC is not set
-# CONFIG_HID_WALTOP is not set
-# CONFIG_HID_VIEWSONIC is not set
-# CONFIG_HID_GYRATION is not set
-# CONFIG_HID_ICADE is not set
-# CONFIG_HID_ITE is not set
-# CONFIG_HID_JABRA is not set
-# CONFIG_HID_TWINHAN is not set
-# CONFIG_HID_KENSINGTON is not set
-# CONFIG_HID_LCPOWER is not set
-# CONFIG_HID_LED is not set
-# CONFIG_HID_LENOVO is not set
-# CONFIG_HID_LOGITECH is not set
-# CONFIG_HID_MAGICMOUSE is not set
-# CONFIG_HID_MALTRON is not set
-# CONFIG_HID_MAYFLASH is not set
-# CONFIG_HID_REDRAGON is not set
-# CONFIG_HID_MICROSOFT is not set
-# CONFIG_HID_MONTEREY is not set
-CONFIG_HID_MULTITOUCH=m
-# CONFIG_HID_NTI is not set
-# CONFIG_HID_NTRIG is not set
-# CONFIG_HID_ORTEK is not set
-# CONFIG_HID_PANTHERLORD is not set
-# CONFIG_HID_PENMOUNT is not set
-# CONFIG_HID_PETALYNX is not set
-# CONFIG_HID_PICOLCD is not set
-# CONFIG_HID_PLANTRONICS is not set
-# CONFIG_HID_PRIMAX is not set
-# CONFIG_HID_RETRODE is not set
-# CONFIG_HID_ROCCAT is not set
-# CONFIG_HID_SAITEK is not set
-# CONFIG_HID_SAMSUNG is not set
-# CONFIG_HID_SONY is not set
-# CONFIG_HID_SPEEDLINK is not set
-# CONFIG_HID_STEAM is not set
-# CONFIG_HID_STEELSERIES is not set
-# CONFIG_HID_SUNPLUS is not set
-# CONFIG_HID_RMI is not set
-# CONFIG_HID_GREENASIA is not set
-# CONFIG_HID_SMARTJOYPLUS is not set
-# CONFIG_HID_TIVO is not set
-# CONFIG_HID_TOPSEED is not set
-# CONFIG_HID_THINGM is not set
-# CONFIG_HID_THRUSTMASTER is not set
-# CONFIG_HID_UDRAW_PS3 is not set
-# CONFIG_HID_U2FZERO is not set
-# CONFIG_HID_WACOM is not set
-# CONFIG_HID_WIIMOTE is not set
-# CONFIG_HID_XINMO is not set
-# CONFIG_HID_ZEROPLUS is not set
-# CONFIG_HID_ZYDACRON is not set
-# CONFIG_HID_SENSOR_HUB is not set
-# CONFIG_HID_ALPS is not set
-# end of Special HID drivers
-
-#
-# USB HID support
-#
-CONFIG_USB_HID=y
-# CONFIG_HID_PID is not set
-# CONFIG_USB_HIDDEV is not set
-# end of USB HID support
-
-#
-# I2C HID support
-#
-# CONFIG_I2C_HID is not set
-# end of I2C HID support
-# end of HID support
-
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-# CONFIG_USB_LED_TRIG is not set
-# CONFIG_USB_ULPI_BUS is not set
-# CONFIG_USB_CONN_GPIO is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB=y
-CONFIG_USB_PCI=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEFAULT_PERSIST=y
-CONFIG_USB_DYNAMIC_MINORS=y
-# CONFIG_USB_OTG is not set
-# CONFIG_USB_OTG_WHITELIST is not set
-# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set
-CONFIG_USB_AUTOSUSPEND_DELAY=2
-# CONFIG_USB_MON is not set
-
-#
-# USB Host Controller Drivers
-#
-# CONFIG_USB_C67X00_HCD is not set
-CONFIG_USB_XHCI_HCD=y
-# CONFIG_USB_XHCI_DBGCAP is not set
-CONFIG_USB_XHCI_PCI=y
-CONFIG_USB_XHCI_PLATFORM=y
-# CONFIG_USB_XHCI_HISTB is not set
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_EHCI_PCI=y
-# CONFIG_USB_EHCI_FSL is not set
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-# CONFIG_USB_OXU210HP_HCD is not set
-# CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_FOTG210_HCD is not set
-# CONFIG_USB_MAX3421_HCD is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PCI=y
-# CONFIG_USB_OHCI_HCD_PLATFORM is not set
-CONFIG_USB_UHCI_HCD=y
-# CONFIG_USB_SL811_HCD is not set
-# CONFIG_USB_R8A66597_HCD is not set
-# CONFIG_USB_HCD_TEST_MODE is not set
-
-#
-# USB Device Class drivers
-#
-CONFIG_USB_ACM=y
-CONFIG_USB_PRINTER=m
-# CONFIG_USB_WDM is not set
-# CONFIG_USB_TMC is not set
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
-#
-
-#
-# also be needed; see USB_STORAGE Help for more info
-#
-CONFIG_USB_STORAGE=y
-# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_REALTEK is not set
-# CONFIG_USB_STORAGE_DATAFAB is not set
-# CONFIG_USB_STORAGE_FREECOM is not set
-# CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_USBAT is not set
-# CONFIG_USB_STORAGE_SDDR09 is not set
-# CONFIG_USB_STORAGE_SDDR55 is not set
-# CONFIG_USB_STORAGE_JUMPSHOT is not set
-# CONFIG_USB_STORAGE_ALAUDA is not set
-# CONFIG_USB_STORAGE_ONETOUCH is not set
-# CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
-# CONFIG_USB_STORAGE_ENE_UB6250 is not set
-# CONFIG_USB_UAS is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-# CONFIG_USBIP_CORE is not set
-# CONFIG_USB_CDNS3 is not set
-# CONFIG_USB_MUSB_HDRC is not set
-# CONFIG_USB_DWC3 is not set
-# CONFIG_USB_DWC2 is not set
-# CONFIG_USB_CHIPIDEA is not set
-# CONFIG_USB_ISP1760 is not set
-
-#
-# USB port drivers
-#
-# CONFIG_USB_USS720 is not set
-CONFIG_USB_SERIAL=y
-# CONFIG_USB_SERIAL_CONSOLE is not set
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_SIMPLE=y
-# CONFIG_USB_SERIAL_AIRCABLE is not set
-# CONFIG_USB_SERIAL_ARK3116 is not set
-# CONFIG_USB_SERIAL_BELKIN is not set
-# CONFIG_USB_SERIAL_CH341 is not set
-# CONFIG_USB_SERIAL_WHITEHEAT is not set
-# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
-# CONFIG_USB_SERIAL_CP210X is not set
-# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
-# CONFIG_USB_SERIAL_EMPEG is not set
-# CONFIG_USB_SERIAL_FTDI_SIO is not set
-# CONFIG_USB_SERIAL_VISOR is not set
-# CONFIG_USB_SERIAL_IPAQ is not set
-# CONFIG_USB_SERIAL_IR is not set
-# CONFIG_USB_SERIAL_EDGEPORT is not set
-# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
-# CONFIG_USB_SERIAL_F81232 is not set
-# CONFIG_USB_SERIAL_F8153X is not set
-# CONFIG_USB_SERIAL_GARMIN is not set
-# CONFIG_USB_SERIAL_IPW is not set
-# CONFIG_USB_SERIAL_IUU is not set
-# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
-# CONFIG_USB_SERIAL_KEYSPAN is not set
-# CONFIG_USB_SERIAL_KLSI is not set
-# CONFIG_USB_SERIAL_KOBIL_SCT is not set
-# CONFIG_USB_SERIAL_MCT_U232 is not set
-# CONFIG_USB_SERIAL_METRO is not set
-# CONFIG_USB_SERIAL_MOS7720 is not set
-# CONFIG_USB_SERIAL_MOS7840 is not set
-# CONFIG_USB_SERIAL_MXUPORT is not set
-# CONFIG_USB_SERIAL_NAVMAN is not set
-CONFIG_USB_SERIAL_PL2303=m
-# CONFIG_USB_SERIAL_OTI6858 is not set
-# CONFIG_USB_SERIAL_QCAUX is not set
-# CONFIG_USB_SERIAL_QUALCOMM is not set
-# CONFIG_USB_SERIAL_SPCP8X5 is not set
-# CONFIG_USB_SERIAL_SAFE is not set
-# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
-# CONFIG_USB_SERIAL_SYMBOL is not set
-# CONFIG_USB_SERIAL_TI is not set
-# CONFIG_USB_SERIAL_CYBERJACK is not set
-# CONFIG_USB_SERIAL_XIRCOM is not set
-# CONFIG_USB_SERIAL_OPTION is not set
-# CONFIG_USB_SERIAL_OMNINET is not set
-# CONFIG_USB_SERIAL_OPTICON is not set
-# CONFIG_USB_SERIAL_XSENS_MT is not set
-# CONFIG_USB_SERIAL_WISHBONE is not set
-# CONFIG_USB_SERIAL_SSU100 is not set
-# CONFIG_USB_SERIAL_QT2 is not set
-# CONFIG_USB_SERIAL_UPD78F0730 is not set
-# CONFIG_USB_SERIAL_DEBUG is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_SEVSEG is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_CYPRESS_CY7C63 is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_IDMOUSE is not set
-# CONFIG_USB_FTDI_ELAN is not set
-# CONFIG_USB_APPLEDISPLAY is not set
-# CONFIG_USB_SISUSBVGA is not set
-# CONFIG_USB_LD is not set
-# CONFIG_USB_TRANCEVIBRATOR is not set
-# CONFIG_USB_IOWARRIOR is not set
-# CONFIG_USB_TEST is not set
-# CONFIG_USB_EHSET_TEST_FIXTURE is not set
-# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_YUREX is not set
-# CONFIG_USB_EZUSB_FX2 is not set
-# CONFIG_USB_HUB_USB251XB is not set
-# CONFIG_USB_HSIC_USB3503 is not set
-# CONFIG_USB_HSIC_USB4604 is not set
-# CONFIG_USB_LINK_LAYER_TEST is not set
-# CONFIG_USB_CHAOSKEY is not set
-# CONFIG_USB_ATM is not set
-
-#
-# USB Physical Layer drivers
-#
-# CONFIG_NOP_USB_XCEIV is not set
-# CONFIG_USB_GPIO_VBUS is not set
-# CONFIG_USB_ISP1301 is not set
-# CONFIG_USB_ULPI is not set
-# end of USB Physical Layer drivers
-
-CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_VBUS_DRAW=2
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-
-#
-# USB Peripheral Controller
-#
-# CONFIG_USB_FOTG210_UDC is not set
-# CONFIG_USB_GR_UDC is not set
-# CONFIG_USB_R8A66597 is not set
-# CONFIG_USB_PXA27X is not set
-# CONFIG_USB_MV_UDC is not set
-# CONFIG_USB_MV_U3D is not set
-# CONFIG_USB_SNP_UDC_PLAT is not set
-# CONFIG_USB_M66592 is not set
-# CONFIG_USB_BDC_UDC is not set
-# CONFIG_USB_AMD5536UDC is not set
-# CONFIG_USB_NET2272 is not set
-# CONFIG_USB_NET2280 is not set
-# CONFIG_USB_GOKU is not set
-# CONFIG_USB_EG20T is not set
-# CONFIG_USB_GADGET_XILINX is not set
-# CONFIG_USB_DUMMY_HCD is not set
-# end of USB Peripheral Controller
-
-CONFIG_USB_LIBCOMPOSITE=m
-CONFIG_USB_U_ETHER=m
-CONFIG_USB_F_ECM=m
-CONFIG_USB_F_SUBSET=m
-CONFIG_USB_F_RNDIS=m
-# CONFIG_USB_CONFIGFS is not set
-# CONFIG_USB_ZERO is not set
-# CONFIG_USB_AUDIO is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-# CONFIG_USB_ETH_EEM is not set
-# CONFIG_USB_G_NCM is not set
-# CONFIG_USB_GADGETFS is not set
-# CONFIG_USB_FUNCTIONFS is not set
-# CONFIG_USB_MASS_STORAGE is not set
-# CONFIG_USB_GADGET_TARGET is not set
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-# CONFIG_USB_G_NOKIA is not set
-# CONFIG_USB_G_ACM_MS is not set
-# CONFIG_USB_G_MULTI is not set
-# CONFIG_USB_G_HID is not set
-# CONFIG_USB_G_DBGP is not set
-# CONFIG_TYPEC is not set
-# CONFIG_USB_ROLE_SWITCH is not set
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-# CONFIG_LEDS_CLASS_FLASH is not set
-# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
-
-#
-# LED drivers
-#
-# CONFIG_LEDS_AN30259A is not set
-# CONFIG_LEDS_BCM6328 is not set
-# CONFIG_LEDS_BCM6358 is not set
-# CONFIG_LEDS_CR0014114 is not set
-# CONFIG_LEDS_LM3530 is not set
-# CONFIG_LEDS_LM3532 is not set
-# CONFIG_LEDS_LM3642 is not set
-# CONFIG_LEDS_LM3692X is not set
-# CONFIG_LEDS_PCA9532 is not set
-# CONFIG_LEDS_GPIO is not set
-# CONFIG_LEDS_LP3944 is not set
-# CONFIG_LEDS_LP3952 is not set
-# CONFIG_LEDS_LP5521 is not set
-# CONFIG_LEDS_LP5523 is not set
-# CONFIG_LEDS_LP5562 is not set
-# CONFIG_LEDS_LP8501 is not set
-# CONFIG_LEDS_LP8860 is not set
-# CONFIG_LEDS_PCA955X is not set
-# CONFIG_LEDS_PCA963X is not set
-# CONFIG_LEDS_DAC124S085 is not set
-# CONFIG_LEDS_REGULATOR is not set
-# CONFIG_LEDS_BD2802 is not set
-# CONFIG_LEDS_LT3593 is not set
-# CONFIG_LEDS_TCA6507 is not set
-# CONFIG_LEDS_TLC591XX is not set
-# CONFIG_LEDS_LM355x is not set
-# CONFIG_LEDS_IS31FL319X is not set
-# CONFIG_LEDS_IS31FL32XX is not set
-
-#
-# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
-#
-# CONFIG_LEDS_BLINKM is not set
-# CONFIG_LEDS_SYSCON is not set
-# CONFIG_LEDS_MLXREG is not set
-# CONFIG_LEDS_USER is not set
-# CONFIG_LEDS_SPI_BYTE is not set
-# CONFIG_LEDS_TI_LMU_COMMON is not set
-
-#
-# LED Triggers
-#
-CONFIG_LEDS_TRIGGERS=y
-# CONFIG_LEDS_TRIGGER_TIMER is not set
-# CONFIG_LEDS_TRIGGER_ONESHOT is not set
-# CONFIG_LEDS_TRIGGER_DISK is not set
-# CONFIG_LEDS_TRIGGER_MTD is not set
-# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
-# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
-# CONFIG_LEDS_TRIGGER_CPU is not set
-# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
-# CONFIG_LEDS_TRIGGER_GPIO is not set
-# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
-
-#
-# iptables trigger is under Netfilter config (LED target)
-#
-# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
-# CONFIG_LEDS_TRIGGER_CAMERA is not set
-# CONFIG_LEDS_TRIGGER_PANIC is not set
-# CONFIG_LEDS_TRIGGER_NETDEV is not set
-# CONFIG_LEDS_TRIGGER_PATTERN is not set
-# CONFIG_LEDS_TRIGGER_AUDIO is not set
-# CONFIG_ACCESSIBILITY is not set
-CONFIG_INFINIBAND=m
-# CONFIG_INFINIBAND_USER_MAD is not set
-# CONFIG_INFINIBAND_USER_ACCESS is not set
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
-# CONFIG_INFINIBAND_MTHCA is not set
-# CONFIG_INFINIBAND_I40IW is not set
-# CONFIG_MLX4_INFINIBAND is not set
-# CONFIG_INFINIBAND_OCRDMA is not set
-CONFIG_INFINIBAND_HNS=m
-# CONFIG_INFINIBAND_HNS_HIP06 is not set
-# CONFIG_INFINIBAND_HNS_HIP08 is not set
-# CONFIG_INFINIBAND_BNXT_RE is not set
-# CONFIG_RDMA_RXE is not set
-# CONFIG_RDMA_SIW is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_CM is not set
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-# CONFIG_INFINIBAND_SRP is not set
-# CONFIG_INFINIBAND_SRPT is not set
-# CONFIG_INFINIBAND_ISER is not set
-# CONFIG_INFINIBAND_ISERT is not set
-CONFIG_EDAC_SUPPORT=y
-# CONFIG_EDAC is not set
-CONFIG_RTC_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-CONFIG_RTC_SYSTOHC=y
-CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-CONFIG_RTC_NVMEM=y
-
-#
-# RTC interfaces
-#
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-# CONFIG_RTC_DRV_ABB5ZES3 is not set
-# CONFIG_RTC_DRV_ABEOZ9 is not set
-# CONFIG_RTC_DRV_ABX80X is not set
-CONFIG_RTC_DRV_DS1307=y
-# CONFIG_RTC_DRV_DS1307_CENTURY is not set
-# CONFIG_RTC_DRV_DS1374 is not set
-# CONFIG_RTC_DRV_DS1672 is not set
-# CONFIG_RTC_DRV_HYM8563 is not set
-# CONFIG_RTC_DRV_MAX6900 is not set
-# CONFIG_RTC_DRV_RS5C372 is not set
-# CONFIG_RTC_DRV_ISL1208 is not set
-# CONFIG_RTC_DRV_ISL12022 is not set
-# CONFIG_RTC_DRV_ISL12026 is not set
-# CONFIG_RTC_DRV_X1205 is not set
-# CONFIG_RTC_DRV_PCF8523 is not set
-# CONFIG_RTC_DRV_PCF85063 is not set
-# CONFIG_RTC_DRV_PCF85363 is not set
-CONFIG_RTC_DRV_PCF8563=y
-# CONFIG_RTC_DRV_PCF8583 is not set
-# CONFIG_RTC_DRV_M41T80 is not set
-# CONFIG_RTC_DRV_BQ32K is not set
-# CONFIG_RTC_DRV_S35390A is not set
-# CONFIG_RTC_DRV_FM3130 is not set
-# CONFIG_RTC_DRV_RX8010 is not set
-# CONFIG_RTC_DRV_RX8581 is not set
-# CONFIG_RTC_DRV_RX8025 is not set
-# CONFIG_RTC_DRV_EM3027 is not set
-# CONFIG_RTC_DRV_RV3028 is not set
-# CONFIG_RTC_DRV_RV8803 is not set
-# CONFIG_RTC_DRV_SD3078 is not set
-
-#
-# SPI RTC drivers
-#
-# CONFIG_RTC_DRV_M41T93 is not set
-# CONFIG_RTC_DRV_M41T94 is not set
-# CONFIG_RTC_DRV_DS1302 is not set
-# CONFIG_RTC_DRV_DS1305 is not set
-# CONFIG_RTC_DRV_DS1343 is not set
-# CONFIG_RTC_DRV_DS1347 is not set
-# CONFIG_RTC_DRV_DS1390 is not set
-# CONFIG_RTC_DRV_MAX6916 is not set
-# CONFIG_RTC_DRV_R9701 is not set
-# CONFIG_RTC_DRV_RX4581 is not set
-# CONFIG_RTC_DRV_RX6110 is not set
-# CONFIG_RTC_DRV_RS5C348 is not set
-# CONFIG_RTC_DRV_MAX6902 is not set
-# CONFIG_RTC_DRV_PCF2123 is not set
-# CONFIG_RTC_DRV_MCP795 is not set
-CONFIG_RTC_I2C_AND_SPI=y
-
-#
-# SPI and I2C RTC drivers
-#
-# CONFIG_RTC_DRV_DS3232 is not set
-# CONFIG_RTC_DRV_PCF2127 is not set
-# CONFIG_RTC_DRV_RV3029C2 is not set
-
-#
-# Platform RTC drivers
-#
-# CONFIG_RTC_DRV_DS1286 is not set
-# CONFIG_RTC_DRV_DS1511 is not set
-# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_DS1685_FAMILY is not set
-# CONFIG_RTC_DRV_DS1742 is not set
-# CONFIG_RTC_DRV_DS2404 is not set
-CONFIG_RTC_DRV_EFI=y
-# CONFIG_RTC_DRV_STK17TA8 is not set
-# CONFIG_RTC_DRV_M48T86 is not set
-# CONFIG_RTC_DRV_M48T35 is not set
-# CONFIG_RTC_DRV_M48T59 is not set
-# CONFIG_RTC_DRV_MSM6242 is not set
-# CONFIG_RTC_DRV_BQ4802 is not set
-# CONFIG_RTC_DRV_RP5C01 is not set
-# CONFIG_RTC_DRV_V3020 is not set
-# CONFIG_RTC_DRV_ZYNQMP is not set
-
-#
-# on-CPU RTC drivers
-#
-# CONFIG_RTC_DRV_PL030 is not set
-CONFIG_RTC_DRV_PL031=y
-# CONFIG_RTC_DRV_CADENCE is not set
-# CONFIG_RTC_DRV_FTRTC010 is not set
-# CONFIG_RTC_DRV_SNVS is not set
-# CONFIG_RTC_DRV_R7301 is not set
-
-#
-# HID Sensor RTC drivers
-#
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-
-#
-# DMA Devices
-#
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_ACPI=y
-CONFIG_DMA_OF=y
-# CONFIG_ALTERA_MSGDMA is not set
-# CONFIG_AMBA_PL08X is not set
-# CONFIG_DW_AXI_DMAC is not set
-# CONFIG_FSL_EDMA is not set
-# CONFIG_FSL_QDMA is not set
-# CONFIG_INTEL_IDMA64 is not set
-# CONFIG_K3_DMA is not set
-# CONFIG_MV_XOR_V2 is not set
-# CONFIG_PL330_DMA is not set
-# CONFIG_XILINX_DMA is not set
-# CONFIG_XILINX_ZYNQMP_DMA is not set
-# CONFIG_QCOM_HIDMA_MGMT is not set
-# CONFIG_QCOM_HIDMA is not set
-# CONFIG_DW_DMAC is not set
-# CONFIG_DW_DMAC_PCI is not set
-# CONFIG_DW_EDMA is not set
-# CONFIG_DW_EDMA_PCIE is not set
-
-#
-# DMA Clients
-#
-# CONFIG_ASYNC_TX_DMA is not set
-# CONFIG_DMATEST is not set
-
-#
-# DMABUF options
-#
-CONFIG_SYNC_FILE=y
-# CONFIG_SW_SYNC is not set
-# CONFIG_UDMABUF is not set
-# CONFIG_DMABUF_SELFTESTS is not set
-# end of DMABUF options
-
-# CONFIG_AUXDISPLAY is not set
-# CONFIG_PANEL is not set
-CONFIG_UIO=m
-# CONFIG_UIO_CIF is not set
-# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_DMEM_GENIRQ is not set
-# CONFIG_UIO_AEC is not set
-# CONFIG_UIO_SERCOS3 is not set
-CONFIG_UIO_PCI_GENERIC=m
-# CONFIG_UIO_NETX is not set
-# CONFIG_UIO_PRUSS is not set
-# CONFIG_UIO_MF624 is not set
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO_VIRQFD=m
-CONFIG_VFIO=m
-CONFIG_VFIO_NOIOMMU=y
-CONFIG_VFIO_PCI=m
-CONFIG_VFIO_PCI_MMAP=y
-CONFIG_VFIO_PCI_INTX=y
-CONFIG_VFIO_PLATFORM=m
-CONFIG_VFIO_AMBA=m
-CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET=m
-CONFIG_VFIO_PLATFORM_AMDXGBE_RESET=m
-CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
-CONFIG_VIRT_DRIVERS=y
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_MENU=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=m
-CONFIG_VIRTIO_MMIO=y
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-
-#
-# Microsoft Hyper-V guest support
-#
-# end of Microsoft Hyper-V guest support
-
-# CONFIG_GREYBUS is not set
-CONFIG_STAGING=y
-# CONFIG_COMEDI is not set
-# CONFIG_RTS5208 is not set
-# CONFIG_FB_SM750 is not set
-
-#
-# Speakup console speech
-#
-# CONFIG_SPEAKUP is not set
-# end of Speakup console speech
-
-# CONFIG_STAGING_MEDIA is not set
-
-#
-# Android
-#
-# end of Android
-
-# CONFIG_STAGING_BOARD is not set
-# CONFIG_LTE_GDM724X is not set
-# CONFIG_GS_FPGABOOT is not set
-# CONFIG_UNISYSSPAR is not set
-# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
-# CONFIG_FB_TFT is not set
-# CONFIG_MOST is not set
-# CONFIG_PI433 is not set
-
-#
-# Gasket devices
-#
-# CONFIG_STAGING_GASKET_FRAMEWORK is not set
-# end of Gasket devices
-
-# CONFIG_XIL_AXIS_FIFO is not set
-# CONFIG_FIELDBUS_DEV is not set
-# CONFIG_KPC2000 is not set
-
-#
-# ISDN CAPI drivers
-#
-# CONFIG_CAPI_AVM is not set
-# CONFIG_ISDN_DRV_GIGASET is not set
-# CONFIG_HYSDN is not set
-# end of ISDN CAPI drivers
-
-# CONFIG_USB_WUSB_CBAF is not set
-# CONFIG_UWB is not set
-# CONFIG_EXFAT_FS is not set
-# CONFIG_QLGE is not set
-# CONFIG_GOLDFISH is not set
-# CONFIG_MFD_CROS_EC is not set
-# CONFIG_CHROME_PLATFORMS is not set
-# CONFIG_MELLANOX_PLATFORM is not set
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_COMMON_CLK=y
-
-#
-# Common Clock Framework
-#
-CONFIG_COMMON_CLK_VERSATILE=y
-CONFIG_CLK_SP810=y
-# CONFIG_CLK_HSDK is not set
-# CONFIG_COMMON_CLK_MAX9485 is not set
-# CONFIG_COMMON_CLK_SI5341 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI514 is not set
-# CONFIG_COMMON_CLK_SI544 is not set
-# CONFIG_COMMON_CLK_SI570 is not set
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CDCE925 is not set
-# CONFIG_COMMON_CLK_CS2000_CP is not set
-# CONFIG_CLK_QORIQ is not set
-CONFIG_COMMON_CLK_XGENE=y
-# CONFIG_COMMON_CLK_VC5 is not set
-# CONFIG_COMMON_CLK_FIXED_MMIO is not set
-CONFIG_COMMON_CLK_HI3516CV300=y
-CONFIG_COMMON_CLK_HI3519=y
-CONFIG_COMMON_CLK_HI3660=y
-CONFIG_COMMON_CLK_HI3670=y
-CONFIG_COMMON_CLK_HI3798CV200=y
-CONFIG_COMMON_CLK_HI6220=y
-CONFIG_RESET_HISI=y
-# end of Common Clock Framework
-
-# CONFIG_HWSPINLOCK is not set
-
-#
-# Clock Source drivers
-#
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_ACPI=y
-CONFIG_TIMER_PROBE=y
-CONFIG_CLKSRC_MMIO=y
-CONFIG_ARM_ARCH_TIMER=y
-CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
-CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y
-CONFIG_FSL_ERRATUM_A008585=y
-CONFIG_HISILICON_ERRATUM_161010101=y
-CONFIG_ARM64_ERRATUM_858921=y
-CONFIG_ARM_TIMER_SP804=y
-# end of Clock Source drivers
-
-# CONFIG_MAILBOX is not set
-CONFIG_IOMMU_IOVA=y
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-
-#
-# Generic IOMMU Pagetable Support
-#
-CONFIG_IOMMU_IO_PGTABLE=y
-CONFIG_IOMMU_IO_PGTABLE_LPAE=y
-# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
-CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
-# CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST is not set
-# end of Generic IOMMU Pagetable Support
-
-# CONFIG_IOMMU_DEBUGFS is not set
-# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
-CONFIG_OF_IOMMU=y
-CONFIG_IOMMU_DMA=y
-CONFIG_ARM_SMMU=y
-CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
-CONFIG_ARM_SMMU_V3=y
-CONFIG_VIRTIO_IOMMU=y
-CONFIG_SMMU_BYPASS_DEV=y
-
-#
-# Remoteproc drivers
-#
-# CONFIG_REMOTEPROC is not set
-# end of Remoteproc drivers
-
-#
-# Rpmsg drivers
-#
-# CONFIG_RPMSG_VIRTIO is not set
-# end of Rpmsg drivers
-
-# CONFIG_SOUNDWIRE is not set
-
-#
-# SOC (System On Chip) specific Drivers
-#
-
-#
-# Amlogic SoC drivers
-#
-# end of Amlogic SoC drivers
-
-#
-# Aspeed SoC drivers
-#
-# end of Aspeed SoC drivers
-
-#
-# Broadcom SoC drivers
-#
-# CONFIG_SOC_BRCMSTB is not set
-# end of Broadcom SoC drivers
-
-#
-# NXP/Freescale QorIQ SoC drivers
-#
-# end of NXP/Freescale QorIQ SoC drivers
-
-#
-# i.MX SoC drivers
-#
-# end of i.MX SoC drivers
-
-#
-# Qualcomm SoC drivers
-#
-# end of Qualcomm SoC drivers
-
-# CONFIG_SOC_TI is not set
-
-#
-# Xilinx SoC drivers
-#
-# CONFIG_XILINX_VCU is not set
-# end of Xilinx SoC drivers
-# end of SOC (System On Chip) specific Drivers
-
-# CONFIG_PM_DEVFREQ is not set
-# CONFIG_EXTCON is not set
-# CONFIG_MEMORY is not set
-# CONFIG_IIO is not set
-# CONFIG_NTB is not set
-# CONFIG_VME_BUS is not set
-# CONFIG_PWM is not set
-
-#
-# IRQ chip support
-#
-CONFIG_IRQCHIP=y
-CONFIG_ARM_GIC=y
-CONFIG_ARM_GIC_MAX_NR=1
-CONFIG_ARM_GIC_V2M=y
-CONFIG_ARM_GIC_V3=y
-CONFIG_ARM_GIC_V3_ITS=y
-CONFIG_ARM_GIC_V3_ITS_PCI=y
-# CONFIG_AL_FIC is not set
-CONFIG_HISILICON_IRQ_MBIGEN=y
-CONFIG_PARTITION_PERCPU=y
-# end of IRQ chip support
-
-# CONFIG_IPACK_BUS is not set
-CONFIG_RESET_CONTROLLER=y
-# CONFIG_RESET_TI_SYSCON is not set
-CONFIG_COMMON_RESET_HI3660=y
-CONFIG_COMMON_RESET_HI6220=y
-
-#
-# PHY Subsystem
-#
-# CONFIG_GENERIC_PHY is not set
-# CONFIG_PHY_XGENE is not set
-# CONFIG_BCM_KONA_USB2_PHY is not set
-# CONFIG_PHY_CADENCE_DP is not set
-# CONFIG_PHY_CADENCE_DPHY is not set
-# CONFIG_PHY_CADENCE_SIERRA is not set
-# CONFIG_PHY_FSL_IMX8MQ_USB is not set
-# CONFIG_PHY_MIXEL_MIPI_DPHY is not set
-# CONFIG_PHY_HI6220_USB is not set
-# CONFIG_PHY_HI3660_USB is not set
-# CONFIG_PHY_HISTB_COMBPHY is not set
-# CONFIG_PHY_HISI_INNO_USB2 is not set
-# CONFIG_PHY_PXA_28NM_HSIC is not set
-# CONFIG_PHY_PXA_28NM_USB2 is not set
-# CONFIG_PHY_MAPPHONE_MDM6600 is not set
-# CONFIG_PHY_OCELOT_SERDES is not set
-# end of PHY Subsystem
-
-# CONFIG_POWERCAP is not set
-# CONFIG_MCB is not set
-
-#
-# Performance monitor support
-#
-# CONFIG_ARM_CCI_PMU is not set
-# CONFIG_ARM_CCN is not set
-CONFIG_ARM_PMU=y
-CONFIG_ARM_PMU_ACPI=y
-# CONFIG_ARM_SMMU_V3_PMU is not set
-# CONFIG_ARM_DSU_PMU is not set
-# CONFIG_HISI_PMU is not set
-# CONFIG_ARM_SPE_PMU is not set
-# end of Performance monitor support
-
-CONFIG_RAS=y
-
-#
-# Android
-#
-# CONFIG_ANDROID is not set
-# end of Android
-
-# CONFIG_LIBNVDIMM is not set
-CONFIG_DAX=y
-# CONFIG_DEV_DAX is not set
-CONFIG_NVMEM=y
-CONFIG_NVMEM_SYSFS=y
-
-#
-# HW tracing support
-#
-# CONFIG_STM is not set
-# CONFIG_INTEL_TH is not set
-# end of HW tracing support
-
-# CONFIG_FPGA is not set
-# CONFIG_FSI is not set
-# CONFIG_TEE is not set
-# CONFIG_SIOX is not set
-# CONFIG_SLIMBUS is not set
-# CONFIG_INTERCONNECT is not set
-# CONFIG_COUNTER is not set
-# end of Device Drivers
-
-#
-# File systems
-#
-CONFIG_DCACHE_WORD_ACCESS=y
-# CONFIG_VALIDATE_FS_PARSER is not set
-CONFIG_FS_IOMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-# CONFIG_EXT4_DEBUG is not set
-CONFIG_JBD2=y
-# CONFIG_JBD2_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-# CONFIG_XFS_ONLINE_SCRUB is not set
-# CONFIG_XFS_WARN is not set
-# CONFIG_XFS_DEBUG is not set
-CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
-CONFIG_OCFS2_FS=m
-CONFIG_OCFS2_FS_O2CB=m
-CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
-CONFIG_OCFS2_FS_STATS=y
-CONFIG_OCFS2_DEBUG_MASKLOG=y
-# CONFIG_OCFS2_DEBUG_FS is not set
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
-# CONFIG_BTRFS_DEBUG is not set
-# CONFIG_BTRFS_ASSERT is not set
-# CONFIG_BTRFS_FS_REF_VERIFY is not set
-CONFIG_NILFS2_FS=m
-CONFIG_F2FS_FS=m
-CONFIG_F2FS_STAT_FS=y
-CONFIG_F2FS_FS_XATTR=y
-CONFIG_F2FS_FS_POSIX_ACL=y
-CONFIG_F2FS_FS_SECURITY=y
-# CONFIG_F2FS_CHECK_FS is not set
-# CONFIG_F2FS_IO_TRACE is not set
-# CONFIG_F2FS_FAULT_INJECTION is not set
-CONFIG_FS_DAX=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-# CONFIG_EXPORTFS_BLOCK_OPS is not set
-CONFIG_FILE_LOCKING=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_FS_ENCRYPTION=y
-# CONFIG_FS_VERITY is not set
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_FANOTIFY=y
-CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-# CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=m
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_QUOTACTL=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_AUTOFS_FS=m
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=m
-# CONFIG_VIRTIO_FS is not set
-CONFIG_OVERLAY_FS=m
-# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
-CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
-# CONFIG_OVERLAY_FS_INDEX is not set
-# CONFIG_OVERLAY_FS_XINO_AUTO is not set
-# CONFIG_OVERLAY_FS_METACOPY is not set
-
-#
-# Caches
-#
-CONFIG_FSCACHE=m
-CONFIG_FSCACHE_STATS=y
-# CONFIG_FSCACHE_HISTOGRAM is not set
-# CONFIG_FSCACHE_DEBUG is not set
-# CONFIG_FSCACHE_OBJECT_LIST is not set
-CONFIG_CACHEFILES=m
-# CONFIG_CACHEFILES_DEBUG is not set
-# CONFIG_CACHEFILES_HISTOGRAM is not set
-# end of Caches
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-# end of CD-ROM/DVD Filesystems
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_FAT_DEFAULT_UTF8 is not set
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
-# end of DOS/FAT/NT Filesystems
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_VMCORE=y
-# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_PROC_CHILDREN=y
-CONFIG_KERNFS=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_MEMFD_CREATE=y
-CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_EFIVAR_FS=y
-# end of Pseudo filesystems
-
-CONFIG_MISC_FILESYSTEMS=y
-# CONFIG_ORANGEFS_FS is not set
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=y
-CONFIG_ECRYPT_FS_MESSAGING=y
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
-# CONFIG_JFFS2_SUMMARY is not set
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_FS_POSIX_ACL=y
-CONFIG_JFFS2_FS_SECURITY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_CMODE_NONE is not set
-# CONFIG_JFFS2_CMODE_PRIORITY is not set
-# CONFIG_JFFS2_CMODE_SIZE is not set
-CONFIG_JFFS2_CMODE_FAVOURLZO=y
-CONFIG_UBIFS_FS=m
-# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
-CONFIG_UBIFS_FS_ZSTD=y
-CONFIG_UBIFS_ATIME_SUPPORT=y
-CONFIG_UBIFS_FS_XATTR=y
-CONFIG_UBIFS_FS_SECURITY=y
-# CONFIG_UBIFS_FS_AUTHENTICATION is not set
-CONFIG_CRAMFS=m
-CONFIG_CRAMFS_BLOCKDEV=y
-# CONFIG_CRAMFS_MTD is not set
-CONFIG_SQUASHFS=m
-CONFIG_SQUASHFS_FILE_CACHE=y
-# CONFIG_SQUASHFS_FILE_DIRECT is not set
-# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
-# CONFIG_SQUASHFS_DECOMP_MULTI is not set
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SQUASHFS_LZ4=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-# CONFIG_SQUASHFS_ZSTD is not set
-# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
-# CONFIG_SQUASHFS_EMBEDDED is not set
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-CONFIG_VXFS_FS=m
-CONFIG_MINIX_FS=m
-CONFIG_OMFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_QNX6FS_FS=m
-# CONFIG_QNX6FS_DEBUG is not set
-CONFIG_ROMFS_FS=m
-CONFIG_ROMFS_BACKED_BY_BLOCK=y
-# CONFIG_ROMFS_BACKED_BY_MTD is not set
-# CONFIG_ROMFS_BACKED_BY_BOTH is not set
-CONFIG_ROMFS_ON_BLOCK=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_DEFLATE_COMPRESS=y
-# CONFIG_PSTORE_LZO_COMPRESS is not set
-# CONFIG_PSTORE_LZ4_COMPRESS is not set
-# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
-# CONFIG_PSTORE_842_COMPRESS is not set
-# CONFIG_PSTORE_ZSTD_COMPRESS is not set
-CONFIG_PSTORE_COMPRESS=y
-CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y
-CONFIG_PSTORE_COMPRESS_DEFAULT="deflate"
-# CONFIG_PSTORE_CONSOLE is not set
-# CONFIG_PSTORE_PMSG is not set
-# CONFIG_PSTORE_FTRACE is not set
-CONFIG_PSTORE_RAM=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_UFS_FS_WRITE=y
-# CONFIG_UFS_DEBUG is not set
-# CONFIG_EROFS_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V2=m
-CONFIG_NFS_V3=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_SWAP=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_PNFS_FILE_LAYOUT=m
-CONFIG_PNFS_BLOCK=m
-CONFIG_PNFS_FLEXFILE_LAYOUT=m
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
-CONFIG_NFS_V4_1_MIGRATION=y
-CONFIG_NFS_V4_SECURITY_LABEL=y
-CONFIG_NFS_FSCACHE=y
-# CONFIG_NFS_USE_LEGACY_DNS is not set
-CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFS_DEBUG=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-# CONFIG_NFSD_BLOCKLAYOUT is not set
-# CONFIG_NFSD_SCSILAYOUT is not set
-# CONFIG_NFSD_FLEXFILELAYOUT is not set
-CONFIG_NFSD_V4_SECURITY_LABEL=y
-CONFIG_GRACE_PERIOD=m
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BACKCHANNEL=y
-CONFIG_SUNRPC_SWAP=y
-CONFIG_RPCSEC_GSS_KRB5=m
-# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
-CONFIG_SUNRPC_DEBUG=y
-CONFIG_SUNRPC_XPRT_RDMA=m
-CONFIG_CEPH_FS=m
-CONFIG_CEPH_FSCACHE=y
-CONFIG_CEPH_FS_POSIX_ACL=y
-# CONFIG_CEPH_FS_SECURITY_LABEL is not set
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS2 is not set
-CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_CIFS_DEBUG=y
-# CONFIG_CIFS_DEBUG2 is not set
-# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
-CONFIG_CIFS_DFS_UPCALL=y
-# CONFIG_CIFS_SMB_DIRECT is not set
-CONFIG_CIFS_FSCACHE=y
-CONFIG_CODA_FS=m
-CONFIG_AFS_FS=m
-# CONFIG_AFS_DEBUG is not set
-CONFIG_AFS_FSCACHE=y
-# CONFIG_AFS_DEBUG_CURSOR is not set
-CONFIG_9P_FS=m
-CONFIG_9P_FSCACHE=y
-CONFIG_9P_FS_POSIX_ACL=y
-CONFIG_9P_FS_SECURITY=y
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_MAC_ROMAN=m
-CONFIG_NLS_MAC_CELTIC=m
-CONFIG_NLS_MAC_CENTEURO=m
-CONFIG_NLS_MAC_CROATIAN=m
-CONFIG_NLS_MAC_CYRILLIC=m
-CONFIG_NLS_MAC_GAELIC=m
-CONFIG_NLS_MAC_GREEK=m
-CONFIG_NLS_MAC_ICELAND=m
-CONFIG_NLS_MAC_INUIT=m
-CONFIG_NLS_MAC_ROMANIAN=m
-CONFIG_NLS_MAC_TURKISH=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-# CONFIG_UNICODE is not set
-# end of File systems
-
-#
-# Security options
-#
-CONFIG_KEYS=y
-CONFIG_KEYS_COMPAT=y
-# CONFIG_KEYS_REQUEST_CACHE is not set
-# CONFIG_PERSISTENT_KEYRINGS is not set
-# CONFIG_BIG_KEYS is not set
-# CONFIG_TRUSTED_KEYS is not set
-CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_KEY_DH_OPERATIONS is not set
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
-CONFIG_SECURITY=y
-CONFIG_SECURITY_WRITABLE_HOOKS=y
-CONFIG_SECURITYFS=y
-CONFIG_SECURITY_NETWORK=y
-# CONFIG_SECURITY_INFINIBAND is not set
-CONFIG_SECURITY_NETWORK_XFRM=y
-CONFIG_SECURITY_PATH=y
-CONFIG_LSM_MMAP_MIN_ADDR=32768
-CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
-# CONFIG_HARDENED_USERCOPY is not set
-# CONFIG_FORTIFY_SOURCE is not set
-# CONFIG_STATIC_USERMODEHELPER is not set
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_SECURITY_SELINUX_DEVELOP=y
-CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0
-# CONFIG_SECURITY_SMACK is not set
-# CONFIG_SECURITY_TOMOYO is not set
-CONFIG_SECURITY_APPARMOR=y
-CONFIG_SECURITY_APPARMOR_HASH=y
-CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
-# CONFIG_SECURITY_APPARMOR_DEBUG is not set
-# CONFIG_SECURITY_LOADPIN is not set
-# CONFIG_SECURITY_YAMA is not set
-# CONFIG_SECURITY_SAFESETID is not set
-# CONFIG_SECURITY_LOCKDOWN_LSM is not set
-CONFIG_INTEGRITY=y
-# CONFIG_INTEGRITY_SIGNATURE is not set
-CONFIG_INTEGRITY_AUDIT=y
-CONFIG_IMA=y
-CONFIG_IMA_MEASURE_PCR_IDX=10
-CONFIG_IMA_LSM_RULES=y
-# CONFIG_IMA_TEMPLATE is not set
-CONFIG_IMA_NG_TEMPLATE=y
-# CONFIG_IMA_SIG_TEMPLATE is not set
-CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
-CONFIG_IMA_DEFAULT_HASH_SHA1=y
-# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
-# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
-CONFIG_IMA_DEFAULT_HASH="sha1"
-# CONFIG_IMA_WRITE_POLICY is not set
-# CONFIG_IMA_READ_POLICY is not set
-CONFIG_IMA_APPRAISE=y
-CONFIG_IMA_APPRAISE_BOOTPARAM=y
-# CONFIG_EVM is not set
-# CONFIG_DEFAULT_SECURITY_SELINUX is not set
-# CONFIG_DEFAULT_SECURITY_APPARMOR is not set
-CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
-
-#
-# Kernel hardening options
-#
-
-#
-# Memory initialization
-#
-CONFIG_INIT_STACK_NONE=y
-# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
-# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
-# end of Memory initialization
-# end of Kernel hardening options
-# end of Security options
-
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
-CONFIG_ASYNC_PQ=m
-CONFIG_ASYNC_RAID6_RECOV=m
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=y
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_BLKCIPHER2=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=y
-CONFIG_CRYPTO_AKCIPHER2=y
-CONFIG_CRYPTO_AKCIPHER=y
-CONFIG_CRYPTO_KPP2=y
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=y
-CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_SIMD=m
-CONFIG_CRYPTO_ENGINE=m
-
-#
-# Public-key cryptography
-#
-CONFIG_CRYPTO_RSA=y
-# CONFIG_CRYPTO_DH is not set
-# CONFIG_CRYPTO_ECDH is not set
-# CONFIG_CRYPTO_ECRDSA is not set
-
-#
-# Authenticated Encryption with Associated Data
-#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-# CONFIG_CRYPTO_AEGIS128 is not set
-CONFIG_CRYPTO_SEQIV=y
-CONFIG_CRYPTO_ECHAINIV=m
-
-#
-# Block modes
-#
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_CFB is not set
-CONFIG_CRYPTO_CTR=y
-CONFIG_CRYPTO_CTS=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_LRW=m
-# CONFIG_CRYPTO_OFB is not set
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=y
-CONFIG_CRYPTO_KEYWRAP=m
-# CONFIG_CRYPTO_ADIANTUM is not set
-CONFIG_CRYPTO_ESSIV=m
-
-#
-# Hash modes
-#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-
-#
-# Digest
-#
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_CRC32=m
-# CONFIG_CRYPTO_XXHASH is not set
-CONFIG_CRYPTO_CRCT10DIF=y
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_POLY1305=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_LIB_SHA256=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_SHA3 is not set
-# CONFIG_CRYPTO_SM3 is not set
-# CONFIG_CRYPTO_STREEBOG is not set
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_LIB_AES=y
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_AES_TI is not set
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_LIB_ARC4=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_BLOWFISH_COMMON=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST_COMMON=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_LIB_DES=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_CHACHA20=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-# CONFIG_CRYPTO_SM4 is not set
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-
-#
-# Compression
-#
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=m
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ZSTD=m
-
-#
-# Random Number Generation
-#
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_DRBG_MENU=y
-CONFIG_CRYPTO_DRBG_HMAC=y
-CONFIG_CRYPTO_DRBG_HASH=y
-CONFIG_CRYPTO_DRBG_CTR=y
-CONFIG_CRYPTO_DRBG=y
-CONFIG_CRYPTO_JITTERENTROPY=y
-CONFIG_CRYPTO_USER_API=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-# CONFIG_CRYPTO_STATS is not set
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HW=y
-# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
-# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
-CONFIG_CRYPTO_DEV_CCP=y
-CONFIG_CRYPTO_DEV_CCP_DD=m
-CONFIG_CRYPTO_DEV_SP_CCP=y
-CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
-# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
-# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set
-# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set
-CONFIG_CRYPTO_DEV_VIRTIO=m
-# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-# CONFIG_CRYPTO_DEV_CCREE is not set
-# CONFIG_CRYPTO_DEV_HISI_SEC is not set
-# CONFIG_CRYPTO_DEV_HISI_ZIP is not set
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
-CONFIG_X509_CERTIFICATE_PARSER=y
-# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
-CONFIG_PKCS7_MESSAGE_PARSER=y
-
-#
-# Certificates for signature checking
-#
-# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
-# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
-# end of Certificates for signature checking
-
-CONFIG_BINARY_PRINTF=y
-
-#
-# Library routines
-#
-CONFIG_RAID6_PQ=m
-CONFIG_RAID6_PQ_BENCHMARK=y
-# CONFIG_PACKING is not set
-CONFIG_BITREVERSE=y
-CONFIG_HAVE_ARCH_BITREVERSE=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_NET_UTILS=y
-CONFIG_CORDIC=m
-CONFIG_RATIONAL=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
-# CONFIG_INDIRECT_PIO is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=m
-CONFIG_CRC32=y
-# CONFIG_CRC32_SELFTEST is not set
-CONFIG_CRC32_SLICEBY8=y
-# CONFIG_CRC32_SLICEBY4 is not set
-# CONFIG_CRC32_SARWATE is not set
-# CONFIG_CRC32_BIT is not set
-CONFIG_CRC64=m
-# CONFIG_CRC4 is not set
-CONFIG_CRC7=m
-CONFIG_LIBCRC32C=y
-CONFIG_CRC8=m
-CONFIG_XXHASH=y
-CONFIG_AUDIT_GENERIC=y
-CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
-CONFIG_AUDIT_COMPAT_GENERIC=y
-# CONFIG_RANDOM32_SELFTEST is not set
-CONFIG_842_COMPRESS=m
-CONFIG_842_DECOMPRESS=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=m
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_LZ4_COMPRESS=m
-CONFIG_LZ4HC_COMPRESS=m
-CONFIG_LZ4_DECOMPRESS=y
-CONFIG_ZSTD_COMPRESS=m
-CONFIG_ZSTD_DECOMPRESS=m
-CONFIG_XZ_DEC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
-CONFIG_XZ_DEC_BCJ=y
-CONFIG_XZ_DEC_TEST=m
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_BZIP2=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_XZ=y
-CONFIG_DECOMPRESS_LZO=y
-CONFIG_DECOMPRESS_LZ4=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_ENC8=y
-CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_BTREE=y
-CONFIG_INTERVAL_TREE=y
-CONFIG_XARRAY_MULTI=y
-CONFIG_ASSOCIATIVE_ARRAY=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAS_DMA=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_DMA_DECLARE_COHERENT=y
-CONFIG_ARCH_HAS_SETUP_DMA_OPS=y
-CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y
-CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y
-CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y
-CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y
-CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y
-CONFIG_SWIOTLB=y
-CONFIG_DMA_REMAP=y
-CONFIG_DMA_DIRECT_REMAP=y
-# CONFIG_DMA_API_DEBUG is not set
-CONFIG_SGL_ALLOC=y
-CONFIG_CPU_RMAP=y
-CONFIG_DQL=y
-CONFIG_GLOB=y
-# CONFIG_GLOB_SELFTEST is not set
-CONFIG_NLATTR=y
-CONFIG_LRU_CACHE=m
-CONFIG_CLZ_TAB=y
-CONFIG_IRQ_POLL=y
-CONFIG_MPILIB=y
-CONFIG_DIMLIB=y
-CONFIG_LIBFDT=y
-CONFIG_OID_REGISTRY=y
-CONFIG_UCS2_STRING=y
-CONFIG_HAVE_GENERIC_VDSO=y
-CONFIG_GENERIC_GETTIMEOFDAY=y
-CONFIG_FONT_SUPPORT=y
-CONFIG_FONTS=y
-# CONFIG_FONT_8x8 is not set
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_7x14 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_6x10 is not set
-# CONFIG_FONT_10x18 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-# CONFIG_FONT_TER16x32 is not set
-CONFIG_FONT_AUTOSELECT=y
-CONFIG_SG_POOL=y
-CONFIG_SBITMAP=y
-# CONFIG_STRING_SELFTEST is not set
-# end of Library routines
-
-#
-# Kernel hacking
-#
-
-#
-# printk and dmesg options
-#
-CONFIG_PRINTK_TIME=y
-# CONFIG_PRINTK_CALLER is not set
-CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
-CONFIG_CONSOLE_LOGLEVEL_QUIET=4
-CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
-CONFIG_BOOT_PRINTK_DELAY=y
-CONFIG_DYNAMIC_DEBUG=y
-# end of printk and dmesg options
-
-#
-# Compile-time checks and compiler options
-#
-CONFIG_DEBUG_INFO=y
-# CONFIG_DEBUG_INFO_REDUCED is not set
-# CONFIG_DEBUG_INFO_SPLIT is not set
-CONFIG_DEBUG_INFO_DWARF4=y
-# CONFIG_DEBUG_INFO_BTF is not set
-CONFIG_GDB_SCRIPTS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=2048
-# CONFIG_STRIP_ASM_SYMS is not set
-# CONFIG_READABLE_ASM is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_INSTALL is not set
-CONFIG_OPTIMIZE_INLINING=y
-# CONFIG_DEBUG_SECTION_MISMATCH is not set
-CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-CONFIG_ARCH_WANT_FRAME_POINTERS=y
-CONFIG_FRAME_POINTER=y
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-# end of Compile-time checks and compiler options
-
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
-CONFIG_MAGIC_SYSRQ_SERIAL=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MISC=y
-
-#
-# Memory Debugging
-#
-# CONFIG_PAGE_EXTENSION is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_OWNER is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-# CONFIG_SLUB_STATS is not set
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-# CONFIG_DEBUG_KMEMLEAK is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-# CONFIG_DEBUG_VIRTUAL is not set
-# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_HAVE_ARCH_KASAN=y
-CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
-CONFIG_CC_HAS_KASAN_GENERIC=y
-# CONFIG_KASAN is not set
-CONFIG_KASAN_STACK=1
-# end of Memory Debugging
-
-CONFIG_ARCH_HAS_KCOV=y
-CONFIG_CC_HAS_SANCOV_TRACE_PC=y
-# CONFIG_KCOV is not set
-# CONFIG_DEBUG_SHIRQ is not set
-
-#
-# Debug Lockups and Hangs
-#
-# CONFIG_SOFTLOCKUP_DETECTOR is not set
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-# CONFIG_WQ_WATCHDOG is not set
-# end of Debug Lockups and Hangs
-
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_SCHED_DEBUG=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_TIMEKEEPING is not set
-
-#
-# Lock Debugging (spinlocks, mutexes, etc...)
-#
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
-# CONFIG_DEBUG_RWSEMS is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_DEBUG_ATOMIC_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_LOCK_TORTURE_TEST is not set
-# CONFIG_WW_MUTEX_SELFTEST is not set
-# end of Lock Debugging (spinlocks, mutexes, etc...)
-
-CONFIG_STACKTRACE=y
-# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_HAVE_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_PLIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_DEBUG_CREDENTIALS is not set
-
-#
-# RCU Debugging
-#
-# CONFIG_RCU_PERF_TEST is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_TRACE is not set
-# CONFIG_RCU_EQS_DEBUG is not set
-# end of RCU Debugging
-
-# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
-# CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT is not set
-# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
-CONFIG_FUNCTION_ERROR_INJECTION=y
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_TRACER_MAX_TRACE=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_TRACING=y
-CONFIG_GENERIC_TRACER=y
-CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_FUNCTION_GRAPH_TRACER=y
-# CONFIG_PREEMPTIRQ_EVENTS is not set
-# CONFIG_IRQSOFF_TRACER is not set
-CONFIG_SCHED_TRACER=y
-# CONFIG_HWLAT_TRACER is not set
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT=y
-# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_KPROBE_EVENTS=y
-CONFIG_UPROBE_EVENTS=y
-CONFIG_BPF_EVENTS=y
-CONFIG_DYNAMIC_EVENTS=y
-CONFIG_PROBE_EVENTS=y
-CONFIG_DYNAMIC_FTRACE=y
-CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_BPF_KPROBE_OVERRIDE is not set
-CONFIG_FTRACE_MCOUNT_RECORD=y
-# CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_HIST_TRIGGERS is not set
-# CONFIG_TRACEPOINT_BENCHMARK is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
-# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_PREEMPTIRQ_DELAY_TEST is not set
-# CONFIG_TRACE_EVAL_MAP_FILE is not set
-CONFIG_RUNTIME_TESTING_MENU=y
-# CONFIG_LKDTM is not set
-# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_SORT is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-CONFIG_RBTREE_TEST=m
-# CONFIG_REED_SOLOMON_TEST is not set
-CONFIG_INTERVAL_TREE_TEST=m
-CONFIG_PERCPU_TEST=m
-# CONFIG_ATOMIC64_SELFTEST is not set
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-# CONFIG_TEST_STRSCPY is not set
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-# CONFIG_TEST_BITMAP is not set
-# CONFIG_TEST_BITFIELD is not set
-# CONFIG_TEST_UUID is not set
-# CONFIG_TEST_XARRAY is not set
-# CONFIG_TEST_OVERFLOW is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_HASH is not set
-# CONFIG_TEST_IDA is not set
-CONFIG_TEST_LKM=m
-# CONFIG_TEST_VMALLOC is not set
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-# CONFIG_TEST_BLACKHOLE_DEV is not set
-# CONFIG_FIND_BIT_BENCHMARK is not set
-CONFIG_TEST_FIRMWARE=m
-# CONFIG_TEST_SYSCTL is not set
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-# CONFIG_TEST_KMOD is not set
-# CONFIG_TEST_MEMCAT_P is not set
-# CONFIG_TEST_STACKINIT is not set
-# CONFIG_TEST_MEMINIT is not set
-CONFIG_MEMTEST=y
-# CONFIG_BUG_ON_DATA_CORRUPTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_KGDB=y
-CONFIG_KGDB_SERIAL_CONSOLE=y
-# CONFIG_KGDB_TESTS is not set
-CONFIG_KGDB_KDB=y
-CONFIG_KDB_DEFAULT_ENABLE=0x1
-CONFIG_KDB_KEYBOARD=y
-CONFIG_KDB_CONTINUE_CATASTROPHIC=0
-CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
-# CONFIG_UBSAN is not set
-CONFIG_UBSAN_ALIGNMENT=y
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
-CONFIG_STRICT_DEVMEM=y
-# CONFIG_IO_STRICT_DEVMEM is not set
-# CONFIG_ARM64_PTDUMP_DEBUGFS is not set
-CONFIG_PID_IN_CONTEXTIDR=y
-# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
-# CONFIG_DEBUG_WX is not set
-# CONFIG_DEBUG_ALIGN_RODATA is not set
-# CONFIG_DEBUG_EFI is not set
-# CONFIG_ARM64_RELOC_TEST is not set
-# CONFIG_CORESIGHT is not set
-# end of Kernel hacking
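With package/arm/config.default removed above, any comparison between a saved copy of it and a newer config can lean on the kernel tree's own scripts/diffconfig helper rather than raw diff. A minimal sketch, where both file paths are assumed:

    # Summarize option-level differences between two saved kernel configs
    ./scripts/diffconfig /path/to/old/config.default /path/to/new/config.default
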
diff --git a/package/arm/config.default_kasan b/package/arm/config.default_kasan
index 0fc5e7ca2f00..4e92865c440a 100644
--- a/package/arm/config.default_kasan
+++ b/package/arm/config.default_kasan
@@ -2022,7 +2022,7 @@ CONFIG_DRBD_FAULT_INJECTION=y
 CONFIG_BLK_DEV_NBD=m
 # CONFIG_BLK_DEV_SKD is not set
 # CONFIG_BLK_DEV_SX8 is not set
-CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM=m
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
 CONFIG_CDROM_PKTCDVD=m
@@ -5832,7 +5832,7 @@ CONFIG_DEBUG_INFO=y
 # CONFIG_DEBUG_INFO_REDUCED is not set
 # CONFIG_DEBUG_INFO_SPLIT is not set
 CONFIG_DEBUG_INFO_DWARF4=y
-# CONFIG_DEBUG_INFO_BTF is not set
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=2048
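The two hunks above make the KASAN config build the ramdisk driver as a module instead of built-in, and turn on BTF type information for the kernel image. Both are easy to sanity-check on a booted kernel; a sketch, assuming the brd module and bpftool are available (the module parameters shown are the driver's standard ones, with rd_size in KiB, matching the COUNT/SIZE values in the hunk):

    # brd is now =m, so the ramdisk devices appear only after an explicit load
    modprobe brd rd_nr=16 rd_size=4096
    ls /dev/ram*
    # CONFIG_DEBUG_INFO_BTF=y exposes the vmlinux type information here
    ls /sys/kernel/btf/vmlinux
    bpftool btf dump file /sys/kernel/btf/vmlinux | head
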
- kernel_version+=`grep "^SUBLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` - #kernel_version+=`grep "^EXTRAVERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '` - - echo "kernel version: $kernel_version" -} - -get_tlinux_name() -{ - if [ -n "$tag_name" ]; then - raw_tagged_name="$tag_name" - else - raw_tagged_name=`git describe --tags` - fi - - if [ -z "${raw_tagged_name}" ];then - echo "Error:Can't get kernel version from git tree." - exit 1 - fi - - if [[ $raw_tagged_name != public-arm64-* ]]; then - echo "$raw_tagged_name not valid tag name for public-arm64" - exit 1 - fi - - tagged_name=${raw_tagged_name#public-arm64-} - - echo "${tagged_name}" | grep 'kvm_guest' - if [ $? -eq 0 ]; then - echo "start kvm guest build" - kvm_guest="yes" - fi - - echo "${tagged_name}" | grep 'xen_guest' - if [ $? -eq 0 ]; then - echo "start xen guest build" - xen_guest="yes" - fi - - echo "${tagged_name}" | grep 'ec' - if [ $? -eq 0 ]; then - echo "start ECkernel build" - ec="yes" - fi - - echo "${tagged_name}" | grep 'kasan' - if [ $? -eq 0 ]; then - nosign_suffix="_kasan" - fi - - - #if [ "${tagged_name#*-*-*}" != ${tagged_name} ];then - #echo "Error: bad tag name:$tagged_name." - #exit 1 - #fi - - if [ "$build_perf" = "yes" ]; then - rpm_name="perf" - else - rpm_name="kernel" - fi - #tlinux_release=${tagged_name#*-} - tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3` - extra_version=`echo $tagged_name | cut -d- -f2` - kernel_full_name=${rpm_name}-${kernel_version} - - echo "$tlinux_release" - echo "$kernel_full_name" - echo "extra_version $extra_version" - -} - -prepare_tlinux_spec() -{ - local sign_token - spec_file_name=${build_specdir}/${kernel_full_name}.spec - spec_contents="%define name ${rpm_name} \n" - spec_contents+="%define version ${kernel_version} \n" - spec_contents+="%define release_os ${tlinux_release} \n" - spec_contents+="%define tagged_name ${tagged_name} \n" - spec_contents+="%define extra_version ${extra_version} \n" - sign_token= - if [ "$sign_token" = "" ]; then - sign_token=0 - fi - spec_contents+="%define sign_token ${sign_token} \n" - - - if [ -z "${kernel_type}" ];then - spec_contents+="%define kernel_all_types ${kernel_default_types[@]} \n" - else - spec_contents+="%define kernel_all_types ${kernel_type} \n" - fi - - if [ -n "${make_jobs}" ];then - spec_contents+="%define jobs ${make_jobs} \n\n" - fi - - if [ -e /etc/tlinux-release ];then - spec_contents+="%define debug_package %{nil}\n" - spec_contents+="%define __os_install_post %{nil}\n" - - fi - - if [[ $isXenU == "32" && `uname -m` == "x86_64" ]];then - spec_contents+="%define make_flag \"ARCH=i386\" \n" - fi - - echo -e "${spec_contents}" > $spec_file_name - - if [ "$build_perf" = "yes" ]; then - cat $curdir/perf-tlinux.spec >> $spec_file_name - else - cat $curdir/kernel-tlinux.spec >> $spec_file_name - fi -} - -#main function - -#Parse Parameters -while getopts ":t:j:hdp" option "$@" -do - case $option in - "t" ) - tag_name=$OPTARG - ;; - "j" ) - make_jobs=$OPTARG - ;; - "h" ) - usage - exit 0 - ;; - "p" ) - build_perf="yes" - ;; - "?" ) - usage - exit 1 - ;; - esac -done - - -#test if parameter of kernel type is valid -if [ -n "${kernel_type}" ] && [ ! -e config."${kernel_type}" ] -then - echo "Error: bad kernel type name. Config file for ${kernel_type} does not exist." - usage - exit 1 -elif [ -n "${kernel_type}" ] && [ -n "$isXenU" ] && [ ! -e config."${kernel_type}".xenU ] -then - echo "Error: bad kernel type name. 
Config file for ${kernel_type}.xenU does not exist." - usage - exit 1 - -fi - - -if [ -e /etc/SuSE-release ];then - build_specdir="/usr/src/packages/SPECS" - build_srcdir="/usr/src/packages/SOURCES" -elif [ -e /etc/tlinux-release ];then - build_specdir="$HOME/rpmbuild/SPECS" - build_srcdir="$HOME/rpmbuild/SOURCES" -else - echo "Error: Compile on unknown system." - exit 1 -fi - - -if [ ! -e "${build_srcdir}" ]; then - mkdir $build_srcdir - [ $? == 1 ] && exit 1 -fi - -if [ ! -e "${build_specdir}" ]; then - mkdir $build_specdir - [ $? == 1 ] && exit 1 -fi - -cd ../../ -origsrc=`pwd` - -#if [ -n "$tag_name" ]; then -# local_valid_branch=`git branch | grep "*" | awk '{print $2 }'` -# git checkout "${tag_name}" -# if [ $? -ne 0 ];then -# echo "Error: bad tag name [$tag_name] " -# exit 1 -# fi -#fi - - -get_kernel_version $origsrc -get_tlinux_name -echo "Prepare $kernel_full_name source." -prepare_tlinux_spec - -cp package/default/tlinux_cciss_link.modules $build_srcdir -if [ "$kvm_guest" = "yes" ] ; then - cp package/default/kvm_guest/*.patch $build_srcdir -fi - -if [ "$ec" = "yes" ] ; then - cp package/default/ec/*.patch $build_srcdir -fi - -if test -e ${build_srcdir}/${kernel_full_name}; then - rm -fr ${build_srcdir}/${kernel_full_name} -fi - -#tagged_name is a confirmed tag name and will be used for final source. -git archive --format=tar --prefix=${kernel_full_name}/ ${raw_tagged_name} | (cd ${build_srcdir} && tar xf -) -if [ $? -ne 0 ];then - echo "Error:can't prepare $kernel_full_name source with git archive!" - exit 1 -fi - -cd ${build_srcdir}/${kernel_full_name} - -if [[ $isXenU == "32" ]] -then - config_suffix="xenU32" - target="--target i686" -elif [[ $isXenU == "64" ]] -then - config_suffix="xenU" -fi -echo $config_suffix - -if [ -z "${kernel_type}" ];then - for type_name in "${kernel_default_types[@]}" - do - if [ -n "$isXenU" ]; then - echo mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name{nosign_suffix} - mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$type_name${nosign_suffix} - fi - done -else - if [ -n "$isXenU" ]; then - echo mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - mv -f ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type.$config_suffix${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - - elif [ "$kvm_guest" = "yes" ] ; then - echo \ - cp -f ${build_srcdir}/${kernel_full_name}/package/default/kvm_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - cp -f ${build_srcdir}/${kernel_full_name}/package/default/kvm_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - elif [ "$xen_guest" = "yes" ] ; then - echo \ - cp -f ${build_srcdir}/${kernel_full_name}/package/default/xen_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - cp -f 
${build_srcdir}/${kernel_full_name}/package/default/xen_guest/config.${kernel_type}${config_suffix}${nosign_suffix} ${build_srcdir}/${kernel_full_name}/package/default/config.$kernel_type${nosign_suffix} - fi -fi - -cd .. -tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name} -rm -fr ${kernel_full_name} - -#if [ -n "$tag_name" ];then -# cd $origsrc -# git checkout $local_valid_branch > /dev/null -#fi - - -echo "Build kernel rpm package." -if [ $build_src_only -eq 0 ]; then - rpmbuild -bb ${target} ${spec_file_name} - sed -i 's/^%define sign_token.*/%define sign_token please_enter_your_token_here/' ${spec_file_name} -else - if [ $strip_sign_token -ne 0 ]; then - sed -i 's/^%define sign_token.*/%define sign_token please_enter_your_token_here/' ${spec_file_name} - fi -fi - -echo "Build kernel source rpm package." - -cd ${build_srcdir} - -tar -zxf ${kernel_full_name}.tar.gz - -rm -fr ${kernel_full_name}/package/default/generate-rpms.sh -tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name} - -rpmbuild -bs ${target} ${spec_file_name} diff --git a/package/arm/kernel-tlinux.spec b/package/arm/kernel-tlinux.spec deleted file mode 100644 index aa506255f210..000000000000 --- a/package/arm/kernel-tlinux.spec +++ /dev/null @@ -1,551 +0,0 @@ -%global with_debuginfo 0 -%global with_perf 1 -%if 0%{?rhel} == 6 -%global rdist .tl1 -%global debug_path /usr/lib/debug/lib/ -%else -%global debug_path /usr/lib/debug/usr/lib/ -%if 0%{?rhel} == 7 -%global rdist .tl2 -%endif -%if 0%{?rhel} == 8 -%global rdist .tl3 -%global __python /usr/bin/python2 -%global _enable_debug_packages %{nil} -%global debug_package %{nil} -%global __debug_package %{nil} -%global __debug_install_post %{nil} -%global _build_id_links none -%endif -%endif - -%global dist %{nil} - -Summary: Kernel for Tencent physical machine -Name: %{name} -Version: %{version} -Release: %{release_os}%{?rdist} -License: GPLv2 -Vendor: Tencent -Packager: tlinux team <g_APD_SRDC_OS@tencent.com> -Provides: kernel = %{version}-%{release} -Group: System Environment/Kernel -Source0: %{name}-%{version}.tar.gz -Source1: tlinux_cciss_link.modules -URL: http://www.tencent.com -ExclusiveArch: aarch64 -Distribution: Tencent Linux -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build -BuildRequires: wget bc module-init-tools curl -%if %{with_perf} -BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex -BuildRequires: xmlto asciidoc hmaccalc -BuildRequires: audit-libs-devel -#BuildRequires: uboot-tools -%if 0%{?rhel} == 8 -BuildRequires: python2-devel -%else -BuildRequires: python-devel -%endif - - -%ifnarch s390 s390x -BuildRequires: numactl-devel -%endif -%endif -Requires(pre): linux-firmware >= 20100806-2 - -# for the 'hostname' command -%if 0%{?rhel} == 7 -BuildRequires: hostname -%else -BuildRequires: net-tools -%endif - -%description -This package contains tlinux kernel for arm64 Bare-Metal& VM - -%package debuginfo-common -Summary: tlinux kernel vmlinux for crash debug -Release: %{release} -Group: System Environment/Kernel -Provides: kernel-debuginfo-common kernel-debuginfo -AutoReqprov: no - -%description debuginfo-common -This package container vmlinux, System.map and config files for kernel -debugging. 
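Throughout this spec the release string is assembled as %{release_os}%{?rdist}, with rdist chosen once in the preamble from %{?rhel} (.tl1 for rhel6, .tl2 for rhel7, .tl3 for rhel8). A minimal shell rendering of that mapping, assuming rhel, version and release_os are supplied by the environment (the example values in the final echo are illustrative only):

    #!/bin/bash
    # Mirror of the spec's %{?rhel} -> %rdist selection (sketch only).
    case "${rhel:?set rhel to 6, 7 or 8}" in
        6) rdist=.tl1 ;;
        7) rdist=.tl2 ;;
        8) rdist=.tl3 ;;
        *) rdist=     ;;   # unknown distro: no suffix
    esac
    # illustrative result, e.g. kernel-4.14.99-<release_os>.tl3
    echo "kernel-${version:-4.14.99}-${release_os:-19}${rdist}"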
- -%package devel -Summary: Development package for building kernel modules to match the %{version}-%{release} kernel -Release: %{release} -Group: System Environment/Kernel -AutoReqprov: no - -%description devel -This package provides kernel headers and makefiles sufficient to build modules -against the %{version}-%{release} kernel package. - - -%package headers -Summary: Header files for the Linux kernel for use by glibc -Group: Development/System -Obsoletes: glibc-kernheaders -Provides: glibc-kernheaders = 3.0-46 -AutoReqprov: no - -%description headers -Kernel-headers includes the C header files that specify the interface -between the Linux kernel and userspace libraries and programs. The -header files define structures and constants that are needed for -building most standard programs and are also needed for rebuilding the -glibc package. - -%if %{with_perf} -%package -n perf -Summary: Performance monitoring for the Linux kernel -Group: Development/System -License: GPLv2 -%description -n perf -This package contains the perf tool, which enables performance monitoring -of the Linux kernel. - -%package -n perf-debuginfo -Summary: Debug information for package perf -Group: Development/Debug -#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} -AutoReqProv: no -%description -n perf-debuginfo -This package provides debug information for the perf package. - -# Note that this pattern only works right to match the .build-id -# symlinks because of the trailing nonmatching alternation and -# the leading .*, because of find-debuginfo.sh's buggy handling -# of matching the pattern against the symlinks file. -%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|XXX' -o perf-debuginfo.list} - -%package -n python-perf -Summary: Python bindings for apps which will manipulate perf events -Group: Development/Libraries -%description -n python-perf -The python-perf package contains a module that permits applications -written in the Python programming language to use the interface -to manipulate perf events. - -%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} - -%package -n python-perf-debuginfo -Summary: Debug information for package perf python bindings -Group: Development/Debug -#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release} -AutoReqProv: no -%description -n python-perf-debuginfo -This package provides debug information for the perf python bindings. 
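When the distro macro set does not already define python_sitearch, the %{!?python_sitearch:...} fallback above asks the interpreter itself for the arch-specific site-packages directory. The equivalent shell probe, assuming a python2 interpreter as the spec's %__python is on tl3:

    # Shell equivalent of the python_sitearch fallback macro (sketch).
    python_sitearch=$(python2 -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")
    echo "python-perf installs its extension under ${python_sitearch}"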
- -# the python_sitearch macro should already be defined from above -%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{python_sitearch}/perf.so(\.debug)?|XXX' -o python-perf-debuginfo.list} - - -%endif # with_perf - -# prep ######################################################################### -%prep -%setup -q -c -T -a 0 - -# build ######################################################################## -%build - - -cd %{name}-%{version} - -all_types="%{kernel_all_types}" -no_sign="yes" -for type_name in $all_types -do - sed -i '/^EXTRAVERSION/cEXTRAVERSION = -%{extra_version}' Makefile - objdir=../%{name}-%{version}-obj-${type_name} - [ -d ${objdir} ] || mkdir ${objdir} - echo "include $PWD/Makefile" > ${objdir}/Makefile - cp ./package/arm/config.${type_name} ${objdir}/.config - localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"' - sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config - make -C ${objdir} olddefconfig > /dev/null - make -j16 -C ${objdir} RPM_BUILD_MODULE=y %{?jobs:-j %jobs} all > /dev/null - -# dealing with files under lib/modules - make -j16 -C ${objdir} RPM_BUILD_MODULE=y modules > /dev/null - - %global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix} - %if %{with_perf} - # perf - %{perf_make} all - %{perf_make} man - %endif -done - -# install ###################################################################### -%install - - -echo install %{version} -arch=`uname -m` -if [ "$arch" != "aarch64" ];then - echo "Unexpected error. Cannot build this rpm in non-aarch64 platform\n" - exit 1 -fi -mkdir -p %buildroot/boot -cd %{name}-%{version} - -all_types="%{kernel_all_types}" -for type_name in $all_types -do - elfname=vmlinuz-%{tagged_name}%{?dist} - mapname=System.map-%{tagged_name}%{?dist} - configname=config-%{tagged_name}%{?dist} - builddir=../%{name}-%{version}-obj-${type_name} -%if 0%{?rhel} == 7 - cp -rpf ${builddir}/arch/arm64/boot/Image %buildroot/boot/${elfname} -%endif - #mkimage -A arm64 -O linux -T kernel -C none -a 0x80080000 -e 0x80080000 -n %{tagged_name} -d ${builddir}/arch/arm64/boot/Image %buildroot/boot/uImage-%{tagged_name}%{?dist} - cp -rpf ${builddir}/System.map %buildroot/boot/${mapname} - cp -rpf ${builddir}/.config %buildroot/boot/${configname} - cp -rpf ${builddir}/vmlinux %buildroot/boot/vmlinux-%{tagged_name}%{?dist} - sha512hmac %buildroot/boot/${elfname} | sed -e "s,$RPM_BUILD_ROOT,," > %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac - - # dealing with kernel-devel package - objdir=${builddir} - #KernelVer=%{version}-%{title}-%{release_os}-${type_name} - KernelVer=%{tagged_name}%{?dist} - - - - ####### make kernel-devel package: methods comes from rhel6(kernel.spec) ####### - make -C ${objdir} RPM_BUILD_MODULE=y INSTALL_MOD_PATH=%buildroot modules_install > /dev/null - #don't package firmware, use linux-firmware rpm instead - rm -rf %buildroot/lib/firmware - # And save the headers/makefiles etc for building modules against - # - # This all looks scary, but the end result is supposed to be: - # * all arch relevant include/ files - # * all Makefile/Kconfig files - # * all script/ files - rm -rf %buildroot/lib/modules/${KernelVer}/build - rm -rf %buildroot/lib/modules/${KernelVer}/source - mkdir -p %buildroot/lib/modules/${KernelVer}/build - (cd $RPM_BUILD_ROOT/lib/modules/${KernelVer} ; ln -s build source) - mkdir -p %buildroot/lib/modules/${KernelVer}/source/arch/arm64/kernel && cp -pf 
arch/arm64/kernel/module.lds %buildroot/lib/modules/${KernelVer}/source/arch/arm64/kernel/ - # dirs for additional modules per module-init-tools, kbuild/modules.txt - mkdir -p %buildroot/lib/modules/${KernelVer}/extra - mkdir -p %buildroot/lib/modules/${KernelVer}/updates - mkdir -p %buildroot/lib/modules/${KernelVer}/weak-updates -%if 0%{?rhel} == 8 - cp -rpf ${builddir}/arch/arm64/boot/Image %buildroot/lib/modules/${KernelVer}/vmlinuz -%endif - # first copy everything - cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` %buildroot/lib/modules/${KernelVer}/build - cp ${builddir}/Module.symvers %buildroot/lib/modules/${KernelVer}/build - cp ${builddir}/System.map %buildroot/lib/modules/${KernelVer}/build - - if [ -s ${builddir}/Module.markers ]; then - cp ${builddir}/Module.markers %buildroot/lib/modules/${KernelVer}/build - fi - - # create the kABI metadata for use in packaging - echo "**** GENERATING kernel ABI metadata ****" - gzip -c9 < ${builddir}/Module.symvers > %buildroot/boot/symvers-${KernelVer}.gz - # chmod 0755 %_sourcedir/kabitool - # rm -f %{_tmppath}/kernel-$KernelVer-kabideps - # %_sourcedir/kabitool -s Module.symvers -o %{_tmppath}/kernel-$KernelVer-kabideps - - rm -rf %buildroot/lib/modules/${KernelVer}/build/Documentation - rm -rf %buildroot/lib/modules/${KernelVer}/build/scripts - rm -rf %buildroot/lib/modules/${KernelVer}/build/include - cp ${builddir}/.config %buildroot/lib/modules/${KernelVer}/build - cp -a scripts %buildroot/lib/modules/${KernelVer}/build - cp -a ${builddir}/scripts %buildroot/lib/modules/${KernelVer}/build - rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*.o - rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*/*.o - cp -a --parents arch/arm64/include %buildroot/lib/modules/${KernelVer}/build/ - (cd ${builddir} ; cp -a --parents arch/arm64/include/generated %buildroot/lib/modules/${KernelVer}/build/) - mkdir -p %buildroot/lib/modules/${KernelVer}/build/include - - cp -a ${builddir}/include/config %buildroot/lib/modules/${KernelVer}/build/include/ - cp -a ${builddir}/include/generated %buildroot/lib/modules/${KernelVer}/build/include/ - - cd include - cp -a acpi asm-generic clocksource crypto drm dt-bindings keys kvm linux math-emu media misc net pcmcia ras rdma scsi soc sound target trace uapi video xen %buildroot/lib/modules/${KernelVer}/build/include - - cp -a ../arch/arm64 $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/asm-arm64 - pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include - ln -s asm-arm64 asm - popd - - # Make sure the Makefile and version.h have a matching timestamp so that - # external modules can be built - touch -r %buildroot/lib/modules/${KernelVer}/build/Makefile %buildroot/lib/modules/${KernelVer}/build/include/generated/uapi/linux/version.h - #touch -r %buildroot/lib/modules/${KernelVer}/build/.config %buildroot/lib/modules/${KernelVer}/build/include/linux/autoconf.h - # Copy .config to include/config/auto.conf so "make prepare" is unnecessary. - cp %buildroot/lib/modules/$KernelVer/build/.config %buildroot/lib/modules/${KernelVer}/build/include/config/auto.conf - cd .. - - - find %buildroot/lib/modules/${KernelVer} -name "*.ko" -type f >modnames - - - mkdir -p %buildroot%debug_path/modules/${KernelVer} - cp -r %buildroot/lib/modules/${KernelVer}/kernel %buildroot%debug_path/modules/${KernelVer} - xargs --no-run-if-empty strip -g < modnames - - xargs --no-run-if-empty chmod u+x < modnames - - # Generate a list of modules for block and networking. 
- fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA | - sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef - - collect_modules_list() - { - sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | - LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 - if [ ! -z "$3" ]; then - sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 - fi - } - - collect_modules_list networking 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt2x00(pci|usb)_probe|register_netdevice' - collect_modules_list block 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' - collect_modules_list drm 'drm_open|drm_init' - collect_modules_list modesetting 'drm_crtc_init' - # detect missing or incorrect license tags - rm -f modinfo - while read i - do - echo -n "${i#%buildroot/lib/modules/${KernelVer}/} " >> modinfo - /sbin/modinfo -l $i >> modinfo - done < modnames - - egrep -v \ - 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \ - modinfo && exit 1 - - rm -f modinfo modnames - # remove files that will be auto generated by depmod at rpm -i time - #for i in alias alias.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap - #do - # rm -f %buildroot/lib/modules/${KernelVer}/modules.$i - #done - - # Move the devel headers out of the root file system - mkdir -p %buildroot/usr/src/kernels - mv %buildroot/lib/modules/${KernelVer}/build %buildroot/usr/src/kernels/${KernelVer} - ln -sf /usr/src/kernels/${KernelVer} %buildroot/lib/modules/${KernelVer}/build - # - # Generate the kernel-modules-* files lists - # -%if 0%{?rhel} == 8 - mkdir -p %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/boot/$configname %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/boot/System.map-$KernelVer %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/lib/modules/$KernelVer/dist_compat -%endif - - - # Copy the System.map file for depmod to use, and create a backup of the - # full module tree so we can restore it after we're done filtering - cp ${builddir}/System.map $RPM_BUILD_ROOT/. - pushd $RPM_BUILD_ROOT - mkdir restore - - - cp -r lib/modules/$KernelVer/* restore/. 
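collect_modules_list above buckets the staged modules by the kernel symbols they import: nm -upA dumps each module's undefined symbols, and a regex over that dump decides whether a driver belongs to the networking, block, drm or modesetting group. The same idea reduced to a single bucket (a sketch; paths are illustrative and assume the staged buildroot as the working directory):

    #!/bin/bash
    # Classify .ko files by their undefined (imported) symbols, as
    # collect_modules_list does above. One bucket shown.
    find lib/modules -name '*.ko' -type f > modnames
    grep /drivers/ modnames | xargs --no-run-if-empty nm -upA |
        sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef
    # modules that import netdev registration symbols are networking drivers
    sed -r -n 's/^([^ ]+) \.?(register_netdev|register_netdevice)$/\1/p' drivers.undef |
        LC_ALL=C sort -u > modules.networking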
- - find lib/modules/$KernelVer/ -not -type d | sort -n > modules.list - find lib/modules/$KernelVer/ -type d | sort -n > module-dirs.list - - # Run depmod on the resulting module tree and make sure it isn't broken - depmod -b . -aeF ./System.map $KernelVer &> depmod.out - if [ -s depmod.out ]; then - echo "Depmod failure" - cat depmod.out - exit 1 - else - rm depmod.out - fi - - # Cleanup - rm System.map - cp -r restore/* lib/modules/$KernelVer/. - rm -rf restore - popd - -%if 0%{?rhel} == 8 - mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat - mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat -%endif - - # Make sure the files lists start with absolute paths or rpmbuild fails. - # Also add in the dir entries - sed -e 's/^lib*/%dir \/lib/' $RPM_BUILD_ROOT/module-dirs.list > ../core.list - sed -e 's/^lib*/\/lib/' $RPM_BUILD_ROOT/modules.list >> ../core.list - - # Cleanup - rm -f $RPM_BUILD_ROOT/modules.list - rm -f $RPM_BUILD_ROOT/module-dirs.list - - %if %{with_perf} - # perf tool binary and supporting scripts/binaries - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install - - # perf-python extension - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext - - # perf man pages (note: implicit rpm magic compresses them later) - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man - %endif -done - -mkdir -p %buildroot/etc/sysconfig/modules -install -m755 %SOURCE1 %buildroot/etc/sysconfig/modules - -#### Header -make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install -# Do headers_check but don't die if it fails. -make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check \ - > hdrwarnings.txt || : -if grep -q exist hdrwarnings.txt; then - sed s:^$RPM_BUILD_ROOT/usr/include/:: hdrwarnings.txt - # Temporarily cause a build failure if header inconsistencies. - # exit 1 -fi - -find $RPM_BUILD_ROOT/usr/include \ - \( -name .install -o -name .check -o \ - -name ..install.cmd -o -name ..check.cmd \) | xargs rm -f - -# glibc provides scsi headers for itself, for now -rm -rf $RPM_BUILD_ROOT/usr/include/scsi -rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h -rm -f $RPM_BUILD_ROOT/usr/include/asm*/io.h -rm -f $RPM_BUILD_ROOT/usr/include/asm*/irq.h - - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. 
It seems your system is ${system_arch}."; - exit 1; -fi; - -%post -# post ######################################################################### -echo "Install tlinux kernel" -%if 0%{?rhel} == 8 -kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz -cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --make-default|| exit $? -echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done." -%endif -%endif - -%preun -# preun ####################################################################### -%if 0%{?rhel} == 8 -kernel-install remove %{tagged_name} -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $? -echo -e "Remove \"%{tagged_name}%{?dist}\" Done." -%endif -%endif - -%posttrans -# posttrans #################################################################### -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $? -/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $? -%endif - -%files -f core.list -# files ######################################################################## -%defattr(-,root,root) -%if 0%{?rhel} == 7 -/boot/vmlinuz-%%{tagged_name}%%{?dist} -/boot/.vmlinuz-%%{tagged_name}%%{?dist}.hmac -#/boot/uImage-%%{tagged_name}%%{?dist} -/boot/System.map-%%{tagged_name}%%{?dist} -/boot/config-%%{tagged_name}%%{?dist} -%endif -/boot/symvers-%{tagged_name}%{?dist}* -/etc/sysconfig/modules/tlinux_cciss_link.modules -#/lib/modules/%%{tagged_name}/vmlinuz -#do not package firmware ,use linux-firmware rpm instead -#/lib/firmware/ - -%files debuginfo-common -%defattr(-,root,root) -/boot/vmlinux-%{tagged_name}%{?dist} -%dir %debug_path -%debug_path/modules -#/boot/System.map-%%{version}-%%{title}-%%{release_os}-* -#/boot/config-%%{version}-%%{title}-%%{release_os}-* - -%files headers -%defattr(-,root,root) -/usr/include/* - -%files devel -%defattr(-,root,root) -/usr/src/kernels/%{tagged_name}%{?dist} - -%if %{with_perf} -%files -n perf -%defattr(-,root,root) -%{_bindir}/* -/usr/lib64/* -/usr/lib/traceevent/plugins/*.so -/usr/lib/perf/examples/bpf/* -/usr/lib/perf/include/bpf/* -/usr/share/* -%dir %{_libexecdir}/perf-core -%{_libexecdir}/perf-core/* -%{_sysconfdir}/bash_completion.d/perf - -%files -n python-perf -%defattr(-,root,root) -%{python_sitearch} - -%if %{with_debuginfo} -%files -f perf-debuginfo.list -n perf-debuginfo -%defattr(-,root,root) - -%files -f python-perf-debuginfo.list -n python-perf-debuginfo -%defattr(-,root,root) -%endif -%endif # with_perf - -# changelog ################################################################### -%changelog -* Mon Mar 11 2019 Jia Wang <jasperwang@tencent.com> - - Initial 4.14.99 repository diff --git a/package/arm/perf-tlinux.spec b/package/arm/perf-tlinux.spec deleted file mode 100644 index 049c128c0be4..000000000000 --- a/package/arm/perf-tlinux.spec +++ /dev/null @@ -1,154 +0,0 @@ -%define sign_server 10.12.65.118 - -%global with_debuginfo 0 -%if 0%{?rhel} == 6 -%global dist .tl1 -%global debug_path /usr/lib/debug/lib/ -%else -%global debug_path /usr/lib/debug/usr/lib/ -%if 0%{?rhel} == 7 -%global dist .tl2 -%endif -%endif - -Summary: Performance 
monitoring for the Linux kernel -Group: Development/System -Name: %{name} -Version: %{version} -Release: %{release_os}%{?dist} -License: GPLv2 -Vendor: Tencent -Packager: tlinux team <g_APD_SRDC_OS@tencent.com> -Provides: perf = %{version}-%{release} -Source0: %{name}-%{version}.tar.gz -URL: http://www.tencent.com -ExclusiveArch: aarch64 -Distribution: Tencent Linux -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build -BuildRequires: wget bc module-init-tools curl -BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel python-devel perl(ExtUtils::Embed) bison flex -BuildRequires: xmlto asciidoc -BuildRequires: audit-libs-devel -%ifnarch s390 s390x -BuildRequires: numactl-devel -%endif - -# for the 'hostname' command -%if 0%{?rhel} == 7 -BuildRequires: hostname -%else -BuildRequires: net-tools -%endif - -%description -This package contains the perf tool, which enables performance monitoring -of the Linux kernel. - - -%package -n python-perf -Summary: Python bindings for apps which will manipulate perf events -Group: Development/Libraries -%description -n python-perf -The python-perf package contains a module that permits applications -written in the Python programming language to use the interface -to manipulate perf events. - -%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} - - -# prep ######################################################################### -%prep - -%setup -q -c -T -a 0 - -# build ######################################################################## -%build - - -if [ ! -f /etc/tlinux-release ]; then - echo "Error: please build this rpm on tlinux\n" - exit 1 -fi - - -cd %{name}-%{version} -all_types="%{kernel_all_types}" -num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l` -if [ $num_processor -gt 8 ]; then - num_processor=8 -fi - -type_name=default - -objdir=../%{name}-%{version}-obj-${type_name} -[ -d ${objdir} ] || mkdir ${objdir} -bash scripts/mkmakefile $PWD ${objdir} 2 6 -if [ "$no_sign" != "yes" ]; then -cp ./package/default/config.${type_name} ${objdir}/.config -else -cp ./package/default/config.${type_name}_nosign ${objdir}/.config -fi -localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"' -sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config - - -%global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix} -# perf -%{perf_make} all -%{perf_make} man - -# install ###################################################################### -%install -cd %{name}-%{version} -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install - -# perf-python extension -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext - -# perf man pages (note: implicit rpm magic compresses them later) -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; - exit 1; -fi; - -if [ ! -f /etc/tlinux-release -o ! 
-f /etc/redhat-release ]; then - echo "Error: Cannot install this rpm on non-tlinux OS" - exit 1; -fi - -%post -# post ######################################################################### - - -%postun - -%files -%defattr(-,root,root) -%{_bindir}/perf -%dir %{_libexecdir}/perf-core -%{_libexecdir}/perf-core/* -%{_mandir}/man[1-8]/perf* -%{_sysconfdir}/bash_completion.d/perf -/usr/bin/trace -/usr/lib/traceevent/plugins/* -/usr/lib/perf -/usr/share/doc/* -/usr/share/perf-core/strace/groups -/usr/share/perf-core/strace/groups - -%files -n python-perf -%defattr(-,root,root) -%{python_sitearch} - - -# changelog ################################################################### -%changelog -* Thu Feb 2 2012 Samuel Liao <samuelliao@tencent.com> - - Initial 3.0.18 repository diff --git a/package/arm/repackage/generate-rpms.sh b/package/arm/repackage/generate-rpms.sh deleted file mode 100755 index 69506d3d38a2..000000000000 --- a/package/arm/repackage/generate-rpms.sh +++ /dev/null @@ -1,223 +0,0 @@ -#!/bin/bash -# ============================================================================== -# Description: Automaticly generating kernel rpm package with specified branch -# according to HEAD tag. -# -# Author: David Shan <davidshan@tencent.com> -# =============================================================================== - -#variables -kernel_full_name="" -kernel_version="" -tlinux_branch="" -tlinux_release="" -curdir=`pwd` - - -build_specdir=`readlink -f ~/rpmbuild/SPECS` -build_srcdir=`readlink -f ~/rpmbuild/SOURCES` -dist="tl2" - -kernel_type="default" -spec_file_name="" -make_jobs="" -tag_name="" -nosign_suffix="" -build_src_only=0 -strip_sign_token=0 -# to control wheter to build rpm for xen domU -isXenU="" -kernel_default_types=(default) - -#funcitons -usage() -{ - echo "generate-rpms [-t \$tag_name ] [-k \$kernel_type ] [-j \$jobs ][-h][-x \$arch]" - echo " -t tag_name" - echo " Tagged name in the git tree." - echo " -k kernel_type" - echo " Kernel type is default, state, container or user-specified type." - echo " If -k parameter is not set, we will compile standard three kernels of default, state and container." - echo " For user-specified type kernel, only compile individually." - echo " -j jobs" - echo " Specifies the number of jobs (threads) to run make." - echo " -x arch" - echo " switch to build rpm for xen domU. arch is 32 or 64." - echo " -d" - echo " disable module digital signature." - - echo -e "\n" - echo " Note:By default, we only generate latest tag kernel for standard kernels" - echo " without -t and -k option." - echo " So, if you hope to get certain tag kernel, please use -t." - echo " Also, only for state type kernel, please use -k option with state value." -} - -get_kernel_version() -{ - local tagged_name - raw_tagged_name=$tag_name - - if [[ $raw_tagged_name != public-arm64-* ]]; then - echo "$raw_tagged_name not valid tag name for public-arm64" - exit 1 - fi - - tagged_name=${raw_tagged_name#public-arm64-} - - kernel_version=`echo $tagged_name|cut -d- -f1` - #kernel_version=${kernel_version}-1 - #echo "kernel version: $kernel_version" - echo "kernel version: ${kernel_version}" -} - -get_tlinux_name() -{ - if [ -n "$tag_name" ]; then - raw_tagged_name="$tag_name" - else - raw_tagged_name=`git describe --tags` - fi - - if [ -z "${raw_tagged_name}" ];then - echo "Error:Can't get kernel version from git tree." 
- exit 1 - fi - - if [[ $raw_tagged_name != public-arm64-* ]]; then - echo "$raw_tagged_name not valid tag name for public-arm64" - exit 1 - fi - - tagged_name=${raw_tagged_name#public-arm64-} - - - echo "${tagged_name}" | grep 'kvm_guest' - if [ $? -eq 0 ]; then - echo "start kvm guest build" - kvm_guest="yes" - fi - - #if [ "${tagged_name#*-*-*}" != ${tagged_name} ];then - #echo "Error: bad tag name:$tagged_name." - #exit 1 - #fi - - - release=`echo $tagged_name|cut -d- -f2` - - tlinux_branch=`echo $tagged_name|cut -d- -f3` - - if [ $release -eq 19 ]; then - rpm_name="kernel" - tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3` - else - rpm_name="kernel"-${tlinux_branch} - tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f4` - fi - - echo "rpm_name $rpm_name" - #tlinux_release=${tagged_name#*-} - kernel_full_name=${rpm_name}-${kernel_version} - echo $kernel_full_name - #exit 1 -} - -prepare_tlinux_spec() -{ - spec_file_name=${build_specdir}/${kernel_full_name}_${dist}.spec - spec_contents="%define name ${rpm_name} \n" - spec_contents+="%define version ${kernel_version} \n" - spec_contents+="%define release_os ${tlinux_release} \n" - spec_contents+="%define title ${tlinux_branch} \n" - spec_contents+="%define tagged_name ${tagged_name} \n" - - echo -e "${spec_contents}" > $spec_file_name - - if [ "$kvm_guest" = "yes" ] ; then - cat $curdir/kernel-tlinux_kvmguest_${dist}.spec >> $spec_file_name - else - cat $curdir/kernel-tlinux_${dist}.spec >> $spec_file_name - #cp $curdir/${kernel_full_name}_${dist}.spec $spec_file_name - fi -} - -prepare_tlinux_bin_sources() -{ - local kernel_tl3_rpm=${rpm_name}-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm - local kernel_debuginfo_tl3_rpm=${rpm_name}-debuginfo-common-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm - local kernel_headers_tl3_rpm=${rpm_name}-headers-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm - local kernel_devel_tl3_rpm=${rpm_name}-devel-${kernel_version}-${tlinux_release}.tl3.aarch64.rpm - - if [ ! -f $kernel_tl3_rpm ]; then - echo "$kernel_tl3_rpm not exist" - exit 1 - fi - - if [ ! -f $kernel_debuginfo_tl3_rpm ]; then - echo "$kernel_debuginfo_tl3_rpm not exist" - exit 1 - fi - - if [ ! -f $kernel_headers_tl3_rpm ]; then - echo "$kernel_headers_tl3_rpm not exist" - exit 1 - fi - - if [ ! -f $kernel_devel_tl3_rpm ]; then - echo "$kernel_devel_tl3_rpm not exist" - exit 1 - fi - - rm -rf ${rpm_name}-${kernel_version} - rm -rf ${rpm_name}-${kernel_version}_bin.tar.gz - mkdir ${rpm_name}-${kernel_version} - pushd . - cd ${rpm_name}-${kernel_version} - rpm2cpio ../$kernel_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_debuginfo_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_headers_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_devel_tl3_rpm | cpio -divm - popd - tar -czvf ${rpm_name}-${kernel_version}_bin.tar.gz ${rpm_name}-${kernel_version} -} - -#main function - -if [ $# -eq 0 ]; then - echo "please specify tag" - exit 1 -fi - -tag_name=$1 - -if [ ! -e "${build_srcdir}" ]; then - mkdir $build_srcdir - [ $? == 1 ] && exit 1 -fi - -if [ ! -e "${build_specdir}" ]; then - mkdir $build_specdir - [ $? == 1 ] && exit 1 -fi - -origsrc=`pwd` - -get_kernel_version $origsrc -get_tlinux_name -prepare_tlinux_bin_sources -echo "Prepare $kernel_full_name source." 
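prepare_tlinux_bin_sources above round-trips already-built binary rpms back into a tarball: each of the four rpms (kernel, debuginfo-common, headers, devel) is unpacked with rpm2cpio into a shared staging tree, and the merged tree is compressed for the repackaging spec's Source0. The core of that round-trip as a sketch; the staging name and rpm glob below are illustrative:

    #!/bin/bash
    # Unpack built rpms into one staging tree and re-tar it, as
    # prepare_tlinux_bin_sources does. Names are illustrative.
    stage=kernel-4.14.99
    rm -rf "$stage" "${stage}_bin.tar.gz"
    mkdir "$stage"
    for rpm in kernel-*.tl3.aarch64.rpm; do
        rpm2cpio "$rpm" | ( cd "$stage" && cpio -idmv )   # extract rpm payload
    done
    tar -czf "${stage}_bin.tar.gz" "$stage"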
-prepare_tlinux_spec - - -if test -e ${build_srcdir}/${kernel_full_name}; then - rm -fr ${build_srcdir}/${kernel_full_name} -fi - -rm -rf /var/tmp/${kernel_full_name} - -cp ${kernel_full_name}_bin.tar.gz ${build_srcdir}/ - -echo "Build kernel rpm package." - -rpmbuild -bb ${target} ${spec_file_name} diff --git a/package/arm/repackage/kernel-tlinux_tl2.spec b/package/arm/repackage/kernel-tlinux_tl2.spec deleted file mode 100644 index 1de3e5e399b0..000000000000 --- a/package/arm/repackage/kernel-tlinux_tl2.spec +++ /dev/null @@ -1,231 +0,0 @@ -%global with_debuginfo 0 -%global with_perf 1 -%if 0%{?rhel} == 6 -%global rdist .tl1 -%global debug_path /usr/lib/debug/lib/ -%else -%global debug_path /usr/lib/debug/usr/lib/ -%if 0%{?rhel} == 7 -%global rdist .tl2 -%global _enable_debug_packages %{nil} -%global debug_package %{nil} -%global __debug_package %{nil} -%global __debug_install_post %{nil} -%global _build_id_links none -%endif -%if 0%{?rhel} == 8 -%global rdist .tl3 -%global __python /usr/bin/python2 -%global _enable_debug_packages %{nil} -%global debug_package %{nil} -%global __debug_package %{nil} -%global __debug_install_post %{nil} -%global _build_id_links none -%endif -%endif - -%global dist %{nil} - -Summary: Kernel for Tencent physical machine -Name: %{name} -Version: %{version} -Release: %{release_os}%{?rdist} -License: GPLv2 -Vendor: Tencent -Packager: tlinux team <g_APD_SRDC_OS@tencent.com> -Provides: kernel = %{version}-%{release} -Group: System Environment/Kernel -Source0: %{name}-%{version}_bin.tar.gz -Source1: tlinux_cciss_link.modules -URL: http://www.tencent.com -ExclusiveArch: aarch64 -Distribution: Tencent Linux -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build -BuildRequires: wget bc module-init-tools curl -%if %{with_perf} -BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex -BuildRequires: xmlto asciidoc hmaccalc -BuildRequires: audit-libs-devel -#BuildRequires: uboot-tools -%if 0%{?rhel} == 8 -BuildRequires: python2-devel -%else -BuildRequires: python-devel -%endif - - -%ifnarch s390 s390x -BuildRequires: numactl-devel -%endif -%endif -Requires(pre): linux-firmware >= 20100806-2 - -# for the 'hostname' command -%if 0%{?rhel} == 7 -BuildRequires: hostname -%else -BuildRequires: net-tools -%endif - -%description -This package contains tlinux kernel for arm64 Bare-Metal& VM - -%package debuginfo-common -Summary: tlinux kernel vmlinux for crash debug -Release: %{release} -Group: System Environment/Kernel -Provides: kernel-debuginfo-common kernel-debuginfo -AutoReqprov: no - -%description debuginfo-common -This package container vmlinux, System.map and config files for kernel -debugging. - -%package devel -Summary: Development package for building kernel modules to match the %{version}-%{release} kernel -Release: %{release} -Group: System Environment/Kernel -AutoReqprov: no - -%description devel -This package provides kernel headers and makefiles sufficient to build modules -against the %{version}-%{release} kernel package. - - -%package headers -Summary: Header files for the Linux kernel for use by glibc -Group: Development/System -Obsoletes: glibc-kernheaders -Provides: glibc-kernheaders = 3.0-46 -AutoReqprov: no - -%description headers -Kernel-headers includes the C header files that specify the interface -between the Linux kernel and userspace libraries and programs. 
The -header files define structures and constants that are needed for -building most standard programs and are also needed for rebuilding the -glibc package. - - -# prep ######################################################################### -%prep -%setup -q -c -T -a 0 - -# build ######################################################################## -%build - - -# install ###################################################################### -%install - - -echo install %{version} -arch=`uname -m` -if [ "$arch" != "aarch64" ];then - echo "Unexpected error. Cannot build this rpm in non-aarch64 platform\n" - exit 1 -fi - -cd %{name}-%{version} - -cp -rp * %buildroot/ - -elfname=vmlinuz-%{tagged_name}%{?dist} -mapname=System.map-%{tagged_name}%{?dist} -configname=config-%{tagged_name}%{?dist} - -mkdir -p %buildroot/boot -mv %buildroot/lib/modules/%tagged_name/vmlinuz %buildroot/boot/${elfname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/${mapname} %buildroot/boot/${mapname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/${configname} %buildroot/boot/${configname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/boot/ - -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.softdep %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.devname %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.builtin.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias %buildroot/lib/modules/%tagged_name/ - - - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; - exit 1; -fi; - -%post -# post ######################################################################### -echo "Install tlinux kernel" -%if 0%{?rhel} == 8 -kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz -cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --make-default|| exit $? -echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done." -%endif -%endif - -%preun -# preun ####################################################################### -%if 0%{?rhel} == 8 -kernel-install remove %{tagged_name} -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $? -echo -e "Remove \"%{tagged_name}%{?dist}\" Done." 
-%endif -%endif - -%posttrans -# posttrans #################################################################### -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $? -/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $? -%endif - -%files -# files ######################################################################## -%defattr(-,root,root) -/boot/vmlinuz-%%{tagged_name}%%{?dist} -/boot/.vmlinuz-%%{tagged_name}%%{?dist}.hmac -#/boot/uImage-%%{tagged_name}%%{?dist} -/boot/System.map-%%{tagged_name}%%{?dist} -/boot/config-%%{tagged_name}%%{?dist} -/boot/symvers-%{tagged_name}%{?dist}* -/etc/sysconfig/modules/tlinux_cciss_link.modules -/lib/modules/%%{tagged_name}/* -#do not package firmware ,use linux-firmware rpm instead -#/lib/firmware/ - -%files debuginfo-common -%defattr(-,root,root) -/boot/vmlinux-%{tagged_name}%{?dist} -%dir %debug_path -%debug_path/modules -#/boot/System.map-%%{version}-%%{title}-%%{release_os}-* -#/boot/config-%%{version}-%%{title}-%%{release_os}-* - -%files headers -%defattr(-,root,root) -/usr/include/* - -%files devel -%defattr(-,root,root) -/usr/src/kernels/%{tagged_name}%{?dist} - -# changelog ################################################################### -%changelog -* Mon Mar 11 2019 Jia Wang <jasperwang@tencent.com> - - Initial 4.14.99 repository diff --git a/package/arm/tlinux_cciss_link.modules b/package/arm/tlinux_cciss_link.modules deleted file mode 100644 index 8d15da12d851..000000000000 --- a/package/arm/tlinux_cciss_link.modules +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -if [ -b /dev/sda -a ! -b /dev/cciss/c0d0 ]; then - mkdir -p /dev/cciss - set -- 0 a 1 b 2 c 3 d 4 e 5 f 6 g 7 h 8 i 9 j 10 k 11 l 12 m 13 n 14 o 15 p - while [ $# -ge 2 ]; do - c=/dev/cciss/c0d$1; shift - s=/dev/sd$1; shift - ln -s ${s} ${c} - ln -s ${s}1 ${c}p1 - ln -s ${s}2 ${c}p2 - ln -s ${s}3 ${c}p3 - ln -s ${s}4 ${c}p4 - done -fi - diff --git a/package/default/Module.kabi_x86_64 b/package/default/Module.kabi_x86_64 new file mode 100644 index 000000000000..5b218c061494 --- /dev/null +++ b/package/default/Module.kabi_x86_64 @@ -0,0 +1,14233 @@ +0x7d2ca27c kvm_get_cs_db_l_bits arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xa1b72855 ipv6_chk_custom_prefix vmlinux EXPORT_SYMBOL +0x981efe92 mpt_deregister vmlinux EXPORT_SYMBOL +0xd9a5dd1f iscsi_host_add vmlinux EXPORT_SYMBOL_GPL +0xa24c1a8d sata_pmp_error_handler vmlinux EXPORT_SYMBOL_GPL +0x55417264 unregister_vt_notifier vmlinux EXPORT_SYMBOL_GPL +0x35db1756 set_anon_super vmlinux EXPORT_SYMBOL +0xe3b6fd5f kmem_cache_alloc vmlinux EXPORT_SYMBOL +0x4c68feca replace_page_cache_page vmlinux EXPORT_SYMBOL_GPL +0x1381d4f3 net_cls_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xc2708d88 i2c_put_adapter drivers/i2c/i2c-core EXPORT_SYMBOL +0x22a70156 drm_atomic_set_crtc_for_connector drivers/gpu/drm/drm EXPORT_SYMBOL +0x48dcd18b xdp_attachment_setup vmlinux EXPORT_SYMBOL_GPL +0x23f9c5ce xps_needed vmlinux EXPORT_SYMBOL +0x6ffddd0b rtc_class_open vmlinux EXPORT_SYMBOL_GPL +0x96cd2b04 scsi_sense_key_string vmlinux EXPORT_SYMBOL +0x350ea558 dma_fence_default_wait vmlinux EXPORT_SYMBOL +0x013f26ae dma_fence_get_stub vmlinux EXPORT_SYMBOL +0xb7759ce5 request_firmware vmlinux EXPORT_SYMBOL +0x5779a8fa dev_pm_enable_wake_irq vmlinux EXPORT_SYMBOL_GPL +0x784d5a43 irq_to_desc vmlinux EXPORT_SYMBOL +0x01b0389d ib_destroy_cm_id drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x90e807c7 ib_cache_gid_parse_type_str drivers/infiniband/core/ib_core 
EXPORT_SYMBOL +0x2be48aff t3_register_cpl_handler drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x4c9d28b0 phys_base vmlinux EXPORT_SYMBOL +0xe113bbbc csum_partial vmlinux EXPORT_SYMBOL +0x23cdb5f2 netpoll_poll_enable vmlinux EXPORT_SYMBOL +0x4da8caab dev_mc_sync vmlinux EXPORT_SYMBOL +0x03af52c1 dev_uc_sync vmlinux EXPORT_SYMBOL +0xb6da9cb3 hwmon_device_unregister vmlinux EXPORT_SYMBOL_GPL +0x07242d92 put_dax vmlinux EXPORT_SYMBOL_GPL +0x6c8a858b device_set_of_node_from_dev vmlinux EXPORT_SYMBOL_GPL +0xfe9141e3 acpi_pm_device_sleep_state vmlinux EXPORT_SYMBOL +0x80ca5026 _bin2bcd vmlinux EXPORT_SYMBOL +0xfbaaf01e console_lock vmlinux EXPORT_SYMBOL +0x9a73b032 ZSTD_initDStream_usingDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x430ecc96 ZSTD_initCStream_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0xce1a8e73 wpan_phy_find net/ieee802154/ieee802154 EXPORT_SYMBOL +0x6a67418f __drm_atomic_helper_crtc_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x88638552 simd_skcipher_create_compat crypto/crypto_simd EXPORT_SYMBOL_GPL +0x4464302c o2hb_setup_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x9084b044 clear_page_erms vmlinux EXPORT_SYMBOL_GPL +0x4d8a96ab xas_set_mark vmlinux EXPORT_SYMBOL_GPL +0xc87fb025 xas_get_mark vmlinux EXPORT_SYMBOL_GPL +0x4375e076 tcp_v4_mtu_reduced vmlinux EXPORT_SYMBOL +0x8148a105 mbox_send_message vmlinux EXPORT_SYMBOL_GPL +0x757a52e0 nd_blk_region_provider_data vmlinux EXPORT_SYMBOL_GPL +0x139f2189 __kfifo_alloc vmlinux EXPORT_SYMBOL +0xc366bfa1 ceph_pagelist_truncate net/ceph/libceph EXPORT_SYMBOL +0x1a5b4587 cryptd_skcipher_queued crypto/cryptd EXPORT_SYMBOL_GPL +0x369c6ec9 nf_log_register vmlinux EXPORT_SYMBOL +0x10b7837e ps2_end_command vmlinux EXPORT_SYMBOL +0xcdb64abe iscsi_tcp_r2tpool_free vmlinux EXPORT_SYMBOL_GPL +0x46e77acb serial8250_do_shutdown vmlinux EXPORT_SYMBOL_GPL +0xd7d14141 phy_reset vmlinux EXPORT_SYMBOL_GPL +0x3854774b kstrtoll vmlinux EXPORT_SYMBOL +0x44469a76 crc_ccitt_false_table lib/crc-ccitt EXPORT_SYMBOL +0x165ea1da ib_device_get_by_name drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc1fd1dbc __tracepoint_bcache_btree_insert_key drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x63106ada drm_fb_xrgb8888_to_rgb565 drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x747367cd drm_dp_dpcd_read drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xb91ac628 nfnl_ct_hook vmlinux EXPORT_SYMBOL_GPL +0xfd496864 devfreq_unregister_notifier vmlinux EXPORT_SYMBOL +0x59c8110c qlt_abort_cmd vmlinux EXPORT_SYMBOL +0xca7facb4 device_del vmlinux EXPORT_SYMBOL_GPL +0x2251985f device_add vmlinux EXPORT_SYMBOL_GPL +0x170ddf79 acpi_install_notify_handler vmlinux EXPORT_SYMBOL +0x6b8c7ed7 crypto_hash_walk_done vmlinux EXPORT_SYMBOL_GPL +0xdd391eff profile_event_unregister vmlinux EXPORT_SYMBOL_GPL +0xb3635b01 _raw_spin_lock_bh vmlinux EXPORT_SYMBOL +0x9b76be8b init_uts_ns vmlinux EXPORT_SYMBOL_GPL +0xdd4a64b0 mlx5_core_destroy_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x41cea775 mlx5_core_destroy_qp drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xe3a62091 mlx5_core_destroy_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x161c5d62 drm_fb_memcpy_dstclip drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x31ec5716 dst_release vmlinux EXPORT_SYMBOL +0x5f0c5466 dst_discard_out vmlinux EXPORT_SYMBOL +0x03706903 sock_no_mmap vmlinux EXPORT_SYMBOL +0xacb4d88c clk_rate_exclusive_put vmlinux EXPORT_SYMBOL_GPL +0xfdb9b629 ioread32be vmlinux EXPORT_SYMBOL +0x461d16ca sg_nents 
vmlinux EXPORT_SYMBOL +0xb598dd5f disk_map_sector_rcu vmlinux EXPORT_SYMBOL_GPL +0x64879981 sysfs_update_group vmlinux EXPORT_SYMBOL_GPL +0xa38602cd drain_workqueue vmlinux EXPORT_SYMBOL_GPL +0xc16869b6 mlxsw_core_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xce98a165 mlx5_set_port_prio_tc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb4e98a46 twofish_dec_blk_3way arch/x86/crypto/twofish-x86_64-3way EXPORT_SYMBOL_GPL +0x86d3c7ee tcp_initialize_rcv_mss vmlinux EXPORT_SYMBOL +0x54e6fcdd net_enable_timestamp vmlinux EXPORT_SYMBOL +0x4260cc4e sockfd_lookup vmlinux EXPORT_SYMBOL +0x52b7c441 cpufreq_disable_fast_switch vmlinux EXPORT_SYMBOL_GPL +0x631ad481 ata_sff_tf_read vmlinux EXPORT_SYMBOL_GPL +0x731c4a9c dma_fence_signal vmlinux EXPORT_SYMBOL +0xedff4be5 acpi_load_table vmlinux EXPORT_SYMBOL +0x831ca3ff fb_validate_mode vmlinux EXPORT_SYMBOL +0x8b910be2 errseq_sample vmlinux EXPORT_SYMBOL +0x6bc3fbc0 __unregister_chrdev vmlinux EXPORT_SYMBOL +0xe4971ade tracing_alloc_snapshot vmlinux EXPORT_SYMBOL_GPL +0xff777322 srcu_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL +0x482ac5a4 g_token_size net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0x1f6ba977 cxgbi_get_host_param drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xf831fecd __nvmf_check_ready drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL +0xd8fa57a6 dlm_unregister_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0x7fdbbe57 kvm_vcpu_gfn_to_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xaf47cc63 ping_seq_next vmlinux EXPORT_SYMBOL_GPL +0x03a24bcf dev_graft_qdisc vmlinux EXPORT_SYMBOL +0x1c36110d devlink_resource_register vmlinux EXPORT_SYMBOL_GPL +0x24644204 acpi_device_fix_up_power vmlinux EXPORT_SYMBOL_GPL +0xac029209 badblocks_store vmlinux EXPORT_SYMBOL_GPL +0xa851973a raw_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0xa7a13814 nf_ct_delete net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x55c24b02 rdma_restrack_uadd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x980cf92a rdma_restrack_kadd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x70cdcbfd vbg_warn drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL +0x1f4cc5ba mlx4_unregister_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x404ae463 drm_gem_fb_get_obj drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL_GPL +0xf18d8306 xsk_set_tx_need_wakeup vmlinux EXPORT_SYMBOL +0x9d5d8110 xsk_set_rx_need_wakeup vmlinux EXPORT_SYMBOL +0xb8472f23 skb_ensure_writable vmlinux EXPORT_SYMBOL +0x0bc17779 skb_splice_bits vmlinux EXPORT_SYMBOL_GPL +0x32ab2682 md_finish_reshape vmlinux EXPORT_SYMBOL +0x49603151 sas_eh_device_reset_handler vmlinux EXPORT_SYMBOL_GPL +0xa2e9f2ce scsi_target_quiesce vmlinux EXPORT_SYMBOL +0x7f6d03a8 transport_class_register vmlinux EXPORT_SYMBOL_GPL +0x7c9ca58f __sg_page_iter_next vmlinux EXPORT_SYMBOL +0x99f2d00a sysfs_emit_at vmlinux EXPORT_SYMBOL_GPL +0x2f84b90d mr_table_alloc vmlinux EXPORT_SYMBOL +0xa11dd76c devlink_alloc vmlinux EXPORT_SYMBOL_GPL +0x963e5d39 dm_put_device vmlinux EXPORT_SYMBOL +0x6777f894 dm_get_device vmlinux EXPORT_SYMBOL +0x73847edc input_handler_for_each_handle vmlinux EXPORT_SYMBOL +0x85167394 mpt_free_fw_memory vmlinux EXPORT_SYMBOL +0x45d3c459 iscsi_conn_setup vmlinux EXPORT_SYMBOL_GPL +0x034f8738 fcoe_ctlr_destroy vmlinux EXPORT_SYMBOL +0x0d4de70d fwnode_find_reference vmlinux EXPORT_SYMBOL_GPL +0xc0338126 transport_class_unregister vmlinux EXPORT_SYMBOL_GPL +0xf1848ee2 acpi_install_sci_handler vmlinux EXPORT_SYMBOL +0x12f6f69c fb_videomode_to_var vmlinux 
EXPORT_SYMBOL +0x45c5ad6e jbd2_journal_try_to_free_buffers vmlinux EXPORT_SYMBOL +0xefa2c27d register_tracepoint_module_notifier vmlinux EXPORT_SYMBOL_GPL +0x364850b1 down_write_killable vmlinux EXPORT_SYMBOL +0xaad8c7d6 default_wake_function vmlinux EXPORT_SYMBOL +0x4fcc8ad2 ex_handler_uaccess vmlinux EXPORT_SYMBOL +0x7d053fc5 dm_rh_start_recovery drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x25d94d93 mlx5_cmd_destroy_vport_lag drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x91fec1cc drm_rect_calc_vscale drivers/gpu/drm/drm EXPORT_SYMBOL +0x2d50570f drm_rect_calc_hscale drivers/gpu/drm/drm EXPORT_SYMBOL +0x2412f508 of_find_backlight drivers/video/backlight/backlight EXPORT_SYMBOL +0xb7e45ed0 input_mt_assign_slots vmlinux EXPORT_SYMBOL +0x0bd3ef08 fc_remote_port_add vmlinux EXPORT_SYMBOL +0x35fe14dd dev_attr_link_power_management_policy vmlinux EXPORT_SYMBOL_GPL +0xaa341905 acpi_bios_exception vmlinux EXPORT_SYMBOL +0x896c7b5e pci_bus_read_config_byte vmlinux EXPORT_SYMBOL +0x10b24c6d blkcg_root vmlinux EXPORT_SYMBOL_GPL +0x30c6fc32 mount_subtree vmlinux EXPORT_SYMBOL +0x7d11eb8d iget_locked vmlinux EXPORT_SYMBOL +0x7991fd8d crash_vmclear_loaded_vmcss vmlinux EXPORT_SYMBOL_GPL +0x9199fb0b virtio_transport_notify_send_pre_block net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xe0678136 nfs_sb_active fs/nfs/nfs EXPORT_SYMBOL_GPL +0x21bc7f29 handle_ud arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x7309e62d class_interface_register vmlinux EXPORT_SYMBOL_GPL +0x56624f6c iommu_group_add_device vmlinux EXPORT_SYMBOL_GPL +0xae0f51bf clk_hw_unregister_fixed_factor vmlinux EXPORT_SYMBOL_GPL +0xa4b94fea iowrite8_rep vmlinux EXPORT_SYMBOL +0x0cfc36e4 blk_queue_max_write_zeroes_sectors vmlinux EXPORT_SYMBOL +0xfb5af060 key_type_keyring vmlinux EXPORT_SYMBOL +0x270805fc drm_vma_node_allow drivers/gpu/drm/drm EXPORT_SYMBOL +0xd5a6a953 nfs_revalidate_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1464f30b fscache_operation_init fs/fscache/fscache EXPORT_SYMBOL +0x72f22e9e kvm_apic_set_eoi_accelerated arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xae39f80e dst_cache_init vmlinux EXPORT_SYMBOL_GPL +0xdf560d6f ir_raw_event_handle vmlinux EXPORT_SYMBOL_GPL +0xb337e84b rc_keyup vmlinux EXPORT_SYMBOL_GPL +0xe39c0ce2 sas_release_transport vmlinux EXPORT_SYMBOL +0x187d26fd class_interface_unregister vmlinux EXPORT_SYMBOL_GPL +0xc617f82c unregister_oom_notifier vmlinux EXPORT_SYMBOL_GPL +0x9141dd61 xdr_decode_word net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4dac77f0 xdr_encode_netobj net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf3c2dfe0 transport_set_vpd_proto_id drivers/target/target_core_mod EXPORT_SYMBOL +0x26bc1a40 i2c_generic_scl_recovery drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0xbea6adad tcp_v4_md5_lookup vmlinux EXPORT_SYMBOL +0x4e887439 mptscsih_scandv_complete vmlinux EXPORT_SYMBOL +0x2e3dd72d tty_register_device vmlinux EXPORT_SYMBOL +0xbb4f4766 simple_write_to_buffer vmlinux EXPORT_SYMBOL +0x0bba86df read_code vmlinux EXPORT_SYMBOL +0x8bb06245 __srcu_read_unlock vmlinux EXPORT_SYMBOL_GPL +0xc2e587d1 reset_devices vmlinux EXPORT_SYMBOL +0xc99fb88f mlx5_core_res_put drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x255ab30f cxgb_get_4tuple drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL +0x676bd843 vmci_qpair_consume_free_space drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x843f1e9e pnfs_layout_mark_request_commit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x2a09cb5b kvm_debugfs_dir arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb0e602eb memmove vmlinux EXPORT_SYMBOL +0xb7373e5a 
mrp_unregister_application vmlinux EXPORT_SYMBOL_GPL +0x021f5485 dma_resv_add_shared_fence vmlinux EXPORT_SYMBOL +0x791748c8 adxl_decode vmlinux EXPORT_SYMBOL_GPL +0xfa39b4be sha224_update vmlinux EXPORT_SYMBOL +0xc5f7e801 sg_last vmlinux EXPORT_SYMBOL +0x168c18e0 param_get_string vmlinux EXPORT_SYMBOL +0xb739eb7a rpc_d_lookup_sb net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x79df4564 nvmf_free_options drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL +0x0f807c79 drm_atomic_bridge_post_disable drivers/gpu/drm/drm EXPORT_SYMBOL +0x573a1573 tc_setup_cb_add vmlinux EXPORT_SYMBOL +0xe39d0794 usb_phy_roothub_exit vmlinux EXPORT_SYMBOL_GPL +0xc93ee1e7 usb_phy_roothub_init vmlinux EXPORT_SYMBOL_GPL +0x893895c3 pm_clk_remove vmlinux EXPORT_SYMBOL_GPL +0x60310dcb tpm_pm_resume vmlinux EXPORT_SYMBOL_GPL +0xe6f83837 acpi_bus_attach_private_data vmlinux EXPORT_SYMBOL_GPL +0xe1537255 __list_del_entry_valid vmlinux EXPORT_SYMBOL +0x6dda7ca7 wbt_enable_default vmlinux EXPORT_SYMBOL_GPL +0xeb89a32b jbd2_journal_start_commit vmlinux EXPORT_SYMBOL +0x76d3cd60 laptop_mode vmlinux EXPORT_SYMBOL +0x0b9dcaba generic_file_direct_write vmlinux EXPORT_SYMBOL +0x46b856c1 end_page_writeback vmlinux EXPORT_SYMBOL +0x42c1bb5e vsock_remove_pending net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xfe9ce7dc ip_tunnel_delete_nets net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x75b65a61 drm_agp_enable drivers/gpu/drm/drm EXPORT_SYMBOL +0x4b0e6492 drm_gem_prime_mmap drivers/gpu/drm/drm EXPORT_SYMBOL +0x743553fc skb_recv_datagram vmlinux EXPORT_SYMBOL +0xd4436983 clk_hw_round_rate vmlinux EXPORT_SYMBOL_GPL +0x5e78d1ad fiemap_check_flags vmlinux EXPORT_SYMBOL +0x185f646a get_hwpoison_page vmlinux EXPORT_SYMBOL_GPL +0xcf9f31a7 generic_file_readonly_mmap vmlinux EXPORT_SYMBOL +0xd06524ba raw_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0x450110e8 perf_assign_events vmlinux EXPORT_SYMBOL_GPL +0x85bc42e5 vhost_discard_vq_desc drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xbff963a8 drm_client_modeset_commit drivers/gpu/drm/drm EXPORT_SYMBOL +0x53e95e41 drm_client_modeset_commit_force drivers/gpu/drm/drm EXPORT_SYMBOL +0x0d542439 __ipv6_addr_type vmlinux EXPORT_SYMBOL +0xbfc22763 unregister_qdisc vmlinux EXPORT_SYMBOL +0x3928b472 cpufreq_table_index_unsorted vmlinux EXPORT_SYMBOL_GPL +0xcfe0175b dev_pm_opp_detach_genpd vmlinux EXPORT_SYMBOL_GPL +0xadb3743b usb_autopm_get_interface vmlinux EXPORT_SYMBOL_GPL +0x34df9729 phy_device_register vmlinux EXPORT_SYMBOL +0xdf0bc6dc acpi_pci_find_root vmlinux EXPORT_SYMBOL_GPL +0x0761795d pci_map_rom vmlinux EXPORT_SYMBOL +0x0c4ab191 crypto_shash_finup vmlinux EXPORT_SYMBOL_GPL +0x52221bfc crypto_shash_final vmlinux EXPORT_SYMBOL_GPL +0xf79926ba crypto_ahash_finup vmlinux EXPORT_SYMBOL_GPL +0x645ae85e crypto_ahash_final vmlinux EXPORT_SYMBOL_GPL +0x221eab6d scatterwalk_copychunks vmlinux EXPORT_SYMBOL_GPL +0x052c9aed ktime_get_real_fast_ns vmlinux EXPORT_SYMBOL_GPL +0xf643eca3 rpc_free net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x61d24c52 ib_rate_to_mbps drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xae956dce ib_rate_to_mult drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7f44a1c3 mlx5_set_port_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xd7a19d67 mlx4_multicast_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x8dba4a7c acpi_video_get_edid drivers/acpi/video EXPORT_SYMBOL +0x408d8009 sock_diag_put_meminfo vmlinux EXPORT_SYMBOL_GPL +0x8fcbac2b fc_vport_terminate vmlinux EXPORT_SYMBOL +0x6dd17e7b acpi_get_table_header vmlinux EXPORT_SYMBOL +0x64bbc288 string_unescape 
+0x64bbc288 string_unescape vmlinux EXPORT_SYMBOL
+0xc6cb465a __kfifo_max_r vmlinux EXPORT_SYMBOL
+0xaae12c77 vm_insert_page vmlinux EXPORT_SYMBOL
+0x0ee77e35 bpf_prog_sub vmlinux EXPORT_SYMBOL_GPL
+0x7a6639bd irq_domain_add_legacy vmlinux EXPORT_SYMBOL_GPL
+0x9c4a93c8 irq_chip_disable_parent vmlinux EXPORT_SYMBOL_GPL
+0x4cf42fbd register_console vmlinux EXPORT_SYMBOL
+0x5549005d __ip_tunnel_change_mtu net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0xb42edf20 drm_property_create_signed_range drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc685b3fa drm_framebuffer_plane_height drivers/gpu/drm/drm EXPORT_SYMBOL
+0x43c6a105 amd_iommu_set_invalidate_ctx_cb drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL
+0x25301bc6 arch_wb_cache_pmem vmlinux EXPORT_SYMBOL_GPL
+0xa6e6c854 inet_reqsk_alloc vmlinux EXPORT_SYMBOL
+0x82c3bb4e sock_no_socketpair vmlinux EXPORT_SYMBOL
+0x2aadad1a efi_capsule_update vmlinux EXPORT_SYMBOL_GPL
+0x3de207ca input_ff_event vmlinux EXPORT_SYMBOL_GPL
+0xb1e5d9bb ata_pci_bmdma_init_one vmlinux EXPORT_SYMBOL_GPL
+0x110106c1 cper_severity_to_aer vmlinux EXPORT_SYMBOL_GPL
+0x2d557bfe __filemap_set_wb_err vmlinux EXPORT_SYMBOL
+0x3192d768 cpufreq_remove_update_util_hook vmlinux EXPORT_SYMBOL_GPL
+0x1aff3d55 mce_register_injector_chain vmlinux EXPORT_SYMBOL_GPL
+0xf4691a4d ovs_vport_alloc net/openvswitch/openvswitch EXPORT_SYMBOL_GPL
+0x92862de1 arpt_do_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL
+0xfbf32540 mlx5_eq_destroy_generic drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xaf334671 nfs_retry_commit fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x527036c7 nfs_client_init_status fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x07894c42 kvm_vcpu_reload_apic_access_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xaff84fee dev_addr_del vmlinux EXPORT_SYMBOL
+0xa3eeb8fc gnet_stats_start_copy vmlinux EXPORT_SYMBOL
+0x33f0768c cpufreq_quick_get_max vmlinux EXPORT_SYMBOL
+0xa21fafb9 mraid_mm_adapter_app_handle vmlinux EXPORT_SYMBOL
+0xe197f4ba iscsi_is_session_online vmlinux EXPORT_SYMBOL_GPL
+0x1c5ff742 clk_get_phase vmlinux EXPORT_SYMBOL_GPL
+0xb7329c06 clk_set_phase vmlinux EXPORT_SYMBOL_GPL
+0xbc2031de acpi_processor_get_bios_limit vmlinux EXPORT_SYMBOL
+0x9975dc22 acpi_get_handle vmlinux EXPORT_SYMBOL
+0x8aa99509 async_tx_submit vmlinux EXPORT_SYMBOL_GPL
+0xd3ced5bd legacy_pic vmlinux EXPORT_SYMBOL
+0x7844bb64 mlx5_fs_add_rx_underlay_qpn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xd13733fc drm_atomic_add_affected_connectors drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9114b616 __xa_alloc vmlinux EXPORT_SYMBOL
+0x46013233 net_dec_ingress_queue vmlinux EXPORT_SYMBOL_GPL
+0x839b1401 platform_device_del vmlinux EXPORT_SYMBOL_GPL
+0x12363a0b platform_device_put vmlinux EXPORT_SYMBOL_GPL
+0xd36dc10c get_random_u32 vmlinux EXPORT_SYMBOL
+0xe3ff2c41 get_random_u64 vmlinux EXPORT_SYMBOL
+0xb9d8d84b devm_clk_hw_register_clkdev vmlinux EXPORT_SYMBOL
+0x556b5d62 __kfifo_dma_in_prepare_r vmlinux EXPORT_SYMBOL
+0xcb4869fd blk_mq_stop_hw_queues vmlinux EXPORT_SYMBOL
+0x131baecd __fat_fs_error vmlinux EXPORT_SYMBOL_GPL
+0x734a4bce __krealloc vmlinux EXPORT_SYMBOL
+0x995d1071 prof_on vmlinux EXPORT_SYMBOL_GPL
+0x9819613c devm_free_irq vmlinux EXPORT_SYMBOL
+0x2914ea2d ZSTD_compressBlock lib/zstd/zstd_compress EXPORT_SYMBOL
+0xefce3c3b ceph_pagelist_reserve net/ceph/libceph EXPORT_SYMBOL
+0xd8444096 rpc_exit net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc657a3ed mdev_set_iommu_device drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0xc401d489 bch_btree_insert_key drivers/md/bcache/bcache EXPORT_SYMBOL
+0xf52f38a2 team_modeop_port_change_dev_addr drivers/net/team/team EXPORT_SYMBOL
+0xcb74d01d mlx5_core_query_vendor_id drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x1bd8eafc drm_dp_mst_topology_mgr_set_mst drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x1486ef34 kset_create_and_add vmlinux EXPORT_SYMBOL_GPL
+0x1fb5d4e8 kernel_accept vmlinux EXPORT_SYMBOL
+0x9e2737f0 acpi_install_interface_handler vmlinux EXPORT_SYMBOL
+0xfa349688 aer_recover_queue vmlinux EXPORT_SYMBOL_GPL
+0xe044f921 pcim_iounmap vmlinux EXPORT_SYMBOL
+0x7a3ed43f mm_kobj vmlinux EXPORT_SYMBOL_GPL
+0x93c9db8e wb_writeout_inc vmlinux EXPORT_SYMBOL_GPL
+0x5b2f27fb do_wait_intr vmlinux EXPORT_SYMBOL
+0xe100dc5a iscsit_get_datain_values drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x2562421e mlx4_mr_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xd2f2a4ea sysctl_tcp_rto_max vmlinux EXPORT_SYMBOL
+0xc0794085 sysctl_tcp_rto_min vmlinux EXPORT_SYMBOL
+0xb19872e5 xt_proto_fini vmlinux EXPORT_SYMBOL_GPL
+0x7631719a sata_scr_write_flush vmlinux EXPORT_SYMBOL_GPL
+0xb590422e driver_find vmlinux EXPORT_SYMBOL_GPL
+0xb86c364b clk_register_divider vmlinux EXPORT_SYMBOL_GPL
+0xd194ddf9 acpi_gpe_count vmlinux EXPORT_SYMBOL
+0x14780486 pci_disable_pasid vmlinux EXPORT_SYMBOL_GPL
+0xd9d5d879 sbitmap_queue_resize vmlinux EXPORT_SYMBOL_GPL
+0x75d74232 devm_ioremap_resource vmlinux EXPORT_SYMBOL
+0xff94d43e sg_miter_start vmlinux EXPORT_SYMBOL
+0xfc115259 freq_qos_remove_request vmlinux EXPORT_SYMBOL_GPL
+0x56d697ce cpu_up vmlinux EXPORT_SYMBOL_GPL
+0x1d19f77b physical_mask vmlinux EXPORT_SYMBOL
+0x4cf5f5a8 lc_is_used lib/lru_cache EXPORT_SYMBOL
+0x6dfbf357 iw_create_cm_id drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x3646e38f dm_tm_issue_prefetches drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x1cc64b2d mlx4_SET_PORT_SCHEDULER drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x0905b5f7 cxgbi_ppm_release drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL
+0x2ee4fe2a nfs_show_stats fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x33beda6c tso_build_data vmlinux EXPORT_SYMBOL
+0x44b8b5dc sock_zerocopy_alloc vmlinux EXPORT_SYMBOL_GPL
+0xf9e43e4c fc_block_scsi_eh vmlinux EXPORT_SYMBOL
+0xe7dbc1dc dma_buf_map_attachment vmlinux EXPORT_SYMBOL_GPL
+0x40c9bec1 tty_mode_ioctl vmlinux EXPORT_SYMBOL_GPL
+0xc5c4ee59 pnp_stop_dev vmlinux EXPORT_SYMBOL
+0xe65f4f17 fb_class vmlinux EXPORT_SYMBOL
+0x6c61a2f3 load_nls vmlinux EXPORT_SYMBOL
+0x74627101 __ib_alloc_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf5dedb30 rdma_node_get_transport drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x70922a26 input_unregister_polled_device drivers/input/input-polldev EXPORT_SYMBOL
+0x96b9c2fc mlx5_core_attach_mcg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc86dd601 drm_fb_helper_cfb_copyarea drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x659ded26 xfrm_flush_gc vmlinux EXPORT_SYMBOL
+0x5058f967 dm_exception_store_destroy vmlinux EXPORT_SYMBOL
+0x4859b8bb rtc_year_days vmlinux EXPORT_SYMBOL
+0xbc9b8588 ehci_cf_port_reset_rwsem vmlinux EXPORT_SYMBOL_GPL
+0xfbad3cf0 scsi_normalize_sense vmlinux EXPORT_SYMBOL
+0x079d2a46 ata_host_suspend vmlinux EXPORT_SYMBOL_GPL
+0x7c4941d5 ata_link_offline vmlinux EXPORT_SYMBOL_GPL
+0x65d11441 __pm_runtime_use_autosuspend vmlinux EXPORT_SYMBOL_GPL
+0x8f80bf11 acpi_install_gpe_raw_handler vmlinux EXPORT_SYMBOL
+0xb2f35c6a xxh64 vmlinux EXPORT_SYMBOL
+0x8888f1fe xxh32 vmlinux EXPORT_SYMBOL
+0x298c0897 bio_advance vmlinux EXPORT_SYMBOL
+0x1420a97b from_kuid vmlinux EXPORT_SYMBOL
+0x33209528 add_timer_on vmlinux EXPORT_SYMBOL_GPL
+0xe4ecd2dc ceph_osdc_alloc_request net/ceph/libceph EXPORT_SYMBOL
+0xe0419360 nf_conntrack_eventmask_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xcde6f9ef register_pppox_proto drivers/net/ppp/pppox EXPORT_SYMBOL
+0x819d72cb klist_iter_exit vmlinux EXPORT_SYMBOL_GPL
+0xbddd0704 ip6_local_out vmlinux EXPORT_SYMBOL_GPL
+0xe07cd5d8 pm_runtime_set_autosuspend_delay vmlinux EXPORT_SYMBOL_GPL
+0xfb6eedf9 power_group_name vmlinux EXPORT_SYMBOL_GPL
+0x1ef9c8e3 pci_slots_kset vmlinux EXPORT_SYMBOL_GPL
+0x955e7eec pci_find_next_ext_capability vmlinux EXPORT_SYMBOL_GPL
+0x74b5ea68 lcm_not_zero vmlinux EXPORT_SYMBOL_GPL
+0x0474edef kstrtou16_from_user vmlinux EXPORT_SYMBOL
+0x7d74d522 kstrtoull_from_user vmlinux EXPORT_SYMBOL
+0x4e29eff7 crypto_skcipher_decrypt vmlinux EXPORT_SYMBOL_GPL
+0x72b243d4 free_dma vmlinux EXPORT_SYMBOL
+0x0dd2fa6b xdr_init_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xdb9e04b7 ib_sa_path_rec_get drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x21bd783d ib_check_mr_status drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc1d989c5 vfio_external_check_extension drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0xdeff4950 mlxsw_core_event_listener_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x1f1badd0 mlx5_lag_is_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xb81be9c7 drm_plane_create_color_properties drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3af161fd nfs_remove_bad_delegation fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x99f018c4 nvmem_cell_read vmlinux EXPORT_SYMBOL_GPL
+0xc5762bdf md_check_no_bitmap vmlinux EXPORT_SYMBOL
+0x4458d514 scsi_is_sas_rphy vmlinux EXPORT_SYMBOL
+0x020df06c device_unregister vmlinux EXPORT_SYMBOL_GPL
+0x1b02d9e6 serial8250_request_dma vmlinux EXPORT_SYMBOL_GPL
+0xa93ef2cb virtqueue_get_vring vmlinux EXPORT_SYMBOL_GPL
+0x215a3207 jbd2_journal_load vmlinux EXPORT_SYMBOL
+0x7dcbdcd1 seq_escape_mem_ascii vmlinux EXPORT_SYMBOL
+0x338e369e vsock_enqueue_accept net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xd932925c kvm_requeue_exception arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9f621b2b xfrm_state_add vmlinux EXPORT_SYMBOL
+0x07a71ed8 ping_rcv vmlinux EXPORT_SYMBOL_GPL
+0x154b7b34 __netlink_kernel_create vmlinux EXPORT_SYMBOL
+0xff5a8cfe cn_del_callback vmlinux EXPORT_SYMBOL_GPL
+0xf3b95d79 btree_remove vmlinux EXPORT_SYMBOL_GPL
+0x5da6fcf5 iov_iter_bvec vmlinux EXPORT_SYMBOL
+0x740856ef iov_iter_kvec vmlinux EXPORT_SYMBOL
+0x88543fe6 devlink_param_driverinit_value_set vmlinux EXPORT_SYMBOL_GPL
+0xfa3ff5dd devlink_param_driverinit_value_get vmlinux EXPORT_SYMBOL_GPL
+0x07f3a40b scsicam_bios_param vmlinux EXPORT_SYMBOL
+0xb0fd2e2f pci_generic_config_read32 vmlinux EXPORT_SYMBOL_GPL
+0xb2ce3cc9 fscrypt_ioctl_get_key_status vmlinux EXPORT_SYMBOL_GPL
+0xb2c20d36 I_BDEV vmlinux EXPORT_SYMBOL
+0x9dc5e8c2 __cgroup_bpf_check_dev_permission vmlinux EXPORT_SYMBOL
+0x7729cbdd task_handoff_register vmlinux EXPORT_SYMBOL_GPL
+0x197d6752 irq_create_fwspec_mapping vmlinux EXPORT_SYMBOL_GPL
+0xe3bc7fd4 hpet_unregister_irq_handler vmlinux EXPORT_SYMBOL_GPL
+0x6f19d5c3 iommu_sva_set_ops vmlinux EXPORT_SYMBOL_GPL
+0x6aba1bfb tty_port_tty_set vmlinux EXPORT_SYMBOL
+0xf9e67c8b tty_port_tty_get vmlinux EXPORT_SYMBOL
+0x02c656b6 acpi_enable_all_runtime_gpes vmlinux EXPORT_SYMBOL
+0x4f1cd128 security_tun_dev_create vmlinux EXPORT_SYMBOL
+0x4ef8c9d4 fs_context_for_mount vmlinux EXPORT_SYMBOL
+0x96293941 dma_direct_sync_sg_for_device vmlinux EXPORT_SYMBOL
+0x495e378d __pv_queued_spin_lock_slowpath vmlinux EXPORT_SYMBOL
+0xe00bea84 drm_format_info_min_pitch drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6856acda kvm_vcpu_mark_page_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x38a9f7c5 in6addr_loopback vmlinux EXPORT_SYMBOL
+0x88392f8b inet_csk_route_child_sock vmlinux EXPORT_SYMBOL_GPL
+0x9a16669a netdev_set_num_tc vmlinux EXPORT_SYMBOL
+0xec1ad27e skb_dump vmlinux EXPORT_SYMBOL
+0xc76d5d7b md_bitmap_resize vmlinux EXPORT_SYMBOL_GPL
+0xde898c7d mipi_dsi_dcs_get_power_mode vmlinux EXPORT_SYMBOL
+0xdc7df67f apei_exec_ctx_init vmlinux EXPORT_SYMBOL_GPL
+0x43f62bb6 lookup_user_key vmlinux EXPORT_SYMBOL
+0xd044f5f8 vfs_path_lookup vmlinux EXPORT_SYMBOL
+0xd03eaf4c schedule_hrtimeout_range vmlinux EXPORT_SYMBOL_GPL
+0x7ea47051 ib_unregister_device_and_put drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xbc222a8d mlxsw_afk_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x26f44eda set_phv_bit drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x9a5c8448 drm_kms_helper_hotplug_event drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x241e7111 fib6_new_table vmlinux EXPORT_SYMBOL_GPL
+0x0ee1e903 devlink_params_unpublish vmlinux EXPORT_SYMBOL_GPL
+0x31dc1717 usb_sg_wait vmlinux EXPORT_SYMBOL_GPL
+0xe70a5550 usb_sg_init vmlinux EXPORT_SYMBOL_GPL
+0xbbadf464 usb_urb_ep_type_check vmlinux EXPORT_SYMBOL_GPL
+0xc2caddd1 sas_phy_add vmlinux EXPORT_SYMBOL
+0xe40fd693 dev_attr_ncq_prio_enable vmlinux EXPORT_SYMBOL_GPL
+0xe40976c0 pnp_range_reserved vmlinux EXPORT_SYMBOL
+0x008960f1 jbd2_journal_flush vmlinux EXPORT_SYMBOL
+0x76b93164 qtree_release_dquot vmlinux EXPORT_SYMBOL
+0xdf855868 invalidate_bdev vmlinux EXPORT_SYMBOL
+0x944375db _totalram_pages vmlinux EXPORT_SYMBOL
+0x9aeacb87 ring_buffer_iter_empty vmlinux EXPORT_SYMBOL_GPL
+0xbc98fa30 put_pid_ns vmlinux EXPORT_SYMBOL_GPL
+0x07d06e6e ceph_put_page_vector net/ceph/libceph EXPORT_SYMBOL
+0x81fff2d1 ip_set_netmask_map net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xe7bcf5f8 ip_set_put_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xcdbdeca7 mdio45_ethtool_gset_npage drivers/net/mdio EXPORT_SYMBOL
+0x54b1e753 drm_get_format_info drivers/gpu/drm/drm EXPORT_SYMBOL
+0x96eb492d acpi_smbus_write drivers/acpi/sbshc EXPORT_SYMBOL_GPL
+0x5be9173d kobject_uevent vmlinux EXPORT_SYMBOL_GPL
+0xd45c6a23 tcp_twsk_unique vmlinux EXPORT_SYMBOL_GPL
+0xa41431be netdev_info vmlinux EXPORT_SYMBOL
+0x610b257e mpt_reset_deregister vmlinux EXPORT_SYMBOL
+0xd8a994eb scsi_extd_sense_format vmlinux EXPORT_SYMBOL
+0x6c61ce70 num_registered_fb vmlinux EXPORT_SYMBOL
+0x9da29938 pci_stop_root_bus vmlinux EXPORT_SYMBOL_GPL
+0x154257f0 __fscrypt_prepare_rename vmlinux EXPORT_SYMBOL_GPL
+0x3e8a214c vfs_ioctl vmlinux EXPORT_SYMBOL
+0xc4d3c04b xdr_buf_from_iov net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9adb7399 nf_conntrack_expect_lock net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x32978d64 drm_dp_atomic_release_vcpi_slots drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x1db7706b __copy_user_nocache vmlinux EXPORT_SYMBOL
+0xb8ff7aa7 inet_send_prepare vmlinux EXPORT_SYMBOL_GPL
+0x431037ee wakeup_source_add vmlinux EXPORT_SYMBOL_GPL
+0xb837b45a pci_bus_write_config_dword vmlinux EXPORT_SYMBOL
+0x3e31d9c3 net_prio_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0xac37e2cc vhost_chr_read_iter drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xbf1a2968 btracker_create drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xf489b9c1 mlx5_core_res_hold drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x2a273f25 drm_atomic_helper_prepare_planes drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x890944d6 drm_dp_aux_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x341aa326 nfs_set_sb_security fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xc79796ef dst_cache_get_ip6 vmlinux EXPORT_SYMBOL_GPL
+0xc3e6ad78 netdev_lower_state_changed vmlinux EXPORT_SYMBOL
+0x218cf347 tty_hung_up_p vmlinux EXPORT_SYMBOL
+0xdaedcc92 blk_cleanup_queue vmlinux EXPORT_SYMBOL
+0x2d27b25d iomap_is_partially_uptodate vmlinux EXPORT_SYMBOL_GPL
+0x730c50d2 generic_setlease vmlinux EXPORT_SYMBOL
+0x5adc2807 btracker_destroy drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x5a49dbc9 timerqueue_del vmlinux EXPORT_SYMBOL_GPL
+0xbd832101 ipv6_getsockopt vmlinux EXPORT_SYMBOL
+0x65453fbc dev_pm_opp_get_voltage vmlinux EXPORT_SYMBOL_GPL
+0x213e4965 ps2_is_keyboard_id vmlinux EXPORT_SYMBOL
+0x4720ebf5 mpt_GetIocState vmlinux EXPORT_SYMBOL
+0x2733eaf7 scsi_dev_info_list_add_keyed vmlinux EXPORT_SYMBOL
+0x9599d890 nvdimm_provider_data vmlinux EXPORT_SYMBOL_GPL
+0xdc97af2e syscore_suspend vmlinux EXPORT_SYMBOL_GPL
+0x569abcca acpi_walk_resources vmlinux EXPORT_SYMBOL
+0x465f55fd ipmi_platform_add vmlinux EXPORT_SYMBOL
+0xd5b917d8 pcim_enable_device vmlinux EXPORT_SYMBOL
+0x25db1577 do_trace_write_msr vmlinux EXPORT_SYMBOL
+0x85d7edfd hpet_set_periodic_freq vmlinux EXPORT_SYMBOL_GPL
+0x4fe5220d vhost_dev_cleanup drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xb16dbfa8 nf_register_net_hooks vmlinux EXPORT_SYMBOL
+0xbf14d2ed usb_driver_release_interface vmlinux EXPORT_SYMBOL_GPL
+0x715c8d18 ata_sff_queue_work vmlinux EXPORT_SYMBOL_GPL
+0xf6e874f5 ata_timing_merge vmlinux EXPORT_SYMBOL_GPL
+0x93000495 proc_mkdir_data vmlinux EXPORT_SYMBOL_GPL
+0xd1f049fc mpage_writepages vmlinux EXPORT_SYMBOL
+0x068ca6bf write_one_page vmlinux EXPORT_SYMBOL
+0x7a81541b async_synchronize_cookie vmlinux EXPORT_SYMBOL_GPL
+0xca80437b ceph_extent_to_file net/ceph/libceph EXPORT_SYMBOL
+0x050833fe drm_agp_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe5dff9ed o2nm_node_get fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x1cd292d6 nlmsvc_ops fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x004e6fdc udp6_set_csum vmlinux EXPORT_SYMBOL
+0xc9e8d253 inet_add_protocol vmlinux EXPORT_SYMBOL
+0xcd6f2dc9 nf_log_buf_add vmlinux EXPORT_SYMBOL_GPL
+0x447d5339 ata_host_activate vmlinux EXPORT_SYMBOL_GPL
+0xc3b4a8d4 n_tty_ioctl_helper vmlinux EXPORT_SYMBOL
+0x331d834e virtqueue_enable_cb_delayed vmlinux EXPORT_SYMBOL_GPL
+0xf60ab926 acpi_get_event_status vmlinux EXPORT_SYMBOL
+0xd570b1ba acpi_set_modalias vmlinux EXPORT_SYMBOL_GPL
+0xb652166b sync_blockdev vmlinux EXPORT_SYMBOL
+0x3a23a36c nobh_write_end vmlinux EXPORT_SYMBOL
+0xa60fcee9 __tracepoint_bcache_read drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x4475afb3 nfs_server_remove_lists fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x77b0fed9 __next_node_in vmlinux EXPORT_SYMBOL
+0x6e720ff2 rtnl_unlock vmlinux EXPORT_SYMBOL
+0x7bb08d46 sock_rfree vmlinux EXPORT_SYMBOL
+0xb8ce8468 kernel_recvmsg vmlinux EXPORT_SYMBOL
+0xacfe818f usb_hub_find_child vmlinux EXPORT_SYMBOL_GPL
+0xbf7beaa5 phy_request_interrupt vmlinux EXPORT_SYMBOL
+0xda668b93 __root_device_register vmlinux EXPORT_SYMBOL_GPL
+0xc0a3d105 find_next_bit vmlinux EXPORT_SYMBOL
+0xf8d07858 bitmap_from_arr32 vmlinux EXPORT_SYMBOL
+0x69aa8fdd disk_part_iter_init vmlinux EXPORT_SYMBOL_GPL
+0x662af7df dquot_reclaim_space_nodirty vmlinux EXPORT_SYMBOL
+0x14a76902 freeze_bdev vmlinux EXPORT_SYMBOL
+0x979f7c0f clockevents_unbind_device vmlinux EXPORT_SYMBOL_GPL
+0x18018bd7 srcu_batches_completed vmlinux EXPORT_SYMBOL_GPL
+0x64a62e11 acpi_processor_ffh_cstate_enter vmlinux EXPORT_SYMBOL_GPL
+0x534356fd nft_meta_set_destroy net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x61f70baa nf_nat_alloc_null_binding net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xa7aa51ff mlx4_srq_lookup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb7f93ba5 skb_kill_datagram vmlinux EXPORT_SYMBOL
+0x34ed02a9 kfree_skb vmlinux EXPORT_SYMBOL
+0xc9a25e89 phy_resume vmlinux EXPORT_SYMBOL
+0xd741efa5 kernel_kobj vmlinux EXPORT_SYMBOL_GPL
+0x2b9997fb atomic_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL
+0xb39c89ed param_get_long vmlinux EXPORT_SYMBOL
+0x97b71b22 ib_send_cm_mra drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x26a64c6a i2c_smbus_read_i2c_block_data_or_emulated drivers/i2c/i2c-core EXPORT_SYMBOL
+0x34a54270 drm_atomic_set_fence_for_plane drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc307a10c drm_mm_takedown drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf69ccd8f nfs_atomic_open fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x2c4e98b3 udp_seq_next vmlinux EXPORT_SYMBOL
+0x84502a47 blk_status_to_errno vmlinux EXPORT_SYMBOL_GPL
+0xbee380ba posix_acl_alloc vmlinux EXPORT_SYMBOL
+0x4b17e177 kernel_read_file_from_fd vmlinux EXPORT_SYMBOL_GPL
+0x0fc37562 amd_smn_read vmlinux EXPORT_SYMBOL_GPL
+0xfb172b5b svc_set_num_threads net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe93e3a6a nf_reject_ip6_tcphdr_get net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL
+0xbcfe69c2 synproxy_send_client_synack_ipv6 net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x6c782cbc drm_crtc_vblank_on drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3d68c1fa drm_dp_mst_hpd_irq drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xa500c966 fuse_do_open fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x99639116 nfs_permission fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x294647b8 mrp_request_leave vmlinux EXPORT_SYMBOL_GPL
+0x58d76654 rtnl_delete_link vmlinux EXPORT_SYMBOL_GPL
+0xc00b1f7e __kfree_skb vmlinux EXPORT_SYMBOL
+0x80e80ded tty_port_register_device vmlinux EXPORT_SYMBOL_GPL
+0xc9822234 clk_register_clkdev vmlinux EXPORT_SYMBOL
+0x6c8068aa pci_find_parent_resource vmlinux EXPORT_SYMBOL
+0xb3cfde9c pinctrl_get_group_pins vmlinux EXPORT_SYMBOL_GPL
+0x4ebca694 sysfs_update_groups vmlinux EXPORT_SYMBOL_GPL
+0xe8f30d49 register_trace_event vmlinux EXPORT_SYMBOL_GPL
+0x89f03d41 ceph_print_client_options net/ceph/libceph EXPORT_SYMBOL
+0x5cb60f10 core_tpg_set_initiator_node_tag drivers/target/target_core_mod EXPORT_SYMBOL
+0x31b31f5c csum_partial_copy_nocheck vmlinux EXPORT_SYMBOL
+0x7d1f053e __hw_addr_unsync_dev vmlinux EXPORT_SYMBOL
+0xd052b5a8 dma_set_coherent_mask vmlinux EXPORT_SYMBOL
+0xd75912f8 nft_register_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x348b6893 vhost_exceeds_weight drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x2ad56f66 rdma_restrack_get_byid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xea143610 vmci_datagram_send drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x8e17b3ae idr_destroy vmlinux EXPORT_SYMBOL
+0xa7d5f92e ida_destroy vmlinux EXPORT_SYMBOL
+0x211ba4f0 tcp_reno_undo_cwnd vmlinux EXPORT_SYMBOL_GPL
+0x25a78a50 ip_defrag vmlinux EXPORT_SYMBOL
+0x3a485cd6 register_qdisc vmlinux EXPORT_SYMBOL
+0x41914a1d scsi_flush_work vmlinux EXPORT_SYMBOL_GPL
+0xe591a05e nvdimm_has_cache vmlinux EXPORT_SYMBOL_GPL
+0x343060b0 fbcon_rotate_ccw vmlinux EXPORT_SYMBOL
+0x47c20f8a refcount_dec_not_one vmlinux EXPORT_SYMBOL
+0xf4da71b3 public_key_verify_signature vmlinux EXPORT_SYMBOL_GPL
+0xa3b958ce reset_hung_task_detector vmlinux EXPORT_SYMBOL_GPL
+0x67719f27 xdr_stream_decode_string_dup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xef566efa xprt_wait_for_buffer_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf9411abf arpt_alloc_initial_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL_GPL
+0x38823f02 nf_nat_ipv6_register_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x5943e6aa sbc_attrib_attrs drivers/target/target_core_mod EXPORT_SYMBOL
+0xd915f44c mlx4_get_roce_gid_from_slave drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x34d91614 mlx4_config_roce_v2_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xd96b425a __drm_printfn_coredump drivers/gpu/drm/drm EXPORT_SYMBOL
+0xddd62f10 drm_dp_link_probe drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x94941a45 kvm_mmu_free_roots arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xeab89a37 xsk_reuseq_swap vmlinux EXPORT_SYMBOL_GPL
+0xdc1224ff default_qdisc_ops vmlinux EXPORT_SYMBOL
+0x08e94300 __tracepoint_br_fdb_update vmlinux EXPORT_SYMBOL_GPL
+0xb449e6d6 free_netdev vmlinux EXPORT_SYMBOL
+0x75871f5e acpi_get_next_object vmlinux EXPORT_SYMBOL
+0x6f35afad aead_register_instance vmlinux EXPORT_SYMBOL_GPL
+0xc9ec4e21 free_percpu vmlinux EXPORT_SYMBOL_GPL
+0x3737d9a9 ZSTD_DStreamWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x17dd39d6 dm_deferred_set_create drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xbfb47387 mlx5_query_port_link_width_oper drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x4d6f0af5 bcm_phy_write_shadow drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xb0fb7a72 drm_event_reserve_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x217aaabb drm_helper_move_panel_connectors_to_head drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x0b56f0f9 fuse_send_init fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x168909c5 ip6_datagram_recv_ctl vmlinux EXPORT_SYMBOL_GPL
+0xf7cf4015 ip6_dst_alloc vmlinux EXPORT_SYMBOL
+0xfd21d965 devlink_port_attrs_set vmlinux EXPORT_SYMBOL_GPL
+0x2d68c776 netdev_class_remove_file_ns vmlinux EXPORT_SYMBOL
+0xcea6ac60 metadata_dst_free vmlinux EXPORT_SYMBOL_GPL
+0x2722d0b8 __zerocopy_sg_from_iter vmlinux EXPORT_SYMBOL
+0xfeaf38fe sk_setup_caps vmlinux EXPORT_SYMBOL_GPL
+0x72d267dc nvmem_del_cell_lookups vmlinux EXPORT_SYMBOL_GPL
+0x0a79cddf scsi_host_get vmlinux EXPORT_SYMBOL
+0x424d3620 zlib_inflateIncomp vmlinux EXPORT_SYMBOL
+0x7f8e67d4 wbt_disable_default vmlinux EXPORT_SYMBOL_GPL
+0xb23dcf95 iomap_releasepage vmlinux EXPORT_SYMBOL_GPL
+0xf932015f __raw_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0x6a5fa363 sigprocmask vmlinux EXPORT_SYMBOL
+0x3fa8f8e7 ieee802154_unregister_hw net/mac802154/mac802154 EXPORT_SYMBOL
+0x159dbbdf drm_vblank_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x85216337 drm_dp_dual_mode_detect drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x9e13f6f6 gf128mul_lle crypto/gf128mul EXPORT_SYMBOL
+0xc04e335e nfs4_mark_deviceid_available fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x3583bcd6 __skb_flow_get_ports vmlinux EXPORT_SYMBOL
+0x926c82fb sk_stream_wait_connect vmlinux EXPORT_SYMBOL
+0xb2ea0ac9 phy_set_sym_pause vmlinux EXPORT_SYMBOL
+0x1e140c1c devres_alloc_node vmlinux EXPORT_SYMBOL_GPL
+0x52639dfa devm_hwrng_register vmlinux EXPORT_SYMBOL_GPL
+0x554ae3a4 irq_poll_sched vmlinux EXPORT_SYMBOL
+0x3e1d165b drm_hdcp_update_content_protection drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa9504d5a drm_atomic_helper_commit_duplicated_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6d390fd7 kvm_init_shadow_ept_mmu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb8f11603 idr_alloc vmlinux EXPORT_SYMBOL_GPL
+0x6adf292f sock_no_sendmsg vmlinux EXPORT_SYMBOL
+0x2ebe2b9d phy_basic_features vmlinux EXPORT_SYMBOL_GPL
+0x779cdf2a nvme_remove_namespaces vmlinux EXPORT_SYMBOL_GPL
+0x54a53ee3 __async_tx_find_channel vmlinux EXPORT_SYMBOL_GPL
+0x209f172b jbd2_journal_update_sb_errno vmlinux EXPORT_SYMBOL
+0x6153d6e8 vfs_unlink vmlinux EXPORT_SYMBOL
+0x3ba01b47 get_compat_sigset vmlinux EXPORT_SYMBOL_GPL
+0xe9a55499 get_task_exe_file vmlinux EXPORT_SYMBOL
+0xc6ab0af1 transport_init_se_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0x612df9ae vmci_qpair_detach drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x82ab889b nfs_mkdir fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xcb101164 xfrm_input_unregister_afinfo vmlinux EXPORT_SYMBOL
+0x504cc673 tcp_req_err vmlinux EXPORT_SYMBOL
+0xc7fae024 xt_compat_calc_jump vmlinux EXPORT_SYMBOL_GPL
+0x338637a7 __skb_gro_checksum_complete vmlinux EXPORT_SYMBOL
+0xb2405efc secure_tcp_seq vmlinux EXPORT_SYMBOL_GPL
+0x61d8a376 cpufreq_frequency_table_get_index vmlinux EXPORT_SYMBOL_GPL
+0xb10e82a2 phy_aneg_done vmlinux EXPORT_SYMBOL
+0xb1069be2 fcoe_check_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x8a72c22c srp_start_tl_fail_timers vmlinux EXPORT_SYMBOL
+0x6d663094 ata_acpi_stm vmlinux EXPORT_SYMBOL_GPL
+0xa4b3d247 ata_acpi_gtm vmlinux EXPORT_SYMBOL_GPL
+0x9c23d3bb key_revoke vmlinux EXPORT_SYMBOL
+0x5cd2e50c pipe_unlock vmlinux EXPORT_SYMBOL
+0xc528a49a queued_write_lock_slowpath vmlinux EXPORT_SYMBOL
+0xb283726b xprt_reserve_xprt_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x2f1976ad backlight_force_update drivers/video/backlight/backlight EXPORT_SYMBOL
+0xf7495e27 genlmsg_multicast_allns vmlinux EXPORT_SYMBOL
+0xe8ec9f73 __sock_recv_wifi_status vmlinux EXPORT_SYMBOL_GPL
+0xeba6025d devm_mbox_controller_register vmlinux EXPORT_SYMBOL_GPL
+0x4a3425eb efivar_entry_delete vmlinux EXPORT_SYMBOL_GPL
+0xd2ee5cd1 pm_generic_restore vmlinux EXPORT_SYMBOL_GPL
+0xa70d548f bus_rescan_devices vmlinux EXPORT_SYMBOL_GPL
+0xd6b7d7d6 put_tty_driver vmlinux EXPORT_SYMBOL
+0xc83bf59e vring_create_virtqueue vmlinux EXPORT_SYMBOL_GPL
+0xecfd68ef acpi_get_node vmlinux EXPORT_SYMBOL
+0xed668705 debugfs_read_file_bool vmlinux EXPORT_SYMBOL_GPL
+0xab4c9dac __tracepoint_rpm_resume vmlinux EXPORT_SYMBOL_GPL
+0xdb262a9e queue_work_on vmlinux EXPORT_SYMBOL
+0x264c0106 ioport_resource vmlinux EXPORT_SYMBOL
+0x997a6d11 drm_crtc_vblank_restore drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6c3dffb3 __drm_atomic_helper_connector_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7c5a86fc nfs_release_request fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xb71f95ac __fscache_maybe_release_page fs/fscache/fscache EXPORT_SYMBOL
+0xa1a5c09c devlink_sb_register vmlinux EXPORT_SYMBOL_GPL
+0x294084d6 dst_cache_set_ip6 vmlinux EXPORT_SYMBOL_GPL
+0xc7a4fbed rtnl_lock vmlinux EXPORT_SYMBOL
+0xe7a1ec36 scsi_device_from_queue vmlinux EXPORT_SYMBOL_GPL
+0xac55713b ata_scsi_slave_destroy vmlinux EXPORT_SYMBOL_GPL
+0xed6f844d pci_probe_reset_bus vmlinux EXPORT_SYMBOL_GPL
+0x59578f14 debugfs_create_bool vmlinux EXPORT_SYMBOL_GPL
+0xce2ab121 generic_file_splice_read vmlinux EXPORT_SYMBOL
+0x8eed066f get_tree_single_reconf vmlinux EXPORT_SYMBOL
+0xdd2efc0f ring_buffer_reset_cpu vmlinux EXPORT_SYMBOL_GPL
+0x5194e2be __irq_domain_add vmlinux EXPORT_SYMBOL_GPL
+0x1e81a32d prepare_kernel_cred vmlinux EXPORT_SYMBOL
+0x015af7f4 system_state vmlinux EXPORT_SYMBOL
+0x5c00d810 ZSTD_CDictWorkspaceBound lib/zstd/zstd_compress EXPORT_SYMBOL
+0xbf772ed7 xfrm6_tunnel_alloc_spi net/ipv6/xfrm6_tunnel EXPORT_SYMBOL
+0x35978cc4 vfio_del_group_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0xe2eaed05 pnfs_update_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x7d493006 nfs_access_zap_cache fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x156f2462 kvm_set_memory_region arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xee3482f8 __icmp_send vmlinux EXPORT_SYMBOL
+0xcef67470 udp_push_pending_frames vmlinux EXPORT_SYMBOL
+0x62fd04c7 llc_add_pack vmlinux EXPORT_SYMBOL
+0xa7a0ddc8 pcibios_align_resource vmlinux EXPORT_SYMBOL
+0x9babd5ff hidinput_connect vmlinux EXPORT_SYMBOL_GPL
+0x4c2ee5c7 __platform_driver_probe vmlinux EXPORT_SYMBOL_GPL
+0x45558f56 clk_unregister_fixed_factor vmlinux EXPORT_SYMBOL_GPL
+0x15bed7a5 LZ4_decompress_safe_partial vmlinux EXPORT_SYMBOL
+0xc4652d8d rhashtable_destroy vmlinux EXPORT_SYMBOL_GPL
+0x773539c1 del_timer_sync vmlinux EXPORT_SYMBOL
+0x36e1d426 param_ops_uint vmlinux EXPORT_SYMBOL
+0xa9854364 umc_normaddr_to_sysaddr vmlinux EXPORT_SYMBOL_GPL
+0xf3511ac9 nvmf_should_reconnect drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0xa987f4cb drm_atomic_get_new_connector_for_encoder drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2c66ac85 devlink_info_serial_number_put vmlinux EXPORT_SYMBOL_GPL
+0xe28e4207 __tracepoint_dma_fence_emit vmlinux EXPORT_SYMBOL
+0xf29857b7 nvdimm_bus_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0xfa8e7de0 con_set_default_unimap vmlinux EXPORT_SYMBOL
+0x2f384db3 acpi_is_video_device vmlinux EXPORT_SYMBOL
+0xd9239d82 fb_deferred_io_open vmlinux EXPORT_SYMBOL_GPL
+0xdaba2f3c fscrypt_get_ctx vmlinux EXPORT_SYMBOL
+0x7a95e5ae do_settimeofday64 vmlinux EXPORT_SYMBOL
+0x68a00e3c devm_request_resource vmlinux EXPORT_SYMBOL
+0x3e5879d0 l2tp_tunnel_delete net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x975793bc inet_diag_bc_sk net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0x47150fb2 nf_osf_match net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL
+0x6c507480 edac_get_sysfs_subsys drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x2d85dd69 iscsit_build_datain_pdu drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x56007411 ipmi_get_smi_info drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x0c5afca5 xsk_umem_complete_tx vmlinux EXPORT_SYMBOL
+0x49b51ea3 __qdisc_calculate_pkt_len vmlinux EXPORT_SYMBOL
+0xde5013d4 dev_get_by_index vmlinux EXPORT_SYMBOL
+0x709a96d3 hid_compare_device_paths vmlinux EXPORT_SYMBOL_GPL
+0x1db55bda srp_remove_host vmlinux EXPORT_SYMBOL_GPL
+0xc6c6dd49 dma_buf_unmap_attachment vmlinux EXPORT_SYMBOL_GPL
+0x8f261ec8 serial8250_rpm_get_tx vmlinux EXPORT_SYMBOL_GPL
+0x65a9e218 pci_enable_ats vmlinux EXPORT_SYMBOL_GPL
+0x3539f11b match_strlcpy vmlinux EXPORT_SYMBOL
+0x901623c0 ioctl_by_bdev vmlinux EXPORT_SYMBOL
+0xc9827693 __bpf_call_base vmlinux EXPORT_SYMBOL_GPL
+0x7941385d edac_raw_mc_handle_error drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x0294c6a2 nfs_callback_nr_threads fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x113a1c2d inet6_hash vmlinux EXPORT_SYMBOL_GPL
+0x36bfe1cc init_net vmlinux EXPORT_SYMBOL
+0x799826dc cpufreq_freq_attr_scaling_boost_freqs vmlinux EXPORT_SYMBOL_GPL
+0x9cd83da1 __phy_modify_mmd_changed vmlinux EXPORT_SYMBOL_GPL
+0x38bb7259 class_create_file_ns vmlinux EXPORT_SYMBOL_GPL
+0x830b120e pci_stop_and_remove_bus_device_locked vmlinux EXPORT_SYMBOL_GPL
+0x96da9e48 textsearch_find_continuous vmlinux EXPORT_SYMBOL
+0x4ef1d748 __crypto_alloc_tfm vmlinux EXPORT_SYMBOL_GPL
+0xd5093c66 key_unlink vmlinux EXPORT_SYMBOL
+0xa222d93c keyring_search vmlinux EXPORT_SYMBOL
+0x6a965e17 configfs_unregister_subsystem vmlinux EXPORT_SYMBOL
+0x41ed3cec eventfd_ctx_remove_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x68c5fe27 generic_delete_inode vmlinux EXPORT_SYMBOL
+0xab9bd884 perf_register_guest_info_callbacks vmlinux EXPORT_SYMBOL_GPL
+0xc80ab559 swake_up_one vmlinux EXPORT_SYMBOL
+0x20771768 drm_atomic_helper_connector_tv_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6653ee78 kvm_apic_update_irr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xee91879b rb_first_postorder vmlinux EXPORT_SYMBOL
+0x80248a8e br_vlan_get_pvid_rcu vmlinux EXPORT_SYMBOL_GPL
+0x0b86a571 fc_vport_setlink vmlinux EXPORT_SYMBOL
+0x3b20fb95 dma_fence_remove_callback vmlinux EXPORT_SYMBOL
+0x7de65a03 acpi_lpat_free_conversion_table vmlinux EXPORT_SYMBOL_GPL
+0x2ddda855 pinctrl_pm_select_idle_state vmlinux EXPORT_SYMBOL_GPL
+0xb9e7429c memcpy_toio vmlinux EXPORT_SYMBOL
+0xb633f115 irq_poll_enable vmlinux EXPORT_SYMBOL
+0xb4f3a7fe ceph_release_page_vector net/ceph/libceph EXPORT_SYMBOL
+0x3a19c6c5 ceph_wait_for_latest_osdmap net/ceph/libceph EXPORT_SYMBOL
+0x494f26bf nf_nat_pptp_hook_expectfn net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL
+0xdf5cba0f kvm_mmu_sync_roots arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x975f5035 dev_change_proto_down vmlinux EXPORT_SYMBOL
+0x5603c02b sock_gettstamp vmlinux EXPORT_SYMBOL
+0x66af1fd1 lockref_put_or_lock vmlinux EXPORT_SYMBOL
+0x3100cff9 lockref_get_or_lock vmlinux EXPORT_SYMBOL
+0x12b5918e virtio_transport_stream_has_data net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x9ec49fe5 i2c_smbus_write_i2c_block_data drivers/i2c/i2c-core EXPORT_SYMBOL
+0xf5ce1f20 cxgb4_remove_server drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x5c6330d3 fc_frame_crc_check vmlinux EXPORT_SYMBOL
+0xecf5ec26 vchan_tx_desc_free vmlinux EXPORT_SYMBOL_GPL
+0xf2c43f3f zlib_deflate vmlinux EXPORT_SYMBOL
+0x00734387 shash_no_setkey vmlinux EXPORT_SYMBOL_GPL
+0x11e08f96 trace_seq_putmem_hex vmlinux EXPORT_SYMBOL_GPL
+0xca431c05 wake_bit_function vmlinux EXPORT_SYMBOL
+0x5306b631 ib_map_mr_sg drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xce9b9079 inet_csk_route_req vmlinux EXPORT_SYMBOL_GPL
+0xd042475c qdisc_get_rtab vmlinux EXPORT_SYMBOL
+0xc62ec14e dev_change_proto_down_generic vmlinux EXPORT_SYMBOL
+0xe2ee21d5 skb_orphan_partial vmlinux EXPORT_SYMBOL
+0xc881e2ad hid_set_field vmlinux EXPORT_SYMBOL_GPL
+0x4b96130c ata_sff_dev_classify vmlinux EXPORT_SYMBOL_GPL
+0x0c00227f bus_remove_file vmlinux EXPORT_SYMBOL_GPL
+0xa0834dfa mipi_dsi_dcs_set_column_address vmlinux EXPORT_SYMBOL
+0xfa86438b locks_lock_inode_wait vmlinux EXPORT_SYMBOL
+0x88a680bb locks_release_private vmlinux EXPORT_SYMBOL_GPL
+0xec5a6637 __fscrypt_encrypt_symlink vmlinux EXPORT_SYMBOL_GPL
+0x2aa0843e mempool_resize vmlinux EXPORT_SYMBOL
+0xae2d4d20 __ftrace_vprintk vmlinux EXPORT_SYMBOL_GPL
+0x196614ce hw_breakpoint_restore vmlinux EXPORT_SYMBOL_GPL
+0x48aacbb2 mlx5_query_port_max_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xfef9059e mlx4_get_slave_default_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xc5c6fcd7 drm_agp_unbind drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbd76685e drm_atomic_get_crtc_state drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7c4b06be xfrm_unregister_km vmlinux EXPORT_SYMBOL
+0x9b723816 in_dev_finish_destroy vmlinux EXPORT_SYMBOL
+0x4ad28203 hid_destroy_device vmlinux EXPORT_SYMBOL_GPL
+0x2a0a40fa mdio_bus_init vmlinux EXPORT_SYMBOL_GPL
+0x2eb71747 nvme_wait_freeze vmlinux EXPORT_SYMBOL_GPL
+0x492d52a6 device_get_next_child_node vmlinux EXPORT_SYMBOL_GPL
+0x87b8798d sg_next vmlinux EXPORT_SYMBOL
+0x0b07abe2 unshare_fs_struct vmlinux EXPORT_SYMBOL_GPL
+0x01ee5532 smp_call_function_any vmlinux EXPORT_SYMBOL_GPL
+0x6f9d540b freq_qos_add_request vmlinux EXPORT_SYMBOL_GPL
+0xbc04bd46 x86_platform vmlinux EXPORT_SYMBOL_GPL
+0x203cbd00 __nf_conntrack_confirm net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x547738b3 rdma_move_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xa7e21e43 core_tpg_check_initiator_node_acl drivers/target/target_core_mod EXPORT_SYMBOL
+0xaf969565 ocfs2_dlm_lock fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x187faee2 nfs_pageio_init_write fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xc7ba3b4f unix_inq_len vmlinux EXPORT_SYMBOL_GPL
+0x136209db power_supply_reg_notifier vmlinux EXPORT_SYMBOL_GPL
+0x80bf0fa5 mdiobus_scan vmlinux EXPORT_SYMBOL
+0x713cb4ba phy_gbit_features vmlinux EXPORT_SYMBOL_GPL
+0xf4915757 nd_blk_memremap_flags vmlinux EXPORT_SYMBOL_GPL
+0x858b3fe3 free_iova_mem vmlinux EXPORT_SYMBOL
+0x8a003b01 tty_port_carrier_raised vmlinux EXPORT_SYMBOL
+0x0a7e4787 __brelse vmlinux EXPORT_SYMBOL
+0x4f4b1982 pipe_lock vmlinux EXPORT_SYMBOL
+0x7de53067 rpc_init_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9c206366 ip_set_name_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xbe761a11 ib_register_mad_agent drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x448f0ca9 adf_dev_start drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x0cfbd0ba mlx5_rl_remove_rate drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xa2c3f373 drm_panel_enable drivers/gpu/drm/drm EXPORT_SYMBOL
+0x201e3854 drm_gem_fb_create_handle drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x96dbe382 kvm_mpx_supported arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x91416cd5 __kvm_request_immediate_exit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa82b2066 dm_bufio_write_dirty_buffers vmlinux EXPORT_SYMBOL_GPL
+0x37e2f2ed scsi_device_set_state vmlinux EXPORT_SYMBOL
+0x020dbf27 bitmap_alloc vmlinux EXPORT_SYMBOL
+0x941f2aaa eventfd_ctx_put vmlinux EXPORT_SYMBOL_GPL
+0xa46e598c mark_buffer_async_write vmlinux EXPORT_SYMBOL
+0xa33c0eac wait_for_completion_interruptible_timeout vmlinux EXPORT_SYMBOL
+0xfbc4f89e io_schedule_timeout vmlinux EXPORT_SYMBOL
+0x6ff4d5cb vbg_hgcm_disconnect drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL
+0x7e773de2 sparse_keymap_entry_from_scancode drivers/input/sparse-keymap EXPORT_SYMBOL
+0x28e23139 xfrm_probe_algs vmlinux EXPORT_SYMBOL_GPL
+0x883a9eb0 ip_route_me_harder vmlinux EXPORT_SYMBOL
+0x10a76a8e inet_sock_destruct vmlinux EXPORT_SYMBOL
+0x97d61efe skb_udp_tunnel_segment vmlinux EXPORT_SYMBOL
+0x2db4f2b6 nf_reinject vmlinux EXPORT_SYMBOL
+0x84a005f1 iscsi_get_router_state_name vmlinux EXPORT_SYMBOL_GPL
+0xf5081f43 dev_pm_qos_update_request vmlinux EXPORT_SYMBOL_GPL
+0xf3ee21b8 tty_chars_in_buffer vmlinux EXPORT_SYMBOL
+0x815588a6 clk_enable vmlinux EXPORT_SYMBOL_GPL
+0x5c4265f6 blk_unregister_region vmlinux EXPORT_SYMBOL
+0xbcc7c369 elv_rqhash_del vmlinux EXPORT_SYMBOL_GPL
+0x81787cc6 unregister_nls vmlinux EXPORT_SYMBOL
+0x15070829 bpf_prog_get_type_dev vmlinux EXPORT_SYMBOL_GPL
+0xae0592ef ring_buffer_discard_commit vmlinux EXPORT_SYMBOL_GPL
+0xc18cdf36 amd_df_indirect_read vmlinux EXPORT_SYMBOL_GPL
+0x333d510a ceph_auth_verify_authorizer_reply net/ceph/libceph EXPORT_SYMBOL
+0xe2c84666 nft_reject_icmp_code net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x08100ee4 ib_init_ah_attr_from_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xae43d25b __fscache_check_consistency fs/fscache/fscache EXPORT_SYMBOL
+0x209dee02 devlink_resource_occ_get_register vmlinux EXPORT_SYMBOL_GPL
+0x395e7f45 md_integrity_register vmlinux EXPORT_SYMBOL
+0x50b82e11 thermal_zone_get_slope vmlinux EXPORT_SYMBOL_GPL
+0xd3007a43 thermal_zone_set_trips vmlinux EXPORT_SYMBOL_GPL
+0x74459997 __serio_register_port vmlinux EXPORT_SYMBOL
+0xc17515d7 usb_hcds_loaded vmlinux EXPORT_SYMBOL_GPL
+0x576e0004 phy_exit vmlinux EXPORT_SYMBOL_GPL
+0x06939c3a verify_signature vmlinux EXPORT_SYMBOL_GPL
+0xc5b1dbc4 skcipher_register_instance vmlinux EXPORT_SYMBOL_GPL
+0x0317ae66 fscrypt_setup_filename vmlinux EXPORT_SYMBOL
+0x5dc311e0 target_remove_session drivers/target/target_core_mod EXPORT_SYMBOL
+0xb12cab88 mlx5_core_query_mkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xcd4b29f2 bcm_phy_read_shadow drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xe59c97e5 nfs_instantiate fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xb722229e kvm_arch_no_poll arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x199d5cae skb_pull_rcsum vmlinux EXPORT_SYMBOL_GPL
+0xb5ccd4f6 __scsi_device_lookup_by_target vmlinux EXPORT_SYMBOL
+0x9e005e6f cppc_get_perf_caps vmlinux EXPORT_SYMBOL_GPL
+0xfe9ebbbb acpi_osi_is_win8 vmlinux EXPORT_SYMBOL
+0x1fa1d95c sha256_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x669b80a4 fscrypt_has_permitted_context vmlinux EXPORT_SYMBOL
+0xa55c3daa current_work vmlinux EXPORT_SYMBOL
+0xfc03d97a page_is_ram vmlinux EXPORT_SYMBOL_GPL
+0xe5183e76 ib_dealloc_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc83c5f7d mdev_parent_dev drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x2b67f096 speedstep_get_frequency drivers/cpufreq/speedstep-lib EXPORT_SYMBOL_GPL
+0x6af8a872 dm_array_info_init drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa1bed1d2 devm_i2c_new_dummy_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x16ac2597 drm_printf drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6b27729b radix_tree_gang_lookup vmlinux EXPORT_SYMBOL
+0xad68c7bc dm_bio_get_target_bio_nr vmlinux EXPORT_SYMBOL_GPL
+0x43ce7821 usb_amd_pt_check_port vmlinux EXPORT_SYMBOL_GPL
+0x3af06161 nvme_init_ctrl vmlinux EXPORT_SYMBOL_GPL
+0x7aae6848 pm_generic_suspend vmlinux EXPORT_SYMBOL_GPL
+0x5a9abd82 agp_remove_bridge vmlinux EXPORT_SYMBOL_GPL
+0x1f90190c tty_buffer_lock_exclusive vmlinux EXPORT_SYMBOL_GPL
+0xebf37e96 locks_alloc_lock vmlinux EXPORT_SYMBOL_GPL
+0xb121390a probe_irq_on vmlinux EXPORT_SYMBOL
+0xb0aed408 ZSTD_compressStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x33d11b85 cache_create_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x314f76fb svc_generic_init_request net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x730f03d8 svc_prepare_thread net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x71e79577 dm_get_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x66d3fea2 i2c_smbus_write_block_data drivers/i2c/i2c-core EXPORT_SYMBOL
+0x545d123a usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0xca50f8b7 vxlan_fdb_clear_offload drivers/net/vxlan EXPORT_SYMBOL_GPL
+0x15801382 mlxsw_afk_key_info_put drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x065d9967 fscache_mark_pages_cached fs/fscache/fscache EXPORT_SYMBOL
+0x93b3fc74 register_dcbevent_notifier vmlinux EXPORT_SYMBOL
+0x2b3fb82b xfrm_state_lookup vmlinux EXPORT_SYMBOL
+0xfef99786 tcp_parse_options vmlinux EXPORT_SYMBOL
+0xfac75faa sk_filter_trim_cap vmlinux EXPORT_SYMBOL
+0x39852e2b rps_may_expire_flow vmlinux EXPORT_SYMBOL
+0x6795c703 key_move vmlinux EXPORT_SYMBOL
+0x1ba59527 __kmalloc_node vmlinux EXPORT_SYMBOL
+0x17add64b gdt_page vmlinux EXPORT_SYMBOL_GPL
+0x39b62a41 rpc_unlink net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x209c49a5 edac_mc_find_csrow_by_page drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x23141b82 spc_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL
+0xed1a1dd6 mlxsw_i2c_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_i2c EXPORT_SYMBOL
+0x6e0273f1 locks_in_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL
+0x315d28f7 camellia_crypt_ctr_2way arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL
+0x02a7efbf pskb_trim_rcsum_slow vmlinux EXPORT_SYMBOL
+0x9fab32df arch_set_freq_scale vmlinux EXPORT_SYMBOL_GPL
+0x3f8d5a2b ufshcd_get_local_unipro_ver vmlinux EXPORT_SYMBOL
+0xe4d80bf4 acpi_enable vmlinux EXPORT_SYMBOL
+0xba6ca94e module_refcount vmlinux EXPORT_SYMBOL
+0x32c06051 sunrpc_destroy_cache_detail net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb9a4b6ec drm_mode_create_from_cmdline_mode drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe4991a9d dlm_register_domain fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x55d7280a dlm_unregister_domain fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x580d6d1f inet_ctl_sock_create vmlinux EXPORT_SYMBOL_GPL
+0x5252d875 power_supply_find_ocv2cap_table vmlinux EXPORT_SYMBOL_GPL
+0xc95c26e1 firmware_kobj vmlinux EXPORT_SYMBOL_GPL
+0x19c6c53a __tty_insert_flip_char vmlinux EXPORT_SYMBOL
+0x85935a61 acpi_dev_irq_flags vmlinux EXPORT_SYMBOL_GPL
+0x8f8eff1f pci_store_saved_state vmlinux EXPORT_SYMBOL_GPL
+0xd70121b8 handle_edge_irq vmlinux EXPORT_SYMBOL
+0xaa8b3750 ip_vs_conn_in_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xbc63d64e l3mdev_update_flow vmlinux EXPORT_SYMBOL_GPL
+0x2500c31c usb_autopm_put_interface_async vmlinux EXPORT_SYMBOL_GPL
+0x23da6d47 usb_lock_device_for_reset vmlinux EXPORT_SYMBOL_GPL
+0xbacc66b9 phy_ethtool_nway_reset vmlinux EXPORT_SYMBOL
+0xae8a1f82 clk_hw_register_mux vmlinux EXPORT_SYMBOL_GPL
+0x7ad1ded1 pinctrl_register_mappings vmlinux EXPORT_SYMBOL_GPL
+0x1bc5eebe pinctrl_gpio_direction_input vmlinux EXPORT_SYMBOL_GPL
+0x18e60984 __do_once_start vmlinux EXPORT_SYMBOL
+0xd241e488 __account_locked_vm vmlinux EXPORT_SYMBOL_GPL
+0x8534ea75 virtio_transport_dgram_dequeue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x2101cbc9 ceph_oid_destroy net/ceph/libceph EXPORT_SYMBOL
+0xa75f49d6 mlx5_fill_page_array drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x08a7896d i915_gpu_raise drivers/gpu/drm/i915/i915 EXPORT_SYMBOL_GPL
+0xcff414d3 drm_mode_create_tile_group drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6729d3df __get_user_4 vmlinux EXPORT_SYMBOL
+0xb2fd5ceb __put_user_4 vmlinux EXPORT_SYMBOL
+0x85df9b6c strsep vmlinux EXPORT_SYMBOL
+0xe2d5255a strcmp vmlinux EXPORT_SYMBOL
+0xc02c366c ___pskb_trim vmlinux EXPORT_SYMBOL
+0xa44102bd __pskb_copy_fclone vmlinux EXPORT_SYMBOL
+0x144e79ff ata_std_prereset vmlinux EXPORT_SYMBOL_GPL
+0x0c91fa57 tty_driver_flush_buffer vmlinux EXPORT_SYMBOL
+0xdef7c893 fb_match_mode vmlinux EXPORT_SYMBOL
+0x45f4a3af pci_setup_cardbus vmlinux EXPORT_SYMBOL
+0x86a1664f iomem_resource vmlinux EXPORT_SYMBOL
+0x39f6cd87 drm_fb_helper_pan_display drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7301a16b drm_atomic_helper_wait_for_dependencies drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5503ca95 drm_dp_downstream_id drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x440293be sata_scr_read vmlinux EXPORT_SYMBOL_GPL
+0xb6e0b7ae devm_kfree vmlinux EXPORT_SYMBOL_GPL
+0x78ce4e35 mipi_dsi_driver_unregister vmlinux EXPORT_SYMBOL
+0xc5a0bd98 register_virtio_driver vmlinux EXPORT_SYMBOL_GPL
+0xa50b8757 hdmi_infoframe_log vmlinux EXPORT_SYMBOL
+0xbd4e23f9 generic_start_io_acct vmlinux EXPORT_SYMBOL
+0xef7e4d7b vfs_create vmlinux EXPORT_SYMBOL
+0xefce991c ceph_pagelist_append net/ceph/libceph EXPORT_SYMBOL
+0xf6ed3334 ib_event_msg drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xcdfb2da3 ptp_schedule_worker drivers/ptp/ptp EXPORT_SYMBOL
+0x78458fad ohci_hub_status_data vmlinux EXPORT_SYMBOL_GPL
+0x10624756 devm_device_add_group vmlinux EXPORT_SYMBOL_GPL
+0x2093f4dd clk_register_divider_table vmlinux EXPORT_SYMBOL_GPL
+0xb9056bb6 remove_conflicting_framebuffers vmlinux EXPORT_SYMBOL
+0xe745fd1c nla_reserve_64bit vmlinux EXPORT_SYMBOL
+0x50802e3a __ablkcipher_walk_complete vmlinux EXPORT_SYMBOL_GPL
+0xfbef4317 sb_min_blocksize vmlinux EXPORT_SYMBOL
+0xc8b99486 __unwind_start vmlinux EXPORT_SYMBOL_GPL
+0xa4ca9251 devm_backlight_device_unregister drivers/video/backlight/backlight EXPORT_SYMBOL
+0xa5f72b95 devlink_dpipe_headers_unregister vmlinux EXPORT_SYMBOL_GPL
+0x9282f433 __tracepoint_devlink_hwmsg vmlinux EXPORT_SYMBOL_GPL
+0x154c6338 dm_kcopyd_client_destroy vmlinux EXPORT_SYMBOL
+0x2ad91f6e ir_raw_gen_pl vmlinux EXPORT_SYMBOL
+0x9e12e343 ir_raw_gen_pd vmlinux EXPORT_SYMBOL
+0x618a1746 sas_phy_alloc vmlinux EXPORT_SYMBOL
+0x2e0b1deb dma_fence_get_status vmlinux EXPORT_SYMBOL
+0x7c13b930 intel_svm_bind_mm vmlinux EXPORT_SYMBOL_GPL
+0xe8fb49a8 page_cache_prev_miss vmlinux EXPORT_SYMBOL
+0xf353a698 register_module_notifier vmlinux EXPORT_SYMBOL
+0xe7adbe40 adf_init_arb drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x413ccbe7 drm_client_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc0acfe49 drm_atomic_helper_check drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xcd32cc8b tcp_rate_check_app_limited vmlinux EXPORT_SYMBOL_GPL
+0x6cff3b90 register_fib_notifier vmlinux EXPORT_SYMBOL
+0xa06f135f cpuidle_disable_device vmlinux EXPORT_SYMBOL_GPL
+0x0025e941 get_bitmap_from_slot vmlinux EXPORT_SYMBOL
+0xc7208c3a serial8250_resume_port vmlinux EXPORT_SYMBOL
+0x5f56663b rdmsrl_on_cpu vmlinux EXPORT_SYMBOL
+0x9d0dc9a8 vm_map_pages_zero vmlinux EXPORT_SYMBOL
+0xf4d66f2f nft_unregister_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xd7832bb3 rdma_consumer_reject_data drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x6c28be5a vfio_info_add_capability drivers/vfio/vfio EXPORT_SYMBOL
+0x99e8d44f iscsi_target_check_login_request drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x7c1e7807 bch_bset_sort_state_init drivers/md/bcache/bcache EXPORT_SYMBOL
+0xe07ac99d inet_csk_clone_lock vmlinux EXPORT_SYMBOL_GPL
+0x2b6991e4 devlink_register vmlinux EXPORT_SYMBOL_GPL
+0xcb77a5ed hid_input_report vmlinux EXPORT_SYMBOL_GPL
+0x60e247de dev_pm_opp_register_notifier vmlinux EXPORT_SYMBOL
+0xcd33b804 pcix_set_mmrbc vmlinux EXPORT_SYMBOL
+0x6be0d38b unregister_sysctl_table vmlinux EXPORT_SYMBOL
+0xcde5c49a rdma_set_service_type drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xe6b45734 _ib_alloc_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc080243d mlx5_core_create_tir_out drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xeb12037c drm_atomic_helper_plane_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x1c8984c7 acpi_smbus_unregister_callback drivers/acpi/sbshc EXPORT_SYMBOL_GPL
+0x798b7682 klist_prev vmlinux EXPORT_SYMBOL_GPL
+0xde1d7c13 devlink_region_create vmlinux EXPORT_SYMBOL_GPL
+0xdb648e3b lwtunnel_xmit vmlinux EXPORT_SYMBOL_GPL
+0x0e6cbc00 devm_clk_release_clkdev vmlinux EXPORT_SYMBOL
+0x40b43bd0 sbitmap_add_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x3b321462 LZ4_setStreamDecode vmlinux EXPORT_SYMBOL
+0xc055e773 del_gendisk vmlinux EXPORT_SYMBOL
+0x343fcb86 block_write_full_page vmlinux EXPORT_SYMBOL
+0xfe731af8 nf_ct_invert_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x4a4cb558 dm_btree_insert_notify drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x1db26226 lockd_up fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x6f5d4481 xfrm_register_type vmlinux EXPORT_SYMBOL
+0xd3bf9978 __tcp_md5_do_lookup vmlinux EXPORT_SYMBOL
+0x437a0d6d __sock_tx_timestamp vmlinux EXPORT_SYMBOL
+0x9305f8e6 cpufreq_get vmlinux EXPORT_SYMBOL
+0xcacf671c pci_set_vpd_size vmlinux EXPORT_SYMBOL
+0x9bcb1486 fat_sync_inode vmlinux EXPORT_SYMBOL_GPL
+0x387d24c9 walk_iomem_res_desc vmlinux EXPORT_SYMBOL_GPL
+0x618a30ab mlxsw_afa_block_commit drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x18b04d3c drm_crtc_vblank_off drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8bfe62d5 drm_crtc_vblank_get drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe5e5c48a kvm_page_track_register_notifier arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa3a6f4c1 sock_no_getsockopt vmlinux EXPORT_SYMBOL
+0x92623358 sock_no_setsockopt vmlinux EXPORT_SYMBOL
+0x1377238b clk_hw_register_gpio_mux vmlinux EXPORT_SYMBOL_GPL
+0xd85ffaee fscrypt_decrypt_pagecache_blocks vmlinux EXPORT_SYMBOL
+0x889e891f fscrypt_encrypt_pagecache_blocks vmlinux EXPORT_SYMBOL
+0xfa7aac70 __blockdev_direct_IO vmlinux EXPORT_SYMBOL
+0xa2092eaf migrate_page vmlinux EXPORT_SYMBOL
+0x68952493 rcu_note_context_switch vmlinux EXPORT_SYMBOL_GPL
+0x2d3456fb vhost_poll_queue drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xc51d33d9 drm_property_create_bool drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2a4577a8 ip6_route_me_harder vmlinux EXPORT_SYMBOL
+0x68a33fda unix_destruct_scm vmlinux EXPORT_SYMBOL
+0xdbd604b7 thermal_zone_device_update vmlinux EXPORT_SYMBOL_GPL
+0xbf0fb030 rc_map_get vmlinux EXPORT_SYMBOL_GPL
+0xd1253532 sas_rphy_delete vmlinux EXPORT_SYMBOL
+0xcab3f77b dev_fwnode vmlinux EXPORT_SYMBOL_GPL
+0xabc640f3 list_lru_isolate vmlinux EXPORT_SYMBOL_GPL
+0x057d0077 bpf_prog_alloc vmlinux EXPORT_SYMBOL_GPL
+0x96e90324 adf_cfg_dev_remove drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x8d5e9c79 vnic_dev_unregister drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL
+0x4b387b04 drm_crtc_enable_color_mgmt drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2996cebd km_new_mapping vmlinux EXPORT_SYMBOL
+0xf9ad2ae9 phy_attach_direct vmlinux EXPORT_SYMBOL
+0x042db083 phy_gbit_all_ports_features vmlinux EXPORT_SYMBOL_GPL
+0x6382fb57 fcoe_start_io vmlinux EXPORT_SYMBOL_GPL
+0x4e862a46 iscsi_get_port_state_name vmlinux EXPORT_SYMBOL_GPL
+0xb989d397 iscsi_get_port_speed_name vmlinux EXPORT_SYMBOL_GPL
+0x375f8bbd fc_get_event_number vmlinux EXPORT_SYMBOL
+0x51b7aa6e pm_generic_thaw_early vmlinux EXPORT_SYMBOL_GPL
+0x3bbb52dc acpi_check_resource_conflict vmlinux EXPORT_SYMBOL
+0x3283e6b0 prandom_seed_full_state vmlinux EXPORT_SYMBOL
+0x6e515e1a blk_queue_write_cache vmlinux EXPORT_SYMBOL_GPL
+0x0c377efd generic_block_bmap vmlinux EXPORT_SYMBOL
+0x868784cb __symbol_get vmlinux EXPORT_SYMBOL_GPL
+0x6ef6b54f ktime_get_boot_fast_ns vmlinux EXPORT_SYMBOL_GPL
+0x4507f4a8 cpuhp_tasks_frozen vmlinux EXPORT_SYMBOL_GPL
+0xa00ce390 mlx4_phys_to_slave_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xde3abc2e vmci_datagram_create_handle_priv drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x897514b2 nfs_put_client fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x46324e84 __ip_dev_find vmlinux EXPORT_SYMBOL
+0x7fceefec iscsi_boot_create_ethernet vmlinux EXPORT_SYMBOL_GPL
+0x42230915 sbitmap_any_bit_set vmlinux EXPORT_SYMBOL_GPL
+0x652ce9aa nla_memcmp vmlinux EXPORT_SYMBOL
+0xf1db1704 nla_memcpy vmlinux EXPORT_SYMBOL
+0x3761a5fa kmem_cache_free vmlinux EXPORT_SYMBOL
+0x73879664 vsock_addr_init net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x6f765b30 nf_ct_bridge_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xbe71d835 sbc_get_write_same_sectors drivers/target/target_core_mod EXPORT_SYMBOL
+0xccfe6409 btracker_nr_demotions_queued drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xb02e1138 mlx4_get_counter_stats drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xaca90ebd ipmi_request_supply_msgs drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xcf436c0a kvm_deliver_exception_payload arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x853d42da netdev_set_sb_channel vmlinux EXPORT_SYMBOL
+0xf68557f2 get_cpu_device vmlinux EXPORT_SYMBOL_GPL
+0x0d5e2381 do_dw_dma_disable vmlinux EXPORT_SYMBOL_GPL
+0x923b1276 dmaengine_get vmlinux EXPORT_SYMBOL
+0xcb9e1a22 acpi_os_map_generic_address vmlinux EXPORT_SYMBOL
+0xdc62543b pci_restore_pri_state vmlinux EXPORT_SYMBOL_GPL
+0x2afd5875 pci_restore_ats_state vmlinux EXPORT_SYMBOL_GPL
+0xf039c89d pci_restore_msi_state vmlinux EXPORT_SYMBOL_GPL
+0x045889a0 pcie_capability_clear_and_set_dword vmlinux EXPORT_SYMBOL
+0x730786e5 vfs_fadvise vmlinux EXPORT_SYMBOL
+0xd5fd90f1 prepare_to_wait vmlinux EXPORT_SYMBOL
+0x9f46df2f __request_region vmlinux EXPORT_SYMBOL
+0x04e3dbfc roce_gid_type_mask_support drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xe321b5df target_setup_session drivers/target/target_core_mod EXPORT_SYMBOL
+0x4056ecb8 core_tpg_deregister drivers/target/target_core_mod EXPORT_SYMBOL
+0x03c45108 cxgb4_alloc_sftid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x83110e91 nvmf_get_address drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x5abbf23f drm_connector_attach_vrr_capable_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe014ace5 drm_dp_mst_reset_vcpi_slots drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xd9491c14 xa_destroy vmlinux EXPORT_SYMBOL
+0xc81e91a8 napi_busy_loop vmlinux EXPORT_SYMBOL
+0x25f01fd4 dev_get_valid_name vmlinux EXPORT_SYMBOL
+0xc564a76d fixed_phy_set_link_update vmlinux EXPORT_SYMBOL_GPL
+0x0c338ff6 ata_host_detach vmlinux EXPORT_SYMBOL_GPL
+0x13b194f5 nd_cmd_in_size vmlinux EXPORT_SYMBOL_GPL
+0xe089cfcc agp_memory_reserved vmlinux EXPORT_SYMBOL_GPL
+0x843d70ef acpi_is_root_bridge vmlinux EXPORT_SYMBOL_GPL
+0xebd226ae acpi_ec_add_query_handler vmlinux EXPORT_SYMBOL_GPL
+0x1978f2e0 bdev_read_only vmlinux EXPORT_SYMBOL
+0xf0009fee put_pages_list vmlinux EXPORT_SYMBOL
+0x639ff50e from_kprojid vmlinux EXPORT_SYMBOL
+0x1744b6b1 rdma_roce_rescan_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x42cf6acf target_configure_unmap_from_queue drivers/target/target_core_mod EXPORT_SYMBOL
+0x670c4952 i2c_new_ancillary_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x9e41f494 mlxsw_afk_encode drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xaa7324b0 skb_copy_datagram_iter vmlinux EXPORT_SYMBOL
+0x30cc3f0d sock_wfree vmlinux EXPORT_SYMBOL
+0x5a9e34c8 dev_pm_opp_get_sharing_cpus vmlinux EXPORT_SYMBOL_GPL
+0xa23f684b __tracepoint_io_page_fault vmlinux EXPORT_SYMBOL_GPL
+0x21947a06 acpi_cppc_processor_exit vmlinux EXPORT_SYMBOL_GPL
+0xd4a2bf33 __posix_acl_create vmlinux EXPORT_SYMBOL
+0xa0ee384f __page_cache_alloc vmlinux EXPORT_SYMBOL
+0x091eb9b4 round_jiffies vmlinux EXPORT_SYMBOL_GPL
+0xadfd3f84 rpcauth_list_flavors net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5a956016 nft_unregister_chain_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xbefcb087 inet_proto_csum_replace16 vmlinux EXPORT_SYMBOL
+0x4827eb8e dm_exception_store_create vmlinux EXPORT_SYMBOL
+0x7d1bb9f9 __nvdimm_create vmlinux EXPORT_SYMBOL_GPL
+0x9e202125 __dynamic_ibdev_dbg vmlinux EXPORT_SYMBOL
+0x4133ad35 crypto_sha512_finup vmlinux EXPORT_SYMBOL
+0x70cf4cb3 dquot_commit vmlinux EXPORT_SYMBOL
+0x5a1a6c74 __block_write_full_page vmlinux EXPORT_SYMBOL
+0x451ae31c truncate_inode_pages vmlinux EXPORT_SYMBOL
+0xcf91788a uprobe_unregister vmlinux EXPORT_SYMBOL_GPL
+0xb1cf13c2 ip6_tnl_xmit net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0xcbafa235 cxgb4_ring_tx_db drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x19711697 camellia_xts_dec_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL
+0x6f3a8de5 camellia_xts_enc_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL
+0xc5e4a5d1 cpumask_next vmlinux EXPORT_SYMBOL
+0xf9da0287 fib_nh_common_release vmlinux EXPORT_SYMBOL_GPL
+0xc9ebef22 input_flush_device vmlinux EXPORT_SYMBOL
+0x16c97ac2 iscsi_conn_login_event vmlinux EXPORT_SYMBOL_GPL
+0x0a502c98 dmar_platform_optin vmlinux EXPORT_SYMBOL_GPL
+0x85b38978 percpu_ref_reinit vmlinux EXPORT_SYMBOL_GPL
+0xe5a045ab sysfs_notify vmlinux EXPORT_SYMBOL_GPL
+0x2caf63d1 topology_phys_to_logical_die vmlinux EXPORT_SYMBOL
+0xbd0f699d vsock_addr_bound net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xd9de819b osd_req_op_cls_request_data_pagelist net/ceph/libceph EXPORT_SYMBOL
+0x11e32d01 ceph_monc_got_map net/ceph/libceph EXPORT_SYMBOL
+0x34ee001a nf_confirm net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x60a92bc0 transport_copy_sense_to_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xa8db193f cxgb4_smt_release drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x3701a196 csum_partial_copy_to_user vmlinux EXPORT_SYMBOL
+0x9a1dfd65 strpbrk vmlinux EXPORT_SYMBOL
+0x6028425a inet_hash vmlinux EXPORT_SYMBOL_GPL
+0xf099eb9d mm_account_pinned_pages vmlinux EXPORT_SYMBOL_GPL
+0x96b5557e devfreq_unregister_opp_notifier vmlinux EXPORT_SYMBOL
+0x6d994d20 fs_dax_get_by_bdev vmlinux EXPORT_SYMBOL_GPL
+0x95efa1db pkcs7_free_message vmlinux EXPORT_SYMBOL_GPL
+0xe364ad65 seq_lseek vmlinux EXPORT_SYMBOL
+0xee9fcda6 apei_mce_report_mem_error vmlinux EXPORT_SYMBOL_GPL
+0x40a6b7dc ip_vs_conn_out_get_proto net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL
+0xaab0ef04 dm_bitset_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x177a6c39 cdrom_get_last_written drivers/cdrom/cdrom EXPORT_SYMBOL
+0x44f4c995 drm_crtc_vblank_count drivers/gpu/drm/drm EXPORT_SYMBOL
+0xee313474 drm_crtc_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xdd5ab8fd drm_dp_mst_connector_early_unregister drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xd1f4927d simd_unregister_skciphers crypto/crypto_simd EXPORT_SYMBOL_GPL
+0x574a5f12 inet_csk_reqsk_queue_add vmlinux EXPORT_SYMBOL
+0x8a251fa1 devlink_reload_enable vmlinux EXPORT_SYMBOL_GPL
+0x252b6b51 dma_resv_reserve_shared vmlinux EXPORT_SYMBOL
+0x85a49dc7 pci_vpd_find_info_keyword vmlinux EXPORT_SYMBOL_GPL
+0x63a7c28c bitmap_find_free_region vmlinux EXPORT_SYMBOL
+0x4ee63f0a blk_mq_queue_inflight vmlinux EXPORT_SYMBOL_GPL
+0x3fbc1a76 kernfs_find_and_get_ns vmlinux EXPORT_SYMBOL_GPL
+0x71fa908a cache_flush net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd6153eb6 bc_svc_process net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3c7bfa1c ip_set_match_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xf6975571 edac_pci_alloc_ctl_info drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x7d5e1815 dm_rh_get_region_key drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x00af95a1 __tracepoint_bcache_btree_set_root drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xa45a56e7 mlx5_set_port_wol drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xf8be3348 drm_dp_dual_mode_write drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfaa1aba6 crypto_finalize_aead_request crypto/crypto_engine EXPORT_SYMBOL_GPL +0x496b27b0 vlan_vid_add vmlinux EXPORT_SYMBOL +0x24c1d99c ipv6_proxy_select_ident vmlinux EXPORT_SYMBOL_GPL +0x061bcc81 xfrm_parse_spi vmlinux EXPORT_SYMBOL +0xe3721252 udp_gro_receive vmlinux EXPORT_SYMBOL +0xaa3f8a1d napi_schedule_prep vmlinux EXPORT_SYMBOL +0x787c882b lzo1x_1_compress vmlinux EXPORT_SYMBOL_GPL +0x45fa911b set_disk_ro vmlinux EXPORT_SYMBOL +0x6f159fa4 dquot_enable vmlinux EXPORT_SYMBOL +0xddab84fe svc_alien_sock net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x230b494e share_ns net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0x6d7d1951 nf_conncount_init net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x04687b2f drm_gem_handle_create drivers/gpu/drm/drm EXPORT_SYMBOL +0xee0f6ee3 xfrm6_rcv_tnl vmlinux EXPORT_SYMBOL +0x5a6cdb52 nf_ct_zone_dflt vmlinux EXPORT_SYMBOL_GPL +0xc7d094b5 dm_read_arg_group vmlinux EXPORT_SYMBOL +0xd5dfcbfe bus_find_device vmlinux EXPORT_SYMBOL_GPL +0xf8420792 sg_alloc_table_from_pages vmlinux EXPORT_SYMBOL +0x6f002c60 blk_rq_unprep_clone vmlinux EXPORT_SYMBOL_GPL +0x1ee00594 bpf_offload_dev_netdev_unregister vmlinux EXPORT_SYMBOL_GPL +0x05ad1fba trace_raw_output_prep vmlinux EXPORT_SYMBOL +0xe8182cca relay_switch_subbuf vmlinux EXPORT_SYMBOL_GPL +0x9820b20b rpc_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xde6d5026 esp_output_head net/ipv4/esp4 EXPORT_SYMBOL_GPL +0x7d3d338a register_ip_vs_app_inc net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xe5041b58 drm_mode_set_config_internal drivers/gpu/drm/drm EXPORT_SYMBOL +0xa1c4231f kvm_set_pfn_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x5c699441 xfrm_aalg_get_byid vmlinux EXPORT_SYMBOL_GPL +0xa5df47b0 tcp_create_openreq_child vmlinux EXPORT_SYMBOL +0xecbabfa4 sk_common_release vmlinux EXPORT_SYMBOL +0x3f75155a nvme_set_queue_count vmlinux EXPORT_SYMBOL_GPL +0xc426c51f klp_shadow_free_all vmlinux EXPORT_SYMBOL_GPL +0xc8d62696 sched_autogroup_create_attach vmlinux EXPORT_SYMBOL +0xc3a98c1d lc_try_lock lib/lru_cache EXPORT_SYMBOL +0xecee660a cxgbi_hbas_add drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x33d1219c drm_connector_list_iter_begin drivers/gpu/drm/drm EXPORT_SYMBOL +0xf7e2bec2 drm_helper_mode_fill_fb_struct drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf213ad9f fuse_conn_put fs/fuse/fuse EXPORT_SYMBOL_GPL +0xf65461f8 lwtunnel_valid_encap_type_attr vmlinux EXPORT_SYMBOL_GPL +0x1f06cf6d skb_prepare_seq_read vmlinux EXPORT_SYMBOL +0x9240d802 skb_queue_head vmlinux EXPORT_SYMBOL +0xea3809f1 sk_reset_timer vmlinux EXPORT_SYMBOL +0x7e7d99be devm_nsio_disable vmlinux EXPORT_SYMBOL_GPL +0xb3febb3f is_nvdimm_sync vmlinux EXPORT_SYMBOL_GPL +0x30e1ec25 apei_map_generic_address vmlinux EXPORT_SYMBOL_GPL +0x4f5f826b irq_get_irq_data vmlinux EXPORT_SYMBOL_GPL +0xb761318b sev_active vmlinux EXPORT_SYMBOL_GPL +0x39b0c435 cxgbi_device_portmap_cleanup drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xedd40a17 drm_pci_free drivers/gpu/drm/drm EXPORT_SYMBOL +0x0b901549 camellia_dec_blk_2way arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL +0xca8bece8 inet_frag_pull_head vmlinux EXPORT_SYMBOL +0xecc74a99 tcp_seq_next vmlinux EXPORT_SYMBOL +0x68ffd00c netdev_has_upper_dev vmlinux EXPORT_SYMBOL +0xdbc6fad0 devfreq_add_governor vmlinux EXPORT_SYMBOL +0x15402b65 mpt_Soft_Hard_ResetHandler vmlinux EXPORT_SYMBOL +0xcf892ab1 ahci_start_fis_rx vmlinux EXPORT_SYMBOL_GPL +0x48f49400 apei_hest_parse vmlinux EXPORT_SYMBOL_GPL +0xa836ba02 wrmsr_safe_regs vmlinux EXPORT_SYMBOL +0x09a34a2b crc_itu_t vmlinux EXPORT_SYMBOL 
+0xe1bcf53e iov_iter_npages vmlinux EXPORT_SYMBOL +0xbb1cf93f bio_alloc_bioset vmlinux EXPORT_SYMBOL +0xce3864eb ZSTD_compress_usingDict lib/zstd/zstd_compress EXPORT_SYMBOL +0x0533d462 mii_ethtool_sset drivers/net/mii EXPORT_SYMBOL +0xeafdc0e5 mii_ethtool_gset drivers/net/mii EXPORT_SYMBOL +0x176031a7 devlink_fmsg_string_put vmlinux EXPORT_SYMBOL_GPL +0xa9bffc07 usb_driver_set_configuration vmlinux EXPORT_SYMBOL_GPL +0x6106cec3 class_compat_create_link vmlinux EXPORT_SYMBOL_GPL +0xa95aefbc mipi_dsi_set_maximum_return_packet_size vmlinux EXPORT_SYMBOL +0x7b6f9536 acpi_register_wakeup_handler vmlinux EXPORT_SYMBOL_GPL +0x8281f9df pstore_unregister vmlinux EXPORT_SYMBOL_GPL +0xf96e1a6f dump_emit vmlinux EXPORT_SYMBOL +0x91ec9043 vfs_cancel_lock vmlinux EXPORT_SYMBOL_GPL +0x92521782 __kernel_write vmlinux EXPORT_SYMBOL +0x9d871fac pm_qos_remove_request vmlinux EXPORT_SYMBOL_GPL +0x2e7860cf kthread_cancel_work_sync vmlinux EXPORT_SYMBOL_GPL +0xffc30c3a acpi_processor_power_init_bm_check vmlinux EXPORT_SYMBOL +0x85d4f825 svc_fill_write_vector net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x041e0553 ib_get_cached_lmc drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x582027fe transport_kunmap_data_sg drivers/target/target_core_mod EXPORT_SYMBOL +0xc16a7a95 ipvlan_link_setup drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL +0xa15e5eed drm_i2c_encoder_init drivers/gpu/drm/drm EXPORT_SYMBOL +0x358dcc71 netdev_pick_tx vmlinux EXPORT_SYMBOL +0x34c807df release_sock vmlinux EXPORT_SYMBOL +0x582eeb72 dev_pm_opp_disable vmlinux EXPORT_SYMBOL_GPL +0x50e7193a __i2c_first_dynamic_bus_num vmlinux EXPORT_SYMBOL_GPL +0x6f86ec33 phy_validate_pause vmlinux EXPORT_SYMBOL +0x16e85625 pm_clk_suspend vmlinux EXPORT_SYMBOL_GPL +0xb57b26d5 agp_backend_acquire vmlinux EXPORT_SYMBOL +0x909734e9 textsearch_destroy vmlinux EXPORT_SYMBOL +0xf51e26b1 alloc_pages_current vmlinux EXPORT_SYMBOL +0x6f62b3ff nft_fib6_eval net/ipv6/netfilter/nft_fib_ipv6 EXPORT_SYMBOL_GPL +0x05770c30 nft_fib4_eval net/ipv4/netfilter/nft_fib_ipv4 EXPORT_SYMBOL_GPL +0x2d82cc24 kvm_spec_ctrl_test_value arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x095dd5ec __netlink_ns_capable vmlinux EXPORT_SYMBOL +0x9a46f65b __sk_receive_skb vmlinux EXPORT_SYMBOL +0x15999211 mdiobus_setup_mdiodev_from_board_info vmlinux EXPORT_SYMBOL +0x38e5bc5a clk_set_rate_range vmlinux EXPORT_SYMBOL_GPL +0x2c82c36a security_secmark_relabel_packet vmlinux EXPORT_SYMBOL +0x6fc9c8db sync_inode vmlinux EXPORT_SYMBOL +0xc8dcc62a krealloc vmlinux EXPORT_SYMBOL +0xcc5c2df4 trace_print_symbols_seq vmlinux EXPORT_SYMBOL +0xc3cd034d crc8_populate_lsb lib/crc8 EXPORT_SYMBOL +0xaa8106bc crc8_populate_msb lib/crc8 EXPORT_SYMBOL +0xb00fdd93 osd_req_op_extent_dup_last net/ceph/libceph EXPORT_SYMBOL +0x9156d716 iscsit_process_scsi_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xff5d901b drm_print_regset32 drivers/gpu/drm/drm EXPORT_SYMBOL +0x2a05d9cc drm_fb_helper_fini drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x4dc54ae2 mark_page_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x81323150 input_register_device vmlinux EXPORT_SYMBOL +0x31266931 con_debug_leave vmlinux EXPORT_SYMBOL_GPL +0x3f128c2f crypto_inst_setname vmlinux EXPORT_SYMBOL_GPL +0x375da767 __invalidate_device vmlinux EXPORT_SYMBOL +0x080d72a6 seq_put_decimal_ull vmlinux EXPORT_SYMBOL +0x2f40911d nf_ct_remove_expect net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xf1eeca40 __tracepoint_mlx5_fs_del_fg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x90018bd2 __tracepoint_mlx5_fs_del_ft 
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x31472e7c mlx5_set_port_tc_group drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x796af513 drm_send_event_locked drivers/gpu/drm/drm EXPORT_SYMBOL +0xd702d39f __fscache_write_page fs/fscache/fscache EXPORT_SYMBOL +0x11c6c222 ata_sff_qc_issue vmlinux EXPORT_SYMBOL_GPL +0x1163f0a7 blk_max_low_pfn vmlinux EXPORT_SYMBOL +0xf97d7de2 register_sysctl_table vmlinux EXPORT_SYMBOL +0x67b78eb3 seq_hlist_next_rcu vmlinux EXPORT_SYMBOL +0x1fb6f336 bdi_register_va vmlinux EXPORT_SYMBOL +0x7d1bb1d4 tnum_strn vmlinux EXPORT_SYMBOL_GPL +0xea0d02b8 xdr_stream_decode_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc73997ee rpc_clnt_xprt_switch_has_addr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x90774718 nf_ct_get_id net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x63e0fee5 mdio45_links_ok drivers/net/mdio EXPORT_SYMBOL +0xf2f18c89 drm_syncobj_get_fd drivers/gpu/drm/drm EXPORT_SYMBOL +0x9d439e3e drm_connector_attach_edid_property drivers/gpu/drm/drm EXPORT_SYMBOL +0xe2553e22 nfs_sops fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdd80f89b strp_data_ready vmlinux EXPORT_SYMBOL_GPL +0xb499c13a mr_mfc_find_any vmlinux EXPORT_SYMBOL +0x1dd7fe9f ipv4_redirect vmlinux EXPORT_SYMBOL_GPL +0x6fdc8fea flow_block_cb_priv vmlinux EXPORT_SYMBOL +0xb49fa9bc flow_block_cb_free vmlinux EXPORT_SYMBOL +0x14c4b375 netdev_adjacent_change_prepare vmlinux EXPORT_SYMBOL +0x9b8c421e __netif_set_xps_queue vmlinux EXPORT_SYMBOL_GPL +0xdeb29f56 nvme_cancel_admin_tagset vmlinux EXPORT_SYMBOL_GPL +0x6bbb7dac tty_flip_buffer_push vmlinux EXPORT_SYMBOL +0xaf6bc3d0 posix_acl_init vmlinux EXPORT_SYMBOL +0xeb9c0630 perf_msr_probe vmlinux EXPORT_SYMBOL_GPL +0x626ed94d virtio_transport_notify_send_pre_enqueue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x0e4d4f97 ceph_osdc_wait_request net/ceph/libceph EXPORT_SYMBOL +0x1d34e996 pp_msgs drivers/edac/edac_mce_amd EXPORT_SYMBOL_GPL +0xe613a798 inet_addr_is_any vmlinux EXPORT_SYMBOL +0xd9ea811a hidinput_calc_abs_res vmlinux EXPORT_SYMBOL_GPL +0x9b6b1f82 dev_pm_opp_get_freq vmlinux EXPORT_SYMBOL_GPL +0x89c0fa18 qlt_lport_register vmlinux EXPORT_SYMBOL +0x3f4cded6 amd_iommu_domain_clear_gcr3 vmlinux EXPORT_SYMBOL +0xc66b77b1 iommu_group_set_iommudata vmlinux EXPORT_SYMBOL_GPL +0xd55ad93b iommu_group_get_iommudata vmlinux EXPORT_SYMBOL_GPL +0x5b224201 uart_get_divisor vmlinux EXPORT_SYMBOL +0xfdcb4ed3 acpi_os_get_line vmlinux EXPORT_SYMBOL +0x35a3fb6e pinctrl_utils_add_map_mux vmlinux EXPORT_SYMBOL_GPL +0xe26ed01c io_cgrp_subsys vmlinux EXPORT_SYMBOL_GPL +0xfc505c65 bio_copy_data vmlinux EXPORT_SYMBOL +0x1e569a7f fscrypt_inherit_context vmlinux EXPORT_SYMBOL +0x03952887 ktime_add_safe vmlinux EXPORT_SYMBOL_GPL +0xf9a482f9 msleep vmlinux EXPORT_SYMBOL +0xd8000722 ceph_zero_page_vector_range net/ceph/libceph EXPORT_SYMBOL +0x3102f228 sunrpc_cache_lookup_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb091643a _uverbs_get_const drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x35eaf2ea validate_xmit_xfrm vmlinux EXPORT_SYMBOL_GPL +0xf66fcbc6 cookie_timestamp_decode vmlinux EXPORT_SYMBOL +0x6fe5fd6d unregister_netdev vmlinux EXPORT_SYMBOL +0x91a39398 dca3_get_tag vmlinux EXPORT_SYMBOL_GPL +0xa75a6a90 sata_async_notification vmlinux EXPORT_SYMBOL_GPL +0x655566b5 nd_device_notify vmlinux EXPORT_SYMBOL +0xa1aa1783 __bforget vmlinux EXPORT_SYMBOL +0x6bc5880b generic_file_write_iter vmlinux EXPORT_SYMBOL +0x62029c77 __audit_inode_child vmlinux EXPORT_SYMBOL_GPL +0x17e78100 param_set_ullong vmlinux EXPORT_SYMBOL +0xadcba50b 
ZSTD_findFrameCompressedSize lib/zstd/zstd_decompress EXPORT_SYMBOL +0xcb38f7f2 xdr_reserve_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd5d7c62f set_h225_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0xe0ff7a18 unregister_pppox_proto drivers/net/ppp/pppox EXPORT_SYMBOL +0x20b1847c drm_panel_add drivers/gpu/drm/drm EXPORT_SYMBOL +0xcf4d87a2 __tracepoint_kvm_pi_irte_update arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x3736c82a inet6_add_protocol vmlinux EXPORT_SYMBOL +0xbc4f7c67 free_fib_info vmlinux EXPORT_SYMBOL_GPL +0x6aa2a67a raw_unhash_sk vmlinux EXPORT_SYMBOL_GPL +0xecc0d2ce xt_register_table vmlinux EXPORT_SYMBOL_GPL +0xe7db1bef sock_no_listen vmlinux EXPORT_SYMBOL +0xe83cb5df md_unregister_thread vmlinux EXPORT_SYMBOL +0x0b6e332f rc_map_unregister vmlinux EXPORT_SYMBOL_GPL +0x6deb574b input_mt_destroy_slots vmlinux EXPORT_SYMBOL +0x86b13d2a usb_unpoison_anchored_urbs vmlinux EXPORT_SYMBOL_GPL +0x32da53b8 phy_support_sym_pause vmlinux EXPORT_SYMBOL +0xeffb23b6 sas_alloc_task vmlinux EXPORT_SYMBOL_GPL +0x6f6e09a5 scsi_device_lookup_by_target vmlinux EXPORT_SYMBOL +0xad1036a2 amd_iommu_activate_guest_mode vmlinux EXPORT_SYMBOL +0x7e08d3c8 __tracepoint_remove_device_from_group vmlinux EXPORT_SYMBOL_GPL +0x79aa04a2 get_random_bytes vmlinux EXPORT_SYMBOL +0xcd9cd2ff wakeme_after_rcu vmlinux EXPORT_SYMBOL_GPL +0xab600421 probe_irq_off vmlinux EXPORT_SYMBOL +0x6715cc1c rpcauth_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xcf8c2590 dm_cache_policy_get_hint_size drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x9c70ed54 nfs_setattr_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x50cab80c nf_route vmlinux EXPORT_SYMBOL_GPL +0xc940e852 tcf_exts_destroy vmlinux EXPORT_SYMBOL +0x06b1b99d dev_get_iflink vmlinux EXPORT_SYMBOL +0xb03d7c05 input_enable_softrepeat vmlinux EXPORT_SYMBOL +0x6db105d6 fc_eh_host_reset vmlinux EXPORT_SYMBOL +0x692de0ea pm_runtime_allow vmlinux EXPORT_SYMBOL_GPL +0x7caf306c property_entries_dup vmlinux EXPORT_SYMBOL_GPL +0xa3f1153c do_unbind_con_driver vmlinux EXPORT_SYMBOL_GPL +0xd09911a6 acpi_dev_get_irq_type vmlinux EXPORT_SYMBOL_GPL +0xdaf4dfb3 fb_mode_option vmlinux EXPORT_SYMBOL_GPL +0xf1361941 pci_vpd_find_tag vmlinux EXPORT_SYMBOL_GPL +0x4a6b46df iomap_truncate_page vmlinux EXPORT_SYMBOL_GPL +0x934513a3 seq_vprintf vmlinux EXPORT_SYMBOL +0x63dc9ff2 ceph_create_client net/ceph/libceph EXPORT_SYMBOL +0xf13e9ab4 xprt_request_get_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbecf5d14 nfnl_acct_put net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL +0x6395c855 input_free_polled_device drivers/input/input-polldev EXPORT_SYMBOL +0x01ee716c nf_br_ops vmlinux EXPORT_SYMBOL_GPL +0x8208bd06 dm_exception_store_type_unregister vmlinux EXPORT_SYMBOL +0xc9bba7e6 pci_write_config_word vmlinux EXPORT_SYMBOL +0x79965b6a blk_rq_prep_clone vmlinux EXPORT_SYMBOL_GPL +0xf2a779b8 crypto_alloc_shash vmlinux EXPORT_SYMBOL_GPL +0x6f1c9044 crypto_alloc_ahash vmlinux EXPORT_SYMBOL_GPL +0xb7a0c12c crypto_alg_extsize vmlinux EXPORT_SYMBOL_GPL +0xf47bfa2b hwpoison_filter vmlinux EXPORT_SYMBOL_GPL +0xdf7ad92a irq_chip_enable_parent vmlinux EXPORT_SYMBOL_GPL +0x56ebc47d osd_req_op_init net/ceph/libceph EXPORT_SYMBOL +0xebd91050 unregister_ip_vs_scheduler net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x0ffdb32a dm_cache_policy_register drivers/md/dm-cache EXPORT_SYMBOL_GPL +0xbe456e4b cxgb3_unregister_client drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0xf44bc52d ipv6_mc_check_mld vmlinux EXPORT_SYMBOL +0xfc8b1d9d xfrm_state_afinfo_get_rcu vmlinux EXPORT_SYMBOL_GPL +0x984bc945 tcf_action_check_ctrlact 
vmlinux EXPORT_SYMBOL +0xdcebeb70 vlan_ioctl_set vmlinux EXPORT_SYMBOL +0xa5bda8a1 efi_capsule_supported vmlinux EXPORT_SYMBOL_GPL +0xb29add3b __page_mapcount vmlinux EXPORT_SYMBOL_GPL +0x6a037cf1 mempool_kfree vmlinux EXPORT_SYMBOL +0x75d0deb9 nsecs_to_jiffies64 vmlinux EXPORT_SYMBOL +0xad2951a9 ex_handler_rdmsr_unsafe vmlinux EXPORT_SYMBOL +0x40d84a37 ZSTD_getFrameParams lib/zstd/zstd_decompress EXPORT_SYMBOL +0xdecc0e19 dm_cache_policy_unregister drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x310f7225 drm_property_create_object drivers/gpu/drm/drm EXPORT_SYMBOL +0x06c49551 drm_dp_dsc_sink_line_buf_depth drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x60c3a23c __fscache_invalidate fs/fscache/fscache EXPORT_SYMBOL +0x28e09af1 iosf_mbi_available arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0xe4ae7508 __twofish_enc_blk_3way arch/x86/crypto/twofish-x86_64-3way EXPORT_SYMBOL_GPL +0x5e40c204 usb_wakeup_enabled_descendants vmlinux EXPORT_SYMBOL_GPL +0x9d3f6b47 scsi_scan_target vmlinux EXPORT_SYMBOL +0xb6362676 subsys_dev_iter_exit vmlinux EXPORT_SYMBOL_GPL +0xbc2c9b03 subsys_dev_iter_init vmlinux EXPORT_SYMBOL_GPL +0x6ac7696d mipi_dsi_host_unregister vmlinux EXPORT_SYMBOL +0x341e9814 virtqueue_is_broken vmlinux EXPORT_SYMBOL_GPL +0xcba4abe3 list_sort vmlinux EXPORT_SYMBOL +0x4b2160b7 register_quota_format vmlinux EXPORT_SYMBOL +0x36125570 remove_arg_zero vmlinux EXPORT_SYMBOL +0x5b56860c vm_munmap vmlinux EXPORT_SYMBOL +0xad59181f remap_pfn_range vmlinux EXPORT_SYMBOL +0x51c5db1b ib_alloc_mr_integrity drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x890723f7 dm_cache_policy_get_version drivers/md/dm-cache EXPORT_SYMBOL_GPL +0xb22e16d5 radix_tree_maybe_preload vmlinux EXPORT_SYMBOL +0xf41e4803 phy_register_fixup vmlinux EXPORT_SYMBOL +0x9b407097 device_bind_driver vmlinux EXPORT_SYMBOL_GPL +0xa112c11f device_store_ulong vmlinux EXPORT_SYMBOL_GPL +0x95c4bdf2 __dax_zero_page_range vmlinux EXPORT_SYMBOL_GPL +0xabb8a178 file_update_time vmlinux EXPORT_SYMBOL +0x14966b4c osd_req_op_extent_osd_data net/ceph/libceph EXPORT_SYMBOL +0x641f10f4 closure_wait drivers/md/bcache/bcache EXPORT_SYMBOL +0x23eddc68 mlxsw_core_cpu_port_init drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x47ddb0b2 drm_gem_dmabuf_vmap drivers/gpu/drm/drm EXPORT_SYMBOL +0x1163423c kobject_uevent_env vmlinux EXPORT_SYMBOL_GPL +0xb8cd3a7f nf_logger_put vmlinux EXPORT_SYMBOL_GPL +0x7fc50b7d dev_mc_init vmlinux EXPORT_SYMBOL +0xb1bde1f6 dev_uc_init vmlinux EXPORT_SYMBOL +0x865029ac __hw_addr_sync vmlinux EXPORT_SYMBOL +0x777bcef6 sis_info133_for_sata vmlinux EXPORT_SYMBOL_GPL +0xaa19e4aa _kstrtol vmlinux EXPORT_SYMBOL +0x223b639c pagevec_lookup_range_nr_tag vmlinux EXPORT_SYMBOL +0x5176080d perf_trace_run_bpf_submit vmlinux EXPORT_SYMBOL_GPL +0xd54397ae relay_file_operations vmlinux EXPORT_SYMBOL_GPL +0xc6905fdf init_timer_key vmlinux EXPORT_SYMBOL +0x94e481cf ZSTD_adjustCParams lib/zstd/zstd_compress EXPORT_SYMBOL +0x93aeac62 rpc_wake_up_status net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd23464ef tap_destroy_cdev drivers/net/tap EXPORT_SYMBOL_GPL +0x5d8501b6 mlx5_vector2eqn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x645d1cc4 mlx4_xrcd_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xec25d63e dev_set_mac_address_user vmlinux EXPORT_SYMBOL +0x8bdf1c54 put_cmsg vmlinux EXPORT_SYMBOL +0x3e40a095 skb_free_datagram vmlinux EXPORT_SYMBOL +0x69e46ea7 skb_vlan_push vmlinux EXPORT_SYMBOL +0xade9f3d5 usb_set_configuration vmlinux EXPORT_SYMBOL_GPL +0xe5cf06b5 fc_lport_notifier_head vmlinux 
EXPORT_SYMBOL +0xae14597e pci_wake_from_d3 vmlinux EXPORT_SYMBOL +0x416c2f50 __tracepoint_wbc_writepage vmlinux EXPORT_SYMBOL_GPL +0x9320826a vfs_clone_file_range vmlinux EXPORT_SYMBOL +0x52def944 filp_close vmlinux EXPORT_SYMBOL +0x342c6dd7 truncate_setsize vmlinux EXPORT_SYMBOL +0x262e823a wait_for_completion_interruptible vmlinux EXPORT_SYMBOL +0xbe122314 svcauth_gss_register_pseudoflavor net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0xc44fa984 svcauth_gss_flavor net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0x9e7eb6e4 rdma_rw_ctx_signature_init drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xee926d8f __tracepoint_bcache_writeback_collision drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x8cf7e3ff drm_object_property_get_value drivers/gpu/drm/drm EXPORT_SYMBOL +0x96c73e5a __tracepoint_extlog_mem_event vmlinux EXPORT_SYMBOL_GPL +0x423836f8 __phy_modify_mmd vmlinux EXPORT_SYMBOL_GPL +0x67b27ec1 tty_std_termios vmlinux EXPORT_SYMBOL +0x8d631e75 dw_dma_remove vmlinux EXPORT_SYMBOL_GPL +0x529a73d3 pci_create_root_bus vmlinux EXPORT_SYMBOL_GPL +0xed7bda16 d_obtain_root vmlinux EXPORT_SYMBOL +0xffa51229 x86_vector_domain vmlinux EXPORT_SYMBOL_GPL +0x6d2e899d mce_usable_address vmlinux EXPORT_SYMBOL_GPL +0x39ecf18c vsock_find_connected_socket net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x8842b361 rpc_task_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0637e458 uverbs_fd_class drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x7db84a25 mdio45_ethtool_ksettings_get_npage drivers/net/mdio EXPORT_SYMBOL +0x16c20b53 drm_client_buffer_vunmap drivers/gpu/drm/drm EXPORT_SYMBOL +0xe374b329 mr_table_dump vmlinux EXPORT_SYMBOL +0xc42275ba ether_setup vmlinux EXPORT_SYMBOL +0xbf39ffd1 page_pool_unmap_page vmlinux EXPORT_SYMBOL +0xdd3b3f7a usb_deregister_device_driver vmlinux EXPORT_SYMBOL_GPL +0xf293c691 device_create_file vmlinux EXPORT_SYMBOL_GPL +0xfa223ad4 __break_lease vmlinux EXPORT_SYMBOL +0xc3375f12 fc_mount vmlinux EXPORT_SYMBOL +0x49f39797 __get_vm_area vmlinux EXPORT_SYMBOL_GPL +0xdb02fa60 list_lru_add vmlinux EXPORT_SYMBOL_GPL +0xe4219fb4 param_ops_bint vmlinux EXPORT_SYMBOL +0xcf5afde3 param_set_bool vmlinux EXPORT_SYMBOL +0xf650cd9d param_ops_byte vmlinux EXPORT_SYMBOL +0x96962b87 xprt_free net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x963c93c7 ibdev_printk drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x64387a70 drm_lspcon_get_mode drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x25ba2b8a nfs_alloc_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1171cb01 xfrm_lookup_with_ifid vmlinux EXPORT_SYMBOL +0xccfb9e07 dst_default_metrics vmlinux EXPORT_SYMBOL +0xf6a0a3e6 sock_dequeue_err_skb vmlinux EXPORT_SYMBOL +0xeb2d918c dev_pm_opp_remove_all_dynamic vmlinux EXPORT_SYMBOL_GPL +0x4b62e9d3 fwnode_property_read_u64_array vmlinux EXPORT_SYMBOL_GPL +0x49d3c0b7 fwnode_property_read_u32_array vmlinux EXPORT_SYMBOL_GPL +0x4ff6fea5 fwnode_property_read_u16_array vmlinux EXPORT_SYMBOL_GPL +0xeea659d5 put_iova_domain vmlinux EXPORT_SYMBOL_GPL +0xb23662be async_tx_quiesce vmlinux EXPORT_SYMBOL_GPL +0x04ff54b3 would_dump vmlinux EXPORT_SYMBOL +0xd7855c0a set_online_page_callback vmlinux EXPORT_SYMBOL_GPL +0xd4ed9277 truncate_inode_pages_range vmlinux EXPORT_SYMBOL +0x6ea9363b force_sig vmlinux EXPORT_SYMBOL +0x2f40da68 dm_bm_set_read_write drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x552ec23c nfs4_pnfs_ds_put fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x4dca6cfe nfs4_fs_type fs/nfs/nfs EXPORT_SYMBOL_GPL +0x346ec03f kvm_get_kvm arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x64d3cc4e xas_load vmlinux EXPORT_SYMBOL_GPL 
+0x72f07bf4 dm_bufio_set_minimum_buffers vmlinux EXPORT_SYMBOL_GPL +0xbacc2f52 fwnode_get_next_parent vmlinux EXPORT_SYMBOL_GPL +0x05240ee7 percpu_counter_batch vmlinux EXPORT_SYMBOL +0xa464ffe3 d_path vmlinux EXPORT_SYMBOL +0xeef6cfa3 ceph_iterate_extents net/ceph/libceph EXPORT_SYMBOL +0x3fcbf10a iscsit_setup_nop_out drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xc00f725a camellia_ctr_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL +0xa5e2ab04 devlink_params_register vmlinux EXPORT_SYMBOL_GPL +0x1057095e skb_page_frag_refill vmlinux EXPORT_SYMBOL +0x3a61ca3f sdev_prefix_printk vmlinux EXPORT_SYMBOL +0x557fbeb7 vring_del_virtqueue vmlinux EXPORT_SYMBOL_GPL +0x44171a41 acpi_notifier_call_chain vmlinux EXPORT_SYMBOL +0x09b53e14 interval_tree_remove vmlinux EXPORT_SYMBOL_GPL +0x1d80f0f3 ib_umem_odp_alloc_child drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xae5ce20c mlx5_query_port_tc_bw_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x211739ea virtio_pmem_host_ack drivers/nvdimm/nd_virtio EXPORT_SYMBOL_GPL +0xa7c82902 __tracepoint_kvm_fast_mmio arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x25d1f4e5 devlink_dpipe_action_put vmlinux EXPORT_SYMBOL_GPL +0xed29fd9f devfreq_add_device vmlinux EXPORT_SYMBOL +0xb6762700 genphy_read_abilities vmlinux EXPORT_SYMBOL +0xb8cb2564 sdev_evt_send vmlinux EXPORT_SYMBOL_GPL +0xb7254781 csum_and_copy_to_iter vmlinux EXPORT_SYMBOL +0xceec8abd __mb_cache_entry_free vmlinux EXPORT_SYMBOL +0xfff1c294 igrab vmlinux EXPORT_SYMBOL +0x6a325a42 virtio_transport_recv_pkt net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xf03fe862 ceph_pagelist_set_cursor net/ceph/libceph EXPORT_SYMBOL +0x42041512 i2c_get_dma_safe_msg_buf drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x0f2f04b7 mlx5_core_query_vport_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x3c6fecca mlx5_query_nic_vport_min_inline drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xc71e64a9 snmp_get_cpu_field vmlinux EXPORT_SYMBOL_GPL +0x663bb448 devlink_fmsg_obj_nest_end vmlinux EXPORT_SYMBOL_GPL +0xad748725 rtnl_notify vmlinux EXPORT_SYMBOL +0xe9316872 __rtnl_link_register vmlinux EXPORT_SYMBOL_GPL +0xf92ae8b1 zerocopy_sg_from_iter vmlinux EXPORT_SYMBOL +0xb646d321 sock_recvmsg vmlinux EXPORT_SYMBOL +0x0ddc9ecf pcc_mbox_free_channel vmlinux EXPORT_SYMBOL_GPL +0x59a8c84b thermal_zone_get_offset vmlinux EXPORT_SYMBOL_GPL +0x2dd7d8b1 usb_hcd_unmap_urb_for_dma vmlinux EXPORT_SYMBOL_GPL +0xcc483377 phy_select_page vmlinux EXPORT_SYMBOL_GPL +0x742f1d34 pci_back_from_sleep vmlinux EXPORT_SYMBOL +0x243f0b4b crypto_check_attr_type vmlinux EXPORT_SYMBOL_GPL +0x8ec581b8 drm_connector_set_link_status_property drivers/gpu/drm/drm EXPORT_SYMBOL +0xdf280afa linkwatch_fire_event vmlinux EXPORT_SYMBOL +0x8b13a8b8 hid_snto32 vmlinux EXPORT_SYMBOL_GPL +0x3b60b307 ps2_sliced_command vmlinux EXPORT_SYMBOL +0xcc328a5c reservation_ww_class vmlinux EXPORT_SYMBOL +0xecfb281c dev_pm_set_dedicated_wake_irq vmlinux EXPORT_SYMBOL_GPL +0x3b029f48 acpi_install_fixed_event_handler vmlinux EXPORT_SYMBOL +0x50ca9985 rhashtable_walk_peek vmlinux EXPORT_SYMBOL_GPL +0x4905bffd _copy_from_iter vmlinux EXPORT_SYMBOL +0xfb687b54 fs_bio_set vmlinux EXPORT_SYMBOL +0x9af9328d __get_task_comm vmlinux EXPORT_SYMBOL_GPL +0x27479d14 param_free_charp vmlinux EXPORT_SYMBOL +0x606a42f6 nf_tables_deactivate_flowtable net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xdceeae51 iscsit_check_dataout_payload drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL 
+0x1ee8202f dm_cell_release_no_holder drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x4e35676a team_option_inst_set_change drivers/net/team/team EXPORT_SYMBOL +0xaffb2621 alloc_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x3ab7b28d load_pdptrs arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xecac8407 __memcpy vmlinux EXPORT_SYMBOL +0xbdfb6dbb __fentry__ vmlinux EXPORT_SYMBOL +0x6340434e x86_model vmlinux EXPORT_SYMBOL_GPL +0x4fe1eddf unregister_netevent_notifier vmlinux EXPORT_SYMBOL_GPL +0x0060b97d ethtool_rx_flow_rule_create vmlinux EXPORT_SYMBOL +0xc7ca8f7c input_mt_report_slot_state vmlinux EXPORT_SYMBOL +0xa80241d4 scsi_init_io vmlinux EXPORT_SYMBOL +0xa5a6b5e6 platform_get_irq_optional vmlinux EXPORT_SYMBOL_GPL +0xc193c972 clk_mux_determine_rate_flags vmlinux EXPORT_SYMBOL_GPL +0xfb3aaae0 rpc_queue_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x592b9684 nf_ct_seq_adjust net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x380c5717 mlx4_unicast_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xa028d051 mlx4_unicast_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x38bfbafc kvm_vcpu_is_reset_bsp arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6b2e46d0 hiddev_hid_event vmlinux EXPORT_SYMBOL_GPL +0x51e7ad2f mddev_congested vmlinux EXPORT_SYMBOL_GPL +0xb407c1df percpu_ref_switch_to_atomic vmlinux EXPORT_SYMBOL_GPL +0x78fa1ea4 __blkg_prfill_u64 vmlinux EXPORT_SYMBOL_GPL +0xd4f71a84 vmf_insert_pfn vmlinux EXPORT_SYMBOL +0x165b1948 ceph_pagelist_free_reserve net/ceph/libceph EXPORT_SYMBOL +0x36f9a7d1 adf_dev_init drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x6331d9e7 cxgbi_device_portmap_create drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x367b54ad drm_av_sync_delay drivers/gpu/drm/drm EXPORT_SYMBOL +0x42bed8d4 unix_gc_lock vmlinux EXPORT_SYMBOL +0x73147cf1 nf_queue_nf_hook_drop vmlinux EXPORT_SYMBOL_GPL +0x004415f5 nr_hook_count vmlinux EXPORT_SYMBOL +0x314cffaa fcoe_ctlr_link_up vmlinux EXPORT_SYMBOL +0x398fddef debugfs_create_x8 vmlinux EXPORT_SYMBOL_GPL +0xe1c19628 cgroup_get_from_path vmlinux EXPORT_SYMBOL_GPL +0x6572b00a irq_domain_xlate_twocell vmlinux EXPORT_SYMBOL_GPL +0x4786ea81 irq_domain_xlate_onecell vmlinux EXPORT_SYMBOL_GPL +0xca9fc082 synproxy_net_id net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x2aaad112 dm_cell_quiesce_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x283656cf mlx4_qp_query drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xb19133b5 drm_panel_remove drivers/gpu/drm/drm EXPORT_SYMBOL +0x7257c258 drm_object_attach_property drivers/gpu/drm/drm EXPORT_SYMBOL +0xfd5640d7 fib_info_nh_uses_dev vmlinux EXPORT_SYMBOL_GPL +0xc7e9e3e3 netdev_update_lockdep_key vmlinux EXPORT_SYMBOL +0x6f9b5826 power_supply_get_drvdata vmlinux EXPORT_SYMBOL_GPL +0x894da7ce uart_write_wakeup vmlinux EXPORT_SYMBOL +0x38b4380a pci_bus_read_config_word vmlinux EXPORT_SYMBOL +0x465e24ff ucs2_utf8size vmlinux EXPORT_SYMBOL +0x14d9b818 crypto_ahash_walk_first vmlinux EXPORT_SYMBOL_GPL +0xb26a1add elfcorehdr_addr vmlinux EXPORT_SYMBOL_GPL +0x00033b76 rdma_dev_access_netns drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x24935482 __tracepoint_bcache_bypass_sequential drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x5e96e973 nfs_pgio_current_mirror fs/nfs/nfs EXPORT_SYMBOL_GPL +0x90efe988 kvm_vcpu_on_spin arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x57049dce devlink_dpipe_table_unregister vmlinux EXPORT_SYMBOL_GPL +0x5bd61ec2 netdev_lower_get_next vmlinux EXPORT_SYMBOL +0x0f635ef2 skb_append vmlinux EXPORT_SYMBOL +0x31c7970f pciserial_suspend_ports 
vmlinux EXPORT_SYMBOL_GPL +0x0b5245f1 read_cache_page_gfp vmlinux EXPORT_SYMBOL +0xe79bf0c4 klp_shadow_get vmlinux EXPORT_SYMBOL_GPL +0xf42bd5eb setup_udp_tunnel_sock net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x3283cad1 adf_cfg_add_key_value_param drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x33b3697c usb_stor_CB_reset drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xe53258bd drm_fb_helper_debug_leave drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xc5e3cec8 __camellia_setkey arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL +0xdd151cba sk_stream_error vmlinux EXPORT_SYMBOL +0x4e3824ba iscsi_tcp_segment_unmap vmlinux EXPORT_SYMBOL_GPL +0x34f7886d del_random_ready_callback vmlinux EXPORT_SYMBOL +0x36f2d587 pci_ioremap_wc_bar vmlinux EXPORT_SYMBOL_GPL +0x1971ee36 crypto_sha1_finup vmlinux EXPORT_SYMBOL +0xcaf81729 put_user_pages vmlinux EXPORT_SYMBOL +0x51a11b0d audit_log_start vmlinux EXPORT_SYMBOL +0xe41a4876 nf_flow_table_cleanup net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0xfb578fc5 memset vmlinux EXPORT_SYMBOL +0x463f782c netdev_crit vmlinux EXPORT_SYMBOL +0xea5aa820 netdev_bonding_info_change vmlinux EXPORT_SYMBOL +0xeda878fd devm_nvmem_unregister vmlinux EXPORT_SYMBOL +0xd3bfa753 usb_bus_idr_lock vmlinux EXPORT_SYMBOL_GPL +0xc708f1fe ec_write vmlinux EXPORT_SYMBOL +0x700c7b88 sysfs_create_files vmlinux EXPORT_SYMBOL_GPL +0x9363ad8d dquot_get_state vmlinux EXPORT_SYMBOL +0x58eece91 sget vmlinux EXPORT_SYMBOL +0xe3615010 pci_map_biosrom vmlinux EXPORT_SYMBOL +0xb4916dc0 nf_ct_frag6_gather net/ipv6/netfilter/nf_defrag_ipv6 EXPORT_SYMBOL_GPL +0x7d394ea2 tcp_vegas_pkts_acked net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0x02b1eb7f ip_tunnel_setup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x14390311 target_set_cmd_data_length drivers/target/target_core_mod EXPORT_SYMBOL +0xf8adc738 cdrom_media_changed drivers/cdrom/cdrom EXPORT_SYMBOL +0x789fae43 mlx4_get_vf_stats drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf423fb88 nfs4_sequence_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x0ff80f59 zalloc_cpumask_var vmlinux EXPORT_SYMBOL +0x9c6939f0 md_find_rdev_nr_rcu vmlinux EXPORT_SYMBOL_GPL +0x0329f343 iscsi_tcp_dgst_header vmlinux EXPORT_SYMBOL_GPL +0x8a462b37 ahci_start_engine vmlinux EXPORT_SYMBOL_GPL +0x2d67031f ata_platform_remove_one vmlinux EXPORT_SYMBOL_GPL +0xbfd54a5c component_master_add_with_match vmlinux EXPORT_SYMBOL_GPL +0x8defda8b tty_save_termios vmlinux EXPORT_SYMBOL_GPL +0xef7cbbb6 pcie_get_mps vmlinux EXPORT_SYMBOL +0x3a13f54a sgl_alloc vmlinux EXPORT_SYMBOL +0xeb44339a free_pages_exact vmlinux EXPORT_SYMBOL +0xbaf9d785 __tss_limit_invalid vmlinux EXPORT_SYMBOL_GPL +0x411310f0 mlx4_cq_modify drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x800b8194 nfs4_mark_deviceid_unavailable fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x387f810a __fscache_uncache_all_inode_pages fs/fscache/fscache EXPORT_SYMBOL +0xe000be02 nvme_disable_ctrl vmlinux EXPORT_SYMBOL_GPL +0x2739535e pinctrl_pm_select_default_state vmlinux EXPORT_SYMBOL_GPL +0x67de3586 debugfs_create_atomic_t vmlinux EXPORT_SYMBOL_GPL +0x064db9a5 mark_mounts_for_expiry vmlinux EXPORT_SYMBOL_GPL +0x6d72fb37 bdi_put vmlinux EXPORT_SYMBOL +0xa497a7b2 file_check_and_advance_wb_err vmlinux EXPORT_SYMBOL +0x66750a66 drm_crtc_vblank_count_and_time drivers/gpu/drm/drm EXPORT_SYMBOL +0x7dcfa545 drm_send_event drivers/gpu/drm/drm EXPORT_SYMBOL +0x77a74370 __tracepoint_kvm_pml_full arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xdf3ab738 kvm_get_apic_mode arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xc294ee13 
inet_csk_reqsk_queue_drop_and_put vmlinux EXPORT_SYMBOL +0xed4822fa nvme_cleanup_cmd vmlinux EXPORT_SYMBOL_GPL +0x6d340f64 tty_termios_input_baud_rate vmlinux EXPORT_SYMBOL +0x0d3ca6c1 idma32_dma_remove vmlinux EXPORT_SYMBOL_GPL +0x0b2c64a3 raid6_vgfmul vmlinux EXPORT_SYMBOL +0x8979107c vfs_get_fsid vmlinux EXPORT_SYMBOL +0x174fdc11 d_set_d_op vmlinux EXPORT_SYMBOL +0x3771961d ns_capable_noaudit vmlinux EXPORT_SYMBOL +0x2e6d8b3c adf_dev_stop drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x6955c2df unregister_cdrom drivers/cdrom/cdrom EXPORT_SYMBOL +0xc6b1fdbe xfrm_aalg_get_byidx vmlinux EXPORT_SYMBOL_GPL +0x85aee3d6 unregister_tcf_proto_ops vmlinux EXPORT_SYMBOL +0x59a5339c devlink_port_register vmlinux EXPORT_SYMBOL_GPL +0x89383979 __dev_get_by_flags vmlinux EXPORT_SYMBOL +0x74dc40de register_md_cluster_operations vmlinux EXPORT_SYMBOL +0xb22cb4f8 ps2_drain vmlinux EXPORT_SYMBOL +0x0a467cf0 fc_lport_recv vmlinux EXPORT_SYMBOL +0x4e5d06d2 spi_attach_transport vmlinux EXPORT_SYMBOL +0x82092899 badrange_forget vmlinux EXPORT_SYMBOL_GPL +0x34e9632e pci_cfg_access_unlock vmlinux EXPORT_SYMBOL_GPL +0x922f45a6 __bitmap_clear vmlinux EXPORT_SYMBOL +0xb6936ffe _bcd2bin vmlinux EXPORT_SYMBOL +0x33caca8a finalize_exec vmlinux EXPORT_SYMBOL +0x69824a14 ibcm_reject_msg drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x09658c90 cxgb4_read_tpte drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x6c8a6cf6 drm_agp_bind drivers/gpu/drm/drm EXPORT_SYMBOL +0xd859ac8c o2net_fill_node_map fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x95a67b07 udp_table vmlinux EXPORT_SYMBOL +0x7ac722bd phy_all_ports_features_array vmlinux EXPORT_SYMBOL_GPL +0xc56bd143 agp3_generic_tlbflush vmlinux EXPORT_SYMBOL +0x4dd77a51 pnp_get_resource vmlinux EXPORT_SYMBOL +0xc759896c acpi_bus_get_status vmlinux EXPORT_SYMBOL +0xaa7b2371 iterate_dir vmlinux EXPORT_SYMBOL +0x538e3f9b trace_event_buffer_reserve vmlinux EXPORT_SYMBOL_GPL +0x2e2f1740 ring_buffer_record_disable_cpu vmlinux EXPORT_SYMBOL_GPL +0x79df9633 ioremap_encrypted vmlinux EXPORT_SYMBOL +0x5fb8848b halt_poll_ns_grow_start arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x0fcc1969 copy_from_user_nmi vmlinux EXPORT_SYMBOL_GPL +0x43b40ce7 skb_coalesce_rx_frag vmlinux EXPORT_SYMBOL +0xef003bb7 power_supply_am_i_supplied vmlinux EXPORT_SYMBOL_GPL +0xaf3dd7dc scsi_logging_level vmlinux EXPORT_SYMBOL +0x6efa5e6c dax_supported vmlinux EXPORT_SYMBOL_GPL +0x71eaffc7 transport_configure_device vmlinux EXPORT_SYMBOL_GPL +0xb4577003 acpi_dev_present vmlinux EXPORT_SYMBOL +0x04966237 nla_put vmlinux EXPORT_SYMBOL +0x5e855e56 gen_pool_first_fit_align vmlinux EXPORT_SYMBOL +0xf23fcb99 __kfifo_in vmlinux EXPORT_SYMBOL +0xb11af8e1 revalidate_disk vmlinux EXPORT_SYMBOL +0xdd33352a register_shrinker vmlinux EXPORT_SYMBOL +0x947b40c6 cpu_smt_possible vmlinux EXPORT_SYMBOL_GPL +0xea508a78 xprt_wait_for_reply_request_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x54609885 ip_tunnel_dellink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x23e4778a rdma_res_to_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xb04a43ad __xa_alloc_cyclic vmlinux EXPORT_SYMBOL +0xf3cafca7 inet6_csk_route_req vmlinux EXPORT_SYMBOL +0x9a471d9e del_dma_domain vmlinux EXPORT_SYMBOL_GPL +0xc10908d6 fcoe_get_wwn vmlinux EXPORT_SYMBOL_GPL +0x3976ea7d report_iommu_fault vmlinux EXPORT_SYMBOL_GPL +0x70e4cb59 tpm_tis_resume vmlinux EXPORT_SYMBOL_GPL +0x6cfb19d0 hwrng_register vmlinux EXPORT_SYMBOL_GPL +0xd05c11f7 misc_register vmlinux EXPORT_SYMBOL +0xca143a4c blk_post_runtime_suspend vmlinux EXPORT_SYMBOL +0x5a08cb3b 
af_alg_poll vmlinux EXPORT_SYMBOL_GPL +0x9038310e block_invalidatepage vmlinux EXPORT_SYMBOL +0x9168ceb3 may_umount_tree vmlinux EXPORT_SYMBOL +0xd87039c8 d_set_fallthru vmlinux EXPORT_SYMBOL +0xdf9208c0 alloc_workqueue vmlinux EXPORT_SYMBOL_GPL +0xb85cc46e ip_tunnel_init_net net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x31825ffd synproxy_recv_client_ack_ipv6 net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xb6bc61ab ib_init_ah_from_mcmember drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x28db5c8c drm_modeset_backoff drivers/gpu/drm/drm EXPORT_SYMBOL +0x1fe1e1ad locks_end_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL +0xcd0ef44c udp_lib_get_port vmlinux EXPORT_SYMBOL +0x92002bcb ip_route_input_noref vmlinux EXPORT_SYMBOL +0x22aa8fd9 skb_defer_rx_timestamp vmlinux EXPORT_SYMBOL_GPL +0x75f6963b netdev_walk_all_lower_dev_rcu vmlinux EXPORT_SYMBOL_GPL +0x7781a8d7 ata_sff_wait_ready vmlinux EXPORT_SYMBOL_GPL +0xfcef654a dev_attr_unload_heads vmlinux EXPORT_SYMBOL_GPL +0xb3c72e65 firmware_config_table vmlinux EXPORT_SYMBOL_GPL +0x1e17982b tty_ldisc_ref vmlinux EXPORT_SYMBOL_GPL +0x21079f66 fb_deferred_io_mmap vmlinux EXPORT_SYMBOL +0xbfac8e06 devm_ioport_map vmlinux EXPORT_SYMBOL +0xe0f2c770 fscrypt_decrypt_bio vmlinux EXPORT_SYMBOL +0xbd7bbf16 file_modified vmlinux EXPORT_SYMBOL +0xb512960f kernel_write vmlinux EXPORT_SYMBOL +0x7e1754e2 __devm_irq_alloc_descs vmlinux EXPORT_SYMBOL_GPL +0x4522d82a lc_reset lib/lru_cache EXPORT_SYMBOL +0x7d636c8a ceph_msg_data_add_pagelist net/ceph/libceph EXPORT_SYMBOL +0xb2449fc4 rpc_put_sb_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb7a878c4 nfs_show_devname fs/nfs/nfs EXPORT_SYMBOL_GPL +0xd3be32c5 tcp_md5_do_add vmlinux EXPORT_SYMBOL +0xe09a9c63 rtnl_af_unregister vmlinux EXPORT_SYMBOL_GPL +0xbfee3ad5 loop_unregister_transfer vmlinux EXPORT_SYMBOL +0xc8ee9442 class_dev_iter_exit vmlinux EXPORT_SYMBOL_GPL +0x69cd341a device_find_child_by_name vmlinux EXPORT_SYMBOL_GPL +0x9d099a39 acpi_remove_gpe_handler vmlinux EXPORT_SYMBOL +0xe70877d4 acpi_remove_sci_handler vmlinux EXPORT_SYMBOL +0x07c3577c __of_phy_provider_register vmlinux EXPORT_SYMBOL_GPL +0xb918cc62 phy_create vmlinux EXPORT_SYMBOL_GPL +0x5ab6f9e6 blk_integrity_unregister vmlinux EXPORT_SYMBOL +0xaebe8b4c iget_failed vmlinux EXPORT_SYMBOL +0xce678a59 xdr_decode_netobj net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x799a8849 input_register_polled_device drivers/input/input-polldev EXPORT_SYMBOL +0xaa05f38b __fscache_acquire_cookie fs/fscache/fscache EXPORT_SYMBOL +0xfa277969 ata_bmdma_dumb_qc_prep vmlinux EXPORT_SYMBOL_GPL +0x44437678 sata_set_spd vmlinux EXPORT_SYMBOL_GPL +0x4702c709 devm_clk_bulk_get_optional vmlinux EXPORT_SYMBOL_GPL +0x536d4ba7 pci_restore_state vmlinux EXPORT_SYMBOL +0x652032cb mac_pton vmlinux EXPORT_SYMBOL +0xf331236f btree_geo32 vmlinux EXPORT_SYMBOL_GPL +0x7d8747ea filemap_fdatawait_range vmlinux EXPORT_SYMBOL +0xe3c226df adf_dev_started drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x0c2fc676 __tracepoint_mlx5_fs_del_fte drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x82e45588 mlx5_modify_header_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x83587294 mlx5_eq_notifier_register drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x86d1f81f fuse_free_conn fs/fuse/fuse EXPORT_SYMBOL_GPL +0xbce11fc6 tc_setup_flow_action vmlinux EXPORT_SYMBOL +0x493af25f ethtool_intersect_link_masks vmlinux EXPORT_SYMBOL +0x7bf07143 skb_complete_tx_timestamp vmlinux EXPORT_SYMBOL_GPL +0x853d4458 acpi_bus_trim vmlinux EXPORT_SYMBOL_GPL 
+0x02b9756f debugfs_create_file_unsafe vmlinux EXPORT_SYMBOL_GPL +0x63295c68 inode_set_bytes vmlinux EXPORT_SYMBOL +0xbb50122d inode_sub_bytes vmlinux EXPORT_SYMBOL +0xfeee6f27 ceph_alloc_page_vector net/ceph/libceph EXPORT_SYMBOL +0x1ccb76e8 svc_unreg_xprt_class net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0bf57c28 rpc_prepare_reply_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbf858ec9 vhost_dev_check_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3b88ec31 ib_cm_notify drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x984a47e3 rdma_nl_unicast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd3bb51c7 pps_register_source drivers/pps/pps_core EXPORT_SYMBOL +0x216d0029 mlx5_modify_header_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xb32b9a16 mlx5_buf_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x676a2c7c mlx4_buf_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xddc08537 dlmlock fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0xe28fe403 reprogram_counter arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x94a9b08b kvm_io_bus_get_dev arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x755d3e60 arp_xmit vmlinux EXPORT_SYMBOL +0xba3d63ae flow_rule_match_tcp vmlinux EXPORT_SYMBOL +0xec550f2b __dev_getfirstbyhwtype vmlinux EXPORT_SYMBOL +0x748a2e25 __dev_get_by_index vmlinux EXPORT_SYMBOL +0xab663f3e sock_inuse_get vmlinux EXPORT_SYMBOL_GPL +0x66d34505 agp_enable vmlinux EXPORT_SYMBOL +0xa3a7f566 clkdev_hw_create vmlinux EXPORT_SYMBOL_GPL +0x8f5b0bd7 pci_release_selected_regions vmlinux EXPORT_SYMBOL +0x1585b7a7 debugfs_real_fops vmlinux EXPORT_SYMBOL_GPL +0x6f69a44f cgroup_rstat_updated vmlinux EXPORT_SYMBOL_GPL +0xa16c8613 _raw_read_unlock_irqrestore vmlinux EXPORT_SYMBOL +0x957c14e3 smca_get_long_name vmlinux EXPORT_SYMBOL_GPL +0xafb8a407 ceph_msgr_flush net/ceph/libceph EXPORT_SYMBOL +0xedcf6be4 qword_add net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0bf3227c rpc_max_bc_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4a9d6da9 ip6_tnl_encap_setup net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL +0xb93c7211 ppp_register_net_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0xf2c342ed mlx4_get_protocol_dev drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xad53cc13 __inet6_lookup_established vmlinux EXPORT_SYMBOL +0xe0a1a6a2 ipv6_stub vmlinux EXPORT_SYMBOL_GPL +0x2e69d638 dm_bio_from_per_bio_data vmlinux EXPORT_SYMBOL_GPL +0xf1926aab fc_eh_device_reset vmlinux EXPORT_SYMBOL +0xccdb72fd clk_get vmlinux EXPORT_SYMBOL +0x5482d906 __nla_put_nohdr vmlinux EXPORT_SYMBOL +0xbcbfc9f1 blk_mq_end_request vmlinux EXPORT_SYMBOL +0x770583a8 blk_register_queue vmlinux EXPORT_SYMBOL_GPL +0x38538e94 crypto_unregister_instance vmlinux EXPORT_SYMBOL_GPL +0xc4ae915e arch_touch_nmi_watchdog vmlinux EXPORT_SYMBOL +0xbcb838a1 amd_flush_garts vmlinux EXPORT_SYMBOL_GPL +0x27057f90 edac_stop_work drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x6f774db9 mlx5_modify_nic_vport_vlans drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x676a11a9 cxgb4_unregister_uld drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x7846e5ea mr_mfc_find_parent vmlinux EXPORT_SYMBOL +0x40973662 sysctl_udp_mem vmlinux EXPORT_SYMBOL +0xa0ebd14c sysctl_tcp_mem vmlinux EXPORT_SYMBOL +0xe37b778f mrp_uninit_applicant vmlinux EXPORT_SYMBOL_GPL +0x99b25076 md_register_thread vmlinux EXPORT_SYMBOL +0x6e286604 hdmi_drm_infoframe_pack vmlinux EXPORT_SYMBOL +0xdab5a1eb interval_tree_insert vmlinux EXPORT_SYMBOL_GPL +0x5abf63d9 bio_chain vmlinux EXPORT_SYMBOL +0x5e51cd74 swiotlb_nr_tbl vmlinux 
EXPORT_SYMBOL_GPL +0xfd748217 __cpuhp_remove_state_cpuslocked vmlinux EXPORT_SYMBOL +0xb674a534 acpi_unmap_cpu vmlinux EXPORT_SYMBOL +0xd09bf554 generic_mii_ioctl drivers/net/mii EXPORT_SYMBOL +0x9da8c296 mlx5_get_flow_namespace drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa6064d9a inet_csk_accept vmlinux EXPORT_SYMBOL +0x82880a8b inet_hash_connect vmlinux EXPORT_SYMBOL_GPL +0xc82a4c58 nf_log_trace vmlinux EXPORT_SYMBOL +0x1c7321f4 reuseport_add_sock vmlinux EXPORT_SYMBOL +0x6b81c38b power_supply_unreg_notifier vmlinux EXPORT_SYMBOL_GPL +0x6e8848d9 pci_add_resource vmlinux EXPORT_SYMBOL +0x0334da4e scsi_command_size_tbl vmlinux EXPORT_SYMBOL +0x8b9b8cf8 skcipher_walk_async vmlinux EXPORT_SYMBOL_GPL +0x55304001 do_splice_direct vmlinux EXPORT_SYMBOL +0x4252bb36 inode_congested vmlinux EXPORT_SYMBOL_GPL +0xa843805a get_unused_fd_flags vmlinux EXPORT_SYMBOL +0x8a7094ba vm_brk_flags vmlinux EXPORT_SYMBOL +0xb1e25684 __trace_bputs vmlinux EXPORT_SYMBOL_GPL +0x16e297c3 bit_wait vmlinux EXPORT_SYMBOL +0x91607d95 set_memory_wb vmlinux EXPORT_SYMBOL +0x767ddb02 set_memory_wc vmlinux EXPORT_SYMBOL +0xa0619f16 ceph_osdc_call net/ceph/libceph EXPORT_SYMBOL +0x15d45559 xprt_wake_pending_tasks net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc248bde2 dm_disk_bitset_init drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xaeaf1397 drm_atomic_get_old_connector_for_encoder drivers/gpu/drm/drm EXPORT_SYMBOL +0x1121eeeb nf_hook_slow_list vmlinux EXPORT_SYMBOL +0x0c80a83a tcf_unregister_action vmlinux EXPORT_SYMBOL +0x9cad5791 usb_bulk_msg vmlinux EXPORT_SYMBOL_GPL +0xa8d3e684 fc_lport_init vmlinux EXPORT_SYMBOL +0x7381287f trace_handle_return vmlinux EXPORT_SYMBOL_GPL +0x43ad39ee cpu_tlbstate vmlinux EXPORT_SYMBOL +0x8803299b ceph_messenger_fini net/ceph/libceph EXPORT_SYMBOL +0x7cd05d5a svc_generic_rpcbind_set net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2ff6e1eb vq_meta_prefetch drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xff263379 rdma_modify_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xac5eeed3 crypto_engine_start crypto/crypto_engine EXPORT_SYMBOL_GPL +0x63e14e3c __tracepoint_nfs_fsync_exit fs/nfs/nfs EXPORT_SYMBOL_GPL +0x92d895cb gfn_to_pfn_prot arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x34d9d3f2 xfrm_state_delete_tunnel vmlinux EXPORT_SYMBOL +0xfd8f2991 sock_edemux vmlinux EXPORT_SYMBOL +0x825d5737 phy_attached_print vmlinux EXPORT_SYMBOL +0x2071cfbc device_remove_properties vmlinux EXPORT_SYMBOL_GPL +0x181bc200 platform_device_add_resources vmlinux EXPORT_SYMBOL_GPL +0x26972aa1 acpiphp_register_attention vmlinux EXPORT_SYMBOL_GPL +0x4688d7ec pvclock_gtod_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x46389504 irq_chip_eoi_parent vmlinux EXPORT_SYMBOL_GPL +0x7521afb6 leave_mm vmlinux EXPORT_SYMBOL_GPL +0x091a6869 ceph_osdc_cancel_request net/ceph/libceph EXPORT_SYMBOL +0x6f3e94d1 ib_umem_page_count drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xc50ea736 ib_set_vf_link_state drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2a1479be drm_modeset_lock_init drivers/gpu/drm/drm EXPORT_SYMBOL +0xe382ff57 drm_gem_put_pages drivers/gpu/drm/drm EXPORT_SYMBOL +0x4a846e50 drm_gem_get_pages drivers/gpu/drm/drm EXPORT_SYMBOL +0xa05221bd drm_fbdev_generic_setup drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xa703c2d0 fscache_add_cache fs/fscache/fscache EXPORT_SYMBOL +0xf82ec573 rb_prev vmlinux EXPORT_SYMBOL +0xbefa4e2a in6_dev_finish_destroy vmlinux EXPORT_SYMBOL +0x27d12fcd eth_header vmlinux EXPORT_SYMBOL +0xe590dea3 sk_busy_loop_end vmlinux EXPORT_SYMBOL +0x7eeadf53 sas_expander_alloc 
vmlinux EXPORT_SYMBOL +0xe4894546 scsi_rescan_device vmlinux EXPORT_SYMBOL +0x02ea111e scsi_driverbyte_string vmlinux EXPORT_SYMBOL +0x9ed554b3 unregister_keyboard_notifier vmlinux EXPORT_SYMBOL_GPL +0x4b1baf68 acpi_unlock_ac_dir vmlinux EXPORT_SYMBOL +0x9d8d70f8 unregister_framebuffer vmlinux EXPORT_SYMBOL +0xe1377875 jbd2_journal_init_dev vmlinux EXPORT_SYMBOL +0x9d180003 forget_cached_acl vmlinux EXPORT_SYMBOL +0x47e7f6c5 bd_unlink_disk_holder vmlinux EXPORT_SYMBOL_GPL +0xa9e18049 task_handoff_unregister vmlinux EXPORT_SYMBOL_GPL +0x36b6ebbf down_killable vmlinux EXPORT_SYMBOL +0x0bf28108 ib_umem_get drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x61d00d91 rdma_nl_put_driver_u32 drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe3c17af1 __closure_wake_up drivers/md/bcache/bcache EXPORT_SYMBOL +0xab7d41be drm_master_internal_release drivers/gpu/drm/drm EXPORT_SYMBOL +0x52483536 drm_gem_fb_destroy drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x93f73b14 netdev_master_upper_dev_get_rcu vmlinux EXPORT_SYMBOL +0x6d39666b napi_gro_receive vmlinux EXPORT_SYMBOL +0x60091316 clk_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x6b3ae022 acpi_os_unmap_iomem vmlinux EXPORT_SYMBOL_GPL +0xc7734e3e pci_reset_function vmlinux EXPORT_SYMBOL_GPL +0x34e053e2 rhashtable_walk_start_check vmlinux EXPORT_SYMBOL_GPL +0x1ce90859 debugfs_print_regs32 vmlinux EXPORT_SYMBOL_GPL +0x7357b105 buffer_check_dirty_writeback vmlinux EXPORT_SYMBOL +0xe7b5c5c4 cdrom_check_events drivers/cdrom/cdrom EXPORT_SYMBOL +0xa8250393 kvm_release_page_clean arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xeae3dfd6 __const_udelay vmlinux EXPORT_SYMBOL +0x469a6ec7 tcp_parse_md5sig_option vmlinux EXPORT_SYMBOL +0x7693908f nf_unregister_queue_handler vmlinux EXPORT_SYMBOL +0x302215f1 sk_psock_tls_strp_read vmlinux EXPORT_SYMBOL_GPL +0x8878bb6c kill_dev_dax vmlinux EXPORT_SYMBOL_GPL +0xee3a43a0 nd_blk_region_to_dimm vmlinux EXPORT_SYMBOL_GPL +0xd92deb6b acpi_evaluate_object vmlinux EXPORT_SYMBOL +0xe5f4c9d8 do_clone_file_range vmlinux EXPORT_SYMBOL +0xd0df1896 dump_page vmlinux EXPORT_SYMBOL +0x638aff11 proc_douintvec_minmax vmlinux EXPORT_SYMBOL_GPL +0x1b7a35aa mlx4_ALLOCATE_VPP_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x87a89769 mlx4_ALLOCATE_VPP_get drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x05876c69 i915_gpu_busy drivers/gpu/drm/i915/i915 EXPORT_SYMBOL_GPL +0x9a513010 drm_atomic_private_obj_fini drivers/gpu/drm/drm EXPORT_SYMBOL +0x31cab048 ipmi_smi_msg_received drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xf44a904a net_ns_barrier vmlinux EXPORT_SYMBOL +0x7647726c handle_sysrq vmlinux EXPORT_SYMBOL +0xd0598280 pcie_port_bus_type vmlinux EXPORT_SYMBOL_GPL +0x0668b595 _kstrtoul vmlinux EXPORT_SYMBOL +0x16575539 bioset_init_from_src vmlinux EXPORT_SYMBOL +0x2cea32ee unregister_oldmem_pfn_is_ram vmlinux EXPORT_SYMBOL_GPL +0x0413b6a6 qtree_get_next_id vmlinux EXPORT_SYMBOL +0x2ac5ec59 __block_write_begin vmlinux EXPORT_SYMBOL +0x3517383e register_reboot_notifier vmlinux EXPORT_SYMBOL +0x19567d06 vfio_info_cap_shift drivers/vfio/vfio EXPORT_SYMBOL +0xf7b03510 iscsit_handle_logout_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x0472020a iscsit_find_cmd_from_itt drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xf1d93b08 mlx5_nic_vport_unaffiliate_multiport drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xcd8b879a drm_err drivers/gpu/drm/drm EXPORT_SYMBOL +0x7b5815a7 serpent_setkey crypto/serpent_generic EXPORT_SYMBOL_GPL +0xb53f2810 tcp_sockets_allocated vmlinux 
EXPORT_SYMBOL +0xaae68581 sk_msg_alloc vmlinux EXPORT_SYMBOL_GPL +0x68759e37 sk_free_unlock_clone vmlinux EXPORT_SYMBOL_GPL +0x2579293a tpm_chip_unregister vmlinux EXPORT_SYMBOL_GPL +0x74d2c4a8 vchan_find_desc vmlinux EXPORT_SYMBOL_GPL +0xa805ecfc acpi_release_global_lock vmlinux EXPORT_SYMBOL +0x56f047a2 inode_owner_or_capable vmlinux EXPORT_SYMBOL +0x563b9a02 generic_pipe_buf_confirm vmlinux EXPORT_SYMBOL +0x57a80662 get_super_thawed vmlinux EXPORT_SYMBOL +0x83785add l2tp_ioctl net/l2tp/l2tp_ip EXPORT_SYMBOL +0x8052274e mlx5_query_nic_vport_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb4313dd4 drm_modeset_unlock_all drivers/gpu/drm/drm EXPORT_SYMBOL +0x66c4423a kvm_define_shared_msr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xa31cf474 scsi_device_quiesce vmlinux EXPORT_SYMBOL +0x663182c9 acpi_get_gpe_status vmlinux EXPORT_SYMBOL +0xdad13544 ptrs_per_p4d vmlinux EXPORT_SYMBOL +0x64b22631 gssd_running net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x23c11d0d synproxy_parse_options net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x7fbe0251 mlx5_core_query_sq_state drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x776a40fa drm_bridge_mode_valid drivers/gpu/drm/drm EXPORT_SYMBOL +0x5295b5d9 inet6_protos vmlinux EXPORT_SYMBOL +0x366371f1 pci_select_bars vmlinux EXPORT_SYMBOL +0xf184d189 kernel_power_off vmlinux EXPORT_SYMBOL_GPL +0x2d3a9c09 svc_create_pooled net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3fc44b20 rpc_clnt_xprt_switch_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe3fdf791 ip6t_unregister_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL +0x9b822a10 rdma_set_reuseaddr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x1d264f91 ib_find_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbee331e8 mlx5_register_interface drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x37f4218b mlx4_register_interface drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xc204f6a5 drm_panel_attach drivers/gpu/drm/drm EXPORT_SYMBOL +0xa9fc9036 devlink_dpipe_entry_ctx_close vmlinux EXPORT_SYMBOL_GPL +0x00e08f88 sk_msg_trim vmlinux EXPORT_SYMBOL_GPL +0xad820605 xhci_dbg_trace vmlinux EXPORT_SYMBOL_GPL +0x29e3cfbd sas_unregister_ha vmlinux EXPORT_SYMBOL_GPL +0xeb53c8cf sas_read_port_mode_page vmlinux EXPORT_SYMBOL +0x64a07aed __ata_ehi_push_desc vmlinux EXPORT_SYMBOL_GPL +0x6921af06 nvme_delete_ctrl vmlinux EXPORT_SYMBOL_GPL +0x86471ae2 con_copy_unimap vmlinux EXPORT_SYMBOL +0xfea53f2f tty_ldisc_ref_wait vmlinux EXPORT_SYMBOL_GPL +0x3ec179a8 blk_rq_count_integrity_sg vmlinux EXPORT_SYMBOL +0xa6ad7109 debugfs_create_u64 vmlinux EXPORT_SYMBOL_GPL +0xf3860616 qtree_write_dquot vmlinux EXPORT_SYMBOL +0x436c3e76 dquot_quota_on_mount vmlinux EXPORT_SYMBOL +0xb090fc53 block_commit_write vmlinux EXPORT_SYMBOL +0xfbd30bce clone_private_mount vmlinux EXPORT_SYMBOL_GPL +0x1d8e5532 mlx5_eq_create_generic drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x09601840 __tracepoint_kvm_inj_virq arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xbdc8220e kvm_complete_insn_gp arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xceb739dc kvm_io_bus_write arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x89b2bf23 __netdev_alloc_skb vmlinux EXPORT_SYMBOL +0x98ec27e0 nvdimm_security_setup_events vmlinux EXPORT_SYMBOL_GPL +0x3c973836 pm_clk_resume vmlinux EXPORT_SYMBOL_GPL +0x487d7c9e acpi_lock_ac_dir vmlinux EXPORT_SYMBOL +0xb6c5e614 acpi_processor_evaluate_cst vmlinux EXPORT_SYMBOL_GPL +0x0c5be183 pcie_capability_read_word vmlinux EXPORT_SYMBOL +0xe9e46de7 
perf_event_create_kernel_counter vmlinux EXPORT_SYMBOL_GPL +0x17ec85fd irq_chip_release_resources_parent vmlinux EXPORT_SYMBOL_GPL +0x2704b711 mlx4_free_hwq_res drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0975d3ae cxgbi_sock_skb_entail drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x8b1ecf5d cxgbi_sock_rcv_wr_ack drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xa0507dca drm_fb_helper_fbdev_teardown drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x85bf46e5 nvme_sync_queues vmlinux EXPORT_SYMBOL_GPL +0xd8395c6b tty_kref_put vmlinux EXPORT_SYMBOL +0xe0769a1c fb_set_suspend vmlinux EXPORT_SYMBOL +0x506dff1a __genradix_free vmlinux EXPORT_SYMBOL +0x0f4b1e4e locks_delete_block vmlinux EXPORT_SYMBOL +0x86fe0aef __lookup_constant vmlinux EXPORT_SYMBOL +0x2ff065d3 dput vmlinux EXPORT_SYMBOL +0x3fbf680e try_module_get vmlinux EXPORT_SYMBOL +0x1f73107b rpc_proc_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa96ec2ad rpc_delay net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6b1ba979 fill_inquiry_response drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x5733bce3 mlxfw_firmware_flash drivers/net/ethernet/mellanox/mlxfw/mlxfw EXPORT_SYMBOL +0xdc415cf1 mlxsw_afa_block_continue drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x20de5193 kvm_disable_largepages arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x47c4e08f glue_xts_crypt_128bit_one arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL +0xf8c8a8fb register_net_sysctl vmlinux EXPORT_SYMBOL_GPL +0x32a8656a dst_dev_put vmlinux EXPORT_SYMBOL +0xc39728de usb_match_one_id vmlinux EXPORT_SYMBOL_GPL +0x964de5bd ufshcd_release vmlinux EXPORT_SYMBOL_GPL +0xc428068d sata_deb_timing_long vmlinux EXPORT_SYMBOL_GPL +0xaf076aec nd_fletcher64 vmlinux EXPORT_SYMBOL_GPL +0x7e6d9d59 screen_glyph vmlinux EXPORT_SYMBOL_GPL +0x665e92a0 clk_set_duty_cycle vmlinux EXPORT_SYMBOL_GPL +0x5f2e6fd0 srcu_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x92141343 kvm_async_pf_task_wake vmlinux EXPORT_SYMBOL_GPL +0x7a2af7b4 cpu_number vmlinux EXPORT_SYMBOL +0xede98ec5 intel_pt_validate_hw_cap vmlinux EXPORT_SYMBOL_GPL +0x1e6b75a3 nft_parse_register net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x305e5701 rdma_addr_size_kss drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xcc5019c7 drm_atomic_get_mst_topology_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xdd6d8f42 vlan_vids_del_by_dev vmlinux EXPORT_SYMBOL +0x844b7965 ip6_route_output_flags_noref vmlinux EXPORT_SYMBOL_GPL +0x51613181 sk_free vmlinux EXPORT_SYMBOL +0xe5bc228c scsi_is_sdev_device vmlinux EXPORT_SYMBOL +0x89b1d0b7 fwnode_graph_get_port_parent vmlinux EXPORT_SYMBOL_GPL +0xdf141b89 clk_gpio_gate_ops vmlinux EXPORT_SYMBOL_GPL +0xdba0583f clk_hw_register_divider vmlinux EXPORT_SYMBOL_GPL +0x2a303d4d check_signature vmlinux EXPORT_SYMBOL +0xc30192f6 gpu_clean vmlinux EXPORT_SYMBOL_GPL +0x6dbce305 mlx5_frag_buf_alloc_node drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x6a3da01b kobject_init vmlinux EXPORT_SYMBOL +0x389e20ed skb_scrub_packet vmlinux EXPORT_SYMBOL_GPL +0x914c71b1 nd_region_to_nstype vmlinux EXPORT_SYMBOL +0x1968a24e pci_bus_max_busnr vmlinux EXPORT_SYMBOL_GPL +0x103020ba blk_set_pm_only vmlinux EXPORT_SYMBOL_GPL +0x785a93b4 si_mem_available vmlinux EXPORT_SYMBOL_GPL +0xb2e764e8 suspend_valid_only_mem vmlinux EXPORT_SYMBOL_GPL +0x42160169 flush_workqueue vmlinux EXPORT_SYMBOL +0x9ac1edaf mdev_from_dev drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x4b983183 drm_plane_create_rotation_property drivers/gpu/drm/drm EXPORT_SYMBOL +0x9f2157c3 ping_close vmlinux EXPORT_SYMBOL_GPL 
+0xe1502545 __tracepoint_neigh_timer_handler vmlinux EXPORT_SYMBOL_GPL
+0xc8d98045 skb_flow_dissect_ct vmlinux EXPORT_SYMBOL
+0x25fbe348 mpt_suspend vmlinux EXPORT_SYMBOL
+0x5c7d077e ata_link_online vmlinux EXPORT_SYMBOL_GPL
+0x34a84df3 __tracepoint_detach_device_from_domain vmlinux EXPORT_SYMBOL_GPL
+0x1ca08578 key_type_user vmlinux EXPORT_SYMBOL_GPL
+0xef5b53e2 nosteal_pipe_buf_ops vmlinux EXPORT_SYMBOL
+0xa9539294 simple_lookup vmlinux EXPORT_SYMBOL
+0xc2a74aa2 get_user_pages_locked vmlinux EXPORT_SYMBOL
+0xf5d7eb5a register_ftrace_export vmlinux EXPORT_SYMBOL_GPL
+0x5a5a2271 __cpu_online_mask vmlinux EXPORT_SYMBOL
+0x424d14c8 ib_query_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xfa599bb2 netlink_register_notifier vmlinux EXPORT_SYMBOL
+0x5d320e4e __skb_get_hash vmlinux EXPORT_SYMBOL
+0x6f0f2a47 dm_io vmlinux EXPORT_SYMBOL
+0xc56f88b6 usb_match_id vmlinux EXPORT_SYMBOL_GPL
+0x2426b434 __scsi_execute vmlinux EXPORT_SYMBOL
+0x5aefd9c2 nd_pfn_validate vmlinux EXPORT_SYMBOL
+0xcaa1d815 ct_get_vmip_vpcid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xe9e799fc ib_ud_header_pack drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf11bf09b cxgbi_ppm_ppods_reserve drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL
+0x63270977 kvm_default_tsc_scaling_ratio arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x775de5db xfrm_policy_insert vmlinux EXPORT_SYMBOL
+0x8f7b9774 lwtunnel_input vmlinux EXPORT_SYMBOL_GPL
+0x3229ff66 __netpoll_cleanup vmlinux EXPORT_SYMBOL_GPL
+0x28db1001 skb_segment vmlinux EXPORT_SYMBOL_GPL
+0xba24a0af dm_suspended vmlinux EXPORT_SYMBOL_GPL
+0x1f8c88c3 usb_add_hcd vmlinux EXPORT_SYMBOL_GPL
+0x9bc3159b blk_queue_max_segments vmlinux EXPORT_SYMBOL
+0x093ee605 unlock_rename vmlinux EXPORT_SYMBOL
+0x0105b595 des_encrypt lib/crypto/libdes EXPORT_SYMBOL_GPL
+0xb6f34afc virtio_transport_put_credit net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0xf37ee6ea virtio_transport_get_credit net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x83551569 ip_vs_conn_put net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x1d87519f ib_device_set_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x97d968af tap_create_cdev drivers/net/tap EXPORT_SYMBOL_GPL
+0x4b658559 drm_gem_dmabuf_export drivers/gpu/drm/drm EXPORT_SYMBOL
+0x49972321 skb_push vmlinux EXPORT_SYMBOL
+0x2e0774dc dm_bufio_get_block_number vmlinux EXPORT_SYMBOL_GPL
+0x08c4876f md_update_sb vmlinux EXPORT_SYMBOL
+0xe9346487 iscsi_session_chkready vmlinux EXPORT_SYMBOL_GPL
+0x08135613 dax_write_cache vmlinux EXPORT_SYMBOL_GPL
+0x9da0c148 device_create vmlinux EXPORT_SYMBOL_GPL
+0x65aeff0d simple_rmdir vmlinux EXPORT_SYMBOL
+0xad4a09d0 free_task vmlinux EXPORT_SYMBOL
+0x3f6d78cc tcp_vegas_get_info net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL
+0x270e98d7 rdma_nl_put_driver_u64 drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6da05808 passthrough_attrib_attrs drivers/target/target_core_mod EXPORT_SYMBOL
+0x28991160 __tracepoint_bcache_read_retry drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x46e4d6a2 i2c_del_adapter drivers/i2c/i2c-core EXPORT_SYMBOL
+0xcf951784 i2c_adapter_depth drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0xb7d2bf44 drm_atomic_helper_commit_modeset_disables drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x582f248e drm_dp_get_adjust_request_pre_emphasis drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x0b2cb334 psched_ratecfg_precompute vmlinux EXPORT_SYMBOL
+0x5838f6c9 rtc_valid_tm vmlinux EXPORT_SYMBOL
+0xff2a7242 usb_register_driver vmlinux EXPORT_SYMBOL_GPL
+0x5612d077 iscsi_find_flashnode_conn vmlinux EXPORT_SYMBOL_GPL
+0x3a244994 scsi_block_requests vmlinux EXPORT_SYMBOL
+0xef0d7e1e nvme_uninit_ctrl vmlinux EXPORT_SYMBOL_GPL
+0x773fa409 __kfifo_dma_in_finish_r vmlinux EXPORT_SYMBOL
+0x98209e58 blk_poll vmlinux EXPORT_SYMBOL_GPL
+0x543ef284 seq_hlist_start vmlinux EXPORT_SYMBOL
+0x26ab4755 put_old_itimerspec32 vmlinux EXPORT_SYMBOL_GPL
+0x44bae227 bit_wait_timeout vmlinux EXPORT_SYMBOL_GPL
+0x31def5c0 drm_bridge_attach drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb6a8a478 sas_end_device_alloc vmlinux EXPORT_SYMBOL
+0xbc78c6ef sdev_disable_disk_events vmlinux EXPORT_SYMBOL
+0x500a53cb ahci_save_initial_config vmlinux EXPORT_SYMBOL_GPL
+0xe053778f tty_ldisc_release vmlinux EXPORT_SYMBOL_GPL
+0x972bf8fa acpi_dma_configure vmlinux EXPORT_SYMBOL_GPL
+0xdbc17656 t10_pi_type3_ip vmlinux EXPORT_SYMBOL
+0xd4a3930a fat_dir_empty vmlinux EXPORT_SYMBOL_GPL
+0xce468dbb cdev_alloc vmlinux EXPORT_SYMBOL
+0xad536c91 x86_cpu_to_acpiid vmlinux EXPORT_SYMBOL
+0xa50bcff0 x86_cpu_to_apicid vmlinux EXPORT_SYMBOL
+0x5752f31b lc_del lib/lru_cache EXPORT_SYMBOL
+0x7676b05e rpc_put_task_async net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6f79feda iw_cm_connect drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x1b037462 __rdma_block_iter_start drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xfcd11260 mlx5_eq_update_ci drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc15243c9 mlx4_qp_reserve_range drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb4e0bfbe ocfs2_plock fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x1486050b usb_hcd_unlink_urb_from_ep vmlinux EXPORT_SYMBOL_GPL
+0x53ad971a ufshcd_dme_get_attr vmlinux EXPORT_SYMBOL_GPL
+0xe847582f device_wakeup_enable vmlinux EXPORT_SYMBOL_GPL
+0x3b9144c9 acpi_get_current_resources vmlinux EXPORT_SYMBOL
+0xdbf7cb70 mpi_get_nbits vmlinux EXPORT_SYMBOL_GPL
+0xb8deeb43 security_path_mkdir vmlinux EXPORT_SYMBOL
+0xc4913442 vfio_group_put_external_user drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x753fbd98 drm_dev_alloc drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa4d98a3e drm_fb_helper_cfb_fillrect drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6892e3c3 kvm_set_pfn_accessed arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9f46ced8 __sw_hweight64 vmlinux EXPORT_SYMBOL
+0x74c134b9 __sw_hweight32 vmlinux EXPORT_SYMBOL
+0x191699f8 get_phy_device vmlinux EXPORT_SYMBOL
+0x09230455 ata_cable_sata vmlinux EXPORT_SYMBOL_GPL
+0xa09c6631 wakeup_source_register vmlinux EXPORT_SYMBOL_GPL
+0xce4e47b6 __kfifo_skip_r vmlinux EXPORT_SYMBOL
+0xf4982ded fat_truncate_time vmlinux EXPORT_SYMBOL_GPL
+0x33283ab3 posix_acl_default_xattr_handler vmlinux EXPORT_SYMBOL_GPL
+0xa155c071 ZSTD_compressBegin_usingDict lib/zstd/zstd_compress EXPORT_SYMBOL
+0xdb3f5f0f svc_exit_thread net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8e0f626a drm_hdcp_check_ksvs_revoked drivers/gpu/drm/drm EXPORT_SYMBOL_GPL
+0x82fb9012 drm_gem_map_detach drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4064b024 drm_gem_map_attach drivers/gpu/drm/drm EXPORT_SYMBOL
+0x230094ac ipmi_smi_watchdog_pretimeout drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xfedb6b94 nfs_lock fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x39e61495 nf_logger_request_module vmlinux EXPORT_SYMBOL_GPL
+0x00754fca tcf_get_next_proto vmlinux EXPORT_SYMBOL
+0x7b5452b8 acpi_unregister_gsi vmlinux EXPORT_SYMBOL_GPL
+0x1b7cb754 rpc_clnt_show_stats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb7abe122 print_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x29ef0066 __tracepoint_bcache_btree_gc_coalesce drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x60806523 i2c_acpi_get_i2c_resource drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x3e1f72e0 kvm_vcpu_cache arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1f491d36 twofish_dec_blk arch/x86/crypto/twofish-x86_64 EXPORT_SYMBOL_GPL
+0x7c7bf6e0 twofish_enc_blk arch/x86/crypto/twofish-x86_64 EXPORT_SYMBOL_GPL
+0x8df92f66 memchr_inv vmlinux EXPORT_SYMBOL
+0xa85e3348 tcp_reno_cong_avoid vmlinux EXPORT_SYMBOL_GPL
+0x607c4683 devlink_info_version_fixed_put vmlinux EXPORT_SYMBOL_GPL
+0x7f4f4f0e hid_dump_report vmlinux EXPORT_SYMBOL_GPL
+0xa8fac44b usb_reset_device vmlinux EXPORT_SYMBOL_GPL
+0xc37a65a0 ata_sas_queuecmd vmlinux EXPORT_SYMBOL_GPL
+0xc0393a4a nvdimm_namespace_disk_name vmlinux EXPORT_SYMBOL
+0xc841d94d nd_region_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0x769f6e64 errseq_check vmlinux EXPORT_SYMBOL
+0x48d06405 kernfs_put vmlinux EXPORT_SYMBOL_GPL
+0xb0bca351 kernfs_get vmlinux EXPORT_SYMBOL_GPL
+0x5e74ef6b __dquot_free_space vmlinux EXPORT_SYMBOL
+0x52499454 locks_copy_lock vmlinux EXPORT_SYMBOL
+0xd0e66b12 irq_domain_associate vmlinux EXPORT_SYMBOL_GPL
+0x1e119a79 rpcauth_get_pseudoflavor net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xca6984b1 flow_block_cb_lookup vmlinux EXPORT_SYMBOL
+0xf2430856 gnet_stats_start_copy_compat vmlinux EXPORT_SYMBOL
+0x2426b138 rtc_alarm_irq_enable vmlinux EXPORT_SYMBOL_GPL
+0x31c2906f fc_lport_reset vmlinux EXPORT_SYMBOL
+0xad7e0d9c pcim_iounmap_regions vmlinux EXPORT_SYMBOL
+0xd4b2fe1c check_move_unevictable_pages vmlinux EXPORT_SYMBOL_GPL
+0x7522f3ba irq_modify_status vmlinux EXPORT_SYMBOL_GPL
+0x134ce9ff ex_handler_clear_fs vmlinux EXPORT_SYMBOL
+0x6393419d adf_exit_arb drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0xede2149c drm_color_lut_extract drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbbc35119 drm_class_device_register drivers/gpu/drm/drm EXPORT_SYMBOL_GPL
+0x8a1b2eb9 dcbnl_ieee_notify vmlinux EXPORT_SYMBOL
+0xef42274f inet6_del_protocol vmlinux EXPORT_SYMBOL
+0x9ee29d50 xfrm_input_resume vmlinux EXPORT_SYMBOL
+0x59ed4677 udplite_prot vmlinux EXPORT_SYMBOL
+0x167c6266 sock_create_kern vmlinux EXPORT_SYMBOL
+0x81a2c488 devfreq_remove_device vmlinux EXPORT_SYMBOL
+0xcd2ba798 dm_bufio_forget vmlinux EXPORT_SYMBOL_GPL
+0x62f3eb57 scsi_eh_restore_cmnd vmlinux EXPORT_SYMBOL
+0x74ec6118 transport_remove_device vmlinux EXPORT_SYMBOL_GPL
+0xadcf4b34 bdev_write_page vmlinux EXPORT_SYMBOL_GPL
+0x0c79d5ef vm_sockets_get_local_cid net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xe575d7bf cache_destroy_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf5340fff drm_gtf_mode drivers/gpu/drm/drm EXPORT_SYMBOL
+0x255aeee9 mr_dump vmlinux EXPORT_SYMBOL
+0x5bdf43e0 sk_clone_lock vmlinux EXPORT_SYMBOL_GPL
+0xfc8abaee kernel_sock_ip_overhead vmlinux EXPORT_SYMBOL
+0xebce5d35 pcc_mbox_request_channel vmlinux EXPORT_SYMBOL_GPL
+0x86306a40 thermal_zone_get_zone_by_name vmlinux EXPORT_SYMBOL_GPL
+0x2a678a13 __suspend_report_result vmlinux EXPORT_SYMBOL_GPL
+0xdf2ae1d8 vga_tryget vmlinux EXPORT_SYMBOL
+0x3ad7a5d5 acpi_evaluate_reference vmlinux EXPORT_SYMBOL
+0x14d242d9 debugfs_file_put vmlinux EXPORT_SYMBOL_GPL
+0xfa138e68 debugfs_file_get vmlinux EXPORT_SYMBOL_GPL
+0xb460ff72 d_find_any_alias vmlinux EXPORT_SYMBOL
+0x9f65c857 ZSTD_checkCParams lib/zstd/zstd_compress EXPORT_SYMBOL
+0xddb76c90 virtio_transport_stream_has_space net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x7e8e4fad ceph_destroy_options net/ceph/libceph EXPORT_SYMBOL
+0x969c73d9 vfio_device_put drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x0ca34ccf mlxsw_core_max_ports drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x51082108 drm_bridge_remove drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8a1bbc0b drm_atomic_helper_crtc_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6eba769a drm_dp_dpcd_write drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6d5f5b91 radix_tree_tagged vmlinux EXPORT_SYMBOL
+0xdb9824e6 vlan_uses_dev vmlinux EXPORT_SYMBOL
+0xc1b168fd sk_detach_filter vmlinux EXPORT_SYMBOL_GPL
+0xc469c204 sk_attach_filter vmlinux EXPORT_SYMBOL_GPL
+0x3f7fe7d9 clk_register_gpio_gate vmlinux EXPORT_SYMBOL_GPL
+0xc587eb16 fb_set_cmap vmlinux EXPORT_SYMBOL
+0xd0b7262b pci_unregister_driver vmlinux EXPORT_SYMBOL
+0xc890c008 zlib_deflateEnd vmlinux EXPORT_SYMBOL
+0x33736a1d __genradix_ptr_alloc vmlinux EXPORT_SYMBOL
+0xcc6ca6f2 blk_mq_pci_map_queues vmlinux EXPORT_SYMBOL_GPL
+0x0472b6ab unload_nls vmlinux EXPORT_SYMBOL
+0x0a37652e seq_open_private vmlinux EXPORT_SYMBOL
+0x953e1b9e ktime_get_real_seconds vmlinux EXPORT_SYMBOL_GPL
+0xc512626a __supported_pte_mask vmlinux EXPORT_SYMBOL_GPL
+0x0d76831a sunrpc_cache_update net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xbf69a113 ib_map_mr_sg_pi drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xa8e570b7 iscsit_aborted_task drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xd08d7327 drm_modeset_lock_all drivers/gpu/drm/drm EXPORT_SYMBOL
+0xae9efaab kvm_lapic_switch_to_sw_timer arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xf89053c6 tcp_conn_request vmlinux EXPORT_SYMBOL
+0xdeb0b611 kernel_listen vmlinux EXPORT_SYMBOL
+0xefbabee7 serio_unregister_driver vmlinux EXPORT_SYMBOL
+0xcfdc64c3 tpm_seal_trusted vmlinux EXPORT_SYMBOL_GPL
+0xf214dcf1 pnp_register_card_driver vmlinux EXPORT_SYMBOL
+0x2b593aa8 gen_pool_alloc_algo_owner vmlinux EXPORT_SYMBOL
+0x3b644591 __bitmap_shift_left vmlinux EXPORT_SYMBOL
+0xc7c1f66b gss_mech_get net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL
+0xcc9b70d6 ebt_unregister_table net/bridge/netfilter/ebtables EXPORT_SYMBOL
+0x1ddaea6a ip_set_type_register net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x0a7fe2b5 nat_rtp_rtcp_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x605a24be cxgbi_set_conn_param drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x50674de7 drm_timeout_abs_to_jiffies drivers/gpu/drm/drm EXPORT_SYMBOL
+0x69acdf38 memcpy vmlinux EXPORT_SYMBOL
+0x6a5e2bde __cookie_v6_init_sequence vmlinux EXPORT_SYMBOL_GPL
+0xebd5aaa3 rc_repeat vmlinux EXPORT_SYMBOL_GPL
+0x9953b83b fc_eh_abort vmlinux EXPORT_SYMBOL
+0x40f91b50 fc_set_rport_loss_tmo vmlinux EXPORT_SYMBOL
+0x0cd4ae1d __scsi_device_lookup vmlinux EXPORT_SYMBOL
+0x5a3543ac ata_bmdma_stop vmlinux EXPORT_SYMBOL_GPL
+0x01407c56 clk_hw_register_gpio_gate vmlinux EXPORT_SYMBOL_GPL
+0xbe5c888b crypto_chain vmlinux EXPORT_SYMBOL_GPL
+0x7412ed5b kvfree_sensitive vmlinux EXPORT_SYMBOL
+0x80a8cf9f perf_aux_output_begin vmlinux EXPORT_SYMBOL_GPL
+0x532fd1c8 efi_mm vmlinux EXPORT_SYMBOL_GPL
+0x37dbab39 rpc_run_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3f4e261f ip6t_do_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL
+0x5efa6669 ib_sa_unregister_client drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x190e30bd closure_put drivers/md/bcache/bcache EXPORT_SYMBOL
+0x03abfef4 drm_dp_read_desc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x3e827a38 simd_register_skciphers_compat crypto/crypto_simd EXPORT_SYMBOL_GPL
+0xfc682547 kvm_init_shadow_mmu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xec7e7373 skb_flow_dissect_tunnel_info vmlinux EXPORT_SYMBOL
+0x81b03377 efivar_entry_set_safe vmlinux EXPORT_SYMBOL_GPL
+0xb542cb30 cpufreq_dbs_governor_exit vmlinux EXPORT_SYMBOL_GPL
+0xc55acd19 cpufreq_dbs_governor_init vmlinux EXPORT_SYMBOL_GPL
+0xfc14bb2e dm_get_dev_t vmlinux EXPORT_SYMBOL_GPL
+0xa171ecc6 i8042_remove_filter vmlinux EXPORT_SYMBOL
+0xc599f127 ufshcd_remove vmlinux EXPORT_SYMBOL_GPL
+0x66caf6eb spi_release_transport vmlinux EXPORT_SYMBOL
+0x8bb6e7cb ata_std_postreset vmlinux EXPORT_SYMBOL_GPL
+0xfbe8ee28 acpi_get_table_by_index vmlinux EXPORT_SYMBOL
+0xd819a524 crc_itu_t_table vmlinux EXPORT_SYMBOL
+0x1c80d27d btree_geo128 vmlinux EXPORT_SYMBOL_GPL
+0x72a80922 bdevname vmlinux EXPORT_SYMBOL
+0x7a9b37e8 blk_start_plug vmlinux EXPORT_SYMBOL
+0x444ba6ee cdev_init vmlinux EXPORT_SYMBOL
+0xa8e0180f request_resource vmlinux EXPORT_SYMBOL
+0x749a1a1a xprt_unpin_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd31012c0 nf_ct_expect_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x104151fc xsk_reuseq_free vmlinux EXPORT_SYMBOL_GPL
+0x32927605 inet_getname vmlinux EXPORT_SYMBOL
+0xe9e8faeb efi_tpm_final_log_size vmlinux EXPORT_SYMBOL
+0xa79f7631 __nvme_submit_sync_cmd vmlinux EXPORT_SYMBOL_GPL
+0x0f59f0ce agp_generic_insert_memory vmlinux EXPORT_SYMBOL
+0x3e57df06 devm_pinctrl_register vmlinux EXPORT_SYMBOL_GPL
+0xbd3ff0d6 iov_iter_fault_in_readable vmlinux EXPORT_SYMBOL
+0xa9bc8b74 module_mutex vmlinux EXPORT_SYMBOL_GPL
+0xfedb5a09 drm_mode_config_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7a1bcd59 gf128mul_x8_ble crypto/gf128mul EXPORT_SYMBOL
+0x2319bff3 mddev_suspend vmlinux EXPORT_SYMBOL_GPL
+0x2e08226d badrange_add vmlinux EXPORT_SYMBOL_GPL
+0xc2cc1896 timestamp_truncate vmlinux EXPORT_SYMBOL
+0x2004e59b modify_user_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL
+0xbf9bcc8d __cap_empty_set vmlinux EXPORT_SYMBOL
+0x2122e2f0 xprt_register_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xcf4d026f ip_tunnel_changelink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x402468e9 i915_gpu_lower drivers/gpu/drm/i915/i915 EXPORT_SYMBOL_GPL
+0x3d94d979 drm_object_property_set_value drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6755fc16 nfs_show_path fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x4d72d3aa chacha_block vmlinux EXPORT_SYMBOL
+0x4692838a inet_rcv_saddr_equal vmlinux EXPORT_SYMBOL
+0xfbbd02e4 platform_get_resource vmlinux EXPORT_SYMBOL_GPL
+0xac2e6523 pci_disable_pcie_error_reporting vmlinux EXPORT_SYMBOL_GPL
+0xf5a2f345 iput vmlinux EXPORT_SYMBOL
+0xb96a6caa xprt_complete_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xeb28e60e uverbs_close_fd drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0xe3cd6966 vbg_put_gdev drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL
+0x58124917 crypto_transfer_ablkcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x47a9e9ee input_ff_upload vmlinux EXPORT_SYMBOL_GPL
+0x6a250b7f iscsi_alloc_session vmlinux EXPORT_SYMBOL_GPL
+0x83486fc0 iommu_aux_get_pasid vmlinux EXPORT_SYMBOL_GPL
+0xb3a2ba29 open_related_ns vmlinux EXPORT_SYMBOL_GPL
+0x03fd2571 vm_unmap_ram vmlinux EXPORT_SYMBOL
+0x0aa309cf synchronize_hardirq vmlinux EXPORT_SYMBOL
+0x5efde8e6 proc_doulongvec_ms_jiffies_minmax vmlinux EXPORT_SYMBOL
+0x8e8b7b68 vbg_hgcm_call drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL
+0x271985e0 drm_mode_equal_no_clocks_no_stereo drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8032ab13 fuse_sync_release fs/fuse/fuse EXPORT_SYMBOL_GPL
+0xb892cb25 kvm_mmu_unprotect_page_virt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x8b77acdc xfrm_output vmlinux EXPORT_SYMBOL_GPL
+0xd2cab942 dev_pm_opp_find_freq_exact vmlinux EXPORT_SYMBOL_GPL
+0x92d31cfb fixed_phy_add vmlinux EXPORT_SYMBOL_GPL
+0x727b7500 amd_iommu_pc_set_reg vmlinux EXPORT_SYMBOL
+0x0c95ada3 amd_iommu_pc_get_reg vmlinux EXPORT_SYMBOL
+0xd0f216e5 pci_enable_device_mem vmlinux EXPORT_SYMBOL
+0xf474c21c bitmap_print_to_pagebuf vmlinux EXPORT_SYMBOL
+0x71a50dbc register_blkdev vmlinux EXPORT_SYMBOL
+0x87c49300 ceph_osdc_clear_abort_err net/ceph/libceph EXPORT_SYMBOL
+0x1e14cc82 nf_conntrack_alter_reply net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x222e9177 mlx4_hw_rule_sz drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x11f869ff drm_crtc_handle_vblank drivers/gpu/drm/drm EXPORT_SYMBOL
+0x098e1fb1 drm_framebuffer_lookup drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa908f9cb ipv6_select_ident vmlinux EXPORT_SYMBOL
+0xa3951677 inet_offloads vmlinux EXPORT_SYMBOL
+0xa27bf45d neigh_event_ns vmlinux EXPORT_SYMBOL
+0xb3dce0a7 iscsi_flashnode_bus_match vmlinux EXPORT_SYMBOL_GPL
+0x3aeae555 dpm_for_each_dev vmlinux EXPORT_SYMBOL_GPL
+0xa14eb664 fb_firmware_edid vmlinux EXPORT_SYMBOL
+0x40733cef jbd2_journal_blocks_per_page vmlinux EXPORT_SYMBOL
+0x0fd902db mb_cache_entry_create vmlinux EXPORT_SYMBOL
+0x4eaecaa4 iterate_supers_type vmlinux EXPORT_SYMBOL
+0xd0654aba woken_wake_function vmlinux EXPORT_SYMBOL
+0xf4b8562a iw_cm_init_qp_attr drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x31304c41 drm_gem_handle_delete drivers/gpu/drm/drm EXPORT_SYMBOL
+0xec8af5d2 fscache_check_aux fs/fscache/fscache EXPORT_SYMBOL
+0x48790159 nf_queue_entry_get_refs vmlinux EXPORT_SYMBOL_GPL
+0x962c8ae1 usb_kill_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0x4b4888be iscsi_queuecommand vmlinux EXPORT_SYMBOL_GPL
+0xf3a8644a ata_scsi_unlock_native_capacity vmlinux EXPORT_SYMBOL_GPL
+0x49224181 nvme_reset_wq vmlinux EXPORT_SYMBOL_GPL
+0xb60a5524 request_firmware_nowait vmlinux EXPORT_SYMBOL
+0x6c02c6e6 kmem_cache_free_bulk vmlinux EXPORT_SYMBOL
+0xa582fc55 percpu_down_write vmlinux EXPORT_SYMBOL_GPL
+0x1403ad09 cpufreq_add_update_util_hook vmlinux EXPORT_SYMBOL_GPL
+0xa88ff005 svc_xprt_do_enqueue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8c4cb9c3 nf_conncount_list_init net/netfilter/nf_conncount EXPORT_SYMBOL_GPL
+0x2ccc2d98 target_undepend_item drivers/target/target_core_mod EXPORT_SYMBOL
+0xaaefbf32 drm_connector_init_panel_orientation_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3dbae082 __cast6_decrypt crypto/cast6_generic EXPORT_SYMBOL_GPL
+0xcfce512f __cast6_encrypt crypto/cast6_generic EXPORT_SYMBOL_GPL
+0x4ffbd78d nfs4_put_deviceid_node fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x904dc0b6 kvm_intr_is_single_vcpu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1d21a6b4 init_dummy_netdev vmlinux EXPORT_SYMBOL_GPL
+0x4df1dfc5 netdev_change_features vmlinux EXPORT_SYMBOL
+0x1f43c855 __hid_request vmlinux EXPORT_SYMBOL_GPL
+0xc633d82d phy_unregister_fixup vmlinux EXPORT_SYMBOL
+0x9a4cfcda nvdimm_blk_region_create vmlinux EXPORT_SYMBOL_GPL
+0xc2de27ca hest_disable vmlinux EXPORT_SYMBOL_GPL
+0xf5b13e23 pci_assign_unassigned_bus_resources vmlinux EXPORT_SYMBOL_GPL
+0xbd5cb8b9 ring_buffer_resize vmlinux EXPORT_SYMBOL_GPL
+0x01b10fca ftrace_ops_set_global_filter vmlinux EXPORT_SYMBOL_GPL
+0x4719f511 on_each_cpu_cond_mask vmlinux EXPORT_SYMBOL
+0x24761d03 dma_direct_sync_single_for_device vmlinux EXPORT_SYMBOL
+0x6d4907a5 ip_tunnel_xmit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x656d120a __tracepoint_kvm_nested_intr_vmexit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x2a1fa5d0 kvm_set_rflags arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9c65c3d4 kvm_get_rflags arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb572501e dst_destroy vmlinux EXPORT_SYMBOL
+0xfb3ae8ac tpm_chip_stop vmlinux EXPORT_SYMBOL_GPL
+0x2833cb68 pci_disable_pri vmlinux EXPORT_SYMBOL_GPL
+0xe844e14f pci_disable_ats vmlinux EXPORT_SYMBOL_GPL
+0x3cbbaa14 pci_disable_msi vmlinux EXPORT_SYMBOL
+0xa0334e91 pci_disable_rom vmlinux EXPORT_SYMBOL_GPL
+0x92d33cde of_phy_get vmlinux EXPORT_SYMBOL_GPL
+0x0cfdc968 svc_wake_up net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x7b455fa9 xprt_write_space net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb16966f1 nf_ct_helper_expectfn_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xbd13ee5d o2hb_check_node_heartbeating_no_sem fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x6ead302b compat_tcp_setsockopt vmlinux EXPORT_SYMBOL
+0xb241b8dd scsi_mode_sense vmlinux EXPORT_SYMBOL
+0xd406d266 fb_mode_is_equal vmlinux EXPORT_SYMBOL
+0xf4af35c2 rcu_gp_is_normal vmlinux EXPORT_SYMBOL_GPL
+0xd9a5ea54 __init_waitqueue_head vmlinux EXPORT_SYMBOL
+0xb386bd69 ns_capable_setid vmlinux EXPORT_SYMBOL
+0xf0c0bfe4 xfrm6_tunnel_register net/ipv6/tunnel6 EXPORT_SYMBOL
+0xe804b3b1 xfrm4_tunnel_register net/ipv4/tunnel4 EXPORT_SYMBOL
+0x1ec16362 mlx5_core_alloc_pd drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xa2a54f60 mlx4_FLOW_STEERING_IB_UC_QP_RANGE drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x1018a902 drm_crtc_from_index drivers/gpu/drm/drm EXPORT_SYMBOL
+0x13cea12a drm_atomic_helper_shutdown drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x227b1482 drm_dp_mst_topology_mgr_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xc406f000 iscsi_find_flashnode_sess vmlinux EXPORT_SYMBOL_GPL
+0x587998d2 scsi_dh_attached_handler_name vmlinux EXPORT_SYMBOL_GPL
+0x8a22bbe1 nd_mapping_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0x5ab0235a iommu_domain_window_enable vmlinux EXPORT_SYMBOL_GPL
+0x3b033ff9 phy_remove_lookup vmlinux EXPORT_SYMBOL_GPL
+0x11f2f779 blk_queue_virt_boundary vmlinux EXPORT_SYMBOL
+0x17a91054 blk_insert_cloned_request vmlinux EXPORT_SYMBOL_GPL
+0xe583de18 security_sb_set_mnt_opts vmlinux EXPORT_SYMBOL
+0x572004d9 irq_inject_interrupt vmlinux EXPORT_SYMBOL_GPL
+0xb5d5daf1 nf_nat_icmpv6_reply_translation net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x89e7adaa adf_dev_put drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x78b7d2b0 adf_dev_get drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x70231f18 mlx5_query_port_wol drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x5408a2c1 __mlx4_register_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x20eadeb6 ip_compute_csum vmlinux EXPORT_SYMBOL
+0x07abc5b8 ir_raw_event_store_edge vmlinux EXPORT_SYMBOL_GPL
+0x63734b15 qlt_xmit_tm_rsp vmlinux EXPORT_SYMBOL
+0xe34b2123 nd_dev_to_uuid vmlinux EXPORT_SYMBOL
+0x31ee086a __tracepoint_read_msr vmlinux EXPORT_SYMBOL
+0x3736af74 blk_mq_kick_requeue_list vmlinux EXPORT_SYMBOL
+0xcd974f00 rcu_all_qs vmlinux EXPORT_SYMBOL_GPL
+0x9ac11b74 suspend_set_ops vmlinux EXPORT_SYMBOL_GPL
+0x84fc54d2 lc_seq_printf_stats lib/lru_cache EXPORT_SYMBOL
+0xb0b4d82b ceph_cls_lock_info net/ceph/libceph EXPORT_SYMBOL
+0x3ffdacf3 timerqueue_iterate_next vmlinux EXPORT_SYMBOL_GPL
+0x04fc4a13 dst_alloc vmlinux EXPORT_SYMBOL
+0x6214aef2 cpufreq_unregister_notifier vmlinux EXPORT_SYMBOL
+0x64f36620 dax_flush vmlinux EXPORT_SYMBOL_GPL
+0xe93e49c3 devres_free vmlinux EXPORT_SYMBOL_GPL
+0xa85b7910 mipi_dsi_device_register_full vmlinux EXPORT_SYMBOL
+0x7f7f7bb4 irq_poll_disable vmlinux EXPORT_SYMBOL
+0xc8fdea10 blk_put_queue vmlinux EXPORT_SYMBOL
+0xd1eb2c2f proc_create_mount_point vmlinux EXPORT_SYMBOL
+0x2dddd21b single_release vmlinux EXPORT_SYMBOL
+0xf96145f3 get_user_pages vmlinux EXPORT_SYMBOL
+0x4b4833c5 set_wb_congested vmlinux EXPORT_SYMBOL
+0x42635d55 pm_suspend_global_flags vmlinux EXPORT_SYMBOL_GPL
+0x3a0e96bd housekeeping_affine vmlinux EXPORT_SYMBOL_GPL
+0x3fc63b9a mlx4_SYNC_TPT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xb8bdb3f5 drm_puts drivers/gpu/drm/drm EXPORT_SYMBOL
+0x10324ce7 ip6_append_data vmlinux EXPORT_SYMBOL_GPL
+0xa108eb4d sysctl_optmem_max vmlinux EXPORT_SYMBOL
+0xab735372 ipmi_dmi_get_slave_addr vmlinux EXPORT_SYMBOL
+0xdc49c198 reciprocal_value_adv vmlinux EXPORT_SYMBOL
+0x8fa2181d akcipher_register_instance vmlinux EXPORT_SYMBOL_GPL
+0x26c392c1 jbd2_journal_destroy vmlinux EXPORT_SYMBOL
+0x8c94be61 jbd2_journal_restart vmlinux EXPORT_SYMBOL
+0x9e683f75 __cpu_possible_mask vmlinux EXPORT_SYMBOL
+0xd645e124 ibnl_put_msg drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xab075b0a cast6_setkey crypto/cast6_generic EXPORT_SYMBOL_GPL
+0x9275ea3a cast5_setkey crypto/cast5_generic EXPORT_SYMBOL_GPL
+0xfac19588 __clear_user vmlinux EXPORT_SYMBOL
+0x9c995c69 xt_percpu_counter_alloc vmlinux EXPORT_SYMBOL_GPL
+0x609bcd98 in6_pton vmlinux EXPORT_SYMBOL
+0xac5fcec0 in4_pton vmlinux EXPORT_SYMBOL
+0x388aa3c9 neigh_proc_dointvec_ms_jiffies vmlinux EXPORT_SYMBOL
+0xd00498e3 scsi_mode_select vmlinux EXPORT_SYMBOL_GPL
+0x8b149c36 clk_is_match vmlinux EXPORT_SYMBOL_GPL
+0xefee932c acpi_get_data_full vmlinux EXPORT_SYMBOL
+0xe6ae9ec9 cfb_copyarea vmlinux EXPORT_SYMBOL
+0xf6fc8791 __bitmap_xor vmlinux EXPORT_SYMBOL
+0xd1086b7f mnt_want_write vmlinux EXPORT_SYMBOL_GPL
+0x6c694f6d drm_atomic_helper_cleanup_planes drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x1a3f4b9e __dev_remove_pack vmlinux EXPORT_SYMBOL
+0xe1a4f16a secure_ipv6_port_ephemeral vmlinux EXPORT_SYMBOL
+0x00565f18 pernet_ops_rwsem vmlinux EXPORT_SYMBOL_GPL
+0x5c66e90c efivar_run_worker vmlinux EXPORT_SYMBOL_GPL
+0xcd09d471 ata_sff_softreset vmlinux EXPORT_SYMBOL_GPL
+0x1618e5be iommu_unmap vmlinux EXPORT_SYMBOL_GPL
+0x560951d5 acpi_nfit_ctl vmlinux EXPORT_SYMBOL_GPL
+0xfd83651c pcie_relaxed_ordering_enabled vmlinux EXPORT_SYMBOL
+0x48ea93c9 iov_iter_init vmlinux EXPORT_SYMBOL
+0x9f5b4f13 config_group_init vmlinux EXPORT_SYMBOL
+0xab8df795 posix_acl_to_xattr vmlinux EXPORT_SYMBOL
+0x0eb92114 path_is_mountpoint vmlinux EXPORT_SYMBOL
+0x03a254f9 vfs_submount vmlinux EXPORT_SYMBOL_GPL
+0xb18429eb suspend_device_irqs vmlinux EXPORT_SYMBOL_GPL
+0xc14a7cc7 devm_register_reboot_notifier vmlinux EXPORT_SYMBOL
+0xa5ac3e33 ZSTD_DCtxWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x4c09ddbb mlx4_mw_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xa12a0b4b drm_mode_config_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2c541e7b radix_tree_next_chunk vmlinux EXPORT_SYMBOL
+0x6e1ecfb6 tc_setup_cb_reoffload vmlinux EXPORT_SYMBOL
+0xdd8e9305 __starget_for_each_device vmlinux EXPORT_SYMBOL
+0x76ef2787 bioset_integrity_create vmlinux EXPORT_SYMBOL
+0x951a2773 crypto_has_alg vmlinux EXPORT_SYMBOL_GPL
+0x05b52dad splice_to_pipe vmlinux EXPORT_SYMBOL_GPL
+0x3694e5b1 iget5_locked vmlinux EXPORT_SYMBOL
+0xea1f6e0e hugetlb_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0x3dcb88a0 irq_set_handler_data vmlinux EXPORT_SYMBOL
+0x35dbb7c0 yield_to vmlinux EXPORT_SYMBOL_GPL
+0x7e0a5c30 ex_handler_ext vmlinux EXPORT_SYMBOL
+0xc976cc96 mii_check_media drivers/net/mii EXPORT_SYMBOL
+0xf76df3e2 mlxsw_afa_block_append_drop drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb836ebb6 drm_ioctl_permit drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6d5daf2c kvm_mmu_invpcid_gva arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xdb283526 __tracepoint_kvm_nested_intercepts arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x7fdd6d64 udp_pre_connect vmlinux EXPORT_SYMBOL
+0xad19f930 call_fib_notifier vmlinux EXPORT_SYMBOL
+0x12611439 sk_page_frag_refill vmlinux EXPORT_SYMBOL
+0x9d9419ac __phy_write_mmd vmlinux EXPORT_SYMBOL
+0xc8a0f57f user_revoke vmlinux EXPORT_SYMBOL
+0x6613eca9 debugfs_create_dir vmlinux EXPORT_SYMBOL_GPL
+0x89eb2c9f dcache_dir_lseek vmlinux EXPORT_SYMBOL
+0x5d114123 timer_reduce vmlinux EXPORT_SYMBOL
+0x651a4139 test_taint vmlinux EXPORT_SYMBOL
+0x9c649ea2 rdma_is_consumer_reject drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xa5eef37d mlx5_core_alloc_transport_domain drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x0c87ad70 vlan_filter_drop_vids vmlinux EXPORT_SYMBOL
+0x6dbcf143 netdev_printk vmlinux EXPORT_SYMBOL
+0x9c5a8056 thermal_zone_device_register vmlinux EXPORT_SYMBOL_GPL
+0x092e26bf acpi_remove_address_space_handler vmlinux EXPORT_SYMBOL
+0x17f54263 raid6_gfexp vmlinux EXPORT_SYMBOL
+0xf5a691cd invalidate_bh_lrus vmlinux EXPORT_SYMBOL_GPL
+0x33e8c941 vfs_iter_read vmlinux EXPORT_SYMBOL
+0xa6185773 unregister_trace_event vmlinux EXPORT_SYMBOL_GPL
+0x99b3b2bb ipcomp_init_state net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL
+0x90dd4390 nf_nat_helper_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x7ce07178 mlx4_bond drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xed14d4d2 drm_syncobj_find_fence drivers/gpu/drm/drm EXPORT_SYMBOL
+0x527e3205 drm_mode_match drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc1bf06df drm_atomic_helper_check_planes drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xbe057810 xt_check_match vmlinux EXPORT_SYMBOL_GPL
+0x32c3cb4e class_compat_register vmlinux EXPORT_SYMBOL_GPL
+0x544b0c11 acpi_lid_notifier_register vmlinux EXPORT_SYMBOL
+0x973fa82e register_acpi_notifier vmlinux EXPORT_SYMBOL
+0x644d70b0 blk_mq_freeze_queue_wait vmlinux EXPORT_SYMBOL_GPL
+0x7eb3e9e2 put_fs_context vmlinux EXPORT_SYMBOL
+0xfe402f90 add_to_pipe vmlinux EXPORT_SYMBOL
+0xfc008e2b d_splice_alias vmlinux EXPORT_SYMBOL
+0xc5d789df alarm_expires_remaining vmlinux EXPORT_SYMBOL_GPL
+0x0366307a console_suspend_enabled vmlinux EXPORT_SYMBOL
+0x5b8fcf69 node_to_amd_nb vmlinux EXPORT_SYMBOL_GPL
+0x5b8ebc3c svc_reg_xprt_class net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x042de440 nf_reject_iphdr_put net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL
+0x11cebb0b pps_unregister_source drivers/pps/pps_core EXPORT_SYMBOL
+0x05a7f62e nvmf_register_transport drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x871ffd42 drm_mm_replace_node drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb1f29d1a gfn_to_pfn_memslot_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x5721865f kobject_rename vmlinux EXPORT_SYMBOL_GPL
+0x637751ce devm_alloc_etherdev_mqs vmlinux EXPORT_SYMBOL
+0x5bf5dc14 intel_svm_is_pasid_valid vmlinux EXPORT_SYMBOL_GPL
+0x43f81957 clk_round_rate vmlinux EXPORT_SYMBOL_GPL
+0x9b72478f acpi_unload_parent_table vmlinux EXPORT_SYMBOL
+0x47fdf4c4 acpi_subsys_suspend_late vmlinux EXPORT_SYMBOL_GPL
+0x35ed52ea param_ops_charp vmlinux EXPORT_SYMBOL
+0x1c6d2b7f l2tp_session_get net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x4da990ea rpc_count_iostats_metrics net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0fad595a edac_mod_work drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x48fa6ddd drm_mode_config_helper_resume drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xd7ba575e dlm_errmsg fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0xad5737fc efivar_init vmlinux EXPORT_SYMBOL_GPL
+0x79624f41 dma_resv_copy_fences vmlinux EXPORT_SYMBOL
+0x87bed5bd __inode_add_bytes vmlinux EXPORT_SYMBOL
+0xd027f605 cad_pid vmlinux EXPORT_SYMBOL
+0x1bf1919a bcm_phy_get_stats drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xdee3da54 drm_scdc_read drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xb6ebf62a o2nm_this_node fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x8b39d66b inet_twsk_deschedule_put vmlinux EXPORT_SYMBOL
+0x84a5a7fb tc_setup_cb_replace vmlinux EXPORT_SYMBOL
+0x1533890b phy_set_max_speed vmlinux EXPORT_SYMBOL
+0x9d65320b ata_host_start vmlinux EXPORT_SYMBOL_GPL
+0x30275bfb __tracepoint_dma_fence_enable_signal vmlinux EXPORT_SYMBOL
+0x93fb56bb agp_put_bridge vmlinux EXPORT_SYMBOL
+0x400a024b acpi_scan_lock_release vmlinux EXPORT_SYMBOL_GPL
+0x6a6e05bf kstrtou8 vmlinux EXPORT_SYMBOL
+0xa16db553 blk_rq_map_integrity_sg vmlinux EXPORT_SYMBOL
+0x794b7271 orderly_reboot vmlinux EXPORT_SYMBOL_GPL
+0x2d3193ec get_task_pid vmlinux EXPORT_SYMBOL_GPL
+0xda72a7ec ZSTD_nextInputType lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x7d7edfb3 ceph_auth_invalidate_authorizer net/ceph/libceph EXPORT_SYMBOL
+0xbec006f6 mlxsw_pci_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_pci EXPORT_SYMBOL
+0x2abe4306 device_get_child_node_count vmlinux EXPORT_SYMBOL_GPL
+0x3d7a5470 serial8250_rx_chars vmlinux EXPORT_SYMBOL_GPL
+0x5a7bfe41 crypto_probing_notify vmlinux EXPORT_SYMBOL_GPL
+0xf738d1be register_blocking_lsm_notifier vmlinux EXPORT_SYMBOL
+0x01158e0a __cgroup_bpf_run_filter_setsockopt vmlinux EXPORT_SYMBOL
+0xfc9adce3 tls_unregister_device net/tls/tls EXPORT_SYMBOL
+0x68bb373a ip6t_alloc_initial_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL_GPL
+0xc0282722 mlx5_core_create_sq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xd323451a mlx5_core_create_rq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x30568ba9 mlx5_put_uars_page drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x519b92f1 mlx4_gen_guid_change_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x1d013832 kvm_enable_efer_bits arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xdb22e255 __inet_twsk_schedule vmlinux EXPORT_SYMBOL_GPL
+0x1e3255b7 xt_compat_match_to_user vmlinux EXPORT_SYMBOL_GPL
+0x657b23d0 netif_receive_skb vmlinux EXPORT_SYMBOL
+0x33fb867e ahci_platform_enable_phys vmlinux EXPORT_SYMBOL_GPL
+0x5ec62549 ata_dev_classify vmlinux EXPORT_SYMBOL_GPL
+0x8b4b01f1 pm_runtime_enable vmlinux EXPORT_SYMBOL_GPL
+0x780fdfd1 intel_enable_gtt vmlinux EXPORT_SYMBOL
+0x72614ae1 jbd2_journal_inode_ranged_write vmlinux EXPORT_SYMBOL
+0x4573f292 dax_writeback_mapping_range vmlinux EXPORT_SYMBOL_GPL
+0x7ea75c24 __wake_up_locked_key_bookmark vmlinux EXPORT_SYMBOL_GPL
+0xfb6af58d recalc_sigpending vmlinux EXPORT_SYMBOL
+0x397f6231 ip_set_free net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0xee105bbd nf_nat_mangle_udp_packet net/netfilter/nf_nat EXPORT_SYMBOL
+0xb51fbd64 edac_op_state drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x75fe065a vmci_send_datagram drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x41cae40a input_match_device_id vmlinux EXPORT_SYMBOL
+0xc50d08b8 genphy_c45_read_pma vmlinux EXPORT_SYMBOL_GPL
+0xb48bb0c8 genphy_c45_read_lpa vmlinux EXPORT_SYMBOL_GPL
+0x5cb4498c ata_acpi_gtm_xfermask vmlinux EXPORT_SYMBOL_GPL
+0x499f0ecf nd_sb_checksum vmlinux EXPORT_SYMBOL
+0x08c80867 serial8250_em485_init vmlinux EXPORT_SYMBOL_GPL
+0x7f075fb6 pci_user_read_config_dword vmlinux EXPORT_SYMBOL_GPL
+0xb3f548ad kmemdup_nul vmlinux EXPORT_SYMBOL
+0xe6e40502 rcu_get_gp_seq vmlinux EXPORT_SYMBOL_GPL
+0x95286aa1 __tracepoint_bcache_invalidate drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x3f5e6945 ptp_clock_index drivers/ptp/ptp EXPORT_SYMBOL
+0x68cdace7 mlx5_create_auto_grouped_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x5022077d mlx5_nic_vport_disable_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x2f78e290 mlx5_core_xrcd_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x5be9adf1 mlx4_flow_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x959f4b42 kvm_no_apic_vcpu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xcbd4898c fortify_panic vmlinux EXPORT_SYMBOL
+0xada38766 dst_cache_destroy vmlinux EXPORT_SYMBOL_GPL
+0xb1425b32 dm_table_add_target_callbacks vmlinux EXPORT_SYMBOL_GPL
+0x55b5c24a iscsi_tcp_recv_segment_is_hdr vmlinux EXPORT_SYMBOL_GPL
+0x292c6e9e iscsi_itt_to_ctask vmlinux EXPORT_SYMBOL_GPL
+0xcf72150a nvdimm_bus_add_badrange vmlinux EXPORT_SYMBOL_GPL
+0x8746f069 anon_transport_class_register vmlinux EXPORT_SYMBOL_GPL
+0x46f2794f virtqueue_get_used_addr vmlinux EXPORT_SYMBOL_GPL
+0xb3e76377 devm_phy_create vmlinux EXPORT_SYMBOL_GPL
+0x258d2f76 net_dim_get_tx_moderation vmlinux EXPORT_SYMBOL
+0x99d472b1 net_dim_get_rx_moderation vmlinux EXPORT_SYMBOL
+0xdce70752 disk_part_iter_exit vmlinux EXPORT_SYMBOL_GPL
+0xe9d26bc5 __tracepoint_block_bio_complete vmlinux EXPORT_SYMBOL_GPL
+0x34407691 crypto_has_ahash vmlinux EXPORT_SYMBOL_GPL
+0x6eb9a17c sysfs_create_file_ns vmlinux EXPORT_SYMBOL_GPL
+0x534239fd follow_down_one vmlinux EXPORT_SYMBOL
+0x7944e0fc tracing_off vmlinux EXPORT_SYMBOL_GPL
+0xc9216a82 recalibrate_cpu_khz vmlinux EXPORT_SYMBOL
+0x50ef5170 rpcauth_init_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x48779364 vhost_vq_init_access drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x156847e8 raw_seq_next vmlinux EXPORT_SYMBOL_GPL
+0xfb8ba3bc ip_getsockopt vmlinux EXPORT_SYMBOL
+0x6b298699 ip_setsockopt vmlinux EXPORT_SYMBOL
+0x9a1a96b9 input_ff_create_memless vmlinux EXPORT_SYMBOL_GPL
+0xa1bedd72 amd_iommu_pc_get_max_counters vmlinux EXPORT_SYMBOL
+0x5b126902 tty_encode_baud_rate vmlinux EXPORT_SYMBOL_GPL
+0x985268a5 clk_hw_set_rate_range vmlinux EXPORT_SYMBOL_GPL
+0x554407a2 devm_phy_destroy vmlinux EXPORT_SYMBOL_GPL
+0xad913789 page_endio vmlinux EXPORT_SYMBOL_GPL
+0x82d55176 xdr_shift_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe4e4f17d rdma_disconnect drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xbcd06dd4 i2c_bit_add_numbered_bus drivers/i2c/algos/i2c-algo-bit EXPORT_SYMBOL
+0x1245c3dc mlx4_get_base_gid_ix drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x8d26e3ff drm_dp_aux_unregister drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5fd6510c ip6_redirect vmlinux EXPORT_SYMBOL_GPL
+0x6d4611fc ip_options_rcv_srr vmlinux EXPORT_SYMBOL
+0xddd0407d ata_sas_tport_delete vmlinux EXPORT_SYMBOL_GPL
+0xaa751518 device_register vmlinux EXPORT_SYMBOL_GPL
+0x049ced27 pnp_request_card_device vmlinux EXPORT_SYMBOL
+0x034617a2 acpi_device_uevent_modalias vmlinux EXPORT_SYMBOL_GPL
+0xc7c1107a LZ4_decompress_safe vmlinux EXPORT_SYMBOL
+0xbefa51a3 gen_pool_add_owner vmlinux EXPORT_SYMBOL
+0x3c9bcfaa pcim_iomap_regions_request_all vmlinux EXPORT_SYMBOL
+0x334be7df blk_mq_rq_cpu vmlinux EXPORT_SYMBOL
+0xfcebacbd dentry_open vmlinux EXPORT_SYMBOL
+0x98dc5040 __SetPageMovable vmlinux EXPORT_SYMBOL
+0x8deb69c7 freq_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL
+0xd1a3da0e mlx5_cmd_exec drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xae46343a cxgb4_write_sgl drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x0a72f765 drm_clflush_virt_range drivers/gpu/drm/drm EXPORT_SYMBOL
+0x30e1554d unregister_netdevice_queue vmlinux EXPORT_SYMBOL
+0x0725f311 skb_checksum_help vmlinux EXPORT_SYMBOL
+0x8c89e3b8 usb_phy_roothub_power_off vmlinux EXPORT_SYMBOL_GPL
+0x97bdfa60 scsi_dev_info_remove_list vmlinux EXPORT_SYMBOL
+0xc14dc168 acpi_get_data vmlinux EXPORT_SYMBOL
+0x9176145b acpi_install_global_event_handler vmlinux EXPORT_SYMBOL
+0xcdc39c9e security_ismaclabel vmlinux EXPORT_SYMBOL
+0x1d07e365 memdup_user_nul vmlinux EXPORT_SYMBOL
+0x9545af6d tasklet_init vmlinux EXPORT_SYMBOL
+0x271c7f84 rpc_sleep_on_priority net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6670c367 nf_nat_setup_info net/netfilter/nf_nat EXPORT_SYMBOL
+0xccc9a813 ib_dealloc_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x64460433 mlx4_bf_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x04ef64bb bcm_phy_downshift_set drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x99dda420 bcm_phy_downshift_get drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x8f996a30 ethtool_convert_legacy_u32_to_link_mode vmlinux EXPORT_SYMBOL
+0xf553318d cpuidle_pause_and_lock vmlinux EXPORT_SYMBOL_GPL
+0x3f53c0f9 ata_sff_hsm_move vmlinux EXPORT_SYMBOL_GPL
+0xf9e0ad8e sata_link_debounce vmlinux EXPORT_SYMBOL_GPL
+0x42e94963 virtqueue_enable_cb vmlinux EXPORT_SYMBOL_GPL
+0x9d1cff83 jbd2_journal_get_undo_access vmlinux EXPORT_SYMBOL
+0xf1ccaa4c padata_unregister_cpumask_notifier vmlinux EXPORT_SYMBOL
+0xcb970751 stop_machine vmlinux EXPORT_SYMBOL_GPL
+0xb6261484 register_die_notifier vmlinux EXPORT_SYMBOL_GPL
+0x6034cd7f nf_ct_unlink_expect_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xe0b6028a ip6_find_1stfragopt vmlinux EXPORT_SYMBOL
+0x5deee757 __tcf_idr_release vmlinux EXPORT_SYMBOL
+0x71768a5f sock_no_recvmsg vmlinux EXPORT_SYMBOL
+0xca5aec4c sk_ns_capable vmlinux EXPORT_SYMBOL
+0x5f2594f2 fb_is_primary_device vmlinux EXPORT_SYMBOL
+0xe6024e59 dm_bufio_release vmlinux EXPORT_SYMBOL_GPL
+0x1448f8bc ata_do_set_mode vmlinux EXPORT_SYMBOL_GPL
+0xa0c6befa hrtimer_cancel vmlinux EXPORT_SYMBOL_GPL
+0xf504a0dd ib_set_vf_guid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xe2ba709c fuse_kill_sb_anon fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x28411ed7 kvm_max_tsc_scaling_ratio arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xab63baa5 unregister_inetaddr_validator_notifier vmlinux EXPORT_SYMBOL
+0x3002e8e6 ip_frag_next vmlinux EXPORT_SYMBOL
+0xe40118c0 xt_unregister_matches vmlinux EXPORT_SYMBOL
+0x759248fa netlink_net_capable vmlinux EXPORT_SYMBOL
+0x234cf416 devlink_fmsg_string_pair_put vmlinux EXPORT_SYMBOL_GPL
+0x54ddd0d7 mdio_driver_unregister vmlinux EXPORT_SYMBOL
+0x4b51f74c ata_xfer_mode2mask vmlinux EXPORT_SYMBOL_GPL
+0x4e0d234d nvme_start_freeze vmlinux EXPORT_SYMBOL_GPL
+0x1343101f component_unbind_all vmlinux EXPORT_SYMBOL_GPL
+0x124bad4d kstrtobool vmlinux EXPORT_SYMBOL
+0x5ebfb748 security_inode_setattr vmlinux EXPORT_SYMBOL_GPL
+0x9378f18c dquot_file_open vmlinux EXPORT_SYMBOL
+0x8f786bee fs_umode_to_dtype vmlinux EXPORT_SYMBOL_GPL
+0x84264ced fs_umode_to_ftype vmlinux EXPORT_SYMBOL_GPL
+0x605790dc fiemap_fill_next_extent vmlinux EXPORT_SYMBOL
+0xb97a9781 trace_event_ignore_this_pid vmlinux EXPORT_SYMBOL_GPL
+0x88295b96 dm_tm_unlock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x48e323be dm_bm_unlock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x629d661b mlx4_uar_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x3cb86a78 drm_modeset_acquire_fini drivers/gpu/drm/drm EXPORT_SYMBOL
+0xadaaa877 kvm_set_msr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x03751cc2 kvm_get_msr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xcd26968c kvm_set_xcr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xc631dc37 glue_ecb_req_128bit arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL
+0xb9af1d0d __xa_clear_mark vmlinux EXPORT_SYMBOL
+0x8075479d qdisc_watchdog_cancel vmlinux EXPORT_SYMBOL
+0x04c16526 dev_queue_xmit_accel vmlinux EXPORT_SYMBOL
+0x9c788fbe devm_device_remove_groups vmlinux EXPORT_SYMBOL_GPL
+0x911a7335 t10_pi_type1_crc vmlinux EXPORT_SYMBOL
+0x730a66c6 crypto_shash_digest vmlinux EXPORT_SYMBOL_GPL
+0x4412d723 crypto_ahash_digest vmlinux EXPORT_SYMBOL_GPL
+0x3de9cae1 crypto_remove_final vmlinux EXPORT_SYMBOL_GPL
+0xbba2d992 get_user_pages_unlocked vmlinux EXPORT_SYMBOL
+0x6dc0cb19 svc_authenticate net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3f51e95a mlx4_put_slave_node_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xfedcd40d drm_atomic_helper_wait_for_vblanks drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x8acaa172 nfs_file_operations fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xb7dacd1c kvm_clear_dirty_log_protect arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x3dde3b67 rtnl_set_sk_err vmlinux EXPORT_SYMBOL
+0x22143c9b __sock_queue_rcv_skb vmlinux EXPORT_SYMBOL
+0x203efa8c kernel_sendpage_locked vmlinux EXPORT_SYMBOL
+0xc21b1e24 usb_hcd_pci_remove vmlinux EXPORT_SYMBOL_GPL
+0x58bd3f26 phy_device_remove vmlinux EXPORT_SYMBOL
+0x9e965eae ufshcd_dump_regs vmlinux EXPORT_SYMBOL_GPL
+0x1f736e57 iscsi_update_cmdsn vmlinux EXPORT_SYMBOL_GPL
+0x15eb5266 class_remove_file_ns vmlinux EXPORT_SYMBOL_GPL
+0x5ff9eb0e lockref_mark_dead vmlinux EXPORT_SYMBOL
+0x03b0999c irq_domain_free_fwnode vmlinux EXPORT_SYMBOL_GPL
+0x32ae5741 _raw_read_lock vmlinux EXPORT_SYMBOL
+0x18888d00 downgrade_write vmlinux EXPORT_SYMBOL
+0x29361773 complete vmlinux EXPORT_SYMBOL
+0x59bbf145 pv_info vmlinux EXPORT_SYMBOL_GPL
+0xda910811 cxgbi_device_find_by_netdev_rcu drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xad8a21c1 ip_check_defrag vmlinux EXPORT_SYMBOL
+0xe40bb23e devlink_health_reporter_priv vmlinux EXPORT_SYMBOL_GPL
+0xbc3bdc7f flow_get_u32_src vmlinux EXPORT_SYMBOL
+0x48cef84c fc_fcp_destroy vmlinux EXPORT_SYMBOL
+0x4641eec2 iscsi_block_session vmlinux EXPORT_SYMBOL_GPL
+0xbc7a6a18 ata_bmdma_start vmlinux EXPORT_SYMBOL_GPL
+0x91e318c5 ata_port_freeze vmlinux EXPORT_SYMBOL_GPL
+0xcde1b7ec mipi_dsi_dcs_read vmlinux EXPORT_SYMBOL
+0x85b70770 iov_iter_single_seg_count vmlinux EXPORT_SYMBOL
+0xf18ae60f config_item_init_type_name vmlinux EXPORT_SYMBOL
+0x989e0626 adjust_managed_page_count vmlinux EXPORT_SYMBOL
+0x9bb6fd09 vsock_connected_table net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xb0b17e3b l2tp_xmit_skb net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x93a25a1e mlx5_dm_sw_icm_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xb8777934 mlx5_query_port_tc_group drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x34d8f16a sync_page_io vmlinux EXPORT_SYMBOL_GPL
+0x710822a7 usb_acpi_power_manageable vmlinux EXPORT_SYMBOL_GPL
+0xb24d84bd scsi_test_unit_ready vmlinux EXPORT_SYMBOL
+0x33514bd9 sata_link_resume vmlinux EXPORT_SYMBOL_GPL
+0x0c769533 serial8250_read_char vmlinux EXPORT_SYMBOL_GPL
+0x8a7d1c31 high_memory vmlinux EXPORT_SYMBOL
+0xa24416da perf_pmu_register vmlinux EXPORT_SYMBOL_GPL
+0xbb13595e smp_call_function_many vmlinux EXPORT_SYMBOL
+0x326425ca pci_unmap_biosrom vmlinux EXPORT_SYMBOL
+0x23acad23 ceph_monc_blacklist_add net/ceph/libceph EXPORT_SYMBOL
+0xb34e2230 xprt_reconnect_backoff net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc51650ff drm_dp_mst_port_has_audio drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x89b2edf6 inet6_csk_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0x52bc48c7 iscsi_itt_to_task vmlinux EXPORT_SYMBOL_GPL
+0x58ec4992 clk_hw_get_rate vmlinux EXPORT_SYMBOL_GPL
+0xf29403e5 acpi_install_table_handler vmlinux EXPORT_SYMBOL
+0x0484c6c4 acpi_enter_sleep_state_prep vmlinux EXPORT_SYMBOL
+0xb46e24ed simple_link vmlinux EXPORT_SYMBOL
+0x203156dc vfs_whiteout vmlinux EXPORT_SYMBOL
+0xc46de994 ceph_osdc_notify net/ceph/libceph EXPORT_SYMBOL
+0xe9674415 ib_create_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc73cda3c drm_vma_offset_manager_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe3098282 kvm_find_cpuid_entry arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x86406a29 hidinput_find_field vmlinux EXPORT_SYMBOL_GPL
+0xd7495211 scsi_remove_target vmlinux EXPORT_SYMBOL
+0xd16ec7fd nvdimm_clear_poison vmlinux EXPORT_SYMBOL_GPL
+0xb14ab1ef hdmi_audio_infoframe_init vmlinux EXPORT_SYMBOL
+0x760e1539 _copy_to_iter_mcsafe vmlinux EXPORT_SYMBOL_GPL
+0xfd29f864 find_asymmetric_key vmlinux EXPORT_SYMBOL_GPL
+0xc93476c7 fscrypt_zeroout_range vmlinux EXPORT_SYMBOL
+0xb7b505e8 page_cache_async_readahead vmlinux EXPORT_SYMBOL_GPL
+0x1953c958 mempool_create vmlinux EXPORT_SYMBOL
+0x7d937590 padata_alloc_possible vmlinux EXPORT_SYMBOL
+0x57231f45 ring_buffer_record_on vmlinux EXPORT_SYMBOL_GPL
+0x95a52abd dm_bm_is_read_only drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xf395a3da mlx5_set_port_pause drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x301ae939 bcm_phy_r_rc_cal_reset drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x9195b921 drm_cvt_mode drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7c9f4d51 crypto_transfer_skcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xeec39983 nfs4_init_ds_session fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x54b86d07 tcf_get_next_chain vmlinux EXPORT_SYMBOL
+0xd7399d2a efivar_entry_iter_end vmlinux EXPORT_SYMBOL_GPL
+0x0e6883d2 phy_init_eee vmlinux EXPORT_SYMBOL
+0xe437bc90 ahci_platform_enable_resources vmlinux EXPORT_SYMBOL_GPL
+0x0272b4e9 clkdev_drop vmlinux EXPORT_SYMBOL
+0xadb7e079 kblockd_schedule_work vmlinux EXPORT_SYMBOL
+0xa94a09bb mem_section vmlinux EXPORT_SYMBOL
+0x379720b4 send_sig_info vmlinux EXPORT_SYMBOL
+0xedc03953 iounmap vmlinux EXPORT_SYMBOL
+0x17e67cf7 cache_seq_next_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0529737e xprt_release_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x242f3875 nf_nat_ipv4_register_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x77584dfd i2c_parse_fw_timings drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x07b84d7e mlx4_qp_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x1fb8d0ca drm_bridge_mode_fixup drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc355c06c nfs_file_mmap fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xd4d69a75 __neigh_event_send vmlinux EXPORT_SYMBOL
+0x4fd09d31 sk_stream_wait_close vmlinux EXPORT_SYMBOL
+0x7216d6c4 skb_mpls_dec_ttl vmlinux EXPORT_SYMBOL_GPL
+0x49ead4f1 mptscsih_get_scsi_lookup vmlinux EXPORT_SYMBOL
+0x66ff429c dev_pm_qos_hide_latency_tolerance vmlinux EXPORT_SYMBOL_GPL
+0x5f7bb552 show_class_attr_string vmlinux EXPORT_SYMBOL_GPL
+0x43ad1cb4 clk_hw_unregister_gate vmlinux EXPORT_SYMBOL_GPL
+0x151f4898 schedule_timeout_uninterruptible vmlinux EXPORT_SYMBOL
+0x91fcdabf ceph_file_layout_from_legacy net/ceph/libceph EXPORT_SYMBOL
+0x2f77c1f2 ceph_client_gid net/ceph/libceph EXPORT_SYMBOL
+0xc0f7f6a2 vhost_add_used drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x4b779b98 drm_connector_unregister drivers/gpu/drm/drm EXPORT_SYMBOL
+0x808a499e kvm_set_msi_irq arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xf279060f kvm_load_guest_xcr0 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xfc4ad315 fqdir_exit vmlinux EXPORT_SYMBOL
+0x787da6be ndo_dflt_fdb_add vmlinux EXPORT_SYMBOL
+0x338aa564 pskb_extract vmlinux EXPORT_SYMBOL
+0x29dc44ca ir_raw_event_set_idle vmlinux EXPORT_SYMBOL_GPL
+0x9b8b2496 fc_fabric_logoff vmlinux EXPORT_SYMBOL
+0xbd65443d sas_remove_host vmlinux EXPORT_SYMBOL
+0x8ff21520 ata_host_register vmlinux EXPORT_SYMBOL_GPL
+0x60032f74 platform_get_irq vmlinux EXPORT_SYMBOL_GPL
+0x476af885 blk_mq_unquiesce_queue vmlinux EXPORT_SYMBOL_GPL
+0x602b2757 crypto_register_rng vmlinux EXPORT_SYMBOL_GPL
+0x91df870b kill_anon_super vmlinux EXPORT_SYMBOL
+0xb88dbfce irq_set_irqchip_state vmlinux EXPORT_SYMBOL_GPL
+0x1d222ced irq_get_irqchip_state vmlinux EXPORT_SYMBOL_GPL
+0x0c805f93 clflush_cache_range vmlinux EXPORT_SYMBOL_GPL
+0xff7a73b4 ppp_output_wakeup drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xf46cbbe8 xfrm6_rcv vmlinux EXPORT_SYMBOL
+0xd30821e9 xfrm4_rcv vmlinux EXPORT_SYMBOL
+0xc0fc03b4 ata_msleep vmlinux EXPORT_SYMBOL_GPL
+0xd0db0f12 run_dax vmlinux EXPORT_SYMBOL_GPL
+0xa444b705 virtqueue_add_inbuf_ctx vmlinux EXPORT_SYMBOL_GPL
+0x1c923f2d acpi_subsys_runtime_suspend vmlinux EXPORT_SYMBOL_GPL
+0x8361f6bf pci_enable_pcie_error_reporting vmlinux EXPORT_SYMBOL_GPL
+0x12dbc8f6 percpu_ref_switch_to_atomic_sync vmlinux EXPORT_SYMBOL_GPL
+0x0c153fb0 _copy_from_iter_full vmlinux EXPORT_SYMBOL
+0xf4b9024d request_key_rcu vmlinux EXPORT_SYMBOL
+0xc108e7e5 request_key_tag vmlinux EXPORT_SYMBOL
+0xd2741e9c kthread_mod_delayed_work vmlinux EXPORT_SYMBOL_GPL
+0xdb16b170 topology_phys_to_logical_pkg vmlinux EXPORT_SYMBOL
+0xd746a361 xdr_buf_subsegment net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x14292921 nf_tables_bind_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x3c46d138 cxgbi_device_register drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x34b7a40b drm_legacy_ioremap drivers/gpu/drm/drm EXPORT_SYMBOL
+0x1442bfca drm_master_internal_acquire drivers/gpu/drm/drm EXPORT_SYMBOL
+0x862d3154 nfs_unlink fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x3c5f2903 fscache_init_cache fs/fscache/fscache EXPORT_SYMBOL
+0x4dca08ee sync_file_get_fence vmlinux EXPORT_SYMBOL
+0x330053e1 bio_integrity_trim vmlinux EXPORT_SYMBOL
+0x21692cd9 bio_integrity_prep vmlinux EXPORT_SYMBOL
+0x63bffcb2 async_raid6_2data_recov vmlinux EXPORT_SYMBOL_GPL
+0xff4447f7 nf_nat_inet_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xa85a3e6d xa_load vmlinux EXPORT_SYMBOL
+0xcc939af7 inet_dgram_connect vmlinux EXPORT_SYMBOL
+0x69edaf70 usb_find_alt_setting vmlinux EXPORT_SYMBOL_GPL
+0xfe693c8e phy_connect vmlinux EXPORT_SYMBOL
+0x7941e42b dev_pm_disable_wake_irq vmlinux EXPORT_SYMBOL_GPL
+0x7be6d6f5 device_connection_find_match vmlinux EXPORT_SYMBOL_GPL
+0x0489e4d9 driver_unregister vmlinux EXPORT_SYMBOL_GPL
+0x1adb5626 virtqueue_get_buf vmlinux EXPORT_SYMBOL_GPL
+0x4336fcca ucs2_as_utf8 vmlinux EXPORT_SYMBOL
+0xb68dfac4 key_type_encrypted vmlinux EXPORT_SYMBOL_GPL
+0x00c060f5 set_cached_acl vmlinux EXPORT_SYMBOL
+0x29ff3a96 generic_pipe_buf_steal vmlinux EXPORT_SYMBOL
+0x6a421062 memory_failure_queue vmlinux EXPORT_SYMBOL_GPL
+0xe582bc1d param_ops_ulong vmlinux EXPORT_SYMBOL
+0xe9ce931a kvm_para_available vmlinux EXPORT_SYMBOL_GPL
+0x1e40195e vsock_insert_connected net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xb4c42fba mlx5_fpga_sbu_conn_sendmsg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x6a52c2f7 drm_i2c_encoder_commit drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa392b92e nfs_kill_super fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x2d6576ff nfs_fill_super fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x29711243 kobject_put vmlinux EXPORT_SYMBOL
+0x2d059664 kobject_get vmlinux EXPORT_SYMBOL
+0x8c37dff7 fcoe_ctlr_recv vmlinux EXPORT_SYMBOL
+0x3c094775 iscsi_unblock_session vmlinux EXPORT_SYMBOL_GPL
+0x28159fe7 tty_buffer_set_limit vmlinux EXPORT_SYMBOL_GPL
+0x603d0d51 acpi_os_map_iomem vmlinux EXPORT_SYMBOL_GPL
+0x5b99e4ab relay_open vmlinux EXPORT_SYMBOL_GPL
+0x3d54a832 param_set_copystring vmlinux EXPORT_SYMBOL
+0x37c41e43 nf_synproxy_ipv4_init net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x867914c0 drm_sysfs_connector_status_event drivers/gpu/drm/drm EXPORT_SYMBOL
+0xffcd7f49 iosf_mbi_punit_acquire arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL
+0x3101b8c4 br_vlan_enabled vmlinux EXPORT_SYMBOL_GPL
+0xc9634df9 in6addr_linklocal_allrouters vmlinux EXPORT_SYMBOL
+0xaef63a3f reuseport_select_sock vmlinux EXPORT_SYMBOL
+0x807766ea usb_scuttle_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
+0xd3ea3541 vchan_tx_submit vmlinux EXPORT_SYMBOL_GPL
+0xfc39e32f ioport_unmap vmlinux EXPORT_SYMBOL
+0x58b73bc7 match_wildcard vmlinux EXPORT_SYMBOL
+0x975519c1 asymmetric_key_id_same vmlinux EXPORT_SYMBOL_GPL
+0x43dbe6b2 relay_close vmlinux EXPORT_SYMBOL_GPL
+0xf4ceea11 nf_l4proto_log_invalid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x1b54bd5d ib_copy_path_rec_from_user drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0xfd80550d mlx4_is_eq_vector_valid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x21271fd0 copy_user_enhanced_fast_string vmlinux EXPORT_SYMBOL
+0xd6ea71e4 net_ns_get_ownership vmlinux EXPORT_SYMBOL_GPL
+0xd991e3b9 dm_bufio_get_device_size vmlinux EXPORT_SYMBOL_GPL
+0x0dc51797 usb_reset_configuration vmlinux EXPORT_SYMBOL_GPL
+0x4a943e0f fc_exch_mgr_free vmlinux EXPORT_SYMBOL
+0x147cc537 sysfs_merge_group vmlinux EXPORT_SYMBOL_GPL
+0x2bb6099e dq_data_lock vmlinux EXPORT_SYMBOL
+0xc84a0a7e seq_hlist_start_rcu vmlinux EXPORT_SYMBOL
+0x5f36ca15 __get_user_pages_fast vmlinux EXPORT_SYMBOL_GPL
+0xe9ffc063 down_trylock vmlinux EXPORT_SYMBOL
+0x73dd8e8a ceph_pg_to_acting_primary net/ceph/libceph EXPORT_SYMBOL
+0xe3ad835a rpc_clnt_setup_test_and_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xda08b417 ib_cm_listen drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x45de4e5d genl_register_family vmlinux EXPORT_SYMBOL
+0x2db795f5 phy_suspend vmlinux EXPORT_SYMBOL
+0xf3e6402e __bitmap_equal vmlinux EXPORT_SYMBOL
+0x054f2271 af_alg_pull_tsgl vmlinux EXPORT_SYMBOL_GPL
+0x3b9038b3 page_zero_new_buffers vmlinux EXPORT_SYMBOL
+0xf1b393ac mntget vmlinux EXPORT_SYMBOL
+0x3686a63f mntput vmlinux EXPORT_SYMBOL
+0x8d1f649e fasync_helper vmlinux EXPORT_SYMBOL
+0x9b9071cb get_old_itimerspec32 vmlinux EXPORT_SYMBOL_GPL
+0x7b4da6ff __init_rwsem vmlinux EXPORT_SYMBOL
+0x91f82324 nft_register_flowtable_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x6e3ff83a edac_device_alloc_index drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x41efa479 sbc_get_device_type drivers/target/target_core_mod EXPORT_SYMBOL
+0xf26542f8 ipvlan_link_new drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x2a62b786 mlx4_fmr_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xed3b6bc3 drm_mode_is_420_also drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa0e6c936 drm_gem_unlock_reservations drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbd3e7542 cast_s1 crypto/cast_common EXPORT_SYMBOL_GPL
+0x6211f393 pnfs_ld_write_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x9fb49f5a nvmem_device_cell_read vmlinux EXPORT_SYMBOL_GPL
+0xbb0988f7 devm_devfreq_register_notifier vmlinux EXPORT_SYMBOL
+0x502108d0 mpt_fwfault_debug vmlinux EXPORT_SYMBOL
+0xcdd45c85 phy_resolve_aneg_linkmode vmlinux EXPORT_SYMBOL_GPL
+0xefe0ad47 iommu_domain_free vmlinux EXPORT_SYMBOL_GPL
+0x69cb62b2 tpm_pcr_read vmlinux EXPORT_SYMBOL_GPL
+0xfe69325f percpu_ref_resurrect vmlinux EXPORT_SYMBOL_GPL
+0x4977952b blk_mq_flush_busy_ctxs vmlinux EXPORT_SYMBOL_GPL
+0x2f4113a2 dcookie_register vmlinux EXPORT_SYMBOL_GPL
+0x3fef6190 locks_mandatory_area vmlinux EXPORT_SYMBOL
+0x47d27c78 nft_reject_init net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x02dfd3d0 mlxsw_afk_key_info_block_encoding_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x51a19837 mlx4_counter_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xea245d4b nfs_request_add_commit_list_locked fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x2e1fda4f xt_table_unlock vmlinux EXPORT_SYMBOL_GPL
+0x35d0e354 tcf_exts_dump_stats vmlinux EXPORT_SYMBOL
+0x38f585ee sock_alloc_send_pskb vmlinux EXPORT_SYMBOL
+0xce9063a2 xhci_resume vmlinux EXPORT_SYMBOL_GPL
+0x83e7fff1 iscsi_tcp_recv_skb vmlinux EXPORT_SYMBOL_GPL
+0xe25af232 sas_port_add vmlinux EXPORT_SYMBOL
+0x0a2f0637 alloc_iova vmlinux EXPORT_SYMBOL_GPL
+0xaad0ae78 __bitmap_shift_right vmlinux EXPORT_SYMBOL
+0x9bfea1c8 exportfs_encode_inode_fh vmlinux EXPORT_SYMBOL_GPL
+0xaaa918c9 ftrace_dump vmlinux EXPORT_SYMBOL_GPL
+0xed1bcb5d alarm_init vmlinux EXPORT_SYMBOL_GPL
+0xa24f23d8 __request_module vmlinux EXPORT_SYMBOL
+0xa63ada76 ib_device_put drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd9916c3a idr_alloc_u32 vmlinux EXPORT_SYMBOL_GPL
+0x063c5a42 fib_nexthop_info vmlinux EXPORT_SYMBOL_GPL
+0xf22d11b1 ip_valid_fib_dump_req vmlinux EXPORT_SYMBOL_GPL
+0x0ad517e6 inet_register_protosw vmlinux EXPORT_SYMBOL
+0x0e78c05e __inet_inherit_port vmlinux EXPORT_SYMBOL_GPL
+0x65e4ab4f dev_deactivate vmlinux EXPORT_SYMBOL
+0x53e58baf usb_asmedia_modifyflowcontrol vmlinux EXPORT_SYMBOL_GPL
+0x49a8db1c phy_find_first vmlinux EXPORT_SYMBOL
+0x811811e0 fc_exch_done vmlinux EXPORT_SYMBOL
+0x7f61a354 ata_sff_error_handler vmlinux EXPORT_SYMBOL_GPL
+0x0d003853 pm_runtime_force_suspend vmlinux EXPORT_SYMBOL_GPL
+0xf2a3237e blkdev_reread_part vmlinux EXPORT_SYMBOL
+0x618dfe76 submit_bio_wait vmlinux EXPORT_SYMBOL
+0x23fd3028 vmalloc_node vmlinux EXPORT_SYMBOL
+0x9034a696 mempool_destroy vmlinux EXPORT_SYMBOL
+0x054e550b kernel_halt vmlinux EXPORT_SYMBOL_GPL
+0x0856e46a xfrm6_tunnel_spi_lookup net/ipv6/xfrm6_tunnel EXPORT_SYMBOL
+0xd7df00bb iw_destroy_cm_id drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x4234bd7c vmci_qpair_dequev drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x7588d7b0 drm_fb_helper_set_suspend drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x78863d5c netpoll_cleanup vmlinux EXPORT_SYMBOL
+0x6b0be15b scsi_ioctl vmlinux EXPORT_SYMBOL
+0x506fcde6 amd_iommu_domain_enable_v2 vmlinux EXPORT_SYMBOL
+0x36075bb5 iommu_group_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0xf99f77fd qtree_read_dquot vmlinux EXPORT_SYMBOL
+0xf7106fbd set_binfmt vmlinux EXPORT_SYMBOL
+0x579fde53 i2c_client_type drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0xbd04e2b2 sparse_keymap_entry_from_keycode drivers/input/sparse-keymap EXPORT_SYMBOL
+0xe02abfbb drm_dp_downstream_max_bpc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xceab0311 strchrnul vmlinux EXPORT_SYMBOL
+0xd254c160 xsk_umem_consume_tx_done vmlinux EXPORT_SYMBOL
+0x1bf7f301 km_policy_expired vmlinux EXPORT_SYMBOL
+0x9e4faeef dm_io_client_destroy vmlinux EXPORT_SYMBOL
+0x045a0843 tpm_default_chip vmlinux EXPORT_SYMBOL_GPL
+0x483b613a hvc_poll vmlinux EXPORT_SYMBOL_GPL
+0x1d7d2667 pci_user_write_config_word vmlinux EXPORT_SYMBOL_GPL
+0x6b85a095 pci_iomap_wc vmlinux EXPORT_SYMBOL_GPL
+0x98d19a09 elv_rb_find vmlinux EXPORT_SYMBOL
+0xca9b2fd3 padata_register_cpumask_notifier vmlinux EXPORT_SYMBOL
+0x891a1736 __drm_atomic_helper_connector_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x56077551 fscache_object_mark_killed fs/fscache/fscache EXPORT_SYMBOL
+0x7e20a57c flow_rule_match_basic vmlinux EXPORT_SYMBOL
+0x4ea067be call_fib_notifiers vmlinux EXPORT_SYMBOL
+0xcc2cb201 mptscsih_IssueTaskMgmt vmlinux EXPORT_SYMBOL
+0x013806fe kstrdup_quotable_cmdline vmlinux EXPORT_SYMBOL_GPL
+0x9829fc11 __kfifo_out_peek_r vmlinux EXPORT_SYMBOL
+0x198620d7 security_add_mnt_opt vmlinux EXPORT_SYMBOL
+0x21f18497 ib_cm_insert_listen drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x894502be target_send_busy drivers/target/target_core_mod EXPORT_SYMBOL
+0xd7b8617f kvm_lapic_expired_hv_timer arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1272b16e kvm_vector_hashing_enabled arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x833aeb93 ip6_frag_next vmlinux EXPORT_SYMBOL
+0x42fb1dc5 netlink_ns_capable vmlinux EXPORT_SYMBOL
+0xc4212ab9 qdisc_class_hash_insert vmlinux EXPORT_SYMBOL
+0x33e33429 eth_commit_mac_addr_change vmlinux EXPORT_SYMBOL
+0xe5867808 dlci_ioctl_set vmlinux EXPORT_SYMBOL
+0x2d828bb6 set_get_info_func vmlinux EXPORT_SYMBOL
+0xdb287e3e pci_set_host_bridge_release vmlinux EXPORT_SYMBOL_GPL
+0xafb864c1 refcount_dec_and_lock_irqsave vmlinux EXPORT_SYMBOL
+0x7f4eb92c generic_perform_write vmlinux EXPORT_SYMBOL
+0x1bdc700e __rpc_wait_for_completion_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xadad0e42 rdma_init_qp_attr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x323fa775 ib_redirect_mad_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5e6816b1 macvlan_common_setup drivers/net/macvlan EXPORT_SYMBOL_GPL
+0xd17b4dac cxgbi_sock_established drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x054bef45 layoutstats_timer fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x52f0357d pnfs_read_resend_pnfs fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x7d0db3bf opens_in_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL
+0x4140192a serpent_ecb_dec_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0x194b2841 serpent_ecb_enc_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0xf0da2a72 udp_set_csum vmlinux EXPORT_SYMBOL
+0x9646e741 scsi_print_sense vmlinux EXPORT_SYMBOL
+0xc90347f2 scsi_report_device_reset vmlinux EXPORT_SYMBOL
+0xded5161f apei_get_debugfs_dir vmlinux EXPORT_SYMBOL_GPL
+0x7f5a5878 devm_phy_optional_get vmlinux EXPORT_SYMBOL_GPL
+0x30371ddd config_item_get_unless_zero vmlinux EXPORT_SYMBOL
+0x69a50a16 locks_free_lock vmlinux EXPORT_SYMBOL
+0x9f22c384 fixed_size_llseek vmlinux EXPORT_SYMBOL
+0xa6d92207 ip_set_get_ip6_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x51f31fd8 ip_set_get_ip4_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x3e1bf1f6 core_alua_check_nonop_delay drivers/target/target_core_mod EXPORT_SYMBOL
+0xa6841fb6 tun_ptr_to_xdp drivers/net/tun EXPORT_SYMBOL
+0x0c91c0f2 drm_dev_register drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7acc1057 inet6_add_offload vmlinux EXPORT_SYMBOL
+0x3b7b91a8 sg_miter_skip vmlinux EXPORT_SYMBOL
+0x5fb4e9b7 simple_attr_release vmlinux EXPORT_SYMBOL_GPL
+0x40a1e51a perf_aux_output_flag vmlinux EXPORT_SYMBOL_GPL
+0xfe36a204 reuseport_detach_prog vmlinux EXPORT_SYMBOL
+0x85a82e75 usb_submit_urb vmlinux EXPORT_SYMBOL_GPL
+0xf076c77a mptscsih_bus_reset vmlinux EXPORT_SYMBOL
+0x05676360 fcoe_ctlr_device_delete vmlinux EXPORT_SYMBOL_GPL
+0xd16ef741 ata_link_abort vmlinux EXPORT_SYMBOL_GPL
+0x3b169ab0 fwnode_get_next_available_child_node vmlinux EXPORT_SYMBOL_GPL
+0x7ec78bdd rename_lock vmlinux EXPORT_SYMBOL
+0xbed9fe41 page_symlink_inode_operations vmlinux EXPORT_SYMBOL
+0x50e67379 file_path vmlinux EXPORT_SYMBOL
+0x1018cc2a dma_dummy_ops vmlinux EXPORT_SYMBOL
+0x8ad29bab _raw_write_unlock_bh vmlinux EXPORT_SYMBOL
+0x56572eb5 xprt_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf56c2017 mlog_not_bits fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x1fbd16da ip_tos2prio vmlinux EXPORT_SYMBOL
+0x2f5c1223 __tracepoint_neigh_cleanup_and_release vmlinux EXPORT_SYMBOL_GPL
+0x35e42d5d nvdimm_region_notify vmlinux EXPORT_SYMBOL_GPL
+0xe396bfd5 register_virtio_device vmlinux EXPORT_SYMBOL_GPL
+0x8c88beda pinctrl_register vmlinux EXPORT_SYMBOL_GPL
+0x96e5d30f gen_pool_set_algo vmlinux EXPORT_SYMBOL
+0x4af6ddf0 kstrtou16 vmlinux EXPORT_SYMBOL
+0x5c3c7387 kstrtoull vmlinux EXPORT_SYMBOL
+0xacd81eb3 jbd2_inode_cache vmlinux EXPORT_SYMBOL
+0x94961283 vunmap vmlinux EXPORT_SYMBOL
+0x09b6d9ee write_cache_pages vmlinux EXPORT_SYMBOL
+0x5d06fdaa dma_direct_sync_sg_for_cpu vmlinux EXPORT_SYMBOL
+0xbcf1491b iw_cm_disconnect drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x11e897d8 mlx5_cmd_exec_cb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x7fdb6004 cxgbi_device_unregister_all drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x7d6016e8 xfrm_policy_alloc vmlinux EXPORT_SYMBOL
+0xc88a83bd devm_thermal_of_cooling_device_register vmlinux EXPORT_SYMBOL_GPL
+0xcff1e8ac mipi_dsi_dcs_enter_sleep_mode vmlinux EXPORT_SYMBOL
+0xe5c78a99 do_blank_screen vmlinux EXPORT_SYMBOL
+0x976dbdf0 unregister_virtio_device vmlinux EXPORT_SYMBOL_GPL
+0x6f4879e8 acpi_dev_pm_attach vmlinux EXPORT_SYMBOL_GPL
+0xd0bd487b hdmi_drm_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0x518ff330 pci_check_and_unmask_intx vmlinux EXPORT_SYMBOL_GPL
+0x67da9f7c sha512_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0xfa4dff42 fscrypt_get_symlink vmlinux EXPORT_SYMBOL_GPL
+0x0d3e8c70 finish_open vmlinux EXPORT_SYMBOL
+0xbcd06fa2 kmem_cache_create_usercopy vmlinux EXPORT_SYMBOL
+0xb5c2723a bch_bset_init_next drivers/md/bcache/bcache EXPORT_SYMBOL
+0x06333e21 mlx5_accel_esp_destroy_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x5932b332 drm_dp_dual_mode_max_tmds_clock drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5b4b5a99 fuse_dev_install fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x117093be qdisc_class_hash_init vmlinux EXPORT_SYMBOL
+0x8be88ffd fwnode_property_read_string vmlinux EXPORT_SYMBOL_GPL
+0xfa873ad0 prandom_seed vmlinux EXPORT_SYMBOL
+0xa04d58dd get_acl vmlinux EXPORT_SYMBOL
+0xa13df913 set_page_dirty_lock vmlinux EXPORT_SYMBOL
+0xe90c6fa6 cache_unregister_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x72dfbef1 ib_get_device_fw_str drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4e91a072 edac_get_report_status drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x6b212e3f tcp_v4_conn_request vmlinux EXPORT_SYMBOL
+0xcd039ba8 rtc_add_group vmlinux EXPORT_SYMBOL
+0x695c79f4 usb_get_descriptor vmlinux EXPORT_SYMBOL_GPL
+0x82b2967c genphy_c45_read_link vmlinux EXPORT_SYMBOL_GPL
+0x26cbc117 fcoe_ctlr_link_down vmlinux EXPORT_SYMBOL
+0x2f2d8833 ata_sas_port_alloc vmlinux EXPORT_SYMBOL_GPL
+0xf5289470 ata_port_abort vmlinux EXPORT_SYMBOL_GPL
+0x81b10005 blk_rq_init vmlinux EXPORT_SYMBOL
+0xf6ffce90 ceph_monc_get_version net/ceph/libceph EXPORT_SYMBOL
+0x45f26992 drm_panel_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x26815dbc drm_dp_link_rate_to_bw_code drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x0e6004b8 cryptd_ahash_queued crypto/cryptd EXPORT_SYMBOL_GPL +0x935e2b2a kvm_read_guest_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb3d3c28a icmp_ndo_send vmlinux EXPORT_SYMBOL +0x2652c222 tcp_v4_syn_recv_sock vmlinux EXPORT_SYMBOL +0xd67fc1a7 usb_put_dev vmlinux EXPORT_SYMBOL_GPL +0x2570a138 reservation_seqcount_string vmlinux EXPORT_SYMBOL +0x7fb6a430 agp_find_bridge vmlinux EXPORT_SYMBOL +0xdd742d72 __sg_free_table vmlinux EXPORT_SYMBOL +0x2b9ceac9 kmem_cache_shrink vmlinux EXPORT_SYMBOL +0x5d49aabc init_wait_var_entry vmlinux EXPORT_SYMBOL +0x32bc0fcf preempt_notifier_dec vmlinux EXPORT_SYMBOL_GPL +0x0402cbbf preempt_notifier_inc vmlinux EXPORT_SYMBOL_GPL +0xe2aae5cc crc8 lib/crc8 EXPORT_SYMBOL +0x122125ac uverbs_idr_class drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x845567ce __iscsit_check_dataout_hdr drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x360e4b3e adf_dev_shutdown drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x029d294c mii_check_link drivers/net/mii EXPORT_SYMBOL +0xa163261c drm_atomic_helper_legacy_gamma_set drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1072a394 csum_partial_copy_from_user vmlinux EXPORT_SYMBOL +0x49045426 icmp_err_convert vmlinux EXPORT_SYMBOL +0x601f665f dm_io_client_create vmlinux EXPORT_SYMBOL +0x5b48875f mdio_device_create vmlinux EXPORT_SYMBOL +0xdb82b182 clk_gpio_mux_ops vmlinux EXPORT_SYMBOL_GPL +0x4b2dc531 acpi_device_fwnode_ops vmlinux EXPORT_SYMBOL_GPL +0x4b071516 acpi_subsys_complete vmlinux EXPORT_SYMBOL_GPL +0xcb733bf2 acpi_bus_set_power vmlinux EXPORT_SYMBOL +0x28ab4fb9 pinctrl_gpio_free vmlinux EXPORT_SYMBOL_GPL +0xa72957cc __dynamic_pr_debug vmlinux EXPORT_SYMBOL +0x3c457453 ioread64_lo_hi vmlinux EXPORT_SYMBOL +0xb7f990e9 rht_bucket_nested vmlinux EXPORT_SYMBOL_GPL +0xb4197aac blkcg_policy_register vmlinux EXPORT_SYMBOL_GPL +0x1e5b03dc pm_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL +0x5fa48e3b inet_diag_dump_one_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x055e3bcf rdma_query_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7e822291 vfio_device_get_from_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xbeaa0f8c mlx4_qp_release_range drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x4ab6882a nlmclnt_init fs/lockd/lockd EXPORT_SYMBOL_GPL +0xe5a7264c __tracepoint_nfs4_pnfs_commit_ds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x823c19ea iosf_mbi_unregister_pmic_bus_access_notifier_unlocked arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0xbfaa476f tcp_hashinfo vmlinux EXPORT_SYMBOL +0x2d140a58 genl_unlock vmlinux EXPORT_SYMBOL +0xcdc51dad devlink_params_publish vmlinux EXPORT_SYMBOL_GPL +0x05387375 hid_report_raw_event vmlinux EXPORT_SYMBOL_GPL +0xfddcf388 dma_buf_begin_cpu_access vmlinux EXPORT_SYMBOL_GPL +0xe912352b blk_mq_requeue_request vmlinux EXPORT_SYMBOL +0x63e8686a crypto_aead_setauthsize vmlinux EXPORT_SYMBOL_GPL +0xfdfdee1a fd_install vmlinux EXPORT_SYMBOL +0x150e3657 _raw_read_lock_bh vmlinux EXPORT_SYMBOL +0xb9950a98 convert_art_ns_to_tsc vmlinux EXPORT_SYMBOL +0x0b065ce0 nf_nat_packet net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x426e498b cxgb4_read_sge_timestamp drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xab548d12 compat_ipv6_getsockopt vmlinux EXPORT_SYMBOL +0x8a0e2b54 compat_ipv6_setsockopt vmlinux EXPORT_SYMBOL +0xebcc64a4 dm_bufio_get_block_data vmlinux EXPORT_SYMBOL_GPL +0x364bfb9b __phy_resume vmlinux EXPORT_SYMBOL +0x76fb08a7 amd_iommu_unregister_ppr_notifier vmlinux EXPORT_SYMBOL +0xf33457cc 
bio_devname vmlinux EXPORT_SYMBOL +0x783f5c09 direct_make_request vmlinux EXPORT_SYMBOL_GPL +0x67b9b8c1 vfs_fsync_range vmlinux EXPORT_SYMBOL +0x6fb22dad memory_cgrp_subsys vmlinux EXPORT_SYMBOL +0x3ce4ca6f disable_irq vmlinux EXPORT_SYMBOL +0x55eecff4 bit_wait_io_timeout vmlinux EXPORT_SYMBOL_GPL +0xdc9fa232 raw_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL +0xa70fabbe release_evntsel_nmi vmlinux EXPORT_SYMBOL +0x7c20b6a0 load_direct_gdt vmlinux EXPORT_SYMBOL_GPL +0x44bb9b15 vhost_add_used_n drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3a20a9d7 transport_set_vpd_ident_type drivers/target/target_core_mod EXPORT_SYMBOL +0x90fe317c cdrom_get_media_event drivers/cdrom/cdrom EXPORT_SYMBOL +0x273c4cac mlx5_core_dct_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x85496931 mlx4_free_cmd_mailbox drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x37125027 nfs_create fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5c2d37cf kvm_arch_end_assignment arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xfbb8a761 strscpy_pad vmlinux EXPORT_SYMBOL +0x16da1f88 devlink_fmsg_u32_put vmlinux EXPORT_SYMBOL_GPL +0x6906d3dd dm_set_target_max_io_len vmlinux EXPORT_SYMBOL_GPL +0xedfd90f6 iscsi_tcp_hdr_recv_prep vmlinux EXPORT_SYMBOL_GPL +0xbb549de6 fcoe_fcf_device_delete vmlinux EXPORT_SYMBOL_GPL +0x405b6e05 srp_parse_tmo vmlinux EXPORT_SYMBOL +0xd24efbb2 scsi_host_alloc vmlinux EXPORT_SYMBOL +0xe1efc6d8 bus_get_device_klist vmlinux EXPORT_SYMBOL_GPL +0x277b9ada af_alg_async_cb vmlinux EXPORT_SYMBOL_GPL +0x81a57121 fscrypt_drop_inode vmlinux EXPORT_SYMBOL_GPL +0x299949a7 path_put vmlinux EXPORT_SYMBOL +0xd7f647cc apply_to_page_range vmlinux EXPORT_SYMBOL_GPL +0x1984d421 out_of_line_wait_on_bit vmlinux EXPORT_SYMBOL +0xc60d2909 tcp_vegas_state net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0x7145b204 nft_register_expr net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x77d3ac0b edac_mc_find drivers/edac/edac_core EXPORT_SYMBOL +0x98eb4e40 mlx5_core_modify_hca_vport_context drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xf6e4c4bb drm_irq_uninstall drivers/gpu/drm/drm EXPORT_SYMBOL +0x4c24ab79 xfrm6_protocol_deregister vmlinux EXPORT_SYMBOL +0x69794f85 xfrm4_protocol_deregister vmlinux EXPORT_SYMBOL +0x17236438 gen10g_config_aneg vmlinux EXPORT_SYMBOL_GPL +0x70449b7c fc_remote_port_delete vmlinux EXPORT_SYMBOL +0xe84581b7 amd_iommu_enable_device_erratum vmlinux EXPORT_SYMBOL +0x5e104cab pci_request_irq vmlinux EXPORT_SYMBOL +0x73c2554f __iowrite64_copy vmlinux EXPORT_SYMBOL_GPL +0xd8dc0fa2 invalidate_partition vmlinux EXPORT_SYMBOL +0xfedc2b92 handle_mm_fault vmlinux EXPORT_SYMBOL_GPL +0x13b65f27 probe_user_read vmlinux EXPORT_SYMBOL_GPL +0xd217e9e6 trace_set_clr_event vmlinux EXPORT_SYMBOL_GPL +0x150a7617 blk_add_driver_data vmlinux EXPORT_SYMBOL_GPL +0xf6449ec8 kmsg_dump_rewind vmlinux EXPORT_SYMBOL_GPL +0x59487abb kick_process vmlinux EXPORT_SYMBOL_GPL +0x1ba237b0 default_cpu_present_to_apicid vmlinux EXPORT_SYMBOL_GPL +0x2d47ac83 lc_committed lib/lru_cache EXPORT_SYMBOL +0xa698f998 ceph_free_lockers net/ceph/libceph EXPORT_SYMBOL +0x8a085f42 __l2tp_session_unhash net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x85c54b61 efivar_validate vmlinux EXPORT_SYMBOL_GPL +0xbd228b99 dm_remap_zone_report vmlinux EXPORT_SYMBOL_GPL +0x3686ea09 spi_print_msg vmlinux EXPORT_SYMBOL +0xb7314f62 is_nd_pfn vmlinux EXPORT_SYMBOL +0xbfbc5434 pciserial_resume_ports vmlinux EXPORT_SYMBOL_GPL +0xe84f6e5c pciserial_remove_ports vmlinux EXPORT_SYMBOL_GPL +0x9a4e579e is_acpi_device_node vmlinux EXPORT_SYMBOL +0xc234f67d 
sg_miter_stop vmlinux EXPORT_SYMBOL +0x0da10ec3 security_sock_graft vmlinux EXPORT_SYMBOL +0x2ee0bcd3 key_reject_and_link vmlinux EXPORT_SYMBOL +0x8e9bff56 jbd2_journal_init_jbd_inode vmlinux EXPORT_SYMBOL +0x49c41a57 _raw_spin_unlock_bh vmlinux EXPORT_SYMBOL +0x23869e02 pci_biosrom_size vmlinux EXPORT_SYMBOL +0x34a4640a drm_dbg drivers/gpu/drm/drm EXPORT_SYMBOL +0x36d232e7 fscache_put_operation fs/fscache/fscache EXPORT_SYMBOL +0x76bf0999 xt_proto_init vmlinux EXPORT_SYMBOL_GPL +0x816bed95 devlink_port_unregister vmlinux EXPORT_SYMBOL_GPL +0x826689ca __sg_alloc_table_from_pages vmlinux EXPORT_SYMBOL +0xf2215f74 blk_finish_plug vmlinux EXPORT_SYMBOL +0x0b1beb31 vmalloc_32_user vmlinux EXPORT_SYMBOL +0xbfbe0a65 ib_umem_odp_get drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x1a626403 drm_master_put drivers/gpu/drm/drm EXPORT_SYMBOL +0x03bc993e ipmi_set_my_LUN drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x21e73cf3 ping_recvmsg vmlinux EXPORT_SYMBOL_GPL +0xae81896b dev_pm_opp_unregister_set_opp_helper vmlinux EXPORT_SYMBOL_GPL +0xfab30dc0 mdio_bus_exit vmlinux EXPORT_SYMBOL_GPL +0xd18bb49f ahci_platform_disable_phys vmlinux EXPORT_SYMBOL_GPL +0x448b7d0c sata_pmp_qc_defer_cmd_switch vmlinux EXPORT_SYMBOL_GPL +0xdbab12d1 ata_sff_port_intr vmlinux EXPORT_SYMBOL_GPL +0x8a29796f ata_scsi_change_queue_depth vmlinux EXPORT_SYMBOL_GPL +0xfd0ae1e7 pm_clk_add_clk vmlinux EXPORT_SYMBOL_GPL +0x3687a350 pci_get_subsys vmlinux EXPORT_SYMBOL +0x220f6228 rcutorture_get_gp_data vmlinux EXPORT_SYMBOL_GPL +0x2196f590 target_submit_tmr drivers/target/target_core_mod EXPORT_SYMBOL +0x2c9c3a19 mlxsw_core_trap_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x81188c30 match_string vmlinux EXPORT_SYMBOL +0xf7e8357a ip_local_out vmlinux EXPORT_SYMBOL_GPL +0xd89eda5a xt_compat_match_from_user vmlinux EXPORT_SYMBOL_GPL +0x3c4f247e dev_pm_opp_find_freq_floor vmlinux EXPORT_SYMBOL_GPL +0x2e8fed4c ehci_suspend vmlinux EXPORT_SYMBOL_GPL +0x9fbf2774 usb_hcd_check_unlink_urb vmlinux EXPORT_SYMBOL_GPL +0x1e9c590a usb_find_common_endpoints_reverse vmlinux EXPORT_SYMBOL_GPL +0xe57f403e phy_device_free vmlinux EXPORT_SYMBOL +0x9539ab1e fwnode_graph_get_remote_endpoint vmlinux EXPORT_SYMBOL_GPL +0x0ce19729 mb_cache_entry_touch vmlinux EXPORT_SYMBOL +0xfbe7ef3a generic_permission vmlinux EXPORT_SYMBOL +0x64faf25c vprintk_default vmlinux EXPORT_SYMBOL_GPL +0xb3a2dfdf nmi_panic vmlinux EXPORT_SYMBOL +0xfef635a4 virtio_transport_dgram_bind net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xc0dcb59e edac_layer_name drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xabf1b34a mlxsw_core_trap_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x142b8e3e mlx5_query_nic_vport_node_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x6615069e drm_dp_dsc_sink_max_slice_count drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x07b7348a fuse_dev_fiq_ops fs/fuse/fuse EXPORT_SYMBOL_GPL +0xa09cbe15 mroute6_is_socket vmlinux EXPORT_SYMBOL +0x85540ebc nvmem_cell_put vmlinux EXPORT_SYMBOL_GPL +0x058f5640 pci_test_config_bits vmlinux EXPORT_SYMBOL_GPL +0x62bd459c pci_msi_mask_irq vmlinux EXPORT_SYMBOL_GPL +0x641ea55d __cgroup_bpf_run_filter_sysctl vmlinux EXPORT_SYMBOL +0xbb4c7570 pids_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0x5d2caf87 udp_tunnel_notify_del_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x38d0e638 ibdev_warn drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdda9ef4d xt_compat_target_from_user vmlinux EXPORT_SYMBOL_GPL +0x75943e25 i8253_lock 
vmlinux EXPORT_SYMBOL +0xf11bdc28 mdio_device_register vmlinux EXPORT_SYMBOL +0x01abdf02 qlt_free_cmd vmlinux EXPORT_SYMBOL +0xc6fa06cb fc_get_host_stats vmlinux EXPORT_SYMBOL +0x8f7caa02 uart_get_baud_rate vmlinux EXPORT_SYMBOL +0x4100a662 clk_get_scaled_duty_cycle vmlinux EXPORT_SYMBOL_GPL +0xbda64060 pci_find_next_bus vmlinux EXPORT_SYMBOL +0x57900416 gen_pool_fixed_alloc vmlinux EXPORT_SYMBOL +0x7181db30 atomic_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0x4f477261 dm_bm_checksum drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x26fb8dbf mlx4_get_admin_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0cdff08a drm_framebuffer_init drivers/gpu/drm/drm EXPORT_SYMBOL +0xaf0ba1b5 scsi_vpd_tpg_id vmlinux EXPORT_SYMBOL +0x63c3740f ahci_platform_suspend vmlinux EXPORT_SYMBOL_GPL +0x92295424 clk_register_gate vmlinux EXPORT_SYMBOL_GPL +0x97a57333 crc_t10dif_update vmlinux EXPORT_SYMBOL +0x817ac88b trace_call_bpf vmlinux EXPORT_SYMBOL_GPL +0x0fff5afc time64_to_tm vmlinux EXPORT_SYMBOL +0x8ee0e647 set_user_nice vmlinux EXPORT_SYMBOL +0xf18c2726 gss_pseudoflavor_to_service net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL +0x836e6766 esp6_output_tail net/ipv6/esp6 EXPORT_SYMBOL_GPL +0x2de78124 ib_modify_port drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xd28256cf mlxsw_afa_block_append_allocated_counter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x947b012c drm_atomic_set_crtc_for_plane drivers/gpu/drm/drm EXPORT_SYMBOL +0x6233b999 netdev_notify_peers vmlinux EXPORT_SYMBOL +0x4ab517ee iscsi_create_flashnode_conn vmlinux EXPORT_SYMBOL_GPL +0x6d2d6e62 splice_direct_to_actor vmlinux EXPORT_SYMBOL +0xe4ba127e rpc_release_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbf41d946 nf_nat_pptp_hook_inbound net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL +0x4d2c81fd drm_dp_mst_deallocate_vcpi drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x3d478d24 br_fdb_find_port vmlinux EXPORT_SYMBOL_GPL +0x2b9da7a4 genl_lock vmlinux EXPORT_SYMBOL +0x915447a0 qdisc_class_hash_grow vmlinux EXPORT_SYMBOL +0x7a223cc3 __dev_kfree_skb_any vmlinux EXPORT_SYMBOL +0x7a0c1922 __dev_kfree_skb_irq vmlinux EXPORT_SYMBOL +0xbbddb8af sock_setsockopt vmlinux EXPORT_SYMBOL +0x7935e009 usb_unlocked_enable_lpm vmlinux EXPORT_SYMBOL_GPL +0x24665a3b nd_region_release_lane vmlinux EXPORT_SYMBOL +0x73f9718a tty_port_close_start vmlinux EXPORT_SYMBOL +0x2d91657a dma_find_channel vmlinux EXPORT_SYMBOL +0x7c2d19b9 acpi_subsys_runtime_resume vmlinux EXPORT_SYMBOL_GPL +0x95b4ac22 fb_find_mode vmlinux EXPORT_SYMBOL +0x6bdd121c unlock_buffer vmlinux EXPORT_SYMBOL +0xef6b8303 simple_transaction_release vmlinux EXPORT_SYMBOL +0x8b9200fd lookup_address vmlinux EXPORT_SYMBOL_GPL +0x124e2e91 vhost_dev_has_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x76164365 ib_alloc_mr_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x142072c0 transport_set_vpd_assoc drivers/target/target_core_mod EXPORT_SYMBOL +0xe8c43b71 iavf_register_client drivers/net/ethernet/intel/iavf/iavf EXPORT_SYMBOL +0x5ec995db nfs_commit_free fs/nfs/nfs EXPORT_SYMBOL_GPL +0x8cc1e7f2 nfs_try_mount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x40f8bd4e klist_add_before vmlinux EXPORT_SYMBOL_GPL +0xa61ced89 qdisc_put_rtab vmlinux EXPORT_SYMBOL +0x66822f0b __phy_modify_changed vmlinux EXPORT_SYMBOL_GPL +0xe6184508 sas_queuecommand vmlinux EXPORT_SYMBOL_GPL +0x3efd1889 dax_direct_access vmlinux EXPORT_SYMBOL_GPL +0xb745dcd7 subsys_find_device_by_id vmlinux EXPORT_SYMBOL_GPL +0x4ccaaca1 vga_put vmlinux EXPORT_SYMBOL +0xa8971190 
crypto_register_rngs vmlinux EXPORT_SYMBOL_GPL +0x4ea5e08e crypto_attr_alg2 vmlinux EXPORT_SYMBOL_GPL +0xdb3b9bb6 key_invalidate vmlinux EXPORT_SYMBOL +0xa1839690 __tracepoint_kmem_cache_alloc vmlinux EXPORT_SYMBOL +0x68a94ab0 freq_qos_update_request vmlinux EXPORT_SYMBOL_GPL +0xb19a5453 __per_cpu_offset vmlinux EXPORT_SYMBOL +0xea8a1fbf set_sig_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x8cfc697e drm_crtc_helper_set_mode drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x08e3065b proto_register vmlinux EXPORT_SYMBOL +0x7bf41759 to_software_node vmlinux EXPORT_SYMBOL_GPL +0x7a15a1a1 _dev_crit vmlinux EXPORT_SYMBOL +0x290e3261 tty_wakeup vmlinux EXPORT_SYMBOL_GPL +0x7d0ba682 gen_pool_virt_to_phys vmlinux EXPORT_SYMBOL +0x51d12d4e acpi_pci_disabled vmlinux EXPORT_SYMBOL +0x1951741e virtio_transport_notify_recv_init net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xfddaffda nf_reject_ip_tcphdr_get net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL +0x21ee5d21 taprio_offload_free net/sched/sch_taprio EXPORT_SYMBOL_GPL +0xeda1212e drm_plane_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL +0x4ace5d35 tcf_block_put vmlinux EXPORT_SYMBOL +0x09c32a2c devlink_dpipe_entry_ctx_append vmlinux EXPORT_SYMBOL_GPL +0x8012ed6a usb_acpi_set_power_state vmlinux EXPORT_SYMBOL_GPL +0x0d115af9 genphy_write_mmd_unsupported vmlinux EXPORT_SYMBOL +0x0fb2f8a4 mktime64 vmlinux EXPORT_SYMBOL +0x668b19a1 down_read vmlinux EXPORT_SYMBOL +0x2d68b4b8 cxgbi_conn_init_pdu drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x3d5f790f nfs_alloc_server fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6664d568 km_report vmlinux EXPORT_SYMBOL +0x8fdc83fd skb_copy_expand vmlinux EXPORT_SYMBOL +0xb26c8fc6 ufshcd_hold vmlinux EXPORT_SYMBOL_GPL +0x0de793c2 sas_phy_reset vmlinux EXPORT_SYMBOL_GPL +0x62279652 device_property_read_string_array vmlinux EXPORT_SYMBOL_GPL +0xed3fef85 transport_setup_device vmlinux EXPORT_SYMBOL_GPL +0x18bca678 pci_hp_destroy vmlinux EXPORT_SYMBOL_GPL +0x826cb152 pci_release_resource vmlinux EXPORT_SYMBOL +0xc1d8cfaf __fdget vmlinux EXPORT_SYMBOL +0x6f6eabb0 d_lookup vmlinux EXPORT_SYMBOL +0x5b6c2ee2 dmam_free_coherent vmlinux EXPORT_SYMBOL +0xca15413f ZSTD_resetDStream lib/zstd/zstd_decompress EXPORT_SYMBOL +0xb034d231 nf_tables_destroy_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x26cbd5dd cxgb4_iscsi_init drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x72efa546 cxgbi_iscsi_init drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xea61eefe vmci_qpair_produce_buf_ready drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x1d5d2ee2 drm_atomic_bridge_pre_enable drivers/gpu/drm/drm EXPORT_SYMBOL +0x8003132a nfs_create_server fs/nfs/nfs EXPORT_SYMBOL_GPL +0x614150ff __tracepoint_devlink_hwerr vmlinux EXPORT_SYMBOL_GPL +0xb84eea4a phy_10gbit_fec_features vmlinux EXPORT_SYMBOL_GPL +0xdccc817a phy_write_paged vmlinux EXPORT_SYMBOL +0x538d073d phy_duplex_to_str vmlinux EXPORT_SYMBOL_GPL +0xf811e69d scsi_eh_flush_done_q vmlinux EXPORT_SYMBOL +0xdaef57a4 virtio_check_driver_offered_feature vmlinux EXPORT_SYMBOL_GPL +0xea3ca789 clk_hw_register_fixed_rate_with_accuracy vmlinux EXPORT_SYMBOL_GPL +0x8c71bbba pci_iomap vmlinux EXPORT_SYMBOL +0xd45cc6ca bin2hex vmlinux EXPORT_SYMBOL +0x294cad0f path_is_under vmlinux EXPORT_SYMBOL +0x5096f0e5 vsock_core_get_transport net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xbf1c3328 transport_wait_for_tasks drivers/target/target_core_mod EXPORT_SYMBOL +0x9c8d1457 drm_mode_equal drivers/gpu/drm/drm EXPORT_SYMBOL +0x584b8482 nfs_inc_attr_generation_counter fs/nfs/nfs 
EXPORT_SYMBOL_GPL +0xc07c45f0 unix_attach_fds vmlinux EXPORT_SYMBOL +0x8e19f666 skb_morph vmlinux EXPORT_SYMBOL_GPL +0x7c46233a cpufreq_quick_get vmlinux EXPORT_SYMBOL +0x957ea0b6 serio_unregister_port vmlinux EXPORT_SYMBOL +0xe5c60bd2 percpu_counter_set vmlinux EXPORT_SYMBOL +0xfe6e4041 rht_bucket_nested_insert vmlinux EXPORT_SYMBOL_GPL +0xb0747ed2 rcu_cpu_stall_suppress vmlinux EXPORT_SYMBOL_GPL +0x093a219c ioremap_nocache vmlinux EXPORT_SYMBOL +0x787ad4b4 transport_generic_handle_tmr drivers/target/target_core_mod EXPORT_SYMBOL +0x8fd14a22 tun_get_tx_ring drivers/net/tun EXPORT_SYMBOL_GPL +0x58d04d3c t3_l2t_send_event drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x0959d5ec drm_atomic_get_connector_state drivers/gpu/drm/drm EXPORT_SYMBOL +0xc12d5e81 pnfs_ld_read_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xd38da231 gfn_to_pfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xaa867d25 gfn_to_hva arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6185b747 radix_tree_gang_lookup_tag vmlinux EXPORT_SYMBOL +0xe6f52443 klist_add_head vmlinux EXPORT_SYMBOL_GPL +0xc740a855 hwmon_device_register_with_info vmlinux EXPORT_SYMBOL_GPL +0x7e601469 fc_exch_mgr_reset vmlinux EXPORT_SYMBOL +0x0c9f1cac scsi_remove_host vmlinux EXPORT_SYMBOL +0x7a057bf2 pci_request_selected_regions_exclusive vmlinux EXPORT_SYMBOL +0xf88d5dc9 pinctrl_register_and_init vmlinux EXPORT_SYMBOL_GPL +0x69e683de uuid_gen vmlinux EXPORT_SYMBOL_GPL +0x87bee547 btracker_queue drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x01d2f9ac dm_rh_recovery_start drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xd958c4c2 mlx4_get_slave_from_roce_gid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xe396fa61 scsi_is_host_device vmlinux EXPORT_SYMBOL +0x68516cea ata_sas_tport_add vmlinux EXPORT_SYMBOL_GPL +0x719e17ff clk_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0x5c5c0623 devm_ioremap_nocache vmlinux EXPORT_SYMBOL +0x0b673698 af_alg_release_parent vmlinux EXPORT_SYMBOL_GPL +0x913a1287 sync_dirty_buffer vmlinux EXPORT_SYMBOL +0xa572dd7c ebt_register_table net/bridge/netfilter/ebtables EXPORT_SYMBOL +0x95956c7c vhost_add_used_and_signal_n drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x3a39807d ib_fmr_pool_map_phys drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfc67ac7e adf_iov_putmsg drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x19acd14e __tracepoint_bcache_journal_entry_full drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xf1421d13 drm_mode_sort drivers/gpu/drm/drm EXPORT_SYMBOL +0xc47906d6 tcp_reno_ssthresh vmlinux EXPORT_SYMBOL_GPL +0x2024db24 netlink_has_listeners vmlinux EXPORT_SYMBOL_GPL +0xc025016c flow_keys_dissector vmlinux EXPORT_SYMBOL +0xac33e81d nd_numa_attribute_group vmlinux EXPORT_SYMBOL_GPL +0xc16be39d iter_div_u64_rem vmlinux EXPORT_SYMBOL +0x8905439f dquot_get_next_id vmlinux EXPORT_SYMBOL +0xbfecc6b8 dma_supported vmlinux EXPORT_SYMBOL +0xa685e3ca rdma_resolve_route drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x54b87044 sbc_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL +0xd3de08ee mlx5_comp_vectors_count drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4fd95297 tcf_exts_num_actions vmlinux EXPORT_SYMBOL +0x06f843d5 flow_indr_block_call vmlinux EXPORT_SYMBOL_GPL +0x17f341a0 i8042_lock_chip vmlinux EXPORT_SYMBOL +0x81b236df usb_alloc_urb vmlinux EXPORT_SYMBOL_GPL +0x8bdc9ba9 phy_driver_is_genphy vmlinux EXPORT_SYMBOL_GPL +0x1749478e add_disk_randomness vmlinux EXPORT_SYMBOL_GPL +0x59aadb36 serial8250_get_port vmlinux EXPORT_SYMBOL_GPL +0x1b2a372f phy_optional_get vmlinux EXPORT_SYMBOL_GPL +0x4f2593f0 
btree_update vmlinux EXPORT_SYMBOL_GPL +0xb539fe9b inode_init_once vmlinux EXPORT_SYMBOL +0x925cc05b uprobe_register vmlinux EXPORT_SYMBOL_GPL +0x369fcd70 tracing_snapshot vmlinux EXPORT_SYMBOL_GPL +0x73d69364 ring_buffer_change_overwrite vmlinux EXPORT_SYMBOL_GPL +0xbe687e88 wake_up_all_idle_cpus vmlinux EXPORT_SYMBOL_GPL +0x54fb8f28 dma_cache_sync vmlinux EXPORT_SYMBOL +0xdb427120 __srcu_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x7b957cb3 rdma_reject_msg drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x21498435 transport_backend_register drivers/target/target_core_mod EXPORT_SYMBOL +0x624ff00c nf_ip6_checksum vmlinux EXPORT_SYMBOL +0x2338f2d0 netpoll_send_skb_on_dev vmlinux EXPORT_SYMBOL +0x0b86a51d get_net_ns vmlinux EXPORT_SYMBOL_GPL +0x170c31ea md_start vmlinux EXPORT_SYMBOL_GPL +0xbd7fdeb8 thermal_add_hwmon_sysfs vmlinux EXPORT_SYMBOL_GPL +0x68753533 device_move vmlinux EXPORT_SYMBOL_GPL +0x6257dda7 clk_rate_exclusive_get vmlinux EXPORT_SYMBOL_GPL +0xdc6699cb acpi_dev_free_resource_list vmlinux EXPORT_SYMBOL_GPL +0xe4b051cf raid6_datap_recov vmlinux EXPORT_SYMBOL_GPL +0xf3de476a scsi_cmd_ioctl vmlinux EXPORT_SYMBOL +0x1ec50485 usb_stor_clear_halt drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x7c373b29 tcp_md5_do_del vmlinux EXPORT_SYMBOL +0xe894298a neigh_seq_stop vmlinux EXPORT_SYMBOL +0x016e15d5 mipi_dsi_detach vmlinux EXPORT_SYMBOL +0x863c5361 pci_user_write_config_dword vmlinux EXPORT_SYMBOL_GPL +0x2ad0c8d5 current_in_userns vmlinux EXPORT_SYMBOL +0x66decfd5 ns_to_timespec vmlinux EXPORT_SYMBOL +0xb51a3724 rpc_pipefs_notifier_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3ece2a7c target_put_sess_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0x97598d7c target_get_sess_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0x06d94b0a __tracepoint_bcache_gc_start drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xccaed164 cxgbi_set_host_param drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xedf7da41 drm_framebuffer_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL +0x2fc632ff ip6_fraglist_prepare vmlinux EXPORT_SYMBOL +0xfa81c562 flow_rule_match_enc_control vmlinux EXPORT_SYMBOL +0x5fd9bd49 dev_mc_del_global vmlinux EXPORT_SYMBOL +0x7ec3fa44 dev_mc_add_global vmlinux EXPORT_SYMBOL +0xead5c8e5 clk_bulk_prepare vmlinux EXPORT_SYMBOL_GPL +0xbc46ee65 pnp_device_attach vmlinux EXPORT_SYMBOL +0x547e3344 acpi_disable vmlinux EXPORT_SYMBOL +0x80c88f48 acpi_dev_suspend vmlinux EXPORT_SYMBOL_GPL +0x9ea19a68 fb_prepare_logo vmlinux EXPORT_SYMBOL +0x4cd5bc5e rdmsr_safe_regs vmlinux EXPORT_SYMBOL +0x4ece3615 blocking_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0x910d16c3 fifo_create_dflt vmlinux EXPORT_SYMBOL +0x2a6d1b74 devlink_flash_update_end_notify vmlinux EXPORT_SYMBOL_GPL +0x15498520 neigh_table_init vmlinux EXPORT_SYMBOL +0x0a936085 cpuidle_register_device vmlinux EXPORT_SYMBOL_GPL +0x59b2adbf input_ff_effect_from_user vmlinux EXPORT_SYMBOL_GPL +0x087a7942 fcoe_ctlr_device_add vmlinux EXPORT_SYMBOL_GPL +0x79dc9c9c libfc_vport_create vmlinux EXPORT_SYMBOL +0x2a614b7d attribute_container_register vmlinux EXPORT_SYMBOL_GPL +0x9f8355f5 blk_queue_segment_boundary vmlinux EXPORT_SYMBOL +0x05a36385 acomp_request_alloc vmlinux EXPORT_SYMBOL_GPL +0x2c80bcce blkcipher_walk_virt_block vmlinux EXPORT_SYMBOL_GPL +0xb3b220d6 sysfs_create_link vmlinux EXPORT_SYMBOL_GPL +0x08ca47b4 task_active_pid_ns vmlinux EXPORT_SYMBOL_GPL +0x3c900f33 lc_seq_dump_details lib/lru_cache EXPORT_SYMBOL +0xe4f01577 svc_set_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x80aa4656 ipmi_free_recv_msg 
drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x538b47a5 kvm_vcpu_block arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xef58cc85 tcp_fastopen_defer_connect vmlinux EXPORT_SYMBOL +0xc8e7622c inetpeer_invalidate_tree vmlinux EXPORT_SYMBOL +0x44179a9f __neigh_create vmlinux EXPORT_SYMBOL +0x85674341 cpufreq_freq_transition_end vmlinux EXPORT_SYMBOL_GPL +0xa5a6713d power_supply_powers vmlinux EXPORT_SYMBOL_GPL +0xe126553f __tracepoint_unmap vmlinux EXPORT_SYMBOL_GPL +0x809712ff hdmi_avi_infoframe_pack vmlinux EXPORT_SYMBOL +0x339662ad pci_add_dynid vmlinux EXPORT_SYMBOL_GPL +0xf75847b1 trace_array_destroy vmlinux EXPORT_SYMBOL_GPL +0x6d294e43 clock_t_to_jiffies vmlinux EXPORT_SYMBOL +0x491904da capable_wrt_inode_uidgid vmlinux EXPORT_SYMBOL +0xa7127da7 mce_unregister_injector_chain vmlinux EXPORT_SYMBOL_GPL +0xf8bf8e22 ZSTD_DDictWorkspaceBound lib/zstd/zstd_decompress EXPORT_SYMBOL +0xe97f4ce5 qword_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2137801b ip_vs_nfct_expect_related net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x070b28aa drm_ht_remove_item drivers/gpu/drm/drm EXPORT_SYMBOL +0x6ada8f59 kvm_mmu_set_mmio_spte_mask arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xc5156bf3 fanout_mutex vmlinux EXPORT_SYMBOL_GPL +0x4917451e mr_vif_seq_next vmlinux EXPORT_SYMBOL +0x84fcb7b3 skb_mac_gso_segment vmlinux EXPORT_SYMBOL +0x417168ae fixed_phy_register_with_gpiod vmlinux EXPORT_SYMBOL_GPL +0x01c5101b dim_on_top vmlinux EXPORT_SYMBOL +0xcf52d379 pcim_iomap_table vmlinux EXPORT_SYMBOL +0x331f5083 crypto_larval_alloc vmlinux EXPORT_SYMBOL_GPL +0xffbf2f90 debugfs_attr_write vmlinux EXPORT_SYMBOL_GPL +0x018574a1 mb_cache_entry_delete vmlinux EXPORT_SYMBOL +0x8d901e6e seq_release vmlinux EXPORT_SYMBOL +0xdda52785 insert_inode_locked vmlinux EXPORT_SYMBOL +0x1866cec2 ring_buffer_size vmlinux EXPORT_SYMBOL_GPL +0x838b13e7 ring_buffer_free vmlinux EXPORT_SYMBOL_GPL +0x1283a5ef module_layout vmlinux EXPORT_SYMBOL +0x5c5a1b16 tick_broadcast_control vmlinux EXPORT_SYMBOL_GPL +0x90c3a3a8 fuse_dev_free fs/fuse/fuse EXPORT_SYMBOL_GPL +0xf2622179 xfrm_audit_state_notfound vmlinux EXPORT_SYMBOL_GPL +0x5a03a215 lwtstate_free vmlinux EXPORT_SYMBOL_GPL +0xa5cbf52a hidinput_disconnect vmlinux EXPORT_SYMBOL_GPL +0x829eda64 to_nd_dax vmlinux EXPORT_SYMBOL +0x11e06ee9 badrange_init vmlinux EXPORT_SYMBOL_GPL +0xa87dea51 pinctrl_utils_add_map_configs vmlinux EXPORT_SYMBOL_GPL +0x80a717a8 __percpu_counter_compare vmlinux EXPORT_SYMBOL +0xcc4ee841 raid6_gfexi vmlinux EXPORT_SYMBOL +0x63fb1fb7 blk_mq_tagset_wait_completed_request vmlinux EXPORT_SYMBOL +0x1fb0e760 qtree_entry_unused vmlinux EXPORT_SYMBOL +0x366f4450 __sb_start_write vmlinux EXPORT_SYMBOL +0x01bf55fc paddr_vmcoreinfo_note vmlinux EXPORT_SYMBOL +0xb2affe59 udp_tunnel_push_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x0e9d6adf rdma_restrack_set_task drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x78ce117b tun_get_socket drivers/net/tun EXPORT_SYMBOL_GPL +0xa0100109 serpent_xts_dec arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL +0xcee44453 serpent_xts_enc arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL +0x5d7fe71b dm_snap_origin vmlinux EXPORT_SYMBOL +0x7f8e7d36 sas_resume_ha vmlinux EXPORT_SYMBOL +0x73a48b4a ata_sff_std_ports vmlinux EXPORT_SYMBOL_GPL +0x0c68451e pm_clk_add vmlinux EXPORT_SYMBOL_GPL +0x3d388324 dpm_resume_end vmlinux EXPORT_SYMBOL_GPL +0xd6feefa5 agp_num_entries vmlinux EXPORT_SYMBOL_GPL +0x00cc556c pinctrl_remove_gpio_range vmlinux EXPORT_SYMBOL_GPL +0xf292e9e2 elv_rb_latter_request vmlinux EXPORT_SYMBOL +0x597341fd sysfs_create_bin_file vmlinux 
EXPORT_SYMBOL_GPL +0x27fa66e1 nr_free_buffer_pages vmlinux EXPORT_SYMBOL_GPL +0xcc03b139 mod_delayed_work_on vmlinux EXPORT_SYMBOL_GPL +0xd6b33026 cpu_khz vmlinux EXPORT_SYMBOL +0x4cb55f7a tls_register_device net/tls/tls EXPORT_SYMBOL +0xf5455294 cxgbi_sock_check_wr_invariants drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xbfd26f15 simd_aead_free crypto/crypto_simd EXPORT_SYMBOL_GPL +0xd24e9e8c klist_init vmlinux EXPORT_SYMBOL_GPL +0x7fb6182c xfrm_state_unregister_afinfo vmlinux EXPORT_SYMBOL +0x5378eabf dm_noflush_suspending vmlinux EXPORT_SYMBOL_GPL +0xd9708635 md_bitmap_end_sync vmlinux EXPORT_SYMBOL +0x87ab1c6c _dev_emerg vmlinux EXPORT_SYMBOL +0x6b1ddf20 serial8250_do_pm vmlinux EXPORT_SYMBOL +0x6b640864 nla_strlcpy vmlinux EXPORT_SYMBOL +0x3b6c41ea kstrtouint vmlinux EXPORT_SYMBOL +0xb332c396 pstore_register vmlinux EXPORT_SYMBOL_GPL +0x77e8cbfd fs_context_for_submount vmlinux EXPORT_SYMBOL +0xb8b043f2 kfree_link vmlinux EXPORT_SYMBOL +0xce807a25 up_write vmlinux EXPORT_SYMBOL +0xc278c965 cpu_all_bits vmlinux EXPORT_SYMBOL +0x9b9e1d53 auth_domain_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf102033e slhc_remember drivers/net/slip/slhc EXPORT_SYMBOL +0x2decde87 mlxsw_core_fw_flash_start drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x046b43c4 mlxsw_core_ptp_transmitted drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x0683b811 mlx5_rdma_rn_get_params drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xda098b29 drm_property_replace_global_blob drivers/gpu/drm/drm EXPORT_SYMBOL +0xfe5abf52 cpufreq_cpu_put vmlinux EXPORT_SYMBOL_GPL +0x3cff1916 cpufreq_cpu_get vmlinux EXPORT_SYMBOL_GPL +0xfa4461c4 power_supply_external_power_changed vmlinux EXPORT_SYMBOL_GPL +0x1ce444dd mpt_halt_firmware vmlinux EXPORT_SYMBOL +0x14d27f32 scsi_target_block vmlinux EXPORT_SYMBOL_GPL +0x058bcb79 dw_dma_probe vmlinux EXPORT_SYMBOL_GPL +0xdb0a2cae free_buffer_head vmlinux EXPORT_SYMBOL +0xcc40cb4b fget vmlinux EXPORT_SYMBOL +0xfe476039 ktime_get_resolution_ns vmlinux EXPORT_SYMBOL_GPL +0x139cee21 wait_for_completion_io_timeout vmlinux EXPORT_SYMBOL +0x379588c3 kthread_associate_blkcg vmlinux EXPORT_SYMBOL +0x878469bd ZSTD_decompressStream lib/zstd/zstd_decompress EXPORT_SYMBOL +0x6b958320 ib_ud_ip4_csum drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa532656b iscsi_complete_pdu vmlinux EXPORT_SYMBOL_GPL +0x6a891834 fc_queuecommand vmlinux EXPORT_SYMBOL +0xb95dcb11 firmware_request_nowarn vmlinux EXPORT_SYMBOL_GPL +0xfa1eb910 unregister_syscore_ops vmlinux EXPORT_SYMBOL_GPL +0xbc3a9d32 screen_pos vmlinux EXPORT_SYMBOL_GPL +0x2f64415f unregister_acpi_hed_notifier vmlinux EXPORT_SYMBOL_GPL +0x0061f580 pci_reset_bus vmlinux EXPORT_SYMBOL_GPL +0xefebbd40 ioread64be_lo_hi vmlinux EXPORT_SYMBOL +0xaae35068 jbd2_log_start_commit vmlinux EXPORT_SYMBOL +0x72ff440c remove_proc_subtree vmlinux EXPORT_SYMBOL +0xce6db656 rcu_is_watching vmlinux EXPORT_SYMBOL_GPL +0xc3d5904f ieee802154_stop_queue net/mac802154/mac802154 EXPORT_SYMBOL +0x782c6e61 drm_atomic_helper_check_modeset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x9bc43242 rtc_add_groups vmlinux EXPORT_SYMBOL +0xb0ddd5a1 fc_rport_flush_queue vmlinux EXPORT_SYMBOL +0x29e1e204 hdmi_audio_infoframe_pack vmlinux EXPORT_SYMBOL +0x1ee8d6d4 refcount_inc_checked vmlinux EXPORT_SYMBOL +0x44e9a829 match_token vmlinux EXPORT_SYMBOL +0x53856016 jbd2_journal_abort vmlinux EXPORT_SYMBOL +0x42214614 __ftrace_vbprintk vmlinux EXPORT_SYMBOL_GPL +0x57bc19d2 down_write vmlinux EXPORT_SYMBOL +0x318d6fec mutex_is_locked vmlinux 
EXPORT_SYMBOL +0x47c701d6 drm_prime_pages_to_sg drivers/gpu/drm/drm EXPORT_SYMBOL +0x4da71255 compat_ip_getsockopt vmlinux EXPORT_SYMBOL +0x3265f050 ip_route_output_flow vmlinux EXPORT_SYMBOL_GPL +0x97a6a4d4 compat_nf_getsockopt vmlinux EXPORT_SYMBOL +0xb0670c2a compat_mc_getsockopt vmlinux EXPORT_SYMBOL +0xea3c8e4e scsilun_to_int vmlinux EXPORT_SYMBOL +0xe66a253d key_type_logon vmlinux EXPORT_SYMBOL_GPL +0xd870ab98 proc_create_seq_private vmlinux EXPORT_SYMBOL +0xcb9260fe posix_test_lock vmlinux EXPORT_SYMBOL +0xd1c112fb kill_block_super vmlinux EXPORT_SYMBOL +0x32141dbc flush_signals vmlinux EXPORT_SYMBOL +0x519a4b68 nf_ct_remove_expectations net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x967ca382 usb_stor_disconnect drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x96f6df7a cxgbi_ppm_make_ppod_hdr drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL +0x056c6cf8 __inet_stream_connect vmlinux EXPORT_SYMBOL +0xc3e37e49 neigh_lookup_nodev vmlinux EXPORT_SYMBOL +0xc39d1813 kernel_setsockopt vmlinux EXPORT_SYMBOL +0x8cd90413 kernel_getsockopt vmlinux EXPORT_SYMBOL +0x0c470660 scsi_change_queue_depth vmlinux EXPORT_SYMBOL +0x8d96f157 inode_dax vmlinux EXPORT_SYMBOL_GPL +0x0a770832 register_memory_notifier vmlinux EXPORT_SYMBOL +0x15b4a8e8 fwnode_get_mac_address vmlinux EXPORT_SYMBOL +0xdcb547e1 phy_pm_runtime_allow vmlinux EXPORT_SYMBOL_GPL +0x3224b2a9 mpi_read_raw_from_sgl vmlinux EXPORT_SYMBOL_GPL +0xbd6841d4 crc16 vmlinux EXPORT_SYMBOL +0xc7e8dfb2 dquot_operations vmlinux EXPORT_SYMBOL +0xe60e35c4 padata_start vmlinux EXPORT_SYMBOL +0x0f027e84 kallsyms_on_each_symbol vmlinux EXPORT_SYMBOL_GPL +0x970776d4 sched_show_task vmlinux EXPORT_SYMBOL_GPL +0xf82f3657 work_on_cpu vmlinux EXPORT_SYMBOL_GPL +0x1fe0bbd5 nfnetlink_unicast net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0x7af43f70 vhost_log_write drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x303e9d78 drm_primary_helper_destroy drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe950423e devm_nvmem_cell_put vmlinux EXPORT_SYMBOL +0x5abfe27b mpt_raid_phys_disk_pg1 vmlinux EXPORT_SYMBOL +0x2837ac03 nvme_start_ctrl vmlinux EXPORT_SYMBOL_GPL +0x1a62f48d dev_pm_qos_hide_latency_limit vmlinux EXPORT_SYMBOL_GPL +0xaf7179dc iommu_iova_to_phys vmlinux EXPORT_SYMBOL_GPL +0x6b09cfd3 rhltable_init vmlinux EXPORT_SYMBOL_GPL +0x50a90e8d bsearch vmlinux EXPORT_SYMBOL +0x26f4d92a blk_put_request vmlinux EXPORT_SYMBOL +0x6ca4bf88 async_synchronize_full_domain vmlinux EXPORT_SYMBOL_GPL +0xce50e5de ZSTD_compress_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0x86b0ec50 ib_set_device_ops drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x0efbca4c btracker_promotion_already_present drivers/md/dm-cache EXPORT_SYMBOL_GPL +0xfa32bfb4 cdrom_mode_select drivers/cdrom/cdrom EXPORT_SYMBOL +0xb2f646a9 drm_syncobj_create drivers/gpu/drm/drm EXPORT_SYMBOL +0x7f4739c0 sock_zerocopy_put_abort vmlinux EXPORT_SYMBOL_GPL +0xf0533bb0 input_get_timestamp vmlinux EXPORT_SYMBOL +0x0b9ff31e iscsi_tcp_task_xmit vmlinux EXPORT_SYMBOL_GPL +0x818be2a8 crypto_create_tfm vmlinux EXPORT_SYMBOL_GPL +0x572b628d virtio_transport_destruct net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xff925d22 nf_conntrack_in net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x40120d25 ib_sa_join_multicast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3683d1bf edac_pci_free_ctl_info drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x716f208c drm_scdc_set_scrambling drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xef3d4ea6 metadata_dst_alloc_percpu vmlinux EXPORT_SYMBOL_GPL +0x718dc64d 
sock_queue_rcv_skb vmlinux EXPORT_SYMBOL +0xd688716b dm_kcopyd_client_create vmlinux EXPORT_SYMBOL +0x366ac4a3 rc_map_register vmlinux EXPORT_SYMBOL_GPL +0xffc3c987 iscsi_tcp_conn_setup vmlinux EXPORT_SYMBOL_GPL +0x46c0be5b ata_port_schedule_eh vmlinux EXPORT_SYMBOL_GPL +0x1076eab4 nvme_start_queues vmlinux EXPORT_SYMBOL_GPL +0x5009c71d glob_match vmlinux EXPORT_SYMBOL +0x567e8503 rhashtable_init vmlinux EXPORT_SYMBOL_GPL +0x7ad65f6b blk_limits_io_min vmlinux EXPORT_SYMBOL +0x10261ba1 sysfs_remove_files vmlinux EXPORT_SYMBOL_GPL +0x9398d9e5 percpu_up_write vmlinux EXPORT_SYMBOL_GPL +0xc28c984d osd_req_op_cls_init net/ceph/libceph EXPORT_SYMBOL +0x5c2b2422 rpc_setbufsize net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0a621056 ib_umem_odp_alloc_implicit drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x75d6b7c2 __transport_register_session drivers/target/target_core_mod EXPORT_SYMBOL +0xd908ab63 cxgb4_get_srq_entry drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xad0f2b6c unix_table_lock vmlinux EXPORT_SYMBOL_GPL +0x2cfababf rc_unregister_device vmlinux EXPORT_SYMBOL_GPL +0x09ce86a6 serio_interrupt vmlinux EXPORT_SYMBOL +0xc5a8616a scsi_internal_device_unblock_nowait vmlinux EXPORT_SYMBOL_GPL +0x863a276a color_table vmlinux EXPORT_SYMBOL +0x2671f917 devm_clk_hw_register vmlinux EXPORT_SYMBOL_GPL +0x3ada9e06 acpi_check_region vmlinux EXPORT_SYMBOL +0x14c9b6ea put_disk vmlinux EXPORT_SYMBOL +0x9bf421fd add_timer vmlinux EXPORT_SYMBOL +0x9d60b410 __ovs_vport_ops_register net/openvswitch/openvswitch EXPORT_SYMBOL_GPL +0xb86b7575 nf_nat_helper_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x7380dffa argv_split vmlinux EXPORT_SYMBOL +0x5e221710 __ndisc_fill_addr_option vmlinux EXPORT_SYMBOL_GPL +0x0516df4a lwtunnel_get_encap_size vmlinux EXPORT_SYMBOL_GPL +0x058d6178 hid_allocate_device vmlinux EXPORT_SYMBOL_GPL +0x4df802a6 md_set_array_sectors vmlinux EXPORT_SYMBOL +0xc7061ef3 iova_cache_put vmlinux EXPORT_SYMBOL_GPL +0x713dba6a dma_sync_wait vmlinux EXPORT_SYMBOL +0x8b5b25c8 irq_cpu_rmap_add vmlinux EXPORT_SYMBOL +0x34f3484e security_tun_dev_attach_queue vmlinux EXPORT_SYMBOL +0x3dea83a8 iomap_bmap vmlinux EXPORT_SYMBOL_GPL +0xcc799d97 irq_chip_mask_ack_parent vmlinux EXPORT_SYMBOL_GPL +0xe0a097c1 xprt_reconnect_delay net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x361ea36d vhost_work_flush drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xa65e223b iscsit_logout_post_handler drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x09cc81fa dm_btree_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x6dbf37ad cxgb4_l2t_release drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x87ffcc1a drm_add_modes_noedid drivers/gpu/drm/drm EXPORT_SYMBOL +0x96eab78b iosf_mbi_modify arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0x33a1e066 udp_abort vmlinux EXPORT_SYMBOL_GPL +0xa7904be1 __gnet_stats_copy_basic vmlinux EXPORT_SYMBOL +0x58f0ffa1 sock_kzfree_s vmlinux EXPORT_SYMBOL +0xc289e46d cpufreq_generic_frequency_table_verify vmlinux EXPORT_SYMBOL_GPL +0x66483bd5 usb_alloc_dev vmlinux EXPORT_SYMBOL_GPL +0x7a69576b ahci_handle_port_intr vmlinux EXPORT_SYMBOL_GPL +0xb737b185 gen_pool_best_fit vmlinux EXPORT_SYMBOL +0xb352177e find_first_bit vmlinux EXPORT_SYMBOL +0x622dc816 blk_queue_max_hw_sectors vmlinux EXPORT_SYMBOL +0x18c1d032 handle_bad_irq vmlinux EXPORT_SYMBOL_GPL +0xa6aa9857 des_decrypt lib/crypto/libdes EXPORT_SYMBOL_GPL +0x31a89d59 rpc_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x452ee487 svc_xprt_enqueue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5f884777 
drm_warn_on_modeset_not_all_locked drivers/gpu/drm/drm EXPORT_SYMBOL +0x48c293a9 devm_kvasprintf vmlinux EXPORT_SYMBOL +0x1188b26b bus_create_file vmlinux EXPORT_SYMBOL_GPL +0xd4170950 bus_set_iommu vmlinux EXPORT_SYMBOL_GPL +0x0926ac6e crypto_aes_set_key vmlinux EXPORT_SYMBOL_GPL +0x3bdb5d28 alg_test vmlinux EXPORT_SYMBOL_GPL +0xa4bef4cf jbd2__journal_restart vmlinux EXPORT_SYMBOL +0x7ce18c9f from_kqid vmlinux EXPORT_SYMBOL +0xbef43296 console_conditional_schedule vmlinux EXPORT_SYMBOL +0x29eba37f current_is_async vmlinux EXPORT_SYMBOL_GPL +0x0d157843 svc_destroy net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0c7b743d nfs_fattr_init fs/nfs/nfs EXPORT_SYMBOL_GPL +0x0e8ec6ad __fscache_update_cookie fs/fscache/fscache EXPORT_SYMBOL +0x3d021be6 kvm_lapic_switch_to_hv_timer arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x75720a23 qdisc_offload_graft_helper vmlinux EXPORT_SYMBOL +0x1565c213 thermal_remove_hwmon_sysfs vmlinux EXPORT_SYMBOL_GPL +0xec2bda46 ata_scsi_cmd_error_handler vmlinux EXPORT_SYMBOL +0x2c038092 blkdev_issue_discard vmlinux EXPORT_SYMBOL +0x9ba300c2 af_alg_accept vmlinux EXPORT_SYMBOL_GPL +0xab415d05 crypto_unregister_scomp vmlinux EXPORT_SYMBOL_GPL +0x3f4349c0 crypto_unregister_acomp vmlinux EXPORT_SYMBOL_GPL +0x593de99c debugfs_create_symlink vmlinux EXPORT_SYMBOL_GPL +0x45bf60c1 fat_detach vmlinux EXPORT_SYMBOL_GPL +0x93d61b49 ring_buffer_peek vmlinux EXPORT_SYMBOL_GPL +0x69a83b19 handle_untracked_irq vmlinux EXPORT_SYMBOL_GPL +0x63057f4b ib_copy_path_rec_to_user drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x10ee11f0 drm_ioctl drivers/gpu/drm/drm EXPORT_SYMBOL +0xf034f237 vlan_for_each vmlinux EXPORT_SYMBOL +0xb9c7efff __ip_options_compile vmlinux EXPORT_SYMBOL +0x62561ea7 xt_request_find_match vmlinux EXPORT_SYMBOL_GPL +0xd98332d3 fib_notifier_ops_unregister vmlinux EXPORT_SYMBOL +0xb924dd9e iscsi_put_task vmlinux EXPORT_SYMBOL_GPL +0x1908d7f1 dev_pm_domain_set vmlinux EXPORT_SYMBOL_GPL +0x71637414 device_match_devt vmlinux EXPORT_SYMBOL_GPL +0x2e439142 drm_get_panel_orientation_quirk vmlinux EXPORT_SYMBOL +0x6228c21f smp_call_function_single vmlinux EXPORT_SYMBOL +0xc0bca0f1 ZSTD_nextSrcSizeToDecompress lib/zstd/zstd_decompress EXPORT_SYMBOL +0xe44bf59a mlx5_cmd_init_async_ctx drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xdbb72cb5 bcm_phy_get_sset_count drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL +0x5686ddc3 bcm_phy_enable_apd drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL +0xf7403032 drm_atomic_bridge_disable drivers/gpu/drm/drm EXPORT_SYMBOL +0x983cfedf drm_mode_duplicate drivers/gpu/drm/drm EXPORT_SYMBOL +0x4d9b652b rb_erase vmlinux EXPORT_SYMBOL +0xbded9260 cookie_ecn_ok vmlinux EXPORT_SYMBOL +0xf1d899bf devm_phy_get vmlinux EXPORT_SYMBOL_GPL +0xbc0ab8a0 fsnotify_alloc_group vmlinux EXPORT_SYMBOL_GPL +0x56dbf27e __cpuhp_state_add_instance vmlinux EXPORT_SYMBOL_GPL +0x3f9a2b0b rpcauth_get_gssinfo net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd37207bd vhost_dev_reset_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xcadef538 drm_vma_node_is_allowed drivers/gpu/drm/drm EXPORT_SYMBOL +0x740dd9d2 drm_legacy_pci_init drivers/gpu/drm/drm EXPORT_SYMBOL +0x7b372f42 fuse_dequeue_forget fs/fuse/fuse EXPORT_SYMBOL +0xcac9675a __tracepoint_nfs4_pnfs_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xaafdc258 strcasecmp vmlinux EXPORT_SYMBOL +0x63b49aea __netdev_watchdog_up vmlinux EXPORT_SYMBOL_GPL +0xfa355613 hid_quirks_init vmlinux EXPORT_SYMBOL_GPL +0x376f2dd5 ata_sff_busy_sleep vmlinux EXPORT_SYMBOL_GPL +0x058b582a vt_get_leds vmlinux EXPORT_SYMBOL_GPL +0x631c2496 blk_mq_start_hw_queue 
vmlinux EXPORT_SYMBOL +0x04569255 security_cred_getsecid vmlinux EXPORT_SYMBOL +0xe332b076 unregister_kretprobe vmlinux EXPORT_SYMBOL_GPL +0xaaa8ab11 clockevents_register_device vmlinux EXPORT_SYMBOL_GPL +0xcb5d2f0c cache_check net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe27c1fac nf_conntrack_set_hashsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x5b17be06 cast_s4 crypto/cast_common EXPORT_SYMBOL_GPL +0x56bf1a99 tcf_idr_search vmlinux EXPORT_SYMBOL +0x83906c02 fib_rules_seq_read vmlinux EXPORT_SYMBOL_GPL +0xa0f493d9 efi vmlinux EXPORT_SYMBOL +0x0d1dfd4f phy_modify_paged_changed vmlinux EXPORT_SYMBOL +0xa4142f85 phy_speed_down vmlinux EXPORT_SYMBOL_GPL +0xcf2975cf nvme_stop_ctrl vmlinux EXPORT_SYMBOL_GPL +0x5e9add59 tty_unthrottle vmlinux EXPORT_SYMBOL +0xea161f3c security_socket_getpeersec_dgram vmlinux EXPORT_SYMBOL +0xeed24675 set_blocksize vmlinux EXPORT_SYMBOL +0x1f91c342 block_page_mkwrite vmlinux EXPORT_SYMBOL +0xb04d1f7b perf_event_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xcc2dbfd8 irq_domain_check_msi_remap vmlinux EXPORT_SYMBOL_GPL +0x638a9653 memory_add_physaddr_to_nid vmlinux EXPORT_SYMBOL_GPL +0x65cf8831 ZSTD_decompress_usingDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x33a09276 ib_umem_odp_map_dma_pages drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xbe8c420c ib_mr_pool_put drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5c227e85 dst_release_immediate vmlinux EXPORT_SYMBOL +0xa6fc45a6 md_done_sync vmlinux EXPORT_SYMBOL +0x6bddaf1a class_compat_remove_link vmlinux EXPORT_SYMBOL_GPL +0x1c8b2203 translation_pre_enabled vmlinux EXPORT_SYMBOL +0xcf8c3a56 uart_unregister_driver vmlinux EXPORT_SYMBOL +0x0acf7679 dma_issue_pending_all vmlinux EXPORT_SYMBOL +0x83a52f7d devm_pinctrl_unregister vmlinux EXPORT_SYMBOL_GPL +0x76d5322c bsg_job_get vmlinux EXPORT_SYMBOL_GPL +0x970d64e1 bsg_job_put vmlinux EXPORT_SYMBOL_GPL +0xcee2df05 crypto_spawn_tfm vmlinux EXPORT_SYMBOL_GPL +0xc63714eb security_inode_notifysecctx vmlinux EXPORT_SYMBOL +0xd3630444 generic_error_remove_page vmlinux EXPORT_SYMBOL +0x26d6d19b mdev_bus_type drivers/vfio/mdev/mdev EXPORT_SYMBOL_GPL +0x7d7a69ae cxgb4_update_root_dev_clip drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x8ff4521e altera_init drivers/misc/altera-stapl/altera-stapl EXPORT_SYMBOL +0x7a8c6808 ipv6_chk_prefix vmlinux EXPORT_SYMBOL +0x3e010f4a ip6_dst_lookup_flow vmlinux EXPORT_SYMBOL_GPL +0x6b872316 inet_unhash vmlinux EXPORT_SYMBOL_GPL +0x86a98824 ip_fraglist_prepare vmlinux EXPORT_SYMBOL +0x146ce590 dev_nit_active vmlinux EXPORT_SYMBOL_GPL +0xd4835ef8 dmi_check_system vmlinux EXPORT_SYMBOL +0xfe35519b mdio_bus_type vmlinux EXPORT_SYMBOL +0x774e467a blk_execute_rq vmlinux EXPORT_SYMBOL +0x449ac6d6 crypto_register_scomp vmlinux EXPORT_SYMBOL_GPL +0x25e9d4bd resource_list_free vmlinux EXPORT_SYMBOL +0xa94b1ec1 virtio_transport_deliver_tap_pkt net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xfeddb604 rpc_call_sync net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x211e3747 ip_tunnel_init net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x418607d7 dm_cell_visit_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x1881768e i2c_acpi_find_adapter_by_handle drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0xa535d12b drm_hdmi_avi_infoframe_quant_range drivers/gpu/drm/drm EXPORT_SYMBOL +0xca9beaa4 __xa_store vmlinux EXPORT_SYMBOL +0x59588850 vsscanf vmlinux EXPORT_SYMBOL +0x500ab572 ip6_xmit vmlinux EXPORT_SYMBOL +0x10af76ce compat_ip_setsockopt vmlinux EXPORT_SYMBOL +0xdb87697d compat_nf_setsockopt vmlinux EXPORT_SYMBOL +0x46154293 tcf_exts_dump 
vmlinux EXPORT_SYMBOL +0x945f92bf compat_mc_setsockopt vmlinux EXPORT_SYMBOL +0xbad5b8c5 ata_sff_dev_select vmlinux EXPORT_SYMBOL_GPL +0xb5ea26fd ata_noop_qc_prep vmlinux EXPORT_SYMBOL_GPL +0x80f75a68 dev_pm_qos_expose_flags vmlinux EXPORT_SYMBOL_GPL +0xded6a415 acpi_get_object_info vmlinux EXPORT_SYMBOL +0x30b8d647 jbd2_journal_unlock_updates vmlinux EXPORT_SYMBOL +0x82d79b51 sysctl_vfs_cache_pressure vmlinux EXPORT_SYMBOL_GPL +0xf1e046cc panic vmlinux EXPORT_SYMBOL +0x55f95e07 ioremap_prot vmlinux EXPORT_SYMBOL +0xd7dd777b reserve_perfctr_nmi vmlinux EXPORT_SYMBOL +0xa6071a51 o2nm_get_node_by_ip fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x69f4ff25 __camellia_enc_blk_2way arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL +0x5d8c3fa5 udp6_lib_lookup_skb vmlinux EXPORT_SYMBOL_GPL +0xbef333f4 udp4_lib_lookup_skb vmlinux EXPORT_SYMBOL_GPL +0x1efeb08c fwnode_property_match_string vmlinux EXPORT_SYMBOL_GPL +0xb320cc0e sg_init_one vmlinux EXPORT_SYMBOL +0xeb233a45 __kmalloc vmlinux EXPORT_SYMBOL +0x7812c047 __vmalloc vmlinux EXPORT_SYMBOL +0x074a30dd xdr_truncate_encode net/sunrpc/sunrpc EXPORT_SYMBOL +0x266aeb71 drm_property_replace_blob drivers/gpu/drm/drm EXPORT_SYMBOL +0xc7888f02 __fscache_relinquish_cookie fs/fscache/fscache EXPORT_SYMBOL +0xf643d104 hsiphash_4u32 vmlinux EXPORT_SYMBOL +0x9e0fa5ae hsiphash_3u32 vmlinux EXPORT_SYMBOL +0x30acfde9 hsiphash_2u32 vmlinux EXPORT_SYMBOL +0x6481ffe0 hsiphash_1u32 vmlinux EXPORT_SYMBOL +0x24599f22 kobject_init_and_add vmlinux EXPORT_SYMBOL_GPL +0x6ccaba0d tcp_set_keepalive vmlinux EXPORT_SYMBOL_GPL +0x6c3fa288 ohci_suspend vmlinux EXPORT_SYMBOL_GPL +0x14a04b29 ata_sas_port_start vmlinux EXPORT_SYMBOL_GPL +0xce76c257 acpi_get_irq_routing_table vmlinux EXPORT_SYMBOL +0x56a15191 devm_pci_alloc_host_bridge vmlinux EXPORT_SYMBOL +0x138b079f rpc_wake_up_next net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5cb0a24a __tracepoint_bcache_bypass_congested drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x3755f990 gf128mul_init_64k_bbe crypto/gf128mul EXPORT_SYMBOL +0x8eaef9be __ip_select_ident vmlinux EXPORT_SYMBOL +0xaf609d39 skb_gro_receive vmlinux EXPORT_SYMBOL_GPL +0x4ded36d5 hook_info_flag vmlinux EXPORT_SYMBOL +0xc799d229 ata_sg_init vmlinux EXPORT_SYMBOL_GPL +0xb11d9000 tty_dev_name_to_number vmlinux EXPORT_SYMBOL_GPL +0xf16c1745 pci_find_ht_capability vmlinux EXPORT_SYMBOL_GPL +0xfcb5b9fc pci_find_next_ht_capability vmlinux EXPORT_SYMBOL_GPL +0xcdb359cc padata_alloc_shell vmlinux EXPORT_SYMBOL +0xef18ce65 osd_req_op_extent_osd_data_pages net/ceph/libceph EXPORT_SYMBOL +0x449ad0a7 memcmp vmlinux EXPORT_SYMBOL +0x1d40b6f3 idr_for_each vmlinux EXPORT_SYMBOL +0x76fb9ce3 xt_find_match vmlinux EXPORT_SYMBOL +0x744b50c3 nf_nat_hook vmlinux EXPORT_SYMBOL_GPL +0xe7f1abe4 __skb_warn_lro_forwarding vmlinux EXPORT_SYMBOL +0x96c17136 fb_var_to_videomode vmlinux EXPORT_SYMBOL +0x79562b15 devm_of_phy_get vmlinux EXPORT_SYMBOL_GPL +0x2c65619a bio_add_pc_page vmlinux EXPORT_SYMBOL +0xf0b53c6e query_asymmetric_key vmlinux EXPORT_SYMBOL_GPL +0xc6d83bc3 shash_ahash_digest vmlinux EXPORT_SYMBOL_GPL +0x26c10030 locks_init_lock vmlinux EXPORT_SYMBOL +0xf47dfac5 split_page vmlinux EXPORT_SYMBOL_GPL +0xff9c4b56 ZSTD_compressBound lib/zstd/zstd_compress EXPORT_SYMBOL +0xd4de55af ceph_msg_data_add_bvecs net/ceph/libceph EXPORT_SYMBOL +0xfa7458b0 nf_ct_helper_expectfn_find_by_name net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x95258207 vfio_device_data drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x53c6bc72 nfs4_label_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1e6ec17e kvm_task_switch 
arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x7ba578c4 kvm_set_cr4 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xe7a02573 ida_alloc_range vmlinux EXPORT_SYMBOL
+0xccaf9942 inet6_del_offload vmlinux EXPORT_SYMBOL
+0xda89ea3b ip_idents_reserve vmlinux EXPORT_SYMBOL
+0x074d42e5 netif_stacked_transfer_operstate vmlinux EXPORT_SYMBOL
+0x28524b5d serio_reconnect vmlinux EXPORT_SYMBOL
+0x3a814860 genphy_read_mmd_unsupported vmlinux EXPORT_SYMBOL
+0x9e0ad319 fc_slave_alloc vmlinux EXPORT_SYMBOL
+0x8df9dd10 guid_null vmlinux EXPORT_SYMBOL
+0xb5d8bbb5 crypto_type_has_alg vmlinux EXPORT_SYMBOL_GPL
+0x0c270e25 key_set_timeout vmlinux EXPORT_SYMBOL_GPL
+0x79872483 d_alloc_name vmlinux EXPORT_SYMBOL
+0xbaba8099 dmam_alloc_attrs vmlinux EXPORT_SYMBOL
+0xe25ee9d3 _raw_write_lock_irqsave vmlinux EXPORT_SYMBOL
+0x37f60587 bcm_phy_read_misc drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x59c2ebd4 drm_encoder_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x72c4c944 power_supply_property_is_writeable vmlinux EXPORT_SYMBOL_GPL
+0xfe0e7cd3 apei_exec_post_unmap_gars vmlinux EXPORT_SYMBOL_GPL
+0x05fa2257 pinctrl_dev_get_drvdata vmlinux EXPORT_SYMBOL_GPL
+0x5635fdba devm_ioport_unmap vmlinux EXPORT_SYMBOL
+0x8d55bb8a qid_eq vmlinux EXPORT_SYMBOL
+0x7a92382b cont_write_begin vmlinux EXPORT_SYMBOL
+0x08d3bf02 trace_vprintk vmlinux EXPORT_SYMBOL_GPL
+0x98e508ef ignore_console_lock_warning vmlinux EXPORT_SYMBOL
+0x52ecbc75 crc_ccitt lib/crc-ccitt EXPORT_SYMBOL
+0x0208440b udp_tunnel_xmit_skb net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0xc7650e0c vfio_virqfd_enable drivers/vfio/vfio_virqfd EXPORT_SYMBOL_GPL
+0x34d45c77 dm_btree_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x87258bf6 drm_crtc_send_vblank_event drivers/gpu/drm/drm EXPORT_SYMBOL
+0xd262dfcb vscnprintf vmlinux EXPORT_SYMBOL
+0x1f75c78e pktgen_xfrm_outer_mode_output vmlinux EXPORT_SYMBOL_GPL
+0x80c68137 nf_log_buf_close vmlinux EXPORT_SYMBOL_GPL
+0xa2f812f9 phy_10gbit_fec_features_array vmlinux EXPORT_SYMBOL_GPL
+0xc48b7ccf ata_mode_string vmlinux EXPORT_SYMBOL_GPL
+0x53dd51c6 devm_nvdimm_memremap vmlinux EXPORT_SYMBOL_GPL
+0xcdd97d22 dmaenginem_async_device_register vmlinux EXPORT_SYMBOL
+0xd24bf2dd pci_d3cold_disable vmlinux EXPORT_SYMBOL_GPL
+0x581f98da zlib_inflate vmlinux EXPORT_SYMBOL
+0x99edc3dc simple_statfs vmlinux EXPORT_SYMBOL
+0xaaf70afb kmem_cache_alloc_node vmlinux EXPORT_SYMBOL
+0x365e7911 kstrdup_const vmlinux EXPORT_SYMBOL
+0x39576809 param_set_long vmlinux EXPORT_SYMBOL
+0x6988d0ca cpu_dr7 vmlinux EXPORT_SYMBOL
+0x18a71018 xfrm6_tunnel_deregister net/ipv6/tunnel6 EXPORT_SYMBOL
+0x73b6bff9 xfrm4_tunnel_deregister net/ipv4/tunnel4 EXPORT_SYMBOL
+0xcedfc878 dm_bitset_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x05cf0f13 bch_btree_iter_init drivers/md/bcache/bcache EXPORT_SYMBOL
+0xaaa088f9 vxlan_fdb_find_uc drivers/net/vxlan EXPORT_SYMBOL_GPL
+0x6154269c drm_fb_swab16 drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x88eba8be km_policy_notify vmlinux EXPORT_SYMBOL
+0x88ed695a ip_mc_join_group vmlinux EXPORT_SYMBOL
+0x7868e5dd usb_string vmlinux EXPORT_SYMBOL_GPL
+0xa6d5fa0a usb_hub_release_port vmlinux EXPORT_SYMBOL_GPL
+0xd2b10a05 ata_timing_find_mode vmlinux EXPORT_SYMBOL_GPL
+0xdb760f52 __kfifo_free vmlinux EXPORT_SYMBOL
+0xe4019e89 configfs_undepend_item vmlinux EXPORT_SYMBOL
+0xa263892b fscrypt_fname_free_buffer vmlinux EXPORT_SYMBOL
+0x7c643500 nf_ct_unexpect_related net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xbcb85f62 vmci_doorbell_notify drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x150e2ce7 drm_bridge_add drivers/gpu/drm/drm EXPORT_SYMBOL
+0x1219e90c inet_csk_reqsk_queue_hash_add vmlinux EXPORT_SYMBOL_GPL
+0xc5eab18e flow_indr_add_block_cb vmlinux EXPORT_SYMBOL_GPL
+0x38620d7c qlt_xmit_response vmlinux EXPORT_SYMBOL
+0xcae6e3c3 agp_generic_free_gatt_table vmlinux EXPORT_SYMBOL
+0xf441ac43 ioread8_rep vmlinux EXPORT_SYMBOL
+0xb0721290 debugfs_remove vmlinux EXPORT_SYMBOL_GPL
+0x7f5d72f5 lease_modify vmlinux EXPORT_SYMBOL
+0xb216f06f drop_nlink vmlinux EXPORT_SYMBOL
+0xa466eec3 vmf_insert_mixed_mkwrite vmlinux EXPORT_SYMBOL
+0x57eb7127 kthread_cancel_delayed_work_sync vmlinux EXPORT_SYMBOL_GPL
+0xfdf70093 ZSTD_CStreamOutSize lib/zstd/zstd_compress EXPORT_SYMBOL
+0x5707a0c3 be_roce_unregister_driver drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL
+0x86f27420 iosf_mbi_block_punit_i2c_access arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL
+0x0a0ebc08 __xa_cmpxchg vmlinux EXPORT_SYMBOL
+0xf4e758ea xfrm_dst_ifdown vmlinux EXPORT_SYMBOL
+0xa5691332 neigh_seq_start vmlinux EXPORT_SYMBOL
+0x6e7943ec iommu_group_id vmlinux EXPORT_SYMBOL_GPL
+0x9fbe6f26 pci_assign_unassigned_bridge_resources vmlinux EXPORT_SYMBOL_GPL
+0x3f0546a8 ioread32_rep vmlinux EXPORT_SYMBOL
+0x5e486210 virtio_transport_stream_is_active net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x8a161233 osd_req_op_alloc_hint_init net/ceph/libceph EXPORT_SYMBOL
+0x3cf24646 rpc_put_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x64578a02 kvm_write_guest_virt_system arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xdd64e639 strscpy vmlinux EXPORT_SYMBOL
+0x5792f848 strlcpy vmlinux EXPORT_SYMBOL
+0x9166fada strncpy vmlinux EXPORT_SYMBOL
+0x2364d7b0 eth_header_cache_update vmlinux EXPORT_SYMBOL
+0x18ff8b54 usb_get_dr_mode vmlinux EXPORT_SYMBOL_GPL
+0x4eac4c61 iscsi_tcp_conn_teardown vmlinux EXPORT_SYMBOL_GPL
+0x8db2c04f software_node_register_nodes vmlinux EXPORT_SYMBOL_GPL
+0x07906da6 virtqueue_disable_cb vmlinux EXPORT_SYMBOL_GPL
+0xe2b5e146 refcount_inc_not_zero_checked vmlinux EXPORT_SYMBOL
+0x905695ab sg_copy_from_buffer vmlinux EXPORT_SYMBOL
+0x8abacc47 get_max_files vmlinux EXPORT_SYMBOL_GPL
+0x1a45cb6c acpi_disabled vmlinux EXPORT_SYMBOL
+0x1dcc390d ceph_osdc_maybe_request_map net/ceph/libceph EXPORT_SYMBOL
+0x7d97407a nf_reject_ip_tcphdr_put net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL
+0x787ccc4c ib_get_mad_data_offset drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xff05e262 __tracepoint_mlx5_fs_del_rule drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x43adc6f2 mlx5_frag_buf_free drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc7f91c7f drm_mode_get_tile_group drivers/gpu/drm/drm EXPORT_SYMBOL
+0x69d299ec __tracepoint_kvm_page_fault arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xed7c7b91 raw_v6_hashinfo vmlinux EXPORT_SYMBOL_GPL
+0x688ab5e9 tcf_em_tree_destroy vmlinux EXPORT_SYMBOL
+0x515083bf acpi_release_mutex vmlinux EXPORT_SYMBOL
+0xf195c682 fb_invert_cmaps vmlinux EXPORT_SYMBOL
+0x4562a134 __tracepoint_kmem_cache_free vmlinux EXPORT_SYMBOL
+0x5fdd73be vsock_remove_bound net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x29b8c61a ib_create_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x749556a2 mlxsw_afk_key_info_subset drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x37a1a1c2 tcp_register_ulp vmlinux EXPORT_SYMBOL_GPL
+0xe2126b10 driver_create_file vmlinux EXPORT_SYMBOL_GPL
+0xbc263260 serial8250_do_startup vmlinux EXPORT_SYMBOL_GPL
+0x4d0015e2 cpu_hotplug_disable vmlinux EXPORT_SYMBOL_GPL
+0x25ea8d67 ceph_msg_put net/ceph/libceph EXPORT_SYMBOL
+0xf86b9027 drm_writeback_cleanup_job drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2767eae4 drm_plane_force_disable drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3a3cc43a devlink_port_param_driverinit_value_set vmlinux EXPORT_SYMBOL_GPL
+0xdb35aa6c devlink_port_param_driverinit_value_get vmlinux EXPORT_SYMBOL_GPL
+0x16b4f839 skb_get_hash_perturb vmlinux EXPORT_SYMBOL
+0xc8d3a826 fc_frame_alloc_fill vmlinux EXPORT_SYMBOL
+0x45365516 sas_drain_work vmlinux EXPORT_SYMBOL_GPL
+0xee119c44 nd_device_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0x098b71c6 fb_dealloc_cmap vmlinux EXPORT_SYMBOL
+0x3456067b blk_mq_run_hw_queue vmlinux EXPORT_SYMBOL
+0xe023222e jbd2_journal_start_reserved vmlinux EXPORT_SYMBOL
+0xdf5f8b35 blkdev_write_iter vmlinux EXPORT_SYMBOL_GPL
+0xfe5bab7e insert_inode_locked4 vmlinux EXPORT_SYMBOL
+0x41482d8b strndup_user vmlinux EXPORT_SYMBOL
+0xddd58dc0 ring_buffer_reset vmlinux EXPORT_SYMBOL_GPL
+0xbd3fe1e3 disable_hardirq vmlinux EXPORT_SYMBOL_GPL
+0x12ca0739 set_pages_wb vmlinux EXPORT_SYMBOL
+0xd7c06029 ceph_file_layout_to_legacy net/ceph/libceph EXPORT_SYMBOL
+0xfd0a71cd taprio_offload_get net/sched/sch_taprio EXPORT_SYMBOL_GPL
+0x54c37a00 team_modeop_port_enter drivers/net/team/team EXPORT_SYMBOL
+0x49dbb296 drm_atomic_add_affected_planes drivers/gpu/drm/drm EXPORT_SYMBOL
+0x81b07941 devlink_free vmlinux EXPORT_SYMBOL_GPL
+0xc37f9c6e cpufreq_update_policy vmlinux EXPORT_SYMBOL
+0xdebd3fbb mptscsih_remove vmlinux EXPORT_SYMBOL
+0x11fcdb5e to_nd_desc vmlinux EXPORT_SYMBOL_GPL
+0x720521a3 agp_bind_memory vmlinux EXPORT_SYMBOL
+0xe07e5f44 acpi_reconfig_notifier_unregister vmlinux EXPORT_SYMBOL
+0xb221a88e pcie_get_width_cap vmlinux EXPORT_SYMBOL
+0xc6910aa0 do_trace_rdpmc vmlinux EXPORT_SYMBOL
+0x676ddb50 crypto_init_spawn vmlinux EXPORT_SYMBOL_GPL
+0xc1050287 simple_nosetlease vmlinux EXPORT_SYMBOL
+0xc0c73f74 mount_nodev vmlinux EXPORT_SYMBOL
+0x1e391e73 ip6_tnl_get_iflink net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x39341f85 nf_dup_ipv6 net/ipv6/netfilter/nf_dup_ipv6 EXPORT_SYMBOL_GPL
+0xb8696e5c nf_nat_ipv6_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xf56a8946 mlx4_query_diag_counters drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xb12cb668 cxgbi_conn_tx_open drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xb053adda drm_rect_rotate drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2bbcfa1e nfs_clear_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xb911bb58 minmax_running_max vmlinux EXPORT_SYMBOL
+0x17aa6fa6 xfrm_find_acq vmlinux EXPORT_SYMBOL
+0x524c8843 eth_mac_addr vmlinux EXPORT_SYMBOL
+0x4cf1bff0 qlt_unreg_sess vmlinux EXPORT_SYMBOL
+0x1773fbd4 scsi_scan_host vmlinux EXPORT_SYMBOL
+0xe9c90c5e virtio_device_restore vmlinux EXPORT_SYMBOL_GPL
+0x3d7aca23 security_inode_listsecurity vmlinux EXPORT_SYMBOL
+0x3b8b8ced padata_free vmlinux EXPORT_SYMBOL
+0xcf1b7d72 unregister_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL
+0x6e648da0 ceph_cls_break_lock net/ceph/libceph EXPORT_SYMBOL
+0x1de558c1 nft_reject_icmpv6_code net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x8c998a26 iscsit_build_reject drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x1e66d446 iscsit_handle_snack drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x9bc00eba adf_devmgr_rm_dev drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x2417c5c4 dm_btree_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x41efdeaf radix_tree_lookup_slot vmlinux EXPORT_SYMBOL
+0xbf26aea7 dcb_ieee_getapp_prio_dscp_mask_map vmlinux EXPORT_SYMBOL
+0xe028d75b tcp_sendpage vmlinux EXPORT_SYMBOL
+0xcdc1edab ata_slave_link_init vmlinux EXPORT_SYMBOL_GPL
+0xe5883bd9 class_compat_unregister vmlinux EXPORT_SYMBOL_GPL
+0x01ed152e allocate_resource vmlinux EXPORT_SYMBOL
+0xc067732c ceph_parse_ips net/ceph/libceph EXPORT_SYMBOL
+0x6dd8e457 uio_event_notify drivers/uio/uio EXPORT_SYMBOL_GPL
+0x55de47b5 alloc_mdio_bitbang drivers/net/phy/mdio-bitbang EXPORT_SYMBOL
+0xc7856a3d inet6addr_notifier_call_chain vmlinux EXPORT_SYMBOL
+0xbb6a3cbd devlink_fmsg_arr_pair_nest_start vmlinux EXPORT_SYMBOL_GPL
+0xc935840b cpuidle_unregister vmlinux EXPORT_SYMBOL_GPL
+0xc3255142 phy_10gbit_features vmlinux EXPORT_SYMBOL_GPL
+0xb3428a50 pm_schedule_suspend vmlinux EXPORT_SYMBOL_GPL
+0x5beb7f78 clk_bulk_get_all vmlinux EXPORT_SYMBOL
+0x321eef43 pci_ats_queue_depth vmlinux EXPORT_SYMBOL_GPL
+0xc962173c fscrypt_ioctl_remove_key_all_users vmlinux EXPORT_SYMBOL_GPL
+0xc08647ff ring_buffer_bytes_cpu vmlinux EXPORT_SYMBOL_GPL
+0xcbbf0a6f audit_log_task_context vmlinux EXPORT_SYMBOL
+0xbfaf0413 __vsock_core_init net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xc66be8fb ib_drain_rq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc5260a67 ib_drain_sq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc2a36e6b drm_connector_attach_max_bpc_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa47826e4 drm_dp_calc_pbn_mode drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x91f2cb6c backlight_device_get_by_type drivers/video/backlight/backlight EXPORT_SYMBOL
+0xd7982a0a tcf_block_netif_keep_dst vmlinux EXPORT_SYMBOL
+0x3f2b1d63 hid_alloc_report_buf vmlinux EXPORT_SYMBOL_GPL
+0xb9d7ad3e nvme_stop_keep_alive vmlinux EXPORT_SYMBOL_GPL
+0x49e9aa19 pinctrl_lookup_state vmlinux EXPORT_SYMBOL_GPL
+0xf9a054b5 __round_jiffies vmlinux EXPORT_SYMBOL_GPL
+0xf0320049 kthread_create_worker vmlinux EXPORT_SYMBOL
+0xee9f5707 rdma_rw_ctx_wrs drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x9a34aae7 ib_process_cq_direct drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x792de9a2 target_nacl_find_deve drivers/target/target_core_mod EXPORT_SYMBOL
+0xf4909bea mlxsw_core_port_type_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xefe73979 simd_skcipher_free crypto/crypto_simd EXPORT_SYMBOL_GPL
+0xd28437e0 nfs_write_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x597f54c0 native_restore_fl vmlinux EXPORT_SYMBOL
+0xb307c909 devlink_fmsg_u64_pair_put vmlinux EXPORT_SYMBOL_GPL
+0xe1094aa4 page_pool_alloc_pages vmlinux EXPORT_SYMBOL
+0x806ff464 serial8250_em485_destroy vmlinux EXPORT_SYMBOL_GPL
+0xd4621bfb security_sb_clone_mnt_opts vmlinux EXPORT_SYMBOL
+0x9fdde158 vfs_rmdir vmlinux EXPORT_SYMBOL
+0x3c9ced00 generic_shutdown_super vmlinux EXPORT_SYMBOL
+0xc9e62a19 ptrace_pre_hook vmlinux EXPORT_SYMBOL
+0x18e37a9b ocfs2_stack_glue_unregister fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x4b1756c3 xt_compat_target_to_user vmlinux EXPORT_SYMBOL_GPL
+0xa86ada21 netdev_master_upper_dev_link vmlinux EXPORT_SYMBOL
+0xe8708277 sas_port_delete_phy vmlinux EXPORT_SYMBOL
+0x5fdd53e2 sas_disable_tlr vmlinux EXPORT_SYMBOL_GPL
+0xae882f40 scsi_track_queue_full vmlinux EXPORT_SYMBOL
+0xbcc69b89 tpm_unseal_trusted vmlinux EXPORT_SYMBOL_GPL
+0x735e6a81 acpi_evaluate_integer vmlinux EXPORT_SYMBOL
+0x146289b7 crc16_table vmlinux EXPORT_SYMBOL
+0x937733e3 qid_valid vmlinux EXPORT_SYMBOL
+0x56ab9206 d_alloc_parallel vmlinux EXPORT_SYMBOL
+0xf21e1f9b disable_percpu_irq vmlinux EXPORT_SYMBOL_GPL
+0x1cbd92b0 cpu_mitigations_off vmlinux EXPORT_SYMBOL_GPL
+0xcd47fcc4 arc4_crypt lib/crypto/libarc4 EXPORT_SYMBOL
+0xe7d3a608 svc_fill_symlink_pathname net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf70fab23 mlx5_cmd_cleanup_async_ctx drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x27b46767 o2hb_unregister_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x7e215b77 skb_copy_and_hash_datagram_iter vmlinux EXPORT_SYMBOL
+0x61332ac0 __sock_recv_ts_and_drops vmlinux EXPORT_SYMBOL_GPL
+0xcef0b976 sas_enable_tlr vmlinux EXPORT_SYMBOL_GPL
+0xbf7ee6b5 device_add_properties vmlinux EXPORT_SYMBOL_GPL
+0xa4332d5a jbd2__journal_start vmlinux EXPORT_SYMBOL
+0xd0b8b351 noop_set_page_dirty vmlinux EXPORT_SYMBOL_GPL
+0xd44b99dd vfs_copy_file_range vmlinux EXPORT_SYMBOL
+0x0f09cc34 schedule_timeout_killable vmlinux EXPORT_SYMBOL
+0x4c7d71e8 pm_qos_request_active vmlinux EXPORT_SYMBOL_GPL
+0xc0fcb982 sev_enable_key vmlinux EXPORT_SYMBOL_GPL
+0x93dc2586 pgprot_writethrough vmlinux EXPORT_SYMBOL_GPL
+0xfd8454ed rpcauth_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3bc17c49 nf_nat_masquerade_inet_register_notifiers net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x57e31525 mlx4_mr_rereg_mem_write drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xcc4a991e mlx4_get_default_counter_index drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x95ed8ffb dev2t3cdev drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0x9b8a53f5 drm_ht_just_insert_please drivers/gpu/drm/drm EXPORT_SYMBOL
+0xda2a22a6 kvm_apic_update_ppr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x0139aafa xsk_clear_tx_need_wakeup vmlinux EXPORT_SYMBOL
+0xcb5a258e rtm_getroute_parse_ip_proto vmlinux EXPORT_SYMBOL_GPL
+0x47842b68 sock_no_getname vmlinux EXPORT_SYMBOL
+0x55077d37 fcoe_validate_vport_create vmlinux EXPORT_SYMBOL_GPL
+0x60f99e1b cppc_set_perf vmlinux EXPORT_SYMBOL_GPL
+0x21f7623c acpi_dev_get_first_match_dev vmlinux EXPORT_SYMBOL
+0xb5da1c73 pci_msix_vec_count vmlinux EXPORT_SYMBOL
+0x8b0088d1 LZ4_decompress_safe_usingDict vmlinux EXPORT_SYMBOL
+0x04bd05ac vfs_link vmlinux EXPORT_SYMBOL
+0x94cedfec from_kuid_munged vmlinux EXPORT_SYMBOL
+0x365acda7 set_normalized_timespec64 vmlinux EXPORT_SYMBOL
+0x463d8290 __irq_alloc_descs vmlinux EXPORT_SYMBOL_GPL
+0x8af83909 call_usermodehelper_setup vmlinux EXPORT_SYMBOL
+0x61c66546 netlink_rcv_skb vmlinux EXPORT_SYMBOL
+0x9fa1236b phy_driver_unregister vmlinux EXPORT_SYMBOL
+0xf82abc1d isa_dma_bridge_buggy vmlinux EXPORT_SYMBOL
+0x649a098f crypto_alloc_acomp vmlinux EXPORT_SYMBOL_GPL
+0x6011f4c9 trace_event_reg vmlinux EXPORT_SYMBOL_GPL
+0x07ceeac9 panic_notifier_list vmlinux EXPORT_SYMBOL
+0xd0d0c5c5 kvm_async_pf_task_wait vmlinux EXPORT_SYMBOL_GPL
+0x75792186 get_xsave_addr vmlinux EXPORT_SYMBOL_GPL
+0x9f91f281 ceph_check_fsid net/ceph/libceph EXPORT_SYMBOL
+0x1bbe82d3 ip_set_elem_len net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x4cd54887 rdma_rw_ctx_post drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7c0a7449 rdma_rw_ctx_init drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x11615d03 mlx5_core_destroy_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x5d4272af __xfrm_state_delete vmlinux EXPORT_SYMBOL
+0x91e3305c mbox_client_peek_data vmlinux EXPORT_SYMBOL_GPL
+0x6b159a36 __fcoe_get_lesb vmlinux EXPORT_SYMBOL_GPL
+0x5b5bed30 __acpi_handle_debug vmlinux EXPORT_SYMBOL
+0x3801776b __ioread32_copy vmlinux EXPORT_SYMBOL_GPL
+0x7ddf962f d_delete vmlinux EXPORT_SYMBOL
+0x8b2b9357 mod_timer vmlinux EXPORT_SYMBOL
+0xa20fa4c8 send_sig_mceerr vmlinux EXPORT_SYMBOL
+0x52dcf0ca ib_open_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf245ce3b drm_mode_create_suggested_offset_properties drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7665a95b idr_remove vmlinux EXPORT_SYMBOL_GPL
+0x23087bd0 vc_scrolldelta_helper vmlinux EXPORT_SYMBOL_GPL
+0xbe49252c acpi_os_write_port vmlinux EXPORT_SYMBOL
+0x6851664e wrmsrl_safe_on_cpu vmlinux EXPORT_SYMBOL
+0xa4a5b6eb mnt_set_expiry vmlinux EXPORT_SYMBOL
+0xe1aa307e use_mm vmlinux EXPORT_SYMBOL_GPL
+0x2d41e6f5 __trace_puts vmlinux EXPORT_SYMBOL_GPL
+0xce07d7ec param_get_invbool vmlinux EXPORT_SYMBOL
+0xbf9d1b96 nfsd_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa3a91850 ip_tunnel_newlink net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x294f7578 ct_sip_parse_numerical_param net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x0be1a4d8 amd_unregister_ecc_decoder drivers/edac/edac_mce_amd EXPORT_SYMBOL_GPL
+0x4e1ece42 bcm_phy_write_misc drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xb94f84c5 drm_connector_list_iter_next drivers/gpu/drm/drm EXPORT_SYMBOL
+0x525d5ff1 genl_notify vmlinux EXPORT_SYMBOL
+0xa0dad88e netdev_adjacent_get_private vmlinux EXPORT_SYMBOL
+0x7a61f27a devfreq_suspend_device vmlinux EXPORT_SYMBOL
+0xd18ccda4 dev_pm_opp_put_clkname vmlinux EXPORT_SYMBOL_GPL
+0x0e0bfa9a devm_rtc_device_register vmlinux EXPORT_SYMBOL_GPL
+0x2a47f90c devres_destroy vmlinux EXPORT_SYMBOL_GPL
+0x9c46abf6 tty_hangup vmlinux EXPORT_SYMBOL
+0x67c13ea0 acpi_read vmlinux EXPORT_SYMBOL
+0x35bc336d pci_disable_device vmlinux EXPORT_SYMBOL
+0x8ac3334b net_dim_get_def_rx_moderation vmlinux EXPORT_SYMBOL
+0xe096791e add_page_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x9504df26 irq_wake_thread vmlinux EXPORT_SYMBOL_GPL
+0x7427ee90 prepare_to_swait_exclusive vmlinux EXPORT_SYMBOL
+0xb1e70801 __twofish_setkey crypto/twofish_common EXPORT_SYMBOL_GPL
+0x7b398bfd cryptd_free_aead crypto/cryptd EXPORT_SYMBOL_GPL
+0x02ba6528 fscache_obtained_object fs/fscache/fscache EXPORT_SYMBOL
+0x4816e76c xfrm_lookup_route vmlinux EXPORT_SYMBOL
+0x3f710d55 mdiobus_read vmlinux EXPORT_SYMBOL
+0x6ca0bb6c pm_runtime_forbid vmlinux EXPORT_SYMBOL_GPL
+0x30e74134 tty_termios_copy_hw vmlinux EXPORT_SYMBOL
+0x1e7dd0ab dma_release_channel vmlinux EXPORT_SYMBOL_GPL
+0xb4702696 acpi_dev_get_resources vmlinux EXPORT_SYMBOL_GPL
+0x208fd846 badblocks_set vmlinux EXPORT_SYMBOL_GPL
+0x6092fb56 seq_put_decimal_ll vmlinux EXPORT_SYMBOL
+0x7e7bcf26 acpi_map_cpu vmlinux EXPORT_SYMBOL
+0x800f21c2 virtio_transport_notify_send_post_enqueue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x69cbed95 ceph_osdc_list_watchers net/ceph/libceph EXPORT_SYMBOL
+0x7a969eb0 rpc_pton net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x1f2a9970 rpc_ntop net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0c8db0e1 nf_tproxy_laddr4 net/ipv4/netfilter/nf_tproxy_ipv4 EXPORT_SYMBOL_GPL
+0x86cef180 rdma_addr_size drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3f2a4fae ib_query_port drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xb1094a3a mlx4_wol_read drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x10a53167 drm_atomic_get_plane_state drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5758a4a3 drm_gem_prime_import_dev drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa6ff9496 drm_dp_link_train_clock_recovery_delay drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6589f9d5 nfs_clone_server fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x37f292c4 pmc_atom_write vmlinux EXPORT_SYMBOL_GPL
+0xbc4b4d52 efivars_kobject vmlinux EXPORT_SYMBOL_GPL
+0x37af3190 dm_table_run_md_queue_async vmlinux EXPORT_SYMBOL
+0x7318729c usb_get_maximum_speed vmlinux EXPORT_SYMBOL_GPL
+0x74a0134a mpt_device_driver_deregister vmlinux EXPORT_SYMBOL
+0x3be5c121 nvdimm_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0x05eac780 fwnode_graph_get_remote_node vmlinux EXPORT_SYMBOL_GPL
+0xe8528bc9 virtqueue_poll vmlinux EXPORT_SYMBOL_GPL
+0x2c333187 devm_clk_get vmlinux EXPORT_SYMBOL
+0x7c9a246b pcie_capability_clear_and_set_word vmlinux EXPORT_SYMBOL
+0xfa855d59 PageHuge vmlinux EXPORT_SYMBOL_GPL
+0x12aefd28 irq_domain_remove vmlinux EXPORT_SYMBOL_GPL
+0x33809507 ceph_osdc_abort_requests net/ceph/libceph EXPORT_SYMBOL
+0xdaceb7a6 mdio_mii_ioctl drivers/net/mdio EXPORT_SYMBOL
+0x4e94316c mlx5_get_uars_page drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x82069d36 drm_client_rotation drivers/gpu/drm/drm EXPORT_SYMBOL
+0x380eee25 drm_irq_install drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2414d404 ir_raw_event_store vmlinux EXPORT_SYMBOL_GPL
+0xc3e1f69c fc_eh_timed_out vmlinux EXPORT_SYMBOL
+0x9cb2d574 ahci_error_handler vmlinux EXPORT_SYMBOL_GPL
+0x20af6659 request_firmware_into_buf vmlinux EXPORT_SYMBOL
+0xfbc247a6 con_is_bound vmlinux EXPORT_SYMBOL
+0xa00aca2a dql_completed vmlinux EXPORT_SYMBOL
+0xe63ce2b0 __bio_try_merge_page vmlinux EXPORT_SYMBOL_GPL
+0x499e816e crypto_unregister_scomps vmlinux EXPORT_SYMBOL_GPL
+0x0f16d3e8 crypto_unregister_acomps vmlinux EXPORT_SYMBOL_GPL
+0xb94e4d78 blkcipher_aead_walk_virt_block vmlinux EXPORT_SYMBOL_GPL
+0x9106a791 key_link vmlinux EXPORT_SYMBOL
+0x4a48122b read_cache_page vmlinux EXPORT_SYMBOL
+0xbfe5616d tick_broadcast_oneshot_control vmlinux EXPORT_SYMBOL_GPL
+0x5bdbac4e rcu_unexpedite_gp vmlinux EXPORT_SYMBOL_GPL
+0x8c2921e2 __tracepoint_pelt_irq_tp vmlinux EXPORT_SYMBOL_GPL
+0x194d5ac9 mds_user_clear vmlinux EXPORT_SYMBOL_GPL
+0x934b21ba transport_alloc_session_tags drivers/target/target_core_mod EXPORT_SYMBOL
+0xb79a54ee mdio45_nway_restart drivers/net/mdio EXPORT_SYMBOL
+0x5724d444 mlx4_max_tc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xa0877cc0 nfs4_find_get_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x73408cbc __udp_gso_segment vmlinux EXPORT_SYMBOL_GPL
+0x38d5979f sk_msg_return vmlinux EXPORT_SYMBOL_GPL
+0x81988a48 dev_change_flags vmlinux EXPORT_SYMBOL
+0x52d815ec __skb_gso_segment vmlinux EXPORT_SYMBOL
+0x34f655df ata_dev_set_feature vmlinux EXPORT_SYMBOL_GPL
+0x52dc7885 dquot_drop vmlinux EXPORT_SYMBOL
+0x7f91dad9 dm_cell_unlock_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xb0cade62 xfrm_state_walk vmlinux EXPORT_SYMBOL
+0xe9b8f6d2 inet_gso_segment vmlinux EXPORT_SYMBOL
+0x04a2fcbe mbox_chan_txdone vmlinux EXPORT_SYMBOL_GPL
+0xe13cd8a7 dmi_name_in_vendors vmlinux EXPORT_SYMBOL
+0xd9473877 iscsi_boot_create_host_kset vmlinux EXPORT_SYMBOL_GPL
+0x57c6d1b3 fwnode_get_parent vmlinux EXPORT_SYMBOL_GPL
+0xefcea2e7 acpi_warning vmlinux EXPORT_SYMBOL
+0xbe0110e7 acpi_set_gpe vmlinux EXPORT_SYMBOL
+0xf6c9228c sbitmap_queue_wake_all vmlinux EXPORT_SYMBOL_GPL
+0xb2f2fdce nla_put_nohdr vmlinux EXPORT_SYMBOL
+0x89078aba pid_nr_ns vmlinux EXPORT_SYMBOL_GPL
+0x81221cad amd_nb_num vmlinux EXPORT_SYMBOL_GPL
+0x0f2d7d87 mce_unregister_decode_chain vmlinux EXPORT_SYMBOL_GPL
+0x9258937a e820__mapped_raw_any vmlinux EXPORT_SYMBOL_GPL
+0x49e86a0e ib_get_gids_from_rdma_hdr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xb0717797 mlxsw_afa_block_append_fid_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x5bdccc38 napi_gro_flush vmlinux EXPORT_SYMBOL
+0x946d3e7a sas_free_task vmlinux EXPORT_SYMBOL_GPL
+0xc9be0e5f scsi_set_medium_removal vmlinux EXPORT_SYMBOL
+0x20e71415 ahci_platform_suspend_host vmlinux EXPORT_SYMBOL_GPL
+0x8b989cf9 acpi_bus_can_wakeup vmlinux EXPORT_SYMBOL
+0xa816a195 pinctrl_pm_select_sleep_state vmlinux EXPORT_SYMBOL_GPL
+0xdf99e3d0 blk_stat_enable_accounting vmlinux EXPORT_SYMBOL_GPL
+0x6f889baa keyring_clear vmlinux EXPORT_SYMBOL
+0xc558530d profile_pc vmlinux EXPORT_SYMBOL
+0x9e7d0a51 rdma_restrack_del drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x42e1e7dd rdma_restrack_get drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x65e16da4 mlxsw_afk_key_info_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb1175aff devm_clk_bulk_get_all vmlinux EXPORT_SYMBOL_GPL
+0x22ee2687 acpi_pm_wakeup_event vmlinux EXPORT_SYMBOL_GPL
+0xd08c1451 jbd2_journal_wipe vmlinux EXPORT_SYMBOL
+0xa72cfb7d ioremap_wt vmlinux EXPORT_SYMBOL
+0xfd93ee35 ioremap_wc vmlinux EXPORT_SYMBOL
+0x5f2fcc83 ioremap_uc vmlinux EXPORT_SYMBOL_GPL
+0xb36e4148 ib_sa_pack_path drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x93148e71 mlx4_fmr_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xc310b981 strnstr vmlinux EXPORT_SYMBOL
+0xc88d35b5 netdev_lower_get_next_private_rcu vmlinux EXPORT_SYMBOL
+0x6ef76707 __skb_pad vmlinux EXPORT_SYMBOL
+0x7064fa42 __ps2_command vmlinux EXPORT_SYMBOL
+0x1a5b43c7 usb_block_urb vmlinux EXPORT_SYMBOL_GPL
+0x278b6cd6 clk_register_gpio_mux vmlinux EXPORT_SYMBOL_GPL
+0x5c93de4f fb_set_var vmlinux EXPORT_SYMBOL
+0xe2e646ff __nla_put vmlinux EXPORT_SYMBOL
+0x2ae09d58 __list_lru_init vmlinux EXPORT_SYMBOL_GPL
+0x1b48b46b perf_pmu_migrate_context vmlinux EXPORT_SYMBOL_GPL
+0x0237b57a arch_unregister_cpu vmlinux EXPORT_SYMBOL
+0x1cd54918 inet_diag_register net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0x23af5043 nf_ct_helper_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x13e9c5c0 dm_rh_dirty_log drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x9015b605 bcm_phy_get_strings drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xc60d60d0 pnfs_generic_layout_insert_lseg fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x169938c1 __sysfs_match_string vmlinux EXPORT_SYMBOL
+0xfc6ff7e4 sas_port_mark_backlink vmlinux EXPORT_SYMBOL
+0xd599fd83 ata_pci_sff_prepare_host vmlinux EXPORT_SYMBOL_GPL
+0x84303df9 tpm_is_tpm2 vmlinux EXPORT_SYMBOL_GPL
+0x0cc3b29e acpi_dev_filter_resource_type vmlinux EXPORT_SYMBOL_GPL
+0xfb5355e6 devm_pci_remap_cfg_resource vmlinux EXPORT_SYMBOL
+0xfa4eb041 crypto_register_acomps vmlinux EXPORT_SYMBOL_GPL
+0x0d64e64f blk_trace_startstop vmlinux EXPORT_SYMBOL_GPL
+0x51760917 _raw_spin_lock_irqsave vmlinux EXPORT_SYMBOL
+0x2f79ca93 drm_dp_mst_get_vcpi_slots drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x0a00b34d tcp_done vmlinux EXPORT_SYMBOL_GPL
+0x2fb6de5d add_device_randomness vmlinux EXPORT_SYMBOL
+0x20d65e40 fb_find_nearest_mode vmlinux EXPORT_SYMBOL
+0x978ff884 iscsi_change_param_sprintf drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x7ade1071 dm_tm_destroy drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xdcc3706b cxgbi_sock_free_cpl_skbs drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xa45cf07b o2nm_get_node_by_num fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x6f9a6391 unregister_nfs_version fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x5a4896a8 __put_user_2 vmlinux EXPORT_SYMBOL
+0x8f9c199c __get_user_2 vmlinux EXPORT_SYMBOL
+0x0a575945 xfrm_count_pfkey_auth_supported vmlinux EXPORT_SYMBOL_GPL
+0x0c25ec48 secure_tcpv6_seq vmlinux EXPORT_SYMBOL
+0x6d253dca dmi_match vmlinux EXPORT_SYMBOL_GPL
+0xbe75d7a9 usb_init_urb vmlinux EXPORT_SYMBOL_GPL
+0x7c0f6d62 genphy_aneg_done vmlinux EXPORT_SYMBOL
+0xacb54001 __iscsi_complete_pdu vmlinux EXPORT_SYMBOL_GPL
+0xae8d87a8 iscsi_create_iface vmlinux EXPORT_SYMBOL_GPL
+0x71650819 add_bootloader_randomness vmlinux EXPORT_SYMBOL_GPL
+0x23f70f5f serial8250_rpm_get vmlinux EXPORT_SYMBOL_GPL
+0x91358a47 badblocks_clear vmlinux EXPORT_SYMBOL_GPL
+0x0c96337c crypto_alloc_kpp vmlinux EXPORT_SYMBOL_GPL
+0xd781bb69 security_inode_getsecctx vmlinux EXPORT_SYMBOL
+0x4f970f18 fscrypt_enqueue_decrypt_work vmlinux EXPORT_SYMBOL
+0x108bc6e9 wait_on_page_bit_killable vmlinux EXPORT_SYMBOL
+0x7d9b51d8 padata_remove_cpu vmlinux EXPORT_SYMBOL
+0x8d6033fb irq_domain_free_irqs_common vmlinux EXPORT_SYMBOL_GPL
+0x0ce8657c __irq_domain_alloc_fwnode vmlinux EXPORT_SYMBOL_GPL
+0x2bb32ad1 arc4_setkey lib/crypto/libarc4 EXPORT_SYMBOL
+0x64b2265e i2c_acpi_new_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x7c7f6ff2 drm_prime_sg_to_page_addr_arrays drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5f42c1a1 udp_init_sock vmlinux EXPORT_SYMBOL_GPL
+0x29256a03 usb_hcd_unmap_urb_setup_for_dma vmlinux EXPORT_SYMBOL_GPL
+0xb0c9a2da ufshcd_alloc_host vmlinux EXPORT_SYMBOL
+0x1121f17e scsi_device_put vmlinux EXPORT_SYMBOL
+0x3266e61b scsi_device_get vmlinux EXPORT_SYMBOL
+0xcccfb2fa sata_deb_timing_hotplug vmlinux EXPORT_SYMBOL_GPL
+0xaefaf114 of_pm_clk_add_clk vmlinux EXPORT_SYMBOL_GPL
+0x149eb710 __pm_runtime_set_status vmlinux EXPORT_SYMBOL_GPL
+0x65d1bab2 acpi_bios_warning vmlinux EXPORT_SYMBOL
+0xabb174e3 balloon_page_dequeue vmlinux EXPORT_SYMBOL_GPL
+0xd9b65df5 balloon_page_enqueue vmlinux EXPORT_SYMBOL_GPL
+0xea03c2ae kmem_cache_alloc_bulk vmlinux EXPORT_SYMBOL
+0x00b4377c map_vm_area vmlinux EXPORT_SYMBOL_GPL
+0xa29886f4 unregister_shrinker vmlinux EXPORT_SYMBOL
+0x9e423bbc unregister_ftrace_function vmlinux EXPORT_SYMBOL_GPL
+0x2e2360b1 ftrace_set_global_notrace vmlinux EXPORT_SYMBOL_GPL
+0x94c4397e dm_register_path_selector drivers/md/dm-multipath EXPORT_SYMBOL_GPL
+0x16d6906b mlx4_get_slave_node_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x60ae5dba br_vlan_get_proto vmlinux EXPORT_SYMBOL_GPL
+0xf397872f ipv6_dev_get_saddr vmlinux EXPORT_SYMBOL
+0x1366a185 xfrm_policy_flush vmlinux EXPORT_SYMBOL
+0xb9797783 skb_tx_error vmlinux EXPORT_SYMBOL
+0x02c82e2d sock_efree vmlinux EXPORT_SYMBOL
+0xdc866bb5 ovs_netdev_link net/openvswitch/openvswitch EXPORT_SYMBOL_GPL
+0x761fa8e3 mlx5_core_destroy_psv drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xbdd162ff cxgbi_device_unregister drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xe05800eb napi_gro_frags vmlinux EXPORT_SYMBOL
+0xa1092095 netdev_unbind_sb_channel vmlinux EXPORT_SYMBOL
+0x6169721e skb_copy_datagram_from_iter vmlinux EXPORT_SYMBOL
+0xf75d5921 cpufreq_cpu_get_raw vmlinux EXPORT_SYMBOL_GPL
+0xe2b03962 pci_load_saved_state vmlinux EXPORT_SYMBOL_GPL
+0xb129270a key_task_permission vmlinux EXPORT_SYMBOL
+0x3b15c4c4 page_cache_next_miss vmlinux EXPORT_SYMBOL
+0x09437748 ring_buffer_read_events_cpu vmlinux EXPORT_SYMBOL_GPL
+0x9b555c8c pm_suspend_default_s2idle vmlinux EXPORT_SYMBOL_GPL
+0x8677f369 pvclock_get_pvti_cpu0_va vmlinux EXPORT_SYMBOL_GPL
+0xf27bf0d0 ovs_vport_free net/openvswitch/openvswitch EXPORT_SYMBOL_GPL
+0xe5b3f86b inet_diag_unregister net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0x6a3eea2b nf_ct_netns_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x2cf195a1 nf_ct_netns_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xc11c52ef mlx4_mr_hw_get_mpt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x2c07ea6f iavf_unregister_client drivers/net/ethernet/intel/iavf/iavf EXPORT_SYMBOL
+0x0d4961de nf_log_buf_open vmlinux EXPORT_SYMBOL_GPL
+0x0bf0bd74 sk_psock_msg_verdict vmlinux EXPORT_SYMBOL_GPL
+0x0f7d4342 netdev_has_upper_dev_all_rcu vmlinux EXPORT_SYMBOL
+0x1d1a850f sock_sendmsg vmlinux EXPORT_SYMBOL
+0x1c3d0081 sas_domain_attach_transport vmlinux EXPORT_SYMBOL_GPL
+0x8b0e5317 iscsi_offload_mesg vmlinux EXPORT_SYMBOL_GPL
+0x42595e58 vgacon_text_force vmlinux EXPORT_SYMBOL
+0x80423604 import_iovec vmlinux EXPORT_SYMBOL
+0xac5cf7e3 disk_get_part vmlinux EXPORT_SYMBOL_GPL
+0xc43e92b9 trace_seq_bprintf vmlinux EXPORT_SYMBOL_GPL
+0xba6ed991 virtio_transport_stream_rcvhiwat net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x54184748 l2tp_recv_common net/l2tp/l2tp_core EXPORT_SYMBOL
+0x97674fbc kvm_read_guest_cached arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xe5ca33f7 netlink_kernel_release vmlinux EXPORT_SYMBOL
+0x1a5c3db3 __sock_create vmlinux EXPORT_SYMBOL
+0x7e7b04e0 pci_bus_resource_n vmlinux EXPORT_SYMBOL_GPL
+0xf5a20ed2 __genradix_prealloc vmlinux EXPORT_SYMBOL
+0x714ca96b bio_free_pages vmlinux EXPORT_SYMBOL
+0x69585523 __ksize vmlinux EXPORT_SYMBOL
+0x6bd0e573 down_interruptible vmlinux EXPORT_SYMBOL
+0xe2bf56d1 ieee802154_hdr_peek_addrs net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0x62b0fc89 drm_do_get_edid drivers/gpu/drm/drm EXPORT_SYMBOL_GPL
+0x50f65edf ipmi_set_gets_events drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x755b3ee6 tcp_abort vmlinux EXPORT_SYMBOL_GPL
+0x5ef14b98 netif_carrier_off vmlinux EXPORT_SYMBOL
+0x13ed8784 sdev_evt_alloc vmlinux EXPORT_SYMBOL_GPL
+0xb4fa93b1 pinctrl_force_default vmlinux EXPORT_SYMBOL_GPL
+0x4a6eca9f lease_get_mtime vmlinux EXPORT_SYMBOL
+0xf7f2f796 sb_set_blocksize vmlinux EXPORT_SYMBOL
+0xa41fc740 srcu_barrier vmlinux EXPORT_SYMBOL_GPL
+0xb0adaeca kthread_unpark vmlinux EXPORT_SYMBOL_GPL
+0xce1cc3e4 cache_seq_start_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe60fdba2 nf_dup_netdev_egress net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL
+0xd6e1334e edac_pci_add_device drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x4406d768 inet_csk_reqsk_queue_drop vmlinux EXPORT_SYMBOL
+0x13cb1428 nf_ct_hook vmlinux EXPORT_SYMBOL_GPL
+0xdd372dbb flow_block_cb_incref vmlinux EXPORT_SYMBOL
+0x8c0215f2 pm_system_wakeup vmlinux EXPORT_SYMBOL_GPL
+0xbb296c9c pinctrl_utils_free_map vmlinux EXPORT_SYMBOL_GPL
+0x3744cf36 vmalloc_to_pfn vmlinux EXPORT_SYMBOL
+0x9c0146e0 access_process_vm vmlinux EXPORT_SYMBOL_GPL
+0xa44fbefa __tracepoint_xdp_exception vmlinux EXPORT_SYMBOL_GPL
+0x68db49b7 get_task_mm vmlinux EXPORT_SYMBOL_GPL
+0x65b108bd mlx5_modify_nic_vport_promisc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x42f2c81f nfs4_client_id_uniquifier fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x551bd071 __rb_erase_color vmlinux EXPORT_SYMBOL
+0xfeb5d0aa verify_spi_info vmlinux EXPORT_SYMBOL
+0x6b050202 udp_skb_destructor vmlinux EXPORT_SYMBOL
+0x14c6baae inet_csk_prepare_forced_close vmlinux EXPORT_SYMBOL
+0xb8f2ca16 devlink_dpipe_match_put vmlinux EXPORT_SYMBOL_GPL
+0x7355188d nvdimm_cmd_mask vmlinux EXPORT_SYMBOL_GPL
+0x7bb50b88 acpi_write vmlinux EXPORT_SYMBOL
+0x7452adff user_return_notifier_register vmlinux EXPORT_SYMBOL_GPL
+0x2951a872 trace_clock_local vmlinux EXPORT_SYMBOL_GPL
+0xd32c8916 rpc_task_release_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0a89c4d4 nft_unregister_flowtable_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x4e74f9e9 nf_ct_expect_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xc7188d68 iscsit_register_transport drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x8854d198 mlxsw_reg_write drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x9a120a47 drm_plane_from_index drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9292cecf register_tcf_proto_ops vmlinux EXPORT_SYMBOL
+0x79c60c75 xdp_attachment_query vmlinux EXPORT_SYMBOL_GPL
+0x31d13e7f md_handle_request vmlinux EXPORT_SYMBOL
+0xb9b9df41 usb_amd_dev_put vmlinux EXPORT_SYMBOL_GPL
+0x89485687 iommu_group_put vmlinux EXPORT_SYMBOL_GPL
+0xb47a58a4 iommu_group_get vmlinux EXPORT_SYMBOL_GPL
+0xa48dfd4a pci_pasid_features vmlinux EXPORT_SYMBOL_GPL
+0xbff4a88f pci_ignore_hotplug vmlinux EXPORT_SYMBOL_GPL
+0x34d1f09d crypto_unregister_rng vmlinux EXPORT_SYMBOL_GPL
+0x0d3a1f86 crypto_unregister_alg vmlinux EXPORT_SYMBOL_GPL
+0xa1fe66ce lru_cache_add_file vmlinux EXPORT_SYMBOL
+0x1d019a13 sched_trace_rq_avg_rt vmlinux EXPORT_SYMBOL_GPL
+0x38f33bed dump_fpu vmlinux EXPORT_SYMBOL
+0x21efe1be synproxy_send_client_synack net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0xb968cc98 adf_enable_aer drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x4a6ed376 mlxsw_core_port_fini drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x990bf0e8 mlx4_set_vf_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x5e95a4b2 o2net_send_message_vec fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0xdfb95f81 fuse_conn_get fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x99341b41 serpent_xts_dec_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0xbdfa6cc0 serpent_xts_enc_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0x9f39f45c qdisc_watchdog_schedule_ns vmlinux EXPORT_SYMBOL
+0x33e37903 nvmem_get_mac_address vmlinux EXPORT_SYMBOL
+0x03c9e4cb netdev_walk_all_lower_dev vmlinux EXPORT_SYMBOL_GPL
+0x00a5d235 __skb_vlan_pop vmlinux EXPORT_SYMBOL
+0x20a0e57c unregister_md_personality vmlinux EXPORT_SYMBOL
+0xaa719e4b hwmon_device_register_with_groups vmlinux EXPORT_SYMBOL_GPL
+0xe1696d09 iscsi_suspend_tx vmlinux EXPORT_SYMBOL_GPL
+0x5eaf0bff __class_create vmlinux EXPORT_SYMBOL_GPL
+0x556d2606 clk_register_mux_table vmlinux EXPORT_SYMBOL_GPL
+0x27810361 acpi_os_wait_events_complete vmlinux EXPORT_SYMBOL
+0x4c416eb9 LZ4_decompress_fast vmlinux EXPORT_SYMBOL
+0xee109562 kern_path_mountpoint vmlinux EXPORT_SYMBOL
+0x95e031be filp_open vmlinux EXPORT_SYMBOL
+0x3a8bbb8e trace_clock_jiffies vmlinux EXPORT_SYMBOL_GPL
+0x6608822d smpboot_register_percpu_thread vmlinux EXPORT_SYMBOL_GPL
+0xbb856561 ip_vs_tcp_conn_listen net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xd6593e78 nf_flow_offload_ipv6_hook net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0xfb41163e tap_get_ptr_ring drivers/net/tap EXPORT_SYMBOL_GPL
+0xaab23340 xfrm_calg_get_byname vmlinux EXPORT_SYMBOL_GPL
+0x2ebe3135 cpu_is_hotpluggable vmlinux EXPORT_SYMBOL_GPL
+0xa2a7adfd agp_allocate_memory vmlinux EXPORT_SYMBOL
+0xf0cef2bf pci_read_vpd vmlinux EXPORT_SYMBOL
+0x3d210724 gen_pool_dma_zalloc_align vmlinux EXPORT_SYMBOL
+0x63ff40bb crypto_sha256_finup vmlinux EXPORT_SYMBOL
+0x6636c3c9 irq_set_vcpu_affinity vmlinux EXPORT_SYMBOL_GPL
+0x4f1ab292 ipv6_synproxy_hook net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x70fdb530 ipv4_synproxy_hook net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x3bb60690 nfnl_acct_overquota net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL
+0x51ee5924 pingv6_prot vmlinux EXPORT_SYMBOL_GPL
+0xe7eee3d5 __cookie_v4_init_sequence vmlinux EXPORT_SYMBOL_GPL
+0xeca7949e dm_bufio_client_destroy vmlinux EXPORT_SYMBOL_GPL
+0xae2f72a1 md_bitmap_unplug vmlinux EXPORT_SYMBOL
+0x19876274 __tracepoint_nvme_sq vmlinux EXPORT_SYMBOL_GPL
+0x955cc030 __clk_get_hw vmlinux EXPORT_SYMBOL_GPL
+0x03ae51e5 blk_pm_runtime_init vmlinux EXPORT_SYMBOL
+0x5bb60a74 bsg_setup_queue vmlinux EXPORT_SYMBOL_GPL
+0xddfbf20d d_exact_alias vmlinux EXPORT_SYMBOL
+0x5a41c20f __free_pages vmlinux EXPORT_SYMBOL
+0x54e9052c tracepoint_probe_register vmlinux EXPORT_SYMBOL_GPL
+0x098d2ed9 lc_try_get lib/lru_cache EXPORT_SYMBOL
+0xd8c0d1ef nft_obj_notify net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x4bf9ba41 nf_nat_amanda_hook net/netfilter/nf_conntrack_amanda EXPORT_SYMBOL_GPL
+0x202693f0 mlxsw_afa_block_cur_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xf7350296 mlx5_toggle_port_link drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x0cb37109 mlx5_core_qp_modify drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x5754471d vnic_dev_get_res_count drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL
+0x6243ac82 __kvm_apic_update_irr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9522855b inet_bind vmlinux EXPORT_SYMBOL
+0x131f5f53 tc_setup_cb_call vmlinux EXPORT_SYMBOL
+0x8ff5be3a qdisc_watchdog_init vmlinux EXPORT_SYMBOL
+0x83317253 power_supply_class vmlinux EXPORT_SYMBOL_GPL
+0xc12acd76 rtc_nvmem_register vmlinux EXPORT_SYMBOL_GPL
+0x08d6feb8 usb_hcd_end_port_resume vmlinux EXPORT_SYMBOL_GPL
+0x7073c04f phy_10_100_features_array vmlinux EXPORT_SYMBOL_GPL
+0x7eb9ca09 class_find_device vmlinux EXPORT_SYMBOL_GPL
+0x913c0754 iommu_aux_attach_device vmlinux EXPORT_SYMBOL_GPL
+0x3020c9b0 msi_desc_to_pci_sysdata vmlinux EXPORT_SYMBOL_GPL
+0x3b3e97ff pci_scan_child_bus vmlinux EXPORT_SYMBOL_GPL
+0xfe66694a blk_mq_run_hw_queues vmlinux EXPORT_SYMBOL
+0x471b1a4c __register_binfmt vmlinux EXPORT_SYMBOL
+0xbfbe53d5 lc_get_cumulative lib/lru_cache EXPORT_SYMBOL
+0x392872de drm_fb_memcpy drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xb9a47af6 kvm_mmu_unprotect_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x74a1c913 xfrm_init_state vmlinux EXPORT_SYMBOL
+0xe1e7e40c rtnl_nla_parse_ifla vmlinux EXPORT_SYMBOL
+0x87fd91ef alloc_skb_with_frags vmlinux EXPORT_SYMBOL
+0xfd4fbd84 sb800_prefetch vmlinux EXPORT_SYMBOL_GPL
+0xef1f6e23 apei_resources_request vmlinux EXPORT_SYMBOL_GPL
+0x90c8498c apei_exec_write_register vmlinux EXPORT_SYMBOL_GPL
+0xf5a5b2cb pci_assign_resource vmlinux EXPORT_SYMBOL
+0xb3f49446 kstrtos8_from_user vmlinux EXPORT_SYMBOL
+0x7052ec41 crypto_unregister_aead vmlinux EXPORT_SYMBOL_GPL
+0x19988203 crypto_init_spawn2 vmlinux EXPORT_SYMBOL_GPL
+0xae9516f2 linear_hugepage_index vmlinux EXPORT_SYMBOL_GPL
+0xf31b191e ceph_osdc_copy_from net/ceph/libceph EXPORT_SYMBOL
+0x0f2beda9 rdma_destroy_ah_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x81462a98 rdma_destroy_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd1a82f0b mlxsw_core_lag_mapping_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x8ba5fa7e mlxsw_core_lag_mapping_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x9330a153 cxgb4_sync_txq_pidx drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x773eb172 nfs_file_write fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xa29c2ed8 xfrm_user_policy vmlinux EXPORT_SYMBOL
+0xec924f7d netpoll_poll_disable vmlinux EXPORT_SYMBOL
+0x15de050c netif_napi_add vmlinux EXPORT_SYMBOL
+0xd2c007a8 skb_split vmlinux EXPORT_SYMBOL
+0x1963f1ad input_grab_device vmlinux EXPORT_SYMBOL
+0xdb96e289 xhci_init_driver vmlinux EXPORT_SYMBOL_GPL
+0x40838e2b phy_modify_paged vmlinux EXPORT_SYMBOL
+0xf256917d tty_port_block_til_ready vmlinux EXPORT_SYMBOL
+0xf10de535 ioread8 vmlinux EXPORT_SYMBOL
+0xa78b6dc7 drm_crtc_accurate_vblank_count drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8cf99291 vif_device_init vmlinux EXPORT_SYMBOL
+0x771b3ec7 netlink_broadcast vmlinux EXPORT_SYMBOL
+0x6c375220 __skb_get_hash_symmetric vmlinux EXPORT_SYMBOL_GPL
+0xdf2d2af9 nvmem_cell_read_u32 vmlinux EXPORT_SYMBOL_GPL
+0xa0a4287e nvmem_cell_read_u16 vmlinux EXPORT_SYMBOL_GPL
+0x0cefa687 dev_pm_opp_get_max_clock_latency vmlinux EXPORT_SYMBOL_GPL
+0x0343bdf1 __i2c_board_list vmlinux EXPORT_SYMBOL_GPL
+0x8e6fa8b5 apei_exec_pre_map_gars vmlinux EXPORT_SYMBOL_GPL
+0xc510eeac shmem_file_setup vmlinux EXPORT_SYMBOL_GPL
+0xc631580a console_unlock vmlinux EXPORT_SYMBOL
+0x8b8059bd in_group_p vmlinux EXPORT_SYMBOL
+0x53c47c53 apic vmlinux EXPORT_SYMBOL_GPL
+0x6b7ad162 udp_sendmsg vmlinux EXPORT_SYMBOL
+0x1ecfe49f tcp_md5_hash_key vmlinux EXPORT_SYMBOL
+0x5f9e1a1a __tracepoint_neigh_event_send_dead vmlinux EXPORT_SYMBOL_GPL
+0x22648c02 __tracepoint_neigh_event_send_done vmlinux EXPORT_SYMBOL_GPL
+0xa544f52e ipv6_bpf_stub vmlinux EXPORT_SYMBOL_GPL
+0xe4b818c3 phy_speed_to_str vmlinux EXPORT_SYMBOL_GPL
+0xf29f8515 __kfifo_dma_out_prepare_r vmlinux EXPORT_SYMBOL
+0x032be17d blkg_prfill_rwstat vmlinux EXPORT_SYMBOL_GPL
+0xc830df23 elv_rb_former_request vmlinux EXPORT_SYMBOL
+0x433ae21c user_preparse vmlinux EXPORT_SYMBOL_GPL
+0x3a51527e bpf_trace_run9 vmlinux EXPORT_SYMBOL_GPL
+0x605c21c9 bpf_trace_run8 vmlinux EXPORT_SYMBOL_GPL
+0xa9ab03fd bpf_trace_run7 vmlinux EXPORT_SYMBOL_GPL
+0xef45c26f bpf_trace_run6 vmlinux EXPORT_SYMBOL_GPL
+0x338b1976 bpf_trace_run5 vmlinux EXPORT_SYMBOL_GPL
+0xb9ae00cc bpf_trace_run4 vmlinux EXPORT_SYMBOL_GPL
+0xee80ce58 bpf_trace_run3 vmlinux EXPORT_SYMBOL_GPL
+0x01b84853 bpf_trace_run2 vmlinux EXPORT_SYMBOL_GPL
+0xfdf92027 bpf_trace_run1 vmlinux EXPORT_SYMBOL_GPL
+0x554fe5d1 drm_crtc_vblank_reset drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9648d9fc nfs4_test_deviceid_unavailable fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x1511070a dcb_ieee_delapp vmlinux EXPORT_SYMBOL
+0xce673bfc dcb_ieee_setapp vmlinux EXPORT_SYMBOL
+0xf53d4c26 qdisc_class_hash_destroy vmlinux EXPORT_SYMBOL
+0xe5fb276a usb_kill_urb vmlinux EXPORT_SYMBOL_GPL
+0xe6c26607 usb_get_intf vmlinux EXPORT_SYMBOL_GPL
+0xee487076 iscsi_destroy_all_flashnode vmlinux EXPORT_SYMBOL_GPL
+0xc09cb38b ata_sff_lost_interrupt vmlinux EXPORT_SYMBOL_GPL
+0xd09a5087 devm_request_pci_bus_resources vmlinux EXPORT_SYMBOL_GPL
+0xd0f34c3f blk_mq_bio_list_merge vmlinux EXPORT_SYMBOL_GPL
+0xa6e1a69d kick_all_cpus_sync vmlinux EXPORT_SYMBOL_GPL
+0x911166a2 dm_bio_prison_alloc_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xbc16ace5 drm_gem_prime_import drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6b221e88 drm_helper_encoder_in_use drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5852a981 crypto_finalize_hash_request crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xeaad96f9 sbitmap_queue_clear vmlinux EXPORT_SYMBOL_GPL
+0x83fae3bd key_type_asymmetric vmlinux EXPORT_SYMBOL_GPL
+0x3a938892 proc_get_parent_data vmlinux EXPORT_SYMBOL_GPL
+0x81da92e9 audit_log vmlinux EXPORT_SYMBOL
+0xdd77c4dc set_security_override_from_ctx vmlinux EXPORT_SYMBOL
+0x9dd97d41 xdr_process_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x308e5d25 ip_tunnel_uninit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x0871ffd6 transport_set_vpd_ident drivers/target/target_core_mod EXPORT_SYMBOL
+0x87c934be dm_tm_inc drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xc324efb9 drm_edid_to_sad drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe322e2fb sk_wait_data vmlinux EXPORT_SYMBOL
+0xd9f8badf usb_autopm_put_interface_no_suspend vmlinux EXPORT_SYMBOL_GPL
+0xdf138a81 ata_sff_wait_after_reset vmlinux EXPORT_SYMBOL_GPL
+0xae2e0bce tpm_try_get_ops vmlinux EXPORT_SYMBOL_GPL
+0x6d3c301f pci_enable_wake vmlinux EXPORT_SYMBOL
+0x1642abe9 __blk_mq_end_request vmlinux EXPORT_SYMBOL
+0xf7a476d1 skcipher_walk_complete vmlinux EXPORT_SYMBOL_GPL
+0xec3476f0 crypto_remove_spawns vmlinux EXPORT_SYMBOL_GPL
+0x3dd1f8a9 ring_buffer_empty_cpu vmlinux EXPORT_SYMBOL_GPL
+0xbf63bb61 nft_set_gc_batch_release net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xe2631042 mlx5_core_modify_sq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc82ddbb6 mlx5_core_modify_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xd898acef mlx5_core_modify_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xeee573a9 mlx4_unbond drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xd477af3c mlx4_set_vf_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xdb5e44b5 drm_atomic_get_new_private_obj_state drivers/gpu/drm/drm EXPORT_SYMBOL
+0x1f03dbcb drm_atomic_helper_commit_tail_rpm drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x36ab46b0 strp_init vmlinux EXPORT_SYMBOL_GPL
+0x1f9fb55b ip6_input vmlinux EXPORT_SYMBOL_GPL
+0xeca3cf37 ip6tun_encaps vmlinux EXPORT_SYMBOL
+0x4db99480 xt_replace_table vmlinux EXPORT_SYMBOL_GPL
+0xd1998f29 lwtunnel_state_alloc vmlinux EXPORT_SYMBOL_GPL
+0x65e942c9 __netpoll_setup vmlinux EXPORT_SYMBOL_GPL
+0x0b7b0e96 put_cmsg_scm_timestamping vmlinux EXPORT_SYMBOL
+0xf6c71a25 cper_severity_str vmlinux EXPORT_SYMBOL_GPL
+0x6c66f9c9 acpi_lock_battery_dir vmlinux EXPORT_SYMBOL
+0xce2c27ad pci_free_host_bridge vmlinux EXPORT_SYMBOL
+0x20cbb30a __percpu_counter_init vmlinux EXPORT_SYMBOL
+0x920cc389 visitorl vmlinux EXPORT_SYMBOL_GPL
+0x53dd848c get_user_pages_remote vmlinux EXPORT_SYMBOL
+0x63cc5281 irq_set_affinity_notifier vmlinux EXPORT_SYMBOL_GPL
+0x56398615 mark_tsc_unstable vmlinux EXPORT_SYMBOL_GPL
+0x8c448976 ceph_monc_want_map net/ceph/libceph EXPORT_SYMBOL
+0x80a2ea4b rdma_resolve_ip drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc12259d9 usb_stor_bulk_transfer_sg drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x30a9baea mlx5_modify_nic_vport_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x66d414f8 pnfs_generic_prepare_to_resend_writes fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xa1a85d5e kvm_init_mmu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa1691b63 xas_find_marked vmlinux EXPORT_SYMBOL_GPL
+0x8a7b3ea4 inet_hashinfo_init vmlinux EXPORT_SYMBOL_GPL
+0x8f1bc8f4 ipv4_sk_redirect vmlinux EXPORT_SYMBOL_GPL
+0xa99ef899 devlink_fmsg_bool_pair_put vmlinux EXPORT_SYMBOL_GPL
+0x2edf6a74 input_mt_report_pointer_emulation vmlinux EXPORT_SYMBOL
+0xea320088 iscsi_host_set_param vmlinux EXPORT_SYMBOL_GPL
+0x7cb8b1e5 sas_task_abort vmlinux EXPORT_SYMBOL_GPL
+0x036b533c tpm_put_ops vmlinux EXPORT_SYMBOL_GPL
+0x01ae8d95 tty_ldisc_receive_buf vmlinux EXPORT_SYMBOL_GPL
+0x2234ca51 acpi_match_platform_list vmlinux EXPORT_SYMBOL
+0x35a798bc pci_prg_resp_pasid_required vmlinux EXPORT_SYMBOL_GPL
+0x27b78365 pci_ioremap_bar vmlinux EXPORT_SYMBOL_GPL
+0xc2f992d6 end_buffer_read_sync vmlinux EXPORT_SYMBOL
+0x0e476067 d_add vmlinux EXPORT_SYMBOL
+0x9d151678 set_cpus_allowed_ptr vmlinux EXPORT_SYMBOL_GPL
+0x0b0f53a5 drm_calc_vbltimestamp_from_scanoutpos drivers/gpu/drm/drm EXPORT_SYMBOL
+0x10c62b61 __drm_printfn_debug drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb9e7228f crypto_finalize_ablkcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x43a8e652 tcf_em_tree_dump vmlinux EXPORT_SYMBOL
+0x29aae5bf neigh_for_each vmlinux EXPORT_SYMBOL
+0xa5f2e079 rtc_initialize_alarm vmlinux EXPORT_SYMBOL_GPL
+0x6971447a rtc_month_days vmlinux EXPORT_SYMBOL
+0x7e64181d usb_calc_bus_time vmlinux EXPORT_SYMBOL_GPL
+0xfbfc20bf sas_register_ha vmlinux EXPORT_SYMBOL_GPL
+0x2eaa4f78 pci_sriov_get_totalvfs vmlinux EXPORT_SYMBOL_GPL
+0x03bea44c devm_pinctrl_register_and_init vmlinux EXPORT_SYMBOL_GPL
+0x77e9eb37 aes_encrypt vmlinux EXPORT_SYMBOL
+0x9f327de1 sg_miter_next vmlinux EXPORT_SYMBOL
+0x12b02ce0 crypto_init_shash_spawn vmlinux EXPORT_SYMBOL_GPL
+0xd874a762 crypto_init_ahash_spawn vmlinux EXPORT_SYMBOL_GPL
+0x85efc7e0 zero_pfn vmlinux EXPORT_SYMBOL
+0x8c90999f param_get_ushort vmlinux EXPORT_SYMBOL
+0x6d881faa svc_sock_update_bufs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9b875cbb nft_reject_dump net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0xa564105a vhost_dev_set_owner drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x1db1c372 enable_vmware_backdoor arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1ae46aae skb_pull vmlinux EXPORT_SYMBOL
+0x123b4c11 usb_find_interface vmlinux EXPORT_SYMBOL_GPL
+0x4f56dcc6 ata_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL
+0x813cf212 nvme_io_timeout vmlinux EXPORT_SYMBOL_GPL
+0xeb904ea2 device_get_match_data vmlinux EXPORT_SYMBOL_GPL
+0x0e4c771d clk_register vmlinux EXPORT_SYMBOL_GPL
+0x19965056 pci_add_new_bus vmlinux EXPORT_SYMBOL
+0x09c8eb55 font_vga_8x16 vmlinux EXPORT_SYMBOL
+0xa44a1307 interval_tree_iter_first vmlinux EXPORT_SYMBOL_GPL
+0x2688ec10 bitmap_zalloc vmlinux EXPORT_SYMBOL
+0xbe39ad43 blkcg_policy_unregister vmlinux EXPORT_SYMBOL_GPL
+0xf3395cb7 sysfs_remove_file_ns vmlinux EXPORT_SYMBOL_GPL
+0x4e8f6ca7 sunrpc_net_id net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x859adfe1 iscsit_setup_scsi_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xe11ecdca cxgbi_device_find_by_lldev drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xeb0881fc drm_i2c_encoder_dpms drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa975020d kvm_mmu_set_mask_ptes arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x8c67dde2 neigh_app_ns vmlinux EXPORT_SYMBOL
+0x24ae1543 netdev_lower_get_first_private_rcu vmlinux EXPORT_SYMBOL
+0xa5035611 iscsi_block_scsi_eh vmlinux EXPORT_SYMBOL_GPL
+0x7db027d0 scsi_get_vpd_page vmlinux EXPORT_SYMBOL_GPL
+0x61e046c9 pm_clk_runtime_suspend vmlinux EXPORT_SYMBOL_GPL
+0x0c2feb9d pm_generic_restore_noirq vmlinux EXPORT_SYMBOL_GPL
+0x4bfba5f1 software_node_unregister_nodes vmlinux EXPORT_SYMBOL_GPL
+0x8ec83c57 register_acpi_bus_type vmlinux EXPORT_SYMBOL_GPL
+0xacccaff4 pci_enable_pasid vmlinux EXPORT_SYMBOL_GPL
+0x7b178afe unlock_system_sleep vmlinux EXPORT_SYMBOL_GPL
+0x71b8a818 mutex_trylock_recursive vmlinux EXPORT_SYMBOL
+0xe98a5ba0 ib_sa_guid_info_rec_query drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x88552402 mlx5_query_hca_vport_pkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xf1e30cbe kvm_vcpu_unmap arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1d111118 xfrm6_protocol_register vmlinux EXPORT_SYMBOL
+0x2eef2303 xfrm_state_check_expire vmlinux EXPORT_SYMBOL
+0x922c8fd3 xfrm4_protocol_register vmlinux EXPORT_SYMBOL
+0x2bb25af6 sock_diag_unregister_inet_compat vmlinux EXPORT_SYMBOL_GPL
+0x281b7fe6 netdev_boot_setup_check vmlinux EXPORT_SYMBOL
+0xbbf2709e sock_i_ino vmlinux EXPORT_SYMBOL
+0x4a5cb105 sock_i_uid vmlinux EXPORT_SYMBOL
+0x706e92b5 cpufreq_global_kobject vmlinux EXPORT_SYMBOL
+0x9d1337b3 phy_attached_info vmlinux EXPORT_SYMBOL
+0xb6f45180 fc_fcp_init vmlinux EXPORT_SYMBOL
+0x2bd60ab9 acpi_reset vmlinux EXPORT_SYMBOL
+0x59c88d23 xprt_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf3df871f iwcm_reject_msg drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0xe7e5c8d0 edac_pci_handle_npe drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0xc2797b61 bch_bkey_try_merge drivers/md/bcache/bcache EXPORT_SYMBOL
+0xf2230a3f ipvlan_count_rx drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x160ca910 __tracepoint_kvm_skinit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9217b2e0 ip6_pol_route vmlinux EXPORT_SYMBOL_GPL
+0xfdceab19 ip_build_and_send_pkt vmlinux EXPORT_SYMBOL_GPL
+0x20952ec4 register_gifconf vmlinux EXPORT_SYMBOL
+0xdfdf914d cpufreq_unregister_driver vmlinux EXPORT_SYMBOL_GPL
+0x8b67f8ca clk_gate_is_enabled vmlinux EXPORT_SYMBOL_GPL
+0x7819bc15 __clk_mux_determine_rate_closest vmlinux EXPORT_SYMBOL_GPL
+0xec4d9e3a clk_get_sys vmlinux EXPORT_SYMBOL
+0xfa752438 pcie_print_link_status vmlinux EXPORT_SYMBOL
+0x33fcf44a __kfifo_out_r vmlinux EXPORT_SYMBOL
+0x42b77666 crypto_req_done vmlinux EXPORT_SYMBOL_GPL
+0x457594fa crypto_alg_list vmlinux EXPORT_SYMBOL_GPL
+0xcf14a452 sync_inode_metadata vmlinux EXPORT_SYMBOL
+0x4d66be46 vm_map_pages vmlinux EXPORT_SYMBOL
+0xc3762aec mempool_alloc vmlinux EXPORT_SYMBOL
+0x4320dd4b unregister_wide_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL
+0xd8622367 free_cgroup_ns vmlinux EXPORT_SYMBOL
+0x86f6b99d synchronize_rcu_expedited vmlinux EXPORT_SYMBOL_GPL
+0xc0f0458a ip_tunnel_unneed_metadata vmlinux EXPORT_SYMBOL_GPL
+0x754804a4 devfreq_update_status vmlinux EXPORT_SYMBOL
+0x674f8ce5 hid_setup_resolution_multiplier vmlinux EXPORT_SYMBOL_GPL
+0xf620a539 ata_do_dev_read_id vmlinux EXPORT_SYMBOL_GPL
+0x20a1b519 acpi_resource_to_address64 vmlinux EXPORT_SYMBOL
+0x3a7f8c5c __srcu_read_lock vmlinux EXPORT_SYMBOL_GPL
+0xd7ab4472 irq_domain_alloc_irqs_parent vmlinux EXPORT_SYMBOL_GPL
+0x8195f483 vfio_unregister_notifier drivers/vfio/vfio EXPORT_SYMBOL
+0xef1e7746 mlx5_add_flow_rules drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xe6faecfd __drm_atomic_helper_set_config drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa65e73d8 nfs_submount fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x78d529a5 kvm_wait_lapic_expire arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x57f70547 secure_ipv4_port_ephemeral vmlinux EXPORT_SYMBOL_GPL
+0x545025e5 nvmem_add_cell_table vmlinux EXPORT_SYMBOL_GPL
+0x0967f563 iscsi_conn_stop vmlinux EXPORT_SYMBOL_GPL
+0x16dee44d dma_fence_init vmlinux EXPORT_SYMBOL
+0xc61ca65e iowrite64be_hi_lo vmlinux EXPORT_SYMBOL
+0x50e32d60 iomap_seek_data vmlinux EXPORT_SYMBOL_GPL
+0xb97f67cd pagecache_get_page vmlinux EXPORT_SYMBOL
+0x2cdf87a1 proc_dointvec_minmax vmlinux EXPORT_SYMBOL
+0xe6c6d62f i2c_bus_type drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x78161d4f mlx4_is_slave_active drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x9196518d devm_drm_dev_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xeeda3d01 mr_vif_seq_idx vmlinux EXPORT_SYMBOL
+0x181155ca dev_mc_add_excl vmlinux EXPORT_SYMBOL
+0xb7db8d54 dev_uc_add_excl vmlinux EXPORT_SYMBOL
+0xe03a689d dma_fence_array_ops vmlinux EXPORT_SYMBOL
+0x0ba66d01 pci_msi_create_irq_domain vmlinux EXPORT_SYMBOL_GPL
+0xcea3906e pci_unmap_iospace vmlinux EXPORT_SYMBOL
+0xc8cc328b blkcg_print_blkgs vmlinux EXPORT_SYMBOL_GPL
+0xebdce6ee blk_mq_alloc_request_hctx vmlinux EXPORT_SYMBOL_GPL
+0x1b7727d5 kmalloc_caches vmlinux EXPORT_SYMBOL
+0x4a6b9dee module_enable_ro vmlinux EXPORT_SYMBOL_GPL
+0x2016427b rpc_malloc net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xdad35e82 bch_btree_keys_alloc drivers/md/bcache/bcache EXPORT_SYMBOL
+0x54a5ee5f mlx4_get_active_ports drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xeb7c5254 rt_dst_alloc vmlinux EXPORT_SYMBOL
+0x0d7f5fcd xt_alloc_entry_offsets vmlinux EXPORT_SYMBOL
+0xf3e396ae do_take_over_console vmlinux EXPORT_SYMBOL_GPL
+0xa2fd8185 blk_queue_make_request vmlinux EXPORT_SYMBOL
+0xd326374d sysfs_add_link_to_group vmlinux EXPORT_SYMBOL_GPL
+0xf295891e truncate_pagecache_range vmlinux EXPORT_SYMBOL
+0x6f9e763b timecounter_read vmlinux EXPORT_SYMBOL_GPL
+0xc57aded8 param_ops_ullong vmlinux EXPORT_SYMBOL
+0xdba7326b nf_conntrack_lock net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xd7c7cf42 edac_mc_add_mc_with_groups drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x7bdf4b97 dm_cell_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xfe1e97ef mlx5_query_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x077a1543 __mlx4_replace_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xdcbff5df input_reset_device vmlinux EXPORT_SYMBOL
+0xc4f8647f fwnode_graph_get_remote_port_parent vmlinux EXPORT_SYMBOL_GPL
+0xa44e2745 tty_ldisc_deref vmlinux EXPORT_SYMBOL_GPL
+0x2ee4c2b1 hdmi_avi_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0x9100044d security_inode_setsecctx vmlinux EXPORT_SYMBOL
+0x100d33fd dquot_scan_active vmlinux EXPORT_SYMBOL
+0x1441a1de vfs_getattr vmlinux EXPORT_SYMBOL
+0x9fbba67f ceph_buffer_new net/ceph/libceph EXPORT_SYMBOL
+0xdaad52f5 drm_kms_helper_poll_enable drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x86ba8ca0 nfs_writeback_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xa084f79f cpumask_next_wrap vmlinux EXPORT_SYMBOL
+0xc872fd85 in6addr_interfacelocal_allnodes vmlinux EXPORT_SYMBOL
+0x6d97fe94 inet_del_protocol vmlinux EXPORT_SYMBOL
+0xf8154864 neigh_update vmlinux EXPORT_SYMBOL
+0x9a2182f1 mpt_set_taskmgmt_in_progress_flag vmlinux EXPORT_SYMBOL
+0xae783ec7 phy_stop vmlinux EXPORT_SYMBOL
+0xca7effa3 ata_dummy_port_info vmlinux EXPORT_SYMBOL_GPL
+0xb0ee7cfb tty_kclose vmlinux EXPORT_SYMBOL_GPL
+0xc257006b acpi_dma_simple_xlate vmlinux EXPORT_SYMBOL_GPL
+0xd4fa5a87 __kfifo_dma_out_prepare vmlinux EXPORT_SYMBOL
+0x9566713d blk_queue_io_min vmlinux EXPORT_SYMBOL
+0x9bc4f173 nobh_truncate_page vmlinux EXPORT_SYMBOL
+0x8b747ee8 ceph_monc_init net/ceph/libceph EXPORT_SYMBOL
+0x36ea7eed nf_ct_port_nla_policy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x22b18bec mdev_set_drvdata drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0xdef248f2 bch_btree_sort_lazy drivers/md/bcache/bcache EXPORT_SYMBOL
+0xc403cafe vmci_is_context_owner drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0xc4d99852 o2hb_check_node_heartbeating_from_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0xc2fb935c nfs_pgio_header_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x4d202b8c __xas_prev vmlinux EXPORT_SYMBOL_GPL
+0xa9fc491b xsk_umem_consume_tx vmlinux EXPORT_SYMBOL
+0xba0e6650 devfreq_register_notifier vmlinux EXPORT_SYMBOL
+0x940202a9 iscsi_conn_start vmlinux EXPORT_SYMBOL_GPL
+0x0059b071 securityfs_create_symlink vmlinux EXPORT_SYMBOL_GPL
+0xa7923c5b __vfs_getxattr vmlinux EXPORT_SYMBOL
+0xdbf4c030 __put_page vmlinux EXPORT_SYMBOL
+0xcf2a6966 up vmlinux EXPORT_SYMBOL
+0x8bb5fa6a rpc_call_start net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x434c5d10 zgid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x30104e47 drm_property_create_bitmask drivers/gpu/drm/drm EXPORT_SYMBOL
+0x73d186cf nfs4_set_ds_client fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x1d756eda nfs3_set_ds_client fs/nfs/nfsv3 EXPORT_SYMBOL_GPL
+0x711f4013 iscsi_lookup_endpoint vmlinux EXPORT_SYMBOL_GPL
+0xf33032a7 pci_hp_remove_module_link vmlinux EXPORT_SYMBOL_GPL
+0xb961b341 iov_iter_copy_from_user_atomic vmlinux EXPORT_SYMBOL
+0x5b7a73a1 path_get vmlinux EXPORT_SYMBOL
+0xccd751f4 __inode_sub_bytes vmlinux EXPORT_SYMBOL
+0x531b604e __virt_addr_valid vmlinux EXPORT_SYMBOL
+0x157aa73e __tracepoint_bcache_gc_copy drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xcdf033e2 mlx5_lag_get_roce_netdev drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xaab6b2e6 mlx4_uar_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x038be308 addrconf_add_linklocal vmlinux EXPORT_SYMBOL_GPL
+0x71ac9b48 xfrm_state_update vmlinux EXPORT_SYMBOL
+0x438743cf usb_deregister_dev vmlinux EXPORT_SYMBOL_GPL
+0x1625e4f5 usb_hcd_is_primary_hcd vmlinux EXPORT_SYMBOL_GPL
+0xb759fe1e pm_generic_resume_noirq vmlinux EXPORT_SYMBOL_GPL
+0xac80af63 pci_bridge_secondary_bus_reset vmlinux EXPORT_SYMBOL_GPL
+0x13ce87e8 asn1_ber_decoder vmlinux EXPORT_SYMBOL_GPL
+0xa2057351 crypto_grab_aead vmlinux EXPORT_SYMBOL_GPL
+0x66fc96f9 no_seek_end_llseek_size vmlinux EXPORT_SYMBOL
+0x23864ce7 cpuset_mem_spread_node vmlinux EXPORT_SYMBOL_GPL
+0xd1c2f335 get_pid_task vmlinux EXPORT_SYMBOL_GPL
+0x47884890 system_power_efficient_wq vmlinux EXPORT_SYMBOL_GPL
+0x46619b8d __ceph_open_session net/ceph/libceph EXPORT_SYMBOL
+0x084cba95 nft_data_release net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xed26fe0b transport_deregister_session_configfs drivers/target/target_core_mod EXPORT_SYMBOL
+0x7a45377b acpi_video_unregister drivers/acpi/video EXPORT_SYMBOL
+0x29cbfcbe devlink_param_value_changed vmlinux EXPORT_SYMBOL_GPL
+0x59b67e33 sk_psock_drop vmlinux EXPORT_SYMBOL_GPL
+0x8d09ff2c dev_open vmlinux EXPORT_SYMBOL
+0x331c6ca9 __skb_free_datagram_locked vmlinux EXPORT_SYMBOL
+0x0a6f0d02 lock_sock_fast vmlinux EXPORT_SYMBOL
+0x5ff61070 cn_add_callback vmlinux EXPORT_SYMBOL_GPL
+0x8dbb3701 alloc_iova_fast vmlinux EXPORT_SYMBOL_GPL
+0xbfc177bc iowrite32_rep vmlinux EXPORT_SYMBOL
+0x8cc79cab iowrite16_rep vmlinux EXPORT_SYMBOL
+0x142b8506 ilookup vmlinux EXPORT_SYMBOL
+0x4302d0eb free_pages vmlinux EXPORT_SYMBOL
+0xc437c77d irq_create_of_mapping vmlinux EXPORT_SYMBOL_GPL
+0xac2f0798 osd_req_op_extent_init net/ceph/libceph EXPORT_SYMBOL
+0x366344c9 nat_q931_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x40739385 nfs_wait_bit_killable fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x09bd92f6 mrp_init_applicant vmlinux EXPORT_SYMBOL_GPL
+0x81e6b37f dmi_get_system_info vmlinux EXPORT_SYMBOL
+0xf2eacaf7 thermal_of_cooling_device_register vmlinux EXPORT_SYMBOL_GPL
+0x8cef54c8 i8042_install_filter vmlinux EXPORT_SYMBOL
+0x3396b16c mdiobus_free vmlinux EXPORT_SYMBOL
+0x286163fa sas_suspend_ha vmlinux EXPORT_SYMBOL
+0x013bf0d0 of_pm_clk_add_clks vmlinux EXPORT_SYMBOL_GPL
+0x765ff474 crc_t10dif_generic vmlinux EXPORT_SYMBOL
+0x03592ea0 security_unix_stream_connect vmlinux EXPORT_SYMBOL
+0x7eb4ce29 generic_file_fsync vmlinux EXPORT_SYMBOL
+0x02321d21 unlock_new_inode vmlinux EXPORT_SYMBOL
+0xcb0ce28e __f_setown vmlinux EXPORT_SYMBOL
+0x438b3b21 irq_create_direct_mapping vmlinux EXPORT_SYMBOL_GPL
+0x554b647b execute_in_process_context vmlinux EXPORT_SYMBOL_GPL
+0x73dd54eb irq_fpu_usable vmlinux EXPORT_SYMBOL
+0x4976cc7b nf_reject_ip6hdr_put net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL
+0x7fc99e45 vhost_has_work drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xb74eb0ee i2c_new_client_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x5df38297 ppp_dev_name drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x954f099c idr_preload vmlinux EXPORT_SYMBOL
+0x4291a239 kset_register vmlinux EXPORT_SYMBOL
+0xa8f6c843 ip_frag_ecn_table vmlinux EXPORT_SYMBOL
+0x9462b904 xt_find_table_lock vmlinux EXPORT_SYMBOL_GPL
+0x62737e1d sock_unregister vmlinux EXPORT_SYMBOL
+0xf93fd09c fb_find_mode_cvt vmlinux EXPORT_SYMBOL
+0xf33ae20f pci_sriov_set_totalvfs vmlinux EXPORT_SYMBOL_GPL
+0xb37f81a6 fput vmlinux EXPORT_SYMBOL
+0xcd30334a nf_connlabels_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xf2ca3bae mlxsw_core_res_query_enabled drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x9fef9ec7 efivars_register vmlinux EXPORT_SYMBOL_GPL
+0xbe100de7 sas_phy_delete vmlinux EXPORT_SYMBOL
+0x85890c10 platform_device_alloc vmlinux EXPORT_SYMBOL_GPL
+0x43e31064 dev_printk_emit vmlinux EXPORT_SYMBOL
+0x95dcb403 clkdev_add vmlinux EXPORT_SYMBOL
+0xcfd30d71 acpi_os_map_memory vmlinux EXPORT_SYMBOL_GPL
+0xf5a5c84c msrs_alloc vmlinux EXPORT_SYMBOL
+0x97c5a165 iov_iter_get_pages_alloc vmlinux EXPORT_SYMBOL
+0x2baf247d uprobe_register_refctr vmlinux EXPORT_SYMBOL_GPL
+0x99078b39 trace_print_flags_seq vmlinux EXPORT_SYMBOL
+0x284e07d8 vsock_bind_table net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x9540d1b1 ip6_frag_init vmlinux EXPORT_SYMBOL
+0xee167cfe ir_raw_handler_unregister vmlinux EXPORT_SYMBOL
+0x2e98e6b5 pm_generic_suspend_noirq vmlinux EXPORT_SYMBOL_GPL
+0xeffc1bd7 component_add_typed vmlinux EXPORT_SYMBOL_GPL
+0x3bc87c61 dma_get_slave_caps vmlinux EXPORT_SYMBOL_GPL
+0x2ddb9fb8 clk_hw_unregister_mux vmlinux EXPORT_SYMBOL_GPL
+0x37073239 __fsnotify_parent vmlinux EXPORT_SYMBOL_GPL
+0xad5f0017 perf_trace_buf_alloc vmlinux EXPORT_SYMBOL_GPL
+0x944a564d is_console_locked vmlinux EXPORT_SYMBOL
+0x1e0528a2 pm_qos_add_request vmlinux EXPORT_SYMBOL_GPL
+0x42933206 param_ops_bool vmlinux EXPORT_SYMBOL
+0x0d9579dd ceph_monc_open_session net/ceph/libceph EXPORT_SYMBOL
+0xa2eb6140 l2tp_session_delete net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x3e4adc28 iscsit_release_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x8563a554 drm_ht_remove drivers/gpu/drm/drm EXPORT_SYMBOL
+0x68ee3b4e usb_ep0_reinit vmlinux EXPORT_SYMBOL_GPL
+0x1e0f0430 sas_phy_free vmlinux EXPORT_SYMBOL
+0x9f2c623e xt_rateest_put net/netfilter/xt_RATEEST EXPORT_SYMBOL_GPL
+0xa0418611 __mlx4_unregister_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xd663a386 drm_atomic_set_fb_for_plane drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa5fa5602 cryptd_shash_desc crypto/cryptd EXPORT_SYMBOL_GPL
+0x4c8d12c9 __udp6_lib_lookup vmlinux EXPORT_SYMBOL_GPL
+0x7f7cbc64 ip_tunnel_need_metadata vmlinux EXPORT_SYMBOL_GPL
+0xe4b8a882 inet_shutdown vmlinux EXPORT_SYMBOL
+0xaf920fa9 power_supply_set_property vmlinux EXPORT_SYMBOL_GPL
+0xcf6c0558 power_supply_get_property vmlinux EXPORT_SYMBOL_GPL
+0x57e4a53e mptscsih_info vmlinux EXPORT_SYMBOL
+0x38374815 clear_selection vmlinux EXPORT_SYMBOL_GPL
+0x80f7d128 __tracepoint_block_bio_remap vmlinux EXPORT_SYMBOL_GPL
+0xefec290a lc_find lib/lru_cache EXPORT_SYMBOL
+0xae7c5930 vhost_poll_stop drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x2dcce77e ib_destroy_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc64c41e5 mlx4_qp_modify drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x62e42485 drm_handle_vblank drivers/gpu/drm/drm EXPORT_SYMBOL
+0xfa279376 pnfs_generic_sync fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb506b6ba xt_unregister_table vmlinux EXPORT_SYMBOL_GPL
+0x01593c46 flow_rule_match_cvlan vmlinux EXPORT_SYMBOL
+0x470b4b35 iscsi_requeue_task vmlinux EXPORT_SYMBOL_GPL
+0x48d50e79 amd_iommu_register_ppr_notifier vmlinux EXPORT_SYMBOL
+0xb27cd60c uart_remove_one_port vmlinux EXPORT_SYMBOL
+0x167d7113 acpi_bus_register_early_device vmlinux EXPORT_SYMBOL_GPL
+0xc2c00335 vmalloc_to_page vmlinux EXPORT_SYMBOL
+0x9530f385 __cgroup_bpf_run_filter_sock_addr vmlinux EXPORT_SYMBOL
+0xe033cb29 native_queued_spin_lock_slowpath vmlinux EXPORT_SYMBOL
+0xa0713087 drm_ht_find_item drivers/gpu/drm/drm EXPORT_SYMBOL
+0x1ff57f19 xfrm_trans_queue vmlinux EXPORT_SYMBOL
+0x1c5541bd cpufreq_boost_enabled vmlinux EXPORT_SYMBOL_GPL
+0xc44e1177 usb_get_hcd vmlinux EXPORT_SYMBOL_GPL
+0xdaa06dc1 acpi_lpat_raw_to_temp vmlinux EXPORT_SYMBOL_GPL
+0x080db948 fbcon_set_bitops vmlinux EXPORT_SYMBOL
+0x43eefd40 blk_integrity_merge_rq vmlinux EXPORT_SYMBOL
+0xe069bfae blk_update_request vmlinux EXPORT_SYMBOL_GPL
+0x02649054 security_sock_rcv_skb vmlinux EXPORT_SYMBOL
+0xfc336d2e __wake_up_bit vmlinux EXPORT_SYMBOL
+0x80dd13cd get_task_cred vmlinux EXPORT_SYMBOL
+0xea220bc7 xdr_encode_array2 net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8a0be009 rdma_alloc_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x1d94c56d drm_prime_gem_destroy drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5a067c3e drm_i2c_encoder_mode_fixup drivers/gpu/drm/drm EXPORT_SYMBOL
+0xff09bd65 camellia_dec_blk arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL
+0x588ea78a hchacha_block vmlinux EXPORT_SYMBOL
+0xa81b2332 sock_release vmlinux EXPORT_SYMBOL
+0x475faebd usb_hcd_giveback_urb vmlinux EXPORT_SYMBOL_GPL
+0x10303afa iscsi_is_session_dev vmlinux EXPORT_SYMBOL_GPL
+0x788ee7f2 tty_port_tty_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x92b99a33 acpi_put_table vmlinux EXPORT_SYMBOL
+0x284fe794 percpu_ref_exit vmlinux EXPORT_SYMBOL_GPL
+0xde293f9e add_wait_queue_exclusive vmlinux EXPORT_SYMBOL
+0x9746eb89 ZSTD_decompressBegin_usingDict lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x7f03b6a9 crc_ccitt_table lib/crc-ccitt EXPORT_SYMBOL
+0xfdeb4c93 nf_nat_icmp_reply_translation net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0xc08c3b5f ct_sip_get_sdp_header net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x25a9ca92 mlx4_mtt_addr drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x23f7454c drm_debugfs_create_files drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe01f7671 nlmclnt_proc fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x3557b664 pnfs_generic_write_commit_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x262e9f1e mipi_dsi_dcs_soft_reset vmlinux EXPORT_SYMBOL
+0x2e8e65d4 iommu_dev_feature_enabled vmlinux EXPORT_SYMBOL_GPL
+0xd3ca852b tty_register_device_attr vmlinux EXPORT_SYMBOL_GPL
+0x19245a94 pci_sriov_configure_simple vmlinux EXPORT_SYMBOL_GPL
+0xad7e706b rhashtable_walk_stop vmlinux EXPORT_SYMBOL_GPL
+0xb5122b52 d_add_ci vmlinux EXPORT_SYMBOL
+0xa2bd25da tracepoint_probe_register_prio vmlinux EXPORT_SYMBOL_GPL
+0x20eb13c8 rdma_find_gid_by_port drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0b25f6bc dm_array_resize drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x056837fb vmci_get_context_id drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x9b2560b9 gf128mul_init_4k_bbe crypto/gf128mul EXPORT_SYMBOL
+0x83581089 gf128mul_init_4k_lle crypto/gf128mul EXPORT_SYMBOL
+0x7a88da87 iosf_mbi_write arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL
+0x9b33e0d7 unregister_dcbevent_notifier vmlinux EXPORT_SYMBOL
+0x29bc1269 __skb_checksum_complete_head vmlinux EXPORT_SYMBOL
+0x59b30095 skb_send_sock_locked vmlinux EXPORT_SYMBOL_GPL
+0x82ae0aa6 xhci_shutdown vmlinux EXPORT_SYMBOL_GPL
+0xd19779c2 phy_ethtool_get_wol vmlinux EXPORT_SYMBOL
+0x0742366d phy_ethtool_set_wol vmlinux EXPORT_SYMBOL
+0x12b2ad06 iscsi_switch_str_param vmlinux EXPORT_SYMBOL_GPL
+0x35050b55 scsi_dh_attach vmlinux EXPORT_SYMBOL_GPL
+0xdfe29792 unlink_framebuffer vmlinux EXPORT_SYMBOL
+0x53126ecc __percpu_counter_sum vmlinux EXPORT_SYMBOL
+0xe92398af mpage_readpage vmlinux EXPORT_SYMBOL
+0xd6ee688f vmalloc vmlinux EXPORT_SYMBOL
+0xc1f6a75e perf_pmu_unregister vmlinux EXPORT_SYMBOL_GPL
+0xa20beb6f dma_direct_unmap_page vmlinux EXPORT_SYMBOL
+0xf31b3fd1 workqueue_set_max_active vmlinux EXPORT_SYMBOL_GPL
+0xfdcd3be8 vhost_poll_start drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xcf046c95 macvlan_dellink drivers/net/macvlan EXPORT_SYMBOL_GPL
+0x02998acf mlxsw_afa_block_append_counter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xb94f2e56 devm_lcd_device_register drivers/video/backlight/lcd EXPORT_SYMBOL
+0x5d021c81 flow_rule_match_enc_ports vmlinux EXPORT_SYMBOL
+0x202d4ed6 nvmem_cell_write vmlinux EXPORT_SYMBOL_GPL
+0x37b11bd9 raid5_set_cache_size vmlinux EXPORT_SYMBOL
+0x3fb06f9c ata_bmdma_error_handler vmlinux EXPORT_SYMBOL_GPL
+0x99b49127 tty_port_raise_dtr_rts vmlinux EXPORT_SYMBOL
+0xf5e5c394 pci_request_region vmlinux EXPORT_SYMBOL
+0x3041bc90 blk_queue_io_opt vmlinux EXPORT_SYMBOL
+0x2b260feb bdgrab vmlinux EXPORT_SYMBOL
+0xd4c14632 system_unbound_wq vmlinux EXPORT_SYMBOL_GPL
+0x05258e28 udp_tunnel6_xmit_skb net/ipv6/ip6_udp_tunnel EXPORT_SYMBOL_GPL
+0xbbe84116 ct_sip_parse_request net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0xd680a377 drm_gem_object_free drivers/gpu/drm/drm EXPORT_SYMBOL
+0x58d750c8 devm_lcd_device_unregister drivers/video/backlight/lcd EXPORT_SYMBOL
+0x5ba5b950 nfs_fscache_open_file fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x716cd6a0 kvm_clear_guest_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9e0fc803 dcb_ieee_getapp_default_prio_mask vmlinux EXPORT_SYMBOL
+0x50cbf9c9 flow_block_cb_alloc vmlinux EXPORT_SYMBOL
+0xb7c6db70 sysctl_max_skb_frags vmlinux EXPORT_SYMBOL
+0x94c6085c ata_sff_irq_on vmlinux EXPORT_SYMBOL_GPL
+0xab7d8a8d dma_resv_test_signaled_rcu vmlinux EXPORT_SYMBOL_GPL
+0x66772039 clk_hw_unregister_fixed_rate vmlinux EXPORT_SYMBOL_GPL
+0x91924378 devm_get_clk_from_child vmlinux EXPORT_SYMBOL
+0xf11543ff find_first_zero_bit vmlinux EXPORT_SYMBOL
+0x9f50b770 keyring_restrict vmlinux EXPORT_SYMBOL
+0x903b627c list_lru_isolate_move vmlinux EXPORT_SYMBOL_GPL
+0x847d6b6d xprt_wait_for_reply_request_def net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xe6107c58 rpc_clnt_test_and_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x953e9965 ib_detach_mcast drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5a25c2b5 ib_attach_mcast drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3127c95e drm_get_edid drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5aa443a7 component_match_add_typed vmlinux EXPORT_SYMBOL
+0x3a4f9d28 rng_is_initialized vmlinux EXPORT_SYMBOL
+0xcf000c7e hdmi_infoframe_check vmlinux EXPORT_SYMBOL
+0x9725752a iov_iter_revert vmlinux EXPORT_SYMBOL
+0xb55bb4b5 crypto_unregister_aeads vmlinux EXPORT_SYMBOL_GPL
+0x23ee13fd mb_cache_entry_find_first vmlinux EXPORT_SYMBOL
+0x59d662d8 __bread_gfp vmlinux EXPORT_SYMBOL
+0x30901ac7 gre_parse_header net/ipv4/gre EXPORT_SYMBOL
+0x4aced3a5 ppp_unregister_compressor drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xf302385d cxgbi_sock_rcv_peer_close drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x14e10813 tcp_unregister_congestion_control vmlinux EXPORT_SYMBOL_GPL
+0x5d2da605 nf_checksum_partial vmlinux EXPORT_SYMBOL_GPL
+0x0274dc2b netif_get_num_default_rss_queues vmlinux EXPORT_SYMBOL
+0x61f67c92 phy_gbit_features_array vmlinux EXPORT_SYMBOL_GPL
+0x15aaf6ef __phy_modify vmlinux EXPORT_SYMBOL_GPL
+0x2b2b2316 agp_generic_alloc_page vmlinux EXPORT_SYMBOL
+0x569f435e clk_gate_restore_context vmlinux EXPORT_SYMBOL_GPL
+0xd079709c bio_split vmlinux EXPORT_SYMBOL +0x0e262fe9 lock_rename vmlinux EXPORT_SYMBOL +0x62d5ce4e preempt_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x43e7f519 i2c_bit_add_bus drivers/i2c/algos/i2c-algo-bit EXPORT_SYMBOL +0xdff882b3 cxgbi_destroy_session drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xe9af7397 __xa_set_mark vmlinux EXPORT_SYMBOL +0x878dd70d netlink_remove_tap vmlinux EXPORT_SYMBOL_GPL +0x15f9d841 cpuidle_poll_state_init vmlinux EXPORT_SYMBOL_GPL +0xaa529591 class_destroy vmlinux EXPORT_SYMBOL_GPL +0x4e0f1b98 tpm_tis_remove vmlinux EXPORT_SYMBOL_GPL +0xf43d2caa acpi_remove_interface vmlinux EXPORT_SYMBOL +0x14ac1e49 pci_pme_active vmlinux EXPORT_SYMBOL +0x6d0ae550 pinctrl_gpio_request vmlinux EXPORT_SYMBOL_GPL +0xef21d251 proc_create_net_single vmlinux EXPORT_SYMBOL_GPL +0x62400fbe proc_create_net_data_write vmlinux EXPORT_SYMBOL_GPL +0xe846f6e4 put_user_pages_dirty_lock vmlinux EXPORT_SYMBOL +0xbdff3e7d mutex_lock_killable vmlinux EXPORT_SYMBOL +0xef853ff6 xdr_inline_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4a89b785 vhost_vring_ioctl drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x039243f3 mlx4_register_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x15de6d0c drm_dev_dbg drivers/gpu/drm/drm EXPORT_SYMBOL +0xdcce01ce __fscache_check_page_write fs/fscache/fscache EXPORT_SYMBOL +0x7d208a89 kvm_write_guest_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xbe610663 tcp_check_req vmlinux EXPORT_SYMBOL +0x4ea2e36e tcp_init_sock vmlinux EXPORT_SYMBOL +0x2fc25e75 netlink_unicast vmlinux EXPORT_SYMBOL +0x3e77b123 neigh_sysctl_register vmlinux EXPORT_SYMBOL +0xc90df7e5 dm_bufio_prefetch vmlinux EXPORT_SYMBOL_GPL +0xce8e47b8 md_rdev_init vmlinux EXPORT_SYMBOL_GPL +0x415589fc ata_bmdma_status vmlinux EXPORT_SYMBOL_GPL +0x79fc6c3b tty_port_tty_hangup vmlinux EXPORT_SYMBOL_GPL +0x44645212 __bio_add_page vmlinux EXPORT_SYMBOL_GPL +0xadb3d032 bpf_trace_run12 vmlinux EXPORT_SYMBOL_GPL +0x03401265 bpf_trace_run11 vmlinux EXPORT_SYMBOL_GPL +0x1a6b0c17 bpf_trace_run10 vmlinux EXPORT_SYMBOL_GPL +0x65321af1 i2c_setup_smbus_alert drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x07abcc0c mlxsw_afa_block_append_trap drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x8e876807 rps_needed vmlinux EXPORT_SYMBOL +0xef061576 ufshcd_runtime_idle vmlinux EXPORT_SYMBOL +0xb4ba83b1 import_single_range vmlinux EXPORT_SYMBOL +0xbc794b62 security_sctp_bind_connect vmlinux EXPORT_SYMBOL +0xc9f2fe77 set_nlink vmlinux EXPORT_SYMBOL +0xd5ba98cd kill_pgrp vmlinux EXPORT_SYMBOL +0x0d0466c7 adf_devmgr_add_dev drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xd16af30d mlx4_qp_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x66983e96 simd_skcipher_create crypto/crypto_simd EXPORT_SYMBOL_GPL +0xddb1ebcd sk_msg_memcopy_from_iter vmlinux EXPORT_SYMBOL_GPL +0x0b84b47f mptscsih_flush_running_cmds vmlinux EXPORT_SYMBOL +0xc02842b0 device_wakeup_disable vmlinux EXPORT_SYMBOL_GPL +0xa4d4f0e6 global_cache_flush vmlinux EXPORT_SYMBOL +0xe42b33d5 fat_build_inode vmlinux EXPORT_SYMBOL_GPL +0x0f395416 seq_read vmlinux EXPORT_SYMBOL +0xa6c0b154 is_bad_inode vmlinux EXPORT_SYMBOL +0x93d88792 param_get_int vmlinux EXPORT_SYMBOL +0xab5b3c46 flow_offload_add net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x41aad640 vxlan_dev_create drivers/net/vxlan EXPORT_SYMBOL_GPL +0xffed9e60 skb_ext_add vmlinux EXPORT_SYMBOL +0x5587bfa1 start_tty vmlinux EXPORT_SYMBOL +0x9fb21855 acpi_dev_resource_ext_address_space vmlinux EXPORT_SYMBOL_GPL +0x6513a3fa fb_get_color_depth vmlinux 
EXPORT_SYMBOL +0x1da80c41 crypto_unregister_kpp vmlinux EXPORT_SYMBOL_GPL +0x9e2244e9 shash_ahash_finup vmlinux EXPORT_SYMBOL_GPL +0x360e2d03 user_update vmlinux EXPORT_SYMBOL_GPL +0x56388be7 configfs_unregister_group vmlinux EXPORT_SYMBOL +0xc4f0da12 ktime_get_with_offset vmlinux EXPORT_SYMBOL_GPL +0xb665f56d __cachemode2pte_tbl vmlinux EXPORT_SYMBOL +0x45e8d7b5 native_write_cr0 vmlinux EXPORT_SYMBOL +0x2865fe0a nf_ct_get_tuplepr net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x35ab6c6a adf_vf_isr_resource_alloc drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x7658af60 mlx5_core_modify_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x43ff8f7a kvm_read_guest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xef465492 kernel_sock_shutdown vmlinux EXPORT_SYMBOL +0xc66d919f dm_table_get_mode vmlinux EXPORT_SYMBOL +0xf9632b25 virtqueue_add_sgs vmlinux EXPORT_SYMBOL_GPL +0x0faa75cb pnp_unregister_card_driver vmlinux EXPORT_SYMBOL +0x54215db5 visitor64 vmlinux EXPORT_SYMBOL_GPL +0xc9641b48 visitor32 vmlinux EXPORT_SYMBOL_GPL +0xfda9581f prandom_u32 vmlinux EXPORT_SYMBOL +0x9e1b6813 posix_acl_chmod vmlinux EXPORT_SYMBOL +0x6106f0cb notify_change vmlinux EXPORT_SYMBOL +0xccd6df57 follow_up vmlinux EXPORT_SYMBOL +0x489af01a param_get_short vmlinux EXPORT_SYMBOL +0x0054f69d dm_tm_pre_commit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x72289260 dm_block_manager_destroy drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xfddc0da8 cxgb4_port_chan drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x1e692c55 inet_sk_set_state vmlinux EXPORT_SYMBOL +0x05a9462e nf_log_bind_pf vmlinux EXPORT_SYMBOL +0x6b20e8bb garp_request_join vmlinux EXPORT_SYMBOL_GPL +0x5d7fcd68 dev_pick_tx_zero vmlinux EXPORT_SYMBOL +0x731bbbcf sock_no_ioctl vmlinux EXPORT_SYMBOL +0x9fe899b7 get_cpu_idle_time vmlinux EXPORT_SYMBOL_GPL +0x85a2bcc2 __tracepoint_iscsi_dbg_tcp vmlinux EXPORT_SYMBOL_GPL +0xf1ced4e8 mipi_dsi_dcs_set_tear_off vmlinux EXPORT_SYMBOL +0x275f3d49 hdmi_vendor_infoframe_check vmlinux EXPORT_SYMBOL +0x3ef32144 get_user_pages_fast vmlinux EXPORT_SYMBOL_GPL +0x9d7d71aa kernel_param_lock vmlinux EXPORT_SYMBOL +0xbbb98859 edid_info vmlinux EXPORT_SYMBOL_GPL +0x79c8ec82 nf_conntrack_find_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa87bc9e7 o2nm_configured_node_map fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xa83e8d4a md_write_start vmlinux EXPORT_SYMBOL +0x985cf057 serio_unregister_child_port vmlinux EXPORT_SYMBOL +0x928262a3 mpt_verify_adapter vmlinux EXPORT_SYMBOL +0x47b98cf4 scsi_remove_device vmlinux EXPORT_SYMBOL +0x3f54b4ec idma32_dma_probe vmlinux EXPORT_SYMBOL_GPL +0xd235c3ba simple_empty vmlinux EXPORT_SYMBOL +0xc306c3a8 page_frag_alloc vmlinux EXPORT_SYMBOL +0x7b973b9a find_extend_vma vmlinux EXPORT_SYMBOL_GPL +0xdf5969f5 vm_mmap vmlinux EXPORT_SYMBOL +0x1d81a48a shmem_read_mapping_page_gfp vmlinux EXPORT_SYMBOL_GPL +0x743dfedd dma_alloc_attrs vmlinux EXPORT_SYMBOL +0xed9641f0 nf_sk_lookup_slow_v6 net/ipv6/netfilter/nf_socket_ipv6 EXPORT_SYMBOL_GPL +0x6af1e099 nf_sk_lookup_slow_v4 net/ipv4/netfilter/nf_socket_ipv4 EXPORT_SYMBOL_GPL +0xf321fb6e mlxsw_i2c_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_i2c EXPORT_SYMBOL +0x0910311d fuse_file_poll fs/fuse/fuse EXPORT_SYMBOL_GPL +0x9a22391e radix_tree_gang_lookup_tag_slot vmlinux EXPORT_SYMBOL +0xc99b1ecd __sk_mem_schedule vmlinux EXPORT_SYMBOL +0xc2303472 __hid_register_driver vmlinux EXPORT_SYMBOL_GPL +0x789ec271 hid_disconnect vmlinux EXPORT_SYMBOL_GPL +0x70cf032f 
usb_hcd_irq vmlinux EXPORT_SYMBOL_GPL +0xf155420e srp_timed_out vmlinux EXPORT_SYMBOL +0x767dce4b acpi_disable_all_gpes vmlinux EXPORT_SYMBOL +0x2fe61b1e __pci_register_driver vmlinux EXPORT_SYMBOL +0xce130a7c mnt_want_write_file vmlinux EXPORT_SYMBOL_GPL +0x6ffce680 x86_cpu_has_min_microcode_rev vmlinux EXPORT_SYMBOL_GPL +0x6a7a38a0 ceph_pr_addr net/ceph/libceph EXPORT_SYMBOL +0x9350628b ibdev_notice drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x6e3190ca nfs_file_set_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0xc7f89415 nfs_free_server fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5ed945b5 ip_ct_attach vmlinux EXPORT_SYMBOL +0x02e5e714 dev_set_mtu vmlinux EXPORT_SYMBOL +0xa835640f dm_snap_cow vmlinux EXPORT_SYMBOL +0xce8123d8 ahci_fill_cmd_slot vmlinux EXPORT_SYMBOL_GPL +0x243e1d06 fwnode_handle_get vmlinux EXPORT_SYMBOL_GPL +0x72c85ebe crypto_unregister_template vmlinux EXPORT_SYMBOL_GPL +0x286e3981 security_sk_classify_flow vmlinux EXPORT_SYMBOL +0x1fca5c99 bd_link_disk_holder vmlinux EXPORT_SYMBOL_GPL +0x2919b156 xdr_decode_string_inplace net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc7fd14f8 vhost_dev_init drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xe4c0ca9d tap_handle_frame drivers/net/tap EXPORT_SYMBOL_GPL +0xdc512134 backlight_register_notifier drivers/video/backlight/backlight EXPORT_SYMBOL +0x6acefa42 fscache_cache_cleared_wq fs/fscache/fscache EXPORT_SYMBOL +0x44414ff2 iosf_mbi_unblock_punit_i2c_access arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0x5ee01d2f dev_pm_opp_get_opp_count vmlinux EXPORT_SYMBOL_GPL +0x5df997b9 fcoe_ctlr_set_fip_mode vmlinux EXPORT_SYMBOL +0x335c570f enable_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0x43babee3 svc_seq_show net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0626da5f od_unregister_powersave_bias_handler vmlinux EXPORT_SYMBOL_GPL +0xd87fc0a0 usb_amd_prefetch_quirk vmlinux EXPORT_SYMBOL_GPL +0xd293a925 fc_rport_create vmlinux EXPORT_SYMBOL +0x90ba69e0 fc_vport_create vmlinux EXPORT_SYMBOL +0x238b342f scsi_free_host_dev vmlinux EXPORT_SYMBOL +0x4065d168 pm_print_active_wakeup_sources vmlinux EXPORT_SYMBOL_GPL +0x423e347d driver_register vmlinux EXPORT_SYMBOL_GPL +0xd0fef3b2 agp_free_key vmlinux EXPORT_SYMBOL +0xe63246bd pnp_device_detach vmlinux EXPORT_SYMBOL +0xa40964f2 should_remove_suid vmlinux EXPORT_SYMBOL +0xc92f7f50 vsock_table_lock net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x6082b7be adf_devmgr_pci_to_accel_dev drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x3a797d19 dm_btree_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb908910f mlx5_nic_vport_affiliate_multiport drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x7c383793 drm_mode_create_tv_margin_properties drivers/gpu/drm/drm EXPORT_SYMBOL +0x27a0c849 input_ff_create vmlinux EXPORT_SYMBOL_GPL +0x9d50ed7e fc_rport_destroy vmlinux EXPORT_SYMBOL +0xb67d1683 fc_lport_destroy vmlinux EXPORT_SYMBOL +0xccd86806 ata_id_string vmlinux EXPORT_SYMBOL_GPL +0xbe0a2952 device_destroy vmlinux EXPORT_SYMBOL_GPL +0xe71a6a77 divider_ro_round_rate_parent vmlinux EXPORT_SYMBOL_GPL +0x74881d07 acpi_unlock_battery_dir vmlinux EXPORT_SYMBOL +0xa0904697 srcu_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL +0x1389619c __max_die_per_package vmlinux EXPORT_SYMBOL +0xb83129db ZSTD_decompressContinue lib/zstd/zstd_decompress EXPORT_SYMBOL +0x6211bd44 nft_dump_register net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x3ff55ad3 nf_conncount_cache_free net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0xb5e762fa mlxsw_afk_values_add_buf drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL 
+0x47041e4e mlxsw_afk_key_info_blocks_count_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc257db6c inet6_stream_ops vmlinux EXPORT_SYMBOL +0xfe44163f xfrm_if_register_cb vmlinux EXPORT_SYMBOL +0x8ccc380a unregister_pernet_device vmlinux EXPORT_SYMBOL_GPL +0x0cc9bec7 devfreq_recommended_opp vmlinux EXPORT_SYMBOL +0x1982f664 input_ff_destroy vmlinux EXPORT_SYMBOL_GPL +0x5f6f1e9e dax_get_private vmlinux EXPORT_SYMBOL_GPL +0x7af0d84f platform_get_irq_byname vmlinux EXPORT_SYMBOL_GPL +0xc0ff202c seq_dentry vmlinux EXPORT_SYMBOL +0x37110088 remove_wait_queue vmlinux EXPORT_SYMBOL +0x481e21bb param_array_ops vmlinux EXPORT_SYMBOL +0x73f87d2e ceph_copy_from_page_vector net/ceph/libceph EXPORT_SYMBOL +0xbce854ad flow_offload_alloc net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x0563bf20 ib_send_cm_sidr_rep drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xe4385fb8 ib_send_cm_sidr_req drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xb21bef92 drm_connector_set_vrr_capable_property drivers/gpu/drm/drm EXPORT_SYMBOL +0xd0f33847 pnfs_generic_clear_request_commit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xe6f5e6f5 xas_clear_mark vmlinux EXPORT_SYMBOL_GPL +0xe74598c2 kset_find_obj vmlinux EXPORT_SYMBOL_GPL +0xad1c8b2a napi_get_frags vmlinux EXPORT_SYMBOL +0x1417bc9b efivar_entry_remove vmlinux EXPORT_SYMBOL_GPL +0x0ced3a86 iscsi_tcp_set_max_r2t vmlinux EXPORT_SYMBOL_GPL +0xc50032b2 iscsi_conn_queue_work vmlinux EXPORT_SYMBOL_GPL +0x8e20ad67 dev_pm_domain_detach vmlinux EXPORT_SYMBOL_GPL +0x97c05782 device_connection_remove vmlinux EXPORT_SYMBOL_GPL +0xfc15565f scsi_verify_blk_ioctl vmlinux EXPORT_SYMBOL +0x193584ca configfs_register_group vmlinux EXPORT_SYMBOL +0x7a38ea99 kill_bdev vmlinux EXPORT_SYMBOL +0xd418dd8f pagecache_isize_extended vmlinux EXPORT_SYMBOL +0x2a58ae55 disable_kprobe vmlinux EXPORT_SYMBOL_GPL +0x56470118 __warn_printk vmlinux EXPORT_SYMBOL +0x79cf1043 fpu_kernel_xstate_size vmlinux EXPORT_SYMBOL_GPL +0x298bc667 adf_dev_in_use drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x3bf98253 sys_copyarea drivers/video/fbdev/core/syscopyarea EXPORT_SYMBOL +0xb8b7fafe ipv6_dup_options vmlinux EXPORT_SYMBOL_GPL +0x0315abd9 phy_register_fixup_for_id vmlinux EXPORT_SYMBOL +0x1fc0cc7c intel_gtt_insert_sg_entries vmlinux EXPORT_SYMBOL +0x8cfc1064 cgroup_attach_task_all vmlinux EXPORT_SYMBOL_GPL +0xc3182c1f delayed_work_timer_fn vmlinux EXPORT_SYMBOL +0xaf0847f0 nf_conntrack_locks net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x941814c1 tap_get_socket drivers/net/tap EXPORT_SYMBOL_GPL +0x485cd7f6 kvm_rebooting arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x3fa01be2 vchan_dma_desc_free_list vmlinux EXPORT_SYMBOL_GPL +0xf79ca3bb acpi_remove_gpe_block vmlinux EXPORT_SYMBOL +0xa16e60d6 first_ec vmlinux EXPORT_SYMBOL +0x5c32a897 dim_park_tired vmlinux EXPORT_SYMBOL +0xeaf7fdee configfs_remove_default_groups vmlinux EXPORT_SYMBOL +0xc7d4075c zap_vma_ptes vmlinux EXPORT_SYMBOL_GPL +0xab65ed80 set_memory_uc vmlinux EXPORT_SYMBOL +0xdc204bb1 rpc_init_priority_wait_queue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x85430a76 nft_validate_register_load net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x9e798e22 dm_bm_set_read_only drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x6bd10c9d i2c_smbus_write_word_data drivers/i2c/i2c-core EXPORT_SYMBOL +0x62b1e0b8 mlxsw_core_rx_listener_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x3ec6e75b mlx4_ACCESS_PTYS_REG drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xb4d1683a drm_panel_bridge_remove 
drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x4491207b drm_dp_send_power_updown_phy drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x88bebf20 xfrm_policy_bysel_ctx vmlinux EXPORT_SYMBOL +0x904d055c tcp_sendmsg vmlinux EXPORT_SYMBOL +0xfcdd31a0 dev_pm_opp_is_turbo vmlinux EXPORT_SYMBOL_GPL +0x8547d2c0 md_bitmap_free vmlinux EXPORT_SYMBOL +0x06a07527 md_bitmap_endwrite vmlinux EXPORT_SYMBOL +0xd0481f29 ata_bmdma_port_start vmlinux EXPORT_SYMBOL_GPL +0x959874d8 find_iova vmlinux EXPORT_SYMBOL_GPL +0x2b952517 clk_has_parent vmlinux EXPORT_SYMBOL_GPL +0xb29ef899 dim_park_on_top vmlinux EXPORT_SYMBOL +0x508dee1b _copy_from_iter_flushcache vmlinux EXPORT_SYMBOL_GPL +0x33f7b01e elv_rb_del vmlinux EXPORT_SYMBOL +0x127eb763 elv_rb_add vmlinux EXPORT_SYMBOL +0xe3677e9d init_user_ns vmlinux EXPORT_SYMBOL_GPL +0x90efbcb1 wpan_phy_free net/ieee802154/ieee802154 EXPORT_SYMBOL +0xe76b2ef4 rpc_init_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x024d14bc vmci_qpair_produce_free_space drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x863c6711 fuse_simple_background fs/fuse/fuse EXPORT_SYMBOL_GPL +0xd6d84e02 kvm_write_tsc arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x9ad7a582 iosf_mbi_assert_punit_acquired arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0x68187bcb ip_mc_leave_group vmlinux EXPORT_SYMBOL +0x3a6fb444 lwtunnel_cmp_encap vmlinux EXPORT_SYMBOL_GPL +0xd78067e7 netif_skb_features vmlinux EXPORT_SYMBOL +0xc8de7a6c mptscsih_qcmd vmlinux EXPORT_SYMBOL +0xe74293c9 mptscsih_is_phys_disk vmlinux EXPORT_SYMBOL +0x27f0320f pm_runtime_set_memalloc_noio vmlinux EXPORT_SYMBOL_GPL +0x0388d422 iommu_fwspec_init vmlinux EXPORT_SYMBOL_GPL +0x6dbaafd3 put_old_timespec32 vmlinux EXPORT_SYMBOL_GPL +0x5199e680 work_busy vmlinux EXPORT_SYMBOL_GPL +0x0de52c6c ib_sa_get_mcmember_rec drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x24c90ca6 find_mci_by_dev drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x50c3be1c drm_dp_stop_crc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x270463e7 lcd_device_unregister drivers/video/backlight/lcd EXPORT_SYMBOL +0xcbd9627b __sk_mem_reclaim vmlinux EXPORT_SYMBOL +0x38fe29d1 tty_set_ldisc vmlinux EXPORT_SYMBOL_GPL +0x04863e28 hdmi_audio_infoframe_pack_only vmlinux EXPORT_SYMBOL +0x595045d9 pcim_pin_device vmlinux EXPORT_SYMBOL +0x405f573f pci_cfg_access_lock vmlinux EXPORT_SYMBOL_GPL +0x1161cf40 crypto_register_skcipher vmlinux EXPORT_SYMBOL_GPL +0x13553fa8 d_prune_aliases vmlinux EXPORT_SYMBOL +0x12a38747 usleep_range vmlinux EXPORT_SYMBOL +0x2e78702e kmsg_dump_get_line vmlinux EXPORT_SYMBOL_GPL +0x3b907a6b ip_set_put_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x37db1962 ip_set_get_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xa3d0d2b6 mlxsw_afa_block_append_fwd drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xcc27bb98 mlx4_tunnel_steer_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xd0474696 fib_nh_common_init vmlinux EXPORT_SYMBOL_GPL +0x198227ee tcp_mss_to_mtu vmlinux EXPORT_SYMBOL +0x4649aa40 gro_cells_receive vmlinux EXPORT_SYMBOL +0x7c95b231 dev_pm_opp_find_level_exact vmlinux EXPORT_SYMBOL_GPL +0x09b5e481 pci_common_swizzle vmlinux EXPORT_SYMBOL_GPL +0x08568e5f blkcg_dkstats_find_create vmlinux EXPORT_SYMBOL_GPL +0x71466619 blk_mq_init_sq_queue vmlinux EXPORT_SYMBOL +0x4e13e41b __pagevec_release vmlinux EXPORT_SYMBOL +0x7040fff9 rtc_lock vmlinux EXPORT_SYMBOL +0x6b835595 cache_register_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x44ba7e3b drm_fb_helper_unregister_fbi drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xda09c30b 
drm_dp_link_configure drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf1a5611d o2net_unregister_handler_list fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x14f7f15d metadata_dst_free_percpu vmlinux EXPORT_SYMBOL_GPL +0xe9c5dfba hid_unregister_driver vmlinux EXPORT_SYMBOL_GPL +0x95ef1ccc dmi_memdev_size vmlinux EXPORT_SYMBOL_GPL +0x28197054 usb_autopm_put_interface vmlinux EXPORT_SYMBOL_GPL +0xb7ddeb66 pcie_set_readrq vmlinux EXPORT_SYMBOL +0xf19185e6 pcie_bus_configure_settings vmlinux EXPORT_SYMBOL_GPL +0xb78c7131 nla_reserve_nohdr vmlinux EXPORT_SYMBOL +0x4230a8d7 sg_nents_for_len vmlinux EXPORT_SYMBOL +0xfa2c11ea device_add_disk vmlinux EXPORT_SYMBOL +0x2daa2177 x509_free_certificate vmlinux EXPORT_SYMBOL_GPL +0x50c81417 securityfs_create_file vmlinux EXPORT_SYMBOL_GPL +0x78950025 exportfs_encode_fh vmlinux EXPORT_SYMBOL_GPL +0xb4b34818 iomap_invalidatepage vmlinux EXPORT_SYMBOL_GPL +0x287e87c5 ilookup5 vmlinux EXPORT_SYMBOL +0x0e8a574a cpuacct_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0x5e515be6 ktime_get_ts64 vmlinux EXPORT_SYMBOL_GPL +0xc0a96e14 rcu_gp_is_expedited vmlinux EXPORT_SYMBOL_GPL +0x47941711 _raw_spin_lock_irq vmlinux EXPORT_SYMBOL +0x409bcb62 mutex_unlock vmlinux EXPORT_SYMBOL +0x591a1367 prepare_creds vmlinux EXPORT_SYMBOL +0x746925fc cxgb4_inline_tx_skb drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xeafb75e0 amd_iommu_init_device drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL +0x4cb9e001 recover_lost_locks fs/nfs/nfs EXPORT_SYMBOL_GPL +0x2f1c02d3 register_nfs_version fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9047ffe4 kvm_hv_assist_page_enabled arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x94fb3de1 __tracepoint_kvm_nested_vmrun arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x2935b1be ip6_err_gen_icmpv6_unreach vmlinux EXPORT_SYMBOL +0x3f6e596e dev_addr_flush vmlinux EXPORT_SYMBOL +0xfd53991d nvmem_device_write vmlinux EXPORT_SYMBOL_GPL +0x0146d3cb thermal_notify_framework vmlinux EXPORT_SYMBOL_GPL +0xb4be9c93 dquot_quota_sync vmlinux EXPORT_SYMBOL +0xcd81a945 switch_fpu_return vmlinux EXPORT_SYMBOL_GPL +0xe218f2c6 sunrpc_cache_pipe_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x160fd629 uverbs_copy_to drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x1512b075 mlx4_cq_resize drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xfc382f2c drm_writeback_connector_init drivers/gpu/drm/drm EXPORT_SYMBOL +0xa731f387 nl_table_lock vmlinux EXPORT_SYMBOL_GPL +0x6642b2e3 usb_phy_roothub_set_mode vmlinux EXPORT_SYMBOL_GPL +0x5cfb26a0 acpi_enter_sleep_state vmlinux EXPORT_SYMBOL +0x21d6adf7 blkdev_get_by_dev vmlinux EXPORT_SYMBOL +0x16993f01 get_tree_keyed vmlinux EXPORT_SYMBOL +0xd89da37f movable_zone vmlinux EXPORT_SYMBOL +0x618911fc numa_node vmlinux EXPORT_SYMBOL +0xa83f641e ring_buffer_unlock_commit vmlinux EXPORT_SYMBOL_GPL +0xdbd019c4 ib_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL_GPL +0xe8944612 mlx4_map_sw_to_hw_steering_id drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf84ee996 vmci_qpair_peekv drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x5bbea15f nfs_scan_commit_list fs/nfs/nfs EXPORT_SYMBOL_GPL +0x83f9decf nfs4_dentry_operations fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe10d6901 ping_queue_rcv_skb vmlinux EXPORT_SYMBOL_GPL +0x6b55acd0 rtnl_lock_killable vmlinux EXPORT_SYMBOL +0xa4d286d7 rc_allocate_device vmlinux EXPORT_SYMBOL_GPL +0xc5996c8c pkcs7_get_content_data vmlinux EXPORT_SYMBOL_GPL +0x69e51172 simple_fill_super vmlinux EXPORT_SYMBOL +0xc1be0270 event_triggers_call vmlinux EXPORT_SYMBOL_GPL +0x07cc4a5d printk_timed_ratelimit vmlinux 
EXPORT_SYMBOL +0x6596d8d5 unwind_next_frame vmlinux EXPORT_SYMBOL_GPL +0xadbefda4 dm_cache_policy_destroy drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x3a18389a dm_rh_update_states drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x50ee5c07 cxgb4_best_aligned_mtu drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xcd38f326 pnfs_register_layoutdriver fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xc6ef6565 sock_queue_err_skb vmlinux EXPORT_SYMBOL +0xed3283a4 dm_bufio_set_sector_offset vmlinux EXPORT_SYMBOL_GPL +0x2bf10fee phy_create_lookup vmlinux EXPORT_SYMBOL_GPL +0x4baf7e59 sha256_final vmlinux EXPORT_SYMBOL +0xfe1d2e94 key_create_or_update vmlinux EXPORT_SYMBOL +0x96995abf devm_memunmap_pages vmlinux EXPORT_SYMBOL_GPL +0x91bb9ffa devm_memremap_pages vmlinux EXPORT_SYMBOL_GPL +0x1f8db7f9 ring_buffer_overrun_cpu vmlinux EXPORT_SYMBOL_GPL +0x0faef0ed __tasklet_schedule vmlinux EXPORT_SYMBOL +0x56466e42 ZSTD_CStreamInSize lib/zstd/zstd_compress EXPORT_SYMBOL +0x342b051b ceph_messenger_init net/ceph/libceph EXPORT_SYMBOL +0xa9354e03 xprt_adjust_cwnd net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb16d361d ib_process_mad_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x05a1d6a7 dev_load vmlinux EXPORT_SYMBOL +0xf4f14de6 rtnl_trylock vmlinux EXPORT_SYMBOL +0xa3fc6433 skb_flow_dissect_meta vmlinux EXPORT_SYMBOL +0xba2b7f64 cpufreq_generic_get vmlinux EXPORT_SYMBOL_GPL +0xf96ec8ad rc_keydown vmlinux EXPORT_SYMBOL_GPL +0x818416e1 scsi_set_sense_information vmlinux EXPORT_SYMBOL +0xa58c2966 bio_add_page vmlinux EXPORT_SYMBOL +0x3a2ef389 debugfs_create_u16 vmlinux EXPORT_SYMBOL_GPL +0x50ff880a __d_lookup_done vmlinux EXPORT_SYMBOL +0xa9086c55 init_on_alloc vmlinux EXPORT_SYMBOL +0x1a8c908d init_pid_ns vmlinux EXPORT_SYMBOL_GPL +0x8263a6d9 proc_douintvec vmlinux EXPORT_SYMBOL +0x04d3e81e ipt_unregister_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL +0xd19404ff team_options_register drivers/net/team/team EXPORT_SYMBOL +0x40f2b10c ipmi_alloc_smi_msg drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x2178b8c2 kvm_arch_register_noncoherent_dma arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x263f039e xas_nomem vmlinux EXPORT_SYMBOL_GPL +0x90709b70 gro_find_receive_by_type vmlinux EXPORT_SYMBOL +0x4caddd6b __sk_mem_raise_allocated vmlinux EXPORT_SYMBOL +0x933f75e0 usb_unlink_anchored_urbs vmlinux EXPORT_SYMBOL_GPL +0x1ed2a12e mptscsih_slave_destroy vmlinux EXPORT_SYMBOL +0x816347c6 agp_device_command vmlinux EXPORT_SYMBOL +0x107e5878 zlib_inflateEnd vmlinux EXPORT_SYMBOL +0x904d4134 __blkdev_issue_discard vmlinux EXPORT_SYMBOL +0x96863830 skcipher_walk_aead vmlinux EXPORT_SYMBOL_GPL +0xb706e27c rpc_num_bc_slots net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x37df9da2 cxgb3_register_client drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x48012e28 xt_check_proc_name vmlinux EXPORT_SYMBOL +0xec5bb8ca xt_unregister_target vmlinux EXPORT_SYMBOL +0x767e08a7 ehci_adjust_port_wakeup_flags vmlinux EXPORT_SYMBOL_GPL +0xb423dba1 console_blanked vmlinux EXPORT_SYMBOL +0x71235e12 hsu_dma_get_status vmlinux EXPORT_SYMBOL_GPL +0xa1e27118 pci_bus_add_device vmlinux EXPORT_SYMBOL_GPL +0x679c7ca5 ack_all_badblocks vmlinux EXPORT_SYMBOL_GPL +0x8510d9a4 handle_fasteoi_nmi vmlinux EXPORT_SYMBOL_GPL +0xf6584cd4 adf_vf2pf_init drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x22ae3f46 cxgb4_register_uld drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xdf8d781f acpi_update_all_gpes vmlinux EXPORT_SYMBOL +0x9cf37c44 __iowrite32_copy vmlinux EXPORT_SYMBOL_GPL +0xf14905ef config_group_init_type_name vmlinux EXPORT_SYMBOL +0xe36573a7 
__sb_end_write vmlinux EXPORT_SYMBOL +0x58d6311d trace_clock vmlinux EXPORT_SYMBOL_GPL +0xe5ee5dc6 from_kgid vmlinux EXPORT_SYMBOL +0x8c26d495 prepare_to_wait_event vmlinux EXPORT_SYMBOL +0xb1e91478 xdr_enter_page net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x88393d7e ct_sip_parse_address_param net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0x4b944f8f nf_conntrack_unregister_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x9f9ce350 nf_conntrack_helpers_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x11eff38c fib_rules_register vmlinux EXPORT_SYMBOL_GPL +0x39171ce6 devfreq_register_opp_notifier vmlinux EXPORT_SYMBOL +0x49f1ee93 cpufreq_generic_suspend vmlinux EXPORT_SYMBOL +0x1afc7dbc dev_pm_opp_get_level vmlinux EXPORT_SYMBOL_GPL +0x97e29d30 iscsi_create_conn vmlinux EXPORT_SYMBOL_GPL +0x906527e8 ata_do_eh vmlinux EXPORT_SYMBOL_GPL +0xc111ae64 intel_gtt_get vmlinux EXPORT_SYMBOL +0x509b64ea acpi_has_method vmlinux EXPORT_SYMBOL +0x821c17c1 simple_transaction_read vmlinux EXPORT_SYMBOL +0x8a99a016 mempool_free_slab vmlinux EXPORT_SYMBOL +0x0f0dbaa5 virtio_transport_set_max_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xebd83a95 virtio_transport_set_min_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x8fc66749 virtio_transport_get_max_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xef76aa04 virtio_transport_get_min_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x2481f5e4 mii_ethtool_set_link_ksettings drivers/net/mii EXPORT_SYMBOL +0x5f0b0174 mii_ethtool_get_link_ksettings drivers/net/mii EXPORT_SYMBOL +0x693e6f7b mlx4_write_mtt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x07e47a7a drm_dp_check_act_status drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x53483512 xdp_get_umem_from_qid vmlinux EXPORT_SYMBOL +0x73866b06 devlink_dpipe_entry_ctx_prepare vmlinux EXPORT_SYMBOL_GPL +0x6d3f57bd dm_bufio_get_client vmlinux EXPORT_SYMBOL_GPL +0x1047a62e md_bitmap_copy_from_slot vmlinux EXPORT_SYMBOL_GPL +0x77ae495d usb_speed_string vmlinux EXPORT_SYMBOL_GPL +0x67c497d2 pm_generic_resume_early vmlinux EXPORT_SYMBOL_GPL +0xc5d90567 pm_generic_thaw vmlinux EXPORT_SYMBOL_GPL +0xba5007a7 acpi_create_platform_device vmlinux EXPORT_SYMBOL_GPL +0xaa00fdc0 ec_transaction vmlinux EXPORT_SYMBOL +0x309f96a1 pci_scan_slot vmlinux EXPORT_SYMBOL +0x486bd25a net_dim vmlinux EXPORT_SYMBOL +0xbabd30f5 virtio_transport_dgram_allow net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xf162bdaa ceph_osdc_unwatch net/ceph/libceph EXPORT_SYMBOL +0x8e5e84aa gss_mech_register net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL +0xa440d162 vfio_external_group_match_file drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x889b24e1 __ip_mc_dec_group vmlinux EXPORT_SYMBOL +0xedc0456d __ip_mc_inc_group vmlinux EXPORT_SYMBOL +0x650ca472 __pm_runtime_resume vmlinux EXPORT_SYMBOL_GPL +0xf9e6ca87 devm_kstrdup_const vmlinux EXPORT_SYMBOL_GPL +0xa05d5ecc processors vmlinux EXPORT_SYMBOL +0x097dfae1 jbd2_journal_set_features vmlinux EXPORT_SYMBOL +0xd68d2ab1 mpage_readpages vmlinux EXPORT_SYMBOL +0xe7d6d2d4 filter_match_preds vmlinux EXPORT_SYMBOL_GPL +0x3517ce2d virtio_transport_notify_poll_in net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xeada5f85 mlx5_query_hca_vport_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x12dd1e77 ipmi_set_maintenance_mode drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xb85de48c 
__tracepoint_pnfs_mds_fallback_read_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x0715d95b iscsi_conn_get_param vmlinux EXPORT_SYMBOL_GPL +0xe5a98f4b device_get_phy_mode vmlinux EXPORT_SYMBOL_GPL +0x2177bd71 acpi_disable_event vmlinux EXPORT_SYMBOL +0xab962f36 pci_request_regions vmlinux EXPORT_SYMBOL +0x4df119fa __bitmap_parse vmlinux EXPORT_SYMBOL +0xf42b2d41 blk_mq_unique_tag vmlinux EXPORT_SYMBOL +0x2432a47c __wait_on_buffer vmlinux EXPORT_SYMBOL +0xa0d3456d nr_swap_pages vmlinux EXPORT_SYMBOL_GPL +0xb1464dc4 cancel_delayed_work_sync vmlinux EXPORT_SYMBOL +0xc063bd7c iscsit_find_cmd_from_itt_or_dump drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x9cedcd57 __tracepoint_bcache_alloc_fail drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xe477abc7 mlx5_create_lag_demux_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x13a35d44 lwtunnel_encap_add_ops vmlinux EXPORT_SYMBOL_GPL +0xd2b5eaa0 sock_create vmlinux EXPORT_SYMBOL +0xad84bef8 dm_table_event vmlinux EXPORT_SYMBOL +0x743a165e ata_pack_xfermask vmlinux EXPORT_SYMBOL_GPL +0xf4952bcc class_dev_iter_next vmlinux EXPORT_SYMBOL_GPL +0x03015344 iommu_domain_window_disable vmlinux EXPORT_SYMBOL_GPL +0xcd0d27ef clk_add_alias vmlinux EXPORT_SYMBOL +0xa0f74543 skcipher_walk_atomise vmlinux EXPORT_SYMBOL_GPL +0x3a53438c debugfs_create_x32 vmlinux EXPORT_SYMBOL_GPL +0x6af1cf17 bprm_change_interp vmlinux EXPORT_SYMBOL +0x5fcb6b64 kill_litter_super vmlinux EXPORT_SYMBOL +0x91f9e156 resource_list_create_entry vmlinux EXPORT_SYMBOL +0x81d0e5eb ip_vs_conn_new net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x0e1c1556 dm_region_hash_create drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xbe82d6cc mlxsw_env_get_module_info drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xb208b2bd drm_panel_disable drivers/gpu/drm/drm EXPORT_SYMBOL +0xf41518f1 drm_connector_attach_encoder drivers/gpu/drm/drm EXPORT_SYMBOL +0x6b7cee3e __fscache_read_or_alloc_page fs/fscache/fscache EXPORT_SYMBOL +0x8e7d8969 xfrm_dev_offload_ok vmlinux EXPORT_SYMBOL_GPL +0x141e5d9e inet_proto_csum_replace4 vmlinux EXPORT_SYMBOL +0x33a2e4e1 neigh_direct_output vmlinux EXPORT_SYMBOL +0xe9a2ae88 usb_free_urb vmlinux EXPORT_SYMBOL_GPL +0xf72deefa agp_generic_enable vmlinux EXPORT_SYMBOL +0x94be1223 tty_prepare_flip_string vmlinux EXPORT_SYMBOL_GPL +0x8cb0cfbe pci_add_resource_offset vmlinux EXPORT_SYMBOL +0xaa125fdd __breadahead_gfp vmlinux EXPORT_SYMBOL +0x75bda77a seq_hlist_next vmlinux EXPORT_SYMBOL +0x6db6f1f7 __lock_page_killable vmlinux EXPORT_SYMBOL_GPL +0xc84e6067 virtio_transport_notify_recv_pre_block net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x2929680a nf_ct_expect_iterate_net net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa3a79260 __rdma_accept drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x436cb6f0 cdrom_number_of_slots drivers/cdrom/cdrom EXPORT_SYMBOL +0x16c66718 drm_match_cea_mode drivers/gpu/drm/drm EXPORT_SYMBOL +0xb743e2c2 drm_atomic_helper_disable_all drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xd22ba75f fib_rules_unregister vmlinux EXPORT_SYMBOL_GPL +0xbcf1469c skb_copy_bits vmlinux EXPORT_SYMBOL +0x763bcdbd sock_no_sendpage_locked vmlinux EXPORT_SYMBOL +0x19b5f3a7 agp_generic_alloc_pages vmlinux EXPORT_SYMBOL +0x4a8a6949 get_random_bytes_arch vmlinux EXPORT_SYMBOL +0x2cda059d clk_hw_register_fractional_divider vmlinux EXPORT_SYMBOL_GPL +0x6ea7575d acpi_dispatch_gpe vmlinux EXPORT_SYMBOL +0x2080ec19 pci_irq_get_affinity vmlinux EXPORT_SYMBOL +0x4638fc62 pcim_iomap vmlinux EXPORT_SYMBOL +0x66892693 
crypto_register_ahash vmlinux EXPORT_SYMBOL_GPL +0xebc7eeed sysfs_remove_link vmlinux EXPORT_SYMBOL_GPL +0x425786aa kernfs_notify vmlinux EXPORT_SYMBOL_GPL +0x497070ab noop_direct_IO vmlinux EXPORT_SYMBOL_GPL +0xbb5ca7dc shmem_file_setup_with_mnt vmlinux EXPORT_SYMBOL_GPL +0x8924eb1e rcu_force_quiescent_state vmlinux EXPORT_SYMBOL_GPL +0xeb43b4f8 irq_domain_push_irq vmlinux EXPORT_SYMBOL_GPL +0x9e98722b ip_set_get_ipaddr6 net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xa293f8a6 ip_set_get_ipaddr4 net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x8d5fa512 ib_free_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x748548e1 mii_check_gmii_support drivers/net/mii EXPORT_SYMBOL +0x5fd43696 drm_vma_node_revoke drivers/gpu/drm/drm EXPORT_SYMBOL +0xf6fa5a6d drm_atomic_helper_damage_iter_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xd8662126 kvm_release_page_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x676b69e8 tcp_make_synack vmlinux EXPORT_SYMBOL +0x9ae6fd9f __skb_checksum vmlinux EXPORT_SYMBOL +0xe3644a8e hid_hw_stop vmlinux EXPORT_SYMBOL_GPL +0xa471982d dm_table_set_type vmlinux EXPORT_SYMBOL_GPL +0x1f4e5805 mptscsih_taskmgmt_response_code vmlinux EXPORT_SYMBOL +0xb21d5845 ata_ehi_clear_desc vmlinux EXPORT_SYMBOL_GPL +0xd4513015 nvdimm_bus_unlock vmlinux EXPORT_SYMBOL +0x05059229 dma_async_device_register vmlinux EXPORT_SYMBOL +0x6c86ba96 pci_resize_resource vmlinux EXPORT_SYMBOL +0xa1012536 t10_pi_type1_ip vmlinux EXPORT_SYMBOL +0xdc21e866 hrtimer_forward vmlinux EXPORT_SYMBOL_GPL +0x7d59dd46 pm_wq vmlinux EXPORT_SYMBOL_GPL +0xad9901ae bit_waitqueue vmlinux EXPORT_SYMBOL +0xf34b0dd3 dm_cell_put_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xb746c5d9 dm_cell_get_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x07a09607 ptp_clock_register drivers/ptp/ptp EXPORT_SYMBOL +0xbca7d862 drm_mode_validate_ycbcr420 drivers/gpu/drm/drm EXPORT_SYMBOL +0x536a486c drm_ioctl_kernel drivers/gpu/drm/drm EXPORT_SYMBOL +0xfe12bcb9 drm_dsc_compute_rc_parameters drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x21641019 sock_common_recvmsg vmlinux EXPORT_SYMBOL +0xb0152daa ata_wait_register vmlinux EXPORT_SYMBOL_GPL +0xd45434ee admin_timeout vmlinux EXPORT_SYMBOL_GPL +0x08020dcb nd_blk_region_set_provider_data vmlinux EXPORT_SYMBOL_GPL +0x33d3964f dma_request_chan vmlinux EXPORT_SYMBOL_GPL +0xceb66bec sched_clock_cpu vmlinux EXPORT_SYMBOL_GPL +0x5aeeee62 ceph_oid_aprintf net/ceph/libceph EXPORT_SYMBOL +0xe75b8c6b nf_tproxy_get_sock_v4 net/ipv4/netfilter/nf_tproxy_ipv4 EXPORT_SYMBOL_GPL +0xc8011f41 drm_crtc_check_viewport drivers/gpu/drm/drm EXPORT_SYMBOL +0x8339df73 klist_add_behind vmlinux EXPORT_SYMBOL_GPL +0x889d73d4 inet_csk_addr2sockaddr vmlinux EXPORT_SYMBOL_GPL +0xdfc86068 inet_ehash_locks_alloc vmlinux EXPORT_SYMBOL_GPL +0xa28cfcc0 gen_estimator_active vmlinux EXPORT_SYMBOL +0x8f945621 md_stop_writes vmlinux EXPORT_SYMBOL_GPL +0x14d01690 clk_mux_index_to_val vmlinux EXPORT_SYMBOL_GPL +0x574c2e74 bitmap_release_region vmlinux EXPORT_SYMBOL +0xd8fbd942 __blk_mq_debugfs_rq_show vmlinux EXPORT_SYMBOL_GPL +0x5c6579c1 jbd2_transaction_committed vmlinux EXPORT_SYMBOL +0x7054a3e4 request_dma vmlinux EXPORT_SYMBOL +0xb58aeaab kernel_cpustat vmlinux EXPORT_SYMBOL +0xd0ed307c edac_queue_work drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xbe38a431 dm_rh_recovery_prepare drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x5ff17b5c mlxsw_afa_block_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x4afa3b60 mlxsw_core_bus_device_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL 
+0x0208ab40 drm_dp_link_power_up drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x14b6a9dd ip_mc_inc_group vmlinux EXPORT_SYMBOL +0xab00d0e4 tcp_orphan_count vmlinux EXPORT_SYMBOL_GPL +0x875582b7 nvmem_del_cell_table vmlinux EXPORT_SYMBOL_GPL +0x0a3be8be input_setup_polling vmlinux EXPORT_SYMBOL +0x6856a2a7 usb_enable_intel_xhci_ports vmlinux EXPORT_SYMBOL_GPL +0x8d303b1b iscsi_pool_free vmlinux EXPORT_SYMBOL_GPL +0xe02feae8 ata_sff_data_xfer vmlinux EXPORT_SYMBOL_GPL +0x15049889 fb_center_logo vmlinux EXPORT_SYMBOL +0x35c3db7b request_key_with_auxdata vmlinux EXPORT_SYMBOL +0xbc4d265a fat_attach vmlinux EXPORT_SYMBOL_GPL +0xc39dbd62 sysfs_remove_bin_file vmlinux EXPORT_SYMBOL_GPL +0xc6648092 inode_nohighmem vmlinux EXPORT_SYMBOL +0x4a2a88cf file_write_and_wait_range vmlinux EXPORT_SYMBOL +0xf8efa8ca register_wide_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0xb7b01723 osd_req_op_cls_request_data_pages net/ceph/libceph EXPORT_SYMBOL +0x54cd6a3c ieee802154_register_hw net/mac802154/mac802154 EXPORT_SYMBOL +0xd740aeed nft_set_lookup_global net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x6f6da397 ib_umem_odp_unmap_dma_pages drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xd36937e7 mlx4_xrcd_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x156f3274 mlx4_gen_slaves_port_mgt_ev drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x656e4a6e snprintf vmlinux EXPORT_SYMBOL +0xdfc34011 tcf_exts_validate vmlinux EXPORT_SYMBOL +0x48c34d7e hid_dump_device vmlinux EXPORT_SYMBOL_GPL +0xac31a2f0 md_new_event vmlinux EXPORT_SYMBOL_GPL +0xc8505ee9 wakeup_source_unregister vmlinux EXPORT_SYMBOL_GPL +0x22f29dee fwnode_graph_get_endpoint_by_id vmlinux EXPORT_SYMBOL_GPL +0xabcfa03b __tracepoint_block_rq_remap vmlinux EXPORT_SYMBOL_GPL +0x75a0e572 proc_mkdir_mode vmlinux EXPORT_SYMBOL +0x33fb7b6e tag_pages_for_writeback vmlinux EXPORT_SYMBOL +0x93ca6625 rpcauth_destroy_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1be9f07a nvmem_register vmlinux EXPORT_SYMBOL_GPL +0x258f55da ata_scsi_simulate vmlinux EXPORT_SYMBOL_GPL +0xe146deca tty_port_link_device vmlinux EXPORT_SYMBOL_GPL +0x162d27b3 pinctrl_find_and_add_gpio_range vmlinux EXPORT_SYMBOL_GPL +0x1eaec09e sbitmap_get vmlinux EXPORT_SYMBOL_GPL +0x75133f6e visitor128 vmlinux EXPORT_SYMBOL_GPL +0x11c23240 crypto_attr_u32 vmlinux EXPORT_SYMBOL_GPL +0x2581a78a PDE_DATA vmlinux EXPORT_SYMBOL +0xba7b7662 fscrypt_free_bounce_page vmlinux EXPORT_SYMBOL +0xe01424a7 follow_pfn vmlinux EXPORT_SYMBOL +0xa6011be5 follow_pte vmlinux EXPORT_SYMBOL_GPL +0xee05f3e7 make_kuid vmlinux EXPORT_SYMBOL +0x1ee7d3cd hrtimer_init vmlinux EXPORT_SYMBOL_GPL +0xacf32165 __napi_alloc_skb vmlinux EXPORT_SYMBOL +0xc2ede5b7 usb_disable_xhci_ports vmlinux EXPORT_SYMBOL_GPL +0x295ae175 iscsi_session_teardown vmlinux EXPORT_SYMBOL_GPL +0xde946a36 scsi_report_opcode vmlinux EXPORT_SYMBOL +0xa2b34076 pm_clk_create vmlinux EXPORT_SYMBOL_GPL +0x4b085dbf agp3_generic_configure vmlinux EXPORT_SYMBOL +0xe1aaa4e3 agp_alloc_page_array vmlinux EXPORT_SYMBOL +0x9305bf68 find_next_and_bit vmlinux EXPORT_SYMBOL +0xada4b592 blk_mq_freeze_queue_wait_timeout vmlinux EXPORT_SYMBOL_GPL +0x7bca4bde file_remove_privs vmlinux EXPORT_SYMBOL +0xc683da81 set_memory_decrypted vmlinux EXPORT_SYMBOL_GPL +0xdadf6e58 nft_set_elem_destroy net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x77e2ca62 ptp_clock_unregister drivers/ptp/ptp EXPORT_SYMBOL +0xef346615 cxgbi_attr_is_visible drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x5c89c97b fuse_fill_super_common fs/fuse/fuse EXPORT_SYMBOL_GPL +0x3218530c 
fuse_dev_alloc fs/fuse/fuse EXPORT_SYMBOL_GPL +0x4c32f664 nfs_pageio_init_read fs/nfs/nfs EXPORT_SYMBOL_GPL +0x2c2f5a09 x86_family vmlinux EXPORT_SYMBOL_GPL +0x37013064 udp_seq_ops vmlinux EXPORT_SYMBOL +0x1b6314fd in_aton vmlinux EXPORT_SYMBOL +0x6a3e0781 neigh_resolve_output vmlinux EXPORT_SYMBOL +0x8516c14a flow_get_u32_dst vmlinux EXPORT_SYMBOL +0x06cf115a ufshcd_system_suspend vmlinux EXPORT_SYMBOL +0xfc4152fc ec_read vmlinux EXPORT_SYMBOL +0x6853dd3b cfb_imageblit vmlinux EXPORT_SYMBOL +0x804af87c wrmsr_safe_on_cpu vmlinux EXPORT_SYMBOL +0x627448c9 sysfs_unbreak_active_protection vmlinux EXPORT_SYMBOL_GPL +0x2ef72f05 __mnt_is_readonly vmlinux EXPORT_SYMBOL_GPL +0x02b24cd3 __tracepoint_cpu_idle vmlinux EXPORT_SYMBOL_GPL +0xb0354bc6 vsock_deliver_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x440faebe ovs_vport_ops_unregister net/openvswitch/openvswitch EXPORT_SYMBOL_GPL +0x5887ee1c nf_send_unreach net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL +0x80c63334 nf_nat_irc_hook net/netfilter/nf_conntrack_irc EXPORT_SYMBOL_GPL +0x8bff5b38 nf_nat_ftp_hook net/netfilter/nf_conntrack_ftp EXPORT_SYMBOL_GPL +0x11e943aa nf_ct_ext_add net/netfilter/nf_conntrack EXPORT_SYMBOL +0x7c1c5a24 ipmi_add_smi drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xf955e9c5 bprintf vmlinux EXPORT_SYMBOL_GPL +0x526497f9 tcp_close vmlinux EXPORT_SYMBOL +0xe338f9be inet_add_offload vmlinux EXPORT_SYMBOL +0xd6bbc80c __scm_send vmlinux EXPORT_SYMBOL +0xd76137fa skb_try_coalesce vmlinux EXPORT_SYMBOL +0xab162a1c mddev_resume vmlinux EXPORT_SYMBOL_GPL +0x85fa1488 get_thermal_instance vmlinux EXPORT_SYMBOL +0x6ed34dc7 usb_register_dev vmlinux EXPORT_SYMBOL_GPL +0x09bcb9c3 ufshcd_map_desc_id_to_length vmlinux EXPORT_SYMBOL +0x0c95600e scsi_dma_map vmlinux EXPORT_SYMBOL +0x00a4b044 amd_iommu_deactivate_guest_mode vmlinux EXPORT_SYMBOL +0xdb8e2631 pnp_register_driver vmlinux EXPORT_SYMBOL +0x52d717da xz_dec_init vmlinux EXPORT_SYMBOL +0x3f46bb8f try_to_free_buffers vmlinux EXPORT_SYMBOL +0x87883d35 xattr_full_name vmlinux EXPORT_SYMBOL +0x1d2d6d15 wait_iff_congested vmlinux EXPORT_SYMBOL +0x332246f1 blk_trace_setup vmlinux EXPORT_SYMBOL_GPL +0x4f6a07fe show_rcu_gp_kthreads vmlinux EXPORT_SYMBOL_GPL +0xfe039e4d __put_cred vmlinux EXPORT_SYMBOL +0x2689af6a ceph_auth_add_authorizer_challenge net/ceph/libceph EXPORT_SYMBOL +0x9f0898a5 iscsit_cause_connection_reinstatement drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x47f985aa drm_edid_duplicate drivers/gpu/drm/drm EXPORT_SYMBOL +0x5fc3a33f __drm_mm_interval_first drivers/gpu/drm/drm EXPORT_SYMBOL +0x882f74ab xfrm_migrate vmlinux EXPORT_SYMBOL +0x46fbc29f ping_getfrag vmlinux EXPORT_SYMBOL_GPL +0xa410a295 devlink_region_destroy vmlinux EXPORT_SYMBOL_GPL +0xff215667 skb_csum_hwoffload_help vmlinux EXPORT_SYMBOL +0x59d5cd79 dm_path_uevent vmlinux EXPORT_SYMBOL_GPL +0xb52ee8be intel_gtt_clear_range vmlinux EXPORT_SYMBOL +0xd489e4be serial8250_clear_and_reinit_fifos vmlinux EXPORT_SYMBOL_GPL +0x3d75083e register_framebuffer vmlinux EXPORT_SYMBOL +0xd55b05d6 crypto_unregister_templates vmlinux EXPORT_SYMBOL_GPL +0xc18ecfaa dquot_free_inode vmlinux EXPORT_SYMBOL +0x2dc5bf3d migrate_page_states vmlinux EXPORT_SYMBOL +0xaa8adc40 vsock_find_bound_socket net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x816a0198 drm_fb_helper_lastclose drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xedd092d5 power_supply_notifier vmlinux EXPORT_SYMBOL_GPL +0xd02298cb ata_std_sched_eh vmlinux EXPORT_SYMBOL_GPL +0xd32d3dfb nvdimm_volatile_region_create vmlinux EXPORT_SYMBOL_GPL +0x869b63c3 
pm_generic_suspend_late vmlinux EXPORT_SYMBOL_GPL +0x3cd06035 add_input_randomness vmlinux EXPORT_SYMBOL_GPL +0xa52cc31c clk_gate_ops vmlinux EXPORT_SYMBOL_GPL +0xb81e556e pinctrl_force_sleep vmlinux EXPORT_SYMBOL_GPL +0xc48c7b23 iov_iter_advance vmlinux EXPORT_SYMBOL +0x4bef1c67 empty_name vmlinux EXPORT_SYMBOL +0x001b074f mce_is_correctable vmlinux EXPORT_SYMBOL_GPL +0x750745eb xdr_set_scratch_buffer net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1fd0d69e nf_log_dump_vlan net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xc0689ece transport_lookup_tmr_lun drivers/target/target_core_mod EXPORT_SYMBOL +0x6c2fc102 transport_lookup_cmd_lun drivers/target/target_core_mod EXPORT_SYMBOL +0x2f7452c2 register_cdrom drivers/cdrom/cdrom EXPORT_SYMBOL +0x7784c52d drm_hdmi_avi_infoframe_content_type drivers/gpu/drm/drm EXPORT_SYMBOL +0xc7e14572 scsi_unblock_requests vmlinux EXPORT_SYMBOL +0x09b9cf56 device_remove_bin_file vmlinux EXPORT_SYMBOL_GPL +0xd802e977 mipi_dsi_generic_read vmlinux EXPORT_SYMBOL +0x2b36a2ff tty_port_free_xmit_buf vmlinux EXPORT_SYMBOL +0xa93139ae pci_irq_get_node vmlinux EXPORT_SYMBOL +0xc97a00c9 alarm_try_to_cancel vmlinux EXPORT_SYMBOL_GPL +0xaf5bf6ef nfs_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9d4bddab nf_ct_deliver_cached_events net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa2ea5542 dm_bitset_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xd947d3a3 xfrm_replay_seqhi vmlinux EXPORT_SYMBOL +0x4faa465a tcp_enter_cwr vmlinux EXPORT_SYMBOL +0xa40ff01b acpi_dbg_layer vmlinux EXPORT_SYMBOL +0x3c212744 sbitmap_del_wait_queue vmlinux EXPORT_SYMBOL_GPL +0x9e9fdd9d memunmap vmlinux EXPORT_SYMBOL +0x4d924f20 memremap vmlinux EXPORT_SYMBOL +0x28aa6a67 call_rcu vmlinux EXPORT_SYMBOL_GPL +0x2e5ff5e3 devm_request_any_context_irq vmlinux EXPORT_SYMBOL +0xd562d88b ieee802154_hdr_peek net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL +0x0ee34bbf ieee802154_hdr_pull net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL +0x4f7f5f3a ieee802154_hdr_push net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL +0xbb461fb7 dm_bitset_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x25daeb69 mlx5_cmd_free_uar drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa414f0ab mlx4_mtt_init drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x39a7bf9b mlx4_get_cpu_rmap drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x75fb81df kvm_mmu_new_cr3 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x5b9fc1e3 dev_pm_opp_put_prop_name vmlinux EXPORT_SYMBOL_GPL +0x68592f21 nvme_alloc_request vmlinux EXPORT_SYMBOL_GPL +0xb605aeff hwrng_unregister vmlinux EXPORT_SYMBOL_GPL +0x9aacf209 pci_try_reset_function vmlinux EXPORT_SYMBOL_GPL +0x5aa1a503 fs_parse vmlinux EXPORT_SYMBOL +0x86169f3e amd_smn_write vmlinux EXPORT_SYMBOL_GPL +0x3bf0088f ip_tunnel_change_mtu net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x78b34574 ipvlan_link_register drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL +0xa413ea30 mlx5_set_port_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xf9500d2c drm_dp_mst_get_port_malloc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x03cd3209 xfrm6_rcv_spi vmlinux EXPORT_SYMBOL +0x46b6ada4 tcf_exts_change vmlinux EXPORT_SYMBOL +0x9625695d acpi_install_gpe_block vmlinux EXPORT_SYMBOL +0xcfcded97 disk_stack_limits vmlinux EXPORT_SYMBOL +0x9879932b crypto_register_notifier vmlinux EXPORT_SYMBOL_GPL +0xf45b685c padata_free_shell vmlinux EXPORT_SYMBOL +0x187ffb13 dm_dirty_log_destroy drivers/md/dm-log EXPORT_SYMBOL +0xe1dcc995 drm_client_framebuffer_delete 
drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0483ab25 fib_table_lookup vmlinux EXPORT_SYMBOL_GPL
+0x4417848c skb_mpls_update_lse vmlinux EXPORT_SYMBOL_GPL
+0xf7f1ca9c pskb_put vmlinux EXPORT_SYMBOL_GPL
+0x85adb7da __efivar_entry_iter vmlinux EXPORT_SYMBOL_GPL
+0x816a41ca cpufreq_update_limits vmlinux EXPORT_SYMBOL_GPL
+0xb6857678 ata_sas_slave_configure vmlinux EXPORT_SYMBOL_GPL
+0x83441042 ata_qc_complete vmlinux EXPORT_SYMBOL_GPL
+0xc9417287 nvme_submit_sync_cmd vmlinux EXPORT_SYMBOL_GPL
+0x975db5f2 device_reprobe vmlinux EXPORT_SYMBOL_GPL
+0xcfdb6b95 virtio_finalize_features vmlinux EXPORT_SYMBOL_GPL
+0x7e4edd15 virtio_config_enable vmlinux EXPORT_SYMBOL_GPL
+0x479c3c86 find_next_zero_bit vmlinux EXPORT_SYMBOL
+0xfe15bab5 inc_nlink vmlinux EXPORT_SYMBOL
+0x8a1ab4ee timeval_to_jiffies vmlinux EXPORT_SYMBOL
+0x8ca4bc46 smpboot_unregister_percpu_thread vmlinux EXPORT_SYMBOL_GPL
+0xa5f71562 nft_reject_policy net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x1e491a04 ib_unmap_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7774620f dm_rh_stop_recovery drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xcf45d26b cxgbi_ppm_ppod_release drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL
+0x94177e77 devm_drm_panel_bridge_add drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5cea0c9c serpent_ctr_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0x2a1cb591 __netlink_dump_start vmlinux EXPORT_SYMBOL
+0xa81bc8f5 sk_msg_return_zero vmlinux EXPORT_SYMBOL_GPL
+0x036f0563 phy_write_mmd vmlinux EXPORT_SYMBOL
+0xbcdd5b99 iommu_group_set_name vmlinux EXPORT_SYMBOL_GPL
+0xf1160942 security_task_getsecid vmlinux EXPORT_SYMBOL
+0xcd9a1641 seq_open vmlinux EXPORT_SYMBOL
+0x341cbc94 mnt_drop_write vmlinux EXPORT_SYMBOL_GPL
+0x7c88ec47 vmf_insert_pfn_prot vmlinux EXPORT_SYMBOL
+0xaf793668 __alloc_percpu_gfp vmlinux EXPORT_SYMBOL_GPL
+0xf9eb42f9 ceph_osdc_readpages net/ceph/libceph EXPORT_SYMBOL
+0x7bf7f878 rpc_uaddr2sockaddr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xff9369b2 nft_fib6_eval_type net/ipv6/netfilter/nft_fib_ipv6 EXPORT_SYMBOL_GPL
+0x1adc4f19 nft_fib4_eval_type net/ipv4/netfilter/nft_fib_ipv4 EXPORT_SYMBOL_GPL
+0x66223988 tap_free_minor drivers/net/tap EXPORT_SYMBOL_GPL
+0xb86337d8 fscache_mark_page_cached fs/fscache/fscache EXPORT_SYMBOL
+0xd1e246a2 xt_compat_unlock vmlinux EXPORT_SYMBOL_GPL
+0xaa14c9db cpufreq_freq_transition_begin vmlinux EXPORT_SYMBOL_GPL
+0x330a1614 iscsi_eh_device_reset vmlinux EXPORT_SYMBOL_GPL
+0x8d9ca0e6 dma_fence_enable_sw_signaling vmlinux EXPORT_SYMBOL
+0x22de4931 amd_iommu_register_ga_log_notifier vmlinux EXPORT_SYMBOL
+0x688a56d9 pci_bus_type vmlinux EXPORT_SYMBOL
+0x6c7295ad simple_readpage vmlinux EXPORT_SYMBOL
+0x0b288197 __page_frag_cache_drain vmlinux EXPORT_SYMBOL
+0xbaa41c42 __lock_page vmlinux EXPORT_SYMBOL
+0x0514bc90 ring_buffer_read vmlinux EXPORT_SYMBOL_GPL
+0xd333d6e3 synchronize_srcu_expedited vmlinux EXPORT_SYMBOL_GPL
+0x43cd47e6 osd_req_op_cls_request_data_bvecs net/ceph/libceph EXPORT_SYMBOL
+0xa909cfc5 vhost_work_init drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xda0d50ec ib_sa_cancel_query drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x9e2ce09f nfs_close_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x26d6d9b8 tcp_cong_avoid_ai vmlinux EXPORT_SYMBOL_GPL
+0xcabe04de cpuidle_resume_and_unlock vmlinux EXPORT_SYMBOL_GPL
+0x6a8ddb12 usb_altnum_to_altsetting vmlinux EXPORT_SYMBOL_GPL
+0x33b74237 pm_clk_runtime_resume vmlinux EXPORT_SYMBOL_GPL
+0xa822364e dev_pm_qos_update_user_latency_tolerance vmlinux EXPORT_SYMBOL_GPL
+0xeadbd138 pci_create_slot vmlinux EXPORT_SYMBOL_GPL
+0xd774957d mpi_write_to_sgl vmlinux EXPORT_SYMBOL_GPL
+0xc8e3332b raid6_gflog vmlinux EXPORT_SYMBOL
+0xf44d53da security_secid_to_secctx vmlinux EXPORT_SYMBOL
+0x337eb10e follow_down vmlinux EXPORT_SYMBOL
+0x0679ee3b filemap_write_and_wait_range vmlinux EXPORT_SYMBOL
+0x38722f80 kernel_fpu_end vmlinux EXPORT_SYMBOL_GPL
+0x264afff3 rpcauth_lookupcred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6c121565 vhost_chr_write_iter drivers/vhost/vhost EXPORT_SYMBOL
+0xdc76a3e9 drm_atomic_get_old_private_obj_state drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6fd7a6f9 eth_get_headlen vmlinux EXPORT_SYMBOL
+0xbeb44a99 md_reap_sync_thread vmlinux EXPORT_SYMBOL
+0xae473c62 nd_btt_version vmlinux EXPORT_SYMBOL
+0xfab53ed9 pinctrl_gpio_can_use_line vmlinux EXPORT_SYMBOL_GPL
+0x360b1afe probe_irq_mask vmlinux EXPORT_SYMBOL
+0xa18565cb __tracepoint_kvm_ple_window_update arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa61e96e0 genphy_read_status vmlinux EXPORT_SYMBOL
+0x24f63dcf ata_xfer_mask2mode vmlinux EXPORT_SYMBOL_GPL
+0x4c4d6f04 redraw_screen vmlinux EXPORT_SYMBOL
+0x8fa150c5 tty_unregister_driver vmlinux EXPORT_SYMBOL
+0xf399aa41 virtqueue_detach_unused_buf vmlinux EXPORT_SYMBOL_GPL
+0x5067fbf1 __acpi_nfit_notify vmlinux EXPORT_SYMBOL_GPL
+0x00a4eda1 pci_reenable_device vmlinux EXPORT_SYMBOL
+0x45187c9e d_instantiate_new vmlinux EXPORT_SYMBOL
+0x1d9a1fd7 invalidate_mapping_pages vmlinux EXPORT_SYMBOL
+0xe61ee83b devm_release_resource vmlinux EXPORT_SYMBOL
+0xa4c8127c ZSTD_maxCLevel lib/zstd/zstd_compress EXPORT_SYMBOL
+0xc3a4bbce ieee802154_alloc_hw net/mac802154/mac802154 EXPORT_SYMBOL
+0x5a6858b6 drm_connector_attach_content_protection_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x48419911 drm_event_reserve_init_locked drivers/gpu/drm/drm EXPORT_SYMBOL
+0x53408176 drm_poll drivers/gpu/drm/drm EXPORT_SYMBOL
+0x749b33d1 __drm_atomic_helper_crtc_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xf7589bb0 napi_disable vmlinux EXPORT_SYMBOL
+0xd93a5cb1 efivar_variable_is_removable vmlinux EXPORT_SYMBOL_GPL
+0x931a85ce efivar_work vmlinux EXPORT_SYMBOL_GPL
+0x97eadab9 qlt_stop_phase2 vmlinux EXPORT_SYMBOL
+0xee36e696 qlt_stop_phase1 vmlinux EXPORT_SYMBOL
+0x14d42ca1 pci_enable_rom vmlinux EXPORT_SYMBOL_GPL
+0x142fa677 pinctrl_find_gpio_range_from_pin vmlinux EXPORT_SYMBOL_GPL
+0x7c173634 __bitmap_complement vmlinux EXPORT_SYMBOL
+0x9291cd3b memdup_user vmlinux EXPORT_SYMBOL
+0x0a0c2e11 events_sysfs_show vmlinux EXPORT_SYMBOL_GPL
+0xd2b4231e synproxy_recv_client_ack net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x45ad0b54 drm_framebuffer_plane_width drivers/gpu/drm/drm EXPORT_SYMBOL
+0xffd108e3 drm_gem_dmabuf_vunmap drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2b33fb08 drm_atomic_helper_connector_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x13bd3ab3 crypto_engine_alloc_init crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xa4712048 kvm_lmsw arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x8b56c107 ping_get_port vmlinux EXPORT_SYMBOL_GPL
+0x7eb18a0e raw_abort vmlinux EXPORT_SYMBOL_GPL
+0xcdb6adcc ras_userspace_consumers vmlinux EXPORT_SYMBOL_GPL
+0x246c4cb6 thermal_cooling_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0x31953839 __clk_mux_determine_rate vmlinux EXPORT_SYMBOL_GPL
+0x969d216d blk_mq_free_request vmlinux EXPORT_SYMBOL_GPL
+0xb48d4d22 security_sb_eat_lsm_opts vmlinux EXPORT_SYMBOL
+0x6563b018 del_timer vmlinux EXPORT_SYMBOL
+0x3162a540 dma_get_required_mask vmlinux EXPORT_SYMBOL_GPL
+0xd7529d61 osd_req_op_xattr_init net/ceph/libceph EXPORT_SYMBOL
+0x06209729 target_register_template drivers/target/target_core_mod EXPORT_SYMBOL
+0xcb8259a7 adf_send_admin_init drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0xfaaa4831 ipmi_set_my_address drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x0606b1da nfs_init_cinfo fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xc4a9a1a9 kvm_cpu_has_pending_timer arch/x86/kvm/kvm EXPORT_SYMBOL
+0x9f4e1e9c nf_log_unbind_pf vmlinux EXPORT_SYMBOL
+0xc654d3f4 lwtunnel_valid_encap_type vmlinux EXPORT_SYMBOL_GPL
+0x1f119dbc net_dm_hw_report vmlinux EXPORT_SYMBOL_GPL
+0x914ec402 usb_get_current_frame_number vmlinux EXPORT_SYMBOL_GPL
+0xd3dccd57 phy_resolve_aneg_pause vmlinux EXPORT_SYMBOL_GPL
+0xa9c72303 amd_iommu_pc_get_max_banks vmlinux EXPORT_SYMBOL
+0xa47a18fa pcie_update_link_speed vmlinux EXPORT_SYMBOL_GPL
+0xe47e5cfe blk_mq_start_request vmlinux EXPORT_SYMBOL
+0x46b8f9bb nf_ct_expect_find_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x27207461 nf_ct_iterate_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xfe25efc5 transport_generic_request_failure drivers/target/target_core_mod EXPORT_SYMBOL
+0x65835607 __tracepoint_bcache_btree_write drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xaf6cea1d drm_display_info_set_bus_formats drivers/gpu/drm/drm EXPORT_SYMBOL
+0xef41fc4c pnfs_nfs_generic_sync fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xc483012d tcp_v4_send_check vmlinux EXPORT_SYMBOL
+0x0d22d728 devlink_region_snapshot_create vmlinux EXPORT_SYMBOL_GPL
+0x0316b548 dev_close vmlinux EXPORT_SYMBOL
+0xf04ee806 dma_buf_put vmlinux EXPORT_SYMBOL_GPL
+0x41aa6f53 dma_buf_get vmlinux EXPORT_SYMBOL_GPL
+0x06483b58 __nd_driver_register vmlinux EXPORT_SYMBOL
+0x8d68ce66 pci_hp_add_bridge vmlinux EXPORT_SYMBOL_GPL
+0xb13e80c8 __mark_inode_dirty vmlinux EXPORT_SYMBOL
+0xdafcdc3a ktime_get_snapshot vmlinux EXPORT_SYMBOL_GPL
+0x188ea314 jiffies_to_timespec64 vmlinux EXPORT_SYMBOL
+0x528db4b6 iscsit_process_text_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xa916b694 strnlen vmlinux EXPORT_SYMBOL
+0x75352078 ip6_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0xae26898f tcp_set_rcvlowat vmlinux EXPORT_SYMBOL
+0x8ddc39e7 noop_qdisc vmlinux EXPORT_SYMBOL
+0x45aea5f1 sock_alloc_send_skb vmlinux EXPORT_SYMBOL
+0x8fad9b85 dm_internal_suspend_noflush vmlinux EXPORT_SYMBOL_GPL
+0xa0e2da66 dm_put vmlinux EXPORT_SYMBOL_GPL
+0x31896172 firmware_request_cache vmlinux EXPORT_SYMBOL_GPL
+0x70920aa2 acpi_bus_get_device vmlinux EXPORT_SYMBOL
+0x9a8df9e2 devm_of_phy_get_by_index vmlinux EXPORT_SYMBOL_GPL
+0x727242a9 sha256_update vmlinux EXPORT_SYMBOL
+0xed4af76c blk_mq_init_allocated_queue vmlinux EXPORT_SYMBOL
+0xd8dced89 blk_mq_quiesce_queue_nowait vmlinux EXPORT_SYMBOL_GPL
+0x44357507 unregister_filesystem vmlinux EXPORT_SYMBOL
+0x0ba0b938 vm_brk vmlinux EXPORT_SYMBOL
+0xd96b056c irq_domain_add_simple vmlinux EXPORT_SYMBOL_GPL
+0xb81685cf adjust_resource vmlinux EXPORT_SYMBOL
+0x0bc00f80 nf_nat_helper_try_module_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xa3ede235 dm_dirty_log_create drivers/md/dm-log EXPORT_SYMBOL
+0xee61eb71 camellia_crypt_ctr arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL
+0xa5b5fdf1 xfrm_state_insert vmlinux EXPORT_SYMBOL
+0x94d61be4 metadata_dst_alloc vmlinux EXPORT_SYMBOL_GPL
+0xb07793b7 devm_rc_allocate_device vmlinux EXPORT_SYMBOL_GPL
+0xcc8b33da configfs_unregister_default_group vmlinux EXPORT_SYMBOL
+0x9b4f6661 kmsg_dump_register vmlinux EXPORT_SYMBOL_GPL
+0x27ed53a0 ceph_msg_data_add_bio net/ceph/libceph EXPORT_SYMBOL
+0x188bd0c0 xprt_setup_backchannel net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xecbd57ca rpc_peeraddr2str net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x95dbbb90 rdma_hold_gid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd72759b7 mlx4_SET_MCAST_FLTR drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xd1b6f9ab cxgb4_create_server_filter drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0xddf68fc6 xt_find_revision vmlinux EXPORT_SYMBOL_GPL
+0xcd9bbc15 sk_psock_init vmlinux EXPORT_SYMBOL_GPL
+0xac32844c dev_pm_opp_set_clkname vmlinux EXPORT_SYMBOL_GPL
+0x826ad85e ahci_reset_controller vmlinux EXPORT_SYMBOL_GPL
+0x9a76a6b8 platform_irq_count vmlinux EXPORT_SYMBOL_GPL
+0xb5ff8aa8 __destroy_inode vmlinux EXPORT_SYMBOL
+0x1ac163e9 nf_osf_find net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL
+0x9aa70cc2 rdma_join_multicast drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xb4295bba iw_cm_listen drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x816f707f mlx4_srq_arm drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x1015de7b bcm_phy_config_intr drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x4868e28a drm_plane_create_blend_mode_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4b318bc8 drm_legacy_pci_exit drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8d9b761c camellia_decrypt_cbc_2way arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL
+0x3050df47 mpt_event_register vmlinux EXPORT_SYMBOL
+0x429970df mdio_device_remove vmlinux EXPORT_SYMBOL
+0x033d5e96 dma_resv_add_excl_fence vmlinux EXPORT_SYMBOL
+0x02293ac3 dma_fence_chain_ops vmlinux EXPORT_SYMBOL
+0xad19706f device_property_present vmlinux EXPORT_SYMBOL_GPL
+0x174edf83 pci_bus_add_devices vmlinux EXPORT_SYMBOL
+0xcd1a37be blk_rq_unmap_user vmlinux EXPORT_SYMBOL
+0xbd666dd5 crypto_mod_get vmlinux EXPORT_SYMBOL_GPL
+0xc3c635f1 dma_direct_unmap_sg vmlinux EXPORT_SYMBOL
+0x0bc477a2 irq_set_irq_type vmlinux EXPORT_SYMBOL
+0xce2840e7 irq_set_irq_wake vmlinux EXPORT_SYMBOL
+0xfcdd2d96 nf_flow_table_init net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0xd9887208 dm_bio_prison_alloc_cell_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x9cbf026d mlxsw_afa_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x0e81c09c mlxsw_afk_destroy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x18eb3466 cxgb4_select_ntuple drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x853eada1 drm_get_cea_aspect_ratio drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb95d8751 drm_dp_link_power_down drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x013a8e66 sk_msg_free_partial vmlinux EXPORT_SYMBOL_GPL
+0xe93656e7 fc_remove_host vmlinux EXPORT_SYMBOL
+0xf4820334 ata_sas_port_init vmlinux EXPORT_SYMBOL_GPL
+0x52fc2955 __remove_inode_hash vmlinux EXPORT_SYMBOL
+0x4bbfbcac l2tp_session_free net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x04b7e19a mlx5_core_destroy_mkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x80cf54d4 device_match_acpi_dev vmlinux EXPORT_SYMBOL
+0xb394c5a3 __pci_reset_function_locked vmlinux EXPORT_SYMBOL_GPL
+0x6128b5fc __printk_ratelimit vmlinux EXPORT_SYMBOL
+0x10c653d5 __percpu_up_read vmlinux EXPORT_SYMBOL_GPL
+0x50d1f870 pgprot_writecombine vmlinux EXPORT_SYMBOL_GPL
+0xc4765071 xdr_read_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5875c6fa mlx5_get_fdb_sub_ns drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x45b122b5 drm_property_lookup_blob drivers/gpu/drm/drm EXPORT_SYMBOL
+0xfad1ca9c drm_modeset_unlock drivers/gpu/drm/drm EXPORT_SYMBOL
+0x73d4e4b3 tcf_chain_put_by_act vmlinux EXPORT_SYMBOL
+0xd04c1a64 sysctl_devconf_inherit_init_net vmlinux EXPORT_SYMBOL
+0xbf3326c8 iscsi_dbg_trace vmlinux EXPORT_SYMBOL_GPL
+0x6877b3a1 bdev_dax_pgoff vmlinux EXPORT_SYMBOL
+0xdcb28251 pm_generic_restore_early vmlinux EXPORT_SYMBOL_GPL
+0x7b4fb9db acpi_dma_request_slave_chan_by_name vmlinux EXPORT_SYMBOL_GPL
+0xe428f6f4 clk_hw_get_parent_by_index vmlinux EXPORT_SYMBOL_GPL
+0xb78debe3 LZ4_decompress_fast_usingDict vmlinux EXPORT_SYMBOL
+0x62acea59 blk_mq_can_queue vmlinux EXPORT_SYMBOL
+0x94bf03ca utf8_to_utf32 vmlinux EXPORT_SYMBOL
+0x9d09e8ae ring_buffer_event_data vmlinux EXPORT_SYMBOL_GPL
+0x8f2eb429 kvm_arch_para_hints vmlinux EXPORT_SYMBOL_GPL
+0xd2a2e64f dm_dirty_log_type_unregister drivers/md/dm-log EXPORT_SYMBOL
+0x0b73bbb6 kvm_require_dr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x6fbb9701 skb_vlan_pop vmlinux EXPORT_SYMBOL
+0xe5c6d211 fcoe_fcf_device_add vmlinux EXPORT_SYMBOL_GPL
+0x5e58974b iscsi_create_session vmlinux EXPORT_SYMBOL_GPL
+0x030d3398 __pm_runtime_disable vmlinux EXPORT_SYMBOL_GPL
+0x2fcc2322 paste_selection vmlinux EXPORT_SYMBOL_GPL
+0x34740a3c phy_pm_runtime_get_sync vmlinux EXPORT_SYMBOL_GPL
+0xb2351360 crypto_sha1_update vmlinux EXPORT_SYMBOL
+0xd04c7414 __tracepoint_kfree vmlinux EXPORT_SYMBOL
+0xba12c985 perf_aux_output_end vmlinux EXPORT_SYMBOL_GPL
+0x7737dabf bpf_prog_select_runtime vmlinux EXPORT_SYMBOL_GPL
+0xed814ec1 kmsg_dump_unregister vmlinux EXPORT_SYMBOL_GPL
+0xb9f012b1 override_creds vmlinux EXPORT_SYMBOL
+0x131db64a system_long_wq vmlinux EXPORT_SYMBOL_GPL
+0xf9ab8c31 nf_nat_helper_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x50dbc447 ib_flush_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xe9b6a6df mlx4_get_base_qpn drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x4967e79f radix_tree_iter_resume vmlinux EXPORT_SYMBOL
+0xab239b5a __ip_queue_xmit vmlinux EXPORT_SYMBOL
+0x7da72a59 dca_remove_requester vmlinux EXPORT_SYMBOL_GPL
+0x04b37eb5 input_register_handle vmlinux EXPORT_SYMBOL
+0x0af8e078 fcoe_ctlr_get_lesb vmlinux EXPORT_SYMBOL_GPL
+0x8d22bb58 iommu_group_alloc vmlinux EXPORT_SYMBOL_GPL
+0x89008c09 clk_mux_ops vmlinux EXPORT_SYMBOL_GPL
+0x6259d291 clk_restore_context vmlinux EXPORT_SYMBOL_GPL
+0x73565103 acpi_device_hid vmlinux EXPORT_SYMBOL
+0x8a45a555 acpi_unregister_wakeup_handler vmlinux EXPORT_SYMBOL_GPL
+0x8338df66 blkdev_issue_zeroout vmlinux EXPORT_SYMBOL
+0x5dd6868e get_tree_bdev vmlinux EXPORT_SYMBOL
+0x0ce01994 handle_level_irq vmlinux EXPORT_SYMBOL_GPL
+0x3c8d7111 ceph_get_num_objects net/ceph/libceph EXPORT_SYMBOL
+0x5a7ad8fc bch_bset_insert drivers/md/bcache/bcache EXPORT_SYMBOL
+0xe3b9673c mlxsw_core_skb_receive drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xfb86b96f dlm_errname fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x43a5fbd7 nfs4_init_deviceid_node fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x445e4fda __fscache_register_netfs fs/fscache/fscache EXPORT_SYMBOL
+0xf4b47ee2 sk_msg_free_nocharge vmlinux EXPORT_SYMBOL_GPL
+0x0b64405b phy_mac_interrupt vmlinux EXPORT_SYMBOL
+0xd6917aa9 iscsi_eh_abort vmlinux EXPORT_SYMBOL_GPL
+0x2500ea22 ata_dev_printk vmlinux EXPORT_SYMBOL
+0x04df8fbc lzo1x_decompress_safe vmlinux EXPORT_SYMBOL_GPL
+0xdacbfb54 blkcg_deactivate_policy vmlinux EXPORT_SYMBOL_GPL
+0xc0b0b714 __bio_clone_fast vmlinux EXPORT_SYMBOL
+0x470f8e04 kern_unmount vmlinux EXPORT_SYMBOL
+0xa438022d ceph_osdc_put_request net/ceph/libceph EXPORT_SYMBOL
+0xfc82c743 ceph_con_init net/ceph/libceph EXPORT_SYMBOL
+0xbbc125d9 ip_vs_proto_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x592baf75 iscsit_add_reject drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xe278bd6d __tracepoint_bcache_cache_insert drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x3991c6bc drm_atomic_set_mode_prop_for_crtc drivers/gpu/drm/drm EXPORT_SYMBOL
+0x88d38d63 __fscache_read_or_alloc_pages fs/fscache/fscache EXPORT_SYMBOL
+0x5c5e8c3f kvm_mmu_reset_context arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xc33de4ca usb_control_msg vmlinux EXPORT_SYMBOL_GPL
+0x465b8639 nla_append vmlinux EXPORT_SYMBOL
+0x8328f02b pcim_iomap_regions vmlinux EXPORT_SYMBOL
+0x3b23c5db jbd2_journal_lock_updates vmlinux EXPORT_SYMBOL
+0x16064921 vfs_ioc_setflags_prepare vmlinux EXPORT_SYMBOL
+0xdd3bf925 vfs_llseek vmlinux EXPORT_SYMBOL
+0xd6d8424c __page_file_index vmlinux EXPORT_SYMBOL_GPL
+0x84f9fd78 __irq_set_handler vmlinux EXPORT_SYMBOL_GPL
+0xf2b81b64 arch_io_reserve_memtype_wc vmlinux EXPORT_SYMBOL
+0x19842035 mlx4_phys_to_slaves_pport_actv drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x867e87eb dm_bufio_get_dm_io_client vmlinux EXPORT_SYMBOL_GPL
+0x11e0ec41 dm_read_arg vmlinux EXPORT_SYMBOL
+0x5aaa8c8d iterate_fd vmlinux EXPORT_SYMBOL
+0x7ceaf0d5 generic_handle_irq vmlinux EXPORT_SYMBOL_GPL
+0x7aff77a3 __cpu_present_mask vmlinux EXPORT_SYMBOL
+0xde4eeab5 __register_nmi_handler vmlinux EXPORT_SYMBOL
+0x1f03912b ZSTD_flushStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x074de80e vsock_stream_has_space net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x91316bff nfs4_schedule_lease_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xbfacb837 xt_percpu_counter_free vmlinux EXPORT_SYMBOL_GPL
+0x5c2bcd37 bpf_warn_invalid_xdp_action vmlinux EXPORT_SYMBOL_GPL
+0xb8576104 iscsi_add_session vmlinux EXPORT_SYMBOL_GPL
+0x81de37ae device_property_read_u8_array vmlinux EXPORT_SYMBOL_GPL
+0xba287fc2 dquot_resume vmlinux EXPORT_SYMBOL
+0xe17e5220 blocking_notifier_chain_cond_register vmlinux EXPORT_SYMBOL_GPL
+0xd95ac116 vsock_addr_validate net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xe2e467f7 __xfrm_state_destroy vmlinux EXPORT_SYMBOL
+0x311f0f77 tcp_v4_md5_hash_skb vmlinux EXPORT_SYMBOL
+0xf18d5730 bfifo_qdisc_ops vmlinux EXPORT_SYMBOL
+0xd9424fa6 devm_devfreq_unregister_opp_notifier vmlinux EXPORT_SYMBOL
+0xd41147a6 mddev_init_writes_pending vmlinux EXPORT_SYMBOL_GPL
+0x721b39ff usb_unpoison_urb vmlinux EXPORT_SYMBOL_GPL
+0x172d7d91 nvdimm_namespace_locked vmlinux EXPORT_SYMBOL
+0x975dfeb9 tty_port_init vmlinux EXPORT_SYMBOL
+0x7ae56327 complete_request_key vmlinux EXPORT_SYMBOL
+0x1f8544b8 panic_timeout vmlinux EXPORT_SYMBOL_GPL
+0x37746fde ZSTD_initDStream lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x30af45a1 ZSTD_initCStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0xbc3c5f8b mlx5_eq_notifier_unregister drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc9fae756 ocfs2_cluster_connect fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x1b459a9d nf_queue vmlinux EXPORT_SYMBOL_GPL
+0xb3aea62e __sock_recv_timestamp vmlinux EXPORT_SYMBOL_GPL
+0xfb44ea22 usb_hcd_resume_root_hub vmlinux EXPORT_SYMBOL_GPL
+0x1185c249 arch_apei_report_mem_error vmlinux EXPORT_SYMBOL_GPL
+0x7e6296b8 aead_init_geniv vmlinux EXPORT_SYMBOL_GPL
+0x42026684 securityfs_remove vmlinux EXPORT_SYMBOL_GPL
+0x21a80629 noop_fsync vmlinux EXPORT_SYMBOL
+0xca40fd51 list_lru_destroy vmlinux EXPORT_SYMBOL_GPL
+0x1418d2d4 rpc_count_iostats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0e84f135 rpc_destroy_pipe_data net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5d510033 mlx4_unicast_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x6d2b9c14 drm_atomic_private_obj_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x67ba8291 crypto_engine_exit crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x88336cfe pnfs_generic_pg_cleanup fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x9cf46078 nfs4_proc_getdeviceinfo fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x8a1c987c kvm_get_msr_common arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xfc6aad88 kvm_set_msr_common arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xbfc6f70f xfrm_migrate_state_find vmlinux EXPORT_SYMBOL
+0x59f3ef22 fib_rule_matchall vmlinux EXPORT_SYMBOL_GPL
+0x84ccba28 scsi_add_host_with_dma vmlinux EXPORT_SYMBOL
+0x99314611 devres_remove_group vmlinux EXPORT_SYMBOL_GPL
+0x187d7ee5 tpm_calc_ordinal_duration vmlinux EXPORT_SYMBOL_GPL
+0xd46af5ef cppc_get_perf_ctrs vmlinux EXPORT_SYMBOL_GPL
+0x6aa11aa6 sgl_free_n_order vmlinux EXPORT_SYMBOL
+0x1a02774d blkdev_issue_write_same vmlinux EXPORT_SYMBOL
+0xd9a3dd13 crypto_hash_walk_first vmlinux EXPORT_SYMBOL_GPL
+0x5658bf64 alloc_anon_inode vmlinux EXPORT_SYMBOL
+0xd2ce454c vfs_mkobj vmlinux EXPORT_SYMBOL
+0x5cc5f37b __ClearPageMovable vmlinux EXPORT_SYMBOL
+0xa7077dcc vsock_remove_connected net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x84c8c7d3 i2c_add_adapter drivers/i2c/i2c-core EXPORT_SYMBOL
+0xddd2ba8f mlxsw_env_get_module_eeprom drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x829e8851 mlxsw_afa_block_first_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x9ae8f00c drm_kms_helper_poll_fini drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x9c1475e9 pnfs_report_layoutstat fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xf313da4e sha_transform vmlinux EXPORT_SYMBOL
+0x2b08f74d inet_sendpage vmlinux EXPORT_SYMBOL
+0xf2f4fa0a dev_set_allmulti vmlinux EXPORT_SYMBOL
+0x32d16f08 dev_get_by_index_rcu vmlinux EXPORT_SYMBOL
+0x7d91802e input_mt_report_finger_count vmlinux EXPORT_SYMBOL
+0xdbafa38b to_ndd vmlinux EXPORT_SYMBOL
+0xee7d7deb gen_pool_dma_zalloc vmlinux EXPORT_SYMBOL
+0xe32ab4d8 xxh64_digest vmlinux EXPORT_SYMBOL
+0x4a96a8eb xxh32_digest vmlinux EXPORT_SYMBOL
+0xbb35675b __bitmap_intersects vmlinux EXPORT_SYMBOL
+0x04ea5d10 ksize vmlinux EXPORT_SYMBOL
+0x38869d88 kstat vmlinux EXPORT_SYMBOL
+0x8f7d15c3 osd_req_op_extent_osd_data_bio net/ceph/libceph EXPORT_SYMBOL
+0xba180dde tap_get_minor drivers/net/tap EXPORT_SYMBOL_GPL
+0x08984313 kobject_set_name vmlinux EXPORT_SYMBOL
+0xf9f93a53 dm_send_uevents vmlinux EXPORT_SYMBOL_GPL
+0x1bf693e2 shmem_truncate_range vmlinux EXPORT_SYMBOL_GPL
+0x5b21ceff ring_buffer_iter_peek vmlinux EXPORT_SYMBOL_GPL
+0x6f12560a get_old_timespec32 vmlinux EXPORT_SYMBOL_GPL
+0xa5efbf4c async_synchronize_full vmlinux EXPORT_SYMBOL_GPL
+0x797d7c91 ipmi_smi_watcher_register drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xa507e852 nfs4_schedule_migration_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb7593ddc iosf_mbi_unregister_pmic_bus_access_notifier arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL
+0x48c093fb _atomic_dec_and_lock_irqsave vmlinux EXPORT_SYMBOL
+0xc787d71d unregister_dca_provider vmlinux EXPORT_SYMBOL_GPL
+0x9e9fa189 mptscsih_bios_param vmlinux EXPORT_SYMBOL
+0x78d26f89 phy_modify_mmd_changed vmlinux EXPORT_SYMBOL_GPL
+0x33ecab1a ata_bmdma_irq_clear vmlinux EXPORT_SYMBOL_GPL
+0x33fd9da4 acpi_get_gpe_device vmlinux EXPORT_SYMBOL
+0x1c58427f acpi_remove_notify_handler vmlinux EXPORT_SYMBOL
+0xd244d1db set_device_ro vmlinux EXPORT_SYMBOL
+0xae8b165b restore_online_page_callback vmlinux EXPORT_SYMBOL_GPL
+0xaa230f88 perf_unregister_guest_info_callbacks vmlinux EXPORT_SYMBOL_GPL
+0x3d510a7b rcu_jiffies_till_stall_check vmlinux EXPORT_SYMBOL_GPL
+0x608741b5 __init_swait_queue_head vmlinux EXPORT_SYMBOL
+0x24621ca3 dm_sm_disk_open drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xcaf7fe60 drm_mode_object_put drivers/gpu/drm/drm EXPORT_SYMBOL
+0x65b4d181 ethtool_rx_flow_rule_destroy vmlinux EXPORT_SYMBOL
+0x999e2228 usb_anchor_urb vmlinux EXPORT_SYMBOL_GPL
+0xfe75bcbb fc_rport_recv_req vmlinux EXPORT_SYMBOL
+0xb9c5c14e tpmm_chip_alloc vmlinux EXPORT_SYMBOL_GPL
+0x955a4800 pci_clear_mwi vmlinux EXPORT_SYMBOL
+0x0ddb1cd7 llist_reverse_order vmlinux EXPORT_SYMBOL_GPL
+0xdd034ef6 blk_rq_append_bio vmlinux EXPORT_SYMBOL
+0x64a156b5 shash_attr_alg vmlinux EXPORT_SYMBOL_GPL
+0x9696a240 migrate_page_move_mapping vmlinux EXPORT_SYMBOL
+0x63eb9355 panic_blink vmlinux EXPORT_SYMBOL
+0xd75a8e5e ppp_register_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x1234ffa1 cper_estatus_check_header vmlinux EXPORT_SYMBOL_GPL
+0x03f018c1 ata_sas_port_stop vmlinux EXPORT_SYMBOL_GPL
+0xcea49c1a pci_cfg_access_trylock vmlinux EXPORT_SYMBOL_GPL
+0x50428d74 __sync_dirty_buffer vmlinux EXPORT_SYMBOL
+0xf4fc2d6c __ring_buffer_alloc vmlinux EXPORT_SYMBOL_GPL
+0x5e332b52 __var_waitqueue vmlinux EXPORT_SYMBOL
+0x82072614 tasklet_kill vmlinux EXPORT_SYMBOL
+0xe474c773 ib_get_vf_config drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x60857e09 i2c_use_client drivers/i2c/i2c-core EXPORT_SYMBOL
+0xfd853e2b cxgb4_flush_eq_cache drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x733f64d7 drm_agp_alloc drivers/gpu/drm/drm EXPORT_SYMBOL
+0xffb7c514 ida_free vmlinux EXPORT_SYMBOL
+0x4cfe0eb7 nf_ip_route vmlinux EXPORT_SYMBOL_GPL
+0x3724841f md_bitmap_start_sync vmlinux EXPORT_SYMBOL
+0x9af40e1c md_reload_sb vmlinux EXPORT_SYMBOL
+0x86595caf fc_get_host_speed vmlinux EXPORT_SYMBOL
+0x7f285611 fc_fc4_deregister_provider vmlinux EXPORT_SYMBOL
+0x1b0c8287 scsi_print_command vmlinux EXPORT_SYMBOL
+0x025aa216 copy_reserved_iova vmlinux EXPORT_SYMBOL_GPL
+0x66da0341 pci_try_set_mwi vmlinux EXPORT_SYMBOL
+0xb73ee4eb phy_configure vmlinux EXPORT_SYMBOL_GPL
+0xc6f3b3fc refcount_dec_if_one vmlinux EXPORT_SYMBOL
+0x33421467 blk_post_runtime_resume vmlinux EXPORT_SYMBOL
+0x4226d9db bio_endio vmlinux EXPORT_SYMBOL
+0x28c298fc async_syndrome_val vmlinux EXPORT_SYMBOL_GPL
+0x6d26f9e5 nr_execve_count vmlinux EXPORT_SYMBOL
+0xb7c69a63 unregister_vmap_purge_notifier vmlinux EXPORT_SYMBOL_GPL
+0x9a9fd82f account_page_redirty vmlinux EXPORT_SYMBOL
+0x1e5b16ce ring_buffer_normalize_time_stamp vmlinux EXPORT_SYMBOL_GPL
+0xf86c9db3 nft_fib_validate net/netfilter/nft_fib EXPORT_SYMBOL_GPL
+0xc87e9fe0 nf_ct_expect_unregister_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x7a1d0d3c target_alloc_sgl drivers/target/target_core_mod EXPORT_SYMBOL
+0xadbc4e66 adf_disable_aer drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x4894ba7c mlxsw_core_port_eth_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x3adf897f drm_flip_work_queue_task drivers/gpu/drm/drm EXPORT_SYMBOL
+0xd60736ec gf128mul_free_64k crypto/gf128mul EXPORT_SYMBOL
+0x9558320f l3mdev_fib_table_rcu vmlinux EXPORT_SYMBOL_GPL
+0x12c9951e eth_prepare_mac_addr_change vmlinux EXPORT_SYMBOL
+0xdb3caac6 sk_stop_timer vmlinux EXPORT_SYMBOL
+0x111ab12a dm_bufio_mark_partial_buffer_dirty vmlinux EXPORT_SYMBOL_GPL
+0x5d6c0958 sas_rphy_remove vmlinux EXPORT_SYMBOL
+0xd3ae7756 fw_fallback_config vmlinux EXPORT_SYMBOL_GPL
+0x7fa96509 erst_get_record_id_next vmlinux EXPORT_SYMBOL_GPL
+0x7f5b4fe4 sg_free_table vmlinux EXPORT_SYMBOL
+0x2bbbb734 locks_remove_posix vmlinux EXPORT_SYMBOL
+0xb0b85f47 ring_buffer_iter_reset vmlinux EXPORT_SYMBOL_GPL
+0xf48ceebd net_cls_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0xaad38c48 flow_offload_teardown net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0xac70dd21 nft_data_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xeb22f8a0 edac_mc_handle_error drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x57208126 cxgb4_smt_alloc_switching drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x7a4543ae drm_atomic_state_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4e6e4b41 radix_tree_delete vmlinux EXPORT_SYMBOL
+0xd120fdb3 __skb_recv_udp vmlinux EXPORT_SYMBOL
+0x08dfc88f __phy_read_mmd vmlinux EXPORT_SYMBOL
+0x9f473289 iscsi_unregister_transport vmlinux EXPORT_SYMBOL_GPL
+0x812ac3a2 pm_generic_thaw_noirq vmlinux EXPORT_SYMBOL_GPL
+0xd86283ea acpi_get_psd_map vmlinux EXPORT_SYMBOL_GPL
+0xfdabd69d pcie_port_service_unregister vmlinux EXPORT_SYMBOL
+0xb67fec0e uuid_parse vmlinux EXPORT_SYMBOL
+0x412cb2d5 sched_trace_rq_cpu vmlinux EXPORT_SYMBOL_GPL
+0x18582826 amd_pmu_disable_virt vmlinux EXPORT_SYMBOL_GPL
+0x956db3e4 nfnl_acct_find_get net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL
+0xe57724de mlx4_gen_pkey_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xcf11a549 drm_flip_work_allocate_task drivers/gpu/drm/drm EXPORT_SYMBOL
+0xd169c28c drm_driver_legacy_fb_format drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf48c693e kvm_put_kvm arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x9d9349ac tcp_slow_start vmlinux EXPORT_SYMBOL_GPL
+0xd4fe3b38 dev_activate vmlinux EXPORT_SYMBOL
+0xcec4f97c __pneigh_lookup vmlinux EXPORT_SYMBOL_GPL
+0x36e5dd98 device_get_named_child_node vmlinux EXPORT_SYMBOL_GPL
+0x9f3cc5a6 fwnode_get_next_child_node vmlinux EXPORT_SYMBOL_GPL
+0x16b35b5a mipi_dsi_dcs_set_tear_scanline vmlinux EXPORT_SYMBOL
+0x13233212 devm_iounmap vmlinux EXPORT_SYMBOL
+0x528683ed devm_ioremap vmlinux EXPORT_SYMBOL
+0xf749debc md5_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x4bcc2662 mempool_init_node vmlinux EXPORT_SYMBOL
+0x294b9ea1 on_each_cpu vmlinux EXPORT_SYMBOL
+0x54951ed3 dma_set_mask vmlinux EXPORT_SYMBOL
+0x1a51d587 pci_msi_set_desc vmlinux EXPORT_SYMBOL_GPL
+0x638bffb7 target_free_sgl drivers/target/target_core_mod EXPORT_SYMBOL
+0xdf666902 drm_rotation_simplify drivers/gpu/drm/drm EXPORT_SYMBOL
+0xdf2319c5 drm_gem_object_lookup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x74e5ff1a udpv6_encap_enable vmlinux EXPORT_SYMBOL
+0x63d28ef0 napi_hash_del vmlinux EXPORT_SYMBOL_GPL
+0x107dfa23 skb_zerocopy vmlinux EXPORT_SYMBOL_GPL
+0x6d58f69e agp3_generic_sizes vmlinux EXPORT_SYMBOL
+0x8e3419e1 nla_reserve vmlinux EXPORT_SYMBOL
+0x9221990d bpf_offload_dev_match vmlinux EXPORT_SYMBOL_GPL
+0x8063dad6 tracepoint_srcu vmlinux EXPORT_SYMBOL_GPL
+0x16316a10 ZSTD_getFrameContentSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x7cb1aea1 devlink_dpipe_header_ethernet vmlinux EXPORT_SYMBOL
+0x6e22eab6 rtnl_put_cacheinfo vmlinux EXPORT_SYMBOL_GPL
+0x2421be1f __cpufreq_driver_target vmlinux EXPORT_SYMBOL_GPL
+0xea124bd1 gcd vmlinux EXPORT_SYMBOL_GPL
+0x7accdff0 crypto_hash_alg_has_setkey vmlinux EXPORT_SYMBOL_GPL
+0xca1867ff ihold vmlinux EXPORT_SYMBOL
+0x3096be16 names_cachep vmlinux EXPORT_SYMBOL
+0x97be9b46 invalidate_inode_pages2_range vmlinux EXPORT_SYMBOL_GPL
+0xa45c7b90 stack_trace_print vmlinux EXPORT_SYMBOL_GPL
+0x3c058039 irq_domain_get_irq_data vmlinux EXPORT_SYMBOL_GPL
+0xbbcb339e cancel_delayed_work vmlinux EXPORT_SYMBOL
+0xc8e96dea qword_addhex net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4dae16e4 i2c_put_dma_safe_msg_buf drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x3f2690f2 nfs_check_flags fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xdfcf2971 kvm_emulate_wbinvd arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x4de07c7d xfrm_policy_register_afinfo vmlinux EXPORT_SYMBOL
+0xb01686df ata_qc_complete_multiple vmlinux EXPORT_SYMBOL_GPL
+0x71ab5e51 pci_unmap_rom vmlinux EXPORT_SYMBOL
+0x7acf657e sbitmap_queue_show vmlinux EXPORT_SYMBOL_GPL
+0xb0d904b7 raid6_empty_zero_page vmlinux EXPORT_SYMBOL
+0x8bc88dd2 iov_iter_alignment vmlinux EXPORT_SYMBOL
+0x18cf99d4 crypto_unregister_shashes vmlinux EXPORT_SYMBOL_GPL
+0xccaaa0ef crypto_unregister_ahashes vmlinux EXPORT_SYMBOL_GPL
+0x03031bdf key_instantiate_and_link vmlinux EXPORT_SYMBOL
+0xc5c08c7b dentry_path_raw vmlinux EXPORT_SYMBOL
+0x265bbef9 kexec_crash_loaded vmlinux EXPORT_SYMBOL_GPL
+0xd9e28909 mlx4_mr_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x82921f96 drm_crtc_add_crc_entry drivers/gpu/drm/drm EXPORT_SYMBOL_GPL
+0x48d6d1d3 kvm_cpu_has_interrupt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x5b339dd7 ipv6_sock_mc_drop vmlinux EXPORT_SYMBOL
+0xe8487e90 xfrm_sad_getinfo vmlinux EXPORT_SYMBOL
+0xd1f2eee2 nf_logger_find_get vmlinux EXPORT_SYMBOL_GPL
+0x96e5067e netif_napi_del vmlinux EXPORT_SYMBOL
+0xe233762a input_event_from_user vmlinux EXPORT_SYMBOL_GPL
+0x375b8cac mpt_get_msg_frame vmlinux EXPORT_SYMBOL
+0x4484a5a4 wait_for_device_probe vmlinux EXPORT_SYMBOL_GPL
+0x57b63fa3 tty_port_put vmlinux EXPORT_SYMBOL
+0x99bd3863 clk_register_fixed_factor vmlinux EXPORT_SYMBOL_GPL
+0xee58e970 fb_add_videomode vmlinux EXPORT_SYMBOL
+0x0646b95e crypto_register_alg vmlinux EXPORT_SYMBOL_GPL
+0x4e019056 debugfs_rename vmlinux EXPORT_SYMBOL_GPL
+0xce306aca simple_open vmlinux EXPORT_SYMBOL
+0x3b879bcd single_open vmlinux EXPORT_SYMBOL
+0x4f0b6a7c __page_file_mapping vmlinux EXPORT_SYMBOL_GPL
+0x112e45c4 fork_usermode_blob vmlinux EXPORT_SYMBOL_GPL
+0xc6abea44 nft_register_chain_type net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x82a7fe23 mlx4_mw_enable drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xbdfe3de3 drm_helper_disable_unused_functions drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x3dcf3d91 __fscache_wait_on_invalidate fs/fscache/fscache EXPORT_SYMBOL
+0x16a7ed01 xfrm6_find_1stfragopt vmlinux EXPORT_SYMBOL
+0x5a1447ad sysctl_tcp_synack_rto_interval vmlinux EXPORT_SYMBOL
+0x158acc50 __page_pool_put_page vmlinux EXPORT_SYMBOL
+0x7757b51a clk_unregister vmlinux EXPORT_SYMBOL_GPL
+0xd0b74705 acpi_install_interface vmlinux EXPORT_SYMBOL
+0xfbbd41ca no_action vmlinux EXPORT_SYMBOL_GPL
+0x185f8758 kthread_stop vmlinux EXPORT_SYMBOL
+0xfb9a1504 l2tp_nl_register_ops net/l2tp/l2tp_netlink EXPORT_SYMBOL_GPL
+0x565453bd vhost_poll_flush drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xda4d51e2 mlx4_pd_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x050c8384 drm_fb_helper_set_suspend_unlocked drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x768d9163 lockd_down fs/lockd/lockd EXPORT_SYMBOL_GPL
+0xb75988d7 __serpent_crypt_ctr arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL
+0x3f746e0e tcp_sendmsg_locked vmlinux EXPORT_SYMBOL_GPL
+0xd70d9322 dm_register_target vmlinux EXPORT_SYMBOL
+0x89e4973f nvdimm_pmem_region_create vmlinux EXPORT_SYMBOL_GPL
+0x45081703 ec_get_handle vmlinux EXPORT_SYMBOL
+0x78a16f48 aes_decrypt vmlinux EXPORT_SYMBOL
+0x7481b148 refcount_add_checked vmlinux EXPORT_SYMBOL
+0x1de2798a security_inode_create vmlinux EXPORT_SYMBOL_GPL
+0x3fe35aea irq_bypass_unregister_consumer virt/lib/irqbypass EXPORT_SYMBOL_GPL
+0xdac7f33b xprt_reserve_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa0e5e94e nf_dup_ipv4 net/ipv4/netfilter/nf_dup_ipv4 EXPORT_SYMBOL_GPL
+0x46730863 nf_nat_ipv4_unregister_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x8eb65187 nf_ct_seqadj_init net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x4e095f11 drm_get_format_name drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8c423b1a hid_register_report vmlinux EXPORT_SYMBOL_GPL
+0x1dacf0d6 rtc_set_alarm vmlinux EXPORT_SYMBOL_GPL
+0x0eee75ae ahci_kick_engine vmlinux EXPORT_SYMBOL_GPL
+0x7075434b screen_glyph_unicode vmlinux EXPORT_SYMBOL_GPL
+0x50cf7585 hex2bin vmlinux EXPORT_SYMBOL
+0x10cfbc6f crypto_default_rng vmlinux EXPORT_SYMBOL_GPL
+0x54945783 locks_copy_conflock vmlinux EXPORT_SYMBOL
+0x55cb64b6 d_make_root vmlinux EXPORT_SYMBOL
+0x5b082a3a vfs_get_super vmlinux EXPORT_SYMBOL
+0x9ca20cf7 ip_set_test net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x0b9f96d3 ib_sa_register_client drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x80e0b0a7 adf_isr_resource_free drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x0abdd577 inet_csk_listen_start vmlinux EXPORT_SYMBOL_GPL
+0x2875a315 utf32_to_utf8 vmlinux EXPORT_SYMBOL
+0xae62b83f blk_trace_remove vmlinux EXPORT_SYMBOL_GPL
+0x424b1958 cleanup_srcu_struct vmlinux EXPORT_SYMBOL_GPL
+0x643d9ba1 groups_free vmlinux EXPORT_SYMBOL
+0xac1a55be unregister_reboot_notifier vmlinux EXPORT_SYMBOL
+0x2def7f76 rtc_cmos_write vmlinux EXPORT_SYMBOL
+0xdb691d6a ceph_osdc_writepages net/ceph/libceph EXPORT_SYMBOL
+0xbfb7df3c mlxsw_core_driver_priv drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x5e373fb4 gf128mul_64k_bbe crypto/gf128mul EXPORT_SYMBOL
+0xfc19bc45 crypto_dh_encode_key crypto/dh_generic EXPORT_SYMBOL_GPL
+0x035c4fd3 kvm_mtrr_valid arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x7410aba2 strreplace vmlinux EXPORT_SYMBOL
+0x1fa3a9c7 ip6_flush_pending_frames vmlinux EXPORT_SYMBOL_GPL
+0x921cb295 __efivar_entry_get vmlinux EXPORT_SYMBOL_GPL
+0x7280ae39 mptscsih_taskmgmt_complete vmlinux EXPORT_SYMBOL
+0x82cdce6d device_add_groups vmlinux EXPORT_SYMBOL_GPL
+0x04b7ce2e tty_perform_flush vmlinux EXPORT_SYMBOL_GPL
+0xa156a1f2 erst_get_record_id_end vmlinux EXPORT_SYMBOL_GPL
+0x82a0416b acpi_subsys_freeze vmlinux EXPORT_SYMBOL_GPL
+0xf0696401 acpi_pci_detect_ejectable vmlinux EXPORT_SYMBOL_GPL
+0x023d1b90 wrmsr_on_cpu vmlinux EXPORT_SYMBOL
+0x0b5647c6 ip_vs_scheduler_err net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x80fcebca nft_chain_validate net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x1bdaafe1 cxgbi_tagmask_set drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL
+0xe4330a39 ipmi_unregister_smi drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x216bbc5a __fscache_readpages_cancel fs/fscache/fscache EXPORT_SYMBOL
+0x7453abb7 glue_cbc_encrypt_req_128bit arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL
+0xd4bb4a82 inet6addr_validator_notifier_call_chain vmlinux EXPORT_SYMBOL
+0xee7eb9e1 pnp_platform_devices vmlinux EXPORT_SYMBOL
+0xb401e960 debugfs_remove_recursive vmlinux EXPORT_SYMBOL_GPL
+0x0ad10eb8 _raw_read_unlock_bh vmlinux EXPORT_SYMBOL
+0x7e162403 ebt_unregister_table_pre_exit net/bridge/netfilter/ebtables EXPORT_SYMBOL
+0xa3cc1157 dm_btree_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xf076bc57 bch_bset_fix_invalidated_key drivers/md/bcache/bcache EXPORT_SYMBOL
+0x5afac431 mlx5_notifier_register drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xd69a0294 cxgb4_best_mtu drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0xc9489866 drm_atomic_commit drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3d1f474d drm_fb_helper_fbdev_setup drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xbafa83d3 drm_atomic_helper_check_plane_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x5f0e81c6 nfs_rmdir fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xdd9e7aeb kvm_set_shared_msr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x830ad6d9 km_migrate vmlinux EXPORT_SYMBOL
+0x7104c137 devlink_port_params_register vmlinux EXPORT_SYMBOL_GPL
+0x4d2e862b neigh_parms_alloc vmlinux EXPORT_SYMBOL
+0x8e97b6c0 cpuidle_unregister_device vmlinux EXPORT_SYMBOL_GPL
+0x19a304ba usb_disabled vmlinux EXPORT_SYMBOL_GPL
+0x31f24696 nvme_set_features vmlinux EXPORT_SYMBOL_GPL
+0xbccc660f clk_hw_is_prepared vmlinux EXPORT_SYMBOL_GPL
+0x433cabfb acpi_decode_pld_buffer vmlinux EXPORT_SYMBOL
+0x3a85f17b mount_single vmlinux EXPORT_SYMBOL
+0xc9424529 of_css vmlinux EXPORT_SYMBOL_GPL
+0xc87e487a sched_clock_idle_sleep_event vmlinux EXPORT_SYMBOL_GPL
+0x9e1e2b2b drm_gem_mmap_obj drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9acfdd5b kvm_cpuid arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x89def1a7 netif_receive_skb_core vmlinux EXPORT_SYMBOL
+0x8ca6f4f7 phy_restart_aneg vmlinux EXPORT_SYMBOL_GPL
+0xb9c425de register_syscore_ops vmlinux EXPORT_SYMBOL_GPL
+0xb2f74fb6 intel_gmch_remove vmlinux EXPORT_SYMBOL
+0x4276d768 acpi_nfit_desc_init vmlinux EXPORT_SYMBOL_GPL
+0x3a9be019 asymmetric_key_id_partial vmlinux EXPORT_SYMBOL_GPL
+0x16e75540 bpf_prog_inc vmlinux EXPORT_SYMBOL_GPL
+0x6a90e91a remove_irq vmlinux EXPORT_SYMBOL_GPL
+0x93a6e0b2 io_schedule vmlinux EXPORT_SYMBOL
+0xf89359f7 ib_create_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0e23b37f alloc_cpumask_var_node vmlinux EXPORT_SYMBOL
+0x4fa49102 ata_pci_bmdma_init vmlinux EXPORT_SYMBOL_GPL
+0x22d103ee cfb_fillrect vmlinux EXPORT_SYMBOL
+0x10b28383 pci_remap_iospace vmlinux EXPORT_SYMBOL
+0x2c011bc4 disk_part_iter_next vmlinux EXPORT_SYMBOL_GPL
+0xc4eaff0f block_write_end vmlinux EXPORT_SYMBOL
+0x71616b92 pid_vnr vmlinux EXPORT_SYMBOL_GPL
+0x1db73680 cxgb_find_route drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL
+0x6ce0cd1b nfs_link fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xb680145a kvm_vcpu_kick arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x6de9a8ec xsk_clear_rx_need_wakeup vmlinux EXPORT_SYMBOL
+0x56f95e0c __xfrm_decode_session vmlinux EXPORT_SYMBOL
+0x475caa7c pm_wakeup_ws_event vmlinux EXPORT_SYMBOL_GPL
+0xaf415550 get_amd_iommu vmlinux EXPORT_SYMBOL
+0x18eb0297 tty_port_lower_dtr_rts vmlinux EXPORT_SYMBOL
+0xd08d015b is_acpi_data_node vmlinux EXPORT_SYMBOL
+0xcd24e146 hash_digest_size vmlinux EXPORT_SYMBOL_GPL
+0xe11a71f6 __pagevec_lru_add vmlinux EXPORT_SYMBOL
+0xfe29f9aa rdma_read_gid_l2_fields drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6791a44e dm_deferred_entry_dec drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0xca2e3a88 dm_deferred_entry_inc drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x6b2d375a mlx5_accel_esp_modify_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x88e8f5f7 mlx4_fmr_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xf930ddaa netlink_set_err vmlinux EXPORT_SYMBOL
+0x066ef0b8 alloc_netdev_mqs vmlinux EXPORT_SYMBOL
+0x09943961 fc_attach_transport vmlinux EXPORT_SYMBOL
+0x00d002ef bus_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x998457b6 agp_bridge vmlinux EXPORT_SYMBOL
+0x8ee53e31 register_kprobe vmlinux EXPORT_SYMBOL_GPL
+0xc757cd84 make_kprojid vmlinux EXPORT_SYMBOL
+0x466b85b8 libceph_compatible net/ceph/libceph EXPORT_SYMBOL
+0x5a46c437 mlx4_srq_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x2274a587 bcm_phy_ack_intr drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x33e76e17 __tracepoint_kvm_avic_incomplete_ipi arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x3fcff723 glue_ctr_req_128bit arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL
+0x263c3152 bcmp vmlinux EXPORT_SYMBOL
+0xee11942f inet_listen vmlinux EXPORT_SYMBOL
+0x23fbe7fd register_snap_client vmlinux EXPORT_SYMBOL
+0x9faf6bf9 __dst_destroy_metrics_generic vmlinux EXPORT_SYMBOL
+0xcaedecf0 skb_queue_purge vmlinux EXPORT_SYMBOL
+0x0b290ada dma_fence_chain_walk vmlinux EXPORT_SYMBOL
+0x55f0790e iommu_present vmlinux EXPORT_SYMBOL_GPL
+0xbf041102 register_vt_notifier vmlinux EXPORT_SYMBOL_GPL
+0xdf6770a7 badblocks_show vmlinux EXPORT_SYMBOL_GPL
+0x800e8dd2 get_unmapped_area vmlinux EXPORT_SYMBOL
+0x6ad0a8f5 add_to_page_cache_lru vmlinux EXPORT_SYMBOL_GPL
+0xe95817ab ex_handler_copy vmlinux EXPORT_SYMBOL
+0xd38f9dea ib_mr_pool_destroy drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x79ecf67f drm_fb_helper_check_var drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xb9e8e2cc in6addr_sitelocal_allrouters vmlinux EXPORT_SYMBOL
+0x9c837c35 flow_rule_match_ports vmlinux EXPORT_SYMBOL
+0x07b52e38 rtnl_unregister vmlinux EXPORT_SYMBOL_GPL
+0x955b0aa9 genphy_config_eee_advert vmlinux EXPORT_SYMBOL
+0xfd58ecef platform_bus_type vmlinux EXPORT_SYMBOL_GPL
+0x834fb995 agp_generic_destroy_pages vmlinux EXPORT_SYMBOL
+0x47de0dc7 clk_unregister_mux vmlinux EXPORT_SYMBOL_GPL
+0xef15dbdf blk_rq_map_user vmlinux EXPORT_SYMBOL
+0xc39af4c4 crypto_sha512_update vmlinux EXPORT_SYMBOL
+0x742a9538 jbd2_journal_clear_features vmlinux EXPORT_SYMBOL
+0xb8c68bdc generic_pipe_buf_release vmlinux EXPORT_SYMBOL
+0x37ea659f add_memory vmlinux EXPORT_SYMBOL_GPL
+0x8a973cfa kthread_queue_delayed_work vmlinux EXPORT_SYMBOL_GPL
+0x1150a692 ip6_tnl_encap_del_ops net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x3276a241 nf_ct_gre_keymap_add net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x75a729a0 rdma_nl_unregister drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xedb10501 tap_del_queues drivers/net/tap EXPORT_SYMBOL_GPL
+0x51892d61 nfs_callback_set_tcpport fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x5b683d70 qlt_free_mcmd vmlinux EXPORT_SYMBOL
+0x7b87ff0a iommu_sva_unbind_device vmlinux EXPORT_SYMBOL_GPL
+0xa5e55057 rdmsrl_safe_on_cpu vmlinux EXPORT_SYMBOL
+0xe3089ec6 d_find_alias vmlinux EXPORT_SYMBOL
+0x1b5f4377 trace_seq_putc vmlinux EXPORT_SYMBOL_GPL
+0x1741ddee trace_seq_puts vmlinux EXPORT_SYMBOL_GPL
+0x760a0f4f yield vmlinux EXPORT_SYMBOL
+0xbedeca86 rdma_listen drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x2efc3317 iscsit_process_nop_out drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xb2fa2f21 kvm_write_guest_offset_cached arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xc70ab7ec xt_check_target vmlinux EXPORT_SYMBOL_GPL
+0x3d49fc73 __tracepoint_napi_poll vmlinux EXPORT_SYMBOL_GPL
+0x5df778c5 __tracepoint_kfree_skb vmlinux EXPORT_SYMBOL_GPL
+0x821c2929 netdev_upper_dev_unlink vmlinux EXPORT_SYMBOL
+0x8ca7cee5 alloc_dax_region vmlinux EXPORT_SYMBOL_GPL
+0xb3ead56a dax_inode vmlinux EXPORT_SYMBOL_GPL
+0xd821af42 acpi_dma_request_slave_chan_by_index vmlinux EXPORT_SYMBOL_GPL
+0xc07b0863 fb_destroy_modedb vmlinux EXPORT_SYMBOL
+0xccd4c999 __sg_page_iter_start vmlinux EXPORT_SYMBOL
+0xcd766b70 fsstack_copy_attr_all vmlinux EXPORT_SYMBOL_GPL
+0x70576fee acpi_processor_ffh_cstate_probe vmlinux EXPORT_SYMBOL_GPL
+0x9a2c4fed __nf_ct_try_assign_helper net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x4e710c95 transport_init_session drivers/target/target_core_mod EXPORT_SYMBOL
+0x569b312f vbg_info drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL
+0xb4208621 mlx5_nic_vport_enable_roce drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xcbd84716 __drm_atomic_helper_disable_plane drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7cc484a5 acpi_video_handles_brightness_key_presses drivers/acpi/video EXPORT_SYMBOL
+0x7a95bbc6 kvm_require_cpl arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x33b84f74 copy_page vmlinux EXPORT_SYMBOL
+0xe3d8bbe2 udp6_lib_lookup vmlinux EXPORT_SYMBOL_GPL
+0x86460fb2 xfrm_init_replay vmlinux EXPORT_SYMBOL
+0x5833e159 udp4_lib_lookup vmlinux EXPORT_SYMBOL_GPL
+0xfa3d9165 sock_zerocopy_put vmlinux EXPORT_SYMBOL_GPL
+0xdd20e40b dev_pm_opp_get_opp_table vmlinux EXPORT_SYMBOL_GPL
+0x62eb15c1 scsi_command_normalize_sense vmlinux EXPORT_SYMBOL
+0xdac6a188 devm_kmalloc vmlinux EXPORT_SYMBOL_GPL
+0x0f37ca89 lockref_put_not_zero vmlinux EXPORT_SYMBOL
+0x3ad5cda3 lockref_get_not_zero vmlinux EXPORT_SYMBOL
+0x2808a856 inode_sb_list_add vmlinux EXPORT_SYMBOL_GPL
+0xcd5df914 inode_init_always vmlinux EXPORT_SYMBOL
+0x0433b038 d_invalidate vmlinux EXPORT_SYMBOL
+0xa0b04675 vmalloc_32 vmlinux EXPORT_SYMBOL
+0xa96048e7 pid_task vmlinux EXPORT_SYMBOL
+0xe11ca997 ZSTD_getDictID_fromDict lib/zstd/zstd_decompress EXPORT_SYMBOL
+0xc9141a7d set_h245_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x28eff409 nf_conntrack_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x126cdb28 mdev_unregister_driver drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x8758de6c drm_writeback_queue_job drivers/gpu/drm/drm EXPORT_SYMBOL
+0xec662ae2 drm_class_device_unregister drivers/gpu/drm/drm EXPORT_SYMBOL_GPL
+0x4aed9cd0 tcp_time_wait vmlinux EXPORT_SYMBOL
+0x686de3e3 stp_proto_register vmlinux EXPORT_SYMBOL_GPL
+0x008dfeea sk_msg_clone vmlinux EXPORT_SYMBOL_GPL
+0xcdc601f9 dm_internal_resume_fast vmlinux EXPORT_SYMBOL_GPL
+0x24c621e3 input_register_handler vmlinux EXPORT_SYMBOL
+0xd1d87e92 scsi_mlreturn_string vmlinux EXPORT_SYMBOL
+0x2ca6b912 dax_attribute_group vmlinux EXPORT_SYMBOL_GPL
+0x5f88106e textsearch_unregister vmlinux EXPORT_SYMBOL
+0xd0fe8d51 sg_pcopy_from_buffer vmlinux EXPORT_SYMBOL
+0x2f7754a8 dma_pool_free vmlinux EXPORT_SYMBOL
+0x03a81e69 virtio_transport_stream_allow net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x09bb3eeb nft_fib_store_result net/netfilter/nft_fib EXPORT_SYMBOL_GPL
+0x2613bdf2 drm_dp_mst_topology_mgr_suspend drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x2070a2f6 netif_carrier_on vmlinux EXPORT_SYMBOL
+0x8e1996c1 garp_register_application vmlinux EXPORT_SYMBOL_GPL
+0x92b73b23 fib_rules_lookup vmlinux EXPORT_SYMBOL_GPL
+0x462da69e scsi_unregister_device_handler vmlinux EXPORT_SYMBOL_GPL
+0xb41b79f0 nvme_kill_queues vmlinux EXPORT_SYMBOL_GPL
+0xd5f94a7b acpi_pm_set_device_wakeup vmlinux EXPORT_SYMBOL_GPL
+0xae7c231d mpi_cmp vmlinux EXPORT_SYMBOL_GPL
+0x167c5967 print_hex_dump vmlinux EXPORT_SYMBOL
+0x5ec2ddf0 drm_crtc_helper_set_config drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x696fa2fa o2net_register_handler fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL
+0x27756bc8 scsi_sanitize_inquiry_string vmlinux EXPORT_SYMBOL
+0x4c2a627c dev_pm_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL
+0xd9169974 acpi_nfit_init vmlinux EXPORT_SYMBOL_GPL
+0x45a9cb5c pcie_port_find_device vmlinux EXPORT_SYMBOL_GPL
+0xeaf7fe0f sbitmap_resize vmlinux EXPORT_SYMBOL_GPL
+0x91a55068 public_key_free vmlinux EXPORT_SYMBOL_GPL
+0xd5869285 inode_newsize_ok vmlinux EXPORT_SYMBOL
+0x15ea2648 hwpoison_filter_flags_mask vmlinux EXPORT_SYMBOL_GPL
+0x682675d4 _uverbs_alloc drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0xb8e35082 mlx5_cmd_exec_polling drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xfdb33aa2 nfs_pageio_reset_read_mds fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xdc9eced3 kvm_inject_page_fault arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x5a921311 strncmp vmlinux EXPORT_SYMBOL
+0xdc10514a inet_csk_destroy_sock vmlinux EXPORT_SYMBOL
+0x0a0c9d45 inet_peer_base_init vmlinux EXPORT_SYMBOL_GPL
+0xf2c0d430 fcoe_ctlr_els_send vmlinux EXPORT_SYMBOL
+0xbd7c68f3 iscsi_post_host_event vmlinux EXPORT_SYMBOL_GPL
+0x01e837d6 __pm_runtime_idle vmlinux EXPORT_SYMBOL_GPL
+0xfd2b7818 virtqueue_kick_prepare vmlinux EXPORT_SYMBOL_GPL
+0x274cf5e1 __clk_get_flags vmlinux EXPORT_SYMBOL_GPL
+0x2e1ca751 clk_put vmlinux EXPORT_SYMBOL
+0x78df6bd7 no_pci_devices vmlinux EXPORT_SYMBOL
+0xa7c0653d crypto_register_shashes vmlinux EXPORT_SYMBOL_GPL
+0x0d07f543 get_anon_bdev vmlinux EXPORT_SYMBOL
+0xb4eda0da ring_buffer_event_length vmlinux EXPORT_SYMBOL_GPL
+0x0244ff22 rdma_create_user_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf0f0aed6 usb_stor_access_xfer_buf drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x9963320b nfs_add_or_obtain fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x1f199d24 copy_user_generic_string vmlinux EXPORT_SYMBOL
+0xa5056338 __hsiphash_aligned vmlinux EXPORT_SYMBOL
+0xe753b68d devlink_fmsg_arr_pair_nest_end vmlinux EXPORT_SYMBOL_GPL
+0x3035dfda __flow_indr_block_cb_register vmlinux EXPORT_SYMBOL_GPL
+0xa98a245a sock_kfree_s vmlinux EXPORT_SYMBOL
+0xa74a7ffa nvdimm_bus_register vmlinux EXPORT_SYMBOL_GPL
+0x69dd3b5b crc32_le vmlinux EXPORT_SYMBOL
+0xf54bd49b lcm vmlinux EXPORT_SYMBOL_GPL
+0x845dbf3b scatterwalk_map_and_copy vmlinux EXPORT_SYMBOL_GPL
+0x38f26dcd empty_aops vmlinux EXPORT_SYMBOL
+0x484f6edf ktime_get_coarse_real_ts64 vmlinux EXPORT_SYMBOL
+0x59010166 irq_domain_pop_irq vmlinux EXPORT_SYMBOL_GPL
+0x3b8d393e __task_pid_nr_ns vmlinux EXPORT_SYMBOL
+0xdc26f878 udp_tunnel_sock_release net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0x2adb6620 nft_unregister_obj net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x043db153 pps_lookup_dev drivers/pps/pps_core EXPORT_SYMBOL
+0x6017204f br_forward vmlinux EXPORT_SYMBOL_GPL
+0xf7bc95b0 devlink_fmsg_pair_nest_start vmlinux EXPORT_SYMBOL_GPL
+0xdeb01536 ethtool_op_get_link vmlinux EXPORT_SYMBOL
+0xf7455c16 input_event_to_user vmlinux EXPORT_SYMBOL_GPL
+0xcd95a332 input_unregister_device vmlinux EXPORT_SYMBOL
+0x97be8eb2 iscsi_tcp_task_init vmlinux EXPORT_SYMBOL_GPL
+0xedd422e1 sas_port_free vmlinux EXPORT_SYMBOL
+0xcaf2c603 scsi_sd_pm_domain vmlinux EXPORT_SYMBOL
+0x60950b89 tty_buffer_space_avail vmlinux EXPORT_SYMBOL_GPL
+0x6d69bc2c clk_hw_set_parent vmlinux EXPORT_SYMBOL_GPL
+0xa42f36e1 clk_hw_get_parent vmlinux EXPORT_SYMBOL_GPL
+0x4bc5fb05 clk_bulk_get_optional vmlinux EXPORT_SYMBOL_GPL
+0xd2ea49b8 acpi_leave_sleep_state_prep vmlinux EXPORT_SYMBOL
+0xef26a59c read_cache_pages vmlinux EXPORT_SYMBOL
+0x4aadeb9a ring_buffer_alloc_read_page vmlinux EXPORT_SYMBOL_GPL
+0x1f774f46 cpuset_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0xef9aedfc boot_option_idle_override vmlinux EXPORT_SYMBOL
+0x53445f68 nlm_debug net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x7a52fb40 mii_nway_restart drivers/net/mii EXPORT_SYMBOL
+0x8265f153 mlxsw_core_schedule_work drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xc9c2e4cc mlxsw_core_lag_mapping_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x8b4de263 tcp_bpf_sendmsg_redir vmlinux EXPORT_SYMBOL_GPL
+0xcf88ee23 ip_generic_getfrag vmlinux EXPORT_SYMBOL
+0xe09a2b98 stp_proto_unregister vmlinux EXPORT_SYMBOL_GPL
+0xe7e52099 call_netdevice_notifiers vmlinux EXPORT_SYMBOL
+0x21c27318 ehci_reset vmlinux EXPORT_SYMBOL_GPL
+0xdc825d6c usb_amd_quirk_pll_disable vmlinux EXPORT_SYMBOL_GPL
+0xc5696eea usb_for_each_dev vmlinux EXPORT_SYMBOL_GPL
+0xd90f874a sas_port_alloc_num vmlinux EXPORT_SYMBOL
+0xa6b21ef2 dpm_suspend_end vmlinux EXPORT_SYMBOL_GPL
+0xf07cc1d6 free_iova vmlinux EXPORT_SYMBOL_GPL
+0xadf9699b pci_write_msi_msg vmlinux EXPORT_SYMBOL_GPL
+0x5c5a5b5f bio_integrity_alloc vmlinux EXPORT_SYMBOL
+0xe3890c4b crypto_dequeue_request vmlinux EXPORT_SYMBOL_GPL
+0x3c105f55 crypto_enqueue_request vmlinux EXPORT_SYMBOL_GPL
+0x97af1e80 fat_add_entries vmlinux EXPORT_SYMBOL_GPL
+0x65c99925 thaw_bdev vmlinux EXPORT_SYMBOL
+0xc25e84b1 vfs_fallocate vmlinux EXPORT_SYMBOL_GPL
+0x44304925 kmem_cache_alloc_trace vmlinux EXPORT_SYMBOL
+0x1e7d6157 freezer_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0x370147d9 set_pages_array_uc vmlinux EXPORT_SYMBOL
+0x1b25ce54 current_task vmlinux EXPORT_SYMBOL
+0xe41476d9 ZSTD_getParams lib/zstd/zstd_compress EXPORT_SYMBOL
+0xe629d909 nf_tables_deactivate_set net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xccd6b689 iscsit_allocate_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xcef14599 mlx5_fpga_sbu_conn_destroy drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xaeef95b8 drm_bridge_disable drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf454e2cf drm_atomic_helper_commit_cleanup_done drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x22d795a8 _nfs_display_fhandle_hash fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x44c8169c nfs_lookup fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x771f74ea nfs_force_lookup_revalidate fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x54cbcfbb netdev_rx_csum_fault vmlinux EXPORT_SYMBOL
+0x1035f5f7 usb_phy_roothub_suspend vmlinux EXPORT_SYMBOL_GPL
+0x4477b4f0 ata_sff_data_xfer32 vmlinux EXPORT_SYMBOL_GPL
+0x42d73e53 devres_release_group vmlinux EXPORT_SYMBOL_GPL
+0x8f7bd0a6 btree_init_mempool vmlinux EXPORT_SYMBOL_GPL
+0x9e61bb05 set_freezable vmlinux EXPORT_SYMBOL
+0xd1d026b2 xdr_decode_array2 net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x5fc6c5d9 ipt_register_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL
+0x1fa8f45f nfs_fhget fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x19eca5dc neigh_sysctl_unregister vmlinux EXPORT_SYMBOL
+0x60b3071f neigh_proc_dointvec vmlinux EXPORT_SYMBOL
+0xe4e48b12 swphy_validate_state vmlinux EXPORT_SYMBOL_GPL
+0x25c471e1 ata_pio_need_iordy vmlinux EXPORT_SYMBOL_GPL
+0x7cf66a08 dev_pm_clear_wake_irq vmlinux EXPORT_SYMBOL_GPL
+0x16cdc340 acpi_get_table vmlinux EXPORT_SYMBOL
+0xc9561772 fb_destroy_modelist vmlinux EXPORT_SYMBOL_GPL
+0x0ae0ea66 blk_abort_request vmlinux EXPORT_SYMBOL_GPL
+0x291d1906 blk_set_queue_depth vmlinux EXPORT_SYMBOL
+0xf6bcfab9 drm_crtc_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0265c549 drm_atomic_helper_commit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6b5d0e96 __scm_destroy vmlinux EXPORT_SYMBOL
+0xf8947580 mdiobus_get_phy vmlinux EXPORT_SYMBOL
+0xd1f98f26 device_connection_find vmlinux EXPORT_SYMBOL_GPL
+0x37156acc tty_port_register_device_serdev vmlinux EXPORT_SYMBOL_GPL
+0x443076d2 phy_pm_runtime_put vmlinux EXPORT_SYMBOL_GPL
+0xde4d4ace dim_calc_stats vmlinux EXPORT_SYMBOL
+0xeb9e913d sgl_alloc_order vmlinux EXPORT_SYMBOL
+0x740ea850 aead_exit_geniv vmlinux EXPORT_SYMBOL_GPL
+0xbc078418 d_move vmlinux EXPORT_SYMBOL
+0x5091b823 ring_buffer_read_start vmlinux EXPORT_SYMBOL_GPL
+0xbe93d7c0 __vsock_create net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x9e177174 rpc_wake_up_first net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x971936e9 ipcomp_destroy net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL
+0xc8a75e09 rdma_reject drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xaaf582c4 mlx4_flow_steer_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x89a5279a ipmi_get_version drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x8bebc35a ping_common_sendmsg vmlinux EXPORT_SYMBOL_GPL
+0xdc9ce2ee tcf_block_get vmlinux EXPORT_SYMBOL
+0x961bda94 fc_lport_set_local_id vmlinux EXPORT_SYMBOL
+0x8a8ffb2c ata_std_end_eh vmlinux EXPORT_SYMBOL
+0x2cf0cdf5 touch_atime vmlinux EXPORT_SYMBOL
+0xefb80a96 evict_inodes vmlinux EXPORT_SYMBOL_GPL
+0x7b9793a2 get_cpu_idle_time_us vmlinux EXPORT_SYMBOL_GPL
+0x39081193 __max_logical_packages vmlinux EXPORT_SYMBOL
+0x06a45c0d target_execute_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xd70d35a1 gf128mul_4k_bbe crypto/gf128mul EXPORT_SYMBOL
+0x5a4d313e gf128mul_4k_lle crypto/gf128mul EXPORT_SYMBOL
+0x5202a913 kernel_sendmsg_locked vmlinux EXPORT_SYMBOL
+0xcb6e90b1 device_set_wakeup_capable vmlinux EXPORT_SYMBOL_GPL
+0x5655c7ca device_dma_supported vmlinux EXPORT_SYMBOL_GPL
+0x4e1ac292 generic_end_io_acct vmlinux EXPORT_SYMBOL
+0x6cc2bebc padata_do_serial vmlinux EXPORT_SYMBOL
+0x81a805e7 mod_timer_pending vmlinux EXPORT_SYMBOL
+0x2f24b6a5 ib_unregister_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xeaad3487 target_tpg_has_node_acl drivers/target/target_core_mod EXPORT_SYMBOL
+0x2eb01e04 dm_deferred_set_destroy drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x1e6d26a8 strstr vmlinux EXPORT_SYMBOL
+0x349cba85 strchr vmlinux EXPORT_SYMBOL
+0x86c5c0b9 qdisc_tree_reduce_backlog vmlinux EXPORT_SYMBOL
+0xcd183356 devlink_port_type_clear vmlinux EXPORT_SYMBOL_GPL
+0xdada53e3 rtnl_configure_link vmlinux EXPORT_SYMBOL
+0xb61b5c7b sock_wmalloc vmlinux EXPORT_SYMBOL
+0x4de17ab3 usb_state_string vmlinux EXPORT_SYMBOL_GPL
+0x5715b847 amd_iommu_domain_direct_map vmlinux EXPORT_SYMBOL
+0xcd8cd769 acpi_processor_notify_smm vmlinux EXPORT_SYMBOL
+0x19e81304 btree_alloc vmlinux EXPORT_SYMBOL_GPL
+0xcbb36386 security_kernel_post_read_file vmlinux EXPORT_SYMBOL_GPL
+0xa3c667a1 task_user_regset_view vmlinux EXPORT_SYMBOL_GPL
+0x7156de5b virtio_transport_set_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x2d70be19 virtio_transport_get_buffer_size net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x188d9d26 __cast5_decrypt crypto/cast5_generic EXPORT_SYMBOL_GPL
+0xef81a4af __cast5_encrypt crypto/cast5_generic EXPORT_SYMBOL_GPL
+0xfc7f63bd crypto_transfer_aead_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xdb38b87f flow_indr_block_cb_unregister vmlinux EXPORT_SYMBOL_GPL
+0x4954e4f9 netif_tx_stop_all_queues vmlinux EXPORT_SYMBOL
+0xef16186e refcount_add_not_zero_checked vmlinux EXPORT_SYMBOL
+0xa905318c perf_event_disable vmlinux EXPORT_SYMBOL_GPL
+0x2dd16564 arch_register_cpu vmlinux EXPORT_SYMBOL
+0x9cb986f2 vmalloc_base vmlinux EXPORT_SYMBOL
+0x49195b76 rpcauth_stringify_acceptor net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc2eff770 i2c_handle_smbus_host_notify drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x80bb907a ppp_channel_index drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xdeb1dc2e mlxsw_afa_block_first_kvdl_index drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x68a645a1 kobject_create_and_add vmlinux EXPORT_SYMBOL_GPL
+0x900df8d8 inet_csk_listen_stop vmlinux EXPORT_SYMBOL_GPL
+0xb2438d54 dm_bufio_release_move vmlinux EXPORT_SYMBOL_GPL
+0x407af304 usb_wait_anchor_empty_timeout vmlinux EXPORT_SYMBOL_GPL
+0x4f72a987 uart_parse_options vmlinux EXPORT_SYMBOL_GPL
+0xa9d269b7 virtio_add_status vmlinux EXPORT_SYMBOL_GPL
+0xdd9d9bb2 pnp_release_card_device vmlinux EXPORT_SYMBOL
+0xe4089df9 pci_iomap_range vmlinux EXPORT_SYMBOL
+0xa0c60c30 put_disk_and_module vmlinux EXPORT_SYMBOL
+0x237d314c setup_new_exec vmlinux EXPORT_SYMBOL
+0xdb8a1b3f usermodehelper_read_trylock vmlinux EXPORT_SYMBOL_GPL
+0xeef02732 macvlan_link_register drivers/net/macvlan EXPORT_SYMBOL_GPL
+0x992e7ca5 mlx5_set_port_admin_status drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xd6409c8c drm_noop drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0f14397d drm_atomic_helper_damage_merged drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xa2c27f2b xfrm_state_lookup_byaddr vmlinux EXPORT_SYMBOL
+0x6618cf40 pskb_expand_head vmlinux EXPORT_SYMBOL
+0x6e53593e md_check_recovery vmlinux EXPORT_SYMBOL
+0xa183dd53 blkg_conf_prep vmlinux EXPORT_SYMBOL_GPL
+0x7eebfc62 keyring_alloc vmlinux EXPORT_SYMBOL
+0xb672aa97 lc_create lib/lru_cache EXPORT_SYMBOL
+0x45c61382 ibdev_crit drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6efa7211 ib_sg_to_pages drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x19a50641 __tracepoint_bcache_journal_full drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xcc3bf928 i2c_release_client drivers/i2c/i2c-core EXPORT_SYMBOL
+0x9a988ca1 sparse_keymap_report_event drivers/input/sparse-keymap EXPORT_SYMBOL
+0xd252e62d mlxsw_core_skb_transmit_busy drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xcd375d3c mlx5_core_query_sq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xf814893a mlx5_core_query_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x0b995adc drm_gem_dma_resv_wait drivers/gpu/drm/drm EXPORT_SYMBOL
+0x06fa3a38 fuse_do_ioctl fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x212133db xps_rxqs_needed vmlinux EXPORT_SYMBOL
+0xa9b85673 iscsi_boot_create_kset vmlinux EXPORT_SYMBOL_GPL
+0x60efb95f __iscsi_put_task vmlinux EXPORT_SYMBOL_GPL
+0x11a2866e __iscsi_get_task vmlinux EXPORT_SYMBOL_GPL
+0xeee3dd1a spi_display_xfer_agreement vmlinux EXPORT_SYMBOL
+0x25379e73 clk_set_min_rate vmlinux EXPORT_SYMBOL_GPL
+0xcfa4c972 skcipher_walk_virt vmlinux EXPORT_SYMBOL_GPL
+0x0290558a thp_get_unmapped_area vmlinux EXPORT_SYMBOL_GPL
+0xddd6a7be devices_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0x7bb045a7 __request_percpu_irq vmlinux EXPORT_SYMBOL_GPL
+0x426fe71a pm_qos_update_request vmlinux EXPORT_SYMBOL_GPL
+0x8d52eeb8 cxgbi_create_conn drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x0ab5f848 ip6_fraglist_init vmlinux EXPORT_SYMBOL
+0x97e6cd97 devlink_port_type_ib_set vmlinux EXPORT_SYMBOL_GPL
+0x68c2310b rtnl_link_register vmlinux EXPORT_SYMBOL_GPL
+0x325a2a81 make_flow_keys_digest vmlinux EXPORT_SYMBOL
+0xaa4beb25 sk_set_memalloc vmlinux EXPORT_SYMBOL_GPL
+0x9757724b gov_update_cpu_data vmlinux EXPORT_SYMBOL_GPL
+0x0619db17 ata_bmdma_port_intr vmlinux EXPORT_SYMBOL_GPL
+0xe3076443 dma_buf_export vmlinux EXPORT_SYMBOL_GPL
+0x8506baa8 clk_unregister_gate vmlinux EXPORT_SYMBOL_GPL
+0x3441445f msrs_free vmlinux EXPORT_SYMBOL
+0x3411e296 bh_submit_read vmlinux EXPORT_SYMBOL
+0xb54bb7ff vfs_dedupe_file_range_one vmlinux EXPORT_SYMBOL
+0x787416f3 ftrace_set_clr_event vmlinux EXPORT_SYMBOL_GPL
+0xd355f0ab osd_req_op_extent_osd_data_bvecs net/ceph/libceph EXPORT_SYMBOL
+0x5d27f473 rpc_localaddr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6adf62c4 ip_tunnel_get_link_net net/ipv4/ip_tunnel EXPORT_SYMBOL
+0xfaf75750 nft_fwd_dup_netdev_offload net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL
+0x9edff55e vhost_get_vq_desc drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x8b9485f8 drm_writeback_signal_completion drivers/gpu/drm/drm EXPORT_SYMBOL
+0x78aa77d7 drm_plane_create_zpos_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6d8829ac ip_mc_check_igmp vmlinux EXPORT_SYMBOL
+0x08f26f80 eth_change_mtu vmlinux EXPORT_SYMBOL
+0xfc21b796 eth_header_parse vmlinux EXPORT_SYMBOL
+0x7023bea8 unregister_acpi_notifier vmlinux EXPORT_SYMBOL
+0x06bd88b5 ucs2_strnlen vmlinux EXPORT_SYMBOL
+0x814ac649 bdev_stack_limits vmlinux EXPORT_SYMBOL
+0x10879281 discard_new_inode vmlinux EXPORT_SYMBOL
+0x374c2088 kmsg_dump_get_buffer vmlinux EXPORT_SYMBOL_GPL
+0x4fac98a7 machine_check_poll vmlinux EXPORT_SYMBOL_GPL
+0x5e8f6d84 nf_nat_pptp_hook_exp_gre net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL
+0x705bf72e transport_handle_cdb_direct drivers/target/target_core_mod EXPORT_SYMBOL
+0x6a4cdfae bcm_phy_read_exp drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0xeea46a94 crypto_finalize_skcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x0f0d768c sock_wake_async vmlinux EXPORT_SYMBOL
+0x40429d01 srp_stop_rport_timers vmlinux EXPORT_SYMBOL_GPL
+0x865d93c3 ata_bmdma_qc_prep vmlinux EXPORT_SYMBOL_GPL
+0x96a6c0ab nd_pfn_probe vmlinux EXPORT_SYMBOL
+0xae5a04bb acpi_evaluate_dsm vmlinux EXPORT_SYMBOL
+0x65ed76ef blk_set_runtime_active vmlinux EXPORT_SYMBOL
+0x8c867cba bpf_offload_dev_netdev_register vmlinux EXPORT_SYMBOL_GPL
+0x425603ec __wait_rcu_gp vmlinux EXPORT_SYMBOL_GPL
+0x996a96fe nf_synproxy_ipv6_fini net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0x4130b1d8 dm_cell_lock_promote_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x268f0881 drm_client_modeset_dpms drivers/gpu/drm/drm EXPORT_SYMBOL
+0x131a6146 xa_clear_mark vmlinux EXPORT_SYMBOL
+0x82d9eae1 nvme_cancel_request vmlinux EXPORT_SYMBOL_GPL
+0x91dd8055 rhashtable_walk_enter vmlinux EXPORT_SYMBOL_GPL
+0x4954b49a crypto_register_aead vmlinux EXPORT_SYMBOL_GPL
+0x2b0e370f crypto_register_algs vmlinux EXPORT_SYMBOL_GPL
+0x56b028af fat_free_clusters vmlinux EXPORT_SYMBOL_GPL
+0xd5a1b26b jbd2_complete_transaction vmlinux EXPORT_SYMBOL
+0x1a051ccd __set_page_dirty_buffers vmlinux EXPORT_SYMBOL
+0xaae20a87 unlock_two_nondirectories vmlinux EXPORT_SYMBOL
+0x2773c485 __wake_up_locked vmlinux EXPORT_SYMBOL_GPL
+0x99f068d5 x86_cpu_to_node_map vmlinux EXPORT_SYMBOL
+0x8b27e458 put_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x96848186 scnprintf vmlinux EXPORT_SYMBOL
+0x106a5603 tc_cleanup_flow_action vmlinux EXPORT_SYMBOL
+0x376ece7c dev_pre_changeaddr_notify vmlinux EXPORT_SYMBOL
+0x2a1ca9e3 dev_queue_xmit_nit vmlinux EXPORT_SYMBOL_GPL
+0x197ad72b sock_prot_inuse_add vmlinux EXPORT_SYMBOL_GPL
+0x5eeb7088 dev_pm_opp_get_max_transition_latency vmlinux EXPORT_SYMBOL_GPL
+0x084eca68 usb_hcd_platform_shutdown vmlinux EXPORT_SYMBOL_GPL
+0xa5431b04 mod_zone_page_state vmlinux EXPORT_SYMBOL
+0xfe9d166c perf_event_refresh vmlinux EXPORT_SYMBOL_GPL
+0x29ad8e33 x86_hyper_type vmlinux EXPORT_SYMBOL
+0x1e351d03 rdma_restrack_count drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x8592d892 amd_register_ecc_decoder drivers/edac/edac_mce_amd EXPORT_SYMBOL_GPL
+0xf12a8bd9 mlx5_core_set_delay_drop drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x7fc0b6c5 kvm_irq_has_notifier arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1a124f1e tcf_register_action vmlinux EXPORT_SYMBOL
+0xa4b10f2e mini_qdisc_pair_swap vmlinux EXPORT_SYMBOL
+0xb0c3b920 netdev_reset_tc vmlinux EXPORT_SYMBOL
+0x40267068 usb_anchor_resume_wakeups vmlinux EXPORT_SYMBOL_GPL
+0x7eed04cc ufshcd_runtime_suspend vmlinux EXPORT_SYMBOL
+0x481d0d60 dev_pm_set_wake_irq vmlinux EXPORT_SYMBOL_GPL
+0xbdf1af9f pm_runtime_autosuspend_expiration vmlinux EXPORT_SYMBOL_GPL
+0x3ddc6c04 x86_bios_cpu_apicid vmlinux EXPORT_SYMBOL
+0x994f9aec mlx5_rl_is_in_range drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x17c14bd0 rtnl_link_unregister vmlinux EXPORT_SYMBOL_GPL
+0x4596c485 md_bitmap_sync_with_cluster vmlinux EXPORT_SYMBOL
+0xe33d0d78 sata_port_ops vmlinux EXPORT_SYMBOL_GPL
+0x399fbf54 __free_iova vmlinux EXPORT_SYMBOL_GPL
+0xf60ee6ea d_alloc_anon vmlinux EXPORT_SYMBOL
+0x0ba7f329 copy_strings_kernel vmlinux EXPORT_SYMBOL
+0x732e7ad0 generic_copy_file_range vmlinux EXPORT_SYMBOL
+0xbe6a866f __wait_on_bit vmlinux EXPORT_SYMBOL
+0xaf611eac amd_nb_misc_ids vmlinux EXPORT_SYMBOL_GPL
+0xe7237b0b i915_gpu_turbo_disable drivers/gpu/drm/i915/i915 EXPORT_SYMBOL_GPL
+0x89656647 drm_bridge_post_disable drivers/gpu/drm/drm EXPORT_SYMBOL
+0x20645642 drm_debug drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3fa7788b vlan_filter_push_vids vmlinux EXPORT_SYMBOL
+0x690f585e phy_basic_ports_array vmlinux EXPORT_SYMBOL_GPL
+0x237e7cfc iscsi_recv_pdu vmlinux EXPORT_SYMBOL_GPL
+0xcaa0931b __platform_register_drivers vmlinux EXPORT_SYMBOL_GPL
+0xa507125e acpi_clear_gpe vmlinux EXPORT_SYMBOL
+0xe3f16633 rhashtable_free_and_destroy vmlinux EXPORT_SYMBOL_GPL
+0xbc8a802f device_add_disk_no_queue_reg vmlinux EXPORT_SYMBOL
+0x2479193e crypto_authenc_extractkeys crypto/authenc EXPORT_SYMBOL_GPL
+0xd033c66f kvm_vcpu_halt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x2e2522f7 tcp_enter_memory_pressure vmlinux EXPORT_SYMBOL_GPL
+0x712f28d0 xt_compat_match_offset vmlinux EXPORT_SYMBOL_GPL
+0x05176bba dst_init vmlinux EXPORT_SYMBOL
+0x4e3fabdc skb_add_rx_frag vmlinux EXPORT_SYMBOL
+0x1106c99a md_kick_rdev_from_array vmlinux EXPORT_SYMBOL_GPL
+0xf2a70ba2 usb_hcd_amd_remote_wakeup_quirk vmlinux EXPORT_SYMBOL_GPL
+0x811dc334 usb_unregister_notify vmlinux EXPORT_SYMBOL_GPL
+0x5d6e4237 genphy_c45_an_disable_aneg vmlinux EXPORT_SYMBOL_GPL
+0xdb2553ef sas_ioctl vmlinux EXPORT_SYMBOL_GPL
+0x77456e0a acpi_root_dir vmlinux EXPORT_SYMBOL
+0xf4a565fd wrmsr_on_cpus vmlinux EXPORT_SYMBOL
+0x2211a4c0 blk_op_str vmlinux EXPORT_SYMBOL_GPL
+0x7ea1a2bc probe_kernel_write vmlinux EXPORT_SYMBOL_GPL
+0xd1f60a89 arch_io_free_memtype_wc vmlinux EXPORT_SYMBOL
+0x97651e6c vmemmap_base vmlinux EXPORT_SYMBOL
+0xccef37e4 ZSTD_DStreamOutSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0xc023e88a nf_ct_iterate_cleanup_net net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x1d417ce9 bch_btree_keys_init drivers/md/bcache/bcache EXPORT_SYMBOL
+0xaaf3cb16 mlx4_multicast_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x0290a381 drm_atomic_helper_suspend drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6e66aa91 arp_create vmlinux EXPORT_SYMBOL
+0xe3be8155 genphy_c45_an_config_aneg vmlinux EXPORT_SYMBOL_GPL
+0x10d190ba spi_schedule_dv_device vmlinux EXPORT_SYMBOL
+0x427c74b9 ata_bmdma_post_internal_cmd vmlinux EXPORT_SYMBOL_GPL
+0x6ee703b8 tty_port_alloc_xmit_buf vmlinux EXPORT_SYMBOL
+0xcd01b8e6 acpi_attach_data vmlinux EXPORT_SYMBOL
+0x88822d38 unregister_blocking_lsm_notifier vmlinux EXPORT_SYMBOL
+0x0a020c80 jbd2_journal_get_create_access vmlinux EXPORT_SYMBOL
+0xbe0489fb no_llseek vmlinux EXPORT_SYMBOL
+0xaf9d73e3 rt_mutex_timed_lock vmlinux EXPORT_SYMBOL_GPL
+0x738fe32b amd_get_nodes_per_socket vmlinux EXPORT_SYMBOL_GPL
+0x556cca46 x86_apple_machine vmlinux EXPORT_SYMBOL
+0x0f9a65ec ib_init_ah_attr_from_path drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x12ff4506 ib_modify_qp_with_udata drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5375ca71 dm_bm_write_lock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x5c618c22 cxgb4_port_viid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x030cba87 cxgb4_free_stid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x1f41767f cxgb3_free_stid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0xb0525248 gfn_to_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x0d845f81 of_find_mipi_dsi_device_by_node vmlinux EXPORT_SYMBOL
+0x50a4698c fb_videomode_to_modelist vmlinux EXPORT_SYMBOL
+0xddb3b8ef pci_generic_config_write vmlinux EXPORT_SYMBOL_GPL
+0x0ea48c51 iov_iter_for_each_range vmlinux EXPORT_SYMBOL
+0x5cb34b67 vfs_truncate vmlinux EXPORT_SYMBOL_GPL
+0x33084980 __cgroup_bpf_run_filter_sock_ops vmlinux EXPORT_SYMBOL
+0x77eb2fe7 __hrtimer_get_remaining vmlinux EXPORT_SYMBOL_GPL
+0xa33ab01d drm_mode_copy drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0167337d efivar_entry_add vmlinux EXPORT_SYMBOL_GPL
+0x9c803020 usb_phy_roothub_power_on vmlinux EXPORT_SYMBOL_GPL
+0x8048715b mdio_device_free vmlinux EXPORT_SYMBOL
+0x344c7158 fc_lport_config vmlinux EXPORT_SYMBOL
+0xe933b401 device_match_name vmlinux EXPORT_SYMBOL_GPL
+0xbb383dac acpi_pci_check_ejectable vmlinux EXPORT_SYMBOL_GPL
+0x2d192c70 sg_zero_buffer vmlinux EXPORT_SYMBOL
+0x0fd377bd register_sysctl_paths vmlinux EXPORT_SYMBOL
+0x0da09f53 iomap_migrate_page vmlinux EXPORT_SYMBOL_GPL
+0x75791363 pagecache_write_begin vmlinux EXPORT_SYMBOL
+0x0d459213 work_on_cpu_safe vmlinux EXPORT_SYMBOL_GPL
+0x61577694 ZSTD_compressEnd lib/zstd/zstd_compress EXPORT_SYMBOL
+0x09b1e5f9 rpc_clnt_xprt_switch_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x71f6670b __cookie_v6_check vmlinux EXPORT_SYMBOL_GPL
+0x752b4106 iscsi_segment_init_linear vmlinux EXPORT_SYMBOL_GPL
+0xaf7523ff scmd_printk vmlinux EXPORT_SYMBOL
+0x8762e419 ata_sff_pause vmlinux EXPORT_SYMBOL_GPL
+0x03db77dc dev_pm_qos_add_request vmlinux EXPORT_SYMBOL_GPL
+0xbf634f96 acpi_bind_one vmlinux EXPORT_SYMBOL_GPL
+0x16286538 iowrite64be_lo_hi vmlinux EXPORT_SYMBOL
+0xa8223179 refcount_dec_checked vmlinux EXPORT_SYMBOL
+0x2b9987a9 bio_associate_blkg_from_css vmlinux EXPORT_SYMBOL_GPL
+0x05aed71f padata_set_cpumask vmlinux EXPORT_SYMBOL
+0xc6cbbc89 capable vmlinux EXPORT_SYMBOL
+0xcf7a953a mlx5_core_dealloc_pd drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xe6d75168 nvmf_reg_read64 drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x97428e59 nvmf_reg_read32 drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x1c42cc16 drm_mode_create_tv_properties drivers/gpu/drm/drm EXPORT_SYMBOL
+0xdf36914b xa_find_after vmlinux EXPORT_SYMBOL
+0x7cdd3577 gro_cells_init vmlinux EXPORT_SYMBOL
+0xc0b2664d devlink_dpipe_header_ipv4 vmlinux EXPORT_SYMBOL
+0x0f080ca8 hid_debug_event vmlinux EXPORT_SYMBOL_GPL
+0x9e1aaf34 input_set_capability vmlinux EXPORT_SYMBOL
+0xe3be271c ps2_cmd_aborted vmlinux EXPORT_SYMBOL
+0xeac2abb8 phy_queue_state_machine vmlinux EXPORT_SYMBOL
+0xbfe55ed8 crypto_register_templates vmlinux EXPORT_SYMBOL_GPL
+0x4ef5bcf4 perf_swevent_get_recursion_context vmlinux EXPORT_SYMBOL_GPL
+0x0e487fa4 irq_find_mapping vmlinux EXPORT_SYMBOL_GPL
+0xad73041f autoremove_wake_function vmlinux EXPORT_SYMBOL
+0x963d42aa __wake_up_sync vmlinux EXPORT_SYMBOL_GPL
+0x5334fd83 sched_trace_cfs_rq_cpu vmlinux EXPORT_SYMBOL_GPL
+0x78272103 virtio_transport_notify_poll_out net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0xb03961fd nf_ct_port_tuple_to_nlattr net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x578a1876 tun_xdp_to_ptr drivers/net/tun
EXPORT_SYMBOL +0x81d61eef vmci_qpair_dequeue drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x6c66fa66 drm_fb_helper_hotplug_event drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x77d29288 drm_fb_helper_initial_config drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xc365dd50 vlan_vid_del vmlinux EXPORT_SYMBOL +0xb18f84b7 skb_mpls_push vmlinux EXPORT_SYMBOL_GPL +0xb6eb6e41 fcoe_get_paged_crc_eof vmlinux EXPORT_SYMBOL_GPL +0xc2bb83f6 iommu_report_device_fault vmlinux EXPORT_SYMBOL_GPL +0x6824e097 give_up_console vmlinux EXPORT_SYMBOL +0xe7ffe877 pcpu_base_addr vmlinux EXPORT_SYMBOL_GPL +0xed40c481 kvm_clock vmlinux EXPORT_SYMBOL_GPL +0x5f62d6d2 nf_conntrack_helper_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xe5ba434f ib_uverbs_flow_resources_free drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x89197daa rdma_resolve_addr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xbc3af3b9 drm_mode_find_dmt drivers/gpu/drm/drm EXPORT_SYMBOL +0x75942444 drm_atomic_helper_async_commit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x6d1cc2d7 mpt_reset_register vmlinux EXPORT_SYMBOL +0x38f11a97 acpi_dev_resource_memory vmlinux EXPORT_SYMBOL_GPL +0xd048bbca skcipher_walk_done vmlinux EXPORT_SYMBOL_GPL +0x9a11a0fc crypto_attr_alg_name vmlinux EXPORT_SYMBOL_GPL +0xdeee6af2 debugfs_create_regset32 vmlinux EXPORT_SYMBOL_GPL +0x811bdd03 __test_set_page_writeback vmlinux EXPORT_SYMBOL +0x8251f15b irq_chip_set_wake_parent vmlinux EXPORT_SYMBOL_GPL +0x50d68377 arch_phys_wc_del vmlinux EXPORT_SYMBOL +0x06c8f2de slhc_compress drivers/net/slip/slhc EXPORT_SYMBOL +0x72395dc1 xfrm_calg_get_byid vmlinux EXPORT_SYMBOL_GPL +0xa7f5b7c4 ir_raw_handler_register vmlinux EXPORT_SYMBOL +0x85fa59c6 uhci_check_and_reset_hc vmlinux EXPORT_SYMBOL_GPL +0x2e2d9cf3 usb_hc_died vmlinux EXPORT_SYMBOL_GPL +0x86e4d192 hsu_dma_do_irq vmlinux EXPORT_SYMBOL_GPL +0x44958a28 pci_max_pasids vmlinux EXPORT_SYMBOL_GPL +0x5aeb53ec pci_free_irq vmlinux EXPORT_SYMBOL +0x5980ed8c pinctrl_dev_get_name vmlinux EXPORT_SYMBOL_GPL +0xc1514a3b free_irq vmlinux EXPORT_SYMBOL +0x4097e475 l2tp_session_create net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x3c19e9ff team_options_unregister drivers/net/team/team EXPORT_SYMBOL +0x19ae0d2f cxgb4_clip_release drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xefff6975 nd_cmd_out_size vmlinux EXPORT_SYMBOL_GPL +0x2d178589 nvdimm_to_bus vmlinux EXPORT_SYMBOL_GPL +0x7b046c83 acpi_subsys_restore_early vmlinux EXPORT_SYMBOL_GPL +0xc0f83180 jbd2_journal_init_inode vmlinux EXPORT_SYMBOL +0xb0f05cbb take_dentry_name_snapshot vmlinux EXPORT_SYMBOL +0xe8f8225c deactivate_locked_super vmlinux EXPORT_SYMBOL +0xb1ef0548 ftrace_set_filter vmlinux EXPORT_SYMBOL_GPL +0xb4d9c654 virtio_transport_dgram_enqueue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0xedcb7608 sunrpc_cache_unhash net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc538f247 rpcauth_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd292fbf6 xfrm_audit_policy_delete vmlinux EXPORT_SYMBOL_GPL +0x1cd1ba96 inet_twsk_hashdance vmlinux EXPORT_SYMBOL_GPL +0x319b3e77 sock_zerocopy_realloc vmlinux EXPORT_SYMBOL_GPL +0xc3dcd0ed init_iova_flush_queue vmlinux EXPORT_SYMBOL_GPL +0x03b47a4d clkdev_create vmlinux EXPORT_SYMBOL_GPL +0x95bc9078 btree_free vmlinux EXPORT_SYMBOL_GPL +0x9c942adc vprintk_emit vmlinux EXPORT_SYMBOL +0x09d44df9 in_lock_functions vmlinux EXPORT_SYMBOL +0xf3b8b6cb rpc_wake_up net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf38bb93c ip6_tnl_xmit_ctl net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL +0x30c37cc0 dm_bm_write_lock_zero 
drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xbfd01f33 mlxsw_core_port_ib_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x696ea7a0 mlx4_assign_eq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xbb06f3f3 cxgb3_insert_tid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x1f781d68 drm_default_rgb_quant_range drivers/gpu/drm/drm EXPORT_SYMBOL +0x5407ae9e drm_dp_get_dual_mode_type_name drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x851c747c simd_aead_create crypto/crypto_simd EXPORT_SYMBOL_GPL +0x34dedbdf ip6_sk_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0x3a214c65 netpoll_parse_options vmlinux EXPORT_SYMBOL +0x57a1e453 netdev_rx_handler_unregister vmlinux EXPORT_SYMBOL_GPL +0x44c35a06 kernel_getpeername vmlinux EXPORT_SYMBOL +0xd3d33fb5 sas_port_add_phy vmlinux EXPORT_SYMBOL +0x447a1527 to_nd_pfn vmlinux EXPORT_SYMBOL +0xfdfb792f amd_iommu_pc_supported vmlinux EXPORT_SYMBOL +0x9a694cd2 serial8250_do_set_termios vmlinux EXPORT_SYMBOL +0x240e9469 clk_register_fixed_rate_with_accuracy vmlinux EXPORT_SYMBOL_GPL +0xc21a0569 __devm_of_phy_provider_register vmlinux EXPORT_SYMBOL_GPL +0xb2713ffb sysfs_chmod_file vmlinux EXPORT_SYMBOL_GPL +0xbccfd4d8 register_oldmem_pfn_is_ram vmlinux EXPORT_SYMBOL_GPL +0x2e50fe1e find_module vmlinux EXPORT_SYMBOL_GPL +0x5164e99e rt_mutex_lock vmlinux EXPORT_SYMBOL_GPL +0xb49de221 __tracepoint_sched_overutilized_tp vmlinux EXPORT_SYMBOL_GPL +0xe0c77bb5 mce_notify_irq vmlinux EXPORT_SYMBOL_GPL +0x8ab02f3a ib_sa_service_rec_query drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7935daee ibnl_put_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x1d06ac0d vfio_register_notifier drivers/vfio/vfio EXPORT_SYMBOL +0x7e7877e1 kvm_handle_page_fault arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6694bd5e l3mdev_link_scope_lookup vmlinux EXPORT_SYMBOL_GPL +0x32054b10 alloc_fcdev vmlinux EXPORT_SYMBOL +0xcc7e390f skb_complete_wifi_ack vmlinux EXPORT_SYMBOL_GPL +0x4236989b input_event vmlinux EXPORT_SYMBOL +0x1d9371b3 pm_runtime_suspended_time vmlinux EXPORT_SYMBOL_GPL +0xb7605b71 mipi_dsi_device_unregister vmlinux EXPORT_SYMBOL +0xb329d7e2 devm_pinctrl_put vmlinux EXPORT_SYMBOL_GPL +0x77292add devm_pinctrl_get vmlinux EXPORT_SYMBOL_GPL +0x11f7ed4c hex_to_bin vmlinux EXPORT_SYMBOL +0xc8827b75 sysctl_vals vmlinux EXPORT_SYMBOL +0x7570e415 simple_pin_fs vmlinux EXPORT_SYMBOL +0x5635a60a vmalloc_user vmlinux EXPORT_SYMBOL +0xc301b771 posix_clock_unregister vmlinux EXPORT_SYMBOL_GPL +0x122f5a88 kthread_create_worker_on_cpu vmlinux EXPORT_SYMBOL +0x7257beee lc_element_by_index lib/lru_cache EXPORT_SYMBOL +0x1096e288 xprt_pin_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe0a81acd nf_ct_nat_ext_add net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xa4a79e8f vhost_enqueue_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x56714feb ib_unregister_device_queued drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xbb09de63 adf_vf2pf_shutdown drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x272a8933 udp_memory_allocated vmlinux EXPORT_SYMBOL +0x24c8e482 xt_copy_counters_from_user vmlinux EXPORT_SYMBOL_GPL +0xce67bcac bpf_prog_create_from_user vmlinux EXPORT_SYMBOL_GPL +0x18a8f6f3 sock_no_sendpage vmlinux EXPORT_SYMBOL +0x3cd9b2ee thermal_generate_netlink_event vmlinux EXPORT_SYMBOL_GPL +0x395fc01a acpi_dev_resource_address_space vmlinux EXPORT_SYMBOL_GPL +0xe4329092 __ctzdi2 vmlinux EXPORT_SYMBOL +0x6fd9c35a __clzdi2 vmlinux EXPORT_SYMBOL +0x41b466e1 blk_pre_runtime_suspend vmlinux EXPORT_SYMBOL +0x0cc4b4b6 crc_ccitt_false lib/crc-ccitt EXPORT_SYMBOL 
+0x5aba5415 mdev_get_iommu_device drivers/vfio/mdev/mdev EXPORT_SYMBOL +0x1b1f2bda speedstep_get_freqs drivers/cpufreq/speedstep-lib EXPORT_SYMBOL_GPL +0x81bcfe7c mlx5_free_bfreg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7f448e7a rdev_set_badblocks vmlinux EXPORT_SYMBOL_GPL +0x90646844 get_current_tty vmlinux EXPORT_SYMBOL_GPL +0xb013b3a0 proc_create_data vmlinux EXPORT_SYMBOL +0x25fe8583 filemap_fdatawait_range_keep_errors vmlinux EXPORT_SYMBOL +0x98b62ca5 filemap_range_has_page vmlinux EXPORT_SYMBOL +0xb3941d2b param_ops_string vmlinux EXPORT_SYMBOL +0xf35083ce l2tp_tunnel_get_session net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x37c71ff2 l2tp_tunnel_get net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xc8904c35 sunrpc_cache_register_pipefs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xdcfc4a18 edac_device_alloc_ctl_info drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x5e949e0a vmci_doorbell_destroy drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x37ffbe56 nfs_getattr fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb28ae5ad nfs_setattr fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdf7fa33b __tracepoint_tcp_send_reset vmlinux EXPORT_SYMBOL_GPL +0x280dce43 kernel_getsockname vmlinux EXPORT_SYMBOL +0x307dc950 kernel_sendmsg vmlinux EXPORT_SYMBOL +0xd264810d uart_register_driver vmlinux EXPORT_SYMBOL +0x41628a87 divider_round_rate_parent vmlinux EXPORT_SYMBOL_GPL +0xd91319d6 raid6_gfmul vmlinux EXPORT_SYMBOL +0x39b52d19 __bitmap_and vmlinux EXPORT_SYMBOL +0x0641307b lc_destroy lib/lru_cache EXPORT_SYMBOL +0xc32b59ff ib_destroy_fmr_pool drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7bd4811d mlx5_eq_get_eqe drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xa5210984 mlx4_eq_get_irq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xaf8982f3 drm_fb_helper_defio_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfd52cae3 drm_atomic_helper_resume drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xb6f43fd9 xfrm_alloc_spi vmlinux EXPORT_SYMBOL +0x05d090d1 nvmem_device_put vmlinux EXPORT_SYMBOL_GPL +0x8e13ddf0 nvmem_device_get vmlinux EXPORT_SYMBOL_GPL +0xca6ca14f fwnode_remove_software_node vmlinux EXPORT_SYMBOL_GPL +0x36b5497e intel_iommu_enabled vmlinux EXPORT_SYMBOL_GPL +0x32b4bf97 tpm_get_random vmlinux EXPORT_SYMBOL_GPL +0x3f772916 pci_enable_sriov vmlinux EXPORT_SYMBOL_GPL +0xc1e58a5f refcount_dec_and_test_checked vmlinux EXPORT_SYMBOL +0xc01ee510 blk_rq_err_bytes vmlinux EXPORT_SYMBOL_GPL +0xd36d5fc9 crypto_alloc_sync_skcipher vmlinux EXPORT_SYMBOL_GPL +0xc81cf012 crypto_blkcipher_type vmlinux EXPORT_SYMBOL_GPL +0x9b7c44b6 bch_bset_build_written_tree drivers/md/bcache/bcache EXPORT_SYMBOL +0x7e2d84cb i2c_bit_algo drivers/i2c/algos/i2c-algo-bit EXPORT_SYMBOL +0xbc1e4e8f drm_gem_dumb_map_offset drivers/gpu/drm/drm EXPORT_SYMBOL_GPL +0x3fbd8500 kvm_get_dr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x32af3191 kvm_set_dr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x4e8a464a nf_log_packet vmlinux EXPORT_SYMBOL +0x649f6ae7 dev_remove_pack vmlinux EXPORT_SYMBOL +0x0f7ca236 dmi_memdev_name vmlinux EXPORT_SYMBOL_GPL +0x2c256e1f input_scancode_to_scalar vmlinux EXPORT_SYMBOL +0x3de3b996 fcoe_get_lesb vmlinux EXPORT_SYMBOL_GPL +0x6da9c2d0 ata_eh_freeze_port vmlinux EXPORT_SYMBOL_GPL +0x759bfe36 btree_destroy vmlinux EXPORT_SYMBOL_GPL +0x1024e92a blkcipher_walk_virt vmlinux EXPORT_SYMBOL_GPL +0x8794d2a5 posix_lock_file vmlinux EXPORT_SYMBOL +0x71a672ef dmam_pool_destroy vmlinux EXPORT_SYMBOL +0x6a019558 set_page_dirty vmlinux EXPORT_SYMBOL +0x039974a1 kthread_destroy_worker vmlinux EXPORT_SYMBOL +0xe83eba32 
itlb_multihit_kvm_mitigation vmlinux EXPORT_SYMBOL_GPL +0x2a91bb33 ib_cache_gid_type_str drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xace9b57b dm_bio_prison_destroy_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x8f6f947e mlx5_cmd_init drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xe8fa6506 cryptd_alloc_aead crypto/cryptd EXPORT_SYMBOL_GPL +0x7cf52901 ir_raw_gen_manchester vmlinux EXPORT_SYMBOL +0x39091ea5 acpi_match_device vmlinux EXPORT_SYMBOL_GPL +0x5831e062 cpus_read_trylock vmlinux EXPORT_SYMBOL_GPL +0xa98e0f59 dm_rh_inc_pending drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xe83c8ddc mlx4_INIT_PORT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x58c88ff2 o2hb_get_all_regions fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xf36ff71a kvm_lapic_hv_timer_in_use arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x1150c3b8 udp_disconnect vmlinux EXPORT_SYMBOL +0x1267dbbb dev_mc_unsync vmlinux EXPORT_SYMBOL +0x73a10de7 dev_uc_unsync vmlinux EXPORT_SYMBOL +0xc9bab0d3 devm_nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL +0xc54bb660 usb_choose_configuration vmlinux EXPORT_SYMBOL_GPL +0x8a00ae27 usb_free_streams vmlinux EXPORT_SYMBOL_GPL +0x66a99c1f fcoe_transport_attach vmlinux EXPORT_SYMBOL +0x7c422164 ata_sff_prereset vmlinux EXPORT_SYMBOL_GPL +0x9b92d16e pinctrl_gpio_set_config vmlinux EXPORT_SYMBOL_GPL +0x7b4c9ba9 sbitmap_queue_min_shallow_depth vmlinux EXPORT_SYMBOL_GPL +0x0ddbb9d6 async_gen_syndrome vmlinux EXPORT_SYMBOL_GPL +0xd2782c20 ip_set_get_byname net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xc6b86fc8 drm_mode_create_colorspace_property drivers/gpu/drm/drm EXPORT_SYMBOL +0xc6ea16d4 drm_gem_fb_simple_display_pipe_prepare_fb drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1cf65ffc kvm_max_guest_tsc_khz arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x59e640c0 halt_poll_ns arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x88965034 addrconf_prefix_rcv_add_addr vmlinux EXPORT_SYMBOL_GPL +0xa7c94f1d xt_compat_lock vmlinux EXPORT_SYMBOL_GPL +0xd3340cb5 input_set_timestamp vmlinux EXPORT_SYMBOL +0xaf1c1201 phy_set_asym_pause vmlinux EXPORT_SYMBOL +0xb6550743 fwnode_graph_get_remote_port vmlinux EXPORT_SYMBOL_GPL +0x6ff2e138 bus_get_kset vmlinux EXPORT_SYMBOL_GPL +0x7d12d76d acpi_get_parent vmlinux EXPORT_SYMBOL +0x7ecb8dac fb_pan_display vmlinux EXPORT_SYMBOL +0x91caacc8 fb_show_logo vmlinux EXPORT_SYMBOL +0xd4650095 blk_mq_virtio_map_queues vmlinux EXPORT_SYMBOL_GPL +0x76c2480f __register_chrdev vmlinux EXPORT_SYMBOL +0xd39e9848 put_itimerspec64 vmlinux EXPORT_SYMBOL_GPL +0xb54676fa ceph_msg_type_name net/ceph/libceph EXPORT_SYMBOL +0xe5919cb1 xdr_encode_opaque net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x613b1e2e ib_is_mad_class_rmpp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4e90435c ib_sa_free_multicast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe23aa988 mlxsw_core_resources_query drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x4d012000 drm_atomic_set_mode_for_crtc drivers/gpu/drm/drm EXPORT_SYMBOL +0x1235000a kvm_tsc_scaling_ratio_frac_bits arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xe4248980 cper_estatus_print vmlinux EXPORT_SYMBOL_GPL +0xae19a08f rtc_class_close vmlinux EXPORT_SYMBOL_GPL +0x033832c7 usb_amd_hang_symptom_quirk vmlinux EXPORT_SYMBOL_GPL +0x33b6dcad ufshcd_config_pwr_mode vmlinux EXPORT_SYMBOL_GPL +0x08c8a2c5 iscsi_session_recovery_timedout vmlinux EXPORT_SYMBOL_GPL +0xc58d5a90 kstrtoll_from_user vmlinux EXPORT_SYMBOL +0xf1969a8e __usecs_to_jiffies vmlinux EXPORT_SYMBOL +0x7f02188f __msecs_to_jiffies vmlinux EXPORT_SYMBOL +0x77eeea77 
sched_trace_rq_avg_irq vmlinux EXPORT_SYMBOL_GPL +0x854469ea svc_max_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd14e9a41 adf_reset_sbr drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xd8aa4284 dm_rh_region_context drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x0a4f0e3b kvm_rdpmc arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb9c4c198 vlan_vids_add_by_dev vmlinux EXPORT_SYMBOL +0xdc14eda7 pci_pci_problems vmlinux EXPORT_SYMBOL +0xbe741142 pcie_capability_read_dword vmlinux EXPORT_SYMBOL +0x26dd4d43 blk_queue_stack_limits vmlinux EXPORT_SYMBOL +0xc2154b4f drop_super_exclusive vmlinux EXPORT_SYMBOL +0x22f44900 free_vm_area vmlinux EXPORT_SYMBOL_GPL +0x589c948a tracing_snapshot_cond_disable vmlinux EXPORT_SYMBOL_GPL +0xb5147467 ovs_netdev_tunnel_destroy net/openvswitch/openvswitch EXPORT_SYMBOL_GPL +0xe146ae9d nf_conntrack_free net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xefd6883a ptp_clock_event drivers/ptp/ptp EXPORT_SYMBOL +0x4f8fdbae macvlan_common_newlink drivers/net/macvlan EXPORT_SYMBOL_GPL +0xfc8d265f drm_dp_downstream_debug drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x9d012bb8 nfs41_sequence_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x092cf98e fscache_object_sleep_till_congested fs/fscache/fscache EXPORT_SYMBOL_GPL +0x2e678211 xas_find_conflict vmlinux EXPORT_SYMBOL_GPL +0xa20a0ab2 udp_lib_getsockopt vmlinux EXPORT_SYMBOL +0x85531179 udp_lib_setsockopt vmlinux EXPORT_SYMBOL +0x7bce4603 xt_data_to_user vmlinux EXPORT_SYMBOL_GPL +0x81533963 sysfs_format_mac vmlinux EXPORT_SYMBOL +0x0beaae4b get_net_ns_by_fd vmlinux EXPORT_SYMBOL_GPL +0xfa12b9a7 sk_send_sigurg vmlinux EXPORT_SYMBOL +0x4b5fd49e dm_kcopyd_do_callback vmlinux EXPORT_SYMBOL +0x4187636a mdiobus_alloc_size vmlinux EXPORT_SYMBOL +0x579e79df sas_ata_schedule_reset vmlinux EXPORT_SYMBOL_GPL +0x58bca2ba ata_eh_thaw_port vmlinux EXPORT_SYMBOL_GPL +0xb6b255a5 pnp_is_active vmlinux EXPORT_SYMBOL +0xf6fe2e3e proc_set_size vmlinux EXPORT_SYMBOL +0x2adee13f dm_btree_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xbbb5df05 dm_array_cursor_skip drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x27023346 mlx4_slave_convert_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x0fd60df2 drm_get_connector_status_name drivers/gpu/drm/drm EXPORT_SYMBOL +0xc7379aff drm_gem_object_release drivers/gpu/drm/drm EXPORT_SYMBOL +0x84b99013 pm_generic_poweroff_noirq vmlinux EXPORT_SYMBOL_GPL +0x7b5d4be1 drop_super vmlinux EXPORT_SYMBOL +0xc5850110 printk vmlinux EXPORT_SYMBOL +0xe60139cd vhost_add_used_and_signal drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x260590c0 vbg_err drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL +0x6441af36 drm_property_add_enum drivers/gpu/drm/drm EXPORT_SYMBOL +0x394bd277 __drm_atomic_helper_plane_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xd0ec90c0 drm_helper_resume_force_mode drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x39e05ac3 nfs_idmap_cache_timeout fs/nfs/nfs EXPORT_SYMBOL_GPL +0xcad15cc9 devlink_health_reporter_create vmlinux EXPORT_SYMBOL_GPL +0xafa375ab flow_hash_from_keys vmlinux EXPORT_SYMBOL +0xe1a8d7c9 net_rwsem vmlinux EXPORT_SYMBOL_GPL +0xf319d4e5 sk_alloc vmlinux EXPORT_SYMBOL +0xd4719f97 md_run vmlinux EXPORT_SYMBOL_GPL +0x08e5cd78 sas_is_tlr_enabled vmlinux EXPORT_SYMBOL_GPL +0xf5e8f802 ahci_init_controller vmlinux EXPORT_SYMBOL_GPL +0x5ca1d4b8 sata_std_hardreset vmlinux EXPORT_SYMBOL_GPL +0x12a81aa7 bus_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x326cefe5 hwpoison_filter_dev_minor vmlinux EXPORT_SYMBOL_GPL 
+0x8d7e3373 hwpoison_filter_dev_major vmlinux EXPORT_SYMBOL_GPL +0xeca0a8b6 try_to_release_page vmlinux EXPORT_SYMBOL +0xdfa5992a synchronize_srcu vmlinux EXPORT_SYMBOL_GPL +0x498e9128 ZSTD_findDecompressedSize lib/zstd/zstd_decompress EXPORT_SYMBOL +0xbe598471 rpcauth_lookup_credcache net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x35afae24 __drm_puts_seq_file drivers/gpu/drm/drm EXPORT_SYMBOL +0x0ec7f975 drm_fb_helper_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x73011db0 drm_dp_bw_code_to_link_rate drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfc9eafc1 nfs_commitdata_release fs/nfs/nfs EXPORT_SYMBOL_GPL +0x27677a70 kobject_add vmlinux EXPORT_SYMBOL +0xe3cd5fae klist_iter_init vmlinux EXPORT_SYMBOL_GPL +0x15bafe29 unregister_md_cluster_operations vmlinux EXPORT_SYMBOL +0x61f9068e phy_restore_page vmlinux EXPORT_SYMBOL_GPL +0x8392a7d6 spi_dv_device vmlinux EXPORT_SYMBOL +0xc51de576 dma_buf_vmap vmlinux EXPORT_SYMBOL_GPL +0xc208a225 dma_buf_mmap vmlinux EXPORT_SYMBOL_GPL +0x57e979e9 dma_buf_kmap vmlinux EXPORT_SYMBOL_GPL +0xce281e93 nvdimm_revalidate_disk vmlinux EXPORT_SYMBOL +0x2ee92935 __platform_driver_register vmlinux EXPORT_SYMBOL_GPL +0xf3613618 mipi_dsi_attach vmlinux EXPORT_SYMBOL +0x112de081 pci_disable_msix vmlinux EXPORT_SYMBOL +0x31e33e36 devm_of_phy_provider_unregister vmlinux EXPORT_SYMBOL_GPL +0xb604f26e bio_list_copy_data vmlinux EXPORT_SYMBOL +0x10138352 tracing_on vmlinux EXPORT_SYMBOL_GPL +0x73193ce9 __tracepoint_mlx5_fw drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x3a71b7d4 reprogram_fixed_counter arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x38047e10 xfrm_audit_policy_add vmlinux EXPORT_SYMBOL_GPL +0xd6d4363c sock_gen_put vmlinux EXPORT_SYMBOL_GPL +0xddcccc96 thermal_zone_get_temp vmlinux EXPORT_SYMBOL_GPL +0xe052a907 platform_driver_unregister vmlinux EXPORT_SYMBOL_GPL +0x78eb5c3b agp_generic_alloc_user vmlinux EXPORT_SYMBOL +0x89e340cf acpi_bus_get_ejd vmlinux EXPORT_SYMBOL_GPL +0x84520d7b fscrypt_enqueue_decrypt_bio vmlinux EXPORT_SYMBOL +0x735a0bd5 native_io_delay vmlinux EXPORT_SYMBOL +0x4b762828 start_thread vmlinux EXPORT_SYMBOL_GPL +0x7c911312 vhost_signal drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xbe7ad8d3 nfs_file_release fs/nfs/nfs EXPORT_SYMBOL_GPL +0x3b6d49cc kvm_apic_write_nodecode arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x1a9c85b8 xfrm_state_mtu vmlinux EXPORT_SYMBOL_GPL +0x4751b53e gnet_stats_copy_queue vmlinux EXPORT_SYMBOL +0xd892d2d7 hid_ignore vmlinux EXPORT_SYMBOL_GPL +0x6bf3ca1f dev_pm_opp_put vmlinux EXPORT_SYMBOL_GPL +0x49ce12eb xhci_gen_setup vmlinux EXPORT_SYMBOL_GPL +0x28e20208 usb_get_urb vmlinux EXPORT_SYMBOL_GPL +0xbd3c2c7b amd_iommu_flush_tlb vmlinux EXPORT_SYMBOL +0x1cb7c983 apei_exec_read_register_value vmlinux EXPORT_SYMBOL_GPL +0x9698558a pci_scan_single_device vmlinux EXPORT_SYMBOL +0x2505bf18 kstrtol_from_user vmlinux EXPORT_SYMBOL +0x6b10bee1 _copy_to_user vmlinux EXPORT_SYMBOL +0x5a58b965 simple_dir_operations vmlinux EXPORT_SYMBOL +0x162893fd hashlen_string vmlinux EXPORT_SYMBOL +0x5af1e3b9 list_lru_del vmlinux EXPORT_SYMBOL_GPL +0xc8557b9e l2tp_session_set_header_len net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0xec8beba6 nf_ct_expect_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x29c683db mlx5_fpga_get_sbu_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x55952429 drm_color_lut_check drivers/gpu/drm/drm EXPORT_SYMBOL +0xac222438 inet_frag_reasm_finish vmlinux EXPORT_SYMBOL +0xc2a814db tcp_memory_pressure vmlinux EXPORT_SYMBOL_GPL +0x8aa104eb devlink_resource_size_get vmlinux EXPORT_SYMBOL_GPL 
+0xda911e7d agp_generic_remove_memory vmlinux EXPORT_SYMBOL +0x840342c6 sgl_free vmlinux EXPORT_SYMBOL +0xd59e45e3 dqget vmlinux EXPORT_SYMBOL +0x8d0fb23b dqput vmlinux EXPORT_SYMBOL +0xeb327656 always_delete_dentry vmlinux EXPORT_SYMBOL +0x5fb81a2c setattr_copy vmlinux EXPORT_SYMBOL +0xca07cfde iscsit_build_logout_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xc04c7e84 vmci_qpair_get_consume_indexes drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x5591b58e vmci_context_get_priv_flags drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x9f6d78fc kvm_get_pfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6565901d br_multicast_list_adjacent vmlinux EXPORT_SYMBOL_GPL +0xcbcf2107 icmpv6_ndo_send vmlinux EXPORT_SYMBOL +0x565ef039 inet_frags_fini vmlinux EXPORT_SYMBOL +0x65d9e877 cpufreq_register_notifier vmlinux EXPORT_SYMBOL +0x5b6f8855 scsi_queue_work vmlinux EXPORT_SYMBOL_GPL +0x47f66408 ahci_qc_issue vmlinux EXPORT_SYMBOL_GPL +0x64f7b381 iommu_capable vmlinux EXPORT_SYMBOL_GPL +0x9be7bde4 security_tun_dev_attach vmlinux EXPORT_SYMBOL +0xf346231f seq_list_start_head vmlinux EXPORT_SYMBOL +0xdc64cb5e mmu_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x3bb11ca9 nsh_pop net/nsh/nsh EXPORT_SYMBOL_GPL +0x0b2420c2 iscsit_check_dataout_hdr drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xfbd03183 __tracepoint_bcache_btree_node_free drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x1019e116 mlx5_nic_vport_query_local_lb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xdaf80fae mlx4_SET_VPORT_QOS_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xea988dce mlx4_SET_VPORT_QOS_get drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xb6b4513d cxgbi_create_session drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x1af26af4 drm_i2c_encoder_restore drivers/gpu/drm/drm EXPORT_SYMBOL +0xe8e4c58f nfs4_setup_sequence fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xc6db3ee7 dns_query vmlinux EXPORT_SYMBOL +0xde2df0d5 netdev_set_default_ethtool_ops vmlinux EXPORT_SYMBOL_GPL +0x8b8cb4d1 netif_device_attach vmlinux EXPORT_SYMBOL +0x7701cc1a netif_device_detach vmlinux EXPORT_SYMBOL +0xbbe88028 device_rename vmlinux EXPORT_SYMBOL_GPL +0x40eadae8 mipi_dsi_dcs_set_display_on vmlinux EXPORT_SYMBOL +0xce6477b2 acpi_pci_osc_control_set vmlinux EXPORT_SYMBOL +0x2c231596 fbcon_rotate_ud vmlinux EXPORT_SYMBOL +0x78302302 fbcon_rotate_cw vmlinux EXPORT_SYMBOL +0x36396875 simple_attr_read vmlinux EXPORT_SYMBOL_GPL +0x82b2a67a relay_flush vmlinux EXPORT_SYMBOL_GPL +0xc12435e3 rpc_calc_rto net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xaa9c93ee drm_atomic_helper_swap_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf5eced70 kernel_bind vmlinux EXPORT_SYMBOL +0x7006586e iommu_get_group_resv_regions vmlinux EXPORT_SYMBOL_GPL +0xcc248d26 serial8250_suspend_port vmlinux EXPORT_SYMBOL +0x4e6e8ea7 fg_console vmlinux EXPORT_SYMBOL +0x7a655f68 acpi_processor_claim_cst_control vmlinux EXPORT_SYMBOL_GPL +0xcd279169 nla_find vmlinux EXPORT_SYMBOL +0x390089be kernel_read vmlinux EXPORT_SYMBOL +0xc8add232 ring_buffer_record_disable vmlinux EXPORT_SYMBOL_GPL +0xac3e74d9 smp_call_function_many_async vmlinux EXPORT_SYMBOL_GPL +0xffae8e8b nsecs_to_jiffies vmlinux EXPORT_SYMBOL_GPL +0x1732f8bd dma_get_merge_boundary vmlinux EXPORT_SYMBOL_GPL +0x6886037a __devm_release_region vmlinux EXPORT_SYMBOL +0x578a408b ZSTD_initDCtx lib/zstd/zstd_decompress EXPORT_SYMBOL +0x0e27a2dd ZSTD_initCCtx lib/zstd/zstd_compress EXPORT_SYMBOL +0x75d6377a kvm_is_visible_gfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x8f310362 
fl6_merge_options vmlinux EXPORT_SYMBOL_GPL +0xc6a7e604 device_link_del vmlinux EXPORT_SYMBOL_GPL +0xe21c6b21 device_link_add vmlinux EXPORT_SYMBOL_GPL +0x4453aa61 pci_bus_find_capability vmlinux EXPORT_SYMBOL +0xdbf6fe4a dax_iomap_fault vmlinux EXPORT_SYMBOL_GPL +0xe691ac7f ZSTD_decompressBegin lib/zstd/zstd_decompress EXPORT_SYMBOL +0x1a8f4486 ceph_osdc_watch net/ceph/libceph EXPORT_SYMBOL +0x471adfec nf_ct_l4proto_find net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x724c72d4 mdev_uuid drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xdf88e62b mlx5_query_nic_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xef61d1e8 drm_gem_prime_export drivers/gpu/drm/drm EXPORT_SYMBOL +0xfa0e743b nfs4_delete_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xa2817d80 kvm_flush_remote_tlbs arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x063ba523 vlan_dev_vlan_id vmlinux EXPORT_SYMBOL +0xd01e327e __skb_try_recv_datagram vmlinux EXPORT_SYMBOL +0x254299dd input_set_max_poll_interval vmlinux EXPORT_SYMBOL +0x6711572f input_set_min_poll_interval vmlinux EXPORT_SYMBOL +0x49997ba1 ata_base_port_ops vmlinux EXPORT_SYMBOL_GPL +0x9e5dd18d pagevec_lookup_range_tag vmlinux EXPORT_SYMBOL +0xc8b2a233 sunrpc_cache_unregister_pipefs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf3b4d4ae ip_set_alloc net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xefb9a1c1 transport_register_session drivers/target/target_core_mod EXPORT_SYMBOL +0xdb0920f8 drm_atomic_state_default_release drivers/gpu/drm/drm EXPORT_SYMBOL +0x2e73b955 pnfs_generic_scan_commit_lists fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x43841d9a dst_cache_get_ip4 vmlinux EXPORT_SYMBOL_GPL +0x6b5d062c ehci_hub_control vmlinux EXPORT_SYMBOL_GPL +0xc61ff2c5 __class_register vmlinux EXPORT_SYMBOL_GPL +0x1514c933 agp_collect_device_status vmlinux EXPORT_SYMBOL +0xcebf5571 hsu_dma_probe vmlinux EXPORT_SYMBOL_GPL +0x6a5cb5ee __get_free_pages vmlinux EXPORT_SYMBOL +0x3c36b671 virtio_transport_release net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x743be8bc rdma_read_gids drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x4099f919 tun_ptr_free drivers/net/tun EXPORT_SYMBOL_GPL +0x22e47af7 rawv6_mh_filter_register vmlinux EXPORT_SYMBOL +0xba399ea2 nf_unregister_net_hook vmlinux EXPORT_SYMBOL +0x42343efb usb_free_coherent vmlinux EXPORT_SYMBOL_GPL +0x3bef2f73 iscsi_pool_init vmlinux EXPORT_SYMBOL_GPL +0xadbeed61 mipi_dsi_packet_format_is_long vmlinux EXPORT_SYMBOL +0xc28bb4d8 pciserial_init_ports vmlinux EXPORT_SYMBOL_GPL +0x085e4c1a pcie_has_flr vmlinux EXPORT_SYMBOL_GPL +0x97adb487 utf8s_to_utf16s vmlinux EXPORT_SYMBOL +0xb4b97c90 pvclock_gtod_register_notifier vmlinux EXPORT_SYMBOL_GPL +0x37967fb4 ns_capable vmlinux EXPORT_SYMBOL +0x21a7ff81 dm_cell_error drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xe3ccde2b x86_fpu_cache arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x583225fd rawv6_mh_filter_unregister vmlinux EXPORT_SYMBOL +0x6baad9bb ip_route_output_key_hash vmlinux EXPORT_SYMBOL_GPL +0xcbef462a is_skb_forwardable vmlinux EXPORT_SYMBOL_GPL +0xb04f56ab dm_bufio_read vmlinux EXPORT_SYMBOL_GPL +0x1912be44 clk_hw_register_clkdev vmlinux EXPORT_SYMBOL +0x75f33224 acpi_subsys_poweroff vmlinux EXPORT_SYMBOL_GPL +0x67eb70cd fb_deferred_io_init vmlinux EXPORT_SYMBOL_GPL +0x2bf3f151 debugfs_attr_read vmlinux EXPORT_SYMBOL_GPL +0x92b9b180 slash_name vmlinux EXPORT_SYMBOL +0x096cc04e shake_page vmlinux EXPORT_SYMBOL_GPL +0x7bbccd05 nr_node_ids vmlinux EXPORT_SYMBOL +0xbcc15e75 ktime_get_coarse_with_offset vmlinux EXPORT_SYMBOL_GPL +0x6ae4fdc8 sched_trace_cfs_rq_avg vmlinux 
EXPORT_SYMBOL_GPL +0x6df1aaf1 kernel_sigaction vmlinux EXPORT_SYMBOL +0x4a17fc91 dm_bio_prison_free_cell_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xa2370ca8 i2c_new_dummy drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0xede5ded1 drm_atomic_normalize_zpos drivers/gpu/drm/drm EXPORT_SYMBOL +0x562da826 drm_fb_helper_sys_read drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe8611d8a ipv6_specific vmlinux EXPORT_SYMBOL +0x3205d6c0 ipv4_specific vmlinux EXPORT_SYMBOL +0x3bc29237 nf_queue_entry_release_refs vmlinux EXPORT_SYMBOL_GPL +0x75479fbc proto_unregister vmlinux EXPORT_SYMBOL +0xd42bcc8e blackhole_netdev vmlinux EXPORT_SYMBOL +0x20939bd4 csum_and_copy_from_iter vmlinux EXPORT_SYMBOL +0xd47e579d __fsnotify_inode_delete vmlinux EXPORT_SYMBOL_GPL +0x3e0a9b11 failover_register net/core/failover EXPORT_SYMBOL_GPL +0x77282d95 dm_rh_mark_nosync drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x82361bf0 nfs_remount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x19dd499a __tracepoint_kvm_invlpga arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x058e9685 pci_disable_sriov vmlinux EXPORT_SYMBOL_GPL +0x1bdf34ea bsg_scsi_register_queue vmlinux EXPORT_SYMBOL_GPL +0x54ca0590 blk_mq_free_tag_set vmlinux EXPORT_SYMBOL +0xa2c74331 list_lru_count_node vmlinux EXPORT_SYMBOL_GPL +0xaa72510e from_kprojid_munged vmlinux EXPORT_SYMBOL +0x57baf885 ceph_str_hash net/ceph/libceph EXPORT_SYMBOL +0xc32184f4 rpc_find_or_alloc_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1c07f8c9 rpcauth_unwrap_resp_decode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1d9d0e2b register_ip_vs_app net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x36a34e58 dm_array_cursor_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x846c3580 mlx5_modify_nic_vport_mac_list drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xedcf81ce drm_dp_channel_eq_ok drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x2807974e fb_sys_write drivers/video/fbdev/core/fb_sys_fops EXPORT_SYMBOL_GPL +0x61cfa029 fuse_dev_alloc_install fs/fuse/fuse EXPORT_SYMBOL_GPL +0x752f14a5 flow_block_cb_setup_simple vmlinux EXPORT_SYMBOL +0xa8796b59 flow_rule_match_enc_ip vmlinux EXPORT_SYMBOL +0x77000558 rtnl_af_register vmlinux EXPORT_SYMBOL_GPL +0x2adfa32b mdio_device_reset vmlinux EXPORT_SYMBOL +0xcb7143f8 pci_num_vf vmlinux EXPORT_SYMBOL_GPL +0x1f953f9b param_ops_long vmlinux EXPORT_SYMBOL +0x0539af23 rpc_add_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf45b75cd ib_send_cm_apr drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x06f81bad drm_format_info_block_height drivers/gpu/drm/drm EXPORT_SYMBOL +0x70ad75fb radix_tree_lookup vmlinux EXPORT_SYMBOL +0xa3523b83 inet_csk_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0x152349c6 sata_scr_write vmlinux EXPORT_SYMBOL_GPL +0x6f41a428 acpi_get_vendor_resource vmlinux EXPORT_SYMBOL +0xd5346bfc acpi_get_possible_resources vmlinux EXPORT_SYMBOL +0x3e7080cb mpi_read_from_buffer vmlinux EXPORT_SYMBOL_GPL +0x4aea463f crc32_le_shift vmlinux EXPORT_SYMBOL +0xacf4d843 match_strdup vmlinux EXPORT_SYMBOL +0xc3805cd1 fs_ftype_to_dtype vmlinux EXPORT_SYMBOL_GPL +0xb2ff3ad0 ring_buffer_free_read_page vmlinux EXPORT_SYMBOL_GPL +0xac60d69d rt_mutex_unlock vmlinux EXPORT_SYMBOL_GPL +0xf461bca0 nf_reject_ip6_tcphdr_put net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL +0x30117a6e sparse_keymap_setup drivers/input/sparse-keymap EXPORT_SYMBOL +0x1a411479 drm_syncobj_free drivers/gpu/drm/drm EXPORT_SYMBOL +0x0148a709 neigh_seq_next vmlinux EXPORT_SYMBOL +0xb8493e1c ahci_pmp_retry_srst_ops vmlinux EXPORT_SYMBOL_GPL +0xe7477824 badblocks_init vmlinux 
EXPORT_SYMBOL_GPL +0x21dabcb0 vfs_rename vmlinux EXPORT_SYMBOL +0x83bef88f generic_ro_fops vmlinux EXPORT_SYMBOL +0x7cd8d75e page_offset_base vmlinux EXPORT_SYMBOL +0x061ceaed ceph_client_addr net/ceph/libceph EXPORT_SYMBOL +0x293a13e6 mlx5_fpga_sbu_conn_create drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xf46f601a mlx5_cmd_alloc_uar drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x95dac902 cxgbi_conn_pdu_ready drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xbc400c8e nexthop_select_path vmlinux EXPORT_SYMBOL_GPL +0x741f579d gnet_stats_copy_basic_hw vmlinux EXPORT_SYMBOL +0xc59c8cdf dev_pm_opp_free_cpufreq_table vmlinux EXPORT_SYMBOL_GPL +0x57061d16 dev_pm_opp_enable vmlinux EXPORT_SYMBOL_GPL +0xbb77c3b7 phy_save_page vmlinux EXPORT_SYMBOL_GPL +0x7f0af8ca iscsi_free_session vmlinux EXPORT_SYMBOL_GPL +0xfc9e1d19 pm_runtime_force_resume vmlinux EXPORT_SYMBOL_GPL +0xa0e21856 devm_pci_remap_cfgspace vmlinux EXPORT_SYMBOL +0xd774add4 phy_calibrate vmlinux EXPORT_SYMBOL_GPL +0x4976e4e6 iomap_page_mkwrite vmlinux EXPORT_SYMBOL_GPL +0x1e7c3543 user_path_create vmlinux EXPORT_SYMBOL +0x64bf9c3a kthread_bind vmlinux EXPORT_SYMBOL +0xdaaf9828 virtio_transport_stream_enqueue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x3cbbb36d vsock_remove_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x72e9241a xprt_disconnect_done net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xb67bdde7 mlx5_query_nic_vport_promisc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x88bd9466 drm_property_destroy drivers/gpu/drm/drm EXPORT_SYMBOL +0xfc41d798 ipv6_setsockopt vmlinux EXPORT_SYMBOL +0x3e13a54c dm_kcopyd_zero vmlinux EXPORT_SYMBOL +0x6e02eca1 phy_modify_mmd vmlinux EXPORT_SYMBOL_GPL +0x1f800bba nvme_enable_ctrl vmlinux EXPORT_SYMBOL_GPL +0x8db22efe acpi_setup_gpe_for_wake vmlinux EXPORT_SYMBOL +0xa4191c0b memset_io vmlinux EXPORT_SYMBOL +0x3d6a43e4 blk_queue_flag_test_and_set vmlinux EXPORT_SYMBOL_GPL +0x2c5eddfa forget_all_cached_acls vmlinux EXPORT_SYMBOL +0x9448a48c mlx5_fc_create drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x5c55c760 mlx5_core_create_mkey_cb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x768c631d inet_frag_find vmlinux EXPORT_SYMBOL +0x01b83eed inetdev_by_index vmlinux EXPORT_SYMBOL +0xd71f0f56 __sk_backlog_rcv vmlinux EXPORT_SYMBOL +0x842f046d usb_poison_anchored_urbs vmlinux EXPORT_SYMBOL_GPL +0x313aefb9 con_is_visible vmlinux EXPORT_SYMBOL +0x230c034c acpi_dev_resource_interrupt vmlinux EXPORT_SYMBOL_GPL +0xdf0f75c6 eventfd_signal vmlinux EXPORT_SYMBOL_GPL +0x242b1b50 clocksource_unregister vmlinux EXPORT_SYMBOL +0x87919550 fifo_set_limit vmlinux EXPORT_SYMBOL +0x58b4645c dev_close_many vmlinux EXPORT_SYMBOL +0x3e759ead dev_pm_opp_set_prop_name vmlinux EXPORT_SYMBOL_GPL +0xca8232b0 phy_drivers_unregister vmlinux EXPORT_SYMBOL +0xaaeef18e genphy_c45_check_and_restart_aneg vmlinux EXPORT_SYMBOL_GPL +0xa91ad435 con_debug_enter vmlinux EXPORT_SYMBOL_GPL +0xed55f929 acpi_os_unmap_generic_address vmlinux EXPORT_SYMBOL +0x9d7364a2 pci_write_config_dword vmlinux EXPORT_SYMBOL +0x599fb41c kvmalloc_node vmlinux EXPORT_SYMBOL +0xf12bfe5b filemap_fdatawrite vmlinux EXPORT_SYMBOL +0x9cc4f70a register_pm_notifier vmlinux EXPORT_SYMBOL_GPL +0xe45019eb rpc_clone_client_set_auth net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf4f61be4 drm_i2c_encoder_destroy drivers/gpu/drm/drm EXPORT_SYMBOL +0x4c2054d7 ipmi_request_settime drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x807d2b2c xt_recseq vmlinux 
EXPORT_SYMBOL_GPL +0x730252c4 dev_fill_metadata_dst vmlinux EXPORT_SYMBOL_GPL +0xea14b1d5 dev_add_offload vmlinux EXPORT_SYMBOL +0xa5976e4f dev_base_lock vmlinux EXPORT_SYMBOL +0x9ab9d9d0 dm_hold vmlinux EXPORT_SYMBOL_GPL +0x4e6b135f mptscsih_event_process vmlinux EXPORT_SYMBOL +0xf5021620 mdiobus_is_registered_device vmlinux EXPORT_SYMBOL +0x75e132d4 iscsi_eh_session_reset vmlinux EXPORT_SYMBOL_GPL +0x5fc748e6 nd_region_provider_data vmlinux EXPORT_SYMBOL_GPL +0xd25d4f74 console_blank_hook vmlinux EXPORT_SYMBOL +0x56a3678b devm_clk_register vmlinux EXPORT_SYMBOL_GPL +0xe85f2123 acpi_tb_unload_table vmlinux EXPORT_SYMBOL +0x812a2645 iov_iter_discard vmlinux EXPORT_SYMBOL +0x7207bcb3 cgroup_get_from_fd vmlinux EXPORT_SYMBOL_GPL +0x736b5662 _raw_read_lock_irqsave vmlinux EXPORT_SYMBOL +0xecb901f5 ceph_cls_lock net/ceph/libceph EXPORT_SYMBOL +0x340fa27c net_failover_destroy drivers/net/net_failover EXPORT_SYMBOL_GPL +0x9b6e1dfc kvm_emulate_instruction_from_buffer arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x4c6dd1e6 xt_request_find_target vmlinux EXPORT_SYMBOL_GPL +0x48ece429 tcf_action_set_ctrlact vmlinux EXPORT_SYMBOL +0x5f7006cf mptscsih_resume vmlinux EXPORT_SYMBOL +0x67661d27 iommu_sva_get_pasid vmlinux EXPORT_SYMBOL_GPL +0xffc1913b sg_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL +0x67ebd463 __alloc_disk_node vmlinux EXPORT_SYMBOL +0xc22a3091 vm_unmap_aliases vmlinux EXPORT_SYMBOL_GPL +0x35f6ec0e __ib_create_cq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x423a6e6e dm_rh_delay drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xd30a434b mlx5_db_alloc_node drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xcc3ddc34 async_pmem_flush drivers/nvdimm/nd_virtio EXPORT_SYMBOL_GPL +0x6e16b52a __tracepoint_pnfs_mds_fallback_read_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xb6cb932c br_ip6_fragment vmlinux EXPORT_SYMBOL_GPL +0xc18c685f xfrm_state_lookup_byspi vmlinux EXPORT_SYMBOL +0x723c5a5c netlink_capable vmlinux EXPORT_SYMBOL +0x5b28bfa4 gnet_stats_finish_copy vmlinux EXPORT_SYMBOL +0x94de8c64 skb_checksum_trimmed vmlinux EXPORT_SYMBOL +0x6add5c9a dmi_find_device vmlinux EXPORT_SYMBOL +0x3a6d40e0 md_write_inc vmlinux EXPORT_SYMBOL +0xbb993921 register_md_personality vmlinux EXPORT_SYMBOL +0x1bfc3180 mddev_unlock vmlinux EXPORT_SYMBOL_GPL +0x3ab57476 scsi_get_device_flags_keyed vmlinux EXPORT_SYMBOL +0x82b1e91f device_show_int vmlinux EXPORT_SYMBOL_GPL +0x179aa5d1 init_iova_domain vmlinux EXPORT_SYMBOL_GPL +0x59c91e96 uart_handle_cts_change vmlinux EXPORT_SYMBOL_GPL +0xb31f2ff0 uart_handle_dcd_change vmlinux EXPORT_SYMBOL_GPL +0xb7752302 free_inode_nonrcu vmlinux EXPORT_SYMBOL +0xcf851b10 dma_pool_create vmlinux EXPORT_SYMBOL +0x82e6cea1 bpf_map_inc_with_uref vmlinux EXPORT_SYMBOL_GPL +0x9c4894f6 ceph_osdc_start_request net/ceph/libceph EXPORT_SYMBOL +0x0b36102c dm_array_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb572e830 vmci_doorbell_create drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0xc3325f6c vcpu_put arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6f1e8047 __ip6_local_out vmlinux EXPORT_SYMBOL_GPL +0x8ad6c6be napi_complete_done vmlinux EXPORT_SYMBOL +0x4ea69707 usb_hcd_pci_probe vmlinux EXPORT_SYMBOL_GPL +0x6114a2bb tty_name vmlinux EXPORT_SYMBOL +0xe038515b get_disk_and_module vmlinux EXPORT_SYMBOL +0xcded245f vfs_parse_fs_param vmlinux EXPORT_SYMBOL +0xb5aa7165 dma_pool_destroy vmlinux EXPORT_SYMBOL +0x45e994e8 irq_create_strict_mappings vmlinux EXPORT_SYMBOL_GPL +0xa9d720c3 mlx4_vf_set_enable_smi_admin drivers/net/ethernet/mellanox/mlx4/mlx4_core 
EXPORT_SYMBOL_GPL +0x6cac976e mlx4_vf_get_enable_smi_admin drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3942ec77 xsk_umem_discard_addr vmlinux EXPORT_SYMBOL +0x3ea376ca udp6_csum_init vmlinux EXPORT_SYMBOL +0x7966f136 dst_cache_set_ip4 vmlinux EXPORT_SYMBOL_GPL +0x2d912bca dmi_get_bios_year vmlinux EXPORT_SYMBOL +0xd0d8bf50 power_supply_register vmlinux EXPORT_SYMBOL_GPL +0x83ac4bb6 serial8250_register_8250_port vmlinux EXPORT_SYMBOL +0xa15d0ac6 virtqueue_add_inbuf vmlinux EXPORT_SYMBOL_GPL +0xc13a7ba6 __tracepoint_kmalloc vmlinux EXPORT_SYMBOL +0x7d2717b8 perf_event_release_kernel vmlinux EXPORT_SYMBOL_GPL +0x8c03d20c destroy_workqueue vmlinux EXPORT_SYMBOL_GPL +0xf2bcdb1a enic_api_devcmd_proxy_by_index drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL +0xaa46689c nfs_pgio_header_free fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1496a663 inet_twsk_put vmlinux EXPORT_SYMBOL_GPL +0x5cfdde61 fib_default_rule_add vmlinux EXPORT_SYMBOL +0x4d61ae5f mptscsih_host_reset vmlinux EXPORT_SYMBOL +0x33ec43e8 ata_ehi_push_desc vmlinux EXPORT_SYMBOL_GPL +0x34e159e2 pm_generic_runtime_suspend vmlinux EXPORT_SYMBOL_GPL +0x8f15aab9 do_SAK vmlinux EXPORT_SYMBOL +0x128196a0 clk_hw_register_fixed_factor vmlinux EXPORT_SYMBOL_GPL +0x6d9fdf65 pci_load_and_free_saved_state vmlinux EXPORT_SYMBOL_GPL +0x85a8e47a pci_save_state vmlinux EXPORT_SYMBOL +0x258ef0bd fat_get_dotdot_entry vmlinux EXPORT_SYMBOL_GPL +0x96c28568 check_disk_change vmlinux EXPORT_SYMBOL +0xcf07ce7e writeback_inodes_sb vmlinux EXPORT_SYMBOL +0xa2e1b3ef trace_printk_init_buffers vmlinux EXPORT_SYMBOL_GPL +0xdbb48ac2 param_get_uint vmlinux EXPORT_SYMBOL +0x9bc6b539 ceph_find_or_create_string net/ceph/libceph EXPORT_SYMBOL +0x4edfe9b9 drm_mm_scan_color_evict drivers/gpu/drm/drm EXPORT_SYMBOL +0xf633b7df sas_rphy_add vmlinux EXPORT_SYMBOL +0xe51256f1 elv_register vmlinux EXPORT_SYMBOL_GPL +0xa8874217 compat_ptr_ioctl vmlinux EXPORT_SYMBOL +0x3c185c61 page_put_link vmlinux EXPORT_SYMBOL +0x631cca99 release_pages vmlinux EXPORT_SYMBOL +0xc895d208 mce_inject_log vmlinux EXPORT_SYMBOL_GPL +0x2663cb64 vsock_addr_equals_addr net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x337f2432 l2tp_nl_unregister_ops net/l2tp/l2tp_netlink EXPORT_SYMBOL_GPL +0xcd10164c rpc_sleep_on net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x02df3839 rdma_rw_ctx_destroy_signature drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x12013832 mlx5_fpga_mem_write drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x7d94ac3d drm_syncobj_replace_fence drivers/gpu/drm/drm EXPORT_SYMBOL +0x5ec29126 pnfs_generic_commit_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x164b82ad fixed_phy_register vmlinux EXPORT_SYMBOL_GPL +0xafddd545 ata_id_c_string vmlinux EXPORT_SYMBOL_GPL +0x5f0e1ad1 nd_region_acquire_lane vmlinux EXPORT_SYMBOL +0xb1bed25d dpm_resume_start vmlinux EXPORT_SYMBOL_GPL +0x7c81e4d1 pci_enable_msi vmlinux EXPORT_SYMBOL +0xd75b20aa rsa_parse_priv_key vmlinux EXPORT_SYMBOL_GPL +0xd6aa3cc9 ip_tunnel_get_iflink net/ipv4/ip_tunnel EXPORT_SYMBOL +0xa50cceaa drm_property_create drivers/gpu/drm/drm EXPORT_SYMBOL +0x484701de nfs4_find_or_create_ds_client fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xbb0b1d56 kvm_write_guest_cached arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb0e10781 get_option vmlinux EXPORT_SYMBOL +0x65022064 km_state_expired vmlinux EXPORT_SYMBOL +0x0ff53009 dev_pm_opp_get_suspend_opp_freq vmlinux EXPORT_SYMBOL_GPL +0xe477e28d platform_msi_domain_alloc_irqs vmlinux EXPORT_SYMBOL_GPL +0xfdccf856 component_bind_all vmlinux EXPORT_SYMBOL_GPL +0xff05fa13 vring_interrupt vmlinux 
EXPORT_SYMBOL_GPL +0x19a846f1 clk_hw_rate_is_protected vmlinux EXPORT_SYMBOL_GPL +0x623424c9 blk_mq_freeze_queue vmlinux EXPORT_SYMBOL_GPL +0x14a98a21 cpu_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0xee120c03 ceph_release_string net/ceph/libceph EXPORT_SYMBOL +0x2fd11e33 ceph_con_send net/ceph/libceph EXPORT_SYMBOL +0x1c1258e0 nf_ct_gre_keymap_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x06052f8d __memmove vmlinux EXPORT_SYMBOL +0x815f2897 empty_zero_page vmlinux EXPORT_SYMBOL +0xed9ddf39 strp_stop vmlinux EXPORT_SYMBOL_GPL +0x60352082 register_inet6addr_notifier vmlinux EXPORT_SYMBOL +0xa05db564 inet_accept vmlinux EXPORT_SYMBOL +0x09222b87 tcp_select_initial_window vmlinux EXPORT_SYMBOL +0x6adbed78 register_pernet_device vmlinux EXPORT_SYMBOL_GPL +0x87b13819 class_for_each_device vmlinux EXPORT_SYMBOL_GPL +0xcd8ce890 acpi_format_exception vmlinux EXPORT_SYMBOL +0x2bb889d5 pci_free_irq_vectors vmlinux EXPORT_SYMBOL +0x558b281d aes_expandkey vmlinux EXPORT_SYMBOL +0x2d30596c from_kqid_munged vmlinux EXPORT_SYMBOL +0x152955bb padata_do_parallel vmlinux EXPORT_SYMBOL +0xe10ed594 bpf_offload_dev_create vmlinux EXPORT_SYMBOL_GPL +0x53b954a2 up_read vmlinux EXPORT_SYMBOL +0xa1c76e0a _cond_resched vmlinux EXPORT_SYMBOL +0x63758856 ceph_str_hash_name net/ceph/libceph EXPORT_SYMBOL +0x3fe0b663 esp6_output_head net/ipv6/esp6 EXPORT_SYMBOL_GPL +0x153e92ec nfnetlink_subsys_register net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xe26d1da7 ib_create_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x34f9621d __closure_sync drivers/md/bcache/bcache EXPORT_SYMBOL +0xe614b3a1 i2c_match_id drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x0bfc1956 mlxsw_core_res_valid drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x5c12addd drm_dp_mst_topology_mgr_destroy drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf0dd9a0e get_nfs_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6ec906c2 fscache_object_init fs/fscache/fscache EXPORT_SYMBOL +0xe57ab1f5 tso_count_descs vmlinux EXPORT_SYMBOL +0x95fe709f ndo_dflt_bridge_getlink vmlinux EXPORT_SYMBOL_GPL +0x3978eb38 __pskb_pull_tail vmlinux EXPORT_SYMBOL +0xec774acb cpufreq_frequency_table_verify vmlinux EXPORT_SYMBOL_GPL +0xcdc249ef devm_hwrng_unregister vmlinux EXPORT_SYMBOL_GPL +0x8c391d72 tty_wait_until_sent vmlinux EXPORT_SYMBOL +0x9cb240dd bio_disassociate_blkg vmlinux EXPORT_SYMBOL_GPL +0x175e33fb dma_spin_lock vmlinux EXPORT_SYMBOL +0x56256e8a orderly_poweroff vmlinux EXPORT_SYMBOL_GPL +0x9f512356 nf_conncount_destroy net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x688d422d dm_bm_block_size drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb07a1a95 mlx5_fc_destroy drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x0a726931 ocfs2_cluster_this_node fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x501e7f24 br_fdb_clear_offload vmlinux EXPORT_SYMBOL_GPL +0x08990794 skb_consume_udp vmlinux EXPORT_SYMBOL_GPL +0x084b4e44 tcp_leave_memory_pressure vmlinux EXPORT_SYMBOL_GPL +0x0ad0dc4f dm_bufio_mark_buffer_dirty vmlinux EXPORT_SYMBOL_GPL +0x843a5d64 device_remove_groups vmlinux EXPORT_SYMBOL_GPL +0x51228a56 remap_vmalloc_range_partial vmlinux EXPORT_SYMBOL +0xda15a15d alarm_forward vmlinux EXPORT_SYMBOL_GPL +0xc3edf6aa save_fsgs_for_kvm vmlinux EXPORT_SYMBOL_GPL +0x849b3711 nf_log_dump_packet_common net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xc5b1e3c4 net_failover_create drivers/net/net_failover EXPORT_SYMBOL_GPL +0x71025213 __netif_schedule vmlinux EXPORT_SYMBOL +0x24e9e130 cpuidle_get_driver vmlinux EXPORT_SYMBOL_GPL 
+0x42cde70e iscsi_target_alloc vmlinux EXPORT_SYMBOL_GPL
+0xc5d9c46c agp_try_unsupported_boot vmlinux EXPORT_SYMBOL
+0xdcafd388 clk_fractional_divider_ops vmlinux EXPORT_SYMBOL_GPL
+0xb53192ef pci_pr3_present vmlinux EXPORT_SYMBOL_GPL
+0xc5534d64 ioread16 vmlinux EXPORT_SYMBOL
+0x8d0ef6d6 generic_fh_to_parent vmlinux EXPORT_SYMBOL_GPL
+0xfde9ee41 generic_fillattr vmlinux EXPORT_SYMBOL
+0xc972449f mempool_alloc_slab vmlinux EXPORT_SYMBOL
+0x6f4a9e5f param_set_invbool vmlinux EXPORT_SYMBOL
+0x0c0f79af ZSTD_getDictID_fromFrame lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x50ccafdd rdma_bind_addr drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x4a04a098 i2c_register_driver drivers/i2c/i2c-core EXPORT_SYMBOL
+0x4c762b5c x86_stepping vmlinux EXPORT_SYMBOL_GPL
+0xf825a584 __inet_hash vmlinux EXPORT_SYMBOL
+0xa75daf6b cpuidle_unregister_driver vmlinux EXPORT_SYMBOL_GPL
+0xeaaea988 fixed_phy_unregister vmlinux EXPORT_SYMBOL_GPL
+0xdc33f12e pci_bus_alloc_resource vmlinux EXPORT_SYMBOL
+0xc8bac7fc of_phy_provider_unregister vmlinux EXPORT_SYMBOL_GPL
+0x92d5838e request_threaded_irq vmlinux EXPORT_SYMBOL
+0x8c3253ec _raw_spin_trylock vmlinux EXPORT_SYMBOL
+0xd6ad734d has_capability vmlinux EXPORT_SYMBOL
+0xb4f13d2a abort vmlinux EXPORT_SYMBOL
+0x21055ead svc_drop net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x17377bbb ib_free_recv_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x1347cfb2 rdma_copy_src_l2_addr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xff204214 tcp_sendpage_locked vmlinux EXPORT_SYMBOL_GPL
+0x59f86eab devlink_sb_unregister vmlinux EXPORT_SYMBOL_GPL
+0x93edef07 devlink_health_report vmlinux EXPORT_SYMBOL_GPL
+0x262b5824 mbox_flush vmlinux EXPORT_SYMBOL_GPL
+0x39411aa8 ata_bmdma32_port_ops vmlinux EXPORT_SYMBOL_GPL
+0xc249b4cd sata_scr_valid vmlinux EXPORT_SYMBOL_GPL
+0xa9c8b8d3 pm_clk_add_notifier vmlinux EXPORT_SYMBOL_GPL
+0xdca5c6cb pci_find_pcie_root_port vmlinux EXPORT_SYMBOL
+0xc575c737 debug_locks_off vmlinux EXPORT_SYMBOL_GPL
+0x885af770 jbd2_journal_forget vmlinux EXPORT_SYMBOL
+0xb5136dc7 mutex_lock_interruptible vmlinux EXPORT_SYMBOL
+0x7923c748 edac_mc_alloc drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x0745a981 xa_erase vmlinux EXPORT_SYMBOL
+0x86585a33 devlink_fmsg_obj_nest_start vmlinux EXPORT_SYMBOL_GPL
+0xc8404511 skb_clone vmlinux EXPORT_SYMBOL
+0x957f8f2a devm_mbox_controller_unregister vmlinux EXPORT_SYMBOL_GPL
+0xceed8318 ibft_addr vmlinux EXPORT_SYMBOL_GPL
+0xc56f1b10 md_bitmap_cond_end_sync vmlinux EXPORT_SYMBOL
+0xbe68b8bf input_get_keycode vmlinux EXPORT_SYMBOL
+0x52ac96fd wakeup_source_destroy vmlinux EXPORT_SYMBOL_GPL
+0x927901d9 virtqueue_enable_cb_prepare vmlinux EXPORT_SYMBOL_GPL
+0xfdcc8a0e fb_find_best_display vmlinux EXPORT_SYMBOL
+0x22cae12a __pci_hp_initialize vmlinux EXPORT_SYMBOL_GPL
+0xb30426cf set_task_ioprio vmlinux EXPORT_SYMBOL_GPL
+0xdeb859b0 crypto_find_alg vmlinux EXPORT_SYMBOL_GPL
+0x3200d056 setup_arg_pages vmlinux EXPORT_SYMBOL
+0x86181f72 pagevec_lookup_range vmlinux EXPORT_SYMBOL
+0xe48f56ae bcm_phy_write_exp drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x648d953b drm_dsc_dp_pps_header_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x339c33c5 camellia_cbc_dec_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL
+0xa7e38f12 flow_keys_basic_dissector vmlinux EXPORT_SYMBOL
+0x89ff787c get_dev_data vmlinux EXPORT_SYMBOL
+0xef29fcdd clk_bulk_put vmlinux EXPORT_SYMBOL_GPL
+0x0ca96d62 inode_dio_wait vmlinux EXPORT_SYMBOL
+0x6b49dcf1 irq_chip_request_resources_parent vmlinux EXPORT_SYMBOL_GPL
+0x8029e212 ib_umem_odp_release drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0x756edb4f drm_encoder_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9e472f5f snmp_fold_field vmlinux EXPORT_SYMBOL_GPL
+0x70c6d827 mc146818_set_time vmlinux EXPORT_SYMBOL_GPL
+0x0828d609 mc146818_get_time vmlinux EXPORT_SYMBOL_GPL
+0x95f12050 phy_ethtool_sset vmlinux EXPORT_SYMBOL
+0x35a64318 iscsi_session_event vmlinux EXPORT_SYMBOL_GPL
+0xa38f21b9 amd_iommu_update_ga vmlinux EXPORT_SYMBOL
+0x2f0ef8bf pci_disable_link_state vmlinux EXPORT_SYMBOL
+0x08375118 crypto_unregister_akcipher vmlinux EXPORT_SYMBOL_GPL
+0x9f2a150d crypto_unregister_skcipher vmlinux EXPORT_SYMBOL_GPL
+0x7b79b030 jbd2_journal_start vmlinux EXPORT_SYMBOL
+0x18c35f5c dquot_quota_on vmlinux EXPORT_SYMBOL
+0xd67364f7 eventfd_ctx_fdget vmlinux EXPORT_SYMBOL_GPL
+0xbffde8ec compat_alloc_user_space vmlinux EXPORT_SYMBOL_GPL
+0xf17913e1 vsock_stream_has_data net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0xb5079ee5 l2tp_udp_encap_recv net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x30ce6cfc __i2c_transfer drivers/i2c/i2c-core EXPORT_SYMBOL
+0x3dc669d7 garp_unregister_application vmlinux EXPORT_SYMBOL_GPL
+0x27761b10 cpufreq_policy_transition_delay_us vmlinux EXPORT_SYMBOL_GPL
+0x2f682030 thermal_zone_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0xef60cfaf ata_host_alloc_pinfo vmlinux EXPORT_SYMBOL_GPL
+0xf4f8bb3f amd_iommu_flush_page vmlinux EXPORT_SYMBOL
+0xac8263c7 acpi_cppc_processor_probe vmlinux EXPORT_SYMBOL_GPL
+0x4f08341a devm_ioremap_wc vmlinux EXPORT_SYMBOL
+0x89974eb6 devm_ioremap_uc vmlinux EXPORT_SYMBOL_GPL
+0x17320914 vfs_lock_file vmlinux EXPORT_SYMBOL_GPL
+0x3ba0ee85 make_bad_inode vmlinux EXPORT_SYMBOL
+0x3435341e __d_drop vmlinux EXPORT_SYMBOL
+0xeccc7cd3 __mmdrop vmlinux EXPORT_SYMBOL_GPL
+0x23baa40d gss_mech_unregister net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0xba9966d3 unix_detach_fds vmlinux EXPORT_SYMBOL
+0x59b4ac3e tcp_memory_allocated vmlinux EXPORT_SYMBOL
+0xfdf4687c hid_check_keys_pressed vmlinux EXPORT_SYMBOL_GPL
+0x47aad3b9 have_governor_per_policy vmlinux EXPORT_SYMBOL_GPL
+0x7082596a scsi_bus_type vmlinux EXPORT_SYMBOL_GPL
+0x088b8860 ata_sff_qc_fill_rtf vmlinux EXPORT_SYMBOL_GPL
+0xfcc3ed08 fwnode_graph_parse_endpoint vmlinux EXPORT_SYMBOL
+0x8d94925e jbd2_journal_force_commit_nested vmlinux EXPORT_SYMBOL
+0x5d8498d1 seq_pad vmlinux EXPORT_SYMBOL
+0x77f774f9 seq_release_private vmlinux EXPORT_SYMBOL
+0xf7974709 ww_mutex_lock vmlinux EXPORT_SYMBOL
+0x6eb40847 target_sess_cmd_list_set_waiting drivers/target/target_core_mod EXPORT_SYMBOL
+0x9213f5c3 drm_edid_to_speaker_allocation drivers/gpu/drm/drm EXPORT_SYMBOL
+0x46f42be0 devlink_fmsg_u8_put vmlinux EXPORT_SYMBOL_GPL
+0xa51bad78 ata_pci_remove_one vmlinux EXPORT_SYMBOL_GPL
+0x75cbfb09 add_interrupt_randomness vmlinux EXPORT_SYMBOL_GPL
+0x9ba10b4a uart_console_write vmlinux EXPORT_SYMBOL_GPL
+0x377d8004 acpi_error vmlinux EXPORT_SYMBOL
+0x1bee4974 sg_alloc_table_chained vmlinux EXPORT_SYMBOL_GPL
+0x02761bf4 writeback_inodes_sb_nr vmlinux EXPORT_SYMBOL
+0x750efa36 vfs_removexattr vmlinux EXPORT_SYMBOL_GPL
+0x10488eab lock_page_memcg vmlinux EXPORT_SYMBOL
+0x6dd92471 find_symbol vmlinux EXPORT_SYMBOL_GPL
+0x977f511b __mutex_init vmlinux EXPORT_SYMBOL
+0x2ce415a1 usb_stor_host_template_init drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x3723baa1 mlx5_core_create_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x6d3dd759 mlx5_core_create_qp drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc57b51a4 mlx5_core_create_cq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x3517a3b4 mlx5_debugfs_root drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4644d67a drm_dev_printk drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa9694ccc drm_connector_attach_content_type_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0xad1e180b drm_i2c_encoder_mode_set drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf7801360 nfs41_maxgetdevinfo_overhead fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xcc2d1f92 rtc_set_time vmlinux EXPORT_SYMBOL_GPL
+0x67e74462 usb_phy_roothub_alloc vmlinux EXPORT_SYMBOL_GPL
+0xf63a3acd mipi_dsi_dcs_set_display_off vmlinux EXPORT_SYMBOL
+0x5acad299 pcix_get_mmrbc vmlinux EXPORT_SYMBOL
+0x3af80e20 jbd2_journal_check_used_features vmlinux EXPORT_SYMBOL
+0xcd91b127 system_highpri_wq vmlinux EXPORT_SYMBOL_GPL
+0x7e431c15 cordic_calc_iq lib/math/cordic EXPORT_SYMBOL
+0x34bf06fa auth_domain_lookup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc92061dc nfs4_schedule_session_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xae316c11 icmpv6_err_convert vmlinux EXPORT_SYMBOL
+0x794d1109 xfrm_audit_state_delete vmlinux EXPORT_SYMBOL_GPL
+0x3fd9bac5 mrp_request_join vmlinux EXPORT_SYMBOL_GPL
+0x7d404963 scsi_register_device_handler vmlinux EXPORT_SYMBOL_GPL
+0x0b782e06 iommu_fwspec_add_ids vmlinux EXPORT_SYMBOL_GPL
+0x9722bf55 dw8250_setup_port vmlinux EXPORT_SYMBOL_GPL
+0xb2e618e3 divider_recalc_rate vmlinux EXPORT_SYMBOL_GPL
+0x32c23fb2 fb_get_mode vmlinux EXPORT_SYMBOL
+0xdf6ef4a1 ceph_oid_printf net/ceph/libceph EXPORT_SYMBOL
+0xb56075e6 inet_diag_find_one_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0xfd45fd9a drm_debugfs_remove_files drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6d215948 fib6_get_table vmlinux EXPORT_SYMBOL_GPL
+0xf61d81b6 gov_attr_set_init vmlinux EXPORT_SYMBOL_GPL
+0x7b2ebc86 ufshcd_dealloc_host vmlinux EXPORT_SYMBOL_GPL
+0xd5b7c703 crypto_shash_setkey vmlinux EXPORT_SYMBOL_GPL
+0x52869577 crypto_ahash_setkey vmlinux EXPORT_SYMBOL_GPL
+0x641085a2 search_binary_handler vmlinux EXPORT_SYMBOL
+0xc7469334 balloon_page_alloc vmlinux EXPORT_SYMBOL_GPL
+0xa5d99956 tracing_cond_snapshot_data vmlinux EXPORT_SYMBOL_GPL
+0x5d5583b5 rt_mutex_trylock vmlinux EXPORT_SYMBOL_GPL
+0x1f0cb5bf pm_power_off_prepare vmlinux EXPORT_SYMBOL_GPL
+0x987ab0a5 amd_get_nb_id vmlinux EXPORT_SYMBOL_GPL
+0x54403a78 osd_req_op_extent_update net/ceph/libceph EXPORT_SYMBOL
+0x0cf25921 nfs_fs_type fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xc28ec25c nfs_server_insert_lists fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x0286a59a kvm_lapic_find_highest_irr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x6d334118 __get_user_8 vmlinux EXPORT_SYMBOL
+0xb8e7ce2c __put_user_8 vmlinux EXPORT_SYMBOL
+0x582b6275 xfrm_if_unregister_cb vmlinux EXPORT_SYMBOL
+0x370f674a rtnl_link_get_net vmlinux EXPORT_SYMBOL
+0x4a400323 cpufreq_freq_attr_scaling_available_freqs vmlinux EXPORT_SYMBOL_GPL
+0xe31c79fe usb_remove_hcd vmlinux EXPORT_SYMBOL_GPL
+0x3f9d66c4 device_store_bool vmlinux EXPORT_SYMBOL_GPL
+0x1d1abdf0 acpi_get_physical_device_location vmlinux EXPORT_SYMBOL
+0xdc5b406f ahash_attr_alg vmlinux EXPORT_SYMBOL_GPL
+0xcfb5871c irq_work_queue vmlinux EXPORT_SYMBOL_GPL
+0x166db1b5 sched_clock_idle_wakeup_event vmlinux EXPORT_SYMBOL_GPL
+0x13e3aa9a smca_banks vmlinux EXPORT_SYMBOL_GPL
+0x44aaf30f tsc_khz vmlinux EXPORT_SYMBOL
+0x3836c3bb ib_destroy_srq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7fca83ba __bch_bset_search drivers/md/bcache/bcache EXPORT_SYMBOL
+0x35d22e23 mlx4_CLOSE_PORT drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x81992424 drm_edid_header_is_valid drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3dfa5ebd drm_gtf_mode_complex drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9ab5fbe8 tcp_disconnect vmlinux EXPORT_SYMBOL
+0x9819c395 __skb_ext_del vmlinux EXPORT_SYMBOL
+0x92424e30 power_supply_put_battery_info vmlinux EXPORT_SYMBOL_GPL
+0x228d18ea power_supply_get_battery_info vmlinux EXPORT_SYMBOL_GPL
+0xf0c85059 put_device vmlinux EXPORT_SYMBOL_GPL
+0xdc3b6bd4 mark_info_dirty vmlinux EXPORT_SYMBOL
+0xb907513f unpoison_memory vmlinux EXPORT_SYMBOL
+0x6fcb87a1 touch_softlockup_watchdog vmlinux EXPORT_SYMBOL
+0x6d2e4555 pv_ops vmlinux EXPORT_SYMBOL
+0x19b2f5ed nft_meta_set_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xccdc5021 nft_meta_get_init net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xa7eadcb5 btracker_complete drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0x681a4b25 ppp_input drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0x648eb59d gc_inflight_list vmlinux EXPORT_SYMBOL
+0x80f494ca nf_unregister_net_hooks vmlinux EXPORT_SYMBOL
+0xf064a0f5 hid_hw_open vmlinux EXPORT_SYMBOL_GPL
+0xf48ba137 devm_power_supply_register_no_ws vmlinux EXPORT_SYMBOL_GPL
+0x11b77c02 devm_input_allocate_device vmlinux EXPORT_SYMBOL
+0x49e3253b usb_phy_roothub_resume vmlinux EXPORT_SYMBOL_GPL
+0xc1116133 ata_port_wait_eh vmlinux EXPORT_SYMBOL_GPL
+0x3891ffc8 ecryptfs_fill_auth_tok vmlinux EXPORT_SYMBOL
+0xccbeeaf4 register_filesystem vmlinux EXPORT_SYMBOL
+0x75d2285b __online_page_free vmlinux EXPORT_SYMBOL_GPL
+0x93170790 blk_fill_rwbs vmlinux EXPORT_SYMBOL_GPL
+0xba497f13 loops_per_jiffy vmlinux EXPORT_SYMBOL
+0x2b275fdc drm_gem_object_put_unlocked drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8f3ed0cc inet_confirm_addr vmlinux EXPORT_SYMBOL
+0x9afc6061 eth_validate_addr vmlinux EXPORT_SYMBOL
+0xafbd2203 nvmem_dev_name vmlinux EXPORT_SYMBOL_GPL
+0xd6e29999 hid_hw_close vmlinux EXPORT_SYMBOL_GPL
+0x2c208607 power_supply_is_system_supplied vmlinux EXPORT_SYMBOL_GPL
+0xe628bb9f phy_fibre_port_array vmlinux EXPORT_SYMBOL_GPL
+0xa2e02c72 fc_disc_config vmlinux EXPORT_SYMBOL
+0xfd334f78 sas_remove_children vmlinux EXPORT_SYMBOL
+0x673f815e agp_bridges vmlinux EXPORT_SYMBOL
+0xb69c3d88 clk_hw_unregister_divider vmlinux EXPORT_SYMBOL_GPL
+0xaebd12f0 acpi_get_name vmlinux EXPORT_SYMBOL
+0xf7afb369 btree_init vmlinux EXPORT_SYMBOL_GPL
+0x02b8ab42 sg_copy_to_buffer vmlinux EXPORT_SYMBOL
+0x7d0db45c jiffies_to_clock_t vmlinux EXPORT_SYMBOL
+0x7ab88a45 system_freezing_cnt vmlinux EXPORT_SYMBOL
+0x51819d72 sched_setscheduler_nocheck vmlinux EXPORT_SYMBOL_GPL
+0xf1417d5c l2tp_tunnel_get_nth net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x74349d34 target_put_nacl drivers/target/target_core_mod EXPORT_SYMBOL
+0xcbe51a34 core_tmr_alloc_req drivers/target/target_core_mod EXPORT_SYMBOL
+0x6ac0608d mlx5_rl_add_rate drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x076355fd drm_mode_crtc_set_gamma_size drivers/gpu/drm/drm EXPORT_SYMBOL
+0x409b44ec xt_hook_ops_alloc vmlinux EXPORT_SYMBOL_GPL
+0xa74c9877 refcount_dec_and_rtnl_lock vmlinux EXPORT_SYMBOL
+0xbd90cb65 sock_no_sendmsg_locked vmlinux EXPORT_SYMBOL
+0xa9c9e551 usb_debug_root vmlinux EXPORT_SYMBOL_GPL
+0x3791fa9d devm_mdiobus_free vmlinux EXPORT_SYMBOL_GPL
+0x4189be2d tty_kopen vmlinux EXPORT_SYMBOL_GPL
+0xb520eb79 btree_merge vmlinux EXPORT_SYMBOL_GPL
+0x8748c935 generic_make_request vmlinux EXPORT_SYMBOL
+0xacab29b7 seq_hlist_start_percpu vmlinux EXPORT_SYMBOL
+0x1a9392d7 generic_file_llseek_size vmlinux EXPORT_SYMBOL
+0x1c87a811 __round_jiffies_up vmlinux EXPORT_SYMBOL_GPL
+0x50b03f5d l1tf_vmx_mitigation vmlinux EXPORT_SYMBOL_GPL
+0x52a7c5a4 iscsit_build_text_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x67967b14 nexthop_for_each_fib6_nh vmlinux EXPORT_SYMBOL_GPL
+0x02124474 ip_send_check vmlinux EXPORT_SYMBOL
+0xa3a9bcb4 garp_uninit_applicant vmlinux EXPORT_SYMBOL_GPL
+0xcfc5108a devlink_fmsg_u8_pair_put vmlinux EXPORT_SYMBOL_GPL
+0x5d3a1fcb dev_attr_sw_activity vmlinux EXPORT_SYMBOL_GPL
+0x1c9ee793 __vfs_setxattr_locked vmlinux EXPORT_SYMBOL_GPL
+0x23b9d6e2 mangle_path vmlinux EXPORT_SYMBOL
+0xbd2f79ae ceph_oloc_copy net/ceph/libceph EXPORT_SYMBOL
+0x87e2553b ieee802154_max_payload net/ieee802154/ieee802154 EXPORT_SYMBOL_GPL
+0xa097a060 nf_flow_dnat_port net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x5eaad29a nf_flow_snat_port net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x8e7528da __rdma_block_iter_next drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5e1ad977 dm_block_manager_create drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xe0c65d20 br_port_flag_is_set vmlinux EXPORT_SYMBOL_GPL
+0x5e1d1dcd mbox_controller_register vmlinux EXPORT_SYMBOL_GPL
+0x69926bf9 __genphy_config_aneg vmlinux EXPORT_SYMBOL
+0xc5604800 clk_set_rate_exclusive vmlinux EXPORT_SYMBOL_GPL
+0xf76cc3ae pci_restore_pasid_state vmlinux EXPORT_SYMBOL_GPL
+0x8dd3b8ed pci_get_domain_bus_and_slot vmlinux EXPORT_SYMBOL
+0xc8b2d10f pinctrl_find_gpio_range_from_pin_nolock vmlinux EXPORT_SYMBOL_GPL
+0x990070ec __tracepoint_write_msr vmlinux EXPORT_SYMBOL
+0x5b2dc01e __blkdev_reread_part vmlinux EXPORT_SYMBOL
+0x53743f64 anon_inode_getfd vmlinux EXPORT_SYMBOL_GPL
+0xec546c3f bd_abort_claiming vmlinux EXPORT_SYMBOL
+0x9b2ef501 filemap_flush vmlinux EXPORT_SYMBOL
+0xc56cdf0e ceph_cls_unlock net/ceph/libceph EXPORT_SYMBOL
+0x6405dcd3 slhc_toss drivers/net/slip/slhc EXPORT_SYMBOL
+0x4766cf33 drm_gem_vm_close drivers/gpu/drm/drm EXPORT_SYMBOL
+0xfc6dd12e fuse_conn_init fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x83491391 __tracepoint_kvm_nested_vmexit_inject arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb0bc5153 tcf_idr_create vmlinux EXPORT_SYMBOL
+0xa627d50a mbox_controller_unregister vmlinux EXPORT_SYMBOL_GPL
+0xc03939dd mpt_alloc_fw_memory vmlinux EXPORT_SYMBOL
+0x6a137e57 pm_generic_freeze_late vmlinux EXPORT_SYMBOL_GPL
+0x5c60c64d serial8250_rpm_put_tx vmlinux EXPORT_SYMBOL_GPL
+0xa2326c49 acpi_remove_table_handler vmlinux EXPORT_SYMBOL
+0x2c5a74d1 blk_stack_limits vmlinux EXPORT_SYMBOL
+0x318e3744 __online_page_increment_counters vmlinux EXPORT_SYMBOL_GPL
+0x99470a38 probe_user_write vmlinux EXPORT_SYMBOL_GPL
+0x53906e89 rpc_mkpipe_data net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x158a8e7f unix_domain_find net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb5061318 i2c_smbus_read_i2c_block_data drivers/i2c/i2c-core EXPORT_SYMBOL
+0xc611cdf0 drm_kms_helper_poll_disable drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7a1211f8 dlm_setup_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL
+0x68d9edb8 kvm_fast_pio arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xc6339d21 __fib_lookup vmlinux EXPORT_SYMBOL_GPL
+0xff6b2fa9 tcp_set_state vmlinux EXPORT_SYMBOL_GPL
+0xd91f6ab6 strnlen_user vmlinux EXPORT_SYMBOL
+0xce3ab2fe __fscrypt_prepare_symlink vmlinux EXPORT_SYMBOL_GPL
+0x997c4347 unmap_kernel_range vmlinux EXPORT_SYMBOL_GPL
+0xb400b0e4 mlx5_db_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xbff2c891 mlx4_db_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x4bb727d1 drm_property_create_blob drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf2329e89 drm_dp_downstream_max_clock drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x068c0a0f br_handle_frame_finish vmlinux EXPORT_SYMBOL_GPL
+0x881df2ef xt_free_table_info vmlinux EXPORT_SYMBOL
+0x44e65874 input_set_poll_interval vmlinux EXPORT_SYMBOL
+0x71418305 mpt_print_ioc_summary vmlinux EXPORT_SYMBOL
+0x62366824 ufshcd_runtime_resume vmlinux EXPORT_SYMBOL
+0xea50dad3 ahci_ignore_sss vmlinux EXPORT_SYMBOL_GPL
+0xc604d03a ata_bmdma_setup vmlinux EXPORT_SYMBOL_GPL
+0x0a6c4041 cn_netlink_send vmlinux EXPORT_SYMBOL_GPL
+0xf82ad010 serial8250_modem_status vmlinux EXPORT_SYMBOL_GPL
+0xfa8db591 async_raid6_datap_recov vmlinux EXPORT_SYMBOL_GPL
+0x8a122113 set_create_files_as vmlinux EXPORT_SYMBOL
+0x76a65e3b mlxsw_core_port_init drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x804f922a ipmi_addr_length drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x83b9c888 dev_get_by_name vmlinux EXPORT_SYMBOL
+0xd96e88b1 power_supply_set_battery_charged vmlinux EXPORT_SYMBOL_GPL
+0xc81844c4 ohci_hub_control vmlinux EXPORT_SYMBOL_GPL
+0x01c89807 serial8250_init_port vmlinux EXPORT_SYMBOL_GPL
+0xa6532b08 pci_enable_msix_range vmlinux EXPORT_SYMBOL
+0xf80be44e rdmsr_safe_on_cpu vmlinux EXPORT_SYMBOL
+0xe6f59c38 iov_iter_get_pages vmlinux EXPORT_SYMBOL
+0x08823411 __page_symlink vmlinux EXPORT_SYMBOL
+0xca98f497 install_exec_creds vmlinux EXPORT_SYMBOL
+0xa38a9f71 get_itimerspec64 vmlinux EXPORT_SYMBOL_GPL
+0xe7b5d3b3 sched_setscheduler vmlinux EXPORT_SYMBOL_GPL
+0xc2433bc4 ib_unregister_event_handler drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x3ba17eab drm_ht_insert_item drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5bb4e537 ufshcd_shutdown vmlinux EXPORT_SYMBOL
+0x93a4c2a1 iscsi_segment_seek_sg vmlinux EXPORT_SYMBOL_GPL
+0x30a80826 __kfifo_from_user vmlinux EXPORT_SYMBOL
+0x52096971 cdev_set_parent vmlinux EXPORT_SYMBOL
+0x6923ce63 irq_work_sync vmlinux EXPORT_SYMBOL_GPL
+0xf1af2adf unregister_kretprobes vmlinux EXPORT_SYMBOL_GPL
+0x8a35b432 sme_me_mask vmlinux EXPORT_SYMBOL
+0x9e8938d2 ipcomp_input net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL
+0xeeb30841 nft_reject_validate net/netfilter/nft_reject EXPORT_SYMBOL_GPL
+0x92535821 nat_h245_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x5ec83d9a i2c_adapter_type drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x98958235 mlx5_core_reserved_gids_count drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xdd592615 drm_vma_offset_remove drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4b1c30f9 udp_prot vmlinux EXPORT_SYMBOL
+0x1b92e41d inet_putpeer vmlinux EXPORT_SYMBOL_GPL
+0x8d8c24dc __hw_addr_ref_unsync_dev vmlinux EXPORT_SYMBOL
+0x1c36da63 netif_set_xps_queue vmlinux EXPORT_SYMBOL
+0xb9794b38 sas_prep_resume_ha vmlinux EXPORT_SYMBOL
+0x7034ff5b nvdimm_in_overwrite vmlinux EXPORT_SYMBOL_GPL
+0x1d48994f pci_msi_vec_count vmlinux EXPORT_SYMBOL
+0x56b4e672 virtio_transport_connect net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0xff61571a cxgb4_remove_server_filter drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0xbd4e7538 kvm_vcpu_yield_to arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x754f4b83 inet6_sk_rebuild_header vmlinux EXPORT_SYMBOL_GPL
+0xb8c9a596 __dev_set_mtu vmlinux EXPORT_SYMBOL
+0x0f9d66cd rc_keydown_notimeout vmlinux EXPORT_SYMBOL_GPL
+0xfb030814 device_init_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x263d9da8 clkdev_hw_alloc vmlinux EXPORT_SYMBOL
+0x47d0eea2 acpi_lpat_temp_to_raw vmlinux EXPORT_SYMBOL_GPL
+0x7265a468 unregister_quota_format vmlinux EXPORT_SYMBOL
+0xcc9268fc hwpoison_filter_enable vmlinux EXPORT_SYMBOL_GPL
+0xf1e98c74 avenrun vmlinux EXPORT_SYMBOL
+0x2d3385d3 system_wq vmlinux EXPORT_SYMBOL
+0xe411ed30 nf_ct_extend_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x090ce8ad cxgb4_reclaim_completed_tx drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x8e58f4e5 free_mdio_bitbang drivers/net/phy/mdio-bitbang EXPORT_SYMBOL
+0x7ded7c25 drm_gem_dmabuf_mmap drivers/gpu/drm/drm EXPORT_SYMBOL
+0x189eca6e dev_pm_opp_set_sharing_cpus vmlinux EXPORT_SYMBOL_GPL
+0x37d7daed mark_page_accessed vmlinux EXPORT_SYMBOL
+0x1edb69d6 ktime_get_raw_ts64 vmlinux EXPORT_SYMBOL
+0x7a4c5432 l2tp_session_get_nth net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x24c5c0c1 nf_nat_sip_hooks net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x6c08ea6a transport_kmap_data_sg drivers/target/target_core_mod EXPORT_SYMBOL
+0xfd93482e dm_rh_recovery_in_flight drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xa47f448d mlx4_gen_port_state_change_eqe drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xf5531bea ipmi_poll_interface drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x200b2041 in6addr_any vmlinux EXPORT_SYMBOL
+0x1587f3fa inet_protos vmlinux EXPORT_SYMBOL
+0x02a18c74 nf_conntrack_destroy vmlinux EXPORT_SYMBOL
+0x0546d5ec fc_vport_id_lookup vmlinux EXPORT_SYMBOL
+0x9330f205 scsi_report_bus_reset vmlinux EXPORT_SYMBOL
+0xfc551332 nd_dax_probe vmlinux EXPORT_SYMBOL
+0x86404fdf tty_port_install vmlinux EXPORT_SYMBOL_GPL
+0xc7390f8b phy_set_mode_ext vmlinux EXPORT_SYMBOL_GPL
+0x90de0452 platform_thermal_package_notify vmlinux EXPORT_SYMBOL_GPL
+0x47e80ab8 ceph_compare_options net/ceph/libceph EXPORT_SYMBOL
+0x950dd06e kvm_vcpu_write_guest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1725a13b ip6_sk_dst_lookup_flow vmlinux EXPORT_SYMBOL_GPL
+0x392ea8fb hid_output_report vmlinux EXPORT_SYMBOL_GPL
+0xff3af4d3 cpuidle_get_cpu_driver vmlinux EXPORT_SYMBOL_GPL
+0x866ac1e9 __usb_create_hcd vmlinux EXPORT_SYMBOL_GPL
+0xd38731fb fc_fill_reply_hdr vmlinux EXPORT_SYMBOL
+0x9bece81b mpi_cmp_ui vmlinux EXPORT_SYMBOL_GPL
+0x2ec6bba0 errseq_set vmlinux EXPORT_SYMBOL
+0x5b5fcae8 vfs_parse_fs_string vmlinux EXPORT_SYMBOL
+0x2bd795dd cdev_add vmlinux EXPORT_SYMBOL
+0x2f66c85e ring_buffer_empty vmlinux EXPORT_SYMBOL_GPL
+0x69447467 ring_buffer_write vmlinux EXPORT_SYMBOL_GPL
+0xd4eb7735 ceph_decode_entity_addr net/ceph/libceph EXPORT_SYMBOL
+0xeeacab69 rpc_update_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x2f118562 ib_modify_srq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4e0280b6 drm_mode_hsync drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8d701329 drm_dp_clock_recovery_ok drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x77fd283d pdptrs_changed arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x93c37419 xfrm_input vmlinux EXPORT_SYMBOL
+0x9a9868d2 dev_mc_del vmlinux EXPORT_SYMBOL
+0x8df1a1b5 dev_mc_add vmlinux EXPORT_SYMBOL
+0x37b1e70c dev_uc_del vmlinux EXPORT_SYMBOL
+0x20d82e6b dev_uc_add vmlinux EXPORT_SYMBOL
+0x03eec3d4 register_pernet_subsys vmlinux EXPORT_SYMBOL_GPL
+0x1999cf1b __skb_ext_put vmlinux EXPORT_SYMBOL
+0x441a4766 rtc_update_irq vmlinux EXPORT_SYMBOL_GPL
+0x033f90f6 mptscsih_change_queue_depth vmlinux EXPORT_SYMBOL
+0x3c2c1140 dev_attr_em_message_type vmlinux EXPORT_SYMBOL_GPL
+0xf12c7c57 __devm_create_dev_dax vmlinux EXPORT_SYMBOL_GPL
+0x6be1c1f8 acpi_install_method vmlinux EXPORT_SYMBOL
+0x2c7af14e fb_blank vmlinux EXPORT_SYMBOL
+0x099e654a blk_queue_update_dma_pad vmlinux EXPORT_SYMBOL
+0xbddc09c5 blkdev_get_by_path vmlinux EXPORT_SYMBOL
+0x36c4c99a open_exec vmlinux EXPORT_SYMBOL
+0x3b91db5b intel_pt_handle_vmx vmlinux EXPORT_SYMBOL_GPL
+0xa77b3b62 des3_ede_expand_key lib/crypto/libdes EXPORT_SYMBOL_GPL
+0x5d9a3139 __tracepoint_kvm_nested_vmenter_failed arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb12dd552 inet_gro_receive vmlinux EXPORT_SYMBOL
+0x17ea7fa5 iscsi_remove_session vmlinux EXPORT_SYMBOL_GPL
+0x93189beb ahci_platform_disable_resources vmlinux EXPORT_SYMBOL_GPL
+0x7ad47ada cpu_subsys vmlinux EXPORT_SYMBOL_GPL
+0xd8df08ac acpi_handle_printk vmlinux EXPORT_SYMBOL
+0x9d61e994 ucs2_strncmp vmlinux EXPORT_SYMBOL
+0xbd462b55 __kfifo_init vmlinux EXPORT_SYMBOL
+0x63b75f40 blkcg_activate_policy vmlinux EXPORT_SYMBOL_GPL
+0x47a0cdcb mb_cache_entry_find_next vmlinux EXPORT_SYMBOL
+0xe05ade76 inode_init_owner vmlinux EXPORT_SYMBOL
+0x5f47a27b file_ns_capable vmlinux EXPORT_SYMBOL
+0xeee667d3 fpregs_assert_state_consistent vmlinux EXPORT_SYMBOL_GPL
+0x7dcf4135 __xa_insert vmlinux EXPORT_SYMBOL
+0xd611f6de xfrm_dev_state_add vmlinux EXPORT_SYMBOL_GPL
+0x43f56e82 ata_xfer_mode2shift vmlinux EXPORT_SYMBOL_GPL
+0x500da9df pnp_activate_dev vmlinux EXPORT_SYMBOL
+0xe9bea849 pci_find_next_capability vmlinux EXPORT_SYMBOL_GPL
+0x2b30f429 raid6_call vmlinux EXPORT_SYMBOL_GPL
+0xcb8118e5 fs_context_for_reconfigure vmlinux EXPORT_SYMBOL
+0x0951ce32 list_lru_count_one vmlinux EXPORT_SYMBOL_GPL
+0xd30aef48 rt_mutex_destroy vmlinux EXPORT_SYMBOL_GPL
+0x7b6d70b2 queue_delayed_work_on vmlinux EXPORT_SYMBOL
+0xd4034828 system_freezable_wq vmlinux EXPORT_SYMBOL_GPL
+0x90dc29df aout_dump_debugregs vmlinux EXPORT_SYMBOL_GPL
+0x73d5d6c7 rdma_nl_put_driver_u32_hex drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x1908516c ib_unregister_client drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0c32ff8a edac_pci_alloc_index drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0xb7500cb5 __tracepoint_bcache_gc_end drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x9c519b5d drm_atomic_helper_disable_planes_on_crtc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xd0bee653 drm_dp_dpcd_read_link_status drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xa71608a4 qdisc_put_unlocked vmlinux EXPORT_SYMBOL
+0xbf114d86 skb_gso_validate_mac_len vmlinux EXPORT_SYMBOL_GPL
+0xad5d3ea5 update_devfreq vmlinux EXPORT_SYMBOL
+0xc48ad5e0 iscsi_destroy_endpoint vmlinux EXPORT_SYMBOL_GPL
+0xd117500c devres_open_group vmlinux EXPORT_SYMBOL_GPL
+0xf8f0a605 pci_read_config_dword vmlinux EXPORT_SYMBOL
+0x9fdcd68f __set_page_dirty_nobuffers vmlinux EXPORT_SYMBOL
+0x58ba2444 bpf_prog_inc_not_zero vmlinux EXPORT_SYMBOL_GPL
+0xf997ded4 svc_xprt_copy_addrs net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x0b52d259 esp_output_tail net/ipv4/esp4 EXPORT_SYMBOL_GPL
+0x215a8ec8 slhc_init drivers/net/slip/slhc EXPORT_SYMBOL
+0xb871ed8c kvm_x86_ops arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x0dc36ea3 clk_hw_unregister vmlinux EXPORT_SYMBOL_GPL
+0x98503a63 mpi_alloc vmlinux EXPORT_SYMBOL_GPL
+0xc8d28757 af_alg_get_rsgl vmlinux EXPORT_SYMBOL_GPL
+0x9c122bcf mempool_create_node vmlinux EXPORT_SYMBOL
+0x7c9fcdf6 find_get_pid vmlinux EXPORT_SYMBOL_GPL
+0xc5d43702 arch_trigger_cpumask_backtrace vmlinux EXPORT_SYMBOL_GPL
+0x6632d6eb nf_conntrack_helper_put net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xd8984de1 usb_stor_transparent_scsi_command drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0xff007c25 mlxsw_core_cpu_port_fini drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xd2365b46 cxgb4_port_idx drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x0f460a13 dev_set_promiscuity vmlinux EXPORT_SYMBOL
+0x1263e83d iscsi_verify_itt vmlinux EXPORT_SYMBOL_GPL
+0x65fe68a4 scsi_bios_ptable vmlinux EXPORT_SYMBOL
+0xaf0964ff pm_generic_resume vmlinux EXPORT_SYMBOL_GPL
+0x23daa989 mipi_dsi_create_packet vmlinux EXPORT_SYMBOL
+0x4639bcda acpi_nfit_shutdown vmlinux EXPORT_SYMBOL_GPL
+0x74bf37af pci_write_vpd vmlinux EXPORT_SYMBOL
+0xeb212e14 pci_generic_config_write32 vmlinux EXPORT_SYMBOL_GPL
+0xbac328be configfs_depend_item_unlocked vmlinux EXPORT_SYMBOL
+0xd17ffe19 e820__mapped_any vmlinux EXPORT_SYMBOL_GPL
+0x127e7002 nf_send_unreach6 net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL
+0x354a16c6 esp_input_done2 net/ipv4/esp4 EXPORT_SYMBOL_GPL
+0xe5a78c1e ib_sa_unpack_path drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x224018ee drm_plane_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb9cba57f cast_s3 crypto/cast_common EXPORT_SYMBOL_GPL
+0x6174f952 tcp_unregister_ulp vmlinux EXPORT_SYMBOL_GPL
+0xf9cac29b neigh_connected_output vmlinux EXPORT_SYMBOL
+0x155d0f77 nvme_reset_ctrl_sync vmlinux EXPORT_SYMBOL_GPL
+0xe9f7149c zlib_deflate_workspacesize vmlinux EXPORT_SYMBOL
+0xf3bb252b posix_acl_create vmlinux EXPORT_SYMBOL_GPL
+0x4d89f80f irq_domain_translate_twocell vmlinux EXPORT_SYMBOL_GPL
+0x76e51b7d irq_set_chained_handler_and_data vmlinux EXPORT_SYMBOL_GPL
+0x019c77d9 __percpu_init_rwsem vmlinux EXPORT_SYMBOL_GPL
+0x62f7e207 down_read_killable vmlinux EXPORT_SYMBOL
+0xf124d0e7 param_set_ushort vmlinux EXPORT_SYMBOL
+0xefed2b4d svc_recv net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb219cf94 mdev_register_device drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x8d1506ee iscsit_queue_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x8c170c92 pnfs_generic_pg_writepages fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x4e68e9be rb_next_postorder vmlinux EXPORT_SYMBOL
+0x25887ce4 udp_flush_pending_frames vmlinux EXPORT_SYMBOL
+0x447c1117 xdp_rxq_info_is_reg vmlinux EXPORT_SYMBOL_GPL
+0xd740973d dev_get_stats vmlinux EXPORT_SYMBOL
+0xdad0b8c9 skb_vlan_untag vmlinux EXPORT_SYMBOL
+0x851e6003 usb_phy_roothub_calibrate vmlinux EXPORT_SYMBOL_GPL
+0x78aeb984 genphy_c45_pma_setup_forced vmlinux EXPORT_SYMBOL_GPL
+0x4f27cf1e pm_generic_poweroff vmlinux EXPORT_SYMBOL_GPL
+0xb6b35a76 tpm_tis_core_init vmlinux EXPORT_SYMBOL_GPL
+0x86fb9b05 bitmap_parse_user vmlinux EXPORT_SYMBOL
+0x01000e51 schedule vmlinux EXPORT_SYMBOL
+0x3fc2387c ib_ud_header_unpack drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xb4220803 drm_property_create_enum drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5bda969c drm_atomic_bridge_enable drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4e3a53d0 drm_i2c_encoder_save drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5a4734d1 drm_kms_helper_is_poll_worker drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x3797eab3 nlmsg_notify vmlinux EXPORT_SYMBOL
+0xdd8166a1 dma_fence_free vmlinux EXPORT_SYMBOL
+0xc1e07264 nd_namespace_blk_validate vmlinux EXPORT_SYMBOL
+0x606fd4af serial8250_do_set_divisor vmlinux EXPORT_SYMBOL_GPL
+0xde8182ac blk_mq_tag_to_rq vmlinux EXPORT_SYMBOL
+0x20b9f04f proc_create_net_single_write vmlinux EXPORT_SYMBOL_GPL
+0x5b658b73 ww_mutex_unlock vmlinux EXPORT_SYMBOL
+0xe677d32e mmput vmlinux EXPORT_SYMBOL_GPL
+0xc3ae84fe init_task vmlinux EXPORT_SYMBOL
+0x39bb8cb7 transport_generic_free_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xe1b35a63 vmci_qpair_enquev drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0x3e92b49d drm_probe_ddc drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4580835b __fscache_wait_on_page_write fs/fscache/fscache EXPORT_SYMBOL
+0x5d447198 vlan_dev_vlan_proto vmlinux EXPORT_SYMBOL
+0x460037e1 __hw_addr_sync_dev vmlinux EXPORT_SYMBOL
+0x09320c79 nvme_complete_async_event vmlinux EXPORT_SYMBOL_GPL
+0xcfd106b0 wakeup_source_create vmlinux EXPORT_SYMBOL_GPL
+0x553c34bf pcibios_resource_to_bus vmlinux EXPORT_SYMBOL
+0x5e06bc5c refcount_dec_and_lock vmlinux EXPORT_SYMBOL
+0x219bcea3 csum_and_copy_from_iter_full vmlinux EXPORT_SYMBOL
+0xbe7d6ed7 iomap_file_dirty vmlinux EXPORT_SYMBOL_GPL
+0xf6c8dc62 cpu_hotplug_enable vmlinux EXPORT_SYMBOL_GPL
+0xcd691fa1 rpc_sleep_on_priority_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x43dccab4 target_wait_for_sess_cmds drivers/target/target_core_mod EXPORT_SYMBOL
+0x19227378 i2c_verify_client drivers/i2c/i2c-core EXPORT_SYMBOL
+0x2317bb3f mlx5_query_min_inline drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x3a8063f3 drm_dp_dsc_sink_supported_input_bpcs drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xb5fb4b01 gnet_stats_copy_app vmlinux EXPORT_SYMBOL
+0xf85b658c input_allocate_device vmlinux EXPORT_SYMBOL
+0x20740c0f pm_runtime_barrier vmlinux EXPORT_SYMBOL_GPL
+0xaae198db dev_printk vmlinux EXPORT_SYMBOL
+0x2e2df7f4 irq_remapping_cap vmlinux EXPORT_SYMBOL_GPL
+0x204fe87f pci_set_pcie_reset_state vmlinux EXPORT_SYMBOL_GPL
+0xee7554f2 phy_power_on vmlinux EXPORT_SYMBOL_GPL
+0x1b015d25 bitmap_parselist vmlinux EXPORT_SYMBOL
+0x7df072b7 __insert_inode_hash vmlinux EXPORT_SYMBOL
+0xf474fdcb kfree_const vmlinux EXPORT_SYMBOL
+0xd63ce82a __tracepoint_cpu_frequency vmlinux EXPORT_SYMBOL_GPL
+0x65f19c38 revert_creds vmlinux EXPORT_SYMBOL
+0xdf929370 fs_overflowgid vmlinux EXPORT_SYMBOL
+0x9c07db80 virtio_transport_inc_tx_pkt net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0xda47271c edac_mc_del_mc drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x58fa7d83 __tracepoint_mlx5_fs_add_fg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x39153c11 __tracepoint_mlx5_fs_add_ft drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xb7d5f913 drm_fb_helper_sys_imageblit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x8ac756c0 x86_set_memory_region arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xf7ef9a79 iosf_mbi_punit_release arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL
+0xc8a91f5b cpumask_local_spread vmlinux EXPORT_SYMBOL
+0x97c125c6 reuseport_attach_prog vmlinux EXPORT_SYMBOL
+0xec4ab4a4 md_do_sync vmlinux EXPORT_SYMBOL_GPL
+0xaa2d4d44 scsi_dh_set_params vmlinux EXPORT_SYMBOL_GPL
+0x33ea8672 agp_generic_type_to_mask_type vmlinux EXPORT_SYMBOL
+0x17e01f11 erst_clear vmlinux EXPORT_SYMBOL_GPL
+0x714c5157 cpu_rmap_add vmlinux EXPORT_SYMBOL
+0xdecd0b29 __stack_chk_fail vmlinux EXPORT_SYMBOL
+0x3da171f9 pci_mem_start vmlinux EXPORT_SYMBOL
+0x932a6ffc dm_tm_shadow_block drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x57678433 nfs_client_init_is_complete fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x952dd39f xfrm_policy_walk vmlinux EXPORT_SYMBOL
+0x5e71b9bb nf_log_unset vmlinux EXPORT_SYMBOL
+0x03311534 fbcon_set_rotate vmlinux EXPORT_SYMBOL
+0x072620a6 pcie_get_speed_cap vmlinux EXPORT_SYMBOL
+0x97de2b83 debug_locks_silent vmlinux EXPORT_SYMBOL_GPL
+0xeab8b9e7 bdi_alloc_node vmlinux EXPORT_SYMBOL
+0x823eae06 blocking_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0xa51fbedc dm_bitset_new drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x7ca0833d drm_vma_offset_manager_destroy drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbabcccb9 nfs_init_commit fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x47d5cd2a strp_check_rcv vmlinux EXPORT_SYMBOL_GPL
+0x0e884535 ping_hash vmlinux EXPORT_SYMBOL_GPL
+0xd28f463a __napi_schedule_irqoff vmlinux EXPORT_SYMBOL
+0xbb2af3f3 iommu_map_sg vmlinux EXPORT_SYMBOL_GPL
+0x87220ab7 clk_hw_register_fixed_rate vmlinux EXPORT_SYMBOL_GPL
+0x594bf15b ioport_map vmlinux EXPORT_SYMBOL
+0xf7db8854 crypto_unregister_skciphers vmlinux EXPORT_SYMBOL_GPL
+0x0b2f4277 dquot_quota_off vmlinux EXPORT_SYMBOL
+0x15b60005 kthread_flush_work vmlinux EXPORT_SYMBOL_GPL
+0x257e2c26 ib_umem_copy_from drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0xf35c1478 devm_input_allocate_polled_device drivers/input/input-polldev EXPORT_SYMBOL
+0x5267415e ppp_unregister_channel drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xe4dd7e91 mlx5_nic_vport_update_local_lb drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x9c79a6e2 mlx5_comp_irq_get_affinity_mask drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc62ea79e kvm_requeue_exception_e arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x211130c1 alloc_cpumask_var vmlinux EXPORT_SYMBOL
+0xf5e7f26d xfrm_audit_state_add vmlinux EXPORT_SYMBOL_GPL
+0xceff8632 __skb_checksum_complete vmlinux EXPORT_SYMBOL
+0x12dbdd17 power_supply_register_no_ws vmlinux EXPORT_SYMBOL_GPL
+0x67607e6f mptscsih_shutdown vmlinux EXPORT_SYMBOL
+0x1eea984f genphy_read_lpa vmlinux EXPORT_SYMBOL
+0xa1de7228 ata_pci_device_do_suspend vmlinux EXPORT_SYMBOL_GPL
+0x25897653 tty_lock vmlinux EXPORT_SYMBOL
+0xd5b3d0d5 xxh64_copy_state vmlinux EXPORT_SYMBOL
+0xbe5a24e9 xxh32_copy_state vmlinux EXPORT_SYMBOL
+0xb5a459dc unregister_blkdev vmlinux EXPORT_SYMBOL
+0xe2858b8f key_alloc vmlinux EXPORT_SYMBOL
+0x01a71764 write_inode_now vmlinux EXPORT_SYMBOL
+0x2cef5f75 init_pseudo vmlinux EXPORT_SYMBOL
+0x10075f38 housekeeping_any_cpu vmlinux EXPORT_SYMBOL_GPL
+0x924ac70e ceph_osdc_notify_ack net/ceph/libceph EXPORT_SYMBOL
+0x69049cd2 radix_tree_replace_slot vmlinux EXPORT_SYMBOL
+0x2f0d9053 usb_otg_state_string vmlinux EXPORT_SYMBOL_GPL
+0x6c257ac0 tty_termios_hw_change vmlinux EXPORT_SYMBOL
+0xba97e4ed crypto_rng_reset vmlinux EXPORT_SYMBOL_GPL
+0x7b5a4926 sha1_zero_message_hash vmlinux EXPORT_SYMBOL_GPL
+0x7338a9c6 security_dentry_create_files_as vmlinux EXPORT_SYMBOL
+0x71600023 fat_search_long vmlinux EXPORT_SYMBOL_GPL
+0x1001dc1c seq_path vmlinux EXPORT_SYMBOL
+0xcc5005fe msleep_interruptible vmlinux EXPORT_SYMBOL
+0x49c14a61 ex_handler_fault vmlinux EXPORT_SYMBOL_GPL
+0xd1f6c5f3 smp_num_siblings vmlinux EXPORT_SYMBOL
+0xd7673035 g_verify_token_header net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x1f099794 synproxy_init_timestamp_cookie net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL
+0xc7691225 cxgb4_map_skb drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x055ea0f4 kvm_emulate_wrmsr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x7f4c7b5d kvm_emulate_rdmsr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x89434b4b radix_tree_tag_clear vmlinux EXPORT_SYMBOL
+0x72b1765e dev_addr_add vmlinux EXPORT_SYMBOL
+0x89128005 netdev_err vmlinux EXPORT_SYMBOL
+0x62849ac7 dev_valid_name vmlinux EXPORT_SYMBOL
+0xe89694d1 kfree_skb_partial vmlinux EXPORT_SYMBOL
+0x591350be __sk_mem_reduce_allocated vmlinux EXPORT_SYMBOL
+0xfc3973d8 __tracepoint_mc_event vmlinux EXPORT_SYMBOL_GPL
+0x89bbafc6 usb_register_notify vmlinux EXPORT_SYMBOL_GPL
+0x59bc6695 iscsi_session_setup vmlinux EXPORT_SYMBOL_GPL
+0xf0a45ce5 agp_create_memory vmlinux EXPORT_SYMBOL
+0xb0a0da0c rational_best_approximation vmlinux EXPORT_SYMBOL
+0x1af267f8 int_pow vmlinux EXPORT_SYMBOL_GPL
+0xf888ca21 sg_init_table vmlinux EXPORT_SYMBOL
+0x26780a4e blk_pre_runtime_resume vmlinux EXPORT_SYMBOL
+0xf4cabeae fsnotify vmlinux EXPORT_SYMBOL_GPL
+0x5a53fd3b generic_file_open vmlinux EXPORT_SYMBOL
+0x959dff64 fixup_user_fault vmlinux EXPORT_SYMBOL_GPL
+0xb43f9365 ktime_get vmlinux EXPORT_SYMBOL_GPL
+0x886e4a34 ip_set_get_ip_port net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x9cdeb6f6 adf_devmgr_update_class_index drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x2257ca13 mlx5_core_create_rqt drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x77f4dd4d mlx5_core_create_tis drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xcd0ea583 mlx5_core_create_tir drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xffdf9a9d mlx5_core_create_psv drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x1873af13 mlx5_core_create_dct drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x8fe5845c drm_get_pci_dev drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbd1933d4 crypto_engine_stop crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x22501aeb inet_addr_type_dev_table vmlinux EXPORT_SYMBOL
+0xe7a491bc __mdiobus_register vmlinux EXPORT_SYMBOL
+0xc7b9715a genphy_update_link vmlinux EXPORT_SYMBOL
+0xc9a89d69 ata_sff_freeze vmlinux EXPORT_SYMBOL_GPL
+0xf29cdf27 software_node_register vmlinux EXPORT_SYMBOL_GPL
+0x6749d53f hdmi_vendor_infoframe_init vmlinux EXPORT_SYMBOL
+0x89e34a76 generic_cont_expand_simple vmlinux EXPORT_SYMBOL
+0x917e9fa3 __vfs_setxattr vmlinux EXPORT_SYMBOL
+0xdc187162 d_instantiate vmlinux EXPORT_SYMBOL
+0x8dfcaba2 irq_chip_unmask_parent vmlinux EXPORT_SYMBOL_GPL
+0x345e9be4 mlx5_core_dealloc_transport_domain drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xc25f3e50 drm_mm_scan_remove_block drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe1cfa261 __tracepoint_fdb_delete vmlinux EXPORT_SYMBOL_GPL
+0xc29bd778 skb_set_owner_w vmlinux EXPORT_SYMBOL
+0xc604eed3 iscsi_ping_comp_event vmlinux EXPORT_SYMBOL_GPL
+0xa7245367 agp_generic_create_gatt_table vmlinux EXPORT_SYMBOL
+0x0afbda2a __nla_put_64bit vmlinux EXPORT_SYMBOL
+0x984ce9bd __nla_parse vmlinux EXPORT_SYMBOL
+0x9ba178c9 page_mapping vmlinux EXPORT_SYMBOL
+0xeaf3cb23 crc64_be lib/crc64 EXPORT_SYMBOL_GPL
+0xd7053b6d rdma_nl_put_driver_u64_hex drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6e287f0f rdma_read_gid_attr_ndev_rcu drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7b56b034 ib_unregister_driver drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0350f080 mlx4_SET_PORT_qpn_calc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xefdcdecf __fscache_unregister_netfs fs/fscache/fscache EXPORT_SYMBOL
+0x8b44ee75 camellia_ecb_dec_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL
+0x2c8b5dbf camellia_ecb_enc_16way arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL
+0xc75d5ab2 xt_alloc_table_info vmlinux EXPORT_SYMBOL
+0x11c5fc4c iommu_domain_alloc vmlinux EXPORT_SYMBOL_GPL
+0xba56cc0b clk_hw_register_gate vmlinux EXPORT_SYMBOL_GPL
+0x4df02057 crc32_be vmlinux EXPORT_SYMBOL
+0x5d91f5e6 af_alg_wmem_wakeup vmlinux EXPORT_SYMBOL_GPL
+0x5bbdfa26 scatterwalk_ffwd vmlinux EXPORT_SYMBOL_GPL
+0xa74ad542 iomap_readpage vmlinux EXPORT_SYMBOL_GPL
+0x26520970 vm_memory_committed vmlinux EXPORT_SYMBOL_GPL
+0xdc5736d5 acpi_register_ioapic vmlinux EXPORT_SYMBOL
+0x20fff6ec ZSTD_DStreamInSize lib/zstd/zstd_decompress EXPORT_SYMBOL
+0x804bd029 ceph_osdc_new_request net/ceph/libceph EXPORT_SYMBOL
+0xacfcb4c6 rpc_restart_call net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x48d1c7dc dm_btree_find_lowest_key drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x812e462f i2c_unregister_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0xc31f52c8 drm_helper_crtc_in_use drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xa02beb4b kvm_emulate_hypercall arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x20457ecb xfrm_output_resume vmlinux EXPORT_SYMBOL_GPL
+0xc84d6172 dev_get_phys_port_name vmlinux EXPORT_SYMBOL
+0x699c2fce skb_find_text vmlinux EXPORT_SYMBOL
+0x3a4f6a32 ata_sff_interrupt vmlinux EXPORT_SYMBOL_GPL
+0xd379dce6 tpm_pcr_extend vmlinux EXPORT_SYMBOL_GPL
+0x983c11be pci_clear_master vmlinux EXPORT_SYMBOL
+0x865f3883 bio_integrity_add_page vmlinux EXPORT_SYMBOL
+0x1375c67f dump_align vmlinux EXPORT_SYMBOL
+0xbbe80fdb kmalloc_order vmlinux EXPORT_SYMBOL
+0x15a22a16 __dec_node_page_state vmlinux EXPORT_SYMBOL
+0x5887c804 __inc_node_page_state vmlinux EXPORT_SYMBOL
+0xf28c0bd0 __mod_node_page_state vmlinux EXPORT_SYMBOL
+0x751ff010 vsock_addr_unbind net/vmw_vsock/vsock EXPORT_SYMBOL_GPL
+0x23ebd5fb dm_bitset_set_bit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x6e30ba8e drm_rect_rotate_inv drivers/gpu/drm/drm EXPORT_SYMBOL
+0xfc619abd _nfs_display_fhandle fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x134cdd34 inet_frag_rbtree_purge vmlinux EXPORT_SYMBOL
+0xbcbbd219 skb_queue_tail vmlinux EXPORT_SYMBOL
+0x34a1f7e3 acpi_processor_get_psd vmlinux EXPORT_SYMBOL
+0x0810be09 free_irq_cpu_rmap vmlinux EXPORT_SYMBOL
+0xa17e6f28 crypto_lookup_template vmlinux EXPORT_SYMBOL_GPL
+0x87575106 __register_nls vmlinux EXPORT_SYMBOL
+0x6041d3b9 register_sysctl vmlinux EXPORT_SYMBOL
+0x978e5072 vfs_test_lock vmlinux EXPORT_SYMBOL_GPL
+0x952664c5 do_exit vmlinux EXPORT_SYMBOL_GPL
+0x99fd20aa drm_rect_clip_scaled drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc996cbdd drm_self_refresh_helper_alter_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x6085edbd nfs_map_string_to_numeric fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x45c4f272 l3mdev_master_upper_ifindex_by_index_rcu vmlinux EXPORT_SYMBOL_GPL
+0x7141d91f devm_device_add_groups vmlinux EXPORT_SYMBOL_GPL
+0x799aebb1 sbitmap_get_shallow vmlinux EXPORT_SYMBOL_GPL
+0xa7adfe16 mark_buffer_dirty_inode vmlinux EXPORT_SYMBOL
+0xb1d10a96 generic_access_phys vmlinux EXPORT_SYMBOL_GPL
+0x470c56b7 ceph_msg_get net/ceph/libceph EXPORT_SYMBOL
+0x5bf7c11e ip_tunnel_lookup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0xa1ea4874 ib_get_net_dev_by_params drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x0ff50691 dev_dax_probe drivers/dax/device_dax EXPORT_SYMBOL_GPL
+0x5ba06d73 drm_connector_set_path_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x09606e6c backlight_device_register drivers/video/backlight/backlight EXPORT_SYMBOL
+0x7c381a76 dm_bufio_get_block_size vmlinux EXPORT_SYMBOL_GPL
+0x3a2ac52f ata_sff_thaw vmlinux EXPORT_SYMBOL_GPL
+0xc385ff11 dev_pm_qos_remove_request vmlinux EXPORT_SYMBOL_GPL
+0x337a2e8e fwnode_connection_find_match vmlinux EXPORT_SYMBOL_GPL
+0x0635cc90 dma_wait_for_async_tx vmlinux EXPORT_SYMBOL_GPL
+0x0014617e devm_clk_bulk_get vmlinux EXPORT_SYMBOL_GPL
+0x0b26b8c8 acpi_run_osc vmlinux EXPORT_SYMBOL
+0xe1bb8f93 simple_dentry_operations vmlinux EXPORT_SYMBOL
+0xb1c3a01a oops_in_progress vmlinux EXPORT_SYMBOL
+0x6b8283be convert_art_to_tsc vmlinux EXPORT_SYMBOL
+0x418873cc irq_bypass_register_producer virt/lib/irqbypass EXPORT_SYMBOL_GPL
+0xd906224f nf_ct_helper_log net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x51ecb57b flow_resources_add drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0x32b29846 gfn_to_pfn_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa4e3d56c netdev_lower_dev_get_private vmlinux EXPORT_SYMBOL
+0x48e1dbea tty_check_change vmlinux EXPORT_SYMBOL
+0x3d580d7f tty_insert_flip_string_fixed_flag vmlinux EXPORT_SYMBOL
+0xdccd8a96 tty_register_ldisc vmlinux EXPORT_SYMBOL
+0xa36de5cb tty_throttle vmlinux EXPORT_SYMBOL
+0xd0f19a73 hsu_dma_remove vmlinux EXPORT_SYMBOL_GPL
+0xede9a09a btree_lookup vmlinux EXPORT_SYMBOL_GPL
+0xd1d300cf public_key_subtype vmlinux EXPORT_SYMBOL_GPL
+0x01aa1682 kill_fasync vmlinux EXPORT_SYMBOL
+0x2fc70e7b sget_fc vmlinux EXPORT_SYMBOL
+0x078610bc param_get_byte vmlinux EXPORT_SYMBOL
+0x1497bd4b drm_put_dev drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4bcff6c7 drm_gem_vm_open drivers/gpu/drm/drm EXPORT_SYMBOL
+0x08de2b38 drm_fb_helper_setcmap drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xb2505046 drm_lspcon_set_mode drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x0cfd3fc5 ocfs2_cluster_connect_agnostic fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0xad995dac netdev_stats_to_stats64 vmlinux EXPORT_SYMBOL
+0x96078626 dev_disable_lro vmlinux EXPORT_SYMBOL
+0x73a3470d sock_alloc vmlinux EXPORT_SYMBOL
+0x98fa1e20 dm_get_reserved_rq_based_ios vmlinux EXPORT_SYMBOL_GPL
+0x161cb6e0 fc_fill_hdr vmlinux EXPORT_SYMBOL
+0x77dbcc99 attribute_container_classdev_to_container vmlinux EXPORT_SYMBOL_GPL
+0x1803a6ed raid6_2data_recov vmlinux EXPORT_SYMBOL_GPL
+0x21bdb523 errseq_check_and_advance vmlinux EXPORT_SYMBOL
+0xd9b85ef6 lockref_get vmlinux EXPORT_SYMBOL
+0x58ee3c7e debugfs_create_u8 vmlinux EXPORT_SYMBOL_GPL
+0x72d8a735 kill_pid_usb_asyncio vmlinux EXPORT_SYMBOL_GPL
+0x02aae0d1 ib_unregister_mad_agent drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4f0216b8 __tracepoint_bcache_btree_node_compact drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x4abffb9f mlx4_map_phys_fmr drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xe58e0f0d store_sampling_rate vmlinux EXPORT_SYMBOL_GPL
+0xc9a3422d dm_bufio_write_dirty_buffers_async vmlinux EXPORT_SYMBOL_GPL
+0x317e20ae md_rdev_clear vmlinux EXPORT_SYMBOL_GPL
+0x44eb9136 is_nd_btt vmlinux EXPORT_SYMBOL
+0xfe4a8cec __acpi_node_get_property_reference vmlinux EXPORT_SYMBOL_GPL
+0xe5e0ffd4 pci_wait_for_pending_transaction vmlinux EXPORT_SYMBOL
+0xced0f4d4 gen_pool_create vmlinux EXPORT_SYMBOL
+0xcb3ae215 call_blocking_lsm_notifier vmlinux EXPORT_SYMBOL
+0xdd0762df set_worker_desc vmlinux EXPORT_SYMBOL_GPL
+0xf52ad673 vhost_poll_init drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xb7cbefc2 rdma_set_afonly drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0xe9e32ffe ib_destroy_qp_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5cf3dd79 mlxsw_core_bus_device_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xf00100fc mlx5_set_port_tc_bw_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x71268354 nfs_file_llseek fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x09da0ba4 xa_set_mark vmlinux EXPORT_SYMBOL
+0x01b6865c xa_get_mark vmlinux EXPORT_SYMBOL
+0x82477510 tcf_idr_cleanup vmlinux EXPORT_SYMBOL
+0xdb99e251 devm_nvmem_register vmlinux EXPORT_SYMBOL_GPL
+0x7b3b1b8e mdio_driver_register vmlinux EXPORT_SYMBOL
+0xe4eeecb0 scsi_is_fc_rport vmlinux EXPORT_SYMBOL
+0xa531471e clk_save_context vmlinux EXPORT_SYMBOL_GPL
+0x6c224cda gen_pool_destroy vmlinux EXPORT_SYMBOL
+0xa1848cff blk_mq_tagset_busy_iter vmlinux EXPORT_SYMBOL
+0x3d64171c vma_kernel_pagesize vmlinux EXPORT_SYMBOL_GPL
+0xad6f7144 __tracepoint_kmalloc_node vmlinux EXPORT_SYMBOL
+0xec5ad73b trace_seq_bitmask vmlinux EXPORT_SYMBOL_GPL
+0x8ddd8aad schedule_timeout vmlinux EXPORT_SYMBOL
+0x59fb2e1a irq_chip_mask_parent vmlinux EXPORT_SYMBOL_GPL
+0x7baca7fe __tracepoint_bcache_btree_read drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x8a0144c0 mlx5_core_dealloc_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x8b71bb42 nfs4_decode_mp_ds_addr fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x77ca08a5 xsk_reuseq_prepare vmlinux EXPORT_SYMBOL_GPL
+0x65ccb6f0 call_netevent_notifiers vmlinux EXPORT_SYMBOL_GPL
+0x19e2401a ata_scsi_port_error_handler vmlinux EXPORT_SYMBOL_GPL
+0x270fb9df nvdimm_badblocks_populate vmlinux EXPORT_SYMBOL_GPL
+0x6a83007a crypto_alloc_base vmlinux EXPORT_SYMBOL_GPL
+0x05a6e388 is_subdir vmlinux EXPORT_SYMBOL
+0x1b1471f3 alarm_start vmlinux EXPORT_SYMBOL_GPL
+0x2201b0dd rdma_notify drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x68b2f180 __tracepoint_bcache_btree_cache_cannibalize drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0xb4b03b2e __tracepoint_bcache_request_end drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x452ba683 ipv6_ext_hdr vmlinux EXPORT_SYMBOL
+0x646687b9 neigh_destroy vmlinux EXPORT_SYMBOL
+0xe9d5d5ee netdev_warn vmlinux EXPORT_SYMBOL
+0x399972a2 hid_validate_values vmlinux EXPORT_SYMBOL_GPL
+0x466de93f ata_pci_device_do_resume vmlinux EXPORT_SYMBOL_GPL
+0xcefcd99a serial8250_unregister_port vmlinux EXPORT_SYMBOL
+0x213dc88c cryptd_aead_child crypto/cryptd EXPORT_SYMBOL_GPL
+0x256a9e0b kvm_set_cr0 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x6c4e3fee __xfrm_policy_check vmlinux EXPORT_SYMBOL
+0x105e26ba ata_pci_sff_init_host vmlinux EXPORT_SYMBOL_GPL
+0x21397ddc to_nd_blk_region vmlinux EXPORT_SYMBOL_GPL
+0x4d2c7133 acpi_info vmlinux EXPORT_SYMBOL
+0x3aff3200 acpi_evaluate_object_typed vmlinux EXPORT_SYMBOL
+0x736045b4 devm_pci_remap_iospace vmlinux EXPORT_SYMBOL
+0x5b03cd6b pci_read_config_word vmlinux EXPORT_SYMBOL
+0x13c49cc2 _copy_from_user vmlinux EXPORT_SYMBOL
+0xb49d1df2 rhashtable_walk_next vmlinux EXPORT_SYMBOL_GPL
+0xf3fdb93c blk_lld_busy vmlinux EXPORT_SYMBOL_GPL
+0xccb0de16 blk_sync_queue vmlinux EXPORT_SYMBOL
+0xd3103429 __kthread_init_worker vmlinux EXPORT_SYMBOL_GPL
+0x277b20d8 gre_del_protocol net/ipv4/gre EXPORT_SYMBOL_GPL
+0x76056847 ibdev_info drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xc063a03e drm_gem_private_object_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xce50555c ip6_route_input_lookup vmlinux EXPORT_SYMBOL_GPL
+0x93922111 get_compat_bpf_fprog vmlinux EXPORT_SYMBOL_GPL
+0xa88e9303 lwtunnel_output vmlinux EXPORT_SYMBOL_GPL
+0xfd491781 skb_clone_tx_timestamp vmlinux EXPORT_SYMBOL_GPL
+0x40c04df8 hid_open_report vmlinux EXPORT_SYMBOL_GPL
+0xb7969b83 phy_advertise_supported vmlinux EXPORT_SYMBOL
+0x2aff8b76 scsi_schedule_eh vmlinux EXPORT_SYMBOL_GPL
+0xa322be7f pm_runtime_irq_safe vmlinux EXPORT_SYMBOL_GPL
+0x46d971d5 class_unregister vmlinux EXPORT_SYMBOL_GPL
+0xbad32c46 fat_alloc_new_dir vmlinux EXPORT_SYMBOL_GPL
+0xaae64d99 simple_write_end vmlinux EXPORT_SYMBOL
+0x1234e483 get_cpu_iowait_time_us vmlinux EXPORT_SYMBOL_GPL
+0xbb07ec89 node_data vmlinux EXPORT_SYMBOL
+0x0ddfa9c5 sys_imageblit drivers/video/fbdev/core/sysimgblt EXPORT_SYMBOL
+0xf0d8e3db nfs_async_iocounter_wait fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xbfcb243b unix_get_socket vmlinux EXPORT_SYMBOL
+0xc505ff7e ping_init_sock vmlinux EXPORT_SYMBOL_GPL
+0xcf656390 netdev_update_features vmlinux EXPORT_SYMBOL
+0x1ed508d6 dev_get_port_parent_id vmlinux EXPORT_SYMBOL
+0x9776ee70 ata_sff_postreset vmlinux EXPORT_SYMBOL_GPL
+0x4302534d ata_sas_port_resume vmlinux EXPORT_SYMBOL_GPL
+0x5fa9ba37 devm_acpi_dma_controller_free vmlinux EXPORT_SYMBOL_GPL
+0xe3efe50b acpi_dev_get_property vmlinux EXPORT_SYMBOL_GPL
+0x274dd1a3 sg_free_table_chained vmlinux EXPORT_SYMBOL_GPL
+0xc8f162c9 ftrace_set_filter_ip vmlinux EXPORT_SYMBOL_GPL
+0xa0eae826 smp_call_function vmlinux EXPORT_SYMBOL
+0xef6c3f70 round_jiffies_up_relative vmlinux EXPORT_SYMBOL_GPL
+0xcaa68533 cpu_has_xfeatures vmlinux EXPORT_SYMBOL_GPL
+0xd9c25654 nf_nat_masquerade_inet_unregister_notifiers net/netfilter/nf_nat EXPORT_SYMBOL_GPL
+0x179404b8 i2c_smbus_read_byte_data drivers/i2c/i2c-core EXPORT_SYMBOL
+0x4d5e26af eth_header_cache vmlinux EXPORT_SYMBOL
+0x5cad8fc3 power_supply_ocv2cap_simple vmlinux EXPORT_SYMBOL_GPL
+0xcd51b696 ps2_handle_response vmlinux EXPORT_SYMBOL
+0x2d8127d5 ps2_command vmlinux EXPORT_SYMBOL
+0x1ed28612 scsi_print_sense_hdr vmlinux EXPORT_SYMBOL
+0x2fb24708 scsi_host_lookup vmlinux EXPORT_SYMBOL
+0xe55d526f iommu_dev_disable_feature vmlinux EXPORT_SYMBOL_GPL
+0x40f198aa uart_set_options vmlinux EXPORT_SYMBOL_GPL
+0x401fc1d1 pci_find_capability vmlinux EXPORT_SYMBOL
+0x4938bf03 phy_get vmlinux EXPORT_SYMBOL_GPL
+0xb678366f int_sqrt vmlinux EXPORT_SYMBOL
+0xb9d025c9 llist_del_first vmlinux EXPORT_SYMBOL_GPL
+0xd0f97420 dec_zone_page_state vmlinux EXPORT_SYMBOL
+0xdf93b9d8 timespec64_to_jiffies vmlinux EXPORT_SYMBOL
+0xf38bcdf3 nf_conntrack_max net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xcd315c62 uverbs_destroy_def_handler drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL
+0x102381e2 adf_sriov_configure drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x108002ed mlxsw_core_driver_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x0c7a9835 mlx4_test_interrupt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x8d7842d9 drm_mode_create_aspect_ratio_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe4c4e8d0 llc_mac_hdr_init vmlinux EXPORT_SYMBOL
+0x83326f61 srp_rport_del vmlinux EXPORT_SYMBOL_GPL
+0xc7a7e770 clk_bulk_enable vmlinux EXPORT_SYMBOL_GPL
+0xd96babb4 interval_tree_iter_next vmlinux EXPORT_SYMBOL_GPL
+0x002d60b7 badblocks_exit vmlinux EXPORT_SYMBOL_GPL
+0x8bba466d config_item_put vmlinux EXPORT_SYMBOL
+0x13a4e544 config_item_get vmlinux EXPORT_SYMBOL
+0xf92e255b mnt_drop_write_file vmlinux EXPORT_SYMBOL
+0xe1ac8193 generic_update_time vmlinux EXPORT_SYMBOL
+0x10f8772b __tracepoint_module_get vmlinux EXPORT_SYMBOL
+0x1eb9516e round_jiffies_relative vmlinux EXPORT_SYMBOL_GPL
+0xa7eedcc4 call_usermodehelper vmlinux EXPORT_SYMBOL
+0x53b629e1 ceph_monc_renew_subs net/ceph/libceph EXPORT_SYMBOL
+0x453efa54 svc_pool_map net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb50743e3 __i2c_smbus_xfer drivers/i2c/i2c-core EXPORT_SYMBOL
+0xb67cb3a3 drm_atomic_helper_dirtyfb drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x30b56bcd __cast6_setkey crypto/cast6_generic EXPORT_SYMBOL_GPL
+0x4b99ac23 pnfs_layoutcommit_inode fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x96c06ebe __fscache_disable_cookie fs/fscache/fscache EXPORT_SYMBOL
+0x977be5c7 klist_iter_init_node vmlinux EXPORT_SYMBOL_GPL
+0xd47f38ef inet_csk_compat_setsockopt vmlinux EXPORT_SYMBOL_GPL
+0xfc1fb55a inet_csk_compat_getsockopt vmlinux EXPORT_SYMBOL_GPL
+0xb7eca811 mbox_client_txdone vmlinux EXPORT_SYMBOL_GPL
+0xb247d2f5 dev_pm_opp_find_freq_ceil_by_volt vmlinux EXPORT_SYMBOL_GPL
+0x5193725b dm_put_table_device vmlinux EXPORT_SYMBOL
+0x238b65f0 dm_get_table_device vmlinux EXPORT_SYMBOL_GPL
+0x55fef8da fc_linkup vmlinux EXPORT_SYMBOL
+0x4b37228d pmem_should_map_pages vmlinux EXPORT_SYMBOL
+0x238b099f mipi_dsi_packet_format_is_short vmlinux EXPORT_SYMBOL
+0x2d6aa0f0 arch_apei_enable_cmcff vmlinux EXPORT_SYMBOL_GPL
+0xc8ddd5b5 kstrdup_quotable vmlinux EXPORT_SYMBOL_GPL
+0x0634100a bitmap_parselist_user vmlinux EXPORT_SYMBOL
+0x19965aaf blk_queue_update_dma_alignment vmlinux EXPORT_SYMBOL
+0xbaf6850c fsnotify_wait_marks_destroyed vmlinux EXPORT_SYMBOL_GPL
+0x07c23703 hrtimer_try_to_cancel vmlinux EXPORT_SYMBOL_GPL
+0xf8595510 _raw_read_lock_irq vmlinux EXPORT_SYMBOL
+0xff8e74e2 arch_haltpoll_enable vmlinux EXPORT_SYMBOL_GPL
+0x1762eccc nf_ct_seqadj_set net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x4b54fc7f vhost_dev_reset_owner_prepare drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x6fb076d8 drm_hdmi_avi_infoframe_from_display_mode drivers/gpu/drm/drm EXPORT_SYMBOL
+0x88b44f8b kvm_slot_page_track_remove_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x0baa914a kvm_is_linear_rip arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x7b82b9a1 idr_replace vmlinux EXPORT_SYMBOL
+0xf0fea0b0 __dev_get_by_name vmlinux EXPORT_SYMBOL
+0xbf8751f6 skb_copy_and_csum_dev vmlinux EXPORT_SYMBOL
+0x703c71ef input_unregister_handle vmlinux EXPORT_SYMBOL
+0x040e3298 usb_hcd_map_urb_for_dma vmlinux EXPORT_SYMBOL_GPL
+0x656c1a0e string_escape_mem vmlinux EXPORT_SYMBOL
+0x0f495efd blk_mq_quiesce_queue vmlinux EXPORT_SYMBOL_GPL
+0x02a454d0 register_asymmetric_key_parser vmlinux EXPORT_SYMBOL_GPL
+0x3d659493 mod_node_page_state vmlinux EXPORT_SYMBOL
+0x2c7db649 irq_dispose_mapping vmlinux EXPORT_SYMBOL_GPL
+0x530b1e98 pm_suspend vmlinux EXPORT_SYMBOL
+0xae69b1c1 usermodehelper_read_unlock vmlinux EXPORT_SYMBOL_GPL
+0x3421431a xdr_stream_decode_opaque_dup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x73eecd2f ib_resize_cq drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x127a5901 drbd_set_st_err_str drivers/block/drbd/drbd EXPORT_SYMBOL
+0x4fc06679 kvm_page_track_unregister_notifier arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xf0f1e7cc flow_rule_match_control vmlinux EXPORT_SYMBOL
+0x4d4c53e4 register_netdev vmlinux EXPORT_SYMBOL
+0xd0669623 mdiobus_write_nested vmlinux EXPORT_SYMBOL
+0x86bde9e8 nvme_unfreeze vmlinux EXPORT_SYMBOL_GPL
+0xd8052bcc device_get_dma_attr vmlinux EXPORT_SYMBOL_GPL
+0x596886bc blk_limits_io_opt vmlinux EXPORT_SYMBOL
+0x21fe7f18 proc_create vmlinux EXPORT_SYMBOL
+0x7aa1756e kvfree vmlinux EXPORT_SYMBOL
+0xc79e8912 t3_l2t_get drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0xad6ba40e radix_tree_tag_get vmlinux EXPORT_SYMBOL
+0x3a575cb6 ping_err vmlinux EXPORT_SYMBOL_GPL
+0xedda4705 inet_twsk_purge vmlinux EXPORT_SYMBOL_GPL
+0x37db8f19 dmi_get_date vmlinux EXPORT_SYMBOL
+0x381cd7ca dm_internal_resume vmlinux EXPORT_SYMBOL_GPL
+0x3069809a __tracepoint_xhci_dbg_quirks vmlinux EXPORT_SYMBOL_GPL
+0xf107b15d iscsi_create_endpoint vmlinux EXPORT_SYMBOL_GPL
+0x86c54f3a component_del vmlinux EXPORT_SYMBOL_GPL
+0xadfdfcef __bitmap_andnot vmlinux EXPORT_SYMBOL
+0xe81309c7 blkg_rwstat_recursive_sum vmlinux EXPORT_SYMBOL_GPL
+0x99df8f5a posix_acl_from_xattr vmlinux EXPORT_SYMBOL
+0x466df04c clear_nlink vmlinux EXPORT_SYMBOL
+0xcc07e650 set_anon_super_fc vmlinux EXPORT_SYMBOL
+0x74c7bffa stack_trace_snprint vmlinux EXPORT_SYMBOL_GPL
+0x2c3a2bc1 ceph_osdc_alloc_messages net/ceph/libceph EXPORT_SYMBOL
+0xad1f3f6c nf_xfrm_me_harder net/netfilter/nf_nat EXPORT_SYMBOL
+0x6943c4c7 ib_dereg_mr_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x8eaa963a i2c_transfer drivers/i2c/i2c-core EXPORT_SYMBOL
+0xcc29b393 mlx4_srq_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x92490f87 drm_i2c_encoder_prepare drivers/gpu/drm/drm EXPORT_SYMBOL
+0xdcbd6d1d kvm_inject_nmi arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x709cd8cb kvm_spurious_fault arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xe3a170f3 vcpu_load arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x88071b6b dev_loopback_xmit vmlinux EXPORT_SYMBOL
+0xfa011671 skb_seq_read vmlinux EXPORT_SYMBOL
+0x303c7102 qlt_lport_deregister vmlinux EXPORT_SYMBOL
+0x0872a3f2 scsi_device_lookup vmlinux EXPORT_SYMBOL
+0xdd18a993 acpi_check_dsm vmlinux EXPORT_SYMBOL
+0xed00c4fb acpi_os_printf vmlinux EXPORT_SYMBOL
+0x64127b67 bitmap_find_next_zero_area_off vmlinux EXPORT_SYMBOL
+0x28c9167a skcipher_walk_aead_decrypt vmlinux EXPORT_SYMBOL_GPL
+0xd6bb90d1 fscrypt_fname_alloc_buffer vmlinux EXPORT_SYMBOL
+0x7dc54b96 wbc_detach_inode vmlinux EXPORT_SYMBOL_GPL
+0x3f31d5bc cgroup_path_ns vmlinux EXPORT_SYMBOL_GPL
+0x08e70afb param_set_int vmlinux EXPORT_SYMBOL
+0x96cbe0b5 xdr_inline_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x84e7e7f5 cdrom_dummy_generic_packet drivers/cdrom/cdrom EXPORT_SYMBOL
+0x8d51afb2 pppox_compat_ioctl drivers/net/ppp/pppox EXPORT_SYMBOL
+0x24772bfe dm_bufio_get vmlinux EXPORT_SYMBOL_GPL
+0x356461c8 rtc_time64_to_tm vmlinux EXPORT_SYMBOL
+0x5863576e serial8250_rx_dma_flush vmlinux EXPORT_SYMBOL_GPL
+0xbe7e05a8 acpi_tb_install_and_load_table vmlinux EXPORT_SYMBOL
+0xc0f9a037 crypto_drop_spawn vmlinux EXPORT_SYMBOL_GPL
+0x716ddb7a security_socket_socketpair vmlinux EXPORT_SYMBOL
+0x854e1dce dquot_writeback_dquots vmlinux EXPORT_SYMBOL
+0x798fb62a bdev_read_page vmlinux EXPORT_SYMBOL_GPL
+0x57b6efe3 drm_ioctl_flags drivers/gpu/drm/drm EXPORT_SYMBOL
+0x77b17be9 drm_helper_probe_detect drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x67369b42 ipmi_addr_src_to_str drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x95db81cd kvm_slot_page_track_add_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x4390878a mptbase_sas_persist_operation vmlinux EXPORT_SYMBOL
+0x32ada8ba qlt_enable_vha vmlinux EXPORT_SYMBOL
+0x2459c7df ata_cable_80wire vmlinux EXPORT_SYMBOL_GPL
+0x873c9175 tty_port_register_device_attr_serdev vmlinux EXPORT_SYMBOL_GPL
+0xfdb2426b blk_mq_init_queue vmlinux EXPORT_SYMBOL
+0x89ae7aa0 rsa_parse_pub_key vmlinux EXPORT_SYMBOL_GPL
+0x2711b3ac param_set_short vmlinux EXPORT_SYMBOL
+0x1c846036 nf_nat_tftp_hook net/netfilter/nf_conntrack_tftp EXPORT_SYMBOL_GPL
+0x2dcd5e0a nf_nat_snmp_hook net/netfilter/nf_conntrack_snmp EXPORT_SYMBOL_GPL
+0xc2a006a2 get_h225_addr net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x93cf8ea3 iscsit_tmr_post_handler drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x12ad1df9 target_to_linux_sector drivers/target/target_core_mod EXPORT_SYMBOL
+0x58d8fcaa drm_dsc_pps_payload_pack drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x77bc13a0 strim vmlinux EXPORT_SYMBOL
+0xfa893449 fc_exch_mgr_list_clone vmlinux EXPORT_SYMBOL
+0xe3432dec pci_scan_bus vmlinux EXPORT_SYMBOL
+0x89a0cd52 crc32c_impl vmlinux EXPORT_SYMBOL
+0x93187500 shash_register_instance vmlinux EXPORT_SYMBOL_GPL
+0xbd68ebe6 debugfs_lookup vmlinux EXPORT_SYMBOL_GPL
+0xae7f81b1 seq_write vmlinux EXPORT_SYMBOL
+0x1580fca6 no_seek_end_llseek vmlinux EXPORT_SYMBOL
+0xb139ae87 generic_file_llseek vmlinux EXPORT_SYMBOL
+0x5102a30b do_wait_intr_irq vmlinux EXPORT_SYMBOL
+0x6d6316d1 get_mm_exe_file vmlinux EXPORT_SYMBOL
+0x9e78f12d ip6_tnl_parse_tlv_enc_lim net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x1d0a5b6a sparse_keymap_report_entry drivers/input/sparse-keymap EXPORT_SYMBOL
+0xe21f17aa drm_plane_create_zpos_immutable_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x28e48c3d tc_setup_cb_destroy vmlinux EXPORT_SYMBOL
+0x261386ad nvmem_device_cell_write vmlinux EXPORT_SYMBOL_GPL
+0xdc566a08 rtc_update_irq_enable vmlinux EXPORT_SYMBOL_GPL
+0xf401d94e mpt_config vmlinux EXPORT_SYMBOL
+0xa1bcd198 fc_cpu_mask vmlinux EXPORT_SYMBOL
+0xf17b5f0f _dev_err vmlinux EXPORT_SYMBOL
+0x82fea446 dma_request_slave_channel vmlinux EXPORT_SYMBOL_GPL
+0x7dbbc249 acpi_dev_resource_io vmlinux EXPORT_SYMBOL_GPL
+0x0805f2c8 ecryptfs_get_auth_tok_key vmlinux EXPORT_SYMBOL
+0x999e8297 vfree vmlinux EXPORT_SYMBOL
+0x3b69881d wait_on_page_bit vmlinux EXPORT_SYMBOL
+0x629ed508 hrtimer_sleeper_start_expires vmlinux EXPORT_SYMBOL_GPL
+0x2ab7989d mutex_lock vmlinux EXPORT_SYMBOL
+0x4e155af0 ib_response_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x02036d1b pnfs_generic_pg_readpages fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x3ad77155 fscache_object_retrying_stale fs/fscache/fscache EXPORT_SYMBOL
+0x120b336a __rb_insert_augmented vmlinux EXPORT_SYMBOL
+0x6aff106f scsi_get_host_dev vmlinux EXPORT_SYMBOL
+0x0c94ef45 ata_acpi_cbl_80wire vmlinux EXPORT_SYMBOL_GPL
+0x20ba4f3e rdmsr_on_cpu vmlinux EXPORT_SYMBOL
+0x5ab18e9f simple_symlink_inode_operations vmlinux EXPORT_SYMBOL
+0x678b96ec dma_pool_alloc vmlinux EXPORT_SYMBOL
+0x34c9564e mlx4_mr_hw_change_pd drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x5cdb1f36 ip6_route_lookup vmlinux EXPORT_SYMBOL_GPL
+0x5c001d47 tcp_v4_do_rcv vmlinux EXPORT_SYMBOL
+0x893abbdd devlink_fmsg_u32_pair_put vmlinux EXPORT_SYMBOL_GPL
+0xf23adb04 ehci_handshake vmlinux EXPORT_SYMBOL_GPL
+0x379ef608 ahci_platform_enable_regulators vmlinux EXPORT_SYMBOL_GPL
+0xd2a43dc7 get_device vmlinux EXPORT_SYMBOL_GPL
+0xb3011b4c hvc_alloc vmlinux EXPORT_SYMBOL_GPL
+0xf58237ae virtio_config_changed vmlinux EXPORT_SYMBOL_GPL
+0x6bdef35c acpi_ec_mark_gpe_for_wake vmlinux EXPORT_SYMBOL_GPL
+0x2d4c773a hdmi_spd_infoframe_init vmlinux EXPORT_SYMBOL
+0xf065f629 ioread16be vmlinux EXPORT_SYMBOL
+0x8e5dc8ed iov_iter_gap_alignment vmlinux EXPORT_SYMBOL
+0xc40dfc8d blk_dump_rq_flags vmlinux EXPORT_SYMBOL
+0xb967c2e8 exportfs_decode_fh vmlinux EXPORT_SYMBOL_GPL
+0x4447567f sysfs_create_group vmlinux EXPORT_SYMBOL_GPL
+0x608fd991 ceph_msg_data_add_pages net/ceph/libceph EXPORT_SYMBOL
+0x38d3dce5 g_make_token_header net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL_GPL
+0x78faf84a xprt_release_rqst_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf3da5a5f target_submit_cmd_map_sgls drivers/target/target_core_mod EXPORT_SYMBOL
+0xd8dbfda9 mlx4_get_eqs_per_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x21011160 drm_atomic_state_alloc drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb36cf375 nfs_sync_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xac3c0e1f secure_tcpv6_ts_off vmlinux EXPORT_SYMBOL
+0x13481f82 consume_skb vmlinux EXPORT_SYMBOL
+0x95262121 hid_lookup_quirk vmlinux EXPORT_SYMBOL_GPL
+0xc3b93e48 scsi_add_device vmlinux EXPORT_SYMBOL
+0xc0f27895 __pm_stay_awake vmlinux EXPORT_SYMBOL_GPL
+0xc82bbe6a pci_match_id vmlinux EXPORT_SYMBOL
+0x5fb64f07 pcie_set_mps vmlinux EXPORT_SYMBOL
+0xa84fa2c4 pci_dev_run_wake vmlinux EXPORT_SYMBOL_GPL
+0x8cdcff51 generic_fadvise vmlinux EXPORT_SYMBOL
+0x6bd26752 rpc_alloc_iostats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa9ffc821 nft_trace_enabled net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xe95495bb mdev_unregister_device drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x305a30e3 i2c_add_numbered_adapter drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x05136cbc input_allocate_polled_device drivers/input/input-polldev EXPORT_SYMBOL
+0x67db3b27 mlxsw_core_port_devlink_port_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xbfb60915 cxgbi_ep_connect drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xb99f640b cxgbi_sock_fail_act_open drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xb4fb033d drm_gem_fb_prepare_fb drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL_GPL
+0x4ef4b1f9 amd_iommu_unbind_pasid drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL
+0xd452ee36 __fib6_flush_trees vmlinux EXPORT_SYMBOL
+0xb21f0a15 tcf_action_dump_1 vmlinux EXPORT_SYMBOL
+0xfdff3c32 flow_rule_match_enc_ipv6_addrs vmlinux EXPORT_SYMBOL
+0xfcbc95cd flow_rule_match_enc_ipv4_addrs vmlinux EXPORT_SYMBOL
+0x0fe06fdf usb_unanchor_urb vmlinux EXPORT_SYMBOL_GPL
+0xe8408b34 scsi_check_sense vmlinux EXPORT_SYMBOL_GPL
+0xd84d35bd dax_read_lock vmlinux EXPORT_SYMBOL_GPL
+0x704aacf5 dma_get_slave_channel vmlinux EXPORT_SYMBOL_GPL
+0x942d6b3d pcie_aspm_enabled vmlinux EXPORT_SYMBOL_GPL
+0xf6b4ac31 phy_power_off vmlinux EXPORT_SYMBOL_GPL
+0x92245c1c nf_ct_tmpl_alloc net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xa432baf6 ib_unpack drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5baf4d9b vfio_iommu_group_put drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x39808b3e vfio_iommu_group_get drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x36b84cda dm_array_del drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xb6d5c65d dm_deferred_set_add_work drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x95d47eb2 __tracepoint_mlx5_fs_set_fte drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x9cc846ef fuse_direct_io fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x3219cfe0 mr_mfc_seq_next vmlinux EXPORT_SYMBOL
+0x36db7502 tcp_prot vmlinux EXPORT_SYMBOL
+0x65553d35 root_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0x3ef9c646 mipi_dsi_turn_on_peripheral vmlinux EXPORT_SYMBOL
+0x8b4d00c3 vring_transport_features vmlinux EXPORT_SYMBOL_GPL
+0xcf76af35 textsearch_prepare vmlinux EXPORT_SYMBOL
+0x9a6b71b3 migrate_page_copy vmlinux EXPORT_SYMBOL
+0x617c452b queued_read_lock_slowpath vmlinux EXPORT_SYMBOL
+0x84c1c552 proc_dointvec_ms_jiffies vmlinux EXPORT_SYMBOL
+0xfe1ab7c3 ceph_con_keepalive net/ceph/libceph EXPORT_SYMBOL
+0xbd066d06 drm_flip_work_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x44d07a68 drm_get_edid_switcheroo drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe79b91d1 cryptd_free_skcipher crypto/cryptd EXPORT_SYMBOL_GPL
+0x8f303d40 inet_frag_kill vmlinux EXPORT_SYMBOL
+0x658ac2e2 sas_port_alloc vmlinux EXPORT_SYMBOL
+0xab138843 pci_iounmap vmlinux EXPORT_SYMBOL
+0x5b8e74a5 async_memcpy vmlinux EXPORT_SYMBOL_GPL
+0xcd5aa251 simple_setattr vmlinux EXPORT_SYMBOL
+0x51fa9069 simple_getattr vmlinux EXPORT_SYMBOL
+0xa897e3e7 mempool_free vmlinux EXPORT_SYMBOL
+0xe35d8426 usb_stor_Bulk_reset drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x88a5abe4 mlx5_core_destroy_tir drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4faddeb9 tcf_em_tree_validate vmlinux EXPORT_SYMBOL
+0x840b54b3 llc_sap_open vmlinux EXPORT_SYMBOL
+0x5703d43a neigh_carrier_down vmlinux EXPORT_SYMBOL
+0x3ede52df srp_rport_put vmlinux EXPORT_SYMBOL
+0x5260f597 attribute_container_unregister vmlinux EXPORT_SYMBOL_GPL
+0x8b5e871d driver_for_each_device vmlinux EXPORT_SYMBOL_GPL
+0xfcd1819a hdmi_spd_infoframe_check vmlinux EXPORT_SYMBOL
+0x3f4bd846 gen_pool_first_fit_order_align vmlinux EXPORT_SYMBOL
+0xef92ef33 btree_last vmlinux EXPORT_SYMBOL_GPL
+0xc76056fc bdget_disk vmlinux EXPORT_SYMBOL
+0x6788c017 configfs_register_subsystem vmlinux EXPORT_SYMBOL
+0x97215cd7 __compat_only_sysfs_link_entry_to_kobj vmlinux EXPORT_SYMBOL_GPL
+0xf1b31314 delayacct_on vmlinux EXPORT_SYMBOL_GPL
+0x6a9151e7 rdma_get_service_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x9ea55b3d drm_bridge_mode_set drivers/gpu/drm/drm EXPORT_SYMBOL
+0x532afe10 drm_atomic_helper_connector_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x77e35ccc nlmsvc_unlock_all_by_ip fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x51556d10 nlmsvc_unlock_all_by_sb fs/lockd/lockd EXPORT_SYMBOL_GPL
+0xbcab6ee6 sscanf vmlinux EXPORT_SYMBOL
+0x3bdb2344 fib6_rule_default vmlinux EXPORT_SYMBOL_GPL
+0xcda25655 fib4_rule_default vmlinux EXPORT_SYMBOL_GPL
+0x38b33e1d tcp_rcv_state_process vmlinux EXPORT_SYMBOL
+0xda59a5e7 skb_to_sgvec vmlinux EXPORT_SYMBOL_GPL
+0x8ca51929 agp_alloc_bridge vmlinux EXPORT_SYMBOL
+0xb3863a67 acpi_set_gpe_wake_mask vmlinux EXPORT_SYMBOL
+0xa0447d9f __breadahead vmlinux EXPORT_SYMBOL
+0x628465f7 perf_event_sysfs_show vmlinux EXPORT_SYMBOL_GPL
+0xd63be8ba rpc_bind_new_program net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x26b2ff5e ip6_tnl_encap_add_ops net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x767aba72 ibdev_alert drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x12eec706 drm_gem_dmabuf_release drivers/gpu/drm/drm EXPORT_SYMBOL
+0xd87ae60d xt_check_entry_offsets vmlinux EXPORT_SYMBOL
+0xb646fed0 pfifo_fast_ops vmlinux EXPORT_SYMBOL
+0xfc3bba0f unregister_fib_notifier vmlinux EXPORT_SYMBOL
+0xd691f07e iscsi_boot_destroy_kset vmlinux EXPORT_SYMBOL_GPL
+0x1c3884b4 iscsi_boot_create_initiator vmlinux EXPORT_SYMBOL_GPL
+0xc4086ac0 iscsi_destroy_iface vmlinux EXPORT_SYMBOL_GPL
+0x83bdf930 nvdimm_setup_pfn vmlinux EXPORT_SYMBOL_GPL
+0x550733f2 platform_device_register_full vmlinux EXPORT_SYMBOL_GPL
+0x500c768c apei_exec_read_register vmlinux EXPORT_SYMBOL_GPL
+0x48193639 acpi_lid_open vmlinux EXPORT_SYMBOL
+0x0bfc1d1a check_zeroed_user vmlinux EXPORT_SYMBOL
+0xc865d327 blk_mq_queue_stopped vmlinux EXPORT_SYMBOL
+0x5a988794 vmf_insert_pfn_pud vmlinux EXPORT_SYMBOL_GPL
+0xeb1d39ca vmf_insert_pfn_pmd vmlinux EXPORT_SYMBOL_GPL
+0x08bc0870 compat_put_timespec vmlinux EXPORT_SYMBOL_GPL
+0xfa901b31 compat_get_timespec vmlinux EXPORT_SYMBOL_GPL
+0x6de13801 wait_for_completion vmlinux EXPORT_SYMBOL
+0xe3d6084a __cpuhp_setup_state vmlinux EXPORT_SYMBOL
+0x74f6c65d csum_partial_copy_to_xdr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xbb80a269 i2c_smbus_write_byte_data drivers/i2c/i2c-core EXPORT_SYMBOL
+0xa2e4bf0c drm_mm_reserve_node drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3e50b109 drm_gem_fence_array_add drivers/gpu/drm/drm EXPORT_SYMBOL
+0x108a0acd bstr_printf vmlinux EXPORT_SYMBOL_GPL
+0x1d24c881 ___ratelimit vmlinux EXPORT_SYMBOL
+0x3520b75e dev_get_by_name_rcu vmlinux EXPORT_SYMBOL
+0x5271dfd6 usb_unlink_urb vmlinux EXPORT_SYMBOL_GPL
+0x8b1f38ee agp_add_bridge vmlinux EXPORT_SYMBOL_GPL
+0x9106758d virtio_config_disable vmlinux EXPORT_SYMBOL_GPL
+0x79f697e4 lzorle1x_1_compress vmlinux EXPORT_SYMBOL_GPL
+0x905ce19f iomap_fiemap vmlinux EXPORT_SYMBOL_GPL
+0x82c87ad5 nr_online_nodes vmlinux EXPORT_SYMBOL
+0xa8fb743d des_expand_key lib/crypto/libdes EXPORT_SYMBOL_GPL
+0x074560e7 __nf_nat_mangle_tcp_packet net/netfilter/nf_nat EXPORT_SYMBOL
+0x32f259d5 iscsit_stop_dataout_timer drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x1cd1e22c drm_modeset_lock_all_ctx drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6a9bc881 amd_iommu_free_device drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL
+0xac49eff5 sock_diag_destroy vmlinux EXPORT_SYMBOL_GPL
+0x6b8bf149 netif_receive_skb_list vmlinux EXPORT_SYMBOL
+0x6165f52e devfreq_get_devfreq_by_phandle vmlinux EXPORT_SYMBOL_GPL
+0x6c5dae23 scsi_kmap_atomic_sg vmlinux EXPORT_SYMBOL
+0xd1b3a578 to_nd_region vmlinux EXPORT_SYMBOL_GPL
+0x9f40c38d platform_device_add_data vmlinux EXPORT_SYMBOL_GPL
+0x38f1e560 __vring_new_virtqueue vmlinux EXPORT_SYMBOL_GPL
+0xde750b3c fscrypt_ioctl_get_policy vmlinux EXPORT_SYMBOL
+0xe6116e3d fscrypt_ioctl_set_policy vmlinux EXPORT_SYMBOL
+0x7bcfd6c1 register_kprobes vmlinux EXPORT_SYMBOL_GPL
+0xd3e97c21 hugetlb_cgrp_subsys_on_dfl_key vmlinux
EXPORT_SYMBOL_GPL +0x3a26ed11 sched_clock vmlinux EXPORT_SYMBOL_GPL +0x763b779c put_rpccred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x848d79ad rpcauth_init_cred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xae9fc1e8 drm_gem_create_mmap_offset drivers/gpu/drm/drm EXPORT_SYMBOL +0xe9d10637 drm_helper_force_disable_all drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x27e199a1 xfrm_audit_state_replay_overflow vmlinux EXPORT_SYMBOL_GPL +0x445ebfde sock_create_lite vmlinux EXPORT_SYMBOL +0x4b5044b2 ata_std_qc_defer vmlinux EXPORT_SYMBOL_GPL +0xda872864 security_locked_down vmlinux EXPORT_SYMBOL +0xfbb4b816 jbd2_journal_free_reserved vmlinux EXPORT_SYMBOL +0x466e5342 net_prio_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xb5f27b87 irq_domain_free_irqs_parent vmlinux EXPORT_SYMBOL_GPL +0x58e3306d bit_wait_io vmlinux EXPORT_SYMBOL +0x27a3c2f7 cdrom_release drivers/cdrom/cdrom EXPORT_SYMBOL +0x03d10cb3 mlx4_register_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x7b512f71 cpufreq_unregister_governor vmlinux EXPORT_SYMBOL_GPL +0x69e813d9 fcoe_clean_pending_queue vmlinux EXPORT_SYMBOL_GPL +0x651f2244 _fc_frame_alloc vmlinux EXPORT_SYMBOL +0x72f25c7e pm_clk_destroy vmlinux EXPORT_SYMBOL_GPL +0xf478b769 pci_iomap_wc_range vmlinux EXPORT_SYMBOL_GPL +0x8ddaeb09 iomap_readpages vmlinux EXPORT_SYMBOL_GPL +0x8e836103 d_obtain_alias vmlinux EXPORT_SYMBOL +0x1f0924e0 mmu_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0xa0c9dcda irq_domain_set_info vmlinux EXPORT_SYMBOL +0xc287d96a kvm_set_posted_intr_wakeup_handler vmlinux EXPORT_SYMBOL_GPL +0xb34599f6 nf_conntrack_helper_try_module_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x945125f4 cxgb4_l2t_alloc_switching drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x38800636 serpent_cbc_dec_8way_avx arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL +0x2edc297f ip6_datagram_connect vmlinux EXPORT_SYMBOL_GPL +0x437eb1df ipv6_mod_enabled vmlinux EXPORT_SYMBOL_GPL +0x2ac9544c ip4_datagram_connect vmlinux EXPORT_SYMBOL +0xb3822b19 tcp_timewait_state_process vmlinux EXPORT_SYMBOL +0x7d081372 __tcp_send_ack vmlinux EXPORT_SYMBOL_GPL +0x90856f5d devlink_resources_unregister vmlinux EXPORT_SYMBOL_GPL +0xc9a53685 ata_pci_sff_init_one vmlinux EXPORT_SYMBOL_GPL +0xe7655c01 pnp_start_dev vmlinux EXPORT_SYMBOL +0xecc12c67 security_inode_init_security vmlinux EXPORT_SYMBOL +0x6a460dc5 schedule_hrtimeout vmlinux EXPORT_SYMBOL_GPL +0x8e3d911b arch_phys_wc_index vmlinux EXPORT_SYMBOL_GPL +0x6dc35b25 radix_tree_iter_delete vmlinux EXPORT_SYMBOL +0x2e8c4c6c flow_rule_match_enc_opts vmlinux EXPORT_SYMBOL +0x5c6d6351 starget_for_each_device vmlinux EXPORT_SYMBOL +0x858bd767 subsys_interface_register vmlinux EXPORT_SYMBOL_GPL +0xf3916987 global_cursor_default vmlinux EXPORT_SYMBOL +0xb6e6d99d clk_disable vmlinux EXPORT_SYMBOL_GPL +0xbda47780 nobh_writepage vmlinux EXPORT_SYMBOL +0xfedcdb60 seq_hlist_next_percpu vmlinux EXPORT_SYMBOL +0xa4a41591 clear_page_dirty_for_io vmlinux EXPORT_SYMBOL +0x9a1fc4b4 jiffies_to_timeval vmlinux EXPORT_SYMBOL +0x4127ca34 srcu_init_notifier_head vmlinux EXPORT_SYMBOL_GPL +0x88362718 virtio_transport_shutdown net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x75fde9da xdr_commit_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x63e9a077 adf_init_admin_comms drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x3fa9d639 mlxsw_core_schedule_dw drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xecafe690 mlx5_query_nic_vport_qkey_viol_cntr drivers/net/ethernet/mellanox/mlx5/core/mlx5_core 
EXPORT_SYMBOL_GPL +0xfca6561f drm_syncobj_add_point drivers/gpu/drm/drm EXPORT_SYMBOL +0x75c22987 locks_start_grace fs/nfs_common/grace EXPORT_SYMBOL_GPL +0x2b4509dd devlink_health_reporter_state_update vmlinux EXPORT_SYMBOL_GPL +0x984e74fc ahci_platform_resume vmlinux EXPORT_SYMBOL_GPL +0x6058d47a devm_free_pages vmlinux EXPORT_SYMBOL_GPL +0xafd5ff2c amd_iommu_v2_supported vmlinux EXPORT_SYMBOL +0xb2ff3b36 tty_schedule_flip vmlinux EXPORT_SYMBOL +0xb77573fa dump_skip vmlinux EXPORT_SYMBOL +0x22a52ab9 tracepoint_probe_unregister vmlinux EXPORT_SYMBOL_GPL +0x1f044d01 svc_create_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9528010e ib_port_unregister_module_stat drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xffecc1f6 closure_sub drivers/md/bcache/bcache EXPORT_SYMBOL +0xd8d7ef55 pnfs_write_done_resend_to_mds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x11a0c3f2 nfs_server_copy_userdata fs/nfs/nfs EXPORT_SYMBOL_GPL +0x8e21c9a1 dma_fence_add_callback vmlinux EXPORT_SYMBOL +0x59abb7ee fwnode_property_read_string_array vmlinux EXPORT_SYMBOL_GPL +0x4747f6c5 tty_devnum vmlinux EXPORT_SYMBOL +0xf0eda282 pcie_flr vmlinux EXPORT_SYMBOL_GPL +0xc3c4c6cc hash_algo_name vmlinux EXPORT_SYMBOL_GPL +0x0435cdb6 __vfs_removexattr vmlinux EXPORT_SYMBOL +0x396e2fd7 ms_hyperv vmlinux EXPORT_SYMBOL_GPL +0xccea4e34 perf_get_x86_pmu_capability vmlinux EXPORT_SYMBOL_GPL +0x98e48110 ib_post_send_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdd212078 mlx4_get_slave_port_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xa03d8c0f cxgb_find_route6 drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL +0xe2eacedf cxgbi_ppm_init drivers/net/ethernet/chelsio/libcxgb/libcxgb EXPORT_SYMBOL +0xf65a0590 page_pool_destroy vmlinux EXPORT_SYMBOL +0x077e78af genphy_c45_config_aneg vmlinux EXPORT_SYMBOL_GPL +0xdca8d656 dax_driver_unregister vmlinux EXPORT_SYMBOL_GPL +0xe9aa1db2 iommu_group_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x32e6f1a0 acpi_video_backlight_string vmlinux EXPORT_SYMBOL +0x76eeeb0f sha384_zero_message_hash vmlinux EXPORT_SYMBOL_GPL +0x946dd559 sha224_zero_message_hash vmlinux EXPORT_SYMBOL_GPL +0x8ef0e45e write_dirty_buffer vmlinux EXPORT_SYMBOL +0x7306b258 noop_llseek vmlinux EXPORT_SYMBOL +0xc65d3eed ring_buffer_entries_cpu vmlinux EXPORT_SYMBOL_GPL +0xa97463c9 __siphash_aligned vmlinux EXPORT_SYMBOL +0x9b7bd959 __udp_enqueue_schedule_skb vmlinux EXPORT_SYMBOL_GPL +0x33d6feb4 llc_sap_find vmlinux EXPORT_SYMBOL +0x50e58dd7 cpufreq_driver_fast_switch vmlinux EXPORT_SYMBOL_GPL +0x859a08da input_free_device vmlinux EXPORT_SYMBOL +0x7f433383 phy_remove_link_mode vmlinux EXPORT_SYMBOL +0xfec5c7a7 ata_host_init vmlinux EXPORT_SYMBOL_GPL +0x21ac8b77 iommu_group_get_by_id vmlinux EXPORT_SYMBOL_GPL +0x574609c5 apei_exec_write_register_value vmlinux EXPORT_SYMBOL_GPL +0x969a659b skcipher_alloc_instance_simple vmlinux EXPORT_SYMBOL_GPL +0x4129f5ee kernel_fpu_begin_mask vmlinux EXPORT_SYMBOL_GPL +0x850da3e7 ceph_reset_client_addr net/ceph/libceph EXPORT_SYMBOL +0x95fb54ac adf_cleanup_etr_data drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x34032550 i2c_verify_adapter drivers/i2c/i2c-core EXPORT_SYMBOL +0x4127c92f pppox_unbind_sock drivers/net/ppp/pppox EXPORT_SYMBOL +0x50dec871 mlx5_query_nic_vport_mac_list drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x25ba8b14 drm_atomic_helper_setup_commit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe5965dc7 kvm_vcpu_write_guest_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xa966f784 ip6_route_output_flags vmlinux EXPORT_SYMBOL_GPL 
+0x3e9110fa __hw_addr_unsync vmlinux EXPORT_SYMBOL +0x37c59204 usb_hid_driver vmlinux EXPORT_SYMBOL_GPL +0x7deff673 dm_consume_args vmlinux EXPORT_SYMBOL +0x919c58f3 __clzsi2 vmlinux EXPORT_SYMBOL +0xc4777aa9 __ctzsi2 vmlinux EXPORT_SYMBOL +0xa4350fd3 async_trigger_callback vmlinux EXPORT_SYMBOL_GPL +0xedb5e912 security_kernel_read_file vmlinux EXPORT_SYMBOL_GPL +0x15ba50a6 jiffies vmlinux EXPORT_SYMBOL +0x867ba324 cxgb4_l2t_get drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xa619313c drm_dp_mst_allocate_vcpi drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x76f40744 ocfs2_dlm_lvb fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xf999267e pnfs_generic_commit_release fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xc1ff8bc0 nfs_get_lock_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x16d8c891 tcp_v6_syn_recv_sock vmlinux EXPORT_SYMBOL +0xe2e10fe9 dev_queue_xmit vmlinux EXPORT_SYMBOL +0xec7e6706 devm_device_remove_group vmlinux EXPORT_SYMBOL_GPL +0x13d0adf7 __kfifo_out vmlinux EXPORT_SYMBOL +0xe06141e9 security_sk_clone vmlinux EXPORT_SYMBOL +0xf900a9a3 proc_mkdir vmlinux EXPORT_SYMBOL +0x68fa4299 bd_finish_claiming vmlinux EXPORT_SYMBOL +0x734cafc4 dummy_irq_chip vmlinux EXPORT_SYMBOL_GPL +0xeb86a470 sched_smt_present vmlinux EXPORT_SYMBOL_GPL +0x4eac5fc1 cpu_mitigations_auto_nosmt vmlinux EXPORT_SYMBOL_GPL +0x15974b81 rdma_iw_cm_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x65e6f315 mlx5_fpga_mem_read drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x42a8d9d3 mlx5_query_hca_vport_context drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x4b793042 mlx4_flow_steer_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xd4c9681a __serpent_setkey crypto/serpent_generic EXPORT_SYMBOL_GPL +0xfe729ed6 __camellia_enc_blk arch/x86/crypto/camellia-x86_64 EXPORT_SYMBOL_GPL +0x90688bcd devlink_info_driver_name_put vmlinux EXPORT_SYMBOL_GPL +0x44a8a78d usb_hcd_start_port_resume vmlinux EXPORT_SYMBOL_GPL +0x7e92994b fcoe_wwn_from_mac vmlinux EXPORT_SYMBOL_GPL +0x060cfdfe phy_destroy vmlinux EXPORT_SYMBOL_GPL +0x4246eade blk_integrity_merge_bio vmlinux EXPORT_SYMBOL +0x0d4e0a78 blk_rq_map_sg vmlinux EXPORT_SYMBOL +0x01e2349d get_cached_acl_rcu vmlinux EXPORT_SYMBOL +0xd8985923 bd_start_claiming vmlinux EXPORT_SYMBOL +0xb0707e09 get_mem_cgroup_from_mm vmlinux EXPORT_SYMBOL +0x54955855 alarm_start_relative vmlinux EXPORT_SYMBOL_GPL +0xdf6b082f proc_dointvec_jiffies vmlinux EXPORT_SYMBOL +0x5205f7ed __tracepoint_pnfs_mds_fallback_write_done fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xfd1c20b4 tcp_peek_len vmlinux EXPORT_SYMBOL +0xe37483af devlink_traps_register vmlinux EXPORT_SYMBOL_GPL +0x2754f1d1 dev_change_carrier vmlinux EXPORT_SYMBOL +0xaeabefcc __napi_schedule vmlinux EXPORT_SYMBOL +0x6e533756 sk_stream_wait_memory vmlinux EXPORT_SYMBOL +0xed591f0e hid_bus_type vmlinux EXPORT_SYMBOL +0xb0de7518 dev_pm_opp_put_supported_hw vmlinux EXPORT_SYMBOL_GPL +0xa2a39d1c usb_set_interface vmlinux EXPORT_SYMBOL_GPL +0xc2a17ebe seqno_fence_ops vmlinux EXPORT_SYMBOL +0x8f820d4a dma_resv_wait_timeout_rcu vmlinux EXPORT_SYMBOL_GPL +0x61d82d1c tty_unlock vmlinux EXPORT_SYMBOL +0xb23027c1 kstrtos16_from_user vmlinux EXPORT_SYMBOL +0xa084749a __bitmap_or vmlinux EXPORT_SYMBOL +0x3df90aae security_req_classify_flow vmlinux EXPORT_SYMBOL +0x0dfaa611 get_dcookie vmlinux EXPORT_SYMBOL_GPL +0xd00c2687 trace_event_raw_init vmlinux EXPORT_SYMBOL_GPL +0xeff6d438 relay_late_setup_files vmlinux EXPORT_SYMBOL_GPL +0x8f0492cb nf_ct_kill_acct net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x14878009 
amd_report_gart_errors drivers/edac/edac_mce_amd EXPORT_SYMBOL_GPL +0x72318ee5 __fscache_alloc_page fs/fscache/fscache EXPORT_SYMBOL +0xc29bf967 strspn vmlinux EXPORT_SYMBOL +0x754d539c strlen vmlinux EXPORT_SYMBOL +0x5f83c98d netlink_broadcast_filtered vmlinux EXPORT_SYMBOL +0x4761f17c register_netevent_notifier vmlinux EXPORT_SYMBOL_GPL +0xda835178 sata_lpm_ignore_phy_events vmlinux EXPORT_SYMBOL_GPL +0x848d372e iowrite8 vmlinux EXPORT_SYMBOL +0xc16410b9 ZSTD_getDictID_fromDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x3f1baa97 iscsit_handle_task_mgt_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x021c5c26 drm_vma_offset_add drivers/gpu/drm/drm EXPORT_SYMBOL +0xedbaee5e nla_strcmp vmlinux EXPORT_SYMBOL +0xc0ff12fb nla_strdup vmlinux EXPORT_SYMBOL +0x2464da17 gen_pool_size vmlinux EXPORT_SYMBOL_GPL +0xfeebc7c4 __kfifo_from_user_r vmlinux EXPORT_SYMBOL +0xcc445ceb __sg_page_iter_dma_next vmlinux EXPORT_SYMBOL +0x278679e3 crypto_ahash_type vmlinux EXPORT_SYMBOL_GPL +0x99daa9bf try_offline_node vmlinux EXPORT_SYMBOL +0x01d5ee2a svc_return_autherr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x30b83371 mlx5_query_nic_vport_system_image_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x4eed8a8b mlx5_core_detach_mcg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x1042637b drm_simple_display_pipe_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x3b4babdb gfn_to_pfn_memslot arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x3a3026d4 gfn_to_hva_memslot arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xba85525b tcp_get_cookie_sock vmlinux EXPORT_SYMBOL +0x7f111873 fib_add_nexthop vmlinux EXPORT_SYMBOL_GPL +0x21e4eca7 __skb_flow_dissect vmlinux EXPORT_SYMBOL +0x375e66ac skb_trim vmlinux EXPORT_SYMBOL +0x5a22f0d2 usb_enable_autosuspend vmlinux EXPORT_SYMBOL_GPL +0x3192ae9e fc_seq_assign vmlinux EXPORT_SYMBOL +0x54175c5f acpi_read_bit_register vmlinux EXPORT_SYMBOL +0xf3a4176e blk_mq_unfreeze_queue vmlinux EXPORT_SYMBOL_GPL +0x015ddf14 perf_tp_event vmlinux EXPORT_SYMBOL_GPL +0x445a81ce boot_cpu_data vmlinux EXPORT_SYMBOL +0x3e4d0db6 svc_proc_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x56860d2d i2c_get_device_id drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x4e20bcf8 radix_tree_tag_set vmlinux EXPORT_SYMBOL +0x2bb1f1fc xfrm_policy_delete vmlinux EXPORT_SYMBOL +0x30eda7a1 inet_frags_init vmlinux EXPORT_SYMBOL +0x44d19b9f power_supply_changed vmlinux EXPORT_SYMBOL_GPL +0x791e00a8 ata_qc_get_active vmlinux EXPORT_SYMBOL_GPL +0xb501b2df nd_cmd_dimm_desc vmlinux EXPORT_SYMBOL_GPL +0x382b798a pci_scan_root_bus_bridge vmlinux EXPORT_SYMBOL +0x4e3567f7 match_int vmlinux EXPORT_SYMBOL +0x7b13d890 blk_queue_required_elevator_features vmlinux EXPORT_SYMBOL_GPL +0x4d7e5e29 shash_free_instance vmlinux EXPORT_SYMBOL_GPL +0x19fa79dd debugfs_create_x64 vmlinux EXPORT_SYMBOL_GPL +0xb274012d fscrypt_ioctl_remove_key vmlinux EXPORT_SYMBOL_GPL +0x73924a5d rdma_put_gid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5475ba9e dm_block_location drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x41705ff0 tcf_idrinfo_destroy vmlinux EXPORT_SYMBOL +0xd59009cf skb_cow_data vmlinux EXPORT_SYMBOL_GPL +0x28f5c429 scsi_internal_device_block_nowait vmlinux EXPORT_SYMBOL_GPL +0x0f6b8552 pci_fixup_cardbus vmlinux EXPORT_SYMBOL +0xa62bfc18 encrypt_blob vmlinux EXPORT_SYMBOL_GPL +0x6f2fac7e posix_acl_update_mode vmlinux EXPORT_SYMBOL +0xcade6d41 __tracepoint_pelt_cfs_tp vmlinux EXPORT_SYMBOL_GPL +0x04d8c750 release_perfctr_nmi vmlinux EXPORT_SYMBOL +0x1a513fa4 nf_conncount_count 
net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x3fcd7191 rdma_connect drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x8875d8c0 i2c_get_adapter drivers/i2c/i2c-core EXPORT_SYMBOL +0xd13f9985 drm_edid_block_valid drivers/gpu/drm/drm EXPORT_SYMBOL +0xb86f74c5 free_cpumask_var vmlinux EXPORT_SYMBOL +0x868acba5 get_options vmlinux EXPORT_SYMBOL +0x89ef0318 ip6_datagram_release_cb vmlinux EXPORT_SYMBOL_GPL +0xdcd11f0a ip4_datagram_release_cb vmlinux EXPORT_SYMBOL_GPL +0xd4bddf5c dm_bufio_issue_flush vmlinux EXPORT_SYMBOL_GPL +0x5e71d44b timespec64_trunc vmlinux EXPORT_SYMBOL +0xd697f2ea vfs_iter_write vmlinux EXPORT_SYMBOL +0xf5e7ea40 ktime_get_coarse_ts64 vmlinux EXPORT_SYMBOL +0x0dc25523 setup_irq vmlinux EXPORT_SYMBOL_GPL +0xece4ef02 udp_sock_create6 net/ipv6/ip6_udp_tunnel EXPORT_SYMBOL_GPL +0xc5d65e4f nft_unregister_expr net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xfd9c411c drm_fb_helper_modinit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x328b0b9c drm_fb_helper_sys_copyarea drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x5d1cd2bd fib_new_table vmlinux EXPORT_SYMBOL_GPL +0x5866d922 xdp_do_redirect vmlinux EXPORT_SYMBOL_GPL +0x5086670d input_unregister_handler vmlinux EXPORT_SYMBOL +0xee714cd0 agp_generic_alloc_by_type vmlinux EXPORT_SYMBOL +0x0059d726 acpi_get_pci_dev vmlinux EXPORT_SYMBOL_GPL +0x3955fcf6 __kfifo_in_r vmlinux EXPORT_SYMBOL +0x826429e5 iomap_dio_iopoll vmlinux EXPORT_SYMBOL_GPL +0x6531cb53 bh_uptodate_or_lock vmlinux EXPORT_SYMBOL +0xb594e377 mem_cgroup_from_task vmlinux EXPORT_SYMBOL +0xeecba48e spc_emulate_evpd_83 drivers/target/target_core_mod EXPORT_SYMBOL +0x8cc33ebe __drm_atomic_helper_crtc_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x9d70541a native_save_fl vmlinux EXPORT_SYMBOL +0x77f51c64 xfrm_state_walk_init vmlinux EXPORT_SYMBOL +0xc48165a2 udp4_hwcsum vmlinux EXPORT_SYMBOL_GPL +0x4f81b817 __tracepoint_br_fdb_add vmlinux EXPORT_SYMBOL_GPL +0x38af5c10 rc_register_device vmlinux EXPORT_SYMBOL_GPL +0xc3b06a11 rtc_read_alarm vmlinux EXPORT_SYMBOL_GPL +0x24ad9077 ufshcd_dme_set_attr vmlinux EXPORT_SYMBOL_GPL +0xad76f174 device_match_of_node vmlinux EXPORT_SYMBOL_GPL +0x48a91171 string_get_size vmlinux EXPORT_SYMBOL +0x57521ac6 security_path_mknod vmlinux EXPORT_SYMBOL +0x64ffe611 fat_fill_super vmlinux EXPORT_SYMBOL_GPL +0x93bd7778 dquot_initialize vmlinux EXPORT_SYMBOL +0x460b6a10 vmf_insert_mixed vmlinux EXPORT_SYMBOL +0xde57b5f5 nft_parse_u32_check net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xe13db3d7 nf_log_l2packet net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0x317b3b18 vhost_init_device_iotlb drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x8d6fd6d8 __sock_cmsg_send vmlinux EXPORT_SYMBOL +0x15eeeea0 pci_bus_write_config_word vmlinux EXPORT_SYMBOL +0x3a2f6702 sg_alloc_table vmlinux EXPORT_SYMBOL +0xe7373223 set_get_execve_info_func vmlinux EXPORT_SYMBOL +0xc2f487ea handle_simple_irq vmlinux EXPORT_SYMBOL_GPL +0x286cc647 async_synchronize_cookie_domain vmlinux EXPORT_SYMBOL_GPL +0xdafc4055 call_usermodehelper_exec vmlinux EXPORT_SYMBOL +0x9052a334 ceph_open_session net/ceph/libceph EXPORT_SYMBOL +0xc6751e5e transport_send_check_condition_and_sense drivers/target/target_core_mod EXPORT_SYMBOL +0xa92e3a7a drm_gem_prime_fd_to_handle drivers/gpu/drm/drm EXPORT_SYMBOL +0xb3ed865c drm_scdc_write drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x7ce54da4 kvm_mtrr_get_guest_memory_type arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x29272e87 br_vlan_get_pvid vmlinux EXPORT_SYMBOL_GPL +0xf37322d4 dev_pm_opp_put_regulators vmlinux EXPORT_SYMBOL_GPL +0x26a84c58 
power_supply_batinfo_ocv2cap vmlinux EXPORT_SYMBOL_GPL +0xf533fa49 fc_set_mfs vmlinux EXPORT_SYMBOL +0xe59f5074 fc_disc_init vmlinux EXPORT_SYMBOL +0xdeaf646a pci_release_region vmlinux EXPORT_SYMBOL +0x4cf87ccf io_uring_get_socket vmlinux EXPORT_SYMBOL +0x141f38bf ktime_get_raw_fast_ns vmlinux EXPORT_SYMBOL_GPL +0xbcac6160 pm_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL +0x9ff92495 drm_release drivers/gpu/drm/drm EXPORT_SYMBOL +0xe4f4665b ipmi_validate_addr drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x74530ecd fscache_op_debug_id fs/fscache/fscache EXPORT_SYMBOL +0xa18ce46e inet_addr_type_table vmlinux EXPORT_SYMBOL +0x3a8caaed bpf_redirect_info vmlinux EXPORT_SYMBOL_GPL +0xd376cc15 power_supply_unregister vmlinux EXPORT_SYMBOL_GPL +0xeb6a5128 genphy_c45_read_status vmlinux EXPORT_SYMBOL_GPL +0xbdc16c48 fc_elsct_init vmlinux EXPORT_SYMBOL +0x4f347d53 ata_host_put vmlinux EXPORT_SYMBOL_GPL +0x8eee3399 dax_read_unlock vmlinux EXPORT_SYMBOL_GPL +0xc569d8ce __clk_get_name vmlinux EXPORT_SYMBOL_GPL +0x486075c8 gen_pool_dma_alloc vmlinux EXPORT_SYMBOL +0xff1e9dd8 seq_list_start vmlinux EXPORT_SYMBOL +0x10e709cd irq_find_matching_fwspec vmlinux EXPORT_SYMBOL_GPL +0xc52751ad kthread_queue_work vmlinux EXPORT_SYMBOL_GPL +0x5f7845f9 register_ip_vs_scheduler net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xc6b23eef nf_ct_port_nlattr_to_tuple net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x907df803 rdma_event_msg drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x3a2ee17b mlx5_del_flow_rules drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x0049ca83 xfrm_aead_get_byname vmlinux EXPORT_SYMBOL_GPL +0xb426abd9 km_state_notify vmlinux EXPORT_SYMBOL +0x3b1323dc lwtunnel_encap_del_ops vmlinux EXPORT_SYMBOL_GPL +0x58d13ea7 cpuidle_enable_device vmlinux EXPORT_SYMBOL_GPL +0x952da871 dma_run_dependencies vmlinux EXPORT_SYMBOL_GPL +0xd32694be sbitmap_prepare_to_wait vmlinux EXPORT_SYMBOL_GPL +0x7cb803de btree_grim_visitor vmlinux EXPORT_SYMBOL_GPL +0xf99c7049 bpf_prog_add vmlinux EXPORT_SYMBOL_GPL +0x24709b2f trace_seq_putmem vmlinux EXPORT_SYMBOL_GPL +0x5a245f6d _raw_write_lock vmlinux EXPORT_SYMBOL +0x51bd55b5 completion_done vmlinux EXPORT_SYMBOL +0x42313219 kvm_read_and_reset_pf_reason vmlinux EXPORT_SYMBOL_GPL +0x1a0faaff mlx5_eq_enable drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xfe6981d9 mlx4_alloc_cmd_mailbox drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x10004bac drm_connector_cleanup drivers/gpu/drm/drm EXPORT_SYMBOL +0x1f6cd8f1 pnfs_set_layoutcommit fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x603a51aa nfs_setsecurity fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb94339c4 qdisc_put_stab vmlinux EXPORT_SYMBOL +0x36cec28c mptscsih_dev_reset vmlinux EXPORT_SYMBOL +0xfadcfc04 mpt_detach vmlinux EXPORT_SYMBOL +0xce4cdb8e fb_find_best_mode vmlinux EXPORT_SYMBOL +0xa68b198f seq_printf vmlinux EXPORT_SYMBOL +0x70c8d6db inet_diag_msg_common_fill net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x47fd6eee mlxsw_core_fw_flash_end drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x9b36ec1d drm_mode_validate_size drivers/gpu/drm/drm EXPORT_SYMBOL +0xd5cb9878 reprogram_gp_counter arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xe0eb594d kvm_lapic_set_eoi arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xf0fb49f9 kvm_set_apic_base arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x8cc9c2a3 kvm_get_apic_base arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x97623558 xas_create_range vmlinux EXPORT_SYMBOL_GPL +0x0b742fd7 simple_strtol vmlinux EXPORT_SYMBOL +0xa80d7dae mptscsih_raid_id_to_num vmlinux EXPORT_SYMBOL +0x1d5e017d 
nd_device_register vmlinux EXPORT_SYMBOL +0xbcb28e42 devres_release vmlinux EXPORT_SYMBOL_GPL +0xa0be9b76 acpi_device_set_power vmlinux EXPORT_SYMBOL +0xf1deafc8 pci_dev_driver vmlinux EXPORT_SYMBOL +0x9adf244d setattr_prepare vmlinux EXPORT_SYMBOL +0x4b593264 ilookup5_nowait vmlinux EXPORT_SYMBOL +0x01848a8e local_apic_timer_c2_ok vmlinux EXPORT_SYMBOL_GPL +0x627484b0 gretap_fb_dev_create net/ipv4/ip_gre EXPORT_SYMBOL_GPL +0x8bcf148e ib_send_cm_lap drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x99dacfbb mlx5_query_hca_vport_gid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x7b982195 drm_mm_init drivers/gpu/drm/drm EXPORT_SYMBOL +0xae306e0f fuse_dev_operations fs/fuse/fuse EXPORT_SYMBOL_GPL +0x7c9722ba nfs_dreq_bytes_left fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa8e9e1ae send_implementation_id fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1e3152f4 __tracepoint_kvm_write_tsc_offset arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xded39a6b gen_kill_estimator vmlinux EXPORT_SYMBOL +0x1e4f713b devm_add_action vmlinux EXPORT_SYMBOL_GPL +0xb97f7045 acpi_install_gpe_handler vmlinux EXPORT_SYMBOL +0xa25f22e0 pci_set_power_state vmlinux EXPORT_SYMBOL +0xf05c32ad rdmsr_on_cpus vmlinux EXPORT_SYMBOL +0x4ea86fb2 blk_queue_alignment_offset vmlinux EXPORT_SYMBOL +0x1e48474d dquot_alloc vmlinux EXPORT_SYMBOL +0x7e86937d wait_on_page_writeback vmlinux EXPORT_SYMBOL_GPL +0xa5430547 unwind_get_return_address vmlinux EXPORT_SYMBOL_GPL +0xa2365f44 btracker_issue drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x40d33802 devlink_unregister vmlinux EXPORT_SYMBOL_GPL +0xbd43e10c fc_lport_logo_resp vmlinux EXPORT_SYMBOL +0xdd849d51 scsi_get_sense_info_fld vmlinux EXPORT_SYMBOL +0x7a6ef547 mipi_dsi_shutdown_peripheral vmlinux EXPORT_SYMBOL +0x4f55166f acpi_set_current_resources vmlinux EXPORT_SYMBOL +0x9e0b96fb pci_bus_size_bridges vmlinux EXPORT_SYMBOL +0x7d628444 memcpy_fromio vmlinux EXPORT_SYMBOL +0xb49a4b08 blk_mq_sched_free_hctx_data vmlinux EXPORT_SYMBOL_GPL +0x898b64d6 sysfs_create_groups vmlinux EXPORT_SYMBOL_GPL +0x3477439e vfs_setpos vmlinux EXPORT_SYMBOL +0xe5c4cf93 vm_node_stat vmlinux EXPORT_SYMBOL +0xa0fbac79 wake_up_bit vmlinux EXPORT_SYMBOL +0x30cf804f slow_virt_to_phys vmlinux EXPORT_SYMBOL_GPL +0xcd2c07e7 transport_alloc_session drivers/target/target_core_mod EXPORT_SYMBOL +0x551dfd56 mlx5_modify_nic_vport_mac_address drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb6661060 drm_mode_create_content_type_property drivers/gpu/drm/drm EXPORT_SYMBOL +0x900a5f02 dev_pm_opp_init_cpufreq_table vmlinux EXPORT_SYMBOL_GPL +0x7665262e __hvc_resize vmlinux EXPORT_SYMBOL_GPL +0x120c25c4 tty_do_resize vmlinux EXPORT_SYMBOL +0x94d1a0f9 __acpi_nvdimm_notify vmlinux EXPORT_SYMBOL_GPL +0x6aeefac4 zlib_deflateReset vmlinux EXPORT_SYMBOL +0x774da6f5 fscrypt_put_encryption_info vmlinux EXPORT_SYMBOL +0x24190a30 fscrypt_get_encryption_info vmlinux EXPORT_SYMBOL +0xcdca3691 nr_irqs vmlinux EXPORT_SYMBOL_GPL +0x963dcba1 _raw_spin_trylock_bh vmlinux EXPORT_SYMBOL +0x1f4dcda5 osd_req_op_extent_osd_data_bvec_pos net/ceph/libceph EXPORT_SYMBOL +0x800bcdc9 ib_cancel_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x871ab41a drm_rect_intersect drivers/gpu/drm/drm EXPORT_SYMBOL +0xe5611138 nfs_post_op_update_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdaf5c16e __cookie_v4_check vmlinux EXPORT_SYMBOL_GPL +0xba2a6baa xt_match_to_user vmlinux EXPORT_SYMBOL_GPL +0x56c8799d scsi_kunmap_atomic_sg vmlinux EXPORT_SYMBOL +0xe9ebc845 scsi_block_when_processing_errors vmlinux EXPORT_SYMBOL +0xde9b17ed agp3_generic_fetch_size vmlinux 
EXPORT_SYMBOL +0xcde26600 cppc_get_transition_latency vmlinux EXPORT_SYMBOL_GPL +0xbe75cb39 wait_for_key_construction vmlinux EXPORT_SYMBOL +0x2d0ba9e8 jbd2_journal_set_triggers vmlinux EXPORT_SYMBOL +0x80bbcf32 f_setown vmlinux EXPORT_SYMBOL +0x1a1bac9c ZSTD_decompressDCtx lib/zstd/zstd_decompress EXPORT_SYMBOL +0xf1053760 wpan_phy_new net/ieee802154/ieee802154 EXPORT_SYMBOL +0x05e807a9 xdr_encode_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xa20496fa edac_device_free_ctl_info drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x670d1b0d mlx4_sync_pkey_table drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xe49d23a6 ip6_dst_hoplimit vmlinux EXPORT_SYMBOL +0x8f83ce10 nvmem_device_read vmlinux EXPORT_SYMBOL_GPL +0xbca6a925 usb_clear_halt vmlinux EXPORT_SYMBOL_GPL +0x18e4f8aa swphy_read_reg vmlinux EXPORT_SYMBOL_GPL +0x3dfad0e5 fc_linkdown vmlinux EXPORT_SYMBOL +0x0f6a45a1 iommu_get_domain_for_dev vmlinux EXPORT_SYMBOL_GPL +0xd643239a acpi_leave_sleep_state vmlinux EXPORT_SYMBOL +0xcd37d65c elv_unregister vmlinux EXPORT_SYMBOL_GPL +0x7c57421b x509_cert_parse vmlinux EXPORT_SYMBOL_GPL +0x739ab77d sched_autogroup_detach vmlinux EXPORT_SYMBOL +0xadc044b7 vfio_set_irqs_validate_and_prepare drivers/vfio/vfio EXPORT_SYMBOL +0x504414af ip6_datagram_connect_v6_only vmlinux EXPORT_SYMBOL_GPL +0x4cc2b9b6 flow_rule_match_vlan vmlinux EXPORT_SYMBOL +0xee742662 dev_mc_sync_multiple vmlinux EXPORT_SYMBOL +0x2ba9d78e dev_uc_sync_multiple vmlinux EXPORT_SYMBOL +0xd3a6b6b8 nd_device_unregister vmlinux EXPORT_SYMBOL +0x05a0c61c free_iova_fast vmlinux EXPORT_SYMBOL_GPL +0x52d1ce8d devm_acpi_dma_controller_register vmlinux EXPORT_SYMBOL_GPL +0xd21c5139 iowrite64_lo_hi vmlinux EXPORT_SYMBOL +0x109382d0 submit_bh vmlinux EXPORT_SYMBOL +0x82fe254f mnt_clone_write vmlinux EXPORT_SYMBOL_GPL +0xb48eae46 page_mkclean vmlinux EXPORT_SYMBOL_GPL +0xb40c6376 cpuset_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0x45fb8985 rdma_nl_chk_listeners drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xff861f74 sys_fillrect drivers/video/fbdev/core/sysfillrect EXPORT_SYMBOL +0x7665ee72 crypto_dh_decode_key crypto/dh_generic EXPORT_SYMBOL_GPL +0xc5c7fb0c __tracepoint_kvm_nested_vmexit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xa5526619 rb_insert_color vmlinux EXPORT_SYMBOL +0xfd6400ea mr_mfc_find_any_parent vmlinux EXPORT_SYMBOL +0x2456fe1e unregister_8022_client vmlinux EXPORT_SYMBOL +0xc5449975 passthru_features_check vmlinux EXPORT_SYMBOL +0x3c2be99c skb_checksum_setup vmlinux EXPORT_SYMBOL +0xae5e62b1 md_cluster_ops vmlinux EXPORT_SYMBOL +0x7e8d8619 usb_anchor_empty vmlinux EXPORT_SYMBOL_GPL +0x541b6551 fc_seq_set_resp vmlinux EXPORT_SYMBOL +0x138e0957 dax_write_cache_enabled vmlinux EXPORT_SYMBOL_GPL +0xe0b1c103 clk_set_max_rate vmlinux EXPORT_SYMBOL_GPL +0x3ea5196d apei_osc_setup vmlinux EXPORT_SYMBOL_GPL +0x1cb2c6d8 kvasprintf vmlinux EXPORT_SYMBOL +0x5323bdfd skcipher_walk_aead_encrypt vmlinux EXPORT_SYMBOL_GPL +0xad0adc4a debugfs_write_file_bool vmlinux EXPORT_SYMBOL_GPL +0xa7f72878 page_get_link vmlinux EXPORT_SYMBOL +0xe4e6f859 dma_direct_sync_single_for_cpu vmlinux EXPORT_SYMBOL +0xc2986eee kthread_flush_worker vmlinux EXPORT_SYMBOL_GPL +0xd919806a amd_cache_northbridges vmlinux EXPORT_SYMBOL_GPL +0x1c06b84f svcauth_unix_set_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x35eff5e0 nf_osf_fingers net/netfilter/nfnetlink_osf EXPORT_SYMBOL_GPL +0x613e1c53 drm_helper_probe_single_connector_modes drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x6d927027 __xfrm_init_state vmlinux EXPORT_SYMBOL +0x5c8cf3b8 xdp_rxq_info_reg vmlinux 
EXPORT_SYMBOL_GPL +0xc2a11516 hid_dump_field vmlinux EXPORT_SYMBOL_GPL +0x22309cad __scsi_init_queue vmlinux EXPORT_SYMBOL_GPL +0xa7fcf8a6 ata_sff_dma_pause vmlinux EXPORT_SYMBOL_GPL +0x07646cee ata_tf_to_fis vmlinux EXPORT_SYMBOL_GPL +0x5790e7a0 pci_unlock_rescan_remove vmlinux EXPORT_SYMBOL_GPL +0xbfb4308e __module_text_address vmlinux EXPORT_SYMBOL_GPL +0x66d87d38 symbol_put_addr vmlinux EXPORT_SYMBOL_GPL +0xfbdfc558 hrtimer_start_range_ns vmlinux EXPORT_SYMBOL_GPL +0x38f2d94e ceph_file_to_extents net/ceph/libceph EXPORT_SYMBOL +0xa8d0fb6a ieee802154_free_hw net/mac802154/mac802154 EXPORT_SYMBOL +0x36998b17 i2c_smbus_read_word_data drivers/i2c/i2c-core EXPORT_SYMBOL +0xfb63888b drm_property_blob_get drivers/gpu/drm/drm EXPORT_SYMBOL +0x992c9ade drm_property_blob_put drivers/gpu/drm/drm EXPORT_SYMBOL +0x151c5f56 drm_connector_init_with_ddc drivers/gpu/drm/drm EXPORT_SYMBOL +0x4ca264ba llc_set_station_handler vmlinux EXPORT_SYMBOL +0x538059f9 device_for_each_child_reverse vmlinux EXPORT_SYMBOL_GPL +0x5aa55b77 tty_port_unregister_device vmlinux EXPORT_SYMBOL_GPL +0xdbfcd602 tty_buffer_request_room vmlinux EXPORT_SYMBOL_GPL +0x3b31ba10 d_alloc vmlinux EXPORT_SYMBOL +0xe440bd7c kmem_cache_alloc_node_trace vmlinux EXPORT_SYMBOL +0x5216285b __alloc_pages_nodemask vmlinux EXPORT_SYMBOL +0x71d25547 unuse_mm vmlinux EXPORT_SYMBOL_GPL +0xcb2340b8 drm_rect_debug_print drivers/gpu/drm/drm EXPORT_SYMBOL +0x7fd6f62d drm_simple_display_pipe_attach_bridge drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x8ac7fb9a fc_host_post_event vmlinux EXPORT_SYMBOL +0x1a551022 ring_buffer_lock_reserve vmlinux EXPORT_SYMBOL_GPL +0x59c6aff4 irq_set_affinity_hint vmlinux EXPORT_SYMBOL_GPL +0x69b8d1de finish_swait vmlinux EXPORT_SYMBOL +0x56bd5947 __tracepoint_bcache_gc_copy_collision drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x341112db cxgbi_get_conn_stats drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x887ce7ac fscache_object_lookup_negative fs/fscache/fscache EXPORT_SYMBOL +0xc2d942cd inet_sk_rx_dst_set vmlinux EXPORT_SYMBOL +0x07c7b044 netdev_port_same_parent_id vmlinux EXPORT_SYMBOL +0x76d9b876 clk_set_rate vmlinux EXPORT_SYMBOL_GPL +0x556e4390 clk_get_rate vmlinux EXPORT_SYMBOL_GPL +0x8b928f10 acpi_driver_match_device vmlinux EXPORT_SYMBOL_GPL +0xd18599fb blk_execute_rq_nowait vmlinux EXPORT_SYMBOL_GPL +0x1cde80af blk_set_stacking_limits vmlinux EXPORT_SYMBOL +0x0f2c19c6 security_inode_mkdir vmlinux EXPORT_SYMBOL_GPL +0xf348ff41 bpf_stats_enabled_key vmlinux EXPORT_SYMBOL +0xb72c162e ceph_buffer_release net/ceph/libceph EXPORT_SYMBOL +0xcf038e6a xdr_stream_decode_opaque net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2f9ef44e edac_pci_release_generic_ctl drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xf388b18b ipmi_destroy_user drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xfbf9d09f nfs_access_add_cache fs/nfs/nfs EXPORT_SYMBOL_GPL +0xdf2f41d6 xsk_umem_peek_addr vmlinux EXPORT_SYMBOL +0xdbe1ffca tcf_chain_get_by_act vmlinux EXPORT_SYMBOL +0x7981a1de kernel_sendpage vmlinux EXPORT_SYMBOL +0x958c7378 ata_print_version vmlinux EXPORT_SYMBOL +0xf6f87528 nvdimm_has_flush vmlinux EXPORT_SYMBOL_GPL +0x66c01331 pci_choose_state vmlinux EXPORT_SYMBOL +0x69b474b5 dquot_quotactl_sysfile_ops vmlinux EXPORT_SYMBOL +0x73b26dba delete_from_page_cache vmlinux EXPORT_SYMBOL +0xa9320d27 ktime_get_seconds vmlinux EXPORT_SYMBOL_GPL +0xa096b889 wait_for_completion_killable vmlinux EXPORT_SYMBOL +0x693c3961 nf_ct_helper_hash net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x899a5f3d kvm_can_post_timer_interrupt arch/x86/kvm/kvm 
EXPORT_SYMBOL_GPL +0xd8cef6e1 clear_user vmlinux EXPORT_SYMBOL +0x38bf1388 iscsi_complete_scsi_task vmlinux EXPORT_SYMBOL_GPL +0xb728a4aa ahci_platform_shutdown vmlinux EXPORT_SYMBOL_GPL +0x5907c14d uart_insert_char vmlinux EXPORT_SYMBOL_GPL +0x41f97691 logfc vmlinux EXPORT_SYMBOL +0x949f7342 __alloc_percpu vmlinux EXPORT_SYMBOL_GPL +0xe02ba436 trace_print_hex_seq vmlinux EXPORT_SYMBOL +0x6ebe366f ktime_get_mono_fast_ns vmlinux EXPORT_SYMBOL_GPL +0x19f462ab kfree_call_rcu vmlinux EXPORT_SYMBOL_GPL +0xf1f4c462 __percpu_down_read vmlinux EXPORT_SYMBOL_GPL +0x8445ecd0 ip_tunnel_encap_add_ops net/ipv4/ip_tunnel EXPORT_SYMBOL +0x94a420cb rdma_leave_multicast drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0x77d83398 mlxsw_core_read_frc_l drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xfec25a7b drm_helper_hpd_irq_event drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfac0cff6 cryptd_alloc_skcipher crypto/cryptd EXPORT_SYMBOL_GPL +0xd806a273 ocfs2_dlm_dump_lksb fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xf782e6e4 br_multicast_enabled vmlinux EXPORT_SYMBOL_GPL +0x63e37498 __raw_v6_lookup vmlinux EXPORT_SYMBOL_GPL +0x2b141751 __raw_v4_lookup vmlinux EXPORT_SYMBOL_GPL +0x3c506ad0 compat_tcp_getsockopt vmlinux EXPORT_SYMBOL +0xc507999f dm_device_name vmlinux EXPORT_SYMBOL_GPL +0xc03bae1b ata_eh_qc_retry vmlinux EXPORT_SYMBOL_GPL +0x8afaacf3 tpm_get_timeouts vmlinux EXPORT_SYMBOL_GPL +0x99de202d pci_enable_pri vmlinux EXPORT_SYMBOL_GPL +0xadc6ebbd clear_inode vmlinux EXPORT_SYMBOL +0x7d8dd0f4 svc_close_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xfa14b781 uverbs_get_flags32 drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x6fd70b82 target_show_dynamic_sessions drivers/target/target_core_mod EXPORT_SYMBOL +0xa5a28bcf __mlx4_cmd drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x99517682 udp_encap_enable vmlinux EXPORT_SYMBOL +0xe927549d qdisc_hash_add vmlinux EXPORT_SYMBOL +0xf880cf6b sk_psock_destroy vmlinux EXPORT_SYMBOL_GPL +0x6f4e72f6 neigh_parms_release vmlinux EXPORT_SYMBOL +0xf64d14a0 free_dca_provider vmlinux EXPORT_SYMBOL_GPL +0xa0d34ae7 usb_hcd_link_urb_to_ep vmlinux EXPORT_SYMBOL_GPL +0xa4121c01 iscsi_eh_cmd_timed_out vmlinux EXPORT_SYMBOL_GPL +0xd0e03112 device_release_driver vmlinux EXPORT_SYMBOL_GPL +0x16c51917 acpi_initialize_hp_context vmlinux EXPORT_SYMBOL_GPL +0xb2af650c blk_lookup_devt vmlinux EXPORT_SYMBOL +0x182e56d9 crypto_grab_akcipher vmlinux EXPORT_SYMBOL_GPL +0x61f4f5be crypto_grab_skcipher vmlinux EXPORT_SYMBOL_GPL +0x60bd77e3 security_path_rename vmlinux EXPORT_SYMBOL +0xe953b21f get_next_ino vmlinux EXPORT_SYMBOL +0xa0c97a9f pm_vt_switch_required vmlinux EXPORT_SYMBOL +0x53c70595 queue_rcu_work vmlinux EXPORT_SYMBOL +0x9a026003 ceph_osdc_get_request net/ceph/libceph EXPORT_SYMBOL +0xc248782c iscsit_build_task_mgt_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xfe100991 usb_stor_bulk_srb drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xf2fbe0be cxgbi_sock_select_mss drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xe69dbc7b nfs4_schedule_lease_moved_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xf35b9654 kvm_inject_realmode_interrupt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xf30965ac iosf_mbi_register_pmic_bus_access_notifier arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0x592e8af9 __fl6_sock_lookup vmlinux EXPORT_SYMBOL_GPL +0x9074765f efivar_entry_size vmlinux EXPORT_SYMBOL_GPL +0x9aaac699 dev_pm_opp_cpumask_remove_table vmlinux EXPORT_SYMBOL_GPL +0xd6037206 dm_kcopyd_copy vmlinux EXPORT_SYMBOL +0xb5f28399 usb_alloc_streams vmlinux 
EXPORT_SYMBOL_GPL +0xf78ab256 fixed_phy_change_carrier vmlinux EXPORT_SYMBOL_GPL +0xb955186c iscsi_suspend_queue vmlinux EXPORT_SYMBOL_GPL +0x8c3662cb scsi_register_interface vmlinux EXPORT_SYMBOL +0x5f979c87 iommu_attach_device vmlinux EXPORT_SYMBOL_GPL +0x84173c05 pinctrl_utils_add_config vmlinux EXPORT_SYMBOL_GPL +0xd878a361 vfs_setlease vmlinux EXPORT_SYMBOL_GPL +0x33dce7e5 find_get_pages_range_tag vmlinux EXPORT_SYMBOL +0xc984c11d from_kgid_munged vmlinux EXPORT_SYMBOL +0x40d04664 console_trylock vmlinux EXPORT_SYMBOL +0x8a535cb7 pps_event drivers/pps/pps_core EXPORT_SYMBOL +0x970c1af7 i2c_smbus_read_block_data drivers/i2c/i2c-core EXPORT_SYMBOL +0xb09c9956 mlx4_fmr_unmap drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x3cdfeeaa xdp_do_generic_redirect vmlinux EXPORT_SYMBOL_GPL +0x81882a68 usb_put_intf vmlinux EXPORT_SYMBOL_GPL +0x7297fff7 ata_sff_queue_pio_task vmlinux EXPORT_SYMBOL_GPL +0x6c3b884a clk_multiplier_ops vmlinux EXPORT_SYMBOL_GPL +0xd573f568 nla_put_64bit vmlinux EXPORT_SYMBOL +0xb1dabc1e unregister_ftrace_export vmlinux EXPORT_SYMBOL_GPL +0x4d1ff60a wait_for_completion_timeout vmlinux EXPORT_SYMBOL +0x025483b1 set_current_groups vmlinux EXPORT_SYMBOL +0xf767ca35 fixed_percpu_data vmlinux EXPORT_SYMBOL_GPL +0x9dc5b78f flow_resources_alloc drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x4873e99c core_tpg_register drivers/target/target_core_mod EXPORT_SYMBOL +0x9ea77222 adf_cfg_dev_add drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xe19f8e23 usb_stor_probe2 drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xd33009bc usb_stor_probe1 drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xe15e5264 mlx4_cq_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x80f91573 cxgbi_ddp_ppm_setup drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x6eb1376e pnfs_generic_pg_test fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x33adbc9d nfs_show_options fs/nfs/nfs EXPORT_SYMBOL_GPL +0xfa66a032 dcb_ieee_getapp_mask vmlinux EXPORT_SYMBOL +0xbe5908df xfrm6_input_addr vmlinux EXPORT_SYMBOL +0x00e7f46f rt_dst_clone vmlinux EXPORT_SYMBOL +0x097af021 neigh_proc_dointvec_jiffies vmlinux EXPORT_SYMBOL +0xf088cfb0 skb_copy_header vmlinux EXPORT_SYMBOL +0x00edde51 update_region vmlinux EXPORT_SYMBOL +0x722224e7 dma_request_chan_by_mask vmlinux EXPORT_SYMBOL_GPL +0x4c07a7e0 acpi_processor_unregister_performance vmlinux EXPORT_SYMBOL +0x81db6ebb xz_dec_reset vmlinux EXPORT_SYMBOL +0x1ac458a1 public_key_signature_free vmlinux EXPORT_SYMBOL_GPL +0x88e1d0f0 page_frag_free vmlinux EXPORT_SYMBOL +0x661601de sprint_symbol vmlinux EXPORT_SYMBOL_GPL +0xfe48eecb __tracepoint_pelt_se_tp vmlinux EXPORT_SYMBOL_GPL +0xed8bbe99 __tracepoint_pelt_dl_tp vmlinux EXPORT_SYMBOL_GPL +0x6894835c __tracepoint_pelt_rt_tp vmlinux EXPORT_SYMBOL_GPL +0x1af55628 drm_fb_helper_fill_info drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf2fc1436 blowfish_setkey crypto/blowfish_common EXPORT_SYMBOL_GPL +0xdd626ee3 fuse_len_args fs/fuse/fuse EXPORT_SYMBOL_GPL +0xe38cd003 tcf_em_unregister vmlinux EXPORT_SYMBOL +0x4f4f3148 scm_fp_dup vmlinux EXPORT_SYMBOL +0x7553d9c2 rc_g_keycode_from_table vmlinux EXPORT_SYMBOL_GPL +0x13adbdd0 devres_remove vmlinux EXPORT_SYMBOL_GPL +0xf414a471 mipi_dsi_dcs_set_tear_on vmlinux EXPORT_SYMBOL +0x3d41f769 serial8250_release_dma vmlinux EXPORT_SYMBOL_GPL +0xf9e5503e tty_vhangup vmlinux EXPORT_SYMBOL +0x0248efd3 kstrtobool_from_user vmlinux EXPORT_SYMBOL +0x5c41b53f shrink_dcache_parent vmlinux EXPORT_SYMBOL +0xb9f89246 trace_seq_vprintf vmlinux EXPORT_SYMBOL_GPL 
+0x538bc86d preempt_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0x28391bf9 nft_register_obj net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x7ca5bb81 ib_close_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x23e1255c iscsit_reject_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x61492bb7 mlx5_rl_are_equal drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x0d8f4740 kvm_mce_cap_supported arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x609f1c7e synchronize_net vmlinux EXPORT_SYMBOL +0xea7daa8a netdev_adjacent_change_abort vmlinux EXPORT_SYMBOL +0x5852b8ae mdiobus_register_device vmlinux EXPORT_SYMBOL +0xc81eb801 fc_lport_iterate vmlinux EXPORT_SYMBOL +0x3269be1a bus_for_each_drv vmlinux EXPORT_SYMBOL_GPL +0x21aaaca8 bus_for_each_dev vmlinux EXPORT_SYMBOL_GPL +0x6a4c4888 bsg_job_done vmlinux EXPORT_SYMBOL_GPL +0x6091797f synchronize_rcu vmlinux EXPORT_SYMBOL_GPL +0x42825ce2 rcu_scheduler_active vmlinux EXPORT_SYMBOL_GPL +0xe523ad75 synchronize_irq vmlinux EXPORT_SYMBOL +0xdf8c695a __ndelay vmlinux EXPORT_SYMBOL +0x9e7d6bd0 __udelay vmlinux EXPORT_SYMBOL +0xb709ab4a flow_rule_alloc vmlinux EXPORT_SYMBOL +0x6e88f6f3 pneigh_lookup vmlinux EXPORT_SYMBOL +0x74dcd98c dm_bufio_get_aux_data vmlinux EXPORT_SYMBOL_GPL +0x0f5673ec component_master_del vmlinux EXPORT_SYMBOL_GPL +0x985453e1 lease_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0xd2b77370 kern_path_create vmlinux EXPORT_SYMBOL +0xa3a54979 init_on_free vmlinux EXPORT_SYMBOL +0x2e381827 irq_create_mapping_affinity vmlinux EXPORT_SYMBOL_GPL +0xe1ed698d _raw_write_lock_bh vmlinux EXPORT_SYMBOL +0x621f6780 xdr_terminate_string net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x33b0b12c rpc_net_ns net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2095fac1 drm_mode_create_scaling_mode_property drivers/gpu/drm/drm EXPORT_SYMBOL +0x4eb4c55e __serpent_encrypt crypto/serpent_generic EXPORT_SYMBOL_GPL +0x00aaf935 kvm_disable_tdp arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x46e2efc9 fib_rules_dump vmlinux EXPORT_SYMBOL_GPL +0xc65d25e1 pci_irq_vector vmlinux EXPORT_SYMBOL +0xe2ffeef3 scsi_req_init vmlinux EXPORT_SYMBOL +0x383f41e0 fsnotify_find_mark vmlinux EXPORT_SYMBOL_GPL +0x910eda59 super_setup_bdi vmlinux EXPORT_SYMBOL +0x525d0aa3 trace_seq_printf vmlinux EXPORT_SYMBOL_GPL +0xb44e18ea audit_enabled vmlinux EXPORT_SYMBOL_GPL +0x39461d6a in_egroup_p vmlinux EXPORT_SYMBOL +0x4e2e10d2 be_roce_mcc_cmd drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL +0xe87811f6 drm_atomic_helper_fake_vblank drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xed772fc2 drm_dp_aux_register drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x8ce4f3ab kvm_enable_tdp arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x09c99abb eth_gro_complete vmlinux EXPORT_SYMBOL +0x409873e3 tty_termios_baud_rate vmlinux EXPORT_SYMBOL +0x07a890c8 fb_alloc_cmap vmlinux EXPORT_SYMBOL +0x73216f35 af_alg_release vmlinux EXPORT_SYMBOL_GPL +0x2176e42a hwpoison_filter_memcg vmlinux EXPORT_SYMBOL_GPL +0x46b2018d perf_event_addr_filters_sync vmlinux EXPORT_SYMBOL_GPL +0xcd53405f irq_chip_set_type_parent vmlinux EXPORT_SYMBOL_GPL +0x8a5eedf2 nf_conntrack_broadcast_help net/netfilter/nf_conntrack_broadcast EXPORT_SYMBOL_GPL +0xc33afcc3 ping_seq_start vmlinux EXPORT_SYMBOL_GPL +0x9f54ead7 gro_cells_destroy vmlinux EXPORT_SYMBOL +0x514bcf69 input_mt_sync_frame vmlinux EXPORT_SYMBOL +0xc9d61fb8 phy_speed_up vmlinux EXPORT_SYMBOL_GPL +0xc99fb0e6 dax_copy_from_iter vmlinux EXPORT_SYMBOL_GPL +0xdf40119d release_firmware vmlinux EXPORT_SYMBOL +0x4a453f53 iowrite32 vmlinux EXPORT_SYMBOL +0x06a86bc1 iowrite16 vmlinux EXPORT_SYMBOL 
+0x2e255534 blk_mq_start_stopped_hw_queue vmlinux EXPORT_SYMBOL_GPL +0x0fe6e6c5 set_posix_acl vmlinux EXPORT_SYMBOL +0x9e0c711d vzalloc_node vmlinux EXPORT_SYMBOL +0x7f24de73 jiffies_to_usecs vmlinux EXPORT_SYMBOL +0x37befc70 jiffies_to_msecs vmlinux EXPORT_SYMBOL +0x02b07f6a iscsit_build_nopin_rsp drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x35121539 mlx5_query_port_oper_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xa8ad5d01 drm_detect_hdmi_monitor drivers/gpu/drm/drm EXPORT_SYMBOL +0x7a8eaa58 drm_atomic_helper_update_plane drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x607dbd52 iptun_encaps vmlinux EXPORT_SYMBOL +0x08708f76 inet_frag_queue_insert vmlinux EXPORT_SYMBOL +0x301c144f tcp_child_process vmlinux EXPORT_SYMBOL +0x61e91dfa sock_diag_put_filterinfo vmlinux EXPORT_SYMBOL +0x4f92fc65 netdev_set_tc_queue vmlinux EXPORT_SYMBOL +0xd6abbe7a devm_power_supply_register vmlinux EXPORT_SYMBOL_GPL +0xc3d10564 devm_mdiobus_alloc_size vmlinux EXPORT_SYMBOL_GPL +0x56bd3c4b phy_start_machine vmlinux EXPORT_SYMBOL_GPL +0x36c7aae2 __pm_runtime_suspend vmlinux EXPORT_SYMBOL_GPL +0xc666a132 crc_t10dif vmlinux EXPORT_SYMBOL +0xc6055c9e kvasprintf_const vmlinux EXPORT_SYMBOL +0x546857fd param_get_charp vmlinux EXPORT_SYMBOL +0xe872d540 nf_nat_redirect_ipv6 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x181c8265 nf_nat_redirect_ipv4 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x204cf629 cxgb4_ofld_send drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x5a262da7 cxgb3_ofld_send drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x0099c6cc nvmf_fail_nonready_command drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL +0x37632a0e xdp_rxq_info_unreg vmlinux EXPORT_SYMBOL_GPL +0xc496e665 netdev_next_lower_dev_rcu vmlinux EXPORT_SYMBOL +0x5d0ca4f0 validate_xmit_skb_list vmlinux EXPORT_SYMBOL_GPL +0xaa6753c4 mdiobus_write vmlinux EXPORT_SYMBOL +0x0b576b07 tty_port_destroy vmlinux EXPORT_SYMBOL +0x97df43bd bsg_unregister_queue vmlinux EXPORT_SYMBOL_GPL +0x22af7768 bio_trim vmlinux EXPORT_SYMBOL_GPL +0x7a970e8d dquot_claim_space_nodirty vmlinux EXPORT_SYMBOL +0x93fc7a2d dax_iomap_rw vmlinux EXPORT_SYMBOL_GPL +0x0f1254d4 blkdev_read_iter vmlinux EXPORT_SYMBOL_GPL +0xc33aa1d0 prepare_binprm vmlinux EXPORT_SYMBOL +0xb50351e8 rdma_restrack_put drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xdf2c2742 rb_last vmlinux EXPORT_SYMBOL +0x625a5da5 tcp_v4_destroy_sock vmlinux EXPORT_SYMBOL +0xb5b3f68c ehci_setup vmlinux EXPORT_SYMBOL_GPL +0x4e33cac2 ahci_port_resume vmlinux EXPORT_SYMBOL_GPL +0x26e22329 nd_btt_arena_is_valid vmlinux EXPORT_SYMBOL +0x3ab4cf3e pm_clk_remove_clk vmlinux EXPORT_SYMBOL_GPL +0x35dd683b acpi_debugfs_dir vmlinux EXPORT_SYMBOL_GPL +0x12a2b1b4 remove_conflicting_pci_framebuffers vmlinux EXPORT_SYMBOL +0x04c4f603 mpi_get_buffer vmlinux EXPORT_SYMBOL_GPL +0x5191d302 sysfs_create_mount_point vmlinux EXPORT_SYMBOL_GPL +0x301a81c7 mount_bdev vmlinux EXPORT_SYMBOL +0xcc5d22d9 can_do_mlock vmlinux EXPORT_SYMBOL +0x60a13e90 rcu_barrier vmlinux EXPORT_SYMBOL_GPL +0xe76e7226 ceph_pagelist_alloc net/ceph/libceph EXPORT_SYMBOL +0xeedd031a nft_do_chain net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xe8f47602 nf_nat_masquerade_ipv6 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xd2b65cbb nf_nat_masquerade_ipv4 net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0x918df487 nf_ct_helper_expectfn_find_by_symbol net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xbc8c2441 ib_mr_pool_init drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4f6c2360 acpi_smbus_read drivers/acpi/sbshc EXPORT_SYMBOL_GPL 
+0x6b98e5da nfs_umount_begin fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x3c3ff9fd sprintf vmlinux EXPORT_SYMBOL
+0x2d056090 ndisc_mc_map vmlinux EXPORT_SYMBOL
+0x38225f1d dm_internal_suspend_fast vmlinux EXPORT_SYMBOL_GPL
+0xec2b8a42 acpi_walk_namespace vmlinux EXPORT_SYMBOL
+0xd991975e dup_iter vmlinux EXPORT_SYMBOL
+0xda3d10a8 security_tun_dev_open vmlinux EXPORT_SYMBOL
+0x3cf39a2e simple_attr_write vmlinux EXPORT_SYMBOL_GPL
+0xc79267da simple_unlink vmlinux EXPORT_SYMBOL
+0x26f8912e generic_listxattr vmlinux EXPORT_SYMBOL
+0x12f7a168 virtio_transport_free_pkt net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0x5379cea3 ceph_caps_for_mode net/ceph/libceph EXPORT_SYMBOL
+0x7aaa35d9 drm_event_cancel_free drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf6ebc03b net_ratelimit vmlinux EXPORT_SYMBOL
+0x1307fb74 dev_addr_init vmlinux EXPORT_SYMBOL
+0x81ba2bfb mptscsih_io_done vmlinux EXPORT_SYMBOL
+0x70499fbd ahci_platform_enable_clks vmlinux EXPORT_SYMBOL_GPL
+0xbd861b76 platform_device_add_properties vmlinux EXPORT_SYMBOL_GPL
+0x1ebf6c2a pci_power_names vmlinux EXPORT_SYMBOL_GPL
+0x3e35dc43 shash_ahash_update vmlinux EXPORT_SYMBOL_GPL
+0xf2e5bd87 security_free_mnt_opts vmlinux EXPORT_SYMBOL
+0xa9f041e3 posix_acl_valid vmlinux EXPORT_SYMBOL
+0xdbccc54b invalidate_inode_buffers vmlinux EXPORT_SYMBOL
+0x0c9e6416 PageMovable vmlinux EXPORT_SYMBOL
+0x9247c563 ip_tunnel_ioctl net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x562095b1 mlx4_SET_PORT_PRIO2TC drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xc3850e5b cxgbi_parse_pdu_itt drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x6e0bbabe drm_mm_print drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb839350b gfn_to_page_many_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x1d77b0f8 unix_socket_table vmlinux EXPORT_SYMBOL_GPL
+0xd7017852 nl_table vmlinux EXPORT_SYMBOL_GPL
+0xec0af859 usb_autopm_get_interface_async vmlinux EXPORT_SYMBOL_GPL
+0x31369b2d raid_class_release vmlinux EXPORT_SYMBOL
+0xdec36f70 mipi_dsi_dcs_exit_sleep_mode vmlinux EXPORT_SYMBOL
+0x742578a5 wait_for_random_bytes vmlinux EXPORT_SYMBOL
+0x8daa6556 crypto_shash_update vmlinux EXPORT_SYMBOL_GPL
+0x665a5ea8 find_vma vmlinux EXPORT_SYMBOL
+0x6222976b svc_print_addr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x6ae543f4 nf_ct_extend_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x8c67a4ea mlx4_multicast_promisc_add drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x00148b5f drm_dp_mst_connector_late_register drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x3c2cffe6 dcb_ieee_getapp_dscp_prio_mask_map vmlinux EXPORT_SYMBOL
+0x5c606194 kernel_connect vmlinux EXPORT_SYMBOL
+0x1b808aec sas_request_addr vmlinux EXPORT_SYMBOL_GPL
+0xcad1aca8 acpi_exception vmlinux EXPORT_SYMBOL
+0x59a2712d raid6_gfinv vmlinux EXPORT_SYMBOL
+0xe39e74a7 gen_pool_get vmlinux EXPORT_SYMBOL_GPL
+0x80f1a3c2 dquot_destroy vmlinux EXPORT_SYMBOL
+0x679604b3 single_open_size vmlinux EXPORT_SYMBOL
+0x9fa7df85 dma_ops vmlinux EXPORT_SYMBOL
+0x993a42f1 inet_stream_connect vmlinux EXPORT_SYMBOL
+0xf3a47b6a peernet2id vmlinux EXPORT_SYMBOL
+0xc9b04da0 mdiobus_read_nested vmlinux EXPORT_SYMBOL
+0x3a014baa fcoe_ctlr_recv_flogi vmlinux EXPORT_SYMBOL
+0x9a177321 dma_buf_end_cpu_access vmlinux EXPORT_SYMBOL_GPL
+0xa87317b3 virtqueue_notify vmlinux EXPORT_SYMBOL_GPL
+0xd35a6d31 mempool_kmalloc vmlinux EXPORT_SYMBOL
+0x37b8b39e screen_info vmlinux EXPORT_SYMBOL
+0x0a087ff9 rpc_mkpipe_dentry net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd16b7fbb qdisc_warn_nonwc vmlinux EXPORT_SYMBOL
+0xca543ee5 devlink_port_type_eth_set vmlinux EXPORT_SYMBOL_GPL
+0xb10677c6 ahci_stop_engine vmlinux EXPORT_SYMBOL_GPL
+0x3239f8b6 ata_eh_qc_complete vmlinux EXPORT_SYMBOL_GPL
+0xe51e95b1 vga_remove_vgacon vmlinux EXPORT_SYMBOL
+0x7026334a pci_release_regions vmlinux EXPORT_SYMBOL
+0x63e2ca9f find_lock_entry vmlinux EXPORT_SYMBOL
+0x0fb01042 nlmclnt_done fs/lockd/lockd EXPORT_SYMBOL_GPL
+0x553b49a4 cpufreq_get_driver_data vmlinux EXPORT_SYMBOL_GPL
+0xfdd082b7 mpt_device_driver_register vmlinux EXPORT_SYMBOL
+0xa7e54030 tpm_send vmlinux EXPORT_SYMBOL_GPL
+0xa8e6933a qdf2400_e44_present vmlinux EXPORT_SYMBOL
+0x499bbf57 nfit_get_smbios_id vmlinux EXPORT_SYMBOL_GPL
+0x9e56434c blk_queue_physical_block_size vmlinux EXPORT_SYMBOL
+0xaeb84dc2 dio_end_io vmlinux EXPORT_SYMBOL_GPL
+0xfa494464 relay_subbufs_consumed vmlinux EXPORT_SYMBOL_GPL
+0xb8212341 timecounter_cyc2time vmlinux EXPORT_SYMBOL_GPL
+0xf4835f2d ip_vs_new_conn_out net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0x5857fa81 target_setup_cmd_from_cdb drivers/target/target_core_mod EXPORT_SYMBOL
+0x810102cc mlx4_mr_hw_put_mpt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xbe8a216a bcm_phy_28nm_a0b0_afe_config_init drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL
+0x838d2bc8 siphash_3u32 vmlinux EXPORT_SYMBOL
+0x70002fe8 siphash_1u32 vmlinux EXPORT_SYMBOL
+0xc819fa22 sock_diag_check_cookie vmlinux EXPORT_SYMBOL_GPL
+0x23412816 rtc_tm_to_ktime vmlinux EXPORT_SYMBOL_GPL
+0xa475e2f4 fc_seq_els_rsp_send vmlinux EXPORT_SYMBOL_GPL
+0xa095e02e generic_check_addressable vmlinux EXPORT_SYMBOL
+0xc52738b2 get_tree_nodev vmlinux EXPORT_SYMBOL
+0x9db7c218 perf_event_read_value vmlinux EXPORT_SYMBOL_GPL
+0x04482cdb __refrigerator vmlinux EXPORT_SYMBOL
+0x35c3be50 irqchip_fwnode_ops vmlinux EXPORT_SYMBOL_GPL
+0x7cb1ae69 cpu_down vmlinux EXPORT_SYMBOL
+0x61a13025 nft_validate_register_store net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xf1d3728c i2c_recover_bus drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x44b70a51 drm_mode_parse_command_line_for_connector drivers/gpu/drm/drm EXPORT_SYMBOL
+0x286aef15 devm_of_find_backlight drivers/video/backlight/backlight EXPORT_SYMBOL
+0x673d02c2 inet6_csk_addr2sockaddr vmlinux EXPORT_SYMBOL_GPL
+0xdadf5f44 netdev_refcnt_read vmlinux EXPORT_SYMBOL
+0x6be8b201 input_mt_init_slots vmlinux EXPORT_SYMBOL
+0xf9a7bc8a usb_disable_lpm vmlinux EXPORT_SYMBOL_GPL
+0x94a63c81 usb_disable_ltm vmlinux EXPORT_SYMBOL_GPL
+0xa39f6999 phy_10gbit_full_features vmlinux EXPORT_SYMBOL_GPL
+0x3f1364c1 ata_link_printk vmlinux EXPORT_SYMBOL
+0x56ad722f devres_close_group vmlinux EXPORT_SYMBOL_GPL
+0xf5f370e0 async_schedule_node vmlinux EXPORT_SYMBOL_GPL
+0xcc83cb6c failover_unregister net/core/failover EXPORT_SYMBOL_GPL
+0x734b0028 cxgb4_l2t_send drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x19422c18 drm_mode_object_get drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc566370b nfs_initiate_commit fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xd7e56a4e simple_strtoll vmlinux EXPORT_SYMBOL
+0x20000329 simple_strtoul vmlinux EXPORT_SYMBOL
+0x355bc89a klist_next vmlinux EXPORT_SYMBOL_GPL
+0x88192f68 tcp_rcv_established vmlinux EXPORT_SYMBOL
+0x4aa58bea inet_getpeer vmlinux EXPORT_SYMBOL_GPL
+0x57583b30 ipv4_update_pmtu vmlinux EXPORT_SYMBOL_GPL
+0x3c04172e xt_unregister_targets vmlinux EXPORT_SYMBOL
+0x5b913e38 usb_enable_lpm vmlinux EXPORT_SYMBOL_GPL
+0x3690be33 usb_enable_ltm vmlinux EXPORT_SYMBOL_GPL
+0xe1fca3c9 ahci_set_em_messages vmlinux EXPORT_SYMBOL_GPL
+0xde25f88c __tracepoint_add_device_to_group vmlinux EXPORT_SYMBOL_GPL
+0x39655269 acpi_ec_remove_query_handler vmlinux EXPORT_SYMBOL_GPL
+0x99430ba2 acpi_get_phys_id vmlinux EXPORT_SYMBOL_GPL
+0xd74e6549 fsnotify_destroy_mark vmlinux EXPORT_SYMBOL_GPL
+0x23c809c8 balance_dirty_pages_ratelimited vmlinux EXPORT_SYMBOL
+0x008539f0 klp_shadow_alloc vmlinux EXPORT_SYMBOL_GPL
+0x92aee1db ib_send_cm_drep drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0x3c3ab9a6 ib_send_cm_dreq drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0xb6759dc1 ib_sa_sendonly_fullmem_support drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xb9cad492 __drm_atomic_state_free drivers/gpu/drm/drm EXPORT_SYMBOL
+0x297a0292 drm_panel_bridge_add drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xee31ef44 kvm_emulate_instruction arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xfc12d514 net_ns_type_operations vmlinux EXPORT_SYMBOL_GPL
+0x546b47a2 phy_free_interrupt vmlinux EXPORT_SYMBOL
+0x3952d883 mraid_mm_register_adp vmlinux EXPORT_SYMBOL
+0x0ea51241 mipi_dsi_dcs_write_buffer vmlinux EXPORT_SYMBOL
+0xf7e366b4 iommu_unregister_device_fault_handler vmlinux EXPORT_SYMBOL_GPL
+0xc0220bf2 proc_symlink vmlinux EXPORT_SYMBOL
+0x9ca95932 ceph_create_snap_context net/ceph/libceph EXPORT_SYMBOL
+0xc776f53a ceph_copy_user_to_page_vector net/ceph/libceph EXPORT_SYMBOL
+0xb51a629d drm_fb_xrgb8888_to_rgb888_dstclip drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x0e17678a siphash_4u64 vmlinux EXPORT_SYMBOL
+0xa0ae1e73 siphash_3u64 vmlinux EXPORT_SYMBOL
+0x12cabc89 siphash_2u64 vmlinux EXPORT_SYMBOL
+0x3126a9e8 siphash_1u64 vmlinux EXPORT_SYMBOL
+0x8c7bd877 __tracepoint_br_fdb_external_learn_add vmlinux EXPORT_SYMBOL_GPL
+0xa3b1d69c __rtnl_link_unregister vmlinux EXPORT_SYMBOL_GPL
+0x89dbd310 rtnl_kfree_skbs vmlinux EXPORT_SYMBOL
+0xcdcbc47d dev_pm_opp_find_freq_ceil vmlinux EXPORT_SYMBOL_GPL
+0xdf1882af dbgp_reset_prep vmlinux EXPORT_SYMBOL_GPL
+0x77dbc829 usb_hcd_pci_pm_ops vmlinux EXPORT_SYMBOL_GPL
+0x65d28054 cdc_parse_cdc_header vmlinux EXPORT_SYMBOL
+0xab712861 spi_populate_tag_msg vmlinux EXPORT_SYMBOL_GPL
+0xc6a4a872 __clk_is_enabled vmlinux EXPORT_SYMBOL_GPL
+0x564f7608 acpi_reconfig_notifier_register vmlinux EXPORT_SYMBOL
+0x05b27a8d cpu_rmap_put vmlinux EXPORT_SYMBOL
+0xe7ab7d21 blk_integrity_compare vmlinux EXPORT_SYMBOL
+0xb36cb7bb blk_queue_dma_alignment vmlinux EXPORT_SYMBOL
+0xf371f56a security_path_unlink vmlinux EXPORT_SYMBOL
+0xfef8cf74 vfs_statx vmlinux EXPORT_SYMBOL
+0x8ae7b501 remap_vmalloc_range vmlinux EXPORT_SYMBOL
+0x9136acf6 noop_backing_dev_info vmlinux EXPORT_SYMBOL_GPL
+0xd985dc99 mempool_free_pages vmlinux EXPORT_SYMBOL
+0x629c17d5 irq_domain_set_hwirq_and_chip vmlinux EXPORT_SYMBOL_GPL
+0xfcec0987 enable_irq vmlinux EXPORT_SYMBOL
+0x1e7bbcb3 kernel_restart vmlinux EXPORT_SYMBOL_GPL
+0xee13e697 set_personality_ia32 vmlinux EXPORT_SYMBOL_GPL
+0xb1313f64 ib_modify_mad drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x218f33a9 mlx5_core_alloc_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc2b708bf glue_cbc_decrypt_req_128bit arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL
+0x375a0a50 ip_cmsg_recv_offset vmlinux EXPORT_SYMBOL
+0x1e62643b skb_flow_dissector_init vmlinux EXPORT_SYMBOL
+0x6cf69938 nvmem_unregister vmlinux EXPORT_SYMBOL_GPL
+0x206405b3 iscsi_set_param vmlinux EXPORT_SYMBOL_GPL
+0x35bf93b4 ata_port_printk vmlinux EXPORT_SYMBOL
+0x62d99944 nvdimm_flush vmlinux EXPORT_SYMBOL_GPL
+0xa070c7a9 agp_unbind_memory vmlinux EXPORT_SYMBOL
+0x90576ec4 vmemdup_user vmlinux EXPORT_SYMBOL
+0x693e3b6b filemap_page_mkwrite vmlinux EXPORT_SYMBOL
+0x170cc36c put_timespec64 vmlinux EXPORT_SYMBOL_GPL
+0x5d8ede8c drm_crtc_arm_vblank_event drivers/gpu/drm/drm EXPORT_SYMBOL
+0x4286c239 drm_mode_create drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8222ca18 kvm_get_linear_rip arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa07a37f0 memchr vmlinux EXPORT_SYMBOL
+0x856ff1b7 tcf_block_put_ext vmlinux EXPORT_SYMBOL
+0x6fa38b9d __get_hash_from_flowi6 vmlinux EXPORT_SYMBOL
+0x25c16af5 fc_rport_terminate_io vmlinux EXPORT_SYMBOL
+0x87a98d5d raid_class_attach vmlinux EXPORT_SYMBOL
+0xfad9c827 kill_dax vmlinux EXPORT_SYMBOL_GPL
+0xf1f4e24f mipi_dsi_generic_write vmlinux EXPORT_SYMBOL
+0x5e1a20a9 __fscrypt_prepare_lookup vmlinux EXPORT_SYMBOL_GPL
+0x2e79cc3e clear_wb_congested vmlinux EXPORT_SYMBOL
+0xc18ac88d nf_ct_expect_hsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xd8602b6a tun_is_xdp_frame drivers/net/tun EXPORT_SYMBOL
+0xb85a1b1b drm_syncobj_find drivers/gpu/drm/drm EXPORT_SYMBOL
+0x491cc305 drm_mode_destroy drivers/gpu/drm/drm EXPORT_SYMBOL
+0x97b05e2d drm_dp_mst_topology_mgr_resume drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xef7ef4dd nfs_request_add_commit_list fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x96349ef0 nfs_invalidate_atime fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x87855c18 nfs_rename fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xf2772cf7 inet6_release vmlinux EXPORT_SYMBOL
+0x94e4ede7 clk_hw_get_name vmlinux EXPORT_SYMBOL_GPL
+0xe246d926 pci_destroy_slot vmlinux EXPORT_SYMBOL_GPL
+0x59bd1903 vfs_kern_mount vmlinux EXPORT_SYMBOL_GPL
+0xd5bd7dac ring_buffer_record_enable_cpu vmlinux EXPORT_SYMBOL_GPL
+0x267df662 smp_call_on_cpu vmlinux EXPORT_SYMBOL_GPL
+0x9ec6ca96 ktime_get_real_ts64 vmlinux EXPORT_SYMBOL
+0x7406bb23 insert_resource vmlinux EXPORT_SYMBOL_GPL
+0xadb8f873 do_machine_check vmlinux EXPORT_SYMBOL_GPL
+0x1c7badd0 ib_get_cached_port_state drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x484489a4 mlxsw_cmd_exec drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x865255bc __dax_pmem_probe drivers/dax/pmem/dax_pmem_core EXPORT_SYMBOL_GPL
+0x3d54a4b3 drm_dp_mst_atomic_check drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x52d53192 flow_indr_block_cb_register vmlinux EXPORT_SYMBOL_GPL
+0xa16ace23 sock_diag_register vmlinux EXPORT_SYMBOL_GPL
+0x36b15c30 sk_net_capable vmlinux EXPORT_SYMBOL
+0xef539e6e devm_devfreq_remove_device vmlinux EXPORT_SYMBOL
+0xdab203d6 anon_transport_class_unregister vmlinux EXPORT_SYMBOL_GPL
+0xce74af87 mipi_dsi_dcs_nop vmlinux EXPORT_SYMBOL
+0xb2bcb088 acpi_current_gpe_count vmlinux EXPORT_SYMBOL
+0xa8838cb1 acpi_device_modalias vmlinux EXPORT_SYMBOL_GPL
+0x5a0b73d0 zlib_deflateInit2 vmlinux EXPORT_SYMBOL
+0xdcdb853d submit_bio vmlinux EXPORT_SYMBOL
+0x76585427 alloc_page_buffers vmlinux EXPORT_SYMBOL_GPL
+0x622c7922 register_oom_notifier vmlinux EXPORT_SYMBOL_GPL
+0xfb8cce78 trace_event_buffer_lock_reserve vmlinux EXPORT_SYMBOL_GPL
+0xb814e18a on_each_cpu_mask vmlinux EXPORT_SYMBOL
+0x549525ef handle_nested_irq vmlinux EXPORT_SYMBOL_GPL
+0x2592fc6c console_printk vmlinux EXPORT_SYMBOL_GPL
+0x55c76a23 ksys_sync_helper vmlinux EXPORT_SYMBOL_GPL
+0x3e44c616 ceph_osdc_update_epoch_barrier net/ceph/libceph EXPORT_SYMBOL
+0xed9b4ea3 ceph_msg_dump net/ceph/libceph EXPORT_SYMBOL
+0x08756db1 crypto_transfer_akcipher_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xd344e4ee ocfs2_stack_glue_set_max_proto_version fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x9e660b62 br_dev_queue_push_xmit vmlinux EXPORT_SYMBOL_GPL
+0xf2741b88 fl6_update_dst vmlinux EXPORT_SYMBOL_GPL
+0x92e7488a neigh_lookup vmlinux EXPORT_SYMBOL
+0x971c3d43 usb_put_hcd vmlinux EXPORT_SYMBOL_GPL
+0x8615da17 sas_alloc_slow_task vmlinux EXPORT_SYMBOL_GPL
+0xccfd2ebc scsi_dev_info_list_del_keyed vmlinux EXPORT_SYMBOL
+0xa6198196 ata_host_resume vmlinux EXPORT_SYMBOL_GPL
+0xa0aed30b uart_get_rs485_mode vmlinux EXPORT_SYMBOL_GPL
+0xcf88bdaa debugfs_create_u32_array vmlinux EXPORT_SYMBOL_GPL
+0xc9c745a9 bpf_map_inc_not_zero vmlinux EXPORT_SYMBOL_GPL
+0x71be84c5 trace_array_printk vmlinux EXPORT_SYMBOL_GPL
+0x827a2f1f mlxsw_afa_block_jump drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x98b0ece8 nfs_init_timeout_values fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x5709e5a1 tcp_mtup_init vmlinux EXPORT_SYMBOL
+0xe61ced55 cpuidle_register vmlinux EXPORT_SYMBOL_GPL
+0xac8873ab mpt_HardResetHandler vmlinux EXPORT_SYMBOL
+0x1746e84c devm_clk_unregister vmlinux EXPORT_SYMBOL_GPL
+0xdf256037 kstrtou8_from_user vmlinux EXPORT_SYMBOL
+0xf7d31de9 kstrtoul_from_user vmlinux EXPORT_SYMBOL
+0x466754b1 __clocksource_update_freq_scale vmlinux EXPORT_SYMBOL_GPL
+0x3a08475f platform_thermal_notify vmlinux EXPORT_SYMBOL
+0xbea5ff1e static_key_initialized vmlinux EXPORT_SYMBOL_GPL
+0xe5b94481 l2tp_tunnel_create net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0x0b2c228c cache_purge net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xc4fe8e02 rpc_sleep_on_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x9c072aa8 vbg_status_code_to_errno drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL
+0xd163cade dm_tm_commit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x71e1d813 mlxsw_core_port_clear drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x95eaab4d drm_dev_fini drivers/gpu/drm/drm EXPORT_SYMBOL
+0x48b62a57 drm_dp_link_train_channel_eq_delay drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x222e7ce2 sysfs_streq vmlinux EXPORT_SYMBOL
+0x310d8810 dbs_update vmlinux EXPORT_SYMBOL_GPL
+0x8a6c7139 acpi_mask_gpe vmlinux EXPORT_SYMBOL
+0xf5e5a87b hdmi_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0xe56190b4 sbitmap_bitmap_show vmlinux EXPORT_SYMBOL_GPL
+0xc106448d bio_associate_blkg vmlinux EXPORT_SYMBOL_GPL
+0xf7d2195b alloc_buffer_head vmlinux EXPORT_SYMBOL
+0xec63db32 bpf_event_output vmlinux EXPORT_SYMBOL_GPL
+0xd067d3c5 system_freezable_power_efficient_wq vmlinux EXPORT_SYMBOL_GPL
+0x1a8264fb xdr_write_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd026d13e mdev_register_driver drivers/vfio/mdev/mdev EXPORT_SYMBOL
+0x26884ff7 nfs_alloc_fhandle fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x1fe912f1 netdev_alloc_frag vmlinux EXPORT_SYMBOL
+0x88da13df dm_accept_partial_bio vmlinux EXPORT_SYMBOL_GPL
+0x8a0d823c nvme_sync_io_queues vmlinux EXPORT_SYMBOL_GPL
+0xd785ab4e pci_bus_read_dev_vendor_id vmlinux EXPORT_SYMBOL
+0x4d862a59 fscrypt_decrypt_block_inplace vmlinux EXPORT_SYMBOL
+0xa220093b fscrypt_encrypt_block_inplace vmlinux EXPORT_SYMBOL
+0xbf7609d5 vhost_disable_notify drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x4022cdc2 vhost_vq_avail_empty drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xcafdd707 ocfs2_dlm_lock_status fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0xd0ad4732 nd_tbl vmlinux EXPORT_SYMBOL_GPL
+0x1712adc1 iscsi_conn_teardown vmlinux EXPORT_SYMBOL_GPL
+0x8836ff98 nvme_stop_queues vmlinux EXPORT_SYMBOL_GPL
+0x1520ac53 device_remove_file vmlinux EXPORT_SYMBOL_GPL
+0xdef8fcbf acpi_dev_get_dma_resources vmlinux EXPORT_SYMBOL_GPL
+0x1b8822d8 pinctrl_gpio_direction_output vmlinux EXPORT_SYMBOL_GPL
+0x0f81254d security_inode_invalidate_secctx vmlinux EXPORT_SYMBOL
+0x60506751 unmap_kernel_range_noflush vmlinux EXPORT_SYMBOL_GPL
+0x9b3f0153 param_set_bool_enable_only vmlinux EXPORT_SYMBOL_GPL
+0x289c3714 nf_ct_alloc_hashtable net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xd09c3df4 nvmf_ip_options_match drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0xd752cb11 ipv6_skip_exthdr vmlinux EXPORT_SYMBOL
+0x1aafc043 tcp_md5_hash_skb_data vmlinux EXPORT_SYMBOL
+0x7fb5cced ip_options_compile vmlinux EXPORT_SYMBOL
+0x3e75669c sock_diag_unregister vmlinux EXPORT_SYMBOL_GPL
+0xe082e88d acpi_check_address_range vmlinux EXPORT_SYMBOL
+0x39b4b352 pcibios_bus_to_resource vmlinux EXPORT_SYMBOL
+0xd8bfb414 param_get_ulong vmlinux EXPORT_SYMBOL
+0xeebfc872 kill_pid vmlinux EXPORT_SYMBOL
+0xe5bc9a53 slhc_free drivers/net/slip/slhc EXPORT_SYMBOL
+0x35a8c273 drm_gem_mmap drivers/gpu/drm/drm EXPORT_SYMBOL
+0xccfbe5f5 rt6_lookup vmlinux EXPORT_SYMBOL
+0x309d154c unix_outq_len vmlinux EXPORT_SYMBOL_GPL
+0x0c21aa48 udp_seq_start vmlinux EXPORT_SYMBOL
+0x26e76b0e device_store_int vmlinux EXPORT_SYMBOL_GPL
+0x71c0b65e dim_turn vmlinux EXPORT_SYMBOL
+0x64999478 congestion_wait vmlinux EXPORT_SYMBOL
+0x1f378136 __cgroup_bpf_run_filter_sk vmlinux EXPORT_SYMBOL
+0xa6f963ee ring_buffer_read_prepare vmlinux EXPORT_SYMBOL_GPL
+0x619b14da fpstate_init vmlinux EXPORT_SYMBOL_GPL
+0x5fb38303 nf_ct_tmpl_free net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x875e300b i2c_del_driver drivers/i2c/i2c-core EXPORT_SYMBOL
+0x85de4832 mlx4_get_is_vlan_offload_disabled drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x88c7abbf drm_fb_helper_restore_fbdev_mode_unlocked drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x2bdb9b1b __gfn_to_pfn_memslot arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xf08a107e tcp_get_syncookie_mss vmlinux EXPORT_SYMBOL_GPL
+0xf28404cf devlink_dpipe_header_ipv6 vmlinux EXPORT_SYMBOL
+0xa858084d inet_pton_with_scope vmlinux EXPORT_SYMBOL
+0x9113b1c9 device_property_read_u64_array vmlinux EXPORT_SYMBOL_GPL
+0x19272209 device_property_read_u32_array vmlinux EXPORT_SYMBOL_GPL
+0x8a4921a0 device_property_read_u16_array vmlinux EXPORT_SYMBOL_GPL
+0xda1ddef1 acpi_mark_gpe_for_wake vmlinux EXPORT_SYMBOL
+0x64a0f7c1 blk_queue_dma_drain vmlinux EXPORT_SYMBOL_GPL
+0xf6e87beb config_item_set_name vmlinux EXPORT_SYMBOL
+0xc63292c4 dquot_release vmlinux EXPORT_SYMBOL
+0x6a6cafd2 ring_buffer_read_page vmlinux EXPORT_SYMBOL_GPL
+0x1c1b9f8e _raw_write_unlock_irqrestore vmlinux EXPORT_SYMBOL
+0x5de7447d __atomic_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0x08303ac5 x86_match_cpu vmlinux EXPORT_SYMBOL
+0xe447d2c9 vfio_pin_pages drivers/vfio/vfio EXPORT_SYMBOL
+0x75c6adff drm_legacy_ioremapfree drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbcba5b37 nfs_generic_pgio fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xf8a8fc3f usb_hcd_setup_local_mem vmlinux EXPORT_SYMBOL_GPL
+0xa1f431d6 sas_slave_configure vmlinux EXPORT_SYMBOL_GPL
+0xc1df483f __dma_request_channel vmlinux EXPORT_SYMBOL_GPL
+0xcb0e2ce1 clk_hw_is_enabled vmlinux EXPORT_SYMBOL_GPL
+0xfe916dc6 hex_dump_to_buffer vmlinux EXPORT_SYMBOL
+0x59d63d5c file_ra_state_init vmlinux EXPORT_SYMBOL_GPL
+0x432b8178 dm_array_cursor_begin drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x6c771cf9 mlx5_core_modify_cq_moderation drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x2d7a290c mlx4_unicast_promisc_remove drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x35121355 usb_alloc_coherent vmlinux EXPORT_SYMBOL_GPL
+0xfdb0800c crypto_larval_kill vmlinux EXPORT_SYMBOL_GPL
+0x028639b9 irq_set_default_host vmlinux EXPORT_SYMBOL_GPL
+0x75847676 nft_flowtable_lookup net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xe7b52e5f mult_to_ib_rate drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd14c42ef i2c_clients_command drivers/i2c/i2c-core EXPORT_SYMBOL
+0x31381511 drm_modeset_acquire_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x03fa11c6 drm_mode_is_420_only drivers/gpu/drm/drm EXPORT_SYMBOL
+0x59c89076 inet_csk_get_port vmlinux EXPORT_SYMBOL_GPL
+0x0b1f39c4 netdev_is_rx_handler_busy vmlinux EXPORT_SYMBOL_GPL
+0x1b085751 hid_match_device vmlinux EXPORT_SYMBOL_GPL
+0x47b54f1e sas_tlr_supported vmlinux EXPORT_SYMBOL_GPL
+0xbf636428 rdma_dim vmlinux EXPORT_SYMBOL
+0x8a1a1038 kiocb_set_cancel_fn vmlinux EXPORT_SYMBOL
+0xe71c8b6d add_to_page_cache vmlinux EXPORT_SYMBOL
+0xd1fbc889 unregister_kprobe vmlinux EXPORT_SYMBOL_GPL
+0xbf7f18ef ceph_pg_pool_flags net/ceph/libceph EXPORT_SYMBOL
+0x90e68f1d adf_isr_resource_alloc drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x3629cffc mlx5_notifier_unregister drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x37edcb7c mlx5_query_port_ib_proto_oper drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4ba5c46b vmci_qpair_peek drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0xc82c721f klist_remove vmlinux EXPORT_SYMBOL_GPL
+0x2fa1f91f devlink_port_param_value_changed vmlinux EXPORT_SYMBOL_GPL
+0x11909ca9 dev_get_flags vmlinux EXPORT_SYMBOL
+0xb5445960 dev_pm_opp_get_max_volt_latency vmlinux EXPORT_SYMBOL_GPL
+0x04641624 usb_get_status vmlinux EXPORT_SYMBOL_GPL
+0xe631cf03 mptscsih_ioc_reset vmlinux EXPORT_SYMBOL
+0xc17c323e crypto_sha256_update vmlinux EXPORT_SYMBOL
+0x9d92f3ad __wait_on_bit_lock vmlinux EXPORT_SYMBOL
+0xb3253ed9 hpet_rtc_timer_init vmlinux EXPORT_SYMBOL_GPL
+0x62b1d76d l2tp_tunnel_register net/l2tp/l2tp_core EXPORT_SYMBOL_GPL
+0xc23c129d svc_auth_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x98077983 nf_ct_seq_offset net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x22f3cf93 ib_pack drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xbe607d00 fib_nl_delrule vmlinux EXPORT_SYMBOL_GPL
+0x7bd3c04f fib_nl_newrule vmlinux EXPORT_SYMBOL_GPL
+0xbac58131 gen_new_estimator vmlinux EXPORT_SYMBOL
+0x7cd6f042 cpufreq_get_current_driver vmlinux EXPORT_SYMBOL_GPL
+0x51f22316 input_class vmlinux EXPORT_SYMBOL_GPL
+0xa8b51b50 platform_unregister_drivers vmlinux EXPORT_SYMBOL_GPL
+0x9f8e1259 agp_backend_release vmlinux EXPORT_SYMBOL
+0xfc3b4246 acpi_bus_update_power vmlinux EXPORT_SYMBOL_GPL
+0xb5d49c29 pci_set_master vmlinux EXPORT_SYMBOL
+0xab47f330 iov_iter_pipe vmlinux EXPORT_SYMBOL
+0x2f33aecd blk_mq_stop_hw_queue vmlinux EXPORT_SYMBOL
+0x60c30e8c crypto_alg_mod_lookup vmlinux EXPORT_SYMBOL_GPL
+0x8dc2d8ae debugfs_create_ulong vmlinux EXPORT_SYMBOL_GPL
+0xb2929a52 jbd2_journal_inode_ranged_wait vmlinux EXPORT_SYMBOL
+0x880eeec6 freeze_super vmlinux EXPORT_SYMBOL
+0xbd27a938 __devm_request_region vmlinux EXPORT_SYMBOL
+0x0b053cec native_write_cr4 vmlinux EXPORT_SYMBOL
+0x73fb075d iw_cm_accept drivers/infiniband/core/iw_cm EXPORT_SYMBOL
+0x440b4830 bch_btree_iter_next drivers/md/bcache/bcache EXPORT_SYMBOL
+0xf0c08c32 i2c_new_dummy_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x6ad7aaeb drm_wait_one_vblank drivers/gpu/drm/drm EXPORT_SYMBOL
+0x64c29f7b nfs_pageio_resend fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x394d36b9 l3mdev_fib_table_by_index vmlinux EXPORT_SYMBOL_GPL
+0xb4c1c93e hid_connect vmlinux EXPORT_SYMBOL_GPL
+0xc2a83457 usb_set_device_state vmlinux EXPORT_SYMBOL_GPL
+0xc0a66050 scsi_autopm_put_device vmlinux EXPORT_SYMBOL_GPL
+0xd3b58e71 scsi_autopm_get_device vmlinux EXPORT_SYMBOL_GPL
+0x93a8d99b scsi_eh_finish_cmd vmlinux EXPORT_SYMBOL
+0x45439eca intel_gmch_probe vmlinux EXPORT_SYMBOL
+0x7ac50320 vc_cons vmlinux EXPORT_SYMBOL
+0x67b67e02 dma_async_device_unregister vmlinux EXPORT_SYMBOL
+0xca21ebd3 bitmap_free vmlinux EXPORT_SYMBOL
+0xcbc80e4c find_inode_nowait vmlinux EXPORT_SYMBOL
+0x6ba36c6a hwpoison_filter_flags_value vmlinux EXPORT_SYMBOL_GPL
+0x476167c8 remove_memory vmlinux EXPORT_SYMBOL_GPL
+0x4ad82489 transport_free_session drivers/target/target_core_mod EXPORT_SYMBOL
+0x612efd1f drm_add_edid_modes drivers/gpu/drm/drm EXPORT_SYMBOL
+0xf9c0b663 strlcat vmlinux EXPORT_SYMBOL
+0x2e2b40d2 strncat vmlinux EXPORT_SYMBOL
+0x64a56aa8 ata_common_sdev_attrs vmlinux EXPORT_SYMBOL_GPL
+0xe192743e acpi_kobj vmlinux EXPORT_SYMBOL_GPL
+0x4abd7a6c cpu_rmap_update vmlinux EXPORT_SYMBOL
+0x3e3cbc52 blk_queue_max_discard_sectors vmlinux EXPORT_SYMBOL
+0xbb6f025a asymmetric_key_generate_id vmlinux EXPORT_SYMBOL_GPL
+0xb3f7646e kthread_should_stop vmlinux EXPORT_SYMBOL
+0xf8456e37 nfnetlink_set_err net/netfilter/nfnetlink EXPORT_SYMBOL_GPL
+0x48e19582 mlx4_config_vxlan_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x29084d87 mr_mfc_seq_idx vmlinux EXPORT_SYMBOL
+0xd2fe2d67 netdev_notice vmlinux EXPORT_SYMBOL
+0x3164f78b mddev_init vmlinux EXPORT_SYMBOL_GPL
+0x00cbd041 ata_pci_device_suspend vmlinux EXPORT_SYMBOL_GPL
+0xd91639f5 pm_generic_freeze_noirq vmlinux EXPORT_SYMBOL_GPL
+0xa62780dc blk_rq_map_kern vmlinux EXPORT_SYMBOL
+0x4ba16886 sync_mapping_buffers vmlinux EXPORT_SYMBOL
+0xb9e79ce0 noop_invalidatepage vmlinux EXPORT_SYMBOL_GPL
+0xf686d7aa ceph_auth_is_authenticated net/ceph/libceph EXPORT_SYMBOL
+0x0672547a target_lun_is_rdonly drivers/target/target_core_mod EXPORT_SYMBOL
+0x8dce601f mlx4_get_parav_qkey drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x423134ba drm_hdmi_vendor_infoframe_from_display_mode drivers/gpu/drm/drm EXPORT_SYMBOL
+0x76249fd1 inet_ehash_nolisten vmlinux EXPORT_SYMBOL_GPL
+0x2c55c77a genl_family_attrbuf vmlinux EXPORT_SYMBOL
+0x46833af7 mpt_raid_phys_disk_get_num_paths vmlinux EXPORT_SYMBOL
+0xd147db2a __scsi_add_device vmlinux EXPORT_SYMBOL
+0xcddf1583 virtqueue_get_avail_addr vmlinux EXPORT_SYMBOL_GPL
+0x99fe4f22 fat_scan vmlinux EXPORT_SYMBOL_GPL
+0x19b1a401 datagram_poll vmlinux EXPORT_SYMBOL
+0xce229ee2 devfreq_monitor_suspend vmlinux EXPORT_SYMBOL
+0x01a33ab9 dca_unregister_notify vmlinux EXPORT_SYMBOL_GPL
+0x4e5c01ca __scsi_print_sense vmlinux EXPORT_SYMBOL
+0x06ad0d32 to_nvdimm_bus vmlinux EXPORT_SYMBOL_GPL
+0xadb6eddd __tty_alloc_driver vmlinux EXPORT_SYMBOL
+0xa5956abe ioread64_hi_lo vmlinux EXPORT_SYMBOL
+0x4d4d7b79 blk_mq_map_queues vmlinux EXPORT_SYMBOL_GPL
+0x1e9edfb7 seq_hlist_start_head_rcu vmlinux EXPORT_SYMBOL
+0xd901e94e current_time vmlinux EXPORT_SYMBOL
+0xa0d0047b user_path_at_empty vmlinux EXPORT_SYMBOL
+0x9fedc519 usb_stor_Bulk_transport drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0xae0ab30f drm_atomic_state_clear drivers/gpu/drm/drm EXPORT_SYMBOL
+0xca276e8b __tcf_em_tree_match vmlinux EXPORT_SYMBOL
+0xc6b3056d dev_mc_flush vmlinux EXPORT_SYMBOL
+0xbc17272f dev_uc_flush vmlinux EXPORT_SYMBOL
+0xdcc4089c xhci_suspend vmlinux EXPORT_SYMBOL_GPL
+0xc17e9946 usb_show_dynids vmlinux EXPORT_SYMBOL_GPL
+0xcb6575e8 pinctrl_enable vmlinux EXPORT_SYMBOL_GPL
+0xed61f6b3 security_release_secctx vmlinux EXPORT_SYMBOL
+0x2d994605 security_inode_copy_up_xattr vmlinux EXPORT_SYMBOL
+0xa04667bd jbd2_journal_check_available_features vmlinux EXPORT_SYMBOL
+0xe0cb96ee flush_old_exec vmlinux EXPORT_SYMBOL
+0x862258db timecounter_init vmlinux EXPORT_SYMBOL_GPL
+0x327b77e7 dma_free_attrs vmlinux EXPORT_SYMBOL
+0x5550679f mlx4_set_vf_link_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x539ea3af be_roce_register_driver drivers/net/ethernet/emulex/benet/be2net EXPORT_SYMBOL
+0xfd9bb65d drm_client_register drivers/gpu/drm/drm EXPORT_SYMBOL
+0x020d4402 inet_addr_type vmlinux EXPORT_SYMBOL
+0xdb88f320 inet_recvmsg vmlinux EXPORT_SYMBOL
+0x65ce6062 garp_request_leave vmlinux EXPORT_SYMBOL_GPL
+0x43d0127d llc_sap_close vmlinux EXPORT_SYMBOL
+0x55e31703 ethtool_convert_link_mode_to_legacy_u32 vmlinux EXPORT_SYMBOL
+0xfbe3eb39 mdiobus_unregister vmlinux EXPORT_SYMBOL
+0x5d6d9552 ata_bmdma_port_start32 vmlinux EXPORT_SYMBOL_GPL
+0xe123f3d9 dma_fence_release vmlinux EXPORT_SYMBOL
+0x79e69460 intel_iommu_gfx_mapped vmlinux EXPORT_SYMBOL_GPL
+0x08c73234 __tracepoint_rpm_suspend vmlinux EXPORT_SYMBOL_GPL
+0x92db8f68 do_trace_rcu_torture_read vmlinux EXPORT_SYMBOL_GPL
+0x2b700b89 ceph_auth_update_authorizer net/ceph/libceph EXPORT_SYMBOL
+0xb36290fa mii_link_ok drivers/net/mii EXPORT_SYMBOL
+0x4427634e nfs_commitdata_alloc fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x60af4aba phy_detach vmlinux EXPORT_SYMBOL
+0xe195a790 phy_driver_is_genphy_10g vmlinux EXPORT_SYMBOL_GPL
+0xed38c848 __tracepoint_rpm_idle vmlinux EXPORT_SYMBOL_GPL
+0x105b30e9 nf_ct_ext_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL
+0xb9e44205 transport_deregister_session drivers/target/target_core_mod EXPORT_SYMBOL
+0x8773a5dd mlx5_alloc_bfreg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4973df78 cxgbi_get_ep_param drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x721064e6 amd_iommu_bind_pasid drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL
+0x21a70361 pnfs_unregister_layoutdriver fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x327433a6 fscache_fsdef_index fs/fscache/fscache EXPORT_SYMBOL
+0xbb9d75f7 mr_fill_mroute vmlinux EXPORT_SYMBOL
+0xb172ffac efivars_sysfs_init vmlinux EXPORT_SYMBOL_GPL
+0xa8f42ea9 gov_attr_set_put vmlinux EXPORT_SYMBOL_GPL
+0xfd08d457 gov_attr_set_get vmlinux EXPORT_SYMBOL_GPL
+0xb9a61832 wakeup_source_remove vmlinux EXPORT_SYMBOL_GPL
+0x2d0bb48a stop_tty vmlinux EXPORT_SYMBOL
+0x2ee7c52b btree_visitor vmlinux EXPORT_SYMBOL_GPL
+0x87939892 dquot_get_dqblk vmlinux EXPORT_SYMBOL
+0x856a5ef3 des3_ede_encrypt lib/crypto/libdes EXPORT_SYMBOL_GPL
+0xd6636ca6 rdma_addr_size_in6 drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x36d2b2ac nfs_pageio_reset_write_mds fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xeb661531 ipv6_push_frag_opts vmlinux EXPORT_SYMBOL
+0x4725eda1 ir_raw_encode_carrier vmlinux EXPORT_SYMBOL
+0x42578e80 acpi_get_type vmlinux EXPORT_SYMBOL
+0x1e142f30 unlock_page_memcg vmlinux EXPORT_SYMBOL
+0x4629334c __preempt_count vmlinux EXPORT_SYMBOL
+0x33705026 geneve_dev_create_fb drivers/net/geneve EXPORT_SYMBOL_GPL
+0x7730f22d drbd_conn_str drivers/block/drbd/drbd EXPORT_SYMBOL
+0x08d18921 __drm_atomic_helper_plane_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xaeb51bef xfrm_policy_destroy vmlinux EXPORT_SYMBOL
+0x555b70ab inet_del_offload vmlinux EXPORT_SYMBOL
+0x9cdfb3f7 sysctl_fb_tunnels_only_for_init_net vmlinux EXPORT_SYMBOL
+0x3e53f4de devfreq_monitor_start vmlinux EXPORT_SYMBOL
+0xf2d2961a devm_hwmon_device_register_with_info vmlinux EXPORT_SYMBOL_GPL
+0x55c38291 device_connection_add vmlinux EXPORT_SYMBOL_GPL
+0x0cad4570 security_kernel_load_data vmlinux EXPORT_SYMBOL_GPL
+0x91adb00e jbd2_journal_stop vmlinux EXPORT_SYMBOL
+0x90495d49 __trace_note_message vmlinux EXPORT_SYMBOL_GPL
+0x74778a80 ipmi_get_my_LUN drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0x78541342 twofish_setkey crypto/twofish_common EXPORT_SYMBOL_GPL
+0xeb59e8c3 native_load_gs_index vmlinux EXPORT_SYMBOL
+0x1ac5d3cb strcspn vmlinux EXPORT_SYMBOL
+0x8382be3e sysctl_tcp_init_rto vmlinux EXPORT_SYMBOL
+0x56953e0d devlink_is_reload_failed vmlinux EXPORT_SYMBOL_GPL
+0xd209ffb2 neigh_ifdown vmlinux EXPORT_SYMBOL
+0x8e11f804 ahci_platform_ops vmlinux EXPORT_SYMBOL_GPL
+0x5201f035 zero_fill_bio_iter vmlinux EXPORT_SYMBOL
+0x62632161 freq_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL
+0x2d5e8872 md_allow_write vmlinux EXPORT_SYMBOL_GPL
+0x99070cf5 mddev_create_wb_pool vmlinux EXPORT_SYMBOL_GPL
+0x20698fc8 thermal_zone_unbind_cooling_device vmlinux EXPORT_SYMBOL_GPL
+0x191ccaad __tracepoint_iscsi_dbg_sw_tcp vmlinux EXPORT_SYMBOL_GPL
+0x3b681e87 ata_cable_ignore vmlinux EXPORT_SYMBOL_GPL
+0x225f3fe3 devm_get_free_pages vmlinux EXPORT_SYMBOL_GPL
+0x5e798ffb divider_get_val vmlinux EXPORT_SYMBOL_GPL
+0x0ffb4bf2 pci_scan_bridge vmlinux EXPORT_SYMBOL
+0x2115c372 pci_generic_config_read vmlinux EXPORT_SYMBOL_GPL
+0x58f563c7 bio_integrity_clone vmlinux EXPORT_SYMBOL
+0x02e5520e blk_mq_delay_kick_requeue_list vmlinux EXPORT_SYMBOL
+0x4e37c73e jbd2_journal_extend vmlinux EXPORT_SYMBOL
+0xa3c00c06 memcg_sockets_enabled_key vmlinux EXPORT_SYMBOL
+0x39103bac virtio_transport_notify_recv_pre_dequeue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL
+0xcebec217 ip_md_tunnel_xmit net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x862d398f drm_invalid_op drivers/gpu/drm/drm EXPORT_SYMBOL
+0x466c7431 skb_copy_and_csum_datagram_msg vmlinux EXPORT_SYMBOL
+0x04554194 hid_parse_report vmlinux EXPORT_SYMBOL_GPL
+0xe06d8f72 ehci_resume vmlinux EXPORT_SYMBOL_GPL
+0xe3948ff4 acpi_walk_dep_device_list vmlinux EXPORT_SYMBOL_GPL
+0x9ae4191f sbitmap_queue_init_node vmlinux EXPORT_SYMBOL_GPL
+0x060ba97c gen_pool_free_owner vmlinux EXPORT_SYMBOL
+0x5fc72f0e alloc_pages_exact vmlinux EXPORT_SYMBOL
+0x685e31ca groups_sort vmlinux EXPORT_SYMBOL
+0x7064efa4 ceph_msg_new net/ceph/libceph EXPORT_SYMBOL
+0xa13acdb8 svc_pool_map_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb312d0c4 svc_pool_map_get net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa30592be core_allocate_nexus_loss_ua drivers/target/target_core_mod EXPORT_SYMBOL
+0x306a1b67 adf_exit_admin_comms drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x090cceb1 drm_connector_set_tile_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7d98d78d drm_atomic_helper_plane_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x42e30bcd xdp_rxq_info_unused vmlinux EXPORT_SYMBOL_GPL
+0x2b965f1f ata_timing_compute vmlinux EXPORT_SYMBOL_GPL
+0x6fcc8263 nvme_wait_reset vmlinux EXPORT_SYMBOL_GPL
+0xf2b37cb8 virtio_break_device vmlinux EXPORT_SYMBOL_GPL
+0x9e0de07b fs_lookup_param vmlinux EXPORT_SYMBOL
+0xaf5a27ae svc_find_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xab162f3e svc_process net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x10968dbd rpc_force_rebind net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x14e5d026 drm_gem_objects_lookup drivers/gpu/drm/drm EXPORT_SYMBOL
+0x35ef54af nf_register_net_hook vmlinux EXPORT_SYMBOL
+0x3e8f55d6 __nla_reserve vmlinux EXPORT_SYMBOL
+0xe3ec2f2b alloc_chrdev_region vmlinux EXPORT_SYMBOL
+0xf6991546 inc_zone_page_state vmlinux EXPORT_SYMBOL
+0xb11625b9 cpu_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x28724165 rpc_killall_tasks net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa1fefe6a drm_dp_psr_setup_time drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xca9360b5 rb_next vmlinux EXPORT_SYMBOL
+0x04e27719 xt_compat_flush_offsets vmlinux EXPORT_SYMBOL_GPL
+0x4746541e netdev_bind_sb_channel_queue vmlinux EXPORT_SYMBOL
+0x655ec11f skb_checksum vmlinux EXPORT_SYMBOL
+0x5a8b7420 kfree_skb_list vmlinux EXPORT_SYMBOL
+0x13d22cf7 ohci_setup vmlinux EXPORT_SYMBOL_GPL
+0x1fa2b808 mpt_clear_taskmgmt_in_progress_flag vmlinux EXPORT_SYMBOL
+0x8a0c2f85 iommu_dev_enable_feature vmlinux EXPORT_SYMBOL_GPL
+0x4d405245 pci_pme_capable vmlinux EXPORT_SYMBOL
+0xf8cd58a8 fscrypt_fname_disk_to_usr vmlinux EXPORT_SYMBOL
+0x983e2a72 inode_permission vmlinux EXPORT_SYMBOL
+0x785b5950 buffer_migrate_page vmlinux EXPORT_SYMBOL
+0x92540fbf finish_wait vmlinux EXPORT_SYMBOL
+0x696d7e0b l1tf_mitigation vmlinux EXPORT_SYMBOL_GPL
+0x6e85b00b local_touch_nmi vmlinux EXPORT_SYMBOL_GPL
+0x706f2f31 svc_xprt_init net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xfdbc8994 vfio_add_group_dev drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x005b0882 nfs4_pnfs_ds_add fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x93cb439a nfs_get_client fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x61b7b126 simple_strtoull vmlinux EXPORT_SYMBOL
+0x8df7e8d6 cpumask_any_but vmlinux EXPORT_SYMBOL
+0x52d7b2fd llc_sap_list vmlinux EXPORT_SYMBOL
+0x56d1943a xdp_return_frame_rx_napi vmlinux EXPORT_SYMBOL_GPL
+0xd2fe6977 phy_start_aneg vmlinux EXPORT_SYMBOL
+0x7dc6c153 scsi_print_result vmlinux EXPORT_SYMBOL
+0x04b27bc2 do_dw_dma_enable vmlinux EXPORT_SYMBOL_GPL
+0xb219d56c wbinvd_on_cpu vmlinux EXPORT_SYMBOL
+0x63c4d61f __bitmap_weight vmlinux EXPORT_SYMBOL
+0x619cb7dd simple_read_from_buffer vmlinux EXPORT_SYMBOL
+0x83aa3aaa get_dev_pagemap vmlinux EXPORT_SYMBOL_GPL
+0xc91335fb clockevent_delta2ns vmlinux EXPORT_SYMBOL_GPL
+0xc472d957 irq_domain_simple_ops vmlinux EXPORT_SYMBOL_GPL
+0x2f3dcecb lc_index_of lib/lru_cache EXPORT_SYMBOL
+0x8c9afb59 nf_defrag_ipv6_enable net/ipv6/netfilter/nf_defrag_ipv6 EXPORT_SYMBOL_GPL
+0xf10f5f6b nf_defrag_ipv4_enable net/ipv4/netfilter/nf_defrag_ipv4 EXPORT_SYMBOL_GPL
+0x23d95205 edac_set_report_status drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x1ad3a8bc target_complete_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0x0a7e77f3 dm_btree_cursor_end drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa990061b mlx5_cmd_create_vport_lag drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x4e5db3e9 mlx4_SET_PORT_user_mtu drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xee31c41b nf_checksum vmlinux EXPORT_SYMBOL_GPL
+0x420f3d01 nvmem_unregister_notifier vmlinux EXPORT_SYMBOL_GPL
+0x45006cee default_red vmlinux EXPORT_SYMBOL
+0x6632f2bb clk_mux_val_to_index vmlinux EXPORT_SYMBOL_GPL
+0x0397edd5 fb_edid_to_monspecs vmlinux EXPORT_SYMBOL
+0x6f7fd013 pci_d3cold_enable vmlinux EXPORT_SYMBOL_GPL
+0xa681fe88 generate_random_uuid vmlinux EXPORT_SYMBOL
+0x688af52b get_mem_cgroup_from_page vmlinux EXPORT_SYMBOL
+0xda115e64 nf_ct_tcp_seqadj_set net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x1e382318 __tracepoint_bcache_btree_node_split drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x02a4593a t3_l2t_send_slow drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0x11a2e0ac crypto_dh_key_len crypto/dh_generic EXPORT_SYMBOL_GPL
+0xe4040919 crypto_transfer_hash_request_to_engine crypto/crypto_engine EXPORT_SYMBOL_GPL
+0x7c9f8fbe kvm_queue_exception_e arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xcf533d8a xfrm_policy_hash_rebuild vmlinux EXPORT_SYMBOL
+0x4ca04ccb get_net_ns_by_pid vmlinux EXPORT_SYMBOL_GPL
+0xdf5484d7 mbox_chan_received_data vmlinux EXPORT_SYMBOL_GPL
+0x86477f3a _dev_warn vmlinux EXPORT_SYMBOL
+0xd846c315 acpi_write_bit_register vmlinux EXPORT_SYMBOL
+0x4ba76caf fb_deferred_io_cleanup vmlinux EXPORT_SYMBOL_GPL
+0xce90833b vfs_statfs vmlinux EXPORT_SYMBOL
+0xc68f49ea target_submit_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0x51a806cf t3_l2e_free drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0x414eb655 drm_writeback_get_out_fence drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0abe9080 drm_fb_helper_output_poll_changed drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x927b2cb4 drm_dp_update_payload_part2 drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x407ac38e drm_dp_update_payload_part1 drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7afe324e halt_poll_ns_grow arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x34347676 __mdiobus_read vmlinux EXPORT_SYMBOL
+0x1098baaf _dev_alert vmlinux EXPORT_SYMBOL
+0xdb4d84a5 mipi_dsi_driver_register_full vmlinux EXPORT_SYMBOL
+0x8384647a acpi_map_pxm_to_online_node vmlinux EXPORT_SYMBOL
+0x6e40aeae af_alg_register_type vmlinux EXPORT_SYMBOL_GPL
+0x438610bd security_tun_dev_alloc_security vmlinux EXPORT_SYMBOL
+0x74b05091 list_lru_walk_one vmlinux EXPORT_SYMBOL_GPL
+0xffb1ddba xdr_buf_trim net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xa5bfb353 iscsit_sequence_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0xbd55c54b i2c_new_probed_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0x1889efff mlx5_accel_ipsec_device_caps drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xc8f3e213 drm_agp_free drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe09d1b69 nfs4_pnfs_ds_connect fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0xb8507076 kvm_read_l1_tsc arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x47e385d3 ip6_datagram_send_ctl vmlinux EXPORT_SYMBOL_GPL
+0x8648c6d9 xfrm_input_register_afinfo vmlinux EXPORT_SYMBOL
+0xe46a1192 udp_ioctl vmlinux EXPORT_SYMBOL
+0x206eee30 devlink_reload_disable vmlinux EXPORT_SYMBOL_GPL
+0x72c5ddff __mdiobus_write vmlinux EXPORT_SYMBOL
+0xe0c42004 genphy_loopback vmlinux EXPORT_SYMBOL
+0xe16367de tty_port_default_client_ops vmlinux EXPORT_SYMBOL_GPL
+0x7f7ae032 __pci_complete_power_transition vmlinux EXPORT_SYMBOL_GPL
+0xceb1f126 mpi_read_raw_data vmlinux EXPORT_SYMBOL_GPL
+0x0f4a9187 __nla_reserve_nohdr vmlinux EXPORT_SYMBOL
+0x35d427d8 async_xor_val vmlinux EXPORT_SYMBOL_GPL
+0x7dda30af unregister_tracepoint_module_notifier vmlinux EXPORT_SYMBOL_GPL
+0x13c19ee7 alarm_forward_now vmlinux EXPORT_SYMBOL_GPL
+0x8c278ebd rdma_find_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd51c29f1 dm_sm_disk_create drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xa85f50c0 mlx5_core_query_rq drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x5ab67e47 xsk_umem_uses_need_wakeup vmlinux EXPORT_SYMBOL
+0xd8da5385 rtnetlink_put_metrics vmlinux EXPORT_SYMBOL
+0x5ed90adc int_to_scsilun vmlinux EXPORT_SYMBOL
+0xe07a1426 is_nvdimm_bus_locked vmlinux EXPORT_SYMBOL
+0x589b476e blk_queue_max_write_same_sectors vmlinux EXPORT_SYMBOL
+0x2436f3f7 may_umount vmlinux EXPORT_SYMBOL
+0xf61baa65 pids_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL
+0x4a60e9a5 rpc_switch_client_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x30cacf5b cxgb4_crypto_send drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0xfc475429 drm_dev_unplug drivers/gpu/drm/drm EXPORT_SYMBOL
+0x84e5606e kvm_emulate_cpuid arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x63ea80b3 cper_mem_err_type_str vmlinux EXPORT_SYMBOL_GPL
+0x1e6f3def md_integrity_add_rdev vmlinux EXPORT_SYMBOL
+0xbbe5d1b3 iscsi_tcp_r2tpool_alloc vmlinux EXPORT_SYMBOL_GPL
+0x699fe53e iscsi_get_discovery_parent_name vmlinux EXPORT_SYMBOL_GPL
+0x92612bb4 iommu_detach_device vmlinux EXPORT_SYMBOL_GPL
+0xaabca776 tty_buffer_unlock_exclusive vmlinux EXPORT_SYMBOL_GPL
+0x86a657e7 pcix_get_max_mmrbc vmlinux EXPORT_SYMBOL
+0x7960c170 block_read_full_page vmlinux EXPORT_SYMBOL
+0x855b477b console_drivers vmlinux EXPORT_SYMBOL_GPL
+0x59a78ae1 adf_enable_vf2pf_comms drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0xef23f648 nfs_mark_client_ready fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x335f9d08 kvm_skip_emulated_instruction arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb7d927c3 devlink_dpipe_table_register vmlinux EXPORT_SYMBOL_GPL
+0xc6f25821 to_nvdimm vmlinux EXPORT_SYMBOL_GPL
+0x99e44610 bus_sort_breadthfirst vmlinux EXPORT_SYMBOL_GPL
+0xc9b6ba06 pci_alloc_host_bridge vmlinux EXPORT_SYMBOL
+0x9fe939e1 mpi_powm vmlinux EXPORT_SYMBOL_GPL
+0x1d5400b2 dec_node_page_state vmlinux EXPORT_SYMBOL
+0xb5392125 ipt_alloc_initial_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL_GPL
+0xe0c49c5c ct_sip_get_header net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL
+0x5920002a rdma_replace_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x91c72ef1 edac_pci_del_device drivers/edac/edac_core EXPORT_SYMBOL_GPL
+0x89c65966 drm_dev_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe8a034df drm_dev_exit drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8ce1f175 nfs_open fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x98a4dee4 tcp_v4_connect vmlinux EXPORT_SYMBOL
+0x28026213 tcp_sync_mss vmlinux EXPORT_SYMBOL
+0x179d2be7 devlink_resource_occ_get_unregister vmlinux EXPORT_SYMBOL_GPL
+0xcde05d89 ir_raw_event_store_with_timeout vmlinux EXPORT_SYMBOL_GPL
+0x110f10ef rc_free_device vmlinux EXPORT_SYMBOL_GPL
+0x2f079875 sas_port_delete vmlinux EXPORT_SYMBOL
+0x19537396 alloc_dax vmlinux EXPORT_SYMBOL_GPL
+0x305f38b5 tpm_chip_alloc vmlinux EXPORT_SYMBOL_GPL
+0xc42dcb99 acpi_evaluate_ost vmlinux EXPORT_SYMBOL
+0x3fe2ccbe memweight vmlinux EXPORT_SYMBOL
+0x37700ce7 jbd2_journal_ack_err vmlinux EXPORT_SYMBOL
+0xd0c05159 emergency_restart vmlinux EXPORT_SYMBOL_GPL
+0x1e3f728d dm_block_data drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x072460c4 mlx5_fill_page_frag_array drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x3798a50a cxgb4_remove_tid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x4f0f79fa cxgb3_remove_tid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0xbffb684e __netpoll_free vmlinux EXPORT_SYMBOL_GPL
+0xf414a2bf netdev_upper_dev_link vmlinux EXPORT_SYMBOL
+0xeb9d9469 md_error vmlinux EXPORT_SYMBOL
+0xfa85d84b serio_rescan vmlinux EXPORT_SYMBOL
+0x1d318001 clk_fixed_factor_ops vmlinux EXPORT_SYMBOL_GPL
+0x479f7d4b clk_bulk_disable vmlinux EXPORT_SYMBOL_GPL
+0x4a420d09 acpi_bus_detach_private_data vmlinux EXPORT_SYMBOL_GPL
+0x9ada24e5 framebuffer_alloc vmlinux EXPORT_SYMBOL
+0x47cfd825 kstrtouint_from_user vmlinux EXPORT_SYMBOL
+0x312d6cbf adf_devmgr_in_reset drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL
+0x38efaf5a dm_region_hash_destroy drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xca9c063f kvm_vcpu_read_guest_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x5b67bde4 xfrm_policy_byid vmlinux EXPORT_SYMBOL
+0xb382e9c4 mbox_free_channel vmlinux EXPORT_SYMBOL_GPL
+0x90017d10 cpufreq_driver_target vmlinux EXPORT_SYMBOL_GPL
+0xe3a50dd6 dm_post_suspending vmlinux EXPORT_SYMBOL_GPL
+0x6552a594 devm_nsio_enable vmlinux EXPORT_SYMBOL_GPL
+0x324be179 tpm_transmit_cmd vmlinux EXPORT_SYMBOL_GPL
+0x60635618 pci_prepare_to_sleep vmlinux EXPORT_SYMBOL
+0x38ae1486 sbitmap_any_bit_clear vmlinux EXPORT_SYMBOL_GPL
+0xddf34bea ahash_register_instance vmlinux EXPORT_SYMBOL_GPL
+0xe02a6b39 load_nls_default vmlinux EXPORT_SYMBOL
+0xe1a24e27 nonseekable_open vmlinux EXPORT_SYMBOL
+0x5a9c945d kmem_cache_create vmlinux EXPORT_SYMBOL
+0xfe53ac04 queue_work_node vmlinux EXPORT_SYMBOL_GPL
+0x5fdfa2c1 amd_pmu_enable_virt vmlinux EXPORT_SYMBOL_GPL
+0x57e16c3e dm_rh_get_state drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x0be67537 dm_btree_walk drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x6ec1d175 mlxsw_afa_create drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xbeac05cd mlxsw_afk_create drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xe945024f mlx4_mr_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x4eb5bea6 drm_self_refresh_helper_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x32d09394 cpuid_query_maxphyaddr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x503663fc kvm_put_guest_xcr0 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x10bc15b4 inet_get_local_port_range vmlinux EXPORT_SYMBOL
+0xc42f44fc devm_nvmem_device_get vmlinux EXPORT_SYMBOL_GPL
+0xadd925c4 devm_nvmem_device_put vmlinux EXPORT_SYMBOL_GPL
+0x28d27a4d ohci_restart vmlinux EXPORT_SYMBOL_GPL
+0x67c4a9fc phy_get_eee_err vmlinux EXPORT_SYMBOL
+0x7f3407a7 fc_seq_start_next vmlinux EXPORT_SYMBOL
+0x71ed18fd nvme_reset_ctrl vmlinux EXPORT_SYMBOL_GPL
+0xf77337a1 hdmi_audio_infoframe_check vmlinux EXPORT_SYMBOL
+0x7295591c pci_bus_claim_resources vmlinux EXPORT_SYMBOL
+0xd0d3f0a4 gen_pool_avail vmlinux EXPORT_SYMBOL_GPL
+0xdd410c83 __seq_open_private vmlinux EXPORT_SYMBOL
+0x1c957af2 seq_escape vmlinux EXPORT_SYMBOL
+0x9e55f0c3 usb_stor_resume drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x6a056e7f mlx5_core_destroy_sq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x738dec41 mlx5_core_destroy_rq_tracked drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xa77e75fa drm_client_dev_hotplug drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9586f940 reset_shadow_zero_bits_mask arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xd6f50cf7 xfrm_ealg_get_byname vmlinux EXPORT_SYMBOL_GPL
+0x559b27f8 xdp_do_flush_map vmlinux EXPORT_SYMBOL_GPL
+0x61ff6c69 __sk_queue_drop_skb vmlinux EXPORT_SYMBOL
+0xee74b229 sock_alloc_file vmlinux EXPORT_SYMBOL
+0x5eb24829 dm_shift_arg vmlinux EXPORT_SYMBOL
+0x4886476e nd_btt_probe vmlinux EXPORT_SYMBOL
+0x32d9f5ea device_match_any vmlinux EXPORT_SYMBOL_GPL
+0x0228925f iowrite64_hi_lo vmlinux EXPORT_SYMBOL
+0x436fbc8a __generic_file_write_iter vmlinux EXPORT_SYMBOL
+0xdbe0cae0 mlx4_pd_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x08f272e1 nfs_access_set_mask fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xa0e4e2a2 kvm_vcpu_read_guest_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xe9bcf189 xfrm_lookup vmlinux EXPORT_SYMBOL
+0x15510a89 devlink_fmsg_binary_put vmlinux EXPORT_SYMBOL_GPL
+0x3d484cae skb_store_bits vmlinux EXPORT_SYMBOL
+0x22b90986 fc_exch_mgr_del vmlinux EXPORT_SYMBOL
+0x313439f3 fc_exch_mgr_add vmlinux EXPORT_SYMBOL
+0x96c23dea ata_sas_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL
+0xe80618ce dma_resv_fini vmlinux EXPORT_SYMBOL
+0x976d577a page_cache_sync_readahead vmlinux EXPORT_SYMBOL_GPL
+0x25264839 __cgroup_bpf_run_filter_skb vmlinux EXPORT_SYMBOL
+0x7681946c unregister_pm_notifier vmlinux EXPORT_SYMBOL_GPL
+0x9e040110 unregister_ip_vs_pe net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL
+0x87b76460 nft_fib_init net/netfilter/nft_fib EXPORT_SYMBOL_GPL
+0xb81df8b6 i2c_smbus_read_byte drivers/i2c/i2c-core EXPORT_SYMBOL
+0xd0144407 i2c_probe_func_quick_read drivers/i2c/i2c-core EXPORT_SYMBOL_GPL
+0xfaa5eea3 mlx4_set_admin_guid drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x7d951e40 inet_unregister_protosw vmlinux EXPORT_SYMBOL
+0xcf1da4f6 devm_devfreq_register_opp_notifier vmlinux EXPORT_SYMBOL
+0x1a1d9608 cpufreq_dbs_governor_stop vmlinux EXPORT_SYMBOL_GPL
+0xc0d7df85 dm_bufio_new vmlinux EXPORT_SYMBOL_GPL
+0xd2affb81 iscsi_scan_finished vmlinux EXPORT_SYMBOL_GPL
+0x6520fec2 __tracepoint_iscsi_dbg_session vmlinux EXPORT_SYMBOL_GPL
+0xbcbbdde5 _dev_info vmlinux EXPORT_SYMBOL
+0xa1ed9c8b add_hwgenerator_randomness vmlinux EXPORT_SYMBOL_GPL
+0x6dcf857f uuid_null vmlinux EXPORT_SYMBOL
+0xd6d0ea88 __posix_acl_chmod vmlinux EXPORT_SYMBOL
+0x1c798d9f for_each_kernel_tracepoint vmlinux EXPORT_SYMBOL_GPL
+0xe295c0ff is_hpet_enabled vmlinux EXPORT_SYMBOL_GPL
+0x71d975bb svc_xprt_put net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xea2f35b0 drm_plane_create_alpha_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x1a89027a devres_for_each_res vmlinux EXPORT_SYMBOL_GPL
+0x7911c2c1 device_remove_file_self vmlinux EXPORT_SYMBOL_GPL
+0x82daa5d0 pci_alloc_dev vmlinux EXPORT_SYMBOL
+0x668137e7 blkcipher_walk_phys vmlinux EXPORT_SYMBOL_GPL
+0x7da9e8ce security_inet_conn_established vmlinux EXPORT_SYMBOL
+0x3dfc897c seq_hlist_start_head vmlinux EXPORT_SYMBOL
+0x41be8523 clocksource_change_rating vmlinux EXPORT_SYMBOL
+0x49ed86a0 ZSTD_endStream lib/zstd/zstd_compress EXPORT_SYMBOL
+0x90ac178e ip6_tnl_rcv_ctl net/ipv6/ip6_tunnel EXPORT_SYMBOL_GPL
+0x0a8150d7 rdma_create_qp drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x34123380 rdma_create_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x5f31cf33 usb_stor_post_reset drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x1cb8f858 mlxsw_reg_trans_query drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x348445e7 mlx4_read_clock drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xe4bc5aad cxgb4_free_atid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL
+0x0f41bd2e cxgb3_free_atid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL
+0xd6e572b9 cxgbi_bind_conn drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0x1ec1442e nfs_file_fsync fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x0925493f clear_page_orig vmlinux EXPORT_SYMBOL_GPL
+0x39991865 icmp_global_allow vmlinux EXPORT_SYMBOL
+0x29e182ae peernet2id_alloc vmlinux EXPORT_SYMBOL_GPL
+0xd36760ef __usb_get_extra_descriptor vmlinux EXPORT_SYMBOL_GPL
+0x87b2b34e __tracepoint_attach_device_to_domain vmlinux EXPORT_SYMBOL_GPL
+0x03b48867 pci_hp_create_module_link vmlinux EXPORT_SYMBOL_GPL
+0xfcfcf5a3 pci_bus_assign_resources vmlinux EXPORT_SYMBOL
+0xc4ba9235 crypto_aead_decrypt vmlinux EXPORT_SYMBOL_GPL
+0x622cbd86 crypto_aead_encrypt vmlinux EXPORT_SYMBOL_GPL
+0xce2b2b87 mark_buffer_write_io_error vmlinux EXPORT_SYMBOL
+0xe18ffbfb find_get_entry vmlinux EXPORT_SYMBOL
+0xa389a49a profile_event_register vmlinux EXPORT_SYMBOL_GPL
+0xe3b63242 nf_tproxy_get_sock_v6 net/ipv6/netfilter/nf_tproxy_ipv6 EXPORT_SYMBOL_GPL
+0xba93afff __nf_conntrack_helper_find net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x7a8a49e2 ppp_input_error drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xcc3e4c1b ppp_unit_number drivers/net/ppp/ppp_generic EXPORT_SYMBOL
+0xfe241a27 drm_dev_enter drivers/gpu/drm/drm EXPORT_SYMBOL
+0xbbc4ef97 ocfs2_stack_supports_plocks fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x91a00a01 nfs_generic_pg_test fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x509d6230 nfs_init_server_rpcclient fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x4648b2aa phy_gbit_fibre_features vmlinux EXPORT_SYMBOL_GPL
+0x615911d7 __bitmap_set vmlinux EXPORT_SYMBOL
+0xc10bb889 sysfs_rename_link_ns vmlinux EXPORT_SYMBOL_GPL
+0x7daece67 quota_send_warning vmlinux EXPORT_SYMBOL
+0x3ba91358 __dquot_transfer vmlinux EXPORT_SYMBOL
+0x1a6bf28f fsnotify_get_cookie vmlinux EXPORT_SYMBOL_GPL
+0x2094bce9 lock_two_nondirectories vmlinux EXPORT_SYMBOL
+0x7b4ef438 unregister_kprobes vmlinux EXPORT_SYMBOL_GPL
+0x2f707605 drm_universal_plane_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9e4c51ce drm_connector_update_edid_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x82a1e929 drm_gem_create_mmap_offset_size drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe4908ee5 drm_primary_helper_funcs drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xcfe259b1 kvm_scale_tsc arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x533848ac xsk_umem_has_addrs vmlinux EXPORT_SYMBOL
+0x4fd28fed dcb_setapp vmlinux EXPORT_SYMBOL
+0x4c6bd9fe dcb_getapp vmlinux EXPORT_SYMBOL
+0x6628daa7 register_netdevice vmlinux EXPORT_SYMBOL
+0x70d28e21 ps2_init vmlinux EXPORT_SYMBOL
+0x4aaca8c8 fc_exch_init vmlinux EXPORT_SYMBOL
+0x684a2b9c ahci_reset_em vmlinux EXPORT_SYMBOL_GPL
+0x86c961b3 __set_dax_synchronous vmlinux EXPORT_SYMBOL_GPL
+0x3a32839e intel_gtt_chipset_flush vmlinux EXPORT_SYMBOL
+0xea7ece7e user_describe vmlinux EXPORT_SYMBOL_GPL
+0xeb422d62 blkdev_put vmlinux EXPORT_SYMBOL
+0xd0d5423d blkdev_get vmlinux EXPORT_SYMBOL
+0xd24f4050 finish_no_open vmlinux EXPORT_SYMBOL
+0xf0223313 bpf_prog_get_type_path vmlinux EXPORT_SYMBOL
+0x2bb789f3 ebt_do_table net/bridge/netfilter/ebtables EXPORT_SYMBOL
+0x6b79a7f9 ip_set_type_unregister net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL
+0x059472c5 ipvlan_link_delete drivers/net/ipvlan/ipvlan EXPORT_SYMBOL_GPL
+0x6f1990bb fscache_enqueue_operation fs/fscache/fscache EXPORT_SYMBOL
+0x2d2dd36f kobj_ns_grab_current vmlinux EXPORT_SYMBOL_GPL
+0xe722a07d __neigh_set_probe_once vmlinux EXPORT_SYMBOL
+0xc7ed4907 build_skb vmlinux EXPORT_SYMBOL
+0x7cf0a150 md_wait_for_blocked_rdev vmlinux EXPORT_SYMBOL
+0x1de62624 get_tz_trend vmlinux EXPORT_SYMBOL
+0xa4dcebbd fwnode_handle_put vmlinux EXPORT_SYMBOL_GPL
+0x52252316 clk_unregister_fixed_rate vmlinux EXPORT_SYMBOL_GPL
+0xb218b5ff drm_atomic_nonblocking_commit drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9d523a3d nexthop_find_by_id vmlinux EXPORT_SYMBOL_GPL
+0x766edaa7 tcp_get_info vmlinux EXPORT_SYMBOL_GPL
+0x1e2e4be6 skb_to_sgvec_nomark vmlinux EXPORT_SYMBOL_GPL
+0x92a222aa __alloc_skb vmlinux EXPORT_SYMBOL
+0xf7cb6bba devfreq_monitor_stop vmlinux EXPORT_SYMBOL
+0x644ebe40 ata_sff_tf_load vmlinux EXPORT_SYMBOL_GPL
+0xb9e28357 jbd2_trans_will_send_data_barrier vmlinux EXPORT_SYMBOL
+0x1e3801bf jbd2_journal_begin_ordered_truncate vmlinux EXPORT_SYMBOL
+0x02ba1187 vm_zone_stat vmlinux EXPORT_SYMBOL
+0x6988ecbb file_fdatawait_range vmlinux EXPORT_SYMBOL
+0x055e77e8 jiffies_64 vmlinux EXPORT_SYMBOL
+0xf21017d9 mutex_trylock vmlinux EXPORT_SYMBOL
+0x3f4b6caf housekeeping_cpumask vmlinux EXPORT_SYMBOL_GPL
+0x8df8d56a svc_create net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x10e6ccea dm_bitset_clear_bit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x9507547f ocfs2_cluster_disconnect fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x2dc3fb0a kvm_inject_pending_timer_irqs arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xaffc1be1 mptscsih_abort vmlinux EXPORT_SYMBOL
+0x5c6f9a10 iscsi_tcp_segment_done vmlinux EXPORT_SYMBOL_GPL
+0xf3bb032b blk_mq_start_stopped_hw_queues vmlinux EXPORT_SYMBOL
+0x9026b1a4 mcsafe_key vmlinux EXPORT_SYMBOL_GPL
+0xca80eb72 ceph_pg_pool_name_by_id net/ceph/libceph EXPORT_SYMBOL
+0x885b0024 dm_array_new drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xc6d18a25 xfrm_state_migrate vmlinux EXPORT_SYMBOL
+0xff7d9718 pci_cleanup_aer_uncorrect_error_status vmlinux EXPORT_SYMBOL_GPL
+0xfce98200 dcache_readdir vmlinux EXPORT_SYMBOL
+0xf0399664 inode_add_bytes vmlinux EXPORT_SYMBOL
+0x2bc17ff9 thaw_super vmlinux EXPORT_SYMBOL
+0x504c9204 generic_file_mmap vmlinux EXPORT_SYMBOL
+0x5b6b0329 swiotlb_max_segment vmlinux EXPORT_SYMBOL_GPL
+0x0f053cba abort_creds vmlinux EXPORT_SYMBOL
+0xac04ddb9 drm_mode_create_dvi_i_properties drivers/gpu/drm/drm EXPORT_SYMBOL
+0x95dbc76d drm_ht_create drivers/gpu/drm/drm EXPORT_SYMBOL
+0x20978fb9 idr_find vmlinux EXPORT_SYMBOL_GPL
+0x204c19f5 tcp_alloc_md5sig_pool vmlinux EXPORT_SYMBOL
+0xb4b11c51 xt_target_to_user vmlinux EXPORT_SYMBOL_GPL
+0x38b92846 llc_remove_pack vmlinux EXPORT_SYMBOL
+0x4188d439 neigh_rand_reach_time vmlinux EXPORT_SYMBOL
+0xbf3057b9 dev_forward_skb vmlinux EXPORT_SYMBOL_GPL
+0x4df2ea84 gen_estimator_read vmlinux EXPORT_SYMBOL
+0x4679543c sock_common_setsockopt vmlinux EXPORT_SYMBOL
+0xb44d8eb1 sock_common_getsockopt vmlinux EXPORT_SYMBOL
+0x5e90c202 mptscsih_slave_configure vmlinux EXPORT_SYMBOL
+0x9564c299 n_tty_inherit_ops vmlinux EXPORT_SYMBOL_GPL
+0x17614bf3 apei_resources_sub vmlinux EXPORT_SYMBOL_GPL
+0xf04429b4 acpi_bus_get_status_handle vmlinux EXPORT_SYMBOL_GPL
+0xb9e276cf wrmsr_safe_regs_on_cpu vmlinux EXPORT_SYMBOL
+0x65408378 zlib_inflate_blob vmlinux EXPORT_SYMBOL
+0xcea0c0ff security_sctp_sk_clone vmlinux EXPORT_SYMBOL
+0xae4384f4 try_to_writeback_inodes_sb vmlinux EXPORT_SYMBOL
+0x54b97698 vfs_mknod vmlinux EXPORT_SYMBOL
+0x1c648225 klp_enable_patch vmlinux EXPORT_SYMBOL_GPL
+0x8f38d383 ex_handler_default vmlinux EXPORT_SYMBOL
+0x2f3d0c66 nft_chain_validate_hooks net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xeafa5e33 ib_get_rmpp_segment drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xa8306b78 scaled_ppm_to_ppb drivers/ptp/ptp EXPORT_SYMBOL
+0x8be02e90 mlx4_multicast_attach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x285f1e9d skb_dequeue vmlinux EXPORT_SYMBOL
+0xf0646804 clk_hw_register vmlinux EXPORT_SYMBOL_GPL
+0x9f4f2aa3 acpi_gbl_FADT vmlinux EXPORT_SYMBOL
+0xdcd0712d filemap_write_and_wait vmlinux EXPORT_SYMBOL
+0x27bbf221 disable_irq_nosync vmlinux EXPORT_SYMBOL
+0xb5c383a2 nf_send_reset net/ipv4/netfilter/nf_reject_ipv4 EXPORT_SYMBOL_GPL
+0x084876fc dm_cell_promote_or_release drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL
+0x5d577935 mlx4_srq_query drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0xab46ee58 drm_framebuffer_unregister_private drivers/gpu/drm/drm EXPORT_SYMBOL
+0x44f44470 pnfs_destroy_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL
+0x719e0e44 add_uevent_var vmlinux EXPORT_SYMBOL_GPL
+0xd0b832be inet_rtx_syn_ack vmlinux EXPORT_SYMBOL
+0x7744e12d register_dca_provider vmlinux EXPORT_SYMBOL_GPL
+0xa8324e3d __pm_relax vmlinux EXPORT_SYMBOL_GPL
+0xf7584a9c find_font vmlinux EXPORT_SYMBOL
+0x6f791233 alloc_cpu_rmap vmlinux EXPORT_SYMBOL
+0x5d4bfabe blk_queue_chunk_sectors vmlinux EXPORT_SYMBOL
+0x107cae90 blk_clear_pm_only vmlinux EXPORT_SYMBOL_GPL
+0x844712df perf_event_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x8080f398 udp_tunnel_drop_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL
+0x3251d762 nf_tables_trans_destroy_flush_work net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xff54f065 ipmi_smi_watcher_unregister drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL
+0xad728646 tcp_filter vmlinux EXPORT_SYMBOL
+0x3f614c83 netdev_features_change vmlinux EXPORT_SYMBOL
+0xe6d16f41 platform_get_resource_byname vmlinux EXPORT_SYMBOL_GPL
+0x766ccc19 acpi_unbind_one vmlinux EXPORT_SYMBOL_GPL
+0x67910fb3 fbcon_update_vcs vmlinux EXPORT_SYMBOL
+0x67817cf4 smp_ops vmlinux EXPORT_SYMBOL_GPL
+0xd505c3e0 nf_ct_port_nlattr_tuple_size net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x53f0526b usb_stor_adjust_quirks drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x15d8aa41 __drm_printfn_seq_file drivers/gpu/drm/drm EXPORT_SYMBOL
+0xeaa4a150 drm_mode_probed_add drivers/gpu/drm/drm EXPORT_SYMBOL
+0xa227d21b kvm_init arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb950a45f mpt_register vmlinux EXPORT_SYMBOL
+0x395774de phy_start vmlinux EXPORT_SYMBOL
+0x3ab7b1cc scsi_set_sense_field_pointer vmlinux EXPORT_SYMBOL
+0xde1c02ad devres_find vmlinux EXPORT_SYMBOL_GPL
+0xf7a2687e user_free_preparse vmlinux EXPORT_SYMBOL_GPL
+0x258f8c9b page_mapped vmlinux EXPORT_SYMBOL
+0x00513f58 get_timespec64 vmlinux EXPORT_SYMBOL_GPL
+0x319d493d proc_dostring vmlinux EXPORT_SYMBOL
+0xe2bdb6cf ib_query_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x79449961 drm_crtc_set_max_vblank_count drivers/gpu/drm/drm EXPORT_SYMBOL
+0x3ef506c9 drm_dp_mst_detect_port drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x504cb053 simd_aead_create_compat crypto/crypto_simd EXPORT_SYMBOL_GPL
+0xbb1db4b3 __x86_set_memory_region arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xdf6df7f6 __kvm_set_memory_region arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x444cc8ed tcp_md5_needed vmlinux EXPORT_SYMBOL
+0x3fee7b76 genlmsg_put vmlinux EXPORT_SYMBOL
+0xff707c7c fc_fc4_register_provider vmlinux EXPORT_SYMBOL
+0xd2878e52 pci_bus_write_config_byte vmlinux EXPORT_SYMBOL
+0xd7d280ad irq_poll_complete vmlinux EXPORT_SYMBOL
+0x47709e42 free_anon_bdev vmlinux EXPORT_SYMBOL
+0x1b590e1f usb_stor_control_msg drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0x864c9410 raw_hash_sk vmlinux EXPORT_SYMBOL_GPL
+0x55cee8b9 tcp_seq_start vmlinux EXPORT_SYMBOL
+0x12f94143 dst_cache_get vmlinux EXPORT_SYMBOL_GPL
+0xf24bc9d7 rps_sock_flow_table vmlinux EXPORT_SYMBOL
+0xdfaf7d05 md_bitmap_close_sync vmlinux EXPORT_SYMBOL
+0xc6c142a4 md_bitmap_startwrite vmlinux EXPORT_SYMBOL
+0x887bcb1a virtio_max_dma_size vmlinux EXPORT_SYMBOL_GPL
+0x749402b8 blk_mq_complete_request vmlinux EXPORT_SYMBOL
+0x6d7e951e rcu_exp_batches_completed vmlinux EXPORT_SYMBOL_GPL
+0xdbdb0e8b request_any_context_irq vmlinux EXPORT_SYMBOL_GPL
+0x8ce716de ieee802154_wake_queue net/mac802154/mac802154 EXPORT_SYMBOL
+0xafdf0da3 uio_unregister_device drivers/uio/uio EXPORT_SYMBOL_GPL
+0xbe6f3db3 mlx5_core_query_q_counter drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x0e9ab9cd mlx4_port_map_set drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x500858b9 i915_read_mch_val drivers/gpu/drm/i915/i915 EXPORT_SYMBOL_GPL
+0x241d2bb1 drm_atomic_helper_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7c5a46db nfs_wb_all fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xd4d1983c udplite_table vmlinux EXPORT_SYMBOL
+0xcbe16fc3 cpufreq_driver_resolve_freq vmlinux EXPORT_SYMBOL_GPL
+0x20dbbedf pm_runtime_get_if_in_use vmlinux EXPORT_SYMBOL_GPL
+0xf050d15c mipi_dsi_dcs_set_page_address vmlinux EXPORT_SYMBOL
+0x46394891 serial8250_do_set_mctrl vmlinux EXPORT_SYMBOL_GPL
+0x8d45f051 serial8250_do_get_mctrl vmlinux EXPORT_SYMBOL_GPL
+0xdb63a944 acpi_lpat_get_conversion_table vmlinux EXPORT_SYMBOL_GPL
+0x74a30369 pci_ats_page_aligned vmlinux EXPORT_SYMBOL_GPL
+0x9fb39d55 msi_desc_to_pci_dev vmlinux EXPORT_SYMBOL
+0xfbf48415 blk_queue_max_segment_size vmlinux EXPORT_SYMBOL
+0x1da53ff1 mmu_notifier_get_locked vmlinux EXPORT_SYMBOL_GPL
+0x46036a85 vhost_new_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0x9d5ce8a7 ib_destroy_rwq_ind_table drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7fdde0b4 mlx4_handle_eth_header_mcast_prio drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x611cfa85 klist_add_tail vmlinux EXPORT_SYMBOL_GPL
+0xae77d2f9 ip_fraglist_init vmlinux EXPORT_SYMBOL
+0x050877b9 dmi_first_match vmlinux EXPORT_SYMBOL
+0x2473f47e dm_table_get_size vmlinux EXPORT_SYMBOL
+0x663fd3cc ahci_shost_attrs vmlinux EXPORT_SYMBOL_GPL
+0x600683d3 do_unblank_screen vmlinux EXPORT_SYMBOL
+0x1038b96f adxl_get_component_names vmlinux EXPORT_SYMBOL_GPL
+0xc43bb6c0 pci_find_ext_capability vmlinux EXPORT_SYMBOL_GPL
+0x7c154a92 __tracepoint_rdpmc vmlinux EXPORT_SYMBOL
+0x34bab869 look_up_OID vmlinux EXPORT_SYMBOL_GPL
+0xfa50bd29 fscrypt_release_ctx vmlinux EXPORT_SYMBOL
+0x38468a34 kernel_read_file vmlinux EXPORT_SYMBOL_GPL
+0x6e9dd606 __symbol_put vmlinux EXPORT_SYMBOL
+0x043a3b0a pci_msi_prepare vmlinux EXPORT_SYMBOL_GPL
+0xab694fcb iscsit_increment_maxcmdsn drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x3cc0221a drm_compat_ioctl drivers/gpu/drm/drm EXPORT_SYMBOL
+0x37338281 drm_connector_attach_scaling_mode_property drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7ebf4ace __tracepoint_nfs_xdr_status fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xeb00f746 netpoll_send_udp vmlinux EXPORT_SYMBOL
+0x2f68154a sk_msg_zerocopy_from_iter vmlinux EXPORT_SYMBOL_GPL
+0x20835a9f __xdp_release_frame vmlinux EXPORT_SYMBOL_GPL
+0xf665f74f sock_load_diag_module vmlinux EXPORT_SYMBOL
+0x087f955f md_bitmap_update_sb vmlinux EXPORT_SYMBOL
+0xb3e8bdcf genphy_restart_aneg vmlinux EXPORT_SYMBOL
+0xfa4667d1 fc_seq_release vmlinux EXPORT_SYMBOL
+0x3489859f acpi_enter_sleep_state_s4bios vmlinux EXPORT_SYMBOL
+0xedd8af92 pci_claim_resource vmlinux EXPORT_SYMBOL
+0xb0371916 mmu_notifier_range_update_to_read_only vmlinux EXPORT_SYMBOL_GPL
+0xf09b5d9a get_zeroed_page vmlinux EXPORT_SYMBOL
+0xd0fde16f get_kernel_page vmlinux EXPORT_SYMBOL_GPL
+0x325cc6d4 bpf_prog_put vmlinux EXPORT_SYMBOL_GPL
+0x4d3af7fa ocfs2_cluster_hangup fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0x394468ef devlink_port_attrs_pci_vf_set vmlinux EXPORT_SYMBOL_GPL
+0x54af7825 devlink_port_attrs_pci_pf_set vmlinux EXPORT_SYMBOL_GPL
+0xd30be6d3 ndo_dflt_fdb_dump vmlinux EXPORT_SYMBOL
+0xc4f81ae0 mbox_request_channel vmlinux EXPORT_SYMBOL_GPL
+0xb9144fd8 input_close_device vmlinux EXPORT_SYMBOL
+0xe69dd969 mpt_raid_phys_disk_pg0 vmlinux EXPORT_SYMBOL
+0x563e6dff sort_r vmlinux EXPORT_SYMBOL
+0x0d03383b af_alg_sendmsg vmlinux EXPORT_SYMBOL_GPL
+0x2364da19 key_validate vmlinux EXPORT_SYMBOL
+0xb5c08537 ceph_msg_new2 net/ceph/libceph EXPORT_SYMBOL
+0x207bd0c0 rpc_init_pipe_dir_head net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x05515d61 tcp_vegas_init net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL
+0xc2e62ab6 drm_crtc_vblank_waitqueue drivers/gpu/drm/drm EXPORT_SYMBOL
+0x47a40b2d rtnl_create_link vmlinux EXPORT_SYMBOL
+0x72bc9d38 md_stop vmlinux EXPORT_SYMBOL_GPL
+0x0db5fdbe phy_connect_direct vmlinux EXPORT_SYMBOL
+0x84df1561 iscsi_eh_recover_target vmlinux EXPORT_SYMBOL_GPL
+0x16790f7f ata_std_error_handler vmlinux EXPORT_SYMBOL_GPL
+0xfcc1edd3 memory_block_size_bytes vmlinux EXPORT_SYMBOL_GPL
+0x5aee4842 _dev_notice vmlinux EXPORT_SYMBOL
+0xa0723740 amd_iommu_get_v2_domain vmlinux EXPORT_SYMBOL
+0x4ff676df tty_insert_flip_string_flags vmlinux EXPORT_SYMBOL
+0x7613f3ba virtio_device_freeze vmlinux EXPORT_SYMBOL_GPL
+0x42fba1c7 __sbitmap_queue_get_shallow vmlinux EXPORT_SYMBOL_GPL
+0x65c8e1e9 address_space_init_once vmlinux EXPORT_SYMBOL
+0x24cc1e7a bdi_dev_name vmlinux EXPORT_SYMBOL_GPL
+0x9714e0bb ktime_get_raw vmlinux EXPORT_SYMBOL_GPL
+0x45e69e01 prepare_to_wait_exclusive vmlinux EXPORT_SYMBOL
+0xc011af75 cxgbi_ddp_set_one_ppod drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL
+0xc7fa4aa9 kobj_ns_drop vmlinux EXPORT_SYMBOL_GPL
+0x45c98c2d xfrm_audit_state_replay vmlinux EXPORT_SYMBOL_GPL
+0x245863b1 inet_dev_addr_type vmlinux EXPORT_SYMBOL
+0xba2b1a0a tcf_action_exec vmlinux EXPORT_SYMBOL
+0x8d686d78 md_bitmap_load vmlinux EXPORT_SYMBOL_GPL
+0xa8f42afc ata_pci_shutdown_one vmlinux EXPORT_SYMBOL_GPL
+0x32400aba platform_device_add vmlinux EXPORT_SYMBOL_GPL
+0x786e9216 clk_hw_get_num_parents vmlinux EXPORT_SYMBOL_GPL
+0x7f196669 debugfs_create_size_t vmlinux EXPORT_SYMBOL_GPL
+0x701810de remove_proc_entry vmlinux EXPORT_SYMBOL
+0x04e1fbcb fsnotify_add_mark vmlinux EXPORT_SYMBOL_GPL
+0x2a983d26 ceph_pagelist_release net/ceph/libceph EXPORT_SYMBOL
+0x6e71b91c arpt_register_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL
+0x037b629c ptp_find_pin drivers/ptp/ptp EXPORT_SYMBOL
+0x14320910 nvmf_reg_write32 drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x1c87c054 drm_modeset_lock drivers/gpu/drm/drm EXPORT_SYMBOL
+0x8e0f8d59 ipv6_opt_accepted vmlinux EXPORT_SYMBOL_GPL
+0x32c4606f xfrm_spd_getinfo vmlinux EXPORT_SYMBOL
+0x3c2839af dm_kobject_release vmlinux EXPORT_SYMBOL
+0xd4e31699 genphy_setup_forced vmlinux EXPORT_SYMBOL
+0x51a34d15 pci_walk_bus vmlinux EXPORT_SYMBOL_GPL
+0x25b36405 block_is_partially_uptodate vmlinux EXPORT_SYMBOL
+0xd9ecb670 ring_buffer_overruns vmlinux EXPORT_SYMBOL_GPL
+0xc45d0d13 injectm vmlinux EXPORT_SYMBOL_GPL
+0xcee467f3 xprt_load_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xb82ddfea ib_get_eth_speed drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xf13c6010 drm_atomic_helper_commit_hw_done drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x7377e230 alloc_etherdev_mqs vmlinux EXPORT_SYMBOL
+0x5be63c5b crc32c_csum_stub vmlinux EXPORT_SYMBOL
+0x73ebc82e amd_iommu_complete_ppr vmlinux EXPORT_SYMBOL
+0xdf2a16d7 uart_resume_port vmlinux EXPORT_SYMBOL
+0xeff47420 __put_devmap_managed_page vmlinux EXPORT_SYMBOL
+0xfa35044a alternatives_patched vmlinux EXPORT_SYMBOL_GPL
+0xdcdb7601 ieee802154_rx_irqsafe net/mac802154/mac802154 EXPORT_SYMBOL
+0x6138dac5 ib_port_register_module_stat drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x26cfdee1 iscsit_setup_text_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x6bb4bf8f dm_array_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x6ae62a97 kvm_clear_guest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xb05fc310 sysctl_rmem_max vmlinux EXPORT_SYMBOL
+0xfac8865f sysctl_wmem_max vmlinux EXPORT_SYMBOL
+0x5bb3a80b ps2_handle_ack vmlinux EXPORT_SYMBOL
+0x06209f49 phy_lookup_setting vmlinux EXPORT_SYMBOL_GPL
+0x31d68e3d ahci_platform_resume_host vmlinux EXPORT_SYMBOL_GPL
+0xe49275de pm_relax vmlinux EXPORT_SYMBOL_GPL
+0x4ab208ba acpi_walk_resource_buffer vmlinux EXPORT_SYMBOL
+0x3db33d58 pci_user_write_config_byte vmlinux EXPORT_SYMBOL_GPL
+0x0d116ad0 __sbitmap_queue_get vmlinux EXPORT_SYMBOL_GPL
+0xe7ea689c ahash_free_instance vmlinux EXPORT_SYMBOL_GPL
+0x2e7c6868 crypto_grab_spawn vmlinux EXPORT_SYMBOL_GPL
+0x7e054ce1 jbd2_journal_errno vmlinux EXPORT_SYMBOL
+0x7590c4c3 dquot_transfer vmlinux EXPORT_SYMBOL
+0x26ed2186 register_vmap_purge_notifier vmlinux EXPORT_SYMBOL_GPL
+0x9892fe0f xt_rateest_lookup net/netfilter/xt_RATEEST EXPORT_SYMBOL_GPL
+0xd7cb6243 rdma_get_gid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xd6711a58 dm_bitset_cursor_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0x7f64a1c9 mlx4_SET_PORT_BEACON drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0xed4948cc nvmf_connect_io_queue drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL
+0x1cc12733 drm_agp_release drivers/gpu/drm/drm EXPORT_SYMBOL
+0x18c65d9d drm_gem_map_dma_buf drivers/gpu/drm/drm EXPORT_SYMBOL
+0x5f5e4694 drm_clflush_pages drivers/gpu/drm/drm EXPORT_SYMBOL
+0x079d7375 drm_gem_fb_create_with_funcs drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL_GPL
+0x467df16d netdev_rss_key_fill vmlinux EXPORT_SYMBOL
+0x8087c46e md_wakeup_thread vmlinux EXPORT_SYMBOL
+0x5cf53ce2 input_free_minor vmlinux EXPORT_SYMBOL
+0x5ec02bcf phy_ethtool_ksettings_get vmlinux EXPORT_SYMBOL
+0xb7dc0165 phy_ethtool_ksettings_set vmlinux EXPORT_SYMBOL
+0xccf9fe5e fc_elsct_send vmlinux EXPORT_SYMBOL
+0xf1a81017 ata_std_bios_param vmlinux EXPORT_SYMBOL_GPL
+0xdc49dd60 pm_wakeup_dev_event vmlinux EXPORT_SYMBOL_GPL
+0x3b12332b pci_write_config_byte vmlinux EXPORT_SYMBOL
+0x525eac48 pagecache_write_end vmlinux EXPORT_SYMBOL
+0xc1a5b294 rpc_clone_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xbb3b941c arpt_unregister_table_pre_exit net/ipv4/netfilter/arp_tables EXPORT_SYMBOL
+0x5b35c4f9 vfio_group_set_kvm drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x3e17f466 mdio_set_flag drivers/net/mdio EXPORT_SYMBOL
+0xe4c3c8be drm_panel_detach drivers/gpu/drm/drm EXPORT_SYMBOL
+0xe625cd10 drm_read drivers/gpu/drm/drm EXPORT_SYMBOL
+0x2c65bda1 xfrm_audit_state_icvfail vmlinux EXPORT_SYMBOL_GPL
+0x07be6905 net_inc_egress_queue vmlinux EXPORT_SYMBOL_GPL
+0x8cc19883 hid_resolv_usage vmlinux EXPORT_SYMBOL_GPL
+0x3903d711 hidinput_get_led_field vmlinux EXPORT_SYMBOL_GPL
+0x12900647 dev_pm_opp_set_supported_hw vmlinux EXPORT_SYMBOL_GPL
+0xc59224c6 iscsi_boot_create_acpitbl vmlinux EXPORT_SYMBOL_GPL
+0x9239ad5c ahci_platform_disable_clks vmlinux EXPORT_SYMBOL_GPL
+0xb077e70a clk_unprepare vmlinux EXPORT_SYMBOL_GPL
+0xb12cbacb fb_unregister_client vmlinux EXPORT_SYMBOL
+0xf9ca2eb4 kstrtoint_from_user vmlinux EXPORT_SYMBOL
+0x0b94c2c5 crypto_skcipher_encrypt vmlinux EXPORT_SYMBOL_GPL
+0x261d0def perf_get_aux vmlinux EXPORT_SYMBOL_GPL
+0x4b671609 ip_vs_conn_out_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL
+0xd9f711ae mlxsw_afa_block_append_mcrouter drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x0abb1226 i40e_register_client drivers/net/ethernet/intel/i40e/i40e EXPORT_SYMBOL
+0xf688724e drm_fb_helper_debug_enter drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x956f88ef inet_csk_init_xmit_timers vmlinux EXPORT_SYMBOL
+0xcea64cba dev_pm_opp_remove vmlinux EXPORT_SYMBOL_GPL
+0xeb1f76b5 fcoe_transport_detach vmlinux EXPORT_SYMBOL
+0x9318586c clk_hw_register_mux_table vmlinux EXPORT_SYMBOL_GPL
+0x8f1b4353 crypto_has_skcipher2 vmlinux EXPORT_SYMBOL_GPL
+0x2e1da9fb probe_kernel_read vmlinux EXPORT_SYMBOL_GPL
+0x1c0a3f90 irq_set_chip_and_handler_name vmlinux EXPORT_SYMBOL_GPL
+0x7939ff41 ceph_con_open net/ceph/libceph EXPORT_SYMBOL
+0xf001423b ip_tunnel_encap_setup net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL
+0x719ac521 get_phv_bit drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL
+0x6a170994 drm_mode_prune_invalid drivers/gpu/drm/drm EXPORT_SYMBOL
+0xac71dae8 kvm_write_guest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x6bf93e8b ohci_resume vmlinux EXPORT_SYMBOL_GPL
+0x3efe1703 phy_unregister_fixup_for_id vmlinux EXPORT_SYMBOL
+0x3db9629e dev_pm_put_subsys_data vmlinux EXPORT_SYMBOL_GPL
+0x014ee5ef dev_pm_get_subsys_data vmlinux EXPORT_SYMBOL_GPL
+0x3f68dd59 transport_add_device vmlinux
EXPORT_SYMBOL_GPL +0x9dd7e82c kill_device vmlinux EXPORT_SYMBOL_GPL +0xfeeecd05 apei_read vmlinux EXPORT_SYMBOL_GPL +0xa631800a vfs_readlink vmlinux EXPORT_SYMBOL +0x8133c67d complete_and_exit vmlinux EXPORT_SYMBOL +0x0a49c1d4 mlx4_buf_write_mtt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x8d0595fd t4_cleanup_clip_tbl drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xbc2e630c inet6_lookup vmlinux EXPORT_SYMBOL_GPL +0x3b3c253e udp_seq_stop vmlinux EXPORT_SYMBOL +0x19d52f1f hid_quirks_exit vmlinux EXPORT_SYMBOL_GPL +0xe1bb0264 scsi_nl_sock vmlinux EXPORT_SYMBOL_GPL +0xad8861d2 nd_region_dev vmlinux EXPORT_SYMBOL_GPL +0x16516798 osc_pc_lpi_support_confirmed vmlinux EXPORT_SYMBOL_GPL +0xff6878cf fb_default_cmap vmlinux EXPORT_SYMBOL +0x51634925 sysfs_remove_group vmlinux EXPORT_SYMBOL_GPL +0x5c013815 sysfs_remove_file_from_group vmlinux EXPORT_SYMBOL_GPL +0xc3ff38c2 down_read_trylock vmlinux EXPORT_SYMBOL +0x030dd098 wpan_phy_register net/ieee802154/ieee802154 EXPORT_SYMBOL +0xf7a7d5fc ip_set_nfnl_get_byindex net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x30ad272b nf_flow_table_free net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x3515fb84 spc_emulate_inquiry_std drivers/target/target_core_mod EXPORT_SYMBOL +0xcc8151d9 dcbnl_cee_notify vmlinux EXPORT_SYMBOL +0x823edea5 xt_compat_add_offset vmlinux EXPORT_SYMBOL_GPL +0xac126a54 hid_hw_start vmlinux EXPORT_SYMBOL_GPL +0x86ccd7d9 cpufreq_register_driver vmlinux EXPORT_SYMBOL_GPL +0x8aaa353c serial8250_handle_irq vmlinux EXPORT_SYMBOL_GPL +0x9833bc0c hvc_kick vmlinux EXPORT_SYMBOL_GPL +0xa2402ecf path_has_submounts vmlinux EXPORT_SYMBOL +0x8958659d open_with_fake_path vmlinux EXPORT_SYMBOL +0x165b145c ex_handler_refcount vmlinux EXPORT_SYMBOL +0x15f52261 ib_register_client drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x2c8dd6b8 edac_mem_types drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x8f7b5a8d i2c_smbus_xfer drivers/i2c/i2c-core EXPORT_SYMBOL +0x04a47e6a mlx4_wol_write drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x288fffd8 drm_gem_fence_array_add_implicit drivers/gpu/drm/drm EXPORT_SYMBOL +0xfbf1f78e drm_is_current_master drivers/gpu/drm/drm EXPORT_SYMBOL +0xc05b7643 __strp_unpause vmlinux EXPORT_SYMBOL_GPL +0xb615f50c inet6_offloads vmlinux EXPORT_SYMBOL +0x4cb2d4f1 nf_getsockopt vmlinux EXPORT_SYMBOL +0x29157d5d nf_setsockopt vmlinux EXPORT_SYMBOL +0x579e0bf5 rtnl_unregister_all vmlinux EXPORT_SYMBOL_GPL +0x79da7532 netif_rx_ni vmlinux EXPORT_SYMBOL +0xa6911cd3 netif_tx_wake_queue vmlinux EXPORT_SYMBOL +0x3131b773 ir_raw_encode_scancode vmlinux EXPORT_SYMBOL +0xa76fd157 ps2_begin_command vmlinux EXPORT_SYMBOL +0xa51b4ddd dev_pm_qos_remove_notifier vmlinux EXPORT_SYMBOL_GPL +0x3956da17 device_attach vmlinux EXPORT_SYMBOL_GPL +0xf1c37272 tty_port_open vmlinux EXPORT_SYMBOL +0x0776b133 dma_get_any_slave_channel vmlinux EXPORT_SYMBOL_GPL +0x763ba3ad ioread64be_hi_lo vmlinux EXPORT_SYMBOL +0x84f03fe9 blk_rq_map_user_iov vmlinux EXPORT_SYMBOL +0xf57e3a22 ioc_lookup_icq vmlinux EXPORT_SYMBOL +0x0bd7748d dquot_commit_info vmlinux EXPORT_SYMBOL +0x2f582cfb seq_puts vmlinux EXPORT_SYMBOL +0x91a80c7a seq_putc vmlinux EXPORT_SYMBOL +0xb62f7790 super_setup_bdi_name vmlinux EXPORT_SYMBOL +0xb61d6fc2 down_read_interruptible vmlinux EXPORT_SYMBOL +0xa452c297 hpet_mask_rtc_irq_bit vmlinux EXPORT_SYMBOL_GPL +0x86ae5b71 nf_nat_inet_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xdae9b5d7 nfs4_disable_idmapping fs/nfs/nfs EXPORT_SYMBOL_GPL +0x12756936 usb_create_shared_hcd vmlinux EXPORT_SYMBOL_GPL +0x4d3302d3 
ata_dev_disable vmlinux EXPORT_SYMBOL_GPL +0x955ebe3b uart_update_timeout vmlinux EXPORT_SYMBOL +0xec9adea9 pci_vfs_assigned vmlinux EXPORT_SYMBOL_GPL +0xe2fe725e crypto_register_acomp vmlinux EXPORT_SYMBOL_GPL +0x7c6c7ab6 d_genocide vmlinux EXPORT_SYMBOL +0xe81ddad6 tracing_generic_entry_update vmlinux EXPORT_SYMBOL_GPL +0x9ca24c30 __module_put_and_exit vmlinux EXPORT_SYMBOL +0x6b853d06 ns_to_kernel_old_timeval vmlinux EXPORT_SYMBOL +0x3b95f543 klp_shadow_free vmlinux EXPORT_SYMBOL_GPL +0xb8b2b1f7 mce_register_decode_chain vmlinux EXPORT_SYMBOL_GPL +0xd7b55f7d ceph_monc_stop net/ceph/libceph EXPORT_SYMBOL +0xea902dbd nf_ct_l4proto_log_invalid net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xb0f19f37 netlink_add_tap vmlinux EXPORT_SYMBOL_GPL +0xd600680e __skb_wait_for_more_packets vmlinux EXPORT_SYMBOL +0xcffa2aff spi_populate_width_msg vmlinux EXPORT_SYMBOL_GPL +0xe3a44cd2 pm_clk_init vmlinux EXPORT_SYMBOL_GPL +0xb917c7a2 of_clk_hw_register vmlinux EXPORT_SYMBOL_GPL +0xd39a38a8 blkg_print_stat_ios_recursive vmlinux EXPORT_SYMBOL_GPL +0x53fa36d1 ZSTD_decompressBlock lib/zstd/zstd_decompress EXPORT_SYMBOL +0xd9eb79db tcp_vegas_cwnd_event net/ipv4/tcp_vegas EXPORT_SYMBOL_GPL +0x91862450 nf_log_dump_sk_uid_gid net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xbd235253 ib_create_qp_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb76481d9 netdev_txq_to_tc vmlinux EXPORT_SYMBOL +0x7c983a5d dmi_walk vmlinux EXPORT_SYMBOL_GPL +0x76979079 nvme_shutdown_ctrl vmlinux EXPORT_SYMBOL_GPL +0x3d0f4a29 fwnode_graph_get_next_endpoint vmlinux EXPORT_SYMBOL_GPL +0xba1008c8 __crc32c_le vmlinux EXPORT_SYMBOL +0x6b9f5fe8 af_alg_count_tsgl vmlinux EXPORT_SYMBOL_GPL +0x689082a0 end_buffer_write_sync vmlinux EXPORT_SYMBOL +0xfcb77236 d_rehash vmlinux EXPORT_SYMBOL +0x08704cc3 d_drop vmlinux EXPORT_SYMBOL +0xcb2d0fe9 file_open_root vmlinux EXPORT_SYMBOL +0x45fddd3e vfio_group_get_external_user drivers/vfio/vfio EXPORT_SYMBOL_GPL +0x573e6450 nfs_initiate_pgio fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5819aaa8 nfs_create_rpc_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x122e1bc5 tso_build_hdr vmlinux EXPORT_SYMBOL +0x7f5798a0 sock_recv_errqueue vmlinux EXPORT_SYMBOL +0x12d68842 usb_poison_urb vmlinux EXPORT_SYMBOL_GPL +0xc55ff962 phy_basic_t1_features_array vmlinux EXPORT_SYMBOL_GPL +0xc2a3e570 errata vmlinux EXPORT_SYMBOL_GPL +0xec788566 acpi_target_system_state vmlinux EXPORT_SYMBOL_GPL +0x5209db70 pci_get_class vmlinux EXPORT_SYMBOL +0xe569f4be __bdevname vmlinux EXPORT_SYMBOL +0x7918d817 memory_failure vmlinux EXPORT_SYMBOL_GPL +0xf6a28554 region_intersects vmlinux EXPORT_SYMBOL_GPL +0x7551b46e dm_tm_open_with_sm drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xf3db35e8 i2c_acpi_find_bus_speed drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x038190ec netif_rx vmlinux EXPORT_SYMBOL +0xcf7d2901 dev_pm_opp_set_regulators vmlinux EXPORT_SYMBOL_GPL +0xb2cf7c01 mraid_mm_unregister_adp vmlinux EXPORT_SYMBOL +0x432e98c1 bio_clone_fast vmlinux EXPORT_SYMBOL +0x955b0e2e kthread_worker_fn vmlinux EXPORT_SYMBOL_GPL +0x57865853 vsock_add_tap net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x61fbe963 unregister_ip_vs_app net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0xcbffb7ac mlx5_core_xrcd_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xa673d769 nfs_dentry_operations fs/nfs/nfs EXPORT_SYMBOL_GPL +0xc4365a98 kvm_mmu_unload arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xd0a2847c sha_init vmlinux EXPORT_SYMBOL +0x54977b2d xfrm_stateonly_find vmlinux EXPORT_SYMBOL +0x571552b0 usb_create_hcd vmlinux EXPORT_SYMBOL_GPL 
+0x9cd487a1 sas_eh_abort_handler vmlinux EXPORT_SYMBOL_GPL +0x72ea7b2d scsi_device_type vmlinux EXPORT_SYMBOL +0x1403de48 elv_bio_merge_ok vmlinux EXPORT_SYMBOL +0x49377af6 rpc_get_sb_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x44d376e6 mlx4_get_slave_pkey_gid_tbl_len drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xda94b479 usb_autopm_get_interface_no_resume vmlinux EXPORT_SYMBOL_GPL +0xc143c690 nvme_cancel_tagset vmlinux EXPORT_SYMBOL_GPL +0xa965ca81 reciprocal_value vmlinux EXPORT_SYMBOL +0x4be27fd2 unregister_binfmt vmlinux EXPORT_SYMBOL +0xce239661 alloc_vm_area vmlinux EXPORT_SYMBOL_GPL +0x4bedb04a make_kgid vmlinux EXPORT_SYMBOL +0x359ec42f _raw_read_trylock vmlinux EXPORT_SYMBOL +0xb83b70f2 housekeeping_enabled vmlinux EXPORT_SYMBOL_GPL +0x2524ba17 ZSTD_getCParams lib/zstd/zstd_compress EXPORT_SYMBOL +0xb479eab2 xdr_init_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x56947501 cryptd_ahash_child crypto/cryptd EXPORT_SYMBOL_GPL +0xa57f48fd fuse_get_unique fs/fuse/fuse EXPORT_SYMBOL_GPL +0xb2622c7b tcf_idr_check_alloc vmlinux EXPORT_SYMBOL +0x0b9090be sock_no_accept vmlinux EXPORT_SYMBOL +0x7ae83224 ata_sff_check_status vmlinux EXPORT_SYMBOL_GPL +0xf4b754fd acpi_resources_are_enforced vmlinux EXPORT_SYMBOL +0x20128b0f pinctrl_dev_get_devname vmlinux EXPORT_SYMBOL_GPL +0x6c3f70e0 guid_gen vmlinux EXPORT_SYMBOL_GPL +0x3d69e338 set_groups vmlinux EXPORT_SYMBOL +0xd5f3bb7b set_memory_encrypted vmlinux EXPORT_SYMBOL_GPL +0x153b60a6 klist_del vmlinux EXPORT_SYMBOL_GPL +0xe40ddf9d tcf_generic_walker vmlinux EXPORT_SYMBOL +0x042f8c4c flow_rule_match_eth_addrs vmlinux EXPORT_SYMBOL +0x9ccef490 sock_diag_register_inet_compat vmlinux EXPORT_SYMBOL_GPL +0xb4065fc5 netif_schedule_queue vmlinux EXPORT_SYMBOL +0x3422526d dev_remove_offload vmlinux EXPORT_SYMBOL +0x195c1aa3 genphy_soft_reset vmlinux EXPORT_SYMBOL +0x415b8383 phy_modify_changed vmlinux EXPORT_SYMBOL_GPL +0x7c5e361c __lock_buffer vmlinux EXPORT_SYMBOL +0xdf255dcf memory_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x411b40dc esp6_input_done2 net/ipv6/esp6 EXPORT_SYMBOL_GPL +0xbc69b145 passthrough_parse_cdb drivers/target/target_core_mod EXPORT_SYMBOL +0xf8d1fbad mlx4_mr_hw_change_access drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf92acbc2 kvm_valid_efer arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6bf7e63d tcp_openreq_init_rwin vmlinux EXPORT_SYMBOL +0x926e7bff xdp_rxq_info_reg_mem_model vmlinux EXPORT_SYMBOL_GPL +0x39e2a6aa dev_direct_xmit vmlinux EXPORT_SYMBOL +0x3e4200eb devfreq_remove_governor vmlinux EXPORT_SYMBOL +0x17be68ca acpi_clear_event vmlinux EXPORT_SYMBOL +0xbb0ab47b debug_locks vmlinux EXPORT_SYMBOL_GPL +0x158da98e ablkcipher_walk_phys vmlinux EXPORT_SYMBOL_GPL +0x7bb2a529 ablkcipher_walk_done vmlinux EXPORT_SYMBOL_GPL +0xc146f47a aead_geniv_alloc vmlinux EXPORT_SYMBOL_GPL +0x618e112b perf_event_update_userpage vmlinux EXPORT_SYMBOL_GPL +0x9a58dd2d trace_print_bitmask_seq vmlinux EXPORT_SYMBOL_GPL +0x7924b6de ip_set_hostmask_map net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0xd0459e67 kvm_has_tsc_control arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6766cb0c fib6_check_nexthop vmlinux EXPORT_SYMBOL_GPL +0x8691cc51 eth_header_parse_protocol vmlinux EXPORT_SYMBOL +0x75b63e97 xdp_return_frame vmlinux EXPORT_SYMBOL_GPL +0x2e01215f iscsi_destroy_flashnode_sess vmlinux EXPORT_SYMBOL_GPL +0x7538b132 agp_off vmlinux EXPORT_SYMBOL +0x20a789ac irq_set_chip_data vmlinux EXPORT_SYMBOL +0x63197685 s2idle_wake vmlinux EXPORT_SYMBOL_GPL +0xe9fd4d18 ip6_tnl_get_link_net net/ipv6/ip6_tunnel EXPORT_SYMBOL 
+0x98543121 register_ip_vs_pe net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0xe15cab05 vhost_dev_ioctl drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x02120cc1 dm_unregister_path_selector drivers/md/dm-multipath EXPORT_SYMBOL_GPL +0x9fdee78a vnic_dev_get_pdev drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL +0xc7a4bcf5 cxgbi_sock_act_open_req_arp_failure drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x5e3f57a3 drm_fb_helper_sys_fillrect drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xadc897d9 inet6_csk_xmit vmlinux EXPORT_SYMBOL_GPL +0x7ae13940 tcp_ioctl vmlinux EXPORT_SYMBOL +0xb92393bd ata_eh_analyze_ncq_error vmlinux EXPORT_SYMBOL_GPL +0x86604db8 iommu_attach_group vmlinux EXPORT_SYMBOL_GPL +0x6c655913 register_acpi_hed_notifier vmlinux EXPORT_SYMBOL_GPL +0x753460a8 registered_fb vmlinux EXPORT_SYMBOL +0x82762d85 vfs_symlink vmlinux EXPORT_SYMBOL +0xb2d0053e cgroup_bpf_enabled_key vmlinux EXPORT_SYMBOL +0xd273b1b1 __round_jiffies_up_relative vmlinux EXPORT_SYMBOL_GPL +0x35465e15 wait_for_completion_io vmlinux EXPORT_SYMBOL +0xc8058c7c find_vpid vmlinux EXPORT_SYMBOL_GPL +0x6f3614b6 rdma_is_zero_gid drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa0c71dac spi_populate_sync_msg vmlinux EXPORT_SYMBOL_GPL +0xd58bbbcb nvme_delete_wq vmlinux EXPORT_SYMBOL_GPL +0x50df94f5 btree_insert vmlinux EXPORT_SYMBOL_GPL +0xb0c5e247 lockref_put_return vmlinux EXPORT_SYMBOL +0x263beb75 ecryptfs_get_versions vmlinux EXPORT_SYMBOL +0x9096d82a unmap_mapping_range vmlinux EXPORT_SYMBOL +0x7171121c overflowgid vmlinux EXPORT_SYMBOL +0x8b618d08 overflowuid vmlinux EXPORT_SYMBOL +0x4ad7e9d4 __tracepoint_mlx5_fs_add_rule drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x691ef436 mlx4_unregister_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xed15fb41 nfs4_print_deviceid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x957ede91 mr_rtm_dumproute vmlinux EXPORT_SYMBOL +0xf5316877 ping_unhash vmlinux EXPORT_SYMBOL_GPL +0xe5ed9873 inet_select_addr vmlinux EXPORT_SYMBOL +0x3254fa64 dev_getbyhwaddr_rcu vmlinux EXPORT_SYMBOL +0x1a146ec3 usb_ep_type_string vmlinux EXPORT_SYMBOL_GPL +0x6f50b160 dma_async_tx_descriptor_init vmlinux EXPORT_SYMBOL +0xd8ec8bf6 pci_enable_device vmlinux EXPORT_SYMBOL +0x1fc7a59b __nla_validate vmlinux EXPORT_SYMBOL +0x79d9543d vfs_dedupe_file_range vmlinux EXPORT_SYMBOL +0x6a153713 xprt_release_xprt_cong net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xedd05110 cryptd_skcipher_child crypto/cryptd EXPORT_SYMBOL_GPL +0x01960e25 __tracepoint_kvm_cr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xde09a94d xas_find vmlinux EXPORT_SYMBOL_GPL +0xb7a40128 ipv6_recv_error vmlinux EXPORT_SYMBOL_GPL +0xa3c04137 udp_sk_rx_dst_set vmlinux EXPORT_SYMBOL +0x968f9a23 efivar_entry_iter_begin vmlinux EXPORT_SYMBOL_GPL +0x53778253 phy_read_mmd vmlinux EXPORT_SYMBOL +0x294de777 iommu_fwspec_free vmlinux EXPORT_SYMBOL_GPL +0xa50335f4 sbitmap_finish_wait vmlinux EXPORT_SYMBOL_GPL +0x136f7aee blk_mq_debugfs_rq_show vmlinux EXPORT_SYMBOL_GPL +0x6c3caa73 security_sb_remount vmlinux EXPORT_SYMBOL +0x321d6368 dcache_dir_close vmlinux EXPORT_SYMBOL +0x494e3393 vm_get_page_prot vmlinux EXPORT_SYMBOL +0x1f563160 bpf_offload_dev_priv vmlinux EXPORT_SYMBOL_GPL +0x044cef15 rpc_wake_up_queued_task net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7485935a dm_btree_lookup drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb6e8f9bd mlx5_dm_sw_icm_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x23281610 drm_atomic_state_default_clear drivers/gpu/drm/drm EXPORT_SYMBOL +0xa5ebbf53 
drm_mode_config_helper_suspend drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x8c060832 drm_dp_mst_put_port_malloc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x6cdc691c tcp_enter_quickack_mode vmlinux EXPORT_SYMBOL +0x2d1359c5 unregister_snap_client vmlinux EXPORT_SYMBOL +0x38b20768 phy_driver_register vmlinux EXPORT_SYMBOL +0xced795b3 scsi_dma_unmap vmlinux EXPORT_SYMBOL +0xfbf27c5b vga_set_legacy_decoding vmlinux EXPORT_SYMBOL +0x5d17148b apei_write vmlinux EXPORT_SYMBOL_GPL +0xbab7c2e0 crypto_alloc_rng vmlinux EXPORT_SYMBOL_GPL +0x6a3291ac relay_reset vmlinux EXPORT_SYMBOL_GPL +0xb6b9d9a2 nf_send_reset6 net/ipv6/netfilter/nf_reject_ipv6 EXPORT_SYMBOL_GPL +0x9d9ef76f nf_conntrack_register_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x58692a3a fscache_withdraw_cache fs/fscache/fscache EXPORT_SYMBOL +0xd6f3f63c inet_put_port vmlinux EXPORT_SYMBOL +0xd72e0225 sock_no_connect vmlinux EXPORT_SYMBOL +0x5ee6567e sk_set_peek_off vmlinux EXPORT_SYMBOL_GPL +0x0403d3e0 dm_exception_store_type_register vmlinux EXPORT_SYMBOL +0x538ef1c1 phy_print_status vmlinux EXPORT_SYMBOL +0xa5449599 cpu_device_create vmlinux EXPORT_SYMBOL_GPL +0xbeb7d9c9 clk_register_fractional_divider vmlinux EXPORT_SYMBOL_GPL +0x735d0256 pci_remove_bus vmlinux EXPORT_SYMBOL +0x98a800ad pinctrl_select_state vmlinux EXPORT_SYMBOL_GPL +0xe1dcec5e iov_iter_zero vmlinux EXPORT_SYMBOL +0xfe212b1b crypto_register_aeads vmlinux EXPORT_SYMBOL_GPL +0xcdaa769e dquot_mark_dquot_dirty vmlinux EXPORT_SYMBOL +0x1c338147 vm_numa_stat vmlinux EXPORT_SYMBOL +0x1e1e140e ns_to_timespec64 vmlinux EXPORT_SYMBOL +0xb6888188 klp_shadow_get_or_alloc vmlinux EXPORT_SYMBOL_GPL +0x0917490b wait_for_completion_killable_timeout vmlinux EXPORT_SYMBOL +0x10ecc52c usb_amd_quirk_pll_enable vmlinux EXPORT_SYMBOL_GPL +0xdf70a9f7 mpt_attach vmlinux EXPORT_SYMBOL +0xbffb13f6 iscsi_boot_create_target vmlinux EXPORT_SYMBOL_GPL +0x8219a3cd to_nvdimm_bus_dev vmlinux EXPORT_SYMBOL_GPL +0xc9f34c1d acpi_acquire_global_lock vmlinux EXPORT_SYMBOL +0x0e0662cf generic_file_read_iter vmlinux EXPORT_SYMBOL +0xda75be83 get_device_system_crosststamp vmlinux EXPORT_SYMBOL_GPL +0x86fca7e4 ceph_put_snap_context net/ceph/libceph EXPORT_SYMBOL +0x9eb8980a xdr_stream_pos net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x350058ec ib_destroy_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x98e83e51 rdma_port_get_link_layer drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x996c5d6d mlxsw_reg_trans_bulk_wait drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xca8950ac drm_state_dump drivers/gpu/drm/drm EXPORT_SYMBOL +0xefbaf900 o2nm_node_put fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x5b3e282f xa_store vmlinux EXPORT_SYMBOL +0x305f8cb8 dev_get_phys_port_id vmlinux EXPORT_SYMBOL +0x247a1d3e iommu_map vmlinux EXPORT_SYMBOL_GPL +0x0d61eeee __bitmap_subset vmlinux EXPORT_SYMBOL +0x697c5d0d tracing_snapshot_alloc vmlinux EXPORT_SYMBOL_GPL +0xcc06ed64 drm_atomic_helper_crtc_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x5809194a eth_type_trans vmlinux EXPORT_SYMBOL +0xfce515c8 fwnode_get_phy_mode vmlinux EXPORT_SYMBOL_GPL +0xf17e6282 clk_register_mux vmlinux EXPORT_SYMBOL_GPL +0x94bb7ec3 gen_pool_dma_zalloc_algo vmlinux EXPORT_SYMBOL +0x7bd98447 debugfs_create_blob vmlinux EXPORT_SYMBOL_GPL +0x1b72f373 posix_acl_access_xattr_handler vmlinux EXPORT_SYMBOL_GPL +0xac8597d5 mb_cache_entry_get vmlinux EXPORT_SYMBOL +0x8a2afa3c send_sig vmlinux EXPORT_SYMBOL +0x98db2687 dm_bitset_cursor_end drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL 
+0xcfea61e3 usb_stor_set_xfer_buf drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xb49185e2 tcp_shutdown vmlinux EXPORT_SYMBOL +0x1f03a838 devlink_flash_update_begin_notify vmlinux EXPORT_SYMBOL_GPL +0xcbae6c7a acpi_lid_notifier_unregister vmlinux EXPORT_SYMBOL +0xf938c7aa blk_mq_alloc_request vmlinux EXPORT_SYMBOL +0xe40c37ea down_write_trylock vmlinux EXPORT_SYMBOL +0xab4fb873 param_set_uint vmlinux EXPORT_SYMBOL +0x5538aea1 rpc_init_wait_queue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x43cf464e usb_stor_ctrl_transfer drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x23797b4b cdrom_open drivers/cdrom/cdrom EXPORT_SYMBOL +0xce9fc008 cxgb4_pktgl_to_skb drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x36c5211b drm_panel_unprepare drivers/gpu/drm/drm EXPORT_SYMBOL +0x96418aec drm_dp_dual_mode_read drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x573b5453 ipv6_fixup_options vmlinux EXPORT_SYMBOL_GPL +0xf389fe60 __hw_addr_init vmlinux EXPORT_SYMBOL +0x199ed0cd net_disable_timestamp vmlinux EXPORT_SYMBOL +0x40782a10 sock_cmsg_send vmlinux EXPORT_SYMBOL +0x0ad137d3 lpit_read_residency_count_address vmlinux EXPORT_SYMBOL_GPL +0x863282cb blk_queue_max_discard_segments vmlinux EXPORT_SYMBOL_GPL +0x923d9dba pkcs7_parse_message vmlinux EXPORT_SYMBOL_GPL +0x6a4f623b mmu_notifier_synchronize vmlinux EXPORT_SYMBOL_GPL +0xee86bd09 cpu_info vmlinux EXPORT_SYMBOL +0xe260ce39 mlxsw_core_trap_action_set drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x2485959b mlx5_query_port_vl_hw_cap drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xce3537d9 mlx4_map_sw_to_hw_steering_mode drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xb0196303 drm_client_buffer_vmap drivers/gpu/drm/drm EXPORT_SYMBOL +0xd37c849a drm_connector_attach_tv_margin_properties drivers/gpu/drm/drm EXPORT_SYMBOL +0xb2fb7a70 backlight_device_set_brightness drivers/video/backlight/backlight EXPORT_SYMBOL +0x0afd6849 ip_fib_metrics_init vmlinux EXPORT_SYMBOL_GPL +0x4da4931f sk_msg_free vmlinux EXPORT_SYMBOL_GPL +0x1ead6fea flow_block_cb_decref vmlinux EXPORT_SYMBOL +0x4944cf2e cpufreq_enable_fast_switch vmlinux EXPORT_SYMBOL_GPL +0x3769a4c7 mpt_put_msg_frame vmlinux EXPORT_SYMBOL +0x4afe9a77 scsi_partsize vmlinux EXPORT_SYMBOL +0x50209f43 ata_pci_device_resume vmlinux EXPORT_SYMBOL_GPL +0xfdb55923 ata_scsi_slave_config vmlinux EXPORT_SYMBOL_GPL +0xe3d7534e pcie_capability_write_word vmlinux EXPORT_SYMBOL +0x85cf47dd copy_page_to_iter vmlinux EXPORT_SYMBOL +0x0e1269de dquot_set_dqinfo vmlinux EXPORT_SYMBOL +0x1f7167f0 css_next_descendant_pre vmlinux EXPORT_SYMBOL_GPL +0x556422b3 ioremap_cache vmlinux EXPORT_SYMBOL +0x7b73d5dc cxgbi_sock_rcv_abort_rpl drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x8aa08680 drm_pci_alloc drivers/gpu/drm/drm EXPORT_SYMBOL +0xab73bc87 cryptd_alloc_ahash crypto/cryptd EXPORT_SYMBOL_GPL +0x84dfe6a8 qdisc_hash_del vmlinux EXPORT_SYMBOL +0xe5dff6a4 iscsi_conn_error_event vmlinux EXPORT_SYMBOL_GPL +0x31ef7a6e ata_port_pbar_desc vmlinux EXPORT_SYMBOL_GPL +0x2cd6ee5b __blkdev_issue_zeroout vmlinux EXPORT_SYMBOL +0xaed4adbc kblockd_mod_delayed_work_on vmlinux EXPORT_SYMBOL +0x2f9e9ed7 memremap_pages vmlinux EXPORT_SYMBOL_GPL +0x83b2432a memunmap_pages vmlinux EXPORT_SYMBOL_GPL +0xc08bbce6 irq_get_percpu_devid_partition vmlinux EXPORT_SYMBOL_GPL +0x3e2b0ba6 groups_alloc vmlinux EXPORT_SYMBOL +0xa3c7bb39 svc_rqst_free net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xfed10321 ib_create_cm_id drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xb7406393 cxgbi_ep_poll 
drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xe3b40c4f drm_syncobj_get_handle drivers/gpu/drm/drm EXPORT_SYMBOL +0xa119d138 __tracepoint_kvm_exit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xdf54a8f7 netlink_unregister_notifier vmlinux EXPORT_SYMBOL +0xe327c492 lwtunnel_build_state vmlinux EXPORT_SYMBOL_GPL +0xfdf637af dm_table_device_name vmlinux EXPORT_SYMBOL_GPL +0xbed2b13c devm_rtc_allocate_device vmlinux EXPORT_SYMBOL_GPL +0x7d820b1e __dynamic_dev_dbg vmlinux EXPORT_SYMBOL +0x2d5a9d3b simple_release_fs vmlinux EXPORT_SYMBOL +0x6626afca down vmlinux EXPORT_SYMBOL +0x700e6373 mlx4_set_vf_rate drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x62de00ba drm_dev_put drivers/gpu/drm/drm EXPORT_SYMBOL +0x0eddd60c drm_dev_get drivers/gpu/drm/drm EXPORT_SYMBOL +0xae71627d ipmi_create_user drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x6ef3fca6 sync_file_create vmlinux EXPORT_SYMBOL +0x7a63693d clk_divider_ro_ops vmlinux EXPORT_SYMBOL_GPL +0xf6970b18 irq_chip_set_affinity_parent vmlinux EXPORT_SYMBOL_GPL +0xa09cdb05 inet_diag_dump_icsk net/ipv4/inet_diag EXPORT_SYMBOL_GPL +0x686e7732 cxgbi_sock_rcv_close_conn_rpl drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x734f45db kvm_queue_exception arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xcc598718 kvm_gfn_to_hva_cache_init arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xccd352b9 tcp_rtx_synack vmlinux EXPORT_SYMBOL +0xb6597ffc skb_put vmlinux EXPORT_SYMBOL +0x9f53c365 scsi_host_put vmlinux EXPORT_SYMBOL +0x7db251da vga_default_device vmlinux EXPORT_SYMBOL_GPL +0x8dde3d55 pcim_set_mwi vmlinux EXPORT_SYMBOL +0xe495926a alarm_restart vmlinux EXPORT_SYMBOL_GPL +0xdb065657 nfnl_unlock net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0x8bcaa4fd core_tpg_set_initiator_node_queue_depth drivers/target/target_core_mod EXPORT_SYMBOL +0x6bd6f4bd ethtool_op_get_ts_info vmlinux EXPORT_SYMBOL +0xebd2edc9 sk_capable vmlinux EXPORT_SYMBOL +0xe1869f24 get_governor_parent_kobj vmlinux EXPORT_SYMBOL_GPL +0x4828e77b acpi_scan_lock_acquire vmlinux EXPORT_SYMBOL_GPL +0x1cfa4c4c pinctrl_put vmlinux EXPORT_SYMBOL_GPL +0xfde9cae4 pinctrl_get vmlinux EXPORT_SYMBOL_GPL +0x2d355c34 configfs_depend_item vmlinux EXPORT_SYMBOL +0x9affd920 generic_block_fiemap vmlinux EXPORT_SYMBOL +0x4afb2238 add_wait_queue vmlinux EXPORT_SYMBOL +0x6d9409a8 param_ops_invbool vmlinux EXPORT_SYMBOL +0x286f4014 intel_graphics_stolen_res vmlinux EXPORT_SYMBOL +0xcc59f639 __ib_alloc_cq_any drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe981999b adf_init_etr_data drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x97263968 dm_bitset_resize drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xc40100a3 drm_modeset_drop_locks drivers/gpu/drm/drm EXPORT_SYMBOL +0x4c92c765 drm_helper_connector_dpms drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xc508d308 kvm_vcpu_init arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xe7257ab8 xa_store_range vmlinux EXPORT_SYMBOL +0x4841bdee strnchr vmlinux EXPORT_SYMBOL +0x9f984513 strrchr vmlinux EXPORT_SYMBOL +0x3a91b03a dev_add_pack vmlinux EXPORT_SYMBOL +0x9f8aff4f usb_hub_clear_tt_buffer vmlinux EXPORT_SYMBOL_GPL +0x0cdbb273 ahci_sdev_attrs vmlinux EXPORT_SYMBOL_GPL +0x7cfe368d net_dim_get_def_tx_moderation vmlinux EXPORT_SYMBOL +0xd20bf6ba dcookie_unregister vmlinux EXPORT_SYMBOL_GPL +0x1f608f06 edac_pci_handle_pe drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x96eb2c99 mlx5_fc_id drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x155fc57a mlx4_set_vf_spoofchk drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe1eca9bd 
__tracepoint_pnfs_mds_fallback_pg_get_mirror_count fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x36751a61 __tracepoint_pnfs_mds_fallback_pg_init_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xd3b8279e __fscache_uncache_page fs/fscache/fscache EXPORT_SYMBOL +0x9ea53d7f vsnprintf vmlinux EXPORT_SYMBOL +0x7fe32873 rb_replace_node vmlinux EXPORT_SYMBOL +0xb18110e0 __tracepoint_arm_event vmlinux EXPORT_SYMBOL_GPL +0xb4d76817 bio_alloc_mddev vmlinux EXPORT_SYMBOL_GPL +0x5bd0748f crypto_del_default_rng vmlinux EXPORT_SYMBOL_GPL +0x668402aa crypto_put_default_rng vmlinux EXPORT_SYMBOL_GPL +0x6ff607b6 crypto_get_default_rng vmlinux EXPORT_SYMBOL_GPL +0xb100656c crypto_mod_put vmlinux EXPORT_SYMBOL_GPL +0x587ee524 filemap_map_pages vmlinux EXPORT_SYMBOL +0xdee365b0 _raw_write_trylock vmlinux EXPORT_SYMBOL +0xebf82d8a cancel_work_sync vmlinux EXPORT_SYMBOL_GPL +0x9e6aacbf vhost_dev_stop drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xaaf5dcea drm_gem_free_mmap_offset drivers/gpu/drm/drm EXPORT_SYMBOL +0x9e6bb465 dlm_register_eviction_cb fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0xa24f8b01 kvm_arch_start_assignment arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xbc610211 kvm_read_guest_page_mmu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x32ce3777 radix_tree_preload vmlinux EXPORT_SYMBOL +0x417e605f kobject_get_unless_zero vmlinux EXPORT_SYMBOL +0x79a4508f pingv6_ops vmlinux EXPORT_SYMBOL_GPL +0x6123618e nf_log_set vmlinux EXPORT_SYMBOL +0xefb5d81a input_set_keycode vmlinux EXPORT_SYMBOL +0x562f0cdc subsys_virtual_register vmlinux EXPORT_SYMBOL_GPL +0x02a670f2 blkdev_ioctl vmlinux EXPORT_SYMBOL_GPL +0xa3c2af08 __quota_error vmlinux EXPORT_SYMBOL +0x0ff5cc17 vfs_dup_fs_context vmlinux EXPORT_SYMBOL +0x40ad4188 mlx5_packet_reformat_dealloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xbb83b4e4 drm_panel_prepare drivers/gpu/drm/drm EXPORT_SYMBOL +0x17f57f43 drm_atomic_helper_wait_for_flip_done drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x10962bc2 nfs_auth_info_match fs/nfs/nfs EXPORT_SYMBOL_GPL +0x91f44510 idr_alloc_cyclic vmlinux EXPORT_SYMBOL +0xd6329e1d dev_set_alias vmlinux EXPORT_SYMBOL +0x696340a5 __i2c_board_lock vmlinux EXPORT_SYMBOL_GPL +0x93c7edeb usb_find_common_endpoints vmlinux EXPORT_SYMBOL_GPL +0x4437de01 phy_basic_t1_features vmlinux EXPORT_SYMBOL_GPL +0x98fe7528 set_selection_kernel vmlinux EXPORT_SYMBOL_GPL +0x86d057f0 tty_ldisc_flush vmlinux EXPORT_SYMBOL_GPL +0x9a06f458 __dynamic_netdev_dbg vmlinux EXPORT_SYMBOL +0x6ca8880c __blkg_prfill_rwstat vmlinux EXPORT_SYMBOL_GPL +0xa8646890 crypto_register_instance vmlinux EXPORT_SYMBOL_GPL +0x8839e406 crypto_alloc_tfm vmlinux EXPORT_SYMBOL_GPL +0x881e90d4 get_cpu_entry_area vmlinux EXPORT_SYMBOL +0xc9c3f176 hpet_register_irq_handler vmlinux EXPORT_SYMBOL_GPL +0x32e3b076 mxcsr_feature_mask vmlinux EXPORT_SYMBOL_GPL +0x888c5be5 irq_bypass_register_consumer virt/lib/irqbypass EXPORT_SYMBOL_GPL +0x909cf5f5 rpc_machine_cred net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8ed348e4 fscache_object_destroy fs/fscache/fscache EXPORT_SYMBOL +0xf812cff6 memscan vmlinux EXPORT_SYMBOL +0xb10d964d devlink_fmsg_pair_nest_end vmlinux EXPORT_SYMBOL_GPL +0x817f31b2 skb_tstamp_tx vmlinux EXPORT_SYMBOL_GPL +0x3256f4ac devfreq_monitor_resume vmlinux EXPORT_SYMBOL +0xa62892c6 efivar_sysfs_list vmlinux EXPORT_SYMBOL_GPL +0x93022ba6 __scsi_format_command vmlinux EXPORT_SYMBOL +0x120d67d4 unregister_memory_isolate_notifier vmlinux EXPORT_SYMBOL +0xaa7a1d78 tty_set_operations vmlinux EXPORT_SYMBOL +0xab67a0ac dql_init vmlinux EXPORT_SYMBOL +0xb58ccf38 mark_buffer_dirty vmlinux EXPORT_SYMBOL 
+0x46e92ecd get_super vmlinux EXPORT_SYMBOL +0x46592ce4 write_bytes_to_xdr_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x99fdd75c ib_create_ah_from_wc drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc6a94351 mlx4_get_module_info drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0xa5a3187c cxgb4_immdata_send drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xaf27bebf drbd_disk_str drivers/block/drbd/drbd EXPORT_SYMBOL +0xae277372 __drm_crtc_commit_free drivers/gpu/drm/drm EXPORT_SYMBOL +0x38353483 skb_copy_and_csum_bits vmlinux EXPORT_SYMBOL +0x95439319 devm_hwmon_device_register_with_groups vmlinux EXPORT_SYMBOL_GPL +0x8da14ce6 srp_attach_transport vmlinux EXPORT_SYMBOL_GPL +0x0d868843 ahci_check_ready vmlinux EXPORT_SYMBOL_GPL +0x3fe490d0 clk_mux_ro_ops vmlinux EXPORT_SYMBOL_GPL +0xbb366d77 ll_rw_block vmlinux EXPORT_SYMBOL +0x262d0242 block_write_begin vmlinux EXPORT_SYMBOL +0xcc8cf070 irq_domain_associate_many vmlinux EXPORT_SYMBOL_GPL +0xbc6bec66 free_percpu_irq vmlinux EXPORT_SYMBOL_GPL +0xb7d7c12e hpet_set_alarm_time vmlinux EXPORT_SYMBOL_GPL +0x60ed199d adf_disable_sriov drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xdc31781e mlxsw_reg_trans_write drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x6add767c mlx5_set_port_pfc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x7ddcf786 mlx4_release_eq drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x2460d966 drm_mode_validate_driver drivers/gpu/drm/drm EXPORT_SYMBOL +0x45f1bc79 __tracepoint_non_standard_event vmlinux EXPORT_SYMBOL_GPL +0x71090e81 ps2_sendbyte vmlinux EXPORT_SYMBOL +0xc3193a8a sas_rphy_free vmlinux EXPORT_SYMBOL +0x79c92ae1 fc_release_transport vmlinux EXPORT_SYMBOL +0xbe1887e4 ata_unpack_xfermask vmlinux EXPORT_SYMBOL_GPL +0xbd02b9d4 fb_deferred_io_fsync vmlinux EXPORT_SYMBOL_GPL +0x73f0e200 pci_alloc_irq_vectors_affinity vmlinux EXPORT_SYMBOL +0xe81da98f blk_mq_sched_mark_restart_hctx vmlinux EXPORT_SYMBOL_GPL +0x37bdce0b blk_mq_start_hw_queues vmlinux EXPORT_SYMBOL +0xe1761617 security_inet_conn_request vmlinux EXPORT_SYMBOL +0x19f925ea sysfs_remove_groups vmlinux EXPORT_SYMBOL_GPL +0x1268f357 resume_device_irqs vmlinux EXPORT_SYMBOL_GPL +0x7017f8df iscsit_unregister_transport drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x593bf405 drm_add_override_edid_modes drivers/gpu/drm/drm EXPORT_SYMBOL +0x13eebaaf drm_fb_helper_alloc_fbi drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1047ae98 pnfs_generic_recover_commit_reqs fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x47ea5c6c ufshcd_system_resume vmlinux EXPORT_SYMBOL +0x38bef4f8 fcoe_fcf_get_selected vmlinux EXPORT_SYMBOL +0xb1b53829 fc_lport_flogi_resp vmlinux EXPORT_SYMBOL +0xdae7040a nvdimm_name vmlinux EXPORT_SYMBOL_GPL +0x2ce36a7b dev_pm_domain_attach_by_id vmlinux EXPORT_SYMBOL_GPL +0x5146d67f component_match_add_release vmlinux EXPORT_SYMBOL +0x1e0cd7fe acpi_detach_data vmlinux EXPORT_SYMBOL +0x7429e20c kstrtos8 vmlinux EXPORT_SYMBOL +0xdb80423c kblockd_schedule_work_on vmlinux EXPORT_SYMBOL +0xfdbd7a17 crypto_get_attr_type vmlinux EXPORT_SYMBOL_GPL +0xbf3193ec acpi_unregister_ioapic vmlinux EXPORT_SYMBOL +0x88abb78b ZSTD_insertBlock lib/zstd/zstd_decompress EXPORT_SYMBOL +0xd7e34861 virtio_transport_do_socket_init net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x545f12de svc_proc_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xcf88efe0 svc_rpcbind_set_version net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xdc86d7ef nfnetlink_send net/netfilter/nfnetlink EXPORT_SYMBOL_GPL 
+0x3a258ca7 ib_umem_release drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x2467a2ec ib_fmr_pool_unmap drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xc6e03446 drm_crtc_wait_one_vblank drivers/gpu/drm/drm EXPORT_SYMBOL +0xd1f1c57a drm_dev_unregister drivers/gpu/drm/drm EXPORT_SYMBOL +0x2a32eb5c drm_scdc_set_high_tmds_clock_ratio drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x91d09d2e pnfs_generic_pg_init_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x0f6183b3 pnfs_error_mark_layout_for_return fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x6735d56e tcp_ca_get_name_by_key vmlinux EXPORT_SYMBOL_GPL +0xc0ff21c1 input_get_new_minor vmlinux EXPORT_SYMBOL +0x9e424645 phy_modify vmlinux EXPORT_SYMBOL_GPL +0x26c90ea4 scsi_eh_get_sense vmlinux EXPORT_SYMBOL_GPL +0x1d722739 device_create_bin_file vmlinux EXPORT_SYMBOL_GPL +0xc7a9244e pci_intx vmlinux EXPORT_SYMBOL_GPL +0x32890a5c crypto_alloc_aead vmlinux EXPORT_SYMBOL_GPL +0x66871aeb crypto_spawn_tfm2 vmlinux EXPORT_SYMBOL_GPL +0x50fad434 round_jiffies_up vmlinux EXPORT_SYMBOL_GPL +0x0b80a330 ceph_cls_assert_locked net/ceph/libceph EXPORT_SYMBOL +0xa53387c7 dm_rh_flush drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x3ad0f55b dm_bm_flush drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x3cdcfe9c mlx4_mtt_cleanup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x75f0e875 xas_store vmlinux EXPORT_SYMBOL_GPL +0x3d3fe171 xt_request_find_table_lock vmlinux EXPORT_SYMBOL_GPL +0x961b328f dev_get_mac_address vmlinux EXPORT_SYMBOL +0xf35196c6 dev_set_mac_address vmlinux EXPORT_SYMBOL +0x77b0898c sk_mc_loop vmlinux EXPORT_SYMBOL +0x26eb91cf serio_open vmlinux EXPORT_SYMBOL +0x348a9c0e agp_generic_free_by_type vmlinux EXPORT_SYMBOL +0x22b325d5 kd_mksound vmlinux EXPORT_SYMBOL +0x34331f04 acpi_os_unmap_memory vmlinux EXPORT_SYMBOL_GPL +0x61cd6e2e crypto_register_scomps vmlinux EXPORT_SYMBOL_GPL +0xe783e261 sysfs_emit vmlinux EXPORT_SYMBOL_GPL +0x100ab093 __tracepoint_powernv_throttle vmlinux EXPORT_SYMBOL_GPL +0xe1dcf64a audit_log_format vmlinux EXPORT_SYMBOL +0x5ecc7788 irqd_cfg vmlinux EXPORT_SYMBOL_GPL +0x952d6902 nf_fwd_netdev_egress net/netfilter/nf_dup_netdev EXPORT_SYMBOL_GPL +0xb6517b2e mlxsw_afa_block_append_trap_and_forward drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x89c5130d drm_vblank_restore drivers/gpu/drm/drm EXPORT_SYMBOL +0xd0bcee22 kvm_vcpu_read_guest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x6d2fc5a6 net_namespace_list vmlinux EXPORT_SYMBOL_GPL +0x5dd60f34 sock_register vmlinux EXPORT_SYMBOL +0xa6e65f7b serio_close vmlinux EXPORT_SYMBOL +0x474ba5bd ata_link_next vmlinux EXPORT_SYMBOL_GPL +0xaf2e159e loop_register_transfer vmlinux EXPORT_SYMBOL +0xd60106b3 device_set_wakeup_enable vmlinux EXPORT_SYMBOL_GPL +0x977b2156 tpm2_probe vmlinux EXPORT_SYMBOL_GPL +0x08d390b1 vchan_init vmlinux EXPORT_SYMBOL_GPL +0xf6ca862a pci_find_bus vmlinux EXPORT_SYMBOL +0x810e0099 pinctrl_unregister vmlinux EXPORT_SYMBOL_GPL +0xcda0022e _proc_mkdir vmlinux EXPORT_SYMBOL_GPL +0x58f03b99 register_ftrace_function vmlinux EXPORT_SYMBOL_GPL +0xff7e1547 cgrp_dfl_root vmlinux EXPORT_SYMBOL_GPL +0x91c8b5b5 mutex_lock_io vmlinux EXPORT_SYMBOL_GPL +0xf4a5c213 avail_to_resrv_perfctr_nmi_bit vmlinux EXPORT_SYMBOL +0x5ce3b588 nfnl_lock net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xd4cd3c1a __tracepoint_bcache_writeback drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x1d12fd6e drm_mm_insert_node_in_range drivers/gpu/drm/drm EXPORT_SYMBOL +0x5e487326 nfs_flock fs/nfs/nfs EXPORT_SYMBOL_GPL +0x39fd83db halt_poll_ns_shrink arch/x86/kvm/kvm 
EXPORT_SYMBOL_GPL +0x1e941cc8 inet6_register_protosw vmlinux EXPORT_SYMBOL +0xb80be99d xdp_attachment_flags_ok vmlinux EXPORT_SYMBOL_GPL +0x62d8de40 dev_set_group vmlinux EXPORT_SYMBOL +0x2f34ac9c netif_set_real_num_rx_queues vmlinux EXPORT_SYMBOL +0xce689592 netif_set_real_num_tx_queues vmlinux EXPORT_SYMBOL +0x00eb3792 phy_init_hw vmlinux EXPORT_SYMBOL +0xaee8c13e phy_register_fixup_for_uid vmlinux EXPORT_SYMBOL +0x204c5067 scsi_dev_info_add_list vmlinux EXPORT_SYMBOL +0x0bf46046 pci_find_resource vmlinux EXPORT_SYMBOL +0x1e08bebd pci_user_read_config_word vmlinux EXPORT_SYMBOL_GPL +0xa0628b0a badblocks_check vmlinux EXPORT_SYMBOL_GPL +0x13dcb350 proc_remove vmlinux EXPORT_SYMBOL +0xfb1722c1 end_buffer_async_write vmlinux EXPORT_SYMBOL +0x5b641283 arch_phys_wc_add vmlinux EXPORT_SYMBOL +0xfdbe4c39 mlx4_SET_PORT_fcs_check drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x3a22fa8a vmci_datagram_destroy_handle drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x7a452e56 drm_fb_helper_unlink_fbi drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf222794c drm_fb_xrgb8888_to_gray8 drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xd7d1f27e drm_atomic_helper_update_legacy_modeset_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x4ffb464f devm_devfreq_unregister_notifier vmlinux EXPORT_SYMBOL +0x9258c776 hdmi_vendor_infoframe_pack_only vmlinux EXPORT_SYMBOL +0x50097088 security_tun_dev_free_security vmlinux EXPORT_SYMBOL +0xafe2de4b add_swap_extent vmlinux EXPORT_SYMBOL_GPL +0x4af20f95 svc_rpcb_cleanup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x0aac0356 dm_disk vmlinux EXPORT_SYMBOL_GPL +0x9493800e iscsi_tcp_conn_get_stats vmlinux EXPORT_SYMBOL_GPL +0x8a47043d LZ4_decompress_safe_continue vmlinux EXPORT_SYMBOL +0x68f31cbd __list_add_valid vmlinux EXPORT_SYMBOL +0xfc70c5d8 ring_buffer_consume vmlinux EXPORT_SYMBOL_GPL +0x1c5b1f28 irq_free_descs vmlinux EXPORT_SYMBOL_GPL +0xa8181adf proc_dointvec vmlinux EXPORT_SYMBOL +0x66f992b7 drm_atomic_helper_async_check drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1ec8c16e drm_dp_atomic_find_vcpi_slots drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x81a17396 mlog_and_bits fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x7909689c __inet_lookup_established vmlinux EXPORT_SYMBOL_GPL +0x1c3fae25 netdev_alert vmlinux EXPORT_SYMBOL +0xd3fa0904 netdev_has_any_upper_dev vmlinux EXPORT_SYMBOL +0xad947170 qlt_rdy_to_xfer vmlinux EXPORT_SYMBOL +0x1fe6918b phy_validate vmlinux EXPORT_SYMBOL_GPL +0x9430b198 trace_dump_stack vmlinux EXPORT_SYMBOL_GPL +0x4248ae3c single_task_running vmlinux EXPORT_SYMBOL +0x691c73da ceph_osdc_flush_notifies net/ceph/libceph EXPORT_SYMBOL +0x4f7d5508 svc_rpcb_setup net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf78477d5 nf_synproxy_ipv4_fini net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0xb682284e mlx4_flow_detach drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xaec88ddd drm_hdmi_infoframe_set_hdr_metadata drivers/gpu/drm/drm EXPORT_SYMBOL +0xf8386d97 cpumask_next_and vmlinux EXPORT_SYMBOL +0xb3428b1e br_forward_finish vmlinux EXPORT_SYMBOL_GPL +0x45a074ed udp_lib_rehash vmlinux EXPORT_SYMBOL +0x21c3f61f udp_lib_unhash vmlinux EXPORT_SYMBOL +0x05495392 hid_debug vmlinux EXPORT_SYMBOL_GPL +0x9dace2d0 pm_stay_awake vmlinux EXPORT_SYMBOL_GPL +0xbcf05d5a device_show_ulong vmlinux EXPORT_SYMBOL_GPL +0x3f07dfa2 dw_dma_filter vmlinux EXPORT_SYMBOL_GPL +0x560fda3a acpi_bus_register_driver vmlinux EXPORT_SYMBOL +0xbaea9d02 set_ras_addr_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0xa0d4c60d drm_open 
drivers/gpu/drm/drm EXPORT_SYMBOL +0x4e3fd1b4 kvm_release_pfn_clean arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x7d388ff4 tcp_ca_get_key_by_name vmlinux EXPORT_SYMBOL_GPL +0xcc30f0f1 tcp_tx_delay_enabled vmlinux EXPORT_SYMBOL +0x789cfce0 nf_hook_slow vmlinux EXPORT_SYMBOL +0x7c2eba6b task_cls_state vmlinux EXPORT_SYMBOL_GPL +0x06e13bb2 dev_pick_tx_cpu_id vmlinux EXPORT_SYMBOL +0xbc071179 iscsi_get_ipaddress_state_name vmlinux EXPORT_SYMBOL_GPL +0x040de940 tpm1_do_selftest vmlinux EXPORT_SYMBOL_GPL +0xce5ac24f zlib_inflate_workspacesize vmlinux EXPORT_SYMBOL +0xc60d0620 __num_online_cpus vmlinux EXPORT_SYMBOL +0xafc42dc8 ceph_osdc_sync net/ceph/libceph EXPORT_SYMBOL +0x5ef94dac svc_age_temp_xprts_now net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x14cddefa ip6_tnl_change_mtu net/ipv6/ip6_tunnel EXPORT_SYMBOL +0xd3b03a99 vhost_dequeue_msg drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x4e6c2f54 dm_dirty_log_type_register drivers/md/dm-log EXPORT_SYMBOL +0xafae4e81 __tracepoint_bcache_btree_node_alloc_fail drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xf982e6db o2net_send_message fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xdbf9ab24 pnfs_put_lseg fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xb26b8230 nfs_sb_deactive fs/nfs/nfs EXPORT_SYMBOL_GPL +0x661847ee tcp_recvmsg vmlinux EXPORT_SYMBOL +0xa25fc115 xt_compat_check_entry_offsets vmlinux EXPORT_SYMBOL +0x6738c51c xdp_return_buff vmlinux EXPORT_SYMBOL_GPL +0xac34ecec dca_register_notify vmlinux EXPORT_SYMBOL_GPL +0x374c53e1 ata_get_cmd_descript vmlinux EXPORT_SYMBOL_GPL +0xdc3aab7a vga_client_register vmlinux EXPORT_SYMBOL +0x5876792b bioset_init vmlinux EXPORT_SYMBOL +0x67ed9b4d bioset_exit vmlinux EXPORT_SYMBOL +0x2aff68f9 perf_guest_get_msrs vmlinux EXPORT_SYMBOL_GPL +0x66551bc7 drm_detect_monitor_audio drivers/gpu/drm/drm EXPORT_SYMBOL +0xd80dc129 kvm_lapic_reg_write arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xc5e50f5c xfrm_audit_state_notfound_simple vmlinux EXPORT_SYMBOL_GPL +0x74a4ea71 udp_destruct_sock vmlinux EXPORT_SYMBOL_GPL +0x66a65a62 inet_hashinfo2_init_mod vmlinux EXPORT_SYMBOL_GPL +0xa10d6431 fcoe_ctlr_init vmlinux EXPORT_SYMBOL +0x84595a3d ata_pci_sff_activate_host vmlinux EXPORT_SYMBOL_GPL +0xd4682ee2 ata_timing_cycle2mode vmlinux EXPORT_SYMBOL_GPL +0x76ac8280 ata_scsi_queuecmd vmlinux EXPORT_SYMBOL_GPL +0x7974a4b6 bpf_verifier_log_write vmlinux EXPORT_SYMBOL_GPL +0x9cee6e70 console_start vmlinux EXPORT_SYMBOL +0x3dd9b230 proc_dointvec_userhz_jiffies vmlinux EXPORT_SYMBOL +0xc4b2b6ed nf_flow_offload_ip_hook net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0x08eeefda ib_uverbs_get_ucontext_file drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xd2a57107 drm_crtc_vblank_put drivers/gpu/drm/drm EXPORT_SYMBOL +0xaf68360f drm_gem_unmap_dma_buf drivers/gpu/drm/drm EXPORT_SYMBOL +0x2b7a9cee show_mem vmlinux EXPORT_SYMBOL +0xdbc43e2c tcp_simple_retransmit vmlinux EXPORT_SYMBOL +0xdf6f43b8 xt_register_match vmlinux EXPORT_SYMBOL +0xafb75a83 pci_check_and_mask_intx vmlinux EXPORT_SYMBOL_GPL +0xfe4359c5 proc_set_user vmlinux EXPORT_SYMBOL +0xa833455a anon_inode_getfile vmlinux EXPORT_SYMBOL_GPL +0x35921032 simple_dir_inode_operations vmlinux EXPORT_SYMBOL +0x07b80552 trace_output_call vmlinux EXPORT_SYMBOL_GPL +0x572c6f4d irq_domain_xlate_onetwocell vmlinux EXPORT_SYMBOL_GPL +0xf76606d2 cred_fscmp vmlinux EXPORT_SYMBOL +0x224e35f7 vbg_get_gdev drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL +0xaea0f73e napi_consume_skb vmlinux EXPORT_SYMBOL +0xe4df4663 sas_get_local_phy vmlinux EXPORT_SYMBOL_GPL +0xf50ac3d9 ahci_do_softreset vmlinux EXPORT_SYMBOL_GPL +0x5133ea89 
pci_host_probe vmlinux EXPORT_SYMBOL_GPL +0xc6de36ef refcount_sub_and_test_checked vmlinux EXPORT_SYMBOL +0xf48633c5 vfs_get_tree vmlinux EXPORT_SYMBOL +0xc6f25f51 dmam_pool_create vmlinux EXPORT_SYMBOL +0x1905834a register_kretprobe vmlinux EXPORT_SYMBOL_GPL +0xaac39ff3 __drm_printfn_info drivers/gpu/drm/drm EXPORT_SYMBOL +0x261953f4 drm_atomic_check_only drivers/gpu/drm/drm EXPORT_SYMBOL +0x5b355416 input_mt_get_slot_by_key vmlinux EXPORT_SYMBOL +0x7d287b92 input_mt_drop_unused vmlinux EXPORT_SYMBOL +0x12478855 genphy_suspend vmlinux EXPORT_SYMBOL +0xab16e83d iscsi_host_alloc vmlinux EXPORT_SYMBOL_GPL +0xdaf67f90 __platform_create_bundle vmlinux EXPORT_SYMBOL_GPL +0x8a60b0cb subsys_system_register vmlinux EXPORT_SYMBOL_GPL +0x3ef38dc9 arch_nvram_ops vmlinux EXPORT_SYMBOL +0xa3f37c2b acpi_find_child_device vmlinux EXPORT_SYMBOL_GPL +0xa811c68a __find_get_block vmlinux EXPORT_SYMBOL +0x6f95c2a4 dcache_dir_open vmlinux EXPORT_SYMBOL +0x3f4547a7 put_unused_fd vmlinux EXPORT_SYMBOL +0xb1ddf995 jiffies_64_to_clock_t vmlinux EXPORT_SYMBOL +0xd1c8c21b irq_domain_create_hierarchy vmlinux EXPORT_SYMBOL_GPL +0xfd525ec7 lc_put lib/lru_cache EXPORT_SYMBOL +0x9134feb7 lc_get lib/lru_cache EXPORT_SYMBOL +0xcc979a91 lc_set lib/lru_cache EXPORT_SYMBOL +0x99a1fe74 ibdev_err drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x4edcd35b mlx4_counter_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x45d0919b cxgb4_get_tcp_stats drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xc3a2be67 nfs_net_id fs/nfs/nfs EXPORT_SYMBOL_GPL +0xece784c2 rb_first vmlinux EXPORT_SYMBOL +0x58ecd405 inet6_lookup_listener vmlinux EXPORT_SYMBOL_GPL +0x1b0be72f tcp_seq_stop vmlinux EXPORT_SYMBOL +0xbe923ee4 srp_reconnect_rport vmlinux EXPORT_SYMBOL +0x0c550cc8 dev_pm_qos_expose_latency_limit vmlinux EXPORT_SYMBOL_GPL +0xa20d01ba __trace_bprintk vmlinux EXPORT_SYMBOL_GPL +0x6a5ecb18 unregister_module_notifier vmlinux EXPORT_SYMBOL +0x3c1c3725 rcu_fwd_progress_check vmlinux EXPORT_SYMBOL_GPL +0x3dc619d3 swake_up_locked vmlinux EXPORT_SYMBOL +0xf8f61ebc wake_up_var vmlinux EXPORT_SYMBOL +0xf1160be9 flush_delayed_work vmlinux EXPORT_SYMBOL +0xb4985beb ZSTD_resetCStream lib/zstd/zstd_compress EXPORT_SYMBOL +0x59d943c8 vsock_addr_cast net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xd2c107bb ceph_flags_to_mode net/ceph/libceph EXPORT_SYMBOL +0x306ac912 nf_conntrack_hash_check_insert net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xf0331b7a ib_find_exact_cached_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa174764e cxgbi_sock_purge_wr_queue drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xcbf9afde __tracepoint_kvm_msr arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xbd6fdd3f skb_zerocopy_headlen vmlinux EXPORT_SYMBOL_GPL +0xb2ff26a8 sas_change_queue_depth vmlinux EXPORT_SYMBOL_GPL +0x0bd6b613 ata_dummy_port_ops vmlinux EXPORT_SYMBOL_GPL +0xcd42298d fwnode_property_present vmlinux EXPORT_SYMBOL_GPL +0x6f4c2896 devm_release_action vmlinux EXPORT_SYMBOL_GPL +0xfd7243c7 erst_disable vmlinux EXPORT_SYMBOL_GPL +0xa4faf62a acpi_disable_gpe vmlinux EXPORT_SYMBOL +0x39e3c030 do_trace_read_msr vmlinux EXPORT_SYMBOL +0xa3d29dda iscsit_response_queue drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x69653e69 unix_peer_get vmlinux EXPORT_SYMBOL_GPL +0xd2eb32b9 tcp_rx_skb_cache_key vmlinux EXPORT_SYMBOL +0xeedd987e phy_10gbit_features_array vmlinux EXPORT_SYMBOL_GPL +0x7d00c65b nd_synchronize vmlinux EXPORT_SYMBOL_GPL +0xcc000f16 device_property_read_string vmlinux EXPORT_SYMBOL_GPL +0xfd73ab00 bus_register vmlinux EXPORT_SYMBOL_GPL 
+0x8b111e90 clk_hw_register_divider_table vmlinux EXPORT_SYMBOL_GPL +0x082c3213 pci_root_buses vmlinux EXPORT_SYMBOL +0x36e348a7 blk_freeze_queue_start vmlinux EXPORT_SYMBOL_GPL +0xae9b7974 balloon_aops vmlinux EXPORT_SYMBOL_GPL +0xacf649bf audit_log_task_info vmlinux EXPORT_SYMBOL +0x4db237d6 nft_meta_set_validate net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x6003bd7d drm_atomic_helper_connector_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x77eee5fd tcp_syn_ack_timeout vmlinux EXPORT_SYMBOL +0xdc126026 nf_log_unregister vmlinux EXPORT_SYMBOL +0x781b9168 dca_add_requester vmlinux EXPORT_SYMBOL_GPL +0x008462e0 scsi_eh_ready_devs vmlinux EXPORT_SYMBOL_GPL +0x0e13cb4d apei_resources_release vmlinux EXPORT_SYMBOL_GPL +0x141271bf acpi_dev_found vmlinux EXPORT_SYMBOL +0x42f1b900 fb_pad_unaligned_buffer vmlinux EXPORT_SYMBOL +0x740ae808 d_hash_and_lookup vmlinux EXPORT_SYMBOL +0x1df63e88 ZSTD_compressBegin lib/zstd/zstd_compress EXPORT_SYMBOL +0x9d965dfb drm_atomic_helper_commit_planes drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x87802be2 dmi_kobj vmlinux EXPORT_SYMBOL_GPL +0x489846bd thermal_cooling_device_register vmlinux EXPORT_SYMBOL_GPL +0x6b4e12a3 input_open_device vmlinux EXPORT_SYMBOL +0xf03e719b ahci_platform_disable_regulators vmlinux EXPORT_SYMBOL_GPL +0x2a2fa260 nla_policy_len vmlinux EXPORT_SYMBOL +0x6cefde88 _copy_from_iter_nocache vmlinux EXPORT_SYMBOL +0xf9616085 blk_mq_rdma_map_queues vmlinux EXPORT_SYMBOL_GPL +0x0cf7b635 fsync_bdev vmlinux EXPORT_SYMBOL +0xd2478c44 simple_get_link vmlinux EXPORT_SYMBOL +0x92ec510d jiffies64_to_msecs vmlinux EXPORT_SYMBOL +0xee8d74d6 jiffies64_to_nsecs vmlinux EXPORT_SYMBOL +0xd918c451 i2c_new_device drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x1627a02c drm_flip_work_queue drivers/gpu/drm/drm EXPORT_SYMBOL +0x8c06514e drm_vma_offset_lookup_locked drivers/gpu/drm/drm EXPORT_SYMBOL +0x8c2a17d8 drm_mode_set_name drivers/gpu/drm/drm EXPORT_SYMBOL +0x8108de55 drm_legacy_ioremap_wc drivers/gpu/drm/drm EXPORT_SYMBOL +0xba32a78e icmp6_send vmlinux EXPORT_SYMBOL +0x6003865c ip6_dst_lookup vmlinux EXPORT_SYMBOL_GPL +0x5090109e efivar_entry_set_get_size vmlinux EXPORT_SYMBOL_GPL +0x193bebc4 device_for_each_child vmlinux EXPORT_SYMBOL_GPL +0x40a9b349 vzalloc vmlinux EXPORT_SYMBOL +0xf73607a2 set_pages_uc vmlinux EXPORT_SYMBOL +0xbaffff96 ZSTD_CStreamWorkspaceBound lib/zstd/zstd_compress EXPORT_SYMBOL +0xd12e5224 __ib_alloc_pd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x5c542e64 mlx4_find_cached_vlan drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xbb0f4c1b nvmf_connect_admin_queue drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL +0xf9e101f7 nfs_commit_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0x1902178a srp_tmo_valid vmlinux EXPORT_SYMBOL_GPL +0xada19aad pm_generic_poweroff_late vmlinux EXPORT_SYMBOL_GPL +0x2817f7fd cppc_get_desired_perf vmlinux EXPORT_SYMBOL_GPL +0x1344d7e6 acpi_enable_gpe vmlinux EXPORT_SYMBOL +0x9c86dddb pci_probe_reset_slot vmlinux EXPORT_SYMBOL_GPL +0x422ce162 unregister_key_type vmlinux EXPORT_SYMBOL +0x9424058f arch_haltpoll_disable vmlinux EXPORT_SYMBOL_GPL +0x518c2fc6 hpet_rtc_dropped_irq vmlinux EXPORT_SYMBOL_GPL +0x24f05ce9 rpc_shutdown_client net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x90161761 failover_slave_unregister net/core/failover EXPORT_SYMBOL_GPL +0xe5840ec6 ib_wc_status_msg drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x1cc4b32b transport_generic_new_cmd drivers/target/target_core_mod EXPORT_SYMBOL +0xce5b4c9b pppox_ioctl drivers/net/ppp/pppox EXPORT_SYMBOL +0x1677570a 
mlx4_get_internal_clock_params drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xfa240ebc drm_fb_helper_prepare drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x72027e12 __drm_atomic_helper_connector_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x77239956 dlmunlock fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0x11089ac7 _ctype vmlinux EXPORT_SYMBOL +0x99fd9277 skb_mpls_pop vmlinux EXPORT_SYMBOL_GPL +0x2869cde5 dev_pm_opp_remove_table vmlinux EXPORT_SYMBOL_GPL +0x13e66d16 blk_get_queue vmlinux EXPORT_SYMBOL +0xa3e21b0b af_alg_wait_for_data vmlinux EXPORT_SYMBOL_GPL +0x423532cc bd_set_size vmlinux EXPORT_SYMBOL +0x452abe56 pm_vt_switch_unregister vmlinux EXPORT_SYMBOL +0xca7d8764 kthread_freezable_should_stop vmlinux EXPORT_SYMBOL_GPL +0x18fb2caf cpus_read_unlock vmlinux EXPORT_SYMBOL_GPL +0xd099b4f6 nf_conncount_gc_list net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0xcd5d5757 adf_reset_flr drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x3ae1afd1 __tracepoint_bcache_journal_write drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xdcf80fc7 __fscache_enable_cookie fs/fscache/fscache EXPORT_SYMBOL +0x11197b4e power_supply_get_by_name vmlinux EXPORT_SYMBOL_GPL +0xc4706178 pcie_capability_write_dword vmlinux EXPORT_SYMBOL +0x7846af3e __kfifo_len_r vmlinux EXPORT_SYMBOL +0xc09c8220 vfs_create_mount vmlinux EXPORT_SYMBOL +0xc85582c3 srcutorture_get_gp_data vmlinux EXPORT_SYMBOL_GPL +0xe0514ea7 svc_xprt_names net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x030ba99e rdma_rw_mr_factor drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3fc564d7 ib_alloc_fmr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xfd62f5a8 target_backend_unregister drivers/target/target_core_mod EXPORT_SYMBOL +0xe5ad4eba dm_cell_lock_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x27864d57 memparse vmlinux EXPORT_SYMBOL +0xa2060911 inet_current_timestamp vmlinux EXPORT_SYMBOL +0x77ac277a inet_sk_rebuild_header vmlinux EXPORT_SYMBOL +0xe28d442f devlink_dpipe_table_counter_enabled vmlinux EXPORT_SYMBOL_GPL +0xe2cecc56 flow_block_cb_is_busy vmlinux EXPORT_SYMBOL +0xee9d05c4 skb_copy vmlinux EXPORT_SYMBOL +0x36397b16 input_ff_flush vmlinux EXPORT_SYMBOL_GPL +0x598be554 input_inject_event vmlinux EXPORT_SYMBOL +0xcb02a4d5 acpi_is_pnp_device vmlinux EXPORT_SYMBOL_GPL +0xd63d7c82 pci_bus_read_config_dword vmlinux EXPORT_SYMBOL +0xa870db59 sysfs_remove_mount_point vmlinux EXPORT_SYMBOL_GPL +0x6c1bd8a7 block_truncate_page vmlinux EXPORT_SYMBOL +0x6f6fb75d vfs_tmpfile vmlinux EXPORT_SYMBOL +0xc0a3de40 generic_write_checks vmlinux EXPORT_SYMBOL +0xe7232e0f user_return_notifier_unregister vmlinux EXPORT_SYMBOL_GPL +0x21a79217 kthread_park vmlinux EXPORT_SYMBOL_GPL +0xf8fe3986 pat_pfn_immune_to_uc_mtrr vmlinux EXPORT_SYMBOL_GPL +0xa07b9aca drm_atomic_helper_page_flip_target drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe18f1516 xfrm_dev_state_flush vmlinux EXPORT_SYMBOL +0x69668826 netdev_increment_features vmlinux EXPORT_SYMBOL +0x4f2250ba rtc_tm_to_time64 vmlinux EXPORT_SYMBOL +0x375efea9 mptscsih_host_attrs vmlinux EXPORT_SYMBOL +0x3104ae23 iscsi_session_get_param vmlinux EXPORT_SYMBOL_GPL +0x34b2f382 sas_rphy_unlink vmlinux EXPORT_SYMBOL +0x055f1586 fwnode_property_read_u8_array vmlinux EXPORT_SYMBOL_GPL +0x05f68710 uart_match_port vmlinux EXPORT_SYMBOL +0x73417245 debugfs_create_file_size vmlinux EXPORT_SYMBOL_GPL +0x1f5f1536 vfs_get_link vmlinux EXPORT_SYMBOL +0xd3c36a28 trace_array_create vmlinux EXPORT_SYMBOL_GPL +0x03372453 force_irqthreads vmlinux EXPORT_SYMBOL_GPL +0x1dcbd687 __pte2cachemode_tbl 
vmlinux EXPORT_SYMBOL +0xb432b6e7 acpi_register_gsi vmlinux EXPORT_SYMBOL_GPL +0x25569726 svc_shutdown_net net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x91bc94fc kvm_mmu_invlpg arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x23b4e0d7 clear_page_rep vmlinux EXPORT_SYMBOL_GPL +0x9719bcfd pmc_atom_read vmlinux EXPORT_SYMBOL_GPL +0xaa634427 dca_get_tag vmlinux EXPORT_SYMBOL_GPL +0xcd9cc50f iscsi_host_get_param vmlinux EXPORT_SYMBOL_GPL +0xbefa6d14 iommu_register_device_fault_handler vmlinux EXPORT_SYMBOL_GPL +0x167befc5 acpi_device_get_match_data vmlinux EXPORT_SYMBOL_GPL +0x36f0cb35 pcie_port_service_register vmlinux EXPORT_SYMBOL +0x972acd33 pci_request_selected_regions vmlinux EXPORT_SYMBOL +0xae6be4c7 __splice_from_pipe vmlinux EXPORT_SYMBOL +0x80e82880 cxgbi_ep_disconnect drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xc57c6d80 unregister_net_sysctl_table vmlinux EXPORT_SYMBOL_GPL +0x687b9127 inet_gro_complete vmlinux EXPORT_SYMBOL +0x0552b967 xt_check_table_hooks vmlinux EXPORT_SYMBOL +0x24891501 __skb_tstamp_tx vmlinux EXPORT_SYMBOL_GPL +0xb578ba5a cpufreq_register_governor vmlinux EXPORT_SYMBOL_GPL +0x091d60f9 fc_rport_login vmlinux EXPORT_SYMBOL +0x2a8f5746 scsi_is_sas_phy vmlinux EXPORT_SYMBOL +0x1b5059ce ata_id_xfermask vmlinux EXPORT_SYMBOL_GPL +0xb702d49b tty_write_room vmlinux EXPORT_SYMBOL +0xe787698f acpi_processor_register_performance vmlinux EXPORT_SYMBOL +0xe138fb8c percpu_counter_add_batch vmlinux EXPORT_SYMBOL +0x842077d6 sysfs_add_file_to_group vmlinux EXPORT_SYMBOL_GPL +0xb6a9d3d0 param_ops_ushort vmlinux EXPORT_SYMBOL +0xefeafcf1 edac_has_mcs drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xbfd7d7a2 o2hb_global_heartbeat_active fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL +0x0ed788c9 kvm_mmu_clear_dirty_pt_masked arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xdb6dea21 dev_trans_start vmlinux EXPORT_SYMBOL +0x20dee6a4 devlink_port_params_unregister vmlinux EXPORT_SYMBOL_GPL +0x0b11decd usb_unlocked_disable_lpm vmlinux EXPORT_SYMBOL_GPL +0x49fe9dd2 serial8250_rpm_put vmlinux EXPORT_SYMBOL_GPL +0x64a9c928 default_blu vmlinux EXPORT_SYMBOL +0x57575f08 dmaengine_put vmlinux EXPORT_SYMBOL +0xaae8ab0e acpi_bus_power_manageable vmlinux EXPORT_SYMBOL +0xf681acfc hdmi_infoframe_unpack vmlinux EXPORT_SYMBOL +0x4ea25709 dql_reset vmlinux EXPORT_SYMBOL +0xc56780af blk_register_region vmlinux EXPORT_SYMBOL +0x4918f86d bio_put vmlinux EXPORT_SYMBOL +0x98dc302a devm_memunmap vmlinux EXPORT_SYMBOL +0xf07fadcd devm_memremap vmlinux EXPORT_SYMBOL +0x23e61699 bpf_map_inc vmlinux EXPORT_SYMBOL_GPL +0xc3bc72ad trace_print_array_seq vmlinux EXPORT_SYMBOL +0x1b597b7a swake_up_all vmlinux EXPORT_SYMBOL +0x5de611a9 btracker_nr_writebacks_queued drivers/md/dm-cache EXPORT_SYMBOL_GPL +0x3e73f10c __tracepoint_pnfs_mds_fallback_pg_init_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x402a1ab7 input_alloc_absinfo vmlinux EXPORT_SYMBOL +0xbbec3e48 serio_bus vmlinux EXPORT_SYMBOL +0xae4f7776 scsi_is_target_device vmlinux EXPORT_SYMBOL +0x3d1d6b62 ahci_do_hardreset vmlinux EXPORT_SYMBOL_GPL +0x21cd536a crypto_put_default_null_skcipher vmlinux EXPORT_SYMBOL_GPL +0x3a7396b7 crypto_get_default_null_skcipher vmlinux EXPORT_SYMBOL_GPL +0x42d96b6d each_symbol_section vmlinux EXPORT_SYMBOL_GPL +0x3b0be5a4 virtio_transport_stream_dequeue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x99b23286 ib_modify_qp_is_ok drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xde5e79bb mdev_get_drvdata drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xe4da9990 mlx5_core_access_reg drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL 
+0x6b5c2b06 drm_atomic_helper_damage_iter_next drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x4d074649 nfs_init_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x164a055e kvm_arch_unregister_noncoherent_dma arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x212396d2 xts_camellia_setkey arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL +0x61c07512 ping_bind vmlinux EXPORT_SYMBOL_GPL +0xe5bfc83b nf_hook_entries_insert_raw vmlinux EXPORT_SYMBOL_GPL +0x954e7336 sk_dst_check vmlinux EXPORT_SYMBOL +0x453c8403 pci_msi_enabled vmlinux EXPORT_SYMBOL +0xd36e3d59 prandom_bytes_state vmlinux EXPORT_SYMBOL +0x5c11c23b inode_needs_sync vmlinux EXPORT_SYMBOL +0xd6190558 dma_direct_map_resource vmlinux EXPORT_SYMBOL +0x399bb8c0 __wake_up_sync_key vmlinux EXPORT_SYMBOL_GPL +0x347e0c69 drm_framebuffer_remove drivers/gpu/drm/drm EXPORT_SYMBOL +0x8826c13b acpi_video_register drivers/acpi/video EXPORT_SYMBOL +0x4071a9d7 bpfilter_ops vmlinux EXPORT_SYMBOL_GPL +0xd5191411 inet_csk_complete_hashdance vmlinux EXPORT_SYMBOL +0x0027e202 dev_getfirstbyhwtype vmlinux EXPORT_SYMBOL +0x0ebbfe57 ahci_print_info vmlinux EXPORT_SYMBOL_GPL +0x26e298e0 unregister_memory_notifier vmlinux EXPORT_SYMBOL +0xceec93be to_nfit_uuid vmlinux EXPORT_SYMBOL +0x7c548734 of_phy_simple_xlate vmlinux EXPORT_SYMBOL_GPL +0x54de622b dma_can_mmap vmlinux EXPORT_SYMBOL_GPL +0x1796cf7f wake_up_process vmlinux EXPORT_SYMBOL +0x25820c64 fs_overflowuid vmlinux EXPORT_SYMBOL +0xe0ff61f7 l2tp_session_register net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x666c923d __nf_ct_refresh_acct net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xe9716bf0 vhost_enable_notify drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x5e8a638f ib_modify_wq drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xcc3b167a adf_clean_vf_map drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xbb24f607 init_cdrom_command drivers/cdrom/cdrom EXPORT_SYMBOL +0x18f5480a nfs_do_submount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x02d42f81 gro_find_complete_by_type vmlinux EXPORT_SYMBOL +0xb5fe6123 skb_dequeue_tail vmlinux EXPORT_SYMBOL +0x223969aa mm_unaccount_pinned_pages vmlinux EXPORT_SYMBOL_GPL +0xd1d8b2d7 hidinput_count_leds vmlinux EXPORT_SYMBOL_GPL +0x6d35a7bc input_ff_erase vmlinux EXPORT_SYMBOL_GPL +0x8e8f7049 scsi_is_sas_port vmlinux EXPORT_SYMBOL +0x11f0b34e scsi_target_resume vmlinux EXPORT_SYMBOL +0x6e710769 ata_sff_queue_delayed_work vmlinux EXPORT_SYMBOL_GPL +0x8462cb62 atapi_cmd_type vmlinux EXPORT_SYMBOL_GPL +0xec0aff6b pci_device_is_present vmlinux EXPORT_SYMBOL_GPL +0x95368d33 memcg_kmem_enabled_key vmlinux EXPORT_SYMBOL +0xdd5004fd __module_address vmlinux EXPORT_SYMBOL_GPL +0xff31fdb0 arch_debugfs_dir vmlinux EXPORT_SYMBOL +0xdfc091f9 ceph_entity_type_name net/ceph/libceph EXPORT_SYMBOL +0xa5b58138 mlx5_eq_disable drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x40bd5453 inet_stream_ops vmlinux EXPORT_SYMBOL +0xaad4fdc9 qdisc_reset vmlinux EXPORT_SYMBOL +0x1f73c752 flow_rule_match_ip vmlinux EXPORT_SYMBOL +0xd2da1048 register_netdevice_notifier vmlinux EXPORT_SYMBOL +0xb9c15bd9 pci_get_device vmlinux EXPORT_SYMBOL +0x9a0836f9 blk_mq_delay_run_hw_queue vmlinux EXPORT_SYMBOL +0x808ec1a3 crypto_alg_tested vmlinux EXPORT_SYMBOL_GPL +0x01115f89 iomap_seek_hole vmlinux EXPORT_SYMBOL_GPL +0xe769232e sprint_symbol_no_offset vmlinux EXPORT_SYMBOL_GPL +0xfa556e79 edac_mc_free drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xc7564fbd dm_rh_bio_to_region drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x94707bb9 mlxsw_afa_block_append_mirror drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL 
+0xc57c48a3 idr_get_next vmlinux EXPORT_SYMBOL +0x1eab9f42 ehci_init_driver vmlinux EXPORT_SYMBOL_GPL +0x809b6063 ata_sff_drain_fifo vmlinux EXPORT_SYMBOL_GPL +0xb02362d7 tpm1_getcap vmlinux EXPORT_SYMBOL_GPL +0x881c4413 gen_pool_first_fit vmlinux EXPORT_SYMBOL +0xd6b50990 dquot_initialize_needed vmlinux EXPORT_SYMBOL +0x2f548802 ns_to_timeval vmlinux EXPORT_SYMBOL +0xe8e47edf nf_ct_expect_register_notifier net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xb6127243 drm_need_swiotlb drivers/gpu/drm/drm EXPORT_SYMBOL +0x49e691e6 nfs42_proc_layouterror fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x466c14a7 __delay vmlinux EXPORT_SYMBOL +0xb47cca30 csum_ipv6_magic vmlinux EXPORT_SYMBOL +0xb71589f0 skip_spaces vmlinux EXPORT_SYMBOL +0x6b43b680 do_xdp_generic vmlinux EXPORT_SYMBOL_GPL +0x7421c767 dev_pm_opp_put_opp_table vmlinux EXPORT_SYMBOL_GPL +0x0c0c015e ring_buffer_swap_cpu vmlinux EXPORT_SYMBOL_GPL +0xdc8d32f6 param_get_ullong vmlinux EXPORT_SYMBOL +0x76d451c4 add_taint vmlinux EXPORT_SYMBOL +0x60a32ea9 pm_power_off vmlinux EXPORT_SYMBOL +0x1e7d3bd3 iscsit_build_rsp_pdu drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xdcd5f617 mlx5_core_roce_gid_set drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x8acaf8e3 nfs4_set_rw_stateid fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xe0c123d5 __neigh_for_each_release vmlinux EXPORT_SYMBOL +0x01954898 nd_integrity_init vmlinux EXPORT_SYMBOL +0xe862c4b7 dpm_suspend_start vmlinux EXPORT_SYMBOL_GPL +0x7b5097a6 device_initialize vmlinux EXPORT_SYMBOL_GPL +0xdbdcba61 user_destroy vmlinux EXPORT_SYMBOL_GPL +0x5cf039a7 dquot_alloc_inode vmlinux EXPORT_SYMBOL +0x4b61e68a bdput vmlinux EXPORT_SYMBOL +0x6ff6b911 bdget vmlinux EXPORT_SYMBOL +0x317fb2af fsstack_copy_inode_size vmlinux EXPORT_SYMBOL_GPL +0x83435f46 virtio_transport_notify_send_init net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x88266698 vbg_hgcm_connect drivers/virt/vboxguest/vboxguest EXPORT_SYMBOL +0x959f665d mlxsw_core_skb_transmit drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x6cc9923d dev_get_by_napi_id vmlinux EXPORT_SYMBOL +0x0c4b1406 ata_pci_bmdma_prepare_host vmlinux EXPORT_SYMBOL_GPL +0xe7a269a8 fwnode_get_named_child_node vmlinux EXPORT_SYMBOL_GPL +0xe0419ac4 kstrtos16 vmlinux EXPORT_SYMBOL +0xfb32b30f ring_buffer_read_prepare_sync vmlinux EXPORT_SYMBOL_GPL +0x19fa5852 mlxsw_core_flush_owq drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xcb54fee9 mlx5_query_module_eeprom drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x27046576 kvm_exit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x08e959fd strp_process vmlinux EXPORT_SYMBOL_GPL +0x83ba1ab7 tcf_em_register vmlinux EXPORT_SYMBOL +0xabd1588a dm_get_md vmlinux EXPORT_SYMBOL_GPL +0x0cc0bfd8 mipi_dsi_host_register vmlinux EXPORT_SYMBOL +0x0da6c32c devm_init_badblocks vmlinux EXPORT_SYMBOL_GPL +0xda1129c8 __tracepoint_block_unplug vmlinux EXPORT_SYMBOL_GPL +0x3f670b8b iomap_set_page_dirty vmlinux EXPORT_SYMBOL_GPL +0xce35592c dump_truncate vmlinux EXPORT_SYMBOL +0xd9d7f923 fs_kobj vmlinux EXPORT_SYMBOL_GPL +0xc40f284c nf_ct_helper_hsize net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa8c54d5f cdrom_ioctl drivers/cdrom/cdrom EXPORT_SYMBOL +0xc5992401 __drm_puts_coredump drivers/gpu/drm/drm EXPORT_SYMBOL +0x2101a9f3 drm_connector_list_update drivers/gpu/drm/drm EXPORT_SYMBOL +0x521c94e8 drm_mode_debug_printmodeline drivers/gpu/drm/drm EXPORT_SYMBOL +0x9705b259 ahci_platform_init_host vmlinux EXPORT_SYMBOL_GPL +0xe1c88d6a sata_link_scr_lpm vmlinux EXPORT_SYMBOL_GPL +0x8522d6bc 
strncpy_from_user vmlinux EXPORT_SYMBOL +0x5ed2969e string_escape_mem_ascii vmlinux EXPORT_SYMBOL +0xe82198cb bio_init vmlinux EXPORT_SYMBOL +0x17994d70 memhp_auto_online vmlinux EXPORT_SYMBOL_GPL +0x7278d328 all_vm_events vmlinux EXPORT_SYMBOL_GPL +0x41814cb8 dirty_writeback_interval vmlinux EXPORT_SYMBOL_GPL +0x11554aec gss_mech_put net/sunrpc/auth_gss/auth_rpcgss EXPORT_SYMBOL +0xb03bb478 rpc_max_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xc2fcd845 rdma_move_grh_sgid_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3a3af79f vfio_unregister_iommu_driver drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xe314538c adf_vf_isr_resource_free drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0xa77399f0 i40e_unregister_client drivers/net/ethernet/intel/i40e/i40e EXPORT_SYMBOL +0x9dd26089 drm_mm_scan_init_with_range drivers/gpu/drm/drm EXPORT_SYMBOL +0x1b06ff52 inet6_hash_connect vmlinux EXPORT_SYMBOL_GPL +0x2fac9949 iptunnel_handle_offloads vmlinux EXPORT_SYMBOL_GPL +0xc1b16663 gnet_stats_copy_rate_est vmlinux EXPORT_SYMBOL +0x9977b85b skb_partial_csum_set vmlinux EXPORT_SYMBOL_GPL +0x8b8b749f dev_pm_opp_register_set_opp_helper vmlinux EXPORT_SYMBOL_GPL +0xb859f980 genphy_c45_aneg_done vmlinux EXPORT_SYMBOL_GPL +0xf1234873 pm_runtime_no_callbacks vmlinux EXPORT_SYMBOL_GPL +0xfabb429d subsys_dev_iter_next vmlinux EXPORT_SYMBOL_GPL +0x03dc0a54 iommu_detach_group vmlinux EXPORT_SYMBOL_GPL +0x7fbc00d6 pci_scan_root_bus vmlinux EXPORT_SYMBOL +0xc3f27515 create_empty_buffers vmlinux EXPORT_SYMBOL +0xbe494808 init_special_inode vmlinux EXPORT_SYMBOL +0x2910f4cb on_each_cpu_cond vmlinux EXPORT_SYMBOL +0x67587c54 nfnl_acct_update net/netfilter/nfnetlink_acct EXPORT_SYMBOL_GPL +0xe673e202 drm_flip_work_commit drivers/gpu/drm/drm EXPORT_SYMBOL +0x2c0e472e nfs_drop_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa3fdb692 br_vlan_get_info vmlinux EXPORT_SYMBOL_GPL +0x5e6a8079 __xfrm_dst_lookup vmlinux EXPORT_SYMBOL +0x9282116e ipmr_rule_default vmlinux EXPORT_SYMBOL +0x2cb7d784 iptunnel_xmit vmlinux EXPORT_SYMBOL_GPL +0x5bc1621e inet_frag_destroy vmlinux EXPORT_SYMBOL +0x737724bc ipv4_sk_update_pmtu vmlinux EXPORT_SYMBOL_GPL +0x3f1ef70a xt_tee_enabled vmlinux EXPORT_SYMBOL_GPL +0xc79bcd36 dm_vcalloc vmlinux EXPORT_SYMBOL +0x4531624f usb_decode_ctrl vmlinux EXPORT_SYMBOL_GPL +0xb89a846f srp_release_transport vmlinux EXPORT_SYMBOL_GPL +0x1544c1dd configfs_register_default_group vmlinux EXPORT_SYMBOL +0x2b85aff2 __dec_zone_page_state vmlinux EXPORT_SYMBOL +0x0de5ce94 __inc_zone_page_state vmlinux EXPORT_SYMBOL +0x8ca379ec __mod_zone_page_state vmlinux EXPORT_SYMBOL +0x27f4f029 ftrace_set_global_filter vmlinux EXPORT_SYMBOL_GPL +0x6b45adf1 __put_user_ns vmlinux EXPORT_SYMBOL +0xab6babaf pm_qos_request vmlinux EXPORT_SYMBOL_GPL +0x15fe9aab release_resource vmlinux EXPORT_SYMBOL +0x553004e5 sbc_dif_verify drivers/target/target_core_mod EXPORT_SYMBOL +0x36bf0017 ppp_register_compressor drivers/net/ppp/ppp_generic EXPORT_SYMBOL +0x07f88521 drm_edid_is_valid drivers/gpu/drm/drm EXPORT_SYMBOL +0xb8c36f08 drm_dp_dual_mode_set_tmds_output drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x3c987628 drm_dp_dual_mode_get_tmds_output drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x9147f314 __nf_ip6_route vmlinux EXPORT_SYMBOL_GPL +0x5d0090d7 devlink_fmsg_binary_pair_put vmlinux EXPORT_SYMBOL_GPL +0x8a26270d brioctl_set vmlinux EXPORT_SYMBOL +0x180a08da input_set_abs_params vmlinux EXPORT_SYMBOL +0xfa297415 acpi_map_pxm_to_node vmlinux EXPORT_SYMBOL +0xb3687850 out_of_line_wait_on_bit_lock vmlinux EXPORT_SYMBOL +0x47c42a22 
cpu_tss_rw vmlinux EXPORT_SYMBOL +0xbb5f5476 xprt_free_slot net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xbdf96510 spc_emulate_report_luns drivers/target/target_core_mod EXPORT_SYMBOL +0x24287e55 target_unregister_template drivers/target/target_core_mod EXPORT_SYMBOL +0x95396889 nfs_alloc_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0x35cd4408 xfrm_state_alloc vmlinux EXPORT_SYMBOL +0x7fceb655 netdev_class_create_file_ns vmlinux EXPORT_SYMBOL +0x85670f1d rtnl_is_locked vmlinux EXPORT_SYMBOL +0x383191ba devm_kmemdup vmlinux EXPORT_SYMBOL_GPL +0xe619873d fat_remove_entries vmlinux EXPORT_SYMBOL_GPL +0x7b9319bf page_symlink vmlinux EXPORT_SYMBOL +0xa26d9b4f workqueue_congested vmlinux EXPORT_SYMBOL_GPL +0x3c006126 drm_dp_mst_get_edid drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x4bbae1b2 o2hb_register_callback fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x06552459 nfsacl_decode fs/nfs_common/nfs_acl EXPORT_SYMBOL_GPL +0x1ed024c1 nfsacl_encode fs/nfs_common/nfs_acl EXPORT_SYMBOL_GPL +0x6dd62441 kvm_arch_has_assigned_device arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x8dc61c77 kvm_vcpu_wake_up arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x888f5c82 __xfrm_route_forward vmlinux EXPORT_SYMBOL +0x0fea6c8e udp_gro_complete vmlinux EXPORT_SYMBOL +0x52d54fce devlink_info_version_stored_put vmlinux EXPORT_SYMBOL_GPL +0x0101bfcf rtnl_get_net_ns_capable vmlinux EXPORT_SYMBOL_GPL +0x6cd5e337 compat_sock_common_setsockopt vmlinux EXPORT_SYMBOL +0x93b0e731 compat_sock_common_getsockopt vmlinux EXPORT_SYMBOL +0xf30a5502 cpufreq_enable_boost_support vmlinux EXPORT_SYMBOL_GPL +0x3ca04c74 dm_get_queue_limits vmlinux EXPORT_SYMBOL_GPL +0x297ff5d6 dm_per_bio_data vmlinux EXPORT_SYMBOL_GPL +0x4ba0d5e3 dax_finish_sync_fault vmlinux EXPORT_SYMBOL_GPL +0x6921aa34 compat_put_timeval vmlinux EXPORT_SYMBOL_GPL +0x321bdbb1 compat_get_timeval vmlinux EXPORT_SYMBOL_GPL +0xa2f11734 nf_ct_expect_iterate_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x05ce5706 glue_xts_req_128bit arch/x86/crypto/glue_helper EXPORT_SYMBOL_GPL +0x37a1909f gen_replace_estimator vmlinux EXPORT_SYMBOL +0x74ef773e fc_get_host_port_state vmlinux EXPORT_SYMBOL +0xce3a8342 scsi_target_unblock vmlinux EXPORT_SYMBOL_GPL +0x0c8e952e devm_clk_hw_unregister vmlinux EXPORT_SYMBOL_GPL +0x1347ec57 blk_mq_request_completed vmlinux EXPORT_SYMBOL_GPL +0x19bd383b security_secmark_refcount_dec vmlinux EXPORT_SYMBOL +0x2f03fc4b security_secmark_refcount_inc vmlinux EXPORT_SYMBOL +0x10de608a vfs_ioc_fssetxattr_check vmlinux EXPORT_SYMBOL +0x5071e2a0 inc_node_page_state vmlinux EXPORT_SYMBOL +0xf85b550b param_set_bint vmlinux EXPORT_SYMBOL +0xf311ca79 param_get_bool vmlinux EXPORT_SYMBOL +0xdf60170d param_set_byte vmlinux EXPORT_SYMBOL +0xcb90b9fc rpc_call_null net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9beb5c87 kvm_mmu_slot_set_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xff3809f1 br_multicast_router vmlinux EXPORT_SYMBOL_GPL +0x82355ff8 __ip6_datagram_connect vmlinux EXPORT_SYMBOL_GPL +0x200036a3 ip_tunnel_metadata_cnt vmlinux EXPORT_SYMBOL +0x99cdc041 __ip4_datagram_connect vmlinux EXPORT_SYMBOL +0x52996724 nf_ipv6_ops vmlinux EXPORT_SYMBOL_GPL +0x9fdecc31 unregister_netdevice_many vmlinux EXPORT_SYMBOL +0x508c0c5b skb_clone_sk vmlinux EXPORT_SYMBOL +0x53f2a0fe fc_exch_mgr_alloc vmlinux EXPORT_SYMBOL +0xeb7f6046 acpi_get_devices vmlinux EXPORT_SYMBOL +0xea778fab sg_pcopy_to_buffer vmlinux EXPORT_SYMBOL +0xf12468eb blk_queue_logical_block_size vmlinux EXPORT_SYMBOL +0xae471805 init_srcu_struct vmlinux EXPORT_SYMBOL_GPL +0x740a1b95 reserve_evntsel_nmi vmlinux EXPORT_SYMBOL 
+0x5a8ae15a ZSTD_initDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x371e7f3a ZSTD_initCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0xe16986dd mlxsw_afa_block_activity_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x86133988 mlx5_query_port_pause drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x58be8d67 arp_tbl vmlinux EXPORT_SYMBOL +0x9114f47c __skb_recv_datagram vmlinux EXPORT_SYMBOL +0xc9ce3ce0 phy_attach vmlinux EXPORT_SYMBOL +0x87643a9a sas_get_address vmlinux EXPORT_SYMBOL +0x19df99b9 acpi_finish_gpe vmlinux EXPORT_SYMBOL +0x44902cff acpi_enable_event vmlinux EXPORT_SYMBOL +0x688d0e03 list_lru_walk_node vmlinux EXPORT_SYMBOL_GPL +0x017de3d5 nr_cpu_ids vmlinux EXPORT_SYMBOL +0x8a7cb9c4 platform_thermal_package_rate_control vmlinux EXPORT_SYMBOL_GPL +0x065994f1 xdr_encode_opaque_fixed net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xaa13f7b9 rdma_translate_ip drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x1aa93530 drm_plane_enable_fb_damage_clips drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x955564f3 tcp_splice_read vmlinux EXPORT_SYMBOL +0x0a392666 xt_unregister_match vmlinux EXPORT_SYMBOL +0x22828664 mptscsih_show_info vmlinux EXPORT_SYMBOL +0x1af081fb request_firmware_direct vmlinux EXPORT_SYMBOL_GPL +0x21a563da clk_get_accuracy vmlinux EXPORT_SYMBOL_GPL +0x37ee8049 acpiphp_unregister_attention vmlinux EXPORT_SYMBOL_GPL +0x50f91491 __genradix_ptr vmlinux EXPORT_SYMBOL +0xc10fddb8 name_to_dev_t vmlinux EXPORT_SYMBOL_GPL +0xcbc88a23 ZSTD_isFrame lib/zstd/zstd_decompress EXPORT_SYMBOL +0x5779ec1b ip_vs_conn_in_get_proto net/netfilter/ipvs/ip_vs EXPORT_SYMBOL_GPL +0x4e86baab ip_set_extensions net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x3567743b vfio_external_user_iommu_id drivers/vfio/vfio EXPORT_SYMBOL_GPL +0xe03ab7a5 vxlan_fdb_replay drivers/net/vxlan EXPORT_SYMBOL_GPL +0xca773faf drm_master_get drivers/gpu/drm/drm EXPORT_SYMBOL +0xc32c71af register_inetaddr_validator_notifier vmlinux EXPORT_SYMBOL +0x9aaeefce sysctl_nf_log_all_netns vmlinux EXPORT_SYMBOL +0x8bca4d47 iscsi_prep_data_out_pdu vmlinux EXPORT_SYMBOL_GPL +0x5d9d2b2c __tracepoint_iscsi_dbg_eh vmlinux EXPORT_SYMBOL_GPL +0xdfaf57df platform_msi_domain_free_irqs vmlinux EXPORT_SYMBOL_GPL +0x49b163b8 acpi_bus_scan vmlinux EXPORT_SYMBOL +0xd70f62b6 acpi_os_execute vmlinux EXPORT_SYMBOL +0x35131b36 drbd_role_str drivers/block/drbd/drbd EXPORT_SYMBOL +0x9e97c911 drm_agp_acquire drivers/gpu/drm/drm EXPORT_SYMBOL +0x452bbade nfs_file_read fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe9b608a2 mptscsih_suspend vmlinux EXPORT_SYMBOL +0xcbc9557f unregister_sysrq_key vmlinux EXPORT_SYMBOL +0x058f9366 apei_exec_collect_resources vmlinux EXPORT_SYMBOL_GPL +0xeaac1cc3 percpu_ref_init vmlinux EXPORT_SYMBOL_GPL +0x892a2dcb crypto_unregister_rngs vmlinux EXPORT_SYMBOL_GPL +0x99fdc8e9 crypto_aead_setkey vmlinux EXPORT_SYMBOL_GPL +0x97440211 crypto_unregister_algs vmlinux EXPORT_SYMBOL_GPL +0xc502265e simple_transaction_get vmlinux EXPORT_SYMBOL +0x64193235 simple_transaction_set vmlinux EXPORT_SYMBOL +0xeb25ec24 register_kretprobes vmlinux EXPORT_SYMBOL_GPL +0x0dd96fd2 find_pid_ns vmlinux EXPORT_SYMBOL_GPL +0x87e64181 amd_nb_has_feature vmlinux EXPORT_SYMBOL_GPL +0x328e3354 __memcpy_flushcache vmlinux EXPORT_SYMBOL_GPL +0x2864abc9 klist_node_attached vmlinux EXPORT_SYMBOL_GPL +0xdb131a59 __nlmsg_put vmlinux EXPORT_SYMBOL +0x1ee34256 lock_sock_nested vmlinux EXPORT_SYMBOL +0xafc4cee5 nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL +0x6d95f418 hid_add_device vmlinux EXPORT_SYMBOL_GPL +0x7e8afcb8 
power_supply_set_input_current_limit_from_supplier vmlinux EXPORT_SYMBOL_GPL +0xc91464d6 platform_add_devices vmlinux EXPORT_SYMBOL_GPL +0xff153da6 device_show_bool vmlinux EXPORT_SYMBOL_GPL +0xd94569af acpi_processor_get_performance_info vmlinux EXPORT_SYMBOL_GPL +0xecbb96b4 pci_read_config_byte vmlinux EXPORT_SYMBOL +0xde8db364 rhashtable_walk_exit vmlinux EXPORT_SYMBOL_GPL +0xdc81dc9a sched_trace_rd_span vmlinux EXPORT_SYMBOL_GPL +0xd354ee57 flush_work vmlinux EXPORT_SYMBOL_GPL +0xbfc8f40f udp_tunnel_notify_add_rx_port net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x49081644 dm_btree_remove drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x5a2d2b40 mlx5_db_free drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x5801b7a0 mlx4_update_qp drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x92a6bafb mlx4_db_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xa671f6f5 cxgb4_alloc_stid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x84755df4 cxgb4_alloc_atid drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x05114c37 cxgb3_alloc_stid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x3646e2dc cxgb3_alloc_atid drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x1068004b gf128mul_bbe crypto/gf128mul EXPORT_SYMBOL +0xed4aa1c3 usb_deregister vmlinux EXPORT_SYMBOL_GPL +0x7396945d phy_ethtool_set_eee vmlinux EXPORT_SYMBOL +0xbae53903 phy_ethtool_get_eee vmlinux EXPORT_SYMBOL +0x82f0fbe1 fc_seq_send vmlinux EXPORT_SYMBOL +0xad5cf193 ata_sff_port_ops vmlinux EXPORT_SYMBOL_GPL +0x5893f854 vga_con vmlinux EXPORT_SYMBOL +0xff2bdf89 create_signature vmlinux EXPORT_SYMBOL_GPL +0x512940e2 __generic_file_fsync vmlinux EXPORT_SYMBOL +0x2087719e ceph_oid_copy net/ceph/libceph EXPORT_SYMBOL +0x2c287154 drm_mm_remove_node drivers/gpu/drm/drm EXPORT_SYMBOL +0xb50d7554 nfs_fs_mount fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6b2dc060 dump_stack vmlinux EXPORT_SYMBOL +0x10c3f57e __gnet_stats_copy_queue vmlinux EXPORT_SYMBOL +0x35764b89 nvme_complete_rq vmlinux EXPORT_SYMBOL_GPL +0x780deaa3 acpi_bus_unregister_driver vmlinux EXPORT_SYMBOL +0xd6e25a66 pci_msi_unmask_irq vmlinux EXPORT_SYMBOL_GPL +0xe7197d78 crypto_alloc_instance vmlinux EXPORT_SYMBOL_GPL +0x55fb9629 mpage_writepage vmlinux EXPORT_SYMBOL +0xd942d353 ring_buffer_record_off vmlinux EXPORT_SYMBOL_GPL +0xc53e3755 set_security_override vmlinux EXPORT_SYMBOL +0x16810f56 nfnetlink_subsys_unregister net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0x9ff7dc5b mlx4_qp_to_ready drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x7adc0fbf rb_replace_node_rcu vmlinux EXPORT_SYMBOL +0x2280270a raw_seq_start vmlinux EXPORT_SYMBOL_GPL +0xb44a4e60 dst_cow_metrics_generic vmlinux EXPORT_SYMBOL +0xde452fc4 ir_raw_event_store_with_filter vmlinux EXPORT_SYMBOL_GPL +0xeecd228f rtc_read_time vmlinux EXPORT_SYMBOL_GPL +0xf0891dab scsi_dh_activate vmlinux EXPORT_SYMBOL_GPL +0x47055a67 ata_dev_pair vmlinux EXPORT_SYMBOL_GPL +0x9068a5f5 tpm2_get_tpm_pt vmlinux EXPORT_SYMBOL_GPL +0xe21f18ac __genradix_iter_peek vmlinux EXPORT_SYMBOL +0xbba343a0 clean_bdev_aliases vmlinux EXPORT_SYMBOL +0x6b62d54f kvm_set_cr3 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xf0f74fa7 kobject_del vmlinux EXPORT_SYMBOL +0xdc6df400 netdev_master_upper_dev_get vmlinux EXPORT_SYMBOL +0x989257f4 dmaengine_get_unmap_data vmlinux EXPORT_SYMBOL +0xeebb3445 pci_hp_del vmlinux EXPORT_SYMBOL_GPL +0x1f0fd128 pci_hp_add vmlinux EXPORT_SYMBOL_GPL +0x067ba25c crypto_unregister_shash vmlinux EXPORT_SYMBOL_GPL +0x0348e120 
crypto_unregister_ahash vmlinux EXPORT_SYMBOL_GPL +0xb8b9f817 kmalloc_order_trace vmlinux EXPORT_SYMBOL +0x35366d40 rpc_remove_pipe_dir_object net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1843bc45 rpc_clnt_add_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1ff3c15e nat_callforwarding_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL +0x57af689e dm_bio_detain drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x4c9fb281 devm_backlight_device_register drivers/video/backlight/backlight EXPORT_SYMBOL +0x75476e2a devlink_dpipe_headers_register vmlinux EXPORT_SYMBOL_GPL +0xabcdccbd devm_devfreq_add_device vmlinux EXPORT_SYMBOL +0x0de97a21 cpufreq_dbs_governor_start vmlinux EXPORT_SYMBOL_GPL +0x38c1f576 policy_has_boost_freq vmlinux EXPORT_SYMBOL_GPL +0xdeab486e usb_reset_endpoint vmlinux EXPORT_SYMBOL_GPL +0x96554810 register_keyboard_notifier vmlinux EXPORT_SYMBOL_GPL +0xc096e23d hdmi_drm_infoframe_init vmlinux EXPORT_SYMBOL +0xb6a68816 find_last_bit vmlinux EXPORT_SYMBOL +0x02fe1ab7 poll_initwait vmlinux EXPORT_SYMBOL +0xb0071429 dma_direct_map_sg vmlinux EXPORT_SYMBOL +0x97dc238d unregister_console vmlinux EXPORT_SYMBOL +0xd304292d ceph_monc_wait_osdmap net/ceph/libceph EXPORT_SYMBOL +0x2975c966 nf_ct_helper_ext_add net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x46f02ed5 nf_ct_unconfirmed_destroy net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xf129e76c edac_device_handle_ue drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x33694b1f edac_device_handle_ce drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xf438022f __tracepoint_bcache_btree_node_alloc drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x9ba581da usb_stor_reset_resume drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x60ad8a01 mlx4_SET_PORT_VXLAN drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x4900035b o2hb_stop_all_regions fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x790be0b9 usb_bus_idr vmlinux EXPORT_SYMBOL_GPL +0x847ebfa6 phy_disconnect vmlinux EXPORT_SYMBOL +0x18504e1a phy_ethtool_set_link_ksettings vmlinux EXPORT_SYMBOL +0x15533e03 phy_ethtool_get_link_ksettings vmlinux EXPORT_SYMBOL +0xeee39b29 device_get_mac_address vmlinux EXPORT_SYMBOL +0x97950191 aead_geniv_free vmlinux EXPORT_SYMBOL_GPL +0x3fd78f3b register_chrdev_region vmlinux EXPORT_SYMBOL +0xe3d857ea __cpu_active_mask vmlinux EXPORT_SYMBOL +0xb5a8c226 acpi_gsi_to_irq vmlinux EXPORT_SYMBOL_GPL +0xa2e71315 rpcauth_wrap_req_encode net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x8f64be30 nft_set_ext_types net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x760ad241 mlx5_unregister_interface drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x17285a85 mlx4_unregister_interface drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x9b6d47ca drm_mode_is_420 drivers/gpu/drm/drm EXPORT_SYMBOL +0xeefce041 netdev_state_change vmlinux EXPORT_SYMBOL +0xe3a53f4c sort vmlinux EXPORT_SYMBOL +0xa3f12f69 __crypto_xor vmlinux EXPORT_SYMBOL_GPL +0xa8fef7bb security_unix_may_send vmlinux EXPORT_SYMBOL +0x8180056f vfs_getattr_nosec vmlinux EXPORT_SYMBOL +0xa7d0ee87 mmu_notifier_put vmlinux EXPORT_SYMBOL_GPL +0x541bd60a irq_work_run vmlinux EXPORT_SYMBOL_GPL +0xd9f3e65f __tracepoint_suspend_resume vmlinux EXPORT_SYMBOL_GPL +0xcdf02738 tracing_snapshot_cond vmlinux EXPORT_SYMBOL_GPL +0x1278221d ZSTD_compressBegin_usingCDict lib/zstd/zstd_compress EXPORT_SYMBOL +0x19bb1b8a __nft_release_basechain net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x83a240da nf_conncount_add net/netfilter/nf_conncount EXPORT_SYMBOL_GPL +0x4b57ab99 ib_register_mad_snoop drivers/infiniband/core/ib_core 
EXPORT_SYMBOL +0x35bbefa6 mlx4_SET_PORT_general drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x4c0791ca cxgbi_conn_xmit_pdu drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x37e79d2d drm_crtc_init_with_planes drivers/gpu/drm/drm EXPORT_SYMBOL +0x71a47518 nfs_probe_fsinfo fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6cabf24c fscache_io_error fs/fscache/fscache EXPORT_SYMBOL +0x833c3313 __memcpy_mcsafe vmlinux EXPORT_SYMBOL_GPL +0x6a59399b usb_sg_cancel vmlinux EXPORT_SYMBOL_GPL +0x3faca2d7 iscsi_host_remove vmlinux EXPORT_SYMBOL_GPL +0x08227382 srp_rport_get vmlinux EXPORT_SYMBOL +0x29203e01 debugfs_create_devm_seqfile vmlinux EXPORT_SYMBOL_GPL +0xd574baae kern_path vmlinux EXPORT_SYMBOL +0x980ec101 irq_chip_ack_parent vmlinux EXPORT_SYMBOL_GPL +0x59f2ec78 rpc_set_connect_timeout net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xd1b8df6c ib_register_device drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x7c74d7a6 vmci_qpair_consume_buf_ready drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0xbb705512 drm_client_framebuffer_create drivers/gpu/drm/drm EXPORT_SYMBOL +0x066fd1e8 nfs_clone_sb_security fs/nfs/nfs EXPORT_SYMBOL_GPL +0x15ddaea0 xfrm_dev_resume vmlinux EXPORT_SYMBOL_GPL +0x5d02c734 nf_register_queue_handler vmlinux EXPORT_SYMBOL +0x13b70ffc netpoll_print_options vmlinux EXPORT_SYMBOL +0x1b2bb844 iscsi_destroy_conn vmlinux EXPORT_SYMBOL_GPL +0xe4309905 syscore_resume vmlinux EXPORT_SYMBOL_GPL +0x4db15bab clk_hw_get_flags vmlinux EXPORT_SYMBOL_GPL +0xa3026a6f clk_bulk_get vmlinux EXPORT_SYMBOL +0x260a095a __sg_alloc_table vmlinux EXPORT_SYMBOL +0x1c3113d7 shrink_dcache_sb vmlinux EXPORT_SYMBOL +0x137e2312 __tracepoint_xdp_bulk_tx vmlinux EXPORT_SYMBOL_GPL +0xf85b620d percpu_free_rwsem vmlinux EXPORT_SYMBOL_GPL +0x2e5f3138 param_ops_int vmlinux EXPORT_SYMBOL +0x8c2c6240 ib_register_event_handler drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x24c22a8f drm_client_modeset_probe drivers/gpu/drm/drm EXPORT_SYMBOL +0xd54a5050 ipmi_unregister_for_cmd drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xfbd8cd30 efivar_entry_iter vmlinux EXPORT_SYMBOL_GPL +0x8e253d93 serial8250_do_set_ldisc vmlinux EXPORT_SYMBOL_GPL +0x6ed8a5fc hdmi_drm_infoframe_check vmlinux EXPORT_SYMBOL +0xee3608ad __pci_hp_register vmlinux EXPORT_SYMBOL_GPL +0x2fb72e9b sbitmap_init_node vmlinux EXPORT_SYMBOL_GPL +0xe02b995c blkdev_issue_flush vmlinux EXPORT_SYMBOL +0x499043d3 crypto_init_queue vmlinux EXPORT_SYMBOL_GPL +0x441f9ef7 drm_gem_object_init drivers/gpu/drm/drm EXPORT_SYMBOL +0xb510c250 raw_v4_hashinfo vmlinux EXPORT_SYMBOL_GPL +0x2b359312 acpi_subsys_suspend_noirq vmlinux EXPORT_SYMBOL_GPL +0xd64ed259 __memcat_p vmlinux EXPORT_SYMBOL_GPL +0xe2d3b5cc sysfs_break_active_protection vmlinux EXPORT_SYMBOL_GPL +0xb365037a iomap_zero_range vmlinux EXPORT_SYMBOL_GPL +0xf9686f96 fsnotify_put_mark vmlinux EXPORT_SYMBOL_GPL +0x8373f38f blockdev_superblock vmlinux EXPORT_SYMBOL_GPL +0xb308c97d wait_woken vmlinux EXPORT_SYMBOL +0xb288e246 param_ops_short vmlinux EXPORT_SYMBOL +0x093de831 rpcb_getport_async net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2baa040f ib_get_vf_stats drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x85fbc931 slhc_uncompress drivers/net/slip/slhc EXPORT_SYMBOL +0xe55ff465 kvm_cpu_get_interrupt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xf1e0b382 skb_zerocopy_iter_dgram vmlinux EXPORT_SYMBOL_GPL +0xc9170f71 mpt_findImVolumes vmlinux EXPORT_SYMBOL +0x17002c34 nvme_try_sched_reset vmlinux EXPORT_SYMBOL_GPL +0x5bfeb513 fwnode_irq_get vmlinux EXPORT_SYMBOL +0x2dd2565d get_agp_version vmlinux EXPORT_SYMBOL +0x2484adc3 
__kfifo_to_user_r vmlinux EXPORT_SYMBOL +0x0e198232 dm_btree_insert drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x2cd1be34 __tracepoint_bcache_request_start drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0xe08b0f80 cxgbi_conn_alloc_pdu drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xac3201b0 udp_flow_hashrnd vmlinux EXPORT_SYMBOL +0x3afcb00f devlink_trap_report vmlinux EXPORT_SYMBOL_GPL +0xd94ccd63 softnet_data vmlinux EXPORT_SYMBOL +0x58f65ac9 efivar_entry_get vmlinux EXPORT_SYMBOL_GPL +0x015e7c32 efivar_entry_set vmlinux EXPORT_SYMBOL_GPL +0x6527a231 dbgp_external_startup vmlinux EXPORT_SYMBOL_GPL +0x1d19e2df fc_rport_logoff vmlinux EXPORT_SYMBOL +0x13a7de5e fc_rport_lookup vmlinux EXPORT_SYMBOL +0x86700220 acpi_get_cpuid vmlinux EXPORT_SYMBOL_GPL +0xfe5d4bb2 sys_tz vmlinux EXPORT_SYMBOL +0x13083656 dma_get_sgtable_attrs vmlinux EXPORT_SYMBOL +0x23a4a536 osd_req_op_extent_osd_data_pagelist net/ceph/libceph EXPORT_SYMBOL +0x9cf19dd0 rpc_clnt_iterate_for_each_xprt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x764567c8 dm_btree_find_highest_key drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x075271cb team_mode_register drivers/net/team/team EXPORT_SYMBOL +0x667ea308 team_options_change_check drivers/net/team/team EXPORT_SYMBOL +0x9e5bacfa mlxsw_pci_driver_register drivers/net/ethernet/mellanox/mlxsw/mlxsw_pci EXPORT_SYMBOL +0x87a693d0 drm_fb_helper_generic_probe drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xcd7300b4 ip6_sk_redirect vmlinux EXPORT_SYMBOL_GPL +0x9ffa3a75 netdev_max_backlog vmlinux EXPORT_SYMBOL +0x58276f93 cper_next_record_id vmlinux EXPORT_SYMBOL_GPL +0x9e49a306 fcoe_queue_timer vmlinux EXPORT_SYMBOL_GPL +0x40087a2a dev_pm_domain_attach_by_name vmlinux EXPORT_SYMBOL_GPL +0xf1a68107 acpi_processor_preregister_performance vmlinux EXPORT_SYMBOL +0xddbeeecc pci_lock_rescan_remove vmlinux EXPORT_SYMBOL_GPL +0xc5f5937e blk_integrity_register vmlinux EXPORT_SYMBOL +0x035d9723 kernfs_path_from_node vmlinux EXPORT_SYMBOL_GPL +0xbf34b0fd unlock_page vmlinux EXPORT_SYMBOL +0x5c82db0a osd_req_op_cls_response_data_pages net/ceph/libceph EXPORT_SYMBOL +0x387e1639 rpc_pipefs_notifier_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xacb20a24 nft_data_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xd99e003d dm_bio_prison_create_v2 drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x1f2d26f4 drm_sysfs_hotplug_event drivers/gpu/drm/drm EXPORT_SYMBOL +0x112ed2e2 lcd_device_register drivers/video/backlight/lcd EXPORT_SYMBOL +0x4a2f42f4 devlink_region_shapshot_id_get vmlinux EXPORT_SYMBOL_GPL +0xd99fa7e2 devlink_dpipe_table_resource_set vmlinux EXPORT_SYMBOL_GPL +0xac9a6197 netdev_walk_all_upper_dev_rcu vmlinux EXPORT_SYMBOL_GPL +0xe14638b6 dev_pm_opp_set_rate vmlinux EXPORT_SYMBOL_GPL +0xf3a57892 release_dentry_name_snapshot vmlinux EXPORT_SYMBOL +0xdb735885 alarm_cancel vmlinux EXPORT_SYMBOL_GPL +0x2fea1431 osd_req_op_raw_data_in_pages net/ceph/libceph EXPORT_SYMBOL +0x5a45ba31 svc_auth_unregister net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xf6feab66 rdma_nl_register drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x16af9071 dm_array_set_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb6949944 dm_array_get_value drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xe67343c1 vmci_qpair_enqueue drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0xc151e45f __tracepoint_pnfs_mds_fallback_write_pagelist fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xd68040e9 kvm_vcpu_uninit arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x9e59852e xfrm_unregister_type vmlinux 
EXPORT_SYMBOL +0x5cf1d0ec nf_ip_checksum vmlinux EXPORT_SYMBOL +0xf2f3cf52 sock_diag_save_cookie vmlinux EXPORT_SYMBOL_GPL +0x947374df pci_request_regions_exclusive vmlinux EXPORT_SYMBOL +0x594bb503 pkcs7_validate_trust vmlinux EXPORT_SYMBOL_GPL +0x97f9c5b5 jbd2_log_wait_commit vmlinux EXPORT_SYMBOL +0xc50aeaac hrtimer_init_sleeper vmlinux EXPORT_SYMBOL_GPL +0x5e97eca2 mlx5_core_qp_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x4977256c drm_self_refresh_helper_update_avg_times drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x8e663d0f zalloc_cpumask_var_node vmlinux EXPORT_SYMBOL +0x84cf92cf xhci_ext_cap_init vmlinux EXPORT_SYMBOL_GPL +0x12bf883a acpi_dma_controller_free vmlinux EXPORT_SYMBOL_GPL +0x526eef2c hdmi_vendor_infoframe_pack vmlinux EXPORT_SYMBOL +0x9d8247ec blk_queue_can_use_dma_map_merging vmlinux EXPORT_SYMBOL_GPL +0x901796d5 dquot_disable vmlinux EXPORT_SYMBOL +0x80446b8d trace_event_buffer_commit vmlinux EXPORT_SYMBOL_GPL +0x85dd524a module_put vmlinux EXPORT_SYMBOL +0x2459bbcc console_set_on_cmdline vmlinux EXPORT_SYMBOL +0xb4fa84d2 nsh_push net/nsh/nsh EXPORT_SYMBOL_GPL +0x69559a2f ip_tunnel_rcv net/ipv4/ip_tunnel EXPORT_SYMBOL_GPL +0x71de04be ib_free_cq_user drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x38bd5fdd ib_drain_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x38972f23 dm_rh_region_to_sector drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0x4dde4eaa mlx5_fc_query drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x0705dd14 ipmi_register_for_cmd drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0xbc60dc37 cpufreq_show_cpus vmlinux EXPORT_SYMBOL_GPL +0x8acac0f5 usb_get_from_anchor vmlinux EXPORT_SYMBOL_GPL +0x8cb9232f genphy_resume vmlinux EXPORT_SYMBOL +0x4951a8d7 fc_host_fpin_rcv vmlinux EXPORT_SYMBOL +0x44cd8c35 sata_link_hardreset vmlinux EXPORT_SYMBOL_GPL +0x3750d770 erst_read vmlinux EXPORT_SYMBOL_GPL +0xdbcf041a acpi_install_address_space_handler vmlinux EXPORT_SYMBOL +0x1866d87b inode_get_bytes vmlinux EXPORT_SYMBOL +0x3eeb2322 __wake_up vmlinux EXPORT_SYMBOL +0x4c8c9bff nft_meta_set_eval net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x2dad2080 nft_meta_get_eval net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xd9d7b741 nf_conntrack_helper_unregister net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa5159344 edac_device_del_device drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x24539f09 edac_device_add_device drivers/edac/edac_core EXPORT_SYMBOL_GPL +0xa3dd2f48 i2c_for_each_dev drivers/i2c/i2c-core EXPORT_SYMBOL_GPL +0x35ba2254 mlxsw_afk_values_add_u32 drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x0419e175 vbin_printf vmlinux EXPORT_SYMBOL_GPL +0x0cc2064b xdp_rxq_info_unreg_mem_model vmlinux EXPORT_SYMBOL_GPL +0xa7dd3819 mpt_free_msg_frame vmlinux EXPORT_SYMBOL +0xb648d637 mpt_put_msg_frame_hi_pri vmlinux EXPORT_SYMBOL +0xa01a8d9b nd_cmd_bus_desc vmlinux EXPORT_SYMBOL_GPL +0x4a1bb20e dev_driver_string vmlinux EXPORT_SYMBOL +0x84637c67 pcie_get_readrq vmlinux EXPORT_SYMBOL +0xc7a1840e llist_add_batch vmlinux EXPORT_SYMBOL_GPL +0x1ab1d8c1 blkg_print_stat_ios vmlinux EXPORT_SYMBOL_GPL +0xd15b5e8e blk_mq_update_nr_hw_queues vmlinux EXPORT_SYMBOL_GPL +0xc85c1aa6 bio_reset vmlinux EXPORT_SYMBOL +0x947238fb security_d_instantiate vmlinux EXPORT_SYMBOL +0xb9b6c35a iter_file_splice_write vmlinux EXPORT_SYMBOL +0x7c7ab3a2 vfs_mkdir vmlinux EXPORT_SYMBOL +0x9780ac86 srcu_torture_stats_print vmlinux EXPORT_SYMBOL_GPL +0x843b9791 team_mode_unregister drivers/net/team/team EXPORT_SYMBOL +0xea6c69b6 bcm54xx_auxctl_write 
drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL +0xcd889b85 drm_gem_prime_handle_to_fd drivers/gpu/drm/drm EXPORT_SYMBOL +0x6ac7aec1 cryptd_free_ahash crypto/cryptd EXPORT_SYMBOL_GPL +0xd53e5003 kvm_mmu_slot_largepage_remove_write_access arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x69b39019 usb_driver_claim_interface vmlinux EXPORT_SYMBOL_GPL +0x3fa286c1 fwnode_property_get_reference_args vmlinux EXPORT_SYMBOL_GPL +0xc5a5c678 uart_parse_earlycon vmlinux EXPORT_SYMBOL_GPL +0xfc5c46e2 acpi_buffer_to_resource vmlinux EXPORT_SYMBOL +0x8353dfff acpi_os_get_iomem vmlinux EXPORT_SYMBOL_GPL +0x8879abc5 pinctrl_add_gpio_range vmlinux EXPORT_SYMBOL_GPL +0xb4d4cb4d eventfd_ctx_fileget vmlinux EXPORT_SYMBOL_GPL +0xd0d73713 rpc_pipe_generic_upcall net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xcb5b15b0 xprt_unregister_transport net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x592aff68 nf_synproxy_ipv6_init net/netfilter/nf_synproxy_core EXPORT_SYMBOL_GPL +0x8de82b35 iscsi_conn_failure vmlinux EXPORT_SYMBOL_GPL +0xd24fecb8 __ata_change_queue_depth vmlinux EXPORT_SYMBOL_GPL +0xe2f30634 to_nd_btt vmlinux EXPORT_SYMBOL +0x63187451 pcie_aspm_support_enabled vmlinux EXPORT_SYMBOL +0xb79ae253 eventfd_fget vmlinux EXPORT_SYMBOL_GPL +0x50c8a180 dget_parent vmlinux EXPORT_SYMBOL +0xe0cc9c92 vmci_qpair_alloc drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0xa0436e98 in6addr_linklocal_allnodes vmlinux EXPORT_SYMBOL +0xc8f52930 __tcp_bpf_recvmsg vmlinux EXPORT_SYMBOL_GPL +0x143bdf25 do_tcp_sendpages vmlinux EXPORT_SYMBOL_GPL +0x28d4c54d hid_dump_input vmlinux EXPORT_SYMBOL_GPL +0x84c45c1a usb_disable_autosuspend vmlinux EXPORT_SYMBOL_GPL +0xe8151943 usb_get_dev vmlinux EXPORT_SYMBOL_GPL +0xeca5d40d software_node_find_by_name vmlinux EXPORT_SYMBOL_GPL +0xead8171b device_property_match_string vmlinux EXPORT_SYMBOL_GPL +0x81d7c5b7 percpu_ref_kill_and_confirm vmlinux EXPORT_SYMBOL_GPL +0xf0f8d0b8 securityfs_create_dir vmlinux EXPORT_SYMBOL_GPL +0xdd2c169b mb_cache_create vmlinux EXPORT_SYMBOL +0xe05c82b1 iunique vmlinux EXPORT_SYMBOL +0x7b7ddd7b try_lookup_one_len vmlinux EXPORT_SYMBOL +0x121d958a unregister_die_notifier vmlinux EXPORT_SYMBOL_GPL +0x046dd187 vmci_datagram_create_handle drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0xb7de7124 drm_format_info drivers/gpu/drm/drm EXPORT_SYMBOL +0x2eb50f8b nfs4_schedule_stateid_recovery fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x56590b9e xfrm_policy_walk_done vmlinux EXPORT_SYMBOL +0xa7fde491 inet_dgram_ops vmlinux EXPORT_SYMBOL +0xaee317ad input_release_device vmlinux EXPORT_SYMBOL +0x274ab77b ata_dev_next vmlinux EXPORT_SYMBOL_GPL +0xc93e8461 acpi_get_event_resources vmlinux EXPORT_SYMBOL +0x1271bbc0 __do_once_done vmlinux EXPORT_SYMBOL +0x5431ca52 blk_alloc_queue vmlinux EXPORT_SYMBOL +0xd5263820 mb_cache_destroy vmlinux EXPORT_SYMBOL +0x45d246da node_to_cpumask_map vmlinux EXPORT_SYMBOL +0xd38cd261 __default_kernel_pte_mask vmlinux EXPORT_SYMBOL +0x6922b927 ip_set_nfnl_put net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x08f604e1 nf_conntrack_helpers_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x5e5c36b1 mlx5_destroy_flow_table drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xbbee027e mlx4_get_devlink_port drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xf3ab6c51 drm_fb_xrgb8888_to_rgb565_dstclip drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xc27163e3 drm_atomic_helper_set_config drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xb9532574 dm_bufio_client_create vmlinux EXPORT_SYMBOL_GPL +0xc3ea5305 iommu_default_passthrough vmlinux EXPORT_SYMBOL_GPL 
+0x2bb9af28 gen_pool_for_each_chunk vmlinux EXPORT_SYMBOL +0x7a818c7c get_gendisk vmlinux EXPORT_SYMBOL +0xec2e1c8f proc_doulongvec_minmax vmlinux EXPORT_SYMBOL +0xacd2fc7b nft_chain_validate_dependency net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x9d4bde2b drm_fb_helper_blank drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1c7a7e73 pnfs_generic_pg_init_read fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xcb3e91cc xt_counters_alloc vmlinux EXPORT_SYMBOL +0x0ce05eab hwmon_device_register vmlinux EXPORT_SYMBOL_GPL +0x5e91559a usb_store_new_id vmlinux EXPORT_SYMBOL_GPL +0x37a8f042 sdev_evt_send_simple vmlinux EXPORT_SYMBOL_GPL +0xa538f67c of_find_mipi_dsi_host_by_node vmlinux EXPORT_SYMBOL +0x4ed79290 crypto_register_akcipher vmlinux EXPORT_SYMBOL_GPL +0x2b5ba51a simple_attr_open vmlinux EXPORT_SYMBOL_GPL +0x2c748d24 seq_hex_dump vmlinux EXPORT_SYMBOL +0xe53a279f truncate_inode_pages_final vmlinux EXPORT_SYMBOL +0x09337cd0 __wake_up_locked_key vmlinux EXPORT_SYMBOL_GPL +0x2d1b02d2 usermodehelper_read_lock_wait vmlinux EXPORT_SYMBOL_GPL +0x82d07161 __cpuhp_setup_state_cpuslocked vmlinux EXPORT_SYMBOL +0x417a9131 ceph_oloc_destroy net/ceph/libceph EXPORT_SYMBOL +0x407b465f vhost_work_queue drivers/vhost/vhost EXPORT_SYMBOL_GPL +0xf398644f dm_btree_lookup_next drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xd964819e netdev_adjacent_change_commit vmlinux EXPORT_SYMBOL +0x594439ab sk_clear_memalloc vmlinux EXPORT_SYMBOL_GPL +0x28629544 usb_root_hub_lost_power vmlinux EXPORT_SYMBOL_GPL +0x2603e6d0 sas_ssp_task_response vmlinux EXPORT_SYMBOL_GPL +0x3cebb51f ata_pci_bmdma_clear_simplex vmlinux EXPORT_SYMBOL_GPL +0x2b68ae56 hvc_remove vmlinux EXPORT_SYMBOL_GPL +0xe0362a6c pci_stop_and_remove_bus_device vmlinux EXPORT_SYMBOL +0x7d5e1008 __crc32c_le_shift vmlinux EXPORT_SYMBOL +0x4578f528 __kfifo_to_user vmlinux EXPORT_SYMBOL +0xbee6b122 blk_mq_sched_request_inserted vmlinux EXPORT_SYMBOL_GPL +0x1a7f8a54 done_path_create vmlinux EXPORT_SYMBOL +0x36314423 get_super_exclusive_thawed vmlinux EXPORT_SYMBOL +0x717a4881 ceph_con_close net/ceph/libceph EXPORT_SYMBOL +0xf34332f0 iscsi_find_param_from_key drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xd9ec7830 xfrm4_rcv_encap vmlinux EXPORT_SYMBOL +0x776de6f1 udp_poll vmlinux EXPORT_SYMBOL +0x3bf64a72 skb_copy_ubufs vmlinux EXPORT_SYMBOL_GPL +0xb82513c6 dev_pm_opp_attach_genpd vmlinux EXPORT_SYMBOL_GPL +0xd551bef7 mdiobus_unregister_device vmlinux EXPORT_SYMBOL +0x141034ad ahci_host_activate vmlinux EXPORT_SYMBOL_GPL +0x5eb85a22 iommu_group_remove_device vmlinux EXPORT_SYMBOL_GPL +0xb05663a8 pci_hp_deregister vmlinux EXPORT_SYMBOL_GPL +0x0e0e2ee5 pci_remove_root_bus vmlinux EXPORT_SYMBOL_GPL +0x1a809853 posix_acl_from_mode vmlinux EXPORT_SYMBOL +0x9eacf8a5 kstrndup vmlinux EXPORT_SYMBOL +0x35040d99 irq_domain_reset_irq_data vmlinux EXPORT_SYMBOL_GPL +0x4792af12 irq_set_chip vmlinux EXPORT_SYMBOL +0x33a4cfb9 console_stop vmlinux EXPORT_SYMBOL +0xe564496a nf_conntrack_tuple_taken net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x8bcb0bca ib_find_cached_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x6f2fe3c4 dm_btree_remove_leaves drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xff955b11 mlx4_mr_rereg_mem_cleanup drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x2a8641b1 drm_client_release drivers/gpu/drm/drm EXPORT_SYMBOL +0x4d22b807 drm_mode_equal_no_clocks drivers/gpu/drm/drm EXPORT_SYMBOL +0xbcc074f3 __serpent_decrypt crypto/serpent_generic EXPORT_SYMBOL_GPL +0x91d1fe52 max_session_slots fs/nfs/nfs 
EXPORT_SYMBOL_GPL +0x543f9379 __tracepoint_kvm_avic_unaccelerated_access arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xa77bfd29 register_inet6addr_validator_notifier vmlinux EXPORT_SYMBOL +0xce8a48c0 inet6_getname vmlinux EXPORT_SYMBOL +0xdf95ac04 power_supply_put vmlinux EXPORT_SYMBOL_GPL +0x7cc178a8 sata_pmp_port_ops vmlinux EXPORT_SYMBOL_GPL +0x371dfd29 fscrypt_ioctl_get_policy_ex vmlinux EXPORT_SYMBOL_GPL +0x5bf6dafb sunrpc_init_cache_detail net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x203bb15e xdr_encode_word net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xdeaee7ae auth_domain_find net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x22d966c6 ip_set_range_to_cidr net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x8c40df1c mlx4_find_cached_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xccbc7440 cxgb4_bar2_sge_qregs drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xe3d1da0e drm_mode_config_reset drivers/gpu/drm/drm EXPORT_SYMBOL +0x87bd07bd acpi_smbus_register_callback drivers/acpi/sbshc EXPORT_SYMBOL_GPL +0x09a7ec19 ohci_init_driver vmlinux EXPORT_SYMBOL_GPL +0x4fdc945d sata_deb_timing_normal vmlinux EXPORT_SYMBOL_GPL +0x8d73278e hex_asc_upper vmlinux EXPORT_SYMBOL +0x6a03751f sgl_free_order vmlinux EXPORT_SYMBOL +0xcf8327d9 debugfs_create_file vmlinux EXPORT_SYMBOL_GPL +0x75393a7c new_inode vmlinux EXPORT_SYMBOL +0xe1a104cd rpc_restart_call_prepare net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x89194e4f __uio_register_device drivers/uio/uio EXPORT_SYMBOL_GPL +0x7cbd0d37 qat_crypto_dev_config drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x645b8d72 drm_dev_set_unique drivers/gpu/drm/drm EXPORT_SYMBOL +0x704b918f drm_dp_start_crc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe0b13336 argv_free vmlinux EXPORT_SYMBOL +0x86f85114 net_dec_egress_queue vmlinux EXPORT_SYMBOL_GPL +0x720f4f86 build_skb_around vmlinux EXPORT_SYMBOL +0x7abf6730 __rtc_register_device vmlinux EXPORT_SYMBOL_GPL +0xf8919a10 xhci_run vmlinux EXPORT_SYMBOL_GPL +0x8112b3d2 scsi_build_sense_buffer vmlinux EXPORT_SYMBOL +0xb15ab250 dma_fence_array_create vmlinux EXPORT_SYMBOL +0x9abcd0d1 platform_find_device_by_driver vmlinux EXPORT_SYMBOL_GPL +0x338bf603 tty_port_close vmlinux EXPORT_SYMBOL +0x65dccf13 xz_dec_end vmlinux EXPORT_SYMBOL +0x6e5b8651 xz_dec_run vmlinux EXPORT_SYMBOL +0x114c6f63 jbd2_journal_clear_err vmlinux EXPORT_SYMBOL +0x3f123442 mlxsw_core_kvd_sizes_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x4a79eb4f drm_gem_fb_create_with_dirty drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL_GPL +0xfe0f2369 ipmi_get_maintenance_mode drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x2e810435 pnfs_read_done_resend_to_mds fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xb86d8552 nfs_refresh_inode fs/nfs/nfs EXPORT_SYMBOL_GPL +0xcdc6bb5b kvm_mmu_page_fault arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xfed34287 genl_unregister_family vmlinux EXPORT_SYMBOL +0x7b6b4c73 netdev_upper_get_next_dev_rcu vmlinux EXPORT_SYMBOL +0x9d0d6206 unregister_netdevice_notifier vmlinux EXPORT_SYMBOL +0x881e634d nvdimm_namespace_common_probe vmlinux EXPORT_SYMBOL +0x5b73cca1 tty_register_driver vmlinux EXPORT_SYMBOL +0x0fab1ab0 hdmi_spd_infoframe_pack vmlinux EXPORT_SYMBOL +0xd1363cc1 ucs2_strsize vmlinux EXPORT_SYMBOL +0x93c1f1cf wbc_attach_and_unlock_inode vmlinux EXPORT_SYMBOL_GPL +0x5d956125 generic_remap_file_range_prep vmlinux EXPORT_SYMBOL +0x9aa031be padata_stop vmlinux EXPORT_SYMBOL +0xad703657 ceph_auth_destroy_authorizer net/ceph/libceph EXPORT_SYMBOL +0x2b2d6ab5 mlx4_mw_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core 
EXPORT_SYMBOL_GPL +0x81082a0d mlx4_config_dev_retrieval drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x86c7272b iosf_mbi_read arch/x86/platform/intel/iosf_mbi EXPORT_SYMBOL +0x1045076c kset_unregister vmlinux EXPORT_SYMBOL +0x5c5a05ea ndo_dflt_fdb_del vmlinux EXPORT_SYMBOL +0x2562431b driver_remove_file vmlinux EXPORT_SYMBOL_GPL +0xe3db3154 iommu_sva_bind_device vmlinux EXPORT_SYMBOL_GPL +0x8c9e338f acpi_bios_error vmlinux EXPORT_SYMBOL +0x85044b58 debugfs_create_u32 vmlinux EXPORT_SYMBOL_GPL +0xf72ca7f4 core_tpg_get_initiator_node_acl drivers/target/target_core_mod EXPORT_SYMBOL +0xfaebcf7c set_and_calc_slave_port_state drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x62e05a8c drm_fb_helper_ioctl drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x04c62fd7 __memset vmlinux EXPORT_SYMBOL +0xd49e7071 strp_unpause vmlinux EXPORT_SYMBOL_GPL +0xb0f66300 llc_build_and_send_ui_pkt vmlinux EXPORT_SYMBOL +0xab8330cb netdev_rx_handler_register vmlinux EXPORT_SYMBOL_GPL +0x0ef06974 spi_populate_ppr_msg vmlinux EXPORT_SYMBOL_GPL +0x438d8df2 iova_cache_get vmlinux EXPORT_SYMBOL_GPL +0x9ad56d2d __tracepoint_block_split vmlinux EXPORT_SYMBOL_GPL +0xb2b7d3ba jbd2_journal_release_jbd_inode vmlinux EXPORT_SYMBOL +0x2d718735 wbc_account_cgroup_owner vmlinux EXPORT_SYMBOL_GPL +0x6bd1aa56 stack_trace_save vmlinux EXPORT_SYMBOL_GPL +0xe64ad8ea unregister_nmi_handler vmlinux EXPORT_SYMBOL_GPL +0x2449459d vmci_event_subscribe drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x546ca650 kvm_vcpu_gfn_to_pfn_atomic arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xe7e665e3 inet_csk_reset_keepalive_timer vmlinux EXPORT_SYMBOL +0x153d49ce efivar_entry_find vmlinux EXPORT_SYMBOL_GPL +0x1b6a0f51 dev_attr_phy_event_threshold vmlinux EXPORT_SYMBOL_GPL +0x2d0b3ab0 dev_vprintk_emit vmlinux EXPORT_SYMBOL +0x7c9a7371 clk_prepare vmlinux EXPORT_SYMBOL_GPL +0xeeab8274 rhashtable_insert_slow vmlinux EXPORT_SYMBOL_GPL +0xe117b9b9 user_read vmlinux EXPORT_SYMBOL_GPL +0xef64cdb9 balloon_page_list_dequeue vmlinux EXPORT_SYMBOL_GPL +0x67e51523 balloon_page_list_enqueue vmlinux EXPORT_SYMBOL_GPL +0x766a0927 mempool_alloc_pages vmlinux EXPORT_SYMBOL +0xb3539efc trace_define_field vmlinux EXPORT_SYMBOL_GPL +0x47939e0d __tasklet_hi_schedule vmlinux EXPORT_SYMBOL +0x25d717b4 read_bytes_from_xdr_buf net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x30ab7780 bcm_phy_set_eee drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL +0x4ccdbdef arp_send vmlinux EXPORT_SYMBOL +0x9a1efd74 xt_register_matches vmlinux EXPORT_SYMBOL +0x89de7830 fcoe_fc_crc vmlinux EXPORT_SYMBOL_GPL +0x9d668786 sata_sff_hardreset vmlinux EXPORT_SYMBOL_GPL +0x3d02cd70 dma_fence_signal_locked vmlinux EXPORT_SYMBOL +0x12256f67 unregister_acpi_bus_type vmlinux EXPORT_SYMBOL_GPL +0xbb0540aa zlib_inflateReset vmlinux EXPORT_SYMBOL +0x281823c5 __kfifo_out_peek vmlinux EXPORT_SYMBOL +0xc64416e5 security_sctp_assoc_request vmlinux EXPORT_SYMBOL +0x6c21487a cdev_del vmlinux EXPORT_SYMBOL +0x377bbcbc pm_suspend_target_state vmlinux EXPORT_SYMBOL_GPL +0x657850a1 ib_copy_qp_attr_to_user drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x37270f40 ib_copy_ah_attr_to_user drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x8ad3d11b drm_gem_dumb_destroy drivers/gpu/drm/drm EXPORT_SYMBOL +0x40d7c309 drm_atomic_helper_check_plane_damage drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x61962bcb pneigh_enqueue vmlinux EXPORT_SYMBOL +0xde9d417f dev_alloc_name vmlinux EXPORT_SYMBOL +0xfe88b300 raid_component_add vmlinux EXPORT_SYMBOL +0x142d89e7 soft_cursor vmlinux EXPORT_SYMBOL +0xd0d156e9 
__rht_bucket_nested vmlinux EXPORT_SYMBOL_GPL +0x7bddc0f6 async_xor vmlinux EXPORT_SYMBOL_GPL +0x0f3a6431 dax_layout_busy_page vmlinux EXPORT_SYMBOL_GPL +0x73cbd67a fsnotify_init_mark vmlinux EXPORT_SYMBOL_GPL +0x5f4a6e61 dm_rh_dec drivers/md/dm-region-hash EXPORT_SYMBOL_GPL +0xe781f874 dm_tm_dec drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x71949133 drm_gem_fb_create drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL_GPL +0xf0973d90 kvm_emulate_halt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x1b0de195 ping_seq_stop vmlinux EXPORT_SYMBOL_GPL +0x66f6d003 tcp_add_backlog vmlinux EXPORT_SYMBOL +0x2c3054f9 net_inc_ingress_queue vmlinux EXPORT_SYMBOL_GPL +0x276a792e vga_get vmlinux EXPORT_SYMBOL +0xe7c0a438 mipi_dsi_dcs_get_display_brightness vmlinux EXPORT_SYMBOL +0xce8be6f1 mipi_dsi_dcs_set_display_brightness vmlinux EXPORT_SYMBOL +0x6a1733eb iommu_group_unregister_notifier vmlinux EXPORT_SYMBOL_GPL +0x0b5e934c __clk_determine_rate vmlinux EXPORT_SYMBOL_GPL +0x9fbfebab erst_write vmlinux EXPORT_SYMBOL_GPL +0xd53b5e1d security_dentry_init_security vmlinux EXPORT_SYMBOL +0x2d688eef generic_fh_to_dentry vmlinux EXPORT_SYMBOL_GPL +0xc51296f8 filemap_fdatawait_keep_errors vmlinux EXPORT_SYMBOL +0xbdfb7e86 try_to_del_timer_sync vmlinux EXPORT_SYMBOL +0x408d2a04 play_idle vmlinux EXPORT_SYMBOL_GPL +0xddffbe00 svc_addsock net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x17a00bb1 ipcomp_output net/xfrm/xfrm_ipcomp EXPORT_SYMBOL_GPL +0x49c328e4 nf_ct_expect_related_report net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x9698e787 usb_stor_bulk_transfer_buf drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x43dc757f mlx5_query_port_prio_tc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x2c9eb7f1 nfs_statfs fs/nfs/nfs EXPORT_SYMBOL_GPL +0x5ef50678 qdisc_offload_dump_helper vmlinux EXPORT_SYMBOL +0xdb98e87e devlink_traps_unregister vmlinux EXPORT_SYMBOL_GPL +0xea5f8763 dev_pm_opp_add vmlinux EXPORT_SYMBOL_GPL +0x9ee733eb dm_unregister_target vmlinux EXPORT_SYMBOL +0xf3f685c7 tty_init_termios vmlinux EXPORT_SYMBOL_GPL +0xfd3a341a pci_enable_device_io vmlinux EXPORT_SYMBOL +0x2364f075 alloc_pages_vma vmlinux EXPORT_SYMBOL +0x0bf88598 __clocksource_register_scale vmlinux EXPORT_SYMBOL_GPL +0x7807f0f8 schedule_timeout_idle vmlinux EXPORT_SYMBOL +0xfe487975 init_wait_entry vmlinux EXPORT_SYMBOL +0xf6230e49 fpregs_mark_activate vmlinux EXPORT_SYMBOL_GPL +0x74725e69 ZSTD_compressContinue lib/zstd/zstd_compress EXPORT_SYMBOL +0xda67fe6e ceph_monc_get_version_async net/ceph/libceph EXPORT_SYMBOL +0x480d9045 cxgb4_clip_get drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x6551b8d4 cxgbi_cleanup_task drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x772a09a7 nvmf_unregister_transport drivers/nvme/host/nvme-fabrics EXPORT_SYMBOL_GPL +0xcbf8b202 tcp_get_md5sig_pool vmlinux EXPORT_SYMBOL +0x2e77d9cf dm_mq_kick_requeue_list vmlinux EXPORT_SYMBOL +0xe1775ee7 dm_get_reserved_bio_based_ios vmlinux EXPORT_SYMBOL_GPL +0x23de9b5f ata_sas_port_destroy vmlinux EXPORT_SYMBOL_GPL +0x78efb543 ata_sas_port_suspend vmlinux EXPORT_SYMBOL_GPL +0x3145216f pci_dev_present vmlinux EXPORT_SYMBOL +0x0aa5e9ae pci_dev_put vmlinux EXPORT_SYMBOL +0x83a5c98f pci_dev_get vmlinux EXPORT_SYMBOL +0x2b28c1ac scsi_cmd_blk_ioctl vmlinux EXPORT_SYMBOL +0x687c5859 read_dev_sector vmlinux EXPORT_SYMBOL +0xcd747b95 jbd2_journal_revoke vmlinux EXPORT_SYMBOL +0xc8476b89 config_group_find_item vmlinux EXPORT_SYMBOL +0x0d6cdb99 ceph_monc_do_statfs net/ceph/libceph EXPORT_SYMBOL +0xf9e528d7 ib_device_get_by_netdev 
drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe6ee72be drm_atomic_helper_disable_plane drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x5609ce41 cast_s2 crypto/cast_common EXPORT_SYMBOL_GPL +0xe01c5cd0 usb_ifnum_to_if vmlinux EXPORT_SYMBOL_GPL +0xde34673a nvme_change_ctrl_state vmlinux EXPORT_SYMBOL_GPL +0x4de995ec gen_pool_dma_alloc_algo vmlinux EXPORT_SYMBOL +0x890fa0fa btree_get_prev vmlinux EXPORT_SYMBOL_GPL +0xfba7ddd2 match_u64 vmlinux EXPORT_SYMBOL +0x462ca647 af_alg_alloc_areq vmlinux EXPORT_SYMBOL_GPL +0xf91ff119 security_old_inode_init_security vmlinux EXPORT_SYMBOL +0x8579fb5d debugfs_create_x16 vmlinux EXPORT_SYMBOL_GPL +0x73c9e389 fsnotify_put_group vmlinux EXPORT_SYMBOL_GPL +0x08690bbf __tracepoint_kmem_cache_alloc_node vmlinux EXPORT_SYMBOL +0xd81de62c ring_buffer_record_enable vmlinux EXPORT_SYMBOL_GPL +0x5170e7f3 nft_meta_policy net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x3d2261f8 ib_mr_pool_get drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xce171f12 cdrom_mode_sense drivers/cdrom/cdrom EXPORT_SYMBOL +0x061651be strcat vmlinux EXPORT_SYMBOL +0x6e48da59 inet6_destroy_sock vmlinux EXPORT_SYMBOL_GPL +0xbbde6e1f md_find_rdev_rcu vmlinux EXPORT_SYMBOL_GPL +0xef3d5892 phy_device_create vmlinux EXPORT_SYMBOL +0x4b6939eb iscsi_tcp_cleanup_task vmlinux EXPORT_SYMBOL_GPL +0x90a54bb9 nvdimm_kobj vmlinux EXPORT_SYMBOL_GPL +0x012e730e apei_exec_noop vmlinux EXPORT_SYMBOL_GPL +0x08162c74 free_bucket_spinlocks vmlinux EXPORT_SYMBOL +0x97ca51da af_alg_unregister_type vmlinux EXPORT_SYMBOL_GPL +0xdec0265f qtree_delete_dquot vmlinux EXPORT_SYMBOL +0x6f915a45 dqstats vmlinux EXPORT_SYMBOL +0xc96b2065 __online_page_set_limits vmlinux EXPORT_SYMBOL_GPL +0x3812050a _raw_spin_unlock_irqrestore vmlinux EXPORT_SYMBOL +0xb865c2bd rdma_set_ack_timeout drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xc3ebc9e0 ib_query_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x28cff34c xts_serpent_setkey arch/x86/crypto/serpent-avx-x86_64 EXPORT_SYMBOL_GPL +0x50873741 xt_compat_init_offsets vmlinux EXPORT_SYMBOL +0x378247c9 tcf_classify vmlinux EXPORT_SYMBOL +0x805667cf devlink_fmsg_u64_put vmlinux EXPORT_SYMBOL_GPL +0x09ccb326 iscsi_conn_send_pdu vmlinux EXPORT_SYMBOL_GPL +0x1d58756a dma_buf_vunmap vmlinux EXPORT_SYMBOL_GPL +0x4a334b50 dma_buf_kunmap vmlinux EXPORT_SYMBOL_GPL +0xbc7168a4 crypto_register_shash vmlinux EXPORT_SYMBOL_GPL +0xc4df4be3 jbd2_journal_invalidatepage vmlinux EXPORT_SYMBOL +0xac285daf jbd2_journal_get_write_access vmlinux EXPORT_SYMBOL +0xa3d99c52 gre_add_protocol net/ipv4/gre EXPORT_SYMBOL_GPL +0xd831a1a2 ip_vs_proto_name net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x3cef417e i2c_transfer_buffer_flags drivers/i2c/i2c-core EXPORT_SYMBOL +0x6fbc6a00 radix_tree_insert vmlinux EXPORT_SYMBOL +0x0b39a258 inet6_unregister_protosw vmlinux EXPORT_SYMBOL +0x6ed70895 xfrm_register_type_offload vmlinux EXPORT_SYMBOL +0x900a4ccd pfifo_qdisc_ops vmlinux EXPORT_SYMBOL +0x7880c781 dm_kcopyd_prepare_callback vmlinux EXPORT_SYMBOL +0x148d10d5 fc_fabric_login vmlinux EXPORT_SYMBOL +0xb4cf8c2e vfs_listxattr vmlinux EXPORT_SYMBOL_GPL +0x6386aec3 kern_mount vmlinux EXPORT_SYMBOL_GPL +0x43bd1a49 __check_sticky vmlinux EXPORT_SYMBOL +0x09fef996 find_get_pages_contig vmlinux EXPORT_SYMBOL +0x133969d7 __trace_printk vmlinux EXPORT_SYMBOL_GPL +0x9cc03686 ib_dispatch_event drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x41616ed9 __ib_alloc_xrcd drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x611a0bb8 target_depend_item drivers/target/target_core_mod EXPORT_SYMBOL +0xfd79382b cxgb4_create_server 
drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xe70d1703 drm_agp_info drivers/gpu/drm/drm EXPORT_SYMBOL +0x7eaec87a ipv6_dev_mc_dec vmlinux EXPORT_SYMBOL +0x1c0a210d ipv6_dev_mc_inc vmlinux EXPORT_SYMBOL +0x66f43a40 xfrm_state_walk_done vmlinux EXPORT_SYMBOL +0xc3be1db5 qdisc_create_dflt vmlinux EXPORT_SYMBOL +0x0907d93d flow_rule_match_ipv6_addrs vmlinux EXPORT_SYMBOL +0xb9f10376 flow_rule_match_ipv4_addrs vmlinux EXPORT_SYMBOL +0xabe38528 uart_add_one_port vmlinux EXPORT_SYMBOL +0x8a3a22fc clk_divider_ops vmlinux EXPORT_SYMBOL_GPL +0x16301b34 wrmsrl_on_cpu vmlinux EXPORT_SYMBOL +0x75e60613 key_put vmlinux EXPORT_SYMBOL +0xd1d018c7 bmap vmlinux EXPORT_SYMBOL +0x3f965a2b bdi_register_owner vmlinux EXPORT_SYMBOL +0x66b4cc41 kmemdup vmlinux EXPORT_SYMBOL +0x1c44b902 __blocking_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x096a7e6f x86_spec_ctrl_base vmlinux EXPORT_SYMBOL_GPL +0xe55154a8 _copy_from_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x9e98460e dm_bitset_empty drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb70b342a dm_bio_prison_destroy drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xb26bf25d drm_mode_set_crtcinfo drivers/gpu/drm/drm EXPORT_SYMBOL +0x6661bd33 drm_mode_vrefresh drivers/gpu/drm/drm EXPORT_SYMBOL +0xc6946c62 __drm_atomic_helper_plane_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xa99b39c2 prandom_bytes vmlinux EXPORT_SYMBOL +0x1e57aab7 generic_parse_monolithic vmlinux EXPORT_SYMBOL +0xc6b10427 ex_handler_fprestore vmlinux EXPORT_SYMBOL_GPL +0xf6e772c3 irq_bypass_unregister_producer virt/lib/irqbypass EXPORT_SYMBOL_GPL +0x2833f577 ZSTD_compressBegin_advanced lib/zstd/zstd_compress EXPORT_SYMBOL +0x68d0a08e nf_conntrack_alloc net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xc96d2f4d ib_create_rwq_ind_table drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xb6372724 mlx4_bf_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6181e79f timerqueue_add vmlinux EXPORT_SYMBOL_GPL +0x205afa2b tcp_gro_complete vmlinux EXPORT_SYMBOL +0xf551bc03 tcp_register_congestion_control vmlinux EXPORT_SYMBOL_GPL +0x17fd3a5f tcp_release_cb vmlinux EXPORT_SYMBOL +0x7269ef40 devlink_flash_update_status_notify vmlinux EXPORT_SYMBOL_GPL +0x9f0ba696 skb_gso_validate_network_len vmlinux EXPORT_SYMBOL_GPL +0x87139661 iscsi_host_free vmlinux EXPORT_SYMBOL_GPL +0xc9fa54de dma_buf_fd vmlinux EXPORT_SYMBOL_GPL +0xb8589da9 __dax_driver_register vmlinux EXPORT_SYMBOL_GPL +0x4d974b9c register_sysrq_key vmlinux EXPORT_SYMBOL +0xd753df8a kstrdup_quotable_file vmlinux EXPORT_SYMBOL_GPL +0x1dfdd782 refcount_dec_and_mutex_lock vmlinux EXPORT_SYMBOL +0x7756398f sync_filesystem vmlinux EXPORT_SYMBOL +0x02359f05 iscsit_immediate_queue drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xbaf18278 dm_bio_prison_free_cell drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0xd80d18a8 pnfs_generic_pg_check_layout fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x1eda497d __tracepoint_nfs_fsync_enter fs/nfs/nfs EXPORT_SYMBOL_GPL +0x997ed302 tcp_connect vmlinux EXPORT_SYMBOL +0x3ba8475e qdisc_watchdog_init_clockid vmlinux EXPORT_SYMBOL +0x2c02f7a4 neigh_table_clear vmlinux EXPORT_SYMBOL +0x06b53bd2 memalloc_socks_key vmlinux EXPORT_SYMBOL_GPL +0x8aa85e6d devfreq_interval_update vmlinux EXPORT_SYMBOL +0x45fe33d5 thermal_zone_bind_cooling_device vmlinux EXPORT_SYMBOL_GPL +0xde50f6bf device_match_fwnode vmlinux EXPORT_SYMBOL_GPL +0x2396c7f0 clk_set_parent vmlinux EXPORT_SYMBOL_GPL +0x63150e06 clk_get_parent vmlinux EXPORT_SYMBOL_GPL +0xf95322f4 kthread_parkme vmlinux EXPORT_SYMBOL_GPL 
+0x627f576f __cpuhp_remove_state vmlinux EXPORT_SYMBOL +0x78f2b88c wpan_phy_for_each net/ieee802154/ieee802154 EXPORT_SYMBOL +0xb7f73ef8 xas_init_marks vmlinux EXPORT_SYMBOL_GPL +0x37a02412 xfrm_aalg_get_byname vmlinux EXPORT_SYMBOL_GPL +0x11c9b3bf xfrm_state_flush vmlinux EXPORT_SYMBOL +0x9b896724 devlink_param_value_str_fill vmlinux EXPORT_SYMBOL_GPL +0xf1356bbb reserve_iova vmlinux EXPORT_SYMBOL_GPL +0x679fa519 pci_fixup_device vmlinux EXPORT_SYMBOL +0x9a3b2126 blk_mq_request_started vmlinux EXPORT_SYMBOL_GPL +0xd3ab4aa6 fat_flush_inodes vmlinux EXPORT_SYMBOL_GPL +0x6e7aa266 __dquot_alloc_space vmlinux EXPORT_SYMBOL +0x23bb4125 stream_open vmlinux EXPORT_SYMBOL +0x16fd08c4 virtio_transport_notify_recv_post_dequeue net/vmw_vsock/vmw_vsock_virtio_transport_common EXPORT_SYMBOL_GPL +0x98d285d6 drm_atomic_helper_commit_modeset_enables drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x6699d9d5 nfs_wait_on_request fs/nfs/nfs EXPORT_SYMBOL_GPL +0x8cec98ce xfrm_find_acq_byseq vmlinux EXPORT_SYMBOL +0xd06d1e6c xfrm_policy_unregister_afinfo vmlinux EXPORT_SYMBOL +0xc0cdbf1c reuseport_detach_sock vmlinux EXPORT_SYMBOL +0xc10a214c skb_zerocopy_iter_stream vmlinux EXPORT_SYMBOL_GPL +0xf08c67de napi_alloc_frag vmlinux EXPORT_SYMBOL +0x3819e783 governor_sysfs_ops vmlinux EXPORT_SYMBOL_GPL +0x6b23898e __vfs_removexattr_locked vmlinux EXPORT_SYMBOL_GPL +0x054496b4 schedule_timeout_interruptible vmlinux EXPORT_SYMBOL +0x9675424d rdma_set_ib_path drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xe49d1e3b secpath_set vmlinux EXPORT_SYMBOL +0x9a144176 dma_buf_detach vmlinux EXPORT_SYMBOL_GPL +0x9bbc86f4 dma_buf_attach vmlinux EXPORT_SYMBOL_GPL +0x55d5e44c fwnode_create_software_node vmlinux EXPORT_SYMBOL_GPL +0x350898ca devm_kasprintf vmlinux EXPORT_SYMBOL_GPL +0xd318b98f iommu_set_fault_handler vmlinux EXPORT_SYMBOL_GPL +0xdac4913a bitmap_allocate_region vmlinux EXPORT_SYMBOL +0x65e0d6d7 memory_read_from_buffer vmlinux EXPORT_SYMBOL +0xb0c00641 alarmtimer_get_rtcdev vmlinux EXPORT_SYMBOL_GPL +0x5dffb495 ZSTD_decompress_usingDDict lib/zstd/zstd_decompress EXPORT_SYMBOL +0x1a107de2 ZSTD_compressCCtx lib/zstd/zstd_compress EXPORT_SYMBOL +0x44bc9c28 xprt_force_disconnect net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x1062dbaf nft_meta_set_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0xc162b1fa nft_meta_get_dump net/netfilter/nf_tables EXPORT_SYMBOL_GPL +0x21ec343c ib_send_cm_rej drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x3baec449 ib_send_cm_rtu drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xc63266fc ib_send_cm_rep drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x91a66c27 ib_send_cm_req drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0xe9045d32 rdma_rw_ctx_destroy drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf3a5e140 mlx5_query_nic_vport_mtu drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xf5eea145 mlx5_cmd_cleanup drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xaafd4acc max_session_cb_slots fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb01bebf9 xfrm_get_acqseq vmlinux EXPORT_SYMBOL +0xb3d2c76d scsi_hostbyte_string vmlinux EXPORT_SYMBOL +0x9e799792 __scsi_iterate_devices vmlinux EXPORT_SYMBOL +0x14605535 dma_fence_context_alloc vmlinux EXPORT_SYMBOL +0x4f0f591b mipi_dsi_dcs_set_pixel_format vmlinux EXPORT_SYMBOL +0xfd7b08fa mipi_dsi_dcs_get_pixel_format vmlinux EXPORT_SYMBOL +0x7ad050b9 qid_lt vmlinux EXPORT_SYMBOL +0x0fa9cfef svc_reserve net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x2409eb4f ip6_tnl_get_cap net/ipv6/ip6_tunnel EXPORT_SYMBOL +0xa707769f mlx5_eswitch_get_total_vports 
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x31ddef7a twofish_enc_blk_ctr_3way arch/x86/crypto/twofish-x86_64-3way EXPORT_SYMBOL_GPL +0x92a51c43 twofish_dec_blk_cbc_3way arch/x86/crypto/twofish-x86_64-3way EXPORT_SYMBOL_GPL +0xa853396b xa_extract vmlinux EXPORT_SYMBOL +0xd6ad0ca5 qdisc_put vmlinux EXPORT_SYMBOL +0x16371f61 eth_gro_receive vmlinux EXPORT_SYMBOL +0x901c6c99 __tracepoint_neigh_update_done vmlinux EXPORT_SYMBOL_GPL +0xd9de7caa genphy_c45_restart_aneg vmlinux EXPORT_SYMBOL_GPL +0x4e4f0f16 dma_fence_chain_find_seqno vmlinux EXPORT_SYMBOL +0x64218c07 platform_bus vmlinux EXPORT_SYMBOL_GPL +0xbb09a75c blk_queue_bounce_limit vmlinux EXPORT_SYMBOL +0x55339365 flush_delayed_fput vmlinux EXPORT_SYMBOL_GPL +0xccf396a3 x86_perf_get_lbr vmlinux EXPORT_SYMBOL_GPL +0x604b0c3a vnic_dev_register drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL +0xf0da4df6 drm_atomic_helper_crtc_reset drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xe92ca535 acpi_video_set_dmi_backlight_type drivers/acpi/video EXPORT_SYMBOL +0x65bb54d7 ip6mr_rule_default vmlinux EXPORT_SYMBOL +0xb58a29fb netpoll_setup vmlinux EXPORT_SYMBOL +0x0b7fa67e skb_abort_seq_read vmlinux EXPORT_SYMBOL +0x51fe059e iscsi_host_for_each_session vmlinux EXPORT_SYMBOL_GPL +0xcece1dcd iscsi_create_flashnode_sess vmlinux EXPORT_SYMBOL_GPL +0x10d9f885 scsi_sense_desc_find vmlinux EXPORT_SYMBOL +0x72b9d287 default_grn vmlinux EXPORT_SYMBOL +0xef7cba3d devm_clk_get_optional vmlinux EXPORT_SYMBOL +0x5857b225 ioread16_rep vmlinux EXPORT_SYMBOL +0xb9897e95 blk_queue_split vmlinux EXPORT_SYMBOL +0x1cc59d2f remove_resource vmlinux EXPORT_SYMBOL_GPL +0xbe3879aa ceph_get_snap_context net/ceph/libceph EXPORT_SYMBOL +0x1cb605ea iscsit_build_r2ts_for_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0x46b55488 sbc_dif_copy_prot drivers/target/target_core_mod EXPORT_SYMBOL +0xe7a54eb7 target_complete_cmd_with_length drivers/target/target_core_mod EXPORT_SYMBOL +0x2f2aae00 mlx5_fs_remove_rx_underlay_qpn drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x6d8b5777 drm_property_create_range drivers/gpu/drm/drm EXPORT_SYMBOL +0x9cf56bcc inet6_ioctl vmlinux EXPORT_SYMBOL +0x6c77390e tcp_getsockopt vmlinux EXPORT_SYMBOL +0xa7a7cedc tcp_setsockopt vmlinux EXPORT_SYMBOL +0x087bb6ea rtnl_unicast vmlinux EXPORT_SYMBOL +0x4578a09c refresh_frequency_limits vmlinux EXPORT_SYMBOL +0x72d9b690 pnpacpi_protocol vmlinux EXPORT_SYMBOL +0xef464c28 getboottime64 vmlinux EXPORT_SYMBOL_GPL +0xfdd48e80 dequeue_signal vmlinux EXPORT_SYMBOL_GPL +0xce0a41a3 bcm54xx_auxctl_read drivers/net/phy/bcm-phy-lib EXPORT_SYMBOL_GPL +0x84106f36 devlink_trap_ctx_priv vmlinux EXPORT_SYMBOL_GPL +0xfbec3949 agp_generic_destroy_page vmlinux EXPORT_SYMBOL +0x3a452620 acpi_get_hp_hw_control_from_firmware vmlinux EXPORT_SYMBOL +0xbcd687c8 crypto_ablkcipher_type vmlinux EXPORT_SYMBOL_GPL +0x62bb09bf clocks_calc_mult_shift vmlinux EXPORT_SYMBOL_GPL +0x6ebf3669 ip6t_register_table net/ipv6/netfilter/ip6_tables EXPORT_SYMBOL +0xb7852a05 ib_ud_header_init drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x55650922 mlx4_qp_alloc drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x95f327d8 pnfs_set_lo_fail fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0xa4a4235a flow_indr_del_block_cb vmlinux EXPORT_SYMBOL_GPL +0x815f1496 flow_rule_match_mpls vmlinux EXPORT_SYMBOL +0x1cf5f973 flow_rule_match_icmp vmlinux EXPORT_SYMBOL +0x820f324d flow_rule_match_meta vmlinux EXPORT_SYMBOL +0x4a8e3d34 try_test_sas_gpio_gp_bit vmlinux EXPORT_SYMBOL +0x9384cd49 ata_tf_from_fis vmlinux 
EXPORT_SYMBOL_GPL +0xba0aec6b nvme_setup_cmd vmlinux EXPORT_SYMBOL_GPL +0x65d419ec __nla_reserve_64bit vmlinux EXPORT_SYMBOL +0x9ed72f6b fget_raw vmlinux EXPORT_SYMBOL +0xd3752c27 atomic_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL +0x0a49799b drm_writeback_prepare_job drivers/gpu/drm/drm EXPORT_SYMBOL +0xe9652526 backlight_device_unregister drivers/video/backlight/backlight EXPORT_SYMBOL +0x3587ab77 devfreq_resume_device vmlinux EXPORT_SYMBOL +0x64b62862 nvme_wq vmlinux EXPORT_SYMBOL_GPL +0xb3940dc1 pci_reset_function_locked vmlinux EXPORT_SYMBOL_GPL +0xa92a9722 pinctrl_add_gpio_ranges vmlinux EXPORT_SYMBOL_GPL +0xe2123cf8 phy_pm_runtime_forbid vmlinux EXPORT_SYMBOL_GPL +0x05118d82 nft_fib_dump net/netfilter/nft_fib EXPORT_SYMBOL_GPL +0x18c61caf iscsit_set_unsolicited_dataout drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL +0xee659ed5 mlx5_lag_is_sriov drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xc0041f00 mlx5_query_port_admin_status drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xec1c2a90 ipmi_get_my_address drivers/char/ipmi/ipmi_msghandler EXPORT_SYMBOL +0x6a0c3847 __mlog_printk fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0x438130e1 nfs_path fs/nfs/nfs EXPORT_SYMBOL_GPL +0x6b6f9c68 ip6_push_pending_frames vmlinux EXPORT_SYMBOL_GPL +0x430a15d0 dev_change_net_namespace vmlinux EXPORT_SYMBOL_GPL +0x2d660f45 sas_eh_target_reset_handler vmlinux EXPORT_SYMBOL_GPL +0x74ceefdf ahci_dev_classify vmlinux EXPORT_SYMBOL_GPL +0x3776be90 amd_iommu_rlookup_table vmlinux EXPORT_SYMBOL +0xd6bbf023 pci_rescan_bus vmlinux EXPORT_SYMBOL_GPL +0x7d9be57d blk_queue_rq_timeout vmlinux EXPORT_SYMBOL_GPL +0x66791d40 blk_queue_flag_clear vmlinux EXPORT_SYMBOL +0xe43ff2b1 cache_seq_stop_rcu net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x66b14a5b xdr_init_decode_pages net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6698ad6a usb_stor_pre_reset drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0x83a5d0b5 mlx5_accel_esp_create_xfrm drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xb1cd2569 cxgb4_port_e2cchan drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xb4895436 __tracepoint_nfs4_pnfs_write fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x7b54a74f nfs_mknod fs/nfs/nfs EXPORT_SYMBOL_GPL +0x9ce40163 kvm_lapic_reg_read arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xbee64bc8 tcp_twsk_destructor vmlinux EXPORT_SYMBOL_GPL +0xd4e900d1 xt_compat_target_offset vmlinux EXPORT_SYMBOL_GPL +0x5282e5d3 scm_detach_fds vmlinux EXPORT_SYMBOL +0xde595274 md_write_end vmlinux EXPORT_SYMBOL +0xe4c2c66c rtc_ktime_to_tm vmlinux EXPORT_SYMBOL_GPL +0xd63f353c scsi_ioctl_block_when_processing_errors vmlinux EXPORT_SYMBOL_GPL +0x4dd9d14d cn_netlink_send_mult vmlinux EXPORT_SYMBOL_GPL +0x4d34d223 put_pid vmlinux EXPORT_SYMBOL_GPL +0xeac73847 irq_regs vmlinux EXPORT_SYMBOL +0x21b0917f xdr_buf_read_mic net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x677e1052 adf_cfg_section_add drivers/crypto/qat/qat_common/intel_qat EXPORT_SYMBOL_GPL +0x8f7226a7 __efivar_entry_delete vmlinux EXPORT_SYMBOL_GPL +0xd24bc143 dev_pm_domain_attach vmlinux EXPORT_SYMBOL_GPL +0xd784cb50 fwnode_device_is_available vmlinux EXPORT_SYMBOL_GPL +0x5a327d0e driver_find_device vmlinux EXPORT_SYMBOL_GPL +0x4155413d pci_user_read_config_byte vmlinux EXPORT_SYMBOL_GPL +0x3a2d1dfa rdmsr_safe_regs_on_cpu vmlinux EXPORT_SYMBOL +0x53e2a410 __inode_attach_wb vmlinux EXPORT_SYMBOL_GPL +0xc7e39bca ring_buffer_dropped_events_cpu vmlinux EXPORT_SYMBOL_GPL +0xb410327b posix_clock_register vmlinux EXPORT_SYMBOL_GPL +0xd21b61bd 
async_schedule_node_domain vmlinux EXPORT_SYMBOL_GPL +0x275e30fa drm_mode_plane_set_obj_prop drivers/gpu/drm/drm EXPORT_SYMBOL +0xef5de143 cryptd_aead_queued crypto/cryptd EXPORT_SYMBOL_GPL +0x68a90b51 get_default_font vmlinux EXPORT_SYMBOL +0x36719d25 blk_get_request vmlinux EXPORT_SYMBOL +0x13d3b6de __set_page_dirty vmlinux EXPORT_SYMBOL_GPL +0x8748fb66 vmalloc_user_node_flags vmlinux EXPORT_SYMBOL +0xb74210c5 bpf_prog_free vmlinux EXPORT_SYMBOL_GPL +0x00d1eea5 drm_connector_list_iter_end drivers/gpu/drm/drm EXPORT_SYMBOL +0x61c377f4 drm_set_preferred_mode drivers/gpu/drm/drm EXPORT_SYMBOL +0x1526b301 unix_tot_inflight vmlinux EXPORT_SYMBOL +0x00c80741 xfrm_ealg_get_byid vmlinux EXPORT_SYMBOL_GPL +0xb74e6106 device_pm_wait_for_dev vmlinux EXPORT_SYMBOL_GPL +0x4055a920 acpi_remove_fixed_event_handler vmlinux EXPORT_SYMBOL +0xde3a3108 bio_uninit vmlinux EXPORT_SYMBOL +0x56054c05 crypto_it_tab vmlinux EXPORT_SYMBOL_GPL +0x1a10c32b crypto_ft_tab vmlinux EXPORT_SYMBOL_GPL +0x5a44f8cb __crypto_memneq vmlinux EXPORT_SYMBOL +0x6d6fec1f ktime_mono_to_any vmlinux EXPORT_SYMBOL_GPL +0x92e683f5 down_timeout vmlinux EXPORT_SYMBOL +0x47960bc4 proc_do_large_bitmap vmlinux EXPORT_SYMBOL +0xf933069d xprt_update_rtt net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x3ec86eb6 nfnetlink_has_listeners net/netfilter/nfnetlink EXPORT_SYMBOL_GPL +0xceac50e0 rdma_user_mmap_io drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xb19bac86 __fscache_attr_changed fs/fscache/fscache EXPORT_SYMBOL +0x86c43a8c cper_estatus_check vmlinux EXPORT_SYMBOL_GPL +0xe0d1e19c ahci_ops vmlinux EXPORT_SYMBOL_GPL +0xf80db9a7 ata_wait_after_reset vmlinux EXPORT_SYMBOL_GPL +0x740aa1b9 devm_platform_ioremap_resource vmlinux EXPORT_SYMBOL_GPL +0xfb11aa9b mipi_dsi_dcs_write vmlinux EXPORT_SYMBOL +0x1fca0b38 housekeeping_overridden vmlinux EXPORT_SYMBOL_GPL +0x3d7a39e4 ip_tunnel_encap_del_ops net/ipv4/ip_tunnel EXPORT_SYMBOL +0x1fd4782d vmci_qpair_get_produce_indexes drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL +0x90a5530f nfsiod_workqueue fs/nfs/nfs EXPORT_SYMBOL_GPL +0xce53f5b8 iommu_dev_has_feature vmlinux EXPORT_SYMBOL_GPL +0xc924ac93 misc_deregister vmlinux EXPORT_SYMBOL +0x74457e56 apei_resources_fini vmlinux EXPORT_SYMBOL_GPL +0x2af73d74 acpi_match_device_ids vmlinux EXPORT_SYMBOL +0x3f0eabd2 xxh64_update vmlinux EXPORT_SYMBOL +0x45535485 xxh32_update vmlinux EXPORT_SYMBOL +0x7b2ad7c9 blkdev_fsync vmlinux EXPORT_SYMBOL +0x95d5f459 lookup_one_len vmlinux EXPORT_SYMBOL +0x7959d532 relay_buf_full vmlinux EXPORT_SYMBOL_GPL +0x32ab06cc irq_percpu_is_enabled vmlinux EXPORT_SYMBOL_GPL +0x782adb74 hpet_rtc_interrupt vmlinux EXPORT_SYMBOL_GPL +0x00fe0c7b flow_offload_lookup net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL +0xd0d4125e ib_umem_find_best_pgsz drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0xf245c5ba uverbs_get_flags64 drivers/infiniband/core/ib_uverbs EXPORT_SYMBOL +0x302525de edac_pci_create_generic_ctl drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x8fa25c24 xa_find vmlinux EXPORT_SYMBOL +0x47c65bfc unregister_inet6addr_validator_notifier vmlinux EXPORT_SYMBOL +0x4f83b4f5 inet6_bind vmlinux EXPORT_SYMBOL +0xf68285c0 register_inetaddr_notifier vmlinux EXPORT_SYMBOL +0x33de0a26 skb_headers_offset_update vmlinux EXPORT_SYMBOL +0x44858bbe sas_port_get_phy vmlinux EXPORT_SYMBOL +0x98b627c3 acpi_device_update_power vmlinux EXPORT_SYMBOL_GPL +0x4351577a fb_parse_edid vmlinux EXPORT_SYMBOL +0x3642be41 fscrypt_ioctl_add_key vmlinux EXPORT_SYMBOL_GPL +0x7e0826e2 atomic_dec_and_mutex_lock vmlinux EXPORT_SYMBOL +0x2a895cad 
kthread_delayed_work_timer_fn vmlinux EXPORT_SYMBOL +0xd90cb249 ZSTD_getBlockSizeMax lib/zstd/zstd_compress EXPORT_SYMBOL +0xd7cdce3b mlx5_query_port_pfc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x74def688 devres_get vmlinux EXPORT_SYMBOL_GPL +0x8f5ea4e2 devres_add vmlinux EXPORT_SYMBOL_GPL +0x76cd2bbb device_link_remove vmlinux EXPORT_SYMBOL_GPL +0xba400916 serial8250_set_defaults vmlinux EXPORT_SYMBOL_GPL +0x464d325f af_alg_sendpage vmlinux EXPORT_SYMBOL_GPL +0x6fdaa4cd fscrypt_free_inode vmlinux EXPORT_SYMBOL +0x917836ee inode_set_flags vmlinux EXPORT_SYMBOL +0x14448cea dma_mmap_attrs vmlinux EXPORT_SYMBOL +0x95b13ff8 prot_sock_flag vmlinux EXPORT_SYMBOL +0x01d4b9ac mlx5_modify_port_ets_rate_limit drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xf836c328 drm_bridge_enable drivers/gpu/drm/drm EXPORT_SYMBOL +0xd27fd4c4 nfs_free_client fs/nfs/nfs EXPORT_SYMBOL_GPL +0xc9b43888 ip_frag_init vmlinux EXPORT_SYMBOL +0xadc5ab9e fc_host_post_vendor_event vmlinux EXPORT_SYMBOL +0x3f84bcd7 dax_alive vmlinux EXPORT_SYMBOL_GPL +0x00d4dc7f clk_fixed_rate_ops vmlinux EXPORT_SYMBOL_GPL +0xb1d9a42e pci_set_mwi vmlinux EXPORT_SYMBOL +0xa56b732f sync_inodes_sb vmlinux EXPORT_SYMBOL +0x6091b333 unregister_chrdev_region vmlinux EXPORT_SYMBOL +0xac01cf00 generic_writepages vmlinux EXPORT_SYMBOL +0x15c85de3 mempool_init vmlinux EXPORT_SYMBOL +0x38e46431 mempool_exit vmlinux EXPORT_SYMBOL +0x430a5560 ceph_monc_validate_auth net/ceph/libceph EXPORT_SYMBOL +0x2b747ede vhost_vq_access_ok drivers/vhost/vhost EXPORT_SYMBOL_GPL +0x7b047bd9 dm_tm_create_non_blocking_clone drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x985a5542 cxgbi_device_find_by_netdev drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x7313343a drm_i2c_encoder_detect drivers/gpu/drm/drm EXPORT_SYMBOL +0xc07e7d53 simd_register_aeads_compat crypto/crypto_simd EXPORT_SYMBOL_GPL +0x91f730ce fuse_request_end fs/fuse/fuse EXPORT_SYMBOL_GPL +0xe1d3e642 kvm_read_guest_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x212ffd72 udp_cmsg_send vmlinux EXPORT_SYMBOL_GPL +0x27ce119c register_8022_client vmlinux EXPORT_SYMBOL +0x58acf24b mdiobus_register_board_info vmlinux EXPORT_SYMBOL +0x271cba95 acpi_bus_private_data_handler vmlinux EXPORT_SYMBOL +0xc8d1d433 blk_set_default_limits vmlinux EXPORT_SYMBOL +0x01789a3c dquot_get_next_dqblk vmlinux EXPORT_SYMBOL +0x0c725fb8 posix_acl_equiv_mode vmlinux EXPORT_SYMBOL +0x53569707 this_cpu_off vmlinux EXPORT_SYMBOL +0xace81187 ceph_pg_poolid_by_name net/ceph/libceph EXPORT_SYMBOL +0xbaa1a1a2 __udp_disconnect vmlinux EXPORT_SYMBOL +0x331009fe sock_from_file vmlinux EXPORT_SYMBOL +0x54d43bc1 cpufreq_generic_attr vmlinux EXPORT_SYMBOL_GPL +0xb2e696f4 cpufreq_generic_init vmlinux EXPORT_SYMBOL_GPL +0xe484e35f ioread32 vmlinux EXPORT_SYMBOL +0x8601b234 lookup_bdev vmlinux EXPORT_SYMBOL +0xc839c1ce trace_seq_to_user vmlinux EXPORT_SYMBOL_GPL +0x131af6f5 clockevents_config_and_register vmlinux EXPORT_SYMBOL_GPL +0xa04f945a cpus_read_lock vmlinux EXPORT_SYMBOL_GPL +0x9c70ad80 irq_stat vmlinux EXPORT_SYMBOL +0x61fbd7a9 ceph_destroy_client net/ceph/libceph EXPORT_SYMBOL +0x5f098b2a in6addr_interfacelocal_allrouters vmlinux EXPORT_SYMBOL +0x7a8ca627 xfrm_count_pfkey_enc_supported vmlinux EXPORT_SYMBOL_GPL +0x03bf498c raw_seq_stop vmlinux EXPORT_SYMBOL_GPL +0xe835212c inet_proto_csum_replace_by_diff vmlinux EXPORT_SYMBOL +0xfa690589 netdev_cmd_to_name vmlinux EXPORT_SYMBOL_GPL +0x569299d3 gnet_stats_copy_basic vmlinux EXPORT_SYMBOL +0x3bf17755 mpi_read_buffer vmlinux 
EXPORT_SYMBOL_GPL +0xac537ac2 percpu_counter_destroy vmlinux EXPORT_SYMBOL +0xe261d7cc vm_map_ram vmlinux EXPORT_SYMBOL +0x7758b430 filemap_fdatawrite_range vmlinux EXPORT_SYMBOL +0xc4354855 inet_sendmsg vmlinux EXPORT_SYMBOL +0xdec6ddb9 hidinput_report_event vmlinux EXPORT_SYMBOL_GPL +0xeab9cbd5 iscsi_conn_get_addr_param vmlinux EXPORT_SYMBOL_GPL +0x181012f1 elevator_alloc vmlinux EXPORT_SYMBOL +0x35d3dc46 crypto_alg_sem vmlinux EXPORT_SYMBOL_GPL +0x43aa319e lease_register_notifier vmlinux EXPORT_SYMBOL_GPL +0x97ab2001 __getblk_gfp vmlinux EXPORT_SYMBOL +0xb0d9bea3 dma_direct_map_page vmlinux EXPORT_SYMBOL +0xc14c3581 rpc_call_async net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x5210699d mlx5_core_create_mkey drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x63562d85 br_multicast_has_querier_adjacent vmlinux EXPORT_SYMBOL_GPL +0x493446db netlink_strict_get_check vmlinux EXPORT_SYMBOL_GPL +0xa43799a8 rfs_needed vmlinux EXPORT_SYMBOL +0x56802ae8 rps_cpu_mask vmlinux EXPORT_SYMBOL +0x95391702 ufshcd_init vmlinux EXPORT_SYMBOL_GPL +0x9dbb946b inverse_translate vmlinux EXPORT_SYMBOL_GPL +0xb654ef65 acpi_os_read_port vmlinux EXPORT_SYMBOL +0x65537437 freezer_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0x3b3e1ae2 flush_rcu_work vmlinux EXPORT_SYMBOL +0xe8b3f0d8 __cpuhp_state_remove_instance vmlinux EXPORT_SYMBOL_GPL +0xa9126bff hpet_set_rtc_irq_bit vmlinux EXPORT_SYMBOL_GPL +0x3b83610f cpu_sibling_map vmlinux EXPORT_SYMBOL +0xf346f8a9 __rdma_create_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL +0xfe029963 unregister_inetaddr_notifier vmlinux EXPORT_SYMBOL +0x05a7e44d ata_sff_exec_command vmlinux EXPORT_SYMBOL_GPL +0xaafa131a hvc_instantiate vmlinux EXPORT_SYMBOL_GPL +0x61ea189b fb_pad_aligned_buffer vmlinux EXPORT_SYMBOL +0x1a9a433c prandom_u32_state vmlinux EXPORT_SYMBOL +0x1085d400 elv_rqhash_add vmlinux EXPORT_SYMBOL_GPL +0x160e2743 af_alg_free_sg vmlinux EXPORT_SYMBOL_GPL +0x8efd75d3 af_alg_make_sg vmlinux EXPORT_SYMBOL_GPL +0xa9fc4b6a get_kernel_pages vmlinux EXPORT_SYMBOL_GPL +0x23d30188 perf_event_enable vmlinux EXPORT_SYMBOL_GPL +0xe05e2f85 nexthop_free_rcu vmlinux EXPORT_SYMBOL_GPL +0xb5959c1e od_register_powersave_bias_handler vmlinux EXPORT_SYMBOL_GPL +0x8133645c phy_reset_after_clk_enable vmlinux EXPORT_SYMBOL +0xe2bcdabd intel_svm_unbind_mm vmlinux EXPORT_SYMBOL_GPL +0xd3aa11b7 agp_generic_mask_memory vmlinux EXPORT_SYMBOL +0x282a873a virtqueue_add_outbuf vmlinux EXPORT_SYMBOL_GPL +0x612bfd89 errno_to_blk_status vmlinux EXPORT_SYMBOL_GPL +0xd54076ca nobh_write_begin vmlinux EXPORT_SYMBOL +0x6654db59 filemap_check_errors vmlinux EXPORT_SYMBOL +0xb404dd27 __cgroup_bpf_run_filter_getsockopt vmlinux EXPORT_SYMBOL +0x30a2b5f5 cpuacct_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x1eb922a3 IO_APIC_get_PCI_irq_vector vmlinux EXPORT_SYMBOL +0x404c3e8a nfs_post_op_update_inode_force_wcc fs/nfs/nfs EXPORT_SYMBOL_GPL +0xb8752e4d __tracepoint_fib6_table_lookup vmlinux EXPORT_SYMBOL_GPL +0x03e83395 xfrm_state_free vmlinux EXPORT_SYMBOL +0x77e42add nf_hook_entries_delete_raw vmlinux EXPORT_SYMBOL_GPL +0x38002f21 netlink_ack vmlinux EXPORT_SYMBOL +0xe8ea4330 sock_no_bind vmlinux EXPORT_SYMBOL +0xd738ca1b phy_unregister_fixup_for_uid vmlinux EXPORT_SYMBOL +0xd327d52a iscsi_register_transport vmlinux EXPORT_SYMBOL_GPL +0x3e46d6cb dmaengine_unmap_put vmlinux EXPORT_SYMBOL_GPL +0x63c08029 clk_bulk_unprepare vmlinux EXPORT_SYMBOL_GPL +0x5f93525c acpi_extract_package vmlinux EXPORT_SYMBOL +0x35a88f28 zlib_inflateInit2 vmlinux EXPORT_SYMBOL +0xa5d7c388 pstore_type_to_name vmlinux 
EXPORT_SYMBOL_GPL +0xd7045f28 param_set_charp vmlinux EXPORT_SYMBOL +0x5cf0d0bb dm_tm_create_with_sm drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xe9835ec5 tap_queue_resize drivers/net/tap EXPORT_SYMBOL_GPL +0x48dca605 xfrm_unregister_type_offload vmlinux EXPORT_SYMBOL +0x1d931264 iscsi_session_failure vmlinux EXPORT_SYMBOL_GPL +0x8feb043a dax_copy_to_iter vmlinux EXPORT_SYMBOL_GPL +0xbec66c3a __apei_exec_run vmlinux EXPORT_SYMBOL_GPL +0x0a8d03b6 acpi_dev_resume vmlinux EXPORT_SYMBOL_GPL +0x17e0896d blkg_lookup_slowpath vmlinux EXPORT_SYMBOL_GPL +0x28db0f27 __mmu_notifier_register vmlinux EXPORT_SYMBOL_GPL +0x4071b517 out_of_line_wait_on_bit_timeout vmlinux EXPORT_SYMBOL_GPL +0x5ab09745 edac_get_owner drivers/edac/edac_core EXPORT_SYMBOL_GPL +0x21ab6aef drm_atomic_helper_page_flip drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x129d5ca6 fb_sys_read drivers/video/fbdev/core/fb_sys_fops EXPORT_SYMBOL_GPL +0xe6003fb5 flow_rule_match_enc_keyid vmlinux EXPORT_SYMBOL +0xe337538d ata_sas_sync_probe vmlinux EXPORT_SYMBOL_GPL +0x8346ec96 set_primary_fwnode vmlinux EXPORT_SYMBOL_GPL +0xada31e57 gen_pool_dma_alloc_align vmlinux EXPORT_SYMBOL +0x8096adde wpan_phy_unregister net/ieee802154/ieee802154 EXPORT_SYMBOL +0x5a917475 vfio_unpin_pages drivers/vfio/vfio EXPORT_SYMBOL +0x16c19f45 cxgbi_sock_closed drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xf9c0ab0c drm_gem_object_put drivers/gpu/drm/drm EXPORT_SYMBOL +0x8eae130a simd_unregister_aeads crypto/crypto_simd EXPORT_SYMBOL_GPL +0x91a7b1da qdisc_class_hash_remove vmlinux EXPORT_SYMBOL +0xcd8af51b __put_net vmlinux EXPORT_SYMBOL_GPL +0x1de86fab fc_lport_bsg_request vmlinux EXPORT_SYMBOL +0x98c039dc dma_fence_wait_timeout vmlinux EXPORT_SYMBOL +0x64925270 __dax_synchronous vmlinux EXPORT_SYMBOL_GPL +0xd1c5e052 nvdimm_namespace_capacity vmlinux EXPORT_SYMBOL +0x441e6a31 transport_destroy_device vmlinux EXPORT_SYMBOL_GPL +0x758a1ad7 serial8250_tx_chars vmlinux EXPORT_SYMBOL_GPL +0x7f56f309 blk_mq_alloc_tag_set vmlinux EXPORT_SYMBOL +0xf066cb80 blk_steal_bios vmlinux EXPORT_SYMBOL_GPL +0xb6762fcf ip_set_del net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x7690da26 ip_set_add net/netfilter/ipset/ip_set EXPORT_SYMBOL_GPL +0x5c6aee5b mlx5_lag_is_active drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0x4a726bef mlx4_replace_zero_macs drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x2d45f00a tcf_block_get_ext vmlinux EXPORT_SYMBOL +0x3109133e thermal_cdev_update vmlinux EXPORT_SYMBOL +0x2ba57c4f tty_unregister_device vmlinux EXPORT_SYMBOL +0x4ec54e78 bitmap_to_arr32 vmlinux EXPORT_SYMBOL +0xf311e156 key_being_used_for vmlinux EXPORT_SYMBOL_GPL +0x43070bf4 crypto_register_skciphers vmlinux EXPORT_SYMBOL_GPL +0xd6fde043 is_module_sig_enforced vmlinux EXPORT_SYMBOL +0xf3808cb1 get_state_synchronize_rcu vmlinux EXPORT_SYMBOL_GPL +0xfb481954 vprintk vmlinux EXPORT_SYMBOL +0x6fe948d0 vsock_for_each_connected_socket net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x753e20b2 dm_bio_prison_create drivers/md/dm-bio-prison EXPORT_SYMBOL_GPL +0x2d224e6e kvm_map_gfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xac94e86b vsprintf vmlinux EXPORT_SYMBOL +0xccf760ed ipv6_sock_mc_join vmlinux EXPORT_SYMBOL +0x035b9bb7 __udp4_lib_lookup vmlinux EXPORT_SYMBOL_GPL +0xe5e1d7ca tcp_poll vmlinux EXPORT_SYMBOL +0x22570bd0 netdev_lower_get_next_private vmlinux EXPORT_SYMBOL +0xbd8b5ee5 generic_splice_sendpage vmlinux EXPORT_SYMBOL +0xbe5d367e vfs_statx_fd vmlinux EXPORT_SYMBOL +0x53ca97ed dma_max_mapping_size vmlinux EXPORT_SYMBOL_GPL +0x5980fead 
udp_tun_rx_dst net/ipv4/udp_tunnel EXPORT_SYMBOL_GPL +0x1fcdcc10 mdev_dev drivers/vfio/mdev/mdev EXPORT_SYMBOL +0xc03fd6bb mlxsw_core_res_get drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xb5b1b3f7 cxgb4_create_server6 drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0x167e7f9d __get_user_1 vmlinux EXPORT_SYMBOL +0xc3aaf0a9 __put_user_1 vmlinux EXPORT_SYMBOL +0xba2e5ee8 ip_do_fragment vmlinux EXPORT_SYMBOL +0xd3eaf1ed devlink_dpipe_entry_clear vmlinux EXPORT_SYMBOL +0x4f12138a attribute_container_find_class_device vmlinux EXPORT_SYMBOL_GPL +0x58604e4d alloc_iova_mem vmlinux EXPORT_SYMBOL +0xca3ab270 __tracepoint_map vmlinux EXPORT_SYMBOL_GPL +0xfc8e3c98 register_key_type vmlinux EXPORT_SYMBOL +0xc7113319 grab_cache_page_write_begin vmlinux EXPORT_SYMBOL +0xebc9a09f lock_system_sleep vmlinux EXPORT_SYMBOL_GPL +0x2e8a5a5d rt_mutex_lock_interruptible vmlinux EXPORT_SYMBOL_GPL +0xa7765e88 mlxsw_reg_query drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x963de590 mlx4_phys_to_slaves_pport drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x5496df82 kvm_mmu_slot_leaf_clear_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xe1bd1d6a kvm_get_dirty_log_protect arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x90f924cb acpi_subsys_suspend vmlinux EXPORT_SYMBOL_GPL +0xa370446b acpi_subsys_prepare vmlinux EXPORT_SYMBOL_GPL +0x814c34c5 pci_disable_link_state_locked vmlinux EXPORT_SYMBOL +0x5c78764f _copy_to_iter vmlinux EXPORT_SYMBOL +0x0b632bba blkg_conf_finish vmlinux EXPORT_SYMBOL_GPL +0xabf32f29 utf16s_to_utf8s vmlinux EXPORT_SYMBOL +0xf312d441 iomap_swapfile_activate vmlinux EXPORT_SYMBOL_GPL +0xc21b3cca devices_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL +0xa02aa74a __cond_resched_lock vmlinux EXPORT_SYMBOL +0x9ecf241e __put_task_struct vmlinux EXPORT_SYMBOL_GPL +0xacd010f1 rdma_query_ah drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xf30b9aa6 __tracepoint_bcache_journal_replay_key drivers/md/bcache/bcache EXPORT_SYMBOL_GPL +0x0e2b5842 mlxsw_afa_block_append_vlan_modify drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0xc5c99a79 drm_dp_get_adjust_request_voltage drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfea2b457 camellia_xts_dec arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL +0x9056f10d camellia_xts_enc arch/x86/crypto/camellia-aesni-avx-x86_64 EXPORT_SYMBOL_GPL +0x787cbcf2 mrp_register_application vmlinux EXPORT_SYMBOL_GPL +0xe5f916a6 unregister_pernet_subsys vmlinux EXPORT_SYMBOL_GPL +0x38f0bcb9 pnp_unregister_driver vmlinux EXPORT_SYMBOL +0xaa48c96a simple_write_begin vmlinux EXPORT_SYMBOL +0x9ec3ef63 redirty_page_for_writepage vmlinux EXPORT_SYMBOL +0x97ddb847 bdi_set_max_ratio vmlinux EXPORT_SYMBOL +0x8ffe7e89 nf_conntrack_htable_size net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x63874d4c mlxsw_core_port_driver_priv drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x49d92063 cxgb3_queue_tid_release drivers/net/ethernet/chelsio/cxgb3/cxgb3 EXPORT_SYMBOL +0x6b5bcdda drm_flip_work_init drivers/gpu/drm/drm EXPORT_SYMBOL +0x8bdbf786 kvm_read_guest_virt arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb746b415 netpoll_poll_dev vmlinux EXPORT_SYMBOL +0x914749aa netdev_emerg vmlinux EXPORT_SYMBOL +0x89110e37 sk_stream_kill_queues vmlinux EXPORT_SYMBOL +0x02dbd438 fc_block_rport vmlinux EXPORT_SYMBOL +0xe5e50e3b ata_port_desc vmlinux EXPORT_SYMBOL_GPL +0xd7a93745 dev_set_name vmlinux EXPORT_SYMBOL_GPL +0xeb37101c audit_log_end vmlinux EXPORT_SYMBOL +0x80935c79 drm_mm_scan_add_block drivers/gpu/drm/drm EXPORT_SYMBOL +0xef26dcd0 
drm_fb_helper_sys_write drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x3573913a drm_scdc_get_scrambling_status drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x36d69557 ipv6_flowlabel_exclusive vmlinux EXPORT_SYMBOL +0x396a5abc lwtunnel_fill_encap vmlinux EXPORT_SYMBOL_GPL +0x839dd9fd __sk_dst_check vmlinux EXPORT_SYMBOL +0xe5a3d989 usb_queue_reset_device vmlinux EXPORT_SYMBOL_GPL +0xedeb59d9 __tracepoint_dma_fence_signaled vmlinux EXPORT_SYMBOL +0x5027bde2 acpi_acquire_mutex vmlinux EXPORT_SYMBOL +0x3324ef3b acpi_set_firmware_waking_vector vmlinux EXPORT_SYMBOL +0x75f59e7c devm_of_iomap vmlinux EXPORT_SYMBOL +0xb10e7df4 __kfifo_dma_in_prepare vmlinux EXPORT_SYMBOL +0x0621f834 hash_and_copy_to_iter vmlinux EXPORT_SYMBOL +0xd27b25dd blk_check_plugged vmlinux EXPORT_SYMBOL +0x716265c7 debugfs_initialized vmlinux EXPORT_SYMBOL_GPL +0x198861cd lookup_one_len_unlocked vmlinux EXPORT_SYMBOL +0x2cd6246a bpf_map_put vmlinux EXPORT_SYMBOL_GPL +0xe8ab01bc tracing_snapshot_cond_enable vmlinux EXPORT_SYMBOL_GPL +0x026bccaa task_cputime_adjusted vmlinux EXPORT_SYMBOL_GPL +0xa9785b49 cpu_core_map vmlinux EXPORT_SYMBOL +0x367953a1 xdp_convert_zc_to_xdp_frame vmlinux EXPORT_SYMBOL_GPL +0xaf4014ff usb_amd_quirk_pll_check vmlinux EXPORT_SYMBOL_GPL +0xc6827920 usb_interrupt_msg vmlinux EXPORT_SYMBOL_GPL +0xe4bb9a1d pm_generic_runtime_resume vmlinux EXPORT_SYMBOL_GPL +0x081fccfb bus_unregister vmlinux EXPORT_SYMBOL_GPL +0x78041b8f byte_rev_table vmlinux EXPORT_SYMBOL_GPL +0x3bc5791f set_bh_page vmlinux EXPORT_SYMBOL +0xe48611ac trace_clock_global vmlinux EXPORT_SYMBOL_GPL +0xf91a7415 kthread_blkcg vmlinux EXPORT_SYMBOL +0x6790ebd3 mce_is_memory_error vmlinux EXPORT_SYMBOL_GPL +0x9290e07a dm_tm_read_lock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x7b6b3af5 dm_bm_read_lock drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x51fc3803 drm_format_info_block_width drivers/gpu/drm/drm EXPORT_SYMBOL +0xbd38b28c nfs_zap_acl_cache fs/nfs/nfs EXPORT_SYMBOL_GPL +0xf4db35bc stpcpy vmlinux EXPORT_SYMBOL +0xe914e41e strcpy vmlinux EXPORT_SYMBOL +0xe4ef9bfc kobj_sysfs_ops vmlinux EXPORT_SYMBOL_GPL +0x3f8ab72e devlink_fmsg_bool_put vmlinux EXPORT_SYMBOL_GPL +0x30c510c3 efivars_unregister vmlinux EXPORT_SYMBOL_GPL +0xabc9ddbe clkdev_alloc vmlinux EXPORT_SYMBOL +0x609b2853 hdmi_infoframe_pack vmlinux EXPORT_SYMBOL +0x840bd86d crypto_destroy_tfm vmlinux EXPORT_SYMBOL_GPL +0xa9456182 sysfs_remove_link_from_group vmlinux EXPORT_SYMBOL_GPL +0x77269369 filemap_fault vmlinux EXPORT_SYMBOL +0x636257f7 get_ibs_caps vmlinux EXPORT_SYMBOL +0x6d5bf833 nft_fib_policy net/netfilter/nft_fib EXPORT_SYMBOL +0x1bc3edc2 usb_stor_sense_invalidCDB drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL +0xd14f5495 mlx4_is_eq_shared drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x1475f64b ocfs2_dlm_lvb_valid fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x54ad3e5e sdev_enable_disk_events vmlinux EXPORT_SYMBOL +0x73e58576 fb_get_buffer_offset vmlinux EXPORT_SYMBOL +0xdcd2d23e t10_pi_type3_crc vmlinux EXPORT_SYMBOL +0x733ed24a key_payload_reserve vmlinux EXPORT_SYMBOL +0x8cbd6bd2 ftrace_set_notrace vmlinux EXPORT_SYMBOL_GPL +0x0907d14d blocking_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL +0x3991eebf ieee802154_xmit_complete net/mac802154/mac802154 EXPORT_SYMBOL +0x63b0c22d dm_bitset_flush drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0x45485f00 fuse_abort_conn fs/fuse/fuse EXPORT_SYMBOL_GPL +0xa0b1ef73 nfs_filemap_write_and_wait_range fs/nfs/nfs EXPORT_SYMBOL_GPL +0xe3ce6bad 
skb_unlink vmlinux EXPORT_SYMBOL +0x815bccf0 sock_zerocopy_callback vmlinux EXPORT_SYMBOL_GPL +0xd3cffc39 nvdimm_bus_lock vmlinux EXPORT_SYMBOL +0xff291ecf clk_unregister_divider vmlinux EXPORT_SYMBOL_GPL +0x3ff30a80 blk_set_queue_dying vmlinux EXPORT_SYMBOL_GPL +0xc11cc817 crypto_register_template vmlinux EXPORT_SYMBOL_GPL +0x0944c43f node_states vmlinux EXPORT_SYMBOL +0x3df0f21a vm_iomap_memory vmlinux EXPORT_SYMBOL +0x49e96999 cond_synchronize_rcu vmlinux EXPORT_SYMBOL_GPL +0xf02ed1a0 ceph_auth_create_authorizer net/ceph/libceph EXPORT_SYMBOL +0xf17cb696 nf_tproxy_handle_time_wait6 net/ipv6/netfilter/nf_tproxy_ipv6 EXPORT_SYMBOL_GPL +0xb7622059 nf_tproxy_handle_time_wait4 net/ipv4/netfilter/nf_tproxy_ipv4 EXPORT_SYMBOL_GPL +0xf4d7b068 ibdev_emerg drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xe1bdae47 dm_cache_policy_create drivers/md/dm-cache EXPORT_SYMBOL_GPL +0xa20fbe6d ptp_classify_raw vmlinux EXPORT_SYMBOL_GPL +0xf2cfac45 __ethtool_get_link_ksettings vmlinux EXPORT_SYMBOL +0x5641485b tty_termios_encode_baud_rate vmlinux EXPORT_SYMBOL_GPL +0xc1ef713c tty_release_struct vmlinux EXPORT_SYMBOL_GPL +0xb4043948 acpi_execute_simple_method vmlinux EXPORT_SYMBOL +0xc6a27775 smp_call_function_single_async vmlinux EXPORT_SYMBOL_GPL +0xa7edb330 sched_trace_rq_avg_dl vmlinux EXPORT_SYMBOL_GPL +0xde425730 gpu_create_wait vmlinux EXPORT_SYMBOL_GPL +0x9f7b0fa0 drm_fb_helper_set_par drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xa895eedf nfs_wait_client_init_complete fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa709c835 fib6_info_destroy_rcu vmlinux EXPORT_SYMBOL_GPL +0x0856dfdd inet_csk_delete_keepalive_timer vmlinux EXPORT_SYMBOL +0x94befb86 eth_platform_get_mac_address vmlinux EXPORT_SYMBOL +0xbfc5e927 skb_append_pagefrags vmlinux EXPORT_SYMBOL_GPL +0x515d5bef mbox_request_channel_byname vmlinux EXPORT_SYMBOL_GPL +0x6c6b56e0 usb_hcd_pci_shutdown vmlinux EXPORT_SYMBOL_GPL +0x64ee4115 fc_host_post_fc_event vmlinux EXPORT_SYMBOL +0xf7240467 ata_cable_40wire vmlinux EXPORT_SYMBOL_GPL +0x6958ae23 dax_get_by_host vmlinux EXPORT_SYMBOL_GPL +0xa940d0f8 __bdev_dax_supported vmlinux EXPORT_SYMBOL_GPL +0x1717a0a3 dev_pm_qos_add_ancestor_request vmlinux EXPORT_SYMBOL_GPL +0x1b52f403 pci_bus_set_ops vmlinux EXPORT_SYMBOL +0x389617b0 LZ4_decompress_fast_continue vmlinux EXPORT_SYMBOL +0x658ce1a8 xxh64_reset vmlinux EXPORT_SYMBOL +0x6673f96d xxh32_reset vmlinux EXPORT_SYMBOL +0x7e5db80b pstore_name_to_type vmlinux EXPORT_SYMBOL_GPL +0x359b671c proc_create_single_data vmlinux EXPORT_SYMBOL +0xc6870734 nf_log_dump_tcp_header net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xac90fb73 nf_log_dump_udp_header net/netfilter/nf_log_common EXPORT_SYMBOL_GPL +0xec6dbfe0 drm_atomic_helper_commit_planes_on_crtc drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xda02e283 xfrm_state_register_afinfo vmlinux EXPORT_SYMBOL +0x4fdee897 i8042_command vmlinux EXPORT_SYMBOL +0xe8a25ab5 usb_register_device_driver vmlinux EXPORT_SYMBOL_GPL +0x7a630cc8 phy_loopback vmlinux EXPORT_SYMBOL +0x5665d59f ata_cable_unknown vmlinux EXPORT_SYMBOL_GPL +0x62bfb553 subsys_interface_unregister vmlinux EXPORT_SYMBOL_GPL +0x2926ef88 devmap_managed_key vmlinux EXPORT_SYMBOL +0x5abb981c kmem_cache_size vmlinux EXPORT_SYMBOL +0xc85b066d enable_kprobe vmlinux EXPORT_SYMBOL_GPL +0x7f730c80 vsock_core_exit net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xe9093cf3 l2tp_session_get_by_ifname net/l2tp/l2tp_core EXPORT_SYMBOL_GPL +0x58c2edfc ib_cm_init_qp_attr drivers/infiniband/core/ib_cm EXPORT_SYMBOL +0x790b6ca8 mlx4_get_vf_config drivers/net/ethernet/mellanox/mlx4/mlx4_core 
EXPORT_SYMBOL_GPL +0x9d6cd42e amd_iommu_set_invalid_ppr_cb drivers/iommu/amd_iommu_v2 EXPORT_SYMBOL +0x40add3a4 nfs_symlink fs/nfs/nfs EXPORT_SYMBOL_GPL +0xa6e2377b kobject_get_path vmlinux EXPORT_SYMBOL_GPL +0xadf76e21 inet_ioctl vmlinux EXPORT_SYMBOL +0xc3c99268 genphy_c45_pma_read_abilities vmlinux EXPORT_SYMBOL_GPL +0x74754435 acpi_bus_generate_netlink_event vmlinux EXPORT_SYMBOL +0x8f2703b7 wbinvd_on_all_cpus vmlinux EXPORT_SYMBOL +0xd39b4759 drm_any_plane_has_format drivers/gpu/drm/drm EXPORT_SYMBOL +0xc1ca4588 drm_connector_register drivers/gpu/drm/drm EXPORT_SYMBOL +0x2848bc51 acpi_video_get_levels drivers/acpi/video EXPORT_SYMBOL +0xa403c9b9 kvm_get_dirty_log arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xcc1b882a idr_get_next_ul vmlinux EXPORT_SYMBOL +0xce3810da xt_register_target vmlinux EXPORT_SYMBOL +0xb6c93bcf page_pool_create vmlinux EXPORT_SYMBOL +0x9b9f3648 pcibios_scan_specific_bus vmlinux EXPORT_SYMBOL_GPL +0xb212ed49 phy_drivers_register vmlinux EXPORT_SYMBOL +0x9a402b23 scsi_vpd_lun_id vmlinux EXPORT_SYMBOL +0xd15204af ata_sas_async_probe vmlinux EXPORT_SYMBOL_GPL +0x01b58541 nvme_wait_freeze_timeout vmlinux EXPORT_SYMBOL_GPL +0xfb49e522 decrypt_blob vmlinux EXPORT_SYMBOL_GPL +0xd6eaaea1 full_name_hash vmlinux EXPORT_SYMBOL +0xb4ff6bb6 hrtimer_active vmlinux EXPORT_SYMBOL_GPL +0x406c4cb1 hrtimer_resolution vmlinux EXPORT_SYMBOL_GPL +0x20bac053 svc_rqst_alloc net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x6b862193 ip_vs_proto_data_get net/netfilter/ipvs/ip_vs EXPORT_SYMBOL +0x61b27444 nf_nat_inet_register_fn net/netfilter/nf_nat EXPORT_SYMBOL_GPL +0xad48b823 ib_get_cached_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x3e7017f5 mlx5_core_query_ib_ppcnt drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0xdd5f89e2 mlx4_mr_hw_write_mpt drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xe0ae073b drm_bridge_pre_enable drivers/gpu/drm/drm EXPORT_SYMBOL +0x88b3c96a ping_prot vmlinux EXPORT_SYMBOL +0x763d53a6 nvme_get_features vmlinux EXPORT_SYMBOL_GPL +0x0a292872 reservation_seqcount_class vmlinux EXPORT_SYMBOL +0xd8eb7fca pm_generic_freeze vmlinux EXPORT_SYMBOL_GPL +0xc1ffdd8e device_create_vargs vmlinux EXPORT_SYMBOL_GPL +0x862b7d0e iommu_unmap_fast vmlinux EXPORT_SYMBOL_GPL +0xbe869b49 tty_port_hangup vmlinux EXPORT_SYMBOL +0x9b38c7d0 pnp_disable_dev vmlinux EXPORT_SYMBOL +0x3e17cc01 framebuffer_release vmlinux EXPORT_SYMBOL +0xe6df644d pinctrl_utils_reserve_map vmlinux EXPORT_SYMBOL_GPL +0x3ef051c8 crypto_inc vmlinux EXPORT_SYMBOL_GPL +0x48a68600 iomap_file_buffered_write vmlinux EXPORT_SYMBOL_GPL +0x443168f8 get_cached_acl vmlinux EXPORT_SYMBOL +0xf375c5e5 wait_for_stable_page vmlinux EXPORT_SYMBOL_GPL +0x7cb3f569 event_triggers_post_call vmlinux EXPORT_SYMBOL_GPL +0x01c12c32 cpu_bit_bitmap vmlinux EXPORT_SYMBOL_GPL +0x9670b5a1 rpc_free_iostats net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x48f82ef1 rpc_destroy_wait_queue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x4557b425 dm_bitset_test_bit drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL +0xb58dc1dd mlx5_packet_reformat_alloc drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL +0xf83ccb0e mlx4_test_async drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x68d461fa vnic_dev_get_res drivers/net/ethernet/cisco/enic/enic EXPORT_SYMBOL +0xcc284925 drm_connector_has_possible_encoder drivers/gpu/drm/drm EXPORT_SYMBOL +0x9eb99b14 drm_atomic_helper_wait_for_fences drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xa28dd16c kvm_get_cr8 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x98f55395 
kvm_set_cr8 arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x8e4df40c fqdir_init vmlinux EXPORT_SYMBOL +0x1b8b95ad i8042_unlock_chip vmlinux EXPORT_SYMBOL +0xd426dbc4 erst_get_record_count vmlinux EXPORT_SYMBOL_GPL +0x38a71b7e pci_free_resource_list vmlinux EXPORT_SYMBOL +0xf82a5c6d blk_mq_sched_try_merge vmlinux EXPORT_SYMBOL_GPL +0x306f5576 pkcs7_verify vmlinux EXPORT_SYMBOL_GPL +0xea4239d5 crypto_register_kpp vmlinux EXPORT_SYMBOL_GPL +0xdf7b35d1 touch_buffer vmlinux EXPORT_SYMBOL +0x792d8de9 alloc_file_pseudo vmlinux EXPORT_SYMBOL +0xdf7d9ad7 prepare_to_swait_event vmlinux EXPORT_SYMBOL +0x16a3ecb9 __kthread_should_park vmlinux EXPORT_SYMBOL_GPL +0x46c47fb6 __node_distance vmlinux EXPORT_SYMBOL +0x8473f2c1 svcauth_unix_purge net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0x7130edba kobject_move vmlinux EXPORT_SYMBOL_GPL +0x503387c5 dm_table_get_md vmlinux EXPORT_SYMBOL +0x0f1195ee devm_rc_register_device vmlinux EXPORT_SYMBOL_GPL +0x3cba3e60 usb_hcd_poll_rh_status vmlinux EXPORT_SYMBOL_GPL +0xe6c1e126 mpt_event_deregister vmlinux EXPORT_SYMBOL +0x48759e34 dev_pm_qos_hide_flags vmlinux EXPORT_SYMBOL_GPL +0x2ccbb040 device_find_child vmlinux EXPORT_SYMBOL_GPL +0x8ac743de sg_copy_buffer vmlinux EXPORT_SYMBOL +0xd854a697 fat_setattr vmlinux EXPORT_SYMBOL_GPL +0x3950f436 fat_getattr vmlinux EXPORT_SYMBOL_GPL +0x197251bf __fscrypt_prepare_link vmlinux EXPORT_SYMBOL_GPL +0x8690fdf7 fscrypt_file_open vmlinux EXPORT_SYMBOL_GPL +0x50e37b47 generic_read_dir vmlinux EXPORT_SYMBOL +0xe007de41 kallsyms_lookup_name vmlinux EXPORT_SYMBOL_GPL +0xff1e67b9 setup_APIC_eilvt vmlinux EXPORT_SYMBOL_GPL +0x2ca57223 xprt_lookup_rqst net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xaef0ac6b ct_sip_parse_header_uri net/netfilter/nf_conntrack_sip EXPORT_SYMBOL_GPL +0x9784bd74 drm_dp_mst_dump_topology drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xfbc319c1 fc_exch_update_stats vmlinux EXPORT_SYMBOL +0xa9aed4a1 fc_exch_seq_send vmlinux EXPORT_SYMBOL +0x82d07a29 iommu_aux_detach_device vmlinux EXPORT_SYMBOL_GPL +0x3d7cf096 tpm_pm_suspend vmlinux EXPORT_SYMBOL_GPL +0x7caad3b6 phy_pm_runtime_get vmlinux EXPORT_SYMBOL_GPL +0x31aebaa9 bsg_remove_queue vmlinux EXPORT_SYMBOL_GPL +0xb6de0949 truncate_pagecache vmlinux EXPORT_SYMBOL +0xa376d145 ring_buffer_time_stamp vmlinux EXPORT_SYMBOL_GPL +0xbaa69cc9 param_ops_bool_enable_only vmlinux EXPORT_SYMBOL_GPL +0xe1120dd0 vsock_add_pending net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0x14289636 vsock_remove_sock net/vmw_vsock/vsock EXPORT_SYMBOL_GPL +0xabcd953f ocfs2_kset fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0x69d3558d pnfs_generic_rw_release fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x524d5a11 nfs4_test_session_trunk fs/nfs/nfsv4 EXPORT_SYMBOL_GPL +0x86b95058 gfn_to_memslot arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xb73be794 xfrm_ealg_get_byidx vmlinux EXPORT_SYMBOL_GPL +0xc08bed92 tso_start vmlinux EXPORT_SYMBOL +0x7740933d sock_no_shutdown vmlinux EXPORT_SYMBOL +0xaa0811cb vc_resize vmlinux EXPORT_SYMBOL +0x0b52e502 apei_resources_add vmlinux EXPORT_SYMBOL_GPL +0xecba9e61 phy_put vmlinux EXPORT_SYMBOL_GPL +0xc568b35b phy_pm_runtime_put_sync vmlinux EXPORT_SYMBOL_GPL +0x18036698 fat_update_time vmlinux EXPORT_SYMBOL_GPL +0x286b6116 __generic_block_fiemap vmlinux EXPORT_SYMBOL +0x4005f38c try_wait_for_completion vmlinux EXPORT_SYMBOL +0xda97cba9 param_set_ulong vmlinux EXPORT_SYMBOL +0xee2d0fc7 _local_bh_enable vmlinux EXPORT_SYMBOL +0x72d79d83 pgdir_shift vmlinux EXPORT_SYMBOL +0xee61ce3b nf_ct_expect_alloc net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xa6a3f65c drm_fb_helper_deferred_io drivers/gpu/drm/drm_kms_helper 
EXPORT_SYMBOL +0xf4d2cd04 drm_kms_helper_poll_init drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xb14cc6b6 kvm_arch_has_noncoherent_dma arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0xd6ed3e99 tcp_mmap vmlinux EXPORT_SYMBOL +0x7ba0a807 tpm_chip_start vmlinux EXPORT_SYMBOL_GPL +0xec01448a generic_key_instantiate vmlinux EXPORT_SYMBOL +0xa3c2e0be sysfs_unmerge_group vmlinux EXPORT_SYMBOL_GPL +0xa776e34d __cancel_dirty_page vmlinux EXPORT_SYMBOL +0x1e04baac sched_trace_cfs_rq_path vmlinux EXPORT_SYMBOL_GPL +0x79defbe1 kthread_should_park vmlinux EXPORT_SYMBOL_GPL +0x018b3d1e intel_pt_validate_cap vmlinux EXPORT_SYMBOL_GPL +0xed307abd ceph_copy_to_page_vector net/ceph/libceph EXPORT_SYMBOL +0xb76fcd67 ib_mad_kernel_rmpp_agent drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x0de126c7 ib_modify_qp drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x56183d3e drm_calc_timestamping_constants drivers/gpu/drm/drm EXPORT_SYMBOL +0x348ed215 drm_fb_helper_cfb_imageblit drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0x1b89c6ee o2hb_fill_node_map fs/ocfs2/cluster/ocfs2_nodemanager EXPORT_SYMBOL_GPL +0xc5196999 ocfs2_dlm_unlock fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL +0xa4cad6d8 kvm_vcpu_map arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x70c52dc5 nf_skb_duplicated vmlinux EXPORT_SYMBOL_GPL +0x688806b7 devlink_params_unregister vmlinux EXPORT_SYMBOL_GPL +0xa1962460 fib_notifier_ops_register vmlinux EXPORT_SYMBOL +0x9f7338b4 dma_resv_init vmlinux EXPORT_SYMBOL +0x5dba52a9 add_random_ready_callback vmlinux EXPORT_SYMBOL +0xaf86dfbe dummy_con vmlinux EXPORT_SYMBOL_GPL +0xf9c1f9ab security_secctx_to_secid vmlinux EXPORT_SYMBOL +0x5c1d251c sysfs_create_link_nowarn vmlinux EXPORT_SYMBOL_GPL +0x4541fe0d mtrr_state vmlinux EXPORT_SYMBOL_GPL +0x19b83992 svc_encode_read_payload net/sunrpc/sunrpc EXPORT_SYMBOL_GPL +0xe90c5a1d mlx5_query_port_ets_rate_limit drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x63f76d25 cxgbi_hbas_remove drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0x6a600605 kvm_apic_match_dest arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x7c94c99a kvm_release_pfn_dirty arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x21e13cb3 inet_peer_xrlim_allow vmlinux EXPORT_SYMBOL +0x5f8884d9 nf_ct_attach vmlinux EXPORT_SYMBOL +0xc24f34c4 tcf_queue_work vmlinux EXPORT_SYMBOL +0x7aeff17b dev_pm_opp_unregister_notifier vmlinux EXPORT_SYMBOL +0x980ed6ac fcoe_link_speed_update vmlinux EXPORT_SYMBOL_GPL +0xe818b32b ata_bmdma_interrupt vmlinux EXPORT_SYMBOL_GPL +0xc1365323 acpi_enable_all_wakeup_gpes vmlinux EXPORT_SYMBOL +0xcf88e0ac devm_phy_put vmlinux EXPORT_SYMBOL_GPL +0xdce23a83 sbitmap_queue_wake_up vmlinux EXPORT_SYMBOL_GPL +0x0e54f32b register_user_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0x8b9ea582 ZSTD_copyDCtx lib/zstd/zstd_decompress EXPORT_SYMBOL +0x279be432 ZSTD_copyCCtx lib/zstd/zstd_compress EXPORT_SYMBOL +0xbb244c36 mlx5_query_port_ptys drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL +0x635381aa cxgb4_dbfifo_count drivers/net/ethernet/chelsio/cxgb4/cxgb4 EXPORT_SYMBOL +0xa1855b8c nfs_inode_attach_open_context fs/nfs/nfs EXPORT_SYMBOL_GPL +0x45e7b385 fscache_op_complete fs/fscache/fscache EXPORT_SYMBOL +0xc9df055a xfrm_policy_walk_init vmlinux EXPORT_SYMBOL +0x5c1a68b8 cpufreq_dbs_governor_limits vmlinux EXPORT_SYMBOL_GPL +0xe20a4284 sas_target_destroy vmlinux EXPORT_SYMBOL_GPL +0x995b4f24 driver_attach vmlinux EXPORT_SYMBOL_GPL +0x093712e5 acpi_purge_cached_objects vmlinux EXPORT_SYMBOL +0xddad7952 acpi_dbg_level vmlinux EXPORT_SYMBOL +0x18db7cc7 textsearch_register vmlinux EXPORT_SYMBOL +0x09eb3398 
bio_clone_blkg_association vmlinux EXPORT_SYMBOL_GPL +0x74baf17a tracing_is_on vmlinux EXPORT_SYMBOL_GPL +0x381a798a setup_max_cpus vmlinux EXPORT_SYMBOL +0x40183f79 commit_creds vmlinux EXPORT_SYMBOL +0x574eda34 des3_ede_decrypt lib/crypto/libdes EXPORT_SYMBOL_GPL +0xd13f1de3 __nf_ct_expect_find net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0x5f1dbbd1 drm_atomic_helper_commit_tail drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL +0xf12b4a85 iptunnel_metadata_reply vmlinux EXPORT_SYMBOL_GPL +0xe279363b garp_init_applicant vmlinux EXPORT_SYMBOL_GPL +0x360ece04 platform_device_register vmlinux EXPORT_SYMBOL_GPL +0x80d68d3e fb_register_client vmlinux EXPORT_SYMBOL +0xad0413d4 match_hex vmlinux EXPORT_SYMBOL +0x6bdefc7b rdma_nl_multicast drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x858c30d0 mlxsw_afa_block_create drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL +0x01b146ec __inet_lookup_listener vmlinux EXPORT_SYMBOL_GPL +0xfef779fa xt_find_jump_offset vmlinux EXPORT_SYMBOL +0xada0685b nf_unregister_sockopt vmlinux EXPORT_SYMBOL +0x338698b8 skb_realloc_headroom vmlinux EXPORT_SYMBOL +0xaeb5b1d7 dev_attr_em_message vmlinux EXPORT_SYMBOL_GPL +0x10bc586f software_node_fwnode vmlinux EXPORT_SYMBOL_GPL +0x4c2c5aaa platform_device_unregister vmlinux EXPORT_SYMBOL_GPL +0x4d22c2fc serial8250_set_isa_configurator vmlinux EXPORT_SYMBOL +0x5e2a434f acpi_data_fwnode_ops vmlinux EXPORT_SYMBOL_GPL +0xa3a04602 btree_geo64 vmlinux EXPORT_SYMBOL_GPL +0x01fdd34b generic_pipe_buf_get vmlinux EXPORT_SYMBOL +0xfa2d291c drm_mode_get_hv_timing drivers/gpu/drm/drm EXPORT_SYMBOL +0xee6fd961 nfs_may_open fs/nfs/nfs EXPORT_SYMBOL_GPL +0x23626096 ipv6_find_hdr vmlinux EXPORT_SYMBOL +0x444300bb ipv6_chk_addr_and_flags vmlinux EXPORT_SYMBOL +0x4ffc39b8 __dev_forward_skb vmlinux EXPORT_SYMBOL_GPL +0x754560b8 uhci_reset_hc vmlinux EXPORT_SYMBOL_GPL +0x3d445a6f iommu_page_response vmlinux EXPORT_SYMBOL_GPL +0x8c47160f tty_port_close_end vmlinux EXPORT_SYMBOL +0xba69719c pcie_bandwidth_available vmlinux EXPORT_SYMBOL +0x702946da ucs2_strlen vmlinux EXPORT_SYMBOL +0x9d8801c4 jbd2_journal_dirty_metadata vmlinux EXPORT_SYMBOL +0xe0aad9e7 account_locked_vm vmlinux EXPORT_SYMBOL_GPL +0xebbe1622 io_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x1bf4f232 handle_fasteoi_irq vmlinux EXPORT_SYMBOL_GPL +0x2410c338 x86_virt_spec_ctrl vmlinux EXPORT_SYMBOL_GPL +0x8d6f0003 xdr_restrict_buflen net/sunrpc/sunrpc EXPORT_SYMBOL +0xa77acea3 nf_ct_bridge_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL +0xd45c60c5 ib_set_client_data drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x116e58be rdma_copy_ah_attr drivers/infiniband/core/ib_core EXPORT_SYMBOL +0x886e8e8d mlx4_cq_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0x6de7f7ff acpi_video_get_backlight_type drivers/acpi/video EXPORT_SYMBOL +0x2c7b3458 twofish_enc_blk_ctr arch/x86/crypto/twofish-x86_64-3way EXPORT_SYMBOL_GPL +0xfefe1376 __vlan_find_dev_deep_rcu vmlinux EXPORT_SYMBOL +0xd53a19e7 md_flush_request vmlinux EXPORT_SYMBOL +0x79bc842c usb_anchor_suspend_wakeups vmlinux EXPORT_SYMBOL_GPL +0xe5e43736 sas_bios_param vmlinux EXPORT_SYMBOL_GPL +0x3e883b6f sas_attach_transport vmlinux EXPORT_SYMBOL +0x6555e048 blkcipher_walk_done vmlinux EXPORT_SYMBOL_GPL +0xe2c42a0e __close_fd vmlinux EXPORT_SYMBOL +0xc9481888 page_readlink vmlinux EXPORT_SYMBOL +0x07bf29cd get_cached_msi_msg vmlinux EXPORT_SYMBOL_GPL +0x2c635527 arch_invalidate_pmem vmlinux EXPORT_SYMBOL_GPL +0x6dc8c5d5 rdma_set_cq_moderation drivers/infiniband/core/ib_core EXPORT_SYMBOL 
+0x29caafea kvm_vcpu_gfn_to_pfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x48c6de99 kvm_vcpu_gfn_to_hva arch/x86/kvm/kvm EXPORT_SYMBOL_GPL +0x69f55901 inet_frag_reasm_prepare vmlinux EXPORT_SYMBOL +0x97431fc6 dma_fence_chain_init vmlinux EXPORT_SYMBOL +0xe643e319 virtqueue_kick vmlinux EXPORT_SYMBOL_GPL +0x1dd571e6 fb_copy_cmap vmlinux EXPORT_SYMBOL +0xd9d952d1 crypto_aes_sbox vmlinux EXPORT_SYMBOL +0x27639220 blk_verify_command vmlinux EXPORT_SYMBOL +0x2d39b0a7 kstrdup vmlinux EXPORT_SYMBOL +0x026addf6 freezing_slow_path vmlinux EXPORT_SYMBOL +0x9eab8d85 _raw_write_lock_irq vmlinux EXPORT_SYMBOL +0x211dcf13 mlx4_SET_PORT_user_mac drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL +0x61ae1d2d xas_pause vmlinux EXPORT_SYMBOL_GPL +0xcf4fdd4d _atomic_dec_and_lock vmlinux EXPORT_SYMBOL +0x68f37e9f __tracepoint_neigh_update vmlinux EXPORT_SYMBOL_GPL +0xd117293f add_dma_domain vmlinux EXPORT_SYMBOL_GPL +0x5bcc367f fcoe_libfc_config vmlinux EXPORT_SYMBOL_GPL +0x3ea98419 ata_bmdma_port_ops vmlinux EXPORT_SYMBOL_GPL +0x2bf5f473 ata_host_alloc vmlinux EXPORT_SYMBOL_GPL +0x1ba071aa is_software_node vmlinux EXPORT_SYMBOL_GPL +0xf532ebf5 blkg_print_stat_bytes vmlinux EXPORT_SYMBOL_GPL +0x7984eefc key_update vmlinux EXPORT_SYMBOL +0x550ce709 pat_enabled vmlinux EXPORT_SYMBOL_GPL +0xf6678db7 ib_get_cached_subnet_prefix drivers/infiniband/core/ib_core EXPORT_SYMBOL +0xa8b4d1d5 l3mdev_master_ifindex_rcu vmlinux EXPORT_SYMBOL_GPL +0x916e7c95 xfrm_register_km vmlinux EXPORT_SYMBOL +0xb20dc818 iscsi_conn_bind vmlinux EXPORT_SYMBOL_GPL +0xf04cb3c7 __generic_fsdax_supported vmlinux EXPORT_SYMBOL_GPL +0xe5b2d952 device_create_with_groups vmlinux EXPORT_SYMBOL_GPL +0xfbead616 virtqueue_get_vring_size vmlinux EXPORT_SYMBOL_GPL +0xab781570 fb_get_options vmlinux EXPORT_SYMBOL +0xa5be1d0b phy_init vmlinux EXPORT_SYMBOL_GPL +0xfb384d37 kasprintf vmlinux EXPORT_SYMBOL +0xb3089d09 debugfs_create_automount vmlinux EXPORT_SYMBOL +0x0371e892 simple_rename vmlinux EXPORT_SYMBOL +0x751ead31 vfs_getxattr vmlinux EXPORT_SYMBOL_GPL +0xddb76dd8 vfs_setxattr vmlinux EXPORT_SYMBOL_GPL +0x4205e25b trace_seq_path vmlinux EXPORT_SYMBOL_GPL +0xd6583add sched_setattr vmlinux EXPORT_SYMBOL_GPL +0xb62c133b kernel_param_unlock vmlinux EXPORT_SYMBOL +0xb2afda33 iw_cm_reject drivers/infiniband/core/iw_cm EXPORT_SYMBOL +0x81b9c102 amd64_get_dram_hole_info drivers/edac/amd64_edac_mod EXPORT_SYMBOL_GPL +0x850bb6db devlink_health_reporter_destroy vmlinux EXPORT_SYMBOL_GPL +0x4671028d sock_init_data vmlinux EXPORT_SYMBOL +0xaaa7d91f ata_host_get vmlinux EXPORT_SYMBOL_GPL +0xc2424641 agp3_generic_cleanup vmlinux EXPORT_SYMBOL +0x2c2eb31a tty_set_termios vmlinux EXPORT_SYMBOL_GPL +0x130afd75 acpi_get_sleep_type_data vmlinux EXPORT_SYMBOL +0x3a536bd7 ring_buffer_read_finish vmlinux EXPORT_SYMBOL_GPL +0xe02eb6d0 ring_buffer_commit_overrun_cpu vmlinux EXPORT_SYMBOL_GPL +0xdbf17652 _raw_spin_lock vmlinux EXPORT_SYMBOL +0x44214bba mlx4_vf_smi_enabled drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL +0xbb5d31c7 cxgbi_iscsi_cleanup drivers/scsi/cxgbi/libcxgbi EXPORT_SYMBOL_GPL +0xe2e88a4a dlm_print_one_lock fs/ocfs2/dlm/ocfs2_dlm EXPORT_SYMBOL_GPL +0xe0399917 rtnl_register_module vmlinux EXPORT_SYMBOL_GPL +0x5023877e put_cmsg_scm_timestamping64 vmlinux EXPORT_SYMBOL +0xf5e03630 class_dev_iter_init vmlinux EXPORT_SYMBOL_GPL +0x74936c2f clk_register_fixed_rate vmlinux EXPORT_SYMBOL_GPL +0xe62f5a90 pci_get_slot vmlinux EXPORT_SYMBOL +0xbaae42cb devm_gen_pool_create vmlinux EXPORT_SYMBOL +0xe419bc99 iowrite32be vmlinux EXPORT_SYMBOL 
+0x26f8f0b8 iowrite16be vmlinux EXPORT_SYMBOL
+0xff87cd18 lockref_get_not_dead vmlinux EXPORT_SYMBOL
+0x4050512f blk_queue_flag_set vmlinux EXPORT_SYMBOL
+0xd60f639e perf_aux_output_skip vmlinux EXPORT_SYMBOL_GPL
+0xbf1e9ae4 task_cgroup_path vmlinux EXPORT_SYMBOL_GPL
+0xf6f36dac module_disable_ro vmlinux EXPORT_SYMBOL_GPL
+0xefe4f679 ZSTD_CCtxWorkspaceBound lib/zstd/zstd_compress EXPORT_SYMBOL
+0xb9230d6d inet_diag_msg_attrs_fill net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0xf18711d4 nf_ct_helper_expectfn_register net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x6eec7036 vhost_log_access_ok drivers/vhost/vhost EXPORT_SYMBOL_GPL
+0xb1a8de49 ib_find_pkey drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x99dd289c ib_dealloc_pd_user drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xfeea43d7 vfio_register_iommu_driver drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x9dbda053 km_query vmlinux EXPORT_SYMBOL
+0x286c7cd5 inet_release vmlinux EXPORT_SYMBOL
+0x560b1580 alloc_dca_provider vmlinux EXPORT_SYMBOL_GPL
+0x7d964b77 mpt_resume vmlinux EXPORT_SYMBOL
+0xe163009d ahci_platform_get_resources vmlinux EXPORT_SYMBOL_GPL
+0x6a2d581f jbd2_journal_force_commit vmlinux EXPORT_SYMBOL
+0x1622512d xprt_alloc net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x3da980e6 rdma_addr_cancel drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xa83588eb dm_rh_recovery_end drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0xa8a5afa3 bch_btree_sort_partial drivers/md/bcache/bcache EXPORT_SYMBOL
+0xa2bbc4da usb_stor_CB_transport drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL
+0xa898e544 drm_connector_init drivers/gpu/drm/drm EXPORT_SYMBOL
+0x53ec5d40 phy_read_paged vmlinux EXPORT_SYMBOL
+0x9fce80db fb_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL
+0x8c8569cb kstrtoint vmlinux EXPORT_SYMBOL
+0x91351a47 blk_mq_sched_try_insert_merge vmlinux EXPORT_SYMBOL_GPL
+0x8a6633a4 crypto_alloc_akcipher vmlinux EXPORT_SYMBOL_GPL
+0x110666f5 crypto_alloc_skcipher vmlinux EXPORT_SYMBOL_GPL
+0x7a2f6915 get_tree_single vmlinux EXPORT_SYMBOL
+0x434c0ba4 call_srcu vmlinux EXPORT_SYMBOL_GPL
+0x60a634c4 vfio_info_cap_add drivers/vfio/vfio EXPORT_SYMBOL_GPL
+0x60443957 mdio45_probe drivers/net/mdio EXPORT_SYMBOL
+0x1f93326b mlxsw_core_event_listener_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0x0cc48678 current_vcpu arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0x8a240bff __xas_next vmlinux EXPORT_SYMBOL_GPL
+0xfda5f96e cpufreq_get_policy vmlinux EXPORT_SYMBOL
+0xdd805159 ioc_list vmlinux EXPORT_SYMBOL
+0x89b3ade2 sha224_final vmlinux EXPORT_SYMBOL
+0x9c593c4c dquot_set_dqblk vmlinux EXPORT_SYMBOL
+0x5e12ecc1 __tracepoint_rpm_return_int vmlinux EXPORT_SYMBOL_GPL
+0x8f6cee77 __round_jiffies_relative vmlinux EXPORT_SYMBOL_GPL
+0x8f0748af rcu_expedite_gp vmlinux EXPORT_SYMBOL_GPL
+0x5f2da8c4 check_tsc_unstable vmlinux EXPORT_SYMBOL_GPL
+0x66be9f2d svc_pool_stats_open net/sunrpc/sunrpc EXPORT_SYMBOL
+0x56b51a28 rdma_nl_unicast_wait drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x58bcce9a mlx5_core_destroy_rqt drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0xb11d37b1 mlx5_core_destroy_dct drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0xe11895c1 vmci_event_unsubscribe drivers/misc/vmw_vmci/vmw_vmci EXPORT_SYMBOL_GPL
+0xb5b813d4 inet_twsk_alloc vmlinux EXPORT_SYMBOL_GPL
+0x16da95dd __serio_register_driver vmlinux EXPORT_SYMBOL
+0x29951712 pmem_sector_size vmlinux EXPORT_SYMBOL
+0x981f5bbd nvdimm_bus_check_dimm_count vmlinux EXPORT_SYMBOL_GPL
+0xf067257c register_memory_isolate_notifier vmlinux EXPORT_SYMBOL
+0x8880f24a dev_pm_qos_expose_latency_tolerance vmlinux EXPORT_SYMBOL_GPL
+0x48476bcb intel_gtt_insert_page vmlinux EXPORT_SYMBOL
+0x1c057347 tty_put_char vmlinux EXPORT_SYMBOL_GPL
+0x09501b7f acpi_release_memory vmlinux EXPORT_SYMBOL_GPL
+0x26c622ee percpu_ref_switch_to_percpu vmlinux EXPORT_SYMBOL_GPL
+0x329aecd2 crypto_register_ahashes vmlinux EXPORT_SYMBOL_GPL
+0x037a0cba kfree vmlinux EXPORT_SYMBOL
+0x5c724709 memory_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x1936c1a2 __rt_mutex_init vmlinux EXPORT_SYMBOL_GPL
+0xd9d8fd16 register_restart_handler vmlinux EXPORT_SYMBOL
+0x8e2d1236 ex_handler_wrmsr_unsafe vmlinux EXPORT_SYMBOL
+0x5a98e821 ib_modify_device drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x67e37067 target_show_cmd drivers/target/target_core_mod EXPORT_SYMBOL
+0xe0ea0824 mlx5_lag_query_cong_counters drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL
+0x12b908eb vlan_dev_real_dev vmlinux EXPORT_SYMBOL
+0xf7dd4bfe xfrm_state_delete vmlinux EXPORT_SYMBOL
+0x321dab10 nf_register_sockopt vmlinux EXPORT_SYMBOL
+0xe3fe4e7c sock_kmalloc vmlinux EXPORT_SYMBOL
+0x7d6358ca genphy_c45_read_mdix vmlinux EXPORT_SYMBOL_GPL
+0x80988059 sas_target_alloc vmlinux EXPORT_SYMBOL_GPL
+0x7cd7d6be __tracepoint_iscsi_dbg_conn vmlinux EXPORT_SYMBOL_GPL
+0xbe243c0d amd_iommu_domain_set_gcr3 vmlinux EXPORT_SYMBOL
+0xe8fbf4fa __alloc_bucket_spinlocks vmlinux EXPORT_SYMBOL
+0xfa600a92 housekeeping_test_cpu vmlinux EXPORT_SYMBOL_GPL
+0xf881cecd load_fixmap_gdt vmlinux EXPORT_SYMBOL_GPL
+0x9fefa3cb ceph_calc_file_object_mapping net/ceph/libceph EXPORT_SYMBOL
+0x4845f474 l2tp_tunnel_free net/l2tp/l2tp_core EXPORT_SYMBOL
+0xe94712da xprt_destroy_backchannel net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x4b5445a4 rdma_init_netdev drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x7286e3ca mlx5_buf_free drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x6275e8d2 mlx4_buf_free drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x107742a9 drm_get_subpixel_order_name drivers/gpu/drm/drm EXPORT_SYMBOL
+0x0712e21d drm_edid_get_monitor_name drivers/gpu/drm/drm EXPORT_SYMBOL
+0x717acc3b drm_gem_lock_reservations drivers/gpu/drm/drm EXPORT_SYMBOL
+0x9939eba0 backlight_unregister_notifier drivers/video/backlight/backlight EXPORT_SYMBOL
+0xda0ba367 nfs_free_inode fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xecdcabd2 copy_user_generic_unrolled vmlinux EXPORT_SYMBOL
+0x06106aba br_multicast_has_querier_anywhere vmlinux EXPORT_SYMBOL_GPL
+0x7f01ab8e ip_tunnel_get_stats64 vmlinux EXPORT_SYMBOL_GPL
+0x6f3a2e83 dax_region_put vmlinux EXPORT_SYMBOL_GPL
+0x42fee84d acpi_dma_controller_register vmlinux EXPORT_SYMBOL_GPL
+0x766cffe3 pci_lost_interrupt vmlinux EXPORT_SYMBOL
+0xe7d4daac seq_list_next vmlinux EXPORT_SYMBOL
+0xc41ff0e9 drm_mode_put_tile_group drivers/gpu/drm/drm EXPORT_SYMBOL
+0xfd7adc4f nfs_alloc_fattr fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xe02c9c92 __xa_erase vmlinux EXPORT_SYMBOL
+0x2fe252cc unregister_inet6addr_notifier vmlinux EXPORT_SYMBOL
+0x77e5c6b0 devm_hwmon_device_unregister vmlinux EXPORT_SYMBOL_GPL
+0x7704383d tpm_chip_register vmlinux EXPORT_SYMBOL_GPL
+0xcf8dc068 virtqueue_get_buf_ctx vmlinux EXPORT_SYMBOL_GPL
+0x399ad043 __kfifo_dma_out_finish_r vmlinux EXPORT_SYMBOL
+0x9aa71c2a efi_query_variable_store vmlinux EXPORT_SYMBOL_GPL
+0xa5b4f1f4 set_pages_array_wb vmlinux EXPORT_SYMBOL
+0x686fec98 set_pages_array_wt vmlinux EXPORT_SYMBOL_GPL
+0xab150296 set_pages_array_wc vmlinux EXPORT_SYMBOL
+0xecafdb54 ip6_tnl_rcv net/ipv6/ip6_tunnel EXPORT_SYMBOL
+0x18a2e29f nf_tproxy_laddr6 net/ipv6/netfilter/nf_tproxy_ipv6 EXPORT_SYMBOL_GPL
+0x9a66e7c5 mlxsw_core_rx_listener_unregister drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xdaa295db drm_hdmi_avi_infoframe_colorspace drivers/gpu/drm/drm EXPORT_SYMBOL
+0x18268bfd strp_done vmlinux EXPORT_SYMBOL_GPL
+0x70c8b8f2 ipv6_chk_addr vmlinux EXPORT_SYMBOL
+0x4fff80f4 neigh_xmit vmlinux EXPORT_SYMBOL
+0xb73713d7 nvmem_add_cell_lookups vmlinux EXPORT_SYMBOL_GPL
+0x74519862 dev_pm_qos_flags vmlinux EXPORT_SYMBOL_GPL
+0x5cd7ee0a __devm_alloc_percpu vmlinux EXPORT_SYMBOL_GPL
+0xd8f712d0 devm_kstrdup vmlinux EXPORT_SYMBOL_GPL
+0xd6721954 amd_iommu_device_info vmlinux EXPORT_SYMBOL
+0x1e3ac82d uart_suspend_port vmlinux EXPORT_SYMBOL
+0xab3697e4 irq_poll_init vmlinux EXPORT_SYMBOL
+0x049a1006 __blkdev_driver_ioctl vmlinux EXPORT_SYMBOL_GPL
+0x4a16ae85 blk_alloc_queue_node vmlinux EXPORT_SYMBOL
+0xe5dc9245 ceph_parse_options net/ceph/libceph EXPORT_SYMBOL
+0x4afedab1 xprtiod_workqueue net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x8e88c38b nf_connlabels_get net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0xf92b8a3d dm_rh_get_region_size drivers/md/dm-region-hash EXPORT_SYMBOL_GPL
+0x5fe3a147 kvm_hv_get_assist_page arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xe75bede8 hid_field_extract vmlinux EXPORT_SYMBOL_GPL
+0x3824b2ef scsi_device_resume vmlinux EXPORT_SYMBOL
+0xd96393d4 ata_bmdma_qc_issue vmlinux EXPORT_SYMBOL_GPL
+0xdaf03cf1 platform_get_irq_byname_optional vmlinux EXPORT_SYMBOL_GPL
+0x932b6202 queue_iova vmlinux EXPORT_SYMBOL_GPL
+0x66452a5b sbitmap_show vmlinux EXPORT_SYMBOL_GPL
+0xdd8585d7 kernel_read_file_from_path vmlinux EXPORT_SYMBOL_GPL
+0x561eb75c ib_dealloc_xrcd drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0xad8095ed ipv6_find_tlv vmlinux EXPORT_SYMBOL_GPL
+0x7fd3600b neigh_changeaddr vmlinux EXPORT_SYMBOL
+0xb3b05944 devm_free_percpu vmlinux EXPORT_SYMBOL_GPL
+0x69c94d74 do_unregister_con_driver vmlinux EXPORT_SYMBOL_GPL
+0xa120d33c tty_unregister_ldisc vmlinux EXPORT_SYMBOL
+0xcf33b5a5 tty_standard_install vmlinux EXPORT_SYMBOL_GPL
+0x710c73b6 crypto_unregister_notifier vmlinux EXPORT_SYMBOL_GPL
+0x79df4426 invalidate_inode_pages2 vmlinux EXPORT_SYMBOL_GPL
+0x97e7f902 trace_vbprintk vmlinux EXPORT_SYMBOL_GPL
+0x216de4e1 rcu_get_gp_kthreads_prio vmlinux EXPORT_SYMBOL_GPL
+0x5dbcfa4f boot_cpu_physical_apicid vmlinux EXPORT_SYMBOL_GPL
+0x9a6451dd udp_sock_create4 net/ipv4/udp_tunnel EXPORT_SYMBOL
+0x73f64268 ipt_do_table net/ipv4/netfilter/ip_tables EXPORT_SYMBOL
+0x53603447 __hw_addr_ref_sync_dev vmlinux EXPORT_SYMBOL
+0xb12df95e generic_xdp_tx vmlinux EXPORT_SYMBOL_GPL
+0xaf923f60 dma_resv_get_fences_rcu vmlinux EXPORT_SYMBOL_GPL
+0xc3114ac3 nvdimm_namespace_detach_btt vmlinux EXPORT_SYMBOL
+0x141036c4 nvdimm_namespace_attach_btt vmlinux EXPORT_SYMBOL
+0xe07fe508 pnp_possible_config vmlinux EXPORT_SYMBOL
+0xbf62d313 kmem_cache_destroy vmlinux EXPORT_SYMBOL
+0xb5c9c351 add_to_page_cache_locked vmlinux EXPORT_SYMBOL
+0x4e109192 ring_buffer_entries vmlinux EXPORT_SYMBOL_GPL
+0x7da4db61 nft_set_gc_batch_alloc net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0xd7ab2c0c speedstep_detect_processor drivers/cpufreq/speedstep-lib EXPORT_SYMBOL_GPL
+0x8dc217f2 drm_mode_object_find drivers/gpu/drm/drm EXPORT_SYMBOL
+0xb0b05ff5 drm_clflush_sg drivers/gpu/drm/drm EXPORT_SYMBOL
+0x7be8b811 nfs_request_remove_commit_list fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x3baef836 alloc_skb_for_msg vmlinux EXPORT_SYMBOL_GPL
+0x9de5d769 is_nd_dax vmlinux EXPORT_SYMBOL
+0x64308cb2 blkcg_root_css vmlinux EXPORT_SYMBOL_GPL
+0x6c64771d proc_create_net_data vmlinux EXPORT_SYMBOL_GPL
+0xa4c48917 d_instantiate_anon vmlinux EXPORT_SYMBOL
+0xd8cbb3b6 rpc_proc_register net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xd754f92a svc_set_num_threads_sync net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xf2a36612 pptp_msg_name net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL
+0x3315cca6 nf_connlabels_replace net/netfilter/nf_conntrack EXPORT_SYMBOL_GPL
+0x89783bda dm_array_cursor_end drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xe68d70a9 __tracepoint_bcache_write drivers/md/bcache/bcache EXPORT_SYMBOL_GPL
+0x9333ffcd drm_atomic_helper_plane_destroy_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x91837d3c nfs_fs_mount_common fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xa532b8d5 nf_ct_get_tuple_skb vmlinux EXPORT_SYMBOL
+0xcb2bfe2b nvmem_register_notifier vmlinux EXPORT_SYMBOL_GPL
+0x28438d4f phy_mii_ioctl vmlinux EXPORT_SYMBOL
+0xf167cb7a fcoe_wwn_to_str vmlinux EXPORT_SYMBOL_GPL
+0xecde6bf7 vring_new_virtqueue vmlinux EXPORT_SYMBOL_GPL
+0xa8caa845 clk_bulk_put_all vmlinux EXPORT_SYMBOL
+0xd2e2a9d0 hdmi_spd_infoframe_pack_only vmlinux EXPORT_SYMBOL
+0x18728552 sprint_OID vmlinux EXPORT_SYMBOL_GPL
+0xfc201b66 sprint_oid vmlinux EXPORT_SYMBOL_GPL
+0xc71741f3 acomp_request_free vmlinux EXPORT_SYMBOL_GPL
+0x26d24cb8 vm_event_states vmlinux EXPORT_SYMBOL
+0x5dcb432b __module_get vmlinux EXPORT_SYMBOL
+0xfd94814e complete_all vmlinux EXPORT_SYMBOL
+0x3c1902d9 __release_region vmlinux EXPORT_SYMBOL
+0x1d957c57 r5c_journal_mode_set vmlinux EXPORT_SYMBOL
+0x5ed040b0 pm_set_vt_switch vmlinux EXPORT_SYMBOL
+0xed34ebbc acpi_any_gpe_status_set vmlinux EXPORT_SYMBOL
+0xa859468e bdi_register vmlinux EXPORT_SYMBOL
+0xcf54ea93 async_unregister_domain vmlinux EXPORT_SYMBOL_GPL
+0xf40701ab ceph_object_locator_to_pg net/ceph/libceph EXPORT_SYMBOL
+0xfb57975a xprt_alloc_slot net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x2b785e72 rdma_destroy_id drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x4770f43d rdma_destroy_qp drivers/infiniband/core/rdma_cm EXPORT_SYMBOL
+0x6a75720b vfio_virqfd_disable drivers/vfio/vfio_virqfd EXPORT_SYMBOL_GPL
+0xc248b707 iscsit_add_cmd_to_immediate_queue drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x43a9b87e mlxsw_afa_block_terminate drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xa9a4c775 drm_panel_get_modes drivers/gpu/drm/drm EXPORT_SYMBOL
+0x6c39fe6e __drm_atomic_helper_private_obj_duplicate_state drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0xf2b3fbc6 nfs_pgheader_init fs/nfs/nfs EXPORT_SYMBOL_GPL
+0x83c52fba xfrm4_protocol_init vmlinux EXPORT_SYMBOL
+0x14640a9b __iptunnel_pull_header vmlinux EXPORT_SYMBOL_GPL
+0x1dbb1660 inet_csk_clear_xmit_timers vmlinux EXPORT_SYMBOL
+0x64252865 reuseport_alloc vmlinux EXPORT_SYMBOL
+0x7e907ce2 nvdimm_bus_unregister vmlinux EXPORT_SYMBOL_GPL
+0xe10cd6ad erst_get_record_id_begin vmlinux EXPORT_SYMBOL_GPL
+0x7129e5f8 hex_asc vmlinux EXPORT_SYMBOL
+0x6708386a fat_time_unix2fat vmlinux EXPORT_SYMBOL_GPL
+0x80c748ac d_tmpfile vmlinux EXPORT_SYMBOL
+0xa20e183d mds_idle_clear vmlinux EXPORT_SYMBOL_GPL
+0xa6dff490 nf_nat_follow_master net/netfilter/nf_nat EXPORT_SYMBOL
+0x184621a4 ib_get_rdma_header_version drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x2c68ced3 mlxsw_core_read_frc_h drivers/net/ethernet/mellanox/mlxsw/mlxsw_core EXPORT_SYMBOL
+0xa3e1ef7f mlx5_query_hca_vport_node_guid drivers/net/ethernet/mellanox/mlx5/core/mlx5_core EXPORT_SYMBOL_GPL
+0x002a753c drm_modeset_lock_single_interruptible drivers/gpu/drm/drm EXPORT_SYMBOL
+0xc1660e2b ocfs2_stack_glue_register fs/ocfs2/ocfs2_stackglue EXPORT_SYMBOL_GPL
+0xb2f3dded fuse_dev_release fs/fuse/fuse EXPORT_SYMBOL_GPL
+0x96b29254 strncasecmp vmlinux EXPORT_SYMBOL
+0xe7b6b9a9 srp_rport_add vmlinux EXPORT_SYMBOL_GPL
+0x66f6d4e6 fc_remote_port_rolechg vmlinux EXPORT_SYMBOL
+0xf8f3a0fb ata_ratelimit vmlinux EXPORT_SYMBOL_GPL
+0x2248aea2 nvme_init_identify vmlinux EXPORT_SYMBOL_GPL
+0x5377041f agp_copy_info vmlinux EXPORT_SYMBOL
+0x23e4fb04 agp_free_memory vmlinux EXPORT_SYMBOL
+0x5a0b2dbe tty_get_pgrp vmlinux EXPORT_SYMBOL_GPL
+0x8caf9305 uuid_is_valid vmlinux EXPORT_SYMBOL
+0x52c8e2c8 blkcg_dkstats_find vmlinux EXPORT_SYMBOL_GPL
+0x9520f26e unregister_asymmetric_key_parser vmlinux EXPORT_SYMBOL_GPL
+0xda9b9638 default_llseek vmlinux EXPORT_SYMBOL
+0xdeb32765 ring_buffer_oldest_event_ts vmlinux EXPORT_SYMBOL_GPL
+0x3c427f67 cpu_die_map vmlinux EXPORT_SYMBOL
+0x77e5f76d svc_bind net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0xcfee5a2e cm_class drivers/infiniband/core/ib_cm EXPORT_SYMBOL
+0xb33c2252 kvm_mmu_load arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xa0cd0d83 tcp_read_sock vmlinux EXPORT_SYMBOL
+0x76199f4a __flow_indr_block_cb_unregister vmlinux EXPORT_SYMBOL_GPL
+0x5b841dba scsi_host_busy vmlinux EXPORT_SYMBOL
+0x051d58e8 dma_fence_wait_any_timeout vmlinux EXPORT_SYMBOL
+0xb2a9756b virtqueue_get_desc_addr vmlinux EXPORT_SYMBOL_GPL
+0x95c37046 pci_reset_pri vmlinux EXPORT_SYMBOL_GPL
+0xb89b6e6b guid_parse vmlinux EXPORT_SYMBOL
+0xa9e05660 io_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL
+0x4eb6842a rpc_peeraddr net/sunrpc/sunrpc EXPORT_SYMBOL_GPL
+0x89f767d2 nft_obj_lookup net/netfilter/nf_tables EXPORT_SYMBOL_GPL
+0x3e3e88eb rdma_link_register drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x6d340cfb drm_self_refresh_helper_cleanup drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x64e6d830 nfs_put_lock_context fs/nfs/nfs EXPORT_SYMBOL_GPL
+0xba8ed7e9 cpuidle_register_driver vmlinux EXPORT_SYMBOL_GPL
+0x096556d9 rdev_clear_badblocks vmlinux EXPORT_SYMBOL_GPL
+0xc932cca9 scsi_eh_prep_cmnd vmlinux EXPORT_SYMBOL
+0xe0aeed36 tty_port_register_device_attr vmlinux EXPORT_SYMBOL_GPL
+0xb8aafa35 pci_set_cacheline_size vmlinux EXPORT_SYMBOL_GPL
+0x373714c1 copy_page_from_iter vmlinux EXPORT_SYMBOL
+0x815b5dd4 match_octal vmlinux EXPORT_SYMBOL
+0x8576c883 cdev_device_del vmlinux EXPORT_SYMBOL
+0xf430ba59 cdev_device_add vmlinux EXPORT_SYMBOL
+0x6081c558 bch_btree_keys_free drivers/md/bcache/bcache EXPORT_SYMBOL
+0xe276ffec i2c_smbus_write_byte drivers/i2c/i2c-core EXPORT_SYMBOL
+0xf0767f5e xt_register_targets vmlinux EXPORT_SYMBOL
+0x78a73ec3 scsi_register_driver vmlinux EXPORT_SYMBOL
+0xacaa4c72 dma_fence_match_context vmlinux EXPORT_SYMBOL
+0x827b66d9 unregister_virtio_driver vmlinux EXPORT_SYMBOL_GPL
+0x148f46e3 hdmi_avi_infoframe_init vmlinux EXPORT_SYMBOL
+0xb15b4109 crc32c vmlinux EXPORT_SYMBOL
+0xea2a7129 iomap_dio_rw vmlinux EXPORT_SYMBOL_GPL
+0x78292861 vfs_fsync vmlinux EXPORT_SYMBOL
+0x2bff3ca6 poll_freewait vmlinux EXPORT_SYMBOL
+0x40c7247c si_meminfo vmlinux EXPORT_SYMBOL
+0xd1b9a720 kthread_create_on_node vmlinux EXPORT_SYMBOL
+0x9e64fbfe rtc_cmos_read vmlinux EXPORT_SYMBOL
+0xdd2a5941 vhost_chr_poll drivers/vhost/vhost EXPORT_SYMBOL
+0x3a905c56 iscsit_free_cmd drivers/target/iscsi/iscsi_target_mod EXPORT_SYMBOL
+0x7890d535 dm_cache_policy_get_name drivers/md/dm-cache EXPORT_SYMBOL_GPL
+0xe36d45b6 crypto_finalize_akcipher_request crypto/crypto_engine EXPORT_SYMBOL_GPL
+0xeda89d4e kvm_unmap_gfn arch/x86/kvm/kvm EXPORT_SYMBOL_GPL
+0xd2237016 radix_tree_delete_item vmlinux EXPORT_SYMBOL
+0xb1647fc2 devlink_info_version_running_put vmlinux EXPORT_SYMBOL_GPL
+0xe4e6a5ef component_add vmlinux EXPORT_SYMBOL_GPL
+0xa8054643 iommu_domain_set_attr vmlinux EXPORT_SYMBOL_GPL
+0x894bb60a iommu_domain_get_attr vmlinux EXPORT_SYMBOL_GPL
+0x9afcf48a _copy_from_iter_full_nocache vmlinux EXPORT_SYMBOL
+0x8d62b23a blkg_print_stat_bytes_recursive vmlinux EXPORT_SYMBOL_GPL
+0x5b6c00e6 xor_blocks vmlinux EXPORT_SYMBOL
+0xe0f61954 af_alg_free_resources vmlinux EXPORT_SYMBOL_GPL
+0x5e95b1cd current_umask vmlinux EXPORT_SYMBOL
+0x6ae33400 get_fs_type vmlinux EXPORT_SYMBOL
+0xfa85b609 ref_module vmlinux EXPORT_SYMBOL_GPL
+0x40d59096 unregister_restart_handler vmlinux EXPORT_SYMBOL
+0x6ea7d92f nf_nat_pptp_hook_outbound net/netfilter/nf_conntrack_pptp EXPORT_SYMBOL_GPL
+0x0fb815dc mlx4_alloc_hwq_res drivers/net/ethernet/mellanox/mlx4/mlx4_core EXPORT_SYMBOL_GPL
+0x29f078d1 drm_mode_legacy_fb_format drivers/gpu/drm/drm EXPORT_SYMBOL
+0x45151682 usb_hub_claim_port vmlinux EXPORT_SYMBOL_GPL
+0x16ec5bba tty_driver_kref_put vmlinux EXPORT_SYMBOL
+0x6c389761 acpi_bus_get_private_data vmlinux EXPORT_SYMBOL_GPL
+0x80d5e57a mpi_free vmlinux EXPORT_SYMBOL_GPL
+0x1449b4ea dquot_acquire vmlinux EXPORT_SYMBOL
+0x3c3fce39 __local_bh_enable_ip vmlinux EXPORT_SYMBOL
+0x8ebcc5fb drm_atomic_get_private_obj_state drivers/gpu/drm/drm EXPORT_SYMBOL
+0x765dc392 drm_dp_mst_topology_state_funcs drivers/gpu/drm/drm_kms_helper EXPORT_SYMBOL
+0x225f1075 xfrm_local_error vmlinux EXPORT_SYMBOL_GPL
+0xe1944c6b sock_prot_inuse_get vmlinux EXPORT_SYMBOL_GPL
+0x1f1356cc usb_wakeup_notification vmlinux EXPORT_SYMBOL_GPL
+0xf92971ce mpt_send_handshake_request vmlinux EXPORT_SYMBOL
+0x5b32e347 phy_support_asym_pause vmlinux EXPORT_SYMBOL
+0x2c612f82 fc_exch_recv vmlinux EXPORT_SYMBOL
+0x998d79d6 x509_decode_time vmlinux EXPORT_SYMBOL_GPL
+0xcc36bd39 generic_write_end vmlinux EXPORT_SYMBOL
+0xbdb4aa0e seq_file_path vmlinux EXPORT_SYMBOL
+0x67955ce6 profile_hits vmlinux EXPORT_SYMBOL_GPL
+0x27880942 devm_request_threaded_irq vmlinux EXPORT_SYMBOL
+0x52b2da04 flow_offload_free net/netfilter/nf_flow_table EXPORT_SYMBOL_GPL
+0x37cf991a nat_t120_hook net/netfilter/nf_conntrack_h323 EXPORT_SYMBOL_GPL
+0x6e1e3821 dm_array_walk drivers/md/persistent-data/dm-persistent-data EXPORT_SYMBOL_GPL
+0xd61772c6 udp6_seq_ops vmlinux EXPORT_SYMBOL
+0x568aa691 mini_qdisc_pair_init vmlinux EXPORT_SYMBOL
+0xca385a63 bpf_prog_create vmlinux EXPORT_SYMBOL_GPL
+0x2a4cf402 property_entries_free vmlinux EXPORT_SYMBOL_GPL
+0x10f40912 security_inode_copy_up vmlinux EXPORT_SYMBOL
+0x7a4497db kzfree vmlinux EXPORT_SYMBOL
+0x352ec68b bpf_offload_dev_destroy vmlinux EXPORT_SYMBOL_GPL
+0xc9c37d51 ceph_cls_set_cookie net/ceph/libceph EXPORT_SYMBOL
+0x7f2d35df inet_sk_diag_fill net/ipv4/inet_diag EXPORT_SYMBOL_GPL
+0x5129e114 arpt_unregister_table net/ipv4/netfilter/arp_tables EXPORT_SYMBOL
+0x2513572c rdma_link_unregister drivers/infiniband/core/ib_core EXPORT_SYMBOL
+0x4a49e9a8 tcp_ca_openreq_child vmlinux EXPORT_SYMBOL_GPL
+0x33b0409d bpf_prog_destroy vmlinux EXPORT_SYMBOL_GPL
+0x052fed31 devm_remove_action vmlinux EXPORT_SYMBOL_GPL
+0xd2aa4f73 devm_clk_put vmlinux EXPORT_SYMBOL
+0x21be37e1 hdmi_avi_infoframe_check vmlinux EXPORT_SYMBOL
+0xa66ebcc8 pci_enable_atomic_ops_to_root vmlinux EXPORT_SYMBOL
+0xa84ce9e0 crypto_aes_inv_sbox vmlinux EXPORT_SYMBOL
+0x36f25335 bio_copy_data_iter vmlinux EXPORT_SYMBOL
+0xae2e3812 inode_insert5 vmlinux EXPORT_SYMBOL
+0x2d7eb406 deactivate_super vmlinux EXPORT_SYMBOL
+0x39284054 vmap vmlinux EXPORT_SYMBOL
+0xe92c0692 ww_mutex_lock_interruptible vmlinux EXPORT_SYMBOL
diff --git a/package/default/config.default b/package/default/config.default
deleted file mode 100644
index c592bfa82ed4..000000000000
--- a/package/default/config.default
+++ /dev/null
@@ -1,5077 +0,0 @@
-#
-# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.4.32-1 Kernel Configuration
-#
-
-#
-# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5)
-#
-CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80301
-CONFIG_CLANG_VERSION=0
-CONFIG_CC_CAN_LINK=y
-CONFIG_CC_HAS_ASM_GOTO=y
-CONFIG_CC_HAS_ASM_INLINE=y
-CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y
-CONFIG_IRQ_WORK=y
-CONFIG_BUILDTIME_EXTABLE_SORT=y
-CONFIG_THREAD_INFO_IN_TASK=y
-
-#
-# General setup
-#
-CONFIG_INIT_ENV_ARG_LIMIT=32
-# CONFIG_COMPILE_TEST is not set
-CONFIG_LOCALVERSION="-tlinux4-0001"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_BUILD_SALT=""
-CONFIG_HAVE_KERNEL_GZIP=y
-CONFIG_HAVE_KERNEL_BZIP2=y
-CONFIG_HAVE_KERNEL_LZMA=y
-CONFIG_HAVE_KERNEL_XZ=y
-CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_HAVE_KERNEL_LZ4=y
-CONFIG_KERNEL_GZIP=y
-# CONFIG_KERNEL_BZIP2 is not set
-# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_XZ is not set
-# CONFIG_KERNEL_LZO is not set
-# CONFIG_KERNEL_LZ4 is not set
-CONFIG_DEFAULT_HOSTNAME="(none)"
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_POSIX_MQUEUE_SYSCTL=y
-CONFIG_CROSS_MEMORY_ATTACH=y
-CONFIG_USELIB=y
-CONFIG_AUDIT=y
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_AUDITSYSCALL=y
-
-#
-# IRQ subsystem
-#
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
-CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_SPARSE_IRQ=y
-# CONFIG_GENERIC_IRQ_DEBUGFS is not set
-# end of IRQ subsystem
-
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_ARCH_CLOCKSOURCE_INIT=y
-CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
-CONFIG_GENERIC_CMOS_UPDATE=y
-
-#
-# Timers subsystem
-#
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ_COMMON=y
-# CONFIG_HZ_PERIODIC is not set
-CONFIG_NO_HZ_IDLE=y
-# CONFIG_NO_HZ_FULL is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-# end of Timers subsystem
-
-# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_PREEMPT is not set
-
-#
-# CPU/Task time and stats accounting
-#
-CONFIG_TICK_CPU_ACCOUNTING=y
-# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
-# CONFIG_IRQ_TIME_ACCOUNTING is not set
-CONFIG_HAVE_SCHED_AVG_IRQ=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_PSI=y
-CONFIG_PSI_DEFAULT_DISABLED=y
-# end of CPU/Task time and stats accounting
-
-CONFIG_CPU_ISOLATION=y
-
-#
-# RCU Subsystem
-#
-CONFIG_TREE_RCU=y
-# CONFIG_RCU_EXPERT is not set
-CONFIG_SRCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_RCU_NEED_SEGCBLIST=y
-# end of RCU Subsystem
-
-CONFIG_BUILD_BIN2C=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-# CONFIG_IKHEADERS is not set
-CONFIG_LOG_BUF_SHIFT=19
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
-CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
-CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-
-#
-# Scheduler features
-#
-# CONFIG_UCLAMP_TASK is not set
-# end of Scheduler features
-
-CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
-CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
-CONFIG_ARCH_SUPPORTS_INT128=y
-CONFIG_NUMA_BALANCING=y
-# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUPS=y
-CONFIG_PAGE_COUNTER=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_SWAP_ENABLED=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_BLK_CGROUP=y
-CONFIG_CGROUP_WRITEBACK=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_CGROUP_PIDS=y
-# CONFIG_CGROUP_RDMA is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CPUSETS=y
-CONFIG_PROC_PID_CPUSET=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_PERF=y
-CONFIG_CGROUP_BPF=y
-# CONFIG_CGROUP_DEBUG is not set
-CONFIG_SOCK_CGROUP_DATA=y
-CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
-# CONFIG_CHECKPOINT_RESTORE is not set
-CONFIG_SCHED_AUTOGROUP=y
-# CONFIG_SYSFS_DEPRECATED is not set
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_RD_GZIP=y
-# CONFIG_RD_BZIP2 is not set
-CONFIG_RD_LZMA=y
-# CONFIG_RD_XZ is not set
-# CONFIG_RD_LZO is not set
-CONFIG_RD_LZ4=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SYSCTL=y
-CONFIG_HAVE_UID16=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
-CONFIG_BPF=y
-CONFIG_EXPERT=y
-CONFIG_UID16=y
-CONFIG_MULTIUSER=y
-CONFIG_SGETMASK_SYSCALL=y
-CONFIG_SYSFS_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_FHANDLE=y
-CONFIG_POSIX_TIMERS=y
-CONFIG_PRINTK=y
-CONFIG_PRINTK_NMI=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_PCSPKR_PLATFORM=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_FUTEX_PI=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-CONFIG_IO_URING=y
-CONFIG_ADVISE_SYSCALLS=y
-CONFIG_MEMBARRIER=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
-CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_USERFAULTFD=y
-CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
-CONFIG_RSEQ=y
-# CONFIG_DEBUG_RSEQ is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_HAVE_PERF_EVENTS=y
-# CONFIG_PC104 is not set
-
-#
-# Kernel Performance Events And Counters
-#
-CONFIG_PERF_EVENTS=y
-# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
-# end of Kernel Performance Events And Counters
-
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-CONFIG_SLUB_MEMCG_SYSFS_ON=y
-# CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-# CONFIG_SLOB is not set
-# CONFIG_SLAB_MERGE_DEFAULT is not set
-# CONFIG_SLAB_FREELIST_RANDOM is not set
-# CONFIG_SLAB_FREELIST_HARDENED is not set
-# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
-CONFIG_SLUB_CPU_PARTIAL=y
-CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
-# end of General setup
-
-CONFIG_64BIT=y
-CONFIG_X86_64=y
-CONFIG_X86=y
-CONFIG_INSTRUCTION_DECODER=y
-CONFIG_OUTPUT_FORMAT="elf64-x86-64"
-CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_MMU=y
-CONFIG_ARCH_MMAP_RND_BITS_MIN=28
-CONFIG_ARCH_MMAP_RND_BITS_MAX=32
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_ARCH_HAS_FILTER_PGPROT=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
-CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
-CONFIG_ZONE_DMA32=y
-CONFIG_AUDIT_ARCH=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_INTEL_TXT=y
-CONFIG_X86_64_SMP=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_DYNAMIC_PHYSICAL_MASK=y
-CONFIG_PGTABLE_LEVELS=5
-CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
-
-#
-# Processor type and features
-#
-CONFIG_ZONE_DMA=y
-CONFIG_SMP=y
-CONFIG_X86_FEATURE_NAMES=y
-CONFIG_X86_X2APIC=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_GOLDFISH is not set
-# CONFIG_RETPOLINE is not set
-CONFIG_X86_CPU_RESCTRL=y
-CONFIG_X86_EXTENDED_PLATFORM=y
-CONFIG_X86_NUMACHIP=y
-# CONFIG_X86_VSMP is not set
-# CONFIG_X86_UV is not set
-# CONFIG_X86_GOLDFISH is not set
-# CONFIG_X86_INTEL_LPSS is not set
-CONFIG_X86_AMD_PLATFORM_DEVICE=y
-CONFIG_IOSF_MBI=m
-# CONFIG_IOSF_MBI_DEBUG is not set
-CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_HYPERVISOR_GUEST=y
-CONFIG_PARAVIRT=y
-# CONFIG_PARAVIRT_DEBUG is not set
-CONFIG_PARAVIRT_SPINLOCKS=y
-# CONFIG_XEN is not set
-CONFIG_KVM_GUEST=y
-CONFIG_ARCH_CPUIDLE_HALTPOLL=y
-CONFIG_PVH=y
-# CONFIG_KVM_DEBUG_FS is not set
-CONFIG_PARAVIRT_TIME_ACCOUNTING=y
-CONFIG_PARAVIRT_CLOCK=y
-CONFIG_JAILHOUSE_GUEST=y
-# CONFIG_ACRN_GUEST is not set
-# CONFIG_MK8 is not set
-# CONFIG_MPSC is not set
-# CONFIG_MCORE2 is not set
-# CONFIG_MATOM is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_INTERNODE_CACHE_SHIFT=6
-CONFIG_X86_L1_CACHE_SHIFT=6
-CONFIG_X86_TSC=y
-CONFIG_X86_CMPXCHG64=y
-CONFIG_X86_CMOV=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=64
-CONFIG_X86_DEBUGCTLMSR=y
-# CONFIG_PROCESSOR_SELECT is not set
-CONFIG_CPU_SUP_INTEL=y
-CONFIG_CPU_SUP_AMD=y
-CONFIG_CPU_SUP_HYGON=y
-CONFIG_CPU_SUP_CENTAUR=y
-CONFIG_CPU_SUP_ZHAOXIN=y
-CONFIG_HPET_TIMER=y
-CONFIG_HPET_EMULATE_RTC=y
-CONFIG_DMI=y
-CONFIG_GART_IOMMU=y
-# CONFIG_CALGARY_IOMMU is not set
-CONFIG_MAXSMP=y
-CONFIG_NR_CPUS_RANGE_BEGIN=8192
-CONFIG_NR_CPUS_RANGE_END=8192
-CONFIG_NR_CPUS_DEFAULT=8192
-CONFIG_NR_CPUS=8192
-CONFIG_SCHED_SMT=y
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_MC_PRIO=y
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_MCE=y
-CONFIG_X86_MCELOG_LEGACY=y
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_AMD=y
-CONFIG_X86_MCE_THRESHOLD=y
-CONFIG_X86_MCE_INJECT=m
-CONFIG_X86_THERMAL_VECTOR=y
-
-#
-# Performance monitoring
-#
-CONFIG_PERF_EVENTS_INTEL_UNCORE=y
-CONFIG_PERF_EVENTS_INTEL_RAPL=y
-CONFIG_PERF_EVENTS_INTEL_CSTATE=y
-CONFIG_PERF_EVENTS_AMD_POWER=y
-# end of Performance monitoring
-
-CONFIG_X86_16BIT=y
-CONFIG_X86_ESPFIX64=y
-CONFIG_X86_VSYSCALL_EMULATION=y
-# CONFIG_I8K is not set
-CONFIG_MICROCODE=y
-CONFIG_MICROCODE_INTEL=y
-CONFIG_MICROCODE_AMD=y
-CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=y
-CONFIG_X86_CPUID=y
-CONFIG_X86_5LEVEL=y
-CONFIG_X86_DIRECT_GBPAGES=y
-# CONFIG_X86_CPA_STATISTICS is not set
-CONFIG_AMD_MEM_ENCRYPT=y
-# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
-CONFIG_NUMA=y
-CONFIG_AMD_NUMA=y
-CONFIG_X86_64_ACPI_NUMA=y
-CONFIG_NODES_SPAN_OTHER_NODES=y
-# CONFIG_NUMA_EMU is not set
-CONFIG_NODES_SHIFT=10
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_MEMORY_PROBE=y
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-# CONFIG_X86_PMEM_LEGACY is not set
-CONFIG_X86_CHECK_BIOS_CORRUPTION=y
-# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
-CONFIG_X86_RESERVE_LOW=64
-CONFIG_MTRR=y
-CONFIG_MTRR_SANITIZER=y
-CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
-CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
-CONFIG_X86_PAT=y
-CONFIG_ARCH_USES_PG_UNCACHED=y
-CONFIG_ARCH_RANDOM=y
-CONFIG_X86_SMAP=y
-CONFIG_X86_INTEL_UMIP=y
-CONFIG_X86_INTEL_MPX=y
-CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
-CONFIG_X86_INTEL_TSX_MODE_OFF=y
-# CONFIG_X86_INTEL_TSX_MODE_ON is not set
-# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set
-CONFIG_EFI=y
-CONFIG_EFI_STUB=y
-# CONFIG_EFI_MIXED is not set
-CONFIG_SECCOMP=y
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
-# CONFIG_HZ_300 is not set
-# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
-CONFIG_SCHED_HRTICK=y
-CONFIG_KEXEC=y
-CONFIG_KEXEC_FILE=y
-CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
-# CONFIG_KEXEC_SIG is not set
-CONFIG_CRASH_DUMP=y
-CONFIG_PHYSICAL_START=0x1000000
-CONFIG_RELOCATABLE=y
-# CONFIG_RANDOMIZE_BASE is not set
-CONFIG_PHYSICAL_ALIGN=0x1000000
-CONFIG_DYNAMIC_MEMORY_LAYOUT=y
-CONFIG_HOTPLUG_CPU=y
-# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
-CONFIG_COMPAT_VDSO=y
-CONFIG_LEGACY_VSYSCALL_EMULATE=y
-# CONFIG_LEGACY_VSYSCALL_XONLY is not set
-# CONFIG_LEGACY_VSYSCALL_NONE is not set
-# CONFIG_CMDLINE_BOOL is not set
-CONFIG_MODIFY_LDT_SYSCALL=y
-CONFIG_HAVE_LIVEPATCH=y
-CONFIG_LIVEPATCH=y
-# end of Processor type and features
-
-CONFIG_ARCH_HAS_ADD_PAGES=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
-CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
-CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
-CONFIG_ARCH_ENABLE_THP_MIGRATION=y
-
-#
-# Power management and ACPI options
-#
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-# CONFIG_SUSPEND_SKIP_SYNC is not set
-# CONFIG_HIBERNATION is not set
-CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
-# CONFIG_PM_AUTOSLEEP is not set
-# CONFIG_PM_WAKELOCKS is not set
-CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_CLK=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-# CONFIG_ENERGY_MODEL is not set
-CONFIG_ARCH_SUPPORTS_ACPI=y
-CONFIG_ACPI=y
-CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
-CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
-CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
-# CONFIG_ACPI_DEBUGGER is not set
-CONFIG_ACPI_SPCR_TABLE=y
-CONFIG_ACPI_LPIT=y
-CONFIG_ACPI_SLEEP=y
-CONFIG_ACPI_PROCFS_POWER=y
-CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
-CONFIG_ACPI_EC_DEBUGFS=m
-# CONFIG_ACPI_AC is not set
-# CONFIG_ACPI_BATTERY is not set
-CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_VIDEO=m
-# CONFIG_ACPI_FAN is not set
-# CONFIG_ACPI_TAD is not set
-# CONFIG_ACPI_DOCK is not set
-CONFIG_ACPI_CPU_FREQ_PSS=y
-CONFIG_ACPI_PROCESSOR_CSTATE=y
-CONFIG_ACPI_PROCESSOR_IDLE=y
-CONFIG_ACPI_CPPC_LIB=y
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_IPMI=m
-CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
-CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_NUMA=y
-CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
-CONFIG_ACPI_TABLE_UPGRADE=y
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_PCI_SLOT=y
-CONFIG_ACPI_CONTAINER=y
-CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_ACPI_HOTPLUG_IOAPIC=y
-CONFIG_ACPI_SBS=m
-CONFIG_ACPI_HED=y
-CONFIG_ACPI_CUSTOM_METHOD=m
-CONFIG_ACPI_BGRT=y
-# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
-CONFIG_ACPI_NFIT=y
-# CONFIG_NFIT_SECURITY_DEBUG is not set
-CONFIG_ACPI_HMAT=y
-CONFIG_HAVE_ACPI_APEI=y
-CONFIG_HAVE_ACPI_APEI_NMI=y
-CONFIG_ACPI_APEI=y
-CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
-CONFIG_ACPI_APEI_MEMORY_FAILURE=y
-CONFIG_ACPI_APEI_EINJ=m
-CONFIG_ACPI_APEI_ERST_DEBUG=m
-# CONFIG_DPTF_POWER is not set
-CONFIG_ACPI_EXTLOG=m
-CONFIG_ACPI_ADXL=y
-# CONFIG_PMIC_OPREGION is not set
-# CONFIG_ACPI_CONFIGFS is not set
-CONFIG_X86_PM_TIMER=y
-# CONFIG_SFI is not set
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-# CONFIG_CPU_FREQ_STAT is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-
-#
-# CPU frequency scaling drivers
-#
-CONFIG_X86_INTEL_PSTATE=y
-CONFIG_X86_PCC_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ_CPB=y
-CONFIG_X86_POWERNOW_K8=m
-CONFIG_X86_AMD_FREQ_SENSITIVITY=m
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_P4_CLOCKMOD=m
-
-#
-# shared options
-#
-CONFIG_X86_SPEEDSTEP_LIB=m
-# end of CPU Frequency scaling
-
-#
-# CPU Idle
-#
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_LADDER=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-# CONFIG_CPU_IDLE_GOV_TEO is not set
-# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set
-CONFIG_HALTPOLL_CPUIDLE=y
-# end of CPU Idle
-
-CONFIG_INTEL_IDLE=y
-# end of Power management and ACPI options
-
-#
-# Bus options (PCI etc.)
-#
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_MMCONF_FAM10H=y
-# CONFIG_PCI_CNB20LE_QUIRK is not set
-# CONFIG_ISA_BUS is not set
-CONFIG_ISA_DMA_API=y
-CONFIG_AMD_NB=y
-# CONFIG_X86_SYSFB is not set
-# end of Bus options (PCI etc.)
-
-#
-# Binary Emulations
-#
-CONFIG_IA32_EMULATION=y
-# CONFIG_X86_X32 is not set
-CONFIG_COMPAT_32=y
-CONFIG_COMPAT=y
-CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
-CONFIG_SYSVIPC_COMPAT=y
-# end of Binary Emulations
-
-CONFIG_X86_DEV_DMA_OPS=y
-
-#
-# Firmware Drivers
-#
-# CONFIG_EDD is not set
-CONFIG_FIRMWARE_MEMMAP=y
-CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=y
-CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
-CONFIG_ISCSI_IBFT_FIND=y
-CONFIG_ISCSI_IBFT=m
-# CONFIG_FW_CFG_SYSFS is not set
-# CONFIG_GOOGLE_FIRMWARE is not set
-
-#
-# EFI (Extensible Firmware Interface) Support
-#
-CONFIG_EFI_VARS=y
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_VARS_PSTORE=m
-CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
-CONFIG_EFI_RUNTIME_MAP=y
-# CONFIG_EFI_FAKE_MEMMAP is not set
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-# CONFIG_EFI_BOOTLOADER_CONTROL is not set
-# CONFIG_EFI_CAPSULE_LOADER is not set
-# CONFIG_EFI_TEST is not set
-# CONFIG_APPLE_PROPERTIES is not set
-# CONFIG_RESET_ATTACK_MITIGATION is not set
-# CONFIG_EFI_RCI2_TABLE is not set
-# end of EFI (Extensible Firmware Interface) Support
-
-CONFIG_UEFI_CPER=y
-CONFIG_UEFI_CPER_X86=y
-CONFIG_EFI_EARLYCON=y
-
-#
-# Tegra firmware driver
-#
-# end of Tegra firmware driver
-# end of Firmware Drivers
-
-CONFIG_HAVE_KVM=y
-CONFIG_HAVE_KVM_IRQCHIP=y
-CONFIG_HAVE_KVM_IRQFD=y
-CONFIG_HAVE_KVM_IRQ_ROUTING=y
-CONFIG_HAVE_KVM_EVENTFD=y
-CONFIG_KVM_MMIO=y
-CONFIG_KVM_ASYNC_PF=y
-CONFIG_HAVE_KVM_MSI=y
-CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
-CONFIG_KVM_VFIO=y
-CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
-CONFIG_KVM_COMPAT=y
-CONFIG_HAVE_KVM_IRQ_BYPASS=y
-CONFIG_HAVE_KVM_NO_POLL=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_KVM_INTEL=m
-CONFIG_KVM_AMD=m
-# CONFIG_KVM_MMU_AUDIT is not set
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_SCSI=m
-CONFIG_VHOST_VSOCK=m
-CONFIG_VHOST=m
-# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
-
-#
-# General architecture-dependent options
-#
-CONFIG_CRASH_CORE=y
-CONFIG_KEXEC_CORE=y
-CONFIG_HOTPLUG_SMT=y
-CONFIG_OPROFILE=m
-CONFIG_OPROFILE_EVENT_MULTIPLEX=y
-CONFIG_HAVE_OPROFILE=y
-CONFIG_OPROFILE_NMI_TIMER=y
-CONFIG_KPROBES=y
-# CONFIG_JUMP_LABEL is not set
-CONFIG_OPTPROBES=y
-CONFIG_KPROBES_ON_FTRACE=y
-CONFIG_UPROBES=y
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_KRETPROBES=y
-CONFIG_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_IOREMAP_PROT=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_OPTPROBES=y
-CONFIG_HAVE_KPROBES_ON_FTRACE=y
-CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
-CONFIG_HAVE_NMI=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
-CONFIG_ARCH_HAS_SET_MEMORY=y
-CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
-CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
-CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
-CONFIG_HAVE_ASM_MODVERSIONS=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_RSEQ=y
-CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_HW_BREAKPOINT=y
-CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
-CONFIG_HAVE_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_PERF_EVENTS_NMI=y
-CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
-CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
-CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_SECCOMP_FILTER=y
-CONFIG_HAVE_ARCH_STACKLEAK=y
-CONFIG_HAVE_STACKPROTECTOR=y
-CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
-CONFIG_STACKPROTECTOR=y
-CONFIG_STACKPROTECTOR_STRONG=y
-CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_MOVE_PMD=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
-CONFIG_HAVE_ARCH_HUGE_VMAP=y
-CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
-CONFIG_HAVE_ARCH_SOFT_DIRTY=y
-CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
-CONFIG_HAVE_EXIT_THREAD=y
-CONFIG_ARCH_MMAP_RND_BITS=28
-CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
-CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
-CONFIG_HAVE_COPY_THREAD_TLS=y
-CONFIG_HAVE_STACK_VALIDATION=y
-CONFIG_HAVE_RELIABLE_STACKTRACE=y
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_COMPAT_OLD_SIGACTION=y
-CONFIG_64BIT_TIME=y
-CONFIG_COMPAT_32BIT_TIME=y
-CONFIG_HAVE_ARCH_VMAP_STACK=y
-CONFIG_VMAP_STACK=y
-CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
-CONFIG_STRICT_MODULE_RWX=y
-CONFIG_ARCH_HAS_REFCOUNT=y
-# CONFIG_REFCOUNT_FULL is not set
-CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
-CONFIG_ARCH_USE_MEMREMAP_PROT=y
-# CONFIG_LOCK_EVENT_COUNTS is not set
-CONFIG_ARCH_HAS_MEM_ENCRYPT=y
-
-#
-# GCOV-based kernel profiling
-#
-# CONFIG_GCOV_KERNEL is not set
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-# end of GCOV-based kernel profiling
-
-CONFIG_PLUGIN_HOSTCC=""
-CONFIG_HAVE_GCC_PLUGINS=y
-# end of General architecture-dependent options
-
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-# CONFIG_MODULE_FORCE_LOAD is not set
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_MODVERSIONS=y
-CONFIG_ASM_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_MODULE_SIG is not set
-# CONFIG_MODULE_COMPRESS is not set
-# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_TRIM_UNUSED_KSYMS is not set
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_BLOCK=y
-CONFIG_BLK_RQ_ALLOC_TIME=y
-CONFIG_BLK_SCSI_REQUEST=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_BLK_DEV_INTEGRITY=y
-# CONFIG_BLK_DEV_ZONED is not set
-CONFIG_BLK_DEV_THROTTLING=y
-# CONFIG_BLK_DEV_THROTTLING_LOW is not set
-# CONFIG_BLK_CMDLINE_PARSER is not set
-CONFIG_BLK_WBT=y
-CONFIG_BLK_CGROUP_IOLATENCY=y
-CONFIG_BLK_CGROUP_IOCOST=y
-CONFIG_BLK_WBT_MQ=y
-CONFIG_BLK_DEBUG_FS=y
-# CONFIG_BLK_SED_OPAL is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_AIX_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-CONFIG_LDM_PARTITION=y
-# CONFIG_LDM_DEBUG is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
-# CONFIG_CMDLINE_PARTITION is not set
-# end of Partition Types
-
-CONFIG_BLOCK_COMPAT=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_MQ_RDMA=y
-CONFIG_BLK_PM=y
-
-#
-# IO Schedulers
-#
-CONFIG_MQ_IOSCHED_DEADLINE=y
-CONFIG_MQ_IOSCHED_KYBER=y
-CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
-# CONFIG_BFQ_CGROUP_DEBUG is not set
-# end of IO Schedulers
-
-CONFIG_PREEMPT_NOTIFIERS=y
-CONFIG_PADATA=y
-CONFIG_ASN1=y
-CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
-CONFIG_INLINE_READ_UNLOCK=y
-CONFIG_INLINE_READ_UNLOCK_IRQ=y
-CONFIG_INLINE_WRITE_UNLOCK=y
-CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
-CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
-CONFIG_FREEZER=y
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-CONFIG_COMPAT_BINFMT_ELF=y
-CONFIG_ELFCORE=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_BINFMT_SCRIPT=y
-CONFIG_BINFMT_MISC=m
-CONFIG_COREDUMP=y
-# end of Executable file formats
-
-#
-# Memory Management options
-#
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM=y
-CONFIG_NEED_MULTIPLE_NODES=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_HAVE_FAST_GUP=y
-CONFIG_MEMORY_ISOLATION=y
-CONFIG_HAVE_BOOTMEM_INFO_NODE=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_SPARSE=y
-# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
-CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_MEMORY_BALLOON=y
-CONFIG_BALLOON_COMPACTION=y
-CONFIG_COMPACTION=y
-CONFIG_MIGRATION=y
-CONFIG_CONTIG_ALLOC=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_MMU_NOTIFIER=y
-CONFIG_KSM=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
-CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_MEMORY_FAILURE=y
-CONFIG_HWPOISON_INJECT=m
-CONFIG_TRANSPARENT_HUGEPAGE=y
-# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
-CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
-CONFIG_ARCH_WANTS_THP_SWAP=y
-CONFIG_THP_SWAP=y
-CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
-# CONFIG_CLEANCACHE is not set
-# CONFIG_FRONTSWAP is not set
-# CONFIG_CMA is not set
-# CONFIG_ZPOOL is not set
-# CONFIG_ZBUD is not set
-# CONFIG_ZSMALLOC is not set
-CONFIG_GENERIC_EARLY_IOREMAP=y
-# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
-# CONFIG_IDLE_PAGE_TRACKING is not set
-CONFIG_ARCH_HAS_PTE_DEVMAP=y
-CONFIG_ZONE_DEVICE=y
-CONFIG_DEV_PAGEMAP_OPS=y
-# CONFIG_DEVICE_PRIVATE is not set
-CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
-CONFIG_ARCH_HAS_PKEYS=y
-# CONFIG_PERCPU_STATS is not set
-# CONFIG_GUP_BENCHMARK is not set
-# CONFIG_READ_ONLY_THP_FOR_FS is not set
-CONFIG_ARCH_HAS_PTE_SPECIAL=y
-# end of Memory Management options
-
-CONFIG_NET=y
-CONFIG_NET_INGRESS=y
-CONFIG_NET_EGRESS=y
-CONFIG_NET_REDIRECT=y
-CONFIG_SKB_EXTENSIONS=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_DIAG=m
-CONFIG_UNIX=y
-CONFIG_UNIX_SCM=y
-CONFIG_UNIX_DIAG=m
-CONFIG_TLS=m
-# CONFIG_TLS_DEVICE is not set
-CONFIG_XFRM=y
-CONFIG_XFRM_ALGO=y
-CONFIG_XFRM_USER=y
-# CONFIG_XFRM_INTERFACE is not set
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_XFRM_IPCOMP=m
-CONFIG_NET_KEY=m
-CONFIG_NET_KEY_MIGRATE=y
-# CONFIG_SMC is not set
-CONFIG_XDP_SOCKETS=y
-# CONFIG_XDP_SOCKETS_DIAG is not set
-CONFIG_TOA=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_FIB_TRIE_STATS=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_ROUTE_CLASSID=y
-# CONFIG_IP_PNP is not set
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE_DEMUX=m
-CONFIG_NET_IP_TUNNEL=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE_COMMON=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_NET_IPVTI=m
-CONFIG_NET_UDP_TUNNEL=m
-# CONFIG_NET_FOU is not set
-# CONFIG_NET_FOU_IP_TUNNELS is not set
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-# CONFIG_INET_ESP_OFFLOAD is not set
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_TUNNEL=m
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_DIAG=m
-CONFIG_INET_TCP_DIAG=m
-CONFIG_INET_UDP_DIAG=m
-# CONFIG_INET_RAW_DIAG is not set
-# CONFIG_INET_DIAG_DESTROY is not set
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_TCP_CONG_WESTWOOD=m
-CONFIG_TCP_CONG_HTCP=m
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_VEGAS=m
-CONFIG_TCP_CONG_NV=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_TCP_CONG_DCTCP=m
-CONFIG_TCP_CONG_CDG=m
-CONFIG_TCP_CONG_BBR=m
-CONFIG_DEFAULT_CUBIC=y
-# CONFIG_DEFAULT_RENO is not set
-CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-# CONFIG_INET6_ESP_OFFLOAD is not set
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-# CONFIG_IPV6_ILA is not set
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-# CONFIG_IPV6_VTI is not set
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_SIT_6RD=y
-CONFIG_IPV6_NDISC_NODETYPE=y
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_GRE=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-# CONFIG_IPV6_SUBTREES is not set
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IPV6_PIMSM_V2=y
-# CONFIG_IPV6_SEG6_LWTUNNEL is not set
-# CONFIG_IPV6_SEG6_HMAC is not set
-# CONFIG_NETLABEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NET_PTP_CLASSIFY=y
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_INGRESS=y
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_FAMILY_BRIDGE=y
-CONFIG_NETFILTER_FAMILY_ARP=y
-CONFIG_NETFILTER_NETLINK_ACCT=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NETFILTER_NETLINK_OSF=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
-CONFIG_NETFILTER_CONNCOUNT=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_ZONES=y
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-# CONFIG_NF_CONNTRACK_TIMEOUT is not set
-CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CONNTRACK_LABELS=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_GRE=y
-# CONFIG_NF_CT_PROTO_SCTP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_BROADCAST=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_SNMP=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_AMANDA=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_SIP=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_REDIRECT=y
-CONFIG_NF_NAT_MASQUERADE=y
-CONFIG_NETFILTER_SYNPROXY=m
-CONFIG_NF_TABLES=m
-CONFIG_NF_TABLES_SET=m
-CONFIG_NF_TABLES_INET=y
-CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_NUMGEN=m
-CONFIG_NFT_CT=m
-CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_COUNTER=m
-CONFIG_NFT_CONNLIMIT=m
-CONFIG_NFT_LOG=m
-CONFIG_NFT_LIMIT=m
-CONFIG_NFT_MASQ=m
-CONFIG_NFT_REDIR=m
-CONFIG_NFT_NAT=m
-CONFIG_NFT_TUNNEL=m
-CONFIG_NFT_OBJREF=m
-CONFIG_NFT_QUEUE=m
-CONFIG_NFT_QUOTA=m
-CONFIG_NFT_REJECT=m
-CONFIG_NFT_REJECT_INET=m
-CONFIG_NFT_COMPAT=m
-CONFIG_NFT_HASH=m
-CONFIG_NFT_FIB=m
-CONFIG_NFT_FIB_INET=m
-CONFIG_NFT_XFRM=m
-CONFIG_NFT_SOCKET=m
-CONFIG_NFT_OSF=m
-CONFIG_NFT_TPROXY=m
-CONFIG_NFT_SYNPROXY=m
-CONFIG_NF_DUP_NETDEV=m
-CONFIG_NFT_DUP_NETDEV=m
-CONFIG_NFT_FWD_NETDEV=m
-CONFIG_NFT_FIB_NETDEV=m
-CONFIG_NF_FLOW_TABLE_INET=m
-CONFIG_NF_FLOW_TABLE=m
-CONFIG_NETFILTER_XTABLES=y
-
-#
-# Xtables combined modules
-#
-CONFIG_NETFILTER_XT_MARK=m
-CONFIG_NETFILTER_XT_CONNMARK=m
-CONFIG_NETFILTER_XT_SET=m
-
-#
-# Xtables targets
-#
-CONFIG_NETFILTER_XT_TARGET_AUDIT=m
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CT=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_HL=m
-CONFIG_NETFILTER_XT_TARGET_HMARK=m
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
-CONFIG_NETFILTER_XT_TARGET_LOG=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_NAT=m
-CONFIG_NETFILTER_XT_TARGET_NETMAP=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-CONFIG_NETFILTER_XT_TARGET_RATEEST=m
-CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
-CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
-CONFIG_NETFILTER_XT_TARGET_TEE=m
-# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-
-#
-# Xtables matches
-#
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
-CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NETFILTER_XT_MATCH_CGROUP=m
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_CPU=m
-# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ECN=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_HL=m
-# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
-CONFIG_NETFILTER_XT_MATCH_L2TP=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_NFACCT=m
-CONFIG_NETFILTER_XT_MATCH_OSF=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-# end of Core Netfilter Configuration
-
-CONFIG_IP_SET=m
-CONFIG_IP_SET_MAX=256
-CONFIG_IP_SET_BITMAP_IP=m
-CONFIG_IP_SET_BITMAP_IPMAC=m
-CONFIG_IP_SET_BITMAP_PORT=m
-CONFIG_IP_SET_HASH_IP=m
-CONFIG_IP_SET_HASH_IPMARK=m
-CONFIG_IP_SET_HASH_IPPORT=m
-CONFIG_IP_SET_HASH_IPPORTIP=m
-CONFIG_IP_SET_HASH_IPPORTNET=m
-# CONFIG_IP_SET_HASH_IPMAC is not set
-# CONFIG_IP_SET_HASH_MAC is not set
-# CONFIG_IP_SET_HASH_NETPORTNET is not set
-CONFIG_IP_SET_HASH_NET=m
-# CONFIG_IP_SET_HASH_NETNET is not set
-CONFIG_IP_SET_HASH_NETPORT=m
-CONFIG_IP_SET_HASH_NETIFACE=m
-CONFIG_IP_SET_LIST_SET=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_AH_ESP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-# CONFIG_IP_VS_PROTO_SCTP is not set
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_FO=m
-CONFIG_IP_VS_OVF=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_MH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS SH scheduler
-#
-CONFIG_IP_VS_SH_TAB_BITS=8
-
-#
-# IPVS MH scheduler
-#
-CONFIG_IP_VS_MH_TAB_INDEX=12
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IP_VS_NFCT=y
-CONFIG_IP_VS_PE_SIP=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
-CONFIG_NF_TPROXY_IPV4=m
-CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_REJECT_IPV4=m
-CONFIG_NFT_DUP_IPV4=m
-CONFIG_NFT_FIB_IPV4=m
-CONFIG_NF_TABLES_ARP=y
-CONFIG_NF_FLOW_TABLE_IPV4=m
-CONFIG_NF_DUP_IPV4=m
-CONFIG_NF_LOG_ARP=m
-CONFIG_NF_LOG_IPV4=m
-CONFIG_NF_REJECT_IPV4=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PPTP=m
-CONFIG_NF_NAT_H323=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-# CONFIG_IP_NF_TARGET_SYNPROXY is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-# CONFIG_IP_NF_TARGET_NETMAP is not set
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-# CONFIG_IP_NF_SECURITY is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# end of IP: Netfilter Configuration
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_NF_SOCKET_IPV6=m
-CONFIG_NF_TPROXY_IPV6=m
-CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_REJECT_IPV6=m
-CONFIG_NFT_DUP_IPV6=m
-CONFIG_NFT_FIB_IPV6=m
-CONFIG_NF_FLOW_TABLE_IPV6=m
-CONFIG_NF_DUP_IPV6=m
-CONFIG_NF_REJECT_IPV6=m
-CONFIG_NF_LOG_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RT=m
-# CONFIG_IP6_NF_MATCH_SRH is not set
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-# CONFIG_IP6_NF_SECURITY is not set
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-# CONFIG_IP6_NF_TARGET_NPT is not set
-# end of IPv6: Netfilter Configuration
-
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NFT_BRIDGE_META=m
-CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
-# CONFIG_NF_CONNTRACK_BRIDGE is not set
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_BPFILTER=y
-CONFIG_BPFILTER_UMH=m
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_RDS is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-CONFIG_L2TP=m
-CONFIG_L2TP_DEBUGFS=m
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=m
-CONFIG_L2TP_ETH=m
-CONFIG_STP=y
-CONFIG_GARP=y
-CONFIG_MRP=y
-CONFIG_BRIDGE=y
-CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_BRIDGE_VLAN_FILTERING=y
-CONFIG_HAVE_NET_DSA=y
-# CONFIG_NET_DSA is not set
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_VLAN_8021Q_MVRP=y
-# CONFIG_DECNET is not set
-CONFIG_LLC=y
-# CONFIG_LLC2 is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_PHONET is not set
-# CONFIG_6LOWPAN is not set
-CONFIG_IEEE802154=m
-# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
-CONFIG_IEEE802154_SOCKET=m
-CONFIG_MAC802154=m
-CONFIG_NET_SCHED=y
-
-#
-# Queueing/Scheduling
-#
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFB=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_CBS=m
-CONFIG_NET_SCH_ETF=m
-CONFIG_NET_SCH_TAPRIO=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_MQPRIO=m
-CONFIG_NET_SCH_SKBPRIO=m
-CONFIG_NET_SCH_CHOKE=m
-CONFIG_NET_SCH_QFQ=m
-CONFIG_NET_SCH_CODEL=m
-CONFIG_NET_SCH_FQ_CODEL=m
-CONFIG_NET_SCH_CAKE=m
-CONFIG_NET_SCH_FQ=m
-CONFIG_NET_SCH_HHF=m
-CONFIG_NET_SCH_PIE=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_SCH_PLUG=m
-# CONFIG_NET_SCH_DEFAULT is not set
-
-#
-# Classification
-#
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=y
-CONFIG_NET_CLS_BPF=m
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_NET_CLS_MATCHALL=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_STACK=32
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_EMATCH_IPSET=m
-# CONFIG_NET_EMATCH_IPT is not set
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-# CONFIG_NET_ACT_SAMPLE is not set
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_ACT_CSUM=m
-# CONFIG_NET_ACT_MPLS is not set
-# CONFIG_NET_ACT_VLAN is not set
-# CONFIG_NET_ACT_BPF is not set
-# CONFIG_NET_ACT_CONNMARK is not set
-# CONFIG_NET_ACT_CTINFO is not set
-# CONFIG_NET_ACT_SKBMOD is not set
-# CONFIG_NET_ACT_IFE is not set
-# CONFIG_NET_ACT_TUNNEL_KEY is not set
-# CONFIG_NET_ACT_CT is not set
-# CONFIG_NET_TC_SKB_EXT is not set
-CONFIG_NET_SCH_FIFO=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-# CONFIG_BATMAN_ADV is not set
-CONFIG_OPENVSWITCH=m
-CONFIG_OPENVSWITCH_GRE=m
-CONFIG_OPENVSWITCH_VXLAN=m
-CONFIG_VSOCKETS=m
-CONFIG_VSOCKETS_DIAG=m
-CONFIG_VMWARE_VMCI_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS_COMMON=m
-CONFIG_NETLINK_DIAG=m
-CONFIG_MPLS=y
-CONFIG_NET_MPLS_GSO=m
-# CONFIG_MPLS_ROUTING is not set
-CONFIG_NET_NSH=m
-# CONFIG_HSR is not set
-# CONFIG_NET_SWITCHDEV is not set
-CONFIG_NET_L3_MASTER_DEV=y
-# CONFIG_NET_NCSI is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
-CONFIG_CGROUP_NET_PRIO=y
-CONFIG_CGROUP_NET_CLASSID=y
-CONFIG_NET_RX_BUSY_POLL=y
-CONFIG_BQL=y
-CONFIG_BPF_JIT=y
-# CONFIG_BPF_STREAM_PARSER is not set
-CONFIG_NET_FLOW_LIMIT=y
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-CONFIG_NET_DROP_MONITOR=y
-# end of Network testing
-# end of Networking options
-
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-# CONFIG_AF_KCM is not set
-CONFIG_STREAM_PARSER=y
-CONFIG_FIB_RULES=y
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-
-#
-# CFG80211 needs to be enabled for MAC80211
-#
-CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-# CONFIG_CAIF is not set
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
-# CONFIG_NFC is not set
-# CONFIG_PSAMPLE is not set
-# CONFIG_NET_IFE is not set
-CONFIG_LWTUNNEL=y
-CONFIG_LWTUNNEL_BPF=y
-CONFIG_DST_CACHE=y
-CONFIG_GRO_CELLS=y
-CONFIG_NET_SOCK_MSG=y
-CONFIG_NET_DEVLINK=y
-CONFIG_PAGE_POOL=y
-CONFIG_FAILOVER=m
-CONFIG_HAVE_EBPF_JIT=y
-
-#
-# Device Drivers
-#
-CONFIG_HAVE_EISA=y
-# CONFIG_EISA is not set
-CONFIG_HAVE_PCI=y
-CONFIG_PCI=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEAER_INJECT=m
-CONFIG_PCIE_ECRC=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-CONFIG_PCIE_PME=y
-# CONFIG_PCIE_DPC is not set
-# CONFIG_PCIE_PTM is not set
-# CONFIG_PCIE_BW is not set
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-CONFIG_PCI_QUIRKS=y
-# CONFIG_PCI_DEBUG is not set
-# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
-CONFIG_PCI_STUB=m
-CONFIG_PCI_PF_STUB=m
-CONFIG_PCI_ATS=y
-CONFIG_PCI_LOCKLESS_CONFIG=y
-CONFIG_PCI_IOV=y
-CONFIG_PCI_PRI=y
-CONFIG_PCI_PASID=y
-# CONFIG_PCI_P2PDMA is not set
-CONFIG_PCI_LABEL=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_HOTPLUG_PCI_ACPI_IBM=m
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
-
-#
-# PCI controller drivers
-#
-
-#
-# Cadence PCIe controllers support
-#
-# end of Cadence PCIe controllers support
-
-CONFIG_VMD=y
-
-#
-# DesignWare PCI Core Support
-#
-# CONFIG_PCIE_DW_PLAT_HOST is not set
-# CONFIG_PCI_MESON is not set
-# end of DesignWare PCI Core Support
-# end of PCI controller drivers
-
-#
-# PCI Endpoint
-#
-# CONFIG_PCI_ENDPOINT is not set
-# end of PCI Endpoint
-
-#
-# PCI switch controller drivers
-#
-# CONFIG_PCI_SW_SWITCHTEC is not set
-# end of PCI switch controller drivers
-
-# CONFIG_PCCARD is not set
-# CONFIG_RAPIDIO is not set
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER=y
-CONFIG_UEVENT_HELPER_PATH=""
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-
-#
-# Firmware loader
-#
-CONFIG_FW_LOADER=y
-CONFIG_FW_LOADER_PAGED_BUF=y
-CONFIG_EXTRA_FIRMWARE=""
-CONFIG_FW_LOADER_USER_HELPER=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-# CONFIG_FW_LOADER_COMPRESS is not set
-# end of Firmware loader
-
-CONFIG_ALLOW_DEV_COREDUMP=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
-CONFIG_HMEM_REPORTING=y
-# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_DMA_SHARED_BUFFER=y
-# CONFIG_DMA_FENCE_TRACE is not set
-# end of Generic Driver Options
-
-#
-# Bus devices
-#
-# end of Bus devices
-
-CONFIG_CONNECTOR=y
-CONFIG_PROC_EVENTS=y
-# CONFIG_GNSS is not set
-# CONFIG_MTD is not set
-# CONFIG_OF is not set
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-# CONFIG_PARPORT is not set
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG_MESSAGES is not set
-
-#
-# Protocols
-#
-CONFIG_PNPACPI=y
-CONFIG_BLK_DEV=y
-CONFIG_BLK_DEV_NULL_BLK=m
-CONFIG_BLK_DEV_FD=m
-CONFIG_CDROM=m
-CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y
-# CONFIG_BLK_DEV_UMEM is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_DRBD=m
-# CONFIG_DRBD_FAULT_INJECTION is not set
-CONFIG_BLK_DEV_NBD=y
-# CONFIG_BLK_DEV_SKD is not set
-CONFIG_BLK_DEV_SX8=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-# CONFIG_ATA_OVER_ETH is not set
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_BLK_SCSI=y
-CONFIG_BLK_DEV_RBD=m
-# CONFIG_BLK_DEV_RSXX is not set
-
-#
-# NVME Support
-#
-CONFIG_NVME_CORE=y
-CONFIG_BLK_DEV_NVME=y
-CONFIG_NVME_MULTIPATH=y
-CONFIG_NVME_FABRICS=m
-CONFIG_NVME_RDMA=m
-# CONFIG_NVME_FC is not set
-# CONFIG_NVME_TCP is not set
-# CONFIG_NVME_TARGET is not set
-# end of NVME Support
-
-#
-# Misc devices
-#
-# CONFIG_AD525X_DPOT is not set
-# CONFIG_DUMMY_IRQ is not set
-# CONFIG_IBM_ASM is not set
-# CONFIG_PHANTOM is not set
-# CONFIG_TIFM_CORE is not set
-# CONFIG_ICS932S401 is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-CONFIG_HP_ILO=m
-# CONFIG_APDS9802ALS is not set
-# CONFIG_ISL29003 is not set
-# CONFIG_ISL29020 is not set
-# CONFIG_SENSORS_TSL2550 is not set
-# CONFIG_SENSORS_BH1770 is not set
-# CONFIG_SENSORS_APDS990X is not set
-# CONFIG_HMC6352 is not set
-# CONFIG_DS1682 is not set
-CONFIG_VMWARE_BALLOON=m
-# CONFIG_SRAM is not set
-# CONFIG_PCI_ENDPOINT_TEST is not set
-# CONFIG_XILINX_SDFEC is not set
-# CONFIG_PVPANIC is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_EEPROM_MAX6875 is not set
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_EEPROM_IDT_89HPESX is not set
-# CONFIG_EEPROM_EE1004 is not set
-# end of EEPROM support
-
-# CONFIG_CB710_CORE is not set
-
-#
-# Texas Instruments shared transport line discipline
-#
-# end of Texas Instruments shared transport line discipline
-
-# CONFIG_SENSORS_LIS3_I2C is not set
-
-#
-# Altera FPGA firmware download module (requires I2C)
-#
-CONFIG_ALTERA_STAPL=m
-# CONFIG_INTEL_MEI is not set
-# CONFIG_INTEL_MEI_ME is not set
-# CONFIG_INTEL_MEI_TXE is not set
-# CONFIG_INTEL_MEI_HDCP is not set
-CONFIG_VMWARE_VMCI=m
-
-#
-# Intel MIC & related support
-#
-
-#
-# Intel MIC Bus Driver
-#
-# CONFIG_INTEL_MIC_BUS is not set
-
-#
-# SCIF Bus Driver
-#
-# CONFIG_SCIF_BUS is not set
-
-#
-# VOP Bus Driver
-#
-# CONFIG_VOP_BUS is not set
-
-#
-# Intel MIC Host Driver
-#
-
-#
-# Intel MIC Card Driver
-#
-
-#
-# SCIF Driver
-#
-
-#
-# Intel MIC Coprocessor State Management (COSM) Drivers
-#
-
-#
-# VOP Driver
-#
-# end of Intel MIC & related support
-
-# CONFIG_GENWQE is not set
-# CONFIG_ECHO is not set
-# CONFIG_MISC_ALCOR_PCI is not set
-# CONFIG_MISC_RTSX_PCI is not set
-# CONFIG_MISC_RTSX_USB is not set
-# CONFIG_HABANA_AI is not set
-# end of Misc devices
-
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI_MOD=y
-CONFIG_RAID_ATTRS=y
-CONFIG_SCSI=y
-CONFIG_SCSI_DMA=y
-CONFIG_SCSI_NETLINK=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-# CONFIG_SCSI_SCAN_ASYNC is not set
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_ISCSI_ATTRS=y
-CONFIG_SCSI_SAS_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=y
-CONFIG_SCSI_SAS_ATA=y
-CONFIG_SCSI_SAS_HOST_SMP=y
-CONFIG_SCSI_SRP_ATTRS=y
-# end of SCSI Transports
-
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=y
-CONFIG_ISCSI_BOOT_SYSFS=y
-CONFIG_SCSI_CXGB3_ISCSI=m
-# CONFIG_SCSI_CXGB4_ISCSI is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
-# CONFIG_SCSI_BNX2X_FCOE is not set
-CONFIG_BE2ISCSI=y
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-CONFIG_SCSI_HPSA=m
-CONFIG_SCSI_3W_9XXX=y
-CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_ACARD=y
-CONFIG_SCSI_AACRAID=y
-CONFIG_SCSI_AIC7XXX=y
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=5000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-CONFIG_SCSI_AIC79XX=y
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC94XX=y
-CONFIG_AIC94XX_DEBUG=y
-CONFIG_SCSI_MVSAS=y
-# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_MVSAS_TASKLET=y
-CONFIG_SCSI_MVUMI=y
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-CONFIG_SCSI_ARCMSR=y
-# CONFIG_SCSI_ESAS2R is not set
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=y
-CONFIG_MEGARAID_MAILBOX=y
-CONFIG_MEGARAID_LEGACY=y
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_MPT3SAS=m
-CONFIG_SCSI_MPT2SAS_MAX_SGE=128
-CONFIG_SCSI_MPT3SAS_MAX_SGE=128
-# CONFIG_SCSI_SMARTPQI is not set
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PCI=y
-# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
-# CONFIG_SCSI_UFSHCD_PLATFORM is not set
-# CONFIG_SCSI_UFS_BSG is not set
-CONFIG_SCSI_HPTIOP=y
-CONFIG_SCSI_BUSLOGIC=y
-# CONFIG_SCSI_FLASHPOINT is not set
-# CONFIG_SCSI_MYRB is not set
-# CONFIG_SCSI_MYRS is not set
-CONFIG_VMWARE_PVSCSI=m
-CONFIG_LIBFC=y
-CONFIG_LIBFCOE=y
-CONFIG_FCOE=y
-CONFIG_FCOE_FNIC=y
-# CONFIG_SCSI_SNIC is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FDOMAIN_PCI is not set
-CONFIG_SCSI_GDTH=y
-CONFIG_SCSI_ISCI=y
-CONFIG_SCSI_IPS=y
-CONFIG_SCSI_INITIO=y
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_STEX=y
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA_FC=y
-# CONFIG_TCM_QLA2XXX is not set
-CONFIG_SCSI_QLA_ISCSI=y
-CONFIG_SCSI_LPFC=y
-# CONFIG_SCSI_LPFC_DEBUG_FS is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_AM53C974 is not set
-# CONFIG_SCSI_WD719X is not set
-# CONFIG_SCSI_DEBUG is not set
-CONFIG_SCSI_PMCRAID=y
-CONFIG_SCSI_PM8001=y
-CONFIG_SCSI_BFA_FC=y
-CONFIG_SCSI_VIRTIO=y
-# CONFIG_SCSI_CHELSIO_FCOE is not set
-CONFIG_SCSI_DH=y
-CONFIG_SCSI_DH_RDAC=y
-CONFIG_SCSI_DH_HP_SW=y
-CONFIG_SCSI_DH_EMC=y
-CONFIG_SCSI_DH_ALUA=y
-# end of SCSI device support
-
-CONFIG_ATA=y
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATA_ACPI=y
-# CONFIG_SATA_ZPODD is not set
-CONFIG_SATA_PMP=y
-
-#
-# Controllers with non-SFF native interface
-#
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_MOBILE_LPM_POLICY=0
-CONFIG_SATA_AHCI_PLATFORM=y
-# CONFIG_SATA_INIC162X is not set
-CONFIG_SATA_ACARD_AHCI=y
-CONFIG_SATA_SIL24=y
-CONFIG_ATA_SFF=y
-
-#
-# SFF controllers with custom DMA interface
-#
-CONFIG_PDC_ADMA=y
-CONFIG_SATA_QSTOR=y
-CONFIG_SATA_SX4=y
-CONFIG_ATA_BMDMA=y
-
-#
-# SATA SFF controllers with BMDMA
-#
-CONFIG_ATA_PIIX=y
-# CONFIG_SATA_DWC is not set
-CONFIG_SATA_MV=y
-CONFIG_SATA_NV=y
-CONFIG_SATA_PROMISE=y
-CONFIG_SATA_SIL=y
-CONFIG_SATA_SIS=y
-CONFIG_SATA_SVW=y
-CONFIG_SATA_ULI=y
-CONFIG_SATA_VIA=y
-CONFIG_SATA_VITESSE=y
-
-#
-# PATA SFF controllers with BMDMA
-#
-CONFIG_PATA_ALI=y
-CONFIG_PATA_AMD=y
-CONFIG_PATA_ARTOP=y
-CONFIG_PATA_ATIIXP=y
-CONFIG_PATA_ATP867X=y
-CONFIG_PATA_CMD64X=y
-# CONFIG_PATA_CYPRESS is not set
-# CONFIG_PATA_EFAR is not set
-CONFIG_PATA_HPT366=y
-CONFIG_PATA_HPT37X=y
-CONFIG_PATA_HPT3X2N=y
-CONFIG_PATA_HPT3X3=y
-# CONFIG_PATA_HPT3X3_DMA is not set
-CONFIG_PATA_IT8213=y
-CONFIG_PATA_IT821X=y
-CONFIG_PATA_JMICRON=y
-CONFIG_PATA_MARVELL=y
-CONFIG_PATA_NETCELL=y
-CONFIG_PATA_NINJA32=y
-# CONFIG_PATA_NS87415 is not set
-CONFIG_PATA_OLDPIIX=y
-# CONFIG_PATA_OPTIDMA is not set
-CONFIG_PATA_PDC2027X=y
-CONFIG_PATA_PDC_OLD=y
-# CONFIG_PATA_RADISYS is not set
-CONFIG_PATA_RDC=y
-CONFIG_PATA_SCH=y
-CONFIG_PATA_SERVERWORKS=y
-CONFIG_PATA_SIL680=y
-CONFIG_PATA_SIS=y
-CONFIG_PATA_TOSHIBA=y
-# CONFIG_PATA_TRIFLEX is not set
-CONFIG_PATA_VIA=y
-# CONFIG_PATA_WINBOND is not set
-
-#
-# PIO-only SFF controllers
-#
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_MPIIX is not set
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_PLATFORM is not set
-# CONFIG_PATA_RZ1000 is not set
-
-#
-# Generic fallback / legacy drivers
-#
-CONFIG_PATA_ACPI=y
-CONFIG_ATA_GENERIC=y
-CONFIG_PATA_LEGACY=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_AUTODETECT=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=y
-CONFIG_MD_RAID456=y
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BCACHE=m
-# CONFIG_BCACHE_DEBUG is not set
-# CONFIG_BCACHE_CLOSURES_DEBUG is not set
-CONFIG_BLK_DEV_DM_BUILTIN=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
-CONFIG_DM_BUFIO=y
-# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
-CONFIG_DM_BIO_PRISON=m
-CONFIG_DM_PERSISTENT_DATA=m
-# CONFIG_DM_UNSTRIPED is not set
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
-CONFIG_DM_CACHE_SMQ=m
-# CONFIG_DM_WRITECACHE is not set
-# CONFIG_DM_ERA is not set
-# CONFIG_DM_CLONE is not set
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_RAID=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-# CONFIG_DM_DUST is not set
-# CONFIG_DM_INIT is not set
-CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
-CONFIG_DM_VERITY=m
-# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
-# CONFIG_DM_VERITY_FEC is not set
-# CONFIG_DM_SWITCH is not set
-# CONFIG_DM_LOG_WRITES is not set
-# CONFIG_DM_INTEGRITY is not set
-CONFIG_TARGET_CORE=m
-CONFIG_TCM_IBLOCK=m
-CONFIG_TCM_FILEIO=m
-CONFIG_TCM_PSCSI=m
-# CONFIG_TCM_USER2 is not set
-CONFIG_LOOPBACK_TARGET=m
-CONFIG_TCM_FC=m
-CONFIG_ISCSI_TARGET=m
-# CONFIG_ISCSI_TARGET_CXGB4 is not set
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_FC=y
-CONFIG_FUSION_SAS=y
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=y
-# CONFIG_FUSION_LAN is not set
-CONFIG_FUSION_LOGGING=y
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_FIREWIRE is not set
-# CONFIG_FIREWIRE_NOSY is not set
-# end of IEEE 1394 (FireWire) support
-
-# CONFIG_MACINTOSH_DRIVERS is not set
-CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_NET_CORE=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-# CONFIG_EQUALIZER is not set
-CONFIG_NET_FC=y
-CONFIG_IFB=m
-CONFIG_NET_TEAM=m
-CONFIG_NET_TEAM_MODE_BROADCAST=m
-CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
-CONFIG_NET_TEAM_MODE_RANDOM=m
-CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
-CONFIG_NET_TEAM_MODE_LOADBALANCE=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_IPVLAN_L3S=y
-CONFIG_IPVLAN=m
-CONFIG_IPVTAP=m
-CONFIG_VXLAN=m
-# CONFIG_GENEVE is not set
-# CONFIG_GTP is not set
-# CONFIG_MACSEC is not set
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL=y
-CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_TUN=m
-CONFIG_TAP=m
-# CONFIG_TUN_VNET_CROSS_LE is not set
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=m
-# CONFIG_NLMON is not set
-CONFIG_NET_VRF=m
-CONFIG_VSOCKMON=m
-# CONFIG_ARCNET is not set
-
-#
-# CAIF transport drivers
-#
-
-#
-# Distributed Switch Architecture drivers
-#
-# end of Distributed Switch Architecture drivers
-
-CONFIG_ETHERNET=y
-CONFIG_MDIO=m
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ADAPTEC is not set
-CONFIG_NET_VENDOR_AGERE=y
-# CONFIG_ET131X is not set
-CONFIG_NET_VENDOR_ALACRITECH=y
-# CONFIG_SLICOSS is not set
-# CONFIG_NET_VENDOR_ALTEON is not set
-# CONFIG_ALTERA_TSE is not set
-CONFIG_NET_VENDOR_AMAZON=y
-# CONFIG_ENA_ETHERNET is not set
-# CONFIG_NET_VENDOR_AMD is not set
-CONFIG_NET_VENDOR_AQUANTIA=y
-# CONFIG_AQTION is not set
-CONFIG_NET_VENDOR_ARC=y
-CONFIG_NET_VENDOR_ATHEROS=y
-CONFIG_ATL2=m
-CONFIG_ATL1=m
-CONFIG_ATL1E=m
-CONFIG_ATL1C=m
-CONFIG_ALX=m
-# CONFIG_NET_VENDOR_AURORA is not set
-CONFIG_NET_VENDOR_BROADCOM=y
-# CONFIG_B44 is not set
-# CONFIG_BCMGENET is not set
-CONFIG_BNX2=m
-# CONFIG_CNIC is not set
-CONFIG_TIGON3=m
-CONFIG_TIGON3_HWMON=y
-CONFIG_BNX2X=m
-CONFIG_BNX2X_SRIOV=y
-# CONFIG_SYSTEMPORT is not set
-CONFIG_BNXT=m
-CONFIG_BNXT_SRIOV=y
-CONFIG_BNXT_FLOWER_OFFLOAD=y
-CONFIG_BNXT_DCB=y
-CONFIG_BNXT_HWMON=y
-CONFIG_NET_VENDOR_BROCADE=y
-CONFIG_BNA=m
-CONFIG_NET_VENDOR_CADENCE=y
-CONFIG_MACB=m
-CONFIG_MACB_USE_HWSTAMP=y
-# CONFIG_MACB_PCI is not set
-CONFIG_NET_VENDOR_CAVIUM=y
-# CONFIG_THUNDER_NIC_PF is not set
-# CONFIG_THUNDER_NIC_VF is not set
-# CONFIG_THUNDER_NIC_BGX is not set
-# CONFIG_THUNDER_NIC_RGX is not set
-# CONFIG_CAVIUM_PTP is not set
-CONFIG_LIQUIDIO=m
-# CONFIG_LIQUIDIO_VF is not set
-CONFIG_NET_VENDOR_CHELSIO=y
-# CONFIG_CHELSIO_T1 is not set
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
-# CONFIG_CHELSIO_T4_DCB is not set
-CONFIG_CHELSIO_T4VF=m
-CONFIG_CHELSIO_LIB=m
-CONFIG_NET_VENDOR_CISCO=y
-CONFIG_ENIC=m
-CONFIG_NET_VENDOR_CORTINA=y
-# CONFIG_CX_ECAT is not set
-CONFIG_DNET=m
-CONFIG_NET_VENDOR_DEC=y
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_DE2104X_DSL=0
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-CONFIG_ULI526X=m
-# CONFIG_NET_VENDOR_DLINK is not set
-CONFIG_NET_VENDOR_EMULEX=y
-CONFIG_BE2NET=m
-CONFIG_BE2NET_HWMON=y
-CONFIG_BE2NET_BE2=y
-CONFIG_BE2NET_BE3=y
-CONFIG_BE2NET_LANCER=y
-CONFIG_BE2NET_SKYHAWK=y
-CONFIG_NET_VENDOR_EZCHIP=y
-CONFIG_NET_VENDOR_GOOGLE=y
-# CONFIG_GVE is not set
-# CONFIG_NET_VENDOR_HP is not set
-CONFIG_NET_VENDOR_HUAWEI=y
-# CONFIG_HINIC is not set
-# CONFIG_NET_VENDOR_I825XX is not set
-CONFIG_NET_VENDOR_INTEL=y
-# CONFIG_E100 is not set
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_E1000E_HWTS=y
-CONFIG_IGB=m
-CONFIG_IGB_HWMON=y
-CONFIG_IGB_DCA=y
-CONFIG_IGBVF=m
-CONFIG_IXGB=m
-CONFIG_IXGBE=m
-CONFIG_IXGBE_HWMON=y
-CONFIG_IXGBE_DCA=y
-CONFIG_IXGBE_DCB=y
-CONFIG_IXGBEVF=m
-CONFIG_I40E=m
-# CONFIG_I40E_DCB is not set
-CONFIG_IAVF=m
-CONFIG_I40EVF=m
-# CONFIG_ICE is not set
-# CONFIG_FM10K is not set
-# CONFIG_IGC is not set
-CONFIG_JME=m
-CONFIG_NET_VENDOR_MARVELL=y
-CONFIG_MVMDIO=m
-CONFIG_SKGE=m
-# CONFIG_SKGE_DEBUG is not set
-CONFIG_SKGE_GENESIS=y
-CONFIG_SKY2=m
-# CONFIG_SKY2_DEBUG is not set
-CONFIG_NET_VENDOR_MELLANOX=y
-CONFIG_MLX4_EN=m
-CONFIG_MLX4_EN_DCB=y
-CONFIG_MLX4_CORE=m
-CONFIG_MLX4_DEBUG=y
-CONFIG_MLX4_CORE_GEN2=y
-CONFIG_MLX5_CORE=m
-# CONFIG_MLX5_FPGA is not set
-CONFIG_MLX5_CORE_EN=y
-CONFIG_MLX5_EN_ARFS=y
-CONFIG_MLX5_EN_RXNFC=y
-CONFIG_MLX5_MPFS=y
-CONFIG_MLX5_CORE_EN_DCB=y
-# CONFIG_MLX5_CORE_IPOIB is not set
-# CONFIG_MLXSW_CORE is not set
-# CONFIG_MLXFW is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-CONFIG_NET_VENDOR_MICROCHIP=y
-# CONFIG_LAN743X is not set
-CONFIG_NET_VENDOR_MICROSEMI=y
-CONFIG_NET_VENDOR_MYRI=y
-CONFIG_MYRI10GE=m
-CONFIG_MYRI10GE_DCA=y
-# CONFIG_FEALNX is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_NET_VENDOR_NETERION=y
-# CONFIG_S2IO is not set
-# CONFIG_VXGE is not set
-CONFIG_NET_VENDOR_NETRONOME=y
-# CONFIG_NFP is not set
-CONFIG_NET_VENDOR_NI=y
-# CONFIG_NI_XGE_MANAGEMENT_ENET is not set
-# CONFIG_NET_VENDOR_NVIDIA is not set
-CONFIG_NET_VENDOR_OKI=y
-CONFIG_ETHOC=m
-CONFIG_NET_VENDOR_PACKET_ENGINES=y
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-CONFIG_NET_VENDOR_PENSANDO=y
-# CONFIG_IONIC is not set
-CONFIG_NET_VENDOR_QLOGIC=y
-CONFIG_QLA3XXX=m
-CONFIG_QLCNIC=m
-CONFIG_QLCNIC_SRIOV=y
-CONFIG_QLCNIC_DCB=y
-CONFIG_QLCNIC_HWMON=y
-CONFIG_NETXEN_NIC=m
-# CONFIG_QED is not set
-CONFIG_NET_VENDOR_QUALCOMM=y
-# CONFIG_QCOM_EMAC is not set
-# CONFIG_RMNET is not set
-# CONFIG_NET_VENDOR_RDC is not set
-CONFIG_NET_VENDOR_REALTEK=y
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_R8169=m
-CONFIG_NET_VENDOR_RENESAS=y
-CONFIG_NET_VENDOR_ROCKER=y
-CONFIG_NET_VENDOR_SAMSUNG=y
-# CONFIG_SXGBE_ETH is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-CONFIG_NET_VENDOR_SOLARFLARE=y
-# CONFIG_SFC is not set
-# CONFIG_SFC_FALCON is not set
-# CONFIG_NET_VENDOR_SILAN is not set
-# CONFIG_NET_VENDOR_SIS is not set
-CONFIG_NET_VENDOR_SMSC=y
-CONFIG_EPIC100=m
-# CONFIG_SMSC911X is not set
-CONFIG_SMSC9420=m
-CONFIG_NET_VENDOR_SOCIONEXT=y
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SUN is not set
-CONFIG_NET_VENDOR_SYNOPSYS=y
-# CONFIG_DWC_XLGMAC is not set
-# CONFIG_NET_VENDOR_TEHUTI is not set
-# CONFIG_NET_VENDOR_TI is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_NET_VENDOR_XILINX=y
-# CONFIG_XILINX_AXI_EMAC is not set
-# CONFIG_XILINX_LL_TEMAC is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_NET_SB1000 is not set
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_BUS=y
-# CONFIG_MDIO_BCM_UNIMAC is not set
-CONFIG_MDIO_BITBANG=m
-# CONFIG_MDIO_MSCC_MIIM is not set
-# CONFIG_MDIO_THUNDER is not set
-CONFIG_PHYLIB=y
-CONFIG_SWPHY=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_ADIN_PHY is not set
-CONFIG_AMD_PHY=m
-# CONFIG_AQUANTIA_PHY is not set
-# CONFIG_AX88796B_PHY is not set
-CONFIG_AT803X_PHY=m
-# CONFIG_BCM7XXX_PHY is not set
-CONFIG_BCM87XX_PHY=m
-CONFIG_BCM_NET_PHYLIB=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_CICADA_PHY=m
-# CONFIG_CORTINA_PHY is not set
-CONFIG_DAVICOM_PHY=m
-# CONFIG_DP83822_PHY is not set
-# CONFIG_DP83TC811_PHY is not set
-# CONFIG_DP83848_PHY is not set
-# CONFIG_DP83867_PHY is not set
-CONFIG_FIXED_PHY=y
-CONFIG_ICPLUS_PHY=m
-# CONFIG_INTEL_XWAY_PHY is not set
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-# CONFIG_MARVELL_10G_PHY is not set
-CONFIG_MICREL_PHY=m
-# CONFIG_MICROCHIP_PHY is not set
-# CONFIG_MICROCHIP_T1_PHY is not set
-# CONFIG_MICROSEMI_PHY is not set
-CONFIG_NATIONAL_PHY=m
-# CONFIG_NXP_TJA11XX_PHY is not set
-CONFIG_QSEMI_PHY=m
-CONFIG_REALTEK_PHY=m
-# CONFIG_RENESAS_PHY is not set
-# CONFIG_ROCKCHIP_PHY is not set
-CONFIG_SMSC_PHY=m
-CONFIG_STE10XP=m
-# CONFIG_TERANETICS_PHY is not set
-CONFIG_VITESSE_PHY=m
-# CONFIG_XILINX_GMII2RGMII is not set
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOE=m
-CONFIG_PPTP=m
-CONFIG_PPPOL2TP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
-CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-# CONFIG_SLIP_MODE_SLIP6 is not set
-CONFIG_USB_NET_DRIVERS=y
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_RTL8152 is not set
-# CONFIG_USB_LAN78XX is not set
-# CONFIG_USB_USBNET is not set
-# CONFIG_USB_IPHETH is not set
-# CONFIG_WLAN is not set
-
-#
-# Enable WiMAX (Networking options) to see the WiMAX drivers
-#
-# CONFIG_WAN is not set
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKELB=m
-# CONFIG_IEEE802154_ATUSB is not set
-# CONFIG_IEEE802154_HWSIM is not set
-# CONFIG_VMXNET3 is not set
-# CONFIG_FUJITSU_ES is not set
-# CONFIG_NETDEVSIM is not set
-CONFIG_NET_FAILOVER=m
-# CONFIG_ISDN is not set
-# CONFIG_NVM is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=y
-CONFIG_INPUT_POLLDEV=m
-CONFIG_INPUT_SPARSEKMAP=m
-# CONFIG_INPUT_MATRIXKMAP is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ADP5588 is not set
-# CONFIG_KEYBOARD_ADP5589 is not set
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_QT1050 is not set
-# CONFIG_KEYBOARD_QT1070 is not set
-# CONFIG_KEYBOARD_QT2160 is not set
-# CONFIG_KEYBOARD_DLINK_DIR685 is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_TCA6416 is not set
-# CONFIG_KEYBOARD_TCA8418 is not set
-# CONFIG_KEYBOARD_LM8333 is not set
-# CONFIG_KEYBOARD_MAX7359 is not set
-# CONFIG_KEYBOARD_MCS is not set
-# CONFIG_KEYBOARD_MPR121 is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_OPENCORES is not set
-# CONFIG_KEYBOARD_SAMSUNG is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_KEYBOARD_XTKBD=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-CONFIG_INPUT_MISC=y
-# CONFIG_INPUT_AD714X is not set
-# CONFIG_INPUT_BMA150 is not set
-# CONFIG_INPUT_E3X0_BUTTON is not set
-# CONFIG_INPUT_MSM_VIBRATOR is not set
-# CONFIG_INPUT_PCSPKR is not set
-# CONFIG_INPUT_MMA8450 is not set
-CONFIG_INPUT_ATLAS_BTNS=m
-CONFIG_INPUT_ATI_REMOTE2=m
-CONFIG_INPUT_KEYSPAN_REMOTE=m
-# CONFIG_INPUT_KXTJ9 is not set
-CONFIG_INPUT_POWERMATE=m
-CONFIG_INPUT_YEALINK=m
-CONFIG_INPUT_CM109=m
-CONFIG_INPUT_UINPUT=m
-# CONFIG_INPUT_PCF8574 is not set
-# CONFIG_INPUT_ADXL34X is not set
-# CONFIG_INPUT_CMA3000 is not set
-# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
-# CONFIG_INPUT_DRV2665_HAPTICS is not set
-# CONFIG_INPUT_DRV2667_HAPTICS is not set
-# CONFIG_RMI4_CORE is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=y
-CONFIG_SERIO_ALTERA_PS2=y
-# CONFIG_SERIO_PS2MULT is not set
-CONFIG_SERIO_ARC_PS2=y
-# CONFIG_USERIO is not set
-# CONFIG_GAMEPORT is not set
-# end of Hardware I/O ports
-# end of Input device support
-
-#
-# Character devices
-#
-CONFIG_TTY=y
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_ROCKETPORT is not set
-CONFIG_CYCLADES=m
-# CONFIG_CYZ_INTR is not set
-# CONFIG_MOXA_INTELLIO is not set
-# CONFIG_MOXA_SMARTIO is not set
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_NOZOMI=m
-# CONFIG_ISI is not set
-CONFIG_N_HDLC=m
-CONFIG_N_GSM=m
-# CONFIG_TRACE_SINK is not set
-# CONFIG_NULL_TTY is not set
-CONFIG_LDISC_AUTOLOAD=y
-CONFIG_DEVMEM=y
-# CONFIG_DEVKMEM is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_EARLYCON=y
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_PNP=y
-# CONFIG_SERIAL_8250_FINTEK is not set
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DMA=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_EXAR=y
-CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_8250_DWLIB=y
-# CONFIG_SERIAL_8250_DW is not set
-# CONFIG_SERIAL_8250_RT288X is not set
-CONFIG_SERIAL_8250_LPSS=y
-CONFIG_SERIAL_8250_MID=y
-
-#
-# Non-8250 serial port support
-#
-# CONFIG_SERIAL_UARTLITE is not set
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_JSM=m
-# CONFIG_SERIAL_SCCNXP is not set
-# CONFIG_SERIAL_SC16IS7XX is not set
-# CONFIG_SERIAL_ALTERA_JTAGUART is not set
-# CONFIG_SERIAL_ALTERA_UART is not set
-CONFIG_SERIAL_ARC=m
-CONFIG_SERIAL_ARC_NR_PORTS=1
-# CONFIG_SERIAL_RP2 is not set
-# CONFIG_SERIAL_FSL_LPUART is not set
-# CONFIG_SERIAL_FSL_LINFLEXUART is not set
-# end of Serial drivers
-
-# CONFIG_SERIAL_DEV_BUS is not set
-# CONFIG_TTY_PRINTK is not set
-CONFIG_HVC_DRIVER=y
-CONFIG_VIRTIO_CONSOLE=m
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DMI_DECODE=y
-CONFIG_IPMI_PLAT_DATA=y
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-# CONFIG_IPMI_SSIF is not set
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_HW_RANDOM_INTEL=m
-CONFIG_HW_RANDOM_AMD=m
-CONFIG_HW_RANDOM_VIA=m
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_NVRAM=y
-# CONFIG_APPLICOM is not set
-# CONFIG_MWAVE is not set
-CONFIG_RAW_DRIVER=y
-CONFIG_MAX_RAW_DEVS=8192
-CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HPET_MMAP_DEFAULT=y
-CONFIG_HANGCHECK_TIMER=m
-CONFIG_TCG_TPM=y
-CONFIG_HW_RANDOM_TPM=y
-CONFIG_TCG_TIS_CORE=y
-CONFIG_TCG_TIS=y
-# CONFIG_TCG_TIS_I2C_ATMEL is not set
-# CONFIG_TCG_TIS_I2C_INFINEON is not set
-# CONFIG_TCG_TIS_I2C_NUVOTON is not set
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_TCG_INFINEON=m
-# CONFIG_TCG_CRB is not set
-# CONFIG_TCG_VTPM_PROXY is not set
-# CONFIG_TCG_TIS_ST33ZP24_I2C is not set
-CONFIG_TELCLOCK=m
-CONFIG_DEVPORT=y
-# CONFIG_XILLYBUS is not set
-# end of Character devices
-
-# CONFIG_RANDOM_TRUST_CPU is not set
-# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_BOARDINFO=y
-# CONFIG_I2C_COMPAT is not set
-# CONFIG_I2C_CHARDEV is not set
-# CONFIG_I2C_MUX is not set
-# CONFIG_I2C_HELPER_AUTO is not set
-# CONFIG_I2C_SMBUS is not set
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
-# end of I2C Algorithms
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# PC SMBus host controller drivers
-#
-# CONFIG_I2C_ALI1535 is not set
-# CONFIG_I2C_ALI1563 is not set
-# CONFIG_I2C_ALI15X3 is not set
-# CONFIG_I2C_AMD756 is not set
-# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_AMD_MP2 is not set
-# CONFIG_I2C_I801 is not set
-# CONFIG_I2C_ISCH is not set
-# CONFIG_I2C_ISMT is not set
-# CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_NFORCE2 is not set
-# CONFIG_I2C_NVIDIA_GPU is not set
-# CONFIG_I2C_SIS5595 is not set
-# CONFIG_I2C_SIS630 is not set
-# CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_VIA is not set
-# CONFIG_I2C_VIAPRO is not set
-
-#
-# ACPI drivers
-#
-# CONFIG_I2C_SCMI is not set
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
-# CONFIG_I2C_DESIGNWARE_PCI is not set
-# CONFIG_I2C_EMEV2 is not set
-# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PCA_PLATFORM is not set
-# CONFIG_I2C_SIMTEC is not set
-# CONFIG_I2C_XILINX is not set
-
-#
-# External I2C/SMBus adapter drivers
-#
-# CONFIG_I2C_DIOLAN_U2C is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
-# CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_TINY_USB is not set
-
-#
-# Other I2C/SMBus bus drivers
-#
-# CONFIG_I2C_MLXCPLD is not set
-# end of I2C Hardware Bus support
-
-# CONFIG_I2C_STUB is not set
-# CONFIG_I2C_SLAVE is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# end of I2C support
-
-# CONFIG_I3C is not set
-# CONFIG_SPI is not set
-# CONFIG_SPMI is not set
-# CONFIG_HSI is not set
-CONFIG_PPS=m
-# CONFIG_PPS_DEBUG is not set
-
-#
-# PPS clients support
-#
-# CONFIG_PPS_CLIENT_KTIMER is not set
-# CONFIG_PPS_CLIENT_LDISC is not set
-# CONFIG_PPS_CLIENT_GPIO is not set
-
-#
-# PPS generators support
-#
-
-#
-# PTP clock support
-#
-CONFIG_PTP_1588_CLOCK=m
-# CONFIG_DP83640_PHY is not set
-CONFIG_PTP_1588_CLOCK_KVM=m
-# end of PTP clock support
-
-CONFIG_PINCTRL=y
-# CONFIG_DEBUG_PINCTRL is not set
-# CONFIG_PINCTRL_AMD is not set
-# CONFIG_PINCTRL_MCP23S08 is not set
-# CONFIG_PINCTRL_BAYTRAIL is not set
-# CONFIG_PINCTRL_CHERRYVIEW is not set
-# CONFIG_PINCTRL_BROXTON is not set
-# CONFIG_PINCTRL_CANNONLAKE is not set
-# CONFIG_PINCTRL_CEDARFORK is not set
-# CONFIG_PINCTRL_DENVERTON is not set
-# CONFIG_PINCTRL_GEMINILAKE is not set
-# CONFIG_PINCTRL_ICELAKE is not set
-# CONFIG_PINCTRL_LEWISBURG is not set
-# CONFIG_PINCTRL_SUNRISEPOINT is not set
-# CONFIG_GPIOLIB is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_AVS is not set
-CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_RESTART is not set
-CONFIG_POWER_SUPPLY=y
-# CONFIG_POWER_SUPPLY_DEBUG is not set
-CONFIG_POWER_SUPPLY_HWMON=y
-# CONFIG_PDA_POWER is not set
-# CONFIG_TEST_POWER is not set
-# CONFIG_CHARGER_ADP5061 is not set
-# CONFIG_BATTERY_DS2780 is not set
-# CONFIG_BATTERY_DS2781 is not set
-# CONFIG_BATTERY_DS2782 is not set
-# CONFIG_BATTERY_SBS is not set
-# CONFIG_CHARGER_SBS is not set
-# CONFIG_BATTERY_BQ27XXX is not set
-# CONFIG_BATTERY_MAX17040 is not set
-# CONFIG_BATTERY_MAX17042 is not set
-# CONFIG_CHARGER_MAX8903 is not set
-# CONFIG_CHARGER_LP8727 is not set
-# CONFIG_CHARGER_BQ2415X is not set
-# CONFIG_CHARGER_SMB347 is not set
-# CONFIG_BATTERY_GAUGE_LTC2941 is not set
-CONFIG_HWMON=y
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Native drivers
-#
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
-# CONFIG_SENSORS_AD7414 is not set
-# CONFIG_SENSORS_AD7418 is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1029 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ADT7410 is not set
-# CONFIG_SENSORS_ADT7411 is not set
-# CONFIG_SENSORS_ADT7462 is not set
-# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7475 is not set
-# CONFIG_SENSORS_AS370 is not set
-# CONFIG_SENSORS_ASC7621 is not set
-# CONFIG_SENSORS_K8TEMP is not set
-# CONFIG_SENSORS_K10TEMP is not set
-# CONFIG_SENSORS_FAM15H_POWER is not set
-# CONFIG_SENSORS_APPLESMC is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ASPEED is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS620 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_DELL_SMM is not set
-# CONFIG_SENSORS_I5K_AMB is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_F71882FG is not set
-# CONFIG_SENSORS_F75375S is not set
-# CONFIG_SENSORS_FSCHMD is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_G760A is not set
-# CONFIG_SENSORS_G762 is not set
-# CONFIG_SENSORS_HIH6130 is not set
-CONFIG_SENSORS_IBMAEM=m
-CONFIG_SENSORS_IBMPEX=m
-# CONFIG_SENSORS_I5500 is not set
-# CONFIG_SENSORS_CORETEMP is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_JC42 is not set
-# CONFIG_SENSORS_POWR1220 is not set
-# CONFIG_SENSORS_LINEAGE is not set
-# CONFIG_SENSORS_LTC2945 is not set
-# CONFIG_SENSORS_LTC2990 is not set
-# CONFIG_SENSORS_LTC4151 is not set
-# CONFIG_SENSORS_LTC4215 is not set
-# CONFIG_SENSORS_LTC4222 is not set
-# CONFIG_SENSORS_LTC4245 is not set
-# CONFIG_SENSORS_LTC4260 is not set
-# CONFIG_SENSORS_LTC4261 is not set
-# CONFIG_SENSORS_MAX16065 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_MAX1668 is not set
-# CONFIG_SENSORS_MAX197 is not set
-# CONFIG_SENSORS_MAX6621 is not set
-# CONFIG_SENSORS_MAX6639 is not set
-# CONFIG_SENSORS_MAX6642 is not set
-# CONFIG_SENSORS_MAX6650 is not set
-# CONFIG_SENSORS_MAX6697 is not set
-# CONFIG_SENSORS_MAX31790 is not set
-# CONFIG_SENSORS_MCP3021 is not set
-# CONFIG_SENSORS_TC654 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM73 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_LM93 is not set
-# CONFIG_SENSORS_LM95234 is not set
-# CONFIG_SENSORS_LM95241 is not set
-# CONFIG_SENSORS_LM95245 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_NTC_THERMISTOR is not set
-# CONFIG_SENSORS_NCT6683 is not set
-# CONFIG_SENSORS_NCT6775 is not set
-# CONFIG_SENSORS_NCT7802 is not set
-# CONFIG_SENSORS_NCT7904 is not set
-# CONFIG_SENSORS_NPCM7XX is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_PMBUS is not set
-# CONFIG_SENSORS_SHT21 is not set
-# CONFIG_SENSORS_SHT3x is not set
-# CONFIG_SENSORS_SHTC1 is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_DME1737 is not set
-# CONFIG_SENSORS_EMC1403 is not set
-# CONFIG_SENSORS_EMC2103 is not set
-# CONFIG_SENSORS_EMC6W201 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47M192 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_STTS751 is not set
-# CONFIG_SENSORS_SMM665 is not set
-# CONFIG_SENSORS_ADC128D818 is not set
-# CONFIG_SENSORS_ADS7828 is not set
-# CONFIG_SENSORS_AMC6821 is not set
-# CONFIG_SENSORS_INA209 is not set
-# CONFIG_SENSORS_INA2XX is not set
-# CONFIG_SENSORS_INA3221 is not set
-# CONFIG_SENSORS_TC74 is not set
-# CONFIG_SENSORS_THMC50 is not set
-# CONFIG_SENSORS_TMP102 is not set
-# CONFIG_SENSORS_TMP103 is not set
-# CONFIG_SENSORS_TMP108 is not set
-# CONFIG_SENSORS_TMP401 is not set
-# CONFIG_SENSORS_TMP421 is not set
-# CONFIG_SENSORS_VIA_CPUTEMP is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_VT8231 is not set
-# CONFIG_SENSORS_W83773G is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83791D is not set
-# CONFIG_SENSORS_W83792D is not set
-# CONFIG_SENSORS_W83793 is not set
-# CONFIG_SENSORS_W83795 is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83L786NG is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_SENSORS_XGENE is not set
-
-#
-# ACPI drivers
-#
-# CONFIG_SENSORS_ACPI_POWER is not set
-# CONFIG_SENSORS_ATK0110 is not set
-CONFIG_THERMAL=y
-# CONFIG_THERMAL_STATISTICS is not set
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_HWMON=y
-CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_GOV_FAIR_SHARE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-# CONFIG_THERMAL_GOV_BANG_BANG is not set
-CONFIG_THERMAL_GOV_USER_SPACE=y
-# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-# CONFIG_CLOCK_THERMAL is not set
-# CONFIG_DEVFREQ_THERMAL is not set
-# CONFIG_THERMAL_EMULATION is not set
-
-#
-# Intel thermal drivers
-#
-# CONFIG_INTEL_POWERCLAMP is not set
-CONFIG_X86_PKG_TEMP_THERMAL=m
-# CONFIG_INTEL_SOC_DTS_THERMAL is not set
-
-#
-# ACPI INT340X thermal drivers
-#
-# CONFIG_INT340X_THERMAL is not set
-# end of ACPI INT340X thermal drivers
-
-# CONFIG_INTEL_PCH_THERMAL is not set
-# end of Intel thermal drivers
-
-# CONFIG_WATCHDOG is not set
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-CONFIG_BCMA_POSSIBLE=y
-# CONFIG_BCMA is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_BCM590XX is not set
-# CONFIG_MFD_BD9571MWV is not set
-# CONFIG_MFD_AXP20X_I2C is not set
-# CONFIG_MFD_MADERA is not set
-# CONFIG_MFD_DA9062 is not set
-# CONFIG_MFD_DA9063 is not set
-# CONFIG_MFD_DA9150 is not set
-# CONFIG_MFD_DLN2 is not set
-# CONFIG_MFD_MC13XXX_I2C is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
-# CONFIG_LPC_ICH is not set
-# CONFIG_LPC_SCH is not set
-# CONFIG_MFD_INTEL_LPSS_ACPI is not set
-# CONFIG_MFD_INTEL_LPSS_PCI is not set
-# CONFIG_MFD_JANZ_CMODIO is not set
-# CONFIG_MFD_KEMPLD is not set
-# CONFIG_MFD_88PM800 is not set
-# CONFIG_MFD_88PM805 is not set
-# CONFIG_MFD_MAX14577 is not set
-# CONFIG_MFD_MAX77693 is not set
-# CONFIG_MFD_MAX8907 is not set
-# CONFIG_MFD_MT6397 is not set
-# CONFIG_MFD_MENF21BMC is not set
-# CONFIG_MFD_VIPERBOARD is not set
-# CONFIG_MFD_RETU is not set
-# CONFIG_MFD_PCF50633 is not set
-# CONFIG_MFD_RDC321X is not set
-# CONFIG_MFD_RT5033 is not set
-# CONFIG_MFD_SI476X_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_MFD_SKY81452 is not set
-# CONFIG_ABX500_CORE is not set
-# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
-# CONFIG_MFD_LP3943 is not set
-# CONFIG_MFD_TI_LMU is not set
-# CONFIG_TPS6105X is not set
-# CONFIG_TPS6507X is not set
-# CONFIG_MFD_TPS65086 is not set
-# CONFIG_MFD_TI_LP873X is not set
-# CONFIG_MFD_TPS65912_I2C is not set
-# CONFIG_MFD_WL1273_CORE is not set
-# CONFIG_MFD_LM3533 is not set
-# CONFIG_MFD_TQMX86 is not set
-# CONFIG_MFD_VX855 is not set
-# CONFIG_MFD_ARIZONA_I2C is not set
-# CONFIG_MFD_WM8994 is not set
-# end of Multifunction device drivers
-
-# CONFIG_REGULATOR is not set
-CONFIG_RC_CORE=y
-CONFIG_RC_MAP=y
-# CONFIG_LIRC is not set
-CONFIG_RC_DECODERS=y
-CONFIG_IR_NEC_DECODER=y
-CONFIG_IR_RC5_DECODER=y
-CONFIG_IR_RC6_DECODER=y
-CONFIG_IR_JVC_DECODER=y
-CONFIG_IR_SONY_DECODER=y
-CONFIG_IR_SANYO_DECODER=y
-CONFIG_IR_SHARP_DECODER=y
-CONFIG_IR_MCE_KBD_DECODER=y
-CONFIG_IR_XMP_DECODER=y
-# CONFIG_IR_IMON_DECODER is not set
-# CONFIG_IR_RCMM_DECODER is not set
-# CONFIG_RC_DEVICES is not set
-# CONFIG_MEDIA_SUPPORT is not set
-
-#
-# Graphics support
-#
-CONFIG_AGP=y
-# CONFIG_AGP_AMD64 is not set
-CONFIG_AGP_INTEL=y
-# CONFIG_AGP_SIS is not set
-# CONFIG_AGP_VIA is not set
-CONFIG_INTEL_GTT=y
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=64
-# CONFIG_VGA_SWITCHEROO is not set
-CONFIG_DRM=m
-CONFIG_DRM_MIPI_DSI=y
-# CONFIG_DRM_DP_AUX_CHARDEV is not set
-# CONFIG_DRM_DEBUG_SELFTEST is not set
-CONFIG_DRM_KMS_HELPER=m
-CONFIG_DRM_KMS_FB_HELPER=y
-CONFIG_DRM_FBDEV_EMULATION=y
-CONFIG_DRM_FBDEV_OVERALLOC=100
-# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
-# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
-# CONFIG_DRM_DP_CEC is not set
-
-#
-# I2C encoder or helper chips
-#
-# CONFIG_DRM_I2C_CH7006 is not set
-# CONFIG_DRM_I2C_SIL164 is not set
-# CONFIG_DRM_I2C_NXP_TDA998X is not set
-# CONFIG_DRM_I2C_NXP_TDA9950 is not set
-# end of I2C encoder or helper chips
-
-#
-# ARM devices
-#
-# end of ARM devices
-
-# CONFIG_DRM_RADEON is not set
-# CONFIG_ARCH_PHYTIUM_1500A_MEM_TYPE_DEVICE is not set
-# CONFIG_DRM_AMDGPU is not set
-
-#
-# ACP (Audio CoProcessor) Configuration
-#
-# end of ACP (Audio CoProcessor) Configuration
-
-# CONFIG_DRM_NOUVEAU is not set
-CONFIG_DRM_I915=m
-# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
-CONFIG_DRM_I915_FORCE_PROBE=""
-CONFIG_DRM_I915_CAPTURE_ERROR=y
-CONFIG_DRM_I915_COMPRESS_ERROR=y
-CONFIG_DRM_I915_USERPTR=y
-# CONFIG_DRM_I915_GVT is not set
-
-#
-# drm/i915 Debugging
-#
-# CONFIG_DRM_I915_WERROR is not set
-# CONFIG_DRM_I915_DEBUG is not set
-# CONFIG_DRM_I915_DEBUG_MMIO is not set
-# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
-# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
-# CONFIG_DRM_I915_DEBUG_GUC is not set
-# CONFIG_DRM_I915_SELFTEST is not set
-# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
-# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
-# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
-# end of drm/i915 Debugging
-
-#
-# drm/i915 Profile Guided Optimisation
-#
-CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
-CONFIG_DRM_I915_SPIN_REQUEST=5
-# end of drm/i915 Profile Guided Optimisation
-
-# CONFIG_DRM_VGEM is not set
-# CONFIG_DRM_VKMS is not set
-# CONFIG_DRM_VMWGFX is not set
-# CONFIG_DRM_GMA500 is not set
-# CONFIG_DRM_UDL is not set
-# CONFIG_DRM_AST is not set
-# CONFIG_DRM_MGAG200 is not set
-# CONFIG_DRM_CIRRUS_QEMU is not set
-# CONFIG_DRM_QXL is not set
-# CONFIG_DRM_BOCHS is not set
-# CONFIG_DRM_VIRTIO_GPU is not set
-CONFIG_DRM_PANEL=y
-
-#
-# Display Panels
-#
-# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set
-# end of Display Panels
-
-CONFIG_DRM_BRIDGE=y
-CONFIG_DRM_PANEL_BRIDGE=y
-
-#
-# Display Interface Bridges
-#
-# CONFIG_DRM_ANALOGIX_ANX78XX is not set
-# end of Display Interface Bridges
-
-# CONFIG_DRM_ETNAVIV is not set
-# CONFIG_DRM_GM12U320 is not set
-# CONFIG_DRM_VBOXVIDEO is not set
-# CONFIG_DRM_LEGACY is not set
-CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
-
-#
-# Frame buffer Devices
-#
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_NOTIFY=y
-CONFIG_FB=y
-# CONFIG_FIRMWARE_EDID is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SYS_FILLRECT=m
-CONFIG_FB_SYS_COPYAREA=m
-CONFIG_FB_SYS_IMAGEBLIT=m
-# CONFIG_FB_FOREIGN_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=m
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_UVESA is not set
-# CONFIG_FB_VESA is not set
-CONFIG_FB_EFI=y
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_OPENCORES is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_INTEL is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_SIMPLE is not set
-# CONFIG_FB_SM712 is not set
-# end of Frame buffer Devices
-
-#
-# Backlight & LCD device support
-#
-CONFIG_LCD_CLASS_DEVICE=m
-# CONFIG_LCD_PLATFORM is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=m
-CONFIG_BACKLIGHT_GENERIC=m
-# CONFIG_BACKLIGHT_APPLE is not set
-# CONFIG_BACKLIGHT_PM8941_WLED is not set
-# CONFIG_BACKLIGHT_SAHARA is not set
-# CONFIG_BACKLIGHT_ADP8860 is not set
-# CONFIG_BACKLIGHT_ADP8870 is not set
-# CONFIG_BACKLIGHT_LM3639 is not set
-# CONFIG_BACKLIGHT_LV5207LP is not set
-# CONFIG_BACKLIGHT_BD6107 is not set
-# CONFIG_BACKLIGHT_ARCXCNN is not set
-# end of Backlight & LCD device support
-
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
-# end of Console display driver support
-
-# CONFIG_LOGO is not set
-# end of Graphics support
-
-# CONFIG_SOUND is not set
-
-#
-# HID support
-#
-CONFIG_HID=y
-# CONFIG_HID_BATTERY_STRENGTH is not set
-# CONFIG_HIDRAW is not set
-# CONFIG_UHID is not set
-CONFIG_HID_GENERIC=y
-
-#
-# Special HID drivers
-#
-CONFIG_HID_A4TECH=y
-# CONFIG_HID_ACCUTOUCH is not set
-# CONFIG_HID_ACRUX is not set
-CONFIG_HID_APPLE=y
-# CONFIG_HID_APPLEIR is not set
-# CONFIG_HID_AUREAL is not set
-CONFIG_HID_BELKIN=y
-# CONFIG_HID_BETOP_FF is not set
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-# CONFIG_HID_COUGAR is not set
-# CONFIG_HID_MACALLY is not set
-# CONFIG_HID_CMEDIA is not set
-# CONFIG_HID_CREATIVE_SB0540 is not set
-CONFIG_HID_CYPRESS=y
-# CONFIG_HID_DRAGONRISE is not set
-# CONFIG_HID_EMS_FF is not set
-# CONFIG_HID_ELECOM is not set
-# CONFIG_HID_ELO is not set
-CONFIG_HID_EZKEY=y
-# CONFIG_HID_GEMBIRD is not set
-# CONFIG_HID_GFRM is not set
-# CONFIG_HID_HOLTEK is not set
-# CONFIG_HID_KEYTOUCH is not set
-# CONFIG_HID_KYE is not set
-# CONFIG_HID_UCLOGIC is not set
-# CONFIG_HID_WALTOP is not set
-# CONFIG_HID_VIEWSONIC is not set
-# CONFIG_HID_GYRATION is not set
-# CONFIG_HID_ICADE is not set
-# CONFIG_HID_ITE is not set
-# CONFIG_HID_JABRA is not set
-# CONFIG_HID_TWINHAN is not set
-CONFIG_HID_KENSINGTON=y
-# CONFIG_HID_LCPOWER is not set
-# CONFIG_HID_LENOVO is not set
-CONFIG_HID_LOGITECH=y
-# CONFIG_HID_LOGITECH_HIDPP is not set
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-# CONFIG_LOGIG940_FF is not set
-# CONFIG_LOGIWHEELS_FF is not set
-# CONFIG_HID_MAGICMOUSE is not set
-# CONFIG_HID_MALTRON is not set
-# CONFIG_HID_MAYFLASH is not set
-# CONFIG_HID_REDRAGON is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-# CONFIG_HID_MULTITOUCH is not set
-# CONFIG_HID_NTI is not set
-# CONFIG_HID_NTRIG is not set
-# CONFIG_HID_ORTEK is not set
-# CONFIG_HID_PANTHERLORD is not set
-# CONFIG_HID_PENMOUNT is not set
-# CONFIG_HID_PETALYNX is not set
-# CONFIG_HID_PICOLCD is not set
-# CONFIG_HID_PLANTRONICS is not set
-# CONFIG_HID_PRIMAX is not set
-# CONFIG_HID_RETRODE is not set
-# CONFIG_HID_ROCCAT is not set
-# CONFIG_HID_SAITEK is not set
-# CONFIG_HID_SAMSUNG is not set
-# CONFIG_HID_SPEEDLINK is not set
-# CONFIG_HID_STEAM is not set
-# CONFIG_HID_STEELSERIES is not set
-# CONFIG_HID_SUNPLUS is not set
-# CONFIG_HID_RMI is not set
-# CONFIG_HID_GREENASIA is not set
-# CONFIG_HID_SMARTJOYPLUS is not set
-# CONFIG_HID_TIVO is not set
-# CONFIG_HID_TOPSEED is not set
-# CONFIG_HID_THRUSTMASTER is not set
-# CONFIG_HID_UDRAW_PS3 is not set
-# CONFIG_HID_WACOM is not set
-# CONFIG_HID_XINMO is not set
-# CONFIG_HID_ZEROPLUS is not set
-# CONFIG_HID_ZYDACRON is not set
-# CONFIG_HID_SENSOR_HUB is not set
-# CONFIG_HID_ALPS is not set
-# end of Special HID drivers
-
-#
-# USB HID support
-#
-CONFIG_USB_HID=y
-# CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
-# end of USB HID support
-
-#
-# I2C HID support
-#
-# CONFIG_I2C_HID is not set
-# end of I2C HID support
-
-#
-# Intel ISH HID support
-#
-# CONFIG_INTEL_ISH_HID is not set
-# end of Intel ISH HID support
-# end of HID support
-
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-# CONFIG_USB_ULPI_BUS is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB=y
-CONFIG_USB_PCI=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEFAULT_PERSIST=y
-# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
-# CONFIG_USB_OTG_WHITELIST is not set
-# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-CONFIG_USB_AUTOSUSPEND_DELAY=2
-# CONFIG_USB_MON is not set
-
-#
-# USB Host Controller Drivers
-#
-# CONFIG_ARCH_PHYTIUM_1500A_USB_RESET_PROBE_REMOVE is not set
-# CONFIG_USB_C67X00_HCD is not set
-CONFIG_USB_XHCI_HCD=y
-# CONFIG_USB_XHCI_DBGCAP is not set
-CONFIG_USB_XHCI_PCI=y
-# CONFIG_USB_XHCI_PLATFORM is not set
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_EHCI_PCI=y
-# CONFIG_USB_EHCI_FSL is not set
-# CONFIG_USB_EHCI_HCD_PLATFORM is not set
-# CONFIG_USB_OXU210HP_HCD is not set
-# CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_FOTG210_HCD is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PCI=y
-# CONFIG_USB_OHCI_HCD_PLATFORM is not set
-CONFIG_USB_UHCI_HCD=y
-# CONFIG_USB_SL811_HCD is not set
-# CONFIG_USB_R8A66597_HCD is not set
-# CONFIG_USB_HCD_TEST_MODE is not set
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-# CONFIG_USB_WDM is not set
-# CONFIG_USB_TMC is not set
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
-#
-
-#
-# also be needed; see USB_STORAGE Help for more info
-#
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_REALTEK=m
-CONFIG_REALTEK_AUTOPM=y
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_ISD200=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_STORAGE_ONETOUCH=m
-CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_STORAGE_CYPRESS_ATACB=m
-CONFIG_USB_STORAGE_ENE_UB6250=m
-# CONFIG_USB_UAS is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-# CONFIG_USBIP_CORE is not set
-# CONFIG_USB_CDNS3 is not set
-# CONFIG_USB_MUSB_HDRC is not set
-# CONFIG_USB_DWC3 is not set
-# CONFIG_USB_DWC2 is not set
-# CONFIG_USB_CHIPIDEA is not set
-# CONFIG_USB_ISP1760 is not set
-
-#
-# USB port drivers
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_SEVSEG is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_CYPRESS_CY7C63 is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_IDMOUSE is not set
-# CONFIG_USB_FTDI_ELAN is not set
-# CONFIG_USB_APPLEDISPLAY is not set
-# CONFIG_USB_SISUSBVGA is not set
-# CONFIG_USB_LD is not set
-# CONFIG_USB_TRANCEVIBRATOR is not set
-# CONFIG_USB_IOWARRIOR is not set
-# CONFIG_USB_TEST is not set
-# CONFIG_USB_EHSET_TEST_FIXTURE is not set
-# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_YUREX is not set
-# CONFIG_USB_EZUSB_FX2 is not set
-# CONFIG_USB_HUB_USB251XB is not set
-# CONFIG_USB_HSIC_USB3503 is not set
-# CONFIG_USB_HSIC_USB4604 is not set
-# CONFIG_USB_LINK_LAYER_TEST is not set
-# CONFIG_USB_CHAOSKEY is not set
-
-#
-# USB Physical Layer drivers
-#
-# CONFIG_NOP_USB_XCEIV is not set
-# CONFIG_USB_ISP1301 is not set
-# end of USB Physical Layer drivers
-
-# CONFIG_USB_GADGET is not set
-# CONFIG_TYPEC is not set
-# CONFIG_USB_ROLE_SWITCH is not set
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_ACCESSIBILITY is not set
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-# CONFIG_INFINIBAND_CXGB3 is not set
-# CONFIG_INFINIBAND_CXGB4 is not set
-# CONFIG_INFINIBAND_EFA is not set
-# CONFIG_INFINIBAND_I40IW is not set
-# CONFIG_MLX4_INFINIBAND is not set
-# CONFIG_MLX5_INFINIBAND is not set
-CONFIG_INFINIBAND_OCRDMA=m
-# CONFIG_INFINIBAND_USNIC is not set
-# CONFIG_INFINIBAND_BNXT_RE is not set
-# CONFIG_INFINIBAND_RDMAVT is not set
-# CONFIG_RDMA_RXE is not set
-# CONFIG_RDMA_SIW is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_CM is not set
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_SRPT=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_INFINIBAND_ISERT=m
-# CONFIG_INFINIBAND_OPA_VNIC is not set
-CONFIG_EDAC_ATOMIC_SCRUB=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EDAC=m
-CONFIG_EDAC_LEGACY_SYSFS=y
-# CONFIG_EDAC_DEBUG is not set
-CONFIG_EDAC_DECODE_MCE=m
-CONFIG_EDAC_AMD64=m
-# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
-CONFIG_EDAC_E752X=m
-CONFIG_EDAC_I82975X=m
-CONFIG_EDAC_I3000=m
-CONFIG_EDAC_I3200=m
-CONFIG_EDAC_IE31200=m
-CONFIG_EDAC_X38=m
-CONFIG_EDAC_I5400=m
-CONFIG_EDAC_I7CORE=m
-CONFIG_EDAC_I5000=m
-CONFIG_EDAC_I5100=m
-CONFIG_EDAC_I7300=m
-CONFIG_EDAC_SBRIDGE=m
-CONFIG_EDAC_SKX=m
-CONFIG_EDAC_I10NM=m
-CONFIG_EDAC_PND2=m
-CONFIG_RTC_LIB=y
-CONFIG_RTC_MC146818_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-CONFIG_RTC_SYSTOHC=y
-CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-CONFIG_RTC_NVMEM=y
-
-#
-# RTC interfaces
-#
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-# CONFIG_RTC_DRV_ABB5ZES3 is not set
-# CONFIG_RTC_DRV_ABEOZ9 is not set
-# CONFIG_RTC_DRV_ABX80X is not set
-# CONFIG_RTC_DRV_DS1307 is not set
-# CONFIG_RTC_DRV_DS1374 is not set
-# CONFIG_RTC_DRV_DS1672 is not set
-# CONFIG_RTC_DRV_MAX6900 is not set
-# CONFIG_RTC_DRV_RS5C372 is not set
-# CONFIG_RTC_DRV_ISL1208 is not set
-# CONFIG_RTC_DRV_ISL12022 is not set
-# CONFIG_RTC_DRV_X1205 is not set
-# CONFIG_RTC_DRV_PCF8523 is not set
-# CONFIG_RTC_DRV_PCF85063 is not set
-# CONFIG_RTC_DRV_PCF85363 is not set
-# CONFIG_RTC_DRV_PCF8563 is not set
-# CONFIG_RTC_DRV_PCF8583 is not set
-# CONFIG_RTC_DRV_M41T80 is not set
-# CONFIG_RTC_DRV_BQ32K is not set
-# CONFIG_RTC_DRV_S35390A is not set
-# CONFIG_RTC_DRV_FM3130 is not set
-# CONFIG_RTC_DRV_RX8010 is not set
-# CONFIG_RTC_DRV_RX8581 is not set
-# CONFIG_RTC_DRV_RX8025 is not set
-# CONFIG_RTC_DRV_EM3027 is not set
-# CONFIG_RTC_DRV_RV3028 is not set
-# CONFIG_RTC_DRV_RV8803 is not set
-# CONFIG_RTC_DRV_SD3078 is not set
-
-#
-# SPI RTC drivers
-#
-CONFIG_RTC_I2C_AND_SPI=m
-
-#
-# SPI and I2C RTC drivers
-#
-# CONFIG_RTC_DRV_DS3232 is not set
-# CONFIG_RTC_DRV_PCF2127 is not set
-# CONFIG_RTC_DRV_RV3029C2 is not set
-
-#
-# Platform RTC drivers
-#
-CONFIG_RTC_DRV_CMOS=y
-# CONFIG_RTC_DRV_DS1286 is not set
-# CONFIG_RTC_DRV_DS1511 is not set
-# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_DS1685_FAMILY is not set
-# CONFIG_RTC_DRV_DS1742 is not set
-# CONFIG_RTC_DRV_DS2404 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
-# CONFIG_RTC_DRV_M48T86 is not set
-# CONFIG_RTC_DRV_M48T35 is not set
-# CONFIG_RTC_DRV_M48T59 is not set
-# CONFIG_RTC_DRV_MSM6242 is not set
-# CONFIG_RTC_DRV_BQ4802 is not set
-# CONFIG_RTC_DRV_RP5C01 is not set
-# CONFIG_RTC_DRV_V3020 is not set
-
-#
-# on-CPU RTC drivers
-#
-# CONFIG_RTC_DRV_FTRTC010 is not set
-
-#
-# HID Sensor RTC drivers
-#
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-
-#
-# DMA Devices
-#
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DMA_ACPI=y
-# CONFIG_ALTERA_MSGDMA is not set
-CONFIG_INTEL_IDMA64=y
-CONFIG_INTEL_IOATDMA=y
-# CONFIG_QCOM_HIDMA_MGMT is not set
-# CONFIG_QCOM_HIDMA is not set
-CONFIG_DW_DMAC_CORE=y
-# CONFIG_DW_DMAC is not set
-# CONFIG_DW_DMAC_PCI is not set
-# CONFIG_DW_EDMA is not set
-# CONFIG_DW_EDMA_PCIE is not set
-CONFIG_HSU_DMA=y
-
-#
-# DMA Clients
-#
-# CONFIG_ASYNC_TX_DMA is not set
-# CONFIG_DMATEST is not set
-CONFIG_DMA_ENGINE_RAID=y
-
-#
-# DMABUF options
-#
-CONFIG_SYNC_FILE=y
-# CONFIG_SW_SYNC is not set
-# CONFIG_UDMABUF is not set
-# CONFIG_DMABUF_SELFTESTS is not set
-# end of DMABUF options
-
-CONFIG_DCA=y
-# CONFIG_AUXDISPLAY is not set
-CONFIG_UIO=m
-# CONFIG_UIO_CIF is not set
-CONFIG_UIO_PDRV_GENIRQ=m
-CONFIG_UIO_DMEM_GENIRQ=m
-# CONFIG_UIO_AEC is not set
-# CONFIG_UIO_SERCOS3 is not set
-CONFIG_UIO_PCI_GENERIC=m
-# CONFIG_UIO_NETX is not set
-# CONFIG_UIO_PRUSS is not set
-# CONFIG_UIO_MF624 is not set
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO_VIRQFD=m
-CONFIG_VFIO=m
-# CONFIG_VFIO_NOIOMMU is not set
-CONFIG_VFIO_PCI=m
-# CONFIG_VFIO_PCI_VGA is not set
-CONFIG_VFIO_PCI_MMAP=y
-CONFIG_VFIO_PCI_INTX=y
-CONFIG_VFIO_PCI_IGD=y
-CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
-CONFIG_IRQ_BYPASS_MANAGER=m
-CONFIG_VIRT_DRIVERS=y
-CONFIG_VBOXGUEST=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_MENU=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_PMEM=m
-CONFIG_VIRTIO_BALLOON=m
-# CONFIG_VIRTIO_INPUT is not set
-CONFIG_VIRTIO_MMIO=y
-# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
-
-#
-# Microsoft Hyper-V guest support
-#
-# CONFIG_HYPERV is not set
-# end of Microsoft Hyper-V guest support
-
-# CONFIG_GREYBUS is not set
-CONFIG_STAGING=y
-# CONFIG_COMEDI is not set
-# CONFIG_RTS5208 is not set
-# CONFIG_FB_SM750 is not set
-
-#
-# Speakup console speech
-#
-# CONFIG_SPEAKUP is not set
-# end of Speakup console speech
-
-# CONFIG_STAGING_MEDIA is not set
-
-#
-# Android
-#
-CONFIG_ASHMEM=y
-# CONFIG_ANDROID_VSOC is not set
-# CONFIG_ION is not set
-# end of Android
-
-# CONFIG_LTE_GDM724X is not set
-# CONFIG_GS_FPGABOOT is not set
-# CONFIG_UNISYSSPAR is not set
-# CONFIG_MOST is not set
-
-#
-# Gasket devices
-#
-# CONFIG_STAGING_GASKET_FRAMEWORK is not set
-# end of Gasket devices
-
-# CONFIG_FIELDBUS_DEV is not set
-# CONFIG_KPC2000 is not set
-# CONFIG_USB_WUSB_CBAF is not set
-# CONFIG_UWB is not set
-# CONFIG_EXFAT_FS is not set
-CONFIG_QLGE=m
-# CONFIG_X86_PLATFORM_DEVICES is not set
-CONFIG_PMC_ATOM=y
-# CONFIG_MFD_CROS_EC is not set
-# CONFIG_CHROME_PLATFORMS is not set
-# CONFIG_MELLANOX_PLATFORM is not set
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_COMMON_CLK=y
-
-#
-# Common Clock Framework
-#
-# CONFIG_COMMON_CLK_MAX9485 is not set
-# CONFIG_COMMON_CLK_SI5341 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI544 is not set
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CS2000_CP is not set
-# end of Common Clock Framework
-
-# CONFIG_HWSPINLOCK is not set
-
-#
-# Clock Source drivers
-#
-CONFIG_CLKEVT_I8253=y
-CONFIG_I8253_LOCK=y
-CONFIG_CLKBLD_I8253=y
-# end of Clock Source drivers
-
-CONFIG_MAILBOX=y
-CONFIG_PCC=y
-# CONFIG_ALTERA_MBOX is not set
-CONFIG_IOMMU_IOVA=y
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-
-#
-# Generic IOMMU Pagetable Support
-#
-# end of Generic IOMMU Pagetable Support
-
-# CONFIG_IOMMU_DEBUGFS is not set
-# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
-CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_V2=m
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_SVM=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_IRQ_REMAP=y
-# CONFIG_SMMU_BYPASS_DEV is not set
-
-#
-# Remoteproc drivers
-#
-# CONFIG_REMOTEPROC is not set
-# end of Remoteproc drivers
-
-#
-# Rpmsg drivers
-#
-# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
-# CONFIG_RPMSG_VIRTIO is not set
-# end of Rpmsg drivers
-
-# CONFIG_SOUNDWIRE is not set
-
-#
-# SOC (System On Chip) specific Drivers
-#
-
-#
-# Amlogic SoC drivers
-#
-# end of Amlogic SoC drivers
-
-#
-# Aspeed SoC drivers
-#
-# end of Aspeed SoC drivers
-
-#
-# Broadcom SoC drivers
-#
-# end of Broadcom SoC drivers
-
-#
-# NXP/Freescale QorIQ SoC drivers
-#
-# end of NXP/Freescale QorIQ SoC drivers
-
-#
-# i.MX SoC drivers
-#
-# end of i.MX SoC drivers
-
-#
-# Qualcomm SoC drivers
-#
-# end of Qualcomm SoC drivers
-
-# CONFIG_SOC_TI is not set
-
-#
-# Xilinx SoC drivers
-#
-# CONFIG_XILINX_VCU is not set
-# end of Xilinx SoC drivers
-# end of SOC (System On Chip) specific Drivers
-
-CONFIG_PM_DEVFREQ=y
-
-#
-# DEVFREQ Governors
-#
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set
-# CONFIG_DEVFREQ_GOV_POWERSAVE is not set
-# CONFIG_DEVFREQ_GOV_USERSPACE is not set
-# CONFIG_DEVFREQ_GOV_PASSIVE is not set
-
-#
-# DEVFREQ Drivers
-#
-# CONFIG_PM_DEVFREQ_EVENT is not set
-# CONFIG_EXTCON is not set
-# CONFIG_MEMORY is not set
-# CONFIG_IIO is not set
-# CONFIG_NTB is not set
-# CONFIG_VME_BUS is not set
-# CONFIG_PWM is not set
-
-#
-# IRQ chip support
-#
-# end of IRQ chip support
-
-# CONFIG_IPACK_BUS is not set
-# CONFIG_RESET_CONTROLLER is not set
-
-#
-# PHY Subsystem
-#
-CONFIG_GENERIC_PHY=y
-# CONFIG_BCM_KONA_USB2_PHY is not set
-# CONFIG_PHY_PXA_28NM_HSIC is not set
-# CONFIG_PHY_PXA_28NM_USB2 is not set
-# end of PHY Subsystem
-
-# CONFIG_POWERCAP is not set
-# CONFIG_MCB is not set
-
-#
-# Performance monitor support
-#
-# end of Performance monitor support
-
-CONFIG_RAS=y
-# CONFIG_RAS_CEC is not set
-# CONFIG_THUNDERBOLT is not set
-
-#
-# Android
-#
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-# CONFIG_ANDROID_BINDERFS is not set
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
-# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
-# end of Android
-
-CONFIG_LIBNVDIMM=y
-CONFIG_BLK_DEV_PMEM=y
-CONFIG_ND_BLK=y
-CONFIG_ND_CLAIM=y
-CONFIG_ND_BTT=y
-CONFIG_BTT=y
-CONFIG_ND_PFN=y
-CONFIG_NVDIMM_PFN=y
-CONFIG_NVDIMM_DAX=y
-CONFIG_NVDIMM_KEYS=y
-CONFIG_DAX_DRIVER=y
-CONFIG_DAX=y
-CONFIG_DEV_DAX=m
-CONFIG_DEV_DAX_PMEM=m
-CONFIG_DEV_DAX_KMEM=m
-CONFIG_DEV_DAX_PMEM_COMPAT=m
-CONFIG_NVMEM=y
-CONFIG_NVMEM_SYSFS=y
-
-#
-# HW tracing support
-#
-# CONFIG_STM is not set
-# CONFIG_INTEL_TH is not set
-# end of HW tracing support
-
-# CONFIG_FPGA is not set
-CONFIG_PM_OPP=y
-# CONFIG_UNISYS_VISORBUS is not set
-# CONFIG_SIOX is not set
-# CONFIG_SLIMBUS is not set
-# CONFIG_INTERCONNECT is not set
-# CONFIG_COUNTER is not set
-# end of Device Drivers
-
-#
-# File systems
-#
-CONFIG_DCACHE_WORD_ACCESS=y
-# CONFIG_VALIDATE_FS_PARSER is not set
-CONFIG_FS_IOMAP=y
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-# CONFIG_EXT4_DEBUG is not set
-CONFIG_JBD2=y
-# CONFIG_JBD2_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-# CONFIG_XFS_ONLINE_SCRUB is not set
-CONFIG_XFS_WARN=y
-# CONFIG_XFS_DEBUG is not set
-CONFIG_GFS2_FS=m
-CONFIG_OCFS2_FS=m
-CONFIG_OCFS2_FS_O2CB=m
-CONFIG_OCFS2_FS_STATS=y
-CONFIG_OCFS2_DEBUG_MASKLOG=y
-# CONFIG_OCFS2_DEBUG_FS is not set
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
-# CONFIG_BTRFS_DEBUG is not set
-# CONFIG_BTRFS_ASSERT is not set
-# CONFIG_BTRFS_FS_REF_VERIFY is not set
-CONFIG_NILFS2_FS=m
-# CONFIG_F2FS_FS is not set
-CONFIG_FS_DAX=y
-CONFIG_FS_DAX_PMD=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-CONFIG_EXPORTFS_BLOCK_OPS=y
-CONFIG_FILE_LOCKING=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_FS_ENCRYPTION=y
-# CONFIG_FS_VERITY is not set
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_FANOTIFY=y
-# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_PRINT_QUOTA_WARNING=y
-# CONFIG_QUOTA_DEBUG
is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS4_FS=m -CONFIG_AUTOFS_FS=m -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_VIRTIO_FS=m -CONFIG_OVERLAY_FS=m -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set -# CONFIG_OVERLAY_FS_INDEX is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set - -# -# Caches -# -CONFIG_FSCACHE=m -# CONFIG_FSCACHE_STATS is not set -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -# CONFIG_CACHEFILES is not set -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -CONFIG_NTFS_RW=y -# end of DOS/FAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_CHILDREN is not set -CONFIG_PROC_PID_ARCH_STATUS=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_MEMFD_CREATE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=m -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -# CONFIG_SQUASHFS_ZLIB is not set -# CONFIG_SQUASHFS_LZ4 is not set -# CONFIG_SQUASHFS_LZO is not set -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFLATE_COMPRESS=m -CONFIG_PSTORE_LZO_COMPRESS=m -# CONFIG_PSTORE_LZ4_COMPRESS is not set -# CONFIG_PSTORE_LZ4HC_COMPRESS is not set -# CONFIG_PSTORE_842_COMPRESS is not set -# CONFIG_PSTORE_ZSTD_COMPRESS is not set -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y -# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set -CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" -# CONFIG_PSTORE_CONSOLE is not set -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -# CONFIG_PSTORE_RAM is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EROFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V2=m -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m 
-CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -CONFIG_NFSD_BLOCKLAYOUT=y -CONFIG_NFSD_SCSILAYOUT=y -CONFIG_NFSD_FLEXFILELAYOUT=y -# CONFIG_NFSD_V4_SECURITY_LABEL is not set -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -# CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=m -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -# CONFIG_CIFS_WEAK_PW_HASH is not set -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -# CONFIG_CIFS_DEBUG is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SMB_DIRECT is not set -CONFIG_CIFS_FSCACHE=y -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set -# CONFIG_UNICODE is not set -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -CONFIG_KEYS_COMPAT=y -# CONFIG_KEYS_REQUEST_CACHE is not set -# CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_BIG_KEYS is not set -# CONFIG_TRUSTED_KEYS is not set -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y 
-CONFIG_PAGE_TABLE_ISOLATION=y -# CONFIG_SECURITY_INFINIBAND is not set -# CONFIG_SECURITY_NETWORK_XFRM is not set -CONFIG_SECURITY_PATH=y -# CONFIG_INTEL_TXT is not set -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -# CONFIG_HARDENED_USERCOPY is not set -# CONFIG_FORTIFY_SOURCE is not set -# CONFIG_STATIC_USERMODEHELPER is not set -# CONFIG_SECURITY_SELINUX is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -CONFIG_SECURITY_APPARMOR=y -CONFIG_SECURITY_APPARMOR_HASH=y -CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y -# CONFIG_SECURITY_APPARMOR_DEBUG is not set -# CONFIG_SECURITY_LOADPIN is not set -# CONFIG_SECURITY_YAMA is not set -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_INTEGRITY is not set -# CONFIG_DEFAULT_SECURITY_APPARMOR is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -# end of Memory initialization -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=y -CONFIG_ASYNC_CORE=y -CONFIG_ASYNC_MEMCPY=y -CONFIG_ASYNC_XOR=y -CONFIG_ASYNC_PQ=y -CONFIG_ASYNC_RAID6_RECOV=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_SIMD=m -CONFIG_CRYPTO_GLUE_HELPER_X86=m -CONFIG_CRYPTO_ENGINE=m - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -# CONFIG_CRYPTO_ECDH is not set -# CONFIG_CRYPTO_ECRDSA is not set - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -# CONFIG_CRYPTO_CHACHA20POLY1305 is not set -# CONFIG_CRYPTO_AEGIS128 is not set -# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=m - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_CFB is not set -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -# CONFIG_CRYPTO_OFB is not set -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_KEYWRAP is not set -# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set -# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_ESSIV=m - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRC32_PCLMUL=m -# CONFIG_CRYPTO_XXHASH is not set -CONFIG_CRYPTO_CRCT10DIF=y -# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set -CONFIG_CRYPTO_GHASH=m -# CONFIG_CRYPTO_POLY1305 is not set -# CONFIG_CRYPTO_POLY1305_X86_64 is not set -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m 
-CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_SSSE3=m -CONFIG_CRYPTO_SHA256_SSSE3=m -CONFIG_CRYPTO_SHA512_SSSE3=m -CONFIG_CRYPTO_LIB_SHA256=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_SHA3 is not set -# CONFIG_CRYPTO_SM3 is not set -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m - -# -# Ciphers -# -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_AES_NI_INTEL=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_BLOWFISH_X86_64=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAMELLIA_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST5_AVX_X86_64=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_CAST6_AVX_X86_64=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_DES=m -# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -# CONFIG_CRYPTO_CHACHA20 is not set -# CONFIG_CRYPTO_CHACHA20_X86_64 is not set -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -# CONFIG_CRYPTO_SM4 is not set -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_X86_64=m -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -# CONFIG_CRYPTO_LZ4 is not set -# CONFIG_CRYPTO_LZ4HC is not set -# CONFIG_CRYPTO_ZSTD is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -# CONFIG_CRYPTO_DRBG_HASH is not set -# CONFIG_CRYPTO_DRBG_CTR is not set -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -# CONFIG_CRYPTO_USER_API_RNG is not set -# CONFIG_CRYPTO_USER_API_AEAD is not set -# CONFIG_CRYPTO_STATS is not set -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_CCP is not set -CONFIG_CRYPTO_DEV_QAT=m -CONFIG_CRYPTO_DEV_QAT_DH895xCC=m -CONFIG_CRYPTO_DEV_QAT_C3XXX=m -CONFIG_CRYPTO_DEV_QAT_C62X=m -CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m -CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m -CONFIG_CRYPTO_DEV_QAT_C62XVF=m -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -# CONFIG_CRYPTO_DEV_CHELSIO is not set -# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set -CONFIG_CRYPTO_DEV_VIRTIO=m -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y - -# -# Certificates for signature checking -# -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=y -CONFIG_RAID6_PQ_BENCHMARK=y -# CONFIG_PACKING is not set 
-CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_CORDIC=m -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=m -# CONFIG_CRC4 is not set -# CONFIG_CRC7 is not set -CONFIG_LIBCRC32C=y -CONFIG_CRC8=m -CONFIG_XXHASH=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=m -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -# CONFIG_XZ_DEC_IA64 is not set -# CONFIG_XZ_DEC_ARM is not set -# CONFIG_XZ_DEC_ARMTHUMB is not set -# CONFIG_XZ_DEC_SPARC is not set -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y -CONFIG_SWIOTLB=y -# CONFIG_DMA_API_DEBUG is not set -CONFIG_SGL_ALLOC=y -CONFIG_IOMMU_HELPER=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPUMASK_OFFSTACK=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_DIMLIB=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_UACCESS_MCSAFE=y -CONFIG_ARCH_STACKWALK=y -CONFIG_SBITMAP=y -# CONFIG_STRING_SELFTEST is not set -# end of Library routines - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y -# end of printk and dmesg options - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_BTF is not set -# CONFIG_GDB_SCRIPTS is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_INSTALL is not set -CONFIG_OPTIMIZE_INLINING=y -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_FRAME_POINTER=y -CONFIG_STACK_VALIDATION=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Memory Debugging -# -# 
CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_CC_HAS_KASAN_GENERIC=y -# CONFIG_KASAN is not set -CONFIG_KASAN_STACK=1 -# end of Memory Debugging - -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -CONFIG_DEBUG_SHIRQ=y - -# -# Debug Lockups and Hangs -# -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -CONFIG_HARDLOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -# CONFIG_WQ_WATCHDOG is not set -# end of Debug Lockups and Hangs - -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -# CONFIG_SCHED_STACK_END_CHECK is not set -# CONFIG_DEBUG_TIMEKEEPING is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) 
- -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_RCU_PERF_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -# CONFIG_FAULT_INJECTION is not set -CONFIG_LATENCYTOP=y -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_RING_BUFFER_ALLOW_SWAP=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -CONFIG_SCHED_TRACER=y -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_FUNCTION_PROFILER=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_STRSCPY is not set -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_BITFIELD is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_XARRAY is not set -# CONFIG_TEST_OVERFLOW is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_USER_COPY is not set -CONFIG_TEST_BPF=m -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is 
not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_LIVEPATCH is not set -# CONFIG_TEST_STACKINIT is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_MEMTEST is not set -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_UBSAN_ALIGNMENT=y -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_EARLY_PRINTK_USB=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -# CONFIG_EARLY_PRINTK_USB_XDBC is not set -# CONFIG_X86_PTDUMP is not set -# CONFIG_EFI_PGT_DUMP is not set -# CONFIG_DEBUG_WX is not set -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_X86_DECODER_SELFTEST=y -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -CONFIG_X86_DEBUG_FPU=y -# CONFIG_PUNIT_ATOM_DEBUG is not set -# CONFIG_UNWINDER_ORC is not set -CONFIG_UNWINDER_FRAME_POINTER=y -# CONFIG_UNWINDER_GUESS is not set -# end of Kernel hacking diff --git a/package/default/config.default_kasan b/package/default/config.default_kasan index de4d63d7ab42..4590481565e2 100644 --- a/package/default/config.default_kasan +++ b/package/default/config.default_kasan @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 5.4.32-1 Kernel Configuration +# Linux/x86 5.4.119-1 Kernel Configuration # # @@ -396,6 +396,7 @@ CONFIG_X86_DIRECT_GBPAGES=y CONFIG_AMD_MEM_ENCRYPT=y # CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set CONFIG_NUMA=y +CONFIG_NUMA_AWARE_SPINLOCKS=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y CONFIG_NODES_SPAN_OTHER_NODES=y @@ -1761,7 +1762,7 @@ CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=y # CONFIG_BLK_DEV_SKD is not set CONFIG_BLK_DEV_SX8=y -CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_CDROM_PKTCDVD=m @@ -2246,7 +2247,7 @@ CONFIG_NET_VENDOR_ALACRITECH=y # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set +CONFIG_ENA_ETHERNET=m # CONFIG_NET_VENDOR_AMD is not set CONFIG_NET_VENDOR_AQUANTIA=y # CONFIG_AQTION is not set @@ -4788,7 +4789,7 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_INFO_REDUCED is not set # CONFIG_DEBUG_INFO_SPLIT is not set # CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_BTF is not set +CONFIG_DEBUG_INFO_BTF=y # CONFIG_GDB_SCRIPTS is not set CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 diff --git a/package/default/config.default_nosign b/package/default/config.default_nosign deleted file mode 100644 index f58f31116dec..000000000000 --- a/package/default/config.default_nosign +++ /dev/null @@ -1,5047 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86 5.4.32-1 Kernel Configuration -# - -# -# Compiler: gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5) -# -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=80301 -CONFIG_CLANG_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_HAS_ASM_GOTO=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-tlinux4-0001" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_USELIB=y -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y -CONFIG_GENERIC_IRQ_RESERVATION_MODE=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_ARCH_CLOCKSOURCE_INIT=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -# CONFIG_PSI is not set -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -# end of RCU Subsystem - -CONFIG_BUILD_BIN2C=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=19 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y - -# -# Scheduler features -# -# CONFIG_UCLAMP_TASK is not set -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y 
-CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -# CONFIG_CGROUP_RDMA is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -# CONFIG_CGROUP_BPF is not set -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -# CONFIG_USER_NS is not set -CONFIG_PID_NS=y -CONFIG_NET_NS=y -# CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -# CONFIG_RD_BZIP2 is not set -CONFIG_RD_LZMA=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -CONFIG_RD_LZ4=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_BPF_SYSCALL=y -# CONFIG_BPF_JIT_ALWAYS_ON is not set -CONFIG_USERFAULTFD=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_RSEQ=y -# CONFIG_DEBUG_RSEQ is not set -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -CONFIG_SLUB_MEMCG_SYSFS_ON=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -# CONFIG_SLAB_MERGE_DEFAULT is not set -# CONFIG_SLAB_FREELIST_RANDOM is not set -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set -CONFIG_SLUB_CPU_PARTIAL=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -# end of General setup - -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_FILTER_PGPROT=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y 
-CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=5 -CONFIG_CC_HAS_SANE_STACKPROTECTOR=y - -# -# Processor type and features -# -CONFIG_ZONE_DMA=y -CONFIG_SMP=y -CONFIG_X86_FEATURE_NAMES=y -CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y -# CONFIG_GOLDFISH is not set -# CONFIG_RETPOLINE is not set -CONFIG_X86_CPU_RESCTRL=y -CONFIG_X86_EXTENDED_PLATFORM=y -CONFIG_X86_NUMACHIP=y -# CONFIG_X86_VSMP is not set -# CONFIG_X86_UV is not set -# CONFIG_X86_GOLDFISH is not set -# CONFIG_X86_INTEL_LPSS is not set -CONFIG_X86_AMD_PLATFORM_DEVICE=y -CONFIG_IOSF_MBI=m -# CONFIG_IOSF_MBI_DEBUG is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_PARAVIRT_SPINLOCKS=y -# CONFIG_XEN is not set -CONFIG_KVM_GUEST=y -CONFIG_ARCH_CPUIDLE_HALTPOLL=y -CONFIG_PVH=y -# CONFIG_KVM_DEBUG_FS is not set -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_PARAVIRT_CLOCK=y -CONFIG_JAILHOUSE_GUEST=y -# CONFIG_ACRN_GUEST is not set -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -# CONFIG_PROCESSOR_SELECT is not set -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_HYGON=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_ZHAOXIN=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -# CONFIG_CALGARY_IOMMU is not set -CONFIG_MAXSMP=y -CONFIG_NR_CPUS_RANGE_BEGIN=8192 -CONFIG_NR_CPUS_RANGE_END=8192 -CONFIG_NR_CPUS_DEFAULT=8192 -CONFIG_NR_CPUS=8192 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_MC_PRIO=y -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCELOG_LEGACY=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -CONFIG_X86_MCE_INJECT=m -CONFIG_X86_THERMAL_VECTOR=y - -# -# Performance monitoring -# -CONFIG_PERF_EVENTS_INTEL_UNCORE=y -CONFIG_PERF_EVENTS_INTEL_RAPL=y -CONFIG_PERF_EVENTS_INTEL_CSTATE=y -CONFIG_PERF_EVENTS_AMD_POWER=y -# end of Performance monitoring - -CONFIG_X86_16BIT=y -CONFIG_X86_ESPFIX64=y -CONFIG_X86_VSYSCALL_EMULATION=y -# CONFIG_I8K is not set -CONFIG_MICROCODE=y -CONFIG_MICROCODE_INTEL=y -CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -CONFIG_X86_5LEVEL=y -CONFIG_X86_DIRECT_GBPAGES=y -# CONFIG_X86_CPA_STATISTICS is not set -CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NODES_SPAN_OTHER_NODES=y -# CONFIG_NUMA_EMU is not set -CONFIG_NODES_SHIFT=10 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -# CONFIG_X86_PMEM_LEGACY is not set -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_ARCH_RANDOM=y -CONFIG_X86_SMAP=y -CONFIG_X86_INTEL_UMIP=y -CONFIG_X86_INTEL_MPX=y 
-CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -CONFIG_X86_INTEL_TSX_MODE_OFF=y -# CONFIG_X86_INTEL_TSX_MODE_ON is not set -# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set -CONFIG_EFI=y -CONFIG_EFI_STUB=y -# CONFIG_EFI_MIXED is not set -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_ARCH_HAS_KEXEC_PURGATORY=y -# CONFIG_KEXEC_SIG is not set -CONFIG_CRASH_DUMP=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -# CONFIG_RANDOMIZE_BASE is not set -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_DYNAMIC_MEMORY_LAYOUT=y -CONFIG_HOTPLUG_CPU=y -# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set -# CONFIG_DEBUG_HOTPLUG_CPU0 is not set -CONFIG_COMPAT_VDSO=y -CONFIG_LEGACY_VSYSCALL_EMULATE=y -# CONFIG_LEGACY_VSYSCALL_XONLY is not set -# CONFIG_LEGACY_VSYSCALL_NONE is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_MODIFY_LDT_SYSCALL=y -CONFIG_HAVE_LIVEPATCH=y -CONFIG_LIVEPATCH=y -# end of Processor type and features - -CONFIG_ARCH_HAS_ADD_PAGES=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y - -# -# Power management and ACPI options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -# CONFIG_SUSPEND_SKIP_SYNC is not set -# CONFIG_HIBERNATION is not set -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_LPIT=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_PROCFS_POWER=y -CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y -CONFIG_ACPI_EC_DEBUGFS=m -# CONFIG_ACPI_AC is not set -# CONFIG_ACPI_BATTERY is not set -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -# CONFIG_ACPI_FAN is not set -# CONFIG_ACPI_TAD is not set -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_CSTATE=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=y -CONFIG_ACPI_NUMA=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=m -CONFIG_ACPI_BGRT=y -# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set -CONFIG_ACPI_NFIT=y -# CONFIG_NFIT_SECURITY_DEBUG is not set -CONFIG_ACPI_HMAT=y -CONFIG_HAVE_ACPI_APEI=y -CONFIG_HAVE_ACPI_APEI_NMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m -CONFIG_ACPI_APEI_ERST_DEBUG=m -# CONFIG_DPTF_POWER is not set -CONFIG_ACPI_EXTLOG=m -CONFIG_ACPI_ADXL=y -# CONFIG_PMIC_OPREGION is not set -# CONFIG_ACPI_CONFIGFS is not set -CONFIG_X86_PM_TIMER=y -# CONFIG_SFI is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -# CONFIG_CPU_FREQ_STAT is not set 
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -CONFIG_X86_INTEL_PSTATE=y -CONFIG_X86_PCC_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ_CPB=y -CONFIG_X86_POWERNOW_K8=m -CONFIG_X86_AMD_FREQ_SENSITIVITY=m -CONFIG_X86_SPEEDSTEP_CENTRINO=m -CONFIG_X86_P4_CLOCKMOD=m - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=m -# end of CPU Frequency scaling - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set -CONFIG_HALTPOLL_CPUIDLE=y -# end of CPU Idle - -CONFIG_INTEL_IDLE=y -# end of Power management and ACPI options - -# -# Bus options (PCI etc.) -# -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_MMCONF_FAM10H=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -# CONFIG_ISA_BUS is not set -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -# CONFIG_X86_SYSFB is not set -# end of Bus options (PCI etc.) - -# -# Binary Emulations -# -CONFIG_IA32_EMULATION=y -# CONFIG_X86_X32 is not set -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -# end of Binary Emulations - -CONFIG_X86_DEV_DMA_OPS=y - -# -# Firmware Drivers -# -# CONFIG_EDD is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -# CONFIG_FW_CFG_SYSFS is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=y -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_RUNTIME_MAP=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_WRAPPERS=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_APPLE_PROPERTIES is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -# CONFIG_EFI_RCI2_TABLE is not set -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y -CONFIG_EFI_EARLYCON=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_NO_POLL=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -# CONFIG_KVM_MMU_AUDIT is not set -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# General architecture-dependent options -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HOTPLUG_SMT=y -CONFIG_OPROFILE=m -CONFIG_OPROFILE_EVENT_MULTIPLEX=y -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -CONFIG_KPROBES=y -# CONFIG_JUMP_LABEL is not set -CONFIG_OPTPROBES=y 
-CONFIG_KPROBES_ON_FTRACE=y -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=28 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_HAVE_RELIABLE_STACKTRACE=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_64BIT_TIME=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -# CONFIG_REFCOUNT_FULL is not set -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_HAS_MEM_ENCRYPT=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_PLUGIN_HOSTCC="" -CONFIG_HAVE_GCC_PLUGINS=y -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_MODULE_SIG is not set -# CONFIG_MODULE_COMPRESS is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set 
-CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_RQ_ALLOC_TIME=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_CMDLINE_PARSER is not set -CONFIG_BLK_WBT=y -CONFIG_BLK_CGROUP_IOLATENCY=y -CONFIG_BLK_CGROUP_IOCOST=y -CONFIG_BLK_WBT_MQ=y -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y -CONFIG_BLK_PM=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m -CONFIG_TRANSPARENT_HUGEPAGE=y -# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -# CONFIG_CMA is not 
set -# CONFIG_ZPOOL is not set -# CONFIG_ZBUD is not set -# CONFIG_ZSMALLOC is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_PTE_DEVMAP=y -CONFIG_ZONE_DEVICE=y -CONFIG_DEV_PAGEMAP_OPS=y -# CONFIG_DEVICE_PRIVATE is not set -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -# CONFIG_READ_ONLY_THP_FOR_FS is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -# CONFIG_TLS_DEVICE is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_INTERFACE is not set -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -# CONFIG_SMC is not set -CONFIG_XDP_SOCKETS=y -# CONFIG_XDP_SOCKETS_DIAG is not set -CONFIG_TOA=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_INET_ESP_OFFLOAD is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -# CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -# CONFIG_INET6_ESP_OFFLOAD is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -# CONFIG_IPV6_VTI is not set -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -# CONFIG_IPV6_SUBTREES is not set -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=m 
-CONFIG_NETFILTER_FAMILY_BRIDGE=y
-CONFIG_NETFILTER_FAMILY_ARP=y
-CONFIG_NETFILTER_NETLINK_ACCT=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NETFILTER_NETLINK_OSF=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
-CONFIG_NETFILTER_CONNCOUNT=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_ZONES=y
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-# CONFIG_NF_CONNTRACK_TIMEOUT is not set
-CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CONNTRACK_LABELS=y
-# CONFIG_NF_CT_PROTO_DCCP is not set
-CONFIG_NF_CT_PROTO_GRE=y
-# CONFIG_NF_CT_PROTO_SCTP is not set
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_BROADCAST=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_SNMP=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_AMANDA=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_SIP=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_REDIRECT=y
-CONFIG_NF_NAT_MASQUERADE=y
-CONFIG_NETFILTER_SYNPROXY=m
-CONFIG_NF_TABLES=m
-CONFIG_NF_TABLES_SET=m
-CONFIG_NF_TABLES_INET=y
-CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_NUMGEN=m
-CONFIG_NFT_CT=m
-CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_COUNTER=m
-CONFIG_NFT_CONNLIMIT=m
-CONFIG_NFT_LOG=m
-CONFIG_NFT_LIMIT=m
-CONFIG_NFT_MASQ=m
-CONFIG_NFT_REDIR=m
-CONFIG_NFT_NAT=m
-CONFIG_NFT_TUNNEL=m
-CONFIG_NFT_OBJREF=m
-CONFIG_NFT_QUEUE=m
-CONFIG_NFT_QUOTA=m
-CONFIG_NFT_REJECT=m
-CONFIG_NFT_REJECT_INET=m
-CONFIG_NFT_COMPAT=m
-CONFIG_NFT_HASH=m
-CONFIG_NFT_FIB=m
-CONFIG_NFT_FIB_INET=m
-CONFIG_NFT_XFRM=m
-CONFIG_NFT_SOCKET=m
-CONFIG_NFT_OSF=m
-CONFIG_NFT_TPROXY=m
-CONFIG_NFT_SYNPROXY=m
-CONFIG_NF_DUP_NETDEV=m
-CONFIG_NFT_DUP_NETDEV=m
-CONFIG_NFT_FWD_NETDEV=m
-CONFIG_NFT_FIB_NETDEV=m
-CONFIG_NF_FLOW_TABLE_INET=m
-CONFIG_NF_FLOW_TABLE=m
-CONFIG_NETFILTER_XTABLES=y
-
-#
-# Xtables combined modules
-#
-CONFIG_NETFILTER_XT_MARK=m
-CONFIG_NETFILTER_XT_CONNMARK=m
-CONFIG_NETFILTER_XT_SET=m
-
-#
-# Xtables targets
-#
-CONFIG_NETFILTER_XT_TARGET_AUDIT=m
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CT=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_HL=m
-CONFIG_NETFILTER_XT_TARGET_HMARK=m
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
-CONFIG_NETFILTER_XT_TARGET_LOG=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_NAT=m
-CONFIG_NETFILTER_XT_TARGET_NETMAP=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-CONFIG_NETFILTER_XT_TARGET_RATEEST=m
-CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
-CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
-CONFIG_NETFILTER_XT_TARGET_TEE=m
-# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-
-#
-# Xtables matches
-#
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
-CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NETFILTER_XT_MATCH_CGROUP=m
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_CPU=m
-# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ECN=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_HL=m
-# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
-CONFIG_NETFILTER_XT_MATCH_L2TP=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_NFACCT=m
-CONFIG_NETFILTER_XT_MATCH_OSF=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-# end of Core Netfilter Configuration
-
-CONFIG_IP_SET=m
-CONFIG_IP_SET_MAX=256
-CONFIG_IP_SET_BITMAP_IP=m
-CONFIG_IP_SET_BITMAP_IPMAC=m
-CONFIG_IP_SET_BITMAP_PORT=m
-CONFIG_IP_SET_HASH_IP=m
-CONFIG_IP_SET_HASH_IPMARK=m
-CONFIG_IP_SET_HASH_IPPORT=m
-CONFIG_IP_SET_HASH_IPPORTIP=m
-CONFIG_IP_SET_HASH_IPPORTNET=m
-# CONFIG_IP_SET_HASH_IPMAC is not set
-# CONFIG_IP_SET_HASH_MAC is not set
-# CONFIG_IP_SET_HASH_NETPORTNET is not set
-CONFIG_IP_SET_HASH_NET=m
-# CONFIG_IP_SET_HASH_NETNET is not set
-CONFIG_IP_SET_HASH_NETPORT=m
-CONFIG_IP_SET_HASH_NETIFACE=m
-CONFIG_IP_SET_LIST_SET=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_AH_ESP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-# CONFIG_IP_VS_PROTO_SCTP is not set
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_FO=m
-CONFIG_IP_VS_OVF=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_MH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS SH scheduler
-#
-CONFIG_IP_VS_SH_TAB_BITS=8
-
-#
-# IPVS MH scheduler
-#
-CONFIG_IP_VS_MH_TAB_INDEX=12
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IP_VS_NFCT=y
-CONFIG_IP_VS_PE_SIP=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
-CONFIG_NF_TPROXY_IPV4=m
-CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_REJECT_IPV4=m
-CONFIG_NFT_DUP_IPV4=m
-CONFIG_NFT_FIB_IPV4=m
-CONFIG_NF_TABLES_ARP=y
-CONFIG_NF_FLOW_TABLE_IPV4=m
-CONFIG_NF_DUP_IPV4=m
-CONFIG_NF_LOG_ARP=m
-CONFIG_NF_LOG_IPV4=m
-CONFIG_NF_REJECT_IPV4=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PPTP=m
-CONFIG_NF_NAT_H323=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-# CONFIG_IP_NF_TARGET_SYNPROXY is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-# CONFIG_IP_NF_TARGET_NETMAP is not set
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# end of IP: Netfilter Configuration
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_NF_SOCKET_IPV6=m
-CONFIG_NF_TPROXY_IPV6=m
-CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_REJECT_IPV6=m
-CONFIG_NFT_DUP_IPV6=m
-CONFIG_NFT_FIB_IPV6=m
-CONFIG_NF_FLOW_TABLE_IPV6=m
-CONFIG_NF_DUP_IPV6=m
-CONFIG_NF_REJECT_IPV6=m
-CONFIG_NF_LOG_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RT=m
-# CONFIG_IP6_NF_MATCH_SRH is not set
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-# CONFIG_IP6_NF_TARGET_NPT is not set
-# end of IPv6: Netfilter Configuration
-
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NFT_BRIDGE_META=m
-CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
-# CONFIG_NF_CONNTRACK_BRIDGE is not set
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-# CONFIG_BPFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_RDS is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-CONFIG_L2TP=m
-CONFIG_L2TP_DEBUGFS=m
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=m
-CONFIG_L2TP_ETH=m
-CONFIG_STP=y
-CONFIG_GARP=y
-CONFIG_MRP=y
-CONFIG_BRIDGE=y
-CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_BRIDGE_VLAN_FILTERING=y
-CONFIG_HAVE_NET_DSA=y
-# CONFIG_NET_DSA is not set
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_VLAN_8021Q_MVRP=y
-# CONFIG_DECNET is not set
-CONFIG_LLC=y
-# CONFIG_LLC2 is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_PHONET is not set
-# CONFIG_6LOWPAN is not set
-CONFIG_IEEE802154=m
-# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
-CONFIG_IEEE802154_SOCKET=m
-CONFIG_MAC802154=m
-CONFIG_NET_SCHED=y
-
-#
-# Queueing/Scheduling
-#
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFB=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_CBS=m
-CONFIG_NET_SCH_ETF=m
-CONFIG_NET_SCH_TAPRIO=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_MQPRIO=m
-CONFIG_NET_SCH_SKBPRIO=m
-CONFIG_NET_SCH_CHOKE=m
-CONFIG_NET_SCH_QFQ=m
-CONFIG_NET_SCH_CODEL=m
-CONFIG_NET_SCH_FQ_CODEL=m
-CONFIG_NET_SCH_CAKE=m
-CONFIG_NET_SCH_FQ=m
-CONFIG_NET_SCH_HHF=m
-CONFIG_NET_SCH_PIE=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_SCH_PLUG=m
-# CONFIG_NET_SCH_DEFAULT is not set
-
-#
-# Classification
-#
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=y
-CONFIG_NET_CLS_BPF=m
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_NET_CLS_MATCHALL=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_STACK=32
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_EMATCH_IPSET=m
-# CONFIG_NET_EMATCH_IPT is not set
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-# CONFIG_NET_ACT_SAMPLE is not set
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_ACT_CSUM=m
-# CONFIG_NET_ACT_MPLS is not set
-# CONFIG_NET_ACT_VLAN is not set
-# CONFIG_NET_ACT_BPF is not set
-# CONFIG_NET_ACT_CONNMARK is not set
-# CONFIG_NET_ACT_CTINFO is not set
-# CONFIG_NET_ACT_SKBMOD is not set
-# CONFIG_NET_ACT_IFE is not set
-# CONFIG_NET_ACT_TUNNEL_KEY is not set
-# CONFIG_NET_ACT_CT is not set
-# CONFIG_NET_TC_SKB_EXT is not set
-CONFIG_NET_SCH_FIFO=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-# CONFIG_BATMAN_ADV is not set
-CONFIG_OPENVSWITCH=m
-CONFIG_OPENVSWITCH_GRE=m
-CONFIG_OPENVSWITCH_VXLAN=m
-CONFIG_VSOCKETS=m
-CONFIG_VSOCKETS_DIAG=m
-CONFIG_VMWARE_VMCI_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS_COMMON=m
-CONFIG_NETLINK_DIAG=m
-CONFIG_MPLS=y
-CONFIG_NET_MPLS_GSO=m
-# CONFIG_MPLS_ROUTING is not set
-CONFIG_NET_NSH=m
-# CONFIG_HSR is not set
-# CONFIG_NET_SWITCHDEV is not set
-CONFIG_NET_L3_MASTER_DEV=y
-# CONFIG_NET_NCSI is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
-CONFIG_CGROUP_NET_PRIO=y
-CONFIG_CGROUP_NET_CLASSID=y
-CONFIG_NET_RX_BUSY_POLL=y
-CONFIG_BQL=y
-CONFIG_BPF_JIT=y
-CONFIG_NET_FLOW_LIMIT=y
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-CONFIG_NET_DROP_MONITOR=y
-# end of Network testing
-# end of Networking options
-
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-# CONFIG_AF_KCM is not set
-CONFIG_STREAM_PARSER=y
-CONFIG_FIB_RULES=y
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-
-#
-# CFG80211 needs to be enabled for MAC80211
-#
-CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-# CONFIG_CAIF is not set
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
-# CONFIG_NFC is not set
-# CONFIG_PSAMPLE is not set
-# CONFIG_NET_IFE is not set
-# CONFIG_LWTUNNEL is not set
-CONFIG_DST_CACHE=y
-CONFIG_GRO_CELLS=y
-CONFIG_NET_SOCK_MSG=y
-CONFIG_NET_DEVLINK=y
-CONFIG_PAGE_POOL=y
-CONFIG_FAILOVER=m
-CONFIG_HAVE_EBPF_JIT=y
-
-#
-# Device Drivers
-#
-CONFIG_HAVE_EISA=y
-# CONFIG_EISA is not set
-CONFIG_HAVE_PCI=y
-CONFIG_PCI=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEAER_INJECT=m
-CONFIG_PCIE_ECRC=y
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-CONFIG_PCIE_PME=y
-# CONFIG_PCIE_DPC is not set
-# CONFIG_PCIE_PTM is not set
-# CONFIG_PCIE_BW is not set
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-CONFIG_PCI_QUIRKS=y
-# CONFIG_PCI_DEBUG is not set
-# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
-CONFIG_PCI_STUB=m
-CONFIG_PCI_PF_STUB=m
-CONFIG_PCI_ATS=y
-CONFIG_PCI_LOCKLESS_CONFIG=y
-CONFIG_PCI_IOV=y
-CONFIG_PCI_PRI=y
-CONFIG_PCI_PASID=y
-# CONFIG_PCI_P2PDMA is not set
-CONFIG_PCI_LABEL=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_HOTPLUG_PCI_ACPI_IBM=m
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
-
-#
-# PCI controller drivers
-#
-
-#
-# Cadence PCIe controllers support
-#
-# end of Cadence PCIe controllers support
-
-CONFIG_VMD=y
-
-#
-# DesignWare PCI Core Support
-#
-# CONFIG_PCIE_DW_PLAT_HOST is not set
-# CONFIG_PCI_MESON is not set
-# end of DesignWare PCI Core Support
-# end of PCI controller drivers
-
-#
-# PCI Endpoint
-#
-# CONFIG_PCI_ENDPOINT is not set
-# end of PCI Endpoint
-
-#
-# PCI switch controller drivers
-#
-# CONFIG_PCI_SW_SWITCHTEC is not set
-# end of PCI switch controller drivers
-
-# CONFIG_PCCARD is not set
-# CONFIG_RAPIDIO is not set
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER=y
-CONFIG_UEVENT_HELPER_PATH=""
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-
-#
-# Firmware loader
-#
-CONFIG_FW_LOADER=y
-CONFIG_FW_LOADER_PAGED_BUF=y
-CONFIG_EXTRA_FIRMWARE=""
-CONFIG_FW_LOADER_USER_HELPER=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-# CONFIG_FW_LOADER_COMPRESS is not set
-# end of Firmware loader
-
-CONFIG_ALLOW_DEV_COREDUMP=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
-CONFIG_HMEM_REPORTING=y
-# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_DMA_SHARED_BUFFER=y
-# CONFIG_DMA_FENCE_TRACE is not set
-# end of Generic Driver Options
-
-#
-# Bus devices
-#
-# end of Bus devices
-
-CONFIG_CONNECTOR=y
-CONFIG_PROC_EVENTS=y
-# CONFIG_GNSS is not set
-# CONFIG_MTD is not set
-# CONFIG_OF is not set
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-# CONFIG_PARPORT is not set
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG_MESSAGES is not set
-
-#
-# Protocols
-#
-CONFIG_PNPACPI=y
-CONFIG_BLK_DEV=y
-CONFIG_BLK_DEV_NULL_BLK=m
-CONFIG_BLK_DEV_FD=m
-CONFIG_CDROM=m
-CONFIG_BLK_DEV_PCIESSD_MTIP32XX=y
-# CONFIG_BLK_DEV_UMEM is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_DRBD=m
-# CONFIG_DRBD_FAULT_INJECTION is not set
-CONFIG_BLK_DEV_NBD=y
-# CONFIG_BLK_DEV_SKD is not set
-CONFIG_BLK_DEV_SX8=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-# CONFIG_ATA_OVER_ETH is not set
-CONFIG_VIRTIO_BLK=y
-CONFIG_VIRTIO_BLK_SCSI=y
-CONFIG_BLK_DEV_RBD=m
-# CONFIG_BLK_DEV_RSXX is not set
-
-#
-# NVME Support
-#
-CONFIG_NVME_CORE=y
-CONFIG_BLK_DEV_NVME=y
-CONFIG_NVME_MULTIPATH=y
-CONFIG_NVME_FABRICS=m
-CONFIG_NVME_RDMA=m
-# CONFIG_NVME_FC is not set
-# CONFIG_NVME_TCP is not set
-# CONFIG_NVME_TARGET is not set
-# end of NVME Support
-
-#
-# Misc devices
-#
-# CONFIG_AD525X_DPOT is not set
-# CONFIG_DUMMY_IRQ is not set
-# CONFIG_IBM_ASM is not set
-# CONFIG_PHANTOM is not set
-# CONFIG_TIFM_CORE is not set
-# CONFIG_ICS932S401 is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-CONFIG_HP_ILO=m
-# CONFIG_APDS9802ALS is not set
-# CONFIG_ISL29003 is not set
-# CONFIG_ISL29020 is not set
-# CONFIG_SENSORS_TSL2550 is not set
-# CONFIG_SENSORS_BH1770 is not set
-# CONFIG_SENSORS_APDS990X is not set
-# CONFIG_HMC6352 is not set
-# CONFIG_DS1682 is not set
-CONFIG_VMWARE_BALLOON=m
-# CONFIG_SRAM is not set
-# CONFIG_PCI_ENDPOINT_TEST is not set
-# CONFIG_XILINX_SDFEC is not set
-# CONFIG_PVPANIC is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_EEPROM_MAX6875 is not set
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_EEPROM_IDT_89HPESX is not set
-# CONFIG_EEPROM_EE1004 is not set
-# end of EEPROM support
-
-# CONFIG_CB710_CORE is not set
-
-#
-# Texas Instruments shared transport line discipline
-#
-# end of Texas Instruments shared transport line discipline
-
-# CONFIG_SENSORS_LIS3_I2C is not set
-
-#
-# Altera FPGA firmware download module (requires I2C)
-#
-CONFIG_ALTERA_STAPL=m
-# CONFIG_INTEL_MEI is not set
-# CONFIG_INTEL_MEI_ME is not set
-# CONFIG_INTEL_MEI_TXE is not set
-# CONFIG_INTEL_MEI_HDCP is not set
-CONFIG_VMWARE_VMCI=m
-
-#
-# Intel MIC & related support
-#
-
-#
-# Intel MIC Bus Driver
-#
-# CONFIG_INTEL_MIC_BUS is not set
-
-#
-# SCIF Bus Driver
-#
-# CONFIG_SCIF_BUS is not set
-
-#
-# VOP Bus Driver
-#
-# CONFIG_VOP_BUS is not set
-
-#
-# Intel MIC Host Driver
-#
-
-#
-# Intel MIC Card Driver
-#
-
-#
-# SCIF Driver
-#
-
-#
-# Intel MIC Coprocessor State Management (COSM) Drivers
-#
-
-#
-# VOP Driver
-#
-# end of Intel MIC & related support
-
-# CONFIG_GENWQE is not set
-# CONFIG_ECHO is not set
-# CONFIG_MISC_ALCOR_PCI is not set
-# CONFIG_MISC_RTSX_PCI is not set
-# CONFIG_MISC_RTSX_USB is not set
-# CONFIG_HABANA_AI is not set
-# end of Misc devices
-
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI_MOD=y
-CONFIG_RAID_ATTRS=y
-CONFIG_SCSI=y
-CONFIG_SCSI_DMA=y
-CONFIG_SCSI_NETLINK=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-# CONFIG_SCSI_SCAN_ASYNC is not set
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_ISCSI_ATTRS=y
-CONFIG_SCSI_SAS_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=y
-CONFIG_SCSI_SAS_ATA=y
-CONFIG_SCSI_SAS_HOST_SMP=y
-CONFIG_SCSI_SRP_ATTRS=y
-# end of SCSI Transports
-
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=y
-CONFIG_ISCSI_BOOT_SYSFS=y
-CONFIG_SCSI_CXGB3_ISCSI=m
-# CONFIG_SCSI_CXGB4_ISCSI is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
-# CONFIG_SCSI_BNX2X_FCOE is not set
-CONFIG_BE2ISCSI=y
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-CONFIG_SCSI_HPSA=m
-CONFIG_SCSI_3W_9XXX=y
-CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_ACARD=y
-CONFIG_SCSI_AACRAID=y
-CONFIG_SCSI_AIC7XXX=y
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-CONFIG_AIC7XXX_RESET_DELAY_MS=5000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-CONFIG_SCSI_AIC79XX=y
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC94XX=y
-CONFIG_AIC94XX_DEBUG=y
-CONFIG_SCSI_MVSAS=y
-# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_MVSAS_TASKLET=y
-CONFIG_SCSI_MVUMI=y
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-CONFIG_SCSI_ARCMSR=y
-# CONFIG_SCSI_ESAS2R is not set
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=y
-CONFIG_MEGARAID_MAILBOX=y
-CONFIG_MEGARAID_LEGACY=y
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_MPT3SAS=m
-CONFIG_SCSI_MPT2SAS_MAX_SGE=128
-CONFIG_SCSI_MPT3SAS_MAX_SGE=128
-# CONFIG_SCSI_SMARTPQI is not set
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PCI=y
-# CONFIG_SCSI_UFS_DWC_TC_PCI is not set
-# CONFIG_SCSI_UFSHCD_PLATFORM is not set
-# CONFIG_SCSI_UFS_BSG is not set
-CONFIG_SCSI_HPTIOP=y
-CONFIG_SCSI_BUSLOGIC=y
-# CONFIG_SCSI_FLASHPOINT is not set
-# CONFIG_SCSI_MYRB is not set
-# CONFIG_SCSI_MYRS is not set
-CONFIG_VMWARE_PVSCSI=m
-CONFIG_LIBFC=y
-CONFIG_LIBFCOE=y
-CONFIG_FCOE=y
-CONFIG_FCOE_FNIC=y
-# CONFIG_SCSI_SNIC is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FDOMAIN_PCI is not set
-CONFIG_SCSI_GDTH=y
-CONFIG_SCSI_ISCI=y
-CONFIG_SCSI_IPS=y
-CONFIG_SCSI_INITIO=y
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_STEX=y
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA_FC=y
-# CONFIG_TCM_QLA2XXX is not set
-CONFIG_SCSI_QLA_ISCSI=y
-CONFIG_SCSI_LPFC=y
-# CONFIG_SCSI_LPFC_DEBUG_FS is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_AM53C974 is not set
-# CONFIG_SCSI_WD719X is not set
-# CONFIG_SCSI_DEBUG is not set
-CONFIG_SCSI_PMCRAID=y
-CONFIG_SCSI_PM8001=y
-CONFIG_SCSI_BFA_FC=y
-CONFIG_SCSI_VIRTIO=y
-# CONFIG_SCSI_CHELSIO_FCOE is not set
-CONFIG_SCSI_DH=y
-CONFIG_SCSI_DH_RDAC=y
-CONFIG_SCSI_DH_HP_SW=y
-CONFIG_SCSI_DH_EMC=y
-CONFIG_SCSI_DH_ALUA=y
-# end of SCSI device support
-
-CONFIG_ATA=y
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATA_ACPI=y
-# CONFIG_SATA_ZPODD is not set
-CONFIG_SATA_PMP=y
-
-#
-# Controllers with non-SFF native interface
-#
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_MOBILE_LPM_POLICY=0
-CONFIG_SATA_AHCI_PLATFORM=y
-# CONFIG_SATA_INIC162X is not set
-CONFIG_SATA_ACARD_AHCI=y
-CONFIG_SATA_SIL24=y
-CONFIG_ATA_SFF=y
-
-#
-# SFF controllers with custom DMA interface
-#
-CONFIG_PDC_ADMA=y
-CONFIG_SATA_QSTOR=y
-CONFIG_SATA_SX4=y
-CONFIG_ATA_BMDMA=y
-
-#
-# SATA SFF controllers with BMDMA
-#
-CONFIG_ATA_PIIX=y
-# CONFIG_SATA_DWC is not set
-CONFIG_SATA_MV=y
-CONFIG_SATA_NV=y
-CONFIG_SATA_PROMISE=y
-CONFIG_SATA_SIL=y
-CONFIG_SATA_SIS=y
-CONFIG_SATA_SVW=y
-CONFIG_SATA_ULI=y
-CONFIG_SATA_VIA=y
-CONFIG_SATA_VITESSE=y
-
-#
-# PATA SFF controllers with BMDMA
-#
-CONFIG_PATA_ALI=y
-CONFIG_PATA_AMD=y
-CONFIG_PATA_ARTOP=y
-CONFIG_PATA_ATIIXP=y
-CONFIG_PATA_ATP867X=y
-CONFIG_PATA_CMD64X=y
-# CONFIG_PATA_CYPRESS is not set
-# CONFIG_PATA_EFAR is not set
-CONFIG_PATA_HPT366=y
-CONFIG_PATA_HPT37X=y
-CONFIG_PATA_HPT3X2N=y
-CONFIG_PATA_HPT3X3=y
-# CONFIG_PATA_HPT3X3_DMA is not set
-CONFIG_PATA_IT8213=y
-CONFIG_PATA_IT821X=y
-CONFIG_PATA_JMICRON=y
-CONFIG_PATA_MARVELL=y
-CONFIG_PATA_NETCELL=y
-CONFIG_PATA_NINJA32=y
-# CONFIG_PATA_NS87415 is not set
-CONFIG_PATA_OLDPIIX=y
-# CONFIG_PATA_OPTIDMA is not set
-CONFIG_PATA_PDC2027X=y
-CONFIG_PATA_PDC_OLD=y
-# CONFIG_PATA_RADISYS is not set
-CONFIG_PATA_RDC=y
-CONFIG_PATA_SCH=y
-CONFIG_PATA_SERVERWORKS=y
-CONFIG_PATA_SIL680=y
-CONFIG_PATA_SIS=y
-CONFIG_PATA_TOSHIBA=y
-# CONFIG_PATA_TRIFLEX is not set
-CONFIG_PATA_VIA=y
-# CONFIG_PATA_WINBOND is not set
-
-#
-# PIO-only SFF controllers
-#
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_MPIIX is not set
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_PLATFORM is not set
-# CONFIG_PATA_RZ1000 is not set
-
-#
-# Generic fallback / legacy drivers
-#
-CONFIG_PATA_ACPI=y
-CONFIG_ATA_GENERIC=y
-CONFIG_PATA_LEGACY=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_AUTODETECT=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=y
-CONFIG_MD_RAID456=y
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BCACHE=m
-# CONFIG_BCACHE_DEBUG is not set
-# CONFIG_BCACHE_CLOSURES_DEBUG is not set
-CONFIG_BLK_DEV_DM_BUILTIN=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
-CONFIG_DM_BUFIO=y
-# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
-CONFIG_DM_BIO_PRISON=m
-CONFIG_DM_PERSISTENT_DATA=m
-# CONFIG_DM_UNSTRIPED is not set
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
-CONFIG_DM_CACHE_SMQ=m
-# CONFIG_DM_WRITECACHE is not set
-# CONFIG_DM_ERA is not set
-# CONFIG_DM_CLONE is not set
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_RAID=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-# CONFIG_DM_DUST is not set
-# CONFIG_DM_INIT is not set
-CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
-CONFIG_DM_VERITY=m
-# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
-# CONFIG_DM_VERITY_FEC is not set
-# CONFIG_DM_SWITCH is not set
-# CONFIG_DM_LOG_WRITES is not set
-# CONFIG_DM_INTEGRITY is not set
-CONFIG_TARGET_CORE=m
-CONFIG_TCM_IBLOCK=m
-CONFIG_TCM_FILEIO=m
-CONFIG_TCM_PSCSI=m
-# CONFIG_TCM_USER2 is not set
-CONFIG_LOOPBACK_TARGET=m
-CONFIG_TCM_FC=m
-CONFIG_ISCSI_TARGET=m
-# CONFIG_ISCSI_TARGET_CXGB4 is not set
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=y
-CONFIG_FUSION_FC=y
-CONFIG_FUSION_SAS=y
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=y
-# CONFIG_FUSION_LAN is not set
-CONFIG_FUSION_LOGGING=y
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_FIREWIRE is not set
-# CONFIG_FIREWIRE_NOSY is not set
-# end of IEEE 1394 (FireWire) support
-
-# CONFIG_MACINTOSH_DRIVERS is not set
-CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_NET_CORE=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-# CONFIG_EQUALIZER is not set
-CONFIG_NET_FC=y
-CONFIG_IFB=m
-CONFIG_NET_TEAM=m
-CONFIG_NET_TEAM_MODE_BROADCAST=m
-CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
-CONFIG_NET_TEAM_MODE_RANDOM=m
-CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
-CONFIG_NET_TEAM_MODE_LOADBALANCE=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_IPVLAN_L3S=y
-CONFIG_IPVLAN=m
-CONFIG_IPVTAP=m
-CONFIG_VXLAN=m
-# CONFIG_GENEVE is not set
-# CONFIG_GTP is not set
-# CONFIG_MACSEC is not set
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL=y
-CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_TUN=m
-CONFIG_TAP=m
-# CONFIG_TUN_VNET_CROSS_LE is not set
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=m
-# CONFIG_NLMON is not set
-CONFIG_NET_VRF=m
-CONFIG_VSOCKMON=m
-# CONFIG_ARCNET is not set
-
-#
-# CAIF transport drivers
-#
-
-#
-# Distributed Switch Architecture drivers
-#
-# end of Distributed Switch Architecture drivers
-
-CONFIG_ETHERNET=y
-CONFIG_MDIO=m
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ADAPTEC is not set
-CONFIG_NET_VENDOR_AGERE=y
-# CONFIG_ET131X is not set
-CONFIG_NET_VENDOR_ALACRITECH=y
-# CONFIG_SLICOSS is not set
-# CONFIG_NET_VENDOR_ALTEON is not set
-# CONFIG_ALTERA_TSE is not set
-CONFIG_NET_VENDOR_AMAZON=y
-# CONFIG_ENA_ETHERNET is not set
-# CONFIG_NET_VENDOR_AMD is not set
-CONFIG_NET_VENDOR_AQUANTIA=y
-# CONFIG_AQTION is not set
-CONFIG_NET_VENDOR_ARC=y
-CONFIG_NET_VENDOR_ATHEROS=y
-CONFIG_ATL2=m
-CONFIG_ATL1=m
-CONFIG_ATL1E=m
-CONFIG_ATL1C=m
-CONFIG_ALX=m
-# CONFIG_NET_VENDOR_AURORA is not set
-CONFIG_NET_VENDOR_BROADCOM=y
-# CONFIG_B44 is not set
-# CONFIG_BCMGENET is not set
-CONFIG_BNX2=m
-# CONFIG_CNIC is not set
-CONFIG_TIGON3=m
-CONFIG_TIGON3_HWMON=y
-CONFIG_BNX2X=m
-CONFIG_BNX2X_SRIOV=y
-# CONFIG_SYSTEMPORT is not set
-CONFIG_BNXT=m
-CONFIG_BNXT_SRIOV=y
-CONFIG_BNXT_FLOWER_OFFLOAD=y
-CONFIG_BNXT_DCB=y
-CONFIG_BNXT_HWMON=y
-CONFIG_NET_VENDOR_BROCADE=y
-CONFIG_BNA=m
-CONFIG_NET_VENDOR_CADENCE=y
-CONFIG_MACB=m
-CONFIG_MACB_USE_HWSTAMP=y
-# CONFIG_MACB_PCI is not set
-CONFIG_NET_VENDOR_CAVIUM=y
-# CONFIG_THUNDER_NIC_PF is not set
-# CONFIG_THUNDER_NIC_VF is not set
-# CONFIG_THUNDER_NIC_BGX is not set
-# CONFIG_THUNDER_NIC_RGX is not set
-# CONFIG_CAVIUM_PTP is not set
-CONFIG_LIQUIDIO=m
-# CONFIG_LIQUIDIO_VF is not set
-CONFIG_NET_VENDOR_CHELSIO=y
-# CONFIG_CHELSIO_T1 is not set
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
-# CONFIG_CHELSIO_T4_DCB is not set
-CONFIG_CHELSIO_T4VF=m
-CONFIG_CHELSIO_LIB=m
-CONFIG_NET_VENDOR_CISCO=y
-CONFIG_ENIC=m
-CONFIG_NET_VENDOR_CORTINA=y
-# CONFIG_CX_ECAT is not set
-CONFIG_DNET=m
-CONFIG_NET_VENDOR_DEC=y
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_DE2104X_DSL=0
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-CONFIG_ULI526X=m
-# CONFIG_NET_VENDOR_DLINK is not set
-CONFIG_NET_VENDOR_EMULEX=y
-CONFIG_BE2NET=m
-CONFIG_BE2NET_HWMON=y
-CONFIG_BE2NET_BE2=y
-CONFIG_BE2NET_BE3=y
-CONFIG_BE2NET_LANCER=y
-CONFIG_BE2NET_SKYHAWK=y
-CONFIG_NET_VENDOR_EZCHIP=y
-CONFIG_NET_VENDOR_GOOGLE=y
-# CONFIG_GVE is not set
-# CONFIG_NET_VENDOR_HP is not set
-CONFIG_NET_VENDOR_HUAWEI=y
-# CONFIG_HINIC is not set
-# CONFIG_NET_VENDOR_I825XX is not set
-CONFIG_NET_VENDOR_INTEL=y
-# CONFIG_E100 is not set
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_E1000E_HWTS=y
-CONFIG_IGB=m
-CONFIG_IGB_HWMON=y
-CONFIG_IGB_DCA=y
-CONFIG_IGBVF=m
-CONFIG_IXGB=m
-CONFIG_IXGBE=m
-CONFIG_IXGBE_HWMON=y
-CONFIG_IXGBE_DCA=y
-CONFIG_IXGBE_DCB=y
-CONFIG_IXGBEVF=m
-CONFIG_I40E=m
-# CONFIG_I40E_DCB is not set
-CONFIG_IAVF=m
-CONFIG_I40EVF=m
-# CONFIG_ICE is not set
-# CONFIG_FM10K is not set
-# CONFIG_IGC is not set
-CONFIG_JME=m
-CONFIG_NET_VENDOR_MARVELL=y
-CONFIG_MVMDIO=m
-CONFIG_SKGE=m
-# CONFIG_SKGE_DEBUG is not set
-CONFIG_SKGE_GENESIS=y
-CONFIG_SKY2=m
-# CONFIG_SKY2_DEBUG is not set
-CONFIG_NET_VENDOR_MELLANOX=y
-CONFIG_MLX4_EN=m
-CONFIG_MLX4_EN_DCB=y
-CONFIG_MLX4_CORE=m
-CONFIG_MLX4_DEBUG=y
-CONFIG_MLX4_CORE_GEN2=y
-CONFIG_MLX5_CORE=m
-# CONFIG_MLX5_FPGA is not set
-CONFIG_MLX5_CORE_EN=y
-CONFIG_MLX5_EN_ARFS=y
-CONFIG_MLX5_EN_RXNFC=y
-CONFIG_MLX5_MPFS=y
-CONFIG_MLX5_CORE_EN_DCB=y
-# CONFIG_MLX5_CORE_IPOIB is not set
-# CONFIG_MLXSW_CORE is not set
-# CONFIG_MLXFW is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-CONFIG_NET_VENDOR_MICROCHIP=y
-# CONFIG_LAN743X is not set
-CONFIG_NET_VENDOR_MICROSEMI=y
-CONFIG_NET_VENDOR_MYRI=y
-CONFIG_MYRI10GE=m
-CONFIG_MYRI10GE_DCA=y
-# CONFIG_FEALNX is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_NET_VENDOR_NETERION=y
-# CONFIG_S2IO is not set
-# CONFIG_VXGE is not set
-CONFIG_NET_VENDOR_NETRONOME=y
-# CONFIG_NFP is not set
-CONFIG_NET_VENDOR_NI=y
-# CONFIG_NI_XGE_MANAGEMENT_ENET is not set
-# CONFIG_NET_VENDOR_NVIDIA is not set
-CONFIG_NET_VENDOR_OKI=y
-CONFIG_ETHOC=m
-CONFIG_NET_VENDOR_PACKET_ENGINES=y
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-CONFIG_NET_VENDOR_PENSANDO=y
-# CONFIG_IONIC is not set
-CONFIG_NET_VENDOR_QLOGIC=y
-CONFIG_QLA3XXX=m
-CONFIG_QLCNIC=m
-CONFIG_QLCNIC_SRIOV=y
-CONFIG_QLCNIC_DCB=y
-CONFIG_QLCNIC_HWMON=y
-CONFIG_NETXEN_NIC=m
-# CONFIG_QED is not set
-CONFIG_NET_VENDOR_QUALCOMM=y
-# CONFIG_QCOM_EMAC is not set
-# CONFIG_RMNET is not set
-# CONFIG_NET_VENDOR_RDC is not set
-CONFIG_NET_VENDOR_REALTEK=y
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_R8169=m
-CONFIG_NET_VENDOR_RENESAS=y
-CONFIG_NET_VENDOR_ROCKER=y
-CONFIG_NET_VENDOR_SAMSUNG=y
-# CONFIG_SXGBE_ETH is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-CONFIG_NET_VENDOR_SOLARFLARE=y
-# CONFIG_SFC is not set
-# CONFIG_SFC_FALCON is not set
-# CONFIG_NET_VENDOR_SILAN is not set
-# CONFIG_NET_VENDOR_SIS is not set
-CONFIG_NET_VENDOR_SMSC=y
-CONFIG_EPIC100=m
-# CONFIG_SMSC911X is not set
-CONFIG_SMSC9420=m
-CONFIG_NET_VENDOR_SOCIONEXT=y
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SUN is not set
-CONFIG_NET_VENDOR_SYNOPSYS=y
-# CONFIG_DWC_XLGMAC is not set
-# CONFIG_NET_VENDOR_TEHUTI is not set
-# CONFIG_NET_VENDOR_TI is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_NET_VENDOR_XILINX=y
-# CONFIG_XILINX_AXI_EMAC is not set
-# CONFIG_XILINX_LL_TEMAC is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_NET_SB1000 is not set
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_BUS=y
-# CONFIG_MDIO_BCM_UNIMAC is not set
-CONFIG_MDIO_BITBANG=m
-# CONFIG_MDIO_MSCC_MIIM is not set
-# CONFIG_MDIO_THUNDER is not set
-CONFIG_PHYLIB=y
-CONFIG_SWPHY=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_ADIN_PHY is not set
-CONFIG_AMD_PHY=m
-# CONFIG_AQUANTIA_PHY is not set
-# CONFIG_AX88796B_PHY is not set
-CONFIG_AT803X_PHY=m
-# CONFIG_BCM7XXX_PHY is not set
-CONFIG_BCM87XX_PHY=m
-CONFIG_BCM_NET_PHYLIB=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_CICADA_PHY=m
-# CONFIG_CORTINA_PHY is not set
-CONFIG_DAVICOM_PHY=m
-# CONFIG_DP83822_PHY is not set
-# CONFIG_DP83TC811_PHY is not set
-# CONFIG_DP83848_PHY is not set
-# CONFIG_DP83867_PHY is not set
-CONFIG_FIXED_PHY=y
-CONFIG_ICPLUS_PHY=m
-# CONFIG_INTEL_XWAY_PHY is not set
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-# CONFIG_MARVELL_10G_PHY is not set
-CONFIG_MICREL_PHY=m
-# CONFIG_MICROCHIP_PHY is not set
-# CONFIG_MICROCHIP_T1_PHY is not set
-# CONFIG_MICROSEMI_PHY is not set
-CONFIG_NATIONAL_PHY=m
-# CONFIG_NXP_TJA11XX_PHY is not set
-CONFIG_QSEMI_PHY=m
-CONFIG_REALTEK_PHY=m
-# CONFIG_RENESAS_PHY is not set
-# CONFIG_ROCKCHIP_PHY is not set
-CONFIG_SMSC_PHY=m
-CONFIG_STE10XP=m
-# CONFIG_TERANETICS_PHY is not set
-CONFIG_VITESSE_PHY=m
-# CONFIG_XILINX_GMII2RGMII is not set
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOE=m
-CONFIG_PPTP=m
-CONFIG_PPPOL2TP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
-CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-# CONFIG_SLIP_MODE_SLIP6 is not set
-CONFIG_USB_NET_DRIVERS=y
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_RTL8152 is not set
-# CONFIG_USB_LAN78XX is not set
-# CONFIG_USB_USBNET is not set
-# CONFIG_USB_IPHETH is not set
-# CONFIG_WLAN is not set
-
-#
-# Enable WiMAX (Networking options) to see the WiMAX drivers
-#
-# CONFIG_WAN is not set
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKELB=m
-# CONFIG_IEEE802154_ATUSB is not set
-# CONFIG_IEEE802154_HWSIM is not set
-# CONFIG_VMXNET3 is not set
-# CONFIG_FUJITSU_ES is not set
-# CONFIG_NETDEVSIM is not set
-CONFIG_NET_FAILOVER=m
-# CONFIG_ISDN is not set
-# CONFIG_NVM is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=y
-CONFIG_INPUT_POLLDEV=m
-CONFIG_INPUT_SPARSEKMAP=m
-# CONFIG_INPUT_MATRIXKMAP is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ADP5588 is not set
-# CONFIG_KEYBOARD_ADP5589 is not set
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_QT1050 is not set
-# CONFIG_KEYBOARD_QT1070 is not set
-# CONFIG_KEYBOARD_QT2160 is not set
-# CONFIG_KEYBOARD_DLINK_DIR685 is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_TCA6416 is not set
-# CONFIG_KEYBOARD_TCA8418 is not set
-# CONFIG_KEYBOARD_LM8333 is not set
-# CONFIG_KEYBOARD_MAX7359 is not set
-# CONFIG_KEYBOARD_MCS is not set
-# CONFIG_KEYBOARD_MPR121 is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_OPENCORES is not set
-# CONFIG_KEYBOARD_SAMSUNG is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_KEYBOARD_XTKBD=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-CONFIG_INPUT_MISC=y
-# CONFIG_INPUT_AD714X is not set
-# CONFIG_INPUT_BMA150 is not set
-# CONFIG_INPUT_E3X0_BUTTON is not set
-# CONFIG_INPUT_MSM_VIBRATOR is not set
-# CONFIG_INPUT_PCSPKR is not set
-# CONFIG_INPUT_MMA8450 is not set
-CONFIG_INPUT_ATLAS_BTNS=m
-CONFIG_INPUT_ATI_REMOTE2=m
-CONFIG_INPUT_KEYSPAN_REMOTE=m
-# CONFIG_INPUT_KXTJ9 is not set
-CONFIG_INPUT_POWERMATE=m
-CONFIG_INPUT_YEALINK=m
-CONFIG_INPUT_CM109=m
-CONFIG_INPUT_UINPUT=m
-# CONFIG_INPUT_PCF8574 is not set
-# CONFIG_INPUT_ADXL34X is not set
-# CONFIG_INPUT_CMA3000 is not set
-# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
-# CONFIG_INPUT_DRV2665_HAPTICS is not set
-# CONFIG_INPUT_DRV2667_HAPTICS is not set
-# CONFIG_RMI4_CORE is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=y
-CONFIG_SERIO_ALTERA_PS2=y
-# CONFIG_SERIO_PS2MULT is not set
-CONFIG_SERIO_ARC_PS2=y
-# CONFIG_USERIO is not set
-# CONFIG_GAMEPORT is not set
-# end of Hardware I/O ports
-# end of Input device support
-
-#
-# Character devices
-#
-CONFIG_TTY=y
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_ROCKETPORT is not set
-CONFIG_CYCLADES=m
-# CONFIG_CYZ_INTR is not set
-# CONFIG_MOXA_INTELLIO is not set
-# CONFIG_MOXA_SMARTIO is not set
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_NOZOMI=m
-# CONFIG_ISI is not set
-CONFIG_N_HDLC=m
-CONFIG_N_GSM=m
-# CONFIG_TRACE_SINK is not set
-# CONFIG_NULL_TTY is not set
-CONFIG_LDISC_AUTOLOAD=y
-CONFIG_DEVMEM=y
-# CONFIG_DEVKMEM is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_EARLYCON=y
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_PNP=y
-# CONFIG_SERIAL_8250_FINTEK is not set
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DMA=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_EXAR=y
-CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_8250_DWLIB=y
-# CONFIG_SERIAL_8250_DW is not set
-# CONFIG_SERIAL_8250_RT288X is not set
-CONFIG_SERIAL_8250_LPSS=y
-CONFIG_SERIAL_8250_MID=y
-
-#
-# Non-8250 serial port support
-#
-# CONFIG_SERIAL_UARTLITE is not set
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_JSM=m
-# CONFIG_SERIAL_SCCNXP is not set
-# CONFIG_SERIAL_SC16IS7XX is not set
-# CONFIG_SERIAL_ALTERA_JTAGUART is not set
-# CONFIG_SERIAL_ALTERA_UART is not set
-CONFIG_SERIAL_ARC=m
-CONFIG_SERIAL_ARC_NR_PORTS=1
-# CONFIG_SERIAL_RP2 is not set
-# CONFIG_SERIAL_FSL_LPUART is not set
-# CONFIG_SERIAL_FSL_LINFLEXUART is not set
-# end of Serial drivers
-
-# CONFIG_SERIAL_DEV_BUS is not set
-# CONFIG_TTY_PRINTK is not set
-CONFIG_HVC_DRIVER=y
-CONFIG_VIRTIO_CONSOLE=m
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DMI_DECODE=y
-CONFIG_IPMI_PLAT_DATA=y
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-# CONFIG_IPMI_SSIF is not set
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_HW_RANDOM_INTEL=m
-CONFIG_HW_RANDOM_AMD=m
-CONFIG_HW_RANDOM_VIA=m
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_NVRAM=y
-# CONFIG_APPLICOM is not set
-# CONFIG_MWAVE is not set
-CONFIG_RAW_DRIVER=y
-CONFIG_MAX_RAW_DEVS=8192
-CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HPET_MMAP_DEFAULT=y
-CONFIG_HANGCHECK_TIMER=m
-CONFIG_TCG_TPM=y
-CONFIG_HW_RANDOM_TPM=y
-CONFIG_TCG_TIS_CORE=y
-CONFIG_TCG_TIS=y
-# CONFIG_TCG_TIS_I2C_ATMEL is not set
-# CONFIG_TCG_TIS_I2C_INFINEON is not set
-# CONFIG_TCG_TIS_I2C_NUVOTON is not set
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_TCG_INFINEON=m
-# CONFIG_TCG_CRB is not set
-# CONFIG_TCG_VTPM_PROXY is not set
-# CONFIG_TCG_TIS_ST33ZP24_I2C is not set
-CONFIG_TELCLOCK=m
-CONFIG_DEVPORT=y
-# CONFIG_XILLYBUS is not set
-# end of Character devices
-
-# CONFIG_RANDOM_TRUST_CPU is not set
-# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_BOARDINFO=y
-# CONFIG_I2C_COMPAT is not set
-# CONFIG_I2C_CHARDEV is not set
-# CONFIG_I2C_MUX is not set
-# CONFIG_I2C_HELPER_AUTO is not set
-# CONFIG_I2C_SMBUS is not set
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
-# end of I2C Algorithms
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# PC SMBus host controller drivers
-#
-# CONFIG_I2C_ALI1535 is not set
-# CONFIG_I2C_ALI1563 is not set
-# CONFIG_I2C_ALI15X3 is not set
-# CONFIG_I2C_AMD756 is not set
-# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_AMD_MP2 is not set
-# CONFIG_I2C_I801 is not set
-# CONFIG_I2C_ISCH is not set
-# CONFIG_I2C_ISMT is not set
-# CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_NFORCE2 is not set
-# CONFIG_I2C_NVIDIA_GPU is not set
-# CONFIG_I2C_SIS5595 is not set
-# CONFIG_I2C_SIS630 is not set
-# CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_VIA is not set
-# CONFIG_I2C_VIAPRO is not set
-
-#
-# ACPI drivers
-#
-# CONFIG_I2C_SCMI is not set
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
-# CONFIG_I2C_DESIGNWARE_PCI is not set
-# CONFIG_I2C_EMEV2 is not set
-# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PCA_PLATFORM is not set
-# CONFIG_I2C_SIMTEC is not set
-# CONFIG_I2C_XILINX is not set
-
-#
-# External I2C/SMBus adapter drivers
-#
-# CONFIG_I2C_DIOLAN_U2C is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
-# CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_TINY_USB is not set
-
-#
-# Other I2C/SMBus bus drivers
-#
-# CONFIG_I2C_MLXCPLD is not set
-# end of I2C Hardware Bus support
-
-# CONFIG_I2C_STUB is not set
-# CONFIG_I2C_SLAVE is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# end of I2C support
-
-# CONFIG_I3C is not set
-# CONFIG_SPI is not set
-# CONFIG_SPMI is not set
-# CONFIG_HSI is not set
-CONFIG_PPS=m
-# CONFIG_PPS_DEBUG is not set
-
-#
-# PPS clients support
-#
-# CONFIG_PPS_CLIENT_KTIMER is not set
-# CONFIG_PPS_CLIENT_LDISC is not set
-# CONFIG_PPS_CLIENT_GPIO is not set
-
-#
-# PPS generators support
-#
-
-#
-# PTP clock support
-#
-CONFIG_PTP_1588_CLOCK=m
-# CONFIG_DP83640_PHY is not set
-CONFIG_PTP_1588_CLOCK_KVM=m
-# end of PTP clock support
-
-CONFIG_PINCTRL=y
-# CONFIG_DEBUG_PINCTRL is not set
-# CONFIG_PINCTRL_AMD is not set
-# CONFIG_PINCTRL_MCP23S08 is not set
-# CONFIG_PINCTRL_BAYTRAIL is not set
-# CONFIG_PINCTRL_CHERRYVIEW is not set
-# CONFIG_PINCTRL_BROXTON is not set
-# CONFIG_PINCTRL_CANNONLAKE is not set
-# CONFIG_PINCTRL_CEDARFORK is not set
-# CONFIG_PINCTRL_DENVERTON is not set
-# CONFIG_PINCTRL_GEMINILAKE is not set
-# CONFIG_PINCTRL_ICELAKE is not set
-# CONFIG_PINCTRL_LEWISBURG is not set
-# CONFIG_PINCTRL_SUNRISEPOINT is not set
-# CONFIG_GPIOLIB is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_AVS is not set
-CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_RESTART is not set
-CONFIG_POWER_SUPPLY=y
-# CONFIG_POWER_SUPPLY_DEBUG is not set
-CONFIG_POWER_SUPPLY_HWMON=y
-# CONFIG_PDA_POWER is not set
-# CONFIG_TEST_POWER is not set
-# CONFIG_CHARGER_ADP5061 is not set
-# CONFIG_BATTERY_DS2780 is not set
-# CONFIG_BATTERY_DS2781 is not set
-# CONFIG_BATTERY_DS2782 is not set
-# CONFIG_BATTERY_SBS is not set
-# CONFIG_CHARGER_SBS is not set
-# CONFIG_BATTERY_BQ27XXX is not set
-# CONFIG_BATTERY_MAX17040 is not set
-# CONFIG_BATTERY_MAX17042 is not set
-# CONFIG_CHARGER_MAX8903 is not set
-# CONFIG_CHARGER_LP8727 is not set
-# CONFIG_CHARGER_BQ2415X is not set
-# CONFIG_CHARGER_SMB347 is not set
-# CONFIG_BATTERY_GAUGE_LTC2941 is not set
-CONFIG_HWMON=y
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Native drivers
-#
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
-# CONFIG_SENSORS_AD7414 is not set
-# CONFIG_SENSORS_AD7418 is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1029 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ADT7410 is not set
-# CONFIG_SENSORS_ADT7411 is not set
-# CONFIG_SENSORS_ADT7462 is not set
-# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7475 is not set
-# CONFIG_SENSORS_AS370 is not set
-# CONFIG_SENSORS_ASC7621 is not set
-# CONFIG_SENSORS_K8TEMP is not set
-# CONFIG_SENSORS_K10TEMP is not set
-# CONFIG_SENSORS_FAM15H_POWER is not set
-# CONFIG_SENSORS_APPLESMC is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ASPEED is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS620 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_DELL_SMM is not set
-# CONFIG_SENSORS_I5K_AMB is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_F71882FG is not set
-# CONFIG_SENSORS_F75375S is not set
-# CONFIG_SENSORS_FSCHMD is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_G760A is not set
-# CONFIG_SENSORS_G762 is not set
-# CONFIG_SENSORS_HIH6130 is not set
-CONFIG_SENSORS_IBMAEM=m
-CONFIG_SENSORS_IBMPEX=m
-# CONFIG_SENSORS_I5500 is not set
-# CONFIG_SENSORS_CORETEMP is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_JC42 is not set
-# CONFIG_SENSORS_POWR1220 is not set
-# CONFIG_SENSORS_LINEAGE is not set
-# CONFIG_SENSORS_LTC2945 is not set
-# CONFIG_SENSORS_LTC2990 is not set
-# CONFIG_SENSORS_LTC4151 is not set
-# CONFIG_SENSORS_LTC4215 is not set
-# CONFIG_SENSORS_LTC4222 is not set
-# CONFIG_SENSORS_LTC4245 is not set
-# CONFIG_SENSORS_LTC4260 is not set
-# CONFIG_SENSORS_LTC4261 is not set
-# CONFIG_SENSORS_MAX16065 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_MAX1668 is not set
-# CONFIG_SENSORS_MAX197 is not set
-# CONFIG_SENSORS_MAX6621 is not set
-# CONFIG_SENSORS_MAX6639 is not set
-# CONFIG_SENSORS_MAX6642 is not set
-# CONFIG_SENSORS_MAX6650 is not set
-# CONFIG_SENSORS_MAX6697 is not set
-# CONFIG_SENSORS_MAX31790 is not set
-# CONFIG_SENSORS_MCP3021 is not set
-# CONFIG_SENSORS_TC654 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM73 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_LM93 is not set
-# CONFIG_SENSORS_LM95234 is not set
-# CONFIG_SENSORS_LM95241 is not set
-# CONFIG_SENSORS_LM95245 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_NTC_THERMISTOR is not set
-# CONFIG_SENSORS_NCT6683 is not set
-# CONFIG_SENSORS_NCT6775 is not set
-# CONFIG_SENSORS_NCT7802 is not set
-# CONFIG_SENSORS_NCT7904 is not set
-# CONFIG_SENSORS_NPCM7XX is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_PMBUS is not set
-# CONFIG_SENSORS_SHT21 is not set
-# CONFIG_SENSORS_SHT3x is not set
-# CONFIG_SENSORS_SHTC1 is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_DME1737 is not set
-# CONFIG_SENSORS_EMC1403 is not set
-# CONFIG_SENSORS_EMC2103 is not set
-# CONFIG_SENSORS_EMC6W201 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47M192 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_STTS751 is not set
-# CONFIG_SENSORS_SMM665 is not set
-# CONFIG_SENSORS_ADC128D818 is not set
-# CONFIG_SENSORS_ADS7828 is not set
-# CONFIG_SENSORS_AMC6821 is not set
-# CONFIG_SENSORS_INA209 is not set
-# CONFIG_SENSORS_INA2XX is not set
-# CONFIG_SENSORS_INA3221 is not set
-# CONFIG_SENSORS_TC74 is not set
-# CONFIG_SENSORS_THMC50 is not set
-# CONFIG_SENSORS_TMP102 is not set
-# CONFIG_SENSORS_TMP103 is not set
-# CONFIG_SENSORS_TMP108 is not set
-# CONFIG_SENSORS_TMP401 is not set
-# CONFIG_SENSORS_TMP421 is not set
-# CONFIG_SENSORS_VIA_CPUTEMP is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_VT8231 is not set
-# CONFIG_SENSORS_W83773G is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83791D is not set
-# CONFIG_SENSORS_W83792D is not set
-# CONFIG_SENSORS_W83793 is not set
-# CONFIG_SENSORS_W83795 is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83L786NG is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_SENSORS_XGENE is not set
-
-#
-# ACPI drivers
-#
-# CONFIG_SENSORS_ACPI_POWER is not set
-# CONFIG_SENSORS_ATK0110 is not set
-CONFIG_THERMAL=y
-# CONFIG_THERMAL_STATISTICS is not set
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_HWMON=y
-CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_GOV_FAIR_SHARE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-# CONFIG_THERMAL_GOV_BANG_BANG is not set
-CONFIG_THERMAL_GOV_USER_SPACE=y
-# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-# CONFIG_CLOCK_THERMAL is not set
-# CONFIG_DEVFREQ_THERMAL is not set
-# CONFIG_THERMAL_EMULATION is not set
-
-#
-# Intel thermal drivers
-#
-# CONFIG_INTEL_POWERCLAMP is not set
-CONFIG_X86_PKG_TEMP_THERMAL=m
-# CONFIG_INTEL_SOC_DTS_THERMAL is not set
-
-#
-# ACPI INT340X thermal drivers
-#
-# CONFIG_INT340X_THERMAL is not set
-# end of ACPI INT340X thermal drivers
-
-# CONFIG_INTEL_PCH_THERMAL is not set
-# end of Intel thermal drivers
-
-# CONFIG_WATCHDOG is not set
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-CONFIG_BCMA_POSSIBLE=y
-# CONFIG_BCMA is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_BCM590XX is not set
-# CONFIG_MFD_BD9571MWV is not set
-# CONFIG_MFD_AXP20X_I2C is not set
-# CONFIG_MFD_MADERA is not set
-# CONFIG_MFD_DA9062 is not set
-# CONFIG_MFD_DA9063 is not set
-# CONFIG_MFD_DA9150 is not set
-# CONFIG_MFD_DLN2 is not set
-# CONFIG_MFD_MC13XXX_I2C is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
-# CONFIG_LPC_ICH is not set
-# CONFIG_LPC_SCH is not set
-# CONFIG_MFD_INTEL_LPSS_ACPI is not set
-# CONFIG_MFD_INTEL_LPSS_PCI is not set
-# CONFIG_MFD_JANZ_CMODIO is not set
-# CONFIG_MFD_KEMPLD is not set
-# CONFIG_MFD_88PM800 is not set
-# CONFIG_MFD_88PM805 is not set
-# CONFIG_MFD_MAX14577 is not set
-# CONFIG_MFD_MAX77693 is not set
-# CONFIG_MFD_MAX8907 is not set
-# CONFIG_MFD_MT6397 is not set
-# CONFIG_MFD_MENF21BMC is not set
-# CONFIG_MFD_VIPERBOARD is not set
-# CONFIG_MFD_RETU is not set
-# CONFIG_MFD_PCF50633 is not set
-# CONFIG_MFD_RDC321X is not set
-# CONFIG_MFD_RT5033 is not set
-# CONFIG_MFD_SI476X_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_MFD_SKY81452 is not set
-# CONFIG_ABX500_CORE is not set
-# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
-# CONFIG_MFD_LP3943 is not set
-# CONFIG_MFD_TI_LMU is not set
-# CONFIG_TPS6105X is not set
-# CONFIG_TPS6507X is not set
-# CONFIG_MFD_TPS65086 is not set
-# CONFIG_MFD_TI_LP873X is not set
-# CONFIG_MFD_TPS65912_I2C is not set
-# CONFIG_MFD_WL1273_CORE is not set
-# CONFIG_MFD_LM3533 is not set
-# CONFIG_MFD_TQMX86 is not set
-# CONFIG_MFD_VX855 is not set
-# CONFIG_MFD_ARIZONA_I2C is not set
-# CONFIG_MFD_WM8994 is not set
-# end of Multifunction device drivers
-
-# CONFIG_REGULATOR is not set
-CONFIG_RC_CORE=y
-CONFIG_RC_MAP=y
-# CONFIG_LIRC is not set
-CONFIG_RC_DECODERS=y
-CONFIG_IR_NEC_DECODER=y
-CONFIG_IR_RC5_DECODER=y
-CONFIG_IR_RC6_DECODER=y
-CONFIG_IR_JVC_DECODER=y
-CONFIG_IR_SONY_DECODER=y
-CONFIG_IR_SANYO_DECODER=y
-CONFIG_IR_SHARP_DECODER=y
-CONFIG_IR_MCE_KBD_DECODER=y
-CONFIG_IR_XMP_DECODER=y
-# CONFIG_IR_IMON_DECODER is not set
-# CONFIG_IR_RCMM_DECODER is not set
-# CONFIG_RC_DEVICES is not set
-# CONFIG_MEDIA_SUPPORT is not set
-
-#
-# Graphics support
-#
-CONFIG_AGP=y
-# CONFIG_AGP_AMD64 is not set
-CONFIG_AGP_INTEL=y
-# CONFIG_AGP_SIS is not set
-# CONFIG_AGP_VIA is not set
-CONFIG_INTEL_GTT=y
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=64
-# CONFIG_VGA_SWITCHEROO is not set
-CONFIG_DRM=m
-CONFIG_DRM_MIPI_DSI=y
-# CONFIG_DRM_DP_AUX_CHARDEV is not set
-# CONFIG_DRM_DEBUG_SELFTEST is not set
-CONFIG_DRM_KMS_HELPER=m
-CONFIG_DRM_KMS_FB_HELPER=y
-CONFIG_DRM_FBDEV_EMULATION=y
-CONFIG_DRM_FBDEV_OVERALLOC=100
-# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
-# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
-# CONFIG_DRM_DP_CEC is not set
-
-#
-# I2C encoder or helper chips
-#
-# CONFIG_DRM_I2C_CH7006 is not set
-# CONFIG_DRM_I2C_SIL164 is not set
-# CONFIG_DRM_I2C_NXP_TDA998X is not set
-# CONFIG_DRM_I2C_NXP_TDA9950 is not set
-# end of I2C encoder or helper chips
-
-#
-# ARM devices
-#
-# end of ARM devices
-
-# CONFIG_DRM_RADEON is not set
-# CONFIG_DRM_AMDGPU is not set
-
-#
-# ACP (Audio CoProcessor) Configuration
-#
-# end of ACP (Audio CoProcessor) Configuration
-
-# CONFIG_DRM_NOUVEAU is not set
-CONFIG_DRM_I915=m
-# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
-CONFIG_DRM_I915_FORCE_PROBE=""
-CONFIG_DRM_I915_CAPTURE_ERROR=y
-CONFIG_DRM_I915_COMPRESS_ERROR=y
-CONFIG_DRM_I915_USERPTR=y
-# CONFIG_DRM_I915_GVT is not set
-
-#
-# drm/i915 Debugging
-#
-# CONFIG_DRM_I915_WERROR is not set
-# CONFIG_DRM_I915_DEBUG is not set
-# CONFIG_DRM_I915_DEBUG_MMIO is not set
-# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
-# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
-# CONFIG_DRM_I915_DEBUG_GUC is not set
-# CONFIG_DRM_I915_SELFTEST is not set
-# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
-# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
-# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
-# end of drm/i915 Debugging
-
-#
-# drm/i915 Profile Guided Optimisation
-#
-CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
-CONFIG_DRM_I915_SPIN_REQUEST=5
-# end of drm/i915 Profile Guided Optimisation
-
-# CONFIG_DRM_VGEM is not set
-# CONFIG_DRM_VKMS is not set
-# CONFIG_DRM_VMWGFX is not set
-# CONFIG_DRM_GMA500 is not set
-# CONFIG_DRM_UDL is not set
-# CONFIG_DRM_AST is not set
-# CONFIG_DRM_MGAG200 is not set
-# CONFIG_DRM_CIRRUS_QEMU is not set
-# CONFIG_DRM_QXL is not set
-# CONFIG_DRM_BOCHS is not set
-# CONFIG_DRM_VIRTIO_GPU is not set
-CONFIG_DRM_PANEL=y
-
-#
-# Display Panels
-#
-# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set
-# end of Display Panels
-
-CONFIG_DRM_BRIDGE=y
-CONFIG_DRM_PANEL_BRIDGE=y
-
-#
-# Display Interface Bridges
-#
-# CONFIG_DRM_ANALOGIX_ANX78XX is not set
-# end of Display Interface Bridges
-
-# CONFIG_DRM_ETNAVIV is not set
-# CONFIG_DRM_GM12U320 is not set
-# CONFIG_DRM_VBOXVIDEO is not set
-# CONFIG_DRM_LEGACY is not set
-CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
-
-#
-# Frame buffer Devices
-#
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_NOTIFY=y
-CONFIG_FB=y
-# CONFIG_FIRMWARE_EDID is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SYS_FILLRECT=m
-CONFIG_FB_SYS_COPYAREA=m
-CONFIG_FB_SYS_IMAGEBLIT=m
-# CONFIG_FB_FOREIGN_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=m
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_UVESA is not set
-# CONFIG_FB_VESA is not set
-CONFIG_FB_EFI=y
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_OPENCORES is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_INTEL is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_SIMPLE is not set
-# CONFIG_FB_SM712 is not set
-# end of Frame buffer Devices
-
-#
-# Backlight & LCD device support
-#
-CONFIG_LCD_CLASS_DEVICE=m
-# CONFIG_LCD_PLATFORM is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=m
-CONFIG_BACKLIGHT_GENERIC=m
-# CONFIG_BACKLIGHT_APPLE is not set
-# CONFIG_BACKLIGHT_PM8941_WLED is not set
-# CONFIG_BACKLIGHT_SAHARA is not set
-# CONFIG_BACKLIGHT_ADP8860 is not set
-# CONFIG_BACKLIGHT_ADP8870 is not set
-# CONFIG_BACKLIGHT_LM3639 is not set
-# CONFIG_BACKLIGHT_LV5207LP is not set
-# CONFIG_BACKLIGHT_BD6107 is not set
-# CONFIG_BACKLIGHT_ARCXCNN is not set
-# end of Backlight & LCD device support
-
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
-# end of Console display driver support
-
-# CONFIG_LOGO is not set
-# end of Graphics support
-
-# CONFIG_SOUND is not set
-
-#
-# HID support
-#
-CONFIG_HID=y
-# CONFIG_HID_BATTERY_STRENGTH is not set
-# CONFIG_HIDRAW is not set
-# CONFIG_UHID is not set
-CONFIG_HID_GENERIC=y
-
-#
-# Special HID drivers
-#
-CONFIG_HID_A4TECH=y
-# CONFIG_HID_ACCUTOUCH is not set
-# CONFIG_HID_ACRUX is not set
-CONFIG_HID_APPLE=y
-# CONFIG_HID_APPLEIR is not set
-# CONFIG_HID_AUREAL is not set
-CONFIG_HID_BELKIN=y
-# CONFIG_HID_BETOP_FF is not set
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-# CONFIG_HID_COUGAR is not set
-# CONFIG_HID_MACALLY is not set
-# CONFIG_HID_CMEDIA is not set
-# CONFIG_HID_CREATIVE_SB0540 is not set
-CONFIG_HID_CYPRESS=y
-# CONFIG_HID_DRAGONRISE is not set
-# CONFIG_HID_EMS_FF is not set
-# CONFIG_HID_ELECOM is not set
-# CONFIG_HID_ELO is not set
-CONFIG_HID_EZKEY=y
-# CONFIG_HID_GEMBIRD is not set
-# CONFIG_HID_GFRM is not set
-# CONFIG_HID_HOLTEK is not set
-# CONFIG_HID_KEYTOUCH is not set
-# CONFIG_HID_KYE is not set
-# CONFIG_HID_UCLOGIC is not set
-# CONFIG_HID_WALTOP is not set
-# CONFIG_HID_VIEWSONIC is not set
-# CONFIG_HID_GYRATION is not set
-# CONFIG_HID_ICADE is not set
-# CONFIG_HID_ITE is not set
-# CONFIG_HID_JABRA is not set
-# CONFIG_HID_TWINHAN is not set
-CONFIG_HID_KENSINGTON=y
-# CONFIG_HID_LCPOWER is not set
-# CONFIG_HID_LENOVO is not set
-CONFIG_HID_LOGITECH=y
-# CONFIG_HID_LOGITECH_HIDPP is not set
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-# CONFIG_LOGIG940_FF is not set
-# CONFIG_LOGIWHEELS_FF is not set
-# CONFIG_HID_MAGICMOUSE is not set
-# CONFIG_HID_MALTRON is not set
-# CONFIG_HID_MAYFLASH is not set
-# CONFIG_HID_REDRAGON is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-# CONFIG_HID_MULTITOUCH is not set
-# CONFIG_HID_NTI is not set
CONFIG_HID_NTI is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PLANTRONICS is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_RETRODE is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEAM is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set -# CONFIG_HID_ALPS is not set -# end of Special HID drivers - -# -# USB HID support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -CONFIG_USB_HIDDEV=y -# end of USB HID support - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set -# end of I2C HID support - -# -# Intel ISH HID support -# -# CONFIG_INTEL_ISH_HID is not set -# end of Intel ISH HID support -# end of HID support - -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -# CONFIG_USB_ULPI_BUS is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -CONFIG_USB_AUTOSUSPEND_DELAY=2 -# CONFIG_USB_MON is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_DBGCAP is not set -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -# CONFIG_USB_UAS is not set - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_CDNS3 is not set -# CONFIG_USB_MUSB_HDRC is not set -# 
CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -# CONFIG_USB_SERIAL is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HUB_USB251XB is not set -# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_ISP1301 is not set -# end of USB Physical Layer drivers - -# CONFIG_USB_GADGET is not set -# CONFIG_TYPEC is not set -# CONFIG_USB_ROLE_SWITCH is not set -# CONFIG_MMC is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_MTHCA_DEBUG=y -# CONFIG_INFINIBAND_CXGB3 is not set -# CONFIG_INFINIBAND_CXGB4 is not set -# CONFIG_INFINIBAND_EFA is not set -# CONFIG_INFINIBAND_I40IW is not set -# CONFIG_MLX4_INFINIBAND is not set -# CONFIG_MLX5_INFINIBAND is not set -CONFIG_INFINIBAND_OCRDMA=m -# CONFIG_INFINIBAND_USNIC is not set -# CONFIG_INFINIBAND_BNXT_RE is not set -# CONFIG_INFINIBAND_RDMAVT is not set -# CONFIG_RDMA_RXE is not set -# CONFIG_RDMA_SIW is not set -CONFIG_INFINIBAND_IPOIB=m -# CONFIG_INFINIBAND_IPOIB_CM is not set -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_OPA_VNIC is not set -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=m -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_AMD64=m -# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_IE31200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_EDAC_SBRIDGE=m -CONFIG_EDAC_SKX=m -CONFIG_EDAC_I10NM=m -CONFIG_EDAC_PND2=m -CONFIG_RTC_LIB=y -CONFIG_RTC_MC146818_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABEOZ9 is 
not set -# CONFIG_RTC_DRV_ABX80X is not set -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8010 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_I2C_AND_SPI=m - -# -# SPI and I2C RTC drivers -# -# CONFIG_RTC_DRV_DS3232 is not set -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1685_FAMILY is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_FTRTC010 is not set - -# -# HID Sensor RTC drivers -# -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=y -CONFIG_DMA_ACPI=y -# CONFIG_ALTERA_MSGDMA is not set -CONFIG_INTEL_IDMA64=y -CONFIG_INTEL_IOATDMA=y -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -CONFIG_DW_DMAC_CORE=y -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set -# CONFIG_DW_EDMA is not set -# CONFIG_DW_EDMA_PCIE is not set -CONFIG_HSU_DMA=y - -# -# DMA Clients -# -# CONFIG_ASYNC_TX_DMA is not set -# CONFIG_DMATEST is not set -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_SELFTESTS is not set -# end of DMABUF options - -CONFIG_DCA=y -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=m -# CONFIG_UIO_CIF is not set -CONFIG_UIO_PDRV_GENIRQ=m -CONFIG_UIO_DMEM_GENIRQ=m -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_VIRQFD=m -CONFIG_VFIO=m -# CONFIG_VFIO_NOIOMMU is not set -CONFIG_VFIO_PCI=m -# CONFIG_VFIO_PCI_VGA is not set -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI_IGD=y -CONFIG_VFIO_MDEV=m -CONFIG_VFIO_MDEV_DEVICE=m -CONFIG_IRQ_BYPASS_MANAGER=m -CONFIG_VIRT_DRIVERS=y -CONFIG_VBOXGUEST=m -CONFIG_VIRTIO=y -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_PMEM=m -CONFIG_VIRTIO_BALLOON=m -# CONFIG_VIRTIO_INPUT is not set -CONFIG_VIRTIO_MMIO=y -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_HYPERV is not set -# end of Microsoft Hyper-V guest support 
-
-# CONFIG_GREYBUS is not set
-CONFIG_STAGING=y
-# CONFIG_COMEDI is not set
-# CONFIG_RTS5208 is not set
-# CONFIG_FB_SM750 is not set
-
-#
-# Speakup console speech
-#
-# CONFIG_SPEAKUP is not set
-# end of Speakup console speech
-
-# CONFIG_STAGING_MEDIA is not set
-
-#
-# Android
-#
-CONFIG_ASHMEM=y
-# CONFIG_ANDROID_VSOC is not set
-# CONFIG_ION is not set
-# end of Android
-
-# CONFIG_LTE_GDM724X is not set
-# CONFIG_GS_FPGABOOT is not set
-# CONFIG_UNISYSSPAR is not set
-# CONFIG_MOST is not set
-
-#
-# Gasket devices
-#
-# CONFIG_STAGING_GASKET_FRAMEWORK is not set
-# end of Gasket devices
-
-# CONFIG_FIELDBUS_DEV is not set
-# CONFIG_KPC2000 is not set
-# CONFIG_USB_WUSB_CBAF is not set
-# CONFIG_UWB is not set
-# CONFIG_EXFAT_FS is not set
-CONFIG_QLGE=m
-# CONFIG_X86_PLATFORM_DEVICES is not set
-CONFIG_PMC_ATOM=y
-# CONFIG_MFD_CROS_EC is not set
-# CONFIG_CHROME_PLATFORMS is not set
-# CONFIG_MELLANOX_PLATFORM is not set
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_COMMON_CLK=y
-
-#
-# Common Clock Framework
-#
-# CONFIG_COMMON_CLK_MAX9485 is not set
-# CONFIG_COMMON_CLK_SI5341 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI544 is not set
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CS2000_CP is not set
-# end of Common Clock Framework
-
-# CONFIG_HWSPINLOCK is not set
-
-#
-# Clock Source drivers
-#
-CONFIG_CLKEVT_I8253=y
-CONFIG_I8253_LOCK=y
-CONFIG_CLKBLD_I8253=y
-# end of Clock Source drivers
-
-CONFIG_MAILBOX=y
-CONFIG_PCC=y
-# CONFIG_ALTERA_MBOX is not set
-CONFIG_IOMMU_IOVA=y
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-
-#
-# Generic IOMMU Pagetable Support
-#
-# end of Generic IOMMU Pagetable Support
-
-# CONFIG_IOMMU_DEBUGFS is not set
-# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
-CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_V2=m
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_SVM=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_IRQ_REMAP=y
-# CONFIG_SMMU_BYPASS_DEV is not set
-
-#
-# Remoteproc drivers
-#
-# CONFIG_REMOTEPROC is not set
-# end of Remoteproc drivers
-
-#
-# Rpmsg drivers
-#
-# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
-# CONFIG_RPMSG_VIRTIO is not set
-# end of Rpmsg drivers
-
-# CONFIG_SOUNDWIRE is not set
-
-#
-# SOC (System On Chip) specific Drivers
-#
-
-#
-# Amlogic SoC drivers
-#
-# end of Amlogic SoC drivers
-
-#
-# Aspeed SoC drivers
-#
-# end of Aspeed SoC drivers
-
-#
-# Broadcom SoC drivers
-#
-# end of Broadcom SoC drivers
-
-#
-# NXP/Freescale QorIQ SoC drivers
-#
-# end of NXP/Freescale QorIQ SoC drivers
-
-#
-# i.MX SoC drivers
-#
-# end of i.MX SoC drivers
-
-#
-# Qualcomm SoC drivers
-#
-# end of Qualcomm SoC drivers
-
-# CONFIG_SOC_TI is not set
-
-#
-# Xilinx SoC drivers
-#
-# CONFIG_XILINX_VCU is not set
-# end of Xilinx SoC drivers
-# end of SOC (System On Chip) specific Drivers
-
-CONFIG_PM_DEVFREQ=y
-
-#
-# DEVFREQ Governors
-#
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set
-# CONFIG_DEVFREQ_GOV_POWERSAVE is not set
-# CONFIG_DEVFREQ_GOV_USERSPACE is not set
-# CONFIG_DEVFREQ_GOV_PASSIVE is not set
-
-#
-# DEVFREQ Drivers
-#
-# CONFIG_PM_DEVFREQ_EVENT is not set
-# CONFIG_EXTCON is not set
-# CONFIG_MEMORY is not set
-# CONFIG_IIO is not set
-# CONFIG_NTB is not set
-# CONFIG_VME_BUS is not set
-# CONFIG_PWM is not set
-
-#
-# IRQ chip support
-#
-# end of IRQ chip support
-
-# CONFIG_IPACK_BUS is not set
-# CONFIG_RESET_CONTROLLER is not set
-
-#
-# PHY Subsystem
-#
-CONFIG_GENERIC_PHY=y
-# CONFIG_BCM_KONA_USB2_PHY is not set
-# CONFIG_PHY_PXA_28NM_HSIC is not set
-# CONFIG_PHY_PXA_28NM_USB2 is not set
-# end of PHY Subsystem
-
-# CONFIG_POWERCAP is not set
-# CONFIG_MCB is not set
-
-#
-# Performance monitor support
-#
-# end of Performance monitor support
-
-CONFIG_RAS=y
-# CONFIG_RAS_CEC is not set
-# CONFIG_THUNDERBOLT is not set
-
-#
-# Android
-#
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-# CONFIG_ANDROID_BINDERFS is not set
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
-# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
-# end of Android
-
-CONFIG_LIBNVDIMM=y
-CONFIG_BLK_DEV_PMEM=y
-CONFIG_ND_BLK=y
-CONFIG_ND_CLAIM=y
-CONFIG_ND_BTT=y
-CONFIG_BTT=y
-CONFIG_ND_PFN=y
-CONFIG_NVDIMM_PFN=y
-CONFIG_NVDIMM_DAX=y
-CONFIG_NVDIMM_KEYS=y
-CONFIG_DAX_DRIVER=y
-CONFIG_DAX=y
-CONFIG_DEV_DAX=m
-CONFIG_DEV_DAX_PMEM=m
-CONFIG_DEV_DAX_KMEM=m
-CONFIG_DEV_DAX_PMEM_COMPAT=m
-CONFIG_NVMEM=y
-CONFIG_NVMEM_SYSFS=y
-
-#
-# HW tracing support
-#
-# CONFIG_STM is not set
-# CONFIG_INTEL_TH is not set
-# end of HW tracing support
-
-# CONFIG_FPGA is not set
-CONFIG_PM_OPP=y
-# CONFIG_UNISYS_VISORBUS is not set
-# CONFIG_SIOX is not set
-# CONFIG_SLIMBUS is not set
-# CONFIG_INTERCONNECT is not set
-# CONFIG_COUNTER is not set
-# end of Device Drivers
-
-#
-# File systems
-#
-CONFIG_DCACHE_WORD_ACCESS=y
-# CONFIG_VALIDATE_FS_PARSER is not set
-CONFIG_FS_IOMAP=y
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-# CONFIG_EXT4_DEBUG is not set
-CONFIG_JBD2=y
-# CONFIG_JBD2_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-# CONFIG_XFS_ONLINE_SCRUB is not set
-CONFIG_XFS_WARN=y
-# CONFIG_XFS_DEBUG is not set
-CONFIG_GFS2_FS=m
-CONFIG_OCFS2_FS=m
-CONFIG_OCFS2_FS_O2CB=m
-CONFIG_OCFS2_FS_STATS=y
-CONFIG_OCFS2_DEBUG_MASKLOG=y
-# CONFIG_OCFS2_DEBUG_FS is not set
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
-# CONFIG_BTRFS_DEBUG is not set
-# CONFIG_BTRFS_ASSERT is not set
-# CONFIG_BTRFS_FS_REF_VERIFY is not set
-CONFIG_NILFS2_FS=m
-# CONFIG_F2FS_FS is not set
-CONFIG_FS_DAX=y
-CONFIG_FS_DAX_PMD=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-CONFIG_EXPORTFS_BLOCK_OPS=y
-CONFIG_FILE_LOCKING=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_FS_ENCRYPTION=y
-# CONFIG_FS_VERITY is not set
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_FANOTIFY=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_PRINT_QUOTA_WARNING=y
-# CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=y
-# CONFIG_QFMT_V1 is not set
-CONFIG_QFMT_V2=y
-CONFIG_QUOTACTL=y
-CONFIG_QUOTACTL_COMPAT=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_AUTOFS_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_VIRTIO_FS=m
-CONFIG_OVERLAY_FS=m
-CONFIG_OVERLAY_FS_REDIRECT_DIR=y
-CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
-CONFIG_OVERLAY_FS_INDEX=y
-# CONFIG_OVERLAY_FS_XINO_AUTO is not set
-CONFIG_OVERLAY_FS_METACOPY=y
-
-#
-# Caches
-#
-CONFIG_FSCACHE=m
-# CONFIG_FSCACHE_STATS is not set
-# CONFIG_FSCACHE_HISTOGRAM is not set
-# CONFIG_FSCACHE_DEBUG is not set
-# CONFIG_FSCACHE_OBJECT_LIST is not set
-# CONFIG_CACHEFILES is not set
-# end of Caches
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=y
-# end of CD-ROM/DVD Filesystems
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-# CONFIG_FAT_DEFAULT_UTF8 is not set
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
-# end of DOS/FAT/NT Filesystems
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_VMCORE=y
-# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-# CONFIG_PROC_CHILDREN is not set
-CONFIG_PROC_PID_ARCH_STATUS=y
-CONFIG_KERNFS=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_MEMFD_CREATE=y
-CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_EFIVAR_FS=y
-# end of Pseudo filesystems
-
-CONFIG_MISC_FILESYSTEMS=y
-# CONFIG_ORANGEFS_FS is not set
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_ECRYPT_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-CONFIG_SQUASHFS=m
-CONFIG_SQUASHFS_FILE_CACHE=y
-# CONFIG_SQUASHFS_FILE_DIRECT is not set
-CONFIG_SQUASHFS_DECOMP_SINGLE=y
-# CONFIG_SQUASHFS_DECOMP_MULTI is not set
-# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
-CONFIG_SQUASHFS_XATTR=y
-# CONFIG_SQUASHFS_ZLIB is not set
-# CONFIG_SQUASHFS_LZ4 is not set
-# CONFIG_SQUASHFS_LZO is not set
-CONFIG_SQUASHFS_XZ=y
-# CONFIG_SQUASHFS_ZSTD is not set
-# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
-# CONFIG_SQUASHFS_EMBEDDED is not set
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-# CONFIG_VXFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_OMFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_QNX6FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_PSTORE=y
-CONFIG_PSTORE_DEFLATE_COMPRESS=m
-CONFIG_PSTORE_LZO_COMPRESS=m
-# CONFIG_PSTORE_LZ4_COMPRESS is not set
-# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
-# CONFIG_PSTORE_842_COMPRESS is not set
-# CONFIG_PSTORE_ZSTD_COMPRESS is not set
-CONFIG_PSTORE_COMPRESS=y
-CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y
-# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set
-CONFIG_PSTORE_COMPRESS_DEFAULT="deflate"
-# CONFIG_PSTORE_CONSOLE is not set
-# CONFIG_PSTORE_PMSG is not set
-# CONFIG_PSTORE_FTRACE is not set
-# CONFIG_PSTORE_RAM is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-# CONFIG_EROFS_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V2=m
-CONFIG_NFS_V3=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-# CONFIG_NFS_SWAP is not set
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_PNFS_FILE_LAYOUT=m
-CONFIG_PNFS_BLOCK=m
-CONFIG_PNFS_FLEXFILE_LAYOUT=m
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN=""
-# CONFIG_NFS_V4_1_MIGRATION is not set
-CONFIG_NFS_FSCACHE=y
-# CONFIG_NFS_USE_LEGACY_DNS is not set
-CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFS_DEBUG=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_PNFS=y
-CONFIG_NFSD_BLOCKLAYOUT=y
-CONFIG_NFSD_SCSILAYOUT=y
-CONFIG_NFSD_FLEXFILELAYOUT=y
-CONFIG_GRACE_PERIOD=m
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BACKCHANNEL=y
-CONFIG_RPCSEC_GSS_KRB5=m
-# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
-CONFIG_SUNRPC_DEBUG=y
-CONFIG_SUNRPC_XPRT_RDMA=m
-CONFIG_CEPH_FS=m
-CONFIG_CEPH_FSCACHE=y
-CONFIG_CEPH_FS_POSIX_ACL=y
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS2 is not set
-CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
-# CONFIG_CIFS_WEAK_PW_HASH is not set
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-# CONFIG_CIFS_DEBUG is not set
-CONFIG_CIFS_DFS_UPCALL=y
-# CONFIG_CIFS_SMB_DIRECT is not set
-CONFIG_CIFS_FSCACHE=y
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_MAC_ROMAN is not set
-# CONFIG_NLS_MAC_CELTIC is not set
-# CONFIG_NLS_MAC_CENTEURO is not set
-# CONFIG_NLS_MAC_CROATIAN is not set
-# CONFIG_NLS_MAC_CYRILLIC is not set
-# CONFIG_NLS_MAC_GAELIC is not set
-# CONFIG_NLS_MAC_GREEK is not set
-# CONFIG_NLS_MAC_ICELAND is not set
-# CONFIG_NLS_MAC_INUIT is not set
-# CONFIG_NLS_MAC_ROMANIAN is not set
-# CONFIG_NLS_MAC_TURKISH is not set
-CONFIG_NLS_UTF8=y
-# CONFIG_DLM is not set
-# CONFIG_UNICODE is not set
-# end of File systems
-
-#
-# Security options
-#
-CONFIG_KEYS=y
-CONFIG_KEYS_COMPAT=y
-# CONFIG_KEYS_REQUEST_CACHE is not set
-# CONFIG_PERSISTENT_KEYRINGS is not set
-# CONFIG_BIG_KEYS is not set
-# CONFIG_TRUSTED_KEYS is not set
-CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_KEY_DH_OPERATIONS is not set
-# CONFIG_SECURITY_DMESG_RESTRICT is not set
-# CONFIG_SECURITY is not set
-CONFIG_SECURITYFS=y
-CONFIG_PAGE_TABLE_ISOLATION=y
-# CONFIG_INTEL_TXT is not set
-CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
-# CONFIG_HARDENED_USERCOPY is not set
-# CONFIG_FORTIFY_SOURCE is not set
-# CONFIG_STATIC_USERMODEHELPER is not set
-CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
-
-#
-# Kernel hardening options
-#
-
-#
-# Memory initialization
-#
-CONFIG_INIT_STACK_NONE=y
-# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
-# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
-# end of Memory initialization
-# end of Kernel hardening options
-# end of Security options
-
-CONFIG_XOR_BLOCKS=y
-CONFIG_ASYNC_CORE=y
-CONFIG_ASYNC_MEMCPY=y
-CONFIG_ASYNC_XOR=y
-CONFIG_ASYNC_PQ=y
-CONFIG_ASYNC_RAID6_RECOV=y
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=y
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_BLKCIPHER2=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=y
-CONFIG_CRYPTO_AKCIPHER2=y
-CONFIG_CRYPTO_AKCIPHER=y
-CONFIG_CRYPTO_KPP2=y
-CONFIG_CRYPTO_KPP=m
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_SIMD=m
-CONFIG_CRYPTO_GLUE_HELPER_X86=m
-CONFIG_CRYPTO_ENGINE=m
-
-#
-# Public-key cryptography
-#
-CONFIG_CRYPTO_RSA=y
-CONFIG_CRYPTO_DH=m
-# CONFIG_CRYPTO_ECDH is not set
-# CONFIG_CRYPTO_ECRDSA is not set
-
-#
-# Authenticated Encryption with Associated Data
-#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-# CONFIG_CRYPTO_CHACHA20POLY1305 is not set
-# CONFIG_CRYPTO_AEGIS128 is not set
-# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set
-CONFIG_CRYPTO_SEQIV=y
-CONFIG_CRYPTO_ECHAINIV=m
-
-#
-# Block modes
-#
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_CFB is not set
-CONFIG_CRYPTO_CTR=y
-CONFIG_CRYPTO_CTS=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_LRW=m
-# CONFIG_CRYPTO_OFB is not set
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=y
-# CONFIG_CRYPTO_KEYWRAP is not set
-# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set
-# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set
-# CONFIG_CRYPTO_ADIANTUM is not set
-CONFIG_CRYPTO_ESSIV=m
-
-#
-# Hash modes
-#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-
-#
-# Digest
-#
-CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_CRC32C_INTEL=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRC32_PCLMUL=m
-# CONFIG_CRYPTO_XXHASH is not set
-CONFIG_CRYPTO_CRCT10DIF=y
-# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
-CONFIG_CRYPTO_GHASH=m
-# CONFIG_CRYPTO_POLY1305 is not set
-# CONFIG_CRYPTO_POLY1305_X86_64 is not set
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA1_SSSE3=m
-CONFIG_CRYPTO_SHA256_SSSE3=m
-CONFIG_CRYPTO_SHA512_SSSE3=m
-CONFIG_CRYPTO_LIB_SHA256=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_SHA3 is not set
-# CONFIG_CRYPTO_SM3 is not set
-# CONFIG_CRYPTO_STREEBOG is not set
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_LIB_AES=y
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_AES_TI is not set
-CONFIG_CRYPTO_AES_NI_INTEL=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_LIB_ARC4=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_BLOWFISH_COMMON=m
-CONFIG_CRYPTO_BLOWFISH_X86_64=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAMELLIA_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
-CONFIG_CRYPTO_CAST_COMMON=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST5_AVX_X86_64=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_CAST6_AVX_X86_64=m
-CONFIG_CRYPTO_LIB_DES=m
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-# CONFIG_CRYPTO_CHACHA20 is not set
-# CONFIG_CRYPTO_CHACHA20_X86_64 is not set
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
-# CONFIG_CRYPTO_SM4 is not set
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_TWOFISH_X86_64=m
-CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
-CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
-
-#
-# Compression
-#
-CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_842 is not set
-# CONFIG_CRYPTO_LZ4 is not set
-# CONFIG_CRYPTO_LZ4HC is not set
-# CONFIG_CRYPTO_ZSTD is not set
-
-#
-# Random Number Generation
-#
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_DRBG_MENU=y
-CONFIG_CRYPTO_DRBG_HMAC=y
-# CONFIG_CRYPTO_DRBG_HASH is not set
-# CONFIG_CRYPTO_DRBG_CTR is not set
-CONFIG_CRYPTO_DRBG=y
-CONFIG_CRYPTO_JITTERENTROPY=y
-CONFIG_CRYPTO_USER_API=y
-CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_USER_API_SKCIPHER=y
-# CONFIG_CRYPTO_USER_API_RNG is not set
-# CONFIG_CRYPTO_USER_API_AEAD is not set
-# CONFIG_CRYPTO_STATS is not set
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_DEV_PADLOCK=m
-CONFIG_CRYPTO_DEV_PADLOCK_AES=m
-CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
-# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
-# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
-# CONFIG_CRYPTO_DEV_CCP is not set
-CONFIG_CRYPTO_DEV_QAT=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
-CONFIG_CRYPTO_DEV_QAT_C3XXX=m
-CONFIG_CRYPTO_DEV_QAT_C62X=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
-CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
-CONFIG_CRYPTO_DEV_QAT_C62XVF=m
-# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set
-# CONFIG_CRYPTO_DEV_CHELSIO is not set
-# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set
-CONFIG_CRYPTO_DEV_VIRTIO=m
-# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
-CONFIG_X509_CERTIFICATE_PARSER=y
-# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
-CONFIG_PKCS7_MESSAGE_PARSER=y
-
-#
-# Certificates for signature checking
-#
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_TRUSTED_KEYS=""
-# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
-# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
-# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
-# end of Certificates for signature checking
-
-CONFIG_BINARY_PRINTF=y
-
-#
-# Library routines
-#
-CONFIG_RAID6_PQ=y
-CONFIG_RAID6_PQ_BENCHMARK=y
-# CONFIG_PACKING is not set
-CONFIG_BITREVERSE=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_NET_UTILS=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_CORDIC=m
-CONFIG_RATIONAL=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_IOMAP=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC32=y
-# CONFIG_CRC32_SELFTEST is not set
-CONFIG_CRC32_SLICEBY8=y
-# CONFIG_CRC32_SLICEBY4 is not set
-# CONFIG_CRC32_SARWATE is not set
-# CONFIG_CRC32_BIT is not set
-CONFIG_CRC64=m
-# CONFIG_CRC4 is not set
-# CONFIG_CRC7 is not set
-CONFIG_LIBCRC32C=y
-CONFIG_CRC8=m
-CONFIG_XXHASH=y
-# CONFIG_RANDOM32_SELFTEST is not set
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_LZ4_DECOMPRESS=y
-CONFIG_ZSTD_COMPRESS=m
-CONFIG_ZSTD_DECOMPRESS=m
-CONFIG_XZ_DEC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-# CONFIG_XZ_DEC_IA64 is not set
-# CONFIG_XZ_DEC_ARM is not set
-# CONFIG_XZ_DEC_ARMTHUMB is not set
-# CONFIG_XZ_DEC_SPARC is not set
-CONFIG_XZ_DEC_BCJ=y
-# CONFIG_XZ_DEC_TEST is not set
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_LZ4=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_BTREE=y
-CONFIG_INTERVAL_TREE=y
-CONFIG_XARRAY_MULTI=y
-CONFIG_ASSOCIATIVE_ARRAY=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAS_DMA=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
-CONFIG_SWIOTLB=y
-# CONFIG_DMA_API_DEBUG is not set
-CONFIG_SGL_ALLOC=y
-CONFIG_IOMMU_HELPER=y
-CONFIG_CHECK_SIGNATURE=y
-CONFIG_CPUMASK_OFFSTACK=y
-CONFIG_CPU_RMAP=y
-CONFIG_DQL=y
-CONFIG_GLOB=y
-# CONFIG_GLOB_SELFTEST is not set
-CONFIG_NLATTR=y
-CONFIG_LRU_CACHE=m
-CONFIG_CLZ_TAB=y
-CONFIG_IRQ_POLL=y
-CONFIG_MPILIB=y
-CONFIG_DIMLIB=y
-CONFIG_OID_REGISTRY=y
-CONFIG_UCS2_STRING=y
-CONFIG_HAVE_GENERIC_VDSO=y
-CONFIG_GENERIC_GETTIMEOFDAY=y
-CONFIG_FONT_SUPPORT=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_SG_POOL=y
-CONFIG_ARCH_HAS_PMEM_API=y
-CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
-CONFIG_ARCH_HAS_UACCESS_MCSAFE=y
-CONFIG_ARCH_STACKWALK=y
-CONFIG_SBITMAP=y
-# CONFIG_STRING_SELFTEST is not set
-# end of Library routines
-
-#
-# Kernel hacking
-#
-
-#
-# printk and dmesg options
-#
-CONFIG_PRINTK_TIME=y
-# CONFIG_PRINTK_CALLER is not set
-CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
-CONFIG_CONSOLE_LOGLEVEL_QUIET=4
-CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
-CONFIG_BOOT_PRINTK_DELAY=y
-CONFIG_DYNAMIC_DEBUG=y
-# end of printk and dmesg options
-
-#
-# Compile-time checks and compiler options
-#
-CONFIG_DEBUG_INFO=y
-# CONFIG_DEBUG_INFO_REDUCED is not set
-# CONFIG_DEBUG_INFO_SPLIT is not set
-# CONFIG_DEBUG_INFO_DWARF4 is not set
-# CONFIG_DEBUG_INFO_BTF is not set
-# CONFIG_GDB_SCRIPTS is not set
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=2048
-CONFIG_STRIP_ASM_SYMS=y
-# CONFIG_READABLE_ASM is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_INSTALL is not set
-CONFIG_OPTIMIZE_INLINING=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-CONFIG_FRAME_POINTER=y
-CONFIG_STACK_VALIDATION=y
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-# end of Compile-time checks and compiler options
-
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
-CONFIG_MAGIC_SYSRQ_SERIAL=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MISC=y
-
-#
-# Memory Debugging
-#
-# CONFIG_PAGE_EXTENSION is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_OWNER is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-# CONFIG_SLUB_STATS is not set
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-# CONFIG_DEBUG_KMEMLEAK is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-# CONFIG_DEBUG_VIRTUAL is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_HAVE_ARCH_KASAN=y
-CONFIG_CC_HAS_KASAN_GENERIC=y
-# CONFIG_KASAN is not set
-CONFIG_KASAN_STACK=1
-# end of Memory Debugging
-
-CONFIG_ARCH_HAS_KCOV=y
-CONFIG_CC_HAS_SANCOV_TRACE_PC=y
-# CONFIG_KCOV is not set
-CONFIG_DEBUG_SHIRQ=y
-
-#
-# Debug Lockups and Hangs
-#
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_SOFTLOCKUP_DETECTOR=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-CONFIG_HARDLOCKUP_DETECTOR_PERF=y
-CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
-CONFIG_HARDLOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-# CONFIG_WQ_WATCHDOG is not set
-# end of Debug Lockups and Hangs
-
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PANIC_ON_OOPS_VALUE=1
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_SCHED_DEBUG=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHEDSTATS=y
-# CONFIG_SCHED_STACK_END_CHECK is not set
-# CONFIG_DEBUG_TIMEKEEPING is not set
-
-#
-# Lock Debugging (spinlocks, mutexes, etc...)
-#
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
-# CONFIG_DEBUG_RWSEMS is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_DEBUG_ATOMIC_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_LOCK_TORTURE_TEST is not set
-# CONFIG_WW_MUTEX_SELFTEST is not set
-# end of Lock Debugging (spinlocks, mutexes, etc...)
-
-CONFIG_STACKTRACE=y
-# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_LIST=y
-# CONFIG_DEBUG_PLIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_DEBUG_CREDENTIALS is not set
-
-#
-# RCU Debugging
-#
-# CONFIG_RCU_PERF_TEST is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_TRACE is not set
-# CONFIG_RCU_EQS_DEBUG is not set
-# end of RCU Debugging
-
-# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-# CONFIG_NOTIFIER_ERROR_INJECTION is not set
-CONFIG_FUNCTION_ERROR_INJECTION=y
-# CONFIG_FAULT_INJECTION is not set
-CONFIG_LATENCYTOP=y
-CONFIG_USER_STACKTRACE_SUPPORT=y
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_FENTRY=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_TRACER_MAX_TRACE=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
-CONFIG_GENERIC_TRACER=y
-CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_FUNCTION_GRAPH_TRACER=y
-# CONFIG_PREEMPTIRQ_EVENTS is not set
-# CONFIG_IRQSOFF_TRACER is not set
-CONFIG_SCHED_TRACER=y
-# CONFIG_HWLAT_TRACER is not set
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT=y
-# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_KPROBE_EVENTS=y
-# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set
-CONFIG_UPROBE_EVENTS=y
-CONFIG_BPF_EVENTS=y
-CONFIG_DYNAMIC_EVENTS=y
-CONFIG_PROBE_EVENTS=y
-CONFIG_DYNAMIC_FTRACE=y
-CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_BPF_KPROBE_OVERRIDE is not set
-CONFIG_FTRACE_MCOUNT_RECORD=y
-# CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_MMIOTRACE is not set
-# CONFIG_HIST_TRIGGERS is not set
-# CONFIG_TRACEPOINT_BENCHMARK is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
-# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_PREEMPTIRQ_DELAY_TEST is not set
-# CONFIG_TRACE_EVAL_MAP_FILE is not set
-CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
-CONFIG_RUNTIME_TESTING_MENU=y
-# CONFIG_LKDTM is not set
-# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_SORT is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_RBTREE_TEST is not set
-# CONFIG_REED_SOLOMON_TEST is not set
-# CONFIG_INTERVAL_TREE_TEST is not set
-# CONFIG_PERCPU_TEST is not set
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_ASYNC_RAID6_TEST=m
-# CONFIG_TEST_HEXDUMP is not set
-# CONFIG_TEST_STRING_HELPERS is not set
-# CONFIG_TEST_STRSCPY is not set
-CONFIG_TEST_KSTRTOX=y
-# CONFIG_TEST_PRINTF is not set
-# CONFIG_TEST_BITMAP is not set
-# CONFIG_TEST_BITFIELD is not set
-# CONFIG_TEST_UUID is not set
-# CONFIG_TEST_XARRAY is not set
-# CONFIG_TEST_OVERFLOW is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_HASH is not set
-# CONFIG_TEST_IDA is not set
-# CONFIG_TEST_LKM is not set
-# CONFIG_TEST_VMALLOC is not set
-# CONFIG_TEST_USER_COPY is not set
-# CONFIG_TEST_BPF is not set
-# CONFIG_TEST_BLACKHOLE_DEV is not set
-# CONFIG_FIND_BIT_BENCHMARK is not set
-# CONFIG_TEST_FIRMWARE is not set
-# CONFIG_TEST_SYSCTL is not set
-# CONFIG_TEST_UDELAY is not set
-# CONFIG_TEST_STATIC_KEYS is not set
-# CONFIG_TEST_KMOD is not set
-# CONFIG_TEST_MEMCAT_P is not set
-# CONFIG_TEST_LIVEPATCH is not set
-# CONFIG_TEST_STACKINIT is not set
-# CONFIG_TEST_MEMINIT is not set
-# CONFIG_MEMTEST is not set
-# CONFIG_BUG_ON_DATA_CORRUPTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
-# CONFIG_UBSAN is not set
-CONFIG_UBSAN_ALIGNMENT=y
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
-CONFIG_STRICT_DEVMEM=y
-# CONFIG_IO_STRICT_DEVMEM is not set
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-CONFIG_EARLY_PRINTK_USB=y
-CONFIG_X86_VERBOSE_BOOTUP=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_EARLY_PRINTK_DBGP=y
-# CONFIG_EARLY_PRINTK_USB_XDBC is not set
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_EFI_PGT_DUMP is not set
-# CONFIG_DEBUG_WX is not set
-CONFIG_DOUBLEFAULT=y
-# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_IOMMU_DEBUG is not set
-CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-CONFIG_X86_DECODER_SELFTEST=y
-CONFIG_IO_DELAY_0X80=y
-# CONFIG_IO_DELAY_0XED is not set
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_DEBUG_BOOT_PARAMS=y
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_DEBUG_ENTRY is not set
-# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_X86_DEBUG_FPU=y
-# CONFIG_PUNIT_ATOM_DEBUG is not set
-# CONFIG_UNWINDER_ORC is not set
-CONFIG_UNWINDER_FRAME_POINTER=y
-# CONFIG_UNWINDER_GUESS is not set
-# end of Kernel hacking
diff --git a/package/default/generate-rpms.sh b/package/default/generate-rpms.sh
deleted file mode 100755
index ec17f3e8651c..000000000000
--- a/package/default/generate-rpms.sh
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/bin/bash
-
-# ==============================================================================
-# Description: Automaticly generating kernel rpm package with specified branch
-# according to HEAD tag.
-#
-# Author: David Shan <davidshan@tencent.com>
-# ===============================================================================
-
-#variables
-kernel_full_name=""
-kernel_version=""
-tlinux_release=""
-curdir=`pwd`
-build_specdir="/usr/src/packages/SPECS"
-build_srcdir="/usr/src/packages/SOURCES"
-kernel_type="default"
-spec_file_name=""
-make_jobs=""
-nosign_suffix=""
-build_src_only=0
-strip_sign_token=0
-raw_tagged_name=""
-kasan=0
-# to control wheter to build rpm for xen domU
-isXenU=""
-build_perf=""
-kernel_default_types=(default)
-
-#funcitons
-usage()
-{
-    echo "generate-rpms [-t \$tag_name ] [-j \$jobs ][-h]"
-    echo "     -t tag_name"
-    echo "        Tagged name in the git tree."
-    echo "     -j jobs"
-    echo "        Specifies the number of jobs (threads) to run make."
-    echo "     -d"
-    echo "        disable module digital signature."
-    echo "     -p"
-    echo "        build perf rpm only."
-
-    echo -e "\n"
-    echo " Note:By default, we only generate latest tag kernel for standard kernels"
-    echo "      without -t and -k option."
-    echo "      So, if you hope to get certain tag kernel, please use -t."
-    echo "      Also, only for state type kernel, please use -k option with state value."
-}
-
-get_kernel_version()
-{
-    local makefile="$1/Makefile"
-
-    if [ ! -e $makefile ]; then
-        echo "Error: No Makefile found."
-        exit 1
-    fi
-
-    kernel_version+=`grep "^VERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
-    kernel_version+="."
-    kernel_version+=`grep "^PATCHLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
-    kernel_version+="."
-    kernel_version+=`grep "^SUBLEVEL" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
-    #kernel_version+=`grep "^EXTRAVERSION" ${makefile} | head -1 | awk -F "=" '{print $2}' | tr -d ' '`
-
-    echo "kernel version: $kernel_version"
-}
-
-get_tlinux_name()
-{
-    if [ -n "$tag_name" ]; then
-        raw_tagged_name="$tag_name"
-    else
-        raw_tagged_name=`git describe --tags`
-    fi
-
-    if [ -z "${raw_tagged_name}" ];then
-        echo "Error:Can't get kernel version from git tree."
-        exit 1
-    fi
-
-    if [[ $raw_tagged_name != public-x86-* ]]; then
-        echo "$raw_tagged_name not valid tag name for public-x86"
-        exit 11
-    fi
-
-    tagged_name=${raw_tagged_name#public-x86-}
-
-    echo "${tagged_name}" | grep 'kasan'
-    if [ $? -eq 0 ]; then
-        kasan=1
-    fi
-
-    if [ "$build_perf" = "yes" ]; then
-        rpm_name="perf"
-    else
-        rpm_name="kernel"
-    fi
-
-    tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3`
-    extra_version=`echo $tagged_name | cut -d- -f2`
-    kernel_full_name=${rpm_name}-${kernel_version}
-
-    echo "$kernel_version"
-    echo "$tlinux_release"
-    echo "$kernel_full_name"
-    echo "extra_version $extra_version"
-}
-
-prepare_tlinux_spec()
-{
-    local sign_token
-    spec_file_name=${build_specdir}/${kernel_full_name}.spec
-    spec_contents="%define name ${rpm_name} \n"
-    spec_contents+="%define version ${kernel_version} \n"
-    spec_contents+="%define release_os ${tlinux_release} \n"
-    spec_contents+="%define tagged_name ${tagged_name} \n"
-    spec_contents+="%define extra_version ${extra_version} \n"
-
-    if [ -z "${kernel_type}" ];then
-        spec_contents+="%define kernel_all_types ${kernel_default_types[@]} \n"
-    else
-        spec_contents+="%define kernel_all_types ${kernel_type} \n"
-    fi
-
-    if [ -n "${make_jobs}" ];then
-        spec_contents+="%define jobs ${make_jobs} \n\n"
-    fi
-
-    if [ -e /etc/redhat-release ];then
-        spec_contents+="%define debug_package %{nil}\n"
-        spec_contents+="%define __os_install_post %{nil}\n"
-
-    fi
-
-    echo -e "${spec_contents}" > $spec_file_name
-
-    if [ "$build_perf" = "yes" ]; then
-        cat $curdir/perf-tlinux.spec >> $spec_file_name
-    else
-        cat $curdir/kernel-tlinux.spec >> $spec_file_name
-    fi
-}
-
-#main function
-
-#Parse Parameters
-while getopts ":t:k:j:hdpx:" option "$@"
-do
-    case $option in
-        "t" )
-            tag_name=$OPTARG
-            ;;
-        "j" )
-            make_jobs=$OPTARG
-            ;;
-        "h" )
-            usage
-            exit 0
-            ;;
-        "p" )
-            build_perf="yes"
-            ;;
-        "?" )
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-#test if parameter of kernel type is valid
-if [ -n "${kernel_type}" ] && [ ! -e config."${kernel_type}" ]
-then
-    echo "Error: bad kernel type name. Config file for ${kernel_type} does not exist."
-    usage
-    exit 1
-elif [ -n "${kernel_type}" ] && [ -n "$isXenU" ] && [ ! -e config."${kernel_type}".xenU ]
-then
-    echo "Error: bad kernel type name. Config file for ${kernel_type}.xenU does not exist."
-    usage
-    exit 1
-
-fi
-
-if [ -e /etc/SuSE-release ];then
-    build_specdir="/usr/src/packages/SPECS"
-    build_srcdir="/usr/src/packages/SOURCES"
-elif [ -e /etc/redhat-release ];then
-    build_specdir="$HOME/rpmbuild/SPECS"
-    build_srcdir="$HOME/rpmbuild/SOURCES"
-else
-    echo "Error: Compile on unknown system."
-    exit 1
-fi
-
-
-if [ ! -e "${build_srcdir}" ]; then
-    mkdir $build_srcdir
-    [ $? == 1 ] && exit 1
-fi
-
-if [ ! -e "${build_specdir}" ]; then
-    mkdir $build_specdir
-    [ $? == 1 ] && exit 1
-fi
-
-cd ../../
-origsrc=`pwd`
-
-
-if [ $kasan -eq 1 ]; then
-    nosign_suffix="_kasan"
-else
-    nosign_suffix=""
-fi
-
-get_kernel_version $origsrc
-get_tlinux_name
-echo "Prepare $kernel_full_name source."
-prepare_tlinux_spec
-
-cp package/default/tlinux_cciss_link.modules $build_srcdir
-
-if test -e ${build_srcdir}/${kernel_full_name}; then
-    rm -fr ${build_srcdir}/${kernel_full_name}
-fi
-
-#tagged_name is a confirmed tag name and will be used for final source.
-git archive --format=tar --prefix=${kernel_full_name}/ ${raw_tagged_name} | (cd ${build_srcdir} && tar xf -)
-if [ $? -ne 0 ];then
-    echo "Error:can't prepare $kernel_full_name source with git archive!"
-    exit 1
-fi
-
-cd ${build_srcdir}/${kernel_full_name}
-
-echo $config_suffix
-
-cd ..
-tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name}
-rm -fr ${kernel_full_name}
-
-echo "Build kernel rpm package."
-if [ $build_src_only -eq 0 ]; then
-    rpmbuild -bb ${target} ${spec_file_name}
-fi
-
-echo "Build kernel source rpm package."
-pwd
-tar -zxf ${kernel_full_name}.tar.gz
-
-rm -fr ${kernel_full_name}/package/default/generate-rpms.sh
-tar -zcf ${kernel_full_name}.tar.gz ${kernel_full_name}
-
-rpmbuild -bs ${target} ${spec_file_name}
-
diff --git a/package/default/kernel-tlinux.spec b/package/default/kernel-tlinux.spec
deleted file mode 100644
index 53501eb71faa..000000000000
--- a/package/default/kernel-tlinux.spec
+++ /dev/null
@@ -1,553 +0,0 @@
-%global with_debuginfo 0
-%global with_perf 1
-%if 0%{?rhel} == 6
-%global rdist .tl1
-%global debug_path /usr/lib/debug/lib/
-%else
-%global debug_path /usr/lib/debug/usr/lib/
-%if 0%{?rhel} == 7
-%global rdist .tl2
-%else
-%if 0%{?rhel} == 8
-%global rdist .tl3
-%global __python /usr/bin/python2
-%global _enable_debug_packages %{nil}
-%global debug_package %{nil}
-%global __debug_package %{nil}
-%global __debug_install_post %{nil}
-%global _build_id_links none
-%endif
-%endif
-%endif
-
-%global dist %{nil}
-
-Summary: Kernel for Tencent physical machine
-Name: %{name}
-Version: %{version}
-Release: %{release_os}%{?rdist}
-License: GPLv2
-Vendor: Tencent
-Packager: tlinux team <g_APD_SRDC_OS@tencent.com>
-Provides: kernel = %{version}-%{release}
-Group: System Environment/Kernel
-Source0: %{name}-%{version}.tar.gz
-Source1: tlinux_cciss_link.modules
-URL: http://www.tencent.com
-ExclusiveArch: x86_64
-Distribution: Tencent Linux
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build
-BuildRequires: wget bc module-init-tools curl
-%if %{with_perf}
-BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex openssl-devel
-BuildRequires: xmlto asciidoc
-BuildRequires: audit-libs-devel
-%if 0%{?rhel} == 8
-BuildRequires: python2-devel
-%else
-BuildRequires: python-devel
-%endif
-%ifnarch s390 s390x
-BuildRequires: numactl-devel
-%endif
-%endif
-Requires(pre): linux-firmware >= 20150904-44
-
-# for the 'hostname' command
-%if 0%{?rhel} >= 7
-BuildRequires: hostname
-%else
-BuildRequires: net-tools
-%endif
-
-%description
-This package contains tlinux kernel for physical machine
-
-%package debuginfo-common
-Summary: tlinux kernel vmlinux for crash debug
-Release: %{release}
-Group: System Environment/Kernel
-Provides: kernel-debuginfo-common kernel-debuginfo
-Obsoletes: kernel-debuginfo
-AutoReqprov: no
-
-%description debuginfo-common
-This package container vmlinux, System.map and config files for kernel
-debugging.
-
-%package devel
-Summary: Development package for building kernel modules to match the %{version}-%{release} kernel
-Release: %{release}
-Group: System Environment/Kernel
-Provides: kernel-devel
-Obsoletes: kernel-devel
-AutoReqprov: no
-
-%description devel
-This package provides kernel headers and makefiles sufficient to build modules
-against the %{version}-%{release} kernel package.
-
-
-%package headers
-Summary: Header files for the Linux kernel for use by glibc
-Group: Development/System
-Obsoletes: glibc-kernheaders
-Provides: glibc-kernheaders = 3.0-46
-Provides: kernel-headers
-Obsoletes: kernel-headers
-AutoReqprov: no
-
-%description headers
-Kernel-headers includes the C header files that specify the interface
-between the Linux kernel and userspace libraries and programs. The
-header files define structures and constants that are needed for
-building most standard programs and are also needed for rebuilding the
-glibc package.
-
-%if %{with_perf}
-%package -n perf
-Summary: Performance monitoring for the Linux kernel
-Group: Development/System
-License: GPLv2
-%description -n perf
-This package contains the perf tool, which enables performance monitoring
-of the Linux kernel.
-
-%package -n perf-debuginfo
-Summary: Debug information for package perf
-Group: Development/Debug
-#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release}
-AutoReqProv: no
-%description -n perf-debuginfo
-This package provides debug information for the perf package.
-
-# Note that this pattern only works right to match the .build-id
-# symlinks because of the trailing nonmatching alternation and
-# the leading .*, because of find-debuginfo.sh's buggy handling
-# of matching the pattern against the symlinks file.
-%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|XXX' -o perf-debuginfo.list}
-
-%package -n python-perf
-Summary: Python bindings for apps which will manipulate perf events
-Group: Development/Libraries
-%description -n python-perf
-The python-perf package contains a module that permits applications
-written in the Python programming language to use the interface
-to manipulate perf events.
-
-%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")}
-
-%package -n python-perf-debuginfo
-Summary: Debug information for package perf python bindings
-Group: Development/Debug
-#Requires: %%{name}-debuginfo-common-%%{_target_cpu} = %%{version}-%%{release}
-AutoReqProv: no
-%description -n python-perf-debuginfo
-This package provides debug information for the perf python bindings.
-
-# the python_sitearch macro should already be defined from above
-%{expand:%%global debuginfo_args %{?debuginfo_args} -p '.*%%{python_sitearch}/perf.so(\.debug)?|XXX' -o python-perf-debuginfo.list}
-
-
-%endif # with_perf
-
-# prep #########################################################################
-%prep
-
-%setup -q -c -T -a 0
-
-# build ########################################################################
-%build
-
-cd %{name}-%{version}
-
-all_types="%{kernel_all_types}"
-num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l`
-if [ $num_processor -gt 8 ]; then
-    num_processor=8
-fi
-for type_name in $all_types
-do
-    sed -i '/^EXTRAVERSION/cEXTRAVERSION = -%{extra_version}' Makefile
-    objdir=../%{name}-%{version}-obj-${type_name}
-    [ -d ${objdir} ] || mkdir ${objdir}
-    echo "include $PWD/Makefile" > ${objdir}/Makefile
-    cp ./package/default/config.${type_name} ${objdir}/.config
-    localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"'
-    sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config
-    make -C ${objdir} olddefconfig > /dev/null
-    smp_jobs=%{?jobs:%jobs}
-    if [ "$smp_jobs" = "" ]; then
-        make -j ${num_processor} -C ${objdir} RPM_BUILD_MODULE=y all
-    else
-        make -C ${objdir} RPM_BUILD_MODULE=y %{?jobs:-j%jobs} all
-    fi
-# dealing with files under lib/modules
-    make -C ${objdir} RPM_BUILD_MODULE=y modules
-
-    %global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix}
-    %if %{with_perf}
-    # perf
-    %{perf_make} all
-    %{perf_make} man
-    %endif
-done
-
-# install ######################################################################
-%install
-
-echo install %{version}
-arch=`uname -m`
-if [ "$arch" != "x86_64" ];then
-    echo "Unexpected error. Cannot build this rpm in non-x86_64 platform\n"
-    exit 1
-fi
-mkdir -p %buildroot/boot
-cd %{name}-%{version}
-
-all_types="%{kernel_all_types}"
-for type_name in $all_types
-do
-    elfname=vmlinuz-%{tagged_name}%{?dist}
-    mapname=System.map-%{tagged_name}%{?dist}
-    configname=config-%{tagged_name}%{?dist}
-    builddir=../%{name}-%{version}-obj-${type_name}
-%if 0%{?rhel} == 7
-    cp -rpf ${builddir}/arch/x86/boot/bzImage %buildroot/boot/${elfname}
-%endif
-    cp -rpf ${builddir}/System.map %buildroot/boot/${mapname}
-    cp -rpf ${builddir}/.config %buildroot/boot/${configname}
-    cp -rpf ${builddir}/vmlinux %buildroot/boot/vmlinux-%{tagged_name}%{?dist}
-    sha512hmac %buildroot/boot/${elfname} | sed -e "s,$RPM_BUILD_ROOT,," > %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac
-
-    # dealing with kernel-devel package
-    objdir=${builddir}
-    #KernelVer=%{version}-%{title}-%{release_os}-${type_name}
-    KernelVer=%{tagged_name}%{?dist}
-
-    ####### make kernel-devel package: methods comes from rhel6(kernel.spec) #######
-    make -C ${objdir} RPM_BUILD_MODULE=y INSTALL_MOD_PATH=%buildroot modules_install > /dev/null
-    #don't package firmware, use linux-firmware rpm instead
-    rm -rf %buildroot/lib/firmware
-    # And save the headers/makefiles etc for building modules against
-    #
-    # This all looks scary, but the end result is supposed to be:
-    # * all arch relevant include/ files
-    # * all Makefile/Kconfig files
-    # * all script/ files
-    rm -rf %buildroot/lib/modules/${KernelVer}/build
-    rm -rf %buildroot/lib/modules/${KernelVer}/source
-    mkdir -p %buildroot/lib/modules/${KernelVer}/build
-    (cd $RPM_BUILD_ROOT/lib/modules/${KernelVer} ; ln -s build source)
-    # dirs for additional modules per module-init-tools, kbuild/modules.txt
-    mkdir -p %buildroot/lib/modules/${KernelVer}/extra
-    mkdir -p %buildroot/lib/modules/${KernelVer}/updates
-    mkdir -p %buildroot/lib/modules/${KernelVer}/weak-updates
-
-%if 0%{?rhel} == 8
-    cp -rpf ${builddir}/arch/x86/boot/bzImage %buildroot/lib/modules/${KernelVer}/vmlinuz
-%endif
-
-    # first copy everything
-    cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` %buildroot/lib/modules/${KernelVer}/build
-    cp ${builddir}/Module.symvers %buildroot/lib/modules/${KernelVer}/build
-    cp ${builddir}/System.map %buildroot/lib/modules/${KernelVer}/build
-
-    if [ -s ${builddir}/Module.markers ]; then
-        cp ${builddir}/Module.markers %buildroot/lib/modules/${KernelVer}/build
-    fi
-
-    # create the kABI metadata for use in packaging
-    echo "**** GENERATING kernel ABI metadata ****"
-    gzip -c9 < ${builddir}/Module.symvers > %buildroot/boot/symvers-${KernelVer}.gz
-    # chmod 0755 %_sourcedir/kabitool
-    # rm -f %{_tmppath}/kernel-$KernelVer-kabideps
-    # %_sourcedir/kabitool -s Module.symvers -o %{_tmppath}/kernel-$KernelVer-kabideps
-
-    rm -rf %buildroot/lib/modules/${KernelVer}/build/Documentation
-    rm -rf %buildroot/lib/modules/${KernelVer}/build/scripts
-    rm -rf %buildroot/lib/modules/${KernelVer}/build/include
-    cp ${builddir}/.config %buildroot/lib/modules/${KernelVer}/build
-    cp -a scripts %buildroot/lib/modules/${KernelVer}/build
-    cp -a ${builddir}/scripts %buildroot/lib/modules/${KernelVer}/build
-    rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*.o
-    rm -f %buildroot/lib/modules/${KernelVer}/build/scripts/*/*.o
-    cp -a --parents arch/x86/include %buildroot/lib/modules/${KernelVer}/build/
-    (cd ${builddir} ; cp -a --parents arch/x86/include/generated %buildroot/lib/modules/${KernelVer}/build/)
-    mkdir -p %buildroot/lib/modules/${KernelVer}/build/include
-
-    cp -a ${builddir}/include/config %buildroot/lib/modules/${KernelVer}/build/include/
-    cp -a ${builddir}/include/generated %buildroot/lib/modules/${KernelVer}/build/include/
-
-    cd include
-    cp -a * %buildroot/lib/modules/${KernelVer}/build/include
-
-    cp -a ../arch/x86 $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/asm-x86
-    pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include
-    ln -s asm-x86 asm
-    popd
-
-    # Make sure the Makefile and version.h have a matching timestamp so that
-    # external modules can be built
-    touch -r %buildroot/lib/modules/${KernelVer}/build/Makefile %buildroot/lib/modules/${KernelVer}/build/include/generated/uapi/linux/version.h
-    #touch -r %buildroot/lib/modules/${KernelVer}/build/.config %buildroot/lib/modules/${KernelVer}/build/include/linux/autoconf.h
-    # Copy .config to include/config/auto.conf so "make prepare" is unnecessary.
-    cp %buildroot/lib/modules/$KernelVer/build/.config %buildroot/lib/modules/${KernelVer}/build/include/config/auto.conf
-    cd ..
-
-
-    find %buildroot/lib/modules/${KernelVer} -name "*.ko" -type f >modnames
-
-
-    mkdir -p %buildroot%debug_path/modules/${KernelVer}
-    cp -r %buildroot/lib/modules/${KernelVer}/kernel %buildroot%debug_path/modules/${KernelVer}
-    xargs --no-run-if-empty strip -g < modnames
-
-    xargs --no-run-if-empty chmod u+x < modnames
-
-    # Generate a list of modules for block and networking.
-    fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA |
-    sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef
-
-    collect_modules_list()
-    {
-        sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef |
-        LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1
-        if [ ! -z "$3" ]; then
-            sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1
-        fi
-    }
-
-    collect_modules_list networking 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt2x00(pci|usb)_probe|register_netdevice'
-    collect_modules_list block 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko'
-    collect_modules_list drm 'drm_open|drm_init'
-    collect_modules_list modesetting 'drm_crtc_init'
-    # detect missing or incorrect license tags
-    rm -f modinfo
-    while read i
-    do
-        echo -n "${i#%buildroot/lib/modules/${KernelVer}/} " >> modinfo
-        /sbin/modinfo -l $i >> modinfo
-    done < modnames
-
-    egrep -v \
-'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \
-    modinfo && exit 1
-
-    rm -f modinfo modnames
-    cp tools_key.pub %buildroot/lib/modules/${KernelVer}/kernel
-    # remove files that will be auto generated by depmod at rpm -i time
-    #for i in alias alias.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap
-    #do
-    #    rm -f %buildroot/lib/modules/${KernelVer}/modules.$i
-    #done
-
-    # Move the devel headers out of the root file system
-    mkdir -p %buildroot/usr/src/kernels
-    mkdir -p %buildroot/lib/modules/${KernelVer}/build/tools/objtool
-    cp ${builddir}/tools/objtool/objtool %buildroot/lib/modules/${KernelVer}/build/tools/objtool/objtool
-    mv %buildroot/lib/modules/${KernelVer}/build %buildroot/usr/src/kernels/${KernelVer}
-    ln -sf /usr/src/kernels/${KernelVer} %buildroot/lib/modules/${KernelVer}/build
-    #
-    # Generate the kernel-modules-* files lists
-    #
-
-%if 0%{?rhel} == 8
-    mkdir -p %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/boot/$configname %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/boot/System.map-$KernelVer %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/boot/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/lib/modules/$KernelVer/dist_compat
-%endif
-
-
-    # Copy the System.map file for depmod to use, and create a backup of the
-    # full module tree so we can restore it after we're done filtering
-    cp ${builddir}/System.map $RPM_BUILD_ROOT/.
-    pushd $RPM_BUILD_ROOT
-    mkdir restore
-    cp -r lib/modules/$KernelVer/* restore/.
-
-    find lib/modules/$KernelVer/ -not -type d | sort -n > modules.list
-    find lib/modules/$KernelVer/ -type d | sort -n > module-dirs.list
-
-    # Run depmod on the resulting module tree and make sure it isn't broken
-    depmod -b . -aeF ./System.map $KernelVer &> depmod.out
-    if [ -s depmod.out ]; then
-        echo "Depmod failure"
-        cat depmod.out
-        exit 1
-    else
-        rm depmod.out
-    fi
-
-    # Cleanup
-    rm System.map
-    cp -r restore/* lib/modules/$KernelVer/.
-    rm -rf restore
-    popd
-
-%if 0%{?rhel} == 8
-    mv %buildroot/lib/modules/$KernelVer/modules.symbols.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.symbols %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.softdep %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.devname %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.dep.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.dep %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.builtin.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.alias.bin %buildroot/lib/modules/$KernelVer/dist_compat
-    mv %buildroot/lib/modules/$KernelVer/modules.alias %buildroot/lib/modules/$KernelVer/dist_compat
-%endif
-
-    # Make sure the files lists start with absolute paths or rpmbuild fails.
- # Also add in the dir entries - sed -e 's/^lib*/%dir \/lib/' $RPM_BUILD_ROOT/module-dirs.list > ../core.list - sed -e 's/^lib*/\/lib/' $RPM_BUILD_ROOT/modules.list >> ../core.list - - # Cleanup - rm -f $RPM_BUILD_ROOT/modules.list - rm -f $RPM_BUILD_ROOT/module-dirs.list - - %if %{with_perf} - # perf tool binary and supporting scripts/binaries - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install - - # perf-python extension - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext - - # perf man pages (note: implicit rpm magic compresses them later) - %{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man - %endif -done - -mkdir -p %buildroot/etc/sysconfig/modules -install -m755 %SOURCE1 %buildroot/etc/sysconfig/modules - -#### Header -make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install -# Do headers_check but don't die if it fails. -make INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check \ - > hdrwarnings.txt || : -if grep -q exist hdrwarnings.txt; then - sed s:^$RPM_BUILD_ROOT/usr/include/:: hdrwarnings.txt - # Temporarily cause a build failure if header inconsistencies. - # exit 1 -fi - -find $RPM_BUILD_ROOT/usr/include \ - \( -name .install -o -name .check -o \ - -name ..install.cmd -o -name ..check.cmd \) | xargs rm -f - -# glibc provides scsi headers for itself, for now -rm -rf $RPM_BUILD_ROOT/usr/include/scsi -rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h -rm -f $RPM_BUILD_ROOT/usr/include/asm*/io.h -rm -f $RPM_BUILD_ROOT/usr/include/asm*/irq.h - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; - exit 1; -fi; - -%post -# post ######################################################################### -echo "Install tlinux kernel" -%if 0%{?rhel} == 8 -kernel-install add %{tagged_name} /lib/modules/%{tagged_name}/vmlinuz -cp -p /lib/modules/%{tagged_name}/dist_compat/config-%{tagged_name}%{?dist} /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac /boot -cp -p /lib/modules/%{tagged_name}/dist_compat/System.map-%{tagged_name}%{?dist} /boot -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel-tlinux4 --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --make-default|| exit $? -%endif -%endif - -%preun -# preun ####################################################################### -%if 0%{?rhel} == 8 -kernel-install remove %{tagged_name} -%else -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $? -echo -e "Remove \"%{tagged_name}%{?dist}\" Done." -%endif -%endif - -%posttrans -# posttrans #################################################################### -%if 0%{?rhel} == 7 -/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $? -/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $? 
-%endif - -%files -f core.list -# files ######################################################################## -%defattr(-,root,root) -%if 0%{?rhel} == 7 -/boot/vmlinuz-%{tagged_name}%{?dist} -/boot/System.map-%{tagged_name}%{?dist} -/boot/config-%{tagged_name}%{?dist} -%endif -/boot/symvers-%{tagged_name}%{?dist}* -/etc/sysconfig/modules/tlinux_cciss_link.modules -#do not package firmware, use linux-firmware rpm instead -#/lib/firmware/ - -%files debuginfo-common -%defattr(-,root,root) -/boot/vmlinux-%{tagged_name}%{?dist} -%dir %debug_path -%debug_path/modules -#/boot/System.map-%%{version}-%%{title}-%%{release_os}-* -#/boot/config-%%{version}-%%{title}-%%{release_os}-* - -%files headers -%defattr(-,root,root) -/usr/include/* - -%files devel -%defattr(-,root,root) -/usr/src/kernels/%{tagged_name}%{?dist} - -%if %{with_perf} -%files -n perf -%defattr(-,root,root) -%{_bindir}/* -/usr/lib64/* -/usr/lib/perf/* -/usr/share/* -%dir %{_libexecdir}/perf-core -%{_libexecdir}/perf-core/* -%{_sysconfdir}/bash_completion.d/perf - -%files -n python-perf -%defattr(-,root,root) -%{python_sitearch} - -%if %{with_debuginfo} -%files -f perf-debuginfo.list -n perf-debuginfo -%defattr(-,root,root) - -%files -f python-perf-debuginfo.list -n python-perf-debuginfo -%defattr(-,root,root) -%endif -%endif # with_perf - -# changelog ################################################################### -%changelog -* Thu Feb 2 2012 Samuel Liao <samuelliao@tencent.com> - - Initial 3.0.18 repository diff --git a/package/default/perf-tlinux.spec b/package/default/perf-tlinux.spec deleted file mode 100644 index f4a426197b7b..000000000000 --- a/package/default/perf-tlinux.spec +++ /dev/null @@ -1,154 +0,0 @@ -%define sign_server 10.12.65.118 - -%global with_debuginfo 0 -%if 0%{?rhel} == 6 -%global dist .tl1 -%global debug_path /usr/lib/debug/lib/ -%else -%global debug_path /usr/lib/debug/usr/lib/ -%if 0%{?rhel} == 7 -%global dist .tl2 -%endif -%endif - -Summary: Performance monitoring for the Linux kernel -Group: Development/System -Name: %{name} -Version: %{version} -Release: %{release_os}%{?dist} -License: GPLv2 -Vendor: Tencent -Packager: tlinux team <g_APD_SRDC_OS@tencent.com> -Provides: perf = %{version}-%{release} -Source0: %{name}-%{version}.tar.gz -URL: http://www.tencent.com -ExclusiveArch: x86_64 -Distribution: Tencent Linux -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build -BuildRequires: wget bc module-init-tools curl -BuildRequires: elfutils-devel zlib-devel binutils-devel newt-devel python-devel perl(ExtUtils::Embed) bison flex -BuildRequires: xmlto asciidoc -BuildRequires: audit-libs-devel -%ifnarch s390 s390x -BuildRequires: numactl-devel -%endif - -# for the 'hostname' command -%if 0%{?rhel} == 7 -BuildRequires: hostname -%else -BuildRequires: net-tools -%endif - -%description -This package contains the perf tool, which enables performance monitoring -of the Linux kernel. - - -%package -n python-perf -Summary: Python bindings for apps which will manipulate perf events -Group: Development/Libraries -%description -n python-perf -The python-perf package contains a module that permits applications -written in the Python programming language to use the interface -to manipulate perf events.
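As a quick post-install smoke test of the two packages described above (illustrative shell commands only, not part of the spec; the perf module name is the one install-python_ext places under %{python_sitearch}):

    perf --version               # tool from the perf package
    perf stat -- sleep 1         # count default events for a trivial workload
    python2 -c 'import perf'     # binding from the python-perf package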
- -%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} - - -# prep ######################################################################### -%prep - -%setup -q -c -T -a 0 - -# build ######################################################################## -%build - - -if [ ! -f /etc/tlinux-release ]; then - echo "Error: please build this rpm on tlinux\n" - exit 1 -fi - - -cd %{name}-%{version} -all_types="%{kernel_all_types}" -num_processor=`cat /proc/cpuinfo | grep 'processor' | wc -l` -if [ $num_processor -gt 8 ]; then - num_processor=8 -fi - -type_name=default - -objdir=../%{name}-%{version}-obj-${type_name} -[ -d ${objdir} ] || mkdir ${objdir} -bash scripts/mkmakefile $PWD ${objdir} 2 6 -if [ "$no_sign" != "yes" ]; then -cp ./package/default/config.${type_name} ${objdir}/.config -else -cp ./package/default/config.${type_name}_nosign ${objdir}/.config -fi -localversion='"'-`echo %{tagged_name}|cut -d- -f3-`%{?dist}'"' -sed -i -e 's/CONFIG_LOCALVERSION=.*/CONFIG_LOCALVERSION='$localversion'/' ${objdir}/.config - - -%global perf_make make %{?_smp_mflags} -C tools/perf -s V=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 prefix=%{_prefix} -# perf -%{perf_make} all -%{perf_make} man - -# install ###################################################################### -%install -cd %{name}-%{version} -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install - -# perf-python extension -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext - -# perf man pages (note: implicit rpm magic compresses them later) -%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; - exit 1; -fi; - -if [ ! -f /etc/tlinux-release -o ! -f /etc/redhat-release ]; then - echo "Error: Cannot install this rpm on non-tlinux OS" - exit 1; -fi - -%post -# post ######################################################################### - - -%postun - -%files -%defattr(-,root,root) -%{_bindir}/perf -%dir %{_libexecdir}/perf-core -%{_libexecdir}/perf-core/* -%{_mandir}/man[1-8]/perf* -%{_sysconfdir}/bash_completion.d/perf -/usr/bin/trace -/usr/bin/perf* -/usr/lib64/traceevent/plugins/* -/usr/share/doc/* -/usr/share/perf-core/* -/usr/lib/perf/* - -%files -n python-perf -%defattr(-,root,root) -%{python_sitearch} - - -# changelog ################################################################### -%changelog -* Thu Feb 2 2012 Samuel Liao <samuelliao@tencent.com> - - Initial 3.0.18 repository diff --git a/package/default/repackage/generate-rpms.sh b/package/default/repackage/generate-rpms.sh deleted file mode 100755 index 23a9f494c8ca..000000000000 --- a/package/default/repackage/generate-rpms.sh +++ /dev/null @@ -1,174 +0,0 @@ -#!/bin/bash -# ============================================================================== -# Description: Automatically generating kernel rpm package with specified branch -# according to HEAD tag.
-# -# Author: David Shan <davidshan@tencent.com> -# =============================================================================== - -#variables -kernel_full_name="" -kernel_version="" -tlinux_release="" -curdir=`pwd` - - -build_specdir=`readlink -f ~/rpmbuild/SPECS` -build_srcdir=`readlink -f ~/rpmbuild/SOURCES` -dist="tl2" - -kernel_type="default" -spec_file_name="" -make_jobs="" -tag_name="" -nosign_suffix="" -build_src_only=0 -strip_sign_token=0 -# to control whether to build rpm for xen domU -isXenU="" -kernel_default_types=(default) - -get_kernel_version() -{ - local tagged_name - raw_tagged_name=$tag_name - - if [[ $raw_tagged_name != public-x86-* ]]; then - echo "$raw_tagged_name not valid tag name for public-x86" - exit 1 - fi - - tagged_name=${raw_tagged_name#public-x86-} - - kernel_version=`echo $tagged_name|cut -d- -f1` - #kernel_version=${kernel_version}-1 - #echo "kernel version: $kernel_version" - echo "kernel version: ${kernel_version}" -} - -get_tlinux_name() -{ - if [ -n "$tag_name" ]; then - raw_tagged_name="$tag_name" - else - raw_tagged_name=`git describe --tags` - fi - - if [ -z "${raw_tagged_name}" ];then - echo "Error:Can't get kernel version from git tree." - exit 1 - fi - - if [[ $raw_tagged_name != public-x86-* ]]; then - echo "$raw_tagged_name not valid tag name for public-x86" - exit 1 - fi - - tagged_name=${raw_tagged_name#public-x86-} - - #if [ "${tagged_name#*-*-*}" != ${tagged_name} ];then - #echo "Error: bad tag name:$tagged_name." - #exit 1 - #fi - - rpm_name="kernel" - echo "rpm_name $rpm_name" - - tlinux_release=`echo $tagged_name|cut -d- -f2`.`echo $tagged_name|cut -d- -f3` - extra_version=`echo $tagged_name | cut -d- -f2` - kernel_full_name=${rpm_name}-${kernel_version} - #tlinux_release=${tagged_name#*-} -} - -prepare_tlinux_spec() -{ - spec_file_name=${build_specdir}/${kernel_full_name}_${dist}.spec - spec_contents="%define name ${rpm_name} \n" - spec_contents+="%define version ${kernel_version} \n" - spec_contents+="%define release_os ${tlinux_release} \n" - spec_contents+="%define tagged_name ${tagged_name} \n" - - echo -e "${spec_contents}" > $spec_file_name - - if [ "$kvm_guest" = "yes" ] ; then - cat $curdir/kernel-tlinux_kvmguest_${dist}.spec >> $spec_file_name - else - cat $curdir/kernel-tlinux_${dist}.spec >> $spec_file_name - #cp $curdir/${kernel_full_name}_${dist}.spec $spec_file_name - fi -} - -prepare_tlinux_bin_sources() -{ - local kernel_tl3_rpm=${rpm_name}-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm - local kernel_debuginfo_tl3_rpm=${rpm_name}-debuginfo-common-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm - local kernel_headers_tl3_rpm=${rpm_name}-headers-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm - local kernel_devel_tl3_rpm=${rpm_name}-devel-${kernel_version}-${tlinux_release}.tl3.x86_64.rpm - - if [ ! -f $kernel_tl3_rpm ]; then - echo "$kernel_tl3_rpm not exist" - exit 1 - fi - - if [ ! -f $kernel_debuginfo_tl3_rpm ]; then - echo "$kernel_debuginfo_tl3_rpm not exist" - exit 1 - fi - - if [ ! -f $kernel_headers_tl3_rpm ]; then - echo "$kernel_headers_tl3_rpm not exist" - exit 1 - fi - - rm -rf ${rpm_name}-${kernel_version} - rm -rf ${rpm_name}-${kernel_version}_bin.tar.gz - mkdir ${rpm_name}-${kernel_version} - pushd .
- cd ${rpm_name}-${kernel_version} - rpm2cpio ../$kernel_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_debuginfo_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_headers_tl3_rpm | cpio -divm - rpm2cpio ../$kernel_devel_tl3_rpm | cpio -divm - popd - tar -czvf ${rpm_name}-${kernel_version}_bin.tar.gz ${rpm_name}-${kernel_version} -} - -#main function - -if [ $# -eq 0 ]; then - echo "please specify tag" - exit 1 -fi - -tag_name=$1 - -if [ ! -e "${build_srcdir}" ]; then - mkdir $build_srcdir - [ $? == 1 ] && exit 1 -fi - -if [ ! -e "${build_specdir}" ]; then - mkdir $build_specdir - [ $? == 1 ] && exit 1 -fi - -origsrc=`pwd` - -get_kernel_version $origsrc -get_tlinux_name -prepare_tlinux_bin_sources -echo "Prepare $kernel_full_name source." -prepare_tlinux_spec - - -if test -e ${build_srcdir}/${kernel_full_name}; then - rm -fr ${build_srcdir}/${kernel_full_name} -fi - -rm -rf /var/tmp/${kernel_full_name} - -cp ${kernel_full_name}_bin.tar.gz ${build_srcdir}/ - -echo "Build kernel rpm package." - -rpmbuild -bb ${target} ${spec_file_name} diff --git a/package/default/repackage/kernel-tlinux_tl2.spec b/package/default/repackage/kernel-tlinux_tl2.spec deleted file mode 100644 index aa1c0b0de0ae..000000000000 --- a/package/default/repackage/kernel-tlinux_tl2.spec +++ /dev/null @@ -1,268 +0,0 @@ -%global with_debuginfo 0 -%global with_perf 1 -%if 0%{?rhel} == 6 -%global rdist .tl1 -%global debug_path /usr/lib/debug/lib/ -%else -%global debug_path /usr/lib/debug/usr/lib/ -%if 0%{?rhel} == 7 -%global rdist .tl2 -%global _enable_debug_packages %{nil} -%global debug_package %{nil} -%global __debug_package %{nil} -%global __debug_install_post %{nil} -%else -%if 0%{?rhel} == 8 -%global rdist .tl3 -%global __python /usr/bin/python2 -%global _enable_debug_packages %{nil} -%global debug_package %{nil} -%global __debug_package %{nil} -%global __debug_install_post %{nil} -%endif -%endif -%endif - -%global dist %{nil} - -Summary: Kernel for Tencent physical machine -Name: %{name} -Version: %{version} -Release: %{release_os}%{?rdist} -License: GPLv2 -Vendor: Tencent -Packager: tlinux team <g_APD_SRDC_OS@tencent.com> -Provides: kernel = %{version}-%{release} -Group: System Environment/Kernel -Source0: %{name}-%{version}_bin.tar.gz -Source1: tlinux_cciss_link.modules -URL: http://www.tencent.com -ExclusiveArch: x86_64 -Distribution: Tencent Linux -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-build -BuildRequires: wget bc module-init-tools curl -Requires(pre): linux-firmware >= 20150904-44 - -# for the 'hostname' command -%if 0%{?rhel} >= 7 -BuildRequires: hostname -%else -BuildRequires: net-tools -%endif - -%description -This package contains the tlinux kernel for physical machines - -%package debuginfo-common -Summary: tlinux kernel vmlinux for crash debug -Release: %{release} -Group: System Environment/Kernel -Provides: kernel-debuginfo-common kernel-debuginfo -Obsoletes: kernel-debuginfo -AutoReqprov: no - -%description debuginfo-common -This package contains vmlinux, System.map and config files for kernel -debugging. - -%package devel -Summary: Development package for building kernel modules to match the %{version}-%{release} kernel -Release: %{release} -Group: System Environment/Kernel -AutoReqprov: no - -%description devel -This package provides kernel headers and makefiles sufficient to build modules -against the %{version}-%{release} kernel package.
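For example, a minimal out-of-tree module build against this tree looks like the following sketch (mymod.c is hypothetical, and the running kernel is assumed to be the one this spec installed, so that uname -r matches the directory under /usr/src/kernels):

    echo 'obj-m := mymod.o' > Makefile
    make -C /usr/src/kernels/$(uname -r) M=$PWD modules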
- -%package headers -Summary: Header files for the Linux kernel for use by glibc -Group: Development/System -Obsoletes: glibc-kernheaders -Provides: glibc-kernheaders = 3.0-46 -Provides: kernel-headers -Obsoletes: kernel-headers -AutoReqprov: no - -%description headers -Kernel-headers includes the C header files that specify the interface -between the Linux kernel and userspace libraries and programs. The -header files define structures and constants that are needed for -building most standard programs and are also needed for rebuilding the -glibc package. - -# prep ######################################################################### -%prep - -%setup -q -c -T -a 0 - -# build ######################################################################## -%build - -# install ###################################################################### -%install - -echo install %{version} -arch=`uname -m` -if [ "$arch" != "x86_64" ];then - echo "Unexpected error. Cannot build this rpm in non-x86_64 platform\n" - exit 1 -fi - -pwd - -cd %{name}-%{version} - -cp -rp * %buildroot/ - -elfname=vmlinuz-%{tagged_name}%{?dist} -mapname=System.map-%{tagged_name}%{?dist} -configname=config-%{tagged_name}%{?dist} - -mkdir -p %buildroot/boot -mv %buildroot/lib/modules/%tagged_name/vmlinuz %buildroot/boot/${elfname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/${mapname} %buildroot/boot/${mapname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/${configname} %buildroot/boot/${configname} -mv %buildroot/lib/modules/%tagged_name/dist_compat/.vmlinuz-%{tagged_name}%{?dist}.hmac %buildroot/boot/ - -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.symbols %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.softdep %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.devname %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.dep %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.builtin.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias.bin %buildroot/lib/modules/%tagged_name/ -mv %buildroot/lib/modules/%tagged_name/dist_compat/modules.alias %buildroot/lib/modules/%tagged_name/ - -%pre -# pre ######################################################################### -system_arch=`uname -m` - -if [ %{_target_cpu} != ${system_arch} ]; then - echo "ERROR: Failed installing this rpm!!!!" - echo "This rpm is intended for %{_target_cpu} platform. It seems your system is ${system_arch}."; - exit 1; -fi; - -%post -# post ######################################################################### -echo "Install tlinux kernel" -%if 0%{?rhel} >= 7 -/sbin/new-kernel-pkg --package kernel --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --make-default|| exit $? -%else -/sbin/new-kernel-pkg --install %{tagged_name}%{?dist} --kernel-args="crashkernel=512M-12G:128M,12G-64G:256M,64G-128G:512M,128G-:768M" --banner="tkernel 3.0" || exit $? 
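On this pre-grub2 branch the scriptlet below then rewrites the bootloader menu by hand. Its effect can be inspected after installation with the same paths the script probes (illustrative commands only):

    grubconfig=/boot/grub/grub.conf
    [ -d /sys/firmware/efi -a -f /boot/efi/EFI/tencent/grub.efi ] && grubconfig=/boot/efi/EFI/tencent/grub.conf
    grep -n '^title' $grubconfig     # menu entries, counted by the loop below
    grep -n '^default' $grubconfig   # the index the loop rewrites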
- -# check release info to get suffix -suffix=default - -# modify boot menu -grubconfig=/boot/grub/grub.conf -if [ -d /sys/firmware/efi -a -f /boot/efi/EFI/tencent/grub.efi ]; then - grubconfig=/boot/efi/EFI/tencent/grub.conf -fi - -# reduce multiple blank lines to one -sed '/^$/{ - N - /^\n$/D -}' ${grubconfig} > ${grubconfig}.tmp -mv ${grubconfig}.tmp ${grubconfig} -rm -f ${grubconfig}.tmp - -# set grub default according to $suffix we already got -count=-1 -defcount=0 -while read line -do - echo $line | grep -q "title" - if [ $? -eq 0 ]; then - (( count++ )) - fi - if [ -f /etc/SuSE-release ]; then - echo $line | grep -q "%{tagged_name}%{?dist}" - elif [ -f /etc/redhat-release ]; then - echo $line | grep -q "title.*\(%{tagged_name}%{?dist}\).*" - fi - result=$? - if [ $result -eq 0 ] ; then - break - fi - if [ -f /etc/SuSE-release ]; then - echo $line | grep -q "title %{tagged_name}%{?dist}" - elif [ -f /etc/redhat-release ]; then - echo $line | grep -q "title.*(%{tagged_name}%{?dist}).*" - fi - if [ $? -eq 0 ] ; then - defcount=$count - fi -done < $grubconfig - -if [ $result -eq 0 ] ; then - index=$count -else - index=$defcount -fi - -if [ -f /etc/SuSE-release ]; then - sed -n "/^default /!p; - {/^default /c\ - \default $index - } - " $grubconfig > $grubconfig.NEW - mv $grubconfig.NEW $grubconfig -elif [ -f /etc/redhat-release ]; then - sed -n "/^default=/!p; - {/^default=/c\ - \default=$index - } - " $grubconfig > $grubconfig.NEW - mv $grubconfig.NEW $grubconfig -fi -%endif -echo -e "Set Grub default to \"%{tagged_name}%{?dist}\" Done." - -%postun -# postun ####################################################################### -/sbin/new-kernel-pkg --rminitrd --dracut --remove %{tagged_name}%{?dist} || exit $? -echo -e "Remove \"%{tagged_name}%{?dist}\" Done." - -%if 0%{?rhel} >= 7 -%posttrans -# posttrans #################################################################### -/sbin/new-kernel-pkg --package kernel --mkinitrd --dracut --depmod --update %{tagged_name}%{?dist} || exit $? -/sbin/new-kernel-pkg --package kernel --rpmposttrans %{tagged_name}%{?dist} || exit $? -%endif - -%files -# files ######################################################################## -%defattr(-,root,root) -/boot/vmlinuz-%%{tagged_name}%%{?dist} -/boot/.vmlinuz-%%{tagged_name}%%{?dist}.hmac -/boot/System.map-%%{tagged_name}%%{?dist} -/boot/config-%%{tagged_name}%%{?dist} -/boot/symvers-%{tagged_name}%{?dist}* -/etc/sysconfig/modules/tlinux_cciss_link.modules -/lib/modules/%%{tagged_name}/* - -%files debuginfo-common -%defattr(-,root,root) -/boot/vmlinux-%{tagged_name}%{?dist} -%dir %debug_path -%debug_path/modules -#/boot/System.map-%%{version}-%%{title}-%%{release_os}-* -#/boot/config-%%{version}-%%{title}-%%{release_os}-* - - -%files devel -%defattr(-,root,root) -/usr/src/kernels/%{tagged_name}%{?dist} - -%files headers -%defattr(-,root,root) -/usr/include/* - -# changelog ################################################################### -%changelog -* Thu Feb 2 2012 Samuel Liao <samuelliao@tencent.com> - - Initial 3.0.18 repository diff --git a/package/default/tlinux_cciss_link.modules b/package/default/tlinux_cciss_link.modules deleted file mode 100644 index 8d15da12d851..000000000000 --- a/package/default/tlinux_cciss_link.modules +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -if [ -b /dev/sda -a !
-b /dev/cciss/c0d0 ]; then - mkdir -p /dev/cciss - set -- 0 a 1 b 2 c 3 d 4 e 5 f 6 g 7 h 8 i 9 j 10 k 11 l 12 m 13 n 14 o 15 p - while [ $# -ge 2 ]; do - c=/dev/cciss/c0d$1; shift - s=/dev/sd$1; shift - ln -s ${s} ${c} - ln -s ${s}1 ${c}p1 - ln -s ${s}2 ${c}p2 - ln -s ${s}3 ${c}p3 - ln -s ${s}4 ${c}p4 - done -fi - diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c index 2d4b717726b6..34b3fca788e8 100644 --- a/samples/bpf/fds_example.c +++ b/samples/bpf/fds_example.c @@ -30,6 +30,8 @@ #define BPF_M_MAP 1 #define BPF_M_PROG 2 +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + static void usage(void) { printf("Usage: fds_example [...]\n"); @@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); - char bpf_log_buf[BPF_LOG_BUF_SIZE]; struct bpf_object *obj; int prog_fd; diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh old mode 100644 new mode 100755 index 090b96eaf7f7..0eda9754f50b --- a/samples/bpf/lwt_len_hist.sh +++ b/samples/bpf/lwt_len_hist.sh @@ -8,6 +8,8 @@ VETH1=tst_lwt1b TRACE_ROOT=/sys/kernel/debug/tracing function cleanup { + # To reset saved histogram, remove pinned map + rm /sys/fs/bpf/tc/globals/lwt_len_hist_map ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null ip link del $VETH0 2> /dev/null ip link del $VETH1 2> /dev/null diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c index 587b68b1f8dd..430a4b7e353e 100644 --- a/samples/bpf/lwt_len_hist_user.c +++ b/samples/bpf/lwt_len_hist_user.c @@ -15,8 +15,6 @@ #define MAX_INDEX 64 #define MAX_STARS 38 -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - static void stars(char *str, long val, long max, int width) { int i; diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh old mode 100644 new mode 100755 diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c index dd558cbb2309..ef53b93db573 100644 --- a/samples/bpf/xdp_monitor_user.c +++ b/samples/bpf/xdp_monitor_user.c @@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size) { unsigned int nr_cpus = bpf_num_possible_cpus(); void *array; - size_t size; - size = record_size * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, record_size); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void) int i; /* Alloc main stats_record structure */ - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(*rec)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index cfcc31e51197..d94a999b4b4b 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -15,7 +15,7 @@ #include "bpf_helpers.h" #include "hash_func01.h" -#define MAX_CPUS 64 /* WARNING - sync with _user.c */ +#define MAX_CPUS NR_CPUS /* Special map type that can XDP_REDIRECT frames to another CPU */ struct { diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index 8b862a7a6c6a..0a7672556822 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -13,6 +13,7 @@ static const char *__doc__ = #include <unistd.h> #include <locale.h> #include <sys/resource.h> +#include <sys/sysinfo.h> #include <getopt.h> #include <net/if.h> #include <time.h> @@ -24,8 +25,6 @@ static const 
char *__doc__ = #include <arpa/inet.h> #include <linux/if_link.h> -#define MAX_CPUS 64 /* WARNING - sync with _kern.c */ - /* How many xdp_progs are defined in _kern.c */ #define MAX_PROG 6 @@ -40,6 +39,7 @@ static char *ifname; static __u32 prog_id; static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; +static int n_cpus; static int cpu_map_fd; static int rx_cnt_map_fd; static int redirect_err_cnt_map_fd; @@ -170,7 +170,7 @@ struct stats_record { struct record redir_err; struct record kthread; struct record exception; - struct record enq[MAX_CPUS]; + struct record enq[]; }; static bool map_collect_percpu(int fd, __u32 key, struct record *rec) @@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -225,19 +222,20 @@ static struct datarec *alloc_record_per_cpu(void) static struct stats_record *alloc_stats_record(void) { struct stats_record *rec; - int i; + int i, size; - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + size = sizeof(*rec) + n_cpus * sizeof(struct record); + rec = malloc(size); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); } + memset(rec, 0, size); rec->rx_cnt.cpu = alloc_record_per_cpu(); rec->redir_err.cpu = alloc_record_per_cpu(); rec->kthread.cpu = alloc_record_per_cpu(); rec->exception.cpu = alloc_record_per_cpu(); - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) rec->enq[i].cpu = alloc_record_per_cpu(); return rec; @@ -247,7 +245,7 @@ static void free_stats_record(struct stats_record *r) { int i; - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) free(r->enq[i].cpu); free(r->exception.cpu); free(r->kthread.cpu); @@ -350,7 +348,7 @@ static void stats_print(struct stats_record *stats_rec, } /* cpumap enqueue stats */ - for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) { + for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) { char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n"; char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n"; char *errstr = ""; @@ -475,7 +473,7 @@ static void stats_collect(struct stats_record *rec) map_collect_percpu(fd, 1, &rec->redir_err); fd = cpumap_enqueue_cnt_map_fd; - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) map_collect_percpu(fd, i, &rec->enq[i]); fd = cpumap_kthread_cnt_map_fd; @@ -549,10 +547,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size, */ static void mark_cpus_unavailable(void) { - __u32 invalid_cpu = MAX_CPUS; + __u32 invalid_cpu = n_cpus; int ret, i; - for (i = 0; i < MAX_CPUS; i++) { + for (i = 0; i < n_cpus; i++) { ret = bpf_map_update_elem(cpus_available_map_fd, &i, &invalid_cpu, 0); if (ret) { @@ -688,6 +686,8 @@ int main(int argc, char **argv) int prog_fd; __u32 qsize; + n_cpus = get_nprocs_conf(); + /* Notice: choosing he queue size is very important with the * ixgbe driver, because it's driver page recycling trick is * dependend on pages being returned quickly. 
The number of @@ -757,7 +757,7 @@ int main(int argc, char **argv) case 'c': /* Add multiple CPUs */ add_cpu = strtoul(optarg, NULL, 0); - if (add_cpu >= MAX_CPUS) { + if (add_cpu >= n_cpus) { fprintf(stderr, "--cpu nr too large for cpumap err(%d):%s\n", errno, strerror(errno)); diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index b88df17853b8..21d6e5067a83 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void) { unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; struct record *array; - size_t size; - size = sizeof(struct record) * nr_rxqs; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_rxqs, sizeof(struct record)); if (!array) { fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs); exit(EXIT_FAIL_MEM); @@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void) struct stats_record *rec; int i; - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(struct stats_record)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index df011ac33402..3b604c1eb3c3 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -677,6 +677,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds) while (ret != rcvd) { if (ret < 0) exit_with_error(-ret); + complete_tx_l2fwd(xsk, fds); if (xsk_ring_prod__needs_wakeup(&xsk->tx)) kick_tx(xsk); ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); @@ -782,5 +783,7 @@ int main(int argc, char **argv) else l2fwd_all(); + munmap(bufs, NUM_FRAMES * opt_xsk_frame_size); + return 0; } diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c index 9ca3e4400c98..ecae2139274f 100644 --- a/samples/kfifo/bytestream-example.c +++ b/samples/kfifo/bytestream-example.c @@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static const struct file_operations fifo_fops = { diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c index 6cdeb72f83f1..7b4489e7a9a5 100644 --- a/samples/kfifo/inttype-example.c +++ b/samples/kfifo/inttype-example.c @@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); + if (ret) + return ret; - return ret ? 
ret : copied; + return copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static const struct file_operations fifo_fops = { diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c index 79ae8bb04120..eafe0838997d 100644 --- a/samples/kfifo/record-example.c +++ b/samples/kfifo/record-example.c @@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); + if (ret) + return ret; - return ret ? ret : copied; + return copied; } static const struct file_operations fifo_fops = { diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c index a11bf6c5b53b..cd3f16a6f5ca 100644 --- a/samples/mic/mpssd/mpssd.c +++ b/samples/mic/mpssd/mpssd.c @@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd, static inline unsigned _vring_size(unsigned int num, unsigned long align) { - return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + align - 1) & ~(align - 1)) - + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; + + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4); } /* diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index cc86bf6566e4..9894693f3be1 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) return -EINVAL; return remap_vmalloc_range_partial(vma, vma->vm_start, - mdev_state->memblk, + mdev_state->memblk, 0, vma->vm_end - vma->vm_start); } diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c index a3d68159fb51..507f09c38b49 100644 --- a/samples/vfs/test-statx.c +++ b/samples/vfs/test-statx.c @@ -23,6 +23,8 @@ #include <linux/fcntl.h> #define statx foo #define statx_timestamp foo_timestamp +struct statx; +struct statx_timestamp; #include <sys/stat.h> #undef statx #undef statx_timestamp diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index d1dd4a6b6adb..7da10afc92c6 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -82,20 +82,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \ $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c)))) # output directory for tests below -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ # try-run # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) # Exit code chooses option. "$$TMP" serves as a temporary file and is # automatically cleaned up. 
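# For context, try-run is the primitive behind helpers such as cc-option
# defined further down in this file; a typical (illustrative) caller in a
# Makefile fragment is:
#   KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra,)
# The flag is kept only when the probe compile run by try-run succeeds.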
try-run = $(shell set -e; \ - TMP="$(TMPOUT).$$$$.tmp"; \ - TMPO="$(TMPOUT).$$$$.o"; \ + TMP=$(TMPOUT)/tmp; \ + TMPO=$(TMPOUT)/tmp.o; \ + mkdir -p $(TMPOUT); \ + trap "rm -rf $(TMPOUT)" EXIT; \ if ($(1)) >/dev/null 2>&1; \ then echo "$(2)"; \ else echo "$(3)"; \ - fi; \ - rm -f "$$TMP" "$$TMPO") + fi) # as-option # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) diff --git a/scripts/Makefile b/scripts/Makefile index 3e86b300f5a1..b4b7d8b58cd6 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -10,6 +10,9 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include +CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto) +CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null) + hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c hostprogs-$(CONFIG_KALLSYMS) += kallsyms hostprogs-$(CONFIG_LOGO) += pnmtologo @@ -23,8 +26,10 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include -HOSTLDLIBS_sign-file = -lcrypto -HOSTLDLIBS_extract-cert = -lcrypto +HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS) +HOSTLDLIBS_sign-file = $(CRYPTO_LIBS) +HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) +HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS) always := $(hostprogs-y) $(hostprogs-m) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 24a33c01bbf7..9c689d011bce 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -234,6 +234,9 @@ objtool_dep = $(objtool_obj) \ ifdef CONFIG_TRIM_UNUSED_KSYMS cmd_gen_ksymdeps = \ $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd + +# List module undefined symbols +undefined_syms = $(NM) $< | $(AWK) '$$1 == "U" { printf("%s%s", x++ ? " " : "", $$2) }'; endif define rule_cc_o_c @@ -253,13 +256,6 @@ define rule_as_o_S $(call cmd,modversions_S) endef -# List module undefined symbols (or empty line if not enabled) -ifdef CONFIG_TRIM_UNUSED_KSYMS -cmd_undef_syms = $(NM) $< | sed -n 's/^ *U //p' | xargs echo -else -cmd_undef_syms = echo -endif - # Built-in and composite module parts $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE $(call cmd,force_checksrc) @@ -267,7 +263,7 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE cmd_mod = { \ echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \ - $(cmd_undef_syms); \ + $(undefined_syms) echo; \ } > $@ $(obj)/%.mod: $(obj)/%.o FORCE diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index a66fc0acad1e..a6d0044328b1 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -230,7 +230,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ - cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@ + cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@ # DTC # --------------------------------------------------------------------------- @@ -297,7 +297,7 @@ define rule_dtc endef $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE - $(call if_changed_rule,dtc) + $(call if_changed_rule,dtc,yaml) dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) @@ -322,19 +322,19 @@ printf "%08x\n" $$dec_size | \ ) quiet_cmd_bzip2 = BZIP2 $@ - cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9; $(size_append); } > $@ + cmd_bzip2 = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@ # Lzma # 
--------------------------------------------------------------------------- quiet_cmd_lzma = LZMA $@ - cmd_lzma = { cat $(real-prereqs) | lzma -9; $(size_append); } > $@ + cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@ quiet_cmd_lzo = LZO $@ - cmd_lzo = { cat $(real-prereqs) | lzop -9; $(size_append); } > $@ + cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@ quiet_cmd_lz4 = LZ4 $@ - cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout; \ + cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \ $(size_append); } > $@ # U-Boot mkimage @@ -381,7 +381,7 @@ quiet_cmd_xzkern = XZKERN $@ $(size_append); } > $@ quiet_cmd_xzmisc = XZMISC $@ - cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@ + cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@ # ASM offsets # --------------------------------------------------------------------------- diff --git a/scripts/Makefile.package b/scripts/Makefile.package index 56eadcc48d46..35a617c29611 100644 --- a/scripts/Makefile.package +++ b/scripts/Makefile.package @@ -45,7 +45,7 @@ if test "$(objtree)" != "$(srctree)"; then \ false; \ fi ; \ $(srctree)/scripts/setlocalversion --save-scmversion; \ -tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \ +tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \ rm -f $(objtree)/.scmversion @@ -127,9 +127,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \ tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \ rm -r $(perf-tar); \ $(if $(findstring tar-src,$@),, \ -$(if $(findstring bz2,$@),bzip2, \ -$(if $(findstring gz,$@),gzip, \ -$(if $(findstring xz,$@),xz, \ +$(if $(findstring bz2,$@),$(KBZIP2), \ +$(if $(findstring gz,$@),$(KGZIP), \ +$(if $(findstring xz,$@),$(XZ), \ $(error unknown target $@)))) \ -f -9 $(perf-tar).tar) diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 894cc58c1a03..1ae89bb5d5fe 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-only # # Copyright (C) 2018-2019 Netronome Systems, Inc. @@ -158,8 +158,6 @@ class HeaderParser(object): break self.reader.close() - print('Parsed description of %d helper function(s)' % len(self.helpers), - file=sys.stderr) ############################################################################### @@ -320,6 +318,11 @@ may be interested in: of eBPF maps are used with a given helper function. * *kernel/bpf/* directory contains other files in which additional helpers are defined (for cgroups, sockmaps, etc.). +* The bpftool utility can be used to probe the availability of helper functions + on the system (as well as supported program and map types, and a number of + other parameters). To do so, run **bpftool feature probe** (see + **bpftool-feature**\ (8) for details). Add the **unprivileged** keyword to + list features available to unprivileged users. Compatibility between helper functions and program types can generally be found in the files where helper functions are defined. Look for the **struct @@ -340,6 +343,7 @@ SEE ALSO ======== **bpf**\ (2), +**bpftool**\ (8), **cgroups**\ (7), **ip**\ (8), **perf_event_open**\ (2), @@ -391,6 +395,186 @@ SEE ALSO print('') +class PrinterHelpers(Printer): + """ + A printer for dumping collected information about helpers as C header to + be included from BPF program. 
+ @helpers: array of Helper objects to print to standard output + """ + + type_fwds = [ + 'struct bpf_fib_lookup', + 'struct bpf_sk_lookup', + 'struct bpf_perf_event_data', + 'struct bpf_perf_event_value', + 'struct bpf_pidns_info', + 'struct bpf_redir_neigh', + 'struct bpf_sock', + 'struct bpf_sock_addr', + 'struct bpf_sock_ops', + 'struct bpf_sock_tuple', + 'struct bpf_spin_lock', + 'struct bpf_sysctl', + 'struct bpf_tcp_sock', + 'struct bpf_tunnel_key', + 'struct bpf_xfrm_state', + 'struct pt_regs', + 'struct sk_reuseport_md', + 'struct sockaddr', + 'struct tcphdr', + 'struct seq_file', + 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', + 'struct udp6_sock', + 'struct task_struct', + + 'struct __sk_buff', + 'struct sk_msg_md', + 'struct xdp_md', + 'struct path', + 'struct btf_ptr', + 'struct bpf_timer', + ] + known_types = { + '...', + 'void', + 'const void', + 'char', + 'const char', + 'int', + 'long', + 'unsigned long', + + '__be16', + '__be32', + '__wsum', + + 'struct bpf_fib_lookup', + 'struct bpf_perf_event_data', + 'struct bpf_perf_event_value', + 'struct bpf_pidns_info', + 'struct bpf_redir_neigh', + 'struct bpf_sk_lookup', + 'struct bpf_sock', + 'struct bpf_sock_addr', + 'struct bpf_sock_ops', + 'struct bpf_sock_tuple', + 'struct bpf_spin_lock', + 'struct bpf_sysctl', + 'struct bpf_tcp_sock', + 'struct bpf_tunnel_key', + 'struct bpf_xfrm_state', + 'struct pt_regs', + 'struct sk_reuseport_md', + 'struct sockaddr', + 'struct tcphdr', + 'struct seq_file', + 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', + 'struct udp6_sock', + 'struct task_struct', + 'struct path', + 'struct btf_ptr', + 'struct bpf_timer', + } + mapped_types = { + 'u8': '__u8', + 'u16': '__u16', + 'u32': '__u32', + 'u64': '__u64', + 's8': '__s8', + 's16': '__s16', + 's32': '__s32', + 's64': '__s64', + 'size_t': 'unsigned long', + 'struct bpf_map': 'void', + 'struct sk_buff': 'struct __sk_buff', + 'const struct sk_buff': 'const struct __sk_buff', + 'struct sk_msg_buff': 'struct sk_msg_md', + 'struct xdp_buff': 'struct xdp_md', + 'struct bpf_sock_ops_kern': 'struct bpf_sock_ops', + } + # Helpers overloaded for different context types. + overloaded_helpers = [ + 'bpf_get_socket_cookie', + 'bpf_sk_assign', + ] + + def print_header(self): + header = '''\ +/* This is auto-generated file. See bpf_helpers_doc.py for details. */ + +/* Forward declarations of BPF structs */''' + + print(header) + for fwd in self.type_fwds: + print('%s;' % fwd) + print('') + + def print_footer(self): + footer = '' + print(footer) + + def map_type(self, t): + if t in self.known_types: + return t + if t in self.mapped_types: + return self.mapped_types[t] + print("Unrecognized type '%s', please add it to known types!" % t, + file=sys.stderr) + sys.exit(1) + + seen_helpers = set() + + def print_one(self, helper): + proto = helper.proto_break_down() + + if proto['name'] in self.seen_helpers: + return + self.seen_helpers.add(proto['name']) + + print('/*') + print(" * %s" % proto['name']) + print(" *") + if (helper.desc): + # Do not strip all newline characters: formatted code at the end of + # a section must be followed by a blank line. 
+ for line in re.sub('\n$', '', helper.desc, count=1).split('\n'): + print(' *{}{}'.format(' \t' if line else '', line)) + + if (helper.ret): + print(' *') + print(' * Returns') + for line in helper.ret.rstrip().split('\n'): + print(' *{}{}'.format(' \t' if line else '', line)) + + print(' */') + print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']), + proto['ret_star'], proto['name']), end='') + comma = '' + for i, a in enumerate(proto['args']): + t = a['type'] + n = a['name'] + if proto['name'] in self.overloaded_helpers and i == 0: + t = 'void' + n = 'ctx' + one_arg = '{}{}'.format(comma, self.map_type(t)) + if n: + if a['star']: + one_arg += ' {}'.format(a['star']) + else: + one_arg += ' ' + one_arg += '{}'.format(n) + comma = ', ' + print(one_arg, end='') + + print(') = (void *) %d;' % len(self.seen_helpers)) + print('') + ############################################################################### # If script is launched from scripts/ from kernel tree and can access @@ -405,6 +589,8 @@ Parse eBPF header file and generate documentation for eBPF helper functions. The RST-formatted output produced can be turned into a manual page with the rst2man utility. """) +argParser.add_argument('--header', action='store_true', + help='generate C header file') if (os.path.isfile(bpfh)): argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h', default=bpfh) @@ -417,5 +603,8 @@ headerParser = HeaderParser(args.filename) headerParser.run() # Print formatted output to standard output. -printer = PrinterRST(headerParser.helpers) +if args.header: + printer = PrinterHelpers(headerParser.helpers) +else: + printer = PrinterRST(headerParser.helpers) printer.print_all() diff --git a/scripts/check-kabi b/scripts/check-kabi new file mode 100755 index 000000000000..ac420d89684c --- /dev/null +++ b/scripts/check-kabi @@ -0,0 +1,146 @@ +#!/usr/bin/python2 +# +# check-kabi - Red Hat kABI reference checking tool +# +# We use this script to check against reference Module.kabi files. +# +# Author: Jon Masters <jcm@redhat.com> +# Copyright (C) 2007-2009 Red Hat, Inc. +# +# This software may be freely redistributed under the terms of the GNU +# General Public License (GPL). + +# Changelog: +# +# 2009/08/15 - Updated for use in RHEL6. +# 2007/06/13 - Initial rewrite in python by Jon Masters. 
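# Typical invocation, matching the usage text further down (Module.kabi is
# assumed to be a saved reference copy of an earlier build's Module.symvers):
#
#   ./scripts/check-kabi -k Module.kabi -s Module.symvers
#
# check_kabi() below exits non-zero when a symbol CRC changed or a symbol
# moved, which is meant to stop the build.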
+__author__ = "Jon Masters <jcm@redhat.com>" +__version__ = "2.0" +__date__ = "2009/08/15" +__copyright__ = "Copyright (C) 2007-2009 Red Hat, Inc" +__license__ = "GPL" + +import getopt +import os +import re +import string +import sys + +true = 1 +false = 0 + +def load_symvers(symvers,filename): + """Load a Module.symvers file.""" + + symvers_file = open(filename,"r") + + while true: + in_line = symvers_file.readline() + if in_line == "": + break + if in_line == "\n": + continue + checksum,symbol,directory,type = string.split(in_line) + + symvers[symbol] = in_line[0:-1] + +def load_kabi(kabi,filename): + """Load a Module.kabi file.""" + + kabi_file = open(filename,"r") + + while true: + in_line = kabi_file.readline() + if in_line == "": + break + if in_line == "\n": + continue + checksum,symbol,directory,type = string.split(in_line) + + kabi[symbol] = in_line[0:-1] + +def check_kabi(symvers,kabi): + """Check Module.kabi and Module.symvers files.""" + + fail=0 + warn=0 + changed_symbols=[] + moved_symbols=[] + + for symbol in kabi: + abi_hash,abi_sym,abi_dir,abi_type = string.split(kabi[symbol]) + if symvers.has_key(symbol): + sym_hash,sym_sym,sym_dir,sym_type = string.split(symvers[symbol]) + if abi_hash != sym_hash: + fail=1 + changed_symbols.append(symbol) + + if abi_dir != sym_dir: + warn=1 + moved_symbols.append(symbol) + else: + fail=1 + changed_symbols.append(symbol) + + if fail: + print "*** ERROR - ABI BREAKAGE WAS DETECTED ***" + print "" + print "The following symbols have been changed (this will cause an ABI breakage):" + print "" + for symbol in changed_symbols: + print symbol + print "" + + if warn: + print "*** WARNING - ABI SYMBOLS MOVED ***" + print "" + print "The following symbols moved (typically caused by moving a symbol from being" + print "provided by the kernel vmlinux out to a loadable module):" + print "" + for symbol in moved_symbols: + print symbol + print "" + + """Halt the build if we got errors and/or warnings. In either case, + double-checking is required to avoid introducing / concealing + KABI inconsistencies.""" + if fail or warn: + sys.exit(1) + sys.exit(0) + +def usage(): + print """ +check-kabi: check Module.kabi and Module.symvers files.
+ + check-kabi [ -k Module.kabi ] [ -s Module.symvers ] + +""" + +if __name__ == "__main__": + + symvers_file = "" + kabi_file = "" + + opts, args = getopt.getopt(sys.argv[1:], 'hk:s:') + + for o, v in opts: + if o == "-s": + symvers_file = v + if o == "-h": + usage() + sys.exit(0) + if o == "-k": + kabi_file = v + + if (symvers_file == "") or (kabi_file == ""): + usage() + sys.exit(1) + + symvers={} + kabi={} + + load_symvers(symvers,symvers_file) + load_kabi(kabi,kabi_file) + check_kabi(symvers,kabi) + diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6fcc66afb088..a358af93cd7f 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2576,8 +2576,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", @@ -4150,7 +4150,7 @@ sub process { $fix) { fix_delete_line($fixlinenr, $rawline); my $fixed_line = $rawline; - $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/; + $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/; my $line1 = $1; my $line2 = $2; fix_insert_line($fixlinenr, ltrim($line1)); diff --git a/scripts/coccinelle/misc/add_namespace.cocci b/scripts/coccinelle/misc/add_namespace.cocci index 99e93a6c2e24..cbf1614163cb 100644 --- a/scripts/coccinelle/misc/add_namespace.cocci +++ b/scripts/coccinelle/misc/add_namespace.cocci @@ -6,6 +6,7 @@ /// add a missing namespace tag to a module source file. /// +virtual nsdeps virtual report @has_ns_import@ @@ -16,10 +17,15 @@ MODULE_IMPORT_NS(ns); // Add missing imports, but only adjacent to a MODULE_LICENSE statement. // That ensures we are adding it only to the main module source file. -@do_import depends on !has_ns_import@ +@do_import depends on !has_ns_import && nsdeps@ declarer name MODULE_LICENSE; expression license; identifier virtual.ns; @@ MODULE_LICENSE(license); + MODULE_IMPORT_NS(ns); + +// Dummy rule for report mode that would otherwise be empty and make spatch +// fail ("No rules apply.") +@script:python depends on report@ +@@ diff --git a/scripts/config b/scripts/config index e0e39826dae9..eee5b7f3a092 100755 --- a/scripts/config +++ b/scripts/config @@ -7,6 +7,9 @@ myname=${0##*/} # If no prefix forced, use the default CONFIG_ CONFIG_="${CONFIG_-CONFIG_}" +# We use an uncommon delimiter for sed substitutions +SED_DELIM=$(echo -en "\001") + usage() { cat >&2 <<EOL Manipulate options in a .config file from the command line. 
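# Why the \001 delimiter matters -- a brief sketch with hypothetical values:
# with the previous "s:$before:$after:" form, a replacement such as
#   after='CONFIG_CMDLINE="root=/dev/sda1:ro"'
# embeds the ':' delimiter and makes sed reject the expression; a literal
# \001 byte is effectively impossible in a .config line, so the substitution
# in txt_subst() below stays well-formed for any realistic option value.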
@@ -83,7 +86,7 @@ txt_subst() { local infile="$3" local tmpfile="$infile.swp" - sed -e "s:$before:$after:" "$infile" >"$tmpfile" + sed -e "s$SED_DELIM$before$SED_DELIM$after$SED_DELIM" "$infile" >"$tmpfile" # replace original file with the edited one mv "$tmpfile" "$infile" } diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 13e5fbafdf2f..fe7076fdac8a 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -84,8 +84,8 @@ parse_symbol() { return fi - # Strip out the base of the path - code=${code#$basepath/} + # Strip out the base of the path on each line + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") # In the case of inlines, move everything to same line code=${code//$'\n'/' '} diff --git a/scripts/decodecode b/scripts/decodecode index ba8b8d5834e6..fbdb325cdf4f 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` -cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" +cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" echo cat $T.aa cleanup diff --git a/scripts/depmod.sh b/scripts/depmod.sh index e083bcae343f..3643b4f896ed 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -15,6 +15,8 @@ if ! test -r System.map ; then exit 0 fi +# legacy behavior: "depmod" in /sbin, no /sbin in PATH +PATH="$PATH:/sbin" if [ -z $(command -v $DEPMOD) ]; then echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 echo "This is probably in the kmod package." >&2 diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index b5a5b1c548c9..c2dac994896b 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -9,7 +9,7 @@ dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o # Source files need to get at the userspace version of libfdt_env.h to compile -HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt +HOST_EXTRACFLAGS += -I $(srctree)/$(src)/libfdt ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),) ifneq ($(CHECK_DTBS),) diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile index aa0d0ec6936d..9e95862f2788 100644 --- a/scripts/gcc-plugins/Makefile +++ b/scripts/gcc-plugins/Makefile @@ -11,6 +11,7 @@ else HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable + HOST_EXTRACXXFLAGS += -Wno-format-diag export HOST_EXTRACXXFLAGS endif diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index 17f06079a712..9ad76b7f3f10 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h @@ -35,7 +35,9 @@ #include "ggc.h" #include "timevar.h" +#if BUILDING_GCC_VERSION < 10000 #include "params.h" +#endif #if BUILDING_GCC_VERSION <= 4009 #include "pointer-set.h" @@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); } +#if BUILDING_GCC_VERSION < 10000 template <> template <> inline bool is_a_helper<const ggoto *>::test(const_gimple gs) @@ -860,6 +863,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs) { return 
gs->code == GIMPLE_RETURN; } +#endif static inline gasm *as_a_gasm(gimple stmt) { diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py index 39db889b874c..c4b991607917 100644 --- a/scripts/gdb/linux/rbtree.py +++ b/scripts/gdb/linux/rbtree.py @@ -12,7 +12,7 @@ rb_node_type = utils.CachedType("struct rb_node") def rb_first(root): if root.type == rb_root_type.get_type(): - node = node.address.cast(rb_root_type.get_type().pointer()) + node = root.address.cast(rb_root_type.get_type().pointer()) elif root.type != rb_root_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) @@ -28,7 +28,7 @@ def rb_first(root): def rb_last(root): if root.type == rb_root_type.get_type(): - node = node.address.cast(rb_root_type.get_type().pointer()) + node = root.address.cast(rb_root_type.get_type().pointer()) elif root.type != rb_root_type.get_type().pointer(): raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index be984aa29b75..1be9763cf8bb 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -96,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { - attrs[n]['name'].string(): attrs[n]['address'] + attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss", diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index a07668a5c36b..94a833597a88 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -64,7 +64,7 @@ configs=$(sed -e ' d ' $OUTFILE) -# The entries in the following list are not warned. +# The entries in the following list do not result in an error. # Please do not add a new entry. This list is only for existing ones. # The list will be reduced gradually, and deleted eventually. 
(hopefully)
#
@@ -98,18 +98,19 @@ include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS
 
 for c in $configs
 do
-	warn=1
+	leak_error=1
 
 	for ignore in $config_leak_ignores
 	do
 		if echo "$INFILE:$c" | grep -q "$ignore$"; then
-			warn=
+			leak_error=
 			break
 		fi
 	done
 
-	if [ "$warn" = 1 ]; then
-		echo "warning: $INFILE: leak $c to user-space" >&2
+	if [ "$leak_error" = 1 ]; then
+		echo "error: $INFILE: leak $c to user-space" >&2
+		exit 1
 	fi
 done
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ef2f2336c469..38efdd6fc5e6 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -86,6 +86,9 @@ else
 	$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
 endif
 
+occonfig: $(obj)/conf
+	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/oc.config $(Kconfig)
+
 %_defconfig: $(obj)/conf
 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
 
@@ -124,6 +127,7 @@ help:
 	@echo  '  xconfig	  - Update current config utilising a Qt based front-end'
 	@echo  '  gconfig	  - Update current config utilising a GTK+ based front-end'
 	@echo  '  oldconfig	  - Update current config utilising a provided .config as base'
+	@echo  '  occonfig	  - Defconfig configurations for OpencloudOS kernel'
 	@echo  '  localmodconfig  - Update current config disabling modules not loaded'
 	@echo  '  localyesconfig  - Update current config converting local mods to core'
 	@echo  '  defconfig	  - New config with default from ARCH supplied defconfig'
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 0243086fb168..0590f86df6e4 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -114,7 +114,7 @@ static char *do_error_if(int argc, char *argv[])
 	if (!strcmp(argv[0], "y"))
 		pperror("%s", argv[1]);
 
-	return NULL;
+	return xstrdup("");
 }
 
 static char *do_filename(int argc, char *argv[])
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 82773cc35d35..a94909ad9a53 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -627,7 +627,7 @@ void ConfigList::updateMenuList(ConfigItem *parent, struct menu* menu)
 			last = item;
 			continue;
 		}
-	hide:
+hide:
 		if (item && item->menu == child) {
 			last = parent->firstChild();
 			if (last == item)
@@ -692,7 +692,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
 			last = item;
 			continue;
 		}
-	hide:
+hide:
 		if (item && item->menu == child) {
 			last = (ConfigItem*)parent->topLevelItem(0);
 			if (last == item)
@@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
 
 void ConfigList::contextMenuEvent(QContextMenuEvent *e)
 {
-	if (e->y() <= header()->geometry().bottom()) {
-		if (!headerPopup) {
-			QAction *action;
+	if (!headerPopup) {
+		QAction *action;
 
-			headerPopup = new QMenu(this);
-			action = new QAction("Show Name", this);
-			action->setCheckable(true);
-			connect(action, SIGNAL(toggled(bool)),
-				parent(), SLOT(setShowName(bool)));
-			connect(parent(), SIGNAL(showNameChanged(bool)),
-				action, SLOT(setOn(bool)));
-			action->setChecked(showName);
-			headerPopup->addAction(action);
-			action = new QAction("Show Range", this);
-			action->setCheckable(true);
-			connect(action, SIGNAL(toggled(bool)),
-				parent(), SLOT(setShowRange(bool)));
-			connect(parent(), SIGNAL(showRangeChanged(bool)),
-				action, SLOT(setOn(bool)));
-			action->setChecked(showRange);
-			headerPopup->addAction(action);
-			action = new QAction("Show Data", this);
-			action->setCheckable(true);
-			connect(action, SIGNAL(toggled(bool)),
-				parent(), SLOT(setShowData(bool)));
-			connect(parent(), SIGNAL(showDataChanged(bool)),
-				action, SLOT(setOn(bool)));
-
action->setChecked(showData); - headerPopup->addAction(action); - } - headerPopup->exec(e->globalPos()); - e->accept(); - } else - e->ignore(); + headerPopup = new QMenu(this); + action = new QAction("Show Name", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowName(bool))); + connect(parent(), SIGNAL(showNameChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showName); + headerPopup->addAction(action); + + action = new QAction("Show Range", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowRange(bool))); + connect(parent(), SIGNAL(showRangeChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showRange); + headerPopup->addAction(action); + + action = new QAction("Show Data", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowData(bool))); + connect(parent(), SIGNAL(showDataChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showData); + headerPopup->addAction(action); + } + + headerPopup->exec(e->globalPos()); + e->accept(); } ConfigView*ConfigView::viewList; @@ -1225,10 +1225,11 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) { QMenu* popup = Parent::createStandardContextMenu(pos); QAction* action = new QAction("Show Debug Info", popup); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); - action->setChecked(showDebug()); + + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool))); + action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); return popup; diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 408b5c0b99b1..dc9c0483b168 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -63,12 +63,18 @@ vmlinux_link() local lds="${objtree}/${KBUILD_LDS}" local output=${1} local objects + local strip_debug info LD ${output} # skip output file argument shift + # The kallsyms linking does not need debug symbols included. + if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then + strip_debug=-Wl,--strip-debug + fi + if [ "${SRCARCH}" != "um" ]; then objects="--whole-archive \ ${KBUILD_VMLINUX_OBJS} \ @@ -79,6 +85,7 @@ vmlinux_link() ${@}" ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ + ${strip_debug#-Wl,} \ -o ${output} \ -T ${lds} ${objects} else @@ -91,6 +98,7 @@ vmlinux_link() ${@}" ${CC} ${CFLAGS_vmlinux} \ + ${strip_debug} \ -o ${output} \ -Wl,-T,${lds} \ ${objects} \ @@ -105,7 +113,6 @@ vmlinux_link() gen_btf() { local pahole_ver - local bin_arch if ! 
[ -x "$(command -v ${PAHOLE})" ]; then echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" @@ -118,20 +125,21 @@ gen_btf() return 1 fi - info "BTF" ${2} vmlinux_link ${1} + + info "BTF" ${2} LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} - # dump .BTF section into raw binary file to link with final vmlinux - bin_arch=$(LANG=C ${OBJDUMP} -f ${1} | grep architecture | \ - cut -d, -f1 | cut -d' ' -f2) - bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ - awk '{print $4}') - ${OBJCOPY} --change-section-address .BTF=0 \ - --set-section-flags .BTF=alloc -O binary \ - --only-section=.BTF ${1} .btf.vmlinux.bin - ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ - --rename-section .data=.BTF .btf.vmlinux.bin ${2} + # Create ${2} which contains just .BTF section but no symbols. Add + # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all + # deletes all symbols including __start_BTF and __stop_BTF, which will + # be redefined in the linker script. Add 2>/dev/null to suppress GNU + # objcopy warnings: "empty loadable segment detected at ..." + ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \ + --strip-all ${1} ${2} 2>/dev/null + # Change e_type to ET_REL so that it can be used to link final vmlinux. + # Unlike GNU ld, lld does not allow an ET_EXEC input. + printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none } # Create ${2} .o file with all symbols from the ${1} object file @@ -166,8 +174,8 @@ kallsyms() kallsyms_step() { kallsymso_prev=${kallsymso} - kallsymso=.tmp_kallsyms${1}.o - kallsyms_vmlinux=.tmp_vmlinux${1} + kallsyms_vmlinux=.tmp_vmlinux.kallsyms${1} + kallsymso=${kallsyms_vmlinux}.o vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o} kallsyms ${kallsyms_vmlinux} ${kallsymso} @@ -190,7 +198,6 @@ cleanup() { rm -f .btf.* rm -f .tmp_System.map - rm -f .tmp_kallsyms* rm -f .tmp_vmlinux* rm -f System.map rm -f vmlinux @@ -253,9 +260,8 @@ ${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo btf_vmlinux_bin_o="" if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then - if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then - btf_vmlinux_bin_o=.btf.vmlinux.bin.o - else + btf_vmlinux_bin_o=.btf.vmlinux.bin.o + if ! gen_btf .tmp_vmlinux.btf $btf_vmlinux_bin_o ; then echo >&2 "Failed to generate BTF for vmlinux" echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF" exit 1 @@ -304,6 +310,12 @@ fi vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o} +# fill in BTF IDs +if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then +info BTFIDS vmlinux +${RESOLVE_BTFIDS} vmlinux +fi + if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then info SORTEX vmlinux sortextable vmlinux diff --git a/scripts/mksysmap b/scripts/mksysmap index a35acc0d0b82..9aa23d15862a 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. # (At least sparc64 has __crc_ in the middle). 
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 52f1152c9838..13cda6aa2688 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -960,7 +960,7 @@ static void check_section(const char *modname, struct elf_info *elf, #define DATA_SECTIONS ".data", ".data.rel" #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ - ".kprobes.text", ".cpuidle.text" + ".kprobes.text", ".cpuidle.text", ".noinstr.text" #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ ".fixup", ".entry.text", ".exception.text", ".text.*", \ ".coldtext" diff --git a/scripts/nsdeps b/scripts/nsdeps index 04cea0921673..e547f33b96a6 100644 --- a/scripts/nsdeps +++ b/scripts/nsdeps @@ -23,7 +23,7 @@ fi generate_deps_for_ns() { $SPATCH --very-quiet --in-place --sp-file \ - $srctree/scripts/coccinelle/misc/add_namespace.cocci -D ns=$1 $2 + $srctree/scripts/coccinelle/misc/add_namespace.cocci -D nsdeps -D ns=$1 $2 } generate_deps() { diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 2f66c81e4021..3d541cee16ed 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -28,15 +28,15 @@ case "${1}" in opts= ;; targz-pkg) - opts=--gzip + opts="-I ${KGZIP}" tarball=${tarball}.gz ;; tarbz2-pkg) - opts=--bzip2 + opts="-I ${KBZIP2}" tarball=${tarball}.bz2 ;; tarxz-pkg) - opts=--xz + opts="-I ${XZ}" tarball=${tarball}.xz ;; *) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 7225107a9aaf..cce12e1971d8 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -42,6 +42,8 @@ #define R_ARM_THM_CALL 10 #define R_ARM_CALL 28 +#define R_AARCH64_CALL26 283 + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -434,6 +436,11 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp) return 1; } +static int arm64_is_fake_mcount(Elf64_Rel const *rp) +{ + return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. 
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -547,6 +554,7 @@ static int do_file(char const *const fname) make_nop = make_nop_arm64; rel_type_nop = R_AARCH64_NONE; ideal_nop = ideal_nop4_arm64; + is_fake_mcount64 = arm64_is_fake_mcount; break; case EM_IA_64: reltype = R_IA64_IMM64; break; case EM_MIPS: /* reltype: e_class */ break; diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 74eab03e31d4..f9b19524da11 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -29,6 +29,11 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef find_symtab +#undef get_shnum +#undef set_shnum +#undef get_shstrndx +#undef get_symindex #undef get_sym_str_and_relp #undef do_func #undef Elf_Addr @@ -58,6 +63,11 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define find_symtab find_symtab64 +# define get_shnum get_shnum64 +# define set_shnum set_shnum64 +# define get_shstrndx get_shstrndx64 +# define get_symindex get_symindex64 # define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 @@ -91,6 +101,11 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define find_symtab find_symtab32 +# define get_shnum get_shnum32 +# define set_shnum set_shnum32 +# define get_shstrndx get_shstrndx32 +# define get_symindex get_symindex32 # define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 @@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) return is_fake; } +static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx) +{ + unsigned long offset; + int index; + + if (sym->st_shndx != SHN_XINDEX) + return w2(sym->st_shndx); + + offset = (unsigned long)sym - (unsigned long)symtab; + index = offset / sizeof(*sym); + + return w(symtab_shndx[index]); +} + +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (shdr0 && !ehdr->e_shnum) + return w(shdr0->sh_size); + + return w2(ehdr->e_shnum); +} + +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum) +{ + if (new_shnum >= SHN_LORESERVE) { + ehdr->e_shnum = 0; + shdr0->sh_size = w(new_shnum); + } else + ehdr->e_shnum = w2(new_shnum); +} + +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (ehdr->e_shstrndx != SHN_XINDEX) + return w2(ehdr->e_shstrndx); + + return w(shdr0->sh_link); +} + +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0, + unsigned const nhdr, Elf32_Word **symtab, + Elf32_Word **symtab_shndx) +{ + Elf_Shdr const *relhdr; + unsigned k; + + *symtab = NULL; + *symtab_shndx = NULL; + + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { + if (relhdr->sh_type == SHT_SYMTAB) + *symtab = (void *)ehdr + relhdr->sh_offset; + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX) + *symtab_shndx = (void *)ehdr + relhdr->sh_offset; + + if (*symtab && *symtab_shndx) + break; + } +} + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */ static int append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, @@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr, char const *mc_name = (sizeof(Elf_Rela) == rel_entsize) ? 
".rela__mcount_loc" : ".rel__mcount_loc"; - unsigned const old_shnum = w2(ehdr->e_shnum); uint_t const old_shoff = _w(ehdr->e_shoff); uint_t const old_shstr_sh_size = _w(shstr->sh_size); uint_t const old_shstr_sh_offset = _w(shstr->sh_offset); + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr); + unsigned int const old_shnum = get_shnum(ehdr, shdr0); + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size); uint_t new_e_shoff; @@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr, t += (_align & -t); /* word-byte align */ new_e_shoff = t; + set_shnum(ehdr, shdr0, new_shnum); + /* body for new shstrtab */ if (ulseek(sb.st_size, SEEK_SET) < 0) return -1; @@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr, return -1; ehdr->e_shoff = _w(new_e_shoff); - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ if (ulseek(0, SEEK_SET) < 0) return -1; if (uwrite(ehdr, sizeof(*ehdr)) < 0) @@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx, uint_t *const recvalp, unsigned int *sym_index, Elf_Shdr const *const symhdr, + Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx, Elf_Ehdr const *const ehdr) { Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset) @@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx, for (symp = sym0, t = nsym; t; --t, ++symp) { unsigned int const st_bind = ELF_ST_BIND(symp->st_info); - if (txtndx == w2(symp->st_shndx) + if (txtndx == get_symindex(symp, symtab, symtab_shndx) /* avoid STB_WEAK */ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) { /* function symbols on ARM have quirks, avoid them */ @@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, return totrelsz; } - /* Overall supervision for Elf32 ET_REL file. */ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const nhdr = w2(ehdr->e_shnum); - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)]; + unsigned const nhdr = get_shnum(ehdr, shdr0); + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)]; char const *const shstrtab = (char const *)(_w(shstr->sh_offset) + (void *)ehdr); Elf_Shdr const *relhdr; unsigned k; + Elf32_Word *symtab; + Elf32_Word *symtab_shndx; + /* Upper bound on space: assume all relevant relocs are for mcount. 
*/ unsigned totrelsz; @@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, return -1; } + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx); + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); @@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, result = find_secsym_ndx(w(relhdr->sh_info), txtname, &recval, &recsym, &shdr0[symsec_sh_link], + symtab, symtab_shndx, ehdr); if (result) goto out; diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 3f77a5d695c1..0bafed857e17 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -268,7 +268,11 @@ if ($arch eq "x86_64") { # force flags for this arch $ld .= " -m shlelf_linux"; - $objcopy .= " -O elf32-sh-linux"; + if ($endian eq "big") { + $objcopy .= " -O elf32-shbig-linux"; + } else { + $objcopy .= " -O elf32-sh-linux"; + } } elsif ($arch eq "powerpc") { my $ldemulation; diff --git a/scripts/setlocalversion b/scripts/setlocalversion index a2998b118ef9..45609dba7d72 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -45,7 +45,7 @@ scm_version() # Check for git and a git repo. if test -z "$(git rev-parse --show-cdup 2>/dev/null)" && - head=`git rev-parse --verify --short HEAD 2>/dev/null`; then + head=$(git rev-parse --verify HEAD 2>/dev/null); then # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore # it, because this version is defined in the top level Makefile. @@ -59,11 +59,22 @@ scm_version() fi # If we are past a tagged commit (like # "v2.6.30-rc5-302-g72357d5"), we pretty print it. - if atag="`git describe 2>/dev/null`"; then - echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' + # + # Ensure the abbreviated sha1 has exactly 12 + # hex characters, to make the output + # independent of git version, local + # core.abbrev settings and/or total number of + # objects in the current repository - passing + # --abbrev=12 ensures a minimum of 12, and the + # awk substr() then picks the 'g' and first 12 + # hex chars. + if atag="$(git describe --abbrev=12 2>/dev/null)"; then + echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}' - # If we don't have a tag at all we print -g{commitish}. + # If we don't have a tag at all we print -g{commitish}, + # again using exactly 12 hex chars. 
else + head="$(echo $head | cut -c1-12)" printf '%s%s' -g $head fi fi diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh index 7a2d372f4885..76e9cbcfbeab 100755 --- a/scripts/xz_wrap.sh +++ b/scripts/xz_wrap.sh @@ -20,4 +20,4 @@ case $SRCARCH in sparc) BCJ=--sparc ;; esac -exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB +exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB diff --git a/security/Kconfig b/security/Kconfig index 2a1a2d396228..cd3cc7da3a55 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -277,11 +277,11 @@ endchoice config LSM string "Ordered list of enabled LSMs" - default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK - default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR - default "lockdown,yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO - default "lockdown,yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC - default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" + default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor,bpf" if DEFAULT_SECURITY_SMACK + default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo,bpf" if DEFAULT_SECURITY_APPARMOR + default "lockdown,yama,loadpin,safesetid,integrity,tomoyo,bpf" if DEFAULT_SECURITY_TOMOYO + default "lockdown,yama,loadpin,safesetid,integrity,bpf" if DEFAULT_SECURITY_DAC + default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf" help A comma-separated list of LSMs, in initialization order. Any LSMs left off this list will be ignored. This can be diff --git a/security/Makefile b/security/Makefile index be1dd9d2cb2f..eb00547cc109 100644 --- a/security/Makefile +++ b/security/Makefile @@ -12,6 +12,7 @@ subdir-$(CONFIG_SECURITY_YAMA) += yama subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid subdir-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown +subdir-$(CONFIG_BPF_LSM) += bpf # always enable default capabilities obj-y += commoncap.o @@ -30,6 +31,7 @@ obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/ obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/ obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o +obj-$(CONFIG_BPF_LSM) += bpf/ # Object integrity file lists subdir-$(CONFIG_INTEGRITY) += integrity diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 90d21675c3ad..47e4f2d91df7 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -424,7 +424,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, */ error = aa_may_manage_policy(label, ns, mask); if (error) - return error; + goto end_section; data = aa_simple_write_to_buffer(buf, size, size, pos); error = PTR_ERR(data); @@ -432,6 +432,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, error = aa_replace_profiles(ns, label, mask, data); aa_put_loaddata(data); } +end_section: end_current_label_crit_section(label); return error; diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 5a98661a8b46..597732503815 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -197,8 +197,9 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr, GFP_KERNEL, true, false); if (IS_ERR(rule->label)) { + int err = PTR_ERR(rule->label); 
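+		/* aa_audit_rule_free() below frees rule->label, so the
+		 * PTR_ERR() value is captured into err first; reading
+		 * rule->label after the free would be a use-after-free. */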
 		aa_audit_rule_free(rule);
-		return PTR_ERR(rule->label);
+		return err;
 	}
 
 	*vrule = rule;
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 039ca71872ce..1a33f490e667 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -935,7 +935,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
 	 * aways results in a further reduction of permissions.
 	 */
 	if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
-	    !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) {
+	    !unconfined(label) &&
+	    !aa_label_is_unconfined_subset(new, ctx->nnp)) {
 		error = -EPERM;
 		info = "no new privs";
 		goto audit;
@@ -1213,7 +1214,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
 	 * reduce restrictions.
 	 */
 	if (task_no_new_privs(current) && !unconfined(label) &&
-	    !aa_label_is_subset(new, ctx->nnp)) {
+	    !aa_label_is_unconfined_subset(new, ctx->nnp)) {
 		/* not an apparmor denial per se, so don't log it */
 		AA_DEBUG("no_new_privs - change_hat denied");
 		error = -EPERM;
@@ -1234,7 +1235,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
 	 * reduce restrictions.
 	 */
 	if (task_no_new_privs(current) && !unconfined(label) &&
-	    !aa_label_is_subset(previous, ctx->nnp)) {
+	    !aa_label_is_unconfined_subset(previous, ctx->nnp)) {
 		/* not an apparmor denial per se, so don't log it */
 		AA_DEBUG("no_new_privs - change_hat denied");
 		error = -EPERM;
@@ -1334,6 +1335,7 @@ int aa_change_profile(const char *fqname, int flags)
 	ctx->nnp = aa_get_label(label);
 
 	if (!fqname || !*fqname) {
+		aa_put_label(label);
 		AA_DEBUG("no profile name");
 		return -EINVAL;
 	}
@@ -1352,8 +1354,6 @@ int aa_change_profile(const char *fqname, int flags)
 		op = OP_CHANGE_PROFILE;
 	}
 
-	label = aa_get_current_label();
-
 	if (*fqname == '&') {
 		stack = true;
 		/* don't have label_parse() do stacking */
@@ -1430,7 +1430,7 @@ check:
 	 * reduce restrictions.
 	 */
 	if (task_no_new_privs(current) && !unconfined(label) &&
-	    !aa_label_is_subset(new, ctx->nnp)) {
+	    !aa_label_is_unconfined_subset(new, ctx->nnp)) {
 		/* not an apparmor denial per se, so don't log it */
 		AA_DEBUG("no_new_privs - change_hat denied");
 		error = -EPERM;
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 47942c4ba7ca..255764ab06e2 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -281,6 +281,7 @@ bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
 struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
 
 bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub);
 struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
 					      struct aa_label *set,
 					      struct aa_label *sub);
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 470693239e64..5f324d63ceaa 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -550,6 +550,39 @@ bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
 	return __aa_label_next_not_in_set(&i, set, sub) == NULL;
 }
 
+/**
+ * aa_label_is_unconfined_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * This checks for subset while taking into account unconfined. If
+ * @sub contains an unconfined profile that does not have a matching
+ * unconfined in @set then this will not cause the test to fail.
+ * Conversely we don't care about an unconfined in @set that is not in + * @sub + * + * Returns: true if @sub is special_subset of @set + * else false + */ +bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub) +{ + struct label_it i = { }; + struct aa_profile *p; + + AA_BUG(!set); + AA_BUG(!sub); + + if (sub == set) + return true; + + do { + p = __aa_label_next_not_in_set(&i, set, sub); + if (p && !profile_unconfined(p)) + break; + } while (p); + + return p == NULL; +} /** @@ -1531,13 +1564,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label, label_for_each(i, label, profile) { if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { - if (profile->mode == APPARMOR_UNCONFINED) + count++; + if (profile == profile->ns->unconfined) /* special case unconfined so stacks with * unconfined don't report as mixed. ie. * profile_foo//&:ns1:unconfined (mixed) */ continue; - count++; if (mode == -1) mode = profile->mode; else if (mode != profile->mode) diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index ec3a928af829..e31965dc6dd1 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -791,7 +791,12 @@ static void apparmor_sk_clone_security(const struct sock *sk, struct aa_sk_ctx *ctx = SK_CTX(sk); struct aa_sk_ctx *new = SK_CTX(newsk); + if (new->label) + aa_put_label(new->label); new->label = aa_get_label(ctx->label); + + if (new->peer) + aa_put_label(new->peer); new->peer = aa_get_label(ctx->peer); } diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 6ccd3734a841..43669403f755 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -97,6 +97,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) th.td_flags == YYTD_DATA8)) goto out; + /* if we have a table it must have some entries */ + if (th.td_lolen == 0) + goto out; tsize = table_size(th.td_lolen, th.td_flags); if (bsize < tsize) goto out; @@ -198,6 +201,8 @@ static int verify_dfa(struct aa_dfa *dfa) state_count = dfa->tables[YYTD_ID_BASE]->td_lolen; trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen; + if (state_count == 0) + goto out; for (i = 0; i < state_count; i++) { if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) && (DEFAULT_TABLE(dfa)[i] >= state_count)) diff --git a/security/bpf/Makefile b/security/bpf/Makefile new file mode 100644 index 000000000000..c7a89a962084 --- /dev/null +++ b/security/bpf/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2020 Google LLC. + +obj-$(CONFIG_BPF_LSM) := hooks.o diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c new file mode 100644 index 000000000000..788667d582ae --- /dev/null +++ b/security/bpf/hooks.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ +#include <linux/lsm_hooks.h> +#include <linux/bpf_lsm.h> + +static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = { + #define LSM_HOOK(RET, DEFAULT, NAME, ...) 
\ + LSM_HOOK_INIT(NAME, bpf_lsm_##NAME), + #include <linux/lsm_hook_defs.h> + #undef LSM_HOOK + LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free), +}; + +static int __init bpf_lsm_init(void) +{ + security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf"); + pr_info("LSM support for eBPF active\n"); + return 0; +} + +struct lsm_blob_sizes bpf_lsm_blob_sizes __lsm_ro_after_init = { + .lbs_inode = sizeof(struct bpf_storage_blob), +}; + +DEFINE_LSM(bpf) = { + .name = "bpf", + .init = bpf_lsm_init, + .blobs = &bpf_lsm_blob_sizes +}; diff --git a/security/commoncap.c b/security/commoncap.c index f4ee0ae106b2..1c70d1149186 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -371,10 +371,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, { int size, ret; kuid_t kroot; + u32 nsmagic, magic; uid_t root, mappedroot; char *tmpbuf = NULL; struct vfs_cap_data *cap; - struct vfs_ns_cap_data *nscap; + struct vfs_ns_cap_data *nscap = NULL; struct dentry *dentry; struct user_namespace *fs_ns; @@ -390,52 +391,67 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, &tmpbuf, size, GFP_NOFS); dput(dentry); - if (ret < 0) + if (ret < 0 || !tmpbuf) return ret; fs_ns = inode->i_sb->s_user_ns; cap = (struct vfs_cap_data *) tmpbuf; if (is_v2header((size_t) ret, cap)) { - /* If this is sizeof(vfs_cap_data) then we're ok with the - * on-disk value, so return that. */ - if (alloc) - *buffer = tmpbuf; - else - kfree(tmpbuf); - return ret; - } else if (!is_v3header((size_t) ret, cap)) { - kfree(tmpbuf); - return -EINVAL; + root = 0; + } else if (is_v3header((size_t) ret, cap)) { + nscap = (struct vfs_ns_cap_data *) tmpbuf; + root = le32_to_cpu(nscap->rootid); + } else { + size = -EINVAL; + goto out_free; } - nscap = (struct vfs_ns_cap_data *) tmpbuf; - root = le32_to_cpu(nscap->rootid); kroot = make_kuid(fs_ns, root); /* If the root kuid maps to a valid uid in current ns, then return * this as a nscap. */ mappedroot = from_kuid(current_user_ns(), kroot); if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) { + size = sizeof(struct vfs_ns_cap_data); if (alloc) { - *buffer = tmpbuf; + if (!nscap) { + /* v2 -> v3 conversion */ + nscap = kzalloc(size, GFP_ATOMIC); + if (!nscap) { + size = -ENOMEM; + goto out_free; + } + nsmagic = VFS_CAP_REVISION_3; + magic = le32_to_cpu(cap->magic_etc); + if (magic & VFS_CAP_FLAGS_EFFECTIVE) + nsmagic |= VFS_CAP_FLAGS_EFFECTIVE; + memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32); + nscap->magic_etc = cpu_to_le32(nsmagic); + } else { + /* use allocated v3 buffer */ + tmpbuf = NULL; + } nscap->rootid = cpu_to_le32(mappedroot); - } else - kfree(tmpbuf); - return size; + *buffer = nscap; + } + goto out_free; } if (!rootid_owns_currentns(kroot)) { - kfree(tmpbuf); - return -EOPNOTSUPP; + size = -EOVERFLOW; + goto out_free; } /* This comes from a parent namespace. 
Return as a v2 capability */ size = sizeof(struct vfs_cap_data); if (alloc) { - *buffer = kmalloc(size, GFP_ATOMIC); - if (*buffer) { - struct vfs_cap_data *cap = *buffer; - __le32 nsmagic, magic; + if (nscap) { + /* v3 -> v2 conversion */ + cap = kzalloc(size, GFP_ATOMIC); + if (!cap) { + size = -ENOMEM; + goto out_free; + } magic = VFS_CAP_REVISION_2; nsmagic = le32_to_cpu(nscap->magic_etc); if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE) @@ -443,9 +459,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); cap->magic_etc = cpu_to_le32(magic); } else { - size = -ENOMEM; + /* use unconverted v2 */ + tmpbuf = NULL; } + *buffer = cap; } +out_free: kfree(tmpbuf); return size; } @@ -812,6 +831,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) int ret; kuid_t root_uid; + new->cap_ambient = old->cap_ambient; if (WARN_ON(!cap_ambient_invariant_ok(old))) return -EPERM; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 725674f3276d..5d7bb91c6487 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -352,7 +352,8 @@ static bool match_exception_partial(struct list_head *exceptions, short type, { struct dev_exception_item *ex; - list_for_each_entry_rcu(ex, exceptions, list) { + list_for_each_entry_rcu(ex, exceptions, list, + lockdep_is_held(&devcgroup_mutex)) { if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK)) continue; if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR)) diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index d485f6fc908e..25dac691491b 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) { long rc; const char *algo; - struct crypto_shash **tfm; + struct crypto_shash **tfm, *tmp_tfm = NULL; struct shash_desc *desc; if (type == EVM_XATTR_HMAC) { @@ -93,40 +93,43 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) algo = hash_algo_name[hash_algo]; } - if (*tfm == NULL) { - mutex_lock(&mutex); - if (*tfm) - goto out; - *tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD); - if (IS_ERR(*tfm)) { - rc = PTR_ERR(*tfm); - pr_err("Can not allocate %s (reason: %ld)\n", algo, rc); - *tfm = NULL; + if (*tfm) + goto alloc; + mutex_lock(&mutex); + if (*tfm) + goto unlock; + + tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD); + if (IS_ERR(tmp_tfm)) { + pr_err("Can not allocate %s (reason: %ld)\n", algo, + PTR_ERR(tmp_tfm)); + mutex_unlock(&mutex); + return ERR_CAST(tmp_tfm); + } + if (type == EVM_XATTR_HMAC) { + rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len); + if (rc) { + crypto_free_shash(tmp_tfm); mutex_unlock(&mutex); return ERR_PTR(rc); } - if (type == EVM_XATTR_HMAC) { - rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len); - if (rc) { - crypto_free_shash(*tfm); - *tfm = NULL; - mutex_unlock(&mutex); - return ERR_PTR(rc); - } - } -out: - mutex_unlock(&mutex); } - + *tfm = tmp_tfm; +unlock: + mutex_unlock(&mutex); +alloc: desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), GFP_KERNEL); - if (!desc) + if (!desc) { + crypto_free_shash(tmp_tfm); return ERR_PTR(-ENOMEM); + } desc->tfm = *tfm; rc = crypto_shash_init(desc); if (rc) { + crypto_free_shash(tmp_tfm); kfree(desc); return ERR_PTR(rc); } @@ -209,7 +212,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, data->hdr.length = crypto_shash_digestsize(desc->tfm); error = -ENODATA; - 
list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { bool is_ima = false; if (strcmp(xattr->name, XATTR_NAME_IMA) == 0) @@ -243,7 +246,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, /* Portable EVM signatures must include an IMA hash */ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) - return -EPERM; + error = -EPERM; out: kfree(xattr_value); kfree(desc); diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index f9a81b187fae..615094eda36d 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -99,7 +99,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry) if (!(inode->i_opflags & IOP_XATTR)) return -EOPNOTSUPP; - list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0); if (error < 0) { if (error == -ENODATA) @@ -183,6 +183,12 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, break; case EVM_IMA_XATTR_DIGSIG: case EVM_XATTR_PORTABLE_DIGSIG: + /* accept xattr with non-empty signature field */ + if (xattr_len <= sizeof(struct signature_v2_hdr)) { + evm_status = INTEGRITY_FAIL; + goto out; + } + hdr = (struct signature_v2_hdr *)xattr_data; digest.hdr.algo = hdr->hash_algo; rc = evm_calc_hash(dentry, xattr_name, xattr_value, @@ -230,7 +236,7 @@ static int evm_protected_xattr(const char *req_xattr_name) struct xattr_list *xattr; namelen = strlen(req_xattr_name); - list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) { + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) { if ((strlen(xattr->name) == namelen) && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) { found = 1; diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c index c11c1f7b3ddd..0f37ef27268d 100644 --- a/security/integrity/evm/evm_secfs.c +++ b/security/integrity/evm/evm_secfs.c @@ -234,7 +234,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf, goto out; } - /* Guard against races in evm_read_xattrs */ + /* + * xattr_list_mutex guards against races in evm_read_xattrs(). + * Entries are only added to the evm_config_xattrnames list + * and never deleted. Therefore, the list is traversed + * using list_for_each_entry_lockless() without holding + * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs() + * and evm_protected_xattr(). + */ mutex_lock(&xattr_list_mutex); list_for_each_entry(tmp, &evm_config_xattrnames, list) { if (strcmp(xattr->name, tmp->name) == 0) { diff --git a/security/integrity/iint.c b/security/integrity/iint.c index e12c4900510f..0b9cb639a0ed 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c @@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode) struct rb_node *node, *parent = NULL; struct integrity_iint_cache *iint, *test_iint; + /* + * The integrity's "iint_cache" is initialized at security_init(), + * unless it is not included in the ordered list of LSMs enabled + * on the boot command line. 
+ */ + if (!iint_cache) + panic("%s: lsm=integrity required.\n", __func__); + iint = integrity_iint_find(inode); if (iint) return iint; diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 838476d780e5..d2054bec4909 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -227,7 +227,7 @@ config IMA_APPRAISE_REQUIRE_POLICY_SIGS config IMA_APPRAISE_BOOTPARAM bool "ima_appraise boot parameter" - depends on IMA_APPRAISE && !IMA_ARCH_POLICY + depends on IMA_APPRAISE default y help This option enables the different "ima_appraise=" modes diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 3689081aaf38..5fae6cfe8d91 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -30,13 +30,13 @@ enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII }; -enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; +enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 }; /* digest size for IMA, fits SHA1 or MD5 */ #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE #define IMA_EVENT_NAME_LEN_MAX 255 -#define IMA_HASH_BITS 9 +#define IMA_HASH_BITS 10 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 @@ -52,6 +52,7 @@ extern int ima_policy_flag; extern int ima_hash_algo; extern int ima_appraise; extern struct tpm_chip *ima_tpm_chip; +extern const char boot_aggregate_name[]; /* IMA event related data */ struct ima_event_data { @@ -140,7 +141,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len, int ima_calc_field_array_hash(struct ima_field_data *field_data, struct ima_template_desc *desc, int num_fields, struct ima_digest_data *hash); -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); +int ima_calc_boot_aggregate(struct ima_digest_data *hash); void ima_add_violation(struct file *file, const unsigned char *filename, struct integrity_iint_cache *iint, const char *op, const char *cause); @@ -175,9 +176,10 @@ struct ima_h_table { }; extern struct ima_h_table ima_htable; -static inline unsigned long ima_hash_key(u8 *digest) +static inline unsigned int ima_hash_key(u8 *digest) { - return hash_long(*digest, IMA_HASH_BITS); + /* there is no point in taking a hash of part of a digest */ + return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; } #define __ima_hooks(hook) \ @@ -360,6 +362,7 @@ static inline void ima_free_modsig(struct modsig *modsig) #ifdef CONFIG_IMA_LSM_RULES #define security_filter_rule_init security_audit_rule_init +#define security_filter_rule_free security_audit_rule_free #define security_filter_rule_match security_audit_rule_match #else @@ -370,6 +373,10 @@ static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr, return -EINVAL; } +static inline void security_filter_rule_free(void *lsmrule) +{ +} + static inline int security_filter_rule_match(u32 secid, u32 field, u32 op, void *lsmrule) { diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 136ae4e0ee92..23b04c6521b2 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -18,6 +18,12 @@ static int __init default_appraise_setup(char *str) { #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM + if (arch_ima_get_secureboot()) { + pr_info("Secure boot enabled: ignoring ima_appraise=%s boot parameter option", + str); + return 1; + } + if (strncmp(str, "off", 3) == 0) ima_appraise = 0; else if (strncmp(str, "log", 3) == 0) diff --git 
a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 73044fc6a952..e15f8d37d1f2 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -411,7 +411,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) loff_t i_size; int rc; struct file *f = file; - bool new_file_instance = false, modified_flags = false; + bool new_file_instance = false; /* * For consistency, fail file's opened with the O_DIRECT flag on @@ -429,18 +429,10 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL); flags |= O_RDONLY; f = dentry_open(&file->f_path, flags, file->f_cred); - if (IS_ERR(f)) { - /* - * Cannot open the file again, lets modify f_flags - * of original and continue - */ - pr_info_ratelimited("Unable to reopen file for reading.\n"); - f = file; - f->f_flags |= FMODE_READ; - modified_flags = true; - } else { - new_file_instance = true; - } + if (IS_ERR(f)) + return PTR_ERR(f); + + new_file_instance = true; } i_size = i_size_read(file_inode(f)); @@ -455,8 +447,6 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) out: if (new_file_instance) fput(f); - else if (modified_flags) - f->f_flags &= ~FMODE_READ; return rc; } @@ -645,7 +635,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len, return calc_buffer_shash(buf, len, hash); } -static void __init ima_pcrread(u32 idx, struct tpm_digest *d) +static void ima_pcrread(u32 idx, struct tpm_digest *d) { if (!ima_tpm_chip) return; @@ -655,44 +645,94 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d) } /* - * Calculate the boot aggregate hash + * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With + * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with + * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, + * allowing firmware to configure and enable different banks. + * + * Knowing which TPM bank is read to calculate the boot_aggregate digest + * needs to be conveyed to a verifier. For this reason, use the same + * hash algorithm for reading the TPM PCRs as for calculating the boot + * aggregate digest as stored in the measurement list. */ -static int __init ima_calc_boot_aggregate_tfm(char *digest, - struct crypto_shash *tfm) +static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, + struct crypto_shash *tfm) { - struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} }; + struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; int rc; u32 i; SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; + pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n", + d.alg_id); + rc = crypto_shash_init(shash); if (rc != 0) return rc; - /* cumulative sha1 over tpm registers 0-7 */ + /* cumulative digest over TPM registers 0-7 */ for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, &d); /* now accumulate with current aggregate */ - rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE); + rc = crypto_shash_update(shash, d.digest, + crypto_shash_digestsize(tfm)); + if (rc != 0) + return rc; + } + /* + * Extend cumulative digest over TPM registers 8-9, which contain + * measurement for the kernel command line (reg. 8) and image (reg. 9) + * in a typical PCR allocation. Registers 8-9 are only included in + * non-SHA1 boot_aggregate digests to avoid ambiguity. 
+ */ + if (alg_id != TPM_ALG_SHA1) { + for (i = TPM_PCR8; i < TPM_PCR10; i++) { + ima_pcrread(i, &d); + rc = crypto_shash_update(shash, d.digest, + crypto_shash_digestsize(tfm)); + } } if (!rc) crypto_shash_final(shash, digest); return rc; } -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) +int ima_calc_boot_aggregate(struct ima_digest_data *hash) { struct crypto_shash *tfm; - int rc; + u16 crypto_id, alg_id; + int rc, i, bank_idx = -1; + + for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { + crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; + if (crypto_id == hash->algo) { + bank_idx = i; + break; + } + + if (crypto_id == HASH_ALGO_SHA256) + bank_idx = i; + + if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) + bank_idx = i; + } + + if (bank_idx == -1) { + pr_err("No suitable TPM algorithm for boot aggregate\n"); + return 0; + } + + hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); hash->length = crypto_shash_digestsize(tfm); - rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); + alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; + rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm); ima_free_tfm(tfm); diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 2000e8df0301..68571c40d61f 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -340,8 +340,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf, integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, "policy_update", "signed policy required", 1, 0); - if (ima_appraise & IMA_APPRAISE_ENFORCE) - result = -EACCES; + result = -EACCES; } else { result = ima_parse_add_rule(data); } diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 5d55ade5f3b9..a94177042eaa 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -21,13 +21,13 @@ #include "ima.h" /* name for boot aggregate entry */ -static const char boot_aggregate_name[] = "boot_aggregate"; +const char boot_aggregate_name[] = "boot_aggregate"; struct tpm_chip *ima_tpm_chip; /* Add the boot aggregate to the IMA measurement list and extend * the PCR register. * - * Calculate the boot aggregate, a SHA1 over tpm registers 0-7, + * Calculate the boot aggregate, a hash over tpm registers 0-7, * assuming a TPM chip exists, and zeroes if the TPM chip does not * exist. Add the boot aggregate measurement to the measurement * list and extend the PCR register. @@ -51,15 +51,27 @@ static int __init ima_add_boot_aggregate(void) int violation = 0; struct { struct ima_digest_data hdr; - char digest[TPM_DIGEST_SIZE]; + char digest[TPM_MAX_DIGEST_SIZE]; } hash; memset(iint, 0, sizeof(*iint)); memset(&hash, 0, sizeof(hash)); iint->ima_hash = &hash.hdr; - iint->ima_hash->algo = HASH_ALGO_SHA1; - iint->ima_hash->length = SHA1_DIGEST_SIZE; + iint->ima_hash->algo = ima_hash_algo; + iint->ima_hash->length = hash_digest_size[ima_hash_algo]; + /* + * With TPM 2.0 hash agility, TPM chips could support multiple TPM + * PCR banks, allowing firmware to configure and enable different + * banks. The SHA1 bank is not necessarily enabled. + * + * Use the same hash algorithm for reading the TPM PCRs as for + * calculating the boot aggregate digest. Preference is given to + * the configured IMA default hash algorithm. Otherwise, use the + * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2. 
+ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank + * is not found. + */ if (ima_tpm_chip) { result = ima_calc_boot_aggregate(&hash.hdr); if (result < 0) { diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 9e94eca48b89..955e4b4d09e2 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -120,6 +120,7 @@ void ima_add_kexec_buffer(struct kimage *image) ret = kexec_add_buffer(&kbuf); if (ret) { pr_err("Error passing over kexec measurement buffer.\n"); + vfree(kexec_buffer); return; } @@ -129,6 +130,8 @@ void ima_add_kexec_buffer(struct kimage *image) return; } + image->ima_buffer = kexec_buffer; + pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n", kbuf.mem); } diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 60027c643ecd..a768f37a0a4d 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -712,6 +712,9 @@ static int __init init_ima(void) error = ima_init(); } + if (error) + return error; + error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier); if (error) pr_warn("Couldn't register LSM notifier, error %d\n", error); diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c index 36cadadbfba4..1e5c01916173 100644 --- a/security/integrity/ima/ima_mok.c +++ b/security/integrity/ima/ima_mok.c @@ -38,13 +38,12 @@ __init int ima_mok_init(void) (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE | KEY_USR_SEARCH, - KEY_ALLOC_NOT_IN_QUOTA, + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_SET_KEEP, restriction, NULL); if (IS_ERR(ima_blacklist_keyring)) panic("Can't allocate IMA blacklist keyring."); - - set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags); return 0; } device_initcall(ima_mok_init); diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index ee9aec5e98f0..e725d4187271 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init; static LIST_HEAD(ima_default_rules); static LIST_HEAD(ima_policy_rules); static LIST_HEAD(ima_temp_rules); -static struct list_head *ima_rules; +static struct list_head *ima_rules = &ima_default_rules; static int ima_policy __initdata; @@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry) int i; for (i = 0; i < MAX_LSM_RULES; i++) { - kfree(entry->lsm[i].rule); + security_filter_rule_free(entry->lsm[i].rule); kfree(entry->lsm[i].args_p); } kfree(entry); @@ -591,9 +591,12 @@ static void add_rules(struct ima_rule_entry *entries, int count, list_add_tail(&entry->list, &ima_policy_rules); } if (entries[i].action == APPRAISE) { - temp_ima_appraise |= ima_appraise_flag(entries[i].func); - if (entries[i].func == POLICY_CHECK) - temp_ima_appraise |= IMA_APPRAISE_POLICY; + if (entries != build_appraise_rules) + temp_ima_appraise |= + ima_appraise_flag(entries[i].func); + else + build_ima_appraise |= + ima_appraise_flag(entries[i].func); } } } @@ -712,7 +715,6 @@ void __init ima_init_policy(void) ARRAY_SIZE(default_appraise_rules), IMA_DEFAULT_POLICY); - ima_rules = &ima_default_rules; ima_update_policy_flag(); } diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 32ae05d88257..1be146e17d9f 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -288,6 +288,24 @@ 
int ima_eventdigest_init(struct ima_event_data *event_data, goto out; } + if ((const char *)event_data->filename == boot_aggregate_name) { + if (ima_tpm_chip) { + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_boot_aggregate(&hash.hdr); + + /* algo can change depending on available PCR banks */ + if (!result && hash.hdr.algo != HASH_ALGO_SHA1) + result = -EINVAL; + + if (result < 0) + memset(&hash, 0, sizeof(hash)); + } + + cur_digest = hash.hdr.digest; + cur_digestsize = hash_digest_size[HASH_ALGO_SHA1]; + goto out; + } + if (!event_data->file) /* missing info to re-calculate the digest */ return -EINVAL; diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 001abe530a0d..82008f900930 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -352,7 +352,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) * read the key data * - the key's semaphore is read-locked */ -long big_key_read(const struct key *key, char __user *buffer, size_t buflen) +long big_key_read(const struct key *key, char *buffer, size_t buflen) { size_t datalen = (size_t)key->payload.data[big_key_len]; long ret; @@ -391,9 +391,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ret = datalen; - /* copy decrypted data to user */ - if (copy_to_user(buffer, buf->virt, datalen) != 0) - ret = -EFAULT; + /* copy out decrypted data */ + memcpy(buffer, buf->virt, datalen); err_fput: fput(file); @@ -401,9 +400,7 @@ error: big_key_free_buffer(buf); } else { ret = datalen; - if (copy_to_user(buffer, key->payload.data[big_key_data], - datalen) != 0) - ret = -EFAULT; + memcpy(buffer, key->payload.data[big_key_data], datalen); } return ret; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 60720f58cbe0..f6797ba44bf7 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -902,14 +902,14 @@ out: } /* - * encrypted_read - format and copy the encrypted data to userspace + * encrypted_read - format and copy out the encrypted data * * The resulting datablob format is: * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> * * On success, return to userspace the encrypted key datablob size. 
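The big_key hunk above and the encrypted-key and keyctl changes that follow switch key type .read methods from copy_to_user() to plain memcpy(): the caller now supplies a kernel buffer and performs the user copy itself. A sketch of the resulting contract; struct example_payload is an assumed layout, not a real kernel type:

#include <linux/key.h>
#include <linux/string.h>

struct example_payload {
	unsigned short datalen;
	char data[];
};

static long example_key_read(const struct key *key, char *buffer,
			     size_t buflen)
{
	const struct example_payload *p = key->payload.data[0];

	if (buffer && buflen) {
		if (buflen > p->datalen)
			buflen = p->datalen;
		memcpy(buffer, p->data, buflen); /* kernel buffer, no fault risk */
	}
	return p->datalen;  /* always report the full payload length */
}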
*/ -static long encrypted_read(const struct key *key, char __user *buffer, +static long encrypted_read(const struct key *key, char *buffer, size_t buflen) { struct encrypted_key_payload *epayload; @@ -957,8 +957,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); - if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) - ret = -EFAULT; + memcpy(buffer, ascii_buf, asciiblob_len); kzfree(ascii_buf); return asciiblob_len; diff --git a/security/keys/internal.h b/security/keys/internal.h index c039373488bd..1ca8bfaed0e8 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -16,6 +16,8 @@ #include <linux/keyctl.h> #include <linux/refcount.h> #include <linux/compat.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> struct iovec; @@ -348,5 +350,4 @@ static inline void key_check(const struct key *key) #define key_check(key) do {} while(0) #endif - #endif /* _INTERNAL_H */ diff --git a/security/keys/key.c b/security/keys/key.c index 764f4c57913e..623fcb4094dd 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; + if (flags & KEY_ALLOC_SET_KEEP) + key->flags |= 1 << KEY_FLAG_KEEP; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; @@ -381,7 +383,7 @@ int key_payload_reserve(struct key *key, size_t datalen) spin_lock(&key->user->lock); if (delta > 0 && - (key->user->qnbytes + delta >= maxbytes || + (key->user->qnbytes + delta > maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 9b898c969558..edde63a63007 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -142,10 +142,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, key_ref_put(keyring_ref); error3: - if (payload) { - memzero_explicit(payload, plen); - kvfree(payload); - } + kvfree_sensitive(payload, plen); error2: kfree(description); error: @@ -339,7 +336,7 @@ long keyctl_update_key(key_serial_t id, payload = NULL; if (plen) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL); + payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; @@ -360,7 +357,7 @@ long keyctl_update_key(key_serial_t id, key_ref_put(key_ref); error2: - kzfree(payload); + kvfree_sensitive(payload, plen); error: return ret; } @@ -797,6 +794,21 @@ error: return ret; } +/* + * Call the read method + */ +static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) +{ + long ret; + + down_read(&key->sem); + ret = key_validate(key); + if (ret == 0) + ret = key->type->read(key, buffer, buflen); + up_read(&key->sem); + return ret; +} + /* * Read a key's payload. 
* @@ -812,26 +824,28 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) struct key *key; key_ref_t key_ref; long ret; + char *key_data = NULL; + size_t key_data_len; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; - goto error; + goto out; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) - goto error2; /* Negatively instantiated */ + goto key_put_out; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) - goto error2; + goto key_put_out; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be @@ -839,26 +853,78 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) */ if (!is_key_possessed(key_ref)) { ret = -EACCES; - goto error2; + goto key_put_out; } /* the key is probably readable - now try to read it */ can_read_key: - ret = -EOPNOTSUPP; - if (key->type->read) { - /* Read the data with the semaphore held (since we might sleep) - * to protect against the key being updated or revoked. - */ - down_read(&key->sem); - ret = key_validate(key); - if (ret == 0) - ret = key->type->read(key, buffer, buflen); - up_read(&key->sem); + if (!key->type->read) { + ret = -EOPNOTSUPP; + goto key_put_out; } -error2: + if (!buffer || !buflen) { + /* Get the key length from the read method */ + ret = __keyctl_read_key(key, NULL, 0); + goto key_put_out; + } + + /* + * Read the data with the semaphore held (since we might sleep) + * to protect against the key being updated or revoked. + * + * Allocating a temporary buffer to hold the keys before + * transferring them to user buffer to avoid potential + * deadlock involving page fault and mmap_sem. + * + * key_data_len = (buflen <= PAGE_SIZE) + * ? buflen : actual length of key data + * + * This prevents allocating arbitrary large buffer which can + * be much larger than the actual key length. In the latter case, + * at least 2 passes of this loop is required. + */ + key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0; + for (;;) { + if (key_data_len) { + key_data = kvmalloc(key_data_len, GFP_KERNEL); + if (!key_data) { + ret = -ENOMEM; + goto key_put_out; + } + } + + ret = __keyctl_read_key(key, key_data, key_data_len); + + /* + * Read methods will just return the required length without + * any copying if the provided length isn't large enough. + */ + if (ret <= 0 || ret > buflen) + break; + + /* + * The key may change (unlikely) in between 2 consecutive + * __keyctl_read_key() calls. In this case, we reallocate + * a larger buffer and redo the key read when + * key_data_len < ret <= buflen. 
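From userspace, the same probe-then-read pattern looks roughly like the sketch below; the KEYCTL_READ value is copied from <linux/keyctl.h> for self-containment, and error handling is trimmed:

#include <stdint.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KEYCTL_READ 11

static char *read_key_payload(int32_t key, long *lenp)
{
	long len = syscall(SYS_keyctl, KEYCTL_READ, key, NULL, 0);
	char *buf;

	if (len < 0)
		return NULL;
	buf = malloc(len);
	/* the second call may report a new length if the key changed meanwhile */
	if (buf && syscall(SYS_keyctl, KEYCTL_READ, key, buf, len) != len) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}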
+ */ + if (ret > key_data_len) { + if (unlikely(key_data)) + kvfree_sensitive(key_data, key_data_len); + key_data_len = ret; + continue; /* Allocate buffer */ + } + + if (copy_to_user(buffer, key_data, ret)) + ret = -EFAULT; + break; + } + kvfree_sensitive(key_data, key_data_len); + +key_put_out: key_put(key); -error: +out: return ret; } @@ -937,8 +1003,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); - if (newowner->qnkeys + 1 >= maxkeys || - newowner->qnbytes + key->quotalen >= maxbytes || + if (newowner->qnkeys + 1 > maxkeys || + newowner->qnbytes + key->quotalen > maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; @@ -1156,10 +1222,7 @@ long keyctl_instantiate_key_common(key_serial_t id, keyctl_change_reqkey_auth(NULL); error2: - if (payload) { - memzero_explicit(payload, plen); - kvfree(payload); - } + kvfree_sensitive(payload, plen); error: return ret; } diff --git a/security/keys/keyring.c b/security/keys/keyring.c index febf36c6ddc5..5ca620d31cd3 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -459,7 +459,6 @@ static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); - int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->buflen); @@ -467,10 +466,7 @@ static int keyring_read_iterator(const void *object, void *data) if (ctx->count >= ctx->buflen) return 1; - ret = put_user(key->serial, ctx->buffer); - if (ret < 0) - return ret; - ctx->buffer++; + *ctx->buffer++ = key->serial; ctx->count += sizeof(key->serial); return 0; } diff --git a/security/keys/proc.c b/security/keys/proc.c index 415f3f1c2da0..d0cde6685627 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -139,6 +139,8 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); + else + (*_pos)++; return n; } diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index ecba39c93fd9..41e9735006d0 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -22,7 +22,7 @@ static int request_key_auth_instantiate(struct key *, static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); -static long request_key_auth_read(const struct key *, char __user *, size_t); +static long request_key_auth_read(const struct key *, char *, size_t); /* * The request-key authorisation key type definition. 
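The proc.c hunk above follows the seq_file convention that a ->next handler must advance *pos even when it returns NULL; otherwise a reader that stops exactly at the end of the file can see the final record twice. A sketch with fetch_next_record() and record_position() as hypothetical helpers:

#include <linux/seq_file.h>

extern void *fetch_next_record(void *v);   /* hypothetical lookup helper */
extern loff_t record_position(void *v);    /* hypothetical */

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	void *n = fetch_next_record(v);

	if (n)
		*pos = record_position(n);
	else
		(*pos)++;  /* still advance at EOF so the last record is not repeated */
	return n;
}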
@@ -80,7 +80,7 @@ static void request_key_auth_describe(const struct key *key, * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { struct request_key_auth *rka = dereference_key_locked(key); size_t datalen; @@ -97,8 +97,7 @@ static long request_key_auth_read(const struct key *key, if (buflen > datalen) buflen = datalen; - if (copy_to_user(buffer, rka->callout_info, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, rka->callout_info, buflen); } return ret; diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 1fbd77816610..92a14ab82f72 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -805,7 +805,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; - else + else if (*args[0].from != '1') return -EINVAL; break; case Opt_pcrlock: @@ -1144,11 +1144,10 @@ out: * trusted_read - copy the sealed blob data to userspace in hex. * On success, return to userspace the trusted key datablob size. */ -static long trusted_read(const struct key *key, char __user *buffer, +static long trusted_read(const struct key *key, char *buffer, size_t buflen) { const struct trusted_key_payload *p; - char *ascii_buf; char *bufp; int i; @@ -1157,18 +1156,9 @@ static long trusted_read(const struct key *key, char __user *buffer, return -EINVAL; if (buffer && buflen >= 2 * p->blob_len) { - ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL); - if (!ascii_buf) - return -ENOMEM; - - bufp = ascii_buf; + bufp = buffer; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); - if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { - kzfree(ascii_buf); - return -EFAULT; - } - kzfree(ascii_buf); } return 2 * p->blob_len; } diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 6f12de4ce549..07d4287e9084 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(user_describe); * read the key data * - the key's semaphore is read-locked */ -long user_read(const struct key *key, char __user *buffer, size_t buflen) +long user_read(const struct key *key, char *buffer, size_t buflen) { const struct user_key_payload *upayload; long ret; @@ -181,8 +181,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) if (buflen > upayload->datalen) buflen = upayload->datalen; - if (copy_to_user(buffer, upayload->data, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, upayload->data, buflen); } return ret; diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c index 40b790536def..3f38583bed06 100644 --- a/security/lockdown/lockdown.c +++ b/security/lockdown/lockdown.c @@ -32,12 +32,14 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", [LOCKDOWN_MMIOTRACE] = "unsafe mmio", [LOCKDOWN_DEBUGFS] = "debugfs access", + [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", + [LOCKDOWN_XMON_RW] = "xmon read and write access", [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", }; @@ -175,7 +177,7 @@ static int __init lockdown_secfs_init(void) { struct dentry 
*dentry; - dentry = securityfs_create_file("lockdown", 0600, NULL, NULL, + dentry = securityfs_create_file("lockdown", 0644, NULL, NULL, &lockdown_ops); return PTR_ERR_OR_ZERO(dentry); } diff --git a/security/lsm_audit.c b/security/lsm_audit.c index e40874373f2b..d025f575a9e3 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -274,7 +274,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, struct inode *inode; audit_log_format(ab, " name="); + spin_lock(&a->u.dentry->d_lock); audit_log_untrustedstring(ab, a->u.dentry->d_name.name); + spin_unlock(&a->u.dentry->d_lock); inode = d_backing_inode(a->u.dentry); if (inode) { @@ -292,8 +294,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, dentry = d_find_alias(inode); if (dentry) { audit_log_format(ab, " name="); - audit_log_untrustedstring(ab, - dentry->d_name.name); + spin_lock(&dentry->d_lock); + audit_log_untrustedstring(ab, dentry->d_name.name); + spin_unlock(&dentry->d_lock); dput(dentry); } audit_log_format(ab, " dev="); diff --git a/security/security.c b/security/security.c index 1bc000f834e2..c6272872154d 100644 --- a/security/security.c +++ b/security/security.c @@ -636,6 +636,25 @@ static void __init lsm_early_task(struct task_struct *task) panic("%s: Early task alloc failed.\n", __func__); } +/* + * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and + * can be accessed with: + * + * LSM_RET_DEFAULT(<hook_name>) + * + * The macros below define static constants for the default value of each + * LSM hook. + */ +#define LSM_RET_DEFAULT(NAME) (NAME##_default) +#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME) +#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \ + static const int LSM_RET_DEFAULT(NAME) = (DEFAULT); +#define LSM_HOOK(RET, DEFAULT, NAME, ...) \ + DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME) + +#include <linux/lsm_hook_defs.h> +#undef LSM_HOOK + /* * Hook list operation macros. * @@ -1306,16 +1325,16 @@ int security_inode_getsecurity(struct inode *inode, const char *name, void **buf int rc; if (unlikely(IS_PRIVATE(inode))) - return -EOPNOTSUPP; + return LSM_RET_DEFAULT(inode_getsecurity); /* * Only one module will provide an attribute with a given name. */ hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) { rc = hp->hook.inode_getsecurity(inode, name, buffer, alloc); - if (rc != -EOPNOTSUPP) + if (rc != LSM_RET_DEFAULT(inode_getsecurity)) return rc; } - return -EOPNOTSUPP; + return LSM_RET_DEFAULT(inode_getsecurity); } int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) @@ -1324,17 +1343,17 @@ int security_inode_setsecurity(struct inode *inode, const char *name, const void int rc; if (unlikely(IS_PRIVATE(inode))) - return -EOPNOTSUPP; + return LSM_RET_DEFAULT(inode_setsecurity); /* * Only one module will provide an attribute with a given name. 
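To see what the LSM_RET_DEFAULT machinery above generates, assume linux/lsm_hook_defs.h carries an entry of the form LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, ...). The int variant of the macro then emits a named constant, shown here expanded by hand:

#include <linux/errno.h>

#define LSM_RET_DEFAULT(NAME) (NAME##_default)

/* expansion of DECLARE_LSM_RET_DEFAULT_int(-EOPNOTSUPP, inode_getsecurity) */
static const int inode_getsecurity_default = -EOPNOTSUPP;

/* so LSM_RET_DEFAULT(inode_getsecurity) token-pastes to the name above */

This lets the hook callers below compare against the declared default instead of a hard-coded errno value.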
*/ hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) { rc = hp->hook.inode_setsecurity(inode, name, value, size, flags); - if (rc != -EOPNOTSUPP) + if (rc != LSM_RET_DEFAULT(inode_setsecurity)) return rc; } - return -EOPNOTSUPP; + return LSM_RET_DEFAULT(inode_setsecurity); } int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) @@ -1708,12 +1727,12 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int thisrc; - int rc = -ENOSYS; + int rc = LSM_RET_DEFAULT(task_prctl); struct security_hook_list *hp; hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) { thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5); - if (thisrc != -ENOSYS) { + if (thisrc != LSM_RET_DEFAULT(task_prctl)) { rc = thisrc; if (thisrc != 0) break; @@ -1885,7 +1904,7 @@ int security_getprocattr(struct task_struct *p, const char *lsm, char *name, continue; return hp->hook.getprocattr(p, name, value); } - return -EINVAL; + return LSM_RET_DEFAULT(getprocattr); } int security_setprocattr(const char *lsm, const char *name, void *value, @@ -1898,7 +1917,7 @@ int security_setprocattr(const char *lsm, const char *name, void *value, continue; return hp->hook.setprocattr(name, value, size); } - return -EINVAL; + return LSM_RET_DEFAULT(setprocattr); } int security_netlink_send(struct sock *sk, struct sk_buff *skb) @@ -2283,7 +2302,7 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x, const struct flowi *fl) { struct security_hook_list *hp; - int rc = 1; + int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); /* * Since this function is expected to return 0 or 1, the judgment diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 39410913a694..717a398ef4d0 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1499,7 +1499,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent * inode_doinit with a dentry, before these inodes could * be used again by userspace. */ - goto out; + goto out_invalid; } rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid, @@ -1554,7 +1554,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent * could be used again by userspace. */ if (!dentry) - goto out; + goto out_invalid; rc = selinux_genfs_get_sid(dentry, sclass, sbsec->flags, &sid); if (rc) { @@ -1579,11 +1579,10 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent out: spin_lock(&isec->lock); if (isec->initialized == LABEL_PENDING) { - if (!sid || rc) { + if (rc) { isec->initialized = LABEL_INVALID; goto out_unlock; } - isec->initialized = LABEL_INITIALIZED; isec->sid = sid; } @@ -1591,6 +1590,15 @@ out: out_unlock: spin_unlock(&isec->lock); return rc; + +out_invalid: + spin_lock(&isec->lock); + if (isec->initialized == LABEL_PENDING) { + isec->initialized = LABEL_INVALID; + isec->sid = sid; + } + spin_unlock(&isec->lock); + return 0; } /* Convert a Linux signal to an access vector. */ @@ -3156,6 +3164,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); } + if (!selinux_state.initialized) + return (inode_owner_or_capable(inode) ? 
0 : -EPERM); + sbsec = inode->i_sb->s_security; if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; @@ -3239,6 +3250,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name, return; } + if (!selinux_state.initialized) { + /* If we haven't even been initialized, then we can't validate + * against a policy, so leave the label as invalid. It may + * resolve to a valid label on the next revalidation try if + * we've since initialized. + */ + return; + } + rc = security_context_to_sid_force(&selinux_state, value, size, &newsid); if (rc) { @@ -5521,40 +5541,60 @@ static int selinux_tun_dev_open(void *security) static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) { - int err = 0; - u32 perm; + int rc = 0; + unsigned int msg_len; + unsigned int data_len = skb->len; + unsigned char *data = skb->data; struct nlmsghdr *nlh; struct sk_security_struct *sksec = sk->sk_security; + u16 sclass = sksec->sclass; + u32 perm; - if (skb->len < NLMSG_HDRLEN) { - err = -EINVAL; - goto out; - } - nlh = nlmsg_hdr(skb); + while (data_len >= nlmsg_total_size(0)) { + nlh = (struct nlmsghdr *)data; - err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); - if (err) { - if (err == -EINVAL) { + /* NOTE: the nlmsg_len field isn't reliably set by some netlink + * users which means we can't reject skb's with bogus + * length fields; our solution is to follow what + * netlink_rcv_skb() does and simply skip processing at + * messages with length fields that are clearly junk + */ + if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len) + return 0; + + rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm); + if (rc == 0) { + rc = sock_has_perm(sk, perm); + if (rc) + return rc; + } else if (rc == -EINVAL) { + /* -EINVAL is a missing msg/perm mapping */ pr_warn_ratelimited("SELinux: unrecognized netlink" - " message: protocol=%hu nlmsg_type=%hu sclass=%s" - " pig=%d comm=%s\n", - sk->sk_protocol, nlh->nlmsg_type, - secclass_map[sksec->sclass - 1].name, - task_pid_nr(current), current->comm); - if (!enforcing_enabled(&selinux_state) || - security_get_allow_unknown(&selinux_state)) - err = 0; + " message: protocol=%hu nlmsg_type=%hu sclass=%s" + " pid=%d comm=%s\n", + sk->sk_protocol, nlh->nlmsg_type, + secclass_map[sclass - 1].name, + task_pid_nr(current), current->comm); + if (enforcing_enabled(&selinux_state) && + !security_get_allow_unknown(&selinux_state)) + return rc; + rc = 0; + } else if (rc == -ENOENT) { + /* -ENOENT is a missing socket/class mapping, ignore */ + rc = 0; + } else { + return rc; } - /* Ignore */ - if (err == -ENOENT) - err = 0; - goto out; + /* move to the next message after applying netlink padding */ + msg_len = NLMSG_ALIGN(nlh->nlmsg_len); + if (msg_len >= data_len) + return 0; + data_len -= msg_len; + data += msg_len; } - err = sock_has_perm(sk, perm); -out: - return err; + return rc; } #ifdef CONFIG_NETFILTER diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c index de92365e4324..5887bff50560 100644 --- a/security/selinux/ibpkey.c +++ b/security/selinux/ibpkey.c @@ -151,8 +151,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid) * is valid, it just won't be added to the cache. 
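The selinux_nlmsg_perm() rewrite above turns a single-message check into a walk over every message in the skb. The walk in isolation, as a generic userspace sketch with cb() standing in for the per-message permission check:

#include <linux/netlink.h>

static void for_each_nlmsg(unsigned char *data, unsigned int data_len,
			   void (*cb)(const struct nlmsghdr *))
{
	const struct nlmsghdr *nlh;
	unsigned int msg_len;

	while (data_len >= NLMSG_HDRLEN) {
		nlh = (const struct nlmsghdr *)data;
		/* stop at length fields that are clearly junk, as above */
		if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
			return;
		cb(nlh);
		msg_len = NLMSG_ALIGN(nlh->nlmsg_len);  /* netlink padding */
		if (msg_len >= data_len)
			return;
		data_len -= msg_len;
		data += msg_len;
	}
}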
*/ new = kzalloc(sizeof(*new), GFP_ATOMIC); - if (!new) + if (!new) { + ret = -ENOMEM; goto out; + } new->psec.subnet_prefix = subnet_prefix; new->psec.pkey = pkey_num; diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 32e9b03be3dd..576586370199 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -27,9 +27,9 @@ "audit_control", "setfcap" #define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \ - "wake_alarm", "block_suspend", "audit_read" + "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf" -#if CAP_LAST_CAP > CAP_AUDIT_READ +#if CAP_LAST_CAP > CAP_BPF #error New capability defined, please update COMMON_CAP2_PERMS. #endif diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index e6c7643c3fc0..e9eaff90cbcc 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1508,6 +1508,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) *idx = cpu + 1; return &per_cpu(avc_cache_stats, cpu); } + (*idx)++; return NULL; } diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 1260f5fb766e..dd7aabd94a92 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2496,6 +2496,7 @@ int policydb_read(struct policydb *p, void *fp) if (rc) goto bad; + rc = -ENOMEM; p->type_attr_map_array = kvcalloc(p->p_types.nprim, sizeof(*p->type_attr_map_array), GFP_KERNEL); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index a5813c7629c1..f62adf3cfce8 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2844,8 +2844,12 @@ err: if (*names) { for (i = 0; i < *len; i++) kfree((*names)[i]); + kfree(*names); } kfree(*values); + *len = 0; + *names = NULL; + *values = NULL; goto out; } diff --git a/security/smack/smack.h b/security/smack/smack.h index 62529f382942..335d2411abe4 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -148,7 +148,6 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ }; -#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -159,9 +158,7 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; -#endif /* CONFIG_IPV6 */ -#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. 
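The classmap.h hunk above leans on a preprocessor tripwire (#error) to keep the permission names in sync with newly added capabilities. The same idea as a C11 sketch; note that on headers where CAP_LAST_CAP has moved past CAP_BPF the assertion fires, which is exactly the behaviour the tripwire exists to provide:

#include <linux/capability.h>

static const char *const cap2_names[] = {
	"mac_override", "mac_admin", "syslog",
	"wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf",
};

/* caps 32..CAP_LAST_CAP must each have a name in the table above */
_Static_assert(sizeof(cap2_names) / sizeof(cap2_names[0]) ==
	       CAP_LAST_CAP - CAP_MAC_OVERRIDE + 1,
	       "New capability defined, please update cap2_names");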
*/ @@ -174,7 +171,6 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; -#endif /* SMACK_IPV6_PORT_LABELING */ struct smack_known_list_elem { struct list_head list; @@ -335,9 +331,7 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; -#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; -#endif /* CONFIG_IPV6 */ extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index ad22066eba04..12c0fa85d9f8 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -51,10 +51,8 @@ #define SMK_RECEIVING 1 #define SMK_SENDING 2 -#ifdef SMACK_IPV6_PORT_LABELING -DEFINE_MUTEX(smack_ipv6_lock); +static DEFINE_MUTEX(smack_ipv6_lock); static LIST_HEAD(smk_ipv6_port_list); -#endif static struct kmem_cache *smack_inode_cache; struct kmem_cache *smack_rule_cache; int smack_enabled; @@ -2326,7 +2324,6 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; } -#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2394,7 +2391,6 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) return NULL; } -#endif /* CONFIG_IPV6 */ /** * smack_netlabel - Set the secattr on a socket @@ -2483,7 +2479,6 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) return smack_netlabel(sk, sk_lbl); } -#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2516,7 +2511,6 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } -#endif /* CONFIG_IPV6 */ #ifdef SMACK_IPV6_PORT_LABELING /** @@ -2605,6 +2599,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) mutex_unlock(&smack_ipv6_lock); return; } +#endif /** * smk_ipv6_port_check - check Smack port access @@ -2667,7 +2662,6 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, return smk_ipv6_check(skp, object, address, act); } -#endif /* SMACK_IPV6_PORT_LABELING */ /** * smack_inode_setsecurity - set smack xattrs @@ -2842,24 +2836,21 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; -#ifdef SMACK_IPV6_SECMARK_LABELING - struct smack_known *rsp; -#endif + struct smack_known *rsp = NULL; if (addrlen < SIN6_LEN_RFC2133) return 0; -#ifdef SMACK_IPV6_SECMARK_LABELING - rsp = smack_ipv6host_label(sip); + if (__is_defined(SMACK_IPV6_SECMARK_LABELING)) + rsp = smack_ipv6host_label(sip); if (rsp != NULL) { struct socket_smack *ssp = sock->sk->sk_security; rc = smk_ipv6_check(ssp->smk_out, rsp, sip, SMK_CONNECTING); } -#endif -#ifdef SMACK_IPV6_PORT_LABELING - rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); -#endif + if (__is_defined(SMACK_IPV6_PORT_LABELING)) + rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); + return rc; } if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index e3e05c04dbd1..5e75ff2e1b14 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -878,11 +878,21 @@ static ssize_t smk_set_cipso(struct file 
*file, const char __user *buf, else rule += strlen(skp->smk_known) + 1; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } + ret = sscanf(rule, "%d", &maplevel); - if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } + ret = sscanf(rule, "%d", &catlen); if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) goto out; @@ -895,6 +905,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } ret = sscanf(rule, "%u", &cat); if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) goto out; @@ -1149,7 +1163,7 @@ static ssize_t smk_write_net4addr(struct file *file, const char __user *buf, return -EPERM; if (*ppos != 0) return -EINVAL; - if (count < SMK_NETLBLADDRMIN) + if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1) return -EINVAL; data = memdup_user_nul(buf, count); @@ -1409,7 +1423,7 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf, return -EPERM; if (*ppos != 0) return -EINVAL; - if (count < SMK_NETLBLADDRMIN) + if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1) return -EINVAL; data = memdup_user_nul(buf, count); @@ -1816,6 +1830,10 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf, if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; + /* Enough data must be present */ + if (count == 0 || count > PAGE_SIZE) + return -EINVAL; + data = memdup_user_nul(buf, count); if (IS_ERR(data)) return PTR_ERR(data); @@ -1987,6 +2005,9 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; + if (count > PAGE_SIZE) + return -EINVAL; + data = memdup_user_nul(buf, count); if (IS_ERR(data)) return PTR_ERR(data); @@ -2074,6 +2095,9 @@ static ssize_t smk_write_unconfined(struct file *file, const char __user *buf, if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; + if (count > PAGE_SIZE) + return -EINVAL; + data = memdup_user_nul(buf, count); if (IS_ERR(data)) return PTR_ERR(data); @@ -2629,6 +2653,10 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf, if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; + /* Enough data must be present */ + if (count == 0 || count > PAGE_SIZE) + return -EINVAL; + data = memdup_user_nul(buf, count); if (IS_ERR(data)) return PTR_ERR(data); @@ -2710,7 +2738,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file) static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct task_smack *tsp = smack_cred(current_cred()); char *data; int rc; LIST_HEAD(list_tmp); @@ -2722,10 +2749,13 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, return -EPERM; /* + * No partial write. * Enough data must be present. 
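The smk_set_cipso() hunks above all apply one pattern: before each fixed-width field is parsed, check that the cursor is still inside the buffer copied from userspace. A standalone sketch of that pattern; the field width and the non-negative value check are illustrative assumptions, not Smack's exact rules:

#include <stddef.h>
#include <stdio.h>

static int parse_fixed_fields(const char *data, size_t count,
			      int *vals, int nvals, size_t width)
{
	const char *p = data;
	int i;

	for (i = 0; i < nvals; i++) {
		if (p > data + count)
			return -1;           /* cursor ran past the buffer */
		if (sscanf(p, "%d", &vals[i]) != 1 || vals[i] < 0)
			return -1;
		p += width;                  /* fixed-width fields, like SMK_DIGITLEN */
	}
	return 0;
}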
*/ if (*ppos != 0) return -EINVAL; + if (count == 0 || count > PAGE_SIZE) + return -EINVAL; data = memdup_user_nul(buf, count); if (IS_ERR(data)) @@ -2735,11 +2765,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, kfree(data); if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) { + struct cred *new; + struct task_smack *tsp; + + new = prepare_creds(); + if (!new) { + rc = -ENOMEM; + goto out; + } + tsp = smack_cred(new); smk_destroy_label_list(&tsp->smk_relabel); list_splice(&list_tmp, &tsp->smk_relabel); + commit_creds(new); return count; } - +out: smk_destroy_label_list(&list_tmp); return rc; } diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index f34ce564d92c..1afa06b80f06 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -722,6 +722,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream) retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); if (!retval) { + /* clear flags and stop any drain wait */ + stream->partial_drain = false; + stream->metadata_set = false; snd_compr_drain_notify(stream); stream->runtime->total_bytes_available = 0; stream->runtime->total_bytes_transferred = 0; @@ -879,6 +882,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) if (stream->next_track == false) return -EPERM; + stream->partial_drain = true; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); if (retval) { pr_debug("Partial drain returned failure\n"); diff --git a/sound/core/control.c b/sound/core/control.c index 08ca7666e84c..15efa4b6ea94 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1350,7 +1350,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, unlock: up_write(&card->controls_rwsem); - return 0; + return err; } static int snd_ctl_elem_add_user(struct snd_ctl_file *file, diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 00cb5aed10a9..28bec15b0959 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -216,12 +216,12 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw, if (info.index >= 32) return -EINVAL; /* check whether the dsp was already loaded */ - if (hw->dsp_loaded & (1 << info.index)) + if (hw->dsp_loaded & (1u << info.index)) return -EBUSY; err = hw->ops.dsp_load(hw, &info); if (err < 0) return err; - hw->dsp_loaded |= (1 << info.index); + hw->dsp_loaded |= (1u << info.index); return 0; } diff --git a/sound/core/info.c b/sound/core/info.c index e051a029ccfb..f18f4ef6661e 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -608,7 +608,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { int c = -1; - if (snd_BUG_ON(!buffer || !buffer->buffer)) + if (snd_BUG_ON(!buffer)) + return 1; + if (!buffer->buffer) return 1; if (len <= 0 || buffer->stop || buffer->error) return 1; diff --git a/sound/core/init.c b/sound/core/init.c index db99b7fad6ad..45bbc4884ef0 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -384,10 +384,8 @@ int snd_card_disconnect(struct snd_card *card) return 0; } card->shutdown = 1; - spin_unlock(&card->files_lock); /* replace file->f_op with special dummy operations */ - spin_lock(&card->files_lock); list_for_each_entry(mfile, &card->files_list, list) { /* it's critical part, use endless loop */ /* we have no room to fail */ diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 6850d13aa98c..fe1ea03582cb 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -76,7 +76,8 @@ static void snd_malloc_dev_iram(struct 
snd_dma_buffer *dmab, size_t size) /* Assign the pool into private_data field */ dmab->private_data = pool; - dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr); + dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr, + PAGE_SIZE); } /** diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a7..fe27034f2846 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index f57c610d7523..0b03777d0111 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -693,6 +693,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, oss_buffer_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; + if (!oss_buffer_size) + return -EINVAL; oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); if (atomic_read(&substream->mmap_count)) { if (oss_buffer_size > runtime->oss.mmap_bytes) @@ -728,17 +730,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, min_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - min_period_size *= oss_frame_size; - min_period_size = roundup_pow_of_two(min_period_size); - if (oss_period_size < min_period_size) - oss_period_size = min_period_size; + if (min_period_size) { + min_period_size *= oss_frame_size; + min_period_size = roundup_pow_of_two(min_period_size); + if (oss_period_size < min_period_size) + oss_period_size = min_period_size; + } max_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - max_period_size *= oss_frame_size; - max_period_size = rounddown_pow_of_two(max_period_size); - if (oss_period_size > max_period_size) - oss_period_size = max_period_size; + if (max_period_size) { + max_period_size *= oss_frame_size; + max_period_size = rounddown_pow_of_two(max_period_size); + if (oss_period_size > max_period_size) + oss_period_size = max_period_size; + } oss_periods = oss_buffer_size / oss_period_size; @@ -1934,11 +1940,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val) { struct snd_pcm_runtime *runtime; + int fragshift; runtime = substream->runtime; if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; - runtime->oss.fragshift = val & 0xffff; + fragshift = val & 0xffff; + if (fragshift >= 31) + return -EINVAL; + runtime->oss.fragshift = fragshift; runtime->oss.maxfrags = (val >> 16) & 0xffff; if (runtime->oss.fragshift < 4) /* < 16 */ runtime->oss.fragshift = 4; diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 732bbede7ebf..da400da1fafe 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) return 0; } -snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames) +static snd_pcm_sframes_t plug_client_size(struct 
snd_pcm_substream *plug, + snd_pcm_uframes_t drv_frames, + bool check_size) { struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; int stream; @@ -209,21 +211,23 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p if (stream == SNDRV_PCM_STREAM_PLAYBACK) { plugin = snd_pcm_plug_last(plug); while (plugin && drv_frames > 0) { - if (drv_frames > plugin->buf_frames) - drv_frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) drv_frames = plugin->src_frames(plugin, drv_frames); + if (check_size && plugin->buf_frames && + drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; plugin = plugin_prev; } } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { plugin = snd_pcm_plug_first(plug); while (plugin && drv_frames > 0) { plugin_next = plugin->next; + if (check_size && plugin->buf_frames && + drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; if (plugin->dst_frames) drv_frames = plugin->dst_frames(plugin, drv_frames); - if (drv_frames > plugin->buf_frames) - drv_frames = plugin->buf_frames; plugin = plugin_next; } } else @@ -231,7 +235,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p return drv_frames; } -snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames) +static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t clt_frames, + bool check_size) { struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; snd_pcm_sframes_t frames; @@ -247,26 +253,28 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc plugin = snd_pcm_plug_first(plug); while (plugin && frames > 0) { plugin_next = plugin->next; + if (check_size && plugin->buf_frames && + frames > plugin->buf_frames) + frames = plugin->buf_frames; if (plugin->dst_frames) { frames = plugin->dst_frames(plugin, frames); if (frames < 0) return frames; } - if (frames > plugin->buf_frames) - frames = plugin->buf_frames; plugin = plugin_next; } } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { plugin = snd_pcm_plug_last(plug); while (plugin) { - if (frames > plugin->buf_frames) - frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) { frames = plugin->src_frames(plugin, frames); if (frames < 0) return frames; } + if (check_size && plugin->buf_frames && + frames > plugin->buf_frames) + frames = plugin->buf_frames; plugin = plugin_prev; } } else @@ -274,6 +282,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc return frames; } +snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t drv_frames) +{ + return plug_client_size(plug, drv_frames, false); +} + +snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t clt_frames) +{ + return plug_slave_size(plug, clt_frames, false); +} + static int snd_pcm_plug_formats(const struct snd_mask *mask, snd_pcm_format_t format) { @@ -630,7 +650,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st src_channels = dst_channels; plugin = next; } - return snd_pcm_plug_client_size(plug, frames); + return plug_client_size(plug, frames, true); } snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size) @@ -640,7 +660,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str snd_pcm_sframes_t frames = 
size; int err; - frames = snd_pcm_plug_slave_size(plug, frames); + frames = plug_slave_size(plug, frames, true); if (frames < 0) return frames; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 2236b5e0c1f2..1662573a4030 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -423,6 +423,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, no_delta_check: if (runtime->status->hw_ptr == new_hw_ptr) { + runtime->hw_ptr_jiffies = curr_jiffies; update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return 0; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 5c74ea2bb44b..0c5b7a54ca81 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -136,6 +136,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) } EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); +static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream) +{ + struct snd_pcm_group *group = &substream->self_group; + + if (substream->pcm->nonatomic) + mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING); + else + spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); +} + /** * snd_pcm_stream_unlock_irq - Unlock the PCM stream * @substream: PCM substream @@ -707,8 +717,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, runtime->boundary *= 2; /* clear the buffer for avoiding possible kernel info leaks */ - if (runtime->dma_area && !substream->ops->copy_user) - memset(runtime->dma_area, 0, runtime->dma_bytes); + if (runtime->dma_area && !substream->ops->copy_user) { + size_t size = runtime->dma_bytes; + + if (runtime->info & SNDRV_PCM_INFO_MMAP) + size = PAGE_ALIGN(size); + memset(runtime->dma_area, 0, size); + } snd_pcm_timer_resolution_change(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP); @@ -1994,6 +2009,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) } pcm_file = f.file->private_data; substream1 = pcm_file->substream; + + if (substream == substream1) { + res = -EINVAL; + goto _badf; + } + group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) { res = -ENOMEM; @@ -2022,7 +2043,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) snd_pcm_stream_unlock_irq(substream); snd_pcm_group_lock_irq(target_group, nonatomic); - snd_pcm_stream_lock(substream1); + snd_pcm_stream_lock_nested(substream1); snd_pcm_group_assign(substream1, target_group); refcount_inc(&target_group->refs); snd_pcm_stream_unlock(substream1); @@ -2038,7 +2059,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) static void relink_to_local(struct snd_pcm_substream *substream) { - snd_pcm_stream_lock(substream); + snd_pcm_stream_lock_nested(substream); snd_pcm_group_assign(substream, &substream->self_group); snd_pcm_stream_unlock(substream); } diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 8a12a7538d63..6a3543b8455f 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -72,11 +72,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file) } } -static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream) +static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime) +{ + return runtime->avail >= runtime->avail_min; +} + +static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream) { struct snd_rawmidi_runtime *runtime = substream->runtime; + unsigned long flags; + bool ready; - return runtime->avail >= runtime->avail_min; + spin_lock_irqsave(&runtime->lock, flags); + ready 
= __snd_rawmidi_ready(runtime); + spin_unlock_irqrestore(&runtime->lock, flags); + return ready; } static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream, @@ -97,6 +107,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work) runtime->event(runtime->substream); } +/* buffer refcount management: call with runtime->lock held */ +static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime) +{ + runtime->buffer_ref++; +} + +static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime) +{ + runtime->buffer_ref--; +} + static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) { struct snd_rawmidi_runtime *runtime; @@ -646,6 +667,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime, if (!newbuf) return -ENOMEM; spin_lock_irq(&runtime->lock); + if (runtime->buffer_ref) { + spin_unlock_irq(&runtime->lock); + kvfree(newbuf); + return -EBUSY; + } oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size = params->buffer_size; @@ -929,7 +955,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream, if (result > 0) { if (runtime->event) schedule_work(&runtime->event_work); - else if (snd_rawmidi_ready(substream)) + else if (__snd_rawmidi_ready(runtime)) wake_up(&runtime->sleep); } spin_unlock_irqrestore(&runtime->lock, flags); @@ -945,8 +971,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, long result = 0, count1; struct snd_rawmidi_runtime *runtime = substream->runtime; unsigned long appl_ptr; + int err = 0; spin_lock_irqsave(&runtime->lock, flags); + snd_rawmidi_buffer_ref(runtime); while (count > 0 && runtime->avail) { count1 = runtime->buffer_size - runtime->appl_ptr; if (count1 > count) @@ -965,16 +993,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, if (userbuf) { spin_unlock_irqrestore(&runtime->lock, flags); if (copy_to_user(userbuf + result, - runtime->buffer + appl_ptr, count1)) { - return result > 0 ? result : -EFAULT; - } + runtime->buffer + appl_ptr, count1)) + err = -EFAULT; spin_lock_irqsave(&runtime->lock, flags); + if (err) + goto out; } result += count1; count -= count1; } + out: + snd_rawmidi_buffer_unref(runtime); spin_unlock_irqrestore(&runtime->lock, flags); - return result; + return result > 0 ? result : err; } long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream, @@ -1003,7 +1034,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun result = 0; while (count > 0) { spin_lock_irq(&runtime->lock); - while (!snd_rawmidi_ready(substream)) { + while (!__snd_rawmidi_ready(runtime)) { wait_queue_entry_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { @@ -1020,9 +1051,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun return -ENODEV; if (signal_pending(current)) return result > 0 ? result : -ERESTARTSYS; - if (!runtime->avail) - return result > 0 ? result : -EIO; spin_lock_irq(&runtime->lock); + if (!runtime->avail) { + spin_unlock_irq(&runtime->lock); + return result > 0 ? 
result : -EIO; + } } spin_unlock_irq(&runtime->lock); count1 = snd_rawmidi_kernel_read1(substream, @@ -1160,7 +1193,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun runtime->avail += count; substream->bytes += count; if (count > 0) { - if (runtime->drain || snd_rawmidi_ready(substream)) + if (runtime->drain || __snd_rawmidi_ready(runtime)) wake_up(&runtime->sleep); } return count; @@ -1268,6 +1301,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, return -EAGAIN; } } + snd_rawmidi_buffer_ref(runtime); while (count > 0 && runtime->avail > 0) { count1 = runtime->buffer_size - runtime->appl_ptr; if (count1 > count) @@ -1299,6 +1333,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, } __end: count1 = runtime->avail < runtime->buffer_size; + snd_rawmidi_buffer_unref(runtime); spin_unlock_irqrestore(&runtime->lock, flags); if (count1) snd_rawmidi_output_trigger(substream, 1); @@ -1347,9 +1382,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, return -ENODEV; if (signal_pending(current)) return result > 0 ? result : -ERESTARTSYS; - if (!runtime->avail && !timeout) - return result > 0 ? result : -EIO; spin_lock_irq(&runtime->lock); + if (!runtime->avail && !timeout) { + spin_unlock_irq(&runtime->lock); + return result > 0 ? result : -EIO; + } } spin_unlock_irq(&runtime->lock); count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count); @@ -1429,6 +1466,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry, struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *substream; struct snd_rawmidi_runtime *runtime; + unsigned long buffer_size, avail, xruns; rmidi = entry->private_data; snd_iprintf(buffer, "%s\n\n", rmidi->name); @@ -1447,13 +1485,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry, " Owner PID : %d\n", pid_vnr(substream->pid)); runtime = substream->runtime; + spin_lock_irq(&runtime->lock); + buffer_size = runtime->buffer_size; + avail = runtime->avail; + spin_unlock_irq(&runtime->lock); snd_iprintf(buffer, " Mode : %s\n" " Buffer size : %lu\n" " Avail : %lu\n", runtime->oss ? 
"OSS compatible" : "native", - (unsigned long) runtime->buffer_size, - (unsigned long) runtime->avail); + buffer_size, avail); } } } @@ -1471,13 +1512,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry, " Owner PID : %d\n", pid_vnr(substream->pid)); runtime = substream->runtime; + spin_lock_irq(&runtime->lock); + buffer_size = runtime->buffer_size; + avail = runtime->avail; + xruns = runtime->xruns; + spin_unlock_irq(&runtime->lock); snd_iprintf(buffer, " Buffer size : %lu\n" " Avail : %lu\n" " Overruns : %lu\n", - (unsigned long) runtime->buffer_size, - (unsigned long) runtime->avail, - (unsigned long) runtime->xruns); + buffer_size, avail, xruns); } } } diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c index 17f913657304..250a92b18726 100644 --- a/sound/core/seq/oss/seq_oss.c +++ b/sound/core/seq/oss/seq_oss.c @@ -168,10 +168,19 @@ static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; + long rc; + dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; - return snd_seq_oss_ioctl(dp, cmd, arg); + + if (cmd != SNDCTL_SEQ_SYNC && + mutex_lock_interruptible(®ister_mutex)) + return -ERESTARTSYS; + rc = snd_seq_oss_ioctl(dp, cmd, arg); + if (cmd != SNDCTL_SEQ_SYNC) + mutex_unlock(®ister_mutex); + return rc; } #ifdef CONFIG_COMPAT diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index 11554d0412f0..1b8409ec2c97 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c @@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in if (info->is_midi) { struct midi_info minf; - snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); + if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf)) + return -ENXIO; inf->synth_type = SYNTH_TYPE_MIDI; inf->synth_subtype = 0; inf->nr_voices = 16; diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h index 9254c8dbe5e3..25d2d6b61007 100644 --- a/sound/core/seq/seq_queue.h +++ b/sound/core/seq/seq_queue.h @@ -26,10 +26,10 @@ struct snd_seq_queue { struct snd_seq_timer *timer; /* time keeper for this queue */ int owner; /* client that 'owns' the timer */ - unsigned int locked:1, /* timer is only accesibble by owner if set */ - klocked:1, /* kernel lock (after START) */ - check_again:1, - check_blocked:1; + bool locked; /* timer is only accesibble by owner if set */ + bool klocked; /* kernel lock (after START) */ + bool check_again; /* concurrent access happened during check */ + bool check_blocked; /* queue being checked */ unsigned int flags; /* status flags */ unsigned int info_flags; /* info for sync */ diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 9ccdad89c288..452b9eaca815 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -1035,6 +1035,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) return -ENOMEM; kctl->id.device = dev; kctl->id.subdevice = substr; + + /* Add the control before copying the id so that + * the numid field of the id is set in the copy. 
+ */ + err = snd_ctl_add(card, kctl); + if (err < 0) + return err; + switch (idx) { case ACTIVE_IDX: setup->active_id = kctl->id; @@ -1051,9 +1059,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) default: break; } - err = snd_ctl_add(card, kctl); - if (err < 0) - return err; } } } diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c index e69a4ef0d6bd..08c10ac9d6c8 100644 --- a/sound/drivers/opl3/opl3_synth.c +++ b/sound/drivers/opl3/opl3_synth.c @@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file, { struct snd_dm_fm_info info; + memset(&info, 0, sizeof(info)); + info.fm_mode = opl3->fm_mode; info.rhythm = opl3->rhythm; if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info))) diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c index 67d735e9a6a4..fea92e148790 100644 --- a/sound/firewire/amdtp-am824.c +++ b/sound/firewire/amdtp-am824.c @@ -82,7 +82,8 @@ int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate, if (err < 0) return err; - s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; + if (s->direction == AMDTP_OUT_STREAM) + s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; p->pcm_channels = pcm_channels; p->midi_ports = midi_ports; diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h index 16c7f6605511..26e7cb555d3c 100644 --- a/sound/firewire/amdtp-stream-trace.h +++ b/sound/firewire/amdtp-stream-trace.h @@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet, __entry->irq, __entry->index, __print_array(__get_dynamic_array(cip_header), - __get_dynamic_array_len(cip_header), - sizeof(u8))) + __get_dynamic_array_len(cip_header), 1)) ); #endif diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c index 45b740f44c45..c362eb38ab90 100644 --- a/sound/firewire/bebob/bebob_hwdep.c +++ b/sound/firewire/bebob/bebob_hwdep.c @@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, } memset(&event, 0, sizeof(event)); + count = min_t(long, count, sizeof(event.lock_status)); if (bebob->dev_lock_changed) { event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event.lock_status.status = (bebob->dev_lock_count > 0); bebob->dev_lock_changed = false; - - count = min_t(long, count, sizeof(event.lock_status)); } spin_unlock_irq(&bebob->lock); diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c index f6a8627ae5a2..0aa3c56cf390 100644 --- a/sound/firewire/dice/dice-stream.c +++ b/sound/firewire/dice/dice-stream.c @@ -489,11 +489,10 @@ void snd_dice_stream_stop_duplex(struct snd_dice *dice) struct reg_params tx_params, rx_params; if (dice->substreams_counter == 0) { - if (get_register_params(dice, &tx_params, &rx_params) >= 0) { - amdtp_domain_stop(&dice->domain); + if (get_register_params(dice, &tx_params, &rx_params) >= 0) finish_session(dice, &tx_params, &rx_params); - } + amdtp_domain_stop(&dice->domain); release_resources(dice); } } diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5fc0e7c024..0e4b0eac3015 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -14,6 +14,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -175,14 +176,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. 
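The opl3_synth.c hunk above zeroes `info` before filling it because a struct copied to user space can carry uninitialized padding bytes from the kernel stack; memset() of the whole struct closes that infoleak. A userspace sketch of the same pattern, with an illustrative struct layout:

/* Compilers may leave padding bytes uninitialized, so copying a raw
 * struct out can leak stale stack contents; zero it first.
 */
#include <stdio.h>
#include <string.h>

struct fm_info {
	unsigned char fm_mode;	/* 1 byte ... */
	/* padding is typically inserted here */
	unsigned int rhythm;	/* ... before a 4-byte member */
};

int main(void)
{
	struct fm_info info;

	memset(&info, 0, sizeof(info));	/* clears members *and* padding */
	info.fm_mode = 1;
	info.rhythm = 0;

	/* every byte is now well-defined before it leaves this function */
	for (size_t i = 0; i < sizeof(info); i++)
		printf("%02x ", ((unsigned char *)&info)[i]);
	printf("\n");
	return 0;
}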
*/ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {} diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c index 0e4c3a9ed5e4..76ae568489ef 100644 --- a/sound/firewire/fireface/ff-protocol-latter.c +++ b/sound/firewire/fireface/ff-protocol-latter.c @@ -107,18 +107,18 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) int err; // Set the number of data blocks transferred in a second. - if (rate % 32000 == 0) - code = 0x00; + if (rate % 48000 == 0) + code = 0x04; else if (rate % 44100 == 0) code = 0x02; - else if (rate % 48000 == 0) - code = 0x04; + else if (rate % 32000 == 0) + code = 0x00; else return -EINVAL; if (rate >= 64000 && rate < 128000) code |= 0x08; - else if (rate >= 128000 && rate < 192000) + else if (rate >= 128000) code |= 0x10; reg = cpu_to_le32(code); @@ -140,7 +140,7 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) if (curr_rate == rate) break; } - if (count == 10) + if (count > 10) return -ETIMEDOUT; for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); ++i) { diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c index 7f82762ccc8c..ee7122c461d4 100644 --- a/sound/firewire/fireface/ff-transaction.c +++ b/sound/firewire/fireface/ff-transaction.c @@ -88,7 +88,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port) /* Set interval to next transaction. */ ff->next_ktime[port] = ktime_add_ns(ktime_get(), - ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250); + ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250)); if (quad_count == 1) tcode = TCODE_WRITE_QUADLET_REQUEST; diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c index 0f533f5bd960..9f8c53b39f95 100644 --- a/sound/firewire/fireworks/fireworks_transaction.c +++ b/sound/firewire/fireworks/fireworks_transaction.c @@ -123,7 +123,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) t = (struct snd_efw_transaction *)data; length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); - spin_lock_irq(&efw->lock); + spin_lock(&efw->lock); if (efw->push_ptr < efw->pull_ptr) capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); @@ -190,7 +190,7 @@ handle_resp_for_user(struct fw_card *card, int generation, int source, copy_resp_to_buf(efw, data, length, rcode); end: - spin_unlock_irq(&instances_lock); + spin_unlock(&instances_lock); } static void diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c index 90288b4b4637..a073cece4a7d 100644 --- a/sound/firewire/tascam/tascam-transaction.c +++ b/sound/firewire/tascam/tascam-transaction.c @@ -209,7 +209,7 @@ static void midi_port_work(struct work_struct *work) /* Set interval to next transaction. */ port->next_ktime = ktime_add_ns(ktime_get(), - port->consume_bytes * 8 * NSEC_PER_SEC / 31250); + port->consume_bytes * 8 * (NSEC_PER_SEC / 31250)); /* Start this transaction. 
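The two MIDI-interval hunks above (ff-transaction.c and tascam-transaction.c) regroup the constants so the per-bit cost is computed first: MIDI runs at 31250 baud, so NSEC_PER_SEC / 31250 is exactly 32000 ns per bit, and the remaining multiply stays small instead of forming a bytes * 8 * 10^9 intermediate that exceeds 32-bit range. A worked sketch of the arithmetic:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	unsigned int bytes = 3;	/* e.g. a 3-byte MIDI message */

	/* 8 * (NSEC_PER_SEC / 31250) = 8 * 32000 = 256000 ns per byte */
	long ns = bytes * 8 * (NSEC_PER_SEC / 31250);

	/* the old grouping first computes bytes * 8 * NSEC_PER_SEC,
	 * which no longer fits in 32 bits even for tiny byte counts */
	printf("%u bytes take %ld ns on the wire\n", bytes, ns);
	return 0;
}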
*/ port->idling = false; diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index addc464503bc..0175e3e835ea 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -39,9 +39,6 @@ static const struct snd_tscm_spec model_specs[] = { .midi_capture_ports = 2, .midi_playback_ports = 4, }, - // This kernel module doesn't support FE-8 because the most of features - // can be implemented in userspace without any specific support of this - // module. }; static int identify_model(struct snd_tscm *tscm) @@ -211,11 +208,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. { .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support FE-8 because the most of features + // can be implemented in userspace without any specific support of this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800004, }, {} }; diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c index 09ff209df4a3..c87187f63573 100644 --- a/sound/hda/ext/hdac_ext_controller.c +++ b/sound/hda/ext/hdac_ext_controller.c @@ -148,6 +148,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus, return NULL; if (bus->idx != bus_idx) return NULL; + if (addr < 0 || addr > 31) + return NULL; list_for_each_entry(hlink, &bus->hlink_list, list) { for (i = 0; i < HDA_MAX_CODECS; i++) { diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 8f19876244eb..53be2cac98e7 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -158,6 +158,7 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work) struct hdac_driver *drv; unsigned int rp, caddr, res; + spin_lock_irq(&bus->reg_lock); while (bus->unsol_rp != bus->unsol_wp) { rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; bus->unsol_rp = rp; @@ -169,10 +170,13 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work) codec = bus->caddr_tbl[caddr & 0x0f]; if (!codec || !codec->dev.driver) continue; + spin_unlock_irq(&bus->reg_lock); drv = drv_to_hdac_driver(codec->dev.driver); if (drv->unsol_event) drv->unsol_event(codec, res); + spin_lock_irq(&bus->reg_lock); } + spin_unlock_irq(&bus->reg_lock); } /** diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 9f3e37511408..b84e12f4f804 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -57,6 +57,7 @@ int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus, codec->addr = addr; codec->type = HDA_DEV_CORE; mutex_init(&codec->widget_lock); + mutex_init(&codec->regmap_lock); pm_runtime_set_active(&codec->dev); pm_runtime_get_noresume(&codec->dev); atomic_set(&codec->in_pm, 0); @@ -126,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); void snd_hdac_device_exit(struct hdac_device *codec) { pm_runtime_put_noidle(&codec->dev); + /* keep balance of 
runtime PM child_count in parent device */ + pm_runtime_set_suspended(&codec->dev); snd_hdac_bus_remove_device(codec->bus, codec); kfree(codec->vendor_name); kfree(codec->chip_name); diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 286361ecd640..49780399c284 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -425,12 +425,29 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_add_vendor_verb); static int reg_raw_write(struct hdac_device *codec, unsigned int reg, unsigned int val) { + int err; + + mutex_lock(&codec->regmap_lock); if (!codec->regmap) - return hda_reg_write(codec, reg, val); + err = hda_reg_write(codec, reg, val); else - return regmap_write(codec->regmap, reg, val); + err = regmap_write(codec->regmap, reg, val); + mutex_unlock(&codec->regmap_lock); + return err; } +/* a helper macro to call @func_call; retry with power-up if failed */ +#define CALL_RAW_FUNC(codec, func_call) \ + ({ \ + int _err = func_call; \ + if (_err == -EAGAIN) { \ + _err = snd_hdac_power_up_pm(codec); \ + if (_err >= 0) \ + _err = func_call; \ + snd_hdac_power_down_pm(codec); \ + } \ + _err;}) + /** * snd_hdac_regmap_write_raw - write a pseudo register with power mgmt * @codec: the codec object @@ -442,42 +459,29 @@ static int reg_raw_write(struct hdac_device *codec, unsigned int reg, int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, unsigned int val) { - int err; - - err = reg_raw_write(codec, reg, val); - if (err == -EAGAIN) { - err = snd_hdac_power_up_pm(codec); - if (err >= 0) - err = reg_raw_write(codec, reg, val); - snd_hdac_power_down_pm(codec); - } - return err; + return CALL_RAW_FUNC(codec, reg_raw_write(codec, reg, val)); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); static int reg_raw_read(struct hdac_device *codec, unsigned int reg, unsigned int *val, bool uncached) { + int err; + + mutex_lock(&codec->regmap_lock); if (uncached || !codec->regmap) - return hda_reg_read(codec, reg, val); + err = hda_reg_read(codec, reg, val); else - return regmap_read(codec->regmap, reg, val); + err = regmap_read(codec->regmap, reg, val); + mutex_unlock(&codec->regmap_lock); + return err; } static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, unsigned int *val, bool uncached) { - int err; - - err = reg_raw_read(codec, reg, val, uncached); - if (err == -EAGAIN) { - err = snd_hdac_power_up_pm(codec); - if (err >= 0) - err = reg_raw_read(codec, reg, val, uncached); - snd_hdac_power_down_pm(codec); - } - return err; + return CALL_RAW_FUNC(codec, reg_raw_read(codec, reg, val, uncached)); } /** @@ -504,6 +508,35 @@ int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, return __snd_hdac_regmap_read_raw(codec, reg, val, true); } +static int reg_raw_update(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val) +{ + unsigned int orig; + bool change; + int err; + + mutex_lock(&codec->regmap_lock); + if (codec->regmap) { + err = regmap_update_bits_check(codec->regmap, reg, mask, val, + &change); + if (!err) + err = change ? 
1 : 0; + } else { + err = hda_reg_read(codec, reg, &orig); + if (!err) { + val &= mask; + val |= orig & ~mask; + if (val != orig) { + err = hda_reg_write(codec, reg, val); + if (!err) + err = 1; + } + } + } + mutex_unlock(&codec->regmap_lock); + return err; +} + /** * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt * @codec: the codec object @@ -515,20 +548,58 @@ int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, */ int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val) +{ + return CALL_RAW_FUNC(codec, reg_raw_update(codec, reg, mask, val)); +} +EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw); + +static int reg_raw_update_once(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val) { unsigned int orig; int err; - val &= mask; - err = snd_hdac_regmap_read_raw(codec, reg, &orig); + if (!codec->regmap) + return reg_raw_update(codec, reg, mask, val); + + mutex_lock(&codec->regmap_lock); + regcache_cache_only(codec->regmap, true); + err = regmap_read(codec->regmap, reg, &orig); + regcache_cache_only(codec->regmap, false); if (err < 0) - return err; - val |= orig & ~mask; - if (val == orig) - return 0; - err = snd_hdac_regmap_write_raw(codec, reg, val); - if (err < 0) - return err; - return 1; + err = regmap_update_bits(codec->regmap, reg, mask, val); + mutex_unlock(&codec->regmap_lock); + return err; } -EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw); + +/** + * snd_hdac_regmap_update_raw_once - initialize the register value only once + * @codec: the codec object + * @reg: pseudo register + * @mask: bit mask to update + * @val: value to update + * + * Performs the update of the register bits only once when the register + * hasn't been initialized yet. Used in HD-audio legacy driver. 
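The CALL_RAW_FUNC macro above wraps a register access and, on -EAGAIN, powers the codec up and retries once; the GNU C statement expression ({ ... }) lets the macro yield the final status. A self-contained userspace sketch of the same retry shape, with power_up()/power_down() as stand-ins for snd_hdac_power_up_pm()/snd_hdac_power_down_pm():

#include <stdio.h>
#include <errno.h>

static int powered;
static int power_up(void)    { powered = 1; return 0; }
static void power_down(void) { powered = 0; }

/* fails with -EAGAIN unless the device is powered */
static int reg_access(int val)
{
	return powered ? val : -EAGAIN;
}

#define CALL_RETRY(func_call)				\
	({						\
		int _err = func_call;			\
		if (_err == -EAGAIN) {			\
			_err = power_up();		\
			if (_err >= 0)			\
				_err = func_call;	\
			power_down();			\
		}					\
		_err;					\
	})

int main(void)
{
	printf("result=%d\n", CALL_RETRY(reg_access(42)));	/* retried: 42 */
	return 0;
}

Like the kernel macro, this evaluates func_call by name twice, so the argument must be an idempotent call rather than an expression with side effects of its own.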
+ * Returns zero if successful or a negative error code + */ +int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return CALL_RAW_FUNC(codec, reg_raw_update_once(codec, reg, mask, val)); +} +EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); + +/** + * snd_hdac_regmap_sync - sync out the cached values for PM resume + * @codec: the codec object + */ +void snd_hdac_regmap_sync(struct hdac_device *codec) +{ + if (codec->regmap) { + mutex_lock(&codec->regmap_lock); + regcache_sync(codec->regmap); + mutex_unlock(&codec->regmap_lock); + } +} +EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c index daede96f28ee..6ed80a4cba01 100644 --- a/sound/hda/intel-nhlt.c +++ b/sound/hda/intel-nhlt.c @@ -64,18 +64,49 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) struct nhlt_endpoint *epnt; struct nhlt_dmic_array_config *cfg; struct nhlt_vendor_dmic_array_config *cfg_vendor; + struct nhlt_fmt *fmt_configs; unsigned int dmic_geo = 0; - u8 j; + u16 max_ch = 0; + u8 i, j; if (!nhlt) return 0; - epnt = (struct nhlt_endpoint *)nhlt->desc; + if (nhlt->header.length <= sizeof(struct acpi_table_header)) { + dev_warn(dev, "Invalid DMIC description table\n"); + return 0; + } - for (j = 0; j < nhlt->endpoint_count; j++) { - if (epnt->linktype == NHLT_LINK_DMIC) { - cfg = (struct nhlt_dmic_array_config *) - (epnt->config.caps); + for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++, + epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) { + + if (epnt->linktype != NHLT_LINK_DMIC) + continue; + + cfg = (struct nhlt_dmic_array_config *)(epnt->config.caps); + fmt_configs = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size); + + /* find max number of channels based on format_configuration */ + if (fmt_configs->fmt_count) { + dev_dbg(dev, "%s: found %d format definitions\n", + __func__, fmt_configs->fmt_count); + + for (i = 0; i < fmt_configs->fmt_count; i++) { + struct wav_fmt_ext *fmt_ext; + + fmt_ext = &fmt_configs->fmt_config[i].fmt_ext; + + if (fmt_ext->fmt.channels > max_ch) + max_ch = fmt_ext->fmt.channels; + } + dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch); + } else { + dev_dbg(dev, "%s: No format information found\n", __func__); + } + + if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) { + dmic_geo = max_ch; + } else { switch (cfg->array_type) { case NHLT_MIC_ARRAY_2CH_SMALL: case NHLT_MIC_ARRAY_2CH_BIG: @@ -92,13 +123,23 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) dmic_geo = cfg_vendor->nb_mics; break; default: - dev_warn(dev, "undefined DMIC array_type 0x%0x\n", - cfg->array_type); + dev_warn(dev, "%s: undefined DMIC array_type 0x%0x\n", + __func__, cfg->array_type); + } + + if (dmic_geo > 0) { + dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo); + } + if (max_ch > dmic_geo) { + dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n", + __func__, max_ch, dmic_geo); } } - epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length); } + dev_dbg(dev, "%s: dmic number %d max_ch %d\n", + __func__, dmic_geo, max_ch); + return dmic_geo; } EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo); diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c index 9be89377171b..b4e9b0de3b42 100644 --- a/sound/isa/es1688/es1688.c +++ b/sound/isa/es1688/es1688.c @@ -267,8 +267,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, return error; } error = 
snd_es1688_probe(card, dev); - if (error < 0) + if (error < 0) { + snd_card_free(card); return error; + } pnp_set_card_drvdata(pcard, card); snd_es968_pnp_is_probed = 1; return 0; diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c index 0458934de1c7..9ca5c83de8a7 100644 --- a/sound/isa/opti9xx/miro.c +++ b/sound/isa/opti9xx/miro.c @@ -867,10 +867,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg, spin_unlock_irqrestore(&chip->lock, flags); } +static inline void snd_miro_write_mask(struct snd_miro *chip, + unsigned char reg, unsigned char value, unsigned char mask) +{ + unsigned char oldval = snd_miro_read(chip, reg); -#define snd_miro_write_mask(chip, reg, value, mask) \ - snd_miro_write(chip, reg, \ - (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask))) + snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask)); +} /* * Proc Interface diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c index fb36bb5d55df..fb87eedc8121 100644 --- a/sound/isa/opti9xx/opti92x-ad1848.c +++ b/sound/isa/opti9xx/opti92x-ad1848.c @@ -317,10 +317,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg, } -#define snd_opti9xx_write_mask(chip, reg, value, mask) \ - snd_opti9xx_write(chip, reg, \ - (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask))) +static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip, + unsigned char reg, unsigned char value, unsigned char mask) +{ + unsigned char oldval = snd_opti9xx_read(chip, reg); + snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask)); +} static int snd_opti9xx_configure(struct snd_opti9xx *chip, long port, diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c index 433e32e254f9..91c96fad0618 100644 --- a/sound/isa/sb/emu8000.c +++ b/sound/isa/sb/emu8000.c @@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu) memset(emu->controls, 0, sizeof(emu->controls)); for (i = 0; i < EMU8000_NUM_CONTROLS; i++) { - if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) + if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) { + emu->controls[i] = NULL; goto __error; + } } return 0; diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index 4ad0ff0c4508..69960cf1bb51 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c @@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p) spin_lock_init(&p->q_lock); - if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) + if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) { + p->qsound_switch = NULL; goto __error; - if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) + } + if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) { + p->qsound_space = NULL; goto __error; + } return 0; diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index c5b1d5900eed..d6420d224d09 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1171,7 +1171,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) "alias for %d\n", header->number, header->hdr.a.OriginalSample); - + + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + munge_int32 (header->number, &alias_hdr[0], 2); munge_int32 
(header->hdr.a.OriginalSample, &alias_hdr[2], 2); munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), @@ -1202,6 +1205,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) int num_samples; unsigned char *msample_hdr; + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); if (! msample_hdr) return -ENOMEM; diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 496dcde9715d..9790f5108a16 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -343,7 +343,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, struct hpi_message hm; struct hpi_response hr; struct hpi_adapter adapter; - struct hpi_pci pci; + struct hpi_pci pci = { 0 }; memset(&adapter, 0, sizeof(adapter)); @@ -499,7 +499,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, return 0; err: - for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { + while (--idx >= 0) { if (pci.ap_mem_base[idx]) { iounmap(pci.ap_mem_base[idx]); pci.ap_mem_base[idx] = NULL; diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 478412e0aa3c..7aedaeb7a196 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -537,7 +537,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; } diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index 5b888b795f7e..c07a9e735733 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -766,7 +766,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned rate = 48000 / 9; /* - * We can not capture at at rate greater than the Input Rate (48000). + * We can not capture at a rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. 
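The wavefront_synth.c hunks above validate header->number before use: it arrives from user space and is later used as a table index, so it must be checked against the table capacity first. A minimal sketch of that bounds check, with WF_MAX_SAMPLE standing in for the driver's sample-table size:

#include <stdio.h>
#include <errno.h>

#define WF_MAX_SAMPLE 512

static char sample_status[WF_MAX_SAMPLE];

static int set_sample_status(unsigned int number, char status)
{
	if (number >= WF_MAX_SAMPLE)	/* reject out-of-range index */
		return -EINVAL;
	sample_status[number] = status;
	return 0;
}

int main(void)
{
	printf("%d\n", set_sample_status(10, 1));	/* 0: accepted */
	printf("%d\n", set_sample_status(9999, 1));	/* -EINVAL: rejected */
	return 0;
}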
*/ if (rate > 48000) diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c index 715ead59613d..0bef823c5f61 100644 --- a/sound/pci/cs46xx/dsp_spos_scb_lib.c +++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c @@ -1716,7 +1716,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { - /* remove AsynchFGTxSCB and and PCMSerialInput_II */ + /* remove AsynchFGTxSCB and PCMSerialInput_II */ cs46xx_dsp_disable_spdif_out (chip); /* save state */ diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c index 3cd4b7dad945..b1cc4cdc6c41 100644 --- a/sound/pci/ctxfi/cthw20k2.c +++ b/sound/pci/ctxfi/cthw20k2.c @@ -991,7 +991,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf) if (idx < 4) { /* S/PDIF output */ - switch ((conf & 0x7)) { + switch ((conf & 0xf)) { case 1: set_field(&ctl->txctl[idx], ATXCTL_NUC, 0); break; diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index ca9125726be2..8596ae4c2bde 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2198,7 +2198,6 @@ static int snd_echo_resume(struct device *dev) if (err < 0) { kfree(commpage_bak); dev_err(dev, "resume init_hw err=%d\n", err); - snd_echo_free(chip); return err; } @@ -2225,7 +2224,6 @@ static int snd_echo_resume(struct device *dev) if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { dev_err(chip->card->dev, "cannot grab irq\n"); - snd_echo_free(chip); return -EBUSY; } chip->irq = pci->irq; diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 2c6d2becfe1a..824f4ac1a8ce 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp) if (a->type != b->type) return (int)(a->type - b->type); + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ + if (a->is_headset_mic && b->is_headphone_mic) + return -1; /* don't swap */ + else if (a->is_headphone_mic && b->is_headset_mic) + return 1; /* swap */ + /* In case one has boost and the other one has not, pick the one with boost first. 
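The hda_auto_parser.c hunk above extends the input-sorting comparator: inputs are ordered by type first, then a headset mic is kept ahead of a headphone mic, then pins with a boost amp come first. A standalone sketch of the same tie-break chain, with field names mirroring the hunk:

#include <stdio.h>
#include <stdlib.h>

struct input_pin {
	int type;
	int is_headset_mic;
	int is_headphone_mic;
	int has_boost_on_pin;
};

static int compare_input_type(const void *ap, const void *bp)
{
	const struct input_pin *a = ap, *b = bp;

	if (a->type != b->type)
		return a->type - b->type;
	if (a->is_headset_mic && b->is_headphone_mic)
		return -1;	/* don't swap: headset mic stays first */
	if (a->is_headphone_mic && b->is_headset_mic)
		return 1;	/* swap: headset mic goes first */
	return b->has_boost_on_pin - a->has_boost_on_pin;
}

int main(void)
{
	struct input_pin pins[] = {
		{ 0, 0, 1, 0 },	/* headphone mic */
		{ 0, 1, 0, 0 },	/* headset mic */
	};

	qsort(pins, 2, sizeof(pins[0]), compare_input_type);
	printf("first is %s\n", pins[0].is_headset_mic ? "headset" : "headphone");
	return 0;
}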
*/ return (int)(b->has_boost_on_pin - a->has_boost_on_pin); diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index b7d9160ed868..c6e1e03a5e4d 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -290,8 +290,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; + int chs = get_amp_channels(kcontrol); + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { - ucontrol->value.integer.value[0] = + if (chs & 1) + ucontrol->value.integer.value[0] = beep->enabled; + if (chs & 2) ucontrol->value.integer.value[1] = beep->enabled; return 0; } diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 6a8564566375..17a25e453f60 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -47,6 +47,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) if (codec->bus->shutdown) return; + /* ignore unsol events during system suspend/resume */ + if (codec->core.dev.power.power_state.event != PM_EVENT_ON) + return; + if (codec->patch_ops.unsol_event) codec->patch_ops.unsol_event(codec, ev); } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 6cb72336433a..326f95ce5ceb 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -641,8 +641,18 @@ static void hda_jackpoll_work(struct work_struct *work) struct hda_codec *codec = container_of(work, struct hda_codec, jackpoll_work.work); - snd_hda_jack_set_dirty_all(codec); - snd_hda_jack_poll_all(codec); + /* for non-polling trigger: we need nothing if already powered on */ + if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core)) + return; + + /* the power-up/down sequence triggers the runtime resume */ + snd_hda_power_up_pm(codec); + /* update jacks manually if polling is required, too */ + if (codec->jackpoll_interval) { + snd_hda_jack_set_dirty_all(codec); + snd_hda_jack_poll_all(codec); + } + snd_hda_power_down_pm(codec); if (!codec->jackpoll_interval) return; @@ -1267,6 +1277,18 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir, } EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); +static unsigned int encode_amp(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx) +{ + unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); + + /* enable fake mute if no h/w mute but min=mute */ + if ((query_amp_caps(codec, nid, dir) & + (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) + cmd |= AC_AMP_FAKE_MUTE; + return cmd; +} + /** * snd_hda_codec_amp_update - update the AMP mono value * @codec: HD-audio codec @@ -1282,12 +1304,8 @@ EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch, int dir, int idx, int mask, int val) { - unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); + unsigned int cmd = encode_amp(codec, nid, ch, dir, idx); - /* enable fake mute if no h/w mute but min=mute */ - if ((query_amp_caps(codec, nid, dir) & - (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) - cmd |= AC_AMP_FAKE_MUTE; return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val); } EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update); @@ -1335,16 +1353,11 @@ EXPORT_SYMBOL_GPL(snd_hda_codec_amp_stereo); int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch, int dir, int idx, int mask, int val) { - int orig; + unsigned int cmd = encode_amp(codec, nid, ch, dir, idx); 
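The encode_amp() refactor above ensures both amp-update paths build the command word identically: the fake-mute flag (set when the amp has no hardware mute but its minimum level acts as mute) now lives in one helper instead of being duplicated. A sketch of that factoring; the bit layout below is illustrative only, not the real HDA verb encoding:

#include <stdio.h>

#define CAP_MUTE      0x1	/* amp has a hardware mute */
#define CAP_MIN_MUTE  0x2	/* minimum volume behaves as mute */
#define CMD_FAKE_MUTE 0x80000000u

static unsigned int query_caps(unsigned int nid) { (void)nid; return CAP_MIN_MUTE; }

static unsigned int encode_amp(unsigned int nid, int ch, int dir, int idx)
{
	unsigned int cmd = (nid << 8) | (ch << 6) | (dir << 5) | idx;

	/* no h/w mute but min=mute: emulate mute in software */
	if ((query_caps(nid) & (CAP_MUTE | CAP_MIN_MUTE)) == CAP_MIN_MUTE)
		cmd |= CMD_FAKE_MUTE;
	return cmd;
}

int main(void)
{
	printf("cmd=%#x\n", encode_amp(0x14, 0, 1, 0));
	return 0;
}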
if (!codec->core.regmap) return -EINVAL; - regcache_cache_only(codec->core.regmap, true); - orig = snd_hda_codec_amp_read(codec, nid, ch, dir, idx); - regcache_cache_only(codec->core.regmap, false); - if (orig >= 0) - return 0; - return snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val); + return snd_hdac_regmap_update_raw_once(&codec->core, cmd, mask, val); } EXPORT_SYMBOL_GPL(snd_hda_codec_amp_init); @@ -1785,7 +1798,7 @@ int snd_hda_codec_reset(struct hda_codec *codec) return -EBUSY; /* OK, let it free */ - snd_hdac_device_unregister(&codec->core); + device_release_driver(hda_codec_dev(codec)); /* allow device access again */ snd_hda_unlock_devices(bus); @@ -2905,8 +2918,7 @@ static void hda_call_codec_resume(struct hda_codec *codec) else { if (codec->patch_ops.init) codec->patch_ops.init(codec); - if (codec->core.regmap) - regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); } if (codec->jackpoll_interval) @@ -2922,6 +2934,10 @@ static int hda_codec_runtime_suspend(struct device *dev) struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + cancel_delayed_work_sync(&codec->jackpoll_work); state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || @@ -2936,6 +2952,10 @@ static int hda_codec_runtime_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + codec_display_power(codec, true); snd_hdac_codec_link_up(&codec->core); hda_call_codec_resume(codec); @@ -2948,18 +2968,14 @@ static int hda_codec_runtime_resume(struct device *dev) static int hda_codec_force_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); - bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used; int ret; - /* The get/put pair below enforces the runtime resume even if the - * device hasn't been used at suspend time. This trick is needed to - * update the jack state change during the sleep. - */ - if (forced_resume) - pm_runtime_get_noresume(dev); ret = pm_runtime_force_resume(dev); - if (forced_resume) - pm_runtime_put(dev); + /* schedule jackpoll work for jack detection update */ + if (codec->jackpoll_interval || + (pm_runtime_suspended(dev) && hda_codec_need_resume(codec))) + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); return ret; } @@ -3410,7 +3426,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save); * @nid: NID to check / update * * Check whether the given NID is in the amp list. If it's in the list, - * check the current AMP status, and update the the power-status according + * check the current AMP status, and update the power-status according * to the mute status. * * This function is supposed to be set or called from the check_power_status diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 76b507058cb4..6a159c6c2f54 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -613,13 +613,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) 20, 178000000); - /* by some reason, the playback stream stalls on PulseAudio with - * tsched=1 when a capture stream triggers. Until we figure out the - * real cause, disable tsched mode by telling the PCM info flag. 
- */ - if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) - runtime->hw.info |= SNDRV_PCM_INFO_BATCH; - if (chip->align_buffer_size) /* constrain buffer sizes to be multiple of 128 bytes. This is more efficient in terms of memory @@ -1159,16 +1152,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) active = true; - /* clear rirb int */ status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { + /* + * Clearing the interrupt status here ensures that no + * interrupt gets masked after the RIRB wp is read in + * snd_hdac_bus_update_rirb. This avoids a possible + * race condition where codec response in RIRB may + * remain unserviced by IRQ, eventually falling back + * to polling mode in azx_rirb_get_response. + */ + azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); snd_hdac_bus_update_rirb(bus); } - azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 82e26442724b..9da7a06d024f 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -143,6 +143,7 @@ struct azx { unsigned int align_buffer_size:1; unsigned int region_requested:1; unsigned int disabled:1; /* disabled by vga_switcheroo */ + unsigned int pm_prepared:1; /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 10d502328b76..efceeae09045 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -813,7 +813,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, } } -/* sync power of each widget in the the given path */ +/* sync power of each widget in the given path */ static hda_nid_t path_power_update(struct hda_codec *codec, struct nid_path *path, bool allow_powerdown) @@ -1364,16 +1364,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs, struct nid_path *path; hda_nid_t pin = pins[i]; - path = snd_hda_get_path_from_idx(codec, path_idx[i]); - if (path) { - badness += assign_out_path_ctls(codec, path); - continue; + if (!spec->obey_preferred_dacs) { + path = snd_hda_get_path_from_idx(codec, path_idx[i]); + if (path) { + badness += assign_out_path_ctls(codec, path); + continue; + } } dacs[i] = get_preferred_dac(codec, pin); if (dacs[i]) { if (is_dac_already_used(codec, dacs[i])) badness += bad->shared_primary; + } else if (spec->obey_preferred_dacs) { + badness += BAD_NO_PRIMARY_DAC; } if (!dacs[i]) @@ -4013,7 +4017,7 @@ int snd_hda_gen_add_micmute_led(struct hda_codec *codec, spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE; spec->micmute_led.capture = 0; - spec->micmute_led.led_value = 0; + spec->micmute_led.led_value = -1; spec->micmute_led.old_hook = spec->cap_sync_hook; spec->micmute_led.update = hook; spec->cap_sync_hook = update_micmute_led; @@ -4401,7 +4405,7 @@ EXPORT_SYMBOL_GPL(snd_hda_gen_fix_pin_power); */ /* check each pin in the given array; returns true if any of them is plugged */ -static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins) +static bool detect_jacks(struct hda_codec *codec, int num_pins, const hda_nid_t *pins) { int i; bool present = false; @@ -4420,7 +4424,7 @@ static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins) } /* standard HP/line-out auto-mute helper */ -static void do_automute(struct hda_codec *codec, int 
num_pins, hda_nid_t *pins, +static void do_automute(struct hda_codec *codec, int num_pins, const hda_nid_t *pins, int *paths, bool mute) { struct hda_gen_spec *spec = codec->spec; @@ -6027,7 +6031,7 @@ int snd_hda_gen_init(struct hda_codec *codec) /* call init functions of standard auto-mute helpers */ update_automute_all(codec); - regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); if (spec->vmaster_mute.sw_kctl && spec->vmaster_mute.hook) snd_hda_sync_vmaster_hook(&spec->vmaster_mute); diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index fb9f1a90238b..e728df6145ad 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -236,6 +236,7 @@ struct hda_gen_spec { unsigned int power_down_unused:1; /* power down unused widgets */ unsigned int dac_min_mute:1; /* minimal = mute for DACs */ unsigned int suppress_vmaster:1; /* don't create vmaster kctls */ + unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */ /* other internal flags */ unsigned int no_analog:1; /* digital I/O only */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 85beb172d810..ebb1ee69dd0c 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -983,7 +983,7 @@ static void __azx_runtime_suspend(struct azx *chip) display_power(chip, false); } -static void __azx_runtime_resume(struct azx *chip, bool from_rt) +static void __azx_runtime_resume(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); @@ -1000,11 +1000,15 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) azx_init_pci(chip); hda_intel_init_chip(chip, true); - if (status && from_rt) { - list_for_each_codec(codec, &chip->bus) - if (status & (1 << codec->addr)) - schedule_delayed_work(&codec->jackpoll_work, - codec->jackpoll_interval); + /* Avoid codec resume if runtime resume is for system suspend */ + if (!chip->pm_prepared) { + list_for_each_codec(codec, &chip->bus) { + if (codec->relaxed_resume) + continue; + + if (codec->forced_resume || (status & (1 << codec->addr))) + pm_request_resume(hda_codec_dev(codec)); + } } /* power down again for link-controlled chips */ @@ -1013,6 +1017,39 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) } #ifdef CONFIG_PM_SLEEP +static int azx_prepare(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + + if (!azx_is_pm_ready(card)) + return 0; + + chip = card->private_data; + chip->pm_prepared = 1; + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + + flush_work(&azx_bus(chip)->unsol_work); + + /* HDA controller always requires different WAKEEN for runtime suspend + * and system suspend, so don't use direct-complete here. 
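The azx_prepare() hunk above introduces the pm_prepared flag: ->prepare() marks the start of a system suspend transition, and (as the later __azx_runtime_resume() change shows) a runtime resume that happens while the flag is set skips waking the codecs. A minimal sketch of that gating, with plain functions standing in for the dev_pm_ops callbacks:

#include <stdio.h>

struct chip { int pm_prepared; };

static void resume_codecs(void) { printf("resuming codecs\n"); }

static void runtime_resume(struct chip *c)
{
	/* avoid codec resume if this runtime resume is part of
	 * a system suspend transition */
	if (!c->pm_prepared)
		resume_codecs();
}

static void prepare(struct chip *c)  { c->pm_prepared = 1; }
static void complete(struct chip *c) { c->pm_prepared = 0; }

int main(void)
{
	struct chip c = { 0 };

	runtime_resume(&c);	/* normal path: codecs resumed */
	prepare(&c);
	runtime_resume(&c);	/* during system suspend: skipped */
	complete(&c);
	return 0;
}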
+ */ + return 0; +} + +static void azx_complete(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + + if (!azx_is_pm_ready(card)) + return; + + chip = card->private_data; + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + chip->pm_prepared = 0; +} + static int azx_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); @@ -1024,7 +1061,6 @@ static int azx_suspend(struct device *dev) chip = card->private_data; bus = azx_bus(chip); - snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); __azx_runtime_suspend(chip); if (bus->irq >= 0) { free_irq(bus->irq, chip); @@ -1052,8 +1088,8 @@ static int azx_resume(struct device *dev) chip->msi = 0; if (azx_acquire_irq(chip, 1) < 0) return -EIO; - __azx_runtime_resume(chip, false); - snd_power_change_state(card, SNDRV_CTL_POWER_D0); + + __azx_runtime_resume(chip); trace_azx_resume(chip); return 0; @@ -1068,6 +1104,8 @@ static int azx_freeze_noirq(struct device *dev) struct azx *chip = card->private_data; struct pci_dev *pci = to_pci_dev(dev); + if (!azx_is_pm_ready(card)) + return 0; if (chip->driver_type == AZX_DRIVER_SKL) pci_set_power_state(pci, PCI_D3hot); @@ -1080,6 +1118,8 @@ static int azx_thaw_noirq(struct device *dev) struct azx *chip = card->private_data; struct pci_dev *pci = to_pci_dev(dev); + if (!azx_is_pm_ready(card)) + return 0; if (chip->driver_type == AZX_DRIVER_SKL) pci_set_power_state(pci, PCI_D0); @@ -1095,12 +1135,9 @@ static int azx_runtime_suspend(struct device *dev) if (!azx_is_pm_ready(card)) return 0; chip = card->private_data; - if (!azx_has_pm_runtime(chip)) - return 0; /* enable controller wake up event */ - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | - STATESTS_INT_MASK); + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK); __azx_runtime_suspend(chip); trace_azx_runtime_suspend(chip); @@ -1115,13 +1152,10 @@ static int azx_runtime_resume(struct device *dev) if (!azx_is_pm_ready(card)) return 0; chip = card->private_data; - if (!azx_has_pm_runtime(chip)) - return 0; - __azx_runtime_resume(chip, true); + __azx_runtime_resume(chip); /* disable controller Wake Up event*/ - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & - ~STATESTS_INT_MASK); + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK); trace_azx_runtime_resume(chip); return 0; @@ -1155,6 +1189,8 @@ static int azx_runtime_idle(struct device *dev) static const struct dev_pm_ops azx_pm = { SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) #ifdef CONFIG_PM_SLEEP + .prepare = azx_prepare, + .complete = azx_complete, .freeze_noirq = azx_freeze_noirq, .thaw_noirq = azx_thaw_noirq, #endif @@ -1196,10 +1232,8 @@ static void azx_vs_set_state(struct pci_dev *pci, if (!disabled) { dev_info(chip->card->dev, "Start delayed initialization\n"); - if (azx_probe_continue(chip) < 0) { + if (azx_probe_continue(chip) < 0) dev_err(chip->card->dev, "initialization error\n"); - hda->init_failed = true; - } } } else { dev_info(chip->card->dev, "%s via vga_switcheroo\n", @@ -1332,12 +1366,15 @@ static int register_vga_switcheroo(struct azx *chip) /* * destructor */ -static int azx_free(struct azx *chip) +static void azx_free(struct azx *chip) { struct pci_dev *pci = chip->pci; struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); + if (hda->freed) + return; + if (azx_has_pm_runtime(chip) && chip->running) pm_runtime_get_noresume(&pci->dev); chip->running = 0; @@ -1381,9 +1418,8 @@ static int azx_free(struct azx *chip) if 
(chip->driver_caps & AZX_DCAPS_I915_COMPONENT) snd_hdac_i915_exit(bus); - kfree(hda); - return 0; + hda->freed = 1; } static int azx_dev_disconnect(struct snd_device *device) @@ -1399,7 +1435,8 @@ static int azx_dev_disconnect(struct snd_device *device) static int azx_dev_free(struct snd_device *device) { - return azx_free(device->device_data); + azx_free(device->device_data); + return 0; } #ifdef SUPPORT_VGA_SWITCHEROO @@ -1713,7 +1750,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, if (err < 0) return err; - hda = kzalloc(sizeof(*hda), GFP_KERNEL); + hda = devm_kzalloc(&pci->dev, sizeof(*hda), GFP_KERNEL); if (!hda) { pci_disable_device(pci); return -ENOMEM; @@ -1754,7 +1791,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, err = azx_bus_init(chip, model[dev]); if (err < 0) { - kfree(hda); pci_disable_device(pci); return err; } @@ -1954,7 +1990,7 @@ static int azx_first_init(struct azx *chip) /* codec detection */ if (!azx_bus(chip)->codec_mask) { dev_err(card->dev, "no codecs found!\n"); - return -ENODEV; + /* keep running the rest for the runtime PM */ } if (azx_acquire_irq(chip, 0) < 0) @@ -1976,24 +2012,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context) { struct snd_card *card = context; struct azx *chip = card->private_data; - struct pci_dev *pci = chip->pci; - if (!fw) { - dev_err(card->dev, "Cannot load firmware, aborting\n"); - goto error; - } - - chip->fw = fw; + if (fw) + chip->fw = fw; + else + dev_err(card->dev, "Cannot load firmware, continue without patching\n"); if (!chip->disabled) { /* continue probing */ - if (azx_probe_continue(chip)) - goto error; + azx_probe_continue(chip); } - return; /* OK */ - - error: - snd_card_free(card); - pci_set_drvdata(pci, NULL); } #endif @@ -2024,6 +2051,17 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream, #endif } +/* Blacklist for skipping the whole probe: + * some HD-audio PCI entries are exposed without any codecs, and such devices + * should be ignored from the beginning. 
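The blacklist above matches on the PCI subsystem IDs as well as vendor/device (PCI_DEVICE_SUB), so only the specific codec-less board variants are skipped while other boards with the same controller keep probing. A userspace sketch of that four-tuple match, reusing the MSI TRX40 Creator entry from the table:

#include <stdio.h>

struct pci_id { unsigned short vendor, device, sub_vendor, sub_device; };

static const struct pci_id blacklist[] = {
	{ 0x1022, 0x1487, 0x1462, 0xcb59 },	/* MSI TRX40 Creator */
	{ 0 }
};

static int is_blacklisted(const struct pci_id *dev)
{
	for (const struct pci_id *p = blacklist; p->vendor; p++)
		if (p->vendor == dev->vendor && p->device == dev->device &&
		    p->sub_vendor == dev->sub_vendor &&
		    p->sub_device == dev->sub_device)
			return 1;	/* skip this exact board variant */
	return 0;
}

int main(void)
{
	struct pci_id dev = { 0x1022, 0x1487, 0x1462, 0xcb59 };

	printf("skip=%d\n", is_blacklisted(&dev));	/* skip=1 */
	return 0;
}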
+ */ +static const struct pci_device_id driver_blacklist[] = { + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */ + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */ + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */ + {} +}; + static const struct hda_controller_ops pci_hda_ops = { .disable_msi_reset_irq = disable_msi_reset_irq, .pcm_mmap_prepare = pcm_mmap_prepare, @@ -2059,6 +2097,11 @@ static int azx_probe(struct pci_dev *pci, bool schedule_probe; int err; + if (pci_match_id(driver_blacklist, pci)) { + dev_info(&pci->dev, "Skipping the blacklisted device\n"); + return -ENODEV; + } + if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { @@ -2153,8 +2196,6 @@ static struct snd_pci_quirk power_save_blacklist[] = { SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), - /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */ - SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ @@ -2258,9 +2299,11 @@ static int azx_probe_continue(struct azx *chip) #endif /* create codec instances */ - err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); - if (err < 0) - goto out_free; + if (bus->codec_mask) { + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + } #ifdef CONFIG_SND_HDA_PATCH_LOADER if (chip->fw) { @@ -2274,7 +2317,7 @@ static int azx_probe_continue(struct azx *chip) #endif } #endif - if ((probe_only[dev] & 1) == 0) { + if (bus->codec_mask && !(probe_only[dev] & 1)) { err = azx_codec_configure(chip); if (err < 0) goto out_free; @@ -2291,17 +2334,23 @@ static int azx_probe_continue(struct azx *chip) set_default_power_save(chip); - if (azx_has_pm_runtime(chip)) + if (azx_has_pm_runtime(chip)) { + pm_runtime_use_autosuspend(&pci->dev); + pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); + } out_free: - if (err < 0 || !hda->need_i915_power) + if (err < 0) { + azx_free(chip); + return err; + } + + if (!hda->need_i915_power) display_power(chip, false); - if (err < 0) - hda->init_failed = 1; complete_all(&hda->probe_wait); to_hda_bus(bus)->bus_probing = 0; - return err; + return 0; } static void azx_remove(struct pci_dev *pci) @@ -2408,12 +2457,20 @@ static const struct pci_device_id azx_ids[] = { /* CometLake-H */ { PCI_DEVICE(0x8086, 0x06C8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0xf1c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* CometLake-S */ { PCI_DEVICE(0x8086, 0xa3f0), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* CometLake-R */ + { PCI_DEVICE(0x8086, 0xf0c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Icelake */ { PCI_DEVICE(0x8086, 0x34c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Icelake-H */ + { PCI_DEVICE(0x8086, 0x3dc8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Jasperlake */ { PCI_DEVICE(0x8086, 0x38c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, @@ -2422,9 +2479,14 @@ static const struct pci_device_id azx_ids[] = { /* Tigerlake */ { PCI_DEVICE(0x8086, 0xa0c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Tigerlake-H */ + { PCI_DEVICE(0x8086, 0x43c8), + .driver_data = 
AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Elkhart Lake */ { PCI_DEVICE(0x8086, 0x4b55), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4b58), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index 2acfff3da1a0..3fb119f09040 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h @@ -27,6 +27,7 @@ struct hda_intel { unsigned int use_vga_switcheroo:1; unsigned int vga_switcheroo_registered:1; unsigned int init_failed:1; /* delayed init failed */ + unsigned int freed:1; /* resources already released */ bool need_i915_power:1; /* the hda controller needs i915 power */ }; diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 3942e1b528d8..3dca65d79b02 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -138,6 +138,8 @@ int snd_hda_codec_reset(struct hda_codec *codec); void snd_hda_codec_register(struct hda_codec *codec); void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec); +#define snd_hda_regmap_sync(codec) snd_hdac_regmap_sync(&(codec)->core) + enum { HDA_VMUTE_OFF, HDA_VMUTE_ON, diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c index 6dbe99131bc4..91b4a29a8c36 100644 --- a/sound/pci/hda/hda_sysfs.c +++ b/sound/pci/hda/hda_sysfs.c @@ -139,7 +139,7 @@ static int reconfig_codec(struct hda_codec *codec) "The codec is being used, can't reconfigure.\n"); goto error; } - err = snd_hda_codec_configure(codec); + err = device_reprobe(hda_codec_dev(codec)); if (err < 0) goto error; err = snd_card_register(codec->card); diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index e5191584638a..e378cb33c69d 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -169,6 +169,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) struct hdac_bus *bus = azx_bus(chip); if (chip && chip->running) { + /* enable controller wake up event */ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | + STATESTS_INT_MASK); + azx_stop_chip(chip); synchronize_irq(bus->irq); azx_enter_link_reset(chip); @@ -191,6 +195,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) if (chip && chip->running) { hda_tegra_init(hda); azx_init_chip(chip, 1); + /* disable controller wake up event*/ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & + ~STATESTS_INT_MASK); } return 0; diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index bc9dd8e6fd86..c64895f99299 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -389,7 +389,7 @@ static int patch_ad1986a(struct hda_codec *codec) { int err; struct ad198x_spec *spec; - static hda_nid_t preferred_pairs[] = { + static const hda_nid_t preferred_pairs[] = { 0x1a, 0x03, 0x1b, 0x03, 0x1c, 0x04, @@ -519,9 +519,9 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec) static int patch_ad1983(struct hda_codec *codec) { + static const hda_nid_t conn_0c[] = { 0x08 }; + static const hda_nid_t conn_0d[] = { 0x09 }; struct ad198x_spec *spec; - static hda_nid_t conn_0c[] = { 0x08 }; - static hda_nid_t conn_0d[] = { 0x09 }; int err; err = alloc_ad_spec(codec); diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index adad3651889e..9412bdda85c8 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -93,7 +93,7 @@ 
enum { }; /* Strings for Input Source Enum Control */ -static const char *const in_src_str[3] = {"Rear Mic", "Line", "Front Mic" }; +static const char *const in_src_str[3] = { "Microphone", "Line In", "Front Microphone" }; #define IN_SRC_NUM_OF_INPUTS 3 enum { REAR_MIC, @@ -1065,6 +1065,7 @@ enum { QUIRK_R3DI, QUIRK_R3D, QUIRK_AE5, + QUIRK_AE7, }; #ifdef CONFIG_PCI @@ -1146,7 +1147,7 @@ static const struct hda_pintbl ae5_pincfgs[] = { { 0x0e, 0x01c510f0 }, /* SPDIF In */ { 0x0f, 0x01017114 }, /* Port A -- Rear L/R. */ { 0x10, 0x01017012 }, /* Port D -- Center/LFE or FP Hp */ - { 0x11, 0x01a170ff }, /* Port B -- LineMicIn2 / Rear Headphone */ + { 0x11, 0x012170ff }, /* Port B -- LineMicIn2 / Rear Headphone */ { 0x12, 0x01a170f0 }, /* Port C -- LineIn1 */ { 0x13, 0x908700f0 }, /* What U Hear In*/ { 0x18, 0x50d000f0 }, /* N/A */ @@ -1182,7 +1183,10 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), + SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), + SND_PCI_QUIRK(0x1102, 0x0191, "Sound Blaster AE-5 Plus", QUIRK_AE5), + SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7), {} }; @@ -4670,9 +4674,18 @@ static int ca0132_alt_select_in(struct hda_codec *codec) tmp = FLOAT_ONE; break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00); + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00); tmp = FLOAT_THREE; break; + case QUIRK_AE7: + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00); + tmp = FLOAT_THREE; + chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, + SR_96_000); + chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, + SR_96_000); + dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO); + break; default: tmp = FLOAT_ONE; break; @@ -4716,7 +4729,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec) r3di_gpio_mic_set(codec, R3DI_REAR_MIC); break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00); + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00); + break; + case QUIRK_AE7: + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f); + chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, + SR_96_000); + chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, + SR_96_000); + dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO); break; default: break; @@ -4727,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec) if (ca0132_quirk(spec) == QUIRK_R3DI) chipio_set_conn_rate(codec, 0x0F, SR_96_000); - tmp = FLOAT_ZERO; + if (ca0132_quirk(spec) == QUIRK_AE7) + tmp = FLOAT_THREE; + else + tmp = FLOAT_ZERO; dspio_set_uint_param(codec, 0x80, 0x00, tmp); switch (ca0132_quirk(spec)) { @@ -4755,7 +4779,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec) tmp = FLOAT_ONE; break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f); + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f); tmp = FLOAT_THREE; break; default: @@ -5747,6 +5771,11 @@ static int ca0132_switch_get(struct snd_kcontrol *kcontrol, return 0; } + if (nid == ZXR_HEADPHONE_GAIN) { + *valp = spec->zxr_gain_set; + return 0; + } + return 0; } @@ -7803,23 +7832,23 @@ static void sbz_region2_exit(struct hda_codec *codec) static void sbz_set_pin_ctl_default(struct hda_codec *codec) { - hda_nid_t pins[5] = {0x0B, 0x0C, 0x0E, 0x12, 0x13}; + static const hda_nid_t pins[] = {0x0B, 0x0C, 0x0E, 0x12, 0x13}; unsigned int i; 
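The ca0132 cleanups in the following hunks replace hard-coded loop bounds (5, 7) with ARRAY_SIZE(pins), deriving the count from the array itself so the bound cannot drift from the initializer list. A minimal sketch of the idiom, using the pin values from the first hunk:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const unsigned char pins[] = { 0x0B, 0x0C, 0x0E, 0x12, 0x13 };

	/* adding or removing an entry automatically adjusts the bound */
	for (unsigned int i = 0; i < ARRAY_SIZE(pins); i++)
		printf("pin %#x\n", pins[i]);
	return 0;
}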
snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40); - for (i = 0; i < 5; i++) + for (i = 0; i < ARRAY_SIZE(pins); i++) snd_hda_codec_write(codec, pins[i], 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00); } static void ca0132_clear_unsolicited(struct hda_codec *codec) { - hda_nid_t pins[7] = {0x0B, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13}; + static const hda_nid_t pins[] = {0x0B, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13}; unsigned int i; - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(pins); i++) { snd_hda_codec_write(codec, pins[i], 0, AC_VERB_SET_UNSOLICITED_ENABLE, 0x00); } @@ -7843,10 +7872,10 @@ static void sbz_gpio_shutdown_commands(struct hda_codec *codec, int dir, static void zxr_dbpro_power_state_shutdown(struct hda_codec *codec) { - hda_nid_t pins[7] = {0x05, 0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01}; + static const hda_nid_t pins[] = {0x05, 0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01}; unsigned int i; - for (i = 0; i < 7; i++) + for (i = 0; i < ARRAY_SIZE(pins); i++) snd_hda_codec_write(codec, pins[i], 0, AC_VERB_SET_POWER_STATE, 0x03); } diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 1e20e85e9b46..5e2fadb264e4 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -116,7 +116,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec) } static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins, - hda_nid_t *pins, bool on) + const hda_nid_t *pins, bool on) { int i; for (i = 0; i < num_pins; i++) { @@ -898,18 +898,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), - SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), - SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), - SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), - SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), - SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), - SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), - SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), @@ 
-960,10 +960,10 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { static void add_cx5051_fake_mutes(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; - static hda_nid_t out_nids[] = { + static const hda_nid_t out_nids[] = { 0x10, 0x11, 0 }; - hda_nid_t *p; + const hda_nid_t *p; for (p = out_nids; *p; p++) snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT, @@ -1075,6 +1075,7 @@ static int patch_conexant_auto(struct hda_codec *codec) static const struct hda_device_id snd_hda_id_conexant[] = { HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 307ca1f03676..ce38b5d4670d 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -57,6 +57,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883) #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec)) +static bool enable_acomp = true; +module_param(enable_acomp, bool, 0444); +MODULE_PARM_DESC(enable_acomp, "Enable audio component binding (default=yes)"); + struct hdmi_spec_per_cvt { hda_nid_t cvt_nid; int assigned; @@ -172,6 +176,7 @@ struct hdmi_spec { bool use_jack_detect; /* jack detection enabled */ bool use_acomp_notifier; /* use eld_notify callback for hotplug */ bool acomp_registered; /* audio component registered in this driver */ + bool force_connect; /* force connectivity */ struct drm_audio_component_audio_ops drm_audio_ops; int (*port2pin)(struct hda_codec *, int); /* reverse port/pin mapping */ @@ -1707,7 +1712,8 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) * all device entries on the same pin */ config = snd_hda_codec_get_pincfg(codec, pin_nid); - if (get_defcfg_connect(config) == AC_JACK_PORT_NONE) + if (get_defcfg_connect(config) == AC_JACK_PORT_NONE && + !spec->force_connect) return 0; /* @@ -1811,35 +1817,58 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) return 0; } +static const struct snd_pci_quirk force_connect_list[] = { + SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + {} +}; + static int hdmi_parse_codec(struct hda_codec *codec) { - hda_nid_t nid; + struct hdmi_spec *spec = codec->spec; + hda_nid_t start_nid; + unsigned int caps; int i, nodes; + const struct snd_pci_quirk *q; - nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid); - if (!nid || nodes < 0) { + nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid); + if (!start_nid || nodes < 0) { codec_warn(codec, "HDMI: failed to get afg sub nodes\n"); return -EINVAL; } - for (i = 0; i < nodes; i++, nid++) { - unsigned int caps; - unsigned int type; + q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list); + + if (q && q->value) + spec->force_connect = true; + + /* + * hdmi_add_pin() assumes total amount of converters to + * be known, so first discover all converters + */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; caps = get_wcaps(codec, nid); - type = get_wcaps_type(caps); if (!(caps & AC_WCAP_DIGITAL)) continue; - switch (type) { - case AC_WID_AUD_OUT: + if 
(get_wcaps_type(caps) == AC_WID_AUD_OUT) hdmi_add_cvt(codec, nid); - break; - case AC_WID_PIN: + } + + /* discover audio pins */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; + + caps = get_wcaps(codec, nid); + + if (!(caps & AC_WCAP_DIGITAL)) + continue; + + if (get_wcaps_type(caps) == AC_WID_PIN) hdmi_add_pin(codec, nid); - break; - } } return 0; @@ -1857,8 +1886,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) /* Add sanity check to pass klockwork check. * This should never happen. */ - if (WARN_ON(spdif == NULL)) + if (WARN_ON(spdif == NULL)) { + mutex_unlock(&codec->spdif_mutex); return true; + } non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO); mutex_unlock(&codec->spdif_mutex); return non_pcm; @@ -1970,22 +2001,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, int pinctl; int err = 0; + mutex_lock(&spec->pcm_lock); if (hinfo->nid) { pcm_idx = hinfo_to_pcm_index(codec, hinfo); - if (snd_BUG_ON(pcm_idx < 0)) - return -EINVAL; + if (snd_BUG_ON(pcm_idx < 0)) { + err = -EINVAL; + goto unlock; + } cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid); - if (snd_BUG_ON(cvt_idx < 0)) - return -EINVAL; + if (snd_BUG_ON(cvt_idx < 0)) { + err = -EINVAL; + goto unlock; + } per_cvt = get_cvt(spec, cvt_idx); - snd_BUG_ON(!per_cvt->assigned); per_cvt->assigned = 0; hinfo->nid = 0; azx_stream(get_azx_dev(substream))->stripe = 0; - mutex_lock(&spec->pcm_lock); snd_hda_spdif_ctls_unassign(codec, pcm_idx); clear_bit(pcm_idx, &spec->pcm_in_use); pin_idx = hinfo_to_pin_index(codec, hinfo); @@ -2013,10 +2047,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, per_pin->setup = false; per_pin->channels = 0; mutex_unlock(&per_pin->lock); - unlock: - mutex_unlock(&spec->pcm_lock); } +unlock: + mutex_unlock(&spec->pcm_lock); + return err; } @@ -2228,7 +2263,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec) for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); + struct hdmi_eld *pin_eld = &per_pin->sink_eld; + pin_eld->eld_valid = false; hdmi_present_sense(per_pin, 0); } @@ -2345,13 +2382,25 @@ static void generic_hdmi_free(struct hda_codec *codec) } #ifdef CONFIG_PM +static int generic_hdmi_suspend(struct hda_codec *codec) +{ + struct hdmi_spec *spec = codec->spec; + int pin_idx; + + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); + cancel_delayed_work_sync(&per_pin->work); + } + return 0; +} + static int generic_hdmi_resume(struct hda_codec *codec) { struct hdmi_spec *spec = codec->spec; int pin_idx; codec->patch_ops.init(codec); - regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); @@ -2368,6 +2417,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = { .build_controls = generic_hdmi_build_controls, .unsol_event = hdmi_unsol_event, #ifdef CONFIG_PM + .suspend = generic_hdmi_suspend, .resume = generic_hdmi_resume, #endif }; @@ -2465,6 +2515,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp, mutex_lock(&spec->bind_lock); spec->use_acomp_notifier = use_acomp; spec->codec->relaxed_resume = use_acomp; + spec->codec->bus->keep_power = 0; /* reprogram each jack detection logic depending on the notifier */ if (spec->use_jack_detect) { for (i = 0; i < spec->num_pins; i++) @@ -2550,12 +2601,16 @@ static void 
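Two of the hunks above are locking fixes: check_non_pcm_per_cvt() now drops spdif_mutex on its WARN_ON early return, and hdmi_pcm_close() takes pcm_lock up front and funnels every failure through a single unlock label instead of returning with the mutex held. A minimal pthread sketch of that error-path shape, with hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pcm_lock = PTHREAD_MUTEX_INITIALIZER;
static int pcm_in_use;

/* Lock once, exit through one label: no early return can leak the mutex.
 * This mirrors the structure the hdmi_pcm_close() hunk moves to; the
 * function and variable names are made up for the sketch.
 */
static int pcm_close(int pcm_idx)
{
	int err = 0;

	pthread_mutex_lock(&pcm_lock);
	if (pcm_idx < 0) {
		err = -EINVAL;
		goto unlock;	/* was: return -EINVAL, leaking the lock */
	}
	pcm_in_use = 0;
unlock:
	pthread_mutex_unlock(&pcm_lock);
	return err;
}

int main(void)
{
	printf("close(-1) -> %d, close(0) -> %d\n", pcm_close(-1), pcm_close(0));
	return 0;
}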
generic_acomp_init(struct hda_codec *codec, { struct hdmi_spec *spec = codec->spec; + if (!enable_acomp) { + codec_info(codec, "audio component disabled by module option\n"); + return; + } + spec->port2pin = port2pin; setup_drm_audio_ops(codec, ops); if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, match_bound_vga, 0)) { spec->acomp_registered = true; - codec->bus->keep_power = 0; } } @@ -2760,6 +2815,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, hda_nid_t cvt_nid) { if (per_pin) { + haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid); snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); intel_verify_pin_cvt_connect(codec, per_pin); @@ -3639,6 +3695,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) static int patch_tegra_hdmi(struct hda_codec *codec) { + struct hdmi_spec *spec; int err; err = patch_generic_hdmi(codec); @@ -3646,6 +3703,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) return err; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; + spec = codec->spec; + spec->chmap.ops.chmap_cea_alloc_validate_get_type = + nvhdmi_chmap_cea_alloc_validate_get_type; + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; return 0; } @@ -4133,6 +4194,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), @@ -4156,6 +4222,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 128db2e6bc64..b7297a22438a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -81,11 +81,20 @@ struct alc_spec { /* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */ int mute_led_polarity; + int micmute_led_polarity; hda_nid_t mute_led_nid; hda_nid_t cap_mute_led_nid; unsigned int gpio_mute_led_mask; unsigned int gpio_mic_led_mask; + unsigned int mute_led_coef_idx; + unsigned int mute_led_coefbit_mask; + unsigned int mute_led_coefbit_on; + unsigned int mute_led_coefbit_off; + unsigned int mic_led_coef_idx; + unsigned int mic_led_coefbit_mask; + unsigned int mic_led_coefbit_on; + unsigned int mic_led_coefbit_off; hda_nid_t headset_mic_pin; hda_nid_t headphone_mic_pin; @@ -107,6 +116,7 @@ struct alc_spec { unsigned int done_hp_init:1; unsigned int no_shutup_pins:1; unsigned int ultra_low_power:1; + unsigned int has_hs_key:1; /* for PLL fix */ hda_nid_t 
pll_nid; @@ -367,11 +377,15 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0215: case 0x10ec0233: case 0x10ec0235: + case 0x10ec0236: + case 0x10ec0245: case 0x10ec0255: + case 0x10ec0256: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: case 0x10ec0286: + case 0x10ec0287: case 0x10ec0288: case 0x10ec0285: case 0x10ec0298: @@ -379,11 +393,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0300: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; - case 0x10ec0236: - case 0x10ec0256: - alc_write_coef_idx(codec, 0x36, 0x5757); - alc_update_coef_idx(codec, 0x10, 1<<9, 0); - break; case 0x10ec0275: alc_update_coef_idx(codec, 0xe, 0, 1<<0); break; @@ -427,6 +436,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) alc_update_coef_idx(codec, 0x7, 1<<5, 0); break; case 0x10ec0892: + case 0x10ec0897: alc_update_coef_idx(codec, 0x7, 1<<5, 0); break; case 0x10ec0899: @@ -466,10 +476,10 @@ static void set_eapd(struct hda_codec *codec, hda_nid_t nid, int on) static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) { /* We currently only handle front, HP */ - static hda_nid_t pins[] = { + static const hda_nid_t pins[] = { 0x0f, 0x10, 0x14, 0x15, 0x17, 0 }; - hda_nid_t *p; + const hda_nid_t *p; for (p = pins; *p; p++) set_eapd(codec, *p, on); } @@ -791,9 +801,11 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports) { if (!alc_subsystem_id(codec, ports)) { struct alc_spec *spec = codec->spec; - codec_dbg(codec, - "realtek: Enable default setup for auto mode as fallback\n"); - spec->init_amp = ALC_INIT_DEFAULT; + if (spec->init_amp == ALC_INIT_UNDEFINED) { + codec_dbg(codec, + "realtek: Enable default setup for auto mode as fallback\n"); + spec->init_amp = ALC_INIT_DEFAULT; + } } } @@ -907,7 +919,7 @@ static int alc_resume(struct hda_codec *codec) if (!spec->no_depop_delay) msleep(150); /* to avoid pop noise */ codec->patch_ops.init(codec); - regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); hda_call_check_power_status(codec, 0x01); return 0; } @@ -1130,6 +1142,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid) codec->single_adc_amp = 1; /* FIXME: do we need this for all Realtek codec models? 
*/ codec->spdif_status_reset = 1; + codec->forced_resume = 1; codec->patch_ops = alc_patch_ops; err = alc_codec_rename_from_preset(codec); @@ -1882,6 +1895,7 @@ enum { ALC889_FIXUP_FRONT_HP_NO_PRESENCE, ALC889_FIXUP_VAIO_TT, ALC888_FIXUP_EEE1601, + ALC886_FIXUP_EAPD, ALC882_FIXUP_EAPD, ALC883_FIXUP_EAPD, ALC883_FIXUP_ACER_EAPD, @@ -1909,6 +1923,8 @@ enum { ALC1220_FIXUP_CLEVO_P950, ALC1220_FIXUP_CLEVO_PB51ED, ALC1220_FIXUP_CLEVO_PB51ED_PINS, + ALC887_FIXUP_ASUS_AUDIO, + ALC887_FIXUP_ASUS_HMIC, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -1938,19 +1954,19 @@ static void alc889_fixup_dac_route(struct hda_codec *codec, { if (action == HDA_FIXUP_ACT_PRE_PROBE) { /* fake the connections during parsing the tree */ - hda_nid_t conn1[2] = { 0x0c, 0x0d }; - hda_nid_t conn2[2] = { 0x0e, 0x0f }; - snd_hda_override_conn_list(codec, 0x14, 2, conn1); - snd_hda_override_conn_list(codec, 0x15, 2, conn1); - snd_hda_override_conn_list(codec, 0x18, 2, conn2); - snd_hda_override_conn_list(codec, 0x1a, 2, conn2); + static const hda_nid_t conn1[] = { 0x0c, 0x0d }; + static const hda_nid_t conn2[] = { 0x0e, 0x0f }; + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn2), conn2); + snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn2), conn2); } else if (action == HDA_FIXUP_ACT_PROBE) { /* restore the connections */ - hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 }; - snd_hda_override_conn_list(codec, 0x14, 5, conn); - snd_hda_override_conn_list(codec, 0x15, 5, conn); - snd_hda_override_conn_list(codec, 0x18, 5, conn); - snd_hda_override_conn_list(codec, 0x1a, 5, conn); + static const hda_nid_t conn[] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 }; + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn), conn); } } @@ -1958,8 +1974,8 @@ static void alc889_fixup_dac_route(struct hda_codec *codec, static void alc889_fixup_mbp_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { + static const hda_nid_t nids[] = { 0x14, 0x15, 0x19 }; struct alc_spec *spec = codec->spec; - static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 }; int i; if (action != HDA_FIXUP_ACT_INIT) @@ -1995,7 +2011,7 @@ static void alc889_fixup_mac_pins(struct hda_codec *codec, static void alc889_fixup_imac91_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - static hda_nid_t nids[2] = { 0x18, 0x1a }; + static const hda_nid_t nids[] = { 0x18, 0x1a }; if (action == HDA_FIXUP_ACT_INIT) alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); @@ -2005,7 +2021,7 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec, static void alc889_fixup_mba11_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - static hda_nid_t nids[1] = { 0x18 }; + static const hda_nid_t nids[] = { 0x18 }; if (action == HDA_FIXUP_ACT_INIT) alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); @@ -2015,7 +2031,7 @@ static void alc889_fixup_mba11_vref(struct hda_codec *codec, static void alc889_fixup_mba21_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - static hda_nid_t nids[2] = { 0x18, 0x19 }; + static const hda_nid_t nids[] = { 0x18, 0x19 }; if (action == HDA_FIXUP_ACT_INIT) alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids)); @@ 
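alc889_fixup_dac_route() above fakes a narrower connection list while the widget tree is parsed (HDA_FIXUP_ACT_PRE_PROBE) and restores the full list at HDA_FIXUP_ACT_PROBE. A rough user-space model of such action-phased fixups; the action enum, NID values, and list-swapping globals are illustrative, not the kernel's:

#include <stdio.h>

typedef unsigned short hda_nid_t;	/* stand-in for the kernel typedef */

enum fixup_action { ACT_PRE_PROBE, ACT_PROBE };	/* illustrative values */

static const hda_nid_t conn_parse[] = { 0x0c, 0x0d };
static const hda_nid_t conn_full[] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 };

static const hda_nid_t *cur_conn = conn_full;
static unsigned int cur_len = 5;

static void dac_route_fixup(enum fixup_action action)
{
	switch (action) {
	case ACT_PRE_PROBE:
		/* fake a narrower connection list while the tree is parsed */
		cur_conn = conn_parse;
		cur_len = 2;
		break;
	case ACT_PROBE:
		/* restore the full connection list afterwards */
		cur_conn = conn_full;
		cur_len = 5;
		break;
	}
}

int main(void)
{
	dac_route_fixup(ACT_PRE_PROBE);
	printf("parse-time list: %u entries, first 0x%02x\n", cur_len, cur_conn[0]);
	dac_route_fixup(ACT_PROBE);
	printf("probe-time list: %u entries, last 0x%02x\n", cur_len, cur_conn[cur_len - 1]);
	return 0;
}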
-2097,7 +2113,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - hda_nid_t conn1[1] = { 0x0c }; + static const hda_nid_t conn1[] = { 0x0c }; if (action != HDA_FIXUP_ACT_PRE_PROBE) return; @@ -2106,8 +2122,8 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, /* We therefore want to make sure 0x14 (front headphone) and * 0x1b (speakers) use the stereo DAC 0x02 */ - snd_hda_override_conn_list(codec, 0x14, 1, conn1); - snd_hda_override_conn_list(codec, 0x1b, 1, conn1); + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1); } static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, @@ -2121,6 +2137,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec, alc_fixup_headset_mode_no_hp_mic(codec, fix, action); } +static void alc887_asus_hp_automute_hook(struct hda_codec *codec, + struct hda_jack_callback *jack) +{ + struct alc_spec *spec = codec->spec; + unsigned int vref; + + snd_hda_gen_hp_automute(codec, jack); + + if (spec->gen.hp_jack_present) + vref = AC_PINCTL_VREF_80; + else + vref = AC_PINCTL_VREF_HIZ; + snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref); +} + +static void alc887_fixup_asus_jack(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + if (action != HDA_FIXUP_ACT_PROBE) + return; + snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP); + spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook; +} + static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = HDA_FIXUP_PINS, @@ -2188,6 +2229,15 @@ static const struct hda_fixup alc882_fixups[] = { { } } }, + [ALC886_FIXUP_EAPD] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* change to EAPD mode */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 }, + { } + } + }, [ALC882_FIXUP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -2378,6 +2428,20 @@ static const struct hda_fixup alc882_fixups[] = { .chained = true, .chain_id = ALC1220_FIXUP_CLEVO_PB51ED, }, + [ALC887_FIXUP_ASUS_AUDIO] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */ + { 0x19, 0x22219420 }, + {} + }, + }, + [ALC887_FIXUP_ASUS_HMIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc887_fixup_asus_jack, + .chained = true, + .chain_id = ALC887_FIXUP_ASUS_AUDIO, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2396,13 +2460,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G", ALC882_FIXUP_ACER_ASPIRE_8930G), + SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", + ALC882_FIXUP_ACER_ASPIRE_4930G), + SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", ALC882_FIXUP_ACER_ASPIRE_4930G), - SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", - ALC882_FIXUP_ACER_ASPIRE_4930G), - SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), @@ -2411,14 +2475,15 @@ static const struct snd_pci_quirk 
alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), + SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC), SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), - SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), - SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), @@ -2445,23 +2510,44 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), + SND_PCI_QUIRK(0x1462, 0xcc34, "MSI Godlike X570", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), + SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", 
ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x95e4, "Clevo P955ER", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x95e5, "Clevo P955EE6", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x95e6, "Clevo P950R[CDF]", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), - SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), @@ -2982,6 +3068,109 @@ static int alc269_parse_auto_config(struct hda_codec *codec) return alc_parse_auto_config(codec, alc269_ignore, ssids); } +static const struct hda_jack_keymap alc_headset_btn_keymap[] = { + { SND_JACK_BTN_0, KEY_PLAYPAUSE }, + { SND_JACK_BTN_1, KEY_VOICECOMMAND }, + { SND_JACK_BTN_2, KEY_VOLUMEUP }, + { SND_JACK_BTN_3, KEY_VOLUMEDOWN }, + {} +}; + +static void alc_headset_btn_callback(struct hda_codec *codec, + struct hda_jack_callback *jack) +{ + int report = 0; + + if (jack->unsol_res & (7 << 13)) + report |= SND_JACK_BTN_0; + + if (jack->unsol_res & (1 << 16 | 3 << 8)) + report |= SND_JACK_BTN_1; + + /* Volume up key */ + if (jack->unsol_res & (7 << 23)) + report |= SND_JACK_BTN_2; + + /* Volume down key */ + if (jack->unsol_res & (7 << 10)) + report |= SND_JACK_BTN_3; + + jack->jack->button_state = report; +} + +static void alc_disable_headset_jack_key(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + if (!spec->has_hs_key) + return; + + switch (codec->core.vendor_id) { + case 0x10ec0215: + case 0x10ec0225: + case 0x10ec0285: + case 0x10ec0287: + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: + alc_write_coef_idx(codec, 0x48, 0x0); + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); + alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0); + break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x48, 0x0); + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); + break; + } +} + +static void alc_enable_headset_jack_key(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + if (!spec->has_hs_key) + return; + + switch (codec->core.vendor_id) { + case 0x10ec0215: + case 0x10ec0225: + case 0x10ec0285: + case 0x10ec0287: + case 0x10ec0295: + case 0x10ec0289: + case 0x10ec0299: + alc_write_coef_idx(codec, 0x48, 0xd011); + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); + alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8); + break; + case 0x10ec0236: + case 0x10ec0256: + alc_write_coef_idx(codec, 0x48, 0xd011); + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); + break; + } +} + +static void alc_fixup_headset_jack(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + 
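alc_headset_btn_callback() above turns groups of bits in the unsolicited-event payload into jack-button flags. A self-contained sketch of the same decode, reusing the masks from the callback but with illustrative stand-ins for the SND_JACK_BTN_* constants:

#include <stdio.h>

#define BTN_0 (1 << 0)	/* play/pause */
#define BTN_1 (1 << 1)	/* voice command */
#define BTN_2 (1 << 2)	/* volume up */
#define BTN_3 (1 << 3)	/* volume down */

/* Same bit groups as alc_headset_btn_callback(); each group of payload
 * bits maps to one button flag in the report mask.
 */
static int decode_buttons(unsigned int unsol_res)
{
	int report = 0;

	if (unsol_res & (7 << 13))
		report |= BTN_0;
	if (unsol_res & (1 << 16 | 3 << 8))
		report |= BTN_1;
	if (unsol_res & (7 << 23))		/* volume up key */
		report |= BTN_2;
	if (unsol_res & (7 << 10))		/* volume down key */
		report |= BTN_3;
	return report;
}

int main(void)
{
	printf("0x%x\n", decode_buttons(1u << 24));	/* bit 24 -> volume up */
	return 0;
}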
spec->has_hs_key = 1; + snd_hda_jack_detect_enable_callback(codec, 0x55, + alc_headset_btn_callback); + snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, + SND_JACK_HEADSET, alc_headset_btn_keymap); + break; + case HDA_FIXUP_ACT_INIT: + alc_enable_headset_jack_key(codec); + break; + } +} + static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) { alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0); @@ -3269,7 +3458,13 @@ static void alc256_init(struct hda_codec *codec) alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); - alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ + /* + * Expose headphone mic (or possibly Line In on some machines) instead + * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See + * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of + * this register. + */ + alc_write_coef_idx(codec, 0x36, 0x5757); } static void alc256_shutup(struct hda_codec *codec) @@ -3294,7 +3489,11 @@ /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + /* If the 3k pulldown control is disabled for ALC257, mic detection will not work correctly + * when booting with a headset plugged in, so skip this setting for the ALC257 codec + */ + if (codec->core.vendor_id != 0x10ec0257) + alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, @@ -3372,6 +3571,8 @@ static void alc225_shutup(struct hda_codec *codec) if (!hp_pin) hp_pin = 0x21; + + alc_disable_headset_jack_key(codec); /* 3k pull low control for Headset jack.
*/ alc_update_coef_idx(codec, 0x4a, 0, 3 << 10); @@ -3411,6 +3612,9 @@ static void alc225_shutup(struct hda_codec *codec) alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4); msleep(30); } + + alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); + alc_enable_headset_jack_key(codec); } static void alc_default_init(struct hda_codec *codec) @@ -3605,8 +3809,8 @@ static void alc5505_dsp_init(struct hda_codec *codec) } #ifdef HALT_REALTEK_ALC5505 -#define alc5505_dsp_suspend(codec) /* NOP */ -#define alc5505_dsp_resume(codec) /* NOP */ +#define alc5505_dsp_suspend(codec) do { } while (0) /* NOP */ +#define alc5505_dsp_resume(codec) do { } while (0) /* NOP */ #else #define alc5505_dsp_suspend(codec) alc5505_dsp_halt(codec) #define alc5505_dsp_resume(codec) alc5505_dsp_back_from_halt(codec) @@ -3642,7 +3846,7 @@ static int alc269_resume(struct hda_codec *codec) msleep(200); } - regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); hda_call_check_power_status(codec, 0x01); /* on some machine, the BIOS will clear the codec gpio data when enter @@ -3715,6 +3919,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec, snd_hda_sequence_write(codec, verbs); } +/* Fix the speaker amp after resume, etc */ +static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + if (action == HDA_FIXUP_ACT_INIT) + alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000); +} + static void alc269_fixup_pcm_44k(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -3953,11 +4166,9 @@ static void alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec, /* update LED status via GPIO */ static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask, - bool enabled) + int polarity, bool enabled) { - struct alc_spec *spec = codec->spec; - - if (spec->mute_led_polarity) + if (polarity) enabled = !enabled; alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */ } @@ -3968,7 +4179,8 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled) struct hda_codec *codec = private_data; struct alc_spec *spec = codec->spec; - alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); + alc_update_gpio_led(codec, spec->gpio_mute_led_mask, + spec->mute_led_polarity, enabled); } /* turn on/off mic-mute LED via GPIO per capture hook */ @@ -3977,6 +4189,7 @@ static void alc_gpio_micmute_update(struct hda_codec *codec) struct alc_spec *spec = codec->spec; alc_update_gpio_led(codec, spec->gpio_mic_led_mask, + spec->micmute_led_polarity, spec->gen.micmute_led.led_value); } @@ -4008,6 +4221,12 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec, alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10); } +static void alc285_fixup_hp_gpio_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00); +} + static void alc286_fixup_hp_gpio_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4062,6 +4281,111 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec, } } +/* update mute-LED according to the speaker mute state via COEF bit */ +static void alc_fixup_mute_led_coefbit_hook(void *private_data, int enabled) +{ + struct hda_codec *codec = private_data; + struct alc_spec *spec = codec->spec; + + if (spec->mute_led_polarity) + enabled = !enabled; + + /* temporarily power up/down for setting COEF bit */ + enabled ? 
alc_update_coef_idx(codec, spec->mute_led_coef_idx, + spec->mute_led_coefbit_mask, spec->mute_led_coefbit_off) : + alc_update_coef_idx(codec, spec->mute_led_coef_idx, + spec->mute_led_coefbit_mask, spec->mute_led_coefbit_on); +} + +static void alc285_fixup_hp_mute_led_coefbit(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef_idx = 0x0b; + spec->mute_led_coefbit_mask = 1<<3; + spec->mute_led_coefbit_on = 1<<3; + spec->mute_led_coefbit_off = 0; + spec->gen.vmaster_mute.hook = alc_fixup_mute_led_coefbit_hook; + spec->gen.vmaster_mute_enum = 1; + } +} + +static void alc236_fixup_hp_mute_led_coefbit(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef_idx = 0x34; + spec->mute_led_coefbit_mask = 1<<5; + spec->mute_led_coefbit_on = 0; + spec->mute_led_coefbit_off = 1<<5; + spec->gen.vmaster_mute.hook = alc_fixup_mute_led_coefbit_hook; + spec->gen.vmaster_mute_enum = 1; + } +} + +/* turn on/off mic-mute LED per capture hook by coef bit */ +static void alc_hp_cap_micmute_update(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + if (spec->gen.micmute_led.led_value) + alc_update_coef_idx(codec, spec->mic_led_coef_idx, + spec->mic_led_coefbit_mask, spec->mic_led_coefbit_on); + else + alc_update_coef_idx(codec, spec->mic_led_coef_idx, + spec->mic_led_coefbit_mask, spec->mic_led_coefbit_off); +} + +static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mic_led_coef_idx = 0x19; + spec->mic_led_coefbit_mask = 1<<13; + spec->mic_led_coefbit_on = 1<<13; + spec->mic_led_coefbit_off = 0; + snd_hda_gen_add_micmute_led(codec, alc_hp_cap_micmute_update); + } +} + +static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mic_led_coef_idx = 0x35; + spec->mic_led_coefbit_mask = 3<<2; + spec->mic_led_coefbit_on = 2<<2; + spec->mic_led_coefbit_off = 1<<2; + snd_hda_gen_add_micmute_led(codec, alc_hp_cap_micmute_update); + } +} + +static void alc285_fixup_hp_mute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); + alc285_fixup_hp_coef_micmute_led(codec, fix, action); +} + +static void alc236_fixup_hp_mute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc236_fixup_hp_mute_led_coefbit(codec, fix, action); + alc236_fixup_hp_coef_micmute_led(codec, fix, action); +} + #if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) @@ -4151,6 +4475,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, { struct alc_spec *spec = codec->spec; + spec->micmute_led_polarity = 1; alc_fixup_hp_gpio_led(codec, action, 0, 0x04); if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->init_amp = ALC_INIT_DEFAULT; @@ -4878,7 +5203,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) case 0x10ec0274: case 0x10ec0294: alc_process_coef_fw(codec, coef0274); - msleep(80); + msleep(850); val = 
alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x00f0) == 0x00f0; break; @@ -5062,6 +5387,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_callback *jack) { snd_hda_gen_hp_automute(codec, jack); + alc_update_headset_mode(codec); } static void alc_probe_headset_mode(struct hda_codec *codec) @@ -5247,18 +5573,9 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, { 0x19, 0x21a11010 }, /* dock mic */ { } }; - /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise - * the speaker output becomes too low by some reason on Thinkpads with - * ALC298 codec - */ - static hda_nid_t preferred_pairs[] = { - 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, - 0 - }; struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->gen.preferred_dacs = preferred_pairs; spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; snd_hda_apply_pincfgs(codec, pincfgs); } else if (action == HDA_FIXUP_ACT_INIT) { @@ -5271,6 +5588,23 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, } } +static void alc_fixup_tpt470_dacs(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise + * the speaker output becomes too low by some reason on Thinkpads with + * ALC298 codec + */ + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x03, 0x17, 0x02, 0x21, 0x02, + 0 + }; + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->gen.preferred_dacs = preferred_pairs; +} + static void alc_shutup_dell_xps13(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -5375,17 +5709,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec, } } -static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec, - const struct hda_fixup *fix, - int action) -{ - if (action != HDA_FIXUP_ACT_PRE_PROBE) - return; - - snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1); - snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP); -} - static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5523,9 +5846,9 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec, /* DAC node 0x03 is giving mono output. We therefore want to make sure 0x14 (front speaker) and 0x15 (headphones) use the stereo DAC, while leaving 0x17 (bass speaker) for node 0x03. */ - hda_nid_t conn1[2] = { 0x0c }; - snd_hda_override_conn_list(codec, 0x14, 1, conn1); - snd_hda_override_conn_list(codec, 0x15, 1, conn1); + static const hda_nid_t conn1[] = { 0x0c }; + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1); } } @@ -5540,8 +5863,8 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec, Pin Complex), since Node 0x02 has Amp-out caps, we can adjust speaker's volume now. 
*/ - hda_nid_t conn1[1] = { 0x0c }; - snd_hda_override_conn_list(codec, 0x17, 1, conn1); + static const hda_nid_t conn1[] = { 0x0c }; + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn1), conn1); } } @@ -5550,8 +5873,8 @@ static void alc295_fixup_disable_dac3(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { - hda_nid_t conn[2] = { 0x02, 0x03 }; - snd_hda_override_conn_list(codec, 0x17, 2, conn); + static const hda_nid_t conn[] = { 0x02, 0x03 }; + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); } } @@ -5560,8 +5883,8 @@ static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { - hda_nid_t conn[1] = { 0x02 }; - snd_hda_override_conn_list(codec, 0x17, 1, conn); + static const hda_nid_t conn[] = { 0x02 }; + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); } } @@ -5573,7 +5896,8 @@ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, snd_hda_gen_hp_automute(codec, jack); /* mute_led_polarity is set to 0, so we pass inverted value here */ - alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present); + alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity, + !spec->gen.hp_jack_present); } /* Manage GPIOs for HP EliteBook Folio 9480m. @@ -5610,6 +5934,39 @@ static void alc275_fixup_gpio4_off(struct hda_codec *codec, } } +/* Quirk for Thinkpad X1 7th and 8th Gen + * The following fixed routing is needed: + * DAC1 (NID 0x02) -> Speaker (NID 0x14); some EQ is applied internally + * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC + * DAC3 (NID 0x06) -> Unused, due to the lack of a volume amp + */ +static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0 + }; + struct alc_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; + break; + case HDA_FIXUP_ACT_BUILD: + /* The generic parser creates somewhat unintuitive volume ctls + * with the fixed routing above, and the shared DAC2 may be + * confusing for PA. + * Rename those to unique names so that PA doesn't touch them + * and use only Master volume.
+ */ + rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume"); + rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume"); + break; + } +} + static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5634,12 +5991,21 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, } } +static void alc225_fixup_s3_pop_noise(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + codec->power_save_node = 1; +} + /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */ static void alc274_fixup_bind_dacs(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; - static hda_nid_t preferred_pairs[] = { + static const hda_nid_t preferred_pairs[] = { 0x21, 0x03, 0x1b, 0x03, 0x16, 0x02, 0 }; @@ -5652,6 +6018,21 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, codec->power_save_node = 0; } +/* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */ +static void alc289_fixup_asus_ga401(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0 + }; + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->gen.preferred_dacs = preferred_pairs; + spec->gen.obey_preferred_dacs = 1; + } +} + /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5662,65 +6043,23 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, snd_hda_override_wcaps(codec, 0x03, 0); } -static const struct hda_jack_keymap alc_headset_btn_keymap[] = { - { SND_JACK_BTN_0, KEY_PLAYPAUSE }, - { SND_JACK_BTN_1, KEY_VOICECOMMAND }, - { SND_JACK_BTN_2, KEY_VOLUMEUP }, - { SND_JACK_BTN_3, KEY_VOLUMEDOWN }, - {} -}; - -static void alc_headset_btn_callback(struct hda_codec *codec, - struct hda_jack_callback *jack) +static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec) { - int report = 0; - - if (jack->unsol_res & (7 << 13)) - report |= SND_JACK_BTN_0; - - if (jack->unsol_res & (1 << 16 | 3 << 8)) - report |= SND_JACK_BTN_1; - - /* Volume up key */ - if (jack->unsol_res & (7 << 23)) - report |= SND_JACK_BTN_2; - - /* Volume down key */ - if (jack->unsol_res & (7 << 10)) - report |= SND_JACK_BTN_3; - - jack->jack->button_state = report; -} - -static void alc_fixup_headset_jack(struct hda_codec *codec, - const struct hda_fixup *fix, int action) -{ - - switch (action) { - case HDA_FIXUP_ACT_PRE_PROBE: - snd_hda_jack_detect_enable_callback(codec, 0x55, - alc_headset_btn_callback); - snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, - SND_JACK_HEADSET, alc_headset_btn_keymap); + switch (codec->core.vendor_id) { + case 0x10ec0274: + case 0x10ec0294: + case 0x10ec0225: + case 0x10ec0295: + case 0x10ec0299: + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); break; - case HDA_FIXUP_ACT_INIT: - switch (codec->core.vendor_id) { - case 0x10ec0215: - case 0x10ec0225: - case 0x10ec0285: - case 0x10ec0295: - case 0x10ec0289: - case 0x10ec0299: - alc_write_coef_idx(codec, 0x48, 0xd011); - alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); - alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8); - break; - case 0x10ec0236: - 
case 0x10ec0256: - alc_write_coef_idx(codec, 0x48, 0xd011); - alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); - break; - } + case 0x10ec0235: + case 0x10ec0236: + case 0x10ec0255: + case 0x10ec0256: + alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ + alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); break; } } @@ -5735,16 +6074,7 @@ static void alc295_fixup_chromebook(struct hda_codec *codec, spec->ultra_low_power = true; break; case HDA_FIXUP_ACT_INIT: - switch (codec->core.vendor_id) { - case 0x10ec0295: - alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ - alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); - break; - case 0x10ec0236: - alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ - alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); - break; - } + alc_combo_jack_hp_jd_restart(codec); break; } } @@ -5756,6 +6086,60 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec, snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } + +static void alc294_gx502_toggle_output(struct hda_codec *codec, + struct hda_jack_callback *cb) +{ + /* The Windows driver sets the codec up in a very different way where + * it appears to leave 0x10 = 0x8a20 set. For Linux we need to toggle it + */ + if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT) + alc_write_coef_idx(codec, 0x10, 0x8a20); + else + alc_write_coef_idx(codec, 0x10, 0x0a20); +} + +static void alc294_fixup_gx502_hp(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + /* Pin 0x21: headphones/headset mic */ + if (!is_jack_detectable(codec, 0x21)) + return; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_jack_detect_enable_callback(codec, 0x21, + alc294_gx502_toggle_output); + break; + case HDA_FIXUP_ACT_INIT: + /* Make sure to start in a correct state, i.e. 
if + * headphones have been plugged in before powering up the system + */ + alc294_gx502_toggle_output(codec, NULL); + break; + } +} + +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_INIT) + return; + + msleep(100); + alc_write_coef_idx(codec, 0x65, 0x0); +} + +static void alc274_fixup_hp_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + switch (action) { + case HDA_FIXUP_ACT_INIT: + alc_combo_jack_hp_jd_restart(codec); + break; + } +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -5770,6 +6154,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, #include "hp_x360_helper.c" enum { + ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, ALC275_FIXUP_SONY_VAIO_GPIO2, ALC269_FIXUP_DELL_M101Z, @@ -5801,6 +6186,7 @@ enum { ALC269_FIXUP_HP_LINE1_MIC1_LED, ALC269_FIXUP_INV_DMIC, ALC269_FIXUP_LENOVO_DOCK, + ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, ALC269_FIXUP_NO_SHUTUP, ALC286_FIXUP_SONY_MIC_NO_PRESENCE, ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, @@ -5845,6 +6231,7 @@ enum { ALC283_FIXUP_HEADSET_MIC, ALC255_FIXUP_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, + ALC269VB_FIXUP_ASPIRE_E1_COEF, ALC280_FIXUP_HP_GPIO4, ALC286_FIXUP_HP_GPIO_LED, ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, @@ -5863,8 +6250,6 @@ enum { ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, ALC275_FIXUP_DELL_XPS, - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ALC255_FIXUP_DELL_SPK_NOISE, @@ -5876,6 +6261,7 @@ enum { ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, ALC298_FIXUP_SPK_VOLUME, + ALC298_FIXUP_LENOVO_SPK_VOLUME, ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, ALC269_FIXUP_ATIV_BOOK_8, ALC221_FIXUP_HP_MIC_NO_PRESENCE, @@ -5888,9 +6274,11 @@ enum { ALC233_FIXUP_ACER_HEADSET_MIC, ALC294_FIXUP_LENOVO_MIC_LOCATION, ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, + ALC225_FIXUP_S3_POP_NOISE, ALC700_FIXUP_INTEL_REFERENCE, ALC274_FIXUP_DELL_BIND_DACS, ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, + ALC298_FIXUP_TPT470_DOCK_FIX, ALC298_FIXUP_TPT470_DOCK, ALC255_FIXUP_DUMMY_LINEOUT_VERB, ALC255_FIXUP_DELL_HEADSET_MIC, @@ -5921,11 +6309,46 @@ enum { ALC289_FIXUP_DUAL_SPK, ALC294_FIXUP_SPK2_TO_DAC1, ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_HPE, + ALC294_FIXUP_ASUS_COEF_1B, + ALC294_FIXUP_ASUS_GX502_HP, + ALC294_FIXUP_ASUS_GX502_PINS, + ALC294_FIXUP_ASUS_GX502_VERBS, + ALC285_FIXUP_HP_GPIO_LED, + ALC285_FIXUP_HP_MUTE_LED, + ALC236_FIXUP_HP_MUTE_LED, + ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, + ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, + ALC269VC_FIXUP_ACER_HEADSET_MIC, + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, + ALC289_FIXUP_ASUS_GA401, + ALC289_FIXUP_ASUS_GA502, + ALC256_FIXUP_ACER_MIC_NO_PRESENCE, + ALC285_FIXUP_HP_GPIO_AMP_INIT, + ALC269_FIXUP_CZC_B20, + ALC269_FIXUP_CZC_TMI, + ALC269_FIXUP_CZC_L101, + ALC269_FIXUP_LEMOTE_A1802, + ALC269_FIXUP_LEMOTE_A190X, + ALC256_FIXUP_INTEL_NUC8_RUGGED, + ALC233_FIXUP_INTEL_NUC8_DMIC, + ALC233_FIXUP_INTEL_NUC8_BOOST, + ALC256_FIXUP_INTEL_NUC10, + ALC255_FIXUP_XIAOMI_HEADSET_MIC, + ALC274_FIXUP_HP_MIC, + ALC274_FIXUP_HP_HEADSET_MIC, + ALC256_FIXUP_ASUS_HPE, + ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, }; static const struct hda_fixup alc269_fixups[] = { + [ALC269_FIXUP_GPIO2] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_gpio2, + }, 
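Many entries in the fixup table that follows are chained: a .chain_id field points at another entry that is applied after the current one, so boards can share a common tail of fixes (e.g. ALC233_FIXUP_INTEL_NUC8_DMIC chains to ALC233_FIXUP_INTEL_NUC8_BOOST). A simplified model of the chain walk; this is not the kernel's struct hda_fixup, and the names are made up:

#include <stdio.h>

enum { FIX_BOOST, FIX_DMIC, FIX_NONE = -1 };

struct fixup {
	const char *name;
	int chain_id;	/* FIX_NONE terminates the chain */
};

static const struct fixup fixups[] = {
	[FIX_BOOST] = { "limit int mic boost", FIX_NONE },
	[FIX_DMIC]  = { "inverted dmic",       FIX_BOOST },
};

/* Applying one fixup walks the whole chain, exactly like the table's
 * .chained/.chain_id pairs compose board-specific and shared fixes.
 */
static void apply_fixup(int id)
{
	while (id != FIX_NONE) {
		printf("applying: %s\n", fixups[id].name);
		id = fixups[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(FIX_DMIC);	/* applies the dmic fixup, then its chained boost fixup */
	return 0;
}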
[ALC269_FIXUP_SONY_VAIO] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { @@ -6120,6 +6543,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT }, + [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC269_FIXUP_LENOVO_DOCK, + }, [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_no_hp_to_lineout, @@ -6475,6 +6904,10 @@ static const struct hda_fixup alc269_fixups[] = { { }, }, }, + [ALC269VB_FIXUP_ASPIRE_E1_COEF] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269vb_fixup_aspire_e1_coef, + }, [ALC280_FIXUP_HP_GPIO4] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_gpio4, @@ -6604,23 +7037,6 @@ static const struct hda_fixup alc269_fixups[] = { {} } }, - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { - .type = HDA_FIXUP_VERBS, - .v.verbs = (const struct hda_verb[]) { - /* Disable pass-through path for FRONT 14h */ - {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, - {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, - {} - }, - .chained = true, - .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE - }, - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc256_fixup_dell_xps_13_headphone_noise2, - .chained = true, - .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE - }, [ALC293_FIXUP_LENOVO_SPK_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, @@ -6631,6 +7047,16 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_line2_mic_hotkey, }, + [ALC233_FIXUP_INTEL_NUC8_DMIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_inv_dmic, + .chained = true, + .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST, + }, + [ALC233_FIXUP_INTEL_NUC8_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost + }, [ALC255_FIXUP_DELL_SPK_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, @@ -6679,6 +7105,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, }, + [ALC298_FIXUP_LENOVO_SPK_VOLUME] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_speaker_volume, + }, [ALC295_FIXUP_DISABLE_DAC3] = { .type = HDA_FIXUP_FUNC, .v.func = alc295_fixup_disable_dac3, @@ -6756,6 +7186,8 @@ static const struct hda_fixup alc269_fixups[] = { [ALC233_FIXUP_LENOVO_MULTI_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, + .chained = true, + .chain_id = ALC269_FIXUP_GPIO2 }, [ALC233_FIXUP_ACER_HEADSET_MIC] = { .type = HDA_FIXUP_VERBS, @@ -6789,6 +7221,12 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, + .chain_id = ALC225_FIXUP_S3_POP_NOISE + }, + [ALC225_FIXUP_S3_POP_NOISE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc225_fixup_s3_pop_noise, + .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC700_FIXUP_INTEL_REFERENCE] = { @@ -6821,12 +7259,18 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC274_FIXUP_DELL_BIND_DACS }, - [ALC298_FIXUP_TPT470_DOCK] = { + [ALC298_FIXUP_TPT470_DOCK_FIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_tpt470_dock, .chained = true, .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE }, + [ALC298_FIXUP_TPT470_DOCK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_tpt470_dacs, + .chained = true, + .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX + }, 
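The HDA_FIXUP_VERBS entries above program vendor COEF registers as index/data pairs on NID 0x20: AC_VERB_SET_COEF_INDEX selects a register, then AC_VERB_SET_PROC_COEF writes its value — the same sequence alc_write_coef_idx() performs. A toy register file standing in for the codec makes the pairing visible; the verb IDs 0x500/0x400 match the HDA spec, everything else is illustrative:

#include <stdio.h>

static unsigned short coef_regs[256];	/* toy stand-in for the codec's COEF space */
static unsigned char coef_index;

static void codec_verb(unsigned int verb, unsigned int param)
{
	enum { SET_COEF_INDEX = 0x500, SET_PROC_COEF = 0x400 };	/* HDA verb IDs */

	if (verb == SET_COEF_INDEX)
		coef_index = param & 0xff;	/* first verb: latch the index */
	else if (verb == SET_PROC_COEF)
		coef_regs[coef_index] = param & 0xffff;	/* second verb: write the data */
}

int main(void)
{
	/* equivalent of { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b },
	 *               { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b } */
	codec_verb(0x500, 0x1b);
	codec_verb(0x400, 0x4e4b);
	printf("coef[0x1b] = 0x%04x\n", coef_regs[0x1b]);
	return 0;
}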
[ALC255_FIXUP_DUMMY_LINEOUT_VERB] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6887,7 +7331,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, @@ -6896,7 +7340,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_SPK] = { .type = HDA_FIXUP_VERBS, @@ -6904,6 +7348,8 @@ static const struct hda_fixup alc269_fixups[] = { /* Set EAPD high */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, { } }, .chained = true, @@ -7044,11 +7490,17 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 }, + [ALC285_FIXUP_THINKPAD_X1_GEN7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_thinkpad_x1_gen7, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI + }, [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_jack, .chained = true, - .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1 + .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7 }, [ALC294_FIXUP_ASUS_HPE] = { .type = HDA_FIXUP_VERBS, @@ -7061,6 +7513,271 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC }, + [ALC294_FIXUP_ASUS_GX502_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, /* front HP mic */ + { 0x1a, 0x01a11830 }, /* rear external mic */ + { 0x21, 0x03211020 }, /* front HP out */ + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_GX502_VERBS + }, + [ALC294_FIXUP_ASUS_GX502_VERBS] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* set 0x15 to HP-OUT ctrl */ + { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 }, + /* unmute the 0x15 amp */ + { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_GX502_HP + }, + [ALC294_FIXUP_ASUS_GX502_HP] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc294_fixup_gx502_hp, + }, + [ALC294_FIXUP_ASUS_COEF_1B] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Set bit 10 to correct noisy output after reboot from + * Windows 10 (due to pop noise reduction?) 
+ */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b }, + { } + }, + }, + [ALC285_FIXUP_HP_GPIO_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_led, + }, + [ALC285_FIXUP_HP_MUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_mute_led, + }, + [ALC236_FIXUP_HP_MUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc236_fixup_hp_mute_led, + }, + [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc5 }, + { } + }, + }, + [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, + [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90100120 }, /* use as internal speaker */ + { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */ + { 0x1a, 0x01011020 }, /* use as line out */ + { }, + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x02a11030 }, /* use as headset mic */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC289_FIXUP_ASUS_GA401] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc289_fixup_asus_ga401, + .chained = true, + .chain_id = ALC289_FIXUP_ASUS_GA502, + }, + [ALC289_FIXUP_ASUS_GA502] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + }, + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_amp_init, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_LED + }, + [ALC269_FIXUP_CZC_B20] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x411111f0 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x15, 0x032f1020 }, /* HP out */ + { 0x17, 0x411111f0 }, + { 0x18, 0x03ab1040 }, /* mic */ + { 0x19, 0xb7a7013f }, + { 0x1a, 0x0181305f }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x411111f0 }, + { 0x1e, 0x411111f0 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_CZC_TMI] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x4000c000 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x15, 0x0421401f }, /* HP out */ + { 0x17, 0x411111f0 }, + { 0x18, 0x04a19020 }, /* mic */ + { 0x19, 0x411111f0 }, + { 0x1a, 0x411111f0 }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x40448505 }, + { 0x1e, 0x411111f0 }, + { 0x20, 0x8000ffff }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_CZC_L101] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x40000000 }, + { 0x14, 0x01014010 }, /* speaker */ + { 0x15, 0x411111f0 }, /* HP out 
*/ + { 0x16, 0x411111f0 }, + { 0x18, 0x01a19020 }, /* mic */ + { 0x19, 0x02a19021 }, + { 0x1a, 0x0181302f }, + { 0x1b, 0x0221401f }, + { 0x1c, 0x411111f0 }, + { 0x1d, 0x4044c601 }, + { 0x1e, 0x411111f0 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_LEMOTE_A1802] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x40000000 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x17, 0x411111f0 }, + { 0x18, 0x03a19040 }, /* mic1 */ + { 0x19, 0x90a70130 }, /* mic2 */ + { 0x1a, 0x411111f0 }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x40489d2d }, + { 0x1e, 0x411111f0 }, + { 0x20, 0x0003ffff }, + { 0x21, 0x03214020 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_LEMOTE_A190X] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x99130110 }, /* speaker */ + { 0x15, 0x0121401f }, /* HP out */ + { 0x18, 0x01a19c20 }, /* rear mic */ + { 0x19, 0x99a3092f }, /* front mic */ + { 0x1b, 0x0201401f }, /* front lineout */ + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC256_FIXUP_INTEL_NUC8_RUGGED] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, + [ALC256_FIXUP_INTEL_NUC10] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, + [ALC255_FIXUP_XIAOMI_HEADSET_MIC] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, + { } + }, + .chained = true, + .chain_id = ALC289_FIXUP_ASUS_GA502 + }, + [ALC274_FIXUP_HP_MIC] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, + { } + }, + }, + [ALC274_FIXUP_HP_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc274_fixup_hp_headset_mic, + .chained = true, + .chain_id = ALC274_FIXUP_HP_MIC + }, + [ALC256_FIXUP_ASUS_HPE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Set EAPD high */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7778 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, + [ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_headset_jack, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7069,23 +7786,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700), SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC), - SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 
0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF), + SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1025, 0x1167, "Acer Veriton N6640G", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), @@ -7114,17 +7839,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), - SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP), - SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), - SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), @@ -7133,8 +7855,8 
@@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), - SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -7142,35 +7864,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), - SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY), - /* ALC282 */ SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), + SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS), - SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS), - SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M), - SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - /* ALC290 */ - SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), @@ -7178,36 +7883,59 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { 
SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY), SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS), SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS), SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M), + SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), + SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), - SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), + SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x84e7, "HP 
Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -7216,62 +7944,125 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS 
S101", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), - SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), + SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), - SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 
0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50a3, "Clevo NJ51GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50b3, "Clevo NK50S[BEZ]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50b6, "Clevo NK50S5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8535, "Clevo NH50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8536, "Clevo NH79D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC), + SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", 
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), - SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), - SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), @@ -7297,8 +8088,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), @@ -7309,9 +8102,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), @@ -7330,12 +8125,21 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", 
ALC298_FIXUP_TPT470_DOCK), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ + SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), + SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), + SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), - SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10), #if 0 /* Below is a quirk table taken from the old code. @@ -7407,6 +8211,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"}, {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, + {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"}, {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, @@ -7418,6 +8223,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, + {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"}, {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"}, {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"}, @@ -7463,6 +8269,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"}, {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"}, {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"}, + {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"}, {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"}, {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"}, @@ -7477,7 +8284,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"}, {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"}, {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"}, - {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"}, {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name 
= "dell-spk-noise"}, @@ -7506,6 +8312,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"}, + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, + {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"}, {} }; #define ALC225_STANDARD_PINS \ @@ -7734,6 +8543,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1a, 0x90a70130}, {0x1b, 0x90170110}, {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0274, 0x103c, "HP", ALC274_FIXUP_HP_HEADSET_MIC, + {0x17, 0x90170110}, + {0x19, 0x03a11030}, + {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x14, 0x90170110}, @@ -7789,6 +8602,20 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x14, 0x90170110}, {0x19, 0x04a11040}, {0x21, 0x04211020}), + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, + {0x14, 0x90170110}, + {0x19, 0x04a11040}, + {0x1d, 0x40600001}, + {0x21, 0x04211020}), + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, + {0x14, 0x90170110}, + {0x19, 0x04a11040}, + {0x21, 0x04211020}), + SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_HEADSET_JACK, + {0x14, 0x90170110}, + {0x17, 0x90170111}, + {0x19, 0x03a11030}, + {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x17, 0x90170110}, @@ -7852,6 +8679,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x13, 0x90a60140}), + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE, + {0x17, 0x90170110}, + {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC, {0x14, 0x90170110}, {0x1b, 0x90a70130}, @@ -7868,6 +8698,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + {0x12, 0x90a60120}, + {0x17, 0x90170110}, + {0x21, 0x04211030}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x17, 0x90170110}, + {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x17, 0x90170110}, + {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x04211020}), @@ -7908,6 +8750,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x17, 0x90170110}), + SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, + {0x14, 0x01014010}, + {0x17, 0x90170120}, + {0x18, 0x02a11030}, + {0x19, 0x02a1103f}, + {0x21, 0x0221101f}), {} }; @@ -8072,7 +8920,9 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; break; case 0x10ec0215: + case 0x10ec0245: case 0x10ec0285: + case 0x10ec0287: case 0x10ec0289: spec->codec_variant = ALC269_TYPE_ALC215; spec->shutup = alc225_shutup; @@ -8080,8 +8930,6 
@@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; break; case 0x10ec0225: - codec->power_save_node = 1; - /* fall through */ case 0x10ec0295: case 0x10ec0299: spec->codec_variant = ALC269_TYPE_ALC225; @@ -8242,8 +9090,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP), SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F), SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT), - SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F), - SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F), + SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F), SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505), {} }; @@ -8601,6 +9448,7 @@ enum { ALC662_FIXUP_LED_GPIO1, ALC662_FIXUP_IDEAPAD, ALC272_FIXUP_MARIO, + ALC662_FIXUP_CZC_ET26, ALC662_FIXUP_CZC_P10T, ALC662_FIXUP_SKU_IGNORE, ALC662_FIXUP_HP_RP5800, @@ -8670,6 +9518,25 @@ static const struct hda_fixup alc662_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc272_fixup_mario, }, + [ALC662_FIXUP_CZC_ET26] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + {0x12, 0x403cc000}, + {0x14, 0x90170110}, /* speaker */ + {0x15, 0x411111f0}, + {0x16, 0x411111f0}, + {0x18, 0x01a19030}, /* mic */ + {0x19, 0x90a7013f}, /* int-mic */ + {0x1a, 0x01014020}, + {0x1b, 0x0121401f}, + {0x1c, 0x411111f0}, + {0x1d, 0x411111f0}, + {0x1e, 0x40478e35}, + {} + }, + .chained = true, + .chain_id = ALC662_FIXUP_SKU_IGNORE + }, [ALC662_FIXUP_CZC_P10T] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -9018,6 +9885,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), + SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS), SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE), SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), @@ -9031,11 +9899,12 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), - SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A), SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50), SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751), + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16), SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51), @@ -9053,8 +9922,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), 
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON), + SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), - SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS), #if 0 /* Below is a quirk table taken from the old code. @@ -9333,6 +10202,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269), HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269), @@ -9352,6 +10222,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269), HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269), HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269), HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269), HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269), HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269), @@ -9393,6 +10264,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882), HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882), HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), + HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662), HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 894f3f509e76..bfd3fe5eff31 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -795,7 +795,7 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity) static bool has_builtin_speaker(struct hda_codec *codec) { struct sigmatel_spec *spec = codec->spec; - hda_nid_t *nid_pin; + const hda_nid_t *nid_pin; int nids, i; if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { @@ -832,7 +832,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec, static struct snd_kcontrol_new beep_vol_ctl = HDA_CODEC_VOLUME(NULL, 0, 0, 0); - /* check for mute support for the the amp */ + /* check for mute support for the amp */ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { const struct snd_kcontrol_new *temp; if (spec->anabeep_nid == nid) @@ -2182,7 +2182,7 @@ static void hp_envy_ts_fixup_dac_bind(struct hda_codec *codec, int action) { struct sigmatel_spec *spec = codec->spec; - static hda_nid_t preferred_pairs[] = { + static const hda_nid_t preferred_pairs[] = { 0xd, 0x13, 0 }; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 29dcdb8b36db..a5c1a2c4eae4 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec) spec->codec_type = VT1708S; spec->gen.indep_hp = 1; spec->gen.keep_eapd_on = 1; + spec->gen.dac_min_mute = 1; spec->gen.pcm_playback_hook = via_playback_pcm_hook; spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; codec->power_save_node = 1; @@ -396,7 +397,7 @@ static int via_resume(struct hda_codec *codec) /* some delay here to make jack detection working (bko#98921) */ msleep(10); codec->patch_ops.init(codec); - 
regcache_sync(codec->core.regmap); + snd_hda_regmap_sync(codec); return 0; } #endif @@ -1002,6 +1003,7 @@ static const struct hda_verb vt1802_init_verbs[] = { enum { VIA_FIXUP_INTMIC_BOOST, VIA_FIXUP_ASUS_G75, + VIA_FIXUP_POWER_SAVE, }; static void via_fixup_intmic_boost(struct hda_codec *codec, @@ -1011,6 +1013,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec, override_mic_boost(codec, 0x30, 0, 2, 40); } +static void via_fixup_power_save(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) + codec->power_save_node = 0; +} + static const struct hda_fixup via_fixups[] = { [VIA_FIXUP_INTMIC_BOOST] = { .type = HDA_FIXUP_FUNC, @@ -1025,11 +1034,16 @@ static const struct hda_fixup via_fixups[] = { { } } }, + [VIA_FIXUP_POWER_SAVE] = { + .type = HDA_FIXUP_FUNC, + .v.func = via_fixup_power_save, + }, }; static const struct snd_pci_quirk vt2002p_fixups[] = { SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE), {} }; @@ -1038,8 +1052,8 @@ static const struct snd_pci_quirk vt2002p_fixups[] = { */ static void fix_vt1802_connections(struct hda_codec *codec) { - static hda_nid_t conn_24[] = { 0x14, 0x1c }; - static hda_nid_t conn_33[] = { 0x1c }; + static const hda_nid_t conn_24[] = { 0x14, 0x1c }; + static const hda_nid_t conn_33[] = { 0x1c }; snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24); snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33); diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index 4b0dea7f7669..2654eebd5663 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c @@ -2360,7 +2360,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice) pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]); pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]); pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]); - if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) { + if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 && + ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) { ice->gpio.write_mask = ice->eeprom.gpiomask; ice->gpio.direction = ice->eeprom.gpiodir; snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c index 98f8ac658796..243f757da3ed 100644 --- a/sound/pci/ice1712/prodigy192.c +++ b/sound/pci/ice1712/prodigy192.c @@ -32,7 +32,7 @@ * Experimentally I found out that only a combination of * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 - * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct - * sampling rate. That means the the FPGA doubles the + * sampling rate. That means that the FPGA doubles the * MCK01 rate. 
* * Copyright (c) 2003 Takashi Iwai <tiwai@suse.de> diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c index 9d71e9d5c9a0..3cf41c11a405 100644 --- a/sound/pci/ice1712/prodigy_hifi.c +++ b/sound/pci/ice1712/prodigy_hifi.c @@ -536,7 +536,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); - ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; + ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; mutex_unlock(&ice->gpio_mutex); return 0; } @@ -550,7 +550,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol, mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, WM_ADC_MUX); - nval = (oval & 0xe0) | ucontrol->value.integer.value[0]; + nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0]; if (nval != oval) { wm_put(ice, WM_ADC_MUX, nval); change = 1; diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c index 048a2660d18d..363b26e85af5 100644 --- a/sound/pci/mixart/mixart_core.c +++ b/sound/pci/mixart/mixart_core.c @@ -70,7 +70,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp, unsigned int i; #endif - mutex_lock(&mgr->msg_lock); err = 0; /* copy message descriptor from miXart to driver */ @@ -119,8 +118,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp, writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD)); _clean_exit: - mutex_unlock(&mgr->msg_lock); - return err; } @@ -258,7 +255,9 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int resp.data = resp_data; resp.size = max_resp_size; + mutex_lock(&mgr->msg_lock); err = get_msg(mgr, &resp, msg_frame); + mutex_unlock(&mgr->msg_lock); if( request->message_id != resp.message_id ) dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n"); diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c index c3f8721624cd..b90421a1d909 100644 --- a/sound/pci/oxygen/xonar_dg.c +++ b/sound/pci/oxygen/xonar_dg.c @@ -29,7 +29,7 @@ * GPIO 4 <- headphone detect * GPIO 5 -> enable ADC analog circuit for the left channel * GPIO 6 -> enable ADC analog circuit for the right channel - * GPIO 7 -> switch green rear output jack between CS4245 and and the first + * GPIO 7 -> switch green rear output jack between CS4245 and the first * channel of CS4361 (mechanical relay) * GPIO 8 -> enable output to speakers * diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c index f4ee6798154a..1612ec65aaf6 100644 --- a/sound/soc/amd/acp-da7219-max98357a.c +++ b/sound/soc/amd/acp-da7219-max98357a.c @@ -73,8 +73,13 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) return ret; } - da7219_dai_wclk = clk_get(component->dev, "da7219-dai-wclk"); - da7219_dai_bclk = clk_get(component->dev, "da7219-dai-bclk"); + da7219_dai_wclk = devm_clk_get(component->dev, "da7219-dai-wclk"); + if (IS_ERR(da7219_dai_wclk)) + return PTR_ERR(da7219_dai_wclk); + + da7219_dai_bclk = devm_clk_get(component->dev, "da7219-dai-bclk"); + if (IS_ERR(da7219_dai_bclk)) + return PTR_ERR(da7219_dai_bclk); ret = snd_soc_card_jack_new(card, "Headset Jack", SND_JACK_HEADSET | SND_JACK_LINEOUT | diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c index 71562154c0b1..eca5fc559d69 100644 --- a/sound/soc/codecs/ak4458.c +++ b/sound/soc/codecs/ak4458.c @@ -523,18 +523,10 @@ static struct snd_soc_dai_driver ak4497_dai = { .ops = &ak4458_dai_ops, }; -static void ak4458_power_off(struct 
ak4458_priv *ak4458) +static void ak4458_reset(struct ak4458_priv *ak4458, bool active) { if (ak4458->reset_gpiod) { - gpiod_set_value_cansleep(ak4458->reset_gpiod, 0); - usleep_range(1000, 2000); - } -} - -static void ak4458_power_on(struct ak4458_priv *ak4458) -{ - if (ak4458->reset_gpiod) { - gpiod_set_value_cansleep(ak4458->reset_gpiod, 1); + gpiod_set_value_cansleep(ak4458->reset_gpiod, active); usleep_range(1000, 2000); } } @@ -548,7 +540,7 @@ static int ak4458_init(struct snd_soc_component *component) if (ak4458->mute_gpiod) gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); - ak4458_power_on(ak4458); + ak4458_reset(ak4458, false); ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1, 0x80, 0x80); /* ACKS bit = 1; 10000000 */ @@ -571,7 +563,7 @@ static void ak4458_remove(struct snd_soc_component *component) { struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component); - ak4458_power_off(ak4458); + ak4458_reset(ak4458, true); } #ifdef CONFIG_PM @@ -581,7 +573,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev) regcache_cache_only(ak4458->regmap, true); - ak4458_power_off(ak4458); + ak4458_reset(ak4458, true); if (ak4458->mute_gpiod) gpiod_set_value_cansleep(ak4458->mute_gpiod, 0); @@ -596,8 +588,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev) if (ak4458->mute_gpiod) gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); - ak4458_power_off(ak4458); - ak4458_power_on(ak4458); + ak4458_reset(ak4458, true); + ak4458_reset(ak4458, false); regcache_cache_only(ak4458->regmap, false); regcache_mark_dirty(ak4458->regmap); @@ -715,6 +707,7 @@ static const struct of_device_id ak4458_of_match[] = { { .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata}, { }, }; +MODULE_DEVICE_TABLE(of, ak4458_of_match); static struct i2c_driver ak4458_i2c_driver = { .driver = { diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c index 8179512129d3..e98312a6b840 100644 --- a/sound/soc/codecs/ak5558.c +++ b/sound/soc/codecs/ak5558.c @@ -264,7 +264,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558) if (!ak5558->reset_gpiod) return; - gpiod_set_value_cansleep(ak5558->reset_gpiod, 0); + gpiod_set_value_cansleep(ak5558->reset_gpiod, 1); usleep_range(1000, 2000); } @@ -273,7 +273,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558) if (!ak5558->reset_gpiod) return; - gpiod_set_value_cansleep(ak5558->reset_gpiod, 1); + gpiod_set_value_cansleep(ak5558->reset_gpiod, 0); usleep_range(1000, 2000); } @@ -389,6 +389,7 @@ static const struct of_device_id ak5558_i2c_dt_ids[] = { { .compatible = "asahi-kasei,ak5558"}, { } }; +MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids); static struct i2c_driver ak5558_i2c_driver = { .driver = { diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c index d7f05b384f1f..1902689c5ea2 100644 --- a/sound/soc/codecs/cpcap.c +++ b/sound/soc/codecs/cpcap.c @@ -1263,12 +1263,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream, if (direction == SNDRV_PCM_STREAM_CAPTURE) { mask = 0x0000; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2; - mask |= CPCAP_BIT_MIC2_TIMESLOT0; - mask |= CPCAP_BIT_MIC2_TIMESLOT1; - mask |= CPCAP_BIT_MIC2_TIMESLOT2; + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0); + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1); + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2); val = 0x0000; 
if (channels >= 2) val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0); diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c index 5125bb9b37b5..dcd2acb2c3ce 100644 --- a/sound/soc/codecs/cs42l42.c +++ b/sound/soc/codecs/cs42l42.c @@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = { }; static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false); -static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false); +static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true); static const char * const cs42l42_hpf_freq_text[] = { "1.86Hz", "120Hz", "235Hz", "466Hz" @@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = { CS42L42_DAC_HPF_EN_SHIFT, true, false), SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL, CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT, - 0x3e, 1, mixer_tlv) + 0x3f, 1, mixer_tlv) }; static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w, @@ -691,24 +691,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component) CS42L42_CLK_OASRC_SEL_MASK, CS42L42_CLK_OASRC_SEL_12 << CS42L42_CLK_OASRC_SEL_SHIFT); - /* channel 1 on low LRCLK, 32 bit */ - snd_soc_component_update_bits(component, - CS42L42_ASP_RX_DAI0_CH1_AP_RES, - CS42L42_ASP_RX_CH_AP_MASK | - CS42L42_ASP_RX_CH_RES_MASK, - (CS42L42_ASP_RX_CH_AP_LOW << - CS42L42_ASP_RX_CH_AP_SHIFT) | - (CS42L42_ASP_RX_CH_RES_32 << - CS42L42_ASP_RX_CH_RES_SHIFT)); - /* Channel 2 on high LRCLK, 32 bit */ - snd_soc_component_update_bits(component, - CS42L42_ASP_RX_DAI0_CH2_AP_RES, - CS42L42_ASP_RX_CH_AP_MASK | - CS42L42_ASP_RX_CH_RES_MASK, - (CS42L42_ASP_RX_CH_AP_HI << - CS42L42_ASP_RX_CH_AP_SHIFT) | - (CS42L42_ASP_RX_CH_RES_32 << - CS42L42_ASP_RX_CH_RES_SHIFT)); if (pll_ratio_table[i].mclk_src_sel == 0) { /* Pass the clock straight through */ snd_soc_component_update_bits(component, @@ -797,27 +779,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) /* Bitclock/frame inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT; break; case SND_SOC_DAIFMT_NB_IF: - asp_cfg_val |= CS42L42_ASP_POL_INV << - CS42L42_ASP_LCPOL_IN_SHIFT; + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT; + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT; break; case SND_SOC_DAIFMT_IB_NF: - asp_cfg_val |= CS42L42_ASP_POL_INV << - CS42L42_ASP_SCPOL_IN_DAC_SHIFT; break; case SND_SOC_DAIFMT_IB_IF: - asp_cfg_val |= CS42L42_ASP_POL_INV << - CS42L42_ASP_LCPOL_IN_SHIFT; - asp_cfg_val |= CS42L42_ASP_POL_INV << - CS42L42_ASP_SCPOL_IN_DAC_SHIFT; + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT; break; } - snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, - CS42L42_ASP_MODE_MASK | - CS42L42_ASP_SCPOL_IN_DAC_MASK | - CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val); + snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK | + CS42L42_ASP_SCPOL_MASK | + CS42L42_ASP_LCPOL_MASK, + asp_cfg_val); return 0; } @@ -828,14 +806,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_component *component = dai->component; struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component); - int retval; + unsigned int width = (params_width(params) / 8) - 1; + unsigned int val = 0; cs42l42->srate = params_rate(params); - cs42l42->swidth = params_width(params); - retval = cs42l42_pll_config(component); + switch(substream->stream) { + case SNDRV_PCM_STREAM_PLAYBACK: + val |= width << 
CS42L42_ASP_RX_CH_RES_SHIFT; + /* channel 1 on low LRCLK */ + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES, + CS42L42_ASP_RX_CH_AP_MASK | + CS42L42_ASP_RX_CH_RES_MASK, val); + /* Channel 2 on high LRCLK */ + val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT; + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES, + CS42L42_ASP_RX_CH_AP_MASK | + CS42L42_ASP_RX_CH_RES_MASK, val); + break; + default: + break; + } - return retval; + return cs42l42_pll_config(component); } static int cs42l42_set_sysclk(struct snd_soc_dai *dai, @@ -900,9 +893,9 @@ static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute) return 0; } -#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \ - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \ - SNDRV_PCM_FMTBIT_S32_LE) +#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE ) static const struct snd_soc_dai_ops cs42l42_ops = { @@ -1803,7 +1796,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client, dev_dbg(&i2c_client->dev, "Found reset GPIO\n"); gpiod_set_value_cansleep(cs42l42->reset_gpio, 1); } - mdelay(3); + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2); /* Request IRQ */ ret = devm_request_threaded_irq(&i2c_client->dev, @@ -1928,6 +1921,7 @@ static int cs42l42_runtime_resume(struct device *dev) } gpiod_set_value_cansleep(cs42l42->reset_gpio, 1); + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2); regcache_cache_only(cs42l42->regmap, false); regcache_sync(cs42l42->regmap); diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h index 9e3cc528dcff..866d7c873e3c 100644 --- a/sound/soc/codecs/cs42l42.h +++ b/sound/soc/codecs/cs42l42.h @@ -258,11 +258,12 @@ #define CS42L42_ASP_SLAVE_MODE 0x00 #define CS42L42_ASP_MODE_SHIFT 4 #define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT) -#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2 -#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT) -#define CS42L42_ASP_LCPOL_IN_SHIFT 0 -#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT) -#define CS42L42_ASP_POL_INV 1 +#define CS42L42_ASP_SCPOL_SHIFT 2 +#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT) +#define CS42L42_ASP_SCPOL_NOR 3 +#define CS42L42_ASP_LCPOL_SHIFT 0 +#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT) +#define CS42L42_ASP_LCPOL_INV 3 #define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08) #define CS42L42_ASP_STP_SHIFT 4 @@ -739,6 +740,7 @@ #define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16) #define CS42L42_NUM_SUPPLIES 5 +#define CS42L42_BOOT_TIME_US 3000 static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = { "VA", @@ -756,7 +758,6 @@ struct cs42l42_private { struct completion pdn_done; u32 sclk; u32 srate; - u32 swidth; u8 plug_state; u8 hs_type; u8 ts_inv; diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index 55408c8fcb4e..cdd7ae90c2b5 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -247,8 +247,28 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = { &cs42l51_adcr_mux_controls), }; +static int mclk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm); + struct cs42l51_private *cs42l51 = snd_soc_component_get_drvdata(comp); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + return clk_prepare_enable(cs42l51->mclk_handle); + case 
SND_SOC_DAPM_POST_PMD: + /* Delay mclk shutdown to fulfill power-down sequence requirements */ + msleep(20); + clk_disable_unprepare(cs42l51->mclk_handle); + break; + } + + return 0; +} + static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = { - SND_SOC_DAPM_CLOCK_SUPPLY("MCLK") + SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, mclk_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route cs42l51_routes[] = { diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index ac569ab3d30f..51d7a87ab4c3 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1248,6 +1248,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, dev_err(&i2c_client->dev, "CS42L56 Device ID (%X). Expected %X\n", devid, CS42L56_DEVID); + ret = -EINVAL; goto err_enable; } alpha_rev = reg & CS42L56_AREV_MASK; @@ -1305,7 +1306,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, ret = devm_snd_soc_register_component(&i2c_client->dev, &soc_component_dev_cs42l56, &cs42l56_dai, 1); if (ret < 0) - return ret; + goto err_enable; return 0; diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c index 1c1ba7bea4d8..8ee4b2e1ff68 100644 --- a/sound/soc/codecs/cx2072x.c +++ b/sound/soc/codecs/cx2072x.c @@ -1579,7 +1579,7 @@ static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = { .id = CX2072X_DAI_DSP, .probe = cx2072x_dsp_dai_probe, .playback = { - .stream_name = "Playback", + .stream_name = "DSP Playback", .channels_min = 2, .channels_max = 2, .rates = CX2072X_RATES_DSP, @@ -1591,7 +1591,7 @@ static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = { .name = "cx2072x-aec", .id = 3, .capture = { - .stream_name = "Capture", + .stream_name = "AEC Capture", .channels_min = 2, .channels_max = 2, .rates = CX2072X_RATES_DSP, diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c index 36eef1fb3d18..b781b28de012 100644 --- a/sound/soc/codecs/es8316.c +++ b/sound/soc/codecs/es8316.c @@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv, 1, 1, TLV_DB_SCALE_ITEM(0, 0, 0), 2, 2, TLV_DB_SCALE_ITEM(250, 0, 0), 3, 3, TLV_DB_SCALE_ITEM(450, 0, 0), - 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0), - 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0), - 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0), - 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0), - 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0), - 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0), - 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0), + 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0), + 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0), ); static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv, diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 18c173e6a13b..78d5b4d31bb6 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -150,14 +150,14 @@ static struct hdac_hdmi_pcm * hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi, struct hdac_hdmi_cvt *cvt) { - struct hdac_hdmi_pcm *pcm = NULL; + struct hdac_hdmi_pcm *pcm; list_for_each_entry(pcm, &hdmi->pcm_list, head) { if (pcm->cvt == cvt) - break; + return pcm; } - return pcm; + return NULL; } static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm, diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 45da2b51543e..6b9d326e11b0 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2112,10 +2112,16 @@ static void max98090_pll_work(struct max98090_priv *max98090) dev_info_ratelimited(component->dev, "PLL unlocked\n"); + /* + * As the datasheet suggested, the 
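The hdac_hdmi hunk above fixes a classic list-iteration pitfall: when list_for_each_entry() runs to completion without a break, the cursor is not NULL, it aliases the list head, so returning it reports a bogus match. A self-contained sketch of the corrected shape; struct item and find_item() are illustrative names:

#include <linux/list.h>

struct item {
	struct list_head head;
	int key;
};

static struct item *find_item(struct list_head *list, int key)
{
	struct item *it;

	list_for_each_entry(it, list, head) {
		if (it->key == key)
			return it;	/* safe: it is a real entry here */
	}
	return NULL;	/* falling out of the loop means "not found" */
}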
maximum PLL lock time should be + * 7 msec. The workaround resets the codec softly by toggling SHDN + * off and on if the PLL has failed to lock for 10 msec. Notably, there is + * no suggested hold time for SHDN off. + */ + /* Toggle shutdown OFF then ON */ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, 0); - msleep(10); snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, M98090_SHDNN_MASK); diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index cae1def8902d..16fbc9faed90 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -410,11 +410,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w, regmap_update_bits(max98373->regmap, MAX98373_R20FF_GLOBAL_SHDN, MAX98373_GLOBAL_EN_MASK, 1); + usleep_range(30000, 31000); break; case SND_SOC_DAPM_POST_PMD: regmap_update_bits(max98373->regmap, MAX98373_R20FF_GLOBAL_SHDN, MAX98373_GLOBAL_EN_MASK, 0); + usleep_range(30000, 31000); max98373->tdm_mode = false; break; default: @@ -850,8 +852,8 @@ static int max98373_resume(struct device *dev) { struct max98373_priv *max98373 = dev_get_drvdata(dev); - max98373_reset(max98373, dev); regcache_cache_only(max98373->regmap, false); + max98373_reset(max98373, dev); regcache_sync(max98373->regmap); return 0; } diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c index 8600c5439e1e..2e4aa23b5a60 100644 --- a/sound/soc/codecs/max9867.c +++ b/sound/soc/codecs/max9867.c @@ -46,13 +46,13 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv, static const struct snd_kcontrol_new max9867_snd_controls[] = { SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL, - MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv), + MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv), SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL, MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv), SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN, MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv), SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN, - MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv), + MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv), SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1), SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1, max9867_dac_tlv), diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 84289ebeae87..337bddb7c2a4 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -19,8 +19,8 @@ #define CDC_D_REVISION1 (0xf000) #define CDC_D_PERPH_SUBTYPE (0xf005) -#define CDC_D_INT_EN_SET (0x015) -#define CDC_D_INT_EN_CLR (0x016) +#define CDC_D_INT_EN_SET (0xf015) +#define CDC_D_INT_EN_CLR (0xf016) #define MBHC_SWITCH_INT BIT(7) #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6) #define MBHC_BUTTON_PRESS_DET BIT(5) diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c index 88b75695fbf7..b37e5fbbd301 100644 --- a/sound/soc/codecs/pcm3168a.c +++ b/sound/soc/codecs/pcm3168a.c @@ -302,6 +302,13 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai, struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(dai->component); int ret; + /* + * Some sound cards set 0 Hz to mean reset, + * but a 0 Hz rate cannot actually be set. 
Ignore it here + */ + if (freq == 0) + return 0; + if (freq > PCM3168A_MAX_SYSCLK) return -EINVAL; diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index 747ca248bf10..3bc63fbcb188 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 19662ee330d6..c83f7f5da96b 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = { .inv_jd1_1 = true, }; +static const struct rt5645_platform_data asus_t101ha_platform_data = { + .dmic1_data_pin = RT5645_DMIC_DATA_IN2N, + .dmic2_data_pin = RT5645_DMIC2_DISABLE, + .jd_mode = 3, +}; + static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = { .jd_mode = 3, .in2_diff = true, @@ -3702,6 +3708,14 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&asus_t100ha_platform_data, }, + { + .ident = "ASUS T101HA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&asus_t101ha_platform_data, + }, { .ident = "MINIX Z83-4", .matches = { diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c index c506c9305043..829cf552fe3e 100644 --- a/sound/soc/codecs/rt5651.c +++ b/sound/soc/codecs/rt5651.c @@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c index e66d08398f74..afd61599d94c 100644 --- a/sound/soc/codecs/rt5659.c +++ b/sound/soc/codecs/rt5659.c @@ -3463,12 +3463,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int { struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component); unsigned int reg_val = 0; + int ret; if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src) return 0; switch (clk_id) { case RT5659_SCLK_S_MCLK: + ret = clk_set_rate(rt5659->mclk, freq); + if (ret) + return ret; + reg_val |= RT5659_SCLK_SRC_MCLK; break; case RT5659_SCLK_S_PLL1: diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index 70fee6849ab0..f21181734170 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -31,18 +31,19 @@ #include "rt5670.h" #include "rt5670-dsp.h" -#define RT5670_DEV_GPIO BIT(0) -#define RT5670_IN2_DIFF BIT(1) -#define RT5670_DMIC_EN BIT(2) -#define 
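Two unit conventions drive the TLV fixes above and the max9867 one earlier: TLV data is expressed in centi-dB (0.01 dB), and a volume control's max argument is the highest register value, not the number of levels. The rt5640/rt5651 DAC has 175 steps of 0.375 dB, a fractional centi-dB step that DECLARE_TLV_DB_SCALE cannot express, hence the endpoints-only form. A sketch under those rules; the register addresses 0x10/0x11 and the 1.5 dB scale are hypothetical:

#include <sound/soc.h>
#include <sound/tlv.h>

/* endpoints only, centi-dB units: 175 * 0.375 dB = 65.625 dB below 0 dB */
static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);

/* integer centi-dB step: 41 levels, 1.5 dB apart, from -60 dB */
static const DECLARE_TLV_DB_SCALE(master_tlv, -6000, 150, 0);

static const struct snd_kcontrol_new example_controls[] = {
	/* max is the highest register value (40), not the level count (41) */
	SOC_DOUBLE_R_TLV("Master Playback Volume", 0x10, 0x11, 0, 40, 1,
			 master_tlv),
};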
RT5670_DMIC1_IN2P BIT(3) -#define RT5670_DMIC1_GPIO6 BIT(4) -#define RT5670_DMIC1_GPIO7 BIT(5) -#define RT5670_DMIC2_INR BIT(6) -#define RT5670_DMIC2_GPIO8 BIT(7) -#define RT5670_DMIC3_GPIO5 BIT(8) -#define RT5670_JD_MODE1 BIT(9) -#define RT5670_JD_MODE2 BIT(10) -#define RT5670_JD_MODE3 BIT(11) +#define RT5670_DEV_GPIO BIT(0) +#define RT5670_IN2_DIFF BIT(1) +#define RT5670_DMIC_EN BIT(2) +#define RT5670_DMIC1_IN2P BIT(3) +#define RT5670_DMIC1_GPIO6 BIT(4) +#define RT5670_DMIC1_GPIO7 BIT(5) +#define RT5670_DMIC2_INR BIT(6) +#define RT5670_DMIC2_GPIO8 BIT(7) +#define RT5670_DMIC3_GPIO5 BIT(8) +#define RT5670_JD_MODE1 BIT(9) +#define RT5670_JD_MODE2 BIT(10) +#define RT5670_JD_MODE3 BIT(11) +#define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12) static unsigned long rt5670_quirk; static unsigned int quirk_override; @@ -1447,6 +1448,33 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w, return 0; } +static int rt5670_spk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); + + if (!rt5670->pdata.gpio1_is_ext_spk_en) + return 0; + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI); + break; + + case SND_SOC_DAPM_PRE_PMD: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO); + break; + + default: + return 0; + } + + return 0; +} + static int rt5670_bst1_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1860,7 +1888,9 @@ static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = { }; static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = { - SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0, + rt5670_spk_event, SND_SOC_DAPM_PRE_PMD | + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_OUTPUT("SPOLP"), SND_SOC_DAPM_OUTPUT("SPOLN"), SND_SOC_DAPM_OUTPUT("SPORP"), @@ -2857,14 +2887,14 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = { }, { .callback = rt5670_quirk_cb, - .ident = "Lenovo Thinkpad Tablet 10", + .ident = "Lenovo Miix 2 10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"), }, .driver_data = (unsigned long *)(RT5670_DMIC_EN | RT5670_DMIC1_IN2P | - RT5670_DEV_GPIO | + RT5670_GPIO1_IS_EXT_SPK_EN | RT5670_JD_MODE2), }, { @@ -2924,6 +2954,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, rt5670->pdata.dev_gpio = true; dev_info(&i2c->dev, "quirk dev_gpio\n"); } + if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) { + rt5670->pdata.gpio1_is_ext_spk_en = true; + dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n"); + } if (rt5670_quirk & RT5670_IN2_DIFF) { rt5670->pdata.in2_diff = true; dev_info(&i2c->dev, "quirk IN2_DIFF\n"); @@ -3023,6 +3057,13 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); } + if (rt5670->pdata.gpio1_is_ext_spk_en) { + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, + RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1); + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); + } + if (rt5670->pdata.jd_mode) { regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK, RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK); diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index 
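The rt5670 quirk rework above follows the usual DMI pattern: a table of board matches whose callback latches driver_data into a module-scope quirk mask, consulted later in probe. A schematic version; the vendor strings, ident, and quirk bit are made up for illustration:

#include <linux/bits.h>
#include <linux/dmi.h>

#define EXAMPLE_QUIRK_JD_MODE2	BIT(10)	/* mirrors the rt5670 flag style */

static unsigned long example_quirk;

static int example_quirk_cb(const struct dmi_system_id *id)
{
	example_quirk = (unsigned long)id->driver_data;
	return 1;	/* non-zero stops the table walk at the first hit */
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_quirk_cb,
		.ident = "Some Tablet",	/* hypothetical machine */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Tablet V1"),
		},
		.driver_data = (void *)EXAMPLE_QUIRK_JD_MODE2,
	},
	{}	/* terminator - dmi_check_system() requires it */
};

/* in probe: dmi_check_system(example_quirk_table); then test example_quirk */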
a8c3e44770b8..de0203369b7c 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h @@ -757,7 +757,7 @@ #define RT5670_PWR_VREF2_BIT 4 #define RT5670_PWR_FV2 (0x1 << 3) #define RT5670_PWR_FV2_BIT 3 -#define RT5670_LDO_SEL_MASK (0x3) +#define RT5670_LDO_SEL_MASK (0x7) #define RT5670_LDO_SEL_SFT 0 /* Power Management for Analog 2 (0x64) */ diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e949b372cead..8a1e485982d8 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = { { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f }, { SGTL5000_DAP_MAIN_CHAN, 0x8000 }, { SGTL5000_DAP_MIX_CHAN, 0x0000 }, - { SGTL5000_DAP_AVC_CTRL, 0x0510 }, + { SGTL5000_DAP_AVC_CTRL, 0x5100 }, { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 }, { SGTL5000_DAP_AVC_ATTACK, 0x0028 }, { SGTL5000_DAP_AVC_DECAY, 0x0050 }, @@ -1645,6 +1645,40 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, dev_err(&client->dev, "Error %d initializing CHIP_CLK_CTRL\n", ret); + /* Mute everything to avoid pop from the following power-up */ + ret = regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_CTRL, + SGTL5000_CHIP_ANA_CTRL_DEFAULT); + if (ret) { + dev_err(&client->dev, + "Error %d muting outputs via CHIP_ANA_CTRL\n", ret); + goto disable_clk; + } + + /* + * If VAG is powered-on (e.g. from previous boot), it would be disabled + * by the write to ANA_POWER in later steps of the probe code. This + * may create a loud pop even with all outputs muted. The proper way + * to circumvent this is disabling the bit first and waiting the proper + * cool-down time. + */ + ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, &value); + if (ret) { + dev_err(&client->dev, "Failed to read ANA_POWER: %d\n", ret); + goto disable_clk; + } + if (value & SGTL5000_VAG_POWERUP) { + ret = regmap_update_bits(sgtl5000->regmap, + SGTL5000_CHIP_ANA_POWER, + SGTL5000_VAG_POWERUP, + 0); + if (ret) { + dev_err(&client->dev, "Error %d disabling VAG\n", ret); + goto disable_clk; + } + + msleep(SGTL5000_VAG_POWERDOWN_DELAY); + } + /* Follow section 2.2.1.1 of AN3663 */ ana_pwr = SGTL5000_ANA_POWER_DEFAULT; if (sgtl5000->num_supplies <= VDDD) { diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index a4bf4bca95bf..56ec5863f250 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -233,6 +233,7 @@ /* * SGTL5000_CHIP_ANA_CTRL */ +#define SGTL5000_CHIP_ANA_CTRL_DEFAULT 0x0133 #define SGTL5000_LINE_OUT_MUTE 0x0100 #define SGTL5000_HP_SEL_MASK 0x0040 #define SGTL5000_HP_SEL_SHIFT 6 diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c index 1554631cb397..5b7f9fcf6cbf 100644 --- a/sound/soc/codecs/tas571x.c +++ b/sound/soc/codecs/tas571x.c @@ -820,8 +820,10 @@ static int tas571x_i2c_probe(struct i2c_client *client, priv->regmap = devm_regmap_init(dev, NULL, client, priv->chip->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); + if (IS_ERR(priv->regmap)) { + ret = PTR_ERR(priv->regmap); + goto disable_regs; + } priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW); if (IS_ERR(priv->pdn_gpio)) { @@ -845,7 +847,7 @@ static int tas571x_i2c_probe(struct i2c_client *client, ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); if (ret) - return ret; + goto disable_regs; usleep_range(50000, 60000); @@ -861,12 +863,20 @@ static int tas571x_i2c_probe(struct i2c_client *client, */ ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0); if (ret) - return 
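The sgtl5000 probe addition above sequences the VAG shutdown explicitly: read the power register, clear only the VAG bit if it is set, then wait out the discharge time before any further ANA_POWER writes. The same logic in isolation, assuming the register, bit, and delay macros as the driver and this patch use them:

#include <linux/delay.h>
#include <linux/regmap.h>

static int vag_power_down(struct regmap *map)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, SGTL5000_CHIP_ANA_POWER, &val);
	if (ret)
		return ret;

	if (val & SGTL5000_VAG_POWERUP) {
		ret = regmap_update_bits(map, SGTL5000_CHIP_ANA_POWER,
					 SGTL5000_VAG_POWERUP, 0);
		if (ret)
			return ret;
		/* let VAG discharge before touching ANA_POWER again */
		msleep(SGTL5000_VAG_POWERDOWN_DELAY);
	}
	return 0;
}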
ret; + goto disable_regs; } - return devm_snd_soc_register_component(&client->dev, + ret = devm_snd_soc_register_component(&client->dev, &priv->component_driver, &tas571x_dai, 1); + if (ret) + goto disable_regs; + + return ret; + +disable_regs: + regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies); + return ret; } static int tas571x_i2c_remove(struct i2c_client *client) diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index 68165de1c8de..7a1ffbaf48be 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -662,7 +662,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component, } static int aic32x4_setup_clocks(struct snd_soc_component *component, - unsigned int sample_rate) + unsigned int sample_rate, unsigned int channels) { u8 aosr; u16 dosr; @@ -750,7 +750,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component, dosr); clk_set_rate(clocks[5].clk, - sample_rate * 32); + sample_rate * 32 * + channels); + return 0; } } @@ -772,7 +774,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream, u8 iface1_reg = 0; u8 dacsetup_reg = 0; - aic32x4_setup_clocks(component, params_rate(params)); + aic32x4_setup_clocks(component, params_rate(params), + params_channels(params)); switch (params_width(params)) { case 16: diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index f318403133e9..81906c25e4a8 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -618,7 +618,7 @@ static const char * const sb_tx8_mux_text[] = { "ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192" }; -static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); +static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0); diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 18535b326680..04f23477039a 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -416,8 +416,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wm8994 *control = dev_get_drvdata(component->dev->parent); int i; + if (control->type != WM8958) + return 0; + switch (event) { case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_PRE_PMU: diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 55112c1bba5e..708fc4ed54ed 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in, best_freq_out = -EINVAL; *sysclk_idx = *dac_idx = *bclk_idx = -1; - for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { + /* + * Per the datasheet, the PLL performs best when f2 is between + * 90MHz and 100MHz; since the desired sysclk output is 11.2896MHz + * or 12.288MHz, sysclkdiv = 2 is the best choice. + * So search sysclk_divs from 2 to 1 rather than from 1 to 2. 
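The tas571x hunk converts bare returns into a goto-unwind so the regulators enabled at the top of probe are always disabled on failure. The canonical shape of that pattern, with setup_chip() standing in for the intervening register writes:

#include <linux/regulator/consumer.h>

static int setup_chip(struct device *dev);	/* hypothetical setup step */

static int example_probe(struct device *dev,
			 struct regulator_bulk_data *supplies, int n)
{
	int ret;

	ret = regulator_bulk_enable(n, supplies);
	if (ret)
		return ret;		/* nothing acquired yet: plain return */

	ret = setup_chip(dev);
	if (ret)
		goto disable_regs;	/* resources held: unwind via label */

	return 0;

disable_regs:
	regulator_bulk_disable(n, supplies);
	return ret;
}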
+ */ + for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) { if (sysclk_divs[i] == -1) continue; for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) { @@ -860,8 +866,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream, wm8960->is_stream_in_use[tx] = true; - if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_ON && - !wm8960->is_stream_in_use[!tx]) + if (!wm8960->is_stream_in_use[!tx]) return wm8960_configure_clocking(component); return 0; diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index d5fb7f5dd551..6dbab3fc6537 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -3372,6 +3372,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * return -EINVAL; } + pm_runtime_get_sync(component->dev); + switch (micbias) { case 1: micdet = &wm8994->micdet[0]; @@ -3419,6 +3421,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); + pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8994_mic_detect); @@ -3786,6 +3790,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * return -EINVAL; } + pm_runtime_get_sync(component->dev); + if (jack) { snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS"); snd_soc_dapm_sync(dapm); @@ -3854,6 +3860,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); } + pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8958_mic_detect); @@ -4047,11 +4055,13 @@ static int wm8994_component_probe(struct snd_soc_component *component) wm8994->hubs.dcs_readback_mode = 2; break; } + wm8994->hubs.micd_scthr = true; break; case WM8958: wm8994->hubs.dcs_readback_mode = 1; wm8994->hubs.hp_startup_mode = 1; + wm8994->hubs.micd_scthr = true; switch (control->revision) { case 0: diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c index 37e4bb3dbd8a..229f2986cd96 100644 --- a/sound/soc/codecs/wm8997.c +++ b/sound/soc/codecs/wm8997.c @@ -1177,6 +1177,8 @@ static int wm8997_probe(struct platform_device *pdev) goto err_spk_irqs; } + return ret; + err_spk_irqs: arizona_free_spk_irqs(arizona); diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c index 7c1899219573..817ccddd6344 100644 --- a/sound/soc/codecs/wm8998.c +++ b/sound/soc/codecs/wm8998.c @@ -1375,7 +1375,7 @@ static int wm8998_probe(struct platform_device *pdev) ret = arizona_init_spk_irqs(arizona); if (ret < 0) - return ret; + goto err_pm_disable; ret = devm_snd_soc_register_component(&pdev->dev, &soc_component_dev_wm8998, @@ -1390,6 +1390,8 @@ static int wm8998_probe(struct platform_device *pdev) err_spk_irqs: arizona_free_spk_irqs(arizona); +err_pm_disable: + pm_runtime_disable(&pdev->dev); return ret; } diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 9b8bb7bbe945..13672928da99 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1496,7 +1496,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL); if (!ctl_work) { ret = -ENOMEM; - goto err_ctl_cache; + goto err_list_del; } ctl_work->dsp = dsp; @@ -1506,7 +1506,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, return 0; -err_ctl_cache: +err_list_del: + list_del(&ctl->list); kfree(ctl->cache); err_ctl_name: kfree(ctl->name); @@ -1912,6 +1913,7 @@ static int wm_adsp_load(struct wm_adsp *dsp) mem = wm_adsp_find_region(dsp, type); if (!mem) { adsp_err(dsp, "No region of type: %x\n", type); + 
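The wm_adsp fix above renames the error label to match what it must now do: once the control is on the DSP's list, any later failure has to list_del() it before freeing, or the list keeps a dangling pointer. Reduced to its essentials; example_ctl is an illustrative type:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_ctl {
	struct list_head list;
	char *name;
};

static int example_create_ctl(struct list_head *ctl_list)
{
	struct example_ctl *ctl;

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	list_add(&ctl->list, ctl_list);

	ctl->name = kstrdup("example", GFP_KERNEL);
	if (!ctl->name)
		goto err_list_del;

	return 0;

err_list_del:
	list_del(&ctl->list);	/* undo the list_add before freeing */
	kfree(ctl);
	return -ENOMEM;
}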
ret = -EINVAL; goto out_fw; } diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index e93af7edd8f7..dd421e2fe7b2 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -1223,6 +1223,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component, snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL, WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB); + if (!hubs->micd_scthr) + return 0; + snd_soc_component_update_bits(component, WM8993_MICBIAS, WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK | WM8993_MICB1_LVL | WM8993_MICB2_LVL, diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index 4b8e5f0d6e32..988b29e63060 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h @@ -27,6 +27,7 @@ struct wm_hubs_data { int hp_startup_mode; int series_startup; int no_series_update; + bool micd_scthr; bool no_cache_dac_hp_direct; struct list_head dcs_cache; diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index 01052a0808b0..5aee6b8366d2 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -241,6 +241,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); if (ret) { dev_err(dev, "failed to config DMA channel for Back-End\n"); + dma_release_channel(pair->dma_chan[dir]); return ret; } diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index c7a49d03463a..33ade79fa032 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -87,6 +87,10 @@ static irqreturn_t esai_isr(int irq, void *devid) if ((saisr & (ESAI_SAISR_TUE | ESAI_SAISR_ROE)) && esai_priv->reset_at_xrun) { dev_dbg(&pdev->dev, "reset module for xrun\n"); + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, + ESAI_xCR_xEIE_MASK, 0); + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, + ESAI_xCR_xEIE_MASK, 0); tasklet_schedule(&esai_priv->task); } @@ -490,11 +494,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream, ESAI_SAICR_SYNC, esai_priv->synchronous ? 
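The fsl_asrc_dma one-liner above closes a leak: a DMA channel requested earlier in the same function was never released when its slave configuration failed. The pattern in isolation:

#include <linux/dmaengine.h>

static int config_or_release(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	int ret = dmaengine_slave_config(chan, cfg);

	if (ret) {
		dma_release_channel(chan);	/* was leaked before the fix */
		return ret;
	}
	return 0;
}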
ESAI_SAICR_SYNC : 0); - /* Set a default slot number -- 2 */ + /* Set slots count */ regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, - ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); + ESAI_xCCR_xDC_MASK, + ESAI_xCCR_xDC(esai_priv->slots)); regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, - ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); + ESAI_xCCR_xDC_MASK, + ESAI_xCCR_xDC(esai_priv->slots)); } return 0; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 41b83ecaf008..027259695551 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -680,10 +680,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs), - FSL_SAI_CR1_RFW_MASK, + FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), sai->soc_data->fifo_depth - FSL_SAI_MAXBURST_TX); regmap_update_bits(sai->regmap, FSL_SAI_RCR1(ofs), - FSL_SAI_CR1_RFW_MASK, FSL_SAI_MAXBURST_RX - 1); + FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), + FSL_SAI_MAXBURST_RX - 1); snd_soc_dai_init_dma_data(cpu_dai, &sai->dma_params_tx, &sai->dma_params_rx); @@ -693,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) return 0; } -static struct snd_soc_dai_driver fsl_sai_dai = { +static struct snd_soc_dai_driver fsl_sai_dai_template = { .probe = fsl_sai_dai_probe, .playback = { .stream_name = "CPU-Playback", @@ -964,12 +965,15 @@ static int fsl_sai_probe(struct platform_device *pdev) return ret; } + memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template, + sizeof(fsl_sai_dai_template)); + /* Sync Tx with Rx as default by following old DT binding */ sai->synchronous[RX] = true; sai->synchronous[TX] = false; - fsl_sai_dai.symmetric_rates = 1; - fsl_sai_dai.symmetric_channels = 1; - fsl_sai_dai.symmetric_samplebits = 1; + sai->cpu_dai_drv.symmetric_rates = 1; + sai->cpu_dai_drv.symmetric_channels = 1; + sai->cpu_dai_drv.symmetric_samplebits = 1; if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) && of_find_property(np, "fsl,sai-asynchronous", NULL)) { @@ -986,9 +990,9 @@ static int fsl_sai_probe(struct platform_device *pdev) /* Discard all settings for asynchronous mode */ sai->synchronous[RX] = false; sai->synchronous[TX] = false; - fsl_sai_dai.symmetric_rates = 0; - fsl_sai_dai.symmetric_channels = 0; - fsl_sai_dai.symmetric_samplebits = 0; + sai->cpu_dai_drv.symmetric_rates = 0; + sai->cpu_dai_drv.symmetric_channels = 0; + sai->cpu_dai_drv.symmetric_samplebits = 0; } if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) && @@ -1017,7 +1021,7 @@ static int fsl_sai_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component, - &fsl_sai_dai, 1); + &sai->cpu_dai_drv, 1); if (ret) goto err_pm_disable; diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h index 76b15deea80c..677ecfc1ec68 100644 --- a/sound/soc/fsl/fsl_sai.h +++ b/sound/soc/fsl/fsl_sai.h @@ -94,7 +94,7 @@ #define FSL_SAI_CSR_FRDE BIT(0) /* SAI Transmit and Receive Configuration 1 Register */ -#define FSL_SAI_CR1_RFW_MASK 0x1f +#define FSL_SAI_CR1_RFW_MASK(x) ((x) - 1) /* SAI Transmit and Receive Configuration 2 Register */ #define FSL_SAI_CR2_SYNC BIT(30) @@ -180,6 +180,7 @@ struct fsl_sai { unsigned int bclk_ratio; const struct fsl_sai_soc_data *soc_data; + struct snd_soc_dai_driver cpu_dai_drv; struct snd_dmaengine_dai_dma_data dma_params_rx; struct snd_dmaengine_dai_dma_data dma_params_tx; }; diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 
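fsl_sai above stops mutating a file-scope snd_soc_dai_driver and instead gives each probed instance its own copy, since two SAI nodes with different DT flags would otherwise race on one shared static; relatedly, the FIFO watermark mask is now derived per SoC as fifo_depth - 1 instead of a hard-coded 0x1f. A minimal sketch of the copy-then-tweak idiom, with hypothetical names:

#include <linux/string.h>
#include <sound/soc.h>

static const struct snd_soc_dai_driver dai_template = {
	.name = "example-dai",	/* shared, immutable defaults */
};

struct example_priv {
	struct snd_soc_dai_driver dai_drv;	/* per-instance copy */
};

static void example_init(struct example_priv *priv, bool synchronous)
{
	/* copy first, then apply per-device DT-driven tweaks */
	memcpy(&priv->dai_drv, &dai_template, sizeof(dai_template));
	priv->dai_drv.symmetric_rates = synchronous ? 1 : 0;
}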
537dc69256f0..ed18bc69e095 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, struct regmap *regs = ssi->regs; u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i; unsigned long clkrate, baudrate, tmprate; - unsigned int slots = params_channels(hw_params); - unsigned int slot_width = 32; + unsigned int channels = params_channels(hw_params); + unsigned int slot_width = params_width(hw_params); + unsigned int slots = 2; u64 sub, savesub = 100000; unsigned int freq; bool baudclk_is_used; @@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, /* Override slots and slot_width if being specifically set... */ if (ssi->slots) slots = ssi->slots; - /* ...but keep 32 bits if slots is 2 -- I2S Master mode */ - if (ssi->slot_width && slots != 2) + if (ssi->slot_width) slot_width = ssi->slot_width; + /* ...but force 32 bits for stereo audio using I2S Master Mode */ + if (channels == 2 && + (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER) + slot_width = 32; + /* Generate bit clock based on the slot number and slot width */ freq = slots * slot_width * params_rate(hw_params); @@ -868,6 +873,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream, static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt) { u32 strcr = 0, scr = 0, stcr, srcr, mask; + unsigned int slots; ssi->dai_fmt = fmt; @@ -899,10 +905,11 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt) return -EINVAL; } + slots = ssi->slots ? : 2; regmap_update_bits(ssi->regs, REG_SSI_STCCR, - SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2)); + SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots)); regmap_update_bits(ssi->regs, REG_SSI_SRCCR, - SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2)); + SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots)); /* Data on rising edge of bclk, frame low, 1clk before data */ strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS; diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c index 15a27a2cd0ca..fad1eb6253d5 100644 --- a/sound/soc/fsl/imx-es8328.c +++ b/sound/soc/fsl/imx-es8328.c @@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev) data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; - goto fail; + goto put_device; } comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; - goto fail; + goto put_device; } data->dev = dev; @@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev) ret = snd_soc_of_parse_card_name(&data->card, "model"); if (ret) { dev_err(dev, "Unable to parse card name\n"); - goto fail; + goto put_device; } ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing"); if (ret) { dev_err(dev, "Unable to parse routing: %d\n", ret); - goto fail; + goto put_device; } data->card.num_links = 1; data->card.owner = THIS_MODULE; @@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev) ret = snd_soc_register_card(&data->card); if (ret) { dev_err(dev, "Unable to register: %d\n", ret); - goto fail; + goto put_device; } platform_set_drvdata(pdev, data); +put_device: + put_device(&ssi_pdev->dev); fail: of_node_put(ssi_np); of_node_put(codec_np); diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 6007e6305735..1bc498124689 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -340,7 +340,7 @@ static int graph_dai_link_of(struct 
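The fsl_ssi rework above recomputes the bit clock from slot count, slot width, and frame rate, forcing 32-bit slots only for stereo audio in I2S master mode. The arithmetic is simple enough to check standalone:

#include <stdio.h>

/* bclk = slots * slot_width * rate; 48 kHz stereo I2S master with the
 * forced 32-bit slots needs 2 * 32 * 48000 = 3.072 MHz, regardless of
 * the sample width carried inside each slot. */
int main(void)
{
	unsigned int slots = 2, slot_width = 32, rate = 48000;

	printf("bclk = %u Hz\n", slots * slot_width * rate);
	return 0;
}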
asoc_simple_priv *priv, struct device_node *top = dev->of_node; struct asoc_simple_dai *cpu_dai; struct asoc_simple_dai *codec_dai; - int ret, single_cpu; + int ret, single_cpu = 0; /* Do it only CPU turn */ if (!li->cpu) diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index fc9c753db8dd..4484c40c7a39 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv, struct device_node *plat = NULL; char prop[128]; char *prefix = ""; - int ret, single_cpu; + int ret, single_cpu = 0; /* * |CPU |Codec : turn diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c index fdd2c73fd2fa..bb668551dd4b 100644 --- a/sound/soc/img/img-i2s-in.c +++ b/sound/soc/img/img-i2s-in.c @@ -343,8 +343,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } for (i = 0; i < i2s->active_channels; i++) img_i2s_in_ch_disable(i2s, i); @@ -482,6 +484,7 @@ static int img_i2s_in_probe(struct platform_device *pdev) if (IS_ERR(rst)) { if (PTR_ERR(rst) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; + pm_runtime_put(&pdev->dev); goto err_suspend; } diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c index 4b1853409633..9c4212f2f726 100644 --- a/sound/soc/img/img-i2s-out.c +++ b/sound/soc/img/img-i2s-out.c @@ -347,8 +347,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } img_i2s_out_disable(i2s); @@ -488,8 +490,10 @@ static int img_i2s_out_probe(struct platform_device *pdev) goto err_pm_disable; } ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto err_suspend; + } reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK; img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL); diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c index 5ddbe3a31c2e..4da49a42e854 100644 --- a/sound/soc/img/img-parallel-out.c +++ b/sound/soc/img/img-parallel-out.c @@ -163,8 +163,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } ret = pm_runtime_get_sync(prl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(prl->dev); return ret; + } reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL); reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set; diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c index baef461a99f1..df8f7994d3b7 100644 --- a/sound/soc/intel/atom/sst-atom-controls.c +++ b/sound/soc/intel/atom/sst-atom-controls.c @@ -966,7 +966,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w, dev_dbg(c->dev, "Enter: widget=%s\n", w->name); if (SND_SOC_DAPM_EVENT_ON(event)) { + mutex_lock(&drv->lock); ret = sst_send_slot_map(drv); + mutex_unlock(&drv->lock); if (ret) return ret; ret = sst_send_pipe_module_params(w, k); @@ -1333,7 +1335,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute) dai->capture_widget->name); w = dai->capture_widget; snd_soc_dapm_widget_for_each_source_path(w, p) { - if (p->connected && !p->connected(w, p->sink)) + if (p->connected && !p->connected(w, p->source)) continue; if (p->connect && p->source->power && 
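imx-es8328 above was leaking a device reference: of_find_device_by_node() returns its device with the refcount raised, so every exit path needs a matching put_device(). Schematically, with do_bind() as a stand-in for the card setup:

#include <linux/of_platform.h>

static int do_bind(struct platform_device *pdev);	/* hypothetical */

static int bind_ssi(struct device_node *ssi_np)
{
	struct platform_device *ssi_pdev = of_find_device_by_node(ssi_np);
	int ret;

	if (!ssi_pdev)
		return -EPROBE_DEFER;

	ret = do_bind(ssi_pdev);

	put_device(&ssi_pdev->dev);	/* drop the reference either way */
	return ret;
}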
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 8cc3cc363eb0..c3ff203c3f44 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, ret_val = power_up_sst(stream); if (ret_val < 0) - return ret_val; + goto out_power_up; /* Make sure, that the period size is always even */ snd_pcm_hw_constraint_step(substream->runtime, 0, @@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); out_ops: - kfree(stream); mutex_unlock(&sst_lock); +out_power_up: + kfree(stream); return ret_val; } @@ -499,14 +500,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { .channels_min = SST_STEREO, .channels_max = SST_STEREO, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "Headset Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, { @@ -517,7 +518,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { .channels_min = SST_STEREO, .channels_max = SST_STEREO, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, { diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c index d952719bc098..5862fe968083 100644 --- a/sound/soc/intel/atom/sst/sst_pci.c +++ b/sound/soc/intel/atom/sst/sst_pci.c @@ -99,7 +99,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx) dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram); do_release_regions: pci_release_regions(pci); - return 0; + return ret; } /* diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c index adf416a49b48..60fb87495050 100644 --- a/sound/soc/intel/boards/bxt_rt298.c +++ b/sound/soc/intel/boards/bxt_rt298.c @@ -556,6 +556,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card) /* broxton audio machine driver for SPT + RT298S */ static struct snd_soc_card broxton_rt298 = { .name = "broxton-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, @@ -571,6 +572,7 @@ static struct snd_soc_card broxton_rt298 = { static struct snd_soc_card geminilake_rt298 = { .name = "geminilake-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 54e97455d7f6..ed332177b0f9 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -548,8 +548,10 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) if (cnt) { ret = device_add_properties(codec_dev, props); - if (ret) + if (ret) { + put_device(codec_dev); return ret; + } } devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios); diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 243f683bc02a..cfd307717473 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ 
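The img-i2s and img-parallel-out fixes all apply one rule: pm_runtime_get_sync() increments the usage count even when it fails, so the error path must drop that count with pm_runtime_put_noidle(). The same rule motivates the wm8994 get/put pairing earlier. As a reusable shape:

#include <linux/pm_runtime.h>

static int runtime_protected_op(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resumes if suspended */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... device is powered, touch the hardware here ... */

	pm_runtime_put(dev);	/* drop the reference, allow autosuspend */
	return 0;
}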
b/sound/soc/intel/boards/bytcr_rt5640.c @@ -400,6 +400,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Acer One 10 S1002 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "One S1002"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -422,6 +435,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), @@ -513,6 +538,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MONO_SPEAKER | BYT_RT5640_MCLK_EN), }, + { /* Estar Beauty HD MID 7316R */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Estar"), + DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), @@ -591,6 +626,27 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, + { + /* MPMAN MPWIN895CL */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MSI S100 tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), @@ -731,6 +787,44 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Toshiba Encore WT8-A */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT8-A"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_JD_NOT_INV | + BYT_RT5640_MCLK_EN), + }, + { /* Toshiba Encore WT10-A */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD1_IN4P | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, + { /* Voyo Winpad A15 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + 
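sst_media_open() above is restructured so its labels unwind in reverse order of acquisition: a failure under the lock unlocks first and falls through to the free, and kfree() no longer runs while the mutex is held. A compact model of that layout; register_stream() and power_up() are hypothetical:

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);
static int register_stream(void *stream);	/* hypothetical */
static int power_up(void *stream);		/* hypothetical */

static int example_open(void)
{
	void *stream;
	int ret;

	stream = kzalloc(64, GFP_KERNEL);
	if (!stream)
		return -ENOMEM;

	mutex_lock(&example_lock);
	ret = register_stream(stream);
	if (ret)
		goto out_unlock;
	mutex_unlock(&example_lock);

	ret = power_up(stream);
	if (ret < 0)
		goto out_free;

	return 0;

out_unlock:
	mutex_unlock(&example_lock);	/* unlock before freeing */
out_free:
	kfree(stream);
	return ret;
}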
BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_MCLK_EN), + }, { /* Catch-all for generic Insyde tablets, must be last */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index 4606f6f582d6..921c09cdb480 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -435,6 +435,19 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = { BYT_RT5651_SSP0_AIF1 | BYT_RT5651_MONO_SPEAKER), }, + { + /* Jumper EZpad 7 */ + .callback = byt_rt5651_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Jumper"), + DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"), + /* Jumper12x.WJ2012.bsBKRCP05 with the version dropped */ + DMI_MATCH(DMI_BIOS_VERSION, "Jumper12x.WJ2012.bsBKRCP"), + }, + .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS | + BYT_RT5651_IN2_MAP | + BYT_RT5651_JD_NOT_INV), + }, { /* KIANO SlimNote 14.2 */ .callback = byt_rt5651_quirk_cb, diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c index 3dadf9bff796..cf47fd9cd506 100644 --- a/sound/soc/intel/boards/haswell.c +++ b/sound/soc/intel/boards/haswell.c @@ -206,6 +206,7 @@ static struct platform_driver haswell_audio = { .probe = haswell_audio_probe, .driver = { .name = "haswell-audio", + .pm = &snd_soc_pm_ops, }, }; diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c index 829f95fc4179..16996f6aa2dc 100644 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c @@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); - struct snd_soc_dpcm *dpcm = container_of( - params, struct snd_soc_dpcm, hw_params); - struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link; - struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link; + struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL; + /* + * The following loop will be called only for the playback stream + * On this platform, there is only one playback device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) { + rtd_dpcm = dpcm; + break; + } + + /* + * The following loop will be called only for the capture stream + * On this platform, there is only one capture device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) { + rtd_dpcm = dpcm; + break; + } + + if (!rtd_dpcm) + return -EINVAL; + + /* + * The above 2 loops are mutually exclusive based on the stream direction, + * thus the rtd_dpcm variable will never be overwritten + */ /* * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE, * whereas kblda7219m98927 & kblmax98927 supports S16_LE by default. 
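The key correction in this fixup: the old code ran container_of() on a hw_params pointer that is not actually embedded in a struct snd_soc_dpcm, yielding a garbage pointer, while the new code walks the runtime's FE clients. Extracted into a helper, valid only because, as the comments state, each SSP here has at most one FE per direction:

#include <sound/soc.h>
#include <sound/soc-dpcm.h>

static struct snd_soc_dpcm *first_fe_dpcm(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dpcm *dpcm;

	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm)
		return dpcm;	/* first playback FE, if any */
	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm)
		return dpcm;	/* otherwise first capture FE */
	return NULL;		/* caller maps this to -EINVAL */
}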
@@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, /* * The ADSP will convert the FE rate to 48k, stereo, 24 bit */ - if (!strcmp(fe_dai_link->name, "Kbl Audio Port") || - !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") || - !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) { + if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) { rate->min = rate->max = 48000; channels->min = channels->max = 2; snd_mask_none(fmt); @@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, /* * The speaker on the SSP0 supports S16_LE and not S24_LE. * thus changing the mask here */ - if (!strcmp(be_dai_link->name, "SSP0-Codec")) + if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec")) snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c index 7cefda341fbf..a540a2dad80c 100644 --- a/sound/soc/intel/boards/kbl_rt5663_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c @@ -401,17 +401,40 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); - struct snd_soc_dpcm *dpcm = container_of( - params, struct snd_soc_dpcm, hw_params); - struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link; - struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link; + struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL; + + /* + * The following loop will be called only for the playback stream + * On this platform, there is only one playback device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) { + rtd_dpcm = dpcm; + break; + } + + /* + * The following loop will be called only for the capture stream + * On this platform, there is only one capture device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) { + rtd_dpcm = dpcm; + break; + } + + if (!rtd_dpcm) + return -EINVAL; + + /* + * The above 2 loops are mutually exclusive based on the stream direction, + * thus the rtd_dpcm variable will never be overwritten + */ /* * The ADSP will convert the FE rate to 48k, stereo, 24 bit */ - if (!strcmp(fe_dai_link->name, "Kbl Audio Port") || - !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") || - !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) { + if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) { rate->min = rate->max = 48000; channels->min = channels->max = 2; snd_mask_none(fmt); @@ -421,7 +444,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, * The speaker on the SSP0 supports S16_LE and not S24_LE. 
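What the fixup ultimately does to the back-end params is pin the rate and channel count and rewrite the format mask to a single entry. The same three calls, isolated into one function (here forcing S24_LE as the ADSP-facing format):

#include <sound/pcm_params.h>

static void force_48k_stereo_s24(struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);
	struct snd_mask *fmt = hw_param_mask(params,
					SNDRV_PCM_HW_PARAM_FORMAT);

	rate->min = rate->max = 48000;	/* collapse the interval to a point */
	channels->min = channels->max = 2;
	snd_mask_none(fmt);		/* clear, then allow exactly one format */
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
}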
* thus changing the mask here */ - if (!strcmp(be_dai_link->name, "SSP0-Codec")) + if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec")) snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c index 67b276a65a8d..8ad31c91fc75 100644 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c @@ -626,7 +626,7 @@ static int kabylake_card_late_probe(struct snd_soc_card *card) * kabylake audio machine driver for MAX98927 + RT5514 + RT5663 */ static struct snd_soc_card kabylake_audio_card = { - .name = "kbl_r5514_5663_max", + .name = "kbl-r5514-5663-max", .owner = THIS_MODULE, .dai_link = kabylake_dais, .num_links = ARRAY_SIZE(kabylake_dais), diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c index 1778acdc367c..e8d676c192f6 100644 --- a/sound/soc/intel/boards/skl_hda_dsp_generic.c +++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c @@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link) } static struct snd_soc_card hda_soc_card = { - .name = "skl_hda_card", + .name = "hda-dsp", .owner = THIS_MODULE, .dai_link = skl_hda_be_dai_links, .dapm_widgets = skl_hda_widgets, diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c index 06b7d6c6c9a0..302ca1920791 100644 --- a/sound/soc/intel/boards/sof_rt5682.c +++ b/sound/soc/intel/boards/sof_rt5682.c @@ -374,7 +374,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd) /* sof audio machine driver for rt5682 codec */ static struct snd_soc_card sof_audio_card_rt5682 = { - .name = "sof_rt5682", + .name = "rt5682", /* the sof- prefix is added by the core */ .owner = THIS_MODULE, .controls = sof_controls, .num_controls = ARRAY_SIZE(sof_controls), diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h index 863a477d3405..645baf0ed3dd 100644 --- a/sound/soc/intel/common/soc-intel-quirks.h +++ b/sound/soc/intel/common/soc-intel-quirks.h @@ -11,6 +11,7 @@ #if IS_ENABLED(CONFIG_X86) +#include <linux/dmi.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/iosf_mbi.h> @@ -40,12 +41,36 @@ SOC_INTEL_IS_CPU(cml, INTEL_FAM6_KABYLAKE_L); static inline bool soc_intel_is_byt_cr(struct platform_device *pdev) { + /* + * List of systems which: + * 1. Use a non-CR version of the Bay Trail SoC + * 2. Contain at least 6 interrupt resources so that the + * platform_get_resource(pdev, IORESOURCE_IRQ, 5) check below + * succeeds + * 3. Despite 1. and 2., still have their IPC IRQ at index 0 rather than 5 + * + * This needs to be here so that it can be shared between the SST and + * SOF drivers. We rely on the compiler to optimize this out in files + * where soc_intel_is_byt_cr is not used. 
+ */ + static const struct dmi_system_id force_bytcr_table[] = { + { /* Lenovo Yoga Tablet 2 series */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"), + }, + }, + {} + }; struct device *dev = &pdev->dev; int status = 0; if (!soc_intel_is_byt()) return false; + if (dmi_check_system(force_bytcr_table)) + return true; + if (iosf_mbi_available()) { u32 bios_status; diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c index 92a82e6b5fe6..38b9d7494083 100644 --- a/sound/soc/intel/skylake/bxt-sst.c +++ b/sound/soc/intel/skylake/bxt-sst.c @@ -17,7 +17,6 @@ #include "skl.h" #define BXT_BASEFW_TIMEOUT 3000 -#define BXT_INIT_TIMEOUT 300 #define BXT_ROM_INIT_TIMEOUT 70 #define BXT_IPC_PURGE_FW 0x01004000 @@ -38,8 +37,6 @@ /* Delay before scheduling D0i3 entry */ #define BXT_D0I3_DELAY 5000 -#define BXT_FW_ROM_INIT_RETRY 3 - static unsigned int bxt_get_errorcode(struct sst_dsp *ctx) { return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE); diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c index 4f64f097e9ae..e808f62960ba 100644 --- a/sound/soc/intel/skylake/cnl-sst.c +++ b/sound/soc/intel/skylake/cnl-sst.c @@ -57,18 +57,34 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize) ctx->dsp_ops.stream_tag = stream_tag; memcpy(ctx->dmab.area, fwdata, fwsize); + ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK); + if (ret < 0) { + dev_err(ctx->dev, "dsp core0 power up failed\n"); + ret = -EIO; + goto base_fw_load_failed; + } + /* purge FW request */ sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR, CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE | ((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID))); - ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK); + ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK); if (ret < 0) { - dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret); + dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret); ret = -EIO; goto base_fw_load_failed; } + ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA, + CNL_ADSP_REG_HIPCIDA_DONE, + CNL_ADSP_REG_HIPCIDA_DONE, + BXT_INIT_TIMEOUT, "HIPCIDA Done"); + if (ret < 0) { + dev_err(ctx->dev, "timeout for purge request: %d\n", ret); + goto base_fw_load_failed; + } + /* enable interrupt */ cnl_ipc_int_enable(ctx); cnl_ipc_op_int_enable(ctx); @@ -109,7 +125,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) { struct firmware stripped_fw; struct skl_dev *cnl = ctx->thread_context; - int ret; + int ret, i; if (!ctx->fw) { ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev); @@ -131,12 +147,16 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) stripped_fw.size = ctx->fw->size; skl_dsp_strip_extended_manifest(&stripped_fw); - ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); - if (ret < 0) { - dev_err(ctx->dev, "prepare firmware failed: %d\n", ret); - goto cnl_load_base_firmware_failed; + for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) { + ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size); + if (!ret) + break; + dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret); } + if (ret < 0) + goto cnl_load_base_firmware_failed; + ret = sst_transfer_fw_host_dma(ctx); if (ret < 0) { dev_err(ctx->dev, "transfer firmware failed: %d\n", ret); @@ -158,6 +178,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx) return 0; cnl_load_base_firmware_failed: + dev_err(ctx->dev, "firmware load failed: %d\n", ret); release_firmware(ctx->fw); ctx->fw = NULL; @@ -203,6 +224,7 @@ static int 
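cnl_load_base_firmware() above wraps the ROM handshake in a bounded retry, demoting per-attempt errors to dev_dbg() and reporting only the final failure. The control flow, reduced to a runnable toy where the third attempt succeeds (try_prepare() stands in for cnl_prepare_fw()):

#include <stdio.h>

static int try_prepare(int attempt)
{
	return attempt < 2 ? -5 : 0;	/* fails twice, then succeeds */
}

int main(void)
{
	int ret = -1;
	int i;

	for (i = 0; i < 3; i++) {	/* BXT_FW_ROM_INIT_RETRY is 3 */
		ret = try_prepare(i);
		if (!ret)
			break;
		fprintf(stderr, "attempt %d failed: %d\n", i, ret);
	}
	printf("final status: %d\n", ret);
	return ret ? 1 : 0;
}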
cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id) "dsp boot timeout, status=%#x error=%#x\n", sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS), sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE)); + ret = -ETIMEDOUT; goto err; } } else { diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index 19f328d71f24..d9c8f5cb389e 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -182,7 +182,8 @@ void skl_nhlt_remove_sysfs(struct skl_dev *skl) { struct device *dev = &skl->pci->dev; - sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr); + if (skl->nhlt) + sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr); } /* diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h index cdfec0fca577..1df9ef422f61 100644 --- a/sound/soc/intel/skylake/skl-sst-dsp.h +++ b/sound/soc/intel/skylake/skl-sst-dsp.h @@ -67,6 +67,8 @@ struct skl_dev; #define SKL_FW_INIT 0x1 #define SKL_FW_RFW_START 0xf +#define BXT_FW_ROM_INIT_RETRY 3 +#define BXT_INIT_TIMEOUT 300 #define SKL_ADSPIC_IPC 1 #define SKL_ADSPIS_IPC 1 diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index 69cd7a81bf2a..1940b17f27ef 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c @@ -14,6 +14,7 @@ #include <linux/uuid.h> #include <sound/intel-nhlt.h> #include <sound/soc.h> +#include <sound/soc-acpi.h> #include <sound/soc-topology.h> #include <uapi/sound/snd_sst_tokens.h> #include <uapi/sound/skl-tplg-interface.h> @@ -578,6 +579,38 @@ static int skl_tplg_unload_pipe_modules(struct skl_dev *skl, return ret; } +static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe) +{ + struct skl_pipe_fmt *cur_fmt; + struct skl_pipe_fmt *next_fmt; + int i; + + if (pipe->nr_cfgs <= 1) + return false; + + if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE) + return true; + + for (i = 0; i < pipe->nr_cfgs - 1; i++) { + if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) { + cur_fmt = &pipe->configs[i].out_fmt; + next_fmt = &pipe->configs[i + 1].out_fmt; + } else { + cur_fmt = &pipe->configs[i].in_fmt; + next_fmt = &pipe->configs[i + 1].in_fmt; + } + + if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq, + cur_fmt->bps, + next_fmt->channels, + next_fmt->freq, + next_fmt->bps)) + return true; + } + + return false; +} + /* * Here, we select pipe format based on the pipe type and pipe * direction to determine the current config index for the pipeline. 
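skl_tplg_is_multi_fmt() above declares a pipe multi-format when any two adjacent configs disagree in channels, rate, or bits per sample, which takes exactly n - 1 comparisons over n configs. The core test, standalone (struct pipe_fmt is an illustrative reduction of skl_pipe_fmt):

#include <stdbool.h>

struct pipe_fmt {
	unsigned int channels, freq, bps;
};

static bool any_adjacent_differ(const struct pipe_fmt *cfg, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		if (cfg[i].channels != cfg[i + 1].channels ||
		    cfg[i].freq != cfg[i + 1].freq ||
		    cfg[i].bps != cfg[i + 1].bps)
			return true;	/* mixed formats across configs */
	}
	return false;	/* every config agrees with its neighbour */
}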
@@ -600,6 +633,14 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig) return 0; } + if (skl_tplg_is_multi_fmt(skl, pipe)) { + pipe->cur_config_idx = pipe->pipe_config_idx; + pipe->memory_pages = pconfig->mem_pages; + dev_dbg(skl->dev, "found pipe config idx:%d\n", + pipe->cur_config_idx); + return 0; + } + if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) { dev_dbg(skl->dev, "No conn_type detected, take 0th config\n"); pipe->cur_config_idx = 0; @@ -1314,6 +1355,68 @@ static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w, return 0; } +static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol, + bool is_set) +{ + struct snd_soc_component *component = + snd_soc_kcontrol_component(kcontrol); + struct hdac_bus *bus = snd_soc_component_get_drvdata(component); + struct skl_dev *skl = bus_to_skl(bus); + struct skl_pipeline *ppl; + struct skl_pipe *pipe = NULL; + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + u32 *pipe_id; + + if (!ec) + return -EINVAL; + + if (is_set && ucontrol->value.enumerated.item[0] > ec->items) + return -EINVAL; + + pipe_id = ec->dobj.private; + + list_for_each_entry(ppl, &skl->ppl_list, node) { + if (ppl->pipe->ppl_id == *pipe_id) { + pipe = ppl->pipe; + break; + } + } + if (!pipe) + return -EIO; + + if (is_set) + pipe->pipe_config_idx = ucontrol->value.enumerated.item[0]; + else + ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx; + + return 0; +} + +static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false); +} + +static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true); +} + +static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false); +} + +static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true); +} + static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol, unsigned int __user *data, unsigned int size) { @@ -1853,6 +1956,16 @@ static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = { .get = skl_tplg_mic_control_get, .put = skl_tplg_mic_control_set, }, + { + .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT, + .get = skl_tplg_multi_config_get, + .put = skl_tplg_multi_config_set, + }, + { + .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC, + .get = skl_tplg_multi_config_get_dmic, + .put = skl_tplg_multi_config_set_dmic, + } }; static int skl_tplg_fill_pipe_cfg(struct device *dev, @@ -3013,12 +3126,21 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt, case SND_SOC_TPLG_CTL_ENUM: tplg_ec = container_of(hdr, struct snd_soc_tplg_enum_control, hdr); - if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) { + if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) { se = (struct soc_enum *)kctl->private_value; if (tplg_ec->priv.size) - return skl_init_enum_data(bus->dev, se, - tplg_ec); + skl_init_enum_data(bus->dev, se, tplg_ec); } + + /* + * now that the control initializations are done, remove + * write permission for the DMIC configuration enums to + * avoid conflicts between NHLT settings and user interaction + */ + + if (hdr->ops.get == 
SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC) + kctl->access = SNDRV_CTL_ELEM_ACCESS_READ; + break; default: @@ -3488,6 +3610,38 @@ static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, return 0; } +static void skl_tplg_complete(struct snd_soc_component *component) +{ + struct snd_soc_dobj *dobj; + struct snd_soc_acpi_mach *mach = + dev_get_platdata(component->card->dev); + int i; + + list_for_each_entry(dobj, &component->dobj_list, list) { + struct snd_kcontrol *kcontrol = dobj->control.kcontrol; + struct soc_enum *se; + char **texts; + char chan_text[4]; + + if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol || + kcontrol->put != skl_tplg_multi_config_set_dmic) + continue; + + se = (struct soc_enum *)kcontrol->private_value; + texts = dobj->control.dtexts; + sprintf(chan_text, "c%d", mach->mach_params.dmic_num); + + for (i = 0; i < se->items; i++) { + struct snd_ctl_elem_value val = {}; + + if (strstr(texts[i], chan_text)) { + val.value.enumerated.item[0] = i; + kcontrol->put(kcontrol, &val); + } + } + } +} + static struct snd_soc_tplg_ops skl_tplg_ops = { .widget_load = skl_tplg_widget_load, .control_load = skl_tplg_control_load, @@ -3497,6 +3651,7 @@ static struct snd_soc_tplg_ops skl_tplg_ops = { .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), .manifest = skl_manifest_load, .dai_load = skl_dai_load, + .complete = skl_tplg_complete, }; /* @@ -3565,8 +3720,20 @@ int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) ret = request_firmware(&fw, skl->tplg_name, bus->dev); if (ret < 0) { - dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin", - skl->tplg_name, ret); + char alt_tplg_name[64]; + + snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin", + skl->mach->drv_name); + dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s", + skl->tplg_name, ret, alt_tplg_name); + + ret = request_firmware(&fw, alt_tplg_name, bus->dev); + if (!ret) + goto component_load; + + dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin", + alt_tplg_name, ret); + ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); if (ret < 0) { dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", @@ -3575,6 +3742,8 @@ int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) } } +component_load: + /* * The complete tplg for SKL is loaded as index 0, we don't use * any other index diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h index e967800dbb62..06576147cc29 100644 --- a/sound/soc/intel/skylake/skl-topology.h +++ b/sound/soc/intel/skylake/skl-topology.h @@ -306,6 +306,7 @@ struct skl_pipe { struct skl_path_config configs[SKL_MAX_PATH_CONFIGS]; struct list_head w_list; bool passthru; + u32 pipe_config_idx; }; enum skl_module_state { diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 141dbbf975ac..2e5fbd220923 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -129,6 +129,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset) struct hdac_ext_link *hlink; int ret; + snd_hdac_set_codec_wakeup(bus, true); skl_enable_miscbdcge(bus->dev, false); ret = snd_hdac_bus_init_chip(bus, full_reset); @@ -137,6 +138,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset) writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV); skl_enable_miscbdcge(bus->dev, true); + snd_hdac_set_codec_wakeup(bus, false); return ret; } @@ -480,13 +482,8 @@ static struct skl_ssp_clk 
skl_ssp_clks[] = { static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl_dev *skl, struct snd_soc_acpi_mach *machines) { - struct hdac_bus *bus = skl_to_bus(skl); struct snd_soc_acpi_mach *mach; - /* check if we have any codecs detected on bus */ - if (bus->codec_mask == 0) - return NULL; - /* point to common table */ mach = snd_soc_acpi_intel_hda_machines; @@ -635,6 +632,9 @@ static int skl_clock_device_register(struct skl_dev *skl) struct platform_device_info pdevinfo = {NULL}; struct skl_clk_pdata *clk_pdata; + if (!skl->nhlt) + return 0; + clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata), GFP_KERNEL); if (!clk_pdata) @@ -807,6 +807,9 @@ static void skl_probe_work(struct work_struct *work) return; } + skl_init_pci(skl); + skl_dum_set(bus); + err = skl_init_chip(bus, true); if (err < 0) { dev_err(bus->dev, "Init chip failed with err: %d\n", err); @@ -922,8 +925,6 @@ static int skl_first_init(struct hdac_bus *bus) return -ENXIO; } - snd_hdac_bus_reset_link(bus, true); - snd_hdac_bus_parse_capabilities(bus); /* check if PPCAP exists */ @@ -971,11 +972,7 @@ static int skl_first_init(struct hdac_bus *bus) if (err < 0) return err; - /* initialize chip */ - skl_init_pci(skl); - skl_dum_set(bus); - - return skl_init_chip(bus, true); + return 0; } static int skl_probe(struct pci_dev *pci, @@ -1080,8 +1077,6 @@ static int skl_probe(struct pci_dev *pci, if (bus->mlcap) snd_hdac_ext_bus_get_ml_capabilities(bus); - snd_hdac_bus_stop_chip(bus); - /* create device for soc dmic */ err = skl_dmic_device_register(skl); if (err < 0) { @@ -1098,7 +1093,8 @@ out_dsp_free: out_clk_free: skl_clock_device_unregister(skl); out_nhlt_free: - intel_nhlt_free(skl->nhlt); + if (skl->nhlt) + intel_nhlt_free(skl->nhlt); out_free: skl_free(bus); @@ -1147,7 +1143,8 @@ static void skl_remove(struct pci_dev *pci) skl_dmic_device_unregister(skl); skl_clock_device_unregister(skl); skl_nhlt_remove_sysfs(skl); - intel_nhlt_free(skl->nhlt); + if (skl->nhlt) + intel_nhlt_free(skl->nhlt); skl_free(bus); dev_set_drvdata(&pci->dev, NULL); } diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 0bbd86390be5..9bfd2aabbfe6 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -309,10 +309,14 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, switch (clk_id) { case JZ4740_I2S_CLKSRC_EXT: parent = clk_get(NULL, "ext"); + if (IS_ERR(parent)) + return PTR_ERR(parent); clk_set_parent(i2s->clk_i2s, parent); break; case JZ4740_I2S_CLKSRC_PLL: parent = clk_get(NULL, "pll half"); + if (IS_ERR(parent)) + return PTR_ERR(parent); clk_set_parent(i2s->clk_i2s, parent); ret = clk_set_rate(i2s->clk_i2s, freq); break; diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index 6f69f314f2c2..d2d5c25bf550 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -132,7 +132,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, "kirkwood-i2s", priv); if (err) - return -EBUSY; + return err; /* * Enable Error interrupts. 
We're only ack'ing them but diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig index 2e3676147cea..e0d24592ebd7 100644 --- a/sound/soc/meson/Kconfig +++ b/sound/soc/meson/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menu "ASoC support for Amlogic platforms" - depends on ARCH_MESON || COMPILE_TEST + depends on ARCH_MESON || (COMPILE_TEST && COMMON_CLK) config SND_MESON_AXG_FIFO tristate diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c index 1f698adde506..7126344017fa 100644 --- a/sound/soc/meson/axg-card.c +++ b/sound/soc/meson/axg-card.c @@ -266,7 +266,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card, lb = &card->dai_link[*index + 1]; - lb->name = kasprintf(GFP_KERNEL, "%s-lb", pad->name); + lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name); if (!lb->name) return -ENOMEM; diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c index d286dff3171d..898ef1d5608f 100644 --- a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -244,7 +244,7 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) /* Enable pclk to access registers and clock the fifo ip */ ret = clk_prepare_enable(fifo->pclk); if (ret) - return ret; + goto free_irq; /* Setup status2 so it reports the memory pointer */ regmap_update_bits(fifo->map, FIFO_CTRL1, @@ -264,8 +264,14 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) /* Take memory arbitror out of reset */ ret = reset_control_deassert(fifo->arb); if (ret) - clk_disable_unprepare(fifo->pclk); + goto free_clk; + return 0; + +free_clk: + clk_disable_unprepare(fifo->pclk); +free_irq: + free_irq(fifo->irq, ss); return ret; } diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index 358c8c0d861c..f7e8e9da68a0 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter) { struct axg_tdm_stream *ts = formatter->stream; - bool invert = formatter->drv->quirks->invert_sclk; + bool invert; int ret; /* Do nothing if the formatter is already enabled */ @@ -96,11 +96,12 @@ static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter) return ret; /* - * If sclk is inverted, invert it back and provide the inversion - * required by the formatter + * If sclk is inverted, it means the bit should be latched on the + * rising edge, which is what our HW expects. If not, we need to + * invert it before the formatter. */ - invert ^= axg_tdm_sclk_invert(ts->iface->fmt); - ret = clk_set_phase(formatter->sclk, invert ? 180 : 0); + invert = axg_tdm_sclk_invert(ts->iface->fmt); + ret = clk_set_phase(formatter->sclk, invert ?
0 : 180); if (ret) return ret; diff --git a/sound/soc/meson/axg-tdm-formatter.h b/sound/soc/meson/axg-tdm-formatter.h index 9ef98e955cb2..a1f0dcc0ff13 100644 --- a/sound/soc/meson/axg-tdm-formatter.h +++ b/sound/soc/meson/axg-tdm-formatter.h @@ -16,7 +16,6 @@ struct snd_kcontrol; struct axg_tdm_formatter_hw { unsigned int skew_offset; - bool invert_sclk; }; struct axg_tdm_formatter_ops { diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index d51f3344be7c..f5a431b8de6c 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -119,16 +119,23 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai); - /* These modes are not supported */ - if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) { - dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); - return -EINVAL; - } + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + if (!iface->mclk) { + dev_err(dai->dev, "cpu clock master: mclk missing\n"); + return -ENODEV; + } + break; - /* If the TDM interface is the clock master, it requires mclk */ - if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) { - dev_err(dai->dev, "cpu clock master: mclk missing\n"); - return -ENODEV; + case SND_SOC_DAIFMT_CBM_CFM: + break; + + case SND_SOC_DAIFMT_CBS_CFM: + case SND_SOC_DAIFMT_CBM_CFS: + dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); + /* Fall-through */ + default: + return -EINVAL; } iface->fmt = fmt; @@ -319,7 +326,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream, if (ret) return ret; - if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) { + if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) == + SND_SOC_DAIFMT_CBS_CFS) { ret = axg_tdm_iface_set_sclk(dai, params); if (ret) return ret; @@ -459,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component, return ret; } +static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = { + SND_SOC_DAPM_SIGGEN("Playback Signal"), +}; + +static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = { + { "Loopback", NULL, "Playback Signal" }, +}; + static const struct snd_soc_component_driver axg_tdm_iface_component_drv = { - .set_bias_level = axg_tdm_iface_set_bias_level, + .dapm_widgets = axg_tdm_iface_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(axg_tdm_iface_dapm_widgets), + .dapm_routes = axg_tdm_iface_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(axg_tdm_iface_dapm_routes), + .set_bias_level = axg_tdm_iface_set_bias_level, }; static const struct of_device_id axg_tdm_iface_of_match[] = { diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c index 973d4c02ef8d..b4faf9d5c1aa 100644 --- a/sound/soc/meson/axg-tdmin.c +++ b/sound/soc/meson/axg-tdmin.c @@ -228,8 +228,7 @@ static const struct axg_tdm_formatter_driver axg_tdmin_drv = { .regmap_cfg = &axg_tdmin_regmap_cfg, .ops = &axg_tdmin_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = false, - .skew_offset = 2, + .skew_offset = 3, }, }; @@ -237,6 +236,12 @@ static const struct of_device_id axg_tdmin_of_match[] = { { .compatible = "amlogic,axg-tdmin", .data = &axg_tdmin_drv, + }, { + .compatible = "amlogic,g12a-tdmin", + .data = &axg_tdmin_drv, + }, { + .compatible = "amlogic,sm1-tdmin", + .data = &axg_tdmin_drv, }, {} }; MODULE_DEVICE_TABLE(of, axg_tdmin_of_match); diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c index 
418ec314b37d..3ceabddae629 100644 --- a/sound/soc/meson/axg-tdmout.c +++ b/sound/soc/meson/axg-tdmout.c @@ -238,7 +238,6 @@ static const struct axg_tdm_formatter_driver axg_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 1, }, }; @@ -248,7 +247,6 @@ static const struct axg_tdm_formatter_driver g12a_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 2, }, }; @@ -309,7 +307,6 @@ static const struct axg_tdm_formatter_driver sm1_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 2, }, }; diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c index ecf41c7549a6..32b9fd59353a 100644 --- a/sound/soc/meson/axg-toddr.c +++ b/sound/soc/meson/axg-toddr.c @@ -18,6 +18,7 @@ #define CTRL0_TODDR_SEL_RESAMPLE BIT(30) #define CTRL0_TODDR_EXT_SIGNED BIT(29) #define CTRL0_TODDR_PP_MODE BIT(28) +#define CTRL0_TODDR_SYNC_CH BIT(27) #define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13) #define CTRL0_TODDR_TYPE(x) ((x) << 13) #define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8) @@ -184,10 +185,31 @@ static const struct axg_fifo_match_data axg_toddr_match_data = { .dai_drv = &axg_toddr_dai_drv }; +static int g12a_toddr_dai_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai); + int ret; + + ret = axg_toddr_dai_startup(substream, dai); + if (ret) + return ret; + + /* + * Make sure the first channel ends up at the beginning of the output. + * As weird as it looks, without this the first channel may be misplaced + * in memory, with a random shift of 2 channels.
+ */ + regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH, + CTRL0_TODDR_SYNC_CH); + + return 0; +} + static const struct snd_soc_dai_ops g12a_toddr_ops = { .prepare = g12a_toddr_dai_prepare, .hw_params = axg_toddr_dai_hw_params, - .startup = axg_toddr_dai_startup, + .startup = g12a_toddr_dai_startup, .shutdown = axg_toddr_dai_shutdown, }; diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig index 60086858e920..b9d8fe9f996a 100644 --- a/sound/soc/qcom/Kconfig +++ b/sound/soc/qcom/Kconfig @@ -72,7 +72,7 @@ config SND_SOC_QDSP6_ASM_DAI config SND_SOC_QDSP6 tristate "SoC ALSA audio driver for QDSP6" - depends on QCOM_APR && HAS_DMA + depends on QCOM_APR select SND_SOC_QDSP6_COMMON select SND_SOC_QDSP6_CORE select SND_SOC_QDSP6_AFE diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index ac75838bbfab..15a88020dfab 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -235,6 +235,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; card->dapm_widgets = apq8016_sbc_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets); data = apq8016_sbc_parse_of(card); diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c index 94363fd6846a..c10c5f2ec29b 100644 --- a/sound/soc/qcom/apq8096.c +++ b/sound/soc/qcom/apq8096.c @@ -114,6 +114,7 @@ static int apq8096_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c index 6c20bdd850f3..10322690c0ea 100644 --- a/sound/soc/qcom/common.c +++ b/sound/soc/qcom/common.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include "common.h" +#include "qdsp6/q6afe.h" int qcom_snd_parse_of(struct snd_soc_card *card) { @@ -44,8 +45,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card) for_each_child_of_node(dev->of_node, np) { dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); - if (!dlc) - return -ENOMEM; + if (!dlc) { + ret = -ENOMEM; + goto err; + } link->cpus = &dlc[0]; link->platforms = &dlc[1]; @@ -101,6 +104,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card) } link->no_pcm = 1; link->ignore_pmdown_time = 1; + + if (q6afe_is_rx_port(link->id)) { + link->dpcm_playback = 1; + link->dpcm_capture = 0; + } else { + link->dpcm_playback = 0; + link->dpcm_capture = 1; + } + } else { dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL); if (!dlc) @@ -113,12 +125,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card) link->codecs->dai_name = "snd-soc-dummy-dai"; link->codecs->name = "snd-soc-dummy"; link->dynamic = 1; + link->dpcm_playback = 1; + link->dpcm_capture = 1; } link->ignore_suspend = 1; link->nonatomic = 1; - link->dpcm_playback = 1; - link->dpcm_capture = 1; link->stream_name = link->name; link++; diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index dbce7e92baf3..c5d6952a4a33 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -174,21 +174,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream, return 0; } -static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); - int ret; - - ret = regmap_write(drvdata->lpaif_map, - LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), - 0); - if (ret) - dev_err(dai->dev, "error writing to i2sctl 
reg: %d\n", ret); - - return ret; -} - static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { @@ -269,7 +254,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = { .startup = lpass_cpu_daiops_startup, .shutdown = lpass_cpu_daiops_shutdown, .hw_params = lpass_cpu_daiops_hw_params, - .hw_free = lpass_cpu_daiops_hw_free, .prepare = lpass_cpu_daiops_prepare, .trigger = lpass_cpu_daiops_trigger, }; diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index 4c745baa39f7..b1981d84ac18 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) int ret, dma_ch, dir = substream->stream; struct lpass_pcm_data *data; - data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -73,8 +73,10 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) else dma_ch = 0; - if (dma_ch < 0) + if (dma_ch < 0) { + kfree(data); return dma_ch; + } drvdata->substream[dma_ch] = substream; @@ -95,6 +97,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { + kfree(data); dev_err(soc_runtime->dev, "setting constraints failed: %d\n", ret); return -EINVAL; @@ -119,6 +122,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream) if (v->free_dma_channel) v->free_dma_channel(drvdata, data->dma_ch); + kfree(data); return 0; } diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index c1a7624eaf17..0168af849272 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -902,6 +902,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -917,6 +919,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -931,6 +935,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -946,6 +952,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -960,6 +968,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -975,6 +985,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -989,6 +1001,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min 
= 8000, .rate_max = 48000, }, @@ -1004,6 +1018,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -1134,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component, } static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { - SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1", "Secondary MI2S Playback SD1", - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), 
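	/*
	 * SND_SOC_NOPM (-1) marks these AIF widgets as having no backing
	 * register; the bare 0 it replaces would otherwise be treated as
	 * register address 0 by DAPM.
	 */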
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, 
SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL, - 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_component_driver q6afe_dai_component = { diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c index e0945f7a58c8..0ce4eb60f984 100644 --- a/sound/soc/qcom/qdsp6/q6afe.c +++ b/sound/soc/qcom/qdsp6/q6afe.c @@ -800,6 +800,14 @@ int q6afe_get_port_id(int index) } EXPORT_SYMBOL_GPL(q6afe_get_port_id); +int q6afe_is_rx_port(int index) +{ + if (index < 0 || index >= AFE_PORT_MAX) + return -EINVAL; + + return port_maps[index].is_rx; +} +EXPORT_SYMBOL_GPL(q6afe_is_rx_port); static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt, struct q6afe_port 
*port) { diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h index c7ed5422baff..1a0f80a14afe 100644 --- a/sound/soc/qcom/qdsp6/q6afe.h +++ b/sound/soc/qcom/qdsp6/q6afe.h @@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port); int q6afe_port_stop(struct q6afe_port *port); void q6afe_port_put(struct q6afe_port *port); int q6afe_get_port_id(int index); +int q6afe_is_rx_port(int index); void q6afe_hdmi_port_prepare(struct q6afe_port *port, struct q6afe_hdmi_cfg *cfg); void q6afe_slim_port_prepare(struct q6afe_port *port, diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 548eb4fa2da6..9f0ffdcef637 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -171,7 +171,7 @@ static const struct snd_compr_codec_caps q6asm_compr_caps = { }; static void event_handler(uint32_t opcode, uint32_t token, - uint32_t *payload, void *priv) + void *payload, void *priv) { struct q6asm_dai_rtd *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; @@ -494,7 +494,7 @@ static struct snd_pcm_ops q6asm_dai_ops = { }; static void compress_event_handler(uint32_t opcode, uint32_t token, - uint32_t *payload, void *priv) + void *payload, void *priv) { struct q6asm_dai_rtd *prtd = priv; struct snd_compr_stream *substream = prtd->cstream; diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index e8141a33a55e..835ac98a789c 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c @@ -25,6 +25,7 @@ #define ASM_STREAM_CMD_FLUSH 0x00010BCE #define ASM_SESSION_CMD_PAUSE 0x00010BD3 #define ASM_DATA_CMD_EOS 0x00010BDB +#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C #define ASM_NULL_POPP_TOPOLOGY 0x00010C68 #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10 @@ -546,9 +547,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, case ASM_SESSION_CMD_SUSPEND: client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE; break; - case ASM_DATA_CMD_EOS: - client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; - break; case ASM_STREAM_CMD_FLUSH: client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE; break; @@ -651,6 +649,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, spin_unlock_irqrestore(&ac->lock, flags); } + break; + case ASM_DATA_EVENT_RENDERED_EOS: + client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; break; } diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index ddcd9978cf57..745cc9dd14f3 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -996,6 +996,20 @@ static int msm_routing_probe(struct snd_soc_component *c) return 0; } +static unsigned int q6routing_reg_read(struct snd_soc_component *component, + unsigned int reg) +{ + /* default value */ + return 0; +} + +static int q6routing_reg_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val) +{ + /* dummy */ + return 0; +} + static const struct snd_soc_component_driver msm_soc_routing_component = { .ops = &q6pcm_routing_ops, .probe = msm_routing_probe, @@ -1004,6 +1018,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = { .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets), .dapm_routes = intercon, .num_dapm_routes = ARRAY_SIZE(intercon), + .read = q6routing_reg_read, + .write = q6routing_reg_write, }; static int q6pcm_routing_probe(struct platform_device *pdev) diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 28f3cef696e6..23e1de61e92e 100644 --- 
a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -16,6 +16,7 @@ #include "qdsp6/q6afe.h" #include "../codecs/rt5663.h" +#define DRIVER_NAME "sdm845" #define DEFAULT_SAMPLE_RATE_48K 48000 #define DEFAULT_MCLK_RATE 24576000 #define TDM_BCLK_RATE 6144000 @@ -407,9 +408,11 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) goto data_alloc_fail; } + card->driver_name = DRIVER_NAME; card->dapm_widgets = sdm845_snd_widgets; card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets); card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c index e6666e597265..236759179100 100644 --- a/sound/soc/qcom/storm.c +++ b/sound/soc/qcom/storm.c @@ -96,6 +96,7 @@ static int storm_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = &pdev->dev; + card->owner = THIS_MODULE; ret = snd_soc_of_parse_card_name(card, "qcom,model"); if (ret) { diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 7cd42fcfcf38..1707414cfa92 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(pdm->regmap); diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c index bb9910d4cbe2..058847071584 100644 --- a/sound/soc/samsung/tm2_wm5110.c +++ b/sound/soc/samsung/tm2_wm5110.c @@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev) ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller", cells_name, i, &args); - if (!args.np) { + if (ret) { dev_err(dev, "i2s-controller property parse error: %d\n", i); ret = -EINVAL; goto dai_node_put; diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c index af19010b9d88..8bd49c8a9517 100644 --- a/sound/soc/sh/rcar/gen.c +++ b/sound/soc/sh/rcar/gen.c @@ -224,6 +224,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv) RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884), RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888), RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE0, 0x850), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE1, 0x854), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE2, 0x858), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE3, 0x85c), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE4, 0x890), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE5, 0x894), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE6, 0x898), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE7, 0x89c), RSND_GEN_S_REG(HDMI0_SEL, 0x9e0), RSND_GEN_S_REG(HDMI1_SEL, 0x9e4), diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index ea6cbaa9743e..d47608ff5fac 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -189,6 +189,14 @@ enum rsnd_reg { SSI_SYS_STATUS5, SSI_SYS_STATUS6, SSI_SYS_STATUS7, + SSI_SYS_INT_ENABLE0, + SSI_SYS_INT_ENABLE1, + SSI_SYS_INT_ENABLE2, + SSI_SYS_INT_ENABLE3, + SSI_SYS_INT_ENABLE4, + SSI_SYS_INT_ENABLE5, + SSI_SYS_INT_ENABLE6, + SSI_SYS_INT_ENABLE7, HDMI0_SEL, HDMI1_SEL, SSI9_BUSIF0_MODE, @@ -237,6 +245,7 @@ enum rsnd_reg { #define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i)) #define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i)) #define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i)) +#define SSI_SYS_INT_ENABLE(i) (SSI_SYS_INT_ENABLE0 + (i)) struct rsnd_priv; diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index fc5d089868df..47d5ddb526f2 100644 --- 
a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -372,6 +372,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, u32 wsr = ssi->wsr; int width; int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + int i; + u32 sys_int_enable = 0; is_tdm = rsnd_runtime_is_tdm(io); is_tdm_split = rsnd_runtime_is_tdm_split(io); @@ -447,6 +450,38 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, cr_mode = DIEN; /* PIO : enable Data interrupt */ } + /* enable busif buffer over/under run interrupt. */ + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE(i * 2)); + sys_int_enable |= 0xf << (id * 4); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE(i * 2), + sys_int_enable); + } + + break; + case 9: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1)); + sys_int_enable |= 0xf << 4; + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1), + sys_int_enable); + } + + break; + } + } + init_end: ssi->cr_own = cr_own; ssi->cr_mode = cr_mode; @@ -496,6 +531,13 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); struct device *dev = rsnd_priv_to_dev(priv); + int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + int i; + u32 sys_int_enable = 0; + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); if (!rsnd_ssi_is_run_mods(mod, io)) return 0; @@ -517,6 +559,38 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, ssi->wsr = 0; } + /* disable busif buffer over/under run interrupt. */ + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE(i * 2)); + sys_int_enable &= ~(0xf << (id * 4)); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE(i * 2), + sys_int_enable); + } + + break; + case 9: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1)); + sys_int_enable &= ~(0xf << 4); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1), + sys_int_enable); + } + + break; + } + } + return 0; } @@ -594,10 +668,16 @@ static int rsnd_ssi_stop(struct rsnd_mod *mod, * Capture: It might not receave data. Do nothing */ if (rsnd_io_is_play(io)) { - rsnd_mod_write(mod, SSICR, cr | EN); + rsnd_mod_write(mod, SSICR, cr | ssi->cr_en); rsnd_ssi_status_check(mod, DIRQ); } + /* In multi-SSI mode, stop is performed by setting ssi0129 in + * SSI_CONTROL to 0 (in rsnd_ssio_stop_gen2). Do nothing here. + */ + if (rsnd_ssi_multi_slaves_runtime(io)) + return 0; + /* * disable SSI, * and, wait idle state @@ -616,6 +696,11 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, int enable) { u32 val = 0; + int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); if (rsnd_is_gen1(priv)) return 0; @@ -629,6 +714,19 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, if (enable) val = rsnd_ssi_is_dma_mode(mod) ? 
0x0e000000 : 0x0f000000; + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 9: + val |= 0x0000ff00; + break; + } + } + rsnd_mod_write(mod, SSI_INT_ENABLE, val); return 0; @@ -645,6 +743,12 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, u32 status; bool elapsed = false; bool stop = false; + int id = rsnd_mod_id(mod); + int i; + int is_tdm, is_tdm_split; + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); spin_lock(&priv->lock); @@ -666,6 +770,53 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, stop = true; } + status = 0; + + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + status = rsnd_mod_read(mod, + SSI_SYS_STATUS(i * 2)); + status &= 0xf << (id * 4); + + if (status) { + rsnd_dbg_irq_status(dev, + "%s err status : 0x%08x\n", + rsnd_mod_name(mod), status); + rsnd_mod_write(mod, + SSI_SYS_STATUS(i * 2), + 0xf << (id * 4)); + stop = true; + break; + } + } + break; + case 9: + for (i = 0; i < 4; i++) { + status = rsnd_mod_read(mod, + SSI_SYS_STATUS((i * 2) + 1)); + status &= 0xf << 4; + + if (status) { + rsnd_dbg_irq_status(dev, + "%s err status : 0x%08x\n", + rsnd_mod_name(mod), status); + rsnd_mod_write(mod, + SSI_SYS_STATUS((i * 2) + 1), + 0xf << 4); + stop = true; + break; + } + } + break; + } + } + rsnd_ssi_status_clear(mod); rsnd_ssi_interrupt_out: spin_unlock(&priv->lock); @@ -737,6 +888,9 @@ static void rsnd_ssi_parent_attach(struct rsnd_mod *mod, if (!rsnd_rdai_is_clk_master(rdai)) return; + if (rsnd_ssi_is_multi_slave(mod, io)) + return; + switch (rsnd_mod_id(mod)) { case 1: case 2: diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index f35d88211887..9c7c3e7539c9 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c @@ -221,7 +221,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod, i; for_each_rsnd_mod_array(i, pos, io, rsnd_ssi_array) { - shift = (i * 4) + 16; + shift = (i * 4) + 20; val = (val & ~(0xF << shift)) | rsnd_mod_id(pos) << shift; } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 9d3b546bae7b..9df20768a8f2 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1076,8 +1076,18 @@ static int soc_probe_component(struct snd_soc_card *card, ret = snd_soc_dapm_add_routes(dapm, component->driver->dapm_routes, component->driver->num_dapm_routes); - if (ret < 0) - goto err_probe; + if (ret < 0) { + if (card->disable_route_checks) { + dev_info(card->dev, + "%s: disable_route_checks set, ignoring errors on add_routes\n", + __func__); + } else { + dev_err(card->dev, + "%s: snd_soc_dapm_add_routes failed: %d\n", + __func__, ret); + goto err_probe; + } + } /* see for_each_card_components */ list_add(&component->card_list, &card->component_dev_list); @@ -1885,9 +1895,25 @@ match: dai_link->platforms->name = component->name; /* convert non BE into BE */ - dai_link->no_pcm = 1; - dai_link->dpcm_playback = 1; - dai_link->dpcm_capture = 1; + if (!dai_link->no_pcm) { + dai_link->no_pcm = 1; + + if (dai_link->dpcm_playback) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n", + dai_link->name); + if (dai_link->dpcm_capture) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n", + dai_link->name); + + /* convert normal link into DPCM one */ + if (!(dai_link->dpcm_playback || + dai_link->dpcm_capture)) { + dai_link->dpcm_playback = !dai_link->capture_only; 
+ dai_link->dpcm_capture = !dai_link->playback_only; + } + } /* override any BE fixups */ dai_link->be_hw_params_fixup = @@ -2067,8 +2093,18 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes, card->num_dapm_routes); - if (ret < 0) - goto probe_end; + if (ret < 0) { + if (card->disable_route_checks) { + dev_info(card->dev, + "%s: disable_route_checks set, ignoring errors on add_routes\n", + __func__); + } else { + dev_err(card->dev, + "%s: snd_soc_dapm_add_routes failed: %d\n", + __func__, ret); + goto probe_end; + } + } ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes, card->num_of_dapm_routes); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index ebd785f9aa46..7c4d5963692d 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -423,7 +423,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, memset(&template, 0, sizeof(template)); template.reg = e->reg; - template.mask = e->mask << e->shift_l; + template.mask = e->mask; template.shift = e->shift_l; template.off_val = snd_soc_enum_item_to_val(e, 0); template.on_val = template.off_val; @@ -546,8 +546,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol, if (data->value == value) return false; - if (data->widget) - data->widget->on_val = value; + if (data->widget) { + switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) { + case snd_soc_dapm_switch: + case snd_soc_dapm_mixer: + case snd_soc_dapm_mixer_named_ctl: + data->widget->on_val = value & data->widget->mask; + break; + case snd_soc_dapm_demux: + case snd_soc_dapm_mux: + data->widget->on_val = value >> data->widget->shift; + break; + default: + data->widget->on_val = value; + break; + } + } data->value = value; @@ -802,7 +816,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, val = max - val; p->connect = !!val; } else { - p->connect = 0; + /* since a virtual mixer has no backing registers to + * decide which path to connect, it will try to match + * the initial state. This is to ensure + * that the default mixer choice will be + * correctly powered up during initialization. + */ + p->connect = invert; } } @@ -2464,6 +2484,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w) enum snd_soc_dapm_direction dir; list_del(&w->list); + list_del(&w->dirty); /* * remove source and sink paths associated to this widget. 
* While removing the path, remove reference to it from both diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index f4dc3d445aae..95fc24580f85 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -832,7 +832,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, unsigned int regbase = mc->regbase; unsigned int regcount = mc->regcount; unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<<regwshift)-1; + unsigned int regwmask = (1UL<<regwshift)-1; unsigned int invert = mc->invert; unsigned long mask = (1UL<<mc->nbits)-1; long min = mc->min; @@ -881,7 +881,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, unsigned int regbase = mc->regbase; unsigned int regcount = mc->regcount; unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<<regwshift)-1; + unsigned int regwmask = (1UL<<regwshift)-1; unsigned int invert = mc->invert; unsigned long mask = (1UL<<mc->nbits)-1; long max = mc->max; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index d978df95c5c6..1196167364d4 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -2222,7 +2222,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, switch (cmd) { case SNDRV_PCM_TRIGGER_START: if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) continue; ret = dpcm_do_trigger(dpcm, be_substream, cmd); @@ -2252,7 +2253,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; break; case SNDRV_PCM_TRIGGER_STOP: - if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) continue; if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) @@ -2344,6 +2346,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + case SNDRV_PCM_TRIGGER_DRAIN: ret = dpcm_dai_trigger_fe_be(substream, cmd, true); break; case SNDRV_PCM_TRIGGER_STOP: @@ -2361,6 +2364,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + case SNDRV_PCM_TRIGGER_DRAIN: ret = dpcm_dai_trigger_fe_be(substream, cmd, false); break; case SNDRV_PCM_TRIGGER_STOP: diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index b19ecaf0febf..c367609433bf 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -362,7 +362,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg, struct snd_soc_component *comp = tplg->comp; return soc_tplg_add_dcontrol(comp->card->snd_card, - comp->dev, k, NULL, comp, kcontrol); + comp->dev, k, comp->name_prefix, comp, kcontrol); } /* remove a mixer kcontrol */ @@ -893,7 +893,13 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count, } /* create any TLV data */ - soc_tplg_create_tlv(tplg, &kc, &mc->hdr); + err = soc_tplg_create_tlv(tplg, &kc, &mc->hdr); + if (err < 0) { + dev_err(tplg->dev, "ASoC: failed to create TLV %s\n", + mc->hdr.name); + kfree(sm); + continue; + } /* pass control to driver for optional further init */ err = soc_tplg_init_kcontrol(tplg, &kc, @@ -976,7 +982,7 @@ static int 
soc_tplg_denum_create_values(struct soc_enum *se, return -EINVAL; se->dobj.control.dvalues = kzalloc(le32_to_cpu(ec->items) * - sizeof(u32), + sizeof(*se->dobj.control.dvalues), GFP_KERNEL); if (!se->dobj.control.dvalues) return -ENOMEM; @@ -1117,6 +1123,7 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { struct snd_soc_tplg_ctl_hdr *control_hdr; + int ret; int i; if (tplg->pass != SOC_TPLG_PASS_MIXER) { @@ -1145,25 +1152,30 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg, case SND_SOC_TPLG_CTL_RANGE: case SND_SOC_TPLG_DAPM_CTL_VOLSW: case SND_SOC_TPLG_DAPM_CTL_PIN: - soc_tplg_dmixer_create(tplg, 1, - le32_to_cpu(hdr->payload_size)); + ret = soc_tplg_dmixer_create(tplg, 1, + le32_to_cpu(hdr->payload_size)); break; case SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_CTL_ENUM_VALUE: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE: - soc_tplg_denum_create(tplg, 1, - le32_to_cpu(hdr->payload_size)); + ret = soc_tplg_denum_create(tplg, 1, + le32_to_cpu(hdr->payload_size)); break; case SND_SOC_TPLG_CTL_BYTES: - soc_tplg_dbytes_create(tplg, 1, - le32_to_cpu(hdr->payload_size)); + ret = soc_tplg_dbytes_create(tplg, 1, + le32_to_cpu(hdr->payload_size)); break; default: soc_bind_err(tplg, control_hdr, i); return -EINVAL; } + if (ret < 0) { + dev_err(tplg->dev, "ASoC: invalid control\n"); + return ret; + } + } return 0; @@ -1271,16 +1283,30 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg, routes[i]->dobj.index = tplg->index; list_add(&routes[i]->dobj.list, &tplg->comp->dobj_list); - soc_tplg_add_route(tplg, routes[i]); + ret = soc_tplg_add_route(tplg, routes[i]); + if (ret < 0) { + /* + * this route was added to the list, it will + * be freed in remove_route() so increment the + * counter to skip it in the error handling + * below. + */ + i++; + break; + } /* add route, but keep going if some fail */ snd_soc_dapm_add_routes(dapm, routes[i], 1); } - /* free memory allocated for all dapm routes in case of error */ - if (ret < 0) - for (i = 0; i < count ; i++) - kfree(routes[i]); + /* + * free memory allocated for all dapm routes not added to the + * list in case of error + */ + if (ret < 0) { + while (i < count) + kfree(routes[i++]); + } /* * free pointer to array of dapm routes as this is no longer needed. 
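The route error handling above relies on an ownership handoff: once a route has been list_add()'ed to the component's dobj list, the topology remove path frees it, so the failure path may only free the entries that never made it onto the list. A minimal standalone sketch of that rule, not taken from the patch, with a hypothetical activate() helper and assuming <linux/list.h> and <linux/slab.h>:

struct item {
	struct list_head list;
};

static int add_all(struct list_head *head, struct item **v, int count)
{
	int i = 0, ret = 0;

	while (i < count) {
		list_add(&v[i]->list, head);	/* now owned by the list */
		ret = activate(v[i]);		/* hypothetical, may fail */
		i++;				/* keep v[i - 1] off the error path */
		if (ret < 0)
			break;
	}

	/* free only what was never added; the list owns the rest */
	while (ret < 0 && i < count)
		kfree(v[i++]);

	return ret;
}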
@@ -1354,7 +1380,13 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( } /* create any TLV data */ - soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr); + err = soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr); + if (err < 0) { + dev_err(tplg->dev, "ASoC: failed to create TLV %s\n", + mc->hdr.name); + kfree(sm); + continue; + } /* pass control to driver for optional further init */ err = soc_tplg_init_kcontrol(tplg, &kc[i], @@ -1362,7 +1394,6 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( if (err < 0) { dev_err(tplg->dev, "ASoC: failed to init %s\n", mc->hdr.name); - soc_tplg_free_tlv(tplg, &kc[i]); goto err_sm; } } @@ -1370,6 +1401,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( err_sm: for (; i >= 0; i--) { + soc_tplg_free_tlv(tplg, &kc[i]); sm = (struct soc_mixer_control *)kc[i].private_value; kfree(sm); kfree(kc[i].name); @@ -2072,7 +2104,9 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, _pcm = pcm; } else { abi_match = false; - pcm_new_ver(tplg, pcm, &_pcm); + ret = pcm_new_ver(tplg, pcm, &_pcm); + if (ret < 0) + return ret; } /* create the FE DAIs and DAI links */ @@ -2409,7 +2443,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg, { struct snd_soc_tplg_dai *dai; int count; - int i; + int i, ret; count = le32_to_cpu(hdr->count); @@ -2424,7 +2458,12 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg, return -EINVAL; } - soc_tplg_dai_config(tplg, dai); + ret = soc_tplg_dai_config(tplg, dai); + if (ret < 0) { + dev_err(tplg->dev, "ASoC: failed to configure DAI\n"); + return ret; + } + tplg->pos += (sizeof(*dai) + le32_to_cpu(dai->priv.size)); } @@ -2532,7 +2571,7 @@ static int soc_valid_header(struct soc_tplg *tplg, } /* big endian firmware objects not supported atm */ - if (hdr->magic == SOC_TPLG_MAGIC_BIG_ENDIAN) { + if (le32_to_cpu(hdr->magic) == SOC_TPLG_MAGIC_BIG_ENDIAN) { dev_err(tplg->dev, "ASoC: pass %d big endian not supported header got %x at offset 0x%lx size 0x%zx.\n", tplg->pass, hdr->magic, diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 12aec140819a..2a6b84d2781e 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -372,6 +372,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) /* init the IPC */ sdev->ipc = snd_sof_ipc_init(sdev); if (!sdev->ipc) { + ret = -ENOMEM; dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret); goto ipc_err; } diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c index 5529e8eeca46..08726034ff09 100644 --- a/sound/soc/sof/debug.c +++ b/sound/soc/sof/debug.c @@ -135,7 +135,7 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer, char *string; int ret; - string = kzalloc(count, GFP_KERNEL); + string = kzalloc(count+1, GFP_KERNEL); if (!string) return -ENOMEM; diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig index 71f318bc2c74..b4f0426685c4 100644 --- a/sound/soc/sof/imx/Kconfig +++ b/sound/soc/sof/imx/Kconfig @@ -14,7 +14,7 @@ if SND_SOC_SOF_IMX_TOPLEVEL config SND_SOC_SOF_IMX8_SUPPORT bool "SOF support for i.MX8" depends on IMX_SCU - depends on IMX_DSP + select IMX_DSP help This adds support for Sound Open Firmware for NXP i.MX8 platforms Say Y if you have such a device. 
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index 9e8233c10d86..df38616c431a 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -68,8 +68,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev) * has been recorded in STATESTS */ if (codec->jacktbl.used) - schedule_delayed_work(&codec->jackpoll_work, - codec->jackpoll_interval); + pm_request_resume(&codec->core.dev); } #else void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {} diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index fb55a3c5afd0..06715b3d8c31 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -179,7 +179,7 @@ int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask) return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS, adspcs, - !(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)), + !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)), HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC); } @@ -192,10 +192,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS); - is_enable = ((val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) && - (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) && - !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) && - !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask))); +#define MASK_IS_EQUAL(v, m, field) ({ \ + u32 _m = field(m); \ + ((v) & _m) == _m; \ +}) + + is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) && + MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) && + !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) && + !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)); + +#undef MASK_IS_EQUAL dev_dbg(sdev->dev, "DSP core(s) enabled? 
%d : core_mask %x\n", is_enable, core_mask); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 3c4b604412f0..a10166741865 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -672,6 +672,7 @@ free_streams: /* dsp_unmap: not currently used */ iounmap(sdev->bar[HDA_DSP_BAR]); hdac_bus_unmap: + platform_device_unregister(hdev->dmic_dev); iounmap(bus->remap_addr); err: return ret; diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c index e7b1a80e2a14..f38f651da224 100644 --- a/sound/soc/sof/ipc.c +++ b/sound/soc/sof/ipc.c @@ -215,15 +215,17 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg, snd_sof_trace_notify_for_error(ipc->sdev); ret = -ETIMEDOUT; } else { - /* copy the data returned from DSP */ ret = msg->reply_error; - if (msg->reply_size) - memcpy(reply_data, msg->reply_data, msg->reply_size); - if (ret < 0) + if (ret < 0) { dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n", hdr->cmd, msg->reply_size); - else + } else { ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd); + if (msg->reply_size) + /* copy the data returned from DSP */ + memcpy(reply_data, msg->reply_data, + msg->reply_size); + } } return ret; diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c index 3d128e5a132c..71410116add1 100644 --- a/sound/soc/sof/nocodec.c +++ b/sound/soc/sof/nocodec.c @@ -14,6 +14,7 @@ static struct snd_soc_card sof_nocodec_card = { .name = "nocodec", /* the sof- prefix is added by the core */ + .owner = THIS_MODULE }; static int sof_nocodec_bes_setup(struct device *dev, @@ -52,8 +53,10 @@ static int sof_nocodec_bes_setup(struct device *dev, links[i].platforms->name = dev_name(dev); links[i].codecs->dai_name = "snd-soc-dummy-dai"; links[i].codecs->name = "snd-soc-dummy"; - links[i].dpcm_playback = 1; - links[i].dpcm_capture = 1; + if (ops->drv[i].playback.channels_min) + links[i].dpcm_playback = 1; + if (ops->drv[i].capture.channels_min) + links[i].dpcm_capture = 1; } card->dai_link = links; diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c index 195af259e78e..128680b09c20 100644 --- a/sound/soc/sof/pm.c +++ b/sound/soc/sof/pm.c @@ -266,7 +266,10 @@ static int sof_resume(struct device *dev, bool runtime_resume) int ret; /* do nothing if dsp resume callbacks are not set */ - if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume) + if (!runtime_resume && !sof_ops(sdev)->resume) + return 0; + + if (runtime_resume && !sof_ops(sdev)->runtime_resume) return 0; /* DSP was never successfully started, nothing to resume */ @@ -346,7 +349,10 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) int ret; /* do nothing if dsp suspend callback is not set */ - if (!sof_ops(sdev)->suspend) + if (!runtime_suspend && !sof_ops(sdev)->suspend) + return 0; + + if (runtime_suspend && !sof_ops(sdev)->runtime_suspend) return 0; if (sdev->fw_state != SOF_FW_BOOT_COMPLETE) diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c index d66412a77873..3f79cd03507c 100644 --- a/sound/soc/sof/sof-pci-dev.c +++ b/sound/soc/sof/sof-pci-dev.c @@ -420,6 +420,8 @@ static const struct pci_device_id sof_pci_ids[] = { #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H) { PCI_DEVICE(0x8086, 0x06c8), .driver_data = (unsigned long)&cml_desc}, + { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */ + .driver_data = (unsigned long)&cml_desc}, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) { PCI_DEVICE(0x8086, 0xa0c8), diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c index 4c3cff031fd6..fd6f5913782b 100644 
--- a/sound/soc/sof/trace.c +++ b/sound/soc/sof/trace.c @@ -328,7 +328,10 @@ void snd_sof_free_trace(struct snd_sof_dev *sdev) { snd_sof_release_trace(sdev); - snd_dma_free_pages(&sdev->dmatb); - snd_dma_free_pages(&sdev->dmatp); + if (sdev->dma_trace_pages) { + snd_dma_free_pages(&sdev->dmatb); + snd_dma_free_pages(&sdev->dmatp); + sdev->dma_trace_pages = 0; + } } EXPORT_SYMBOL(snd_sof_free_trace); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index 10eb4b8e8e7e..7e965848796c 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -1543,6 +1543,9 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) return ret; } + if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) + conf = &stm32_sai_pcm_config_spdif; + ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0); if (ret) { dev_err(&pdev->dev, "Could not register pcm dma\n"); @@ -1552,12 +1555,9 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) ret = snd_soc_register_component(&pdev->dev, &stm32_component, &sai->cpu_dai_drv, 1); if (ret) - return ret; + snd_dmaengine_pcm_unregister(&pdev->dev); - if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) - conf = &stm32_sai_pcm_config_spdif; - - return 0; + return ret; } static int stm32_sai_sub_remove(struct platform_device *pdev) diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c index e53fb4bd66b3..9fc2a1767eb1 100644 --- a/sound/soc/stm/stm32_spdifrx.c +++ b/sound/soc/stm/stm32_spdifrx.c @@ -995,6 +995,8 @@ static int stm32_spdifrx_probe(struct platform_device *pdev) if (idr == SPDIFRX_IPIDR_NUMBER) { ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_VERR, &ver); + if (ret) + goto error; dev_dbg(&pdev->dev, "SPDIFRX version: %lu.%lu registered\n", FIELD_GET(SPDIFRX_VERR_MAJ_MASK, ver), diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index ee448d5e07a6..c4021d6ac9df 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c @@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM); card->dev = dev; + card->owner = THIS_MODULE; card->name = "sun4i-codec"; card->dapm_widgets = sun4i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun4i_codec_card_dapm_widgets); @@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM); card->dev = dev; + card->owner = THIS_MODULE; card->name = "A31 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM); card->dev = dev; + card->owner = THIS_MODULE; card->name = "A23 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM); card->dev = dev; + card->owner = THIS_MODULE; card->name = "H3 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM); card->dev = dev; + card->owner = THIS_MODULE; card->name = "V3s Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = 
ARRAY_SIZE(sun6i_codec_card_dapm_widgets); diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c index d0a8d5810c0a..9655dec4166b 100644 --- a/sound/soc/sunxi/sun4i-i2s.c +++ b/sound/soc/sunxi/sun4i-i2s.c @@ -442,11 +442,11 @@ static int sun8i_i2s_set_chan_cfg(const struct sun4i_i2s *i2s, switch (i2s->format & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: - case SND_SOC_DAIFMT_LEFT_J: - case SND_SOC_DAIFMT_RIGHT_J: lrck_period = params_physical_width(params) * slots; break; + case SND_SOC_DAIFMT_LEFT_J: + case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_I2S: lrck_period = params_physical_width(params); break; diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 635eacbd28d4..156e3b9d613c 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -643,8 +643,10 @@ static int tegra30_ahub_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(ahub->regmap_ahub); ret |= regcache_sync(ahub->regmap_apbif); pm_runtime_put(dev); diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index e6d548fa980b..8894b7c16a01 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -538,8 +538,10 @@ static int tegra30_i2s_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(i2s->regmap); pm_runtime_put(dev); diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index 6211dfda2195..0fa01cacfec9 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c @@ -159,6 +159,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) struct snd_soc_component *component = codec_dai->component; struct snd_soc_card *card = rtd->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); + int shrt = 0; if (gpio_is_valid(machine->gpio_hp_det)) { tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det; @@ -171,12 +172,15 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) &tegra_wm8903_hp_jack_gpio); } + if (of_property_read_bool(card->dev->of_node, "nvidia,headset")) + shrt = SND_JACK_MICROPHONE; + snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE, &tegra_wm8903_mic_jack, tegra_wm8903_mic_jack_pins, ARRAY_SIZE(tegra_wm8903_mic_jack_pins)); wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE, - 0); + shrt); snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS"); diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index 7aa3c32e4a49..0541071f454b 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -1875,8 +1875,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) PTR_ERR(chan)); return PTR_ERR(chan); } - if (WARN_ON(!chan->device || !chan->device->dev)) + if (WARN_ON(!chan->device || !chan->device->dev)) { + dma_release_channel(chan); return -EINVAL; + } if (chan->device->dev->of_node) ret = of_property_read_string(chan->device->dev->of_node, diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c index 26b503bbdb5f..3273b317fa3b 100644 --- a/sound/soc/ti/omap-mcbsp.c +++ b/sound/soc/ti/omap-mcbsp.c @@ -686,7 +686,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp, SNDRV_PCM_STREAM_CAPTURE); - mcbsp->fclk = 
clk_get(&pdev->dev, "fck"); + mcbsp->fclk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(mcbsp->fclk)) { ret = PTR_ERR(mcbsp->fclk); dev_err(mcbsp->dev, "unable to get fck: %d\n", ret); @@ -711,7 +711,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) if (ret) { dev_err(mcbsp->dev, "Unable to create additional controls\n"); - goto err_thres; + return ret; } } @@ -724,8 +724,6 @@ static int omap_mcbsp_init(struct platform_device *pdev) err_st: if (mcbsp->pdata->buffer_size) sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group); -err_thres: - clk_put(mcbsp->fclk); return ret; } @@ -1442,8 +1440,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev) omap_mcbsp_st_cleanup(pdev); - clk_put(mcbsp->fclk); - return 0; } diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c index 2873e8e6f02b..cdae1190b930 100644 --- a/sound/soc/ux500/mop500.c +++ b/sound/soc/ux500/mop500.c @@ -63,10 +63,11 @@ static void mop500_of_node_put(void) { int i; - for (i = 0; i < 2; i++) { + for (i = 0; i < 2; i++) of_node_put(mop500_dai_links[i].cpus->of_node); - of_node_put(mop500_dai_links[i].codecs->of_node); - } + + /* Both links use the same codec, which is refcounted only once */ + of_node_put(mop500_dai_links[0].codecs->of_node); } static int mop500_of_probe(struct platform_device *pdev, @@ -81,7 +82,9 @@ static int mop500_of_probe(struct platform_device *pdev, if (!(msp_np[0] && msp_np[1] && codec_np)) { dev_err(&pdev->dev, "Phandle missing or invalid\n"); - mop500_of_node_put(); + for (i = 0; i < 2; i++) + of_node_put(msp_np[i]); + of_node_put(codec_np); return -EINVAL; } diff --git a/sound/usb/card.c b/sound/usb/card.c index 54f9ce38471e..c2dd18c5cadb 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -176,9 +176,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int ctrlif, interface); return -EINVAL; } - usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); - - return 0; + return usb_driver_claim_interface(&usb_audio_driver, iface, + USB_AUDIO_IFACE_UNUSED); } if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && @@ -198,7 +197,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int if (! 
snd_usb_parse_audio_interface(chip, interface)) { usb_set_interface(dev, interface, 0); /* reset the current interface */ - usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); + return usb_driver_claim_interface(&usb_audio_driver, iface, + USB_AUDIO_IFACE_UNUSED); } return 0; @@ -659,10 +659,14 @@ static int usb_audio_probe(struct usb_interface *intf, goto __error; } - /* we are allowed to call snd_card_register() many times */ - err = snd_card_register(chip->card); - if (err < 0) - goto __error; + /* we are allowed to call snd_card_register() many times, but first + * check to see if a device needs to skip it or do anything special + */ + if (!snd_usb_registration_quirk(chip, ifnum)) { + err = snd_card_register(chip->card); + if (err < 0) + goto __error; + } if (quirk && quirk->shares_media_device) { /* don't want to fail when snd_media_device_create() fails */ @@ -699,7 +703,7 @@ static void usb_audio_disconnect(struct usb_interface *intf) struct snd_card *card; struct list_head *p; - if (chip == (void *)-1L) + if (chip == USB_AUDIO_IFACE_UNUSED) return; card = chip->card; @@ -807,12 +811,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) struct usb_mixer_interface *mixer; struct list_head *p; - if (chip == (void *)-1L) + if (chip == USB_AUDIO_IFACE_UNUSED) return 0; - chip->autosuspended = !!PMSG_IS_AUTO(message); - if (!chip->autosuspended) - snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each_entry(as, &chip->pcm_list, list) { snd_usb_pcm_suspend(as); @@ -825,6 +826,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) snd_usb_mixer_suspend(mixer); } + if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + chip->system_suspend = chip->num_suspended_intf; + } + return 0; } @@ -836,12 +842,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) struct list_head *p; int err = 0; - if (chip == (void *)-1L) - return 0; - if (--chip->num_suspended_intf) + if (chip == USB_AUDIO_IFACE_UNUSED) return 0; atomic_inc(&chip->active); /* avoid autopm */ + if (chip->num_suspended_intf > 1) + goto out; list_for_each_entry(as, &chip->pcm_list, list) { err = snd_usb_pcm_resume(as); @@ -863,9 +869,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) snd_usbmidi_resume(p); } - if (!chip->autosuspended) + out: + if (chip->num_suspended_intf == chip->system_suspend) { snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); - chip->autosuspended = 0; + chip->system_suspend = 0; + } + chip->num_suspended_intf--; err_out: atomic_dec(&chip->active); /* allow autopm after this point */ diff --git a/sound/usb/card.h b/sound/usb/card.h index 395403a2d33f..d8ec5caf464d 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -104,6 +104,7 @@ struct snd_usb_endpoint { int iface, altsetting; int skip_packets; /* quirks for devices to ignore the first n packets in a stream */ + bool is_implicit_feedback; /* This endpoint is used as implicit feedback */ spinlock_t lock; struct list_head list; @@ -132,6 +133,7 @@ struct snd_usb_substream { unsigned int tx_length_quirk:1; /* add length specifier to transfers */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ + unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) 
*/ unsigned int running: 1; /* running status */ diff --git a/sound/usb/clock.c b/sound/usb/clock.c index a48313dfa967..6a51b9d20eeb 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -151,16 +151,15 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i return ret; } -/* - * Assume the clock is valid if clock source supports only one single sample - * rate, the terminal is connected directly to it (there is no clock selector) - * and clock type is internal. This is to deal with some Denon DJ controllers - * that always reports that clock is invalid. - */ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, struct audioformat *fmt, int source_id) { + bool ret = false; + int count; + unsigned char data; + struct usb_device *dev = chip->dev; + if (fmt->protocol == UAC_VERSION_2) { struct uac_clock_source_descriptor *cs_desc = snd_usb_find_clock_source(chip->ctrl_intf, source_id); @@ -168,13 +167,51 @@ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, if (!cs_desc) return false; - return (fmt->nr_rates == 1 && - (fmt->clock & 0xff) == cs_desc->bClockID && - (cs_desc->bmAttributes & 0x3) != - UAC_CLOCK_SOURCE_TYPE_EXT); + /* + * Assume the clock is valid if clock source supports only one + * single sample rate, the terminal is connected directly to it + * (there is no clock selector) and clock type is internal. + * This is to deal with some Denon DJ controllers that always + * report that clock is invalid. + */ + if (fmt->nr_rates == 1 && + (fmt->clock & 0xff) == cs_desc->bClockID && + (cs_desc->bmAttributes & 0x3) != + UAC_CLOCK_SOURCE_TYPE_EXT) + return true; } - return false; + /* + * MOTU MicroBook IIc + * Sample rate changes take more than 2 seconds for this device. Clock + * validity request returns false during that period. + */ + if (chip->usb_id == USB_ID(0x07fd, 0x0004)) { + count = 0; + + while ((!ret) && (count < 50)) { + int err; + + msleep(100); + + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, + UAC2_CS_CONTROL_CLOCK_VALID << 8, + snd_usb_ctrl_intf(chip) | (source_id << 8), + &data, sizeof(data)); + if (err < 0) { + dev_warn(&dev->dev, + "%s(): cannot get clock validity for id %d\n", + __func__, source_id); + return false; + } + + ret = !!data; + count++; + } + } + + return ret; } static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, @@ -259,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id); if (selector) { - int ret, i, cur; + int ret, i, cur, err; /* the entity ID we are looking for is a selector. * find out what it currently selects
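	 * (note: the CUR request on a clock selector returns a 1-based input
	 * pin index, which is why the recursion below dereferences
	 * baCSourceID[ret - 1])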
* find out what it currently selects */ @@ -375,6 +416,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, ret = __uac3_clock_find_source(chip, fmt, selector->baCSourceID[ret - 1], visited, validate); + if (ret > 0) { + err = uac_clock_selector_set_val(chip, entity_id, cur); + if (err < 0) + return err; + } + if (!validate || ret > 0 || !chip->autoclock) return ret; @@ -494,6 +541,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface, } crate = data[0] | (data[1] << 8) | (data[2] << 16); + if (!crate) { + dev_info(&dev->dev, "failed to read current rate; disabling the check\n"); + chip->sample_rate_read_error = 3; /* three strikes, see above */ + return 0; + } + if (crate != rate) { dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate); // runtime->rate = crate; diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 4a9a2f6ef5a4..87cc249a31b9 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -321,17 +321,17 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep) ep->next_packet_read_pos %= MAX_URBS; /* take URB out of FIFO */ - if (!list_empty(&ep->ready_playback_urbs)) + if (!list_empty(&ep->ready_playback_urbs)) { ctx = list_first_entry(&ep->ready_playback_urbs, struct snd_urb_ctx, ready_list); + list_del_init(&ctx->ready_list); + } } spin_unlock_irqrestore(&ep->lock, flags); if (ctx == NULL) return; - list_del_init(&ctx->ready_list); - /* copy over the length information */ for (i = 0; i < packet->packets; i++) ctx->packet_size[i] = packet->packet_size[i]; @@ -497,6 +497,8 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip, list_add_tail(&ep->list, &chip->ep_list); + ep->is_implicit_feedback = 0; + __exit_unlock: mutex_unlock(&chip->mutex); @@ -596,6 +598,178 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force) ep->nurbs = 0; } +/* + * Check data endpoint for format differences + */ +static bool check_ep_params(struct snd_usb_endpoint *ep, + snd_pcm_format_t pcm_format, + unsigned int channels, + unsigned int period_bytes, + unsigned int frames_per_period, + unsigned int periods_per_buffer, + struct audioformat *fmt, + struct snd_usb_endpoint *sync_ep) +{ + unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb; + unsigned int max_packs_per_period, urbs_per_period, urb_packs; + unsigned int max_urbs; + int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels; + int tx_length_quirk = (ep->chip->tx_length_quirk && + usb_pipeout(ep->pipe)); + bool ret = 1; + + if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) { + /* + * When operating in DSD DOP mode, the size of a sample frame + * in hardware differs from the actual physical format width + * because we need to make room for the DOP markers. + */ + frame_bits += channels << 3; + } + + ret = ret && (ep->datainterval == fmt->datainterval); + ret = ret && (ep->stride == frame_bits >> 3); + + switch (pcm_format) { + case SNDRV_PCM_FORMAT_U8: + ret = ret && (ep->silence_value == 0x80); + break; + case SNDRV_PCM_FORMAT_DSD_U8: + case SNDRV_PCM_FORMAT_DSD_U16_LE: + case SNDRV_PCM_FORMAT_DSD_U32_LE: + case SNDRV_PCM_FORMAT_DSD_U16_BE: + case SNDRV_PCM_FORMAT_DSD_U32_BE: + ret = ret && (ep->silence_value == 0x69); + break; + default: + ret = ret && (ep->silence_value == 0); + } + + /* assume max. 
frequency is 50% higher than nominal */ + ret = ret && (ep->freqmax == ep->freqn + (ep->freqn >> 1)); + /* Round up freqmax to nearest integer in order to calculate maximum + * packet size, which must represent a whole number of frames. + * This is accomplished by adding 0x0.ffff before converting the + * Q16.16 format into integer. + * In order to accurately calculate the maximum packet size when + * the data interval is more than 1 (i.e. ep->datainterval > 0), + * multiply by the data interval prior to rounding. For instance, + * a freqmax of 41 kHz will result in a max packet size of 6 (5.125) + * frames with a data interval of 1, but 11 (10.25) frames with a + * data interval of 2. + * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the + * maximum datainterval value of 3, at USB full speed, higher for + * USB high speed, noting that ep->freqmax is in units of + * frames per packet in Q16.16 format.) + */ + maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) * + (frame_bits >> 3); + if (tx_length_quirk) + maxsize += sizeof(__le32); /* Space for length descriptor */ + /* but wMaxPacketSize might reduce this */ + if (ep->maxpacksize && ep->maxpacksize < maxsize) { + /* whatever fits into a max. size packet */ + unsigned int data_maxsize = maxsize = ep->maxpacksize; + + if (tx_length_quirk) + /* Need to remove the length descriptor to calc freq */ + data_maxsize -= sizeof(__le32); + ret = ret && (ep->freqmax == (data_maxsize / (frame_bits >> 3)) + << (16 - ep->datainterval)); + } + + if (ep->fill_max) + ret = ret && (ep->curpacksize == ep->maxpacksize); + else + ret = ret && (ep->curpacksize == maxsize); + + if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) { + packs_per_ms = 8 >> ep->datainterval; + max_packs_per_urb = MAX_PACKS_HS; + } else { + packs_per_ms = 1; + max_packs_per_urb = MAX_PACKS; + } + if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep)) + max_packs_per_urb = min(max_packs_per_urb, + 1U << sync_ep->syncinterval); + max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval); + + /* + * Capture endpoints need to use small URBs because there's no way + * to tell in advance where the next period will end, and we don't + * want the next URB to complete much after the period ends. + * + * Playback endpoints with implicit sync must use the same parameters + * as their corresponding capture endpoint. + */ + if (usb_pipein(ep->pipe) || + snd_usb_endpoint_implicit_feedback_sink(ep)) { + + urb_packs = packs_per_ms; + /* + * Wireless devices can poll at a max rate of once per 4ms. + * For dataintervals less than 5, increase the packet count to + * allow the host controller to use bursting to fill in the + * gaps. + */ + if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) { + int interval = ep->datainterval; + + while (interval < 5) { + urb_packs <<= 1; + ++interval; + } + } + /* make capture URBs <= 1 ms and smaller than a period */ + urb_packs = min(max_packs_per_urb, urb_packs); + while (urb_packs > 1 && urb_packs * maxsize >= period_bytes) + urb_packs >>= 1; + ret = ret && (ep->nurbs == MAX_URBS); + + /* + * Playback endpoints without implicit sync are adjusted so that + * a period fits as evenly as possible in the smallest number of + * URBs. The total number of URBs is adjusted to the size of the + * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
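+	 * (A worked example under assumed numbers: USB high speed, 48 kHz,
+	 * 16-bit stereo, i.e. 4-byte frames, datainterval 0 and a 768-byte
+	 * (4 ms) period with a sync endpoint. freqn is 6 frames/packet in
+	 * Q16.16, so minsize below is 6 * 4 = 24 bytes, reduced by one
+	 * eighth to 21; the period then needs at most DIV_ROUND_UP(768, 21)
+	 * = 37 packets, which fits in a single URB whenever the per-URB
+	 * packet limit allows it.)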
+ */ + } else { + /* determine how small a packet can be */ + minsize = (ep->freqn >> (16 - ep->datainterval)) * + (frame_bits >> 3); + /* with sync from device, assume it can be 12% lower */ + if (sync_ep) + minsize -= minsize >> 3; + minsize = max(minsize, 1u); + + /* how many packets will contain an entire ALSA period? */ + max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize); + + /* how many URBs will contain a period? */ + urbs_per_period = DIV_ROUND_UP(max_packs_per_period, + max_packs_per_urb); + /* how many packets are needed in each URB? */ + urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period); + + /* limit the number of frames in a single URB */ + ret = ret && (ep->max_urb_frames == + DIV_ROUND_UP(frames_per_period, urbs_per_period)); + + /* try to use enough URBs to contain an entire ALSA buffer */ + max_urbs = min((unsigned) MAX_URBS, + MAX_QUEUE * packs_per_ms / urb_packs); + ret = ret && (ep->nurbs == min(max_urbs, + urbs_per_period * periods_per_buffer)); + } + + ret = ret && (ep->datainterval == fmt->datainterval); + ret = ret && (ep->maxpacksize == fmt->maxpacksize); + ret = ret && + (ep->fill_max == !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX)); + + return ret; +} + /* * configure a data endpoint */ @@ -861,10 +1035,23 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, int err; if (ep->use_count != 0) { - usb_audio_warn(ep->chip, - "Unable to change format on ep #%x: already in use\n", - ep->ep_num); - return -EBUSY; + bool check = ep->is_implicit_feedback && + check_ep_params(ep, pcm_format, + channels, period_bytes, + period_frames, buffer_periods, + fmt, sync_ep); + + if (!check) { + usb_audio_warn(ep->chip, + "Unable to change format on ep #%x: already in use\n", + ep->ep_num); + return -EBUSY; + } + + usb_audio_dbg(ep->chip, + "Ep #%x already in use as implicit feedback but format not changed\n", + ep->ep_num); + return 0; } /* release old buffers, if any */ diff --git a/sound/usb/format.c b/sound/usb/format.c index f4f0cf3deaf0..9e9d4c10dfac 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -40,6 +40,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, case UAC_VERSION_1: default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; + if (format >= 64) + return 0; /* invalid format */ sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; format = 1ULL << format; @@ -226,6 +228,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof return 0; } +/* + * Many Focusrite devices support a limited set of sampling rates per + * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type + * descriptor which has a non-standard bLength = 10.
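+ * (For instance, with an assumed descriptor reporting bLength = 10, the
+ * helper below recovers the maximum rate as the 32-bit little-endian
+ * value in bytes 6-9 via combine_quad(&fmt[6]) and rejects any rate
+ * above it.)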
+ */ +static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, + struct audioformat *fp, + unsigned int rate) +{ + struct usb_interface *iface; + struct usb_host_interface *alts; + unsigned char *fmt; + unsigned int max_rate; + + iface = usb_ifnum_to_if(chip->dev, fp->iface); + if (!iface) + return true; + + alts = &iface->altsetting[fp->altset_idx]; + fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, + NULL, UAC_FORMAT_TYPE); + if (!fmt) + return true; + + if (fmt[0] == 10) { /* bLength */ + max_rate = combine_quad(&fmt[6]); + + /* Validate max rate */ + if (max_rate != 48000 && + max_rate != 96000 && + max_rate != 192000 && + max_rate != 384000) { + + usb_audio_info(chip, + "%u:%d : unexpected max rate: %u\n", + fp->iface, fp->altsetting, max_rate); + + return true; + } + + return rate <= max_rate; + } + + return true; +} + /* * Helper function to walk the array of sample rate triplets reported by * the device. The problem is that we need to parse whole array first to @@ -262,6 +310,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, } for (rate = min; rate <= max; rate += res) { + /* Filter out invalid rates on Focusrite devices */ + if (USB_ID_VENDOR(chip->usb_id) == 0x1235 && + !focusrite_valid_sample_rate(chip, fp, rate)) + goto skip_rate; + if (fp->rate_table) fp->rate_table[nr_rates] = rate; if (!fp->rate_min || rate < fp->rate_min) @@ -276,6 +329,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, break; } +skip_rate: /* avoid endless loop */ if (res == 0) break; diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c index 82abef3fe90d..4b6e99e055dc 100644 --- a/sound/usb/line6/capture.c +++ b/sound/usb/line6/capture.c @@ -287,6 +287,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_in_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 4f096685ed65..0caf53f5764c 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -820,7 +820,7 @@ void line6_disconnect(struct usb_interface *interface) if (WARN_ON(usbdev != line6->usbdev)) return; - cancel_delayed_work(&line6->startup_work); + cancel_delayed_work_sync(&line6->startup_work); if (line6->urb_listen != NULL) line6_stop_listen(line6); diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 2e8ead3f9bc2..797ced329b79 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c @@ -432,6 +432,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c index 27bf61c177c0..5d9954a2d05e 100644 --- a/sound/usb/line6/podhd.c +++ b/sound/usb/line6/podhd.c @@ -21,8 +21,7 @@ enum { LINE6_PODHD300, LINE6_PODHD400, - LINE6_PODHD500_0, - LINE6_PODHD500_1, + LINE6_PODHD500, LINE6_PODX3, LINE6_PODX3LIVE, LINE6_PODHD500X, @@ -318,8 +317,7 @@ static const struct usb_device_id podhd_id_table[] = { /* TODO: no need to alloc data interfaces when only audio is used */ { LINE6_DEVICE(0x5057), .driver_info = LINE6_PODHD300 }, { LINE6_DEVICE(0x5058), .driver_info = LINE6_PODHD400 }, - { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 }, - { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 }, + { 
LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500 }, { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 }, { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE }, { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X }, @@ -352,23 +350,13 @@ static const struct line6_properties podhd_properties_table[] = { .ep_audio_r = 0x82, .ep_audio_w = 0x01, }, - [LINE6_PODHD500_0] = { + [LINE6_PODHD500] = { .id = "PODHD500", .name = "POD HD500", - .capabilities = LINE6_CAP_PCM + .capabilities = LINE6_CAP_PCM | LINE6_CAP_CONTROL | LINE6_CAP_HWMON, .altsetting = 1, - .ep_ctrl_r = 0x81, - .ep_ctrl_w = 0x01, - .ep_audio_r = 0x86, - .ep_audio_w = 0x02, - }, - [LINE6_PODHD500_1] = { - .id = "PODHD500", - .name = "POD HD500", - .capabilities = LINE6_CAP_PCM - | LINE6_CAP_HWMON, - .altsetting = 0, + .ctrl_if = 1, .ep_ctrl_r = 0x81, .ep_ctrl_w = 0x01, .ep_audio_r = 0x86, diff --git a/sound/usb/midi.c b/sound/usb/midi.c index b737f0ec77d0..c205a26ef509 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi, error: snd_usbmidi_in_endpoint_delete(ep); - return -ENOMEM; + return err; } /* @@ -1499,6 +1499,8 @@ void snd_usbmidi_disconnect(struct list_head *p) spin_unlock_irq(&umidi->disc_lock); up_write(&umidi->disc_rwsem); + del_timer_sync(&umidi->error_timer); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; if (ep->out) @@ -1525,7 +1527,6 @@ void snd_usbmidi_disconnect(struct list_head *p) ep->in = NULL; } } - del_timer_sync(&umidi->error_timer); } EXPORT_SYMBOL(snd_usbmidi_disconnect); @@ -1826,6 +1827,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, return 0; } +static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( + struct usb_host_endpoint *hostep) +{ + unsigned char *extra = hostep->extra; + int extralen = hostep->extralen; + + while (extralen > 3) { + struct usb_ms_endpoint_descriptor *ms_ep = + (struct usb_ms_endpoint_descriptor *)extra; + + if (ms_ep->bLength > 3 && + ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && + ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) + return ms_ep; + if (!extra[0]) + break; + extralen -= extra[0]; + extra += extra[0]; + } + return NULL; +} + /* * Returns MIDIStreaming device capabilities. 
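 * (Each MIDIStreaming bulk or interrupt endpoint carries a class-specific
 * MS_GENERAL descriptor whose bNumEmbMIDIJack field gives the number of
 * embedded jacks, i.e. virtual cables; the helper above locates that
 * descriptor anywhere in hostep->extra instead of assuming it comes
 * first.)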
*/ @@ -1863,11 +1886,10 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, ep = get_ep_desc(hostep); if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) continue; - ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; - if (hostep->extralen < 4 || - ms_ep->bLength < 4 || - ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || - ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) + ms_ep = find_usb_ms_endpoint_descriptor(hostep); + if (!ms_ep) + continue; + if (ms_ep->bNumEmbMIDIJack > 0x10) continue; if (usb_endpoint_dir_out(ep)) { if (endpoints[epidx].out_ep) { @@ -2121,6 +2143,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi, cs_desc[1] == USB_DT_CS_INTERFACE && cs_desc[2] == 0xf1 && cs_desc[3] == 0x02) { + if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10) + continue; endpoint->in_cables = (1 << cs_desc[4]) - 1; endpoint->out_cables = (1 << cs_desc[5]) - 1; return snd_usbmidi_detect_endpoints(umidi, endpoint, 1); @@ -2282,16 +2306,22 @@ void snd_usbmidi_input_stop(struct list_head *p) } EXPORT_SYMBOL(snd_usbmidi_input_stop); -static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) +static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, + struct snd_usb_midi_in_endpoint *ep) { unsigned int i; + unsigned long flags; if (!ep) return; for (i = 0; i < INPUT_URBS; ++i) { struct urb *urb = ep->urbs[i]; - urb->dev = ep->umidi->dev; - snd_usbmidi_submit_urb(urb, GFP_KERNEL); + spin_lock_irqsave(&umidi->disc_lock, flags); + if (!atomic_read(&urb->use_count)) { + urb->dev = ep->umidi->dev; + snd_usbmidi_submit_urb(urb, GFP_ATOMIC); + } + spin_unlock_irqrestore(&umidi->disc_lock, flags); } } @@ -2307,7 +2337,7 @@ void snd_usbmidi_input_start(struct list_head *p) if (umidi->input_running || !umidi->opened[1]) return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in); umidi->input_running = 1; } EXPORT_SYMBOL(snd_usbmidi_input_start); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index d2a050bb8341..f4f8778e907a 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -576,8 +576,9 @@ static int check_matrix_bitmap(unsigned char *bmap, * if failed, give up and free the control instance. 
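 * (is_std_info records whether the element's private data really is a
 * struct usb_mixer_elem_info, so that the notification paths below can
 * check it before casting via mixer_elem_list_to_info().)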
*/ -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl) +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; @@ -591,6 +592,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, return err; } list->kctl = kctl; + list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; @@ -1028,7 +1030,7 @@ struct usb_feature_control_info { int type_uac2; /* data type for uac2 if different from uac1, else -1 */ }; -static struct usb_feature_control_info audio_feature_info[] = { +static const struct usb_feature_control_info audio_feature_info[] = { { UAC_FU_MUTE, "Mute", USB_MIXER_INV_BOOLEAN, -1 }, { UAC_FU_VOLUME, "Volume", USB_MIXER_S16, -1 }, { UAC_FU_BASS, "Tone Control - Bass", USB_MIXER_S8, -1 }, @@ -1171,6 +1173,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 384; } break; + case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ + if ((strstr(kctl->id.name, "Playback Volume") != NULL) || + strstr(kctl->id.name, "Capture Volume") != NULL) { + cval->min >>= 8; + cval->max = 0; + cval->res = 1; + } + break; } } @@ -1446,7 +1456,7 @@ error: usb_audio_err(chip, "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", UAC_GET_CUR, validx, idx, cval->val_type); - return ret; + return filter_error(cval, ret); } ucontrol->value.integer.value[0] = val; @@ -1534,7 +1544,7 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl, strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name)); } -static struct usb_feature_control_info *get_feature_control_info(int control) +static const struct usb_feature_control_info *get_feature_control_info(int control) { int i; @@ -1552,7 +1562,7 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer, struct usb_audio_term *oterm, int unitid, int nameid, int readonly_mask) { - struct usb_feature_control_info *ctl_info; + const struct usb_feature_control_info *ctl_info; unsigned int len = 0; int mapped_name = 0; struct snd_kcontrol *kctl; @@ -1674,6 +1684,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer, /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl); + /* skip a bogus volume range */ + if (cval->max <= cval->min) { + usb_audio_dbg(mixer->chip, + "[%d] FU [%s] skipped due to invalid volume\n", + cval->head.id, kctl->id.name); + snd_ctl_free_one(kctl); + return; + } + + if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) { @@ -1750,10 +1770,16 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer, /* Build a mixer control for a UAC connector control (jack-detect) */ static void build_connector_control(struct usb_mixer_interface *mixer, + const struct usbmix_name_map *imap, struct usb_audio_term *term, bool is_input) { struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; + const struct usbmix_name_map *map; + + map = find_map(imap, term->id, 0); + if (check_ignored_ctl(map)) + return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) @@ -1784,8 +1810,12 @@ static void build_connector_control(struct usb_mixer_interface *mixer, usb_mixer_elem_info_free(cval); return; } - get_connector_control_name(mixer, term, is_input, kctl->id.name, - sizeof(kctl->id.name)); + + if (check_mapped_name(map, kctl->id.name, 
sizeof(kctl->id.name))) + strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name)); + else + get_connector_control_name(mixer, term, is_input, kctl->id.name, + sizeof(kctl->id.name)); kctl->private_free = snd_usb_mixer_elem_free; snd_usb_mixer_add_control(&cval->head, kctl); } @@ -2088,8 +2118,9 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid, check_input_term(state, term_id, &iterm); /* Check for jack detection. */ - if (uac_v2v3_control_is_readable(bmctls, control)) - build_connector_control(state->mixer, &iterm, true); + if ((iterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(bmctls, control)) + build_connector_control(state->mixer, state->map, &iterm, true); return 0; } @@ -2206,7 +2237,7 @@ static const struct snd_kcontrol_new mixer_procunit_ctl = { */ struct procunit_value_info { int control; - char *suffix; + const char *suffix; int val_type; int min_value; }; @@ -2214,44 +2245,44 @@ struct procunit_value_info { struct procunit_info { int type; char *name; - struct procunit_value_info *values; + const struct procunit_value_info *values; }; -static struct procunit_value_info undefined_proc_info[] = { +static const struct procunit_value_info undefined_proc_info[] = { { 0x00, "Control Undefined", 0 }, { 0 } }; -static struct procunit_value_info updown_proc_info[] = { +static const struct procunit_value_info updown_proc_info[] = { { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; -static struct procunit_value_info prologic_proc_info[] = { +static const struct procunit_value_info prologic_proc_info[] = { { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; -static struct procunit_value_info threed_enh_proc_info[] = { +static const struct procunit_value_info threed_enh_proc_info[] = { { UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; -static struct procunit_value_info reverb_proc_info[] = { +static const struct procunit_value_info reverb_proc_info[] = { { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, { UAC_REVERB_TIME, "Time", USB_MIXER_U16 }, { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; -static struct procunit_value_info chorus_proc_info[] = { +static const struct procunit_value_info chorus_proc_info[] = { { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; -static struct procunit_value_info dcr_proc_info[] = { +static const struct procunit_value_info dcr_proc_info[] = { { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, @@ -2261,7 +2292,7 @@ static struct procunit_value_info dcr_proc_info[] = { { 0 } }; -static struct procunit_info procunits[] = { +static const struct procunit_info procunits[] = { { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, { UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, @@ -2271,16 +2302,16 @@ static struct procunit_info procunits[] = { { 0 }, }; -static struct procunit_value_info uac3_updown_proc_info[] = { +static const struct procunit_value_info uac3_updown_proc_info[] = { { UAC3_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; -static 
struct procunit_value_info uac3_stereo_ext_proc_info[] = { +static const struct procunit_value_info uac3_stereo_ext_proc_info[] = { { UAC3_EXT_WIDTH_CONTROL, "Width Control", USB_MIXER_U8 }, { 0 } }; -static struct procunit_info uac3_procunits[] = { +static const struct procunit_info uac3_procunits[] = { { UAC3_PROCESS_UP_DOWNMIX, "Up Down", uac3_updown_proc_info }, { UAC3_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", uac3_stereo_ext_proc_info }, { UAC3_PROCESS_MULTI_FUNCTION, "Multi-Function", undefined_proc_info }, @@ -2290,23 +2321,23 @@ static struct procunit_info uac3_procunits[] = { /* * predefined data for extension units */ -static struct procunit_value_info clock_rate_xu_info[] = { +static const struct procunit_value_info clock_rate_xu_info[] = { { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 }, { 0 } }; -static struct procunit_value_info clock_source_xu_info[] = { +static const struct procunit_value_info clock_source_xu_info[] = { { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN }, { 0 } }; -static struct procunit_value_info spdif_format_xu_info[] = { +static const struct procunit_value_info spdif_format_xu_info[] = { { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN }, { 0 } }; -static struct procunit_value_info soft_limit_xu_info[] = { +static const struct procunit_value_info soft_limit_xu_info[] = { { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN }, { 0 } }; -static struct procunit_info extunits[] = { +static const struct procunit_info extunits[] = { { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info }, { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info }, { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info }, @@ -2318,7 +2349,7 @@ static struct procunit_info extunits[] = { * build a processing/extension unit */ static int build_audio_procunit(struct mixer_build *state, int unitid, - void *raw_desc, struct procunit_info *list, + void *raw_desc, const struct procunit_info *list, bool extension_unit) { struct uac_processing_unit_descriptor *desc = raw_desc; @@ -2326,14 +2357,14 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len; - struct procunit_info *info; - struct procunit_value_info *valinfo; + const struct procunit_info *info; + const struct procunit_value_info *valinfo; const struct usbmix_name_map *map; - static struct procunit_value_info default_value_info[] = { + static const struct procunit_value_info default_value_info[] = { { 0x01, "Switch", USB_MIXER_BOOLEAN }, { 0 } }; - static struct procunit_info default_info = { + static const struct procunit_info default_info = { 0, NULL, default_value_info }; const char *name = extension_unit ? 
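As a reference for how these zero-terminated tables are consumed, build_audio_procunit() scans the list for a matching unit type and falls back to a default entry when nothing matches. A minimal stand-alone sketch of that lookup, with simplified types and hypothetical names:

	#include <stddef.h>

	struct pu_info {
		int type;
		const char *name;
	};

	/* zero-terminated table in the style of procunits[] / extunits[] */
	static const struct pu_info table[] = {
		{ 1, "Up Down" },
		{ 2, "Dolby Prologic" },
		{ 0 },	/* terminator doubles as the scan's stop condition */
	};

	static const struct pu_info default_info = { 0, "Processing Unit" };

	static const struct pu_info *lookup(const struct pu_info *list, int type)
	{
		const struct pu_info *info;

		for (info = list; info && info->type; info++)
			if (info->type == type)
				return info;
		return &default_info;	/* unknown units get a generic entry */
	}

	/* usage: lookup(table, 2)->name is "Dolby Prologic";
	 * lookup(table, 9)->name falls back to "Processing Unit". */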
@@ -2811,7 +2842,7 @@ struct uac3_badd_profile { int st_chmask; /* side tone mixing channel mask */ }; -static struct uac3_badd_profile uac3_badd_profiles[] = { +static const struct uac3_badd_profile uac3_badd_profiles[] = { { /* * BAIF, BAOF or combination of both @@ -2872,7 +2903,7 @@ static struct uac3_badd_profile uac3_badd_profiles[] = { }; static bool uac3_badd_func_has_valid_channels(struct usb_mixer_interface *mixer, - struct uac3_badd_profile *f, + const struct uac3_badd_profile *f, int c_chmask, int p_chmask) { /* @@ -2916,7 +2947,7 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, struct usb_device *dev = mixer->chip->dev; struct usb_interface_assoc_descriptor *assoc; int badd_profile = mixer->chip->badd_profile; - struct uac3_badd_profile *f; + const struct uac3_badd_profile *f; const struct usbmix_ctl_map *map; int p_chmask = 0, c_chmask = 0, st_chmask = 0; int i; @@ -3050,13 +3081,13 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, memset(&iterm, 0, sizeof(iterm)); iterm.id = UAC3_BADD_IT_ID4; iterm.type = UAC_BIDIR_TERMINAL_HEADSET; - build_connector_control(mixer, &iterm, true); + build_connector_control(mixer, map->map, &iterm, true); /* Output Term - Insertion control */ memset(&oterm, 0, sizeof(oterm)); oterm.id = UAC3_BADD_OT_ID3; oterm.type = UAC_BIDIR_TERMINAL_HEADSET; - build_connector_control(mixer, &oterm, false); + build_connector_control(mixer, map->map, &oterm, false); } return 0; @@ -3085,7 +3116,8 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; - mixer->ignore_ctl_error = map->ignore_ctl_error; + mixer->connector_map = map->connector_map; + mixer->ignore_ctl_error |= map->ignore_ctl_error; break; } } @@ -3128,10 +3160,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (err < 0 && err != -EINVAL) return err; - if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), + if ((state.oterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), UAC2_TE_CONNECTOR)) { - build_connector_control(state.mixer, &state.oterm, - false); + build_connector_control(state.mixer, state.map, + &state.oterm, false); } } else { /* UAC_VERSION_3 */ struct uac3_output_terminal_descriptor *desc = p; @@ -3153,10 +3186,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (err < 0 && err != -EINVAL) return err; - if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls), + if ((state.oterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls), UAC3_TE_INSERTION)) { - build_connector_control(state.mixer, &state.oterm, - false); + build_connector_control(state.mixer, state.map, + &state.oterm, false); } } } @@ -3164,13 +3198,38 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) return 0; } +static int delegate_notify(struct usb_mixer_interface *mixer, int unitid, + u8 *control, u8 *channel) +{ + const struct usbmix_connector_map *map = mixer->connector_map; + + if (!map) + return unitid; + + for (; map->id; map++) { + if (map->id == unitid) { + if (control && map->control) + *control = map->control; + if (channel && map->channel) + *channel = map->channel; + return map->delegated_id; + } + } + return unitid; +} + void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; + unitid = delegate_notify(mixer, unitid, 
NULL, NULL); + for_each_mixer_elem(list, mixer, unitid) { - struct usb_mixer_elem_info *info = - mixer_elem_list_to_info(list); + struct usb_mixer_elem_info *info; + + if (!list->is_std_info) + continue; + info = mixer_elem_list_to_info(list); /* invalidate cache, so the value is read from the device */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, @@ -3182,7 +3241,7 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, struct usb_mixer_elem_list *list) { struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); - static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", + static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16"}; snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " "channels=%i, type=\"%s\"\n", cval->head.id, @@ -3237,6 +3296,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } + unitid = delegate_notify(mixer, unitid, &control, &channel); + for_each_mixer_elem(list, mixer, unitid) count++; @@ -3248,6 +3309,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, if (!list->kctl) continue; + if (!list->is_std_info) + continue; info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 37e1b234c802..01b5e5cc2221 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -6,6 +6,13 @@ struct media_mixer_ctl; +struct usbmix_connector_map { + u8 id; + u8 delegated_id; + u8 control; + u8 channel; +}; + struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; @@ -18,6 +25,9 @@ struct usb_mixer_interface { /* the usb audio specification version this interface complies to */ int protocol; + /* optional connector delegation map */ + const struct usbmix_connector_map *connector_map; + /* Sound Blaster remote control stuff */ const struct rc_config *rc_cfg; u32 rc_code; @@ -56,6 +66,7 @@ struct usb_mixer_elem_list { struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; + bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; @@ -93,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl); +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info); + +#define snd_usb_mixer_add_control(list, kctl) \ + snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 73baf398c84a..dda13624a28c 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -14,7 +14,7 @@ struct usbmix_name_map { int id; const char *name; int control; - struct usbmix_dB_map *dB; + const struct usbmix_dB_map *dB; }; struct usbmix_selector_map { @@ -27,6 +27,7 @@ struct usbmix_ctl_map { u32 id; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; + const struct usbmix_connector_map *connector_map; int ignore_ctl_error; }; @@ -52,7 +53,7 @@ Mic-IN[9] --+->FU[10]----------------------------+ | ++--+->SU[11]-->FU[12] 
--------------------------------------------------------------------------------------> USB_OUT[13] */ -static struct usbmix_name_map extigy_map[] = { +static const struct usbmix_name_map extigy_map[] = { /* 1: IT pcm */ { 2, "PCM Playback" }, /* FU */ /* 3: IT pcm */ @@ -93,12 +94,12 @@ static struct usbmix_name_map extigy_map[] = { * e.g. no Master and fake PCM volume * Pavel Mihaylov <bin@bash.info> */ -static struct usbmix_dB_map mp3plus_dB_1 = {.min = -4781, .max = 0}; +static const struct usbmix_dB_map mp3plus_dB_1 = {.min = -4781, .max = 0}; /* just guess */ -static struct usbmix_dB_map mp3plus_dB_2 = {.min = -1781, .max = 618}; +static const struct usbmix_dB_map mp3plus_dB_2 = {.min = -1781, .max = 618}; /* just guess */ -static struct usbmix_name_map mp3plus_map[] = { +static const struct usbmix_name_map mp3plus_map[] = { /* 1: IT pcm */ /* 2: IT mic */ /* 3: IT line */ @@ -139,7 +140,7 @@ Lin_IN[7]-+--->FU[8]---+ +->EU[23]->FU[28]------------->Spk_OUT[19] | ^ +->FU[13]--------------------------------------+ */ -static struct usbmix_name_map audigy2nx_map[] = { +static const struct usbmix_name_map audigy2nx_map[] = { /* 1: IT pcm playback */ /* 4: IT digital in */ { 6, "Digital In Playback" }, /* FU */ @@ -167,12 +168,12 @@ static struct usbmix_name_map audigy2nx_map[] = { { 0 } /* terminator */ }; -static struct usbmix_name_map mbox1_map[] = { +static const struct usbmix_name_map mbox1_map[] = { { 1, "Clock" }, { 0 } /* terminator */ }; -static struct usbmix_selector_map c400_selectors[] = { +static const struct usbmix_selector_map c400_selectors[] = { { .id = 0x80, .count = 2, @@ -181,7 +182,7 @@ static struct usbmix_selector_map c400_selectors[] = { { 0 } /* terminator */ }; -static struct usbmix_selector_map audigy2nx_selectors[] = { +static const struct usbmix_selector_map audigy2nx_selectors[] = { { .id = 14, /* Capture Source */ .count = 3, @@ -201,21 +202,21 @@ static struct usbmix_selector_map audigy2nx_selectors[] = { }; /* Creative SoundBlaster Live! 
24-bit External */ -static struct usbmix_name_map live24ext_map[] = { +static const struct usbmix_name_map live24ext_map[] = { /* 2: PCM Playback Volume */ { 5, "Mic Capture" }, /* FU, default PCM Capture Volume */ { 0 } /* terminator */ }; /* LineX FM Transmitter entry - needed to bypass controls bug */ -static struct usbmix_name_map linex_map[] = { +static const struct usbmix_name_map linex_map[] = { /* 1: IT pcm */ /* 2: OT Speaker */ { 3, "Master" }, /* FU: master volume - left / right / mute */ { 0 } /* terminator */ }; -static struct usbmix_name_map maya44_map[] = { +static const struct usbmix_name_map maya44_map[] = { /* 1: IT line */ { 2, "Line Playback" }, /* FU */ /* 3: IT line */ @@ -238,7 +239,7 @@ static struct usbmix_name_map maya44_map[] = { * so this map removes all unwanted sliders from alsamixer */ -static struct usbmix_name_map justlink_map[] = { +static const struct usbmix_name_map justlink_map[] = { /* 1: IT pcm playback */ /* 2: Not present */ { 3, NULL}, /* IT mic (No mic input on device) */ @@ -255,7 +256,7 @@ static struct usbmix_name_map justlink_map[] = { }; /* TerraTec Aureon 5.1 MkII USB */ -static struct usbmix_name_map aureon_51_2_map[] = { +static const struct usbmix_name_map aureon_51_2_map[] = { /* 1: IT USB */ /* 2: IT Mic */ /* 3: IT Line */ @@ -274,7 +275,7 @@ static struct usbmix_name_map aureon_51_2_map[] = { {} /* terminator */ }; -static struct usbmix_name_map scratch_live_map[] = { +static const struct usbmix_name_map scratch_live_map[] = { /* 1: IT Line 1 (USB streaming) */ /* 2: OT Line 1 (Speaker) */ /* 3: IT Line 1 (Line connector) */ @@ -290,7 +291,7 @@ static struct usbmix_name_map scratch_live_map[] = { { 0 } /* terminator */ }; -static struct usbmix_name_map ebox44_map[] = { +static const struct usbmix_name_map ebox44_map[] = { { 4, NULL }, /* FU */ { 6, NULL }, /* MU */ { 7, NULL }, /* FU */ @@ -305,7 +306,7 @@ static struct usbmix_name_map ebox44_map[] = { * FIXME: or mp3plus_map should use "Capture Source" too, * so this maps can be merget */ -static struct usbmix_name_map hercules_usb51_map[] = { +static const struct usbmix_name_map hercules_usb51_map[] = { { 8, "Capture Source" }, /* SU, default "PCM Capture Source" */ { 9, "Master Playback" }, /* FU, default "Speaker Playback" */ { 10, "Mic Boost", 7 }, /* FU, default "Auto Gain Input" */ @@ -316,7 +317,7 @@ static struct usbmix_name_map hercules_usb51_map[] = { }; /* Plantronics Gamecom 780 has a broken volume control, better to disable it */ -static struct usbmix_name_map gamecom780_map[] = { +static const struct usbmix_name_map gamecom780_map[] = { { 9, NULL }, /* FU, speaker out */ {} }; @@ -330,12 +331,19 @@ static const struct usbmix_name_map scms_usb3318_map[] = { }; /* Bose companion 5, the dB conversion factor is 16 instead of 256 */ -static struct usbmix_dB_map bose_companion5_dB = {-5006, -6}; -static struct usbmix_name_map bose_companion5_map[] = { +static const struct usbmix_dB_map bose_companion5_dB = {-5006, -6}; +static const struct usbmix_name_map bose_companion5_map[] = { { 3, NULL, .dB = &bose_companion5_dB }, { 0 } /* terminator */ }; +/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum */ +static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0}; +static const struct usbmix_name_map sennheiser_pc8_map[] = { + { 9, NULL, .dB = &sennheiser_pc8_dB }, + { 0 } /* terminator */ +}; + /* * Dell usb dock with ALC4020 codec had a firmware problem where it got * screwed up when zero volume is passed; just skip it as a 
workaround @@ -349,11 +357,63 @@ static const struct usbmix_name_map dell_alc4020_map[] = { { 0 } }; +/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX + * response for Input Gain Pad (id=19, control=12) and the connector status + * for SPDIF terminal (id=18). Skip them. + */ +static const struct usbmix_name_map asus_rog_map[] = { + { 18, NULL }, /* OT, connector control */ + { 19, NULL, 12 }, /* FU, Input Gain Pad */ + {} +}; + +/* TRX40 mobos with Realtek ALC1220-VB */ +static const struct usbmix_name_map trx40_mobo_map[] = { + { 18, NULL }, /* OT, IEC958 - broken response, disabled */ + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ + { 16, "Speaker" }, /* OT */ + { 22, "Speaker Playback" }, /* FU */ + { 7, "Line" }, /* IT */ + { 19, "Line Capture" }, /* FU */ + { 17, "Front Headphone" }, /* OT */ + { 23, "Front Headphone Playback" }, /* FU */ + { 8, "Mic" }, /* IT */ + { 20, "Mic Capture" }, /* FU */ + { 9, "Front Mic" }, /* IT */ + { 21, "Front Mic Capture" }, /* FU */ + { 24, "IEC958 Playback" }, /* FU */ + {} +}; + +static const struct usbmix_connector_map trx40_mobo_connector_map[] = { + { 10, 16 }, /* (Back) Speaker */ + { 11, 17 }, /* Front Headphone */ + { 13, 7 }, /* Line */ + { 14, 8 }, /* Mic */ + { 15, 9 }, /* Front Mic */ + {} +}; + +/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */ +static const struct usbmix_name_map aorus_master_alc1220vb_map[] = { + { 17, NULL }, /* OT, IEC958?, disabled */ + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ + { 16, "Line Out" }, /* OT */ + { 22, "Line Out Playback" }, /* FU */ + { 7, "Line" }, /* IT */ + { 19, "Line Capture" }, /* FU */ + { 8, "Mic" }, /* IT */ + { 20, "Mic Capture" }, /* FU */ + { 9, "Front Mic" }, /* IT */ + { 21, "Front Mic Capture" }, /* FU */ + {} +}; + /* * Control map entries */ -static struct usbmix_ctl_map usbmix_ctl_maps[] = { +static const struct usbmix_ctl_map usbmix_ctl_maps[] = { { .id = USB_ID(0x041e, 0x3000), .map = extigy_map, @@ -468,6 +528,38 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x05a7, 0x1020), .map = bose_companion5_map, }, + { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */ + .id = USB_ID(0x0414, 0xa001), + .map = aorus_master_alc1220vb_map, + }, + { /* Gigabyte TRX40 Aorus Pro WiFi */ + .id = USB_ID(0x0414, 0xa002), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, + { /* ASUS ROG Zenith II */ + .id = USB_ID(0x0b05, 0x1916), + .map = asus_rog_map, + }, + { /* ASUS ROG Strix */ + .id = USB_ID(0x0b05, 0x1917), + .map = asus_rog_map, + }, + { /* MSI TRX40 Creator */ + .id = USB_ID(0x0db0, 0x0d64), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, + { /* MSI TRX40 */ + .id = USB_ID(0x0db0, 0x543d), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, + { /* Asrock TRX40 Creator */ + .id = USB_ID(0x26ce, 0x0a01), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, { 0 } /* terminator */ }; @@ -475,37 +567,37 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { * Control map entries for UAC3 BADD profiles */ -static struct usbmix_name_map uac3_badd_generic_io_map[] = { +static const struct usbmix_name_map uac3_badd_generic_io_map[] = { { UAC3_BADD_FU_ID2, "Generic Out Playback" }, { UAC3_BADD_FU_ID5, "Generic In Capture" }, { 0 } /* terminator */ }; -static struct usbmix_name_map uac3_badd_headphone_map[] = { +static const struct usbmix_name_map uac3_badd_headphone_map[] 
= { { UAC3_BADD_FU_ID2, "Headphone Playback" }, { 0 } /* terminator */ }; -static struct usbmix_name_map uac3_badd_speaker_map[] = { +static const struct usbmix_name_map uac3_badd_speaker_map[] = { { UAC3_BADD_FU_ID2, "Speaker Playback" }, { 0 } /* terminator */ }; -static struct usbmix_name_map uac3_badd_microphone_map[] = { +static const struct usbmix_name_map uac3_badd_microphone_map[] = { { UAC3_BADD_FU_ID5, "Mic Capture" }, { 0 } /* terminator */ }; /* Covers also 'headset adapter' profile */ -static struct usbmix_name_map uac3_badd_headset_map[] = { +static const struct usbmix_name_map uac3_badd_headset_map[] = { { UAC3_BADD_FU_ID2, "Headset Playback" }, { UAC3_BADD_FU_ID5, "Headset Capture" }, { UAC3_BADD_FU_ID7, "Sidetone Mixing" }, { 0 } /* terminator */ }; -static struct usbmix_name_map uac3_badd_speakerphone_map[] = { +static const struct usbmix_name_map uac3_badd_speakerphone_map[] = { { UAC3_BADD_FU_ID2, "Speaker Playback" }, { UAC3_BADD_FU_ID5, "Mic Capture" }, { 0 } /* terminator */ }; -static struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = { +static const struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = { { .id = UAC3_FUNCTION_SUBCLASS_GENERIC_IO, .map = uac3_badd_generic_io_map, @@ -534,5 +626,10 @@ static struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = { .id = UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE, .map = uac3_badd_speakerphone_map, }, + { + /* Sennheiser Communications Headset [PC 8] */ + .id = USB_ID(0x1395, 0x0025), + .map = sennheiser_pc8_map, + }, { 0 } /* terminator */ }; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 39e27ae6c597..2040fecea17b 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -119,7 +119,7 @@ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer, * Create a set of standard UAC controls from a table */ static int snd_create_std_mono_table(struct usb_mixer_interface *mixer, - struct std_mono_table *t) + const struct std_mono_table *t) { int err; @@ -157,7 +157,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; - return snd_usb_mixer_add_control(list, kctl); + /* don't use snd_usb_mixer_add_control() here, this is a special list element */ + return snd_usb_mixer_add_list(list, kctl, false); } /* @@ -183,6 +184,7 @@ static const struct rc_config { { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; @@ -1386,7 +1388,7 @@ static int snd_c400_create_mixer(struct usb_mixer_interface *mixer) * are valid they presents mono controls as L and R channels of * stereo. So we provide a good mixer here. 
*/
-static struct std_mono_table ebox44_table[] = {
+static const struct std_mono_table ebox44_table[] = {
	{
		.unitid = 4,
		.control = 1,
@@ -1508,11 +1510,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
 	/* use known values for that card: interface#1 altsetting#1 */
 	iface = usb_ifnum_to_if(chip->dev, 1);
-	if (!iface || iface->num_altsetting < 2)
-		return -EINVAL;
+	if (!iface || iface->num_altsetting < 2) {
+		err = -EINVAL;
+		goto end;
+	}
 	alts = &iface->altsetting[1];
-	if (get_iface_desc(alts)->bNumEndpoints < 1)
-		return -EINVAL;
+	if (get_iface_desc(alts)->bNumEndpoints < 1) {
+		err = -EINVAL;
+		goto end;
+	}
 	ep = get_endpoint(alts, 0)->bEndpointAddress;

 	err = snd_usb_ctl_msg(chip->dev,
@@ -1691,7 +1697,7 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
 static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
 {
 	int err, i;
-	static usb_mixer_elem_resume_func_t resume_funcs[] = {
+	static const usb_mixer_elem_resume_func_t resume_funcs[] = {
 		snd_microii_spdif_default_update,
 		NULL,
 		snd_microii_spdif_switch_update
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 83715fd8dfd6..6217424e973f 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -623,7 +623,7 @@ static int add_output_ctls(struct usb_mixer_interface *mixer,
 /********************** device-specific config *************************/

 /* untested... */
-static struct scarlett_device_info s6i6_info = {
+static const struct scarlett_device_info s6i6_info = {
 	.matrix_in = 18,
 	.matrix_out = 8,
 	.input_len = 6,
@@ -665,7 +665,7 @@ static struct scarlett_device_info s6i6_info = {
 };

 /* untested... */
-static struct scarlett_device_info s8i6_info = {
+static const struct scarlett_device_info s8i6_info = {
 	.matrix_in = 18,
 	.matrix_out = 6,
 	.input_len = 8,
@@ -704,7 +704,7 @@ static struct scarlett_device_info s8i6_info = {
 	}
 };

-static struct scarlett_device_info s18i6_info = {
+static const struct scarlett_device_info s18i6_info = {
 	.matrix_in = 18,
 	.matrix_out = 6,
 	.input_len = 18,
@@ -741,7 +741,7 @@ static struct scarlett_device_info s18i6_info = {
 	}
 };

-static struct scarlett_device_info s18i8_info = {
+static const struct scarlett_device_info s18i8_info = {
 	.matrix_in = 18,
 	.matrix_out = 8,
 	.input_len = 18,
@@ -783,7 +783,7 @@ static struct scarlett_device_info s18i8_info = {
 	}
 };

-static struct scarlett_device_info s18i20_info = {
+static const struct scarlett_device_info s18i20_info = {
 	.matrix_in = 18,
 	.matrix_out = 8,
 	.input_len = 18,
@@ -833,7 +833,7 @@ static struct scarlett_device_info s18i20_info = {

 static int scarlett_controls_create_generic(struct usb_mixer_interface *mixer,
-	struct scarlett_device_info *info)
+	const struct scarlett_device_info *info)
 {
 	int i, err;
 	char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -896,7 +896,7 @@ int snd_scarlett_controls_create(struct usb_mixer_interface *mixer)
 {
 	int err, i, o;
 	char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
-	struct scarlett_device_info *info;
+	const struct scarlett_device_info *info;
 	struct usb_mixer_elem_info *elem;
 	static char sample_rate_buffer[4] = { '\x80', '\xbb', '\x00', '\x00' };
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
index f0e8e1539450..c6c834ac83ac 100644
--- a/sound/usb/mixer_us16x08.c
+++ b/sound/usb/mixer_us16x08.c
@@ -607,7 +607,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
 static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_info *uinfo)
 {
-	uinfo->count = 1;
+	uinfo->count = 34;
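This one-line count change is the entire US-16x08 fix: in ALSA, a kcontrol's .info callback is the contract for how many values its .get callback returns, and this level-meter control actually reports 34 integers, so advertising a count of 1 hid all but the first value from user space. A kernel-style sketch of that contract, hedged: the callback names and MAX_LEVELS are illustrative, not the driver's, and it builds only in-tree.

#include <sound/control.h>

#define MAX_LEVELS 34	/* must match what the .get callback fills in */

static int meter_info(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = MAX_LEVELS;	/* one slot per reported meter value */
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 0x7fff;
	return 0;
}

static int meter_get(struct snd_kcontrol *kcontrol,
		     struct snd_ctl_elem_value *ucontrol)
{
	int i;

	/* Had .info claimed count = 1, user space would only ever see
	 * value[0], even though every slot is written here.
	 */
	for (i = 0; i < MAX_LEVELS; i++)
		ucontrol->value.integer.value[i] = 0;	/* real driver: cached meter data */
	return 0;
}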
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->value.integer.max = 0x7FFF; uinfo->value.integer.min = 0; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index ad8f38380aa3..75218539fb10 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -323,6 +323,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, switch (subs->stream->chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ + case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */ ep = 0x81; ifnum = 3; goto add_sync_ep_from_ifnum; @@ -332,6 +333,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 2; goto add_sync_ep_from_ifnum; case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */ + case USB_ID(0x0499, 0x172a): /* Yamaha MODX */ ep = 0x86; ifnum = 2; goto add_sync_ep_from_ifnum; @@ -339,16 +341,29 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; + case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */ + ep = 0x82; + ifnum = 2; + goto add_sync_ep_from_ifnum; case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ ep = 0x81; ifnum = 1; goto add_sync_ep_from_ifnum; - case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */ + case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II/IIc */ + /* MicroBook IIc */ + if (altsd->bInterfaceClass == USB_CLASS_AUDIO) + return 0; + + /* MicroBook II */ ep = 0x84; ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ + case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */ + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; @@ -386,6 +401,8 @@ add_sync_ep: if (!subs->sync_endpoint) return -EINVAL; + subs->sync_endpoint->is_implicit_feedback = 1; + subs->data_endpoint->sync_master = subs->sync_endpoint; return 1; @@ -484,12 +501,15 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, implicit_fb ? 
SND_USB_ENDPOINT_TYPE_DATA : SND_USB_ENDPOINT_TYPE_SYNC); + if (!subs->sync_endpoint) { if (is_playback && attr == USB_ENDPOINT_SYNC_NONE) return 0; return -EINVAL; } + subs->sync_endpoint->is_implicit_feedback = implicit_fb; + subs->data_endpoint->sync_master = subs->sync_endpoint; return 0; @@ -1404,6 +1424,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs, // continue; } bytes = urb->iso_frame_desc[i].actual_length; + if (subs->stream_offset_adj > 0) { + unsigned int adj = min(subs->stream_offset_adj, bytes); + cp += adj; + bytes -= adj; + subs->stream_offset_adj -= adj; + } frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; @@ -1771,6 +1797,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); + subs->data_endpoint->retire_data_urb = NULL; subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: @@ -1858,7 +1885,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs) { struct snd_pcm *pcm = subs->stream->pcm; struct snd_pcm_substream *s = pcm->streams[subs->direction].substream; - struct device *dev = subs->dev->bus->controller; + struct device *dev = subs->dev->bus->sysdev; if (!snd_usb_use_vmalloc) snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG, diff --git a/sound/usb/proc.c b/sound/usb/proc.c index 49e3f176aaf5..ffbf4bd9208c 100644 --- a/sound/usb/proc.c +++ b/sound/usb/proc.c @@ -60,7 +60,7 @@ void snd_usb_audio_create_proc(struct snd_usb_audio *chip) static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { struct audioformat *fp; - static char *sync_types[4] = { + static const char * const sync_types[4] = { "NONE", "ASYNC", "ADAPTIVE", "SYNC" }; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index d187aa6d50db..441335abb401 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -25,6 +25,26 @@ .idProduct = prod, \ .bInterfaceClass = USB_CLASS_VENDOR_SPEC +/* HP Thunderbolt Dock Audio Headset */ +{ + USB_DEVICE(0x03f0, 0x0269), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "HP", + .product_name = "Thunderbolt Dock Audio Headset", + .profile_name = "HP-Thunderbolt-Dock-Audio-Headset", + .ifnum = QUIRK_NO_INTERFACE + } +}, +/* HP Thunderbolt Dock Audio Module */ +{ + USB_DEVICE(0x03f0, 0x0567), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "HP", + .product_name = "Thunderbolt Dock Audio Module", + .profile_name = "HP-Thunderbolt-Dock-Audio-Module", + .ifnum = QUIRK_NO_INTERFACE + } +}, /* FTDI devices */ { USB_DEVICE(0x0403, 0xb8d8), @@ -2465,6 +2485,16 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, +{ + USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .vendor_name = "KORG, Inc.", + /* .product_name = "ToneLab EX", */ + .ifnum = 3, + .type = QUIRK_MIDI_STANDARD_INTERFACE, + } +}, + /* AKAI devices */ { USB_DEVICE(0x09e8, 0x0062), @@ -2675,6 +2705,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, { .ifnum = 0, .type = QUIRK_AUDIO_FIXED_ENDPOINT, @@ -2687,6 +2721,32 @@ YAMAHA_DEVICE(0x7010, "UB99"), .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE, .endpoint = 0x01, .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x024c, + .rates = 
SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 44100, + .rate_max = 48000, + .nr_rates = 2, + .rate_table = (unsigned int[]) { + 44100, 48000 + } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x0126, .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .rate_min = 44100, @@ -2756,90 +2816,6 @@ YAMAHA_DEVICE(0x7010, "UB99"), .type = QUIRK_MIDI_NOVATION } }, -{ - /* - * Focusrite Scarlett Solo 2nd generation - * Reports that playback should use Synch: Synchronous - * while still providing a feedback endpoint. Synchronous causes - * snapping on some sample rates. - * Force it to use Synch: Asynchronous. - */ - USB_DEVICE(0x1235, 0x8205), - .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { - .ifnum = QUIRK_ANY_INTERFACE, - .type = QUIRK_COMPOSITE, - .data = (const struct snd_usb_audio_quirk[]) { - { - .ifnum = 1, - .type = QUIRK_AUDIO_FIXED_ENDPOINT, - .data = & (const struct audioformat) { - .formats = SNDRV_PCM_FMTBIT_S32_LE, - .channels = 2, - .iface = 1, - .altsetting = 1, - .altset_idx = 1, - .attributes = 0, - .endpoint = 0x01, - .ep_attr = USB_ENDPOINT_XFER_ISOC | - USB_ENDPOINT_SYNC_ASYNC, - .protocol = UAC_VERSION_2, - .rates = SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | - SNDRV_PCM_RATE_192000, - .rate_min = 44100, - .rate_max = 192000, - .nr_rates = 6, - .rate_table = (unsigned int[]) { - 44100, 48000, 88200, - 96000, 176400, 192000 - }, - .clock = 41 - } - }, - { - .ifnum = 2, - .type = QUIRK_AUDIO_FIXED_ENDPOINT, - .data = & (const struct audioformat) { - .formats = SNDRV_PCM_FMTBIT_S32_LE, - .channels = 2, - .iface = 2, - .altsetting = 1, - .altset_idx = 1, - .attributes = 0, - .endpoint = 0x82, - .ep_attr = USB_ENDPOINT_XFER_ISOC | - USB_ENDPOINT_SYNC_ASYNC | - USB_ENDPOINT_USAGE_IMPLICIT_FB, - .protocol = UAC_VERSION_2, - .rates = SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | - SNDRV_PCM_RATE_192000, - .rate_min = 44100, - .rate_max = 192000, - .nr_rates = 6, - .rate_table = (unsigned int[]) { - 44100, 48000, 88200, - 96000, 176400, 192000 - }, - .clock = 41 - } - }, - { - .ifnum = 3, - .type = QUIRK_IGNORE_INTERFACE - }, - { - .ifnum = -1 - } - } - } -}, /* Access Music devices */ { @@ -3472,7 +3448,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), }, /* MOTU Microbook II */ { - USB_DEVICE(0x07fd, 0x0004), + USB_DEVICE_VENDOR_SPEC(0x07fd, 0x0004), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .vendor_name = "MOTU", .product_name = "MicroBookII", @@ -3592,5 +3568,201 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, +{ + /* + * Pioneer DJ DJM-250MK2 + * PCM is 8 channels out @ 48 fixed (endpoints 0x01). + * The output from computer to the mixer is usable. + * + * The input (phono or line to computer) is not working. + * It should be at endpoint 0x82 and probably also 8 channels, + * but it seems that it works only with Pioneer proprietary software. + * Even on officially supported OS, the Audacity was unable to record + * and Mixxx to recognize the control vinyls. 
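A note on the .ep_attr values used throughout these fixed-endpoint quirk entries: they pack the isochronous transfer type and the endpoint's synchronization type into the single bmAttributes byte of the endpoint descriptor. A standalone illustration of how those bits decode, not driver code; the mask values are copied from <linux/usb/ch9.h> so the snippet compiles on its own.

#include <stdio.h>

#define USB_ENDPOINT_XFERTYPE_MASK	0x03
#define USB_ENDPOINT_XFER_ISOC		0x01
#define USB_ENDPOINT_SYNCTYPE		0x0c
#define USB_ENDPOINT_SYNC_ASYNC		(1 << 2)
#define USB_ENDPOINT_SYNC_SYNC		(3 << 2)

static const char *sync_name(unsigned int ep_attr)
{
	switch (ep_attr & USB_ENDPOINT_SYNCTYPE) {
	case USB_ENDPOINT_SYNC_ASYNC:	return "async";
	case USB_ENDPOINT_SYNC_SYNC:	return "sync";
	default:			return "none/adaptive";
	}
}

int main(void)
{
	/* the combination the DJM-250MK2 playback endpoint below uses */
	unsigned int ep_attr = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC;

	printf("isoc=%d sync=%s\n",
	       (ep_attr & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC,
	       sync_name(ep_attr));	/* prints: isoc=1 sync=async */
	return 0;
}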
+ */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 8, // outputs + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_48000, + .rate_min = 48000, + .rate_max = 48000, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 48000 } + } + }, + { + .ifnum = -1 + } + } + } +}, +{ + /* + * PIONEER DJ DDJ-RB + * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed + * The feedback for the output is the dummy input. + */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 4, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC| + USB_ENDPOINT_USAGE_IMPLICIT_FB, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = -1 + } + } + } +}, + +#define ALC1220_VB_DESKTOP(vend, prod) { \ + USB_DEVICE(vend, prod), \ + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \ + .vendor_name = "Realtek", \ + .product_name = "ALC1220-VB-DT", \ + .profile_name = "Realtek-ALC1220-VB-Desktop", \ + .ifnum = QUIRK_NO_INTERFACE \ + } \ +} +ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */ +ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */ +ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */ +ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ +#undef ALC1220_VB_DESKTOP + +/* Two entries for Gigabyte TRX40 Aorus Master: + * TRX40 Aorus Master has two USB-audio devices, one for the front headphone + * with ESS SABRE9218 DAC chip, while another for the rest I/O (the rear + * panel and the front mic) with Realtek ALC1220-VB. + * Here we provide two distinct names for making UCM profiles easier. 
+ */ +{ + USB_DEVICE(0x0414, 0xa000), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .vendor_name = "Gigabyte", + .product_name = "Aorus Master Front Headphone", + .profile_name = "Gigabyte-Aorus-Master-Front-Headphone", + .ifnum = QUIRK_NO_INTERFACE + } +}, +{ + USB_DEVICE(0x0414, 0xa001), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .vendor_name = "Gigabyte", + .product_name = "Aorus Master Main Audio", + .profile_name = "Gigabyte-Aorus-Master-Main-Audio", + .ifnum = QUIRK_NO_INTERFACE + } +}, + +/* + * MacroSilicon MS2109 based HDMI capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x534d, + .idProduct = 0x2109, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS2109", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 7448ab07bd36..186e90e3636c 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip, if (!iface) continue; if (quirk->ifnum != probed_ifnum && - !usb_interface_claimed(iface)) - usb_driver_claim_interface(driver, iface, (void *)-1L); + !usb_interface_claimed(iface)) { + err = usb_driver_claim_interface(driver, iface, + USB_AUDIO_IFACE_UNUSED); + if (err < 0) + return err; + } } return 0; @@ -390,8 +394,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip, continue; err = create_autodetect_quirk(chip, iface, driver); - if (err >= 0) - usb_driver_claim_interface(driver, iface, (void *)-1L); + if (err >= 0) { + err = usb_driver_claim_interface(driver, iface, + USB_AUDIO_IFACE_UNUSED); + if (err < 0) + return err; + } } return 0; @@ -1316,7 +1324,15 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, case USB_ID(0x2466, 0x8010): /* Fractal Audio Axe-Fx 3 */ return snd_usb_axefx3_boot_quirk(dev); case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */ - return snd_usb_motu_microbookii_boot_quirk(dev); + /* + * For some reason interface 3 with vendor-spec class is + * detected on MicroBook IIc. 
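The initial-alignment problem called out in the MS2109 comment above is handled by trimming a fixed number of leading bytes from the capture stream (the stream_offset_adj field, set to 2 for this device in the quirks.c hunk below) so the 16-bit L/R interleave falls back into phase. A standalone toy version of that trimming logic, with invented names; the real code lives in retire_capture_urb().

#include <stddef.h>
#include <stdio.h>

static size_t trim_stream_head(const unsigned char **buf, size_t bytes,
			       unsigned int *offset_adj)
{
	size_t adj = *offset_adj < bytes ? *offset_adj : bytes;

	*buf += adj;		/* drop the stale leading bytes */
	*offset_adj -= adj;	/* may take more than one packet to consume */
	return bytes - adj;
}

int main(void)
{
	unsigned char pkt[] = { 0xAA, 0xBB, 0x11, 0x22, 0x33, 0x44 };
	const unsigned char *p = pkt;
	unsigned int offset_adj = 2;	/* MS2109: half a frame out of phase */
	size_t left = trim_stream_head(&p, sizeof(pkt), &offset_adj);

	printf("%zu bytes remain, first byte 0x%02X\n", left, p[0]);	/* 4, 0x11 */
	return 0;
}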
+ */
+		if (get_iface_desc(intf->altsetting)->bInterfaceClass ==
+						USB_CLASS_VENDOR_SPEC &&
+		    get_iface_desc(intf->altsetting)->bInterfaceNumber < 3)
+			return snd_usb_motu_microbookii_boot_quirk(dev);
+		break;
 	}

 	return 0;
@@ -1424,6 +1440,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
 	case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
 		set_format_emu_quirk(subs, fmt);
 		break;
+	case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+		subs->stream_offset_adj = 2;
+		break;
 	}
 }

@@ -1441,6 +1460,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
 	case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
+	case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+	case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
 		return true;
 	}

@@ -1461,6 +1482,7 @@ static bool is_itf_usb_dsd_dac(unsigned int id)
 {
 	switch (id) {
+	case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
 	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1592,15 +1614,33 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		msleep(20);

-	/* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
-	 * otherwise requests like get/set frequency return as failed despite
-	 * actually succeeding.
+	/*
+	 * Plantronics headsets (C320, C320-M, etc) need a delay to avoid
+	 * random microphone failures.
+	 */
+	if (USB_ID_VENDOR(chip->usb_id) == 0x047f &&
+	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+		msleep(20);
+
+	/* Zoom R16/24, many Logitech (at least H650e/H570e/BCC950),
+	 * Jabra 550a, Kingston HyperX need a tiny delay here,
+	 * otherwise requests like get/set frequency return
+	 * as failed despite actually succeeding.
 	 */
 	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
-	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
-	     chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+	     USB_ID_VENDOR(chip->usb_id) == 0x046d || /* Logitech */
+	     chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+	     chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		usleep_range(1000, 2000);
+
+	/*
+	 * Samsung USBC Headset (AKG) needs a tiny delay after each
+	 * class compliant request. (Model number: AAM625R or AAM627R)
+	 */
+	if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+		usleep_range(5000, 6000);
 }

 /*
@@ -1643,7 +1683,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,

 	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
 	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
-	case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+	case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
 	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
 	case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
 	case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
@@ -1709,7 +1749,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,

 	case 0x25ce: /* Mytek devices */
 	case 0x278b: /* Rotel?
*/ case 0x292b: /* Gustard/Ess based devices */ + case 0x2972: /* FiiO devices */ case 0x2ab6: /* T+A devices */ + case 0x3353: /* Khadas devices */ case 0x3842: /* EVGA */ case 0xc502: /* HiBy devices */ if (fp->dsd_raw) @@ -1754,5 +1796,62 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip, else fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC; break; + case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook IIc */ + /* + * MaxPacketsOnly attribute is erroneously set in endpoint + * descriptors. As a result this card produces noise with + * all sample rates other than 96 KHz. + */ + fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX; + break; + case USB_ID(0x1235, 0x8202): /* Focusrite Scarlett 2i2 2nd gen */ + case USB_ID(0x1235, 0x8205): /* Focusrite Scarlett Solo 2nd gen */ + /* + * Reports that playback should use Synch: Synchronous + * while still providing a feedback endpoint. + * Synchronous causes snapping on some sample rates. + * Force it to use Synch: Asynchronous. + */ + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE; + fp->ep_attr |= USB_ENDPOINT_SYNC_ASYNC; + } + break; } } + +/* + * registration quirk: + * the registration is skipped if a device matches with the given ID, + * unless the interface reaches to the defined one. This is for delaying + * the registration until the last known interface, so that the card and + * devices appear at the same time. + */ + +struct registration_quirk { + unsigned int usb_id; /* composed via USB_ID() */ + unsigned int interface; /* the interface to trigger register */ +}; + +#define REG_QUIRK_ENTRY(vendor, product, iface) \ + { .usb_id = USB_ID(vendor, product), .interface = (iface) } + +static const struct registration_quirk registration_quirks[] = { + REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */ + REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ + REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ + { 0 } /* terminator */ +}; + +/* return true if skipping registration */ +bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface) +{ + const struct registration_quirk *q; + + for (q = registration_quirks; q->usb_id; q++) + if (chip->usb_id == q->usb_id) + return iface != q->interface; + + /* Register as normal */ + return false; +} diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h index df0355843a4c..c76cf24a640a 100644 --- a/sound/usb/quirks.h +++ b/sound/usb/quirks.h @@ -51,4 +51,6 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip, struct audioformat *fp, int stream); +bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface); + #endif /* __USBAUDIO_QUIRKS_H */ diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 11785f9652ad..eff1ac1dc9ba 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, subs->tx_length_quirk = as->chip->tx_length_quirk; subs->speed = snd_usb_get_speed(subs->dev); subs->pkt_offset_adj = 0; + subs->stream_offset_adj = 0; snd_usb_set_pcm_ops(as->pcm, stream); @@ -192,16 +193,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); struct snd_usb_substream *subs = info->private_data; struct snd_pcm_chmap_elem *chmap = NULL; - int i; + int i = 0; - memset(ucontrol->value.integer.value, 0, - sizeof(ucontrol->value.integer.value)); if (subs->cur_audiofmt) chmap = subs->cur_audiofmt->chmap; if (chmap) { for (i = 0; i < 
chmap->channels; i++) ucontrol->value.integer.value[i] = chmap->map[i]; } + for (; i < subs->channels_max; i++) + ucontrol->value.integer.value[i] = 0; return 0; } @@ -239,7 +240,7 @@ static int add_chmap(struct snd_pcm *pcm, int stream, static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits, int protocol) { - static unsigned int uac1_maps[] = { + static const unsigned int uac1_maps[] = { SNDRV_CHMAP_FL, /* left front */ SNDRV_CHMAP_FR, /* right front */ SNDRV_CHMAP_FC, /* center front */ @@ -254,7 +255,7 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits, SNDRV_CHMAP_TC, /* top */ 0 /* terminator */ }; - static unsigned int uac2_maps[] = { + static const unsigned int uac2_maps[] = { SNDRV_CHMAP_FL, /* front left */ SNDRV_CHMAP_FR, /* front right */ SNDRV_CHMAP_FC, /* front center */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index e360680f45f3..ff97fdcf63bd 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -26,7 +26,7 @@ struct snd_usb_audio { struct usb_interface *pm_intf; u32 usb_id; struct mutex mutex; - unsigned int autosuspended:1; + unsigned int system_suspend; atomic_t active; atomic_t shutdown; atomic_t usage_count; @@ -59,6 +59,8 @@ struct snd_usb_audio { struct media_intf_devnode *ctl_intf_media_devnode; }; +#define USB_AUDIO_IFACE_UNUSED ((void *)-1L) + #define usb_audio_err(chip, fmt, args...) \ dev_err(&(chip)->dev->dev, fmt, ##args) #define usb_audio_warn(chip, fmt, args...) \ diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index 89fa287678fc..e0bace4d1c40 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -681,6 +681,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate) us->submitted = 2*NOOF_SETRATE_URBS; for (i = 0; i < NOOF_SETRATE_URBS; ++i) { struct urb *urb = us->urb[i]; + if (!urb) + continue; if (urb->status) { if (!err) err = -ENODEV; diff --git a/sound/usb/validate.c b/sound/usb/validate.c index 5a3c4f7882b0..89a48d731719 100644 --- a/sound/usb/validate.c +++ b/sound/usb/validate.c @@ -233,7 +233,7 @@ static bool validate_midi_out_jack(const void *p, #define FIXED(p, t, s) { .protocol = (p), .type = (t), .size = sizeof(s) } #define FUNC(p, t, f) { .protocol = (p), .type = (t), .func = (f) } -static struct usb_desc_validator audio_validators[] = { +static const struct usb_desc_validator audio_validators[] = { /* UAC1 */ FUNC(UAC_VERSION_1, UAC_HEADER, validate_uac1_header), FIXED(UAC_VERSION_1, UAC_INPUT_TERMINAL, @@ -288,7 +288,7 @@ static struct usb_desc_validator audio_validators[] = { { } /* terminator */ }; -static struct usb_desc_validator midi_validators[] = { +static const struct usb_desc_validator midi_validators[] = { FIXED(UAC_VERSION_ALL, USB_MS_HEADER, struct usb_ms_header_descriptor), FIXED(UAC_VERSION_ALL, USB_MS_MIDI_IN_JACK, diff --git a/tools/Makefile b/tools/Makefile index 7e42f7b8bfa7..2d77a42d78d0 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -66,6 +66,9 @@ cpupower: FORCE cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE $(call descend,$@) +bpf/%: FORCE + $(call descend,$@) + liblockdep: FORCE $(call descend,lib/lockdep) diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h index 4703d218663a..f83a70e07df8 100644 --- a/tools/arch/arm64/include/uapi/asm/unistd.h +++ b/tools/arch/arm64/include/uapi/asm/unistd.h @@ -19,5 +19,6 @@ #define __ARCH_WANT_NEW_STAT #define 
__ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS +#define __ARCH_WANT_SYS_CLONE3 #include <asm-generic/unistd.h> diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h index 4d471d9511a5..6fffe5682713 100644 --- a/tools/arch/ia64/include/asm/barrier.h +++ b/tools/arch/ia64/include/asm/barrier.h @@ -39,9 +39,6 @@ * sequential memory pages only. */ -/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */ -#define ia64_mf() asm volatile ("mf" ::: "memory") - #define mb() ia64_mf() #define rmb() mb() #define wmb() mb() diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h index 37a4c390750b..d7f0ae8f3c44 100644 --- a/tools/arch/x86/include/asm/insn.h +++ b/tools/arch/x86/include/asm/insn.h @@ -195,6 +195,21 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +/** + * for_each_insn_prefix() -- Iterate prefixes in the instruction + * @insn: Pointer to struct insn. + * @idx: Index storage. + * @prefix: Prefix byte. + * + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix + * and the index is stored in @idx (note that this @idx is just for a cursor, + * do not change it.) + * Since prefixes.nbytes can be bigger than 4 if some prefixes + * are repeated, it cannot be used for looping over the prefixes. + */ +#define for_each_insn_prefix(insn, idx, prefix) \ + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) + #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/tools/arch/x86/include/uapi/asm/unistd.h b/tools/arch/x86/include/uapi/asm/unistd.h index 196fdd02b8b1..30d7d04d72d6 100644 --- a/tools/arch/x86/include/uapi/asm/unistd.h +++ b/tools/arch/x86/include/uapi/asm/unistd.h @@ -3,7 +3,7 @@ #define _UAPI_ASM_X86_UNISTD_H /* x32 syscall flag bit */ -#define __X32_SYSCALL_BIT 0x40000000UL +#define __X32_SYSCALL_BIT 0x40000000 #ifndef __KERNEL__ # ifdef __i386__ diff --git a/tools/bpf/.gitignore b/tools/bpf/.gitignore index 59024197e71d..cf53342175e7 100644 --- a/tools/bpf/.gitignore +++ b/tools/bpf/.gitignore @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only FEATURE-DUMP.bpf feature bpf_asm diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 5535650800ab..39bb322707b4 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -3,14 +3,14 @@ include ../scripts/Makefile.include prefix ?= /usr/local -CC = gcc LEX = flex YACC = bison MAKE = make INSTALL ?= install CFLAGS += -Wall -O2 -CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include +CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi \ + -I$(srctree)/tools/include # This will work when bpf is built in tools env. 
where srctree # isn't set and when invoked from selftests build, where srctree @@ -38,7 +38,7 @@ FEATURE_TESTS = libbfd disassembler-four-args FEATURE_DISPLAY = libbfd disassembler-four-args check_feat := 1 -NON_CHECK_FEAT_TARGETS := clean bpftool_clean +NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean ifdef MAKECMDGOALS ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),) check_feat := 0 @@ -64,16 +64,16 @@ $(OUTPUT)%.lex.c: $(srctree)/tools/bpf/%.l $(QUIET_FLEX)$(LEX) -o $@ $< $(OUTPUT)%.o: $(srctree)/tools/bpf/%.c - $(QUIET_CC)$(COMPILE.c) -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $< $(OUTPUT)%.yacc.o: $(OUTPUT)%.yacc.c - $(QUIET_CC)$(COMPILE.c) -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $< $(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c - $(QUIET_CC)$(COMPILE.c) -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $< PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm -all: $(PROGS) bpftool +all: $(PROGS) bpftool runqslower $(OUTPUT)bpf_jit_disasm: CFLAGS += -DPACKAGE='bpf_jit_disasm' $(OUTPUT)bpf_jit_disasm: $(OUTPUT)bpf_jit_disasm.o @@ -89,7 +89,7 @@ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c $(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c $(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c -clean: bpftool_clean +clean: bpftool_clean runqslower_clean resolve_btfids_clean $(call QUIET_CLEAN, bpf-progs) $(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \ $(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.* @@ -97,7 +97,7 @@ clean: bpftool_clean $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf $(Q)$(RM) -r -- $(OUTPUT)feature -install: $(PROGS) bpftool_install +install: $(PROGS) bpftool_install runqslower_install $(call QUIET_INSTALL, bpf_jit_disasm) $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm @@ -115,4 +115,21 @@ bpftool_install: bpftool_clean: $(call descend,bpftool,clean) -.PHONY: all install clean bpftool bpftool_install bpftool_clean +runqslower: + $(call descend,runqslower) + +runqslower_install: + $(call descend,runqslower,install) + +runqslower_clean: + $(call descend,runqslower,clean) + +resolve_btfids: + $(call descend,resolve_btfids) + +resolve_btfids_clean: + $(call descend,resolve_btfids,clean) + +.PHONY: all install clean bpftool bpftool_install bpftool_clean \ + runqslower runqslower_install runqslower_clean \ + resolve_btfids resolve_btfids_clean diff --git a/tools/bpf/bpf_asm.c b/tools/bpf/bpf_asm.c index e5f95e3eede3..0063c3c029e7 100644 --- a/tools/bpf/bpf_asm.c +++ b/tools/bpf/bpf_asm.c @@ -11,7 +11,7 @@ * * How to get into it: * - * 1) read Documentation/networking/filter.txt + * 1) read Documentation/networking/filter.rst * 2) Run `bpf_asm [-c] <filter-prog file>` to translate into binary * blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will * pretty print a C-like construct. diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c index 9d3766e653a9..a0ebcdf59c31 100644 --- a/tools/bpf/bpf_dbg.c +++ b/tools/bpf/bpf_dbg.c @@ -13,7 +13,7 @@ * for making a verdict when multiple simple BPF programs are combined * into one in order to prevent parsing same headers multiple times. * - * More on how to debug BPF opcodes see Documentation/networking/filter.txt + * More on how to debug BPF opcodes see Documentation/networking/filter.rst * which is the main document on BPF. 
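Just below, bpf_exp.y gains bpf_encode_jt_jf_offset() to catch a silent truncation: classic BPF stores the jt/jf branch targets as single unsigned bytes relative to the next instruction, so a conditional jump can only skip up to 255 instructions. A standalone illustration of the constraint the new helper enforces, with toy names but the same arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint8_t encode_jump(int target_insn, int cur_insn)
{
	int delta = target_insn - cur_insn - 1;	/* relative to the next insn */

	if (delta < 0 || delta > 255)
		fprintf(stderr, "warning: insn #%d jumps to insn #%d, out of range\n",
			cur_insn, target_insn);
	return (uint8_t)delta;	/* without the check, this wraps silently */
}

int main(void)
{
	printf("%u\n", encode_jump(10, 2));	/* 7: fits in a byte */
	printf("%u\n", encode_jump(400, 2));	/* warns; 397 wraps to 141 */
	return 0;
}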
Mini howto for getting started: * * 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'): diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y index 56ba1de50784..8d48e896be50 100644 --- a/tools/bpf/bpf_exp.y +++ b/tools/bpf/bpf_exp.y @@ -545,6 +545,16 @@ static void bpf_reduce_k_jumps(void) } } +static uint8_t bpf_encode_jt_jf_offset(int off, int i) +{ + int delta = off - i - 1; + + if (delta < 0 || delta > 255) + fprintf(stderr, "warning: insn #%d jumps to insn #%d, " + "which is out of range\n", i, off); + return (uint8_t) delta; +} + static void bpf_reduce_jt_jumps(void) { int i; @@ -552,7 +562,7 @@ static void bpf_reduce_jt_jumps(void) for (i = 0; i < curr_instr; i++) { if (labels_jt[i]) { int off = bpf_find_insns_offset(labels_jt[i]); - out[i].jt = (uint8_t) (off - i -1); + out[i].jt = bpf_encode_jt_jf_offset(off, i); } } } @@ -564,7 +574,7 @@ static void bpf_reduce_jf_jumps(void) for (i = 0; i < curr_instr; i++) { if (labels_jf[i]) { int off = bpf_find_insns_offset(labels_jf[i]); - out[i].jf = (uint8_t) (off - i - 1); + out[i].jf = bpf_encode_jt_jf_offset(off, i); } } } diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index b13926432b84..3e601bcfd461 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -1,7 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only *.d +/bpftool-bootstrap /bpftool bpftool*.8 bpf-helpers.* FEATURE-DUMP.bpftool feature libbpf +/*.skel.h +/vmlinux.h diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index 815ac9804aee..f33cb02de95c 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -19,7 +19,7 @@ man8dir = $(mandir)/man8 # Load targets for building eBPF helpers man page. include ../../Makefile.helpers -MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst)) +MAN8_RST = $(wildcard bpftool*.rst) _DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST)) DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8)) @@ -28,12 +28,23 @@ man: man8 helpers man8: $(DOC_MAN8) RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) +RST2MAN_OPTS += --verbose + +list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST)))) +see_also = $(subst " ",, \ + "\n" \ + "SEE ALSO\n" \ + "========\n" \ + "\t**bpf**\ (2),\n" \ + "\t**bpf-helpers**\\ (7)" \ + $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \ + "\n") $(OUTPUT)%.8: %.rst ifndef RST2MAN_DEP $(error "rst2man not found, but required to generate man pages") endif - $(QUIET_GEN)rst2man $< > $@ + $(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@ clean: helpers-clean $(call QUIET_CLEAN, Documentation) diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 39615f8e145b..ff4d327a582e 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -36,6 +36,11 @@ DESCRIPTION otherwise list all BTF objects currently loaded on the system. + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BTF + objects. On such kernels bpftool will automatically emit this + information as well. + **bpftool btf dump** *BTF_SRC* Dump BTF entries from a given *BTF_SRC*. @@ -66,26 +71,12 @@ DESCRIPTION OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). 
- - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. - - -d, --debug - Print all logs available from libbpf, including debug-level - information. + .. include:: common_options.rst EXAMPLES ======== **# bpftool btf dump id 1226** + :: [1] PTR '(anon)' type_id=2 @@ -99,6 +90,7 @@ EXAMPLES This gives an example of default output for all supported BTF kinds. **$ cat prog.c** + :: struct fwd_struct; @@ -139,6 +131,7 @@ This gives an example of default output for all supported BTF kinds. } **$ bpftool btf dump file prog.o** + :: [1] PTR '(anon)' type_id=2 @@ -224,15 +217,3 @@ All the standard ways to specify map or program are supported: **# bpftool btf dump prog tag b88e0a09b1d9759d** **# bpftool btf dump prog pinned /sys/fs/bpf/prog_name** - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool**\ (8), - **bpftool-map**\ (8), - **bpftool-prog**\ (8), - **bpftool-cgroup**\ (8), - **bpftool-feature**\ (8), - **bpftool-net**\ (8), - **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 06a28b07787d..790944c35602 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -20,7 +20,7 @@ SYNOPSIS CGROUP COMMANDS =============== -| **bpftool** **cgroup { show | list }** *CGROUP* [**effective**] +| **bpftool** **cgroup** { **show** | **list** } *CGROUP* [**effective**] | **bpftool** **cgroup tree** [*CGROUP_ROOT*] [**effective**] | **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] | **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* @@ -29,8 +29,8 @@ CGROUP COMMANDS | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } | *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** | | **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** | -| **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | -| **getsockopt** | **setsockopt** } +| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** | +| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** } | *ATTACH_FLAGS* := { **multi** | **override** } DESCRIPTION @@ -101,7 +101,11 @@ DESCRIPTION an unconnected udp6 socket (since 5.2); **sysctl** sysctl access (since 5.2); **getsockopt** call to getsockopt (since 5.3); - **setsockopt** call to setsockopt (since 5.3). + **setsockopt** call to setsockopt (since 5.3); + **getpeername4** call to getpeername(2) for an inet4 socket (since 5.8); + **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8); + **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8); + **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8). **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* Detach *PROG* from the cgroup *CGROUP* and attach type @@ -112,26 +116,11 @@ DESCRIPTION OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. + .. 
include:: common_options.rst -f, --bpffs Show file names of pinned programs. - -d, --debug - Print all logs available from libbpf, including debug-level - information. - EXAMPLES ======== | @@ -154,15 +143,3 @@ EXAMPLES :: ID AttachType AttachFlags Name - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool**\ (8), - **bpftool-prog**\ (8), - **bpftool-map**\ (8), - **bpftool-feature**\ (8), - **bpftool-net**\ (8), - **bpftool-perf**\ (8), - **bpftool-btf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 4d08f35034a2..dd3771bdbc57 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -19,19 +19,24 @@ SYNOPSIS FEATURE COMMANDS ================ -| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]] +| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]] | **bpftool** **feature help** | | *COMPONENT* := { **kernel** | **dev** *NAME* } DESCRIPTION =========== - **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]] + **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]] Probe the running kernel and dump a number of eBPF-related - parameters, such as availability of the **bpf()** system call, + parameters, such as availability of the **bpf**\ () system call, JIT status, eBPF program types availability, eBPF helper functions availability, and more. + By default, bpftool **does not run probes** for + **bpf_probe_write_user**\ () and **bpf_trace_printk**\() + helpers which print warnings to kernel logs. To enable them + and run all probes, the **full** keyword should be used. + If the **macros** keyword (but not the **-j** option) is passed, a subset of the output is dumped as a list of **#define** macros that are ready to be included in a C @@ -44,47 +49,26 @@ DESCRIPTION Keyword **kernel** can be omitted. If no probe target is specified, probing the kernel is the default behaviour. - Note that when probed, some eBPF helpers (e.g. - **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may - print warnings to kernel logs. + When the **unprivileged** keyword is used, bpftool will dump + only the features available to a user who does not have the + **CAP_SYS_ADMIN** capability set. The features available in + that case usually represent a small subset of the parameters + supported by the system. Unprivileged users MUST use the + **unprivileged** keyword: This is to avoid misdetection if + bpftool is inadvertently run as non-root, for example. This + keyword is unavailable if bpftool was compiled without + libcap. - **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]] + **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]] Probe network device for supported eBPF features and dump results to the console. - The two keywords **macros** and **prefix** have the same - role as when probing the kernel. + The keywords **full**, **macros** and **prefix** have the + same role as when probing the kernel. **bpftool feature help** Print short help message. OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. 
-	-p, --pretty
-		  Generate human-readable JSON output. Implies **-j**.
-
-	-d, --debug
-		  Print all logs available from libbpf, including debug-level
-		  information.
-
-SEE ALSO
-========
-	**bpf**\ (2),
-	**bpf-helpers**\ (7),
-	**bpftool**\ (8),
-	**bpftool-prog**\ (8),
-	**bpftool-map**\ (8),
-	**bpftool-cgroup**\ (8),
-	**bpftool-net**\ (8),
-	**bpftool-perf**\ (8),
-	**bpftool-btf**\ (8)
+	.. include:: common_options.rst
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
new file mode 100644
index 000000000000..84cf0639696f
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -0,0 +1,281 @@
+================
+bpftool-gen
+================
+-------------------------------------------------------------------------------
+tool for BPF code-generation
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+	**bpftool** [*OPTIONS*] **gen** *COMMAND*
+
+	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+	*COMMAND* := { **skeleton** | **help** }
+
+GEN COMMANDS
+=============
+
+|	**bpftool** **gen skeleton** *FILE*
+|	**bpftool** **gen help**
+
+DESCRIPTION
+===========
+	**bpftool gen skeleton** *FILE*
+		  Generate BPF skeleton C header file for a given *FILE*.
+
+		  A BPF skeleton is an alternative interface to existing libbpf
+		  APIs for working with BPF objects. Skeleton code is intended
+		  to significantly shorten and simplify the code needed to load
+		  and work with BPF programs from the userspace side. Generated
+		  code is tailored to the specific input BPF object *FILE*,
+		  reflecting its structure by listing out the available maps,
+		  programs, variables, etc. The skeleton eliminates the need to
+		  look up these components by name. Instead, if skeleton
+		  instantiation succeeds, they are populated in the skeleton
+		  structure as valid libbpf types (e.g., a **struct bpf_map**
+		  pointer) and can be passed to existing generic libbpf APIs.
+
+		  In addition to simple and reliable access to maps and
+		  programs, the skeleton provides storage for BPF links
+		  (**struct bpf_link**) for each BPF program within the BPF
+		  object. When requested, supported BPF programs will be
+		  automatically attached and the resulting BPF links stored for
+		  further use by the user in pre-allocated fields of the
+		  skeleton struct. For BPF programs that can't be automatically
+		  attached by libbpf, the user can attach them manually and
+		  store the resulting BPF link in the per-program link field.
+		  All links set up this way will be automatically destroyed on
+		  BPF skeleton destruction. This eliminates the need for users
+		  to manage links manually and rely on libbpf support to detach
+		  programs and free up resources.
+
+		  Another facility provided by the BPF skeleton is an interface
+		  to global variables of all supported kinds: mutable,
+		  read-only, as well as extern ones. This interface allows
+		  presetting initial values of variables before the BPF object
+		  is loaded and verified by the kernel. For non-read-only
+		  variables, the same interface can be used to fetch values of
+		  global variables on the userspace side, even if they are
+		  modified by BPF code.
+
+		  During skeleton generation, the contents of the source BPF
+		  object *FILE* are embedded within the generated code, so the
+		  object file does not need to be kept around. This ensures the
+		  skeleton and the BPF object file match 1-to-1 and always stay
+		  in sync. Generated code is dual-licensed under LGPL-2.1 and
+		  BSD-2-Clause licenses.
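+
+		  The variable-access flow described above can be summarized
+		  in a minimal sketch. It assumes a generated
+		  **example.skel.h** whose object defines the hypothetical
+		  read-only variable **param1** and mutable variable
+		  **global_flag**, mirroring the example at the end of this
+		  page:
+
+		  ::
+
+		    #include <stdio.h>
+		    #include "example.skel.h" /* hypothetical generated skeleton */
+
+		    int main(void)
+		    {
+		            struct example *skel;
+		            int err;
+
+		            skel = example__open();
+		            if (!skel)
+		                    return 1;
+
+		            /* Read-only data may only be set between open and load. */
+		            skel->rodata->param1 = 64;
+
+		            err = example__load(skel);
+		            if (err)
+		                    goto out;
+
+		            /* Mutable data stays readable and writable after load on
+		             * kernels that support memory-mapped BPF arrays.
+		             */
+		            skel->data->global_flag = false;
+		            printf("param1 = %d\n", skel->rodata->param1);
+		    out:
+		            example__destroy(skel);
+		            return err;
+		    }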
+
+		  It is a design goal and guarantee that skeleton interfaces
+		  are interoperable with generic libbpf APIs. The user should
+		  always be able to use skeleton APIs to create and load a BPF
+		  object, and later use libbpf APIs to keep working with
+		  specific maps, programs, etc.
+
+		  As part of the skeleton, a few custom functions are
+		  generated. Each of them is prefixed with the object name,
+		  derived from the object file name. I.e., if the BPF object
+		  file name is **example.o**, the BPF object name will be
+		  **example**. The following custom functions are provided in
+		  such a case:
+
+		  - **example__open** and **example__open_opts**.
+		    These functions are used to instantiate the skeleton. They
+		    correspond to libbpf's **bpf_object__open**\ () API. The
+		    **_opts** variant accepts extra **bpf_object_open_opts**
+		    options.
+
+		  - **example__load**.
+		    This function creates maps, loads and verifies BPF
+		    programs, and initializes global data maps. It corresponds
+		    to libbpf's **bpf_object__load**\ () API.
+
+		  - **example__open_and_load** combines **example__open** and
+		    **example__load** invocations in one commonly used
+		    operation.
+
+		  - **example__attach** and **example__detach**.
+		    This pair of functions attaches and detaches, respectively,
+		    an already loaded BPF object. Only BPF programs of types
+		    supported by libbpf for auto-attachment will be
+		    auto-attached and their corresponding BPF links
+		    instantiated. For other BPF programs, the user can manually
+		    create a BPF link and assign it to the corresponding field
+		    in the skeleton struct. **example__detach** will detach
+		    both the links created automatically and those populated by
+		    the user manually.
+
+		  - **example__destroy**.
+		    Detach and unload BPF programs, and free up all the
+		    resources used by the skeleton and BPF object.
+
+		  If the BPF object has global variables, corresponding structs
+		  with a memory layout matching the global data section layout
+		  will be created. Currently supported ones are the *.data*,
+		  *.bss*, *.rodata*, and *.kconfig* structs/data sections.
+		  These data sections/structs can be used to set up initial
+		  values of variables, if set before **example__load**.
+		  Afterwards, if the target kernel supports memory-mapped BPF
+		  arrays, the same structs can be used to fetch and update
+		  (non-read-only) data from userspace, with the same simplicity
+		  as on the BPF side.
+
+	**bpftool gen help**
+		  Print short help message.
+
+OPTIONS
+=======
+	.. include:: common_options.rst
+
+EXAMPLES
+========
+**$ cat example.c**
+
+::
+
+  #include <stdbool.h>
+  #include <linux/ptrace.h>
+  #include <linux/bpf.h>
+  #include "bpf_helpers.h"
+
+  const volatile int param1 = 42;
+  bool global_flag = true;
+  struct { int x; } data = {};
+
+  struct {
+          __uint(type, BPF_MAP_TYPE_HASH);
+          __uint(max_entries, 128);
+          __type(key, int);
+          __type(value, long);
+  } my_map SEC(".maps");
+
+  SEC("raw_tp/sys_enter")
+  int handle_sys_enter(struct pt_regs *ctx)
+  {
+          static long my_static_var;
+          if (global_flag)
+                  my_static_var++;
+          else
+                  data.x += param1;
+          return 0;
+  }
+
+  SEC("raw_tp/sys_exit")
+  int handle_sys_exit(struct pt_regs *ctx)
+  {
+          int zero = 0;
+          bpf_map_lookup_elem(&my_map, &zero);
+          return 0;
+  }
+
+This is an example BPF application with two BPF programs and a mix of BPF
+maps and global variables.
+
+**$ bpftool gen skeleton example.o**
+
+::
+
+  /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+  /* THIS FILE IS AUTOGENERATED! */
+  #ifndef __EXAMPLE_SKEL_H__
+  #define __EXAMPLE_SKEL_H__
+
+  #include <stdlib.h>
+  #include <bpf/libbpf.h>
+
+  struct example {
+          struct bpf_object_skeleton *skeleton;
+          struct bpf_object *obj;
+          struct {
+                  struct bpf_map *rodata;
+                  struct bpf_map *data;
+                  struct bpf_map *bss;
+                  struct bpf_map *my_map;
+          } maps;
+          struct {
+                  struct bpf_program *handle_sys_enter;
+                  struct bpf_program *handle_sys_exit;
+          } progs;
+          struct {
+                  struct bpf_link *handle_sys_enter;
+                  struct bpf_link *handle_sys_exit;
+          } links;
+          struct example__bss {
+                  struct {
+                          int x;
+                  } data;
+          } *bss;
+          struct example__data {
+                  _Bool global_flag;
+                  long int handle_sys_enter_my_static_var;
+          } *data;
+          struct example__rodata {
+                  int param1;
+          } *rodata;
+  };
+
+  static void example__destroy(struct example *obj);
+  static inline struct example *example__open_opts(
+                  const struct bpf_object_open_opts *opts);
+  static inline struct example *example__open();
+  static inline int example__load(struct example *obj);
+  static inline struct example *example__open_and_load();
+  static inline int example__attach(struct example *obj);
+  static inline void example__detach(struct example *obj);
+
+  #endif /* __EXAMPLE_SKEL_H__ */
+
+**$ cat example_user.c**
+
+::
+
+  #include "example.skel.h"
+
+  int main()
+  {
+          struct example *skel;
+          int err = 0;
+
+          skel = example__open();
+          if (!skel)
+                  goto cleanup;
+
+          skel->rodata->param1 = 128;
+
+          err = example__load(skel);
+          if (err)
+                  goto cleanup;
+
+          err = example__attach(skel);
+          if (err)
+                  goto cleanup;
+
+          /* all libbpf APIs are usable */
+          printf("my_map name: %s\n", bpf_map__name(skel->maps.my_map));
+          printf("sys_enter prog FD: %d\n",
+                 bpf_program__fd(skel->progs.handle_sys_enter));
+
+          /* detach and re-attach sys_exit program */
+          bpf_link__destroy(skel->links.handle_sys_exit);
+          skel->links.handle_sys_exit =
+                  bpf_program__attach(skel->progs.handle_sys_exit);
+
+          printf("my_static_var: %ld\n",
+                 skel->bss->handle_sys_enter_my_static_var);
+
+  cleanup:
+          example__destroy(skel);
+          return err;
+  }
+
+**# ./example_user**
+
+::
+
+  my_map name: my_map
+  sys_enter prog FD: 8
+  my_static_var: 7
+
+This is a stripped-out version of the skeleton generated for the above
+example code.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
new file mode 100644
index 000000000000..51f49bead619
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
@@ -0,0 +1,70 @@
+============
+bpftool-iter
+============
+-------------------------------------------------------------------------------
+tool to create BPF iterators
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+	**bpftool** [*OPTIONS*] **iter** *COMMAND*
+
+	*COMMANDS* := { **pin** | **help** }
+
+ITER COMMANDS
+===================
+
+|	**bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*]
+|	**bpftool** **iter help**
+|
+|	*OBJ* := /a/file/of/bpf_iter_target.o
+|	*MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+
+DESCRIPTION
+===========
+	**bpftool iter pin** *OBJ* *PATH* [**map** *MAP*]
+		  A bpf iterator combines kernel iteration over particular
+		  kernel data (e.g., tasks, bpf_maps, etc.) with a bpf program
+		  that is called for each kernel data object (e.g., one task,
+		  one bpf_map, etc.). User space can *read* the kernel iterator
+		  output through the *read()* syscall.
+
+		  The *pin* command creates a bpf iterator from *OBJ* and pins
+		  it to *PATH*. The *PATH* should be located in a *bpffs*
+		  mount.
It must not contain a dot + character ('.'), which is reserved for future extensions + of *bpffs*. + + Map element bpf iterator requires an additional parameter + *MAP* so bpf program can iterate over map elements for + that map. User can have a bpf program in kernel to run + with each map element, do checking, filtering, aggregation, + etc. without copying data to user space. + + User can then *cat PATH* to see the bpf iterator output. + + **bpftool iter help** + Print short help message. + +OPTIONS +======= + .. include:: common_options.rst + +EXAMPLES +======== +**# bpftool iter pin bpf_iter_netlink.o /sys/fs/bpf/my_netlink** + +:: + + Create a file-based bpf iterator from bpf_iter_netlink.o and pin it + to /sys/fs/bpf/my_netlink + +**# bpftool iter pin bpf_iter_hashmap.o /sys/fs/bpf/my_hashmap map id 20** + +:: + + Create a file-based bpf iterator from bpf_iter_hashmap.o and map with + id 20, and pin it to /sys/fs/bpf/my_hashmap diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst new file mode 100644 index 000000000000..5f7db2a837cc --- /dev/null +++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst @@ -0,0 +1,108 @@ +================ +bpftool-link +================ +------------------------------------------------------------------------------- +tool for inspection and simple manipulation of eBPF links +------------------------------------------------------------------------------- + +:Manual section: 8 + +SYNOPSIS +======== + + **bpftool** [*OPTIONS*] **link** *COMMAND* + + *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } + + *COMMANDS* := { **show** | **list** | **pin** | **help** } + +LINK COMMANDS +============= + +| **bpftool** **link { show | list }** [*LINK*] +| **bpftool** **link pin** *LINK* *FILE* +| **bpftool** **link detach** *LINK* +| **bpftool** **link help** +| +| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* } + + +DESCRIPTION +=========== + **bpftool link { show | list }** [*LINK*] + Show information about active links. If *LINK* is + specified show information only about given link, + otherwise list all links currently active on the system. + + Output will start with link ID followed by link type and + zero or more named attributes, some of which depend on type + of link. + + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + links. On such kernels bpftool will automatically emit this + information as well. + + **bpftool link pin** *LINK* *FILE* + Pin link *LINK* as *FILE*. + + Note: *FILE* must be located in *bpffs* mount. It must not + contain a dot character ('.'), which is reserved for future + extensions of *bpffs*. + + **bpftool link detach** *LINK* + Force-detach link *LINK*. BPF link and its underlying BPF + program will stay valid, but they will be detached from the + respective BPF hook and BPF link will transition into + a defunct state until last open file descriptor for that + link is closed. + + **bpftool link help** + Print short help message. + +OPTIONS +======= + .. include:: common_options.rst + + -f, --bpffs + When showing BPF links, show file names of pinned + links. + + -n, --nomount + Do not automatically attempt to mount any virtual file system + (such as tracefs or BPF virtual file system) when necessary. 
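+
+	The link IDs listed by **show**/**list** can also be walked
+	programmatically. Below is a minimal sketch using libbpf's
+	link-iteration APIs on a Linux 5.8+ kernel; the output formatting
+	is illustrative only:
+
+	::
+
+	  #include <stdio.h>
+	  #include <unistd.h>
+	  #include <bpf/bpf.h>
+	  #include <linux/bpf.h>
+
+	  int main(void)
+	  {
+	          __u32 id = 0;
+
+	          /* Walk all BPF link IDs known to the kernel. */
+	          while (!bpf_link_get_next_id(id, &id)) {
+	                  struct bpf_link_info info = {};
+	                  __u32 len = sizeof(info);
+	                  int fd = bpf_link_get_fd_by_id(id);
+
+	                  if (fd < 0)
+	                          continue;
+	                  if (!bpf_obj_get_info_by_fd(fd, &info, &len))
+	                          printf("%u: type %u  prog %u\n",
+	                                 info.id, info.type, info.prog_id);
+	                  close(fd);
+	          }
+	          return 0;
+	  }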
+ +EXAMPLES +======== +**# bpftool link show** + +:: + + 10: cgroup prog 25 + cgroup_id 614 attach_type egress + pids test_progs(223) + +**# bpftool --json --pretty link show** + +:: + + [{ + "type": "cgroup", + "prog_id": 25, + "cgroup_id": 614, + "attach_type": "egress", + "pids": [{ + "pid": 223, + "comm": "test_progs" + } + ] + } + ] + +| +| **# bpftool link pin id 10 /sys/fs/bpf/link** +| **# ls -l /sys/fs/bpf/** + +:: + + -rw------- 1 root root 0 Apr 23 21:39 link diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 1c0f7146aab0..dade10cdf295 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -21,9 +21,10 @@ SYNOPSIS MAP COMMANDS ============= -| **bpftool** **map { show | list }** [*MAP*] +| **bpftool** **map** { **show** | **list** } [*MAP*] | **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \ -| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*] +| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \ +| [**dev** *NAME*] | **bpftool** **map dump** *MAP* | **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] | **bpftool** **map lookup** *MAP* [**key** *DATA*] @@ -39,9 +40,9 @@ MAP COMMANDS | **bpftool** **map freeze** *MAP* | **bpftool** **map help** | -| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* } | *DATA* := { [**hex**] *BYTES* } -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } | *VALUE* := { *DATA* | *MAP* | *PROG* } | *UPDATE_FLAGS* := { **any** | **exist** | **noexist** } | *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash** @@ -49,24 +50,43 @@ MAP COMMANDS | | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps** | | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash** | | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** -| | **queue** | **stack** } +| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** } DESCRIPTION =========== **bpftool map { show | list }** [*MAP*] Show information about loaded maps. If *MAP* is specified - show information only about given map, otherwise list all - maps currently loaded on the system. + show information only about given maps, otherwise list all + maps currently loaded on the system. In case of **name**, + *MAP* may match several maps which will all be shown. Output will start with map ID followed by map type and zero or more named attributes (depending on kernel version). - **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*] + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + maps. On such kernels bpftool will automatically emit this + information as well. + + **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*] Create a new map with given parameters and pin it to *bpffs* as *FILE*. 
+
+		  *FLAGS* should be an integer which is the combination of
+		  desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
+		  UAPI header for existing flags).
+
+		  To create maps of type array-of-maps or hash-of-maps, the
+		  **inner_map** keyword must be used to pass an inner map. The
+		  kernel needs it to collect metadata related to the inner maps
+		  that the new map will work with.
+
+		  Keyword **dev** expects a network interface name, and is used
+		  to request hardware offload for the map.
+
 	**bpftool map dump** *MAP*
-		  Dump all entries in a given *MAP*.
+		  Dump all entries in a given *MAP*. In case of **name**,
+		  *MAP* may match several maps which will all be dumped.
 
 	**bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
 		  Update map entry for a given *KEY*.
@@ -76,7 +96,7 @@ DESCRIPTION
 		  exists; **noexist** update only if entry doesn't exist.
 
 		  If the **hex** keyword is provided in front of the bytes
-		  sequence, the bytes are parsed as hexadeximal values, even if
+		  sequence, the bytes are parsed as hexadecimal values, even if
 		  no "0x" prefix is added. If the keyword is not provided, then
 		  the bytes are parsed as decimal values, unless a "0x" prefix
 		  (for hexadecimal) or a "0" prefix (for octal) is provided.
@@ -98,10 +118,10 @@ DESCRIPTION
 		  extensions of *bpffs*.
 
 	**bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
-		  Read events from a BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
+		  Read events from a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map.
 
 		  Install perf rings into a perf event array map and dump
-		  output of any bpf_perf_event_output() call in the kernel.
+		  output of any **bpf_perf_event_output**\ () call in the kernel.
 		  By default read the number of CPUs on the system and
 		  install perf ring for each CPU in the corresponding index
 		  in the array.
@@ -114,24 +134,24 @@ DESCRIPTION
 		  receiving events if it installed its rings earlier.
 
 	**bpftool map peek** *MAP*
-		  Peek next **value** in the queue or stack.
+		  Peek next value in the queue or stack.
 
 	**bpftool map push** *MAP* **value** *VALUE*
-		  Push **value** onto the stack.
+		  Push *VALUE* onto the stack.
 
 	**bpftool map pop** *MAP*
-		  Pop and print **value** from the stack.
+		  Pop and print value from the stack.
 
 	**bpftool map enqueue** *MAP* **value** *VALUE*
-		  Enqueue **value** into the queue.
+		  Enqueue *VALUE* into the queue.
 
 	**bpftool map dequeue** *MAP*
-		  Dequeue and print **value** from the queue.
+		  Dequeue and print value from the queue.
 
 	**bpftool map freeze** *MAP*
 		  Freeze the map as read-only from user space. Entries from a
 		  frozen map can no longer be updated or deleted with the
-		  **bpf\ ()** system call. This operation is not reversible,
+		  **bpf**\ () system call. This operation is not reversible,
 		  and the map remains immutable from user space until its
 		  destruction. However, read and write permissions for BPF
 		  programs to the map remain unchanged.
@@ -141,18 +161,7 @@ DESCRIPTION
 
 OPTIONS
 =======
-	-h, --help
-		  Print short generic help message (similar to **bpftool help**).
-
-	-V, --version
-		  Print version number (similar to **bpftool version**).
-
-	-j, --json
-		  Generate JSON output. For commands that cannot produce JSON, this
-		  option has no effect.
-
-	-p, --pretty
-		  Generate human-readable JSON output. Implies **-j**.
+	.. include:: common_options.rst
 
 	-f, --bpffs
 		  Show file names of pinned maps.
@@ -161,17 +170,15 @@ OPTIONS
 		  Do not automatically attempt to mount any virtual file system
 		  (such as tracefs or BPF virtual file system) when necessary.
- -d, --debug - Print all logs available from libbpf, including debug-level - information. - EXAMPLES ======== **# bpftool map show** + :: 10: hash name some_map flags 0x0 - key 4B value 8B max_entries 2048 memlock 167936B + key 4B value 8B max_entries 2048 memlock 167936B + pids systemd(1) The following three commands are equivalent: @@ -188,6 +195,7 @@ The following three commands are equivalent: **# bpftool map dump id 10** + :: key: 00 01 02 03 value: 00 01 02 03 04 05 06 07 @@ -195,6 +203,7 @@ The following three commands are equivalent: Found 2 elements **# bpftool map getnext id 10 key 0 1 2 3** + :: key: @@ -261,15 +270,3 @@ would be lost as soon as bpftool exits). key: 00 00 00 00 value: 22 02 00 00 Found 1 element - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool**\ (8), - **bpftool-prog**\ (8), - **bpftool-cgroup**\ (8), - **bpftool-feature**\ (8), - **bpftool-net**\ (8), - **bpftool-perf**\ (8), - **bpftool-btf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 8651b00b81ea..d8165d530937 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -20,7 +20,7 @@ SYNOPSIS NET COMMANDS ============ -| **bpftool** **net { show | list }** [ **dev** *NAME* ] +| **bpftool** **net** { **show** | **list** } [ **dev** *NAME* ] | **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] | **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* | **bpftool** **net help** @@ -75,22 +75,7 @@ DESCRIPTION OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. - - -d, --debug - Print all logs available from libbpf, including debug-level - information. + .. include:: common_options.rst EXAMPLES ======== @@ -187,16 +172,3 @@ EXAMPLES :: xdp: - - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool**\ (8), - **bpftool-prog**\ (8), - **bpftool-map**\ (8), - **bpftool-cgroup**\ (8), - **bpftool-feature**\ (8), - **bpftool-perf**\ (8), - **bpftool-btf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst index e252bd0bc434..e958ce91de72 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst @@ -20,7 +20,7 @@ SYNOPSIS PERF COMMANDS ============= -| **bpftool** **perf { show | list }** +| **bpftool** **perf** { **show** | **list** } | **bpftool** **perf help** DESCRIPTION @@ -40,22 +40,7 @@ DESCRIPTION OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. - - -d, --debug - Print all logs available from libbpf, including debug-level - information. + .. 
include:: common_options.rst EXAMPLES ======== @@ -78,16 +63,3 @@ EXAMPLES {"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \ {"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \ {"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}] - - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool**\ (8), - **bpftool-prog**\ (8), - **bpftool-map**\ (8), - **bpftool-cgroup**\ (8), - **bpftool-feature**\ (8), - **bpftool-net**\ (8), - **bpftool-btf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 7a374b3c851d..358c7309d419 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -21,40 +21,48 @@ SYNOPSIS PROG COMMANDS ============= -| **bpftool** **prog { show | list }** [*PROG*] +| **bpftool** **prog** { **show** | **list** } [*PROG*] | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] +| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] | **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] | **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] | **bpftool** **prog tracelog** | **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*] +| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs* | **bpftool** **prog help** | | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } -| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } +| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } | *TYPE* := { | **socket** | **kprobe** | **kretprobe** | **classifier** | **action** | | **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** | | **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** | | **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** | | **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** | -| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | +| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** | +| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | | **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** | -| **cgroup/getsockopt** | **cgroup/setsockopt** +| **cgroup/getsockopt** | **cgroup/setsockopt** | +| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup** | } | *ATTACH_TYPE* := { | **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector** | } +| *METRICs* := { +| **cycles** | **instructions** | **l1d_loads** | **llc_misses** +| } DESCRIPTION =========== **bpftool prog { show | list }** [*PROG*] Show information about loaded programs. 
If *PROG* is - specified show information only about given program, otherwise - list all programs currently loaded on the system. + specified show information only about given programs, + otherwise list all programs currently loaded on the system. + In case of **tag** or **name**, *PROG* may match several + programs which will all be shown. Output will start with program ID followed by program type and zero or more named attributes (depending on kernel version). @@ -67,12 +75,21 @@ DESCRIPTION program run. Activation or deactivation of the feature is performed via the **kernel.bpf_stats_enabled** sysctl knob. + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + programs. On such kernels bpftool will automatically emit this + information as well. + **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }] - Dump eBPF instructions of the program from the kernel. By + Dump eBPF instructions of the programs from the kernel. By default, eBPF will be disassembled and printed to standard output in human-readable format. In this case, **opcodes** controls if raw opcodes should be printed as well. + In case of **tag** or **name**, *PROG* may match several + programs which will all be dumped. However, if **file** or + **visual** is specified, *PROG* must match a single program. + If **file** is specified, the binary image will instead be written to *FILE*. @@ -80,15 +97,17 @@ DESCRIPTION built instead, and eBPF instructions will be presented with CFG in DOT format, on standard output. - If the prog has line_info available, the source line will + If the programs have line_info available, the source line will be displayed by default. If **linum** is specified, the filename, line number and line column will also be displayed on top of the source line. **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }] Dump jited image (host machine code) of the program. + If *FILE* is specified image will be written to a file, otherwise it will be disassembled and printed to stdout. + *PROG* must match a single program when **file** is specified. **opcodes** controls if raw opcodes will be printed. @@ -142,7 +161,7 @@ DESCRIPTION **bpftool prog tracelog** Dump the trace pipe of the system to the console (stdout). Hit <Ctrl+C> to stop printing. BPF programs can write to this - trace pipe at runtime with the **bpf_trace_printk()** helper. + trace pipe at runtime with the **bpf_trace_printk**\ () helper. This should be used only for debugging purposes. For streaming data from BPF programs to user space, one can use perf events (see also **bpftool-map**\ (8)). @@ -180,23 +199,18 @@ DESCRIPTION not all of them can take the **ctx_in**/**ctx_out** arguments. bpftool does not perform checks on program types. + **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs* + Profile *METRICs* for bpf program *PROG* for *DURATION* + seconds or until user hits <Ctrl+C>. *DURATION* is optional. + If *DURATION* is not specified, the profiling will run up to + **UINT_MAX** seconds. + **bpftool prog help** Print short help message. OPTIONS ======= - -h, --help - Print short generic help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. + .. 
include:: common_options.rst
 
 	-f, --bpffs
 		  When showing BPF programs, show file names of pinned
 		  programs.
@@ -209,11 +223,6 @@ OPTIONS
 		  Do not automatically attempt to mount any virtual file system
 		  (such as tracefs or BPF virtual file system) when necessary.
 
-	-d, --debug
-		  Print all logs available, even debug-level information. This
-		  includes logs from libbpf as well as from the verifier, when
-		  attempting to load programs.
-
 EXAMPLES
 ========
 **# bpftool prog show**
@@ -223,6 +232,7 @@ EXAMPLES
     10: xdp  name some_prog  tag 005a3d2123620c8b  gpl run_time_ns 81632 run_cnt 10
             loaded_at 2017-09-29T20:11:00+0000  uid 0
             xlated 528B  jited 370B  memlock 4096B  map_ids 10
+            pids systemd(1)
 
 **# bpftool --json --pretty prog show**
 
@@ -242,13 +252,18 @@ EXAMPLES
                 "bytes_jited": 370,
                 "bytes_memlock": 4096,
                 "map_ids": [10
+                ],
+                "pids": [{
+                        "pid": 1,
+                        "comm": "systemd"
+                    }
                 ]
             }
 ]
 
 |
 | **# bpftool prog dump xlated id 10 file /tmp/t**
-| **# ls -l /tmp/t**
+| **$ ls -l /tmp/t**
 
 ::
 
@@ -302,14 +317,12 @@ EXAMPLES
 
 **# rm /sys/fs/bpf/xdp1**
 
-SEE ALSO
-========
-	**bpf**\ (2),
-	**bpf-helpers**\ (7),
-	**bpftool**\ (8),
-	**bpftool-map**\ (8),
-	**bpftool-cgroup**\ (8),
-	**bpftool-feature**\ (8),
-	**bpftool-net**\ (8),
-	**bpftool-perf**\ (8),
-	**bpftool-btf**\ (8)
+|
+| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
+
+::
+
+  51397 run_cnt
+  40176203 cycles         (83.05%)
+  42518139 instructions   # 1.06 insns per cycle   (83.39%)
+  123 llc_misses          # 2.89 LLC misses per million insns   (83.15%)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
new file mode 100644
index 000000000000..506e70ee78e9
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -0,0 +1,84 @@
+==================
+bpftool-struct_ops
+==================
+-------------------------------------------------------------------------------
+tool to register/unregister/introspect BPF struct_ops
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+	**bpftool** [*OPTIONS*] **struct_ops** *COMMAND*
+
+	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+	*COMMANDS* :=
+	{ **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+STRUCT_OPS COMMANDS
+===================
+
+|	**bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+|	**bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
+|	**bpftool** **struct_ops register** *OBJ*
+|	**bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
+|	**bpftool** **struct_ops help**
+|
+|	*STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* }
+|	*OBJ* := /a/file/of/bpf_struct_ops.o
+
+
+DESCRIPTION
+===========
+	**bpftool struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+		  Show brief information about the struct_ops in the system.
+		  If *STRUCT_OPS_MAP* is specified, it shows information only
+		  for the given struct_ops. Otherwise, it lists all struct_ops
+		  currently existing in the system.
+
+		  Output will start with the struct_ops map ID, followed by its
+		  map name and its struct_ops's kernel type.
+
+	**bpftool struct_ops dump** [*STRUCT_OPS_MAP*]
+		  Dump detailed information about the struct_ops in the system.
+		  If *STRUCT_OPS_MAP* is specified, it dumps information only
+		  for the given struct_ops. Otherwise, it dumps all struct_ops
+		  currently existing in the system.
+
+	**bpftool struct_ops register** *OBJ*
+		  Register bpf struct_ops from *OBJ*.
All struct_ops under + the ELF section ".struct_ops" will be registered to + its kernel subsystem. + + **bpftool struct_ops unregister** *STRUCT_OPS_MAP* + Unregister the *STRUCT_OPS_MAP* from the kernel subsystem. + + **bpftool struct_ops help** + Print short help message. + +OPTIONS +======= + .. include:: common_options.rst + +EXAMPLES +======== +**# bpftool struct_ops show** + +:: + + 100: dctcp tcp_congestion_ops + 105: cubic tcp_congestion_ops + +**# bpftool struct_ops unregister id 105** + +:: + + Unregistered tcp_congestion_ops cubic id 105 + +**# bpftool struct_ops register bpf_cubic.o** + +:: + + Registered tcp_congestion_ops cubic id 110 diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 6a9c52ef84a9..e7d949334961 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -46,18 +46,7 @@ DESCRIPTION OPTIONS ======= - -h, --help - Print short help message (similar to **bpftool help**). - - -V, --version - Print version number (similar to **bpftool version**). - - -j, --json - Generate JSON output. For commands that cannot produce JSON, this - option has no effect. - - -p, --pretty - Generate human-readable JSON output. Implies **-j**. + .. include:: common_options.rst -m, --mapcompat Allow loading maps with unknown map definitions. @@ -65,20 +54,3 @@ OPTIONS -n, --nomount Do not automatically attempt to mount any virtual file system (such as tracefs or BPF virtual file system) when necessary. - - -d, --debug - Print all logs available, even debug-level information. This - includes logs from libbpf as well as from the verifier, when - attempting to load programs. - -SEE ALSO -======== - **bpf**\ (2), - **bpf-helpers**\ (7), - **bpftool-prog**\ (8), - **bpftool-map**\ (8), - **bpftool-cgroup**\ (8), - **bpftool-feature**\ (8), - **bpftool-net**\ (8), - **bpftool-perf**\ (8), - **bpftool-btf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst new file mode 100644 index 000000000000..05d06c74dcaa --- /dev/null +++ b/tools/bpf/bpftool/Documentation/common_options.rst @@ -0,0 +1,22 @@ +-h, --help + Print short help message (similar to **bpftool help**). + +-V, --version + Print version number (similar to **bpftool version**), and optional + features that were included when bpftool was compiled. Optional + features include linking against libbfd to provide the disassembler + for JIT-ted programs (**bpftool prog dump jited**) and usage of BPF + skeletons (some features like **bpftool prog profile** or showing + pids associated to BPF objects may rely on it). + +-j, --json + Generate JSON output. For commands that cannot produce JSON, this + option has no effect. + +-p, --pretty + Generate human-readable JSON output. Implies **-j**. + +-d, --debug + Print all logs available, even debug-level information. This includes + logs from libbpf as well as from the verifier, when attempting to + load programs. diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 39bc6f0f4f0b..f60e6ad3a1df 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -25,7 +25,7 @@ endif LIBBPF = $(LIBBPF_PATH)libbpf.a -BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion) +BPFTOOL_VERSION ?= $(shell make -rR --no-print-directory -sC ../../.. 
kernelversion) $(LIBBPF): FORCE $(if $(LIBBPF_OUTPUT),@mkdir -p $(LIBBPF_OUTPUT)) @@ -40,12 +40,13 @@ bash_compdir ?= /usr/share/bash-completion/completions CFLAGS += -O2 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers -CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS)) +CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS)) CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ + -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/kernel/bpf/ \ -I$(srctree)/tools/include \ -I$(srctree)/tools/include/uapi \ - -I$(srctree)/tools/lib/bpf \ + -I$(srctree)/tools/lib \ -I$(srctree)/tools/perf CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"' ifneq ($(EXTRA_CFLAGS),) @@ -55,14 +56,16 @@ ifneq ($(EXTRA_LDFLAGS),) LDFLAGS += $(EXTRA_LDFLAGS) endif -LIBS = $(LIBBPF) -lelf -lz - INSTALL ?= install RM ?= rm -f +CLANG ?= clang +LLVM_STRIP ?= llvm-strip FEATURE_USER = .bpftool -FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib -FEATURE_DISPLAY = libbfd disassembler-four-args zlib +FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib libcap \ + clang-bpf-co-re +FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \ + clang-bpf-co-re check_feat := 1 NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall @@ -88,6 +91,12 @@ ifeq ($(feature-reallocarray), 0) CFLAGS += -DCOMPAT_NEED_REALLOCARRAY endif +LIBS = $(LIBBPF) -lelf -lz +ifeq ($(feature-libcap), 1) +CFLAGS += -DUSE_LIBCAP +LIBS += -lcap +endif + include $(wildcard $(OUTPUT)*.d) all: $(OUTPUT)bpftool @@ -109,22 +118,72 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT SRCS += $(BFD_SRCS) endif +BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstrap) + +BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) + +ifneq ($(VMLINUX_BTF)$(VMLINUX_H),) +ifeq ($(feature-clang-bpf-co-re),1) + +BUILD_BPF_SKELS := 1 + +$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP) +ifeq ($(VMLINUX_H),) + $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@ +else + $(Q)cp "$(VMLINUX_H)" $@ +endif + +$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) + $(QUIET_CLANG)$(CLANG) \ + -I$(if $(OUTPUT),$(OUTPUT),.) 
\ + -I$(srctree)/tools/include/uapi/ \ + -I$(LIBBPF_PATH) \ + -I$(srctree)/tools/lib \ + -g -O2 -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@ + +$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) + $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@ + +$(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h + +$(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h + +endif +endif + +CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS) + $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c - $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< $(OUTPUT)feature.o: | zdep +$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) $(LIBS) + $(OUTPUT)bpftool: $(OBJS) $(LIBBPF) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS) $(OUTPUT)%.o: %.c - $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< -clean: $(LIBBPF)-clean +feature-detect-clean: + $(call QUIET_CLEAN, feature-detect) + $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null + +clean: $(LIBBPF)-clean feature-detect-clean $(call QUIET_CLEAN, bpftool) $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d + $(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h $(Q)$(RM) -r -- $(OUTPUT)libbpf/ $(call QUIET_CLEAN, core-gen) $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool @@ -159,6 +218,7 @@ FORCE: zdep: @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi +.SECONDARY: .PHONY: all FORCE clean install uninstall zdep .PHONY: doc doc-clean doc-install doc-uninstall .DEFAULT_GOAL := all diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 70493a6da206..3f1da30c4da6 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -59,6 +59,21 @@ _bpftool_get_map_ids_for_type() command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_map_names() +{ + COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \ + command sed -n 's/.*"name": \(.*\),$/\1/p' )" -- "$cur" ) ) +} + +# Takes map type and adds matching map names to the list of suggestions. +_bpftool_get_map_names_for_type() +{ + local type="$1" + COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \ + command grep -C2 "$type" | \ + command sed -n 's/.*"name": \(.*\),$/\1/p' )" -- "$cur" ) ) +} + _bpftool_get_prog_ids() { COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \ @@ -71,12 +86,24 @@ _bpftool_get_prog_tags() command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_prog_names() +{ + COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \ + command sed -n 's/.*"name": "\(.*\)",$/\1/p' )" -- "$cur" ) ) +} + _bpftool_get_btf_ids() { COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) } +_bpftool_get_link_ids() +{ + COMPREPLY+=( $( compgen -W "$( bpftool -jp link 2>&1 | \ + command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) +} + _bpftool_get_obj_map_names() { local obj @@ -180,6 +207,52 @@ _bpftool_map_update_get_id() esac } +_bpftool_map_update_get_name() +{ + local command="$1" + + # Is it the map to update, or a map to insert into the map to update? + # Search for "value" keyword. 
+ local idx value + for (( idx=7; idx < ${#words[@]}-1; idx++ )); do + if [[ ${words[idx]} == "value" ]]; then + value=1 + break + fi + done + if [[ $value -eq 0 ]]; then + case "$command" in + push) + _bpftool_get_map_names_for_type stack + ;; + enqueue) + _bpftool_get_map_names_for_type queue + ;; + *) + _bpftool_get_map_names + ;; + esac + return 0 + fi + + # Name to complete is for a value. It can be either prog name or map name. This + # depends on the type of the map to update. + local type=$(_bpftool_map_guess_map_type) + case $type in + array_of_maps|hash_of_maps) + _bpftool_get_map_names + return 0 + ;; + prog_array) + _bpftool_get_prog_names + return 0 + ;; + *) + return 0 + ;; + esac +} + _bpftool() { local cur prev words objword @@ -251,7 +324,8 @@ _bpftool() # Completion depends on object and command in use case $object in prog) - # Complete id, only for subcommands that use prog (but no map) ids + # Complete id and name, only for subcommands that use prog (but no + # map) ids/names. case $command in show|list|dump|pin) case $prev in @@ -259,12 +333,17 @@ _bpftool() _bpftool_get_prog_ids return 0 ;; + name) + _bpftool_get_prog_names + return 0 + ;; esac ;; esac - local PROG_TYPE='id pinned tag' - local MAP_TYPE='id pinned' + local PROG_TYPE='id pinned tag name' + local MAP_TYPE='id pinned name' + local METRIC_TYPE='cycles instructions l1d_loads llc_misses' case $command in show|list) [[ $prev != "$command" ]] && return 0 @@ -315,6 +394,9 @@ _bpftool() id) _bpftool_get_prog_ids ;; + name) + _bpftool_get_prog_names + ;; pinned) _filedir ;; @@ -335,6 +417,9 @@ _bpftool() id) _bpftool_get_map_ids ;; + name) + _bpftool_get_map_names + ;; pinned) _filedir ;; @@ -387,11 +472,14 @@ _bpftool() lwt_seg6local sockops sk_skb sk_msg \ lirc_mode2 cgroup/bind4 cgroup/bind6 \ cgroup/connect4 cgroup/connect6 \ + cgroup/getpeername4 cgroup/getpeername6 \ + cgroup/getsockname4 cgroup/getsockname6 \ cgroup/sendmsg4 cgroup/sendmsg6 \ cgroup/recvmsg4 cgroup/recvmsg6 \ cgroup/post_bind4 cgroup/post_bind6 \ cgroup/sysctl cgroup/getsockopt \ - cgroup/setsockopt" -- \ + cgroup/setsockopt struct_ops \ + fentry fexit freplace sk_lookup" -- \ "$cur" ) ) return 0 ;; @@ -399,6 +487,10 @@ _bpftool() _bpftool_get_map_ids return 0 ;; + name) + _bpftool_get_map_names + return 0 + ;; pinned|pinmaps) _filedir return 0 @@ -415,9 +507,51 @@ _bpftool() tracelog) return 0 ;; + profile) + case $cword in + 3) + COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) ) + return 0 + ;; + 4) + case $prev in + id) + _bpftool_get_prog_ids + ;; + name) + _bpftool_get_prog_names + ;; + pinned) + _filedir + ;; + esac + return 0 + ;; + 5) + COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) ) + return 0 + ;; + 6) + case $prev in + duration) + return 0 + ;; + *) + COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) ) + return 0 + ;; + esac + return 0 + ;; + *) + COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) ) + return 0 + ;; + esac + ;; run) - if [[ ${#words[@]} -lt 5 ]]; then - _filedir + if [[ ${#words[@]} -eq 4 ]]; then + COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) ) return 0 fi case $prev in @@ -425,6 +559,10 @@ _bpftool() _bpftool_get_prog_ids return 0 ;; + name) + _bpftool_get_prog_names + return 0 + ;; data_in|data_out|ctx_in|ctx_out) _filedir return 0 @@ -442,12 +580,69 @@ _bpftool() *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'dump help pin attach detach \ - load loadall show list tracelog run' -- "$cur" ) ) + load loadall show list tracelog run profile' -- "$cur" ) ) + ;; + esac + ;; + 
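+        # struct_ops completion below: the "id" and "name" keywords are
+        # completed from `bpftool -jp map` output filtered to struct_ops-typed
+        # maps, via _bpftool_get_map_ids_for_type and
+        # _bpftool_get_map_names_for_type.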
struct_ops) + local STRUCT_OPS_TYPE='id name' + case $command in + show|list|dump|unregister) + case $prev in + $command) + COMPREPLY=( $( compgen -W "$STRUCT_OPS_TYPE" -- "$cur" ) ) + ;; + id) + _bpftool_get_map_ids_for_type struct_ops + ;; + name) + _bpftool_get_map_names_for_type struct_ops + ;; + esac + return 0 + ;; + register) + _filedir + return 0 + ;; + *) + [[ $prev == $object ]] && \ + COMPREPLY=( $( compgen -W 'register unregister show list dump help' \ + -- "$cur" ) ) + ;; + esac + ;; + iter) + case $command in + pin) + case $prev in + $command) + _filedir + ;; + id) + _bpftool_get_map_ids + ;; + name) + _bpftool_get_map_names + ;; + pinned) + _filedir + ;; + *) + _bpftool_one_of_list $MAP_TYPE + ;; + esac + return 0 + ;; + *) + [[ $prev == $object ]] && \ + COMPREPLY=( $( compgen -W 'pin help' \ + -- "$cur" ) ) ;; esac ;; map) - local MAP_TYPE='id pinned' + local MAP_TYPE='id pinned name' case $command in show|list|dump|peek|pop|dequeue|freeze) case $prev in @@ -473,6 +668,24 @@ _bpftool() esac return 0 ;; + name) + case "$command" in + peek) + _bpftool_get_map_names_for_type stack + _bpftool_get_map_names_for_type queue + ;; + pop) + _bpftool_get_map_names_for_type stack + ;; + dequeue) + _bpftool_get_map_names_for_type queue + ;; + *) + _bpftool_get_map_names + ;; + esac + return 0 + ;; *) return 0 ;; @@ -491,13 +704,31 @@ _bpftool() lru_percpu_hash lpm_trie array_of_maps \ hash_of_maps devmap devmap_hash sockmap cpumap \ xskmap sockhash cgroup_storage reuseport_sockarray \ - percpu_cgroup_storage queue stack' -- \ + percpu_cgroup_storage queue stack sk_storage \ + struct_ops inode_storage' -- \ "$cur" ) ) return 0 ;; - key|value|flags|name|entries) + key|value|flags|entries) return 0 ;; + inner_map) + COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) + return 0 + ;; + id) + _bpftool_get_map_ids + ;; + name) + case $pprev in + inner_map) + _bpftool_get_map_names + ;; + *) + return 0 + ;; + esac + ;; *) _bpftool_once_attr 'type' _bpftool_once_attr 'key' @@ -505,6 +736,9 @@ _bpftool() _bpftool_once_attr 'entries' _bpftool_once_attr 'name' _bpftool_once_attr 'flags' + if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then + _bpftool_once_attr 'inner_map' + fi _bpftool_once_attr 'dev' return 0 ;; @@ -520,6 +754,10 @@ _bpftool() _bpftool_get_map_ids return 0 ;; + name) + _bpftool_get_map_names + return 0 + ;; key) COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) ) ;; @@ -545,6 +783,10 @@ _bpftool() _bpftool_map_update_get_id $command return 0 ;; + name) + _bpftool_map_update_get_name $command + return 0 + ;; key) COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) ) ;; @@ -553,13 +795,13 @@ _bpftool() # map, depending on the type of the map to update. 
case "$(_bpftool_map_guess_map_type)" in array_of_maps|hash_of_maps) - local MAP_TYPE='id pinned' + local MAP_TYPE='id pinned name' COMPREPLY+=( $( compgen -W "$MAP_TYPE" \ -- "$cur" ) ) return 0 ;; prog_array) - local PROG_TYPE='id pinned tag' + local PROG_TYPE='id pinned tag name' COMPREPLY+=( $( compgen -W "$PROG_TYPE" \ -- "$cur" ) ) return 0 @@ -604,11 +846,17 @@ _bpftool() esac ;; pin) - if [[ $prev == "$command" ]]; then - COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) ) - else - _filedir - fi + case $prev in + $command) + COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) + ;; + id) + _bpftool_get_map_ids + ;; + name) + _bpftool_get_map_names + ;; + esac return 0 ;; event_pipe) @@ -621,6 +869,10 @@ _bpftool() _bpftool_get_map_ids_for_type perf_event_array return 0 ;; + name) + _bpftool_get_map_names_for_type perf_event_array + return 0 + ;; cpu) return 0 ;; @@ -644,8 +896,8 @@ _bpftool() esac ;; btf) - local PROG_TYPE='id pinned tag' - local MAP_TYPE='id pinned' + local PROG_TYPE='id pinned tag name' + local MAP_TYPE='id pinned name' case $command in dump) case $prev in @@ -676,6 +928,17 @@ _bpftool() esac return 0 ;; + name) + case $pprev in + prog) + _bpftool_get_prog_names + ;; + map) + _bpftool_get_map_names + ;; + esac + return 0 + ;; format) COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) ) ;; @@ -716,6 +979,17 @@ _bpftool() ;; esac ;; + gen) + case $command in + skeleton) + _filedir + ;; + *) + [[ $prev == $object ]] && \ + COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) ) + ;; + esac + ;; cgroup) case $command in show|list|tree) @@ -731,20 +1005,21 @@ _bpftool() ;; attach|detach) local ATTACH_TYPES='ingress egress sock_create sock_ops \ - device bind4 bind6 post_bind4 post_bind6 connect4 \ - connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl \ - getsockopt setsockopt' + device bind4 bind6 post_bind4 post_bind6 connect4 connect6 \ + getpeername4 getpeername6 getsockname4 getsockname6 \ + sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \ + setsockopt' local ATTACH_FLAGS='multi override' - local PROG_TYPE='id pinned tag' + local PROG_TYPE='id pinned tag name' case $prev in $command) _filedir return 0 ;; ingress|egress|sock_create|sock_ops|device|bind4|bind6|\ - post_bind4|post_bind6|connect4|connect6|sendmsg4|\ - sendmsg6|recvmsg4|recvmsg6|sysctl|getsockopt|\ - setsockopt) + post_bind4|post_bind6|connect4|connect6|getpeername4|\ + getpeername6|getsockname4|getsockname6|sendmsg4|sendmsg6|\ + recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt) COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \ "$cur" ) ) return 0 @@ -760,7 +1035,7 @@ _bpftool() elif [[ "$command" == "attach" ]]; then # We have an attach type on the command line, # but it is not the previous word, or - # "id|pinned|tag" (we already checked for + # "id|pinned|tag|name" (we already checked for # that). This should only leave the case when # we need attach flags for "attach" commamnd. 
_bpftool_one_of_list "$ATTACH_FLAGS" @@ -786,7 +1061,7 @@ _bpftool() esac ;; net) - local PROG_TYPE='id pinned tag' + local PROG_TYPE='id pinned tag name' local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload' case $command in show|list) @@ -809,6 +1084,9 @@ _bpftool() id) _bpftool_get_prog_ids ;; + name) + _bpftool_get_prog_names + ;; pinned) _filedir ;; @@ -849,11 +1127,12 @@ _bpftool() probe) [[ $prev == "prefix" ]] && return 0 if _bpftool_search_list 'macros'; then - COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) ) + _bpftool_once_attr 'prefix' else COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) ) fi _bpftool_one_of_list 'kernel dev' + _bpftool_once_attr 'full unprivileged' return 0 ;; *) @@ -862,6 +1141,39 @@ _bpftool() ;; esac ;; + link) + case $command in + show|list|pin|detach) + case $prev in + id) + _bpftool_get_link_ids + return 0 + ;; + esac + ;; + esac + + local LINK_TYPE='id pinned' + case $command in + show|list) + [[ $prev != "$command" ]] && return 0 + COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) ) + return 0 + ;; + pin|detach) + if [[ $prev == "$command" ]]; then + COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) ) + else + _filedir + fi + return 0 + ;; + *) + [[ $prev == $object ]] && \ + COMPREPLY=( $( compgen -W 'help pin show list' -- "$cur" ) ) + ;; + esac + ;; esac } && complete -F _bpftool bpftool diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 9a9376d1d3df..2afb7d5b1aca 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -8,12 +8,14 @@ #include <stdio.h> #include <string.h> #include <unistd.h> -#include <bpf.h> -#include <libbpf.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> #include <linux/btf.h> #include <linux/hashtable.h> +#include <sys/types.h> +#include <sys/stat.h> -#include "btf.h" #include "json_writer.h" #include "main.h" @@ -74,6 +76,20 @@ static const char *btf_var_linkage_str(__u32 linkage) } } +static const char *btf_func_linkage_str(const struct btf_type *t) +{ + switch (btf_vlen(t)) { + case BTF_FUNC_STATIC: + return "static"; + case BTF_FUNC_GLOBAL: + return "global"; + case BTF_FUNC_EXTERN: + return "extern"; + default: + return "(unknown)"; + } +} + static const char *btf_str(const struct btf *btf, __u32 off) { if (!off) @@ -228,12 +244,17 @@ static int dump_btf_type(const struct btf *btf, __u32 id, printf(" fwd_kind=%s", fwd_kind); break; } - case BTF_KIND_FUNC: - if (json_output) + case BTF_KIND_FUNC: { + const char *linkage = btf_func_linkage_str(t); + + if (json_output) { jsonw_uint_field(w, "type_id", t->type); - else - printf(" type_id=%u", t->type); + jsonw_string_field(w, "linkage", linkage); + } else { + printf(" type_id=%u linkage=%s", t->type, linkage); + } break; + } case BTF_KIND_FUNC_PROTO: { const struct btf_param *p = (const void *)(t + 1); __u16 vlen = BTF_INFO_VLEN(t->info); @@ -367,6 +388,13 @@ static int dump_btf_c(const struct btf *btf, if (IS_ERR(d)) return PTR_ERR(d); + printf("#ifndef __VMLINUX_H__\n"); + printf("#define __VMLINUX_H__\n"); + printf("\n"); + printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); + printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n"); + printf("#endif\n\n"); + if (root_type_cnt) { for (i = 0; i < root_type_cnt; i++) { err = btf_dump__dump_type(d, root_type_ids[i]); @@ -383,6 +411,12 @@ static int dump_btf_c(const struct btf *btf, } } + printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); + printf("#pragma clang attribute pop\n"); + printf("#endif\n"); + printf("\n"); + 
printf("#endif /* __VMLINUX_H__ */\n"); + done: btf_dump__free(d); return err; @@ -465,11 +499,11 @@ static int do_dump(int argc, char **argv) } NEXT_ARG(); } else if (is_prefix(src, "file")) { - btf = btf__parse_elf(*argv, NULL); + btf = btf__parse(*argv, NULL); if (IS_ERR(btf)) { - err = PTR_ERR(btf); + err = -PTR_ERR(btf); btf = NULL; - p_err("failed to load BTF from %s: %s", + p_err("failed to load BTF from %s: %s", *argv, strerror(err)); goto done; } @@ -510,7 +544,7 @@ static int do_dump(int argc, char **argv) goto done; } if (!btf) { - err = ENOENT; + err = -ENOENT; p_err("can't find btf with ID (%u)", btf_id); goto done; } @@ -659,6 +693,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type, obj_node = calloc(1, sizeof(*obj_node)); if (!obj_node) { p_err("failed to allocate memory: %s", strerror(errno)); + err = -ENOMEM; goto err_free; } @@ -723,6 +758,7 @@ show_btf_plain(struct bpf_btf_info *info, int fd, printf("%s%u", n++ == 0 ? " map_ids " : ",", obj->obj_id); } + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); printf("\n"); } @@ -755,6 +791,9 @@ show_btf_json(struct bpf_btf_info *info, int fd, jsonw_uint(json_wtr, obj->obj_id); } jsonw_end_array(json_wtr); /* map_ids */ + + emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */ + jsonw_end_object(json_wtr); /* btf object */ } @@ -807,6 +846,7 @@ static int do_show(int argc, char **argv) close(fd); return err; } + build_obj_refs_table(&refs_table, BPF_OBJ_BTF); if (fd >= 0) { err = show_btf(fd, &btf_prog_table, &btf_map_table); @@ -853,6 +893,7 @@ static int do_show(int argc, char **argv) exit_free: delete_btf_table(&btf_prog_table); delete_btf_table(&btf_map_table); + delete_obj_refs_table(&refs_table); return err; } @@ -865,9 +906,9 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s btf { show | list } [id BTF_ID]\n" - " %s btf dump BTF_SRC [format FORMAT]\n" - " %s btf help\n" + "Usage: %1$s %2$s { show | list } [id BTF_ID]\n" + " %1$s %2$s dump BTF_SRC [format FORMAT]\n" + " %1$s %2$s help\n" "\n" " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n" " FORMAT := { raw | c }\n" @@ -875,7 +916,7 @@ static int do_help(int argc, char **argv) " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS "\n" "", - bin_name, bin_name, bin_name); + bin_name, "btf"); return 0; } diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 397e5716ab6d..0e9310727281 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -4,12 +4,14 @@ #include <ctype.h> #include <stdio.h> /* for (FILE *) used by json_writer */ #include <string.h> +#include <unistd.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/btf.h> #include <linux/err.h> +#include <bpf/btf.h> +#include <bpf/bpf.h> -#include "btf.h" #include "json_writer.h" #include "main.h" @@ -22,13 +24,102 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, __u8 bit_offset, const void *data); -static void btf_dumper_ptr(const void *data, json_writer_t *jw, - bool is_plain_text) +static int btf_dump_func(const struct btf *btf, char *func_sig, + const struct btf_type *func_proto, + const struct btf_type *func, int pos, int size); + +static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, + const struct btf_type *func_proto, + __u32 prog_id) { - if (is_plain_text) - jsonw_printf(jw, "%p", *(void **)data); + struct bpf_prog_info_linear *prog_info = NULL; + const struct btf_type *func_type; + const char 
*prog_name = NULL; + struct bpf_func_info *finfo; + struct btf *prog_btf = NULL; + struct bpf_prog_info *info; + int prog_fd, func_sig_len; + char prog_str[1024]; + + /* Get the ptr's func_proto */ + func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0, + sizeof(prog_str)); + if (func_sig_len == -1) + return -1; + + if (!prog_id) + goto print; + + /* Get the bpf_prog's name. Obtain from func_info. */ + prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (prog_fd == -1) + goto print; + + prog_info = bpf_program__get_prog_info_linear(prog_fd, + 1UL << BPF_PROG_INFO_FUNC_INFO); + close(prog_fd); + if (IS_ERR(prog_info)) { + prog_info = NULL; + goto print; + } + info = &prog_info->info; + + if (!info->btf_id || !info->nr_func_info || + btf__get_from_id(info->btf_id, &prog_btf)) + goto print; + finfo = u64_to_ptr(info->func_info); + func_type = btf__type_by_id(prog_btf, finfo->type_id); + if (!func_type || !btf_is_func(func_type)) + goto print; + + prog_name = btf__name_by_offset(prog_btf, func_type->name_off); + +print: + if (!prog_id) + snprintf(&prog_str[func_sig_len], + sizeof(prog_str) - func_sig_len, " 0"); + else if (prog_name) + snprintf(&prog_str[func_sig_len], + sizeof(prog_str) - func_sig_len, + " %s/prog_id:%u", prog_name, prog_id); else - jsonw_printf(jw, "%lu", *(unsigned long *)data); + snprintf(&prog_str[func_sig_len], + sizeof(prog_str) - func_sig_len, + " <unknown_prog_name>/prog_id:%u", prog_id); + + prog_str[sizeof(prog_str) - 1] = '\0'; + jsonw_string(d->jw, prog_str); + btf__free(prog_btf); + free(prog_info); + return 0; +} + +static void btf_dumper_ptr(const struct btf_dumper *d, + const struct btf_type *t, + const void *data) +{ + unsigned long value = *(unsigned long *)data; + const struct btf_type *ptr_type; + __s32 ptr_type_id; + + if (!d->prog_id_as_func_ptr || value > UINT32_MAX) + goto print_ptr_value; + + ptr_type_id = btf__resolve_type(d->btf, t->type); + if (ptr_type_id < 0) + goto print_ptr_value; + ptr_type = btf__type_by_id(d->btf, ptr_type_id); + if (!ptr_type || !btf_is_func_proto(ptr_type)) + goto print_ptr_value; + + if (!dump_prog_id_as_func_ptr(d, ptr_type, value)) + return; + +print_ptr_value: + if (d->is_plain_text) + jsonw_printf(d->jw, "%p", (void *)value); + else + jsonw_printf(d->jw, "%lu", value); } static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, @@ -43,9 +134,78 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, return btf_dumper_do_type(d, actual_type_id, bit_offset, data); } -static void btf_dumper_enum(const void *data, json_writer_t *jw) +static int btf_dumper_enum(const struct btf_dumper *d, + const struct btf_type *t, + const void *data) { - jsonw_printf(jw, "%d", *(int *)data); + const struct btf_enum *enums = btf_enum(t); + __s64 value; + __u16 i; + + switch (t->size) { + case 8: + value = *(__s64 *)data; + break; + case 4: + value = *(__s32 *)data; + break; + case 2: + value = *(__s16 *)data; + break; + case 1: + value = *(__s8 *)data; + break; + default: + return -EINVAL; + } + + for (i = 0; i < btf_vlen(t); i++) { + if (value == enums[i].val) { + jsonw_string(d->jw, + btf__name_by_offset(d->btf, + enums[i].name_off)); + return 0; + } + } + + jsonw_int(d->jw, value); + return 0; +} + +static bool is_str_array(const struct btf *btf, const struct btf_array *arr, + const char *s) +{ + const struct btf_type *elem_type; + const char *end_s; + + if (!arr->nelems) + return false; + + elem_type = btf__type_by_id(btf, arr->type); + /* Not skipping typedef. 
typedef to char does not count as + * a string now. + */ + while (elem_type && btf_is_mod(elem_type)) + elem_type = btf__type_by_id(btf, elem_type->type); + + if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1) + return false; + + if (btf_int_encoding(elem_type) != BTF_INT_CHAR && + strcmp("char", btf__name_by_offset(btf, elem_type->name_off))) + return false; + + end_s = s + arr->nelems; + while (s < end_s) { + if (!*s) + return true; + if (*s <= 0x1f || *s >= 0x7f) + return false; + s++; + } + + /* '\0' is not found */ + return false; } static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id, @@ -57,6 +217,11 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id, int ret = 0; __u32 i; + if (is_str_array(d->btf, arr, data)) { + jsonw_string(d->jw, data); + return 0; + } + elem_size = btf__resolve_size(d->btf, arr->type); if (elem_size < 0) return elem_size; @@ -106,8 +271,8 @@ static void btf_int128_print(json_writer_t *jw, const void *data, } } -static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits, - u16 right_shift_bits) +static void btf_int128_shift(__u64 *print_num, __u16 left_shift_bits, + __u16 right_shift_bits) { __u64 upper_num, lower_num; @@ -366,10 +531,9 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, case BTF_KIND_ARRAY: return btf_dumper_array(d, type_id, data); case BTF_KIND_ENUM: - btf_dumper_enum(data, d->jw); - return 0; + return btf_dumper_enum(d, t, data); case BTF_KIND_PTR: - btf_dumper_ptr(data, d->jw, d->is_plain_text); + btf_dumper_ptr(d, t, data); return 0; case BTF_KIND_UNKN: jsonw_printf(d->jw, "(unknown)"); @@ -414,10 +578,6 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id, return -1; \ } while (0) -static int btf_dump_func(const struct btf *btf, char *func_sig, - const struct btf_type *func_proto, - const struct btf_type *func, int pos, int size); - static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig, int pos, int size) { @@ -526,8 +686,15 @@ static int btf_dump_func(const struct btf *btf, char *func_sig, BTF_PRINT_ARG(", "); if (arg->type) { BTF_PRINT_TYPE(arg->type); - BTF_PRINT_ARG("%s", - btf__name_by_offset(btf, arg->name_off)); + if (arg->name_off) + BTF_PRINT_ARG("%s", + btf__name_by_offset(btf, arg->name_off)); + else if (pos && func_sig[pos - 1] == ' ') + /* Remove unnecessary space for + * FUNC_PROTO that does not have + * arg->name_off + */ + func_sig[--pos] = '\0'; } else { BTF_PRINT_ARG("..."); } diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c index 3e21f994f262..1951219a9af7 100644 --- a/tools/bpf/bpftool/cfg.c +++ b/tools/bpf/bpftool/cfg.c @@ -157,7 +157,7 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur, return false; } -static bool is_jmp_insn(u8 code) +static bool is_jmp_insn(__u8 code) { return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32; } @@ -176,7 +176,7 @@ static bool func_partition_bb_head(struct func_node *func) for (; cur <= end; cur++) { if (is_jmp_insn(cur->code)) { - u8 opcode = BPF_OP(cur->code); + __u8 opcode = BPF_OP(cur->code); if (opcode == BPF_EXIT || opcode == BPF_CALL) continue; diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 2f017caa678d..d901cc1b904a 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -14,7 +14,7 @@ #include <sys/types.h> #include <unistd.h> -#include <bpf.h> +#include <bpf/bpf.h> #include "main.h" @@ -25,48 +25,27 @@ " ATTACH_TYPE := { ingress | egress | 
sock_create |\n" \ " sock_ops | device | bind4 | bind6 |\n" \ " post_bind4 | post_bind6 | connect4 |\n" \ - " connect6 | sendmsg4 | sendmsg6 |\n" \ - " recvmsg4 | recvmsg6 | sysctl |\n" \ - " getsockopt | setsockopt }" + " connect6 | getpeername4 | getpeername6 |\n" \ + " getsockname4 | getsockname6 | sendmsg4 |\n" \ + " sendmsg6 | recvmsg4 | recvmsg6 |\n" \ + " sysctl | getsockopt | setsockopt }" static unsigned int query_flags; -static const char * const attach_type_strings[] = { - [BPF_CGROUP_INET_INGRESS] = "ingress", - [BPF_CGROUP_INET_EGRESS] = "egress", - [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", - [BPF_CGROUP_SOCK_OPS] = "sock_ops", - [BPF_CGROUP_DEVICE] = "device", - [BPF_CGROUP_INET4_BIND] = "bind4", - [BPF_CGROUP_INET6_BIND] = "bind6", - [BPF_CGROUP_INET4_CONNECT] = "connect4", - [BPF_CGROUP_INET6_CONNECT] = "connect6", - [BPF_CGROUP_INET4_POST_BIND] = "post_bind4", - [BPF_CGROUP_INET6_POST_BIND] = "post_bind6", - [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4", - [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6", - [BPF_CGROUP_SYSCTL] = "sysctl", - [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4", - [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", - [BPF_CGROUP_GETSOCKOPT] = "getsockopt", - [BPF_CGROUP_SETSOCKOPT] = "setsockopt", - [__MAX_BPF_ATTACH_TYPE] = NULL, -}; - static enum bpf_attach_type parse_attach_type(const char *str) { enum bpf_attach_type type; for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { - if (attach_type_strings[type] && - is_prefix(str, attach_type_strings[type])) + if (attach_type_name[type] && + is_prefix(str, attach_type_name[type])) return type; } return __MAX_BPF_ATTACH_TYPE; } -static int show_bpf_prog(int id, const char *attach_type_str, +static int show_bpf_prog(int id, enum bpf_attach_type attach_type, const char *attach_flags_str, int level) { @@ -86,18 +65,22 @@ static int show_bpf_prog(int id, const char *attach_type_str, if (json_output) { jsonw_start_object(json_wtr); jsonw_uint_field(json_wtr, "id", info.id); - jsonw_string_field(json_wtr, "attach_type", - attach_type_str); + if (attach_type < ARRAY_SIZE(attach_type_name)) + jsonw_string_field(json_wtr, "attach_type", + attach_type_name[attach_type]); + else + jsonw_uint_field(json_wtr, "attach_type", attach_type); jsonw_string_field(json_wtr, "attach_flags", attach_flags_str); jsonw_string_field(json_wtr, "name", info.name); jsonw_end_object(json_wtr); } else { - printf("%s%-8u %-15s %-15s %-15s\n", level ? " " : "", - info.id, - attach_type_str, - attach_flags_str, - info.name); + printf("%s%-8u ", level ? 
" " : "", info.id); + if (attach_type < ARRAY_SIZE(attach_type_name)) + printf("%-15s", attach_type_name[attach_type]); + else + printf("type %-10u", attach_type); + printf(" %-15s %-15s\n", attach_flags_str, info.name); } close(prog_fd); @@ -171,7 +154,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, } for (iter = 0; iter < prog_cnt; iter++) - show_bpf_prog(prog_ids[iter], attach_type_strings[type], + show_bpf_prog(prog_ids[iter], type, attach_flags_str, level); return 0; @@ -508,20 +491,18 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s { show | list } CGROUP [**effective**]\n" - " %s %s tree [CGROUP_ROOT] [**effective**]\n" - " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n" - " %s %s detach CGROUP ATTACH_TYPE PROG\n" - " %s %s help\n" + "Usage: %1$s %2$s { show | list } CGROUP [**effective**]\n" + " %1$s %2$s tree [CGROUP_ROOT] [**effective**]\n" + " %1$s %2$s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n" + " %1$s %2$s detach CGROUP ATTACH_TYPE PROG\n" + " %1$s %2$s help\n" "\n" HELP_SPEC_ATTACH_TYPES "\n" " " HELP_SPEC_ATTACH_FLAGS "\n" " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS "\n" "", - bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2]); + bin_name, argv[-2]); return 0; } diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 88264abaa738..65303664417e 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ +#define _GNU_SOURCE #include <ctype.h> #include <errno.h> #include <fcntl.h> -#include <fts.h> +#include <ftw.h> #include <libgen.h> #include <mntent.h> #include <stdbool.h> @@ -20,8 +21,8 @@ #include <sys/stat.h> #include <sys/vfs.h> -#include <bpf.h> -#include <libbpf.h> /* libbpf_num_possible_cpus */ +#include <bpf/bpf.h> +#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */ #include "main.h" @@ -29,6 +30,44 @@ #define BPF_FS_MAGIC 0xcafe4a11 #endif +const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { + [BPF_CGROUP_INET_INGRESS] = "ingress", + [BPF_CGROUP_INET_EGRESS] = "egress", + [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", + [BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release", + [BPF_CGROUP_SOCK_OPS] = "sock_ops", + [BPF_CGROUP_DEVICE] = "device", + [BPF_CGROUP_INET4_BIND] = "bind4", + [BPF_CGROUP_INET6_BIND] = "bind6", + [BPF_CGROUP_INET4_CONNECT] = "connect4", + [BPF_CGROUP_INET6_CONNECT] = "connect6", + [BPF_CGROUP_INET4_POST_BIND] = "post_bind4", + [BPF_CGROUP_INET6_POST_BIND] = "post_bind6", + [BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4", + [BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6", + [BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4", + [BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6", + [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4", + [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6", + [BPF_CGROUP_SYSCTL] = "sysctl", + [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4", + [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", + [BPF_CGROUP_GETSOCKOPT] = "getsockopt", + [BPF_CGROUP_SETSOCKOPT] = "setsockopt", + + [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", + [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", + [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", + [BPF_LIRC_MODE2] = "lirc_mode2", + [BPF_FLOW_DISSECTOR] = "flow_dissector", + [BPF_TRACE_RAW_TP] = "raw_tp", + [BPF_TRACE_FENTRY] = "fentry", + [BPF_TRACE_FEXIT] = "fexit", + [BPF_MODIFY_RETURN] = "mod_ret", + [BPF_LSM_MAC] = "lsm_mac", 
+ [BPF_SK_LOOKUP] = "sk_lookup", +}; + void p_err(const char *fmt, ...) { va_list ap; @@ -123,24 +162,35 @@ int mount_tracefs(const char *target) return err; } -int open_obj_pinned(char *path, bool quiet) +int open_obj_pinned(const char *path, bool quiet) { - int fd; + char *pname; + int fd = -1; - fd = bpf_obj_get(path); - if (fd < 0) { + pname = strdup(path); + if (!pname) { if (!quiet) - p_err("bpf obj get (%s): %s", path, - errno == EACCES && !is_bpffs(dirname(path)) ? - "directory not in bpf file system (bpffs)" : - strerror(errno)); - return -1; + p_err("mem alloc failed"); + goto out_ret; } + fd = bpf_obj_get(pname); + if (fd < 0) { + if (!quiet) + p_err("bpf obj get (%s): %s", pname, + errno == EACCES && !is_bpffs(dirname(pname)) ? + "directory not in bpf file system (bpffs)" : + strerror(errno)); + goto out_free; + } + +out_free: + free(pname); +out_ret: return fd; } -int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type) +int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type) { enum bpf_obj_type type; int fd; @@ -211,39 +261,14 @@ int do_pin_fd(int fd, const char *name) return err; } -int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) +int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***)) { - unsigned int id; - char *endptr; int err; int fd; - if (argc < 3) { - p_err("too few arguments, id ID and FILE path is required"); - return -1; - } else if (argc > 3) { - p_err("too many arguments"); - return -1; - } - - if (!is_prefix(*argv, "id")) { - p_err("expected 'id' got %s", *argv); - return -1; - } - NEXT_ARG(); - - id = strtoul(*argv, &endptr, 0); - if (*endptr) { - p_err("can't parse %s as ID", *argv); - return -1; - } - NEXT_ARG(); - - fd = get_fd_by_id(id); - if (fd < 0) { - p_err("can't open object by id (%u): %s", id, strerror(errno)); - return -1; - } + fd = get_fd(&argc, &argv); + if (fd < 0) + return fd; err = do_pin_fd(fd, *argv); @@ -287,6 +312,8 @@ int get_fd_type(int fd) return BPF_OBJ_MAP; else if (strstr(buf, "bpf-prog")) return BPF_OBJ_PROG; + else if (strstr(buf, "bpf-link")) + return BPF_OBJ_LINK; return BPF_OBJ_UNKNOWN; } @@ -353,71 +380,82 @@ void print_hex_data_json(uint8_t *data, size_t len) jsonw_end_array(json_wtr); } +/* extra params for nftw cb */ +static struct pinned_obj_table *build_fn_table; +static enum bpf_obj_type build_fn_type; + +static int do_build_table_cb(const char *fpath, const struct stat *sb, + int typeflag, struct FTW *ftwbuf) +{ + struct bpf_prog_info pinned_info; + __u32 len = sizeof(pinned_info); + struct pinned_obj *obj_node; + enum bpf_obj_type objtype; + int fd, err = 0; + + if (typeflag != FTW_F) + goto out_ret; + + fd = open_obj_pinned(fpath, true); + if (fd < 0) + goto out_ret; + + objtype = get_fd_type(fd); + if (objtype != build_fn_type) + goto out_close; + + memset(&pinned_info, 0, sizeof(pinned_info)); + if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len)) + goto out_close; + + obj_node = calloc(1, sizeof(*obj_node)); + if (!obj_node) { + err = -1; + goto out_close; + } + + obj_node->id = pinned_info.id; + obj_node->path = strdup(fpath); + if (!obj_node->path) { + err = -1; + free(obj_node); + goto out_close; + } + + hash_add(build_fn_table->table, &obj_node->hash, obj_node->id); +out_close: + close(fd); +out_ret: + return err; +} + int build_pinned_obj_table(struct pinned_obj_table *tab, enum bpf_obj_type type) { - struct bpf_prog_info pinned_info = {}; - struct pinned_obj *obj_node = NULL; - __u32 len = sizeof(pinned_info); struct mntent *mntent = NULL; - enum bpf_obj_type 
objtype; FILE *mntfile = NULL; - FTSENT *ftse = NULL; - FTS *fts = NULL; - int fd, err; + int flags = FTW_PHYS; + int nopenfd = 16; + int err = 0; mntfile = setmntent("/proc/mounts", "r"); if (!mntfile) return -1; + build_fn_table = tab; + build_fn_type = type; + while ((mntent = getmntent(mntfile))) { - char *path[] = { mntent->mnt_dir, NULL }; + char *path = mntent->mnt_dir; if (strncmp(mntent->mnt_type, "bpf", 3) != 0) continue; - - fts = fts_open(path, 0, NULL); - if (!fts) - continue; - - while ((ftse = fts_read(fts))) { - if (!(ftse->fts_info & FTS_F)) - continue; - fd = open_obj_pinned(ftse->fts_path, true); - if (fd < 0) - continue; - - objtype = get_fd_type(fd); - if (objtype != type) { - close(fd); - continue; - } - memset(&pinned_info, 0, sizeof(pinned_info)); - err = bpf_obj_get_info_by_fd(fd, &pinned_info, &len); - if (err) { - close(fd); - continue; - } - - obj_node = malloc(sizeof(*obj_node)); - if (!obj_node) { - close(fd); - fts_close(fts); - fclose(mntfile); - return -1; - } - - memset(obj_node, 0, sizeof(*obj_node)); - obj_node->id = pinned_info.id; - obj_node->path = strdup(ftse->fts_path); - hash_add(tab->table, &obj_node->hash, obj_node->id); - - close(fd); - } - fts_close(fts); + err = nftw(path, do_build_table_cb, nopenfd, flags); + if (err) + break; } fclose(mntfile); - return 0; + return err; } void delete_pinned_obj_table(struct pinned_obj_table *tab) @@ -597,3 +635,318 @@ int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what) return 0; } + +int __printf(2, 0) +print_all_levels(__maybe_unused enum libbpf_print_level level, + const char *format, va_list args) +{ + return vfprintf(stderr, format, args); +} + +static int prog_fd_by_nametag(void *nametag, int **fds, bool tag) +{ + unsigned int id = 0; + int fd, nb_fds = 0; + void *tmp; + int err; + + while (true) { + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + + err = bpf_prog_get_next_id(id, &id); + if (err) { + if (errno != ENOENT) { + p_err("%s", strerror(errno)); + goto err_close_fds; + } + return nb_fds; + } + + fd = bpf_prog_get_fd_by_id(id); + if (fd < 0) { + p_err("can't get prog by id (%u): %s", + id, strerror(errno)); + goto err_close_fds; + } + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + p_err("can't get prog info (%u): %s", + id, strerror(errno)); + goto err_close_fd; + } + + if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) || + (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) { + close(fd); + continue; + } + + if (nb_fds > 0) { + tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); + if (!tmp) { + p_err("failed to realloc"); + goto err_close_fd; + } + *fds = tmp; + } + (*fds)[nb_fds++] = fd; + } + +err_close_fd: + close(fd); +err_close_fds: + while (--nb_fds >= 0) + close((*fds)[nb_fds]); + return -1; +} + +int prog_parse_fds(int *argc, char ***argv, int **fds) +{ + if (is_prefix(**argv, "id")) { + unsigned int id; + char *endptr; + + NEXT_ARGP(); + + id = strtoul(**argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as ID", **argv); + return -1; + } + NEXT_ARGP(); + + (*fds)[0] = bpf_prog_get_fd_by_id(id); + if ((*fds)[0] < 0) { + p_err("get by id (%u): %s", id, strerror(errno)); + return -1; + } + return 1; + } else if (is_prefix(**argv, "tag")) { + unsigned char tag[BPF_TAG_SIZE]; + + NEXT_ARGP(); + + if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2, + tag + 3, tag + 4, tag + 5, tag + 6, tag + 7) + != BPF_TAG_SIZE) { + p_err("can't parse tag"); + return -1; + } + NEXT_ARGP(); + + return prog_fd_by_nametag(tag, fds, true); + } 
else if (is_prefix(**argv, "name")) { + char *name; + + NEXT_ARGP(); + + name = **argv; + if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { + p_err("can't parse name"); + return -1; + } + NEXT_ARGP(); + + return prog_fd_by_nametag(name, fds, false); + } else if (is_prefix(**argv, "pinned")) { + char *path; + + NEXT_ARGP(); + + path = **argv; + NEXT_ARGP(); + + (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG); + if ((*fds)[0] < 0) + return -1; + return 1; + } + + p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv); + return -1; +} + +int prog_parse_fd(int *argc, char ***argv) +{ + int *fds = NULL; + int nb_fds, fd; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = prog_parse_fds(argc, argv, &fds); + if (nb_fds != 1) { + if (nb_fds > 1) { + p_err("several programs match this handle"); + while (nb_fds--) + close(fds[nb_fds]); + } + fd = -1; + goto exit_free; + } + + fd = fds[0]; +exit_free: + free(fds); + return fd; +} + +static int map_fd_by_name(char *name, int **fds) +{ + unsigned int id = 0; + int fd, nb_fds = 0; + void *tmp; + int err; + + while (true) { + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + + err = bpf_map_get_next_id(id, &id); + if (err) { + if (errno != ENOENT) { + p_err("%s", strerror(errno)); + goto err_close_fds; + } + return nb_fds; + } + + fd = bpf_map_get_fd_by_id(id); + if (fd < 0) { + p_err("can't get map by id (%u): %s", + id, strerror(errno)); + goto err_close_fds; + } + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + p_err("can't get map info (%u): %s", + id, strerror(errno)); + goto err_close_fd; + } + + if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) { + close(fd); + continue; + } + + if (nb_fds > 0) { + tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); + if (!tmp) { + p_err("failed to realloc"); + goto err_close_fd; + } + *fds = tmp; + } + (*fds)[nb_fds++] = fd; + } + +err_close_fd: + close(fd); +err_close_fds: + while (--nb_fds >= 0) + close((*fds)[nb_fds]); + return -1; +} + +int map_parse_fds(int *argc, char ***argv, int **fds) +{ + if (is_prefix(**argv, "id")) { + unsigned int id; + char *endptr; + + NEXT_ARGP(); + + id = strtoul(**argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as ID", **argv); + return -1; + } + NEXT_ARGP(); + + (*fds)[0] = bpf_map_get_fd_by_id(id); + if ((*fds)[0] < 0) { + p_err("get map by id (%u): %s", id, strerror(errno)); + return -1; + } + return 1; + } else if (is_prefix(**argv, "name")) { + char *name; + + NEXT_ARGP(); + + name = **argv; + if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { + p_err("can't parse name"); + return -1; + } + NEXT_ARGP(); + + return map_fd_by_name(name, fds); + } else if (is_prefix(**argv, "pinned")) { + char *path; + + NEXT_ARGP(); + + path = **argv; + NEXT_ARGP(); + + (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP); + if ((*fds)[0] < 0) + return -1; + return 1; + } + + p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv); + return -1; +} + +int map_parse_fd(int *argc, char ***argv) +{ + int *fds = NULL; + int nb_fds, fd; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = map_parse_fds(argc, argv, &fds); + if (nb_fds != 1) { + if (nb_fds > 1) { + p_err("several maps match this handle"); + while (nb_fds--) + close(fds[nb_fds]); + } + fd = -1; + goto exit_free; + } + + fd = fds[0]; +exit_free: + free(fds); + return fd; +} + +int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) +{ + int err; + int fd; + + fd = 
map_parse_fd(argc, argv); + if (fd < 0) + return -1; + + err = bpf_obj_get_info_by_fd(fd, info, info_len); + if (err) { + p_err("can't get map info: %s", strerror(errno)); + close(fd); + return err; + } + + return fd; +} diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 03bdc5b3ac49..359960a8f1de 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -6,14 +6,17 @@ #include <string.h> #include <unistd.h> #include <net/if.h> +#ifdef USE_LIBCAP +#include <sys/capability.h> +#endif #include <sys/utsname.h> #include <sys/vfs.h> #include <linux/filter.h> #include <linux/limits.h> -#include <bpf.h> -#include <libbpf.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> #include <zlib.h> #include "main.h" @@ -35,6 +38,11 @@ static const char * const helper_name[] = { #undef BPF_HELPER_MAKE_ENTRY +static bool full_mode; +#ifdef USE_LIBCAP +static bool run_as_unprivileged; +#endif + /* Miscellaneous utility functions */ static bool check_procfs(void) @@ -72,13 +80,12 @@ print_bool_feature(const char *feat_name, const char *plain_name, printf("%s is %savailable\n", plain_name, res ? "" : "NOT "); } -static void print_kernel_option(const char *name, const char *value) +static void print_kernel_option(const char *name, const char *value, + const char *define_prefix) { char *endptr; int res; - /* No support for C-style ouptut */ - if (json_output) { if (!value) { jsonw_null_field(json_wtr, name); @@ -90,6 +97,12 @@ static void print_kernel_option(const char *name, const char *value) jsonw_int_field(json_wtr, name, res); else jsonw_string_field(json_wtr, name, value); + } else if (define_prefix) { + if (value) + printf("#define %s%s %s\n", define_prefix, + name, value); + else + printf("/* %s%s is not set */\n", define_prefix, name); } else { if (value) printf("%s is set to %s\n", name, value); @@ -112,18 +125,12 @@ print_start_section(const char *json_title, const char *plain_title, } } -static void -print_end_then_start_section(const char *json_title, const char *plain_title, - const char *define_comment, - const char *define_prefix) +static void print_end_section(void) { if (json_output) jsonw_end_object(json_wtr); else printf("\n"); - - print_start_section(json_title, plain_title, define_comment, - define_prefix); } /* Probing functions */ @@ -313,77 +320,84 @@ static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n, return false; } -static void probe_kernel_image_config(void) +static void probe_kernel_image_config(const char *define_prefix) { - static const char * const options[] = { + static const struct { + const char * const name; + bool macro_dump; + } options[] = { /* Enable BPF */ - "CONFIG_BPF", + { "CONFIG_BPF", }, /* Enable bpf() syscall */ - "CONFIG_BPF_SYSCALL", + { "CONFIG_BPF_SYSCALL", }, /* Does selected architecture support eBPF JIT compiler */ - "CONFIG_HAVE_EBPF_JIT", + { "CONFIG_HAVE_EBPF_JIT", }, /* Compile eBPF JIT compiler */ - "CONFIG_BPF_JIT", + { "CONFIG_BPF_JIT", }, /* Avoid compiling eBPF interpreter (use JIT only) */ - "CONFIG_BPF_JIT_ALWAYS_ON", + { "CONFIG_BPF_JIT_ALWAYS_ON", }, /* cgroups */ - "CONFIG_CGROUPS", + { "CONFIG_CGROUPS", }, /* BPF programs attached to cgroups */ - "CONFIG_CGROUP_BPF", + { "CONFIG_CGROUP_BPF", }, /* bpf_get_cgroup_classid() helper */ - "CONFIG_CGROUP_NET_CLASSID", + { "CONFIG_CGROUP_NET_CLASSID", }, /* bpf_skb_{,ancestor_}cgroup_id() helpers */ - "CONFIG_SOCK_CGROUP_DATA", + { "CONFIG_SOCK_CGROUP_DATA", }, /* Tracing: attach BPF to kprobes, tracepoints, etc. 
*/ - "CONFIG_BPF_EVENTS", + { "CONFIG_BPF_EVENTS", }, /* Kprobes */ - "CONFIG_KPROBE_EVENTS", + { "CONFIG_KPROBE_EVENTS", }, /* Uprobes */ - "CONFIG_UPROBE_EVENTS", + { "CONFIG_UPROBE_EVENTS", }, /* Tracepoints */ - "CONFIG_TRACING", + { "CONFIG_TRACING", }, /* Syscall tracepoints */ - "CONFIG_FTRACE_SYSCALLS", + { "CONFIG_FTRACE_SYSCALLS", }, /* bpf_override_return() helper support for selected arch */ - "CONFIG_FUNCTION_ERROR_INJECTION", + { "CONFIG_FUNCTION_ERROR_INJECTION", }, /* bpf_override_return() helper */ - "CONFIG_BPF_KPROBE_OVERRIDE", + { "CONFIG_BPF_KPROBE_OVERRIDE", }, /* Network */ - "CONFIG_NET", + { "CONFIG_NET", }, /* AF_XDP sockets */ - "CONFIG_XDP_SOCKETS", + { "CONFIG_XDP_SOCKETS", }, /* BPF_PROG_TYPE_LWT_* and related helpers */ - "CONFIG_LWTUNNEL_BPF", + { "CONFIG_LWTUNNEL_BPF", }, /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */ - "CONFIG_NET_ACT_BPF", + { "CONFIG_NET_ACT_BPF", }, /* BPF_PROG_TYPE_SCHED_CLS, TC filters */ - "CONFIG_NET_CLS_BPF", + { "CONFIG_NET_CLS_BPF", }, /* TC clsact qdisc */ - "CONFIG_NET_CLS_ACT", + { "CONFIG_NET_CLS_ACT", }, /* Ingress filtering with TC */ - "CONFIG_NET_SCH_INGRESS", + { "CONFIG_NET_SCH_INGRESS", }, /* bpf_skb_get_xfrm_state() helper */ - "CONFIG_XFRM", + { "CONFIG_XFRM", }, /* bpf_get_route_realm() helper */ - "CONFIG_IP_ROUTE_CLASSID", + { "CONFIG_IP_ROUTE_CLASSID", }, /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */ - "CONFIG_IPV6_SEG6_BPF", + { "CONFIG_IPV6_SEG6_BPF", }, /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */ - "CONFIG_BPF_LIRC_MODE2", + { "CONFIG_BPF_LIRC_MODE2", }, /* BPF stream parser and BPF socket maps */ - "CONFIG_BPF_STREAM_PARSER", + { "CONFIG_BPF_STREAM_PARSER", }, /* xt_bpf module for passing BPF programs to netfilter */ - "CONFIG_NETFILTER_XT_MATCH_BPF", + { "CONFIG_NETFILTER_XT_MATCH_BPF", }, /* bpfilter back-end for iptables */ - "CONFIG_BPFILTER", + { "CONFIG_BPFILTER", }, /* bpftilter module with "user mode helper" */ - "CONFIG_BPFILTER_UMH", + { "CONFIG_BPFILTER_UMH", }, /* test_bpf module for BPF tests */ - "CONFIG_TEST_BPF", + { "CONFIG_TEST_BPF", }, + + /* Misc configs useful in BPF C programs */ + /* jiffies <-> sec conversion for bpf_jiffies64() helper */ + { "CONFIG_HZ", true, } }; char *values[ARRAY_SIZE(options)] = { }; struct utsname utsn; @@ -425,7 +439,8 @@ static void probe_kernel_image_config(void) while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) { for (i = 0; i < ARRAY_SIZE(options); i++) { - if (values[i] || strcmp(buf, options[i])) + if ((define_prefix && !options[i].macro_dump) || + values[i] || strcmp(buf, options[i].name)) continue; values[i] = strdup(value); @@ -437,7 +452,9 @@ end_parse: gzclose(file); for (i = 0; i < ARRAY_SIZE(options); i++) { - print_kernel_option(options[i], values[i]); + if (define_prefix && !options[i].macro_dump) + continue; + print_kernel_option(options[i].name, values[i], define_prefix); free(values[i]); } } @@ -477,9 +494,20 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, } res = bpf_probe_prog_type(prog_type, ifindex); +#ifdef USE_LIBCAP + /* Probe may succeed even if program load fails, for unprivileged users + * check that we did not fail because of insufficient permissions + */ + if (run_as_unprivileged && errno == EPERM) + res = false; +#endif supported_types[prog_type] |= res; + if (!prog_type_name[prog_type]) { + p_info("program type name not found (type %d)", prog_type); + return; + } maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; if (strlen(prog_type_name[prog_type]) 
> maxlen) { p_info("program type name too long"); @@ -505,6 +533,14 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix, res = bpf_probe_map_type(map_type, ifindex); + /* Probe result depends on the success of map creation, no additional + * check required for unprivileged users + */ + + if (!map_type_name[map_type]) { + p_info("map type name not found (type %d)", map_type); + return; + } maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; if (strlen(map_type_name[map_type]) > maxlen) { p_info("map type name too long"); @@ -519,6 +555,38 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix, define_prefix); } +static void +probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type, + const char *define_prefix, unsigned int id, + const char *ptype_name, __u32 ifindex) +{ + bool res = false; + + if (supported_type) { + res = bpf_probe_helper(id, prog_type, ifindex); +#ifdef USE_LIBCAP + /* Probe may succeed even if program load fails, for + * unprivileged users check that we did not fail because of + * insufficient permissions + */ + if (run_as_unprivileged && errno == EPERM) + res = false; +#endif + } + + if (json_output) { + if (res) + jsonw_string(json_wtr, helper_name[id]); + } else if (define_prefix) { + printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n", + define_prefix, ptype_name, helper_name[id], + res ? "1" : "0"); + } else { + if (res) + printf("\n\t- %s", helper_name[id]); + } +} + static void probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, const char *define_prefix, __u32 ifindex) @@ -526,7 +594,6 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, const char *ptype_name = prog_type_name[prog_type]; char feat_name[128]; unsigned int id; - bool res; if (ifindex) /* Only test helpers for offload-able program types */ @@ -548,21 +615,19 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, } for (id = 1; id < ARRAY_SIZE(helper_name); id++) { - if (!supported_type) - res = false; - else - res = bpf_probe_helper(id, prog_type, ifindex); - - if (json_output) { - if (res) - jsonw_string(json_wtr, helper_name[id]); - } else if (define_prefix) { - printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n", - define_prefix, ptype_name, helper_name[id], - res ? "1" : "0"); - } else { - if (res) - printf("\n\t- %s", helper_name[id]); + /* Skip helper functions which emit dmesg messages when not in + * the full mode. 
+ */ + switch (id) { + case BPF_FUNC_trace_printk: + case BPF_FUNC_probe_write_user: + if (!full_mode) + continue; + /* fallthrough */ + default: + probe_helper_for_progtype(prog_type, supported_type, + define_prefix, id, ptype_name, + ifindex); } } @@ -572,23 +637,267 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, printf("\n"); } +static void +probe_large_insn_limit(const char *define_prefix, __u32 ifindex) +{ + bool res; + + res = bpf_probe_large_insn_limit(ifindex); + print_bool_feature("have_large_insn_limit", + "Large program size limit", + "LARGE_INSN_LIMIT", + res, define_prefix); +} + +static void +section_system_config(enum probe_component target, const char *define_prefix) +{ + switch (target) { + case COMPONENT_KERNEL: + case COMPONENT_UNSPEC: + print_start_section("system_config", + "Scanning system configuration...", + "/*** Misc kernel config items ***/", + define_prefix); + if (!define_prefix) { + if (check_procfs()) { + probe_unprivileged_disabled(); + probe_jit_enable(); + probe_jit_harden(); + probe_jit_kallsyms(); + probe_jit_limit(); + } else { + p_info("/* procfs not mounted, skipping related probes */"); + } + } + probe_kernel_image_config(define_prefix); + print_end_section(); + break; + default: + break; + } +} + +static bool section_syscall_config(const char *define_prefix) +{ + bool res; + + print_start_section("syscall_config", + "Scanning system call availability...", + "/*** System call availability ***/", + define_prefix); + res = probe_bpf_syscall(define_prefix); + print_end_section(); + + return res; +} + +static void +section_program_types(bool *supported_types, const char *define_prefix, + __u32 ifindex) +{ + unsigned int i; + + print_start_section("program_types", + "Scanning eBPF program types...", + "/*** eBPF program types ***/", + define_prefix); + + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++) + probe_prog_type(i, supported_types, define_prefix, ifindex); + + print_end_section(); +} + +static void section_map_types(const char *define_prefix, __u32 ifindex) +{ + unsigned int i; + + print_start_section("map_types", + "Scanning eBPF map types...", + "/*** eBPF map types ***/", + define_prefix); + + for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) + probe_map_type(i, define_prefix, ifindex); + + print_end_section(); +} + +static void +section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex) +{ + unsigned int i; + + print_start_section("helpers", + "Scanning eBPF helper functions...", + "/*** eBPF helper functions ***/", + define_prefix); + + if (define_prefix) + printf("/*\n" + " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n" + " * to determine if <helper_name> is available for <prog_type_name>,\n" + " * e.g.\n" + " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n" + " * // do stuff with this helper\n" + " * #elif\n" + " * // use a workaround\n" + " * #endif\n" + " */\n" + "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n" + " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n", + define_prefix, define_prefix, define_prefix, + define_prefix); + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++) + probe_helpers_for_progtype(i, supported_types[i], define_prefix, + ifindex); + + print_end_section(); +} + +static void section_misc(const char *define_prefix, __u32 ifindex) +{ + print_start_section("misc", + "Scanning miscellaneous eBPF features...", + "/*** eBPF misc features ***/", + define_prefix); + 
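The generated-header comment printed by section_helpers() above is the contract for consumers. As a minimal sketch of how that probe output might be used from a BPF program, assuming bpftool was invoked as "bpftool feature probe macros prefix BPFTOOL_" and its output saved as bpftool_probes.h (the prefix, the file name and the section name are illustrative, not part of bpftool):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include "bpftool_probes.h"	/* hypothetical: saved probe output */

	SEC("xdp")
	int xdp_fwd(struct xdp_md *ctx)
	{
	#if BPFTOOL_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
		/* probe reported bpf_redirect() as usable from XDP */
		return bpf_redirect(2 /* ifindex, arbitrary here */, 0);
	#else
		return XDP_PASS;	/* fall back when the helper is absent */
	#endif
	}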
probe_large_insn_limit(define_prefix, ifindex); + print_end_section(); +} + +#ifdef USE_LIBCAP +#define capability(c) { c, false, #c } +#define capability_msg(a, i) a[i].set ? "" : a[i].name, a[i].set ? "" : ", " +#endif + +static int handle_perms(void) +{ +#ifdef USE_LIBCAP + struct { + cap_value_t cap; + bool set; + char name[14]; /* strlen("CAP_SYS_ADMIN") */ + } bpf_caps[] = { + capability(CAP_SYS_ADMIN), +#ifdef CAP_BPF + capability(CAP_BPF), + capability(CAP_NET_ADMIN), + capability(CAP_PERFMON), +#endif + }; + cap_value_t cap_list[ARRAY_SIZE(bpf_caps)]; + unsigned int i, nb_bpf_caps = 0; + bool cap_sys_admin_only = true; + cap_flag_value_t val; + int res = -1; + cap_t caps; + + caps = cap_get_proc(); + if (!caps) { + p_err("failed to get capabilities for process: %s", + strerror(errno)); + return -1; + } + +#ifdef CAP_BPF + if (CAP_IS_SUPPORTED(CAP_BPF)) + cap_sys_admin_only = false; +#endif + + for (i = 0; i < ARRAY_SIZE(bpf_caps); i++) { + const char *cap_name = bpf_caps[i].name; + cap_value_t cap = bpf_caps[i].cap; + + if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val)) { + p_err("bug: failed to retrieve %s status: %s", cap_name, + strerror(errno)); + goto exit_free; + } + + if (val == CAP_SET) { + bpf_caps[i].set = true; + cap_list[nb_bpf_caps++] = cap; + } + + if (cap_sys_admin_only) + /* System does not know about CAP_BPF, meaning that + * CAP_SYS_ADMIN is the only capability required. We + * just checked it, break. + */ + break; + } + + if ((run_as_unprivileged && !nb_bpf_caps) || + (!run_as_unprivileged && nb_bpf_caps == ARRAY_SIZE(bpf_caps)) || + (!run_as_unprivileged && cap_sys_admin_only && nb_bpf_caps)) { + /* We are all good, exit now */ + res = 0; + goto exit_free; + } + + if (!run_as_unprivileged) { + if (cap_sys_admin_only) + p_err("missing %s, required for full feature probing; run as root or use 'unprivileged'", + bpf_caps[0].name); + else + p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'", + capability_msg(bpf_caps, 0), +#ifdef CAP_BPF + capability_msg(bpf_caps, 1), + capability_msg(bpf_caps, 2), + capability_msg(bpf_caps, 3) +#else + "", "", "", "", "", "" +#endif /* CAP_BPF */ + ); + goto exit_free; + } + + /* if (run_as_unprivileged && nb_bpf_caps > 0), drop capabilities. */ + if (cap_set_flag(caps, CAP_EFFECTIVE, nb_bpf_caps, cap_list, + CAP_CLEAR)) { + p_err("bug: failed to clear capabilities: %s", strerror(errno)); + goto exit_free; + } + + if (cap_set_proc(caps)) { + p_err("failed to drop capabilities: %s", strerror(errno)); + goto exit_free; + } + + res = 0; + +exit_free: + if (cap_free(caps) && !res) { + p_err("failed to clear storage object for capabilities: %s", + strerror(errno)); + res = -1; + } + + return res; +#else + /* Detection assumes user has specific privileges. + * We do not use libcap so let's approximate, and restrict usage to + * root user only. + */ + if (geteuid()) { + p_err("full feature probing requires root privileges"); + return -1; + } + + return 0; +#endif /* USE_LIBCAP */ +} + static int do_probe(int argc, char **argv) { enum probe_component target = COMPONENT_UNSPEC; const char *define_prefix = NULL; bool supported_types[128] = {}; __u32 ifindex = 0; - unsigned int i; char *ifname; - /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN). * Let's approximate, and restrict usage to root user only. 
- */ - if (geteuid()) { - p_err("please run this command as root user"); - return -1; - } - set_max_rlimit(); while (argc) { @@ -617,6 +926,9 @@ static int do_probe(int argc, char **argv) strerror(errno)); return -1; } + } else if (is_prefix(*argv, "full")) { + full_mode = true; + NEXT_ARG(); } else if (is_prefix(*argv, "macros") && !define_prefix) { define_prefix = ""; NEXT_ARG(); @@ -634,6 +946,14 @@ static int do_probe(int argc, char **argv) if (!REQ_ARGS(1)) return -1; define_prefix = GET_ARG(); + } else if (is_prefix(*argv, "unprivileged")) { +#ifdef USE_LIBCAP + run_as_unprivileged = true; + NEXT_ARG(); +#else + p_err("unprivileged run not supported, recompile bpftool with libcap"); + return -1; +#endif } else { p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?", *argv); @@ -641,96 +961,30 @@ static int do_probe(int argc, char **argv) } } + /* Full feature detection requires specific privileges. + * Let's approximate, and warn if user is not root. + */ + if (handle_perms()) + return -1; + if (json_output) { define_prefix = NULL; jsonw_start_object(json_wtr); } - switch (target) { - case COMPONENT_KERNEL: - case COMPONENT_UNSPEC: - if (define_prefix) - break; - - print_start_section("system_config", - "Scanning system configuration...", - NULL, /* define_comment never used here */ - NULL); /* define_prefix always NULL here */ - if (check_procfs()) { - probe_unprivileged_disabled(); - probe_jit_enable(); - probe_jit_harden(); - probe_jit_kallsyms(); - probe_jit_limit(); - } else { - p_info("/* procfs not mounted, skipping related probes */"); - } - probe_kernel_image_config(); - if (json_output) - jsonw_end_object(json_wtr); - else - printf("\n"); - break; - default: - break; - } - - print_start_section("syscall_config", - "Scanning system call availability...", - "/*** System call availability ***/", - define_prefix); - - if (!probe_bpf_syscall(define_prefix)) + section_system_config(target, define_prefix); + if (!section_syscall_config(define_prefix)) /* bpf() syscall unavailable, don't probe other BPF features */ goto exit_close_json; - - print_end_then_start_section("program_types", - "Scanning eBPF program types...", - "/*** eBPF program types ***/", - define_prefix); - - for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) - probe_prog_type(i, supported_types, define_prefix, ifindex); - - print_end_then_start_section("map_types", - "Scanning eBPF map types...", - "/*** eBPF map types ***/", - define_prefix); - - for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) - probe_map_type(i, define_prefix, ifindex); - - print_end_then_start_section("helpers", - "Scanning eBPF helper functions...", - "/*** eBPF helper functions ***/", - define_prefix); - - if (define_prefix) - printf("/*\n" - " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n" - " * to determine if <helper_name> is available for <prog_type_name>,\n" - " * e.g.\n" - " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n" - " * // do stuff with this helper\n" - " * #elif\n" - " * // use a workaround\n" - " * #endif\n" - " */\n" - "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n" - " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n", - define_prefix, define_prefix, define_prefix, - define_prefix); - for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) - probe_helpers_for_progtype(i, supported_types[i], - define_prefix, ifindex); + section_program_types(supported_types, define_prefix, ifindex); + 
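Each section_*() call here is a thin wrapper: the real work is a libbpf probe, and only the presentation (JSON, plain text, or #define output) differs. Ordering matters, too: section_helpers() consumes the supported_types[] array that section_program_types() has just filled in, because probing a helper is only meaningful for a program type that itself loads. A minimal standalone sketch of the underlying libbpf calls (the wrapper function name is illustrative, not part of bpftool):

	#include <stdbool.h>
	#include <bpf/libbpf.h>

	static bool kernel_supports_xdp_redirect(void)
	{
		/* same probes feature.c wraps; ifindex 0 means no offload */
		if (!bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
			return false;
		return bpf_probe_helper(BPF_FUNC_redirect, BPF_PROG_TYPE_XDP, 0);
	}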
section_map_types(define_prefix, ifindex); + section_helpers(supported_types, define_prefix, ifindex); + section_misc(define_prefix, ifindex); exit_close_json: - if (json_output) { - /* End current "section" of probes */ - jsonw_end_object(json_wtr); + if (json_output) /* End root object */ jsonw_end_object(json_wtr); - } return 0; } @@ -743,12 +997,12 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n" - " %s %s help\n" + "Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n" + " %1$s %2$s help\n" "\n" " COMPONENT := { kernel | dev NAME }\n" "", - bin_name, argv[-2], bin_name, argv[-2]); + bin_name, argv[-2]); return 0; } diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c new file mode 100644 index 000000000000..4033c46d83e7 --- /dev/null +++ b/tools/bpf/bpftool/gen.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Facebook */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include <ctype.h> +#include <errno.h> +#include <fcntl.h> +#include <linux/err.h> +#include <stdbool.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <bpf/btf.h> + +#include "json_writer.h" +#include "main.h" + +#define MAX_OBJ_NAME_LEN 64 + +static void sanitize_identifier(char *name) +{ + int i; + + for (i = 0; name[i]; i++) + if (!isalnum(name[i]) && name[i] != '_') + name[i] = '_'; +} + +static bool str_has_suffix(const char *str, const char *suffix) +{ + size_t i, n1 = strlen(str), n2 = strlen(suffix); + + if (n1 < n2) + return false; + + for (i = 0; i < n2; i++) { + if (str[n1 - i - 1] != suffix[n2 - i - 1]) + return false; + } + + return true; +} + +static void get_obj_name(char *name, const char *file) +{ + /* Using basename() GNU version which doesn't modify arg. 
*/ + strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1); + name[MAX_OBJ_NAME_LEN - 1] = '\0'; + if (str_has_suffix(name, ".o")) + name[strlen(name) - 2] = '\0'; + sanitize_identifier(name); +} + +static void get_header_guard(char *guard, const char *obj_name) +{ + int i; + + sprintf(guard, "__%s_SKEL_H__", obj_name); + for (i = 0; guard[i]; i++) + guard[i] = toupper(guard[i]); +} + +static const char *get_map_ident(const struct bpf_map *map) +{ + const char *name = bpf_map__name(map); + + if (!bpf_map__is_internal(map)) + return name; + + if (str_has_suffix(name, ".data")) + return "data"; + else if (str_has_suffix(name, ".rodata")) + return "rodata"; + else if (str_has_suffix(name, ".bss")) + return "bss"; + else if (str_has_suffix(name, ".kconfig")) + return "kconfig"; + else + return NULL; +} + +static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vprintf(fmt, args); +} + +static int codegen_datasec_def(struct bpf_object *obj, + struct btf *btf, + struct btf_dump *d, + const struct btf_type *sec, + const char *obj_name) +{ + const char *sec_name = btf__name_by_offset(btf, sec->name_off); + const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec); + int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); + const char *sec_ident; + char var_ident[256]; + bool strip_mods = false; + + if (strcmp(sec_name, ".data") == 0) { + sec_ident = "data"; + } else if (strcmp(sec_name, ".bss") == 0) { + sec_ident = "bss"; + } else if (strcmp(sec_name, ".rodata") == 0) { + sec_ident = "rodata"; + strip_mods = true; + } else if (strcmp(sec_name, ".kconfig") == 0) { + sec_ident = "kconfig"; + } else { + return 0; + } + + printf(" struct %s__%s {\n", obj_name, sec_ident); + for (i = 0; i < vlen; i++, sec_var++) { + const struct btf_type *var = btf__type_by_id(btf, sec_var->type); + const char *var_name = btf__name_by_offset(btf, var->name_off); + DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, + .field_name = var_ident, + .indent_level = 2, + .strip_mods = strip_mods, + ); + int need_off = sec_var->offset, align_off, align; + __u32 var_type_id = var->type; + + if (off > need_off) { + p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n", + sec_name, i, need_off, off); + return -EINVAL; + } + + align = btf__align_of(btf, var->type); + if (align <= 0) { + p_err("Failed to determine alignment of variable '%s': %d", + var_name, align); + return -EINVAL; + } + /* Assume 32-bit architectures when generating data section + * struct memory layout. Given bpftool can't know which target + * host architecture it's emitting skeleton for, we need to be + * conservative and assume 32-bit one to ensure enough padding + * bytes are generated for pointer and long types. This will + * still work correctly for 64-bit architectures, because in + * the worst case we'll generate unnecessary padding field, + * which on 64-bit architectures is not strictly necessary and + * would be handled by natural 8-byte alignment. But it still + * will be a correct memory layout, based on recorded offsets + * in BTF. 
+ */ + if (align > 4) + align = 4; + + align_off = (off + align - 1) / align * align; + if (align_off != need_off) { + printf("\t\tchar __pad%d[%d];\n", + pad_cnt, need_off - off); + pad_cnt++; + } + + /* sanitize variable name, e.g., for static vars inside + * a function, it's name is '<function name>.<variable name>', + * which we'll turn into a '<function name>_<variable name>' + */ + var_ident[0] = '\0'; + strncat(var_ident, var_name, sizeof(var_ident) - 1); + sanitize_identifier(var_ident); + + printf("\t\t"); + err = btf_dump__emit_type_decl(d, var_type_id, &opts); + if (err) + return err; + printf(";\n"); + + off = sec_var->offset + sec_var->size; + } + printf(" } *%s;\n", sec_ident); + return 0; +} + +static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) +{ + struct btf *btf = bpf_object__btf(obj); + int n = btf__get_nr_types(btf); + struct btf_dump *d; + int i, err = 0; + + d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf); + if (IS_ERR(d)) + return PTR_ERR(d); + + for (i = 1; i <= n; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + err = codegen_datasec_def(obj, btf, d, t, obj_name); + if (err) + goto out; + } +out: + btf_dump__free(d); + return err; +} + +static void codegen(const char *template, ...) +{ + const char *src, *end; + int skip_tabs = 0, n; + char *s, *dst; + va_list args; + char c; + + n = strlen(template); + s = malloc(n + 1); + if (!s) + exit(-1); + src = template; + dst = s; + + /* find out "baseline" indentation to skip */ + while ((c = *src++)) { + if (c == '\t') { + skip_tabs++; + } else if (c == '\n') { + break; + } else { + p_err("unrecognized character at pos %td in template '%s'", + src - template - 1, template); + free(s); + exit(-1); + } + } + + while (*src) { + /* skip baseline indentation tabs */ + for (n = skip_tabs; n > 0; n--, src++) { + if (*src != '\t') { + p_err("not enough tabs at pos %td in template '%s'", + src - template - 1, template); + free(s); + exit(-1); + } + } + /* trim trailing whitespace */ + end = strchrnul(src, '\n'); + for (n = end - src; n > 0 && isspace(src[n - 1]); n--) + ; + memcpy(dst, src, n); + dst += n; + if (*end) + *dst++ = '\n'; + src = *end ? 
end + 1 : end; + } + *dst++ = '\0'; + + /* print out using adjusted template */ + va_start(args, template); + n = vprintf(s, args); + va_end(args); + + free(s); +} + +static int do_skeleton(int argc, char **argv) +{ + char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; + size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + char obj_name[MAX_OBJ_NAME_LEN], *obj_data; + struct bpf_object *obj = NULL; + const char *file, *ident; + struct bpf_program *prog; + int fd, len, err = -1; + struct bpf_map *map; + struct btf *btf; + struct stat st; + + if (!REQ_ARGS(1)) { + usage(); + return -1; + } + file = GET_ARG(); + + if (argc) { + p_err("extra unknown arguments"); + return -1; + } + + if (stat(file, &st)) { + p_err("failed to stat() %s: %s", file, strerror(errno)); + return -1; + } + file_sz = st.st_size; + mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE)); + fd = open(file, O_RDONLY); + if (fd < 0) { + p_err("failed to open() %s: %s", file, strerror(errno)); + return -1; + } + obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0); + if (obj_data == MAP_FAILED) { + obj_data = NULL; + p_err("failed to mmap() %s: %s", file, strerror(errno)); + goto out; + } + get_obj_name(obj_name, file); + opts.object_name = obj_name; + obj = bpf_object__open_mem(obj_data, file_sz, &opts); + if (IS_ERR(obj)) { + char err_buf[256]; + + libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf)); + p_err("failed to open BPF object file: %s", err_buf); + obj = NULL; + goto out; + } + + bpf_object__for_each_map(map, obj) { + ident = get_map_ident(map); + if (!ident) { + p_err("ignoring unrecognized internal map '%s'...", + bpf_map__name(map)); + continue; + } + map_cnt++; + } + bpf_object__for_each_program(prog, obj) { + prog_cnt++; + } + + get_header_guard(header_guard, obj_name); + codegen("\ + \n\ + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\ + \n\ + /* THIS FILE IS AUTOGENERATED! 
*/ \n\ + #ifndef %2$s \n\ + #define %2$s \n\ + \n\ + #include <stdlib.h> \n\ + #include <bpf/libbpf.h> \n\ + \n\ + struct %1$s { \n\ + struct bpf_object_skeleton *skeleton; \n\ + struct bpf_object *obj; \n\ + ", + obj_name, header_guard + ); + + if (map_cnt) { + printf("\tstruct {\n"); + bpf_object__for_each_map(map, obj) { + ident = get_map_ident(map); + if (!ident) + continue; + printf("\t\tstruct bpf_map *%s;\n", ident); + } + printf("\t} maps;\n"); + } + + if (prog_cnt) { + printf("\tstruct {\n"); + bpf_object__for_each_program(prog, obj) { + printf("\t\tstruct bpf_program *%s;\n", + bpf_program__name(prog)); + } + printf("\t} progs;\n"); + printf("\tstruct {\n"); + bpf_object__for_each_program(prog, obj) { + printf("\t\tstruct bpf_link *%s;\n", + bpf_program__name(prog)); + } + printf("\t} links;\n"); + } + + btf = bpf_object__btf(obj); + if (btf) { + err = codegen_datasecs(obj, obj_name); + if (err) + goto out; + } + + codegen("\ + \n\ + }; \n\ + \n\ + static void \n\ + %1$s__destroy(struct %1$s *obj) \n\ + { \n\ + if (!obj) \n\ + return; \n\ + if (obj->skeleton) \n\ + bpf_object__destroy_skeleton(obj->skeleton);\n\ + free(obj); \n\ + } \n\ + \n\ + static inline int \n\ + %1$s__create_skeleton(struct %1$s *obj); \n\ + \n\ + static inline struct %1$s * \n\ + %1$s__open_opts(const struct bpf_object_open_opts *opts) \n\ + { \n\ + struct %1$s *obj; \n\ + \n\ + obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ + if (!obj) \n\ + return NULL; \n\ + if (%1$s__create_skeleton(obj)) \n\ + goto err; \n\ + if (bpf_object__open_skeleton(obj->skeleton, opts)) \n\ + goto err; \n\ + \n\ + return obj; \n\ + err: \n\ + %1$s__destroy(obj); \n\ + return NULL; \n\ + } \n\ + \n\ + static inline struct %1$s * \n\ + %1$s__open(void) \n\ + { \n\ + return %1$s__open_opts(NULL); \n\ + } \n\ + \n\ + static inline int \n\ + %1$s__load(struct %1$s *obj) \n\ + { \n\ + return bpf_object__load_skeleton(obj->skeleton); \n\ + } \n\ + \n\ + static inline struct %1$s * \n\ + %1$s__open_and_load(void) \n\ + { \n\ + struct %1$s *obj; \n\ + \n\ + obj = %1$s__open(); \n\ + if (!obj) \n\ + return NULL; \n\ + if (%1$s__load(obj)) { \n\ + %1$s__destroy(obj); \n\ + return NULL; \n\ + } \n\ + return obj; \n\ + } \n\ + \n\ + static inline int \n\ + %1$s__attach(struct %1$s *obj) \n\ + { \n\ + return bpf_object__attach_skeleton(obj->skeleton); \n\ + } \n\ + \n\ + static inline void \n\ + %1$s__detach(struct %1$s *obj) \n\ + { \n\ + return bpf_object__detach_skeleton(obj->skeleton); \n\ + } \n\ + ", + obj_name + ); + + codegen("\ + \n\ + \n\ + static inline int \n\ + %1$s__create_skeleton(struct %1$s *obj) \n\ + { \n\ + struct bpf_object_skeleton *s; \n\ + \n\ + s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\ + if (!s) \n\ + return -1; \n\ + obj->skeleton = s; \n\ + \n\ + s->sz = sizeof(*s); \n\ + s->name = \"%1$s\"; \n\ + s->obj = &obj->obj; \n\ + ", + obj_name + ); + if (map_cnt) { + codegen("\ + \n\ + \n\ + /* maps */ \n\ + s->map_cnt = %zu; \n\ + s->map_skel_sz = sizeof(*s->maps); \n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ + if (!s->maps) \n\ + goto err; \n\ + ", + map_cnt + ); + i = 0; + bpf_object__for_each_map(map, obj) { + ident = get_map_ident(map); + + if (!ident) + continue; + + codegen("\ + \n\ + \n\ + s->maps[%zu].name = \"%s\"; \n\ + s->maps[%zu].map = &obj->maps.%s; \n\ + ", + i, bpf_map__name(map), i, ident); + /* memory-mapped internal maps */ + if (bpf_map__is_internal(map) && + (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) { + printf("\ts->maps[%zu].mmaped = 
(void **)&obj->%s;\n", + i, ident); + } + i++; + } + } + if (prog_cnt) { + codegen("\ + \n\ + \n\ + /* programs */ \n\ + s->prog_cnt = %zu; \n\ + s->prog_skel_sz = sizeof(*s->progs); \n\ + s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\ + if (!s->progs) \n\ + goto err; \n\ + ", + prog_cnt + ); + i = 0; + bpf_object__for_each_program(prog, obj) { + codegen("\ + \n\ + \n\ + s->progs[%1$zu].name = \"%2$s\"; \n\ + s->progs[%1$zu].prog = &obj->progs.%2$s;\n\ + s->progs[%1$zu].link = &obj->links.%2$s;\n\ + ", + i, bpf_program__name(prog)); + i++; + } + } + codegen("\ + \n\ + \n\ + s->data_sz = %d; \n\ + s->data = (void *)\"\\ \n\ + ", + file_sz); + + /* embed contents of BPF object file */ + for (i = 0, len = 0; i < file_sz; i++) { + int w = obj_data[i] ? 4 : 2; + + len += w; + if (len > 78) { + printf("\\\n"); + len = w; + } + if (!obj_data[i]) + printf("\\0"); + else + printf("\\x%02x", (unsigned char)obj_data[i]); + } + + codegen("\ + \n\ + \"; \n\ + \n\ + return 0; \n\ + err: \n\ + bpf_object__destroy_skeleton(s); \n\ + return -1; \n\ + } \n\ + \n\ + #endif /* %s */ \n\ + ", + header_guard); + err = 0; +out: + bpf_object__close(obj); + if (obj_data) + munmap(obj_data, mmap_sz); + close(fd); + return err; +} + +static int do_help(int argc, char **argv) +{ + if (json_output) { + jsonw_null(json_wtr); + return 0; + } + + fprintf(stderr, + "Usage: %1$s %2$s skeleton FILE\n" + " %1$s %2$s help\n" + "\n" + " " HELP_SPEC_OPTIONS "\n" + "", + bin_name, "gen"); + + return 0; +} + +static const struct cmd cmds[] = { + { "skeleton", do_skeleton }, + { "help", do_help }, + { 0 } +}; + +int do_gen(int argc, char **argv) +{ + return cmd_select(cmds, argc, argv, do_help); +} diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c new file mode 100644 index 000000000000..3b1aad7535dd --- /dev/null +++ b/tools/bpf/bpftool/iter.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// Copyright (C) 2020 Facebook + +#define _GNU_SOURCE +#include <unistd.h> +#include <linux/err.h> +#include <bpf/libbpf.h> + +#include "main.h" + +static int do_pin(int argc, char **argv) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts); + union bpf_iter_link_info linfo; + const char *objfile, *path; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link; + int err = -1, map_fd = -1; + + if (!REQ_ARGS(2)) + usage(); + + objfile = GET_ARG(); + path = GET_ARG(); + + /* optional arguments */ + if (argc) { + if (is_prefix(*argv, "map")) { + NEXT_ARG(); + + if (!REQ_ARGS(2)) { + p_err("incorrect map spec"); + return -1; + } + + map_fd = map_parse_fd(&argc, &argv); + if (map_fd < 0) + return -1; + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + iter_opts.link_info = &linfo; + iter_opts.link_info_len = sizeof(linfo); + } + } + + obj = bpf_object__open(objfile); + if (IS_ERR(obj)) { + p_err("can't open objfile %s", objfile); + goto close_map_fd; + } + + err = bpf_object__load(obj); + if (err) { + p_err("can't load objfile %s", objfile); + goto close_obj; + } + + prog = bpf_program__next(NULL, obj); + if (!prog) { + p_err("can't find bpf program in objfile %s", objfile); + goto close_obj; + } + + link = bpf_program__attach_iter(prog, &iter_opts); + if (IS_ERR(link)) { + err = PTR_ERR(link); + p_err("attach_iter failed for program %s", + bpf_program__name(prog)); + goto close_obj; + } + + err = mount_bpffs_for_pin(path); + if (err) + goto close_link; + + err = bpf_link__pin(link, path); + if (err) { + p_err("pin_iter failed 
for program %s to path %s", + bpf_program__name(prog), path); + goto close_link; + } + +close_link: + bpf_link__destroy(link); +close_obj: + bpf_object__close(obj); +close_map_fd: + if (map_fd >= 0) + close(map_fd); + return err; +} + +static int do_help(int argc, char **argv) +{ + fprintf(stderr, + "Usage: %1$s %2$s pin OBJ PATH [map MAP]\n" + " %1$s %2$s help\n" + " " HELP_SPEC_MAP "\n" + "", + bin_name, "iter"); + + return 0; +} + +static const struct cmd cmds[] = { + { "help", do_help }, + { "pin", do_pin }, + { 0 } +}; + +int do_iter(int argc, char **argv) +{ + return cmd_select(cmds, argc, argv, do_help); +} diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c index bfed711258ce..e7e7eee9f172 100644 --- a/tools/bpf/bpftool/jit_disasm.c +++ b/tools/bpf/bpftool/jit_disasm.c @@ -15,7 +15,6 @@ #include <stdio.h> #include <stdarg.h> #include <stdint.h> -#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <unistd.h> @@ -24,7 +23,7 @@ #include <dis-asm.h> #include <sys/stat.h> #include <limits.h> -#include <libbpf.h> +#include <bpf/libbpf.h> #include "json_writer.h" #include "main.h" diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c index 86501cd3c763..7fea83bedf48 100644 --- a/tools/bpf/bpftool/json_writer.c +++ b/tools/bpf/bpftool/json_writer.c @@ -119,6 +119,12 @@ void jsonw_pretty(json_writer_t *self, bool on) self->pretty = on; } +void jsonw_reset(json_writer_t *self) +{ + assert(self->depth == 0); + self->sep = '\0'; +} + /* Basic blocks */ static void jsonw_begin(json_writer_t *self, int c) { diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h index 35cf1f00f96c..8ace65cdb92f 100644 --- a/tools/bpf/bpftool/json_writer.h +++ b/tools/bpf/bpftool/json_writer.h @@ -27,6 +27,9 @@ void jsonw_destroy(json_writer_t **self_p); /* Cause output to have pretty whitespace */ void jsonw_pretty(json_writer_t *self, bool on); +/* Reset separator to create new JSON */ +void jsonw_reset(json_writer_t *self); + /* Add property name */ void jsonw_name(json_writer_t *self, const char *name); diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c new file mode 100644 index 000000000000..e77e1525d20a --- /dev/null +++ b/tools/bpf/bpftool/link.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2020 Facebook */ + +#include <errno.h> +#include <net/if.h> +#include <stdio.h> +#include <unistd.h> + +#include <bpf/bpf.h> + +#include "json_writer.h" +#include "main.h" + +static const char * const link_type_name[] = { + [BPF_LINK_TYPE_UNSPEC] = "unspec", + [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", + [BPF_LINK_TYPE_TRACING] = "tracing", + [BPF_LINK_TYPE_CGROUP] = "cgroup", + [BPF_LINK_TYPE_ITER] = "iter", + [BPF_LINK_TYPE_NETNS] = "netns", +}; + +static int link_parse_fd(int *argc, char ***argv) +{ + int fd; + + if (is_prefix(**argv, "id")) { + unsigned int id; + char *endptr; + + NEXT_ARGP(); + + id = strtoul(**argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as ID", **argv); + return -1; + } + NEXT_ARGP(); + + fd = bpf_link_get_fd_by_id(id); + if (fd < 0) + p_err("failed to get link with ID %d: %s", id, strerror(errno)); + return fd; + } else if (is_prefix(**argv, "pinned")) { + char *path; + + NEXT_ARGP(); + + path = **argv; + NEXT_ARGP(); + + return open_obj_pinned_any(path, BPF_OBJ_LINK); + } + + p_err("expected 'id' or 'pinned', got: '%s'?", **argv); + return -1; +} + +static void +show_link_header_json(struct bpf_link_info *info, 
json_writer_t *wtr)
+{
+	jsonw_uint_field(wtr, "id", info->id);
+	if (info->type < ARRAY_SIZE(link_type_name))
+		jsonw_string_field(wtr, "type", link_type_name[info->type]);
+	else
+		jsonw_uint_field(wtr, "type", info->type);
+
+	jsonw_uint_field(wtr, "prog_id", info->prog_id);
+}
+
+static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
+{
+	if (attach_type < ARRAY_SIZE(attach_type_name))
+		jsonw_string_field(wtr, "attach_type",
+				   attach_type_name[attach_type]);
+	else
+		jsonw_uint_field(wtr, "attach_type", attach_type);
+}
+
+static bool is_iter_map_target(const char *target_name)
+{
+	return strcmp(target_name, "bpf_map_elem") == 0 ||
+	       strcmp(target_name, "bpf_sk_storage_map") == 0;
+}
+
+static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+	const char *target_name = u64_to_ptr(info->iter.target_name);
+
+	jsonw_string_field(wtr, "target_name", target_name);
+
+	if (is_iter_map_target(target_name))
+		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
+}
+
+static int get_prog_info(int prog_id, struct bpf_prog_info *info)
+{
+	__u32 len = sizeof(*info);
+	int err, prog_fd;
+
+	prog_fd = bpf_prog_get_fd_by_id(prog_id);
+	if (prog_fd < 0)
+		return prog_fd;
+
+	memset(info, 0, sizeof(*info));
+	err = bpf_obj_get_info_by_fd(prog_fd, info, &len);
+	if (err)
+		p_err("can't get prog info: %s", strerror(errno));
+	close(prog_fd);
+	return err;
+}
+
+static int show_link_close_json(int fd, struct bpf_link_info *info)
+{
+	struct bpf_prog_info prog_info;
+	int err;
+
+	jsonw_start_object(json_wtr);
+
+	show_link_header_json(info, json_wtr);
+
+	switch (info->type) {
+	case BPF_LINK_TYPE_RAW_TRACEPOINT:
+		jsonw_string_field(json_wtr, "tp_name",
+				   u64_to_ptr(info->raw_tracepoint.tp_name));
+		break;
+	case BPF_LINK_TYPE_TRACING:
+		err = get_prog_info(info->prog_id, &prog_info);
+		if (err)
+			return err;
+
+		if (prog_info.type < prog_type_name_size)
+			jsonw_string_field(json_wtr, "prog_type",
+					   prog_type_name[prog_info.type]);
+		else
+			jsonw_uint_field(json_wtr, "prog_type",
+					 prog_info.type);
+
+		show_link_attach_type_json(info->tracing.attach_type,
+					   json_wtr);
+		break;
+	case BPF_LINK_TYPE_CGROUP:
+		jsonw_lluint_field(json_wtr, "cgroup_id",
+				   info->cgroup.cgroup_id);
+		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
+		break;
+	case BPF_LINK_TYPE_ITER:
+		show_iter_json(info, json_wtr);
+		break;
+	case BPF_LINK_TYPE_NETNS:
+		jsonw_uint_field(json_wtr, "netns_ino",
+				 info->netns.netns_ino);
+		show_link_attach_type_json(info->netns.attach_type, json_wtr);
+		break;
+	default:
+		break;
+	}
+
+	if (!hash_empty(link_table.table)) {
+		struct pinned_obj *obj;
+
+		jsonw_name(json_wtr, "pinned");
+		jsonw_start_array(json_wtr);
+		hash_for_each_possible(link_table.table, obj, hash, info->id) {
+			if (obj->id == info->id)
+				jsonw_string(json_wtr, obj->path);
+		}
+		jsonw_end_array(json_wtr);
+	}
+
+	emit_obj_refs_json(&refs_table, info->id, json_wtr);
+
+	jsonw_end_object(json_wtr);
+
+	return 0;
+}
+
+static void show_link_header_plain(struct bpf_link_info *info)
+{
+	printf("%u: ", info->id);
+	if (info->type < ARRAY_SIZE(link_type_name))
+		printf("%s ", link_type_name[info->type]);
+	else
+		printf("type %u ", info->type);
+
+	printf("prog %u ", info->prog_id);
+}
+
+static void show_link_attach_type_plain(__u32 attach_type)
+{
+	if (attach_type < ARRAY_SIZE(attach_type_name))
+		printf("attach_type %s ", attach_type_name[attach_type]);
+	else
+		printf("attach_type %u ", attach_type);
+}
+
+static void show_iter_plain(struct bpf_link_info
*info)
+{
+	const char *target_name = u64_to_ptr(info->iter.target_name);
+
+	printf("target_name %s ", target_name);
+
+	if (is_iter_map_target(target_name))
+		printf("map_id %u ", info->iter.map.map_id);
+}
+
+static int show_link_close_plain(int fd, struct bpf_link_info *info)
+{
+	struct bpf_prog_info prog_info;
+	int err;
+
+	show_link_header_plain(info);
+
+	switch (info->type) {
+	case BPF_LINK_TYPE_RAW_TRACEPOINT:
+		printf("\n\ttp '%s' ",
+		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
+		break;
+	case BPF_LINK_TYPE_TRACING:
+		err = get_prog_info(info->prog_id, &prog_info);
+		if (err)
+			return err;
+
+		if (prog_info.type < prog_type_name_size)
+			printf("\n\tprog_type %s ",
+			       prog_type_name[prog_info.type]);
+		else
+			printf("\n\tprog_type %u ", prog_info.type);
+
+		show_link_attach_type_plain(info->tracing.attach_type);
+		break;
+	case BPF_LINK_TYPE_CGROUP:
+		printf("\n\tcgroup_id %llu ", (unsigned long long)info->cgroup.cgroup_id);
+		show_link_attach_type_plain(info->cgroup.attach_type);
+		break;
+	case BPF_LINK_TYPE_ITER:
+		show_iter_plain(info);
+		break;
+	case BPF_LINK_TYPE_NETNS:
+		printf("\n\tnetns_ino %u ", info->netns.netns_ino);
+		show_link_attach_type_plain(info->netns.attach_type);
+		break;
+	default:
+		break;
+	}
+
+	if (!hash_empty(link_table.table)) {
+		struct pinned_obj *obj;
+
+		hash_for_each_possible(link_table.table, obj, hash, info->id) {
+			if (obj->id == info->id)
+				printf("\n\tpinned %s", obj->path);
+		}
+	}
+	emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
+
+	printf("\n");
+
+	return 0;
+}
+
+static int do_show_link(int fd)
+{
+	struct bpf_link_info info;
+	__u32 len = sizeof(info);
+	char buf[256];
+	int err;
+
+	memset(&info, 0, sizeof(info));
+again:
+	err = bpf_obj_get_info_by_fd(fd, &info, &len);
+	if (err) {
+		p_err("can't get link info: %s",
+		      strerror(errno));
+		close(fd);
+		return err;
+	}
+	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
+	    !info.raw_tracepoint.tp_name) {
+		info.raw_tracepoint.tp_name = (unsigned long)&buf;
+		info.raw_tracepoint.tp_name_len = sizeof(buf);
+		goto again;
+	}
+	if (info.type == BPF_LINK_TYPE_ITER &&
+	    !info.iter.target_name) {
+		info.iter.target_name = (unsigned long)&buf;
+		info.iter.target_name_len = sizeof(buf);
+		goto again;
+	}
+
+	if (json_output)
+		show_link_close_json(fd, &info);
+	else
+		show_link_close_plain(fd, &info);
+
+	close(fd);
+	return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+	__u32 id = 0;
+	int err, fd;
+
+	if (show_pinned)
+		build_pinned_obj_table(&link_table, BPF_OBJ_LINK);
+	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
+
+	if (argc == 2) {
+		fd = link_parse_fd(&argc, &argv);
+		if (fd < 0)
+			return fd;
+		return do_show_link(fd);
+	}
+
+	if (argc)
+		return BAD_ARG();
+
+	if (json_output)
+		jsonw_start_array(json_wtr);
+	while (true) {
+		err = bpf_link_get_next_id(id, &id);
+		if (err) {
+			if (errno == ENOENT)
+				break;
+			p_err("can't get next link: %s%s", strerror(errno),
+			      errno == EINVAL ? " -- kernel too old?" : "");
+			break;
+		}
+
+		fd = bpf_link_get_fd_by_id(id);
+		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
+			p_err("can't get link by id (%u): %s",
+			      id, strerror(errno));
+			break;
+		}
+
+		err = do_show_link(fd);
+		if (err)
+			break;
+	}
+	if (json_output)
+		jsonw_end_array(json_wtr);
+
+	delete_obj_refs_table(&refs_table);
+
+	return errno == ENOENT ?
0 : -1;
+}
+
+static int do_pin(int argc, char **argv)
+{
+	int err;
+
+	err = do_pin_any(argc, argv, link_parse_fd);
+	if (!err && json_output)
+		jsonw_null(json_wtr);
+	return err;
+}
+
+static int do_detach(int argc, char **argv)
+{
+	int err, fd;
+
+	if (argc != 2) {
+		p_err("link specifier is invalid or missing");
+		return 1;
+	}
+
+	fd = link_parse_fd(&argc, &argv);
+	if (fd < 0)
+		return 1;
+
+	err = bpf_link_detach(fd);
+	if (err)
+		err = -errno;
+	close(fd);
+	if (err) {
+		p_err("failed link detach: %s", strerror(-err));
+		return 1;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+	if (json_output) {
+		jsonw_null(json_wtr);
+		return 0;
+	}
+
+	fprintf(stderr,
+		"Usage: %1$s %2$s { show | list } [LINK]\n"
+		"       %1$s %2$s pin LINK FILE\n"
+		"       %1$s %2$s detach LINK\n"
+		"       %1$s %2$s help\n"
+		"\n"
+		"       " HELP_SPEC_LINK "\n"
+		"       " HELP_SPEC_OPTIONS "\n"
+		"",
+		bin_name, argv[-2]);
+
+	return 0;
+}
+
+static const struct cmd cmds[] = {
+	{ "show",	do_show },
+	{ "list",	do_show },
+	{ "help",	do_help },
+	{ "pin",	do_pin },
+	{ "detach",	do_detach },
+	{ 0 }
+};
+
+int do_link(int argc, char **argv)
+{
+	return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 93d008687020..682daaa49e6a 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -9,8 +9,8 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <bpf.h>
-#include <libbpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
 
 #include "main.h"
 
@@ -27,9 +27,11 @@ bool json_output;
 bool show_pinned;
 bool block_mount;
 bool verifier_logs;
-int bpf_flags;
+bool relaxed_maps;
 struct pinned_obj_table prog_table;
 struct pinned_obj_table map_table;
+struct pinned_obj_table link_table;
+struct obj_refs_table refs_table;
 
 static void __noreturn clean_and_exit(int i)
 {
@@ -58,7 +60,7 @@ static int do_help(int argc, char **argv)
 		"       %s batch file FILE\n"
 		"       %s version\n"
 		"\n"
-		"       OBJECT := { prog | map | cgroup | perf | net | feature | btf }\n"
+		"       OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter }\n"
 		"       " HELP_SPEC_OPTIONS "\n"
 		"",
 		bin_name, bin_name, bin_name);
@@ -68,24 +70,46 @@
 static int do_version(int argc, char **argv)
 {
+#ifdef HAVE_LIBBFD_SUPPORT
+	const bool has_libbfd = true;
+#else
+	const bool has_libbfd = false;
+#endif
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+	const bool has_skeletons = false;
+#else
+	const bool has_skeletons = true;
+#endif
+
 	if (json_output) {
-		jsonw_start_object(json_wtr);
+		jsonw_start_object(json_wtr);	/* root object */
+
 		jsonw_name(json_wtr, "version");
 		jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
-		jsonw_end_object(json_wtr);
+
+		jsonw_name(json_wtr, "features");
+		jsonw_start_object(json_wtr);	/* features */
+		jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
+		jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+		jsonw_end_object(json_wtr);	/* features */
+
+		jsonw_end_object(json_wtr);	/* root object */
 	} else {
+		unsigned int nb_features = 0;
+
 		printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
+		printf("features:");
+		if (has_libbfd) {
+			printf(" libbfd");
+			nb_features++;
+		}
+		if (has_skeletons)
+			printf("%s skeletons", nb_features++ ?
"," : ""); + printf("\n"); } return 0; } -static int __printf(2, 0) -print_all_levels(__maybe_unused enum libbpf_print_level level, - const char *format, va_list args) -{ - return vfprintf(stderr, format, args); -} - int cmd_select(const struct cmd *cmds, int argc, char **argv, int (*help)(int argc, char **argv)) { @@ -98,9 +122,16 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv, if (argc < 1 && cmds[0].func) return cmds[0].func(argc, argv); - for (i = 0; cmds[i].func; i++) - if (is_prefix(*argv, cmds[i].cmd)) + for (i = 0; cmds[i].cmd; i++) { + if (is_prefix(*argv, cmds[i].cmd)) { + if (!cmds[i].func) { + p_err("command '%s' is not supported in bootstrap mode", + cmds[i].cmd); + return -1; + } return cmds[i].func(argc - 1, argv + 1); + } + } help(argc - 1, argv + 1); @@ -222,11 +253,15 @@ static const struct cmd cmds[] = { { "batch", do_batch }, { "prog", do_prog }, { "map", do_map }, + { "link", do_link }, { "cgroup", do_cgroup }, { "perf", do_perf }, { "net", do_net }, { "feature", do_feature }, { "btf", do_btf }, + { "gen", do_gen }, + { "struct_ops", do_struct_ops }, + { "iter", do_iter }, { "version", do_version }, { 0 } }; @@ -369,6 +404,7 @@ int main(int argc, char **argv) hash_init(prog_table.table); hash_init(map_table.table); + hash_init(link_table.table); opterr = 0; while ((opt = getopt_long(argc, argv, "Vhpjfmnd", @@ -396,7 +432,7 @@ int main(int argc, char **argv) show_pinned = true; break; case 'm': - bpf_flags = MAPS_RELAX_COMPAT; + relaxed_maps = true; break; case 'n': block_mount = true; @@ -427,6 +463,7 @@ int main(int argc, char **argv) if (show_pinned) { delete_pinned_obj_table(&prog_table); delete_pinned_obj_table(&map_table); + delete_pinned_obj_table(&link_table); } return ret; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index af9ad56c303a..c46e52137b87 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -14,9 +14,22 @@ #include <linux/hashtable.h> #include <tools/libc_compat.h> +#include <bpf/libbpf.h> + #include "json_writer.h" -#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) +/* Make sure we do not use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64)(unsigned long)ptr; +} + +static inline void *u64_to_ptr(__u64 ptr) +{ + return (void *)(unsigned long)ptr; +} #define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) #define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) @@ -42,49 +55,30 @@ #define BPF_TAG_FMT "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" #define HELP_SPEC_PROGRAM \ - "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }" + "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }" #define HELP_SPEC_OPTIONS \ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \ "\t {-m|--mapcompat} | {-n|--nomount} }" #define HELP_SPEC_MAP \ - "MAP := { id MAP_ID | pinned FILE }" + "MAP := { id MAP_ID | pinned FILE | name MAP_NAME }" +#define HELP_SPEC_LINK \ + "LINK := { id LINK_ID | pinned FILE }" -static const char * const prog_type_name[] = { - [BPF_PROG_TYPE_UNSPEC] = "unspec", - [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", - [BPF_PROG_TYPE_KPROBE] = "kprobe", - [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", - [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", - [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", - [BPF_PROG_TYPE_XDP] = "xdp", - [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", - [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", - [BPF_PROG_TYPE_CGROUP_SOCK] = 
"cgroup_sock", - [BPF_PROG_TYPE_LWT_IN] = "lwt_in", - [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", - [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", - [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", - [BPF_PROG_TYPE_SK_SKB] = "sk_skb", - [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", - [BPF_PROG_TYPE_SK_MSG] = "sk_msg", - [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", - [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", - [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", - [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", - [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", - [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", - [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", - [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", - [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", -}; +extern const char * const prog_type_name[]; +extern const size_t prog_type_name_size; + +extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE]; extern const char * const map_type_name[]; extern const size_t map_type_name_size; +/* keep in sync with the definition in skeleton/pid_iter.bpf.c */ enum bpf_obj_type { BPF_OBJ_UNKNOWN, BPF_OBJ_PROG, BPF_OBJ_MAP, + BPF_OBJ_LINK, + BPF_OBJ_BTF, }; extern const char *bin_name; @@ -92,11 +86,14 @@ extern const char *bin_name; extern json_writer_t *json_wtr; extern bool json_output; extern bool show_pinned; +extern bool show_pids; extern bool block_mount; extern bool verifier_logs; -extern int bpf_flags; +extern bool relaxed_maps; extern struct pinned_obj_table prog_table; extern struct pinned_obj_table map_table; +extern struct pinned_obj_table link_table; +extern struct obj_refs_table refs_table; void __printf(1, 2) p_err(const char *fmt, ...); void __printf(1, 2) p_info(const char *fmt, ...); @@ -120,12 +117,35 @@ struct pinned_obj { struct hlist_node hash; }; +struct obj_refs_table { + DECLARE_HASHTABLE(table, 16); +}; + +struct obj_ref { + int pid; + char comm[16]; +}; + +struct obj_refs { + struct hlist_node node; + __u32 id; + int ref_cnt; + struct obj_ref *refs; +}; + struct btf; struct bpf_line_info; int build_pinned_obj_table(struct pinned_obj_table *table, enum bpf_obj_type type); void delete_pinned_obj_table(struct pinned_obj_table *tab); +__weak int build_obj_refs_table(struct obj_refs_table *table, + enum bpf_obj_type type); +__weak void delete_obj_refs_table(struct obj_refs_table *table); +__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, + json_writer_t *json_wtr); +__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, + const char *prefix); void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); @@ -140,25 +160,34 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv, int get_fd_type(int fd); const char *get_fd_type_name(enum bpf_obj_type type); char *get_fdinfo(int fd, const char *key); -int open_obj_pinned(char *path, bool quiet); -int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type); +int open_obj_pinned(const char *path, bool quiet); +int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type); int mount_bpffs_for_pin(const char *name); -int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)); +int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***)); int do_pin_fd(int fd, const char *name); -int do_prog(int argc, char **arg); -int do_map(int argc, char **arg); -int do_event_pipe(int argc, char **argv); -int do_cgroup(int argc, char **arg); -int do_perf(int argc, char **arg); 
-int do_net(int argc, char **arg); -int do_tracelog(int argc, char **arg); -int do_feature(int argc, char **argv); +/* commands available in bootstrap mode */ +int do_gen(int argc, char **argv); int do_btf(int argc, char **argv); +/* non-bootstrap only commands */ +int do_prog(int argc, char **arg) __weak; +int do_map(int argc, char **arg) __weak; +int do_link(int argc, char **arg) __weak; +int do_event_pipe(int argc, char **argv) __weak; +int do_cgroup(int argc, char **arg) __weak; +int do_perf(int argc, char **arg) __weak; +int do_net(int argc, char **arg) __weak; +int do_tracelog(int argc, char **arg) __weak; +int do_feature(int argc, char **argv) __weak; +int do_struct_ops(int argc, char **argv) __weak; +int do_iter(int argc, char **argv) __weak; + int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what); int prog_parse_fd(int *argc, char ***argv); +int prog_parse_fds(int *argc, char ***argv, int **fds); int map_parse_fd(int *argc, char ***argv); +int map_parse_fds(int *argc, char ***argv, int **fds); int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); struct bpf_prog_linfo; @@ -199,6 +228,7 @@ struct btf_dumper { const struct btf *btf; json_writer_t *jw; bool is_plain_text; + bool prog_id_as_func_ptr; }; /* btf_dumper_type - print data along with type information @@ -225,4 +255,7 @@ struct tcmsg; int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb); int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind, const char *devname, int ifindex); + +int print_all_levels(__maybe_unused enum libbpf_print_level level, + const char *format, va_list args); #endif diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index de61d73b9030..a7efbd84fbcc 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -15,9 +15,9 @@ #include <sys/types.h> #include <sys/stat.h> -#include <bpf.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> -#include "btf.h" #include "json_writer.h" #include "main.h" @@ -48,6 +48,9 @@ const char * const map_type_name[] = { [BPF_MAP_TYPE_QUEUE] = "queue", [BPF_MAP_TYPE_STACK] = "stack", [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", + [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", + [BPF_MAP_TYPE_RINGBUF] = "ringbuf", + [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", }; const size_t map_type_name_size = ARRAY_SIZE(map_type_name); @@ -91,65 +94,11 @@ static void *alloc_value(struct bpf_map_info *info) return malloc(info->value_size); } -int map_parse_fd(int *argc, char ***argv) -{ - int fd; - - if (is_prefix(**argv, "id")) { - unsigned int id; - char *endptr; - - NEXT_ARGP(); - - id = strtoul(**argv, &endptr, 0); - if (*endptr) { - p_err("can't parse %s as ID", **argv); - return -1; - } - NEXT_ARGP(); - - fd = bpf_map_get_fd_by_id(id); - if (fd < 0) - p_err("get map by id (%u): %s", id, strerror(errno)); - return fd; - } else if (is_prefix(**argv, "pinned")) { - char *path; - - NEXT_ARGP(); - - path = **argv; - NEXT_ARGP(); - - return open_obj_pinned_any(path, BPF_OBJ_MAP); - } - - p_err("expected 'id' or 'pinned', got: '%s'?", **argv); - return -1; -} - -int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) -{ - int err; - int fd; - - fd = map_parse_fd(argc, argv); - if (fd < 0) - return -1; - - err = bpf_obj_get_info_by_fd(fd, info, info_len); - if (err) { - p_err("can't get map info: %s", strerror(errno)); - close(fd); - return err; - } - - return fd; -} - static int do_dump_btf(const struct btf_dumper *d, struct bpf_map_info *map_info, void *key, void *value) { + 
__u32 value_id; int ret; /* start of key-value pair */ @@ -163,9 +112,12 @@ static int do_dump_btf(const struct btf_dumper *d, goto err_end_obj; } + value_id = map_info->btf_vmlinux_value_type_id ? + : map_info->btf_value_type_id; + if (!map_is_per_cpu(map_info->type)) { jsonw_name(d->jw, "value"); - ret = btf_dumper_type(d, map_info->btf_value_type_id, value); + ret = btf_dumper_type(d, value_id, value); } else { unsigned int i, n, step; @@ -177,8 +129,7 @@ static int do_dump_btf(const struct btf_dumper *d, jsonw_start_object(d->jw); jsonw_int_field(d->jw, "cpu", i); jsonw_name(d->jw, "value"); - ret = btf_dumper_type(d, map_info->btf_value_type_id, - value + i * step); + ret = btf_dumper_type(d, value_id, value + i * step); jsonw_end_object(d->jw); if (ret) break; @@ -262,8 +213,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key, jsonw_end_object(json_wtr); } -static void print_entry_error(struct bpf_map_info *info, unsigned char *key, - const char *error_msg) +static void +print_entry_error_msg(struct bpf_map_info *info, unsigned char *key, + const char *error_msg) { int msg_size = strlen(error_msg); bool single_line, break_names; @@ -281,6 +233,40 @@ static void print_entry_error(struct bpf_map_info *info, unsigned char *key, printf("\n"); } +static void +print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno) +{ + /* For prog_array maps or arrays of maps, failure to lookup the value + * means there is no entry for that key. Do not print an error message + * in that case. + */ + if ((map_is_map_of_maps(map_info->type) || + map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT) + return; + + if (json_output) { + jsonw_start_object(json_wtr); /* entry */ + jsonw_name(json_wtr, "key"); + print_hex_data_json(key, map_info->key_size); + jsonw_name(json_wtr, "value"); + jsonw_start_object(json_wtr); /* error */ + jsonw_string_field(json_wtr, "error", strerror(lookup_errno)); + jsonw_end_object(json_wtr); /* error */ + jsonw_end_object(json_wtr); /* entry */ + } else { + const char *msg = NULL; + + if (lookup_errno == ENOENT) + msg = "<no entry>"; + else if (lookup_errno == ENOSPC && + map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) + msg = "<cannot read>"; + + print_entry_error_msg(map_info, key, + msg ? 
: strerror(lookup_errno)); + } +} + static void print_entry_plain(struct bpf_map_info *info, unsigned char *key, unsigned char *value) { @@ -479,6 +465,21 @@ static int parse_elem(char **argv, struct bpf_map_info *info, return -1; } +static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr) +{ + jsonw_uint_field(wtr, "id", info->id); + if (info->type < ARRAY_SIZE(map_type_name)) + jsonw_string_field(wtr, "type", map_type_name[info->type]); + else + jsonw_uint_field(wtr, "type", info->type); + + if (*info->name) + jsonw_string_field(wtr, "name", info->name); + + jsonw_name(wtr, "flags"); + jsonw_printf(wtr, "%d", info->map_flags); +} + static int show_map_close_json(int fd, struct bpf_map_info *info) { char *memlock, *frozen_str; @@ -489,18 +490,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) jsonw_start_object(json_wtr); - jsonw_uint_field(json_wtr, "id", info->id); - if (info->type < ARRAY_SIZE(map_type_name)) - jsonw_string_field(json_wtr, "type", - map_type_name[info->type]); - else - jsonw_uint_field(json_wtr, "type", info->type); - - if (*info->name) - jsonw_string_field(json_wtr, "name", info->name); - - jsonw_name(json_wtr, "flags"); - jsonw_printf(json_wtr, "%d", info->map_flags); + show_map_header_json(info, json_wtr); print_dev_json(info->ifindex, info->netns_dev, info->netns_ino); @@ -519,7 +509,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) if (owner_prog_type) { unsigned int prog_type = atoi(owner_prog_type); - if (prog_type < ARRAY_SIZE(prog_type_name)) + if (prog_type < prog_type_name_size) jsonw_string_field(json_wtr, "owner_prog_type", prog_type_name[prog_type]); else @@ -556,19 +546,15 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) jsonw_end_array(json_wtr); } + emit_obj_refs_json(&refs_table, info->id, json_wtr); + jsonw_end_object(json_wtr); return 0; } -static int show_map_close_plain(int fd, struct bpf_map_info *info) +static void show_map_header_plain(struct bpf_map_info *info) { - char *memlock, *frozen_str; - int frozen = 0; - - memlock = get_fdinfo(fd, "memlock"); - frozen_str = get_fdinfo(fd, "frozen"); - printf("%u: ", info->id); if (info->type < ARRAY_SIZE(map_type_name)) printf("%s ", map_type_name[info->type]); @@ -581,6 +567,17 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) printf("flags 0x%x", info->map_flags); print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino); printf("\n"); +} + +static int show_map_close_plain(int fd, struct bpf_map_info *info) +{ + char *memlock, *frozen_str; + int frozen = 0; + + memlock = get_fdinfo(fd, "memlock"); + frozen_str = get_fdinfo(fd, "frozen"); + + show_map_header_plain(info); printf("\tkey %uB value %uB max_entries %u", info->key_size, info->value_size, info->max_entries); @@ -597,7 +594,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) if (owner_prog_type) { unsigned int prog_type = atoi(owner_prog_type); - if (prog_type < ARRAY_SIZE(prog_type_name)) + if (prog_type < prog_type_name_size) printf("owner_prog_type %s ", prog_type_name[prog_type]); else @@ -638,10 +635,56 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) if (frozen) printf("%sfrozen", info->btf_id ? 
" " : ""); + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + printf("\n"); return 0; } +static int do_show_subset(int argc, char **argv) +{ + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int *fds = NULL; + int nb_fds, i; + int err = -1; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = map_parse_fds(&argc, &argv, &fds); + if (nb_fds < 1) + goto exit_free; + + if (json_output && nb_fds > 1) + jsonw_start_array(json_wtr); /* root array */ + for (i = 0; i < nb_fds; i++) { + err = bpf_obj_get_info_by_fd(fds[i], &info, &len); + if (err) { + p_err("can't get map info: %s", + strerror(errno)); + for (; i < nb_fds; i++) + close(fds[i]); + break; + } + + if (json_output) + show_map_close_json(fds[i], &info); + else + show_map_close_plain(fds[i], &info); + + close(fds[i]); + } + if (json_output && nb_fds > 1) + jsonw_end_array(json_wtr); /* root array */ + +exit_free: + free(fds); + return err; +} + static int do_show(int argc, char **argv) { struct bpf_map_info info = {}; @@ -652,17 +695,10 @@ static int do_show(int argc, char **argv) if (show_pinned) build_pinned_obj_table(&map_table, BPF_OBJ_MAP); + build_obj_refs_table(&refs_table, BPF_OBJ_MAP); - if (argc == 2) { - fd = map_parse_fd_and_info(&argc, &argv, &info, &len); - if (fd < 0) - return -1; - - if (json_output) - return show_map_close_json(fd, &info); - else - return show_map_close_plain(fd, &info); - } + if (argc == 2) + return do_show_subset(argc, argv); if (argc) return BAD_ARG(); @@ -703,6 +739,8 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); + delete_obj_refs_table(&refs_table); + return errno == ENOENT ? 0 : -1; } @@ -710,81 +748,97 @@ static int dump_map_elem(int fd, void *key, void *value, struct bpf_map_info *map_info, struct btf *btf, json_writer_t *btf_wtr) { - int num_elems = 0; - int lookup_errno; - - if (!bpf_map_lookup_elem(fd, key, value)) { - if (json_output) { - print_entry_json(map_info, key, value, btf); - } else { - if (btf) { - struct btf_dumper d = { - .btf = btf, - .jw = btf_wtr, - .is_plain_text = true, - }; - - do_dump_btf(&d, map_info, key, value); - } else { - print_entry_plain(map_info, key, value); - } - num_elems++; - } - return num_elems; + if (bpf_map_lookup_elem(fd, key, value)) { + print_entry_error(map_info, key, errno); + return -1; } - /* lookup error handling */ - lookup_errno = errno; - - if (map_is_map_of_maps(map_info->type) || - map_is_map_of_progs(map_info->type)) - return 0; - if (json_output) { - jsonw_start_object(json_wtr); - jsonw_name(json_wtr, "key"); - print_hex_data_json(key, map_info->key_size); - jsonw_name(json_wtr, "value"); - jsonw_start_object(json_wtr); - jsonw_string_field(json_wtr, "error", strerror(lookup_errno)); - jsonw_end_object(json_wtr); - jsonw_end_object(json_wtr); + print_entry_json(map_info, key, value, btf); + } else if (btf) { + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, map_info, key, value); } else { - const char *msg = NULL; - - if (lookup_errno == ENOENT) - msg = "<no entry>"; - else if (lookup_errno == ENOSPC && - map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) - msg = "<cannot read>"; - - print_entry_error(map_info, key, - msg ? 
: strerror(lookup_errno)); + print_entry_plain(map_info, key, value); } return 0; } -static int do_dump(int argc, char **argv) +static int maps_have_btf(int *fds, int nb_fds) { struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int err, i; + + for (i = 0; i < nb_fds; i++) { + err = bpf_obj_get_info_by_fd(fds[i], &info, &len); + if (err) { + p_err("can't get map info: %s", strerror(errno)); + return -1; + } + + if (!info.btf_id) + return 0; + } + + return 1; +} + +static struct btf *btf_vmlinux; + +static struct btf *get_map_kv_btf(const struct bpf_map_info *info) +{ + struct btf *btf = NULL; + + if (info->btf_vmlinux_value_type_id) { + if (!btf_vmlinux) { + btf_vmlinux = libbpf_find_kernel_btf(); + if (IS_ERR(btf_vmlinux)) + p_err("failed to get kernel btf"); + } + return btf_vmlinux; + } else if (info->btf_value_type_id) { + int err; + + err = btf__get_from_id(info->btf_id, &btf); + if (err || !btf) { + p_err("failed to get btf"); + btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH); + } + } + + return btf; +} + +static void free_map_kv_btf(struct btf *btf) +{ + if (!IS_ERR(btf) && btf != btf_vmlinux) + btf__free(btf); +} + +static void free_btf_vmlinux(void) +{ + if (!IS_ERR(btf_vmlinux)) + btf__free(btf_vmlinux); +} + +static int +map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr, + bool show_header) +{ void *key, *value, *prev_key; unsigned int num_elems = 0; - __u32 len = sizeof(info); - json_writer_t *btf_wtr; struct btf *btf = NULL; int err; - int fd; - if (argc != 2) - usage(); - - fd = map_parse_fd_and_info(&argc, &argv, &info, &len); - if (fd < 0) - return -1; - - key = malloc(info.key_size); - value = alloc_value(&info); + key = malloc(info->key_size); + value = alloc_value(info); if (!key || !value) { p_err("mem alloc failed"); err = -1; @@ -793,30 +847,27 @@ static int do_dump(int argc, char **argv) prev_key = NULL; - err = btf__get_from_id(info.btf_id, &btf); - if (err) { - p_err("failed to get btf"); - goto exit_free; - } - - if (json_output) - jsonw_start_array(json_wtr); - else - if (btf) { - btf_wtr = get_btf_writer(); - if (!btf_wtr) { - p_info("failed to create json writer for btf. falling back to plain output"); - btf__free(btf); - btf = NULL; - } else { - jsonw_start_array(btf_wtr); - } + if (wtr) { + btf = get_map_kv_btf(info); + if (IS_ERR(btf)) { + err = PTR_ERR(btf); + goto exit_free; } - if (info.type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && - info.value_size != 8) + if (show_header) { + jsonw_start_object(wtr); /* map object */ + show_map_header_json(info, wtr); + jsonw_name(wtr, "elements"); + } + jsonw_start_array(wtr); /* elements */ + } else if (show_header) { + show_map_header_plain(info); + } + + if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && + info->value_size != 8) p_info("Warning: cannot read values from %s map with value_size != 8", - map_type_name[info.type]); + map_type_name[info->type]); while (true) { err = bpf_map_get_next_key(fd, prev_key, key); if (err) { @@ -824,15 +875,15 @@ static int do_dump(int argc, char **argv) err = 0; break; } - num_elems += dump_map_elem(fd, key, value, &info, btf, btf_wtr); + if (!dump_map_elem(fd, key, value, info, btf, wtr)) + num_elems++; prev_key = key; } - if (json_output) - jsonw_end_array(json_wtr); - else if (btf) { - jsonw_end_array(btf_wtr); - jsonw_destroy(&btf_wtr); + if (wtr) { + jsonw_end_array(wtr); /* elements */ + if (show_header) + jsonw_end_object(wtr); /* map object */ } else { printf("Found %u element%s\n", num_elems, num_elems != 1 ? 
"s" : ""); @@ -842,11 +893,78 @@ exit_free: free(key); free(value); close(fd); - btf__free(btf); + free_map_kv_btf(btf); return err; } +static int do_dump(int argc, char **argv) +{ + json_writer_t *wtr = NULL, *btf_wtr = NULL; + struct bpf_map_info info = {}; + int nb_fds, i = 0; + __u32 len = sizeof(info); + int *fds = NULL; + int err = -1; + + if (argc != 2) + usage(); + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = map_parse_fds(&argc, &argv, &fds); + if (nb_fds < 1) + goto exit_free; + + if (json_output) { + wtr = json_wtr; + } else { + int do_plain_btf; + + do_plain_btf = maps_have_btf(fds, nb_fds); + if (do_plain_btf < 0) + goto exit_close; + + if (do_plain_btf) { + btf_wtr = get_btf_writer(); + wtr = btf_wtr; + if (!btf_wtr) + p_info("failed to create json writer for btf. falling back to plain output"); + } + } + + if (wtr && nb_fds > 1) + jsonw_start_array(wtr); /* root array */ + for (i = 0; i < nb_fds; i++) { + if (bpf_obj_get_info_by_fd(fds[i], &info, &len)) { + p_err("can't get map info: %s", strerror(errno)); + break; + } + err = map_dump(fds[i], &info, wtr, nb_fds > 1); + if (!wtr && i != nb_fds - 1) + printf("\n"); + + if (err) + break; + close(fds[i]); + } + if (wtr && nb_fds > 1) + jsonw_end_array(wtr); /* root array */ + + if (btf_wtr) + jsonw_destroy(&btf_wtr); +exit_close: + for (; i < nb_fds; i++) + close(fds[i]); +exit_free: + free(fds); + free_btf_vmlinux(); + return err; +} + static int alloc_key_value(struct bpf_map_info *info, void **key, void **value) { *key = NULL; @@ -1122,7 +1240,7 @@ static int do_pin(int argc, char **argv) { int err; - err = do_pin_any(argc, argv, bpf_map_get_fd_by_id); + err = do_pin_any(argc, argv, map_parse_fd); if (!err && json_output) jsonw_null(json_wtr); return err; @@ -1132,7 +1250,7 @@ static int do_create(int argc, char **argv) { struct bpf_create_map_attr attr = { NULL, }; const char *pinfile; - int err, fd; + int err = -1, fd; if (!REQ_ARGS(7)) return -1; @@ -1147,13 +1265,13 @@ static int do_create(int argc, char **argv) if (attr.map_type) { p_err("map type already specified"); - return -1; + goto exit; } attr.map_type = map_type_from_str(*argv); if ((int)attr.map_type < 0) { p_err("unrecognized map type: %s", *argv); - return -1; + goto exit; } NEXT_ARG(); } else if (is_prefix(*argv, "name")) { @@ -1162,43 +1280,56 @@ static int do_create(int argc, char **argv) } else if (is_prefix(*argv, "key")) { if (parse_u32_arg(&argc, &argv, &attr.key_size, "key size")) - return -1; + goto exit; } else if (is_prefix(*argv, "value")) { if (parse_u32_arg(&argc, &argv, &attr.value_size, "value size")) - return -1; + goto exit; } else if (is_prefix(*argv, "entries")) { if (parse_u32_arg(&argc, &argv, &attr.max_entries, "max entries")) - return -1; + goto exit; } else if (is_prefix(*argv, "flags")) { if (parse_u32_arg(&argc, &argv, &attr.map_flags, "flags")) - return -1; + goto exit; } else if (is_prefix(*argv, "dev")) { NEXT_ARG(); if (attr.map_ifindex) { p_err("offload device already specified"); - return -1; + goto exit; } attr.map_ifindex = if_nametoindex(*argv); if (!attr.map_ifindex) { p_err("unrecognized netdevice '%s': %s", *argv, strerror(errno)); - return -1; + goto exit; } NEXT_ARG(); + } else if (is_prefix(*argv, "inner_map")) { + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int inner_map_fd; + + NEXT_ARG(); + if (!REQ_ARGS(2)) + usage(); + inner_map_fd = map_parse_fd_and_info(&argc, &argv, + &info, &len); + if (inner_map_fd < 0) + return -1; + attr.inner_map_fd = 
inner_map_fd; } else { p_err("unknown arg %s", *argv); - return -1; + goto exit; } } if (!attr.name) { p_err("map name not specified"); - return -1; + goto exit; } set_max_rlimit(); @@ -1206,17 +1337,22 @@ static int do_create(int argc, char **argv) fd = bpf_create_map_xattr(&attr); if (fd < 0) { p_err("map create failed: %s", strerror(errno)); - return -1; + goto exit; } err = do_pin_fd(fd, pinfile); close(fd); if (err) - return err; + goto exit; if (json_output) jsonw_null(json_wtr); - return 0; + +exit: + if (attr.inner_map_fd > 0) + close(attr.inner_map_fd); + + return err; } static int do_pop_dequeue(int argc, char **argv) @@ -1299,24 +1435,24 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s { show | list } [MAP]\n" - " %s %s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n" - " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n" - " [dev NAME]\n" - " %s %s dump MAP\n" - " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n" - " %s %s lookup MAP [key DATA]\n" - " %s %s getnext MAP [key DATA]\n" - " %s %s delete MAP key DATA\n" - " %s %s pin MAP FILE\n" - " %s %s event_pipe MAP [cpu N index M]\n" - " %s %s peek MAP\n" - " %s %s push MAP value VALUE\n" - " %s %s pop MAP\n" - " %s %s enqueue MAP value VALUE\n" - " %s %s dequeue MAP\n" - " %s %s freeze MAP\n" - " %s %s help\n" + "Usage: %1$s %2$s { show | list } [MAP]\n" + " %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n" + " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n" + " [inner_map MAP] [dev NAME]\n" + " %1$s %2$s dump MAP\n" + " %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n" + " %1$s %2$s lookup MAP [key DATA]\n" + " %1$s %2$s getnext MAP [key DATA]\n" + " %1$s %2$s delete MAP key DATA\n" + " %1$s %2$s pin MAP FILE\n" + " %1$s %2$s event_pipe MAP [cpu N index M]\n" + " %1$s %2$s peek MAP\n" + " %1$s %2$s push MAP value VALUE\n" + " %1$s %2$s pop MAP\n" + " %1$s %2$s enqueue MAP value VALUE\n" + " %1$s %2$s dequeue MAP\n" + " %1$s %2$s freeze MAP\n" + " %1$s %2$s help\n" "\n" " " HELP_SPEC_MAP "\n" " DATA := { [hex] BYTES }\n" @@ -1327,14 +1463,10 @@ static int do_help(int argc, char **argv) " percpu_array | stack_trace | cgroup_array | lru_hash |\n" " lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n" " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n" - " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage }\n" + " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n" + " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage }\n" " " HELP_SPEC_OPTIONS "\n" "", - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); return 0; diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index 4c5531d1a450..825f29f93a57 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c @@ -6,7 +6,7 @@ */ #include <errno.h> #include <fcntl.h> -#include <libbpf.h> +#include <bpf/libbpf.h> #include <poll.h> #include <signal.h> #include <stdbool.h> @@ -21,7 +21,7 @@ #include <sys/mman.h> #include <sys/syscall.h> -#include <bpf.h> +#include <bpf/bpf.h> #include <perf-sys.h> #include "main.h" @@ -39,7 +39,7 @@ struct event_ring_info { struct perf_event_sample { struct perf_event_header 
header; - u64 time; + __u64 time; __u32 size; unsigned char data[]; }; diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c index 4f52d3151616..3fae61ef6339 100644 --- a/tools/bpf/bpftool/net.c +++ b/tools/bpf/bpftool/net.c @@ -6,21 +6,27 @@ #include <fcntl.h> #include <stdlib.h> #include <string.h> +#include <time.h> #include <unistd.h> -#include <libbpf.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> #include <net/if.h> #include <linux/if.h> #include <linux/rtnetlink.h> +#include <linux/socket.h> #include <linux/tc_act/tc_bpf.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/types.h> -#include <bpf.h> -#include <nlattr.h> +#include "bpf/nlattr.h" #include "main.h" #include "netlink_dumper.h" +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + struct ip_devname_ifindex { char devname[64]; int ifindex; @@ -84,6 +90,266 @@ static enum net_attach_type parse_attach_type(const char *str) return net_attach_type_size; } +typedef int (*dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); + +typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, dump_nlmsg_t, void *cookie); + +static int netlink_open(__u32 *nl_pid) +{ + struct sockaddr_nl sa; + socklen_t addrlen; + int one = 1, ret; + int sock; + + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + + sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (sock < 0) + return -errno; + + if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)) < 0) { + p_err("Netlink error reporting not supported"); + } + + if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { + ret = -errno; + goto cleanup; + } + + addrlen = sizeof(sa); + if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { + ret = -errno; + goto cleanup; + } + + if (addrlen != sizeof(sa)) { + ret = -LIBBPF_ERRNO__INTERNAL; + goto cleanup; + } + + *nl_pid = sa.nl_pid; + return sock; + +cleanup: + close(sock); + return ret; +} + +static int netlink_recv(int sock, __u32 nl_pid, __u32 seq, + __dump_nlmsg_t _fn, dump_nlmsg_t fn, + void *cookie) +{ + bool multipart = true; + struct nlmsgerr *err; + struct nlmsghdr *nh; + char buf[4096]; + int len, ret; + + while (multipart) { + multipart = false; + len = recv(sock, buf, sizeof(buf), 0); + if (len < 0) { + ret = -errno; + goto done; + } + + if (len == 0) + break; + + for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); + nh = NLMSG_NEXT(nh, len)) { + if (nh->nlmsg_pid != nl_pid) { + ret = -LIBBPF_ERRNO__WRNGPID; + goto done; + } + if (nh->nlmsg_seq != seq) { + ret = -LIBBPF_ERRNO__INVSEQ; + goto done; + } + if (nh->nlmsg_flags & NLM_F_MULTI) + multipart = true; + switch (nh->nlmsg_type) { + case NLMSG_ERROR: + err = (struct nlmsgerr *)NLMSG_DATA(nh); + if (!err->error) + continue; + ret = err->error; + libbpf_nla_dump_errormsg(nh); + goto done; + case NLMSG_DONE: + return 0; + default: + break; + } + if (_fn) { + ret = _fn(nh, fn, cookie); + if (ret) + return ret; + } + } + } + ret = 0; +done: + return ret; +} + +static int __dump_class_nlmsg(struct nlmsghdr *nlh, + dump_nlmsg_t dump_class_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_class_nlmsg(cookie, t, tb); +} + +static int netlink_get_class(int sock, unsigned int nl_pid, int ifindex, + dump_nlmsg_t dump_class_nlmsg, void *cookie) +{ + 
struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETTCLASS, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg, + dump_class_nlmsg, cookie); +} + +static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh, + dump_nlmsg_t dump_qdisc_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_qdisc_nlmsg(cookie, t, tb); +} + +static int netlink_get_qdisc(int sock, unsigned int nl_pid, int ifindex, + dump_nlmsg_t dump_qdisc_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETQDISC, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg, + dump_qdisc_nlmsg, cookie); +} + +static int __dump_filter_nlmsg(struct nlmsghdr *nlh, + dump_nlmsg_t dump_filter_nlmsg, + void *cookie) +{ + struct nlattr *tb[TCA_MAX + 1], *attr; + struct tcmsg *t = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); + attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); + if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_filter_nlmsg(cookie, t, tb); +} + +static int netlink_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, + dump_nlmsg_t dump_filter_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct tcmsg t; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), + .nlh.nlmsg_type = RTM_GETTFILTER, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .t.tcm_family = AF_UNSPEC, + .t.tcm_ifindex = ifindex, + .t.tcm_parent = handle, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg, + dump_filter_nlmsg, cookie); +} + +static int __dump_link_nlmsg(struct nlmsghdr *nlh, + dump_nlmsg_t dump_link_nlmsg, void *cookie) +{ + struct nlattr *tb[IFLA_MAX + 1], *attr; + struct ifinfomsg *ifi = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)); + attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi))); + if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_link_nlmsg(cookie, ifi, tb); +} + +static int netlink_get_link(int sock, unsigned int nl_pid, + dump_nlmsg_t dump_link_nlmsg, void *cookie) +{ + struct { + struct nlmsghdr nlh; + struct ifinfomsg ifm; + } req = { + .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlh.nlmsg_type = RTM_GETLINK, + .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + .ifm.ifi_family = AF_PACKET, + }; + int seq = time(NULL); + + req.nlh.nlmsg_seq = seq; + if (send(sock, &req, 
req.nlh.nlmsg_len, 0) < 0) + return -errno; + + return netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg, + dump_link_nlmsg, cookie); +} + static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb) { struct bpf_netdev_t *netinfo = cookie; @@ -167,14 +433,14 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid, tcinfo.array_len = 0; tcinfo.is_qdisc = false; - ret = libbpf_nl_get_class(sock, nl_pid, dev->ifindex, - dump_class_qdisc_nlmsg, &tcinfo); + ret = netlink_get_class(sock, nl_pid, dev->ifindex, + dump_class_qdisc_nlmsg, &tcinfo); if (ret) goto out; tcinfo.is_qdisc = true; - ret = libbpf_nl_get_qdisc(sock, nl_pid, dev->ifindex, - dump_class_qdisc_nlmsg, &tcinfo); + ret = netlink_get_qdisc(sock, nl_pid, dev->ifindex, + dump_class_qdisc_nlmsg, &tcinfo); if (ret) goto out; @@ -182,9 +448,9 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid, filter_info.ifindex = dev->ifindex; for (i = 0; i < tcinfo.used_len; i++) { filter_info.kind = tcinfo.handle_array[i].kind; - ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, - tcinfo.handle_array[i].handle, - dump_filter_nlmsg, &filter_info); + ret = netlink_get_filter(sock, nl_pid, dev->ifindex, + tcinfo.handle_array[i].handle, + dump_filter_nlmsg, &filter_info); if (ret) goto out; } @@ -192,22 +458,22 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid, /* root, ingress and egress handle */ handle = TC_H_ROOT; filter_info.kind = "root"; - ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle, - dump_filter_nlmsg, &filter_info); + ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle, + dump_filter_nlmsg, &filter_info); if (ret) goto out; handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS); filter_info.kind = "clsact/ingress"; - ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle, - dump_filter_nlmsg, &filter_info); + ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle, + dump_filter_nlmsg, &filter_info); if (ret) goto out; handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS); filter_info.kind = "clsact/egress"; - ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle, - dump_filter_nlmsg, &filter_info); + ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle, + dump_filter_nlmsg, &filter_info); if (ret) goto out; @@ -312,8 +578,8 @@ static int do_attach(int argc, char **argv) ifindex = net_parse_dev(&argc, &argv); if (ifindex < 1) { - close(progfd); - return -EINVAL; + err = -EINVAL; + goto cleanup; } if (argc) { @@ -321,8 +587,8 @@ static int do_attach(int argc, char **argv) overwrite = true; } else { p_err("expected 'overwrite', got: '%s'?", *argv); - close(progfd); - return -EINVAL; + err = -EINVAL; + goto cleanup; } } @@ -330,17 +596,17 @@ static int do_attach(int argc, char **argv) if (is_prefix("xdp", attach_type_strings[attach_type])) err = do_attach_detach_xdp(progfd, attach_type, ifindex, overwrite); - - if (err < 0) { + if (err) { p_err("interface %s attach failed: %s", attach_type_strings[attach_type], strerror(-err)); - return err; + goto cleanup; } if (json_output) jsonw_null(json_wtr); - - return 0; +cleanup: + close(progfd); + return err; } static int do_detach(int argc, char **argv) @@ -385,7 +651,7 @@ static int do_show(int argc, char **argv) struct bpf_attach_info attach_info = {}; int i, sock, ret, filter_idx = -1; struct bpf_netdev_t dev_array; - unsigned int nl_pid; + unsigned int nl_pid = 0; char err_buf[256]; if (argc == 2) { @@ -400,7 +666,7 @@ static int do_show(int argc, char **argv) if (ret) return -1; - sock = libbpf_netlink_open(&nl_pid); + 
sock = netlink_open(&nl_pid); if (sock < 0) { fprintf(stderr, "failed to open netlink sock\n"); return -1; @@ -415,7 +681,7 @@ static int do_show(int argc, char **argv) jsonw_start_array(json_wtr); NET_START_OBJECT; NET_START_ARRAY("xdp", "%s:\n"); - ret = libbpf_nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array); + ret = netlink_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array); NET_END_ARRAY("\n"); if (!ret) { @@ -457,10 +723,10 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s { show | list } [dev <devname>]\n" - " %s %s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n" - " %s %s detach ATTACH_TYPE dev <devname>\n" - " %s %s help\n" + "Usage: %1$s %2$s { show | list } [dev <devname>]\n" + " %1$s %2$s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n" + " %1$s %2$s detach ATTACH_TYPE dev <devname>\n" + " %1$s %2$s help\n" "\n" " " HELP_SPEC_PROGRAM "\n" " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n" @@ -469,8 +735,8 @@ static int do_help(int argc, char **argv) " For progs attached to cgroups, use \"bpftool cgroup\"\n" " to dump program attachments. For program types\n" " sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n" - " consult iproute2.\n", - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], + " consult iproute2.\n" + "", bin_name, argv[-2]); return 0; diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c index 550a0f537eed..5f65140b003b 100644 --- a/tools/bpf/bpftool/netlink_dumper.c +++ b/tools/bpf/bpftool/netlink_dumper.c @@ -3,11 +3,11 @@ #include <stdlib.h> #include <string.h> -#include <libbpf.h> +#include <bpf/libbpf.h> #include <linux/rtnetlink.h> #include <linux/tc_act/tc_bpf.h> -#include <nlattr.h> +#include "bpf/nlattr.h" #include "main.h" #include "netlink_dumper.h" diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c index b2046f33e23f..ad23934819c7 100644 --- a/tools/bpf/bpftool/perf.c +++ b/tools/bpf/bpftool/perf.c @@ -13,7 +13,7 @@ #include <unistd.h> #include <ftw.h> -#include <bpf.h> +#include <bpf/bpf.h> #include "main.h" @@ -231,7 +231,7 @@ static int do_show(int argc, char **argv) static int do_help(int argc, char **argv) { fprintf(stderr, - "Usage: %s %s { show | list | help }\n" + "Usage: %1$s %2$s { show | list | help }\n" "", bin_name, argv[-2]); diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c new file mode 100644 index 000000000000..477e55d59c34 --- /dev/null +++ b/tools/bpf/bpftool/pids.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2020 Facebook */ +#include <errno.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <bpf/bpf.h> + +#include "main.h" +#include "skeleton/pid_iter.h" + +#ifdef BPFTOOL_WITHOUT_SKELETONS + +int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +{ + return -ENOTSUP; +} +void delete_obj_refs_table(struct obj_refs_table *table) {} +void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {} + +#else /* BPFTOOL_WITHOUT_SKELETONS */ + +#include "pid_iter.skel.h" + +static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) +{ + struct obj_refs *refs; + struct obj_ref *ref; + void *tmp; + int i; + + hash_for_each_possible(table->table, refs, node, e->id) { + if (refs->id != e->id) + continue; + + for (i = 0; i < 
refs->ref_cnt; i++) { + if (refs->refs[i].pid == e->pid) + return; + } + + tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref)); + if (!tmp) { + p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + refs->refs = tmp; + ref = &refs->refs[refs->ref_cnt]; + ref->pid = e->pid; + memcpy(ref->comm, e->comm, sizeof(ref->comm)); + refs->ref_cnt++; + + return; + } + + /* new ref */ + refs = calloc(1, sizeof(*refs)); + if (!refs) { + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + + refs->id = e->id; + refs->refs = malloc(sizeof(*refs->refs)); + if (!refs->refs) { + free(refs); + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + ref = &refs->refs[0]; + ref->pid = e->pid; + memcpy(ref->comm, e->comm, sizeof(ref->comm)); + refs->ref_cnt = 1; + hash_add(table->table, &refs->node, e->id); +} + +static int __printf(2, 0) +libbpf_print_none(__maybe_unused enum libbpf_print_level level, + __maybe_unused const char *format, + __maybe_unused va_list args) +{ + return 0; +} + +int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +{ + struct pid_iter_entry *e; + char buf[4096 / sizeof(*e) * sizeof(*e)]; + struct pid_iter_bpf *skel; + int err, ret, fd = -1, i; + libbpf_print_fn_t default_print; + + hash_init(table->table); + set_max_rlimit(); + + skel = pid_iter_bpf__open(); + if (!skel) { + p_err("failed to open PID iterator skeleton"); + return -1; + } + + skel->rodata->obj_type = type; + + /* we don't want output polluted with libbpf errors if bpf_iter is not + * supported + */ + default_print = libbpf_set_print(libbpf_print_none); + err = pid_iter_bpf__load(skel); + libbpf_set_print(default_print); + if (err) { + /* too bad, kernel doesn't support BPF iterators yet */ + err = 0; + goto out; + } + err = pid_iter_bpf__attach(skel); + if (err) { + /* if we loaded above successfully, attach has to succeed */ + p_err("failed to attach PID iterator: %d", err); + goto out; + } + + fd = bpf_iter_create(bpf_link__fd(skel->links.iter)); + if (fd < 0) { + err = -errno; + p_err("failed to create PID iterator session: %d", err); + goto out; + } + + while (true) { + ret = read(fd, buf, sizeof(buf)); + if (ret < 0) { + if (errno == EAGAIN) + continue; + err = -errno; + p_err("failed to read PID iterator output: %d", err); + goto out; + } + if (ret == 0) + break; + if (ret % sizeof(*e)) { + err = -EINVAL; + p_err("invalid PID iterator output format"); + goto out; + } + ret /= sizeof(*e); + + e = (void *)buf; + for (i = 0; i < ret; i++, e++) { + add_ref(table, e); + } + } + err = 0; +out: + if (fd >= 0) + close(fd); + pid_iter_bpf__destroy(skel); + return err; +} + +void delete_obj_refs_table(struct obj_refs_table *table) +{ + struct obj_refs *refs; + struct hlist_node *tmp; + unsigned int bkt; + + hash_for_each_safe(table->table, bkt, tmp, refs, node) { + hash_del(&refs->node); + free(refs->refs); + free(refs); + } +} + +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, + json_writer_t *json_writer) +{ + struct obj_refs *refs; + struct obj_ref *ref; + int i; + + if (hash_empty(table->table)) + return; + + hash_for_each_possible(table->table, refs, node, id) { + if (refs->id != id) + continue; + if (refs->ref_cnt == 0) + break; + + jsonw_name(json_writer, "pids"); + jsonw_start_array(json_writer); + for (i = 0; i < refs->ref_cnt; i++) { + ref = &refs->refs[i]; + jsonw_start_object(json_writer); + 
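/* Illustrative shape of the JSON this loop emits (pid/comm values are made up): "pids": [{"pid": 1, "comm": "systemd"}, {"pid": 4321, "comm": "bpftool"}] */ +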
jsonw_int_field(json_writer, "pid", ref->pid); + jsonw_string_field(json_writer, "comm", ref->comm); + jsonw_end_object(json_writer); + } + jsonw_end_array(json_writer); + break; + } +} + +void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) +{ + struct obj_refs *refs; + struct obj_ref *ref; + int i; + + if (hash_empty(table->table)) + return; + + hash_for_each_possible(table->table, refs, node, id) { + if (refs->id != id) + continue; + if (refs->ref_cnt == 0) + break; + + printf("%s", prefix); + for (i = 0; i < refs->ref_cnt; i++) { + ref = &refs->refs[i]; + printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid); + } + break; + } +} + + +#endif diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2e388421c32f..acdb2c245f0a 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -4,6 +4,7 @@ #define _GNU_SOURCE #include <errno.h> #include <fcntl.h> +#include <signal.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> @@ -11,20 +12,67 @@ #include <time.h> #include <unistd.h> #include <net/if.h> +#include <sys/ioctl.h> #include <sys/types.h> #include <sys/stat.h> +#include <sys/syscall.h> #include <linux/err.h> +#include <linux/perf_event.h> #include <linux/sizes.h> -#include <bpf.h> -#include <btf.h> -#include <libbpf.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> #include "cfg.h" #include "main.h" #include "xlated_dumper.h" +#define BPF_METADATA_PREFIX "bpf_metadata_" +#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1) + +const char * const prog_type_name[] = { + [BPF_PROG_TYPE_UNSPEC] = "unspec", + [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", + [BPF_PROG_TYPE_KPROBE] = "kprobe", + [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", + [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", + [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", + [BPF_PROG_TYPE_XDP] = "xdp", + [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", + [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", + [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", + [BPF_PROG_TYPE_LWT_IN] = "lwt_in", + [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", + [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", + [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", + [BPF_PROG_TYPE_SK_SKB] = "sk_skb", + [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", + [BPF_PROG_TYPE_SK_MSG] = "sk_msg", + [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", + [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", + [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", + [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", + [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", + [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", + [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", + [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", + [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", + [BPF_PROG_TYPE_TRACING] = "tracing", + [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", + [BPF_PROG_TYPE_EXT] = "ext", + [BPF_PROG_TYPE_LSM] = "lsm", + [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", +}; + +const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name); + +enum dump_mode { + DUMP_JITED, + DUMP_XLATED, +}; + static const char * const attach_type_strings[] = { [BPF_SK_SKB_STREAM_PARSER] = "stream_parser", [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict", @@ -77,95 +125,7 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) strftime(buf, size, "%FT%T%z", &load_tm); } -static int prog_fd_by_tag(unsigned char *tag) -{ - unsigned int id = 0; - int err; - int fd; - - while (true) { - struct bpf_prog_info info = {}; - __u32 len = 
sizeof(info); - - err = bpf_prog_get_next_id(id, &id); - if (err) { - p_err("%s", strerror(errno)); - return -1; - } - - fd = bpf_prog_get_fd_by_id(id); - if (fd < 0) { - p_err("can't get prog by id (%u): %s", - id, strerror(errno)); - return -1; - } - - err = bpf_obj_get_info_by_fd(fd, &info, &len); - if (err) { - p_err("can't get prog info (%u): %s", - id, strerror(errno)); - close(fd); - return -1; - } - - if (!memcmp(tag, info.tag, BPF_TAG_SIZE)) - return fd; - - close(fd); - } -} - -int prog_parse_fd(int *argc, char ***argv) -{ - int fd; - - if (is_prefix(**argv, "id")) { - unsigned int id; - char *endptr; - - NEXT_ARGP(); - - id = strtoul(**argv, &endptr, 0); - if (*endptr) { - p_err("can't parse %s as ID", **argv); - return -1; - } - NEXT_ARGP(); - - fd = bpf_prog_get_fd_by_id(id); - if (fd < 0) - p_err("get by id (%u): %s", id, strerror(errno)); - return fd; - } else if (is_prefix(**argv, "tag")) { - unsigned char tag[BPF_TAG_SIZE]; - - NEXT_ARGP(); - - if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2, - tag + 3, tag + 4, tag + 5, tag + 6, tag + 7) - != BPF_TAG_SIZE) { - p_err("can't parse tag"); - return -1; - } - NEXT_ARGP(); - - return prog_fd_by_tag(tag); - } else if (is_prefix(**argv, "pinned")) { - char *path; - - NEXT_ARGP(); - - path = **argv; - NEXT_ARGP(); - - return open_obj_pinned_any(path, BPF_OBJ_PROG); - } - - p_err("expected 'id', 'tag' or 'pinned', got: '%s'?", **argv); - return -1; -} - -static void show_prog_maps(int fd, u32 num_maps) +static void show_prog_maps(int fd, __u32 num_maps) { struct bpf_prog_info info = {}; __u32 len = sizeof(info); @@ -194,11 +154,200 @@ static void show_prog_maps(int fd, u32 num_maps) } } -static void print_prog_json(struct bpf_prog_info *info, int fd) +static void *find_metadata(int prog_fd, struct bpf_map_info *map_info) { - char *memlock; + struct bpf_prog_info prog_info; + __u32 prog_info_len; + __u32 map_info_len; + void *value = NULL; + __u32 *map_ids; + int nr_maps; + int key = 0; + int map_fd; + int ret; + __u32 i; - jsonw_start_object(json_wtr); + memset(&prog_info, 0, sizeof(prog_info)); + prog_info_len = sizeof(prog_info); + ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + if (ret) + return NULL; + + if (!prog_info.nr_map_ids) + return NULL; + + map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32)); + if (!map_ids) + return NULL; + + nr_maps = prog_info.nr_map_ids; + memset(&prog_info, 0, sizeof(prog_info)); + prog_info.nr_map_ids = nr_maps; + prog_info.map_ids = ptr_to_u64(map_ids); + prog_info_len = sizeof(prog_info); + + ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + if (ret) + goto free_map_ids; + + for (i = 0; i < prog_info.nr_map_ids; i++) { + map_fd = bpf_map_get_fd_by_id(map_ids[i]); + if (map_fd < 0) + goto free_map_ids; + + memset(map_info, 0, sizeof(*map_info)); + map_info_len = sizeof(*map_info); + ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len); + if (ret < 0) { + close(map_fd); + goto free_map_ids; + } + + if (map_info->type != BPF_MAP_TYPE_ARRAY || + map_info->key_size != sizeof(int) || + map_info->max_entries != 1 || + !map_info->btf_value_type_id || + !strstr(map_info->name, ".rodata")) { + close(map_fd); + continue; + } + + value = malloc(map_info->value_size); + if (!value) { + close(map_fd); + goto free_map_ids; + } + + if (bpf_map_lookup_elem(map_fd, &key, value)) { + close(map_fd); + free(value); + value = NULL; + goto free_map_ids; + } + + close(map_fd); + break; + } + +free_map_ids: + free(map_ids); + return value; +} + +static bool 
has_metadata_prefix(const char *s) +{ + return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0; +} + +static void show_prog_metadata(int fd, __u32 num_maps) +{ + const struct btf_type *t_datasec, *t_var; + struct bpf_map_info map_info; + struct btf_var_secinfo *vsi; + bool printed_header = false; + struct btf *btf = NULL; + unsigned int i, vlen; + void *value = NULL; + const char *name; + int err; + + if (!num_maps) + return; + + memset(&map_info, 0, sizeof(map_info)); + value = find_metadata(fd, &map_info); + if (!value) + return; + + err = btf__get_from_id(map_info.btf_id, &btf); + if (err || !btf) + goto out_free; + + t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id); + if (!btf_is_datasec(t_datasec)) + goto out_free; + + vlen = btf_vlen(t_datasec); + vsi = btf_var_secinfos(t_datasec); + + /* We don't proceed to check the kinds of the elements of the DATASEC. + * The verifier enforces them to be BTF_KIND_VAR. + */ + + if (json_output) { + struct btf_dumper d = { + .btf = btf, + .jw = json_wtr, + .is_plain_text = false, + }; + + for (i = 0; i < vlen; i++, vsi++) { + t_var = btf__type_by_id(btf, vsi->type); + name = btf__name_by_offset(btf, t_var->name_off); + + if (!has_metadata_prefix(name)) + continue; + + if (!printed_header) { + jsonw_name(json_wtr, "metadata"); + jsonw_start_object(json_wtr); + printed_header = true; + } + + jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN); + err = btf_dumper_type(&d, t_var->type, value + vsi->offset); + if (err) { + p_err("btf dump failed: %d", err); + break; + } + } + if (printed_header) + jsonw_end_object(json_wtr); + } else { + json_writer_t *btf_wtr = jsonw_new(stdout); + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + if (!btf_wtr) { + p_err("jsonw alloc failed"); + goto out_free; + } + + for (i = 0; i < vlen; i++, vsi++) { + t_var = btf__type_by_id(btf, vsi->type); + name = btf__name_by_offset(btf, t_var->name_off); + + if (!has_metadata_prefix(name)) + continue; + + if (!printed_header) { + printf("\tmetadata:"); + printed_header = true; + } + + printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN); + + jsonw_reset(btf_wtr); + err = btf_dumper_type(&d, t_var->type, value + vsi->offset); + if (err) { + p_err("btf dump failed: %d", err); + break; + } + } + if (printed_header) + jsonw_destroy(&btf_wtr); + } + +out_free: + btf__free(btf); + free(value); +} + +static void print_prog_header_json(struct bpf_prog_info *info) +{ jsonw_uint_field(json_wtr, "id", info->id); if (info->type < ARRAY_SIZE(prog_type_name)) jsonw_string_field(json_wtr, "type", @@ -219,7 +368,14 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns); jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt); } +} +static void print_prog_json(struct bpf_prog_info *info, int fd) +{ + char *memlock; + + jsonw_start_object(json_wtr); + print_prog_header_json(info); print_dev_json(info->ifindex, info->netns_dev, info->netns_ino); if (info->load_time) { @@ -265,13 +421,15 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_end_array(json_wtr); } + emit_obj_refs_json(&refs_table, info->id, json_wtr); + + show_prog_metadata(fd, info->nr_map_ids); + jsonw_end_object(json_wtr); } -static void print_prog_plain(struct bpf_prog_info *info, int fd) +static void print_prog_header_plain(struct bpf_prog_info *info) { - char *memlock; - printf("%u: ", info->id); if (info->type < ARRAY_SIZE(prog_type_name)) printf("%s ", 
prog_type_name[info->type]); @@ -289,6 +447,13 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf(" run_time_ns %lld run_cnt %lld", info->run_time_ns, info->run_cnt); printf("\n"); +} + +static void print_prog_plain(struct bpf_prog_info *info, int fd) +{ + char *memlock; + + print_prog_header_plain(info); if (info->load_time) { char buf[32]; @@ -326,7 +491,11 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) if (info->btf_id) printf("\n\tbtf_id %d", info->btf_id); + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + printf("\n"); + + show_prog_metadata(fd, info->nr_map_ids); } static int show_prog(int fd) @@ -349,6 +518,40 @@ static int show_prog(int fd) return 0; } +static int do_show_subset(int argc, char **argv) +{ + int *fds = NULL; + int nb_fds, i; + int err = -1; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = prog_parse_fds(&argc, &argv, &fds); + if (nb_fds < 1) + goto exit_free; + + if (json_output && nb_fds > 1) + jsonw_start_array(json_wtr); /* root array */ + for (i = 0; i < nb_fds; i++) { + err = show_prog(fds[i]); + if (err) { + for (; i < nb_fds; i++) + close(fds[i]); + break; + } + close(fds[i]); + } + if (json_output && nb_fds > 1) + jsonw_end_array(json_wtr); /* root array */ + +exit_free: + free(fds); + return err; +} + static int do_show(int argc, char **argv) { __u32 id = 0; @@ -357,16 +560,10 @@ static int do_show(int argc, char **argv) if (show_pinned) build_pinned_obj_table(&prog_table, BPF_OBJ_PROG); + build_obj_refs_table(&refs_table, BPF_OBJ_PROG); - if (argc == 2) { - fd = prog_parse_fd(&argc, &argv); - if (fd < 0) - return -1; - - err = show_prog(fd); - close(fd); - return err; - } + if (argc == 2) + return do_show_subset(argc, argv); if (argc) return BAD_ARG(); @@ -405,115 +602,48 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); + delete_obj_refs_table(&refs_table); + return err; } -static int do_dump(int argc, char **argv) +static int +prog_dump(struct bpf_prog_info *info, enum dump_mode mode, + char *filepath, bool opcodes, bool visual, bool linum) { - struct bpf_prog_info_linear *info_linear; struct bpf_prog_linfo *prog_linfo = NULL; - enum {DUMP_JITED, DUMP_XLATED} mode; const char *disasm_opt = NULL; - struct bpf_prog_info *info; struct dump_data dd = {}; void *func_info = NULL; struct btf *btf = NULL; - char *filepath = NULL; - bool opcodes = false; - bool visual = false; char func_sig[1024]; unsigned char *buf; - bool linum = false; __u32 member_len; - __u64 arrays; ssize_t n; int fd; - if (is_prefix(*argv, "jited")) { - if (disasm_init()) - return -1; - mode = DUMP_JITED; - } else if (is_prefix(*argv, "xlated")) { - mode = DUMP_XLATED; - } else { - p_err("expected 'xlated' or 'jited', got: %s", *argv); - return -1; - } - NEXT_ARG(); - - if (argc < 2) - usage(); - - fd = prog_parse_fd(&argc, &argv); - if (fd < 0) - return -1; - - if (is_prefix(*argv, "file")) { - NEXT_ARG(); - if (!argc) { - p_err("expected file path"); - return -1; - } - - filepath = *argv; - NEXT_ARG(); - } else if (is_prefix(*argv, "opcodes")) { - opcodes = true; - NEXT_ARG(); - } else if (is_prefix(*argv, "visual")) { - visual = true; - NEXT_ARG(); - } else if (is_prefix(*argv, "linum")) { - linum = true; - NEXT_ARG(); - } - - if (argc) { - usage(); - return -1; - } - - if (mode == DUMP_JITED) - arrays = 1UL << BPF_PROG_INFO_JITED_INSNS; - else - arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS; - - arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS; - 
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; - arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; - arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; - arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; - - info_linear = bpf_program__get_prog_info_linear(fd, arrays); - close(fd); - if (IS_ERR_OR_NULL(info_linear)) { - p_err("can't get prog info: %s", strerror(errno)); - return -1; - } - - info = &info_linear->info; if (mode == DUMP_JITED) { if (info->jited_prog_len == 0 || !info->jited_prog_insns) { p_info("no instructions returned"); - goto err_free; + return -1; } - buf = (unsigned char *)(info->jited_prog_insns); + buf = u64_to_ptr(info->jited_prog_insns); member_len = info->jited_prog_len; } else { /* DUMP_XLATED */ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) { p_err("error retrieving insn dump: kernel.kptr_restrict set?"); - goto err_free; + return -1; } - buf = (unsigned char *)info->xlated_prog_insns; + buf = u64_to_ptr(info->xlated_prog_insns); member_len = info->xlated_prog_len; } if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) { p_err("failed to get btf"); - goto err_free; + return -1; } - func_info = (void *)info->func_info; + func_info = u64_to_ptr(info->func_info); if (info->nr_line_info) { prog_linfo = bpf_prog_linfo__new(info); @@ -526,15 +656,15 @@ static int do_dump(int argc, char **argv) if (fd < 0) { p_err("can't open file %s: %s", filepath, strerror(errno)); - goto err_free; + return -1; } n = write(fd, buf, member_len); close(fd); - if (n != member_len) { + if (n != (ssize_t)member_len) { p_err("error writing output file: %s", n < 0 ? strerror(errno) : "short write"); - goto err_free; + return -1; } if (json_output) @@ -548,7 +678,7 @@ static int do_dump(int argc, char **argv) info->netns_ino, &disasm_opt); if (!name) - goto err_free; + return -1; } if (info->nr_jited_func_lens && info->jited_func_lens) { @@ -561,13 +691,13 @@ static int do_dump(int argc, char **argv) __u32 i; if (info->nr_jited_ksyms) { kernel_syms_load(&dd); - ksyms = (__u64 *) info->jited_ksyms; + ksyms = u64_to_ptr(info->jited_ksyms); } if (json_output) jsonw_start_array(json_wtr); - lens = (__u32 *) info->jited_func_lens; + lens = u64_to_ptr(info->jited_func_lens); for (i = 0; i < info->nr_jited_func_lens; i++) { if (ksyms) { sym = kernel_syms_search(&dd, ksyms[i]); @@ -628,7 +758,7 @@ static int do_dump(int argc, char **argv) } else { kernel_syms_load(&dd); dd.nr_jited_ksyms = info->nr_jited_ksyms; - dd.jited_ksyms = (__u64 *) info->jited_ksyms; + dd.jited_ksyms = u64_to_ptr(info->jited_ksyms); dd.btf = btf; dd.func_info = func_info; dd.finfo_rec_size = info->func_info_rec_size; @@ -643,19 +773,137 @@ static int do_dump(int argc, char **argv) kernel_syms_destroy(&dd); } - free(info_linear); return 0; +} -err_free: - free(info_linear); - return -1; +static int do_dump(int argc, char **argv) +{ + struct bpf_prog_info_linear *info_linear; + char *filepath = NULL; + bool opcodes = false; + bool visual = false; + enum dump_mode mode; + bool linum = false; + int *fds = NULL; + int nb_fds, i = 0; + int err = -1; + __u64 arrays; + + if (is_prefix(*argv, "jited")) { + if (disasm_init()) + return -1; + mode = DUMP_JITED; + } else if (is_prefix(*argv, "xlated")) { + mode = DUMP_XLATED; + } else { + p_err("expected 'xlated' or 'jited', got: %s", *argv); + return -1; + } + NEXT_ARG(); + + if (argc < 2) + usage(); + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = prog_parse_fds(&argc, &argv, &fds); + if (nb_fds < 1) + goto exit_free; + + if 
(is_prefix(*argv, "file")) { + NEXT_ARG(); + if (!argc) { + p_err("expected file path"); + goto exit_close; + } + if (nb_fds > 1) { + p_err("several programs matched"); + goto exit_close; + } + + filepath = *argv; + NEXT_ARG(); + } else if (is_prefix(*argv, "opcodes")) { + opcodes = true; + NEXT_ARG(); + } else if (is_prefix(*argv, "visual")) { + if (nb_fds > 1) { + p_err("several programs matched"); + goto exit_close; + } + + visual = true; + NEXT_ARG(); + } else if (is_prefix(*argv, "linum")) { + linum = true; + NEXT_ARG(); + } + + if (argc) { + usage(); + goto exit_close; + } + + if (mode == DUMP_JITED) + arrays = 1UL << BPF_PROG_INFO_JITED_INSNS; + else + arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS; + + arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS; + arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; + arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; + arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; + arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; + + if (json_output && nb_fds > 1) + jsonw_start_array(json_wtr); /* root array */ + for (i = 0; i < nb_fds; i++) { + info_linear = bpf_program__get_prog_info_linear(fds[i], arrays); + if (IS_ERR_OR_NULL(info_linear)) { + p_err("can't get prog info: %s", strerror(errno)); + break; + } + + if (json_output && nb_fds > 1) { + jsonw_start_object(json_wtr); /* prog object */ + print_prog_header_json(&info_linear->info); + jsonw_name(json_wtr, "insns"); + } else if (nb_fds > 1) { + print_prog_header_plain(&info_linear->info); + } + + err = prog_dump(&info_linear->info, mode, filepath, opcodes, + visual, linum); + + if (json_output && nb_fds > 1) + jsonw_end_object(json_wtr); /* prog object */ + else if (i != nb_fds - 1 && nb_fds > 1) + printf("\n"); + + free(info_linear); + if (err) + break; + close(fds[i]); + } + if (json_output && nb_fds > 1) + jsonw_end_array(json_wtr); /* root array */ + +exit_close: + for (; i < nb_fds; i++) + close(fds[i]); +exit_free: + free(fds); + return err; } static int do_pin(int argc, char **argv) { int err; - err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id); + err = do_pin_any(argc, argv, prog_parse_fd); if (!err && json_output) jsonw_null(json_wtr); return err; @@ -692,7 +940,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd, } if (*attach_type == BPF_FLOW_DISSECTOR) { - *mapfd = -1; + *mapfd = 0; return 0; } @@ -1089,12 +1337,32 @@ free_data_in: return err; } +static int +get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) +{ + libbpf_print_fn_t print_backup; + int ret; + + ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type); + if (!ret) + return ret; + + /* libbpf_prog_type_by_name() failed, let's re-run with debug level */ + print_backup = libbpf_set_print(print_all_levels); + ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type); + libbpf_set_print(print_backup); + + return ret; +} + static int load_with_options(int argc, char **argv, bool first_prog_only) { + enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, + .relaxed_maps = relaxed_maps, + ); struct bpf_object_load_attr load_attr = { 0 }; - struct bpf_object_open_attr open_attr = { - .prog_type = BPF_PROG_TYPE_UNSPEC, - }; enum bpf_attach_type expected_attach_type; struct map_replace *map_replace = NULL; struct bpf_program *prog = NULL, *pos; @@ -1105,11 +1373,13 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) const char *pinfile; unsigned int i, j; __u32 
ifindex = 0; + const char *file; int idx, err; + if (!REQ_ARGS(2)) return -1; - open_attr.file = GET_ARG(); + file = GET_ARG(); pinfile = GET_ARG(); while (argc) { @@ -1118,7 +1388,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) NEXT_ARG(); - if (open_attr.prog_type != BPF_PROG_TYPE_UNSPEC) { + if (common_prog_type != BPF_PROG_TYPE_UNSPEC) { p_err("program type already specified"); goto err_free_reuse_maps; } @@ -1135,9 +1405,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) strcat(type, *argv); strcat(type, "/"); - err = libbpf_prog_type_by_name(type, - &open_attr.prog_type, - &expected_attach_type); + err = get_prog_type_by_name(type, &common_prog_type, + &expected_attach_type); free(type); if (err < 0) goto err_free_reuse_maps; @@ -1224,20 +1493,20 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) set_max_rlimit(); - obj = __bpf_object__open_xattr(&open_attr, bpf_flags); + obj = bpf_object__open_file(file, &open_opts); if (IS_ERR_OR_NULL(obj)) { p_err("failed to open object file"); goto err_free_reuse_maps; } bpf_object__for_each_program(pos, obj) { - enum bpf_prog_type prog_type = open_attr.prog_type; + enum bpf_prog_type prog_type = common_prog_type; - if (open_attr.prog_type == BPF_PROG_TYPE_UNSPEC) { - const char *sec_name = bpf_program__title(pos, false); + if (prog_type == BPF_PROG_TYPE_UNSPEC) { + const char *sec_name = bpf_program__section_name(pos); - err = libbpf_prog_type_by_name(sec_name, &prog_type, - &expected_attach_type); + err = get_prog_type_by_name(sec_name, &prog_type, + &expected_attach_type); if (err < 0) goto err_close_obj; } @@ -1328,7 +1597,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) err = bpf_obj_pin(bpf_program__fd(prog), pinfile); if (err) { p_err("failed to pin program %s", - bpf_program__title(prog, false)); + bpf_program__section_name(prog)); goto err_close_obj; } } else { @@ -1381,6 +1650,422 @@ static int do_loadall(int argc, char **argv) return load_with_options(argc, argv, false); } +#ifdef BPFTOOL_WITHOUT_SKELETONS + +static int do_profile(int argc, char **argv) +{ + p_err("bpftool prog profile command is not supported. 
Please build bpftool with clang >= 10.0.0"); + return 0; +} + +#else /* BPFTOOL_WITHOUT_SKELETONS */ + +#include "profiler.skel.h" + +struct profile_metric { + const char *name; + struct bpf_perf_event_value val; + struct perf_event_attr attr; + bool selected; + + /* calculate ratios like instructions per cycle */ + const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */ + const char *ratio_desc; + const float ratio_mul; +} metrics[] = { + { + .name = "cycles", + .attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .exclude_user = 1, + }, + }, + { + .name = "instructions", + .attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_INSTRUCTIONS, + .exclude_user = 1, + }, + .ratio_metric = 1, + .ratio_desc = "insns per cycle", + .ratio_mul = 1.0, + }, + { + .name = "l1d_loads", + .attr = { + .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1D | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16), + .exclude_user = 1, + }, + }, + { + .name = "llc_misses", + .attr = { + .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_LL | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), + .exclude_user = 1 + }, + .ratio_metric = 2, + .ratio_desc = "LLC misses per million insns", + .ratio_mul = 1e6, + }, +}; + +static __u64 profile_total_count; + +#define MAX_NUM_PROFILE_METRICS 4 + +static int profile_parse_metrics(int argc, char **argv) +{ + unsigned int metric_cnt; + int selected_cnt = 0; + unsigned int i; + + metric_cnt = sizeof(metrics) / sizeof(struct profile_metric); + + while (argc > 0) { + for (i = 0; i < metric_cnt; i++) { + if (is_prefix(argv[0], metrics[i].name)) { + if (!metrics[i].selected) + selected_cnt++; + metrics[i].selected = true; + break; + } + } + if (i == metric_cnt) { + p_err("unknown metric %s", argv[0]); + return -1; + } + NEXT_ARG(); + } + if (selected_cnt > MAX_NUM_PROFILE_METRICS) { + p_err("too many (%d) metrics, please specify no more than %d metrics at a time", + selected_cnt, MAX_NUM_PROFILE_METRICS); + return -1; + } + return selected_cnt; +} + +static void profile_read_values(struct profiler_bpf *obj) +{ + __u32 m, cpu, num_cpu = obj->rodata->num_cpu; + int reading_map_fd, count_map_fd; + __u64 counts[num_cpu]; + __u32 key = 0; + int err; + + reading_map_fd = bpf_map__fd(obj->maps.accum_readings); + count_map_fd = bpf_map__fd(obj->maps.counts); + if (reading_map_fd < 0 || count_map_fd < 0) { + p_err("failed to get fd for map"); + return; + } + + err = bpf_map_lookup_elem(count_map_fd, &key, counts); + if (err) { + p_err("failed to read count_map: %s", strerror(errno)); + return; + } + + profile_total_count = 0; + for (cpu = 0; cpu < num_cpu; cpu++) + profile_total_count += counts[cpu]; + + for (m = 0; m < ARRAY_SIZE(metrics); m++) { + struct bpf_perf_event_value values[num_cpu]; + + if (!metrics[m].selected) + continue; + + err = bpf_map_lookup_elem(reading_map_fd, &key, values); + if (err) { + p_err("failed to read reading_map: %s", + strerror(errno)); + return; + } + for (cpu = 0; cpu < num_cpu; cpu++) { + metrics[m].val.counter += values[cpu].counter; + metrics[m].val.enabled += values[cpu].enabled; + metrics[m].val.running += values[cpu].running; + } + key++; + } +} + +static void profile_print_readings_json(void) +{ + __u32 m; + + jsonw_start_array(json_wtr); + for (m = 0; m < ARRAY_SIZE(metrics); m++) { + if (!metrics[m].selected) + continue; + jsonw_start_object(json_wtr); + jsonw_string_field(json_wtr, "metric", metrics[m].name); +
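/* One emitted object looks roughly like this (numbers invented for illustration): {"metric": "cycles", "run_cnt": 128, "value": 180201, "enabled": 90287, "running": 90287} */ +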
jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count); + jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter); + jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled); + jsonw_lluint_field(json_wtr, "running", metrics[m].val.running); + + jsonw_end_object(json_wtr); + } + jsonw_end_array(json_wtr); +} + +static void profile_print_readings_plain(void) +{ + __u32 m; + + printf("\n%18llu %-20s\n", profile_total_count, "run_cnt"); + for (m = 0; m < ARRAY_SIZE(metrics); m++) { + struct bpf_perf_event_value *val = &metrics[m].val; + int r; + + if (!metrics[m].selected) + continue; + printf("%18llu %-20s", val->counter, metrics[m].name); + + r = metrics[m].ratio_metric - 1; + if (r >= 0 && metrics[r].selected && + metrics[r].val.counter > 0) { + printf("# %8.2f %-30s", + val->counter * metrics[m].ratio_mul / + metrics[r].val.counter, + metrics[m].ratio_desc); + } else { + printf("%-41s", ""); + } + + if (val->enabled > val->running) + printf("(%4.2f%%)", + val->running * 100.0 / val->enabled); + printf("\n"); + } +} + +static void profile_print_readings(void) +{ + if (json_output) + profile_print_readings_json(); + else + profile_print_readings_plain(); +} + +static char *profile_target_name(int tgt_fd) +{ + struct bpf_prog_info_linear *info_linear; + struct bpf_func_info *func_info; + const struct btf_type *t; + char *name = NULL; + struct btf *btf; + + info_linear = bpf_program__get_prog_info_linear( + tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO); + if (IS_ERR_OR_NULL(info_linear)) { + p_err("failed to get info_linear for prog FD %d", tgt_fd); + return NULL; + } + + if (info_linear->info.btf_id == 0 || + btf__get_from_id(info_linear->info.btf_id, &btf)) { + p_err("prog FD %d doesn't have valid btf", tgt_fd); + goto out; + } + + func_info = u64_to_ptr(info_linear->info.func_info); + t = btf__type_by_id(btf, func_info[0].type_id); + if (!t) { + p_err("btf %d doesn't have type %d", + info_linear->info.btf_id, func_info[0].type_id); + goto out; + } + name = strdup(btf__name_by_offset(btf, t->name_off)); +out: + free(info_linear); + return name; +} + +static struct profiler_bpf *profile_obj; +static int profile_tgt_fd = -1; +static char *profile_tgt_name; +static int *profile_perf_events; +static int profile_perf_event_cnt; + +static void profile_close_perf_events(struct profiler_bpf *obj) +{ + int i; + + for (i = profile_perf_event_cnt - 1; i >= 0; i--) + close(profile_perf_events[i]); + + free(profile_perf_events); + profile_perf_event_cnt = 0; +} + +static int profile_open_perf_events(struct profiler_bpf *obj) +{ + unsigned int cpu, m; + int map_fd, pmu_fd; + + profile_perf_events = calloc( + sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric); + if (!profile_perf_events) { + p_err("failed to allocate memory for perf_event array: %s", + strerror(errno)); + return -1; + } + map_fd = bpf_map__fd(obj->maps.events); + if (map_fd < 0) { + p_err("failed to get fd for events map"); + return -1; + } + + for (m = 0; m < ARRAY_SIZE(metrics); m++) { + if (!metrics[m].selected) + continue; + for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) { + pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr, + -1/*pid*/, cpu, -1/*group_fd*/, 0); + if (pmu_fd < 0 || + bpf_map_update_elem(map_fd, &profile_perf_event_cnt, + &pmu_fd, BPF_ANY) || + ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) { + p_err("failed to create event %s on cpu %d", + metrics[m].name, cpu); + return -1; + } + profile_perf_events[profile_perf_event_cnt++] = pmu_fd; + } + } + return 0; +} + +static void 
profile_print_and_cleanup(void) +{ + profile_close_perf_events(profile_obj); + profile_read_values(profile_obj); + profile_print_readings(); + profiler_bpf__destroy(profile_obj); + + close(profile_tgt_fd); + free(profile_tgt_name); +} + +static void int_exit(int signo) +{ + profile_print_and_cleanup(); + exit(0); +} + +static int do_profile(int argc, char **argv) +{ + int num_metric, num_cpu, err = -1; + struct bpf_program *prog; + unsigned long duration; + char *endptr; + + /* we at least need two args for the prog and one metric */ + if (!REQ_ARGS(3)) + return -EINVAL; + + /* parse target fd */ + profile_tgt_fd = prog_parse_fd(&argc, &argv); + if (profile_tgt_fd < 0) { + p_err("failed to parse fd"); + return -1; + } + + /* parse profiling optional duration */ + if (argc > 2 && is_prefix(argv[0], "duration")) { + NEXT_ARG(); + duration = strtoul(*argv, &endptr, 0); + if (*endptr) + usage(); + NEXT_ARG(); + } else { + duration = UINT_MAX; + } + + num_metric = profile_parse_metrics(argc, argv); + if (num_metric <= 0) + goto out; + + num_cpu = libbpf_num_possible_cpus(); + if (num_cpu <= 0) { + p_err("failed to identify number of CPUs"); + goto out; + } + + profile_obj = profiler_bpf__open(); + if (!profile_obj) { + p_err("failed to open and/or load BPF object"); + goto out; + } + + profile_obj->rodata->num_cpu = num_cpu; + profile_obj->rodata->num_metric = num_metric; + + /* adjust map sizes */ + bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu); + bpf_map__resize(profile_obj->maps.fentry_readings, num_metric); + bpf_map__resize(profile_obj->maps.accum_readings, num_metric); + bpf_map__resize(profile_obj->maps.counts, 1); + + /* change target name */ + profile_tgt_name = profile_target_name(profile_tgt_fd); + if (!profile_tgt_name) + goto out; + + bpf_object__for_each_program(prog, profile_obj->obj) { + err = bpf_program__set_attach_target(prog, profile_tgt_fd, + profile_tgt_name); + if (err) { + p_err("failed to set attach target\n"); + goto out; + } + } + + set_max_rlimit(); + err = profiler_bpf__load(profile_obj); + if (err) { + p_err("failed to load profile_obj"); + goto out; + } + + err = profile_open_perf_events(profile_obj); + if (err) + goto out; + + err = profiler_bpf__attach(profile_obj); + if (err) { + p_err("failed to attach profile_obj"); + goto out; + } + signal(SIGINT, int_exit); + + sleep(duration); + profile_print_and_cleanup(); + return 0; + +out: + profile_close_perf_events(profile_obj); + if (profile_obj) + profiler_bpf__destroy(profile_obj); + close(profile_tgt_fd); + free(profile_tgt_name); + return err; +} + +#endif /* BPFTOOL_WITHOUT_SKELETONS */ + static int do_help(int argc, char **argv) { if (json_output) { @@ -1389,23 +2074,24 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s { show | list } [PROG]\n" - " %s %s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n" - " %s %s dump jited PROG [{ file FILE | opcodes | linum }]\n" - " %s %s pin PROG FILE\n" - " %s %s { load | loadall } OBJ PATH \\\n" + "Usage: %1$s %2$s { show | list } [PROG]\n" + " %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n" + " %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n" + " %1$s %2$s pin PROG FILE\n" + " %1$s %2$s { load | loadall } OBJ PATH \\\n" " [type TYPE] [dev NAME] \\\n" " [map { idx IDX | name NAME } MAP]\\\n" " [pinmaps MAP_DIR]\n" - " %s %s attach PROG ATTACH_TYPE [MAP]\n" - " %s %s detach PROG ATTACH_TYPE [MAP]\n" - " %s %s run PROG \\\n" + " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n" + " 
%1$s %2$s detach PROG ATTACH_TYPE [MAP]\n" + " %1$s %2$s run PROG \\\n" " data_in FILE \\\n" " [data_out FILE [data_size_out L]] \\\n" " [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n" " [repeat N]\n" - " %s %s tracelog\n" - " %s %s help\n" + " %1$s %2$s profile PROG [duration DURATION] METRICs\n" + " %1$s %2$s tracelog\n" + " %1$s %2$s help\n" "\n" " " HELP_SPEC_MAP "\n" " " HELP_SPEC_PROGRAM "\n" @@ -1416,16 +2102,16 @@ static int do_help(int argc, char **argv) " sk_reuseport | flow_dissector | cgroup/sysctl |\n" " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n" " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n" - " cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n" - " cgroup/recvmsg6 | cgroup/getsockopt |\n" - " cgroup/setsockopt }\n" + " cgroup/getpeername4 | cgroup/getpeername6 |\n" + " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n" + " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n" + " cgroup/getsockopt | cgroup/setsockopt |\n" + " struct_ops | fentry | fexit | freplace | sk_lookup }\n" " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n" " flow_dissector }\n" + " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n" " " HELP_SPEC_OPTIONS "\n" "", - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); return 0; @@ -1443,6 +2129,7 @@ static const struct cmd cmds[] = { { "detach", do_detach }, { "tracelog", do_tracelog }, { "run", do_run }, + { "profile", do_profile }, { 0 } }; diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c new file mode 100644 index 000000000000..d9b420972934 --- /dev/null +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (c) 2020 Facebook */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include <bpf/bpf_tracing.h> +#include "pid_iter.h" + +/* keep in sync with the definition in main.h */ +enum bpf_obj_type { + BPF_OBJ_UNKNOWN, + BPF_OBJ_PROG, + BPF_OBJ_MAP, + BPF_OBJ_LINK, + BPF_OBJ_BTF, +}; + +extern const void bpf_link_fops __ksym; +extern const void bpf_map_fops __ksym; +extern const void bpf_prog_fops __ksym; +extern const void btf_fops __ksym; + +const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN; + +static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type) +{ + switch (type) { + case BPF_OBJ_PROG: + return BPF_CORE_READ((struct bpf_prog *)ent, aux, id); + case BPF_OBJ_MAP: + return BPF_CORE_READ((struct bpf_map *)ent, id); + case BPF_OBJ_BTF: + return BPF_CORE_READ((struct btf *)ent, id); + case BPF_OBJ_LINK: + return BPF_CORE_READ((struct bpf_link *)ent, id); + default: + return 0; + } +} + +SEC("iter/task_file") +int iter(struct bpf_iter__task_file *ctx) +{ + struct file *file = ctx->file; + struct task_struct *task = ctx->task; + struct pid_iter_entry e; + const void *fops; + + if (!file || !task) + return 0; + + switch (obj_type) { + case BPF_OBJ_PROG: + fops = &bpf_prog_fops; + break; + case BPF_OBJ_MAP: + fops = &bpf_map_fops; + break; + case BPF_OBJ_BTF: + fops = &btf_fops; + break; + case BPF_OBJ_LINK: + fops = &bpf_link_fops; + break; + default: + return 0; + } + + if (file->f_op != fops) + return 0; + + e.pid = task->tgid; + e.id = get_obj_id(file->private_data, obj_type); + bpf_probe_read_kernel(&e.comm, sizeof(e.comm), 
+ task->group_leader->comm); + bpf_seq_write(ctx->meta->seq, &e, sizeof(e)); + + return 0; +} + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h new file mode 100644 index 000000000000..5692cf257adb --- /dev/null +++ b/tools/bpf/bpftool/skeleton/pid_iter.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2020 Facebook */ +#ifndef __PID_ITER_H +#define __PID_ITER_H + +struct pid_iter_entry { + __u32 id; + int pid; + char comm[16]; +}; + +#endif diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c new file mode 100644 index 000000000000..ce5b65e07ab1 --- /dev/null +++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +// Copyright (c) 2020 Facebook +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +/* map of perf event fds, num_cpu * num_metric entries */ +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(int)); +} events SEC(".maps"); + +/* readings at fentry */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); +} fentry_readings SEC(".maps"); + +/* accumulated readings */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); +} accum_readings SEC(".maps"); + +/* sample counts, one per cpu */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); +} counts SEC(".maps"); + +const volatile __u32 num_cpu = 1; +const volatile __u32 num_metric = 1; +#define MAX_NUM_MATRICS 4 + +SEC("fentry/XXX") +int BPF_PROG(fentry_XXX) +{ + struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS]; + u32 key = bpf_get_smp_processor_id(); + u32 i; + + /* look up before reading, to reduce error */ + for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + u32 flag = i; + + ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag); + if (!ptrs[i]) + return 0; + } + + for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + struct bpf_perf_event_value reading; + int err; + + err = bpf_perf_event_read_value(&events, key, &reading, + sizeof(reading)); + if (err) + return 0; + *(ptrs[i]) = reading; + key += num_cpu; + } + + return 0; +} + +static inline void +fexit_update_maps(u32 id, struct bpf_perf_event_value *after) +{ + struct bpf_perf_event_value *before, diff; + + before = bpf_map_lookup_elem(&fentry_readings, &id); + /* only account samples with a valid fentry_reading */ + if (before && before->counter) { + struct bpf_perf_event_value *accum; + + diff.counter = after->counter - before->counter; + diff.enabled = after->enabled - before->enabled; + diff.running = after->running - before->running; + + accum = bpf_map_lookup_elem(&accum_readings, &id); + if (accum) { + accum->counter += diff.counter; + accum->enabled += diff.enabled; + accum->running += diff.running; + } + } +} + +SEC("fexit/XXX") +int BPF_PROG(fexit_XXX) +{ + struct bpf_perf_event_value readings[MAX_NUM_MATRICS]; + u32 cpu = bpf_get_smp_processor_id(); + u32 i, zero = 0; + int err; + u64 *count; + + /* read all events before updating the maps, to reduce error */ + for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + err = 
bpf_perf_event_read_value(&events, cpu + i * num_cpu, + readings + i, sizeof(*readings)); + if (err) + return 0; + } + count = bpf_map_lookup_elem(&counts, &zero); + if (count) { + *count += 1; + for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) + fexit_update_maps(i, &readings[i]); + } + return 0; +} + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c new file mode 100644 index 000000000000..b58b91f62ffb --- /dev/null +++ b/tools/bpf/bpftool/struct_ops.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2020 Facebook */ + +#include <errno.h> +#include <stdio.h> +#include <unistd.h> + +#include <linux/err.h> + +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> + +#include "json_writer.h" +#include "main.h" + +#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" + +static const struct btf_type *map_info_type; +static __u32 map_info_alloc_len; +static struct btf *btf_vmlinux; +static __s32 map_info_type_id; + +struct res { + unsigned int nr_maps; + unsigned int nr_errs; +}; + +static const struct btf *get_btf_vmlinux(void) +{ + if (btf_vmlinux) + return btf_vmlinux; + + btf_vmlinux = libbpf_find_kernel_btf(); + if (IS_ERR(btf_vmlinux)) + p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y"); + + return btf_vmlinux; +} + +static const char *get_kern_struct_ops_name(const struct bpf_map_info *info) +{ + const struct btf *kern_btf; + const struct btf_type *t; + const char *st_ops_name; + + kern_btf = get_btf_vmlinux(); + if (IS_ERR(kern_btf)) + return "<btf_vmlinux_not_found>"; + + t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id); + st_ops_name = btf__name_by_offset(kern_btf, t->name_off); + st_ops_name += strlen(STRUCT_OPS_VALUE_PREFIX); + + return st_ops_name; +} + +static __s32 get_map_info_type_id(void) +{ + const struct btf *kern_btf; + + if (map_info_type_id) + return map_info_type_id; + + kern_btf = get_btf_vmlinux(); + if (IS_ERR(kern_btf)) { + map_info_type_id = PTR_ERR(kern_btf); + return map_info_type_id; + } + + map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info", + BTF_KIND_STRUCT); + if (map_info_type_id < 0) { + p_err("can't find bpf_map_info from btf_vmlinux"); + return map_info_type_id; + } + map_info_type = btf__type_by_id(kern_btf, map_info_type_id); + + /* Ensure map_info_alloc() has at least what the bpftool needs */ + map_info_alloc_len = map_info_type->size; + if (map_info_alloc_len < sizeof(struct bpf_map_info)) + map_info_alloc_len = sizeof(struct bpf_map_info); + + return map_info_type_id; +} + +/* If the subcmd needs to print out the bpf_map_info, + * it should always call map_info_alloc to allocate + * a bpf_map_info object instead of allocating it + * on the stack. + * + * map_info_alloc() will take the running kernel's btf + * into account. i.e. it will consider the + * sizeof(struct bpf_map_info) of the running kernel. + * + * It will enable the "struct_ops" cmd to print the latest + * "struct bpf_map_info". + * + * [ Recall that "struct_ops" requires the kernel's btf to + * be available ] + */ +static struct bpf_map_info *map_info_alloc(__u32 *alloc_len) +{ + struct bpf_map_info *info; + + if (get_map_info_type_id() < 0) + return NULL; + + info = calloc(1, map_info_alloc_len); + if (!info) + p_err("mem alloc failed"); + else + *alloc_len = map_info_alloc_len; + + return info; +} + +/* It iterates all struct_ops maps of the system. 
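+ * (One map at a time: each call advances via bpf_map_get_next_id() and re-opens the candidate with bpf_map_get_fd_by_id(), as the body below shows.)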
+ * It returns the fd in "*res_fd" and map_info in "*info". + * In the very first iteration, info->id should be 0. + * An optional map "*name" filter can be specified. + * The filter can be made more flexible in the future. + * e.g. filter by kernel-struct-ops-name, regex-name, glob-name, etc. + * + * Return value: + * 1: A struct_ops map was found. It is returned in "*res_fd" and "*info". + * The caller can continue to call get_next in the future. + * 0: No struct_ops map is returned. + * All struct_ops maps have been found. + * -1: Error and the caller should abort the iteration. + */ +static int get_next_struct_ops_map(const char *name, int *res_fd, + struct bpf_map_info *info, __u32 info_len) +{ + __u32 id = info->id; + int err, fd; + + while (true) { + err = bpf_map_get_next_id(id, &id); + if (err) { + if (errno == ENOENT) + return 0; + p_err("can't get next map: %s", strerror(errno)); + return -1; + } + + fd = bpf_map_get_fd_by_id(id); + if (fd < 0) { + if (errno == ENOENT) + continue; + p_err("can't get map by id (%u): %s", + id, strerror(errno)); + return -1; + } + + err = bpf_obj_get_info_by_fd(fd, info, &info_len); + if (err) { + p_err("can't get map info: %s", strerror(errno)); + close(fd); + return -1; + } + + if (info->type == BPF_MAP_TYPE_STRUCT_OPS && + (!name || !strcmp(name, info->name))) { + *res_fd = fd; + return 1; + } + close(fd); + } +} + +static int cmd_retval(const struct res *res, bool must_have_one_map) +{ + if (res->nr_errs || (!res->nr_maps && must_have_one_map)) + return -1; + + return 0; +} + +/* "data" is the work_func private storage */ +typedef int (*work_func)(int fd, const struct bpf_map_info *info, void *data, + struct json_writer *wtr); + +/* Find all struct_ops maps in the system. + * Filter by "name" (if specified). + * Then call "func(fd, info, data, wtr)" on each struct_ops map found. + */ +static struct res do_search(const char *name, work_func func, void *data, + struct json_writer *wtr) +{ + struct bpf_map_info *info; + struct res res = {}; + __u32 info_len; + int fd, err; + + info = map_info_alloc(&info_len); + if (!info) { + res.nr_errs++; + return res; + } + + if (wtr) + jsonw_start_array(wtr); + while ((err = get_next_struct_ops_map(name, &fd, info, info_len)) == 1) { + res.nr_maps++; + err = func(fd, info, data, wtr); + if (err) + res.nr_errs++; + close(fd); + } + if (wtr) + jsonw_end_array(wtr); + + if (err) + res.nr_errs++; + + if (!wtr && name && !res.nr_errs && !res.nr_maps) + /* We are not printing an empty []. + * Thus, we need to specifically say that nothing + * was found for "name" here. + */ + p_err("no struct_ops found for %s", name); + else if (!wtr && json_output && !res.nr_errs) + /* The "func()" above is not writing any json (i.e. !wtr + * test here). + * + * However, "-j" is enabled and there are no errors here, + * so call json_null() as the current convention of + * other cmds.
+ */ + jsonw_null(json_wtr); + + free(info); + return res; +} + +static struct res do_one_id(const char *id_str, work_func func, void *data, + struct json_writer *wtr) +{ + struct bpf_map_info *info; + struct res res = {}; + unsigned long id; + __u32 info_len; + char *endptr; + int fd; + + id = strtoul(id_str, &endptr, 0); + if (*endptr || !id || id > UINT32_MAX) { + p_err("invalid id %s", id_str); + res.nr_errs++; + return res; + } + + fd = bpf_map_get_fd_by_id(id); + if (fd == -1) { + p_err("can't get map by id (%lu): %s", id, strerror(errno)); + res.nr_errs++; + return res; + } + + info = map_info_alloc(&info_len); + if (!info) { + res.nr_errs++; + goto done; + } + + if (bpf_obj_get_info_by_fd(fd, info, &info_len)) { + p_err("can't get map info: %s", strerror(errno)); + res.nr_errs++; + goto done; + } + + if (info->type != BPF_MAP_TYPE_STRUCT_OPS) { + p_err("%s id %u is not a struct_ops map", info->name, info->id); + res.nr_errs++; + goto done; + } + + res.nr_maps++; + + if (func(fd, info, data, wtr)) + res.nr_errs++; + else if (!wtr && json_output) + /* The "func()" above is not writing any json (i.e. !wtr + * test here). + * + * However, "-j" is enabled and there are no errors here, + * so call json_null() as the current convention of + * other cmds. + */ + jsonw_null(json_wtr); + +done: + free(info); + close(fd); + + return res; +} + +static struct res do_work_on_struct_ops(const char *search_type, + const char *search_term, + work_func func, void *data, + struct json_writer *wtr) +{ + if (search_type) { + if (is_prefix(search_type, "id")) + return do_one_id(search_term, func, data, wtr); + else if (!is_prefix(search_type, "name")) + usage(); + } + + return do_search(search_term, func, data, wtr); +} + +static int __do_show(int fd, const struct bpf_map_info *info, void *data, + struct json_writer *wtr) +{ + if (wtr) { + jsonw_start_object(wtr); + jsonw_uint_field(wtr, "id", info->id); + jsonw_string_field(wtr, "name", info->name); + jsonw_string_field(wtr, "kernel_struct_ops", + get_kern_struct_ops_name(info)); + jsonw_end_object(wtr); + } else { + printf("%u: %-15s %-32s\n", info->id, info->name, + get_kern_struct_ops_name(info)); + } + + return 0; +} + +static int do_show(int argc, char **argv) +{ + const char *search_type = NULL, *search_term = NULL; + struct res res; + + if (argc && argc != 2) + usage(); + + if (argc == 2) { + search_type = GET_ARG(); + search_term = GET_ARG(); + } + + res = do_work_on_struct_ops(search_type, search_term, __do_show, + NULL, json_wtr); + + return cmd_retval(&res, !!search_term); +} + +static int __do_dump(int fd, const struct bpf_map_info *info, void *data, + struct json_writer *wtr) +{ + struct btf_dumper *d = (struct btf_dumper *)data; + const struct btf_type *struct_ops_type; + const struct btf *kern_btf = d->btf; + const char *struct_ops_name; + int zero = 0; + void *value; + + /* note: d->jw == wtr */ + + kern_btf = d->btf; + + /* The kernel supporting BPF_MAP_TYPE_STRUCT_OPS must have + * btf_vmlinux_value_type_id.
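+ * That id names the kernel-side value struct, e.g. something like + * "bpf_struct_ops_tcp_congestion_ops" (illustrative, following the + * STRUCT_OPS_VALUE_PREFIX convention above); it is resolved just below so + * the map value can be printed with its real field names.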
+ */ + struct_ops_type = btf__type_by_id(kern_btf, + info->btf_vmlinux_value_type_id); + struct_ops_name = btf__name_by_offset(kern_btf, + struct_ops_type->name_off); + value = calloc(1, info->value_size); + if (!value) { + p_err("mem alloc failed"); + return -1; + } + + if (bpf_map_lookup_elem(fd, &zero, value)) { + p_err("can't lookup struct_ops map %s id %u", + info->name, info->id); + free(value); + return -1; + } + + jsonw_start_object(wtr); + jsonw_name(wtr, "bpf_map_info"); + btf_dumper_type(d, map_info_type_id, (void *)info); + jsonw_end_object(wtr); + + jsonw_start_object(wtr); + jsonw_name(wtr, struct_ops_name); + btf_dumper_type(d, info->btf_vmlinux_value_type_id, value); + jsonw_end_object(wtr); + + free(value); + + return 0; +} + +static int do_dump(int argc, char **argv) +{ + const char *search_type = NULL, *search_term = NULL; + json_writer_t *wtr = json_wtr; + const struct btf *kern_btf; + struct btf_dumper d = {}; + struct res res; + + if (argc && argc != 2) + usage(); + + if (argc == 2) { + search_type = GET_ARG(); + search_term = GET_ARG(); + } + + kern_btf = get_btf_vmlinux(); + if (IS_ERR(kern_btf)) + return -1; + + if (!json_output) { + wtr = jsonw_new(stdout); + if (!wtr) { + p_err("can't create json writer"); + return -1; + } + jsonw_pretty(wtr, true); + } + + d.btf = kern_btf; + d.jw = wtr; + d.is_plain_text = !json_output; + d.prog_id_as_func_ptr = true; + + res = do_work_on_struct_ops(search_type, search_term, __do_dump, &d, + wtr); + + if (!json_output) + jsonw_destroy(&wtr); + + return cmd_retval(&res, !!search_term); +} + +static int __do_unregister(int fd, const struct bpf_map_info *info, void *data, + struct json_writer *wtr) +{ + int zero = 0; + + if (bpf_map_delete_elem(fd, &zero)) { + p_err("can't unload %s %s id %u: %s", + get_kern_struct_ops_name(info), info->name, + info->id, strerror(errno)); + return -1; + } + + p_info("Unregistered %s %s id %u", + get_kern_struct_ops_name(info), info->name, + info->id); + + return 0; +} + +static int do_unregister(int argc, char **argv) +{ + const char *search_type, *search_term; + struct res res; + + if (argc != 2) + usage(); + + search_type = GET_ARG(); + search_term = GET_ARG(); + + res = do_work_on_struct_ops(search_type, search_term, + __do_unregister, NULL, NULL); + + return cmd_retval(&res, true); +} + +static int do_register(int argc, char **argv) +{ + struct bpf_object_load_attr load_attr = {}; + const struct bpf_map_def *def; + struct bpf_map_info info = {}; + __u32 info_len = sizeof(info); + int nr_errs = 0, nr_maps = 0; + struct bpf_object *obj; + struct bpf_link *link; + struct bpf_map *map; + const char *file; + + if (argc != 1) + usage(); + + file = GET_ARG(); + + obj = bpf_object__open(file); + if (IS_ERR_OR_NULL(obj)) + return -1; + + set_max_rlimit(); + + load_attr.obj = obj; + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + load_attr.log_level = 1 + 2 + 4; + + if (bpf_object__load_xattr(&load_attr)) { + bpf_object__close(obj); + return -1; + } + + bpf_object__for_each_map(map, obj) { + def = bpf_map__def(map); + if (def->type != BPF_MAP_TYPE_STRUCT_OPS) + continue; + + link = bpf_map__attach_struct_ops(map); + if (IS_ERR(link)) { + p_err("can't register struct_ops %s: %s", + bpf_map__name(map), + strerror(-PTR_ERR(link))); + nr_errs++; + continue; + } + nr_maps++; + + bpf_link__disconnect(link); + bpf_link__destroy(link); + + if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, + &info_len)) + p_info("Registered %s %s id %u", + get_kern_struct_ops_name(&info), + 
bpf_map__name(map), + info.id); + else + /* Not p_err. The struct_ops was attached + * successfully. + */ + p_info("Registered %s but can't find id: %s", + bpf_map__name(map), strerror(errno)); + } + + bpf_object__close(obj); + + if (nr_errs) + return -1; + + if (!nr_maps) { + p_err("no struct_ops found in %s", file); + return -1; + } + + if (json_output) + jsonw_null(json_wtr); + + return 0; +} + +static int do_help(int argc, char **argv) +{ + if (json_output) { + jsonw_null(json_wtr); + return 0; + } + + fprintf(stderr, + "Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n" + " %1$s %2$s dump [STRUCT_OPS_MAP]\n" + " %1$s %2$s register OBJ\n" + " %1$s %2$s unregister STRUCT_OPS_MAP\n" + " %1$s %2$s help\n" + "\n" + " OPTIONS := { {-j|--json} [{-p|--pretty}] }\n" + " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n" + "", + bin_name, argv[-2]); + + return 0; +} + +static const struct cmd cmds[] = { + { "show", do_show }, + { "list", do_show }, + { "register", do_register }, + { "unregister", do_unregister }, + { "dump", do_dump }, + { "help", do_help }, + { 0 } +}; + +int do_struct_ops(int argc, char **argv) +{ + int err; + + err = cmd_select(cmds, argc, argv, do_help); + + if (!IS_ERR(btf_vmlinux)) + btf__free(btf_vmlinux); + + return err; +} diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index 5b91ee65a080..6fc3e6f7f40c 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -7,7 +7,7 @@ #include <stdlib.h> #include <string.h> #include <sys/types.h> -#include <libbpf.h> +#include <bpf/libbpf.h> #include "disasm.h" #include "json_writer.h" @@ -196,6 +196,9 @@ static const char *print_imm(void *private_data, else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm); + else if (insn->src_reg == BPF_PSEUDO_FUNC) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "subprog[%+d]", insn->imm); else snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), "0x%llx", (unsigned long long)full_imm); diff --git a/tools/bpf/resolve_btfids/.gitignore b/tools/bpf/resolve_btfids/.gitignore new file mode 100644 index 000000000000..16913fffc985 --- /dev/null +++ b/tools/bpf/resolve_btfids/.gitignore @@ -0,0 +1,3 @@ +/fixdep +/resolve_btfids +/libbpf/ diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build new file mode 100644 index 000000000000..ae82da03f9bf --- /dev/null +++ b/tools/bpf/resolve_btfids/Build @@ -0,0 +1,10 @@ +resolve_btfids-y += main.o +resolve_btfids-y += rbtree.o +resolve_btfids-y += zalloc.o +resolve_btfids-y += string.o +resolve_btfids-y += ctype.o +resolve_btfids-y += str_error_r.o + +$(OUTPUT)%.o: ../../lib/%.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile new file mode 100644 index 000000000000..19a3112e271a --- /dev/null +++ b/tools/bpf/resolve_btfids/Makefile @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: GPL-2.0-only +include ../../scripts/Makefile.include +include ../../scripts/Makefile.arch + +srctree := $(abspath $(CURDIR)/../../../) + +ifeq ($(V),1) + Q = + msg = +else + Q = @ + ifeq ($(silent),1) + msg = + else + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + endif + MAKEFLAGS=--no-print-directory +endif + +# always use the host compiler +AR = $(HOSTAR) +CC = $(HOSTCC) +LD = $(HOSTLD) +ARCH = $(HOSTARCH) +RM ?= rm +CROSS_COMPILE = +CFLAGS := 
$(KBUILD_HOSTCFLAGS) +LDFLAGS := $(KBUILD_HOSTLDFLAGS) + +OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ + +LIBBPF_SRC := $(srctree)/tools/lib/bpf/ +SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ + +BPFOBJ := $(OUTPUT)/libbpf/libbpf.a +LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/ +SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a + +LIBBPF_DESTDIR := $(LIBBPF_OUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include + +BINARY := $(OUTPUT)/resolve_btfids +BINARY_IN := $(BINARY)-in.o + +all: $(BINARY) + +$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT): + $(call msg,MKDIR,,$@) + $(Q)mkdir -p $(@) + +$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT) + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= EXTRA_CFLAGS="$(CFLAGS)" \ + $(abspath $@) install_headers + +CFLAGS += -g \ + -I$(srctree)/tools/include \ + -I$(srctree)/tools/include/uapi \ + -I$(LIBBPF_INCLUDE) \ + -I$(SUBCMD_SRC) + +LIBS = -lelf -lz + +export srctree OUTPUT CFLAGS Q +include $(srctree)/tools/build/Makefile.include + +$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT) + $(Q)$(MAKE) $(build)=resolve_btfids + +$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) + $(call msg,LINK,$@) + $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) + +clean_objects := $(wildcard $(OUTPUT)/*.o \ + $(OUTPUT)/.*.o.cmd \ + $(OUTPUT)/.*.o.d \ + $(LIBBPF_OUT) \ + $(LIBBPF_DESTDIR) \ + $(OUTPUT)/libsubcmd \ + $(OUTPUT)/resolve_btfids) + +ifneq ($(clean_objects),) +clean: fixdep-clean + $(call msg,CLEAN,$(BINARY)) + $(Q)$(RM) -rf $(clean_objects) +else +clean: +endif + +tags: + $(call msg,GEN,,tags) + $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC) + +FORCE: + +.PHONY: all FORCE clean tags diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c new file mode 100644 index 000000000000..2d18903538ea --- /dev/null +++ b/tools/bpf/resolve_btfids/main.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * resolve_btfids scans Elf object for .BTF_ids section and resolves + * its symbols with BTF ID values. 
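
To make the symbol convention spelled out in the remainder of this header comment concrete: a hand-written approximation of a single `.BTF_ids` entry is shown below. The real entries come from the kernel's `BTF_ID()` macros (include/linux/btf_ids.h); the exact assembly they emit may differ slightly, and `vfs_close` here is only an example name. resolve_btfids later overwrites the 4-byte payload with the resolved BTF type ID.

```c
/* Approximate shape of one .BTF_ids entry (illustration only; real
 * entries are emitted by the kernel's BTF_ID() macros). The ".zero 4"
 * payload is patched by resolve_btfids with the BTF ID of vfs_close.
 */
__asm__(
"	.pushsection .BTF_ids,\"a\"		\n"
"	.global __BTF_ID__func__vfs_close__1	\n"
"__BTF_ID__func__vfs_close__1:			\n"
"	.zero 4					\n"
"	.popsection				\n");
```
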
+ * + * Each symbol points to 4 bytes data and is expected to have + * following name syntax: + * + * __BTF_ID__<type>__<symbol>[__<id>] + * + * type is: + * + * func - lookup BTF_KIND_FUNC symbol with <symbol> name + * and store its ID into the data: + * + * __BTF_ID__func__vfs_close__1: + * .zero 4 + * + * struct - lookup BTF_KIND_STRUCT symbol with <symbol> name + * and store its ID into the data: + * + * __BTF_ID__struct__sk_buff__1: + * .zero 4 + * + * union - lookup BTF_KIND_UNION symbol with <symbol> name + * and store its ID into the data: + * + * __BTF_ID__union__thread_union__1: + * .zero 4 + * + * typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name + * and store its ID into the data: + * + * __BTF_ID__typedef__pid_t__1: + * .zero 4 + * + * set - store symbol size into first 4 bytes and sort following + * ID list + * + * __BTF_ID__set__list: + * .zero 4 + * list: + * __BTF_ID__func__vfs_getattr__3: + * .zero 4 + * __BTF_ID__func__vfs_fallocate__4: + * .zero 4 + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <stdlib.h> +#include <libelf.h> +#include <gelf.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <errno.h> +#include <linux/rbtree.h> +#include <linux/zalloc.h> +#include <linux/err.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> +#include <parse-options.h> + +#define BTF_IDS_SECTION ".BTF_ids" +#define BTF_ID "__BTF_ID__" + +#define BTF_STRUCT "struct" +#define BTF_UNION "union" +#define BTF_TYPEDEF "typedef" +#define BTF_FUNC "func" +#define BTF_SET "set" + +#define ADDR_CNT 100 + +struct btf_id { + struct rb_node rb_node; + char *name; + union { + int id; + int cnt; + }; + int addr_cnt; + bool is_set; + Elf64_Addr addr[ADDR_CNT]; +}; + +struct object { + const char *path; + const char *btf; + const char *base_btf_path; + + struct { + int fd; + Elf *elf; + Elf_Data *symbols; + Elf_Data *idlist; + int symbols_shndx; + int idlist_shndx; + size_t strtabidx; + unsigned long idlist_addr; + } efile; + + struct rb_root sets; + struct rb_root structs; + struct rb_root unions; + struct rb_root typedefs; + struct rb_root funcs; + + int nr_funcs; + int nr_structs; + int nr_unions; + int nr_typedefs; +}; + +static int verbose; + +static int eprintf(int level, int var, const char *fmt, ...) +{ + va_list args; + int ret = 0; + + if (var >= level) { + va_start(args, fmt); + ret = vfprintf(stderr, fmt, args); + va_end(args); + } + return ret; +} + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_debug(fmt, ...) \ + eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debugN(n, fmt, ...) \ + eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) 
\
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+static bool is_btf_id(const char *name)
+{
+	return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+}
+
+static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+{
+	struct rb_node *p = root->rb_node;
+	struct btf_id *id;
+	int cmp;
+
+	while (p) {
+		id = rb_entry(p, struct btf_id, rb_node);
+		cmp = strcmp(id->name, name);
+		if (cmp < 0)
+			p = p->rb_left;
+		else if (cmp > 0)
+			p = p->rb_right;
+		else
+			return id;
+	}
+	return NULL;
+}
+
+static struct btf_id *
+btf_id__add(struct rb_root *root, char *name, bool unique)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btf_id *id;
+	int cmp;
+
+	while (*p != NULL) {
+		parent = *p;
+		id = rb_entry(parent, struct btf_id, rb_node);
+		cmp = strcmp(id->name, name);
+		if (cmp < 0)
+			p = &(*p)->rb_left;
+		else if (cmp > 0)
+			p = &(*p)->rb_right;
+		else
+			return unique ? NULL : id;
+	}
+
+	id = zalloc(sizeof(*id));
+	if (id) {
+		pr_debug("adding symbol %s\n", name);
+		id->name = name;
+		rb_link_node(&id->rb_node, parent, p);
+		rb_insert_color(&id->rb_node, root);
+	}
+	return id;
+}
+
+static char *get_id(const char *prefix_end)
+{
+	/*
+	 * __BTF_ID__func__vfs_truncate__0
+	 * prefix_end =  ^
+	 * pos        =    ^
+	 */
+	int len = strlen(prefix_end);
+	int pos = sizeof("__") - 1;
+	char *p, *id;
+
+	if (pos >= len)
+		return NULL;
+
+	id = strdup(prefix_end + pos);
+	if (id) {
+		/*
+		 * __BTF_ID__func__vfs_truncate__0
+		 * id =            ^
+		 *
+		 * cut the unique id part
+		 */
+		p = strrchr(id, '_');
+		p--;
+		if (*p != '_') {
+			free(id);
+			return NULL;
+		}
+		*p = '\0';
+	}
+	return id;
+}
+
+static struct btf_id *add_set(struct object *obj, char *name)
+{
+	/*
+	 * __BTF_ID__set__name
+	 * name =    ^
+	 * id   =         ^
+	 */
+	char *id = name + sizeof(BTF_SET "__") - 1;
+	int len = strlen(name);
+
+	if (id >= name + len) {
+		pr_err("FAILED to parse set name: %s\n", name);
+		return NULL;
+	}
+
+	return btf_id__add(&obj->sets, id, true);
+}
+
+static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
+{
+	char *id;
+
+	id = get_id(name + size);
+	if (!id) {
+		pr_err("FAILED to parse symbol name: %s\n", name);
+		return NULL;
+	}
+
+	return btf_id__add(root, id, false);
+}
+
+/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
+#ifndef SHF_COMPRESSED
+#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
+#endif
+
+/*
+ * The data of a compressed section should be aligned to 4
+ * (for 32 bit) or 8 (for 64 bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with a
+ * misaligned section error during the update:
+ *    FAILED elf_update(WRITE): invalid section alignment
+ *
+ * While waiting for an ld fix, we correct the compressed sections'
+ * sh_addralign value manually.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+	int expected = gelf_getclass(elf) == ELFCLASS32 ?
4 : 8;
+
+	if (!(sh->sh_flags & SHF_COMPRESSED))
+		return 0;
+
+	if (sh->sh_addralign == expected)
+		return 0;
+
+	pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
+		  sh->sh_addralign, expected);
+
+	sh->sh_addralign = expected;
+
+	if (gelf_update_shdr(scn, sh) == 0) {
+		pr_err("FAILED cannot update section header: %s\n",
+			elf_errmsg(-1));
+		return -1;
+	}
+	return 0;
+}
+
+static int elf_collect(struct object *obj)
+{
+	Elf_Scn *scn = NULL;
+	size_t shdrstrndx;
+	int idx = 0;
+	Elf *elf;
+	int fd;
+
+	fd = open(obj->path, O_RDWR, 0666);
+	if (fd == -1) {
+		pr_err("FAILED cannot open %s: %s\n",
+			obj->path, strerror(errno));
+		return -1;
+	}
+
+	elf_version(EV_CURRENT);
+
+	elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
+	if (!elf) {
+		close(fd);
+		pr_err("FAILED cannot create ELF descriptor: %s\n",
+			elf_errmsg(-1));
+		return -1;
+	}
+
+	obj->efile.fd = fd;
+	obj->efile.elf = elf;
+
+	elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);
+
+	if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
+		pr_err("FAILED cannot get shdr str ndx\n");
+		return -1;
+	}
+
+	/*
+	 * Scan all the ELF sections, looking for the .BTF_ids
+	 * section and the symbol table, and save their data.
+	 */
+	while ((scn = elf_nextscn(elf, scn)) != NULL) {
+		Elf_Data *data;
+		GElf_Shdr sh;
+		char *name;
+
+		idx++;
+		if (gelf_getshdr(scn, &sh) != &sh) {
+			pr_err("FAILED get section(%d) header\n", idx);
+			return -1;
+		}
+
+		name = elf_strptr(elf, shdrstrndx, sh.sh_name);
+		if (!name) {
+			pr_err("FAILED get section(%d) name\n", idx);
+			return -1;
+		}
+
+		data = elf_getdata(scn, 0);
+		if (!data) {
+			pr_err("FAILED to get section(%d) data from %s\n",
+				idx, name);
+			return -1;
+		}
+
+		pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+			  idx, name, (unsigned long) data->d_size,
+			  (int) sh.sh_link, (unsigned long) sh.sh_flags,
+			  (int) sh.sh_type);
+
+		if (sh.sh_type == SHT_SYMTAB) {
+			obj->efile.symbols = data;
+			obj->efile.symbols_shndx = idx;
+			obj->efile.strtabidx = sh.sh_link;
+		} else if (!strcmp(name, BTF_IDS_SECTION)) {
+			obj->efile.idlist = data;
+			obj->efile.idlist_shndx = idx;
+			obj->efile.idlist_addr = sh.sh_addr;
+		}
+
+		if (compressed_section_fix(elf, scn, &sh))
+			return -1;
+	}
+
+	return 0;
+}
+
+static int symbols_collect(struct object *obj)
+{
+	Elf_Scn *scn = NULL;
+	int n, i;
+	GElf_Shdr sh;
+	char *name;
+
+	scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
+	if (!scn)
+		return -1;
+
+	if (gelf_getshdr(scn, &sh) != &sh)
+		return -1;
+
+	n = sh.sh_size / sh.sh_entsize;
+
+	/*
+	 * Scan symbols and look for the ones starting with
+	 * __BTF_ID__* over .BTF_ids section.
+ */ + for (i = 0; i < n; i++) { + char *prefix; + struct btf_id *id; + GElf_Sym sym; + + if (!gelf_getsym(obj->efile.symbols, i, &sym)) + return -1; + + if (sym.st_shndx != obj->efile.idlist_shndx) + continue; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, + sym.st_name); + + if (!is_btf_id(name)) + continue; + + /* + * __BTF_ID__TYPE__vfs_truncate__0 + * prefix = ^ + */ + prefix = name + sizeof(BTF_ID) - 1; + + /* struct */ + if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) { + obj->nr_structs++; + id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1); + /* union */ + } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) { + obj->nr_unions++; + id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1); + /* typedef */ + } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) { + obj->nr_typedefs++; + id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1); + /* func */ + } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) { + obj->nr_funcs++; + id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1); + /* set */ + } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) { + id = add_set(obj, prefix); + /* + * SET objects store list's count, which is encoded + * in symbol's size, together with 'cnt' field hence + * that - 1. + */ + if (id) { + id->cnt = sym.st_size / sizeof(int) - 1; + id->is_set = true; + } + } else { + pr_err("FAILED unsupported prefix %s\n", prefix); + return -1; + } + + if (!id) + return -ENOMEM; + + if (id->addr_cnt >= ADDR_CNT) { + pr_err("FAILED symbol %s crossed the number of allowed lists\n", + id->name); + return -1; + } + id->addr[id->addr_cnt++] = sym.st_value; + } + + return 0; +} + +static int symbols_resolve(struct object *obj) +{ + int nr_typedefs = obj->nr_typedefs; + int nr_structs = obj->nr_structs; + int nr_unions = obj->nr_unions; + int nr_funcs = obj->nr_funcs; + struct btf *base_btf = NULL; + int err, type_id; + struct btf *btf; + __u32 nr; + + if (obj->base_btf_path) { + base_btf = btf__parse(obj->base_btf_path, NULL); + err = libbpf_get_error(base_btf); + if (err) { + pr_err("FAILED: load base BTF from %s: %s\n", + obj->base_btf_path, strerror(-err)); + return -1; + } + } + + btf = btf__parse(obj->btf ?: obj->path, NULL); + err = libbpf_get_error(btf); + if (err) { + pr_err("FAILED: load BTF from %s: %s\n", + obj->btf ?: obj->path, strerror(-err)); + goto out; + } + + err = -1; + nr = btf__get_nr_types(btf); + + /* + * Iterate all the BTF types and search for collected symbol IDs. 
+ */ + for (type_id = 1; type_id <= nr; type_id++) { + const struct btf_type *type; + struct rb_root *root; + struct btf_id *id; + const char *str; + int *nr; + + type = btf__type_by_id(btf, type_id); + if (!type) { + pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n", + type_id); + goto out; + } + + if (btf_is_func(type) && nr_funcs) { + nr = &nr_funcs; + root = &obj->funcs; + } else if (btf_is_struct(type) && nr_structs) { + nr = &nr_structs; + root = &obj->structs; + } else if (btf_is_union(type) && nr_unions) { + nr = &nr_unions; + root = &obj->unions; + } else if (btf_is_typedef(type) && nr_typedefs) { + nr = &nr_typedefs; + root = &obj->typedefs; + } else + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n", + type_id); + goto out; + } + + id = btf_id__find(root, str); + if (id) { + if (id->id) { + pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n", + str, id->id, type_id, id->id); + } else { + id->id = type_id; + (*nr)--; + } + } + } + + err = 0; +out: + btf__free(base_btf); + btf__free(btf); + return err; +} + +static int id_patch(struct object *obj, struct btf_id *id) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + int i; + + if (!id->id && !id->is_set) + pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name); + + for (i = 0; i < id->addr_cnt; i++) { + unsigned long addr = id->addr[i]; + unsigned long idx = addr - obj->efile.idlist_addr; + + pr_debug("patching addr %5lu: ID %7d [%s]\n", + idx, id->id, id->name); + + if (idx >= data->d_size) { + pr_err("FAILED patching index %lu out of bounds %lu\n", + idx, data->d_size); + return -1; + } + + idx = idx / sizeof(int); + ptr[idx] = id->id; + } + + return 0; +} + +static int __symbols_patch(struct object *obj, struct rb_root *root) +{ + struct rb_node *next; + struct btf_id *id; + + next = rb_first(root); + while (next) { + id = rb_entry(next, struct btf_id, rb_node); + + if (id_patch(obj, id)) + return -1; + + next = rb_next(next); + } + return 0; +} + +static int cmp_id(const void *pa, const void *pb) +{ + const int *a = pa, *b = pb; + + return *a - *b; +} + +static int sets_patch(struct object *obj) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + struct rb_node *next; + + next = rb_first(&obj->sets); + while (next) { + unsigned long addr, idx; + struct btf_id *id; + int *base; + int cnt; + + id = rb_entry(next, struct btf_id, rb_node); + addr = id->addr[0]; + idx = addr - obj->efile.idlist_addr; + + /* sets are unique */ + if (id->addr_cnt != 1) { + pr_err("FAILED malformed data for set '%s'\n", + id->name); + return -1; + } + + idx = idx / sizeof(int); + base = &ptr[idx] + 1; + cnt = ptr[idx]; + + pr_debug("sorting addr %5lu: cnt %6d [%s]\n", + (idx + 1) * sizeof(int), cnt, id->name); + + qsort(base, cnt, sizeof(int), cmp_id); + + next = rb_next(next); + } + return 0; +} + +static int symbols_patch(struct object *obj) +{ + int err; + + if (__symbols_patch(obj, &obj->structs) || + __symbols_patch(obj, &obj->unions) || + __symbols_patch(obj, &obj->typedefs) || + __symbols_patch(obj, &obj->funcs) || + __symbols_patch(obj, &obj->sets)) + return -1; + + if (sets_patch(obj)) + return -1; + + /* Set type to ensure endian translation occurs. 
*/ + obj->efile.idlist->d_type = ELF_T_WORD; + + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY); + + err = elf_update(obj->efile.elf, ELF_C_WRITE); + if (err < 0) { + pr_err("FAILED elf_update(WRITE): %s\n", + elf_errmsg(-1)); + } + + pr_debug("update %s for %s\n", + err >= 0 ? "ok" : "failed", obj->path); + return err < 0 ? -1 : 0; +} + +static const char * const resolve_btfids_usage[] = { + "resolve_btfids [<options>] <ELF object>", + NULL +}; + +int main(int argc, const char **argv) +{ + struct object obj = { + .efile = { + .idlist_shndx = -1, + .symbols_shndx = -1, + }, + .structs = RB_ROOT, + .unions = RB_ROOT, + .typedefs = RB_ROOT, + .funcs = RB_ROOT, + .sets = RB_ROOT, + }; + struct option btfid_options[] = { + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show errors, etc)"), + OPT_STRING(0, "btf", &obj.btf, "BTF data", + "BTF data"), + OPT_STRING('b', "btf_base", &obj.base_btf_path, "file", + "path of file providing base BTF"), + OPT_END() + }; + int err = -1; + + argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc != 1) + usage_with_options(resolve_btfids_usage, btfid_options); + + obj.path = argv[0]; + + if (elf_collect(&obj)) + goto out; + + /* + * We did not find .BTF_ids section or symbols section, + * nothing to do.. + */ + if (obj.efile.idlist_shndx == -1 || + obj.efile.symbols_shndx == -1) { + pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n"); + err = 0; + goto out; + } + + if (symbols_collect(&obj)) + goto out; + + if (symbols_resolve(&obj)) + goto out; + + if (symbols_patch(&obj)) + goto out; + + err = 0; +out: + if (obj.efile.elf) { + elf_end(obj.efile.elf); + close(obj.efile.fd); + } + return err; +} diff --git a/tools/bpf/runqslower/.gitignore b/tools/bpf/runqslower/.gitignore new file mode 100644 index 000000000000..ffdb70230c8b --- /dev/null +++ b/tools/bpf/runqslower/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +/.output diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile new file mode 100644 index 000000000000..fb1337d69868 --- /dev/null +++ b/tools/bpf/runqslower/Makefile @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +OUTPUT := .output +CLANG ?= clang +LLC ?= llc +LLVM_STRIP ?= llvm-strip +DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool +BPFTOOL ?= $(DEFAULT_BPFTOOL) +LIBBPF_SRC := $(abspath ../../lib/bpf) +BPFOBJ := $(OUTPUT)/libbpf.a +BPF_INCLUDE := $(OUTPUT) +INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \ + -I$(abspath ../../include/uapi) +CFLAGS := -g -Wall + +# Try to detect best kernel BTF source +KERNEL_REL := $(shell uname -r) +VMLINUX_BTF_PATHS := /sys/kernel/btf/vmlinux /boot/vmlinux-$(KERNEL_REL) +VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \ + $(wildcard $(VMLINUX_BTF_PATHS)))) + +abs_out := $(abspath $(OUTPUT)) +ifeq ($(V),1) +Q = +msg = +else +Q = @ +msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; +MAKEFLAGS += --no-print-directory +submake_extras := feature_display=0 +endif + +.DELETE_ON_ERROR: + +.PHONY: all clean runqslower +all: runqslower + +runqslower: $(OUTPUT)/runqslower + +clean: + $(call msg,CLEAN) + $(Q)rm -rf $(OUTPUT) runqslower + +$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) + $(call msg,BINARY,$@) + $(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ + +$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ + $(OUTPUT)/runqslower.bpf.o + +$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h + 
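
The rules around this point form the usual BPF skeleton pipeline: clang builds runqslower.bpf.c against the generated vmlinux.h, and `bpftool gen skeleton` (next rule) wraps the resulting object into runqslower.skel.h. Consumers then use bpftool's `<object>__<verb>` functions, exactly as runqslower.c further down in this patch does; a trimmed sketch:

```c
/* Minimal sketch of consuming a bpftool-generated skeleton
 * (error handling trimmed; names follow the runqslower object).
 */
#include "runqslower.skel.h"

static int run(void)
{
	struct runqslower_bpf *obj;
	int err;

	obj = runqslower_bpf__open();		/* parse embedded .bpf.o */
	if (!obj)
		return -1;
	err = runqslower_bpf__load(obj);	/* create maps, load progs */
	if (!err)
		err = runqslower_bpf__attach(obj); /* attach by SEC() name */
	/* ... poll the perf buffer here ... */
	runqslower_bpf__destroy(obj);
	return err;
}
```
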
+$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL) + $(call msg,GEN-SKEL,$@) + $(Q)$(BPFTOOL) gen skeleton $< > $@ + +$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT) + $(call msg,BPF,$@) + $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \ + -c $(filter %.c,$^) -o $@ && \ + $(LLVM_STRIP) -g $@ + +$(OUTPUT)/%.o: %.c | $(OUTPUT) + $(call msg,CC,$@) + $(Q)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@ + +$(OUTPUT): + $(call msg,MKDIR,$@) + $(Q)mkdir -p $(OUTPUT) + +$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL) + $(call msg,GEN,$@) + $(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \ + echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \ + "specify its location." >&2; \ + exit 1;\ + fi + $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@ + +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ + OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + +$(DEFAULT_BPFTOOL): + $(Q)$(MAKE) $(submake_extras) -C ../bpftool \ + prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c new file mode 100644 index 000000000000..1f18a409f044 --- /dev/null +++ b/tools/bpf/runqslower/runqslower.bpf.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include "runqslower.h" + +#define TASK_RUNNING 0 +#define BPF_F_CURRENT_CPU 0xffffffffULL + +const volatile __u64 min_us = 0; +const volatile pid_t targ_pid = 0; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10240); + __type(key, u32); + __type(value, u64); +} start SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u32)); +} events SEC(".maps"); + +/* record enqueue timestamp */ +__always_inline +static int trace_enqueue(u32 tgid, u32 pid) +{ + u64 ts; + + if (!pid || (targ_pid && targ_pid != pid)) + return 0; + + ts = bpf_ktime_get_ns(); + bpf_map_update_elem(&start, &pid, &ts, 0); + return 0; +} + +SEC("tp_btf/sched_wakeup") +int handle__sched_wakeup(u64 *ctx) +{ + /* TP_PROTO(struct task_struct *p) */ + struct task_struct *p = (void *)ctx[0]; + + return trace_enqueue(p->tgid, p->pid); +} + +SEC("tp_btf/sched_wakeup_new") +int handle__sched_wakeup_new(u64 *ctx) +{ + /* TP_PROTO(struct task_struct *p) */ + struct task_struct *p = (void *)ctx[0]; + + return trace_enqueue(p->tgid, p->pid); +} + +SEC("tp_btf/sched_switch") +int handle__sched_switch(u64 *ctx) +{ + /* TP_PROTO(bool preempt, struct task_struct *prev, + * struct task_struct *next) + */ + struct task_struct *prev = (struct task_struct *)ctx[1]; + struct task_struct *next = (struct task_struct *)ctx[2]; + struct event event = {}; + u64 *tsp, delta_us; + long state; + u32 pid; + + /* ivcsw: treat like an enqueue event and store timestamp */ + if (prev->state == TASK_RUNNING) + trace_enqueue(prev->tgid, prev->pid); + + pid = next->pid; + + /* fetch timestamp and calculate delta */ + tsp = bpf_map_lookup_elem(&start, &pid); + if (!tsp) + return 0; /* missed enqueue */ + + delta_us = (bpf_ktime_get_ns() - *tsp) / 1000; + if (min_us && delta_us <= min_us) + return 0; + + event.pid = pid; + event.delta_us = delta_us; + bpf_get_current_comm(&event.task, sizeof(event.task)); + + /* output */ + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, + &event, sizeof(event)); + + bpf_map_delete_elem(&start, &pid); + return 0; +} + +char 
LICENSE[] SEC("license") = "GPL"; diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c new file mode 100644 index 000000000000..d89715844952 --- /dev/null +++ b/tools/bpf/runqslower/runqslower.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +// Copyright (c) 2019 Facebook +#include <argp.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/resource.h> +#include <time.h> +#include <bpf/libbpf.h> +#include <bpf/bpf.h> +#include "runqslower.h" +#include "runqslower.skel.h" + +struct env { + pid_t pid; + __u64 min_us; + bool verbose; +} env = { + .min_us = 10000, +}; + +const char *argp_program_version = "runqslower 0.1"; +const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; +const char argp_program_doc[] = +"runqslower Trace long process scheduling delays.\n" +" For Linux, uses eBPF, BPF CO-RE, libbpf, BTF.\n" +"\n" +"This script traces high scheduling delays between tasks being\n" +"ready to run and them running on CPU after that.\n" +"\n" +"USAGE: runqslower [-p PID] [min_us]\n" +"\n" +"EXAMPLES:\n" +" runqslower # trace run queue latency higher than 10000 us (default)\n" +" runqslower 1000 # trace run queue latency higher than 1000 us\n" +" runqslower -p 123 # trace pid 123 only\n"; + +static const struct argp_option opts[] = { + { "pid", 'p', "PID", 0, "Process PID to trace"}, + { "verbose", 'v', NULL, 0, "Verbose debug output" }, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + static int pos_args; + int pid; + long long min_us; + + switch (key) { + case 'v': + env.verbose = true; + break; + case 'p': + errno = 0; + pid = strtol(arg, NULL, 10); + if (errno || pid <= 0) { + fprintf(stderr, "Invalid PID: %s\n", arg); + argp_usage(state); + } + env.pid = pid; + break; + case ARGP_KEY_ARG: + if (pos_args++) { + fprintf(stderr, + "Unrecognized positional argument: %s\n", arg); + argp_usage(state); + } + errno = 0; + min_us = strtoll(arg, NULL, 10); + if (errno || min_us <= 0) { + fprintf(stderr, "Invalid delay (in us): %s\n", arg); + argp_usage(state); + } + env.min_us = min_us; + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +int libbpf_print_fn(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !env.verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static int bump_memlock_rlimit(void) +{ + struct rlimit rlim_new = { + .rlim_cur = RLIM_INFINITY, + .rlim_max = RLIM_INFINITY, + }; + + return setrlimit(RLIMIT_MEMLOCK, &rlim_new); +} + +void handle_event(void *ctx, int cpu, void *data, __u32 data_sz) +{ + const struct event *e = data; + struct tm *tm; + char ts[32]; + time_t t; + + time(&t); + tm = localtime(&t); + strftime(ts, sizeof(ts), "%H:%M:%S", tm); + printf("%-8s %-16s %-6d %14llu\n", ts, e->task, e->pid, e->delta_us); +} + +void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt) +{ + printf("Lost %llu events on CPU #%d!\n", lost_cnt, cpu); +} + +int main(int argc, char **argv) +{ + static const struct argp argp = { + .options = opts, + .parser = parse_arg, + .doc = argp_program_doc, + }; + struct perf_buffer_opts pb_opts; + struct perf_buffer *pb = NULL; + struct runqslower_bpf *obj; + int err; + + err = argp_parse(&argp, argc, argv, 0, NULL, NULL); + if (err) + return err; + + libbpf_set_print(libbpf_print_fn); + + err = bump_memlock_rlimit(); + if (err) { + fprintf(stderr, "failed to increase rlimit: %d", err); + return 1; + } + + obj = runqslower_bpf__open(); 
+ if (!obj) { + fprintf(stderr, "failed to open and/or load BPF object\n"); + return 1; + } + + /* initialize global data (filtering options) */ + obj->rodata->targ_pid = env.pid; + obj->rodata->min_us = env.min_us; + + err = runqslower_bpf__load(obj); + if (err) { + fprintf(stderr, "failed to load BPF object: %d\n", err); + goto cleanup; + } + + err = runqslower_bpf__attach(obj); + if (err) { + fprintf(stderr, "failed to attach BPF programs\n"); + goto cleanup; + } + + printf("Tracing run queue latency higher than %llu us\n", env.min_us); + printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)"); + + pb_opts.sample_cb = handle_event; + pb_opts.lost_cb = handle_lost_events; + pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts); + err = libbpf_get_error(pb); + if (err) { + pb = NULL; + fprintf(stderr, "failed to open perf buffer: %d\n", err); + goto cleanup; + } + + while ((err = perf_buffer__poll(pb, 100)) >= 0) + ; + printf("Error polling perf buffer: %d\n", err); + +cleanup: + perf_buffer__free(pb); + runqslower_bpf__destroy(obj); + + return err != 0; +} diff --git a/tools/bpf/runqslower/runqslower.h b/tools/bpf/runqslower/runqslower.h new file mode 100644 index 000000000000..9db225425e5f --- /dev/null +++ b/tools/bpf/runqslower/runqslower.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __RUNQSLOWER_H +#define __RUNQSLOWER_H + +#define TASK_COMM_LEN 16 + +struct event { + char task[TASK_COMM_LEN]; + __u64 delta_us; + pid_t pid; +}; + +#endif /* __RUNQSLOWER_H */ diff --git a/tools/build/Build.include b/tools/build/Build.include index 9ec01f4454f9..585486e40995 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), # dependencies in the cmd file if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ @set -e; \ - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd)) + $(echo-cmd) $(cmd_$(1)); \ + $(dep-cmd)) # if_changed - execute command if any prerequisite is newer than # target, or command line has changed diff --git a/tools/build/Makefile b/tools/build/Makefile index 727050c40f09..8a55378e8b7c 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -15,10 +15,6 @@ endef $(call allow-override,CC,$(CROSS_COMPILE)gcc) $(call allow-override,LD,$(CROSS_COMPILE)ld) -HOSTCC ?= gcc -HOSTLD ?= ld -HOSTAR ?= ar - export HOSTCC HOSTLD HOSTAR ifeq ($(V),1) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 8a19753cc26a..8c6e1ea67f21 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -8,7 +8,7 @@ endif feature_check = $(eval $(feature_check_code)) define feature_check_code - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) endef feature_set = $(eval $(feature_set_code)) diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 8499385365c0..054e09ab4a9e 100644 --- 
a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -70,8 +70,6 @@ FILES= \ FILES := $(addprefix $(OUTPUT),$(FILES)) -CC ?= $(CROSS_COMPILE)gcc -CXX ?= $(CROSS_COMPILE)g++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config LLVM_CONFIG ?= llvm-config diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py index f79b23582a1d..103605f5be8c 100644 --- a/tools/cgroup/iocost_monitor.py +++ b/tools/cgroup/iocost_monitor.py @@ -72,7 +72,7 @@ class BlkgIterator: name = BlkgIterator.blkcg_name(blkcg) path = parent_path + '/' + name if parent_path else name blkg = drgn.Object(prog, 'struct blkcg_gq', - address=radix_tree_lookup(blkcg.blkg_tree, q_id)) + address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id)) if not blkg.address_: return @@ -112,14 +112,14 @@ class IocStat: def dict(self, now): return { 'device' : devname, - 'timestamp' : str(now), - 'enabled' : str(int(self.enabled)), - 'running' : str(int(self.running)), - 'period_ms' : str(self.period_ms), - 'period_at' : str(self.period_at), - 'period_vtime_at' : str(self.vperiod_at), - 'busy_level' : str(self.busy_level), - 'vrate_pct' : str(self.vrate_pct), } + 'timestamp' : now, + 'enabled' : self.enabled, + 'running' : self.running, + 'period_ms' : self.period_ms, + 'period_at' : self.period_at, + 'period_vtime_at' : self.vperiod_at, + 'busy_level' : self.busy_level, + 'vrate_pct' : self.vrate_pct, } def table_preamble_str(self): state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF' @@ -159,7 +159,12 @@ class IocgStat: else: self.inflight_pct = 0 - self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 + # vdebt used to be an atomic64_t and is now u64, support both + try: + self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 + except: + self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000 + self.use_delay = blkg.use_delay.counter.value_() self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 @@ -174,19 +179,19 @@ class IocgStat: def dict(self, now, path): out = { 'cgroup' : path, - 'timestamp' : str(now), - 'is_active' : str(int(self.is_active)), - 'weight' : str(self.weight), - 'weight_active' : str(self.active), - 'weight_inuse' : str(self.inuse), - 'hweight_active_pct' : str(self.hwa_pct), - 'hweight_inuse_pct' : str(self.hwi_pct), - 'inflight_pct' : str(self.inflight_pct), - 'debt_ms' : str(self.debt_ms), - 'use_delay' : str(self.use_delay), - 'delay_ms' : str(self.delay_ms), - 'usage_pct' : str(self.usage), - 'address' : str(hex(self.address)) } + 'timestamp' : now, + 'is_active' : self.is_active, + 'weight' : self.weight, + 'weight_active' : self.active, + 'weight_inuse' : self.inuse, + 'hweight_active_pct' : self.hwa_pct, + 'hweight_inuse_pct' : self.hwi_pct, + 'inflight_pct' : self.inflight_pct, + 'debt_ms' : self.debt_ms, + 'use_delay' : self.use_delay, + 'delay_ms' : self.delay_ms, + 'usage_pct' : self.usage, + 'address' : self.address } for i in range(len(self.usages)): out[f'usage_pct_{i}'] = str(self.usages[i]) return out @@ -228,7 +233,7 @@ q_id = None root_iocg = None ioc = None -for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree): +for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()): blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr) try: if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'): diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 6080de58861f..6289b8d20dff 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile @@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: 
../../include/uapi/linux/gpio.h prepare: $(OUTPUT)include/linux/gpio.h -GPIO_UTILS_IN := $(output)gpio-utils-in.o +GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o $(GPIO_UTILS_IN): prepare FORCE $(Q)$(MAKE) $(build)=gpio-utils diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c index 0e0060a6eb34..083399d276e4 100644 --- a/tools/gpio/gpio-hammer.c +++ b/tools/gpio/gpio-hammer.c @@ -135,7 +135,14 @@ int main(int argc, char **argv) device_name = optarg; break; case 'o': - lines[i] = strtoul(optarg, NULL, 10); + /* + * Avoid overflow. Do not immediately error, we want to + * be able to accurately report on the amount of times + * '-o' was given to give an accurate error message + */ + if (i < GPIOHANDLES_MAX) + lines[i] = strtoul(optarg, NULL, 10); + i++; break; case '?': @@ -143,6 +150,14 @@ int main(int argc, char **argv) return -1; } } + + if (i >= GPIOHANDLES_MAX) { + fprintf(stderr, + "Only %d occurences of '-o' are allowed, %d were found\n", + GPIOHANDLES_MAX, i + 1); + return -1; + } + nlines = i; if (!device_name || !nlines) { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 77c6be96d676..17da40178c55 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key { __u32 attach_type; /* program attach type */ }; +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -107,6 +113,18 @@ enum bpf_cmd { BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, BPF_BTF_GET_NEXT_ID, + BPF_MAP_LOOKUP_BATCH, + BPF_MAP_LOOKUP_AND_DELETE_BATCH, + BPF_MAP_UPDATE_BATCH, + BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, + BPF_LINK_DETACH, + BPF_PROG_BIND_MAP, }; enum bpf_map_type { @@ -136,6 +154,9 @@ enum bpf_map_type { BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_DEVMAP_HASH, + BPF_MAP_TYPE_STRUCT_OPS, + BPF_MAP_TYPE_RINGBUF, + BPF_MAP_TYPE_INODE_STORAGE, }; /* Note that tracing related programs such as @@ -173,6 +194,11 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, + BPF_PROG_TYPE_STRUCT_OPS, + BPF_PROG_TYPE_EXT, + BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, }; enum bpf_attach_type { @@ -199,11 +225,38 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, + BPF_TRACE_FENTRY, + BPF_TRACE_FEXIT, + BPF_MODIFY_RETURN, + BPF_LSM_MAC, + BPF_TRACE_ITER, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, + BPF_SK_LOOKUP, + BPF_XDP, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + + MAX_BPF_LINK_TYPE, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. @@ -227,6 +280,11 @@ enum bpf_attach_type { * When children program makes decision (like picking TCP CA or sock bind) * parent program has a chance to override it. 
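
The BPF_F_REPLACE semantics this comment block goes on to describe (together with the new `replace_bpf_fd` field added to `union bpf_attr` further down) can be exercised with a raw bpf(2) call; a sketch, with the cgroup ingress attach type chosen arbitrarily for illustration:

```c
/* Sketch: swap one program in a cgroup's MULTI list via BPF_F_REPLACE.
 * Raw bpf(2) call, no libbpf wrapper assumed; error handling omitted.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int cgroup_replace_prog(int cgroup_fd, int new_fd, int old_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = new_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
	attr.replace_bpf_fd = old_fd;	/* old program gets released */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
```
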
* + * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of + * programs for a cgroup. Though it's possible to replace an old program at + * any position by also specifying BPF_F_REPLACE flag and position itself in + * replace_bpf_fd attribute. Old program at this position will be released. + * * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. * A cgroup with NONE doesn't allow any programs in sub-cgroups. * Ex1: @@ -245,6 +303,7 @@ enum bpf_attach_type { */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) +#define BPF_F_REPLACE (1U << 2) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel @@ -288,19 +347,54 @@ enum bpf_attach_type { /* The verifier internal test flag. Behavior is undefined */ #define BPF_F_TEST_STATE_FREQ (1U << 3) +/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will + * restrict map and helper usage for such programs. Sleepable BPF programs can + * only be attached to hooks where kernel execution context allows sleeping. + * Such programs are allowed to use helpers that may sleep like + * bpf_copy_from_user(). + */ +#define BPF_F_SLEEPABLE (1U << 4) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have - * two extensions: + * the following extensions: * - * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE - * insn[0].imm: map fd map fd - * insn[1].imm: 0 offset into value - * insn[0].off: 0 0 - * insn[1].off: 0 0 - * ldimm64 rewrite: address of map address of map[0]+offset - * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE + * insn[0].src_reg: BPF_PSEUDO_MAP_FD + * insn[0].imm: map fd + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map + * verifier type: CONST_PTR_TO_MAP */ #define BPF_PSEUDO_MAP_FD 1 +/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE + * insn[0].imm: map fd + * insn[1].imm: offset into value + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map[0]+offset + * verifier type: PTR_TO_MAP_VALUE + */ #define BPF_PSEUDO_MAP_VALUE 2 +/* insn[0].src_reg: BPF_PSEUDO_BTF_ID + * insn[0].imm: kernel btd id of VAR + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the kernel variable + * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var + * is struct/union. + */ +#define BPF_PSEUDO_BTF_ID 3 +/* insn[0].src_reg: BPF_PSEUDO_FUNC + * insn[0].imm: insn offset to the func + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the function + * verifier type: PTR_TO_FUNC. 
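
A concrete, hand-assembled instance of the BPF_PSEUDO_FUNC layout just tabulated; bpftool's xlated dumper, patched earlier in this series, would render the imm of such an instruction as `subprog[+5]`. The offset value here is arbitrary:

```c
/* Sketch: a ldimm64 that loads the address of a subprog located at
 * insn offset 5, per the BPF_PSEUDO_FUNC layout documented above.
 * The verifier types the destination register as PTR_TO_FUNC.
 */
#include <linux/bpf.h>

static const struct bpf_insn ld_subprog_addr[2] = {
	{ .code    = BPF_LD | BPF_DW | BPF_IMM,
	  .dst_reg = BPF_REG_2,
	  .src_reg = BPF_PSEUDO_FUNC,
	  .imm     = 5 },	/* insn offset to the function */
	{ 0 },			/* second half of the 16-byte insn */
};
```
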
+ */ +#define BPF_PSEUDO_FUNC 4 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function @@ -308,45 +402,72 @@ enum bpf_attach_type { #define BPF_PSEUDO_CALL 1 /* flags for BPF_MAP_UPDATE_ELEM command */ -#define BPF_ANY 0 /* create new element or update existing */ -#define BPF_NOEXIST 1 /* create new element if it didn't exist */ -#define BPF_EXIST 2 /* update existing element */ -#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */ +enum { + BPF_ANY = 0, /* create new element or update existing */ + BPF_NOEXIST = 1, /* create new element if it didn't exist */ + BPF_EXIST = 2, /* update existing element */ + BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ +}; /* flags for BPF_MAP_CREATE command */ -#define BPF_F_NO_PREALLOC (1U << 0) +enum { + BPF_F_NO_PREALLOC = (1U << 0), /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ -#define BPF_F_NO_COMMON_LRU (1U << 1) + BPF_F_NO_COMMON_LRU = (1U << 1), /* Specify numa node during map creation */ -#define BPF_F_NUMA_NODE (1U << 2) - -#define BPF_OBJ_NAME_LEN 16U + BPF_F_NUMA_NODE = (1U << 2), /* Flags for accessing BPF object from syscall side. */ -#define BPF_F_RDONLY (1U << 3) -#define BPF_F_WRONLY (1U << 4) + BPF_F_RDONLY = (1U << 3), + BPF_F_WRONLY = (1U << 4), /* Flag for stack_map, store build_id+offset instead of pointer */ -#define BPF_F_STACK_BUILD_ID (1U << 5) + BPF_F_STACK_BUILD_ID = (1U << 5), /* Zero-initialize hash function seed. This should only be used for testing. */ -#define BPF_F_ZERO_SEED (1U << 6) + BPF_F_ZERO_SEED = (1U << 6), /* Flags for accessing BPF object from program side. */ -#define BPF_F_RDONLY_PROG (1U << 7) -#define BPF_F_WRONLY_PROG (1U << 8) + BPF_F_RDONLY_PROG = (1U << 7), + BPF_F_WRONLY_PROG = (1U << 8), /* Clone map from listener for newly accepted socket */ -#define BPF_F_CLONE (1U << 9) + BPF_F_CLONE = (1U << 9), -/* flags for BPF_PROG_QUERY */ +/* Enable memory-mapping BPF map */ + BPF_F_MMAPABLE = (1U << 10), + +/* Share perf_event among processes */ + BPF_F_PRESERVE_ELEMS = (1U << 11), + +/* Create a map that is suitable to be an inner map with dynamic max entries */ + BPF_F_INNER_MAP = (1U << 12), +}; + +/* Flags for BPF_PROG_QUERY. */ + +/* Query effective (directly attached + inherited from ancestor cgroups) + * programs that will be executed for events within a cgroup. + * attach_flags with this flag are returned only for directly attached programs. 
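
The switch from #defines to anonymous enums above keeps every value identical (presumably so the constants are captured in kernel BTF, and hence in generated vmlinux.h headers); callers are unaffected. For instance, a create-only update using libbpf's thin syscall wrapper:

```c
/* Sketch: create-only map update with BPF_NOEXIST from userspace,
 * assuming libbpf's bpf_map_update_elem() wrapper (sets errno on error).
 */
#include <errno.h>
#include <linux/types.h>
#include <bpf/bpf.h>

static int insert_once(int map_fd, __u32 key, __u64 val)
{
	if (!bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST))
		return 0;
	return errno == EEXIST ? 0 : -1;	/* EEXIST: benign race */
}
```
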
+ */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) +/* Flags for BPF_PROG_TEST_RUN */ + +/* If set, run the test on the cpu specified by bpf_attr.test.cpu */ +#define BPF_F_TEST_RUN_ON_CPU (1U << 0) + +/* type for BPF_ENABLE_STATS */ +enum bpf_stats_type { + /* enabled run_time_ns and run_cnt */ + BPF_STATS_RUN_TIME = 0, +}; + enum bpf_stack_build_id_status { /* user space need an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, @@ -366,6 +487,8 @@ struct bpf_stack_build_id { }; }; +#define BPF_OBJ_NAME_LEN 16U + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -384,6 +507,10 @@ union bpf_attr { __u32 btf_fd; /* fd pointing to a BTF type data */ __u32 btf_key_type_id; /* BTF type_id of the key */ __u32 btf_value_type_id; /* BTF type_id of the value */ + __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- + * struct stored as the + * map value + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -396,6 +523,23 @@ union bpf_attr { __u64 flags; }; + struct { /* struct used by BPF_MAP_*_BATCH commands */ + __aligned_u64 in_batch; /* start batch, + * NULL to start from beginning + */ + __aligned_u64 out_batch; /* output: next start batch */ + __aligned_u64 keys; + __aligned_u64 values; + __u32 count; /* input/output: + * input: # of key/value + * elements + * output: # of filled elements + */ + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { /* anonymous struct used by BPF_PROG_LOAD command */ __u32 prog_type; /* one of enum bpf_prog_type */ __u32 insn_cnt; @@ -420,6 +564,8 @@ union bpf_attr { __u32 line_info_rec_size; /* userspace bpf_line_info size */ __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ + __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + __u32 attach_prog_fd; /* 0 to attach to vmlinux */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -433,6 +579,10 @@ union bpf_attr { __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; + __u32 replace_bpf_fd; /* previously attached eBPF + * program to replace if + * BPF_F_REPLACE is used + */ }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ @@ -454,6 +604,8 @@ union bpf_attr { */ __aligned_u64 ctx_in; __aligned_u64 ctx_out; + __u32 flags; + __u32 cpu; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ @@ -462,6 +614,7 @@ union bpf_attr { __u32 prog_id; __u32 map_id; __u32 btf_id; + __u32 link_id; }; __u32 next_id; __u32 open_flags; @@ -482,7 +635,7 @@ union bpf_attr { __u32 prog_cnt; } query; - struct { + struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; } raw_tracepoint; @@ -510,6 +663,53 @@ union bpf_attr { __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; + + struct { /* struct used by BPF_LINK_CREATE command */ + __u32 prog_fd; /* eBPF program to attach */ + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; + __u32 attach_type; /* attach type */ + __u32 flags; /* extra flags */ + union { + __u32 target_btf_id; /* btf_id of target to attach to */ + struct { + __aligned_u64 iter_info; /* extra bpf_iter_link_info */ + __u32 iter_info_len; /* iter_info length */ + }; + }; + } link_create; + + struct { /* struct used by BPF_LINK_UPDATE command */ + __u32 link_fd; /* link fd */ + /* new program 
fd to update link with */ + __u32 new_prog_fd; + __u32 flags; /* extra flags */ + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags */ + __u32 old_prog_fd; + } link_update; + + struct { + __u32 link_fd; + } link_detach; + + struct { /* struct used by BPF_ENABLE_STATS command */ + __u32 type; + } enable_stats; + + struct { /* struct used by BPF_ITER_CREATE command */ + __u32 link_fd; + __u32 flags; + } iter_create; + + struct { /* struct used by BPF_PROG_BIND_MAP command */ + __u32 prog_fd; + __u32 map_fd; + __u32 flags; /* extra flags */ + } prog_bind_map; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF @@ -536,7 +736,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -554,26 +754,31 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use **bpf_probe_read_user**\ () or + * **bpf_probe_read_kernel**\ () instead. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. + * Does not include time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -623,7 +828,7 @@ union bpf_attr { * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice - * bloc (spanning several lines) is printed to kernel logs and + * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values @@ -653,7 +858,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -670,7 +875,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
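
Most helper prototypes in this block only change their documented return type from `int` to `long`. As a reminder of how the helpers re-documented above fit together, a toy kprobe (hypothetical program, libbpf conventions; note the warning above that bpf_trace_printk() is for debugging only):

```c
/* Sketch: toy kprobe exercising bpf_ktime_get_ns(),
 * bpf_map_update_elem() and bpf_trace_printk() as documented above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} last_seen SEC(".maps");

SEC("kprobe/vfs_read")
int on_vfs_read(void *ctx)
{
	__u32 pid = (__u32)bpf_get_current_pid_tgid();
	__u64 now = bpf_ktime_get_ns();
	char fmt[] = "vfs_read by %u\n";

	bpf_map_update_elem(&last_seen, &pid, &now, BPF_ANY);
	bpf_trace_printk(fmt, sizeof(fmt), pid);	/* debug only */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```
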
* - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -695,7 +900,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -727,7 +932,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -758,7 +963,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -794,7 +999,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(char *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -831,7 +1036,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -847,7 +1052,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -859,7 +1064,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. 
This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -889,14 +1094,14 @@ union bpf_attr { * * int ret; * struct bpf_tunnel_key key = {}; - * + * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet - * + * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet - * + * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices @@ -910,7 +1115,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -976,7 +1181,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -990,9 +1195,9 @@ union bpf_attr { * supports redirection to the egress interface, and accepts no * flag at all. * - * The same effect can be attained with the more generic - * **bpf_redirect_map**\ (), which requires specific maps to be - * used but offers better performance. + * The same effect can also be attained with the more generic + * **bpf_redirect_map**\ (), which uses a BPF map to store the + * redirect target instead of providing it directly to the helper. * Return * For XDP, the helper returns **XDP_REDIRECT** on success or * **XDP_ABORTED** on error. For other program types, the values @@ -1003,7 +1208,7 @@ union bpf_attr { * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The - * indentifier retrieved is a user-provided tag, similar to the + * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. @@ -1023,7 +1228,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1068,7 +1273,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1085,7 +1290,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
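
A short sketch of the pattern bpf_skb_load_bytes() exists for, copying possibly non-linear packet bytes onto the BPF stack (the SEC name is a libbpf classifier convention, not something the helper requires):

```c
/* Sketch: copy the Ethernet header out of an skb for inspection,
 * per the bpf_skb_load_bytes() documentation above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int peek_eth(struct __sk_buff *skb)
{
	__u8 eth[14];	/* Ethernet header, incl. 2-byte ethertype */

	if (bpf_skb_load_bytes(skb, 0, eth, sizeof(eth)) < 0)
		return 0;	/* TC_ACT_OK: let the packet through */
	/* ... inspect eth[12..13] (ethertype) ... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```
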
* - * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1154,7 +1359,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. * - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1172,7 +1377,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1182,7 +1387,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1209,7 +1414,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1236,7 +1441,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1267,7 +1472,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1286,7 +1491,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1294,11 +1499,11 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if the *skb* task belongs to the cgroup2. - * * 1, if the *skb* task does not belong to the cgroup2. + * * 0, if current task belongs to the cgroup2. + * * 1, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. 
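Since **bpf_get_stackid**\ () is only meaningful together with a **BPF_MAP_TYPE_STACK_TRACE** map, a hedged sketch follows; the map sizing and the "perf_event" attach point are assumptions for illustration, not part of the patch::

	#include <linux/bpf.h>
	#include <linux/perf_event.h>		/* PERF_MAX_STACK_DEPTH */
	#include <linux/bpf_perf_event.h>	/* struct bpf_perf_event_data */
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
		__uint(max_entries, 1024);	/* arbitrary sizing */
		__uint(key_size, sizeof(__u32));
		__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
	} stacks SEC(".maps");

	SEC("perf_event")
	int on_sample(struct bpf_perf_event_data *ctx)
	{
		/* Walk the user stack, store it in "stacks" and get its id;
		 * identical stacks yield the same id, so samples can be
		 * aggregated cheaply by id. */
		long id = bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);

		if (id < 0)	/* negative on error, e.g. a map collision */
			return 0;
		/* ... bump a counter keyed on id, emit it via perf, etc. ... */
		return 0;
	}

	char _license[] SEC("license") = "GPL";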
* - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1322,7 +1527,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1378,7 +1583,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1389,7 +1594,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1410,7 +1615,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1425,45 +1630,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for + * more details. * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. 
- * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use **bpf_probe_read_user_str**\ () or + * **bpf_probe_read_kernel_str**\ () instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -1491,7 +1665,7 @@ union bpf_attr { * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description - * Equivalent to bpf_get_socket_cookie() helper that accepts + * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * A 8-byte long non-decreasing number. @@ -1504,14 +1678,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1519,25 +1693,41 @@ union bpf_attr { * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed by *optval*. * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * + * By default, the helper will reset any offloaded checksum + * indicator of the skb to CHECKSUM_NONE. This can be avoided + * by the following flag: + * + * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded + * checksum data of the skb to CHECKSUM_NONE. 
+ * * There are two supported modes at this time: * * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer @@ -1572,7 +1762,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1583,19 +1773,17 @@ union bpf_attr { * * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be - * one of the XDP program return codes up to XDP_TX, as chosen by - * the caller. Any higher bits in the *flags* argument must be + * one of the XDP program return codes up to **XDP_TX**, as chosen + * by the caller. Any higher bits in the *flags* argument must be * unset. * - * When used to redirect packets to net devices, this helper - * provides a high performance increase over **bpf_redirect**\ (). - * This is due to various implementation details of the underlying - * mechanisms, one of which is the fact that **bpf_redirect_map**\ - * () tries to send packet as a "bulk" to the device. + * See also **bpf_redirect**\ (), which only supports redirecting + * to an ifindex, but doesn't require a map to do so. * Return - * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error. + * **XDP_REDIRECT** on success, or the value of the two lower bits + * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1606,7 +1794,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1625,7 +1813,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1654,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1698,13 +1886,13 @@ union bpf_attr { * the time running for event since last normalization. The * enabled and running times are accumulated since the perf event * open. 
To achieve scaling factor between two invocations of an - * eBPF program, users can can use CPU id as the key (which is + * eBPF program, users can use CPU id as the key (which is * typical for perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1715,7 +1903,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1724,6 +1912,12 @@ union bpf_attr { * The retrieved value is stored in the structure pointed by * *opval* and of length *optlen*. * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * * This helper actually implements a subset of **getsockopt()**. * It supports the following *level*\ s: * @@ -1734,14 +1928,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. * The first argument is the context *regs* on which the kprobe * works. * - * This helper works by setting setting the PC (program counter) + * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required @@ -1759,7 +1953,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1803,7 +1997,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1817,7 +2011,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. 
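As an illustration of the **bpf_setsockopt**\ ()/**bpf_getsockopt**\ () entries, here is a minimal **sockops** sketch that switches the congestion control of newly established TCP connections. The **SOL_TCP**/**TCP_CONGESTION** values are copied from <netinet/tcp.h> (BPF objects are usually built without libc headers), and "cubic" is only an example value::

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Values as in <netinet/tcp.h>. */
	#define SOL_TCP		6
	#define TCP_CONGESTION	13

	SEC("sockops")
	int set_cc(struct bpf_sock_ops *skops)
	{
		char cc[] = "cubic";	/* example: any available CC algorithm */

		/* Flip congestion control once a connection is established,
		 * in either direction. */
		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
		    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
				       cc, sizeof(cc));
		return 1;
	}

	char _license[] SEC("license") = "GPL";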
* - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1851,7 +2045,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1869,7 +2063,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -1900,7 +2094,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -1910,18 +2104,19 @@ union bpf_attr { * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or - * **AF_INET6**). Looking for a free port to bind to can be - * expensive, therefore binding to port is not permitted by the - * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) - * must be set to zero. + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick up an unused + * port as long as 4-tuple is unique. Passing non-zero port might + * lead to degraded performance. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is - * only possible to shrink the packet as of this writing, - * therefore *delta* must be a negative integer. + * possible to both shrink and grow the packet tail. + * Shrink done via *delta* being a negative integer. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers @@ -1931,7 +2126,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -1947,7 +2142,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. 
* To achieve this, the helper needs *ctx*, which is a pointer @@ -1980,7 +2175,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2002,7 +2197,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be @@ -2033,7 +2228,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2052,7 +2247,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2066,11 +2261,11 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. - * if the verdeict eBPF program returns **SK_PASS**), redirect it + * if the verdict eBPF program returns **SK_PASS**), redirect it * to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and * egress interfaces can be used for redirection. The @@ -2080,7 +2275,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2117,7 +2312,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. 
Only the flags, tag and TLVs @@ -2132,7 +2327,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2148,7 +2343,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2177,7 +2372,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays @@ -2196,7 +2391,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2207,7 +2402,7 @@ union bpf_attr { * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * - * Some protocols include a toggle bit, in case the button was + * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. * * The *ctx* should point to the lirc sample as passed into @@ -2261,7 +2456,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2306,7 +2501,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2343,7 +2538,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2362,7 +2557,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(void *sock) * Description * Release the reference held by *sock*. 
*sock* must be a * non-**NULL** pointer that was returned from @@ -2370,7 +2565,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2380,19 +2575,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2408,9 +2603,9 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description - * Will remove *pop* bytes from a *msg* starting at byte *start*. + * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if * an allocation and copy are required due to a full ring buffer. * However, the helper will try to avoid doing the allocation @@ -2420,7 +2615,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2434,7 +2629,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock allows to @@ -2482,7 +2677,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2505,7 +2700,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buf *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2542,7 +2737,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. 
@@ -2553,12 +2748,11 @@ union bpf_attr { * * *th* points to the start of the TCP header, while *th_len* * contains **sizeof**\ (**struct tcphdr**). - * * Return * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2574,7 +2768,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2593,7 +2787,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2610,7 +2804,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2627,7 +2821,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2651,7 +2845,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2674,7 +2868,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) + * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) * Description * Get a bpf-local-storage from a *sk*. * @@ -2690,6 +2884,9 @@ union bpf_attr { * "type". The bpf-local-storage "type" (i.e. the *map*) is * searched against all bpf-local-storages residing at *sk*. * + * *sk* is a kernel **struct sock** pointer for LSM program. + * *sk* is a **struct bpf_sock** pointer for other program types. + * * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used @@ -2702,17 +2899,19 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. 
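A small sketch of the socket-local-storage pattern described above, counting egress packets per socket. It assumes a kernel and program type (here **cgroup_skb/egress**) where **bpf_sk_storage_get**\ () is available; note the **bpf_sk_fullsock**\ () step, since the storage helpers require a full socket::

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct pkt_count {
		__u64 egress;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for sk storage */
		__type(key, int);
		__type(value, struct pkt_count);
	} sk_stg SEC(".maps");

	SEC("cgroup_skb/egress")
	int count_egress(struct __sk_buff *skb)
	{
		struct bpf_sock *sk = skb->sk;
		struct pkt_count *cnt;

		if (!sk)
			return 1;
		sk = bpf_sk_fullsock(sk);	/* storage needs a full socket */
		if (!sk)
			return 1;

		/* Create the per-socket value on first use, then bump it. */
		cnt = bpf_sk_storage_get(&sk_stg, sk, 0,
					 BPF_SK_STORAGE_GET_F_CREATE);
		if (cnt)
			__sync_fetch_and_add(&cnt->egress, 1);
		return 1;
	}

	char _license[] SEC("license") = "GPL";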
* - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description - * Send signal *sig* to the current task. + * Send signal *sig* to the process of the current task. + * The signal may be delivered to any of this process's threads. * Return * 0 on success or successfully queued. * @@ -2724,7 +2923,7 @@ union bpf_attr { * * **-EAGAIN** if bpf program can try again. * - * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Try to issue a SYN cookie for the packet with corresponding * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. @@ -2735,7 +2934,6 @@ union bpf_attr { * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header. - * * Return * On success, lower 32 bits hold the generated SYN cookie in * followed by 16 bits which hold the MSS value for that cookie, @@ -2750,6 +2948,902 @@ union bpf_attr { * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 + * + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * Description + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct sk_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. 
+ *
+ *		On success, the length of the copied string is returned. This
+ *		makes this helper useful in tracing programs for reading
+ *		strings, and more importantly to get its length at runtime. See
+ *		the following snippet:
+ *
+ *		::
+ *
+ *			SEC("kprobe/sys_open")
+ *			void bpf_sys_open(struct pt_regs *ctx)
+ *			{
+ *				char buf[PATHLEN]; // PATHLEN is defined to 256
+ *				int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ *					                          ctx->di);
+ *
+ *				// Consume buf, for example push it to
+ *				// userspace via bpf_perf_event_output(); we
+ *				// can use res (the string length) as event
+ *				// size, after checking its boundaries.
+ *			}
+ *
+ *		In comparison, using **bpf_probe_read_user**\ () helper here
+ *		instead to read the string would require estimating the length
+ *		at compile time, and would often result in copying more memory
+ *		than necessary.
+ *
+ *		Another useful use case is when parsing individual process
+ *		arguments or individual environment variables navigating
+ *		*current*\ **->mm->arg_start** and *current*\
+ *		**->mm->env_start**: using this helper and the return value,
+ *		one can quickly iterate at the right offset of the memory area.
+ *	Return
+ *		On success, the strictly positive length of the string,
+ *		including the trailing NUL character. On error, a negative
+ *		value.
+ *
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ *	Description
+ *		Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ *		to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
+ *	Return
+ *		On success, the strictly positive length of the string, including
+ *		the trailing NUL character. On error, a negative value.
+ *
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ *	Description
+ *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
+ *		*rcv_nxt* is the ack_seq to be sent out.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
+ *
+ * long bpf_send_signal_thread(u32 sig)
+ *	Description
+ *		Send signal *sig* to the thread corresponding to the current task.
+ *	Return
+ *		0 on success or successfully queued.
+ *
+ *		**-EBUSY** if work queue under nmi is full.
+ *
+ *		**-EINVAL** if *sig* is invalid.
+ *
+ *		**-EPERM** if no permission to send the *sig*.
+ *
+ *		**-EAGAIN** if bpf program can try again.
+ *
+ * u64 bpf_jiffies64(void)
+ *	Description
+ *		Obtain the 64-bit jiffies value.
+ *	Return
+ *		The 64-bit jiffies value.
+ *
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ *	Description
+ *		For an eBPF program attached to a perf event, retrieve the
+ *		branch records (**struct perf_branch_entry**) associated to *ctx*
+ *		and store them in the buffer pointed by *buf* up to size
+ *		*size* bytes.
+ *	Return
+ *		On success, number of bytes written to *buf*. On error, a
+ *		negative value.
+ *
+ *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ *		instead return the number of bytes required to store all the
+ *		branch entries. If this flag is set, *buf* may be NULL.
+ *
+ *		**-EINVAL** if arguments invalid or **size** not a multiple
+ *		of **sizeof**\ (**struct perf_branch_entry**\ ).
+ *
+ *		**-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ *	Description
+ *		Values for *pid* and *tgid* as seen from the current
+ *		*namespace* will be returned in *nsdata*.
+ *	Return
+ *		0 on success, or one of the following in case of failure:
+ *
+ *		**-EINVAL** if dev and inum supplied don't match dev_t and inode number
+ *		with nsfs of current task, or if dev conversion to dev_t lost high bits.
+ *
+ *		**-ENOENT** if pidns does not exist for the current task.
+ *
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ *	Description
+ *		Write raw *data* blob into a special BPF perf event held by
+ *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ *		event must have the following attributes: **PERF_SAMPLE_RAW**
+ *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ *		The *flags* are used to indicate the index in *map* for which
+ *		the value must be put, masked with **BPF_F_INDEX_MASK**.
+ *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ *		to indicate that the index of the current CPU core should be
+ *		used.
+ *
+ *		The value to write, of *size*, is passed through eBPF stack and
+ *		pointed by *data*.
+ *
+ *		*ctx* is a pointer to in-kernel struct xdp_buff.
+ *
+ *		This helper is similar to **bpf_perf_event_output**\ () but
+ *		restricted to raw_tracepoint bpf programs.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_get_netns_cookie(void *ctx)
+ *	Description
+ *		Retrieve the cookie (generated by the kernel) of the network
+ *		namespace the input *ctx* is associated with. The network
+ *		namespace cookie remains stable for its lifetime and provides
+ *		a global identifier that can be assumed unique. If *ctx* is
+ *		NULL, then the helper returns the cookie for the initial
+ *		network namespace. The cookie itself is very similar to that
+ *		of **bpf_get_socket_cookie**\ () helper, but for network
+ *		namespaces instead of sockets.
+ *	Return
+ *		A 8-byte long opaque number.
+ *
+ * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
+ *	Description
+ *		Return id of cgroup v2 that is ancestor of the cgroup associated
+ *		with the current task at the *ancestor_level*. The root cgroup
+ *		is at *ancestor_level* zero and each step down the hierarchy
+ *		increments the level. If *ancestor_level* == level of cgroup
+ *		associated with the current task, then return value will be the
+ *		same as that of **bpf_get_current_cgroup_id**\ ().
+ *
+ *		The helper is useful to implement policies based on cgroups
+ *		that are upper in hierarchy than immediate cgroup associated
+ *		with the current task.
+ *
+ *		The format of returned id and helper limitations are same as in
+ *		**bpf_get_current_cgroup_id**\ ().
+ *	Return
+ *		The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
+ *	Description
+ *		Helper is overloaded depending on BPF program type. This
+ *		description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ *		**BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
+ *		Assign the *sk* to the *skb*. When combined with appropriate
+ *		routing configuration to receive the packet towards the socket,
+ *		this will cause *skb* to be delivered to the specified socket.
+ *		Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ *		**bpf_clone_redirect**\ () or other methods outside of BPF may
+ *		interfere with successful delivery to the socket.
+ *
+ *		This operation is only valid from TC ingress path.
+ *
+ *		The *flags* argument must be zero.
+ *	Return
+ *		0 on success, or a negative error in case of failure:
+ *
+ *		**-EINVAL** if specified *flags* are not supported.
+ *
+ *		**-ENOENT** if the socket is unavailable for assignment.
+ *
+ *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
+ *
+ *		**-EOPNOTSUPP** if the operation is not supported, for example
+ *		a call from outside of TC ingress.
+ *
+ *		**-ESOCKTNOSUPPORT** if the socket type is not supported
+ *		(reuseport).
+ *
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ *	Description
+ *		Helper is overloaded depending on BPF program type. This
+ *		description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ *		Select the *sk* as a result of a socket lookup.
+ *
+ *		For the operation to succeed, the passed socket must be
+ *		compatible with the packet description provided by the *ctx* object.
+ *
+ *		L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ *		be an exact match. The IP family (**AF_INET** or
+ *		**AF_INET6**) must be compatible, that is, IPv6 sockets
+ *		that are not v6-only can be selected for IPv4 packets.
+ *
+ *		Only TCP listeners and UDP unconnected sockets can be
+ *		selected. *sk* can also be NULL to reset any previous
+ *		selection.
+ *
+ *		The *flags* argument can be a combination of the following values:
+ *
+ *		* **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ *		  socket selection, potentially done by a BPF program
+ *		  that ran before us.
+ *
+ *		* **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ *		  load-balancing within reuseport group for the socket
+ *		  being selected.
+ *
+ *		On success, *ctx->sk* will point to the selected socket.
+ *
+ *	Return
+ *		0 on success, or a negative errno in case of failure.
+ *
+ *		* **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ *		  not compatible with packet family (*ctx->family*).
+ *
+ *		* **-EEXIST** if socket has been already selected,
+ *		  potentially by another program, and
+ *		  **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ *		* **-EINVAL** if unsupported flags were specified.
+ *
+ *		* **-EPROTOTYPE** if socket L4 protocol
+ *		  (*sk->protocol*) doesn't match packet protocol
+ *		  (*ctx->protocol*).
+ *
+ *		* **-ESOCKTNOSUPPORT** if socket is not in allowed
+ *		  state (TCP listening or UDP unconnected).
+ *
+ * u64 bpf_ktime_get_boot_ns(void)
+ *	Description
+ *		Return the time elapsed since system boot, in nanoseconds.
+ *		Does include the time the system was suspended.
+ *		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
+ *	Return
+ *		Current *ktime*.
+ *
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ *	Description
+ *		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
+ *		out the format string.
+ *		The *m* represents the seq_file. The *fmt* and *fmt_size* are for
+ *		the format string itself. The *data* and *data_len* are format string
+ *		arguments. The *data* are a **u64** array and corresponding format string
+ *		values are stored in the array. For strings and pointers where pointees
+ *		are accessed, only the pointer values are stored in the *data* array.
+ *		The *data_len* is the size of *data* in bytes.
+ *
+ *		The formats **%s** and **%p{i,I}{4,6}** require reading kernel memory.
+ *		Such a read may fail due to either an invalid address or a
+ *		valid address that would take a major memory fault. If reading kernel memory
+ *		fails, the string for **%s** will be an empty string, and the ip
+ *		address for **%p{i,I}{4,6}** will be 0.
Not returning error to + * bpf program is consistent with what **bpf_trace_printk**\ () does for now. + * Return + * 0 on success, or a negative error in case of failure: + * + * **-EBUSY** if per-CPU memory copy buffer is busy, can try again + * by returning 1 from bpf program. + * + * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. + * + * **-E2BIG** if *fmt* contains too many format specifiers. + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + * + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * Description + * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. + * The *m* represents the seq_file. The *data* and *len* represent the + * data to write in bytes. + * Return + * 0 on success, or a negative error in case of failure: + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + * + * u64 bpf_sk_cgroup_id(void *sk) + * Description + * Return the cgroup v2 id of the socket *sk*. + * + * *sk* must be a non-**NULL** pointer to a socket, e.g. one + * returned from **bpf_sk_lookup_xxx**\ (), + * **bpf_sk_fullsock**\ (), etc. The format of returned id is + * same as in **bpf_skb_cgroup_id**\ (). + * + * This helper is available only if the kernel was compiled with + * the **CONFIG_SOCK_CGROUP_DATA** configuration option. + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *sk* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *sk*, then return value will be same as that + * of **bpf_sk_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *sk*. + * + * The format of returned id and helper limitations are same as in + * **bpf_sk_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * Description + * Copy *size* bytes from *data* into a ring buffer *ringbuf*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * Return + * 0 on success, or a negative error in case of failure. + * + * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) + * Description + * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * Return + * Valid pointer with *size* bytes of memory available; NULL, + * otherwise. + * + * void bpf_ringbuf_submit(void *data, u64 flags) + * Description + * Submit reserved ring buffer sample, pointed to by *data*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * Return + * Nothing. Always succeeds. + * + * void bpf_ringbuf_discard(void *data, u64 flags) + * Description + * Discard reserved ring buffer sample, pointed to by *data*. 
+ *		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *		of new data availability is sent.
+ *		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *		of new data availability is sent unconditionally.
+ *	Return
+ *		Nothing. Always succeeds.
+ *
+ * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
+ *	Description
+ *		Query various characteristics of provided ring buffer. What
+ *		exactly is queried is determined by *flags*:
+ *
+ *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *		The data returned is just a momentary snapshot of the actual
+ *		values and could be inaccurate, so this facility should be used
+ *		to power heuristics and for reporting, not to make 100% correct
+ *		calculations.
+ *	Return
+ *		Requested value, or 0, if *flags* are not recognized.
+ *
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
+ *	Description
+ *		Change the skb's checksum level by one layer up or down, or
+ *		reset it entirely to none in order to have the stack perform
+ *		checksum validation. The level is applicable to the following
+ *		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ *		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ *		through **bpf_skb_adjust_room**\ () helper with passing in
+ *		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
+ *		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
+ *		the UDP header is removed. Similarly, an encap of the latter
+ *		into the former could be accompanied by a helper call to
+ *		**bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ *		skb is still intended to be processed in higher layers of the
+ *		stack instead of just egressing at tc.
+ *
+ *		The following *level* settings are supported at this time:
+ *
+ *		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ *		  sets CHECKSUM_NONE to force checksum validation by the stack.
+ *		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ *		  skb->csum_level.
+ *	Return
+ *		0 on success, or a negative error in case of failure. In the
+ *		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ *		is returned or the error code -EACCES in case the skb is not
+ *		subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ *	Description
+ *		Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ *	Return
+ *		*sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ *	Description
+ *		Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ *	Return
+ *		*sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ *	Description
+ *		Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ *	Return
+ *		*sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ *	Description
+ *		Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ *	Return
+ *		*sk* if casting is valid, or **NULL** otherwise.
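The reserve/submit discipline of the ring buffer helpers documented above is easiest to see in code. Below is a minimal sketch (the map size and tracepoint are arbitrary choices): every successful **bpf_ringbuf_reserve**\ () must be paired with either **bpf_ringbuf_submit**\ () or **bpf_ringbuf_discard**\ ()::

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct event {
		__u32 pid;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 256 * 1024);	/* power-of-2 multiple of PAGE_SIZE */
	} rb SEC(".maps");

	SEC("tracepoint/syscalls/sys_enter_execve")
	int trace_execve(void *ctx)
	{
		struct event *e;

		e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
		if (!e)
			return 0;	/* ring buffer full: sample is lost */

		e->pid = bpf_get_current_pid_tgid() >> 32;

		/* flags 0: kernel decides whether to wake up the consumer;
		 * BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP override that. */
		bpf_ringbuf_submit(e, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";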
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ *	Description
+ *		Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ *	Return
+ *		*sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ *	Description
+ *		Return a user or a kernel stack in bpf program provided buffer.
+ *		To achieve this, the helper needs *task*, which is a valid
+ *		pointer to **struct task_struct**. To store the stacktrace, the
+ *		bpf program provides *buf* with a nonnegative *size*.
+ *
+ *		The last argument, *flags*, holds the number of stack frames to
+ *		skip (from 0 to 255), masked with
+ *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ *		the following flags:
+ *
+ *		**BPF_F_USER_STACK**
+ *			Collect a user space stack instead of a kernel stack.
+ *		**BPF_F_USER_BUILD_ID**
+ *			Collect buildid+offset instead of ips for user stack,
+ *			only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ *		**bpf_get_task_stack**\ () can collect up to
+ *		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ *		to a sufficiently large buffer size. Note that
+ *		this limit can be controlled with the **sysctl** program, and
+ *		that it should be manually increased in order to profile long
+ *		user stacks (such as stacks for Java programs). To do so, use:
+ *
+ *		::
+ *
+ *			# sysctl kernel.perf_event_max_stack=<new value>
+ *	Return
+ *		A non-negative value equal to or less than *size* on success,
+ *		or a negative error in case of failure.
+ *
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ *	Description
+ *		Load header option. Support reading a particular TCP header
+ *		option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ *		If *flags* is 0, it will search the option from the
+ *		*skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ *		has details on what skb_data contains under different
+ *		*skops*\ **->op**.
+ *
+ *		The first byte of the *searchby_res* specifies the
+ *		kind that it wants to search.
+ *
+ *		If the searching kind is an experimental kind
+ *		(i.e. 253 or 254 according to RFC6994), it also
+ *		needs to specify the "magic" which is either
+ *		2 bytes or 4 bytes. It then also needs to
+ *		specify the size of the magic by using
+ *		the 2nd byte which is "kind-length" of a TCP
+ *		header option and the "kind-length" also
+ *		includes the first 2 bytes "kind" and "kind-length"
+ *		itself as a normal TCP header option also does.
+ *
+ *		For example, to search experimental kind 254 with
+ *		2 byte magic 0xeB9F, the searchby_res should be
+ *		[ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ *		To search for the standard window scale option (3),
+ *		the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ *		Note, kind-length must be 0 for a regular option.
+ *
+ *		Searching for No-Op (0) and End-of-Option-List (1) is
+ *		not supported.
+ *
+ *		*len* must be at least 2 bytes which is the minimal size
+ *		of a header option.
+ *
+ *		Supported flags:
+ *
+ *		* **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ *		  saved_syn packet or the just-received syn packet.
+ *
+ *	Return
+ *		> 0 when found, the header option is copied to *searchby_res*.
+ *		The return value is the total length copied. On failure, a
+ *		negative error code is returned:
+ *
+ *		**-EINVAL** if a parameter is invalid.
+ *
+ *		**-ENOMSG** if the option is not found.
+ *
+ *		**-ENOENT** if no syn packet is available when
+ *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4 byte
+ * aligned. The kernel will take care of the padding
+ * and of setting the 4-byte-aligned value in th->doff.
+ *
+ * This helper will check for a duplicated option
+ * by searching for the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces that the key must be an inode and the map must
+ * also be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found. + * + * long bpf_d_path(struct path *path, char *buf, u32 sz) + * Description + * Return full path for given **struct path** object, which + * needs to be the kernel BTF *path* object. The path is + * returned in the provided buffer *buf* of size *sz* and + * is zero terminated. + * + * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) + * Description + * Read *size* bytes from user space address *user_ptr* and store + * the data in *dst*. This is a wrapper of **copy_from_user**\ (). + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) + * Description + * Use BTF to store a string representation of *ptr*->ptr in *str*, + * using *ptr*->type_id. This value should specify the type + * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) + * can be used to look up vmlinux BTF type ids. Traversing the + * data structure using BTF, the type information and values are + * stored in the first *str_size* - 1 bytes of *str*. Safe copy of + * the pointer data is carried out to avoid kernel crashes during + * operation. Smaller types can use string space on the stack; + * larger programs can use map data to store the string + * representation. + * + * The string can be subsequently shared with userspace via + * bpf_perf_event_output() or ring buffer interfaces. + * bpf_trace_printk() is to be avoided as it places too small + * a limit on string size to be useful. + * + * *flags* is a combination of + * + * **BTF_F_COMPACT** + * no formatting around type information + * **BTF_F_NONAME** + * no struct/union member names/types + * **BTF_F_PTR_RAW** + * show raw (unobfuscated) pointer values; + * equivalent to printk specifier %px. + * **BTF_F_ZERO** + * show zero-valued struct/union members; they + * are not displayed by default + * + * Return + * The number of bytes that were written (or would have been + * written if output had to be truncated due to string size), + * or a negative error in cases of failure. + * + * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) + * Description + * Use BTF to write to seq_write a string representation of + * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). + * *flags* are identical to those used for bpf_snprintf_btf. + * Return + * 0 on success or a negative error in case of failure. + * + * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) + * Description + * See **bpf_get_cgroup_classid**\ () for the main description. + * This helper differs from **bpf_get_cgroup_classid**\ () in that + * the cgroup v1 net_cls class is retrieved only from the *skb*'s + * associated socket instead of the current process. + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) + * Description + * Redirect the packet to another net device of index *ifindex* + * and fill in L2 addresses from neighboring subsystem. This helper + * is somewhat similar to **bpf_redirect**\ (), except that it + * populates L2 addresses as well, meaning, internally, the helper + * relies on the neighbor lookup for the L2 address of the nexthop. 
+ *
+ * The helper will perform a FIB lookup based on the skb's
+ * networking header to get the address of the next hop, unless
+ * this is supplied by the caller in the *params* argument. The
+ * *plen* argument indicates the length of *params* and should be
+ * set to 0 if *params* is NULL.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For a ksym, there is a
+ * global variable (either static or global) of the same name
+ * defined in the kernel. The ksym is percpu if the global
+ * variable is percpu. The returned pointer points to the global
+ * percpu variable on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantics as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ *
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantics as this_cpu_ptr() in
+ * the kernel. Unlike **bpf_per_cpu_ptr**\ (), it never
+ * returns NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0.
+ *
+ * The following are a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where
+ * the bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the
+ * next element. If it returns 1, the helper will skip the rest of
+ * the elements and return. Other return values are not used now.
+ *
+ * Return
+ * The number of traversed map elements on success, **-EINVAL** for
+ * invalid **flags**.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
+ * Description
+ * Initialize the timer.
+ * The first 4 bits of *flags* specify the clockid.
+ * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
+ * All other bits of *flags* are reserved.
+ * The verifier will reject the program if *timer* is not from
+ * the same *map*.
+ * Return
+ * 0 on success.
+ * **-EBUSY** if *timer* is already initialized.
+ * **-EINVAL** if invalid *flags* are passed.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such a map in bpffs. When the map is unpinned or the file
+ * descriptor is closed, all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
+ * Description
+ * Configure the timer to call the *callback_fn* static function.
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such a map in bpffs. When the map is unpinned or the file
+ * descriptor is closed, all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
+ * Description
+ * Set the timer expiration to *nsecs* nanoseconds from the current
+ * time. The configured callback will be invoked in soft irq context
+ * on some cpu and will not repeat unless another bpf_timer_start()
+ * is made. In such a case the next invocation can migrate to a
+ * different cpu.
+ * Since struct bpf_timer is a field inside a map element, the map
+ * owns the timer. The bpf_timer_set_callback() will increment the
+ * refcnt of the BPF program to make sure that the callback_fn code
+ * stays valid. When the user space reference to a map reaches zero,
+ * all timers in the map are cancelled and the corresponding
+ * programs' refcnts are decremented. This is done to make sure that
+ * Ctrl-C of a user process doesn't leave any timers running. If the
+ * map is pinned in bpffs, the callback_fn can re-arm itself
+ * indefinitely.
+ * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
+ * cancel and free the timer in the given map element.
+ * The map can contain timers that invoke callback_fn-s from different
+ * programs. The same callback_fn can serve different timers from
+ * different maps if the key/value layout matches across maps.
+ * Every bpf_timer_set_callback() can have a different callback_fn.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+ * or invalid *flags* are passed.
+ *
+ * long bpf_timer_cancel(struct bpf_timer *timer)
+ * Description
+ * Cancel the timer and wait for callback_fn to finish if it was running.
+ * Return
+ * 0 if the timer was not active.
+ * 1 if the timer was active.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
+ * own timer which would have led to a deadlock otherwise.
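+ *
+ * Taken together, the timer helpers above are typically used in
+ * init, set_callback, start order from a program that owns a map
+ * with a **struct bpf_timer** field. The sketch below is an
+ * editor's illustration, not part of the original patch; the
+ * *timer_map* array, its *key*, and the callback body are
+ * assumptions:
+ *
+ * ::
+ *
+ * static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+ * {
+ * return 0; // runs later, in soft irq context
+ * }
+ *
+ * int key = 0;
+ * struct bpf_timer *t = bpf_map_lookup_elem(&timer_map, &key);
+ *
+ * if (t) {
+ * bpf_timer_init(t, &timer_map, CLOCK_MONOTONIC);
+ * bpf_timer_set_callback(t, timer_cb);
+ * bpf_timer_start(t, 1000000000, 0); // fire once, in one second
+ * }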
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2862,7 +3956,70 @@ union bpf_attr { FN(sk_storage_get), \ FN(sk_storage_delete), \ FN(send_signal), \ - FN(tcp_gen_syncookie), + FN(tcp_gen_syncookie), \ + FN(skb_output), \ + FN(probe_read_user), \ + FN(probe_read_kernel), \ + FN(probe_read_user_str), \ + FN(probe_read_kernel_str), \ + FN(tcp_send_ack), \ + FN(send_signal_thread), \ + FN(jiffies64), \ + FN(read_branch_records), \ + FN(get_ns_current_pid_tgid), \ + FN(xdp_output), \ + FN(get_netns_cookie), \ + FN(get_current_ancestor_cgroup_id), \ + FN(sk_assign), \ + FN(ktime_get_boot_ns), \ + FN(seq_printf), \ + FN(seq_write), \ + FN(sk_cgroup_id), \ + FN(sk_ancestor_cgroup_id), \ + FN(ringbuf_output), \ + FN(ringbuf_reserve), \ + FN(ringbuf_submit), \ + FN(ringbuf_discard), \ + FN(ringbuf_query), \ + FN(csum_level), \ + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), \ + FN(get_task_stack), \ + FN(load_hdr_opt), \ + FN(store_hdr_opt), \ + FN(reserve_hdr_opt), \ + FN(inode_storage_get), \ + FN(inode_storage_delete), \ + FN(d_path), \ + FN(copy_from_user), \ + FN(snprintf_btf), \ + FN(seq_printf_btf), \ + FN(skb_cgroup_classid), \ + FN(redirect_neigh), \ + FN(per_cpu_ptr), \ + FN(this_cpu_ptr), \ + FN(redirect_peer), \ + FN(task_storage_get), \ + FN(task_storage_delete), \ + FN(get_current_task_btf), \ + FN(bprm_opts_set), \ + FN(ktime_get_coarse_ns), \ + FN(ima_inode_hash), \ + FN(sock_from_file), \ + FN(check_mtu), \ + FN(for_each_map_elem), \ + FN(snprintf), \ + FN(sys_bpf), \ + FN(btf_find_by_name_kind), \ + FN(sys_close), \ + FN(timer_init), \ + FN(timer_set_callback), \ + FN(timer_start), \ + FN(timer_cancel), \ + /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2877,69 +4034,142 @@ enum bpf_func_id { /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ -#define BPF_F_RECOMPUTE_CSUM (1ULL << 0) -#define BPF_F_INVALIDATE_HASH (1ULL << 1) +enum { + BPF_F_RECOMPUTE_CSUM = (1ULL << 0), + BPF_F_INVALIDATE_HASH = (1ULL << 1), +}; /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. */ -#define BPF_F_HDR_FIELD_MASK 0xfULL +enum { + BPF_F_HDR_FIELD_MASK = 0xfULL, +}; /* BPF_FUNC_l4_csum_replace flags. */ -#define BPF_F_PSEUDO_HDR (1ULL << 4) -#define BPF_F_MARK_MANGLED_0 (1ULL << 5) -#define BPF_F_MARK_ENFORCE (1ULL << 6) +enum { + BPF_F_PSEUDO_HDR = (1ULL << 4), + BPF_F_MARK_MANGLED_0 = (1ULL << 5), + BPF_F_MARK_ENFORCE = (1ULL << 6), +}; /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ -#define BPF_F_INGRESS (1ULL << 0) +enum { + BPF_F_INGRESS = (1ULL << 0), +}; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ -#define BPF_F_TUNINFO_IPV6 (1ULL << 0) +enum { + BPF_F_TUNINFO_IPV6 = (1ULL << 0), +}; /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ -#define BPF_F_SKIP_FIELD_MASK 0xffULL -#define BPF_F_USER_STACK (1ULL << 8) +enum { + BPF_F_SKIP_FIELD_MASK = 0xffULL, + BPF_F_USER_STACK = (1ULL << 8), /* flags used by BPF_FUNC_get_stackid only. */ -#define BPF_F_FAST_STACK_CMP (1ULL << 9) -#define BPF_F_REUSE_STACKID (1ULL << 10) + BPF_F_FAST_STACK_CMP = (1ULL << 9), + BPF_F_REUSE_STACKID = (1ULL << 10), /* flags used by BPF_FUNC_get_stack only. */ -#define BPF_F_USER_BUILD_ID (1ULL << 11) + BPF_F_USER_BUILD_ID = (1ULL << 11), +}; /* BPF_FUNC_skb_set_tunnel_key flags. 
*/ -#define BPF_F_ZERO_CSUM_TX (1ULL << 1) -#define BPF_F_DONT_FRAGMENT (1ULL << 2) -#define BPF_F_SEQ_NUMBER (1ULL << 3) +enum { + BPF_F_ZERO_CSUM_TX = (1ULL << 1), + BPF_F_DONT_FRAGMENT = (1ULL << 2), + BPF_F_SEQ_NUMBER = (1ULL << 3), +}; /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ -#define BPF_F_INDEX_MASK 0xffffffffULL -#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK +enum { + BPF_F_INDEX_MASK = 0xffffffffULL, + BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, /* BPF_FUNC_perf_event_output for sk_buff input context. */ -#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) + BPF_F_CTXLEN_MASK = (0xfffffULL << 32), +}; /* Current network namespace */ -#define BPF_F_CURRENT_NETNS (-1L) +enum { + BPF_F_CURRENT_NETNS = (-1L), +}; + +/* BPF_FUNC_csum_level level values. */ +enum { + BPF_CSUM_LEVEL_QUERY, + BPF_CSUM_LEVEL_INC, + BPF_CSUM_LEVEL_DEC, + BPF_CSUM_LEVEL_RESET, +}; /* BPF_FUNC_skb_adjust_room flags. */ -#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0) +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), + BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), +}; -#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff -#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56 +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1) -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) -#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) -#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ BPF_ADJ_ROOM_ENCAP_L2_MASK) \ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) /* BPF_FUNC_sysctl_get_name flags. */ -#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0) +enum { + BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), +}; -/* BPF_FUNC_sk_storage_get flags */ -#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0) +/* BPF_FUNC_<kernel_obj>_storage_get flags */ +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), + /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility + * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. + */ + BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, +}; + +/* BPF_FUNC_read_branch_records flags. */ +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), +}; + +/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and + * BPF_FUNC_bpf_ringbuf_output flags. + */ +enum { + BPF_RB_NO_WAKEUP = (1ULL << 0), + BPF_RB_FORCE_WAKEUP = (1ULL << 1), +}; + +/* BPF_FUNC_bpf_ringbuf_query flags */ +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +/* BPF ring buffer constants */ +enum { + BPF_RINGBUF_BUSY_BIT = (1U << 31), + BPF_RINGBUF_DISCARD_BIT = (1U << 30), + BPF_RINGBUF_HDR_SZ = 8, +}; + +/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ +enum { + BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), + BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), +}; /* Mode for BPF_FUNC_skb_adjust_room helper. 
*/ enum bpf_adj_room_mode { @@ -3072,6 +4302,7 @@ struct bpf_sock { __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; + __s32 rx_queue_mapping; }; struct bpf_tcp_sock { @@ -3165,6 +4396,34 @@ struct xdp_md { /* Below access go through struct xdp_rxq_info */ __u32 ingress_ifindex; /* rxq->dev->ifindex */ __u32 rx_queue_index; /* rxq->queue_index */ + + __u32 egress_ifindex; /* txq->dev->ifindex */ +}; + +/* DEVMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. + */ +struct bpf_devmap_val { + __u32 ifindex; /* device index */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; +}; + +/* CPUMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. + */ +struct bpf_cpumap_val { + __u32 qsize; /* queue size to remote target CPU */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; }; enum sk_action { @@ -3187,6 +4446,8 @@ struct sk_msg_md { __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ __u32 size; /* Total size of sk_msg */ + + __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ }; struct sk_reuseport_md { @@ -3263,7 +4524,7 @@ struct bpf_map_info { __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; - __u32 :32; + __u32 btf_vmlinux_value_type_id; __u64 netns_dev; __u64 netns_ino; __u32 btf_id; @@ -3277,9 +4538,44 @@ struct bpf_btf_info { __u32 id; } __attribute__((aligned(8))); +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ + __u32 tp_name_len; /* in/out: tp_name buffer len */ + } raw_tracepoint; + struct { + __u32 attach_type; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __aligned_u64 target_name; /* in/out: target_name buffer ptr */ + __u32 target_name_len; /* in/out: target_name buffer len */ + union { + struct { + __u32 map_id; + } map; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + }; +} __attribute__((aligned(8))); + /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on - * attach attach type). + * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ @@ -3289,7 +4585,7 @@ struct bpf_sock_addr { __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ - __u32 user_port; /* Allows 4-byte read and write. + __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order */ __u32 family; /* Allows 4-byte read, but no write */ @@ -3354,16 +4650,90 @@ struct bpf_sock_ops { __u64 bytes_received; __u64 bytes_acked; __bpf_md_ptr(struct bpf_sock *, sk); + /* [skb_data, skb_data_end) covers the whole TCP header. + * + * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received + * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the + * header has not been written. + * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have + * been written so far. + * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes + * the 3WHS. + * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes + * the 3WHS. 
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB even though
+ * the outgoing header has not
+ * been written yet.
+ */
 };
 
 /* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
- * supported cb flags
- */
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
+ BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
+ BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+ * It could be used at the client/active side (i.e. connect() side)
+ * when the server has told it that it is in syncookie mode and
+ * requires the active side to resend the bpf-written options.
+ * The active side can keep writing the bpf-options until it
+ * receives a valid packet from the server side confirming that
+ * the earlier packet (and options) has been received. A later
+ * example patch uses it like this at the active side when the
+ * server is in syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common case.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when the kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
+/* Mask of all currently supported cb flags */
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
+};
 
 /* List of known BPF sock_ops operators.
 * New entries can only be added at the end
@@ -3418,6 +4788,63 @@ enum {
 */
 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
 */
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
 };
 
 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -3442,8 +4869,67 @@ enum {
 BPF_TCP_MAX_STATES /* Leave at the end! */
 };
 
-#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
+enum {
+ TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
+ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */
+ /* Copy the SYN pkt to optval
+ *
+ * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
+ * bpf_getsockopt(TCP_SAVED_SYN) but it is not limited
+ * to only getting it from the saved_syn. It can either get the
+ * syn packet from:
+ *
+ * 1. the just-received SYN packet (only available when writing the
+ * SYNACK). It will be useful when it is not necessary to
+ * save the SYN packet for later use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both the
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only optlen number of
+ * bytes is copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * is not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
+
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option spaces
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYN.
+ */ +}; struct bpf_perf_event_value { __u64 counter; @@ -3451,12 +4937,16 @@ struct bpf_perf_event_value { __u64 running; }; -#define BPF_DEVCG_ACC_MKNOD (1ULL << 0) -#define BPF_DEVCG_ACC_READ (1ULL << 1) -#define BPF_DEVCG_ACC_WRITE (1ULL << 2) +enum { + BPF_DEVCG_ACC_MKNOD = (1ULL << 0), + BPF_DEVCG_ACC_READ = (1ULL << 1), + BPF_DEVCG_ACC_WRITE = (1ULL << 2), +}; -#define BPF_DEVCG_DEV_BLOCK (1ULL << 0) -#define BPF_DEVCG_DEV_CHAR (1ULL << 1) +enum { + BPF_DEVCG_DEV_BLOCK = (1ULL << 0), + BPF_DEVCG_DEV_CHAR = (1ULL << 1), +}; struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ @@ -3472,8 +4962,10 @@ struct bpf_raw_tracepoint_args { /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ -#define BPF_FIB_LOOKUP_DIRECT (1U << 0) -#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) +enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), +}; enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ @@ -3536,6 +5028,16 @@ struct bpf_fib_lookup { __u8 dmac[6]; /* ETH_ALEN */ }; +struct bpf_redir_neigh { + /* network family for lookup (AF_INET, AF_INET6) */ + __u32 nh_family; + /* network address of nexthop; skips fib lookup to find gateway */ + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; /* in6_addr; network order */ + }; +}; + enum bpf_task_fd_type { BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ BPF_FD_TYPE_TRACEPOINT, /* tp name */ @@ -3545,9 +5047,11 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; -#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), +}; struct bpf_flow_keys { __u16 nhoff; @@ -3593,6 +5097,11 @@ struct bpf_spin_lock { __u32 val; }; +struct bpf_timer { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. @@ -3613,4 +5122,53 @@ struct bpf_sockopt { __s32 retval; }; +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ +struct bpf_sk_lookup { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + + __u32 family; /* Protocol family (AF_INET, AF_INET6) */ + __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ + __u32 remote_ip4; /* Network byte order */ + __u32 remote_ip6[4]; /* Network byte order */ + __u32 remote_port; /* Network byte order */ + __u32 local_ip4; /* Network byte order */ + __u32 local_ip6[4]; /* Network byte order */ + __u32 local_port; /* Host byte order */ +}; + +/* + * struct btf_ptr is used for typed pointer representation; the + * type id is used to render the pointer data as the appropriate type + * via the bpf_snprintf_btf() helper described above. A flags field - + * potentially to specify additional details about the BTF pointer + * (rather than its mode of display) - is included for future use. + * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. + */ +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; /* BTF ptr flags; unused at present. */ +}; + +/* + * Flags to control bpf_snprintf_btf() behaviour. 
+ * - BTF_F_COMPACT: no formatting around type information + * - BTF_F_NONAME: no struct/union member names/types + * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; + * equivalent to %px. + * - BTF_F_ZERO: show zero-valued struct/union members; they + * are not displayed by default + */ +enum { + BTF_F_COMPACT = (1ULL << 0), + BTF_F_NONAME = (1ULL << 1), + BTF_F_PTR_RAW = (1ULL << 2), + BTF_F_ZERO = (1ULL << 3), +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index 63ae4a39e58b..5a667107ad2c 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -22,9 +22,9 @@ struct btf_header { }; /* Max # of type identifier */ -#define BTF_MAX_TYPE 0x0000ffff +#define BTF_MAX_TYPE 0x000fffff /* Max offset into the string section */ -#define BTF_MAX_NAME_OFFSET 0x0000ffff +#define BTF_MAX_NAME_OFFSET 0x00ffffff /* Max # of struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff @@ -142,7 +142,14 @@ struct btf_param { enum { BTF_VAR_STATIC = 0, - BTF_VAR_GLOBAL_ALLOCATED, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, }; /* BTF_KIND_VAR is followed by a single "struct btf_var" to describe diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h index 5ed721ad5b19..af2a44c08683 100644 --- a/tools/include/uapi/linux/const.h +++ b/tools/include/uapi/linux/const.h @@ -28,4 +28,9 @@ #define _BITUL(x) (_UL(1) << (x)) #define _BITULL(x) (_ULL(1) << (x)) +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) + +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + #endif /* _UAPI_LINUX_CONST_H */ diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 4a8c02cafa9a..781e482dc499 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -167,6 +167,9 @@ enum { IFLA_NEW_IFINDEX, IFLA_MIN_MTU, IFLA_MAX_MTU, + IFLA_PROP_LIST, + IFLA_ALT_IFNAME, /* Alternative ifname */ + IFLA_PERM_ADDRESS, __IFLA_MAX }; @@ -340,6 +343,8 @@ enum { IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, + IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -460,6 +465,7 @@ enum { IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, + IFLA_MACSEC_OFFLOAD, __IFLA_MACSEC_MAX, }; @@ -483,6 +489,14 @@ enum macsec_validation_type { MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, }; +enum macsec_offload { + MACSEC_OFFLOAD_OFF = 0, + MACSEC_OFFLOAD_PHY = 1, + MACSEC_OFFLOAD_MAC = 2, + __MACSEC_OFFLOAD_END, + MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, +}; + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, @@ -950,11 +964,12 @@ enum { #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) +#define XDP_FLAGS_REPLACE (1U << 4) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ - XDP_FLAGS_MODES) + XDP_FLAGS_MODES | XDP_FLAGS_REPLACE) /* These are stored into IFLA_XDP_ATTACHED on dump. 
 */
 enum {
@@ -974,6 +989,7 @@ enum {
 IFLA_XDP_DRV_PROG_ID,
 IFLA_XDP_SKB_PROG_ID,
 IFLA_XDP_HW_PROG_ID,
+ IFLA_XDP_EXPECTED_FD,
 __IFLA_XDP_MAX,
 };
 
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index bb7b271397a6..fabe5aeaa351 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1131,7 +1131,7 @@ union perf_mem_data_src {
 #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
 /* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_SHIFT 38
 
 /* locked instruction */
 #define PERF_MEM_LOCK_NA 0x01 /* not available */
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 0f257139b003..7703f0118385 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
 s->nr_files);
 }
 
-static int gettid(void)
+static int lk_gettid(void)
 {
 return syscall(__NR_gettid);
 }
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
 struct io_sq_ring *ring = &s->sq_ring;
 int ret, prepped;
 
- printf("submitter=%d\n", gettid());
+ printf("submitter=%d\n", lk_gettid());
 
 srand48_r(pthread_self(), &s->rand);
 
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index bd021a0eeef8..4cc69675c2a9 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -90,6 +90,7 @@ struct fs {
 const char * const *mounts;
 char path[PATH_MAX];
 bool found;
+ bool checked;
 long magic;
 };
 
@@ -111,31 +112,37 @@ static struct fs fs__entries[] = {
 .name = "sysfs",
 .mounts = sysfs__fs_known_mountpoints,
 .magic = SYSFS_MAGIC,
+ .checked = false,
 },
 [FS__PROCFS] = {
 .name = "proc",
 .mounts = procfs__known_mountpoints,
 .magic = PROC_SUPER_MAGIC,
+ .checked = false,
 },
 [FS__DEBUGFS] = {
 .name = "debugfs",
 .mounts = debugfs__known_mountpoints,
 .magic = DEBUGFS_MAGIC,
+ .checked = false,
 },
 [FS__TRACEFS] = {
 .name = "tracefs",
 .mounts = tracefs__known_mountpoints,
 .magic = TRACEFS_MAGIC,
+ .checked = false,
 },
 [FS__HUGETLBFS] = {
 .name = "hugetlbfs",
 .mounts = hugetlbfs__known_mountpoints,
 .magic = HUGETLBFS_MAGIC,
+ .checked = false,
 },
 [FS__BPF_FS] = {
 .name = "bpf",
 .mounts = bpf_fs__known_mountpoints,
 .magic = BPF_FS_MAGIC,
+ .checked = false,
 },
 };
 
@@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs)
 }
 
 fclose(fp);
+ fs->checked = true;
 return fs->found = found;
 }
 
@@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs)
 return false;
 
 fs->found = true;
+ fs->checked = true;
 strncpy(fs->path, override_path, sizeof(fs->path) - 1);
 fs->path[sizeof(fs->path) - 1] = '\0';
 return true;
@@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx)
 if (fs->found)
 return (const char *)fs->path;
 
+ /* the mount point was already checked for this filesystem
+ * and did not exist, so return NULL to avoid scanning again.
+ * This makes the found and not-found paths cost equivalent
+ * in case of multiple calls.
+ */
+ if (fs->checked)
+ return NULL;
+
 return fs__get_mountpoint(fs);
 }
 
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 92d03b8396b1..3b70003e7cfb 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -18,6 +18,18 @@
 const char *name##__mount(void); \
 bool name##__configured(void); \
 
+/*
+ * The xxxx__mountpoint() entry points find the first matching mount point for
+ * each of the filesystems listed below, where xxxx is the filesystem type.
+ * + * The interface is as follows: + * + * - If a mount point is found on first call, it is cached and used for all + * subsequent calls. + * + * - If a mount point is not found, NULL is returned on first call and all + * subsequent calls. + */ FS(sysfs) FS(procfs) FS(debugfs) diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore index d9e9dec04605..8a81b3679d2b 100644 --- a/tools/lib/bpf/.gitignore +++ b/tools/lib/bpf/.gitignore @@ -1,5 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only libbpf_version.h libbpf.pc FEATURE-DUMP.libbpf -test_libbpf libbpf.so.* +TAGS +tags +cscope.* +/bpf_helper_defs.h diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index e3962cfbc9a6..190366d05588 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,3 +1,3 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \ - btf_dump.o + btf_dump.o ringbuf.o diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 33e2638ef7f0..55bd78b3496f 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -1,6 +1,9 @@ # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) # Most of this file is copied from tools/lib/traceevent/Makefile +RM ?= rm +srctree = $(abs_srctree) + LIBBPF_VERSION := $(shell \ grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \ sort -rV | head -n1 | cut -d'_' -f2) @@ -56,10 +59,10 @@ ifndef VERBOSE endif FEATURE_USER = .libbpf -FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx -FEATURE_DISPLAY = libelf bpf +FEATURE_TESTS = libelf zlib bpf +FEATURE_DISPLAY = libelf zlib bpf -INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi +INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES) check_feat := 1 @@ -95,27 +98,18 @@ PC_FILE = libbpf.pc ifdef EXTRA_CFLAGS CFLAGS := $(EXTRA_CFLAGS) else - CFLAGS := -g -Wall -endif - -ifeq ($(feature-libelf-mmap), 1) - override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT -endif - -ifeq ($(feature-reallocarray), 0) - override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY + CFLAGS := -g -O2 endif # Append required CFLAGS -override CFLAGS += $(EXTRA_WARNINGS) +override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum override CFLAGS += -Werror -Wall -override CFLAGS += -fPIC override CFLAGS += $(INCLUDES) override CFLAGS += -fvisibility=hidden override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 # flags specific for shared library -SHLIB_FLAGS := -DSHARED +SHLIB_FLAGS := -DSHARED -fPIC ifeq ($(VERBOSE),1) Q = @@ -138,34 +132,32 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/ BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o VERSION_SCRIPT := libbpf.map +BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE)) +TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags) + GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ + sed 's/\[.*\]//' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ sort -u | wc -l) -VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ +VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ + sed 's/\[.*\]//' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ grep -Eo 
'[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) -CXX_TEST_TARGET = $(OUTPUT)test_libbpf - -ifeq ($(feature-cxx), 1) - CMD_TARGETS += $(CXX_TEST_TARGET) -endif - -TARGETS = $(CMD_TARGETS) - all: fixdep $(Q)$(MAKE) all_cmd all_cmd: $(CMD_TARGETS) check -$(BPF_IN_SHARED): force elfdep bpfdep +$(BPF_IN_SHARED): force elfdep zdep bpfdep $(BPF_HELPER_DEFS) @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true @@ -183,22 +175,24 @@ $(BPF_IN_SHARED): force elfdep bpfdep echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" -$(BPF_IN_STATIC): force elfdep bpfdep +$(BPF_IN_STATIC): force elfdep zdep bpfdep $(BPF_HELPER_DEFS) $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) +$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h + $(QUIET_GEN)$(srctree)/scripts/bpf_helpers_doc.py --header \ + --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS) + $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) - $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \ - -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@ + $(QUIET_LINK)$(CC) $(LDFLAGS) \ + --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \ + -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -lz -o $@ @ln -sf $(@F) $(OUTPUT)libbpf.so @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION) $(OUTPUT)libbpf.a: $(BPF_IN_STATIC) - $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ - -$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a - $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@ + $(QUIET_LINK)$(RM) -f $@; $(AR) rcs $@ $^ $(OUTPUT)libbpf.pc: $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \ @@ -217,9 +211,12 @@ check_abi: $(OUTPUT)libbpf.so "versioned in $(VERSION_SCRIPT)." 
>&2; \ readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ + sed 's/\[.*\]//' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ - readelf -s --wide $(OUTPUT)libbpf.so | \ + readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ + sed 's/\[.*\]//' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \ diff -u $(OUTPUT)libbpf_global_syms.tmp \ @@ -247,43 +244,65 @@ install_lib: all_cmd $(call do_install_mkdir,$(libdir_SQ)); \ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) -install_headers: +install_headers: $(BPF_HELPER_DEFS) $(call QUIET_INSTALL, headers) \ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \ $(call do_install,btf.h,$(prefix)/include/bpf,644); \ $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \ - $(call do_install,xsk.h,$(prefix)/include/bpf,644); + $(call do_install,libbpf_common.h,$(prefix)/include/bpf,644); \ + $(call do_install,xsk.h,$(prefix)/include/bpf,644); \ + $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \ + $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \ + $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \ + $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \ + $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644); install_pkgconfig: $(PC_FILE) $(call QUIET_INSTALL, $(PC_FILE)) \ $(call do_install,$(PC_FILE),$(libdir_SQ)/pkgconfig,644) -install: install_lib install_pkgconfig +install: install_lib install_pkgconfig install_headers ### Cleaning rules config-clean: - $(call QUIET_CLEAN, config) + $(call QUIET_CLEAN, feature-detect) $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null -clean: - $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \ - *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \ - *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR) +clean: config-clean + $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \ + *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \ + $(SHARED_OBJDIR) $(STATIC_OBJDIR) \ + $(addprefix $(OUTPUT), \ + *.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc) $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf -PHONY += force elfdep bpfdep +PHONY += force elfdep zdep bpfdep cscope tags force: elfdep: @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi +zdep: + @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi + bpfdep: @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi +cscope: + ls *.c *.h > cscope.files + cscope -b -q -I $(srctree)/include -f cscope.out + +tags: + $(RM) -f TAGS tags + ls *.c *.h | xargs $(TAGS_PROG) -a + # Declare the contents of the .PHONY variable as phony. We keep that # information in a variable so we can use it in if_changed and friends. 
.PHONY: $(PHONY) + +# Delete partially updated (corrupted) files on error +.DELETE_ON_ERROR: diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 9d0485959308..d27e34133973 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -95,7 +95,11 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) attr.btf_key_type_id = create_attr->btf_key_type_id; attr.btf_value_type_id = create_attr->btf_value_type_id; attr.map_ifindex = create_attr->map_ifindex; - attr.inner_map_fd = create_attr->inner_map_fd; + if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS) + attr.btf_vmlinux_value_type_id = + create_attr->btf_vmlinux_value_type_id; + else + attr.inner_map_fd = create_attr->inner_map_fd; return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } @@ -228,6 +232,17 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; + if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS || + attr.prog_type == BPF_PROG_TYPE_LSM) { + attr.attach_btf_id = load_attr->attach_btf_id; + } else if (attr.prog_type == BPF_PROG_TYPE_TRACING || + attr.prog_type == BPF_PROG_TYPE_EXT) { + attr.attach_btf_id = load_attr->attach_btf_id; + attr.attach_prog_fd = load_attr->attach_prog_fd; + } else { + attr.prog_ifindex = load_attr->prog_ifindex; + attr.kern_version = load_attr->kern_version; + } attr.insn_cnt = (__u32)load_attr->insns_cnt; attr.insns = ptr_to_u64(load_attr->insns); attr.license = ptr_to_u64(load_attr->license); @@ -241,8 +256,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, attr.log_size = 0; } - attr.kern_version = load_attr->kern_version; - attr.prog_ifindex = load_attr->prog_ifindex; attr.prog_btf_fd = load_attr->prog_btf_fd; attr.func_info_rec_size = load_attr->func_info_rec_size; attr.func_info_cnt = load_attr->func_info_cnt; @@ -438,6 +451,64 @@ int bpf_map_freeze(int fd) return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr)); } +static int bpf_map_batch_common(int cmd, int fd, void *in_batch, + void *out_batch, void *keys, void *values, + __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_map_batch_opts)) + return -EINVAL; + + memset(&attr, 0, sizeof(attr)); + attr.batch.map_fd = fd; + attr.batch.in_batch = ptr_to_u64(in_batch); + attr.batch.out_batch = ptr_to_u64(out_batch); + attr.batch.keys = ptr_to_u64(keys); + attr.batch.values = ptr_to_u64(values); + attr.batch.count = *count; + attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); + attr.batch.flags = OPTS_GET(opts, flags, 0); + + ret = sys_bpf(cmd, &attr, sizeof(attr)); + *count = attr.batch.count; + + return ret; +} + +int bpf_map_delete_batch(int fd, void *keys, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL, + NULL, keys, NULL, count, opts); +} + +int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, + void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch, + out_batch, keys, values, count, opts); +} + +int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, + void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH, + fd, in_batch, out_batch, keys, values, + count, opts); +} + +int 
bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL, + keys, values, count, opts); +} + int bpf_obj_pin(int fd, const char *pathname) { union bpf_attr attr; @@ -461,14 +532,29 @@ int bpf_obj_get(const char *pathname) int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, unsigned int flags) +{ + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts, + .flags = flags, + ); + + return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts); +} + +int bpf_prog_attach_xattr(int prog_fd, int target_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts) { union bpf_attr attr; + if (!OPTS_VALID(opts, bpf_prog_attach_opts)) + return -EINVAL; + memset(&attr, 0, sizeof(attr)); attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; - attr.attach_flags = flags; + attr.attach_flags = OPTS_GET(opts, flags, 0); + attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0); return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); } @@ -496,6 +582,76 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } +int bpf_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type, + const struct bpf_link_create_opts *opts) +{ + __u32 target_btf_id, iter_info_len; + union bpf_attr attr; + + if (!OPTS_VALID(opts, bpf_link_create_opts)) + return -EINVAL; + + iter_info_len = OPTS_GET(opts, iter_info_len, 0); + target_btf_id = OPTS_GET(opts, target_btf_id, 0); + + if (iter_info_len && target_btf_id) + return -EINVAL; + + memset(&attr, 0, sizeof(attr)); + attr.link_create.prog_fd = prog_fd; + attr.link_create.target_fd = target_fd; + attr.link_create.attach_type = attach_type; + attr.link_create.flags = OPTS_GET(opts, flags, 0); + + if (iter_info_len) { + attr.link_create.iter_info = + ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0)); + attr.link_create.iter_info_len = iter_info_len; + } else if (target_btf_id) { + attr.link_create.target_btf_id = target_btf_id; + } + + return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); +} + +int bpf_link_detach(int link_fd) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.link_detach.link_fd = link_fd; + + return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); +} + +int bpf_link_update(int link_fd, int new_prog_fd, + const struct bpf_link_update_opts *opts) +{ + union bpf_attr attr; + + if (!OPTS_VALID(opts, bpf_link_update_opts)) + return -EINVAL; + + memset(&attr, 0, sizeof(attr)); + attr.link_update.link_fd = link_fd; + attr.link_update.new_prog_fd = new_prog_fd; + attr.link_update.flags = OPTS_GET(opts, flags, 0); + attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + + return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr)); +} + +int bpf_iter_create(int link_fd) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.iter_create.link_fd = link_fd; + + return sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr)); +} + int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) { @@ -568,6 +724,37 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr) return ret; } +int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_test_run_opts)) + return -EINVAL; + + memset(&attr, 0, sizeof(attr)); + 
attr.test.prog_fd = prog_fd; + attr.test.cpu = OPTS_GET(opts, cpu, 0); + attr.test.flags = OPTS_GET(opts, flags, 0); + attr.test.repeat = OPTS_GET(opts, repeat, 0); + attr.test.duration = OPTS_GET(opts, duration, 0); + attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0); + attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0); + attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0); + attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0); + attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL)); + attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL)); + attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); + attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + OPTS_SET(opts, data_size_out, attr.test.data_size_out); + OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); + OPTS_SET(opts, duration, attr.test.duration); + OPTS_SET(opts, retval, attr.test.retval); + return ret; +} + static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) { union bpf_attr attr; @@ -598,6 +785,11 @@ int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID); } +int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) +{ + return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID); +} + int bpf_prog_get_fd_by_id(__u32 id) { union bpf_attr attr; @@ -628,13 +820,23 @@ int bpf_btf_get_fd_by_id(__u32 id) return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); } -int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len) +int bpf_link_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.link_id = id; + + return sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); +} + +int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) { union bpf_attr attr; int err; memset(&attr, 0, sizeof(attr)); - attr.info.bpf_fd = prog_fd; + attr.info.bpf_fd = bpf_fd; attr.info.info_len = *info_len; attr.info.info = ptr_to_u64(info); @@ -656,7 +858,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); } -int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, +int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) { union bpf_attr attr = {}; @@ -703,3 +905,29 @@ int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, return err; } + +int bpf_enable_stats(enum bpf_stats_type type) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.enable_stats.type = type; + + return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr)); +} + +int bpf_prog_bind_map(int prog_fd, int map_fd, + const struct bpf_prog_bind_opts *opts) +{ + union bpf_attr attr; + + if (!OPTS_VALID(opts, bpf_prog_bind_opts)) + return -EINVAL; + + memset(&attr, 0, sizeof(attr)); + attr.prog_bind_map.prog_fd = prog_fd; + attr.prog_bind_map.map_fd = map_fd; + attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); + + return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr)); +} diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0db01334740f..875dde20d56e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -28,14 +28,12 @@ #include <stddef.h> #include <stdint.h> +#include "libbpf_common.h" + #ifdef __cplusplus extern "C" { #endif -#ifndef LIBBPF_API -#define LIBBPF_API __attribute__((visibility("default"))) -#endif - struct bpf_create_map_attr { 
const char *name; enum bpf_map_type map_type; @@ -48,7 +46,10 @@ struct bpf_create_map_attr { __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 map_ifindex; - __u32 inner_map_fd; + union { + __u32 inner_map_fd; + __u32 btf_vmlinux_value_type_id; + }; }; LIBBPF_API int @@ -77,8 +78,14 @@ struct bpf_load_program_attr { const struct bpf_insn *insns; size_t insns_cnt; const char *license; - __u32 kern_version; - __u32 prog_ifindex; + union { + __u32 kern_version; + __u32 attach_prog_fd; + }; + union { + __u32 prog_ifindex; + __u32 attach_btf_id; + }; __u32 prog_btf_fd; __u32 func_info_rec_size; const void *func_info; @@ -120,14 +127,75 @@ LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key, LIBBPF_API int bpf_map_delete_elem(int fd, const void *key); LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key); LIBBPF_API int bpf_map_freeze(int fd); + +struct bpf_map_batch_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u64 elem_flags; + __u64 flags; +}; +#define bpf_map_batch_opts__last_field flags + +LIBBPF_API int bpf_map_delete_batch(int fd, void *keys, + __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, + void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, + void *out_batch, void *keys, + void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); +LIBBPF_API int bpf_map_update_batch(int fd, void *keys, void *values, + __u32 *count, + const struct bpf_map_batch_opts *opts); + LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); LIBBPF_API int bpf_obj_get(const char *pathname); + +struct bpf_prog_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + unsigned int flags; + int replace_prog_fd; +}; +#define bpf_prog_attach_opts__last_field replace_prog_fd + LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); +LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type); +union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */ +struct bpf_link_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; + union bpf_iter_link_info *iter_info; + __u32 iter_info_len; + __u32 target_btf_id; +}; +#define bpf_link_create_opts__last_field target_btf_id + +LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type, + const struct bpf_link_create_opts *opts); + +LIBBPF_API int bpf_link_detach(int link_fd); + +struct bpf_link_update_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; /* extra flags */ + __u32 old_prog_fd; /* expected old program FD */ +}; +#define bpf_link_update_opts__last_field old_prog_fd + +LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd, + const struct bpf_link_update_opts *opts); + +LIBBPF_API int bpf_iter_create(int link_fd); + struct bpf_prog_test_run_attr { int prog_fd; int repeat; @@ -157,20 +225,59 @@ LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 
*next_id);
 LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
 LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
 LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
 LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
-LIBBPF_API int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
+LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
 LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
 			      __u32 query_flags, __u32 *attach_flags,
 			      __u32 *prog_ids, __u32 *prog_cnt);
 LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
-LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
+LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
 			    __u32 log_buf_size, bool do_log);
 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
 				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
 				 __u64 *probe_offset, __u64 *probe_addr);
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
+LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
+
+struct bpf_prog_bind_opts {
+	size_t sz; /* size of this struct for forward/backward compatibility */
+	__u32 flags;
+};
+#define bpf_prog_bind_opts__last_field flags
+
+LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
+				 const struct bpf_prog_bind_opts *opts);
+
+struct bpf_test_run_opts {
+	size_t sz; /* size of this struct for forward/backward compatibility */
+	const void *data_in; /* optional */
+	void *data_out;      /* optional */
+	__u32 data_size_in;
+	__u32 data_size_out; /* in: max length of data_out
+			      * out: length of data_out
+			      */
+	const void *ctx_in; /* optional */
+	void *ctx_out;      /* optional */
+	__u32 ctx_size_in;
+	__u32 ctx_size_out; /* in: max length of ctx_out
+			     * out: length of ctx_out
+			     */
+	__u32 retval;        /* out: return code of the BPF program */
+	int repeat;
+	__u32 duration;      /* out: average per repetition in ns */
+	__u32 flags;
+	__u32 cpu;
+};
+#define bpf_test_run_opts__last_field cpu
+
+LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
+				      struct bpf_test_run_opts *opts);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
new file mode 100644
index 000000000000..bbcefb3ff5a5
--- /dev/null
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_CORE_READ_H__
+#define __BPF_CORE_READ_H__
+
+/*
+ * enum bpf_field_info_kind is passed as a second argument into
+ * __builtin_preserve_field_info() built-in to get a specific aspect of
+ * a field, captured as a first argument. __builtin_preserve_field_info(field,
+ * info_kind) returns a __u32 integer and produces a BTF field relocation,
+ * which is understood and processed by libbpf during BPF object loading. See
+ * selftests/bpf for examples.
+ */
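[Editorial note, not part of the patch: the batch declarations above pair with the bpf_map_batch_common() implementation added earlier in bpf.c. A minimal, hedged sketch of how a user-space caller might drain a hash map with the lookup-and-delete batch API follows; the map fd, the __u32 key/value layout, and the helper name drain_map are all illustrative assumptions, following the pattern used by the kernel selftests.]

#include <errno.h>
#include <bpf/bpf.h>

/* Drain up to max_entries elements from a BPF_MAP_TYPE_HASH whose keys and
 * values are both __u32. Assumes 'fd' is a valid map fd. */
static int drain_map(int fd, __u32 *keys, __u32 *vals, __u32 max_entries)
{
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
	__u32 batch, count, total = 0;
	int err = 0;

	while (!err && total < max_entries) {
		count = max_entries - total;
		/* a NULL in_batch token starts from the beginning; the
		 * kernel-updated 'batch' token says where to resume next */
		err = bpf_map_lookup_and_delete_batch(fd,
						      total ? &batch : NULL,
						      &batch,
						      keys + total, vals + total,
						      &count, &opts);
		total += count; /* 'count' is updated to elements processed */
	}
	if (err && errno != ENOENT) /* ENOENT means end of map, not failure */
		return -errno;
	return total;
}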
+enum bpf_field_info_kind {
+	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
+	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+	BPF_FIELD_SIGNED = 3,		/* field signedness */
+	BPF_FIELD_LSHIFT_U64 = 4,	/* left shift to extract as u64 */
+	BPF_FIELD_RSHIFT_U64 = 5,	/* right shift to extract as u64 */
+};
+
+/* second argument to __builtin_btf_type_id() built-in */
+enum bpf_type_id_kind {
+	BPF_TYPE_ID_LOCAL = 0,		/* BTF type ID in local program */
+	BPF_TYPE_ID_TARGET = 1,		/* BTF type ID in target kernel */
+};
+
+/* second argument to __builtin_preserve_type_info() built-in */
+enum bpf_type_info_kind {
+	BPF_TYPE_EXISTS = 0,		/* type existence in target kernel */
+	BPF_TYPE_SIZE = 1,		/* type size in target kernel */
+};
+
+/* second argument to __builtin_preserve_enum_value() built-in */
+enum bpf_enum_value_kind {
+	BPF_ENUMVAL_EXISTS = 0,		/* enum value existence in kernel */
+	BPF_ENUMVAL_VALUE = 1,		/* integer value of the enum value */
+};
+
+#define __CORE_RELO(src, field, info) \
+	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+	bpf_probe_read_kernel( \
+			(void *)dst, \
+			__CORE_RELO(src, fld, BYTE_SIZE), \
+			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#else
+/* semantics of LSHIFT_U64 assume loading values into low-ordered bytes, so
+ * for big-endian we need to adjust the destination pointer accordingly,
+ * based on field byte size
+ */
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
+	bpf_probe_read_kernel( \
+			(void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
+			__CORE_RELO(src, fld, BYTE_SIZE), \
+			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#endif
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * All this is done in a relocatable manner, so bitfield changes such as
+ * signedness, bit size, or offset changes are handled automatically.
+ * This version of the macro uses bpf_probe_read_kernel() to read the
+ * underlying integer storage. The macro functions as an expression that
+ * evaluates to the extracted bitfield value; a failed probe read leaves
+ * val at 0 instead of propagating the error.
+ */
+#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
+	unsigned long long val = 0; \
+ \
+	__CORE_BITFIELD_PROBE_READ(&val, s, field); \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+	if (__CORE_RELO(s, field, SIGNED)) \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+	else \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+	val; \
+})
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * This version of the macro uses direct memory reads and should be used from
+ * BPF program types that support such functionality (e.g., typed raw
+ * tracepoints).
+ */
+#define BPF_CORE_READ_BITFIELD(s, field) ({ \
+	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+	unsigned long long val; \
+ \
+	switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
+	case 1: val = *(const unsigned char *)p; break; \
+	case 2: val = *(const unsigned short *)p; break; \
+	case 4: val = *(const unsigned int *)p; break; \
+	case 8: val = *(const unsigned long long *)p; break; \
+	} \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+	if (__CORE_RELO(s, field, SIGNED)) \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
+	else \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
+	val; \
+})
+
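[Editorial note, not part of the patch: a hedged usage sketch of the bitfield macros above. It assumes vmlinux.h types, that struct sk_buff's pkt_type bitfield is being read, and it forward-references BPF_KPROBE and bpf_printk from bpf_tracing.h and bpf_helpers.h, which appear later in this series. In kprobe context memory must be probe-read, hence the _PROBED variant; a tp_btf/fentry program could use BPF_CORE_READ_BITFIELD() to read the same field directly.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/kfree_skb")
int BPF_KPROBE(trace_kfree_skb, struct sk_buff *skb)
{
	/* purely to illustrate the existence guard defined just below */
	if (!bpf_core_field_exists(skb->pkt_type))
		return 0;

	/* pkt_type is a 3-bit bitfield in struct sk_buff; extract it in a
	 * relocatable way via a probed read */
	__u64 pkt_type = BPF_CORE_READ_BITFIELD_PROBED(skb, pkt_type);

	bpf_printk("pkt_type=%llu", pkt_type);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";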
+/*
+ * Convenience macro to check that a field actually exists in the target
+ * kernel.
+ * Returns:
+ *    1, if matching field is present in target kernel;
+ *    0, if no matching field found.
+ */
+#define bpf_core_field_exists(field) \
+	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+
+/*
+ * Convenience macro to get the byte size of a field. Works for integers,
+ * struct/unions, pointers, arrays, and enums.
+ */
+#define bpf_core_field_size(field) \
+	__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+
+/*
+ * Convenience macro to get the BTF type ID of a specified type, using local
+ * BTF information. Returns a 32-bit unsigned type ID from the program's own
+ * BTF. Always succeeds.
+ */
+#define bpf_core_type_id_local(type) \
+	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+
+/*
+ * Convenience macro to get the BTF type ID of a target kernel's type that
+ * matches the specified local type.
+ * Returns:
+ *    - valid 32-bit unsigned type ID in kernel BTF;
+ *    - 0, if no matching type was found in the target kernel BTF.
+ */
+#define bpf_core_type_id_kernel(type) \
+	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+
+/*
+ * Convenience macro to check that a provided named type
+ * (struct/union/enum/typedef) exists in the target kernel.
+ * Returns:
+ *    1, if such type is present in the target kernel's BTF;
+ *    0, if no matching type is found.
+ */
+#define bpf_core_type_exists(type) \
+	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+
+/*
+ * Convenience macro to get the byte size of a provided named type
+ * (struct/union/enum/typedef) in the target kernel.
+ * Returns:
+ *    >= 0 size (in bytes), if type is present in the target kernel's BTF;
+ *    0, if no matching type is found.
+ */
+#define bpf_core_type_size(type) \
+	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+
+/*
+ * Convenience macro to check that a provided enumerator value is defined in
+ * the target kernel.
+ * Returns:
+ *    1, if the specified enum type and its enumerator value are present in
+ *    the target kernel's BTF;
+ *    0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+
+/*
+ * Convenience macro to get the integer value of an enumerator value in
+ * the target kernel.
+ * Returns:
+ *    64-bit value, if the specified enum type and its enumerator value are
+ *    present in the target kernel's BTF;
+ *    0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value(enum_type, enum_value) \
+	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+
+/*
+ * bpf_core_read() abstracts away the bpf_probe_read_kernel() call and
+ * captures an offset relocation for the source address using the
+ * __builtin_preserve_access_index() built-in provided by Clang.
+ *
+ * __builtin_preserve_access_index() takes as its argument an expression that
+ * takes the address of a field within a struct/union. It makes the compiler
+ * emit a relocation, which records the BTF type ID describing the root
+ * struct/union and an accessor string describing the exact embedded field
+ * that was used to take the address. See the detailed description of this
+ * relocation format and its semantics in the comments to struct
+ * bpf_field_reloc in libbpf_internal.h.
+ *
+ * This relocation allows libbpf to adjust the BPF instruction to use the
+ * correct actual field offset, based on the target kernel BTF type that
+ * matches the original (local) BTF, used to record the relocation.
+ */ +#define bpf_core_read(dst, sz, src) \ + bpf_probe_read_kernel(dst, sz, \ + (const void *)__builtin_preserve_access_index(src)) + +/* + * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str() + * additionally emitting BPF CO-RE field relocation for specified source + * argument. + */ +#define bpf_core_read_str(dst, sz, src) \ + bpf_probe_read_kernel_str(dst, sz, \ + (const void *)__builtin_preserve_access_index(src)) + +#define ___concat(a, b) a ## b +#define ___apply(fn, n) ___concat(fn, n) +#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N + +/* + * return number of provided arguments; used for switch-based variadic macro + * definitions (see ___last, ___arrow, etc below) + */ +#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* + * return 0 if no arguments are passed, N - otherwise; used for + * recursively-defined macros to specify termination (0) case, and generic + * (N) case (e.g., ___read_ptrs, ___core_read) + */ +#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___last1(x) x +#define ___last2(a, x) x +#define ___last3(a, b, x) x +#define ___last4(a, b, c, x) x +#define ___last5(a, b, c, d, x) x +#define ___last6(a, b, c, d, e, x) x +#define ___last7(a, b, c, d, e, f, x) x +#define ___last8(a, b, c, d, e, f, g, x) x +#define ___last9(a, b, c, d, e, f, g, h, x) x +#define ___last10(a, b, c, d, e, f, g, h, i, x) x +#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___nolast2(a, _) a +#define ___nolast3(a, b, _) a, b +#define ___nolast4(a, b, c, _) a, b, c +#define ___nolast5(a, b, c, d, _) a, b, c, d +#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e +#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f +#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g +#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h +#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i +#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___arrow1(a) a +#define ___arrow2(a, b) a->b +#define ___arrow3(a, b, c) a->b->c +#define ___arrow4(a, b, c, d) a->b->c->d +#define ___arrow5(a, b, c, d, e) a->b->c->d->e +#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f +#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g +#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h +#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i +#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j +#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___type(...) typeof(___arrow(__VA_ARGS__)) + +#define ___read(read_fn, dst, src_type, src, accessor) \ + read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) + +/* "recursively" read a sequence of inner pointers using local __t var */ +#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a); +#define ___rd_last(...) \ + ___read(bpf_core_read, &__t, \ + ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__)); +#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__) +#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__) +#define ___rd_p6(...) 
___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
+#define ___read_ptrs(src, ...) \
+	___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)
+
+#define ___core_read0(fn, dst, src, a) \
+	___read(fn, dst, ___type(src), src, a);
+#define ___core_readN(fn, dst, src, ...) \
+	___read_ptrs(src, ___nolast(__VA_ARGS__)) \
+	___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
+		___last(__VA_ARGS__));
+#define ___core_read(fn, dst, src, a, ...) \
+	___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \
+						      src, a, ##__VA_ARGS__)
+
+/*
+ * BPF_CORE_READ_INTO() is a more performance-conscious variant of
+ * BPF_CORE_READ(), in which the final field is read into user-provided
+ * storage. See BPF_CORE_READ() below for more details on general usage.
+ */
+#define BPF_CORE_READ_INTO(dst, src, a, ...) \
+	({ \
+		___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__) \
+	})
+
+/*
+ * BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as
+ * BPF_CORE_READ() for intermediate pointers, but then executes (and returns
+ * the error code of) bpf_core_read_str() for the final string read.
+ */
+#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \
+	({ \
+		___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\
+	})
+
+/*
+ * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable reads,
+ * especially when there are a few pointer-chasing steps.
+ * E.g., what in the non-BPF world (or in BPF w/ BCC) would be something like:
+ *	int x = s->a.b.c->d.e->f->g;
+ * can be succinctly achieved using BPF_CORE_READ as:
+ *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
+ *
+ * BPF_CORE_READ will decompose the above statement into 4 bpf_core_read (BPF
+ * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
+ * equivalent to:
+ * 1. const void *__t = s->a.b.c;
+ * 2. __t = __t->d.e;
+ * 3. __t = __t->f;
+ * 4. return __t->g;
+ *
+ * The equivalence is logical, because heavy type casting/preservation is
+ * involved and all the reads happen through bpf_probe_read_kernel() calls
+ * using __builtin_preserve_access_index() to emit CO-RE relocations.
+ *
+ * N.B. Only up to 9 "field accessors" are supported, which should be more
+ * than enough for any practical purpose.
+ */
+#define BPF_CORE_READ(src, a, ...) \
+	({ \
+		___type((src), a, ##__VA_ARGS__) __r; \
+		BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
+		__r; \
+	})
+
+#endif
+
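[Editorial note, not part of the patch: to make the pointer-chasing description above concrete, here is a hedged sketch of BPF_CORE_READ()/BPF_CORE_READ_STR_INTO() in a BPF program. It assumes vmlinux.h supplies the kernel types; the section name and the choice of task_struct fields are illustrative.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("raw_tracepoint/sys_enter")
int trace_enter(void *ctx)
{
	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
	char comm[16];
	int ppid;

	/* equivalent of task->real_parent->pid, with CO-RE relocations */
	ppid = BPF_CORE_READ(task, real_parent, pid);

	/* string variant: chases task->real_parent, then reads ->comm as
	 * a string into the on-stack buffer */
	BPF_CORE_READ_STR_INTO(&comm, task, real_parent, comm);

	bpf_printk("parent pid=%d comm=%s", ppid, comm);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";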
diff --git a/tools/lib/bpf/bpf_endian.h b/tools/lib/bpf/bpf_endian.h
new file mode 100644
index 000000000000..ec9db4feca9f
--- /dev/null
+++ b/tools/lib/bpf/bpf_endian.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_ENDIAN__
+#define __BPF_ENDIAN__
+
+/*
+ * Isolate byte #n and put it into byte #m, for __u##b type.
+ * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
+ * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
+ * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
+ * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
+ * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
+ */
+#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))
+
+#define ___bpf_swab16(x) ((__u16)( \
+			  ___bpf_mvb(x, 16, 0, 1) | \
+			  ___bpf_mvb(x, 16, 1, 0)))
+
+#define ___bpf_swab32(x) ((__u32)( \
+			  ___bpf_mvb(x, 32, 0, 3) | \
+			  ___bpf_mvb(x, 32, 1, 2) | \
+			  ___bpf_mvb(x, 32, 2, 1) | \
+			  ___bpf_mvb(x, 32, 3, 0)))
+
+#define ___bpf_swab64(x) ((__u64)( \
+			  ___bpf_mvb(x, 64, 0, 7) | \
+			  ___bpf_mvb(x, 64, 1, 6) | \
+			  ___bpf_mvb(x, 64, 2, 5) | \
+			  ___bpf_mvb(x, 64, 3, 4) | \
+			  ___bpf_mvb(x, 64, 4, 3) | \
+			  ___bpf_mvb(x, 64, 5, 2) | \
+			  ___bpf_mvb(x, 64, 6, 1) | \
+			  ___bpf_mvb(x, 64, 7, 0)))
+
+/* LLVM's BPF target selects the endianness of the CPU it compiles on,
+ * unless the user overrides it explicitly (bpfel/bpfeb). The __BYTE_ORDER__
+ * used here is defined by the compiler; we cannot rely on __BYTE_ORDER from
+ * libc headers, since it doesn't reflect the actual requested byte order.
+ *
+ * Note that LLVM's BPF target has different __builtin_bswapX() semantics:
+ * it maps to BPF_ALU | BPF_END | BPF_TO_BE in both the bpfel and bpfeb
+ * case, which means it effectively acts as cpu_to_be16() below. We could
+ * use it unconditionally in the BPF case, but better not rely on it, so
+ * that this header can be used from both the application and the BPF
+ * program side, which use different targets.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define __bpf_ntohs(x)			__builtin_bswap16(x)
+# define __bpf_htons(x)			__builtin_bswap16(x)
+# define __bpf_constant_ntohs(x)	___bpf_swab16(x)
+# define __bpf_constant_htons(x)	___bpf_swab16(x)
+# define __bpf_ntohl(x)			__builtin_bswap32(x)
+# define __bpf_htonl(x)			__builtin_bswap32(x)
+# define __bpf_constant_ntohl(x)	___bpf_swab32(x)
+# define __bpf_constant_htonl(x)	___bpf_swab32(x)
+# define __bpf_be64_to_cpu(x)		__builtin_bswap64(x)
+# define __bpf_cpu_to_be64(x)		__builtin_bswap64(x)
+# define __bpf_constant_be64_to_cpu(x)	___bpf_swab64(x)
+# define __bpf_constant_cpu_to_be64(x)	___bpf_swab64(x)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define __bpf_ntohs(x)			(x)
+# define __bpf_htons(x)			(x)
+# define __bpf_constant_ntohs(x)	(x)
+# define __bpf_constant_htons(x)	(x)
+# define __bpf_ntohl(x)			(x)
+# define __bpf_htonl(x)			(x)
+# define __bpf_constant_ntohl(x)	(x)
+# define __bpf_constant_htonl(x)	(x)
+# define __bpf_be64_to_cpu(x)		(x)
+# define __bpf_cpu_to_be64(x)		(x)
+# define __bpf_constant_be64_to_cpu(x)	(x)
+# define __bpf_constant_cpu_to_be64(x)	(x)
+#else
+# error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+
+#define bpf_htons(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_htons(x) : __bpf_htons(x))
+#define bpf_ntohs(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
+#define bpf_htonl(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_htonl(x) : __bpf_htonl(x))
+#define bpf_ntohl(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_ntohl(x) : __bpf_ntohl(x))
+#define bpf_cpu_to_be64(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
+#define bpf_be64_to_cpu(x) \
+	(__builtin_constant_p(x) ? \
+	 __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))
+
+#endif /* __BPF_ENDIAN__ */
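[Editorial note, not part of the patch: a hedged sketch of why both forms above matter. Comparing against a constant such as ETH_P_IP lets bpf_htons() fold to a compile-time constant, while converting a runtime value emits a byte-swap on little-endian targets. The sketch also uses the BTF-defined map syntax (__uint/__type) and bpf_printk from bpf_helpers.h, introduced right after this header; all names here are illustrative.]

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} pkt_count SEC(".maps");

SEC("xdp")
int count_ipv4(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	__u32 key = 0;
	__u64 *cnt;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	/* constant case: bpf_htons(ETH_P_IP) is folded at compile time */
	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
		struct iphdr *ip = (void *)(eth + 1);

		if ((void *)(ip + 1) > data_end)
			return XDP_PASS;

		cnt = bpf_map_lookup_elem(&pkt_count, &key);
		if (cnt)
			__sync_fetch_and_add(cnt, 1);

		/* variable case: swaps a runtime value on LE targets */
		bpf_printk("ipv4 tot_len=%u", bpf_ntohs(ip->tot_len));
	}
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";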
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
new file mode 100644
index 000000000000..72b251110c4d
--- /dev/null
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_HELPERS__
+#define __BPF_HELPERS__
+
+/*
+ * Note that BPF programs need to include either vmlinux.h
+ * (auto-generated from BTF) or linux/types.h in advance,
+ * since bpf_helper_defs.h uses such types as __u64.
+ */
+#include "bpf_helper_defs.h"
+
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) typeof(val) *name
+#define __array(name, val) typeof(val) *name[]
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, ...) \
+({ \
+	char ____fmt[] = fmt; \
+	bpf_trace_printk(____fmt, sizeof(____fmt), \
+			 ##__VA_ARGS__); \
+})
+
+/*
+ * Helper macro to place programs, maps, and the license in
+ * different sections of the ELF BPF object file. Section names
+ * are interpreted by the ELF BPF loader.
+ */
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#ifndef __always_inline
+#define __always_inline __attribute__((always_inline))
+#endif
+#ifndef __noinline
+#define __noinline __attribute__((noinline))
+#endif
+#ifndef __weak
+#define __weak __attribute__((weak))
+#endif
+
+/*
+ * Helper macros to manipulate data structures
+ */
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
+#endif
+#ifndef container_of
+#define container_of(ptr, type, member) \
+	({ \
+		void *__mptr = (void *)(ptr); \
+		((type *)(__mptr - offsetof(type, member))); \
+	})
+#endif
+
+/*
+ * Helper macro to throw a compilation error if __bpf_unreachable() gets
+ * built into the resulting code. This works given the BPF back end does not
+ * implement __builtin_trap(). This is useful to assert that certain paths
+ * of the program code are never used and hence eliminated by the compiler.
+ *
+ * For example, consider a switch statement that covers known cases used by
+ * the program. __bpf_unreachable() can then reside in the default case. If
+ * the program gets extended such that a case is not covered in the switch
+ * statement, then it will throw a build error due to the default case not
+ * being compiled out.
+ */
+#ifndef __bpf_unreachable
+# define __bpf_unreachable() __builtin_trap()
+#endif
+
+/*
+ * Helper function to perform a tail call with a constant/immediate map slot.
+ */
+#if __clang_major__ >= 8 && defined(__bpf__)
+static __always_inline void
+bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
+{
+	if (!__builtin_constant_p(slot))
+		__bpf_unreachable();
+
+	/*
+	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
+	 * pointer) and r3 (constant map index) from _different paths_ ending
+	 * up at the _same_ call insn as otherwise we won't be able to use the
+	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
+	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
+	 * tracking for prog array pokes") for details on verifier tracking.
+	 *
+	 * Note on the clobber list: we need to stay in line with the BPF
+	 * calling convention, so even if we don't end up using r0, r4, r5,
+	 * we need to mark them as clobbered so that LLVM doesn't end up
+	 * using them before / after the call.
+ */ + asm volatile("r1 = %[ctx]\n\t" + "r2 = %[map]\n\t" + "r3 = %[slot]\n\t" + "call 12" + :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) + : "r0", "r1", "r2", "r3", "r4", "r5"); +} +#endif + +/* + * Helper structure used by eBPF C program + * to describe BPF map attributes to libbpf loader + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +}; + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +#define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) + +#endif diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h new file mode 100644 index 000000000000..f9ef37707888 --- /dev/null +++ b/tools/lib/bpf/bpf_tracing.h @@ -0,0 +1,432 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_H__ +#define __BPF_TRACING_H__ + +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ +#if defined(__TARGET_ARCH_x86) + #define bpf_target_x86 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_s390) + #define bpf_target_s390 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm) + #define bpf_target_arm + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm64) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_mips) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__TARGET_ARCH_powerpc) + #define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_sparc) + #define bpf_target_sparc + #define bpf_target_defined +#else + #undef bpf_target_defined +#endif + +/* Fall back to what the compiler says */ +#ifndef bpf_target_defined +#if defined(__x86_64__) + #define bpf_target_x86 +#elif defined(__s390__) + #define bpf_target_s390 +#elif defined(__arm__) + #define bpf_target_arm +#elif defined(__aarch64__) + #define bpf_target_arm64 +#elif defined(__mips__) + #define bpf_target_mips +#elif defined(__powerpc__) + #define bpf_target_powerpc +#elif defined(__sparc__) + #define bpf_target_sparc +#endif +#endif + +#if defined(bpf_target_x86) + +#if defined(__KERNEL__) || defined(__VMLINUX_H__) + +#define PT_REGS_PARM1(x) ((x)->di) +#define PT_REGS_PARM2(x) ((x)->si) +#define PT_REGS_PARM3(x) ((x)->dx) +#define PT_REGS_PARM4(x) ((x)->cx) +#define PT_REGS_PARM5(x) ((x)->r8) +#define PT_REGS_RET(x) ((x)->sp) +#define PT_REGS_FP(x) ((x)->bp) +#define PT_REGS_RC(x) ((x)->ax) +#define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->ip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip) + +#else + +#ifdef __i386__ +/* i386 kernel is built with -mregparm=3 */ +#define PT_REGS_PARM1(x) ((x)->eax) +#define PT_REGS_PARM2(x) ((x)->edx) +#define PT_REGS_PARM3(x) ((x)->ecx) +#define PT_REGS_PARM4(x) 0 +#define PT_REGS_PARM5(x) 0 +#define PT_REGS_RET(x) ((x)->esp) +#define PT_REGS_FP(x) 
((x)->ebp) +#define PT_REGS_RC(x) ((x)->eax) +#define PT_REGS_SP(x) ((x)->esp) +#define PT_REGS_IP(x) ((x)->eip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx) +#define PT_REGS_PARM4_CORE(x) 0 +#define PT_REGS_PARM5_CORE(x) 0 +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip) + +#else + +#define PT_REGS_PARM1(x) ((x)->rdi) +#define PT_REGS_PARM2(x) ((x)->rsi) +#define PT_REGS_PARM3(x) ((x)->rdx) +#define PT_REGS_PARM4(x) ((x)->rcx) +#define PT_REGS_PARM5(x) ((x)->r8) +#define PT_REGS_RET(x) ((x)->rsp) +#define PT_REGS_FP(x) ((x)->rbp) +#define PT_REGS_RC(x) ((x)->rax) +#define PT_REGS_SP(x) ((x)->rsp) +#define PT_REGS_IP(x) ((x)->rip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip) + +#endif +#endif + +#elif defined(bpf_target_s390) + +/* s390 provides user_pt_regs instead of struct pt_regs to userspace */ +struct pt_regs; +#define PT_REGS_S390 const volatile user_pt_regs +#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2]) +#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3]) +#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4]) +#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5]) +#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6]) +#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14]) +/* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11]) +#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2]) +#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15]) +#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr) + +#elif defined(bpf_target_arm) + +#define PT_REGS_PARM1(x) ((x)->uregs[0]) +#define PT_REGS_PARM2(x) ((x)->uregs[1]) +#define PT_REGS_PARM3(x) ((x)->uregs[2]) +#define PT_REGS_PARM4(x) ((x)->uregs[3]) +#define PT_REGS_PARM5(x) ((x)->uregs[4]) +#define PT_REGS_RET(x) ((x)->uregs[14]) +#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_RC(x) ((x)->uregs[0]) +#define PT_REGS_SP(x) ((x)->uregs[13]) +#define PT_REGS_IP(x) 
((x)->uregs[12]) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12]) + +#elif defined(bpf_target_arm64) + +/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ +struct pt_regs; +#define PT_REGS_ARM64 const volatile struct user_pt_regs +#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0]) +#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1]) +#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2]) +#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3]) +#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4]) +#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30]) +/* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29]) +#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0]) +#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp) +#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc) + +#elif defined(bpf_target_mips) + +#define PT_REGS_PARM1(x) ((x)->regs[4]) +#define PT_REGS_PARM2(x) ((x)->regs[5]) +#define PT_REGS_PARM3(x) ((x)->regs[6]) +#define PT_REGS_PARM4(x) ((x)->regs[7]) +#define PT_REGS_PARM5(x) ((x)->regs[8]) +#define PT_REGS_RET(x) ((x)->regs[31]) +#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */ +#define PT_REGS_RC(x) ((x)->regs[2]) +#define PT_REGS_SP(x) ((x)->regs[29]) +#define PT_REGS_IP(x) ((x)->cp0_epc) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[2]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc) + +#elif defined(bpf_target_powerpc) + +#define PT_REGS_PARM1(x) ((x)->gpr[3]) +#define PT_REGS_PARM2(x) ((x)->gpr[4]) +#define PT_REGS_PARM3(x) ((x)->gpr[5]) +#define PT_REGS_PARM4(x) ((x)->gpr[6]) +#define PT_REGS_PARM5(x) ((x)->gpr[7]) +#define PT_REGS_RC(x) ((x)->gpr[3]) +#define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) 
((x)->nip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip) + +#elif defined(bpf_target_sparc) + +#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) +#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2]) +#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3]) +#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4]) +#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) +#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP]) + +/* Should this also be a bpf_target check for the sparc case? */ +#if defined(__arch64__) +#define PT_REGS_IP(x) ((x)->tpc) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc) +#else +#define PT_REGS_IP(x) ((x)->pc) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc) +#endif + +#endif + +#if defined(bpf_target_powerpc) +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP +#elif defined(bpf_target_sparc) +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP +#else +#define BPF_KPROBE_READ_RET_IP(ip, ctx) \ + ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) +#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \ + ({ bpf_probe_read_kernel(&(ip), sizeof(ip), \ + (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) +#endif + +#define ___bpf_concat(a, b) a ## b +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#define ___bpf_narg(...) \ + ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define ___bpf_empty(...) \ + ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___bpf_ctx_cast0() ctx +#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0] +#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1] +#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2] +#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3] +#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4] +#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5] +#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6] +#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7] +#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8] +#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9] +#define ___bpf_ctx_cast11(x, args...) 
___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+	___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs, which accept input arguments as a single
+ * pointer to an untyped u64 array, where each u64 can actually be a typed
+ * pointer or an integer of a different size. Instead of requiring the user
+ * to write manual casts and work with array elements by index, the BPF_PROG
+ * macro allows the user to declare a list of named and typed input
+ * arguments in the same syntax as for a normal C function. All the casting
+ * is hidden and performed transparently, while user code can just assume it
+ * is working with function arguments of the specified type and name.
+ *
+ * The original raw context is preserved as well, as the 'ctx' argument.
+ * This is useful when using BPF helpers that expect the original context
+ * as one of the parameters (e.g., for bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...) \
+name(unsigned long long *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+	_Pragma("GCC diagnostic push") \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+	return ____##name(___bpf_ctx_cast(args)); \
+	_Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
+
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+	___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+	___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+	___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+	___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+	___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+	___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG does for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying
+ * platform-specific low-level way of getting kprobe input arguments from
+ * struct pt_regs, and provides a familiar typed and named function
+ * arguments syntax and semantics for accessing kprobe input parameters.
+ *
+ * The original struct pt_regs *context is preserved as the 'ctx' argument.
+ * This might be necessary when using BPF helpers like
+ * bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+	_Pragma("GCC diagnostic push") \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+	return ____##name(___bpf_kprobe_args(args)); \
+	_Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
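[Editorial note, not part of the patch: a short hedged sketch of what BPF_PROG buys — a BTF-typed tracepoint handler declared with named, typed arguments instead of manual ctx[] indexing. It assumes vmlinux.h types and the usual sched_switch signature (preempt, prev, next).]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* tp_btf/sched_switch receives its arguments as u64 slots; BPF_PROG
 * casts them to the declared types transparently */
SEC("tp_btf/sched_switch")
int BPF_PROG(on_switch, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	/* direct field access is allowed in tp_btf programs */
	bpf_printk("switch: %d -> %d", prev->pid, next->pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";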
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) \
+	___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) \
+	___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except it only provides an
+ * optional return value (in addition to `struct pt_regs *ctx`), but no
+ * input arguments, because they will have been clobbered by the time the
+ * probed function returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+	_Pragma("GCC diagnostic push") \
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+	return ____##name(___bpf_kretprobe_args(args)); \
+	_Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+
+/*
+ * BPF_SEQ_PRINTF wraps bpf_seq_printf, gathering the to-be-printed values
+ * into an on-stack array and a static format string.
+ */
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+	({ \
+		_Pragma("GCC diagnostic push") \
+		_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+		static const char ___fmt[] = fmt; \
+		unsigned long long ___param[] = { args }; \
+		_Pragma("GCC diagnostic pop") \
+		int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+					    ___param, sizeof(___param)); \
+		___ret; \
+	})
+
+#endif
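[Editorial note, not part of the patch: the kprobe counterpart as a hedged sketch, following the common libbpf-bootstrap pattern — entry arguments via BPF_KPROBE, the return value via BPF_KRETPROBE. do_unlinkat's signature (int dfd, struct filename *name) is assumed from the kernel source; program names are illustrative.]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(do_unlinkat_entry, int dfd, struct filename *name)
{
	/* arguments are fished out of pt_regs via PT_REGS_PARMn() */
	const char *fname = BPF_CORE_READ(name, name);

	bpf_printk("unlinkat enter: dfd=%d name=%s", dfd, fname);
	return 0;
}

SEC("kretprobe/do_unlinkat")
int BPF_KRETPROBE(do_unlinkat_exit, long ret)
{
	/* only the return value is available at function exit */
	bpf_printk("unlinkat exit: ret=%ld", ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";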
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d606a358480d..231b07203e3d 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 /* Copyright (c) 2018 Facebook */
 
+#include <byteswap.h>
 #include <endian.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -8,6 +9,10 @@
 #include <fcntl.h>
 #include <unistd.h>
 #include <errno.h>
+#include <sys/utsname.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/btf.h>
 #include <gelf.h>
@@ -17,23 +22,79 @@
 #include "libbpf_internal.h"
 #include "hashmap.h"
 
-#define BTF_MAX_NR_TYPES 0x7fffffff
-#define BTF_MAX_STR_OFFSET 0x7fffffff
+#define BTF_MAX_NR_TYPES 0x7fffffffU
+#define BTF_MAX_STR_OFFSET 0x7fffffffU
 
 static struct btf_type btf_void;
 
 struct btf {
-	union {
-		struct btf_header *hdr;
-		void *data;
-	};
-	struct btf_type **types;
-	const char *strings;
-	void *nohdr_data;
+	/* raw BTF data in native endianness */
+	void *raw_data;
+	/* raw BTF data in non-native endianness */
+	void *raw_data_swapped;
+	__u32 raw_size;
+	/* whether target endianness differs from the native one */
+	bool swapped_endian;
+
+	/*
+	 * When BTF is loaded from an ELF or raw memory, it is stored
+	 * in a contiguous memory block. The hdr, types_data, and strs_data
+	 * pointers point inside that memory region to their respective parts
+	 * of the BTF representation:
+	 *
+	 * +--------------------------------+
+	 * |  Header  |  Types  |  Strings  |
+	 * +--------------------------------+
+	 * ^          ^          ^
+	 * |          |          |
+	 * hdr        |          |
+	 * types_data-+          |
+	 * strs_data------------+
+	 *
+	 * If BTF data is later modified, e.g., due to types being added or
+	 * removed, BTF deduplication performed, etc., this contiguous
+	 * representation is broken up into three independently allocated
+	 * memory regions so that they can be modified independently.
+	 * raw_data is nulled out at that point, but can be later allocated
+	 * and cached again if the user calls btf__get_raw_data(), at which
+	 * point raw_data will contain a contiguous copy of the header,
+	 * types, and strings:
+	 *
+	 * +----------+  +---------+  +-----------+
+	 * |  Header  |  |  Types  |  |  Strings  |
+	 * +----------+  +---------+  +-----------+
+	 * ^             ^            ^
+	 * |             |            |
+	 * hdr           |            |
+	 * types_data----+            |
+	 * strs_data------------------+
+	 *
+	 *               +----------+---------+-----------+
+	 *               |  Header  |  Types  |  Strings  |
+	 * raw_data----->+----------+---------+-----------+
+	 */
+	struct btf_header *hdr;
+
+	void *types_data;
+	size_t types_data_cap; /* used size stored in hdr->type_len */
+
+	/* type ID to `struct btf_type *` lookup index */
+	__u32 *type_offs;
+	size_t type_offs_cap;
 	__u32 nr_types;
-	__u32 types_size;
-	__u32 data_size;
+
+	void *strs_data;
+	size_t strs_data_cap; /* used size stored in hdr->str_len */
+
+	/* lookup index for each unique string in strings section */
+	struct hashmap *strs_hash;
+	/* whether strings are already deduplicated */
+	bool strs_deduped;
+	/* BTF object FD, if loaded into kernel */
 	int fd;
+
+	/* Pointer size (in bytes) for a target architecture of this BTF */
+	int ptr_sz;
 };
 
 static inline __u64 ptr_to_u64(const void *ptr)
@@ -41,60 +102,114 @@
 	return (__u64) (unsigned long) ptr;
 }
 
-static int btf_add_type(struct btf *btf, struct btf_type *t)
+/* Ensure that a given dynamically allocated memory region pointed to by
+ * *data*, with a capacity of *cap_cnt* elements each taking *elem_sz* bytes,
+ * has enough memory to accommodate *add_cnt* new elements, assuming
+ * *cur_cnt* elements are already used. At most *max_cnt* elements can ever
+ * be allocated. If necessary, memory is reallocated and all existing data
+ * is copied over; the new pointer to the memory region is stored at *data*,
+ * and the new memory region capacity (in number of elements) is stored in
+ * *cap_cnt*.
+ * On success, a pointer to the beginning of the unused memory is returned.
+ * On error, NULL is returned.
+ */ +void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, + size_t cur_cnt, size_t max_cnt, size_t add_cnt) { - if (btf->types_size - btf->nr_types < 2) { - struct btf_type **new_types; - __u32 expand_by, new_size; + size_t new_cnt; + void *new_data; - if (btf->types_size == BTF_MAX_NR_TYPES) - return -E2BIG; + if (cur_cnt + add_cnt <= *cap_cnt) + return *data + cur_cnt * elem_sz; - expand_by = max(btf->types_size >> 2, 16); - new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by); + /* requested more than the set limit */ + if (cur_cnt + add_cnt > max_cnt) + return NULL; - new_types = realloc(btf->types, sizeof(*new_types) * new_size); - if (!new_types) - return -ENOMEM; + new_cnt = *cap_cnt; + new_cnt += new_cnt / 4; /* expand by 25% */ + if (new_cnt < 16) /* but at least 16 elements */ + new_cnt = 16; + if (new_cnt > max_cnt) /* but not exceeding a set limit */ + new_cnt = max_cnt; + if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */ + new_cnt = cur_cnt + add_cnt; - if (btf->nr_types == 0) - new_types[0] = &btf_void; + new_data = libbpf_reallocarray(*data, new_cnt, elem_sz); + if (!new_data) + return NULL; - btf->types = new_types; - btf->types_size = new_size; - } + /* zero out newly allocated portion of memory */ + memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz); - btf->types[++(btf->nr_types)] = t; + *data = new_data; + *cap_cnt = new_cnt; + return new_data + cur_cnt * elem_sz; +} + +/* Ensure given dynamically allocated memory region has enough allocated space + * to accommodate *need_cnt* elements of size *elem_sz* bytes each + */ +int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt) +{ + void *p; + + if (need_cnt <= *cap_cnt) + return 0; + + p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt); + if (!p) + return -ENOMEM; return 0; } +static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) +{ + __u32 *p; + + p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), + btf->nr_types + 1, BTF_MAX_NR_TYPES, 1); + if (!p) + return -ENOMEM; + + *p = type_off; + return 0; +} + +static void btf_bswap_hdr(struct btf_header *h) +{ + h->magic = bswap_16(h->magic); + h->hdr_len = bswap_32(h->hdr_len); + h->type_off = bswap_32(h->type_off); + h->type_len = bswap_32(h->type_len); + h->str_off = bswap_32(h->str_off); + h->str_len = bswap_32(h->str_len); +} + static int btf_parse_hdr(struct btf *btf) { - const struct btf_header *hdr = btf->hdr; + struct btf_header *hdr = btf->hdr; __u32 meta_left; - if (btf->data_size < sizeof(struct btf_header)) { + if (btf->raw_size < sizeof(struct btf_header)) { pr_debug("BTF header not found\n"); return -EINVAL; } - if (hdr->magic != BTF_MAGIC) { + if (hdr->magic == bswap_16(BTF_MAGIC)) { + btf->swapped_endian = true; + if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) { + pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n", + bswap_32(hdr->hdr_len)); + return -ENOTSUP; + } + btf_bswap_hdr(hdr); + } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF magic:%x\n", hdr->magic); return -EINVAL; } - if (hdr->version != BTF_VERSION) { - pr_debug("Unsupported BTF version:%u\n", hdr->version); - return -ENOTSUP; - } - - if (hdr->flags) { - pr_debug("Unsupported BTF flags:%x\n", hdr->flags); - return -ENOTSUP; - } - - meta_left = btf->data_size - sizeof(*hdr); + meta_left = btf->raw_size - sizeof(*hdr); if (!meta_left) { pr_debug("BTF has no data\n"); return 
-EINVAL; @@ -120,15 +235,13 @@ static int btf_parse_hdr(struct btf *btf) return -EINVAL; } - btf->nohdr_data = btf->hdr + 1; - return 0; } static int btf_parse_str_sec(struct btf *btf) { const struct btf_header *hdr = btf->hdr; - const char *start = btf->nohdr_data + hdr->str_off; + const char *start = btf->strs_data; const char *end = start + btf->hdr->str_len; if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || @@ -137,14 +250,12 @@ static int btf_parse_str_sec(struct btf *btf) return -EINVAL; } - btf->strings = start; - return 0; } -static int btf_type_size(struct btf_type *t) +static int btf_type_size(const struct btf_type *t) { - int base_size = sizeof(struct btf_type); + const int base_size = sizeof(struct btf_type); __u16 vlen = btf_vlen(t); switch (btf_kind(t)) { @@ -177,25 +288,120 @@ static int btf_type_size(struct btf_type *t) } } +static void btf_bswap_type_base(struct btf_type *t) +{ + t->name_off = bswap_32(t->name_off); + t->info = bswap_32(t->info); + t->type = bswap_32(t->type); +} + +static int btf_bswap_type_rest(struct btf_type *t) +{ + struct btf_var_secinfo *v; + struct btf_member *m; + struct btf_array *a; + struct btf_param *p; + struct btf_enum *e; + __u16 vlen = btf_vlen(t); + int i; + + switch (btf_kind(t)) { + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + return 0; + case BTF_KIND_INT: + *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1)); + return 0; + case BTF_KIND_ENUM: + for (i = 0, e = btf_enum(t); i < vlen; i++, e++) { + e->name_off = bswap_32(e->name_off); + e->val = bswap_32(e->val); + } + return 0; + case BTF_KIND_ARRAY: + a = btf_array(t); + a->type = bswap_32(a->type); + a->index_type = bswap_32(a->index_type); + a->nelems = bswap_32(a->nelems); + return 0; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + for (i = 0, m = btf_members(t); i < vlen; i++, m++) { + m->name_off = bswap_32(m->name_off); + m->type = bswap_32(m->type); + m->offset = bswap_32(m->offset); + } + return 0; + case BTF_KIND_FUNC_PROTO: + for (i = 0, p = btf_params(t); i < vlen; i++, p++) { + p->name_off = bswap_32(p->name_off); + p->type = bswap_32(p->type); + } + return 0; + case BTF_KIND_VAR: + btf_var(t)->linkage = bswap_32(btf_var(t)->linkage); + return 0; + case BTF_KIND_DATASEC: + for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) { + v->type = bswap_32(v->type); + v->offset = bswap_32(v->offset); + v->size = bswap_32(v->size); + } + return 0; + default: + pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); + return -EINVAL; + } +} + static int btf_parse_type_sec(struct btf *btf) { struct btf_header *hdr = btf->hdr; - void *nohdr_data = btf->nohdr_data; - void *next_type = nohdr_data + hdr->type_off; - void *end_type = nohdr_data + hdr->str_off; + void *next_type = btf->types_data; + void *end_type = next_type + hdr->type_len; + int err, i = 0, type_size; - while (next_type < end_type) { - struct btf_type *t = next_type; - int type_size; - int err; + /* VOID (type_id == 0) is specially handled by btf__get_type_by_id(), + * so ensure we can never properly use its offset from index by + * setting it to a large value + */ + err = btf_add_type_idx_entry(btf, UINT_MAX); + if (err) + return err; - type_size = btf_type_size(t); + while (next_type + sizeof(struct btf_type) <= end_type) { + i++; + + if (btf->swapped_endian) + btf_bswap_type_base(next_type); + + type_size = btf_type_size(next_type); if (type_size < 0) return type_size; - next_type += 
type_size; - err = btf_add_type(btf, t); + if (next_type + type_size > end_type) { + pr_warn("BTF type [%d] is malformed\n", i); + return -EINVAL; + } + + if (btf->swapped_endian && btf_bswap_type_rest(next_type)) + return -EINVAL; + + err = btf_add_type_idx_entry(btf, next_type - btf->types_data); if (err) return err; + + next_type += type_size; + btf->nr_types++; + } + + if (next_type != end_type) { + pr_warn("BTF types data is malformed\n"); + return -EINVAL; } return 0; @@ -206,12 +412,116 @@ __u32 btf__get_nr_types(const struct btf *btf) return btf->nr_types; } +/* internal helper returning non-const pointer to a type */ +static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id) +{ + if (type_id == 0) + return &btf_void; + + return btf->types_data + btf->type_offs[type_id]; +} + const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) { if (type_id > btf->nr_types) return NULL; + return btf_type_by_id((struct btf *)btf, type_id); +} - return btf->types[type_id]; +static int determine_ptr_size(const struct btf *btf) +{ + const struct btf_type *t; + const char *name; + int i; + + for (i = 1; i <= btf->nr_types; i++) { + t = btf__type_by_id(btf, i); + if (!btf_is_int(t)) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (!name) + continue; + + if (strcmp(name, "long int") == 0 || + strcmp(name, "long unsigned int") == 0) { + if (t->size != 4 && t->size != 8) + continue; + return t->size; + } + } + + return -1; +} + +static size_t btf_ptr_sz(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; +} + +/* Return pointer size this BTF instance assumes. The size is heuristically + * determined by looking for 'long' or 'unsigned long' integer type and + * recording its size in bytes. If BTF type information doesn't have any such + * type, this function returns 0. In the latter case, native architecture's + * pointer size is assumed, so will be either 4 or 8, depending on + * architecture that libbpf was compiled for. It's possible to override + * guessed value by using btf__set_pointer_size() API. + */ +size_t btf__pointer_size(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + + if (btf->ptr_sz < 0) + /* not enough BTF type info to guess */ + return 0; + + return btf->ptr_sz; +} + +/* Override or set pointer size in bytes. Only values of 4 and 8 are + * supported. + */ +int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) +{ + if (ptr_sz != 4 && ptr_sz != 8) + return -EINVAL; + btf->ptr_sz = ptr_sz; + return 0; +} + +static bool is_host_big_endian(void) +{ +#if __BYTE_ORDER == __LITTLE_ENDIAN + return false; +#elif __BYTE_ORDER == __BIG_ENDIAN + return true; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif +} + +enum btf_endianness btf__endianness(const struct btf *btf) +{ + if (is_host_big_endian()) + return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; + else + return btf->swapped_endian ? 
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; +} + +int btf__set_endianness(struct btf *btf, enum btf_endianness endian) +{ + if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) + return -EINVAL; + + btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); + if (!btf->swapped_endian) { + free(btf->raw_data_swapped); + btf->raw_data_swapped = NULL; + } + return 0; } static bool btf_type_is_void(const struct btf_type *t) @@ -246,7 +556,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) size = t->size; goto done; case BTF_KIND_PTR: - size = sizeof(void *); + size = btf_ptr_sz(btf); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: @@ -278,6 +588,45 @@ done: return nelems * size; } +int btf__align_of(const struct btf *btf, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + __u16 kind = btf_kind(t); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_ENUM: + return min(btf_ptr_sz(btf), (size_t)t->size); + case BTF_KIND_PTR: + return btf_ptr_sz(btf); + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + return btf__align_of(btf, t->type); + case BTF_KIND_ARRAY: + return btf__align_of(btf, btf_array(t)->type); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = btf_members(t); + __u16 vlen = btf_vlen(t); + int i, max_align = 1, align; + + for (i = 0; i < vlen; i++, m++) { + align = btf__align_of(btf, m->type); + if (align <= 0) + return align; + max_align = max(max_align, align); + } + + return max_align; + } + default: + pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); + return 0; + } +} + int btf__resolve_type(const struct btf *btf, __u32 type_id) { const struct btf_type *t; @@ -306,7 +655,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) return 0; for (i = 1; i <= btf->nr_types; i++) { - const struct btf_type *t = btf->types[i]; + const struct btf_type *t = btf__type_by_id(btf, i); const char *name = btf__name_by_offset(btf, t->name_off); if (name && !strcmp(type_name, name)) @@ -316,20 +665,91 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) return -ENOENT; } +__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, + __u32 kind) +{ + __u32 i; + + if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) + return 0; + + for (i = 1; i <= btf->nr_types; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name; + + if (btf_kind(t) != kind) + continue; + name = btf__name_by_offset(btf, t->name_off); + if (name && !strcmp(type_name, name)) + return i; + } + + return -ENOENT; +} + +static bool btf_is_modifiable(const struct btf *btf) +{ + return (void *)btf->hdr != btf->raw_data; +} + void btf__free(struct btf *btf) { - if (!btf) + if (IS_ERR_OR_NULL(btf)) return; - if (btf->fd != -1) + if (btf->fd >= 0) close(btf->fd); - free(btf->data); - free(btf->types); + if (btf_is_modifiable(btf)) { + /* if BTF was modified after loading, it will have a split + * in-memory representation for header, types, and strings + * sections, so we need to free all of them individually. It + * might still have a cached contiguous raw data present, + * which will be unconditionally freed below. 
+ */ + free(btf->hdr); + free(btf->types_data); + free(btf->strs_data); + } + free(btf->raw_data); + free(btf->raw_data_swapped); + free(btf->type_offs); free(btf); } -struct btf *btf__new(__u8 *data, __u32 size) +struct btf *btf__new_empty(void) +{ + struct btf *btf; + + btf = calloc(1, sizeof(*btf)); + if (!btf) + return ERR_PTR(-ENOMEM); + + btf->fd = -1; + btf->ptr_sz = sizeof(void *); + btf->swapped_endian = false; + + /* +1 for empty string at offset 0 */ + btf->raw_size = sizeof(struct btf_header) + 1; + btf->raw_data = calloc(1, btf->raw_size); + if (!btf->raw_data) { + free(btf); + return ERR_PTR(-ENOMEM); + } + + btf->hdr = btf->raw_data; + btf->hdr->hdr_len = sizeof(struct btf_header); + btf->hdr->magic = BTF_MAGIC; + btf->hdr->version = BTF_VERSION; + + btf->types_data = btf->raw_data + btf->hdr->hdr_len; + btf->strs_data = btf->raw_data + btf->hdr->hdr_len; + btf->hdr->str_len = 1; /* empty string at offset 0 */ + + return btf; +} + +struct btf *btf__new(const void *data, __u32 size) { struct btf *btf; int err; @@ -338,26 +758,28 @@ struct btf *btf__new(__u8 *data, __u32 size) if (!btf) return ERR_PTR(-ENOMEM); - btf->fd = -1; - - btf->data = malloc(size); - if (!btf->data) { + btf->raw_data = malloc(size); + if (!btf->raw_data) { err = -ENOMEM; goto done; } + memcpy(btf->raw_data, data, size); + btf->raw_size = size; - memcpy(btf->data, data, size); - btf->data_size = size; - + btf->hdr = btf->raw_data; err = btf_parse_hdr(btf); if (err) goto done; + btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off; + btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off; + err = btf_parse_str_sec(btf); + err = err ?: btf_parse_type_sec(btf); if (err) goto done; - err = btf_parse_type_sec(btf); + btf->fd = -1; done: if (err) { @@ -368,17 +790,6 @@ done: return btf; } -static bool btf_check_endianness(const GElf_Ehdr *ehdr) -{ -#if __BYTE_ORDER == __LITTLE_ENDIAN - return ehdr->e_ident[EI_DATA] == ELFDATA2LSB; -#elif __BYTE_ORDER == __BIG_ENDIAN - return ehdr->e_ident[EI_DATA] == ELFDATA2MSB; -#else -# error "Unrecognized __BYTE_ORDER__" -#endif -} - struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) { Elf_Data *btf_data = NULL, *btf_ext_data = NULL; @@ -389,14 +800,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) GElf_Ehdr ehdr; if (elf_version(EV_CURRENT) == EV_NONE) { - pr_warning("failed to init libelf for %s\n", path); + pr_warn("failed to init libelf for %s\n", path); return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } fd = open(path, O_RDONLY); if (fd < 0) { err = -errno; - pr_warning("failed to open %s: %s\n", path, strerror(errno)); + pr_warn("failed to open %s: %s\n", path, strerror(errno)); return ERR_PTR(err); } @@ -404,19 +815,15 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) elf = elf_begin(fd, ELF_C_READ, NULL); if (!elf) { - pr_warning("failed to open %s as ELF file\n", path); + pr_warn("failed to open %s as ELF file\n", path); goto done; } if (!gelf_getehdr(elf, &ehdr)) { - pr_warning("failed to get EHDR from %s\n", path); - goto done; - } - if (!btf_check_endianness(&ehdr)) { - pr_warning("non-native ELF endianness is not supported\n"); + pr_warn("failed to get EHDR from %s\n", path); goto done; } if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { - pr_warning("failed to get e_shstrndx from %s\n", path); + pr_warn("failed to get e_shstrndx from %s\n", path); goto done; } @@ -426,29 +833,29 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) 
idx++; if (gelf_getshdr(scn, &sh) != &sh) { - pr_warning("failed to get section(%d) header from %s\n", - idx, path); + pr_warn("failed to get section(%d) header from %s\n", + idx, path); goto done; } name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); if (!name) { - pr_warning("failed to get section(%d) name from %s\n", - idx, path); + pr_warn("failed to get section(%d) name from %s\n", + idx, path); goto done; } if (strcmp(name, BTF_ELF_SEC) == 0) { btf_data = elf_getdata(scn, 0); if (!btf_data) { - pr_warning("failed to get section(%d, %s) data from %s\n", - idx, name, path); + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); goto done; } continue; } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { btf_ext_data = elf_getdata(scn, 0); if (!btf_ext_data) { - pr_warning("failed to get section(%d, %s) data from %s\n", - idx, name, path); + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); goto done; } continue; @@ -465,6 +872,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) if (IS_ERR(btf)) goto done; + switch (gelf_getclass(elf)) { + case ELFCLASS32: + btf__set_pointer_size(btf, 4); + break; + case ELFCLASS64: + btf__set_pointer_size(btf, 8); + break; + default: + pr_warn("failed to get ELF class (bitness) for %s\n", path); + break; + } + if (btf_ext && btf_ext_data) { *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); @@ -494,6 +913,83 @@ done: return btf; } +struct btf *btf__parse_raw(const char *path) +{ + struct btf *btf = NULL; + void *data = NULL; + FILE *f = NULL; + __u16 magic; + int err = 0; + long sz; + + f = fopen(path, "rb"); + if (!f) { + err = -errno; + goto err_out; + } + + /* check BTF magic */ + if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { + err = -EIO; + goto err_out; + } + if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) { + /* definitely not a raw BTF */ + err = -EPROTO; + goto err_out; + } + + /* get file size */ + if (fseek(f, 0, SEEK_END)) { + err = -errno; + goto err_out; + } + sz = ftell(f); + if (sz < 0) { + err = -errno; + goto err_out; + } + /* rewind to the start */ + if (fseek(f, 0, SEEK_SET)) { + err = -errno; + goto err_out; + } + + /* pre-alloc memory and read all of BTF data */ + data = malloc(sz); + if (!data) { + err = -ENOMEM; + goto err_out; + } + if (fread(data, 1, sz, f) < sz) { + err = -EIO; + goto err_out; + } + + /* finally parse BTF data */ + btf = btf__new(data, sz); + +err_out: + free(data); + if (f) + fclose(f); + return err ? 
ERR_PTR(err) : btf; +} + +struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) +{ + struct btf *btf; + + if (btf_ext) + *btf_ext = NULL; + + btf = btf__parse_raw(path); + if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO) + return btf; + + return btf__parse_elf(path, btf_ext); +} + static int compare_vsi_off(const void *_a, const void *_b) { const struct btf_var_secinfo *a = _a; @@ -517,6 +1013,12 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, return -ENOENT; } + /* .extern datasec size and var offsets were set correctly during + * extern collection step, so just skip straight to sorting variables + */ + if (t->size) + goto sort_vars; + ret = bpf_object__section_size(obj, name, &size); if (ret || !size || (t->size && t->size != size)) { pr_debug("Invalid size for section %s: %u bytes\n", name, size); @@ -553,7 +1055,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, vsi->offset = off; } - qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off); +sort_vars: + qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); return 0; } @@ -563,7 +1066,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf) __u32 i; for (i = 1; i <= btf->nr_types; i++) { - struct btf_type *t = btf->types[i]; + struct btf_type *t = btf_type_by_id(btf, i); /* Loader needs to fix up some of the things compiler * couldn't get its hands on while emitting BTF. This @@ -580,28 +1083,49 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf) return err; } +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); + int btf__load(struct btf *btf) { - __u32 log_buf_size = BPF_LOG_BUF_SIZE; + __u32 log_buf_size = 0, raw_size; char *log_buf = NULL; + void *raw_data; int err = 0; if (btf->fd >= 0) return -EEXIST; - log_buf = malloc(log_buf_size); - if (!log_buf) - return -ENOMEM; +retry_load: + if (log_buf_size) { + log_buf = malloc(log_buf_size); + if (!log_buf) + return -ENOMEM; - *log_buf = 0; + *log_buf = 0; + } - btf->fd = bpf_load_btf(btf->data, btf->data_size, - log_buf, log_buf_size, false); + raw_data = btf_get_raw_data(btf, &raw_size, false); + if (!raw_data) { + err = -ENOMEM; + goto done; + } + /* cache native raw data representation */ + btf->raw_size = raw_size; + btf->raw_data = raw_data; + + btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false); if (btf->fd < 0) { + if (!log_buf || errno == ENOSPC) { + log_buf_size = max((__u32)BPF_LOG_BUF_SIZE, + log_buf_size << 1); + free(log_buf); + goto retry_load; + } + err = -errno; - pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno); + pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); if (*log_buf) - pr_warning("%s\n", log_buf); + pr_warn("%s\n", log_buf); goto done; } @@ -615,18 +1139,91 @@ int btf__fd(const struct btf *btf) return btf->fd; } -const void *btf__get_raw_data(const struct btf *btf, __u32 *size) +void btf__set_fd(struct btf *btf, int fd) { - *size = btf->data_size; - return btf->data; + btf->fd = fd; +} + +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian) +{ + struct btf_header *hdr = btf->hdr; + struct btf_type *t; + void *data, *p; + __u32 data_sz; + int i; + + data = swap_endian ? 
btf->raw_data_swapped : btf->raw_data; + if (data) { + *size = btf->raw_size; + return data; + } + + data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len; + data = calloc(1, data_sz); + if (!data) + return NULL; + p = data; + + memcpy(p, hdr, hdr->hdr_len); + if (swap_endian) + btf_bswap_hdr(p); + p += hdr->hdr_len; + + memcpy(p, btf->types_data, hdr->type_len); + if (swap_endian) { + for (i = 1; i <= btf->nr_types; i++) { + t = p + btf->type_offs[i]; + /* btf_bswap_type_rest() relies on native t->info, so + * we swap base type info after we swapped all the + * additional information + */ + if (btf_bswap_type_rest(t)) + goto err_out; + btf_bswap_type_base(t); + } + } + p += hdr->type_len; + + memcpy(p, btf->strs_data, hdr->str_len); + p += hdr->str_len; + + *size = data_sz; + return data; +err_out: + free(data); + return NULL; +} + +const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size) +{ + struct btf *btf = (struct btf *)btf_ro; + __u32 data_sz; + void *data; + + data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); + if (!data) + return NULL; + + btf->raw_size = data_sz; + if (btf->swapped_endian) + btf->raw_data_swapped = data; + else + btf->raw_data = data; + *size = data_sz; + return data; +} + +const char *btf__str_by_offset(const struct btf *btf, __u32 offset) +{ + if (offset < btf->hdr->str_len) + return btf->strs_data + offset; + else + return NULL; } const char *btf__name_by_offset(const struct btf *btf, __u32 offset) { - if (offset < btf->hdr->str_len) - return &btf->strings[offset]; - else - return NULL; + return btf__str_by_offset(btf, offset); } int btf__get_from_id(__u32 id, struct btf **btf) @@ -706,8 +1303,8 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) { - pr_warning("map:%s length of '____btf_map_%s' is too long\n", - map_name, map_name); + pr_warn("map:%s length of '____btf_map_%s' is too long\n", + map_name, map_name); return -EINVAL; } @@ -720,14 +1317,14 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, container_type = btf__type_by_id(btf, container_id); if (!container_type) { - pr_warning("map:%s cannot find BTF type for container_id:%u\n", - map_name, container_id); + pr_warn("map:%s cannot find BTF type for container_id:%u\n", + map_name, container_id); return -EINVAL; } if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) { - pr_warning("map:%s container_name:%s is an invalid container struct\n", - map_name, container_name); + pr_warn("map:%s container_name:%s is an invalid container struct\n", + map_name, container_name); return -EINVAL; } @@ -736,25 +1333,25 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, key_size = btf__resolve_size(btf, key->type); if (key_size < 0) { - pr_warning("map:%s invalid BTF key_type_size\n", map_name); + pr_warn("map:%s invalid BTF key_type_size\n", map_name); return key_size; } if (expected_key_size != key_size) { - pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", - map_name, (__u32)key_size, expected_key_size); + pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", + map_name, (__u32)key_size, expected_key_size); return -EINVAL; } value_size = btf__resolve_size(btf, value->type); if (value_size < 0) { - pr_warning("map:%s invalid BTF value_type_size\n", map_name); + pr_warn("map:%s invalid BTF value_type_size\n", map_name); return value_size; } if (expected_value_size != value_size) { - pr_warning("map:%s 
btf_value_type_size:%u != map_def_value_size:%u\n",
-			   map_name, (__u32)value_size, expected_value_size);
+		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+			map_name, (__u32)value_size, expected_value_size);
 		return -EINVAL;
 	}

@@ -764,6 +1361,970 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 	return 0;
 }

+static size_t strs_hash_fn(const void *key, void *ctx)
+{
+	struct btf *btf = ctx;
+	const char *str = btf->strs_data + (long)key;
+
+	return str_hash(str);
+}
+
+static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx)
+{
+	struct btf *btf = ctx;
+	const char *str1 = btf->strs_data + (long)key1;
+	const char *str2 = btf->strs_data + (long)key2;
+
+	return strcmp(str1, str2) == 0;
+}
+
+static void btf_invalidate_raw_data(struct btf *btf)
+{
+	if (btf->raw_data) {
+		free(btf->raw_data);
+		btf->raw_data = NULL;
+	}
+	if (btf->raw_data_swapped) {
+		free(btf->raw_data_swapped);
+		btf->raw_data_swapped = NULL;
+	}
+}
+
+/* Ensure BTF is ready to be modified (by splitting into three memory
+ * regions for header, types, and strings). Also invalidate cached
+ * raw_data, if any.
+ */
+static int btf_ensure_modifiable(struct btf *btf)
+{
+	void *hdr, *types, *strs, *strs_end, *s;
+	struct hashmap *hash = NULL;
+	long off;
+	int err;
+
+	if (btf_is_modifiable(btf)) {
+		/* any BTF modification invalidates raw_data */
+		btf_invalidate_raw_data(btf);
+		return 0;
+	}
+
+	/* split raw data into three memory regions */
+	hdr = malloc(btf->hdr->hdr_len);
+	types = malloc(btf->hdr->type_len);
+	strs = malloc(btf->hdr->str_len);
+	if (!hdr || !types || !strs)
+		goto err_out;
+
+	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
+	memcpy(types, btf->types_data, btf->hdr->type_len);
+	memcpy(strs, btf->strs_data, btf->hdr->str_len);
+
+	/* build lookup index for all strings */
+	hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, btf);
+	if (IS_ERR(hash)) {
+		err = PTR_ERR(hash);
+		hash = NULL;
+		goto err_out;
+	}
+
+	strs_end = strs + btf->hdr->str_len;
+	for (off = 0, s = strs; s < strs_end; off += strlen(s) + 1, s = strs + off) {
+		/* hashmap__add() returns EEXIST if string with the same
+		 * content already is in the hash map
+		 */
+		err = hashmap__add(hash, (void *)off, (void *)off);
+		if (err == -EEXIST)
+			continue; /* duplicate */
+		if (err)
+			goto err_out;
+	}
+
+	/* only when everything was successful, update internal state */
+	btf->hdr = hdr;
+	btf->types_data = types;
+	btf->types_data_cap = btf->hdr->type_len;
+	btf->strs_data = strs;
+	btf->strs_data_cap = btf->hdr->str_len;
+	btf->strs_hash = hash;
+	/* if BTF was created from scratch, all strings are guaranteed to be
+	 * unique and deduplicated
+	 */
+	btf->strs_deduped = btf->hdr->str_len <= 1;
+
+	/* invalidate raw_data representation */
+	btf_invalidate_raw_data(btf);
+
+	return 0;
+
+err_out:
+	hashmap__free(hash);
+	free(hdr);
+	free(types);
+	free(strs);
+	return -ENOMEM;
+}
+
+static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
+{
+	return btf_add_mem(&btf->strs_data, &btf->strs_data_cap, 1,
+			   btf->hdr->str_len, BTF_MAX_STR_OFFSET, add_sz);
+}
+
+/* Find an offset in BTF string section that corresponds to a given string *s*.
+ * Returns:
+ * - >0 offset into string section, if string is found;
+ * - -ENOENT, if string is not in the string section;
+ * - <0, on any other error.
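+ *
+ * Example (an illustrative sketch, not part of this patch): look the
+ * string up first and only append it via btf__add_str() on a miss:
+ *
+ *	int off = btf__find_str(btf, "int");
+ *	if (off == -ENOENT)
+ *		off = btf__add_str(btf, "int");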
+ */ +int btf__find_str(struct btf *btf, const char *s) +{ + long old_off, new_off, len; + void *p; + + /* BTF needs to be in a modifiable state to build string lookup index */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + /* see btf__add_str() for why we do this */ + len = strlen(s) + 1; + p = btf_add_str_mem(btf, len); + if (!p) + return -ENOMEM; + + new_off = btf->hdr->str_len; + memcpy(p, s, len); + + if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off)) + return old_off; + + return -ENOENT; +} + +/* Add a string s to the BTF string section. + * Returns: + * - > 0 offset into string section, on success; + * - < 0, on error. + */ +int btf__add_str(struct btf *btf, const char *s) +{ + long old_off, new_off, len; + void *p; + int err; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + /* Hashmap keys are always offsets within btf->strs_data, so to even + * look up some string from the "outside", we need to first append it + * at the end, so that it can be addressed with an offset. Luckily, + * until btf->hdr->str_len is incremented, that string is just a piece + * of garbage for the rest of BTF code, so no harm, no foul. On the + * other hand, if the string is unique, it's already appended and + * ready to be used, only a simple btf->hdr->str_len increment away. + */ + len = strlen(s) + 1; + p = btf_add_str_mem(btf, len); + if (!p) + return -ENOMEM; + + new_off = btf->hdr->str_len; + memcpy(p, s, len); + + /* Now attempt to add the string, but only if the string with the same + * contents doesn't exist already (HASHMAP_ADD strategy). If such + * string exists, we'll get its offset in old_off (that's old_key). + */ + err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off, + HASHMAP_ADD, (const void **)&old_off, NULL); + if (err == -EEXIST) + return old_off; /* duplicated string, return existing offset */ + if (err) + return err; + + btf->hdr->str_len += len; /* new unique string, adjust data length */ + return new_off; +} + +static void *btf_add_type_mem(struct btf *btf, size_t add_sz) +{ + return btf_add_mem(&btf->types_data, &btf->types_data_cap, 1, + btf->hdr->type_len, UINT_MAX, add_sz); +} + +static __u32 btf_type_info(int kind, int vlen, int kflag) +{ + return (kflag << 31) | (kind << 24) | vlen; +} + +static void btf_type_inc_vlen(struct btf_type *t) +{ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); +} + +/* + * Append new BTF_KIND_INT type with: + * - *name* - non-empty, non-NULL type name; + * - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes; + * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL. + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
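+ *
+ * Example (an illustrative sketch, not part of this patch; error
+ * handling elided): append a signed 4-byte "int" and a pointer to it:
+ *
+ *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ *	int ptr_id = btf__add_ptr(btf, int_id);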
+ */ +int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) +{ + struct btf_type *t; + int sz, err, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return -EINVAL; + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) + return -EINVAL; + if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) + return -EINVAL; + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type) + sizeof(int); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + /* if something goes wrong later, we might end up with an extra string, + * but that shouldn't be a problem, because BTF can't be constructed + * completely anyway and will most probably be just discarded + */ + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_INT, 0, 0); + t->size = byte_sz; + /* set INT info, we don't allow setting legacy bit offset/size */ + *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* it's completely legal to append BTF types with type IDs pointing forward to + * types that haven't been appended yet, so we only make sure that id looks + * sane, we can't guarantee that ID will always be valid + */ +static int validate_type_id(int id) +{ + if (id < 0 || id > BTF_MAX_NR_TYPES) + return -EINVAL; + return 0; +} + +/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ +static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) +{ + struct btf_type *t; + int sz, name_off = 0, err; + + if (validate_type_id(ref_type_id)) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->type = ref_type_id; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new BTF_KIND_PTR type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_ptr(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_ARRAY type with: + * - *index_type_id* - type ID of the type describing array index; + * - *elem_type_id* - type ID of the type describing array element; + * - *nr_elems* - the size of the array; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
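+ *
+ * Example (an illustrative sketch, not part of this patch; *int_id* is
+ * assumed to come from an earlier btf__add_int() call): int[10], with
+ * the index type passed first and the element type second:
+ *
+ *	int arr_id = btf__add_array(btf, int_id, int_id, 10);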
+ */ +int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems) +{ + struct btf_type *t; + struct btf_array *a; + int sz, err; + + if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type) + sizeof(struct btf_array); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + t->name_off = 0; + t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); + t->size = 0; + + a = btf_array(t); + a->type = elem_type_id; + a->index_type = index_type_id; + a->nelems = nr_elems; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* generic STRUCT/UNION append function */ +static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) +{ + struct btf_type *t; + int sz, err, name_off = 0; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0 and no kflag; this will be adjusted when + * adding each member + */ + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->size = bytes_sz; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new BTF_KIND_STRUCT type with: + * - *name* - name of the struct, can be NULL or empty for anonymous structs; + * - *byte_sz* - size of the struct, in bytes; + * + * Struct initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_struct() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz); +} + +/* + * Append new BTF_KIND_UNION type with: + * - *name* - name of the union, can be NULL or empty for anonymous union; + * - *byte_sz* - size of the union, in bytes; + * + * Union initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_union() succeeds. All fields + * should have *bit_offset* of 0. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); +} + +/* + * Append new field for the current STRUCT/UNION type with: + * - *name* - name of the field, can be NULL or empty for anonymous field; + * - *type_id* - type ID for the type describing field type; + * - *bit_offset* - bit offset of the start of the field within struct/union; + * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; + * Returns: + * - 0, on success; + * - <0, on error. 
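+ *
+ * Example (an illustrative sketch, not part of this patch; *int_id* is
+ * assumed): struct pair { int x; int y; }, 8 bytes total, with field
+ * offsets given in bits:
+ *
+ *	int pair_id = btf__add_struct(btf, "pair", 8);
+ *	btf__add_field(btf, "x", int_id, 0, 0);
+ *	btf__add_field(btf, "y", int_id, 32, 0);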
+ */ +int btf__add_field(struct btf *btf, const char *name, int type_id, + __u32 bit_offset, __u32 bit_size) +{ + struct btf_type *t; + struct btf_member *m; + bool is_bitfield; + int sz, name_off = 0; + + /* last type should be union/struct */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_composite(t)) + return -EINVAL; + + if (validate_type_id(type_id)) + return -EINVAL; + /* best-effort bit field offset/size enforcement */ + is_bitfield = bit_size || (bit_offset % 8 != 0); + if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) + return -EINVAL; + + /* only offset 0 is allowed for unions */ + if (btf_is_union(t) && bit_offset) + return -EINVAL; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_member); + m = btf_add_type_mem(btf, sz); + if (!m) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + m->name_off = name_off; + m->type = type_id; + m->offset = bit_offset | (bit_size << 24); + + /* btf_add_type_mem can invalidate t pointer */ + t = btf_type_by_id(btf, btf->nr_types); + /* update parent type's vlen and kflag */ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_ENUM type with: + * - *name* - name of the enum, can be NULL or empty for anonymous enums; + * - *byte_sz* - size of the enum, in bytes. + * + * Enum initially has no enum values in it (and corresponds to enum forward + * declaration). Enumerator values can be added by btf__add_enum_value() + * immediately after btf__add_enum() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) +{ + struct btf_type *t; + int sz, err, name_off = 0; + + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) + return -EINVAL; + + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return -ENOMEM; + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0; it will be adjusted when adding enum values */ + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_ENUM, 0, 0); + t->size = byte_sz; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return err; + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + btf->nr_types++; + return btf->nr_types; +} + +/* + * Append new enum value for the current ENUM type with: + * - *name* - name of the enumerator value, can't be NULL or empty; + * - *value* - integer value corresponding to enum value *name*; + * Returns: + * - 0, on success; + * - <0, on error. 
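+ *
+ * Example (an illustrative sketch, not part of this patch): a 4-byte
+ * enum state { LIVE = 1, DEAD = 2 }:
+ *
+ *	int enum_id = btf__add_enum(btf, "state", 4);
+ *	btf__add_enum_value(btf, "LIVE", 1);
+ *	btf__add_enum_value(btf, "DEAD", 2);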
+ */
+int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
+{
+	struct btf_type *t;
+	struct btf_enum *v;
+	int sz, name_off;
+
+	/* last type should be BTF_KIND_ENUM */
+	if (btf->nr_types == 0)
+		return -EINVAL;
+	t = btf_type_by_id(btf, btf->nr_types);
+	if (!btf_is_enum(t))
+		return -EINVAL;
+
+	/* non-empty name */
+	if (!name || !name[0])
+		return -EINVAL;
+	if (value < INT_MIN || value > UINT_MAX)
+		return -E2BIG;
+
+	/* decompose and invalidate raw data */
+	if (btf_ensure_modifiable(btf))
+		return -ENOMEM;
+
+	sz = sizeof(struct btf_enum);
+	v = btf_add_type_mem(btf, sz);
+	if (!v)
+		return -ENOMEM;
+
+	name_off = btf__add_str(btf, name);
+	if (name_off < 0)
+		return name_off;
+
+	v->name_off = name_off;
+	v->val = value;
+
+	/* update parent type's vlen */
+	t = btf_type_by_id(btf, btf->nr_types);
+	btf_type_inc_vlen(t);
+
+	btf->hdr->type_len += sz;
+	btf->hdr->str_off += sz;
+	return 0;
+}
+
+/*
+ * Append new BTF_KIND_FWD type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
+ *   BTF_FWD_UNION, or BTF_FWD_ENUM;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
+{
+	if (!name || !name[0])
+		return -EINVAL;
+
+	switch (fwd_kind) {
+	case BTF_FWD_STRUCT:
+	case BTF_FWD_UNION: {
+		struct btf_type *t;
+		int id;
+
+		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
+		if (id <= 0)
+			return id;
+		t = btf_type_by_id(btf, id);
+		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
+		return id;
+	}
+	case BTF_FWD_ENUM:
+		/* enum forward in BTF currently is just an enum with no enum
+		 * values; we also assume a standard 4-byte size for it
+		 */
+		return btf__add_enum(btf, name, sizeof(int));
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Append new BTF_KIND_TYPEDEF type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
+{
+	if (!name || !name[0])
+		return -EINVAL;
+
+	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_VOLATILE type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_volatile(struct btf *btf, int ref_type_id)
+{
+	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_CONST type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_const(struct btf *btf, int ref_type_id)
+{
+	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_RESTRICT type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_restrict(struct btf *btf, int ref_type_id)
+{
+	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_FUNC type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
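+ *
+ * Example (an illustrative sketch, not part of this patch; *int_id* is
+ * assumed): int foo(int x) with global linkage, built proto-first:
+ *
+ *	int proto_id = btf__add_func_proto(btf, int_id);
+ *	btf__add_func_param(btf, "x", int_id);
+ *	int func_id = btf__add_func(btf, "foo", BTF_FUNC_GLOBAL, proto_id);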
+ */
+int btf__add_func(struct btf *btf, const char *name,
+		  enum btf_func_linkage linkage, int proto_type_id)
+{
+	int id;
+
+	if (!name || !name[0])
+		return -EINVAL;
+	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
+	    linkage != BTF_FUNC_EXTERN)
+		return -EINVAL;
+
+	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
+	if (id > 0) {
+		struct btf_type *t = btf_type_by_id(btf, id);
+
+		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
+	}
+	return id;
+}
+
+/*
+ * Append new BTF_KIND_FUNC_PROTO with:
+ * - *ret_type_id* - type ID for return result of a function.
+ *
+ * Function prototype initially has no arguments, but they can be added by
+ * btf__add_func_param() one by one, immediately after
+ * btf__add_func_proto() succeeded.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_func_proto(struct btf *btf, int ret_type_id)
+{
+	struct btf_type *t;
+	int sz, err;
+
+	if (validate_type_id(ret_type_id))
+		return -EINVAL;
+
+	if (btf_ensure_modifiable(btf))
+		return -ENOMEM;
+
+	sz = sizeof(struct btf_type);
+	t = btf_add_type_mem(btf, sz);
+	if (!t)
+		return -ENOMEM;
+
+	/* start out with vlen=0; this will be adjusted when adding function
+	 * parameters, if necessary
+	 */
+	t->name_off = 0;
+	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
+	t->type = ret_type_id;
+
+	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+	if (err)
+		return err;
+
+	btf->hdr->type_len += sz;
+	btf->hdr->str_off += sz;
+	btf->nr_types++;
+	return btf->nr_types;
+}
+
+/*
+ * Append new function parameter for current FUNC_PROTO type with:
+ * - *name* - parameter name, can be NULL or empty;
+ * - *type_id* - type ID describing the type of the parameter.
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_func_param(struct btf *btf, const char *name, int type_id)
+{
+	struct btf_type *t;
+	struct btf_param *p;
+	int sz, name_off = 0;
+
+	if (validate_type_id(type_id))
+		return -EINVAL;
+
+	/* last type should be BTF_KIND_FUNC_PROTO */
+	if (btf->nr_types == 0)
+		return -EINVAL;
+	t = btf_type_by_id(btf, btf->nr_types);
+	if (!btf_is_func_proto(t))
+		return -EINVAL;
+
+	/* decompose and invalidate raw data */
+	if (btf_ensure_modifiable(btf))
+		return -ENOMEM;
+
+	sz = sizeof(struct btf_param);
+	p = btf_add_type_mem(btf, sz);
+	if (!p)
+		return -ENOMEM;
+
+	if (name && name[0]) {
+		name_off = btf__add_str(btf, name);
+		if (name_off < 0)
+			return name_off;
+	}
+
+	p->name_off = name_off;
+	p->type = type_id;
+
+	/* update parent type's vlen */
+	t = btf_type_by_id(btf, btf->nr_types);
+	btf_type_inc_vlen(t);
+
+	btf->hdr->type_len += sz;
+	btf->hdr->str_off += sz;
+	return 0;
+}
+
+/*
+ * Append new BTF_KIND_VAR type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
+ *   BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
+ * - *type_id* - type ID of the type describing the type of the variable.
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
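+ *
+ * Example (an illustrative sketch, not part of this patch; *int_id* is
+ * assumed): a global, allocated 4-byte variable:
+ *
+ *	int var_id = btf__add_var(btf, "my_var", BTF_VAR_GLOBAL_ALLOCATED, int_id);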
+ */
+int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
+{
+	struct btf_type *t;
+	struct btf_var *v;
+	int sz, err, name_off;
+
+	/* non-empty name */
+	if (!name || !name[0])
+		return -EINVAL;
+	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
+	    linkage != BTF_VAR_GLOBAL_EXTERN)
+		return -EINVAL;
+	if (validate_type_id(type_id))
+		return -EINVAL;
+
+	/* deconstruct BTF, if necessary, and invalidate raw_data */
+	if (btf_ensure_modifiable(btf))
+		return -ENOMEM;
+
+	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
+	t = btf_add_type_mem(btf, sz);
+	if (!t)
+		return -ENOMEM;
+
+	name_off = btf__add_str(btf, name);
+	if (name_off < 0)
+		return name_off;
+
+	t->name_off = name_off;
+	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
+	t->type = type_id;
+
+	v = btf_var(t);
+	v->linkage = linkage;
+
+	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+	if (err)
+		return err;
+
+	btf->hdr->type_len += sz;
+	btf->hdr->str_off += sz;
+	btf->nr_types++;
+	return btf->nr_types;
+}
+
+/*
+ * Append new BTF_KIND_DATASEC type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *byte_sz* - data section size, in bytes.
+ *
+ * Data section is initially empty. Variables info can be added with
+ * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
+{
+	struct btf_type *t;
+	int sz, err, name_off;
+
+	/* non-empty name */
+	if (!name || !name[0])
+		return -EINVAL;
+
+	if (btf_ensure_modifiable(btf))
+		return -ENOMEM;
+
+	sz = sizeof(struct btf_type);
+	t = btf_add_type_mem(btf, sz);
+	if (!t)
+		return -ENOMEM;
+
+	name_off = btf__add_str(btf, name);
+	if (name_off < 0)
+		return name_off;
+
+	/* start with vlen=0, which will be updated as var_secinfos are added */
+	t->name_off = name_off;
+	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
+	t->size = byte_sz;
+
+	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+	if (err)
+		return err;
+
+	btf->hdr->type_len += sz;
+	btf->hdr->str_off += sz;
+	btf->nr_types++;
+	return btf->nr_types;
+}
+
+/*
+ * Append new data section variable information entry for current DATASEC type:
+ * - *var_type_id* - type ID, describing type of the variable;
+ * - *offset* - variable offset within data section, in bytes;
+ * - *byte_sz* - variable size, in bytes.
+ *
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
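+ *
+ * Example (an illustrative sketch, not part of this patch; *var_id* is
+ * assumed to describe a 4-byte variable): a ".data" section holding it
+ * at offset 0:
+ *
+ *	int sec_id = btf__add_datasec(btf, ".data", 4);
+ *	btf__add_datasec_var_info(btf, var_id, 0, 4);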
+ */ +int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz) +{ + struct btf_type *t; + struct btf_var_secinfo *v; + int sz; + + /* last type should be BTF_KIND_DATASEC */ + if (btf->nr_types == 0) + return -EINVAL; + t = btf_type_by_id(btf, btf->nr_types); + if (!btf_is_datasec(t)) + return -EINVAL; + + if (validate_type_id(var_type_id)) + return -EINVAL; + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + + sz = sizeof(struct btf_var_secinfo); + v = btf_add_type_mem(btf, sz); + if (!v) + return -ENOMEM; + + v->type = var_type_id; + v->offset = offset; + v->size = byte_sz; + + /* update parent type's vlen */ + t = btf_type_by_id(btf, btf->nr_types); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + struct btf_ext_sec_setup_param { __u32 off; __u32 len; @@ -887,14 +2448,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext) return btf_ext_setup_info(btf_ext, ¶m); } -static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext) +static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) { struct btf_ext_sec_setup_param param = { - .off = btf_ext->hdr->offset_reloc_off, - .len = btf_ext->hdr->offset_reloc_len, - .min_rec_size = sizeof(struct bpf_offset_reloc), - .ext_info = &btf_ext->offset_reloc_info, - .desc = "offset_reloc", + .off = btf_ext->hdr->core_relo_off, + .len = btf_ext->hdr->core_relo_len, + .min_rec_size = sizeof(struct bpf_core_relo), + .ext_info = &btf_ext->core_relo_info, + .desc = "core_relo", }; return btf_ext_setup_info(btf_ext, ¶m); @@ -910,7 +2471,10 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) return -EINVAL; } - if (hdr->magic != BTF_MAGIC) { + if (hdr->magic == bswap_16(BTF_MAGIC)) { + pr_warn("BTF.ext in non-native endianness is not supported\n"); + return -ENOTSUP; + } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); return -EINVAL; } @@ -935,7 +2499,7 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) void btf_ext__free(struct btf_ext *btf_ext) { - if (!btf_ext) + if (IS_ERR_OR_NULL(btf_ext)) return; free(btf_ext->data); free(btf_ext); @@ -973,10 +2537,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size) if (err) goto done; - if (btf_ext->hdr->hdr_len < - offsetofend(struct btf_ext_header, offset_reloc_len)) + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) goto done; - err = btf_ext_setup_offset_reloc(btf_ext); + err = btf_ext_setup_core_relos(btf_ext); if (err) goto done; @@ -1231,6 +2794,9 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, return -EINVAL; } + if (btf_ensure_modifiable(btf)) + return -ENOMEM; + err = btf_dedup_strings(d); if (err < 0) { pr_debug("btf_dedup_strings failed:%d\n", err); @@ -1330,8 +2896,8 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d, if (d->hypot_cnt == d->hypot_cap) { __u32 *new_list; - d->hypot_cap += max(16, d->hypot_cap / 2); - new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); + d->hypot_cap += max((size_t)16, d->hypot_cap / 2); + new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); if (!new_list) return -ENOMEM; d->hypot_list = new_list; @@ -1415,7 +2981,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, /* special BTF "void" type is made canonical immediately */ d->map[0] = 0; for (i = 1; i <= btf->nr_types; i++) { - struct btf_type *t = d->btf->types[i]; + struct btf_type *t = 
btf_type_by_id(d->btf, i); /* VAR and DATASEC are never deduped and are self-canonical */ if (btf_is_var(t) || btf_is_datasec(t)) @@ -1454,7 +3020,7 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) struct btf_type *t; for (i = 1; i <= d->btf->nr_types; i++) { - t = d->btf->types[i]; + t = btf_type_by_id(d->btf, i); r = fn(&t->name_off, ctx); if (r) return r; @@ -1608,8 +3174,7 @@ static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) */ static int btf_dedup_strings(struct btf_dedup *d) { - const struct btf_header *hdr = d->btf->hdr; - char *start = (char *)d->btf->nohdr_data + hdr->str_off; + char *start = d->btf->strs_data; char *end = start + d->btf->hdr->str_len; char *p = start, *tmp_strs = NULL; struct btf_str_ptrs strs = { @@ -1621,14 +3186,16 @@ static int btf_dedup_strings(struct btf_dedup *d) int i, j, err = 0, grp_idx; bool grp_used; + if (d->btf->strs_deduped) + return 0; + /* build index of all strings */ while (p < end) { if (strs.cnt + 1 > strs.cap) { struct btf_str_ptr *new_ptrs; - strs.cap += max(strs.cnt / 2, 16); - new_ptrs = realloc(strs.ptrs, - sizeof(strs.ptrs[0]) * strs.cap); + strs.cap += max(strs.cnt / 2, 16U); + new_ptrs = libbpf_reallocarray(strs.ptrs, strs.cap, sizeof(strs.ptrs[0])); if (!new_ptrs) { err = -ENOMEM; goto done; @@ -1714,6 +3281,7 @@ static int btf_dedup_strings(struct btf_dedup *d) goto done; d->btf->hdr->str_len = end - start; + d->btf->strs_deduped = true; done: free(tmp_strs); @@ -1990,7 +3558,7 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) */ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) { - struct btf_type *t = d->btf->types[type_id]; + struct btf_type *t = btf_type_by_id(d->btf, type_id); struct hashmap_entry *hash_entry; struct btf_type *cand; /* if we don't find equivalent type, then we are canonical */ @@ -2017,7 +3585,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_int(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_int(t, cand)) { new_id = cand_id; break; @@ -2029,7 +3597,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_enum(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_enum(t, cand)) { new_id = cand_id; break; @@ -2052,7 +3620,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; @@ -2111,13 +3679,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) { __u32 orig_type_id = type_id; - if (!btf_is_fwd(d->btf->types[type_id])) + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) type_id = d->map[type_id]; - if (!btf_is_fwd(d->btf->types[type_id])) + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) return type_id; return orig_type_id; @@ -2245,8 +3813,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) return -ENOMEM; - cand_type = d->btf->types[cand_id]; - canon_type = d->btf->types[canon_id]; + 
cand_type = btf_type_by_id(d->btf, cand_id); + canon_type = btf_type_by_id(d->btf, canon_id); cand_kind = btf_kind(cand_type); canon_kind = btf_kind(canon_type); @@ -2397,8 +3965,8 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d) targ_type_id = d->hypot_map[cand_type_id]; t_id = resolve_type_id(d, targ_type_id); c_id = resolve_type_id(d, cand_type_id); - t_kind = btf_kind(d->btf->types[t_id]); - c_kind = btf_kind(d->btf->types[c_id]); + t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); + c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); /* * Resolve FWD into STRUCT/UNION. * It's ok to resolve FWD into STRUCT/UNION that's not yet @@ -2466,7 +4034,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) if (d->map[type_id] <= BTF_MAX_NR_TYPES) return 0; - t = d->btf->types[type_id]; + t = btf_type_by_id(d->btf, type_id); kind = btf_kind(t); if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) @@ -2487,7 +4055,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because * FWD and compatible STRUCT/UNION are considered equivalent. */ - cand_type = d->btf->types[cand_id]; + cand_type = btf_type_by_id(d->btf, cand_id); if (!btf_shallow_equal_struct(t, cand_type)) continue; @@ -2559,7 +4127,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) if (d->map[type_id] <= BTF_MAX_NR_TYPES) return resolve_type_id(d, type_id); - t = d->btf->types[type_id]; + t = btf_type_by_id(d->btf, type_id); d->map[type_id] = BTF_IN_PROGRESS_ID; switch (btf_kind(t)) { @@ -2577,7 +4145,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_common(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_common(t, cand)) { new_id = cand_id; break; @@ -2601,7 +4169,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_array(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_array(t, cand)) { new_id = cand_id; break; @@ -2633,7 +4201,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) h = btf_hash_fnproto(t); for_each_dedup_cand(d, hash_entry, h) { cand_id = (__u32)(long)hash_entry->value; - cand = d->btf->types[cand_id]; + cand = btf_type_by_id(d->btf, cand_id); if (btf_equal_fnproto(t, cand)) { new_id = cand_id; break; @@ -2681,9 +4249,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d) */ static int btf_dedup_compact_types(struct btf_dedup *d) { - struct btf_type **new_types; + __u32 *new_offs; __u32 next_type_id = 1; - char *types_start, *p; + void *p; int i, len; /* we are going to reuse hypot_map to store compaction remapping */ @@ -2691,41 +4259,34 @@ static int btf_dedup_compact_types(struct btf_dedup *d) for (i = 1; i <= d->btf->nr_types; i++) d->hypot_map[i] = BTF_UNPROCESSED_ID; - types_start = d->btf->nohdr_data + d->btf->hdr->type_off; - p = types_start; + p = d->btf->types_data; for (i = 1; i <= d->btf->nr_types; i++) { if (d->map[i] != i) continue; - len = btf_type_size(d->btf->types[i]); + len = btf_type_size(btf__type_by_id(d->btf, i)); if (len < 0) return len; - memmove(p, d->btf->types[i], len); + memmove(p, btf__type_by_id(d->btf, i), len); d->hypot_map[i] = next_type_id; - d->btf->types[next_type_id] = (struct btf_type *)p; + d->btf->type_offs[next_type_id] = p - 
d->btf->types_data; p += len; next_type_id++; } /* shrink struct btf's internal types index and update btf_header */ d->btf->nr_types = next_type_id - 1; - d->btf->types_size = d->btf->nr_types; - d->btf->hdr->type_len = p - types_start; - new_types = realloc(d->btf->types, - (1 + d->btf->nr_types) * sizeof(struct btf_type *)); - if (!new_types) + d->btf->type_offs_cap = d->btf->nr_types + 1; + d->btf->hdr->type_len = p - d->btf->types_data; + new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, + sizeof(*new_offs)); + if (!new_offs) return -ENOMEM; - d->btf->types = new_types; - - /* make sure string section follows type information without gaps */ - d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; - memmove(p, d->btf->strings, d->btf->hdr->str_len); - d->btf->strings = p; - p += d->btf->hdr->str_len; - - d->btf->data_size = p - (char *)d->btf->data; + d->btf->type_offs = new_offs; + d->btf->hdr->str_off = d->btf->hdr->type_len; + d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len; return 0; } @@ -2758,7 +4319,7 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) */ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) { - struct btf_type *t = d->btf->types[type_id]; + struct btf_type *t = btf_type_by_id(d->btf, type_id); int i, r; switch (btf_kind(t)) { @@ -2860,3 +4421,54 @@ static int btf_dedup_remap_types(struct btf_dedup *d) } return 0; } + +/* + * Probe few well-known locations for vmlinux kernel image and try to load BTF + * data out of it to use for target BTF. + */ +struct btf *libbpf_find_kernel_btf(void) +{ + struct { + const char *path_fmt; + bool raw_btf; + } locations[] = { + /* try canonical vmlinux BTF through sysfs first */ + { "/sys/kernel/btf/vmlinux", true /* raw BTF */ }, + /* fall back to trying to find vmlinux ELF on disk otherwise */ + { "/boot/vmlinux-%1$s" }, + { "/lib/modules/%1$s/vmlinux-%1$s" }, + { "/lib/modules/%1$s/build/vmlinux" }, + { "/usr/lib/modules/%1$s/kernel/vmlinux" }, + { "/usr/lib/debug/boot/vmlinux-%1$s" }, + { "/usr/lib/debug/boot/vmlinux-%1$s.debug" }, + { "/usr/lib/debug/lib/modules/%1$s/vmlinux" }, + }; + char path[PATH_MAX + 1]; + struct utsname buf; + struct btf *btf; + int i; + + uname(&buf); + + for (i = 0; i < ARRAY_SIZE(locations); i++) { + snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release); + + if (access(path, R_OK)) + continue; + + if (locations[i].raw_btf) + btf = btf__parse_raw(path); + else + btf = btf__parse_elf(path, NULL); + + pr_debug("loading kernel BTF '%s': %ld\n", + path, IS_ERR(btf) ? 
PTR_ERR(btf) : 0); + if (IS_ERR(btf)) + continue; + + return btf; + } + + pr_warn("failed to find valid kernel BTF\n"); + return ERR_PTR(-ESRCH); +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 9cb44b4fbf60..57247240a20a 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -5,17 +5,16 @@ #define __LIBBPF_BTF_H #include <stdarg.h> +#include <stdbool.h> #include <linux/btf.h> #include <linux/types.h> +#include "libbpf_common.h" + #ifdef __cplusplus extern "C" { #endif -#ifndef LIBBPF_API -#define LIBBPF_API __attribute__((visibility("default"))) -#endif - #define BTF_ELF_SEC ".BTF" #define BTF_EXT_ELF_SEC ".BTF.ext" #define MAPS_ELF_SEC ".maps" @@ -26,60 +25,38 @@ struct btf_type; struct bpf_object; -/* - * The .BTF.ext ELF section layout defined as - * struct btf_ext_header - * func_info subsection - * - * The func_info subsection layout: - * record size for struct bpf_func_info in the func_info subsection - * struct btf_sec_func_info for section #1 - * a list of bpf_func_info records for section #1 - * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h - * but may not be identical - * struct btf_sec_func_info for section #2 - * a list of bpf_func_info records for section #2 - * ...... - * - * Note that the bpf_func_info record size in .BTF.ext may not - * be the same as the one defined in include/uapi/linux/bpf.h. - * The loader should ensure that record_size meets minimum - * requirement and pass the record as is to the kernel. The - * kernel will handle the func_info properly based on its contents. - */ -struct btf_ext_header { - __u16 magic; - __u8 version; - __u8 flags; - __u32 hdr_len; - - /* All offsets are in bytes relative to the end of this header */ - __u32 func_info_off; - __u32 func_info_len; - __u32 line_info_off; - __u32 line_info_len; - - /* optional part of .BTF.ext header */ - __u32 offset_reloc_off; - __u32 offset_reloc_len; +enum btf_endianness { + BTF_LITTLE_ENDIAN = 0, + BTF_BIG_ENDIAN = 1, }; LIBBPF_API void btf__free(struct btf *btf); -LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); -LIBBPF_API struct btf *btf__parse_elf(const char *path, - struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__new(const void *data, __u32 size); +LIBBPF_API struct btf *btf__new_empty(void); +LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_raw(const char *path); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, const char *type_name); +LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, + const char *type_name, __u32 kind); LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); +LIBBPF_API size_t btf__pointer_size(const struct btf *btf); +LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz); +LIBBPF_API enum btf_endianness btf__endianness(const struct btf *btf); +LIBBPF_API int btf__set_endianness(struct btf *btf, enum btf_endianness endian); LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); +LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); +LIBBPF_API void btf__set_fd(struct btf *btf, int fd); 
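+
+/* Typical write-side flow (an illustrative sketch, not part of this
+ * patch; error handling elided): create an empty BTF object, append
+ * types to it, then load the result into the kernel or grab its raw
+ * serialized bytes:
+ *
+ *	struct btf *btf = btf__new_empty();
+ *	int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ *	__u32 raw_sz;
+ *	const void *raw = btf__get_raw_data(btf, &raw_sz);
+ *	int err = btf__load(btf);
+ */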
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); +LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_key_size, @@ -90,17 +67,62 @@ LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); -LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf, - const struct btf_ext *btf_ext, - const char *sec_name, __u32 insns_cnt, - void **func_info, __u32 *cnt); -LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf, - const struct btf_ext *btf_ext, - const char *sec_name, __u32 insns_cnt, - void **line_info, __u32 *cnt); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions") +int btf_ext__reloc_func_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **func_info, __u32 *cnt); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions") +int btf_ext__reloc_line_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **line_info, __u32 *cnt); LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); +LIBBPF_API struct btf *libbpf_find_kernel_btf(void); + +LIBBPF_API int btf__find_str(struct btf *btf, const char *s); +LIBBPF_API int btf__add_str(struct btf *btf, const char *s); + +LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding); +LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_array(struct btf *btf, + int index_type_id, int elem_type_id, __u32 nr_elems); +/* struct/union construction APIs */ +LIBBPF_API int btf__add_struct(struct btf *btf, const char *name, __u32 sz); +LIBBPF_API int btf__add_union(struct btf *btf, const char *name, __u32 sz); +LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_id, + __u32 bit_offset, __u32 bit_size); + +/* enum construction APIs */ +LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz); +LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value); + +enum btf_fwd_kind { + BTF_FWD_STRUCT = 0, + BTF_FWD_UNION = 1, + BTF_FWD_ENUM = 2, +}; + +LIBBPF_API int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind); +LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id); +LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id); + +/* func and func_proto construction APIs */ +LIBBPF_API int btf__add_func(struct btf *btf, const char *name, + enum btf_func_linkage linkage, int proto_type_id); +LIBBPF_API int btf__add_func_proto(struct btf *btf, int ret_type_id); +LIBBPF_API int btf__add_func_param(struct btf *btf, const char 
*name, int type_id); + +/* var & datasec construction APIs */ +LIBBPF_API int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id); +LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz); +LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, + __u32 offset, __u32 byte_sz); + struct btf_dedup_opts { unsigned int dedup_table_size; bool dont_resolve_fwds; @@ -125,6 +147,30 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d); LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); +struct btf_dump_emit_type_decl_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* optional field name for type declaration, e.g.: + * - struct my_struct <FNAME> + * - void (*<FNAME>)(int) + * - char (*<FNAME>)[123] + */ + const char *field_name; + /* extra indentation level (in number of tabs) to emit for multi-line + * type declarations (e.g., anonymous struct); applies for lines + * starting from the second one (first line is assumed to have + * necessary indentation already) + */ + int indent_level; + /* strip all the const/volatile/restrict mods */ + bool strip_mods; +}; +#define btf_dump_emit_type_decl_opts__last_field strip_mods + +LIBBPF_API int +btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, + const struct btf_dump_emit_type_decl_opts *opts); + /* * A set of helpers for easier BTF types handling */ @@ -143,6 +189,11 @@ static inline bool btf_kflag(const struct btf_type *t) return BTF_INFO_KFLAG(t->info); } +static inline bool btf_is_void(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNKN; +} + static inline bool btf_is_int(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_INT; diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 87f27e2664c5..2f9d685bd522 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -13,6 +13,7 @@ #include <errno.h> #include <linux/err.h> #include <linux/btf.h> +#include <linux/kernel.h> #include "btf.h" #include "hashmap.h" #include "libbpf.h" @@ -57,11 +58,16 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + int ptr_sz; + bool strip_mods; + int last_id; /* per-type auxiliary state */ struct btf_dump_type_aux_state *type_states; + size_t type_states_cap; /* per-type optional cached unique name, must be freed, if present */ const char **cached_names; + size_t cached_names_cap; /* topo-sorted list of dependent type definitions */ __u32 *emit_queue; @@ -87,14 +93,7 @@ struct btf_dump { static size_t str_hash_fn(const void *key, void *ctx) { - const char *s = key; - size_t h = 0; - - while (*s) { - h = h * 31 + *s; - s++; - } - return h; + return str_hash(key); } static bool str_equal_fn(const void *a, const void *b, void *ctx) @@ -116,6 +115,9 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...) va_end(args); } +static int btf_dump_mark_referenced(struct btf_dump *d); +static int btf_dump_resize(struct btf_dump *d); + struct btf_dump *btf_dump__new(const struct btf *btf, const struct btf_ext *btf_ext, const struct btf_dump_opts *opts, btf_dump_printf_fn_t printf_fn) @@ -132,36 +134,71 @@ struct btf_dump *btf_dump__new(const struct btf *btf, d->btf_ext = btf_ext; d->printf_fn = printf_fn; d->opts.ctx = opts ? opts->ctx : NULL; + d->ptr_sz = btf__pointer_size(btf) ?
: sizeof(void *); d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->type_names)) { err = PTR_ERR(d->type_names); d->type_names = NULL; - btf_dump__free(d); - return ERR_PTR(err); + goto err; } d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->ident_names)) { err = PTR_ERR(d->ident_names); d->ident_names = NULL; - btf_dump__free(d); - return ERR_PTR(err); + goto err; } + err = btf_dump_resize(d); + if (err) + goto err; + return d; +err: + btf_dump__free(d); + return ERR_PTR(err); +} + +static int btf_dump_resize(struct btf_dump *d) +{ + int err, last_id = btf__get_nr_types(d->btf); + + if (last_id <= d->last_id) + return 0; + + if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap, + sizeof(*d->type_states), last_id + 1)) + return -ENOMEM; + if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap, + sizeof(*d->cached_names), last_id + 1)) + return -ENOMEM; + + if (d->last_id == 0) { + /* VOID is special */ + d->type_states[0].order_state = ORDERED; + d->type_states[0].emit_state = EMITTED; + } + + /* eagerly determine referenced types for anon enums */ + err = btf_dump_mark_referenced(d); + if (err) + return err; + + d->last_id = last_id; + return 0; } void btf_dump__free(struct btf_dump *d) { - int i, cnt; + int i; - if (!d) + if (IS_ERR_OR_NULL(d)) return; free(d->type_states); if (d->cached_names) { /* any set cached name is owned by us and should be freed */ - for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) { + for (i = 0; i <= d->last_id; i++) { if (d->cached_names[i]) free((void *)d->cached_names[i]); } @@ -175,7 +212,6 @@ void btf_dump__free(struct btf_dump *d) free(d); } -static int btf_dump_mark_referenced(struct btf_dump *d); static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr); static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id); @@ -202,26 +238,9 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id) if (id > btf__get_nr_types(d->btf)) return -EINVAL; - /* type states are lazily allocated, as they might not be needed */ - if (!d->type_states) { - d->type_states = calloc(1 + btf__get_nr_types(d->btf), - sizeof(d->type_states[0])); - if (!d->type_states) - return -ENOMEM; - d->cached_names = calloc(1 + btf__get_nr_types(d->btf), - sizeof(d->cached_names[0])); - if (!d->cached_names) - return -ENOMEM; - - /* VOID is special */ - d->type_states[0].order_state = ORDERED; - d->type_states[0].emit_state = EMITTED; - - /* eagerly determine referenced types for anon enums */ - err = btf_dump_mark_referenced(d); - if (err) - return err; - } + err = btf_dump_resize(d); + if (err) + return err; d->emit_queue_cnt = 0; err = btf_dump_order_type(d, id, false); @@ -252,7 +271,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) const struct btf_type *t; __u16 vlen; - for (i = 1; i <= n; i++) { + for (i = d->last_id + 1; i <= n; i++) { t = btf__type_by_id(d->btf, i); vlen = btf_vlen(t); @@ -307,6 +326,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) } return 0; } + static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) { __u32 *new_queue; @@ -314,8 +334,7 @@ static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) if (d->emit_queue_cnt >= d->emit_queue_cap) { new_cap = max(16, d->emit_queue_cap * 3 / 2); - new_queue = realloc(d->emit_queue, - new_cap * sizeof(new_queue[0])); + new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0])); if (!new_queue) return -ENOMEM; d->emit_queue = new_queue; 
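The resize-on-demand scheme above lets the same btf_dump instance keep working as new types are appended to the underlying BTF object. As a usage sketch (editorial, not part of the patch; it assumes libbpf's <bpf/btf.h> and <bpf/libbpf.h> headers), here is a generic "dump every type as C" loop built on btf_dump__new() and btf_dump__dump_type():

	#include <stdarg.h>
	#include <stdio.h>
	#include <bpf/btf.h>
	#include <bpf/libbpf.h>

	/* btf_dump invokes this callback for each chunk of generated C */
	static void print_cb(void *ctx, const char *fmt, va_list args)
	{
		vprintf(fmt, args);
	}

	static int dump_all_types(const struct btf *btf)
	{
		struct btf_dump *d = btf_dump__new(btf, NULL, NULL, print_cb);
		__u32 id, n = btf__get_nr_types(btf);
		int err = 0;

		if (libbpf_get_error(d))
			return -1;
		/* type IDs are 1-based; ID 0 is the implicit VOID type */
		for (id = 1; id <= n && !err; id++)
			err = btf_dump__dump_type(d, id);
		btf_dump__free(d);
		return err;
	}

This is the same pattern bpftool-style header generation follows: one btf_dump instance, one callback, one pass over all type IDs.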
@@ -428,7 +447,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) /* type loop, but resolvable through fwd declaration */ if (btf_is_composite(t) && through_ptr && t->name_off != 0) return 0; - pr_warning("unsatisfiable type cycle, id:[%u]\n", id); + pr_warn("unsatisfiable type cycle, id:[%u]\n", id); return -ELOOP; } @@ -543,6 +562,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) } } +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t); + static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, @@ -636,8 +658,8 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) if (id == cont_id) return; if (t->name_off == 0) { - pr_warning("anonymous struct/union loop, id:[%u]\n", - id); + pr_warn("anonymous struct/union loop, id:[%u]\n", + id); return; } btf_dump_emit_struct_fwd(d, id, t); @@ -653,7 +675,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) if (!btf_dump_is_blacklisted(d, id)) { btf_dump_emit_typedef_def(d, id, t, 0); btf_dump_printf(d, ";\n\n"); - }; + } tstate->fwd_emitted = 1; break; default: @@ -665,6 +687,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) switch (kind) { case BTF_KIND_INT: + /* Emit type alias definitions if necessary */ + btf_dump_emit_missing_aliases(d, id, t); + tstate->emit_state = EMITTED; break; case BTF_KIND_ENUM: @@ -752,41 +777,6 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) } } -static int btf_align_of(const struct btf *btf, __u32 id) -{ - const struct btf_type *t = btf__type_by_id(btf, id); - __u16 kind = btf_kind(t); - - switch (kind) { - case BTF_KIND_INT: - case BTF_KIND_ENUM: - return min(sizeof(void *), t->size); - case BTF_KIND_PTR: - return sizeof(void *); - case BTF_KIND_TYPEDEF: - case BTF_KIND_VOLATILE: - case BTF_KIND_CONST: - case BTF_KIND_RESTRICT: - return btf_align_of(btf, t->type); - case BTF_KIND_ARRAY: - return btf_align_of(btf, btf_array(t)->type); - case BTF_KIND_STRUCT: - case BTF_KIND_UNION: { - const struct btf_member *m = btf_members(t); - __u16 vlen = btf_vlen(t); - int i, align = 1; - - for (i = 0; i < vlen; i++, m++) - align = max(align, btf_align_of(btf, m->type)); - - return align; - } - default: - pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t)); - return 1; - } -} - static bool btf_is_struct_packed(const struct btf *btf, __u32 id, const struct btf_type *t) { @@ -794,18 +784,18 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id, int align, i, bit_sz; __u16 vlen; - align = btf_align_of(btf, id); + align = btf__align_of(btf, id); /* size of a non-packed struct has to be a multiple of its alignment*/ - if (t->size % align) + if (align && t->size % align) return true; m = btf_members(t); vlen = btf_vlen(t); /* all non-bitfield fields have to be naturally aligned */ for (i = 0; i < vlen; i++, m++) { - align = btf_align_of(btf, m->type); + align = btf__align_of(btf, m->type); bit_sz = btf_member_bitfield_size(t, i); - if (bit_sz == 0 && m->offset % (8 * align) != 0) + if (align && bit_sz == 0 && m->offset % (8 * align) != 0) return true; } @@ -826,7 +816,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d, int align, int lvl) { int off_diff = m_off - cur_off; - int ptr_bits = sizeof(void *) * 8; + int ptr_bits = d->ptr_sz * 8; if (off_diff <= 0) /* no gap */ @@ 
-889,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, fname = btf_name_of(d, m->name_off); m_sz = btf_member_bitfield_size(t, i); m_off = btf_member_bit_offset(t, i); - align = packed ? 1 : btf_align_of(d->btf, m->type); + align = packed ? 1 : btf__align_of(d->btf, m->type); btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1); btf_dump_printf(d, "\n%s", pfx(lvl + 1)); @@ -899,7 +889,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, ": %d", m_sz); off = m_off + m_sz; } else { - m_sz = max(0, btf__resolve_size(d->btf, m->type)); + m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type)); off = m_off + m_sz * 8; } btf_dump_printf(d, ";"); @@ -907,7 +897,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, /* pad at the end, if necessary */ if (is_struct) { - align = packed ? 1 : btf_align_of(d->btf, id); + align = packed ? 1 : btf__align_of(d->btf, id); btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align, lvl + 1); } @@ -919,6 +909,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, " __attribute__((packed))"); } +static const char *missing_base_types[][2] = { + /* + * GCC emits typedefs to its internal __PolyX_t types when compiling Arm + * SIMD intrinsics. Alias them to standard base types. + */ + { "__Poly8_t", "unsigned char" }, + { "__Poly16_t", "unsigned short" }, + { "__Poly64_t", "unsigned long long" }, + { "__Poly128_t", "unsigned __int128" }, +}; + +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + int i; + + for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) { + if (strcmp(name, missing_base_types[i][0]) == 0) { + btf_dump_printf(d, "typedef %s %s;\n\n", + missing_base_types[i][1], name); + break; + } + } +} + static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t) { @@ -946,13 +962,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, /* enumerators share namespace with typedef idents */ dup_cnt = btf_dump_name_dups(d, d->ident_names, name); if (dup_cnt > 1) { - btf_dump_printf(d, "\n%s%s___%zu = %d,", + btf_dump_printf(d, "\n%s%s___%zu = %u,", pfx(lvl + 1), name, dup_cnt, - (__s32)v->val); + (__u32)v->val); } else { - btf_dump_printf(d, "\n%s%s = %d,", + btf_dump_printf(d, "\n%s%s = %u,", pfx(lvl + 1), name, - (__s32)v->val); + (__u32)v->val); } } btf_dump_printf(d, "\n%s}", pfx(lvl)); @@ -975,6 +991,17 @@ static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, { const char *name = btf_dump_ident_name(d, id); + /* + * Old GCC versions are emitting invalid typedef for __gnuc_va_list + * pointing to VOID. This generates warnings from btf_dump() and + * results in uncompilable header file, so we are fixing it up here + * with valid typedef into __builtin_va_list. 
+ */ + if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) { + btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list"); + return; + } + btf_dump_printf(d, "typedef "); btf_dump_emit_type_decl(d, t->type, name, lvl); } @@ -986,8 +1013,7 @@ static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id) if (d->decl_stack_cnt >= d->decl_stack_cap) { new_cap = max(16, d->decl_stack_cap * 3 / 2); - new_stack = realloc(d->decl_stack, - new_cap * sizeof(new_stack[0])); + new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0])); if (!new_stack) return -ENOMEM; d->decl_stack = new_stack; @@ -1040,6 +1066,27 @@ static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id) * of a stack frame. Some care is required to "pop" stack frames after * processing type declaration chain. */ +int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, + const struct btf_dump_emit_type_decl_opts *opts) +{ + const char *fname; + int lvl, err; + + if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) + return -EINVAL; + + err = btf_dump_resize(d); + if (err) + return -EINVAL; + + fname = OPTS_GET(opts, field_name, ""); + lvl = OPTS_GET(opts, indent_level, 0); + d->strip_mods = OPTS_GET(opts, strip_mods, false); + btf_dump_emit_type_decl(d, id, fname, lvl); + d->strip_mods = false; + return 0; +} + static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, const char *fname, int lvl) { @@ -1049,6 +1096,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, stack_start = d->decl_stack_cnt; for (;;) { + t = btf__type_by_id(d->btf, id); + if (d->strip_mods && btf_is_mod(t)) + goto skip_mod; + err = btf_dump_push_decl_stack_id(d, id); if (err < 0) { /* @@ -1056,16 +1107,15 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, * chain, restore stack, emit warning, and try to * proceed nevertheless */ - pr_warning("not enough memory for decl stack:%d", err); + pr_warn("not enough memory for decl stack:%d", err); d->decl_stack_cnt = stack_start; return; } - +skip_mod: /* VOID */ if (id == 0) break; - t = btf__type_by_id(d->btf, id); switch (btf_kind(t)) { case BTF_KIND_PTR: case BTF_KIND_VOLATILE: @@ -1085,8 +1135,8 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, case BTF_KIND_TYPEDEF: goto done; default: - pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n", - btf_kind(t), id); + pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", + btf_kind(t), id); goto done; } } @@ -1141,6 +1191,20 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) } } +static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack) +{ + const struct btf_type *t; + __u32 id; + + while (decl_stack->cnt) { + id = decl_stack->ids[decl_stack->cnt - 1]; + t = btf__type_by_id(d->btf, id); + if (!btf_is_mod(t)) + return; + decl_stack->cnt--; + } +} + static void btf_dump_emit_name(const struct btf_dump *d, const char *name, bool last_was_ptr) { @@ -1239,14 +1303,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, * a const/volatile modifier for array, so we are * going to silently skip them here. 
*/ - while (decls->cnt) { - next_id = decls->ids[decls->cnt - 1]; - next_t = btf__type_by_id(d->btf, next_id); - if (btf_is_mod(next_t)) - decls->cnt--; - else - break; - } + btf_dump_drop_mods(d, decls); if (decls->cnt == 0) { btf_dump_emit_name(d, fname, last_was_ptr); @@ -1274,7 +1331,15 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, __u16 vlen = btf_vlen(t); int i; - btf_dump_emit_mods(d, decls); + /* + * GCC emits extra volatile qualifier for + * __attribute__((noreturn)) function pointers. Clang + * doesn't do it. It's a GCC quirk for backwards + * compatibility with code written for GCC <2.5. So, + * similarly to extra qualifiers for array, just drop + * them, instead of handling them. + */ + btf_dump_drop_mods(d, decls); if (decls->cnt) { btf_dump_printf(d, " ("); btf_dump_emit_type_chain(d, decls, fname, lvl); @@ -1312,8 +1377,8 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, return; } default: - pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n", - kind, id); + pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", + kind, id); return; } diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c index 6122272943e6..3c20b126d60d 100644 --- a/tools/lib/bpf/hashmap.c +++ b/tools/lib/bpf/hashmap.c @@ -12,6 +12,12 @@ #include <linux/err.h> #include "hashmap.h" +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +/* prevent accidental re-addition of reallocarray() */ +#pragma GCC poison reallocarray + /* start with 4 buckets */ #define HASHMAP_MIN_CAP_BITS 2 @@ -56,7 +62,14 @@ struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, void hashmap__clear(struct hashmap *map) { + struct hashmap_entry *cur, *tmp; + size_t bkt; + + hashmap__for_each_entry_safe(map, cur, tmp, bkt) { + free(cur); + } free(map->buckets); + map->buckets = NULL; map->cap = map->cap_bits = map->sz = 0; } @@ -90,8 +103,7 @@ static int hashmap_grow(struct hashmap *map) struct hashmap_entry **new_buckets; struct hashmap_entry *cur, *tmp; size_t new_cap_bits, new_cap; - size_t h; - int bkt; + size_t h, bkt; new_cap_bits = map->cap_bits + 1; if (new_cap_bits < HASHMAP_MIN_CAP_BITS) diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h index bae8879cdf58..10a4c4cd13cf 100644 --- a/tools/lib/bpf/hashmap.h +++ b/tools/lib/bpf/hashmap.h @@ -10,17 +10,34 @@ #include <stdbool.h> #include <stddef.h> -#ifdef __GLIBC__ -#include <bits/wordsize.h> -#else -#include <bits/reg.h> -#endif -#include "libbpf_internal.h" +#include <limits.h> static inline size_t hash_bits(size_t h, int bits) { /* shuffle bits and return requested number of upper bits */ - return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); + if (bits == 0) + return 0; + +#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) + /* LP64 case */ + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); +#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); +#else +# error "Unsupported size_t size" +#endif +} + +/* generic C-string hashing function */ +static inline size_t str_hash(const char *s) +{ + size_t h = 0; + + while (*s) { + h = h * 31 + *s; + s++; + } + return h; } typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); @@ -160,17 +177,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value); * @key: key to iterate entries for */ #define hashmap__for_each_key_entry(map, cur, _key) \ - for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), 
map->ctx),\ - map->cap_bits); \ - map->buckets ? map->buckets[bkt] : NULL; }); \ + for (cur = map->buckets \ + ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + : NULL; \ cur; \ cur = cur->next) \ if (map->equal_fn(cur->key, (_key), map->ctx)) #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ - for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ - map->cap_bits); \ - cur = map->buckets ? map->buckets[bkt] : NULL; }); \ + for (cur = map->buckets \ + ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + : NULL; \ cur && ({ tmp = cur->next; true; }); \ cur = tmp) \ if (map->equal_fn(cur->key, (_key), map->ctx)) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b6403712c2f4..2707a60e7eef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -18,11 +18,13 @@ #include <stdarg.h> #include <libgen.h> #include <inttypes.h> +#include <limits.h> #include <string.h> #include <unistd.h> #include <endian.h> #include <fcntl.h> #include <errno.h> +#include <ctype.h> #include <asm/unistd.h> #include <linux/err.h> #include <linux/kernel.h> @@ -33,6 +35,7 @@ #include <linux/limits.h> #include <linux/perf_event.h> #include <linux/ring_buffer.h> +#include <linux/version.h> #include <sys/epoll.h> #include <sys/ioctl.h> #include <sys/mman.h> @@ -40,9 +43,10 @@ #include <sys/types.h> #include <sys/vfs.h> #include <sys/utsname.h> -#include <tools/libc_compat.h> +#include <sys/resource.h> #include <libelf.h> #include <gelf.h> +#include <zlib.h> #include "libbpf.h" #include "bpf.h" @@ -59,6 +63,8 @@ #define BPF_FS_MAGIC 0xcafe4a11 #endif +#define BPF_INSN_SZ (sizeof(struct bpf_insn)) + /* vsprintf() in __base_pr() uses nonliteral format string. It may break * compilation if user enables corresponding warning. Disable it explicitly. */ @@ -66,6 +72,10 @@ #define __printf(a, b) __attribute__((format(printf, a, b))) +static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); +static const struct btf_type * +skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); + static int __base_pr(enum libbpf_print_level level, const char *format, va_list args) { @@ -98,15 +108,34 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...) va_end(args); } +static void pr_perm_msg(int err) +{ + struct rlimit limit; + char buf[100]; + + if (err != -EPERM || geteuid() != 0) + return; + + err = getrlimit(RLIMIT_MEMLOCK, &limit); + if (err) + return; + + if (limit.rlim_cur == RLIM_INFINITY) + return; + + if (limit.rlim_cur < 1024) + snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); + else if (limit.rlim_cur < 1024*1024) + snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); + else + snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); + + pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", + buf); +} + #define STRERR_BUFSIZE 128 -#define CHECK_ERR(action, err, out) do { \ - err = action; \ - if (err) \ - goto out; \ -} while(0) - - /* Copied from tools/perf/util/util.h */ #ifndef zfree # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) @@ -121,26 +150,68 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
___err; }) #endif -#ifdef HAVE_LIBELF_MMAP_SUPPORT -# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP -#else -# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ -#endif - static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; } -struct bpf_capabilities { +enum kern_feature_id { /* v4.14: kernel support for program & map names. */ - __u32 name:1; + FEAT_PROG_NAME, /* v5.2: kernel support for global data sections. */ - __u32 global_data:1; + FEAT_GLOBAL_DATA, + /* BTF support */ + FEAT_BTF, /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ - __u32 btf_func:1; + FEAT_BTF_FUNC, /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ - __u32 btf_datasec:1; + FEAT_BTF_DATASEC, + /* BTF_FUNC_GLOBAL is supported */ + FEAT_BTF_GLOBAL_FUNC, + /* BPF_F_MMAPABLE is supported for arrays */ + FEAT_ARRAY_MMAP, + /* kernel support for expected_attach_type in BPF_PROG_LOAD */ + FEAT_EXP_ATTACH_TYPE, + /* bpf_probe_read_{kernel,user}[_str] helpers */ + FEAT_PROBE_READ_KERN, + /* BPF_PROG_BIND_MAP is supported */ + FEAT_PROG_BIND_MAP, + __FEAT_CNT, +}; + +static bool kernel_supports(enum kern_feature_id feat_id); + +enum reloc_type { + RELO_LD64, + RELO_CALL, + RELO_DATA, + RELO_EXTERN, + RELO_SUBPROG_ADDR, +}; + +struct reloc_desc { + enum reloc_type type; + int insn_idx; + int map_idx; + int sym_off; + bool processed; +}; + +struct bpf_sec_def; + +typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec, + struct bpf_program *prog); + +struct bpf_sec_def { + const char *sec; + size_t len; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + bool is_exp_attach_type_optional; + bool is_attachable; + bool is_attach_btf; + bool is_sleepable; + attach_fn_t attach_fn; }; /* @@ -148,31 +219,47 @@ struct bpf_capabilities { * linux/filter.h. */ struct bpf_program { - /* Index in elf obj file, for relocation use. */ - int idx; + const struct bpf_sec_def *sec_def; + char *sec_name; + size_t sec_idx; + /* this program's instruction offset (in number of instructions) + * within its containing ELF section + */ + size_t sec_insn_off; + /* number of original instructions in ELF section belonging to this + * program, not taking into account subprogram instructions possibly + * appended later during relocation + */ + size_t sec_insn_cnt; + /* Offset (in number of instructions) of the start of instruction + * belonging to this BPF program within its containing main BPF + * program. For the entry-point (main) BPF program, this is always + * zero. For a sub-program, this gets reset before each of main BPF + * programs are processed and relocated and is used to determine + * whether sub-program was already appended to the main program, and + * if yes, at which instruction offset. + */ + size_t sub_insn_off; + char *name; - int prog_ifindex; - char *section_name; - /* section_name with / replaced by _; makes recursive pinning + /* sec_name with / replaced by _; makes recursive pinning * in bpf_object__pin_programs easier */ char *pin_name; - struct bpf_insn *insns; - size_t insns_cnt, main_prog_cnt; - enum bpf_prog_type type; - struct reloc_desc { - enum { - RELO_LD64, - RELO_CALL, - RELO_DATA, - } type; - int insn_idx; - union { - int map_idx; - int text_off; - }; - } *reloc_desc; + /* instructions that belong to BPF program; insns[0] is located at + * sec_insn_off instruction within its ELF section in ELF file, so + * when mapping ELF file instruction index to the local instruction, + * one needs to subtract sec_insn_off; and vice versa.
+ */ + struct bpf_insn *insns; + /* actual number of instructions in this BPF program's image; for + * entry-point BPF programs this includes the size of the main program + * itself plus all the used sub-programs, appended at the end + */ + size_t insns_cnt; + + struct reloc_desc *reloc_desc; int nr_reloc; int log_level; @@ -186,50 +273,132 @@ struct bpf_program { void *priv; bpf_program_clear_priv_t clear_priv; + bool load; + enum bpf_prog_type type; enum bpf_attach_type expected_attach_type; + int prog_ifindex; + __u32 attach_btf_id; + __u32 attach_prog_fd; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; - struct bpf_capabilities *caps; - void *line_info; __u32 line_info_rec_size; __u32 line_info_cnt; __u32 prog_flags; }; +struct bpf_struct_ops { + const char *tname; + const struct btf_type *type; + struct bpf_program **progs; + __u32 *kern_func_off; + /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ + void *data; + /* e.g. struct bpf_struct_ops_tcp_congestion_ops in + * btf_vmlinux's format. + * struct bpf_struct_ops_tcp_congestion_ops { + * [... some other kernel fields ...] + * struct tcp_congestion_ops data; + * } + * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) + * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" + * from "data". + */ + void *kern_vdata; + __u32 type_id; +}; + +#define DATA_SEC ".data" +#define BSS_SEC ".bss" +#define RODATA_SEC ".rodata" +#define KCONFIG_SEC ".kconfig" +#define KSYMS_SEC ".ksyms" +#define STRUCT_OPS_SEC ".struct_ops" + enum libbpf_map_type { LIBBPF_MAP_UNSPEC, LIBBPF_MAP_DATA, LIBBPF_MAP_BSS, LIBBPF_MAP_RODATA, + LIBBPF_MAP_KCONFIG, }; static const char * const libbpf_type_to_btf_name[] = { - [LIBBPF_MAP_DATA] = ".data", - [LIBBPF_MAP_BSS] = ".bss", - [LIBBPF_MAP_RODATA] = ".rodata", + [LIBBPF_MAP_DATA] = DATA_SEC, + [LIBBPF_MAP_BSS] = BSS_SEC, + [LIBBPF_MAP_RODATA] = RODATA_SEC, + [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC, }; struct bpf_map { - int fd; char *name; + int fd; int sec_idx; size_t sec_offset; int map_ifindex; int inner_map_fd; struct bpf_map_def def; + __u32 numa_node; + __u32 btf_var_idx; __u32 btf_key_type_id; __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; void *priv; bpf_map_clear_priv_t clear_priv; enum libbpf_map_type libbpf_type; + void *mmaped; + struct bpf_struct_ops *st_ops; + struct bpf_map *inner_map; + void **init_slots; + int init_slots_sz; + char *pin_path; + bool pinned; + bool reused; }; -struct bpf_secdata { - void *rodata; - void *data; +enum extern_type { + EXT_UNKNOWN, + EXT_KCFG, + EXT_KSYM, +}; + +enum kcfg_type { + KCFG_UNKNOWN, + KCFG_CHAR, + KCFG_BOOL, + KCFG_INT, + KCFG_TRISTATE, + KCFG_CHAR_ARR, +}; + +struct extern_desc { + enum extern_type type; + int sym_idx; + int btf_id; + int sec_btf_id; + const char *name; + bool is_set; + bool is_weak; + union { + struct { + enum kcfg_type type; + int sz; + int align; + int data_off; + bool is_signed; + } kcfg; + struct { + unsigned long long addr; + + /* target btf_id of the corresponding kernel var. */ + int vmlinux_btf_id; + + /* local btf_id of the ksym extern's type. */ + __u32 type_id; + } ksym; + }; }; static LIST_HEAD(bpf_objects_list); @@ -244,10 +413,15 @@ struct bpf_object { struct bpf_map *maps; size_t nr_maps; size_t maps_cap; - struct bpf_secdata sections; + + char *kconfig; + struct extern_desc *externs; + int nr_extern; + int kconfig_map_idx; + int rodata_map_idx; bool loaded; - bool has_pseudo_calls; + bool has_subcalls; /* * Information when doing elf related work.
Only valid if fd @@ -255,7 +429,7 @@ struct bpf_object { */ struct { int fd; - void *obj_buf; + const void *obj_buf; size_t obj_buf_sz; Elf *elf; GElf_Ehdr ehdr; @@ -263,18 +437,23 @@ struct bpf_object { Elf_Data *data; Elf_Data *rodata; Elf_Data *bss; + Elf_Data *st_ops_data; + size_t shstrndx; /* section index for section name strings */ size_t strtabidx; struct { GElf_Shdr shdr; Elf_Data *data; - } *reloc; - int nr_reloc; + } *reloc_sects; + int nr_reloc_sects; int maps_shndx; int btf_maps_shndx; + __u32 btf_maps_sec_btf_id; int text_shndx; + int symbols_shndx; int data_shndx; int rodata_shndx; int bss_shndx; + int st_ops_shndx; } efile; /* * All loaded bpf_object is linked in a list, which is @@ -284,17 +463,29 @@ struct bpf_object { struct list_head list; struct btf *btf; + /* Parse and load BTF vmlinux if any of the programs in the object need + * it at load time. + */ + struct btf *btf_vmlinux; struct btf_ext *btf_ext; void *priv; bpf_object_clear_priv_t clear_priv; - struct bpf_capabilities caps; - char path[]; }; #define obj_elf_valid(o) ((o)->efile.elf) +static const char *elf_sym_str(const struct bpf_object *obj, size_t off); +static const char *elf_sec_str(const struct bpf_object *obj, size_t off); +static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); +static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); +static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr); +static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); +static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); +static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx, + size_t off, __u32 sym_type, GElf_Sym *sym); + void bpf_program__unload(struct bpf_program *prog) { int i; @@ -310,8 +501,8 @@ void bpf_program__unload(struct bpf_program *prog) for (i = 0; i < prog->instances.nr; i++) zclose(prog->instances.fds[i]); } else if (prog->instances.nr != -1) { - pr_warning("Internal error: instances.nr is %d\n", - prog->instances.nr); + pr_warn("Internal error: instances.nr is %d\n", + prog->instances.nr); } prog->instances.nr = -1; @@ -334,182 +525,545 @@ static void bpf_program__exit(struct bpf_program *prog) bpf_program__unload(prog); zfree(&prog->name); - zfree(&prog->section_name); + zfree(&prog->sec_name); zfree(&prog->pin_name); zfree(&prog->insns); zfree(&prog->reloc_desc); prog->nr_reloc = 0; prog->insns_cnt = 0; - prog->idx = -1; + prog->sec_idx = -1; } static char *__bpf_program__pin_name(struct bpf_program *prog) { char *name, *p; - name = p = strdup(prog->section_name); + name = p = strdup(prog->sec_name); while ((p = strchr(p, '/'))) *p = '_'; return name; } -static int -bpf_program__init(void *data, size_t size, char *section_name, int idx, - struct bpf_program *prog) +static bool insn_is_subprog_call(const struct bpf_insn *insn) { - const size_t bpf_insn_sz = sizeof(struct bpf_insn); + return BPF_CLASS(insn->code) == BPF_JMP && + BPF_OP(insn->code) == BPF_CALL && + BPF_SRC(insn->code) == BPF_K && + insn->src_reg == BPF_PSEUDO_CALL && + insn->dst_reg == 0 && + insn->off == 0; +} - if (size == 0 || size % bpf_insn_sz) { - pr_warning("corrupted section '%s', size: %zu\n", - section_name, size); +static bool is_ldimm64(struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + +static bool insn_is_pseudo_func(struct bpf_insn *insn) +{ + return is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; +} + +static int +bpf_object__init_prog(struct bpf_object 
*obj, struct bpf_program *prog, + const char *name, size_t sec_idx, const char *sec_name, + size_t sec_off, void *insn_data, size_t insn_data_sz) +{ + if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { + pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", + sec_name, name, sec_off, insn_data_sz); return -EINVAL; } memset(prog, 0, sizeof(*prog)); + prog->obj = obj; - prog->section_name = strdup(section_name); - if (!prog->section_name) { - pr_warning("failed to alloc name for prog under section(%d) %s\n", - idx, section_name); - goto errout; - } + prog->sec_idx = sec_idx; + prog->sec_insn_off = sec_off / BPF_INSN_SZ; + prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; + /* insns_cnt can later be increased by appending used subprograms */ + prog->insns_cnt = prog->sec_insn_cnt; - prog->pin_name = __bpf_program__pin_name(prog); - if (!prog->pin_name) { - pr_warning("failed to alloc pin name for prog under section(%d) %s\n", - idx, section_name); - goto errout; - } + prog->type = BPF_PROG_TYPE_UNSPEC; + prog->load = true; - prog->insns = malloc(size); - if (!prog->insns) { - pr_warning("failed to alloc insns for prog under section %s\n", - section_name); - goto errout; - } - prog->insns_cnt = size / bpf_insn_sz; - memcpy(prog->insns, data, size); - prog->idx = idx; prog->instances.fds = NULL; prog->instances.nr = -1; - prog->type = BPF_PROG_TYPE_UNSPEC; + + prog->sec_name = strdup(sec_name); + if (!prog->sec_name) + goto errout; + + prog->name = strdup(name); + if (!prog->name) + goto errout; + + prog->pin_name = __bpf_program__pin_name(prog); + if (!prog->pin_name) + goto errout; + + prog->insns = malloc(insn_data_sz); + if (!prog->insns) + goto errout; + memcpy(prog->insns, insn_data, insn_data_sz); return 0; errout: + pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); bpf_program__exit(prog); return -ENOMEM; } static int -bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, - char *section_name, int idx) +bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, + const char *sec_name, int sec_idx) { - struct bpf_program prog, *progs; + struct bpf_program *prog, *progs; + void *data = sec_data->d_buf; + size_t sec_sz = sec_data->d_size, sec_off, prog_sz; int nr_progs, err; + const char *name; + GElf_Sym sym; - err = bpf_program__init(data, size, section_name, idx, &prog); - if (err) - return err; - - prog.caps = &obj->caps; progs = obj->programs; nr_progs = obj->nr_programs; + sec_off = 0; - progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0])); - if (!progs) { - /* - * In this case the original obj->programs - * is still valid, so don't need special treat for - * bpf_close_object(). 
- */ - pr_warning("failed to alloc a new program under section '%s'\n", - section_name); - bpf_program__exit(&prog); - return -ENOMEM; + while (sec_off < sec_sz) { + if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) { + pr_warn("sec '%s': failed to find program symbol at offset %zu\n", + sec_name, sec_off); + return -LIBBPF_ERRNO__FORMAT; + } + + prog_sz = sym.st_size; + + name = elf_sym_str(obj, sym.st_name); + if (!name) { + pr_warn("sec '%s': failed to get symbol name for offset %zu\n", + sec_name, sec_off); + return -LIBBPF_ERRNO__FORMAT; + } + + if (sec_off + prog_sz > sec_sz) { + pr_warn("sec '%s': program at offset %zu crosses section boundary\n", + sec_name, sec_off); + return -LIBBPF_ERRNO__FORMAT; + } + + pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", + sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); + + progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); + if (!progs) { + /* + * In this case the original obj->programs + * is still valid, so don't need special treat for + * bpf_close_object(). + */ + pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", + sec_name, name); + return -ENOMEM; + } + obj->programs = progs; + + prog = &progs[nr_progs]; + + err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, + sec_off, data + sec_off, prog_sz); + if (err) + return err; + + nr_progs++; + obj->nr_programs = nr_progs; + + sec_off += prog_sz; } - pr_debug("found program %s\n", prog.section_name); - obj->programs = progs; - obj->nr_programs = nr_progs + 1; - prog.obj = obj; - progs[nr_progs] = prog; return 0; } -static int -bpf_object__init_prog_names(struct bpf_object *obj) +static __u32 get_kernel_version(void) { - Elf_Data *symbols = obj->efile.symbols; - struct bpf_program *prog; - size_t pi, si; + __u32 major, minor, patch; + struct utsname info; - for (pi = 0; pi < obj->nr_programs; pi++) { - const char *name = NULL; + uname(&info); + if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) + return 0; + return KERNEL_VERSION(major, minor, patch); +} - prog = &obj->programs[pi]; +static const struct btf_member * +find_member_by_offset(const struct btf_type *t, __u32 bit_offset) +{ + struct btf_member *m; + int i; - for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; - si++) { - GElf_Sym sym; + for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { + if (btf_member_bit_offset(t, i) == bit_offset) + return m; + } - if (!gelf_getsym(symbols, si, &sym)) - continue; - if (sym.st_shndx != prog->idx) - continue; - if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL) - continue; + return NULL; +} - name = elf_strptr(obj->efile.elf, - obj->efile.strtabidx, - sym.st_name); - if (!name) { - pr_warning("failed to get sym name string for prog %s\n", - prog->section_name); - return -LIBBPF_ERRNO__LIBELF; - } +static const struct btf_member * +find_member_by_name(const struct btf *btf, const struct btf_type *t, + const char *name) +{ + struct btf_member *m; + int i; + + for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { + if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) + return m; + } + + return NULL; +} + +#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" +static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, + const char *name, __u32 kind); + +static int +find_struct_ops_kern_types(const struct btf *btf, const char *tname, + const struct btf_type **type, __u32 *type_id, + const struct btf_type 
**vtype, __u32 *vtype_id, + const struct btf_member **data_member) +{ + const struct btf_type *kern_type, *kern_vtype; + const struct btf_member *kern_data_member; + __s32 kern_vtype_id, kern_type_id; + __u32 i; + + kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); + if (kern_type_id < 0) { + pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", + tname); + return kern_type_id; + } + kern_type = btf__type_by_id(btf, kern_type_id); + + /* Find the corresponding "map_value" type that will be used + * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, + * find "struct bpf_struct_ops_tcp_congestion_ops" from the + * btf_vmlinux. + */ + kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, + tname, BTF_KIND_STRUCT); + if (kern_vtype_id < 0) { + pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", + STRUCT_OPS_VALUE_PREFIX, tname); + return kern_vtype_id; + } + kern_vtype = btf__type_by_id(btf, kern_vtype_id); + + /* Find "struct tcp_congestion_ops" from + * struct bpf_struct_ops_tcp_congestion_ops { + * [ ... ] + * struct tcp_congestion_ops data; + * } + */ + kern_data_member = btf_members(kern_vtype); + for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { + if (kern_data_member->type == kern_type_id) + break; + } + if (i == btf_vlen(kern_vtype)) { + pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", + tname, STRUCT_OPS_VALUE_PREFIX, tname); + return -EINVAL; + } + + *type = kern_type; + *type_id = kern_type_id; + *vtype = kern_vtype; + *vtype_id = kern_vtype_id; + *data_member = kern_data_member; + + return 0; +} + +static bool bpf_map__is_struct_ops(const struct bpf_map *map) +{ + return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; +} + +/* Init the map's fields that depend on kern_btf */ +static int bpf_map__init_kern_struct_ops(struct bpf_map *map, + const struct btf *btf, + const struct btf *kern_btf) +{ + const struct btf_member *member, *kern_member, *kern_data_member; + const struct btf_type *type, *kern_type, *kern_vtype; + __u32 i, kern_type_id, kern_vtype_id, kern_data_off; + struct bpf_struct_ops *st_ops; + void *data, *kern_data; + const char *tname; + int err; + + st_ops = map->st_ops; + type = st_ops->type; + tname = st_ops->tname; + err = find_struct_ops_kern_types(kern_btf, tname, + &kern_type, &kern_type_id, + &kern_vtype, &kern_vtype_id, + &kern_data_member); + if (err) + return err; + + pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", + map->name, st_ops->type_id, kern_type_id, kern_vtype_id); + + map->def.value_size = kern_vtype->size; + map->btf_vmlinux_value_type_id = kern_vtype_id; + + st_ops->kern_vdata = calloc(1, kern_vtype->size); + if (!st_ops->kern_vdata) + return -ENOMEM; + + data = st_ops->data; + kern_data_off = kern_data_member->offset / 8; + kern_data = st_ops->kern_vdata + kern_data_off; + + member = btf_members(type); + for (i = 0; i < btf_vlen(type); i++, member++) { + const struct btf_type *mtype, *kern_mtype; + __u32 mtype_id, kern_mtype_id; + void *mdata, *kern_mdata; + __s64 msize, kern_msize; + __u32 moff, kern_moff; + __u32 kern_member_idx; + const char *mname; + + mname = btf__name_by_offset(btf, member->name_off); + kern_member = find_member_by_name(kern_btf, kern_type, mname); + if (!kern_member) { + pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", + map->name, mname); + return -ENOTSUP; } - if (!name && prog->idx == obj->efile.text_shndx) - name = ".text"; + kern_member_idx = 
kern_member - btf_members(kern_type); + if (btf_member_bitfield_size(type, i) || + btf_member_bitfield_size(kern_type, kern_member_idx)) { + pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", + map->name, mname); + return -ENOTSUP; + } - if (!name) { - pr_warning("failed to find sym for prog %s\n", - prog->section_name); + moff = member->offset / 8; + kern_moff = kern_member->offset / 8; + + mdata = data + moff; + kern_mdata = kern_data + kern_moff; + + mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); + kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, + &kern_mtype_id); + if (BTF_INFO_KIND(mtype->info) != + BTF_INFO_KIND(kern_mtype->info)) { + pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", + map->name, mname, BTF_INFO_KIND(mtype->info), + BTF_INFO_KIND(kern_mtype->info)); + return -ENOTSUP; + } + + if (btf_is_ptr(mtype)) { + struct bpf_program *prog; + + mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id); + kern_mtype = skip_mods_and_typedefs(kern_btf, + kern_mtype->type, + &kern_mtype_id); + if (!btf_is_func_proto(mtype) || + !btf_is_func_proto(kern_mtype)) { + pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n", + map->name, mname); + return -ENOTSUP; + } + + prog = st_ops->progs[i]; + if (!prog) { + pr_debug("struct_ops init_kern %s: func ptr %s is not set\n", + map->name, mname); + continue; + } + + prog->attach_btf_id = kern_type_id; + prog->expected_attach_type = kern_member_idx; + + st_ops->kern_func_off[i] = kern_data_off + kern_moff; + + pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", + map->name, mname, prog->name, moff, + kern_moff); + + continue; + } + + msize = btf__resolve_size(btf, mtype_id); + kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); + if (msize < 0 || kern_msize < 0 || msize != kern_msize) { + pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", + map->name, mname, (ssize_t)msize, + (ssize_t)kern_msize); + return -ENOTSUP; + } + + pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", + map->name, mname, (unsigned int)msize, + moff, kern_moff); + memcpy(kern_mdata, mdata, msize); + } + + return 0; +} + +static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) +{ + struct bpf_map *map; + size_t i; + int err; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + if (!bpf_map__is_struct_ops(map)) + continue; + + err = bpf_map__init_kern_struct_ops(map, obj->btf, + obj->btf_vmlinux); + if (err) + return err; + } + + return 0; +} + +static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) +{ + const struct btf_type *type, *datasec; + const struct btf_var_secinfo *vsi; + struct bpf_struct_ops *st_ops; + const char *tname, *var_name; + __s32 type_id, datasec_id; + const struct btf *btf; + struct bpf_map *map; + __u32 i; + + if (obj->efile.st_ops_shndx == -1) + return 0; + + btf = obj->btf; + datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC, + BTF_KIND_DATASEC); + if (datasec_id < 0) { + pr_warn("struct_ops init: DATASEC %s not found\n", + STRUCT_OPS_SEC); + return -EINVAL; + } + + datasec = btf__type_by_id(btf, datasec_id); + vsi = btf_var_secinfos(datasec); + for (i = 0; i < btf_vlen(datasec); i++, vsi++) { + type = btf__type_by_id(obj->btf, vsi->type); + var_name = btf__name_by_offset(obj->btf, type->name_off); + + type_id = btf__resolve_type(obj->btf, vsi->type); + if (type_id < 0) { + 
pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", + vsi->type, STRUCT_OPS_SEC); return -EINVAL; } - prog->name = strdup(name); - if (!prog->name) { - pr_warning("failed to allocate memory for prog sym %s\n", - name); - return -ENOMEM; + type = btf__type_by_id(obj->btf, type_id); + tname = btf__name_by_offset(obj->btf, type->name_off); + if (!tname[0]) { + pr_warn("struct_ops init: anonymous type is not supported\n"); + return -ENOTSUP; } + if (!btf_is_struct(type)) { + pr_warn("struct_ops init: %s is not a struct\n", tname); + return -EINVAL; + } + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + + map->sec_idx = obj->efile.st_ops_shndx; + map->sec_offset = vsi->offset; + map->name = strdup(var_name); + if (!map->name) + return -ENOMEM; + + map->def.type = BPF_MAP_TYPE_STRUCT_OPS; + map->def.key_size = sizeof(int); + map->def.value_size = type->size; + map->def.max_entries = 1; + + map->st_ops = calloc(1, sizeof(*map->st_ops)); + if (!map->st_ops) + return -ENOMEM; + st_ops = map->st_ops; + st_ops->data = malloc(type->size); + st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); + st_ops->kern_func_off = malloc(btf_vlen(type) * + sizeof(*st_ops->kern_func_off)); + if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) + return -ENOMEM; + + if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { + pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", + var_name, STRUCT_OPS_SEC); + return -EINVAL; + } + + memcpy(st_ops->data, + obj->efile.st_ops_data->d_buf + vsi->offset, + type->size); + st_ops->tname = tname; + st_ops->type = type; + st_ops->type_id = type_id; + + pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", + tname, type_id, var_name, vsi->offset); } return 0; } static struct bpf_object *bpf_object__new(const char *path, - void *obj_buf, - size_t obj_buf_sz) + const void *obj_buf, + size_t obj_buf_sz, + const char *obj_name) { struct bpf_object *obj; char *end; obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); if (!obj) { - pr_warning("alloc memory failed for %s\n", path); + pr_warn("alloc memory failed for %s\n", path); return ERR_PTR(-ENOMEM); } strcpy(obj->path, path); - /* Using basename() GNU version which doesn't modify arg. */ - strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1); - end = strchr(obj->name, '.'); - if (end) - *end = 0; + if (obj_name) { + strncpy(obj->name, obj_name, sizeof(obj->name) - 1); + obj->name[sizeof(obj->name) - 1] = 0; + } else { + /* Using basename() GNU version which doesn't modify arg. 
*/ + strncpy(obj->name, basename((void *)path), + sizeof(obj->name) - 1); + end = strchr(obj->name, '.'); + if (end) + *end = 0; + } obj->efile.fd = -1; /* @@ -525,7 +1079,11 @@ static struct bpf_object *bpf_object__new(const char *path, obj->efile.data_shndx = -1; obj->efile.rodata_shndx = -1; obj->efile.bss_shndx = -1; + obj->efile.st_ops_shndx = -1; + obj->kconfig_map_idx = -1; + obj->rodata_map_idx = -1; + obj->kern_version = get_kernel_version(); obj->loaded = false; INIT_LIST_HEAD(&obj->list); @@ -546,21 +1104,27 @@ static void bpf_object__elf_finish(struct bpf_object *obj) obj->efile.data = NULL; obj->efile.rodata = NULL; obj->efile.bss = NULL; + obj->efile.st_ops_data = NULL; - zfree(&obj->efile.reloc); - obj->efile.nr_reloc = 0; + zfree(&obj->efile.reloc_sects); + obj->efile.nr_reloc_sects = 0; zclose(obj->efile.fd); obj->efile.obj_buf = NULL; obj->efile.obj_buf_sz = 0; } +/* if libelf is old and doesn't support mmap(), fall back to read() */ +#ifndef ELF_C_READ_MMAP +#define ELF_C_READ_MMAP ELF_C_READ +#endif + static int bpf_object__elf_init(struct bpf_object *obj) { int err = 0; GElf_Ehdr *ep; if (obj_elf_valid(obj)) { - pr_warning("elf init: internal error\n"); + pr_warn("elf: init internal error\n"); return -LIBBPF_ERRNO__LIBELF; } @@ -569,7 +1133,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) * obj_buf should have been validated by * bpf_object__open_buffer(). */ - obj->efile.elf = elf_memory(obj->efile.obj_buf, + obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); } else { obj->efile.fd = open(obj->path, O_RDONLY); @@ -578,31 +1142,44 @@ static int bpf_object__elf_init(struct bpf_object *obj) err = -errno; cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warning("failed to open %s: %s\n", obj->path, cp); + pr_warn("elf: failed to open %s: %s\n", obj->path, cp); return err; } - obj->efile.elf = elf_begin(obj->efile.fd, - LIBBPF_ELF_C_READ_MMAP, NULL); + obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); } if (!obj->efile.elf) { - pr_warning("failed to open %s as ELF file\n", obj->path); + pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__LIBELF; goto errout; } if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { - pr_warning("failed to get EHDR from %s\n", obj->path); + pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); err = -LIBBPF_ERRNO__FORMAT; goto errout; } ep = &obj->efile.ehdr; + if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { + pr_warn("elf: failed to get section names section index for %s: %s\n", + obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ + if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { + pr_warn("elf: failed to get section names strings from %s: %s\n", + obj->path, elf_errmsg(-1)); + return -LIBBPF_ERRNO__FORMAT; + } + /* Old LLVM set e_machine to EM_NONE */ if (ep->e_type != ET_REL || (ep->e_machine && ep->e_machine != EM_BPF)) { - pr_warning("%s is not an eBPF object file\n", obj->path); + pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); err = -LIBBPF_ERRNO__FORMAT; goto errout; } @@ -624,7 +1201,7 @@ static int bpf_object__check_endianness(struct bpf_object *obj) #else # error "Unrecognized __BYTE_ORDER__" #endif - pr_warning("endianness mismatch.\n"); + pr_warn("elf: endianness mismatch in %s.\n", obj->path); return -LIBBPF_ERRNO__ENDIAN; } @@ -642,7 +1219,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) __u32 kver; if (size != sizeof(kver)) { - pr_warning("invalid kver section in %s\n", obj->path); + pr_warn("invalid kver section in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } memcpy(&kver, data, sizeof(kver)); @@ -651,16 +1228,6 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) return 0; } -static int compare_bpf_map(const void *_a, const void *_b) -{ - const struct bpf_map *a = _a; - const struct bpf_map *b = _b; - - if (a->sec_idx != b->sec_idx) - return a->sec_idx - b->sec_idx; - return a->sec_offset - b->sec_offset; -} - static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) { if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || @@ -669,72 +1236,34 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) return false; } -static int bpf_object_search_section_size(const struct bpf_object *obj, - const char *name, size_t *d_size) -{ - const GElf_Ehdr *ep = &obj->efile.ehdr; - Elf *elf = obj->efile.elf; - Elf_Scn *scn = NULL; - int idx = 0; - - while ((scn = elf_nextscn(elf, scn)) != NULL) { - const char *sec_name; - Elf_Data *data; - GElf_Shdr sh; - - idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - pr_warning("failed to get section(%d) header from %s\n", - idx, obj->path); - return -EIO; - } - - sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); - if (!sec_name) { - pr_warning("failed to get section(%d) name from %s\n", - idx, obj->path); - return -EIO; - } - - if (strcmp(name, sec_name)) - continue; - - data = elf_getdata(scn, 0); - if (!data) { - pr_warning("failed to get section(%d) data from %s(%s)\n", - idx, name, obj->path); - return -EIO; - } - - *d_size = data->d_size; - return 0; - } - - return -ENOENT; -} - int bpf_object__section_size(const struct bpf_object *obj, const char *name, __u32 *size) { int ret = -ENOENT; - size_t d_size; *size = 0; if (!name) { return -EINVAL; - } else if (!strcmp(name, ".data")) { + } else if (!strcmp(name, DATA_SEC)) { if (obj->efile.data) *size = obj->efile.data->d_size; - } else if (!strcmp(name, ".bss")) { + } else if (!strcmp(name, BSS_SEC)) { if (obj->efile.bss) *size = obj->efile.bss->d_size; - } else if (!strcmp(name, ".rodata")) { + } else if (!strcmp(name, RODATA_SEC)) { if (obj->efile.rodata) *size = obj->efile.rodata->d_size; + } else if (!strcmp(name, STRUCT_OPS_SEC)) { + if (obj->efile.st_ops_data) + *size = obj->efile.st_ops_data->d_size; } else { - ret = bpf_object_search_section_size(obj, name, &d_size); - if (!ret) - *size = d_size; + Elf_Scn *scn = elf_sec_by_name(obj, name); + Elf_Data *data = elf_sec_data(obj, scn); + + if (data) { + ret = 0; /* found it */ + *size = data->d_size; + } } return *size ? 
0 : ret; @@ -759,11 +1288,10 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, GELF_ST_TYPE(sym.st_info) != STT_OBJECT) continue; - sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name); + sname = elf_sym_str(obj, sym.st_name); if (!sname) { - pr_warning("failed to get sym name string for var %s\n", - name); + pr_warn("failed to get sym name string for var %s\n", + name); return -EIO; } if (strcmp(name, sname) == 0) { @@ -785,9 +1313,9 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) return &obj->maps[obj->nr_maps++]; new_cap = max((size_t)4, obj->maps_cap * 3 / 2); - new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps)); + new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); if (!new_maps) { - pr_warning("alloc maps for object failed\n"); + pr_warn("alloc maps for object failed\n"); return ERR_PTR(-ENOMEM); } @@ -809,13 +1337,43 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) return &obj->maps[obj->nr_maps++]; } +static size_t bpf_map_mmap_sz(const struct bpf_map *map) +{ + long page_sz = sysconf(_SC_PAGE_SIZE); + size_t map_sz; + + map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; + map_sz = roundup(map_sz, page_sz); + return map_sz; +} + +static char *internal_map_name(struct bpf_object *obj, + enum libbpf_map_type type) +{ + char map_name[BPF_OBJ_NAME_LEN], *p; + const char *sfx = libbpf_type_to_btf_name[type]; + int sfx_len = max((size_t)7, strlen(sfx)); + int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, + strlen(obj->name)); + + snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, + sfx_len, libbpf_type_to_btf_name[type]); + + /* sanitise map name to characters allowed by kernel */ + for (p = map_name; *p && p < map_name + sizeof(map_name); p++) + if (!isalnum(*p) && *p != '_' && *p != '.') + *p = '_'; + + return strdup(map_name); +} + static int bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, - int sec_idx, Elf_Data *data, void **data_buff) + int sec_idx, void *data, size_t data_sz) { - char map_name[BPF_OBJ_NAME_LEN]; struct bpf_map_def *def; struct bpf_map *map; + int err; map = bpf_object__add_map(obj); if (IS_ERR(map)) @@ -824,32 +1382,38 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, map->libbpf_type = type; map->sec_idx = sec_idx; map->sec_offset = 0; - snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name, - libbpf_type_to_btf_name[type]); - map->name = strdup(map_name); + map->name = internal_map_name(obj, type); if (!map->name) { - pr_warning("failed to alloc map name\n"); + pr_warn("failed to alloc map name\n"); return -ENOMEM; } - pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n", - map_name, map->sec_idx, map->sec_offset); def = &map->def; def->type = BPF_MAP_TYPE_ARRAY; def->key_size = sizeof(int); - def->value_size = data->d_size; + def->value_size = data_sz; def->max_entries = 1; - def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0; - if (data_buff) { - *data_buff = malloc(data->d_size); - if (!*data_buff) { - zfree(&map->name); - pr_warning("failed to alloc map content buffer\n"); - return -ENOMEM; - } - memcpy(*data_buff, data->d_buf, data->d_size); + def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG + ? 
BPF_F_RDONLY_PROG : 0; + def->map_flags |= BPF_F_MMAPABLE; + + pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", + map->name, map->sec_idx, map->sec_offset, def->map_flags); + + map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (map->mmaped == MAP_FAILED) { + err = -errno; + map->mmaped = NULL; + pr_warn("failed to alloc map '%s' content buffer: %d\n", + map->name, err); + zfree(&map->name); + return err; } + if (data) + memcpy(map->mmaped, data, data_sz); + pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); return 0; } @@ -858,37 +1422,338 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj) { int err; - if (!obj->caps.global_data) - return 0; /* * Populate obj->maps with libbpf internal maps. */ if (obj->efile.data_shndx >= 0) { err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, obj->efile.data_shndx, - obj->efile.data, - &obj->sections.data); + obj->efile.data->d_buf, + obj->efile.data->d_size); if (err) return err; } if (obj->efile.rodata_shndx >= 0) { err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, obj->efile.rodata_shndx, - obj->efile.rodata, - &obj->sections.rodata); + obj->efile.rodata->d_buf, + obj->efile.rodata->d_size); if (err) return err; + + obj->rodata_map_idx = obj->nr_maps - 1; } if (obj->efile.bss_shndx >= 0) { err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, obj->efile.bss_shndx, - obj->efile.bss, NULL); + NULL, + obj->efile.bss->d_size); if (err) return err; } return 0; } + +static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, + const void *name) +{ + int i; + + for (i = 0; i < obj->nr_extern; i++) { + if (strcmp(obj->externs[i].name, name) == 0) + return &obj->externs[i]; + } + return NULL; +} + +static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, + char value) +{ + switch (ext->kcfg.type) { + case KCFG_BOOL: + if (value == 'm') { + pr_warn("extern (kcfg) %s=%c should be tristate or char\n", + ext->name, value); + return -EINVAL; + } + *(bool *)ext_val = value == 'y' ? 
true : false; + break; + case KCFG_TRISTATE: + if (value == 'y') + *(enum libbpf_tristate *)ext_val = TRI_YES; + else if (value == 'm') + *(enum libbpf_tristate *)ext_val = TRI_MODULE; + else /* value == 'n' */ + *(enum libbpf_tristate *)ext_val = TRI_NO; + break; + case KCFG_CHAR: + *(char *)ext_val = value; + break; + case KCFG_UNKNOWN: + case KCFG_INT: + case KCFG_CHAR_ARR: + default: + pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", + ext->name, value); + return -EINVAL; + } + ext->is_set = true; + return 0; +} + +static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, + const char *value) +{ + size_t len; + + if (ext->kcfg.type != KCFG_CHAR_ARR) { + pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); + return -EINVAL; + } + + len = strlen(value); + if (value[len - 1] != '"') { + pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", + ext->name, value); + return -EINVAL; + } + + /* strip quotes */ + len -= 2; + if (len >= ext->kcfg.sz) { + pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", + ext->name, value, len, ext->kcfg.sz - 1); + len = ext->kcfg.sz - 1; + } + memcpy(ext_val, value + 1, len); + ext_val[len] = '\0'; + ext->is_set = true; + return 0; +} + +static int parse_u64(const char *value, __u64 *res) +{ + char *value_end; + int err; + + errno = 0; + *res = strtoull(value, &value_end, 0); + if (errno) { + err = -errno; + pr_warn("failed to parse '%s' as integer: %d\n", value, err); + return err; + } + if (*value_end) { + pr_warn("failed to parse '%s' as integer completely\n", value); + return -EINVAL; + } + return 0; +} + +static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) +{ + int bit_sz = ext->kcfg.sz * 8; + + if (ext->kcfg.sz == 8) + return true; + + /* Validate that value stored in u64 fits in integer of `ext->sz` + * bytes size without any loss of information. If the target integer + * is signed, we rely on the following limits of integer type of + * Y bits and subsequent transformation: + * + * -2^(Y-1) <= X <= 2^(Y-1) - 1 + * 0 <= X + 2^(Y-1) <= 2^Y - 1 + * 0 <= X + 2^(Y-1) < 2^Y + * + * For unsigned target integer, check that all the (64 - Y) bits are + * zero. 
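+ * E.g., for a signed 1-byte target (Y = 8) the valid range -128 <= X <= 127 maps onto 0 <= X + 128 < 256, which is the single unsigned comparison below.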
+ */ + if (ext->kcfg.is_signed) + return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); + else + return (v >> bit_sz) == 0; +} + +static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, + __u64 value) +{ + if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { + pr_warn("extern (kcfg) %s=%llu should be integer\n", + ext->name, (unsigned long long)value); + return -EINVAL; + } + if (!is_kcfg_value_in_range(ext, value)) { + pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", + ext->name, (unsigned long long)value, ext->kcfg.sz); + return -ERANGE; + } + switch (ext->kcfg.sz) { + case 1: *(__u8 *)ext_val = value; break; + case 2: *(__u16 *)ext_val = value; break; + case 4: *(__u32 *)ext_val = value; break; + case 8: *(__u64 *)ext_val = value; break; + default: + return -EINVAL; + } + ext->is_set = true; + return 0; +} + +static int bpf_object__process_kconfig_line(struct bpf_object *obj, + char *buf, void *data) +{ + struct extern_desc *ext; + char *sep, *value; + int len, err = 0; + void *ext_val; + __u64 num; + + if (strncmp(buf, "CONFIG_", 7)) + return 0; + + sep = strchr(buf, '='); + if (!sep) { + pr_warn("failed to parse '%s': no separator\n", buf); + return -EINVAL; + } + + /* Trim ending '\n' */ + len = strlen(buf); + if (buf[len - 1] == '\n') + buf[len - 1] = '\0'; + /* Split on '=' and ensure that a value is present. */ + *sep = '\0'; + if (!sep[1]) { + *sep = '='; + pr_warn("failed to parse '%s': no value\n", buf); + return -EINVAL; + } + + ext = find_extern_by_name(obj, buf); + if (!ext || ext->is_set) + return 0; + + ext_val = data + ext->kcfg.data_off; + value = sep + 1; + + switch (*value) { + case 'y': case 'n': case 'm': + err = set_kcfg_value_tri(ext, ext_val, *value); + break; + case '"': + err = set_kcfg_value_str(ext, ext_val, value); + break; + default: + /* assume integer */ + err = parse_u64(value, &num); + if (err) { + pr_warn("extern (kcfg) %s=%s should be integer\n", + ext->name, value); + return err; + } + err = set_kcfg_value_num(ext, ext_val, num); + break; + } + if (err) + return err; + pr_debug("extern (kcfg) %s=%s\n", ext->name, value); + return 0; +} + +static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) +{ + char buf[PATH_MAX]; + struct utsname uts; + int len, err = 0; + gzFile file; + + uname(&uts); + len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + /* gzopen also accepts uncompressed files. 
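+ * The kernel config is looked up at /boot/config-<release> first, with /proc/config.gz as the fallback; typical lines look like CONFIG_BPF_SYSCALL=y or CONFIG_DEFAULT_HOSTNAME="(none)".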
*/ + file = gzopen(buf, "r"); + if (!file) + file = gzopen("/proc/config.gz", "r"); + + if (!file) { + pr_warn("failed to open system Kconfig\n"); + return -ENOENT; + } + + while (gzgets(file, buf, sizeof(buf))) { + err = bpf_object__process_kconfig_line(obj, buf, data); + if (err) { + pr_warn("error parsing system Kconfig line '%s': %d\n", + buf, err); + goto out; + } + } + +out: + gzclose(file); + return err; +} + +static int bpf_object__read_kconfig_mem(struct bpf_object *obj, + const char *config, void *data) +{ + char buf[PATH_MAX]; + int err = 0; + FILE *file; + + file = fmemopen((void *)config, strlen(config), "r"); + if (!file) { + err = -errno; + pr_warn("failed to open in-memory Kconfig: %d\n", err); + return err; + } + + while (fgets(buf, sizeof(buf), file)) { + err = bpf_object__process_kconfig_line(obj, buf, data); + if (err) { + pr_warn("error parsing in-memory Kconfig line '%s': %d\n", + buf, err); + break; + } + } + + fclose(file); + return err; +} + +static int bpf_object__init_kconfig_map(struct bpf_object *obj) +{ + struct extern_desc *last_ext = NULL, *ext; + size_t map_sz; + int i, err; + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG) + last_ext = ext; + } + + if (!last_ext) + return 0; + + map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, + obj->efile.symbols_shndx, + NULL, map_sz); + if (err) + return err; + + obj->kconfig_map_idx = obj->nr_maps - 1; + + return 0; +} + static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) { Elf_Data *symbols = obj->efile.symbols; @@ -902,12 +1767,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) if (!symbols) return -EINVAL; - scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx); - if (scn) - data = elf_getdata(scn, NULL); + + scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); + data = elf_sec_data(obj, scn); if (!scn || !data) { - pr_warning("failed to get Elf_Data from map section %d\n", - obj->efile.maps_shndx); + pr_warn("elf: failed to get legacy map definitions for %s\n", + obj->path); return -EINVAL; } @@ -929,16 +1794,15 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) nr_maps++; } /* Assume equally sized map definitions */ - pr_debug("maps in %s: %d maps in %zd bytes\n", - obj->path, nr_maps, data->d_size); + pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", + nr_maps, data->d_size, obj->path); - map_def_sz = data->d_size / nr_maps; - if (!data->d_size || (data->d_size % nr_maps) != 0) { - pr_warning("unable to determine map definition size " - "section %s, %d maps in %zd bytes\n", - obj->path, nr_maps, data->d_size); + if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { + pr_warn("elf: unable to determine legacy map definition size in %s\n", + obj->path); return -EINVAL; } + map_def_sz = data->d_size / nr_maps; /* Fill obj->maps using data in "maps" section. 
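+ * Each map symbol's st_value is its byte offset into this section; a definition written in BPF C as, say, struct bpf_map_def SEC("maps") my_map (illustrative name) lands here as one fixed-size record at that offset.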
*/ for (i = 0; i < nr_syms; i++) { @@ -956,11 +1820,10 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) if (IS_ERR(map)) return PTR_ERR(map); - map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, - sym.st_name); + map_name = elf_sym_str(obj, sym.st_name); if (!map_name) { - pr_warning("failed to get map #%d name sym string for obj %s\n", - i, obj->path); + pr_warn("failed to get map #%d name sym string for obj %s\n", + i, obj->path); return -LIBBPF_ERRNO__FORMAT; } @@ -970,14 +1833,14 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", map_name, map->sec_idx, map->sec_offset); if (sym.st_value + map_def_sz > data->d_size) { - pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", - obj->path, map_name); + pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", + obj->path, map_name); return -EINVAL; } map->name = strdup(map_name); if (!map->name) { - pr_warning("failed to alloc map name\n"); + pr_warn("failed to alloc map name\n"); return -ENOMEM; } pr_debug("map %d is \"%s\"\n", i, map->name); @@ -998,13 +1861,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) * incompatible. */ char *b; + for (b = ((char *)def) + sizeof(struct bpf_map_def); b < ((char *)def) + map_def_sz; b++) { if (*b != 0) { - pr_warning("maps section in %s: \"%s\" " - "has unrecognized, non-zero " - "options\n", - obj->path, map_name); + pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", + obj->path, map_name); if (strict) return -EINVAL; } @@ -1032,6 +1894,43 @@ skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) return t; } +static const struct btf_type * +resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) +{ + const struct btf_type *t; + + t = skip_mods_and_typedefs(btf, id, NULL); + if (!btf_is_ptr(t)) + return NULL; + + t = skip_mods_and_typedefs(btf, t->type, res_id); + + return btf_is_func_proto(t) ? t : NULL; +} + +static const char *btf_kind_str(const struct btf_type *t) +{ + switch (btf_kind(t)) { + case BTF_KIND_UNKN: return "void"; + case BTF_KIND_INT: return "int"; + case BTF_KIND_PTR: return "ptr"; + case BTF_KIND_ARRAY: return "array"; + case BTF_KIND_STRUCT: return "struct"; + case BTF_KIND_UNION: return "union"; + case BTF_KIND_ENUM: return "enum"; + case BTF_KIND_FWD: return "fwd"; + case BTF_KIND_TYPEDEF: return "typedef"; + case BTF_KIND_VOLATILE: return "volatile"; + case BTF_KIND_CONST: return "const"; + case BTF_KIND_RESTRICT: return "restrict"; + case BTF_KIND_FUNC: return "func"; + case BTF_KIND_FUNC_PROTO: return "func_proto"; + case BTF_KIND_VAR: return "var"; + case BTF_KIND_DATASEC: return "datasec"; + default: return "unknown"; + } +} + /* * Fetch integer attribute of BTF map definition. Such attributes are * represented using a pointer to an array, in which dimensionality of array @@ -1040,28 +1939,28 @@ skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) * type definition, while using only sizeof(void *) space in ELF data section. 
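+ * E.g., the bpf_helpers.h macro __uint(max_entries, 64) declares a member int (*max_entries)[64]; the value 64 is recovered below from the array's nelems.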
*/ static bool get_map_field_int(const char *map_name, const struct btf *btf, - const struct btf_type *def, - const struct btf_member *m, __u32 *res) { + const struct btf_member *m, __u32 *res) +{ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); const char *name = btf__name_by_offset(btf, m->name_off); const struct btf_array *arr_info; const struct btf_type *arr_t; if (!btf_is_ptr(t)) { - pr_warning("map '%s': attr '%s': expected PTR, got %u.\n", - map_name, name, btf_kind(t)); + pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", + map_name, name, btf_kind_str(t)); return false; } arr_t = btf__type_by_id(btf, t->type); if (!arr_t) { - pr_warning("map '%s': attr '%s': type [%u] not found.\n", - map_name, name, t->type); + pr_warn("map '%s': attr '%s': type [%u] not found.\n", + map_name, name, t->type); return false; } if (!btf_is_array(arr_t)) { - pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n", - map_name, name, btf_kind(arr_t)); + pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", + map_name, name, btf_kind_str(arr_t)); return false; } arr_info = btf_array(arr_t); @@ -1069,110 +1968,75 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf, return true; } -static int bpf_object__init_user_btf_map(struct bpf_object *obj, - const struct btf_type *sec, - int var_idx, int sec_idx, - const Elf_Data *data, bool strict) +static int build_map_pin_path(struct bpf_map *map, const char *path) { - const struct btf_type *var, *def, *t; - const struct btf_var_secinfo *vi; - const struct btf_var *var_extra; + char buf[PATH_MAX]; + int len; + + if (!path) + path = "/sys/fs/bpf"; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + return bpf_map__set_pin_path(map, buf); +} + + +static int parse_btf_map_def(struct bpf_object *obj, + struct bpf_map *map, + const struct btf_type *def, + bool strict, bool is_inner, + const char *pin_root_path) +{ + const struct btf_type *t; const struct btf_member *m; - const char *map_name; - struct bpf_map *map; int vlen, i; - vi = btf_var_secinfos(sec) + var_idx; - var = btf__type_by_id(obj->btf, vi->type); - var_extra = btf_var(var); - map_name = btf__name_by_offset(obj->btf, var->name_off); - vlen = btf_vlen(var); - - if (map_name == NULL || map_name[0] == '\0') { - pr_warning("map #%d: empty name.\n", var_idx); - return -EINVAL; - } - if ((__u64)vi->offset + vi->size > data->d_size) { - pr_warning("map '%s' BTF data is corrupted.\n", map_name); - return -EINVAL; - } - if (!btf_is_var(var)) { - pr_warning("map '%s': unexpected var kind %u.\n", - map_name, btf_kind(var)); - return -EINVAL; - } - if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && - var_extra->linkage != BTF_VAR_STATIC) { - pr_warning("map '%s': unsupported var linkage %u.\n", - map_name, var_extra->linkage); - return -EOPNOTSUPP; - } - - def = skip_mods_and_typedefs(obj->btf, var->type, NULL); - if (!btf_is_struct(def)) { - pr_warning("map '%s': unexpected def kind %u.\n", - map_name, btf_kind(var)); - return -EINVAL; - } - if (def->size > vi->size) { - pr_warning("map '%s': invalid def size.\n", map_name); - return -EINVAL; - } - - map = bpf_object__add_map(obj); - if (IS_ERR(map)) - return PTR_ERR(map); - map->name = strdup(map_name); - if (!map->name) { - pr_warning("map '%s': failed to alloc map name.\n", map_name); - return -ENOMEM; - } - map->libbpf_type = LIBBPF_MAP_UNSPEC; - map->def.type = BPF_MAP_TYPE_UNSPEC; - 
map->sec_idx = sec_idx; - map->sec_offset = vi->offset; - pr_debug("map '%s': at sec_idx %d, offset %zu.\n", - map_name, map->sec_idx, map->sec_offset); - vlen = btf_vlen(def); m = btf_members(def); for (i = 0; i < vlen; i++, m++) { const char *name = btf__name_by_offset(obj->btf, m->name_off); if (!name) { - pr_warning("map '%s': invalid field #%d.\n", - map_name, i); + pr_warn("map '%s': invalid field #%d.\n", map->name, i); return -EINVAL; } if (strcmp(name, "type") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map->name, obj->btf, m, &map->def.type)) return -EINVAL; pr_debug("map '%s': found type = %u.\n", - map_name, map->def.type); + map->name, map->def.type); } else if (strcmp(name, "max_entries") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map->name, obj->btf, m, &map->def.max_entries)) return -EINVAL; pr_debug("map '%s': found max_entries = %u.\n", - map_name, map->def.max_entries); + map->name, map->def.max_entries); } else if (strcmp(name, "map_flags") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map->name, obj->btf, m, &map->def.map_flags)) return -EINVAL; pr_debug("map '%s': found map_flags = %u.\n", - map_name, map->def.map_flags); + map->name, map->def.map_flags); + } else if (strcmp(name, "numa_node") == 0) { + if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) + return -EINVAL; + pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); } else if (strcmp(name, "key_size") == 0) { __u32 sz; - if (!get_map_field_int(map_name, obj->btf, def, m, - &sz)) + if (!get_map_field_int(map->name, obj->btf, m, &sz)) return -EINVAL; pr_debug("map '%s': found key_size = %u.\n", - map_name, sz); + map->name, sz); if (map->def.key_size && map->def.key_size != sz) { - pr_warning("map '%s': conflicting key size %u != %u.\n", - map_name, map->def.key_size, sz); + pr_warn("map '%s': conflicting key size %u != %u.\n", + map->name, map->def.key_size, sz); return -EINVAL; } map->def.key_size = sz; @@ -1181,26 +2045,26 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, t = btf__type_by_id(obj->btf, m->type); if (!t) { - pr_warning("map '%s': key type [%d] not found.\n", - map_name, m->type); + pr_warn("map '%s': key type [%d] not found.\n", + map->name, m->type); return -EINVAL; } if (!btf_is_ptr(t)) { - pr_warning("map '%s': key spec is not PTR: %u.\n", - map_name, btf_kind(t)); + pr_warn("map '%s': key spec is not PTR: %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(obj->btf, t->type); if (sz < 0) { - pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n", - map_name, t->type, sz); + pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", + map->name, t->type, (ssize_t)sz); return sz; } - pr_debug("map '%s': found key [%u], sz = %lld.\n", - map_name, t->type, sz); + pr_debug("map '%s': found key [%u], sz = %zd.\n", + map->name, t->type, (ssize_t)sz); if (map->def.key_size && map->def.key_size != sz) { - pr_warning("map '%s': conflicting key size %u != %lld.\n", - map_name, map->def.key_size, sz); + pr_warn("map '%s': conflicting key size %u != %zd.\n", + map->name, map->def.key_size, (ssize_t)sz); return -EINVAL; } map->def.key_size = sz; @@ -1208,14 +2072,13 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, } else if (strcmp(name, "value_size") == 0) { __u32 sz; - if (!get_map_field_int(map_name, obj->btf, def, m, - &sz)) + if 
(!get_map_field_int(map->name, obj->btf, m, &sz)) return -EINVAL; pr_debug("map '%s': found value_size = %u.\n", - map_name, sz); + map->name, sz); if (map->def.value_size && map->def.value_size != sz) { - pr_warning("map '%s': conflicting value size %u != %u.\n", - map_name, map->def.value_size, sz); + pr_warn("map '%s': conflicting value size %u != %u.\n", + map->name, map->def.value_size, sz); return -EINVAL; } map->def.value_size = sz; @@ -1224,50 +2087,210 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, t = btf__type_by_id(obj->btf, m->type); if (!t) { - pr_warning("map '%s': value type [%d] not found.\n", - map_name, m->type); + pr_warn("map '%s': value type [%d] not found.\n", + map->name, m->type); return -EINVAL; } if (!btf_is_ptr(t)) { - pr_warning("map '%s': value spec is not PTR: %u.\n", - map_name, btf_kind(t)); + pr_warn("map '%s': value spec is not PTR: %s.\n", + map->name, btf_kind_str(t)); return -EINVAL; } sz = btf__resolve_size(obj->btf, t->type); if (sz < 0) { - pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n", - map_name, t->type, sz); + pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", + map->name, t->type, (ssize_t)sz); return sz; } - pr_debug("map '%s': found value [%u], sz = %lld.\n", - map_name, t->type, sz); + pr_debug("map '%s': found value [%u], sz = %zd.\n", + map->name, t->type, (ssize_t)sz); if (map->def.value_size && map->def.value_size != sz) { - pr_warning("map '%s': conflicting value size %u != %lld.\n", - map_name, map->def.value_size, sz); + pr_warn("map '%s': conflicting value size %u != %zd.\n", + map->name, map->def.value_size, (ssize_t)sz); return -EINVAL; } map->def.value_size = sz; map->btf_value_type_id = t->type; + } + else if (strcmp(name, "values") == 0) { + int err; + + if (is_inner) { + pr_warn("map '%s': multi-level inner maps not supported.\n", + map->name); + return -ENOTSUP; + } + if (i != vlen - 1) { + pr_warn("map '%s': '%s' member should be last.\n", + map->name, name); + return -EINVAL; + } + if (!bpf_map_type__is_map_in_map(map->def.type)) { + pr_warn("map '%s': should be map-in-map.\n", + map->name); + return -ENOTSUP; + } + if (map->def.value_size && map->def.value_size != 4) { + pr_warn("map '%s': conflicting value size %u != 4.\n", + map->name, map->def.value_size); + return -EINVAL; + } + map->def.value_size = 4; + t = btf__type_by_id(obj->btf, m->type); + if (!t) { + pr_warn("map '%s': map-in-map inner type [%d] not found.\n", + map->name, m->type); + return -EINVAL; + } + if (!btf_is_array(t) || btf_array(t)->nelems) { + pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", + map->name); + return -EINVAL; + } + t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, + NULL); + if (!btf_is_ptr(t)) { + pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", + map->name, btf_kind_str(t)); + return -EINVAL; + } + t = skip_mods_and_typedefs(obj->btf, t->type, NULL); + if (!btf_is_struct(t)) { + pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", + map->name, btf_kind_str(t)); + return -EINVAL; + } + + map->inner_map = calloc(1, sizeof(*map->inner_map)); + if (!map->inner_map) + return -ENOMEM; + map->inner_map->sec_idx = obj->efile.btf_maps_shndx; + map->inner_map->name = malloc(strlen(map->name) + + sizeof(".inner") + 1); + if (!map->inner_map->name) + return -ENOMEM; + sprintf(map->inner_map->name, "%s.inner", map->name); + + err = parse_btf_map_def(obj, map->inner_map, t, strict, + true /* is_inner */, NULL); + if 
(err) + return err; + } else if (strcmp(name, "pinning") == 0) { + __u32 val; + int err; + + if (is_inner) { + pr_debug("map '%s': inner def can't be pinned.\n", + map->name); + return -EINVAL; + } + if (!get_map_field_int(map->name, obj->btf, m, &val)) + return -EINVAL; + pr_debug("map '%s': found pinning = %u.\n", + map->name, val); + + if (val != LIBBPF_PIN_NONE && + val != LIBBPF_PIN_BY_NAME) { + pr_warn("map '%s': invalid pinning value %u.\n", + map->name, val); + return -EINVAL; + } + if (val == LIBBPF_PIN_BY_NAME) { + err = build_map_pin_path(map, pin_root_path); + if (err) { + pr_warn("map '%s': couldn't build pin path.\n", + map->name); + return err; + } + } } else { if (strict) { - pr_warning("map '%s': unknown field '%s'.\n", - map_name, name); + pr_warn("map '%s': unknown field '%s'.\n", + map->name, name); return -ENOTSUP; } pr_debug("map '%s': ignoring unknown field '%s'.\n", - map_name, name); + map->name, name); } } if (map->def.type == BPF_MAP_TYPE_UNSPEC) { - pr_warning("map '%s': map type isn't specified.\n", map_name); + pr_warn("map '%s': map type isn't specified.\n", map->name); return -EINVAL; } return 0; } -static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) +static int bpf_object__init_user_btf_map(struct bpf_object *obj, + const struct btf_type *sec, + int var_idx, int sec_idx, + const Elf_Data *data, bool strict, + const char *pin_root_path) +{ + const struct btf_type *var, *def; + const struct btf_var_secinfo *vi; + const struct btf_var *var_extra; + const char *map_name; + struct bpf_map *map; + + vi = btf_var_secinfos(sec) + var_idx; + var = btf__type_by_id(obj->btf, vi->type); + var_extra = btf_var(var); + map_name = btf__name_by_offset(obj->btf, var->name_off); + + if (map_name == NULL || map_name[0] == '\0') { + pr_warn("map #%d: empty name.\n", var_idx); + return -EINVAL; + } + if ((__u64)vi->offset + vi->size > data->d_size) { + pr_warn("map '%s' BTF data is corrupted.\n", map_name); + return -EINVAL; + } + if (!btf_is_var(var)) { + pr_warn("map '%s': unexpected var kind %s.\n", + map_name, btf_kind_str(var)); + return -EINVAL; + } + if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && + var_extra->linkage != BTF_VAR_STATIC) { + pr_warn("map '%s': unsupported var linkage %u.\n", + map_name, var_extra->linkage); + return -EOPNOTSUPP; + } + + def = skip_mods_and_typedefs(obj->btf, var->type, NULL); + if (!btf_is_struct(def)) { + pr_warn("map '%s': unexpected def kind %s.\n", + map_name, btf_kind_str(var)); + return -EINVAL; + } + if (def->size > vi->size) { + pr_warn("map '%s': invalid def size.\n", map_name); + return -EINVAL; + } + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + map->name = strdup(map_name); + if (!map->name) { + pr_warn("map '%s': failed to alloc map name.\n", map_name); + return -ENOMEM; + } + map->libbpf_type = LIBBPF_MAP_UNSPEC; + map->def.type = BPF_MAP_TYPE_UNSPEC; + map->sec_idx = sec_idx; + map->sec_offset = vi->offset; + map->btf_var_idx = var_idx; + pr_debug("map '%s': at sec_idx %d, offset %zu.\n", + map_name, map->sec_idx, map->sec_offset); + + return parse_btf_map_def(obj, map, def, strict, false, pin_root_path); +} + +static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, + const char *pin_root_path) { const struct btf_type *sec = NULL; int nr_types, i, vlen, err; @@ -1279,12 +2302,11 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) if (obj->efile.btf_maps_shndx < 0) return 0; - scn = elf_getscn(obj->efile.elf, 
obj->efile.btf_maps_shndx); - if (scn) - data = elf_getdata(scn, NULL); + scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); + data = elf_sec_data(obj, scn); if (!scn || !data) { - pr_warning("failed to get Elf_Data from map section %d (%s)\n", - obj->efile.maps_shndx, MAPS_ELF_SEC); + pr_warn("elf: failed to get %s map definitions for %s\n", + MAPS_ELF_SEC, obj->path); return -EINVAL; } @@ -1296,12 +2318,13 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) name = btf__name_by_offset(obj->btf, t->name_off); if (strcmp(name, MAPS_ELF_SEC) == 0) { sec = t; + obj->efile.btf_maps_sec_btf_id = i; break; } } if (!sec) { - pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC); + pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); return -ENOENT; } @@ -1309,7 +2332,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) for (i = 0; i < vlen; i++) { err = bpf_object__init_user_btf_map(obj, sec, i, obj->efile.btf_maps_shndx, - data, strict); + data, strict, + pin_root_path); if (err) return err; } @@ -1317,59 +2341,54 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) return 0; } -static int bpf_object__init_maps(struct bpf_object *obj, int flags) +static int bpf_object__init_maps(struct bpf_object *obj, + const struct bpf_object_open_opts *opts) { - bool strict = !(flags & MAPS_RELAX_COMPAT); + const char *pin_root_path; + bool strict; int err; + strict = !OPTS_GET(opts, relaxed_maps, false); + pin_root_path = OPTS_GET(opts, pin_root_path, NULL); + err = bpf_object__init_user_maps(obj, strict); + err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path); + err = err ?: bpf_object__init_global_data_maps(obj); + err = err ?: bpf_object__init_kconfig_map(obj); + err = err ?: bpf_object__init_struct_ops_maps(obj); if (err) return err; - err = bpf_object__init_user_btf_maps(obj, strict); - if (err) - return err; - - err = bpf_object__init_global_data_maps(obj); - if (err) - return err; - - if (obj->nr_maps) { - qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), - compare_bpf_map); - } return 0; } static bool section_have_execinstr(struct bpf_object *obj, int idx) { - Elf_Scn *scn; GElf_Shdr sh; - scn = elf_getscn(obj->efile.elf, idx); - if (!scn) + if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh)) return false; - if (gelf_getshdr(scn, &sh) != &sh) - return false; - - if (sh.sh_flags & SHF_EXECINSTR) - return true; - - return false; + return sh.sh_flags & SHF_EXECINSTR; } -static void bpf_object__sanitize_btf(struct bpf_object *obj) +static bool btf_needs_sanitization(struct bpf_object *obj) { - bool has_datasec = obj->caps.btf_datasec; - bool has_func = obj->caps.btf_func; - struct btf *btf = obj->btf; + bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); + bool has_func = kernel_supports(FEAT_BTF_FUNC); + + return !has_func || !has_datasec || !has_func_global; +} + +static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) +{ + bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(FEAT_BTF_DATASEC); + bool has_func = kernel_supports(FEAT_BTF_FUNC); struct btf_type *t; int i, j, vlen; - if (!obj->btf || (has_func && has_datasec)) - return; - for (i = 1; i <= btf__get_nr_types(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); @@ -1415,46 +2434,43 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj) } else if (!has_func && btf_is_func(t)) { /* 
replace FUNC with TYPEDEF */ t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); + } else if (!has_func_global && btf_is_func(t)) { + /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ + t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); } } } -static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) +static bool libbpf_needs_btf(const struct bpf_object *obj) { - if (!obj->btf_ext) - return; - - if (!obj->caps.btf_func) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } + return obj->efile.btf_maps_shndx >= 0 || + obj->efile.st_ops_shndx >= 0 || + obj->nr_extern > 0; } -static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj) +static bool kernel_needs_btf(const struct bpf_object *obj) { - return obj->efile.btf_maps_shndx >= 0; + return obj->efile.st_ops_shndx >= 0; } static int bpf_object__init_btf(struct bpf_object *obj, Elf_Data *btf_data, Elf_Data *btf_ext_data) { - bool btf_required = bpf_object__is_btf_mandatory(obj); - int err = 0; + int err = -ENOENT; if (btf_data) { obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); if (IS_ERR(obj->btf)) { - pr_warning("Error loading ELF section %s: %d.\n", - BTF_ELF_SEC, err); - goto out; - } - err = btf__finalize_data(obj, obj->btf); - if (err) { - pr_warning("Error finalizing %s: %d.\n", - BTF_ELF_SEC, err); + err = PTR_ERR(obj->btf); + obj->btf = NULL; + pr_warn("Error loading ELF section %s: %d.\n", + BTF_ELF_SEC, err); goto out; } + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); + err = 0; } if (btf_ext_data) { if (!obj->btf) { @@ -1465,112 +2481,398 @@ static int bpf_object__init_btf(struct bpf_object *obj, obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); if (IS_ERR(obj->btf_ext)) { - pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", - BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); + pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n", + BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); obj->btf_ext = NULL; goto out; } } out: - if (err || IS_ERR(obj->btf)) { - if (btf_required) - err = err ? : PTR_ERR(obj->btf); - else - err = 0; - if (!IS_ERR_OR_NULL(obj->btf)) - btf__free(obj->btf); - obj->btf = NULL; + if (err && libbpf_needs_btf(obj)) { + pr_warn("BTF is required, but is missing or corrupted.\n"); + return err; } - if (btf_required && !obj->btf) { - pr_warning("BTF is required, but is missing or corrupted.\n"); - return err == 0 ? 
-ENOENT : err; + return 0; +} + +static int bpf_object__finalize_btf(struct bpf_object *obj) +{ + int err; + + if (!obj->btf) + return 0; + + err = btf__finalize_data(obj, obj->btf); + if (err) { + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); + return err; + } + + return 0; +} + +static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog) +{ + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_LSM) + return true; + + /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs + * also need vmlinux BTF + */ + if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) + return true; + + return false; +} + +static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) +{ + bool need_vmlinux_btf = false; + struct bpf_program *prog; + int i, err; + + /* CO-RE relocations need kernel BTF */ + if (obj->btf_ext && obj->btf_ext->core_relo_info.len) + need_vmlinux_btf = true; + + /* Support for typed ksyms needs kernel BTF */ + for (i = 0; i < obj->nr_extern; i++) { + const struct extern_desc *ext; + + ext = &obj->externs[i]; + if (ext->type == EXT_KSYM && ext->ksym.type_id) { + need_vmlinux_btf = true; + break; + } + } + + bpf_object__for_each_program(prog, obj) { + if (!prog->load) + continue; + if (libbpf_prog_needs_vmlinux_btf(prog)) { + need_vmlinux_btf = true; + break; + } + } + + if (!need_vmlinux_btf) + return 0; + + obj->btf_vmlinux = libbpf_find_kernel_btf(); + if (IS_ERR(obj->btf_vmlinux)) { + err = PTR_ERR(obj->btf_vmlinux); + pr_warn("Error loading vmlinux BTF: %d\n", err); + obj->btf_vmlinux = NULL; + return err; } return 0; } static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) { + struct btf *kern_btf = obj->btf; + bool btf_mandatory, sanitize; int err = 0; if (!obj->btf) return 0; - bpf_object__sanitize_btf(obj); - bpf_object__sanitize_btf_ext(obj); - - err = btf__load(obj->btf); - if (err) { - pr_warning("Error loading %s into kernel: %d.\n", - BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - /* btf_ext can't exist without btf, so free it as well */ - if (obj->btf_ext) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; + if (!kernel_supports(FEAT_BTF)) { + if (kernel_needs_btf(obj)) { + err = -EOPNOTSUPP; + goto report; } - - if (bpf_object__is_btf_mandatory(obj)) - return err; + pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); + return 0; } + + sanitize = btf_needs_sanitization(obj); + if (sanitize) { + const void *raw_data; + __u32 sz; + + /* clone BTF to sanitize a copy and leave the original intact */ + raw_data = btf__get_raw_data(obj->btf, &sz); + kern_btf = btf__new(raw_data, sz); + if (IS_ERR(kern_btf)) + return PTR_ERR(kern_btf); + + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); + bpf_object__sanitize_btf(obj, kern_btf); + } + + err = btf__load(kern_btf); + if (sanitize) { + if (!err) { + /* move fd to libbpf's BTF */ + btf__set_fd(obj->btf, btf__fd(kern_btf)); + btf__set_fd(kern_btf, -1); + } + btf__free(kern_btf); + } +report: + if (err) { + btf_mandatory = kernel_needs_btf(obj); + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, + btf_mandatory ? "BTF is mandatory, can't proceed." 
+ : "BTF is optional, ignoring."); + if (!btf_mandatory) + err = 0; + } + return err; +} + +static const char *elf_sym_str(const struct bpf_object *obj, size_t off) +{ + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; +} + +static const char *elf_sec_str(const struct bpf_object *obj, size_t off) +{ + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; +} + +static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) +{ + Elf_Scn *scn; + + scn = elf_getscn(obj->efile.elf, idx); + if (!scn) { + pr_warn("elf: failed to get section(%zu) from %s: %s\n", + idx, obj->path, elf_errmsg(-1)); + return NULL; + } + return scn; +} + +static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) +{ + Elf_Scn *scn = NULL; + Elf *elf = obj->efile.elf; + const char *sec_name; + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + sec_name = elf_sec_name(obj, scn); + if (!sec_name) + return NULL; + + if (strcmp(sec_name, name) != 0) + continue; + + return scn; + } + return NULL; +} + +static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr) +{ + if (!scn) + return -EINVAL; + + if (gelf_getshdr(scn, hdr) != hdr) { + pr_warn("elf: failed to get section(%zu) header from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return -EINVAL; + } + return 0; } -static int bpf_object__elf_collect(struct bpf_object *obj, int flags) +static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) { - Elf *elf = obj->efile.elf; - GElf_Ehdr *ep = &obj->efile.ehdr; - Elf_Data *btf_ext_data = NULL; - Elf_Data *btf_data = NULL; - Elf_Scn *scn = NULL; - int idx = 0, err = 0; + const char *name; + GElf_Shdr sh; - /* Elf is corrupted/truncated, avoid calling elf_strptr. 
*/ - if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { - pr_warning("failed to get e_shstrndx from %s\n", obj->path); - return -LIBBPF_ERRNO__FORMAT; + if (!scn) + return NULL; + + if (elf_sec_hdr(obj, scn, &sh)) + return NULL; + + name = elf_sec_str(obj, sh.sh_name); + if (!name) { + pr_warn("elf: failed to get section(%zu) name from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return NULL; } + return name; +} + +static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) +{ + Elf_Data *data; + + if (!scn) + return NULL; + + data = elf_getdata(scn, 0); + if (!data) { + pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", + elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", + obj->path, elf_errmsg(-1)); + return NULL; + } + + return data; +} + +static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx, + size_t off, __u32 sym_type, GElf_Sym *sym) +{ + Elf_Data *symbols = obj->efile.symbols; + size_t n = symbols->d_size / sizeof(GElf_Sym); + int i; + + for (i = 0; i < n; i++) { + if (!gelf_getsym(symbols, i, sym)) + continue; + if (sym->st_shndx != sec_idx || sym->st_value != off) + continue; + if (GELF_ST_TYPE(sym->st_info) != sym_type) + continue; + return 0; + } + + return -ENOENT; +} + +static bool is_sec_name_dwarf(const char *name) +{ + /* approximation, but the actual list is too long */ + return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; +} + +static bool ignore_elf_section(GElf_Shdr *hdr, const char *name) +{ + /* no special handling of .strtab */ + if (hdr->sh_type == SHT_STRTAB) + return true; + + /* ignore .llvm_addrsig section as well */ + if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) + return true; + + /* no subprograms will lead to an empty .text section, ignore it */ + if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && + strcmp(name, ".text") == 0) + return true; + + /* DWARF sections */ + if (is_sec_name_dwarf(name)) + return true; + + if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { + name += sizeof(".rel") - 1; + /* DWARF section relocations */ + if (is_sec_name_dwarf(name)) + return true; + + /* .BTF and .BTF.ext don't need relocations */ + if (strcmp(name, BTF_ELF_SEC) == 0 || + strcmp(name, BTF_EXT_ELF_SEC) == 0) + return true; + } + + return false; +} + +static int cmp_progs(const void *_a, const void *_b) +{ + const struct bpf_program *a = _a; + const struct bpf_program *b = _b; + + if (a->sec_idx != b->sec_idx) + return a->sec_idx < b->sec_idx ? -1 : 1; + + /* sec_insn_off can't be the same within the section */ + return a->sec_insn_off < b->sec_insn_off ? 
-1 : 1; +} + +static int bpf_object__elf_collect(struct bpf_object *obj) +{ + Elf *elf = obj->efile.elf; + Elf_Data *btf_ext_data = NULL; + Elf_Data *btf_data = NULL; + int idx = 0, err = 0; + const char *name; + Elf_Data *data; + Elf_Scn *scn; + GElf_Shdr sh; + + /* a bunch of ELF parsing functionality depends on processing symbols, + * so do the first pass and find the symbol table + */ + scn = NULL; while ((scn = elf_nextscn(elf, scn)) != NULL) { - char *name; - GElf_Shdr sh; - Elf_Data *data; + if (elf_sec_hdr(obj, scn, &sh)) + return -LIBBPF_ERRNO__FORMAT; + if (sh.sh_type == SHT_SYMTAB) { + if (obj->efile.symbols) { + pr_warn("elf: multiple symbol tables in %s\n", obj->path); + return -LIBBPF_ERRNO__FORMAT; + } + + data = elf_sec_data(obj, scn); + if (!data) + return -LIBBPF_ERRNO__FORMAT; + + obj->efile.symbols = data; + obj->efile.symbols_shndx = elf_ndxscn(scn); + obj->efile.strtabidx = sh.sh_link; + } + } + + scn = NULL; + while ((scn = elf_nextscn(elf, scn)) != NULL) { idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - pr_warning("failed to get section(%d) header from %s\n", - idx, obj->path); - return -LIBBPF_ERRNO__FORMAT; - } - name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); - if (!name) { - pr_warning("failed to get section(%d) name from %s\n", - idx, obj->path); + if (elf_sec_hdr(obj, scn, &sh)) return -LIBBPF_ERRNO__FORMAT; - } - data = elf_getdata(scn, 0); - if (!data) { - pr_warning("failed to get section(%d) data from %s(%s)\n", - idx, name, obj->path); + name = elf_sec_str(obj, sh.sh_name); + if (!name) return -LIBBPF_ERRNO__FORMAT; - } - pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", + + if (ignore_elf_section(&sh, name)) + continue; + + data = elf_sec_data(obj, scn); + if (!data) + return -LIBBPF_ERRNO__FORMAT; + + pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", idx, name, (unsigned long)data->d_size, (int)sh.sh_link, (unsigned long)sh.sh_flags, (int)sh.sh_type); if (strcmp(name, "license") == 0) { - err = bpf_object__init_license(obj, - data->d_buf, - data->d_size); + err = bpf_object__init_license(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "version") == 0) { - err = bpf_object__init_kversion(obj, - data->d_buf, - data->d_size); + err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); if (err) return err; } else if (strcmp(name, "maps") == 0) { @@ -1582,95 +2884,403 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { btf_ext_data = data; } else if (sh.sh_type == SHT_SYMTAB) { - if (obj->efile.symbols) { - pr_warning("bpf: multiple SYMTAB in %s\n", - obj->path); - return -LIBBPF_ERRNO__FORMAT; - } - obj->efile.symbols = data; - obj->efile.strtabidx = sh.sh_link; + /* already processed during the first pass above */ } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { if (sh.sh_flags & SHF_EXECINSTR) { if (strcmp(name, ".text") == 0) obj->efile.text_shndx = idx; - err = bpf_object__add_program(obj, data->d_buf, - data->d_size, name, idx); - if (err) { - char errmsg[STRERR_BUFSIZE]; - char *cp = libbpf_strerror_r(-err, errmsg, - sizeof(errmsg)); - - pr_warning("failed to alloc program %s (%s): %s", - name, obj->path, cp); + err = bpf_object__add_programs(obj, data, name, idx); + if (err) return err; - } - } else if (strcmp(name, ".data") == 0) { + } else if (strcmp(name, DATA_SEC) == 0) { obj->efile.data = data; obj->efile.data_shndx = idx; - } else if (strcmp(name, ".rodata") == 0) { + } else if 
(strcmp(name, RODATA_SEC) == 0) { obj->efile.rodata = data; obj->efile.rodata_shndx = idx; + } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { + obj->efile.st_ops_data = data; + obj->efile.st_ops_shndx = idx; } else { - pr_debug("skip section(%d) %s\n", idx, name); + pr_info("elf: skipping unrecognized data section(%d) %s\n", + idx, name); } } else if (sh.sh_type == SHT_REL) { - int nr_reloc = obj->efile.nr_reloc; - void *reloc = obj->efile.reloc; + int nr_sects = obj->efile.nr_reloc_sects; + void *sects = obj->efile.reloc_sects; int sec = sh.sh_info; /* points to other section */ /* Only do relo for section with exec instructions */ - if (!section_have_execinstr(obj, sec)) { - pr_debug("skip relo %s(%d) for section(%d)\n", - name, idx, sec); + if (!section_have_execinstr(obj, sec) && + strcmp(name, ".rel" STRUCT_OPS_SEC) && + strcmp(name, ".rel" MAPS_ELF_SEC)) { + pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", + idx, name, sec, + elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>"); continue; } - reloc = reallocarray(reloc, nr_reloc + 1, - sizeof(*obj->efile.reloc)); - if (!reloc) { - pr_warning("realloc failed\n"); + sects = libbpf_reallocarray(sects, nr_sects + 1, + sizeof(*obj->efile.reloc_sects)); + if (!sects) return -ENOMEM; - } - obj->efile.reloc = reloc; - obj->efile.nr_reloc++; + obj->efile.reloc_sects = sects; + obj->efile.nr_reloc_sects++; - obj->efile.reloc[nr_reloc].shdr = sh; - obj->efile.reloc[nr_reloc].data = data; - } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) { + obj->efile.reloc_sects[nr_sects].shdr = sh; + obj->efile.reloc_sects[nr_sects].data = data; + } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { obj->efile.bss = data; obj->efile.bss_shndx = idx; } else { - pr_debug("skip section(%d) %s\n", idx, name); + pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, + (size_t)sh.sh_size); } } - if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { - pr_warning("Corrupted ELF file: index of strtab invalid\n"); + if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { + pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); return -LIBBPF_ERRNO__FORMAT; } - err = bpf_object__init_btf(obj, btf_data, btf_ext_data); - if (!err) - err = bpf_object__init_maps(obj, flags); - if (!err) - err = bpf_object__sanitize_and_load_btf(obj); - if (!err) - err = bpf_object__init_prog_names(obj); - return err; + + /* sort BPF programs by section name and in-section instruction offset + * for faster search */ + qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); + + return bpf_object__init_btf(obj, btf_data, btf_ext_data); } -static struct bpf_program * -bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) +static bool sym_is_extern(const GElf_Sym *sym) { - struct bpf_program *prog; - size_t i; + int bind = GELF_ST_BIND(sym->st_info); + /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ + return sym->st_shndx == SHN_UNDEF && + (bind == STB_GLOBAL || bind == STB_WEAK) && + GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; +} - for (i = 0; i < obj->nr_programs; i++) { - prog = &obj->programs[i]; - if (prog->idx == idx) - return prog; +static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx) +{ + int bind = GELF_ST_BIND(sym->st_info); + int type = GELF_ST_TYPE(sym->st_info); + + /* in .text section */ + if (sym->st_shndx != text_shndx) + return false; + + /* local function */ + if (bind == STB_LOCAL && type == STT_SECTION) + return true; + + /* 
global function */ + return bind == STB_GLOBAL && type == STT_FUNC; +} + +static int find_extern_btf_id(const struct btf *btf, const char *ext_name) +{ + const struct btf_type *t; + const char *var_name; + int i, n; + + if (!btf) + return -ESRCH; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_var(t)) + continue; + + var_name = btf__name_by_offset(btf, t->name_off); + if (strcmp(var_name, ext_name)) + continue; + + if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) + return -EINVAL; + + return i; } - return NULL; + + return -ENOENT; +} + +static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { + const struct btf_var_secinfo *vs; + const struct btf_type *t; + int i, j, n; + + if (!btf) + return -ESRCH; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + vs = btf_var_secinfos(t); + for (j = 0; j < btf_vlen(t); j++, vs++) { + if (vs->type == ext_btf_id) + return i; + } + } + + return -ENOENT; +} + +static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, + bool *is_signed) +{ + const struct btf_type *t; + const char *name; + + t = skip_mods_and_typedefs(btf, id, NULL); + name = btf__name_by_offset(btf, t->name_off); + + if (is_signed) + *is_signed = false; + switch (btf_kind(t)) { + case BTF_KIND_INT: { + int enc = btf_int_encoding(t); + + if (enc & BTF_INT_BOOL) + return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; + if (is_signed) + *is_signed = enc & BTF_INT_SIGNED; + if (t->size == 1) + return KCFG_CHAR; + if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) + return KCFG_UNKNOWN; + return KCFG_INT; + } + case BTF_KIND_ENUM: + if (t->size != 4) + return KCFG_UNKNOWN; + if (strcmp(name, "libbpf_tristate")) + return KCFG_UNKNOWN; + return KCFG_TRISTATE; + case BTF_KIND_ARRAY: + if (btf_array(t)->nelems == 0) + return KCFG_UNKNOWN; + if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) + return KCFG_UNKNOWN; + return KCFG_CHAR_ARR; + default: + return KCFG_UNKNOWN; + } +} + +static int cmp_externs(const void *_a, const void *_b) +{ + const struct extern_desc *a = _a; + const struct extern_desc *b = _b; + + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + if (a->type == EXT_KCFG) { + /* descending order by alignment requirements */ + if (a->kcfg.align != b->kcfg.align) + return a->kcfg.align > b->kcfg.align ? -1 : 1; + /* ascending order by size, within same alignment class */ + if (a->kcfg.sz != b->kcfg.sz) + return a->kcfg.sz < b->kcfg.sz ? 
-1 : 1; + } + + /* resolve ties by name */ + return strcmp(a->name, b->name); +} + +static int find_int_btf_id(const struct btf *btf) +{ + const struct btf_type *t; + int i, n; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (btf_is_int(t) && btf_int_bits(t) == 32) + return i; + } + + return 0; +} + +static int bpf_object__collect_externs(struct bpf_object *obj) +{ + struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; + const struct btf_type *t; + struct extern_desc *ext; + int i, n, off; + const char *ext_name, *sec_name; + Elf_Scn *scn; + GElf_Shdr sh; + + if (!obj->efile.symbols) + return 0; + + scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); + if (elf_sec_hdr(obj, scn, &sh)) + return -LIBBPF_ERRNO__FORMAT; + + n = sh.sh_size / sh.sh_entsize; + pr_debug("looking for externs among %d symbols...\n", n); + + for (i = 0; i < n; i++) { + GElf_Sym sym; + + if (!gelf_getsym(obj->efile.symbols, i, &sym)) + return -LIBBPF_ERRNO__FORMAT; + if (!sym_is_extern(&sym)) + continue; + ext_name = elf_sym_str(obj, sym.st_name); + if (!ext_name || !ext_name[0]) + continue; + + ext = obj->externs; + ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); + if (!ext) + return -ENOMEM; + obj->externs = ext; + ext = &ext[obj->nr_extern]; + memset(ext, 0, sizeof(*ext)); + obj->nr_extern++; + + ext->btf_id = find_extern_btf_id(obj->btf, ext_name); + if (ext->btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s': %d\n", + ext_name, ext->btf_id); + return ext->btf_id; + } + t = btf__type_by_id(obj->btf, ext->btf_id); + ext->name = btf__name_by_offset(obj->btf, t->name_off); + ext->sym_idx = i; + ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; + + ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); + if (ext->sec_btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", + ext_name, ext->btf_id, ext->sec_btf_id); + return ext->sec_btf_id; + } + sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); + sec_name = btf__name_by_offset(obj->btf, sec->name_off); + + if (strcmp(sec_name, KCONFIG_SEC) == 0) { + kcfg_sec = sec; + ext->type = EXT_KCFG; + ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); + if (ext->kcfg.sz <= 0) { + pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.sz); + return ext->kcfg.sz; + } + ext->kcfg.align = btf__align_of(obj->btf, t->type); + if (ext->kcfg.align <= 0) { + pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.align); + return -EINVAL; + } + ext->kcfg.type = find_kcfg_type(obj->btf, t->type, + &ext->kcfg.is_signed); + if (ext->kcfg.type == KCFG_UNKNOWN) { + pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); + return -ENOTSUP; + } + } else if (strcmp(sec_name, KSYMS_SEC) == 0) { + ksym_sec = sec; + ext->type = EXT_KSYM; + skip_mods_and_typedefs(obj->btf, t->type, + &ext->ksym.type_id); + } else { + pr_warn("unrecognized extern section '%s'\n", sec_name); + return -ENOTSUP; + } + } + pr_debug("collected %d externs total\n", obj->nr_extern); + + if (!obj->nr_extern) + return 0; + + /* sort externs by type, for kcfg ones also by (align, size, name) */ + qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); + + /* for .ksyms section, we need to turn all externs into allocated + * variables in BTF to pass kernel verification; we do this by + * pretending that each extern is a 8-byte variable + */ + if (ksym_sec) { + /* find existing 4-byte integer type in BTF to use for fake 
+ * extern variables in DATASEC + */ + int int_btf_id = find_int_btf_id(obj->btf); + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KSYM) + continue; + pr_debug("extern (ksym) #%d: symbol %d, name %s\n", + i, ext->sym_idx, ext->name); + } + + sec = ksym_sec; + n = btf_vlen(sec); + for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + struct btf_type *vt; + + vt = (void *)btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, vt->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vt->type = int_btf_id; + vs->offset = off; + vs->size = sizeof(int); + } + sec->size = off; + } + + if (kcfg_sec) { + sec = kcfg_sec; + /* for kcfg externs calculate their offsets within a .kconfig map */ + off = 0; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KCFG) + continue; + + ext->kcfg.data_off = roundup(off, ext->kcfg.align); + off = ext->kcfg.data_off + ext->kcfg.sz; + pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", + i, ext->sym_idx, ext->kcfg.data_off, ext->name); + } + sec->size = off; + n = btf_vlen(sec); + for (i = 0; i < n; i++) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + + t = btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, t->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vs->offset = ext->kcfg.data_off; + } + } + return 0; } struct bpf_program * @@ -1680,12 +3290,45 @@ bpf_object__find_program_by_title(const struct bpf_object *obj, struct bpf_program *pos; bpf_object__for_each_program(pos, obj) { - if (pos->section_name && !strcmp(pos->section_name, title)) + if (pos->sec_name && !strcmp(pos->sec_name, title)) return pos; } return NULL; } +static bool prog_is_subprog(const struct bpf_object *obj, + const struct bpf_program *prog) +{ + /* For legacy reasons, libbpf supports an entry-point BPF programs + * without SEC() attribute, i.e., those in the .text section. But if + * there are 2 or more such programs in the .text section, they all + * must be subprograms called from entry-point BPF programs in + * designated SEC()'tions, otherwise there is no way to distinguish + * which of those programs should be loaded vs which are a subprogram. + * Similarly, if there is a function/program in .text and at least one + * other BPF program with custom SEC() attribute, then we just assume + * .text programs are subprograms (even if they are not called from + * other programs), because libbpf never explicitly supported mixing + * SEC()-designated BPF programs and .text entry-point BPF programs. 
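+	 *
+	 * As a sketch (hypothetical names): an object with a SEC("xdp")
+	 * program "handler" plus a .text function "helper" that it calls
+	 * will treat "helper" as a subprogram, so only "handler" is
+	 * returned by bpf_object__find_program_by_name() below.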
+ */ + return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; +} + +struct bpf_program * +bpf_object__find_program_by_name(const struct bpf_object *obj, + const char *name) +{ + struct bpf_program *prog; + + bpf_object__for_each_program(prog, obj) { + if (prog_is_subprog(obj, prog)) + continue; + if (!strcmp(prog->name, name)) + return prog; + } + return NULL; +} + static bool bpf_object__shndx_is_data(const struct bpf_object *obj, int shndx) { @@ -1701,14 +3344,6 @@ static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, shndx == obj->efile.btf_maps_shndx; } -static bool bpf_object__relo_in_known_section(const struct bpf_object *obj, - int shndx) -{ - return shndx == obj->efile.text_shndx || - bpf_object__shndx_is_maps(obj, shndx) || - bpf_object__shndx_is_data(obj, shndx); -} - static enum libbpf_map_type bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) { @@ -1718,134 +3353,271 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) return LIBBPF_MAP_BSS; else if (shndx == obj->efile.rodata_shndx) return LIBBPF_MAP_RODATA; + else if (shndx == obj->efile.symbols_shndx) + return LIBBPF_MAP_KCONFIG; else return LIBBPF_MAP_UNSPEC; } +static int bpf_program__record_reloc(struct bpf_program *prog, + struct reloc_desc *reloc_desc, + __u32 insn_idx, const char *sym_name, + const GElf_Sym *sym, const GElf_Rel *rel) +{ + struct bpf_insn *insn = &prog->insns[insn_idx]; + size_t map_idx, nr_maps = prog->obj->nr_maps; + struct bpf_object *obj = prog->obj; + __u32 shdr_idx = sym->st_shndx; + enum libbpf_map_type type; + const char *sym_sec_name; + struct bpf_map *map; + + reloc_desc->processed = false; + + /* sub-program call relocation */ + if (insn->code == (BPF_JMP | BPF_CALL)) { + if (insn->src_reg != BPF_PSEUDO_CALL) { + pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); + return -LIBBPF_ERRNO__RELOC; + } + /* text_shndx can be 0, if no default "main" program exists */ + if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); + pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + if (sym->st_value % BPF_INSN_SZ) { + pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", + prog->name, sym_name, (size_t)sym->st_value); + return -LIBBPF_ERRNO__RELOC; + } + reloc_desc->type = RELO_CALL; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = sym->st_value; + return 0; + } + + if (!is_ldimm64(insn)) { + pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", + prog->name, sym_name, insn_idx, insn->code); + return -LIBBPF_ERRNO__RELOC; + } + + if (sym_is_extern(sym)) { + int sym_idx = GELF_R_SYM(rel->r_info); + int i, n = obj->nr_extern; + struct extern_desc *ext; + + for (i = 0; i < n; i++) { + ext = &obj->externs[i]; + if (ext->sym_idx == sym_idx) + break; + } + if (i >= n) { + pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", + prog->name, sym_name, sym_idx); + return -LIBBPF_ERRNO__RELOC; + } + pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", + prog->name, i, ext->name, ext->sym_idx, insn_idx); + reloc_desc->type = RELO_EXTERN; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = i; /* sym_off stores extern index */ + return 0; + } + + if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { + pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize 
global var?..\n", + prog->name, sym_name, shdr_idx); + return -LIBBPF_ERRNO__RELOC; + } + + /* loading subprog addresses */ + if (sym_is_subprog(sym, obj->efile.text_shndx)) { + /* global_func: sym->st_value = offset in the section, insn->imm = 0. + * local_func: sym->st_value = 0, insn->imm = offset in the section. + */ + if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { + pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", + prog->name, sym_name, (size_t)sym->st_value, insn->imm); + return -LIBBPF_ERRNO__RELOC; + } + + reloc_desc->type = RELO_SUBPROG_ADDR; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = sym->st_value; + return 0; + } + + type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); + + /* generic map reference relocation */ + if (type == LIBBPF_MAP_UNSPEC) { + if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { + pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + for (map_idx = 0; map_idx < nr_maps; map_idx++) { + map = &obj->maps[map_idx]; + if (map->libbpf_type != type || + map->sec_idx != sym->st_shndx || + map->sec_offset != sym->st_value) + continue; + pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", + prog->name, map_idx, map->name, map->sec_idx, + map->sec_offset, insn_idx); + break; + } + if (map_idx >= nr_maps) { + pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", + prog->name, sym_sec_name, (size_t)sym->st_value); + return -LIBBPF_ERRNO__RELOC; + } + reloc_desc->type = RELO_LD64; + reloc_desc->insn_idx = insn_idx; + reloc_desc->map_idx = map_idx; + reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ + return 0; + } + + /* global data map relocation */ + if (!bpf_object__shndx_is_data(obj, shdr_idx)) { + pr_warn("prog '%s': bad data relo against section '%s'\n", + prog->name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + for (map_idx = 0; map_idx < nr_maps; map_idx++) { + map = &obj->maps[map_idx]; + if (map->libbpf_type != type) + continue; + pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", + prog->name, map_idx, map->name, map->sec_idx, + map->sec_offset, insn_idx); + break; + } + if (map_idx >= nr_maps) { + pr_warn("prog '%s': data relo failed to find map for section '%s'\n", + prog->name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + + reloc_desc->type = RELO_DATA; + reloc_desc->insn_idx = insn_idx; + reloc_desc->map_idx = map_idx; + reloc_desc->sym_off = sym->st_value; + return 0; +} + +static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) +{ + return insn_idx >= prog->sec_insn_off && + insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; +} + +static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, + size_t sec_idx, size_t insn_idx) +{ + int l = 0, r = obj->nr_programs - 1, m; + struct bpf_program *prog; + + while (l < r) { + m = l + (r - l + 1) / 2; + prog = &obj->programs[m]; + + if (prog->sec_idx < sec_idx || + (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) + l = m; + else + r = m - 1; + } + /* matching program could be at index l, but it still might be the + * wrong one, so we need to double check conditions for the last time + */ + prog = &obj->programs[l]; + if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) + return prog; + return NULL; +} + static int 
-bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
-			   Elf_Data *data, struct bpf_object *obj)
+bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
 {
 	Elf_Data *symbols = obj->efile.symbols;
-	struct bpf_map *maps = obj->maps;
-	size_t nr_maps = obj->nr_maps;
-	int i, nrels;
+	const char *relo_sec_name, *sec_name;
+	size_t sec_idx = shdr->sh_info;
+	struct bpf_program *prog;
+	struct reloc_desc *relos;
+	int err, i, nrels;
+	const char *sym_name;
+	__u32 insn_idx;
+	GElf_Sym sym;
+	GElf_Rel rel;
 
-	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
+	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
+	sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+	if (!relo_sec_name || !sec_name)
+		return -EINVAL;
+
+	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
+		 relo_sec_name, sec_idx, sec_name);
 	nrels = shdr->sh_size / shdr->sh_entsize;
 
-	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
-	if (!prog->reloc_desc) {
-		pr_warning("failed to alloc memory in relocation\n");
-		return -ENOMEM;
-	}
-	prog->nr_reloc = nrels;
-
 	for (i = 0; i < nrels; i++) {
-		struct bpf_insn *insns = prog->insns;
-		enum libbpf_map_type type;
-		unsigned int insn_idx;
-		unsigned int shdr_idx;
-		const char *name;
-		size_t map_idx;
-		GElf_Sym sym;
-		GElf_Rel rel;
-
 		if (!gelf_getrel(data, i, &rel)) {
-			pr_warning("relocation: failed to get %d reloc\n", i);
+			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
 			return -LIBBPF_ERRNO__FORMAT;
 		}
-
 		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
-			pr_warning("relocation: symbol %"PRIx64" not found\n",
-				   GELF_R_SYM(rel.r_info));
+			pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
+				relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
+			return -LIBBPF_ERRNO__FORMAT;
+		}
+		if (rel.r_offset % BPF_INSN_SZ) {
+			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
+				relo_sec_name, (size_t)rel.r_offset, i);
 			return -LIBBPF_ERRNO__FORMAT;
 		}
-		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
-				  sym.st_name) ?
: "<?>"; + insn_idx = rel.r_offset / BPF_INSN_SZ; + /* relocations against static functions are recorded as + * relocations against the section that contains a function; + * in such case, symbol will be STT_SECTION and sym.st_name + * will point to empty string (0), so fetch section name + * instead + */ + if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0) + sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx)); + else + sym_name = elf_sym_str(obj, sym.st_name); + sym_name = sym_name ?: "<?"; - pr_debug("relo for %lld value %lld name %d (\'%s\')\n", - (long long) (rel.r_info >> 32), - (long long) sym.st_value, sym.st_name, name); + pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", + relo_sec_name, i, insn_idx, sym_name); - shdr_idx = sym.st_shndx; - insn_idx = rel.r_offset / sizeof(struct bpf_insn); - pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n", - insn_idx, shdr_idx); - - if (shdr_idx >= SHN_LORESERVE) { - pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n", - name, shdr_idx, insn_idx, - insns[insn_idx].code); - return -LIBBPF_ERRNO__RELOC; - } - if (!bpf_object__relo_in_known_section(obj, shdr_idx)) { - pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n", - prog->section_name, shdr_idx); + prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); + if (!prog) { + pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n", + relo_sec_name, i, sec_name, insn_idx); return -LIBBPF_ERRNO__RELOC; } - if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { - if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { - pr_warning("incorrect bpf_call opcode\n"); - return -LIBBPF_ERRNO__RELOC; - } - prog->reloc_desc[i].type = RELO_CALL; - prog->reloc_desc[i].insn_idx = insn_idx; - prog->reloc_desc[i].text_off = sym.st_value; - obj->has_pseudo_calls = true; - continue; - } + relos = libbpf_reallocarray(prog->reloc_desc, + prog->nr_reloc + 1, sizeof(*relos)); + if (!relos) + return -ENOMEM; + prog->reloc_desc = relos; - if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { - pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", - insn_idx, insns[insn_idx].code); - return -LIBBPF_ERRNO__RELOC; - } + /* adjust insn_idx to local BPF program frame of reference */ + insn_idx -= prog->sec_insn_off; + err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], + insn_idx, sym_name, &sym, &rel); + if (err) + return err; - if (bpf_object__shndx_is_maps(obj, shdr_idx) || - bpf_object__shndx_is_data(obj, shdr_idx)) { - type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); - if (type != LIBBPF_MAP_UNSPEC) { - if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) { - pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n", - name, insn_idx, insns[insn_idx].code); - return -LIBBPF_ERRNO__RELOC; - } - if (!obj->caps.global_data) { - pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n", - name, insn_idx); - return -LIBBPF_ERRNO__RELOC; - } - } - - for (map_idx = 0; map_idx < nr_maps; map_idx++) { - if (maps[map_idx].libbpf_type != type) - continue; - if (type != LIBBPF_MAP_UNSPEC || - (maps[map_idx].sec_idx == sym.st_shndx && - maps[map_idx].sec_offset == sym.st_value)) { - pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n", - map_idx, maps[map_idx].name, - maps[map_idx].sec_idx, - 
maps[map_idx].sec_offset, - insn_idx); - break; - } - } - - if (map_idx >= nr_maps) { - pr_warning("bpf relocation: map_idx %d larger than %d\n", - (int)map_idx, (int)nr_maps - 1); - return -LIBBPF_ERRNO__RELOC; - } - - prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ? - RELO_DATA : RELO_LD64; - prog->reloc_desc[i].insn_idx = insn_idx; - prog->reloc_desc[i].map_idx = map_idx; - } + prog->nr_reloc++; } return 0; } @@ -1856,8 +3628,12 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) __u32 key_type_id = 0, value_type_id = 0; int ret; - /* if it's BTF-defined map, we don't need to search for type IDs */ - if (map->sec_idx == obj->efile.btf_maps_shndx) + /* if it's BTF-defined map, we don't need to search for type IDs. + * For struct_ops map, it does not need btf_key_type_id and + * btf_value_type_id. + */ + if (map->sec_idx == obj->efile.btf_maps_shndx || + bpf_map__is_struct_ops(map)) return 0; if (!bpf_map__is_internal(map)) { @@ -1924,6 +3700,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) map->def.map_flags = info.map_flags; map->btf_key_type_id = info.btf_key_type_id; map->btf_value_type_id = info.btf_value_type_id; + map->reused = true; return 0; @@ -1934,22 +3711,29 @@ err_free_new_name: return err; } +__u32 bpf_map__max_entries(const struct bpf_map *map) +{ + return map->def.max_entries; +} + +int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.max_entries = max_entries; + return 0; +} + int bpf_map__resize(struct bpf_map *map, __u32 max_entries) { if (!map || !max_entries) return -EINVAL; - /* If map already created, its attributes can't be changed. */ - if (map->fd >= 0) - return -EBUSY; - - map->def.max_entries = max_entries; - - return 0; + return bpf_map__set_max_entries(map, max_entries); } static int -bpf_object__probe_name(struct bpf_object *obj) +bpf_object__probe_loading(struct bpf_object *obj) { struct bpf_load_program_attr attr; char *cp, errmsg[STRERR_BUFSIZE]; @@ -1969,27 +3753,48 @@ bpf_object__probe_name(struct bpf_object *obj) ret = bpf_load_program_xattr(&attr, NULL, 0); if (ret < 0) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n", - __func__, cp, errno); - return -errno; + ret = errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " + "program. 
Make sure your kernel supports BPF " + "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " + "set to big enough value.\n", __func__, cp, ret); + return -ret; } close(ret); - /* now try the same program, but with the name */ - - attr.name = "test"; - ret = bpf_load_program_xattr(&attr, NULL, 0); - if (ret >= 0) { - obj->caps.name = 1; - close(ret); - } - return 0; } -static int -bpf_object__probe_global_data(struct bpf_object *obj) +static int probe_fd(int fd) +{ + if (fd >= 0) + close(fd); + return fd >= 0; +} + +static int probe_kern_prog_name(void) +{ + struct bpf_load_program_attr attr; + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret; + + /* make sure loading with name works */ + + memset(&attr, 0, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; + attr.insns = insns; + attr.insns_cnt = ARRAY_SIZE(insns); + attr.license = "GPL"; + attr.name = "test"; + ret = bpf_load_program_xattr(&attr, NULL, 0); + return probe_fd(ret); +} + +static int probe_kern_global_data(void) { struct bpf_load_program_attr prg_attr; struct bpf_create_map_attr map_attr; @@ -2010,10 +3815,11 @@ bpf_object__probe_global_data(struct bpf_object *obj) map = bpf_create_map_xattr(&map_attr); if (map < 0) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n", - __func__, cp, errno); - return -errno; + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", + __func__, cp, -ret); + return ret; } insns[0].imm = map; @@ -2025,18 +3831,25 @@ bpf_object__probe_global_data(struct bpf_object *obj) prg_attr.license = "GPL"; ret = bpf_load_program_xattr(&prg_attr, NULL, 0); - if (ret >= 0) { - obj->caps.global_data = 1; - close(ret); - } - close(map); - return 0; + return probe_fd(ret); } -static int bpf_object__probe_btf_func(struct bpf_object *obj) +static int probe_kern_btf(void) { - const char strs[] = "\0int\0x\0a"; + static const char strs[] = "\0int"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_func(void) +{ + static const char strs[] = "\0int\0x\0a"; /* void x(int a) {} */ __u32 types[] = { /* int */ @@ -2047,22 +3860,32 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj) /* FUNC x */ /* [3] */ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), }; - int btf_fd; - btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (btf_fd >= 0) { - obj->caps.btf_func = 1; - close(btf_fd); - return 1; - } - - return 0; + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); } -static int bpf_object__probe_btf_datasec(struct bpf_object *obj) +static int probe_kern_btf_func_global(void) { - const char strs[] = "\0x\0.data"; + static const char strs[] = "\0int\0x\0a"; + /* static void x(int a) {} */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* FUNC_PROTO */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), + BTF_PARAM_ENC(7, 1), + /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */ + BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_datasec(void) +{ + static 
const char strs[] = "\0x\0.data"; /* static int a; */ __u32 types[] = { /* int */ @@ -2074,232 +3897,458 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj) BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), BTF_VAR_SECINFO_ENC(2, 0, 4), }; - int btf_fd; - btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), - strs, sizeof(strs)); - if (btf_fd >= 0) { - obj->caps.btf_datasec = 1; - close(btf_fd); - return 1; + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_array_mmap(void) +{ + struct bpf_create_map_attr attr = { + .map_type = BPF_MAP_TYPE_ARRAY, + .map_flags = BPF_F_MMAPABLE, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 1, + }; + + return probe_fd(bpf_create_map_xattr(&attr)); +} + +static int probe_kern_exp_attach_type(void) +{ + struct bpf_load_program_attr attr; + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + memset(&attr, 0, sizeof(attr)); + /* use any valid combination of program type and (optional) + * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) + * to see if kernel supports expected_attach_type field for + * BPF_PROG_LOAD command + */ + attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; + attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE; + attr.insns = insns; + attr.insns_cnt = ARRAY_SIZE(insns); + attr.license = "GPL"; + + return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); +} + +static int probe_kern_probe_read_kernel(void) +{ + struct bpf_load_program_attr attr; + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ + BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ + BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), + BPF_EXIT_INSN(), + }; + + memset(&attr, 0, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_KPROBE; + attr.insns = insns; + attr.insns_cnt = ARRAY_SIZE(insns); + attr.license = "GPL"; + + return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); +} + +static int probe_prog_bind_map(void) +{ + struct bpf_load_program_attr prg_attr; + struct bpf_create_map_attr map_attr; + char *cp, errmsg[STRERR_BUFSIZE]; + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret, map, prog; + + memset(&map_attr, 0, sizeof(map_attr)); + map_attr.map_type = BPF_MAP_TYPE_ARRAY; + map_attr.key_size = sizeof(int); + map_attr.value_size = 32; + map_attr.max_entries = 1; + + map = bpf_create_map_xattr(&map_attr); + if (map < 0) { + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). 
Couldn't create simple array map.\n", + __func__, cp, -ret); + return ret; } - return 0; + memset(&prg_attr, 0, sizeof(prg_attr)); + prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; + prg_attr.insns = insns; + prg_attr.insns_cnt = ARRAY_SIZE(insns); + prg_attr.license = "GPL"; + + prog = bpf_load_program_xattr(&prg_attr, NULL, 0); + if (prog < 0) { + close(map); + return 0; + } + + ret = bpf_prog_bind_map(prog, map, NULL); + + close(map); + close(prog); + + return ret >= 0; +} + +enum kern_feature_result { + FEAT_UNKNOWN = 0, + FEAT_SUPPORTED = 1, + FEAT_MISSING = 2, +}; + +typedef int (*feature_probe_fn)(void); + +static struct kern_feature_desc { + const char *desc; + feature_probe_fn probe; + enum kern_feature_result res; +} feature_probes[__FEAT_CNT] = { + [FEAT_PROG_NAME] = { + "BPF program name", probe_kern_prog_name, + }, + [FEAT_GLOBAL_DATA] = { + "global variables", probe_kern_global_data, + }, + [FEAT_BTF] = { + "minimal BTF", probe_kern_btf, + }, + [FEAT_BTF_FUNC] = { + "BTF functions", probe_kern_btf_func, + }, + [FEAT_BTF_GLOBAL_FUNC] = { + "BTF global function", probe_kern_btf_func_global, + }, + [FEAT_BTF_DATASEC] = { + "BTF data section and variable", probe_kern_btf_datasec, + }, + [FEAT_ARRAY_MMAP] = { + "ARRAY map mmap()", probe_kern_array_mmap, + }, + [FEAT_EXP_ATTACH_TYPE] = { + "BPF_PROG_LOAD expected_attach_type attribute", + probe_kern_exp_attach_type, + }, + [FEAT_PROBE_READ_KERN] = { + "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, + }, + [FEAT_PROG_BIND_MAP] = { + "BPF_PROG_BIND_MAP support", probe_prog_bind_map, + } +}; + +static bool kernel_supports(enum kern_feature_id feat_id) +{ + struct kern_feature_desc *feat = &feature_probes[feat_id]; + int ret; + + if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { + ret = feat->probe(); + if (ret > 0) { + WRITE_ONCE(feat->res, FEAT_SUPPORTED); + } else if (ret == 0) { + WRITE_ONCE(feat->res, FEAT_MISSING); + } else { + pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); + WRITE_ONCE(feat->res, FEAT_MISSING); + } + } + + return READ_ONCE(feat->res) == FEAT_SUPPORTED; +} + +static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) +{ + struct bpf_map_info map_info = {}; + char msg[STRERR_BUFSIZE]; + __u32 map_info_len; + + map_info_len = sizeof(map_info); + + if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(errno, msg, sizeof(msg))); + return false; + } + + return (map_info.type == map->def.type && + map_info.key_size == map->def.key_size && + map_info.value_size == map->def.value_size && + map_info.max_entries == map->def.max_entries && + map_info.map_flags == map->def.map_flags); } static int -bpf_object__probe_caps(struct bpf_object *obj) +bpf_object__reuse_map(struct bpf_map *map) { - int (*probe_fn[])(struct bpf_object *obj) = { - bpf_object__probe_name, - bpf_object__probe_global_data, - bpf_object__probe_btf_func, - bpf_object__probe_btf_datasec, - }; - int i, ret; + char *cp, errmsg[STRERR_BUFSIZE]; + int err, pin_fd; - for (i = 0; i < ARRAY_SIZE(probe_fn); i++) { - ret = probe_fn[i](obj); - if (ret < 0) - pr_debug("Probe #%d failed with %d.\n", i, ret); + pin_fd = bpf_obj_get(map->pin_path); + if (pin_fd < 0) { + err = -errno; + if (err == -ENOENT) { + pr_debug("found no pinned map to reuse at '%s'\n", + map->pin_path); + return 0; + } + + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("couldn't retrieve pinned map '%s': %s\n", + map->pin_path, cp); + 
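+		/* Note that only -ENOENT ("nothing pinned at pin_path yet") is
+		 * tolerated above; any other failure to open the pin (e.g., a
+		 * permission problem on the BPF FS) is propagated as fatal so
+		 * that a real pinning setup issue is not silently ignored. */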
return err; } + if (!map_is_reuse_compat(map, pin_fd)) { + pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", + map->pin_path); + close(pin_fd); + return -EINVAL; + } + + err = bpf_map__reuse_fd(map, pin_fd); + if (err) { + close(pin_fd); + return err; + } + map->pinned = true; + pr_debug("reused pinned map at '%s'\n", map->pin_path); + return 0; } static int bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) { + enum libbpf_map_type map_type = map->libbpf_type; char *cp, errmsg[STRERR_BUFSIZE]; int err, zero = 0; - __u8 *data; - /* Nothing to do here since kernel already zero-initializes .bss map. */ - if (map->libbpf_type == LIBBPF_MAP_BSS) - return 0; + err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); + if (err) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error setting initial map(%s) contents: %s\n", + map->name, cp); + return err; + } - data = map->libbpf_type == LIBBPF_MAP_DATA ? - obj->sections.data : obj->sections.rodata; - - err = bpf_map_update_elem(map->fd, &zero, data, 0); - /* Freeze .rodata map as read-only from syscall side. */ - if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) { + /* Freeze .rodata and .kconfig map as read-only from syscall side. */ + if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { err = bpf_map_freeze(map->fd); if (err) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("Error freezing map(%s) as read-only: %s\n", - map->name, cp); - err = 0; + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error freezing map(%s) as read-only: %s\n", + map->name, cp); + return err; } } - return err; + return 0; +} + +static void bpf_map__destroy(struct bpf_map *map); + +static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) +{ + struct bpf_create_map_attr create_attr; + struct bpf_map_def *def = &map->def; + + memset(&create_attr, 0, sizeof(create_attr)); + + if (kernel_supports(FEAT_PROG_NAME)) + create_attr.name = map->name; + create_attr.map_ifindex = map->map_ifindex; + create_attr.map_type = def->type; + create_attr.map_flags = def->map_flags; + create_attr.key_size = def->key_size; + create_attr.value_size = def->value_size; + create_attr.numa_node = map->numa_node; + + if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { + int nr_cpus; + + nr_cpus = libbpf_num_possible_cpus(); + if (nr_cpus < 0) { + pr_warn("map '%s': failed to determine number of system CPUs: %d\n", + map->name, nr_cpus); + return nr_cpus; + } + pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); + create_attr.max_entries = nr_cpus; + } else { + create_attr.max_entries = def->max_entries; + } + + if (bpf_map__is_struct_ops(map)) + create_attr.btf_vmlinux_value_type_id = + map->btf_vmlinux_value_type_id; + + create_attr.btf_fd = 0; + create_attr.btf_key_type_id = 0; + create_attr.btf_value_type_id = 0; + if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { + create_attr.btf_fd = btf__fd(obj->btf); + create_attr.btf_key_type_id = map->btf_key_type_id; + create_attr.btf_value_type_id = map->btf_value_type_id; + } + + if (bpf_map_type__is_map_in_map(def->type)) { + if (map->inner_map) { + int err; + + err = bpf_object__create_map(obj, map->inner_map); + if (err) { + pr_warn("map '%s': failed to create inner map: %d\n", + map->name, err); + return err; + } + map->inner_map_fd = bpf_map__fd(map->inner_map); + } + if (map->inner_map_fd >= 0) + 
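+			/* inner_map_fd may also have been preset by the caller
+			 * before load, not only by the recursive create above */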
create_attr.inner_map_fd = map->inner_map_fd; + } + + map->fd = bpf_create_map_xattr(&create_attr); + if (map->fd < 0 && (create_attr.btf_key_type_id || + create_attr.btf_value_type_id)) { + char *cp, errmsg[STRERR_BUFSIZE]; + int err = -errno; + + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", + map->name, cp, err); + create_attr.btf_fd = 0; + create_attr.btf_key_type_id = 0; + create_attr.btf_value_type_id = 0; + map->btf_key_type_id = 0; + map->btf_value_type_id = 0; + map->fd = bpf_create_map_xattr(&create_attr); + } + + if (map->fd < 0) + return -errno; + + if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } + + return 0; +} + +static int init_map_slots(struct bpf_map *map) +{ + const struct bpf_map *targ_map; + unsigned int i; + int fd, err; + + for (i = 0; i < map->init_slots_sz; i++) { + if (!map->init_slots[i]) + continue; + + targ_map = map->init_slots[i]; + fd = bpf_map__fd(targ_map); + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + if (err) { + err = -errno; + pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", + map->name, i, targ_map->name, + fd, err); + return err; + } + pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", + map->name, i, targ_map->name, fd); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + return 0; } static int bpf_object__create_maps(struct bpf_object *obj) { - struct bpf_create_map_attr create_attr = {}; - int nr_cpus = 0; - unsigned int i; + struct bpf_map *map; + char *cp, errmsg[STRERR_BUFSIZE]; + unsigned int i, j; int err; for (i = 0; i < obj->nr_maps; i++) { - struct bpf_map *map = &obj->maps[i]; - struct bpf_map_def *def = &map->def; - char *cp, errmsg[STRERR_BUFSIZE]; - int *pfd = &map->fd; + map = &obj->maps[i]; + + if (map->pin_path) { + err = bpf_object__reuse_map(map); + if (err) { + pr_warn("map '%s': error reusing pinned map\n", + map->name); + goto err_out; + } + } if (map->fd >= 0) { - pr_debug("skip map create (preset) %s: fd=%d\n", + pr_debug("map '%s': skipping creation (preset fd=%d)\n", map->name, map->fd); - continue; - } - - if (obj->caps.name) - create_attr.name = map->name; - create_attr.map_ifindex = map->map_ifindex; - create_attr.map_type = def->type; - create_attr.map_flags = def->map_flags; - create_attr.key_size = def->key_size; - create_attr.value_size = def->value_size; - if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && - !def->max_entries) { - if (!nr_cpus) - nr_cpus = libbpf_num_possible_cpus(); - if (nr_cpus < 0) { - pr_warning("failed to determine number of system CPUs: %d\n", - nr_cpus); - err = nr_cpus; - goto err_out; - } - pr_debug("map '%s': setting size to %d\n", - map->name, nr_cpus); - create_attr.max_entries = nr_cpus; } else { - create_attr.max_entries = def->max_entries; - } - create_attr.btf_fd = 0; - create_attr.btf_key_type_id = 0; - create_attr.btf_value_type_id = 0; - if (bpf_map_type__is_map_in_map(def->type) && - map->inner_map_fd >= 0) - create_attr.inner_map_fd = map->inner_map_fd; - - if (obj->btf && !bpf_map_find_btf_info(obj, map)) { - create_attr.btf_fd = btf__fd(obj->btf); - create_attr.btf_key_type_id = map->btf_key_type_id; - create_attr.btf_value_type_id = map->btf_value_type_id; - } - - *pfd = bpf_create_map_xattr(&create_attr); - if (*pfd < 0 && (create_attr.btf_key_type_id || - create_attr.btf_value_type_id)) { - err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - 
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", - map->name, cp, err); - create_attr.btf_fd = 0; - create_attr.btf_key_type_id = 0; - create_attr.btf_value_type_id = 0; - map->btf_key_type_id = 0; - map->btf_value_type_id = 0; - *pfd = bpf_create_map_xattr(&create_attr); - } - - if (*pfd < 0) { - size_t j; - - err = -errno; -err_out: - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warning("failed to create map (name: '%s'): %s(%d)\n", - map->name, cp, err); - for (j = 0; j < i; j++) - zclose(obj->maps[j].fd); - return err; - } - - if (bpf_map__is_internal(map)) { - err = bpf_object__populate_internal_map(obj, map); - if (err < 0) { - zclose(*pfd); + err = bpf_object__create_map(obj, map); + if (err) goto err_out; + + pr_debug("map '%s': created successfully, fd=%d\n", + map->name, map->fd); + + if (bpf_map__is_internal(map)) { + err = bpf_object__populate_internal_map(obj, map); + if (err < 0) { + zclose(map->fd); + goto err_out; + } + } + + if (map->init_slots_sz) { + err = init_map_slots(map); + if (err < 0) { + zclose(map->fd); + goto err_out; + } } } - pr_debug("created map %s: fd=%d\n", map->name, *pfd); + if (map->pin_path && !map->pinned) { + err = bpf_map__pin(map, NULL); + if (err) { + pr_warn("map '%s': failed to auto-pin at '%s': %d\n", + map->name, map->pin_path, err); + zclose(map->fd); + goto err_out; + } + } } return 0; -} -static int -check_btf_ext_reloc_err(struct bpf_program *prog, int err, - void *btf_prog_info, const char *info_name) -{ - if (err != -ENOENT) { - pr_warning("Error in loading %s for sec %s.\n", - info_name, prog->section_name); - return err; - } - - /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */ - - if (btf_prog_info) { - /* - * Some info has already been found but has problem - * in the last btf_ext reloc. Must have to error out. - */ - pr_warning("Error in relocating %s for sec %s.\n", - info_name, prog->section_name); - return err; - } - - /* Have problem loading the very first info. Ignore the rest. */ - pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", - info_name, prog->section_name, info_name); - return 0; -} - -static int -bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj, - const char *section_name, __u32 insn_offset) -{ - int err; - - if (!insn_offset || prog->func_info) { - /* - * !insn_offset => main program - * - * For sub prog, the main program's func_info has to - * be loaded first (i.e. 
prog->func_info != NULL) - */ - err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext, - section_name, insn_offset, - &prog->func_info, - &prog->func_info_cnt); - if (err) - return check_btf_ext_reloc_err(prog, err, - prog->func_info, - "bpf_func_info"); - - prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext); - } - - if (!insn_offset || prog->line_info) { - err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext, - section_name, insn_offset, - &prog->line_info, - &prog->line_info_cnt); - if (err) - return check_btf_ext_reloc_err(prog, err, - prog->line_info, - "bpf_line_info"); - - prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); - } - - return 0; +err_out: + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); + pr_perm_msg(err); + for (j = 0; j < i; j++) + zclose(obj->maps[j].fd); + return err; } #define BPF_CORE_SPEC_MAX_LEN 64 @@ -2315,14 +4364,18 @@ struct bpf_core_spec { const struct btf *btf; /* high-level spec: named fields and array indices only */ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; + /* original unresolved (no skip_mods_or_typedefs) root type ID */ + __u32 root_type_id; + /* CO-RE relocation kind */ + enum bpf_core_relo_kind relo_kind; /* high-level spec length */ int len; /* raw, low-level spec: 1-to-1 with accessor spec string */ int raw_spec[BPF_CORE_SPEC_MAX_LEN]; /* raw spec length */ int raw_len; - /* field byte offset represented by spec */ - __u32 offset; + /* field bit offset represented by spec */ + __u32 bit_offset; }; static bool str_is_empty(const char *s) @@ -2330,11 +4383,84 @@ static bool str_is_empty(const char *s) return !s || !s[0]; } +static bool is_flex_arr(const struct btf *btf, + const struct bpf_core_accessor *acc, + const struct btf_array *arr) +{ + const struct btf_type *t; + + /* not a flexible array, if not inside a struct or has non-zero size */ + if (!acc->name || arr->nelems > 0) + return false; + + /* has to be the last member of enclosing struct */ + t = btf__type_by_id(btf, acc->type_id); + return acc->idx == btf_vlen(t) - 1; +} + +static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_FIELD_BYTE_OFFSET: return "byte_off"; + case BPF_FIELD_BYTE_SIZE: return "byte_sz"; + case BPF_FIELD_EXISTS: return "field_exists"; + case BPF_FIELD_SIGNED: return "signed"; + case BPF_FIELD_LSHIFT_U64: return "lshift_u64"; + case BPF_FIELD_RSHIFT_U64: return "rshift_u64"; + case BPF_TYPE_ID_LOCAL: return "local_type_id"; + case BPF_TYPE_ID_TARGET: return "target_type_id"; + case BPF_TYPE_EXISTS: return "type_exists"; + case BPF_TYPE_SIZE: return "type_size"; + case BPF_ENUMVAL_EXISTS: return "enumval_exists"; + case BPF_ENUMVAL_VALUE: return "enumval_value"; + default: return "unknown"; + } +} + +static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_FIELD_BYTE_OFFSET: + case BPF_FIELD_BYTE_SIZE: + case BPF_FIELD_EXISTS: + case BPF_FIELD_SIGNED: + case BPF_FIELD_LSHIFT_U64: + case BPF_FIELD_RSHIFT_U64: + return true; + default: + return false; + } +} + +static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_TYPE_ID_LOCAL: + case BPF_TYPE_ID_TARGET: + case BPF_TYPE_EXISTS: + case BPF_TYPE_SIZE: + return true; + default: + return false; + } +} + +static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_ENUMVAL_EXISTS: + case BPF_ENUMVAL_VALUE: + return true; + default: 
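+		/* all remaining relocation kinds are field- or type-based */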
+ return false; + } +} + /* - * Turn bpf_offset_reloc into a low- and high-level spec representation, + * Turn bpf_core_relo into a low- and high-level spec representation, * validating correctness along the way, as well as calculating resulting - * field offset (in bytes), specified by accessor string. Low-level spec - * captures every single level of nestedness, including traversing anonymous + * field bit offset, specified by accessor string. Low-level spec captures + * every single level of nestedness, including traversing anonymous * struct/union members. High-level one only captures semantically meaningful * "turning points": named fields and array indicies. * E.g., for this case: @@ -2360,13 +4486,21 @@ static bool str_is_empty(const char *s) * - field 'a' access (corresponds to '2' in low-level spec); * - array element #3 access (corresponds to '3' in low-level spec). * + * Type-based relocations (TYPE_EXISTS/TYPE_SIZE, + * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their + * spec and raw_spec are kept empty. + * + * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access + * string to specify enumerator's value index that need to be relocated. */ -static int bpf_core_spec_parse(const struct btf *btf, +static int bpf_core_parse_spec(const struct btf *btf, __u32 type_id, const char *spec_str, + enum bpf_core_relo_kind relo_kind, struct bpf_core_spec *spec) { int access_idx, parsed_len, i; + struct bpf_core_accessor *acc; const struct btf_type *t; const char *name; __u32 id; @@ -2377,6 +4511,15 @@ static int bpf_core_spec_parse(const struct btf *btf, memset(spec, 0, sizeof(*spec)); spec->btf = btf; + spec->root_type_id = type_id; + spec->relo_kind = relo_kind; + + /* type-based relocations don't have a field access string */ + if (core_relo_is_type_based(relo_kind)) { + if (strcmp(spec_str, "0")) + return -EINVAL; + return 0; + } /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ while (*spec_str) { @@ -2393,20 +4536,32 @@ static int bpf_core_spec_parse(const struct btf *btf, if (spec->raw_len == 0) return -EINVAL; - /* first spec value is always reloc type array index */ t = skip_mods_and_typedefs(btf, type_id, &id); if (!t) return -EINVAL; access_idx = spec->raw_spec[0]; - spec->spec[0].type_id = id; - spec->spec[0].idx = access_idx; + acc = &spec->spec[0]; + acc->type_id = id; + acc->idx = access_idx; spec->len++; + if (core_relo_is_enumval_based(relo_kind)) { + if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) + return -EINVAL; + + /* record enumerator name in a first accessor */ + acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); + return 0; + } + + if (!core_relo_is_field_based(relo_kind)) + return -EINVAL; + sz = btf__resolve_size(btf, id); if (sz < 0) return sz; - spec->offset = access_idx * sz; + spec->bit_offset = access_idx * sz * 8; for (i = 1; i < spec->raw_len; i++) { t = skip_mods_and_typedefs(btf, id, &id); @@ -2414,20 +4569,17 @@ static int bpf_core_spec_parse(const struct btf *btf, return -EINVAL; access_idx = spec->raw_spec[i]; + acc = &spec->spec[spec->len]; if (btf_is_composite(t)) { const struct btf_member *m; - __u32 offset; + __u32 bit_offset; if (access_idx >= btf_vlen(t)) return -EINVAL; - if (btf_member_bitfield_size(t, access_idx)) - return -EINVAL; - offset = btf_member_bit_offset(t, access_idx); - if (offset % 8) - return -EINVAL; - spec->offset += offset / 8; + bit_offset = btf_member_bit_offset(t, access_idx); + spec->bit_offset += bit_offset; m = 
btf_members(t) + access_idx; if (m->name_off) { @@ -2435,18 +4587,23 @@ static int bpf_core_spec_parse(const struct btf *btf, if (str_is_empty(name)) return -EINVAL; - spec->spec[spec->len].type_id = id; - spec->spec[spec->len].idx = access_idx; - spec->spec[spec->len].name = name; + acc->type_id = id; + acc->idx = access_idx; + acc->name = name; spec->len++; } id = m->type; } else if (btf_is_array(t)) { const struct btf_array *a = btf_array(t); + bool flex; t = skip_mods_and_typedefs(btf, a->type, &id); - if (!t || access_idx >= a->nelems) + if (!t) + return -EINVAL; + + flex = is_flex_arr(btf, acc - 1, a); + if (!flex && access_idx >= a->nelems) return -EINVAL; spec->spec[spec->len].type_id = id; @@ -2456,10 +4613,10 @@ static int bpf_core_spec_parse(const struct btf *btf, sz = btf__resolve_size(btf, id); if (sz < 0) return sz; - spec->offset += access_idx * sz; + spec->bit_offset += access_idx * sz * 8; } else { - pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n", - type_id, spec_str, i, id, btf_kind(t)); + pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", + type_id, spec_str, i, id, btf_kind_str(t)); return -EINVAL; } } @@ -2509,16 +4666,16 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf, { size_t local_essent_len, targ_essent_len; const char *local_name, *targ_name; - const struct btf_type *t; + const struct btf_type *t, *local_t; struct ids_vec *cand_ids; __u32 *new_ids; int i, err, n; - t = btf__type_by_id(local_btf, local_type_id); - if (!t) + local_t = btf__type_by_id(local_btf, local_type_id); + if (!local_t) return ERR_PTR(-EINVAL); - local_name = btf__name_by_offset(local_btf, t->name_off); + local_name = btf__name_by_offset(local_btf, local_t->name_off); if (str_is_empty(local_name)) return ERR_PTR(-EINVAL); local_essent_len = bpf_core_essential_name_len(local_name); @@ -2530,6 +4687,9 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf, n = btf__get_nr_types(targ_btf); for (i = 1; i <= n; i++) { t = btf__type_by_id(targ_btf, i); + if (btf_kind(t) != btf_kind(local_t)) + continue; + targ_name = btf__name_by_offset(targ_btf, t->name_off); if (str_is_empty(targ_name)) continue; @@ -2539,11 +4699,12 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf, continue; if (strncmp(local_name, targ_name, local_essent_len) == 0) { - pr_debug("[%d] %s: found candidate [%d] %s\n", - local_type_id, local_name, i, targ_name); - new_ids = reallocarray(cand_ids->data, - cand_ids->len + 1, - sizeof(*cand_ids->data)); + pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", + local_type_id, btf_kind_str(local_t), + local_name, i, btf_kind_str(t), targ_name); + new_ids = libbpf_reallocarray(cand_ids->data, + cand_ids->len + 1, + sizeof(*cand_ids->data)); if (!new_ids) { err = -ENOMEM; goto err_out; @@ -2558,13 +4719,16 @@ err_out: return ERR_PTR(err); } -/* Check two types for compatibility, skipping const/volatile/restrict and - * typedefs, to ensure we are relocating offset to the compatible entities: +/* Check two types for compatibility for the purpose of field access + * relocation. 
const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: * - any two STRUCTs/UNIONs are compatible and can be mixed; - * - any two FWDs are compatible; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; - * - for INT, size and bitness should match, signedness is ignored; + * - for INT, size and signedness are ignored; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * - everything else shouldn't be ever a target of relocation. @@ -2590,23 +4754,36 @@ recur: return 0; switch (btf_kind(local_type)) { - case BTF_KIND_FWD: case BTF_KIND_PTR: return 1; - case BTF_KIND_ENUM: - return local_type->size == targ_type->size; + case BTF_KIND_FWD: + case BTF_KIND_ENUM: { + const char *local_name, *targ_name; + size_t local_len, targ_len; + + local_name = btf__name_by_offset(local_btf, + local_type->name_off); + targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); + local_len = bpf_core_essential_name_len(local_name); + targ_len = bpf_core_essential_name_len(targ_name); + /* one of them is anonymous or both w/ same flavor-less names */ + return local_len == 0 || targ_len == 0 || + (local_len == targ_len && + strncmp(local_name, targ_name, local_len) == 0); + } case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ return btf_int_offset(local_type) == 0 && - btf_int_offset(targ_type) == 0 && - local_type->size == targ_type->size && - btf_int_bits(local_type) == btf_int_bits(targ_type); + btf_int_offset(targ_type) == 0; case BTF_KIND_ARRAY: local_id = btf_array(local_type)->type; targ_id = btf_array(targ_type)->type; goto recur; default: - pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n", - btf_kind(local_type), local_id, targ_id); + pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n", + btf_kind(local_type), local_id, targ_id); return 0; } } @@ -2615,7 +4792,7 @@ recur: * Given single high-level named field accessor in local type, find * corresponding high-level accessor for a target type. Along the way, * maintain low-level spec for target as well. Also keep updating target - * offset. + * bit offset. * * Searching is performed through recursive exhaustive enumeration of all * fields of a struct/union. 
If there are any anonymous (embedded) @@ -2654,21 +4831,16 @@ static int bpf_core_match_member(const struct btf *local_btf, n = btf_vlen(targ_type); m = btf_members(targ_type); for (i = 0; i < n; i++, m++) { - __u32 offset; + __u32 bit_offset; - /* bitfield relocations not supported */ - if (btf_member_bitfield_size(targ_type, i)) - continue; - offset = btf_member_bit_offset(targ_type, i); - if (offset % 8) - continue; + bit_offset = btf_member_bit_offset(targ_type, i); /* too deep struct/union/array nesting */ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) return -E2BIG; /* speculate this member will be the good one */ - spec->offset += offset / 8; + spec->bit_offset += bit_offset; spec->raw_spec[spec->raw_len++] = i; targ_name = btf__name_by_offset(targ_btf, m->name_off); @@ -2697,16 +4869,110 @@ static int bpf_core_match_member(const struct btf *local_btf, return found; } /* member turned out not to be what we looked for */ - spec->offset -= offset / 8; + spec->bit_offset -= bit_offset; spec->raw_len--; } return 0; } +/* Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. 
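+ *
+ * A sketch of these rules (hypothetical types): local 'enum E { A }'
+ * matches target 'enum E { A = 1, B = 2 }' despite differing sizes and
+ * enumerator sets, while local 'struct S' never matches target 'union S',
+ * since the kind itself must agree.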
+ */ +static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id) +{ + const struct btf_type *local_type, *targ_type; + int depth = 32; /* max recursion depth */ + + /* caller made sure that names match (ignoring flavor suffix) */ + local_type = btf__type_by_id(local_btf, local_id); + targ_type = btf__type_by_id(targ_btf, targ_id); + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + +recur: + depth--; + if (depth < 0) + return -EINVAL; + + local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); + targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); + if (!local_type || !targ_type) + return -EINVAL; + + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + switch (btf_kind(local_type)) { + case BTF_KIND_UNKN: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + return 1; + case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ + return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; + case BTF_KIND_PTR: + local_id = local_type->type; + targ_id = targ_type->type; + goto recur; + case BTF_KIND_ARRAY: + local_id = btf_array(local_type)->type; + targ_id = btf_array(targ_type)->type; + goto recur; + case BTF_KIND_FUNC_PROTO: { + struct btf_param *local_p = btf_params(local_type); + struct btf_param *targ_p = btf_params(targ_type); + __u16 local_vlen = btf_vlen(local_type); + __u16 targ_vlen = btf_vlen(targ_type); + int i, err; + + if (local_vlen != targ_vlen) + return 0; + + for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { + skip_mods_and_typedefs(local_btf, local_p->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); + err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id); + if (err <= 0) + return err; + } + + /* tail recurse for return type check */ + skip_mods_and_typedefs(local_btf, local_type->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); + goto recur; + } + default: + pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", + btf_kind_str(local_type), local_id, targ_id); + return 0; + } +} + /* * Try to match local spec to a target type and, if successful, produce full - * target spec (high-level, low-level + offset). + * target spec (high-level, low-level + bit offset). 
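+ *
+ * Returns 1 on a successful match, 0 when the candidate doesn't fit (not an
+ * error), and a negative value on internal failures, so callers can tell a
+ * rejected candidate apart from a hard error.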
*/ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, const struct btf *targ_btf, __u32 targ_id, @@ -2719,10 +4985,51 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, memset(targ_spec, 0, sizeof(*targ_spec)); targ_spec->btf = targ_btf; + targ_spec->root_type_id = targ_id; + targ_spec->relo_kind = local_spec->relo_kind; + + if (core_relo_is_type_based(local_spec->relo_kind)) { + return bpf_core_types_are_compat(local_spec->btf, + local_spec->root_type_id, + targ_btf, targ_id); + } local_acc = &local_spec->spec[0]; targ_acc = &targ_spec->spec[0]; + if (core_relo_is_enumval_based(local_spec->relo_kind)) { + size_t local_essent_len, targ_essent_len; + const struct btf_enum *e; + const char *targ_name; + + /* has to resolve to an enum */ + targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); + if (!btf_is_enum(targ_type)) + return 0; + + local_essent_len = bpf_core_essential_name_len(local_acc->name); + + for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) { + targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); + targ_essent_len = bpf_core_essential_name_len(targ_name); + if (targ_essent_len != local_essent_len) + continue; + if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { + targ_acc->type_id = targ_id; + targ_acc->idx = i; + targ_acc->name = targ_name; + targ_spec->len++; + targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; + targ_spec->raw_len++; + return 1; + } + } + return 0; + } + + if (!core_relo_is_field_based(local_spec->relo_kind)) + return -EINVAL; + for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); @@ -2743,12 +5050,14 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, */ if (i > 0) { const struct btf_array *a; + bool flex; if (!btf_is_array(targ_type)) return 0; a = btf_array(targ_type); - if (local_acc->idx >= a->nelems) + flex = is_flex_arr(targ_btf, targ_acc - 1, a); + if (!flex && local_acc->idx >= a->nelems) return 0; if (!skip_mods_and_typedefs(targ_btf, a->type, &targ_id)) @@ -2769,144 +5078,512 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, sz = btf__resolve_size(targ_btf, targ_id); if (sz < 0) return sz; - targ_spec->offset += local_acc->idx * sz; + targ_spec->bit_offset += local_acc->idx * sz * 8; } } return 1; } +static int bpf_core_calc_field_relo(const struct bpf_program *prog, + const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val, __u32 *field_sz, __u32 *type_id, + bool *validate) +{ + const struct bpf_core_accessor *acc; + const struct btf_type *t; + __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; + const struct btf_member *m; + const struct btf_type *mt; + bool bitfield; + __s64 sz; + + *field_sz = 0; + + if (relo->kind == BPF_FIELD_EXISTS) { + *val = spec ? 
1 : 0; + return 0; + } + + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + + acc = &spec->spec[spec->len - 1]; + t = btf__type_by_id(spec->btf, acc->type_id); + + /* a[n] accessor needs special handling */ + if (!acc->name) { + if (relo->kind == BPF_FIELD_BYTE_OFFSET) { + *val = spec->bit_offset / 8; + /* remember field size for load/store mem size */ + sz = btf__resolve_size(spec->btf, acc->type_id); + if (sz < 0) + return -EINVAL; + *field_sz = sz; + *type_id = acc->type_id; + } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { + sz = btf__resolve_size(spec->btf, acc->type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + } else { + pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n", + prog->name, relo->kind, relo->insn_off / 8); + return -EINVAL; + } + if (validate) + *validate = true; + return 0; + } + + m = btf_members(t) + acc->idx; + mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); + bit_off = spec->bit_offset; + bit_sz = btf_member_bitfield_size(t, acc->idx); + + bitfield = bit_sz > 0; + if (bitfield) { + byte_sz = mt->size; + byte_off = bit_off / 8 / byte_sz * byte_sz; + /* figure out smallest int size necessary for bitfield load */ + while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { + if (byte_sz >= 8) { + /* bitfield can't be read with 64-bit read */ + pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n", + prog->name, relo->kind, relo->insn_off / 8); + return -E2BIG; + } + byte_sz *= 2; + byte_off = bit_off / 8 / byte_sz * byte_sz; + } + } else { + sz = btf__resolve_size(spec->btf, field_type_id); + if (sz < 0) + return -EINVAL; + byte_sz = sz; + byte_off = spec->bit_offset / 8; + bit_sz = byte_sz * 8; + } + + /* for bitfields, all the relocatable aspects are ambiguous and we + * might disagree with compiler, so turn off validation of expected + * value, except for signedness + */ + if (validate) + *validate = !bitfield; + + switch (relo->kind) { + case BPF_FIELD_BYTE_OFFSET: + *val = byte_off; + if (!bitfield) { + *field_sz = byte_sz; + *type_id = field_type_id; + } + break; + case BPF_FIELD_BYTE_SIZE: + *val = byte_sz; + break; + case BPF_FIELD_SIGNED: + /* enums will be assumed unsigned */ + *val = btf_is_enum(mt) || + (btf_int_encoding(mt) & BTF_INT_SIGNED); + if (validate) + *validate = true; /* signedness is never ambiguous */ + break; + case BPF_FIELD_LSHIFT_U64: +#if __BYTE_ORDER == __LITTLE_ENDIAN + *val = 64 - (bit_off + bit_sz - byte_off * 8); +#else + *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); +#endif + break; + case BPF_FIELD_RSHIFT_U64: + *val = 64 - bit_sz; + if (validate) + *validate = true; /* right shift is never ambiguous */ + break; + case BPF_FIELD_EXISTS: + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val) +{ + __s64 sz; + + /* type-based relos return zero when target type is not found */ + if (!spec) { + *val = 0; + return 0; + } + + switch (relo->kind) { + case BPF_TYPE_ID_TARGET: + *val = spec->root_type_id; + break; + case BPF_TYPE_EXISTS: + *val = 1; + break; + case BPF_TYPE_SIZE: + sz = btf__resolve_size(spec->btf, spec->root_type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + break; + case BPF_TYPE_ID_LOCAL: + /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */ + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, + const struct 
bpf_core_spec *spec, + __u32 *val) +{ + const struct btf_type *t; + const struct btf_enum *e; + + switch (relo->kind) { + case BPF_ENUMVAL_EXISTS: + *val = spec ? 1 : 0; + break; + case BPF_ENUMVAL_VALUE: + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + t = btf__type_by_id(spec->btf, spec->spec[0].type_id); + e = btf_enum(t) + spec->spec[0].idx; + *val = e->val; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +struct bpf_core_relo_res +{ + /* expected value in the instruction, unless validate == false */ + __u32 orig_val; + /* new value that needs to be patched up to */ + __u32 new_val; + /* relocation unsuccessful, poison instruction, but don't fail load */ + bool poison; + /* some relocations can't be validated against orig_val */ + bool validate; + /* for field byte offset relocations or the forms: + * *(T *)(rX + <off>) = rY + * rX = *(T *)(rY + <off>), + * we remember original and resolved field size to adjust direct + * memory loads of pointers and integers; this is necessary for 32-bit + * host kernel architectures, but also allows to automatically + * relocate fields that were resized from, e.g., u32 to u64, etc. + */ + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; +}; + +/* Calculate original and target relocation values, given local and target + * specs and relocation kind. These values are calculated for each candidate. + * If there are multiple candidates, resulting values should all be consistent + * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity. + * If instruction has to be poisoned, *poison will be set to true. + */ +static int bpf_core_calc_relo(const struct bpf_program *prog, + const struct bpf_core_relo *relo, + int relo_idx, + const struct bpf_core_spec *local_spec, + const struct bpf_core_spec *targ_spec, + struct bpf_core_relo_res *res) +{ + int err = -EOPNOTSUPP; + + res->orig_val = 0; + res->new_val = 0; + res->poison = false; + res->validate = true; + res->fail_memsz_adjust = false; + res->orig_sz = res->new_sz = 0; + res->orig_type_id = res->new_type_id = 0; + + if (core_relo_is_field_based(relo->kind)) { + err = bpf_core_calc_field_relo(prog, relo, local_spec, + &res->orig_val, &res->orig_sz, + &res->orig_type_id, &res->validate); + err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, + &res->new_val, &res->new_sz, + &res->new_type_id, NULL); + if (err) + goto done; + /* Validate if it's safe to adjust load/store memory size. + * Adjustments are performed only if original and new memory + * sizes differ. + */ + res->fail_memsz_adjust = false; + if (res->orig_sz != res->new_sz) { + const struct btf_type *orig_t, *new_t; + + orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); + new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); + + /* There are two use cases in which it's safe to + * adjust load/store's mem size: + * - reading a 32-bit kernel pointer, while on BPF + * size pointers are always 64-bit; in this case + * it's safe to "downsize" instruction size due to + * pointer being treated as unsigned integer with + * zero-extended upper 32-bits; + * - reading unsigned integers, again due to + * zero-extension is preserving the value correctly. + * + * In all other cases it's incorrect to attempt to + * load/store field because read value will be + * incorrect, so we poison relocated instruction. 
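+ * + * For example (hypothetical field, for illustration): if a field read by the BPF program as __u32 was resized to __u64 in the target kernel and both sides are unsigned, the BPF_W load can safely become a BPF_DW load thanks to zero-extension; had either side been signed, the instruction would be poisoned instead.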
+ */ + if (btf_is_ptr(orig_t) && btf_is_ptr(new_t)) + goto done; + if (btf_is_int(orig_t) && btf_is_int(new_t) && + btf_int_encoding(orig_t) != BTF_INT_SIGNED && + btf_int_encoding(new_t) != BTF_INT_SIGNED) + goto done; + + /* mark as invalid mem size adjustment, but this will + * only be checked for LDX/STX/ST insns + */ + res->fail_memsz_adjust = true; + } + } else if (core_relo_is_type_based(relo->kind)) { + err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); + err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); + } else if (core_relo_is_enumval_based(relo->kind)) { + err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); + err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); + } + +done: + if (err == -EUCLEAN) { + /* EUCLEAN is used to signal instruction poisoning request */ + res->poison = true; + err = 0; + } else if (err == -EOPNOTSUPP) { + /* EOPNOTSUPP means unknown/unsupported relocation */ + pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", + prog->name, relo_idx, core_relo_kind_str(relo->kind), + relo->kind, relo->insn_off / 8); + } + + return err; +} + +/* + * Turn an instruction for which CO-RE relocation failed into an invalid one + * with a distinct signature. + */ +static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx, + int insn_idx, struct bpf_insn *insn) +{ + pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n", + prog->name, relo_idx, insn_idx); + insn->code = BPF_JMP | BPF_CALL; + insn->dst_reg = 0; + insn->src_reg = 0; + insn->off = 0; + /* if this instruction is reachable (not dead code), + * verifier will complain with the following message: + * invalid func unknown#195896080 + */ + insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ +} + +static int insn_bpf_size_to_bytes(struct bpf_insn *insn) +{ + switch (BPF_SIZE(insn->code)) { + case BPF_DW: return 8; + case BPF_W: return 4; + case BPF_H: return 2; + case BPF_B: return 1; + default: return -1; + } +} + +static int insn_bytes_to_bpf_size(__u32 sz) +{ + switch (sz) { + case 8: return BPF_DW; + case 4: return BPF_W; + case 2: return BPF_H; + case 1: return BPF_B; + default: return -1; + } +} + /* * Patch relocatable BPF instruction. - * Expected insn->imm value is provided for validation, as well as the new - * relocated value. * - * Currently three kinds of BPF instructions are supported: + * Patched value is determined by relocation kind and target specification. + * For existence relocations, target spec will be NULL if the field/type is not found. + * Expected insn->imm value is determined using relocation kind and local + * spec, and is checked before patching the instruction. If the actual insn->imm + * value is wrong, bail out with an error. + * + * Currently supported classes of BPF instruction are: * 1. rX = <imm> (assignment with immediate operand); * 2. rX += <imm> (arithmetic operations with immediate operand); - * 3. *(rX) = <imm> (indirect memory assignment with immediate operand). - * - * If actual insn->imm value is wrong, bail out. + * 3. rX = <imm64> (load with 64-bit immediate value); + * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64}; + * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64}; + * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
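+ * + * For example (illustrative): a direct CO-RE field access such as task->pid in a BTF-enabled program typically compiles to class 4 above, rX = *(u32 *)(rY + <off>), and it is that <off> field which gets patched to the member's byte offset in the target kernel.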
*/ -static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off, - __u32 orig_off, __u32 new_off) +static int bpf_core_patch_insn(struct bpf_program *prog, + const struct bpf_core_relo *relo, + int relo_idx, + const struct bpf_core_relo_res *res) { + __u32 orig_val, new_val; struct bpf_insn *insn; int insn_idx; __u8 class; - if (insn_off % sizeof(struct bpf_insn)) + if (relo->insn_off % BPF_INSN_SZ) return -EINVAL; - insn_idx = insn_off / sizeof(struct bpf_insn); - + insn_idx = relo->insn_off / BPF_INSN_SZ; + /* adjust insn_idx from section frame of reference to the local + * program's frame of reference; (sub-)program code is not yet + * relocated, so it's enough to just subtract in-section offset + */ + insn_idx = insn_idx - prog->sec_insn_off; insn = &prog->insns[insn_idx]; class = BPF_CLASS(insn->code); - if (class == BPF_ALU || class == BPF_ALU64) { + if (res->poison) { +poison: + /* poison second part of ldimm64 to avoid confusing error from + * verifier about "unknown opcode 00" + */ + if (is_ldimm64(insn)) + bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1); + bpf_core_poison_insn(prog, relo_idx, insn_idx, insn); + return 0; + } + + orig_val = res->orig_val; + new_val = res->new_val; + + switch (class) { + case BPF_ALU: + case BPF_ALU64: if (BPF_SRC(insn->code) != BPF_K) return -EINVAL; - if (insn->imm != orig_off) + if (res->validate && insn->imm != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", + prog->name, relo_idx, + insn_idx, insn->imm, orig_val, new_val); return -EINVAL; - insn->imm = new_off; - pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n", - bpf_program__title(prog, false), - insn_idx, orig_off, new_off); - } else { - pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n", - bpf_program__title(prog, false), - insn_idx, insn->code, insn->src_reg, insn->dst_reg, - insn->off, insn->imm); + } + orig_val = insn->imm; + insn->imm = new_val; + pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", + prog->name, relo_idx, insn_idx, + orig_val, new_val); + break; + case BPF_LDX: + case BPF_ST: + case BPF_STX: + if (res->validate && insn->off != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", + prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); + return -EINVAL; + } + if (new_val > SHRT_MAX) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n", + prog->name, relo_idx, insn_idx, new_val); + return -ERANGE; + } + if (res->fail_memsz_adjust) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. 
" + "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n", + prog->name, relo_idx, insn_idx); + goto poison; + } + + orig_val = insn->off; + insn->off = new_val; + pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", + prog->name, relo_idx, insn_idx, orig_val, new_val); + + if (res->new_sz != res->orig_sz) { + int insn_bytes_sz, insn_bpf_sz; + + insn_bytes_sz = insn_bpf_size_to_bytes(insn); + if (insn_bytes_sz != res->orig_sz) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n", + prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); + return -EINVAL; + } + + insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); + if (insn_bpf_sz < 0) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n", + prog->name, relo_idx, insn_idx, res->new_sz); + return -EINVAL; + } + + insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); + pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", + prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); + } + break; + case BPF_LD: { + __u64 imm; + + if (!is_ldimm64(insn) || + insn[0].src_reg != 0 || insn[0].off != 0 || + insn_idx + 1 >= prog->insns_cnt || + insn[1].code != 0 || insn[1].dst_reg != 0 || + insn[1].src_reg != 0 || insn[1].off != 0) { + pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n", + prog->name, relo_idx, insn_idx); + return -EINVAL; + } + + imm = insn[0].imm + ((__u64)insn[1].imm << 32); + if (res->validate && imm != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", + prog->name, relo_idx, + insn_idx, (unsigned long long)imm, + orig_val, new_val); + return -EINVAL; + } + + insn[0].imm = new_val; + insn[1].imm = 0; /* currently only 32-bit values are supported */ + pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", + prog->name, relo_idx, insn_idx, + (unsigned long long)imm, new_val); + break; + } + default: + pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n", + prog->name, relo_idx, insn_idx, insn->code, + insn->src_reg, insn->dst_reg, insn->off, insn->imm); return -EINVAL; } + return 0; } -static struct btf *btf_load_raw(const char *path) -{ - struct btf *btf; - size_t read_cnt; - struct stat st; - void *data; - FILE *f; - - if (stat(path, &st)) - return ERR_PTR(-errno); - - data = malloc(st.st_size); - if (!data) - return ERR_PTR(-ENOMEM); - - f = fopen(path, "rb"); - if (!f) { - btf = ERR_PTR(-errno); - goto cleanup; - } - - read_cnt = fread(data, 1, st.st_size, f); - fclose(f); - if (read_cnt < st.st_size) { - btf = ERR_PTR(-EBADF); - goto cleanup; - } - - btf = btf__new(data, read_cnt); - -cleanup: - free(data); - return btf; -} - -/* - * Probe few well-known locations for vmlinux kernel image and try to load BTF - * data out of it to use for target BTF. 
- */ -static struct btf *bpf_core_find_kernel_btf(void) -{ - struct { - const char *path_fmt; - bool raw_btf; - } locations[] = { - /* try canonical vmlinux BTF through sysfs first */ - { "/sys/kernel/btf/vmlinux", true /* raw BTF */ }, - /* fall back to trying to find vmlinux ELF on disk otherwise */ - { "/boot/vmlinux-%1$s" }, - { "/lib/modules/%1$s/vmlinux-%1$s" }, - { "/lib/modules/%1$s/build/vmlinux" }, - { "/usr/lib/modules/%1$s/kernel/vmlinux" }, - { "/usr/lib/debug/boot/vmlinux-%1$s" }, - { "/usr/lib/debug/boot/vmlinux-%1$s.debug" }, - { "/usr/lib/debug/lib/modules/%1$s/vmlinux" }, - }; - char path[PATH_MAX + 1]; - struct utsname buf; - struct btf *btf; - int i; - - uname(&buf); - - for (i = 0; i < ARRAY_SIZE(locations); i++) { - snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release); - - if (access(path, R_OK)) - continue; - - if (locations[i].raw_btf) - btf = btf_load_raw(path); - else - btf = btf__parse_elf(path, NULL); - - pr_debug("loading kernel BTF '%s': %ld\n", - path, IS_ERR(btf) ? PTR_ERR(btf) : 0); - if (IS_ERR(btf)) - continue; - - return btf; - } - - pr_warning("failed to find valid kernel BTF\n"); - return ERR_PTR(-ESRCH); -} - /* Output spec definition in the format: * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>, * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b @@ -2914,28 +5591,48 @@ static struct btf *bpf_core_find_kernel_btf(void) static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) { const struct btf_type *t; + const struct btf_enum *e; const char *s; __u32 type_id; int i; - type_id = spec->spec[0].type_id; + type_id = spec->root_type_id; t = btf__type_by_id(spec->btf, type_id); s = btf__name_by_offset(spec->btf, t->name_off); - libbpf_print(level, "[%u] %s + ", type_id, s); - for (i = 0; i < spec->raw_len; i++) - libbpf_print(level, "%d%s", spec->raw_spec[i], - i == spec->raw_len - 1 ? " => " : ":"); + libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s); - libbpf_print(level, "%u @ &x", spec->offset); + if (core_relo_is_type_based(spec->relo_kind)) + return; - for (i = 0; i < spec->len; i++) { - if (spec->spec[i].name) - libbpf_print(level, ".%s", spec->spec[i].name); - else - libbpf_print(level, "[%u]", spec->spec[i].idx); + if (core_relo_is_enumval_based(spec->relo_kind)) { + t = skip_mods_and_typedefs(spec->btf, type_id, NULL); + e = btf_enum(t) + spec->raw_spec[0]; + s = btf__name_by_offset(spec->btf, e->name_off); + + libbpf_print(level, "::%s = %u", s, e->val); + return; } + if (core_relo_is_field_based(spec->relo_kind)) { + for (i = 0; i < spec->len; i++) { + if (spec->spec[i].name) + libbpf_print(level, ".%s", spec->spec[i].name); + else if (i > 0 || spec->spec[i].idx > 0) + libbpf_print(level, "[%u]", spec->spec[i].idx); + } + + libbpf_print(level, " ("); + for (i = 0; i < spec->raw_len; i++) + libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); + + if (spec->bit_offset % 8) + libbpf_print(level, " @ offset %u.%u)", + spec->bit_offset / 8, spec->bit_offset % 8); + else + libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); + return; + } } static size_t bpf_core_hash_fn(const void *key, void *ctx) @@ -2984,7 +5681,7 @@ static void *u32_as_hash_key(__u32 x) * types should be compatible (see bpf_core_fields_are_compat for details). * 3. It is supported and expected that there might be multiple flavors * matching the spec. As long as all the specs resolve to the same set of - * offsets across all candidates, there is not error. 
If there is any + * offsets across all candidates, there is no error. If there is any * ambiguity, CO-RE relocation will fail. This is necessary to accomodate * imprefection of BTF deduplication, which can cause slight duplication of * the same BTF type, if some directly or indirectly referenced (by @@ -2999,22 +5696,22 @@ static void *u32_as_hash_key(__u32 x) * CPU-wise compared to prebuilding a map from all local type names to * a list of candidate type names. It's also sped up by caching resolved * list of matching candidates per each local "root" type ID, that has at - * least one bpf_offset_reloc associated with it. This list is shared + * least one bpf_core_relo associated with it. This list is shared * between multiple relocations for the same type ID and is updated as some * of the candidates are pruned due to structural incompatibility. */ -static int bpf_core_reloc_offset(struct bpf_program *prog, - const struct bpf_offset_reloc *relo, - int relo_idx, - const struct btf *local_btf, - const struct btf *targ_btf, - struct hashmap *cand_cache) +static int bpf_core_apply_relo(struct bpf_program *prog, + const struct bpf_core_relo *relo, + int relo_idx, + const struct btf *local_btf, + const struct btf *targ_btf, + struct hashmap *cand_cache) { - const char *prog_name = bpf_program__title(prog, false); - struct bpf_core_spec local_spec, cand_spec, targ_spec; + struct bpf_core_spec local_spec, cand_spec, targ_spec = {}; const void *type_key = u32_as_hash_key(relo->type_id); - const struct btf_type *local_type, *cand_type; - const char *local_name, *cand_name; + struct bpf_core_relo_res cand_res, targ_res; + const struct btf_type *local_type; + const char *local_name; struct ids_vec *cand_ids; __u32 local_id, cand_id; const char *spec_str; @@ -3026,31 +5723,49 @@ static int bpf_core_reloc_offset(struct bpf_program *prog, return -EINVAL; local_name = btf__name_by_offset(local_btf, local_type->name_off); - if (str_is_empty(local_name)) + if (!local_name) return -EINVAL; spec_str = btf__name_by_offset(local_btf, relo->access_str_off); if (str_is_empty(spec_str)) return -EINVAL; - err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec); + err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); if (err) { - pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n", - prog_name, relo_idx, local_id, local_name, spec_str, - err); + pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", + prog->name, relo_idx, local_id, btf_kind_str(local_type), + str_is_empty(local_name) ? 
"<anon>" : local_name, + spec_str, err); return -EINVAL; } - pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx); + pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, + relo_idx, core_relo_kind_str(relo->kind), relo->kind); bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); libbpf_print(LIBBPF_DEBUG, "\n"); + /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ + if (relo->kind == BPF_TYPE_ID_LOCAL) { + targ_res.validate = true; + targ_res.poison = false; + targ_res.orig_val = local_spec.root_type_id; + targ_res.new_val = local_spec.root_type_id; + goto patch_insn; + } + + /* libbpf doesn't support candidate search for anonymous types */ + if (str_is_empty(spec_str)) { + pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n", + prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); + return -EOPNOTSUPP; + } + if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) { cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf); if (IS_ERR(cand_ids)) { - pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld", - prog_name, relo_idx, local_id, local_name, - PTR_ERR(cand_ids)); + pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld", + prog->name, relo_idx, local_id, btf_kind_str(local_type), + local_name, PTR_ERR(cand_ids)); return PTR_ERR(cand_ids); } err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL); @@ -3062,50 +5777,91 @@ static int bpf_core_reloc_offset(struct bpf_program *prog, for (i = 0, j = 0; i < cand_ids->len; i++) { cand_id = cand_ids->data[i]; - cand_type = btf__type_by_id(targ_btf, cand_id); - cand_name = btf__name_by_offset(targ_btf, cand_type->name_off); - - err = bpf_core_spec_match(&local_spec, targ_btf, - cand_id, &cand_spec); - pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ", - prog_name, relo_idx, i, cand_name); - bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); - libbpf_print(LIBBPF_DEBUG, ": %d\n", err); + err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec); if (err < 0) { - pr_warning("prog '%s': relo #%d: matching error: %d\n", - prog_name, relo_idx, err); + pr_warn("prog '%s': relo #%d: error matching candidate #%d ", + prog->name, relo_idx, i); + bpf_core_dump_spec(LIBBPF_WARN, &cand_spec); + libbpf_print(LIBBPF_WARN, ": %d\n", err); return err; } + + pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, + relo_idx, err == 0 ? 
"non-matching" : "matching", i); + bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); + libbpf_print(LIBBPF_DEBUG, "\n"); + if (err == 0) continue; + err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res); + if (err) + return err; + if (j == 0) { + targ_res = cand_res; targ_spec = cand_spec; - } else if (cand_spec.offset != targ_spec.offset) { - /* if there are many candidates, they should all - * resolve to the same offset + } else if (cand_spec.bit_offset != targ_spec.bit_offset) { + /* if there are many field relo candidates, they + * should all resolve to the same bit offset */ - pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n", - prog_name, relo_idx, cand_spec.offset, - targ_spec.offset); + pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", + prog->name, relo_idx, cand_spec.bit_offset, + targ_spec.bit_offset); + return -EINVAL; + } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { + /* all candidates should result in the same relocation + * decision and value, otherwise it's dangerous to + * proceed due to ambiguity + */ + pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n", + prog->name, relo_idx, + cand_res.poison ? "failure" : "success", cand_res.new_val, + targ_res.poison ? "failure" : "success", targ_res.new_val); return -EINVAL; } - cand_ids->data[j++] = cand_spec.spec[0].type_id; + cand_ids->data[j++] = cand_spec.root_type_id; } - cand_ids->len = j; - if (cand_ids->len == 0) { - pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n", - prog_name, relo_idx, local_id, local_name, spec_str); - return -ESRCH; + /* + * For BPF_FIELD_EXISTS relo or when used BPF program has field + * existence checks or kernel version/config checks, it's expected + * that we might not find any candidates. In this case, if field + * wasn't found in any candidate, the list of candidates shouldn't + * change at all, we'll just handle relocating appropriately, + * depending on relo's kind. + */ + if (j > 0) + cand_ids->len = j; + + /* + * If no candidates were found, it might be both a programmer error, + * as well as expected case, depending whether instruction w/ + * relocation is guarded in some way that makes it unreachable (dead + * code) if relocation can't be resolved. This is handled in + * bpf_core_patch_insn() uniformly by replacing that instruction with + * BPF helper call insn (using invalid helper ID). If that instruction + * is indeed unreachable, then it will be ignored and eliminated by + * verifier. If it was an error, then verifier will complain and point + * to a specific instruction number in its log. 
+ */ + if (j == 0) { + pr_debug("prog '%s': relo #%d: no matching targets found\n", + prog->name, relo_idx); + + /* calculate single target relo result explicitly */ + err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res); + if (err) + return err; } - err = bpf_core_reloc_insn(prog, relo->insn_off, - local_spec.offset, targ_spec.offset); +patch_insn: + /* bpf_core_patch_insn() should know how to handle missing targ_spec */ + err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res); if (err) { - pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n", - prog_name, relo_idx, relo->insn_off, err); + pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n", + prog->name, relo_idx, relo->insn_off, err); return -EINVAL; } @@ -3113,25 +5869,27 @@ static int bpf_core_reloc_offset(struct bpf_program *prog, } static int -bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path) +bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) { const struct btf_ext_info_sec *sec; - const struct bpf_offset_reloc *rec; + const struct bpf_core_relo *rec; const struct btf_ext_info *seg; struct hashmap_entry *entry; struct hashmap *cand_cache = NULL; struct bpf_program *prog; struct btf *targ_btf; const char *sec_name; - int i, err = 0; + int i, err = 0, insn_idx, sec_idx; + + if (obj->btf_ext->core_relo_info.len == 0) + return 0; if (targ_btf_path) - targ_btf = btf__parse_elf(targ_btf_path, NULL); + targ_btf = btf__parse(targ_btf_path, NULL); else - targ_btf = bpf_core_find_kernel_btf(); - if (IS_ERR(targ_btf)) { - pr_warning("failed to get target BTF: %ld\n", - PTR_ERR(targ_btf)); + targ_btf = obj->btf_vmlinux; + if (IS_ERR_OR_NULL(targ_btf)) { + pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf)); return PTR_ERR(targ_btf); } @@ -3141,37 +5899,63 @@ bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path) goto out; } - seg = &obj->btf_ext->offset_reloc_info; + seg = &obj->btf_ext->core_relo_info; for_each_btf_ext_sec(seg, sec) { sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); if (str_is_empty(sec_name)) { err = -EINVAL; goto out; } - prog = bpf_object__find_program_by_title(obj, sec_name); - if (!prog) { - pr_warning("failed to find program '%s' for CO-RE offset relocation\n", - sec_name); - err = -EINVAL; - goto out; + /* bpf_object's ELF is gone by now so it's not easy to find + * section index by section name, but we can find *any* + * bpf_program within desired section name and use its + * prog->sec_idx to do a proper search by section index and + * instruction offset + */ + prog = NULL; + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + if (strcmp(prog->sec_name, sec_name) == 0) + break; } + if (!prog) { + pr_warn("sec '%s': failed to find a BPF program\n", sec_name); + return -ENOENT; + } + sec_idx = prog->sec_idx; - pr_debug("prog '%s': performing %d CO-RE offset relocs\n", + pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); for_each_btf_ext_rec(seg, sec, i, rec) { + insn_idx = rec->insn_off / BPF_INSN_SZ; + prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); + if (!prog) { + pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", + sec_name, insn_idx, i); + err = -EINVAL; + goto out; + } + /* no need to apply CO-RE relocation if the program is + * not going to be loaded + */ + if (!prog->load) + continue; + +
err = bpf_core_apply_relo(prog, rec, i, obj->btf, + targ_btf, cand_cache); if (err) { - pr_warning("prog '%s': relo #%d: failed to relocate: %d\n", - sec_name, i, err); + pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", + prog->name, i, err); goto out; } } } out: - btf__free(targ_btf); + /* obj->btf_vmlinux is freed at the end of object load phase */ + if (targ_btf != obj->btf_vmlinux) + btf__free(targ_btf); if (!IS_ERR_OR_NULL(cand_cache)) { hashmap__for_each_entry(cand_cache, entry, i) { bpf_core_free_cands(entry->value); @@ -3181,121 +5965,450 @@ out: return err; } +/* Relocate data references within program code: + * - map references; + * - global variable references; + * - extern references. + */ static int -bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) +bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) { - int err = 0; + int i; - if (obj->btf_ext->offset_reloc_info.len) - err = bpf_core_reloc_offsets(obj, targ_btf_path); + for (i = 0; i < prog->nr_reloc; i++) { + struct reloc_desc *relo = &prog->reloc_desc[i]; + struct bpf_insn *insn = &prog->insns[relo->insn_idx]; + struct extern_desc *ext; - return err; -} - -static int -bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, - struct reloc_desc *relo) -{ - struct bpf_insn *insn, *new_insn; - struct bpf_program *text; - size_t new_cnt; - int err; - - if (relo->type != RELO_CALL) - return -LIBBPF_ERRNO__RELOC; - - if (prog->idx == obj->efile.text_shndx) { - pr_warning("relo in .text insn %d into off %d\n", - relo->insn_idx, relo->text_off); - return -LIBBPF_ERRNO__RELOC; + switch (relo->type) { + case RELO_LD64: + insn[0].src_reg = BPF_PSEUDO_MAP_FD; + insn[0].imm = obj->maps[relo->map_idx].fd; + relo->processed = true; + break; + case RELO_DATA: + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[1].imm = insn[0].imm + relo->sym_off; + insn[0].imm = obj->maps[relo->map_idx].fd; + relo->processed = true; + break; + case RELO_EXTERN: + ext = &obj->externs[relo->sym_off]; + if (ext->type == EXT_KCFG) { + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; + insn[1].imm = ext->kcfg.data_off; + } else /* EXT_KSYM */ { + if (ext->ksym.type_id) { /* typed ksyms */ + insn[0].src_reg = BPF_PSEUDO_BTF_ID; + insn[0].imm = ext->ksym.vmlinux_btf_id; + } else { /* typeless ksyms */ + insn[0].imm = (__u32)ext->ksym.addr; + insn[1].imm = ext->ksym.addr >> 32; + } + } + relo->processed = true; + break; + case RELO_SUBPROG_ADDR: + insn[0].src_reg = BPF_PSEUDO_FUNC; + /* will be handled as a follow up pass */ + break; + case RELO_CALL: + /* will be handled as a follow up pass */ + break; + default: + pr_warn("prog '%s': relo #%d: bad relo type %d\n", + prog->name, i, relo->type); + return -EINVAL; + } } - if (prog->main_prog_cnt == 0) { - text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); - if (!text) { - pr_warning("no .text section found yet relo into text exist\n"); - return -LIBBPF_ERRNO__RELOC; - } - new_cnt = prog->insns_cnt + text->insns_cnt; - new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn)); - if (!new_insn) { - pr_warning("oom in prog realloc\n"); - return -ENOMEM; - } - prog->insns = new_insn; - - if (obj->btf_ext) { - err = bpf_program_reloc_btf_ext(prog, obj, - text->section_name, - prog->insns_cnt); - if (err) - return err; - } - - memcpy(new_insn + prog->insns_cnt, text->insns, - text->insns_cnt * sizeof(*insn)); - prog->main_prog_cnt = prog->insns_cnt; - prog->insns_cnt = new_cnt; - 
pr_debug("added %zd insn from %s to prog %s\n", - text->insns_cnt, text->section_name, - prog->section_name); - } - insn = &prog->insns[relo->insn_idx]; - insn->imm += prog->main_prog_cnt - relo->insn_idx; return 0; } -static int -bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) +static int adjust_prog_btf_ext_info(const struct bpf_object *obj, + const struct bpf_program *prog, + const struct btf_ext_info *ext_info, + void **prog_info, __u32 *prog_rec_cnt, + __u32 *prog_rec_sz) { - int i, err; + void *copy_start = NULL, *copy_end = NULL; + void *rec, *rec_end, *new_prog_info; + const struct btf_ext_info_sec *sec; + size_t old_sz, new_sz; + const char *sec_name; + int i, off_adj; - if (!prog) + for_each_btf_ext_sec(ext_info, sec) { + sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); + if (!sec_name) + return -EINVAL; + if (strcmp(sec_name, prog->sec_name) != 0) + continue; + + for_each_btf_ext_rec(ext_info, sec, i, rec) { + __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; + + if (insn_off < prog->sec_insn_off) + continue; + if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) + break; + + if (!copy_start) + copy_start = rec; + copy_end = rec + ext_info->rec_size; + } + + if (!copy_start) + return -ENOENT; + + /* append func/line info of a given (sub-)program to the main + * program func/line info + */ + old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; + new_sz = old_sz + (copy_end - copy_start); + new_prog_info = realloc(*prog_info, new_sz); + if (!new_prog_info) + return -ENOMEM; + *prog_info = new_prog_info; + *prog_rec_cnt = new_sz / ext_info->rec_size; + memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); + + /* Kernel instruction offsets are in units of 8-byte + * instructions, while .BTF.ext instruction offsets generated + * by Clang are in units of bytes. So convert Clang offsets + * into kernel offsets and adjust offset according to program + * relocated position. 
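+ * + * For example: a record with byte-based insn_off 24 denotes insn #3 within its section; if the (sub-)program starts at sec_insn_off 3 and was placed at sub_insn_off 40, then off_adj is 40 - 3 = 37 and the final offset becomes 24 / 8 + 37 = 40.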
+ */ + off_adj = prog->sub_insn_off - prog->sec_insn_off; + rec = new_prog_info + old_sz; + rec_end = new_prog_info + new_sz; + for (; rec < rec_end; rec += ext_info->rec_size) { + __u32 *insn_off = rec; + + *insn_off = *insn_off / BPF_INSN_SZ + off_adj; + } + *prog_rec_sz = ext_info->rec_size; return 0; - - if (obj->btf_ext) { - err = bpf_program_reloc_btf_ext(prog, obj, - prog->section_name, 0); - if (err) - return err; } - if (!prog->reloc_desc) + return -ENOENT; +} + +static int +reloc_prog_func_and_line_info(const struct bpf_object *obj, + struct bpf_program *main_prog, + const struct bpf_program *prog) +{ + int err; + + /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't + * support func/line info + */ + if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) return 0; - for (i = 0; i < prog->nr_reloc; i++) { - if (prog->reloc_desc[i].type == RELO_LD64 || - prog->reloc_desc[i].type == RELO_DATA) { - bool relo_data = prog->reloc_desc[i].type == RELO_DATA; - struct bpf_insn *insns = prog->insns; - int insn_idx, map_idx; + /* only attempt func info relocation if main program's func_info + * relocation was successful + */ + if (main_prog != prog && !main_prog->func_info) + goto line_info; - insn_idx = prog->reloc_desc[i].insn_idx; - map_idx = prog->reloc_desc[i].map_idx; + err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, + &main_prog->func_info, + &main_prog->func_info_cnt, + &main_prog->func_info_rec_size); + if (err) { + if (err != -ENOENT) { + pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", + prog->name, err); + return err; + } + if (main_prog->func_info) { + /* + * Some info has already been found, but there was a + * problem with the last btf_ext reloc. We have to + * error out. + */ + pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); + return err; + } + /* We had a problem loading the very first info; ignore the rest. */ + pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", + prog->name); + } - if (insn_idx + 1 >= (int)prog->insns_cnt) { - pr_warning("relocation out of range: '%s'\n", - prog->section_name); - return -LIBBPF_ERRNO__RELOC; +line_info: + /* don't relocate line info if main program's relocation failed */ + if (main_prog != prog && !main_prog->line_info) + return 0; + + err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, + &main_prog->line_info, + &main_prog->line_info_cnt, + &main_prog->line_info_rec_size); + if (err) { + if (err != -ENOENT) { + pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", + prog->name, err); + return err; + } + if (main_prog->line_info) { + /* + * Some info has already been found, but there was a + * problem with the last btf_ext reloc. We have to + * error out. + */ + pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); + return err; + } + /* We had a problem loading the very first info; ignore the rest. */ + pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", + prog->name); + } + return 0; +} + +static int cmp_relo_by_insn_idx(const void *key, const void *elem) +{ + size_t insn_idx = *(const size_t *)key; + const struct reloc_desc *relo = elem; + + if (insn_idx == relo->insn_idx) + return 0; + return insn_idx < relo->insn_idx ?
-1 : 1; +} + +static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) +{ + return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, + sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); +} + +static int +bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, + struct bpf_program *prog) +{ + size_t sub_insn_idx, insn_idx, new_cnt; + struct bpf_program *subprog; + struct bpf_insn *insns, *insn; + struct reloc_desc *relo; + int err; + + err = reloc_prog_func_and_line_info(obj, main_prog, prog); + if (err) + return err; + + for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { + insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; + if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) + continue; + + relo = find_prog_insn_relo(prog, insn_idx); + if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { + pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", + prog->name, insn_idx, relo->type); + return -LIBBPF_ERRNO__RELOC; + } + if (relo) { + /* sub-program instruction index is a combination of + * an offset of a symbol pointed to by relocation and + * call instruction's imm field; for global functions, + * call always has imm = -1, but for static functions + * relocation is against STT_SECTION and insn->imm + * points to a start of a static function + * + * for subprog addr relocation, the relo->sym_off + insn->imm is + * the byte offset in the corresponding section. + */ + if (relo->type == RELO_CALL) + sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; + else + sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; + } else if (insn_is_pseudo_func(insn)) { + /* + * RELO_SUBPROG_ADDR relo is always emitted even if both + * functions are in the same section, so it shouldn't reach here. + */ + pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", + prog->name, insn_idx); + return -LIBBPF_ERRNO__RELOC; + } else { + /* if subprogram call is to a static function within + * the same ELF section, there won't be any relocation + * emitted, but it also means there is no additional + * offset necessary, insns->imm is relative to + * instruction's original position within the section + */ + sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; + } + + /* we enforce that sub-programs should be in .text section */ + subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); + if (!subprog) { + pr_warn("prog '%s': no .text section found yet sub-program call exists\n", + prog->name); + return -LIBBPF_ERRNO__RELOC; + } + + /* if it's the first call instruction calling into this + * subprogram (meaning this subprog hasn't been processed + * yet) within the context of current main program: + * - append it at the end of main program's instructions block; + * - process it recursively, while the current program is put on hold; + * - if that subprogram calls some other not-yet-processed + * subprogram, same thing will happen recursively until + * there are no more unprocessed subprograms left to append + * and relocate.
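+ * + * For example (illustrative numbers): if the main program currently has 20 instructions and the subprogram has 5, the subprogram is recorded at sub_insn_off 20 and the main program grows to 25 instructions; a call to it at main-program insn #5 is then patched to imm = 20 - (0 + 5) - 1 = 14.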
+ */ + if (subprog->sub_insn_off == 0) { + subprog->sub_insn_off = main_prog->insns_cnt; + + new_cnt = main_prog->insns_cnt + subprog->insns_cnt; + insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); + if (!insns) { + pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); + return -ENOMEM; } + main_prog->insns = insns; + main_prog->insns_cnt = new_cnt; - if (!relo_data) { - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; - } else { - insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE; - insns[insn_idx + 1].imm = insns[insn_idx].imm; - } - insns[insn_idx].imm = obj->maps[map_idx].fd; - } else if (prog->reloc_desc[i].type == RELO_CALL) { - err = bpf_program__reloc_text(prog, obj, - &prog->reloc_desc[i]); + memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, + subprog->insns_cnt * sizeof(*insns)); + + pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", + main_prog->name, subprog->insns_cnt, subprog->name); + + err = bpf_object__reloc_code(obj, main_prog, subprog); if (err) return err; } + + /* main_prog->insns memory could have been re-allocated, so + * calculate pointer again + */ + insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; + /* calculate correct instruction position within current main + * prog; each main prog can have a different set of + * subprograms appended (potentially in different order as + * well), so position of any subprog can be different for + * different main programs */ + insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; + + if (relo) + relo->processed = true; + + pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", + prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); } - zfree(&prog->reloc_desc); - prog->nr_reloc = 0; + return 0; +} + +/* + * Relocate sub-program calls. + * + * Algorithm operates as follows. Each entry-point BPF program (referred to as + * main prog) is processed separately. Each subprog (a non-entry function, + * which can be called from either entry progs or other subprogs) gets its + * sub_insn_off reset to zero. This serves as an indicator that this subprogram + * hasn't yet been appended and relocated within the current main prog. Once it's + * relocated, sub_insn_off will point at the position within current main prog + * where given subprog was appended. This will further be used to relocate all + * the call instructions jumping into this subprog. + * + * We start with main program and process all call instructions. If the call + * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off + * is zero), subprog instructions are appended at the end of main program's + * instruction array. Then main program is "put on hold" while we recursively + * process newly appended subprogram. If that subprogram calls into another + * subprogram that hasn't been appended, new subprogram is appended again to + * the *main* prog's instructions (subprog's instructions are always left + * untouched, as they need to be in unmodified state for subsequent main progs + * and subprog instructions are always sent only as part of a main prog) and + * the process continues recursively. Once all the subprogs called from a main + * prog or any of its subprogs are appended (and relocated), all their + * positions within finalized instructions array are known, so it's easy to + * rewrite call instructions with correct relative offsets, corresponding to + * desired target subprog.
+ * + * It's important to realize that some subprogs might not be called from some + * main prog or any of its called/used subprogs. Those will keep their + * subprog->sub_insn_off as zero at all times and won't be appended to current + * main prog and won't be relocated within the context of current main prog. + * They might still be used from other main progs later. + * + * Visually this process can be shown as below. Suppose we have two main + * programs mainA and mainB and BPF object contains three subprogs: subA, + * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and + * subC both call subB: + * + * +--------+ +-------+ + * | v v | + * +--+---+ +--+-+-+ +---+--+ + * | subA | | subB | | subC | + * +--+---+ +------+ +---+--+ + * ^ ^ + * | | + * +---+-------+ +------+----+ + * | mainA | | mainB | + * +-----------+ +-----------+ + * + * We'll start relocating mainA, will find subA, append it and start + * processing subA recursively: + * + * +-----------+------+ + * | mainA | subA | + * +-----------+------+ + * + * At this point we notice that subB is used from subA, so we append it and + * relocate (there are no further subcalls from subB): + * + * +-----------+------+------+ + * | mainA | subA | subB | + * +-----------+------+------+ + * + * At this point, we relocate subA calls, then go one level up and finish with + * relocating mainA calls. mainA is done. + * + * For mainB the process is similar but results in a different order. We start + * with mainB and skip subA and subB, as mainB never calls them (at least + * directly), but we see subC is needed, so we append and start processing it: + * + * +-----------+------+ + * | mainB | subC | + * +-----------+------+ + * Now we see subC needs subB, so we go back to it, append and relocate it: + * + * +-----------+------+------+ + * | mainB | subC | subB | + * +-----------+------+------+ + * + * At this point we unwind recursion, relocate calls in subC, then in mainB.
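+ * + * Continuing with illustrative numbers: if mainA has 10 insns, subA has 4, and subB has 3, then in mainA's image subA lands at sub_insn_off 10 and subB at 14, so a call to subB from within subA at overall insn #12 gets imm = 14 - 12 - 1 = 1; in mainB's image subB's sub_insn_off will generally differ, and the same call is patched with a different imm there.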
+ */ +static int +bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) +{ + struct bpf_program *subprog; + int i, j, err; + + /* mark all subprogs as not relocated (yet) within the context of + * current main program + */ + for (i = 0; i < obj->nr_programs; i++) { + subprog = &obj->programs[i]; + if (!prog_is_subprog(obj, subprog)) + continue; + + subprog->sub_insn_off = 0; + for (j = 0; j < subprog->nr_reloc; j++) + if (subprog->reloc_desc[j].type == RELO_CALL) + subprog->reloc_desc[j].processed = false; + } + + err = bpf_object__reloc_code(obj, prog, prog); + if (err) + return err; + + return 0; } @@ -3309,54 +6422,261 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) if (obj->btf_ext) { err = bpf_object__relocate_core(obj, targ_btf_path); if (err) { - pr_warning("failed to perform CO-RE relocations: %d\n", - err); + pr_warn("failed to perform CO-RE relocations: %d\n", + err); return err; } } + /* relocate data references first for all programs and sub-programs, + * as they don't change relative to code locations, so subsequent + * subprogram processing won't need to re-calculate any of them + */ for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; - - err = bpf_program__relocate(prog, obj); + err = bpf_object__relocate_data(obj, prog); if (err) { - pr_warning("failed to relocate '%s'\n", - prog->section_name); + pr_warn("prog '%s': failed to relocate data references: %d\n", + prog->name, err); return err; } } + /* now relocate subprogram calls and append used subprograms to main + * programs; each copy of subprogram code needs to be relocated + * differently for each main program, because its code location might + * have changed + */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + /* sub-program's sub-calls are relocated within the context of + * its main program only + */ + if (prog_is_subprog(obj, prog)) + continue; + + err = bpf_object__relocate_calls(obj, prog); + if (err) { + pr_warn("prog '%s': failed to relocate calls: %d\n", + prog->name, err); + return err; + } + } + /* free up relocation descriptors */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + zfree(&prog->reloc_desc); + prog->nr_reloc = 0; + } return 0; } -static int bpf_object__collect_reloc(struct bpf_object *obj) +static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, + GElf_Shdr *shdr, Elf_Data *data); + +static int bpf_object__collect_map_relos(struct bpf_object *obj, + GElf_Shdr *shdr, Elf_Data *data) { - int i, err; + const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); + int i, j, nrels, new_sz; + const struct btf_var_secinfo *vi = NULL; + const struct btf_type *sec, *var, *def; + struct bpf_map *map = NULL, *targ_map; + const struct btf_member *member; + const char *name, *mname; + Elf_Data *symbols; + unsigned int moff; + GElf_Sym sym; + GElf_Rel rel; + void *tmp; - if (!obj_elf_valid(obj)) { - pr_warning("Internal error: elf object is closed\n"); - return -LIBBPF_ERRNO__INTERNAL; - } + if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) + return -EINVAL; + sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); + if (!sec) + return -EINVAL; - for (i = 0; i < obj->efile.nr_reloc; i++) { - GElf_Shdr *shdr = &obj->efile.reloc[i].shdr; - Elf_Data *data = obj->efile.reloc[i].data; - int idx = shdr->sh_info; - struct bpf_program *prog; - - if (shdr->sh_type != SHT_REL) { - pr_warning("internal error at %d\n", __LINE__); - return -LIBBPF_ERRNO__INTERNAL; + symbols = 
obj->efile.symbols; + nrels = shdr->sh_size / shdr->sh_entsize; + for (i = 0; i < nrels; i++) { + if (!gelf_getrel(data, i, &rel)) { + pr_warn(".maps relo #%d: failed to get ELF relo\n", i); + return -LIBBPF_ERRNO__FORMAT; } - - prog = bpf_object__find_prog_by_idx(obj, idx); - if (!prog) { - pr_warning("relocation failed: no section(%d)\n", idx); + if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { + pr_warn(".maps relo #%d: symbol %zx not found\n", + i, (size_t)GELF_R_SYM(rel.r_info)); + return -LIBBPF_ERRNO__FORMAT; + } + name = elf_sym_str(obj, sym.st_name) ?: "<?>"; + if (sym.st_shndx != obj->efile.btf_maps_shndx) { + pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", + i, name); return -LIBBPF_ERRNO__RELOC; } - err = bpf_program__collect_reloc(prog, shdr, data, obj); + pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n", + i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value, + (size_t)rel.r_offset, sym.st_name, name); + + for (j = 0; j < obj->nr_maps; j++) { + map = &obj->maps[j]; + if (map->sec_idx != obj->efile.btf_maps_shndx) + continue; + + vi = btf_var_secinfos(sec) + map->btf_var_idx; + if (vi->offset <= rel.r_offset && + rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) + break; + } + if (j == obj->nr_maps) { + pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n", + i, name, (size_t)rel.r_offset); + return -EINVAL; + } + + if (!bpf_map_type__is_map_in_map(map->def.type)) + return -EINVAL; + if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && + map->def.key_size != sizeof(int)) { + pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", + i, map->name, sizeof(int)); + return -EINVAL; + } + + targ_map = bpf_object__find_map_by_name(obj, name); + if (!targ_map) + return -ESRCH; + + var = btf__type_by_id(obj->btf, vi->type); + def = skip_mods_and_typedefs(obj->btf, var->type, NULL); + if (btf_vlen(def) == 0) + return -EINVAL; + member = btf_members(def) + btf_vlen(def) - 1; + mname = btf__name_by_offset(obj->btf, member->name_off); + if (strcmp(mname, "values")) + return -EINVAL; + + moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; + if (rel.r_offset - vi->offset < moff) + return -EINVAL; + + moff = rel.r_offset - vi->offset - moff; + /* here we use BPF pointer size, which is always 64 bit, as we + * are parsing ELF that was built for BPF target + */ + if (moff % bpf_ptr_sz) + return -EINVAL; + moff /= bpf_ptr_sz; + if (moff >= map->init_slots_sz) { + new_sz = moff + 1; + tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); + if (!tmp) + return -ENOMEM; + map->init_slots = tmp; + memset(map->init_slots + map->init_slots_sz, 0, + (new_sz - map->init_slots_sz) * host_ptr_sz); + map->init_slots_sz = new_sz; + } + map->init_slots[moff] = targ_map; + + pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", + i, map->name, moff, name); + } + + return 0; +} + +static int cmp_relocs(const void *_a, const void *_b) +{ + const struct reloc_desc *a = _a; + const struct reloc_desc *b = _b; + + if (a->insn_idx != b->insn_idx) + return a->insn_idx < b->insn_idx ? -1 : 1; + + /* no two relocations should have the same insn_idx, but ... */ + if (a->type != b->type) + return a->type < b->type ? 
-1 : 1; + + return 0; +} + +static int bpf_object__collect_relos(struct bpf_object *obj) +{ + int i, err; + + for (i = 0; i < obj->efile.nr_reloc_sects; i++) { + GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; + Elf_Data *data = obj->efile.reloc_sects[i].data; + int idx = shdr->sh_info; + + if (shdr->sh_type != SHT_REL) { + pr_warn("internal error at %d\n", __LINE__); + return -LIBBPF_ERRNO__INTERNAL; + } + + if (idx == obj->efile.st_ops_shndx) + err = bpf_object__collect_st_ops_relos(obj, shdr, data); + else if (idx == obj->efile.btf_maps_shndx) + err = bpf_object__collect_map_relos(obj, shdr, data); + else + err = bpf_object__collect_prog_relos(obj, shdr, data); if (err) return err; } + + for (i = 0; i < obj->nr_programs; i++) { + struct bpf_program *p = &obj->programs[i]; + + if (!p->nr_reloc) + continue; + + qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); + } + return 0; +} + +static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) +{ + if (BPF_CLASS(insn->code) == BPF_JMP && + BPF_OP(insn->code) == BPF_CALL && + BPF_SRC(insn->code) == BPF_K && + insn->src_reg == 0 && + insn->dst_reg == 0) { + *func_id = insn->imm; + return true; + } + return false; +} + +static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog) +{ + struct bpf_insn *insn = prog->insns; + enum bpf_func_id func_id; + int i; + + for (i = 0; i < prog->insns_cnt; i++, insn++) { + if (!insn_is_helper_call(insn, &func_id)) + continue; + + /* on kernels that don't yet support + * bpf_probe_read_{kernel,user}[_str] helpers, fall back + * to bpf_probe_read() which works well for old kernels + */ + switch (func_id) { + case BPF_FUNC_probe_read_kernel: + case BPF_FUNC_probe_read_user: + if (!kernel_supports(FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read; + break; + case BPF_FUNC_probe_read_kernel_str: + case BPF_FUNC_probe_read_user_str: + if (!kernel_supports(FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read_str; + break; + default: + break; + } + } return 0; } @@ -3366,8 +6686,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, { struct bpf_load_program_attr load_attr; char *cp, errmsg[STRERR_BUFSIZE]; - int log_buf_size = BPF_LOG_BUF_SIZE; - char *log_buf; + size_t log_buf_size = 0; + char *log_buf = NULL; int btf_fd, ret; if (!insns || !insns_cnt) @@ -3375,79 +6695,108 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); load_attr.prog_type = prog->type; - load_attr.expected_attach_type = prog->expected_attach_type; - if (prog->caps->name) + /* old kernels might not support specifying expected_attach_type */ + if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && + prog->sec_def->is_exp_attach_type_optional) + load_attr.expected_attach_type = 0; + else + load_attr.expected_attach_type = prog->expected_attach_type; + if (kernel_supports(FEAT_PROG_NAME)) load_attr.name = prog->name; load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; load_attr.license = license; - load_attr.kern_version = kern_version; - load_attr.prog_ifindex = prog->prog_ifindex; - /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ - if (prog->obj->btf_ext) - btf_fd = bpf_object__btf_fd(prog->obj); - else - btf_fd = -1; - load_attr.prog_btf_fd = btf_fd >= 0 ? 
btf_fd : 0; - load_attr.func_info = prog->func_info; - load_attr.func_info_rec_size = prog->func_info_rec_size; - load_attr.func_info_cnt = prog->func_info_cnt; - load_attr.line_info = prog->line_info; - load_attr.line_info_rec_size = prog->line_info_rec_size; - load_attr.line_info_cnt = prog->line_info_cnt; + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_LSM) { + load_attr.attach_btf_id = prog->attach_btf_id; + } else if (prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_EXT) { + load_attr.attach_prog_fd = prog->attach_prog_fd; + load_attr.attach_btf_id = prog->attach_btf_id; + } else { + load_attr.kern_version = kern_version; + load_attr.prog_ifindex = prog->prog_ifindex; + } + /* specify func_info/line_info only if kernel supports them */ + btf_fd = bpf_object__btf_fd(prog->obj); + if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) { + load_attr.prog_btf_fd = btf_fd; + load_attr.func_info = prog->func_info; + load_attr.func_info_rec_size = prog->func_info_rec_size; + load_attr.func_info_cnt = prog->func_info_cnt; + load_attr.line_info = prog->line_info; + load_attr.line_info_rec_size = prog->line_info_rec_size; + load_attr.line_info_cnt = prog->line_info_cnt; + } load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; retry_load: - log_buf = malloc(log_buf_size); - if (!log_buf) - pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); + if (log_buf_size) { + log_buf = malloc(log_buf_size); + if (!log_buf) + return -ENOMEM; + + *log_buf = 0; + } ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size); if (ret >= 0) { - if (load_attr.log_level) + if (log_buf && load_attr.log_level) pr_debug("verifier log:\n%s", log_buf); + + if (prog->obj->rodata_map_idx >= 0 && + kernel_supports(FEAT_PROG_BIND_MAP)) { + struct bpf_map *rodata_map = + &prog->obj->maps[prog->obj->rodata_map_idx]; + + if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) { + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("prog '%s': failed to bind .rodata map: %s\n", + prog->name, cp); + /* Don't fail hard if can't bind rodata. */ + } + } + *pfd = ret; ret = 0; goto out; } - if (errno == ENOSPC) { - log_buf_size <<= 1; + if (!log_buf || errno == ENOSPC) { + log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, + log_buf_size << 1); + free(log_buf); goto retry_load; } - ret = -LIBBPF_ERRNO__LOAD; + ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("load bpf program failed: %s\n", cp); + pr_warn("load bpf program failed: %s\n", cp); + pr_perm_msg(ret); if (log_buf && log_buf[0] != '\0') { ret = -LIBBPF_ERRNO__VERIFY; - pr_warning("-- BEGIN DUMP LOG ---\n"); - pr_warning("\n%s\n", log_buf); - pr_warning("-- END LOG --\n"); + pr_warn("-- BEGIN DUMP LOG ---\n"); + pr_warn("\n%s\n", log_buf); + pr_warn("-- END LOG --\n"); } else if (load_attr.insns_cnt >= BPF_MAXINSNS) { - pr_warning("Program too large (%zu insns), at most %d insns\n", - load_attr.insns_cnt, BPF_MAXINSNS); + pr_warn("Program too large (%zu insns), at most %d insns\n", + load_attr.insns_cnt, BPF_MAXINSNS); ret = -LIBBPF_ERRNO__PROG2BIG; - } else { + } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { /* Wrong program type? 
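If so, retry the load once below as BPF_PROG_TYPE_KPROBE with no expected attach type; if that trial load succeeds, the instructions were valid and only the type was wrong, so report -LIBBPF_ERRNO__PROGTYPE instead of a generic failure. 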
*/ - if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { - int fd; + int fd; - load_attr.prog_type = BPF_PROG_TYPE_KPROBE; - load_attr.expected_attach_type = 0; - fd = bpf_load_program_xattr(&load_attr, NULL, 0); - if (fd >= 0) { - close(fd); - ret = -LIBBPF_ERRNO__PROGTYPE; - goto out; - } + load_attr.prog_type = BPF_PROG_TYPE_KPROBE; + load_attr.expected_attach_type = 0; + fd = bpf_load_program_xattr(&load_attr, NULL, 0); + if (fd >= 0) { + close(fd); + ret = -LIBBPF_ERRNO__PROGTYPE; + goto out; } - - if (log_buf) - ret = -LIBBPF_ERRNO__KVER; } out: @@ -3455,22 +6804,36 @@ out: return ret; } -int -bpf_program__load(struct bpf_program *prog, - char *license, __u32 kern_version) +static int libbpf_find_attach_btf_id(struct bpf_program *prog); + +int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) { - int err = 0, fd, i; + int err = 0, fd, i, btf_id; + + if (prog->obj->loaded) { + pr_warn("prog '%s': can't load after object was loaded\n", prog->name); + return -EINVAL; + } + + if ((prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_LSM || + prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { + btf_id = libbpf_find_attach_btf_id(prog); + if (btf_id <= 0) + return btf_id; + prog->attach_btf_id = btf_id; + } if (prog->instances.nr < 0 || !prog->instances.fds) { if (prog->preprocessor) { - pr_warning("Internal error: can't load program '%s'\n", - prog->section_name); + pr_warn("Internal error: can't load program '%s'\n", + prog->name); return -LIBBPF_ERRNO__INTERNAL; } prog->instances.fds = malloc(sizeof(int)); if (!prog->instances.fds) { - pr_warning("Not enough memory for BPF fds\n"); + pr_warn("Not enough memory for BPF fds\n"); return -ENOMEM; } prog->instances.nr = 1; @@ -3479,11 +6842,11 @@ bpf_program__load(struct bpf_program *prog, if (!prog->preprocessor) { if (prog->instances.nr != 1) { - pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", - prog->section_name, prog->instances.nr); + pr_warn("prog '%s': inconsistent nr(%d) != 1\n", + prog->name, prog->instances.nr); } err = load_program(prog, prog->insns, prog->insns_cnt, - license, kern_version, &fd); + license, kern_ver, &fd); if (!err) prog->instances.fds[0] = fd; goto out; @@ -3497,14 +6860,14 @@ bpf_program__load(struct bpf_program *prog, err = preprocessor(prog, i, prog->insns, prog->insns_cnt, &result); if (err) { - pr_warning("Preprocessing the %dth instance of program '%s' failed\n", - i, prog->section_name); + pr_warn("Preprocessing the %dth instance of program '%s' failed\n", + i, prog->name); goto out; } if (!result.new_insn_ptr || !result.new_insn_cnt) { pr_debug("Skip loading the %dth instance of program '%s'\n", - i, prog->section_name); + i, prog->name); prog->instances.fds[i] = -1; if (result.pfd) *result.pfd = -1; @@ -3512,12 +6875,10 @@ bpf_program__load(struct bpf_program *prog, } err = load_program(prog, result.new_insn_ptr, - result.new_insn_cnt, - license, kern_version, &fd); - + result.new_insn_cnt, license, kern_ver, &fd); if (err) { - pr_warning("Loading the %dth instance of program '%s' failed\n", - i, prog->section_name); + pr_warn("Loading the %dth instance of program '%s' failed\n", + i, prog->name); goto out; } @@ -3527,125 +6888,133 @@ bpf_program__load(struct bpf_program *prog, } out: if (err) - pr_warning("failed to load program '%s'\n", - prog->section_name); + pr_warn("failed to load program '%s'\n", prog->name); zfree(&prog->insns); prog->insns_cnt = 0; return err; } -static bool bpf_program__is_function_storage(const struct bpf_program 
*prog, - const struct bpf_object *obj) -{ - return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls; -} - static int bpf_object__load_progs(struct bpf_object *obj, int log_level) { + struct bpf_program *prog; size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { - if (bpf_program__is_function_storage(&obj->programs[i], obj)) + prog = &obj->programs[i]; + err = bpf_object__sanitize_prog(obj, prog); + if (err) + return err; + } + + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + if (prog_is_subprog(obj, prog)) continue; - obj->programs[i].log_level |= log_level; - err = bpf_program__load(&obj->programs[i], - obj->license, - obj->kern_version); + if (!prog->load) { + pr_debug("prog '%s': skipped loading\n", prog->name); + continue; + } + prog->log_level |= log_level; + err = bpf_program__load(prog, obj->license, obj->kern_version); if (err) return err; } return 0; } -static bool bpf_prog_type__needs_kver(enum bpf_prog_type type) -{ - switch (type) { - case BPF_PROG_TYPE_SOCKET_FILTER: - case BPF_PROG_TYPE_SCHED_CLS: - case BPF_PROG_TYPE_SCHED_ACT: - case BPF_PROG_TYPE_XDP: - case BPF_PROG_TYPE_CGROUP_SKB: - case BPF_PROG_TYPE_CGROUP_SOCK: - case BPF_PROG_TYPE_LWT_IN: - case BPF_PROG_TYPE_LWT_OUT: - case BPF_PROG_TYPE_LWT_XMIT: - case BPF_PROG_TYPE_LWT_SEG6LOCAL: - case BPF_PROG_TYPE_SOCK_OPS: - case BPF_PROG_TYPE_SK_SKB: - case BPF_PROG_TYPE_CGROUP_DEVICE: - case BPF_PROG_TYPE_SK_MSG: - case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: - case BPF_PROG_TYPE_LIRC_MODE2: - case BPF_PROG_TYPE_SK_REUSEPORT: - case BPF_PROG_TYPE_FLOW_DISSECTOR: - case BPF_PROG_TYPE_UNSPEC: - case BPF_PROG_TYPE_TRACEPOINT: - case BPF_PROG_TYPE_RAW_TRACEPOINT: - case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: - case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_CGROUP_SYSCTL: - case BPF_PROG_TYPE_CGROUP_SOCKOPT: - return false; - case BPF_PROG_TYPE_KPROBE: - default: - return true; - } -} - -static int bpf_object__validate(struct bpf_object *obj, bool needs_kver) -{ - if (needs_kver && obj->kern_version == 0) { - pr_warning("%s doesn't provide kernel version\n", - obj->path); - return -LIBBPF_ERRNO__KVERSION; - } - return 0; -} +static const struct bpf_sec_def *find_sec_def(const char *sec_name); static struct bpf_object * -__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz, - bool needs_kver, int flags) +__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) { + const char *obj_name, *kconfig; + struct bpf_program *prog; struct bpf_object *obj; + char tmp_name[64]; int err; if (elf_version(EV_CURRENT) == EV_NONE) { - pr_warning("failed to init libelf for %s\n", path); + pr_warn("failed to init libelf for %s\n", + path ? 
: "(mem buf)"); return ERR_PTR(-LIBBPF_ERRNO__LIBELF); } - obj = bpf_object__new(path, obj_buf, obj_buf_sz); + if (!OPTS_VALID(opts, bpf_object_open_opts)) + return ERR_PTR(-EINVAL); + + obj_name = OPTS_GET(opts, object_name, NULL); + if (obj_buf) { + if (!obj_name) { + snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", + (unsigned long)obj_buf, + (unsigned long)obj_buf_sz); + obj_name = tmp_name; + } + path = obj_name; + pr_debug("loading object '%s' from buffer\n", obj_name); + } + + obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); if (IS_ERR(obj)) return obj; - CHECK_ERR(bpf_object__elf_init(obj), err, out); - CHECK_ERR(bpf_object__check_endianness(obj), err, out); - CHECK_ERR(bpf_object__probe_caps(obj), err, out); - CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out); - CHECK_ERR(bpf_object__collect_reloc(obj), err, out); - CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out); + kconfig = OPTS_GET(opts, kconfig, NULL); + if (kconfig) { + obj->kconfig = strdup(kconfig); + if (!obj->kconfig) + return ERR_PTR(-ENOMEM); + } + err = bpf_object__elf_init(obj); + err = err ? : bpf_object__check_endianness(obj); + err = err ? : bpf_object__elf_collect(obj); + err = err ? : bpf_object__collect_externs(obj); + err = err ? : bpf_object__finalize_btf(obj); + err = err ? : bpf_object__init_maps(obj, opts); + err = err ? : bpf_object__collect_relos(obj); + if (err) + goto out; bpf_object__elf_finish(obj); + + bpf_object__for_each_program(prog, obj) { + prog->sec_def = find_sec_def(prog->sec_name); + if (!prog->sec_def) + /* couldn't guess, but user might manually specify */ + continue; + + if (prog->sec_def->is_sleepable) + prog->prog_flags |= BPF_F_SLEEPABLE; + bpf_program__set_type(prog, prog->sec_def->prog_type); + bpf_program__set_expected_attach_type(prog, + prog->sec_def->expected_attach_type); + + if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || + prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) + prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); + } + return obj; out: bpf_object__close(obj); return ERR_PTR(err); } -struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, - int flags) +static struct bpf_object * +__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) { + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .relaxed_maps = flags & MAPS_RELAX_COMPAT, + ); + /* param validation */ if (!attr->file) return NULL; pr_debug("loading %s\n", attr->file); - - return __bpf_object__open(attr->file, NULL, 0, - bpf_prog_type__needs_kver(attr->prog_type), - flags); + return __bpf_object__open(attr->file, NULL, 0, &opts); } struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) @@ -3663,25 +7032,42 @@ struct bpf_object *bpf_object__open(const char *path) return bpf_object__open_xattr(&attr); } -struct bpf_object *bpf_object__open_buffer(void *obj_buf, - size_t obj_buf_sz, - const char *name) +struct bpf_object * +bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) { - char tmp_name[64]; + if (!path) + return ERR_PTR(-EINVAL); - /* param validation */ - if (!obj_buf || obj_buf_sz <= 0) + pr_debug("loading %s\n", path); + + return __bpf_object__open(path, NULL, 0, opts); +} + +struct bpf_object * +bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) +{ + if (!obj_buf || obj_buf_sz == 0) + return ERR_PTR(-EINVAL); + + return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts); +} + +struct bpf_object * 
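+/* Illustrative usage sketch (not part of this patch): with the options-based
+ * API above, the rough equivalent of the legacy call below would be:
+ *
+ *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ *		.object_name = "my_obj",	// hypothetical name
+ *	);
+ *	struct bpf_object *obj = bpf_object__open_mem(buf, buf_sz, &opts);
+ *	long err = libbpf_get_error(obj);	// 0 on success
+ *
+ * bpf_object__open_buffer() is kept only for backwards compatibility, with
+ * its historical quirks (relaxed_maps default and NULL return on bad input).
+ */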
+bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, + const char *name) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .object_name = name, + /* wrong default, but backwards-compatible */ + .relaxed_maps = true, + ); + + /* returning NULL is wrong, but backwards-compatible */ + if (!obj_buf || obj_buf_sz == 0) return NULL; - if (!name) { - snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", - (unsigned long)obj_buf, - (unsigned long)obj_buf_sz); - name = tmp_name; - } - pr_debug("loading object '%s' from buffer\n", name); - - return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); + return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts); } int bpf_object__unload(struct bpf_object *obj) @@ -3691,8 +7077,11 @@ int bpf_object__unload(struct bpf_object *obj) if (!obj) return -EINVAL; - for (i = 0; i < obj->nr_maps; i++) + for (i = 0; i < obj->nr_maps; i++) { zclose(obj->maps[i].fd); + if (obj->maps[i].st_ops) + zfree(&obj->maps[i].st_ops->kern_vdata); + } for (i = 0; i < obj->nr_programs; i++) bpf_program__unload(&obj->programs[i]); @@ -3700,10 +7089,224 @@ int bpf_object__unload(struct bpf_object *obj) return 0; } +static int bpf_object__sanitize_maps(struct bpf_object *obj) +{ + struct bpf_map *m; + + bpf_object__for_each_map(m, obj) { + if (!bpf_map__is_internal(m)) + continue; + if (!kernel_supports(FEAT_GLOBAL_DATA)) { + pr_warn("kernel doesn't support global data\n"); + return -ENOTSUP; + } + if (!kernel_supports(FEAT_ARRAY_MMAP)) + m->def.map_flags ^= BPF_F_MMAPABLE; + } + + return 0; +} + +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + char sym_type, sym_name[500]; + unsigned long long sym_addr; + struct extern_desc *ext; + int ret, err = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (!f) { + err = -errno; + pr_warn("failed to open /proc/kallsyms: %d\n", err); + return err; + } + + while (true) { + ret = fscanf(f, "%llx %c %499s%*[^\n]\n", + &sym_addr, &sym_type, sym_name); + if (ret == EOF && feof(f)) + break; + if (ret != 3) { + pr_warn("failed to read kallsyms entry: %d\n", ret); + err = -EINVAL; + goto out; + } + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + continue; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); + err = -EINVAL; + goto out; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + } + +out: + fclose(f); + return err; +} + +static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) +{ + struct extern_desc *ext; + int i, id; + + for (i = 0; i < obj->nr_extern; i++) { + const struct btf_type *targ_var, *targ_type; + __u32 targ_type_id, local_type_id; + const char *targ_var_name; + int ret; + + ext = &obj->externs[i]; + if (ext->type != EXT_KSYM || !ext->ksym.type_id) + continue; + + id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name, + BTF_KIND_VAR); + if (id <= 0) { + pr_warn("extern (ksym) '%s': failed to find BTF ID in vmlinux BTF.\n", + ext->name); + return -ESRCH; + } + + /* find local type_id */ + local_type_id = ext->ksym.type_id; + + /* find target type_id */ + targ_var = btf__type_by_id(obj->btf_vmlinux, id); + targ_var_name = btf__name_by_offset(obj->btf_vmlinux, + targ_var->name_off); + targ_type = skip_mods_and_typedefs(obj->btf_vmlinux, + targ_var->type, + &targ_type_id); + + ret = bpf_core_types_are_compat(obj->btf, local_type_id, + 
obj->btf_vmlinux, targ_type_id); + if (ret <= 0) { + const struct btf_type *local_type; + const char *targ_name, *local_name; + + local_type = btf__type_by_id(obj->btf, local_type_id); + local_name = btf__name_by_offset(obj->btf, + local_type->name_off); + targ_name = btf__name_by_offset(obj->btf_vmlinux, + targ_type->name_off); + + pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", + ext->name, local_type_id, + btf_kind_str(local_type), local_name, targ_type_id, + btf_kind_str(targ_type), targ_name); + return -EINVAL; + } + + ext->is_set = true; + ext->ksym.vmlinux_btf_id = id; + pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n", + ext->name, id, btf_kind_str(targ_var), targ_var_name); + } + return 0; +} + +static int bpf_object__resolve_externs(struct bpf_object *obj, + const char *extra_kconfig) +{ + bool need_config = false, need_kallsyms = false; + bool need_vmlinux_btf = false; + struct extern_desc *ext; + void *kcfg_data = NULL; + int err, i; + + if (obj->nr_extern == 0) + return 0; + + if (obj->kconfig_map_idx >= 0) + kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + + if (ext->type == EXT_KCFG && + strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { + void *ext_val = kcfg_data + ext->kcfg.data_off; + __u32 kver = get_kernel_version(); + + if (!kver) { + pr_warn("failed to get kernel version\n"); + return -EINVAL; + } + err = set_kcfg_value_num(ext, ext_val, kver); + if (err) + return err; + pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); + } else if (ext->type == EXT_KCFG && + strncmp(ext->name, "CONFIG_", 7) == 0) { + need_config = true; + } else if (ext->type == EXT_KSYM) { + if (ext->ksym.type_id) + need_vmlinux_btf = true; + else + need_kallsyms = true; + } else { + pr_warn("unrecognized extern '%s'\n", ext->name); + return -EINVAL; + } + } + if (need_config && extra_kconfig) { + err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); + if (err) + return -EINVAL; + need_config = false; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG && !ext->is_set) { + need_config = true; + break; + } + } + } + if (need_config) { + err = bpf_object__read_kconfig_file(obj, kcfg_data); + if (err) + return -EINVAL; + } + if (need_kallsyms) { + err = bpf_object__read_kallsyms_file(obj); + if (err) + return -EINVAL; + } + if (need_vmlinux_btf) { + err = bpf_object__resolve_ksyms_btf_id(obj); + if (err) + return -EINVAL; + } + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + + if (!ext->is_set && !ext->is_weak) { + pr_warn("extern %s (strong) not resolved\n", ext->name); + return -ESRCH; + } else if (!ext->is_set) { + pr_debug("extern %s (weak) not resolved, defaulting to zero\n", + ext->name); + } + } + + return 0; +} + int bpf_object__load_xattr(struct bpf_object_load_attr *attr) { struct bpf_object *obj; - int err; + int err, i; if (!attr) return -EINVAL; @@ -3712,20 +7315,37 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) return -EINVAL; if (obj->loaded) { - pr_warning("object should not be loaded twice\n"); + pr_warn("object '%s': load can't be attempted twice\n", obj->name); return -EINVAL; } - obj->loaded = true; + err = bpf_object__probe_loading(obj); + err = err ? : bpf_object__load_vmlinux_btf(obj); + err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); + err = err ? : bpf_object__sanitize_and_load_btf(obj); + err = err ? 
: bpf_object__sanitize_maps(obj); + err = err ? : bpf_object__init_kern_struct_ops_maps(obj); + err = err ? : bpf_object__create_maps(obj); + err = err ? : bpf_object__relocate(obj, attr->target_btf_path); + err = err ? : bpf_object__load_progs(obj, attr->log_level); - CHECK_ERR(bpf_object__create_maps(obj), err, out); - CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out); - CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out); + btf__free(obj->btf_vmlinux); + obj->btf_vmlinux = NULL; + + obj->loaded = true; /* doesn't matter if successfully or not */ + + if (err) + goto out; return 0; out: + /* unpin any maps that were auto-pinned during load */ + for (i = 0; i < obj->nr_maps; i++) + if (obj->maps[i].pinned && !obj->maps[i].reused) + bpf_map__unpin(&obj->maps[i], NULL); + bpf_object__unload(obj); - pr_warning("failed to load object '%s'\n", obj->path); + pr_warn("failed to load object '%s'\n", obj->path); return err; } @@ -3738,6 +7358,28 @@ int bpf_object__load(struct bpf_object *obj) return bpf_object__load_xattr(&attr); } +static int make_parent_dir(const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + char *dname, *dir; + int err = 0; + + dname = strdup(path); + if (dname == NULL) + return -ENOMEM; + + dir = dirname(dname); + if (mkdir(dir, 0700) && errno != EEXIST) + err = -errno; + + free(dname); + if (err) { + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to mkdir %s: %s\n", path, cp); + } + return err; +} + static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; @@ -3755,13 +7397,13 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("failed to statfs %s: %s\n", dir, cp); + pr_warn("failed to statfs %s: %s\n", dir, cp); err = -errno; } free(dname); if (!err && st_fs.f_type != BPF_FS_MAGIC) { - pr_warning("specified path %s is not on BPF FS\n", path); + pr_warn("specified path %s is not on BPF FS\n", path); err = -EINVAL; } @@ -3774,25 +7416,30 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, char *cp, errmsg[STRERR_BUFSIZE]; int err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; if (prog == NULL) { - pr_warning("invalid program pointer\n"); + pr_warn("invalid program pointer\n"); return -EINVAL; } if (instance < 0 || instance >= prog->instances.nr) { - pr_warning("invalid prog instance %d of prog %s (max %d)\n", - instance, prog->section_name, prog->instances.nr); + pr_warn("invalid prog instance %d of prog %s (max %d)\n", + instance, prog->name, prog->instances.nr); return -EINVAL; } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("failed to pin program: %s\n", cp); - return -errno; + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin program: %s\n", cp); + return err; } pr_debug("pinned program '%s'\n", path); @@ -3809,13 +7456,13 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, return err; if (prog == NULL) { - pr_warning("invalid program pointer\n"); + pr_warn("invalid program pointer\n"); return -EINVAL; } if (instance < 0 || instance >= prog->instances.nr) { - pr_warning("invalid prog instance %d of prog %s (max %d)\n", - instance, prog->section_name, prog->instances.nr); + pr_warn("invalid prog instance %d of prog %s (max %d)\n", + instance, prog->name, 
prog->instances.nr); return -EINVAL; } @@ -3827,37 +7474,25 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, return 0; } -static int make_dir(const char *path) -{ - char *cp, errmsg[STRERR_BUFSIZE]; - int err = 0; - - if (mkdir(path, 0700) && errno != EEXIST) - err = -errno; - - if (err) { - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); - pr_warning("failed to mkdir %s: %s\n", path, cp); - } - return err; -} - int bpf_program__pin(struct bpf_program *prog, const char *path) { int i, err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; if (prog == NULL) { - pr_warning("invalid program pointer\n"); + pr_warn("invalid program pointer\n"); return -EINVAL; } if (prog->instances.nr <= 0) { - pr_warning("no instances of prog %s to pin\n", - prog->section_name); + pr_warn("no instances of prog %s to pin\n", prog->name); return -EINVAL; } @@ -3866,10 +7501,6 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) return bpf_program__pin_instance(prog, path, 0); } - err = make_dir(path); - if (err) - return err; - for (i = 0; i < prog->instances.nr; i++) { char buf[PATH_MAX]; int len; @@ -3918,13 +7549,12 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path) return err; if (prog == NULL) { - pr_warning("invalid program pointer\n"); + pr_warn("invalid program pointer\n"); return -EINVAL; } if (prog->instances.nr <= 0) { - pr_warning("no instances of prog %s to pin\n", - prog->section_name); + pr_warn("no instances of prog %s to unpin\n", prog->name); return -EINVAL; } @@ -3960,47 +7590,123 @@ int bpf_map__pin(struct bpf_map *map, const char *path) char *cp, errmsg[STRERR_BUFSIZE]; int err; - err = check_path(path); - if (err) - return err; - if (map == NULL) { - pr_warning("invalid map pointer\n"); + pr_warn("invalid map pointer\n"); return -EINVAL; } - if (bpf_obj_pin(map->fd, path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warning("failed to pin map: %s\n", cp); - return -errno; + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } else if (map->pinned) { + pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", + bpf_map__name(map), map->pin_path); + return 0; + } + } else { + if (!path) { + pr_warn("missing a path to pin map '%s' at\n", + bpf_map__name(map)); + return -EINVAL; + } else if (map->pinned) { + pr_warn("map '%s' already pinned\n", bpf_map__name(map)); + return -EEXIST; + } + + map->pin_path = strdup(path); + if (!map->pin_path) { + err = -errno; + goto out_err; + } } - pr_debug("pinned map '%s'\n", path); + err = make_parent_dir(map->pin_path); + if (err) + return err; + + err = check_path(map->pin_path); + if (err) + return err; + + if (bpf_obj_pin(map->fd, map->pin_path)) { + err = -errno; + goto out_err; + } + + map->pinned = true; + pr_debug("pinned map '%s'\n", map->pin_path); return 0; + +out_err: + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin map: %s\n", cp); + return err; } int bpf_map__unpin(struct bpf_map *map, const char *path) { int err; + if (map == NULL) { + pr_warn("invalid map pointer\n"); + return -EINVAL; + } + + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } + path = map->pin_path; + } 
else if (!path) { + pr_warn("no path to unpin map '%s' from\n", + bpf_map__name(map)); + return -EINVAL; + } + err = check_path(path); if (err) return err; - if (map == NULL) { - pr_warning("invalid map pointer\n"); - return -EINVAL; - } - err = unlink(path); if (err != 0) return -errno; - pr_debug("unpinned map '%s'\n", path); + + map->pinned = false; + pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); return 0; } +int bpf_map__set_pin_path(struct bpf_map *map, const char *path) +{ + char *new = NULL; + + if (path) { + new = strdup(path); + if (!new) + return -errno; + } + + free(map->pin_path); + map->pin_path = new; + return 0; +} + +const char *bpf_map__get_pin_path(const struct bpf_map *map) +{ + return map->pin_path; +} + +bool bpf_map__is_pinned(const struct bpf_map *map) +{ + return map->pinned; +} + int bpf_object__pin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; @@ -4010,29 +7716,32 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) return -ENOENT; if (!obj->loaded) { - pr_warning("object not yet loaded; load it first\n"); + pr_warn("object not yet loaded; load it first\n"); return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) { - err = -EINVAL; - goto err_unpin_maps; - } else if (len >= PATH_MAX) { - err = -ENAMETOOLONG; - goto err_unpin_maps; + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) { + err = -EINVAL; + goto err_unpin_maps; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin_maps; + } + pin_path = buf; + } else if (!map->pin_path) { + continue; } - err = bpf_map__pin(map, buf); + err = bpf_map__pin(map, pin_path); if (err) goto err_unpin_maps; } @@ -4041,17 +7750,10 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) err_unpin_maps: while ((map = bpf_map__prev(map, obj))) { - char buf[PATH_MAX]; - int len; - - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - continue; - else if (len >= PATH_MAX) + if (!map->pin_path) continue; - bpf_map__unpin(map, buf); + bpf_map__unpin(map, NULL); } return err; @@ -4066,17 +7768,24 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) return -ENOENT; bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - return -EINVAL; - else if (len >= PATH_MAX) - return -ENAMETOOLONG; + if (path) { + int len; - err = bpf_map__unpin(map, buf); + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + pin_path = buf; + } else if (!map->pin_path) { + continue; + } + + err = bpf_map__unpin(map, pin_path); if (err) return err; } @@ -4093,14 +7802,10 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path) return -ENOENT; if (!obj->loaded) { - pr_warning("object not yet loaded; load it first\n"); + pr_warn("object not yet loaded; load it first\n"); return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; int len; @@ -4184,11 +7889,45 @@ int bpf_object__pin(struct bpf_object *obj, const char *path) return 0; } +static void 
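+/* Tear down everything a map owns: user priv data (via clear_priv), the
+ * inner map template (recursively), init_slots, the mmap()'ed region backing
+ * internal maps, struct_ops buffers, the name and pin_path strings, and
+ * finally the map FD itself.
+ */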
bpf_map__destroy(struct bpf_map *map) +{ + if (map->clear_priv) + map->clear_priv(map, map->priv); + map->priv = NULL; + map->clear_priv = NULL; + + if (map->inner_map) { + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + if (map->mmaped) { + munmap(map->mmaped, bpf_map_mmap_sz(map)); + map->mmaped = NULL; + } + + if (map->st_ops) { + zfree(&map->st_ops->data); + zfree(&map->st_ops->progs); + zfree(&map->st_ops->kern_func_off); + zfree(&map->st_ops); + } + + zfree(&map->name); + zfree(&map->pin_path); + + if (map->fd >= 0) + zclose(map->fd); +} + void bpf_object__close(struct bpf_object *obj) { size_t i; - if (!obj) + if (IS_ERR_OR_NULL(obj)) return; if (obj->clear_priv) @@ -4199,17 +7938,13 @@ void bpf_object__close(struct bpf_object *obj) btf__free(obj->btf); btf_ext__free(obj->btf_ext); - for (i = 0; i < obj->nr_maps; i++) { - zfree(&obj->maps[i].name); - if (obj->maps[i].clear_priv) - obj->maps[i].clear_priv(&obj->maps[i], - obj->maps[i].priv); - obj->maps[i].priv = NULL; - obj->maps[i].clear_priv = NULL; - } + for (i = 0; i < obj->nr_maps; i++) + bpf_map__destroy(&obj->maps[i]); + + zfree(&obj->kconfig); + zfree(&obj->externs); + obj->nr_extern = 0; - zfree(&obj->sections.rodata); - zfree(&obj->sections.data); zfree(&obj->maps); obj->nr_maps = 0; @@ -4244,7 +7979,7 @@ bpf_object__next(struct bpf_object *prev) const char *bpf_object__name(const struct bpf_object *obj) { - return obj ? obj->path : ERR_PTR(-EINVAL); + return obj ? obj->name : ERR_PTR(-EINVAL); } unsigned int bpf_object__kversion(const struct bpf_object *obj) @@ -4294,7 +8029,7 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, &obj->programs[nr_programs - 1]; if (p->obj != obj) { - pr_warning("error: program handler doesn't match object\n"); + pr_warn("error: program handler doesn't match object\n"); return NULL; } @@ -4311,7 +8046,7 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) do { prog = __bpf_program__iter(prog, obj, true); - } while (prog && bpf_program__is_function_storage(prog, obj)); + } while (prog && prog_is_subprog(obj, prog)); return prog; } @@ -4323,7 +8058,7 @@ bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj) do { prog = __bpf_program__iter(prog, obj, false); - } while (prog && bpf_program__is_function_storage(prog, obj)); + } while (prog && prog_is_subprog(obj, prog)); return prog; } @@ -4349,15 +8084,25 @@ void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) prog->prog_ifindex = ifindex; } +const char *bpf_program__name(const struct bpf_program *prog) +{ + return prog->name; +} + +const char *bpf_program__section_name(const struct bpf_program *prog) +{ + return prog->sec_name; +} + const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) { const char *title; - title = prog->section_name; + title = prog->sec_name; if (needs_copy) { title = strdup(title); if (!title) { - pr_warning("failed to strdup program title\n"); + pr_warn("failed to strdup program title\n"); return ERR_PTR(-ENOMEM); } } @@ -4365,11 +8110,30 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) return title; } +bool bpf_program__autoload(const struct bpf_program *prog) +{ + return prog->load; +} + +int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) +{ + if (prog->obj->loaded) + return -EINVAL; + + prog->load = autoload; + return 0; +} + int bpf_program__fd(const struct bpf_program *prog) { 
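+	/* convenience accessor: FD of the default (zeroth) program instance */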
return bpf_program__nth_fd(prog, 0); } +size_t bpf_program__size(const struct bpf_program *prog) +{ + return prog->insns_cnt * BPF_INSN_SZ; +} + int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, bpf_program_prep_t prep) { @@ -4379,13 +8143,13 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, return -EINVAL; if (prog->instances.nr > 0 || prog->instances.fds) { - pr_warning("Can't set pre-processor after loading\n"); + pr_warn("Can't set pre-processor after loading\n"); return -EINVAL; } instances_fds = malloc(sizeof(int) * nr_instances); if (!instances_fds) { - pr_warning("alloc memory failed for fds\n"); + pr_warn("alloc memory failed for fds\n"); return -ENOMEM; } @@ -4406,21 +8170,26 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n) return -EINVAL; if (n >= prog->instances.nr || n < 0) { - pr_warning("Can't get the %dth fd from program %s: only %d instances\n", - n, prog->section_name, prog->instances.nr); + pr_warn("Can't get the %dth fd from program %s: only %d instances\n", + n, prog->name, prog->instances.nr); return -EINVAL; } fd = prog->instances.fds[n]; if (fd < 0) { - pr_warning("%dth instance of program '%s' is invalid\n", - n, prog->section_name); + pr_warn("%dth instance of program '%s' is invalid\n", + n, prog->name); return -ENOENT; } return fd; } +enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog) +{ + return prog->type; +} + void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { prog->type = type; @@ -4447,6 +8216,7 @@ bool bpf_program__is_##NAME(const struct bpf_program *prog) \ } \ BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); +BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM); BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); @@ -4454,6 +8224,16 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); +BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); +BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); +BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); +BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); + +enum bpf_attach_type +bpf_program__get_expected_attach_type(struct bpf_program *prog) +{ + return prog->expected_attach_type; +} void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type) @@ -4461,41 +8241,130 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, prog->expected_attach_type = type; } -#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \ - { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype } +#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \ + attachable, attach_btf) \ + { \ + .sec = string, \ + .len = sizeof(string) - 1, \ + .prog_type = ptype, \ + .expected_attach_type = eatype, \ + .is_exp_attach_type_optional = eatype_optional, \ + .is_attachable = attachable, \ + .is_attach_btf = attach_btf, \ + } /* Programs that can NOT be attached. */ -#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0) +#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0) /* Programs that can be attached. 
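In the new bpf_sec_def scheme their attach type lives in expected_attach_type (flagged as optional at load time), which is also what libbpf_attach_type_by_name() hands back. 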
*/ #define BPF_APROG_SEC(string, ptype, atype) \ - BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype) + BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0) /* Programs that must specify expected attach type at load time. */ #define BPF_EAPROG_SEC(string, ptype, eatype) \ - BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype) + BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0) + +/* Programs that use BTF to identify attach point */ +#define BPF_PROG_BTF(string, ptype, eatype) \ + BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1) /* Programs that can be attached but attach type can't be identified by section * name. Kept for backward compatibility. */ #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype) -static const struct { - const char *sec; - size_t len; - enum bpf_prog_type prog_type; - enum bpf_attach_type expected_attach_type; - int is_attachable; - enum bpf_attach_type attach_type; -} section_names[] = { +#define SEC_DEF(sec_pfx, ptype, ...) { \ + .sec = sec_pfx, \ + .len = sizeof(sec_pfx) - 1, \ + .prog_type = BPF_PROG_TYPE_##ptype, \ + __VA_ARGS__ \ +} + +static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, + struct bpf_program *prog); +static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, + struct bpf_program *prog); +static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, + struct bpf_program *prog); +static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, + struct bpf_program *prog); +static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, + struct bpf_program *prog); +static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, + struct bpf_program *prog); + +static const struct bpf_sec_def section_defs[] = { BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), - BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE), - BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE), + BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT), + SEC_DEF("kprobe/", KPROBE, + .attach_fn = attach_kprobe), + BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE), + SEC_DEF("kretprobe/", KPROBE, + .attach_fn = attach_kprobe), + BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE), BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS), BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT), - BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT), - BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), - BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), + SEC_DEF("tracepoint/", TRACEPOINT, + .attach_fn = attach_tp), + SEC_DEF("tp/", TRACEPOINT, + .attach_fn = attach_tp), + SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, + .attach_fn = attach_raw_tp), + SEC_DEF("raw_tp/", RAW_TRACEPOINT, + .attach_fn = attach_raw_tp), + SEC_DEF("tp_btf/", TRACING, + .expected_attach_type = BPF_TRACE_RAW_TP, + .is_attach_btf = true, + .attach_fn = attach_trace), + SEC_DEF("fentry/", TRACING, + .expected_attach_type = BPF_TRACE_FENTRY, + .is_attach_btf = true, + .attach_fn = attach_trace), + SEC_DEF("fmod_ret/", TRACING, + .expected_attach_type = BPF_MODIFY_RETURN, + .is_attach_btf = true, + .attach_fn = attach_trace), + SEC_DEF("fexit/", TRACING, + .expected_attach_type = BPF_TRACE_FEXIT, + .is_attach_btf = true, + .attach_fn = attach_trace), + SEC_DEF("fentry.s/", TRACING, + .expected_attach_type = BPF_TRACE_FENTRY, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), + SEC_DEF("fmod_ret.s/", TRACING, + .expected_attach_type = BPF_MODIFY_RETURN, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), + SEC_DEF("fexit.s/", 
TRACING, + .expected_attach_type = BPF_TRACE_FEXIT, + .is_attach_btf = true, + .is_sleepable = true, + .attach_fn = attach_trace), + SEC_DEF("freplace/", EXT, + .is_attach_btf = true, + .attach_fn = attach_trace), + SEC_DEF("lsm/", LSM, + .is_attach_btf = true, + .expected_attach_type = BPF_LSM_MAC, + .attach_fn = attach_lsm), + SEC_DEF("lsm.s/", LSM, + .is_attach_btf = true, + .is_sleepable = true, + .expected_attach_type = BPF_LSM_MAC, + .attach_fn = attach_lsm), + SEC_DEF("iter/", TRACING, + .expected_attach_type = BPF_TRACE_ITER, + .is_attach_btf = true, + .attach_fn = attach_iter), + BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, + BPF_XDP_DEVMAP), + BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, + BPF_XDP_CPUMAP), + BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP, + BPF_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), @@ -4506,6 +8375,10 @@ static const struct { BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS), BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), + BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_CREATE), + BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_RELEASE), BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE), BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, @@ -4543,12 +8416,23 @@ static const struct { BPF_CGROUP_UDP4_RECVMSG), BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG), + BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET4_GETPEERNAME), + BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET6_GETPEERNAME), + BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET4_GETSOCKNAME), + BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_CGROUP_INET6_GETSOCKNAME), BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL), BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT), BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT), + BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), + BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, + BPF_SK_LOOKUP), }; #undef BPF_PROG_SEC_IMPL @@ -4556,12 +8440,26 @@ static const struct { #undef BPF_APROG_SEC #undef BPF_EAPROG_SEC #undef BPF_APROG_COMPAT +#undef SEC_DEF #define MAX_TYPE_NAME_SIZE 32 +static const struct bpf_sec_def *find_sec_def(const char *sec_name) +{ + int i, n = ARRAY_SIZE(section_defs); + + for (i = 0; i < n; i++) { + if (strncmp(sec_name, + section_defs[i].sec, section_defs[i].len)) + continue; + return &section_defs[i]; + } + return NULL; +} + static char *libbpf_get_type_names(bool attach_type) { - int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE; + int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; char *buf; buf = malloc(len); @@ -4570,16 +8468,16 @@ static char *libbpf_get_type_names(bool attach_type) buf[0] = '\0'; /* Forge string buf with all available names */ - for (i = 0; i < ARRAY_SIZE(section_names); i++) { - if (attach_type && !section_names[i].is_attachable) + for (i = 0; i < ARRAY_SIZE(section_defs); i++) { + if (attach_type && !section_defs[i].is_attachable) continue; - if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) { + if (strlen(buf) + 
strlen(section_defs[i].sec) + 2 > len) { free(buf); return NULL; } strcat(buf, " "); - strcat(buf, section_names[i].sec); + strcat(buf, section_defs[i].sec); } return buf; @@ -4588,29 +8486,286 @@ static char *libbpf_get_type_names(bool attach_type) int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type) { + const struct bpf_sec_def *sec_def; char *type_names; - int i; if (!name) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(section_names); i++) { - if (strncmp(name, section_names[i].sec, section_names[i].len)) - continue; - *prog_type = section_names[i].prog_type; - *expected_attach_type = section_names[i].expected_attach_type; + sec_def = find_sec_def(name); + if (sec_def) { + *prog_type = sec_def->prog_type; + *expected_attach_type = sec_def->expected_attach_type; return 0; } - pr_warning("failed to guess program type based on ELF section name '%s'\n", name); + + pr_debug("failed to guess program type from ELF section '%s'\n", name); type_names = libbpf_get_type_names(false); if (type_names != NULL) { - pr_info("supported section(type) names are:%s\n", type_names); + pr_debug("supported section(type) names are:%s\n", type_names); free(type_names); } + return -ESRCH; +} + +static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, + size_t offset) +{ + struct bpf_map *map; + size_t i; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + if (!bpf_map__is_struct_ops(map)) + continue; + if (map->sec_offset <= offset && + offset - map->sec_offset < map->def.value_size) + return map; + } + + return NULL; +} + +/* Collect the reloc from ELF and populate the st_ops->progs[] */ +static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, + GElf_Shdr *shdr, Elf_Data *data) +{ + const struct btf_member *member; + struct bpf_struct_ops *st_ops; + struct bpf_program *prog; + unsigned int shdr_idx; + const struct btf *btf; + struct bpf_map *map; + Elf_Data *symbols; + unsigned int moff, insn_idx; + const char *name; + __u32 member_idx; + GElf_Sym sym; + GElf_Rel rel; + int i, nrels; + + symbols = obj->efile.symbols; + btf = obj->btf; + nrels = shdr->sh_size / shdr->sh_entsize; + for (i = 0; i < nrels; i++) { + if (!gelf_getrel(data, i, &rel)) { + pr_warn("struct_ops reloc: failed to get %d reloc\n", i); + return -LIBBPF_ERRNO__FORMAT; + } + + if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) { + pr_warn("struct_ops reloc: symbol %zx not found\n", + (size_t)GELF_R_SYM(rel.r_info)); + return -LIBBPF_ERRNO__FORMAT; + } + + name = elf_sym_str(obj, sym.st_name) ?: "<?>"; + map = find_struct_ops_map_by_offset(obj, rel.r_offset); + if (!map) { + pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n", + (size_t)rel.r_offset); + return -EINVAL; + } + + moff = rel.r_offset - map->sec_offset; + shdr_idx = sym.st_shndx; + st_ops = map->st_ops; + pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", + map->name, + (long long)(rel.r_info >> 32), + (long long)sym.st_value, + shdr_idx, (size_t)rel.r_offset, + map->sec_offset, sym.st_name, name); + + if (shdr_idx >= SHN_LORESERVE) { + pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", + map->name, (size_t)rel.r_offset, shdr_idx); + return -LIBBPF_ERRNO__RELOC; + } + if (sym.st_value % BPF_INSN_SZ) { + pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", + map->name, (unsigned long long)sym.st_value); + return 
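+		/* struct_ops member initializers must reference the start of a
+		 * BPF function, i.e. an instruction boundary (BPF_INSN_SZ
+		 * bytes per insn), hence the alignment check above
+		 */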
-LIBBPF_ERRNO__FORMAT; + } + insn_idx = sym.st_value / BPF_INSN_SZ; + + member = find_member_by_offset(st_ops->type, moff * 8); + if (!member) { + pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", + map->name, moff); + return -EINVAL; + } + member_idx = member - btf_members(st_ops->type); + name = btf__name_by_offset(btf, member->name_off); + + if (!resolve_func_ptr(btf, member->type, NULL)) { + pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", + map->name, name); + return -EINVAL; + } + + prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); + if (!prog) { + pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", + map->name, shdr_idx, name); + return -EINVAL; + } + + if (prog->type == BPF_PROG_TYPE_UNSPEC) { + const struct bpf_sec_def *sec_def; + + sec_def = find_sec_def(prog->sec_name); + if (sec_def && + sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { + /* for pr_warn */ + prog->type = sec_def->prog_type; + goto invalid_prog; + } + + prog->type = BPF_PROG_TYPE_STRUCT_OPS; + prog->attach_btf_id = st_ops->type_id; + prog->expected_attach_type = member_idx; + } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || + prog->attach_btf_id != st_ops->type_id || + prog->expected_attach_type != member_idx) { + goto invalid_prog; + } + st_ops->progs[member_idx] = prog; + } + + return 0; + +invalid_prog: + pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", + map->name, prog->name, prog->sec_name, prog->type, + prog->attach_btf_id, prog->expected_attach_type, name); return -EINVAL; } +#define BTF_TRACE_PREFIX "btf_trace_" +#define BTF_LSM_PREFIX "bpf_lsm_" +#define BTF_ITER_PREFIX "bpf_iter_" +#define BTF_MAX_NAME_SIZE 128 + +static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, + const char *name, __u32 kind) +{ + char btf_type_name[BTF_MAX_NAME_SIZE]; + int ret; + + ret = snprintf(btf_type_name, sizeof(btf_type_name), + "%s%s", prefix, name); + /* snprintf returns the number of characters written excluding the + * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it + * indicates truncation. 
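+	 * For example, with the "btf_trace_" prefix (10 chars) and a
+	 * BTF_MAX_NAME_SIZE of 128, any name of 118 or more chars would be
+	 * truncated, and we return -ENAMETOOLONG rather than look up a
+	 * mangled name.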
+ */ + if (ret < 0 || ret >= sizeof(btf_type_name)) + return -ENAMETOOLONG; + return btf__find_by_name_kind(btf, btf_type_name, kind); +} + +static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name, + enum bpf_attach_type attach_type) +{ + int err; + + if (attach_type == BPF_TRACE_RAW_TP) + err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name, + BTF_KIND_TYPEDEF); + else if (attach_type == BPF_LSM_MAC) + err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name, + BTF_KIND_FUNC); + else if (attach_type == BPF_TRACE_ITER) + err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name, + BTF_KIND_FUNC); + else + err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + + if (err <= 0) + pr_warn("%s is not found in vmlinux BTF\n", name); + + return err; +} + +int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type) +{ + struct btf *btf; + int err; + + btf = libbpf_find_kernel_btf(); + if (IS_ERR(btf)) { + pr_warn("vmlinux BTF is not found\n"); + return -EINVAL; + } + + err = __find_vmlinux_btf_id(btf, name, attach_type); + btf__free(btf); + return err; +} + +static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) +{ + struct bpf_prog_info_linear *info_linear; + struct bpf_prog_info *info; + struct btf *btf = NULL; + int err = -EINVAL; + + info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0); + if (IS_ERR_OR_NULL(info_linear)) { + pr_warn("failed get_prog_info_linear for FD %d\n", + attach_prog_fd); + return -EINVAL; + } + info = &info_linear->info; + if (!info->btf_id) { + pr_warn("The target program doesn't have BTF\n"); + goto out; + } + if (btf__get_from_id(info->btf_id, &btf)) { + pr_warn("Failed to get BTF of the program\n"); + goto out; + } + err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + btf__free(btf); + if (err <= 0) { + pr_warn("%s is not found in prog's BTF\n", name); + goto out; + } +out: + free(info_linear); + return err; +} + +static int libbpf_find_attach_btf_id(struct bpf_program *prog) +{ + enum bpf_attach_type attach_type = prog->expected_attach_type; + __u32 attach_prog_fd = prog->attach_prog_fd; + const char *name = prog->sec_name; + int i, err; + + if (!name) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(section_defs); i++) { + if (!section_defs[i].is_attach_btf) + continue; + if (strncmp(name, section_defs[i].sec, section_defs[i].len)) + continue; + if (attach_prog_fd) + err = libbpf_find_prog_btf_id(name + section_defs[i].len, + attach_prog_fd); + else + err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, + name + section_defs[i].len, + attach_type); + return err; + } + pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name); + return -ESRCH; +} + int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { @@ -4620,33 +8775,24 @@ int libbpf_attach_type_by_name(const char *name, if (!name) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(section_names); i++) { - if (strncmp(name, section_names[i].sec, section_names[i].len)) + for (i = 0; i < ARRAY_SIZE(section_defs); i++) { + if (strncmp(name, section_defs[i].sec, section_defs[i].len)) continue; - if (!section_names[i].is_attachable) + if (!section_defs[i].is_attachable) return -EINVAL; - *attach_type = section_names[i].attach_type; + *attach_type = section_defs[i].expected_attach_type; return 0; } - pr_warning("failed to guess attach type based on ELF section name '%s'\n", name); + pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); type_names = 
libbpf_get_type_names(true); if (type_names != NULL) { - pr_info("attachable section(type) names are:%s\n", type_names); + pr_debug("attachable section(type) names are:%s\n", type_names); free(type_names); } return -EINVAL; } -static int -bpf_program__identify_section(struct bpf_program *prog, - enum bpf_prog_type *prog_type, - enum bpf_attach_type *expected_attach_type) -{ - return libbpf_prog_type_by_name(prog->section_name, prog_type, - expected_attach_type); -} - int bpf_map__fd(const struct bpf_map *map) { return map ? map->fd : -EINVAL; @@ -4662,6 +8808,71 @@ const char *bpf_map__name(const struct bpf_map *map) return map ? map->name : NULL; } +enum bpf_map_type bpf_map__type(const struct bpf_map *map) +{ + return map->def.type; +} + +int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.type = type; + return 0; +} + +__u32 bpf_map__map_flags(const struct bpf_map *map) +{ + return map->def.map_flags; +} + +int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.map_flags = flags; + return 0; +} + +__u32 bpf_map__numa_node(const struct bpf_map *map) +{ + return map->numa_node; +} + +int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) +{ + if (map->fd >= 0) + return -EBUSY; + map->numa_node = numa_node; + return 0; +} + +__u32 bpf_map__key_size(const struct bpf_map *map) +{ + return map->def.key_size; +} + +int bpf_map__set_key_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.key_size = size; + return 0; +} + +__u32 bpf_map__value_size(const struct bpf_map *map) +{ + return map->def.value_size; +} + +int bpf_map__set_value_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.value_size = size; + return 0; +} + __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? map->btf_key_type_id : 0; @@ -4693,6 +8904,17 @@ void *bpf_map__priv(const struct bpf_map *map) return map ? 
map->priv : ERR_PTR(-EINVAL); } +int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size) +{ + if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || + size != map->def.value_size || map->fd >= 0) + return -EINVAL; + + memcpy(map->mmaped, data, size); + return 0; +} + bool bpf_map__is_offload_neutral(const struct bpf_map *map) { return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; @@ -4703,19 +8925,27 @@ bool bpf_map__is_internal(const struct bpf_map *map) return map->libbpf_type != LIBBPF_MAP_UNSPEC; } -void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +__u32 bpf_map__ifindex(const struct bpf_map *map) { + return map->map_ifindex; +} + +int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +{ + if (map->fd >= 0) + return -EBUSY; map->map_ifindex = ifindex; + return 0; } int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) { if (!bpf_map_type__is_map_in_map(map->def.type)) { - pr_warning("error: unsupported map type\n"); + pr_warn("error: unsupported map type\n"); return -EINVAL; } if (map->inner_map_fd != -1) { - pr_warning("error: inner_map_fd already specified\n"); + pr_warn("error: inner_map_fd already specified\n"); return -EINVAL; } map->inner_map_fd = fd; @@ -4735,8 +8965,8 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) e = obj->maps + obj->nr_maps; if ((m < s) || (m >= e)) { - pr_warning("error in %s: map handler doesn't belong to object\n", - __func__); + pr_warn("error in %s: map handler doesn't belong to object\n", + __func__); return NULL; } @@ -4814,8 +9044,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, { struct bpf_object_open_attr open_attr = {}; struct bpf_program *prog, *first_prog = NULL; - enum bpf_attach_type expected_attach_type; - enum bpf_prog_type prog_type; struct bpf_object *obj; struct bpf_map *map; int err; @@ -4833,28 +9061,29 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, return -ENOENT; bpf_object__for_each_program(prog, obj) { + enum bpf_attach_type attach_type = attr->expected_attach_type; /* - * If type is not specified, try to guess it based on - * section name. + * to preserve backwards compatibility, bpf_prog_load treats + * attr->prog_type, if specified, as an override to whatever + * bpf_object__open guessed */ - prog_type = attr->prog_type; - prog->prog_ifindex = attr->ifindex; - expected_attach_type = attr->expected_attach_type; - if (prog_type == BPF_PROG_TYPE_UNSPEC) { - err = bpf_program__identify_section(prog, &prog_type, - &expected_attach_type); - if (err < 0) { - bpf_object__close(obj); - return -EINVAL; - } + if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { + bpf_program__set_type(prog, attr->prog_type); + bpf_program__set_expected_attach_type(prog, + attach_type); + } + if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) { + /* + * we haven't guessed from section name and user + * didn't provide a fallback type, too bad... 
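+		 * (the only remaining escape hatch at this point is the
+		 * attr->prog_type override handled a few lines above)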
+ */ + bpf_object__close(obj); + return -EINVAL; } - bpf_program__set_type(prog, prog_type); - bpf_program__set_expected_attach_type(prog, - expected_attach_type); - + prog->prog_ifindex = attr->ifindex; prog->log_level = attr->log_level; - prog->prog_flags = attr->prog_flags; + prog->prog_flags |= attr->prog_flags; if (!first_prog) first_prog = prog; } @@ -4865,7 +9094,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, } if (!first_prog) { - pr_warning("object file doesn't contain bpf program\n"); + pr_warn("object file doesn't contain bpf program\n"); bpf_object__close(obj); return -ENOENT; } @@ -4873,7 +9102,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, err = bpf_object__load(obj); if (err) { bpf_object__close(obj); - return -EINVAL; + return err; } *pobj = obj; @@ -4882,37 +9111,153 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, } struct bpf_link { + int (*detach)(struct bpf_link *link); int (*destroy)(struct bpf_link *link); + char *pin_path; /* NULL, if not pinned */ + int fd; /* hook FD, -1 if not applicable */ + bool disconnected; }; +/* Replace link's underlying BPF program with the new one */ +int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) +{ + return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); +} + +/* Release "ownership" of underlying BPF resource (typically, BPF program + * attached to some BPF hook, e.g., tracepoint, kprobe, etc). A disconnected + * link, when destroyed through a bpf_link__destroy() call, won't attempt to + * detach/unregister that BPF resource. This is useful in situations where, + * say, an attached BPF program has to outlive the userspace program that + * attached it in the system. Depending on the type of BPF program, though, + * there might be additional steps (like pinning BPF program in BPF FS) + * necessary to ensure exit of the userspace program doesn't trigger automatic + * detachment and cleanup inside the kernel. + */ +void bpf_link__disconnect(struct bpf_link *link) +{ + link->disconnected = true; +} + int bpf_link__destroy(struct bpf_link *link) { - int err; + int err = 0; - if (!link) + if (IS_ERR_OR_NULL(link)) return 0; - err = link->destroy(link); + if (!link->disconnected && link->detach) + err = link->detach(link); + if (link->destroy) + link->destroy(link); + if (link->pin_path) + free(link->pin_path); free(link); return err; } -struct bpf_link_fd { - struct bpf_link link; /* has to be at the top of struct */ - int fd; /* hook FD */ -}; - -static int bpf_link__destroy_perf_event(struct bpf_link *link) +int bpf_link__fd(const struct bpf_link *link) +{ + return link->fd; +} + +const char *bpf_link__pin_path(const struct bpf_link *link) +{ + return link->pin_path; +} + +static int bpf_link__detach_fd(struct bpf_link *link) +{ + return close(link->fd); +} + +struct bpf_link *bpf_link__open(const char *path) +{ + struct bpf_link *link; + int fd; + + fd = bpf_obj_get(path); + if (fd < 0) { + fd = -errno; + pr_warn("failed to open link at %s: %d\n", path, fd); + return ERR_PTR(fd); + } + + link = calloc(1, sizeof(*link)); + if (!link) { + close(fd); + return ERR_PTR(-ENOMEM); + } + link->detach = &bpf_link__detach_fd; + link->fd = fd; + + link->pin_path = strdup(path); + if (!link->pin_path) { + bpf_link__destroy(link); + return ERR_PTR(-ENOMEM); + } + + return link; +} + +int bpf_link__detach(struct bpf_link *link) +{ + return bpf_link_detach(link->fd) ?
-errno : 0; +} + +int bpf_link__pin(struct bpf_link *link, const char *path) { - struct bpf_link_fd *l = (void *)link; int err; - err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0); + if (link->pin_path) + return -EBUSY; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); + if (err) + return err; + + link->pin_path = strdup(path); + if (!link->pin_path) + return -ENOMEM; + + if (bpf_obj_pin(link->fd, link->pin_path)) { + err = -errno; + zfree(&link->pin_path); + return err; + } + + pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); + return 0; +} + +int bpf_link__unpin(struct bpf_link *link) +{ + int err; + + if (!link->pin_path) + return -EINVAL; + + err = unlink(link->pin_path); + if (err != 0) + return -errno; + + pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); + zfree(&link->pin_path); + return 0; +} + +static int bpf_link__detach_perf_event(struct bpf_link *link) +{ + int err; + + err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); if (err) err = -errno; - close(l->fd); + close(link->fd); return err; } @@ -4920,44 +9265,45 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd) { char errmsg[STRERR_BUFSIZE]; - struct bpf_link_fd *link; + struct bpf_link *link; int prog_fd, err; if (pfd < 0) { - pr_warning("program '%s': invalid perf event FD %d\n", - bpf_program__title(prog, false), pfd); + pr_warn("prog '%s': invalid perf event FD %d\n", + prog->name, pfd); return ERR_PTR(-EINVAL); } prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { - pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n", - bpf_program__title(prog, false)); + pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", + prog->name); return ERR_PTR(-EINVAL); } - link = malloc(sizeof(*link)); + link = calloc(1, sizeof(*link)); if (!link) return ERR_PTR(-ENOMEM); - link->link.destroy = &bpf_link__destroy_perf_event; + link->detach = &bpf_link__detach_perf_event; link->fd = pfd; if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { err = -errno; free(link); - pr_warning("program '%s': failed to attach to pfd %d: %s\n", - bpf_program__title(prog, false), pfd, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to attach to pfd %d: %s\n", + prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + if (err == -EPROTO) + pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", + prog->name, pfd); return ERR_PTR(err); } if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; free(link); - pr_warning("program '%s': failed to enable pfd %d: %s\n", - bpf_program__title(prog, false), pfd, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to enable pfd %d: %s\n", + prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return ERR_PTR(err); } - return (struct bpf_link *)link; + return link; } /* @@ -5028,9 +9374,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, type = uprobe ? determine_uprobe_perf_type() : determine_kprobe_perf_type(); if (type < 0) { - pr_warning("failed to determine %s perf type: %s\n", - uprobe ? "uprobe" : "kprobe", - libbpf_strerror_r(type, errmsg, sizeof(errmsg))); + pr_warn("failed to determine %s perf type: %s\n", + uprobe ? 
"uprobe" : "kprobe", + libbpf_strerror_r(type, errmsg, sizeof(errmsg))); return type; } if (retprobe) { @@ -5038,10 +9384,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, : determine_kprobe_retprobe_bit(); if (bit < 0) { - pr_warning("failed to determine %s retprobe bit: %s\n", - uprobe ? "uprobe" : "kprobe", - libbpf_strerror_r(bit, errmsg, - sizeof(errmsg))); + pr_warn("failed to determine %s retprobe bit: %s\n", + uprobe ? "uprobe" : "kprobe", + libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); return bit; } attr.config |= 1 << bit; @@ -5058,9 +9403,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; - pr_warning("%s perf_event_open() failed: %s\n", - uprobe ? "uprobe" : "kprobe", - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("%s perf_event_open() failed: %s\n", + uprobe ? "uprobe" : "kprobe", + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return err; } return pfd; @@ -5077,25 +9422,35 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, 0 /* offset */, -1 /* pid */); if (pfd < 0) { - pr_warning("program '%s': failed to create %s '%s' perf event: %s\n", - bpf_program__title(prog, false), - retprobe ? "kretprobe" : "kprobe", func_name, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n", + prog->name, retprobe ? "kretprobe" : "kprobe", func_name, + libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return ERR_PTR(pfd); } link = bpf_program__attach_perf_event(prog, pfd); if (IS_ERR(link)) { close(pfd); err = PTR_ERR(link); - pr_warning("program '%s': failed to attach to %s '%s': %s\n", - bpf_program__title(prog, false), - retprobe ? "kretprobe" : "kprobe", func_name, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to attach to %s '%s': %s\n", + prog->name, retprobe ? "kretprobe" : "kprobe", func_name, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return link; } return link; } +static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + const char *func_name; + bool retprobe; + + func_name = prog->sec_name + sec->len; + retprobe = strcmp(sec->sec, "kretprobe/") == 0; + + return bpf_program__attach_kprobe(prog, retprobe, func_name); +} + struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, @@ -5108,22 +9463,20 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid); if (pfd < 0) { - pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n", - bpf_program__title(prog, false), - retprobe ? "uretprobe" : "uprobe", - binary_path, func_offset, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", + prog->name, retprobe ? "uretprobe" : "uprobe", + binary_path, func_offset, + libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return ERR_PTR(pfd); } link = bpf_program__attach_perf_event(prog, pfd); if (IS_ERR(link)) { close(pfd); err = PTR_ERR(link); - pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n", - bpf_program__title(prog, false), - retprobe ? 
"uretprobe" : "uprobe", - binary_path, func_offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", + prog->name, retprobe ? "uretprobe" : "uprobe", + binary_path, func_offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return link; } return link; @@ -5157,9 +9510,9 @@ static int perf_event_open_tracepoint(const char *tp_category, tp_id = determine_tracepoint_id(tp_category, tp_name); if (tp_id < 0) { - pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n", - tp_category, tp_name, - libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); + pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", + tp_category, tp_name, + libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); return tp_id; } @@ -5171,9 +9524,9 @@ static int perf_event_open_tracepoint(const char *tp_category, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; - pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n", - tp_category, tp_name, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", + tp_category, tp_name, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return err; } return pfd; @@ -5189,64 +9542,333 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog, pfd = perf_event_open_tracepoint(tp_category, tp_name); if (pfd < 0) { - pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n", - bpf_program__title(prog, false), - tp_category, tp_name, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", + prog->name, tp_category, tp_name, + libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return ERR_PTR(pfd); } link = bpf_program__attach_perf_event(prog, pfd); if (IS_ERR(link)) { close(pfd); err = PTR_ERR(link); - pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n", - bpf_program__title(prog, false), - tp_category, tp_name, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", + prog->name, tp_category, tp_name, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return link; } return link; } -static int bpf_link__destroy_fd(struct bpf_link *link) +static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, + struct bpf_program *prog) { - struct bpf_link_fd *l = (void *)link; + char *sec_name, *tp_cat, *tp_name; + struct bpf_link *link; - return close(l->fd); + sec_name = strdup(prog->sec_name); + if (!sec_name) + return ERR_PTR(-ENOMEM); + + /* extract "tp/<category>/<name>" */ + tp_cat = sec_name + sec->len; + tp_name = strchr(tp_cat, '/'); + if (!tp_name) { + link = ERR_PTR(-EINVAL); + goto out; + } + *tp_name = '\0'; + tp_name++; + + link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); +out: + free(sec_name); + return link; } struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, const char *tp_name) { char errmsg[STRERR_BUFSIZE]; - struct bpf_link_fd *link; + struct bpf_link *link; int prog_fd, pfd; prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { - pr_warning("program '%s': can't attach before loaded\n", - bpf_program__title(prog, false)); + pr_warn("prog '%s': can't attach before loaded\n", prog->name); return ERR_PTR(-EINVAL); } - link = malloc(sizeof(*link)); + link = calloc(1, sizeof(*link)); if (!link) return ERR_PTR(-ENOMEM); - link->link.destroy = &bpf_link__destroy_fd; + link->detach = 
&bpf_link__detach_fd; pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); if (pfd < 0) { pfd = -errno; free(link); - pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n", - bpf_program__title(prog, false), tp_name, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", + prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + return ERR_PTR(pfd); + } + link->fd = pfd; + return link; +} + +static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + const char *tp_name = prog->sec_name + sec->len; + + return bpf_program__attach_raw_tracepoint(prog, tp_name); +} + +/* Common logic for all BPF program types that attach to a btf_id */ +static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog) +{ + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, pfd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return ERR_PTR(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return ERR_PTR(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + pfd = bpf_raw_tracepoint_open(NULL, prog_fd); + if (pfd < 0) { + pfd = -errno; + free(link); + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); return ERR_PTR(pfd); } link->fd = pfd; return (struct bpf_link *)link; } +struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) +{ + return bpf_program__attach_btf_id(prog); +} + +struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog) +{ + return bpf_program__attach_btf_id(prog); +} + +static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + return bpf_program__attach_trace(prog); +} + +static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + return bpf_program__attach_lsm(prog); +} + +static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + return bpf_program__attach_iter(prog, NULL); +} + +static struct bpf_link * +bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, + const char *target_name) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, + .target_btf_id = btf_id); + enum bpf_attach_type attach_type; + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, link_fd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return ERR_PTR(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return ERR_PTR(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + attach_type = bpf_program__get_expected_attach_type(prog); + link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts); + if (link_fd < 0) { + link_fd = -errno; + free(link); + pr_warn("prog '%s': failed to attach to %s: %s\n", + prog->name, target_name, + libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + return ERR_PTR(link_fd); + } + link->fd = link_fd; + return link; +} + +struct bpf_link * +bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd) +{ + return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup"); +} + +struct bpf_link * +bpf_program__attach_netns(struct bpf_program *prog, int netns_fd) +{ + return bpf_program__attach_fd(prog, netns_fd, 0, "netns"); +} + +struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int 
ifindex) +{ + /* target_fd/target_ifindex use the same field in LINK_CREATE */ + return bpf_program__attach_fd(prog, ifindex, 0, "xdp"); +} + +struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog, + int target_fd, + const char *attach_func_name) +{ + int btf_id; + + if (!!target_fd != !!attach_func_name) { + pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", + prog->name); + return ERR_PTR(-EINVAL); + } + + if (prog->type != BPF_PROG_TYPE_EXT) { + pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n", + prog->name); + return ERR_PTR(-EINVAL); + } + + if (target_fd) { + btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); + if (btf_id < 0) + return ERR_PTR(btf_id); + + return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace"); + } else { + /* no target, so use raw_tracepoint_open for compatibility + * with old kernels + */ + return bpf_program__attach_trace(prog); + } +} + +struct bpf_link * +bpf_program__attach_iter(struct bpf_program *prog, + const struct bpf_iter_attach_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, link_fd; + __u32 target_fd = 0; + + if (!OPTS_VALID(opts, bpf_iter_attach_opts)) + return ERR_PTR(-EINVAL); + + link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); + link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return ERR_PTR(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return ERR_PTR(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, + &link_create_opts); + if (link_fd < 0) { + link_fd = -errno; + free(link); + pr_warn("prog '%s': failed to attach to iterator: %s\n", + prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + return ERR_PTR(link_fd); + } + link->fd = link_fd; + return link; +} + +struct bpf_link *bpf_program__attach(struct bpf_program *prog) +{ + const struct bpf_sec_def *sec_def; + + sec_def = find_sec_def(prog->sec_name); + if (!sec_def || !sec_def->attach_fn) + return ERR_PTR(-ESRCH); + + return sec_def->attach_fn(sec_def, prog); +} + +static int bpf_link__detach_struct_ops(struct bpf_link *link) +{ + __u32 zero = 0; + + if (bpf_map_delete_elem(link->fd, &zero)) + return -errno; + + return 0; +} + +struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) +{ + struct bpf_struct_ops *st_ops; + struct bpf_link *link; + __u32 i, zero = 0; + int err; + + if (!bpf_map__is_struct_ops(map) || map->fd == -1) + return ERR_PTR(-EINVAL); + + link = calloc(1, sizeof(*link)); + if (!link) + return ERR_PTR(-ENOMEM); + + st_ops = map->st_ops; + for (i = 0; i < btf_vlen(st_ops->type); i++) { + struct bpf_program *prog = st_ops->progs[i]; + void *kern_data; + int prog_fd; + + if (!prog) + continue; + + prog_fd = bpf_program__fd(prog); + kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; + *(unsigned long *)kern_data = prog_fd; + } + + err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); + if (err) { + err = -errno; + free(link); + return ERR_PTR(err); + } + + link->detach = bpf_link__detach_struct_ops; + link->fd = map->fd; + + return link; +} + enum bpf_perf_event_ret bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, @@ -5330,7 +9952,7 @@ struct
perf_buffer { size_t mmap_size; struct perf_cpu_buf **cpu_bufs; struct epoll_event *events; - int cpu_cnt; + int cpu_cnt; /* number of allocated CPU buffers */ int epoll_fd; /* perf event FD */ int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ }; @@ -5342,7 +9964,7 @@ static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, return; if (cpu_buf->base && munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) - pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); + pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); if (cpu_buf->fd >= 0) { ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); close(cpu_buf->fd); @@ -5355,12 +9977,15 @@ void perf_buffer__free(struct perf_buffer *pb) { int i; - if (!pb) + if (IS_ERR_OR_NULL(pb)) return; if (pb->cpu_bufs) { - for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) { + for (i = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + if (!cpu_buf) + continue; + bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); perf_buffer__free_cpu_buf(pb, cpu_buf); } @@ -5392,8 +10017,8 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, -1, PERF_FLAG_FD_CLOEXEC); if (cpu_buf->fd < 0) { err = -errno; - pr_warning("failed to open perf buffer event on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to open perf buffer event on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } @@ -5403,15 +10028,15 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, if (cpu_buf->base == MAP_FAILED) { cpu_buf->base = NULL; err = -errno; - pr_warning("failed to mmap perf buffer on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; - pr_warning("failed to enable perf buffer event on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } @@ -5431,7 +10056,7 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params p = {}; struct perf_event_attr attr = { 0, }; - attr.config = PERF_COUNT_SW_BPF_OUTPUT, + attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; attr.sample_period = 1; @@ -5464,31 +10089,42 @@ perf_buffer__new_raw(int map_fd, size_t page_cnt, static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { - struct bpf_map_info map = {}; + const char *online_cpus_file = "/sys/devices/system/cpu/online"; + struct bpf_map_info map; char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; + bool *online = NULL; __u32 map_info_len; - int err, i; + int err, i, j, n; if (page_cnt & (page_cnt - 1)) { - pr_warning("page count should be power of two, but is %zu\n", - page_cnt); + pr_warn("page count should be power of two, but is %zu\n", + page_cnt); return ERR_PTR(-EINVAL); } + /* best-effort sanity checks */ + memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; - pr_warning("failed to get map info for map FD %d: %s\n", - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); - return ERR_PTR(err); - } - - if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { - pr_warning("map 
'%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", - map.name); - return ERR_PTR(-EINVAL); + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return + * -EBADFD, -EFAULT, or -E2BIG on real error + */ + if (err != -EINVAL) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + return ERR_PTR(err); + } + pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", + map_fd); + } else { + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", + map.name); + return ERR_PTR(-EINVAL); + } } pb = calloc(1, sizeof(*pb)); @@ -5507,8 +10143,8 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (pb->epoll_fd < 0) { err = -errno; - pr_warning("failed to create epoll instance: %s\n", - libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to create epoll instance: %s\n", + libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } @@ -5520,63 +10156,79 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = pb->cpu_cnt; goto error; } - if (map.max_entries < pb->cpu_cnt) + if (map.max_entries && map.max_entries < pb->cpu_cnt) pb->cpu_cnt = map.max_entries; } pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); if (!pb->events) { err = -ENOMEM; - pr_warning("failed to allocate events: out of memory\n"); + pr_warn("failed to allocate events: out of memory\n"); goto error; } pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); if (!pb->cpu_bufs) { err = -ENOMEM; - pr_warning("failed to allocate buffers: out of memory\n"); + pr_warn("failed to allocate buffers: out of memory\n"); goto error; } - for (i = 0; i < pb->cpu_cnt; i++) { + err = parse_cpu_mask_file(online_cpus_file, &online, &n); + if (err) { + pr_warn("failed to get online CPU mask: %d\n", err); + goto error; + } + + for (i = 0, j = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf; int cpu, map_key; cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; map_key = p->cpu_cnt > 0 ? 
p->map_keys[i] : i; + /* in case user didn't explicitly request particular CPUs to + * be attached to, skip offline/not present CPUs + */ + if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) + continue; + cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); if (IS_ERR(cpu_buf)) { err = PTR_ERR(cpu_buf); goto error; } - pb->cpu_bufs[i] = cpu_buf; + pb->cpu_bufs[j] = cpu_buf; err = bpf_map_update_elem(pb->map_fd, &map_key, &cpu_buf->fd, 0); if (err) { err = -errno; - pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n", - cpu, map_key, cpu_buf->fd, - libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", + cpu, map_key, cpu_buf->fd, + libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } - pb->events[i].events = EPOLLIN; - pb->events[i].data.ptr = cpu_buf; + pb->events[j].events = EPOLLIN; + pb->events[j].data.ptr = cpu_buf; if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, - &pb->events[i]) < 0) { + &pb->events[j]) < 0) { err = -errno; - pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n", - cpu, cpu_buf->fd, - libbpf_strerror_r(err, msg, sizeof(msg))); + pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", + cpu, cpu_buf->fd, + libbpf_strerror_r(err, msg, sizeof(msg))); goto error; } + j++; } + pb->cpu_cnt = j; + free(online); return pb; error: + free(online); if (pb) perf_buffer__free(pb); return ERR_PTR(err); @@ -5585,7 +10237,7 @@ error: struct perf_sample_raw { struct perf_event_header header; uint32_t size; - char data[0]; + char data[]; }; struct perf_sample_lost { @@ -5622,7 +10274,7 @@ perf_buffer__process_record(struct perf_event_header *e, void *ctx) break; } default: - pr_warning("unknown perf sample type %d\n", e->type); + pr_warn("unknown perf sample type %d\n", e->type); return LIBBPF_PERF_EVENT_ERROR; } return LIBBPF_PERF_EVENT_CONT; @@ -5642,6 +10294,11 @@ static int perf_buffer__process_records(struct perf_buffer *pb, return 0; } +int perf_buffer__epoll_fd(const struct perf_buffer *pb) +{ + return pb->epoll_fd; +} + int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) { int i, cnt, err; @@ -5652,13 +10309,81 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) err = perf_buffer__process_records(pb, cpu_buf); if (err) { - pr_warning("error while processing records: %d\n", err); + pr_warn("error while processing records: %d\n", err); return err; } } return cnt < 0 ? -errno : cnt; } +/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer + * manager. + */ +size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) +{ + return pb->cpu_cnt; } + +/* + * Return perf_event FD of a ring buffer in *buf_idx* slot of + * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using + * select()/poll()/epoll() Linux syscalls. + */ +int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) +{ + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return -EINVAL; + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return -ENOENT; + + return cpu_buf->fd; +} + +/* + * Consume data from perf ring buffer corresponding to slot *buf_idx* in + * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to + * consume, do nothing and return success. + * Returns: + * - 0 on success; + * - <0 on failure.
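+ * + * A minimal usage sketch (hypothetical variables, error handling elided): + * + * int fd = perf_buffer__buffer_fd(pb, buf_idx); + * (wait for POLLIN on fd via poll()/epoll_wait(), then:) + * err = perf_buffer__consume_buffer(pb, buf_idx);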
+ */ +int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) +{ + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return -EINVAL; + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return -ENOENT; + + return perf_buffer__process_records(pb, cpu_buf); +} + +int perf_buffer__consume(struct perf_buffer *pb) +{ + int i, err; + + for (i = 0; i < pb->cpu_cnt; i++) { + struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + + if (!cpu_buf) + continue; + + err = perf_buffer__process_records(pb, cpu_buf); + if (err) { + pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); + return err; + } + } + return 0; +} + struct bpf_prog_info_array_desc { int array_offset; /* e.g. offset of jited_prog_insns */ int count_offset; /* e.g. offset of jited_prog_len */ @@ -5716,7 +10441,8 @@ static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = { }; -static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset) +static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, + int offset) { __u32 *array = (__u32 *)info; @@ -5725,7 +10451,8 @@ static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offse return -(int)offset; } -static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset) +static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, + int offset) { __u64 *array = (__u64 *)info; @@ -5849,13 +10576,13 @@ bpf_program__get_prog_info_linear(int fd, __u64 arrays) v2 = bpf_prog_info_read_offset_u32(&info_linear->info, desc->count_offset); if (v1 != v2) - pr_warning("%s: mismatch in element count\n", __func__); + pr_warn("%s: mismatch in element count\n", __func__); v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); v2 = bpf_prog_info_read_offset_u32(&info_linear->info, desc->size_offset); if (v1 != v2) - pr_warning("%s: mismatch in rec size\n", __func__); + pr_warn("%s: mismatch in rec size\n", __func__); } /* step 7: update info_len and data_len */ @@ -5905,63 +10632,292 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) } } +int bpf_program__set_attach_target(struct bpf_program *prog, + int attach_prog_fd, + const char *attach_func_name) +{ + int btf_id; + + if (!prog || attach_prog_fd < 0 || !attach_func_name) + return -EINVAL; + + if (attach_prog_fd) + btf_id = libbpf_find_prog_btf_id(attach_func_name, + attach_prog_fd); + else + btf_id = libbpf_find_vmlinux_btf_id(attach_func_name, + prog->expected_attach_type); + + if (btf_id < 0) + return btf_id; + + prog->attach_btf_id = btf_id; + prog->attach_prog_fd = attach_prog_fd; + return 0; +} + +int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) +{ + int err = 0, n, len, start, end = -1; + bool *tmp; + + *mask = NULL; + *mask_sz = 0; + + /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ + while (*s) { + if (*s == ',' || *s == '\n') { + s++; + continue; + } + n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); + if (n <= 0 || n > 2) { + pr_warn("Failed to get CPU range %s: %d\n", s, n); + err = -EINVAL; + goto cleanup; + } else if (n == 1) { + end = start; + } + if (start < 0 || start > end) { + pr_warn("Invalid CPU range [%d,%d] in %s\n", + start, end, s); + err = -EINVAL; + goto cleanup; + } + tmp = realloc(*mask, end + 1); + if (!tmp) { + err = -ENOMEM; + goto cleanup; + } + *mask = tmp; + memset(tmp + *mask_sz, 0, start - *mask_sz); + memset(tmp + start, 1, end - start + 1); + *mask_sz = end + 1; + s += len; + } + if (!*mask_sz) { + 
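+ /* e.g. an empty string, or input consisting only of ',' and '\n' separators */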
pr_warn("Empty CPU range\n"); + return -EINVAL; + } + return 0; +cleanup: + free(*mask); + *mask = NULL; + return err; +} + +int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) +{ + int fd, err = 0, len; + char buf[128]; + + fd = open(fcpu, O_RDONLY); + if (fd < 0) { + err = -errno; + pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); + return err; + } + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len <= 0) { + err = len ? -errno : -EINVAL; + pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err); + return err; + } + if (len >= sizeof(buf)) { + pr_warn("CPU mask is too big in file %s\n", fcpu); + return -E2BIG; + } + buf[len] = '\0'; + + return parse_cpu_mask_str(buf, mask, mask_sz); +} + int libbpf_num_possible_cpus(void) { static const char *fcpu = "/sys/devices/system/cpu/possible"; - int len = 0, n = 0, il = 0, ir = 0; - unsigned int start = 0, end = 0; - int tmp_cpus = 0; static int cpus; - char buf[128]; - int error = 0; - int fd = -1; + int err, n, i, tmp_cpus; + bool *mask; tmp_cpus = READ_ONCE(cpus); if (tmp_cpus > 0) return tmp_cpus; - fd = open(fcpu, O_RDONLY); - if (fd < 0) { - error = errno; - pr_warning("Failed to open file %s: %s\n", - fcpu, strerror(error)); - return -error; - } - len = read(fd, buf, sizeof(buf)); - close(fd); - if (len <= 0) { - error = len ? errno : EINVAL; - pr_warning("Failed to read # of possible cpus from %s: %s\n", - fcpu, strerror(error)); - return -error; - } - if (len == sizeof(buf)) { - pr_warning("File %s size overflow\n", fcpu); - return -EOVERFLOW; - } - buf[len] = '\0'; + err = parse_cpu_mask_file(fcpu, &mask, &n); + if (err) + return err; - for (ir = 0, tmp_cpus = 0; ir <= len; ir++) { - /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ - if (buf[ir] == ',' || buf[ir] == '\0') { - buf[ir] = '\0'; - n = sscanf(&buf[il], "%u-%u", &start, &end); - if (n <= 0) { - pr_warning("Failed to get # CPUs from %s\n", - &buf[il]); - return -EINVAL; - } else if (n == 1) { - end = start; - } - tmp_cpus += end - start + 1; - il = ir + 1; - } - } - if (tmp_cpus <= 0) { - pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu); - return -EINVAL; + tmp_cpus = 0; + for (i = 0; i < n; i++) { + if (mask[i]) + tmp_cpus++; } + free(mask); WRITE_ONCE(cpus, tmp_cpus); return tmp_cpus; } + +int bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts, + .object_name = s->name, + ); + struct bpf_object *obj; + int i; + + /* Attempt to preserve opts->object_name, unless overriden by user + * explicitly. Overwriting object name for skeletons is discouraged, + * as it breaks global data maps, because they contain object name + * prefix as their own map name prefix. When skeleton is generated, + * bpftool is making an assumption that this name will stay the same. 
+ */ + if (opts) { + memcpy(&skel_opts, opts, sizeof(*opts)); + if (!opts->object_name) + skel_opts.object_name = s->name; + } + + obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); + if (IS_ERR(obj)) { + pr_warn("failed to initialize skeleton BPF object '%s': %ld\n", + s->name, PTR_ERR(obj)); + return PTR_ERR(obj); + } + + *s->obj = obj; + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map **map = s->maps[i].map; + const char *name = s->maps[i].name; + void **mmaped = s->maps[i].mmaped; + + *map = bpf_object__find_map_by_name(obj, name); + if (!*map) { + pr_warn("failed to find skeleton map '%s'\n", name); + return -ESRCH; + } + + /* externs shouldn't be pre-setup from user code */ + if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) + *mmaped = (*map)->mmaped; + } + + for (i = 0; i < s->prog_cnt; i++) { + struct bpf_program **prog = s->progs[i].prog; + const char *name = s->progs[i].name; + + *prog = bpf_object__find_program_by_name(obj, name); + if (!*prog) { + pr_warn("failed to find skeleton program '%s'\n", name); + return -ESRCH; + } + } + + return 0; +} + +int bpf_object__load_skeleton(struct bpf_object_skeleton *s) +{ + int i, err; + + err = bpf_object__load(*s->obj); + if (err) { + pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); + return err; + } + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map *map = *s->maps[i].map; + size_t mmap_sz = bpf_map_mmap_sz(map); + int prot, map_fd = bpf_map__fd(map); + void **mmaped = s->maps[i].mmaped; + + if (!mmaped) + continue; + + if (!(map->def.map_flags & BPF_F_MMAPABLE)) { + *mmaped = NULL; + continue; + } + + if (map->def.map_flags & BPF_F_RDONLY_PROG) + prot = PROT_READ; + else + prot = PROT_READ | PROT_WRITE; + + /* Remap anonymous mmap()-ed "map initialization image" as + * a BPF map-backed mmap()-ed memory, but preserving the same + * memory address. This will cause kernel to change process' + * page table to point to a different piece of kernel memory, + * but from userspace point of view memory address (and its + * contents, being identical at this point) will stay the + * same. This mapping will be released by bpf_object__close() + * as per normal clean up procedure, so we don't need to worry + * about it from skeleton's clean up perspective. 
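+ * + * (In skeleton terms, hypothetically: a pointer such as skel->bss stays + * valid across load; only the pages backing it change.)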
+ */ + *mmaped = mmap(map->mmaped, mmap_sz, prot, + MAP_SHARED | MAP_FIXED, map_fd, 0); + if (*mmaped == MAP_FAILED) { + err = -errno; + *mmaped = NULL; + pr_warn("failed to re-mmap() map '%s': %d\n", + bpf_map__name(map), err); + return err; + } + } + + return 0; +} + +int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) +{ + int i; + + for (i = 0; i < s->prog_cnt; i++) { + struct bpf_program *prog = *s->progs[i].prog; + struct bpf_link **link = s->progs[i].link; + const struct bpf_sec_def *sec_def; + + if (!prog->load) + continue; + + sec_def = find_sec_def(prog->sec_name); + if (!sec_def || !sec_def->attach_fn) + continue; + + *link = sec_def->attach_fn(sec_def, prog); + if (IS_ERR(*link)) { + pr_warn("failed to auto-attach program '%s': %ld\n", + bpf_program__name(prog), PTR_ERR(*link)); + return PTR_ERR(*link); + } + } + + return 0; +} + +void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) +{ + int i; + + for (i = 0; i < s->prog_cnt; i++) { + struct bpf_link **link = s->progs[i].link; + + bpf_link__destroy(*link); + *link = NULL; + } +} + +void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) +{ + if (s->progs) + bpf_object__detach_skeleton(s); + if (s->obj) + bpf_object__close(*s->obj); + free(s->maps); + free(s->progs); + free(s); +} diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index e8f70977d137..6909ee81113a 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -17,14 +17,12 @@ #include <sys/types.h> // for size_t #include <linux/bpf.h> +#include "libbpf_common.h" + #ifdef __cplusplus extern "C" { #endif -#ifndef LIBBPF_API -#define LIBBPF_API __attribute__((visibility("default"))) -#endif - enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, @@ -67,18 +65,61 @@ struct bpf_object_open_attr { enum bpf_prog_type prog_type; }; +struct bpf_object_open_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* object name override, if provided: + * - for object open from file, this will override setting object + * name from file path's base name; + * - for object open from memory buffer, this will specify an object + * name and will override default "<addr>-<buf-size>" name; + */ + const char *object_name; + /* parse map definitions non-strictly, allowing extra attributes/data */ + bool relaxed_maps; + /* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures. + * Value is ignored. Relocations are always processed non-strictly. + * Non-relocatable instructions are replaced with invalid ones to + * prevent accidental errors. + */ + bool relaxed_core_relocs; + /* maps that set the 'pinning' attribute in their definition will have + * their pin_path attribute set to a file in this directory, and be + * auto-pinned to that path on load; defaults to "/sys/fs/bpf". + */ + const char *pin_root_path; + __u32 attach_prog_fd; + /* Additional kernel config content that augments and overrides + * system Kconfig for CONFIG_xxx externs.
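+ * E.g. (hypothetical content): kconfig = "CONFIG_MY_KNOB=y\nCONFIG_MY_VAL=42\n" + * provides values for the matching CONFIG_xxx externs.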
+ */ + const char *kconfig; +}; +#define bpf_object_open_opts__last_field kconfig + LIBBPF_API struct bpf_object *bpf_object__open(const char *path); LIBBPF_API struct bpf_object * +bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts); +LIBBPF_API struct bpf_object * +bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts); + +/* deprecated bpf_object__open variants */ +LIBBPF_API struct bpf_object * +bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, + const char *name); +LIBBPF_API struct bpf_object * bpf_object__open_xattr(struct bpf_object_open_attr *attr); -struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr, - int flags); -LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf, - size_t obj_buf_sz, - const char *name); -int bpf_object__section_size(const struct bpf_object *obj, const char *name, - __u32 *size); -int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, - __u32 *off); + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +/* pin_maps and unpin_maps can both be called with a NULL path, in which case + * they will use the pin_path attribute of each map (and ignore all maps that + * don't have a pin_path set). + */ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); @@ -99,6 +140,7 @@ struct bpf_object_load_attr { LIBBPF_API int bpf_object__load(struct bpf_object *obj); LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); LIBBPF_API int bpf_object__unload(struct bpf_object *obj); + LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); @@ -109,6 +151,9 @@ LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); LIBBPF_API struct bpf_program * bpf_object__find_program_by_title(const struct bpf_object *obj, const char *title); +LIBBPF_API struct bpf_program * +bpf_object__find_program_by_name(const struct bpf_object *obj, + const char *name); LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev); #define bpf_object__for_each_safe(pos, tmp) \ @@ -127,6 +172,8 @@ libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type); LIBBPF_API int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type); +LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type); /* Accessors of bpf_program */ struct bpf_program; @@ -150,8 +197,15 @@ LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog); LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); -LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog, - bool needs_copy); +LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); +LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog); +LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead") +const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy); +LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); + +/* returns 
program size in bytes */ +LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_version); @@ -168,8 +222,19 @@ LIBBPF_API void bpf_program__unload(struct bpf_program *prog); struct bpf_link; +LIBBPF_API struct bpf_link *bpf_link__open(const char *path); +LIBBPF_API int bpf_link__fd(const struct bpf_link *link); +LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link); +LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path); +LIBBPF_API int bpf_link__unpin(struct bpf_link *link); +LIBBPF_API int bpf_link__update_program(struct bpf_link *link, + struct bpf_program *prog); +LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); +LIBBPF_API int bpf_link__detach(struct bpf_link *link); LIBBPF_API int bpf_link__destroy(struct bpf_link *link); +LIBBPF_API struct bpf_link * +bpf_program__attach(struct bpf_program *prog); LIBBPF_API struct bpf_link * bpf_program__attach_perf_event(struct bpf_program *prog, int pfd); LIBBPF_API struct bpf_link * @@ -186,6 +251,34 @@ bpf_program__attach_tracepoint(struct bpf_program *prog, LIBBPF_API struct bpf_link * bpf_program__attach_raw_tracepoint(struct bpf_program *prog, const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_trace(struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_lsm(struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); +LIBBPF_API struct bpf_link * +bpf_program__attach_freplace(struct bpf_program *prog, + int target_fd, const char *attach_func_name); + +struct bpf_map; + +LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); + +struct bpf_iter_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + union bpf_iter_link_info *link_info; + __u32 link_info_len; +}; +#define bpf_iter_attach_opts__last_field link_info_len + +LIBBPF_API struct bpf_link * +bpf_program__attach_iter(struct bpf_program *prog, + const struct bpf_iter_attach_opts *opts); struct bpf_insn; @@ -258,24 +351,43 @@ LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog); LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog); LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog); LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); + +LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type); + +LIBBPF_API enum bpf_attach_type +bpf_program__get_expected_attach_type(struct bpf_program *prog); LIBBPF_API void 
bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); +LIBBPF_API int +bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, + const char *attach_func_name); + LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' @@ -295,7 +407,6 @@ struct bpf_map_def { * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel, * so no need to worry about a name clash. */ -struct bpf_map; LIBBPF_API struct bpf_map * bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name); @@ -320,21 +431,50 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); LIBBPF_API struct bpf_map * bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); +/* get/set map FD */ LIBBPF_API int bpf_map__fd(const struct bpf_map *map); +LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); +/* get map definition */ LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +/* get map name */ LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); +/* get/set map type */ +LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); +/* get/set map size (max_entries) */ +LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +/* get/set map flags */ +LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); +/* get/set map NUMA node */ +LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); +/* get/set map key size */ +LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); +/* get/set map value size */ +LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); +/* get map key/value BTF type IDs */ LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); +/* get/set map if_index */ +LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); typedef void 
(*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); -LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); -LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); -LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); +LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); +LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); +LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); @@ -356,9 +496,48 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd); -LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); -LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); +struct xdp_link_info { + __u32 prog_id; + __u32 drv_prog_id; + __u32 hw_prog_id; + __u32 skb_prog_id; + __u8 attach_mode; +}; +struct bpf_xdp_set_link_opts { + size_t sz; + int old_fd; +}; +#define bpf_xdp_set_link_opts__last_field old_fd + +LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); +LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts); +LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); +LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags); + +/* Ring buffer APIs */ +struct ring_buffer; + +typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size); + +struct ring_buffer_opts { + size_t sz; /* size of this struct, for forward/backward compatibility */ +}; + +#define ring_buffer_opts__last_field sz + +LIBBPF_API struct ring_buffer * +ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, + const struct ring_buffer_opts *opts); +LIBBPF_API void ring_buffer__free(struct ring_buffer *rb); +LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd, + ring_buffer_sample_fn sample_cb, void *ctx); +LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms); +LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb); + +/* Perf buffer APIs */ struct perf_buffer; typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu, @@ -413,7 +592,12 @@ perf_buffer__new_raw(int map_fd, size_t page_cnt, const struct perf_buffer_raw_opts *opts); LIBBPF_API void perf_buffer__free(struct perf_buffer *pb); +LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb); LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms); +LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb); +LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx); +LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb); +LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx); typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr, @@
-423,18 +607,6 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, bpf_perf_event_print_t fn, void *private_data); -struct nlattr; -typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); -int libbpf_netlink_open(unsigned int *nl_pid); -int libbpf_nl_get_link(int sock, unsigned int nl_pid, - libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie); -int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie); -int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie); -int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, - libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie); - struct bpf_prog_linfo; struct bpf_prog_info; @@ -461,6 +633,7 @@ LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex); LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex); +LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex); /* * Get bpf_prog_info in contiguous memory @@ -541,6 +714,50 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); */ LIBBPF_API int libbpf_num_possible_cpus(void); +struct bpf_map_skeleton { + const char *name; + struct bpf_map **map; + void **mmaped; +}; + +struct bpf_prog_skeleton { + const char *name; + struct bpf_program **prog; + struct bpf_link **link; +}; + +struct bpf_object_skeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const char *name; + void *data; + size_t data_sz; + + struct bpf_object **obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ + struct bpf_prog_skeleton *progs; +}; + +LIBBPF_API int +bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts); +LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index d04c7cb623ed..4ebfadf45b47 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -190,3 +190,150 @@ LIBBPF_0.0.5 { global: bpf_btf_get_next_id; } LIBBPF_0.0.4; + +LIBBPF_0.0.6 { + global: + bpf_get_link_xdp_info; + bpf_map__get_pin_path; + bpf_map__is_pinned; + bpf_map__set_pin_path; + bpf_object__open_file; + bpf_object__open_mem; + bpf_program__attach_trace; + bpf_program__get_expected_attach_type; + bpf_program__get_type; + bpf_program__is_tracing; + bpf_program__set_tracing; + bpf_program__size; + btf__find_by_name_kind; + libbpf_find_vmlinux_btf_id; +} LIBBPF_0.0.5; + +LIBBPF_0.0.7 { + global: + btf_dump__emit_type_decl; + bpf_link__disconnect; + bpf_map__attach_struct_ops; + bpf_map_delete_batch; + bpf_map_lookup_and_delete_batch; + bpf_map_lookup_batch; + bpf_map_update_batch; + bpf_object__find_program_by_name; + bpf_object__attach_skeleton; + bpf_object__destroy_skeleton;
bpf_object__detach_skeleton; + bpf_object__load_skeleton; + bpf_object__open_skeleton; + bpf_probe_large_insn_limit; + bpf_prog_attach_xattr; + bpf_program__attach; + bpf_program__name; + bpf_program__is_extension; + bpf_program__is_struct_ops; + bpf_program__set_extension; + bpf_program__set_struct_ops; + btf__align_of; + libbpf_find_kernel_btf; +} LIBBPF_0.0.6; + +LIBBPF_0.0.8 { + global: + bpf_link__fd; + bpf_link__open; + bpf_link__pin; + bpf_link__pin_path; + bpf_link__unpin; + bpf_link__update_program; + bpf_link_create; + bpf_link_update; + bpf_map__set_initial_value; + bpf_program__attach_cgroup; + bpf_program__attach_lsm; + bpf_program__is_lsm; + bpf_program__set_attach_target; + bpf_program__set_lsm; + bpf_set_link_xdp_fd_opts; +} LIBBPF_0.0.7; + +LIBBPF_0.0.9 { + global: + bpf_enable_stats; + bpf_iter_create; + bpf_link_get_fd_by_id; + bpf_link_get_next_id; + bpf_program__attach_iter; + bpf_program__attach_netns; + perf_buffer__consume; + ring_buffer__add; + ring_buffer__consume; + ring_buffer__free; + ring_buffer__new; + ring_buffer__poll; +} LIBBPF_0.0.8; + +LIBBPF_0.1.0 { + global: + bpf_link__detach; + bpf_link_detach; + bpf_map__ifindex; + bpf_map__key_size; + bpf_map__map_flags; + bpf_map__max_entries; + bpf_map__numa_node; + bpf_map__set_key_size; + bpf_map__set_map_flags; + bpf_map__set_max_entries; + bpf_map__set_numa_node; + bpf_map__set_type; + bpf_map__set_value_size; + bpf_map__type; + bpf_map__value_size; + bpf_program__attach_xdp; + bpf_program__autoload; + bpf_program__is_sk_lookup; + bpf_program__set_autoload; + bpf_program__set_sk_lookup; + btf__parse; + btf__parse_raw; + btf__pointer_size; + btf__set_fd; + btf__set_pointer_size; +} LIBBPF_0.0.9; + +LIBBPF_0.2.0 { + global: + bpf_prog_bind_map; + bpf_prog_test_run_opts; + bpf_program__attach_freplace; + bpf_program__section_name; + btf__add_array; + btf__add_const; + btf__add_enum; + btf__add_enum_value; + btf__add_datasec; + btf__add_datasec_var_info; + btf__add_field; + btf__add_func; + btf__add_func_param; + btf__add_func_proto; + btf__add_fwd; + btf__add_int; + btf__add_ptr; + btf__add_restrict; + btf__add_str; + btf__add_struct; + btf__add_typedef; + btf__add_union; + btf__add_var; + btf__add_volatile; + btf__endianness; + btf__find_str; + btf__new_empty; + btf__set_endianness; + btf__str_by_offset; + perf_buffer__buffer_cnt; + perf_buffer__buffer_fd; + perf_buffer__epoll_fd; + perf_buffer__consume_buffer; + xsk_socket__create_shared; +} LIBBPF_0.1.0; diff --git a/tools/lib/bpf/libbpf.pc.template b/tools/lib/bpf/libbpf.pc.template index ac17fcef2108..b45ed534bdfb 100644 --- a/tools/lib/bpf/libbpf.pc.template +++ b/tools/lib/bpf/libbpf.pc.template @@ -8,5 +8,5 @@ Name: libbpf Description: BPF library Version: @VERSION@ Libs: -L${libdir} -lbpf -Requires.private: libelf +Requires.private: libelf zlib Cflags: -I${includedir} diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h new file mode 100644 index 000000000000..f7d78fde1ce8 --- /dev/null +++ b/tools/lib/bpf/libbpf_common.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Common user-facing libbpf helpers. 
+ *
+ * Copyright (c) 2019 Facebook
+ */
+
+#ifndef __LIBBPF_LIBBPF_COMMON_H
+#define __LIBBPF_LIBBPF_COMMON_H
+
+#include <string.h>
+
+#ifndef LIBBPF_API
+#define LIBBPF_API __attribute__((visibility("default")))
+#endif
+
+#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))
+
+/* Helper macro to declare and initialize libbpf options struct
+ *
+ * This dance with uninitialized declaration, followed by memset to zero,
+ * followed by assignment using compound literal syntax is done to preserve
+ * ability to use a nice struct field initialization syntax and **hopefully**
+ * have all the padding bytes initialized to zero. It's not guaranteed though,
+ * when copying literal, that compiler won't copy garbage in literal's padding
+ * bytes, but that's the best way I've found and it seems to work in practice.
+ *
+ * Macro declares opts struct of given type and name, zero-initializes,
+ * including any extra padding, it with memset() and then assigns initial
+ * values provided by users in struct initializer-syntax as varargs.
+ */
+#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...)		\
+	struct TYPE NAME = ({				\
+		memset(&NAME, 0, sizeof(struct TYPE));	\
+		(struct TYPE) {				\
+			.sz = sizeof(struct TYPE),	\
+			__VA_ARGS__			\
+		};					\
+	})
+
+/* Same helper under a shorter name; see DECLARE_LIBBPF_OPTS above */
+#define LIBBPF_OPTS(TYPE, NAME, ...)			\
+	struct TYPE NAME = ({				\
+		memset(&NAME, 0, sizeof(struct TYPE));	\
+		(struct TYPE) {				\
+			.sz = sizeof(struct TYPE),	\
+			__VA_ARGS__			\
+		};					\
+	})
+
+#endif /* __LIBBPF_LIBBPF_COMMON_H */
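These macros are how callers pass optional, extensible arguments into libbpf. A minimal usage sketch, assuming the bpf_object_open_opts struct and bpf_object__open_file() exported elsewhere in this patch keep the signatures shown there; the object name is illustrative:

```c
#include <bpf/libbpf.h>

/* opts starts fully zeroed with .sz filled in, so fields unknown to this
 * libbpf version read as zero/absent rather than garbage */
static struct bpf_object *open_named(const char *path)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "demo_obj",	/* illustrative */
	);

	return bpf_object__open_file(path, &opts);
}
```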
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index 4343e40588c6..0afb51f7a919 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -13,6 +13,9 @@
#include "libbpf.h"
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 98216a69c32f..d99bc847bf84 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,6 +9,15 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
+#include <stdlib.h>
+#include <limits.h>
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
+
#include "libbpf.h"
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -23,6 +32,12 @@
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
@@ -59,13 +74,88 @@ do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)
-#define pr_warning(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+/*
+ * Re-implement glibc's reallocarray() for libbpf internal-only use.
+ * reallocarray(), unfortunately, is not available in all versions of glibc,
+ * so it requires extra feature detection and using the reallocarray() stub
+ * from <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this
+ * unnecessarily complicates the build of libbpf and is just a maintenance
+ * burden. Instead, it's trivial to implement a libbpf-specific internal
+ * version and use it throughout libbpf.
+ */
+static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+	size_t total;
+
+#if __has_builtin(__builtin_mul_overflow)
+	if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
+		return NULL;
+#else
+	if (size == 0 || nmemb > ULONG_MAX / size)
+		return NULL;
+	total = nmemb * size;
+#endif
+	return realloc(ptr, total);
+}
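The helper above is the allocation pattern the rest of libbpf relies on. A short sketch of the intended call style, with illustrative element and function names, assuming libbpf_internal.h is in scope (it is an internal-only header):

```c
#include <errno.h>
#include <stdlib.h>

struct sample { long key, val; };	/* illustrative element type */

/* grow *arr to hold at least need elements; on multiply overflow or OOM
 * the old allocation is left untouched and -ENOMEM is returned */
static int ensure_room(struct sample **arr, size_t *cap, size_t need)
{
	void *tmp;

	if (need <= *cap)
		return 0;
	tmp = libbpf_reallocarray(*arr, need, sizeof(**arr));
	if (!tmp)
		return -ENOMEM;
	*arr = tmp;
	*cap = need;
	return 0;
}
```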
+
+void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+		  size_t cur_cnt, size_t max_cnt, size_t add_cnt);
+int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
+
+static inline bool libbpf_validate_opts(const char *opts,
+					size_t opts_sz, size_t user_sz,
+					const char *type_name)
+{
+	if (user_sz < sizeof(size_t)) {
+		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
+		return false;
+	}
+	if (user_sz > opts_sz) {
+		size_t i;
+
+		for (i = opts_sz; i < user_sz; i++) {
+			if (opts[i]) {
+				pr_warn("%s has non-zero extra bytes\n",
+					type_name);
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
+#define OPTS_VALID(opts, type)						\
+	(!(opts) || libbpf_validate_opts((const char *)opts,		\
+					 offsetofend(struct type,	\
+						     type##__last_field), \
+					 (opts)->sz, #type))
+#define OPTS_HAS(opts, field) \
+	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
+#define OPTS_GET(opts, field, fallback_value) \
+	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
+#define OPTS_SET(opts, field, value)		\
+	do {					\
+		if (OPTS_HAS(opts, field))	\
+			(opts)->field = value;	\
+	} while (0)
+
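OPTS_VALID/OPTS_HAS/OPTS_GET are the consumer side of the DECLARE_LIBBPF_OPTS contract. A sketch of the pattern, modeled on bpf_set_link_xdp_fd_opts() later in this patch; my_set_xdp() is hypothetical and the real work is elided:

```c
#include <errno.h>

static int my_set_xdp(int ifindex, int fd,
		      const struct bpf_xdp_set_link_opts *opts)
{
	int old_fd;

	/* rejects a caller struct whose bytes past the fields this
	 * libbpf version knows about are non-zero */
	if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
		return -EINVAL;
	/* reads old_fd only if the caller's sz says the field exists */
	old_fd = OPTS_GET(opts, old_fd, -1);

	(void)ifindex; (void)fd; (void)old_fd;	/* real work elided */
	return 0;
}
```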
+int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
+int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);
+
+int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+			     __u32 *size);
+int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+				__u32 *off);
+
struct btf_ext_info {
	/*
	 * info points to the individual info section (e.g. func_info and
@@ -87,6 +177,44 @@ struct btf_ext_info {
	     i < (sec)->num_info;			\
	     i++, rec = (void *)rec + (seg)->rec_size)
+/*
+ * The .BTF.ext ELF section layout is defined as
+ *   struct btf_ext_header
+ *   func_info subsection
+ *
+ * The func_info subsection layout:
+ *   record size for struct bpf_func_info in the func_info subsection
+ *   struct btf_sec_func_info for section #1
+ *   a list of bpf_func_info records for section #1
+ *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
+ *     but may not be identical
+ *   struct btf_sec_func_info for section #2
+ *   a list of bpf_func_info records for section #2
+ *   ......
+ *
+ * Note that the bpf_func_info record size in .BTF.ext may not
+ * be the same as the one defined in include/uapi/linux/bpf.h.
+ * The loader should ensure that record_size meets the minimum
+ * requirement and pass the record as is to the kernel. The
+ * kernel will handle the func_info properly based on its contents.
+ */
+struct btf_ext_header {
+	__u16	magic;
+	__u8	version;
+	__u8	flags;
+	__u32	hdr_len;
+
+	/* All offsets are in bytes relative to the end of this header */
+	__u32	func_info_off;
+	__u32	func_info_len;
+	__u32	line_info_off;
+	__u32	line_info_len;
+
+	/* optional part of .BTF.ext header */
+	__u32	core_relo_off;
+	__u32	core_relo_len;
+};
+
struct btf_ext {
	union {
		struct btf_ext_header *hdr;
@@ -94,7 +222,7 @@ struct btf_ext {
	};
	struct btf_ext_info func_info;
	struct btf_ext_info line_info;
-	struct btf_ext_info offset_reloc_info;
+	struct btf_ext_info core_relo_info;
	__u32 data_size;
};
@@ -102,7 +230,7 @@ struct btf_ext_info_sec {
	__u32	sec_name_off;
	__u32	num_info;
	/* Followed by num_info * record_size number of bytes */
-	__u8	data[0];
+	__u8	data[];
};

/* The minimum bpf_func_info checked by the loader */
@@ -119,18 +247,40 @@ struct bpf_line_info_min {
	__u32	line_col;
};
-/* The minimum bpf_offset_reloc checked by the loader
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations.
+ */
+enum bpf_core_relo_kind {
+	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
+	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
+	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
+	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
+	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
+	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
+	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
+	BPF_TYPE_SIZE = 9,		/* type size in bytes */
+	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
+	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
+};
+
+/* The minimum bpf_core_relo checked by the loader
 *
- * Offset relocation captures the following data:
+ * CO-RE relocation captures the following data:
 * - insn_off - instruction offset (in bytes) within a BPF program that needs
- *   its insn->imm field to be relocated with actual offset;
+ *   its insn->imm field to be relocated with actual field info;
 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
- *   offset;
+ *   type or field;
 * - access_str_off - offset into corresponding .BTF string section. String
- *   itself encodes an accessed field using a sequence of field and array
- *   indicies, separated by colon (:). It's conceptually very close to LLVM's
- *   getelementptr ([0]) instruction's arguments for identifying offset to
- *   a field.
+ *   interpretation depends on specific relocation kind:
+ *     - for field-based relocations, string encodes an accessed field using
+ *       a sequence of field and array indices, separated by colon (:). It's
+ *       conceptually very close to LLVM's getelementptr ([0]) instruction's
+ *       arguments for identifying offset to a field.
+ *     - for type-based relocations, string is expected to be just "0";
+ *     - for enum value-based relocations, string contains an index of enum
+ *       value within its enum type;
 *
 * Example to provide a better feel.
 *
@@ -156,15 +306,16 @@ struct bpf_line_info_min {
 *   bpf_probe_read(&dst, sizeof(dst),
 *		    __builtin_preserve_access_index(&src->a.b.c));
 *
- * In this case Clang will emit offset relocation recording necessary data to
+ * In this case Clang will emit field relocation recording necessary data to
 * be able to find offset of embedded `a.b.c` field within `src` struct.
* * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction */ -struct bpf_offset_reloc { +struct bpf_core_relo { __u32 insn_off; __u32 type_id; __u32 access_str_off; + enum bpf_core_relo_kind kind; }; #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 4b0b0364f5fc..5482a9b7ae2d 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; + case BPF_PROG_TYPE_SK_LOOKUP: + xattr.expected_attach_type = BPF_SK_LOOKUP; + break; case BPF_PROG_TYPE_KPROBE: xattr.kern_version = get_kernel_version(); break; @@ -102,6 +105,10 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_STRUCT_OPS: + case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_LSM: default: break; } @@ -163,7 +170,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len, return btf_fd; } -static int load_sk_storage_btf(void) +static int load_local_storage_btf(void) { const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l"; /* struct bpf_spin_lock { @@ -222,15 +229,21 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) key_size = 0; break; case BPF_MAP_TYPE_SK_STORAGE: + case BPF_MAP_TYPE_INODE_STORAGE: btf_key_type_id = 1; btf_value_type_id = 3; value_size = 8; max_entries = 0; map_flags = BPF_F_NO_PREALLOC; - btf_fd = load_sk_storage_btf(); + btf_fd = load_local_storage_btf(); if (btf_fd < 0) return false; break; + case BPF_MAP_TYPE_RINGBUF: + key_size = 0; + value_size = 0; + max_entries = 4096; + break; case BPF_MAP_TYPE_UNSPEC: case BPF_MAP_TYPE_HASH: case BPF_MAP_TYPE_ARRAY: @@ -250,6 +263,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) case BPF_MAP_TYPE_XSKMAP: case BPF_MAP_TYPE_SOCKHASH: case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + case BPF_MAP_TYPE_STRUCT_OPS: default: break; } @@ -320,3 +334,24 @@ bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, return res; } + +/* + * Probe for availability of kernel commit (5.3): + * + * c04c0d2b968a ("bpf: increase complexity limit and maximum program size") + */ +bool bpf_probe_large_insn_limit(__u32 ifindex) +{ + struct bpf_insn insns[BPF_MAXINSNS + 1]; + int i; + + for (i = 0; i < BPF_MAXINSNS; i++) + insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1); + insns[BPF_MAXINSNS] = BPF_EXIT_INSN(); + + errno = 0; + probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0, + ifindex); + + return errno != E2BIG && errno != EINVAL; +} diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index ce3ec81b71c0..4dd73de00b6f 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -12,22 +12,25 @@ #include "bpf.h" #include "libbpf.h" +#include "libbpf_internal.h" #include "nlattr.h" #ifndef SOL_NETLINK #define SOL_NETLINK 270 #endif +typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); + typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, void *cookie); struct xdp_id_md { int ifindex; __u32 flags; - __u32 id; + struct xdp_link_info info; }; -int libbpf_netlink_open(__u32 *nl_pid) +static int libbpf_netlink_open(__u32 *nl_pid) { struct sockaddr_nl sa; socklen_t addrlen; @@ -43,7 +46,7 @@ int 
libbpf_netlink_open(__u32 *nl_pid) if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) { - fprintf(stderr, "Netlink error reporting not supported\n"); + pr_warn("Netlink error reporting not supported\n"); } if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { @@ -128,7 +131,8 @@ done: return ret; } -int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, + __u32 flags) { int sock, seq = 0, ret; struct nlattr *nla, *nla_xdp; @@ -137,7 +141,7 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) struct ifinfomsg ifinfo; char attrbuf[64]; } req; - __u32 nl_pid; + __u32 nl_pid = 0; sock = libbpf_netlink_open(&nl_pid); if (sock < 0) @@ -174,6 +178,14 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) nla->nla_len += nla_xdp->nla_len; } + if (flags & XDP_FLAGS_REPLACE) { + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); + nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD; + nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd); + memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd)); + nla->nla_len += nla_xdp->nla_len; + } + req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { @@ -187,6 +199,29 @@ cleanup: return ret; } +int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts) +{ + int old_fd = -1; + + if (!OPTS_VALID(opts, bpf_xdp_set_link_opts)) + return -EINVAL; + + if (OPTS_HAS(opts, old_fd)) { + old_fd = OPTS_GET(opts, old_fd, -1); + flags |= XDP_FLAGS_REPLACE; + } + + return __bpf_set_link_xdp_fd_replace(ifindex, fd, + old_fd, + flags); +} + +int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +{ + return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags); +} + static int __dump_link_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { @@ -202,26 +237,11 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh, return dump_link_nlmsg(cookie, ifi, tb); } -static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags) -{ - if (mode != XDP_ATTACHED_MULTI) - return IFLA_XDP_PROG_ID; - if (flags & XDP_FLAGS_DRV_MODE) - return IFLA_XDP_DRV_PROG_ID; - if (flags & XDP_FLAGS_HW_MODE) - return IFLA_XDP_HW_PROG_ID; - if (flags & XDP_FLAGS_SKB_MODE) - return IFLA_XDP_SKB_PROG_ID; - - return IFLA_XDP_UNSPEC; -} - -static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb) +static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) { struct nlattr *xdp_tb[IFLA_XDP_MAX + 1]; struct xdp_id_md *xdp_id = cookie; struct ifinfomsg *ifinfo = msg; - unsigned char mode, xdp_attr; int ret; if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index) @@ -237,27 +257,43 @@ static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb) if (!xdp_tb[IFLA_XDP_ATTACHED]) return 0; - mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]); - if (mode == XDP_ATTACHED_NONE) + xdp_id->info.attach_mode = libbpf_nla_getattr_u8( + xdp_tb[IFLA_XDP_ATTACHED]); + + if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE) return 0; - xdp_attr = get_xdp_id_attr(mode, xdp_id->flags); - if (!xdp_attr || !xdp_tb[xdp_attr]) - return 0; + if (xdp_tb[IFLA_XDP_PROG_ID]) + xdp_id->info.prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_PROG_ID]); - xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]); + if (xdp_tb[IFLA_XDP_SKB_PROG_ID]) + xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_SKB_PROG_ID]); + + if (xdp_tb[IFLA_XDP_DRV_PROG_ID]) + 
xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_DRV_PROG_ID]); + + if (xdp_tb[IFLA_XDP_HW_PROG_ID]) + xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_HW_PROG_ID]); return 0; } -int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +static int libbpf_nl_get_link(int sock, unsigned int nl_pid, + libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie); + +int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags) { struct xdp_id_md xdp_id = {}; int sock, ret; - __u32 nl_pid; + __u32 nl_pid = 0; __u32 mask; - if (flags & ~XDP_FLAGS_MASK) + if (flags & ~XDP_FLAGS_MASK || !info_size) return -EINVAL; /* Check whether the single {HW,DRV,SKB} mode is set */ @@ -273,14 +309,46 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) xdp_id.ifindex = ifindex; xdp_id.flags = flags; - ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id); - if (!ret) - *prog_id = xdp_id.id; + ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id); + if (!ret) { + size_t sz = min(info_size, sizeof(xdp_id.info)); + + memcpy(info, &xdp_id.info, sz); + memset((void *) info + sz, 0, info_size - sz); + } close(sock); return ret; } +static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) +{ + flags &= XDP_FLAGS_MODES; + + if (info->attach_mode != XDP_ATTACHED_MULTI && !flags) + return info->prog_id; + if (flags & XDP_FLAGS_DRV_MODE) + return info->drv_prog_id; + if (flags & XDP_FLAGS_HW_MODE) + return info->hw_prog_id; + if (flags & XDP_FLAGS_SKB_MODE) + return info->skb_prog_id; + + return 0; +} + +int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +{ + struct xdp_link_info info; + int ret; + + ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags); + if (!ret) + *prog_id = get_xdp_id(&info, flags); + + return ret; +} + int libbpf_nl_get_link(int sock, unsigned int nl_pid, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { @@ -302,121 +370,3 @@ int libbpf_nl_get_link(int sock, unsigned int nl_pid, return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg, dump_link_nlmsg, cookie); } - -static int __dump_class_nlmsg(struct nlmsghdr *nlh, - libbpf_dump_nlmsg_t dump_class_nlmsg, - void *cookie) -{ - struct nlattr *tb[TCA_MAX + 1], *attr; - struct tcmsg *t = NLMSG_DATA(nlh); - int len; - - len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); - attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); - if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) - return -LIBBPF_ERRNO__NLPARSE; - - return dump_class_nlmsg(cookie, t, tb); -} - -int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie) -{ - struct { - struct nlmsghdr nlh; - struct tcmsg t; - } req = { - .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), - .nlh.nlmsg_type = RTM_GETTCLASS, - .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, - .t.tcm_family = AF_UNSPEC, - .t.tcm_ifindex = ifindex, - }; - int seq = time(NULL); - - req.nlh.nlmsg_seq = seq; - if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) - return -errno; - - return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg, - dump_class_nlmsg, cookie); -} - -static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh, - libbpf_dump_nlmsg_t dump_qdisc_nlmsg, - void *cookie) -{ - struct nlattr *tb[TCA_MAX + 1], *attr; - struct tcmsg *t = NLMSG_DATA(nlh); - int len; - - len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); - attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); - if 
(libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) - return -LIBBPF_ERRNO__NLPARSE; - - return dump_qdisc_nlmsg(cookie, t, tb); -} - -int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex, - libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie) -{ - struct { - struct nlmsghdr nlh; - struct tcmsg t; - } req = { - .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), - .nlh.nlmsg_type = RTM_GETQDISC, - .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, - .t.tcm_family = AF_UNSPEC, - .t.tcm_ifindex = ifindex, - }; - int seq = time(NULL); - - req.nlh.nlmsg_seq = seq; - if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) - return -errno; - - return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg, - dump_qdisc_nlmsg, cookie); -} - -static int __dump_filter_nlmsg(struct nlmsghdr *nlh, - libbpf_dump_nlmsg_t dump_filter_nlmsg, - void *cookie) -{ - struct nlattr *tb[TCA_MAX + 1], *attr; - struct tcmsg *t = NLMSG_DATA(nlh); - int len; - - len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t)); - attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t))); - if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0) - return -LIBBPF_ERRNO__NLPARSE; - - return dump_filter_nlmsg(cookie, t, tb); -} - -int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle, - libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie) -{ - struct { - struct nlmsghdr nlh; - struct tcmsg t; - } req = { - .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)), - .nlh.nlmsg_type = RTM_GETTFILTER, - .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, - .t.tcm_family = AF_UNSPEC, - .t.tcm_ifindex = ifindex, - .t.tcm_parent = handle, - }; - int seq = time(NULL); - - req.nlh.nlmsg_seq = seq; - if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0) - return -errno; - - return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg, - dump_filter_nlmsg, cookie); -} diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index 1e69c0c8d413..b607fa9852b1 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -7,10 +7,11 @@ */ #include <errno.h> -#include "nlattr.h" -#include <linux/rtnetlink.h> #include <string.h> #include <stdio.h> +#include <linux/rtnetlink.h> +#include "nlattr.h" +#include "libbpf_internal.h" static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { [LIBBPF_NLA_U8] = sizeof(uint8_t), @@ -121,8 +122,8 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, } if (tb[type]) - fprintf(stderr, "Attribute of type %#x found multiple times in message, " - "previous attribute is being ignored.\n", type); + pr_warn("Attribute of type %#x found multiple times in message, " + "previous attribute is being ignored.\n", type); tb[type] = nla; } @@ -181,15 +182,14 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { - fprintf(stderr, - "Failed to parse extended error attributes\n"); + pr_warn("Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); - fprintf(stderr, "Kernel error message: %s\n", errmsg); + pr_warn("Kernel error message: %s\n", errmsg); return 0; } diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c new file mode 100644 index 000000000000..98537ff2679e --- /dev/null +++ b/tools/lib/bpf/ringbuf.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* + * Ring buffer operations. + * + * Copyright (C) 2020 Facebook, Inc. 
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <linux/err.h>
+#include <linux/bpf.h>
+#include <asm/barrier.h>
+#include <sys/mman.h>
+#include <sys/epoll.h>
+
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "bpf.h"
+
+struct ring {
+	ring_buffer_sample_fn sample_cb;
+	void *ctx;
+	void *data;
+	unsigned long *consumer_pos;
+	unsigned long *producer_pos;
+	unsigned long mask;
+	int map_fd;
+};
+
+struct ring_buffer {
+	struct epoll_event *events;
+	struct ring *rings;
+	size_t page_size;
+	int epoll_fd;
+	int ring_cnt;
+};
+
+static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
+{
+	if (r->consumer_pos) {
+		munmap(r->consumer_pos, rb->page_size);
+		r->consumer_pos = NULL;
+	}
+	if (r->producer_pos) {
+		munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
+		r->producer_pos = NULL;
+	}
+}
+
+/* Add extra RINGBUF maps to this ring buffer manager */
+int ring_buffer__add(struct ring_buffer *rb, int map_fd,
+		     ring_buffer_sample_fn sample_cb, void *ctx)
+{
+	struct bpf_map_info info;
+	__u32 len = sizeof(info);
+	struct epoll_event *e;
+	struct ring *r;
+	void *tmp;
+	int err;
+
+	memset(&info, 0, sizeof(info));
+
+	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
+	if (err) {
+		err = -errno;
+		pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
+			map_fd, err);
+		return err;
+	}
+
+	if (info.type != BPF_MAP_TYPE_RINGBUF) {
+		pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
+			map_fd);
+		return -EINVAL;
+	}
+
+	tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
+	if (!tmp)
+		return -ENOMEM;
+	rb->rings = tmp;
+
+	tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
+	if (!tmp)
+		return -ENOMEM;
+	rb->events = tmp;
+
+	r = &rb->rings[rb->ring_cnt];
+	memset(r, 0, sizeof(*r));
+
+	r->map_fd = map_fd;
+	r->sample_cb = sample_cb;
+	r->ctx = ctx;
+	r->mask = info.max_entries - 1;
+
+	/* Map writable consumer page */
+	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+		   map_fd, 0);
+	if (tmp == MAP_FAILED) {
+		err = -errno;
+		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
+			map_fd, err);
+		return err;
+	}
+	r->consumer_pos = tmp;
+
+	/* Map read-only producer page and data pages. We map the data area
+	 * twice as big to allow simple reading of samples that wrap around
+	 * the end of a ring buffer. See the kernel implementation for details.
+	 */
+	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
+		   MAP_SHARED, map_fd, rb->page_size);
+	if (tmp == MAP_FAILED) {
+		err = -errno;
+		ringbuf_unmap_ring(rb, r);
+		pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
+			map_fd, err);
+		return err;
+	}
+	r->producer_pos = tmp;
+	r->data = tmp + rb->page_size;
+
+	e = &rb->events[rb->ring_cnt];
+	memset(e, 0, sizeof(*e));
+
+	e->events = EPOLLIN;
+	e->data.fd = rb->ring_cnt;
+	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
+		err = -errno;
+		ringbuf_unmap_ring(rb, r);
+		pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
+			map_fd, err);
+		return err;
+	}
+
+	rb->ring_cnt++;
+	return 0;
+}
+
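A note on the double mapping above: because the data pages appear twice back to back, a sample that begins near the end of the buffer can still be read with one contiguous copy; the bytes past the nominal end simply land in the mirror. An illustrative helper, not part of the patch, reusing struct ring from this file:

```c
#include <string.h>

/* copy one committed sample out of the ring; data + off + len may run past
 * the nominal buffer end and continues seamlessly in the second mapping */
static void copy_sample(const struct ring *r, unsigned long cons_pos,
			void *dst, size_t len)
{
	size_t off = cons_pos & r->mask;

	memcpy(dst, (const char *)r->data + off, len);
}
```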
+void ring_buffer__free(struct ring_buffer *rb)
+{
+	int i;
+
+	if (!rb)
+		return;
+
+	for (i = 0; i < rb->ring_cnt; ++i)
+		ringbuf_unmap_ring(rb, &rb->rings[i]);
+	if (rb->epoll_fd >= 0)
+		close(rb->epoll_fd);
+
+	free(rb->events);
+	free(rb->rings);
+	free(rb);
+}
+
+struct ring_buffer *
+ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
+		 const struct ring_buffer_opts *opts)
+{
+	struct ring_buffer *rb;
+	int err;
+
+	if (!OPTS_VALID(opts, ring_buffer_opts))
+		return NULL;
+
+	rb = calloc(1, sizeof(*rb));
+	if (!rb)
+		return NULL;
+
+	rb->page_size = getpagesize();
+
+	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+	if (rb->epoll_fd < 0) {
+		err = -errno;
+		pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
+		goto err_out;
+	}
+
+	err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
+	if (err)
+		goto err_out;
+
+	return rb;
+
+err_out:
+	ring_buffer__free(rb);
+	return NULL;
+}
+
+static inline int roundup_len(__u32 len)
+{
+	/* clear out top 2 bits (discard and busy, if set) */
+	len <<= 2;
+	len >>= 2;
+	/* add length prefix */
+	len += BPF_RINGBUF_HDR_SZ;
+	/* round up to 8 byte alignment */
+	return (len + 7) / 8 * 8;
+}
+
+static int ringbuf_process_ring(struct ring *r)
+{
+	int *len_ptr, len, err, cnt = 0;
+	unsigned long cons_pos, prod_pos;
+	bool got_new_data;
+	void *sample;
+
+	cons_pos = smp_load_acquire(r->consumer_pos);
+	do {
+		got_new_data = false;
+		prod_pos = smp_load_acquire(r->producer_pos);
+		while (cons_pos < prod_pos) {
+			len_ptr = r->data + (cons_pos & r->mask);
+			len = smp_load_acquire(len_ptr);
+
+			/* sample not committed yet, bail out for now */
+			if (len & BPF_RINGBUF_BUSY_BIT)
+				goto done;
+
+			got_new_data = true;
+			cons_pos += roundup_len(len);
+
+			if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
+				sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
+				err = r->sample_cb(r->ctx, sample, len);
+				if (err) {
+					/* update consumer pos and bail out */
+					smp_store_release(r->consumer_pos,
+							  cons_pos);
+					return err;
+				}
+				cnt++;
+			}
+
+			smp_store_release(r->consumer_pos, cons_pos);
+		}
+	} while (got_new_data);
+done:
+	return cnt;
+}
+
+/* Consume available ring buffer(s) data without event polling.
+ * Returns the number of records consumed across all registered ring buffers,
+ * or a negative number if any of the callbacks returns an error.
+ */
+int ring_buffer__consume(struct ring_buffer *rb)
+{
+	int i, err, res = 0;
+
+	for (i = 0; i < rb->ring_cnt; i++) {
+		struct ring *ring = &rb->rings[i];
+
+		err = ringbuf_process_ring(ring);
+		if (err < 0)
+			return err;
+		res += err;
+	}
+	return res;
+}
+
+/* Poll for available data and consume records, if any are available.
+ * Returns the number of records consumed, or a negative number if any of
+ * the registered callbacks returned an error.
+ */ +int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) +{ + int i, cnt, err, res = 0; + + cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); + for (i = 0; i < cnt; i++) { + __u32 ring_id = rb->events[i].data.fd; + struct ring *ring = &rb->rings[ring_id]; + + err = ringbuf_process_ring(ring); + if (err < 0) + return err; + res += err; + } + return cnt < 0 ? -errno : res; +} diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index b8064eedc177..146da01979c7 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -4,6 +4,9 @@ #include <stdio.h> #include "str_error.h" +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + /* * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl * libc, while checking strerror_r() return to avoid having to check this in diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp deleted file mode 100644 index fc134873bb6d..000000000000 --- a/tools/lib/bpf/test_libbpf.cpp +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ -#include "libbpf.h" -#include "bpf.h" -#include "btf.h" - -/* do nothing, just make sure we can link successfully */ - -int main(int argc, char *argv[]) -{ - /* libbpf.h */ - libbpf_set_print(NULL); - - /* bpf.h */ - bpf_prog_get_fd_by_id(0); - - /* btf.h */ - btf__new(NULL, 0); -} diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 0c7386b0e42e..9bc537d0b92d 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -20,6 +20,8 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_xdp.h> +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/sockios.h> #include <net/if.h> #include <sys/ioctl.h> @@ -45,26 +47,35 @@ #endif struct xsk_umem { - struct xsk_ring_prod *fill; - struct xsk_ring_cons *comp; + struct xsk_ring_prod *fill_save; + struct xsk_ring_cons *comp_save; char *umem_area; struct xsk_umem_config config; int fd; int refcount; + struct list_head ctx_list; +}; + +struct xsk_ctx { + struct xsk_ring_prod *fill; + struct xsk_ring_cons *comp; + __u32 queue_id; + struct xsk_umem *umem; + int refcount; + int ifindex; + struct list_head list; + int prog_fd; + int xsks_map_fd; + char ifname[IFNAMSIZ]; }; struct xsk_socket { struct xsk_ring_cons *rx; struct xsk_ring_prod *tx; __u64 outstanding_tx; - struct xsk_umem *umem; + struct xsk_ctx *ctx; struct xsk_socket_config config; int fd; - int ifindex; - int prog_fd; - int xsks_map_fd; - __u32 queue_id; - char ifname[IFNAMSIZ]; }; struct xsk_nl_info { @@ -200,15 +211,73 @@ static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) return -EINVAL; } +static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp) +{ + struct xdp_mmap_offsets off; + void *map; + int err; + + err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, + &umem->config.fill_size, + sizeof(umem->config.fill_size)); + if (err) + return -errno; + + err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, + &umem->config.comp_size, + sizeof(umem->config.comp_size)); + if (err) + return -errno; + + err = xsk_get_mmap_offsets(fd, &off); + if (err) + return -errno; + + map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, + XDP_UMEM_PGOFF_FILL_RING); + if (map == MAP_FAILED) + return -errno; + + fill->mask = umem->config.fill_size - 1; + 
fill->size = umem->config.fill_size; + fill->producer = map + off.fr.producer; + fill->consumer = map + off.fr.consumer; + fill->flags = map + off.fr.flags; + fill->ring = map + off.fr.desc; + fill->cached_cons = umem->config.fill_size; + + map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, + XDP_UMEM_PGOFF_COMPLETION_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_mmap; + } + + comp->mask = umem->config.comp_size - 1; + comp->size = umem->config.comp_size; + comp->producer = map + off.cr.producer; + comp->consumer = map + off.cr.consumer; + comp->flags = map + off.cr.flags; + comp->ring = map + off.cr.desc; + + return 0; + +out_mmap: + munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64)); + return err; +} + int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, const struct xsk_umem_config *usr_config) { - struct xdp_mmap_offsets off; struct xdp_umem_reg mr; struct xsk_umem *umem; - void *map; int err; if (!umem_area || !umem_ptr || !fill || !comp) @@ -227,6 +296,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, } umem->umem_area = umem_area; + INIT_LIST_HEAD(&umem->ctx_list); xsk_set_umem_config(&umem->config, usr_config); memset(&mr, 0, sizeof(mr)); @@ -241,65 +311,16 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, err = -errno; goto out_socket; } - err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING, - &umem->config.fill_size, - sizeof(umem->config.fill_size)); - if (err) { - err = -errno; + + err = xsk_create_umem_rings(umem, umem->fd, fill, comp); + if (err) goto out_socket; - } - err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, - &umem->config.comp_size, - sizeof(umem->config.comp_size)); - if (err) { - err = -errno; - goto out_socket; - } - - err = xsk_get_mmap_offsets(umem->fd, &off); - if (err) { - err = -errno; - goto out_socket; - } - - map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, - XDP_UMEM_PGOFF_FILL_RING); - if (map == MAP_FAILED) { - err = -errno; - goto out_socket; - } - - umem->fill = fill; - fill->mask = umem->config.fill_size - 1; - fill->size = umem->config.fill_size; - fill->producer = map + off.fr.producer; - fill->consumer = map + off.fr.consumer; - fill->flags = map + off.fr.flags; - fill->ring = map + off.fr.desc; - fill->cached_cons = umem->config.fill_size; - - map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, - XDP_UMEM_PGOFF_COMPLETION_RING); - if (map == MAP_FAILED) { - err = -errno; - goto out_mmap; - } - - umem->comp = comp; - comp->mask = umem->config.comp_size - 1; - comp->size = umem->config.comp_size; - comp->producer = map + off.cr.producer; - comp->consumer = map + off.cr.consumer; - comp->flags = map + off.cr.flags; - comp->ring = map + off.cr.desc; + umem->fill_save = fill; + umem->comp_save = comp; *umem_ptr = umem; return 0; -out_mmap: - munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64)); out_socket: close(umem->fd); out_umem_alloc: @@ -333,39 +354,62 @@ DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) static int xsk_load_xdp_prog(struct xsk_socket *xsk) { static const int log_buf_size = 16 * 1024; + struct xsk_ctx *ctx = xsk->ctx; char log_buf[log_buf_size]; int err, prog_fd; /* 
This is the C-program:
 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
 * {
- *     int index = ctx->rx_queue_index;
+ *     int ret, index = ctx->rx_queue_index;
 *
 *     // A set entry here means that the corresponding queue_id
 *     // has an active AF_XDP socket bound to it.
+ *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
+ *     if (ret > 0)
+ *         return ret;
+ *
+ *     // Fallback for pre-5.3 kernels, not supporting default
+ *     // action in the flags parameter.
 *     if (bpf_map_lookup_elem(&xsks_map, &index))
 *         return bpf_redirect_map(&xsks_map, index, 0);
- *
 *     return XDP_PASS;
 * }
 */
	struct bpf_insn prog[] = {
-		/* r1 = *(u32 *)(r1 + 16) */
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
-		/* *(u32 *)(r10 - 4) = r1 */
-		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+		/* r2 = *(u32 *)(r1 + 16) */
+		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
+		/* *(u32 *)(r10 - 4) = r2 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
+		/* r1 = xskmap[] */
+		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
+		/* r3 = XDP_PASS */
+		BPF_MOV64_IMM(BPF_REG_3, 2),
+		/* call bpf_redirect_map */
+		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+		/* if w0 > 0 goto pc+13 */
+		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
+		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
-		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+		/* r1 = xskmap[] */
+		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
+		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-		BPF_MOV32_IMM(BPF_REG_0, 2),
-		/* if r1 == 0 goto +5 */
+		/* r0 = XDP_PASS */
+		BPF_MOV64_IMM(BPF_REG_0, 2),
+		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
-		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
-		BPF_MOV32_IMM(BPF_REG_3, 0),
+		/* r1 = xskmap[] */
+		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
+		/* r3 = 0 */
+		BPF_MOV64_IMM(BPF_REG_3, 0),
+		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
@@ -376,23 +420,25 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
			   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
			   log_buf_size);
	if (prog_fd < 0) {
-		pr_warning("BPF log buffer:\n%s", log_buf);
+		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}
-	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
+	err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, prog_fd,
+				  xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}
-	xsk->prog_fd = prog_fd;
+	ctx->prog_fd = prog_fd;
	return 0;
}

static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
+	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;
@@ -401,7 +447,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
		return -errno;

	ifr.ifr_data = (void *)&channels;
-	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+	memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
@@ -429,6 +475,7 @@ out:

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
+	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;
@@ -441,15 +488,17 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
	if (fd < 0)
		return fd;

-	xsk->xsks_map_fd = fd;
+	ctx->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
-
bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id); - close(xsk->xsks_map_fd); + struct xsk_ctx *ctx = xsk->ctx; + + bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id); + close(ctx->xsks_map_fd); } static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) @@ -457,10 +506,11 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info); __u32 map_len = sizeof(struct bpf_map_info); struct bpf_prog_info prog_info = {}; + struct xsk_ctx *ctx = xsk->ctx; struct bpf_map_info map_info; int fd, err; - err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); + err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len); if (err) return err; @@ -474,11 +524,11 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) prog_info.nr_map_ids = num_maps; prog_info.map_ids = (__u64)(unsigned long)map_ids; - err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len); + err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len); if (err) goto out_map_ids; - xsk->xsks_map_fd = -1; + ctx->xsks_map_fd = -1; for (i = 0; i < prog_info.nr_map_ids; i++) { fd = bpf_map_get_fd_by_id(map_ids[i]); @@ -492,7 +542,7 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) } if (!strcmp(map_info.name, "xsks_map")) { - xsk->xsks_map_fd = fd; + ctx->xsks_map_fd = fd; continue; } @@ -500,7 +550,7 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) } err = 0; - if (xsk->xsks_map_fd == -1) + if (ctx->xsks_map_fd == -1) err = -ENOENT; out_map_ids: @@ -510,16 +560,19 @@ out_map_ids: static int xsk_set_bpf_maps(struct xsk_socket *xsk) { - return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, + struct xsk_ctx *ctx = xsk->ctx; + + return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id, &xsk->fd, 0); } static int xsk_setup_xdp_prog(struct xsk_socket *xsk) { + struct xsk_ctx *ctx = xsk->ctx; __u32 prog_id = 0; int err; - err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id, + err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags); if (err) return err; @@ -535,49 +588,141 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk) return err; } } else { - xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id); - if (xsk->prog_fd < 0) + ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (ctx->prog_fd < 0) return -errno; err = xsk_lookup_bpf_maps(xsk); if (err) { - close(xsk->prog_fd); + close(ctx->prog_fd); return err; } } - err = xsk_set_bpf_maps(xsk); + if (xsk->rx) + err = xsk_set_bpf_maps(xsk); if (err) { xsk_delete_bpf_maps(xsk); - close(xsk->prog_fd); + close(ctx->prog_fd); return err; } return 0; } -int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, - __u32 queue_id, struct xsk_umem *umem, - struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, - const struct xsk_socket_config *usr_config) +static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex, + __u32 queue_id) +{ + struct xsk_ctx *ctx; + + if (list_empty(&umem->ctx_list)) + return NULL; + + list_for_each_entry(ctx, &umem->ctx_list, list) { + if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) { + ctx->refcount++; + return ctx; + } + } + + return NULL; +} + +static void xsk_put_ctx(struct xsk_ctx *ctx) +{ + struct xsk_umem *umem = ctx->umem; + struct xdp_mmap_offsets off; + int err; + + if (--ctx->refcount == 0) { + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err) { + munmap(ctx->fill->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * + sizeof(__u64)); + munmap(ctx->comp->ring - off.cr.desc, + 
off.cr.desc + umem->config.comp_size * + sizeof(__u64)); + } + + list_del(&ctx->list); + free(ctx); + } +} + +static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, + struct xsk_umem *umem, int ifindex, + const char *ifname, __u32 queue_id, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp) +{ + struct xsk_ctx *ctx; + int err; + + ctx = calloc(1, sizeof(*ctx)); + if (!ctx) + return NULL; + + if (!umem->fill_save) { + err = xsk_create_umem_rings(umem, xsk->fd, fill, comp); + if (err) { + free(ctx); + return NULL; + } + } else if (umem->fill_save != fill || umem->comp_save != comp) { + /* Copy over rings to new structs. */ + memcpy(fill, umem->fill_save, sizeof(*fill)); + memcpy(comp, umem->comp_save, sizeof(*comp)); + } + + ctx->ifindex = ifindex; + ctx->refcount = 1; + ctx->umem = umem; + ctx->queue_id = queue_id; + memcpy(ctx->ifname, ifname, IFNAMSIZ - 1); + ctx->ifname[IFNAMSIZ - 1] = '\0'; + + umem->fill_save = NULL; + umem->comp_save = NULL; + ctx->fill = fill; + ctx->comp = comp; + list_add(&ctx->list, &umem->ctx_list); + return ctx; +} + +int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, + const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_socket_config *usr_config) { void *rx_map = NULL, *tx_map = NULL; struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_socket *xsk; - int err; + struct xsk_ctx *ctx; + int err, ifindex; - if (!umem || !xsk_ptr || !rx || !tx) + if (!umem || !xsk_ptr || !(rx || tx)) return -EFAULT; - if (umem->refcount) { - pr_warning("Error: shared umems not supported by libbpf.\n"); - return -EBUSY; - } - xsk = calloc(1, sizeof(*xsk)); if (!xsk) return -ENOMEM; + err = xsk_set_xdp_socket_config(&xsk->config, usr_config); + if (err) + goto out_xsk_alloc; + + xsk->outstanding_tx = 0; + ifindex = if_nametoindex(ifname); + if (!ifindex) { + err = -errno; + goto out_xsk_alloc; + } + if (umem->refcount++ > 0) { xsk->fd = socket(AF_XDP, SOCK_RAW, 0); if (xsk->fd < 0) { @@ -588,20 +733,21 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, xsk->fd = umem->fd; } - xsk->outstanding_tx = 0; - xsk->queue_id = queue_id; - xsk->umem = umem; - xsk->ifindex = if_nametoindex(ifname); - if (!xsk->ifindex) { - err = -errno; - goto out_socket; - } - memcpy(xsk->ifname, ifname, IFNAMSIZ - 1); - xsk->ifname[IFNAMSIZ - 1] = '\0'; + ctx = xsk_get_ctx(umem, ifindex, queue_id); + if (!ctx) { + if (!fill || !comp) { + err = -EFAULT; + goto out_socket; + } - err = xsk_set_xdp_socket_config(&xsk->config, usr_config); - if (err) - goto out_socket; + ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id, + fill, comp); + if (!ctx) { + err = -ENOMEM; + goto out_socket; + } + } + xsk->ctx = ctx; if (rx) { err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, @@ -609,7 +755,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, sizeof(xsk->config.rx_size)); if (err) { err = -errno; - goto out_socket; + goto out_put_ctx; } } if (tx) { @@ -618,14 +764,14 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, sizeof(xsk->config.tx_size)); if (err) { err = -errno; - goto out_socket; + goto out_put_ctx; } } err = xsk_get_mmap_offsets(xsk->fd, &off); if (err) { err = -errno; - goto out_socket; + goto out_put_ctx; } if (rx) { @@ -635,7 +781,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, xsk->fd, XDP_PGOFF_RX_RING); if (rx_map == 
MAP_FAILED) { err = -errno; - goto out_socket; + goto out_put_ctx; } rx->mask = xsk->config.rx_size - 1; @@ -644,6 +790,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, rx->consumer = rx_map + off.rx.consumer; rx->flags = rx_map + off.rx.flags; rx->ring = rx_map + off.rx.desc; + rx->cached_prod = *rx->producer; + rx->cached_cons = *rx->consumer; } xsk->rx = rx; @@ -663,14 +811,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, tx->consumer = tx_map + off.tx.consumer; tx->flags = tx_map + off.tx.flags; tx->ring = tx_map + off.tx.desc; - tx->cached_cons = xsk->config.tx_size; + tx->cached_prod = *tx->producer; + /* cached_cons is r->size bigger than the real consumer pointer + * See xsk_prod_nb_free + */ + tx->cached_cons = *tx->consumer + xsk->config.tx_size; } xsk->tx = tx; sxdp.sxdp_family = PF_XDP; - sxdp.sxdp_ifindex = xsk->ifindex; - sxdp.sxdp_queue_id = xsk->queue_id; - sxdp.sxdp_flags = xsk->config.bind_flags; + sxdp.sxdp_ifindex = ctx->ifindex; + sxdp.sxdp_queue_id = ctx->queue_id; + if (umem->refcount > 1) { + sxdp.sxdp_flags |= XDP_SHARED_UMEM; + sxdp.sxdp_shared_umem_fd = umem->fd; + } else { + sxdp.sxdp_flags = xsk->config.bind_flags; + } err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); if (err) { @@ -678,7 +835,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, goto out_mmap_tx; } - xsk->prog_fd = -1; + ctx->prog_fd = -1; if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { err = xsk_setup_xdp_prog(xsk); @@ -697,6 +854,8 @@ out_mmap_rx: if (rx) munmap(rx_map, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc)); +out_put_ctx: + xsk_put_ctx(ctx); out_socket: if (--umem->refcount) close(xsk->fd); @@ -705,25 +864,24 @@ out_xsk_alloc: return err; } +int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, + const struct xsk_socket_config *usr_config) +{ + return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, + rx, tx, umem->fill_save, + umem->comp_save, usr_config); +} + int xsk_umem__delete(struct xsk_umem *umem) { - struct xdp_mmap_offsets off; - int err; - if (!umem) return 0; if (umem->refcount) return -EBUSY; - err = xsk_get_mmap_offsets(umem->fd, &off); - if (!err) { - munmap(umem->fill->ring - off.fr.desc, - off.fr.desc + umem->config.fill_size * sizeof(__u64)); - munmap(umem->comp->ring - off.cr.desc, - off.cr.desc + umem->config.comp_size * sizeof(__u64)); - } - close(umem->fd); free(umem); @@ -734,14 +892,18 @@ void xsk_socket__delete(struct xsk_socket *xsk) { size_t desc_sz = sizeof(struct xdp_desc); struct xdp_mmap_offsets off; + struct xsk_umem *umem; + struct xsk_ctx *ctx; int err; if (!xsk) return; - if (xsk->prog_fd != -1) { + ctx = xsk->ctx; + umem = ctx->umem; + if (ctx->prog_fd != -1) { xsk_delete_bpf_maps(xsk); - close(xsk->prog_fd); + close(ctx->prog_fd); } err = xsk_get_mmap_offsets(xsk->fd, &off); @@ -754,14 +916,15 @@ void xsk_socket__delete(struct xsk_socket *xsk) munmap(xsk->tx->ring - off.tx.desc, off.tx.desc + xsk->config.tx_size * desc_sz); } - } - xsk->umem->refcount--; + xsk_put_ctx(ctx); + + umem->refcount--; /* Do not close an fd that also has an associated umem connected * to it. 
*/ - if (xsk->fd != xsk->umem->fd) + if (xsk->fd != umem->fd) close(xsk->fd); free(xsk); } diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h index 584f6820a639..1069c46364ff 100644 --- a/tools/lib/bpf/xsk.h +++ b/tools/lib/bpf/xsk.h @@ -234,6 +234,15 @@ LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *config); +LIBBPF_API int +xsk_socket__create_shared(struct xsk_socket **xsk_ptr, + const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_socket_config *config); /* Returns 0 for success and -EBUSY if the umem is still in use. */ LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem); diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h index 794a375dad36..b2aec04fce8f 100644 --- a/tools/lib/subcmd/subcmd-util.h +++ b/tools/lib/subcmd/subcmd-util.h @@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...) static inline void *xrealloc(void *ptr, size_t size) { void *ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) { - ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) - die("Out of memory, realloc failed"); - } + if (!ret) + die("Out of memory, realloc failed"); return ret; } diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index d948475585ce..4559a15e6657 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name) return 0; } +static int append(char **buf, const char *delim, const char *str) +{ + char *new_buf; + + new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1); + if (!new_buf) + return -1; + strcat(new_buf, delim); + strcat(new_buf, str); + *buf = new_buf; + return 0; +} + static int event_read_fields(struct tep_event *event, struct tep_format_field **fields) { struct tep_format_field *field = NULL; enum tep_event_type type; char *token; char *last_token; + char *delim = " "; int count = 0; + int ret; do { unsigned int size_dynamic = 0; @@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->flags |= TEP_FIELD_IS_POINTER; if (field->type) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(last_token) + 2); - if (!new_type) { - free(last_token); - goto fail; - } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, last_token); + ret = append(&field->type, delim, last_token); free(last_token); + if (ret < 0) + goto fail; } else field->type = last_token; last_token = token; + delim = " "; continue; } + /* Handle __attribute__((user)) */ + if ((type == TEP_EVENT_DELIM) && + strcmp("__attribute__", last_token) == 0 && + token[0] == '(') { + int depth = 1; + int ret; + + ret = append(&field->type, " ", last_token); + ret |= append(&field->type, "", "("); + if (ret < 0) + goto fail; + + delim = " "; + while ((type = read_token(&token)) != TEP_EVENT_NONE) { + if (type == TEP_EVENT_DELIM) { + if (token[0] == '(') + depth++; + else if (token[0] == ')') + depth--; + if (!depth) + break; + ret = append(&field->type, "", token); + delim = ""; + } else { + ret = append(&field->type, delim, token); + delim = " "; + } + if (ret < 0) + goto fail; + free(last_token); + last_token = token; + } + 
continue; + } break; } @@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** if (strcmp(token, "[") == 0) { enum tep_event_type last_type = type; char *brackets = token; - char *new_brackets; - int len; field->flags |= TEP_FIELD_IS_ARRAY; @@ -1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->arraylen = 0; while (strcmp(token, "]") != 0) { + const char *delim; + if (last_type == TEP_EVENT_ITEM && type == TEP_EVENT_ITEM) - len = 2; + delim = " "; else - len = 1; + delim = ""; + last_type = type; - new_brackets = realloc(brackets, - strlen(brackets) + - strlen(token) + len); - if (!new_brackets) { + ret = append(&brackets, delim, token); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - if (len == 2) - strcat(brackets, " "); - strcat(brackets, token); /* We only care about the last token */ field->arraylen = strtoul(token, NULL, 0); free_token(token); type = read_token(&token); if (type == TEP_EVENT_NONE) { + free(brackets); do_warning_event(event, "failed to find token"); goto fail; } @@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** free_token(token); - new_brackets = realloc(brackets, strlen(brackets) + 2); - if (!new_brackets) { + ret = append(&brackets, "", "]"); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - strcat(brackets, "]"); /* add brackets to type */ @@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** * the format: type [] item; */ if (type == TEP_EVENT_ITEM) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(field->name) + - strlen(brackets) + 2); - if (!new_type) { + ret = append(&field->type, " ", field->name); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, field->name); + ret = append(&field->type, "", brackets); + size_dynamic = type_size(field->name); free_token(field->name); - strcat(field->type, brackets); field->name = field->alias = token; type = read_token(&token); } else { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(brackets) + 1); - if (!new_type) { + ret = append(&field->type, "", brackets); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, brackets); } free(brackets); } @@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok) /* could just be a type pointer */ if ((strcmp(arg->op.op, "*") == 0) && type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) { - char *new_atom; + int ret; if (left->type != TEP_PRINT_ATOM) { do_warning_event(event, "bad pointer type"); goto out_free; } - new_atom = realloc(left->atom.atom, - strlen(left->atom.atom) + 3); - if (!new_atom) + ret = append(&left->atom.atom, " ", "*"); + if (ret < 0) goto out_warn_free; - left->atom.atom = new_atom; - strcat(left->atom.atom, " *"); free(arg->op.op); *arg = *left; free(left); @@ -2839,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg, if (read_expected(TEP_EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; @@ -3151,18 +3174,15 @@ process_arg_token(struct tep_event *event, struct tep_print_arg *arg, } /* atoms can be more than one token long */ while (type == TEP_EVENT_ITEM) { - char *new_atom; - new_atom = 
realloc(atom, - strlen(atom) + strlen(token) + 2); - if (!new_atom) { + int ret; + + ret = append(&atom, " ", token); + if (ret < 0) { free(atom); *tok = NULL; free_token(token); return TEP_EVENT_ERROR; } - atom = new_atom; - strcat(atom, " "); - strcat(atom, token); free_token(token); type = read_token_item(&token); } diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index f440989fa55e..23c3535bcbd6 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -196,7 +196,7 @@ define do_generate_dynamic_list_file xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ if [ "$$symbol_type" = "U W" ];then \ (echo '{'; \ - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\ echo '};'; \ ) > $2; \ else \ diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index ee08aeff30a1..9ae4a10438ee 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -3,9 +3,6 @@ include ../scripts/Makefile.include include ../scripts/Makefile.arch # always use the host compiler -HOSTAR ?= ar -HOSTCC ?= gcc -HOSTLD ?= ld AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f53d3c515cdc..06aaf04e629c 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -556,7 +556,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->type != INSN_JUMP_UNCONDITIONAL) continue; - if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) + if (insn->offset == FAKE_JUMP_OFFSET) continue; rela = find_rela_by_dest_range(insn->sec, insn->offset, @@ -626,8 +626,8 @@ static int add_jump_destinations(struct objtool_file *file) * case where the parent function's only reference to a * subfunction is through a jump table. */ - if (!strstr(insn->func->name, ".cold.") && - strstr(insn->jump_dest->func->name, ".cold.")) { + if (!strstr(insn->func->name, ".cold") && + strstr(insn->jump_dest->func->name, ".cold")) { insn->func->cfunc = insn->jump_dest->func; insn->jump_dest->func->pfunc = insn->func; @@ -865,6 +865,12 @@ static int add_special_section_alts(struct objtool_file *file) } if (special_alt->group) { + if (!special_alt->orig_len) { + WARN_FUNC("empty alternative entry", + orig_insn->sec, orig_insn->offset); + continue; + } + ret = handle_group_alt(file, special_alt, orig_insn, &new_insn); if (ret) @@ -1010,10 +1016,7 @@ static struct rela *find_jump_table(struct objtool_file *file, * it. 
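A note on the ".cold." to ".cold" relaxation above: newer GCC releases (reportedly starting with GCC 10) name a split-off cold region plain "func.cold" rather than "func.cold.N", so the old substring match no longer fired. A tiny standalone check with a hypothetical symbol name makes the difference visible:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *name = "queue_rcu_work.cold";  /* GCC 10 style name */

            printf("\".cold.\" matches: %s\n", strstr(name, ".cold.") ? "yes" : "no");
            printf("\".cold\"  matches: %s\n", strstr(name, ".cold") ? "yes" : "no");
            return 0;
    }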
*/ for (; - &insn->list != &file->insn_list && - insn->sec == func->sec && - insn->offset >= func->offset; - + &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) @@ -1405,7 +1408,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s struct cfi_reg *cfa = &state->cfa; struct stack_op *op = &insn->stack_op; - if (cfa->base != CFI_SP) + if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) return 0; /* push */ @@ -2189,15 +2192,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, break; case INSN_STD: - if (state.df) + if (state.df) { WARN_FUNC("recursive STD", sec, insn->offset); + return 1; + } state.df = true; break; case INSN_CLD: - if (!state.df && func) + if (!state.df && func) { WARN_FUNC("redundant CLD", sec, insn->offset); + return 1; + } state.df = false; break; @@ -2309,14 +2316,27 @@ static bool ignore_unreachable_insn(struct instruction *insn) !strcmp(insn->sec->name, ".altinstr_aux")) return true; + if (!insn->func) + return false; + + /* + * CONFIG_UBSAN_TRAP inserts a UD2 when it sees + * __builtin_unreachable(). The BUG() macro has an unreachable() after + * the UD2, which causes GCC's undefined trap logic to emit another UD2 + * (or occasionally a JMP to UD2). + */ + if (list_prev_entry(insn, list)->dead_end && + (insn->type == INSN_BUG || + (insn->type == INSN_JUMP_UNCONDITIONAL && + insn->jump_dest && insn->jump_dest->type == INSN_BUG))) + return true; + /* * Check if this (or a subsequent) instruction is related to * CONFIG_UBSAN or CONFIG_KASAN. * * End the search at 5 instructions to avoid going into the weeds. */ - if (!insn->func) - return false; for (i = 0; i < 5; i++) { if (is_kasan_insn(insn) || is_ubsan_insn(insn)) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index edba4745f25a..693d740107a8 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -214,8 +214,11 @@ static int read_symbols(struct elf *elf) symtab = find_section_by_name(elf, ".symtab"); if (!symtab) { - WARN("missing symbol table"); - return -1; + /* + * A missing symbol table is actually possible if it's an empty + * .o file. This can happen for thunk_64.o. 
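The CONFIG_UBSAN_TRAP comment above describes a second UD2 (or a jump to one) landing right after BUG()'s own UD2. A hypothetical userspace analogue of that shape, for x86-64 and assuming a build with -fsanitize=unreachable -fsanitize-undefined-trap-on-error, would be:

    /* Mirrors the kernel pattern: the inline asm plays the role of BUG()'s
     * ud2 (opaque to the compiler, so nothing is proven noreturn), and the
     * sanitizer then lowers __builtin_unreachable() to a trap of its own,
     * leaving two consecutive ud2 instructions, the second of which follows
     * a dead end and is what the new objtool check ignores. */
    void die(void)
    {
            asm volatile("ud2");
            __builtin_unreachable();
    }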
+ */ + return 0; } symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 13ccf775a83a..ba4cbb1cdd63 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -66,7 +66,7 @@ int orc_dump(const char *_objname) char *name; size_t nr_sections; Elf64_Addr orc_ip_addr = 0; - size_t shstrtab_idx; + size_t shstrtab_idx, strtab_idx = 0; Elf *elf; Elf_Scn *scn; GElf_Shdr sh; @@ -127,6 +127,8 @@ int orc_dump(const char *_objname) if (!strcmp(name, ".symtab")) { symtab = data; + } else if (!strcmp(name, ".strtab")) { + strtab_idx = i; } else if (!strcmp(name, ".orc_unwind")) { orc = data->d_buf; orc_size = sh.sh_size; @@ -138,7 +140,7 @@ int orc_dump(const char *_objname) } } - if (!symtab || !orc || !orc_ip) + if (!symtab || !strtab_idx || !orc || !orc_ip) return 0; if (orc_size % sizeof(*orc) != 0) { @@ -159,21 +161,29 @@ int orc_dump(const char *_objname) return -1; } - scn = elf_getscn(elf, sym.st_shndx); - if (!scn) { - WARN_ELF("elf_getscn"); - return -1; - } + if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) { + scn = elf_getscn(elf, sym.st_shndx); + if (!scn) { + WARN_ELF("elf_getscn"); + return -1; + } - if (!gelf_getshdr(scn, &sh)) { - WARN_ELF("gelf_getshdr"); - return -1; - } + if (!gelf_getshdr(scn, &sh)) { + WARN_ELF("gelf_getshdr"); + return -1; + } - name = elf_strptr(elf, shstrtab_idx, sh.sh_name); - if (!name || !*name) { - WARN_ELF("elf_strptr"); - return -1; + name = elf_strptr(elf, shstrtab_idx, sh.sh_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } + } else { + name = elf_strptr(elf, strtab_idx, sym.st_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } } printf("%s+%llx:", name, (unsigned long long)rela.r_addend); diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index 27a4112848c2..e8c34b0a8b00 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -88,11 +88,6 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec, struct orc_entry *orc; struct rela *rela; - if (!insn_sec->sym) { - WARN("missing symbol for section %s", insn_sec->name); - return -1; - } - /* populate ORC data */ orc = (struct orc_entry *)u_sec->data->d_buf + idx; memcpy(orc, o, sizeof(*orc)); @@ -105,8 +100,32 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec, } memset(rela, 0, sizeof(*rela)); - rela->sym = insn_sec->sym; - rela->addend = insn_off; + if (insn_sec->sym) { + rela->sym = insn_sec->sym; + rela->addend = insn_off; + } else { + /* + * The Clang assembler doesn't produce section symbols, so we + * have to reference the function symbol instead: + */ + rela->sym = find_symbol_containing(insn_sec, insn_off); + if (!rela->sym) { + /* + * Hack alert. This happens when we need to reference + * the NOP pad insn immediately after the function. 
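+ *
+ * With illustrative numbers: for an insn at section offset 0x52 inside
+ * (or just past) my_func, which starts at section offset 0x40, the
+ * entry is emitted as "my_func + 0x12", i.e.
+ * addend = insn_off - rela->sym->offset, instead of ".text + 0x52".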
+ */ + rela->sym = find_symbol_containing(insn_sec, + insn_off - 1); + } + if (!rela->sym) { + WARN("missing symbol for insn at offset 0x%lx\n", + insn_off); + return -1; + } + + rela->addend = insn_off - rela->sym->offset; + } + rela->type = R_X86_64_PC32; rela->offset = idx * sizeof(int); diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index bf1252dc2cb0..1d8a7301d995 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -2,7 +2,6 @@ PERF-CFLAGS PERF-GUI-VARS PERF-VERSION-FILE FEATURE-DUMP -perf perf-read-vdso32 perf-read-vdsox32 perf-help diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index c6f9f31b6039..15fd108afbe6 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices/<pmu>/format/*. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 930c51c01201..9abf1cf217e2 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_source/devices/<pmu>/format/* diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 46f7fba2306c..9832affd5d54 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -228,8 +228,17 @@ strip-libs = $(filter-out -l%,$(1)) PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) +# Python 3.8 changed the output of `python-config --ldflags` to not include the +# '-lpythonX.Y' flag unless '--embed' is also passed. 
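+# ("--embed" is only understood by python-config from 3.8 onwards; older
+# versions exit non-zero, which is what the probe below keys off.)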
The feature check for +# libpython fails if that flag is not included in LDFLAGS +ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0) + PYTHON_CONFIG_LDFLAGS := --ldflags --embed +else + PYTHON_CONFIG_LDFLAGS := --ldflags +endif + ifdef PYTHON_CONFIG - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null) PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 902c792f326a..961f5e4fd656 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -163,10 +163,6 @@ endef LD += $(EXTRA_LDFLAGS) -HOSTCC ?= gcc -HOSTLD ?= ld -HOSTAR ?= ar - PKG_CONFIG = $(CROSS_COMPILE)pkg-config LLVM_CONFIG ?= llvm-config diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index 0a6e75b8777a..28a5d0c18b1d 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -56,7 +56,7 @@ struct auxtrace_record struct perf_pmu *cs_etm_pmu; struct evsel *evsel; bool found_etm = false; - bool found_spe = false; + struct perf_pmu *found_spe = NULL; static struct perf_pmu **arm_spe_pmus = NULL; static int nr_spes = 0; int i = 0; @@ -74,12 +74,12 @@ struct auxtrace_record evsel->core.attr.type == cs_etm_pmu->type) found_etm = true; - if (!nr_spes) + if (!nr_spes || found_spe) continue; for (i = 0; i < nr_spes; i++) { if (evsel->core.attr.type == arm_spe_pmus[i]->type) { - found_spe = true; + found_spe = arm_spe_pmus[i]; break; } } @@ -96,7 +96,7 @@ struct auxtrace_record #if defined(__aarch64__) if (found_spe) - return arm_spe_recording_init(err, arm_spe_pmus[i]); + return arm_spe_recording_init(err, found_spe); #endif /* diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index d43f9dec6998..e768c02ef2ab 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -596,6 +596,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } evsel->core.attr.freq = 0; evsel->core.attr.sample_period = 1; + evsel->no_aux_samples = true; intel_pt_evsel = evsel; opts->full_auxtrace = true; } diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index fddb3ced9db6..4aa6de1aa67d 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -2,6 +2,10 @@ #ifndef BENCH_H #define BENCH_H +#include <sys/time.h> + +extern struct timeval bench__start, bench__end, bench__runtime; + /* * The madvise transparent hugepage constants were added in glibc * 2.13. 
For compatibility with older versions of glibc, define these diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c index bb617e568841..a7526c05df38 100644 --- a/tools/perf/bench/epoll-ctl.c +++ b/tools/perf/bench/epoll-ctl.c @@ -35,7 +35,6 @@ static unsigned int nthreads = 0; static unsigned int nsecs = 8; -struct timeval start, end, runtime; static bool done, __verbose, randomize; /* @@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void nest_epollfd(void) @@ -361,7 +360,7 @@ int bench_epoll_ctl(int argc, const char **argv) threads_starting = nthreads; - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); do_threads(worker, cpu); diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c index 7af694437f4e..d1c5cb526b9f 100644 --- a/tools/perf/bench/epoll-wait.c +++ b/tools/perf/bench/epoll-wait.c @@ -90,7 +90,6 @@ static unsigned int nthreads = 0; static unsigned int nsecs = 8; -struct timeval start, end, runtime; static bool wdone, done, __verbose, randomize, nonblocking; /* @@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void print_summary(void) @@ -287,7 +286,7 @@ static void print_summary(void) printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } static int do_threads(struct worker *worker, struct perf_cpu_map *cpu) @@ -479,7 +478,7 @@ int bench_epoll_wait(int argc, const char **argv) threads_starting = nthreads; - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); do_threads(worker, cpu); @@ -519,7 +518,7 @@ int bench_epoll_wait(int argc, const char **argv) qsort(worker, nthreads, sizeof(struct worker), cmpworker); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c index 8ba0c3330a9a..21776862e940 100644 --- a/tools/perf/bench/futex-hash.c +++ b/tools/perf/bench/futex-hash.c @@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024; static bool fshared = false, done = false, silent = false; static int futex_flag = 0; -struct timeval start, end, runtime; +struct timeval bench__start, bench__end, bench__runtime; static pthread_mutex_t thread_lock; static unsigned int threads_starting; static struct stats throughput_stats; @@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void print_summary(void) @@ -114,7 +114,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !silent ? 
"\n" : "", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } int bench_futex_hash(int argc, const char **argv) @@ -161,7 +161,7 @@ int bench_futex_hash(int argc, const char **argv) threads_starting = nthreads; pthread_attr_init(&thread_attr); - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); for (i = 0; i < nthreads; i++) { worker[i].tid = i; worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex)); @@ -204,7 +204,7 @@ int bench_futex_hash(int argc, const char **argv) pthread_mutex_destroy(&thread_lock); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); if (!silent) { if (nfutexes == 1) diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c index d0cae8125423..30d97121dc4f 100644 --- a/tools/perf/bench/futex-lock-pi.c +++ b/tools/perf/bench/futex-lock-pi.c @@ -37,7 +37,6 @@ static bool silent = false, multi = false; static bool done = false, fshared = false; static unsigned int nthreads = 0; static int futex_flag = 0; -struct timeval start, end, runtime; static pthread_mutex_t thread_lock; static unsigned int threads_starting; static struct stats throughput_stats; @@ -64,7 +63,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } static void toggle_done(int sig __maybe_unused, @@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void *workerfn(void *arg) @@ -185,7 +184,7 @@ int bench_futex_lock_pi(int argc, const char **argv) threads_starting = nthreads; pthread_attr_init(&thread_attr); - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); create_threads(worker, thread_attr, cpu); pthread_attr_destroy(&thread_attr); @@ -211,7 +210,7 @@ int bench_futex_lock_pi(int argc, const char **argv) pthread_mutex_destroy(&thread_lock); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); if (!silent) diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c index 9235b76501be..19d45c377ac1 100644 --- a/tools/perf/bench/mem-functions.c +++ b/tools/perf/bench/mem-functions.c @@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info * return 0; } -static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; - memcpy_t fn = r->fn.memcpy; - int i; - /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */ memset(src, 0, size); @@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo * to not measure page fault overhead: */ fn(dst, src, size); +} + +static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +{ + u64 cycle_start = 0ULL, cycle_end = 0ULL; + memcpy_t fn = r->fn.memcpy; + int i; + + memcpy_prefault(fn, size, src, dst); cycle_start 
= get_cycles(); for (i = 0; i < nr_loops; ++i) @@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void memcpy_t fn = r->fn.memcpy; int i; - /* - * We prefault the freshly allocated memory range here, - * to not measure page fault overhead: - */ - fn(dst, src, size); + memcpy_prefault(fn, size, src, dst); BUG_ON(gettimeofday(&tv_start, NULL)); for (i = 0; i < nr_loops; ++i) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index d5adc417a4ca..40b179f54428 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -161,7 +161,7 @@ static int set_tracing_pid(struct perf_ftrace *ftrace) for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { scnprintf(buf, sizeof(buf), "%d", - ftrace->evlist->core.threads->map[i]); + perf_thread_map__pid(ftrace->evlist->core.threads, i)); if (append_tracing_file("set_ftrace_pid", buf) < 0) return -1; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 372ecb3e2c06..0d524ef3606d 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -835,7 +835,7 @@ int cmd_inject(int argc, const char **argv) inject.tool.ordered_events = inject.sched_stat; data.path = inject.input_name; - inject.session = perf_session__new(&data, true, &inject.tool); + inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); if (IS_ERR(inject.session)) return PTR_ERR(inject.session); diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 474dfd59d7eb..d06665077dfe 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -621,7 +621,7 @@ static int report_lock_release_event(struct evsel *evsel, case SEQ_STATE_READ_ACQUIRED: seq->read_count--; BUG_ON(seq->read_count < 0); - if (!seq->read_count) { + if (seq->read_count) { ls->nr_release++; goto end; } diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 26bc5923e6b5..2f05f59e9758 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -364,6 +364,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) for (k = 0; k < pev->ntevs; k++) { struct probe_trace_event *tev = &pev->tevs[k]; + /* Skipped events have no event name */ + if (!tev->event) + continue; /* We use tev's name for showing new events */ show_perf_probe_event(tev->group, tev->event, pev, diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 23332861de6e..454e275cd5df 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2137,7 +2137,7 @@ static struct option __record_options[] = { OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize, "synthesize non-sample events at the end of output"), OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"), - OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"), + OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"), OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq, "Fail if the specified frequency can't be used"), OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'", diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 6407dff405d9..d3c0b04e2e22 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -181,24 +181,23 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter, { struct hist_entry *he = iter->he; struct report *rep = arg; - struct branch_info *bi; + struct 
branch_info *bi = he->branch_info; struct perf_sample *sample = iter->sample; struct evsel *evsel = iter->evsel; int err; + branch_type_count(&rep->brtype_stat, &bi->flags, + bi->from.addr, bi->to.addr); + if (!ui__has_annotation() && !rep->symbol_ipc) return 0; - bi = he->branch_info; err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); if (err) goto out; err = addr_map_symbol__inc_samples(&bi->to, sample, evsel); - branch_type_count(&rep->brtype_stat, &bi->flags, - bi->from.addr, bi->to.addr); - out: return err; } @@ -463,8 +462,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report if (rep->time_str) ret += fprintf(fp, " (time slices: %s)", rep->time_str); - if (symbol_conf.show_ref_callgraph && - strstr(evname, "call-graph=no")) { + if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { ret += fprintf(fp, ", show reference callgraph"); } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 468fc49420ce..ea183922c4ef 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -351,7 +351,7 @@ static void process_interval(void) } init_stats(&walltime_nsecs_stats); - update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); + update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); print_counters(&rs, 0, NULL); } @@ -1671,8 +1671,10 @@ static void setup_system_wide(int forks) struct evsel *counter; evlist__for_each_entry(evsel_list, counter) { - if (!counter->core.system_wide) + if (!counter->core.system_wide && + strcmp(counter->name, "duration_time")) { return; + } } if (evsel_list->core.nr_entries) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1f60124eb19b..a30d62186f5e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -683,7 +683,9 @@ repeat: delay_msecs = top->delay_secs * MSEC_PER_SEC; set_term_quiet_input(&save); /* trash return*/ - getc(stdin); + clearerr(stdin); + if (poll(&stdin_poll, 1, 0) > 0) + getc(stdin); while (!done) { perf_top__print_sym_table(top); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index bb5130d02155..a5201de1a191 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -3979,9 +3979,9 @@ do_concat: err = 0; if (lists[0]) { - struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event", - "event selector. 
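A quick aside on the 1000000ULL change above: stat_config.interval is an unsigned int holding milliseconds, and multiplying it by a plain int constant keeps the arithmetic in 32 bits, which wraps for intervals beyond roughly 4294 ms. Promoting the constant forces 64-bit math. A minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int interval = 5000;                       /* milliseconds */
            unsigned long long wrapped = interval * 1000000;    /* 32-bit mul */
            unsigned long long correct = interval * 1000000ULL; /* 64-bit mul */

            printf("wrapped: %llu\n", wrapped);  /* 705032704 */
            printf("correct: %llu\n", correct);  /* 5000000000 */
            return 0;
    }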
use 'perf list' to list available events", - parse_events_option); + struct option o = { + .value = &trace->evlist, + }; err = parse_events_option(&o, lists[0], 0); } out: @@ -3995,9 +3995,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u { struct trace *trace = opt->value; - if (!list_empty(&trace->evlist->core.entries)) - return parse_cgroups(opt, str, unset); - + if (!list_empty(&trace->evlist->core.entries)) { + struct option o = { + .value = &trace->evlist, + }; + return parse_cgroups(&o, str, unset); + } trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); return 0; diff --git a/tools/perf/lib/include/perf/a b/tools/perf/lib/include/perf/a new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/perf/lib/include/perf/core.h b/tools/perf/lib/include/perf/core.h new file mode 100644 index 000000000000..cfd70e720c1c --- /dev/null +++ b/tools/perf/lib/include/perf/core.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_CORE_H +#define __LIBPERF_CORE_H + +#include <stdarg.h> + +#ifndef LIBPERF_API +#define LIBPERF_API __attribute__((visibility("default"))) +#endif + +enum libperf_print_level { + LIBPERF_WARN, + LIBPERF_INFO, + LIBPERF_DEBUG, +}; + +typedef int (*libperf_print_fn_t)(enum libperf_print_level level, + const char *, va_list ap); + +LIBPERF_API void libperf_init(libperf_print_fn_t fn); + +#endif /* __LIBPERF_CORE_H */ diff --git a/tools/perf/lib/include/perf/cpumap.h b/tools/perf/lib/include/perf/cpumap.h new file mode 100644 index 000000000000..ac9aa497f84a --- /dev/null +++ b/tools/perf/lib/include/perf/cpumap.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_CPUMAP_H +#define __LIBPERF_CPUMAP_H + +#include <perf/core.h> +#include <stdio.h> +#include <stdbool.h> + +struct perf_cpu_map; + +LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); +LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); +LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); +LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); +LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); +LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); + +#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \ + for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \ + (idx) < perf_cpu_map__nr(cpus); \ + (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx)) + +#endif /* __LIBPERF_CPUMAP_H */ diff --git a/tools/perf/lib/include/perf/event.h b/tools/perf/lib/include/perf/event.h new file mode 100644 index 000000000000..a7944ddc6f90 --- /dev/null +++ b/tools/perf/lib/include/perf/event.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_EVENT_H +#define __LIBPERF_EVENT_H + +#include <linux/perf_event.h> +#include <linux/types.h> +#include <linux/limits.h> +#include <linux/bpf.h> +#include <sys/types.h> /* pid_t */ + + +struct perf_record_mmap { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + char filename[PATH_MAX]; +}; + +struct perf_record_mmap2 { + struct perf_event_header header; + __u32 pid, tid; + __u64 start; + __u64 len; + __u64 pgoff; + __u32 maj; + __u32 min; + __u64 ino; + __u64 ino_generation; + __u32 prot; + __u32 flags; + char 
filename[PATH_MAX]; +}; + +struct perf_record_comm { + struct perf_event_header header; + __u32 pid, tid; + char comm[16]; +}; + +struct perf_record_namespaces { + struct perf_event_header header; + __u32 pid, tid; + __u64 nr_namespaces; + struct perf_ns_link_info link_info[]; +}; + +struct perf_record_fork { + struct perf_event_header header; + __u32 pid, ppid; + __u32 tid, ptid; + __u64 time; +}; + +struct perf_record_lost { + struct perf_event_header header; + __u64 id; + __u64 lost; +}; + +struct perf_record_lost_samples { + struct perf_event_header header; + __u64 lost; +}; + +/* + * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID + */ +struct perf_record_read { + struct perf_event_header header; + __u32 pid, tid; + __u64 value; + __u64 time_enabled; + __u64 time_running; + __u64 id; +}; + +struct perf_record_throttle { + struct perf_event_header header; + __u64 time; + __u64 id; + __u64 stream_id; +}; + +#ifndef KSYM_NAME_LEN +#define KSYM_NAME_LEN 256 +#endif + +struct perf_record_ksymbol { + struct perf_event_header header; + __u64 addr; + __u32 len; + __u16 ksym_type; + __u16 flags; + char name[KSYM_NAME_LEN]; +}; + +struct perf_record_bpf_event { + struct perf_event_header header; + __u16 type; + __u16 flags; + __u32 id; + + /* for bpf_prog types */ + __u8 tag[BPF_TAG_SIZE]; // prog tag +}; + +struct perf_record_sample { + struct perf_event_header header; + __u64 array[]; +}; + +struct perf_record_switch { + struct perf_event_header header; + __u32 next_prev_pid; + __u32 next_prev_tid; +}; + +struct perf_record_header_attr { + struct perf_event_header header; + struct perf_event_attr attr; + __u64 id[]; +}; + +enum { + PERF_CPU_MAP__CPUS = 0, + PERF_CPU_MAP__MASK = 1, +}; + +struct cpu_map_entries { + __u16 nr; + __u16 cpu[]; +}; + +struct perf_record_record_cpu_map { + __u16 nr; + __u16 long_size; + unsigned long mask[]; +}; + +struct perf_record_cpu_map_data { + __u16 type; + char data[]; +}; + +struct perf_record_cpu_map { + struct perf_event_header header; + struct perf_record_cpu_map_data data; +}; + +enum { + PERF_EVENT_UPDATE__UNIT = 0, + PERF_EVENT_UPDATE__SCALE = 1, + PERF_EVENT_UPDATE__NAME = 2, + PERF_EVENT_UPDATE__CPUS = 3, +}; + +struct perf_record_event_update_cpus { + struct perf_record_cpu_map_data cpus; +}; + +struct perf_record_event_update_scale { + double scale; +}; + +struct perf_record_event_update { + struct perf_event_header header; + __u64 type; + __u64 id; + char data[]; +}; + +#define MAX_EVENT_NAME 64 + +struct perf_trace_event_type { + __u64 event_id; + char name[MAX_EVENT_NAME]; +}; + +struct perf_record_header_event_type { + struct perf_event_header header; + struct perf_trace_event_type event_type; +}; + +struct perf_record_header_tracing_data { + struct perf_event_header header; + __u32 size; +}; + +struct perf_record_header_build_id { + struct perf_event_header header; + pid_t pid; + __u8 build_id[24]; + char filename[]; +}; + +struct id_index_entry { + __u64 id; + __u64 idx; + __u64 cpu; + __u64 tid; +}; + +struct perf_record_id_index { + struct perf_event_header header; + __u64 nr; + struct id_index_entry entries[0]; +}; + +struct perf_record_auxtrace_info { + struct perf_event_header header; + __u32 type; + __u32 reserved__; /* For alignment */ + __u64 priv[]; +}; + +struct perf_record_auxtrace { + struct perf_event_header header; + __u64 size; + __u64 offset; + __u64 reference; + __u32 idx; + __u32 tid; + __u32 cpu; + __u32 reserved__; /* For alignment */ +}; + +#define MAX_AUXTRACE_ERROR_MSG 64 + +struct perf_record_auxtrace_error 
{ + struct perf_event_header header; + __u32 type; + __u32 code; + __u32 cpu; + __u32 pid; + __u32 tid; + __u32 fmt; + __u64 ip; + __u64 time; + char msg[MAX_AUXTRACE_ERROR_MSG]; +}; + +struct perf_record_aux { + struct perf_event_header header; + __u64 aux_offset; + __u64 aux_size; + __u64 flags; +}; + +struct perf_record_itrace_start { + struct perf_event_header header; + __u32 pid; + __u32 tid; +}; + +struct perf_record_thread_map_entry { + __u64 pid; + char comm[16]; +}; + +struct perf_record_thread_map { + struct perf_event_header header; + __u64 nr; + struct perf_record_thread_map_entry entries[]; +}; + +enum { + PERF_STAT_CONFIG_TERM__AGGR_MODE = 0, + PERF_STAT_CONFIG_TERM__INTERVAL = 1, + PERF_STAT_CONFIG_TERM__SCALE = 2, + PERF_STAT_CONFIG_TERM__MAX = 3, +}; + +struct perf_record_stat_config_entry { + __u64 tag; + __u64 val; +}; + +struct perf_record_stat_config { + struct perf_event_header header; + __u64 nr; + struct perf_record_stat_config_entry data[]; +}; + +struct perf_record_stat { + struct perf_event_header header; + + __u64 id; + __u32 cpu; + __u32 thread; + + union { + struct { + __u64 val; + __u64 ena; + __u64 run; + }; + __u64 values[3]; + }; +}; + +struct perf_record_stat_round { + struct perf_event_header header; + __u64 type; + __u64 time; +}; + +struct perf_record_time_conv { + struct perf_event_header header; + __u64 time_shift; + __u64 time_mult; + __u64 time_zero; +}; + +struct perf_record_header_feature { + struct perf_event_header header; + __u64 feat_id; + char data[]; +}; + +struct perf_record_compressed { + struct perf_event_header header; + char data[]; +}; + +enum perf_user_event_type { /* above any possible kernel type */ + PERF_RECORD_USER_TYPE_START = 64, + PERF_RECORD_HEADER_ATTR = 64, + PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */ + PERF_RECORD_HEADER_TRACING_DATA = 66, + PERF_RECORD_HEADER_BUILD_ID = 67, + PERF_RECORD_FINISHED_ROUND = 68, + PERF_RECORD_ID_INDEX = 69, + PERF_RECORD_AUXTRACE_INFO = 70, + PERF_RECORD_AUXTRACE = 71, + PERF_RECORD_AUXTRACE_ERROR = 72, + PERF_RECORD_THREAD_MAP = 73, + PERF_RECORD_CPU_MAP = 74, + PERF_RECORD_STAT_CONFIG = 75, + PERF_RECORD_STAT = 76, + PERF_RECORD_STAT_ROUND = 77, + PERF_RECORD_EVENT_UPDATE = 78, + PERF_RECORD_TIME_CONV = 79, + PERF_RECORD_HEADER_FEATURE = 80, + PERF_RECORD_COMPRESSED = 81, + PERF_RECORD_HEADER_MAX +}; + +union perf_event { + struct perf_event_header header; + struct perf_record_mmap mmap; + struct perf_record_mmap2 mmap2; + struct perf_record_comm comm; + struct perf_record_namespaces namespaces; + struct perf_record_fork fork; + struct perf_record_lost lost; + struct perf_record_lost_samples lost_samples; + struct perf_record_read read; + struct perf_record_throttle throttle; + struct perf_record_sample sample; + struct perf_record_bpf_event bpf; + struct perf_record_ksymbol ksymbol; + struct perf_record_header_attr attr; + struct perf_record_event_update event_update; + struct perf_record_header_event_type event_type; + struct perf_record_header_tracing_data tracing_data; + struct perf_record_header_build_id build_id; + struct perf_record_id_index id_index; + struct perf_record_auxtrace_info auxtrace_info; + struct perf_record_auxtrace auxtrace; + struct perf_record_auxtrace_error auxtrace_error; + struct perf_record_aux aux; + struct perf_record_itrace_start itrace_start; + struct perf_record_switch context_switch; + struct perf_record_thread_map thread_map; + struct perf_record_cpu_map cpu_map; + struct perf_record_stat_config stat_config; + struct perf_record_stat stat; + 
struct perf_record_stat_round stat_round; + struct perf_record_time_conv time_conv; + struct perf_record_header_feature feat; + struct perf_record_compressed pack; +}; + +#endif /* __LIBPERF_EVENT_H */ diff --git a/tools/perf/lib/include/perf/evlist.h b/tools/perf/lib/include/perf/evlist.h new file mode 100644 index 000000000000..8a2ce0757ab2 --- /dev/null +++ b/tools/perf/lib/include/perf/evlist.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_EVLIST_H +#define __LIBPERF_EVLIST_H + +#include <perf/core.h> + +struct perf_evlist; +struct perf_evsel; +struct perf_cpu_map; +struct perf_thread_map; + +LIBPERF_API void perf_evlist__init(struct perf_evlist *evlist); +LIBPERF_API void perf_evlist__add(struct perf_evlist *evlist, + struct perf_evsel *evsel); +LIBPERF_API void perf_evlist__remove(struct perf_evlist *evlist, + struct perf_evsel *evsel); +LIBPERF_API struct perf_evlist *perf_evlist__new(void); +LIBPERF_API void perf_evlist__delete(struct perf_evlist *evlist); +LIBPERF_API struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist, + struct perf_evsel *evsel); +LIBPERF_API int perf_evlist__open(struct perf_evlist *evlist); +LIBPERF_API void perf_evlist__close(struct perf_evlist *evlist); +LIBPERF_API void perf_evlist__enable(struct perf_evlist *evlist); +LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist); + +#define perf_evlist__for_each_evsel(evlist, pos) \ + for ((pos) = perf_evlist__next((evlist), NULL); \ + (pos) != NULL; \ + (pos) = perf_evlist__next((evlist), (pos))) + +LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist, + struct perf_cpu_map *cpus, + struct perf_thread_map *threads); +LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout); + +#endif /* __LIBPERF_EVLIST_H */ diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h new file mode 100644 index 000000000000..4388667f265c --- /dev/null +++ b/tools/perf/lib/include/perf/evsel.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_EVSEL_H +#define __LIBPERF_EVSEL_H + +#include <stdint.h> +#include <perf/core.h> + +struct perf_evsel; +struct perf_event_attr; +struct perf_cpu_map; +struct perf_thread_map; + +struct perf_counts_values { + union { + struct { + uint64_t val; + uint64_t ena; + uint64_t run; + }; + uint64_t values[3]; + }; +}; + +LIBPERF_API void perf_evsel__init(struct perf_evsel *evsel, + struct perf_event_attr *attr); +LIBPERF_API struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr); +LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads); +LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count); +LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); +LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); +LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); +LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); + +#endif /* __LIBPERF_EVSEL_H */ diff --git a/tools/perf/lib/include/perf/threadmap.h b/tools/perf/lib/include/perf/threadmap.h new file mode 100644 index 000000000000..a7c50de8d010 --- /dev/null +++ b/tools/perf/lib/include/perf/threadmap.h @@ -0,0 +1,20 
@@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LIBPERF_THREADMAP_H +#define __LIBPERF_THREADMAP_H + +#include <perf/core.h> +#include <sys/types.h> + +struct perf_thread_map; + +LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void); + +LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid); +LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread); +LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads); +LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread); + +LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map); +LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map); + +#endif /* __LIBPERF_THREADMAP_H */ diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json index df9201434cb6..b0a10a219b50 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json @@ -114,7 +114,7 @@ "PublicDescription": "Level 2 access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2I_TLB_REFILL to count", "EventCode": "0x35", "EventName": "L2I_TLB_ACCESS", - "BriefDescription": "L2D TLB access" + "BriefDescription": "L2I TLB access" }, { "PublicDescription": "Branch target buffer misprediction", diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 99e3fd04a5cb..f4a0d72246cb 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -137,7 +137,7 @@ static char *fixregex(char *s) return s; /* allocate space for a new string */ - fixed = (char *) malloc(len + 1); + fixed = (char *) malloc(len + esc_count + 1); if (!fixed) return NULL; @@ -1068,10 +1068,9 @@ static int process_one_file(const char *fpath, const struct stat *sb, */ int main(int argc, char *argv[]) { - int rc; + int rc, ret = 0; int maxfds; char ldirname[PATH_MAX]; - const char *arch; const char *output_file; const char *start_dirname; @@ -1142,7 +1141,8 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; + goto out_free_mapfile; } else if (rc) { goto empty_map; } @@ -1160,14 +1160,17 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; } - return 0; + + goto out_free_mapfile; empty_map: fclose(eventsfp); create_empty_mapping(output_file); free_arch_std_events(); - return 0; +out_free_mapfile: + free(mapfile); + return ret; } diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 7bd73a904b4e..d187e46c2683 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -1055,7 +1055,7 @@ def cbr(id, raw_buf): cbr = data[0] MHz = (data[4] + 500) / 1000 percent = ((cbr * 1000 / data[2]) + 5) / 10 - value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent) + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) cbr_file.write(value) def mwait(id, raw_buf): diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index 4b28c9d08d5a..04217e8f535a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -756,7 +756,8
@@ class CallGraphModel(CallGraphModelBase): " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" - " WHERE symbols.name" + match + + " WHERE calls.id <> 0" + " AND symbols.name" + match + " GROUP BY comm_id, thread_id, call_path_id" " ORDER BY comm_id, thread_id, call_path_id") @@ -950,7 +951,8 @@ class CallTreeModel(CallGraphModelBase): " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" - " WHERE symbols.name" + match + + " WHERE calls.id <> 0" + " AND symbols.name" + match + " ORDER BY comm_id, thread_id, call_time, calls.id") def FindPath(self, query): @@ -1016,6 +1018,7 @@ class TreeWindowBase(QMdiSubWindow): child = self.model.index(row, 0, parent) if child.internalPointer().dbid == dbid: found = True + self.view.setExpanded(parent, True) self.view.setCurrentIndex(child) parent = child break diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c index 016bba2c142d..55a9de311d7b 100644 --- a/tools/perf/tests/bp_account.c +++ b/tools/perf/tests/bp_account.c @@ -23,7 +23,7 @@ #include "../perf-sys.h" #include "cloexec.h" -volatile long the_var; +static volatile long the_var; static noinline int test_function(void) { diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index 166f411568a5..b5cdedd13cbc 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -45,10 +45,13 @@ volatile long the_var; #if defined (__x86_64__) extern void __test_function(volatile long *ptr); asm ( + ".pushsection .text;" ".globl __test_function\n" + ".type __test_function, @function;" "__test_function:\n" "incq (%rdi)\n" - "ret\n"); + "ret\n" + ".popsection\n"); #else static void __test_function(volatile long *ptr) { diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 74379ff1f7fa..46cd1db85bd0 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -173,6 +173,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused) ret = 0; } while (0); + perf_pmu__del_formats(&formats); test_format_dir_put(format); return ret; } diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index 3a02426db9a6..2f76d4a9de86 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -180,7 +180,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format) .data = {1, 211, 212, 213}, }; u64 regs[64]; - const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL}; + const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 }; const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL}; struct perf_sample sample = { .ip = 101, diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 7cb99b433888..c2cc42daf924 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -14,7 +14,7 @@ add_probe_vfs_getname() { if [ $had_vfs_getname -eq 1 ] ; then line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" fi } diff 
--git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh index 63a91ec473bb..045723b3d992 100755 --- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh +++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh @@ -12,7 +12,8 @@ skip_if_no_z_record() { collect_z_record() { echo "Collecting compressed record file:" - $perf_tool record -o $trace_file -g -z -F 5000 -- \ + [[ "$(uname -m)" != s390x ]] && gflag='-g' + $perf_tool record -o $trace_file $gflag -z -F 5000 -- \ dd count=500 if=/dev/urandom of=/dev/null } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 4a800499d7c3..22daf2bdf5fa 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -33,10 +33,8 @@ static int session_write_header(char *path) { struct perf_session *session; struct perf_data data = { - .file = { - .path = path, - }, - .mode = PERF_DATA_MODE_WRITE, + .path = path, + .mode = PERF_DATA_MODE_WRITE, }; session = perf_session__new(&data, false, NULL); @@ -63,10 +61,8 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) { struct perf_session *session; struct perf_data data = { - .file = { - .path = path, - }, - .mode = PERF_DATA_MODE_READ, + .path = path, + .mode = PERF_DATA_MODE_READ, }; int i; diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 22c9fc900c84..f8c44a85650b 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -91,7 +91,7 @@ EoHEADER # in tools/perf/arch archlist="" for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do - test -d arch/$arch && archlist="$archlist $arch" + test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch" done for arch in x86 $archlist generic; do diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh index 83fb24df05c9..bc6ef7bb7a5f 100755 --- a/tools/perf/trace/beauty/fsconfig.sh +++ b/tools/perf/trace/beauty/fsconfig.sh @@ -10,8 +10,7 @@ fi linux_mount=${linux_header_dir}/mount.h printf "static const char *fsconfig_cmds[] = {\n" -regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*' -egrep $regex ${linux_mount} | \ - sed -r "s/$regex/\2 \1/g" | \ - xargs printf "\t[%s] = \"%s\",\n" +ms='[[:space:]]*' +sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \ + ${linux_mount} printf "};\n" diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 88c3df24b748..514cef3a17b4 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2224,6 +2224,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser return browser->he_selection->thread; } +static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser) +{ + return browser->he_selection ? 
browser->he_selection->res_samples : NULL; +} + /* Check whether the browser is for 'top' or 'report' */ static inline bool is_report_browser(void *timer) { @@ -3170,16 +3175,16 @@ skip_annotation: &options[nr_options], NULL, NULL, evsel); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_NORMAL); + hist_browser__selected_res_sample(browser), + evsel, A_NORMAL); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_ASM); + hist_browser__selected_res_sample(browser), + evsel, A_ASM); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_SOURCE); + hist_browser__selected_res_sample(browser), + evsel, A_SOURCE); nr_options += add_switch_opt(browser, &actions[nr_options], &options[nr_options]); skip_scripting: diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 8470dfe9fe97..ae5b97427192 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -252,10 +252,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, queue->set = true; queue->tid = buffer->tid; queue->cpu = buffer->cpu; - } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) { - pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n", - queue->cpu, queue->tid, buffer->cpu, buffer->tid); - return -EINVAL; } buffer->buffer_nr = queues->next_buffer_nr++; @@ -590,7 +586,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, break; } - if (itr) + if (itr && itr->parse_snapshot_options) return itr->parse_snapshot_options(itr, opts, str); pr_err("No AUX area tracing to snapshot\n"); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index a22c1114e880..324ec0456c83 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -299,7 +299,7 @@ static void set_max_cpu_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -310,7 +310,7 @@ static void set_max_cpu_num(void) /* get the highest present cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -338,7 +338,7 @@ static void set_max_node_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -423,7 +423,7 @@ int cpu__setup_cpunode_map(void) return 0; n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); return -1; } @@ -438,7 +438,7 @@ int cpu__setup_cpunode_map(void) continue; n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); continue; } diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index f5f855fff412..f5a9cb408808 100644 --- 
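On the cpumap.c hunks above: snprintf() returns the length the fully formatted string would have had, so a truncated result can be any value >= the buffer size, and testing for equality with PATH_MAX only caught one special case. A small standalone demonstration with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            int n = snprintf(buf, sizeof(buf), "%s", "/sys/devices/system/cpu");

            /* n is 23 here, well past sizeof(buf) == 8 and never equal to
             * it; the robust truncation test is n >= (int)sizeof(buf). */
            printf("n=%d truncated=%d\n", n, n >= (int)sizeof(buf));
            return 0;
    }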
a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -94,6 +94,9 @@ struct cs_etm_queue { struct cs_etm_traceid_queue **traceid_queues; }; +/* RB tree for quick conversion between traceID and metadata pointers */ +static struct intlist *traceid_list; + static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); static int cs_etm__process_queues(struct cs_etm_auxtrace *etm); static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, @@ -363,6 +366,23 @@ struct cs_etm_packet_queue return NULL; } +static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm, + struct cs_etm_traceid_queue *tidq) +{ + struct cs_etm_packet *tmp; + + if (etm->sample_branches || etm->synth_opts.last_branch || + etm->sample_instructions) { + /* + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for + * the next incoming packet. + */ + tmp = tidq->packet; + tidq->packet = tidq->prev_packet; + tidq->prev_packet = tmp; + } +} + static void cs_etm__packet_dump(const char *pkt_string) { const char *color = PERF_COLOR_BLUE; @@ -1340,12 +1360,14 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, struct cs_etm_traceid_queue *tidq) { struct cs_etm_auxtrace *etm = etmq->etm; - struct cs_etm_packet *tmp; int ret; u8 trace_chan_id = tidq->trace_chan_id; - u64 instrs_executed = tidq->packet->instr_count; + u64 instrs_prev; - tidq->period_instructions += instrs_executed; + /* Get the instruction count remainder from the previous packet */ + instrs_prev = tidq->period_instructions; + + tidq->period_instructions += tidq->packet->instr_count; /* * Record a branch when the last instruction in @@ -1363,26 +1385,76 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, * TODO: allow period to be defined in cycles and clock time */ - /* Get number of instructions executed after the sample point */ - u64 instrs_over = tidq->period_instructions - - etm->instructions_sample_period; + /* + * The diagram below demonstrates the instruction sample + * generation flow: + * + * Instrs Instrs Instrs Instrs + * Sample(n) Sample(n+1) Sample(n+2) Sample(n+3) + * | | | | + * V V V V + * -------------------------------------------------- + * ^ ^ + * | | + * Period Period + * instructions(Pi) instructions(Pi') + * + * | | + * \---------------- -----------------/ + * V + * tidq->packet->instr_count + * + * Instrs Sample(n...) are the synthesised samples occurring + * every etm->instructions_sample_period instructions - as + * defined on the perf command line. Sample(n) is the + * last sample before the current etm packet, n+1 to n+3 + * samples are generated from the current etm packet. + * + * tidq->packet->instr_count represents the number of + * instructions in the current etm packet. + * + * Period instructions (Pi) contains the number of + * instructions executed after the sample point(n) of the + * previous etm packet. This will always be less than + * etm->instructions_sample_period. + * + * When generating new samples, instructions from two parts + * are combined: the tail of the old packet and the head of + * the newly arrived packet, producing sample(n+1); + * sample(n+2) and sample(n+3) each consume a full sample + * period's worth of instructions. After sample(n+3), the + * remaining instructions are carried over to the next packet + * and assigned to tidq->period_instructions for the next + * round of calculation.
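[Editor's illustration] The carry arithmetic described in the comment above can be seen in isolation in the following standalone sketch. It is not perf code: the period and packet sizes are invented, but the loop mirrors the new cs_etm__sample() logic (use up the carry, emit one sample per full period, carry the remainder forward).

#include <stdio.h>
#include <stdint.h>

#define SAMPLE_PERIOD 100	/* hypothetical instructions_sample_period */

int main(void)
{
	uint64_t packets[] = { 130, 250, 70 };	/* instr_count per packet (made up) */
	uint64_t period_instructions = 0;	/* remainder carried between packets */

	for (size_t p = 0; p < sizeof(packets) / sizeof(packets[0]); p++) {
		uint64_t instrs_prev = period_instructions;

		period_instructions += packets[p];
		/* First sample lands once the carried remainder is used up. */
		uint64_t offset = SAMPLE_PERIOD - instrs_prev;

		while (period_instructions >= SAMPLE_PERIOD) {
			/* -1: the sampled PC is the just-executed instruction. */
			printf("packet %zu: sample at offset %llu\n",
			       p, (unsigned long long)(offset - 1));
			offset += SAMPLE_PERIOD;
			period_instructions -= SAMPLE_PERIOD;
		}
		/* Anything left (< SAMPLE_PERIOD) carries into the next packet. */
	}
	return 0;
}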
+ */ /* - * Calculate the address of the sampled instruction (-1 as - * sample is reported as though instruction has just been - * executed, but PC has not advanced to next instruction) + * Get the initial offset into the current packet instructions; + * entry conditions ensure that instrs_prev is less than + * etm->instructions_sample_period. */ - u64 offset = (instrs_executed - instrs_over - 1); - u64 addr = cs_etm__instr_addr(etmq, trace_chan_id, - tidq->packet, offset); + u64 offset = etm->instructions_sample_period - instrs_prev; + u64 addr; - ret = cs_etm__synth_instruction_sample( - etmq, tidq, addr, etm->instructions_sample_period); - if (ret) - return ret; + while (tidq->period_instructions >= + etm->instructions_sample_period) { + /* + * Calculate the address of the sampled instruction (-1 + * as sample is reported as though instruction has just + * been executed, but PC has not advanced to next + * instruction) + */ + addr = cs_etm__instr_addr(etmq, trace_chan_id, + tidq->packet, offset - 1); + ret = cs_etm__synth_instruction_sample( + etmq, tidq, addr, + etm->instructions_sample_period); + if (ret) + return ret; - /* Carry remaining instructions into next sample period */ - tidq->period_instructions = instrs_over; + offset += etm->instructions_sample_period; + tidq->period_instructions -= + etm->instructions_sample_period; + } } if (etm->sample_branches) { @@ -1404,15 +1476,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, } } - if (etm->sample_branches || etm->synth_opts.last_branch) { - /* - * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for - * the next incoming packet. - */ - tmp = tidq->packet; - tidq->packet = tidq->prev_packet; - tidq->prev_packet = tmp; - } + cs_etm__packet_swap(etm, tidq); return 0; } @@ -1441,7 +1505,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq, { int err = 0; struct cs_etm_auxtrace *etm = etmq->etm; - struct cs_etm_packet *tmp; /* Handle start tracing packet */ if (tidq->prev_packet->sample_type == CS_ETM_EMPTY) @@ -1476,15 +1539,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq, } swap_packet: - if (etm->sample_branches || etm->synth_opts.last_branch) { - /* - * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for - * the next incoming packet. 
- */ - tmp = tidq->packet; - tidq->packet = tidq->prev_packet; - tidq->prev_packet = tmp; - } + cs_etm__packet_swap(etm, tidq); return err; } diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 650ecc2a6349..4ad925d6d799 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h @@ -114,9 +114,6 @@ enum cs_etm_isa { CS_ETM_ISA_T32, }; -/* RB tree for quick conversion between traceID and metadata pointers */ -struct intlist *traceid_list; - struct cs_etm_queue; struct cs_etm_packet { diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index 88fba2ba549f..7534455ffc6a 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -35,7 +35,7 @@ void perf_data__close_dir(struct perf_data *data) int perf_data__create_dir(struct perf_data *data, int nr) { struct perf_data_file *files = NULL; - int i, ret = -1; + int i, ret; if (WARN_ON(!data->is_dir)) return -EINVAL; @@ -51,7 +51,8 @@ int perf_data__create_dir(struct perf_data *data, int nr) for (i = 0; i < nr; i++) { struct perf_data_file *file = &files[i]; - if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0) + ret = asprintf(&file->path, "%s/data.%d", data->path, i); + if (ret < 0) goto out_err; ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR); diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index e11ddf86f2b3..ab2e130dc07a 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -47,6 +47,7 @@ char dso__symtab_origin(const struct dso *dso) [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', + [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', @@ -129,6 +130,21 @@ int dso__read_binary_type_filename(const struct dso *dso, snprintf(filename + len, size - len, "%s", dso->long_name); break; + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + /* + * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in + * /usr/lib/debug/lib when it is expected to be in + * /usr/lib/debug/usr/lib + */ + if (strlen(dso->long_name) < 9 || + strncmp(dso->long_name, "/usr/lib/", 9)) { + ret = -1; + break; + } + len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); + snprintf(filename + len, size - len, "%s", dso->long_name + 4); + break; + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: { const char *last_slash; diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index e4dddb76770d..69bb77d19164 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -30,6 +30,7 @@ enum dso_binary_type { DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, DSO_BINARY_TYPE__FEDORA_DEBUGINFO, DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__GUEST_KMODULE, diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 5544bfbd0f6c..ab34ef2c661f 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -319,6 +319,7 @@ bool die_is_func_def(Dwarf_Die *dw_die) int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr) { Dwarf_Addr base, end; + Dwarf_Attribute attr; if (!addr) return -EINVAL; @@ -326,6 +327,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr) if (dwarf_entrypc(dw_die, addr) == 0) return 0; + /* + * Since the dwarf_ranges() will return 0 if there is no + * DW_AT_ranges attribute, we should check it first. 
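[Editor's illustration] To make the DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO lookup concrete, the sketch below performs the same string surgery on an example path (the library name and buffer size are invented): prepend the debug root, then append the DSO path with its leading "/usr" stripped, so /usr/lib/... is looked up under /usr/lib/debug/lib/... instead of /usr/lib/debug/usr/lib/....

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *long_name = "/usr/lib/x86_64-linux-gnu/libc-2.31.so";
	char filename[4096];

	/* Only paths under /usr/lib/ qualify for this fallback. */
	if (strncmp(long_name, "/usr/lib/", 9) != 0)
		return 1;

	/* "+ 4" skips the "/usr" prefix, leaving "/lib/..." */
	snprintf(filename, sizeof(filename), "%s%s",
		 "/usr/lib/debug", long_name + 4);
	/* prints /usr/lib/debug/lib/x86_64-linux-gnu/libc-2.31.so */
	printf("%s\n", filename);
	return 0;
}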
+ */ + if (!dwarf_attr(dw_die, DW_AT_ranges, &attr)) + return -ENOENT; + return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0; } diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 3baca06786fb..018ecf7b6da9 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -326,11 +326,11 @@ static const char *normalize_arch(char *arch) const char *perf_env__arch(struct perf_env *env) { - struct utsname uts; char *arch_name; if (!env || !env->arch) { /* Assume local operation */ - if (uname(&uts) < 0) + static struct utsname uts = { .machine[0] = '\0', }; + if (uts.machine[0] == '\0' && uname(&uts) < 0) return NULL; arch_name = uts.machine; } else diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index fc1e5a991008..bfaa9afdb8b4 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -597,6 +597,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al, } al->sym = map__find_symbol(al->map, al->addr); + } else if (symbol_conf.dso_list) { + al->filtered |= (1 << HIST_FILTER__DSO); } if (symbol_conf.sym_list && diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index de79c735e441..505b890ac85c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -976,6 +976,10 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target) perf_evlist__set_maps(&evlist->core, cpus, threads); + /* as evlist now has references, put count here */ + perf_cpu_map__put(cpus); + perf_thread_map__put(threads); + return 0; out_delete_threads: @@ -1230,11 +1234,12 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist) goto out_put; perf_evlist__set_maps(&evlist->core, cpus, threads); -out: - return err; + + perf_thread_map__put(threads); out_put: perf_cpu_map__put(cpus); - goto out; +out: + return err; } int evlist__open(struct evlist *evlist) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index abc7fda4a0fe..9dd9e3f4ef59 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1028,12 +1028,12 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts, if (callchain && callchain->enabled && !evsel->no_aux_samples) perf_evsel__config_callchain(evsel, opts, callchain); - if (opts->sample_intr_regs) { + if (opts->sample_intr_regs && !evsel->no_aux_samples) { attr->sample_regs_intr = opts->sample_intr_regs; perf_evsel__set_sample_bit(evsel, REGS_INTR); } - if (opts->sample_user_regs) { + if (opts->sample_user_regs && !evsel->no_aux_samples) { attr->sample_regs_user |= opts->sample_user_regs; perf_evsel__set_sample_bit(evsel, REGS_USER); } @@ -1254,6 +1254,9 @@ void perf_evsel__exit(struct evsel *evsel) perf_thread_map__put(evsel->core.threads); zfree(&evsel->group_name); zfree(&evsel->name); + zfree(&evsel->pmu_name); + zfree(&evsel->per_pkg_mask); + zfree(&evsel->metric_events); perf_evsel__object.fini(evsel); } @@ -2357,6 +2360,10 @@ bool perf_evsel__fallback(struct evsel *evsel, int err, char *new_name; const char *sep = ":"; + /* If event has exclude user then don't exclude kernel. */ + if (evsel->core.attr.exclude_user) + return false; + /* Is there already the separator in the name. 
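[Editor's illustration] The perf_env__arch() change above avoids re-running uname() on every call by caching the result in a function-local static, keyed on whether uts.machine has been filled in yet. A minimal standalone sketch of the same pattern (not perf code; note the static is not thread-safe, which is acceptable for this usage):

#include <stdio.h>
#include <sys/utsname.h>

/* Return the machine string, calling uname() at most once. */
static const char *cached_machine(void)
{
	static struct utsname uts = { .machine[0] = '\0', };

	/* First call populates the static; later calls reuse it. */
	if (uts.machine[0] == '\0' && uname(&uts) < 0)
		return NULL;
	return uts.machine;
}

int main(void)
{
	printf("%s\n", cached_machine());
	printf("%s\n", cached_machine());	/* no second syscall */
	return 0;
}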
*/ if (strchr(name, '/') || strchr(name, ':')) diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y index f9a20a39b64a..7d226241f1d7 100644 --- a/tools/perf/util/expr.y +++ b/tools/perf/util/expr.y @@ -12,7 +12,8 @@ #define MAXIDLEN 256 %} -%pure-parser +%define api.pure full + %parse-param { double *final_val } %parse-param { struct parse_ctx *ctx } %parse-param { const char **pp } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index f8ccfd6be0ee..7f53b63088b2 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1164,6 +1164,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) return 0; if (err == -EAGAIN || intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) { + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; if (intel_pt_fup_event(decoder)) return 0; return -EAGAIN; @@ -1744,6 +1745,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) break; case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + case INTEL_PT_VMCS: case INTEL_PT_MNT: case INTEL_PT_PAD: @@ -1942,17 +1946,13 @@ next: } if (decoder->set_fup_mwait) no_tip = true; + if (no_tip) + decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP; + else + decoder->pkt_state = INTEL_PT_STATE_FUP; err = intel_pt_walk_fup(decoder); - if (err != -EAGAIN) { - if (err) - return err; - if (no_tip) - decoder->pkt_state = - INTEL_PT_STATE_FUP_NO_TIP; - else - decoder->pkt_state = INTEL_PT_STATE_FUP; - return 0; - } + if (err != -EAGAIN) + return err; if (no_tip) { no_tip = false; break; @@ -1980,8 +1980,10 @@ next: * possibility of another CBR change that gets caught up * in the PSB+. */ - if (decoder->cbr != decoder->cbr_seen) + if (decoder->cbr != decoder->cbr_seen) { + decoder->state.type = 0; return 0; + } break; case INTEL_PT_PIP: @@ -2022,8 +2024,10 @@ next: case INTEL_PT_CBR: intel_pt_calc_cbr(decoder); - if (decoder->cbr != decoder->cbr_seen) + if (decoder->cbr != decoder->cbr_seen) { + decoder->state.type = 0; return 0; + } break; case INTEL_PT_MODE_EXEC: @@ -2599,15 +2603,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_walk_tip(decoder); break; case INTEL_PT_STATE_FUP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_fup_tip(decoder); - else if (!err) - decoder->pkt_state = INTEL_PT_STATE_FUP; break; case INTEL_PT_STATE_FUP_NO_TIP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_trace(decoder); @@ -2637,9 +2637,18 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) } if (intel_pt_sample_time(decoder->pkt_state)) { intel_pt_update_sample_time(decoder); - if (decoder->sample_cyc) + if (decoder->sample_cyc) { decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt; + decoder->state.flags |= INTEL_PT_SAMPLE_IPC; + decoder->sample_cyc = false; + } } + /* + * When using only TSC/MTC to compute cycles, IPC can be + * sampled as soon as the cycle count changes. 
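[Editor's illustration] The Intel PT IPC changes reduce to delta arithmetic gated by the new INTEL_PT_SAMPLE_IPC flag: cycles are only attributed when the decoder marks the count as fresh. A simplified sketch of how a consumer derives IPC from the running counters (the struct and the numbers are illustrative, with field names borrowed from the diff):

#include <stdio.h>
#include <stdint.h>

#define SAMPLE_IPC (1 << 4)	/* mirrors INTEL_PT_SAMPLE_IPC */

struct state {
	unsigned int flags;
	uint64_t tot_insn_cnt, tot_cyc_cnt;
};

int main(void)
{
	struct state st = { SAMPLE_IPC, 1200, 1000 };
	uint64_t last_insn = 200, last_cyc = 250;	/* counts at previous sample */
	uint64_t cyc = 0, insn;

	/* Only attribute cycles when the decoder says the count is fresh. */
	if (st.flags & SAMPLE_IPC)
		cyc = st.tot_cyc_cnt - last_cyc;
	if (cyc) {
		insn = st.tot_insn_cnt - last_insn;
		printf("IPC = %llu/%llu = %.2f\n",
		       (unsigned long long)insn, (unsigned long long)cyc,
		       (double)insn / (double)cyc);
	}
	return 0;
}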
+ */ + if (!decoder->have_cyc) + decoder->state.flags |= INTEL_PT_SAMPLE_IPC; } decoder->state.timestamp = decoder->sample_timestamp; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h index e289e463d635..7396da0fa3a7 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h @@ -17,6 +17,7 @@ #define INTEL_PT_ABORT_TX (1 << 1) #define INTEL_PT_ASYNC (1 << 2) #define INTEL_PT_FUP_IP (1 << 3) +#define INTEL_PT_SAMPLE_IPC (1 << 4) enum intel_pt_sample_type { INTEL_PT_BRANCH = 1 << 0, diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index a1c9eb6d4f40..d0e0ce11faf5 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -974,6 +974,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, if (queue->tid == -1 || pt->have_sched_switch) { ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); + if (ptq->tid == -1) + ptq->pid = -1; thread__zput(ptq->thread); } @@ -1302,7 +1304,8 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) sample.branch_stack = (struct branch_stack *)&dummy_bs; } - sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; + if (ptq->state->flags & INTEL_PT_SAMPLE_IPC) + sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; if (sample.cyc_cnt) { sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt; ptq->last_br_insn_cnt = ptq->ipc_insn_cnt; @@ -1364,7 +1367,8 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) sample.stream_id = ptq->pt->instructions_id; sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; - sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; + if (ptq->state->flags & INTEL_PT_SAMPLE_IPC) + sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; if (sample.cyc_cnt) { sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt; ptq->last_in_insn_cnt = ptq->ipc_insn_cnt; @@ -1707,6 +1711,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) u64 sample_type = evsel->core.attr.sample_type; u64 id = evsel->core.id[0]; u8 cpumode; + u64 regs[8 * sizeof(sample.intr_regs.mask)]; if (intel_pt_skip_event(pt)) return 0; @@ -1756,8 +1761,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) } if (sample_type & PERF_SAMPLE_REGS_INTR && - items->mask[INTEL_PT_GP_REGS_POS]) { - u64 regs[sizeof(sample.intr_regs.mask)]; + (items->mask[INTEL_PT_GP_REGS_POS] || + items->mask[INTEL_PT_XMM_POS])) { u64 regs_mask = evsel->core.attr.sample_regs_intr; u64 *pos; @@ -1898,14 +1903,8 @@ static int intel_pt_sample(struct intel_pt_queue *ptq) ptq->have_sample = false; - if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) { - /* - * Cycle count and instruction count only go together to create - * a valid IPC ratio when the cycle count changes. 
- */ - ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; - ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; - } + ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; + ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; /* * Do PEBS first to allow for the possibility that the PEBS timestamp @@ -2487,10 +2486,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, tid = sample->tid; } - if (tid == -1) { - pr_err("context_switch event has no tid\n"); - return -EINVAL; - } + if (tid == -1) + intel_pt_log("context_switch event has no tid\n"); intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time, diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index ea277ce63a46..767fe1bfd922 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2587,7 +2587,7 @@ int machines__for_each_thread(struct machines *machines, pid_t machine__get_current_tid(struct machine *machine, int cpu) { - int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS); + int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS); if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid) return -1; @@ -2599,7 +2599,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, pid_t tid) { struct thread *thread; - int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS); + int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS); if (cpu < 0) return -EINVAL; diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 4b07b1cc22dc..571e99c908a0 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -93,8 +93,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) if (!strncmp(filename, "/system/lib/", 12)) { char *ndk, *app; const char *arch; - size_t ndk_length; - size_t app_length; + int ndk_length, app_length; ndk = getenv("NDK_ROOT"); app = getenv("APP_PLATFORM"); @@ -122,8 +121,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) if (new_length > PATH_MAX) return false; snprintf(newfilename, new_length, - "%s/platforms/%s/arch-%s/usr/lib/%s", - ndk, app, arch, libname); + "%.*s/platforms/%.*s/arch-%s/usr/lib/%s", + ndk_length, ndk, app_length, app, arch, libname); return true; } diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c index 797d86a1ab09..c84f5841c7ab 100644 --- a/tools/perf/util/mem2node.c +++ b/tools/perf/util/mem2node.c @@ -1,5 +1,6 @@ #include <errno.h> #include <inttypes.h> +#include <asm/bug.h> #include <linux/bitmap.h> #include <linux/kernel.h> #include <linux/zalloc.h> @@ -95,7 +96,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env) /* Cut unused entries, due to merging. 
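[Editor's illustration] The mem2node change just below guards a shrinking realloc(); the subtlety is that realloc() may legitimately return NULL when the new size is 0, and on failure the old block remains valid. A standalone sketch of the safe pattern, with made-up sizes:

#include <stdlib.h>

int main(void)
{
	size_t used = 3;	/* entries actually kept after merging */
	int *entries = calloc(8, sizeof(*entries));
	if (!entries)
		return 1;

	/* Shrink to the used size; keep the old block if realloc fails. */
	int *tmp = realloc(entries, used * sizeof(*entries));
	if (tmp || used == 0)	/* NULL is expected when used == 0 */
		entries = tmp;

	free(entries);	/* free(NULL) is a no-op */
	return 0;
}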
*/ tmp_entries = realloc(entries, sizeof(*entries) * j); - if (tmp_entries) + if (tmp_entries || WARN_ON_ONCE(j == 0)) entries = tmp_entries; for (i = 0; i < j; i++) { diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 940a6e7a6854..7753c3091478 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -174,6 +174,7 @@ static int metricgroup__setup_events(struct list_head *groups, if (!evsel) { pr_debug("Cannot resolve %s: %s\n", eg->metric_name, eg->metric_expr); + free(metric_events); continue; } for (i = 0; i < eg->idnum; i++) @@ -181,11 +182,13 @@ static int metricgroup__setup_events(struct list_head *groups, me = metricgroup__lookup(metric_events_list, evsel, true); if (!me) { ret = -ENOMEM; + free(metric_events); break; } expr = malloc(sizeof(struct metric_expr)); if (!expr) { ret = -ENOMEM; + free(metric_events); break; } expr->metric_expr = eg->metric_expr; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 422ad1888e74..2d651c93b476 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -370,7 +370,7 @@ static int add_event_tool(struct list_head *list, int *idx, return -ENOMEM; evsel->tool_event = tool_event; if (tool_event == PERF_TOOL_DURATION_TIME) - evsel->unit = strdup("ns"); + evsel->unit = "ns"; return 0; } @@ -1344,7 +1344,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats, NULL); if (evsel) { - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; return 0; } else { @@ -1385,7 +1385,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel->snapshot = info.snapshot; evsel->metric_expr = info.metric_expr; evsel->metric_name = info.metric_name; - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; evsel->percore = config_term_percore(&evsel->config_terms); } @@ -1505,12 +1505,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, * event. That can be used to distinguish the leader from * other members, even they have the same event name. 
*/ - if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) { + if ((leader != evsel) && + !strcmp(leader->pmu_name, evsel->pmu_name)) { is_leader = false; continue; } - /* The name is always alias name */ - WARN_ON(strcmp(leader->name, evsel->name)); /* Store the leader event for each PMU */ leaders[nr_pmu++] = (uintptr_t) evsel; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 48126ae4cd13..776d77093a19 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -1,4 +1,4 @@ -%pure-parser +%define api.pure full %parse-param {void *_parse_state} %parse-param {void *scanner} %lex-param {void* scanner} diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c index ef46c2848808..869ef7e22bd9 100644 --- a/tools/perf/util/parse-regs-options.c +++ b/tools/perf/util/parse-regs-options.c @@ -52,7 +52,7 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr) } fputc('\n', stderr); /* just printing available regs */ - return -1; + goto error; } for (r = sample_reg_masks; r->name; r++) { if ((r->mask & mask) && !strcasecmp(s, r->name)) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 5608da82ad23..628a6d5a5b38 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1294,6 +1294,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) set_bit(b, bits); } +void perf_pmu__del_formats(struct list_head *formats) +{ + struct perf_pmu_format *fmt, *tmp; + + list_for_each_entry_safe(fmt, tmp, formats, list) { + list_del(&fmt->list); + free(fmt->name); + free(fmt); + } +} + static int sub_non_neg(int a, int b) { if (b > a) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index f36ade6df76d..9570d9b26250 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -81,6 +81,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); int perf_pmu__format_parse(char *dir, struct list_head *head); +void perf_pmu__del_formats(struct list_head *formats); struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c index 599a1543871d..13fdc51c61d9 100644 --- a/tools/perf/util/print_binary.c +++ b/tools/perf/util/print_binary.c @@ -50,7 +50,7 @@ int is_printable_array(char *p, unsigned int len) len--; - for (i = 0; i < len; i++) { + for (i = 0; i < len && p[i]; i++) { if (!isprint(p[i]) && !isspace(p[i])) return 0; } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 91cab5f669d2..a5cb1a3a1064 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -102,7 +102,7 @@ void exit_probe_symbol_maps(void) symbol__exit(); } -static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) +static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap) { /* kmap->ref_reloc_sym should be set if host_machine is initialized */ struct kmap *kmap; @@ -114,6 +114,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) kmap = map__kmap(map); if (!kmap) return NULL; + + if (pmap) + *pmap = map; + return kmap->ref_reloc_sym; } @@ -125,7 +129,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, struct map *map; /* ref_reloc_sym is just a label. 
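[Editor's illustration] The parse-events fix above is a classic C pitfall: once each evsel strdup()s its own pmu_name, pointer equality (==) no longer identifies equal names, and strcmp() must be used instead. A minimal demonstration (the PMU name is just an example string):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *a = strdup("uncore_imc");
	char *b = strdup("uncore_imc");

	/* Distinct heap copies: == compares addresses, not contents. */
	printf("a == b      : %s\n", (a == b) ? "equal" : "different");
	printf("strcmp(a, b): %s\n", strcmp(a, b) == 0 ? "equal" : "different");

	free(a);
	free(b);
	return 0;
}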
Need a special fix*/ - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (reloc_sym && strcmp(name, reloc_sym->name) == 0) *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; else { @@ -232,21 +236,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) static bool kprobe_blacklist__listed(unsigned long address); static bool kprobe_warn_out_range(const char *symbol, unsigned long address) { - u64 etext_addr = 0; - int ret; + struct map *map; + bool ret = false; - /* Get the address of _etext for checking non-probable text symbol */ - ret = kernel_get_symbol_address_by_name("_etext", &etext_addr, - false, false); - - if (ret == 0 && etext_addr < address) - pr_warning("%s is out of .text, skip it.\n", symbol); - else if (kprobe_blacklist__listed(address)) + map = kernel_get_module_map(NULL); + if (map) { + ret = address <= map->start || map->end < address; + if (ret) + pr_warning("%s is out of .text, skip it.\n", symbol); + map__put(map); + } + if (!ret && kprobe_blacklist__listed(address)) { pr_warning("%s is blacklisted function, skip it.\n", symbol); - else - return false; + ret = true; + } - return true; + return ret; } /* @@ -745,6 +750,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, int ntevs) { struct ref_reloc_sym *reloc_sym; + struct map *map; char *tmp; int i, skipped = 0; @@ -753,7 +759,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, return post_process_offline_probe_trace_events(tevs, ntevs, symbol_conf.vmlinux_name); - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(&map); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); return -EINVAL; @@ -764,9 +770,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, continue; if (tevs[i].point.retprobe && !kretprobe_offset_is_supported()) continue; - /* If we found a wrong one, mark it by NULL symbol */ + /* + * If we found a wrong one, mark it by NULL symbol. + * Since addresses in debuginfo is same as objdump, we need + * to convert it to addresses on memory. 
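[Editor's illustration] The comment above notes that addresses found in debuginfo match objdump's view of the file and must be translated to runtime addresses before the range check. A deliberately simplified sketch of that translation; perf's real map__objdump_2mem() also goes through the map's own mapping helpers, and the addresses here are invented:

#include <stdio.h>
#include <stdint.h>

/* Toy map: the image was linked at link_addr but loaded at load_addr. */
struct toy_map {
	uint64_t link_addr;	/* address objdump/debuginfo reports */
	uint64_t load_addr;	/* where the image actually sits in memory */
};

static uint64_t objdump_2mem(const struct toy_map *m, uint64_t ip)
{
	return ip - m->link_addr + m->load_addr;
}

int main(void)
{
	struct toy_map m = { 0xffffffff81000000ULL, 0xffffffffa2000000ULL };
	uint64_t ip = 0xffffffff81234567ULL;	/* symbol address per debuginfo */

	printf("runtime address: %#llx\n",
	       (unsigned long long)objdump_2mem(&m, ip));
	return 0;
}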
+ */ if (kprobe_warn_out_range(tevs[i].point.symbol, - tevs[i].point.address)) { + map__objdump_2mem(map, tevs[i].point.address))) { tmp = NULL; skipped++; } else { @@ -1757,8 +1767,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) fmt1_str = strtok_r(argv0_str, ":", &fmt); fmt2_str = strtok_r(NULL, "/", &fmt); fmt3_str = strtok_r(NULL, " \t", &fmt); - if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL - || fmt3_str == NULL) { + if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) { semantic_error("Failed to parse event name: %s\n", argv[0]); ret = -EINVAL; goto out; @@ -2923,7 +2932,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, /* Note that the symbols in the kmodule are not relocated */ if (!pev->uprobes && !pev->target && (!pp->retprobe || kretprobe_offset_is_supported())) { - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); ret = -EINVAL; diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index bf50f464234f..f778f8e7e65a 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -777,7 +777,7 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note, const char *sdtgrp) { struct strbuf buf; - char *ret = NULL, **args; + char *ret = NULL; int i, args_count, err; unsigned long long ref_ctr_offset; @@ -799,12 +799,19 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note, goto out; if (note->args) { - args = argv_split(note->args, &args_count); + char **args = argv_split(note->args, &args_count); + + if (args == NULL) + goto error; for (i = 0; i < args_count; ++i) { - if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) + if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) { + argv_free(args); goto error; + } } + + argv_free(args); } out: diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index aaf3b24fffa4..849d8d2e5976 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -101,6 +101,7 @@ enum dso_binary_type distro_dwarf_types[] = { DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; @@ -1361,7 +1362,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, tf.ntevs = 0; ret = debuginfo__find_probes(dbg, &tf.pf); - if (ret < 0) { + if (ret < 0 || tf.ntevs == 0) { for (i = 0; i < tf.ntevs; i++) clear_probe_trace_event(&tf.tevs[i]); zfree(tevs); diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 93c03b39cd9c..3b02c3f1b289 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -1587,7 +1587,6 @@ static void _free_command_line(wchar_t **command_line, int num) static int python_start_script(const char *script, int argc, const char **argv) { struct tables *tables = &tables_global; - PyMODINIT_FUNC (*initfunc)(void); #if PY_MAJOR_VERSION < 3 const char **command_line; #else @@ -1602,20 +1601,18 @@ static int python_start_script(const char *script, int argc, const char **argv) FILE *fp; #if PY_MAJOR_VERSION < 3 - initfunc = initperf_trace_context; command_line = malloc((argc + 1) * sizeof(const char *)); command_line[0] = script; for (i = 1; i < argc + 1; i++) command_line[i] = argv[i - 1]; + 
PyImport_AppendInittab(name, initperf_trace_context); #else - initfunc = PyInit_perf_trace_context; command_line = malloc((argc + 1) * sizeof(wchar_t *)); command_line[0] = Py_DecodeLocale(script, NULL); for (i = 1; i < argc + 1; i++) command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); + PyImport_AppendInittab(name, PyInit_perf_trace_context); #endif - - PyImport_AppendInittab(name, initfunc); Py_Initialize(); #if PY_MAJOR_VERSION < 3 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 5c172845fa5a..56f3039fe2a7 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -88,7 +88,7 @@ static int perf_session__process_compressed_event(struct perf_session *session, session->decomp_last = decomp; } - pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size); + pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size); return 0; } @@ -588,6 +588,7 @@ static void perf_event__mmap2_swap(union perf_event *event, event->mmap2.maj = bswap_32(event->mmap2.maj); event->mmap2.min = bswap_32(event->mmap2.min); event->mmap2.ino = bswap_64(event->mmap2.ino); + event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); if (sample_id_all) { void *data = &event->mmap2.filename; @@ -2313,7 +2314,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, { int i, err = -1; struct perf_cpu_map *map; - int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS); + int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS); for (i = 0; i < PERF_TYPE_MAX; ++i) { struct evsel *evsel; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 43d1d410854a..4027906fd3e3 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2788,7 +2788,7 @@ static char *prefix_if_not_in(const char *pre, char *str) return str; if (asprintf(&n, "%s,%s", pre, str) < 0) - return NULL; + n = NULL; free(str); return n; diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index 6ccf6f6d09df..5b7d6c16d33f 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -193,16 +193,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data) bfd_vma pc, vma; bfd_size_type size; struct a2l_data *a2l = data; + flagword flags; if (a2l->found) return; - if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0) +#ifdef bfd_get_section_flags + flags = bfd_get_section_flags(abfd, section); +#else + flags = bfd_section_flags(section); +#endif + if ((flags & SEC_ALLOC) == 0) return; pc = a2l->addr; +#ifdef bfd_get_section_vma vma = bfd_get_section_vma(abfd, section); +#else + vma = bfd_section_vma(section); +#endif +#ifdef bfd_get_section_size size = bfd_get_section_size(section); +#else + size = bfd_section_size(section); +#endif if (pc < vma || pc >= vma + size) return; diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index ed3b0ac2f785..93147cc40162 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -316,13 +316,10 @@ static int first_shadow_cpu(struct perf_stat_config *config, struct evlist *evlist = evsel->evlist; int i; - if (!config->aggr_get_id) - return 0; - if (config->aggr_mode == AGGR_NONE) return id; - if (config->aggr_mode == AGGR_GLOBAL) + if (!config->aggr_get_id) return 0; for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) { @@ -661,7 +658,7 @@ static void print_aggr(struct perf_stat_config *config, int s; bool first; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; 
aggr_update_shadow(config, evlist); @@ -1140,7 +1137,7 @@ static void print_percore(struct perf_stat_config *config, int s; bool first = true; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; for (s = 0; s < config->aggr_map->nr; s++) { diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index ebdd130557fb..5156aa971fbb 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -367,8 +367,10 @@ int perf_stat_process_counter(struct perf_stat_config *config, * interval mode, otherwise overall avg running * averages will be shown for each interval. */ - if (config->interval) - init_stats(ps->res_stats); + if (config->interval) { + for (i = 0; i < 3; i++) + init_stats(&ps->res_stats[i]); + } if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 66f4be1df573..2ec0a32da579 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1449,6 +1449,7 @@ struct kcore_copy_info { u64 first_symbol; u64 last_symbol; u64 first_module; + u64 first_module_symbol; u64 last_module_symbol; size_t phnum; struct list_head phdrs; @@ -1525,6 +1526,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0; if (strchr(name, '[')) { + if (!kci->first_module_symbol || start < kci->first_module_symbol) + kci->first_module_symbol = start; if (start > kci->last_module_symbol) kci->last_module_symbol = start; return 0; @@ -1722,6 +1725,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, kci->etext += page_size; } + if (kci->first_module_symbol && + (!kci->first_module || kci->first_module_symbol < kci->first_module)) + kci->first_module = kci->first_module_symbol; + kci->first_module = round_down(kci->first_module, page_size); if (kci->last_module_symbol) { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a8f80e427674..901ad7f6f4dc 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -79,6 +79,7 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; @@ -1220,6 +1221,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) m->end = old_map->start; list_add_tail(&m->node, &merged); + new_map->pgoff += old_map->end - new_map->start; new_map->start = old_map->end; } } else { @@ -1240,6 +1242,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) * |new......| -> |new...| * |old....| -> |old....| */ + new_map->pgoff += old_map->end - new_map->start; new_map->start = old_map->end; } } @@ -1530,6 +1533,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: return !kmod && dso->kernel == DSO_TYPE_USER; diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c index 35c936ce33ef..2664fb65e47a 100644 --- a/tools/perf/util/symbol_fprintf.c +++ b/tools/perf/util/symbol_fprintf.c @@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) { pos = 
rb_entry(nd, struct symbol_name_rb_node, rb_node); - fprintf(fp, "%s\n", pos->sym.name); + ret += fprintf(fp, "%s\n", pos->sym.name); } return ret; diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 8593d3c200c6..0116b0c06e97 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -361,6 +361,7 @@ static int read_saved_cmdline(struct tep_handle *pevent) pr_debug("error reading saved cmdlines\n"); goto out; } + buf[ret] = '\0'; parse_saved_cmdline(pevent, buf, size); ret = 0; diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c index d2202392ffdb..48dd2b018c47 100644 --- a/tools/perf/util/zstd.c +++ b/tools/perf/util/zstd.c @@ -99,7 +99,7 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size while (input.pos < input.size) { ret = ZSTD_decompressStream(data->dstream, &output, &input); if (ZSTD_isError(ret)) { - pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n", + pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n", src_size, output.size, dst_size, ZSTD_getErrorName(ret)); break; } diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index 54a2857c2510..331f6d30f472 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM} CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- CROSS_COMPILE ?= $(CROSS) LD = $(CC) -HOSTCC = gcc # check if compiler option is supported cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;} diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py index 2d6d342b148f..1351975d0769 100755 --- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py +++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -11,11 +11,11 @@ then this utility enables and collects trace data for a user specified interval and generates performance plots. Prerequisites: - Python version 2.7.x + Python version 2.7.x or higher gnuplot 5.0 or higher - gnuplot-py 1.8 + gnuplot-py 1.8 or higher (Most of the distributions have these required packages. They may be called - gnuplot-py, phython-gnuplot. ) + gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... ) HWP (Hardware P-States are disabled) Kernel config for Linux trace is enabled @@ -181,7 +181,7 @@ def plot_pstate_cpu_with_sample(): g_plot('set xlabel "Samples"') g_plot('set ylabel "P-State"') g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -198,7 +198,7 @@ def plot_pstate_cpu(): # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. 
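[Editor's illustration] The trace-event-read fix above ("buf[ret] = '\0';") guards another recurring C hazard: read()-style I/O returns raw bytes with no terminating NUL, so a buffer about to be parsed as a string must be terminated explicitly and allocated one byte larger. A standalone sketch (reading /proc/self/cmdline merely to have something to read; the size is invented):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	int fd = open("/proc/self/cmdline", O_RDONLY);
	if (fd < 0)
		return 1;

	size_t size = 4096;
	char *buf = malloc(size + 1);	/* +1 leaves room for the NUL */
	if (!buf)
		return 1;

	ssize_t ret = read(fd, buf, size);
	if (ret < 0)
		return 1;
	buf[ret] = '\0';	/* safe: ret <= size, and buf holds size + 1 bytes */

	/* cmdline args are NUL-separated, so this prints the first one. */
	printf("%s\n", buf);
	free(buf);
	close(fd);
	return 0;
}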
# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' # - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -212,7 +212,7 @@ def plot_load_cpu(): g_plot('set ylabel "CPU load (percent)"') g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -226,7 +226,7 @@ def plot_frequency_cpu(): g_plot('set ylabel "CPU Frequency (GHz)"') g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -241,7 +241,7 @@ def plot_duration_cpu(): g_plot('set ylabel "Timer Duration (MilliSeconds)"') g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -255,7 +255,7 @@ def plot_scaled_cpu(): g_plot('set ylabel "Scaled Busy (Unitless)"') g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -269,7 +269,7 @@ def plot_boost_cpu(): g_plot('set ylabel "CPU IO Boost (percent)"') g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -283,7 +283,7 @@ def plot_ghz_cpu(): g_plot('set ylabel "TSC Frequency (GHz)"') g_plot('set title 
"{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 6d2f3a1b2249..812fc97bb1a9 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld) $(call allow-override,CXX,$(CROSS_COMPILE)g++) $(call allow-override,STRIP,$(CROSS_COMPILE)strip) +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else +HOSTAR ?= ar +HOSTCC ?= gcc +HOSTLD ?= ld +endif + ifeq ($(CC_NO_CLANG), 1) EXTRA_WARNINGS += -Wstrict-aliasing=3 endif diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 42b6cd41d2ea..fe587a959463 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2008,7 +2008,7 @@ sub reboot_to { if ($reboot_type eq "grub") { run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; - } elsif ($reboot_type eq "grub2") { + } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { run_ssh "$grub_reboot $grub_number"; } elsif ($reboot_type eq "syslinux") { run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; @@ -4197,7 +4197,12 @@ sub do_send_mail { $mail_command =~ s/\$SUBJECT/$subject/g; $mail_command =~ s/\$MESSAGE/$message/g; - run_command $mail_command; + my $ret = run_command $mail_command; + if (!$ret && defined($file)) { + # try again without the file + $message .= "\n\n*** FAILED TO SEND LOG ***\n\n"; + do_send_email($subject, $message); + } } sub send_email { diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index c4a9196d794c..cb80d3b81179 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild @@ -21,8 +21,8 @@ DRIVERS := ../../../drivers NVDIMM_SRC := $(DRIVERS)/nvdimm ACPI_SRC := $(DRIVERS)/acpi/nfit DAX_SRC := $(DRIVERS)/dax -ccflags-y := -I$(src)/$(NVDIMM_SRC)/ -ccflags-y += -I$(src)/$(ACPI_SRC)/ +ccflags-y := -I$(srctree)/drivers/nvdimm/ +ccflags-y += -I$(srctree)/drivers/acpi/nfit/ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild index fb3c3d7cdb9b..75baebf8f4ba 100644 --- a/tools/testing/nvdimm/test/Kbuild +++ b/tools/testing/nvdimm/test/Kbuild @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I$(src)/../../../../drivers/nvdimm/ -ccflags-y += -I$(src)/../../../../drivers/acpi/nfit/ +ccflags-y := -I$(srctree)/drivers/nvdimm/ +ccflags-y += -I$(srctree)/drivers/acpi/nfit/ obj-m += nfit_test.o obj-m += nfit_test_iomap.o diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index bf6422a6af7f..a8ee5c4d41eb 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -3164,7 +3164,9 @@ static __init int nfit_test_init(void) mcsafe_test(); dax_pmem_test(); dax_pmem_core_test(); +#ifdef CONFIG_DEV_DAX_PMEM_COMPAT dax_pmem_compat_test(); +#endif nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm); diff --git a/tools/testing/radix-tree/Makefile 
b/tools/testing/radix-tree/Makefile index 397d6b612502..aa6abfe0749c 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu TARGETS = main idr-test multiorder xarray CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ - regression4.o \ - tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o + regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \ + iteration_check_2.o benchmark.o ifndef SHIFT SHIFT=3 diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 8995092d541e..6ce7460f3c7a 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -301,16 +301,20 @@ void idr_find_test_1(int anchor_id, int throbber_id) pthread_t throbber; time_t start = time(NULL); - pthread_create(&throbber, NULL, idr_throbber, &throbber_id); - BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id, anchor_id + 1, GFP_KERNEL) != anchor_id); + pthread_create(&throbber, NULL, idr_throbber, &throbber_id); + + rcu_read_lock(); do { int id = 0; void *entry = idr_get_next(&find_idr, &id); + rcu_read_unlock(); BUG_ON(entry != xa_mk_value(id)); + rcu_read_lock(); } while (time(NULL) < start + 11); + rcu_read_unlock(); pthread_join(throbber, NULL); @@ -523,8 +527,27 @@ static void *ida_random_fn(void *arg) return NULL; } +static void *ida_leak_fn(void *arg) +{ + struct ida *ida = arg; + time_t s = time(NULL); + int i, ret; + + rcu_register_thread(); + + do for (i = 0; i < 1000; i++) { + ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL); + if (ret >= 0) + ida_free(ida, 128); + } while (time(NULL) < s + 2); + + rcu_unregister_thread(); + return NULL; +} + void ida_thread_tests(void) { + DEFINE_IDA(ida); pthread_t threads[20]; int i; @@ -536,6 +559,16 @@ void ida_thread_tests(void) while (i--) pthread_join(threads[i], NULL); + + for (i = 0; i < ARRAY_SIZE(threads); i++) + if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) { + perror("creating ida thread"); + exit(1); + } + + while (i--) + pthread_join(threads[i], NULL); + assert(ida_is_empty(&ida)); } void ida_tests(void) @@ -548,6 +581,7 @@ void ida_tests(void) int __weak main(void) { + rcu_register_thread(); radix_tree_init(); idr_checks(); ida_tests(); @@ -555,5 +589,6 @@ int __weak main(void) rcu_barrier(); if (nr_allocated) printf("nr_allocated = %d\n", nr_allocated); + rcu_unregister_thread(); return 0; } diff --git a/tools/testing/radix-tree/iteration_check_2.c b/tools/testing/radix-tree/iteration_check_2.c new file mode 100644 index 000000000000..aac5c50a3674 --- /dev/null +++ b/tools/testing/radix-tree/iteration_check_2.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * iteration_check_2.c: Check that deleting a tagged entry doesn't cause + * an RCU walker to finish early. 
+ * Copyright (c) 2020 Oracle + * Author: Matthew Wilcox <willy@infradead.org> + */ +#include <pthread.h> +#include "test.h" + +static volatile bool test_complete; + +static void *iterator(void *arg) +{ + XA_STATE(xas, arg, 0); + void *entry; + + rcu_register_thread(); + + while (!test_complete) { + xas_set(&xas, 0); + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) + ; + rcu_read_unlock(); + assert(xas.xa_index >= 100); + } + + rcu_unregister_thread(); + return NULL; +} + +static void *throbber(void *arg) +{ + struct xarray *xa = arg; + + rcu_register_thread(); + + while (!test_complete) { + int i; + + for (i = 0; i < 100; i++) { + xa_store(xa, i, xa_mk_value(i), GFP_KERNEL); + xa_set_mark(xa, i, XA_MARK_0); + } + for (i = 0; i < 100; i++) + xa_erase(xa, i); + } + + rcu_unregister_thread(); + return NULL; +} + +void iteration_test2(unsigned test_duration) +{ + pthread_t threads[2]; + DEFINE_XARRAY(array); + int i; + + printv(1, "Running iteration test 2 for %d seconds\n", test_duration); + + test_complete = false; + + xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL); + xa_set_mark(&array, 100, XA_MARK_0); + + if (pthread_create(&threads[0], NULL, iterator, &array)) { + perror("create iterator thread"); + exit(1); + } + if (pthread_create(&threads[1], NULL, throbber, &array)) { + perror("create throbber thread"); + exit(1); + } + + sleep(test_duration); + test_complete = true; + + for (i = 0; i < 2; i++) { + if (pthread_join(threads[i], NULL)) { + perror("pthread_join"); + exit(1); + } + } + + xa_destroy(&array); +} diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 7a22d6e3732e..f2cbc8e5b97c 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -311,6 +311,7 @@ int main(int argc, char **argv) regression4_test(); iteration_test(0, 10 + 90 * long_run); iteration_test(7, 10 + 90 * long_run); + iteration_test2(10 + 90 * long_run); single_thread_tests(long_run); /* Free any remaining preallocated nodes */ diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 9eae0fb5a67d..e00520cc6349 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -224,7 +224,9 @@ void multiorder_checks(void) int __weak main(void) { + rcu_register_thread(); radix_tree_init(); multiorder_checks(); + rcu_unregister_thread(); return 0; } diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 1ee4b2c0ad10..34dab4d18744 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -34,6 +34,7 @@ void xarray_tests(void); void tag_check(void); void multiorder_checks(void); void iteration_test(unsigned order, unsigned duration); +void iteration_test2(unsigned duration); void benchmark(void); void idr_checks(void); void ida_tests(void); diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c index e61e43efe463..f20e12cbbfd4 100644 --- a/tools/testing/radix-tree/xarray.c +++ b/tools/testing/radix-tree/xarray.c @@ -25,11 +25,13 @@ void xarray_tests(void) int __weak main(void) { + rcu_register_thread(); radix_tree_init(); xarray_tests(); radix_tree_cpu_dead(1); rcu_barrier(); if (nr_allocated) printf("nr_allocated = %d\n", nr_allocated); + rcu_unregister_thread(); return 0; } diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 612f6757015d..6ede894d5c50 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ 
-40,6 +40,7 @@ TARGETS += ptrace TARGETS += rseq TARGETS += rtc TARGETS += seccomp +TARGETS += sgx TARGETS += sigaltstack TARGETS += size TARGETS += sparc64 diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 7470327edcfe..3ab1200e172f 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only test_verifier test_maps test_lru_map @@ -5,37 +6,33 @@ test_lpm_map test_tag FEATURE-DUMP.libbpf fixdep -test_align test_dev_cgroup -test_progs +/test_progs* test_tcpbpf_user test_verifier_log feature -test_libbpf_open test_sock test_sock_addr -test_sock_fields urandom_read -test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user test_skb_cgroup_id_user test_socket_cookie -test_cgroup_attach test_cgroup_storage -test_select_reuseport test_flow_dissector flow_dissector_load test_netcnt -test_section_names test_tcpnotify_user test_libbpf test_tcp_check_syncookie_user test_sysctl -alu32 -libbpf.pc -libbpf.so.* -test_hashmap -test_btf_dump +test_current_pid_tgid_new_ns xdping +test_cpp +*.skel.h +/no_alu32 +/bpf_gcc +/tools +/runqslower +/bench diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 6889c19a628c..a9a4fe37859e 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -2,10 +2,16 @@ include ../../../../scripts/Kbuild.include include ../../../scripts/Makefile.arch -LIBDIR := ../../../lib +CXX ?= $(CROSS_COMPILE)g++ + +CURDIR := $(abspath .) +TOOLSDIR := $(abspath ../../..) +LIBDIR := $(TOOLSDIR)/lib BPFDIR := $(LIBDIR)/bpf -APIDIR := ../../../include/uapi -GENDIR := ../../../../include/generated +TOOLSINCDIR := $(TOOLSDIR)/include +BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool +APIDIR := $(TOOLSINCDIR)/uapi +GENDIR := $(abspath ../../../../include/generated) GENHDR := $(GENDIR)/autoconf.h ifneq ($(wildcard $(GENHDR)),) @@ -15,45 +21,35 @@ endif CLANG ?= clang LLC ?= llc LLVM_OBJCOPY ?= llvm-objcopy -LLVM_READELF ?= llvm-readelf -BTF_PAHOLE ?= pahole BPF_GCC ?= $(shell command -v bpf-gcc;) -CFLAGS += -g -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include \ - -Dbpf_prog_load=bpf_prog_test_load \ +SAN_CFLAGS ?= +CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) $(SAN_CFLAGS) \ + -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ + -I$(TOOLSINCDIR) -I$(APIDIR) -I$(BPFDIR) \ + -Dbpf_prog_load=bpf_prog_test_load \ -Dbpf_load_program=bpf_test_load_program -LDLIBS += -lcap -lelf -lrt -lpthread +LDLIBS += -lcap -lelf -lz -lrt -lpthread # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ - test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ - test_cgroup_storage test_select_reuseport test_section_names \ - test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ - test_btf_dump test_cgroup_attach xdping - -BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) -TEST_GEN_FILES = $(BPF_OBJ_FILES) - -BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c) -TEST_FILES = $(BTF_C_FILES) - -# Also test sub-register code-gen if LLVM has eBPF v3 processor support which -# contains both ALU32 and JMP32 instructions. 
-SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ - $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \ - $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \ - grep 'if w') -ifneq ($(SUBREG_CODEGEN),) -TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES)) -endif + test_verifier_log test_dev_cgroup test_tcpbpf_user \ + test_sock test_sockmap get_cgroup_id_user test_socket_cookie \ + test_cgroup_storage \ + test_netcnt test_tcpnotify_user test_sysctl \ + test_progs-no_alu32 \ + test_current_pid_tgid_new_ns +# Also test bpf-gcc, if present ifneq ($(BPF_GCC),) -TEST_GEN_FILES += $(patsubst %.o,bpf_gcc/%.o, $(BPF_OBJ_FILES)) +TEST_GEN_PROGS += test_progs-bpf_gcc endif +TEST_GEN_FILES = +TEST_FILES = test_lwt_ip_encap.o \ + test_tc_edt.o + # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ - test_libbpf.sh \ test_xdp_redirect.sh \ test_xdp_meta.sh \ test_xdp_veth.sh \ @@ -71,7 +67,9 @@ TEST_PROGS := test_kmod.sh \ test_tc_tunnel.sh \ test_tc_edt.sh \ test_xdping.sh \ - test_bpftool_build.sh + test_bpftool_build.sh \ + test_bpftool.sh \ + test_bpftool_metadata.sh \ TEST_PROGS_EXTENDED := with_addr.sh \ with_tunnels.sh \ @@ -80,27 +78,84 @@ TEST_PROGS_EXTENDED := with_addr.sh \ test_xdp_vlan.sh # Compile but not part of 'make run_tests' -TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ +TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ - test_lirc_mode2_user + test_lirc_mode2_user xdping runqslower bench + +TEST_CUSTOM_PROGS = urandom_read + +# Emit succinct information message describing current building step +# $1 - generic step name (e.g., CC, LINK, etc); +# $2 - optional "flavor" specifier; if provided, will be emitted as [flavor]; +# $3 - target (assumed to be file); only file name will be emitted; +# $4 - optional extra arg, emitted as-is, if provided. +ifeq ($(V),1) +Q = +msg = +else +Q = @ +msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))"; +MAKEFLAGS += --no-print-directory +submake_extras := feature_display=0 +endif + +# override lib.mk's default rules +OVERRIDE_TARGETS := 1 +override define CLEAN + $(call msg,CLEAN) + $(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) +endef include ../lib.mk -# NOTE: $(OUTPUT) won't get default value if used before lib.mk -TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read -all: $(TEST_CUSTOM_PROGS) +SCRATCH_DIR := $(OUTPUT)/tools +BUILD_DIR := $(SCRATCH_DIR)/build +INCLUDE_DIR := $(SCRATCH_DIR)/include +BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids -$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c - $(CC) -o $@ $< -Wl,--build-id +# Define simple and short `make test_progs`, `make test_sysctl`, etc targets +# to build individual tests. +# NOTE: Semicolon at the end is critical to override lib.mk's default static +# rule for binaries. 
+$(notdir $(TEST_GEN_PROGS) \ + $(TEST_PROGS) \ + $(TEST_PROGS_EXTENDED) \ + $(TEST_GEN_PROGS_EXTENDED) \ + $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; -$(OUTPUT)/test_stub.o: test_stub.c - $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) -c -o $@ $< +$(OUTPUT)/%.o: %.c + $(call msg,CC,,$@) + $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@ -BPFOBJ := $(OUTPUT)/libbpf.a +$(OUTPUT)/%:%.c + $(call msg,BINARY,,$@) + $(Q)$(LINK.c) $^ $(LDLIBS) -o $@ -$(TEST_GEN_PROGS): $(OUTPUT)/test_stub.o $(BPFOBJ) +$(OUTPUT)/urandom_read: urandom_read.c + $(call msg,BINARY,,$@) + $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1 -$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(OUTPUT)/libbpf.a +$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) + $(call msg,CC,,$@) + $(Q)$(CC) -c $(CFLAGS) -o $@ $< + +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) + +DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool + +$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ + OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \ + BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ + cp $(SCRATCH_DIR)/runqslower $@ + +$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ) $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c $(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c @@ -110,32 +165,53 @@ $(OUTPUT)/test_socket_cookie: cgroup_helpers.c $(OUTPUT)/test_sockmap: cgroup_helpers.c $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c $(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c -$(OUTPUT)/test_progs: cgroup_helpers.c trace_helpers.c $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c $(OUTPUT)/test_netcnt: cgroup_helpers.c $(OUTPUT)/test_sock_fields: cgroup_helpers.c $(OUTPUT)/test_sysctl: cgroup_helpers.c -$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c -.PHONY: force +BPFTOOL ?= $(DEFAULT_BPFTOOL) +$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ + $(BPFOBJ) | $(BUILD_DIR)/bpftool + $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ + OUTPUT=$(BUILD_DIR)/bpftool/ \ + prefix= DESTDIR=$(SCRATCH_DIR)/ install + $(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation + $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \ + -C $(BPFTOOLDIR)/Documentation \ + OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \ + prefix= DESTDIR=$(SCRATCH_DIR)/ install -# force a rebuild of BPFOBJ when its dependencies are updated -force: +$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ + ../../../include/uapi/linux/bpf.h \ + | $(INCLUDE_DIR) $(BUILD_DIR)/libbpf + $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ + DESTDIR=$(SCRATCH_DIR) prefix= all install_headers -$(BPFOBJ): force - $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ +$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR): + $(call msg,MKDIR,,$@) + $(Q)mkdir -p $@ -PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) - -# Let newer LLVM versions transparently probe the kernel for availability -# of full BPF instruction set. 
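The VMLINUX_BTF path probing above feeds the $(INCLUDE_DIR)/vmlinux.h rule that follows: bpftool dumps the running kernel's BTF as one self-contained C header, so BPF programs can use kernel types without including any kernel headers. A hypothetical consumer (not part of this patch), assuming libbpf's bpf_helpers.h and bpf_core_read.h:

/* hypothetical CO-RE program built against the generated vmlinux.h */
#include "vmlinux.h"            /* every kernel type, dumped from BTF */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tp/sched_switch")
int log_current_pid(void *ctx)
{
        struct task_struct *task = (void *)bpf_get_current_task();

        /* BPF_CORE_READ emits a relocation, so the offset of task->pid
         * is fixed up for whatever kernel the object is loaded on
         */
        bpf_printk("pid=%d", BPF_CORE_READ(task, pid));
        return 0;
}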
-ifeq ($(PROBE),) - CPU ?= probe +$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR) +ifeq ($(VMLINUX_H),) + $(call msg,GEN,,$@) + $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@ else - CPU ?= generic + $(call msg,CP,,$@) + $(Q)cp "$(VMLINUX_H)" $@ endif +$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ + $(TOOLSDIR)/lib/rbtree.c \ + $(TOOLSDIR)/lib/zalloc.c \ + $(TOOLSDIR)/lib/string.c \ + $(TOOLSDIR)/lib/ctype.c \ + $(TOOLSDIR)/lib/str_error_r.c + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ + OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ) + # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. This fixes "missing" files on some architectures/distros, # such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc. @@ -146,9 +222,16 @@ define get_sys_includes $(shell $(1) -v -E - </dev/null 2>&1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') endef + +# Determine target endianness. +IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \ + grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__') +MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) + CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG)) -BPF_CFLAGS = -I. -I./include/uapi -I../../../include/uapi \ - -I$(OUTPUT)/../usr/include -D__TARGET_ARCH_$(SRCARCH) +BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ + -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ + -I$(abspath $(OUTPUT)/../usr/include) CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ -Wno-compare-distinct-pointer-types @@ -156,167 +239,200 @@ CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline -$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h -$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h - $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h -$(OUTPUT)/test_progs.o: flow_dissector_load.h -BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) -BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) -BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') -BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \ - $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \ - $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \ - /bin/rm -f ./llvm_btf_verify.o) +# Build BPF object using Clang +# $1 - input .c file +# $2 - output .o file +# $3 - CFLAGS +# $4 - LDFLAGS +define CLANG_BPF_BUILD_RULE + $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2) + $(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \ + -c $1 -o - || echo "BPF obj compilation failed") | \ + $(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2 +endef +# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 +define CLANG_NOALU32_BPF_BUILD_RULE + $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2) + $(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \ + -c $1 -o - || echo "BPF obj compilation failed") | \ + $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2 +endef +# Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC +define CLANG_NATIVE_BPF_BUILD_RULE + $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(Q)($(CLANG) $3 -O2 -emit-llvm \ + -c $1 -o - || echo "BPF obj compilation failed") | \ + $(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2 +endef +# Build BPF object using GCC +define GCC_BPF_BUILD_RULE + $(call 
msg,GCC-BPF,$(TRUNNER_BINARY),$2) + $(Q)$(BPF_GCC) $3 $4 -O2 -c $1 -o $2 +endef -ifneq ($(BTF_LLVM_PROBE),) - BPF_CFLAGS += -g -else -ifneq ($(BTF_LLC_PROBE),) -ifneq ($(BTF_PAHOLE_PROBE),) -ifneq ($(BTF_OBJCOPY_PROBE),) - BPF_CFLAGS += -g - LLC_FLAGS += -mattr=dwarfris - DWARF2BTF = y -endif -endif -endif +SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c + +# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on +# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. +# Parameters: +# $1 - test runner base binary name (e.g., test_progs) +# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc) +define DEFINE_TEST_RUNNER + +TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2 +TRUNNER_BINARY := $1$(if $2,-)$2 +TRUNNER_TEST_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.test.o, \ + $$(notdir $$(wildcard $(TRUNNER_TESTS_DIR)/*.c))) +TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \ + $$(filter %.c,$(TRUNNER_EXTRA_SOURCES))) +TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES)) +TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h +TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)) +TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)) +TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \ + $$(filter-out $(SKEL_BLACKLIST), \ + $$(TRUNNER_BPF_SRCS))) +TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS) + +# Evaluate rules now with extra TRUNNER_XXX variables above already defined +$$(eval $$(call DEFINE_TEST_RUNNER_RULES,$1,$2)) + +endef + +# Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and +# set up by DEFINE_TEST_RUNNER itself, create test runner build rules with: +# $1 - test runner base binary name (e.g., test_progs) +# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc) +define DEFINE_TEST_RUNNER_RULES + +ifeq ($($(TRUNNER_OUTPUT)-dir),) +$(TRUNNER_OUTPUT)-dir := y +$(TRUNNER_OUTPUT): + $$(call msg,MKDIR,,$$@) + $(Q)mkdir -p $$@ endif -TEST_PROGS_CFLAGS := -I. -I$(OUTPUT) -TEST_MAPS_CFLAGS := -I. -I$(OUTPUT) -TEST_VERIFIER_CFLAGS := -I. 
-I$(OUTPUT) -Iverifier +# ensure we set up BPF objects generation rule just once for a given +# input/output directory combination +ifeq ($($(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs),) +$(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y +$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \ + $(TRUNNER_BPF_PROGS_DIR)/%.c \ + $(TRUNNER_BPF_PROGS_DIR)/*.h \ + $$(INCLUDE_DIR)/vmlinux.h \ + $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT) + $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ + $(TRUNNER_BPF_CFLAGS), \ + $(TRUNNER_BPF_LDFLAGS)) -ifneq ($(SUBREG_CODEGEN),) -ALU32_BUILD_DIR = $(OUTPUT)/alu32 -TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32 -$(ALU32_BUILD_DIR): - mkdir -p $@ - -$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(ALU32_BUILD_DIR) - cp $< $@ - -$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\ - $(ALU32_BUILD_DIR)/urandom_read \ - | $(ALU32_BUILD_DIR) - $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \ - -o $(ALU32_BUILD_DIR)/test_progs_32 \ - test_progs.c test_stub.c cgroup_helpers.c trace_helpers.c prog_tests/*.c \ - $(OUTPUT)/libbpf.a $(LDLIBS) - -$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H) -$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c - -$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR)/test_progs_32 \ - | $(ALU32_BUILD_DIR) - ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \ - -c $< -o - || echo "clang failed") | \ - $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \ - -filetype=obj -o $@ -ifeq ($(DWARF2BTF),y) - $(BTF_PAHOLE) -J $@ -endif +$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \ + $(TRUNNER_OUTPUT)/%.o \ + | $(BPFTOOL) $(TRUNNER_OUTPUT) + $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) + $(Q)$$(BPFTOOL) gen skeleton $$< > $$@ endif +# ensure we set up tests.h header generation rule just once +ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),) +$(TRUNNER_TESTS_DIR)-tests-hdr := y +$(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c + $$(call msg,TEST-HDR,$(TRUNNER_BINARY),$$@) + $$(shell ( cd $(TRUNNER_TESTS_DIR); \ + echo '/* Generated header, do not edit */'; \ + ls *.c 2> /dev/null | \ + sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \ + ) > $$@) +endif + +# compile individual test files +# Note: we cd into output directory to ensure embedded BPF object is found +$(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \ + $(TRUNNER_TESTS_DIR)/%.c \ + $(TRUNNER_EXTRA_HDRS) \ + $(TRUNNER_BPF_OBJS) \ + $(TRUNNER_BPF_SKELS) \ + $$(BPFOBJ) | $(TRUNNER_OUTPUT) + $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@) + $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F) + +$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \ + %.c \ + $(TRUNNER_EXTRA_HDRS) \ + $(TRUNNER_TESTS_HDR) \ + $$(BPFOBJ) | $(TRUNNER_OUTPUT) + $$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@) + $(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@ + +# only copy extra resources if in flavored build +$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT) +ifneq ($2,) + $$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES)) + $(Q)cp -a $$^ $(TRUNNER_OUTPUT)/ +endif + +$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ + $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ + $(RESOLVE_BTFIDS) \ + | $(TRUNNER_BINARY)-extras + $$(call msg,BINARY,,$$@) + $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ + $(Q)$(RESOLVE_BTFIDS) --btf btf_data.o $$@ + +endef + +# Define test_progs test runner. 
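The tests.h rule above turns each prog_tests/*.c file into a DEFINE_TEST(name) line; before the test_progs runner is instantiated below, it is worth seeing how such a header is typically consumed. test_progs.c itself is not in this excerpt, so the X-macro sketch here (names included) is illustrative:

/* prog_tests/tests.h, as emitted by the sed rule above:
 *      DEFINE_TEST(attach_probe)
 *      DEFINE_TEST(bpf_iter)
 *      ...
 */

/* pass 1: forward-declare every test function */
#define DEFINE_TEST(name) extern void test_##name(void);
#include "tests.h"
#undef DEFINE_TEST

struct prog_test_def {
        const char *name;
        void (*run_test)(void);
};

/* pass 2: build the dispatch table from the same header */
static struct prog_test_def tests[] = {
#define DEFINE_TEST(name) { #name, &test_##name },
#include "tests.h"
#undef DEFINE_TEST
};

Dropping a new .c file into prog_tests/ is then all it takes to register a test.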
+TRUNNER_TESTS_DIR := prog_tests +TRUNNER_BPF_PROGS_DIR := progs +TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ + network_helpers.c flow_dissector_load.h +TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \ + $(wildcard progs/btf_dump_test_case_*.c) +TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE +TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) +TRUNNER_BPF_LDFLAGS := -mattr=+alu32 +$(eval $(call DEFINE_TEST_RUNNER,test_progs)) + +# Define test_progs-no_alu32 test runner. +TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE +TRUNNER_BPF_LDFLAGS := +$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32)) + +# Define test_progs BPF-GCC-flavored test runner. ifneq ($(BPF_GCC),) -GCC_SYS_INCLUDES = $(call get_sys_includes,gcc) -IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \ - grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__') -ifeq ($(IS_LITTLE_ENDIAN),) -MENDIAN=-mbig-endian -else -MENDIAN=-mlittle-endian -endif -BPF_GCC_CFLAGS = $(GCC_SYS_INCLUDES) $(MENDIAN) -BPF_GCC_BUILD_DIR = $(OUTPUT)/bpf_gcc -TEST_CUSTOM_PROGS += $(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc -$(BPF_GCC_BUILD_DIR): - mkdir -p $@ - -$(BPF_GCC_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(BPF_GCC_BUILD_DIR) - cp $< $@ - -$(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc: $(OUTPUT)/test_progs \ - | $(BPF_GCC_BUILD_DIR) - cp $< $@ - -$(BPF_GCC_BUILD_DIR)/%.o: progs/%.c $(BPF_GCC_BUILD_DIR)/test_progs_bpf_gcc \ - | $(BPF_GCC_BUILD_DIR) - $(BPF_GCC) $(BPF_CFLAGS) $(BPF_GCC_CFLAGS) -O2 -c $< -o $@ +TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE +TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc) +TRUNNER_BPF_LDFLAGS := +$(eval $(call DEFINE_TEST_RUNNER,test_progs,bpf_gcc)) endif -# Have one program compiled without "-target bpf" to test whether libbpf loads -# it successfully -$(OUTPUT)/test_xdp.o: progs/test_xdp.c - ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -emit-llvm -c $< -o - || \ - echo "clang failed") | \ - $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@ -ifeq ($(DWARF2BTF),y) - $(BTF_PAHOLE) -J $@ -endif +# Define test_maps test runner. 
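The flavors defined above differ only in code generation: the default test_progs build passes -mattr=+alu32 -mcpu=v3 to llc, while test_progs-no_alu32 uses -mcpu=v2. The removed SUBREG_CODEGEN probe higher up compiled a one-line C function to detect that support; the difference is easiest to see on that same function (the asm in the comment is approximate):

/* the probe function from the removed SUBREG_CODEGEN check */
int cal(int a)
{
        return a > 0;
}

/* With -mattr=+alu32 -mcpu=v3, llc compares 32-bit "w" subregisters
 * using a JMP32 instruction, roughly:
 *      w0 = 1
 *      if w1 s> 0 goto +1
 *      w0 = 0
 *      exit
 * which is why the old probe grepped for "if w". With -mcpu=v2 the
 * same test is performed on full 64-bit "r" registers.
 */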
+TRUNNER_TESTS_DIR := map_tests +TRUNNER_BPF_PROGS_DIR := progs +TRUNNER_EXTRA_SOURCES := test_maps.c +TRUNNER_EXTRA_FILES := +TRUNNER_BPF_BUILD_RULE := $$(error no BPF objects should be built) +TRUNNER_BPF_CFLAGS := +TRUNNER_BPF_LDFLAGS := +$(eval $(call DEFINE_TEST_RUNNER,test_maps)) -$(OUTPUT)/%.o: progs/%.c - ($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \ - -c $< -o - || echo "clang failed") | \ - $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@ -ifeq ($(DWARF2BTF),y) - $(BTF_PAHOLE) -J $@ -endif - -PROG_TESTS_DIR = $(OUTPUT)/prog_tests -$(PROG_TESTS_DIR): - mkdir -p $@ -PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.h -PROG_TESTS_FILES := $(wildcard prog_tests/*.c) -test_progs.c: $(PROG_TESTS_H) -$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS) -$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(PROG_TESTS_H) -$(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR) - $(shell ( cd prog_tests/; \ echo '/* Generated header, do not edit */'; \ ls *.c 2> /dev/null | \ sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \ ) > $(PROG_TESTS_H)) -MAP_TESTS_DIR = $(OUTPUT)/map_tests -$(MAP_TESTS_DIR): - mkdir -p $@ -MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h -MAP_TESTS_FILES := $(wildcard map_tests/*.c) -test_maps.c: $(MAP_TESTS_H) -$(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS) -$(OUTPUT)/test_maps: test_maps.c $(MAP_TESTS_FILES) | $(MAP_TESTS_H) -$(MAP_TESTS_H): $(MAP_TESTS_FILES) | $(MAP_TESTS_DIR) - $(shell ( cd map_tests/; \ echo '/* Generated header, do not edit */'; \ echo '#ifdef DECLARE'; \ ls *.c 2> /dev/null | \ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \ echo '#endif'; \ echo '#ifdef CALL'; \ ls *.c 2> /dev/null | \ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \ echo '#endif' \ ) > $(MAP_TESTS_H)) -VERIFIER_TESTS_DIR = $(OUTPUT)/verifier -$(VERIFIER_TESTS_DIR): - mkdir -p $@ -VERIFIER_TESTS_H := $(VERIFIER_TESTS_DIR)/tests.h -VERIFIER_TEST_FILES := $(wildcard verifier/*.c) -test_verifier.c: $(VERIFIER_TESTS_H) -$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS) -$(OUTPUT)/test_verifier: test_verifier.c | $(VERIFIER_TEST_FILES) $(VERIFIER_TESTS_H) -$(VERIFIER_TESTS_H): $(VERIFIER_TEST_FILES) | $(VERIFIER_TESTS_DIR) +# Define test_verifier test runner. +# It is much simpler than test_maps/test_progs and sufficiently different +# from them (e.g., its generated tests.h uses a completely different +# pattern), so it's worth just defining all the rules explicitly.
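Concretely, the generated header below just #includes every verifier/*.c file, and those files contain nothing but brace-enclosed struct bpf_test initializers. A sketch of both halves (the test entry is illustrative, not from this patch):

/* verifier/basic.c: bare initializers, no declarations */
{
        "mov64 + exit",
        .insns = {
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
},

/* test_verifier.c: FILL_ARRAY makes the header expand to
 * #include "basic.c", ... inside the array initializer
 */
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};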
+verifier/tests.h: verifier/*.c $(shell ( cd verifier/; \ echo '/* Generated header, do not edit */'; \ echo '#ifdef FILL_ARRAY'; \ - ls *.c 2> /dev/null | \ - sed -e 's@\(.*\)@#include \"\1\"@'; \ + ls *.c 2> /dev/null | sed -e 's@\(.*\)@#include \"\1\"@'; \ echo '#endif' \ - ) > $(VERIFIER_TESTS_H)) + ) > verifier/tests.h) +$(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT) + $(call msg,BINARY,,$@) + $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ -EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) $(BPF_GCC_BUILD_DIR) \ - $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \ - feature +EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) \ + prog_tests/tests.h map_tests/tests.h verifier/tests.h \ + feature \ + $(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 9f77cbaac01c..9385232e2426 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -233,6 +233,8 @@ static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal; static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip, int ip_len, void *tcp, int tcp_len) = (void *) BPF_FUNC_tcp_gen_syncookie; +static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = + (void *)BPF_FUNC_tcp_send_ack; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions @@ -498,20 +500,6 @@ struct pt_regs; #endif -#if defined(bpf_target_powerpc) -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) -#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#elif defined(bpf_target_sparc) -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) -#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#else -#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ - bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) -#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \ - bpf_probe_read(&(ip), sizeof(ip), \ - (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) -#endif - /* * BPF_CORE_READ abstracts away bpf_probe_read() call and captures offset * relocation for source address using __builtin_preserve_access_index() @@ -532,4 +520,17 @@ struct pt_regs; bpf_probe_read((dst), sizeof(*(src)), \ __builtin_preserve_access_index(src)) +/* The following types should be used by BPF_PROG_TYPE_TRACING program to + * access kernel function arguments. BPF trampoline and raw tracepoints + * typecast arguments to 'unsigned long long'. + */ +typedef int __attribute__((aligned(8))) ks32; +typedef char __attribute__((aligned(8))) ks8; +typedef short __attribute__((aligned(8))) ks16; +typedef long long __attribute__((aligned(8))) ks64; +typedef unsigned int __attribute__((aligned(8))) ku32; +typedef unsigned char __attribute__((aligned(8))) ku8; +typedef unsigned short __attribute__((aligned(8))) ku16; +typedef unsigned long long __attribute__((aligned(8))) ku64; + #endif diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h new file mode 100644 index 000000000000..4b300895445f --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BPF_TCP_HELPERS_H +#define __BPF_TCP_HELPERS_H + +#include <stdbool.h> +#include <linux/types.h> +#include <bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_trace_helpers.h" + +/* "struct_ops/" is only a convention. 
not a requirement. */ +#define BPF_TCP_OPS_0(fname, ret_type, ...) BPF_TRACE_x(0, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) +#define BPF_TCP_OPS_1(fname, ret_type, ...) BPF_TRACE_x(1, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) +#define BPF_TCP_OPS_2(fname, ret_type, ...) BPF_TRACE_x(2, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) +#define BPF_TCP_OPS_3(fname, ret_type, ...) BPF_TRACE_x(3, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) +#define BPF_TCP_OPS_4(fname, ret_type, ...) BPF_TRACE_x(4, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) +#define BPF_TCP_OPS_5(fname, ret_type, ...) BPF_TRACE_x(5, "struct_ops/"#fname, fname, ret_type, __VA_ARGS__) + +struct sock_common { + unsigned char skc_state; +} __attribute__((preserve_access_index)); + +struct sock { + struct sock_common __sk_common; +} __attribute__((preserve_access_index)); + +struct inet_sock { + struct sock sk; +} __attribute__((preserve_access_index)); + +struct inet_connection_sock { + struct inet_sock icsk_inet; + __u8 icsk_ca_state:6, + icsk_ca_setsockopt:1, + icsk_ca_dst_locked:1; + struct { + __u8 pending; + } icsk_ack; + __u64 icsk_ca_priv[104 / sizeof(__u64)]; +} __attribute__((preserve_access_index)); + +struct tcp_sock { + struct inet_connection_sock inet_conn; + + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u8 ecn_flags; + __u32 delivered; + __u32 delivered_ce; + __u32 snd_cwnd; + __u32 snd_cwnd_cnt; + __u32 snd_cwnd_clamp; + __u32 snd_ssthresh; + __u8 syn_data:1, /* SYN includes data */ + syn_fastopen:1, /* SYN includes Fast Open option */ + syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ + syn_fastopen_ch:1, /* Active TFO re-enabling probe */ + syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ + save_syn:1, /* Save headers of SYN packet */ + is_cwnd_limited:1,/* forward progress limited by snd_cwnd? 
*/ + syn_smc:1; /* SYN includes SMC */ + __u32 max_packets_out; + __u32 lsndtime; + __u32 prior_cwnd; +} __attribute__((preserve_access_index)); + +static __always_inline struct inet_connection_sock *inet_csk(const struct sock *sk) +{ + return (struct inet_connection_sock *)sk; +} + +static __always_inline void *inet_csk_ca(const struct sock *sk) +{ + return (void *)inet_csk(sk)->icsk_ca_priv; +} + +static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk) +{ + return (struct tcp_sock *)sk; +} + +static __always_inline bool before(__u32 seq1, __u32 seq2) +{ + return (__s32)(seq1-seq2) < 0; +} +#define after(seq2, seq1) before(seq1, seq2) + +#define TCP_ECN_OK 1 +#define TCP_ECN_QUEUE_CWR 2 +#define TCP_ECN_DEMAND_CWR 4 +#define TCP_ECN_SEEN 8 + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */ +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4 +}; + +struct ack_sample { + __u32 pkts_acked; + __s32 rtt_us; + __u32 in_flight; +} __attribute__((preserve_access_index)); + +struct rate_sample { + __u64 prior_mstamp; /* starting timestamp for interval */ + __u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ + __s32 delivered; /* number of packets delivered over interval */ + long interval_us; /* time for tp->delivered to incr "delivered" */ + __u32 snd_interval_us; /* snd interval for delivered packets */ + __u32 rcv_interval_us; /* rcv interval for delivered packets */ + long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ + int losses; /* number of packets marked lost upon ACK */ + __u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ + __u32 prior_in_flight; /* in flight before this ACK */ + bool is_app_limited; /* is sample from packet with bubble in pipe? */ + bool is_retrans; /* is sample from retransmission? */ + bool is_ack_delayed; /* is this (likely) a delayed ACK? 
*/ +} __attribute__((preserve_access_index)); + +#define TCP_CA_NAME_MAX 16 +#define TCP_CONG_NEEDS_ECN 0x2 + +struct tcp_congestion_ops { + char name[TCP_CA_NAME_MAX]; + __u32 flags; + + /* initialize private data (optional) */ + void (*init)(struct sock *sk); + /* cleanup private data (optional) */ + void (*release)(struct sock *sk); + + /* return slow start threshold (required) */ + __u32 (*ssthresh)(struct sock *sk); + /* do new cwnd calculation (required) */ + void (*cong_avoid)(struct sock *sk, __u32 ack, __u32 acked); + /* call before changing ca_state (optional) */ + void (*set_state)(struct sock *sk, __u8 new_state); + /* call when cwnd event occurs (optional) */ + void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); + /* call when ack arrives (optional) */ + void (*in_ack_event)(struct sock *sk, __u32 flags); + /* new value of cwnd after loss (required) */ + __u32 (*undo_cwnd)(struct sock *sk); + /* hook for packet ack accounting (optional) */ + void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); + /* override sysctl_tcp_min_tso_segs */ + __u32 (*min_tso_segs)(struct sock *sk); + /* returns the multiplier used in tcp_sndbuf_expand (optional) */ + __u32 (*sndbuf_expand)(struct sock *sk); + /* call when packets are delivered to update cwnd and pacing rate, + * after all the ca_state processing. (optional) + */ + void (*cong_control)(struct sock *sk, const struct rate_sample *rs); +}; + +#define min(a, b) ((a) < (b) ? (a) : (b)) +#define max(a, b) ((a) > (b) ? (a) : (b)) +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +static __always_inline __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) +{ + __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh); + + acked -= cwnd - tp->snd_cwnd; + tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); + + return acked; +} + +static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp) +{ + return tp->snd_cwnd < tp->snd_ssthresh; +} + +static __always_inline bool tcp_is_cwnd_limited(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ + if (tcp_in_slow_start(tp)) + return tp->snd_cwnd < 2 * tp->max_packets_out; + + return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited); +} + +static __always_inline void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) +{ + /* If credits accumulated at a higher w, apply them gently now. */ + if (tp->snd_cwnd_cnt >= w) { + tp->snd_cwnd_cnt = 0; + tp->snd_cwnd++; + } + + tp->snd_cwnd_cnt += acked; + if (tp->snd_cwnd_cnt >= w) { + __u32 delta = tp->snd_cwnd_cnt / w; + + tp->snd_cwnd_cnt -= delta * w; + tp->snd_cwnd += delta; + } + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); +} + +#endif diff --git a/tools/testing/selftests/bpf/bpf_trace_helpers.h b/tools/testing/selftests/bpf/bpf_trace_helpers.h new file mode 100644 index 000000000000..c76a214a53b0 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_trace_helpers.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BPF_TRACE_HELPERS_H +#define __BPF_TRACE_HELPERS_H + +#include "bpf_helpers.h" + +#define __BPF_MAP_0(i, m, v, ...) v +#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i]) +#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_4(i, m, v, t, a, ...) 
m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__) +#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__) + +/* BPF sizeof(void *) is always 8, so no need to cast to long first + * for ptr to avoid compiler warning. + */ +#define __BPF_CAST(t, a, ctx) (t) ctx +#define __BPF_V void +#define __BPF_N + +#define __BPF_DECL_ARGS(t, a, ctx) t a + +#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...) \ +static __always_inline ret_type \ +____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ + \ +SEC(sec_name) \ +ret_type fname(__u64 *ctx) \ +{ \ + return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\ +} \ + \ +static __always_inline \ +ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) + +#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__) +#define BPF_TRACE_12(sec, fname, ...) 
BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__) + +#endif diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 5dc109f4c097..48e058552eb7 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -25,6 +25,7 @@ CONFIG_XDP_SOCKETS=y CONFIG_FTRACE_SYSCALLS=y CONFIG_IPV6_TUNNEL=y CONFIG_IPV6_GRE=y +CONFIG_IPV6_SEG6_BPF=y CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_IPV6_FOU=m @@ -35,3 +36,5 @@ CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_IPV6_SIT=m CONFIG_BPF_JIT=y +CONFIG_BPF_LSM=y +CONFIG_SECURITY=y diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c new file mode 100644 index 000000000000..f56655690f9b --- /dev/null +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <errno.h> +#include <stdbool.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +#include <arpa/inet.h> + +#include <linux/err.h> +#include <linux/in.h> +#include <linux/in6.h> + +#include "bpf_util.h" +#include "network_helpers.h" + +#define clean_errno() (errno == 0 ? "None" : strerror(errno)) +#define log_err(MSG, ...) ({ \ + int __save = errno; \ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, clean_errno(), \ + ##__VA_ARGS__); \ + errno = __save; \ +}) + +struct ipv4_packet pkt_v4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, + .tcp.doff = 5, +}; + +struct ipv6_packet pkt_v6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, + .tcp.doff = 5, +}; + +static int settimeo(int fd, int timeout_ms) +{ + struct timeval timeout = { .tv_sec = 3 }; + + if (timeout_ms > 0) { + timeout.tv_sec = timeout_ms / 1000; + timeout.tv_usec = (timeout_ms % 1000) * 1000; + } + + if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_RCVTIMEO"); + return -1; + } + + if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_SNDTIMEO"); + return -1; + } + + return 0; +} + +#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; }) + +int start_server(int family, int type, const char *addr_str, __u16 port, + int timeout_ms) +{ + struct sockaddr_storage addr = {}; + socklen_t len; + int fd; + + if (make_sockaddr(family, addr_str, port, &addr, &len)) + return -1; + + fd = socket(family, type, 0); + if (fd < 0) { + log_err("Failed to create server socket"); + return -1; + } + + if (settimeo(fd, timeout_ms)) + goto error_close; + + if (bind(fd, (const struct sockaddr *)&addr, len) < 0) { + log_err("Failed to bind socket"); + goto error_close; + } + + if (type == SOCK_STREAM) { + if (listen(fd, 1) < 0) { + log_err("Failed to listen on socket"); + goto error_close; + } + } + + return fd; + +error_close: + save_errno_close(fd); + return -1; +} + +static int connect_fd_to_addr(int fd, + const struct sockaddr_storage *addr, + socklen_t addrlen) +{ + if (connect(fd, (const struct sockaddr *)addr, addrlen)) { + log_err("Failed to connect to server"); + return -1; + } + + return 0; +} + +int connect_to_fd(int server_fd, int timeout_ms) +{ + struct sockaddr_storage addr; + struct sockaddr_in *addr_in; + socklen_t addrlen, optlen; + int fd, type; + + optlen = 
sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); + return -1; + } + + addrlen = sizeof(addr); + if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { + log_err("Failed to get server addr"); + return -1; + } + + addr_in = (struct sockaddr_in *)&addr; + fd = socket(addr_in->sin_family, type, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (settimeo(fd, timeout_ms)) + goto error_close; + + if (connect_fd_to_addr(fd, &addr, addrlen)) + goto error_close; + + return fd; + +error_close: + save_errno_close(fd); + return -1; +} + +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + + if (settimeo(client_fd, timeout_ms)) + return -1; + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + return -1; + } + + if (connect_fd_to_addr(client_fd, &addr, len)) + return -1; + + return 0; +} + +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len) +{ + if (family == AF_INET) { + struct sockaddr_in *sin = (void *)addr; + + sin->sin_family = AF_INET; + sin->sin_port = htons(port); + if (addr_str && + inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) { + log_err("inet_pton(AF_INET, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin); + return 0; + } else if (family == AF_INET6) { + struct sockaddr_in6 *sin6 = (void *)addr; + + sin6->sin6_family = AF_INET6; + sin6->sin6_port = htons(port); + if (addr_str && + inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) { + log_err("inet_pton(AF_INET6, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin6); + return 0; + } + return -1; +} diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h new file mode 100644 index 000000000000..c3728f6667e4 --- /dev/null +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETWORK_HELPERS_H +#define __NETWORK_HELPERS_H +#include <sys/socket.h> +#include <sys/types.h> +#include <linux/types.h> +typedef __u16 __sum16; +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <netinet/tcp.h> +#include <bpf/bpf_endian.h> + +#define MAGIC_VAL 0x1234 +#define NUM_ITER 100000 +#define VIP_NUM 5 +#define MAGIC_BYTES 123 + +/* ipv4 test vector */ +struct ipv4_packet { + struct ethhdr eth; + struct iphdr iph; + struct tcphdr tcp; +} __packed; +extern struct ipv4_packet pkt_v4; + +/* ipv6 test vector */ +struct ipv6_packet { + struct ethhdr eth; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed; +extern struct ipv6_packet pkt_v6; + +int start_server(int family, int type, const char *addr, __u16 port, + int timeout_ms); +int connect_to_fd(int server_fd, int timeout_ms); +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len); + +#endif diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index fad615c22e4d..af967ebd60f7 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -24,8 +24,8 @@ ssize_t get_base_addr() { void test_attach_probe(void) { - const char 
*kprobe_name = "kprobe/sys_nanosleep"; - const char *kretprobe_name = "kretprobe/sys_nanosleep"; + const char *kprobe_name = "kprobe/hrtimer_nanosleep"; + const char *kretprobe_name = "kretprobe/hrtimer_nanosleep"; const char *uprobe_name = "uprobe/trigger_func"; const char *uretprobe_name = "uretprobe/trigger_func"; const int kprobe_idx = 0, kretprobe_idx = 1; @@ -79,7 +79,7 @@ void test_attach_probe(void) kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); + "hrtimer_nanosleep"); if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", "err %ld\n", PTR_ERR(kprobe_link))) { kprobe_link = NULL; @@ -87,7 +87,7 @@ void test_attach_probe(void) } kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); + "hrtimer_nanosleep"); if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe", "err %ld\n", PTR_ERR(kretprobe_link))) { kretprobe_link = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c new file mode 100644 index 000000000000..efcd82d4f150 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -0,0 +1,930 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include "bpf_iter_ipv6_route.skel.h" +#include "bpf_iter_netlink.skel.h" +#include "bpf_iter_bpf_map.skel.h" +#include "bpf_iter_task.skel.h" +#include "bpf_iter_task_stack.skel.h" +#include "bpf_iter_task_file.skel.h" +#include "bpf_iter_tcp4.skel.h" +#include "bpf_iter_tcp6.skel.h" +#include "bpf_iter_udp4.skel.h" +#include "bpf_iter_udp6.skel.h" +#include "bpf_iter_test_kern1.skel.h" +#include "bpf_iter_test_kern2.skel.h" +#include "bpf_iter_test_kern3.skel.h" +#include "bpf_iter_test_kern4.skel.h" +#include "bpf_iter_bpf_hash_map.skel.h" +#include "bpf_iter_bpf_percpu_hash_map.skel.h" +#include "bpf_iter_bpf_array_map.skel.h" +#include "bpf_iter_bpf_percpu_array_map.skel.h" +#include "bpf_iter_bpf_sk_storage_map.skel.h" + +static int duration; + +static void test_btf_id_or_null(void) +{ + struct bpf_iter_test_kern3 *skel; + + skel = bpf_iter_test_kern3__open_and_load(); + if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", + "skeleton open_and_load unexpectedly succeeded\n")) { + bpf_iter_test_kern3__destroy(skel); + return; + } +} + +static void do_dummy_read(struct bpf_program *prog) +{ + struct bpf_link *link; + char buf[16] = {}; + int iter_fd, len; + + link = bpf_program__attach_iter(prog, NULL); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* not check contents, but ensure read() ends without error */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); + + close(iter_fd); + +free_link: + bpf_link__destroy(link); +} + +static void test_ipv6_route(void) +{ + struct bpf_iter_ipv6_route *skel; + + skel = bpf_iter_ipv6_route__open_and_load(); + if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_ipv6_route); + + bpf_iter_ipv6_route__destroy(skel); +} + +static void test_netlink(void) +{ + struct bpf_iter_netlink *skel; + + skel = bpf_iter_netlink__open_and_load(); + if (CHECK(!skel, "bpf_iter_netlink__open_and_load", + "skeleton open_and_load failed\n")) + return; + + 
do_dummy_read(skel->progs.dump_netlink); + + bpf_iter_netlink__destroy(skel); +} + +static void test_bpf_map(void) +{ + struct bpf_iter_bpf_map *skel; + + skel = bpf_iter_bpf_map__open_and_load(); + if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_bpf_map); + + bpf_iter_bpf_map__destroy(skel); +} + +static void test_task(void) +{ + struct bpf_iter_task *skel; + + skel = bpf_iter_task__open_and_load(); + if (CHECK(!skel, "bpf_iter_task__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_task); + + bpf_iter_task__destroy(skel); +} + +static void test_task_stack(void) +{ + struct bpf_iter_task_stack *skel; + + skel = bpf_iter_task_stack__open_and_load(); + if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_task_stack); + + bpf_iter_task_stack__destroy(skel); +} + +static void test_task_file(void) +{ + struct bpf_iter_task_file *skel; + + skel = bpf_iter_task_file__open_and_load(); + if (CHECK(!skel, "bpf_iter_task_file__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_task_file); + + bpf_iter_task_file__destroy(skel); +} + +static void test_tcp4(void) +{ + struct bpf_iter_tcp4 *skel; + + skel = bpf_iter_tcp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp4); + + bpf_iter_tcp4__destroy(skel); +} + +static void test_tcp6(void) +{ + struct bpf_iter_tcp6 *skel; + + skel = bpf_iter_tcp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp6); + + bpf_iter_tcp6__destroy(skel); +} + +static void test_udp4(void) +{ + struct bpf_iter_udp4 *skel; + + skel = bpf_iter_udp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp4); + + bpf_iter_udp4__destroy(skel); +} + +static void test_udp6(void) +{ + struct bpf_iter_udp6 *skel; + + skel = bpf_iter_udp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp6); + + bpf_iter_udp6__destroy(skel); +} + +/* The expected string is less than 16 bytes */ +static int do_read_with_fd(int iter_fd, const char *expected, + bool read_one_char) +{ + int err = -1, len, read_buf_len, start; + char buf[16] = {}; + + read_buf_len = read_one_char ? 1 : 16; + start = 0; + while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { + start += len; + if (CHECK(start >= 16, "read", "read len %d\n", len)) + return -1; + read_buf_len = read_one_char ? 
1 : 16 - start; + } + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + return -1; + + err = strcmp(buf, expected); + if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", + buf, expected)) + return -1; + + return 0; +} + +static void test_anon_iter(bool read_one_char) +{ + struct bpf_iter_test_kern1 *skel; + struct bpf_link *link; + int iter_fd, err; + + skel = bpf_iter_test_kern1__open_and_load(); + if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", + "skeleton open_and_load failed\n")) + return; + + err = bpf_iter_test_kern1__attach(skel); + if (CHECK(err, "bpf_iter_test_kern1__attach", + "skeleton attach failed\n")) { + goto out; + } + + link = skel->links.dump_task; + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto out; + + do_read_with_fd(iter_fd, "abcd", read_one_char); + close(iter_fd); + +out: + bpf_iter_test_kern1__destroy(skel); +} + +static int do_read(const char *path, const char *expected) +{ + int err, iter_fd; + + iter_fd = open(path, O_RDONLY); + if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", + path, strerror(errno))) + return -1; + + err = do_read_with_fd(iter_fd, expected, false); + close(iter_fd); + return err; +} + +static void test_file_iter(void) +{ + const char *path = "/sys/fs/bpf/bpf_iter_test1"; + struct bpf_iter_test_kern1 *skel1; + struct bpf_iter_test_kern2 *skel2; + struct bpf_link *link; + int err; + + skel1 = bpf_iter_test_kern1__open_and_load(); + if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", + "skeleton open_and_load failed\n")) + return; + + link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + /* unlink this path if it exists. */ + unlink(path); + + err = bpf_link__pin(link, path); + if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) + goto free_link; + + err = do_read(path, "abcd"); + if (err) + goto unlink_path; + + /* The file-based iterator seems to be working fine. Do a link + * update on the underlying link and `cat` the iterator again; its + * content should change. + */ + skel2 = bpf_iter_test_kern2__open_and_load(); + if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", + "skeleton open_and_load failed\n")) + goto unlink_path; + + err = bpf_link__update_program(link, skel2->progs.dump_task); + if (CHECK(err, "update_prog", "update_prog failed\n")) + goto destroy_skel2; + + do_read(path, "ABCD"); + +destroy_skel2: + bpf_iter_test_kern2__destroy(skel2); +unlink_path: + unlink(path); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_test_kern1__destroy(skel1); +} + +static void test_overflow(bool test_e2big_overflow, bool ret1) +{ + __u32 map_info_len, total_read_len, expected_read_len; + int err, iter_fd, map1_fd, map2_fd, len; + struct bpf_map_info map_info = {}; + struct bpf_iter_test_kern4 *skel; + struct bpf_link *link; + __u32 page_size; + char *buf; + + skel = bpf_iter_test_kern4__open(); + if (CHECK(!skel, "bpf_iter_test_kern4__open", + "skeleton open failed\n")) + return; + + /* create two maps: the bpf program will only do bpf_seq_write + * for these two maps. The goal is that one map's output almost + * fills the seq_file buffer and the other then triggers an + * overflow and needs a restart.
+ */ + map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + if (CHECK(map1_fd < 0, "bpf_create_map", + "map_creation failed: %s\n", strerror(errno))) + goto out; + map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + if (CHECK(map2_fd < 0, "bpf_create_map", + "map_creation failed: %s\n", strerror(errno))) + goto free_map1; + + /* bpf_seq_printf kernel buffer is one page, so one map + * bpf_seq_write will mostly fill it, and the other map + * will partially fill and then trigger overflow and need + * bpf_seq_read restart. + */ + page_size = sysconf(_SC_PAGE_SIZE); + + if (test_e2big_overflow) { + skel->rodata->print_len = (page_size + 8) / 8; + expected_read_len = 2 * (page_size + 8); + } else if (!ret1) { + skel->rodata->print_len = (page_size - 8) / 8; + expected_read_len = 2 * (page_size - 8); + } else { + skel->rodata->print_len = 1; + expected_read_len = 2 * 8; + } + skel->rodata->ret1 = ret1; + + if (CHECK(bpf_iter_test_kern4__load(skel), + "bpf_iter_test_kern4__load", "skeleton load failed\n")) + goto free_map2; + + /* setup filtering map_id in bpf program */ + map_info_len = sizeof(map_info); + err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); + if (CHECK(err, "get_map_info", "get map info failed: %s\n", + strerror(errno))) + goto free_map2; + skel->bss->map1_id = map_info.id; + + err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); + if (CHECK(err, "get_map_info", "get map info failed: %s\n", + strerror(errno))) + goto free_map2; + skel->bss->map2_id = map_info.id; + + link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto free_map2; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + buf = malloc(expected_read_len); + if (!buf) + goto close_iter; + + /* do read */ + total_read_len = 0; + if (test_e2big_overflow) { + while ((len = read(iter_fd, buf, expected_read_len)) > 0) + total_read_len += len; + + CHECK(len != -1 || errno != E2BIG, "read", + "expected ret -1, errno E2BIG, but get ret %d, error %s\n", + len, strerror(errno)); + goto free_buf; + } else if (!ret1) { + while ((len = read(iter_fd, buf, expected_read_len)) > 0) + total_read_len += len; + + if (CHECK(len < 0, "read", "read failed: %s\n", + strerror(errno))) + goto free_buf; + } else { + do { + len = read(iter_fd, buf, expected_read_len); + if (len > 0) + total_read_len += len; + } while (len > 0 || len == -EAGAIN); + + if (CHECK(len < 0, "read", "read failed: %s\n", + strerror(errno))) + goto free_buf; + } + + if (CHECK(total_read_len != expected_read_len, "read", + "total len %u, expected len %u\n", total_read_len, + expected_read_len)) + goto free_buf; + + if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", + "expected 1 actual %d\n", skel->bss->map1_accessed)) + goto free_buf; + + if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", + "expected 2 actual %d\n", skel->bss->map2_accessed)) + goto free_buf; + + CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, + "map2_seqnum", "two different seqnum %lld %lld\n", + skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); + +free_buf: + free(buf); +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +free_map2: + close(map2_fd); +free_map1: + close(map1_fd); +out: + bpf_iter_test_kern4__destroy(skel); +} + +static void test_bpf_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0; + 
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_hash_map *skel; + int err, i, len, map_fd, iter_fd; + union bpf_iter_link_info linfo; + __u64 val, expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + + skel = bpf_iter_bpf_hash_map__open(); + if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", + "skeleton open failed\n")) + return; + + skel->bss->in_test_mode = true; + + err = bpf_iter_bpf_hash_map__load(skel); + if (CHECK(err, "bpf_iter_bpf_hash_map__load", + "skeleton load failed\n")) + goto out; + + /* iterator with hashmap2 and hashmap3 should fail */ + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_ERR_PTR(link, "attach_iter")) + goto out; + + linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_ERR_PTR(link, "attach_iter")) + goto out; + + /* hashmap1 should be good, update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { + key.a = i + 1; + key.b = i + 2; + key.c = i + 3; + val = i + 4; + expected_key_a += key.a; + expected_key_b += key.b; + expected_val += val; + + err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + linfo.map.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->key_sum_a != expected_key_a, + "key_sum_a", "got %u expected %u\n", + skel->bss->key_sum_a, expected_key_a)) + goto close_iter; + if (CHECK(skel->bss->key_sum_b != expected_key_b, + "key_sum_b", "got %u expected %u\n", + skel->bss->key_sum_b, expected_key_b)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %llu expected %llu\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_hash_map__destroy(skel); +} + +static void test_bpf_percpu_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_percpu_hash_map *skel; + int err, i, j, len, map_fd, iter_fd; + union bpf_iter_link_info linfo; + __u32 expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + void *val; + + skel = bpf_iter_bpf_percpu_hash_map__open(); + if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", + "skeleton open failed\n")) + return; + + skel->rodata->num_cpus = bpf_num_possible_cpus(); + val = malloc(8 * bpf_num_possible_cpus()); + + err = bpf_iter_bpf_percpu_hash_map__load(skel); + if (CHECK(err, "bpf_iter_bpf_percpu_hash_map__load", + "skeleton load failed\n")) + goto out; + + /* update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1; + key.b = i + 2; + key.c = i + 3; + expected_key_a += key.a; + expected_key_b += key.b; + + for (j = 0; j < bpf_num_possible_cpus(); j++) { + *(__u32 *)(val + j * 8) = i + j; + expected_val += i + j; + } + + err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->key_sum_a != expected_key_a, + "key_sum_a", "got %u expected %u\n", + skel->bss->key_sum_a, expected_key_a)) + goto close_iter; + if (CHECK(skel->bss->key_sum_b != expected_key_b, + "key_sum_b", "got %u expected %u\n", + skel->bss->key_sum_b, expected_key_b)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %u expected %u\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_percpu_hash_map__destroy(skel); + free(val); +} + +static void test_bpf_array_map(void) +{ + __u64 val, expected_val = 0, res_first_val, first_val = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + __u32 expected_key = 0, res_first_key; + struct bpf_iter_bpf_array_map *skel; + union bpf_iter_link_info linfo; + int err, i, map_fd, iter_fd; + struct bpf_link *link; + char buf[64] = {}; + int len, start; + + skel = bpf_iter_bpf_array_map__open_and_load(); + if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", + "skeleton open_and_load failed\n")) + return; + + map_fd = bpf_map__fd(skel->maps.arraymap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + val = i + 4; + expected_key += i; + expected_val += val; + + if (i == 0) + first_val = val; + + err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + start = 0; + while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) + start += len; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + res_first_key = *(__u32 *)buf; + res_first_val = *(__u64 *)(buf + sizeof(__u32)); + if (CHECK(res_first_key != 0 || res_first_val != first_val, + "bpf_seq_write", + "seq_write failure: first key %u vs expected 0, " + " first value %llu vs expected %llu\n", + res_first_key, res_first_val, first_val)) + goto close_iter; + + if (CHECK(skel->bss->key_sum != expected_key, + "key_sum", "got %u expected %u\n", + skel->bss->key_sum, expected_key)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %llu 
expected %llu\n",
+		  skel->bss->val_sum, expected_val))
+		goto close_iter;
+
+	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
+		err = bpf_map_lookup_elem(map_fd, &i, &val);
+		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
+			goto out;
+		if (CHECK(i != val, "invalid_val",
+			  "got value %llu expected %u\n", val, i))
+			goto out;
+	}
+
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+out:
+	bpf_iter_bpf_array_map__destroy(skel);
+}
+
+static void test_bpf_percpu_array_map(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	struct bpf_iter_bpf_percpu_array_map *skel;
+	__u32 expected_key = 0, expected_val = 0;
+	union bpf_iter_link_info linfo;
+	int err, i, j, map_fd, iter_fd;
+	struct bpf_link *link;
+	char buf[64];
+	void *val;
+	int len;
+
+	skel = bpf_iter_bpf_percpu_array_map__open();
+	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
+		  "skeleton open failed\n"))
+		return;
+
+	skel->rodata->num_cpus = bpf_num_possible_cpus();
+	val = malloc(8 * bpf_num_possible_cpus());
+	if (CHECK(!val, "malloc", "malloc failed\n"))
+		goto out;
+
+	err = bpf_iter_bpf_percpu_array_map__load(skel);
+	if (CHECK(err, "bpf_iter_bpf_percpu_array_map__load",
+		  "skeleton load failed\n"))
+		goto out;
+
+	/* update map values here */
+	map_fd = bpf_map__fd(skel->maps.arraymap1);
+	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
+		expected_key += i;
+
+		for (j = 0; j < bpf_num_possible_cpus(); j++) {
+			*(__u32 *)(val + j * 8) = i + j;
+			expected_val += i + j;
+		}
+
+		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
+		if (CHECK(err, "map_update", "map_update failed\n"))
+			goto out;
+	}
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		goto out;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+		goto free_link;
+
+	/* do some tests */
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+		goto close_iter;
+
+	/* test results */
+	if (CHECK(skel->bss->key_sum != expected_key,
+		  "key_sum", "got %u expected %u\n",
+		  skel->bss->key_sum, expected_key))
+		goto close_iter;
+	if (CHECK(skel->bss->val_sum != expected_val,
+		  "val_sum", "got %u expected %u\n",
+		  skel->bss->val_sum, expected_val))
+		goto close_iter;
+
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+out:
+	bpf_iter_bpf_percpu_array_map__destroy(skel);
+	free(val);
+}
+
+static void test_bpf_sk_storage_map(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	int err, i, len, map_fd, iter_fd, num_sockets;
+	struct bpf_iter_bpf_sk_storage_map *skel;
+	union bpf_iter_link_info linfo;
+	int sock_fd[3] = {-1, -1, -1};
+	__u32 val, expected_val = 0;
+	struct bpf_link *link;
+	char buf[64];
+
+	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
+		  "skeleton open_and_load failed\n"))
+		return;
+
+	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
+	num_sockets = ARRAY_SIZE(sock_fd);
+	for (i = 0; i < num_sockets; i++) {
+		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
+		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
+			goto out;
+
+		val = i + 1;
+		expected_val += val;
+
+		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
+					  BPF_NOEXIST);
+		if (CHECK(err, "map_update", "map_update failed\n"))
+			goto
out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->ipv6_sk_count != num_sockets, + "ipv6_sk_count", "got %u expected %u\n", + skel->bss->ipv6_sk_count, num_sockets)) + goto close_iter; + + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %u expected %u\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + for (i = 0; i < num_sockets; i++) { + if (sock_fd[i] >= 0) + close(sock_fd[i]); + } + bpf_iter_bpf_sk_storage_map__destroy(skel); +} + +void test_bpf_iter(void) +{ + if (test__start_subtest("btf_id_or_null")) + test_btf_id_or_null(); + if (test__start_subtest("ipv6_route")) + test_ipv6_route(); + if (test__start_subtest("netlink")) + test_netlink(); + if (test__start_subtest("bpf_map")) + test_bpf_map(); + if (test__start_subtest("task")) + test_task(); + if (test__start_subtest("task_stack")) + test_task_stack(); + if (test__start_subtest("task_file")) + test_task_file(); + if (test__start_subtest("tcp4")) + test_tcp4(); + if (test__start_subtest("tcp6")) + test_tcp6(); + if (test__start_subtest("udp4")) + test_udp4(); + if (test__start_subtest("udp6")) + test_udp6(); + if (test__start_subtest("anon")) + test_anon_iter(false); + if (test__start_subtest("anon-read-one-char")) + test_anon_iter(true); + if (test__start_subtest("file")) + test_file_iter(); + if (test__start_subtest("overflow")) + test_overflow(false, false); + if (test__start_subtest("overflow-e2big")) + test_overflow(true, false); + if (test__start_subtest("prog-ret-1")) + test_overflow(false, true); + if (test__start_subtest("bpf_hash_map")) + test_bpf_hash_map(); + if (test__start_subtest("bpf_percpu_hash_map")) + test_bpf_percpu_hash_map(); + if (test__start_subtest("bpf_array_map")) + test_bpf_array_map(); + if (test__start_subtest("bpf_percpu_array_map")) + test_bpf_percpu_array_map(); + if (test__start_subtest("bpf_sk_storage_map")) + test_bpf_sk_storage_map(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index f10029821e16..7afa4160416f 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -1,26 +1,30 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#define nr_iters 2 + void test_bpf_obj_id(void) { const __u64 array_magic_value = 0xfaceb00c; const __u32 array_key = 0; - const int nr_iters = 2; const char *file = "./test_obj_id.o"; const char *expected_prog_name = "test_obj_id"; const char *expected_map_name = "test_map_id"; const __u64 nsec_per_sec = 1000000000; - struct bpf_object *objs[nr_iters]; + struct bpf_object *objs[nr_iters] = {}; + struct bpf_link *links[nr_iters] = {}; + struct bpf_program *prog; int prog_fds[nr_iters], map_fds[nr_iters]; /* +1 to test for the info_len returned by kernel */ struct bpf_prog_info prog_infos[nr_iters + 1]; struct bpf_map_info map_infos[nr_iters + 1]; + 
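+	/* +1 mirrors prog_infos/map_infos above: it lets the test verify
+	 * the info_len the kernel reports back for links as well
+	 */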
struct bpf_link_info link_infos[nr_iters + 1]; /* Each prog only uses one map. +1 to test nr_map_ids * returned by kernel. */ __u32 map_ids[nr_iters + 1]; - char jited_insns[128], xlated_insns[128], zeros[128]; + char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128]; __u32 i, next_id, info_len, nr_id_found, duration = 0; struct timespec real_time_ts, boot_time_ts; int err = 0; @@ -36,14 +40,15 @@ void test_bpf_obj_id(void) CHECK(err >= 0 || errno != ENOENT, "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); - for (i = 0; i < nr_iters; i++) - objs[i] = NULL; + err = bpf_link_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno); /* Check bpf_obj_get_info_by_fd() */ bzero(zeros, sizeof(zeros)); for (i = 0; i < nr_iters; i++) { now = time(NULL); - err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &objs[i], &prog_fds[i]); /* test_obj_id.o is a dumb prog. It should never fail * to load. @@ -60,6 +65,17 @@ void test_bpf_obj_id(void) if (CHECK_FAIL(err)) goto done; + prog = bpf_object__find_program_by_title(objs[i], + "raw_tp/sys_enter"); + if (CHECK_FAIL(!prog)) + goto done; + links[i] = bpf_program__attach(prog); + err = libbpf_get_error(links[i]); + if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) { + links[i] = NULL; + goto done; + } + /* Check getting map info */ info_len = sizeof(struct bpf_map_info) * 2; bzero(&map_infos[i], info_len); @@ -107,7 +123,7 @@ void test_bpf_obj_id(void) load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + (prog_infos[i].load_time / nsec_per_sec); if (CHECK(err || - prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || + prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT || info_len != sizeof(struct bpf_prog_info) || (env.jit_enabled && !prog_infos[i].jited_prog_len) || (env.jit_enabled && @@ -120,7 +136,11 @@ void test_bpf_obj_id(void) *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || strcmp((char *)prog_infos[i].name, expected_prog_name), "get-prog-info(fd)", - "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", + "err %d errno %d i %d type %d(%d) info_len %u(%zu) " + "jit_enabled %d jited_prog_len %u xlated_prog_len %u " + "jited_prog %d xlated_prog %d load_time %lu(%lu) " + "uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) " + "name %s(%s)\n", err, errno, i, prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, info_len, sizeof(struct bpf_prog_info), @@ -135,6 +155,33 @@ void test_bpf_obj_id(void) *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, prog_infos[i].name, expected_prog_name)) goto done; + + /* Check getting link info */ + info_len = sizeof(struct bpf_link_info) * 2; + bzero(&link_infos[i], info_len); + link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name; + link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); + err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]), + &link_infos[i], &info_len); + if (CHECK(err || + link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || + link_infos[i].prog_id != prog_infos[i].id || + link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name || + strcmp((char *)link_infos[i].raw_tracepoint.tp_name, + "sys_enter") || + info_len != sizeof(struct bpf_link_info), + "get-link-info(fd)", + "err %d errno %d info_len %u(%zu) type %d(%d) id %d " + "prog_id %d (%d) tp_name %s(%s)\n", + err, errno, + 
info_len, sizeof(struct bpf_link_info), + link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, + link_infos[i].id, + link_infos[i].prog_id, prog_infos[i].id, + (char *)link_infos[i].raw_tracepoint.tp_name, + "sys_enter")) + goto done; + } /* Check bpf_prog_get_next_id() */ @@ -247,7 +294,52 @@ void test_bpf_obj_id(void) "nr_id_found %u(%u)\n", nr_id_found, nr_iters); + /* Check bpf_link_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_link_get_next_id(next_id, &next_id)) { + struct bpf_link_info link_info; + int link_fd, cmp_res; + + info_len = sizeof(link_info); + memset(&link_info, 0, info_len); + + link_fd = bpf_link_get_fd_by_id(next_id); + if (link_fd < 0 && errno == ENOENT) + /* The bpf_link is in the dead row */ + continue; + if (CHECK(link_fd < 0, "get-link-fd(next_id)", + "link_fd %d next_id %u errno %d\n", + link_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (link_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len); + cmp_res = memcmp(&link_info, &link_infos[i], + offsetof(struct bpf_link_info, raw_tracepoint)); + CHECK(err || info_len != sizeof(link_info) || cmp_res, + "check get-link-info(next_id->fd)", + "err %d errno %d info_len %u(%zu) memcmp %d\n", + err, errno, info_len, sizeof(struct bpf_link_info), + cmp_res); + + close(link_fd); + } + CHECK(nr_id_found != nr_iters, + "check total link id found by get_next_id", + "nr_id_found %u(%u)\n", nr_id_found, nr_iters); + done: - for (i = 0; i < nr_iters; i++) + for (i = 0; i < nr_iters; i++) { + bpf_link__destroy(links[i]); bpf_object__close(objs[i]); + } } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c new file mode 100644 index 000000000000..517318f05b1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <linux/err.h> +#include <test_progs.h> +#include "bpf_dctcp.skel.h" + +#define min(a, b) ((a) < (b) ? 
(a) : (b))
+
+static const unsigned int total_bytes = 10 * 1024 * 1024;
+static const struct timeval timeo_sec = { .tv_sec = 10 };
+static const size_t timeo_optlen = sizeof(timeo_sec);
+static int stop, duration;
+
+static int settimeo(int fd)
+{
+	int err;
+
+	err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
+			 timeo_optlen);
+	if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n",
+		  errno))
+		return -1;
+
+	err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
+			 timeo_optlen);
+	if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n",
+		  errno))
+		return -1;
+
+	return 0;
+}
+
+static int settcpca(int fd, const char *tcp_ca)
+{
+	int err;
+
+	err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
+	if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
+		  errno))
+		return -1;
+
+	return 0;
+}
+
+static void *server(void *arg)
+{
+	int lfd = (int)(long)arg, err = 0, fd;
+	ssize_t nr_sent = 0, bytes = 0;
+	char batch[1500];
+
+	/* retry accept() on EINTR; any other failure is fatal */
+	do {
+		fd = accept(lfd, NULL, NULL);
+	} while (fd == -1 && errno == EINTR);
+	if (fd == -1) {
+		err = -errno;
+		goto done;
+	}
+
+	if (settimeo(fd)) {
+		err = -errno;
+		goto done;
+	}
+
+	while (bytes < total_bytes && !READ_ONCE(stop)) {
+		nr_sent = send(fd, &batch,
+			       min(total_bytes - bytes, sizeof(batch)), 0);
+		if (nr_sent == -1 && errno == EINTR)
+			continue;
+		if (nr_sent == -1) {
+			err = -errno;
+			break;
+		}
+		bytes += nr_sent;
+	}
+
+	CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
+	      bytes, total_bytes, nr_sent, errno);
+
+done:
+	if (fd != -1)
+		close(fd);
+	if (err) {
+		WRITE_ONCE(stop, 1);
+		return ERR_PTR(err);
+	}
+	return NULL;
+}
+
+static void do_test(const char *tcp_ca)
+{
+	struct sockaddr_in6 sa6 = {};
+	ssize_t nr_recv = 0, bytes = 0;
+	int lfd = -1, fd = -1;
+	pthread_t srv_thread;
+	socklen_t addrlen = sizeof(sa6);
+	void *thread_ret;
+	char batch[1500];
+	int err;
+
+	WRITE_ONCE(stop, 0);
+
+	lfd = socket(AF_INET6, SOCK_STREAM, 0);
+	if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
+		return;
+	fd = socket(AF_INET6, SOCK_STREAM, 0);
+	if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
+		close(lfd);
+		return;
+	}
+
+	if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) ||
+	    settimeo(lfd) || settimeo(fd))
+		goto done;
+
+	/* bind, listen and start server thread to accept */
+	sa6.sin6_family = AF_INET6;
+	sa6.sin6_addr = in6addr_loopback;
+	err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
+	if (CHECK(err == -1, "bind", "errno:%d\n", errno))
+		goto done;
+	err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
+	if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
+		goto done;
+	err = listen(lfd, 1);
+	if (CHECK(err == -1, "listen", "errno:%d\n", errno))
+		goto done;
+	err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
+	if (CHECK(err != 0, "pthread_create", "err:%d\n", err))
+		goto done;
+
+	/* connect to server */
+	err = connect(fd, (struct sockaddr *)&sa6, addrlen);
+	if (CHECK(err == -1, "connect", "errno:%d\n", errno))
+		goto wait_thread;
+
+	/* recv total_bytes */
+	while (bytes < total_bytes && !READ_ONCE(stop)) {
+		nr_recv = recv(fd, &batch,
+			       min(total_bytes - bytes, sizeof(batch)), 0);
+		if (nr_recv == -1 && errno == EINTR)
+			continue;
+		if (nr_recv == -1)
+			break;
+		bytes += nr_recv;
+	}
+
+	CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
+	      bytes, total_bytes, nr_recv, errno);
+
+wait_thread:
+	WRITE_ONCE(stop, 1);
+	pthread_join(srv_thread, &thread_ret);
+	CHECK(IS_ERR(thread_ret), "pthread_join",
"thread_ret:%ld", + PTR_ERR(thread_ret)); +done: + close(lfd); + close(fd); +} + +static void test_dctcp(void) +{ + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link; + + dctcp_skel = bpf_dctcp__open_and_load(); + if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n")) + return; + + link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); + if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n", + PTR_ERR(link))) { + bpf_dctcp__destroy(dctcp_skel); + return; + } + + do_test("bpf_dctcp"); + + bpf_link__destroy(link); + bpf_dctcp__destroy(dctcp_skel); +} + +void test_bpf_tcp_ca(void) +{ + if (test__start_subtest("dctcp")) + test_dctcp(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 1c01ee2600a9..9486c13af6b2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -15,6 +15,8 @@ static int libbpf_debug_print(enum libbpf_print_level level, return 0; } +extern int extra_prog_load_log_flags; + static int check_load(const char *file, enum bpf_prog_type type) { struct bpf_prog_load_attr attr; @@ -24,7 +26,7 @@ static int check_load(const char *file, enum bpf_prog_type type) memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); attr.file = file; attr.prog_type = type; - attr.log_level = 4; + attr.log_level = 4 | extra_prog_load_log_flags; attr.prog_flags = BPF_F_TEST_RND_HI32; err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); bpf_object__close(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c new file mode 100644 index 000000000000..70e94e783070 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(void) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void test_cgroup_attach_autodetach(void) +{ + __u32 duration = 0, prog_cnt = 4, attach_flags; + int allow_prog[2] = {-1}; + __u32 prog_ids[2] = {0}; + void *ptr = NULL; + int cg = 0, i; + int attempts; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load(); + if (CHECK(allow_prog[i] < 0, "prog_load", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + /* create a cgroup, attach two programs and remember their ids */ + cg = create_and_get_cgroup("/cg_autodetach"); + if (CHECK_FAIL(cg < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg_autodetach"))) + goto err; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (CHECK(bpf_prog_attach(allow_prog[i], cg, + BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog_attach", "prog[%d], errno=%d\n", i, errno)) + goto err; + + /* make sure that programs are attached and run some traffic */ + if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, + prog_ids, &prog_cnt), + "prog_query", "errno=%d\n", errno)) + goto err; + if (CHECK_FAIL(system(PING_CMD))) + goto err; + + /* allocate some memory 
(4Mb) to pin the original cgroup */ + ptr = malloc(4 * (1 << 20)); + if (CHECK_FAIL(!ptr)) + goto err; + + /* close programs and cgroup fd */ + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + close(allow_prog[i]); + allow_prog[i] = -1; + } + + close(cg); + cg = 0; + + /* leave the cgroup and remove it. don't detach programs */ + cleanup_cgroup_environment(); + + /* wait for the asynchronous auto-detachment. + * wait for no more than 5 sec and give up. + */ + for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { + for (attempts = 5; attempts >= 0; attempts--) { + int fd = bpf_prog_get_fd_by_id(prog_ids[i]); + + if (fd < 0) + break; + + /* don't leave the fd open */ + close(fd); + + if (CHECK_FAIL(!attempts)) + goto err; + + sleep(1); + } + } + +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + if (cg) + close(cg); + free(ptr); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c new file mode 100644 index 000000000000..2ff21dbce179 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int map_fd = -1; + +static int prog_load_cnt(int verdict, int val) +{ + int cgroup_storage_fd, percpu_cgroup_storage_fd; + + if (map_fd < 0) + map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + percpu_cgroup_storage_fd = bpf_create_map( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + if (percpu_cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + struct bpf_insn prog[] = { + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, val), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), + + BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + int ret; + + ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + + 
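+	/* the loaded program holds its own references to the maps, so
+	 * drop the local per-cpu storage fd here as well; otherwise it
+	 * leaks on every call
+	 */
+	close(percpu_cgroup_storage_fd);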
close(cgroup_storage_fd); + return ret; +} + +void test_cgroup_attach_multi(void) +{ + __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; + int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts); + int allow_prog[7] = {-1}; + unsigned long long value; + __u32 duration = 0; + int i = 0; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (CHECK(allow_prog[i] < 0, "prog_load", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + cg1 = create_and_get_cgroup("/cg1"); + if (CHECK_FAIL(cg1 < 0)) + goto err; + cg2 = create_and_get_cgroup("/cg1/cg2"); + if (CHECK_FAIL(cg2 < 0)) + goto err; + cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); + if (CHECK_FAIL(cg3 < 0)) + goto err; + cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); + if (CHECK_FAIL(cg4 < 0)) + goto err; + cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); + if (CHECK_FAIL(cg5 < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5"))) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog0_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "fail_same_prog_attach_to_cg1", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog1_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog2_attach_to_cg2_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog3_attach_to_cg3_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog4_attach_to_cg4_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0), + "prog5_attach_to_cg5_none", "errno=%d\n", errno)) + goto err; + + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 32); + + /* query the number of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + /* retrieve prog_ids of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + CHECK_FAIL(attach_flags != 0); + saved_prog_id = prog_ids[0]; + /* check enospc handling */ + prog_ids[0] = 0; + prog_cnt = 2; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt) != -1); + CHECK_FAIL(errno != ENOSPC); + CHECK_FAIL(prog_cnt != 4); + /* check that prog_ids are returned even when buffer is too small */ + CHECK_FAIL(prog_ids[0] != saved_prog_id); + /* retrieve prog_id of single attached prog in cg5 */ + prog_ids[0] = 0; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 1); + CHECK_FAIL(prog_ids[0] != saved_prog_id); + + /* detach bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS), 
+ "prog_detach_from_cg5", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 16); + + /* test replace */ + + attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE; + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_override", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_REPLACE; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_multi", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE; + attach_opts.replace_prog_fd = -1; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_bad_fd", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EBADF); + + /* replacing a program that is not attached to cgroup should fail */ + attach_opts.replace_prog_fd = allow_prog[3]; + if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_ent", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != ENOENT); + + /* replace 1st from the top program */ + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "prog_replace", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 8 + 16); + + /* detach 3rd from bottom program and ping again */ + if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_from_cg3", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS), + "prog3_detach_from_cg3", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 16); + + /* detach 2nd from bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS), + "prog_detach_from_cg4", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 4); + + prog_cnt = 4; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 3); + CHECK_FAIL(attach_flags != 0); + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 0); + +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + close(cg1); + close(cg2); + close(cg3); + close(cg4); + close(cg5); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c new file mode 100644 index 000000000000..9e96f8d87fea --- /dev/null +++ 
b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define FOO "/foo" +#define BAR "/foo/bar/" +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(int verdict) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void test_cgroup_attach_override(void) +{ + int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1; + __u32 duration = 0; + + allow_prog = prog_load(1); + if (CHECK(allow_prog < 0, "prog_load_allow", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + drop_prog = prog_load(0); + if (CHECK(drop_prog < 0, "prog_load_drop", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + foo = test__join_cgroup(FOO); + if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_drop_foo_override", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + bar = test__join_cgroup(BAR); + if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n")) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "prog_detach_foo", + "detach prog from %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + "fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_foo", + "double detach from %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_allow_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + 
"fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_bar_override", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_foo_override", + "attach prog to %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_drop_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + +err: + close(foo); + close(bar); + close(allow_prog); + close(drop_prog); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c new file mode 100644 index 000000000000..6e04f8d1d15b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "test_cgroup_link.skel.h" + +static __u32 duration = 0; +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static struct test_cgroup_link *skel = NULL; + +int ping_and_check(int exp_calls, int exp_alt_calls) +{ + skel->bss->calls = 0; + skel->bss->alt_calls = 0; + CHECK_FAIL(system(PING_CMD)); + if (CHECK(skel->bss->calls != exp_calls, "call_cnt", + "exp %d, got %d\n", exp_calls, skel->bss->calls)) + return -EINVAL; + if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt", + "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls)) + return -EINVAL; + return 0; +} + +void test_cgroup_link(void) +{ + struct { + const char *path; + int fd; + } cgs[] = { + { "/cg1" }, + { "/cg1/cg2" }, + { "/cg1/cg2/cg3" }, + { "/cg1/cg2/cg3/cg4" }, + }; + int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts); + struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link; + __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags; + int i = 0, err, prog_fd; + bool detach_legacy = false; + + skel = test_cgroup_link__open_and_load(); + if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n")) + return; + prog_fd = bpf_program__fd(skel->progs.egress); + + err = setup_cgroup_environment(); + if (CHECK(err, "cg_init", "failed: %d\n", err)) + goto cleanup; + + for (i = 0; i < cg_nr; i++) { + cgs[i].fd = create_and_get_cgroup(cgs[i].path); + if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd)) + goto cleanup; + } + + err = join_cgroup(cgs[last_cg].path); + if (CHECK(err, "cg_join", "fail: %d\n", err)) + goto cleanup; + + for (i = 0; i < cg_nr; i++) { + links[i] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[i].fd); + if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n", + i, PTR_ERR(links[i]))) + goto cleanup; + } + + ping_and_check(cg_nr, 0); + + /* query the number of effective progs and attach flags in root cg */ + err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL, + &prog_cnt); + CHECK_FAIL(err); + CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); + if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt)) + goto cleanup; + + /* query the number of effective progs in last cg */ + err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, NULL, + &prog_cnt); + CHECK_FAIL(err); + 
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); + if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", + cg_nr, prog_cnt)) + goto cleanup; + + /* query the effective prog IDs in last cg */ + err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt); + CHECK_FAIL(err); + CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); + if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", + cg_nr, prog_cnt)) + goto cleanup; + for (i = 1; i < prog_cnt; i++) { + CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check", + "idx %d, prev id %d, cur id %d\n", + i, prog_ids[i - 1], prog_ids[i]); + } + + /* detach bottom program and ping again */ + bpf_link__destroy(links[last_cg]); + links[last_cg] = NULL; + + ping_and_check(cg_nr - 1, 0); + + /* mix in with non link-based multi-attachments */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI); + if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = true; + + links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n", + PTR_ERR(links[last_cg]))) + goto cleanup; + + ping_and_check(cg_nr + 1, 0); + + /* detach link */ + bpf_link__destroy(links[last_cg]); + links[last_cg] = NULL; + + /* detach legacy */ + err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = false; + + /* attach legacy exclusive prog attachment */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, 0); + if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = true; + + /* attempt to mix in with multi-attach bpf_link */ + tmp_link = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) { + bpf_link__destroy(tmp_link); + goto cleanup; + } + + ping_and_check(cg_nr, 0); + + /* detach */ + err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = false; + + ping_and_check(cg_nr - 1, 0); + + /* attach back link-based one */ + links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n", + PTR_ERR(links[last_cg]))) + goto cleanup; + + ping_and_check(cg_nr, 0); + + /* check legacy exclusive prog can't be attached */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, 0); + if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) { + bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + goto cleanup; + } + + /* replace BPF programs inside their links for all but first link */ + for (i = 1; i < cg_nr; i++) { + err = bpf_link__update_program(links[i], skel->progs.egress_alt); + if (CHECK(err, "prog_upd", "link #%d\n", i)) + goto cleanup; + } + + ping_and_check(1, cg_nr - 1); + + /* Attempt program update with wrong expected BPF program */ + link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt); + link_upd_opts.flags = BPF_F_REPLACE; + err = bpf_link_update(bpf_link__fd(links[0]), + bpf_program__fd(skel->progs.egress_alt), + &link_upd_opts); + if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1", + "unexpectedly succeeded, err %d, errno %d\n", err, -errno)) + 
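+		/* a mismatched old_prog_fd must make BPF_F_REPLACE fail
+		 * with EPERM and leave the link's program untouched
+		 */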
goto cleanup; + + /* Compare-exchange single link program from egress to egress_alt */ + link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress); + link_upd_opts.flags = BPF_F_REPLACE; + err = bpf_link_update(bpf_link__fd(links[0]), + bpf_program__fd(skel->progs.egress_alt), + &link_upd_opts); + if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno)) + goto cleanup; + + /* ping */ + ping_and_check(0, cg_nr); + + /* close cgroup FDs before detaching links */ + for (i = 0; i < cg_nr; i++) { + if (cgs[i].fd > 0) { + close(cgs[i].fd); + cgs[i].fd = -1; + } + } + + /* BPF programs should still get called */ + ping_and_check(0, cg_nr); + + /* leave cgroup and remove them, don't detach programs */ + cleanup_cgroup_environment(); + + /* BPF programs should have been auto-detached */ + ping_and_check(0, 0); + +cleanup: + if (detach_legacy) + bpf_prog_detach2(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS); + + for (i = 0; i < cg_nr; i++) { + if (!IS_ERR(links[i])) + bpf_link__destroy(links[i]); + } + test_cgroup_link__destroy(skel); + + for (i = 0; i < cg_nr; i++) { + if (cgs[i].fd > 0) + close(cgs[i].fd); + } + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c new file mode 100644 index 000000000000..9229db2f5ca5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" + +static int verify_ports(int family, int fd, + __u16 expected_local, __u16 expected_peer) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + __u16 port; + + if (getsockname(fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + return -1; + } + + if (family == AF_INET) + port = ((struct sockaddr_in *)&addr)->sin_port; + else + port = ((struct sockaddr_in6 *)&addr)->sin6_port; + + if (ntohs(port) != expected_local) { + log_err("Unexpected local port %d, expected %d", ntohs(port), + expected_local); + return -1; + } + + if (getpeername(fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get peer addr"); + return -1; + } + + if (family == AF_INET) + port = ((struct sockaddr_in *)&addr)->sin_port; + else + port = ((struct sockaddr_in6 *)&addr)->sin6_port; + + if (ntohs(port) != expected_peer) { + log_err("Unexpected peer port %d, expected %d", ntohs(port), + expected_peer); + return -1; + } + + return 0; +} + +static int run_test(int cgroup_fd, int server_fd, int family, int type) +{ + bool v4 = family == AF_INET; + __u16 expected_local_port = v4 ? 22222 : 22223; + __u16 expected_peer_port = 60000; + struct bpf_prog_load_attr attr = { + .file = v4 ? "./connect_force_port4.o" : + "./connect_force_port6.o", + }; + struct bpf_program *prog; + struct bpf_object *obj; + int xlate_fd, fd, err; + __u32 duration = 0; + + err = bpf_prog_load_xattr(&attr, &obj, &xlate_fd); + if (err) { + log_err("Failed to load BPF object"); + return -1; + } + + prog = bpf_object__find_program_by_title(obj, v4 ? + "cgroup/connect4" : + "cgroup/connect6"); + if (CHECK(!prog, "find_prog", "connect prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? + BPF_CGROUP_INET4_CONNECT : + BPF_CGROUP_INET6_CONNECT, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + prog = bpf_object__find_program_by_title(obj, v4 ? 
+ "cgroup/getpeername4" : + "cgroup/getpeername6"); + if (CHECK(!prog, "find_prog", "getpeername prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? + BPF_CGROUP_INET4_GETPEERNAME : + BPF_CGROUP_INET6_GETPEERNAME, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + prog = bpf_object__find_program_by_title(obj, v4 ? + "cgroup/getsockname4" : + "cgroup/getsockname6"); + if (CHECK(!prog, "find_prog", "getsockname prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? + BPF_CGROUP_INET4_GETSOCKNAME : + BPF_CGROUP_INET6_GETSOCKNAME, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + fd = connect_to_fd(server_fd, 0); + if (fd < 0) { + err = -1; + goto close_bpf_object; + } + + err = verify_ports(family, fd, expected_local_port, + expected_peer_port); + close(fd); + +close_bpf_object: + bpf_object__close(obj); + return err; +} + +void test_connect_force_port(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/connect_force_port"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM)); + close(server_fd); + + server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM)); + close(server_fd); + + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM)); + close(server_fd); + + server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM)); + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c new file mode 100644 index 000000000000..83493bd5745c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include "fentry_test.skel.h" +#include "fexit_test.skel.h" + +void test_fentry_fexit(void) +{ + struct fentry_test *fentry_skel = NULL; + struct fexit_test *fexit_skel = NULL; + __u64 *fentry_res, *fexit_res; + __u32 duration = 0, retval; + int err, prog_fd, i; + + fentry_skel = fentry_test__open_and_load(); + if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) + goto close_prog; + fexit_skel = fexit_test__open_and_load(); + if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) + goto close_prog; + + err = fentry_test__attach(fentry_skel); + if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) + goto close_prog; + err = fexit_test__attach(fexit_skel); + if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) + goto close_prog; + + prog_fd = bpf_program__fd(fexit_skel->progs.test1); + err = bpf_prog_test_run(prog_fd, 1, NULL, 0, + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + 
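+	/* the six per-test result flags sit back-to-back in each
+	 * skeleton's .bss, so they can be walked as flat __u64 arrays
+	 */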
+	fentry_res = (__u64 *)fentry_skel->bss;
+	fexit_res = (__u64 *)fexit_skel->bss;
+	for (i = 0; i < 6; i++) {
+		CHECK(fentry_res[i] != 1, "result",
+		      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
+		CHECK(fexit_res[i] != 1, "result",
+		      "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]);
+	}
+
+close_prog:
+	fentry_test__destroy(fentry_skel);
+	fexit_test__destroy(fexit_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
new file mode 100644
index 000000000000..1bf8a16f9da9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+#include "fentry_test.skel.h"
+
+static int fentry_test(struct fentry_test *fentry_skel)
+{
+	int err, prog_fd, i;
+	__u32 duration = 0, retval;
+	struct bpf_link *link;
+	__u64 *result;
+
+	err = fentry_test__attach(fentry_skel);
+	if (!ASSERT_OK(err, "fentry_attach"))
+		return err;
+
+	/* Check that already linked program can't be attached again. */
+	link = bpf_program__attach(fentry_skel->progs.test1);
+	if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
+		return -1;
+
+	prog_fd = bpf_program__fd(fentry_skel->progs.test1);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(retval, 0, "test_run");
+
+	result = (__u64 *)fentry_skel->bss;
+	for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) {
+		if (!ASSERT_EQ(result[i], 1, "fentry_result"))
+			return -1;
+	}
+
+	fentry_test__detach(fentry_skel);
+
+	/* zero results for re-attach test */
+	memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
+	return 0;
+}
+
+void test_fentry_test(void)
+{
+	struct fentry_test *fentry_skel = NULL;
+	int err;
+
+	fentry_skel = fentry_test__open_and_load();
+	if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
+		goto cleanup;
+
+	err = fentry_test(fentry_skel);
+	ASSERT_OK(err, "fentry_first_attach");
+
+cleanup:
+	fentry_test__destroy(fentry_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
new file mode 100644
index 000000000000..28d2a9d14b85
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#define PROG_CNT 3
+
+void test_fexit_bpf2bpf(void)
+{
+	const char *prog_name[PROG_CNT] = {
+		"fexit/test_pkt_access",
+		"fexit/test_pkt_access_subprog1",
+		"fexit/test_pkt_access_subprog2",
+	};
+	struct bpf_object *obj = NULL, *pkt_obj;
+	int err, pkt_fd, i;
+	struct bpf_link *link[PROG_CNT] = {};
+	struct bpf_program *prog[PROG_CNT];
+	__u32 duration, retval;
+	struct bpf_map *data_map;
+	const int zero = 0;
+	u64 result[PROG_CNT];
+
+	err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
+			    &pkt_obj, &pkt_fd);
+	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+		return;
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+			    .attach_prog_fd = pkt_fd,
+			   );
+
+	obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
+	if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+		  "failed to open fexit_bpf2bpf: %ld\n",
+		  PTR_ERR(obj)))
+		goto close_prog;
+
+	err = bpf_object__load(obj);
+	if (CHECK(err, "obj_load", "err %d\n", err))
+		goto close_prog;
+
+	for (i = 0; i <
PROG_CNT; i++) { + prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i])) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < PROG_CNT; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + if (!IS_ERR_OR_NULL(obj)) + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c new file mode 100644 index 000000000000..3b9dbf7433f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +/* x86-64 fits 55 JITed and 43 interpreted progs into half page */ +#define CNT 40 + +void test_fexit_stress(void) +{ + char test_skb[128] = {}; + int fexit_fd[CNT] = {}; + int link_fd[CNT] = {}; + __u32 duration = 0; + char error[4096]; + __u32 prog_ret; + int err, i, filter_fd; + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_TRACING, + .license = "GPL", + .insns = trace_program, + .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + .expected_attach_type = BPF_TRACE_FEXIT, + }; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr skb_load_attr = { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .license = "GPL", + .insns = skb_program, + .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), + }; + + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", + load_attr.expected_attach_type); + if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) + goto out; + load_attr.attach_btf_id = err; + + for (i = 0; i < CNT; i++) { + fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(fexit_fd[i] < 0, "fexit loaded", + "failed: %d errno %d\n", fexit_fd[i], errno)) + goto out; + link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); + if (CHECK(link_fd[i] < 0, "fexit attach failed", + "prog %d failed: %d err %d\n", i, link_fd[i], errno)) + goto out; + } + + filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out; + + err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, + 0, &prog_ret, 0); + close(filter_fd); + CHECK_FAIL(err); +out: + for (i = 0; i < CNT; i++) { + if (link_fd[i]) + close(link_fd[i]); + if (fexit_fd[i]) + close(fexit_fd[i]); + } +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c new file mode 100644 index 000000000000..62b2639f937b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include "fexit_test.skel.h" + +static int fexit_test(struct fexit_test *fexit_skel) +{ + int err, prog_fd, i; + __u32 duration = 0, retval; + struct bpf_link *link; + __u64 *result; + + err = fexit_test__attach(fexit_skel); + if (!ASSERT_OK(err, "fexit_attach")) + return err; + + /* Check that already linked program can't be attached again. */ + link = bpf_program__attach(fexit_skel->progs.test1); + if (!ASSERT_ERR_PTR(link, "fexit_attach_link")) + return -1; + + prog_fd = bpf_program__fd(fexit_skel->progs.test1); + err = bpf_prog_test_run(prog_fd, 1, NULL, 0, + NULL, NULL, &retval, &duration); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(retval, 0, "test_run"); + + result = (__u64 *)fexit_skel->bss; + for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { + if (!ASSERT_EQ(result[i], 1, "fexit_result")) + return -1; + } + + fexit_test__detach(fexit_skel); + + /* zero results for re-attach test */ + memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss)); + return 0; +} + +void test_fexit_test(void) +{ + struct fexit_test *fexit_skel = NULL; + int err; + + fexit_skel = fexit_test__open_and_load(); + if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) + goto cleanup; + + err = fexit_test(fexit_skel); + ASSERT_OK(err, "fexit_first_attach"); + +cleanup: + fexit_test__destroy(fexit_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 92563898867c..ea14e3ece812 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> #include <error.h> #include <linux/if.h> #include <linux/if_tun.h> #include <sys/uio.h> +#include "bpf_flow.skel.h" + #ifndef IP_MF #define IP_MF 0x2000 #endif @@ -100,6 +103,7 @@ struct test { #define VLAN_HLEN 4 +static __u32 duration; struct test tests[] = { { .name = "ipv4", @@ -443,17 +447,130 @@ static int ifup(const char *ifname) return 0; } +static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) +{ + int i, err, map_fd, prog_fd; + struct bpf_program *prog; + char prog_name[32]; + + map_fd = bpf_map__fd(prog_array); + if (map_fd < 0) + return -1; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (!prog) + return -1; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) + return -1; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (err) + return -1; + } + return 0; +} + +static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) +{ + int i, err, keys_fd; + + keys_fd = bpf_map__fd(keys); + if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) + return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + /* Keep in sync with 'flags' from eth_get_headlen. 
*/ + __u32 eth_get_headlen_flags = + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; + struct bpf_prog_test_run_attr tattr = {}; + struct bpf_flow_keys flow_keys = {}; + __u32 key = (__u32)(tests[i].keys.sport) << 16 | + tests[i].keys.dport; + + /* For skb-less case we can't pass input flags; run + * only the tests that have a matching set of flags. + */ + + if (tests[i].flags != eth_get_headlen_flags) + continue; + + err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); + CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); + + err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); + CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); + + CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + + err = bpf_map_delete_elem(keys_fd, &key); + CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); + } +} + +static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) + return; + + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno)) + return; + + run_tests_skb_less(tap_fd, skel->maps.last_dissection); + + err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); + CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno); +} + +static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd) +{ + struct bpf_link *link; + int err, net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno)) + return; + + link = bpf_program__attach_netns(skel->progs._dissect, net_fd); + if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link))) + goto out_close; + + run_tests_skb_less(tap_fd, skel->maps.last_dissection); + + err = bpf_link__destroy(link); + CHECK(err, "bpf_link__destroy", "err %d\n", err); +out_close: + close(net_fd); +} + void test_flow_dissector(void) { int i, err, prog_fd, keys_fd = -1, tap_fd; - struct bpf_object *obj; - __u32 duration = 0; + struct bpf_flow *skel; - err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", - "jmp_table", "last_dissection", &prog_fd, &keys_fd); - if (CHECK_FAIL(err)) + skel = bpf_flow__open_and_load(); + if (CHECK(!skel, "skel", "failed to open/load skeleton\n")) return; + prog_fd = bpf_program__fd(skel->progs._dissect); + if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) + goto out_destroy_skel; + keys_fd = bpf_map__fd(skel->maps.last_dissection); + if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) + goto out_destroy_skel; + err = init_prog_array(skel->obj, skel->maps.jmp_table); + if (CHECK(err, "init_prog_array", "err %d\n", err)) + goto out_destroy_skel; + for (i = 0; i < ARRAY_SIZE(tests); i++) { struct bpf_flow_keys flow_keys; struct bpf_prog_test_run_attr tattr = { @@ -486,43 +603,17 @@ void test_flow_dissector(void) * via BPF map in this case. */ - err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); - CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno); - tap_fd = create_tap("tap0"); CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); err = ifup("tap0"); CHECK(err, "ifup", "err %d errno %d\n", err, errno); - for (i = 0; i < ARRAY_SIZE(tests); i++) { - /* Keep in sync with 'flags' from eth_get_headlen. 
*/ - __u32 eth_get_headlen_flags = - BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; - struct bpf_prog_test_run_attr tattr = {}; - struct bpf_flow_keys flow_keys = {}; - __u32 key = (__u32)(tests[i].keys.sport) << 16 | - tests[i].keys.dport; + /* Test direct prog attachment */ + test_skb_less_prog_attach(skel, tap_fd); + /* Test indirect prog attachment via link */ + test_skb_less_link_create(skel, tap_fd); - /* For skb-less case we can't pass input flags; run - * only the tests that have a matching set of flags. - */ - - if (tests[i].flags != eth_get_headlen_flags) - continue; - - err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); - CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); - - err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); - CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); - - CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); - CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); - - err = bpf_map_delete_elem(keys_fd, &key); - CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); - } - - bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); - bpf_object__close(obj); + close(tap_fd); +out_destroy_skel: + bpf_flow__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index dc5ef155ec28..0e8a4d2f023d 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_flow_dissector_load_bytes(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c new file mode 100644 index 000000000000..15cb554a66d8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that the flow_dissector program can be updated with a single + * syscall by attaching a new program that replaces the existing one. + * + * Corner case - the same program cannot be attached twice. + */ + +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <sched.h> +#include <stdbool.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <linux/bpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" + +static int init_net = -1; + +static __u32 query_attached_prog_id(int netns) +{ + __u32 prog_ids[1] = {}; + __u32 prog_cnt = ARRAY_SIZE(prog_ids); + int err; + + err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL, + prog_ids, &prog_cnt); + if (CHECK_FAIL(err)) { + perror("bpf_prog_query"); + return 0; + } + + return prog_cnt == 1 ? 
prog_ids[0] : 0;
+}
+
+static bool prog_is_attached(int netns)
+{
+	return query_attached_prog_id(netns) > 0;
+}
+
+static int load_prog(enum bpf_prog_type type)
+{
+	struct bpf_insn prog[] = {
+		BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
+		BPF_EXIT_INSN(),
+	};
+	int fd;
+
+	fd = bpf_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+	if (CHECK_FAIL(fd < 0))
+		perror("bpf_load_program");
+
+	return fd;
+}
+
+static __u32 query_prog_id(int prog)
+{
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	int err;
+
+	err = bpf_obj_get_info_by_fd(prog, &info, &info_len);
+	if (CHECK_FAIL(err || info_len != sizeof(info))) {
+		perror("bpf_obj_get_info_by_fd");
+		return 0;
+	}
+
+	return info.id;
+}
+
+static int unshare_net(int old_net)
+{
+	int err, new_net;
+
+	err = unshare(CLONE_NEWNET);
+	if (CHECK_FAIL(err)) {
+		perror("unshare(CLONE_NEWNET)");
+		return -1;
+	}
+	new_net = open("/proc/self/ns/net", O_RDONLY);
+	if (CHECK_FAIL(new_net < 0)) {
+		perror("open(/proc/self/ns/net)");
+		setns(old_net, CLONE_NEWNET);
+		return -1;
+	}
+	return new_net;
+}
+
+static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
+{
+	int err;
+
+	err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
+	if (CHECK_FAIL(err)) {
+		perror("bpf_prog_attach(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect success when attaching a different program */
+	err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
+	if (CHECK_FAIL(err)) {
+		perror("bpf_prog_attach(prog2) #1");
+		goto out_detach;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
+
+	/* Expect failure when attaching the same program twice */
+	err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
+	if (CHECK_FAIL(!err || errno != EINVAL))
+		perror("bpf_prog_attach(prog2) #2");
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
+
+out_detach:
+	err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+	if (CHECK_FAIL(err))
+		perror("bpf_prog_detach");
+	CHECK_FAIL(prog_is_attached(netns));
+}
+
+static void test_link_create_link_create(int netns, int prog1, int prog2)
+{
+	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
+	int link1, link2;
+
+	link1 = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts);
+	if (CHECK_FAIL(link1 < 0)) {
+		perror("bpf_link_create(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect failure creating link when another link exists */
+	errno = 0;
+	link2 = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
+	if (CHECK_FAIL(link2 != -1 || errno != E2BIG))
+		perror("bpf_link_create(prog2) expected E2BIG");
+	if (link2 != -1)
+		close(link2);
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	close(link1);
+	CHECK_FAIL(prog_is_attached(netns));
+}
+
+static void test_prog_attach_link_create(int netns, int prog1, int prog2)
+{
+	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
+	int err, link;
+
+	err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
+	if (CHECK_FAIL(err)) {
+		perror("bpf_prog_attach(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect failure creating link when prog attached */
+	errno = 0;
+	link = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
+	if (CHECK_FAIL(link != -1 || errno != EEXIST))
+		perror("bpf_link_create(prog2) expected EEXIST");
+	if (link != -1)
+		close(link);
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
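+	/* Detach via the legacy API; the FD argument to bpf_prog_detach()
+	 * is not used for BPF_FLOW_DISSECTOR, which is why these tests
+	 * pass 0 and -1 interchangeably.
+	 */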
+ err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) + perror("bpf_prog_detach"); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_prog_attach(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure attaching prog when link exists */ + errno = 0; + err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(!err || errno != EEXIST)) + perror("bpf_prog_attach(prog2) expected EEXIST"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_prog_detach(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure detaching prog when link exists */ + errno = 0; + err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_prog_detach expected EINVAL"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_prog_attach_detach_query(int netns, int prog1, int prog2) +{ + int err; + + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) { + perror("bpf_prog_detach"); + return; + } + + /* Expect no prog attached after successful detach */ + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_close_query(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + /* Expect no prog attached after closing last link FD */ + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_no_old_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success replacing the prog when old prog not specified */ + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(err)) + perror("bpf_link_update"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_replace_old_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); 
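+	/* BPF_F_REPLACE makes the update conditional: it only succeeds
+	 * when old_prog_fd refers to the program currently attached,
+	 * as the invalid-opts test below verifies from the other side.
+	 */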
+	int err, link;
+
+	link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+	if (CHECK_FAIL(link < 0)) {
+		perror("bpf_link_create(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect update with BPF_F_REPLACE and matching old prog to succeed */
+	update_opts.flags = BPF_F_REPLACE;
+	update_opts.old_prog_fd = prog1;
+	err = bpf_link_update(link, prog2, &update_opts);
+	if (CHECK_FAIL(err))
+		perror("bpf_link_update");
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
+
+	close(link);
+	CHECK_FAIL(prog_is_attached(netns));
+}
+
+static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
+{
+	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+	int err, link;
+
+	link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+	if (CHECK_FAIL(link < 0)) {
+		perror("bpf_link_create(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect update to fail w/ old prog FD but w/o F_REPLACE */
+	errno = 0;
+	update_opts.flags = 0;
+	update_opts.old_prog_fd = prog1;
+	err = bpf_link_update(link, prog2, &update_opts);
+	if (CHECK_FAIL(!err || errno != EINVAL)) {
+		perror("bpf_link_update expected EINVAL");
+		goto out_close;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect update to fail on old prog FD mismatch */
+	errno = 0;
+	update_opts.flags = BPF_F_REPLACE;
+	update_opts.old_prog_fd = prog2;
+	err = bpf_link_update(link, prog2, &update_opts);
+	if (CHECK_FAIL(!err || errno != EPERM)) {
+		perror("bpf_link_update expected EPERM");
+		goto out_close;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect update to fail for invalid old prog FD */
+	errno = 0;
+	update_opts.flags = BPF_F_REPLACE;
+	update_opts.old_prog_fd = -1;
+	err = bpf_link_update(link, prog2, &update_opts);
+	if (CHECK_FAIL(!err || errno != EBADF)) {
+		perror("bpf_link_update expected EBADF");
+		goto out_close;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect update to fail with invalid flags */
+	errno = 0;
+	update_opts.flags = BPF_F_ALLOW_MULTI;
+	update_opts.old_prog_fd = 0;
+	err = bpf_link_update(link, prog2, &update_opts);
+	if (CHECK_FAIL(!err || errno != EINVAL))
+		perror("bpf_link_update expected EINVAL");
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+out_close:
+	close(link);
+	CHECK_FAIL(prog_is_attached(netns));
+}
+
+static void test_link_update_invalid_prog(int netns, int prog1, int prog2)
+{
+	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+	int err, link, prog3;
+
+	link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+	if (CHECK_FAIL(link < 0)) {
+		perror("bpf_link_create(prog1)");
+		return;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	/* Expect failure when new prog FD is not valid */
+	errno = 0;
+	update_opts.flags = 0;
+	update_opts.old_prog_fd = 0;
+	err = bpf_link_update(link, -1, &update_opts);
+	if (CHECK_FAIL(!err || errno != EBADF)) {
+		perror("bpf_link_update expected EBADF");
+		goto out_close_link;
+	}
+	CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+	prog3 = load_prog(BPF_PROG_TYPE_SOCKET_FILTER);
+	if (prog3 < 0)
+		goto out_close_link;
+
+	/* Expect failure when new prog FD type doesn't match */
+	errno = 0;
+	
update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog3, &update_opts); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_link_update expected EINVAL"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(prog3); +out_close_link: + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_netns_gone(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link, old_net; + + old_net = netns; + netns = unshare_net(old_net); + if (netns < 0) + return; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(netns); + err = setns(old_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(CLONE_NEWNET)"); + close(link); + return; + } + + /* Expect failure when netns destroyed */ + errno = 0; + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != ENOLINK)) + perror("bpf_link_update"); + + close(link); +} + +static void test_link_get_info(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + struct bpf_link_info info = {}; + struct stat netns_stat = {}; + __u32 info_len, link_id; + int err, link, old_net; + + old_net = netns; + netns = unshare_net(old_net); + if (netns < 0) + return; + + err = fstat(netns, &netns_stat); + if (CHECK_FAIL(err)) { + perror("stat(netns)"); + goto out_resetns; + } + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + goto out_resetns; + } + + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + perror("bpf_obj_get_info"); + goto out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect link info to be sane and match prog and netns details */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id == 0); + CHECK_FAIL(info.prog_id != query_prog_id(prog1)); + CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(err)) { + perror("bpf_link_update(prog2)"); + goto out_unlink; + } + + link_id = info.id; + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + perror("bpf_obj_get_info"); + goto out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect no info change after update except in prog id */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id != link_id); + CHECK_FAIL(info.prog_id != query_prog_id(prog2)); + CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + + /* Leave netns link is attached to and close last FD to it */ + err = setns(old_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(NEWNET)"); + goto out_unlink; + } + close(netns); + old_net = -1; + netns = -1; + + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + 
perror("bpf_obj_get_info"); + goto out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect netns_ino to change to 0 */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id != link_id); + CHECK_FAIL(info.prog_id != query_prog_id(prog2)); + CHECK_FAIL(info.netns.netns_ino != 0); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + +out_unlink: + close(link); +out_resetns: + if (old_net != -1) + setns(old_net, CLONE_NEWNET); + if (netns != -1) + close(netns); +} + +static void run_tests(int netns) +{ + struct test { + const char *test_name; + void (*test_func)(int netns, int prog1, int prog2); + } tests[] = { + { "prog attach, prog attach", + test_prog_attach_prog_attach }, + { "link create, link create", + test_link_create_link_create }, + { "prog attach, link create", + test_prog_attach_link_create }, + { "link create, prog attach", + test_link_create_prog_attach }, + { "link create, prog detach", + test_link_create_prog_detach }, + { "prog attach, detach, query", + test_prog_attach_detach_query }, + { "link create, close, query", + test_link_create_close_query }, + { "link update no old prog", + test_link_update_no_old_prog }, + { "link update with replace old prog", + test_link_update_replace_old_prog }, + { "link update invalid opts", + test_link_update_invalid_opts }, + { "link update invalid prog", + test_link_update_invalid_prog }, + { "link update netns gone", + test_link_update_netns_gone }, + { "link get info", + test_link_get_info }, + }; + int i, progs[2] = { -1, -1 }; + char test_name[80]; + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + progs[i] = load_prog(BPF_PROG_TYPE_FLOW_DISSECTOR); + if (progs[i] < 0) + goto out_close; + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + snprintf(test_name, sizeof(test_name), + "flow dissector %s%s", + tests[i].test_name, + netns == init_net ? " (init_net)" : ""); + if (test__start_subtest(test_name)) + tests[i].test_func(netns, progs[0], progs[1]); + } +out_close: + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (progs[i] != -1) + CHECK_FAIL(close(progs[i])); + } +} + +void test_flow_dissector_reattach(void) +{ + int err, new_net, saved_net; + + saved_net = open("/proc/self/ns/net", O_RDONLY); + if (CHECK_FAIL(saved_net < 0)) { + perror("open(/proc/self/ns/net"); + return; + } + + init_net = open("/proc/1/ns/net", O_RDONLY); + if (CHECK_FAIL(init_net < 0)) { + perror("open(/proc/1/ns/net)"); + goto out_close; + } + + err = setns(init_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(/proc/1/ns/net)"); + goto out_close; + } + + if (prog_is_attached(init_net)) { + test__skip(); + printf("Can't test with flow dissector attached to init_net\n"); + goto out_setns; + } + + /* First run tests in root network namespace */ + run_tests(init_net); + + /* Then repeat tests in a non-root namespace */ + new_net = unshare_net(init_net); + if (new_net < 0) + goto out_setns; + run_tests(new_net); + close(new_net); + +out_setns: + /* Move back to netns we started in. 
*/ + err = setns(saved_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) + perror("setns(/proc/self/ns/net)"); + +out_close: + close(init_net); + close(saved_net); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index eba9a970703b..925722217edf 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) void test_get_stack_raw_tp(void) { const char *file = "./test_get_stack_rawtp.o"; + const char *file_err = "./test_get_stack_rawtp_err.o"; const char *prog_name = "raw_tracepoint/sys_enter"; int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP; struct perf_buffer_opts pb_opts = {}; @@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void) struct bpf_map *map; cpu_set_t cpu_set; + err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index c680926fce73..e3cb62b0a110 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> static void test_global_data_number(struct bpf_object *obj, __u32 duration) { diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index eaf64595be88..c2d373e294bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> static void test_l4lb(const char *file) { diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c new file mode 100644 index 000000000000..a743288cf384 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <sys/stat.h> + +#include "test_link_pinning.skel.h" + +static int duration = 0; + +void test_link_pinning_subtest(struct bpf_program *prog, + struct test_link_pinning__bss *bss) +{ + const char *link_pin_path = "/sys/fs/bpf/pinned_link_test"; + struct stat statbuf = {}; + struct bpf_link *link; + int err, i; + + link = bpf_program__attach(prog); + if (CHECK(IS_ERR(link), "link_attach", "err: %ld\n", PTR_ERR(link))) + goto cleanup; + + bss->in = 1; + usleep(1); + CHECK(bss->out != 1, "res_check1", "exp %d, got %d\n", 1, bss->out); + + /* pin link */ + err = bpf_link__pin(link, link_pin_path); + if (CHECK(err, "link_pin", "err: %d\n", err)) + goto cleanup; + + CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path1", + "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link)); + + /* check that link was pinned */ + err = stat(link_pin_path, &statbuf); + if (CHECK(err, "stat_link", "err %d errno %d\n", err, errno)) + goto cleanup; + + bss->in = 2; + usleep(1); + CHECK(bss->out != 2, "res_check2", "exp %d, got %d\n", 2, bss->out); + 
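+	/* A pin in BPFFS holds its own reference on the link, so the
+	 * program must stay attached after the link FD is destroyed below.
+	 */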
+ /* destroy link, pinned link should keep program attached */ + bpf_link__destroy(link); + link = NULL; + + bss->in = 3; + usleep(1); + CHECK(bss->out != 3, "res_check3", "exp %d, got %d\n", 3, bss->out); + + /* re-open link from BPFFS */ + link = bpf_link__open(link_pin_path); + if (CHECK(IS_ERR(link), "link_open", "err: %ld\n", PTR_ERR(link))) + goto cleanup; + + CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2", + "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link)); + + /* unpin link from BPFFS, program still attached */ + err = bpf_link__unpin(link); + if (CHECK(err, "link_unpin", "err: %d\n", err)) + goto cleanup; + + /* still active, as we have FD open now */ + bss->in = 4; + usleep(1); + CHECK(bss->out != 4, "res_check4", "exp %d, got %d\n", 4, bss->out); + + bpf_link__destroy(link); + link = NULL; + + /* Validate it's finally detached. + * Actual detachment might get delayed a bit, so there is no reliable + * way to validate it immediately here, let's count up for long enough + * and see if eventually output stops being updated + */ + for (i = 5; i < 10000; i++) { + bss->in = i; + usleep(1); + if (bss->out == i - 1) + break; + } + CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i); + +cleanup: + if (!IS_ERR(link)) + bpf_link__destroy(link); +} + +void test_link_pinning(void) +{ + struct test_link_pinning* skel; + + skel = test_link_pinning__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + if (test__start_subtest("pin_raw_tp")) + test_link_pinning_subtest(skel->progs.raw_tp_prog, skel->bss); + if (test__start_subtest("pin_tp_btf")) + test_link_pinning_subtest(skel->progs.tp_btf_prog, skel->bss); + + test_link_pinning__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index 8f91f1881d11..ce17b1ed8709 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -1,5 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> + +static void *spin_lock_thread(void *arg) +{ + __u32 duration, retval; + int err, prog_fd = *(u32 *) arg; + + err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + pthread_exit(arg); +} static void *parallel_map_access(void *arg) { diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c new file mode 100644 index 000000000000..4972f92205c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_ptr_kern.skel.h" + +void test_map_ptr(void) +{ + struct map_ptr_kern *skel; + __u32 duration = 0, retval; + char buf[128]; + int err; + int page_size = getpagesize(); + + skel = map_ptr_kern__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size); + if (!ASSERT_OK(err, "bpf_map__set_max_entries")) + goto cleanup; + + err = map_ptr_kern__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + skel->bss->page_size = page_size; + + err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4, + sizeof(pkt_v4), buf, NULL, &retval, NULL); + + if 
(CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) + goto cleanup; + + if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval, + skel->bss->g_map_type, skel->bss->g_line)) + goto cleanup; + +cleanup: + map_ptr_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c new file mode 100644 index 000000000000..97fec70c600b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020 Google LLC. + */ + +#include <test_progs.h> +#include "modify_return.skel.h" + +#define LOWER(x) ((x) & 0xffff) +#define UPPER(x) ((x) >> 16) + + +static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret) +{ + struct modify_return *skel = NULL; + int err, prog_fd; + __u32 duration = 0, retval; + __u16 side_effect; + __s16 ret; + + skel = modify_return__open_and_load(); + if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n")) + goto cleanup; + + err = modify_return__attach(skel); + if (CHECK(err, "modify_return", "attach failed: %d\n", err)) + goto cleanup; + + skel->bss->input_retval = input_retval; + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0, + &retval, &duration); + + CHECK(err, "test_run", "err %d errno %d\n", err, errno); + + side_effect = UPPER(retval); + ret = LOWER(retval); + + CHECK(ret != want_ret, "test_run", + "unexpected ret: %d, expected: %d\n", ret, want_ret); + CHECK(side_effect != want_side_effect, "modify_return", + "unexpected side_effect: %d\n", side_effect); + + CHECK(skel->bss->fentry_result != 1, "modify_return", + "fentry failed\n"); + CHECK(skel->bss->fexit_result != 1, "modify_return", + "fexit failed\n"); + CHECK(skel->bss->fmod_ret_result != 1, "modify_return", + "fmod_ret failed\n"); + +cleanup: + modify_return__destroy(skel); +} + +void test_modify_return(void) +{ + run_test(0 /* input_retval */, + 1 /* want_side_effect */, + 4 /* want_ret */); + run_test(-EINVAL /* input_retval */, + 0 /* want_side_effect */, + -EINVAL /* want_ret */); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index cf6c87936c69..ff6ae3619d2f 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include <sched.h> #include <sys/socket.h> #include <test_progs.h> +#include "test_perf_buffer.skel.h" #include "libbpf_internal.h" static void on_sample(void *ctx, int cpu, void *data, __u32 size) @@ -20,16 +21,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; - const char *prog_name = "kprobe/sys_nanosleep"; - const char *file = "./test_perf_buffer.o"; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; struct perf_buffer_opts pb_opts = {}; - struct bpf_map *perf_buf_map; + struct test_perf_buffer *skel; cpu_set_t cpu_set, cpu_seen; - struct bpf_program *prog; - struct bpf_object *obj; struct perf_buffer *pb; - struct bpf_link *link; bool *online; nr_cpus = libbpf_num_possible_cpus(); @@ -46,33 +42,21 @@ void test_perf_buffer(void) nr_on_cpus++; /* load program */ - err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { - obj = NULL; - goto out_close; - 
} - - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + skel = test_perf_buffer__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; - /* load map */ - perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) - goto out_close; - - /* attach kprobe */ - link = bpf_program__attach_kprobe(prog, false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); - if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + /* attach probe */ + err = test_perf_buffer__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) goto out_close; /* set up perf buffer */ pb_opts.sample_cb = on_sample; pb_opts.ctx = &cpu_seen; - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) - goto out_detach; + goto out_close; /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); @@ -89,7 +73,7 @@ void test_perf_buffer(void) &cpu_set); if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", i, err)) - goto out_detach; + goto out_close; usleep(1); } @@ -105,9 +89,7 @@ void test_perf_buffer(void) out_free_pb: perf_buffer__free(pb); -out_detach: - bpf_link__destroy(link); out_close: - bpf_object__close(obj); + test_perf_buffer__destroy(skel); free(online); } diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c index a2537dfa899c..44b514fabccd 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_pkt_access(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c index 5f7aea605019..939015cd6dba 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_pkt_md_access(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c index 5dd89b941f53..dde2b7ae7bc9 100644 --- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_prog_run_xattr(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index faccc66f4e39..f47e7b1cb32c 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> enum { QUEUE, diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c new file mode 100644 index 000000000000..1a48c6f7f54e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <linux/compiler.h> +#include 
<asm/barrier.h> +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/epoll.h> +#include <time.h> +#include <sched.h> +#include <signal.h> +#include <pthread.h> +#include <sys/sysinfo.h> +#include <linux/perf_event.h> +#include <linux/ring_buffer.h> +#include "test_ringbuf.skel.h" + +#define EDONE 7777 + +static int duration = 0; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +static int sample_cnt; + +static void atomic_inc(int *cnt) +{ + __atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST); +} + +static int atomic_xchg(int *cnt, int val) +{ + return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST); +} + +static int process_sample(void *ctx, void *data, size_t len) +{ + struct sample *s = data; + + atomic_inc(&sample_cnt); + + switch (s->seq) { + case 0: + CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n", + 333L, s->value); + return 0; + case 1: + CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n", + 777L, s->value); + return -EDONE; + default: + /* we don't care about the rest */ + return 0; + } +} + +static struct test_ringbuf *skel; +static struct ring_buffer *ringbuf; + +static void trigger_samples() +{ + skel->bss->dropped = 0; + skel->bss->total = 0; + skel->bss->discarded = 0; + + /* trigger exactly two samples */ + skel->bss->value = 333; + syscall(__NR_getpgid); + skel->bss->value = 777; + syscall(__NR_getpgid); +} + +static void *poll_thread(void *input) +{ + long timeout = (long)input; + + return (void *)(long)ring_buffer__poll(ringbuf, timeout); +} + +void test_ringbuf(void) +{ + const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); + pthread_t thread; + long bg_ret = -1; + int err, cnt; + + skel = test_ringbuf__open_and_load(); + if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n")) + return; + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), + process_sample, NULL, NULL); + if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) + goto cleanup; + + err = test_ringbuf__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) + goto cleanup; + + trigger_samples(); + + /* 2 submitted + 1 discarded records */ + CHECK(skel->bss->avail_data != 3 * rec_sz, + "err_avail_size", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->avail_data); + CHECK(skel->bss->ring_size != 4096, + "err_ring_size", "exp %ld, got %ld\n", + 4096L, skel->bss->ring_size); + CHECK(skel->bss->cons_pos != 0, + "err_cons_pos", "exp %ld, got %ld\n", + 0L, skel->bss->cons_pos); + CHECK(skel->bss->prod_pos != 3 * rec_sz, + "err_prod_pos", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->prod_pos); + + /* poll for samples */ + err = ring_buffer__poll(ringbuf, -1); + + /* -EDONE is used as an indicator that we are done */ + if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err)) + goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); + + /* we expect extra polling to return nothing */ + err = ring_buffer__poll(ringbuf, 0); + if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err)) + goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", 
"exp %ld, got %ld\n", + 1L, skel->bss->discarded); + + /* now validate consumer position is updated and returned */ + trigger_samples(); + CHECK(skel->bss->cons_pos != 3 * rec_sz, + "err_cons_pos", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->cons_pos); + err = ring_buffer__poll(ringbuf, -1); + CHECK(err <= 0, "poll_err", "err %d\n", err); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); + + /* start poll in background w/ long timeout */ + err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000); + if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err)) + goto cleanup; + + /* turn off notifications now */ + skel->bss->flags = BPF_RB_NO_WAKEUP; + + /* give background thread a bit of a time */ + usleep(50000); + trigger_samples(); + /* sleeping arbitrarily is bad, but no better way to know that + * epoll_wait() **DID NOT** unblock in background thread + */ + usleep(50000); + /* background poll should still be blocked */ + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err != EBUSY, "try_join", "err %d\n", err)) + goto cleanup; + + /* BPF side did everything right */ + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", + 1L, skel->bss->discarded); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + /* clear flags to return to "adaptive" notification mode */ + skel->bss->flags = 0; + + /* produce new samples, no notification should be triggered, because + * consumer is now behind + */ + trigger_samples(); + + /* background poll should still be blocked */ + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err != EBUSY, "try_join", "err %d\n", err)) + goto cleanup; + + /* still no samples, because consumer is behind */ + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + skel->bss->dropped = 0; + skel->bss->total = 0; + skel->bss->discarded = 0; + + skel->bss->value = 333; + syscall(__NR_getpgid); + /* now force notifications */ + skel->bss->flags = BPF_RB_FORCE_WAKEUP; + skel->bss->value = 777; + syscall(__NR_getpgid); + + /* now we should get a pending notification */ + usleep(50000); + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err, "join_bg", "err %d\n", err)) + goto cleanup; + + if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret)) + goto cleanup; + + /* 3 rounds, 2 samples each */ + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt); + + /* BPF side did everything right */ + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", + 1L, skel->bss->discarded); + + test_ringbuf__detach(skel); +cleanup: + ring_buffer__free(ringbuf); + test_ringbuf__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c new file mode 100644 index 000000000000..d37161e59bb2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include 
<test_progs.h> +#include <sys/epoll.h> +#include "test_ringbuf_multi.skel.h" + +static int duration = 0; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +static int process_sample(void *ctx, void *data, size_t len) +{ + int ring = (unsigned long)ctx; + struct sample *s = data; + + switch (s->seq) { + case 0: + CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring); + CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n", + 333L, s->value); + break; + case 1: + CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring); + CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n", + 777L, s->value); + break; + default: + CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n", + s->seq, s->value); + return -1; + } + + return 0; +} + +void test_ringbuf_multi(void) +{ + struct test_ringbuf_multi *skel; + struct ring_buffer *ringbuf; + int err; + + skel = test_ringbuf_multi__open_and_load(); + if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n")) + return; + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1), + process_sample, (void *)(long)1, NULL); + if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) + goto cleanup; + + err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2), + process_sample, (void *)(long)2); + if (CHECK(err, "ringbuf_add", "failed to add another ring\n")) + goto cleanup; + + err = test_ringbuf_multi__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) + goto cleanup; + + /* trigger few samples, some will be skipped */ + skel->bss->target_ring = 0; + skel->bss->value = 333; + syscall(__NR_getpgid); + + /* skipped, no ringbuf in slot 1 */ + skel->bss->target_ring = 1; + skel->bss->value = 555; + syscall(__NR_getpgid); + + skel->bss->target_ring = 2; + skel->bss->value = 777; + syscall(__NR_getpgid); + + /* poll for samples, should get 2 ringbufs back */ + err = ring_buffer__poll(ringbuf, -1); + if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err)) + goto cleanup; + + /* expect extra polling to return nothing */ + err = ring_buffer__poll(ringbuf, 0); + if (CHECK(err < 0, "extra_samples", "poll result: %d\n", err)) + goto cleanup; + + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->skipped != 1, "err_skipped", "exp %ld, got %ld\n", + 1L, skel->bss->skipped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + +cleanup: + ring_buffer__free(ringbuf); + test_ringbuf_multi__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index b607112c64e7..da7318501e61 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -142,7 +142,7 @@ prog_load_failure: static void test_send_signal_tracepoint(void) { - const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; + const char *id_path = "/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/id"; struct perf_event_attr attr = { .type = PERF_TYPE_TRACEPOINT, .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN, @@ -155,14 +155,14 @@ static void test_send_signal_tracepoint(void) efd = open(id_path, O_RDONLY, 0); if (CHECK(efd < 0, "tracepoint", - "open syscalls/sys_enter_nanosleep/id failure: %s\n", + 
"open raw_syscalls/sys_enter/id failure: %s\n", strerror(errno))) return; bytes = read(efd, buf, sizeof(buf)); close(efd); if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", - "read syscalls/sys_enter_nanosleep/id failure: %s\n", + "read raw_syscalls/sys_enter/id failure: %s\n", strerror(errno))) return; diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c index 996e808f43a2..dfcbddcbe4d3 100644 --- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> static void sigalrm_handler(int s) {} static struct sigaction sigalrm_action = { diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c new file mode 100644 index 000000000000..d572e1a2c297 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook +// Copyright (c) 2019 Cloudflare +// Copyright (c) 2020 Isovalent, Inc. +/* + * Test that the socket assign program is able to redirect traffic towards a + * socket, regardless of whether the port or address destination of the traffic + * matches the port. + */ + +#define _GNU_SOURCE +#include <fcntl.h> +#include <signal.h> +#include <stdlib.h> +#include <unistd.h> + +#include "test_progs.h" + +#define BIND_PORT 1234 +#define CONNECT_PORT 4321 +#define TEST_DADDR (0xC0A80203) +#define NS_SELF "/proc/self/ns/net" + +static const struct timeval timeo_sec = { .tv_sec = 3 }; +static const size_t timeo_optlen = sizeof(timeo_sec); +static int stop, duration; + +static bool +configure_stack(void) +{ + char tc_cmd[BUFSIZ]; + + /* Move to a new networking namespace */ + if (CHECK_FAIL(unshare(CLONE_NEWNET))) + return false; + + /* Configure necessary links, routes */ + if (CHECK_FAIL(system("ip link set dev lo up"))) + return false; + if (CHECK_FAIL(system("ip route add local default dev lo"))) + return false; + if (CHECK_FAIL(system("ip -6 route add local default dev lo"))) + return false; + + /* Load qdisc, BPF program */ + if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) + return false; + sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", + "direct-action object-file ./test_sk_assign.o", + "section classifier/sk_assign_test", + (env.verbosity < VERBOSE_VERY) ? 
" 2>/dev/null" : ""); + if (CHECK(system(tc_cmd), "BPF load failed;", + "run with -vv for more info\n")) + return false; + + return true; +} + +static int +start_server(const struct sockaddr *addr, socklen_t len, int type) +{ + int fd; + + fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + goto out; + if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, + timeo_optlen))) + goto close_out; + if (CHECK_FAIL(bind(fd, addr, len) == -1)) + goto close_out; + if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1)) + goto close_out; + + goto out; +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static int +connect_to_server(const struct sockaddr *addr, socklen_t len, int type) +{ + int fd = -1; + + fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + goto out; + if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec, + timeo_optlen))) + goto close_out; + if (CHECK_FAIL(connect(fd, addr, len))) + goto close_out; + + goto out; +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static in_port_t +get_port(int fd) +{ + struct sockaddr_storage ss; + socklen_t slen = sizeof(ss); + in_port_t port = 0; + + if (CHECK_FAIL(getsockname(fd, (struct sockaddr *)&ss, &slen))) + return port; + + switch (ss.ss_family) { + case AF_INET: + port = ((struct sockaddr_in *)&ss)->sin_port; + break; + case AF_INET6: + port = ((struct sockaddr_in6 *)&ss)->sin6_port; + break; + default: + CHECK(1, "Invalid address family", "%d\n", ss.ss_family); + } + return port; +} + +static ssize_t +rcv_msg(int srv_client, int type) +{ + struct sockaddr_storage ss; + char buf[BUFSIZ]; + socklen_t slen; + + if (type == SOCK_STREAM) + return read(srv_client, &buf, sizeof(buf)); + else + return recvfrom(srv_client, &buf, sizeof(buf), 0, + (struct sockaddr *)&ss, &slen); +} + +static int +run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type) +{ + int client = -1, srv_client = -1; + char buf[] = "testing"; + in_port_t port; + int ret = 1; + + client = connect_to_server(addr, len, type); + if (client == -1) { + perror("Cannot connect to server"); + goto out; + } + + if (type == SOCK_STREAM) { + srv_client = accept(server_fd, NULL, NULL); + if (CHECK_FAIL(srv_client == -1)) { + perror("Can't accept connection"); + goto out; + } + } else { + srv_client = server_fd; + } + if (CHECK_FAIL(write(client, buf, sizeof(buf)) != sizeof(buf))) { + perror("Can't write on client"); + goto out; + } + if (CHECK_FAIL(rcv_msg(srv_client, type) != sizeof(buf))) { + perror("Can't read on server"); + goto out; + } + + port = get_port(srv_client); + if (CHECK_FAIL(!port)) + goto out; + /* SOCK_STREAM is connected via accept(), so the server's local address + * will be the CONNECT_PORT rather than the BIND port that corresponds + * to the listen socket. SOCK_DGRAM on the other hand is connectionless + * so we can't really do the same check there; the server doesn't ever + * create a socket with CONNECT_PORT. 
+ */ + if (type == SOCK_STREAM && + CHECK(port != htons(CONNECT_PORT), "Expected", "port %u but got %u", + CONNECT_PORT, ntohs(port))) + goto out; + else if (type == SOCK_DGRAM && + CHECK(port != htons(BIND_PORT), "Expected", + "port %u but got %u", BIND_PORT, ntohs(port))) + goto out; + + ret = 0; +out: + close(client); + if (srv_client != server_fd) + close(srv_client); + if (ret) + WRITE_ONCE(stop, 1); + return ret; +} + +static void +prepare_addr(struct sockaddr *addr, int family, __u16 port, bool rewrite_addr) +{ + struct sockaddr_in *addr4; + struct sockaddr_in6 *addr6; + + switch (family) { + case AF_INET: + addr4 = (struct sockaddr_in *)addr; + memset(addr4, 0, sizeof(*addr4)); + addr4->sin_family = family; + addr4->sin_port = htons(port); + if (rewrite_addr) + addr4->sin_addr.s_addr = htonl(TEST_DADDR); + else + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + break; + case AF_INET6: + addr6 = (struct sockaddr_in6 *)addr; + memset(addr6, 0, sizeof(*addr6)); + addr6->sin6_family = family; + addr6->sin6_port = htons(port); + addr6->sin6_addr = in6addr_loopback; + if (rewrite_addr) + addr6->sin6_addr.s6_addr32[3] = htonl(TEST_DADDR); + break; + default: + fprintf(stderr, "Invalid family %d", family); + } +} + +struct test_sk_cfg { + const char *name; + int family; + struct sockaddr *addr; + socklen_t len; + int type; + bool rewrite_addr; +}; + +#define TEST(NAME, FAMILY, TYPE, REWRITE) \ +{ \ + .name = NAME, \ + .family = FAMILY, \ + .addr = (FAMILY == AF_INET) ? (struct sockaddr *)&addr4 \ + : (struct sockaddr *)&addr6, \ + .len = (FAMILY == AF_INET) ? sizeof(addr4) : sizeof(addr6), \ + .type = TYPE, \ + .rewrite_addr = REWRITE, \ +} + +void test_sk_assign(void) +{ + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + struct test_sk_cfg tests[] = { + TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false), + TEST("ipv4 tcp addr redir", AF_INET, SOCK_STREAM, true), + TEST("ipv6 tcp port redir", AF_INET6, SOCK_STREAM, false), + TEST("ipv6 tcp addr redir", AF_INET6, SOCK_STREAM, true), + TEST("ipv4 udp port redir", AF_INET, SOCK_DGRAM, false), + TEST("ipv4 udp addr redir", AF_INET, SOCK_DGRAM, true), + TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false), + TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true), + }; + int server = -1; + int self_net; + + self_net = open(NS_SELF, O_RDONLY); + if (CHECK_FAIL(self_net < 0)) { + perror("Unable to open "NS_SELF); + return; + } + + if (!configure_stack()) { + perror("configure_stack"); + goto cleanup; + } + + for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) { + struct test_sk_cfg *test = &tests[i]; + const struct sockaddr *addr; + + if (!test__start_subtest(test->name)) + continue; + prepare_addr(test->addr, test->family, BIND_PORT, false); + addr = (const struct sockaddr *)test->addr; + server = start_server(addr, test->len, test->type); + if (server == -1) + goto cleanup; + + /* connect to unbound ports */ + prepare_addr(test->addr, test->family, CONNECT_PORT, + test->rewrite_addr); + if (run_test(server, addr, test->len, test->type)) + goto close; + + close(server); + server = -1; + } + +close: + close(server); +cleanup: + if (CHECK_FAIL(setns(self_net, CLONE_NEWNET))) + perror("Failed to setns("NS_SELF")"); + close(self_net); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c new file mode 100644 index 000000000000..f1784ae4565a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -0,0 +1,1282 @@ +// 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare +/* + * Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP). + * + * Tests exercise: + * - attaching/detaching/querying programs to BPF_SK_LOOKUP hook, + * - redirecting socket lookup to a socket selected by BPF program, + * - failing a socket lookup on BPF program's request, + * - error scenarios for selecting a socket from BPF program, + * - accessing BPF program context, + * - attaching and running multiple BPF programs. + * + * Tests run in a dedicated network namespace. + */ + +#define _GNU_SOURCE +#include <arpa/inet.h> +#include <assert.h> +#include <errno.h> +#include <error.h> +#include <fcntl.h> +#include <sched.h> +#include <stdio.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <bpf/libbpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" +#include "bpf_rlimit.h" +#include "bpf_util.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_sk_lookup.skel.h" + +/* External (address, port) pairs the client sends packets to. */ +#define EXT_IP4 "127.0.0.1" +#define EXT_IP6 "fd00::1" +#define EXT_PORT 7007 + +/* Internal (address, port) pairs the server listens/receives at. */ +#define INT_IP4 "127.0.0.2" +#define INT_IP4_V6 "::ffff:127.0.0.2" +#define INT_IP6 "fd00::2" +#define INT_PORT 8008 + +#define IO_TIMEOUT_SEC 3 + +enum server { + SERVER_A = 0, + SERVER_B = 1, + MAX_SERVERS, +}; + +enum { + PROG1 = 0, + PROG2, +}; + +struct inet_addr { + const char *ip; + unsigned short port; +}; + +struct test { + const char *desc; + struct bpf_program *lookup_prog; + struct bpf_program *reuseport_prog; + struct bpf_map *sock_map; + int sotype; + struct inet_addr connect_to; + struct inet_addr listen_at; + enum server accept_on; +}; + +static __u32 duration; /* for CHECK macro */ + +static bool is_ipv6(const char *ip) +{ + return !!strchr(ip, ':'); +} + +static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(reuseport_prog); + if (prog_fd < 0) { + errno = -prog_fd; + return -1; + } + + err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); + if (err) + return -1; + + return 0; +} + +static socklen_t inetaddr_len(const struct sockaddr_storage *addr) +{ + return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) : + addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0); +} + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC }; + int err, family, fd; + + family = is_ipv6(ip) ? 
AF_INET6 : AF_INET; + err = make_sockaddr(family, ip, port, addr, NULL); + if (CHECK(err, "make_address", "failed\n")) + return -1; + + fd = socket(addr->ss_family, sotype, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to make socket"); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) { + log_err("failed to set SNDTIMEO"); + close(fd); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) { + log_err("failed to set RCVTIMEO"); + close(fd); + return -1; + } + + return fd; +} + +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct sockaddr_storage addr = {0}; + const int one = 1; + int err, fd = -1; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */ + if (sotype == SOCK_DGRAM) { + err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IP_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IPV6_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_STREAM) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { + log_err("failed to enable SO_REUSEADDR"); + goto fail; + } + } + + if (reuseport_prog) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { + log_err("failed to enable SO_REUSEPORT"); + goto fail; + } + } + + err = bind(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "bind", "failed\n")) { + log_err("failed to bind listen socket"); + goto fail; + } + + if (sotype == SOCK_STREAM) { + err = listen(fd, SOMAXCONN); + if (CHECK(err, "make_server", "listen")) { + log_err("failed to listen on port %d", port); + goto fail; + } + } + + /* Late attach reuseport prog so we can have one init path */ + if (reuseport_prog) { + err = attach_reuseport(fd, reuseport_prog); + if (CHECK(err, "attach_reuseport", "failed\n")) { + log_err("failed to attach reuseport prog"); + goto fail; + } + } + + return fd; +fail: + close(fd); + return -1; +} + +static int make_client(int sotype, const char *ip, int port) +{ + struct sockaddr_storage addr = {0}; + int err, fd; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + err = connect(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "make_client", "connect")) { + log_err("failed to connect client socket"); + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static int send_byte(int fd) +{ + ssize_t n; + + errno = 0; + n = send(fd, "a", 1, 0); + if (CHECK(n <= 0, "send_byte", "send")) { + log_err("failed/partial send"); + return -1; + } + return 0; +} + +static int recv_byte(int fd) +{ + char buf[1]; + ssize_t n; + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv_byte", "recv")) { + log_err("failed/partial recv"); + return -1; + } + return 0; +} + +static int tcp_recv_send(int server_fd) +{ + char buf[1]; + int ret, fd; + ssize_t n; + + fd = 
accept(server_fd, NULL, NULL); + if (CHECK(fd < 0, "accept", "failed\n")) { + log_err("failed to accept"); + return -1; + } + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv", "failed\n")) { + log_err("failed/partial recv"); + ret = -1; + goto close; + } + + n = send(fd, buf, n, 0); + if (CHECK(n <= 0, "send", "failed\n")) { + log_err("failed/partial send"); + ret = -1; + goto close; + } + + ret = 0; +close: + close(fd); + return ret; +} + +static void v4_to_v6(struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss; + struct sockaddr_in v4 = *(struct sockaddr_in *)ss; + + v6->sin6_family = AF_INET6; + v6->sin6_port = v4.sin_port; + v6->sin6_addr.s6_addr[10] = 0xff; + v6->sin6_addr.s6_addr[11] = 0xff; + memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); +} + +static int udp_recv_send(int server_fd) +{ + char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))]; + struct sockaddr_storage _src_addr = { 0 }; + struct sockaddr_storage *src_addr = &_src_addr; + struct sockaddr_storage *dst_addr = NULL; + struct msghdr msg = { 0 }; + struct iovec iov = { 0 }; + struct cmsghdr *cm; + char buf[1]; + int ret, fd; + ssize_t n; + + iov.iov_base = buf; + iov.iov_len = sizeof(buf); + + msg.msg_name = src_addr; + msg.msg_namelen = sizeof(*src_addr); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = cmsg_buf; + msg.msg_controllen = sizeof(cmsg_buf); + + errno = 0; + n = recvmsg(server_fd, &msg, 0); + if (CHECK(n <= 0, "recvmsg", "failed\n")) { + log_err("failed to receive"); + return -1; + } + if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n")) + return -1; + + for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { + if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_ORIGDSTADDR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_ORIGDSTADDR)) { + dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm); + break; + } + log_err("warning: ignored cmsg at level %d type %d", + cm->cmsg_level, cm->cmsg_type); + } + if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n")) + return -1; + + /* Server socket bound to IPv4-mapped IPv6 address */ + if (src_addr->ss_family == AF_INET6 && + dst_addr->ss_family == AF_INET) { + v4_to_v6(dst_addr); + } + + /* Reply from original destination address. 
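+ * A fresh socket is bound to the original destination below, so the + * echo reply is seen by the client as coming from the address it + * originally sent to.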
*/ + fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to create tx socket"); + return -1; + } + + ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); + if (CHECK(ret, "bind", "failed\n")) { + log_err("failed to bind tx socket"); + goto out; + } + + msg.msg_control = NULL; + msg.msg_controllen = 0; + n = sendmsg(fd, &msg, 0); + if (CHECK(n <= 0, "sendmsg", "failed\n")) { + log_err("failed to send echo reply"); + ret = -1; + goto out; + } + + ret = 0; +out: + close(fd); + return ret; +} + +static int tcp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = tcp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static int udp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = udp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static struct bpf_link *attach_lookup_prog(struct bpf_program *prog) +{ + struct bpf_link *link; + int net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return NULL; + } + + link = bpf_program__attach_netns(prog, net_fd); + if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) { + errno = -PTR_ERR(link); + log_err("failed to attach program '%s' to netns", + bpf_program__name(prog)); + link = NULL; + } + + close(net_fd); + return link; +} + +static int update_lookup_map(struct bpf_map *map, int index, int sock_fd) +{ + int err, map_fd; + uint64_t value; + + map_fd = bpf_map__fd(map); + if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) { + errno = -map_fd; + log_err("failed to get map FD"); + return -1; + } + + value = (uint64_t)sock_fd; + err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) { + log_err("failed to update redir_map @ %d", index); + return -1; + } + + return 0; +} + +static __u32 link_info_prog_id(struct bpf_link *link) +{ + struct bpf_link_info info = {}; + __u32 info_len = sizeof(info); + int link_fd, err; + + link_fd = bpf_link__fd(link); + if (CHECK(link_fd < 0, "bpf_link__fd", "failed\n")) { + errno = -link_fd; + log_err("bpf_link__fd failed"); + return 0; + } + + err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); + if (CHECK(err, "bpf_obj_get_info_by_fd", "failed\n")) { + log_err("bpf_obj_get_info_by_fd"); + return 0; + } + if (CHECK(info_len != sizeof(info), "bpf_obj_get_info_by_fd", + "unexpected info len %u\n", info_len)) + return 0; + + return info.prog_id; +} + +static void query_lookup_prog(struct test_sk_lookup *skel) +{ + struct bpf_link *link[3] = {}; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 prog_id; + int net_fd; + int err; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return; + } + + link[0] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[0]) + goto close; + link[1] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[1]) + goto detach; + link[2] = attach_lookup_prog(skel->progs.lookup_drop); + if (!link[2]) + goto detach; + + err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + if (CHECK(err, 
"bpf_prog_query", "failed\n")) { + log_err("failed to query lookup prog"); + goto detach; + } + + errno = 0; + if (CHECK(attach_flags != 0, "bpf_prog_query", + "wrong attach_flags on query: %u", attach_flags)) + goto detach; + if (CHECK(prog_cnt != 3, "bpf_prog_query", + "wrong program count on query: %u", prog_cnt)) + goto detach; + prog_id = link_info_prog_id(link[0]); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + prog_id = link_info_prog_id(link[1]); + CHECK(prog_ids[1] != prog_id, "bpf_prog_query", + "invalid program #1 id on query: %u != %u\n", + prog_ids[1], prog_id); + prog_id = link_info_prog_id(link[2]); + CHECK(prog_ids[2] != prog_id, "bpf_prog_query", + "invalid program #2 id on query: %u != %u\n", + prog_ids[2], prog_id); + +detach: + if (link[2]) + bpf_link__destroy(link[2]); + if (link[1]) + bpf_link__destroy(link[1]); + if (link[0]) + bpf_link__destroy(link[0]); +close: + close(net_fd); +} + +static void run_lookup_prog(const struct test *t) +{ + int client_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (server_fds[i] < 0) + goto close; + + err = update_lookup_map(t->sock_map, i, server_fds[i]); + if (err) + goto close; + + /* want just one server for non-reuseport test */ + if (!t->reuseport_prog) + break; + } + + client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); + if (client_fd < 0) + goto close; + + if (t->sotype == SOCK_STREAM) + tcp_echo_test(client_fd, server_fds[t->accept_on]); + else + udp_echo_test(client_fd, server_fds[t->accept_on]); + + close(client_fd); +close: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void test_redirect_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "TCP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = 
SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "TCP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4_V6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "UDP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .listen_at = { INT_IP4_V6, INT_PORT }, + .connect_to = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 redir and reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + run_lookup_prog(t); + } +} + +static void drop_on_lookup(const struct test *t) +{ + struct sockaddr_storage dst = {}; + int 
client_fd, server_fd, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server_fd < 0) + goto detach; + + client_fd = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client_fd < 0) + goto close_srv; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client_fd); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client_fd, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client_fd); +close_srv: + close(server_fd); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_lookup(t); + } +} + +static void drop_on_reuseport(const struct test *t) +{ + struct sockaddr_storage dst = { 0 }; + int client, server1, server2, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server1 < 0) + goto detach; + + err = update_lookup_map(t->sock_map, SERVER_A, server1); + if (err) + goto detach; + + /* second server on destination address we should never reach */ + server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, + NULL /* reuseport prog */); + if (server2 < 0) + goto close_srv1; + + client = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client < 0) + goto close_srv2; + + err = connect(client, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client); +close_srv2: + close(server2); +close_srv1: + close(server1); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_reuseport(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + 
.connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "UDP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_reuseport(t); + } +} + +static void run_sk_assign(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog, + const char *listen_ip, const char *connect_ip) +{ + int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL); + if (server_fds[i] < 0) + goto close_servers; + + err = update_lookup_map(skel->maps.redir_map, i, + server_fds[i]); + if (err) + goto close_servers; + } + + client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT); + if (client_fd < 0) + goto close_servers; + + peer_fd = accept(server_fds[SERVER_B], NULL, NULL); + if (CHECK(peer_fd < 0, "accept", "failed\n")) + goto close_client; + + close(peer_fd); +close_client: + close(client_fd); +close_servers: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void run_sk_assign_v4(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP4, EXT_IP4); +} + +static void run_sk_assign_v6(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6); +} + +static void run_sk_assign_connected(struct test_sk_lookup *skel, + int sotype) +{ + int err, client_fd, connected_fd, server_fd; + struct bpf_link *lookup_link; + + server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL); + if (server_fd < 0) + return; + + connected_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (connected_fd < 0) + goto out_close_server; + + /* Put a connected socket in redirect map */ + err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd); + if (err) + goto out_close_connected; + + lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport); + if (!lookup_link) + goto out_close_connected; + + /* Try to redirect TCP SYN / UDP packet to a connected socket */ + client_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (client_fd < 0) + goto out_unlink_prog; + if (sotype == SOCK_DGRAM) { + send_byte(client_fd); + recv_byte(server_fd); + } + + close(client_fd); +out_unlink_prog: + bpf_link__destroy(lookup_link); +out_close_connected: + close(connected_fd); +out_close_server: + close(server_fd); +} + +static void test_sk_assign_helper(struct test_sk_lookup *skel)
+{ + if (test__start_subtest("sk_assign returns EEXIST")) + run_sk_assign_v4(skel, skel->progs.sk_assign_eexist); + if (test__start_subtest("sk_assign honors F_REPLACE")) + run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag); + if (test__start_subtest("sk_assign accepts NULL socket")) + run_sk_assign_v4(skel, skel->progs.sk_assign_null); + if (test__start_subtest("access ctx->sk")) + run_sk_assign_v4(skel, skel->progs.access_ctx_sk); + if (test__start_subtest("narrow access to ctx v4")) + run_sk_assign_v4(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("narrow access to ctx v6")) + run_sk_assign_v6(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("sk_assign rejects TCP established")) + run_sk_assign_connected(skel, SOCK_STREAM); + if (test__start_subtest("sk_assign rejects UDP connected")) + run_sk_assign_connected(skel, SOCK_DGRAM); +} + +struct test_multi_prog { + const char *desc; + struct bpf_program *prog1; + struct bpf_program *prog2; + struct bpf_map *redir_map; + struct bpf_map *run_map; + int expect_errno; + struct inet_addr listen_at; +}; + +static void run_multi_prog_lookup(const struct test_multi_prog *t) +{ + struct sockaddr_storage dst = {}; + int map_fd, server_fd, client_fd; + struct bpf_link *link1, *link2; + int prog_idx, done, err; + + map_fd = bpf_map__fd(t->run_map); + + done = 0; + prog_idx = PROG1; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + prog_idx = PROG2; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + + link1 = attach_lookup_prog(t->prog1); + if (!link1) + return; + link2 = attach_lookup_prog(t->prog2); + if (!link2) + goto out_unlink1; + + server_fd = make_server(SOCK_STREAM, t->listen_at.ip, + t->listen_at.port, NULL); + if (server_fd < 0) + goto out_unlink2; + + err = update_lookup_map(t->redir_map, SERVER_A, server_fd); + if (err) + goto out_close_server; + + client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst); + if (client_fd < 0) + goto out_close_server; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (CHECK(err && !t->expect_errno, "connect", + "unexpected error %d\n", errno)) + goto out_close_client; + if (CHECK(err && t->expect_errno && errno != t->expect_errno, + "connect", "unexpected error %d\n", errno)) + goto out_close_client; + + done = 0; + prog_idx = PROG1; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n"); + + done = 0; + prog_idx = PROG2; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n"); + +out_close_client: + close(client_fd); +out_close_server: + close(server_fd); +out_unlink2: + bpf_link__destroy(link2); +out_unlink1: + bpf_link__destroy(link1); +} + +static void test_multi_prog_lookup(struct test_sk_lookup *skel) +{ + struct test_multi_prog tests[] = { + { + .desc = "multi prog - pass, pass", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "multi prog - drop, drop", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, drop", + .prog1 = skel->progs.multi_prog_pass1, + 
.prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - drop, pass", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, redir", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, pass", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - drop, redir", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, drop", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, redir", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + }; + struct test_multi_prog *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + t->redir_map = skel->maps.redir_map; + t->run_map = skel->maps.run_map; + if (test__start_subtest(t->desc)) + run_multi_prog_lookup(t); + } +} + +static void run_tests(struct test_sk_lookup *skel) +{ + if (test__start_subtest("query lookup prog")) + query_lookup_prog(skel); + test_redirect_lookup(skel); + test_drop_on_lookup(skel); + test_drop_on_reuseport(skel); + test_sk_assign_helper(skel); + test_multi_prog_lookup(skel); +} + +static int switch_netns(void) +{ + static const char * const setup_script[] = { + "ip -6 addr add dev lo " EXT_IP6 "/128 nodad", + "ip -6 addr add dev lo " INT_IP6 "/128 nodad", + "ip link set dev lo up", + NULL, + }; + const char * const *cmd; + int err; + + err = unshare(CLONE_NEWNET); + if (CHECK(err, "unshare", "failed\n")) { + log_err("unshare(CLONE_NEWNET)"); + return -1; + } + + for (cmd = setup_script; *cmd; cmd++) { + err = system(*cmd); + if (CHECK(err, "system", "failed\n")) { + log_err("system(%s)", *cmd); + return -1; + } + } + + return 0; +} + +void test_sk_lookup(void) +{ + struct test_sk_lookup *skel; + int err; + + err = switch_netns(); + if (err) + return; + + skel = test_sk_lookup__open_and_load(); + if (CHECK(!skel, "skel open_and_load", "failed\n")) + return; + + run_tests(skel); + + test_sk_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index e95baa32e277..e5874e97f235 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_skb_ctx(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index 29188d6f5c8d..51fac975b316 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -138,7 +138,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, */ buf = 0x40; - if (setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1) < 0) { + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { log_err("Failed to call setsockopt(IP_TOS)"); goto detach; } diff --git 
a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 1ae00cd3174e..7577a77a4c4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -1,5 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> + +static void *spin_lock_thread(void *arg) +{ + __u32 duration, retval; + int err, prog_fd = *(u32 *) arg; + + err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + pthread_exit(arg); +} void test_spinlock(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index 1735faf17536..437cb93e72ac 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -52,7 +52,7 @@ retry: if (pmu_fd < 0 && errno == ENOENT) { printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); test__skip(); - goto cleanup; + goto close_prog; } if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, errno)) diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c new file mode 100644 index 000000000000..bb8fe646dd9f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +/* test_tailcall_1 checks basic functionality by patching multiple locations + * in a single program for a single tail call slot with nop->jmp, jmp->nop + * and jmp->jmp rewrites. Also checks for nop->nop. 
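+ * Each classifier/i subprogram simply returns its index i and the main + * program falls back to returning 3, so the retval from + * bpf_prog_test_run() indicates which tail call slot, if any, was taken.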
+ */ +static void test_tailcall_1(void) +{ + int err, map_fd, prog_fd, main_fd, i, j; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char prog_name[32]; + char buff[128] = {}; + + err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + j = bpf_map__def(prog_array)->max_entries - 1 - i; + snprintf(prog_name, sizeof(prog_name), "classifier/%i", j); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + j = bpf_map__def(prog_array)->max_entries - 1 - i; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != j, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err >= 0 || errno != ENOENT)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || 
retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + +out: + bpf_object__close(obj); +} + +/* test_tailcall_2 checks that patching multiple programs for a single + * tail call slot works. It also jumps through several programs and tests + * the tail call limit counter. + */ +static void test_tailcall_2(void) +{ + int err, map_fd, prog_fd, main_fd, i; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char prog_name[32]; + char buff[128] = {}; + + err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + i = 2; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); +out: + bpf_object__close(obj); +} + +/* test_tailcall_3 checks that the count value of the tail call limit + * enforcement matches with expectations. 
+ */ +static void test_tailcall_3(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char buff[128] = {}; + + err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + prog = bpf_object__find_program_by_title(obj, "classifier/0"); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n", + err, errno, val); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); +out: + bpf_object__close(obj); +} + +/* test_tailcall_4 checks that the kernel properly selects an indirect jump + * for the case where the key is not known. The key is passed in via global + * data so that different targets can be selected and their return values + * compared.
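+ * Since the verifier cannot prove a single constant key here, the JIT + * is expected to emit an indirect jump for the tail call.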
+ */ +static void test_tailcall_4(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + + err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } +out: + bpf_object__close(obj); +} + +/* test_tailcall_5 checks, similarly to test_tailcall_4, that the kernel + * generates an indirect jump when the keys are constant but differ between + * branches.
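+ * The values 1111, 1234 and 5678 written into global data below select + * which branch, and hence which constant key, the program uses.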
+ */ +static void test_tailcall_5(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 }; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + + err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } +out: + bpf_object__close(obj); +} + +void test_tailcalls(void) +{ + if (test__start_subtest("tailcall_1")) + test_tailcall_1(); + if (test__start_subtest("tailcall_2")) + test_tailcall_2(); + if (test__start_subtest("tailcall_3")) + test_tailcall_3(); + if (test__start_subtest("tailcall_4")) + test_tailcall_4(); + if (test__start_subtest("tailcall_5")) + test_tailcall_5(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index f4cd60d6fba2..d207e968e6b1 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include "cgroup_helpers.h" +#include "network_helpers.h" struct tcp_rtt_storage { __u32 invoked; @@ -87,34 +88,6 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, return err; } -static int connect_to_server(int server_fd) -{ - struct sockaddr_storage addr; - socklen_t len = sizeof(addr); - int fd; - - fd = socket(AF_INET, SOCK_STREAM, 0); - if (fd < 0) { - log_err("Failed to create client socket"); - return -1; - } - - if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { - log_err("Failed to get server addr"); - goto out; - } - - if (connect(fd, (const struct sockaddr
*)&addr, len) < 0) { - log_err("Fail to connect to server"); - goto out; - } - - return fd; - -out: - close(fd); - return -1; -} static int run_test(int cgroup_fd, int server_fd) { @@ -145,7 +118,7 @@ static int run_test(int cgroup_fd, int server_fd) goto close_bpf_object; } - client_fd = connect_to_server(server_fd); + client_fd = connect_to_fd(server_fd, 0); if (client_fd < 0) { err = -1; goto close_bpf_object; @@ -180,95 +153,22 @@ close_bpf_object: return err; } -static int start_server(void) -{ - struct sockaddr_in addr = { - .sin_family = AF_INET, - .sin_addr.s_addr = htonl(INADDR_LOOPBACK), - }; - int fd; - - fd = socket(AF_INET, SOCK_STREAM, 0); - if (fd < 0) { - log_err("Failed to create server socket"); - return -1; - } - - if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) { - log_err("Failed to bind socket"); - close(fd); - return -1; - } - - return fd; -} - -static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER; - -static void *server_thread(void *arg) -{ - struct sockaddr_storage addr; - socklen_t len = sizeof(addr); - int fd = *(int *)arg; - int client_fd; - int err; - - err = listen(fd, 1); - - pthread_mutex_lock(&server_started_mtx); - pthread_cond_signal(&server_started); - pthread_mutex_unlock(&server_started_mtx); - - if (CHECK_FAIL(err < 0)) { - perror("Failed to listed on socket"); - return NULL; - } - - client_fd = accept(fd, (struct sockaddr *)&addr, &len); - if (CHECK_FAIL(client_fd < 0)) { - perror("Failed to accept client"); - return NULL; - } - - /* Wait for the next connection (that never arrives) - * to keep this thread alive to prevent calling - * close() on client_fd. - */ - if (CHECK_FAIL(accept(fd, (struct sockaddr *)&addr, &len) >= 0)) { - perror("Unexpected success in second accept"); - return NULL; - } - - close(client_fd); - - return NULL; -} - void test_tcp_rtt(void) { int server_fd, cgroup_fd; - pthread_t tid; cgroup_fd = test__join_cgroup("/tcp_rtt"); if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server(); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; - if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, - (void *)&server_fd))) - goto close_server_fd; - - pthread_mutex_lock(&server_started_mtx); - pthread_cond_wait(&server_started, &server_started_mtx); - pthread_mutex_unlock(&server_started_mtx); - CHECK_FAIL(run_test(cgroup_fd, server_fd)); -close_server_fd: + close(server_fd); + close_cgroup_fd: close(cgroup_fd); } diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c new file mode 100644 index 000000000000..91cd6f357246 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. 
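+ * + * The attached LSM programs watch monitored_pid: creating and unlinking + * a temp file should set inode_storage_result, and starting a server + * socket should set sk_storage_result.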
+ */ + +#include <test_progs.h> +#include <linux/limits.h> + +#include "local_storage.skel.h" +#include "network_helpers.h" + +int create_and_unlink_file(void) +{ + char fname[PATH_MAX] = "/tmp/fileXXXXXX"; + int fd; + + fd = mkstemp(fname); + if (fd < 0) + return fd; + + close(fd); + unlink(fname); + return 0; +} + +void test_test_local_storage(void) +{ + struct local_storage *skel = NULL; + int err, duration = 0, serv_sk = -1; + + skel = local_storage__open_and_load(); + if (CHECK(!skel, "skel_load", "lsm skeleton failed\n")) + goto close_prog; + + err = local_storage__attach(skel); + if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) + goto close_prog; + + skel->bss->monitored_pid = getpid(); + + err = create_and_unlink_file(); + if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno)) + goto close_prog; + + CHECK(skel->data->inode_storage_result != 0, "inode_storage_result", + "inode_local_storage not set\n"); + + serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) + goto close_prog; + + CHECK(skel->data->sk_storage_result != 0, "sk_storage_result", + "sk_local_storage not set\n"); + + close(serv_sk); + +close_prog: + local_storage__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c new file mode 100644 index 000000000000..b17eb2045c1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ + +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/wait.h> +#include <unistd.h> +#include <malloc.h> +#include <stdlib.h> + +#include "lsm.skel.h" + +char *CMD_ARGS[] = {"true", NULL}; + +#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \ + (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1)) + +int stack_mprotect(void) +{ + void *buf; + long sz; + int ret; + + sz = sysconf(_SC_PAGESIZE); + if (sz < 0) + return sz; + + buf = alloca(sz * 3); + ret = mprotect(GET_PAGE_ADDR(buf, sz), sz, + PROT_READ | PROT_WRITE | PROT_EXEC); + return ret; +} + +int exec_cmd(int *monitored_pid) +{ + int child_pid, child_status; + + child_pid = fork(); + if (child_pid == 0) { + *monitored_pid = getpid(); + execvp(CMD_ARGS[0], CMD_ARGS); + return -EINVAL; + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return child_status; + } + + return -EINVAL; +} + +void test_test_lsm(void) +{ + struct lsm *skel = NULL; + int err, duration = 0; + + skel = lsm__open_and_load(); + if (CHECK(!skel, "skel_load", "lsm skeleton failed\n")) + goto close_prog; + + err = lsm__attach(skel); + if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) + goto close_prog; + + err = exec_cmd(&skel->bss->monitored_pid); + if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno)) + goto close_prog; + + CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n", + skel->bss->bprm_count); + + skel->bss->monitored_pid = getpid(); + + err = stack_mprotect(); + if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n", + errno)) + goto close_prog; + + CHECK(skel->bss->mprotect_count != 1, "mprotect_count", + "mprotect_count = %d\n", skel->bss->mprotect_count); + +close_prog: + lsm__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index dcb5ecac778e..48921ff74850 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ 
b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_xdp(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 3744196d7cba..6c8ca1c93f9b 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_xdp_adjust_tail(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c index c9404e6b226e..f284f72158ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <network_helpers.h> void test_xdp_noinline(void) { diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c new file mode 100644 index 000000000000..7185bee16fe4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_xdp_perf(void) +{ + const char *file = "./xdp_dummy.o"; + __u32 duration, retval, size; + struct bpf_object *obj; + char in[128], out[128]; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128, + out, &size, &retval, &duration); + + CHECK(err || retval != XDP_PASS || size != 128, + "xdp-perf", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c new file mode 100644 index 000000000000..5f9b613663e5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +/* WARNING: This implementation is not necessarily the same + * as tcp_dctcp.c. The purpose is mainly for testing + * the kernel BPF logic.
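The congestion-control math being exercised is DCTCP's moving average of the CE-marked fraction, alpha = (1 - g) * alpha + g * F with g = 1/2^dctcp_shift_g. A user-space restatement of the update performed in dctcp_update_alpha() below, for reference (function and parameter names are ours, not part of the test):

#include <stdint.h>

#define DEMO_MAX_ALPHA 1024U

/* alpha in [0, 1024] is 10-bit fixed point; F = delivered_ce / delivered */
static uint32_t alpha_update(uint32_t alpha, uint32_t delivered,
			     uint32_t delivered_ce, unsigned int shift_g)
{
	uint32_t decay = alpha >> shift_g;

	/* (1 - g) * alpha; mirrors the min_not_zero() step below */
	alpha -= decay ? decay : alpha;
	if (delivered_ce) {
		/* g * F, scaled to 10 fixed-point bits */
		uint32_t f = (delivered_ce << (10 - shift_g)) /
			     (delivered ? delivered : 1);

		alpha += f;
		if (alpha > DEMO_MAX_ALPHA)
			alpha = DEMO_MAX_ALPHA;
	}
	return alpha;
}

With the default shift_g = 4, one sixteenth of the old alpha decays per observation window, matching the "g = 1/2^4" comment on dctcp_shift_g in the program.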
+ */ + +#include <linux/bpf.h> +#include <linux/types.h> +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; + +#define DCTCP_MAX_ALPHA 1024U + +struct dctcp { + __u32 old_delivered; + __u32 old_delivered_ce; + __u32 prior_rcv_nxt; + __u32 dctcp_alpha; + __u32 next_seq; + __u32 ce_state; + __u32 loss_cwnd; +}; + +static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */ +static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA; + +static __always_inline void dctcp_reset(const struct tcp_sock *tp, + struct dctcp *ca) +{ + ca->next_seq = tp->snd_nxt; + + ca->old_delivered = tp->delivered; + ca->old_delivered_ce = tp->delivered_ce; +} + +BPF_TCP_OPS_1(dctcp_init, void, struct sock *, sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct dctcp *ca = inet_csk_ca(sk); + + ca->prior_rcv_nxt = tp->rcv_nxt; + ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); + ca->loss_cwnd = 0; + ca->ce_state = 0; + + dctcp_reset(tp, ca); +} + +BPF_TCP_OPS_1(dctcp_ssthresh, __u32, struct sock *, sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + ca->loss_cwnd = tp->snd_cwnd; + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); +} + +BPF_TCP_OPS_2(dctcp_update_alpha, void, + struct sock *, sk, __u32, flags) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct dctcp *ca = inet_csk_ca(sk); + + /* Expired RTT */ + if (!before(tp->snd_una, ca->next_seq)) { + __u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; + __u32 alpha = ca->dctcp_alpha; + + /* alpha = (1 - g) * alpha + g * F */ + + alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); + if (delivered_ce) { + __u32 delivered = tp->delivered - ca->old_delivered; + + /* If dctcp_shift_g == 1, a 32bit value would overflow + * after 8 M packets. + */ + delivered_ce <<= (10 - dctcp_shift_g); + delivered_ce /= max(1U, delivered); + + alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); + } + ca->dctcp_alpha = alpha; + dctcp_reset(tp, ca); + } +} + +static __always_inline void dctcp_react_to_loss(struct sock *sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + ca->loss_cwnd = tp->snd_cwnd; + tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); +} + +BPF_TCP_OPS_2(dctcp_state, void, struct sock *, sk, __u8, new_state) +{ + if (new_state == TCP_CA_Recovery && + new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) + dctcp_react_to_loss(sk); + /* We handle RTO in dctcp_cwnd_event to ensure that we perform only + * one loss-adjustment per RTT. + */ +} + +static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (ce_state == 1) + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + else + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +/* Minimal DCTCP CE state machine: + * + * S: 0 <- last pkt was non-CE + * 1 <- last pkt was CE + */ +static __always_inline +void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, + __u32 *prior_rcv_nxt, __u32 *ce_state) +{ + __u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0; + + if (*ce_state != new_ce_state) { + /* CE state has changed, force an immediate ACK to + * reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state.
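Restated outside the kernel types, that state machine is a one-bit tracker whose transitions mark the points where an immediate ACK is required (the names here are illustrative):

/* state 0 = last packet was not CE-marked, 1 = it was.  Returns
 * nonzero on a flip, i.e. when the receiver must ACK right away --
 * after first flushing any delayed ACK that still describes the old
 * state, as the ICSK_ACK_TIMER handling in dctcp_ece_ack_update() does.
 */
struct ce_sm {
	unsigned int state;
};

static unsigned int ce_transition(struct ce_sm *sm, unsigned int pkt_is_ce)
{
	unsigned int flipped = sm->state != pkt_is_ce;

	sm->state = pkt_is_ce;
	return flipped;
}

Keeping the CE boundary exact in the ACK stream is what lets the sender reconstruct the marked fraction F from pure ACK counts.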
+ */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { + dctcp_ece_ack_cwr(sk, *ce_state); + bpf_tcp_send_ack(sk, *prior_rcv_nxt); + } + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + } + *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt; + *ce_state = new_ce_state; + dctcp_ece_ack_cwr(sk, new_ce_state); +} + +BPF_TCP_OPS_2(dctcp_cwnd_event, void, + struct sock *, sk, enum tcp_ca_event, ev) +{ + struct dctcp *ca = inet_csk_ca(sk); + + switch (ev) { + case CA_EVENT_ECN_IS_CE: + case CA_EVENT_ECN_NO_CE: + dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); + break; + case CA_EVENT_LOSS: + dctcp_react_to_loss(sk); + break; + default: + /* Don't care for the rest. */ + break; + } +} + +BPF_TCP_OPS_1(dctcp_cwnd_undo, __u32, struct sock *, sk) +{ + const struct dctcp *ca = inet_csk_ca(sk); + + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); +} + +BPF_TCP_OPS_3(tcp_reno_cong_avoid, void, + struct sock *, sk, __u32, ack, __u32, acked) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (!tcp_is_cwnd_limited(sk)) + return; + + /* In "safe" area, increase. */ + if (tcp_in_slow_start(tp)) { + acked = tcp_slow_start(tp, acked); + if (!acked) + return; + } + /* In dangerous area, increase slowly. */ + tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); +} + +SEC(".struct_ops") +struct tcp_congestion_ops dctcp_nouse = { + .init = (void *)dctcp_init, + .set_state = (void *)dctcp_state, + .flags = TCP_CONG_NEEDS_ECN, + .name = "bpf_dctcp_nouse", +}; + +SEC(".struct_ops") +struct tcp_congestion_ops dctcp = { + .init = (void *)dctcp_init, + .in_ack_event = (void *)dctcp_update_alpha, + .cwnd_event = (void *)dctcp_cwnd_event, + .ssthresh = (void *)dctcp_ssthresh, + .cong_avoid = (void *)tcp_reno_cong_avoid, + .undo_cwnd = (void *)dctcp_cwnd_undo, + .set_state = (void *)dctcp_state, + .flags = TCP_CONG_NEEDS_ECN, + .name = "bpf_dctcp", +}; diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 040a44206f29..f3164235524a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -20,20 +20,20 @@ #include "bpf_endian.h" int _version SEC("version") = 1; -#define PROG(F) SEC(#F) int bpf_func_##F +#define PROG(F) PROG_(F, _##F) +#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME /* These are the identifiers of the BPF programs that will be used in tail * calls. Name is limited to 16 characters, with the terminating character and * bpf_func_ above, we have only 6 to work with, anything after will be cropped. */ -enum { - IP, - IPV6, - IPV6OP, /* Destination/Hop-by-Hop Options IPv6 Extension header */ - IPV6FR, /* Fragmentation IPv6 Extension Header */ - MPLS, - VLAN, -}; +#define IP 0 +#define IPV6 1 +#define IPV6OP 2 /* Destination/Hop-by-Hop Options IPv6 Ext. 
Header */ +#define IPV6FR 3 /* Fragmentation IPv6 Extension Header */ +#define MPLS 4 +#define VLAN 5 +#define MAX_PROG 6 #define IP_MF 0x2000 #define IP_OFFSET 0x1FFF @@ -59,7 +59,7 @@ struct frag_hdr { struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); - __uint(max_entries, 8); + __uint(max_entries, MAX_PROG); __uint(key_size, sizeof(__u32)); __uint(value_size, sizeof(__u32)); } jmp_table SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h new file mode 100644 index 000000000000..17db3bac518b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +/* "undefine" structs in vmlinux.h, because we "override" them below */ +#define bpf_iter_meta bpf_iter_meta___not_used +#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used +#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used +#define bpf_iter__netlink bpf_iter__netlink___not_used +#define bpf_iter__task bpf_iter__task___not_used +#define bpf_iter__task_file bpf_iter__task_file___not_used +#define bpf_iter__tcp bpf_iter__tcp___not_used +#define tcp6_sock tcp6_sock___not_used +#define bpf_iter__udp bpf_iter__udp___not_used +#define udp6_sock udp6_sock___not_used +#include "vmlinux.h" +#undef bpf_iter_meta +#undef bpf_iter__bpf_map +#undef bpf_iter__ipv6_route +#undef bpf_iter__netlink +#undef bpf_iter__task +#undef bpf_iter__task_file +#undef bpf_iter__tcp +#undef tcp6_sock +#undef bpf_iter__udp +#undef udp6_sock + +struct bpf_iter_meta { + struct seq_file *seq; + __u64 session_id; + __u64 seq_num; +} __attribute__((preserve_access_index)); + +struct bpf_iter__ipv6_route { + struct bpf_iter_meta *meta; + struct fib6_info *rt; +} __attribute__((preserve_access_index)); + +struct bpf_iter__netlink { + struct bpf_iter_meta *meta; + struct netlink_sock *sk; +} __attribute__((preserve_access_index)); + +struct bpf_iter__task { + struct bpf_iter_meta *meta; + struct task_struct *task; +} __attribute__((preserve_access_index)); + +struct bpf_iter__task_file { + struct bpf_iter_meta *meta; + struct task_struct *task; + __u32 fd; + struct file *file; +} __attribute__((preserve_access_index)); + +struct bpf_iter__bpf_map { + struct bpf_iter_meta *meta; + struct bpf_map *map; +} __attribute__((preserve_access_index)); + +struct bpf_iter__tcp { + struct bpf_iter_meta *meta; + struct sock_common *sk_common; + uid_t uid; +} __attribute__((preserve_access_index)); + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); + +struct bpf_iter__udp { + struct bpf_iter_meta *meta; + struct udp_sock *udp_sk; + uid_t uid __attribute__((aligned(8))); + int bucket __attribute__((aligned(8))); +} __attribute__((preserve_access_index)); + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c new file mode 100644 index 000000000000..6286023fd62b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 
3); + __type(key, __u32); + __type(value, __u64); +} arraymap1 SEC(".maps"); + +__u32 key_sum = 0; +__u64 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 *key = ctx->key; + __u64 *val = ctx->value; + + if (key == (void *)0 || val == (void *)0) + return 0; + + bpf_seq_write(ctx->meta->seq, key, sizeof(__u32)); + bpf_seq_write(ctx->meta->seq, val, sizeof(__u64)); + key_sum += *key; + val_sum += *val; + *val = *key; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c new file mode 100644 index 000000000000..07ddbfdbcab7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u64); +} hashmap1 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, __u64); + __type(value, __u64); +} hashmap2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u32); +} hashmap3 SEC(".maps"); + +/* will set before prog run */ +bool in_test_mode = 0; + +/* will collect results during prog run */ +__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0; +__u64 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + __u32 seq_num = ctx->meta->seq_num; + struct bpf_map *map = ctx->map; + struct key_t *key = ctx->key; + __u64 *val = ctx->value; + + if (in_test_mode) { + /* test mode is used by selftests to + * test functionality of bpf_hash_map iter. + * + * the above hashmap1 will have correct size + * and will be accepted, hashmap2 and hashmap3 + * should be rejected due to smaller key/value + * size. 
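The rejection the comment describes happens at attach time: the iterator program's BTF declares how many key/value bytes it reads, and a map with smaller key/value sizes cannot satisfy that. Attaching to one specific map goes through bpf_iter_link_info; a sketch using libbpf, assuming a libbpf new enough to carry bpf_iter_attach_opts (the v5.9-era API):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* prog is e.g. the dump_bpf_hash_map program above; map_fd identifies
 * the map to iterate.  Returns NULL on attach failure (such as the
 * key/value size mismatch the comment describes).
 */
static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};

	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	return bpf_program__attach_iter(prog, &opts);
}

The bpftool invocation quoted further down ("iter pin ... map id 77") populates the same link info from the command line.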
+ */ + if (key == (void *)0 || val == (void *)0) + return 0; + + key_sum_a += key->a; + key_sum_b += key->b; + key_sum_c += key->c; + val_sum += *val; + return 0; + } + + /* non-test mode, the map is prepared with the + * below bpftool command sequence: + * bpftool map create /sys/fs/bpf/m1 type hash \ + * key 12 value 8 entries 3 name map1 + * bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 1 \ + * value 0 0 0 1 0 0 0 1 + * bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 2 \ + * value 0 0 0 1 0 0 0 2 + * The bpftool iter command line: + * bpftool iter pin ./bpf_iter_bpf_hash_map.o /sys/fs/bpf/p1 \ + * map id 77 + * The below output will be: + * map dump starts + * 77: (1000000 0 2000000) (200000001000000) + * 77: (1000000 0 1000000) (100000001000000) + * map dump ends + */ + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, "map dump starts\n"); + + if (key == (void *)0 || val == (void *)0) { + BPF_SEQ_PRINTF(seq, "map dump ends\n"); + return 0; + } + + BPF_SEQ_PRINTF(seq, "%d: (%x %d %x) (%llx)\n", map->id, + key->a, key->b, key->c, *val); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c new file mode 100644 index 000000000000..08651b23edba --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +SEC("iter/bpf_map") +int dump_bpf_map(struct bpf_iter__bpf_map *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + __u64 seq_num = ctx->meta->seq_num; + struct bpf_map *map = ctx->map; + + if (map == (void *)0) { + BPF_SEQ_PRINTF(seq, " %%%%%% END %%%%%%\n"); + return 0; + } + + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " id refcnt usercnt locked_vm\n"); + + BPF_SEQ_PRINTF(seq, "%8u %8ld %8ld %10lu\n", map->id, map->refcnt.counter, + map->usercnt.counter, + map->memory.user->locked_vm.counter); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c new file mode 100644 index 000000000000..85fa710fad90 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u32); +} arraymap1 SEC(".maps"); + +/* will set before prog run */ +volatile const __u32 num_cpus = 0; + +__u32 key_sum = 0, val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_percpu_array_map(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 *key = ctx->key; + void *pptr = ctx->value; + __u32 step; + int i; + + if (key == (void *)0 || pptr == (void *)0) + return 0; + + key_sum += *key; + + step = 8; + for (i = 0; i < num_cpus; i++) { + val_sum += *(__u32 *)pptr; + pptr += step; + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c new file mode 100644 index 000000000000..feaaa2b89c57 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c @@ -0,0 +1,50 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u32); +} hashmap1 SEC(".maps"); + +/* will set before prog run */ +volatile const __u32 num_cpus = 0; + +/* will collect results during prog run */ +__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0; +__u32 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_percpu_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + struct key_t *key = ctx->key; + void *pptr = ctx->value; + __u32 step; + int i; + + if (key == (void *)0 || pptr == (void *)0) + return 0; + + key_sum_a += key->a; + key_sum_b += key->b; + key_sum_c += key->c; + + step = 8; + for (i = 0; i < num_cpus; i++) { + val_sum += *(__u32 *)pptr; + pptr += step; + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c new file mode 100644 index 000000000000..6b70ccaba301 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} sk_stg_map SEC(".maps"); + +__u32 val_sum = 0; +__u32 ipv6_sk_count = 0; + +SEC("iter/bpf_sk_storage_map") +int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx) +{ + struct sock *sk = ctx->sk; + __u32 *val = ctx->value; + + if (sk == (void *)0 || val == (void *)0) + return 0; + + if (sk->sk_family == AF_INET6) + ipv6_sk_count++; + + val_sum += *val; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c new file mode 100644 index 000000000000..d58d9f1642b5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +extern bool CONFIG_IPV6_SUBTREES __kconfig __weak; + +SEC("iter/ipv6_route") +int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct fib6_info *rt = ctx->rt; + const struct net_device *dev; + struct fib6_nh *fib6_nh; + unsigned int flags; + struct nexthop *nh; + + if (rt == (void *)0) + return 0; + + fib6_nh = &rt->fib6_nh[0]; + flags = rt->fib6_flags; + + /* FIXME: nexthop_is_multipath is not handled here. 
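Every dumper in this series is consumed the same way: the iterator is materialized as a file descriptor, or as a pinned path, and read like any seq_file. A user-space sketch, with an illustrative pin path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Stream a pinned iterator, e.g. one created with
 * "bpftool iter pin bpf_iter_ipv6_route.o /sys/fs/bpf/ipv6_route".
 */
static int cat_iter(const char *path)
{
	char buf[512];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return n < 0 ? -1 : 0;
}

The in-tree tests skip pinning and call bpf_iter_create() on the link fd to get the same readable descriptor.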
*/ + nh = rt->nh; + if (rt->nh) + fib6_nh = &nh->nh_info->fib6_nh; + + BPF_SEQ_PRINTF(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); + + if (CONFIG_IPV6_SUBTREES) + BPF_SEQ_PRINTF(seq, "%pi6 %02x ", &rt->fib6_src.addr, + rt->fib6_src.plen); + else + BPF_SEQ_PRINTF(seq, "00000000000000000000000000000000 00 "); + + if (fib6_nh->fib_nh_gw_family) { + flags |= RTF_GATEWAY; + BPF_SEQ_PRINTF(seq, "%pi6 ", &fib6_nh->fib_nh_gw6); + } else { + BPF_SEQ_PRINTF(seq, "00000000000000000000000000000000 "); + } + + dev = fib6_nh->fib_nh_dev; + if (dev) + BPF_SEQ_PRINTF(seq, "%08x %08x %08x %08x %8s\n", rt->fib6_metric, + rt->fib6_ref.refs.counter, 0, flags, dev->name); + else + BPF_SEQ_PRINTF(seq, "%08x %08x %08x %08x\n", rt->fib6_metric, + rt->fib6_ref.refs.counter, 0, flags); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c new file mode 100644 index 000000000000..cec82a419800 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +static inline struct inode *SOCK_INODE(struct socket *socket) +{ + return &container_of(socket, struct socket_alloc, socket)->vfs_inode; +} + +SEC("iter/netlink") +int dump_netlink(struct bpf_iter__netlink *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct netlink_sock *nlk = ctx->sk; + unsigned long group, ino; + struct inode *inode; + struct socket *sk; + struct sock *s; + + if (nlk == (void *)0) + return 0; + + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(seq, "sk Eth Pid Groups " + "Rmem Wmem Dump Locks Drops " + "Inode\n"); + + s = &nlk->sk; + BPF_SEQ_PRINTF(seq, "%pK %-3d ", s, s->sk_protocol); + + if (!nlk->groups) { + group = 0; + } else { + /* FIXME: temporary use bpf_probe_read here, needs + * verifier support to do direct access. + */ + bpf_probe_read(&group, sizeof(group), &nlk->groups[0]); + } + BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ", + nlk->portid, (u32)group, + s->sk_rmem_alloc.counter, + s->sk_wmem_alloc.refs.counter - 1, + nlk->cb_running, s->sk_refcnt.refs.counter); + + sk = s->sk_socket; + if (!sk) { + ino = 0; + } else { + /* FIXME: container_of inside SOCK_INODE has a forced + * type conversion, and direct access cannot be used + * with current verifier. 
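The forced conversion the netlink FIXME refers to is container_of(): recovering the enclosing structure from a pointer to one of its members is plain pointer arithmetic, but the resulting cast discards the BTF type the verifier was tracking, hence the bpf_probe_read() fallback. The construct itself, restated with a stand-in struct:

#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_alloc {		/* stand-in for struct socket_alloc */
	int socket;
	int vfs_inode;
};

static int *demo_sock_inode(int *sock_member)
{
	/* step back from &demo_alloc.socket to the whole demo_alloc */
	return &demo_container_of(sock_member, struct demo_alloc,
				  socket)->vfs_inode;
}

Once the verifier can see through such casts, the bpf_probe_read() calls in these programs can become direct loads.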
+ */ + inode = SOCK_INODE(sk); + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + } + BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c new file mode 100644 index 000000000000..4983087852a0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +SEC("iter/task") +int dump_task(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + + if (task == (void *)0) { + BPF_SEQ_PRINTF(seq, " === END ===\n"); + return 0; + } + + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(seq, " tgid pid\n"); + + BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c new file mode 100644 index 000000000000..8b787baa2654 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +SEC("iter/task_file") +int dump_task_file(struct bpf_iter__task_file *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + __u32 fd = ctx->fd; + struct file *file = ctx->file; + + if (task == (void *)0 || file == (void *)0) + return 0; + + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(seq, " tgid pid fd file\n"); + + BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd, + (long)file->f_op); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c new file mode 100644 index 000000000000..e40d32a2ed93 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +#define MAX_STACK_TRACE_DEPTH 64 +unsigned long entries[MAX_STACK_TRACE_DEPTH]; +#define SIZE_OF_ULONG (sizeof(unsigned long)) + +SEC("iter/task") +int dump_task_stack(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + long i, retlen; + + if (task == (void *)0) + return 0; + + retlen = bpf_get_task_stack(task, entries, + MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0); + if (retlen < 0) + return 0; + + BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid, + retlen / SIZE_OF_ULONG); + for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) { + if (retlen > i * SIZE_OF_ULONG) + BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]); + } + BPF_SEQ_PRINTF(seq, "\n"); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c new file mode 100644 index 000000000000..30fd587cb325 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include
<bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list *timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The implementation here is tailored to a particular + * setting of USER_HZ. + */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + __be32 dest, src; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->rcv_nxt - tp->copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, dest, destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->write_seq - tp->snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->snd_cwnd, + state == TCP_LISTEN ?
fastopenq->max_qlen + : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + __u16 destp, srcp; + __be32 dest, src; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = tw->tw_daddr; + src = tw->tw_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, irsk->ir_loc_addr, + irsk->ir_num, irsk->ir_rmt_addr, + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp4(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "rem_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET) + return 0; + + tp = bpf_skc_to_tcp_sock(sk_common); + if (tp) + return dump_tcp_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c new file mode 100644 index 000000000000..10dec4392031 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list *timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The implementation here is tailored to a particular + * setting of USER_HZ.
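Concretely, with USER_HZ fixed at 100 the conversion collapses to simple ratios once CONFIG_HZ is known; a self-checking user-space restatement (the names are ours):

#include <assert.h>
#include <stdint.h>

static unsigned long j2ct(uint64_t hz, unsigned long x)
{
	const uint64_t nsec_per_sec = 1000000000ULL, user_hz = 100;
	uint64_t tick_nsec = (nsec_per_sec + hz / 2) / hz;
	uint64_t user_hz_nsec = nsec_per_sec / user_hz;

	if ((tick_nsec % user_hz_nsec) == 0)
		return hz < user_hz ? x * (user_hz / hz)
				    : x / (hz / user_hz);
	return x * tick_nsec / user_hz_nsec;
}

int main(void)
{
	assert(j2ct(100, 250) == 250);	/* CONFIG_HZ == USER_HZ: identity */
	assert(j2ct(250, 250) == 100);	/* 250 Hz: x / 2.5 */
	assert(j2ct(1000, 250) == 25);	/* 1000 Hz: x / 10 */
	return 0;
}

Since USER_HZ is 100, the timer columns these dumpers emit are always in centiseconds, whatever the kernel's tick rate.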
+ */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct in6_addr *dest, *src; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->tcp.inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = &sp->sk_v6_daddr; + src = &sp->sk_v6_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->tcp.write_seq - tp->tcp.snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->tcp.snd_cwnd, + state == TCP_LISTEN ? fastopenq->max_qlen + : (tcp_in_initial_slowstart(&tp->tcp) ? 
-1 + : tp->tcp.snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + const struct in6_addr *dest, *src; + __u16 destp, srcp; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = &tw->tw_v6_daddr; + src = &tw->tw_v6_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + struct in6_addr *src, *dest; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + src = &irsk->ir_v6_loc_addr; + dest = &irsk->ir_v6_rmt_addr; + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], + irsk->ir_num, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp6(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp6_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "remote_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET6) + return 0; + + tp = bpf_skc_to_tcp6_sock(sk_common); + if (tp) + return dump_tcp6_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c new file mode 100644 index 000000000000..c71a7c283108 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#define START_CHAR 'a' +#include "bpf_iter_test_kern_common.h" diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c new file mode 100644 index 000000000000..8bdc8dc07444 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#define START_CHAR 'A' +#include "bpf_iter_test_kern_common.h" diff --git 
a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c new file mode 100644 index 000000000000..2a4647f20c46 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("iter/task") +int dump_task(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + int tgid; + + tgid = task->tgid; + bpf_seq_write(seq, &tgid, sizeof(tgid)); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c new file mode 100644 index 000000000000..ee49493dc125 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +__u32 map1_id = 0, map2_id = 0; +__u32 map1_accessed = 0, map2_accessed = 0; +__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0; + +static volatile const __u32 print_len; +static volatile const __u32 ret1; + +SEC("iter/bpf_map") +int dump_bpf_map(struct bpf_iter__bpf_map *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct bpf_map *map = ctx->map; + __u64 seq_num; + int i, ret = 0; + + if (map == (void *)0) + return 0; + + /* only dump map1_id and map2_id */ + if (map->id != map1_id && map->id != map2_id) + return 0; + + seq_num = ctx->meta->seq_num; + if (map->id == map1_id) { + map1_seqnum = seq_num; + map1_accessed++; + } + + if (map->id == map2_id) { + if (map2_accessed == 0) { + map2_seqnum1 = seq_num; + if (ret1) + ret = 1; + } else { + map2_seqnum2 = seq_num; + } + map2_accessed++; + } + + /* fill seq_file buffer */ + for (i = 0; i < print_len; i++) + bpf_seq_write(seq, &seq_num, sizeof(seq_num)); + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h new file mode 100644 index 000000000000..d5e3df66ad9a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; +int count = 0; + +SEC("iter/task") +int dump_task(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + char c; + + if (count < 4) { + c = START_CHAR + count; + bpf_seq_write(seq, &c, sizeof(c)); + count++; + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c new file mode 100644 index 000000000000..7053784575e4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; 
+ bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp4(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __be32 dest, src; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, + " sl local_address rem_address st tx_queue " + "rx_queue tr tm->when retrnsmt uid timeout " + "inode ref pointer drops\n"); + + /* filter out udp6 sockets */ + inet = &udp_sk->inet; + if (inet->sk.sk_family == AF_INET6) + return 0; + + inet = &udp_sk->inet; + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + + BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ", + ctx->bucket, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c new file mode 100644 index 000000000000..c1175a6ecf43 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp6(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + const struct in6_addr *dest, *src; + struct udp6_sock *udp6_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER); + + udp6_sk = bpf_skc_to_udp6_sock(udp_sk); + if (udp6_sk == (void *)0) + return 0; + + inet = &udp_sk->inet; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + dest = &inet->sk.sk_v6_daddr; + src = &inet->sk.sk_v6_rcv_saddr; + + BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + ctx->bucket, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + 
inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h new file mode 100644 index 000000000000..01378911252b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_NET_H__ +#define __BPF_TRACING_NET_H__ + +#define AF_INET 2 +#define AF_INET6 10 + +#define ICSK_TIME_RETRANS 1 +#define ICSK_TIME_PROBE0 3 +#define ICSK_TIME_LOSS_PROBE 5 +#define ICSK_TIME_REO_TIMEOUT 6 + +#define IFNAMSIZ 16 + +#define RTF_GATEWAY 0x0002 + +#define TCP_INFINITE_SSTHRESH 0x7fffffff +#define TCP_PINGPONG_THRESH 3 + +#define fib_nh_dev nh_common.nhc_dev +#define fib_nh_gw_family nh_common.nhc_gw_family +#define fib_nh_gw6 nh_common.nhc_gw.ipv6 + +#define inet_daddr sk.__sk_common.skc_daddr +#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr +#define inet_dport sk.__sk_common.skc_dport + +#define ir_loc_addr req.__req_common.skc_rcv_saddr +#define ir_num req.__req_common.skc_num +#define ir_rmt_addr req.__req_common.skc_daddr +#define ir_rmt_port req.__req_common.skc_dport +#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr +#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr + +#define sk_family __sk_common.skc_family +#define sk_rmem_alloc sk_backlog.rmem_alloc +#define sk_refcnt __sk_common.skc_refcnt +#define sk_state __sk_common.skc_state +#define sk_v6_daddr __sk_common.skc_v6_daddr +#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr + +#define s6_addr32 in6_u.u6_addr32 + +#define tw_daddr __tw_common.skc_daddr +#define tw_rcv_saddr __tw_common.skc_rcv_saddr +#define tw_dport __tw_common.skc_dport +#define tw_refcnt __tw_common.skc_refcnt +#define tw_v6_daddr __tw_common.skc_v6_daddr +#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr + +#endif diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c new file mode 100644 index 000000000000..f5a7c832d0f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___err_wrong_val_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c deleted file mode 100644 index 795a5b729176..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type1 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c deleted file mode 100644 index 3af74b837c4d..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type2 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c deleted file mode 100644 index 50369e8320a0..000000000000 --- 
a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_bitfield x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c deleted file mode 100644 index 823bac13d641..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_16 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c deleted file mode 100644 index b44f3be18535..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_32 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c deleted file mode 100644 index 9a3dd2099c0f..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_64 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c deleted file mode 100644 index 9f11ef5f6e88..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_8 x) {} diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c new file mode 100644 index 000000000000..7396308677a3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <string.h> +#include <stdbool.h> + +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <sys/socket.h> + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; + +struct svc_addr { + __be32 addr; + __be16 port; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct svc_addr); +} service_mapping SEC(".maps"); + +SEC("cgroup/connect4") +int connect4(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in sa = {}; + struct svc_addr *orig; + + /* Force local address to 127.0.0.1:22222. */ + sa.sin_family = AF_INET; + sa.sin_port = bpf_htons(22222); + sa.sin_addr.s_addr = bpf_htonl(0x7f000001); + + if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) + return 0; + + /* Rewire service 1.2.3.4:60000 to backend 127.0.0.1:60123. 
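Seen from an application inside the attached cgroup, the redirection is invisible end to end: connect() targets the virtual service, the kernel dials the backend, and the getsockname4/getpeername4 hooks below keep the addresses consistent with the fiction. A sketch of what such a client observes (constants match the program above; error handling trimmed):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void show_peer(void)
{
	struct sockaddr_in svc = {
		.sin_family = AF_INET,
		.sin_port = htons(60000),
	};
	struct sockaddr_in peer;
	socklen_t len = sizeof(peer);
	char buf[INET_ADDRSTRLEN];
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	inet_pton(AF_INET, "1.2.3.4", &svc.sin_addr);
	/* connect4 silently rewrites this to 127.0.0.1:60123 */
	if (fd >= 0 &&
	    connect(fd, (struct sockaddr *)&svc, sizeof(svc)) == 0 &&
	    getpeername(fd, (struct sockaddr *)&peer, &len) == 0)
		/* prints 1.2.3.4:60000, courtesy of getpeername4 */
		printf("peer %s:%d\n",
		       inet_ntop(AF_INET, &peer.sin_addr, buf, sizeof(buf)),
		       ntohs(peer.sin_port));
	if (fd >= 0)
		close(fd);
}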
*/ + if (ctx->user_port == bpf_htons(60000)) { + orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!orig) + return 0; + + orig->addr = ctx->user_ip4; + orig->port = ctx->user_port; + + ctx->user_ip4 = bpf_htonl(0x7f000001); + ctx->user_port = bpf_htons(60123); + } + return 1; +} + +SEC("cgroup/getsockname4") +int getsockname4(struct bpf_sock_addr *ctx) +{ + /* Expose local server as 1.2.3.4:60000 to client. */ + if (ctx->user_port == bpf_htons(60123)) { + ctx->user_ip4 = bpf_htonl(0x01020304); + ctx->user_port = bpf_htons(60000); + } + return 1; +} + +SEC("cgroup/getpeername4") +int getpeername4(struct bpf_sock_addr *ctx) +{ + struct svc_addr *orig; + + /* Expose service 1.2.3.4:60000 as peer instead of backend. */ + if (ctx->user_port == bpf_htons(60123)) { + orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0); + if (orig) { + ctx->user_ip4 = orig->addr; + ctx->user_port = orig->port; + } + } + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c new file mode 100644 index 000000000000..c1a2b555e9ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <string.h> + +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <sys/socket.h> + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; + +struct svc_addr { + __be32 addr[4]; + __be16 port; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct svc_addr); +} service_mapping SEC(".maps"); + +SEC("cgroup/connect6") +int connect6(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in6 sa = {}; + struct svc_addr *orig; + + /* Force local address to [::1]:22223. */ + sa.sin6_family = AF_INET6; + sa.sin6_port = bpf_htons(22223); + sa.sin6_addr.s6_addr32[3] = bpf_htonl(1); + + if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) + return 0; + + /* Rewire service [fc00::1]:60000 to backend [::1]:60124. */ + if (ctx->user_port == bpf_htons(60000)) { + orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!orig) + return 0; + + orig->addr[0] = ctx->user_ip6[0]; + orig->addr[1] = ctx->user_ip6[1]; + orig->addr[2] = ctx->user_ip6[2]; + orig->addr[3] = ctx->user_ip6[3]; + orig->port = ctx->user_port; + + ctx->user_ip6[0] = 0; + ctx->user_ip6[1] = 0; + ctx->user_ip6[2] = 0; + ctx->user_ip6[3] = bpf_htonl(1); + ctx->user_port = bpf_htons(60124); + } + return 1; +} + +SEC("cgroup/getsockname6") +int getsockname6(struct bpf_sock_addr *ctx) +{ + /* Expose local server as [fc00::1]:60000 to client. */ + if (ctx->user_port == bpf_htons(60124)) { + ctx->user_ip6[0] = bpf_htonl(0xfc000000); + ctx->user_ip6[1] = 0; + ctx->user_ip6[2] = 0; + ctx->user_ip6[3] = bpf_htonl(1); + ctx->user_port = bpf_htons(60000); + } + return 1; +} + +SEC("cgroup/getpeername6") +int getpeername6(struct bpf_sock_addr *ctx) +{ + struct svc_addr *orig; + + /* Expose service [fc00::1]:60000 as peer instead of backend. 
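Both address families lean on BPF_MAP_TYPE_SK_STORAGE to stash the original destination on the socket itself, so no global table or key management is needed. The pattern in isolation (map and helper names are ours):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} scratch SEC(".maps");

static __u64 *get_scratch(struct bpf_sock *sk)
{
	/* ..._F_CREATE allocates the per-socket slot on first use */
	return bpf_sk_storage_get(&scratch, sk, 0,
				  BPF_SK_STORAGE_GET_F_CREATE);
}

The storage is torn down with the socket, which is why getpeername6 can read what connect6 wrote without any explicit cleanup path.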
*/ + if (ctx->user_port == bpf_htons(60124)) { + orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0); + if (orig) { + ctx->user_ip6[0] = orig->addr[0]; + ctx->user_ip6[1] = orig->addr[1]; + ctx->user_ip6[2] = orig->addr[2]; + ctx->user_ip6[3] = orig->addr[3]; + ctx->user_port = orig->port; + } + } + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index f686a8138d90..78167e23bade 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -377,14 +377,7 @@ struct core_reloc_arrays___err_non_array { struct core_reloc_arrays_substruct d[1][2]; }; -struct core_reloc_arrays___err_wrong_val_type1 { - char a[5]; /* char instead of int */ - char b[2][3][4]; - struct core_reloc_arrays_substruct c[3]; - struct core_reloc_arrays_substruct d[1][2]; -}; - -struct core_reloc_arrays___err_wrong_val_type2 { +struct core_reloc_arrays___err_wrong_val_type { int a[5]; char b[2][3][4]; int c[3]; /* value is not a struct */ @@ -580,67 +573,6 @@ struct core_reloc_ints___bool { int64_t s64_field; }; -struct core_reloc_ints___err_bitfield { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field: 32; /* bitfields are not supported */ - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_8 { - uint16_t u8_field; /* not 8-bit anymore */ - int16_t s8_field; /* not 8-bit anymore */ - - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_16 { - uint8_t u8_field; - int8_t s8_field; - - uint32_t u16_field; /* not 16-bit anymore */ - int32_t s16_field; /* not 16-bit anymore */ - - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_32 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - - uint64_t u32_field; /* not 32-bit anymore */ - int64_t s32_field; /* not 32-bit anymore */ - - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_64 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - - uint32_t u64_field; /* not 64-bit anymore */ - int32_t s64_field; /* not 64-bit anymore */ -}; - /* * MISC */ diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c new file mode 100644 index 000000000000..9365b686f84b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +__u64 test1_result = 0; +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test1, int a) +{ + test1_result = a == 1; + return 0; +} + +__u64 test2_result = 0; +SEC("fentry/bpf_fentry_test2") +int BPF_PROG(test2, int a, __u64 b) +{ + test2_result = a == 2 && b == 3; + return 0; +} + +__u64 test3_result = 0; +SEC("fentry/bpf_fentry_test3") +int BPF_PROG(test3, char a, int b, __u64 c) +{ + test3_result = a == 4 && b == 5 && c == 6; + return 0; +} + +__u64 test4_result = 0; +SEC("fentry/bpf_fentry_test4") +int BPF_PROG(test4, void *a, char b, int c, __u64 d) +{ + test4_result = a == (void *)7 && b == 8 
&& c == 9 && d == 10; + return 0; +} + +__u64 test5_result = 0; +SEC("fentry/bpf_fentry_test5") +int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e) +{ + test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 && + e == 15; + return 0; +} + +__u64 test6_result = 0; +SEC("fentry/bpf_fentry_test6") +int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f) +{ + test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 && + e == (void *)20 && f == 21; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c new file mode 100644 index 000000000000..78434c1fa964 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/stddef.h> +#include <linux/if_ether.h> +#include <linux/ipv6.h> +#include <linux/bpf.h> +#include <linux/tcp.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include <bpf/bpf_tracing.h> + +struct sk_buff { + unsigned int len; +}; + +__u64 test_result = 0; +SEC("fexit/test_pkt_access") +int BPF_PROG(test_main, struct sk_buff *skb, int ret) +{ + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ret != 0) + return 0; + test_result = 1; + return 0; +} + +__u64 test_result_subprog1 = 0; +SEC("fexit/test_pkt_access_subprog1") +int BPF_PROG(test_subprog1, struct sk_buff *skb, int ret) +{ + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ret != 148) + return 0; + test_result_subprog1 = 1; + return 0; +} + +/* Though test_pkt_access_subprog2() is defined in C as: + * static __attribute__ ((noinline)) + * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) + * { + * return skb->len * val; + * } + * llvm optimizations remove 'int val' argument and generate BPF assembly: + * r0 = *(u32 *)(r1 + 0) + * w0 <<= 1 + * exit + * In such case the verifier falls back to conservative and + * tracing program can access arguments and return value as u64 + * instead of accurate types. + */ +struct args_subprog2 { + __u64 args[5]; + __u64 ret; +}; +__u64 test_result_subprog2 = 0; +SEC("fexit/test_pkt_access_subprog2") +int test_subprog2(struct args_subprog2 *ctx) +{ + struct sk_buff *skb = (void *)ctx->args[0]; + __u64 ret; + int len; + + bpf_probe_read_kernel(&len, sizeof(len), + __builtin_preserve_access_index(&skb->len)); + + ret = ctx->ret; + /* bpf_prog_test_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 + * which randomizes upper 32 bits after BPF_ALU32 insns. + * Hence after 'w0 <<= 1' upper bits of $rax are random. + * That is expected and correct. Trim them. 
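+ * Masking the value would work just as well; the cast below is
+ * equivalent to:
+ *
+ *	ret &= 0xffffffffULL;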
+ */ + ret = (__u32) ret; + if (len != 74 || ret != 148) + return 0; + test_result_subprog2 = 1; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c new file mode 100644 index 000000000000..8b98b1a51784 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct test1 { + ks32 a; + ks32 ret; +}; +static volatile __u64 test1_result; +SEC("fexit/bpf_fentry_test1") +int test1(struct test1 *ctx) +{ + test1_result = ctx->a == 1 && ctx->ret == 2; + return 0; +} + +struct test2 { + ks32 a; + ku64 b; + ks32 ret; +}; +static volatile __u64 test2_result; +SEC("fexit/bpf_fentry_test2") +int test2(struct test2 *ctx) +{ + test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5; + return 0; +} + +struct test3 { + ks8 a; + ks32 b; + ku64 c; + ks32 ret; +}; +static volatile __u64 test3_result; +SEC("fexit/bpf_fentry_test3") +int test3(struct test3 *ctx) +{ + test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 && + ctx->ret == 15; + return 0; +} + +struct test4 { + void *a; + ks8 b; + ks32 c; + ku64 d; + ks32 ret; +}; +static volatile __u64 test4_result; +SEC("fexit/bpf_fentry_test4") +int test4(struct test4 *ctx) +{ + test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && + ctx->d == 10 && ctx->ret == 34; + return 0; +} + +struct test5 { + ku64 a; + void *b; + ks16 c; + ks32 d; + ku64 e; + ks32 ret; +}; +static volatile __u64 test5_result; +SEC("fexit/bpf_fentry_test5") +int test5(struct test5 *ctx) +{ + test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && + ctx->d == 14 && ctx->e == 15 && ctx->ret == 65; + return 0; +} + +struct test6 { + ku64 a; + void *b; + ks16 c; + ks32 d; + void *e; + ks64 f; + ks32 ret; +}; +static volatile __u64 test6_result; +SEC("fexit/bpf_fentry_test6") +int test6(struct test6 *ctx) +{ + test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && + ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 && + ctx->ret == 111; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c new file mode 100644 index 000000000000..75e8e1069fe7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u64); +} arraymap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} percpu_map SEC(".maps"); + +struct callback_ctx { + int output; +}; + +static __u64 +check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val, + struct callback_ctx *data) +{ + data->output += *val; + if (*key == 1) + return 1; /* stop the iteration */ + return 0; +} + +__u32 cpu = 0; +__u64 percpu_val = 0; + +static __u64 +check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val, + struct callback_ctx *data) +{ + cpu = bpf_get_smp_processor_id(); + percpu_val = *val; + return 0; +} + +u32 arraymap_output = 0; + +SEC("classifier") +int test_pkt_access(struct 
__sk_buff *skb) +{ + struct callback_ctx data; + + data.output = 0; + bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0); + arraymap_output = data.output; + + bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c new file mode 100644 index 000000000000..913dd91aafff --- /dev/null +++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u64); +} hashmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} percpu_map SEC(".maps"); + +struct callback_ctx { + struct __sk_buff *ctx; + int input; + int output; +}; + +static __u64 +check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val, + struct callback_ctx *data) +{ + struct __sk_buff *skb = data->ctx; + __u32 k; + __u64 v; + + if (skb) { + k = *key; + v = *val; + if (skb->len == 10000 && k == 10 && v == 10) + data->output = 3; /* impossible path */ + else + data->output = 4; + } else { + data->output = data->input; + bpf_map_delete_elem(map, key); + } + + return 0; +} + +__u32 cpu = 0; +__u32 percpu_called = 0; +__u32 percpu_key = 0; +__u64 percpu_val = 0; +int percpu_output = 0; + +static __u64 +check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val, + struct callback_ctx *unused) +{ + struct callback_ctx data; + + percpu_called++; + cpu = bpf_get_smp_processor_id(); + percpu_key = *key; + percpu_val = *val; + + data.ctx = 0; + data.input = 100; + data.output = 0; + bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0); + percpu_output = data.output; + + return 0; +} + +int hashmap_output = 0; +int hashmap_elems = 0; +int percpu_map_elems = 0; + +SEC("classifier") +int test_pkt_access(struct __sk_buff *skb) +{ + struct callback_ctx data; + + data.ctx = skb; + data.input = 10; + data.output = 0; + hashmap_elems = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0); + hashmap_output = data.output; + + percpu_map_elems = bpf_for_each_map_elem(&percpu_map, check_percpu_elem, + (void *)0, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c new file mode 100644 index 000000000000..0758ba229ae0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020 Google LLC. 
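+ *
+ * Exercises BPF local storage from LSM hooks: socket_post_create and
+ * file_open below create sk/inode storage and stamp it with
+ * DUMMY_STORAGE_VALUE; socket_bind and inode_unlink then check that the
+ * value survived and that *_storage_delete() succeeds. A sketch of the
+ * userspace side (names hypothetical, via a libbpf skeleton):
+ *
+ *	skel->bss->monitored_pid = getpid();
+ *	sk = socket(AF_INET, SOCK_STREAM, 0);	// triggers socket_post_create
+ *	bind(sk, ...);				// triggers socket_bind
+ *	fd = open(tmp_path, O_RDONLY);		// file_open stamps inode storage
+ *	unlink(tmp_path);			// unlink_hook checks and deletes it
+ *	// then read {inode,sk}_storage_result back from skel->bss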
+ */ + +#include <errno.h> +#include <linux/bpf.h> +#include <stdbool.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +#define DUMMY_STORAGE_VALUE 0xdeadbeef + +int monitored_pid = 0; +int inode_storage_result = -1; +int sk_storage_result = -1; + +struct dummy_storage { + __u32 value; +}; + +struct { + __uint(type, BPF_MAP_TYPE_INODE_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct dummy_storage); +} inode_storage_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE); + __type(key, int); + __type(value, struct dummy_storage); +} sk_storage_map SEC(".maps"); + +/* TODO Use vmlinux.h once BTF pruning for embedded types is fixed. + */ +struct sock {} __attribute__((preserve_access_index)); +struct sockaddr {} __attribute__((preserve_access_index)); +struct socket { + struct sock *sk; +} __attribute__((preserve_access_index)); + +struct inode {} __attribute__((preserve_access_index)); +struct dentry { + struct inode *d_inode; +} __attribute__((preserve_access_index)); +struct file { + struct inode *f_inode; +} __attribute__((preserve_access_index)); + + +SEC("lsm/inode_unlink") +int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; + + if (pid != monitored_pid) + return 0; + + storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + if (storage->value == DUMMY_STORAGE_VALUE) + inode_storage_result = -1; + + inode_storage_result = + bpf_inode_storage_delete(&inode_storage_map, victim->d_inode); + + return 0; +} + +SEC("lsm/socket_bind") +int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, + int addrlen) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; + + if (pid != monitored_pid) + return 0; + + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + if (storage->value == DUMMY_STORAGE_VALUE) + sk_storage_result = -1; + + sk_storage_result = bpf_sk_storage_delete(&sk_storage_map, sock->sk); + return 0; +} + +SEC("lsm/socket_post_create") +int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, + int protocol, int kern) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; + + if (pid != monitored_pid) + return 0; + + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + storage->value = DUMMY_STORAGE_VALUE; + + return 0; +} + +SEC("lsm/file_open") +int BPF_PROG(file_open, struct file *file) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct dummy_storage *storage; + + if (pid != monitored_pid) + return 0; + + if (!file->f_inode) + return 0; + + storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + storage->value = DUMMY_STORAGE_VALUE; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c new file mode 100644 index 000000000000..b4598d4bc4f7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/lsm.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020 Google LLC. 
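+ *
+ * Covers both LSM hook flavours: test_int_hook attaches to
+ * file_mprotect, whose return value is honoured, so handing back -EPERM
+ * makes the monitored task's mprotect() on its own stack fail;
+ * test_void_hook attaches to bprm_committed_creds, where the return
+ * value is ignored and the program only counts invocations.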
+ */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <errno.h> + +char _license[] SEC("license") = "GPL"; + +int monitored_pid = 0; +int mprotect_count = 0; +int bprm_count = 0; + +SEC("lsm/file_mprotect") +int BPF_PROG(test_int_hook, struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot, int ret) +{ + if (ret != 0) + return ret; + + __u32 pid = bpf_get_current_pid_tgid() >> 32; + int is_stack = 0; + + is_stack = (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack); + + if (is_stack && monitored_pid == pid) { + mprotect_count++; + ret = -EPERM; + } + + return ret; +} + +SEC("lsm/bprm_committed_creds") +int BPF_PROG(test_void_hook, struct linux_binprm *bprm) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + + if (monitored_pid == pid) + bprm_count++; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c new file mode 100644 index 000000000000..e6e92249920e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#define LOOP_BOUND 0xf +#define MAX_ENTRIES 8 +#define HALF_ENTRIES (MAX_ENTRIES >> 1) + +_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND"); + +enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC; +__u32 g_line = 0; +int page_size = 0; /* userspace should set it */ + +#define VERIFY_TYPE(type, func) ({ \ + g_map_type = type; \ + if (!func()) \ + return 0; \ +}) + + +#define VERIFY(expr) ({ \ + g_line = __LINE__; \ + if (!(expr)) \ + return 0; \ +}) + +struct bpf_map_memory { + __u32 pages; +} __attribute__((preserve_access_index)); + +struct bpf_map { + enum bpf_map_type map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 id; + struct bpf_map_memory memory; +} __attribute__((preserve_access_index)); + +static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size, + __u32 value_size, __u32 max_entries) +{ + VERIFY(map->map_type == g_map_type); + VERIFY(map->key_size == key_size); + VERIFY(map->value_size == value_size); + VERIFY(map->max_entries == max_entries); + VERIFY(map->id > 0); + VERIFY(map->memory.pages > 0); + + return 1; +} + +static inline int check_bpf_map_ptr(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(indirect->map_type == direct->map_type); + VERIFY(indirect->key_size == direct->key_size); + VERIFY(indirect->value_size == direct->value_size); + VERIFY(indirect->max_entries == direct->max_entries); + VERIFY(indirect->id == direct->id); + VERIFY(indirect->memory.pages == direct->memory.pages); + + return 1; +} + +static inline int check(struct bpf_map *indirect, struct bpf_map *direct, + __u32 key_size, __u32 value_size, __u32 max_entries) +{ + VERIFY(check_bpf_map_ptr(indirect, direct)); + VERIFY(check_bpf_map_fields(indirect, key_size, value_size, + max_entries)); + return 1; +} + +static inline int check_default(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32), + MAX_ENTRIES)); + return 1; +} + +typedef struct { + int counter; +} atomic_t; + +struct bpf_htab { + struct bpf_map map; + atomic_t count; + __u32 n_buckets; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count 
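+ * (a preallocated htab hands out elements from a freelist and does not
+ * bump the count field, so check_hash() below could not observe the
+ * HALF_ENTRIES updates without this flag)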
*/ + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_hash SEC(".maps"); + +static inline int check_hash(void) +{ + struct bpf_htab *hash = (struct bpf_htab *)&m_hash; + struct bpf_map *map = (struct bpf_map *)&m_hash; + int i; + + VERIFY(check_default(&hash->map, map)); + + VERIFY(hash->n_buckets == MAX_ENTRIES); + VERIFY(hash->elem_size == 64); + + VERIFY(hash->count.counter == 0); + for (i = 0; i < HALF_ENTRIES; ++i) { + const __u32 key = i; + const __u32 val = 1; + + if (bpf_map_update_elem(hash, &key, &val, 0)) + return 0; + } + VERIFY(hash->count.counter == HALF_ENTRIES); + + return 1; +} + +struct bpf_array { + struct bpf_map map; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_array SEC(".maps"); + +static inline int check_array(void) +{ + struct bpf_array *array = (struct bpf_array *)&m_array; + struct bpf_map *map = (struct bpf_map *)&m_array; + int i, n_lookups = 0, n_keys = 0; + + VERIFY(check_default(&array->map, map)); + + VERIFY(array->elem_size == 8); + + for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) { + const __u32 key = i; + __u32 *val = bpf_map_lookup_elem(array, &key); + + ++n_lookups; + if (val) + ++n_keys; + } + + VERIFY(n_lookups == MAX_ENTRIES); + VERIFY(n_keys == MAX_ENTRIES); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_prog_array SEC(".maps"); + +static inline int check_prog_array(void) +{ + struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array; + struct bpf_map *map = (struct bpf_map *)&m_prog_array; + + VERIFY(check_default(&prog_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_perf_event_array SEC(".maps"); + +static inline int check_perf_event_array(void) +{ + struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array; + struct bpf_map *map = (struct bpf_map *)&m_perf_event_array; + + VERIFY(check_default(&perf_event_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_hash SEC(".maps"); + +static inline int check_percpu_hash(void) +{ + struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_percpu_hash; + + VERIFY(check_default(&percpu_hash->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_array SEC(".maps"); + +static inline int check_percpu_array(void) +{ + struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array; + struct bpf_map *map = (struct bpf_map *)&m_percpu_array; + + VERIFY(check_default(&percpu_array->map, map)); + + return 1; +} + +struct bpf_stack_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u64); +} m_stack_trace SEC(".maps"); + +static inline int check_stack_trace(void) +{ + struct bpf_stack_map *stack_trace = + (struct bpf_stack_map *)&m_stack_trace; + struct bpf_map *map = (struct bpf_map 
*)&m_stack_trace; + + VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64), + MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cgroup_array SEC(".maps"); + +static inline int check_cgroup_array(void) +{ + struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_array; + + VERIFY(check_default(&cgroup_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_hash SEC(".maps"); + +static inline int check_lru_hash(void) +{ + struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_hash; + + VERIFY(check_default(&lru_hash->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_percpu_hash SEC(".maps"); + +static inline int check_lru_percpu_hash(void) +{ + struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash; + + VERIFY(check_default(&lru_percpu_hash->map, map)); + + return 1; +} + +struct lpm_trie { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct lpm_key { + struct bpf_lpm_trie_key trie_key; + __u32 data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __uint(max_entries, MAX_ENTRIES); + __type(key, struct lpm_key); + __type(value, __u32); +} m_lpm_trie SEC(".maps"); + +static inline int check_lpm_trie(void) +{ + struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie; + struct bpf_map *map = (struct bpf_map *)&m_lpm_trie; + + VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32), + MAX_ENTRIES)); + + return 1; +} + +struct inner_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); + }); +} m_array_of_maps SEC(".maps") = { + .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 }, +}; + +static inline int check_array_of_maps(void) +{ + struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_array_of_maps; + + VERIFY(check_default(&array_of_maps->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); + __array(values, struct inner_map); +} m_hash_of_maps SEC(".maps") = { + .values = { + [2] = &inner_map, + }, +}; + +static inline int check_hash_of_maps(void) +{ + struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps; + + VERIFY(check_default(&hash_of_maps->map, map)); + + return 1; +} + +struct bpf_dtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap SEC(".maps"); + +static 
inline int check_devmap(void) +{ + struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap; + struct bpf_map *map = (struct bpf_map *)&m_devmap; + + VERIFY(check_default(&devmap->map, map)); + + return 1; +} + +struct bpf_stab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockmap SEC(".maps"); + +static inline int check_sockmap(void) +{ + struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap; + struct bpf_map *map = (struct bpf_map *)&m_sockmap; + + VERIFY(check_default(&sockmap->map, map)); + + return 1; +} + +struct bpf_cpu_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cpumap SEC(".maps"); + +static inline int check_cpumap(void) +{ + struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap; + struct bpf_map *map = (struct bpf_map *)&m_cpumap; + + VERIFY(check_default(&cpumap->map, map)); + + return 1; +} + +struct xsk_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_xskmap SEC(".maps"); + +static inline int check_xskmap(void) +{ + struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap; + struct bpf_map *map = (struct bpf_map *)&m_xskmap; + + VERIFY(check_default(&xskmap->map, map)); + + return 1; +} + +struct bpf_shtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockhash SEC(".maps"); + +static inline int check_sockhash(void) +{ + struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash; + struct bpf_map *map = (struct bpf_map *)&m_sockhash; + + VERIFY(check_default(&sockhash->map, map)); + + return 1; +} + +struct bpf_cgroup_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_cgroup_storage SEC(".maps"); + +static inline int check_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *cgroup_storage = + (struct bpf_cgroup_storage_map *)&m_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage; + + VERIFY(check(&cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct reuseport_array { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_reuseport_sockarray SEC(".maps"); + +static inline int check_reuseport_sockarray(void) +{ + struct reuseport_array *reuseport_sockarray = + (struct reuseport_array *)&m_reuseport_sockarray; + struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray; + + VERIFY(check_default(&reuseport_sockarray->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_percpu_cgroup_storage SEC(".maps"); + +static inline int check_percpu_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *percpu_cgroup_storage = + (struct 
bpf_cgroup_storage_map *)&m_percpu_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage; + + VERIFY(check(&percpu_cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct bpf_queue_stack { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_queue SEC(".maps"); + +static inline int check_queue(void) +{ + struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue; + struct bpf_map *map = (struct bpf_map *)&m_queue; + + VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_STACK); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_stack SEC(".maps"); + +static inline int check_stack(void) +{ + struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack; + struct bpf_map *map = (struct bpf_map *)&m_stack; + + VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct bpf_sk_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, __u32); + __type(value, __u32); +} m_sk_storage SEC(".maps"); + +static inline int check_sk_storage(void) +{ + struct bpf_sk_storage_map *sk_storage = + (struct bpf_sk_storage_map *)&m_sk_storage; + struct bpf_map *map = (struct bpf_map *)&m_sk_storage; + + VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap_hash SEC(".maps"); + +static inline int check_devmap_hash(void) +{ + struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash; + struct bpf_map *map = (struct bpf_map *)&m_devmap_hash; + + VERIFY(check_default(&devmap_hash->map, map)); + + return 1; +} + +struct bpf_ringbuf_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); +} m_ringbuf SEC(".maps"); + +static inline int check_ringbuf(void) +{ + struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf; + struct bpf_map *map = (struct bpf_map *)&m_ringbuf; + + VERIFY(check(&ringbuf->map, map, 0, 0, page_size)); + + return 1; +} + +SEC("cgroup_skb/egress") +int cg_skb(void *ctx) +{ + VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array); + VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array); + VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap); + VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap); + VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, 
check_sockhash); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + check_reuseport_sockarray); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + check_percpu_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue); + VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack); + // VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash); + VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf); + + return 1; +} + +__u32 _version SEC("version") = 1; +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c new file mode 100644 index 000000000000..8b7466a15c6b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/modify_return.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020 Google LLC. + */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +static int sequence = 0; +__s32 input_retval = 0; + +__u64 fentry_result = 0; +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(fentry_test, int a, __u64 b) +{ + sequence++; + fentry_result = (sequence == 1); + return 0; +} + +__u64 fmod_ret_result = 0; +SEC("fmod_ret/bpf_modify_return_test") +int BPF_PROG(fmod_ret_test, int a, int *b, int ret) +{ + sequence++; + /* This is the first fmod_ret program, the ret passed should be 0 */ + fmod_ret_result = (sequence == 2 && ret == 0); + return input_retval; +} + +__u64 fexit_result = 0; +SEC("fexit/bpf_modify_return_test") +int BPF_PROG(fexit_test, int a, __u64 b, int ret) +{ + sequence++; + /* If the input_reval is non-zero a successful modification should have + * occurred. + */ + if (input_retval) + fexit_result = (sequence == 3 && ret == input_retval); + else + fexit_result = (sequence == 3 && ret == 4); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c new file mode 100644 index 000000000000..63531e1a9fa4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall1.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 3); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +#define TAIL_FUNC(x) \ + SEC("classifier/" #x) \ + int bpf_func_##x(struct __sk_buff *skb) \ + { \ + return x; \ + } +TAIL_FUNC(0) +TAIL_FUNC(1) +TAIL_FUNC(2) + +SEC("classifier") +int entry(struct __sk_buff *skb) +{ + /* Multiple locations to make sure we patch + * all of them. 
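+ * Each bpf_tail_call() with a constant index is its own patch site for
+ * the JIT, so updating one jmp_table slot must rewrite every site that
+ * uses that index; e.g. storing bpf_func_0's fd at jmp_table[0]
+ * redirects all four index-0 calls at once. A successful tail call
+ * never returns, so reaching "return 3" proves the slot was empty.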
+ */ + bpf_tail_call(skb, &jmp_table, 0); + bpf_tail_call(skb, &jmp_table, 0); + bpf_tail_call(skb, &jmp_table, 0); + bpf_tail_call(skb, &jmp_table, 0); + + bpf_tail_call(skb, &jmp_table, 1); + bpf_tail_call(skb, &jmp_table, 1); + bpf_tail_call(skb, &jmp_table, 1); + bpf_tail_call(skb, &jmp_table, 1); + + bpf_tail_call(skb, &jmp_table, 2); + bpf_tail_call(skb, &jmp_table, 2); + bpf_tail_call(skb, &jmp_table, 2); + bpf_tail_call(skb, &jmp_table, 2); + + return 3; +} + +char __license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c new file mode 100644 index 000000000000..21c85c477210 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall2.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 5); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +SEC("classifier/0") +int bpf_func_0(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 1); + return 0; +} + +SEC("classifier/1") +int bpf_func_1(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 2); + return 1; +} + +SEC("classifier/2") +int bpf_func_2(struct __sk_buff *skb) +{ + return 2; +} + +SEC("classifier/3") +int bpf_func_3(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 4); + return 3; +} + +SEC("classifier/4") +int bpf_func_4(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 3); + return 4; +} + +SEC("classifier") +int entry(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 0); + /* Check multi-prog update. */ + bpf_tail_call(skb, &jmp_table, 2); + /* Check tail call limit. 
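+ * classifier/3 and classifier/4 above bounce between indices 4 and 3
+ * until the kernel's tail call budget (MAX_TAIL_CALL_CNT, 32 at the
+ * time of writing) is exhausted; the failed bpf_tail_call() then falls
+ * through and the current program's return value is used.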
*/ + bpf_tail_call(skb, &jmp_table, 3); + return 3; +} + +char __license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c new file mode 100644 index 000000000000..1ecae198b8c1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall3.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +static volatile int count; + +SEC("classifier/0") +int bpf_func_0(struct __sk_buff *skb) +{ + count++; + bpf_tail_call(skb, &jmp_table, 0); + return 1; +} + +SEC("classifier") +int entry(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, 0); + return 0; +} + +char __license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c new file mode 100644 index 000000000000..499388758119 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall4.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 3); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +static volatile int selector; + +#define TAIL_FUNC(x) \ + SEC("classifier/" #x) \ + int bpf_func_##x(struct __sk_buff *skb) \ + { \ + return x; \ + } +TAIL_FUNC(0) +TAIL_FUNC(1) +TAIL_FUNC(2) + +SEC("classifier") +int entry(struct __sk_buff *skb) +{ + bpf_tail_call(skb, &jmp_table, selector); + return 3; +} + +char __license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c new file mode 100644 index 000000000000..49c64eb53f19 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall5.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> + +#include "bpf_helpers.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 3); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +static volatile int selector; + +#define TAIL_FUNC(x) \ + SEC("classifier/" #x) \ + int bpf_func_##x(struct __sk_buff *skb) \ + { \ + return x; \ + } +TAIL_FUNC(0) +TAIL_FUNC(1) +TAIL_FUNC(2) + +SEC("classifier") +int entry(struct __sk_buff *skb) +{ + int idx = 0; + + if (selector == 1234) + idx = 1; + else if (selector == 5678) + idx = 2; + + bpf_tail_call(skb, &jmp_table, idx); + return 3; +} + +char __license[] SEC("license") = "GPL"; +int _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 63a8dfef893b..b37398974d92 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -12,7 +12,7 @@ struct { __type(value, int); } results_map SEC(".maps"); -SEC("kprobe/sys_nanosleep") +SEC("kprobe/hrtimer_nanosleep") int handle_sys_nanosleep_entry(struct pt_regs *ctx) { const int key = 0, value = 1; @@ -21,7 +21,7 @@ int handle_sys_nanosleep_entry(struct pt_regs *ctx) return 0; } -SEC("kretprobe/sys_nanosleep") +SEC("kretprobe/hrtimer_nanosleep") int 
handle_sys_getpid_return(struct pt_regs *ctx) { const int key = 1, value = 2; diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c new file mode 100644 index 000000000000..77e47b9e4446 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +int calls = 0; +int alt_calls = 0; + +SEC("cgroup_skb/egress1") +int egress(struct __sk_buff *skb) +{ + __sync_fetch_and_add(&calls, 1); + return 1; +} + +SEC("cgroup_skb/egress2") +int egress_alt(struct __sk_buff *skb) +{ + __sync_fetch_and_add(&alt_calls, 1); + return 1; +} + +char _license[] SEC("license") = "GPL"; + diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c new file mode 100644 index 000000000000..cce6d605c017 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include "bpf_helpers.h" + +#define MAX_STACK_RAWTP 10 + +SEC("raw_tracepoint/sys_enter") +int bpf_prog2(void *ctx) +{ + __u64 stack[MAX_STACK_RAWTP]; + int error; + + /* set all the flags which should return -EINVAL */ + error = bpf_get_stack(ctx, stack, 0, -1); + if (error < 0) + goto loop; + + return error; +loop: + while (1) { + error++; + } +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_link_pinning.c b/tools/testing/selftests/bpf/progs/test_link_pinning.c new file mode 100644 index 000000000000..bbf2a5264dc0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_link_pinning.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <stdbool.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +int in = 0; +int out = 0; + +SEC("raw_tp/sys_enter") +int raw_tp_prog(const void *ctx) +{ + out = in; + return 0; +} + +SEC("tp_btf/sys_enter") +int tp_btf_prog(const void *ctx) +{ + out = in; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c index 3d30c02bdae9..ded71b3ff6b4 100644 --- a/tools/testing/selftests/bpf/progs/test_obj_id.c +++ b/tools/testing/selftests/bpf/progs/test_obj_id.c @@ -3,15 +3,7 @@ */ #include <stddef.h> #include <linux/bpf.h> -#include <linux/pkt_cls.h> -#include "bpf_helpers.h" - -/* It is a dumb bpf program such that it must have no - * issue to be loaded since testing the verifier is - * not the focus here. 
- */ - -int _version SEC("version") = 1; +#include <bpf/bpf_helpers.h> struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -20,13 +12,13 @@ struct { __type(value, __u64); } test_map_id SEC(".maps"); -SEC("test_obj_id_dummy") -int test_obj_id(struct __sk_buff *skb) +SEC("raw_tp/sys_enter") +int test_obj_id(void *ctx) { __u32 key = 0; __u64 *value; value = bpf_map_lookup_elem(&test_map_id, &key); - return TC_ACT_OK; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index 876c27deb65a..ac0b5072d4c8 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -11,8 +11,8 @@ struct { __uint(value_size, sizeof(int)); } perf_buf_map SEC(".maps"); -SEC("kprobe/sys_nanosleep") -int handle_sys_nanosleep_entry(struct pt_regs *ctx) +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) { int cpu = bpf_get_smp_processor_id(); diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 7cf42d14103f..3a7b4b607ed3 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -17,8 +17,38 @@ #define barrier() __asm__ __volatile__("": : :"memory") int _version SEC("version") = 1; -SEC("test1") -int process(struct __sk_buff *skb) +/* llvm will optimize both subprograms into exactly the same BPF assembly + * + * Disassembly of section .text: + * + * 0000000000000000 test_pkt_access_subprog1: + * ; return skb->len * 2; + * 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) + * 1: 64 00 00 00 01 00 00 00 w0 <<= 1 + * 2: 95 00 00 00 00 00 00 00 exit + * + * 0000000000000018 test_pkt_access_subprog2: + * ; return skb->len * val; + * 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) + * 4: 64 00 00 00 01 00 00 00 w0 <<= 1 + * 5: 95 00 00 00 00 00 00 00 exit + * + * Which makes it an interesting test for BTF-enabled verifier. 
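+ *
+ * fexit_bpf2bpf.c in this directory attaches to these subprograms by
+ * section name ("fexit/test_pkt_access_subprog2" etc.) and depends on
+ * exactly this assembly shape; see the comment there about the dropped
+ * 'int val' argument.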
+ */ +static __attribute__ ((noinline)) +int test_pkt_access_subprog1(volatile struct __sk_buff *skb) +{ + return skb->len * 2; +} + +static __attribute__ ((noinline)) +int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) +{ + return skb->len * val; +} + +SEC("classifier/test_pkt_access") +int test_pkt_access(struct __sk_buff *skb) { void *data_end = (void *)(long)skb->data_end; void *data = (void *)(long)skb->data; @@ -48,6 +78,10 @@ int process(struct __sk_buff *skb) tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len); } + if (test_pkt_access_subprog1(skb) != skb->len * 2) + return TC_ACT_SHOT; + if (test_pkt_access_subprog2(2, skb) != skb->len * 2) + return TC_ACT_SHOT; if (tcp) { if (((void *)(tcp) + 20) > data_end || proto != 6) return TC_ACT_SHOT; diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c new file mode 100644 index 000000000000..8ba9959b036b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 12); +} ringbuf SEC(".maps"); + +/* inputs */ +int pid = 0; +long value = 0; +long flags = 0; + +/* outputs */ +long total = 0; +long discarded = 0; +long dropped = 0; + +long avail_data = 0; +long ring_size = 0; +long cons_pos = 0; +long prod_pos = 0; + +/* inner state */ +long seq = 0; + +SEC("tp/syscalls/sys_enter_getpgid") +int test_ringbuf(void *ctx) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + struct sample *sample; + int zero = 0; + + if (cur_pid != pid) + return 0; + + sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0); + if (!sample) { + __sync_fetch_and_add(&dropped, 1); + return 1; + } + + sample->pid = pid; + bpf_get_current_comm(sample->comm, sizeof(sample->comm)); + sample->value = value; + + sample->seq = seq++; + __sync_fetch_and_add(&total, 1); + + if (sample->seq & 1) { + /* copy from reserved sample to a new one... 
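+ * This exercises both producer APIs on the same record:
+ * bpf_ringbuf_output() copies the sample into a fresh reservation in
+ * one call, while the original reservation from bpf_ringbuf_reserve()
+ * must still be closed out, here with bpf_ringbuf_discard(). A
+ * discarded record keeps its slot in the ring but is skipped by the
+ * consumer, which is what the "discarded" counter lets userspace check.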
*/ + bpf_ringbuf_output(&ringbuf, sample, sizeof(*sample), flags); + /* ...and then discard reserved sample */ + bpf_ringbuf_discard(sample, flags); + __sync_fetch_and_add(&discarded, 1); + } else { + bpf_ringbuf_submit(sample, flags); + } + + avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA); + ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE); + cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS); + prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c new file mode 100644 index 000000000000..edf3b6953533 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +struct ringbuf_map { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 12); +} ringbuf1 SEC(".maps"), + ringbuf2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 4); + __type(key, int); + __array(values, struct ringbuf_map); +} ringbuf_arr SEC(".maps") = { + .values = { + [0] = &ringbuf1, + [2] = &ringbuf2, + }, +}; + +/* inputs */ +int pid = 0; +int target_ring = 0; +long value = 0; + +/* outputs */ +long total = 0; +long dropped = 0; +long skipped = 0; + +SEC("tp/syscalls/sys_enter_getpgid") +int test_ringbuf(void *ctx) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + struct sample *sample; + void *rb; + int zero = 0; + + if (cur_pid != pid) + return 0; + + rb = bpf_map_lookup_elem(&ringbuf_arr, &target_ring); + if (!rb) { + skipped += 1; + return 1; + } + + sample = bpf_ringbuf_reserve(rb, sizeof(*sample), 0); + if (!sample) { + dropped += 1; + return 1; + } + + sample->pid = pid; + bpf_get_current_comm(sample->comm, sizeof(sample->comm)); + sample->value = value; + + sample->seq = total; + total += 1; + + bpf_ringbuf_submit(sample, 0); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c new file mode 100644 index 000000000000..840391e7c014 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Cloudflare Ltd. +// Copyright (c) 2020 Isovalent, Inc. + +#include <stddef.h> +#include <stdbool.h> +#include <string.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/pkt_cls.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <sys/socket.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +int _version SEC("version") = 1; +char _license[] SEC("license") = "GPL"; + +/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. 
*/ +static inline struct bpf_sock_tuple * +get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct bpf_sock_tuple *result; + struct ethhdr *eth; + __u64 tuple_len; + __u8 proto = 0; + __u64 ihl_len; + + eth = (struct ethhdr *)(data); + if (eth + 1 > data_end) + return NULL; + + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth)); + + if (iph + 1 > data_end) + return NULL; + if (iph->ihl != 5) + /* Options are not supported */ + return NULL; + ihl_len = iph->ihl * 4; + proto = iph->protocol; + *ipv4 = true; + result = (struct bpf_sock_tuple *)&iph->saddr; + } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth)); + + if (ip6h + 1 > data_end) + return NULL; + ihl_len = sizeof(*ip6h); + proto = ip6h->nexthdr; + *ipv4 = false; + result = (struct bpf_sock_tuple *)&ip6h->saddr; + } else { + return (struct bpf_sock_tuple *)data; + } + + if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) + return NULL; + + *tcp = (proto == IPPROTO_TCP); + return result; +} + +static inline int +handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4) +{ + struct bpf_sock_tuple ln = {0}; + struct bpf_sock *sk; + size_t tuple_len; + int ret; + + tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); + if ((void *)tuple + tuple_len > (void *)(long)skb->data_end) + return TC_ACT_SHOT; + + sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0); + if (sk) + goto assign; + + if (ipv4) { + if (tuple->ipv4.dport != bpf_htons(4321)) + return TC_ACT_OK; + + ln.ipv4.daddr = bpf_htonl(0x7f000001); + ln.ipv4.dport = bpf_htons(1234); + + sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv4), + BPF_F_CURRENT_NETNS, 0); + } else { + if (tuple->ipv6.dport != bpf_htons(4321)) + return TC_ACT_OK; + + /* Upper parts of daddr are already zero. */ + ln.ipv6.daddr[3] = bpf_htonl(0x1); + ln.ipv6.dport = bpf_htons(1234); + + sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv6), + BPF_F_CURRENT_NETNS, 0); + } + + /* workaround: We can't do a single socket lookup here, because then + * the compiler will likely spill tuple_len to the stack. This makes it + * lose all bounds information in the verifier, which then rejects the + * call as unsafe. + */ + if (!sk) + return TC_ACT_SHOT; + +assign: + ret = bpf_sk_assign(skb, sk, 0); + bpf_sk_release(sk); + return ret; +} + +static inline int +handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4) +{ + struct bpf_sock_tuple ln = {0}; + struct bpf_sock *sk; + size_t tuple_len; + int ret; + + tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); + if ((void *)tuple + tuple_len > (void *)(long)skb->data_end) + return TC_ACT_SHOT; + + sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0); + if (sk) { + if (sk->state != BPF_TCP_LISTEN) + goto assign; + bpf_sk_release(sk); + } + + if (ipv4) { + if (tuple->ipv4.dport != bpf_htons(4321)) + return TC_ACT_OK; + + ln.ipv4.daddr = bpf_htonl(0x7f000001); + ln.ipv4.dport = bpf_htons(1234); + + sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv4), + BPF_F_CURRENT_NETNS, 0); + } else { + if (tuple->ipv6.dport != bpf_htons(4321)) + return TC_ACT_OK; + + /* Upper parts of daddr are already zero. 
*/ + ln.ipv6.daddr[3] = bpf_htonl(0x1); + ln.ipv6.dport = bpf_htons(1234); + + sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv6), + BPF_F_CURRENT_NETNS, 0); + } + + /* workaround: We can't do a single socket lookup here, because then + * the compiler will likely spill tuple_len to the stack. This makes it + * lose all bounds information in the verifier, which then rejects the + * call as unsafe. + */ + if (!sk) + return TC_ACT_SHOT; + + if (sk->state != BPF_TCP_LISTEN) { + bpf_sk_release(sk); + return TC_ACT_SHOT; + } + +assign: + ret = bpf_sk_assign(skb, sk, 0); + bpf_sk_release(sk); + return ret; +} + +SEC("classifier/sk_assign_test") +int bpf_sk_assign_test(struct __sk_buff *skb) +{ + struct bpf_sock_tuple *tuple, ln = {0}; + bool ipv4 = false; + bool tcp = false; + int tuple_len; + int ret = 0; + + tuple = get_tuple(skb, &ipv4, &tcp); + if (!tuple) + return TC_ACT_SHOT; + +#define TCP4_LEN (sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr)) +#define TCP6_LEN (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) +#define UDP4_LEN (sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr)) +#define UDP6_LEN (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr)) + + /* Note that the verifier socket return type for bpf_skc_lookup_tcp() + * differs from bpf_sk_lookup_udp(), so even though the C-level type is + * the same here, if we try to share the implementations they will + * fail to verify because we're crossing pointer types. + */ + if (tcp) { + if (ipv4 && skb->data + TCP4_LEN > skb->data_end) + return 0; + if (!ipv4 && skb->data + TCP6_LEN > skb->data_end) + return 0; + ret = handle_tcp(skb, tuple, ipv4); + } else { + if (ipv4 && skb->data + UDP4_LEN > skb->data_end) + return 0; + if (!ipv4 && skb->data + UDP6_LEN> skb->data_end) + return 0; + ret = handle_udp(skb, tuple, ipv4); + } + + return ret == 0 ? 
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c new file mode 100644 index 000000000000..bbf8296f4d66 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare + +#include <errno.h> +#include <stdbool.h> +#include <stddef.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <sys/socket.h> + +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> + +#define IP4(a, b, c, d) \ + bpf_htonl((((__u32)(a) & 0xffU) << 24) | \ + (((__u32)(b) & 0xffU) << 16) | \ + (((__u32)(c) & 0xffU) << 8) | \ + (((__u32)(d) & 0xffU) << 0)) +#define IP6(aaaa, bbbb, cccc, dddd) \ + { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) } + +#define MAX_SOCKS 32 + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_SOCKS); + __type(key, __u32); + __type(value, __u64); +} redir_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, int); +} run_map SEC(".maps"); + +enum { + PROG1 = 0, + PROG2, +}; + +enum { + SERVER_A = 0, + SERVER_B, +}; + +/* Addressable key/value constants for convenience */ +static const int KEY_PROG1 = PROG1; +static const int KEY_PROG2 = PROG2; +static const int PROG_DONE = 1; + +static const __u32 KEY_SERVER_A = SERVER_A; +static const __u32 KEY_SERVER_B = SERVER_B; + +static const __u16 DST_PORT = 7007; /* Host byte order */ +static const __u32 DST_IP4 = IP4(127, 0, 0, 1); +static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001); + +SEC("sk_lookup/lookup_pass") +int lookup_pass(struct bpf_sk_lookup *ctx) +{ + return SK_PASS; +} + +SEC("sk_lookup/lookup_drop") +int lookup_drop(struct bpf_sk_lookup *ctx) +{ + return SK_DROP; +} + +SEC("sk_reuseport/reuse_pass") +int reuseport_pass(struct sk_reuseport_md *ctx) +{ + return SK_PASS; +} + +SEC("sk_reuseport/reuse_drop") +int reuseport_drop(struct sk_reuseport_md *ctx) +{ + return SK_DROP; +} + +/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */ +SEC("sk_lookup/redir_port") +int redir_port(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->local_port != DST_PORT) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */ +SEC("sk_lookup/redir_ip4") +int redir_ip4(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip4 != DST_IP4) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP6 address to socket at redir_map[0].
*/ +SEC("sk_lookup/redir_ip6") +int redir_ip6(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET6) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip6[0] != DST_IP6[0] || + ctx->local_ip6[1] != DST_IP6[1] || + ctx->local_ip6[2] != DST_IP6[2] || + ctx->local_ip6[3] != DST_IP6[3]) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a") +int select_sock_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a_no_reuseport") +int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_reuseport/select_sock_b") +int select_sock_b(struct sk_reuseport_md *ctx) +{ + __u32 key = KEY_SERVER_B; + int err; + + err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0); + return err ? SK_DROP : SK_PASS; +} + +/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */ +SEC("sk_lookup/sk_assign_eexist") +int sk_assign_eexist(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err != -EEXIST) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -EEXIST); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */ +SEC("sk_lookup/sk_assign_replace_flag") +int sk_assign_replace_flag(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(sk=NULL) is accepted. 
*/ +SEC("sk_lookup/sk_assign_null") +int sk_assign_null(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk = NULL; + int err, ret; + + ret = SK_DROP; + + err = bpf_sk_assign(ctx, NULL, 0); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + if (ctx->sk != sk) + goto out; + err = bpf_sk_assign(ctx, NULL, 0); + if (err != -EEXIST) + goto out; + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that selected sk is accessible through context. */ +SEC("sk_lookup/access_ctx_sk") +int access_ctx_sk(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk1 = NULL, *sk2 = NULL; + int err, ret; + + ret = SK_DROP; + + /* Try accessing unassigned (NULL) ctx->sk field */ + if (ctx->sk && ctx->sk->family != AF_INET) + goto out; + + /* Assign a value to ctx->sk */ + sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk1) + goto out; + err = bpf_sk_assign(ctx, sk1, 0); + if (err) + goto out; + if (ctx->sk != sk1) + goto out; + + /* Access ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + /* Reset selection */ + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk) + goto out; + + /* Assign another socket */ + sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk2) + goto out; + err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk != sk2) + goto out; + + /* Access reassigned ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk1) + bpf_sk_release(sk1); + if (sk2) + bpf_sk_release(sk2); + return ret; +} + +/* Check narrow loads from ctx fields that support them. + * + * Narrow loads of size >= target field size from a non-zero offset + * are not covered because they give bogus results, that is the + * verifier ignores the offset. + */ +SEC("sk_lookup/ctx_narrow_access") +int ctx_narrow_access(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, family; + __u16 *half; + __u8 *byte; + bool v4; + + v4 = (ctx->family == AF_INET); + + /* Narrow loads from family field */ + byte = (__u8 *)&ctx->family; + half = (__u16 *)&ctx->family; + if (byte[0] != (v4 ? AF_INET : AF_INET6) || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + if (half[0] != (v4 ? AF_INET : AF_INET6)) + return SK_DROP; + + byte = (__u8 *)&ctx->protocol; + if (byte[0] != IPPROTO_TCP || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->protocol; + if (half[0] != IPPROTO_TCP) + return SK_DROP; + + /* Narrow loads from remote_port field. Expect non-0 value. */ + byte = (__u8 *)&ctx->remote_port; + if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_port; + if (half[0] == 0) + return SK_DROP; + + /* Narrow loads from local_port field. Expect DST_PORT. 
*/ + byte = (__u8 *)&ctx->local_port; + if (byte[0] != ((DST_PORT >> 0) & 0xff) || + byte[1] != ((DST_PORT >> 8) & 0xff) || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_port; + if (half[0] != DST_PORT) + return SK_DROP; + + /* Narrow loads from IPv4 fields */ + if (v4) { + /* Expect non-0.0.0.0 in remote_ip4 */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] == 0 && half[1] == 0) + return SK_DROP; + + /* Expect DST_IP4 in local_ip4 */ + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != ((DST_IP4 >> 0) & 0xff) || + byte[1] != ((DST_IP4 >> 8) & 0xff) || + byte[2] != ((DST_IP4 >> 16) & 0xff) || + byte[3] != ((DST_IP4 >> 24) & 0xff)) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != ((DST_IP4 >> 0) & 0xffff) || + half[1] != ((DST_IP4 >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect 0.0.0.0 IPs when family != AF_INET */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + } + + /* Narrow loads from IPv6 fields */ + if (!v4) { + /* Expect non-:: IP in remote_ip6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0 && + byte[4] == 0 && byte[5] == 0 && + byte[6] == 0 && byte[7] == 0 && + byte[8] == 0 && byte[9] == 0 && + byte[10] == 0 && byte[11] == 0 && + byte[12] == 0 && byte[13] == 0 && + byte[14] == 0 && byte[15] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] == 0 && half[1] == 0 && + half[2] == 0 && half[3] == 0 && + half[4] == 0 && half[5] == 0 && + half[6] == 0 && half[7] == 0) + return SK_DROP; + + /* Expect DST_IP6 in local_ip6 */ + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) || + byte[1] != ((DST_IP6[0] >> 8) & 0xff) || + byte[2] != ((DST_IP6[0] >> 16) & 0xff) || + byte[3] != ((DST_IP6[0] >> 24) & 0xff) || + byte[4] != ((DST_IP6[1] >> 0) & 0xff) || + byte[5] != ((DST_IP6[1] >> 8) & 0xff) || + byte[6] != ((DST_IP6[1] >> 16) & 0xff) || + byte[7] != ((DST_IP6[1] >> 24) & 0xff) || + byte[8] != ((DST_IP6[2] >> 0) & 0xff) || + byte[9] != ((DST_IP6[2] >> 8) & 0xff) || + byte[10] != ((DST_IP6[2] >> 16) & 0xff) || + byte[11] != ((DST_IP6[2] >> 24) & 0xff) || + byte[12] != ((DST_IP6[3] >> 0) & 0xff) || + byte[13] != ((DST_IP6[3] >> 8) & 0xff) || + byte[14] != ((DST_IP6[3] >> 16) & 0xff) || + byte[15] != ((DST_IP6[3] >> 24) & 0xff)) + return SK_DROP; + half = (__u16 *)&ctx->local_ip6; + if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) || + half[1] != ((DST_IP6[0] >> 16) & 0xffff) || + half[2] != ((DST_IP6[1] >> 0) & 0xffff) || + half[3] != ((DST_IP6[1] >> 16) & 0xffff) || + half[4] != ((DST_IP6[2] >> 0) & 0xffff) || + half[5] != ((DST_IP6[2] >> 16) & 0xffff) || + half[6] != ((DST_IP6[3] >> 0) & 0xffff) || + half[7] != ((DST_IP6[3] >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect :: IPs when family != AF_INET6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] != 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] != 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + } + + /* Success, redirect to KEY_SERVER_B */ + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (sk) { + bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + } + return SK_PASS; +}
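The byte- and half-sized expectations in ctx_narrow_access() follow from how a narrow load decomposes a wider context field on the little-endian targets these selftests typically run on. A small host-side illustration (plain C, not BPF; the variable names are ours and the assertions assume a little-endian host):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* stands in for the 4-byte local_port field holding
	 * DST_PORT (7007) in host byte order
	 */
	uint32_t local_port = 7007;
	uint8_t byte[4];
	uint16_t half[2];

	memcpy(byte, &local_port, sizeof(local_port));
	memcpy(half, &local_port, sizeof(local_port));

	/* on little-endian, byte[0]/half[0] hold the low byte/half and
	 * the upper bytes are zero, which is exactly what
	 * ctx_narrow_access() asserts for its 1- and 2-byte loads
	 */
	assert(byte[0] == ((7007 >> 0) & 0xff));
	assert(byte[1] == ((7007 >> 8) & 0xff));
	assert(byte[2] == 0 && byte[3] == 0);
	assert(half[0] == 7007 && half[1] == 0);
	return 0;
}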
+ +/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */ +SEC("sk_lookup/sk_assign_esocknosupport") +int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + + err = bpf_sk_assign(ctx, sk, 0); + if (err != -ESOCKTNOSUPPORT) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -ESOCKTNOSUPPORT); + goto out; + } + + ret = SK_PASS; /* Success, pass to regular lookup */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +SEC("sk_lookup/multi_prog_pass1") +int multi_prog_pass1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_pass2") +int multi_prog_pass2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_drop1") +int multi_prog_drop1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +SEC("sk_lookup/multi_prog_drop2") +int multi_prog_drop2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + if (err) + return SK_DROP; + + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_redir1") +int multi_prog_redir1(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_redir2") +int multi_prog_redir2(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +char _license[] SEC("license") = "Dual BSD/GPL"; +__u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index d22e438198cf..9af8822ece47 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -18,11 +18,11 @@ #define MAX_ULONG_STR_LEN 7 #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) +const
char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string"; static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) { - volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string"; unsigned char i; - char name[64]; + char name[sizeof(tcp_mem_name)]; int ret; memset(name, 0, sizeof(name)); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index cb201cbe11e7..55251046c9b7 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -18,11 +18,11 @@ #define MAX_ULONG_STR_LEN 7 #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) +const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop"; static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx) { - volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop"; unsigned char i; - char name[64]; + char name[sizeof(tcp_mem_name)]; int ret; memset(name, 0, sizeof(name)); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index 5cbbff416998..4396faf33394 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -19,11 +19,11 @@ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif +const char tcp_mem_name[] = "net/ipv4/tcp_mem"; static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) { - char tcp_mem_name[] = "net/ipv4/tcp_mem"; unsigned char i; - char name[64]; + char name[sizeof(tcp_mem_name)]; int ret; memset(name, 0, sizeof(name)); diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c new file mode 100644 index 000000000000..f6650fccf1f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> + +#include <linux/bpf.h> +#include <linux/stddef.h> +#include <linux/pkt_cls.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#ifndef ctx_ptr +# define ctx_ptr(field) (void *)(long)(field) +#endif + +#define ip4_src 0xac100164 /* 172.16.1.100 */ +#define ip4_dst 0xac100264 /* 172.16.2.100 */ + +#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe } +#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe } + +#ifndef v6_equal +# define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \ + a.s6_addr32[1] == b.s6_addr32[1] && \ + a.s6_addr32[2] == b.s6_addr32[2] && \ + a.s6_addr32[3] == b.s6_addr32[3]) +#endif + +enum { + dev_src, + dev_dst, +}; + +struct bpf_map_def SEC("maps") ifindex_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 2, +}; + +static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb, + __be32 addr) +{ + void *data_end = ctx_ptr(skb->data_end); + void *data = ctx_ptr(skb->data); + struct iphdr *ip4h; + + if (data + sizeof(struct ethhdr) > data_end) + return false; + + ip4h = (struct iphdr *)(data + sizeof(struct ethhdr)); + if ((void *)(ip4h + 1) > data_end) + return 
false; + + return ip4h->daddr == addr; +} + +static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb, + struct in6_addr addr) +{ + void *data_end = ctx_ptr(skb->data_end); + void *data = ctx_ptr(skb->data); + struct ipv6hdr *ip6h; + + if (data + sizeof(struct ethhdr) > data_end) + return false; + + ip6h = (struct ipv6hdr *)(data + sizeof(struct ethhdr)); + if ((void *)(ip6h + 1) > data_end) + return false; + + return v6_equal(ip6h->daddr, addr); +} + +static __always_inline int get_dev_ifindex(int which) +{ + int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which); + + return ifindex ? *ifindex : 0; +} + +SEC("chk_egress") int tc_chk(struct __sk_buff *skb) +{ + void *data_end = ctx_ptr(skb->data_end); + void *data = ctx_ptr(skb->data); + __u32 *raw = data; + + if (data + sizeof(struct ethhdr) > data_end) + return TC_ACT_SHOT; + + return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK; +} + +SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) +{ + __u8 zero[ETH_ALEN * 2]; + bool redirect = false; + + switch (skb->protocol) { + case __bpf_constant_htons(ETH_P_IP): + redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src)); + break; + case __bpf_constant_htons(ETH_P_IPV6): + redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src); + break; + } + + if (!redirect) + return TC_ACT_OK; + + __builtin_memset(&zero, 0, sizeof(zero)); + if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0) + return TC_ACT_SHOT; + + return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0); +} + +SEC("src_ingress") int tc_src(struct __sk_buff *skb) +{ + __u8 zero[ETH_ALEN * 2]; + bool redirect = false; + + switch (skb->protocol) { + case __bpf_constant_htons(ETH_P_IP): + redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst)); + break; + case __bpf_constant_htons(ETH_P_IPV6): + redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst); + break; + } + + if (!redirect) + return TC_ACT_OK; + + __builtin_memset(&zero, 0, sizeof(zero)); + if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0) + return TC_ACT_SHOT; + + return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c new file mode 100644 index 000000000000..fc84a7685aa2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdint.h> +#include <stdbool.h> + +#include <linux/bpf.h> +#include <linux/stddef.h> +#include <linux/pkt_cls.h> + +#include <bpf/bpf_helpers.h> + +enum { + dev_src, + dev_dst, +}; + +struct bpf_map_def SEC("maps") ifindex_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 2, +}; + +static __always_inline int get_dev_ifindex(int which) +{ + int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which); + + return ifindex ? 
*ifindex : 0; +} + +SEC("chk_egress") int tc_chk(struct __sk_buff *skb) +{ + return TC_ACT_SHOT; +} + +SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) +{ + return bpf_redirect_peer(get_dev_ifindex(dev_src), 0); +} + +SEC("src_ingress") int tc_src(struct __sk_buff *skb) +{ + return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 2e233613d1fc..7fa4595d2b66 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -131,6 +131,7 @@ int bpf_testcb(struct bpf_sock_ops *skops) g.bytes_received = skops->bytes_received; g.bytes_acked = skops->bytes_acked; } + g.num_close_events++; bpf_map_update_elem(&global_map, &key, &g, BPF_ANY); } diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 504df69c83df..141670ab4e67 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -15,7 +15,6 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/types.h> -#include <linux/tcp.h> #include <linux/socket.h> #include <linux/pkt_cls.h> #include <linux/erspan.h> @@ -447,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb) } ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); - if (ret < 0) { - ERROR(ret); - return TC_ACT_SHOT; - } + if (ret < 0) + gopt.opt_class = 0; bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4, gopt.opt_class); @@ -511,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb) } ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); - if (ret < 0) { - ERROR(ret); - return TC_ACT_SHOT; - } + if (ret < 0) + gopt.opt_class = 0; bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4, gopt.opt_class); @@ -528,12 +523,11 @@ int _ipip_set_tunnel(struct __sk_buff *skb) struct bpf_tunnel_key key = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; - struct tcphdr *tcp = data + sizeof(*iph); void *data_end = (void *)(long)skb->data_end; int ret; /* single length check */ - if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { + if (data + sizeof(*iph) > data_end) { ERROR(1); return TC_ACT_SHOT; } @@ -541,16 +535,6 @@ int _ipip_set_tunnel(struct __sk_buff *skb) key.tunnel_ttl = 64; if (iph->protocol == IPPROTO_ICMP) { key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ - } else { - if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) - return TC_ACT_SHOT; - - if (tcp->dest == bpf_htons(5200)) - key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ - else if (tcp->dest == bpf_htons(5201)) - key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */ - else - return TC_ACT_SHOT; } ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); @@ -585,19 +569,20 @@ int _ipip6_set_tunnel(struct __sk_buff *skb) struct bpf_tunnel_key key = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; - struct tcphdr *tcp = data + sizeof(*iph); void *data_end = (void *)(long)skb->data_end; int ret; /* single length check */ - if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { + if (data + sizeof(*iph) > data_end) { ERROR(1); return TC_ACT_SHOT; } __builtin_memset(&key, 0x0, sizeof(key)); - key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) { + key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ + } ret = 
bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6); @@ -634,35 +619,18 @@ int _ip6ip6_set_tunnel(struct __sk_buff *skb) struct bpf_tunnel_key key = {}; void *data = (void *)(long)skb->data; struct ipv6hdr *iph = data; - struct tcphdr *tcp = data + sizeof(*iph); void *data_end = (void *)(long)skb->data_end; int ret; /* single length check */ - if (data + sizeof(*iph) + sizeof(*tcp) > data_end) { + if (data + sizeof(*iph) > data_end) { ERROR(1); return TC_ACT_SHOT; } - key.remote_ipv6[0] = bpf_htonl(0x2401db00); key.tunnel_ttl = 64; - if (iph->nexthdr == 58 /* NEXTHDR_ICMP */) { - key.remote_ipv6[3] = bpf_htonl(1); - } else { - if (iph->nexthdr != 6 /* NEXTHDR_TCP */) { - ERROR(iph->nexthdr); - return TC_ACT_SHOT; - } - - if (tcp->dest == bpf_htons(5200)) { - key.remote_ipv6[3] = bpf_htonl(1); - } else if (tcp->dest == bpf_htons(5201)) { - key.remote_ipv6[3] = bpf_htonl(2); - } else { - ERROR(tcp->dest); - return TC_ACT_SHOT; - } + key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */ } ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c new file mode 100644 index 000000000000..9eed75eb47c5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/timer.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <linux/bpf.h> +#include <time.h> +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; +struct hmap_elem { + int counter; + struct bpf_timer timer; + struct bpf_spin_lock lock; /* unused */ +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1000); + __type(key, int); + __type(value, struct hmap_elem); +} hmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(map_flags, BPF_F_NO_PREALLOC); + __uint(max_entries, 1000); + __type(key, int); + __type(value, struct hmap_elem); +} hmap_malloc SEC(".maps"); + +struct elem { + struct bpf_timer t; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, struct elem); +} array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 4); + __type(key, int); + __type(value, struct elem); +} lru SEC(".maps"); + +__u64 bss_data; +__u64 err; +__u64 ok; +__u64 callback_check = 52; +__u64 callback2_check = 52; + +#define ARRAY 1 +#define HTAB 2 +#define HTAB_MALLOC 3 +#define LRU 4 + +/* callback for array and lru timers */ +static int timer_cb1(void *map, int *key, struct bpf_timer *timer) +{ + /* increment bss variable twice. + * Once via array timer callback and once via lru timer callback + */ + bss_data += 5; + + /* *key == ARRAY (1) - the callback was called for the array timer. + * *key == LRU (4) - the callback was called for the lru timer. + */ + if (*key == ARRAY) { + struct bpf_timer *lru_timer; + int lru_key = LRU; + + /* rearm array timer to be called again in ~35 seconds */ + if (bpf_timer_start(timer, 1ull << 35, 0) != 0) + err |= 1; + + lru_timer = bpf_map_lookup_elem(&lru, &lru_key); + if (!lru_timer) + return 0; + bpf_timer_set_callback(lru_timer, timer_cb1); + if (bpf_timer_start(lru_timer, 0, 0) != 0) + err |= 2; + } else if (*key == LRU) { + int lru_key, i; + + for (i = LRU + 1; + i <= 100 /* for current LRU eviction algorithm this number + * should be larger than ~ lru->max_entries * 2 + */; + i++) { + struct elem init = {}; + + /* lru_key cannot be used as loop induction variable + * otherwise the loop will be unbounded. + */ + lru_key = i; + + /* add more elements into lru map to push out current + * element and force deletion of this timer + */ + bpf_map_update_elem(map, &lru_key, &init, 0); + /* look it up to bump it into active list */ + bpf_map_lookup_elem(map, &lru_key); + + /* keep adding until *key changes underneath, + * which means that key/timer memory was reused + */ + if (*key != LRU) + break; + } + + /* check that the timer was removed */ + if (bpf_timer_cancel(timer) != -EINVAL) + err |= 4; + ok |= 1; + } + return 0; +}
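Every timer in this test follows the same three-step contract: the struct bpf_timer must be embedded in a map value, bpf_timer_init() binds it to that map, and a callback must be installed with bpf_timer_set_callback() before bpf_timer_start() can arm it. A minimal sketch of just that sequence, assuming the same includes as timer.c above (the map, callback, and program names are ours):

struct sketch_elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct sketch_elem);
} sketch_map SEC(".maps");

static int sketch_cb(void *map, int *key, struct bpf_timer *t)
{
	return 0;	/* runs asynchronously, after the prog returned */
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(sketch_arm, int a)
{
	int key = 0;
	struct sketch_elem *e = bpf_map_lookup_elem(&sketch_map, &key);

	if (!e)
		return 0;
	/* order matters: init binds the timer to its map, the callback
	 * must be set before start, and start takes nanoseconds
	 */
	bpf_timer_init(&e->t, &sketch_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->t, sketch_cb);
	bpf_timer_start(&e->t, 1000 /* 1 usec */, 0);
	return 0;
}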
+ +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test1, int a) +{ + struct bpf_timer *arr_timer, *lru_timer; + struct elem init = {}; + int lru_key = LRU; + int array_key = ARRAY; + + arr_timer = bpf_map_lookup_elem(&array, &array_key); + if (!arr_timer) + return 0; + bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC); + + bpf_map_update_elem(&lru, &lru_key, &init, 0); + lru_timer = bpf_map_lookup_elem(&lru, &lru_key); + if (!lru_timer) + return 0; + bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC); + + bpf_timer_set_callback(arr_timer, timer_cb1); + bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0); + + /* init more timers to check that array destruction + * doesn't leak timer memory. + */ + array_key = 0; + arr_timer = bpf_map_lookup_elem(&array, &array_key); + if (!arr_timer) + return 0; + bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC); + return 0; +} + +/* callback for prealloc and non-prealloc hashtab timers */ +static int timer_cb2(void *map, int *key, struct hmap_elem *val) +{ + if (*key == HTAB) + callback_check--; + else + callback2_check--; + if (val->counter > 0 && --val->counter) { + /* re-arm the timer to execute again after 1 usec */ + bpf_timer_start(&val->timer, 1000, 0); + } else if (*key == HTAB) { + struct bpf_timer *arr_timer; + int array_key = ARRAY; + + /* cancel arr_timer otherwise bpf_fentry_test1 prog + * will stay alive forever. + */ + arr_timer = bpf_map_lookup_elem(&array, &array_key); + if (!arr_timer) + return 0; + if (bpf_timer_cancel(arr_timer) != 1) + /* bpf_timer_cancel should return 1 to indicate + * that arr_timer was active at this time + */ + err |= 8; + + /* try to cancel ourself. It shouldn't deadlock. */ + if (bpf_timer_cancel(&val->timer) != -EDEADLK) + err |= 16; + + /* delete this key and this timer anyway. + * It shouldn't deadlock either. + */ + bpf_map_delete_elem(map, key); + + /* in preallocated hashmap both 'key' and 'val' could have been + * reused to store another map element (like in LRU above), + * but in controlled test environment the below test works. + * It's not a use-after-free. The memory is owned by the map. + */ + if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL) + err |= 32; + ok |= 2; + } else { + if (*key != HTAB_MALLOC) + err |= 64; + + /* try to cancel ourself. It shouldn't deadlock.
*/ + if (bpf_timer_cancel(&val->timer) != -EDEADLK) + err |= 128; + + /* delete this key and this timer anyway. + * It shouldn't deadlock either. + */ + bpf_map_delete_elem(map, key); + + /* in non-preallocated hashmap both 'key' and 'val' are RCU + * protected and still valid though this element was deleted + * from the map. Arm this timer for ~35 seconds. When callback + * finishes the call_rcu will invoke: + * htab_elem_free_rcu + * check_and_free_timer + * bpf_timer_cancel_and_free + * to cancel this 35 second sleep and delete the timer for real. + */ + if (bpf_timer_start(&val->timer, 1ull << 35, 0) != 0) + err |= 256; + ok |= 4; + } + return 0; +} + +int bpf_timer_test(void) +{ + struct hmap_elem *val; + int key = HTAB, key_malloc = HTAB_MALLOC; + + val = bpf_map_lookup_elem(&hmap, &key); + if (val) { + if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0) + err |= 512; + bpf_timer_set_callback(&val->timer, timer_cb2); + bpf_timer_start(&val->timer, 1000, 0); + } + val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc); + if (val) { + if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0) + err |= 1024; + bpf_timer_set_callback(&val->timer, timer_cb2); + bpf_timer_start(&val->timer, 1000, 0); + } + return 0; +} + +SEC("fentry/bpf_fentry_test2") +int BPF_PROG(test2, int a, int b) +{ + struct hmap_elem init = {}, *val; + int key = HTAB, key_malloc = HTAB_MALLOC; + + init.counter = 10; /* number of times to trigger timer_cb2 */ + bpf_map_update_elem(&hmap, &key, &init, 0); + val = bpf_map_lookup_elem(&hmap, &key); + if (val) + bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME); + /* update the same key to free the timer */ + bpf_map_update_elem(&hmap, &key, &init, 0); + + bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0); + val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc); + if (val) + bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME); + /* update the same key to free the timer */ + bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0); + + /* init more timers to check that htab operations + * don't leak timer memory. 
+ */ + key = 0; + bpf_map_update_elem(&hmap, &key, &init, 0); + val = bpf_map_lookup_elem(&hmap, &key); + if (val) + bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME); + bpf_map_delete_elem(&hmap, &key); + bpf_map_update_elem(&hmap, &key, &init, 0); + val = bpf_map_lookup_elem(&hmap, &key); + if (val) + bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME); + + /* and with non-prealloc htab */ + key_malloc = 0; + bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0); + val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc); + if (val) + bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME); + bpf_map_delete_elem(&hmap_malloc, &key_malloc); + bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0); + val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc); + if (val) + bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME); + + return bpf_timer_test(); +} diff --git a/tools/testing/selftests/bpf/progs/timer_mim.c b/tools/testing/selftests/bpf/progs/timer_mim.c new file mode 100644 index 000000000000..2024be7204a1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/timer_mim.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <linux/bpf.h> +#include <time.h> +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; +struct hmap_elem { + int pad; /* unused */ + struct bpf_timer timer; +}; + +struct inner_map { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1024); + __type(key, int); + __type(value, struct hmap_elem); +} inner_htab SEC(".maps"); + +#define ARRAY_KEY 1 +#define HASH_KEY 1234 + +struct outer_arr { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 2); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __array(values, struct inner_map); +} outer_arr SEC(".maps") = { + .values = { [ARRAY_KEY] = &inner_htab }, +}; + +__u64 err; +__u64 ok; +__u64 cnt; + +static int timer_cb1(void *map, int *key, struct hmap_elem *val); + +static int timer_cb2(void *map, int *key, struct hmap_elem *val) +{ + cnt++; + bpf_timer_set_callback(&val->timer, timer_cb1); + if (bpf_timer_start(&val->timer, 1000, 0)) + err |= 1; + ok |= 1; + return 0; +} + +/* callback for inner hash map */ +static int timer_cb1(void *map, int *key, struct hmap_elem *val) +{ + cnt++; + bpf_timer_set_callback(&val->timer, timer_cb2); + if (bpf_timer_start(&val->timer, 1000, 0)) + err |= 2; + /* Do a lookup to make sure 'map' and 'key' pointers are correct */ + bpf_map_lookup_elem(map, key); + ok |= 2; + return 0; +} + +#include <bpf/bpf_tracing.h> +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test1, int a) +{ + struct hmap_elem init = {}; + struct bpf_map *inner_map; + struct hmap_elem *val; + int array_key = ARRAY_KEY; + int hash_key = HASH_KEY; + + inner_map = bpf_map_lookup_elem(&outer_arr, &array_key); + if (!inner_map) + return 0; + + bpf_map_update_elem(inner_map, &hash_key, &init, 0); + val = bpf_map_lookup_elem(inner_map, &hash_key); + if (!val) + return 0; + + bpf_timer_init(&val->timer, inner_map, CLOCK_MONOTONIC); + if (bpf_timer_set_callback(&val->timer, timer_cb1)) + err |= 4; + if (bpf_timer_start(&val->timer, 0, 0)) + err |= 8; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/timer_mim_reject.c b/tools/testing/selftests/bpf/progs/timer_mim_reject.c new file mode 100644 index 000000000000..666d307e6d10 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/timer_mim_reject.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (c) 2021 Facebook */ +#include <linux/bpf.h> +#include <time.h> +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include "bpf_tcp_helpers.h" + +char _license[] SEC("license") = "GPL"; +struct hmap_elem { + int pad; /* unused */ + struct bpf_timer timer; +}; + +struct inner_map { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1024); + __type(key, int); + __type(value, struct hmap_elem); +} inner_htab SEC(".maps"); + +#define ARRAY_KEY 1 +#define ARRAY_KEY2 2 +#define HASH_KEY 1234 + +struct outer_arr { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 2); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __array(values, struct inner_map); +} outer_arr SEC(".maps") = { + .values = { [ARRAY_KEY] = &inner_htab }, +}; + +__u64 err; +__u64 ok; +__u64 cnt; + +/* callback for inner hash map */ +static int timer_cb(void *map, int *key, struct hmap_elem *val) +{ + return 0; +} + +#include <bpf/bpf_tracing.h> +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test1, int a) +{ + struct hmap_elem init = {}; + struct bpf_map *inner_map, *inner_map2; + struct hmap_elem *val; + int array_key = ARRAY_KEY; + int array_key2 = ARRAY_KEY2; + int hash_key = HASH_KEY; + + inner_map = bpf_map_lookup_elem(&outer_arr, &array_key); + if (!inner_map) + return 0; + + inner_map2 = bpf_map_lookup_elem(&outer_arr, &array_key2); + if (!inner_map2) + return 0; + bpf_map_update_elem(inner_map, &hash_key, &init, 0); + val = bpf_map_lookup_elem(inner_map, &hash_key); + if (!val) + return 0; + + bpf_timer_init(&val->timer, inner_map2, CLOCK_MONOTONIC); + if (bpf_timer_set_callback(&val->timer, timer_cb)) + err |= 4; + if (bpf_timer_start(&val->timer, 0, 0)) + err |= 8; + return 0; +} diff --git a/tools/testing/selftests/bpf/test_cgroup_attach.c b/tools/testing/selftests/bpf/test_cgroup_attach.c deleted file mode 100644 index 7671909ee1cb..000000000000 --- a/tools/testing/selftests/bpf/test_cgroup_attach.c +++ /dev/null @@ -1,571 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* eBPF example program: - * - * - Creates arraymap in kernel with 4 bytes keys and 8 byte values - * - * - Loads eBPF program - * - * The eBPF program accesses the map passed in to store two pieces of - * information. The number of invocations of the program, which maps - * to the number of packets received, is stored to key 0. Key 1 is - * incremented on each iteration by the number of bytes stored in - * the skb. The program also stores the number of received bytes - * in the cgroup storage. - * - * - Attaches the new program to a cgroup using BPF_PROG_ATTACH - * - * - Every second, reads map[0] and map[1] to see how many bytes and - * packets were seen on any socket of tasks in the given cgroup. - */ - -#define _GNU_SOURCE - -#include <stdio.h> -#include <stdlib.h> -#include <assert.h> -#include <sys/resource.h> -#include <sys/time.h> -#include <unistd.h> -#include <linux/filter.h> - -#include <linux/bpf.h> -#include <bpf/bpf.h> - -#include "bpf_util.h" -#include "bpf_rlimit.h" -#include "cgroup_helpers.h" - -#define FOO "/foo" -#define BAR "/foo/bar/" -#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" - -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - -#ifdef DEBUG -#define debug(args...) printf(args) -#else -#define debug(args...) 
-#endif - -static int prog_load(int verdict) -{ - int ret; - struct bpf_insn prog[] = { - BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); - - if (ret < 0) { - log_err("Loading program"); - printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); - return 0; - } - return ret; -} - -static int test_foo_bar(void) -{ - int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0; - - allow_prog = prog_load(1); - if (!allow_prog) - goto err; - - drop_prog = prog_load(0); - if (!drop_prog) - goto err; - - if (setup_cgroup_environment()) - goto err; - - /* Create cgroup /foo, get fd, and join it */ - foo = create_and_get_cgroup(FOO); - if (foo < 0) - goto err; - - if (join_cgroup(FOO)) - goto err; - - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo"); - goto err; - } - - debug("Attached DROP prog. This ping in cgroup /foo should fail...\n"); - assert(system(PING_CMD) != 0); - - /* Create cgroup /foo/bar, get fd, and join it */ - bar = create_and_get_cgroup(BAR); - if (bar < 0) - goto err; - - if (join_cgroup(BAR)) - goto err; - - debug("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); - assert(system(PING_CMD) != 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); - assert(system(PING_CMD) == 0); - - if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo/bar"); - goto err; - } - - debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n" - "This ping in cgroup /foo/bar should fail...\n"); - assert(system(PING_CMD) != 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo"); - goto err; - } - - debug("Attached PASS from /foo/bar and detached DROP from /foo.\n" - "This ping in cgroup /foo/bar should pass...\n"); - assert(system(PING_CMD) == 0); - - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { - errno = 0; - log_err("Unexpected success attaching prog to /foo/bar"); - goto err; - } - - if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching program from /foo/bar"); - goto err; - } - - if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { - errno = 0; - log_err("Unexpected success in double detach from /foo"); - goto err; - } - - if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching non-overridable prog to /foo"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { - errno = 0; - log_err("Unexpected success attaching non-overridable prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - errno = 0; - log_err("Unexpected success attaching overridable prog to /foo/bar"); - goto err; - } - - if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, - 
BPF_F_ALLOW_OVERRIDE)) { - errno = 0; - log_err("Unexpected success attaching overridable prog to /foo"); - goto err; - } - - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching different non-overridable prog to /foo"); - goto err; - } - - goto out; - -err: - rc = 1; - -out: - close(foo); - close(bar); - cleanup_cgroup_environment(); - if (!rc) - printf("#override:PASS\n"); - else - printf("#override:FAIL\n"); - return rc; -} - -static int map_fd = -1; - -static int prog_load_cnt(int verdict, int val) -{ - int cgroup_storage_fd, percpu_cgroup_storage_fd; - - if (map_fd < 0) - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (map_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); - if (cgroup_storage_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - percpu_cgroup_storage_fd = bpf_create_map( - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); - if (percpu_cgroup_storage_fd < 0) { - printf("failed to create map '%s'\n", strerror(errno)); - return -1; - } - - struct bpf_insn prog[] = { - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ - BPF_LD_MAP_FD(BPF_REG_1, map_fd), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ - - BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_MOV64_IMM(BPF_REG_1, val), - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0), - - BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), - - BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - int ret; - - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); - - if (ret < 0) { - log_err("Loading program"); - printf("Output from verifier:\n%s\n-------\n", bpf_log_buf); - return 0; - } - close(cgroup_storage_fd); - return ret; -} - - -static int test_multiprog(void) -{ - __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; - int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; - int drop_prog, allow_prog[6] = {}, rc = 0; - unsigned long long value; - int i = 0; - - for (i = 0; i < 6; i++) { - allow_prog[i] = prog_load_cnt(1, 1 << i); - if (!allow_prog[i]) - goto err; - } - drop_prog = prog_load_cnt(0, 1); - if (!drop_prog) - goto err; - - if (setup_cgroup_environment()) - goto err; - - cg1 = create_and_get_cgroup("/cg1"); - if (cg1 < 0) - goto err; - cg2 = create_and_get_cgroup("/cg1/cg2"); - if (cg2 < 0) - goto err; - cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); - if (cg3 < 0) - goto err; - cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); - if (cg4 < 0) - goto err; - cg5 = 
create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); - if (cg5 < 0) - goto err; - - if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) - goto err; - - if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog to cg1"); - goto err; - } - if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Unexpected success attaching the same prog to cg1"); - goto err; - } - if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog2 to cg1"); - goto err; - } - if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to cg2"); - goto err; - } - if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog to cg3"); - goto err; - } - if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_OVERRIDE)) { - log_err("Attaching prog to cg4"); - goto err; - } - if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) { - log_err("Attaching prog to cg5"); - goto err; - } - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 8 + 32); - - /* query the number of effective progs in cg5 */ - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - NULL, NULL, &prog_cnt) == 0); - assert(prog_cnt == 4); - /* retrieve prog_ids of effective progs in cg5 */ - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 4); - assert(attach_flags == 0); - saved_prog_id = prog_ids[0]; - /* check enospc handling */ - prog_ids[0] = 0; - prog_cnt = 2; - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == -1 && - errno == ENOSPC); - assert(prog_cnt == 4); - /* check that prog_ids are returned even when buffer is too small */ - assert(prog_ids[0] == saved_prog_id); - /* retrieve prog_id of single attached prog in cg5 */ - prog_ids[0] = 0; - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, - NULL, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 1); - assert(prog_ids[0] == saved_prog_id); - - /* detach bottom program and ping again */ - if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching prog from cg5"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 8 + 16); - - /* detach 3rd from bottom program and ping again */ - errno = 0; - if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) { - log_err("Unexpected success on detach from cg3"); - goto err; - } - if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching from cg3"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 16); - - /* detach 2nd from bottom program and ping again */ - if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) { - log_err("Detaching prog from cg4"); - goto err; - } - value = 0; - assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); - assert(system(PING_CMD) == 0); - assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); - assert(value == 1 + 2 + 4); - - prog_cnt = 4; - 
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, - &attach_flags, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 3); - assert(attach_flags == 0); - assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, - NULL, prog_ids, &prog_cnt) == 0); - assert(prog_cnt == 0); - goto out; -err: - rc = 1; - -out: - for (i = 0; i < 6; i++) - if (allow_prog[i] > 0) - close(allow_prog[i]); - close(cg1); - close(cg2); - close(cg3); - close(cg4); - close(cg5); - cleanup_cgroup_environment(); - if (!rc) - printf("#multi:PASS\n"); - else - printf("#multi:FAIL\n"); - return rc; -} - -static int test_autodetach(void) -{ - __u32 prog_cnt = 4, attach_flags; - int allow_prog[2] = {0}; - __u32 prog_ids[2] = {0}; - int cg = 0, i, rc = -1; - void *ptr = NULL; - int attempts; - - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - allow_prog[i] = prog_load_cnt(1, 1 << i); - if (!allow_prog[i]) - goto err; - } - - if (setup_cgroup_environment()) - goto err; - - /* create a cgroup, attach two programs and remember their ids */ - cg = create_and_get_cgroup("/cg_autodetach"); - if (cg < 0) - goto err; - - if (join_cgroup("/cg_autodetach")) - goto err; - - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS, - BPF_F_ALLOW_MULTI)) { - log_err("Attaching prog[%d] to cg:egress", i); - goto err; - } - } - - /* make sure that programs are attached and run some traffic */ - assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, - prog_ids, &prog_cnt) == 0); - assert(system(PING_CMD) == 0); - - /* allocate some memory (4Mb) to pin the original cgroup */ - ptr = malloc(4 * (1 << 20)); - if (!ptr) - goto err; - - /* close programs and cgroup fd */ - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { - close(allow_prog[i]); - allow_prog[i] = 0; - } - - close(cg); - cg = 0; - - /* leave the cgroup and remove it. don't detach programs */ - cleanup_cgroup_environment(); - - /* wait for the asynchronous auto-detachment. - * wait for no more than 5 sec and give up. - */ - for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { - for (attempts = 5; attempts >= 0; attempts--) { - int fd = bpf_prog_get_fd_by_id(prog_ids[i]); - - if (fd < 0) - break; - - /* don't leave the fd open */ - close(fd); - - if (!attempts) - goto err; - - sleep(1); - } - } - - rc = 0; -err: - for (i = 0; i < ARRAY_SIZE(allow_prog); i++) - if (allow_prog[i] > 0) - close(allow_prog[i]); - if (cg) - close(cg); - free(ptr); - cleanup_cgroup_environment(); - if (!rc) - printf("#autodetach:PASS\n"); - else - printf("#autodetach:FAIL\n"); - return rc; -} - -int main(void) -{ - int (*tests[])(void) = { - test_foo_bar, - test_multiprog, - test_autodetach, - }; - int errors = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(tests); i++) - if (tests[i]()) - errors++; - - if (errors) - printf("test_cgroup_attach:FAIL\n"); - else - printf("test_cgroup_attach:PASS\n"); - - return errors ? 
EXIT_FAILURE : EXIT_SUCCESS; -} diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index e1f1becda529..afd626724826 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -756,11 +756,7 @@ static void test_sockmap(unsigned int tasks, void *data) /* Test update without programs */ for (i = 0; i < 6; i++) { err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); - if (i < 2 && !err) { - printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n", - i, sfd[i]); - goto out_sockmap; - } else if (i >= 2 && err) { + if (err) { printf("Failed noprog update sockmap '%i:%i'\n", i, sfd[i]); goto out_sockmap; @@ -793,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data) } err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER); - if (err) { + if (!err) { printf("Failed empty parser prog detach\n"); goto out_sockmap; } err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT); - if (err) { + if (!err) { printf("Failed empty verdict prog detach\n"); goto out_sockmap; } err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT); - if (err) { + if (!err) { printf("Failed empty msg verdict prog detach\n"); goto out_sockmap; } @@ -1094,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data) assert(status == 0); } - err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE); + err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE); if (!err) { printf("Detached an invalid prog type.\n"); goto out_sockmap; } - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER); + err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER); if (err) { printf("Failed parser prog detach\n"); goto out_sockmap; } - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); + err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); if (err) { printf("Failed parser prog detach\n"); goto out_sockmap; @@ -1282,6 +1278,8 @@ static void __run_parallel(unsigned int tasks, pid_t pid[tasks]; int i; + fflush(stdout); + for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { @@ -1717,9 +1715,9 @@ static void run_all_tests(void) test_map_in_map(); } -#define DECLARE +#define DEFINE_TEST(name) extern void test_##name(void); #include <map_tests/tests.h> -#undef DECLARE +#undef DEFINE_TEST int main(void) { @@ -1731,9 +1729,9 @@ int main(void) map_flags = BPF_F_NO_PREALLOC; run_all_tests(); -#define CALL +#define DEFINE_TEST(name) test_##name(); #include <map_tests/tests.h> -#undef CALL +#undef DEFINE_TEST printf("test_maps: OK, %d SKIPPED\n", skips); return 0; diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 1afa22c88e42..8f918847ddf8 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -930,6 +930,7 @@ try: start_test("Test disabling TC offloads is rejected while filters installed...") ret, _ = sim.set_ethtool_tc_offloads(False, fail=False) fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...") + sim.set_ethtool_tc_offloads(True) start_test("Test qdisc removal frees things...") sim.tc_flush_filters() diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 3bf18364c67c..174055323b70 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -7,6 +7,8 @@ #include <argp.h> #include <string.h> +#define EXIT_NO_TEST 2 + /* defined in test_progs.h */ 
struct test_env env; @@ -45,7 +47,7 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) fflush(stdout); /* exports env.log_buf & env.log_cnt */ - if (env.verbose || test->force_log || failed) { + if (env.verbosity > VERBOSE_NONE || test->force_log || failed) { if (env.log_cnt) { env.log_buf[env.log_cnt] = '\0'; fprintf(env.stdout, "%s", env.log_buf); @@ -164,23 +166,6 @@ int test__join_cgroup(const char *path) return fd; } -struct ipv4_packet pkt_v4 = { - .eth.h_proto = __bpf_constant_htons(ETH_P_IP), - .iph.ihl = 5, - .iph.protocol = IPPROTO_TCP, - .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), - .tcp.urg_ptr = 123, - .tcp.doff = 5, -}; - -struct ipv6_packet pkt_v6 = { - .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), - .iph.nexthdr = IPPROTO_TCP, - .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), - .tcp.urg_ptr = 123, - .tcp.doff = 5, -}; - int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { struct bpf_map *map; @@ -293,25 +278,13 @@ int extract_build_id(char *build_id, size_t size) len = size; memcpy(build_id, line, len); build_id[len] = '\0'; + free(line); return 0; err: fclose(fp); return -1; } -void *spin_lock_thread(void *arg) -{ - __u32 duration, retval; - int err, prog_fd = *(u32 *) arg; - - err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); - pthread_exit(arg); -} - /* extern declarations for test funcs */ #define DEFINE_TEST(name) extern void test_##name(); #include <prog_tests/tests.h> @@ -346,14 +319,14 @@ static const struct argp_option opts[] = { { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0, "Output verifier statistics", }, { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL, - "Verbose output (use -vv for extra verbose output)" }, + "Verbose output (use -vv or -vvv for progressively verbose output)" }, {}, }; static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) { - if (!env.very_verbose && level == LIBBPF_DEBUG) + if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG) return 0; vprintf(format, args); return 0; @@ -419,6 +392,8 @@ int parse_num_list(const char *s, struct test_selector *sel) return 0; } +extern int extra_prog_load_log_flags; + static error_t parse_arg(int key, char *arg, struct argp_state *state) { struct test_env *env = state->input; @@ -460,9 +435,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) env->verifier_stats = true; break; case ARG_VERBOSE: + env->verbosity = VERBOSE_NORMAL; if (arg) { if (strcmp(arg, "v") == 0) { - env->very_verbose = true; + env->verbosity = VERBOSE_VERY; + extra_prog_load_log_flags = 1; + } else if (strcmp(arg, "vv") == 0) { + env->verbosity = VERBOSE_SUPER; + extra_prog_load_log_flags = 2; } else { fprintf(stderr, - "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n", + "Unrecognized verbosity setting ('%s'), only -v, -vv and -vvv are supported\n", @@ -470,7 +450,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) return -EINVAL; } } - env->verbose = true; break; case ARGP_KEY_ARG: argp_usage(state); @@ -489,7 +468,7 @@ static void stdio_hijack(void) env.stdout = stdout; env.stderr = stderr; - if (env.verbose) { + if (env.verbosity > VERBOSE_NONE) { /* nothing to do, output to stdout by default */ return; } @@ -583,5 +562,8 @@ int main(int argc, char **argv) free(env.test_selector.num_set); free(env.subtest_selector.num_set); + if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) +
return EXIT_NO_TEST; + return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 0c48f64f732b..a747188e531e 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -39,6 +39,13 @@ typedef __u16 __sum16; #include "trace_helpers.h" #include "flow_dissector_load.h" +enum verbosity { + VERBOSE_NONE, + VERBOSE_NORMAL, + VERBOSE_VERY, + VERBOSE_SUPER, +}; + struct test_selector { const char *name; bool *num_set; @@ -49,8 +56,7 @@ struct test_env { struct test_selector test_selector; struct test_selector subtest_selector; bool verifier_stats; - bool verbose; - bool very_verbose; + enum verbosity verbosity; bool jit_enabled; @@ -74,24 +80,6 @@ extern void test__skip(void); extern void test__fail(void); extern int test__join_cgroup(const char *path); -#define MAGIC_BYTES 123 - -/* ipv4 test vector */ -struct ipv4_packet { - struct ethhdr eth; - struct iphdr iph; - struct tcphdr tcp; -} __packed; -extern struct ipv4_packet pkt_v4; - -/* ipv6 test vector */ -struct ipv6_packet { - struct ethhdr eth; - struct ipv6hdr iph; - struct tcphdr tcp; -} __packed; -extern struct ipv6_packet pkt_v6; - #define _CHECK(condition, tag, duration, format...) ({ \ int __ret = !!(condition); \ if (__ret) { \ @@ -119,9 +107,162 @@ extern struct ipv6_packet pkt_v6; #define CHECK_ATTR(condition, tag, format...) \ _CHECK(condition, tag, tattr.duration, format) -#define MAGIC_VAL 0x1234 -#define NUM_ITER 100000 -#define VIP_NUM 5 +#define ASSERT_TRUE(actual, name) ({ \ + static int duration = 0; \ + bool ___ok = (actual); \ + CHECK(!___ok, (name), "unexpected %s: got FALSE\n", (name)); \ + ___ok; \ +}) + +#define ASSERT_FALSE(actual, name) ({ \ + static int duration = 0; \ + bool ___ok = !(actual); \ + CHECK(!___ok, (name), "unexpected %s: got TRUE\n", (name)); \ + ___ok; \ +}) + +#define ASSERT_EQ(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act == ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld != expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_NEQ(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act != ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld == expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_LT(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act < ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld >= expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_LE(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act <= ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld > expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_GT(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act > ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld <= expected 
%lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_GE(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act >= ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld < expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_LT(actual, expected, name) ({ \ + static int duration = 0; \ + typeof(actual) ___act = (actual); \ + typeof(expected) ___exp = (expected); \ + bool ___ok = ___act < ___exp; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual %lld >= expected %lld\n", \ + (name), (long long)(___act), (long long)(___exp)); \ + ___ok; \ +}) + +#define ASSERT_STREQ(actual, expected, name) ({ \ + static int duration = 0; \ + const char *___act = actual; \ + const char *___exp = expected; \ + bool ___ok = strcmp(___act, ___exp) == 0; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual '%s' != expected '%s'\n", \ + (name), ___act, ___exp); \ + ___ok; \ +}) + +#define ASSERT_STRNEQ(actual, expected, len, name) ({ \ + static int duration = 0; \ + const char *___act = actual; \ + const char *___exp = expected; \ + int ___len = len; \ + bool ___ok = strncmp(___act, ___exp, ___len) == 0; \ + CHECK(!___ok, (name), \ + "unexpected %s: actual '%.*s' != expected '%.*s'\n", \ + (name), ___len, ___act, ___len, ___exp); \ + ___ok; \ +}) + +#define ASSERT_OK(res, name) ({ \ + static int duration = 0; \ + long long ___res = (res); \ + bool ___ok = ___res == 0; \ + CHECK(!___ok, (name), "unexpected error: %lld (errno %d)\n", \ + ___res, errno); \ + ___ok; \ +}) + +#define ASSERT_ERR(res, name) ({ \ + static int duration = 0; \ + long long ___res = (res); \ + bool ___ok = ___res < 0; \ + CHECK(!___ok, (name), "unexpected success: %lld\n", ___res); \ + ___ok; \ +}) + +#define ASSERT_NULL(ptr, name) ({ \ + static int duration = 0; \ + const void *___res = (ptr); \ + bool ___ok = !___res; \ + CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \ + ___ok; \ +}) + +#define ASSERT_OK_PTR(ptr, name) ({ \ + static int duration = 0; \ + const void *___res = (ptr); \ + int ___err = libbpf_get_error(___res); \ + bool ___ok = ___err == 0; \ + CHECK(!___ok, (name), "unexpected error: %d\n", ___err); \ + ___ok; \ +}) + +#define ASSERT_ERR_PTR(ptr, name) ({ \ + static int duration = 0; \ + const void *___res = (ptr); \ + int ___err = libbpf_get_error(___res); \ + bool ___ok = ___err != 0; \ + CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \ + ___ok; \ +}) static inline __u64 ptr_to_u64(const void *ptr) { @@ -132,7 +273,6 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name); int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); int extract_build_id(char *build_id, size_t size); -void *spin_lock_thread(void *arg); #ifdef __x86_64__ #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep" diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c index 079d0f5a2909..7e4c91f2238d 100644 --- a/tools/testing/selftests/bpf/test_select_reuseport.c +++ b/tools/testing/selftests/bpf/test_select_reuseport.c @@ -668,12 +668,12 @@ static void cleanup_per_test(void) for (i = 0; i < NR_RESULTS; i++) { err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY); - RET_IF(err, "reset elem in result_map", + CHECK(err, "reset elem in result_map", "i:%u err:%d 
errno:%d\n", i, err, errno); } err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY); - RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n", + CHECK(err, "reset line number in linum_map", "err:%d errno:%d\n", err, errno); for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c index 84e81a89e2f9..47e132726203 100644 --- a/tools/testing/selftests/bpf/test_stub.c +++ b/tools/testing/selftests/bpf/test_stub.c @@ -5,6 +5,8 @@ #include <bpf/libbpf.h> #include <string.h> +int extra_prog_load_log_flags = 0; + int bpf_prog_test_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { @@ -15,6 +17,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, attr.prog_type = type; attr.expected_attach_type = 0; attr.prog_flags = BPF_F_TEST_RND_HI32; + attr.log_level = extra_prog_load_log_flags; return bpf_prog_load_xattr(&attr, pobj, prog_fd); } @@ -35,6 +38,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, load_attr.license = license; load_attr.kern_version = kern_version; load_attr.prog_flags = BPF_F_TEST_RND_HI32; + load_attr.log_level = extra_prog_load_log_flags; return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); } diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh new file mode 100755 index 000000000000..6d7482562140 --- /dev/null +++ b/tools/testing/selftests/bpf/test_tc_redirect.sh @@ -0,0 +1,204 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link +# between src and dst; the fwd netns has a veth link to each of src and dst. The +# client lives in src and the server in dst. The test installs a TC BPF program +# on each host-facing veth in fwd which calls into i) bpf_redirect_neigh() to +# populate the neighbour address and redirect, or ii) bpf_redirect_peer() to +# switch namespaces from the ingress side; it also installs a checker prog on +# the egress side to drop unexpected traffic.
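For orientation, a minimal sketch of the shape such an ingress-side redirect program can take (an illustration only, not the shipped test_tc_neigh.o/test_tc_peer.o sources; the map name ifindex_map and its key layout, key 0 = src-facing ifindex and key 1 = dst-facing ifindex, are assumptions mirroring how netns_setup_bpf() below seeds the program's map):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* Seeded from user space, cf. the bpftool map update calls in
 * netns_setup_bpf(): key 0 is assumed to hold the ifindex of the
 * src-facing veth, key 1 that of the dst-facing veth.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u32);
} ifindex_map SEC(".maps");

SEC("dst_ingress")
int tc_dst_ingress(struct __sk_buff *skb)
{
	__u32 key = 0; /* forward towards the src-facing device */
	__u32 *ifindex;

	ifindex = bpf_map_lookup_elem(&ifindex_map, &key);
	if (!ifindex)
		return TC_ACT_SHOT;

	/* bpf_redirect_peer() switches straight into the peer device's
	 * netns on ingress; a bpf_redirect_neigh() variant would instead
	 * resolve the L2 neighbour and forward via the regular egress path.
	 */
	return bpf_redirect_peer(*ifindex, 0);
}

char LICENSE[] SEC("license") = "GPL";

tc loads these programs by section name, which is why the filter commands in netns_setup_bpf() below reference sec src_ingress, dst_ingress and chk_egress.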
+ +if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root" + echo "FAIL" + exit 1 +fi + +# check that needed tools are present +command -v nc >/dev/null 2>&1 || \ + { echo >&2 "nc is not available"; exit 1; } +command -v dd >/dev/null 2>&1 || \ + { echo >&2 "dd is not available"; exit 1; } +command -v timeout >/dev/null 2>&1 || \ + { echo >&2 "timeout is not available"; exit 1; } +command -v ping >/dev/null 2>&1 || \ + { echo >&2 "ping is not available"; exit 1; } +command -v ping6 >/dev/null 2>&1 || \ + { echo >&2 "ping6 is not available"; exit 1; } +command -v perl >/dev/null 2>&1 || \ + { echo >&2 "perl is not available"; exit 1; } +command -v jq >/dev/null 2>&1 || \ + { echo >&2 "jq is not available"; exit 1; } +command -v bpftool >/dev/null 2>&1 || \ + { echo >&2 "bpftool is not available"; exit 1; } + +readonly GREEN='\033[0;92m' +readonly RED='\033[0;31m' +readonly NC='\033[0m' # No Color + +readonly PING_ARG="-c 3 -w 10 -q" + +readonly TIMEOUT=10 + +readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)" +readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)" +readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)" + +readonly IP4_SRC="172.16.1.100" +readonly IP4_DST="172.16.2.100" + +readonly IP6_SRC="::1:dead:beef:cafe" +readonly IP6_DST="::2:dead:beef:cafe" + +readonly IP4_SLL="169.254.0.1" +readonly IP4_DLL="169.254.0.2" +readonly IP4_NET="169.254.0.0" + +netns_cleanup() +{ + ip netns del ${NS_SRC} + ip netns del ${NS_FWD} + ip netns del ${NS_DST} +} + +netns_setup() +{ + ip netns add "${NS_SRC}" + ip netns add "${NS_FWD}" + ip netns add "${NS_DST}" + + ip link add veth_src type veth peer name veth_src_fwd + ip link add veth_dst type veth peer name veth_dst_fwd + + ip link set veth_src netns ${NS_SRC} + ip link set veth_src_fwd netns ${NS_FWD} + + ip link set veth_dst netns ${NS_DST} + ip link set veth_dst_fwd netns ${NS_FWD} + + ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src + ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst + + # The fwd netns automatically gets a v6 LL address / routes, but also + # needs a v4 one in order to start ARP probing. The IP4_NET route is + # added to the endpoints so that the ARP processing will reply.
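+ # (Without a v4 address on the fwd-side veths the kernel would have no + # ARP sender address to probe with, and without the ${IP4_NET} route + # the endpoints would not reply to those probes.)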
+ + ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd + ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd + + ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad + ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad + + ip -netns ${NS_SRC} link set dev veth_src up + ip -netns ${NS_FWD} link set dev veth_src_fwd up + + ip -netns ${NS_DST} link set dev veth_dst up + ip -netns ${NS_FWD} link set dev veth_dst_fwd up + + ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global + ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global + ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global + + ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global + ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global + + ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global + ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global + ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global + + ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global + ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global + + fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address) + fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address) + + ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src + ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst + + ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src + ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst +} + +netns_test_connectivity() +{ + set +e + + ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &" + ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &" + + TEST="TCPv4 connectivity test" + ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004" + if [ $? -ne 0 ]; then + echo -e "${TEST}: ${RED}FAIL${NC}" + exit 1 + fi + echo -e "${TEST}: ${GREEN}PASS${NC}" + + TEST="TCPv6 connectivity test" + ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006" + if [ $? -ne 0 ]; then + echo -e "${TEST}: ${RED}FAIL${NC}" + exit 1 + fi + echo -e "${TEST}: ${GREEN}PASS${NC}" + + TEST="ICMPv4 connectivity test" + ip netns exec ${NS_SRC} ping $PING_ARG ${IP4_DST} + if [ $? -ne 0 ]; then + echo -e "${TEST}: ${RED}FAIL${NC}" + exit 1 + fi + echo -e "${TEST}: ${GREEN}PASS${NC}" + + TEST="ICMPv6 connectivity test" + ip netns exec ${NS_SRC} ping6 $PING_ARG ${IP6_DST} + if [ $? 
-ne 0 ]; then + echo -e "${TEST}: ${RED}FAIL${NC}" + exit 1 + fi + echo -e "${TEST}: ${GREEN}PASS${NC}" + + set -e +} + +hex_mem_str() +{ + perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1 +} + +netns_setup_bpf() +{ + local obj=$1 + + ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact + ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress + ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress bpf da obj $obj sec chk_egress + + ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact + ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress + ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress bpf da obj $obj sec chk_egress + + veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex) + veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex) + + progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]') + for prog in $progs; do + map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]') + if [ ! -z "$map" ]; then + bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src) + bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst) + fi + done +} + +trap netns_cleanup EXIT +set -e + +netns_setup +netns_setup_bpf test_tc_neigh.o +netns_test_connectivity +netns_cleanup +netns_setup +netns_setup_bpf test_tc_peer.o +netns_test_connectivity diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h index 7bcfa6207005..6220b95cbd02 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf.h +++ b/tools/testing/selftests/bpf/test_tcpbpf.h @@ -13,5 +13,6 @@ struct tcpbpf_globals { __u64 bytes_received; __u64 bytes_acked; __u32 num_listen; + __u32 num_close_events; }; #endif diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 716b4e3be581..3ae127620463 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -16,6 +16,9 @@ #include "test_tcpbpf.h" +/* 3 comes from one listening socket + both ends of the connection */ +#define EXPECTED_CLOSE_EVENTS 3 + #define EXPECT_EQ(expected, actual, fmt) \ do { \ if ((expected) != (actual)) { \ @@ -23,13 +26,14 @@ " Actual: %" fmt "\n" \ " Expected: %" fmt "\n", \ (actual), (expected)); \ - goto err; \ + ret--; \ } \ } while (0) int verify_result(const struct tcpbpf_globals *result) { __u32 expected_events; + int ret = 0; expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | (1 << BPF_SOCK_OPS_RWND_INIT) | @@ -48,15 +52,15 @@ int verify_result(const struct tcpbpf_globals *result) EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); EXPECT_EQ(1, result->num_listen, PRIu32); + EXPECT_EQ(EXPECTED_CLOSE_EVENTS, result->num_close_events, PRIu32); - return 0; -err: - return -1; + return ret; } int verify_sockopt_result(int sock_map_fd) { __u32 key = 0; + int ret = 0; int res; int rv; @@ -69,9 +73,7 @@ int verify_sockopt_result(int sock_map_fd) rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); EXPECT_EQ(0, rv, "d"); EXPECT_EQ(1, res, "d"); - return 0; -err: - return -1; + return ret; } static int bpf_find_map(const char *test, struct bpf_object *obj, @@ -96,6 +98,7 @@ int main(int argc, char **argv) int error = EXIT_FAILURE; struct bpf_object *obj; int cg_fd = -1; + int retry = 10; __u32 key = 0; int rv; @@ 
-134,12 +137,20 @@ int main(int argc, char **argv) if (sock_map_fd < 0) goto err; +retry_lookup: rv = bpf_map_lookup_elem(map_fd, &key, &g); if (rv != 0) { printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); goto err; } + if (g.num_close_events != EXPECTED_CLOSE_EVENTS && retry--) { + printf("Unexpected number of close events (%d), retrying!\n", + g.num_close_events); + usleep(100); + goto retry_lookup; + } + if (verify_result(&g)) { printf("FAILED: Wrong stats\n"); goto err; diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index bd12ec97a44d..1ccbe804e8e1 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -24,12 +24,12 @@ # Root namespace with metadata-mode tunnel + BPF # Device names and addresses: # veth1 IP: 172.16.1.200, IPv6: 00::22 (underlay) -# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200 (overlay) +# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) # # Namespace at_ns0 with native tunnel # Device names and addresses: # veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) -# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100 (overlay) +# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) # # # End-to-end ping packet flow @@ -250,7 +250,7 @@ add_ipip_tunnel() ip addr add dev $DEV 10.1.1.200/24 } -add_ipip6tnl_tunnel() +add_ip6tnl_tunnel() { ip netns exec at_ns0 ip addr add ::11/96 dev veth0 ip netns exec at_ns0 ip link set dev veth0 up @@ -262,11 +262,13 @@ add_ipip6tnl_tunnel() ip link add dev $DEV_NS type $TYPE \ local ::11 remote ::22 ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24 + ip netns exec at_ns0 ip addr add dev $DEV_NS 1::11/96 ip netns exec at_ns0 ip link set dev $DEV_NS up # root namespace ip link add dev $DEV type $TYPE external ip addr add dev $DEV 10.1.1.200/24 + ip addr add dev $DEV 1::22/96 ip link set dev $DEV up } @@ -534,7 +536,7 @@ test_ipip6() check $TYPE config_device - add_ipip6tnl_tunnel + add_ip6tnl_tunnel ip link set dev veth1 mtu 1500 attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel # underlay @@ -553,6 +555,34 @@ test_ipip6() echo -e ${GREEN}"PASS: $TYPE"${NC} } +test_ip6ip6() +{ + TYPE=ip6tnl + DEV_NS=ip6ip6tnl00 + DEV=ip6ip6tnl11 + ret=0 + + check $TYPE + config_device + add_ip6tnl_tunnel + ip link set dev veth1 mtu 1500 + attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel + # underlay + ping6 $PING_ARG ::11 + # ip6 over ip6 + ping6 $PING_ARG 1::11 + check_err $? + ip netns exec at_ns0 ping6 $PING_ARG 1::22 + check_err $? + cleanup + + if [ $ret -ne 0 ]; then + echo -e ${RED}"FAIL: ip6$TYPE"${NC} + return 1 + fi + echo -e ${GREEN}"PASS: ip6$TYPE"${NC} +} + setup_xfrm_tunnel() { auth=0x$(printf '1%.0s' {1..40}) @@ -646,6 +676,7 @@ cleanup() ip link del veth1 2> /dev/null ip link del ipip11 2> /dev/null ip link del ipip6tnl11 2> /dev/null + ip link del ip6ip6tnl11 2> /dev/null ip link del gretap11 2> /dev/null ip link del ip6gre11 2> /dev/null ip link del ip6gretap11 2> /dev/null @@ -742,6 +773,10 @@ bpf_tunnel_test() test_ipip6 errors=$(( $errors + $? )) + echo "Testing IP6IP6 tunnel..." + test_ip6ip6 + errors=$(( $errors + $? )) + echo "Testing IPSec tunnel..." test_xfrm_tunnel errors=$(( $errors + $? 
)) diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c index e0fad1548737..d781bc86e100 100644 --- a/tools/testing/selftests/bpf/verifier/and.c +++ b/tools/testing/selftests/bpf/verifier/and.c @@ -15,7 +15,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the array range", + .errstr = "R0 max value is outside of the allowed memory range", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -44,7 +44,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the array range", + .errstr = "R0 max value is outside of the allowed memory range", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c index f3c33e128709..25ef091b4f60 100644 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ b/tools/testing/selftests/bpf/verifier/array_access.c @@ -117,7 +117,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R0 min value is outside of the array range", + .errstr = "R0 min value is outside of the allowed memory range", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -137,7 +137,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", + .errstr = "R0 unbounded memory access, make sure to bounds check any such access", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -250,12 +250,13 @@ BPF_MOV64_IMM(BPF_REG_5, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_array_ro = { 3 }, .result = ACCEPT, - .retval = -29, + .retval = 65507, }, { "invalid write map access into a read-only array 1", diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c index d55f476f2237..a14912cb234e 100644 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -20,7 +20,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the array range", + .errstr = "R0 max value is outside of the allowed memory range", .result = REJECT, }, { @@ -146,7 +146,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is outside of the array range", + .errstr = "R0 min value is outside of the allowed memory range", .result = REJECT }, { @@ -362,7 +362,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the array range", + .errstr = "R0 max value is outside of the allowed memory range", .result = REJECT }, { diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c index 1fd07a4f27ac..91869aea6d64 100644 --- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c +++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c @@ -6,8 +6,9 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, }, { "check deducing bounds from const, 2", @@ -20,6 +21,8 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), BPF_EXIT_INSN(), }, + .errstr_unpriv = "R1 has 
pointer with unsupported alu operation", + .result_unpriv = REJECT, .result = ACCEPT, .retval = 1, }, @@ -31,20 +34,24 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, }, { "check deducing bounds from const, 4", .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0), BPF_EXIT_INSN(), }, + .errstr_unpriv = "R6 has pointer with unsupported alu operation", + .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -55,8 +62,9 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, }, { "check deducing bounds from const, 6", @@ -67,8 +75,9 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, }, { "check deducing bounds from const, 7", @@ -80,8 +89,9 @@ offsetof(struct __sk_buff, mark)), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "dereference of modified ctx ptr", + .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { @@ -94,8 +104,9 @@ offsetof(struct __sk_buff, mark)), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "dereference of modified ctx ptr", + .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { @@ -106,8 +117,9 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, + .errstr_unpriv = "R1 has pointer with unsupported alu operation", .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, }, { "check deducing bounds from const, 10", @@ -119,6 +131,6 @@ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }, - .result = REJECT, .errstr = "math between ctx pointer and register with unbounded min value is not allowed", + .result = REJECT, }, diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c index 9baca7a75c42..c2aa6f26738b 100644 --- a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c +++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c @@ -19,7 +19,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -43,7 +42,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -69,7 +67,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -94,7 +91,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -141,7 +137,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min 
value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -210,7 +205,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -260,7 +254,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -287,7 +280,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -313,7 +305,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -342,7 +333,6 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -372,7 +362,6 @@ }, .fixup_map_hash_8b = { 4 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, }, { @@ -400,7 +389,5 @@ }, .fixup_map_hash_8b = { 3 }, .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", .result = REJECT, - .result_unpriv = REJECT, }, diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c index f24d50f09dbe..371926771db5 100644 --- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c +++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c @@ -9,17 +9,17 @@ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2), BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2), BPF_MOV64_IMM(BPF_REG_4, 256), BPF_EMIT_CALL(BPF_FUNC_get_stack), BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), - BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16), + BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16), BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), @@ -29,7 +29,7 @@ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 2d752c4f8d9d..bd8b4faf452c 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -105,7 +105,7 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_hash_8b = { 16 }, .result = REJECT, - .errstr = "R0 min value is outside of the array range", + .errstr = "R0 min value is outside of the allowed memory range", }, { "calls: overlapping caller/callee", diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c new 
file mode 100644 index 000000000000..2ad5f974451c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c @@ -0,0 +1,492 @@ +{ + "valid 1,2,4,8-byte reads from bpf_sk_lookup", + .insns = { + /* 1-byte read from family field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 3), + /* 2-byte read from family field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + /* 4-byte read from family field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + + /* 1-byte read from protocol field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 3), + /* 2-byte read from protocol field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + /* 4-byte read from protocol field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + + /* 1-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 3), + /* 2-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 2), + /* 4-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + + /* 1-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + 
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 15), + /* 2-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + /* 4-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + + /* 1-byte read from remote_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 3), + /* 2-byte read from remote_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + /* 4-byte read from remote_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + + /* 1-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 3), + /* 2-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + /* 4-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + + /* 1-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + 
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 15), + /* 2-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + /* 4-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + + /* 1-byte read from local_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 3), + /* 2-byte read from local_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + /* 4-byte read from local_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + + /* 8-byte read from sk field */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */ +{ + "invalid 8-byte read 
from bpf_sk_lookup family field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup protocol field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip4 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip4 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */ +{ + "invalid 4-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + 
.errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* out of bounds and unaligned reads from bpf_sk_lookup */ +{ + "invalid 4-byte read past end of bpf_sk_lookup", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at odd offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at even offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* in-bound and out-of-bound writes to bpf_sk_lookup */ +{ + "invalid 8-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write past end of bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, diff --git a/tools/testing/selftests/bpf/verifier/direct_value_access.c b/tools/testing/selftests/bpf/verifier/direct_value_access.c index 
b9fb28e8e224..988f46a1a4c7 100644 --- a/tools/testing/selftests/bpf/verifier/direct_value_access.c +++ b/tools/testing/selftests/bpf/verifier/direct_value_access.c @@ -68,7 +68,7 @@ }, .fixup_map_array_48b = { 1 }, .result = REJECT, - .errstr = "R1 min value is outside of the array range", + .errstr = "R1 min value is outside of the allowed memory range", }, { "direct map access, write test 7", @@ -220,7 +220,7 @@ }, .fixup_map_array_small = { 1 }, .result = REJECT, - .errstr = "R1 min value is outside of the array range", + .errstr = "R1 min value is outside of the allowed memory range", }, { "direct map access, write test 19", diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c index 67ab12410050..5a605ae131a9 100644 --- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c +++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c @@ -318,7 +318,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 4 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "R1 min value is outside of the allowed memory range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c index 7572e403ddb9..961f28139b96 100644 --- a/tools/testing/selftests/bpf/verifier/helper_value_access.c +++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c @@ -280,7 +280,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "R1 min value is outside of the allowed memory range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -415,7 +415,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "R1 min value is outside of the allowed memory range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -926,7 +926,7 @@ }, .fixup_map_hash_16b = { 3, 10 }, .result = REJECT, - .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map", + .errstr = "R2 unbounded memory access, make sure to bounds check any such access", .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c new file mode 100644 index 000000000000..b52209db8250 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_ptr.c @@ -0,0 +1,62 @@ +{ + "bpf_map_ptr: read with negative offset rejected", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "R1 is bpf_array invalid negative access: off=-8", +}, +{ + "bpf_map_ptr: write rejected", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "only read from bpf_array is 
supported", +}, +{ + "bpf_map_ptr: read non-existent field rejected", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4", +}, +{ + "bpf_map_ptr: read ops field accepted", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = ACCEPT, + .retval = 1, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c index cd26ee6b7b1d..1f2b8c4cb26d 100644 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -56,7 +56,7 @@ .fixup_map_in_map = { 16 }, .fixup_map_array_48b = { 13 }, .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", + .errstr = "only read from bpf_array is supported", }, { "cond: two branches returning different map pointers for lookup (tail, tail)", diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c index 91bb77c24a2e..c3f6f650deb7 100644 --- a/tools/testing/selftests/bpf/verifier/unpriv.c +++ b/tools/testing/selftests/bpf/verifier/unpriv.c @@ -495,7 +495,7 @@ .result = ACCEPT, }, { - "unpriv: adding of fp", + "unpriv: adding of fp, reg", .insns = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_MOV64_IMM(BPF_REG_1, 0), @@ -507,6 +507,19 @@ .result_unpriv = REJECT, .result = ACCEPT, }, +{ + "unpriv: adding of fp, imm", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, +}, { "unpriv: cmp of stack pointer", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c index 7f6c232cd842..ed1c2cea1dea 100644 --- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c +++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c @@ -88,6 +88,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "leaking pointer from stack off -8", .errstr = "R0 invalid mem access 'inv'", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index a53d99cebd9f..a7bf1e626725 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -21,8 +21,6 @@ .fixup_map_hash_16b = { 5 }, .fixup_map_array_48b = { 8 }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps", .retval = 1, }, { @@ -50,7 +48,7 @@ .fixup_map_array_48b = { 8 }, .result = ACCEPT, .result_unpriv = REJECT, - .errstr_unpriv = "R0 min value is outside of the array range", + .errstr_unpriv = "R0 min 
value is outside of the allowed memory range", .retval = 1, }, { @@ -122,7 +120,7 @@ .fixup_map_array_48b = { 1 }, .result = ACCEPT, .result_unpriv = REJECT, - .errstr_unpriv = "R2 tried to add from different pointers or scalars", + .errstr_unpriv = "R2 tried to add from different maps, paths or scalars", .retval = 0, }, { @@ -169,7 +167,7 @@ .fixup_map_array_48b = { 1 }, .result = ACCEPT, .result_unpriv = REJECT, - .errstr_unpriv = "R2 tried to add from different maps or paths", + .errstr_unpriv = "R2 tried to add from different maps, paths or scalars", .retval = 0, }, { @@ -325,7 +323,7 @@ }, .fixup_map_array_48b = { 3 }, .result = REJECT, - .errstr = "R0 min value is outside of the array range", + .errstr = "R0 min value is outside of the allowed memory range", .result_unpriv = REJECT, .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", }, @@ -516,6 +514,27 @@ .result = ACCEPT, .retval = 0xabcdef12, }, +{ + "map access: value_ptr += N, value_ptr -= N known scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV32_IMM(BPF_REG_1, 0x12345678), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 0x12345678, +}, { "map access: unknown scalar += value_ptr, 1", .insns = { @@ -601,7 +620,7 @@ }, .fixup_map_array_48b = { 3 }, .result = REJECT, - .errstr = "R1 max value is outside of the array range", + .errstr = "R1 max value is outside of the allowed memory range", .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -726,7 +745,7 @@ }, .fixup_map_array_48b = { 3 }, .result = REJECT, - .errstr = "R0 min value is outside of the array range", + .errstr = "R0 min value is outside of the allowed memory range", }, { "map access: value_ptr -= known scalar, 2", diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index bdb69599c4bd..5e939ff1e3f9 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -105,7 +105,7 @@ int cg_read_strcmp(const char *cgroup, const char *control, /* Handle the case of comparing against empty string */ if (!expected) - size = 32; + return -1; else size = strlen(expected) + 1; diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 24dd8ed48580..b025daea062d 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -300,7 +300,7 @@ test_uc_aware() local i for ((i = 0; i < attempts; ++i)); do - if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then + if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then ((passes++)) fi diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index 063ecb290a5a..19e9236dec5e 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -17,6 +17,7 @@ echo " -v|--verbose Increase verbosity of test messages" echo " -vv Alias of -v 
-v (Show all results in stdout)" echo " -vvv Alias of -v -v -v (Show all commands immediately)" echo " --fail-unsupported Treat UNSUPPORTED as a failure" +echo " --fail-unresolved Treat UNRESOLVED as a failure" echo " -d|--debug Debug mode (trace all shell commands)" echo " -l|--logdir <dir> Save logs on the <dir>" echo " If <dir> is -, all logs output in console only" @@ -29,8 +30,25 @@ err_ret=1 # kselftest skip code is 4 err_skip=4 +# cgroup RT scheduling prevents chrt commands from succeeding, which +# induces failures in the wakeup tests. Disable for the duration of +# the tests. + +readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us + +sched_rt_runtime_orig=$(cat $sched_rt_runtime) + +setup() { + echo -1 > $sched_rt_runtime +} + +cleanup() { + echo $sched_rt_runtime_orig > $sched_rt_runtime +} + errexit() { # message echo "Error: $1" 1>&2 + cleanup exit $err_ret } @@ -39,6 +57,8 @@ if [ `id -u` -ne 0 ]; then errexit "this must be run by root user" fi +setup + # Utilities absdir() { # file_path (cd `dirname $1`; pwd) @@ -93,6 +113,10 @@ parse_opts() { # opts UNSUPPORTED_RESULT=1 shift 1 ;; + --fail-unresolved) + UNRESOLVED_RESULT=1 + shift 1 + ;; --logdir|-l) LOG_DIR=$2 shift 2 @@ -157,6 +181,7 @@ KEEP_LOG=0 DEBUG=0 VERBOSE=0 UNSUPPORTED_RESULT=0 +UNRESOLVED_RESULT=0 STOP_FAILURE=0 # Parse command-line options parse_opts $* @@ -235,6 +260,7 @@ TOTAL_RESULT=0 INSTANCE= CASENO=0 + testcase() { # testfile CASENO=$((CASENO+1)) desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` @@ -260,7 +286,7 @@ eval_result() { # sigval $UNRESOLVED) prlog " [${color_blue}UNRESOLVED${color_reset}]" UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO" - return 1 # this is a kind of bug.. something happened. + return $UNRESOLVED_RESULT # depends on use case ;; $UNTESTED) prlog " [${color_blue}UNTESTED${color_reset}]" @@ -406,5 +432,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w` prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w` prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w` +cleanup + # if no error, return 0 exit $TOTAL_RESULT diff --git a/tools/testing/selftests/ftrace/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/ftrace/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 27a54a17da65..f4e92afab14b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' ftrace_filter_check 'schedule*' '^schedule.*$' # filter by *mid*end -ftrace_filter_check '*aw*lock' '.*aw.*lock$' +ftrace_filter_check '*pin*lock' '.*pin.*lock$' # filter by start*mid* ftrace_filter_check 'mutex*try*' '^mutex.*try.*' diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc index 021c03fd885d..23465823532b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc @@ -14,6 +14,8 @@ if [ !
-f set_event ]; then exit_unsupported fi +[ -f error_log ] || exit_unsupported + ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' exit 0 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc index 1bcb67dcae26..81490ecaaa92 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc @@ -38,7 +38,7 @@ for width in 64 32 16 8; do echo 0 > events/kprobes/testprobe/enable : "Confirm the arguments is recorded in given types correctly" - ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` + ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` check_types $ARGS $width : "Clear event for next loop" diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc index f3eb8aacec0e..a2b0e4eb1fe4 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc @@ -34,12 +34,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger -echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events -echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger -echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger +echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events +echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger +echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger ping $LOCALHOST -c 3 -if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then +if ! 
grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then fail "Failed to create combined histogram" fi diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index 4c156aeab6b8..5ec4d9e18806 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c @@ -137,7 +137,7 @@ int dump_queue(struct msgque_data *msgque) for (kern_id = 0; kern_id < 256; kern_id++) { ret = msgctl(kern_id, MSG_STAT, &ds); if (ret < 0) { - if (errno == -EINVAL) + if (errno == EINVAL) continue; printf("Failed to get stats for IPC queue with id %d\n", kern_id); diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh index 8b944cf042f6..315a43111e04 100755 --- a/tools/testing/selftests/kmod/kmod.sh +++ b/tools/testing/selftests/kmod/kmod.sh @@ -505,18 +505,23 @@ function test_num() fi } -function get_test_count() +function get_test_data() { test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + local field_num=$(echo $1 | sed 's/^0*//') + echo $ALL_TESTS | awk '{print $'$field_num'}' +} + +function get_test_count() +{ + TEST_DATA=$(get_test_data $1) LAST_TWO=${TEST_DATA#*:*} echo ${LAST_TWO%:*} } function get_test_enabled() { - test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + TEST_DATA=$(get_test_data $1) echo ${TEST_DATA#*:*:} } diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h index 4912d23844bc..e31ac9c5ead0 100644 --- a/tools/testing/selftests/kvm/include/evmcs.h +++ b/tools/testing/selftests/kvm/include/evmcs.h @@ -217,8 +217,8 @@ struct hv_enlightened_vmcs { #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) -struct hv_enlightened_vmcs *current_evmcs; -struct hv_vp_assist_page *current_vp_assist; +extern struct hv_enlightened_vmcs *current_evmcs; +extern struct hv_vp_assist_page *current_vp_assist; int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index ff234018219c..aead07c24afc 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -57,7 +57,7 @@ enum x86_register { struct desc64 { uint16_t limit0; uint16_t base0; - unsigned base1:8, s:1, type:4, dpl:2, p:1; + unsigned base1:8, type:4, s:1, dpl:2, p:1; unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8; uint32_t base3; uint32_t zero1; diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 6698cb741e10..7d8f7fc73646 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -446,11 +446,12 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) desc->limit0 = segp->limit & 0xFFFF; desc->base0 = segp->base & 0xFFFF; desc->base1 = segp->base >> 16; - desc->s = segp->s; desc->type = segp->type; + desc->s = segp->s; desc->dpl = segp->dpl; desc->p = segp->present; desc->limit1 = segp->limit >> 16; + desc->avl = segp->avl; desc->l = segp->l; desc->db = segp->db; desc->g = segp->g; diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index f6ec97b7eaef..8cc4a59ff369 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -17,6 +17,9 @@ bool enable_evmcs; 
+struct hv_enlightened_vmcs *current_evmcs; +struct hv_vp_assist_page *current_vp_assist; + struct eptPageTableEntry { uint64_t readable:1; uint64_t writable:1; diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index b8503a8119b0..81fcc25a54db 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -29,3 +29,4 @@ CONFIG_NET_SCH_FQ=m CONFIG_NET_SCH_ETF=m CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_KALLSYMS=y +CONFIG_NET_FOU=m diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 9fd3a0b97f0d..38133da2973d 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -239,6 +239,28 @@ setup_cmd_nsb() fi } +setup_cmd_nsc() +{ + local cmd="$*" + local rc + + run_cmd_nsc ${cmd} + rc=$? + if [ $rc -ne 0 ]; then + # show user the command if not done so already + if [ "$VERBOSE" = "0" ]; then + echo "setup command: $cmd" + fi + echo "failed. stopping tests" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue" + read a + fi + exit $rc + fi +} + # set sysctl values in NS-A set_sysctl() { @@ -447,6 +469,36 @@ setup() sleep 1 } +setup_lla_only() +{ + # make sure we are starting with a clean slate + kill_procs + cleanup 2>/dev/null + + log_debug "Configuring network namespaces" + set -e + + create_ns ${NSA} "-" "-" + create_ns ${NSB} "-" "-" + create_ns ${NSC} "-" "-" + connect_ns ${NSA} ${NSA_DEV} "-" "-" \ + ${NSB} ${NSB_DEV} "-" "-" + connect_ns ${NSA} ${NSA_DEV2} "-" "-" \ + ${NSC} ${NSC_DEV} "-" "-" + + NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV}) + NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV}) + NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV}) + + create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-" + ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF} + ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF} + + set +e + + sleep 1 +} + ################################################################################ # IPv4 @@ -3329,10 +3381,53 @@ use_case_br() setup_cmd_nsb ip li del vlan100 2>/dev/null } +# VRF only. +# ns-A device is connected to both ns-B and ns-C on a single VRF but only has +# LLA on the interfaces +use_case_ping_lla_multi() +{ + setup_lla_only + # only want reply from ns-A + setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 + setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B" + + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C" + + # cycle/flap the first ns-A interface + setup_cmd ip link set ${NSA_DEV} down + setup_cmd ip link set ${NSA_DEV} up + sleep 1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B" + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C" + + # cycle/flap the second ns-A interface + setup_cmd ip link set ${NSA_DEV2} down + setup_cmd ip link set ${NSA_DEV2} up + sleep 1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B" + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 
0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C" +} + use_cases() { log_section "Use cases" + log_subsection "Device enslaved to bridge" use_case_br + log_subsection "Ping LLA with multiple interfaces" + use_case_ping_lla_multi } ################################################################################ diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh index 9dc35a16e415..51df5e305855 100755 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh @@ -144,7 +144,7 @@ setup() cleanup() { - for n in h1 r1 h2 h3 h4 + for n in h0 r1 h1 h2 h3 do ip netns del ${n} 2>/dev/null done diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 796670ebc65b..09830b88ec8c 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -512,6 +512,19 @@ ipv6_fcnal_runtime() run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1" run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81" + # rpfilter and default route + $IP nexthop flush >/dev/null 2>&1 + run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP" + run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1" + run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3" + run_cmd "$IP nexthop add id 93 group 91/92" + run_cmd "$IP -6 ro add default nhid 91" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 0 "Nexthop with default route and rpfilter" + run_cmd "$IP -6 ro replace default nhid 93" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 0 "Nexthop with multipath default route and rpfilter" + # TO-DO: # existing route with old nexthop; append route with new nexthop # existing route with old nexthop; replace route with new @@ -749,6 +762,29 @@ ipv4_fcnal_runtime() run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" log_test $? 0 "Ping - multipath" + run_cmd "$IP ro delete 172.16.101.1/32 nhid 122" + + # + # multiple default routes + # - tests fib_select_default + run_cmd "$IP nexthop add id 501 via 172.16.1.2 dev veth1" + run_cmd "$IP ro add default nhid 501" + run_cmd "$IP ro add default via 172.16.1.3 dev veth1 metric 20" + run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" + log_test $? 0 "Ping - multiple default routes, nh first" + + # flip the order + run_cmd "$IP ro del default nhid 501" + run_cmd "$IP ro del default via 172.16.1.3 dev veth1 metric 20" + run_cmd "$IP ro add default via 172.16.1.2 dev veth1 metric 20" + run_cmd "$IP nexthop replace id 501 via 172.16.1.3 dev veth1" + run_cmd "$IP ro add default nhid 501 metric 20" + run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" + log_test $? 
0 "Ping - multiple default routes, nh second" + + run_cmd "$IP nexthop delete nhid 501" + run_cmd "$IP ro del default" + # # IPv4 with blackhole nexthops # diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 09854f8a0b57..4e19a1c00ddd 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -618,16 +618,22 @@ fib_nexthop_test() fib_suppress_test() { + echo + echo "FIB rule with suppress_prefixlength" + setup + $IP link add dummy1 type dummy $IP link set dummy1 up $IP -6 route add default dev dummy1 $IP -6 rule add table main suppress_prefixlength 0 - ping -f -c 1000 -W 1 1234::1 || true + ping -f -c 1000 -W 1 1234::1 >/dev/null 2>&1 $IP -6 rule del table main suppress_prefixlength 0 $IP link del dummy1 # If we got here without crashing, we're good. - return 0 + log_test 0 0 "FIB rule suppress test" + + cleanup } ################################################################################ @@ -1049,7 +1055,6 @@ ipv6_addr_metric_test() check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260" log_test $? 0 "Set metric with peer route on local side" - log_test $? 0 "User specified metric on local address" check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260" log_test $? 0 "Set metric with peer route on peer side" diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh index 197e769c2ed1..f8cda822c1ce 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh @@ -86,11 +86,20 @@ test_ip6gretap() test_gretap_stp() { + # Sometimes after mirror installation, the neighbor's state is not valid. + # The reason is that there is no SW datapath activity related to the + # neighbor for the remote GRE address. Therefore whether the corresponding + # neighbor will be valid is a matter of luck, and the test is thus racy. + # Set the neighbor's state to permanent, so it would be always valid. 
+ ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \ + nud permanent dev br2 full_test_span_gre_stp gt4 $swp3.555 "mirror to gretap" } test_ip6gretap_stp() { + ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \ + nud permanent dev br2 full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap" } diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index c02291e9841e..880e3ab9d088 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -271,7 +271,7 @@ test_span_gre_fdb_roaming() while ((RET == 0)); do bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null - bridge fdb add dev $swp2 $h3mac vlan 555 master + bridge fdb add dev $swp2 $h3mac vlan 555 master static sleep 1 fail_test_span_gre_dir $tundev ingress diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh index cf3d26c233e8..7fcc42bc076f 100755 --- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh +++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh @@ -197,7 +197,7 @@ multipath4_test() t0_rp12=$(link_stats_tx_packets_get $rp12) t0_rp13=$(link_stats_tx_packets_get $rp13) - ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ + ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ -d 1msec -t udp "sp=1024,dp=0-32768" t1_rp12=$(link_stats_tx_packets_get $rp12) diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh index 79a209927962..464821c587a5 100755 --- a/tools/testing/selftests/net/forwarding/router_multipath.sh +++ b/tools/testing/selftests/net/forwarding/router_multipath.sh @@ -178,7 +178,7 @@ multipath4_test() t0_rp12=$(link_stats_tx_packets_get $rp12) t0_rp13=$(link_stats_tx_packets_get $rp13) - ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ + ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ -d 1msec -t udp "sp=1024,dp=0-32768" t1_rp12=$(link_stats_tx_packets_get $rp12) diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index 058c746ee300..b11d8e6b5bc1 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -3,7 +3,7 @@ ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \ match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \ - match_ip_tos_test match_indev_test" + match_ip_tos_test match_indev_test match_ip_ttl_test" NUM_NETIFS=2 source tc_common.sh source lib.sh @@ -310,6 +310,42 @@ match_ip_tos_test() log_test "ip_tos match ($tcflags)" } +match_ip_ttl_test() +{ + RET=0 + + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + $tcflags dst_ip 192.0.2.2 ip_ttl 63 action drop + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \ + $tcflags dst_ip 192.0.2.2 action drop + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip "ttl=63" -q + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip "ttl=63,mf,frag=256" -q + + tc_check_packets "dev $h2 ingress" 102 1 + check_fail $? "Matched on the wrong filter (no check on ttl)" + + tc_check_packets "dev $h2 ingress" 101 2 + check_err $? 
"Did not match on correct filter (ttl=63)" + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t ip "ttl=255" -q + + tc_check_packets "dev $h2 ingress" 101 3 + check_fail $? "Matched on a wrong filter (ttl=63)" + + tc_check_packets "dev $h2 ingress" 102 1 + check_err $? "Did not match on correct filter (no check on ttl)" + + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + + log_test "ip_ttl match ($tcflags)" +} + match_indev_test() { RET=0 diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh index a0b5f57d6bd3..0727e2012b68 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh @@ -215,10 +215,16 @@ switch_create() bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 + + sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 } switch_destroy() { + sysctl_restore net.ipv4.conf.all.rp_filter + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20 bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10 @@ -359,6 +365,10 @@ ns_switch_create() bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 + + sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 } export -f ns_switch_create diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index ce6bea9675c0..0ccb1dda099a 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh @@ -658,7 +658,7 @@ test_ecn_decap() # In accordance with INET_ECN_decapsulate() __test_ecn_decap 00 00 0x00 __test_ecn_decap 01 01 0x01 - __test_ecn_decap 02 01 0x02 + __test_ecn_decap 02 01 0x01 __test_ecn_decap 01 03 0x03 __test_ecn_decap 02 03 0x03 test_ecn_decap_error diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh index 1209031bc794..5d97fa347d75 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh @@ -237,10 +237,16 @@ switch_create() bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 + + sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 } switch_destroy() { + sysctl_restore net.ipv4.conf.all.rp_filter + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20 bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10 @@ -402,6 +408,10 @@ ns_switch_create() bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 + + sysctl_set net.ipv4.conf.all.rp_filter 0 + sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 + sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 } export -f ns_switch_create diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index 18c5de53558a..bf361f30d6ef 100755 --- 
a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -180,6 +180,8 @@ setup() ;; r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1 ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1 + ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0 + ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0 ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1 ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10 diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index 15d3489ecd9c..ceb7ad4dbd94 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh @@ -6,6 +6,8 @@ set +x set -e +modprobe -q nf_defrag_ipv6 + readonly NETNS="ns-$(mktemp -u XXXXXX)" setup() { diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 4b02933cab8a..bdc03a2097e8 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -125,9 +125,8 @@ static int do_setcpu(int cpu) CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask)) - error(1, 0, "setaffinity %d", cpu); - - if (cfg_verbose) + fprintf(stderr, "cpu: unable to pin, may increase variance.\n"); + else if (cfg_verbose) fprintf(stderr, "cpu: %u\n", cpu); return 0; diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 71a62e7e35b1..3429767cadcd 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -119,7 +119,15 @@ # - list_flush_ipv6_exception # Using the same topology as in pmtu_ipv6, create exceptions, and check # they are shown when listing exception caches, gone after flushing them - +# +# - pmtu_ipv4_route_change +# Use the same topology as in pmtu_ipv4, but issue a route replacement +# command and delete the corresponding device afterward. This tests for +# proper cleanup of the PMTU exceptions by the route replacement path. +# Device unregistration should complete successfully +# +# - pmtu_ipv6_route_change +# Same as above but with IPv6 # Kselftest framework requirement - SKIP code is 4. 
ksft_skip=4 @@ -161,7 +169,9 @@ tests=" cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1 cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1 list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 - list_flush_ipv6_exception ipv6: list and flush cached exceptions 1" + list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 + pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 + pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" NS_A="ns-A" NS_B="ns-B" @@ -1316,6 +1326,63 @@ test_list_flush_ipv6_exception() { return ${fail} } +test_pmtu_ipvX_route_change() { + family=${1} + + setup namespaces routing || return 2 + trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ + "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ + "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 + + if [ ${family} -eq 4 ]; then + ping=ping + dst1="${prefix4}.${b_r1}.1" + dst2="${prefix4}.${b_r2}.1" + gw="${prefix4}.${a_r1}.2" + else + ping=${ping6} + dst1="${prefix6}:${b_r1}::1" + dst2="${prefix6}:${b_r2}::1" + gw="${prefix6}:${a_r1}::2" + fi + + # Set up initial MTU values + mtu "${ns_a}" veth_A-R1 2000 + mtu "${ns_r1}" veth_R1-A 2000 + mtu "${ns_r1}" veth_R1-B 1400 + mtu "${ns_b}" veth_B-R1 1400 + + mtu "${ns_a}" veth_A-R2 2000 + mtu "${ns_r2}" veth_R2-A 2000 + mtu "${ns_r2}" veth_R2-B 1500 + mtu "${ns_b}" veth_B-R2 1500 + + # Create route exceptions + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} + + # Check that exceptions have been created with the correct PMTU + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" + check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" + check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 + + # Replace the route from A to R1 + run_cmd ${ns_a} ip route change default via ${gw} + + # Delete the device in A + run_cmd ${ns_a} ip link del "veth_A-R1" +} + +test_pmtu_ipv4_route_change() { + test_pmtu_ipvX_route_change 4 +} + +test_pmtu_ipv6_route_change() { + test_pmtu_ipvX_route_change 6 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." 
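The route-change cases above create PMTU exceptions with oversized pings and then read them back from the exception cache. As an aside on how such an exception becomes visible to an application, here is a minimal, illustrative C sketch (the destination address and port are placeholders): on a connected datagram socket with strict PMTU discovery enabled, getsockopt(IP_MTU) reports the kernel's cached path MTU for that destination, lowered whenever a PMTU exception exists.

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9) };
        int fd, mtu, val = IP_PMTUDISC_DO;
        socklen_t len = sizeof(mtu);

        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);   /* placeholder */
        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
            return 1;
        /* strict PMTU discovery: oversized sends fail with EMSGSIZE */
        setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
            return 1;
        /* route MTU for this destination, including any cached exception */
        if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
            printf("path MTU to destination: %d\n", mtu);
        close(fd);
        return 0;
    }

Run against a destination behind a smaller-MTU hop after a first oversized send, and the reported value drops to the exception's PMTU, which is exactly what the selftest checks via the route cache.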
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 8c8c7d79c38d..2c522f7a0aec 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); diff --git a/tools/testing/selftests/net/reuseport_addr_any.c b/tools/testing/selftests/net/reuseport_addr_any.c index c6233935fed1..b8475cb29be7 100644 --- a/tools/testing/selftests/net/reuseport_addr_any.c +++ b/tools/testing/selftests/net/reuseport_addr_any.c @@ -21,6 +21,10 @@ #include <sys/socket.h> #include <unistd.h> +#ifndef SOL_DCCP +#define SOL_DCCP 269 +#endif + static const char *IP4_ADDR = "127.0.0.1"; static const char *IP6_ADDR = "::1"; static const char *IP4_MAPPED6 = "::ffff:127.0.0.1"; diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index bdbf4b3125b6..28ea3753da20 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -521,6 +521,11 @@ kci_test_encap_fou() return $ksft_skip fi + if ! /sbin/modprobe -q -n fou; then + echo "SKIP: module fou is not found" + return $ksft_skip + fi + /sbin/modprobe -q fou ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null if [ $? -ne 0 ];then echo "FAIL: can't add fou port 7777, skipping test" diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index 383bac05ac32..3155fbbf644b 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -15,8 +15,9 @@ #include <inttypes.h> #include <linux/net_tstamp.h> #include <linux/errqueue.h> +#include <linux/if_ether.h> #include <linux/ipv6.h> -#include <linux/tcp.h> +#include <linux/udp.h> #include <stdbool.h> #include <stdlib.h> #include <stdio.h> @@ -120,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts) if (rbuf[0] != ts->data) error(1, 0, "payload mismatch. 
expected %c", ts->data); - if (labs(tstop - texpect) > cfg_variance_us) + if (llabs(tstop - texpect) > cfg_variance_us) error(1, 0, "exceeds variance (%d us)", cfg_variance_us); return false; @@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt) { char control[CMSG_SPACE(sizeof(struct sock_extended_err)) + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0}; - char data[sizeof(struct ipv6hdr) + - sizeof(struct tcphdr) + 1]; + char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + + sizeof(struct udphdr) + 1]; struct sock_extended_err *err; struct msghdr msg = {0}; struct iovec iov = {0}; @@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt) msg.msg_controllen = sizeof(control); while (1) { + const char *reason; + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE); if (ret == -1 && errno == EAGAIN) break; @@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt) err = (struct sock_extended_err *)CMSG_DATA(cm); if (err->ee_origin != SO_EE_ORIGIN_TXTIME) error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin); - if (err->ee_code != ECANCELED) - error(1, 0, "errqueue: code 0x%x\n", err->ee_code); + + switch (err->ee_errno) { + case ECANCELED: + if (err->ee_code != SO_EE_CODE_TXTIME_MISSED) + error(1, 0, "errqueue: unknown ECANCELED %u\n", + err->ee_code); + reason = "missed txtime"; + break; + case EINVAL: + if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM) + error(1, 0, "errqueue: unknown EINVAL %u\n", + err->ee_code); + reason = "invalid txtime"; + break; + default: + error(1, 0, "errqueue: errno %u code %u\n", + err->ee_errno, err->ee_code); + }; tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info; tstamp -= (int64_t) glob_tstart; tstamp /= 1000 * 1000; - fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n", - data[ret - 1], tstamp); + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n", + data[ret - 1], tstamp, reason); msg.msg_flags = 0; msg.msg_controllen = sizeof(control); diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh index ac2a30be9b32..f8a19f548ae9 100755 --- a/tools/testing/selftests/net/udpgro.sh +++ b/tools/testing/selftests/net/udpgro.sh @@ -5,6 +5,14 @@ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" +# set global exit status, but never reset nonzero one. +check_err() +{ + if [ $ret -eq 0 ]; then + ret=$1 + fi +} + cleanup() { local -r jobs="$(jobs -p)" local -r ns="$(ip netns list|grep $PEER_NS)" @@ -44,7 +52,9 @@ run_one() { # Hack: let bg programs complete the startup sleep 0.1 ./udpgso_bench_tx ${tx_args} + ret=$? wait $(jobs -p) + return $ret } run_test() { @@ -87,8 +97,10 @@ run_one_nat() { sleep 0.1 ./udpgso_bench_tx ${tx_args} + ret=$? kill -INT $pid wait $(jobs -p) + return $ret } run_one_2sock() { @@ -110,7 +122,9 @@ run_one_2sock() { sleep 0.1 # first UDP GSO socket should be closed at this point ./udpgso_bench_tx ${tx_args} + ret=$? wait $(jobs -p) + return $ret } run_nat_test() { @@ -131,36 +145,54 @@ run_all() { local -r core_args="-l 4" local -r ipv4_args="${core_args} -4 -D 192.168.1.1" local -r ipv6_args="${core_args} -6 -D 2001:db8::1" + ret=0 echo "ipv4" run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400" + check_err $? # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1) # when GRO does not take place run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1" + check_err $? 
# the GSO packets are aggregated because: # * veth schedule napi after each xmit # * segmentation happens in BH context, veth napi poll is delayed after # the transmission of the last segment run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720" + check_err $? run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472" + check_err $? run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720" + check_err $? run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500" + check_err $? run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472" + check_err $? run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472" + check_err $? echo "ipv6" run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400" + check_err $? run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1" + check_err $? run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520" + check_err $? run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452" + check_err $? run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520" + check_err $? run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500" + check_err $? run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452" + check_err $? run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452" + check_err $? + return $ret } if [ ! -f ../bpf/xdp_dummy.o ]; then @@ -180,3 +212,5 @@ elif [[ $1 == "__subprocess_2sock" ]]; then shift run_one_2sock $@ fi + +exit $? diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c index db3d4a8b5a4c..76a24052f4b4 100644 --- a/tools/testing/selftests/net/udpgso_bench_rx.c +++ b/tools/testing/selftests/net/udpgso_bench_rx.c @@ -113,6 +113,9 @@ static void do_poll(int fd, int timeout_ms) interrupted = true; break; } + + /* no events and more time to wait, do poll again */ + continue; } if (pfd.revents != POLLIN) error(1, errno, "poll: 0x%x expected 0x%x\n", diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh index 7a1bf94c5bd3..bdf450eaf60c 100755 --- a/tools/testing/selftests/net/xfrm_policy.sh +++ b/tools/testing/selftests/net/xfrm_policy.sh @@ -202,7 +202,7 @@ check_xfrm() { # 1: iptables -m policy rule count != 0 rval=$1 ip=$2 - lret=0 + local lret=0 ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null @@ -287,6 +287,47 @@ check_hthresh_repeat() return 0 } +# insert non-overlapping policies in a random order and check that +# all of them can be fetched using the traffic selectors. +check_random_order() +{ + local ns=$1 + local log=$2 + + for i in $(seq 100); do + ip -net $ns xfrm policy flush + for j in $(seq 0 16 255 | sort -R); do + ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow + done + for j in $(seq 0 16 255); do + if ! 
ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then + echo "FAIL: $log" 1>&2 + return 1 + fi + done + done + + for i in $(seq 100); do + ip -net $ns xfrm policy flush + for j in $(seq 0 16 255 | sort -R); do + local addr=$(printf "e000:0000:%02x00::/56" $j) + ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow + done + for j in $(seq 0 16 255); do + local addr=$(printf "e000:0000:%02x00::/56" $j) + if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then + echo "FAIL: $log" 1>&2 + return 1 + fi + done + done + + ip -net $ns xfrm policy flush + + echo "PASS: $log" + return 0 +} + #check for needed privileges if [ "$(id -u)" -ne 0 ];then echo "SKIP: Need root privileges" @@ -438,6 +479,8 @@ check_exceptions "exceptions and block policies after htresh change to normal" check_hthresh_repeat "policies with repeated htresh change" +check_random_order ns3 "policies inserted in random order" + for i in 1 2 3 4;do ip netns del ns$i;done exit $ret diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 6dee9e636a95..bcb79ba1f214 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -115,6 +115,7 @@ static struct option long_options[] = { { "tcp", no_argument, 0, 't' }, { "udp", no_argument, 0, 'u' }, { "ip", no_argument, 0, 'i' }, + { NULL, 0, NULL, 0 }, }; static int next_port = 19999; @@ -328,8 +329,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c index aca3491174a1..f4bb4fef0f39 100644 --- a/tools/testing/selftests/networking/timestamping/timestamping.c +++ b/tools/testing/selftests/networking/timestamping/timestamping.c @@ -313,10 +313,16 @@ int main(int argc, char **argv) int val; socklen_t len; struct timeval next; + size_t if_len; if (argc < 2) usage(0); interface = argv[1]; + if_len = strlen(interface); + if (if_len >= IFNAMSIZ) { + printf("interface name exceeds IFNAMSIZ\n"); + exit(1); + } for (i = 2; i < argc; i++) { if (!strcasecmp(argv[i], "SO_TIMESTAMP")) @@ -350,12 +356,12 @@ int main(int argc, char **argv) bail("socket"); memset(&device, 0, sizeof(device)); - strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); + memcpy(device.ifr_name, interface, if_len + 1); if (ioctl(sock, SIOCGIFADDR, &device) < 0) bail("getting interface IP address"); memset(&hwtstamp, 0, sizeof(hwtstamp)); - strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); + memcpy(hwtstamp.ifr_name, interface, if_len + 1); hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); hwconfig.tx_type = diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c index 7e386be47120..2fce2e8f47f5 100644 --- a/tools/testing/selftests/networking/timestamping/txtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c @@ -26,6 +26,7 @@ #include <inttypes.h> #include <linux/errqueue.h> #include <linux/if_ether.h> +#include <linux/if_packet.h> #include <linux/ipv6.h> #include <linux/net_tstamp.h> #include <netdb.h> @@ -34,7 +35,6 @@ #include <netinet/ip.h> #include 
<netinet/udp.h> #include <netinet/tcp.h> -#include <netpacket/packet.h> #include <poll.h> #include <stdarg.h> #include <stdbool.h> @@ -396,12 +396,12 @@ static void do_test(int family, unsigned int report_opt) total_len = cfg_payload_len; if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) { total_len += sizeof(struct udphdr); - if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) + if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) { if (family == PF_INET) total_len += sizeof(struct iphdr); else total_len += sizeof(struct ipv6hdr); - + } /* special case, only rawv6_sendmsg: * pass proto in sin6_port if not connected * also see ANK comment in net/ipv4/raw.c diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh index 9c60337317c6..020137b61407 100755 --- a/tools/testing/selftests/ntb/ntb_test.sh +++ b/tools/testing/selftests/ntb/ntb_test.sh @@ -241,7 +241,7 @@ function get_files_count() split_remote $LOC if [[ "$REMOTE" == "" ]]; then - echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l) + echo $(ls -1 "$VPATH"/${NAME}* 2>/dev/null | wc -l) else echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \ wc -l" 2> /dev/null) diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c index b9fe75fc3e51..8a59438ccc78 100644 --- a/tools/testing/selftests/pidfd/pidfd_open_test.c +++ b/tools/testing/selftests/pidfd/pidfd_open_test.c @@ -6,7 +6,6 @@ #include <inttypes.h> #include <limits.h> #include <linux/types.h> -#include <linux/wait.h> #include <sched.h> #include <signal.h> #include <stdbool.h> diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c index 4b115444dfe9..610811275357 100644 --- a/tools/testing/selftests/pidfd/pidfd_poll_test.c +++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c @@ -3,7 +3,6 @@ #define _GNU_SOURCE #include <errno.h> #include <linux/types.h> -#include <linux/wait.h> #include <poll.h> #include <signal.h> #include <stdbool.h> diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c index 0453c50c949c..0725239bbd85 100644 --- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c +++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c @@ -380,7 +380,6 @@ int test_alignment_handler_integer(void) LOAD_DFORM_TEST(ldu); LOAD_XFORM_TEST(ldx); LOAD_XFORM_TEST(ldux); - LOAD_DFORM_TEST(lmw); STORE_DFORM_TEST(stb); STORE_XFORM_TEST(stbx); STORE_DFORM_TEST(stbu); @@ -399,7 +398,11 @@ int test_alignment_handler_integer(void) STORE_XFORM_TEST(stdx); STORE_DFORM_TEST(stdu); STORE_XFORM_TEST(stdux); + +#ifdef __BIG_ENDIAN__ + LOAD_DFORM_TEST(lmw); STORE_DFORM_TEST(stmw); +#endif return rc; } diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index a2e8c9da7fa5..d50cc05df495 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -19,6 +19,7 @@ #include <limits.h> #include <sys/time.h> #include <sys/syscall.h> +#include <sys/sysinfo.h> #include <sys/types.h> #include <sys/shm.h> #include <linux/futex.h> @@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) { - int pid; - cpu_set_t cpuset; + int pid, ncpus; + cpu_set_t *cpuset; + size_t size; pid = fork(); 
if (pid == -1) { @@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) if (pid) return; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); + ncpus = get_nprocs(); + size = CPU_ALLOC_SIZE(ncpus); + cpuset = CPU_ALLOC(ncpus); + if (!cpuset) { + perror("malloc"); + exit(1); + } + CPU_ZERO_S(size, cpuset); + CPU_SET_S(cpu, size, cpuset); - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + if (sched_setaffinity(0, size, cpuset)) { perror("sched_setaffinity"); + CPU_FREE(cpuset); exit(1); } + CPU_FREE(cpuset); fn(arg); exit(0); diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh index f988d2f42e8f..7c2cb04569da 100755 --- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh +++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh @@ -1,17 +1,19 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0-only +KSELFTESTS_SKIP=4 + . ./eeh-functions.sh if ! eeh_supported ; then echo "EEH not supported on this system, skipping" - exit 0; + exit $KSELFTESTS_SKIP; fi if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \ [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then echo "debugfs EEH testing files are missing. Is debugfs mounted?" - exit 1; + exit $KSELFTESTS_SKIP; fi pre_lspci=`mktemp` @@ -79,4 +81,5 @@ echo "$failed devices failed to recover ($dev_count tested)" lspci | diff -u $pre_lspci - rm -f $pre_lspci -exit $failed +test "$failed" -eq 0 +exit $? diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh index f52ed92b53e7..00dc32c0ed75 100755 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh @@ -5,12 +5,17 @@ pe_ok() { local dev="$1" local path="/sys/bus/pci/devices/$dev/eeh_pe_state" - if ! [ -e "$path" ] ; then + # If a driver doesn't support the error handling callbacks, the + # device is recovered by removing and re-probing it. This causes the + # sysfs directory to disappear, so read the PE state once and squash + # any potential error messages. + local eeh_state="$(cat $path 2>/dev/null)" + if [ -z "$eeh_state" ]; then return 1; fi - local fw_state="$(cut -d' ' -f1 < $path)" - local sw_state="$(cut -d' ' -f2 < $path)" + local fw_state="$(echo $eeh_state | cut -d' ' -f1)" + local sw_state="$(echo $eeh_state | cut -d' ' -f2)" # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an # error state or being recovered. Either way, not ok.
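The pe_ok() rework above fixes a check-then-read race: when a device is recovered by remove-and-reprobe, its sysfs directory vanishes, so testing for the file and then reading it twice can fail halfway through. Reading the attribute once and parsing both fields from that single snapshot avoids the race. A rough C analogue of the same idiom, with a placeholder path and the two-field format assumed hexadecimal for illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read a sysfs attribute exactly once; it may vanish underneath us. */
    static int read_pe_state(const char *path, unsigned int *fw,
                             unsigned int *sw)
    {
        char buf[64];
        ssize_t n;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return -1;    /* device gone mid-test: report "not ok" */
        n = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (n <= 0)
            return -1;
        buf[n] = '\0';
        /* both fields come from the same snapshot, so they are coherent */
        return sscanf(buf, "%x %x", fw, sw) == 2 ? 0 : -1;
    }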
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 7101ffd08d66..d021172fa2eb 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -5,3 +5,4 @@ prot_sao segv_errors wild_bctr large_vm_fork_separation +tlbie_test diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c index a2d7b0e3dca9..a26ac122c759 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -91,8 +91,6 @@ int back_to_back_ebbs(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c index bc893813483e..bb9f587fa76e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -42,8 +42,6 @@ int cycles(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c index dcd351d20328..9ae795ce314e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -99,8 +99,6 @@ int cycles_with_freeze(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); printf("EBBs while frozen %d\n", ebbs_while_frozen); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c index 94c99c12c0f2..4b45a2e70f62 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c index dfbc5c3ad52d..21537d6eb6b7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c index ca2f7d729155..b208bf6ad58d 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); FAIL_IF(ebb_state.stats.ebb_count == 0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c index ac3e6e182614..ba2681a12cc7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +++ 
b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -75,7 +75,6 @@ static int test_body(void) ebb_freeze_pmcs(); ebb_global_disable(); - count_pmc(4, sample_period); mtspr(SPRN_PMC4, 0xdead); dump_summary_ebb_state(); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c index b8242e9d97d2..791d37ba327b 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -70,13 +70,6 @@ int multi_counter(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - count_pmc(2, sample_period); - count_pmc(3, sample_period); - count_pmc(4, sample_period); - count_pmc(5, sample_period); - count_pmc(6, sample_period); - dump_ebb_state(); for (i = 0; i < 6; i++) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c index a05c0e18ded6..9b0f70d59702 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -61,8 +61,6 @@ static int cycles_child(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_summary_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c index 153ebc92234f..2904c741e04e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -82,8 +82,6 @@ static int test_body(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); if (mmcr0_mismatch) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index eadad75ed7e6..b29f8ba22d1e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -76,8 +76,6 @@ int pmc56_overflow(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(2, sample_period); - dump_ebb_state(); printf("PMC5/6 overflow %d\n", pmc56_overflowed); diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bdbbbe8431e0..3694613f418f 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -44,7 +44,7 @@ struct shared_info { unsigned long amr2; /* AMR value that ptrace should refuse to write to the child. */ - unsigned long amr3; + unsigned long invalid_amr; /* IAMR value the parent expects to read from the child. */ unsigned long expected_iamr; @@ -57,8 +57,8 @@ struct shared_info { * (even though they're valid ones) because userspace doesn't have * access to those registers. 
*/ - unsigned long new_iamr; - unsigned long new_uamor; + unsigned long invalid_iamr; + unsigned long invalid_uamor; }; static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) return syscall(__NR_pkey_alloc, flags, init_access_rights); } -static int sys_pkey_free(int pkey) -{ - return syscall(__NR_pkey_free, pkey); -} - static int child(struct shared_info *info) { unsigned long reg; @@ -100,28 +95,32 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3); + /* + * Invalid AMR value with which we try to force-write + * things that are denied by the UAMOR setting. + */ + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); + /* + * If PKEY_DISABLE_EXECUTE succeeded, we should update the expected_iamr. + */ if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); else info->expected_iamr &= ~(1ul << pkeyshift(pkey1)); - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3)); - - info->expected_uamor |= 3ul << pkeyshift(pkey1) | - 3ul << pkeyshift(pkey2); - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->new_uamor |= 3ul << pkeyshift(pkey1); + /* + * We allocated pkey2 and pkey3 above. Clear the IAMR bits. + */ + info->expected_iamr &= ~(1ul << pkeyshift(pkey2)); + info->expected_iamr &= ~(1ul << pkeyshift(pkey3)); /* - * We won't use pkey3. We just want a plausible but invalid key to test - * whether ptrace will let us write to AMR bits we are not supposed to. - * - * This also tests whether the kernel restores the UAMOR permissions - * after a key is freed. + * Create an IAMR value different from the expected value. + * The kernel will reject the IAMR and UAMOR change. */ - sys_pkey_free(pkey3); + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2)); + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1)); printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); @@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->amr3 = regs[0]; - info->expected_iamr = info->new_iamr = regs[1]; - info->expected_uamor = info->new_uamor = regs[2]; + info->amr1 = info->amr2 = regs[0]; + info->expected_iamr = regs[1]; + info->expected_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); @@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid) return ret; /* Write invalid AMR value in child. */ - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1); + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1); PARENT_FAIL_IF(ret, &info->child_sync); - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3); + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr); /* Wake up child so that it can verify it didn't change. */ ret = prod_child(&info->child_sync); @@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid) /* Try to write to IAMR.
*/ regs[0] = info->amr1; - regs[1] = info->new_iamr; + regs[1] = info->invalid_iamr; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2); PARENT_FAIL_IF(!ret, &info->child_sync); @@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid) ptrace_write_running, regs[0], regs[1]); /* Try to write to IAMR and UAMOR. */ - regs[2] = info->new_uamor; + regs[2] = info->invalid_uamor; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3); PARENT_FAIL_IF(!ret, &info->child_sync); diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore index 0b969fba3beb..b8afb4f2481e 100644 --- a/tools/testing/selftests/powerpc/security/.gitignore +++ b/tools/testing/selftests/powerpc/security/.gitignore @@ -1 +1,2 @@ rfi_flush +entry_flush diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile index 85861c46b445..e550a287768f 100644 --- a/tools/testing/selftests/powerpc/security/Makefile +++ b/tools/testing/selftests/powerpc/security/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ -TEST_GEN_PROGS := rfi_flush +TEST_GEN_PROGS := rfi_flush entry_flush top_srcdir = ../../../../.. CFLAGS += -I../../../../../usr/include diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c new file mode 100644 index 000000000000..e8d24f9a5d3e --- /dev/null +++ b/tools/testing/selftests/powerpc/security/entry_flush.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2018 IBM Corporation. + */ + +#define __SANE_USERSPACE_TYPES__ + +#include <sys/types.h> +#include <stdint.h> +#include <malloc.h> +#include <unistd.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include "utils.h" + +#define CACHELINE_SIZE 128 + +struct perf_event_read { + __u64 nr; + __u64 l1d_misses; +}; + +static inline __u64 load(void *addr) +{ + __u64 tmp; + + asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr)); + + return tmp; +} + +static void syscall_loop(char *p, unsigned long iterations, + unsigned long zero_size) +{ + for (unsigned long i = 0; i < iterations; i++) { + for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE) + load(p + j); + getppid(); + } +} + +int entry_flush_test(void) +{ + char *p; + int repetitions = 10; + int fd, passes = 0, iter, rc = 0; + struct perf_event_read v; + __u64 l1d_misses_total = 0; + unsigned long iterations = 100000, zero_size = 24 * 1024; + unsigned long l1d_misses_expected; + int rfi_flush_orig; + int entry_flush, entry_flush_orig; + + SKIP_IF(geteuid() != 0); + + // The PMU event we use only works on Power7 or later + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); + + if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) { + perror("Unable to read powerpc/rfi_flush debugfs file"); + SKIP_IF(1); + } + + if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) { + perror("Unable to read powerpc/entry_flush debugfs file"); + SKIP_IF(1); + } + + if (rfi_flush_orig != 0) { + if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) { + perror("error writing to powerpc/rfi_flush debugfs file"); + FAIL_IF(1); + } + } + + entry_flush = entry_flush_orig; + + fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1); + FAIL_IF(fd < 0); + + p = (char *)memalign(zero_size, CACHELINE_SIZE); + + FAIL_IF(perf_event_enable(fd)); + + // disable L1 prefetching + set_dscr(1); + + iter = repetitions; + + /* + * We expect to see l1d 
miss for each cacheline access when entry_flush + * is set. Allow a small variation on this. + */ + l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2); + +again: + FAIL_IF(perf_event_reset(fd)); + + syscall_loop(p, iterations, zero_size); + + FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v)); + + if (entry_flush && v.l1d_misses >= l1d_misses_expected) + passes++; + else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2)) + passes++; + + l1d_misses_total += v.l1d_misses; + + while (--iter) + goto again; + + if (passes < repetitions) { + printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n", + entry_flush, l1d_misses_total, entry_flush ? '<' : '>', + entry_flush ? repetitions * l1d_misses_expected : + repetitions * l1d_misses_expected / 2, + repetitions - passes, repetitions); + rc = 1; + } else + printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n", + entry_flush, l1d_misses_total, entry_flush ? '>' : '<', + entry_flush ? repetitions * l1d_misses_expected : + repetitions * l1d_misses_expected / 2, + passes, repetitions); + + if (entry_flush == entry_flush_orig) { + entry_flush = !entry_flush_orig; + if (write_debugfs_file("powerpc/entry_flush", entry_flush) < 0) { + perror("error writing to powerpc/entry_flush debugfs file"); + return 1; + } + iter = repetitions; + l1d_misses_total = 0; + passes = 0; + goto again; + } + + perf_event_disable(fd); + close(fd); + + set_dscr(0); + + if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) { + perror("unable to restore original value of powerpc/rfi_flush debugfs file"); + return 1; + } + + if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) { + perror("unable to restore original value of powerpc/entry_flush debugfs file"); + return 1; + } + + return rc; +} + +int main(int argc, char *argv[]) +{ + return test_harness(entry_flush_test, "entry_flush_test"); +} diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c index 0a7d0afb26b8..533315e68133 100644 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c @@ -50,16 +50,30 @@ int rfi_flush_test(void) __u64 l1d_misses_total = 0; unsigned long iterations = 100000, zero_size = 24 * 1024; unsigned long l1d_misses_expected; - int rfi_flush_org, rfi_flush; + int rfi_flush_orig, rfi_flush; + int have_entry_flush, entry_flush_orig; SKIP_IF(geteuid() != 0); - if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) { + if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) { perror("Unable to read powerpc/rfi_flush debugfs file"); SKIP_IF(1); } - rfi_flush = rfi_flush_org; + if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) { + have_entry_flush = 0; + } else { + have_entry_flush = 1; + + if (entry_flush_orig != 0) { + if (write_debugfs_file("powerpc/entry_flush", 0) < 0) { + perror("error writing to powerpc/entry_flush debugfs file"); + return 1; + } + } + } + + rfi_flush = rfi_flush_orig; fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1); FAIL_IF(fd < 0); @@ -68,6 +82,7 @@ int rfi_flush_test(void) FAIL_IF(perf_event_enable(fd)); + // disable L1 prefetching set_dscr(1); iter = repetitions; @@ -109,8 +124,8 @@ again: repetitions * l1d_misses_expected / 2, passes, repetitions); - if (rfi_flush == rfi_flush_org) { - rfi_flush = !rfi_flush_org; + if (rfi_flush == rfi_flush_orig) { + rfi_flush = !rfi_flush_orig; if 
(write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) { perror("error writing to powerpc/rfi_flush debugfs file"); return 1; @@ -126,11 +141,19 @@ again: set_dscr(0); - if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) { + if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) { perror("unable to restore original value of powerpc/rfi_flush debugfs file"); return 1; } + if (have_entry_flush) { + if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) { + perror("unable to restore original value of powerpc/entry_flush " + "debugfs file"); + return 1; + } + } + return rc; } diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index c02d24835db4..176102eca994 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -16,6 +16,7 @@ #include <string.h> #include <sys/ioctl.h> #include <sys/stat.h> +#include <sys/sysinfo.h> #include <sys/types.h> #include <sys/utsname.h> #include <unistd.h> @@ -88,28 +89,40 @@ void *get_auxv_entry(int type) int pick_online_cpu(void) { - cpu_set_t mask; - int cpu; + int ncpus, cpu = -1; + cpu_set_t *mask; + size_t size; - CPU_ZERO(&mask); - - if (sched_getaffinity(0, sizeof(mask), &mask)) { - perror("sched_getaffinity"); + ncpus = get_nprocs_conf(); + size = CPU_ALLOC_SIZE(ncpus); + mask = CPU_ALLOC(ncpus); + if (!mask) { + perror("malloc"); return -1; } + CPU_ZERO_S(size, mask); + + if (sched_getaffinity(0, size, mask)) { + perror("sched_getaffinity"); + goto done; + } + /* We prefer a primary thread, but skip 0 */ - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = 8; cpu < ncpus; cpu += 8) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; /* Search for anything, but in reverse */ - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = ncpus - 1; cpu >= 0; cpu--) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; printf("No cpus in affinity mask?!\n"); - return -1; + +done: + CPU_FREE(mask); + return cpu; } bool is_ppc64le(void) diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c index 471e2aa28077..fb4fe9188806 100644 --- a/tools/testing/selftests/proc/proc-loadavg-001.c +++ b/tools/testing/selftests/proc/proc-loadavg-001.c @@ -14,7 +14,6 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Test that /proc/loadavg correctly reports last pid in pid namespace. */ -#define _GNU_SOURCE #include <errno.h> #include <sched.h> #include <sys/types.h> diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c index 9f6d000c0245..8511dcfe67c7 100644 --- a/tools/testing/selftests/proc/proc-self-syscall.c +++ b/tools/testing/selftests/proc/proc-self-syscall.c @@ -13,7 +13,6 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#define _GNU_SOURCE #include <unistd.h> #include <sys/syscall.h> #include <sys/types.h> diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c index 30e2b7849089..e7ceabed7f51 100644 --- a/tools/testing/selftests/proc/proc-uptime-002.c +++ b/tools/testing/selftests/proc/proc-uptime-002.c @@ -15,7 +15,6 @@ */ // Test that values in /proc/uptime increment monotonically // while shifting across CPUs. 
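[Editor's aside] The pick_online_cpu() rewrite in the utils.c hunk above exists because a fixed-size cpu_set_t only covers CPU_SETSIZE (1024 in glibc) CPUs, so sched_getaffinity() can fail outright on machines configured with more. A minimal standalone sketch of the dynamically sized CPU-set pattern the new code uses (a toy program for illustration, not part of the selftests):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	/* Size the set to the configured CPU count, not CPU_SETSIZE. */
	int ncpus = get_nprocs_conf();
	size_t size = CPU_ALLOC_SIZE(ncpus);
	cpu_set_t *mask = CPU_ALLOC(ncpus);
	int cpu;

	if (!mask) {
		perror("CPU_ALLOC");
		return 1;
	}

	CPU_ZERO_S(size, mask);
	if (sched_getaffinity(0, size, mask)) {
		perror("sched_getaffinity");
		CPU_FREE(mask);
		return 1;
	}

	/* The _S macro variants take the allocated size explicitly. */
	for (cpu = 0; cpu < ncpus; cpu++)
		if (CPU_ISSET_S(cpu, size, mask))
			printf("cpu %d usable\n", cpu);

	CPU_FREE(mask);
	return 0;
}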
-#define _GNU_SOURCE #undef NDEBUG #include <assert.h> #include <unistd.h> diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile index c0b7f89f0930..2f1f532c39db 100644 --- a/tools/testing/selftests/ptrace/Makefile +++ b/tools/testing/selftests/ptrace/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -CFLAGS += -iquote../../../../include/uapi -Wall +CFLAGS += -std=c99 -pthread -iquote../../../../include/uapi -Wall -TEST_GEN_PROGS := get_syscall_info peeksiginfo +TEST_GEN_PROGS := get_syscall_info peeksiginfo vmaccess include ../lib.mk diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c new file mode 100644 index 000000000000..4db327b44586 --- /dev/null +++ b/tools/testing/selftests/ptrace/vmaccess.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2020 Bernd Edlinger <bernd.edlinger@hotmail.de> + * All rights reserved. + * + * Check whether /proc/$pid/mem can be accessed without causing deadlocks + * when de_thread is blocked with ->cred_guard_mutex held. + */ + +#include "../kselftest_harness.h" +#include <stdio.h> +#include <fcntl.h> +#include <pthread.h> +#include <signal.h> +#include <unistd.h> +#include <sys/ptrace.h> + +static void *thread(void *arg) +{ + ptrace(PTRACE_TRACEME, 0, 0L, 0L); + return NULL; +} + +TEST(vmaccess) +{ + int f, pid = fork(); + char mm[64]; + + if (!pid) { + pthread_t pt; + + pthread_create(&pt, NULL, thread, NULL); + pthread_join(pt, NULL); + execlp("true", "true", NULL); + } + + sleep(1); + sprintf(mm, "/proc/%d/mem", pid); + f = open(mm, O_RDONLY); + ASSERT_GE(f, 0); + close(f); + f = kill(pid, SIGCONT); + ASSERT_EQ(f, 0); +} + +TEST(attach) +{ + int s, k, pid = fork(); + + if (!pid) { + pthread_t pt; + + pthread_create(&pt, NULL, thread, NULL); + pthread_join(pt, NULL); + execlp("sleep", "sleep", "2", NULL); + } + + sleep(1); + k = ptrace(PTRACE_ATTACH, pid, 0L, 0L); + ASSERT_EQ(errno, EAGAIN); + ASSERT_EQ(k, -1); + k = waitpid(-1, &s, WNOHANG); + ASSERT_NE(k, -1); + ASSERT_NE(k, 0); + ASSERT_NE(k, pid); + ASSERT_EQ(WIFEXITED(s), 1); + ASSERT_EQ(WEXITSTATUS(s), 0); + sleep(1); + k = ptrace(PTRACE_ATTACH, pid, 0L, 0L); + ASSERT_EQ(k, 0); + k = waitpid(-1, &s, 0); + ASSERT_EQ(k, pid); + ASSERT_EQ(WIFSTOPPED(s), 1); + ASSERT_EQ(WSTOPSIG(s), SIGSTOP); + k = ptrace(PTRACE_DETACH, pid, 0L, 0L); + ASSERT_EQ(k, 0); + k = waitpid(-1, &s, 0); + ASSERT_EQ(k, pid); + ASSERT_EQ(WIFEXITED(s), 1); + ASSERT_EQ(WEXITSTATUS(s), 0); + k = waitpid(-1, NULL, 0); + ASSERT_EQ(k, -1); + ASSERT_EQ(errno, ECHILD); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/seccomp/config b/tools/testing/selftests/seccomp/config index db1e11b08c8a..764af1f853f9 100644 --- a/tools/testing/selftests/seccomp/config +++ b/tools/testing/selftests/seccomp/config @@ -1,2 +1,3 @@ +CONFIG_PID_NS=y CONFIG_SECCOMP=y CONFIG_SECCOMP_FILTER=y diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 96bbda4f10fc..19c7351eeb74 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -177,7 +177,7 @@ struct seccomp_metadata { #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) struct seccomp_notif { __u64 id; diff --git 
a/tools/testing/selftests/sgx/.gitignore b/tools/testing/selftests/sgx/.gitignore new file mode 100644 index 000000000000..fbaf0bda9a92 --- /dev/null +++ b/tools/testing/selftests/sgx/.gitignore @@ -0,0 +1,2 @@ +test_sgx +test_encl.elf diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile new file mode 100644 index 000000000000..7f12d55b97f8 --- /dev/null +++ b/tools/testing/selftests/sgx/Makefile @@ -0,0 +1,57 @@ +top_srcdir = ../../../.. + +include ../lib.mk + +.PHONY: all clean + +CAN_BUILD_X86_64 := $(shell ../x86/check_cc.sh $(CC) \ + ../x86/trivial_64bit_program.c) + +ifndef OBJCOPY +OBJCOPY := $(CROSS_COMPILE)objcopy +endif + +INCLUDES := -I$(top_srcdir)/tools/include +HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack +ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \ + -fno-stack-protector -mrdrnd $(INCLUDES) + +TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx + +ifeq ($(CAN_BUILD_X86_64), 1) +all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf +endif + +$(OUTPUT)/test_sgx: $(OUTPUT)/main.o \ + $(OUTPUT)/load.o \ + $(OUTPUT)/sigstruct.o \ + $(OUTPUT)/call.o \ + $(OUTPUT)/sign_key.o + $(CC) $(HOST_CFLAGS) -o $@ $^ -lcrypto + +$(OUTPUT)/main.o: main.c + $(CC) $(HOST_CFLAGS) -c $< -o $@ + +$(OUTPUT)/load.o: load.c + $(CC) $(HOST_CFLAGS) -c $< -o $@ + +$(OUTPUT)/sigstruct.o: sigstruct.c + $(CC) $(HOST_CFLAGS) -c $< -o $@ + +$(OUTPUT)/call.o: call.S + $(CC) $(HOST_CFLAGS) -c $< -o $@ + +$(OUTPUT)/sign_key.o: sign_key.S + $(CC) $(HOST_CFLAGS) -c $< -o $@ + +$(OUTPUT)/test_encl.elf: test_encl.lds test_encl.c test_encl_bootstrap.S + $(CC) $(ENCL_CFLAGS) -T $^ -o $@ + +EXTRA_CLEAN := \ + $(OUTPUT)/test_encl.elf \ + $(OUTPUT)/load.o \ + $(OUTPUT)/call.o \ + $(OUTPUT)/main.o \ + $(OUTPUT)/sigstruct.o \ + $(OUTPUT)/test_sgx \ + $(OUTPUT)/test_sgx.o \ diff --git a/tools/testing/selftests/sgx/call.S b/tools/testing/selftests/sgx/call.S new file mode 100644 index 000000000000..4ecadc7490f4 --- /dev/null +++ b/tools/testing/selftests/sgx/call.S @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** +* Copyright(c) 2016-20 Intel Corporation. +*/ + + .text + + .global sgx_call_vdso +sgx_call_vdso: + .cfi_startproc + push %r15 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r15, 0 + push %r14 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r14, 0 + push %r13 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r13, 0 + push %r12 + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %r12, 0 + push %rbx + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rbx, 0 + push $0 + .cfi_adjust_cfa_offset 8 + push 0x38(%rsp) + .cfi_adjust_cfa_offset 8 + call *eenter(%rip) + add $0x10, %rsp + .cfi_adjust_cfa_offset -0x10 + pop %rbx + .cfi_adjust_cfa_offset -8 + pop %r12 + .cfi_adjust_cfa_offset -8 + pop %r13 + .cfi_adjust_cfa_offset -8 + pop %r14 + .cfi_adjust_cfa_offset -8 + pop %r15 + .cfi_adjust_cfa_offset -8 + ret + .cfi_endproc diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h new file mode 100644 index 000000000000..592c1ccf4576 --- /dev/null +++ b/tools/testing/selftests/sgx/defines.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2016-20 Intel Corporation. 
+ */ + +#ifndef DEFINES_H +#define DEFINES_H + +#include <stdint.h> + +#define PAGE_SIZE 4096 +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define __aligned(x) __attribute__((__aligned__(x))) +#define __packed __attribute__((packed)) + +#include "../../../../arch/x86/kernel/cpu/sgx/arch.h" +#include "../../../../arch/x86/include/asm/enclu.h" +#include "../../../../arch/x86/include/uapi/asm/sgx.h" + +#endif /* DEFINES_H */ diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c new file mode 100644 index 000000000000..9d43b75aaa55 --- /dev/null +++ b/tools/testing/selftests/sgx/load.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. */ + +#include <assert.h> +#include <elf.h> +#include <errno.h> +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/types.h> +#include "defines.h" +#include "main.h" + +void encl_delete(struct encl *encl) +{ + if (encl->encl_base) + munmap((void *)encl->encl_base, encl->encl_size); + + if (encl->bin) + munmap(encl->bin, encl->bin_size); + + if (encl->fd) + close(encl->fd); + + if (encl->segment_tbl) + free(encl->segment_tbl); + + memset(encl, 0, sizeof(*encl)); +} + +static bool encl_map_bin(const char *path, struct encl *encl) +{ + struct stat sb; + void *bin; + int ret; + int fd; + + fd = open(path, O_RDONLY); + if (fd == -1) { + perror("open()"); + return false; + } + + ret = stat(path, &sb); + if (ret) { + perror("stat()"); + goto err; + } + + bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (bin == MAP_FAILED) { + perror("mmap()"); + goto err; + } + + encl->bin = bin; + encl->bin_size = sb.st_size; + + close(fd); + return true; + +err: + close(fd); + return false; +} + +static bool encl_ioc_create(struct encl *encl) +{ + struct sgx_secs *secs = &encl->secs; + struct sgx_enclave_create ioc; + int rc; + + assert(encl->encl_base != 0); + + memset(secs, 0, sizeof(*secs)); + secs->ssa_frame_size = 1; + secs->attributes = SGX_ATTR_MODE64BIT; + secs->xfrm = 3; + secs->base = encl->encl_base; + secs->size = encl->encl_size; + + ioc.src = (unsigned long)secs; + rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc); + if (rc) { + fprintf(stderr, "SGX_IOC_ENCLAVE_CREATE failed: errno=%d\n", + errno); + munmap((void *)secs->base, encl->encl_size); + return false; + } + + return true; +} + +static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg) +{ + struct sgx_enclave_add_pages ioc; + struct sgx_secinfo secinfo; + int rc; + + memset(&secinfo, 0, sizeof(secinfo)); + secinfo.flags = seg->flags; + + ioc.src = (uint64_t)encl->src + seg->offset; + ioc.offset = seg->offset; + ioc.length = seg->size; + ioc.secinfo = (unsigned long)&secinfo; + ioc.flags = SGX_PAGE_MEASURE; + + rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc); + if (rc < 0) { + fprintf(stderr, "SGX_IOC_ENCLAVE_ADD_PAGES failed: errno=%d.\n", + errno); + return false; + } + + return true; +} + +bool encl_load(const char *path, struct encl *encl) +{ + Elf64_Phdr *phdr_tbl; + off_t src_offset; + Elf64_Ehdr *ehdr; + int i, j; + int ret; + + memset(encl, 0, sizeof(*encl)); + + ret = open("/dev/sgx_enclave", O_RDWR); + if (ret < 0) { + fprintf(stderr, "Unable to open /dev/sgx_enclave\n"); + goto err; + } + + encl->fd = ret; + + if (!encl_map_bin(path, encl)) + goto err; + + ehdr = encl->bin; + phdr_tbl = 
encl->bin + ehdr->e_phoff; + + for (i = 0; i < ehdr->e_phnum; i++) { + Elf64_Phdr *phdr = &phdr_tbl[i]; + + if (phdr->p_type == PT_LOAD) + encl->nr_segments++; + } + + encl->segment_tbl = calloc(encl->nr_segments, + sizeof(struct encl_segment)); + if (!encl->segment_tbl) + goto err; + + for (i = 0, j = 0; i < ehdr->e_phnum; i++) { + Elf64_Phdr *phdr = &phdr_tbl[i]; + unsigned int flags = phdr->p_flags; + struct encl_segment *seg; + + if (phdr->p_type != PT_LOAD) + continue; + + seg = &encl->segment_tbl[j]; + + if (!!(flags & ~(PF_R | PF_W | PF_X))) { + fprintf(stderr, + "%d has invalid segment flags 0x%02x.\n", i, + phdr->p_flags); + goto err; + } + + if (j == 0 && flags != (PF_R | PF_W)) { + fprintf(stderr, + "TCS has invalid segment flags 0x%02x.\n", + phdr->p_flags); + goto err; + } + + if (j == 0) { + src_offset = phdr->p_offset & PAGE_MASK; + + seg->prot = PROT_READ | PROT_WRITE; + seg->flags = SGX_PAGE_TYPE_TCS << 8; + } else { + seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0; + seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0; + seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0; + seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot; + } + + seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset; + seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK; + + printf("0x%016lx 0x%016lx 0x%02x\n", seg->offset, seg->size, + seg->prot); + + j++; + } + + assert(j == encl->nr_segments); + + encl->src = encl->bin + src_offset; + encl->src_size = encl->segment_tbl[j - 1].offset + + encl->segment_tbl[j - 1].size; + + for (encl->encl_size = 4096; encl->encl_size < encl->src_size; ) + encl->encl_size <<= 1; + + return true; + +err: + encl_delete(encl); + return false; +} + +static bool encl_map_area(struct encl *encl) +{ + size_t encl_size = encl->encl_size; + void *area; + + area = mmap(NULL, encl_size * 2, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (area == MAP_FAILED) { + perror("mmap"); + return false; + } + + encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1); + + munmap(area, encl->encl_base - (uint64_t)area); + munmap((void *)(encl->encl_base + encl_size), + (uint64_t)area + encl_size - encl->encl_base); + + return true; +} + +bool encl_build(struct encl *encl) +{ + struct sgx_enclave_init ioc; + int ret; + int i; + + if (!encl_map_area(encl)) + return false; + + if (!encl_ioc_create(encl)) + return false; + + /* + * Pages must be added before mapping VMAs because their permissions + * cap the VMA permissions. + */ + for (i = 0; i < encl->nr_segments; i++) { + struct encl_segment *seg = &encl->segment_tbl[i]; + + if (!encl_ioc_add_pages(encl, seg)) + return false; + } + + ioc.sigstruct = (uint64_t)&encl->sigstruct; + ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc); + if (ret) { + fprintf(stderr, "SGX_IOC_ENCLAVE_INIT failed: errno=%d\n", + errno); + return false; + } + + return true; +} diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c new file mode 100644 index 000000000000..724cec700926 --- /dev/null +++ b/tools/testing/selftests/sgx/main.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. 
*/ + +#include <elf.h> +#include <errno.h> +#include <fcntl.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/types.h> +#include "defines.h" +#include "main.h" +#include "../kselftest.h" + +static const uint64_t MAGIC = 0x1122334455667788ULL; +vdso_sgx_enter_enclave_t eenter; + +struct vdso_symtab { + Elf64_Sym *elf_symtab; + const char *elf_symstrtab; + Elf64_Word *elf_hashtab; +}; + +static void *vdso_get_base_addr(char *envp[]) +{ + Elf64_auxv_t *auxv; + int i; + + for (i = 0; envp[i]; i++) + ; + + auxv = (Elf64_auxv_t *)&envp[i + 1]; + + for (i = 0; auxv[i].a_type != AT_NULL; i++) { + if (auxv[i].a_type == AT_SYSINFO_EHDR) + return (void *)auxv[i].a_un.a_val; + } + + return NULL; +} + +static Elf64_Dyn *vdso_get_dyntab(void *addr) +{ + Elf64_Ehdr *ehdr = addr; + Elf64_Phdr *phdrtab = addr + ehdr->e_phoff; + int i; + + for (i = 0; i < ehdr->e_phnum; i++) + if (phdrtab[i].p_type == PT_DYNAMIC) + return addr + phdrtab[i].p_offset; + + return NULL; +} + +static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag) +{ + int i; + + for (i = 0; dyntab[i].d_tag != DT_NULL; i++) + if (dyntab[i].d_tag == tag) + return addr + dyntab[i].d_un.d_ptr; + + return NULL; +} + +static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab) +{ + Elf64_Dyn *dyntab = vdso_get_dyntab(addr); + + symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB); + if (!symtab->elf_symtab) + return false; + + symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB); + if (!symtab->elf_symstrtab) + return false; + + symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH); + if (!symtab->elf_hashtab) + return false; + + return true; +} + +static unsigned long elf_sym_hash(const char *name) +{ + unsigned long h = 0, high; + + while (*name) { + h = (h << 4) + *name++; + high = h & 0xf0000000; + + if (high) + h ^= high >> 24; + + h &= ~high; + } + + return h; +} + +static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name) +{ + Elf64_Word bucketnum = symtab->elf_hashtab[0]; + Elf64_Word *buckettab = &symtab->elf_hashtab[2]; + Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum]; + Elf64_Sym *sym; + Elf64_Word i; + + for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF; + i = chaintab[i]) { + sym = &symtab->elf_symtab[i]; + if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name])) + return sym; + } + + return NULL; +} + +bool report_results(struct sgx_enclave_run *run, int ret, uint64_t result, + const char *test) +{ + bool valid = true; + + if (ret) { + printf("FAIL: %s() returned: %d\n", test, ret); + valid = false; + } + + if (run->function != EEXIT) { + printf("FAIL: %s() function, expected: %u, got: %u\n", test, EEXIT, + run->function); + valid = false; + } + + if (result != MAGIC) { + printf("FAIL: %s(), expected: 0x%lx, got: 0x%lx\n", test, MAGIC, + result); + valid = false; + } + + if (run->user_data) { + printf("FAIL: %s() user data, expected: 0x0, got: 0x%llx\n", + test, run->user_data); + valid = false; + } + + return valid; +} + +static int user_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9, + struct sgx_enclave_run *run) +{ + run->user_data = 0; + return 0; +} + +int main(int argc, char *argv[], char *envp[]) +{ + struct sgx_enclave_run run; + struct vdso_symtab symtab; + Elf64_Sym *eenter_sym; + uint64_t result = 0; + struct encl encl; + 
unsigned int i; + void *addr; + int ret; + + memset(&run, 0, sizeof(run)); + + if (!encl_load("test_encl.elf", &encl)) { + encl_delete(&encl); + ksft_exit_skip("cannot load enclaves\n"); + } + + if (!encl_measure(&encl)) + goto err; + + if (!encl_build(&encl)) + goto err; + + /* + * Only an enclave consumer needs to do this. + */ + for (i = 0; i < encl.nr_segments; i++) { + struct encl_segment *seg = &encl.segment_tbl[i]; + + addr = mmap((void *)encl.encl_base + seg->offset, seg->size, + seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0); + if (addr == MAP_FAILED) { + fprintf(stderr, "mmap() failed, errno=%d.\n", errno); + exit(KSFT_FAIL); + } + } + + memset(&run, 0, sizeof(run)); + run.tcs = encl.encl_base; + + addr = vdso_get_base_addr(envp); + if (!addr) + goto err; + + if (!vdso_get_symtab(addr, &symtab)) + goto err; + + eenter_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave"); + if (!eenter_sym) + goto err; + + eenter = addr + eenter_sym->st_value; + + ret = sgx_call_vdso((void *)&MAGIC, &result, 0, EENTER, NULL, NULL, &run); + if (!report_results(&run, ret, result, "sgx_call_vdso")) + goto err; + + /* Invoke the vDSO directly. */ + result = 0; + ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER, + 0, 0, &run); + if (!report_results(&run, ret, result, "eenter")) + goto err; + + /* And with an exit handler. */ + run.user_handler = (__u64)user_handler; + run.user_data = 0xdeadbeef; + ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER, + 0, 0, &run); + if (!report_results(&run, ret, result, "user_handler")) + goto err; + + printf("SUCCESS\n"); + encl_delete(&encl); + exit(KSFT_PASS); + +err: + encl_delete(&encl); + exit(KSFT_FAIL); +} diff --git a/tools/testing/selftests/sgx/main.h b/tools/testing/selftests/sgx/main.h new file mode 100644 index 000000000000..67211a708f04 --- /dev/null +++ b/tools/testing/selftests/sgx/main.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2016-20 Intel Corporation. + */ + +#ifndef MAIN_H +#define MAIN_H + +struct encl_segment { + off_t offset; + size_t size; + unsigned int prot; + unsigned int flags; +}; + +struct encl { + int fd; + void *bin; + off_t bin_size; + void *src; + size_t src_size; + size_t encl_size; + off_t encl_base; + unsigned int nr_segments; + struct encl_segment *segment_tbl; + struct sgx_secs secs; + struct sgx_sigstruct sigstruct; +}; + +extern unsigned char sign_key[]; +extern unsigned char sign_key_end[]; + +void encl_delete(struct encl *ctx); +bool encl_load(const char *path, struct encl *encl); +bool encl_measure(struct encl *encl); +bool encl_build(struct encl *encl); + +int sgx_call_vdso(void *rdi, void *rsi, long rdx, u32 function, void *r8, void *r9, + struct sgx_enclave_run *run); + +#endif /* MAIN_H */ diff --git a/tools/testing/selftests/sgx/sign_key.S b/tools/testing/selftests/sgx/sign_key.S new file mode 100644 index 000000000000..e4fbe948444a --- /dev/null +++ b/tools/testing/selftests/sgx/sign_key.S @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** +* Copyright(c) 2016-20 Intel Corporation.
+*/ + + .section ".rodata", "a" + +sign_key: + .globl sign_key + .incbin "sign_key.pem" +sign_key_end: + .globl sign_key_end diff --git a/tools/testing/selftests/sgx/sign_key.pem b/tools/testing/selftests/sgx/sign_key.pem new file mode 100644 index 000000000000..d76f21f19187 --- /dev/null +++ b/tools/testing/selftests/sgx/sign_key.pem @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG4wIBAAKCAYEApalGbq7Q+usM91CPtksu3D+b0Prc8gAFL6grM3mg85A5Bx8V +cfMXPgtrw8EYFwQxDAvzZWwl+9VfOX0ECrFRBkOHcOiG0SnADN8+FLj1UiNUQwbp +S6OzhNWuRcSbGraSOyUlVlV0yMQSvewyzGklOaXBe30AJqzIBc8QfdSxKuP8rs0Z +ga6k/Bl73osrYKByILJTUUeZqjLERsE6GebsdzbWgKn8qVqng4ZS4yMNg6LeRlH3 ++9CIPgg4jwpSLHcp7dq2qTIB9a0tGe9ayp+5FbucpB6U7ePold0EeRN6RlJGDF9k +L93v8P5ykz5G5gYZ2g0K1X2sHIWV4huxPgv5PXgdyQYbK+6olqj0d5rjYuwX57Ul +k6SroPS1U6UbdCjG5txM+BNGU0VpD0ZhrIRw0leQdnNcCO9sTJuInZrgYacSVJ7u +mtB+uCt+uzUesc+l+xPRYA+9e14lLkZp7AAmo9FvL816XDI09deehJ3i/LmHKCRN +tuqC5TprRjFwUr6dAgEDAoIBgG5w2Z8fNfycs0+LCnmHdJLVEotR6KFVWMpwHMz7 +wKJgJgS/Y6FMuilc8oKAuroCy11dTO5IGVKOP3uorVx2NgQtBPXwWeDGgAiU1A3Q +o4wXjYIEm4fCd63jyYPYZ2ckYXzDbjmOTdstYdPyzIhGGNEZK6eoqsRzMAPfYFPj +IMdCqHSIu6vJw1K7p+myHOsVoWshjODaZnF3LYSA0WaZ8vokjwBxUxuRxQJZjJds +s60XPtmL+qfgWtQFewoG4XL6GuD8FcXccynRRtzrLtFNPIl9BQfWfjBBhTC1/Te1 +0Z6XbZvpdUTD9OfLB7SbR2OUFNpKQgriO0iYVdbW3cr7uu38Zwp4W1TX73DPjoi6 +KNooP6SGWd4mRJW2+dUmSYS4QNG8eVVZswKcploEIXlAKRsOe4kzJJ1iETugIe85 +uX8nd1WYEp65xwoRUg8hqng0MeyveVbXqNKuJG6tzNDt9kgFYo+hmC/oouAW2Dtc +T9jdRAwKJXqA2Eg6OkgXCEv+kwKBwQDYaQiFMlFhsmLlqI+EzCUh7c941/cL7m6U +7j98+8ngl0HgCEcrc10iJVCKakQW3YbPzAx3XkKTaGjWazvvrFarXIGlOud64B8a +iWyQ7VdlnmZnNEdk+C83tI91OQeaTKqRLDGzKh29Ry/jL8Pcbazt+kDgxa0H7qJp +roADUanLQuNkYubpbhFBh3xpa2EExaVq6rF7nIVsD8W9TrbmPKA4LgH7z0iy544D +kVCNYsTjYDdUWP+WiSor8kCnnpjnN9sCgcEAw/eNezUD1UDf6OYFC9+5JZJFn4Tg +mZMyN93JKIb199ffwnjtHUSjcyiWeesXucpzwtGbTcwQnDisSW4oneYKLSEBlBaq +scqiUugyGZZOthFSCbdXYXMViK2vHrKlkse7GxVlROKcEhM/pRBrmjaGO8eWR+D4 +FO2wCXzVs3KgV6j779frw0vC54oHOxc9+Lu1rSHp4i+600koyvL/zF6U/5tZXIvN +YW2yoiQJnjCmVA1pwbwV6KAUTPDTMnBK+YjnAoHBAJBGBa4hi5Z27JkbCliIGMFJ +NPs6pLKe9GNJf6in2+sPgUAFhMeiPhbDiwbxgrnpBIqICE+ULGJFmzmc0p/IOceT +ARjR76dAFLxbnbXzj5kURETNhO36yiUjCk4mBRGIcbYddndxaSjaH+zKgpLzyJ6m +1esuc1qfFvEfAAI2cTIsl5hB70ZJYNZaUvDyQK3ZGPHxy6e9rkgKg9OJz0QoatAe +q/002yHvtAJg4F5B2JeVejg7VQ8GHB1MKxppu0TP5wKBwQCCpQj8zgKOKz/wmViy +lSYZDC5qWJW7t3bP6TDFr06lOpUsUJ4TgxeiGw778g/RMaKB4RIz3WBoJcgw9BsT +7rFza1ZiucchMcGMmswRDt8kC4wGejpA92Owc8oUdxkMhSdnY5jYlxK2t3/DYEe8 +JFl9L7mFQKVjSSAGUzkiTGrlG1Kf5UfXh9dFBq98uilQfSPIwUaWynyM23CHTKqI +Pw3/vOY9sojrnncWwrEUIG7is5vWfWPwargzSzd29YdRBe8CgcEAuRVewK/YeNOX +B7ZG6gKKsfsvrGtY7FPETzLZAHjoVXYNea4LVZ2kn4hBXXlvw/4HD+YqcTt4wmif +5JQlDvjNobUiKJZpzy7hklVhF7wZFl4pCF7Yh43q9iQ7gKTaeUG7MiaK+G8Zz8aY +HW9rsiihbdZkccMvnPfO9334XMxl3HtBRzLstjUlbLB7Sdh+7tZ3JQidCOFNs5pE +XyWwnASPu4tKfDahH1UUTp1uJcq/6716CSWg080avYxFcn75qqsb +-----END RSA PRIVATE KEY----- diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c new file mode 100644 index 000000000000..dee7a3d6c5a5 --- /dev/null +++ b/tools/testing/selftests/sgx/sigstruct.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. 
*/ + +#define _GNU_SOURCE +#include <assert.h> +#include <getopt.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <openssl/err.h> +#include <openssl/pem.h> +#include "defines.h" +#include "main.h" + +struct q1q2_ctx { + BN_CTX *bn_ctx; + BIGNUM *m; + BIGNUM *s; + BIGNUM *q1; + BIGNUM *qr; + BIGNUM *q2; +}; + +static void free_q1q2_ctx(struct q1q2_ctx *ctx) +{ + BN_CTX_free(ctx->bn_ctx); + BN_free(ctx->m); + BN_free(ctx->s); + BN_free(ctx->q1); + BN_free(ctx->qr); + BN_free(ctx->q2); +} + +static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m, + struct q1q2_ctx *ctx) +{ + ctx->bn_ctx = BN_CTX_new(); + ctx->s = BN_bin2bn(s, SGX_MODULUS_SIZE, NULL); + ctx->m = BN_bin2bn(m, SGX_MODULUS_SIZE, NULL); + ctx->q1 = BN_new(); + ctx->qr = BN_new(); + ctx->q2 = BN_new(); + + if (!ctx->bn_ctx || !ctx->s || !ctx->m || !ctx->q1 || !ctx->qr || + !ctx->q2) { + free_q1q2_ctx(ctx); + return false; + } + + return true; +} + +static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1, + uint8_t *q2) +{ + struct q1q2_ctx ctx; + + if (!alloc_q1q2_ctx(s, m, &ctx)) { + fprintf(stderr, "Not enough memory for Q1Q2 calculation\n"); + return false; + } + + if (!BN_mul(ctx.q1, ctx.s, ctx.s, ctx.bn_ctx)) + goto out; + + if (!BN_div(ctx.q1, ctx.qr, ctx.q1, ctx.m, ctx.bn_ctx)) + goto out; + + if (BN_num_bytes(ctx.q1) > SGX_MODULUS_SIZE) { + fprintf(stderr, "Too large Q1 %d bytes\n", + BN_num_bytes(ctx.q1)); + goto out; + } + + if (!BN_mul(ctx.q2, ctx.s, ctx.qr, ctx.bn_ctx)) + goto out; + + if (!BN_div(ctx.q2, NULL, ctx.q2, ctx.m, ctx.bn_ctx)) + goto out; + + if (BN_num_bytes(ctx.q2) > SGX_MODULUS_SIZE) { + fprintf(stderr, "Too large Q2 %d bytes\n", + BN_num_bytes(ctx.q2)); + goto out; + } + + BN_bn2bin(ctx.q1, q1); + BN_bn2bin(ctx.q2, q2); + + free_q1q2_ctx(&ctx); + return true; +out: + free_q1q2_ctx(&ctx); + return false; +} + +struct sgx_sigstruct_payload { + struct sgx_sigstruct_header header; + struct sgx_sigstruct_body body; +}; + +static bool check_crypto_errors(void) +{ + int err; + bool had_errors = false; + const char *filename; + int line; + char str[256]; + + for ( ; ; ) { + if (ERR_peek_error() == 0) + break; + + had_errors = true; + err = ERR_get_error_line(&filename, &line); + ERR_error_string_n(err, str, sizeof(str)); + fprintf(stderr, "crypto: %s: %s:%d\n", str, filename, line); + } + + return had_errors; +} + +static inline const BIGNUM *get_modulus(RSA *key) +{ + const BIGNUM *n; + + RSA_get0_key(key, &n, NULL, NULL); + return n; +} + +static RSA *gen_sign_key(void) +{ + unsigned long sign_key_length; + BIO *bio; + RSA *key; + + sign_key_length = (unsigned long)&sign_key_end - + (unsigned long)&sign_key; + + bio = BIO_new_mem_buf(&sign_key, sign_key_length); + if (!bio) + return NULL; + + key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, NULL); + BIO_free(bio); + + return key; +} + +static void reverse_bytes(void *data, int length) +{ + int i = 0; + int j = length - 1; + uint8_t temp; + uint8_t *ptr = data; + + while (i < j) { + temp = ptr[i]; + ptr[i] = ptr[j]; + ptr[j] = temp; + i++; + j--; + } +} + +enum mrtags { + MRECREATE = 0x0045544145524345, + MREADD = 0x0000000044444145, + MREEXTEND = 0x00444E4554584545, +}; + +static bool mrenclave_update(EVP_MD_CTX *ctx, const void *data) +{ + if (!EVP_DigestUpdate(ctx, data, 64)) { + fprintf(stderr, "digest update failed\n"); + return false; + } + + return true; +} + +static bool 
mrenclave_commit(EVP_MD_CTX *ctx, uint8_t *mrenclave) +{ + unsigned int size; + + if (!EVP_DigestFinal_ex(ctx, (unsigned char *)mrenclave, &size)) { + fprintf(stderr, "digest commit failed\n"); + return false; + } + + if (size != 32) { + fprintf(stderr, "invalid digest size = %u\n", size); + return false; + } + + return true; +} + +struct mrecreate { + uint64_t tag; + uint32_t ssaframesize; + uint64_t size; + uint8_t reserved[44]; +} __attribute__((__packed__)); + + +static bool mrenclave_ecreate(EVP_MD_CTX *ctx, uint64_t blob_size) +{ + struct mrecreate mrecreate; + uint64_t encl_size; + + for (encl_size = 0x1000; encl_size < blob_size; ) + encl_size <<= 1; + + memset(&mrecreate, 0, sizeof(mrecreate)); + mrecreate.tag = MRECREATE; + mrecreate.ssaframesize = 1; + mrecreate.size = encl_size; + + if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL)) + return false; + + return mrenclave_update(ctx, &mrecreate); +} + +struct mreadd { + uint64_t tag; + uint64_t offset; + uint64_t flags; /* SECINFO flags */ + uint8_t reserved[40]; +} __attribute__((__packed__)); + +static bool mrenclave_eadd(EVP_MD_CTX *ctx, uint64_t offset, uint64_t flags) +{ + struct mreadd mreadd; + + memset(&mreadd, 0, sizeof(mreadd)); + mreadd.tag = MREADD; + mreadd.offset = offset; + mreadd.flags = flags; + + return mrenclave_update(ctx, &mreadd); +} + +struct mreextend { + uint64_t tag; + uint64_t offset; + uint8_t reserved[48]; +} __attribute__((__packed__)); + +static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset, + const uint8_t *data) +{ + struct mreextend mreextend; + int i; + + for (i = 0; i < 0x1000; i += 0x100) { + memset(&mreextend, 0, sizeof(mreextend)); + mreextend.tag = MREEXTEND; + mreextend.offset = offset + i; + + if (!mrenclave_update(ctx, &mreextend)) + return false; + + if (!mrenclave_update(ctx, &data[i + 0x00])) + return false; + + if (!mrenclave_update(ctx, &data[i + 0x40])) + return false; + + if (!mrenclave_update(ctx, &data[i + 0x80])) + return false; + + if (!mrenclave_update(ctx, &data[i + 0xC0])) + return false; + } + + return true; +} + +static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl, + struct encl_segment *seg) +{ + uint64_t end = seg->offset + seg->size; + uint64_t offset; + + for (offset = seg->offset; offset < end; offset += PAGE_SIZE) { + if (!mrenclave_eadd(ctx, offset, seg->flags)) + return false; + + if (!mrenclave_eextend(ctx, offset, encl->src + offset)) + return false; + } + + return true; +} + +bool encl_measure(struct encl *encl) +{ + uint64_t header1[2] = {0x000000E100000006, 0x0000000000010000}; + uint64_t header2[2] = {0x0000006000000101, 0x0000000100000060}; + struct sgx_sigstruct *sigstruct = &encl->sigstruct; + struct sgx_sigstruct_payload payload; + uint8_t digest[SHA256_DIGEST_LENGTH]; + unsigned int siglen; + RSA *key = NULL; + EVP_MD_CTX *ctx; + int i; + + memset(sigstruct, 0, sizeof(*sigstruct)); + + sigstruct->header.header1[0] = header1[0]; + sigstruct->header.header1[1] = header1[1]; + sigstruct->header.header2[0] = header2[0]; + sigstruct->header.header2[1] = header2[1]; + sigstruct->exponent = 3; + sigstruct->body.attributes = SGX_ATTR_MODE64BIT; + sigstruct->body.xfrm = 3; + + /* sanity check */ + if (check_crypto_errors()) + goto err; + + key = gen_sign_key(); + if (!key) { + ERR_print_errors_fp(stdout); + goto err; + } + + BN_bn2bin(get_modulus(key), sigstruct->modulus); + + ctx = EVP_MD_CTX_create(); + if (!ctx) + goto err; + + if (!mrenclave_ecreate(ctx, encl->src_size)) + goto err; + + for (i = 0; i < encl->nr_segments; i++) { + struct 
encl_segment *seg = &encl->segment_tbl[i]; + + if (!mrenclave_segment(ctx, encl, seg)) + goto err; + } + + if (!mrenclave_commit(ctx, sigstruct->body.mrenclave)) + goto err; + + memcpy(&payload.header, &sigstruct->header, sizeof(sigstruct->header)); + memcpy(&payload.body, &sigstruct->body, sizeof(sigstruct->body)); + + SHA256((unsigned char *)&payload, sizeof(payload), digest); + + if (!RSA_sign(NID_sha256, digest, SHA256_DIGEST_LENGTH, + sigstruct->signature, &siglen, key)) + goto err; + + if (!calc_q1q2(sigstruct->signature, sigstruct->modulus, sigstruct->q1, + sigstruct->q2)) + goto err; + + /* BE -> LE */ + reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE); + reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE); + reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE); + reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE); + + EVP_MD_CTX_destroy(ctx); + RSA_free(key); + return true; + +err: + EVP_MD_CTX_destroy(ctx); + RSA_free(key); + return false; +} diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c new file mode 100644 index 000000000000..cf25b5dc1e03 --- /dev/null +++ b/tools/testing/selftests/sgx/test_encl.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-20 Intel Corporation. */ + +#include <stddef.h> +#include "defines.h" + +static void *memcpy(void *dest, const void *src, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + ((char *)dest)[i] = ((char *)src)[i]; + + return dest; +} + +void encl_body(void *rdi, void *rsi) +{ + memcpy(rsi, rdi, 8); +} diff --git a/tools/testing/selftests/sgx/test_encl.lds b/tools/testing/selftests/sgx/test_encl.lds new file mode 100644 index 000000000000..0fbbda7e665e --- /dev/null +++ b/tools/testing/selftests/sgx/test_encl.lds @@ -0,0 +1,40 @@ +OUTPUT_FORMAT(elf64-x86-64) + +PHDRS +{ + tcs PT_LOAD; + text PT_LOAD; + data PT_LOAD; +} + +SECTIONS +{ + . = 0; + .tcs : { + *(.tcs*) + } : tcs + + . = ALIGN(4096); + .text : { + *(.text*) + *(.rodata*) + } : text + + . = ALIGN(4096); + .data : { + *(.data*) + } : data + + /DISCARD/ : { + *(.comment*) + *(.note*) + *(.debug*) + *(.eh_frame*) + } +} + +ASSERT(!DEFINED(.altinstructions), "ALTERNATIVES are not supported in enclaves") +ASSERT(!DEFINED(.altinstr_replacement), "ALTERNATIVES are not supported in enclaves") +ASSERT(!DEFINED(.discard.retpoline_safe), "RETPOLINE ALTERNATIVES are not supported in enclaves") +ASSERT(!DEFINED(.discard.nospec), "RETPOLINE ALTERNATIVES are not supported in enclaves") +ASSERT(!DEFINED(.got.plt), "Libcalls are not supported in enclaves") diff --git a/tools/testing/selftests/sgx/test_encl_bootstrap.S b/tools/testing/selftests/sgx/test_encl_bootstrap.S new file mode 100644 index 000000000000..5d5680d4ea39 --- /dev/null +++ b/tools/testing/selftests/sgx/test_encl_bootstrap.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2016-20 Intel Corporation. + */ + + .macro ENCLU + .byte 0x0f, 0x01, 0xd7 + .endm + + .section ".tcs", "aw" + .balign 4096 + + .fill 1, 8, 0 # STATE (set by CPU) + .fill 1, 8, 0 # FLAGS + .quad encl_ssa # OSSA + .fill 1, 4, 0 # CSSA (set by CPU) + .fill 1, 4, 1 # NSSA + .quad encl_entry # OENTRY + .fill 1, 8, 0 # AEP (set by EENTER and ERESUME) + .fill 1, 8, 0 # OFSBASE + .fill 1, 8, 0 # OGSBASE + .fill 1, 4, 0xFFFFFFFF # FSLIMIT + .fill 1, 4, 0xFFFFFFFF # GSLIMIT + .fill 4024, 1, 0 # Reserved + + # Identical to the previous TCS. 
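[Editor's aside] The .quad/.fill directives above encode a single 4 KiB Thread Control Structure, and the block that follows is a byte-for-byte copy providing a second TCS. As a reading aid, here is a hypothetical C rendering of that layout, with field names taken from the assembly comments (an illustration only, not the kernel's own definition):

#include <stdint.h>

struct tcs_view {
	uint64_t state;		/* set by CPU */
	uint64_t flags;
	uint64_t ossa;		/* SSA offset, encl_ssa above */
	uint32_t cssa;		/* set by CPU */
	uint32_t nssa;		/* 1: a single SSA frame */
	uint64_t oentry;	/* entry offset, encl_entry above */
	uint64_t aep;		/* set by EENTER and ERESUME */
	uint64_t ofsbase;
	uint64_t ogsbase;
	uint32_t fslimit;	/* 0xFFFFFFFF */
	uint32_t gslimit;	/* 0xFFFFFFFF */
	uint8_t reserved[4024];
};

/* 8+8+8+4+4+8+8+8+8+4+4+4024 == 4096: the TCS fills one page. */
_Static_assert(sizeof(struct tcs_view) == 4096, "TCS must be one 4 KiB page");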
+ .fill 1, 8, 0 # STATE (set by CPU) + .fill 1, 8, 0 # FLAGS + .quad encl_ssa # OSSA + .fill 1, 4, 0 # CSSA (set by CPU) + .fill 1, 4, 1 # NSSA + .quad encl_entry # OENTRY + .fill 1, 8, 0 # AEP (set by EENTER and ERESUME) + .fill 1, 8, 0 # OFSBASE + .fill 1, 8, 0 # OGSBASE + .fill 1, 4, 0xFFFFFFFF # FSLIMIT + .fill 1, 4, 0xFFFFFFFF # GSLIMIT + .fill 4024, 1, 0 # Reserved + + .text + +encl_entry: + # RBX contains the base address for TCS, which is also the first address + # inside the enclave. By adding the value of encl_stack to it, we get + # the absolute address for the stack. + lea (encl_stack)(%rbx), %rax + xchg %rsp, %rax + push %rax + + push %rcx # push the address after EENTER + push %rbx # push the enclave base address + + call encl_body + + pop %rbx # pop the enclave base address + + /* Clear volatile GPRs, except RAX (EEXIT function). */ + xor %rcx, %rcx + xor %rdx, %rdx + xor %rdi, %rdi + xor %rsi, %rsi + xor %r8, %r8 + xor %r9, %r9 + xor %r10, %r10 + xor %r11, %r11 + + # Reset status flags. + add %rdx, %rdx # OF = SF = AF = CF = 0; ZF = PF = 1 + + # Prepare EEXIT target by popping the address of the instruction after + # EENTER to RBX. + pop %rbx + + # Restore the caller stack. + pop %rax + mov %rax, %rsp + + # EEXIT + mov $4, %rax + enclu + + .section ".data", "aw" + +encl_ssa: + .space 4096 + + .balign 4096 + .space 8192 +encl_stack: diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index 0f89cd50a94b..152ffa45e857 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json @@ -54,7 +54,7 @@ "setup": [ "$TC qdisc add dev $DEV2 ingress" ], - "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok", + "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress handle 0xffffffff flower action ok", "expExitCode": "0", "verifyCmd": "$TC filter show dev $DEV2 ingress", "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff", @@ -99,9 +99,9 @@ }, "setup": [ "$TC qdisc add dev $DEV2 ingress", - "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" + "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" ], - "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", + "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", "expExitCode": "2", "verifyCmd": "$TC -s filter show dev $DEV2 ingress", "matchPattern": "filter protocol ip pref 1 flower chain 0 handle", diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py index 6a2bd2cf528e..995f66ce43eb 100755 --- a/tools/testing/selftests/tc-testing/tdc_batch.py +++ b/tools/testing/selftests/tc-testing/tdc_batch.py @@ -72,21 +72,21 @@ mac_prefix = args.mac_prefix def format_add_filter(device, prio, handle, skip, src_mac, dst_mac, share_action): - return ("filter add dev {} {} protocol ip parent ffff: handle {} " + return ("filter add dev {} {} protocol 
ip ingress handle {} " " flower {} src_mac {} dst_mac {} action drop {}".format( device, prio, handle, skip, src_mac, dst_mac, share_action)) def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac, share_action): - return ("filter replace dev {} {} protocol ip parent ffff: handle {} " + return ("filter replace dev {} {} protocol ip ingress handle {} " " flower {} src_mac {} dst_mac {} action drop {}".format( device, prio, handle, skip, src_mac, dst_mac, share_action)) def format_del_filter(device, prio, handle, skip, src_mac, dst_mac, share_action): - return ("filter del dev {} {} protocol ip parent ffff: handle {} " + return ("filter del dev {} {} protocol ip ingress handle {} " "flower".format(device, prio, handle)) diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 7656c7ce79d9..0e73a16874c4 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \ TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS) +TEST_FILES := settings include ../lib.mk diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/timers/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh index 80521d46220c..31fb8265f643 100755 --- a/tools/testing/selftests/tpm2/test_smoke.sh +++ b/tools/testing/selftests/tpm2/test_smoke.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) python -m unittest -v tpm2_tests.SmokeTest diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh index a6f5e346635e..3ded3011b642 100755 --- a/tools/testing/selftests/tpm2/test_space.sh +++ b/tools/testing/selftests/tpm2/test_space.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) python -m unittest -v tpm2_tests.SpaceTest diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c index 5a2d7b8efc40..312889edb84a 100644 --- a/tools/testing/selftests/vm/map_hugetlb.c +++ b/tools/testing/selftests/vm/map_hugetlb.c @@ -45,20 +45,20 @@ static void check_bytes(char *addr) printf("First hex is %x\n", *((unsigned int *)addr)); } -static void write_bytes(char *addr) +static void write_bytes(char *addr, size_t length) { unsigned long i; - for (i = 0; i < LENGTH; i++) + for (i = 0; i < length; i++) *(addr + i) = (char)i; } -static int read_bytes(char *addr) +static int read_bytes(char *addr, size_t length) { unsigned long i; check_bytes(addr); - for (i = 0; i < LENGTH; i++) + for (i = 0; i < length; i++) if (*(addr + i) != (char)i) { printf("Mismatch at %lu\n", i); return 1; @@ -83,7 +83,7 @@ int main(int argc, char **argv) } if (shift) - printf("%u kB hugepages\n", 1 << shift); + printf("%u kB hugepages\n", 1 << (shift - 10)); else printf("Default size hugepages\n"); printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20); @@ -96,11 +96,11 @@ int main(int argc, char **argv) printf("Returned address is %p\n", addr); check_bytes(addr); - write_bytes(addr); - ret = read_bytes(addr); + write_bytes(addr, length); + ret = read_bytes(addr, length); /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */ - if (munmap(addr, LENGTH)) { + if (munmap(addr, length)) { 
perror("munmap"); exit(1); } diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c index 637b6d0ac0d0..11b2301f3aa3 100644 --- a/tools/testing/selftests/vm/mlock2-tests.c +++ b/tools/testing/selftests/vm/mlock2-tests.c @@ -67,59 +67,6 @@ out: return ret; } -static uint64_t get_pageflags(unsigned long addr) -{ - FILE *file; - uint64_t pfn; - unsigned long offset; - - file = fopen("/proc/self/pagemap", "r"); - if (!file) { - perror("fopen pagemap"); - _exit(1); - } - - offset = addr / getpagesize() * sizeof(pfn); - - if (fseek(file, offset, SEEK_SET)) { - perror("fseek pagemap"); - _exit(1); - } - - if (fread(&pfn, sizeof(pfn), 1, file) != 1) { - perror("fread pagemap"); - _exit(1); - } - - fclose(file); - return pfn; -} - -static uint64_t get_kpageflags(unsigned long pfn) -{ - uint64_t flags; - FILE *file; - - file = fopen("/proc/kpageflags", "r"); - if (!file) { - perror("fopen kpageflags"); - _exit(1); - } - - if (fseek(file, pfn * sizeof(flags), SEEK_SET)) { - perror("fseek kpageflags"); - _exit(1); - } - - if (fread(&flags, sizeof(flags), 1, file) != 1) { - perror("fread kpageflags"); - _exit(1); - } - - fclose(file); - return flags; -} - #define VMFLAGS "VmFlags:" static bool is_vmflag_set(unsigned long addr, const char *vmflag) @@ -159,19 +106,13 @@ out: #define RSS "Rss:" #define LOCKED "lo" -static bool is_vma_lock_on_fault(unsigned long addr) +static unsigned long get_value_for_name(unsigned long addr, const char *name) { - bool ret = false; - bool locked; - FILE *smaps = NULL; - unsigned long vma_size, vma_rss; char *line = NULL; - char *value; size_t size = 0; - - locked = is_vmflag_set(addr, LOCKED); - if (!locked) - goto out; + char *value_ptr; + FILE *smaps = NULL; + unsigned long value = -1UL; smaps = seek_to_smaps_entry(addr); if (!smaps) { @@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr) } while (getline(&line, &size, smaps) > 0) { - if (!strstr(line, SIZE)) { + if (!strstr(line, name)) { free(line); line = NULL; size = 0; continue; } - value = line + strlen(SIZE); - if (sscanf(value, "%lu kB", &vma_size) < 1) { + value_ptr = line + strlen(name); + if (sscanf(value_ptr, "%lu kB", &value) < 1) { printf("Unable to parse smaps entry for Size\n"); goto out; } break; } - while (getline(&line, &size, smaps) > 0) { - if (!strstr(line, RSS)) { - free(line); - line = NULL; - size = 0; - continue; - } - - value = line + strlen(RSS); - if (sscanf(value, "%lu kB", &vma_rss) < 1) { - printf("Unable to parse smaps entry for Rss\n"); - goto out; - } - break; - } - - ret = locked && (vma_rss < vma_size); out: - free(line); if (smaps) fclose(smaps); - return ret; + free(line); + return value; +} + +static bool is_vma_lock_on_fault(unsigned long addr) +{ + bool locked; + unsigned long vma_size, vma_rss; + + locked = is_vmflag_set(addr, LOCKED); + if (!locked) + return false; + + vma_size = get_value_for_name(addr, SIZE); + vma_rss = get_value_for_name(addr, RSS); + + /* only one page is faulted in */ + return (vma_rss < vma_size); } #define PRESENT_BIT 0x8000000000000000ULL #define PFN_MASK 0x007FFFFFFFFFFFFFULL #define UNEVICTABLE_BIT (1UL << 18) -static int lock_check(char *map) +static int lock_check(unsigned long addr) { - unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; + bool locked; + unsigned long vma_size, vma_rss; - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); + locked = is_vmflag_set(addr, LOCKED); + if (!locked) + 
return false; - /* Both pages should be present */ - if (((page1_flags & PRESENT_BIT) == 0) || - ((page2_flags & PRESENT_BIT) == 0)) { - printf("Failed to make both pages present\n"); - return 1; - } + vma_size = get_value_for_name(addr, SIZE); + vma_rss = get_value_for_name(addr, RSS); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); - - /* Both pages should be unevictable */ - if (((page1_flags & UNEVICTABLE_BIT) == 0) || - ((page2_flags & UNEVICTABLE_BIT) == 0)) { - printf("Failed to make both pages unevictable\n"); - return 1; - } - - if (!is_vmflag_set((unsigned long)map, LOCKED)) { - printf("VMA flag %s is missing on page 1\n", LOCKED); - return 1; - } - - if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) { - printf("VMA flag %s is missing on page 2\n", LOCKED); - return 1; - } - - return 0; + return (vma_rss == vma_size); } static int unlock_lock_check(char *map) { - unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; - - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); - - if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) { - printf("A page is still marked unevictable after unlock\n"); - return 1; - } - if (is_vmflag_set((unsigned long)map, LOCKED)) { printf("VMA flag %s is present on page 1 after unlock\n", LOCKED); return 1; } - if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) { - printf("VMA flag %s is present on page 2 after unlock\n", LOCKED); - return 1; - } - return 0; } @@ -311,7 +210,7 @@ static int test_mlock_lock() goto unmap; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; /* Now unlock and recheck attributes */ @@ -330,64 +229,18 @@ out: static int onfault_check(char *map) { - unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; - - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - - /* Neither page should be present */ - if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) { - printf("Pages were made present by MLOCK_ONFAULT\n"); - return 1; - } - *map = 'a'; - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - - /* Only page 1 should be present */ - if ((page1_flags & PRESENT_BIT) == 0) { - printf("Page 1 is not present after fault\n"); - return 1; - } else if (page2_flags & PRESENT_BIT) { - printf("Page 2 was made present\n"); - return 1; - } - - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - - /* Page 1 should be unevictable */ - if ((page1_flags & UNEVICTABLE_BIT) == 0) { - printf("Failed to make faulted page unevictable\n"); - return 1; - } - if (!is_vma_lock_on_fault((unsigned long)map)) { printf("VMA is not marked for lock on fault\n"); return 1; } - if (!is_vma_lock_on_fault((unsigned long)map + page_size)) { - printf("VMA is not marked for lock on fault\n"); - return 1; - } - return 0; } static int unlock_onfault_check(char *map) { unsigned long page_size = getpagesize(); - uint64_t page1_flags; - - page1_flags = get_pageflags((unsigned long)map); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - - if (page1_flags & UNEVICTABLE_BIT) { - printf("Page 1 is still marked unevictable after unlock\n"); - return 1; - } if (is_vma_lock_on_fault((unsigned 
long)map) || is_vma_lock_on_fault((unsigned long)map + page_size)) { @@ -445,7 +298,6 @@ static int test_lock_onfault_of_present() char *map; int ret = 1; unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -465,17 +317,6 @@ static int test_lock_onfault_of_present() goto unmap; } - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); - - /* Page 1 should be unevictable */ - if ((page1_flags & UNEVICTABLE_BIT) == 0) { - printf("Failed to make present page unevictable\n"); - goto unmap; - } - if (!is_vma_lock_on_fault((unsigned long)map) || !is_vma_lock_on_fault((unsigned long)map + page_size)) { printf("VMA with present pages is not marked lock on fault\n"); @@ -507,7 +348,7 @@ static int test_munlockall() goto out; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; if (munlockall()) { @@ -549,7 +390,7 @@ static int test_munlockall() goto out; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; if (munlockall()) { diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 5d49bfec1e9a..ac44a33b4c39 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -13,7 +13,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl ioperm \ protection_keys test_vdso test_vsyscall mov_ss_trap \ - syscall_arg_fault + syscall_arg_fault fsgsbase_restore TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer @@ -70,10 +70,10 @@ all_64: $(BINARIES_64) EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64) -$(BINARIES_32): $(OUTPUT)/%_32: %.c +$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm -$(BINARIES_64): $(OUTPUT)/%_64: %.c +$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl # x86_64 users should be encouraged to install 32-bit libraries diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c index 15a329da59fa..8c780cce941d 100644 --- a/tools/testing/selftests/x86/fsgsbase.c +++ b/tools/testing/selftests/x86/fsgsbase.c @@ -285,7 +285,8 @@ static unsigned short load_gs(void) /* 32-bit set_thread_area */ long ret; asm volatile ("int $0x80" - : "=a" (ret) : "a" (243), "b" (low_desc) + : "=a" (ret), "+m" (*low_desc) + : "a" (243), "b" (low_desc) : "r8", "r9", "r10", "r11"); memcpy(&desc, low_desc, sizeof(desc)); munmap(low_desc, sizeof(desc)); @@ -391,8 +392,8 @@ static void set_gs_and_switch_to(unsigned long local, local = read_base(GS); /* - * Signal delivery seems to mess up weird selectors. Put it - * back. + * Signal delivery is quite likely to change a selector + * of 1, 2, or 3 back to 0 due to IRET being defective. 
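+ * (Selectors 1, 2 and 3 index the null GDT slot with a nonzero RPL,
+ * so they are effectively NULL selectors, and IRET does not preserve
+ * them on the return to userspace.)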
*/ asm volatile ("mov %0, %%gs" : : "rm" (force_sel)); } else { @@ -410,6 +411,14 @@ static void set_gs_and_switch_to(unsigned long local, if (base == local && sel_pre_sched == sel_post_sched) { printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n", sel_pre_sched, local); + } else if (base == local && sel_pre_sched >= 1 && sel_pre_sched <= 3 && + sel_post_sched == 0) { + /* + * IRET is misdesigned and will squash selectors 1, 2, or 3 + * to zero. Don't fail the test just because this happened. + */ + printf("[OK]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx because IRET is defective\n", + sel_pre_sched, local, sel_post_sched, base); } else { nerrs++; printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n", @@ -442,6 +451,68 @@ static void test_unexpected_base(void) #define USER_REGS_OFFSET(r) offsetof(struct user_regs_struct, r) +static void test_ptrace_write_gs_read_base(void) +{ + int status; + pid_t child = fork(); + + if (child < 0) + err(1, "fork"); + + if (child == 0) { + printf("[RUN]\tPTRACE_POKE GS, read GSBASE back\n"); + + printf("[RUN]\tARCH_SET_GS to 1\n"); + if (syscall(SYS_arch_prctl, ARCH_SET_GS, 1) != 0) + err(1, "ARCH_SET_GS"); + + if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) + err(1, "PTRACE_TRACEME"); + + raise(SIGTRAP); + _exit(0); + } + + wait(&status); + + if (WSTOPSIG(status) == SIGTRAP) { + unsigned long base; + unsigned long gs_offset = USER_REGS_OFFSET(gs); + unsigned long base_offset = USER_REGS_OFFSET(gs_base); + + /* Read the initial base. It should be 1. */ + base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); + if (base == 1) { + printf("[OK]\tGSBASE started at 1\n"); + } else { + nerrs++; + printf("[FAIL]\tGSBASE started at 0x%lx\n", base); + } + + printf("[RUN]\tSet GS = 0x7, read GSBASE\n"); + + /* Poke an LDT selector into GS. */ + if (ptrace(PTRACE_POKEUSER, child, gs_offset, 0x7) != 0) + err(1, "PTRACE_POKEUSER"); + + /* And read the base. */ + base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL); + + if (base == 0 || base == 1) { + printf("[OK]\tGSBASE reads as 0x%lx with invalid GS\n", base); + } else { + nerrs++; + printf("[FAIL]\tGSBASE=0x%lx (should be 0 or 1)\n", base); + } + } + + ptrace(PTRACE_CONT, child, NULL, NULL); + + wait(&status); + if (!WIFEXITED(status)) + printf("[WARN]\tChild didn't exit cleanly.\n"); +} + static void test_ptrace_write_gsbase(void) { int status; @@ -489,16 +560,36 @@ static void test_ptrace_write_gsbase(void) * selector value is changed or not by the GSBASE write in * a ptracer. */ - if (gs == 0 && base == 0xFF) { - printf("[OK]\tGS was reset as expected\n"); - } else { + if (gs != *shared_scratch) { nerrs++; - printf("[FAIL]\tGS=0x%lx, GSBASE=0x%lx (should be 0, 0xFF)\n", gs, base); + printf("[FAIL]\tGS changed to %lx\n", gs); + + /* + * On older kernels, poking a nonzero value into the + * base would zero the selector. On newer kernels, + * this behavior has changed -- poking the base + * changes only the base and, if FSGSBASE is not + * available, this may have no effect once the tracee + * is resumed. 
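+ * (With FSGSBASE the kernel tracks the base independently of the
+ * selector, so the context switch on resume restores the poked
+ * base instead of re-deriving it from GS.)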
+ */ + if (gs == 0) + printf("\tNote: this is expected behavior on older kernels.\n"); + } else if (have_fsgsbase && (base != 0xFF)) { + nerrs++; + printf("[FAIL]\tGSBASE changed to %lx\n", base); + } else { + printf("[OK]\tGS remained 0x%hx", *shared_scratch); + if (have_fsgsbase) + printf(" and GSBASE changed to 0xFF"); + printf("\n"); } } END: ptrace(PTRACE_CONT, child, NULL, NULL); + wait(&status); + if (!WIFEXITED(status)) + printf("[WARN]\tChild didn't exit cleanly.\n"); } int main() @@ -508,6 +599,9 @@ int main() shared_scratch = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0); + /* Do these tests before we have an LDT. */ + test_ptrace_write_gs_read_base(); + /* Probe FSGSBASE */ sethandler(SIGILL, sigill, 0); if (sigsetjmp(jmpbuf, 1) == 0) { diff --git a/tools/testing/selftests/x86/fsgsbase_restore.c b/tools/testing/selftests/x86/fsgsbase_restore.c new file mode 100644 index 000000000000..6fffadc51579 --- /dev/null +++ b/tools/testing/selftests/x86/fsgsbase_restore.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * fsgsbase_restore.c, test ptrace vs fsgsbase + * Copyright (c) 2020 Andy Lutomirski + * + * This test case simulates a tracer redirecting tracee execution to + * a function and then restoring tracee state using PTRACE_GETREGS and + * PTRACE_SETREGS. This is similar to what gdb does when doing + * 'p func()'. The catch is that this test has the called function + * modify a segment register. This makes sure that ptrace correctly + * restores segment state when using PTRACE_SETREGS. + * + * This is not part of fsgsbase.c, because that test is 64-bit only. + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <string.h> +#include <sys/syscall.h> +#include <unistd.h> +#include <err.h> +#include <sys/user.h> +#include <asm/prctl.h> +#include <sys/prctl.h> +#include <asm/ldt.h> +#include <sys/mman.h> +#include <stddef.h> +#include <sys/ptrace.h> +#include <sys/wait.h> +#include <stdint.h> + +#define EXPECTED_VALUE 0x1337f00d + +#ifdef __x86_64__ +# define SEG "%gs" +#else +# define SEG "%fs" +#endif + +static unsigned int dereference_seg_base(void) +{ + int ret; + asm volatile ("mov %" SEG ":(0), %0" : "=rm" (ret)); + return ret; +} + +static void init_seg(void) +{ + unsigned int *target = mmap( + NULL, sizeof(unsigned int), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0); + if (target == MAP_FAILED) + err(1, "mmap"); + + *target = EXPECTED_VALUE; + + printf("\tsegment base address = 0x%lx\n", (unsigned long)target); + + struct user_desc desc = { + .entry_number = 0, + .base_addr = (unsigned int)(uintptr_t)target, + .limit = sizeof(unsigned int) - 1, + .seg_32bit = 1, + .contents = 0, /* Data, grow-up */ + .read_exec_only = 0, + .limit_in_pages = 0, + .seg_not_present = 0, + .useable = 0 + }; + if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) { + printf("\tusing LDT slot 0\n"); + asm volatile ("mov %0, %" SEG :: "rm" ((unsigned short)0x7)); + } else { + /* No modify_ldt for us (configured out, perhaps) */ + + struct user_desc *low_desc = mmap( + NULL, sizeof(desc), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0); + memcpy(low_desc, &desc, sizeof(desc)); + + low_desc->entry_number = -1; + + /* 32-bit set_thread_area */ + long ret; + asm volatile ("int $0x80" + : "=a" (ret), "+m" (*low_desc) + : "a" (243), "b" (low_desc) +#ifdef __x86_64__ + : "r8", "r9", "r10", "r11" +#endif + ); + memcpy(&desc, low_desc, sizeof(desc)); + 
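+	/*
+	 * The "+m" (*low_desc) output constraint on the int $0x80 asm
+	 * above is what tells the compiler that set_thread_area() writes
+	 * the allocated entry_number back through low_desc; the memcpy
+	 * above relies on seeing that updated value.
+	 */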
munmap(low_desc, sizeof(desc)); + + if (ret != 0) { + printf("[NOTE]\tcould not create a segment -- can't test anything\n"); + exit(0); + } + printf("\tusing GDT slot %d\n", desc.entry_number); + + unsigned short sel = (unsigned short)((desc.entry_number << 3) | 0x3); + asm volatile ("mov %0, %" SEG :: "rm" (sel)); + } +} + +static void tracee_zap_segment(void) +{ + /* + * The tracer will redirect execution here. This is meant to + * work like gdb's 'p func()' feature. The tricky bit is that + * we modify a segment register in order to make sure that ptrace + * can correctly restore segment registers. + */ + printf("\tTracee: in tracee_zap_segment()\n"); + + /* + * Write a nonzero selector with base zero to the segment register. + * Using a null selector would defeat the test on AMD pre-Zen2 + * CPUs, as such CPUs don't clear the base when loading a null + * selector. + */ + unsigned short sel; + asm volatile ("mov %%ss, %0\n\t" + "mov %0, %" SEG + : "=rm" (sel)); + + pid_t pid = getpid(), tid = syscall(SYS_gettid); + + printf("\tTracee is going back to sleep\n"); + syscall(SYS_tgkill, pid, tid, SIGSTOP); + + /* Should not get here. */ + while (true) { + printf("[FAIL]\tTracee hit unreachable code\n"); + pause(); + } +} + +int main() +{ + printf("\tSetting up a segment\n"); + init_seg(); + + unsigned int val = dereference_seg_base(); + if (val != EXPECTED_VALUE) { + printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE); + return 1; + } + printf("[OK]\tThe segment points to the right place.\n"); + + pid_t chld = fork(); + if (chld < 0) + err(1, "fork"); + + if (chld == 0) { + prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0, 0); + + if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) + err(1, "PTRACE_TRACEME"); + + pid_t pid = getpid(), tid = syscall(SYS_gettid); + + printf("\tTracee will take a nap until signaled\n"); + syscall(SYS_tgkill, pid, tid, SIGSTOP); + + printf("\tTracee was resumed. Will re-check segment.\n"); + + val = dereference_seg_base(); + if (val != EXPECTED_VALUE) { + printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE); + exit(1); + } + + printf("[OK]\tThe segment points to the right place.\n"); + exit(0); + } + + int status; + + /* Wait for SIGSTOP. */ + if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status)) + err(1, "waitpid"); + + struct user_regs_struct regs; + + if (ptrace(PTRACE_GETREGS, chld, NULL, &regs) != 0) + err(1, "PTRACE_GETREGS"); + +#ifdef __x86_64__ + printf("\tChild GS=0x%lx, GSBASE=0x%lx\n", (unsigned long)regs.gs, (unsigned long)regs.gs_base); +#else + printf("\tChild FS=0x%lx\n", (unsigned long)regs.xfs); +#endif + + struct user_regs_struct regs2 = regs; +#ifdef __x86_64__ + regs2.rip = (unsigned long)tracee_zap_segment; + regs2.rsp -= 128; /* Don't clobber the redzone. */ +#else + regs2.eip = (unsigned long)tracee_zap_segment; +#endif + + printf("\tTracer: redirecting tracee to tracee_zap_segment()\n"); + if (ptrace(PTRACE_SETREGS, chld, NULL, &regs2) != 0) + err(1, "PTRACE_SETREGS"); + if (ptrace(PTRACE_CONT, chld, NULL, NULL) != 0) + err(1, "PTRACE_CONT"); + + /* Wait for SIGSTOP. */ + if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status)) + err(1, "waitpid"); + + printf("\tTracer: restoring tracee state\n"); + if (ptrace(PTRACE_SETREGS, chld, NULL, &regs) != 0) + err(1, "PTRACE_SETREGS"); + if (ptrace(PTRACE_DETACH, chld, NULL, NULL) != 0) + err(1, "PTRACE_DETACH"); + + /* Wait for the tracee to exit. 
*/ + if (waitpid(chld, &status, 0) != chld) + err(1, "waitpid"); + + if (WIFSIGNALED(status)) { + printf("[FAIL]\tTracee crashed\n"); + return 1; + } + + if (!WIFEXITED(status)) { + printf("[FAIL]\tTracee stopped for an unexpected reason: %d\n", status); + return 1; + } + + int exitcode = WEXITSTATUS(status); + if (exitcode != 0) { + printf("[FAIL]\tTracee reported failure\n"); + return 1; + } + + printf("[OK]\tAll is well.\n"); + return 0; +} diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h new file mode 100644 index 000000000000..4ef42c4559a9 --- /dev/null +++ b/tools/testing/selftests/x86/helpers.h @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __SELFTESTS_X86_HELPERS_H +#define __SELFTESTS_X86_HELPERS_H + +#include <asm/processor-flags.h> + +static inline unsigned long get_eflags(void) +{ +#ifdef __x86_64__ + return __builtin_ia32_readeflags_u64(); +#else + return __builtin_ia32_readeflags_u32(); +#endif +} + +static inline void set_eflags(unsigned long eflags) +{ +#ifdef __x86_64__ + __builtin_ia32_writeeflags_u64(eflags); +#else + __builtin_ia32_writeeflags_u32(eflags); +#endif +} + +#endif /* __SELFTESTS_X86_HELPERS_H */ diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 480995bceefa..47191af46617 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -24,6 +24,7 @@ #define _GNU_SOURCE #include <errno.h> #include <linux/futex.h> +#include <time.h> #include <sys/time.h> #include <sys/syscall.h> #include <string.h> @@ -612,10 +613,10 @@ int alloc_random_pkey(void) int nr_alloced = 0; int random_index; memset(alloced_pkeys, 0, sizeof(alloced_pkeys)); + srand((unsigned int)time(NULL)); /* allocate every possible key and make a note of which ones we got */ max_nr_pkey_allocs = NR_PKEYS; - max_nr_pkey_allocs = 1; for (i = 0; i < max_nr_pkey_allocs; i++) { int new_pkey = alloc_pkey(); if (new_pkey < 0) diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c index 6f22238f3217..12aaa063196e 100644 --- a/tools/testing/selftests/x86/ptrace_syscall.c +++ b/tools/testing/selftests/x86/ptrace_syscall.c @@ -414,8 +414,12 @@ int main() #if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16) vsyscall32 = (void *)getauxval(AT_SYSINFO); - printf("[RUN]\tCheck AT_SYSINFO return regs\n"); - test_sys32_regs(do_full_vsyscall32); + if (vsyscall32) { + printf("[RUN]\tCheck AT_SYSINFO return regs\n"); + test_sys32_regs(do_full_vsyscall32); + } else { + printf("[SKIP]\tAT_SYSINFO is not available\n"); + } #endif test_ptrace_syscall_restart(); diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c index 50ce6c3dd904..d4a19b6f3ee1 100644 --- a/tools/testing/selftests/x86/single_step_syscall.c +++ b/tools/testing/selftests/x86/single_step_syscall.c @@ -31,6 +31,8 @@ #include <sys/ptrace.h> #include <sys/user.h> +#include "helpers.h" + static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { @@ -55,21 +57,6 @@ static volatile sig_atomic_t sig_traps; # define INT80_CLOBBERS #endif -static unsigned long get_eflags(void) -{ - unsigned long eflags; - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); - return eflags; -} - -static void set_eflags(unsigned long eflags) -{ - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH - : : "rm" (eflags) : 
"flags"); -} - -#define X86_EFLAGS_TF (1UL << 8) - static void sigtrap(int sig, siginfo_t *info, void *ctx_void) { ucontext_t *ctx = (ucontext_t*)ctx_void; diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c index bc0ecc2e862e..bff474b5efc6 100644 --- a/tools/testing/selftests/x86/syscall_arg_fault.c +++ b/tools/testing/selftests/x86/syscall_arg_fault.c @@ -15,30 +15,11 @@ #include <setjmp.h> #include <errno.h> -#ifdef __x86_64__ -# define WIDTH "q" -#else -# define WIDTH "l" -#endif +#include "helpers.h" /* Our sigaltstack scratch space. */ static unsigned char altstack_data[SIGSTKSZ]; -static unsigned long get_eflags(void) -{ - unsigned long eflags; - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); - return eflags; -} - -static void set_eflags(unsigned long eflags) -{ - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH - : : "rm" (eflags) : "flags"); -} - -#define X86_EFLAGS_TF (1UL << 8) - static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { @@ -72,6 +53,7 @@ static void sigsegv_or_sigbus(int sig, siginfo_t *info, void *ctx_void) if (ax != -EFAULT && ax != -ENOSYS) { printf("[FAIL]\tAX had the wrong value: 0x%lx\n", (unsigned long)ax); + printf("\tIP = 0x%lx\n", (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); n_errs++; } else { printf("[OK]\tSeems okay\n"); @@ -226,5 +208,30 @@ int main() } set_eflags(get_eflags() & ~X86_EFLAGS_TF); +#ifdef __x86_64__ + printf("[RUN]\tSYSENTER with TF, invalid state, and GSBASE < 0\n"); + + if (sigsetjmp(jmpbuf, 1) == 0) { + sigtrap_consecutive_syscalls = 0; + + asm volatile ("wrgsbase %%rax\n\t" + :: "a" (0xffffffffffff0000UL)); + + set_eflags(get_eflags() | X86_EFLAGS_TF); + asm volatile ( + "movl $-1, %%eax\n\t" + "movl $-1, %%ebx\n\t" + "movl $-1, %%ecx\n\t" + "movl $-1, %%edx\n\t" + "movl $-1, %%esi\n\t" + "movl $-1, %%edi\n\t" + "movl $-1, %%ebp\n\t" + "movl $-1, %%esp\n\t" + "sysenter" + : : : "memory", "flags"); + } + set_eflags(get_eflags() & ~X86_EFLAGS_TF); +#endif + return 0; } diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c index 02309a195041..fa7059cc2343 100644 --- a/tools/testing/selftests/x86/syscall_nt.c +++ b/tools/testing/selftests/x86/syscall_nt.c @@ -13,29 +13,11 @@ #include <signal.h> #include <err.h> #include <sys/syscall.h> -#include <asm/processor-flags.h> -#ifdef __x86_64__ -# define WIDTH "q" -#else -# define WIDTH "l" -#endif +#include "helpers.h" static unsigned int nerrs; -static unsigned long get_eflags(void) -{ - unsigned long eflags; - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); - return eflags; -} - -static void set_eflags(unsigned long eflags) -{ - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH - : : "rm" (eflags) : "flags"); -} - static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { @@ -59,6 +41,7 @@ static void do_it(unsigned long extraflags) set_eflags(get_eflags() | extraflags); syscall(SYS_getpid); flags = get_eflags(); + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); if ((flags & extraflags) == extraflags) { printf("[OK]\tThe syscall worked and flags are still set\n"); } else { diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index a4f4d4cf22c3..c41f24b517f4 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -20,6 +20,8 @@ #include <setjmp.h> #include <sys/uio.h> +#include "helpers.h" + 
#ifdef __x86_64__ # define VSYS(x) (x) #else @@ -493,21 +495,8 @@ static int test_process_vm_readv(void) } #ifdef __x86_64__ -#define X86_EFLAGS_TF (1UL << 8) static volatile sig_atomic_t num_vsyscall_traps; -static unsigned long get_eflags(void) -{ - unsigned long eflags; - asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); - return eflags; -} - -static void set_eflags(unsigned long eflags) -{ - asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); -} - static void sigtrap(int sig, siginfo_t *info, void *ctx_void) { ucontext_t *ctx = (ucontext_t *)ctx_void; diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c index 0075ccd65407..4c311e1af4c7 100644 --- a/tools/testing/selftests/x86/unwind_vdso.c +++ b/tools/testing/selftests/x86/unwind_vdso.c @@ -11,6 +11,8 @@ #include <features.h> #include <stdio.h> +#include "helpers.h" + #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16 int main() @@ -53,27 +55,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), err(1, "sigaction"); } -#ifdef __x86_64__ -# define WIDTH "q" -#else -# define WIDTH "l" -#endif - -static unsigned long get_eflags(void) -{ - unsigned long eflags; - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); - return eflags; -} - -static void set_eflags(unsigned long eflags) -{ - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH - : : "rm" (eflags) : "flags"); -} - -#define X86_EFLAGS_TF (1UL << 8) - static volatile sig_atomic_t nerrs; static unsigned long sysinfo; static bool got_sysinfo = false; diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c index d1d8ba2a4a40..ca78aa368476 100644 --- a/tools/usb/usbip/libsrc/usbip_host_common.c +++ b/tools/usb/usbip/libsrc/usbip_host_common.c @@ -23,7 +23,7 @@ #include "list.h" #include "sysfs_utils.h" -struct udev *udev_context; +extern struct udev *udev_context; static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) { diff --git a/tools/vm/Makefile b/tools/vm/Makefile index 20f6cf04377f..9860622cbb15 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for vm tools # +include ../scripts/Makefile.include + TARGETS=page-types slabinfo page_owner_sort LIB_DIR = ../lib/api diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c index 0a356aa91aa1..f2047fc69006 100644 --- a/virt/kvm/arm/aarch32.c +++ b/virt/kvm/arm/aarch32.c @@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { [7] = { 4, 4 }, /* FIQ, unused */ }; +static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (kvm_arm_vcpu_loaded(vcpu)) { + kvm_arch_vcpu_put(vcpu); + return true; + } + + preempt_enable(); + return false; +} + +static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) +{ + if (loaded) { + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + } +} + /* * When an exception is taken, most CPSR fields are left unchanged in the * handler. However, some are explicitly overridden (e.g. M[4:0]). 
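The pre_fault_synchronize()/post_fault_synchronize() pair added above brackets every 32-bit fault injection that follows: if the vCPU is currently loaded on this physical CPU, its register state lives in hardware, so it must be put (synced back into the in-memory context) before that context is edited, and reloaded afterwards; preemption stays disabled across the loaded case so the vCPU cannot migrate in between. A minimal sketch of the usage pattern the next hunks instantiate:

	bool loaded = pre_fault_synchronize(vcpu);	/* vcpu_put if loaded, preemption off */
	/* ... edit the vcpu's in-memory context ... */
	post_fault_synchronize(vcpu, loaded);		/* vcpu_load again, preemption back on */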
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) void kvm_inject_undef32(struct kvm_vcpu *vcpu) { + bool loaded = pre_fault_synchronize(vcpu); + prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); + post_fault_synchronize(vcpu, loaded); } /* @@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 vect_offset; u32 *far, *fsr; bool is_lpae; + bool loaded; + + loaded = pre_fault_synchronize(vcpu); if (is_pabt) { vect_offset = 12; @@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, /* no need to shuffle FS[4] into DFSR[10] as its 0 */ *fsr = DFSR_FSC_EXTABT_nLPAE; } + + post_fault_synchronize(vcpu, loaded); } void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 86c6aa1cb58e..2e7d2b3f2907 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -354,6 +354,16 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return kvm_vgic_vcpu_init(vcpu); } +#ifdef CONFIG_ARM64 +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) +#else +#define __ptrauth_save_key(regs, key) do { } while (0) +#endif + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; @@ -363,11 +373,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) cpu_data = this_cpu_ptr(&kvm_host_data); /* + * We guarantee that both TLBs and I-cache are private to each + * vcpu. If detecting that a vcpu from the same VM has + * previously run on the same physical CPU, call into the + * hypervisor code to nuke the relevant contexts. + * * We might get preempted before the vCPU actually runs, but * over-invalidation doesn't affect correctness. 
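 * (Per physical CPU, *last_ran records the vcpu_id from this VM that
 * last ran here, so a vCPU migrating between CPUs always flushes on
 * arrival.)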
*/ if (*last_ran != vcpu->vcpu_id) { - kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu); + kvm_call_hyp(__kvm_flush_cpu_context, vcpu); *last_ran = vcpu->vcpu_id; } @@ -386,7 +402,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) else vcpu_set_wfe_traps(vcpu); - vcpu_ptrauth_setup_lazy(vcpu); + if (vcpu_has_ptrauth(vcpu)) { + struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context; + + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + + vcpu_ptrauth_disable(vcpu); + } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c index d31f267961e7..25c0e47d57cb 100644 --- a/virt/kvm/arm/hyp/aarch32.c +++ b/virt/kvm/arm/hyp/aarch32.c @@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) */ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) { + u32 pc = *vcpu_pc(vcpu); bool is_thumb; is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; + pc += 2; else - *vcpu_pc(vcpu) += 4; + pc += 4; + + *vcpu_pc(vcpu) = pc; + kvm_adjust_itstate(vcpu); } diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index f274fabb4301..1e9ec878d56d 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -130,7 +130,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) bool sign_extend; bool sixty_four; - if (kvm_vcpu_dabt_iss1tw(vcpu)) { + if (kvm_vcpu_abt_iss1tw(vcpu)) { /* page table accesses IO mem: tell guest to fix its TTBR */ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); return 1; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index ce7fa37987e1..c6ba672f07cc 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -332,7 +332,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, * destroying the VM), otherwise another faulting VCPU may come in and mess * with things behind our backs. */ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, + bool may_block) { pgd_t *pgd; phys_addr_t addr = start, end = start + size; @@ -357,11 +358,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) * If the range is too large, release the kvm->mmu_lock * to prevent starvation and lockup detector warnings. 
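 * (The new may_block argument below lets callers that must not
 * sleep, such as non-blockable MMU notifiers, skip this resched.)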
*/ - if (next != end) + if (may_block && next != end) cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + __unmap_stage2_range(kvm, start, size, true); +} + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -1199,7 +1205,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, return true; } -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) { pud_t *pudp; pmd_t *pmdp; @@ -1211,11 +1217,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) return false; if (pudp) - return kvm_s2pud_exec(pudp); + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); else if (pmdp) - return kvm_s2pmd_exec(pmdp); + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); else - return kvm_s2pte_exec(ptep); + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); } static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1684,7 +1690,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, unsigned long vma_pagesize, flags = 0; write_fault = kvm_is_write_fault(vcpu); - exec_fault = kvm_vcpu_trap_is_iabt(vcpu); + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); if (fault_status == FSC_PERM && !write_fault && !exec_fault) { @@ -1750,6 +1756,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (kvm_is_device_pfn(pfn)) { mem_type = PAGE_S2_DEVICE; flags |= KVM_S2PTE_FLAG_IS_IOMAP; + force_pte = true; } else if (logging_active) { /* * Faults on pages in a memslot with logging enabled @@ -1805,9 +1812,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * execute permissions, and we preserve whatever we have. */ needs_exec = exec_fault || - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); + (fault_status == FSC_PERM && + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); - if (vma_pagesize == PUD_SIZE) { + /* + * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and + * all we have is a 2-level page table. Trying to map a PUD in + * this case would be fatally wrong. + */ + if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); new_pud = kvm_pud_mkhuge(new_pud); @@ -2044,18 +2057,21 @@ static int handle_hva_to_gpa(struct kvm *kvm, static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) { - unmap_stage2_range(kvm, gpa, size); + unsigned flags = *(unsigned *)data; + bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; + + __unmap_stage2_range(kvm, gpa, size, may_block); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, unsigned flags) { if (!kvm->arch.pgd) return 0; trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); return 0; } @@ -2291,8 +2307,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the IPA * space addressable by the KVM guest IPA space. 
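 * (A slot may end exactly at the IPA limit, which is why the fixed
 * check below applies '>' to the slot end rather than '>='.)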
*/ - if (memslot->base_gfn + memslot->npages >= - (kvm_phys_size(kvm) >> PAGE_SHIFT)) + if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) return -EFAULT; down_read(&current->mm->mmap_sem); diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 87927f7e1ee7..48fde38d64c3 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -408,7 +408,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) val = SMCCC_RET_SUCCESS; break; case KVM_BP_HARDEN_NOT_REQUIRED: - val = SMCCC_RET_NOT_REQUIRED; + val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; break; } break; diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 6f50c429196d..689910153889 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -177,6 +177,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) break; default: kfree(dist->spis); + dist->spis = NULL; return -EINVAL; } } @@ -357,6 +358,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + /* + * Retire all pending LPIs on this vcpu anyway as we're + * going to destroy it. + */ + vgic_flush_pending_lpis(vcpu); + INIT_LIST_HEAD(&vgic_cpu->ap_list_head); } @@ -368,10 +375,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm) vgic_debug_destroy(kvm); - kvm_vgic_dist_destroy(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) kvm_vgic_vcpu_destroy(vcpu); + + kvm_vgic_dist_destroy(kvm); } void kvm_vgic_destroy(struct kvm *kvm) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index f8ad7096555d..35be0e2a4639 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -96,14 +96,21 @@ out_unlock: * We "cache" the configuration table entries in our struct vgic_irq's. * However we only have those structs for mapped IRQs, so we read in * the respective config data from memory here upon mapping the LPI. + * + * Should any of these fail, behave as if we couldn't create the LPI + * by dropping the refcount and returning the error. 
*/ ret = update_lpi_config(kvm, irq, NULL, false); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } ret = vgic_v3_lpi_sync_pending_status(kvm, irq); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } return irq; } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 5945f062d749..d63881f60e1a 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_active, vgic_mmio_write_sactive, - NULL, vgic_mmio_uaccess_write_sactive, 1, + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, vgic_mmio_read_active, vgic_mmio_write_cactive, - NULL, vgic_mmio_uaccess_write_cactive, 1, + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL, diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 7dfd15dbb308..b1e639ea22e9 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -223,6 +223,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, return extract_bytes(value, addr & 7, len); } +static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + int target_vcpu_id = vcpu->vcpu_id; + u64 value; + + value = (u64)(mpidr & GENMASK(23, 0)) << 32; + value |= ((target_vcpu_id & 0xffff) << 8); + + if (vgic_has_its(vcpu->kvm)) + value |= GICR_TYPER_PLPIS; + + /* reporting of the Last bit is not supported for userspace */ + return extract_bytes(value, addr & 7, len); +} + static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len) { @@ -491,11 +508,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER, vgic_mmio_read_active, vgic_mmio_write_sactive, - NULL, vgic_mmio_uaccess_write_sactive, 1, + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER, vgic_mmio_read_active, vgic_mmio_write_cactive, - NULL, vgic_mmio_uaccess_write_cactive, + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR, vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL, @@ -528,8 +545,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { REGISTER_DESC_WITH_LENGTH(GICR_IIDR, vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_LENGTH(GICR_TYPER, - vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8, + REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER, + vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, + vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_WAKER, vgic_mmio_read_raz, vgic_mmio_write_wi, 4, @@ -563,12 +581,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0, vgic_mmio_read_active, vgic_mmio_write_sactive, - NULL, 
vgic_mmio_uaccess_write_sactive, - 4, VGIC_ACCESS_32bit), + vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4, + VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0, vgic_mmio_read_active, vgic_mmio_write_cactive, - NULL, vgic_mmio_uaccess_write_cactive, - 4, VGIC_ACCESS_32bit), + vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4, + VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0, vgic_mmio_read_priority, vgic_mmio_write_priority, 32, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 0d090482720d..fb1dcd397b93 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -300,8 +300,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, } } -unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) + +/* + * If we are fiddling with an IRQ's active state, we have to make sure the IRQ + * is not queued on some running VCPU's LRs, because then the change to the + * active state can be overwritten when the VCPU's state is synced coming back + * from the guest. + * + * For shared interrupts as well as GICv3 private interrupts, we have to + * stop all the VCPUs because interrupts can be migrated while we don't hold + * the IRQ locks and we don't want to be chasing moving targets. + * + * For GICv2 private interrupts we don't have to do anything because + * userspace accesses to the VGIC state already require all VCPUs to be + * stopped, and only the VCPU itself can modify its private interrupts + * active state, which guarantees that the VCPU is not running. + */ +static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid) +{ + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || + intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_halt_guest(vcpu->kvm); +} + +/* See vgic_access_active_prepare */ +static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid) +{ + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || + intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_resume_guest(vcpu->kvm); +} + +static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 value = 0; @@ -311,6 +342,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + /* + * Even for HW interrupts, don't evaluate the HW state as + * all the guest is interested in is the virtual state. 
+ */ if (irq->active) value |= (1U << i); @@ -320,6 +355,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, return value; } +unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 val; + + mutex_lock(&vcpu->kvm->lock); + vgic_access_active_prepare(vcpu, intid); + + val = __vgic_mmio_read_active(vcpu, addr, len); + + vgic_access_active_finish(vcpu, intid); + mutex_unlock(&vcpu->kvm->lock); + + return val; +} + +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __vgic_mmio_read_active(vcpu, addr, len); +} + /* Must be called with irq->irq_lock held */ static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool active, bool is_uaccess) @@ -371,36 +429,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } -/* - * If we are fiddling with an IRQ's active state, we have to make sure the IRQ - * is not queued on some running VCPU's LRs, because then the change to the - * active state can be overwritten when the VCPU's state is synced coming back - * from the guest. - * - * For shared interrupts, we have to stop all the VCPUs because interrupts can - * be migrated while we don't hold the IRQ locks and we don't want to be - * chasing moving targets. - * - * For private interrupts we don't have to do anything because userspace - * accesses to the VGIC state already require all VCPUs to be stopped, and - * only the VCPU itself can modify its private interrupts active state, which - * guarantees that the VCPU is not running. - */ -static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) -{ - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) - kvm_arm_halt_guest(vcpu->kvm); -} - -/* See vgic_change_active_prepare */ -static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) -{ - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) - kvm_arm_resume_guest(vcpu->kvm); -} - static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val) @@ -422,11 +450,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 1); mutex_lock(&vcpu->kvm->lock); - vgic_change_active_prepare(vcpu, intid); + vgic_access_active_prepare(vcpu, intid); __vgic_mmio_write_cactive(vcpu, addr, len, val); - vgic_change_active_finish(vcpu, intid); + vgic_access_active_finish(vcpu, intid); mutex_unlock(&vcpu->kvm->lock); } @@ -459,11 +487,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 1); mutex_lock(&vcpu->kvm->lock); - vgic_change_active_prepare(vcpu, intid); + vgic_access_active_prepare(vcpu, intid); __vgic_mmio_write_sactive(vcpu, addr, len, val); - vgic_change_active_finish(vcpu, intid); + vgic_access_active_finish(vcpu, intid); mutex_unlock(&vcpu->kvm->lock); } diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 836f418f1ee8..b6aff5252429 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -157,6 +157,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len); +unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + void 
vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 8ffd07e2a160..6b870e4b9b97 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -178,21 +178,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { struct kvm_coalesced_mmio_dev *dev, *tmp; + int r; if (zone->pio != 1 && zone->pio != 0) return -EINVAL; mutex_lock(&kvm->slots_lock); - list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) + list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) { if (zone->pio == dev->zone.pio && coalesced_mmio_in_range(dev, zone->addr, zone->size)) { - kvm_io_bus_unregister_dev(kvm, + r = kvm_io_bus_unregister_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); kvm_iodevice_destructor(&dev->dev); + + /* + * On failure, unregister destroys all devices on the + * bus _except_ the target device, i.e. coalesced_zones + * has been modified. No need to restart the walk as + * there aren't any zones left. + */ + if (r) + break; } + } mutex_unlock(&kvm->slots_lock); + /* + * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's + * perspective, the coalesced MMIO is most definitely unregistered. + */ return 0; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 03c681568ab1..f83fa0aeeb45 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -157,10 +157,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); static unsigned long long kvm_createvm_count; static unsigned long long kvm_active_vms; -__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable) +__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) { - return 0; } bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) @@ -186,6 +185,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) */ if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)) && + !is_zero_pfn(pfn) && !kvm_is_zone_device_pfn(pfn); return true; @@ -381,6 +381,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) return container_of(mn, struct kvm, mmu_notifier); } +static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int idx; + + idx = srcu_read_lock(&kvm->srcu); + kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); + srcu_read_unlock(&kvm->srcu, idx); +} + static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, @@ -405,7 +417,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush = 0, idx; - int ret; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); @@ -415,21 +426,16 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, * count is also read inside the mmu_lock critical section. 
*/ kvm->mmu_notifier_count++; - need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end); - need_tlb_flush |= kvm->tlbs_dirty; + need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, + range->flags); /* we've to flush the tlb before the pages can be freed */ - if (need_tlb_flush) + if (need_tlb_flush || kvm->tlbs_dirty) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); - - ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, - range->end, - mmu_notifier_range_blockable(range)); - srcu_read_unlock(&kvm->srcu, idx); - return ret; + return 0; } static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, @@ -535,6 +541,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, } static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { + .invalidate_range = kvm_mmu_notifier_invalidate_range, .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, .clear_flush_young = kvm_mmu_notifier_clear_flush_young, @@ -1010,6 +1017,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* We can read the guest memory with __xxx_user() later on. */ if ((id < KVM_USER_MEM_SLOTS) && ((mem->userspace_addr & (PAGE_SIZE - 1)) || + (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || !access_ok((void __user *)(unsigned long)mem->userspace_addr, mem->memory_size))) goto out; @@ -1590,10 +1598,12 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, bool write_fault, bool *writable, kvm_pfn_t *p_pfn) { - unsigned long pfn; + kvm_pfn_t pfn; + pte_t *ptep; + spinlock_t *ptl; int r; - r = follow_pfn(vma, addr, &pfn); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) { /* * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does @@ -1608,14 +1618,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, if (r) return r; - r = follow_pfn(vma, addr, &pfn); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) return r; + } + if (write_fault && !pte_write(*ptep)) { + pfn = KVM_PFN_ERR_RO_FAULT; + goto out; } if (writable) - *writable = true; + *writable = pte_write(*ptep); + pfn = pte_pfn(*ptep); /* * Get a reference here because callers of *hva_to_pfn* and @@ -1630,6 +1645,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, */ kvm_get_pfn(pfn); +out: + pte_unmap_unlock(ptep, ptl); *p_pfn = pfn; return 0; } @@ -4000,15 +4017,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, } /* Caller must hold slots_lock. 
*/ -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev) +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) { - int i; + int i, j; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); if (!bus) - return; + return 0; for (i = 0; i < bus->dev_count; i++) if (bus->range[i].dev == dev) { @@ -4016,25 +4033,28 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, } if (i == bus->dev_count) - return; + return 0; new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), GFP_KERNEL_ACCOUNT); - if (!new_bus) { + if (new_bus) { + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); + new_bus->dev_count--; + memcpy(new_bus->range + i, bus->range + i + 1, + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); + } else { pr_err("kvm: failed to shrink bus, removing it completely\n"); - goto broken; + for (j = 0; j < bus->dev_count; j++) { + if (j == i) + continue; + kvm_iodevice_destructor(bus->range[j].dev); + } } - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); - new_bus->dev_count--; - memcpy(new_bus->range + i, bus->range + i + 1, - (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); - -broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); - return; + return new_bus ? 0 : -ENOMEM; } struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
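The shrink path above removes entry i from the flexible-array bus with two memcpy() calls, and on allocation failure it now destroys every device left on the bus rather than leaving userspace with a half-torn-down bus and no error. A stand-alone sketch of the removal idiom, using illustrative names (struct bus, bus_without) rather than the kernel's own types:

#include <stdlib.h>
#include <string.h>

struct range { void *dev; };

struct bus {
	size_t dev_count;
	struct range range[];	/* flexible array, like struct kvm_io_bus */
};

/* Return a copy of 'bus' with entry 'i' removed, or NULL on failure. */
static struct bus *bus_without(const struct bus *bus, size_t i)
{
	struct bus *nb = malloc(sizeof(*nb) +
				(bus->dev_count - 1) * sizeof(struct range));
	if (!nb)
		return NULL;	/* the caller decides how to degrade */

	/* Copy the header plus the i entries in front of the removed one... */
	memcpy(nb, bus, sizeof(*nb) + i * sizeof(struct range));
	nb->dev_count--;
	/* ...then the entries behind it, shifted down one slot. */
	memcpy(nb->range + i, bus->range + i + 1,
	       (nb->dev_count - i) * sizeof(struct range));
	return nb;
}

In the kernel the new array is published with rcu_assign_pointer() and the old one freed only after synchronize_srcu_expedited(), exactly as the hunk does.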